deploy: efce3b64b3ee5e20fd1bc98e585406690c98d330
diff --git a/contributors/index.html b/contributors/index.html
index 624d66a..72ce952 100644
--- a/contributors/index.html
+++ b/contributors/index.html
@@ -2084,7 +2084,7 @@
                         </a>
                         
                     </span>
-                    <div class="wankai123 wankai123">211</div>
+                    <div class="wankai123 wankai123">212</div>
                     <div class="wankai123 wankai123">7</div>
                     
                     
@@ -10007,20 +10007,6 @@
                     
                     
 
-                    <span class="pg-yang pg-yang dn">
-                        
-                        <a class="link" href="https://github.com/pg-yang" target="_blank">
-                              <i class="iconfont icon-github"></i>
-                              pg-yang
-                        </a>
-                        
-                    </span>
-                    <div class="pg-yang pg-yang">19</div>
-                    <div class="pg-yang pg-yang">28</div>
-                    
-                    
-                    
-
                     <span class="xu1009 xu1009 dn">
                         
                         <a class="link" href="https://github.com/xu1009" target="_blank">
@@ -10030,7 +10016,21 @@
                         
                     </span>
                     <div class="xu1009 xu1009">19</div>
-                    <div class="xu1009 xu1009">29</div>
+                    <div class="xu1009 xu1009">28</div>
+                    
+                    
+                    
+
+                    <span class="pg-yang pg-yang dn">
+                        
+                        <a class="link" href="https://github.com/pg-yang" target="_blank">
+                              <i class="iconfont icon-github"></i>
+                              pg-yang
+                        </a>
+                        
+                    </span>
+                    <div class="pg-yang pg-yang">19</div>
+                    <div class="pg-yang pg-yang">29</div>
                     
                     
                     
@@ -10147,20 +10147,6 @@
                     
                     
 
-                    <span class="liqiangz liqiangz dn">
-                        
-                        <a class="link" href="https://github.com/liqiangz" target="_blank">
-                              <i class="iconfont icon-github"></i>
-                              liqiangz
-                        </a>
-                        
-                    </span>
-                    <div class="liqiangz liqiangz">13</div>
-                    <div class="liqiangz liqiangz">38</div>
-                    
-                    
-                    
-
                     <span class="nisiyong nisiyong dn">
                         
                         <a class="link" href="https://github.com/nisiyong" target="_blank">
@@ -10170,7 +10156,21 @@
                         
                     </span>
                     <div class="nisiyong nisiyong">13</div>
-                    <div class="nisiyong nisiyong">39</div>
+                    <div class="nisiyong nisiyong">38</div>
+                    
+                    
+                    
+
+                    <span class="liqiangz liqiangz dn">
+                        
+                        <a class="link" href="https://github.com/liqiangz" target="_blank">
+                              <i class="iconfont icon-github"></i>
+                              liqiangz
+                        </a>
+                        
+                    </span>
+                    <div class="liqiangz liqiangz">13</div>
+                    <div class="liqiangz liqiangz">39</div>
                     
                     
                     
@@ -10203,20 +10203,6 @@
                     
                     
 
-                    <span class="bai-yang bai-yang dn">
-                        
-                        <a class="link" href="https://github.com/bai-yang" target="_blank">
-                              <i class="iconfont icon-github"></i>
-                              bai-yang
-                        </a>
-                        
-                    </span>
-                    <div class="bai-yang bai-yang">11</div>
-                    <div class="bai-yang bai-yang">42</div>
-                    
-                    
-                    
-
                     <span class="zhangkewei zhangkewei dn">
                         
                         <a class="link" href="https://github.com/zhangkewei" target="_blank">
@@ -10226,77 +10212,21 @@
                         
                     </span>
                     <div class="zhangkewei zhangkewei">11</div>
-                    <div class="zhangkewei zhangkewei">43</div>
+                    <div class="zhangkewei zhangkewei">42</div>
                     
                     
                     
 
-                    <span class="heyanlong heyanlong dn">
+                    <span class="bai-yang bai-yang dn">
                         
-                        <a class="link" href="https://github.com/heyanlong" target="_blank">
+                        <a class="link" href="https://github.com/bai-yang" target="_blank">
                               <i class="iconfont icon-github"></i>
-                              heyanlong
+                              bai-yang
                         </a>
                         
                     </span>
-                    <div class="heyanlong heyanlong">10</div>
-                    <div class="heyanlong heyanlong">44</div>
-                    
-                    
-                    
-
-                    <span class="xzyJavaX xzyjavax dn">
-                        
-                        <a class="link" href="https://github.com/xzyJavaX" target="_blank">
-                              <i class="iconfont icon-github"></i>
-                              xzyJavaX
-                        </a>
-                        
-                    </span>
-                    <div class="xzyJavaX xzyjavax">10</div>
-                    <div class="xzyJavaX xzyjavax">45</div>
-                    
-                    
-                    
-
-                    <span class="songzhendong songzhendong dn">
-                        
-                        <a class="link" href="https://github.com/songzhendong" target="_blank">
-                              <i class="iconfont icon-github"></i>
-                              songzhendong
-                        </a>
-                        
-                    </span>
-                    <div class="songzhendong songzhendong">10</div>
-                    <div class="songzhendong songzhendong">46</div>
-                    
-                    
-                    
-
-                    <span class="adermxzs adermxzs dn">
-                        
-                        <a class="link" href="https://github.com/adermxzs" target="_blank">
-                              <i class="iconfont icon-github"></i>
-                              adermxzs
-                        </a>
-                        
-                    </span>
-                    <div class="adermxzs adermxzs">10</div>
-                    <div class="adermxzs adermxzs">47</div>
-                    
-                    
-                    
-
-                    <span class="TinyAllen tinyallen dn">
-                        
-                        <a class="link" href="https://github.com/TinyAllen" target="_blank">
-                              <i class="iconfont icon-github"></i>
-                              TinyAllen
-                        </a>
-                        
-                    </span>
-                    <div class="TinyAllen tinyallen">10</div>
-                    <div class="TinyAllen tinyallen">48</div>
+                    <div class="bai-yang bai-yang">11</div>
+                    <div class="bai-yang bai-yang">43</div>
                     
                     
                     
@@ -10310,7 +10240,63 @@
                         
                     </span>
                     <div class="Jtrust jtrust">10</div>
-                    <div class="Jtrust jtrust">49</div>
+                    <div class="Jtrust jtrust">44</div>
+                    
+                    
+                    
+
+                    <span class="heyanlong heyanlong dn">
+                        
+                        <a class="link" href="https://github.com/heyanlong" target="_blank">
+                              <i class="iconfont icon-github"></i>
+                              heyanlong
+                        </a>
+                        
+                    </span>
+                    <div class="heyanlong heyanlong">10</div>
+                    <div class="heyanlong heyanlong">45</div>
+                    
+                    
+                    
+
+                    <span class="xzyJavaX xzyjavax dn">
+                        
+                        <a class="link" href="https://github.com/xzyJavaX" target="_blank">
+                              <i class="iconfont icon-github"></i>
+                              xzyJavaX
+                        </a>
+                        
+                    </span>
+                    <div class="xzyJavaX xzyjavax">10</div>
+                    <div class="xzyJavaX xzyjavax">46</div>
+                    
+                    
+                    
+
+                    <span class="songzhendong songzhendong dn">
+                        
+                        <a class="link" href="https://github.com/songzhendong" target="_blank">
+                              <i class="iconfont icon-github"></i>
+                              songzhendong
+                        </a>
+                        
+                    </span>
+                    <div class="songzhendong songzhendong">10</div>
+                    <div class="songzhendong songzhendong">47</div>
+                    
+                    
+                    
+
+                    <span class="adermxzs adermxzs dn">
+                        
+                        <a class="link" href="https://github.com/adermxzs" target="_blank">
+                              <i class="iconfont icon-github"></i>
+                              adermxzs
+                        </a>
+                        
+                    </span>
+                    <div class="adermxzs adermxzs">10</div>
+                    <div class="adermxzs adermxzs">48</div>
                     
                     
                     
@@ -10324,7 +10310,21 @@
                         
                     </span>
                     <div class="55846420** 55846420**">10</div>
-                    <div class="55846420** 55846420**">50</div>
+                    <div class="55846420** 55846420**">49</div>
+                    
+                    
+                    
+
+                    <span class="TinyAllen tinyallen dn">
+                        
+                        <a class="link" href="https://github.com/TinyAllen" target="_blank">
+                              <i class="iconfont icon-github"></i>
+                              TinyAllen
+                        </a>
+                        
+                    </span>
+                    <div class="TinyAllen tinyallen">10</div>
+                    <div class="TinyAllen tinyallen">50</div>
                     
                     
                     
@@ -10343,16 +10343,16 @@
                     
                     
 
-                    <span class="IluckySi iluckysi dn">
+                    <span class="CzyerChen czyerchen dn">
                         
-                        <a class="link" href="https://github.com/IluckySi" target="_blank">
+                        <a class="link" href="https://github.com/CzyerChen" target="_blank">
                               <i class="iconfont icon-github"></i>
-                              IluckySi
+                              CzyerChen
                         </a>
                         
                     </span>
-                    <div class="IluckySi iluckysi">9</div>
-                    <div class="IluckySi iluckysi">52</div>
+                    <div class="CzyerChen czyerchen">9</div>
+                    <div class="CzyerChen czyerchen">52</div>
                     
                     
                     
@@ -10371,16 +10371,16 @@
                     
                     
 
-                    <span class="wendal wendal dn">
+                    <span class="IluckySi iluckysi dn">
                         
-                        <a class="link" href="https://github.com/wendal" target="_blank">
+                        <a class="link" href="https://github.com/IluckySi" target="_blank">
                               <i class="iconfont icon-github"></i>
-                              wendal
+                              IluckySi
                         </a>
                         
                     </span>
-                    <div class="wendal wendal">8</div>
-                    <div class="wendal wendal">54</div>
+                    <div class="IluckySi iluckysi">9</div>
+                    <div class="IluckySi iluckysi">54</div>
                     
                     
                     
@@ -10399,30 +10399,30 @@
                     
                     
 
-                    <span class="CzyerChen czyerchen dn">
+                    <span class="wendal wendal dn">
                         
-                        <a class="link" href="https://github.com/CzyerChen" target="_blank">
+                        <a class="link" href="https://github.com/wendal" target="_blank">
                               <i class="iconfont icon-github"></i>
-                              CzyerChen
+                              wendal
                         </a>
                         
                     </span>
-                    <div class="CzyerChen czyerchen">8</div>
-                    <div class="CzyerChen czyerchen">56</div>
+                    <div class="wendal wendal">8</div>
+                    <div class="wendal wendal">56</div>
                     
                     
                     
 
-                    <span class="zhyyu zhyyu dn">
+                    <span class="tristaZero tristazero dn">
                         
-                        <a class="link" href="https://github.com/zhyyu" target="_blank">
+                        <a class="link" href="https://github.com/tristaZero" target="_blank">
                               <i class="iconfont icon-github"></i>
-                              zhyyu
+                              tristaZero
                         </a>
                         
                     </span>
-                    <div class="zhyyu zhyyu">7</div>
-                    <div class="zhyyu zhyyu">57</div>
+                    <div class="tristaZero tristazero">7</div>
+                    <div class="tristaZero tristazero">57</div>
                     
                     
                     
@@ -10441,16 +10441,16 @@
                     
                     
 
-                    <span class="tristaZero tristazero dn">
+                    <span class="zhyyu zhyyu dn">
                         
-                        <a class="link" href="https://github.com/tristaZero" target="_blank">
+                        <a class="link" href="https://github.com/zhyyu" target="_blank">
                               <i class="iconfont icon-github"></i>
-                              tristaZero
+                              zhyyu
                         </a>
                         
                     </span>
-                    <div class="tristaZero tristazero">7</div>
-                    <div class="tristaZero tristazero">59</div>
+                    <div class="zhyyu zhyyu">7</div>
+                    <div class="zhyyu zhyyu">59</div>
                     
                     
                     
@@ -21046,7 +21046,7 @@
                         </a>
                         
                     </span>
-                    <div class="wu-sheng wu-sheng">21</div>
+                    <div class="wu-sheng wu-sheng">22</div>
                     <div class="wu-sheng wu-sheng">2</div>
                     
                     
@@ -21060,7 +21060,7 @@
                         </a>
                         
                     </span>
-                    <div class="hanahmily hanahmily">14</div>
+                    <div class="hanahmily hanahmily">15</div>
                     <div class="hanahmily hanahmily">3</div>
                     
                     
diff --git a/docs/main/next/en/academy/diagnose-service-mesh-network-performance-with-ebpf/index.html b/docs/main/next/en/academy/diagnose-service-mesh-network-performance-with-ebpf/index.html
index 1864061..e895476 100644
--- a/docs/main/next/en/academy/diagnose-service-mesh-network-performance-with-ebpf/index.html
+++ b/docs/main/next/en/academy/diagnose-service-mesh-network-performance-with-ebpf/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/academy/scaling-with-apache-skywalking/index.html b/docs/main/next/en/academy/scaling-with-apache-skywalking/index.html
index 76d53ee..36fa1f7 100644
--- a/docs/main/next/en/academy/scaling-with-apache-skywalking/index.html
+++ b/docs/main/next/en/academy/scaling-with-apache-skywalking/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/browser-http-api-protocol/index.html b/docs/main/next/en/api/browser-http-api-protocol/index.html
index aa7e09f..e28cfbb 100644
--- a/docs/main/next/en/api/browser-http-api-protocol/index.html
+++ b/docs/main/next/en/api/browser-http-api-protocol/index.html
@@ -2420,7 +2420,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/browser-protocol/index.html b/docs/main/next/en/api/browser-protocol/index.html
index d7597c2..6220b56 100644
--- a/docs/main/next/en/api/browser-protocol/index.html
+++ b/docs/main/next/en/api/browser-protocol/index.html
@@ -2420,7 +2420,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/event/index.html b/docs/main/next/en/api/event/index.html
index 0ee2261..495d3cd 100644
--- a/docs/main/next/en/api/event/index.html
+++ b/docs/main/next/en/api/event/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/instance-properties/index.html b/docs/main/next/en/api/instance-properties/index.html
index c6606c5..6473b3b 100644
--- a/docs/main/next/en/api/instance-properties/index.html
+++ b/docs/main/next/en/api/instance-properties/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/jvm-protocol/index.html b/docs/main/next/en/api/jvm-protocol/index.html
index 7617bde..20ef59f 100644
--- a/docs/main/next/en/api/jvm-protocol/index.html
+++ b/docs/main/next/en/api/jvm-protocol/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/log-data-protocol/index.html b/docs/main/next/en/api/log-data-protocol/index.html
index 397eb88..f64f2d9 100644
--- a/docs/main/next/en/api/log-data-protocol/index.html
+++ b/docs/main/next/en/api/log-data-protocol/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/logql-service/index.html b/docs/main/next/en/api/logql-service/index.html
index f8b41d4..84a9210 100644
--- a/docs/main/next/en/api/logql-service/index.html
+++ b/docs/main/next/en/api/logql-service/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/meter/index.html b/docs/main/next/en/api/meter/index.html
index 67a8f48..d060b8e 100644
--- a/docs/main/next/en/api/meter/index.html
+++ b/docs/main/next/en/api/meter/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/metrics-query-expression/index.html b/docs/main/next/en/api/metrics-query-expression/index.html
index 74273a6..a3122a2 100644
--- a/docs/main/next/en/api/metrics-query-expression/index.html
+++ b/docs/main/next/en/api/metrics-query-expression/index.html
@@ -43,7 +43,7 @@
 Metrics Expression Metrics Expression will return a collection of time-series values.
 Common Value Metrics Expression:">
 
-<meta itemprop="wordCount" content="2028">
+<meta itemprop="wordCount" content="2309">
 
 
 
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2786,8 +2786,10 @@
 </ul>
 <p>Expression:</p>
 <div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-text" data-lang="text">top_n(&lt;metric_name&gt;, &lt;top_number&gt;, &lt;order&gt;)
-</code></pre></div><p><code>top_number</code> is the number of the top results, should be a positive integer.</p>
-<p><code>order</code> is the order of the top results. The value of <code>order</code> can be <code>asc</code> or <code>des</code>.</p>
+</code></pre></div><ul>
+<li><code>top_number</code> is the number of the top results and should be a positive integer.</li>
+<li><code>order</code> is the order of the top results. The value of <code>order</code> can be <code>asc</code> or <code>des</code>.</li>
+</ul>
 <p>For example:
 If we want to query the current service&rsquo;s top 10 instances with the highest <code>service_instance_cpm</code> metric value, we can use the following expression
 under specific service:</p>
@@ -2808,10 +2810,10 @@
 <h2 id="aggregatelabels-operation">AggregateLabels Operation</h2>
 <p>AggregateLabels Operation takes an expression and performs an aggregate calculation on its <code>Labeled Value Metrics</code> results. It aggregates a group of <code>TIME_SERIES_VALUES</code> into a single <code>TIME_SERIES_VALUES</code>.</p>
 <p>Expression:</p>
-<div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-text" data-lang="text">aggregate_labels(Expression, AggregateType&lt;Optional&gt;(&lt;label1_name&gt;,&lt;label2_name&gt;...))
+<div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-text" data-lang="text">aggregate_labels(Expression, &lt;AggregateType&gt;(&lt;label1_name&gt;,&lt;label2_name&gt;...))
 </code></pre></div><ul>
 <li><code>AggregateType</code> is the type of the aggregation operation.</li>
-<li><code>&lt;label1_name&gt;,&lt;label2_name&gt;...</code> is the label names that need to be aggregated. If not specified, all labels will be aggregated.</li>
+<li><code>&lt;label1_name&gt;,&lt;label2_name&gt;...</code> are the label names that need to be aggregated. Optional; if not specified, all labels will be aggregated.</li>
 </ul>
 <table>
 <thead>
@@ -2877,7 +2879,7 @@
 <p>Trend Operation takes an expression and performs a trend calculation on its results.</p>
 <p>Expression:</p>
 <div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-text" data-lang="text">&lt;Trend-Operator&gt;(Metrics Expression, time_range)
-</code></pre></div><p><code>time_range</code> is the positive int of the calculated range. The unit will automatically align with to the query <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/enumeration/Step.java">Step</a>,
+</code></pre></div><p><code>time_range</code> is a positive integer specifying the calculated range. The unit will automatically align with the query <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/enumeration/Step.java">Step</a>,
 for example, if the query Step is <code>MINUTE</code>, the unit of <code>time_range</code> is <code>minute</code>.</p>
 <table>
 <thead>
@@ -2914,7 +2916,45 @@
 </ul>
 <h3 id="result-type-10">Result Type</h3>
 <p>TIME_SERIES_VALUES.</p>
-<h2 id="expression-query-example">Expression Query Example</h2>
+<h2 id="sort-operation">Sort Operation</h2>
+<h3 id="sortvalues-operation">SortValues Operation</h3>
+<p>SortValues Operation takes an expression and sorts the values of the input expression result.</p>
+<p>Expression:</p>
+<div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-text" data-lang="text">sort_values(Expression, &lt;limit&gt;, &lt;order&gt;)
+</code></pre></div><ul>
+<li><code>limit</code> is the number of results to return after sorting and should be a positive integer. Optional; if not specified, all results are returned.</li>
+<li><code>order</code> is the order of the sort results. The value of <code>order</code> can be <code>asc</code> or <code>des</code>.</li>
+</ul>
+<p>For example:
+If we want to sort the <code>service_resp_time</code> metric values in descending order and get the top 10 values, we can use the following expression:</p>
+<div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-text" data-lang="text">sort_values(service_resp_time, 10, des)
+</code></pre></div><h4 id="result-type-11">Result Type</h4>
+<p>The result type follows the input expression.</p>
+<h3 id="sortlabelvalues-operation">SortLabelValues Operation</h3>
+<p>SortLabelValues Operation takes an expression and sorts the label values of the input expression result. This function uses <code>natural sort order</code>.</p>
+<p>Expression:</p>
+<div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-text" data-lang="text">sort_label_values(Expression, &lt;order&gt;, &lt;label1_name&gt;, &lt;label2_name&gt; ...)
+</code></pre></div><ul>
+<li><code>order</code> is the order of the sort results. The value of <code>order</code> can be <code>asc</code> or <code>des</code>.</li>
+<li><code>&lt;label1_name&gt;, &lt;label2_name&gt; ...</code> are the label names that need to be sorted by their values. At least one label name should be specified.
+Labels at the head of the list are sorted first, and any label not included in the expression result is ignored.</li>
+</ul>
+<p>For example:
+If we want to sort the <code>service_percentile</code> metric label values in descending order by the <code>p</code> label, we can use the following expression:</p>
+<div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-text" data-lang="text">sort_label_values(service_percentile{p=&#39;50,75,90,95,99&#39;}, des, p)
+</code></pre></div><p>For multiple labels, assume the metric has 2 labels:</p>
+<div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-text" data-lang="text">metric{label1=&#39;a&#39;, label2=&#39;2a&#39;} 
+metric{label1=&#39;a&#39;, label2=&#39;2c&#39;}
+metric{label1=&#39;b&#39;, label2=&#39;2a&#39;}
+metric{label1=&#39;b&#39;, label2=&#39;2c&#39;}
+</code></pre></div><p>If we want to sort the <code>metric</code> metric label values in descending order by the <code>label1</code> and <code>label2</code> labels, we can use the following expression:</p>
+<div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-text" data-lang="text">sort_label_values(metric, des, label1, label2)
+</code></pre></div><p>And the result will be:</p>
+<div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-text" data-lang="text">metric{label1=&#39;b&#39;, label2=&#39;2c&#39;}
+metric{label1=&#39;b&#39;, label2=&#39;2a&#39;}
+metric{label1=&#39;a&#39;, label2=&#39;2c&#39;}
+metric{label1=&#39;a&#39;, label2=&#39;2a&#39;}
+</code></pre></div><h2 id="expression-query-example">Expression Query Example</h2>
 <h3 id="labeled-value-metrics-1">Labeled Value Metrics</h3>
 <div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-text" data-lang="text">service_percentile{p=&#39;50,95&#39;}
 </code></pre></div><p>The example result is:</p>
@@ -3190,6 +3230,12 @@
             <li><a href="#result-type-10">Result Type</a></li>
           </ul>
         </li>
+        <li><a href="#sort-operation">Sort Operation</a>
+          <ul>
+            <li><a href="#sortvalues-operation">SortValues Operation</a></li>
+            <li><a href="#sortlabelvalues-operation">SortLabelValues Operation</a></li>
+          </ul>
+        </li>
         <li><a href="#expression-query-example">Expression Query Example</a>
           <ul>
             <li><a href="#labeled-value-metrics-1">Labeled Value Metrics</a></li>
diff --git a/docs/main/next/en/api/profiling-protocol/index.html b/docs/main/next/en/api/profiling-protocol/index.html
index 627f95e..2de8fec 100644
--- a/docs/main/next/en/api/profiling-protocol/index.html
+++ b/docs/main/next/en/api/profiling-protocol/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/promql-service/index.html b/docs/main/next/en/api/promql-service/index.html
index c5abf5f..3e57e26 100644
--- a/docs/main/next/en/api/promql-service/index.html
+++ b/docs/main/next/en/api/promql-service/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -3008,7 +3008,7 @@
   }
 }
 </code></pre></div><h2 id="metrics-type-for-query">Metrics Type For Query</h2>
-<h3 id="supported-metrics-scopehttpsgithubcomapacheskywalkingtreea63601318d802ef7f2f520758725824d317385cfoap-serverserver-coresrcmainjavaorgapacheskywalkingoapservercorequeryenumerationscopejavacatalog">Supported Metrics <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/enumeration/Scope.java">Scope</a>(Catalog)</h3>
+<h3 id="supported-metrics-scopehttpsgithubcomapacheskywalkingtreeb3044ee71464c117ffcbf80a5b669da0ad64c260oap-serverserver-coresrcmainjavaorgapacheskywalkingoapservercorequeryenumerationscopejavacatalog">Supported Metrics <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/enumeration/Scope.java">Scope</a>(Catalog)</h3>
 <p>Not all scopes are supported for now, please check the following table:</p>
 <table>
 <thead>
@@ -3049,7 +3049,7 @@
 </tbody>
 </table>
 <h3 id="general-labels">General labels</h3>
-<p>Each metric contains general labels: <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/Layer.java">layer</a>.
+<p>Each metric contains general labels: <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/Layer.java">layer</a>.
 Different metrics will have different labels depending on their Scope and metric value type.</p>
 <table>
 <thead>
@@ -3378,7 +3378,7 @@
         </li>
         <li><a href="#metrics-type-for-query">Metrics Type For Query</a>
           <ul>
-            <li><a href="#supported-metrics-scopehttpsgithubcomapacheskywalkingtreea63601318d802ef7f2f520758725824d317385cfoap-serverserver-coresrcmainjavaorgapacheskywalkingoapservercorequeryenumerationscopejavacatalog">Supported Metrics <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/enumeration/Scope.java">Scope</a>(Catalog)</a></li>
+            <li><a href="#supported-metrics-scopehttpsgithubcomapacheskywalkingtreeb3044ee71464c117ffcbf80a5b669da0ad64c260oap-serverserver-coresrcmainjavaorgapacheskywalkingoapservercorequeryenumerationscopejavacatalog">Supported Metrics <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/query/enumeration/Scope.java">Scope</a>(Catalog)</a></li>
             <li><a href="#general-labels">General labels</a></li>
             <li><a href="#common-value-metrics">Common Value Metrics</a></li>
             <li><a href="#labeled-value-metrics">Labeled Value Metrics</a></li>
diff --git a/docs/main/next/en/api/query-protocol-deprecated/index.html b/docs/main/next/en/api/query-protocol-deprecated/index.html
index 556c257..5c83922 100644
--- a/docs/main/next/en/api/query-protocol-deprecated/index.html
+++ b/docs/main/next/en/api/query-protocol-deprecated/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/query-protocol/index.html b/docs/main/next/en/api/query-protocol/index.html
index d7066c3..03ae38a 100644
--- a/docs/main/next/en/api/query-protocol/index.html
+++ b/docs/main/next/en/api/query-protocol/index.html
@@ -2420,7 +2420,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/trace-data-protocol-v3/index.html b/docs/main/next/en/api/trace-data-protocol-v3/index.html
index d0f7fc1..90522b1 100644
--- a/docs/main/next/en/api/trace-data-protocol-v3/index.html
+++ b/docs/main/next/en/api/trace-data-protocol-v3/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/x-process-correlation-headers-v1/index.html b/docs/main/next/en/api/x-process-correlation-headers-v1/index.html
index 31fb01c..715dc4d 100644
--- a/docs/main/next/en/api/x-process-correlation-headers-v1/index.html
+++ b/docs/main/next/en/api/x-process-correlation-headers-v1/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/api/x-process-propagation-headers-v3/index.html b/docs/main/next/en/api/x-process-propagation-headers-v3/index.html
index 4278cdd..766a924 100644
--- a/docs/main/next/en/api/x-process-propagation-headers-v3/index.html
+++ b/docs/main/next/en/api/x-process-propagation-headers-v3/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-5.x/index.html b/docs/main/next/en/changes/changes-5.x/index.html
index 17dcc24..b6c15af 100644
--- a/docs/main/next/en/changes/changes-5.x/index.html
+++ b/docs/main/next/en/changes/changes-5.x/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-6.x/index.html b/docs/main/next/en/changes/changes-6.x/index.html
index 6590e59..0bf570c 100644
--- a/docs/main/next/en/changes/changes-6.x/index.html
+++ b/docs/main/next/en/changes/changes-6.x/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-7.0.0/index.html b/docs/main/next/en/changes/changes-7.0.0/index.html
index b8d51e7..92f653d 100644
--- a/docs/main/next/en/changes/changes-7.0.0/index.html
+++ b/docs/main/next/en/changes/changes-7.0.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.0.0/index.html b/docs/main/next/en/changes/changes-8.0.0/index.html
index f3edb68..785cdd8 100644
--- a/docs/main/next/en/changes/changes-8.0.0/index.html
+++ b/docs/main/next/en/changes/changes-8.0.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.0.1/index.html b/docs/main/next/en/changes/changes-8.0.1/index.html
index a59e8b0..763d1cd 100644
--- a/docs/main/next/en/changes/changes-8.0.1/index.html
+++ b/docs/main/next/en/changes/changes-8.0.1/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.1.0/index.html b/docs/main/next/en/changes/changes-8.1.0/index.html
index 13eb270..d243c24 100644
--- a/docs/main/next/en/changes/changes-8.1.0/index.html
+++ b/docs/main/next/en/changes/changes-8.1.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.2.0/index.html b/docs/main/next/en/changes/changes-8.2.0/index.html
index a83ba57..5b267e0 100644
--- a/docs/main/next/en/changes/changes-8.2.0/index.html
+++ b/docs/main/next/en/changes/changes-8.2.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.3.0/index.html b/docs/main/next/en/changes/changes-8.3.0/index.html
index b2f422a..e28e06a 100644
--- a/docs/main/next/en/changes/changes-8.3.0/index.html
+++ b/docs/main/next/en/changes/changes-8.3.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.4.0/index.html b/docs/main/next/en/changes/changes-8.4.0/index.html
index e9651d6..363cc32f 100644
--- a/docs/main/next/en/changes/changes-8.4.0/index.html
+++ b/docs/main/next/en/changes/changes-8.4.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.5.0/index.html b/docs/main/next/en/changes/changes-8.5.0/index.html
index ffec0f5..fce0033 100644
--- a/docs/main/next/en/changes/changes-8.5.0/index.html
+++ b/docs/main/next/en/changes/changes-8.5.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.6.0/index.html b/docs/main/next/en/changes/changes-8.6.0/index.html
index 1ed5e93..e8edf0d 100644
--- a/docs/main/next/en/changes/changes-8.6.0/index.html
+++ b/docs/main/next/en/changes/changes-8.6.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.7.0/index.html b/docs/main/next/en/changes/changes-8.7.0/index.html
index b8503b7..059060a 100644
--- a/docs/main/next/en/changes/changes-8.7.0/index.html
+++ b/docs/main/next/en/changes/changes-8.7.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.8.0/index.html b/docs/main/next/en/changes/changes-8.8.0/index.html
index 2ca5b44..226bf9a 100644
--- a/docs/main/next/en/changes/changes-8.8.0/index.html
+++ b/docs/main/next/en/changes/changes-8.8.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.8.1/index.html b/docs/main/next/en/changes/changes-8.8.1/index.html
index b81a1c5..4038f1e 100644
--- a/docs/main/next/en/changes/changes-8.8.1/index.html
+++ b/docs/main/next/en/changes/changes-8.8.1/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.9.0/index.html b/docs/main/next/en/changes/changes-8.9.0/index.html
index c7359db..f3e75b6 100644
--- a/docs/main/next/en/changes/changes-8.9.0/index.html
+++ b/docs/main/next/en/changes/changes-8.9.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-8.9.1/index.html b/docs/main/next/en/changes/changes-8.9.1/index.html
index c016ca4..132b3c4 100644
--- a/docs/main/next/en/changes/changes-8.9.1/index.html
+++ b/docs/main/next/en/changes/changes-8.9.1/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-9.0.0/index.html b/docs/main/next/en/changes/changes-9.0.0/index.html
index 53ed1bd..628b402 100644
--- a/docs/main/next/en/changes/changes-9.0.0/index.html
+++ b/docs/main/next/en/changes/changes-9.0.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-9.1.0/index.html b/docs/main/next/en/changes/changes-9.1.0/index.html
index 16dd353..411ec24 100644
--- a/docs/main/next/en/changes/changes-9.1.0/index.html
+++ b/docs/main/next/en/changes/changes-9.1.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-9.2.0/index.html b/docs/main/next/en/changes/changes-9.2.0/index.html
index 328c12f..47d1e32 100644
--- a/docs/main/next/en/changes/changes-9.2.0/index.html
+++ b/docs/main/next/en/changes/changes-9.2.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-9.3.0/index.html b/docs/main/next/en/changes/changes-9.3.0/index.html
index e0db23c..68bcb91 100644
--- a/docs/main/next/en/changes/changes-9.3.0/index.html
+++ b/docs/main/next/en/changes/changes-9.3.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-9.4.0/index.html b/docs/main/next/en/changes/changes-9.4.0/index.html
index da409c4..1103cb1 100644
--- a/docs/main/next/en/changes/changes-9.4.0/index.html
+++ b/docs/main/next/en/changes/changes-9.4.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-9.5.0/index.html b/docs/main/next/en/changes/changes-9.5.0/index.html
index 86710a2..331cbd1 100644
--- a/docs/main/next/en/changes/changes-9.5.0/index.html
+++ b/docs/main/next/en/changes/changes-9.5.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-9.6.0/index.html b/docs/main/next/en/changes/changes-9.6.0/index.html
index e6ebe95..5c96928 100644
--- a/docs/main/next/en/changes/changes-9.6.0/index.html
+++ b/docs/main/next/en/changes/changes-9.6.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes-9.7.0/index.html b/docs/main/next/en/changes/changes-9.7.0/index.html
index ed96790..99353aa 100644
--- a/docs/main/next/en/changes/changes-9.7.0/index.html
+++ b/docs/main/next/en/changes/changes-9.7.0/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/changes/changes/index.html b/docs/main/next/en/changes/changes/index.html
index 7cf7c53..1abd950 100644
--- a/docs/main/next/en/changes/changes/index.html
+++ b/docs/main/next/en/changes/changes/index.html
@@ -37,7 +37,7 @@
 <meta itemprop="name" content="10.0.0">
 <meta itemprop="description" content="10.0.0 Project  Support Java 21 runtime. Support oap-java21 image for Java 21 runtime. Upgrade OTEL collector version to 0.92.0 in all e2e tests. Switch CI macOS runner to m1. Upgrade PostgreSQL driver to 42.4.4 to fix CVE-2024-1597. Remove CLI(swctl) from the image. Remove CLI_VERSION variable from Makefile build. Add BanyanDB to docker-compose quickstart. Bump up Armeria, jackson, netty, jetcd and grpc to fix CVEs.  OAP Server  Add layer parameter to the global topology graphQL query.">
 
-<meta itemprop="wordCount" content="1292">
+<meta itemprop="wordCount" content="1298">
 
 
 
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2618,6 +2618,7 @@
 <li>Add component definition(ID=152) for <code>c3p0</code>(JDBC3 Connection and Statement Pooling).</li>
 <li>Fix MQE <code>top_n</code> global query.</li>
 <li>Fix inaccurate Pulsar and Bookkeeper metrics.</li>
+<li>MQE supports <code>sort_values</code> and <code>sort_label_values</code> functions.</li>
 </ul>
 <h4 id="ui">UI</h4>
 <ul>
diff --git a/docs/main/next/en/concepts-and-designs/backend-overview/index.html b/docs/main/next/en/concepts-and-designs/backend-overview/index.html
index 3c2cc86..c439417 100644
--- a/docs/main/next/en/concepts-and-designs/backend-overview/index.html
+++ b/docs/main/next/en/concepts-and-designs/backend-overview/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/ebpf-cpu-profiling/index.html b/docs/main/next/en/concepts-and-designs/ebpf-cpu-profiling/index.html
index c01fda8..081d931 100644
--- a/docs/main/next/en/concepts-and-designs/ebpf-cpu-profiling/index.html
+++ b/docs/main/next/en/concepts-and-designs/ebpf-cpu-profiling/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/event/index.html b/docs/main/next/en/concepts-and-designs/event/index.html
index 401ae5d..ec7d322 100644
--- a/docs/main/next/en/concepts-and-designs/event/index.html
+++ b/docs/main/next/en/concepts-and-designs/event/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/lal/index.html b/docs/main/next/en/concepts-and-designs/lal/index.html
index fcc2a2c..9eccd70 100644
--- a/docs/main/next/en/concepts-and-designs/lal/index.html
+++ b/docs/main/next/en/concepts-and-designs/lal/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2639,7 +2639,7 @@
 </code></pre></div><ul>
 <li><code>layer</code></li>
 </ul>
-<p><code>layer</code> extracts the <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/Layer.java">layer</a> from the <code>parsed</code> result, and set it into the <code>LogData</code>, which will be persisted (if
+<p><code>layer</code> extracts the <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/Layer.java">layer</a> from the <code>parsed</code> result, and set it into the <code>LogData</code>, which will be persisted (if
 not dropped) and is used to associate with service.</p>
 <ul>
 <li><code>tag</code></li>
diff --git a/docs/main/next/en/concepts-and-designs/mal/index.html b/docs/main/next/en/concepts-and-designs/mal/index.html
index 9da04ba..745f4a9 100644
--- a/docs/main/next/en/concepts-and-designs/mal/index.html
+++ b/docs/main/next/en/concepts-and-designs/mal/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2623,7 +2623,7 @@
 <p>If users want to get the latest time from <code>last_server_state_sync_time_in_seconds</code>:</p>
 <pre><code>last_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST)
 </code></pre><h2 id="metric-level-function">Metric level function</h2>
-<p>They extract level relevant labels from metric labels, then informs the meter-system the level and <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/Layer.java">layer</a> to which this metric belongs.</p>
+<p>They extract level relevant labels from metric labels, then informs the meter-system the level and <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/Layer.java">layer</a> to which this metric belongs.</p>
 <ul>
 <li><code>service([svc_label1, svc_label2...], Layer)</code> extracts service level labels from the array argument, extracts layer from <code>Layer</code> argument.</li>
 <li><code>instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure&lt;Map&lt;String, String&gt;&gt; propertiesExtractor)</code> extracts service level labels from the first array argument,
@@ -2641,7 +2641,7 @@
 <p>The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files
 are located at <code>$CLASSPATH/otel-rules</code>, <code>$CLASSPATH/meter-analyzer-config</code>, <code>$CLASSPATH/envoy-metrics-rules</code> and <code>$CLASSPATH/zabbix-rules</code>.</p>
 <p>The file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.</p>
-<p>A full example can be found <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-starter/src/main/resources/otel-rules/oap.yaml">here</a></p>
+<p>A full example can be found <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-starter/src/main/resources/otel-rules/oap.yaml">here</a></p>
 <p>Generic placeholders are defined as follows:</p>
 <ul>
 <li><code>&lt;string&gt;</code>: A regular string.</li>
@@ -2666,7 +2666,7 @@
 </span><span style="color:#bbb"></span><span style="color:#998;font-style:italic"># MAL expression.</span><span style="color:#bbb">
 </span><span style="color:#bbb"></span><span style="color:#000080">exp</span>:<span style="color:#bbb"> </span>&lt;string&gt;<span style="color:#bbb">
 </span></code></pre></div><h2 id="more-examples">More Examples</h2>
-<p>Please refer to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-starter/src/main/resources/otel-rules/oap.yaml">OAP Self-Observability</a>.</p>
+<p>Please refer to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-starter/src/main/resources/otel-rules/oap.yaml">OAP Self-Observability</a>.</p>
 
 </div>
 <div>
diff --git a/docs/main/next/en/concepts-and-designs/manual-sdk/index.html b/docs/main/next/en/concepts-and-designs/manual-sdk/index.html
index 7da6b71..bff783b 100644
--- a/docs/main/next/en/concepts-and-designs/manual-sdk/index.html
+++ b/docs/main/next/en/concepts-and-designs/manual-sdk/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/meter/index.html b/docs/main/next/en/concepts-and-designs/meter/index.html
index 4fdcd25..25d9cdb 100644
--- a/docs/main/next/en/concepts-and-designs/meter/index.html
+++ b/docs/main/next/en/concepts-and-designs/meter/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/oal/index.html b/docs/main/next/en/concepts-and-designs/oal/index.html
index ed0a19a..bb11704 100644
--- a/docs/main/next/en/concepts-and-designs/oal/index.html
+++ b/docs/main/next/en/concepts-and-designs/oal/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/overview/index.html b/docs/main/next/en/concepts-and-designs/overview/index.html
index 9ac26a8..6f5903b 100644
--- a/docs/main/next/en/concepts-and-designs/overview/index.html
+++ b/docs/main/next/en/concepts-and-designs/overview/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/probe-introduction/index.html b/docs/main/next/en/concepts-and-designs/probe-introduction/index.html
index de672a1..ab2eed5 100644
--- a/docs/main/next/en/concepts-and-designs/probe-introduction/index.html
+++ b/docs/main/next/en/concepts-and-designs/probe-introduction/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/profiling/index.html b/docs/main/next/en/concepts-and-designs/profiling/index.html
index c65a905..ad8d009 100644
--- a/docs/main/next/en/concepts-and-designs/profiling/index.html
+++ b/docs/main/next/en/concepts-and-designs/profiling/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/project-goals/index.html b/docs/main/next/en/concepts-and-designs/project-goals/index.html
index 4a148e6..99fe1ce 100644
--- a/docs/main/next/en/concepts-and-designs/project-goals/index.html
+++ b/docs/main/next/en/concepts-and-designs/project-goals/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/scope-definitions/index.html b/docs/main/next/en/concepts-and-designs/scope-definitions/index.html
index dbee0c4..54e1ac2 100644
--- a/docs/main/next/en/concepts-and-designs/scope-definitions/index.html
+++ b/docs/main/next/en/concepts-and-designs/scope-definitions/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/sdk-profiling/index.html b/docs/main/next/en/concepts-and-designs/sdk-profiling/index.html
index 9be8a90..3550229 100644
--- a/docs/main/next/en/concepts-and-designs/sdk-profiling/index.html
+++ b/docs/main/next/en/concepts-and-designs/sdk-profiling/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/service-agent/index.html b/docs/main/next/en/concepts-and-designs/service-agent/index.html
index 75e8852..7aee33a 100644
--- a/docs/main/next/en/concepts-and-designs/service-agent/index.html
+++ b/docs/main/next/en/concepts-and-designs/service-agent/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/service-hierarchy-configuration/index.html b/docs/main/next/en/concepts-and-designs/service-hierarchy-configuration/index.html
index 39fedb0..bf62df2 100644
--- a/docs/main/next/en/concepts-and-designs/service-hierarchy-configuration/index.html
+++ b/docs/main/next/en/concepts-and-designs/service-hierarchy-configuration/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/concepts-and-designs/service-hierarchy/index.html b/docs/main/next/en/concepts-and-designs/service-hierarchy/index.html
index ea37062..55c59e2 100644
--- a/docs/main/next/en/concepts-and-designs/service-hierarchy/index.html
+++ b/docs/main/next/en/concepts-and-designs/service-hierarchy/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/debugging/config_dump/index.html b/docs/main/next/en/debugging/config_dump/index.html
index 5f63551..5f08b59 100644
--- a/docs/main/next/en/debugging/config_dump/index.html
+++ b/docs/main/next/en/debugging/config_dump/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2463,7 +2463,7 @@
 	<h1 id="scratch-the-oap-config-dump">Scratch The OAP Config Dump</h1>
 <p>SkyWalking OAP behaviors could be controlled through hundreds of configurations. It is hard to know what the final
 configuration is, as all the configurations could be overridden by system environments.</p>
-<p>The core config file <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-starter/src/main/resources/application.yml">application.yml</a> lists all
+<p>The core config file <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-starter/src/main/resources/application.yml">application.yml</a> lists all
 the configurations
 and their default values. However, it is still hard to know the runtime value.</p>
 <p>Scratch is a tool to dump the final configuration. It is provided within the OAP REST server, which can be accessed
diff --git a/docs/main/next/en/faq/compatible-with-other-javaagent-bytecode-processing/index.html b/docs/main/next/en/faq/compatible-with-other-javaagent-bytecode-processing/index.html
index e65594c..c476a56 100644
--- a/docs/main/next/en/faq/compatible-with-other-javaagent-bytecode-processing/index.html
+++ b/docs/main/next/en/faq/compatible-with-other-javaagent-bytecode-processing/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/enhancerequireobjectcache-cast-exception/index.html b/docs/main/next/en/faq/enhancerequireobjectcache-cast-exception/index.html
index 9210ae1..108f1d1 100644
--- a/docs/main/next/en/faq/enhancerequireobjectcache-cast-exception/index.html
+++ b/docs/main/next/en/faq/enhancerequireobjectcache-cast-exception/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/es-server-faq/index.html b/docs/main/next/en/faq/es-server-faq/index.html
index 500e0ac..50ba1e7 100644
--- a/docs/main/next/en/faq/es-server-faq/index.html
+++ b/docs/main/next/en/faq/es-server-faq/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/es-version-conflict/index.html b/docs/main/next/en/faq/es-version-conflict/index.html
index 487ab67..ba0fe90 100644
--- a/docs/main/next/en/faq/es-version-conflict/index.html
+++ b/docs/main/next/en/faq/es-version-conflict/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/hour-day-metrics-stopping/index.html b/docs/main/next/en/faq/hour-day-metrics-stopping/index.html
index b9cc80a..5b24067 100644
--- a/docs/main/next/en/faq/hour-day-metrics-stopping/index.html
+++ b/docs/main/next/en/faq/hour-day-metrics-stopping/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/how-to-build-with-mac-m1/index.html b/docs/main/next/en/faq/how-to-build-with-mac-m1/index.html
index 436d9d1..d7e2c0b 100644
--- a/docs/main/next/en/faq/how-to-build-with-mac-m1/index.html
+++ b/docs/main/next/en/faq/how-to-build-with-mac-m1/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/import-project-eclipse-requireitems-exception/index.html b/docs/main/next/en/faq/import-project-eclipse-requireitems-exception/index.html
index 5ba7ff56..05b886d 100644
--- a/docs/main/next/en/faq/import-project-eclipse-requireitems-exception/index.html
+++ b/docs/main/next/en/faq/import-project-eclipse-requireitems-exception/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/install_agent_on_websphere/index.html b/docs/main/next/en/faq/install_agent_on_websphere/index.html
index a82a80a..e882d5e 100644
--- a/docs/main/next/en/faq/install_agent_on_websphere/index.html
+++ b/docs/main/next/en/faq/install_agent_on_websphere/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/kafka-plugin/index.html b/docs/main/next/en/faq/kafka-plugin/index.html
index f929589..a74a216 100644
--- a/docs/main/next/en/faq/kafka-plugin/index.html
+++ b/docs/main/next/en/faq/kafka-plugin/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/maven-compile-npm-failure/index.html b/docs/main/next/en/faq/maven-compile-npm-failure/index.html
index 86b8974..9f5e703 100644
--- a/docs/main/next/en/faq/maven-compile-npm-failure/index.html
+++ b/docs/main/next/en/faq/maven-compile-npm-failure/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/memory-leak-enhance-worker-thread/index.html b/docs/main/next/en/faq/memory-leak-enhance-worker-thread/index.html
index 1dbfadf..a7890ff 100644
--- a/docs/main/next/en/faq/memory-leak-enhance-worker-thread/index.html
+++ b/docs/main/next/en/faq/memory-leak-enhance-worker-thread/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/index.html b/docs/main/next/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/index.html
index 596d90f..2b4f2aa 100644
--- a/docs/main/next/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/index.html
+++ b/docs/main/next/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/protoc-plugin-fails-when-build/index.html b/docs/main/next/en/faq/protoc-plugin-fails-when-build/index.html
index b82855f..de84507 100644
--- a/docs/main/next/en/faq/protoc-plugin-fails-when-build/index.html
+++ b/docs/main/next/en/faq/protoc-plugin-fails-when-build/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/readme/index.html b/docs/main/next/en/faq/readme/index.html
index c1e9b1a..41584fa 100644
--- a/docs/main/next/en/faq/readme/index.html
+++ b/docs/main/next/en/faq/readme/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/thrift-plugin/index.html b/docs/main/next/en/faq/thrift-plugin/index.html
index db12b29..5c54d57 100644
--- a/docs/main/next/en/faq/thrift-plugin/index.html
+++ b/docs/main/next/en/faq/thrift-plugin/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/time-and-timezone/index.html b/docs/main/next/en/faq/time-and-timezone/index.html
index 96be85e..b5afd76 100644
--- a/docs/main/next/en/faq/time-and-timezone/index.html
+++ b/docs/main/next/en/faq/time-and-timezone/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/unexpected-endpoint-register/index.html b/docs/main/next/en/faq/unexpected-endpoint-register/index.html
index 492a39f..8476a3c 100644
--- a/docs/main/next/en/faq/unexpected-endpoint-register/index.html
+++ b/docs/main/next/en/faq/unexpected-endpoint-register/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/v3-version-upgrade/index.html b/docs/main/next/en/faq/v3-version-upgrade/index.html
index 12eb4e8..97558c8 100644
--- a/docs/main/next/en/faq/v3-version-upgrade/index.html
+++ b/docs/main/next/en/faq/v3-version-upgrade/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/v6-version-upgrade/index.html b/docs/main/next/en/faq/v6-version-upgrade/index.html
index cb77f00..6258ca4 100644
--- a/docs/main/next/en/faq/v6-version-upgrade/index.html
+++ b/docs/main/next/en/faq/v6-version-upgrade/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/v8-version-upgrade/index.html b/docs/main/next/en/faq/v8-version-upgrade/index.html
index 5171088..66fe54e 100644
--- a/docs/main/next/en/faq/v8-version-upgrade/index.html
+++ b/docs/main/next/en/faq/v8-version-upgrade/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/v9-version-upgrade/index.html b/docs/main/next/en/faq/v9-version-upgrade/index.html
index 99b9ea7..84e45be 100644
--- a/docs/main/next/en/faq/v9-version-upgrade/index.html
+++ b/docs/main/next/en/faq/v9-version-upgrade/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2458,7 +2458,7 @@
               
 <div class="td-content">
 	<h1 id="v9-upgrade">V9 upgrade</h1>
-<p>Starting from v9, SkyWalking introduces the new core concept <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/Layer.java"><strong>Layer</strong></a>.
+<p>Starting from v9, SkyWalking introduces the new core concept <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/Layer.java"><strong>Layer</strong></a>.
 A layer represents an abstract framework in computer science, such as Operating System (OS_LINUX layer) or
 Kubernetes (k8s layer). These layers act as catalogs on the new <a href="https://github.com/apache/skywalking-booster-ui">booster UI</a> for the various services/instances detected by different technologies.
 The query-protocol <a href="https://github.com/apache/skywalking-query-protocol/blob/master/metadata-v2.graphqls">metadata-v2</a> has been used.
@@ -2472,8 +2472,8 @@
 <h2 id="incompatibility">Incompatibility</h2>
 <ol>
 <li>The <a href="https://github.com/apache/skywalking-query-protocol/blob/master/ui-configuration.graphqls">UI configuration protocol</a> has been changed by following the design of new <a href="https://github.com/apache/skywalking-booster-ui">booster UI</a>. So, the RocketBot UI can&rsquo;t work with the v9 backend. You need to remove <code>ui_template</code> index/template/table in your chosen storage, and reboot OAP in <code>default</code> or <code>init</code> mode.</li>
-<li>MAL: <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/docs/en/concepts-and-designs/mal.md">metric level function</a> adds a required argument <code>Layer</code>. Previous MAL expressions should add this argument.</li>
-<li>LAL: <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/docs/en/concepts-and-designs/lal.md">Extractor</a> adds the function <code>layer</code>. If you don&rsquo;t set it manually, the default layer is <code>GENERAL</code>; for logs from <code>ALS</code>, the
+<li>MAL: <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/docs/en/concepts-and-designs/mal.md">metric level function</a> adds a required argument <code>Layer</code>. Previous MAL expressions should add this argument.</li>
+<li>LAL: <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/docs/en/concepts-and-designs/lal.md">Extractor</a> adds the function <code>layer</code>. If you don&rsquo;t set it manually, the default layer is <code>GENERAL</code>; for logs from <code>ALS</code>, the
 default layer is <code>mesh</code>.</li>
 <li>Storage: Add <code>service_id</code>, <code>short_name</code> and <code>layer</code> columns to the table <code>ServiceTraffic</code>.
 This data is incompatible with previous versions.
diff --git a/docs/main/next/en/faq/vnode/index.html b/docs/main/next/en/faq/vnode/index.html
index 8b60ade..7a6b995 100644
--- a/docs/main/next/en/faq/vnode/index.html
+++ b/docs/main/next/en/faq/vnode/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/why-clickhouse-not-supported/index.html b/docs/main/next/en/faq/why-clickhouse-not-supported/index.html
index 5a1fbbc..1c79910 100644
--- a/docs/main/next/en/faq/why-clickhouse-not-supported/index.html
+++ b/docs/main/next/en/faq/why-clickhouse-not-supported/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/why-have-traces-no-others/index.html b/docs/main/next/en/faq/why-have-traces-no-others/index.html
index 9ba2409..af590c9 100644
--- a/docs/main/next/en/faq/why-have-traces-no-others/index.html
+++ b/docs/main/next/en/faq/why-have-traces-no-others/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/faq/why_mq_not_involved/index.html b/docs/main/next/en/faq/why_mq_not_involved/index.html
index 83e2464..251c6ed 100644
--- a/docs/main/next/en/faq/why_mq_not_involved/index.html
+++ b/docs/main/next/en/faq/why_mq_not_involved/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/guides/asf/committer/index.html b/docs/main/next/en/guides/asf/committer/index.html
index 107e717..da45b17 100644
--- a/docs/main/next/en/guides/asf/committer/index.html
+++ b/docs/main/next/en/guides/asf/committer/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/guides/backend-oal-scripts/index.html b/docs/main/next/en/guides/backend-oal-scripts/index.html
index bf5146a..ca28045 100644
--- a/docs/main/next/en/guides/backend-oal-scripts/index.html
+++ b/docs/main/next/en/guides/backend-oal-scripts/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/guides/backend-profile-export/index.html b/docs/main/next/en/guides/backend-profile-export/index.html
index 808161d..5851389 100644
--- a/docs/main/next/en/guides/backend-profile-export/index.html
+++ b/docs/main/next/en/guides/backend-profile-export/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/guides/benchmark/index.html b/docs/main/next/en/guides/benchmark/index.html
index dba3536..966b5b7 100644
--- a/docs/main/next/en/guides/benchmark/index.html
+++ b/docs/main/next/en/guides/benchmark/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/guides/community/index.html b/docs/main/next/en/guides/community/index.html
index acfeea7..9544410 100644
--- a/docs/main/next/en/guides/community/index.html
+++ b/docs/main/next/en/guides/community/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/guides/component-library-settings/index.html b/docs/main/next/en/guides/component-library-settings/index.html
index b13869c..ea86ca7 100644
--- a/docs/main/next/en/guides/component-library-settings/index.html
+++ b/docs/main/next/en/guides/component-library-settings/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/guides/dependencies/index.html b/docs/main/next/en/guides/dependencies/index.html
index e69e8a2..9c58e3c 100644
--- a/docs/main/next/en/guides/dependencies/index.html
+++ b/docs/main/next/en/guides/dependencies/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/guides/e2e/index.html b/docs/main/next/en/guides/e2e/index.html
index 7804efa..0ae3c43 100644
--- a/docs/main/next/en/guides/e2e/index.html
+++ b/docs/main/next/en/guides/e2e/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/guides/how-to-build/index.html b/docs/main/next/en/guides/how-to-build/index.html
index e7d2016..9a90895 100644
--- a/docs/main/next/en/guides/how-to-build/index.html
+++ b/docs/main/next/en/guides/how-to-build/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2539,7 +2539,7 @@
 </blockquote>
 <h3 id="building-docker-images">Building docker images</h3>
 <p>You can build Docker images of <code>backend</code> and <code>ui</code> with the <code>Makefile</code> located in the root folder.</p>
-<p>Refer to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/docker">Build docker image</a> for more details.</p>
+<p>Refer to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/docker">Build docker image</a> for more details.</p>
 <h2 id="setting-up-your-intellij-idea">Setting up your IntelliJ IDEA</h2>
 <p><strong>NOTE</strong>: If you clone the code from GitHub, please make sure that you have finished steps 1 to 3 in the section <strong><a href="#building-from-github">Build from GitHub</a></strong>. If you download the source code from the official website of SkyWalking, please make sure that you have followed the steps in the section <strong><a href="#building-from-apache-source-code-release">Build from Apache source code release</a></strong>.</p>
 <ol>
diff --git a/docs/main/next/en/guides/how-to-bump-up-zipkin/index.html b/docs/main/next/en/guides/how-to-bump-up-zipkin/index.html
index fded6e3..25ece68 100644
--- a/docs/main/next/en/guides/how-to-bump-up-zipkin/index.html
+++ b/docs/main/next/en/guides/how-to-bump-up-zipkin/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/guides/how-to-release/index.html b/docs/main/next/en/guides/how-to-release/index.html
index 2254450..b5ec166 100644
--- a/docs/main/next/en/guides/how-to-release/index.html
+++ b/docs/main/next/en/guides/how-to-release/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2673,7 +2673,7 @@
 
 - Apache SkyWalking Team
 </code></pre><h2 id="publish-the-docker-images">Publish the Docker images</h2>
-<p>We have a <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/.github/workflows/publish-docker.yaml">GitHub workflow</a> to automatically publish the Docker images to
+<p>We have a <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/.github/workflows/publish-docker.yaml">GitHub workflow</a> to automatically publish the Docker images to
 Docker Hub after you set the version from <code>pre-release</code> to <code>release</code>. All you need to do is watch that workflow and see
 whether it succeeds; if it fails, you can use the following steps to publish the Docker images on your local machine.</p>
 <div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-shell" data-lang="shell"><span style="color:#0086b3">export</span> <span style="color:#008080">SW_VERSION</span><span style="color:#000;font-weight:bold">=</span>x.y.z
diff --git a/docs/main/next/en/guides/i18n/index.html b/docs/main/next/en/guides/i18n/index.html
index dc3e93e..3891fc0 100644
--- a/docs/main/next/en/guides/i18n/index.html
+++ b/docs/main/next/en/guides/i18n/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2465,7 +2465,7 @@
 <p>This section explains how to manage translations for internationalization of menu items.</p>
 </blockquote>
 <p>SkyWalking UI&rsquo;s internationalization translations are in the <a href="https://github.com/apache/skywalking-booster-ui/tree/main/src/locales/lang">src/locales/lang</a>.
-The translations include <code>menu name</code> and <code>description</code>. The translation key of <code>menu name</code> is the value of <code>i18nKey</code> from <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-starter/src/main/resources/ui-initialized-templates/menu.yaml">menu definition file</a>. The translation key of <code>description</code> consists of the <code>i18nKey</code> value and <code>_desc</code> suffix. The <code>description</code> contents will be displayed on the Marketplace page.</p>
+The translations include <code>menu name</code> and <code>description</code>. The translation key of <code>menu name</code> is the value of <code>i18nKey</code> from <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-starter/src/main/resources/ui-initialized-templates/menu.yaml">menu definition file</a>. The translation key of <code>description</code> consists of the <code>i18nKey</code> value and <code>_desc</code> suffix. The <code>description</code> contents will be displayed on the Marketplace page.</p>
 <p>The following is a typical <code>menu name</code> and <code>description</code> for i18nKey=<code>general_service</code></p>
 <div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-json" data-lang="json">{
   <span style="color:#000080">&#34;general_service&#34;</span>: <span style="color:#d14">&#34;General Service&#34;</span>,
diff --git a/docs/main/next/en/guides/it-guide/index.html b/docs/main/next/en/guides/it-guide/index.html
index f081233..fbf6012 100644
--- a/docs/main/next/en/guides/it-guide/index.html
+++ b/docs/main/next/en/guides/it-guide/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/guides/source-extension/index.html b/docs/main/next/en/guides/source-extension/index.html
index 93e22c3..7c06638 100644
--- a/docs/main/next/en/guides/source-extension/index.html
+++ b/docs/main/next/en/guides/source-extension/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/papers/stam/index.html b/docs/main/next/en/papers/stam/index.html
index 66596e0..12e3b9a 100644
--- a/docs/main/next/en/papers/stam/index.html
+++ b/docs/main/next/en/papers/stam/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/security/readme/index.html b/docs/main/next/en/security/readme/index.html
index 107e645..b6ed09c 100644
--- a/docs/main/next/en/security/readme/index.html
+++ b/docs/main/next/en/security/readme/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/ai-pipeline/http-restful-uri-pattern/index.html b/docs/main/next/en/setup/ai-pipeline/http-restful-uri-pattern/index.html
index 8113212..080c680 100644
--- a/docs/main/next/en/setup/ai-pipeline/http-restful-uri-pattern/index.html
+++ b/docs/main/next/en/setup/ai-pipeline/http-restful-uri-pattern/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2468,7 +2468,7 @@
 processing the further telemetry traffic.</p>
 <h2 id="set-up-oap-to-connect-remote-uri-recognition-server">Set up OAP to connect remote URI recognition server</h2>
 <p><code>uriRecognitionServerAddr</code> and <code>uriRecognitionServerPort</code> are the configurations to set up the remote URI recognition server.</p>
-<p>The URI recognition server is a gRPC server, which is defined in <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/ai-pipeline/src/main/proto/ai_http_uri_recognition.proto">URIRecognition.proto</a>.</p>
+<p>The URI recognition server is a gRPC server, which is defined in <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/ai-pipeline/src/main/proto/ai_http_uri_recognition.proto">URIRecognition.proto</a>.</p>
 <div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-protobuf" data-lang="protobuf"><span style="color:#000;font-weight:bold">service</span> HttpUriRecognitionService {<span style="color:#a61717;background-color:#e3d2d2">
 </span><span style="color:#a61717;background-color:#e3d2d2"></span>    <span style="color:#998;font-style:italic">// Sync for the pattern recognition dictionary.
 </span><span style="color:#998;font-style:italic"></span>    <span style="color:#000;font-weight:bold">rpc</span> fetchAllPatterns(HttpUriRecognitionSyncRequest) <span style="color:#000;font-weight:bold">returns</span> (HttpUriRecognitionResponse) {}<span style="color:#a61717;background-color:#e3d2d2">
diff --git a/docs/main/next/en/setup/ai-pipeline/introduction/index.html b/docs/main/next/en/setup/ai-pipeline/introduction/index.html
index 9b68afc..ead9d86 100644
--- a/docs/main/next/en/setup/ai-pipeline/introduction/index.html
+++ b/docs/main/next/en/setup/ai-pipeline/introduction/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/advanced-deployment/index.html b/docs/main/next/en/setup/backend/advanced-deployment/index.html
index a2567d7..b3600fc 100644
--- a/docs/main/next/en/setup/backend/advanced-deployment/index.html
+++ b/docs/main/next/en/setup/backend/advanced-deployment/index.html
@@ -2420,7 +2420,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/apdex-threshold/index.html b/docs/main/next/en/setup/backend/apdex-threshold/index.html
index 7aee93f..e6c04b4 100644
--- a/docs/main/next/en/setup/backend/apdex-threshold/index.html
+++ b/docs/main/next/en/setup/backend/apdex-threshold/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/aws-firehose-receiver/index.html b/docs/main/next/en/setup/backend/aws-firehose-receiver/index.html
index b056cf2..2371c1e 100644
--- a/docs/main/next/en/setup/backend/aws-firehose-receiver/index.html
+++ b/docs/main/next/en/setup/backend/aws-firehose-receiver/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-activemq-monitoring/index.html b/docs/main/next/en/setup/backend/backend-activemq-monitoring/index.html
index 2e79e89..8fd5c18 100644
--- a/docs/main/next/en/setup/backend/backend-activemq-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-activemq-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2473,11 +2473,11 @@
 <h2 id="setup">Setup</h2>
 <ol>
 <li><a href="https://activemq.apache.org/components/classic/documentation/jmx">Enable JMX</a> in <code>activemq.xml</code>; the JMX remote port defaults to <code>1616</code>, and you can change it through <code>ACTIVEMQ_SUNJMX_START</code>. For an example ActiveMQ configuration, refer
-to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/activemq/config/amq/activemq.xml">here</a>.</li>
+to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/activemq/config/amq/activemq.xml">here</a>.</li>
 <li>Set up <a href="https://github.com/prometheus/jmx_exporter">jmx prometheus exporter</a> which runs as a Java Agent(recommended) of ActiveMQ classic. If you work with docker, you also can set up <a href="https://github.com/bitnami/containers/tree/main/bitnami/jmx-exporter">a single server</a> for exporter, refer
-to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/activemq/config/amq/config.yaml">here</a>(note the configuration of <code>includeObjectNames</code>).</li>
+to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/activemq/config/amq/config.yaml">here</a>(note the configuration of <code>includeObjectNames</code>).</li>
 <li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector</a>. The example for OpenTelemetry Collector configuration, refer
-to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/activemq/otel-collector-config.yaml">here</a>.</li>
+to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/activemq/otel-collector-config.yaml">here</a>; a minimal illustrative sketch also follows this list.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
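+<p>To make step 3 concrete, here is a minimal sketch of an OpenTelemetry Collector configuration that scrapes the JMX exporter from step 2 and forwards the metrics to the OAP via OTLP. The exporter target, OAP address and scrape interval are illustrative assumptions; treat the linked e2e configuration above as the reference.</p>
+<div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-yaml" data-lang="yaml">receivers:
+  prometheus:
+    config:
+      scrape_configs:
+        - job_name: activemq-monitoring
+          scrape_interval: 10s
+          static_configs:
+            # Assumed address of the JMX exporter started in step 2.
+            - targets: ["activemq-exporter:5556"]
+processors:
+  batch: {}
+exporters:
+  otlp:
+    # Assumed OAP gRPC endpoint; adjust to your deployment.
+    endpoint: oap:11800
+    tls:
+      insecure: true
+service:
+  pipelines:
+    metrics:
+      receivers: [prometheus]
+      processors: [batch]
+      exporters: [otlp]
+</code></pre></div>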
 <h2 id="activemq-classic-monitoring-1">ActiveMQ classic Monitoring</h2>
diff --git a/docs/main/next/en/setup/backend/backend-alarm/index.html b/docs/main/next/en/setup/backend/backend-alarm/index.html
index a726c5e..ac0f193 100644
--- a/docs/main/next/en/setup/backend/backend-alarm/index.html
+++ b/docs/main/next/en/setup/backend/backend-alarm/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-apisix-monitoring/index.html b/docs/main/next/en/setup/backend/backend-apisix-monitoring/index.html
index 8a6be15..2dbbfb1 100644
--- a/docs/main/next/en/setup/backend/backend-apisix-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-apisix-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2470,7 +2470,7 @@
 <h3 id="set-up">Set up</h3>
 <ol>
 <li>Enable the <a href="https://apisix.apache.org/docs/apisix/plugins/prometheus/">APISIX Prometheus plugin</a>.</li>
-<li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector </a>. For details on Prometheus Receiver in OpenTelemetry Collector, refer to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/apisix/otel-collector/otel-collector-config.yaml">here</a>.</li>
+<li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector </a>. For details on Prometheus Receiver in OpenTelemetry Collector, refer to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/apisix/otel-collector/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h3 id="apisix-monitoring-1">APISIX Monitoring</h3>
diff --git a/docs/main/next/en/setup/backend/backend-aws-api-gateway-monitoring/index.html b/docs/main/next/en/setup/backend/backend-aws-api-gateway-monitoring/index.html
index f21543a..a769387 100644
--- a/docs/main/next/en/setup/backend/backend-aws-api-gateway-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-aws-api-gateway-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-aws-dynamodb-monitoring/index.html b/docs/main/next/en/setup/backend/backend-aws-dynamodb-monitoring/index.html
index b8f90eb..caa119c 100644
--- a/docs/main/next/en/setup/backend/backend-aws-dynamodb-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-aws-dynamodb-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-aws-eks-monitoring/index.html b/docs/main/next/en/setup/backend/backend-aws-eks-monitoring/index.html
index 3f68c13..31ea19a 100644
--- a/docs/main/next/en/setup/backend/backend-aws-eks-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-aws-eks-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-aws-s3-monitoring/index.html b/docs/main/next/en/setup/backend/backend-aws-s3-monitoring/index.html
index 308ac6f..fb9d818 100644
--- a/docs/main/next/en/setup/backend/backend-aws-s3-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-aws-s3-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-bookkeeper-monitoring/index.html b/docs/main/next/en/setup/backend/backend-bookkeeper-monitoring/index.html
index d8c1999..8e330ba 100644
--- a/docs/main/next/en/setup/backend/backend-bookkeeper-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-bookkeeper-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2473,7 +2473,7 @@
 <li>Set up <a href="https://bookkeeper.apache.org/docs/deployment/manual">BookKeeper Cluster</a>.</li>
 <li>Set up the <a href="https://opentelemetry.io/docs/collector/getting-started/#kubernetes">OpenTelemetry Collector</a>. For an example
 OpenTelemetry Collector configuration, refer
-to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/pulsar/otel-collector-config.yaml">here</a>.</li>
+to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/pulsar/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h2 id="bookkeeper-monitoring-1">BookKeeper Monitoring</h2>
diff --git a/docs/main/next/en/setup/backend/backend-clickhouse-monitoring/index.html b/docs/main/next/en/setup/backend/backend-clickhouse-monitoring/index.html
index 3f0485c..b9210fe 100644
--- a/docs/main/next/en/setup/backend/backend-clickhouse-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-clickhouse-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2477,7 +2477,7 @@
 .</li>
 <li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector </a>. For details on
 Prometheus Receiver in OpenTelemetry Collector, refer
-to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/mysql/prometheus-mysql-exporter/otel-collector-config.yaml">here</a>.</li>
+to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/mysql/prometheus-mysql-exporter/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h3 id="clickhouse-monitoring-1">ClickHouse Monitoring</h3>
diff --git a/docs/main/next/en/setup/backend/backend-cluster/index.html b/docs/main/next/en/setup/backend/backend-cluster/index.html
index 7aeb39f..ffff952 100644
--- a/docs/main/next/en/setup/backend/backend-cluster/index.html
+++ b/docs/main/next/en/setup/backend/backend-cluster/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-continuous-profiling/index.html b/docs/main/next/en/setup/backend/backend-continuous-profiling/index.html
index 34c1d8c..d2d424b 100644
--- a/docs/main/next/en/setup/backend/backend-continuous-profiling/index.html
+++ b/docs/main/next/en/setup/backend/backend-continuous-profiling/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-data-generator/index.html b/docs/main/next/en/setup/backend/backend-data-generator/index.html
index cbf410c..04bf356 100644
--- a/docs/main/next/en/setup/backend/backend-data-generator/index.html
+++ b/docs/main/next/en/setup/backend/backend-data-generator/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-docker/index.html b/docs/main/next/en/setup/backend/backend-docker/index.html
index 612f1fd..20c3ce8 100644
--- a/docs/main/next/en/setup/backend/backend-docker/index.html
+++ b/docs/main/next/en/setup/backend/backend-docker/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-ebpf-profiling/index.html b/docs/main/next/en/setup/backend/backend-ebpf-profiling/index.html
index 16fdbb0..58667d7 100644
--- a/docs/main/next/en/setup/backend/backend-ebpf-profiling/index.html
+++ b/docs/main/next/en/setup/backend/backend-ebpf-profiling/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-elasticsearch-monitoring/index.html b/docs/main/next/en/setup/backend/backend-elasticsearch-monitoring/index.html
index b88ebda..2c3a9ce 100644
--- a/docs/main/next/en/setup/backend/backend-elasticsearch-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-elasticsearch-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2473,7 +2473,7 @@
 <ol>
 <li>Set up the <a href="https://github.com/prometheus-community/elasticsearch_exporter">elasticsearch-exporter</a>.</li>
 <li>Set up the <a href="https://opentelemetry.io/docs/collector/getting-started/#kubernetes">OpenTelemetry Collector</a>. For an example OpenTelemetry Collector configuration, refer
-to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/elasticsearch/otel-collector-config.yaml">here</a>.</li>
+to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/elasticsearch/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h2 id="elasticsearch-monitoring-1">Elasticsearch Monitoring</h2>
diff --git a/docs/main/next/en/setup/backend/backend-expose/index.html b/docs/main/next/en/setup/backend/backend-expose/index.html
index bd01d79..a719acc 100644
--- a/docs/main/next/en/setup/backend/backend-expose/index.html
+++ b/docs/main/next/en/setup/backend/backend-expose/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-health-check/index.html b/docs/main/next/en/setup/backend/backend-health-check/index.html
index f4b688b..5f4f0e3 100644
--- a/docs/main/next/en/setup/backend/backend-health-check/index.html
+++ b/docs/main/next/en/setup/backend/backend-health-check/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-init-mode/index.html b/docs/main/next/en/setup/backend/backend-init-mode/index.html
index 9a0f95a..84bf655 100644
--- a/docs/main/next/en/setup/backend/backend-init-mode/index.html
+++ b/docs/main/next/en/setup/backend/backend-init-mode/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-ip-port/index.html b/docs/main/next/en/setup/backend/backend-ip-port/index.html
index 867c96f..13fc4aa 100644
--- a/docs/main/next/en/setup/backend/backend-ip-port/index.html
+++ b/docs/main/next/en/setup/backend/backend-ip-port/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-k8s-monitoring-metrics-cadvisor/index.html b/docs/main/next/en/setup/backend/backend-k8s-monitoring-metrics-cadvisor/index.html
index 96e38e7..a2c1435 100644
--- a/docs/main/next/en/setup/backend/backend-k8s-monitoring-metrics-cadvisor/index.html
+++ b/docs/main/next/en/setup/backend/backend-k8s-monitoring-metrics-cadvisor/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-k8s-monitoring-rover/index.html b/docs/main/next/en/setup/backend/backend-k8s-monitoring-rover/index.html
index b2b5299..5e3c63f 100644
--- a/docs/main/next/en/setup/backend/backend-k8s-monitoring-rover/index.html
+++ b/docs/main/next/en/setup/backend/backend-k8s-monitoring-rover/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-k8s-monitoring/index.html b/docs/main/next/en/setup/backend/backend-k8s-monitoring/index.html
index 76c2b46..bb8e5eb 100644
--- a/docs/main/next/en/setup/backend/backend-k8s-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-k8s-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-k8s-network-monitoring/index.html b/docs/main/next/en/setup/backend/backend-k8s-network-monitoring/index.html
index 456adcc..46e2b99 100644
--- a/docs/main/next/en/setup/backend/backend-k8s-network-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-k8s-network-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-k8s/index.html b/docs/main/next/en/setup/backend/backend-k8s/index.html
index 496eeab..7e6cdf2 100644
--- a/docs/main/next/en/setup/backend/backend-k8s/index.html
+++ b/docs/main/next/en/setup/backend/backend-k8s/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-kafka-monitoring/index.html b/docs/main/next/en/setup/backend/backend-kafka-monitoring/index.html
index 3370f3b..1534312 100644
--- a/docs/main/next/en/setup/backend/backend-kafka-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-kafka-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2473,7 +2473,7 @@
 <li>Set up the <a href="https://github.com/prometheus/jmx_exporter">prometheus_JMX_Exporter</a>. An example JMX Exporter configuration is <a href="https://github.com/prometheus/jmx_exporter/blob/main/example_configs/kafka-2_0_0.yml">kafka-2_0_0.yml</a>.</li>
 <li>Set up the <a href="https://opentelemetry.io/docs/collector/getting-started/#kubernetes">OpenTelemetry Collector</a>. For an example
 OpenTelemetry Collector configuration, refer
-to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/kafka/kafka-monitoring/otel-collector-config.yaml">here</a>.</li>
+to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/kafka/kafka-monitoring/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h2 id="kafka-monitoring-1">Kafka Monitoring</h2>
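[Illustrative sketch, not part of the deployed diff] The Kafka monitoring steps in the hunk above point at an otel-collector-config.yaml in the pinned commit. As orientation only, a minimal OpenTelemetry Collector configuration in that spirit could look like the sketch below; the JMX Exporter target `kafka:9404`, the `cluster` label, and the OAP address `oap:11800` are placeholder assumptions, not values taken from the linked file.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: "kafka-monitoring"       # scrape the JMX Exporter endpoint
          scrape_interval: 10s
          static_configs:
            - targets: ["kafka:9404"]        # assumption: JMX Exporter HTTP port
              labels:
                cluster: kafka-cluster-1     # assumption: cluster label shown on dashboards

processors:
  batch: {}

exporters:
  otlp:
    endpoint: "oap:11800"                    # assumption: SkyWalking OAP gRPC/OTLP address
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
```

The same receiver/exporter pattern applies to the other exporter-based monitoring pages touched in this deploy (MySQL, PostgreSQL, Redis, RocketMQ, Pulsar, RabbitMQ, VM, Windows); only the scrape target and labels change.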
diff --git a/docs/main/next/en/setup/backend/backend-load-balancer/index.html b/docs/main/next/en/setup/backend/backend-load-balancer/index.html
index 2d123f7..e67d30d 100644
--- a/docs/main/next/en/setup/backend/backend-load-balancer/index.html
+++ b/docs/main/next/en/setup/backend/backend-load-balancer/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-meter/index.html b/docs/main/next/en/setup/backend/backend-meter/index.html
index 39c37b3..83d5db0 100644
--- a/docs/main/next/en/setup/backend/backend-meter/index.html
+++ b/docs/main/next/en/setup/backend/backend-meter/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-mongodb-monitoring/index.html b/docs/main/next/en/setup/backend/backend-mongodb-monitoring/index.html
index 7baa4d0..f2badbc 100644
--- a/docs/main/next/en/setup/backend/backend-mongodb-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-mongodb-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2473,7 +2473,7 @@
 <ol>
 <li>Setup <a href="https://github.com/percona/mongodb_exporter">mongodb-exporter</a>.</li>
 <li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector</a>. The example for OpenTelemetry Collector configuration, refer
-to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/mongodb/otel-collector-config.yaml">here</a>.</li>
+to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/mongodb/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h2 id="mongodb-monitoring-1">MongoDB Monitoring</h2>
diff --git a/docs/main/next/en/setup/backend/backend-mysql-monitoring/index.html b/docs/main/next/en/setup/backend/backend-mysql-monitoring/index.html
index 185d09b..d64e1fe 100644
--- a/docs/main/next/en/setup/backend/backend-mysql-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-mysql-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2470,7 +2470,7 @@
 <h3 id="set-up">Set up</h3>
 <ol>
 <li>Set up <a href="https://github.com/prometheus/mysqld_exporter#using-docker">mysqld_exporter</a>.</li>
-<li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector </a>. For details on Prometheus Receiver in OpenTelemetry Collector, refer to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/mysql/prometheus-mysql-exporter/otel-collector-config.yaml">here</a>.</li>
+<li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector </a>. For details on Prometheus Receiver in OpenTelemetry Collector, refer to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/mysql/prometheus-mysql-exporter/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h3 id="mysqlmariadb-monitoring-1">MySQL/MariaDB Monitoring</h3>
@@ -2582,8 +2582,8 @@
 <h3 id="set-up-1">Set up</h3>
 <ol>
 <li>Set up <a href="https://docs.fluentbit.io/manual/installation/docker">fluentbit</a>.</li>
-<li>Config fluentbit from <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/mysql/mysql-slowsql/fluent-bit.conf">here</a> for MySQL or <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/mariadb/mariadb-slowsql/fluent-bit.conf">here</a> for MariaDB.</li>
-<li>Enable slow log from <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/mysql/mysql-slowsql/my.cnf">here</a> for MySQL or <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/mariadb/mariadb-slowsql/my.cnf">here</a> for MariaDB.</li>
+<li>Config fluentbit from <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/mysql/mysql-slowsql/fluent-bit.conf">here</a> for MySQL or <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/mariadb/mariadb-slowsql/fluent-bit.conf">here</a> for MariaDB.</li>
+<li>Enable slow log from <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/mysql/mysql-slowsql/my.cnf">here</a> for MySQL or <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/mariadb/mariadb-slowsql/my.cnf">here</a> for MariaDB.</li>
 </ol>
 <h3 id="slow-sql-monitoring">Slow SQL Monitoring</h3>
 <p>Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a <code>Layer: MYSQL</code> <code>Service</code> in OAP.</p>
diff --git a/docs/main/next/en/setup/backend/backend-nginx-monitoring/index.html b/docs/main/next/en/setup/backend/backend-nginx-monitoring/index.html
index c9fbf32..d3f7828 100644
--- a/docs/main/next/en/setup/backend/backend-nginx-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-nginx-monitoring/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2474,7 +2474,7 @@
 </ol>
 <h3 id="set-up">Set up</h3>
 <ol>
-<li>Collect Nginx metrics and expose the following four metrics by <a href="https://github.com/knyar/nginx-lua-prometheus">nginx-lua-prometheus</a>. For details on metrics definition, refer to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/nginx/nginx.conf">here</a>.</li>
+<li>Collect Nginx metrics and expose the following four metrics by <a href="https://github.com/knyar/nginx-lua-prometheus">nginx-lua-prometheus</a>. For details on metrics definition, refer to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/nginx/nginx.conf">here</a>.</li>
 </ol>
 <ul>
 <li>histogram: nginx_http_latency</li>
@@ -2483,7 +2483,7 @@
 <li>counter: nginx_http_requests_total</li>
 </ul>
 <ol start="2">
-<li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector </a>. For details on Prometheus Receiver in OpenTelemetry Collector, refer to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/nginx/otel-collector-config.yaml">here</a>.</li>
+<li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector </a>. For details on Prometheus Receiver in OpenTelemetry Collector, refer to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/nginx/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h3 id="nginx-monitoring-1">Nginx Monitoring</h3>
@@ -2750,7 +2750,7 @@
 <h3 id="set-up-1">Set up</h3>
 <ol>
 <li>Install <a href="https://docs.fluentbit.io/manual/installation/docker">fluentbit</a>.</li>
-<li>Config fluent bit with fluent-bit.conf, refer to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/nginx/fluent-bit.conf">here</a>.</li>
+<li>Config fluent bit with fluent-bit.conf, refer to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/nginx/fluent-bit.conf">here</a>.</li>
 </ol>
 <h3 id="error-log-monitoring">Error Log Monitoring</h3>
 <p>Error Log monitoring provides monitoring of the error.log of the Nginx server.</p>
diff --git a/docs/main/next/en/setup/backend/backend-postgresql-monitoring/index.html b/docs/main/next/en/setup/backend/backend-postgresql-monitoring/index.html
index 5328f41..cea6bde 100644
--- a/docs/main/next/en/setup/backend/backend-postgresql-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-postgresql-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2470,7 +2470,7 @@
 <h3 id="set-up">Set up</h3>
 <ol>
 <li>Set up <a href="https://github.com/prometheus-community/postgres_exporter#quick-start">postgres-exporter</a>.</li>
-<li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector </a>. For details on Prometheus Receiver in OpenTelemetry Collector, refer to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/postgresql/postgres-exporter/otel-collector-config.yaml">here</a>.</li>
+<li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector </a>. For details on Prometheus Receiver in OpenTelemetry Collector, refer to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/postgresql/postgres-exporter/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h3 id="postgresql-monitoring-1">PostgreSQL Monitoring</h3>
@@ -2679,8 +2679,8 @@
 <h3 id="set-up-1">Set up</h3>
 <ol>
 <li>Set up <a href="https://docs.fluentbit.io/manual/installation/docker">fluentbit</a>.</li>
-<li>Config <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/postgresql/postgres-exporter/fluent-bit.conf">fluentbit</a></li>
-<li>Config PostgreSQL to enable slow log. <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/postgresql/postgres-exporter/postgresql.conf">Example</a>.</li>
+<li>Config <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/postgresql/postgres-exporter/fluent-bit.conf">fluentbit</a></li>
+<li>Config PostgreSQL to enable slow log. <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/postgresql/postgres-exporter/postgresql.conf">Example</a>.</li>
 </ol>
 <h3 id="slow-sql-monitoring">Slow SQL Monitoring</h3>
 <p>Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a <code>Layer: POSTGRESQL</code> <code>Service</code> in OAP.
diff --git a/docs/main/next/en/setup/backend/backend-profile-thread-merging/index.html b/docs/main/next/en/setup/backend/backend-profile-thread-merging/index.html
index 66e9dd1..f784041 100644
--- a/docs/main/next/en/setup/backend/backend-profile-thread-merging/index.html
+++ b/docs/main/next/en/setup/backend/backend-profile-thread-merging/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2512,7 +2512,7 @@
 </li>
 </ul>
 <h2 id="profile-data-debugging">Profile data debugging</h2>
-<p>Please follow the <a href="../../../guides/backend-profile-export#export-using-command-line">exporter tool</a> to package profile data. Unzip the profile data and use <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-tools/profile-exporter/tool-profile-snapshot-bootstrap/src/test/java/org/apache/skywalking/oap/server/tool/profile/exporter/ProfileExportedAnalyze.java">analyzer main function</a> to run it.</p>
+<p>Please follow the <a href="../../../guides/backend-profile-export#export-using-command-line">exporter tool</a> to package profile data. Unzip the profile data and use <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-tools/profile-exporter/tool-profile-snapshot-bootstrap/src/test/java/org/apache/skywalking/oap/server/tool/profile/exporter/ProfileExportedAnalyze.java">analyzer main function</a> to run it.</p>
 
 </div>
 <div>
diff --git a/docs/main/next/en/setup/backend/backend-pulsar-monitoring/index.html b/docs/main/next/en/setup/backend/backend-pulsar-monitoring/index.html
index e354170..b1ad865 100644
--- a/docs/main/next/en/setup/backend/backend-pulsar-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-pulsar-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2473,7 +2473,7 @@
 <li>Set up <a href="https://pulsar.apache.org/docs/3.1.x/getting-started-docker-compose/">Pulsar Cluster</a>. (Pulsar cluster includes pulsar broker cluster and Bookkeeper bookie cluster.)</li>
 <li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#kubernetes">OpenTelemetry Collector</a>. The example
 for OpenTelemetry Collector configuration, refer
-to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/pulsar/otel-collector-config.yaml">here</a>.</li>
+to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/pulsar/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h2 id="pulsar-monitoring-1">Pulsar Monitoring</h2>
diff --git a/docs/main/next/en/setup/backend/backend-rabbitmq-monitoring/index.html b/docs/main/next/en/setup/backend/backend-rabbitmq-monitoring/index.html
index 69a2000..ca9ddc7 100644
--- a/docs/main/next/en/setup/backend/backend-rabbitmq-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-rabbitmq-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2477,7 +2477,7 @@
 <li>Setup <a href="https://www.rabbitmq.com/prometheus.html#installation">rabbitmq_prometheus</a>.</li>
 <li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#kubernetes">OpenTelemetry Collector</a>. The example
 for OpenTelemetry Collector configuration, refer
-to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/rabbitmq/otel-collector-config.yaml">here</a>.</li>
+to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/rabbitmq/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h2 id="rabbitmq-monitoring-1">RabbitMQ Monitoring</h2>
diff --git a/docs/main/next/en/setup/backend/backend-redis-monitoring/index.html b/docs/main/next/en/setup/backend/backend-redis-monitoring/index.html
index 4ce2825..02fd809 100644
--- a/docs/main/next/en/setup/backend/backend-redis-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-redis-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2470,7 +2470,7 @@
 <h3 id="set-up">Set up</h3>
 <ol>
 <li>Set up <a href="https://github.com/oliver006/redis_exporter#building-and-running-the-exporter">redis-exporter</a>.</li>
-<li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector</a>. For details on Redis Receiver in OpenTelemetry Collector, refer to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/redis/redis-exporter/otel-collector-config.yaml">here</a>.</li>
+<li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector</a>. For details on Redis Receiver in OpenTelemetry Collector, refer to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/redis/redis-exporter/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h3 id="redis-monitoring-1">Redis Monitoring</h3>
@@ -2575,7 +2575,7 @@
 <p>SkyWalking leverages <a href="https://fluentbit.io/">fluentbit</a> or other log agents for collecting slow commands from Redis.</p>
 <h3 id="data-flow-1">Data flow</h3>
 <ol>
-<li>Execute <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/redis/redis-exporter/scripts/slowlog.sh">commands</a> periodically to collect slow logs from Redis and save the result locally.</li>
+<li>Execute <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/redis/redis-exporter/scripts/slowlog.sh">commands</a> periodically to collect slow logs from Redis and save the result locally.</li>
 <li>Fluent-bit agent collects slow logs from local file.</li>
 <li>fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP.</li>
 <li>The SkyWalking OAP Server parses the expression with <a href="../../../concepts-and-designs/lal">LAL</a> to parse/extract and store the results.</li>
@@ -2583,13 +2583,13 @@
 <h3 id="set-up-1">Set up</h3>
 <ol>
 <li>Set up <a href="https://docs.fluentbit.io/manual/installation/docker">fluentbit</a>.</li>
-<li>Config fluentbit from <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/redis/redis-exporter/fluent-bit.conf">here</a> for Redis.</li>
-<li>Config slow log from <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/redis/redis-exporter/redis.conf">here</a> for Redis.</li>
-<li>Periodically execute the <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/redis/redis-exporter/scripts/slowlog.sh">commands</a>.</li>
+<li>Config fluentbit from <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/redis/redis-exporter/fluent-bit.conf">here</a> for Redis.</li>
+<li>Config slow log from <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/redis/redis-exporter/redis.conf">here</a> for Redis.</li>
+<li>Periodically execute the <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/redis/redis-exporter/scripts/slowlog.sh">commands</a>.</li>
 </ol>
 <p><strong>Notice:</strong></p>
 <p>1.The <code>slowlog-log-slower-than</code> and <code>slowlog-max-len</code> configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file.
-2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/redis/redis-exporter">here</a>.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.</p>
+2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/redis/redis-exporter">here</a>.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.</p>
 <h3 id="slow-commands-monitoring">Slow Commands Monitoring</h3>
 <p>Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a <code>Layer: REDIS</code> <code>Service</code> in OAP.</p>
 <h4 id="supported-metrics-1">Supported Metrics</h4>
diff --git a/docs/main/next/en/setup/backend/backend-rocketmq-monitoring/index.html b/docs/main/next/en/setup/backend/backend-rocketmq-monitoring/index.html
index 7fe2cb0..ca78b0b 100644
--- a/docs/main/next/en/setup/backend/backend-rocketmq-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-rocketmq-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2473,7 +2473,7 @@
 <ol>
 <li>Setup <a href="https://github.com/apache/rocketmq-exporter">rocketmq-exporter</a>.</li>
 <li>Set up <a href="https://opentelemetry.io/docs/collector/getting-started/#docker">OpenTelemetry Collector</a>. The example for OpenTelemetry Collector configuration, refer
-to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/rocketmq/otel-collector-config.yaml">here</a>.</li>
+to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/rocketmq/otel-collector-config.yaml">here</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h2 id="rocketmq-monitoring-1">RocketMQ Monitoring</h2>
diff --git a/docs/main/next/en/setup/backend/backend-setting-override/index.html b/docs/main/next/en/setup/backend/backend-setting-override/index.html
index 6c11058..79cca47 100644
--- a/docs/main/next/en/setup/backend/backend-setting-override/index.html
+++ b/docs/main/next/en/setup/backend/backend-setting-override/index.html
@@ -2429,7 +2429,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-setup/index.html b/docs/main/next/en/setup/backend/backend-setup/index.html
index d4e9b81..c93a133 100644
--- a/docs/main/next/en/setup/backend/backend-setup/index.html
+++ b/docs/main/next/en/setup/backend/backend-setup/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-start-up-mode/index.html b/docs/main/next/en/setup/backend/backend-start-up-mode/index.html
index c294dc1..cdc72a5 100644
--- a/docs/main/next/en/setup/backend/backend-start-up-mode/index.html
+++ b/docs/main/next/en/setup/backend/backend-start-up-mode/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-storage/index.html b/docs/main/next/en/setup/backend/backend-storage/index.html
index aa3b3fd..e8ac39b 100644
--- a/docs/main/next/en/setup/backend/backend-storage/index.html
+++ b/docs/main/next/en/setup/backend/backend-storage/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-telemetry/index.html b/docs/main/next/en/setup/backend/backend-telemetry/index.html
index 66c68ca..ca05eae 100644
--- a/docs/main/next/en/setup/backend/backend-telemetry/index.html
+++ b/docs/main/next/en/setup/backend/backend-telemetry/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2493,7 +2493,7 @@
 </span></code></pre></div><ol start="2">
 <li>Set up OpenTelemetry to scrape the metrics from OAP telemetry.</li>
 </ol>
-<p>Refer to <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/so11y/otel-collector-config.yaml">the E2E test case</a> as an example.</p>
+<p>Refer to <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/so11y/otel-collector-config.yaml">the E2E test case</a> as an example.</p>
 <p>For Kubernetes deployments, read the following section, otherwise you should be able to
 adjust the configurations below to fit your scenarios.</p>
 <h3 id="service-discovery-on-kubernetes">Service discovery on Kubernetes</h3>
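[Illustrative sketch, not part of the deployed diff] For step 2 above (scraping the OAP's own telemetry), a minimal scrape job could look like the following; the target `oap:1234` assumes the default Prometheus telemetry port of the OAP, and sending the result back to the same OAP over OTLP is likewise an assumption to be adjusted for your deployment.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: "skywalking-so11y"   # OAP self-observability metrics
          scrape_interval: 5s
          static_configs:
            - targets: ["oap:1234"]      # assumption: telemetry.prometheus default port

exporters:
  otlp:
    endpoint: "oap:11800"                # assumption: metrics are sent back to the same OAP
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]
```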
diff --git a/docs/main/next/en/setup/backend/backend-token-auth/index.html b/docs/main/next/en/setup/backend/backend-token-auth/index.html
index e12176f..592936d 100644
--- a/docs/main/next/en/setup/backend/backend-token-auth/index.html
+++ b/docs/main/next/en/setup/backend/backend-token-auth/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-trace-profiling/index.html b/docs/main/next/en/setup/backend/backend-trace-profiling/index.html
index 21eeb5e..96c891e 100644
--- a/docs/main/next/en/setup/backend/backend-trace-profiling/index.html
+++ b/docs/main/next/en/setup/backend/backend-trace-profiling/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/backend-vm-monitoring/index.html b/docs/main/next/en/setup/backend/backend-vm-monitoring/index.html
index b85e304..99e53ca 100644
--- a/docs/main/next/en/setup/backend/backend-vm-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-vm-monitoring/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2483,7 +2483,7 @@
 <p><strong>For OpenTelemetry receiver:</strong></p>
 <ol>
 <li>Setup <a href="https://prometheus.io/docs/guides/node-exporter/">Prometheus node-exporter</a>.</li>
-<li>Setup <a href="https://opentelemetry.io/docs/collector/">OpenTelemetry Collector</a>. This is an example for OpenTelemetry Collector configuration <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/vm/prometheus-node-exporter/otel-collector-config.yaml">otel-collector-config.yaml</a>.</li>
+<li>Setup <a href="https://opentelemetry.io/docs/collector/">OpenTelemetry Collector</a>. This is an example for OpenTelemetry Collector configuration <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/vm/prometheus-node-exporter/otel-collector-config.yaml">otel-collector-config.yaml</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <p><strong>For Telegraf receiver:</strong></p>
diff --git a/docs/main/next/en/setup/backend/backend-win-monitoring/index.html b/docs/main/next/en/setup/backend/backend-win-monitoring/index.html
index 1ea3fa0..61ad2ad 100644
--- a/docs/main/next/en/setup/backend/backend-win-monitoring/index.html
+++ b/docs/main/next/en/setup/backend/backend-win-monitoring/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2475,7 +2475,7 @@
 <p><strong>For OpenTelemetry receiver:</strong></p>
 <ol>
 <li>Setup <a href="https://github.com/prometheus-community/windows_exporter">Prometheus windows_exporter</a>.</li>
-<li>Setup <a href="https://opentelemetry.io/docs/collector/">OpenTelemetry Collector </a>. This is an example for OpenTelemetry Collector configuration <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/win/prometheus-windows_exporter/otel-collector-config.yaml">otel-collector-config.yaml</a>.</li>
+<li>Setup <a href="https://opentelemetry.io/docs/collector/">OpenTelemetry Collector </a>. This is an example for OpenTelemetry Collector configuration <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/win/prometheus-windows_exporter/otel-collector-config.yaml">otel-collector-config.yaml</a>.</li>
 <li>Config SkyWalking <a href="../opentelemetry-receiver">OpenTelemetry receiver</a>.</li>
 </ol>
 <h2 id="supported-metrics">Supported Metrics</h2>
diff --git a/docs/main/next/en/setup/backend/backend-zabbix/index.html b/docs/main/next/en/setup/backend/backend-zabbix/index.html
index d80cacf..dda146f 100644
--- a/docs/main/next/en/setup/backend/backend-zabbix/index.html
+++ b/docs/main/next/en/setup/backend/backend-zabbix/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2476,7 +2476,7 @@
 <p>The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files
 are located at <code>$CLASSPATH/zabbix-rules</code>.</p>
 <p>The file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.</p>
-<p>An example for Zabbix agent configuration could be found <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/vm/zabbix/zabbix_agentd.conf">here</a>.
+<p>An example for Zabbix agent configuration could be found <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/vm/zabbix/zabbix_agentd.conf">here</a>.
 You can find details on Zabbix agent items from <a href="https://www.zabbix.com/documentation/current/manual/config/items/itemtypes/zabbix_agent">Zabbix Agent documentation</a>.</p>
 <h3 id="configuration-file-1">Configuration file</h3>
 <div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-yaml" data-lang="yaml"><span style="color:#998;font-style:italic"># initExp is the expression that initializes the current configuration file</span><span style="color:#bbb">
diff --git a/docs/main/next/en/setup/backend/configuration-vocabulary/index.html b/docs/main/next/en/setup/backend/configuration-vocabulary/index.html
index 1c446bc..fffb755 100644
--- a/docs/main/next/en/setup/backend/configuration-vocabulary/index.html
+++ b/docs/main/next/en/setup/backend/configuration-vocabulary/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/dashboards-so11y-satellite/index.html b/docs/main/next/en/setup/backend/dashboards-so11y-satellite/index.html
index fafad0a..c0e0c97 100644
--- a/docs/main/next/en/setup/backend/dashboards-so11y-satellite/index.html
+++ b/docs/main/next/en/setup/backend/dashboards-so11y-satellite/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/dashboards-so11y/index.html b/docs/main/next/en/setup/backend/dashboards-so11y/index.html
index db2bbc3..70dedd9 100644
--- a/docs/main/next/en/setup/backend/dashboards-so11y/index.html
+++ b/docs/main/next/en/setup/backend/dashboards-so11y/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/dynamic-config-apollo/index.html b/docs/main/next/en/setup/backend/dynamic-config-apollo/index.html
index 5ce091c..4d47cc8 100644
--- a/docs/main/next/en/setup/backend/dynamic-config-apollo/index.html
+++ b/docs/main/next/en/setup/backend/dynamic-config-apollo/index.html
@@ -2420,7 +2420,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/dynamic-config-configmap/index.html b/docs/main/next/en/setup/backend/dynamic-config-configmap/index.html
index f7c931c..3156ab7 100644
--- a/docs/main/next/en/setup/backend/dynamic-config-configmap/index.html
+++ b/docs/main/next/en/setup/backend/dynamic-config-configmap/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/dynamic-config-consul/index.html b/docs/main/next/en/setup/backend/dynamic-config-consul/index.html
index fe17a12..1133ab8 100644
--- a/docs/main/next/en/setup/backend/dynamic-config-consul/index.html
+++ b/docs/main/next/en/setup/backend/dynamic-config-consul/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/dynamic-config-etcd/index.html b/docs/main/next/en/setup/backend/dynamic-config-etcd/index.html
index aef3896..ab2e912 100644
--- a/docs/main/next/en/setup/backend/dynamic-config-etcd/index.html
+++ b/docs/main/next/en/setup/backend/dynamic-config-etcd/index.html
@@ -2420,7 +2420,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/dynamic-config-nacos/index.html b/docs/main/next/en/setup/backend/dynamic-config-nacos/index.html
index 64d9c4b..c0c9a6f 100644
--- a/docs/main/next/en/setup/backend/dynamic-config-nacos/index.html
+++ b/docs/main/next/en/setup/backend/dynamic-config-nacos/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/dynamic-config-service/index.html b/docs/main/next/en/setup/backend/dynamic-config-service/index.html
index e91c392..9cd15a0 100644
--- a/docs/main/next/en/setup/backend/dynamic-config-service/index.html
+++ b/docs/main/next/en/setup/backend/dynamic-config-service/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2464,7 +2464,7 @@
               
 <div class="td-content">
 	<h1 id="dynamic-configuration-service-dcs">Dynamic Configuration Service, DCS</h1>
-<p><a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-configuration/grpc-configuration-sync/src/main/proto/configuration-service.proto">Dynamic Configuration Service</a>
+<p><a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-configuration/grpc-configuration-sync/src/main/proto/configuration-service.proto">Dynamic Configuration Service</a>
 is a gRPC service which requires implementation of the upstream system.
 The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:</p>
 <div class="highlight"><pre style="background-color:#fff;-moz-tab-size:4;-o-tab-size:4;tab-size:4"><code class="language-yaml" data-lang="yaml"><span style="color:#000080">configuration</span>:<span style="color:#bbb">
diff --git a/docs/main/next/en/setup/backend/dynamic-config-zookeeper/index.html b/docs/main/next/en/setup/backend/dynamic-config-zookeeper/index.html
index b72b56f..6343d92 100644
--- a/docs/main/next/en/setup/backend/dynamic-config-zookeeper/index.html
+++ b/docs/main/next/en/setup/backend/dynamic-config-zookeeper/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/dynamic-config/index.html b/docs/main/next/en/setup/backend/dynamic-config/index.html
index f53f30b..d8da183a 100644
--- a/docs/main/next/en/setup/backend/dynamic-config/index.html
+++ b/docs/main/next/en/setup/backend/dynamic-config/index.html
@@ -2420,7 +2420,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/dynamical-logging/index.html b/docs/main/next/en/setup/backend/dynamical-logging/index.html
index 192c94d..3574af6 100644
--- a/docs/main/next/en/setup/backend/dynamical-logging/index.html
+++ b/docs/main/next/en/setup/backend/dynamical-logging/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/endpoint-grouping-rules/index.html b/docs/main/next/en/setup/backend/endpoint-grouping-rules/index.html
index 60e8705..53343f1 100644
--- a/docs/main/next/en/setup/backend/endpoint-grouping-rules/index.html
+++ b/docs/main/next/en/setup/backend/endpoint-grouping-rules/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/exporter/index.html b/docs/main/next/en/setup/backend/exporter/index.html
index bcd19d6..17def7e 100644
--- a/docs/main/next/en/setup/backend/exporter/index.html
+++ b/docs/main/next/en/setup/backend/exporter/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/filelog-native/index.html b/docs/main/next/en/setup/backend/filelog-native/index.html
index 4fdd51c..47a3bdc 100644
--- a/docs/main/next/en/setup/backend/filelog-native/index.html
+++ b/docs/main/next/en/setup/backend/filelog-native/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2474,14 +2474,14 @@
 configs <code>enableNativeJsonLog</code>.</p>
 <p>Take the following Filebeat config YAML as an example to set up Filebeat:</p>
 <ul>
-<li><a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/kafka/log/filebeat.yml">filebeat.yml</a></li>
+<li><a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/kafka/log/filebeat.yml">filebeat.yml</a></li>
 </ul>
 <h3 id="fluentd">Fluentd</h3>
 <p>Fluentd supports using Kafka to transport logs. Open <a href="../kafka-fetcher#kafka-fetcher">kafka-fetcher</a> and enable
 configs <code>enableNativeJsonLog</code>.</p>
 <p>Take the following fluentd config file as an example to set up Fluentd:</p>
 <ul>
-<li><a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/kafka/log/fluentd.conf">fluentd.conf</a></li>
+<li><a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/kafka/log/fluentd.conf">fluentd.conf</a></li>
 </ul>
 <h3 id="fluent-bit">Fluent-bit</h3>
 <p>Fluent-bit sends logs to OAP directly through HTTP(rest port).
@@ -2489,7 +2489,7 @@
 inactivated)</p>
 <p>Take the following fluent-bit config files as an example to set up Fluent-bit:</p>
 <ul>
-<li><a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/cases/log/fluent-bit/fluent-bit.conf">fluent-bit.conf</a></li>
+<li><a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/cases/log/fluent-bit/fluent-bit.conf">fluent-bit.conf</a></li>
 </ul>
 
 </div>
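[Illustrative sketch, not part of the deployed diff] The Filebeat/Fluentd options in the hunk above ship native JSON logs through Kafka for the kafka-fetcher. A minimal filebeat.yml in that spirit might look like this; the log path, the broker address `kafka:9092`, and the topic `skywalking-logs` (the usual kafka-fetcher default, but verify against your settings) are assumptions rather than values from the pinned filebeat.yml.

```yaml
filebeat.inputs:
  - type: log                      # tail the JSON log files written by the agent/toolkit
    enabled: true
    paths:
      - /app/logs/*.log            # assumption: location of the JSON log output

output.kafka:
  hosts: ["kafka:9092"]            # assumption: Kafka broker reachable from Filebeat
  topic: "skywalking-logs"         # assumption: topic consumed by the OAP kafka-fetcher
  codec.format:
    string: '%{[message]}'         # forward the raw JSON line unchanged
```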
diff --git a/docs/main/next/en/setup/backend/grpc-security/index.html b/docs/main/next/en/setup/backend/grpc-security/index.html
index 468a2d4..85535bd 100644
--- a/docs/main/next/en/setup/backend/grpc-security/index.html
+++ b/docs/main/next/en/setup/backend/grpc-security/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2466,7 +2466,7 @@
 <p>The first step is to generate certificates and private key files for encrypting communication.</p>
 <h3 id="creating-ssltls-certificates">Creating SSL/TLS Certificates</h3>
 <p>The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use <code>openssl</code> from the command line.</p>
-<p>Use this <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/tools/TLS/tls_key_generate.sh">script</a> if you are not familiar with how to generate key files.</p>
+<p>Use this <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/tools/TLS/tls_key_generate.sh">script</a> if you are not familiar with how to generate key files.</p>
 <p>We need the following files:</p>
 <ul>
 <li><code>ca.crt</code>: A certificate authority public key for a client to validate the server&rsquo;s certificate.</li>
@@ -2511,7 +2511,7 @@
 </span><span style="color:#bbb">    </span><span style="color:#000080">gRPCSslCertChainPath</span>:<span style="color:#bbb"> </span>${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:&#34;/path/to/server.crt&#34;}<span style="color:#bbb">
 </span><span style="color:#bbb">    </span><span style="color:#000080">gRPCSslTrustedCAsPath</span>:<span style="color:#bbb"> </span>${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:&#34;/path/to/ca.crt&#34;}<span style="color:#bbb">
 </span><span style="color:#bbb">    </span><span style="color:#000080">authentication</span>:<span style="color:#bbb"> </span>${SW_AUTHENTICATION:&#34;&#34;}<span style="color:#bbb">
-</span></code></pre></div><p>You can still use this <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/tools/TLS/tls_key_generate.sh">script</a> to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite).
+</span></code></pre></div><p>You can still use this <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/tools/TLS/tls_key_generate.sh">script</a> to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite).
 You have to notice the keys, including server and client-side, are from the same CA certificate.</p>
 
 </div>
diff --git a/docs/main/next/en/setup/backend/kafka-fetcher/index.html b/docs/main/next/en/setup/backend/kafka-fetcher/index.html
index 503b44c..6b653b6 100644
--- a/docs/main/next/en/setup/backend/kafka-fetcher/index.html
+++ b/docs/main/next/en/setup/backend/kafka-fetcher/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/log-agent-native/index.html b/docs/main/next/en/setup/backend/log-agent-native/index.html
index 8779a9d..de0d9c2 100644
--- a/docs/main/next/en/setup/backend/log-agent-native/index.html
+++ b/docs/main/next/en/setup/backend/log-agent-native/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2482,9 +2482,9 @@
 to report logs through files with automatically injected trace context.</p>
 <p>Log framework config examples:</p>
 <ul>
-<li><a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/java-test-service/e2e-service-provider/src/main/resources/log4j.properties">log4j1.x fileAppender</a></li>
-<li><a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/java-test-service/e2e-service-provider/src/main/resources/log4j2.xml">log4j2.x fileAppender</a></li>
-<li><a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/test/e2e-v2/java-test-service/e2e-service-provider/src/main/resources/logback.xml">logback fileAppender</a></li>
+<li><a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/java-test-service/e2e-service-provider/src/main/resources/log4j.properties">log4j1.x fileAppender</a></li>
+<li><a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/java-test-service/e2e-service-provider/src/main/resources/log4j2.xml">log4j2.x fileAppender</a></li>
+<li><a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/test/e2e-v2/java-test-service/e2e-service-provider/src/main/resources/logback.xml">logback fileAppender</a></li>
 </ul>
 <h2 id="python-agent-log-reporter">Python agent log reporter</h2>
 <p><a href="https://github.com/apache/skywalking-python">SkyWalking Python Agent</a> implements a log reporter for the <a href="https://docs.python.org/3/library/logging.html">logging
diff --git a/docs/main/next/en/setup/backend/log-analyzer/index.html b/docs/main/next/en/setup/backend/log-analyzer/index.html
index 32afd27..6009786 100644
--- a/docs/main/next/en/setup/backend/log-analyzer/index.html
+++ b/docs/main/next/en/setup/backend/log-analyzer/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/log-otlp/index.html b/docs/main/next/en/setup/backend/log-otlp/index.html
index 423a8e4..e944f2d 100644
--- a/docs/main/next/en/setup/backend/log-otlp/index.html
+++ b/docs/main/next/en/setup/backend/log-otlp/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/metrics-exporter/index.html b/docs/main/next/en/setup/backend/metrics-exporter/index.html
index 9314a70..643eb31 100644
--- a/docs/main/next/en/setup/backend/metrics-exporter/index.html
+++ b/docs/main/next/en/setup/backend/metrics-exporter/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/micrometer-observations/index.html b/docs/main/next/en/setup/backend/micrometer-observations/index.html
index 573214e..25901e7 100644
--- a/docs/main/next/en/setup/backend/micrometer-observations/index.html
+++ b/docs/main/next/en/setup/backend/micrometer-observations/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2473,7 +2473,7 @@
 </span><span style="color:#bbb">  </span><span style="color:#000080">default</span>:<span style="color:#bbb">
 </span></code></pre></div><ol start="2">
 <li>
-<p>Configure the meter config file. It already has the <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-starter/src/main/resources/meter-analyzer-config/spring-micrometer.yaml">spring sleuth meter config</a>.
+<p>Configure the meter config file. It already has the <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-starter/src/main/resources/meter-analyzer-config/spring-micrometer.yaml">spring sleuth meter config</a>.
 If you have a customized meter at the agent side, please configure the meter using the steps set out in the <a href="../backend-meter#manual-meter-api">meter document</a>.</p>
 </li>
 <li>
diff --git a/docs/main/next/en/setup/backend/mq/index.html b/docs/main/next/en/setup/backend/mq/index.html
index fc57fa4..f366cac 100644
--- a/docs/main/next/en/setup/backend/mq/index.html
+++ b/docs/main/next/en/setup/backend/mq/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/on-demand-pod-log/index.html b/docs/main/next/en/setup/backend/on-demand-pod-log/index.html
index bbe3847..830e909 100644
--- a/docs/main/next/en/setup/backend/on-demand-pod-log/index.html
+++ b/docs/main/next/en/setup/backend/on-demand-pod-log/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/opentelemetry-receiver/index.html b/docs/main/next/en/setup/backend/opentelemetry-receiver/index.html
index db58b96..1955df7 100644
--- a/docs/main/next/en/setup/backend/opentelemetry-receiver/index.html
+++ b/docs/main/next/en/setup/backend/opentelemetry-receiver/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/otlp-trace/index.html b/docs/main/next/en/setup/backend/otlp-trace/index.html
index 640ad15..4a9cafb 100644
--- a/docs/main/next/en/setup/backend/otlp-trace/index.html
+++ b/docs/main/next/en/setup/backend/otlp-trace/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/service-auto-grouping/index.html b/docs/main/next/en/setup/backend/service-auto-grouping/index.html
index 4674f02..fa3e9e9 100644
--- a/docs/main/next/en/setup/backend/service-auto-grouping/index.html
+++ b/docs/main/next/en/setup/backend/service-auto-grouping/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/slow-cache-command/index.html b/docs/main/next/en/setup/backend/slow-cache-command/index.html
index 82a51b4..dd3a533 100644
--- a/docs/main/next/en/setup/backend/slow-cache-command/index.html
+++ b/docs/main/next/en/setup/backend/slow-cache-command/index.html
@@ -2420,7 +2420,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/slow-db-statement/index.html b/docs/main/next/en/setup/backend/slow-db-statement/index.html
index 5c3c340..4675057 100644
--- a/docs/main/next/en/setup/backend/slow-db-statement/index.html
+++ b/docs/main/next/en/setup/backend/slow-db-statement/index.html
@@ -2420,7 +2420,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/storages/banyandb/index.html b/docs/main/next/en/setup/backend/storages/banyandb/index.html
index a7aec17..a73c6ff 100644
--- a/docs/main/next/en/setup/backend/storages/banyandb/index.html
+++ b/docs/main/next/en/setup/backend/storages/banyandb/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/storages/elasticsearch/index.html b/docs/main/next/en/setup/backend/storages/elasticsearch/index.html
index 3fdc0da..4132702 100644
--- a/docs/main/next/en/setup/backend/storages/elasticsearch/index.html
+++ b/docs/main/next/en/setup/backend/storages/elasticsearch/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/storages/h2/index.html b/docs/main/next/en/setup/backend/storages/h2/index.html
index a0c32d5..a4eb4d2 100644
--- a/docs/main/next/en/setup/backend/storages/h2/index.html
+++ b/docs/main/next/en/setup/backend/storages/h2/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/storages/mysql/index.html b/docs/main/next/en/setup/backend/storages/mysql/index.html
index f345312..a918a7d 100644
--- a/docs/main/next/en/setup/backend/storages/mysql/index.html
+++ b/docs/main/next/en/setup/backend/storages/mysql/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/storages/postgresql/index.html b/docs/main/next/en/setup/backend/storages/postgresql/index.html
index 6b90b68..3d8fd0c 100644
--- a/docs/main/next/en/setup/backend/storages/postgresql/index.html
+++ b/docs/main/next/en/setup/backend/storages/postgresql/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/telegraf-receiver/index.html b/docs/main/next/en/setup/backend/telegraf-receiver/index.html
index 9ced66a..deaba55 100644
--- a/docs/main/next/en/setup/backend/telegraf-receiver/index.html
+++ b/docs/main/next/en/setup/backend/telegraf-receiver/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/trace-sampling/index.html b/docs/main/next/en/setup/backend/trace-sampling/index.html
index 97de558..437055a 100644
--- a/docs/main/next/en/setup/backend/trace-sampling/index.html
+++ b/docs/main/next/en/setup/backend/trace-sampling/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/ttl/index.html b/docs/main/next/en/setup/backend/ttl/index.html
index f7d85f2..dcfa878 100644
--- a/docs/main/next/en/setup/backend/ttl/index.html
+++ b/docs/main/next/en/setup/backend/ttl/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/ui-grafana/index.html b/docs/main/next/en/setup/backend/ui-grafana/index.html
index 77787d2..a54d4fb 100644
--- a/docs/main/next/en/setup/backend/ui-grafana/index.html
+++ b/docs/main/next/en/setup/backend/ui-grafana/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2482,7 +2482,7 @@
 <h3 id="dashboards-settings">Dashboards Settings</h3>
 <p>The following steps are the example of config a <code>General Service</code> dashboard:</p>
 <ol>
-<li>Create a dashboard named <code>General Service</code>. A <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/Layer.java">layer</a> is recommended as a dashboard.</li>
+<li>Create a dashboard named <code>General Service</code>. A <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-core/src/main/java/org/apache/skywalking/oap/server/core/analysis/Layer.java">layer</a> is recommended as a dashboard.</li>
 <li>Configure variables for the dashboard:
 <img src="https://skywalking.apache.org/screenshots/9.6.0/promql/grafana-variables.jpg"/>
 After configure, you can select the service/instance/endpoint on the top of the dashboard:
diff --git a/docs/main/next/en/setup/backend/ui-setup/index.html b/docs/main/next/en/setup/backend/ui-setup/index.html
index 47de402..d989f3b 100644
--- a/docs/main/next/en/setup/backend/ui-setup/index.html
+++ b/docs/main/next/en/setup/backend/ui-setup/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/uninstrumented-gateways/index.html b/docs/main/next/en/setup/backend/uninstrumented-gateways/index.html
index 8f9eae6..c0b621d 100644
--- a/docs/main/next/en/setup/backend/uninstrumented-gateways/index.html
+++ b/docs/main/next/en/setup/backend/uninstrumented-gateways/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/backend/zipkin-trace/index.html b/docs/main/next/en/setup/backend/zipkin-trace/index.html
index 5846cc8..32a4c72 100644
--- a/docs/main/next/en/setup/backend/zipkin-trace/index.html
+++ b/docs/main/next/en/setup/backend/zipkin-trace/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/envoy/als_setting/index.html b/docs/main/next/en/setup/envoy/als_setting/index.html
index a155643..03eb71b 100644
--- a/docs/main/next/en/setup/envoy/als_setting/index.html
+++ b/docs/main/next/en/setup/envoy/als_setting/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
@@ -2537,7 +2537,7 @@
 <p><code>persistence</code> analyzer adapts the Envoy access log format to
 SkyWalking&rsquo;s <a href="https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto">native log format</a>, and forwards the formatted logs to <a href="../../../concepts-and-designs/lal">LAL</a>, where you can configure persistent
 conditions, such as <code>sampler</code>, only persist error logs, etc. SkyWalking provides a default configuration
-file <a href="https://github.com/apache/skywalking/tree/a63601318d802ef7f2f520758725824d317385cf/oap-server/server-starter/src/main/resources/lal/envoy-als.yaml"><code>envoy-als.yaml</code></a> that you can
+file <a href="https://github.com/apache/skywalking/tree/b3044ee71464c117ffcbf80a5b669da0ad64c260/oap-server/server-starter/src/main/resources/lal/envoy-als.yaml"><code>envoy-als.yaml</code></a> that you can
 adjust as per your needs. Please make sure to activate this rule via adding the rule name <code>envoy-als</code>
 into config item <code>log-analyzer/default/lalFiles</code> (or environment variable <code>SW_LOG_LAL_FILES</code>,
 e.g. <code>SW_LOG_LAL_FILES=envoy-als</code>).</p>
diff --git a/docs/main/next/en/setup/envoy/examples/metrics/readme/index.html b/docs/main/next/en/setup/envoy/examples/metrics/readme/index.html
index 6d537a2..11aabff 100644
--- a/docs/main/next/en/setup/envoy/examples/metrics/readme/index.html
+++ b/docs/main/next/en/setup/envoy/examples/metrics/readme/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/envoy/metrics_service_setting/index.html b/docs/main/next/en/setup/envoy/metrics_service_setting/index.html
index 27cad99..4337a5c 100644
--- a/docs/main/next/en/setup/envoy/metrics_service_setting/index.html
+++ b/docs/main/next/en/setup/envoy/metrics_service_setting/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/istio/readme/index.html b/docs/main/next/en/setup/istio/readme/index.html
index f162bfb..3dc3f4e 100644
--- a/docs/main/next/en/setup/istio/readme/index.html
+++ b/docs/main/next/en/setup/istio/readme/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/service-agent/agent-compatibility/index.html b/docs/main/next/en/setup/service-agent/agent-compatibility/index.html
index c3319ea..d475e11 100644
--- a/docs/main/next/en/setup/service-agent/agent-compatibility/index.html
+++ b/docs/main/next/en/setup/service-agent/agent-compatibility/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/service-agent/browser-agent/index.html b/docs/main/next/en/setup/service-agent/browser-agent/index.html
index 77b4dd7..645f0cd 100644
--- a/docs/main/next/en/setup/service-agent/browser-agent/index.html
+++ b/docs/main/next/en/setup/service-agent/browser-agent/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/service-agent/server-agents/index.html b/docs/main/next/en/setup/service-agent/server-agents/index.html
index 8e59e33..4dd569c 100644
--- a/docs/main/next/en/setup/service-agent/server-agents/index.html
+++ b/docs/main/next/en/setup/service-agent/server-agents/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/service-agent/virtual-cache/index.html b/docs/main/next/en/setup/service-agent/virtual-cache/index.html
index d6fd9fe..034040e 100644
--- a/docs/main/next/en/setup/service-agent/virtual-cache/index.html
+++ b/docs/main/next/en/setup/service-agent/virtual-cache/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/service-agent/virtual-database/index.html b/docs/main/next/en/setup/service-agent/virtual-database/index.html
index ce6808c..0c0a02b 100644
--- a/docs/main/next/en/setup/service-agent/virtual-database/index.html
+++ b/docs/main/next/en/setup/service-agent/virtual-database/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/service-agent/virtual-mq/index.html b/docs/main/next/en/setup/service-agent/virtual-mq/index.html
index ada4a3d..d3af051 100644
--- a/docs/main/next/en/setup/service-agent/virtual-mq/index.html
+++ b/docs/main/next/en/setup/service-agent/virtual-mq/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/setup/zipkin/tracing/index.html b/docs/main/next/en/setup/zipkin/tracing/index.html
index 447e52f..4ec27de 100644
--- a/docs/main/next/en/setup/zipkin/tracing/index.html
+++ b/docs/main/next/en/setup/zipkin/tracing/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/swip/readme/index.html b/docs/main/next/en/swip/readme/index.html
index 5a07a05..123d645 100644
--- a/docs/main/next/en/swip/readme/index.html
+++ b/docs/main/next/en/swip/readme/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/swip/swip-1/index.html b/docs/main/next/en/swip/swip-1/index.html
index da8a528..98ab3d0 100644
--- a/docs/main/next/en/swip/swip-1/index.html
+++ b/docs/main/next/en/swip/swip-1/index.html
@@ -2408,7 +2408,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/swip/swip-2/index.html b/docs/main/next/en/swip/swip-2/index.html
index 1475fd5..17299eb 100644
--- a/docs/main/next/en/swip/swip-2/index.html
+++ b/docs/main/next/en/swip/swip-2/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/swip/swip-3/index.html b/docs/main/next/en/swip/swip-3/index.html
index fe838eb..55522c5 100644
--- a/docs/main/next/en/swip/swip-3/index.html
+++ b/docs/main/next/en/swip/swip-3/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/swip/swip-4/index.html b/docs/main/next/en/swip/swip-4/index.html
index 09af444..d96b881 100644
--- a/docs/main/next/en/swip/swip-4/index.html
+++ b/docs/main/next/en/swip/swip-4/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/swip/swip-5/index.html b/docs/main/next/en/swip/swip-5/index.html
index 9f999f2..fb8bf29 100644
--- a/docs/main/next/en/swip/swip-5/index.html
+++ b/docs/main/next/en/swip/swip-5/index.html
@@ -2411,7 +2411,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/swip/swip-8/index.html b/docs/main/next/en/swip/swip-8/index.html
index cd61611..153ae91 100644
--- a/docs/main/next/en/swip/swip-8/index.html
+++ b/docs/main/next/en/swip/swip-8/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/en/ui/readme/index.html b/docs/main/next/en/ui/readme/index.html
index 9a7ccba..6a9f57b 100644
--- a/docs/main/next/en/ui/readme/index.html
+++ b/docs/main/next/en/ui/readme/index.html
@@ -2417,7 +2417,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/main/next/readme/index.html b/docs/main/next/readme/index.html
index 14abf90..2a291c0 100644
--- a/docs/main/next/readme/index.html
+++ b/docs/main/next/readme/index.html
@@ -2414,7 +2414,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: a636013</div>
+                  <div class="commit-id">Commit Id: b3044ee</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/contribution/compiling/index.html b/docs/skywalking-java/next/en/contribution/compiling/index.html
index a0eb0a9..dfa7f07 100644
--- a/docs/skywalking-java/next/en/contribution/compiling/index.html
+++ b/docs/skywalking-java/next/en/contribution/compiling/index.html
@@ -734,7 +734,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/contribution/release-java-agent/index.html b/docs/skywalking-java/next/en/contribution/release-java-agent/index.html
index 559f7d2..9618ce0 100644
--- a/docs/skywalking-java/next/en/contribution/release-java-agent/index.html
+++ b/docs/skywalking-java/next/en/contribution/release-java-agent/index.html
@@ -734,7 +734,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/faq/ext-dirs/index.html b/docs/skywalking-java/next/en/faq/ext-dirs/index.html
index a0516b3..82681c3 100644
--- a/docs/skywalking-java/next/en/faq/ext-dirs/index.html
+++ b/docs/skywalking-java/next/en/faq/ext-dirs/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/faq/osgi/index.html b/docs/skywalking-java/next/en/faq/osgi/index.html
index 1c3c8e8..0845c1a 100644
--- a/docs/skywalking-java/next/en/faq/osgi/index.html
+++ b/docs/skywalking-java/next/en/faq/osgi/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-features/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-features/index.html
index c4bea48..0880466 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-features/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-features/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-reporters/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-reporters/index.html
index e6edcc4..8de7e6b 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-reporters/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-reporters/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/index.html
index a2b2054..a7da3cc 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/index.html
@@ -734,7 +734,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/index.html
index 50871c3..debc102 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/index.html
index 01a474b..725ab7a 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/index.html
index 4af6c6f..c174e9a 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/index.html
@@ -731,7 +731,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-dependency/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-dependency/index.html
index 8df035c..b4925c6 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-dependency/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-dependency/index.html
@@ -731,7 +731,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-kafka/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-kafka/index.html
index 4a1bad5..d500005 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-kafka/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-kafka/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/index.html
index dc6e3fa..dea6bef 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/index.html
index a257c11..7bd96bc 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/index.html
index 322f1c0..a2a2759 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-meter/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-meter/index.html
index 20feb9c..829fbe6 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-meter/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-meter/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/index.html
index 8a3b82e..31658f5 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer/index.html
index a303a2c..226572a 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/index.html
index 3a203fe..3f99b40 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/index.html
index 3348c2d..58eca93 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/index.html
@@ -731,7 +731,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/index.html
index 360fab4..d464eed 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/index.html
index 1010e1d..9b20885 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace/index.html
index 0fdd29d..757404a 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-tracer/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-tracer/index.html
index 0c99325..2d20cc7 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-tracer/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-tracer/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-webflux/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-webflux/index.html
index 809b241..06c0577 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-webflux/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-webflux/index.html
@@ -737,7 +737,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/bootstrap-plugins/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/bootstrap-plugins/index.html
index 58c36f0..a4397aa 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/bootstrap-plugins/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/bootstrap-plugins/index.html
@@ -731,7 +731,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/configuration-discovery/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/configuration-discovery/index.html
index 4b3eb7f..8bef55d 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/configuration-discovery/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/configuration-discovery/index.html
@@ -731,7 +731,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/configurations/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/configurations/index.html
index 78a05a4..8c376b5 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/configurations/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/configurations/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/containerization/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/containerization/index.html
index fc8e5fb..85242df 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/containerization/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/containerization/index.html
@@ -731,7 +731,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/customize-enhance-trace/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/customize-enhance-trace/index.html
index ca2b574..aeb6b9b 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/customize-enhance-trace/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/customize-enhance-trace/index.html
@@ -737,7 +737,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-disable-plugin/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-disable-plugin/index.html
index 7b8fae7..9174ecf 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-disable-plugin/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-disable-plugin/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/index.html
index a4c39fa..3126a58 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/java-plugin-development-guide/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/java-plugin-development-guide/index.html
index f71034b..05e2fc0 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/java-plugin-development-guide/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/java-plugin-development-guide/index.html
@@ -731,7 +731,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
@@ -1107,7 +1107,7 @@
   witnessMethodList<span style="color:#000;font-weight:bold">.</span><span style="color:#008080">add</span><span style="color:#000;font-weight:bold">(</span>witnessMethod<span style="color:#000;font-weight:bold">);</span>
   <span style="color:#000;font-weight:bold">return</span> witnessMethodList<span style="color:#000;font-weight:bold">;</span>
 <span style="color:#000;font-weight:bold">}</span>
-</code></pre></div><p>For more examples, see <a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/apm-sniffer/apm-agent-core/src/test/java/org/apache/skywalking/apm/agent/core/plugin/witness/WitnessTest.java">WitnessTest.java</a></p>
+</code></pre></div><p>For more examples, see <a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/apm-sniffer/apm-agent-core/src/test/java/org/apache/skywalking/apm/agent/core/plugin/witness/WitnessTest.java">WitnessTest.java</a></p>
 </li>
 </ol>
 <h3 id="implement-an-interceptor">Implement an interceptor</h3>
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/logic-endpoint/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/logic-endpoint/index.html
index 5b753b6..8a09397 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/logic-endpoint/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/logic-endpoint/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/opentracing/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/opentracing/index.html
index 53f583d..7e87572 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/opentracing/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/opentracing/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/optional-plugins/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/optional-plugins/index.html
index 434b7f2..3b29545 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/optional-plugins/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/optional-plugins/index.html
@@ -731,7 +731,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-list/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-list/index.html
index c67436c..4cfd8d1 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-list/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-list/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-test/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-test/index.html
index aac81e2..8304e2f 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-test/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-test/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
@@ -777,22 +777,22 @@
 <h2 id="case-base-image-introduction">Case Base Image Introduction</h2>
 <p>The test framework provides <code>JVM-container</code> and <code>Tomcat-container</code> base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, <strong><code>JVM-container</code> is preferred</strong>.</p>
 <h3 id="jvm-container-image-introduction">JVM-container Image Introduction</h3>
-<p><a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/test/plugin/containers/jvm-container">JVM-container</a> uses <code>eclipse-temurin:8-jdk</code> as the base image. <code>JVM-container</code> supports JDK8 and JDK17 as well in CI, which inherits <code>eclipse-temurin:8-jdk</code> and <code>eclipse-temurin:17-jdk</code>.
+<p><a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/test/plugin/containers/jvm-container">JVM-container</a> uses <code>eclipse-temurin:8-jdk</code> as the base image. <code>JVM-container</code> supports JDK8 and JDK17 as well in CI, which inherits <code>eclipse-temurin:8-jdk</code> and <code>eclipse-temurin:17-jdk</code>.
 It is supported to custom the base Java docker image by specify <code>base_image_java</code>.
 The test case project must be packaged as <code>project-name.zip</code>, including <code>startup.sh</code> and uber jar, by using <code>mvn clean package</code>.</p>
 <p>Take the following test projects as examples:</p>
 <ul>
-<li><a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/test/plugin/scenarios/sofarpc-scenario">sofarpc-scenario</a> is a single project case.</li>
-<li><a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/test/plugin/scenarios/webflux-scenario">webflux-scenario</a> is a case including multiple projects.</li>
-<li><a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/test/plugin/scenarios/jdk17-with-gson-scenario">jdk17-with-gson-scenario</a> is a single project case with JDK17.</li>
+<li><a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/test/plugin/scenarios/sofarpc-scenario">sofarpc-scenario</a> is a single project case.</li>
+<li><a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/test/plugin/scenarios/webflux-scenario">webflux-scenario</a> is a case including multiple projects.</li>
+<li><a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/test/plugin/scenarios/jdk17-with-gson-scenario">jdk17-with-gson-scenario</a> is a single project case with JDK17.</li>
 </ul>
 <h3 id="tomcat-container-image-introduction">Tomcat-container Image Introduction</h3>
-<p><a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/test/plugin/containers/tomcat-container">Tomcat-container</a> uses <code>tomcat:8.5-jdk8-openjdk</code>, <code>tomcat:8.5-jdk17-openjdk</code> as the base image.
+<p><a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/test/plugin/containers/tomcat-container">Tomcat-container</a> uses <code>tomcat:8.5-jdk8-openjdk</code>, <code>tomcat:8.5-jdk17-openjdk</code> as the base image.
 It is supported to custom the base Tomcat docker image by specify <code>base_image_tomcat</code>.
 The test case project must be packaged as <code>project-name.war</code> by using <code>mvn package</code>.</p>
 <p>Take the following test project as an example</p>
 <ul>
-<li><a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/test/plugin/scenarios/spring-4.3.x-scenario">spring-4.3.x-scenario</a></li>
+<li><a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/test/plugin/scenarios/spring-4.3.x-scenario">spring-4.3.x-scenario</a></li>
 </ul>
 <h2 id="test-project-hierarchical-structure">Test project hierarchical structure</h2>
 <p>The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.</p>
@@ -984,10 +984,10 @@
 </blockquote>
 <p><strong>Take the following test cases as examples:</strong></p>
 <ul>
-<li><a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/test/plugin/scenarios/dubbo-2.7.x-scenario/configuration.yml">dubbo-2.7.x with JVM-container</a></li>
-<li><a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/test/plugin/scenarios/jetty-scenario/configuration.yml">jetty with JVM-container</a></li>
-<li><a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/test/plugin/scenarios/gateway-2.1.x-scenario/configuration.yml">gateway with runningMode</a></li>
-<li><a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/test/plugin/scenarios/canal-scenario/configuration.yml">canal with docker-compose</a></li>
+<li><a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/test/plugin/scenarios/dubbo-2.7.x-scenario/configuration.yml">dubbo-2.7.x with JVM-container</a></li>
+<li><a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/test/plugin/scenarios/jetty-scenario/configuration.yml">jetty with JVM-container</a></li>
+<li><a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/test/plugin/scenarios/gateway-2.1.x-scenario/configuration.yml">gateway with runningMode</a></li>
+<li><a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/test/plugin/scenarios/canal-scenario/configuration.yml">canal with docker-compose</a></li>
 </ul>
 <h3 id="expecteddatayaml">expectedData.yaml</h3>
 <p><strong>Operator for number</strong></p>
@@ -1436,8 +1436,8 @@
 </blockquote>
 <p><strong>Take the following test cases as examples</strong></p>
 <ul>
-<li><a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/test/plugin/scenarios/undertow-scenario/bin/startup.sh">undertow</a></li>
-<li><a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/test/plugin/scenarios/webflux-scenario/webflux-dist/bin/startup.sh">webflux</a></li>
+<li><a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/test/plugin/scenarios/undertow-scenario/bin/startup.sh">undertow</a></li>
+<li><a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/test/plugin/scenarios/webflux-scenario/webflux-dist/bin/startup.sh">webflux</a></li>
 </ul>
 <h2 id="best-practices">Best Practices</h2>
 <h3 id="how-to-use-the-archetype-to-create-a-test-case-project">How To Use The Archetype To Create A Test Case Project</h3>
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/readme/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/readme/index.html
index 17f1dc7..f135ca7 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/readme/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/readme/index.html
@@ -725,7 +725,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/setting-override/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/setting-override/index.html
index 99d3e29..301a07d 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/setting-override/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/setting-override/index.html
@@ -740,7 +740,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/specified-agent-config/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/specified-agent-config/index.html
index acd3de2..f92f51d 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/specified-agent-config/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/specified-agent-config/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/supported-list/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/supported-list/index.html
index bd2f304..dbfc7ca 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/supported-list/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/supported-list/index.html
@@ -728,7 +728,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/tls/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/tls/index.html
index 3dc2d04..60d42d1 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/tls/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/tls/index.html
@@ -734,7 +734,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
@@ -781,7 +781,7 @@
 <h2 id="creating-ssltls-certificates">Creating SSL/TLS Certificates</h2>
 <p>The first step is to generate certificates and key files for encrypting communication. This is
 fairly straightforward: use <code>openssl</code> from the command line.</p>
-<p>Use this <a href="https://github.com/apache/skywalking-java/tree/75deff1923715ab3ff6d62867275a23aee62db0b/tools/TLS/tls_key_generate.sh">script</a> if you are not familiar with how to generate key files.</p>
+<p>Use this <a href="https://github.com/apache/skywalking-java/tree/1e5463c3cc3c99bbe4b89b0f5ca847c8641d2347/tools/TLS/tls_key_generate.sh">script</a> if you are not familiar with how to generate key files.</p>
 <p>We need the following files:</p>
 <ul>
 <li><code>client.pem</code>: A private RSA key to sign and authenticate the public key. It&rsquo;s either a PKCS#8(PEM) or PKCS#1(DER).</li>
diff --git a/docs/skywalking-java/next/en/setup/service-agent/java-agent/token-auth/index.html b/docs/skywalking-java/next/en/setup/service-agent/java-agent/token-auth/index.html
index 19ba9d5..3677edf 100644
--- a/docs/skywalking-java/next/en/setup/service-agent/java-agent/token-auth/index.html
+++ b/docs/skywalking-java/next/en/setup/service-agent/java-agent/token-auth/index.html
@@ -737,7 +737,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/docs/skywalking-java/next/readme/index.html b/docs/skywalking-java/next/readme/index.html
index d88069b..fc59b3b 100644
--- a/docs/skywalking-java/next/readme/index.html
+++ b/docs/skywalking-java/next/readme/index.html
@@ -731,7 +731,7 @@
   })()
 </script>
 
-                  <div class="commit-id">Commit Id: 75deff1</div>
+                  <div class="commit-id">Commit Id: 1e5463c</div>
                 
 
 
diff --git a/index.json b/index.json
index fa3d45d..d796c15 100644
--- a/index.json
+++ b/index.json
@@ -1 +1 @@
-[{"body":"Apache SkyWalking从2015年开源到2024年,已经走过了9个年头,项目的规模和功能也得到了极大的丰富。 2024年4月至6月,SkyWalking社区联合纵目,举办线上的联合直播,分多个主题介绍SkyWalking的核心特性,也提供更多的答疑时间。\n2024年4月25日,SkyWalking创始人带来了第一次分享和Q\u0026amp;A\n 熟悉SkyWalking项目结构 介绍项目工程划分,边界,定位 SkyWalking文档使用,以及如何使用AI助手 Q\u0026amp;A  B站视频地址\n想参与直播的小伙伴,可以关注后续的直播安排和我们的B站直播预约\n","excerpt":"Apache SkyWalking从2015年开源到2024年,已经走过了9个年头,项目的规模和功能也得到了极大的丰富。 2024年4月至6月,SkyWalking社区联合纵目,举办线上的联合直播,分 …","ref":"/zh/2024-04-26-skywalking-in-practice-s01e01/","title":"SkyWalking从入门到精通 - 2024系列线上分享活动(第一讲)"},{"body":"","excerpt":"","ref":"/tags/activemq/","title":"ActiveMQ"},{"body":"Introduction Apache ActiveMQ Classic is a popular and powerful open-source messaging and integration pattern server. Founded in 2004, it has evolved into a mature and widely used open-source messaging middleware that complies with the Java Message Service (JMS). Today, with its stability and wide range of feature support, it still has a certain number of users of small and medium-sized enterprises. It‘s high-performance version Apache Artemis is developing rapidly and is also attracting attention from users of ActiveMQ.\nActiveMQ has broad support for JMX (Java Management Extensions), allowing to be monitored through JMX MBean. After enabling JMX, you can use JAVA\u0026rsquo;s built-in jconsole or VisualVM to view the metrics. In addition, some Collector components can also be used to convert JMX-style data into Prometheus-style data, which is suitable for more tools.\nOpenTelemetry as an industry-recognized, standardized solution that provides consistent and interoperable telemetry data collection, transmission, and analysis capabilities for distributed systems, and is also used here for data collection and transmission. Although it can directly accept JMX type data, the JMX indicators for collecting ActiveMQ are not in the standard library, and some versions are incompatible, so this article adopts two steps: convert JMX data into Prometheus-style indicator data, and then use OpenTelemetry to scrape HTTP endpoint data.\nSkyWalking as a one-stop distributed system monitoring solution, it accepts metrics from ActiveMQ and provides a basic monitoring dashboard.\nDeployment Please set up the following services:\n SkyWalking OAP, v10.0+. ActiveMQ v6.0.X+. JMX Exporter v0.20.0. If using docker, refer bitnami/jmx-exporter. OpenTelmetry-Collector v0.92.0.  Preparation The following describes how to deploy ActiveMQ with 2 single-node brokers and SkyWalking OAP with one single node. JMX Exporter runs in agent mode (recommended).\nConfiguration  Enable JMX in ActiveMQ, the JMX remote port defaults to 1616, you can change it through ACTIVEMQ_SUNJMX_START. Set up the exporter:  [Recommended] If run exporter in agent mode, need to append the startup parameter -DACTIVEMQ_OPTS=-javaagent:{activemqPath}/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:{activemqPath}/conf/config.yaml in ActiveMQ env, then exporter server starts at the same time. If run exporter in single server, refer here to deploy the server alone. 2345 is open HTTP port that can be customized. JMX\u0026rsquo;s metrics can be queried through http://localhost:2345/metrics.    
example of docker-compose.yml with agent exporter for ActiveMQ:\nversion:\u0026#39;3.8\u0026#39;services:amq1:image:apache/activemq-classic:latestcontainer_name:amq1hostname:amq1volumes:- ~/activemq1/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq1/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq1/conf/config.yaml:/opt/apache-activemq/conf/config.yamlports:- \u0026#34;61616:61616\u0026#34;- \u0026#34;8161:8161\u0026#34;- \u0026#34;2345:2345\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-1networks:- amqtest amq2:image:apache/activemq-classic:latestcontainer_name:amq2hostname:amq2volumes:- ~/activemq2/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq2/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq2/conf/config.yaml:/opt/apache-activemq/conf/config.yaml ports:- \u0026#34;61617:61616\u0026#34;- \u0026#34;8162:8161\u0026#34;- \u0026#34;2346:2346\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2346:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-2 networks:- amqtestotel-collector1:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector1command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config1.yaml:/etc/otel-collector-config.yamldepends_on:- amq1networks:- amqtest otel-collector2:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector2command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config2.yaml:/etc/otel-collector-config.yamldepends_on:- amq2networks:- amqtest networks:amqtest:example of otel-collector-config.yaml for OpenTelemetry:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;activemq-monitoring\u0026#39;scrape_interval:30sstatic_configs:- targets:[\u0026#39;amq1:2345\u0026#39;]labels:cluster:activemq-broker1processors:batch:exporters:otlp:endpoint:oap:11800tls:insecure:trueservice:pipelines:metrics:receivers:- prometheusprocessors:- batchexporters:- otlpexample of config.yaml for ActiveMQ Exporter:\n---startDelaySeconds:10username:adminpassword:activemqssl:falselowercaseOutputName:falselowercaseOutputLabelNames:falseincludeObjectNames:[\u0026#34;org.apache.activemq:*\u0026#34;,\u0026#34;java.lang:type=OperatingSystem\u0026#34;,\u0026#34;java.lang:type=GarbageCollector,*\u0026#34;,\u0026#34;java.lang:type=Threading\u0026#34;,\u0026#34;java.lang:type=Runtime\u0026#34;,\u0026#34;java.lang:type=Memory\u0026#34;,\u0026#34;java.lang:name=*\u0026#34;]excludeObjectNames:[\u0026#34;org.apache.activemq:type=ColumnFamily,*\u0026#34;]autoExcludeObjectNameAttributes:trueexcludeObjectNameAttributes:\u0026#34;java.lang:type=OperatingSystem\u0026#34;:- \u0026#34;ObjectName\u0026#34;\u0026#34;java.lang:type=Runtime\u0026#34;:- \u0026#34;ClassPath\u0026#34;- \u0026#34;SystemProperties\u0026#34;rules:- pattern:\u0026#34;.*\u0026#34;Steps  Start ActiveMQ, and the Exporter(agent) and the service start at the same time. Start SkyWalking OAP and SkyWalking UI. Start OpenTelmetry-Collector.  
Once this is completed, node metrics will be captured and pushed to SkyWalking.\nMetrics Monitoring metrics fall into three categories: Cluster Metrics, Broker Metrics, and Destination Metrics.\n Cluster Metrics: including memory usage, rates of write/read, and average/max duration of write. Broker Metrics: including node state, number of connections, number of producers/consumers, and rate of write/read under the broker. Depending on the cluster mode, one cluster may include one or more brokers. Destination Metrics: including number of producers/consumers, messages in different states, queues, and enqueue duration in a queue/topic.  Cluster Metrics  System Load: system load, in the range [0, 100]. Thread Count: the number of threads currently used by the JVM. Heap Memory: capacity of heap memory. GC: ActiveMQ\u0026rsquo;s memory is managed by Java\u0026rsquo;s garbage collection (GC) process. Enqueue/Dequeue/Dispatch/Expired Rate: growth rate of messages in different states. Average/Max Enqueue Time: time taken to join the queue.  Broker Metrics  Uptime: how long the node has been running. State: 1 = slave node, 0 = master node. Current Connections: number of connections. Current Producer/Consumer Count: number of current producers/consumers. Increased Producer/Consumer Count: number of newly added producers/consumers. Enqueue/Dequeue Count: number of enqueued and dequeued messages. Enqueue/Dequeue Rate: rate of enqueuing and dequeuing. Memory Percent Usage: percentage of memory used by undelivered messages. Store Percent Usage: percentage of store space used by pending persistent messages. Temp Percent Usage: percentage of temp space used by non-persistent messages. Average/Max Message Size: average/maximum size of messages. Queue Size: number of messages in the queue.  Destination Metrics  Producer/Consumer Count: number of producers/consumers. Queue Size: number of unacknowledged messages in the queue. Memory Usage: usage of memory. Enqueue/Dequeue/Dispatch/Expired/Inflight Count: number of messages in different states. Average/Max Message Size: average/maximum size of messages. Average/Max Enqueue Time: time taken to join the queue.  
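For a quick local trial of the pipeline described above, SkyWalking OAP and UI can be added to the same Docker network as the brokers and collectors. The following is a minimal, hypothetical fragment to append under services in the docker-compose.yml above; the image tags, ports, and environment variable follow common SkyWalking defaults and may need adjusting for your version:

  oap:
    image: apache/skywalking-oap-server:latest   # assumed tag; pin a concrete version in practice
    container_name: oap
    ports:
      - 11800:11800   # gRPC port, the otlp exporter above points here
      - 12800:12800   # REST port, used by the UI
    networks:
      - amqtest
  ui:
    image: apache/skywalking-ui:latest           # assumed tag
    container_name: ui
    environment:
      SW_OAP_ADDRESS: http://oap:12800           # assumed default variable name for the UI's OAP address
    ports:
      - 8080:8080
    networks:
      - amqtest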
Reference  ActiveMQ Classic clustering JMX Exporter Configuration JMX Exporter-Running the Standalone HTTP Server OpenTelemetry Collector Contrib Jmxreceiver  ","excerpt":"Introduction Apache ActiveMQ Classic is a popular and powerful open-source messaging and integration …","ref":"/blog/2024-04-19-monitoring-activemq-through-skywalking/","title":"Monitoring ActiveMQ through SkyWalking"},{"body":"","excerpt":"","ref":"/tags/","title":"Tags"},{"body":"引言 Apache ActiveMQ Classic 是一个流行且功能强大的开源消息传递和集成模式服务器。始于2004年,逐渐发展成为了一个成熟且广泛使用的开源消息中间件,符合Java消息服务(JMS)规范。 发展至今,凭借其稳定性和广泛的特性支持,仍然拥有一定数量的中小型企业的使用者。其高性能版本 Apache Artemis 目前处于快速发展阶段,也受到了 ActiveMQ 现有使用者的关注。\nActiveMQ 对 JMX(Java Management Extensions) 有广泛的支持,允许通过 JMX MBean 监视和控制代理的行为。 开启JMX之后,就可以使用 JAVA 自带的 jconsole 工具或者 VisualVM 等工具直观查看指标。此外也可以通过一些 Collector 组件,将 JMX 风格的数据转换为 prometheus 风格的数据,适配更多查询与展示工具。\nOpenTelemetry 作为业界公认的标准化解决方案,可为分布式系统提供一致且可互操作的遥测数据收集、传输和分析能力,这里也主要借助它实现数据的采集和传输。 它虽然可以直接接受 JMX 类型的数据,但是关于采集 ActiveMQ 的 JMX 指标并不在标准库,存在部分版本不兼容,因此本文采用两步:将 JMX 数据转换为 Prometheus 风格的指标数据,再使用 OpenTelemetry 传递。\nSkyWalking 作为一站式的分布式系统监控解决方案,接纳来自 ActiveMQ 的指标数据,并提供基础的指标监控面板。\n服务部署 请准备以下服务\n SkyWalking OAP, v10.0+。 ActiveMQ v6.0.X+。 JMX Exporter v0.20.0。如果你使用docker,参考使用 bitnami/jmx-exporter。 OpenTelmetry-Collector v0.92.0。  服务准备 以下通过 SkyWalking OAP 单节点、ActiveMQ 2个单节点服务的部署方式介绍。JMX Exporter 采用推荐的 agent 方式启动。\n配置流程  在 ActiveMQ 中开启JMX,其中 JMX 远程端口默认1616,如需修改可通过 ACTIVEMQ_SUNJMX_START 参数调整。 设置 Exporter:  如果采用推荐的 Agent 方式启动,需要追加启动参数 -DACTIVEMQ_OPTS=-javaagent:{activemqPath}/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:{activemqPath}/conf/config.yaml 如果采用单独服务的方式启动,可以参考这里独立部署 Exporter 服务。 其中 2345 为开放的 HTTP 端口可自定义。最终可通过访问 http://localhost:2345/metrics 查询到 JMX 的指标数据。    采用 Agent Exporter 方式的 docker-compose.yml 配置样例:\nversion:\u0026#39;3.8\u0026#39;services:amq1:image:apache/activemq-classic:latestcontainer_name:amq1hostname:amq1volumes:- ~/activemq1/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq1/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq1/conf/config.yaml:/opt/apache-activemq/conf/config.yamlports:- \u0026#34;61616:61616\u0026#34;- \u0026#34;8161:8161\u0026#34;- \u0026#34;2345:2345\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-1networks:- amqtest amq2:image:apache/activemq-classic:latestcontainer_name:amq2hostname:amq2volumes:- ~/activemq2/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq2/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq2/conf/config.yaml:/opt/apache-activemq/conf/config.yaml ports:- \u0026#34;61617:61616\u0026#34;- \u0026#34;8162:8161\u0026#34;- \u0026#34;2346:2346\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2346:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-2 networks:- amqtestotel-collector1:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector1command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config1.yaml:/etc/otel-collector-config.yamldepends_on:- amq1networks:- amqtest otel-collector2:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector2command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- 
./otel-collector-config2.yaml:/etc/otel-collector-config.yamldepends_on:- amq2networks:- amqtest networks:amqtest:OpenTelemetry otel-collector-config.yaml 配置样例:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;activemq-monitoring\u0026#39;scrape_interval:30sstatic_configs:- targets:[\u0026#39;amq1:2345\u0026#39;]labels:cluster:activemq-broker1processors:batch:exporters:otlp:endpoint:oap:11800tls:insecure:trueservice:pipelines:metrics:receivers:- prometheusprocessors:- batchexporters:- otlpActiveMQ Exporter config.yaml 配置样例:\n---startDelaySeconds:10username:adminpassword:activemqssl:falselowercaseOutputName:falselowercaseOutputLabelNames:falseincludeObjectNames:[\u0026#34;org.apache.activemq:*\u0026#34;,\u0026#34;java.lang:type=OperatingSystem\u0026#34;,\u0026#34;java.lang:type=GarbageCollector,*\u0026#34;,\u0026#34;java.lang:type=Threading\u0026#34;,\u0026#34;java.lang:type=Runtime\u0026#34;,\u0026#34;java.lang:type=Memory\u0026#34;,\u0026#34;java.lang:name=*\u0026#34;]excludeObjectNames:[\u0026#34;org.apache.activemq:type=ColumnFamily,*\u0026#34;]autoExcludeObjectNameAttributes:trueexcludeObjectNameAttributes:\u0026#34;java.lang:type=OperatingSystem\u0026#34;:- \u0026#34;ObjectName\u0026#34;\u0026#34;java.lang:type=Runtime\u0026#34;:- \u0026#34;ClassPath\u0026#34;- \u0026#34;SystemProperties\u0026#34;rules:- pattern:\u0026#34;.*\u0026#34;启动步骤  启动 ActiveMQ,Exporter 和服务同时启动。 启动 SkyWalking OAP 和 SkyWalking UI。 启动 OpenTelmetry-Collector。  以上步骤执行完成后,节点指标就会定时抓取后推送到 SkyWalking,经过分组聚合后前端页面可查看到 ActiveMQ 的面板数据。\n监控指标 监控指标主要分为3类:Cluster 指标、Broker 指标、Destination 指标\n Cluster 指标:主要关注集群的内存使用情况、数据写入与读取速率平均情况、平均与最大的写入时长等。 Broker 指标:主要关注 Broker 下节点状态、连接数、生产者消费者数量、写入读取速率等。根据集群形式不同,一个Cluster可能包括一个或多个Broker。 Destination 指标:主要关注 Queue/Topic 下的生产者消费者数量、不同状态消息数量、队列数量、入队时长等。  Cluster 指标  System Load:[0, 100]的值来反馈系统负载。 Thread Count:JVM 当前使用的线程数。 Heap Memory:堆内存的容量一定程度反映服务的处理性能。 GC:ActiveMQ 在 JVM 中运行,其内存由 Java 的垃圾回收 (GC) 进程管理,GC能直接反映服务的状态。 Enqueue/Dequeue/Dispatch/Expired Rate:不同状态信息的增长速率能直接反映生产活动。 Average/Max Enqueue Time:入队的耗时能一定程度影响生产者。  Broker 指标  Uptime:节点存活时长。 State:是否为从节点,1=从节点,0=主节点。 Current Connentions:目前的连接数。 Current Producer/Consumer Count:目前生产者消费者数量。 Increased Producer/Consumer Count:增长的生产者消费者数量。 Enqueue/Dequeue Count: 入队出队数量。 Enqueue/Dequeue Rate: 入队出队速率。 Memory Percent Usage:未送达消息使用的内存空间。 Store Percent Usage: 挂起的持久性消息占用的空间。 Temp Percent Usage:非持久化消息占用的空间。 Average/Max Message Size:消息量。 Queue Size:队列中消息量。  Destination 指标  Producer/Consumer Count:生产者/消费者数量。 Queue Size:队列的未消费数量。 Memory Usage:内存的使用。 Enqueue/Dequeue/Dispatch/Expired/Inflight Count:不同状态消息数。 Average/Max Enqueue Time:入队的耗时。 Average/Max Message Size:消息量。  参考文档  ActiveMQ Classic clustering JMX Exporter Configuration JMX Exporter-Running the Standalone HTTP Server OpenTelemetry Collector Contrib Jmxreceiver  ","excerpt":"引言 Apache ActiveMQ Classic 是一个流行且功能强大的开源消息传递和集成模式服务器。始于2004年,逐渐发展成为了一个成熟且广泛使用的开源消息中间件,符合Java消息服 …","ref":"/zh/2024-04-19-monitoring-activemq-through-skywalking/","title":"使用 SkyWalking 监控 ActiveMQ"},{"body":"Zixin Zhou(GitHub ID, CodePrometheus[1]) began the code contributions since Oct 28, 2023.\nUp to date, he has submitted 8 PRs in the Go agent repository, 7 PRs in the main repo, 1 PR in the UI repository and 2 PRs in the showcase repository.\nAt Apr 15th, 2024, the project management committee(PMC) passed the proposal of promoting him as a new committer. 
He has accepted the invitation at the same day.\nWelcome Zixin Zhou join the committer team.\n[1] https://github.com/CodePrometheus\n","excerpt":"Zixin Zhou(GitHub ID, CodePrometheus[1]) began the code contributions since Oct 28, 2023.\nUp to …","ref":"/events/welcome-zixin-zhou-as-new-committer/","title":"Welcome Zixin Zhou as new committer"},{"body":"SkyWalking Eyes 0.6.0 is released. Go to downloads page to find release tars.\n Add | as comment indicator by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/168 Correct the way of joining slack channels by @wu-sheng in https://github.com/apache/skywalking-eyes/pull/169 update: add weak-compatible to dependency check by @Two-Hearts in https://github.com/apache/skywalking-eyes/pull/171 feature: add support for Protocol Buffer by @spacewander in https://github.com/apache/skywalking-eyes/pull/172 feature: add support for OPA policy files by @spacewander in https://github.com/apache/skywalking-eyes/pull/174 add Eclipse Foundation specific Apache 2.0 license header by @gdams in https://github.com/apache/skywalking-eyes/pull/178 add instructions to fix header issues in markdown comment by @gdams in https://github.com/apache/skywalking-eyes/pull/179 bump action/setup-go to v5 by @gdams in https://github.com/apache/skywalking-eyes/pull/180 Draft release notes for 0.6.0 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/181  Full Changelog: https://github.com/apache/skywalking-eyes/compare/v0.5.0...v0.6.0\n","excerpt":"SkyWalking Eyes 0.6.0 is released. Go to downloads page to find release tars.\n Add | as comment …","ref":"/events/release-apache-skywalking-eyes-0-6-0/","title":"Release Apache SkyWalking Eyes 0.6.0"},{"body":"SkyWalking Java Agent 9.2.0 is released. Go to downloads page to find release tars. Changes by Version\n9.2.0  Fix NoSuchMethodError in mvc-annotation-commons and change deprecated method. Fix forkjoinpool plugin in JDK11. Support for tracing spring-cloud-gateway 4.x in gateway-4.x-plugin. Fix re-transform bug when plugin enhanced class proxy parent method. Fix error HTTP status codes not recording as SLA failures in Vert.x plugins. Support for HttpExchange request tracing. Support tracing for async producing, batch sync consuming, and batch async consuming in rocketMQ-client-java-5.x-plugin. Convert the Redisson span into an async span. Rename system env name from sw_plugin_kafka_producer_config to SW_PLUGIN_KAFKA_PRODUCER_CONFIG. Support for ActiveMQ-Artemis messaging tracing. Archive the expired plugins impala-jdbc-2.6.x-plugin. Fix a bug in Spring Cloud Gateway if HttpClientFinalizer#send does not invoke, the span created at NettyRoutingFilterInterceptor can not stop. Fix not tracing in HttpClient v5 when HttpHost(arg[0]) is null but RoutingSupport#determineHost works. Support across thread tracing for SOFA-RPC. Update Jedis 4.x plugin to support Sharding and Cluster models.  Documentation  Update docs to describe expired-plugins.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 9.2.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-9-2-0/","title":"Release Apache SkyWalking Java Agent 9.2.0"},{"body":"SkyWalking Rover 0.6.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance compatibility when profiling with SSL. Update LabelValue obtain pod information function to add default value parameter. Add HasOwnerName to judgement pod has owner name. 
Publish the latest Docker image tag. Improve the stability of Off CPU Profiling. Support collecting the access log from Kubernetes. Remove the scanner mode in the process discovery module. Upgrade Go library to 1.21, eBPF library to 0.13.2. Support using make docker.debug to build the debug Docker image.  Bug Fixes Documentation  Update architecture diagram. Delete module design and project structure document. Adjust configuration modules during setup.  Issues and PR  All issues are here. All pull requests are here.  ","excerpt":"SkyWalking Rover 0.6.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance …","ref":"/events/release-apache-skwaylking-rover-0-6-0/","title":"Release Apache SkyWalking Rover 0.6.0"},{"body":"SkyWalking Cloud on Kubernetes 0.9.0 is released. Go to downloads page to find release tars.\n0.9.0 Features  Add a getting started document about how to deploy swck on the kubernetes cluster.  Bugs  Fix the bug that the java agent is duplicated injected when update the pod.  Chores  Bump up custom-metrics-apiserver Bump up golang to v1.22 Bump up controller-gen to v0.14.0  ","excerpt":"SkyWalking Cloud on Kubernetes 0.9.0 is released. Go to downloads page to find release tars.\n0.9.0 …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-9-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.9.0"},{"body":"Background Apache SkyWalking is an open-source Application Performance Management system that helps users gather logs, traces, metrics, and events from various platforms and display them on the UI. With version 9.7.0, SkyWalking can collect access logs from probes in multiple languages and from Service Mesh, generating corresponding topologies, tracing, and other data. However, it could not initially collect and map access logs from applications in Kubernetes environments. This article explores how the 10.0.0 version of Apache SkyWalking employs eBPF technology to collect and store application access logs, addressing this limitation.\nWhy eBPF? To monitor the network traffic in Kubernetes, the following capabilities need to be supported:\n Cross Language: Applications deployed in Kubernetes may be written in any programming language, making support for diverse languages important. Non-Intrusiveness: It\u0026rsquo;s imperative to monitor network traffic without making any modifications to the applications, as direct intervention with applications in Kubernetes is not feasible. Kernel Metrics Monitoring: Often, diagnosing network issues by analyzing traffic performance at the user-space level is insufficient. A deeper analysis incorporating kernel-space network traffic metrics is frequently necessary. Support for Various Network Protocols: Applications may communicate using different transport protocols, necessitating support for a range of protocols.  Given these requirements, eBPF emerges as a capable solution. In the next section, we will explain in detail how Apache SkyWalking Rover addresses these requirements.\nKernel Monitoring and Protocol Analysis In previous articles, we\u0026rsquo;ve discussed how to monitor network traffic from programs written in various languages. This technique remains essential for network traffic monitoring, allowing for the collection of traffic data without language limitations. 
However, due to the unique aspects of our monitoring trigger mechanism and the specific features of kernel monitoring, these two areas warrant separate explanations.\nKernel Monitoring Kernel monitoring allows users to gain insights into network traffic performance based on the execution at the kernel level, specifically from Layer 2 (Data Link) to Layer 4 (Transport) of the OSI model.\nNetwork monitoring at the kernel layer is deference from the syscall (user-space) layer in terms of the metrics and identifiers used. While the syscalls layer can utilize file descriptors to correlate various operations, kernel layer network operations primarily use packets as unique identifiers. This discrepancy necessitates a mapping relationship that SkyWalking Rover can use to bind these two layers together for comprehensive monitoring.\nLet\u0026rsquo;s dive into the details of how data is monitored in both sending and receiving modes.\nObserve Sending When sending data, tracking the status and timing of each packet is crucial for understanding the state of each transmission. Within the kernel, operations progress from Layer 4 (L4) down to Layer 2 (L2), maintaining the same thread ID as during the syscalls layer, which simplifies data correlation.\nSkyWalking Rover monitors several key kernel functions to observe packet transmission dynamics, listed from L4 to L2:\n kprobe/tcp_sendmsg: Captures the time when a packet enters the L4 protocol stack for sending and the time it finishes processing. This function is essential for tracking the initial handling of packets at the transport layer. kprobe/tcp_transmit_skb: Records the total number of packet transmissions and the size of each packet sent. This function helps identify how many times a packet or a batch of packets is attempted to be sent, which is critical for understanding network throughput and congestion. tracepoint/tcp/tcp_retransmit_skb: Notes whether packet retransmission occurs, providing insights into network reliability and connection quality. Retransmissions can significantly impact application performance and user experience. tracepoint/skb/kfree_skb: Records packet loss during transmission and logs the reason for such occurrences. Understanding packet loss is crucial for diagnosing network issues and ensuring data integrity. kprobe/__ip_queue_xmit: Records the start and end times of processing by the L3 protocol. This function is vital for understanding the time taken for IP-level operations, including routing decisions. kprobe/nf_hook_slow: Records the total time and number of occurrences spent in Netfilter hooks, such as iptables rule evaluations. This monitoring point is important for assessing the impact of firewall rules and other filtering mechanisms on packet flow. kprobe/neigh_resolve_output: If resolving an unknown MAC address is necessary before sending a network request, this function records the occurrences and total time spent on this resolution. MAC address resolution times can affect the initial packet transmission delay. kprobe/__dev_queue_xmit: Records the start and end times of entering the L2 protocol stack, providing insights into the data link layer\u0026rsquo;s processing times. tracepoint/net/net_dev_start_xmit and tracepoint/net/net_dev_xmit: Records the actual time taken to transmit each packet at the network interface card (NIC). These functions are crucial for understanding the hardware-level performance and potential bottlenecks at the point of sending data to the physical network.  
By intercepting the functions above, Apache SkyWalking Rover can provide key execution times and metrics for each level when sending network data, from the application layer (Layer 7) to the transport layer (Layer 4), and finally to the data link layer (Layer 2).\nObserve Receiving When receiving data, the focus is often on the time it takes for packets to travel from the network interface card (NIC) to the user space. Unlike the process of sending data, data receiving in the kernel proceeds from the data link layer (Layer 2) up to the transport layer (Layer 4), until the application layer (Layer 7) retrieves the packet\u0026rsquo;s content. SkyWalking Rover monitors the following key system functions to observe this process, listed from L2 to L4:\n tracepoint/net/netif_receive_skb: Records the time when a packet is received by the network interface card. This tracepoint is crucial for understanding the initial point of entry for incoming data into the system. kprobe/ip_rcv: Records the start and end times of packet processing at the network layer (Layer 3). This probe provides insights into how long it takes for the IP layer to handle routing, forwarding, and delivering packets to the correct application. kprobe/nf_hook_slow: Records the total time and occurrences spent in Netfilter hooks, the same as in the sending flow. kprobe/tcp_v4_rcv: Records the start and end times of packet processing at the transport layer (Layer 4). This probe is key to understanding the efficiency of TCP operations, including connection management, congestion control, and data flow. tracepoint/skb/skb_copy_datagram_iovec: When application layer protocols use the data, this tracepoint binds the packet to the syscall layer data at Layer 7. This connection is essential for correlating the kernel\u0026rsquo;s handling of packets with their consumption by user-space applications.  Based on the above methods, network monitoring can help you understand the complete execution process and execution time from when data is received by the network card to when it is used by the program.\nMetrics By intercepting the methods mentioned above, we can gather key metrics that provide insights into network performance and behavior. These metrics include:\n Packets: The size of the packets and the frequency of their transmission or reception. These metrics offer a fundamental understanding of the network load and the efficiency of data movement between the sender and receiver. Connections: The number of connections established or accepted between services and the time taken for these connections to be set up. This metric is crucial for analyzing the efficiency of communication and connection management between different services within the network. L2-L4 Events: The time spent on key events within the Layer 2 to Layer 4 protocols. This metric sheds light on the processing efficiency and potential bottlenecks within the lower layers of the network stack, which are essential for data transmission and reception.  Protocol Analyzing In previous articles, we have discussed parsing HTTP/1.x protocols. However, with HTTP/2.x, the protocol\u0026rsquo;s stateful nature and the pre-established connections between services complicate network profiling. This complexity makes it challenging for Apache SkyWalking Rover to fully perceive the connection context, hindering protocol parsing operations.\nTransitioning network monitoring to Daemon mode offers a solution to this challenge. 
By continuously observing service operations around the clock, SkyWalking Rover can begin monitoring as soon as a service starts. This immediate initiation allows for the tracking of the complete execution context, making the observation of stateful protocols like HTTP/2.x feasible.\nProbes To detect when a process is started, monitoring a specific trace point (tracepoint/sched/sched_process_fork) is essential. This approach enables the system to be aware of process initiation events. Given the necessity to filter process traffic based on certain criteria such as the process\u0026rsquo;s namespace, Apache SkyWalking Rover follows a series of steps to ensure accurate and efficient monitoring. These steps include:\n Monitoring Activation: The process is immediately added to a monitoring whitelist upon detection. This step ensures that the process is considered for monitoring from the moment it starts, without delay. Push to Queue: The process\u0026rsquo;s PID (Process ID) is pushed into a monitoring confirmation queue. This queue holds the PIDs of newly detected processes that are pending further confirmation from a user-space program. This asynchronous approach allows for the separation of immediate detection and subsequent processing, optimizing the monitoring workflow. User-Space Program Confirmation: The user-space program retrieves process PIDs from the queue and assesses whether each process should continue to be monitored. If a process is deemed unnecessary for monitoring, it is removed from the whitelist.  This process ensures that SkyWalking Rover can dynamically adapt its monitoring scope based on real-time conditions and configurations, allowing for both comprehensive coverage and efficient resource use.\nLimitations The monitoring of stateful protocols like HTTP/2.x currently faces certain limitations:\n Inability to Observe Pre-existing Connections: Monitoring the complete request and response cycle requires that monitoring be initiated before any connections are established. This requirement means that connections set up before the start of monitoring cannot be observed. Challenges with TLS Requests: Observing TLS encrypted traffic is complex because it relies on asynchronously attaching uprobes (user-space attaching) for observation. If new requests are made before these uprobes are successfully attached, it becomes impossible to access the data before encryption or after decryption.  Demo Next, let’s quickly demonstrate the Kubernetes monitoring feature, so you can understand more specifically what it accomplishes.\nDeploy SkyWalking Showcase SkyWalking Showcase contains a complete set of example services and can be monitored using SkyWalking. For more information, please check the official documentation.\nIn this demo, we only deploy service, the latest released SkyWalking OAP, and UI.\nexport FEATURE_FLAGS=java-agent-injector,single-node,elasticsearch,rover make deploy.kubernetes After deployment is complete, please run the following script to open SkyWalking UI: http://localhost:8080/.\nkubectl port-forward svc/ui 8080:8080 --namespace default Done Once deployed, Apache SkyWalking Rover automatically begins monitoring traffic within the system upon startup. Then, reports this traffic data to SkyWalking OAP, where it is ultimately stored in a database.\nIn the Service Dashboard within Kubernetes, you can view a list of monitored Kubernetes services. 
If any of these services have HTTP traffic, this information would be displayed alongside them in the dashboard.\nFigure 1: Kubernetes Service List\nAdditionally, within the Topology Tab, you can observe the topology among related services. In each service or call relationship, there would display relevant TCP and HTTP metrics.\nFigure 2: Kubernetes Service Topology\nWhen you select a specific service from the Service list, you can view service metrics at both the TCP and HTTP levels for the chosen service.\nFigure 3: Kubernetes Service TCP Metrics\nFigure 4: Kubernetes Service HTTP Metrics\nFurthermore, by using the Endpoint Tab, you can see which URIs have been accessed for the current service.\nFigure 5: Kubernetes Service Endpoint List\nConclusion In this article, I\u0026rsquo;ve detailed how to utilize eBPF technology for network monitoring of services within a Kubernetes cluster, a capability that has been implemented in Apache SkyWalking Rover. This approach leverages the power of eBPF to provide deep insights into network traffic and service interactions, enhancing visibility and observability across the cluster.\n","excerpt":"Background Apache SkyWalking is an open-source Application Performance Management system that helps …","ref":"/blog/2024-03-18-monitor-kubernetes-network-by-ebpf/","title":"Monitoring Kubernetes network traffic by using eBPF"},{"body":"SkyWalking Client JS 0.11.0 is released. Go to downloads page to find release tars.\n Fixed the bug that navigator.sendBeacon sent json to backend report \u0026ldquo;No suitable request converter found for a @RequestObject List\u0026rdquo;. Fix reading property from null. Pin selenium version and update license CI. Bump dependencies. Update README.  ","excerpt":"SkyWalking Client JS 0.11.0 is released. Go to downloads page to find release tars.\n Fixed the bug …","ref":"/events/release-apache-skywalking-client-js-0-11-0/","title":"Release Apache SkyWalking Client JS 0.11.0"},{"body":"背景 Apache SkyWalking 是一个开源的应用性能管理系统,帮助用户从各种平台收集日志、跟踪、指标和事件,并在用户界面上展示它们。\n在9.7.0版本中,Apache SkyWalking 可以从多语言的探针和 Service Mesh 中收集访问日志,并生成相应的拓扑图、链路和其他数据。 但是对于Kubernetes环境,暂时无法提供对应用程序的访问日志进行采集并生成拓扑图。本文探讨了Apache SkyWalking 10.0.0版本如何采用eBPF技术来收集和存储应用访问日志,解决了这一限制。\n为什么使用 eBPF? 
为了在Kubernetes中监控网络流量,以下特性需得到支持:\n 跨语言: 在Kubernetes部署的应用可能使用任何编程语言编写,因此对多种语言的支持十分重要。 非侵入性: 监控网络流量时不对应用程序进行任何修改是必要的,因为直接干预Kubernetes中的应用程序是不可行的。 内核指标监控: 通常,仅通过分析用户空间级别的流量来诊断网络问题是不够的。经常需要深入分析,结合内核空间的网络流量指标。 支持多种网络协议: 应用程序可能使用不同的传输协议进行通信,这就需要支持一系列的协议。  鉴于这些要求,eBPF显现出作为一个有能力的解决方案。在下一节中,我们将深入讨论Apache SkyWalking Rover是如何解决这些方面作出更详细解释。\n内核监控与协议分析 在之前的文章中,我们讨论了如何对不同编程语言的程序进行网络流量获取。在网络流量监控中,我们仍然会使用该技术进行流量采集。 但是由于这次监控触发方式和内核监控方面的不同特性,所以这两部分会单独进行说明。\n内核监控 内核监控允许用户根据在内核层面的执行,洞察网络流量性能,特别是从OSI模型的第2层(数据链路层)到第4层(传输层)。\n内核层的网络监控与syscall(用户空间系统调用)层在关联指标不同。虽然syscall层可以利用文件描述符来关联各种操作,但内核层的网络操作主要使用数据包作为唯一标识符。 这种差异需要映射关系,Apache SkyWalking Rover可以使用它将这两层绑定在一起,进行全面监控。\n让我们深入了解数据在发送和接收模式下是如何被监控的。\n监控数据发送 在发送数据时,跟踪每个数据包的状态和时间对于理解每次传输的状态至关重要。在内核中,操作从第4层(L4)一直调用到第2层(L2),并且会保持与在syscall层相同的线程ID,这简化了数据的相关性分析。\nSkyWalking Rover监控了几个关键的内核函数,以观察数据包传输动态,顺序从L4到L2:\n kprobe/tcp_sendmsg: 记录数据包进入L4协议栈进行发送以及完成处理的时间。这个函数对于跟踪传输层对数据包的初始处理至关重要。 kprobe/tcp_transmit_skb: 记录数据包传输的总次数和每个发送的数据包的大小。这个函数有助于识别尝试发送一个数据包或一段时间内发送一批数据包的次数,这对于理解网络吞吐量和拥塞至关重要。 tracepoint/tcp/tcp_retransmit_skb: 记录是否发生数据包重传,提供网络可靠性和连接质量的见解。重传可以显著影响应用性能和用户体验。 tracepoint/skb/kfree_skb: 记录传输过程中的数据包丢失,并记录发生这种情况的原因。理解数据包丢失对于诊断网络问题和确保数据完整性至关重要。 kprobe/__ip_queue_xmit: 记录L3协议处理的开始和结束时间。这个功能对于理解IP级操作所需的时间至关重要,包括路由决策。 kprobe/nf_hook_slow: 记录在Netfilter钩子中花费的总时间和发生次数,例如 iptables 规则评估。这个函数对于评估防火墙规则和其他过滤机制对数据流的影响非常重要。 kprobe/neigh_resolve_output: 如果在发送网络请求之前需要解析未知的MAC地址,这个函数会记录发生的次数和在这个解析上花费的总时间。MAC地址解析时间可以影响初始数据包传输的延迟。 kprobe/__dev_queue_xmit: 记录进入L2协议栈的开始和结束时间,提供对数据链路层处理时间的见解。 tracepoint/net/net_dev_start_xmit and tracepoint/net/net_dev_xmit: 记录在网卡(NIC)上传输每个数据包所需的实际时间。这些功能对于理解硬件级性能和在将数据发送到物理网络时可能出现的瓶颈至关重要。  根据上述方法的拦截,Apache SkyWalking Rover可以在发送网络数据时为每个层级提供关键的执行时间和指标,从应用层(第7层)到传输层(第4层),最终到数据链路层(第2层)。\n监控数据接收 在接收数据时,通常关注的是数据包从网卡(NIC)到用户空间的传输时间。与发送数据的过程不同,在内核中接收数据是从数据链路层(第2层)开始,一直上升到传输层(第4层),直到应用层(第7层)检索到数据包的内容。\n在SkyWalking Rover中,监控以下关键系统功能以观察这一过程,顺序从L2到L4:\n tracepoint/net/netif_receive_skb: 记录网卡接收到数据包的时间。这个追踪点对于理解进入系统的传入数据的初始入口点至关重要。 kprobe/ip_rcv: 记录网络层(第3层)数据包处理的开始和结束时间。这个探针提供了IP层处理路由、转发和将数据包正确传递给应用程序所需时间的见解。 kprobe/nf_hook_slow: 记录在Netfilter钩子中花费的总时间和发生次数,与发送流量的情况相同。 kprobe/tcp_v4_rcv: 记录传输层(第4层)数据包处理的开始和结束时间。这个探针对于理解TCP操作的效率至关重要,包括连接管理、拥塞控制和数据流。 tracepoint/skb/skb_copy_datagram_iovec: 当应用层协议使用数据时,这个追踪点在第7层将数据包与syscall层的数据绑定。这种连接对于将内核对数据包的处理与用户空间应用程序的消费相关联是至关重要的。  基于上述方法,网络监控可以帮助您理解从网卡接收数据到程序使用数据的完整执行过程和执行时间。\n指标 通过拦截上述提到的方法,我们可以收集提供网络性能的关键指标。这些指标包括:\n 数据包: 数据包的大小及其传输或接收的频率。这些指标提供了对网络负载和数据在发送者与接收者之间传输效率的基本理解。 连接: 服务之间建立或接收的连接数量,以及设置这些连接所需的时间。这个指标对于分析网络内不同服务之间的通信效率和连接管理至关重要。 L2-L4 事件: 在第2层到第4层协议中关键事件上所花费的时间。这个指标揭示了网络堆栈较低层的处理效率和潜在瓶颈,这对于数据传输至关重要。  协议分析 在之前的文章中,我们已经讨论了解析 HTTP/1.x 协议。然而,对于 HTTP/2.x,协议的有状态性质和服务之间预先建立的连接使得网络分析变得复杂。 这种复杂性使得Apache SkyWalking Rover很难完全感知连接上下文,阻碍了协议解析操作。\n将网络监控转移到守护进程模式提供了一种解决这一挑战的方法。通过全天候不断观察服务,Apache SkyWalking Rover可以在服务启动时立即开始监控。 这种立即启动允许跟踪完整的执行上下文,使得观察像 HTTP/2.x 这样的有状态协议变得可行。\n追踪 为了检测到一个进程何时启动,监控一个特定的追踪点 (tracepoint/sched/sched_process_fork) 是必不可少的。这追踪点使系统能够意识到进程启动事件。\n鉴于需要根据某些标准(如进程的命名空间)过滤进程流量,Apache SkyWalking Rover遵循一系列步骤来确保准确和高效的监控。这些步骤包括:\n 启动监控: 一旦检测到进程,立即将其添加到监控白名单中。这一步确保从进程启动的那一刻起就考虑对其进行监控,不会有延迟。 推送队列: 进程的PID(进程ID)被推送到一个监控确认队列中。这个队列保存了新检测到的进程的PID,这些进程等待来自用户空间程序的进一步确认。这种异步方法对立即检测和后续处理进行分离,优化了监控工作流程。 用户态程序确认: 用户空间程序从队列中检索进程PID,并评估每个进程是否应该继续被监控。如果一个进程被认为不必要进行监控,它将被从白名单中移除。  这个过程确保了Apache SkyWalking Rover可以根据实时条件和配置动态调整其监控范围,允许既全面覆盖又有效的资源监控。\n限制 像 HTTP/2.x 这样的有状态协议的监控目前仍然面临一些限制:\n 无法观察现有连接: 要监控完整的请求和响应周期,需要在建立任何连接之前启动监控。这个要求意味着在监控开始之前建立的连接无法被观察到。 TLS请求的挑战: 
观察TLS加密流量是复杂的,因为它依赖于异步加载uprobes(用户空间加载)进行观察。如果在成功加载这些uprobes之前发出新的请求,那么在加密之前或解密之后访问数据就变得不可能。  演示 接下来,让我们快速演示Kubernetes监控功能,以便更具体地了解它的功能。\n部署 SkyWalking Showcase SkyWalking Showcase 包含完整的示例服务,并可以使用 SkyWalking 进行监视。有关详细信息,请查看官方文档。\n在此演示中,我们只部署服务、最新发布的 SkyWalking OAP,UI和Rover。\nexport FEATURE_FLAGS=java-agent-injector,single-node,elasticsearch,rover make deploy.kubernetes 部署完成后,请运行以下脚本以打开 SkyWalking UI:http://localhost:8080/ 。\nkubectl port-forward svc/ui 8080:8080 --namespace default 完成 一旦部署,Apache SkyWalking Rover在启动时会自动开始监控系统中的流量。然后,它将这些流量数据报告给SkyWalking OAP,并最终存储在数据库中。\n在Kubernetes中的服务仪表板中,您可以查看被监控的Kubernetes服务列表。如果其中任何服务具有HTTP流量,这些指标信息将在列表中显示。\n图 1: Kubernetes 服务列表\n此外,在拓扑图选项卡中,您可以观察相关服务之间的拓扑关系。在每个服务节点或服务之间调用关系中,将显示相关的TCP和HTTP指标。\n图 2: Kubernetes 服务拓扑图\n当您从服务列表中选择特定服务时,您可以查看所选服务在TCP和HTTP级别的服务指标。\n图 3: Kubernetes 服务 TCP 指标\n图 4: Kubernetes 服务 HTTP 指标\n此外,通过使用端点选项卡,您可以查看当前服务所访问的URI。\n图 5: Kubernetes 服务端点列表\n结论 在本文中,我详细介绍了如何利用eBPF技术对Kubernetes集群中的服务进行网络流量监控,这是Apache SkyWalking Rover中实现的一项功能。\n这项功能利用了eBPF的强大功能,提供了对网络流量和服务交互的深入洞察,增强了对整个集群的可观测性。\n","excerpt":"背景 Apache SkyWalking 是一个开源的应用性能管理系统,帮助用户从各种平台收集日志、跟踪、指标和事件,并在用户界面上展示它们。\n在9.7.0版本中,Apache SkyWalking  …","ref":"/zh/2024-03-18-monitor-kubernetes-network-by-ebpf/","title":"使用 eBPF 监控 Kubernetes 网络流量"},{"body":"","excerpt":"","ref":"/tags/clickhouse/","title":"ClickHouse"},{"body":"Background ClickHouse is an open-source column-oriented database management system that allows generating analytical data reports in real-time, so it is widely used for online analytical processing (OLAP).\nApache SkyWalking is an open-source APM system that provides monitoring, tracing and diagnosing capabilities for distributed systems in Cloud Native architectures. Increasingly, App Service architectures incorporate Skywalking as an essential monitoring component of a service or instance.\nBoth ClickHouse and Skywalking are popular frameworks, and it would be great to monitor your ClickHouse database through Skywalking. Next, let\u0026rsquo;s share how to monitor ClickHouse database with Skywalking.\nPrerequisites and configurations Make sure you\u0026rsquo;ve met the following prerequisites before you start onboarding your monitor.\nConfig steps:\n Exposing prometheus endpoint. Fetching ClickHouse metrics by OpenTelemetry. Exporting metrics to Skywalking OAP server.  Prerequisites for setup The monitoring for ClickHouse relies on the embedded prometheus endpoint of ClickHouse and will not be supported in previous versions starting from v20.1.2.4.\nYou can check the version of your server:\n:) select version(); SELECT version() Query id: 2d3773ca-c320-41f6-b2ac-7ebe37eddc58 ┌─version()───┐ │ 24.2.1.2248 │ └─────────────┘ If your ClickHouse version is earlier than v20.1.2.4, you need to set up ClickHouse-exporter to access data.\nExpose prometheus Endpoint The embedded prometheus endpoint will make it easy for data collection, you just need to open the required configuration in the core configuration file config.xml of ClickHouse. In addition to your original configuration, you only need to modify the configuration of Prometheus.\n/etc/clickhouse-server/config.xml:\n\u0026lt;clickhouse\u0026gt; ...... 
\u0026lt;prometheus\u0026gt; \u0026lt;endpoint\u0026gt;/metrics\u0026lt;/endpoint\u0026gt; \u0026lt;port\u0026gt;9363\u0026lt;/port\u0026gt; \u0026lt;metrics\u0026gt;true\u0026lt;/metrics\u0026gt; \u0026lt;events\u0026gt;true\u0026lt;/events\u0026gt; \u0026lt;asynchronous_metrics\u0026gt;true\u0026lt;/asynchronous_metrics\u0026gt; \u0026lt;errors\u0026gt;true\u0026lt;/errors\u0026gt; \u0026lt;/prometheus\u0026gt; \u0026lt;/clickhouse\u0026gt; Settings:\n endpoint – HTTP endpoint for scraping metrics by prometheus server. Start from ‘/’. port – Port for endpoint. metrics – Expose metrics from the system.metrics table. events – Expose metrics from the system.events table. asynchronous_metrics – Expose current metrics values from the system.asynchronous_metrics table. errors - Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the system.errors as well.  Save the config and restart the ClickHouse server.\nIt contains more than 1,000 metrics, covering services、networks、disk、MergeTree、errors and so on. For more details, after restarting the server, you can call curl 127.0.0.1:9363/metrics to know about the metrics.\nYou also can check the metrics by tables to make a contrast.\n:) select * from system.metrics limit 10 SELECT * FROM system.metrics LIMIT 10 Query id: af677622-960e-4589-b2ca-0b6a40c443aa ┌─metric───────────────────────────────┬─value─┬─description─────────────────────────────────────────────────────────────────────┐ │ Query │ 1 │ Number of executing queries │ │ Merge │ 0 │ Number of executing background merges │ │ Move │ 0 │ Number of currently executing moves │ │ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │ │ ReplicatedFetch │ 0 │ Number of data parts being fetched from replica │ │ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │ │ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │ │ BackgroundMergesAndMutationsPoolTask │ 0 │ Number of active merges and mutations in an associated background pool │ │ BackgroundMergesAndMutationsPoolSize │ 64 │ Limit on number of active merges and mutations in an associated background pool │ │ BackgroundFetchesPoolTask │ 0 │ Number of active fetches in an associated background pool │ └──────────────────────────────────────┴───────┴─────────────────────────────────────────────────────────────────────────────────┘ :) select * from system.events limit 10; SELECT * FROM system.events LIMIT 10 Query id: 32c618d0-037a-400a-92a4-59fde832e4e2 ┌─event────────────────────────────┬──value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ │ Query │ 7 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │ │ SelectQuery │ 7 │ Same as Query, but only for SELECT queries. │ │ InitialQuery │ 7 │ Same as Query, but only counts initial queries (see is_initial_query). │ │ QueriesWithSubqueries │ 40 │ Count queries with all subqueries │ │ SelectQueriesWithSubqueries │ 40 │ Count SELECT queries with all subqueries │ │ QueryTimeMicroseconds │ 202862 │ Total time of all queries. 
│ │ SelectQueryTimeMicroseconds │ 202862 │ Total time of SELECT queries. │ │ FileOpen │ 40473 │ Number of files opened. │ │ Seek │ 100 │ Number of times the \u0026#39;lseek\u0026#39; function was called. │ │ ReadBufferFromFileDescriptorRead │ 67995 │ Number of reads (read/pread) from a file descriptor. Does not include sockets. │ └──────────────────────────────────┴────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ Start up Opentelemetry-Collector Configure OpenTelemetry based on your own requirements. Following the example below:\notel-collector-config.yaml:\nreceivers: prometheus: config: scrape_configs: - job_name: \u0026#39;clickhouse-monitoring\u0026#39; scrape_interval: 15s static_configs: - targets: [\u0026#39;127.0.0.1:9363\u0026#39;,\u0026#39;127.0.0.1:9364\u0026#39;,\u0026#39;127.0.0.1:9365\u0026#39;] labels: host_name: prometheus-clickhouse processors: batch: exporters: otlp: endpoint: 127.0.0.1:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp Please ensure:\n job_name: 'clickhouse-monitoring' that marked the data from ClickHouse, If modified, it will be ignored. host_name defines the service name, you have to make one. endpoint point to the oap server address. the network between ClickHouse, OpenTelemetry Collector, and Skywalking OAP Server must be accessible.  If goes well, refresh the Skywalking-ui home page in a few seconds and you can see ClickHouse under the database menu.\nsuccess log:\n2024-03-12T03:57:39.407Z\tinfo\tservice@v0.93.0/telemetry.go:76\tSetting up own telemetry... 2024-03-12T03:57:39.412Z\tinfo\tservice@v0.93.0/telemetry.go:146\tServing metrics\t{\u0026quot;address\u0026quot;: \u0026quot;:8888\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;Basic\u0026quot;} 2024-03-12T03:57:39.416Z\tinfo\tservice@v0.93.0/service.go:139\tStarting otelcol...\t{\u0026quot;Version\u0026quot;: \u0026quot;0.93.0\u0026quot;, \u0026quot;NumCPU\u0026quot;: 4} 2024-03-12T03:57:39.416Z\tinfo\textensions/extensions.go:34\tStarting extensions... 2024-03-12T03:57:39.423Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:240\tStarting discovery manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:231\tScrape job added\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;, \u0026quot;jobName\u0026quot;: \u0026quot;clickhouse-monitoring\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tservice@v0.93.0/service.go:165\tEverything is ready. Begin running and processing data. 
2024-03-12T03:57:59.432Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:282\tStarting scrape manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} ClickHouse monitoring dashboard About the dashboard The dashboard includes the service dashboard and the instance dashboard.\nMetrics include servers, queries, networks, insertions, replicas, MergeTree, ZooKeeper and embedded ClickHouse Keeper.\nThe service dashboard displays the metrics of the entire cluster.\nThe instance dashboard displays the metrics of an instance.\nAbout the metrics Here are some meanings of ClickHouse Instance metrics, more here.\n   Monitoring Panel Unit Description Data Source     CpuUsage count CPU time spent seen by OS per second(according to ClickHouse.system.dashboard.CPU Usage (cores)). ClickHouse   MemoryUsage percentage Total amount of memory (bytes) allocated by the server/ total amount of OS memory. ClickHouse   MemoryAvailable percentage Total amount of memory (bytes) available for program / total amount of OS memory. ClickHouse   Uptime sec The server uptime in seconds. It includes the time spent for server initialization before accepting connections. ClickHouse   Version string Version of the server in a single integer number in base-1000. ClickHouse   FileOpen count Number of files opened. ClickHouse     metrics about ZooKeeper are valid when managing cluster by ZooKeeper metrics about embedded ClickHouse Keeper are valid when ClickHouse Keeper is enabled  References  ClickHouse prometheus endpoint ClickHouse built-in observability dashboard ClickHouse Keeper  ","excerpt":"Background ClickHouse is an open-source column-oriented database management system that allows …","ref":"/blog/2024-03-12-monitoring-clickhouse-through-skywalking/","title":"Monitoring Clickhouse Server through SkyWalking"},{"body":"背景介绍 ClickHouse 是一个开源的面向列的数据库管理系统,可以实时生成分析数据报告,因此被广泛用于在线分析处理(OLAP)。\nApache SkyWalking 是一个开源的 APM 系统,为云原生架构中的分布式系统提供监控、跟踪和诊断能力。应用服务体系越来越多地将 Skywalking 作为服务或实例的基本监视组件。\nClickHouse 和 Skywalking 框架都是当下流行的服务组件,通过 Skywalking 监控您的 ClickHouse 数据库将是一个不错的选择。接下来,就来分享一下如何使用 Skywalking 监控 ClickHouse 数据库。\n前提与配置 在开始接入监控之前,请先确认以下前提条件。\n配置步骤:\n 暴露 Prometheus 端点。 通过 OpenTelemetry 拉取 ClickHouse 的指标数据。 将指标数据发送到 Skywalking OAP server.  使用的前提 ClickHouse 的监控依赖于 ClickHouse 的内嵌 Prometheus 端点配置,配置从 v20.1.2.4 开始支持,因此之前的老版本将无法支持。\n您可以检查 ClickHouse 服务的版本:\n:) select version(); SELECT version() Query id: 2d3773ca-c320-41f6-b2ac-7ebe37eddc58 ┌─version()───┐ │ 24.2.1.2248 │ └─────────────┘ 如果您的 ClickHouse 版本低于 v20.1.2.4,则需要依靠 ClickHouse-exporter 获取数据。\n暴露 Prometheus 端点 内嵌的 Prometheus 端点简化了数据采集流程,您只需要在 ClickHouse 的核心配置文件 config.xml 打开所需的配置即可。除了您原来的配置,您只需要参考如下修改 Prometheus 的配置。\n/etc/clickhouse-server/config.xml:\n\u0026lt;clickhouse\u0026gt; ...... 
\u0026lt;prometheus\u0026gt; \u0026lt;endpoint\u0026gt;/metrics\u0026lt;/endpoint\u0026gt; \u0026lt;port\u0026gt;9363\u0026lt;/port\u0026gt; \u0026lt;metrics\u0026gt;true\u0026lt;/metrics\u0026gt; \u0026lt;events\u0026gt;true\u0026lt;/events\u0026gt; \u0026lt;asynchronous_metrics\u0026gt;true\u0026lt;/asynchronous_metrics\u0026gt; \u0026lt;errors\u0026gt;true\u0026lt;/errors\u0026gt; \u0026lt;/prometheus\u0026gt; \u0026lt;/clickhouse\u0026gt; 配置说明:\n endpoint – 通过 prometheus 服务器抓取指标的 HTTP 端点。从/开始。 port – 端点的端口。 metrics – 暴露 system.metrics 表中的指标。 events – 暴露 system.events 表中的指标。 asynchronous_metrics – 暴露 system.asynchronous_metrics 表中的当前指标值。 errors - 按错误代码暴露自上次服务器重新启动以来发生的错误数。此信息也可以从 system.errors 中获得。  保存配置并重启 ClickHouse 服务。\n端点数据包含1000多个指标,涵盖服务、网络、磁盘、MergeTree、错误等。想了解更多指标细节,在重启服务后,可以调用 curl 127.0.0.1:9363/metrics 看到具体指标的内容。\n您还可以通过数据库表的数据与端点数据进行检查对比。\n:) select * from system.metrics limit 10 SELECT * FROM system.metrics LIMIT 10 Query id: af677622-960e-4589-b2ca-0b6a40c443aa ┌─metric───────────────────────────────┬─value─┬─description─────────────────────────────────────────────────────────────────────┐ │ Query │ 1 │ Number of executing queries │ │ Merge │ 0 │ Number of executing background merges │ │ Move │ 0 │ Number of currently executing moves │ │ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │ │ ReplicatedFetch │ 0 │ Number of data parts being fetched from replica │ │ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │ │ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │ │ BackgroundMergesAndMutationsPoolTask │ 0 │ Number of active merges and mutations in an associated background pool │ │ BackgroundMergesAndMutationsPoolSize │ 64 │ Limit on number of active merges and mutations in an associated background pool │ │ BackgroundFetchesPoolTask │ 0 │ Number of active fetches in an associated background pool │ └──────────────────────────────────────┴───────┴─────────────────────────────────────────────────────────────────────────────────┘ :) select * from system.events limit 10; SELECT * FROM system.events LIMIT 10 Query id: 32c618d0-037a-400a-92a4-59fde832e4e2 ┌─event────────────────────────────┬──value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ │ Query │ 7 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │ │ SelectQuery │ 7 │ Same as Query, but only for SELECT queries. │ │ InitialQuery │ 7 │ Same as Query, but only counts initial queries (see is_initial_query). │ │ QueriesWithSubqueries │ 40 │ Count queries with all subqueries │ │ SelectQueriesWithSubqueries │ 40 │ Count SELECT queries with all subqueries │ │ QueryTimeMicroseconds │ 202862 │ Total time of all queries. │ │ SelectQueryTimeMicroseconds │ 202862 │ Total time of SELECT queries. │ │ FileOpen │ 40473 │ Number of files opened. │ │ Seek │ 100 │ Number of times the \u0026#39;lseek\u0026#39; function was called. │ │ ReadBufferFromFileDescriptorRead │ 67995 │ Number of reads (read/pread) from a file descriptor. Does not include sockets. 
│ └──────────────────────────────────┴────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ 启动 Opentelemetry-Collector 根据自身环境 配置 OpenTelemetry。 您可参照下面的例子:\notel-collector-config.yaml:\nreceivers: prometheus: config: scrape_configs: - job_name: \u0026#39;clickhouse-monitoring\u0026#39; scrape_interval: 15s static_configs: - targets: [\u0026#39;127.0.0.1:9363\u0026#39;,\u0026#39;127.0.0.1:9364\u0026#39;,\u0026#39;127.0.0.1:9365\u0026#39;] labels: host_name: prometheus-clickhouse processors: batch: exporters: otlp: endpoint: 127.0.0.1:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp 请着重关注:\n job_name: 'clickhouse-monitoring' 标记着来自 ClickHouse 的数据,如果自行修改,数据会被服务忽略。 host_name 定义服务的名称。 endpoint 指向您的 OAP 服务地址. ClickHouse、OpenTelemetry Collector 和 Skywalking OAP Server 之间的网络必须可访问。  如果进展顺利,几秒钟后刷新 Skywalking-ui 网页,您可以在数据库的菜单下看到 ClickHouse。\n启动成功日志样例:\n2024-03-12T03:57:39.407Z\tinfo\tservice@v0.93.0/telemetry.go:76\tSetting up own telemetry... 2024-03-12T03:57:39.412Z\tinfo\tservice@v0.93.0/telemetry.go:146\tServing metrics\t{\u0026quot;address\u0026quot;: \u0026quot;:8888\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;Basic\u0026quot;} 2024-03-12T03:57:39.416Z\tinfo\tservice@v0.93.0/service.go:139\tStarting otelcol...\t{\u0026quot;Version\u0026quot;: \u0026quot;0.93.0\u0026quot;, \u0026quot;NumCPU\u0026quot;: 4} 2024-03-12T03:57:39.416Z\tinfo\textensions/extensions.go:34\tStarting extensions... 2024-03-12T03:57:39.423Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:240\tStarting discovery manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:231\tScrape job added\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;, \u0026quot;jobName\u0026quot;: \u0026quot;clickhouse-monitoring\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tservice@v0.93.0/service.go:165\tEverything is ready. Begin running and processing data. 
2024-03-12T03:57:59.432Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:282\tStarting scrape manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} ClickHouse 监控面板 关于面板 这个仪表盘包含服务仪表盘和实例仪表盘。\n指标涵盖服务器、查询、网络、插入、副本、MergeTree、ZooKeeper 和内嵌 ClickHouse Keeper。\n服务仪表盘主要展示整个集群相关的指标。\n实例仪表盘主要展示单个实例相关的指标。\n关于指标 以下是ClickHouse实例指标的一些含义,前往了解完整的指标列表。\n   面板名称 单位 指标含义 数据源     CpuUsage count 操作系统每秒花费的 CPU 时间(根据 ClickHouse.system.dashboard.CPU 使用率(核心数))。 ClickHouse   MemoryUsage percentage 服务器分配的内存总量(字节)/操作系统内存总量。 ClickHouse   MemoryAvailable percentage 可用于程序的内存总量(字节)/操作系统内存总量。 ClickHouse   Uptime sec 服务器正常运行时间(以秒为单位)。它包括在接受连接之前进行服务器初始化所花费的时间。 ClickHouse   Version string 以 base-1000 样式展示的服务器版本。 ClickHouse   FileOpen count 打开的文件数。 ClickHouse     ZooKeeper 的指标在 ZooKeeper 管理集群时有效。 内嵌ClickHouse Keeper的指标在开启内嵌 ClickHouse Keeper 配置时有效。  参考文档  ClickHouse prometheus endpoint ClickHouse built-in observability dashboard ClickHouse Keeper  ","excerpt":"背景介绍 ClickHouse 是一个开源的面向列的数据库管理系统,可以实时生成分析数据报告,因此被广泛用于在线分析处理(OLAP)。\nApache SkyWalking 是一个开源的 APM 系统, …","ref":"/zh/2024-03-12-monitoring-clickhouse-through-skywalking/","title":"使用 SkyWalking 监控 ClickHouse Server"},{"body":"","excerpt":"","ref":"/tags/rocketmq/","title":"RocketMQ"},{"body":"背景介绍 Apache RocketMQ 是一个开源的低延迟、高并发、高可用、高可靠的分布式消息中间件, 从SkyWalking OAP 10.0 版本开始, 新增了 对 RocketMQ Server的监控面板。本文将展示并介绍如何使用 Skywalking来监控RocketMQ\n部署 流程 通过RocketMQ官方提供的RocketMQ exporter来采集RocketMQ Server数据,再通过opentelmetry-collector来拉取RocketMQ exporter并传输到skywalking oap服务来处理\nDataFlow: 准备  Skywalking oap服务,v10.0 + RocketMQ v4.3.2 + RocketMQ exporter v0.0.2+ Opentelmetry-collector v0.87+  启动顺序  启动 RocketMQ namesrv 和 broker 启动 skywalking oap 和 ui 启动 RocketMQ exporter 启动 opentelmetry-collector  具体如何启动和配置请参考以上链接中官方教程.\n需要注意下的是 opentelmetry-collector 的配置文件.\njob_name: \u0026quot;rocketmq-monitoring\u0026quot; 请不要修改,否则 skywalking 不会处理这部分数据.\nrocketmq-exporter 替换成RocketMQ exporter 的地址.\nreplacement: rocketmq-cluster 中的rocketmq-cluster如果想要使用下文介绍的服务分层功能,请自行定义为其他服务层级相匹配的名称.\noap 为 skywalking oap 地址,请自行替换.\nreceivers: prometheus: config: scrape_configs: - job_name: \u0026quot;rocketmq-monitoring\u0026quot; scrape_interval: 30s static_configs: - targets: ['rocketmq-exporter:5557'] relabel_configs: - source_labels: [ ] target_label: cluster replacement: rocketmq-cluster exporters: otlp: endpoint: oap:11800 tls: insecure: true processors: batch: service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp 监控指标 指标分为 三个维度, cluster,broker,topic\ncluster监控 cluster 主要是站在集群的角度来统计展示,比如\nMessages Produced Today 今日集群产生的消息数\nMax CommitLog Disk Ratio 展示集群中磁盘使用率最高的broker\nTotal Producer Tps 集群生产者tps\nbroker 监控 broker 主要是站在节点的角度来统计展示,比如\nProduce Tps 节点生产者tps\nProducer Message Size(MB)节点生产消息大小\ntopic 监控 topic 主要是站在主题的角度来统计展示,比如\nConsumer Group Count 消费该主题的消费者组个数\nConsumer Latency(s) 消费者组的消费延时时间\nBacklogged Messages 消费者组消费消息堆积\n注意:topic 维度是整个 topic 来聚合,并不是在一个 broker 上的 topic 聚合,在 dashboard 上你也可以看到 broker 跟 topic 是平级的。\n各个指标的含义可以在图标的 tip 上找到解释\n更多指标可以参考文档\ndemo 已经在 skywalking showcase 上线,可以在上面看到展示效果\n服务分层 skywalking 10 新增了重要功能Service Hierarchy,接收来自不同层级的服务数据,比如 java agent 上报,k8s 监控数据或者 otel 的监控数据. 
根据设置规则如果发现这些服务名称符合匹配规则,则可以将这些不同层级的服务联系起来。\n如下图所示:\nskywalking 采集部署在 k8s 的 RocketMQ 服务端的k8s 数据,并接收来自 otel 的 RocketMQ 服务端监控数据,根据匹配规则这些服务具有相同的服务名称,则可以在 ui 上观察到它们的联系\n","excerpt":"背景介绍 Apache RocketMQ 是一个开源的低延迟、高并发、高可用、高可靠的分布式消息中间件, 从SkyWalking OAP 10.0 版本开始, 新增了 对 RocketMQ …","ref":"/zh/2024-02-29-rocketmq-monitoring-by-skywalking/","title":"使用 SkyWalking 监控 RocketMQ Server"},{"body":"SkyWalking Go 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Add support ignore suffix for span name. Adding go 1.21 and 1.22 in docker image.  Plugins  Support setting a discard type of reporter. Add redis.max_args_bytes parameter for redis plugin. Changing intercept point for gin, make sure interfaces could be grouped when params defined in relativePath. Support RocketMQ MQ. Support AMQP MQ. support Echov4 framework.  Documentation Bug Fixes  Fix users can not use async api in toolkit-trace. Fix cannot enhance the vendor management project. Fix SW_AGENT_REPORTER_GRPC_MAX_SEND_QUEUE not working on metricsSendCh \u0026amp; logSendCh chans of gRPC reporter. Fix ParseVendorModule error for special case in vendor/modules.txt. Fix enhance method error when unknown parameter type. Fix wrong tracing context when trace have been sampled. Fix enhance param error when there are multiple params. Fix lost trace when multi middleware handlerFunc in gin plugin. Fix DBQueryContext execute error in sql plugin. Fix stack overflow as endless logs triggered.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Go 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Add support …","ref":"/events/release-apache-skwaylking-go-0.4.0/","title":"Release Apache SkyWalking Go 0.4.0"},{"body":"","excerpt":"","ref":"/tags/nginx/","title":"Nginx"},{"body":"背景介绍 在 Scala 中,纯函数式中主要使用 Fiber,而不是线程,诸如 Cats-Effect、ZIO 等 Effect 框架。 您可以将 Fiber 视为轻量级线程,它是一种并发模型,由框架本身掌控控制权,从而消除了上下文切换的开销。 基于这些 Effect 框架开发的 HTTP、gRCP、GraphQL 库而开发的应用,我们一般称为 纯函数式应用程序。\n我们以 ZIO 为切入点, 演示 SkyWalking Scala 如何支持 Effect 生态。\nZIO Trace 首先,我们想要实现 Fiber 上下文传递,而不是监控 Fiber 本身。对于一个大型应用来说,可能存在成千上万个 Fiber,监控 Fiber 本身的意义不大。\n虽然 Fiber 的 Span 是在活跃时才会创建,但难免会有目前遗漏的场景,所以提供了一个配置 plugin.ziov2.ignore_fiber_regexes。 它将使用正则去匹配 Fiber location,匹配上的 Fiber 将不会创建 Span。\nFiber Span的信息如下:\n下面是我们使用本 ZIO 插件,和一些官方插件(hikaricp、jdbc、pulsar)完成的 Trace:\n分析 在 ZIO 中,Fiber可以有两种方式被调度,它们都是 zio.Executor 的子类。当然您也可以使用自己的线程池,这样也需被 ZIO 包装,其实就类似下面的 blockingExecutor。\nabstract class Executor extends ExecutorPlatformSpecific { self =\u0026gt; def submit(runnable: Runnable)(implicit unsafe: Unsafe): Boolean } 一种是系统默认线程池 defaultExecutor:\nprivate[zio] trait RuntimePlatformSpecific { final val defaultExecutor: Executor = Executor.makeDefault() } 另一种是专用于阻塞 IO 的线程池 blockingExecutor:\nprivate[zio] trait RuntimePlatformSpecific { final val defaultBlockingExecutor: Executor = Blocking.blockingExecutor } 默认线程池 defaultExecutor 对于 defaultExecutor,其本身是很复杂的,但它就是一个 ZIO 的 Fiber 调度(执行)器:\n/** * A `ZScheduler` is an `Executor` that is optimized for running ZIO * applications. Inspired by \u0026#34;Making the Tokio Scheduler 10X Faster\u0026#34; by Carl * Lerche. 
[[https://tokio.rs/blog/2019-10-scheduler]] */ private final class ZScheduler extends Executor 由于它们都是 zio.Executor 的子类,我们只需要对其及其子类进行增强:\nfinal val ENHANCE_CLASS = LogicalMatchOperation.or( HierarchyMatch.byHierarchyMatch(\u0026#34;zio.Executor\u0026#34;), MultiClassNameMatch.byMultiClassMatch(\u0026#34;zio.Executor\u0026#34;) ) 它们都是线程池,我们只需要在 zio.Executor 的 submit 方法上进行类似 ThreadPoolExecutor 上下文捕获的操作,可以参考 jdk-threadpool-plugin\n这里需要注意,因为 Fiber 也是一种 Runnable:\nprivate[zio] trait FiberRunnable extends Runnable { def location: Trace def run(depth: Int): Unit } zio-v2x-plugin\n阻塞线程池 blockingExecutor 对于 blockingExecutor,其实它只是对 Java 线程池进行了一个包装:\nobject Blocking { val blockingExecutor: zio.Executor = zio.Executor.fromThreadPoolExecutor { val corePoolSize = 0 val maxPoolSize = Int.MaxValue val keepAliveTime = 60000L val timeUnit = TimeUnit.MILLISECONDS val workQueue = new SynchronousQueue[Runnable]() val threadFactory = new NamedThreadFactory(\u0026#34;zio-default-blocking\u0026#34;, true) val threadPool = new ThreadPoolExecutor( corePoolSize, maxPoolSize, keepAliveTime, timeUnit, workQueue, threadFactory ) threadPool } } 由于其本身是对 ThreadPoolExecutor 的封装,所以,当我们已经实现了 zio.Executor 的增强后,只需要使用官方 jdk-threadpool-plugin 插件即可。 这里我们还想要对代码进行定制修改和复用,所以重新使用 Scala 实现了一个 executors-plugin 插件。\n串连 Fiber 上下文 最后,上面谈到过,Fiber 也是一种 Runnable,因此还需要对 zio.internal.FiberRunnable 进行增强。大致分为两点,其实与 jdk-threading-plugin 是一样的。\n 每次创建 zio.internal.FiberRunnable 实例时,都需要保存 现场,即构造函数增强。 每次运行时创建一个过渡的 Span,将当前线程上下文与之前保存在构造函数中的上下文进行关联。Fiber 可能被不同线程执行,所以这是必须的。  zio-v2x-plugin\n说明 当我们完成了对 ZIO Fiber 的上下文传播处理后,任意基于 ZIO 的应用层框架都可以按照普通的 Java 插件思路去开发。 我们只需要找到一个全局切入点,这个切入点应该是每个请求都会调用的方法,然后对这个方法进行增强。\n要想激活插件,只需要在 Release Notes 下载插件,放到您的 skywalking-agent/plugins 目录,重新启动服务即可。\n如果您的项目使用 sbt assembly 打包,您可以参考这个 示例。该项目使用了下列技术栈:\nlibraryDependencies ++= Seq( \u0026#34;io.d11\u0026#34; %% \u0026#34;zhttp\u0026#34; % zioHttp2Version, \u0026#34;dev.zio\u0026#34; %% \u0026#34;zio\u0026#34; % zioVersion, \u0026#34;io.grpc\u0026#34; % \u0026#34;grpc-netty\u0026#34; % \u0026#34;1.50.1\u0026#34;, \u0026#34;com.thesamet.scalapb\u0026#34; %% \u0026#34;scalapb-runtime-grpc\u0026#34; % scalapb.compiler.Version.scalapbVersion ) ++ Seq( \u0026#34;dev.profunktor\u0026#34; %% \u0026#34;redis4cats-effects\u0026#34; % \u0026#34;1.3.0\u0026#34;, \u0026#34;dev.profunktor\u0026#34; %% \u0026#34;redis4cats-log4cats\u0026#34; % \u0026#34;1.3.0\u0026#34;, \u0026#34;dev.profunktor\u0026#34; %% \u0026#34;redis4cats-streams\u0026#34; % \u0026#34;1.3.0\u0026#34;, \u0026#34;org.typelevel\u0026#34; %% \u0026#34;log4cats-slf4j\u0026#34; % \u0026#34;2.5.0\u0026#34;, \u0026#34;dev.zio\u0026#34; %% \u0026#34;zio-interop-cats\u0026#34; % \u0026#34;23.0.03\u0026#34;, \u0026#34;ch.qos.logback\u0026#34; % \u0026#34;logback-classic\u0026#34; % \u0026#34;1.2.11\u0026#34;, \u0026#34;dev.zio\u0026#34; %% \u0026#34;zio-cache\u0026#34; % zioCacheVersion ) ","excerpt":"背景介绍 在 Scala 中,纯函数式中主要使用 Fiber,而不是线程,诸如 Cats-Effect、ZIO 等 Effect 框架。 您可以将 Fiber 视为轻量级线程,它是一种并发模型,由框架 …","ref":"/zh/2024-01-04-skywalking-for-scala-effect-runtime/","title":"SkyWalking 如何支持 ZIO 等 Scala Effect Runtime"},{"body":"Xiang Wei(GitHub ID, weixiang1862) made a lot of significant contributions to SkyWalking since 2023. 
He made dozens of pull requests to multiple SkyWalking repositories, including very important features, such as Loki LogQL support, Nginx monitoring, MongoDB monitoring, as well as bug fixes, blog posts, and showcase updates.\nHere are the complete pull request list grouped by repositories.\nskywalking  Support Nginx monitoring. (https://github.com/apache/skywalking/pull/11558) Fix JDBC Log query order. (https://github.com/apache/skywalking/pull/11544) Isolate MAL CounterWindow cache by metric name.(https://github.com/apache/skywalking/pull/11526) Support extract timestamp from patterned datetime string in LAL.(https://github.com/apache/skywalking/pull/11489) Adjust AlarmRecord alarmMessage column length to 512. (https://github.com/apache/skywalking/pull/11404) Use listening mode for Apollo configuration.(https://github.com/apache/skywalking/pull/11186) Support LogQL HTTP query APIs. (https://github.com/apache/skywalking/pull/11168) Support MongoDB monitoring (https://github.com/apache/skywalking/pull/11111) Support reduce aggregate function in MQE.(https://github.com/apache/skywalking/pull/11036) Fix instance query in JDBC implementation.(https://github.com/apache/skywalking/pull/11024) Fix metric session cache saving after batch insert when using mysql-connector-java.(https://github.com/apache/skywalking/pull/11012) Add component ID for WebSphere.(https://github.com/apache/skywalking/pull/10974) Support sumLabeled in MAL (https://github.com/apache/skywalking/pull/10916)  skywalking-java  Optimize plugin selector logic.(https://github.com/apache/skywalking-java/pull/651) Fix config length limitation.(https://github.com/apache/skywalking-java/pull/623) Optimize spring-cloud-gateway 2.1.x, 3.x witness class.(https://github.com/apache/skywalking-java/pull/610) Add WebSphere Liberty 23.x plugin.(https://github.com/apache/skywalking-java/pull/560)  skywalking-swck  Remove SwAgent default env JAVA_TOOL_OPTIONS.(https://github.com/apache/skywalking-swck/pull/106) Fix panic in storage reconciler.(https://github.com/apache/skywalking-swck/pull/94) Support inject java agent bootstrap-plugins.(https://github.com/apache/skywalking-swck/pull/91) Fix number env value format error in template yaml.(https://github.com/apache/skywalking-swck/pull/90)  skywalking-showcase  Nginx monitoring showcase.(https://github.com/apache/skywalking-showcase/pull/153) LogQL showcase. (https://github.com/apache/skywalking-showcase/pull/146) MongoDB monitoring showcase. (https://github.com/apache/skywalking-showcase/pull/144)##  skywalking-website  Add blog: monitoring-nginx-by-skywalking.(https://github.com/apache/skywalking-website/pull/666) Add blog: collect and analyse nginx access log by LAL.(https://github.com/apache/skywalking-website/pull/652) Add blog: integrating-skywalking-with-arthas.(https://github.com/apache/skywalking-website/pull/641)   At Dec. 28th, 2023, the project management committee (PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome to join the committer team, Xiang Wei! We are honored to have you in the team.\n","excerpt":"Xiang Wei(GitHub ID, weixiang1862) made a lot of significant contributions to SkyWalking since 2023. 
…","ref":"/events/welcome-xiang-wei-as-new-committer/","title":"Welcome Xiang Wei as new committer"},{"body":"Background Apache SkyWalking is an open-source application performance management system that helps users collect and aggregate logs, traces, metrics, and events, and display them on the UI.\nIn order to achieve monitoring capabilities for Nginx, we have introduced the Nginx monitoring dashboard in SkyWalking 9.7, and this article will demonstrate the use of this monitoring dashboard and introduce the meaning of related metrics.\nSetup Monitoring Dashboard Metric Define and Collection Since nginx-lua-prometheus is used to define and expose metrics, we need to install lua_nginx_module for Nginx, or use OpenResty directly.\nIn the following example, we define four metrics via nginx-lua-prometheus and expose the metrics interface via nginx ip:9145/metrics:\n histogram: nginx_http_latency,monitoring http latency gauge: nginx_http_connections,monitoring nginx http connections counter: nginx_http_size_bytes,monitoring http size of request and response counter: nginx_http_requests_total,monitoring total http request numbers  http { log_format main '$remote_addr - $remote_user [$time_local] \u0026quot;$request\u0026quot; ' '$status $body_bytes_sent \u0026quot;$http_referer\u0026quot; ' '\u0026quot;$http_user_agent\u0026quot; \u0026quot;$http_x_forwarded_for\u0026quot;'; access_log /var/log/nginx/access.log main; lua_shared_dict prometheus_metrics 10M; # lua_package_path \u0026quot;/path/to/nginx-lua-prometheus/?.lua;;\u0026quot;; init_worker_by_lua_block { prometheus = require(\u0026quot;prometheus\u0026quot;).init(\u0026quot;prometheus_metrics\u0026quot;) metric_bytes = prometheus:counter( \u0026quot;nginx_http_size_bytes\u0026quot;, \u0026quot;Total size of HTTP\u0026quot;, {\u0026quot;type\u0026quot;, \u0026quot;route\u0026quot;}) metric_requests = prometheus:counter( \u0026quot;nginx_http_requests_total\u0026quot;, \u0026quot;Number of HTTP requests\u0026quot;, {\u0026quot;status\u0026quot;, \u0026quot;route\u0026quot;}) metric_latency = prometheus:histogram( \u0026quot;nginx_http_latency\u0026quot;, \u0026quot;HTTP request latency\u0026quot;, {\u0026quot;route\u0026quot;}) metric_connections = prometheus:gauge( \u0026quot;nginx_http_connections\u0026quot;, \u0026quot;Number of HTTP connections\u0026quot;, {\u0026quot;state\u0026quot;}) } server { listen 8080; location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } } server { listen 9145; location /metrics { content_by_lua_block { metric_connections:set(ngx.var.connections_reading, {\u0026quot;reading\u0026quot;}) metric_connections:set(ngx.var.connections_waiting, {\u0026quot;waiting\u0026quot;}) metric_connections:set(ngx.var.connections_writing, {\u0026quot;writing\u0026quot;}) prometheus:collect() } } } } In the above example, we exposed the route-level metrics, and you can also choose to expose the host-level metrics according to the monitoring granularity:\nhttp { log_by_lua_block { 
metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or upstream-level metrics:\nupstream backend { server ip:port; } server { location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } After defining the metrics, we start nginx and opentelemetry-collector to collect the metrics and send them to the SkyWalking backend for analysis and storage.\nPlease ensure that job_name: 'nginx-monitoring', otherwise the reported data will be ignored by SkyWalking. If you have multiple Nginx instances, you can distinguish them using the service and service_instance_id labels:\nreceivers: prometheus: config: scrape_configs: - job_name: 'nginx-monitoring' scrape_interval: 5s metrics_path: \u0026quot;/metrics\u0026quot; static_configs: - targets: ['nginx:9145'] labels: service: nginx service_instance_id: nginx-instance processors: batch: exporters: otlp: endpoint: oap:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp If everything goes well, you will see the metric data reported by Nginx under the gateway menu of the skywalking-ui:\nAccess \u0026amp; Error Log Collection SkyWalking Nginx monitoring provides log collection and error log analysis. We can use fluent-bit to collect and report access logs and error logs to SkyWalking for analysis and storage.\nFluent-bit configuration below defines the log collection directory as /var/log/nginx/. 
The access and error logs will be reported through rest port 12800 of oap after being processed by rewrite_access_log and rewrite_error_log functions:\n[SERVICE] Flush 5 Daemon Off Log_Level warn [INPUT] Name tail Tag access Path /var/log/nginx/access.log [INPUT] Name tail Tag error Path /var/log/nginx/error.log [FILTER] Name lua Match access Script fluent-bit-script.lua Call rewrite_access_log [FILTER] Name lua Match error Script fluent-bit-script.lua Call rewrite_error_log [OUTPUT] Name stdout Match * Format json [OUTPUT] Name http Match * Host oap Port 12800 URI /v3/logs Format json In the fluent-bit-script.lua, we use LOG_KIND tag to distinguish between access logs and error logs.\nTo associate with the metrics, please ensure that the values of service and serviceInstance are consistent with the metric collection definition in the previous section.\nfunction rewrite_access_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ACCESS_LOG\u0026quot;}}} return 1, timestamp, newRecord end function rewrite_error_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ERROR_LOG\u0026quot; }}} return 1, timestamp, newRecord end After starting fluent-it, we can see the collected log information in the Log tab of the monitoring panel:\nMeaning of Metrics    Metric Name Unit Description Data Source     HTTP Request Trend  The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  The avg number of the connections nginx-lua-prometheus   HTTP Status Trend % The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % The percentage of 4xx status of HTTP requests nginx-lua-prometheus   Error Log Count  The count of log level of nginx error.log fluent-bit    References  nginx-lua-prometheus fluent-bit-lua-filter skywalking-apisix-monitoring  ","excerpt":"Background Apache SkyWalking is an open-source application performance management system that helps …","ref":"/blog/2023-12-23-monitoring-nginx-by-skywalking/","title":"Monitoring Nginx with SkyWalking"},{"body":"背景介绍 在前面的 Blog 使用 LAL 收集并分析 Nginx access log 中,我们以 Nginx access log 为切入点, 演示了 SkyWalking LAL 的日志分析能力。\n为了实现对 Nginx 更全面的监控能力,我们在 SkyWalking 9.7 中引入了 Nginx 监控面板,本文将演示该监控面板的使用,并介绍相关指标的含义。\n监控面板接入 Metric 定义与采集 由于使用了 nginx-lua-prometheus 来定义及暴露指标, 我们需要为 Nginx 安装 lua_nginx_module, 或者直接使用OpenResty。\n下面的例子中,我们通过 nginx-lua-prometheus 定义了四个指标,并通过 ip:9145/metrics 暴露指标接口:\n histogram: nginx_http_latency,监控 
http 延时 gauge: nginx_http_connections,监控 http 连接数 counter: nginx_http_size_bytes,监控 http 请求和响应大小 counter: nginx_http_requests_total,监控 http 请求次数  http { log_format main '$remote_addr - $remote_user [$time_local] \u0026quot;$request\u0026quot; ' '$status $body_bytes_sent \u0026quot;$http_referer\u0026quot; ' '\u0026quot;$http_user_agent\u0026quot; \u0026quot;$http_x_forwarded_for\u0026quot;'; access_log /var/log/nginx/access.log main; lua_shared_dict prometheus_metrics 10M; # lua_package_path \u0026quot;/path/to/nginx-lua-prometheus/?.lua;;\u0026quot;; init_worker_by_lua_block { prometheus = require(\u0026quot;prometheus\u0026quot;).init(\u0026quot;prometheus_metrics\u0026quot;) metric_bytes = prometheus:counter( \u0026quot;nginx_http_size_bytes\u0026quot;, \u0026quot;Total size of HTTP\u0026quot;, {\u0026quot;type\u0026quot;, \u0026quot;route\u0026quot;}) metric_requests = prometheus:counter( \u0026quot;nginx_http_requests_total\u0026quot;, \u0026quot;Number of HTTP requests\u0026quot;, {\u0026quot;status\u0026quot;, \u0026quot;route\u0026quot;}) metric_latency = prometheus:histogram( \u0026quot;nginx_http_latency\u0026quot;, \u0026quot;HTTP request latency\u0026quot;, {\u0026quot;route\u0026quot;}) metric_connections = prometheus:gauge( \u0026quot;nginx_http_connections\u0026quot;, \u0026quot;Number of HTTP connections\u0026quot;, {\u0026quot;state\u0026quot;}) } server { listen 8080; location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } } server { listen 9145; location /metrics { content_by_lua_block { metric_connections:set(ngx.var.connections_reading, {\u0026quot;reading\u0026quot;}) metric_connections:set(ngx.var.connections_waiting, {\u0026quot;waiting\u0026quot;}) metric_connections:set(ngx.var.connections_writing, {\u0026quot;writing\u0026quot;}) prometheus:collect() } } } } 上面的例子中,我们暴露了 route 级别的指标,你也可以根据监控粒度的需要,选择暴露 host 指标:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } 或者 upstream 指标:\nupstream backend { server ip:port; } server { location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } 完成指标定义后,我们启动 nginx 和 opentelemetry-collector,将指标采集到 SkyWalking 后端进行分析和存储。\n请确保job_name: 'nginx-monitoring',否则上报的数据将被 SkyWalking 忽略。如果你有多个 Nginx 实例,你可以通过service及service_instance_id这两个 label 进行区分:\nreceivers: prometheus: config: scrape_configs: - job_name: 
'nginx-monitoring' scrape_interval: 5s metrics_path: \u0026quot;/metrics\u0026quot; static_configs: - targets: ['nginx:9145'] labels: service: nginx service_instance_id: nginx-instance processors: batch: exporters: otlp: endpoint: oap:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp 如果一切顺利,你将在 skywalking-ui 的网关菜单下看到 nginx 上报的指标数据:\nAccess \u0026amp; Error Log 采集 SkyWalking Nginx 监控提供了日志采集及错误日志统计功能,我们可以借助 fluent-bit 采集并上报 access log、error log 给 SkyWalking 分析存储。\n下面 fluent-bit 配置定义了日志采集目录为/var/log/nginx/,access 和 error log 经过 rewrite_access_log 和 rewrite_error_log 处理后会通过 oap 12800 端口进行上报:\n[SERVICE] Flush 5 Daemon Off Log_Level warn [INPUT] Name tail Tag access Path /var/log/nginx/access.log [INPUT] Name tail Tag error Path /var/log/nginx/error.log [FILTER] Name lua Match access Script fluent-bit-script.lua Call rewrite_access_log [FILTER] Name lua Match error Script fluent-bit-script.lua Call rewrite_error_log [OUTPUT] Name stdout Match * Format json [OUTPUT] Name http Match * Host oap Port 12800 URI /v3/logs Format json 在 fluent-bit-script.lua 中,我们通过 LOG_KIND 来区分 access log 和 error log。\n为了能够关联上文采集的 metric,请确保 service 和 serviceInstance 值与上文中指标采集定义一致。\nfunction rewrite_access_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ACCESS_LOG\u0026quot;}}} return 1, timestamp, newRecord end function rewrite_error_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ERROR_LOG\u0026quot; }}} return 1, timestamp, newRecord end 启动 fluent-it 后,我们便可以在监控面板的 Log tab 看到采集到的日志信息:\n面板指标含义    面板名称 单位 指标含义 数据源     HTTP Request Trend  每秒钟平均请求数 nginx-lua-prometheus   HTTP Latency ms 平均响应延时 nginx-lua-prometheus   HTTP Bandwidth KB 请求响应流量 nginx-lua-prometheus   HTTP Connections  nginx http 连接数 nginx-lua-prometheus   HTTP Status Trend % 每分钟 http 状态码统计 nginx-lua-prometheus   HTTP Status 4xx Percent % 4xx状态码比例 nginx-lua-prometheus   HTTP Status 5xx Percent % 5xx状态码比例 nginx-lua-prometheus   Error Log Count  每分钟错误日志数统计 fluent-bit    参考文档  nginx-lua-prometheus fluent-bit-lua-filter skywalking-apisix-monitoring  ","excerpt":"背景介绍 在前面的 Blog 使用 LAL 收集并分析 Nginx access log 中,我们以 Nginx access log 为切入点, 演示了 SkyWalking LAL 的日志分析能 …","ref":"/zh/2023-12-23-monitoring-nginx-by-skywalking/","title":"使用 SkyWalking 监控 Nginx"},{"body":"🚀 Dive into the World of Cutting-Edge Technology with Apache\u0026rsquo;s Finest! 🌐 Join me today as we embark on an exhilarating journey with two of Apache\u0026rsquo;s most brilliant minds - Sheng Wu and Trista Pan. We\u0026rsquo;re exploring the realms of Apache SkyWalking and Apache ShardingSphere, two groundbreaking initiatives that are reshaping the landscape of open-source technology. 
🌟\nIn this exclusive session, we delve deep into Apache SkyWalking - an innovative observability platform that\u0026rsquo;s revolutionizing how we monitor and manage distributed systems in the cloud. Witness firsthand how SkyWalking is empowering developers and organizations to gain unparalleled insights into their applications, ensuring performance, reliability, and efficient troubleshooting. 🛰️🔍\nBut there\u0026rsquo;s more! We\u0026rsquo;re also unveiling the secrets of Apache ShardingSphere, a dynamic distributed database ecosystem. Learn how ShardingSphere is making waves in the world of big data, offering scalable, high-performance solutions for data sharding, encryption, and more. This is your gateway to understanding how these technologies are pivotal in handling massive data sets across various industries. 🌐💾\nWhether you\u0026rsquo;re a developer, tech enthusiast, or just curious about the future of open-source technology, this is a conversation you don\u0026rsquo;t want to miss! Get ready to be inspired and informed as we unlock new possibilities and applications of Apache SkyWalking and ShardingSphere. 🚀🌟\nJoin us, and let\u0026rsquo;s decode the future together!\n  Please join and follow Josh\u0026rsquo;s 龙之春 Youtube Coffee + Software with Josh Long Channel to learn more about technology and open source from telanted engineers and industry leads.\n","excerpt":"🚀 Dive into the World of Cutting-Edge Technology with Apache\u0026rsquo;s Finest! 🌐 Join me today as we …","ref":"/blog/2023-12-04-coffee+software-with-josh-long/","title":"[Video] Coffee + Software with Josh Long - Apache SkyWalking with Sheng Wu and Apache ShardingSphere with Trista Pan"},{"body":"SkyWalking CLI 0.13.0 is released. Go to downloads page to find release tars.\nFeatures  Add the sub-command menu get for get the ui menu items by @mrproliu in https://github.com/apache/skywalking-cli/pull/187  Bug Fixes  Fix the record list query does not support new OAP versions (with major version number \u0026gt; 9).  ","excerpt":"SkyWalking CLI 0.13.0 is released. Go to downloads page to find release tars.\nFeatures  Add the …","ref":"/events/release-apache-skywalking-cli-0-13-0/","title":"Release Apache SkyWalking CLI 0.13.0"},{"body":"SkyWalking Java Agent 9.1.0 is released. Go to downloads page to find release tars. Changes by Version\n9.1.0  Fix hbase onConstruct NPE in the file configuration scenario Fix the issue of createSpan failure caused by invalid request URL in HttpClient 4.x/5.x plugin Optimize ElasticSearch 6.x 7.x plugin compatibility Fix an issue with the httpasyncclient component where the isError state is incorrect. Support customization for the length limitation of string configurations Add max length configurations in agent.config file for service_name and instance_name Optimize spring-cloud-gateway 2.1.x, 3.x witness class. Support report MongoDB instance info in Mongodb 4.x plugin. To compatible upper and lower case Oracle TNS url parse. Support collecting ZGC memory pool metrics. Require OAP 9.7.0 to support these new metrics. Upgrade netty-codec-http2 to 4.1.100.Final Add a netty-http 4.1.x plugin to trace HTTP requests. Fix Impala Jdbc URL (including schema without properties) parsing exception. Optimize byte-buddy type description performance. Add eclipse-temurin:21-jre as another base image. Bump byte-buddy to 1.14.9 for JDK21 support. Add JDK21 plugin tests for Spring 6. Bump Lombok to 1.18.30 to adopt JDK21 compiling. Fix PostgreSQL Jdbc URL parsing exception. Bump up grpc version. 
Optimize plugin selector logic.  Documentation  Fix JDK requirement in the compiling docs. Add JDK21 support in the compiling docs.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 9.1.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-9-1-0/","title":"Release Apache SkyWalking Java Agent 9.1.0"},{"body":"","excerpt":"","ref":"/tags/video/","title":"Video"},{"body":"SkyWalking 9.7.0 is released. Go to downloads page to find release tars.\nDark Mode The dafult style mode is changed to the dark mode, and light mode is still available.\nNew Design Log View A new design for the log view is currently available. Easier to locate the logs, and more space for the raw text.\nProject  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. 
Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.7.0 is released. Go to downloads page to find release tars.\nDark Mode The dafult style …","ref":"/events/release-apache-skywalking-apm-9.7.0/","title":"Release Apache SkyWalking APM 9.7.0"},{"body":"","excerpt":"","ref":"/zh_tags/conference/","title":"Conference"},{"body":"SkyWalking Summit 2023 @ Shanghai 会议时间:2023年11月4日 全天 地点:上海大华虹桥假日酒店 赞助商:纵目科技,Tetrate\n会议议程 与 PDF SkyWalking V9 In 2023 - 5 featured releases  吴晟 PDF  B站视频地址\n使用 Terraform 与 Ansible 快速部署 SkyWalking 集群  柯振旭 PDF  B站视频地址\n基于SkyWalking构建全域一体化观测平台  陈修能 PDF  B站视频地址\n云原生可观测性数据库BanyanDB  高洪涛 PDF  B站视频地址\n基于 SkyWalking Agent 的性能剖析和实时诊断  陆家靖 PDF  B站视频地址\n太保科技-多云环境下Zabbix的运用实践  田川 PDF  B站视频地址\nKubeSphere 在可观测性领域的探索与实践  霍秉杰 PDF  B站视频地址\n大型跨国企业的微服务治理  张文杰 PDF  B站视频地址\n","excerpt":"SkyWalking Summit 2023 @ Shanghai 会议时间:2023年11月4日 全天 地点:上海大华虹桥假日酒店 赞助商:纵目科技,Tetrate\n会议议程 与 PDF …","ref":"/zh/2023-11-04-skywalking-summit-shanghai/","title":"SkyWalking Summit 2023 @ Shanghai 会议回顾"},{"body":"","excerpt":"","ref":"/zh_tags/video/","title":"Video"},{"body":"","excerpt":"","ref":"/zh_tags/","title":"Zh_tags"},{"body":"SkyWalking Infra E2E 1.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support sha256enc and sha512enc encoding in verify case. Support hasPrefix and hasSuffix string verifier in verify case. Bump up kind to v0.14.0. Add a field kubeconfig to support running e2e test on an existing kubernetes cluster. 
Support non-fail-fast execution of test cases support verify cases concurrently Add .exe suffix to windows build artifact Export the kubeconfig path during executing the following steps Automatically pull images before loading into KinD Support outputting the result of \u0026lsquo;verify\u0026rsquo; in YAML format and only outputting the summary of the result of \u0026lsquo;verify\u0026rsquo; Make e2e test itself in github action Support outputting the summary of \u0026lsquo;verify\u0026rsquo; in YAML format Make e2e output summary with numeric information Add \u0026lsquo;subtractor\u0026rsquo; function  Improvements  Bump up GHA to avoid too many warnings Leverage the built-in cache in setup-go@v4 Add batchOutput config to reduce outputs Disable batch mode by default, add it to GHA and enable by default Improve GitHub Actions usability and speed by using composite actions' new feature Migrate deprecated GitHub Actions command to recommended ones Bump up kind to v0.14.0 Optimization of the output information of verification verifier: notEmpty should be able to handle nil Remove invalid configuration in GitHub Actions  Bug Fixes  Fix deprecation warnings Ignore cancel error when copying container logs  Documentation  Add a doc to introduce how to use e2e to test itself  Issues and PR  All issues are here All pull requests are here  ","excerpt":"SkyWalking Infra E2E 1.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skywalking-infra-e2e-1-3-0/","title":"Release Apache SkyWalking Infra E2E 1.3.0"},{"body":"","excerpt":"","ref":"/tags/ospp/","title":"OSPP"},{"body":"Aapche SkyWalking PMC 和 committer团队参加了\u0026quot;开源之夏 2023\u0026quot;活动,作为导师,共获得了9个官方赞助名额。最终对学生开放如下任务\n SkyWalking 支持 GraalVM Skywalking Infra E2E 自测试 监控Apache Pulsar 统一BanyanDB的查询计划和查询执行器 使用Helm部署BanyanDB 编写go agent的gRPC插件 监控Kafka 集成SkyWalking PHP到SkyWalking E2E 测试 在线黄金指标异常检测  经过3个月的开发,上游评审,PMC成员评议,PMC Chair复议,OSPP官方委员会评审多个步骤,现公布项目参与人员与最终结果\n通过评审项目(共6个) SkyWalking 支持 GraalVM  学生:张跃骎 学校:辽宁大学 本科 合并PR:11354 后续情况说明:GraalVM因为复杂的生态,替代的代码将被分离到SkyWalking GraalVM Distro, 相关讨论,请参见Issue 11518  Skywalking Infra E2E 自测试  学生:王子忱 学校:华中师范大学 本科 合并PR:115, 116, 117, 118, 119 后续情况说明:此特性已经包含在发行版skywalking-infra-e2e v1.3.0中  统一BanyanDB的查询计划和查询执行器  学生:曾家华 学校:电子科技大学 本科 合并PR:343  使用Helm部署BanyanDB  学生:黄友亮 学校:北京邮电大学 硕士研究生 合并PR:1 情况说明:因为BanyanDB Helm为新项目,学生承接了项目初始化、功能提交、自动化测试,发布准备等多项任务。所参与功能包含在skywalking-banyandb-helm v0.1.0中  编写go agent的gRPC插件  学生:胡宇腾 学校:西安邮电大学 合并PR:88, 94 后续情况说明:该学生在开源之夏相关项目外,完成了feature: add support for iris #99和Go agent APIs功能开发。并发表文章SkyWalking Go Toolkit Trace 详解以及英文译本Detailed explanation of SkyWalking Go Toolkit Trace  监控Kafka  学生:王竹 学校:美国东北大学 ( Northeastern University) 合并PR:11282, UI 318  未通过评审项目(3个) 下列项目因为质量无法达到社区要求,违规等原因,将被标定为失败。 注:在开源之夏中失败的项目,其Pull Reqeust可能因为符合社区功能要求,也被接受合并。\n监控Apache Pulsar  学生:孟祥迎 学校:重庆邮电大学 本科 合并PR:11339 失败原因:项目申请成员,作为ASF Pulsar项目的Committer,在担任Pulsar开源之夏项目导师期间,但依然申请了学生参与项目。属于违规行为。SkyWalking PMC审查了此行为并通报开源之夏组委会。开源之夏组委会依据活动规则取消其结项奖金。  集成SkyWalking PHP到SkyWalking E2E 测试  学生:罗文 学校:San Jose State University B.S. 
合并PR:11330 失败原因:根据pull reqeust中的提交记录,SkyWalking PMC Chair审查了提交明细,学生参与代码数量大幅度小于导师的提交代码。并在考虑到这个项目难度以及明显低于SkyWalking 开源之夏项目的平均水平的情况下,通报给开源之夏组委会。经过组委会综合评定,项目不合格。  在线黄金指标异常检测  学生:黄颖 学校:同济大学 研究生 合并PR:无 失败原因:项目在进度延迟后实现较为简单且粗糙,并且没有提供算法评估结果和文档等。在 PR 开启后的为期一个月审核合并期间,学生并未能成功按预定计划改善实现的质量和文档。和导师以及 SkyWalking 社区缺少沟通。  结语 SkyWalking社区每年都有近10位PMC成员或Committer参与开源之夏中,帮助在校学生了解顶级开源项目、开源社区的运作方式。我们希望大家在每年经过3个月的时间,能够真正的帮助在校学生了解开源和参与开源。 因为,社区即使在考虑到学生能力的情况下,不会明显的降低pull request的接受标准。希望今后的学生,能够在早期,积极、主动和导师,社区其他成员保持高频率的沟通,对参与的项目有更深入、准确的了解。\n","excerpt":"Aapche SkyWalking PMC 和 committer团队参加了\u0026quot;开源之夏 2023\u0026quot;活动,作为导师,共获得了9个官方赞助名额。 …","ref":"/zh/2023-11-09-ospp-summary/","title":"开源之夏 2023 SkyWalking 社区项目情况公示"},{"body":"SkyWalking NodeJS 0.7.0 is released. Go to downloads page to find release tars.\n Add deadline config for trace request (#118)  ","excerpt":"SkyWalking NodeJS 0.7.0 is released. Go to downloads page to find release tars.\n Add deadline config …","ref":"/events/release-apache-skywalking-nodejs-0-7-0/","title":"Release Apache SkyWalking for NodeJS 0.7.0"},{"body":"","excerpt":"","ref":"/tags/lal/","title":"LAL"},{"body":"","excerpt":"","ref":"/tags/logging/","title":"Logging"},{"body":"背景介绍 Nginx access log 中包含了丰富的信息,例如:日志时间、状态码、响应时间、body 大小等。通过收集并分析 access log,我们可以实现对 Nginx 中接口状态的监控。\n在本案例中,将由 fluent-bit 收集 access log,并通过 HTTP 将日志信息发送给 SkyWalking OAP Server 进行进一步的分析。\n环境准备 实验需要的 Nginx 及 Fluent-bit 相关配置文件都被上传到了Github,有需要的读者可以自行 git clone 并通过 docker compose 启动,本文中将介绍配置文件中几个关键点。\nNginx日志格式配置 LAL 目前支持 JSON、YAML 及 REGEX 日志解析,为了方便获取到日志中的指标字段,我们将 Nginx 的日志格式定义为 JSON.\nhttp { ... ... log_format main '{\u0026quot;remote_addr\u0026quot;: \u0026quot;$remote_addr\u0026quot;,' '\u0026quot;remote_user\u0026quot;: \u0026quot;$remote_user\u0026quot;,' '\u0026quot;request\u0026quot;: \u0026quot;$request\u0026quot;,' '\u0026quot;time\u0026quot;: \u0026quot;$time_iso8601\u0026quot;,' '\u0026quot;status\u0026quot;: \u0026quot;$status\u0026quot;,' '\u0026quot;request_time\u0026quot;:\u0026quot;$request_time\u0026quot;,' '\u0026quot;body_bytes_sent\u0026quot;: \u0026quot;$body_bytes_sent\u0026quot;,' '\u0026quot;http_referer\u0026quot;: \u0026quot;$http_referer\u0026quot;,' '\u0026quot;http_user_agent\u0026quot;: \u0026quot;$http_user_agent\u0026quot;,' '\u0026quot;http_x_forwarded_for\u0026quot;: \u0026quot;$http_x_forwarded_for\u0026quot;}'; access_log /var/log/nginx/access.log main; ... ... 
} Fluent bit Filter 我们通过 Fluent bit 的 lua filter 进行日志格式的改写,将其调整为 SkyWalking 所需要的格式,record的各个字段含义如下:\n body:日志内容体 service:服务名称 serviceInstance:实例名称  function rewrite_body(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;body\u0026quot;] = { json = { json = record.log } } newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;localhost\u0026quot; return 1, timestamp, newRecord end OAP 日志分析 LAL定义 在 filter 中,我们通过条件判断,只处理 service=nginx::nginx 的服务,其他服务依旧走默认逻辑:\n第一步,使用 json 指令对日志进行解析,解析的结果会被存放到 parsed 字段中,通过 parsed 字段我们可以获取 json 日志中的字段信息。\n第二步,使用 timestamp 指令解析 parsed.time 并将其赋值给日志的 timestamp 字段,这里的 time 就是access log json 中的 time。\n第三步,使用 tag 指令给日志打上对应的标签,标签的值依然可以通过 parsed 字段获取。\n第四步,使用 metrics 指令从日志中提取出指标信息,我们共提取了四个指标:\n nginx_log_count:Nginx 每次请求都会生成一条 access log,该指标可以帮助我们统计 Nginx 当前的请求数。 nginx_request_time:access log 中会记录请求时间,该指标可以帮助我们统计上游接口的响应时长。 nginx_body_bytes_sent:body 大小指标可以帮助我们了解网关上的流量情况。 nginx_status_code:状态码指标可以实现对状态码的监控,如果出现异常上涨可以结合 alarm 进行告警。  rules:- name:defaultlayer:GENERALdsl:|filter { if (log.service == \u0026#34;nginx::nginx\u0026#34;) { json { abortOnFailure true }extractor {timestamp parsed.time as String, \u0026#34;yyyy-MM-dd\u0026#39;T\u0026#39;HH:mm:ssXXX\u0026#34;tag status:parsed.statustag remote_addr:parsed.remote_addrmetrics {timestamp log.timestamp as Longlabels service: log.service, instance:log.serviceInstancename \u0026#34;nginx_log_count\u0026#34;value 1}metrics {timestamp log.timestamp as Longlabels service: log.service, instance:log.serviceInstancename \u0026#34;nginx_request_time\u0026#34;value parsed.request_time as Double}metrics {timestamp log.timestamp as Longlabels service: log.service, instance:log.serviceInstancename \u0026#34;nginx_body_bytes_sent\u0026#34;value parsed.body_bytes_sent as Long}metrics {timestamp log.timestamp as Longlabels service: log.service, instance: log.serviceInstance, status:parsed.statusname \u0026#34;nginx_status_code\u0026#34;value 1}}}sink {}}经过 LAL 处理后,我们已经可以在日志面板看到日志信息了,接下来我们将对 LAL 中提取的指标进行进一步分析:\nMAL定义 在 MAL 中,我们可以对上一步 LAL 中提取的指标进行进一步的分析聚合,下面的例子里:\nnginx_log_count、nginx_request_time、nginx_status_code 使用 sum 聚合函数处理,并使用 SUM 方式 downsampling,\nnginx_request_time 使用 avg 聚合函数求平均值,默认使用 AVG 方式 downsampling。\n完成聚合分析后,SkyWalking Meter System 会完成对上述指标的持久化。\nexpSuffix:service([\u0026#39;service\u0026#39;], Layer.GENERAL)metricPrefix:nginxmetricsRules:- name:cpmexp:nginx_log_count.sum([\u0026#39;service\u0026#39;]).downsampling(SUM)- name:avg_request_timeexp:nginx_request_time.avg([\u0026#39;service\u0026#39;])- name:body_bytes_sent_countexp:nginx_body_bytes_sent.sum([\u0026#39;service\u0026#39;]).downsampling(SUM)- name:status_code_countexp:nginx_status_code.sum([\u0026#39;service\u0026#39;,\u0026#39;status\u0026#39;]).downsampling(SUM)最后,我们便可以来到 SkyWalking UI 页面新建 Nginx 仪表板,使用刚刚 MAL 中定义的指标信息创建 Nginx Dashboard(也可以通过上文提到仓库中的 dashboard.json 直接导入测试):\n参考文档  Fluent Bit lua Filter Log Analysis Language Meter Analysis Language  ","excerpt":"背景介绍 Nginx access log 中包含了丰富的信息,例如:日志时间、状态码、响应时间、body 大小等。通过收集并分析 access log,我们可以实现对 Nginx 中接口状态的监控。 …","ref":"/zh/2023-10-29-collect-and-analyse-nginx-accesslog-by-lal/","title":"使用 LAL 收集并分析 Nginx access log"},{"body":"SkyWalking BanyanDB 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  List all properties in a group. Implement Write-ahead Logging Document the clustering. Support multiple roles for banyand server. Support for recovery buffer using wal. 
Register the node role to the metadata registry. Implement the remote queue to spreading data to data nodes. Fix parse environment variables error Implement the distributed query engine. Add mod revision check to write requests. Add TTL to the property. Implement node selector (e.g. PickFirst Selector, Maglev Selector). Unified the buffers separated in blocks to a single buffer in the shard.  Bugs  BanyanDB ui unable to load icon. BanyanDB ui type error Fix timer not released BanyanDB ui misses fields when creating a group Fix data duplicate writing Syncing metadata change events from etcd instead of a local channel.  Chores  Bump several dependencies and tools. Drop redundant \u0026ldquo;discovery\u0026rdquo; module from banyand. \u0026ldquo;metadata\u0026rdquo; module is enough to play the node and shard discovery role.  ","excerpt":"SkyWalking BanyanDB 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  List all …","ref":"/events/release-apache-skywalking-banyandb-0-5-0/","title":"Release Apache SkyWalking BanyanDB 0.5.0"},{"body":"SkyWalking Go 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support manual tracing APIs for users.  Plugins  Support mux HTTP server framework. Support grpc server and client framework. Support iris framework.  Documentation  Add Tracing APIs document into Manual APIs.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Go 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support manual …","ref":"/events/release-apache-skwaylking-go-0.3.0/","title":"Release Apache SkyWalking Go 0.3.0"},{"body":"Background SkyWalking Go is an open-source, non-intrusive Golang agent used for monitoring, tracing, and data collection within distributed systems. It enables users to observe the flow and latency of requests within the system, collect performance data from various system components for performance monitoring, and troubleshoot issues by tracing the complete path of requests.\nIn version v0.3.0, Skywalking Go introduced the toolkit trace tool. Trace APIs allow users to include critical operations, functions, or services in the tracing scope in situations where plugins do not support them. This inclusion enables tracking and monitoring of these operations and can be used for fault analysis, diagnosis, and performance monitoring.\nBefore diving into this, you can learn how to use the Skywalking Go agent by referring to the SkyWalking Go Agent Quick Start Guide.\nThe following sections will explain how to use these interfaces in specific scenarios.\nIntroducing the Trace Toolkit Execute the following command in the project\u0026rsquo;s root directory:\ngo get github.com/apache/skywalking-go/toolkit To use the toolkit trace interface, you need to import the package into your project:\n\u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Manual Tracing A Span is the fundamental unit of an operation in Tracing. It represents an operation within a specific timeframe, such as a request, a function call, or a specific action. It records essential information about a particular operation, including start and end times, the operation\u0026rsquo;s name, tags (key-value pairs), and relationships between operations. 
Multiple Spans can form a hierarchical structure.\nIn situations where Skywalking-go doesn\u0026rsquo;t support a particular framework, users can manually create Spans to obtain tracing information.\n(Here, I have removed the supported frameworks for the sake of the example. These are only examples. You should reference this when using the APIs in private and/or unsupported frameworks)\nFor example, when you need to trace an HTTP response, you can create a span using trace.CreateEntrySpan() within the method handling the request, and end the span using trace.StopSpan() after processing. When sending an HTTP request, use trace.CreateExitSpan() to create a span, and end the span after the request returns.\nHere are two HTTP services named consumer and provider. When a user accesses the consumer service, it receives the user\u0026rsquo;s request internally and then accesses the provider to obtain resources.\n// consumer.go package main import ( \u0026#34;io\u0026#34; \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func getProvider() (*http.Response, error) { // Create an HTTP request \treq, err := http.NewRequest(\u0026#34;GET\u0026#34;, \u0026#34;http://localhost:9998/provider\u0026#34;, http.NoBody) // Create an ExitSpan before sending the HTTP request. \ttrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { // Injector adds specific header information to the request. \treq.Header.Add(headerKey, headerValue) return nil }) // Finish the ExitSpan and ensure it executes when the function returns using defer. \tdefer trace.StopSpan() // Send the request. \tclient := \u0026amp;http.Client{} resp, err := client.Do(req) if err != nil { return nil, err } return resp, nil } func consumerHandler(w http.ResponseWriter, r *http.Request) { // Create an EntrySpan to trace the execution of the consumerHandler method. \ttrace.CreateEntrySpan(r.Method+\u0026#34;/consumer\u0026#34;, func(headerKey string) (string, error) { // Extractor retrieves the header information added to the request. \treturn r.Header.Get(headerKey), nil }) // Finish the EntrySpan. \tdefer trace.StopSpan() // Prepare to send an HTTP request. \tresp, err := getProvider() body, err := io.ReadAll(resp.Body) if err != nil { return } _, _ = w.Write(body) } func main() { http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } // provider.go package main import ( \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { //Create an EntrySpan to trace the execution of the providerHandler method. \ttrace.CreateEntrySpan(\u0026#34;GET:/provider\u0026#34;, func(headerKey string) (string, error) { return r.Header.Get(headerKey), nil }) // Finish the EntrySpan. 
\tdefer trace.StopSpan() _, _ = w.Write([]byte(\u0026#34;success from provider\u0026#34;)) } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) _ = http.ListenAndServe(\u0026#34;:9998\u0026#34;, nil) } Then, in the terminal, execute:\ngo build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o consumer ./consumer.go ./consumer go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o provider ./provider.go ./provider curl 127.0.0.1:9999/consumer At this point, the UI will display the span information you created.\nIf you need to trace methods that are executed only locally, you can use trace.CreateLocalSpan(). If you don\u0026rsquo;t need to monitor information or states from the other end, you can change ExitSpan and EntrySpan to LocalSpan.\nThe usage examples provided are for illustration purposes, and users can decide the tracing granularity and where in the program they need tracing.\nPlease note that if a program ends too quickly, it may cause tracing data to be unable to be asynchronously sent to the SkyWalking backend.\nPopulate The Span When there\u0026rsquo;s a necessity to record additional information, including creating/updating tags, appending logs, and setting a new operation name of the current traced Span, these APIs should be considered. These actions are used to enhance trace information, providing a more detailed and precise contextual description, which aids in better understanding the events or operations being traced.\nToolkit trace APIs provide a convenient way to access and manipulate trace data, including:\n Setting Tags: SetTag() Adding Logs: AddLog() Setting Span Names: SetOperationName() Getting various IDs: GetTraceID(), GetSegmentID(), GetSpanID()  For example, if you need to record the HTTP status code in a span, you can use the following interfaces while the span is not yet finished:\ntrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { r.Header.Add(headerKey, headerValue) return nil }) resp, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) trace.SetTag(\u0026#34;status_code\u0026#34;, fmt.Sprintf(\u0026#34;%d\u0026#34;, resp.StatusCode)) spanID := trace.GetSpanID() trace.StopSpan() It\u0026rsquo;s important to note that when making these method calls, the current thread should have an active span.\nAsync APIs Async APIs work for manipulating spans across Goroutines. These scenarios might include:\n Applications involving concurrency or multiple goroutines where operating on Spans across different execution contexts is necessary. Updating or logging information for a Span during asynchronous operations. Requiring a delayed completion of a Span.  To use it, follow these steps:\n Obtain the return value of CreateSpan, which is SpanRef. Call spanRef.PrepareAsync() to prepare for operations in another goroutine. When the current goroutine\u0026rsquo;s work is done, call trace.StopSpan() to end the span (affecting only in the current goroutine). Pass the spanRef to another goroutine. After the work is done in any goroutine, call spanRef.AsyncFinish().  
Here\u0026rsquo;s an example:\nspanRef, err := trace.CreateLocalSpan(\u0026#34;LocalSpan\u0026#34;) if err != nil { return } spanRef.PrepareAsync() go func(){ // some work  spanRef.AsyncFinish() }() // some work trace.StopSpan() Correlation Context Correlation Context is used to pass parameters within a Span, and the parent Span will pass the Correlation Context to all its child Spans. It allows the transmission of information between spans across different applications. The default number of elements in the Correlation Context is 3, and the content\u0026rsquo;s length cannot exceed 128 bytes.\nCorrelation Context is commonly applied in the following scenarios:\n Passing Information Between Spans: It facilitates the transfer of critical information between different Spans, enabling upstream and downstream Spans to understand the correlation and context between each other. Passing Business Parameters: In business scenarios, it involves transmitting specific parameters or information between different Spans, such as authentication tokens, business transaction IDs, and more.  Users can set the Correlation Context using trace.SetCorrelation(key, value) and then retrieve the corresponding value in downstream spans using value := trace.GetCorrelation(key).\nFor example, in the code below, we store the value in the tag of the span, making it easier to observe the result:\npackage main import ( _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; \u0026#34;net/http\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { ctxValue := trace.GetCorrelation(\u0026#34;key\u0026#34;) trace.SetTag(\u0026#34;result\u0026#34;, ctxValue) } func consumerHandler(w http.ResponseWriter, r *http.Request) { trace.SetCorrelation(\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;) _, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) if err != nil { return } } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } Then, in the terminal, execute:\nexport SW_AGENT_NAME=server go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o server ./server.go ./server curl 127.0.0.1:9999/consumer Finally, in the providerHandler() span, you will find the information from the Correlation Context:\nConclusion This article provides an overview of Skywalking Go\u0026rsquo;s Trace APIs and their practical application. 
These APIs empower users with the ability to customize tracing functionality according to their specific needs.\nFor detailed information about the interfaces, please refer to the documentation: Tracing APIs.\nWelcome everyone to try out the new version.\n","excerpt":"Background SkyWalking Go is an open-source, non-intrusive Golang agent used for monitoring, tracing, …","ref":"/blog/2023-10-18-skywalking-toolkit-trace/","title":"Detailed explanation of SkyWalking Go Toolkit Trace"},{"body":"背景介绍 SkyWalking Go是一个开源的非侵入式Golang代理程序,用于监控、追踪和在分布式系统中进行数据收集。它使用户能够观察系统内请求的流程和延迟,从各个系统组件收集性能数据以进行性能监控,并通过追踪请求的完整路径来解决问题。\n在版本v0.3.0中,Skywalking Go引入了 toolkit-trace 工具。Trace APIs 允许用户在插件不支持的情况下将关键操作、函数或服务添加到追踪范围。从而实现追踪和监控这些操作,并可用于故障分析、诊断和性能监控。\n在深入了解之前,您可以参考SkyWalking Go Agent快速开始指南来学习如何使用SkyWalking Go Agent。\n下面将会介绍如何在特定场景中使用这些接口。\n导入 Trace Toolkit 在项目的根目录中执行以下命令:\ngo get github.com/apache/skywalking-go/toolkit 使用 toolkit trace 接口前,需要将该包导入到您的项目中:\n\u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; 手动追踪 Span 是 Tracing 中单个操作的基本单元。它代表在特定时间范围内的操作,比如一个请求、一个函数调用或特定动作。Span记录了特定操作的关键信息,包括开始和结束时间、操作名称、标签(键-值对)以及操作之间的关系。多个 Span 可以形成层次结构。\n在遇到 Skywalking Go 不支持的框架的情况下,用户可以手动创建 Span 以获取追踪信息。\n(为了作为示例,我删除了已支持的框架。以下仅为示例。请在使用私有或不支持的框架的 API 时参考)\n例如,当需要追踪HTTP响应时,可以在处理请求的方法内部使用 trace.CreateEntrySpan() 来创建一个 span,在处理完成后使用 trace.StopSpan() 来结束这个 span。在发送HTTP请求时,使用 trace.CreateExitSpan() 来创建一个 span,在请求返回后结束这个 span。\n这里有两个名为 consumer 和 provider 的HTTP服务。当用户访问 consumer 服务时,它在内部接收用户的请求,然后访问 provider 以获取资源。\n// consumer.go package main import ( \u0026#34;io\u0026#34; \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func getProvider() (*http.Response, error) { // 新建 HTTP 请求 \treq, err := http.NewRequest(\u0026#34;GET\u0026#34;, \u0026#34;http://localhost:9998/provider\u0026#34;, http.NoBody) // 在发送 HTTP 请求之前创建 ExitSpan \ttrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { // Injector 向请求中添加特定的 header 信息 \treq.Header.Add(headerKey, headerValue) return nil }) // 结束 ExitSpan,使用 defer 确保在函数返回时执行 \tdefer trace.StopSpan() // 发送请求 \tclient := \u0026amp;http.Client{} resp, err := client.Do(req) if err != nil { return nil, err } return resp, nil } func consumerHandler(w http.ResponseWriter, r *http.Request) { // 创建 EntrySpan 来追踪 consumerHandler 方法的执行 \ttrace.CreateEntrySpan(r.Method+\u0026#34;/consumer\u0026#34;, func(headerKey string) (string, error) { // Extractor 获取请求中添加的 header 信息 \treturn r.Header.Get(headerKey), nil }) // 结束 EntrySpan \tdefer trace.StopSpan() // 准备发送 HTTP 请求 \tresp, err := getProvider() body, err := io.ReadAll(resp.Body) if err != nil { return } _, _ = w.Write(body) } func main() { http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } // provider.go package main import ( \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { // 创建 EntrySpan 来追踪 providerHandler 方法的执行 \ttrace.CreateEntrySpan(\u0026#34;GET:/provider\u0026#34;, func(headerKey string) (string, error) { return r.Header.Get(headerKey), nil }) // 结束 EntrySpan \tdefer trace.StopSpan() _, _ = w.Write([]byte(\u0026#34;success from provider\u0026#34;)) } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) _ = 
http.ListenAndServe(\u0026#34;:9998\u0026#34;, nil) } 然后中终端中执行:\ngo build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o consumer ./consumer.go ./consumer go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o provider ./provider.go ./provider curl 127.0.0.1:9999/consumer 此时 UI 中将会显示你所创建的span信息\n如果需要追踪仅在本地执行的方法,可以使用 trace.CreateLocalSpan()。如果不需要监控来自另一端的信息或状态,可以将 ExitSpan 和 EntrySpan 更改为 LocalSpan。\n以上方法仅作为示例,用户可以决定追踪的粒度以及程序中需要进行追踪的位置。\n注意,如果程序结束得太快,可能会导致 Tracing 数据无法异步发送到 SkyWalking 后端。\n填充 Span 当需要记录额外信息时,包括创建/更新标签、追加日志和设置当前被追踪 Span 的新操作名称时,可以使用这些API。这些操作用于增强追踪信息,提供更详细的上下文描述,有助于更好地理解被追踪的事件或操作。\nToolkit trace APIs 提供了一种简便的方式来访问和操作 Trace 数据:\n 设置标签:SetTag() 添加日志:AddLog() 设置 Span 名称:SetOperationName() 获取各种ID:GetTraceID(), GetSegmentID(), GetSpanID()  例如,如果需要在一个 Span 中记录HTTP状态码,就可以在 Span 未结束时调用以下接口:\ntrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { r.Header.Add(headerKey, headerValue) return nil }) resp, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) trace.SetTag(\u0026#34;status_code\u0026#34;, fmt.Sprintf(\u0026#34;%d\u0026#34;, resp.StatusCode)) spanID := trace.GetSpanID() trace.StopSpan() 在调用这些方法时,当前线程需要有正在活跃的 span。\n异步 APIs 异步API 用于跨 goroutines 操作 spans。包括以下情况:\n 包含多个 goroutines 的程序,需要在不同上下文中中操作 Span。 在异步操作时更新或记录 Span 的信息。 延迟结束 Span。  按照以下步骤使用:\n 获取 CreateSpan 的返回值 SpanRef。 调用 spanRef.PrepareAsync() ,准备在另一个 goroutine 中执行操作。 当前 goroutine 工作结束后,调用 trace.StopSpan() 结束该 span(仅影响当前 goroutine)。 将 spanRef 传递给另一个 goroutine。 完成工作后在任意 goroutine 中调用 spanRef.AsyncFinish()。  以下为示例:\nspanRef, err := trace.CreateLocalSpan(\u0026#34;LocalSpan\u0026#34;) if err != nil { return } spanRef.PrepareAsync() go func(){ // some work \tspanRef.AsyncFinish() }() // some work trace.StopSpan() Correlation Context Correlation Context 用于在 Span 间传递参数,父 Span 会把 Correlation Context 递给其所有子 Spans。它允许在不同应用程序的 spans 之间传输信息。Correlation Context 的默认元素个数为3,其内容长度不能超过128字节。\nCorrelation Context 通常用于以下等情况:\n 在 Spans 之间传递信息:它允许关键信息在不同 Span 之间传输,使上游和下游 Spans 能够获取彼此之间的关联和上下文。 传递业务参数:在业务场景中,涉及在不同 Span 之间传输特定参数或信息,如认证令牌、交易ID等。  用户可以使用 trace.SetCorrelation(key, value) 设置 Correlation Context ,并可以使用 value := trace.GetCorrelation(key) 在下游 spans 中获取相应的值。\n例如在下面的代码中,我们将值存储在 span 的标签中,以便观察结果:\npackage main import ( _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; \u0026#34;net/http\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { ctxValue := trace.GetCorrelation(\u0026#34;key\u0026#34;) trace.SetTag(\u0026#34;result\u0026#34;, ctxValue) } func consumerHandler(w http.ResponseWriter, r *http.Request) { trace.SetCorrelation(\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;) _, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) if err != nil { return } } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } 然后在终端执行:\nexport SW_AGENT_NAME=server go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o server ./server.go ./server curl 127.0.0.1:9999/consumer 最后在 providerHandler() 的 Span 中找到了 Correlation Context 的信息:\n总结 本文讲述了Skywalking Go的 Trace APIs 及其应用。它为用户提供了自定义追踪的功能。\n更多关于该接口的介绍见文档:Tracing APIs。\n欢迎大家来使用新版本。\n","excerpt":"背景介绍 SkyWalking Go是一个开源的非侵入式Golang代理程序,用于监控、追踪和在分布式系统中进行数据收集。它使用户能够观察系统内请求的流程和延迟,从各个系统组件收集性能数据以进行性能监 
…","ref":"/zh/2023-10-18-skywalking-toolkit-trace/","title":"SkyWalking Go Toolkit Trace 详解"},{"body":"","excerpt":"","ref":"/tags/agent/","title":"Agent"},{"body":"CommunityOverCode (原 ApacheCon) 是 Apache 软件基金会(ASF)的官方全球系列大会。自 1998 年以来\u0026ndash;在 ASF 成立之前 \u0026ndash; ApacheCon 已经吸引了各个层次的参与者,在 300 多个 Apache 项目及其不同的社区中探索 \u0026ldquo;明天的技术\u0026rdquo;。CommunityOverCode 通过动手实作、主题演讲、实际案例研究、培训、黑客松活动等方式,展示 Apache 项目的最新发展和新兴创新。\nCommunityOverCode 展示了无处不在的 Apache 项目的最新突破和 Apache 孵化器中即将到来的创新,以及开源开发和以 Apache 之道领导社区驱动的项目。与会者可以了解到独立于商业利益、企业偏见或推销话术之外的核心开源技术。\nSkyWalking的Golang自动探针实践 刘晗 分布式追踪技术在可观测领域尤为重要,促使各个语言的追踪探针的易用性获得了更多的关注。目前在golang语言探针方面大多为手动埋点探针,接入流程过于复杂,而且局限性很强。本次讨论的重点着重于简化golang语言探针的接入方式,创新性的使用了自动埋点技术,并且突破了很多框架中对于上下文信息的依赖限制。\nB站视频地址\nBanyanDB一个高扩展性的分布式追踪数据库 高洪涛 追踪数据是一种用于分析微服务系统性能和故障的重要数据源,它记录了系统中每个请求的调用链路和相关指标。随着微服务系统的规模和复杂度的增长,追踪数据的量级也呈指数级增长,给追踪数据的存储和查询带来了巨大的挑战。传统的关系型数据库或者时序数据库往往难以满足追踪数据的高效存储和灵活查询的需求。 BanyanDB是一个专为追踪数据而设计的分布式数据库,它具有高扩展性、高性能、高可用性和高灵活性的特点。BanyanDB采用了基于时间序列的分片策略,将追踪数据按照时间范围划分为多个分片,每个分片可以独立地进行存储、复制和负载均衡。BanyanDB还支持多维索引,可以根据不同的维度对追踪数据进行快速过滤和聚合。 在本次演讲中,我们将介绍BanyanDB的设计思想、架构和实现细节,以及它在实际场景中的应用和效果。我们也将展示BanyanDB与其他数据库的对比和优势,以及它未来的发展方向和计划。\nB站视频地址\n","excerpt":"CommunityOverCode (原 ApacheCon) 是 Apache 软件基金会(ASF)的官方全球系列大会。自 1998 年以来\u0026ndash;在 ASF 成立之前 \u0026ndash; …","ref":"/zh/2023-08-20-coc-asia-2023/","title":"CommunityOverCode Conference 2023 Asia"},{"body":"","excerpt":"","ref":"/tags/database/","title":"Database"},{"body":"","excerpt":"","ref":"/tags/golang/","title":"Golang"},{"body":"SkyWalking PHP 0.7.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Start 0.7.0 development. by @jmjoy in https://github.com/apache/skywalking-php/pull/90 Add more info for error log. by @jmjoy in https://github.com/apache/skywalking-php/pull/91 Fix amqplib and predis argument problems. by @jmjoy in https://github.com/apache/skywalking-php/pull/92 Add Memcache plugin. by @jmjoy in https://github.com/apache/skywalking-php/pull/93 Refactor mysqli plugin, support procedural api. by @jmjoy in https://github.com/apache/skywalking-php/pull/94 Fix target address in cross process header. by @jmjoy in https://github.com/apache/skywalking-php/pull/95 Release SkyWalking PHP 0.7.0 by @jmjoy in https://github.com/apache/skywalking-php/pull/96  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.7.0...v0.7.0\nPECL https://pecl.php.net/package/skywalking_agent/0.7.0\n","excerpt":"SkyWalking PHP 0.7.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-php-0-7-0/","title":"Release Apache SkyWalking PHP 0.7.0"},{"body":"SkyWalking BanyanDB Helm 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Deploy banyandb with standalone mode by Chart  ","excerpt":"SkyWalking BanyanDB Helm 0.1.0 is released. 
Go to downloads page to find release tars.\nFeatures …","ref":"/events/release-apache-skywalking-banyandb-helm-0-1-0/","title":"Release Apache SkyWalking BanyanDB Helm 0.1.0"},{"body":"","excerpt":"","ref":"/tags/arthas/","title":"Arthas"},{"body":"背景介绍 Arthas 是一款常用的 Java 诊断工具,我们可以在 SkyWalking 监控到服务异常后,通过 Arthas 进一步分析和诊断以快速定位问题。\n在 Arthas 实际使用中,通常由开发人员拷贝或者下载安装包到服务对应的VM或者容器中,attach 到对应的 Java 进程进行问题排查。这一过程不可避免的会造成服务器敏感运维信息的扩散, 而且在分秒必争的问题排查过程中,这些繁琐的操作无疑会浪费大量时间。\nSkyWalking Java Agent 伴随 Java 服务一起启动,并定期上报服务、实例信息给OAP Server。我们可以借助 SkyWalking Java Agent 的插件化能力,开发一个 Arthas 控制插件, 由该插件管理 Arthas 运行生命周期,通过页面化的方式,完成Arthas的启动与停止。最终实现效果可以参考下图:\n要完成上述功能,我们需要实现以下几个关键点:\n 开发 agent arthas-control-plugin,执行 arthas 的启动与停止命令 开发 oap arthas-controller-module ,下发控制命令给 arthas agent plugin 定制 skywalking-ui, 连接 arthas-tunnel-server,发送 arthas 命令并获取执行结果  以上各个模块之间的交互流程如下图所示:\nconnect disconnect 本文涉及的所有代码均已发布在 github skywalking-x-arthas 上,如有需要,大家可以自行下载代码测试。 文章后半部分将主要介绍代码逻辑及其中包含的SkyWalking扩展点。\nagent arthas-control-plugin 首先在 skywalking-java/apm-sniffer/apm-sdk-plugin 下创建一个 arthas-control-plugin, 该模块在打包后会成为 skywalking-agent/plugins 下的一个插件, 其目录结构如下:\narthas-control-plugin/ ├── pom.xml └── src └── main ├── java │ └── org │ └── apache │ └── skywalking │ └── apm │ └── plugin │ └── arthas │ ├── config │ │ └── ArthasConfig.java # 模块配置 │ ├── service │ │ └── CommandListener.java # boot service,监听 oap command │ └── util │ ├── ArthasCtl.java # 控制 arthas 的启动与停止 │ └── ProcessUtils.java ├── proto │ └── ArthasCommandService.proto # 与oap server通信的 grpc 协议定义 └── resources └── META-INF └── services # boot service spi service └── org.apache.skywalking.apm.agent.core.boot.BootService 16 directories, 7 files 在 ArthasConfig.java 中,我们定义了以下配置,这些参数将在 arthas 启动时传递。\n以下的配置可以通过 agent.config 文件、system prop、env variable指定。 关于 skywalking-agent 配置的初始化的具体流程,大家可以参考 SnifferConfigInitializer 。\npublic class ArthasConfig { public static class Plugin { @PluginConfig(root = ArthasConfig.class) public static class Arthas { // arthas 目录  public static String ARTHAS_HOME; // arthas 启动时连接的tunnel server  public static String TUNNEL_SERVER; // arthas 会话超时时间  public static Long SESSION_TIMEOUT; // 禁用的 arthas command  public static String DISABLED_COMMANDS; } } } 接着,我们看下 CommandListener.java 的实现,CommandListener 实现了 BootService 接口, 并通过 resources/META-INF/services 下的文件暴露给 ServiceLoader。\nBootService 的定义如下,共有prepare()、boot()、onComplete()、shutdown()几个方法,这几个方法分别对应插件生命周期的不同阶段。\npublic interface BootService { void prepare() throws Throwable; void boot() throws Throwable; void onComplete() throws Throwable; void shutdown() throws Throwable; default int priority() { return 0; } } 在 ServiceManager 类的 boot() 方法中, 定义了BootService 的 load 与启动流程,该方法 由SkyWalkingAgent 的 premain 调用,在主程序运行前完成初始化与启动:\npublic enum ServiceManager { INSTANCE; ... ... public void boot() { bootedServices = loadAllServices(); prepare(); startup(); onComplete(); } ... ... } 回到我们 CommandListener 的 boot 方法,该方法在 agent 启动之初定义了一个定时任务,这个定时任务会轮询 oap ,查询是否需要启动或者停止arthas:\npublic class CommandListener implements BootService, GRPCChannelListener { ... ... @Override public void boot() throws Throwable { getCommandFuture = Executors.newSingleThreadScheduledExecutor( new DefaultNamedThreadFactory(\u0026#34;CommandListener\u0026#34;) ).scheduleWithFixedDelay( new RunnableWithExceptionProtection( this::getCommand, t -\u0026gt; LOGGER.error(\u0026#34;get arthas command error.\u0026#34;, t) ), 0, 2, TimeUnit.SECONDS ); } ... ... 
} getCommand方法中定义了start、stop的处理逻辑,分别对应页面上的 connect 和 disconnect 操作。 这两个 command 有分别转给 ArthasCtl 的 startArthas 和 stopArthas 两个方法处理,用来控制 arthas 的启停。\n在 startArthas 方法中,启动arthas-core.jar 并使用 skywalking-agent 的 serviceName 和 instanceName 注册连接至配置文件中指定的arthas-tunnel-server。\nArthasCtl 逻辑参考自 Arthas 的 BootStrap.java ,由于不是本篇文章的重点,这里不再赘述,感兴趣的小伙伴可以自行查看。\nswitch (commandResponse.getCommand()) { case START: if (alreadyAttached()) { LOGGER.warn(\u0026#34;arthas already attached, no need start again\u0026#34;); return; } try { arthasTelnetPort = SocketUtils.findAvailableTcpPort(); ArthasCtl.startArthas(PidUtils.currentLongPid(), arthasTelnetPort); } catch (Exception e) { LOGGER.info(\u0026#34;error when start arthas\u0026#34;, e); } break; case STOP: if (!alreadyAttached()) { LOGGER.warn(\u0026#34;no arthas attached, no need to stop\u0026#34;); return; } try { ArthasCtl.stopArthas(arthasTelnetPort); arthasTelnetPort = null; } catch (Exception e) { LOGGER.info(\u0026#34;error when stop arthas\u0026#34;, e); } break; } 看完 arthas 的启动与停止控制逻辑,我们回到 CommandListener 的 statusChanged 方法, 由于要和 oap 通信,这里我们按照惯例监听 grpc channel 的状态,只有状态正常时才会执行上面的getCommand轮询。\npublic class CommandListener implements BootService, GRPCChannelListener { ... ... @Override public void statusChanged(final GRPCChannelStatus status) { if (GRPCChannelStatus.CONNECTED.equals(status)) { Object channel = ServiceManager.INSTANCE.findService(GRPCChannelManager.class).getChannel(); // DO NOT REMOVE Channel CAST, or it will throw `incompatible types: org.apache.skywalking.apm.dependencies.io.grpc.Channel  // cannot be converted to io.grpc.Channel` exception when compile due to agent core\u0026#39;s shade of grpc dependencies.  commandServiceBlockingStub = ArthasCommandServiceGrpc.newBlockingStub((Channel) channel); } else { commandServiceBlockingStub = null; } this.status = status; } ... ... } 上面的代码,细心的小伙伴可能会发现,getChannel() 的返回值被向上转型成了 Object, 而在下面的 newBlockingStub 方法中,又强制转成了 Channel。\n看似有点多此一举,其实不然,我们将这里的转型去掉,尝试编译就会收到下面的错误:\n[ERROR] Failed to execute goal org.apache.maven.plugins:maven-compiler-plugin:3.10.1:compile (default-compile) on project arthas-control-plugin: Compilation failure [ERROR] .../CommandListener.java:[59,103] 不兼容的类型: org.apache.skywalking.apm.dependencies.io.grpc.Channel无法转换为io.grpc.Channel 上面的错误提示 ServiceManager.INSTANCE.findService(GRPCChannelManager.class).getChannel() 的返回值类型是 org.apache.skywalking.apm.dependencies.io.grpc.Channel,无法被赋值给 io.grpc.Channel 引用。\n我们查看GRPCChannelManager的getChannel()方法代码会发现,方法定义的返回值明明是 io.grpc.Channel,为什么编译时会报上面的错误?\n其实这是skywalking-agent的一个小魔法,由于 agent-core 最终会被打包进 skywalking-agent.jar,启动时由系统类装载器(或者其他父级类装载器)直接装载, 为了防止所依赖的类库和被监控服务的类发生版本冲突,agent 核心代码在打包时使用了maven-shade-plugin, 该插件会在 maven package 阶段改变 grpc 依赖的包名, 我们在源代码里看到的是 io.grpc.Channel,其实在真正运行时已经被改成了 org.apache.skywalking.apm.dependencies.io.grpc.Channel,这便可解释上面编译报错的原因。\n除了grpc以外,其他一些 well-known 的 dependency 也会进行 shade 操作,详情大家可以参考 apm-agent-core pom.xml :\n\u0026lt;plugin\u0026gt; \u0026lt;artifactId\u0026gt;maven-shade-plugin\u0026lt;/artifactId\u0026gt; \u0026lt;executions\u0026gt; \u0026lt;execution\u0026gt; \u0026lt;phase\u0026gt;package\u0026lt;/phase\u0026gt; \u0026lt;goals\u0026gt; \u0026lt;goal\u0026gt;shade\u0026lt;/goal\u0026gt; \u0026lt;/goals\u0026gt; \u0026lt;configuration\u0026gt; ... ... 
\u0026lt;relocations\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.com.google.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.com.google.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.grpc.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.grpc.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.netty.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.netty.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.opencensus.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.opencensus.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.perfmark.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.perfmark.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.org.slf4j.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.org.slf4j.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;/relocations\u0026gt; ... ... \u0026lt;/configuration\u0026gt; \u0026lt;/execution\u0026gt; \u0026lt;/executions\u0026gt; \u0026lt;/plugin\u0026gt; 除了上面的注意点以外,我们来看一下另一个场景,假设我们需要在 agent plugin 的 interceptor 中使用 plugin 中定义的 BootService 会发生什么?\n我们回到 BootService 的加载逻辑,为了加载到 plugin 中定义的BootService,ServiceLoader 指定了类装载器为AgentClassLoader.getDefault(), (这行代码历史非常悠久,可以追溯到2018年:Allow use SkyWalking plugin to override service in Agent core. #1111 ), 由此可见,plugin 中定义的 BootService 的 classloader 是 AgentClassLoader.getDefault():\nvoid load(List\u0026lt;BootService\u0026gt; allServices) { for (final BootService bootService : ServiceLoader.load(BootService.class, AgentClassLoader.getDefault())) { allServices.add(bootService); } } 再来看下 interceptor 的加载逻辑,InterceptorInstanceLoader.java 的 load 方法规定了如果父加载器相同,plugin 中的 interceptor 将使用一个新创建的 AgentClassLoader (在绝大部分简单场景中,plugin 的 interceptor 都由同一个 AgentClassLoader 加载):\npublic static \u0026lt;T\u0026gt; T load(String className, ClassLoader targetClassLoader) throws IllegalAccessException, InstantiationException, ClassNotFoundException, AgentPackageNotFoundException { ... ... pluginLoader = EXTEND_PLUGIN_CLASSLOADERS.get(targetClassLoader); if (pluginLoader == null) { pluginLoader = new AgentClassLoader(targetClassLoader); EXTEND_PLUGIN_CLASSLOADERS.put(targetClassLoader, pluginLoader); } ... ... 
} 按照类装载器的委派机制,interceptor 中如果用到了 BootService,也会由当前的类的装载器去装载。 所以 ServiceManager 中装载的 BootService 和 interceptor 装载的 BootService 并不是同一个 (一个 class 文件被不同的 classloader 装载了两次),如果在 interceptor 中 调用 BootService 方法,同样会发生 cast 异常。 由此可见,目前的实现并不支持我们在interceptor中直接调用 plugin 中 BootService 的方法,如果需要调用,只能将 BootService 放到 agent-core 中,由更高级别的类装载器优先装载。\n这其实并不是 skywalking-agent 的问题,skywalking agent plugin 专注于自己的应用场景,只需要关注 trace、meter 以及默认 BootService 的覆盖就可以了。 只是我们如果有扩展 skywalking-agent 的需求,要对其类装载机制做到心中有数,否则可能会出现一些意想不到的问题。\noap arthas-controller-module 看完 agent-plugin 的实现,我们再来看看 oap 部分的修改,oap 同样是模块化的设计,我们可以很轻松的增加一个新的模块,在 /oap-server/ 目录下新建 arthas-controller 子模块:\narthas-controller/ ├── pom.xml └── src └── main ├── java │ └── org │ └── apache │ └── skywalking │ └── oap │ └── arthas │ ├── ArthasControllerModule.java # 模块定义 │ ├── ArthasControllerProvider.java # 模块逻辑实现者 │ ├── CommandQueue.java │ └── handler │ ├── CommandGrpcHandler.java # grpc handler,供 plugin 通信使用 │ └── CommandRestHandler.java # http handler,供 skywalking-ui 通信使用 ├── proto │ └── ArthasCommandService.proto └── resources └── META-INF └── services # 模块及模块实现的 spi service ├── org.apache.skywalking.oap.server.library.module.ModuleDefine └── org.apache.skywalking.oap.server.library.module.ModuleProvider 模块的定义非常简单,只包含一个模块名,由于我们新增的模块并不需要暴露service给其他模块调用,services 我们返回一个空数组\npublic class ArthasControllerModule extends ModuleDefine { public static final String NAME = \u0026#34;arthas-controller\u0026#34;; public ArthasControllerModule() { super(NAME); } @Override public Class\u0026lt;?\u0026gt;[] services() { return new Class[0]; } } 接着是模块实现者,实现者取名为 default,module 指定该 provider 所属模块,由于没有模块的自定义配置,newConfigCreator 我们返回null即可。 start 方法分别向 CoreModule 的 grpc 服务和 http 服务注册了两个 handler,grpc 服务和 http 服务就是我们熟知的 11800 和 12800 端口:\npublic class ArthasControllerProvider extends ModuleProvider { @Override public String name() { return \u0026#34;default\u0026#34;; } @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return ArthasControllerModule.class; } @Override public ConfigCreator\u0026lt;?\u0026gt; newConfigCreator() { return null; } @Override public void prepare() throws ServiceNotProvidedException { } @Override public void start() throws ServiceNotProvidedException, ModuleStartException { // grpc service for agent  GRPCHandlerRegister grpcService = getManager().find(CoreModule.NAME) .provider() .getService(GRPCHandlerRegister.class); grpcService.addHandler( new CommandGrpcHandler() ); // rest service for ui  HTTPHandlerRegister restService = getManager().find(CoreModule.NAME) .provider() .getService(HTTPHandlerRegister.class); restService.addHandler( new CommandRestHandler(), Collections.singletonList(HttpMethod.POST) ); } @Override public void notifyAfterCompleted() throws ServiceNotProvidedException { } @Override public String[] requiredModules() { return new String[0]; } } 最后在配置文件中注册本模块及模块实现者,下面的配置表示 arthas-controller 这个 module 由 default provider 提供实现:\narthas-controller:selector:defaultdefault:CommandGrpcHandler 和 CommandHttpHandler 的逻辑非常简单,CommandHttpHandler 定义了 connect 和 disconnect 接口, 收到请求后会放到一个 Queue 中供 CommandGrpcHandler 消费,Queue 的实现如下,这里不再赘述:\npublic class CommandQueue { private static final Map\u0026lt;String, Command\u0026gt; COMMANDS = new ConcurrentHashMap\u0026lt;\u0026gt;(); // produce by connect、disconnect public static void produceCommand(String serviceName, String instanceName, Command command) { COMMANDS.put(serviceName + instanceName, command); } // consume by agent getCommand task public static Optional\u0026lt;Command\u0026gt; consumeCommand(String serviceName, String instanceName) { return Optional.ofNullable(COMMANDS.remove(serviceName + instanceName)); } } skywalking-ui arthas console 完成了 agent 和 oap 的开发,我们再看下 ui 部分:\n connect:调用oap server connect 接口,并连接 arthas-tunnel-server disconnect:调用oap server disconnect 接口,并与 arthas-tunnel-server 断开连接 arthas 命令交互,这部分代码主要参考 arthas,大家可以查看 web-ui console 的实现  修改完skywalking-ui的代码后,我们可以直接通过 npm run dev 测试了。\n如果需要通过主项目打包,别忘了在apm-webapp 的 ApplicationStartUp.java 类中添加一条 arthas 的路由:\nServer .builder() .port(port, SessionProtocol.HTTP) .service(\u0026#34;/arthas\u0026#34;, oap) .service(\u0026#34;/graphql\u0026#34;, oap) .service(\u0026#34;/internal/l7check\u0026#34;, HealthCheckService.of()) .service(\u0026#34;/zipkin/config.json\u0026#34;, zipkin) .serviceUnder(\u0026#34;/zipkin/api\u0026#34;, zipkin) .serviceUnder(\u0026#34;/zipkin\u0026#34;, FileService.of( ApplicationStartUp.class.getClassLoader(), \u0026#34;/zipkin-lens\u0026#34;) .orElse(zipkinIndexPage)) .serviceUnder(\u0026#34;/\u0026#34;, FileService.of( ApplicationStartUp.class.getClassLoader(), \u0026#34;/public\u0026#34;) .orElse(indexPage)) .build() .start() .join(); 总结  BootService 启动及停止流程 如何利用 BootService 实现自定义逻辑 Agent Plugin 的类装载机制 maven-shade-plugin 的使用与注意点 如何利用 ModuleDefine 与 ModuleProvider 定义新的模块 如何向 GRPC、HTTP Service 添加新的 handler  如果你还有任何的疑问,欢迎大家与我交流 。\n","excerpt":"背景介绍 Arthas 是一款常用的 Java 诊断工具,我们可以在 SkyWalking 监控到服务异常后,通过 Arthas 进一步分析和诊断以快速定位问题。\n在 Arthas 实际使用中,通常由 …","ref":"/zh/2023-09-17-integrating-skywalking-with-arthas/","title":"将 Apache SkyWalking 与 Arthas 集成"},{"body":"SkyWalking Eyes 0.5.0 is released. Go to downloads page to find release tars.\n feat(header templates): add support for AGPL-3.0 by @elijaholmos in https://github.com/apache/skywalking-eyes/pull/125 Upgrade go version to 1.18 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/126 Add MulanPSL-2.0 support. 
by @jmjoy in https://github.com/apache/skywalking-eyes/pull/127 New Header Template: GPL-3.0-or-later by @ddlees in https://github.com/apache/skywalking-eyes/pull/128 Update README.md by @rovast in https://github.com/apache/skywalking-eyes/pull/129 Add more .env.[mode] support for VueJS project by @rovast in https://github.com/apache/skywalking-eyes/pull/130 Docker Multiple Architecture Support :fixes#9089 by @mohammedtabish0 in https://github.com/apache/skywalking-eyes/pull/132 Polish maven test for convenient debug by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/134 feat: list files by git when possible by @tisonkun in https://github.com/apache/skywalking-eyes/pull/133 Switch to npm ci for reliable builds by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/135 Fix optional dependencies are not excluded by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/136 Fix exclude not work for transitive dependencies and add recursive config by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/137 Add some tests for maven resovler by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/138 feat(header-fix): add Svelte support by @elijaholmos in https://github.com/apache/skywalking-eyes/pull/139 dep: do not write license files if they already exist by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/140 fix: not ignore *.txt to make sure files like CMakeLists.txt can be checked by @acelyc111 in https://github.com/apache/skywalking-eyes/pull/141 fix license header normalizer by @xiaoyawei in https://github.com/apache/skywalking-eyes/pull/142 Substitute variables in license content for header command by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/143 Correct indent in Apache-2.0 template by @tisonkun in https://github.com/apache/skywalking-eyes/pull/144 Add copyright-year configuration by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/145 dep/maven: use output file to store the dep tree for cleaner result by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/146 dep/maven: resolve dependencies before analysis by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/147 gha: switch to composite running mode and set up cache by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/149 gha: switch to composite running mode and set up cache by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/150 Fix GitHub Actions wrong path by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/151 Normalize license for cargo. by @jmjoy in https://github.com/apache/skywalking-eyes/pull/153 Remove space characters in license for cargo. 
by @jmjoy in https://github.com/apache/skywalking-eyes/pull/154 Bump up dependencies to fix CVE by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/155 Bump up GHA to depress warnings by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/156 Leverage the built-in cache in setup-go@v4 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/157 Dependencies check should report unknown licneses by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/158 Fix wrong indentation in doc by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/159 Add EPL-2.0 header template by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/160 Fix wrong indentation in doc about multi license config by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/161 dependency resolve with default template and specified output of license by @crholm in https://github.com/apache/skywalking-eyes/pull/163 Bump up go git to support .gitconfig user path by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/164 Draft release notes for 0.5.0 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/165 Remove \u0026ldquo;portions copyright\u0026rdquo; header normalizer by @antgamdia in https://github.com/apache/skywalking-eyes/pull/166  Full Changelog: https://github.com/apache/skywalking-eyes/compare/v0.4.0...v0.5.0\n","excerpt":"SkyWalking Eyes 0.5.0 is released. Go to downloads page to find release tars.\n feat(header …","ref":"/events/release-apache-skywalking-eyes-0-5-0/","title":"Release Apache SkyWalking Eyes 0.5.0"},{"body":"Abstract Apache SkyWalking hosts SkyWalking Summit 2023 on Nov. 4th, 2023, UTC+8, sponsored by ZMOps and Tetrate.\nWe are going to share SkyWalking\u0026rsquo;s roadmap, features, product experiences, and open-source culture.\nWelcome to join us.\nVenue Addr./地址 上海大华虹桥假日酒店\nDate 8:00 - 17:00, Nov 4th.\nRegister Register for IN-PERSON ticket\nCall For Proposals (CFP) The Call For Proposals open from now to 18:00 on Oct. 27th 2023, UTC+8. Submit your proposal at here\nWe have 1 open session and 8 sessions for the whole event.\n Open session is reserved for SkyWalking PMC members. 6 sessions are opened for CFP process. 2 sessions are reserved for sponsors.  Sponsors  ZMOps Inc. Tetrate Inc.  Anti-harassment policy SkyWalkingDay is dedicated to providing a harassment-free experience for everyone. We do not tolerate harassment of participants in any form. Sexual language and imagery will also not be tolerated in any event venue. Participants violating these rules may be sanctioned or expelled without a refund, at the discretion of the event organizers. Our anti-harassment policy can be found at Apache website.\nContact Us Send mail to dev@skywalking.apache.org.\n","excerpt":"Abstract Apache SkyWalking hosts SkyWalking Summit 2023 on Nov. 4th, 2023, UTC+8, sponsored by ZMOps …","ref":"/events/summit-23-cn/","title":"SkyWalking Summit 2023 @ Shanghai China"},{"body":"SkyWalking 9.6.0 is released. Go to downloads page to find release tars.\nNew Alerting Kernel  MQE(Metrics Query Expression) and a new notification mechanism are supported.  Support Loki LogQL  Newly added support for Loki LogQL and Grafana Loki Dashboard for SkyWalking collected logs  WARNING  ElasticSearch 6 storage relative tests are removed. It worked and is not promised due to end of life officially.  Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. 
Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. 
Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.6.0 is released. 
Go to downloads page to find release tars.\nNew Alerting Kernel …","ref":"/events/release-apache-skywalking-apm-9.6.0/","title":"Release Apache SkyWalking APM 9.6.0"},{"body":"SkyWalking Java Agent 9.0.0 is released. Go to downloads page to find release tars. Changes by Version\n9.0.0 Kernel Updates  Support re-transform/hot-swap classes with other java agents, and remove the obsolete cache enhanced class feature. Implement new naming policies for names of auxiliary type, interceptor delegate field, renamed origin method, method access name, method cache value field. All names are under sw$ name trait. They are predictable and unchanged after re-transform.  * SWAuxiliaryTypeNamingStrategy Auxiliary type name pattern: \u0026lt;origin_class_name\u0026gt;$\u0026lt;name_trait\u0026gt;$auxiliary$\u0026lt;auxiliary_type_instance_hash\u0026gt; * DelegateNamingResolver Interceptor delegate field name pattern: \u0026lt;name_trait\u0026gt;$delegate$\u0026lt;class_name_hash\u0026gt;$\u0026lt;plugin_define_hash\u0026gt;$\u0026lt;intercept_point_hash\u0026gt; * SWMethodNameTransformer Renamed origin method pattern: \u0026lt;name_trait\u0026gt;$original$\u0026lt;method_name\u0026gt;$\u0026lt;method_description_hash\u0026gt; * SWImplementationContextFactory Method cache value field pattern: cachedValue$\u0026lt;name_trait\u0026gt;$\u0026lt;origin_class_name_hash\u0026gt;$\u0026lt;field_value_hash\u0026gt; Accessor method name pattern: \u0026lt;renamed_origin_method\u0026gt;$accessor$\u0026lt;name_trait\u0026gt;$\u0026lt;origin_class_name_hash\u0026gt; Here is an example of manipulated enhanced class with new naming policies of auxiliary classes, fields, and methods\nimport sample.mybatis.controller.HotelController$sw$auxiliary$19cja42; import sample.mybatis.controller.HotelController$sw$auxiliary$p257su0; import sample.mybatis.domain.Hotel; import sample.mybatis.service.HotelService; @RequestMapping(value={\u0026#34;/hotel\u0026#34;}) @RestController public class HotelController implements EnhancedInstance { @Autowired @lazy private HotelService hotelService; private volatile Object _$EnhancedClassField_ws; // Interceptor delegate fields  public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$ain2do0$8im5jm1; public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$ain2do0$edkmf61; public static volatile /* synthetic */ ConstructorInter sw$delegate$td03673$ain2do0$qs9unv1; public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$fl4lnk1$m3ia3a2; public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$fl4lnk1$sufrvp1; public static volatile /* synthetic */ ConstructorInter sw$delegate$td03673$fl4lnk1$cteu7s1; // Origin method cache value field  private static final /* synthetic */ Method cachedValue$sw$td03673$g5sobj1; public HotelController() { this(null); sw$delegate$td03673$ain2do0$qs9unv1.intercept(this, new Object[0]); } private /* synthetic */ HotelController(sw.auxiliary.p257su0 p257su02) { } @GetMapping(value={\u0026#34;city/{cityId}\u0026#34;}) public Hotel selectByCityId(@PathVariable(value=\u0026#34;cityId\u0026#34;) int n) { // call interceptor with auxiliary type and parameters and origin method object  return (Hotel)sw$delegate$td03673$ain2do0$8im5jm1.intercept(this, new Object[]{n}, new HotelController$sw$auxiliary$19cja42(this, n), cachedValue$sw$td03673$g5sobj1); } // Renamed origin method  private /* synthetic */ Hotel sw$origin$selectByCityId$a8458p3(int cityId) { /*22*/ return 
this.hotelService.selectByCityId(cityId); } // Accessor of renamed origin method, calling from auxiliary type  final /* synthetic */ Hotel sw$origin$selectByCityId$a8458p3$accessor$sw$td03673(int n) { // Calling renamed origin method  return this.sw$origin$selectByCityId$a8458p3(n); } @OverRide public Object getSkyWalkingDynamicField() { return this._$EnhancedClassField_ws; } @OverRide public void setSkyWalkingDynamicField(Object object) { this._$EnhancedClassField_ws = object; } static { ClassLoader.getSystemClassLoader().loadClass(\u0026#34;org.apache.skywalking.apm.dependencies.net.bytebuddy.dynamic.Nexus\u0026#34;).getMethod(\u0026#34;initialize\u0026#34;, Class.class, Integer.TYPE).invoke(null, HotelController.class, -1072476370); // Method object  cachedValue$sw$td03673$g5sobj1 = HotelController.class.getMethod(\u0026#34;selectByCityId\u0026#34;, Integer.TYPE); } } Auxiliary type of Constructor :\nclass HotelController$sw$auxiliary$p257su0 { } Auxiliary type of selectByCityId method:\nclass HotelController$sw$auxiliary$19cja42 implements Runnable, Callable { private HotelController argument0; private int argument1; public Object call() throws Exception { return this.argument0.sw$origin$selectByCityId$a8458p3$accessor$sw$td03673(this.argument1); } @OverRide public void run() { this.argument0.sw$origin$selectByCityId$a8458p3$accessor$sw$td03673(this.argument1); } HotelController$sw$auxiliary$19cja42(HotelController hotelController, int n) { this.argument0 = hotelController; this.argument1 = n; } } Features and Bug Fixes  Support Jdk17 ZGC metric collect Support Jetty 11.x plugin Support access to the sky-walking tracer context in spring gateway filter Fix the scenario of using the HBase plugin with spring-data-hadoop. Add RocketMQ 5.x plugin Fix the conflict between the logging kernel and the JDK threadpool plugin. Fix the thread safety bug of finishing operation for the span named \u0026ldquo;SpringCloudGateway/sendRequest\u0026rdquo; Fix NPE in guava-eventbus-plugin. Add WebSphere Liberty 23.x plugin Add Plugin to support aerospike Java client Add ClickHouse parsing to the jdbc-common plugin. Support to trace redisson lock Upgrade netty-codec-http2 to 4.1.94.Final Upgrade guava to 32.0.1 Fix issue with duplicate enhancement by ThreadPoolExecutor Add plugin to support for RESTeasy 6.x. Fix the conditions for resetting UUID, avoid the same uuid causing the configuration not to be updated. Fix witness class in springmvc-annotation-5.x-plugin to avoid falling into v3 use cases. Fix Jedis-2.x plugin bug and add test for Redis cluster scene Merge two instrumentation classes to avoid duplicate enhancements in MySQL plugins. Support asynchronous invocation in jetty client 9.0 and 9.x plugin Add nacos-client 2.x plugin Staticize the tags for preventing synchronization in JDK 8 Add RocketMQ-Client-Java 5.x plugin Fix NullPointerException in lettuce-5.x-plugin.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 9.0.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-9-0-0/","title":"Release Apache SkyWalking Java Agent 9.0.0"},{"body":"SkyWalking PHP 0.6.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Polish doc about Swoole by @wu-sheng in https://github.com/apache/skywalking-php/pull/73 Start 0.6.0 development. 
by @jmjoy in https://github.com/apache/skywalking-php/pull/74 Fix hook for Doctrine PDO class by @matikij in https://github.com/apache/skywalking-php/pull/76 Log Exception in tracing span when throw. by @jmjoy in https://github.com/apache/skywalking-php/pull/75 Upgrade dependencies and adapt. by @jmjoy in https://github.com/apache/skywalking-php/pull/77 Fix required rust version and add runing php-fpm notice in docs. by @jmjoy in https://github.com/apache/skywalking-php/pull/78 Bump openssl from 0.10.48 to 0.10.55 by @dependabot in https://github.com/apache/skywalking-php/pull/79 Fix the situation where the redis port is string. by @jmjoy in https://github.com/apache/skywalking-php/pull/80 Optionally enable zend observer api for auto instrumentation. by @jmjoy in https://github.com/apache/skywalking-php/pull/81 Fix the empty span situation in redis after hook. by @jmjoy in https://github.com/apache/skywalking-php/pull/82 Add mongodb pluhgin. by @jmjoy in https://github.com/apache/skywalking-php/pull/83 Update rust nightly toolchain in CI and format. by @jmjoy in https://github.com/apache/skywalking-php/pull/84 Add notice document for skywalking_agent.enable. by @jmjoy in https://github.com/apache/skywalking-php/pull/85 Upgrade dependencies. by @jmjoy in https://github.com/apache/skywalking-php/pull/86 Fix docs by @heyanlong in https://github.com/apache/skywalking-php/pull/87 Add kafka reporter. by @jmjoy in https://github.com/apache/skywalking-php/pull/88 Release SkyWalking PHP Agent 0.6.0 by @jmjoy in https://github.com/apache/skywalking-php/pull/89  New Contributors  @matikij made their first contribution in https://github.com/apache/skywalking-php/pull/76  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.5.0...v0.6.0\nPECL https://pecl.php.net/package/skywalking_agent/0.6.0\n","excerpt":"SkyWalking PHP 0.6.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skwaylking-php-0-6-0/","title":"Release Apache SkyWalking PHP 0.6.0"},{"body":"On Aug. 10th, 2023, HashiCorp announced to adopt the Business Source License (BSL) from Mozilla Public License v2.0 (MPL 2.0), here is their post. They officially annouced they have changed the license for the ALL of their open-source products from the previous MPL 2.0 to a source-available license, BSL 1.1. Meanwhile, HashiCorp APIs, SDKs, and almost all other libraries will remain MPL 2.0.\nHashiCorp Inc. is one of the most important vendors in the cloud-native landscape, as well as Golang ecosystem. This kind of changes would have potential implications for SkyWalking, which is closely integrated with cloud-native technology stacks.\nConclusion First  What does that mean for SkyWalking users?  SkyWalking community has evaluated our dependencies from HashiCorp products and libraries, the current conclusion is\nSkyWalking users would NOT suffer any implication. 
All components of SkyWalking don\u0026rsquo;t have hard-dependency on BSL license affected codes.\nSkyWalking community have found out all following dependencies of all relative repositories, all licenses are TRUELY stayed unchanged, and compatible with Apache 2.0 License.\n OAP Server @kezhenxu94 @wu-sheng  consul-client Apache 2.0 Repo archived on Jul 27, 2023   BanyanDB @hanahmily @lujiajing1126  Server @hanahmily  hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0   CLI @hanahmily No HashiCorp Dependency   SkyWalking OAP CLI @kezhenxu94  github.com/hashicorp/hcl v1.0.0 MPL-2.0 All under swck as transitive dependencies   SWCK @hanahmily  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/go.net BSD-3 hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0   Go agent @mrproliu  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-hclog MIT hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-kms-wrapping/entropy MPL-2.0 hashicorp/go-kms-wrapping/entropy/v2 MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-plugin MPL-2.0 hashicorp/go-retryablehttp MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-secure-stdlib/base62 MPL-2.0 hashicorp/go-secure-stdlib/mlock MPL-2.0 hashicorp/go-secure-stdlib/parseutil MPL-2.0 hashicorp/go-secure-stdlib/password MPL-2.0 hashicorp/go-secure-stdlib/tlsutil MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/go-version MPL-2.0 hashicorp/go.net BSD-3-Clause hashicorp/golang-lru MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0 hashicorp/vault/api MPL-2.0 hashicorp/vault/sdk MPL-2.0 hashicorp/yamux MPL-2.0   SkyWalking eyes @kezhenxu94  none   SkyWalking Infra e2e @kezhenxu94  all under swck as transitive dependencies   SkyWalking rover(ebpf agent) @mrproliu  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-hclog MIT hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-retryablehttp MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0   SkyWalking satellite @mrproliu  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/go.net BSD-3-Clause hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0   SkyWalking Terraform (scripts) @kezhenxu94  No HashiCorp Dependency The scripts for Terraform users only. No hard requirement.    
The GitHub ID is listed about the PMC members did the evaluations.\nFAQ If I am using Consul to manage SkyWalking Cluster or configurations, does this license change bring an implication? YES, anyone using their server sides would be affected once you upgrade to later released versions after Aug. 10th, 2023.\nThis is HashiCorp\u0026rsquo;s statement\n End users can continue to copy, modify, and redistribute the code for all non-commercial and commercial use, except where providing a competitive offering to HashiCorp. Partners can continue to build integrations for our joint customers. We will continue to work closely with the cloud service providers to ensure deep support for our mutual technologies. Customers of enterprise and cloud-managed HashiCorp products will see no change as well. Vendors who provide competitive services built on our community products will no longer be able to incorporate future releases, bug fixes, or security patches contributed to our products.\n So, notice that, the implication about whether voilating BSL 1.1 is determined by the HashiCorp Inc about the status of the identified competitive relationship. We can\u0026rsquo;t provide any suggestions. Please refer to FAQs and contacts for the official explanations.\nWill SkyWalking continoue to use HashiCorp Consul as an optional cluster coordinator and/or an optional dynamic configuration server? For short term, YES, we will keep that part of codes, as the licenses of the SDK and the APIs are still in the MPL 2.0.\nBut, during the evaluation, we noticed the consul client we are using is rickfast/consul-client which had been archived by the owner on Jul 27, 2023. So, we are facing the issues that no maintaining and no version to upgrade. If there is not a new consul Java client lib available, we may have to remove this to avoid CVEs or version incompatible with new released servers.\n","excerpt":"On Aug. 10th, 2023, HashiCorp announced to adopt the Business Source License (BSL) from Mozilla …","ref":"/blog/2023-08-13-hashicorp-bsl/","title":"The Statement for SkyWalking users on HashiCorp license changes"},{"body":"SkyWalking Rust 0.8.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Add kafka reporter. by @jmjoy in https://github.com/apache/skywalking-rust/pull/61 Rename AbstractSpan to HandleSpanObject. by @jmjoy in https://github.com/apache/skywalking-rust/pull/62 Bump to 0.8.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/63  ","excerpt":"SkyWalking Rust 0.8.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-8-0/","title":"Release Apache SkyWalking Rust 0.8.0"},{"body":"SkyWalking Cloud on Kubernetes 0.8.0 is released. Go to downloads page to find release tars.\nFeatures  [Breaking Change] Remove the way to configure the agent through Configmap.  Bugs  Fix errors in banyandb e2e test.  Chores  Bump up golang to v1.20. Bump up golangci-lint to v1.53.3. Bump up skywalking-java-agent to v8.16.0. Bump up kustomize to v4.5.6. Bump up SkyWalking OAP to 9.5.0.  ","excerpt":"SkyWalking Cloud on Kubernetes 0.8.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-8-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.8.0"},{"body":"","excerpt":"","ref":"/tags/metrics/","title":"Metrics"},{"body":"Announcing Apache SkyWalking Go 0.2.0 I\u0026rsquo;m excited to announce the release of Apache SkyWalking Go 0.2.0! 
This version packs several awesome new features that I\u0026rsquo;ll overview below.\nLog Reporting The log reporting feature allows the Go agent to automatically collect log content from supported logging frameworks like logrus and zap. The logs are organized and sent to the SkyWalking backend for visualization. You can see how the logs appear for each service in the SkyWalking UI:\nMaking Logs Searchable You can configure certain log fields to make them searchable in SkyWalking. Set the SW_AGENT_LOG_REPORTER_LABEL_KEYS environment variable to include additional fields beyond the default log level.\nFor example, with logrus:\n# define log with fields logrus.WithField(\u0026#34;module\u0026#34;, \u0026#34;test-service\u0026#34;).Info(\u0026#34;test log\u0026#34;) Metrics Reporting The agent can now collect and report custom metrics data from runtime/metrics to the backend. Supported metrics are documented here.\nAutomatic Instrumentation In 0.1.0, you had to manually integrate the agent into your apps. Now, the new commands can automatically analyze and instrument projects at a specified path, no code changes needed! Try using the following command to import skywalking-go into your project:\n# inject to project at current path skywalking-go-agent -inject=./ -all Or you can still use the original manual approach if preferred.\nGet It Now! Check out the CHANGELOG for the full list of additions and fixes. I encourage you to try out SkyWalking Go 0.2.0 today! Let me know if you have any feedback.\n","excerpt":"Announcing Apache SkyWalking Go 0.2.0 I\u0026rsquo;m excited to announce the release of Apache SkyWalking …","ref":"/blog/2023-07-31-skywalking-go-0.2.0-release/","title":"New Features of SkyWalking Go 0.2.0"},{"body":"SkyWalking Go 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance the plugin rewrite ability to support switch and if/else in the plugin codes. Support inject the skywalking-go into project through agent. Support add configuration for plugin. Support metrics report API for plugin. Support report Golang runtime metrics. Support log reporter. Enhance the logrus logger plugin to support adapt without any settings method invoke. Disable sending observing data if the gRPC connection is not established for reducing the connection error log. Support enhance vendor management project. Support using base docker image to building the application.  Plugins  Support go-redis v9 redis client framework. Support collecting Native HTTP URI parameter on server side. Support Mongo database client framework. Support Native SQL database client framework with MySQL Driver. Support Logrus log report to the backend. Support Zap log report to the backend.  Documentation  Combine Supported Libraries and Performance Test into Plugins section. Add Tracing, Metrics and Logging document into Plugins section.  Bug Fixes  Fix throw panic when log the tracing context before agent core initialized. Fix plugin version matcher tryToFindThePluginVersion to support capital letters in module paths and versions.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Go 0.2.0 is released. 
Go to downloads page to find release tars.\nFeatures  Enhance the …","ref":"/events/release-apache-skwaylking-go-0.2.0/","title":"Release Apache SkyWalking Go 0.2.0"},{"body":"今年 COSCUP 2023 在国立台湾科技大学举办。 COSCUP 是由台湾开放原始码社群联合推动的年度研讨会,起源于2006年,是台湾自由软体运动 (FOSSM) 重要的推动者之一。活动包括有讲座、摊位、社团同乐会等,除了邀请国际的重量级演讲者之外,台湾本土的自由软体推动者也经常在此发表演说,会议的发起人、工作人员与演讲者都是志愿参与的志工。COSCUP 的宗旨在于提供一个连接开放原始码开发者、使用者与推广者的平台。希望借由每年一度的研讨会来推动自由及开放原始码软体 (FLOSS)。由于有许多赞助商及热心捐助者,所有议程都是免费参加。\n在Go语言中使用自动增强探针完成链路追踪以及监控 B站视频地址\n刘晗,Tetrate\n  讲师介绍 刘晗,Tetrate 工程师,Apache SkyWalking PMC 成员,专注于应用性能可观测性领域。\n  议题概要\n   为什么需要自动增强探针 Go Agent演示 实现原理 未来展望  ","excerpt":"今年 COSCUP 2023 在国立台湾科技大学举办。 COSCUP 是由台湾开放原始码社群联合推动的年度研讨会,起源于2006年,是台湾自由软体运动 (FOSSM) 重要的推动者之一。活动包括有讲 …","ref":"/zh/2023-07-30-complete-auto-instrumentation-go-agent-for-distributed-tracing-and-monitoring/","title":"[视频] 在Go语言中使用自动增强探针完成链路追踪以及监控 - COSCUP Taiwan 2023"},{"body":"SkyWalking Kubernetes Helm Chart 4.5.0 is released. Go to downloads page to find release tars.\n Add helm chart for swck v0.7.0. Add pprof port export in satellite. Trunc the resource name in swck\u0026rsquo;s helm chart to no more than 63 characters. Adding the configmap into cluster role for oap init mode. Add config to set Pod securityContext. Keep the job name prefix the same as OAP Deployment name. Use startup probe option for first initialization of application Allow setting env for UI deployment. Add Istio ServiceEntry permissions.  ","excerpt":"SkyWalking Kubernetes Helm Chart 4.5.0 is released. Go to downloads page to find release tars.\n Add …","ref":"/events/release-apache-skywalking-kubernetes-helm-chart-4.5.0/","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.5.0"},{"body":"SkyWalking BanyanDB 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Add TSDB concept document. [UI] Add YAML editor for inputting query criteria. Refactor TopN to support NULL group while keeping seriesID from the source measure. Add a sharded buffer to TSDB to replace Badger\u0026rsquo;s memtable. Badger KV only provides SST. Add a meter system to control the internal metrics. Add multiple metrics for measuring the storage subsystem. Refactor callback of TopNAggregation schema event to avoid deadlock and reload issue. Fix max ModRevision computation with inclusion of TopNAggregation Enhance meter performance Reduce logger creation frequency Add units to memory flags Introduce TSTable to customize the block\u0026rsquo;s structure Add /system endpoint to the monitoring server that displays a list of nodes' system information. Enhance the liaison module by implementing access logging. Add the Istio scenario stress test based on the data generated by the integration access log. Generalize the index\u0026rsquo;s docID to uint64. Remove redundant ID tag type. Improve granularity of index in measure by leveling up from data point to series. [UI] Add measure CRUD operations. [UI] Add indexRule CRUD operations. [UI] Add indexRuleBinding CRUD operations.  Bugs  Fix iterator leaks and ensure proper closure and introduce a closer to guarantee all iterators are closed Fix resource corrupts caused by update indexRule operation Set the maximum integer as the limit for aggregation or grouping operations when performing aggregation or grouping operations in a query plan.  Chores  Bump go to 1.20. 
Set KV\u0026rsquo;s minimum memtable size to 8MB [docs] Fix docs crud examples error Modified TestGoVersion to check for CPU architecture and Go Version Bump node to 18.16  ","excerpt":"SkyWalking BanyanDB 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Add TSDB …","ref":"/events/release-apache-skywalking-banyandb-0-4-0/","title":"Release Apache SkyWalking BanyanDB 0.4.0"},{"body":"Background In previous articles, we have discussed how to use SkyWalking and eBPF for performance problem detection within processes and networks. They are good methods for locating issues, but some challenges remain:\n The timing of the task initiation: It\u0026rsquo;s always challenging to identify the processes that require performance monitoring when problems occur. Typically, manual engagement is required to identify the processes and the types of performance analysis necessary, which costs extra time during crash recovery. Root cause locating and crash recovery time often conflict with each other. In practice, rebooting is usually the first choice for recovery, but it destroys the crash site. Resource consumption of tasks: It is difficult to determine the profiling scope. A wider profiling scope consumes more resources than it should. We need a method to manage resource consumption and understand which processes necessitate performance analysis. Engineer capabilities: On-call duty is usually covered by the whole team, which includes both junior and senior engineers. Even senior engineers have limits to their understanding of a complex distributed system; it is nearly impossible for a single person to understand the whole system.  Continuous Profiling is a newly created mechanism to resolve the above issues.\nAutomate Profiling As profiling is resource-intensive and requires deep experience, how about introducing a method to narrow the scope and automate profiling, driven by policies created by senior SRE engineers? So, in 9.5.0, SkyWalking first introduced preset policy rules that let the eBPF Agent monitor specific services in a low-overhead manner and run profiling automatically when necessary.\nPolicy Policy rules specify how to monitor target processes and determine the type of profiling task to initiate when certain threshold conditions are met.\nThese policy rules primarily consist of the following configuration information:\n Monitoring type: This specifies what kind of monitoring should be implemented on the target process. Threshold determination: This defines how to determine whether the target process requires the initiation of a profiling task. Trigger task: This specifies what kind of performance analysis task should be initiated.  Monitoring type The type of monitoring is determined by observing the data values of a specified process to generate corresponding metrics. These metric values can then facilitate subsequent threshold judgment operations. In eBPF observation, we believe the following metrics can most directly reflect the current performance of the program:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    
Network related monitoring Monitoring network type metrics is not as simple as obtaining basic process information. It requires the initiation of eBPF programs and attaching them to the target process for observation. This is similar to the principle of the network profiling task we introduced in the previous article, except that we no longer collect the full content of the data packets. Instead, we only collect the content of messages that match specified HTTP prefixes.\nBy using this method, we can significantly reduce the number of times the kernel sends data to the user space, and the user-space program can parse the data content with less system resource usage. This ultimately helps in conserving system resources.\nMetrics collector The eBPF Agent periodically reports the following process metrics to indicate process performance over time.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percentage   process_thread_count count The thread count of the process   system_load count The average system load for the last minute; each process has the same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    Threshold determination For the threshold determination, the judgment is made by the eBPF Agent in its own memory, based on the monitored target process, rather than relying on calculations performed by the SkyWalking backend. The advantage of this approach is that it doesn\u0026rsquo;t have to wait for the results of complex backend computations, and it reduces potential issues brought about by complicated interactions.\nBy using this method, the eBPF Agent can swiftly initiate tasks immediately after conditions are met, without any delay.\nIt includes the following configuration items:\n Threshold: Check if the monitoring value meets the specified expectations. Period: The time period (in seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of seconds within the detection period in which the threshold is triggered; in other words, the total number of times the specified threshold rule is triggered in the most recent duration (seconds). Once the count check is met, the specified profiling task will be started.  Trigger task When the eBPF Agent detects that the threshold determination in the specified policy meets the rules, it can initiate the corresponding task according to pre-configured rules. Each target profiling task has different initiation parameters:\n On/Off CPU Profiling: It automatically performs performance analysis on processes that meet the conditions, defaulting to 10 minutes of monitoring. Network Profiling: It performs network performance analysis on all processes in the same Service Instance on the current machine, to prevent the cause of the issue from being missed because too few processes are collected, defaulting to 10 minutes of monitoring.  Once a task is initiated, no new profiling tasks will be started for the current process for a certain period. The main reason for this is to prevent frequent task creation due to low threshold settings, which could affect program execution. The default time period is 20 minutes.
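To make the relationship between these parameters concrete, here is a minimal, hypothetical sketch in Go. The type and field names are invented purely for illustration; they do not reflect the actual eBPF Agent (Rover) configuration schema or source code:

// Hypothetical types modelling the policy described above; names are illustrative only.
type ThresholdItem struct {
	MonitorType string  // e.g. "PROCESS_CPU" or "HTTP_ERROR_RATE" (see the monitoring types above)
	Threshold   float64 // value the metric must exceed, e.g. 80 for 80% CPU
	Period      int     // how many recent seconds of data to inspect
	Count       int     // how many of those seconds must exceed Threshold
}

type Policy struct {
	TargetProfiling string          // the task to trigger: on/off CPU profiling or network profiling
	Items           []ThresholdItem // meeting any single item starts the task
}

These types are reused in the sliding-window sketch after the next section.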
Data Flow Figure 1 illustrates the data flow of the continuous profiling feature:\nFigure 1: Data Flow of Continuous Profiling\neBPF Agent with Process Firstly, we need to ensure that the eBPF Agent and the process to be monitored are deployed on the same host machine, so that we can collect relevant data from the process. When the eBPF Agent detects a threshold validation rule that conforms to the policy, it immediately triggers the profiling task for the target process, thereby reducing any intermediate steps and accelerating the ability to pinpoint performance issues.\nSliding window The sliding window plays a crucial role in the eBPF Agent\u0026rsquo;s threshold determination process, as illustrated in figure 2:\nFigure 2: Sliding Window in eBPF Agent\nEach element in the array represents the data value for a specified second in time. When the sliding window needs to verify whether a rule is triggered, it fetches the content of a certain number of recent elements (the period parameter). If an element exceeds the threshold, it is marked in red and counted. If the number of red elements reaches the configured count, a task is triggered.\nUsing a sliding window offers the following two advantages:\n Fast retrieval of recent content: With a sliding window, complex calculations are unnecessary. The data can be known by simply reading a certain number of recent array elements. Solving data spike issues: Validation through a count prevents situations where a data point suddenly spikes and then quickly returns to normal. Verification with multiple values can reveal whether exceeding the threshold is frequent or occasional. 
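As a rough sketch of the check described above (assuming one metric sample per second and the hypothetical ThresholdItem fields from the earlier sketch), the window logic could look like the following. It is an illustration of the algorithm, not the real Rover implementation:

// Minimal sliding-window threshold check; assumes one sample per second.
func shouldTrigger(window []float64, item ThresholdItem) bool {
	if len(window) < item.Period {
		return false // not enough recent data collected yet
	}
	recent := window[len(window)-item.Period:] // the last Period seconds
	exceeded := 0
	for _, v := range recent {
		if v > item.Threshold {
			exceeded++ // a "red" element in figure 2
		}
	}
	return exceeded >= item.Count // enough red elements, so start the profiling task
}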
 eBPF Agent with SkyWalking Backend The eBPF Agent communicates periodically with the SkyWalking backend, involving three crucial operations:\n Policy synchronization: Through periodic policy synchronization, the eBPF Agent can keep processes on the local machine updated with the latest policy rules as much as possible. Metrics sending: For processes that are already being monitored, the eBPF Agent periodically sends the collected data to the backend program. This facilitates real-time queries of current data values by users, who can also compare this data with historical values or thresholds when problems arise. Profiling task reporting: When the eBPF Agent detects that a certain process has triggered a policy rule, it automatically initiates a profiling task, collects relevant information from the current process, and reports it to the SkyWalking backend. This allows users to know from the interface when, why, and what type of profiling task was triggered.  Demo Next, let\u0026rsquo;s quickly demonstrate the continuous profiling feature, so you can understand more specifically what it accomplishes.\nDeploy SkyWalking Showcase SkyWalking Showcase contains a complete set of example services and can be monitored using SkyWalking. For more information, please check the official documentation.\nIn this demo, we only deploy the services, the latest released SkyWalking OAP, and the UI.\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.5.0 export SW_UI_IMAGE=apache/skywalking-ui:9.5.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.5.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes After deployment is complete, please run the following command to open the SkyWalking UI at http://localhost:8080/.\nkubectl port-forward svc/ui 8080:8080 --namespace default Create Continuous Profiling Policy Currently, the continuous profiling feature is set by default in the Service Mesh panel at the Service level.\nFigure 3: Continuous Policy Tab\nBy clicking the edit button next to the Policy List, the policies of the current service can be created or updated.\nFigure 4: Edit Continuous Profiling Policy\nMultiple policies are supported. Every policy has the following configurations.\n Target Type: Specifies the type of profiling task to be triggered when the threshold determination is met. Items: For profiling tasks of the same target, one or more validation items can be specified. As long as one validation item meets the threshold determination, the corresponding performance analysis task will be launched.  Monitor Type: Specifies the type of monitoring to be carried out for the target process. Threshold: Depending on the type of monitoring, you need to fill in the corresponding threshold to complete the verification work. Period: Specifies the number of recent seconds of data you want to monitor. Count: Determines the total number of seconds triggered within the recent period. URI Regex/List: This is applicable to HTTP monitoring types, allowing URL filtering.    Done After clicking the save button, you can see the currently created monitoring rules, as shown in figure 5:\nFigure 5: Continuous Profiling Monitoring Processes\nThe data can be divided into the following parts:\n Policy list: On the left, you can see the rule list you have created. Monitoring Summary List: Once a rule is selected, you can see which pods and processes would be monitored by this rule. It also summarizes how many profiling tasks have been triggered in the last 48 hours by the current pod or process, as well as the last trigger time. This list is also sorted in descending order by the number of triggers to facilitate your quick review.  When you click on a specific process, a new dashboard appears, listing metrics and triggered profiling results.\nFigure 6: Continuous Profiling Triggered Tasks\nThe figure contains the following data:\n Task Timeline: It lists all profiling tasks in the past 48 hours. When the mouse hovers over a task, it also displays detailed information:  Task start and end time: It indicates when the current performance analysis task was triggered. Trigger reason: It displays the reason why the current process was profiled and lists the value of the metric exceeding the threshold when the profiling was triggered, so you can quickly understand the reason.   Task Detail: Similar to the CPU Profiling and Network Profiling introduced in previous articles, this displays the flame graph or process topology map of the current task, depending on the profiling type.  
Meanwhile, on the Metrics tab, metrics relative to profiling policies are collected to retrieve the historical trend, in order to provide a comprehensive explanation of the trigger point about the profiling.\nFigure 7: Continuous Profiling Metrics\nConclusion In this article, I have detailed how the continuous profiling feature in SkyWalking and eBPF works. In general, it involves deploying the eBPF Agent service on the same machine where the process to be monitored resides, and monitoring the target process with low resource consumption. When it meets the threshold conditions, it would initiate more complex CPU Profiling and Network Profiling tasks.\nIn the future, we will offer even more features. Stay tuned!\n Twitter, ASFSkyWalking Slack. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. Subscribe to our medium list.  ","excerpt":"Background In previous articles, We have discussed how to use SkyWalking and eBPF for performance …","ref":"/blog/2023-06-25-intruducing-continuous-profiling-skywalking-with-ebpf/","title":"Activating Automatical Performance Analysis -- Continuous Profiling"},{"body":"","excerpt":"","ref":"/tags/ebpf/","title":"eBPF"},{"body":"","excerpt":"","ref":"/tags/profiling/","title":"Profiling"},{"body":"SkyWalking CLI 0.12.0 is released. Go to downloads page to find release tars.\n Add the sub-command records list for adapt the new record query API by @mrproliu in https://github.com/apache/skywalking-cli/pull/167 Add the attached events fields into the trace sub-command by @mrproliu in https://github.com/apache/skywalking-cli/pull/169 Add the sampling config file into the profiling ebpf create network sub-command by @mrproliu in https://github.com/apache/skywalking-cli/pull/171 Add the sub-command profiling continuous for adapt the new continuous profiling API by @mrproliu in https://github.com/apache/skywalking-cli/pull/173 Adapt the sub-command metrics for deprecate scope fron entity by @mrproliu in https://github.com/apache/skywalking-cli/pull/173 Add components in topology related sub-commands. @mrproliu in https://github.com/apache/skywalking-cli/pull/175 Add the sub-command metrics nullable for query the nullable metrics value. @mrproliu in https://github.com/apache/skywalking-cli/pull/176 Adapt the sub-command profiling trace for adapt the new trace profiling protocol. @mrproliu in https://github.com/apache/skywalking-cli/pull/177 Add isEmptyValue field in metrics related sub-commands. @mrproliu in https://github.com/apache/skywalking-cli/pull/180 Add the sub-command metrics execute for execute the metrics query. @mrproliu in https://github.com/apache/skywalking-cli/pull/182 Add the sub-command profiling continuous monitoring for query all continuous profiling monitoring instances. @mrproliu in https://github.com/apache/skywalking-cli/pull/182 Add continuousProfilingCauses.message field in the profiling ebpf list comamnds by @mrproliu in https://github.com/apache/skywalking-cli/pull/184  ","excerpt":"SkyWalking CLI 0.12.0 is released. Go to downloads page to find release tars.\n Add the sub-command …","ref":"/events/release-apache-skywalking-cli-0-12-0/","title":"Release Apache SkyWalking CLI 0.12.0"},{"body":"SkyWalking Rover 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance the protocol reader for support long socket data. Add the syscall level event to the trace. Support OpenSSL 3.0.x. Optimized the data structure in BPF. Support continuous profiling. 
Improve the performance when getting goid in eBPF. Support build multiple architecture docker image: x86_64, arm64.  Bug Fixes  Fix HTTP method name in protocol analyzer. Fixed submitting multiple network profiling tasks with the same uri causing the rover to restart.  Documentation Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Rover 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance the …","ref":"/events/release-apache-skwaylking-rover-0-5-0/","title":"Release Apache SkyWalking Rover 0.5.0"},{"body":"SkyWalking Satellite 1.2.0 is released. Go to downloads page to find release tars.\nFeatures  Introduce pprof module. Support export multiple telemetry service. Update the base docker image. Add timeout configuration for gRPC client. Reduce log print when the enqueue data to the pipeline error. Support transmit the Continuous Profiling protocol.  Bug Fixes  Fix CVE-2022-41721. Use Go 19 to build the Docker image to fix CVEs.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 1.2.0 is released. Go to downloads page to find release tars.\nFeatures …","ref":"/events/release-apache-skwaylking-satellite-1-2-0/","title":"Release Apache SkyWalking Satellite 1.2.0"},{"body":"","excerpt":"","ref":"/tags/tracing/","title":"Tracing"},{"body":"背景 在之前的文章中,我们讨论了如何使用 SkyWalking 和 eBPF 来检测性能问题,包括进程和网络。这些方法可以很好地定位问题,但仍然存在一些挑战:\n 任务启动的时间: 当需要进行性能监控时,解决需要性能监控的进程始终是一个挑战。通常需要手动参与,以标识进程和所需的性能分析类型,这会在崩溃恢复期间耗费额外的时间。根本原因定位和崩溃恢复时间有时会发生冲突。在实际情况中,重新启动可能是恢复的第一选择,同时也会破坏崩溃的现场。 任务的资源消耗: 确定分析范围的困难。过宽的分析范围会导致需要更多的资源。我们需要一种方法来管理资源消耗并了解哪些进程需要性能分析。 工程师能力: 通常由整个团队负责呼叫,其中有初级和高级工程师,即使是高级工程师也对复杂的分布式系统有其理解限制,单个人几乎无法理解整个系统。  持续剖析(Continuous Profiling) 是解决上述问题的新机制。\n自动剖析 由于性能分析的资源消耗和高经验要求,因此引入一种方法以缩小范围并由高级 SRE 工程师创建策略自动剖析。因此,在 9.5.0 中,SkyWalking 首先引入了预设策略规则,以低功耗方式监视特定服务的 eBPF 代理,并在必要时自动运行剖析。\n策略 策略规则指定了如何监视目标进程并确定在满足某些阈值条件时应启动何种类型的分析任务。\n这些策略规则主要包括以下配置信息:\n 监测类型: 这指定了应在目标进程上实施什么样的监测。 阈值确定: 这定义了如何确定目标进程是否需要启动分析任务。 触发任务: 这指定了应启动什么类型的性能分析任务。  监测类型 监测类型是通过观察指定进程的数据值来生成相应的指标来确定的。这些指标值可以促进后续的阈值判断操作。在 eBPF 观测中,我们认为以下指标最能直接反映程序的当前性能:\n   监测类型 单位 描述     系统负载 负载 在指定时间段内的系统负载平均值。   进程 CPU 百分比 进程的 CPU 使用率百分比。   进程线程计数 计数 进程中的线程数。   HTTP 错误率 百分比 导致错误响应(例如,4xx 或 5xx 状态代码)的 HTTP 请求的百分比。   HTTP 平均响应时间 毫秒 HTTP 请求的平均响应时间。    相关网络监测 监测网络类型的指标不像获取基本进程信息那么简单。它需要启动 eBPF 程序并将其附加到目标进程以进行观测。这类似于我们在先前文章中介绍的网络分析任务,不同的是我们不再收集数据包的完整内容。相反,我们仅收集与指定 HTTP 前缀匹配的消息的内容。\n通过使用此方法,我们可以大大减少内核向用户空间发送数据的次数,用户空间程序可以使用更少的系统资源来解析数据内容。这最终有助于节省系统资源。\n指标收集器 eBPF 代理会定期报告以下进程度量,以指示进程性能:\n   名称 单位 描述     process_cpu (0-100)% CPU 使用率百分比   process_thread_count 计数 进程中的线程数   system_load 计数 最近一分钟的平均系统负载,每个进程的值相同   http_error_rate (0-100)% 网络请求错误率百分比   http_avg_response_time 毫秒 网络平均响应持续时间    阈值确定 对于阈值的确定,eBPF 代理是基于其自身内存中的目标监测进程进行判断,而不是依赖于 SkyWalking 后端执行的计算。这种方法的优点在于,它不必等待复杂后端计算的结果,减少了复杂交互所带来的潜在问题。\n通过使用此方法,eBPF 代理可以在条件满足后立即启动任务,而无需任何延迟。\n它包括以下配置项:\n 阈值: 检查监测值是否符合指定的期望值。 周期: 监控数据的时间周期(秒),也可以理解为最近的持续时间。 计数: 检测期间触发阈值的次数(秒),也可以理解为最近持续时间内指定阈值规则触发的总次数(秒)。一旦满足计数检查,指定的分析任务将被开始。  触发任务 当 eBPF Agent 检测到指定策略中的阈值决策符合规则时,根据预配置的规则可以启动相应的任务。对于每个不同的目标性能任务,它们的任务启动参数都不同:\n On/Off CPU Profiling: 它会自动对符合条件的进程进行性能分析,缺省情况下监控时间为 10 分钟。 Network Profiling: 它会对当前机器上同一 Service Instance 中的所有进程进行网络性能分析,以防问题的原因因被收集进程太少而无法实现,缺省情况下监控时间为 10 分钟。  一旦任务启动,当前进程将在一定时间内不会启动新的剖析任务。主要原因是为了防止因低阈值设置而频繁创建任务,从而影响程序执行。缺省时间为 20 分钟。\n数据流 图 1 展示了持续剖析功能的数据流:\n图 1: 持续剖析的数据流\neBPF Agent进行进程跟踪 首先,我们需要确保 eBPF Agent 和要监测的进程部署在同一台主机上,以便我们可以从进程中收集相关数据。当 eBPF Agent 
检测到符合策略的阈值验证规则时,它会立即为目标进程触发剖析任务,从而减少任何中间步骤并加速定位性能问题的能力。\n滑动窗口 滑动窗口在 eBPF Agent 的阈值决策过程中发挥着至关重要的作用,如图 2 所示:\n图 2: eBPF Agent 中的滑动窗口\n数组中的每个元素表示指定时间内的数据值。当滑动窗口需要验证是否负责某个规则时,它从最近的一定数量的元素 (period 参数) 中获取每个元素的内容。如果一个元素超过了阈值,则标记为红色并计数。如果红色元素的数量超过一定数量,则被认为触发了任务。\n使用滑动窗口具有以下两个优点:\n 快速检索最近的内容:使用滑动窗口,无需进行复杂的计算。你可以通过简单地读取一定数量的最近数组元素来了解数据。 解决数据峰值问题:通过计数进行验证,可以避免数据点突然增加然后快速返回正常的情况。使用多个值进行验证可以揭示超过阈值是频繁还是偶然发生的。  eBPF Agent与OAP后端通讯 eBPF Agent 定期与 SkyWalking 后端通信,涉及三个最关键的操作:\n 策略同步:通过定期的策略同步,eBPF Agent 可以尽可能地让本地机器上的进程与最新的策略规则保持同步。 指标发送:对于已经被监视的进程,eBPF Agent 定期将收集到的数据发送到后端程序。这就使用户能够实时查询当前数据值,用户也可以在出现问题时将此数据与历史值或阈值进行比较。 剖析任务报告:当 eBPF 检测到某个进程触发了策略规则时,它会自动启动性能任务,从当前进程收集相关信息,并将其报告给 SkyWalking 后端。这使用户可以从界面了解何时、为什么和触发了什么类型的剖析任务。  演示 接下来,让我们快速演示持续剖析功能,以便你更具体地了解它的功能。\n部署 SkyWalking Showcase SkyWalking Showcase 包含完整的示例服务,并可以使用 SkyWalking 进行监视。有关详细信息,请查看官方文档。\n在此演示中,我们只部署服务、最新发布的 SkyWalking OAP 和 UI。\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.5.0 export SW_UI_IMAGE=apache/skywalking-ui:9.5.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.5.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes 部署完成后,请运行以下脚本以打开 SkyWalking UI:http://localhost:8080/。\nkubectl port-forward svc/ui 8080:8080 --namespace default 创建持续剖析策略 目前,持续剖析功能在 Service Mesh 面板的 Service 级别中默认设置。\n图 3: 持续策略选项卡\n通过点击 Policy List 旁边的编辑按钮,可以创建或更新当前服务的策略。\n图 4: 编辑持续剖析策略\n支持多个策略。每个策略都有以下配置。\n Target Type:指定符合阈值决策时要触发的剖析任务的类型。 Items:对于相同目标的剖析任务,可以指定一个或多个验证项目。只要一个验证项目符合阈值决策,就会启动相应的性能分析任务。  Monitor Type:指定要为目标进程执行的监视类型。 Threshold:根据监视类型的不同,需要填写相应的阈值才能完成验证工作。 Period:指定你要监测的最近几秒钟的数据数量。 Count:确定最近时间段内触发的总秒数。 URI 正则表达式/列表:这适用于 HTTP 监控类型,允许 URL 过滤。    完成 单击保存按钮后,你可以看到当前已创建的监控规则,如图 5 所示:\n图 5: 持续剖析监控进程\n数据可以分为以下几个部分:\n 策略列表:在左侧,你可以看到已创建的规则列表。 监测摘要列表:选择规则后,你可以看到哪些 pod 和进程将受到该规则的监视。它还总结了当前 pod 或进程在过去 48 小时内触发的性能分析任务数量,以及最后一个触发时间。该列表还按触发次数降序排列,以便你快速查看。  当你单击特定进程时,将显示一个新的仪表板以列出指标和触发的剖析结果。\n图 6: 持续剖析触发的任务\n当前图包含以下数据内容:\n 任务时间轴:它列出了过去 48 小时的所有剖析任务。当鼠标悬停在任务上时,它还会显示详细信息:  任务的开始和结束时间:它指示当前性能分析任务何时被触发。 触发原因:它会显示为什么会对当前进程进行剖析,并列出当剖析被触发时超过阈值的度量值,以便你快速了解原因。   任务详情:与前几篇文章介绍的 CPU 剖析和网络剖析类似,它会显示当前任务的火焰图或进程拓扑图,具体取决于剖析类型。  同时,在 Metrics 选项卡中,收集与剖析策略相关的指标以检索历史趋势,以便在剖析的触发点提供全面的解释。\n图 7: 持续剖析指标\n结论 在本文中,我详细介绍了 SkyWalking 和 eBPF 中持续剖析功能的工作原理。通常情况下,它涉及将 eBPF Agent 服务部署在要监视的进程所在的同一台计算机上,并以低资源消耗监测目标进程。当它符合阈值条件时,它会启动更复杂的 CPU 剖析和网络剖析任务。\n在未来,我们将提供更多功能。敬请期待!\n Twitter:ASFSkyWalking Slack:向邮件列表 (dev@skywalking.apache.org) 发送“Request to join SkyWalking Slack”,我们会邀请你加入。 订阅我们的 Medium 列表。  ","excerpt":"背景 在之前的文章中,我们讨论了如何使用 SkyWalking 和 eBPF 来检测性能问题,包括进程和网络。这些方法可以很好地定位问题,但仍然存在一些挑战:\n 任务启动的时间: 当需要进行性能监控 …","ref":"/zh/2023-06-25-intruducing-continuous-profiling-skywalking-with-ebpf/","title":"自动化性能分析——持续剖析"},{"body":"SkyWalking 9.5.0 is released. Go to downloads page to find release tars.\nNew Topology Layout Elasticsearch Server Monitoring Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP running in no-init mode. Support to bind TLS status as a part of component for service topology. Fix component ID priority bug. Fix component ID of topology overlap due to storage layer bugs. [Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. 
[Breaking Change] Sharding-MySQL implementations and tests get removed due to we have the day-based rolling mechanism by default Fix otel k8s-cluster rule add namespace dimension for MAL aggregation calculation(Deployment Status,Deployment Spec Replicas) Support continuous profiling feature. Support collect process level related metrics. Fix K8sRetag reads the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing service label. Fix possible NPE when initialize IntList. Support parse PromQL expression has empty labels in the braces for metadata query. Support alarm metric OP !=. Support metrics query indicates whether value == 0 represents actually zero or no data. Fix NPE when query the not exist series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: Remove empty values from the query result, fix /api/v1/metadata param limit could cause out of bound. Support monitoring the total number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix cve. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for aws-firehose receiver Bump up armeria to 1.23.1 Support Elasticsearch Monitoring. Fix PromQL HTTP API /api/v1/series response missing service label when matching metric. Support ServerSide TopN for BanyanDB. Add component ID for Jersey. Remove OpenCensus support, the related codes and docs as it\u0026rsquo;s sunsetting. Support dynamic configuration of searchableTracesTags Support exportErrorStatusTraceOnly for export the error status trace segments through the Kafka channel Add component ID for Grizzly. Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. 
Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.5.0 is released. Go to downloads page to find release tars.\nNew Topology Layout …","ref":"/events/release-apache-skywalking-apm-9.5.0/","title":"Release Apache SkyWalking APM 9.5.0"},{"body":"Celebrating 22k Stars! The Apache SkyWalking community is thrilled to reach the milestone of 22k stars on GitHub! This showcases its popularity and impact as an APM and observability tool.\nSince launching in 2016 to provide an open source APM solution, SkyWalking has evolved into a full stack observability platform with distributed tracing, metrics monitoring and alerting. It\u0026rsquo;s seeing widespread adoption globally, especially in Asia where APM needs are expanding rapidly.\nThe growing user base has enabled SkyWalking to achieve massive deployments demonstrating its ability to scale to extreme levels. There have been reported deployments collecting over 100TB of data from companies' complex distributed applications, monitoring over 8000 microservices and analyzing 100 billion distributed traces - providing end-to-end visibility, performance monitoring and issue troubleshooting for some of the largest distributed systems in the world.\nThis success and widespread adoption has attracted an active community of nearly 800 contributors, thanks in part to programs like GSoC and OSPP(Open Source Promotion Plan) that bring in university contributors. The SkyWalking team remains focused on building a reliable, performant platform to observe complex distributed systems. We\u0026rsquo;ll continue innovating with features like service mesh monitoring and metric analytics.Your ongoing support, feedback and contributions inspire us!\nThank you for helping SkyWalking reach 22k stars on GitHub! This is just the beginning - we have ambitious plans and can\u0026rsquo;t wait to have you along our journey!\n","excerpt":"Celebrating 22k Stars! 
The Apache SkyWalking community is thrilled to reach the milestone of 22k …","ref":"/blog/2023-06-13-celebrate-22k-stars/","title":"Celebrate 22k stars"},{"body":"本文演示如何将 Dubbo-Go 应用程序与 SkyWalking Go 集成,并在 SkyWalking UI 中查看结果。\n以前,如果你想要在 SkyWalking 中监控 Golang 应用程序,需要将项目与 go2sky 项目集成,并手动编写各种带有 go2sky 插件的框架。现在,我们有一个全新的项目( Skywalking Go ),允许你将 Golang 项目集成到 SkyWalking 中,几乎不需要编码,同时提供更大的灵活性和可扩展性。\n在本文中,我们将指导你快速将 skywalking-go 项目集成到 dubbo-go 项目中。\n演示包括以下步骤:\n 部署 SkyWalking:这涉及设置 SkyWalking 后端和 UI 程序,使你能够看到最终效果。 使用 SkyWalking Go 编译程序:在这里,你将把 SkyWalking Go Agent 编译到要监控的 Golang 程序中。 应用部署:你将导出环境变量并部署应用程序,以促进你的服务与 SkyWalking 后端之间的通信。 在 SkyWalking UI 上可视化:最后,你将发送请求并在 SkyWalking UI 中观察效果。  部署 SkyWalking 请从官方 SkyWalking 网站下载 SkyWalking APM 程序 。然后执行以下两个命令来启动服务:\n# 启动 OAP 后端 \u0026gt; bin/oapService.sh # 启动 UI \u0026gt; bin/webappService.sh 接下来,你可以访问地址 http://localhost:8080/ 。此时,由于尚未部署任何应用程序,因此你将看不到任何数据。\n使用 SkyWalking GO 编译 Dubbo Go 程序 这里将演示如何将 Dubbo-go 程序与SkyWalking Go Agent集成。请依次执行如下命令来创建一个新的项目:\n# 安装dubbo-go基础环境 \u0026gt; export GOPROXY=\u0026#34;https://goproxy.cn\u0026#34; \u0026gt; go install github.com/dubbogo/dubbogo-cli@latest \u0026gt; dubbogo-cli install all # 创建demo项目 \u0026gt; mkdir demo \u0026amp;\u0026amp; cd demo \u0026gt; dubbogo-cli newDemo . # 升级dubbo-go依赖到最新版本 \u0026gt; go get -u dubbo.apache.org/dubbo-go/v3 在项目的根目录中执行以下命令。此命令将下载 skywalking-go 所需的依赖项:\ngo get github.com/apache/skywalking-go 接下来,请分别在服务端和客户端的main包中引入。包含之后,代码将会更新为:\n// go-server/cmd/server.go package main import ( \u0026#34;context\u0026#34; ) import ( \u0026#34;dubbo.apache.org/dubbo-go/v3/common/logger\u0026#34; \u0026#34;dubbo.apache.org/dubbo-go/v3/config\u0026#34; _ \u0026#34;dubbo.apache.org/dubbo-go/v3/imports\u0026#34; \u0026#34;helloworld/api\u0026#34; // 引入skywalking-go \t_ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) type GreeterProvider struct { api.UnimplementedGreeterServer } func (s *GreeterProvider) SayHello(ctx context.Context, in *api.HelloRequest) (*api.User, error) { logger.Infof(\u0026#34;Dubbo3 GreeterProvider get user name = %s\\n\u0026#34;, in.Name) return \u0026amp;api.User{Name: \u0026#34;Hello \u0026#34; + in.Name, Id: \u0026#34;12345\u0026#34;, Age: 21}, nil } // export DUBBO_GO_CONFIG_PATH= PATH_TO_SAMPLES/helloworld/go-server/conf/dubbogo.yaml func main() { config.SetProviderService(\u0026amp;GreeterProvider{}) if err := config.Load(); err != nil { panic(err) } select {} } 在客户端代码中除了需要引入skywalking-go之外,还需要在main方法中的最后一行增加主携程等待语句,以防止因为客户端快速关闭而无法将Tracing数据异步发送到SkyWalking后端:\npackage main import ( \u0026#34;context\u0026#34; ) import ( \u0026#34;dubbo.apache.org/dubbo-go/v3/common/logger\u0026#34; \u0026#34;dubbo.apache.org/dubbo-go/v3/config\u0026#34; _ \u0026#34;dubbo.apache.org/dubbo-go/v3/imports\u0026#34; \u0026#34;helloworld/api\u0026#34; // 引入skywalking-go \t_ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) var grpcGreeterImpl = new(api.GreeterClientImpl) // export DUBBO_GO_CONFIG_PATH= PATH_TO_SAMPLES/helloworld/go-client/conf/dubbogo.yaml func main() { config.SetConsumerService(grpcGreeterImpl) if err := config.Load(); err != nil { panic(err) } logger.Info(\u0026#34;start to test dubbo\u0026#34;) req := \u0026amp;api.HelloRequest{ Name: \u0026#34;laurence\u0026#34;, } reply, err := grpcGreeterImpl.SayHello(context.Background(), req) if err != nil { logger.Error(err) } logger.Infof(\u0026#34;client response result: %v\\n\u0026#34;, reply) // 增加主携程等待语句 \tselect {} } 接下来,请从官方 SkyWalking 网站下载 Go Agent 程序 。当你使用 go build 命令进行编译时,请在 bin 目录中找到与当前操作系统匹配的代理程序,并添加 
-toolexec=\u0026quot;/path/to/go-agent -a 参数。例如,请使用以下命令:\n# 进入项目主目录 \u0026gt; cd demo # 分别编译服务端和客户端 # -toolexec 参数定义为go-agent的路径 # -a 参数用于强制重新编译所有依赖项 \u0026gt; cd go-server \u0026amp;\u0026amp; go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o go-server cmd/server.go \u0026amp;\u0026amp; cd .. \u0026gt; cd go-client \u0026amp;\u0026amp; go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o go-client cmd/client.go \u0026amp;\u0026amp; cd .. 应用部署 在开始部署应用程序之前,你可以通过环境变量更改 SkyWalking 中当前应用程序的服务名称。你还可以更改其配置,例如服务器端的地址。有关详细信息,请参阅文档 。\n在这里,我们分别启动两个终端窗口来分别启动服务端和客户端。\n在服务端,将服务的名称更改为dubbo-server:\n# 导出dubbo-go服务端配置文件路径 export DUBBO_GO_CONFIG_PATH=/path/to/demo/go-server/conf/dubbogo.yaml # 导出skywalking-go的服务名称 export SW_AGENT_NAME=dubbo-server ./go-server/go-server 在客户端,将服务的名称更改为dubbo-client:\n# 导出dubbo-go客户端配置文件路径 export DUBBO_GO_CONFIG_PATH=/path/to/demo/go-client/conf/dubbogo.yaml # 导出skywalking-go的服务名称 export SW_AGENT_NAME=dubbo-client ./go-client/go-client 在 SkyWalking UI 上可视化 现在,由于客户端会自动像服务器端发送请求,现在就可以在 SkyWalking UI 中观察结果。\n几秒钟后,重新访问 http://localhost:8080 的 SkyWalking UI。能够在主页上看到部署的 dubbo-server 和 dubbo-client 服务。\n此外,在追踪页面上,可以看到刚刚发送的请求。\n并可以在拓扑图页面中看到服务之间的关系。\n总结 在本文中,我们指导你快速开发dubbo-go服务,并将其与 SkyWalking Go Agent 集成。这个过程也适用于你自己的任意 Golang 服务。最终,可以在 SkyWalking 服务中查看显示效果。如果你有兴趣了解 SkyWalking Go 代理当前支持的框架,请参阅此文档 。\n将来,我们将继续扩展 SkyWalking Go 的功能,添加更多插件支持。所以,请继续关注!\n","excerpt":"本文演示如何将 Dubbo-Go 应用程序与 SkyWalking Go 集成,并在 SkyWalking UI 中查看结果。\n以前,如果你想要在 SkyWalking 中监控 Golang 应用程 …","ref":"/zh/2023-06-05-quick-start-using-skywalking-go-monitoring-dubbo-go/","title":"使用SkyWalking go agent快速实现Dubbo Go监控"},{"body":"SkyWalking Go 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Initialize the agent core and user import library. Support gRPC reporter for management, tracing protocols. Automatic detect the log frameworks and inject the log context.  Plugins  Support Gin framework. Support Native HTTP server and client framework. Support Go Restful v3 framework. Support Dubbo server and client framework. Support Kratos v2 server and client framework. Support Go-Micro v4 server and client framework. Support GORM v2 database client framework.  Support MySQL Driver detection.    Documentation  Initialize the documentation.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Go 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Initialize the …","ref":"/events/release-apache-skwaylking-go-0.1.0/","title":"Release Apache SkyWalking Go 0.1.0"},{"body":"SkyWalking Java Agent 8.16.0 is released. Go to downloads page to find release tars. Changes by Version\n8.16.0  Exclude synthetic methods for the WitnessMethod mechanism Support ForkJoinPool trace Support clickhouse-jdbc-plugin trace sql parameters Support monitor jetty server work thread pool metric Support Jersey REST framework Fix ClassCastException when SQLServer inserts data [Chore] Exclude org.checkerframework:checker-qual and com.google.j2objc:j2objc-annotations [Chore] Exclude proto files in the generated jar Fix Jedis-2.x plugin can not get host info in jedis 3.3.x+ Change the classloader to locate the agent path in AgentPackagePath, from SystemClassLoader to AgentPackagePath\u0026rsquo;s loader. Support Grizzly Trace Fix possible IllegalStateException when using Micrometer. Support Grizzly Work ThreadPool Metric Monitor Fix the gson dependency in the kafka-reporter-plugin. Fix deserialization of kafka producer json config in the kafka-reporter-plugin. 
Support to config custom decode methods for kafka configurations  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.16.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-16-0/","title":"Release Apache SkyWalking Java Agent 8.16.0"},{"body":"Background Previously, if you wanted to monitor a Golang application in SkyWalking, you would integrate your project with the go2sky project and manually write various frameworks with go2sky plugins. Now, we have a brand-new project (Skywalking Go) that allows you to integrate your Golang projects into SkyWalking with almost zero coding, while offering greater flexibility and scalability.\nIn this article, we will guide you quickly integrating the skywalking-go project into your Golang project.\nQuick start This demonstration will consist of the following steps:\n Deploy SkyWalking: This involves setting up the SkyWalking backend and UI programs, enabling you to see the final effect. Compile Golang with SkyWalking Go: Here, you\u0026rsquo;ll compile the SkyWalking Go Agent into the Golang program you wish to monitor. Application Deployment: You\u0026rsquo;ll export environment variables and deploy the application to facilitate communication between your service and the SkyWalking backend. Visualization on SkyWalking UI: Finally, you\u0026rsquo;ll send requests and observe the effects within the SkyWalking UI.  Deploy SkyWalking Please download the SkyWalking APM program from the official SkyWalking website. Then execute the following two commands to start the service:\n# startup the OAP backend \u0026gt; bin/oapService.sh # startup the UI \u0026gt; bin/webappService.sh Next, you can access the address at http://localhost:8080/. At this point, as no applications have been deployed yet, you will not see any data.\nCompile Golang with SkyWalking GO Here is a simple business application here that starts an HTTP service.\npackage main import \u0026#34;net/http\u0026#34; func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } Execute the following command in the project\u0026rsquo;s root directory. This command will download the dependencies required for skywalking-go:\ngo get github.com/apache/skywalking-go Also, include it in the main package of the project. After the inclusion, the code will update to:\npackage main import ( \u0026#34;net/http\u0026#34; // This is an important step. DON\u0026#39;T MISS IT. \t_ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } Next, please download the Go Agent program from the official SkyWalking website. When you compile with the go build command, find the agent program that matches your current operating system in the bin directory, and add the -toolexec=\u0026quot;/path/to/go-agent -a parameter. 
For example, use the following command:\n# Build application with SkyWalking go agent # -toolexec parameter define the path of go-agent # -a parameter is used to force rebuild all packages \u0026gt; go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o test . Application Deployment Before you start to deploy the application, you can change the service name of the current application in SkyWalking through environment variables. You can also change its configuration such as the address with the server-side. For specific details, please refer to the documentation.\nHere, we\u0026rsquo;re just changing the name of the current service to demo.\n# Change the service name \u0026gt; export SW_AGENT_NAME=demo Next, you can start the application:\n# Start the application \u0026gt; ./test Visualization on SkyWalking UI Now, you can send a request to the application and observe the results in the SkyWalking UI.\n# Send a request \u0026gt; curl http://localhost:8000/hello After a few seconds, you can revisit the SkyWalking UI at http://localhost:8080. You will be able to see the demo service you deployed on the homepage.\nMoreover, on the Trace page, you can see the request you just sent.\nConclusion In this article, we\u0026rsquo;ve guided you to quickly develop a demo service and integrate it with SkyWalking Go Agent. This process is also applicable to your own Golang services. Ultimately, you can view the display effect in the SkyWalking service. If you\u0026rsquo;re interested in learning which frameworks the SkyWalking Go agent currently supports, please refer to this documentation.\nIn the future, we will continue to expand the functionality of SkyWalking Go, adding more plugin support. So, stay tuned!\n","excerpt":"Background Previously, if you wanted to monitor a Golang application in SkyWalking, you would …","ref":"/blog/2023-06-01-quick-start-with-skywalking-go-agent/","title":"Quick start with SkyWalking Go Agent"},{"body":"本文演示如何将应用程序与 SkyWalking Go 集成,并在 SkyWalking UI 中查看结果。\n以前,如果你想要在 SkyWalking 中监控 Golang 应用程序,需要将项目与 go2sky 项目集成,并手动编写各种带有 go2sky 插件的框架。现在,我们有一个全新的项目(Skywalking Go ),允许你将 Golang 项目集成到 SkyWalking 中,几乎不需要编码,同时提供更大的灵活性和可扩展性。\n在本文中,我们将指导你快速将 skywalking-go 项目集成到 Golang 项目中。\n演示包括以下步骤:\n 部署 SkyWalking:这涉及设置 SkyWalking 后端和 UI 程序,使你能够看到最终效果。 使用 SkyWalking Go 编译 Golang:在这里,你将把 SkyWalking Go Agent 编译到要监控的 Golang 程序中。 应用部署:你将导出环境变量并部署应用程序,以促进你的服务与 SkyWalking 后端之间的通信。 在 SkyWalking UI 上可视化:最后,你将发送请求并在 SkyWalking UI 中观察效果。  部署 SkyWalking 请从官方 SkyWalking 网站下载 SkyWalking APM 程序 。然后执行以下两个命令来启动服务:\n# 启动 OAP 后端 \u0026gt; bin/oapService.sh # 启动 UI \u0026gt; bin/webappService.sh 接下来,你可以访问地址 http://localhost:8080/ 。此时,由于尚未部署任何应用程序,因此你将看不到任何数据。\n使用 SkyWalking GO 编译 Golang 这里有一个简单的业务应用程序,启动了一个 HTTP 服务。\npackage main import \u0026#34;net/http\u0026#34; func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } 在项目的根目录中执行以下命令。此命令将下载 skywalking-go 所需的依赖项:\ngo get github.com/apache/skywalking-go 接下来,请将其包含在项目的 main 包中。包含之后,代码将会更新为:\npackage main import ( \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } 
} 接下来,请从官方 SkyWalking 网站下载 Go Agent 程序 。当你使用 go build 命令进行编译时,请在 bin 目录中找到与当前操作系统匹配的代理程序,并添加 -toolexec=\u0026quot;/path/to/go-agent\u0026quot; -a 参数。例如,请使用以下命令:\ngo build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o test . 应用部署 在开始部署应用程序之前,你可以通过环境变量更改 SkyWalking 中当前应用程序的服务名称。你还可以更改其配置,例如服务器端的地址。有关详细信息,请参阅文档 。\n在这里,我们只是将当前服务的名称更改为 demo。\n接下来,你可以启动应用程序:\nexport SW_AGENT_NAME=demo ./test 在 SkyWalking UI 上可视化 现在,向应用程序发送请求并在 SkyWalking UI 中观察结果。\n几秒钟后,重新访问 http://localhost:8080 的 SkyWalking UI。能够在主页上看到部署的 demo 服务。\n此外,在追踪页面上,可以看到刚刚发送的请求。\n总结 在本文中,我们指导你快速开发 demo 服务,并将其与 SkyWalking Go Agent 集成。这个过程也适用于你自己的 Golang 服务。最终,可以在 SkyWalking 服务中查看显示效果。如果你有兴趣了解 SkyWalking Go 代理当前支持的框架,请参阅此文档 。\n将来,我们将继续扩展 SkyWalking Go 的功能,添加更多插件支持。所以,请继续关注!\n","excerpt":"本文演示如何将应用程序与 SkyWalking Go 集成,并在 SkyWalking UI 中查看结果。\n以前,如果你想要在 SkyWalking 中监控 Golang 应用程序, …","ref":"/zh/2023-06-01-quick-start-with-skywalking-go-agent/","title":"SkyWalking Go Agent 快速开始指南"},{"body":"SkyWalking Rust 0.7.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Obtain Span object without intermediary. by @jmjoy in https://github.com/apache/skywalking-rust/pull/57 Rename module skywalking_proto to proto. by @jmjoy in https://github.com/apache/skywalking-rust/pull/59 Add Span::prepare_for_async method and AbstractSpan trait. by @jmjoy in https://github.com/apache/skywalking-rust/pull/58 Bump to 0.7.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/60  ","excerpt":"SkyWalking Rust 0.7.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-7-0/","title":"Release Apache SkyWalking Rust 0.7.0"},{"body":"SkyWalking PHP 0.5.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Bump openssl from 0.10.45 to 0.10.48 by @dependabot in https://github.com/apache/skywalking-php/pull/60 Make the SKYWALKING_AGENT_ENABLE work in the request hook as well. by @jmjoy in https://github.com/apache/skywalking-php/pull/61 Support tracing curl_multi_* api. by @jmjoy in https://github.com/apache/skywalking-php/pull/62 Fix parent endpoint and peer in segment ref and tag url in entry span. by @jmjoy in https://github.com/apache/skywalking-php/pull/63 Bump h2 from 0.3.15 to 0.3.17 by @dependabot in https://github.com/apache/skywalking-php/pull/65 Add amqplib plugin for producer. by @jmjoy in https://github.com/apache/skywalking-php/pull/64 Upgrade and adapt phper. by @jmjoy in https://github.com/apache/skywalking-php/pull/66 Refactor script create_package_xml. by @jmjoy in https://github.com/apache/skywalking-php/pull/67 Refactor predis plugin to hook Client. by @jmjoy in https://github.com/apache/skywalking-php/pull/68 Canonicalize unknown. by @jmjoy in https://github.com/apache/skywalking-php/pull/69 Bump guzzlehttp/psr7 from 2.4.0 to 2.5.0 in /tests/php by @dependabot in https://github.com/apache/skywalking-php/pull/70 Enhance support for Swoole. by @jmjoy in https://github.com/apache/skywalking-php/pull/71 Bump to 0.5.0. by @jmjoy in https://github.com/apache/skywalking-php/pull/72  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.4.0...v0.5.0\nPECL https://pecl.php.net/package/skywalking_agent/0.5.0\n","excerpt":"SkyWalking PHP 0.5.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skwaylking-php-0-5-0/","title":"Release Apache SkyWalking PHP 0.5.0"},{"body":"SkyWalking Python 1.0.1 is released! 
Go to downloads page to find release tars.\nPyPI Wheel: https://pypi.org/project/apache-skywalking/1.0.1/\nDockerHub Image: https://hub.docker.com/r/apache/skywalking-python\n  Upgrading from v1.0.0 to v1.0.1 is strongly encouraged\n This is a critical performance-oriented patch to address a CPU surge reported in https://github.com/apache/skywalking/issues/10672    Feature:\n Add a new workflow to push docker images for arm64 and amd64 (#297)    Plugins:\n Optimize loguru reporter plugin.(#302)    Fixes:\n Fix sw8 loss when use aiohttp (#299, issue#10669) Critical: Fix a bug that leads to high cpu usage (#300, issue#10672)    Others:\n Use Kraft mode in E2E Kafka reporter tests (#303)    New Contributors  @Forstwith made their first contribution in https://github.com/apache/skywalking-python/pull/299 @FAWC438 made their first contribution in https://github.com/apache/skywalking-python/pull/300  Full Changelog: https://github.com/apache/skywalking-python/compare/v1.0.0...v1.0.1\n","excerpt":"SkyWalking Python 1.0.1 is released! Go to downloads page to find release tars.\nPyPI Wheel: …","ref":"/events/release-apache-skywalking-python-1-0-1/","title":"Release Apache SkyWalking Python 1.0.1"},{"body":"本次活动于 2023 年 4 月 22 日在北京奥加美术馆酒店举行。该会议旨在探讨和分享有关可观测性的最佳实践, 包括在云原生应用程序和基础架构中实现可观测性的最新技术和工具。与会者将有机会了解行业领袖的最新见解,并与同行们分享经验和知识。 我们期待这次会议能够给云原生社区带来更多的启发和动力,推动我们在可观测性方面的进一步发展。\n圆桌讨论:云原生应用可观测性现状及趋势 B站视频地址\n嘉宾\n 罗广明,主持人 吴晟,Tetrate 创始工程师 向阳,云杉科技研发 VP 乔新亮,原苏宁科技副总裁,现彩食鲜 CTO 董江,中国移动云能力中心高级系统架构专家  为 Apache SkyWalking 构建 Grafana dashboards \u0026ndash; 基于对原生 PromQL 的支持 B站视频地址\n万凯,Tetrate\n  讲师介绍 万凯,Tetrate 工程师,Apache SkyWalking PMC 成员,专注于应用性能可观测性领域。\n  议题概要 本次分享将介绍 Apache SkyWalking 的新特性 PromQL Service,它将为 SkyWalking 带来更广泛的生态集成能力: 什么是 PromQL SkyWalking 的 PromQL Service 是什么,能够做什么 SkyWalking 中的基本概念和 metrics 的特性 如何使用 PromQL Service 使用 PromQL Service 构建 Grafana dashboards 的实践\n  ","excerpt":"本次活动于 2023 年 4 月 22 日在北京奥加美术馆酒店举行。该会议旨在探讨和分享有关可观测性的最佳实践, 包括在云原生应用程序和基础架构中实现可观测性的最新技术和工具。与会者将有机会了解行业领 …","ref":"/zh/2023-04-23-obs-summit-china/","title":"[视频] 可观测性峰会2023 - Observability Summit"},{"body":"SkyWalking Client JS 0.10.0 is released. Go to downloads page to find release tars.\n Fix the ability of Fetch constructure. Update README. Bump up dependencies.  ","excerpt":"SkyWalking Client JS 0.10.0 is released. Go to downloads page to find release tars.\n Fix the ability …","ref":"/events/release-apache-skywalking-client-js-0-10-0/","title":"Release Apache SkyWalking Client JS 0.10.0"},{"body":"SkyWalking Java Agent 8.15.0 is released. Go to downloads page to find release tars. Changes by Version\n8.15.0  Enhance lettuce plugin to adopt uniform tags. Expose complete Tracing APIs in the tracing toolkit. Add plugin to trace Spring 6 and Resttemplate 6. Move the baseline to JDK 17 for development, the runtime baseline is still Java 8 compatible. Remove Powermock entirely from the test cases. Fix H2 instrumentation point Refactor pipeline in jedis-plugin. Add plugin to support ClickHouse JDBC driver (0.3.2.*). Refactor kotlin coroutine plugin with CoroutineContext. Fix OracleURLParser ignoring actual port when :SID is absent. Change gRPC instrumentation point to fix plugin not working for server side. Fix servicecomb plugin trace break. Adapt Armeria\u0026rsquo;s plugins to the latest version 1.22.x Fix tomcat-10x-plugin and add test case to support tomcat7.x-8.x-9.x. Fix thrift plugin generate duplicate traceid when sendBase error occurs Support keep trace profiling when cross-thread. 
Fix unexpected whitespace of the command catalogs in several Redis plugins. Fix a thread leak in SamplingService when updated sampling policy in the runtime. Support MySQL plugin tracing SQL parameters when useServerPrepStmts Update the endpoint name of Undertow plugin to Method:Path. Build a dummy(empty) javadoc of finagle and jdk-http plugins due to incompatibility.  Documentation  Update docs of Tracing APIs, reorganize the API docs into six parts. Correct missing package name in native manual API docs. Add a FAQ doc about \u0026ldquo;How to make SkyWalking agent works in OSGI environment?\u0026rdquo;  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.15.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-15-0/","title":"Release Apache SkyWalking Java Agent 8.15.0"},{"body":"SkyWalking PHP 0.4.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Bump tokio from 1.24.1 to 1.24.2 by @dependabot in https://github.com/apache/skywalking-php/pull/52 Bump to 0.4.0-dev by @heyanlong in https://github.com/apache/skywalking-php/pull/53 Avoid potential panic for logger. by @jmjoy in https://github.com/apache/skywalking-php/pull/54 Fix the curl plugin hook curl_setopt by mistake. by @jmjoy in https://github.com/apache/skywalking-php/pull/55 Update documents. by @jmjoy in https://github.com/apache/skywalking-php/pull/56 Upgrade dependencies and adapt the codes. by @jmjoy in https://github.com/apache/skywalking-php/pull/57 Add sub components licenses in dist material. by @jmjoy in https://github.com/apache/skywalking-php/pull/58 Bump to 0.4.0. by @jmjoy in https://github.com/apache/skywalking-php/pull/59  New Contributors  @dependabot made their first contribution in https://github.com/apache/skywalking-php/pull/52  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.3.0...v0.4.0\nPECL https://pecl.php.net/package/skywalking_agent/0.4.0\n","excerpt":"SkyWalking PHP 0.4.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skwaylking-php-0-4-0/","title":"Release Apache SkyWalking PHP 0.4.0"},{"body":"Background As an application performance monitoring tool for distributed systems, Apache SkyWalking provides monitoring, tracing, diagnosing capabilities for distributed system in Cloud Native architecture. Prometheus is an open-source systems monitoring and alerting toolkit with an active ecosystem. Especially Prometheus metrics receive widespread support through exporters and integrations. PromQL as Prometheus Querying Language containing a set of expressions and expose HTTP APIs to read metrics.\nSkyWalking supports to ingest Prometheus metrics through OpenTelemetry collector and through the aggregate calculation of these metrics to provide a variety of systems monitoring, such as Linux Monitoring and Kubernetes monitoring. SkyWalking already provides native UI and GraphQL API for users. But as designed to provide wider ecological integration capabilities, since 9.4.0, it provides PromQL Service, the third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through it. SkyWalking users will benefit from it when they integrate with different systems.\nWhat is PromQL Service in SkyWalking? 
PromQL Service is a query engine on top of the SkyWalking native GraphQL query, with additional query-stage calculation capabilities powered by Prometheus expressions. It can accept PromQL HTTP API requests, parse Prometheus expressions, and transform between Prometheus metrics and SkyWalking metrics.\nThe PromQL Service follows all of PromQL\u0026rsquo;s protocols and grammar, and users can use it as they would with PromQL. As SkyWalking is fundamentally different from Prometheus in terms of metric classification, format, storage, etc., the PromQL Service doesn\u0026rsquo;t have to implement the full PromQL feature set. Refer to the documentation for details.\nSkyWalking Basic Concepts Here are some basic concepts and differences from Prometheus that users need to understand in order to use the PromQL Service. In Prometheus, only the metric naming format and structure are specified; the actual metric names and labels are determined by the client provider, and the details are stored as-is. Users aggregate and calculate the metrics using PromQL expressions. Unlike Prometheus, SkyWalking\u0026rsquo;s metric mechanism is built around the following core concepts with a hierarchical structure:\n Layer: represents an abstract framework in computer science, such as Operating System (OS_LINUX layer) or Kubernetes (k8s layer). This layer would be the owner of different services detected from different technologies. All Layer definitions can be found here. Service: Represents a set/group of workloads which provide the same behaviors for incoming requests. Service Instance: An individual workload in the Service group. Endpoint: A path in a service for incoming requests. Process: An operating system process. In some scenarios, a service instance is not a single process; for example, a pod in Kubernetes could contain multiple processes.  The metric name and properties (labels) are configured by the SkyWalking OAP server based on the data source as well as OAL and MAL. SkyWalking provides the ability to down-sample time series metrics and generate data for different time buckets (minute, hour, day).\nThe SkyWalking metric stream is as follows:\nTraffic  The metadata of the Service/ServiceRelation/Instance/ServiceInstanceRelation/Endpoint/EndpointRelation/Process/ProcessRelation, including names, layers, properties, relations between them, etc.  Metric  Name: the metric name, configured through OAL and MAL. Entity: represents what the metric belongs to and is used for the query. An Entity will contain the following information depending on the Scope: Scope represents the metric level and, in the query stage, represents the Scope catalog; the Scope catalog provides high-dimension classifications for all scopes as a hierarchy structure.     Scope Entity Info     Service Service(include layer info)   ServiceInstance Service, ServiceInstance   Endpoint Service, Endpoint   ServiceRelation Service, DestService   ServiceInstanceRelation ServiceInstance, DestServiceInstance   EndpointRelation Endpoint, DestEndpoint   Process Service, ServiceInstance, Process   ProcessRelation Process, ServiceInstance, DestProcess     Value:   single value: long. labeled value: text, label1,value1|label2,value2|..., such as L2 aggregation,5000 | L1 aggregation,8000.   TimeBucket: the time is accurate to minute, hour, day.  How to use PromQL Service Setup PromQL Service is enabled by default after v9.4.0, so no additional configuration is required. 
The default ports, for example, can be configured by using OAP environment variables:\nrestHost: ${SW_PROMQL_REST_HOST:0.0.0.0} restPort: ${SW_PROMQL_REST_PORT:9090} restContextPath: ${SW_PROMQL_REST_CONTEXT_PATH:/} restMaxThreads: ${SW_PROMQL_REST_MAX_THREADS:200} restIdleTimeOut: ${SW_PROMQL_REST_IDLE_TIMEOUT:30000} restAcceptQueueSize: ${SW_PROMQL_REST_QUEUE_SIZE:0} Use Prometheus expression PromQL matches metric through the Prometheus expression. Here is a typical Prometheus metric.\nTo match the metric, the Prometheus expression is as follows:\nIn the PromQL Service, these reserved labels would be parsed as the metric name and entity info fields with other labels for the query. The mappings are as follows.\n   SkyWalking Concepts Prometheus expression     Metric name Metric name   Layer Label   Service Label   ServiceInstance Label\u0026lt;service_instance\u0026gt;   Endpoint Label   \u0026hellip; \u0026hellip;    For example, the following expressions are used to match query metrics: service_cpm, service_instance_cpm, endpoint_cpm\nservice_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} service_instance_cpm{service=\u0026#39;agent::songs\u0026#39;, service_instance=\u0026#39;agent::songs_instance_1\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} endpoint_cpm{service=\u0026#39;agent::songs\u0026#39;, endpoint=\u0026#39;GET:/songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Typical Query Example At here, we take the SkyWalking Showcase deployment as the playground to demonstrate how to use PromQL for SkyWalking metrics.\nThe following examples can be used to query the metadata and metrics of services through PromQL Service.\nGet metrics names Query:\nhttp://localhost:9099/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, ... 
] } Select a metric and get the labels Query:\nhttp://localhost:9099/api/v1/labels?match[]=service_cpm Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34; ] } Get services from a specific layer Query:\nhttp://127.0.0.1:9099/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Query specific metric for a service Query:\nhttp://127.0.0.1:9099/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1679559960, \u0026#34;6\u0026#34; ] } ] } } About the range query and different metrics type for query can refer to the document here.\nBuild Grafana Dashboard From the above, we know the mechanism and how to query from PromQL Service, now we can build the Grafana Dashboard for the above service example. Note: All the following configurations are based on Grafana version 9.1.0.\nSkyWalking Showcase provides dashboards files such as services of General and Service Mesh layers, we can quickly create a dashboard for the General layer service by importing the dashboard JSON file.\nAfter the Grafana application is deployed, follow the steps below:\nConfigure Data Source First, we need to create a data source: In the data source config panel, chose Prometheus and set the URL to the OAP server address, the default port is 9090. 
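To double-check that the URL entered here really answers queries, you can replay the instant query from the example above outside Grafana. This is only a sketch; it reuses the showcase port 9099 and the agent::songs service from the earlier examples, both of which will differ in your own deployment.

import json
import urllib.parse
import urllib.request

# Assumptions taken from the examples above: showcase PromQL port 9099, service agent::songs.
base = 'http://localhost:9099/api/v1/query'
expr = "service_cpm{service='agent::songs', layer='GENERAL'}"

with urllib.request.urlopen(base + '?' + urllib.parse.urlencode({'query': expr})) as resp:
    data = json.load(resp)['data']

# An instant query returns a vector: one sample per matching series,
# each sample carrying [unix_timestamp, '<value as string>'].
for sample in data['result']:
    ts, value = sample['value']
    print(sample['metric']['__name__'], ts, value)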
Here set the data source name SkyWalking in case there are multiple Prometheus data sources.\nImport Dashboard File   Create a dashboard folder named SkyWalking.\n  Import the dashboard file into Grafana, there are two ways to get the file:\n From SkyWalking Showcase. Go to SkyWaking Demo: Preview metrics on Grafana, and export it from the General Service dashboard.    Done! Now we can see the dashboard is working, the services are in the drop-down list and the metrics are displayed on the panels.\n  This is an easy way to build, but we need to know how it works if we want to customize it.\nHow the dashboard works Dashboard Settings Open the Settings-Variables we can see the following variables:\nLet\u0026rsquo;s look at what each variable does:\n  $DS_SkyWalking\nThis is a data source ty variable that specifies the Prometheus data source which was defined earlier as SkyWalking.\n  $layer\nThis is a constant type because in the \u0026lsquo;General Service\u0026rsquo; dashboard, all services belong to the \u0026lsquo;GENERAL\u0026rsquo; layer, so they can be used directly in each query Note When you customize other layers, this value must be defined in the Layer mentioned above.\n  $service\nQuery type variable, to get all service names under this layer for the drop-down list.\nQuery expression:\nlabel_values(service_traffic{layer=\u0026#39;$layer\u0026#39;}, service) The query expression will query HTTP API /api/v1/series for service metadata in $layer and fetch the service name according to the label(service).\n  $service_instance\nSame as the $service is a query variable that is used to select all instances of the service in the drop-down list.\nQuery expression:\nlabel_values(instance_traffic{layer=\u0026#39;$layer\u0026#39;, service=\u0026#39;$service\u0026#39;}, service_instance) The query expression here not only specifies the $layer but also contains the variable $service, which is used to correlate with the services for the drop-down list.\n  $endpoint\nSame as the $service is a query variable that is used to select all endpoints of the service in the drop-down list.\nQuery expression:\nlabel_values(endpoint_traffic{layer=\u0026#39;$layer\u0026#39;, service=\u0026#39;$service\u0026#39;, keyword=\u0026#39;$endpoint_keyword\u0026#39;, limit=\u0026#39;$endpoint_limit\u0026#39;}, endpoint) The query expression here specifies the $layer and $service which are used to correlate with the services for the drop-down list. And also accept variables $endpoint_keyword and $endpoint_limit as filtering condition.\n  $endpoint_keyword\nA text type variable that the user can input to filter the return value of $endpoint.\n  $endpoint_limit\nCustom type, which the user can select to limit the maximum number of returned endpoints.\n  Panel Configurations There are several typical metrics panels on this dashboard, let\u0026rsquo;s see how it\u0026rsquo;s configured.\nCommon Value Metrics Select Time series chart panel Service Apdex and click edit.  Query expression service_apdex{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 10000 The metric scope is Service, add labels service and layer for the match, and the label value used the variables configured above. The calculation Divided by 10000 is used for matching the result units. The document for the query can refer to here.\n Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. 
Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1 hour or 1 day SkyWalking returns the hour/day step metrics values.  Labeled Value Metrics Select Time series chart panel Service Response Time Percentile and click edit.  Query expression service_percentile{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;, labels=\u0026#39;0,1,2,3,4\u0026#39;, relabels=\u0026#39;P50,P75,P90,P95,P99\u0026#39;} The metric scope is Service, add labels service and layer for the match, and the label value used the variables configured above. Add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. The document for the query can refer to here.\n Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1 hour or 1 day SkyWalking returns the hour/day step metrics values. Set Legend to {{label}} for show up.  Sort Metrics Select Time series chart panel Service Response Time Percentile and click edit.  Query expression service_instance_cpm{parent_service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;} The expression is used for query the sore metrics under service, so add labels parent_service and layer for the match. Add top_n='10' and order='DES' filter the result. The document for the query can refer to here.\n Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Set the Calculation --\u0026gt; Latest*. Set Legend to {{service_instance}} for show up.  Conclusion In this article, we introduced what is the PromQL Service in SkyWalking and its background. Detailed how to use PromQL Service and the basic concepts related to SkyWalking, and show how to use PromQL Service to build Grafana dashboards for SkyWalking.\nIn the future, there will be more integrations by leveraging this protocol, such as CI/CD, HPA (scaling), etc.\n","excerpt":"Background As an application performance monitoring tool for distributed systems, Apache SkyWalking …","ref":"/blog/2023-03-17-build-grafana-dashboards-for-apache-skywalking-native-promql-support/","title":"Build Grafana dashboards for Apache SkyWalking -- Native PromQL Support"},{"body":"","excerpt":"","ref":"/tags/grafana/","title":"Grafana"},{"body":"","excerpt":"","ref":"/tags/metric/","title":"Metric"},{"body":"","excerpt":"","ref":"/tags/promql/","title":"PromQL"},{"body":"背景 Apache SkyWalking 作为分布式系统的应用性能监控工具,提供了对云原生架构下的分布式系统的监控、跟踪、诊断能力。Prometheus 是一个开源系统监控和警报工具包,具有活跃的生态系统。特别是 Prometheus 指标通过 导出器和集成 得到广泛支持。 PromQL 作为 Prometheus 查询语言,包含一组表达式并公开 HTTP API 以读取指标。\nSkyWalking 支持通过 OpenTelemetry 收集器 摄取 Prometheus 指标,并通过这些指标的聚合计算提供多种系统监控,例如 Linux 监控和 Kubernetes 监控。SkyWalking 已经为用户提供了 原生 UI 和 GraphQL API。但为了提供更广泛的生态整合能力,从 9.4.0 开始,它提供了 PromQL 服务,已经支持 PromQL 的第三方系统或可视化平台(如 Grafana),可以通过它获取指标。SkyWalking 用户在与不同系统集成时将从中受益。\nSkyWalking 中的 PromQL 服务是什么? 
PromQL 服务是 SkyWalking 原生 GraphQL 查询之上的查询引擎,具有由 Prometheus 表达式提供支持的附加查询阶段计算能力。它可以接受 PromQL HTTP API 请求,解析 Prometheus 表达式,并在 Prometheus 指标和 SkyWalking 指标之间进行转换。\nPromQL 服务遵循 PromQL 的所有协议和语法,用户可以像使用 PromQL 一样使用它。由于 SkyWalking 在度量分类、格式、存储等方面与 Prometheus 有根本不同,因此 PromQL 服务不必实现完整的 PromQL 功能。有关详细信息,请参阅文档。\nSkyWalking 基本概念 以下是用户使用 PromQL 服务需要了解的一些基本概念和与 Prometheus 的区别: Prometheus 指标指定命名格式和结构,实际指标名称和标签由客户端提供商确定,并存储详细信息。用户使用 PromQL 中的表达式聚合和计算指标。与 Prometheus 不同,SkyWalking 的度量机制是围绕以下具有层次结构的核心概念构建的:\n  层(Layer):表示计算机科学中的一个抽象框架,如 Operating System(OS_LINUX 层)、Kubernetes(k8s 层)。该层将是从不同技术检测到的不同服务的所有者。可以在此处\n找到所有层定义。\n  服务:表示一组 / 一组工作负载,它为传入请求提供相同的行为。\n  服务实例:服务组中的单个工作负载。\n  端点:传入请求的服务路径。\n  进程:操作系统进程。在某些场景下,service instance 不是一个进程,比如一个 Kubernetes Pod 可能包含多个进程。\n  Metric 名称和属性(标签)由 SkyWalking OAP 服务器根据数据源以及 OAL 和 MAL 配置。SkyWalking 提供了对时间序列指标进行下采样(down-sampling),并生成不同时间段数据(分钟、小时、天)的能力。\nSkyWalking 指标流如下:\n流量  Service/ServiceRelation/Instance/ServiceInstanceRelation/Endpoint/EndpointRelation/Process/ProcessRelation 的元数据。包括名称、层、属性、它们之间的关系等。  指标  名称(Name):指标名称,来自 OAL 和 MAL 的配置。 实体(Entity):表示指标的归属,用于查询。一个 Entity 根据 Scope 不同会包含如下信息: Scope 代表指标级别,在查询阶段代表 Scope catalog,Scope catalog 为所有的 scope 提供了高维的分类,层次结构。     Scope 实体信息     Service 服务(包括图层信息)   ServiceInstance 服务、服务实例   Endpoint 服务、端点   ServiceRelation 服务,目标服务   ServiceInstanceRelation 服务实例、目标服务实例   EndpointRelation 端点、目标端点   Process 服务、服务实例、流程   ProcessRelation 进程、服务实例、DestProcess     值:   单值:long 标签值:文本,label1,value1|label2,value2|... ,例如 L2 aggregation,5000 | L1 aggregation,8000   TimeBucket:时间精确到分钟、小时、天  如何使用 PromQL 服务 设置 PromQL 服务在 v9.4.0 之后默认开启,不需要额外配置。例如,可以使用 OAP 环境变量配置默认端口:\nrestHost: ${SW_PROMQL_REST_HOST:0.0.0.0} restPort: ${SW_PROMQL_REST_PORT:9090} restContextPath: ${SW_PROMQL_REST_CONTEXT_PATH:/} restMaxThreads: ${SW_PROMQL_REST_MAX_THREADS:200} restIdleTimeOut: ${SW_PROMQL_REST_IDLE_TIMEOUT:30000} restAcceptQueueSize: ${SW_PROMQL_REST_QUEUE_SIZE:0} 使用 Prometheus 表达式 PromQL 通过 Prometheus 表达式匹配指标。这是一个典型的 Prometheus 指标。\n为了匹配指标,Prometheus 表达式如下:\n在 PromQL 服务中,这些保留的标签将被解析为度量名称和实体信息字段以及用于查询的其他标签。映射如下。\n   SkyWalking 概念 Prometheus 表达     指标名称 指标名称   层 标签   服务 标签   服务实例 标签 \u0026lt;服务实例\u0026gt;   端点 标签   …… ……    例如,以下表达式用于匹配查询指标:service_cpm、service_instance_cpm、endpoint_cpm\nservice_cpm {service='agent::songs', layer='GENERAL'} service_instance_cpm {service='agent::songs', service_instance='agent::songs_instance_1', layer='GENERAL'} endpoint_cpm {service='agent::songs', endpoint='GET:/songs', layer='GENERAL'} 典型查询示例 在这里,我们将 SkyWalking Showcase 部署作为 Playground 来演示如何使用 PromQL 获取 SkyWalking 指标。\n以下示例可用于通过 PromQL 服务查询服务的元数据和指标。\n获取指标名称 查询:\nhttp://localhost:9099/api/v1/label/__name__/values 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, ... 
] } 选择一个指标并获取标签 查询:\nhttp://localhost:9099/api/v1/labels?match []=service_cpm 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34; ] } 从特定层获取服务 查询:\nhttp://127.0.0.1:9099/api/v1/series?match []=service_traffic {layer='GENERAL'}\u0026amp;start=1677479336\u0026amp;end=1677479636 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } 查询服务的特定指标 查询:\nhttp://127.0.0.1:9099/api/v1/query?query=service_cpm {service='agent::songs', layer='GENERAL'} 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ {\u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; },\u0026#34;value\u0026#34;: [ 1679559960, \u0026#34;6\u0026#34; ] } ] } } 关于range query和不同的metrics type for query 可以参考 这里的 文档。\n构建 Grafana Dashboard 从上面我们知道了 PromQL 服务的机制和查询方式,现在我们可以为上面的服务示例构建 Grafana Dashboard。注:以下所有配置均基于 Grafana 9.1.0 版本。\nSkyWalking Showcase 提供了 General Service 和 Service Mesh 层等 Dashboard 文件,我们可以通过导入 Dashboard JSON 文件快速为层服务创建 Dashboard。\n部署 Grafana 应用程序后,请按照以下步骤操作:\n配置数据源 首先,我们需要创建一个数据源: 在数据源配置面板中,选择 Prometheus 并设置 URL 为 OAP 服务器地址,默认端口为 9090。 SkyWalking 如果有多个 Prometheus 数据源,请在此处设置数据源名称。\n导入 Dashboard 文件   创建一个名为 SkyWalking 的 Dashboard 文件夹。\n  将 Dashboard 文件导入到 Grafana 中,有两种获取文件的方式:\n 来自 SkyWalking Showcase 转到 SkyWaking Demo:在 Grafana 上预览指标,并将其从 General Service Dashboard 导出。    完毕!现在我们可以看到 Dashboard 正在运行,服务位于下拉列表中,指标显示在面板上。\n  这是一种简单的构建方式,但是如果我们想要自定义它,我们需要知道它是如何工作的。\nDashboard 的工作原理 Dashboard 设置 打开 Settings-Variables 我们可以看到如下变量:\n让我们看看每个变量的作用:\n  $DS_SkyWalking\n这是一个数据源 ty 变量,它指定了之前定义为 SkyWalking 的 Prometheus 数据源。\n  $layer\n这是一个常量类型,因为在 \u0026lsquo;General Service\u0026rsquo; Dashboard 中,所有服务都属于 \u0026lsquo;GENERAL\u0026rsquo; 层,因此可以在每个查询中直接使用它们。注意,当您自定义其他层时,必须在 Layer 上面定义该值。\n  $service\n查询类型变量,为下拉列表获取该层下的所有服务名称。\n查询表达式:\nlabel_values (service_traffic {layer='$layer'}, service) 查询表达式将查询 HTTP API 
/api/v1/series,以获取 $layer 中服务元数据,并根据标签(服务)提取服务名称。\n  $service_instance\n与 $service 一样,是一个查询变量,用于在下拉列表中选择服务的所有实例。\n查询表达式:\nlabel_values (instance_traffic {layer='$layer', service='$service'}, service_instance) 这里的查询表达式不仅指定了 $layer 还包含 $service 变量,用于关联下拉列表的服务。\n  $endpoint\n与 $service 一样,是一个查询变量,用于在下拉列表中选择服务的所有端点。\n查询表达式:\nlabel_values (endpoint_traffic {layer='$layer', service='$service', keyword='$endpoint_keyword', limit='$endpoint_limit'}, endpoint) 此处的查询表达式指定 $layer 和 $service 用于与下拉列表的服务相关联的。并且还接受 $endpoint_keyword 和 $endpoint_limit 变量作为过滤条件。\n  $endpoint_keyword\n一个文本类型的变量,用户可以输入它来过滤 $endpoint 的返回值。\n  $endpoint_limit\n自定义类型,用户可以选择它以限制返回端点的最大数量。\n  Dashboard 配置 这个 Dashboard 上有几个典型的指标面板,让我们看看它是如何配置的。\n普通值指标 选择 Time series chart 面板 Service Apdex 并单击 edit。\n  查询表达式\nservice_apdex {service='$service', layer='$layer'} / 10000 指标范围为 Service,添加 service 和 layer 标签用于匹配,label 值使用上面配置的变量。该计算 Divided by 10000 用于匹配结果单位。查询文档可以参考 这里。\n  设置 Query options --\u0026gt; Min interval = 1m,因为 SkyWalking 中的指标最小时间段是 1m。\n  设置 Connect null values --\u0026gt; AlwaysShow points --\u0026gt; Always,因为当查询间隔大于 1 小时或 1 天时,SkyWalking 返回小时 / 天步长指标值。\n  标签值指标 选择 Time series chart 面板 Service Response Time Percentile 并单击 edit。\n  查询表达式\nservice_percentile {service='$service', layer='$layer', labels='0,1,2,3,4', relabels='P50,P75,P90,P95,P99'} 指标范围为 Service,添加 service 和 layer 标签用于匹配,label 值使用上面配置的变量。添加 labels='0,1,2,3,4' 过滤结果标签,并添加 relabels='P50,P75,P90,P95,P99' 重命名结果标签。查询文档可以参考 这里。\n  设置 Query options --\u0026gt; Min interval = 1m,因为 SkyWalking 中的指标最小时间段是 1m。\n  设置 Connect null values --\u0026gt; AlwaysShow points --\u0026gt; Always,因为当查询间隔 \u0026gt; 1 小时或 1 天时,SkyWalking 返回小时 / 天步长指标值。\n  设置 Legend 为 {{label}} 来展示。\n  排序指标 选择 Time series chart 面板 Service Response Time Percentile 并单击 edit。\n  查询表达式\nservice_instance_cpm {parent_service='$service', layer='$layer', top_n='10', order='DES'} 该表达式用于查询服务下的排序指标,因此添加标签 parent_service 和 layer 进行匹配。添加 top_n='10' 和 order='DES' 过滤结果。查询文档可以参考 这里。\n  设置 Query options --\u0026gt; Min interval = 1m,因为 SkyWalking 中的指标最小时间段是 1m。\n  设置 Calculation --\u0026gt; Latest*。\n  设置 Legend 为 {{service_instance}} 来展示。\n  结论 在这篇文章中,我们介绍了 SkyWalking 中的 PromQL 服务是什么以及它的背景。详细介绍了 PromQL 服务的使用方法和 SkyWalking 相关的基本概念,展示了如何使用 PromQL 服务为 SkyWalking 构建 Grafana Dashboard。\n未来,将会有更多的集成利用这个协议,比如 CI/CD、HPA(缩放)等。\n","excerpt":"背景 Apache SkyWalking 作为分布式系统的应用性能监控工具,提供了对云原生架构下的分布式系统的监控、跟踪、诊断能力。Prometheus 是一个开源系统监控和警报工具包,具有活跃的生态 …","ref":"/zh/2023-03-17-build-grafana-dashboards-for-apache-skywalking-native-promql-support/","title":"为 Apache SkyWalking 构建 Grafana Dashboard —— 原生 PromQL 支持"},{"body":"Background Apache SkyWalking is an open-source application performance management system that helps users collect and aggregate logs, traces, metrics, and events, and display them on the UI. Starting from OAP 9.4.0, SkyWalking has added AWS Firehose receiver, which is used to receive and calculate the data of CloudWatch metrics. In this article, we will take DynamoDB as an example to show how to use SkyWalking to receive and calculate CloudWatch metrics data for monitoring Amazon Web Services.\nWhat are Amazon CloudWatch and Amazon Kinesis Data Firehose? Amazon CloudWatch is a metrics repository, this tool can collect raw data from AWS (e.g. DynamoDB) and process it into readable metrics in near real-time. Also, we can use Metric Stream to continuously stream CloudWatch metrics to a selected target location for near real-time delivery and low latency. 
SkyWalking takes advantage of this feature to create metric streams and direct them to Amazon Kinesis Data Firehose transport streams for further transport processing.\nAmazon Kinesis Data Firehoseis an extract, transform, and load (ETL) service that reliably captures, transforms, and delivers streaming data to data lakes, data stores, and analytics services. SkyWalking takes advantage of this feature to eventually direct the metrics stream to the aws-firehose-receiver for OAP to calculate and ultimately display the metrics.\nThe flow chart is as follows.\nNotice  Due to Kinesis Data Firehose specifications, the URL of the HTTP endpoint must use the HTTPS protocol and must use port 443. Also, this URL must be proxied by Gateway and forwarded to the real aws-firehose-receiver. The TLS certificate must be signed by a CA and the self-signed certificate will not be trusted by Kinesis Data Firehose.  Setting up DynamoDB monitoring Next, let\u0026rsquo;s take DynamoDB as an example to illustrate the necessary settings in aws before using OAP to collect CloudWatch metrics:\n Go to Kinesis Console, create a data stream, and select Direct PUT for Source and HTTP Endpoint for Destination. And set HTTP Endpoint URL to Gateway URL. The rest of the configuration options can be configured as needed.  Go to the CloudWatch Console, select Metrics-Stream in the left control panel, and click Create metric stream. Select AWS/DynamoDB for namespace. Also, you can add other namespaces as needed. Kinesis Data Firehose selects the data stream created in the first step. Finally, set the output format to opentelemetry0.7. The rest of the configuration options can be configured as needed.  At this point, the AWS side of DynamoDB monitoring configuration is set up.\nSkyWalking OAP metrics processing analysis SkyWalking uses aws-firehose-receiver to receive and decode AWS metrics streams forwarded by Gateway, and send it to Opentelemetry-receiver for processing and transforming into SkyWalking metrics. Then, the metrics are analyzed and aggregated by Meter Analysis Language (MAL) and finally presented on the UI.\nThe MAL part and the UI part of SkyWalking support users' customization, to display the metrics data in a more diversified way. For details, please refer to MAL doc and UI doc.\nTypical metrics analysis Scope In SkyWalking, there is the concept of scope. By using scopes, we can classify and aggregate metrics more rationally. In the monitoring of DynamoDB, two of these scopes are used - Service and Endpoint.\nService represents a set of workloads that provide the same behavior for incoming requests. Commonly used as cluster-level scopes for services, user accounts are closer to the concept of clusters in AWS. So SkyWalking uses AWS account id as a key to map AWS accounts to Service types.\nSimilarly, Endpoint represents a logical concept, often used in services for the path of incoming requests, such as HTTP URI path or gRPC service class + method signature, and can also represent the table structure in the database. So SkyWalking maps DynamoDB tables to Endpoint type.\nMetrics    Metric Name Meaning     AccountMaxReads / AccountMaxWrites The maximum number of read/write capacity units that can be used by an account.   AccountMaxTableLevelReads / AccountMaxTableLevelWrites The maximum number of read/write capacity units that can be used by a table or global secondary index of an account.   
AccountProvisionedReadCapacityUtilization / AccountProvisionedWriteCapacityUtilization The percentage of provisioned read/write capacity units utilized by an account.   MaxProvisionedTableReadCapacityUtilization / MaxProvisionedTableWriteCapacityUtilization The percentage of provisioned read/write capacity utilized by the highest provisioned read table or global secondary index of an account.    Above are some common account metrics (Serivce scope). They are various configuration information in DynamoDB, and SkyWalking can show a complete picture of the database configuration changes by monitoring these metrics.\n   Metric Name Meaning     ConsumedReadCapacityUnits / ConsumedWriteCapacityUnits The number of read/write capacity units consumed over the specified time period.   ReturnedItemCount The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period.   SuccessfulRequestLatency The latency of successful requests to DynamoDB or Amazon DynamoDB Streams during the specified time period.   TimeToLiveDeletedItemCount The number of items deleted by Time to Live (TTL) during the specified time period.    The above are some common table metrics (Endpoint scope), which will also be aggregated into account metrics. These metrics are generally used to analyze the performance of the database, and users can use them to determine the reasonable level of database configuration. For example, users can track how much of their provisioned throughput is used through ConsumedReadCapicityUnits / ConsumedReadCapicityUnits to determine the reasonableness of the preconfigured throughput of a table or account. For more information about provisioned throughput, see Provisioned Throughput Intro.\n   Metric Name Meaning     UserErrors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period.   SystemErrors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period.   ThrottledRequests Requests to DynamoDB that exceed the provisioned throughput limits on a resource.   TransactionConflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items.    The above are some common error metrics, among which UserErrors are account-level metrics and the rest are table-level metrics. Users can set alarms on these metrics, and if warnings appear, then it may indicate that there are some problems with the use of the database, and users need to check and verify by themselves.\nNotice SkyWalking\u0026rsquo;s metrics selection for DynamoDB comes directly from CloudWatch metrics, which can also be found at CloudWatch metrics doc to get metrics details.\nDemo In this section, we will demonstrate how to use terraform to create a DynamoDB table and other AWS services that can generate metrics streams, and deploy Skywalking to complete the metrics collection.\nFirst, you need a running gateway instance, such as NGINX, which is responsible for receiving metrics streams from AWS and forwarding them to the aws-firehose-receiver. Note that the gateway needs to be configured with certificates to accept HTTPS protocol requests.\nBelow is an example configuration for NGINX. 
The configuration does not need to be identical, as long as it can send incoming HTTPS requests to oap host:12801/aws/firehose/metrics.\nserver { listen 443 ssl; ssl_certificate /crt/test.pem; ssl_certificate_key /crt/test.key; ssl_session_timeout 5m; ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4; ssl_protocols TLSv1 TLSv1.1 TLSv1.2; ssl_prefer_server_ciphers on; location /aws/firehose/metrics { proxy_pass http://test.xyz:12801/aws/firehose/metrics; } } Deploying SkyWalking There are various ways to deploy SkyWalking, and you can get them directly from the release page.\nOf course, if you are more comfortable with Kubernetes, you can also find the appropriate deployment method from SkyWalking-kubernetes.\nPlease note that no matter which deployment method you use, please make sure that the OAP and UI version is 9.4.0 or higher and that port 12801 needs to be open.\nThe following is an example of a deployment using the helm command.\nexport SKYWALKING_RELEASE_VERSION=4.3.0 export SKYWALKING_RELEASE_NAME=skywalking export SKYWALKING_RELEASE_NAMESPACE=default helm install \u0026quot;${SKYWALKING_RELEASE_NAME}\u0026quot; \\ oci://registry-1.docker.io/apache/skywalking-helm \\ --version \u0026quot;${SKYWALKING_RELEASE_VERSION}\u0026quot; \\ -n \u0026quot;${SKYWALKING_RELEASE_NAMESPACE}\u0026quot; \\ --set oap.image.tag=9.4.0 \\ --set oap.storageType=elasticsearch \\ --set ui.image.tag=9.4.0 \\ --set oap.ports.firehose=12801 Start the corresponding AWS service The terraform configuration file is as follows (example modified inTerraform Registry - kinesis_firehose_delivery_stream):\n terraform configuration file  provider \u0026quot;aws\u0026quot; { region = \u0026quot;ap-northeast-1\u0026quot; access_key = \u0026quot;[need change]your access_key\u0026quot; secret_key = \u0026quot;[need change]your secret_key\u0026quot; } resource \u0026quot;aws_dynamodb_table\u0026quot; \u0026quot;basic-dynamodb-table\u0026quot; { name = \u0026quot;GameScores\u0026quot; billing_mode = \u0026quot;PROVISIONED\u0026quot; read_capacity = 20 write_capacity = 20 hash_key = \u0026quot;UserId\u0026quot; range_key = \u0026quot;GameTitle\u0026quot; attribute { name = \u0026quot;UserId\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;GameTitle\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;TopScore\u0026quot; type = \u0026quot;N\u0026quot; } ttl { attribute_name = \u0026quot;TimeToExist\u0026quot; enabled = true } global_secondary_index { name = \u0026quot;GameTitleIndex\u0026quot; hash_key = \u0026quot;GameTitle\u0026quot; range_key = \u0026quot;TopScore\u0026quot; write_capacity = 10 read_capacity = 10 projection_type = \u0026quot;INCLUDE\u0026quot; non_key_attributes = [\u0026quot;UserId\u0026quot;] } tags = { Name = \u0026quot;dynamodb-table-1\u0026quot; Environment = \u0026quot;production\u0026quot; } } resource \u0026quot;aws_cloudwatch_metric_stream\u0026quot; \u0026quot;main\u0026quot; { name = \u0026quot;my-metric-stream\u0026quot; role_arn = aws_iam_role.metric_stream_to_firehose.arn firehose_arn = aws_kinesis_firehose_delivery_stream.http_stream.arn output_format = \u0026quot;opentelemetry0.7\u0026quot; include_filter { namespace = \u0026quot;AWS/DynamoDB\u0026quot; } } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;streams_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; 
principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;streams.metrics.cloudwatch.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;metric_stream_to_firehose_role\u0026quot; assume_role_policy = data.aws_iam_policy_document.streams_assume_role.json } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;firehose:PutRecord\u0026quot;, \u0026quot;firehose:PutRecordBatch\u0026quot;, ] resources = [aws_kinesis_firehose_delivery_stream.http_stream.arn] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.metric_stream_to_firehose.id policy = data.aws_iam_policy_document.metric_stream_to_firehose.json } resource \u0026quot;aws_s3_bucket\u0026quot; \u0026quot;bucket\u0026quot; { bucket = \u0026quot;metric-stream-test-bucket\u0026quot; } resource \u0026quot;aws_s3_bucket_acl\u0026quot; \u0026quot;bucket_acl\u0026quot; { bucket = aws_s3_bucket.bucket.id acl = \u0026quot;private\u0026quot; } data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;firehose.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { assume_role_policy = data.aws_iam_policy_document.firehose_assume_role.json } data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;s3:AbortMultipartUpload\u0026quot;, \u0026quot;s3:GetBucketLocation\u0026quot;, \u0026quot;s3:GetObject\u0026quot;, \u0026quot;s3:ListBucket\u0026quot;, \u0026quot;s3:ListBucketMultipartUploads\u0026quot;, \u0026quot;s3:PutObject\u0026quot;, ] resources = [ aws_s3_bucket.bucket.arn, \u0026quot;${aws_s3_bucket.bucket.arn}/*\u0026quot;, ] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.firehose_to_s3.id policy = data.aws_iam_policy_document.firehose_to_s3.json } resource \u0026quot;aws_kinesis_firehose_delivery_stream\u0026quot; \u0026quot;http_stream\u0026quot; { name = \u0026quot;metric-stream-test-stream\u0026quot; destination = \u0026quot;http_endpoint\u0026quot; http_endpoint_configuration { name = \u0026quot;test_http_endpoint\u0026quot; url = \u0026quot;[need change]Gateway url\u0026quot; role_arn = aws_iam_role.firehose_to_s3.arn } s3_configuration { role_arn = aws_iam_role.firehose_to_s3.arn bucket_arn = aws_s3_bucket.bucket.arn } }  Steps to use.\n  Get the access_key and secret_key of the AWS account.( For how to get them, please refer to create-access-key )\n  Fill in the access_key and secret_key you got in the previous step, and fill in the corresponding URL of your gateway in the corresponding location of aws_kinesis_firehose_delivery_stream configuration.\n  Copy the above content and save it to the main.tf file.\n  Execute the following code in the corresponding path.\n  terraform 
init terraform apply At this point, all the required AWS services have been successfully created, and you can check your console to see if the services were successfully created.\nDone! If all the above steps were successful, please wait for about five minutes. After that, you can visit the SkyWalking UI to see the metrics.\nCurrently, the metrics collected by SkyWalking by default are displayed as follows.\naccount metrics:\ntable metrics:\nOther services Currently, SkyWalking officially supports EKS, S3, DynamoDB monitoring. Users also refer to the OpenTelemetry receiver to configure OTel rules to collect and analyze CloudWatch metrics of other AWS services and display them through a custom dashboard.\nMaterial  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","excerpt":"Background Apache SkyWalking is an open-source application performance management system that helps …","ref":"/blog/2023-03-13-skywalking-aws-dynamodb/","title":"Monitoring DynamoDB with SkyWalking"},{"body":"背景 Apache SkyWalking 是一个开源应用性能管理系统,帮助用户收集和聚合日志、追踪、指标和事件,并在 UI 上显示。从 OAP 9.4.0 开始,SkyWalking 新增了 AWS Firehose receiver,用来接收,计算CloudWatch metrics的数据。本文将以DynamoDB为例,展示如何使用 SkyWalking接收并计算 CloudWatch metrics 数据,以监控Amazon Web Services。\n什么是 Amazon CloudWatch 与 Amazon Kinesis Data Firehose ? Amazon CloudWatch 是一个指标存储库, 此工具可从 AWS中 ( 如 DynamoDB ) 收集原始数据,近实时处理为可读取的指标。同时,我们也可以使用指标流持续地将 CloudWatch 指标流式传输到所选的目标位置,实现近实时传送和低延迟。SkyWalking 利用此特性,创建指标流并将其导向 Amazon Kinesis Data Firehose 传输流,并由后者进一步传输处理。\nAmazon Kinesis Data Firehose是一项提取、转换、加载服务,可以将流式处理数据以可靠方式捕获、转换和提供到数据湖、数据存储和分析服务中。SkyWalking利用此特性,将指标流最终导向 aws-firehose-receiver,交由OAP计算并最终展示指标。\n整体过程流程图如下:\n注意  由于 Kinesis Data Firehose 规定,HTTP端点的URL必须使用HTTPS协议,且必须使用443端口。同时,此URL必须由Gateway代理并转发到真正的aws-firehose-receiver。 TLS 证书必须由CA签发的,自签证书不会被 Kinesis Data Firehose 信任。  设置DynamoDB监控 接下来以DynamoDB为例说明使用OAP 收集CloudWatch metrics 前,aws中必要的设置:\n 进入 Kinesis 控制台,创建数据流, Source选择 Direct PUT, Destination 选择 HTTP Endpoint. 
并且设置HTTP Endpoint URL 为 Gateway对应URL。 其余配置选项可由需要自行配置。  进入 CloudWatch 控制台,在左侧控制面板中选择Metrics-Stream,点击Create metric stream。其中,namespace 选择 AWS/DynamoDB。同时,根据需要,也可以增加其他命名空间。 Kinesis Data Firehose选择在第一步中创建好的数据流。最后,设置输出格式为opentelemetry0.7。其余配置选项可由需要自行配置。  至此,DynamoDB监控配置的AWS方面设置完成。\nSkyWalking OAP 指标处理分析 SkyWalking 利用 aws-firehose-receiver 接收并解码由Gateway转发来的 AWS 指标流,交由Opentelemetry-receiver进行处理,转化为SkyWalking metrics。并由Meter Analysis Language (MAL)进行指标的分析与聚合,最终呈现在UI上。\n其中 MAL 部分以及 UI 部分,SkyWalking支持用户自由定制,从而更多样性的展示指标数据。详情请参考MAL doc 以及 UI doc。\n典型指标分析 作用域 SkyWalking中,有作用域 ( scope ) 的概念。通过作用域, 我们可以对指标进行更合理的分类与聚合。在对DynamoDB的监控中,使用到了其中两种作用域———Service和Endpoint。\nService表示一组工作负荷,这些工作负荷为传入请求提供相同的行为。常用作服务的集群级别作用域,在AWS中,用户的账户更接近集群的概念。 所以SkyWalking将AWS account id作为key,将AWS账户映射为Service类型。\n同理,Endpoint表示一种逻辑概念,常用于服务中用于传入请求的路径,例如 HTTP URI 路径或 gRPC 服务类 + 方法签名,也可以表示数据库中的表结构。所以SkyWalking将DynamoDB表映射为Endpoint类型。\n指标    指标名称 含义     AccountMaxReads / AccountMaxWrites 账户可以使用的最大 读取/写入 容量单位数。   AccountMaxTableLevelReads / AccountMaxTableLevelWrites 账户的表或全局二级索引可以使用的最大 读取/写入 容量单位数。   AccountProvisionedReadCapacityUtilization / AccountProvisionedWriteCapacityUtilization 账户使用的预置 读取/写入 容量单位百分比。   MaxProvisionedTableReadCapacityUtilization / MaxProvisionedTableWriteCapacityUtilization 账户的最高预调配 读取/写入 表或全局二级索引使用的预调配读取容量单位百分比。    以上为一些常用的账户指标(Serivce 作用域)。它们是DynamoDB中的各种配置信息,SkyWalking通过对这些指标的监控,可以完整的展示出数据库配置的变动情况。\n   指标名称 含义     ConsumedReadCapacityUnits / ConsumedWriteCapacityUnits 指定时间段内占用的 读取/写入 容量单位数   ReturnedItemCount Query、Scan 或 ExecuteStatement(可选择)操作在指定时段内返回的项目数。   SuccessfulRequestLatency 指定时间段内对于 DynamoDB 或 Amazon DynamoDB Streams 的成功请求的延迟。   TimeToLiveDeletedItemCount 指定时间段内按存活时间 (TTL) 删除的项目数。    以上为一些常用的表指标(Endpoint作用域),它们也会被聚合到账户指标中。这些指标一般用于分析数据库的性能,用户可以通过它们判断出数据库配置的合理程度。例如,用户可以通过ConsumedReadCapicityUnits / ConsumedReadCapicityUnits,跟踪预置吞吐量的使用,从而判断表或账户的预制吞吐量的合理性。关于预置吞吐量,请参见读/写容量模式。\n   指标名称 含义     UserErrors 在指定时间段内生成 HTTP 400 状态代码的对 DynamoDB 或 Amazon DynamoDB Streams 的请求。HTTP 400 通常表示客户端错误,如参数组合无效,尝试更新不存在的表或请求签名错误。   SystemErrors 在指定的时间段内生成 HTTP 500 状态代码的对 DynamoDB 或 Amazon DynamoDB Streams 的请求。HTTP 500 通常指示内部服务错误。   ThrottledRequests 超出资源(如表或索引)预置吞吐量限制的 DynamoDB 请求。   TransactionConflict 由于同一项目的并发请求之间的事务性冲突而被拒绝的项目级请求。    以上为一些常用的错误指标,其中UserErrors为用户级别指标,其余为表级别指标。用户可以在这些指标上设置告警,如果警告出现,那么可能说明数据库的使用出现了一些问题,需要用户自行查看验证。\n注意 SkyWalking对于DynamoDB的指标选取直接来源于CloudWatch metrics, 您也可以通过CloudWatch metrics doc来获取指标详细信息。\nDemo 在本节中,我们将演示如何利用terraform创建一个DynamoDB表,以及可以产生指标流的其他AWS服务,并部署Skywalking完成指标收集。\n首先,您需要一个正在运行的网关实例,例如 NGINX,它负责接收AWS传来的指标流并且转发到aws-firehose-receiver。注意, 网关需要配置证书以便接受HTTPS协议的请求。\n下面是一个NGINX的示例配置。配置不要求完全一致,只要能将收到的HTTPS请求发送到oap所在host:12801/aws/firehose/metrics即可。\nserver { listen 443 ssl; ssl_certificate /crt/test.pem; ssl_certificate_key /crt/test.key; ssl_session_timeout 5m; ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4; ssl_protocols TLSv1 TLSv1.1 TLSv1.2; ssl_prefer_server_ciphers on; location /aws/firehose/metrics { proxy_pass http://test.xyz:12801/aws/firehose/metrics; } } 部署SkyWalking SkyWalking的部署方式有很多种,您可以直接从release页面中直接获取。\n当然,如果您更习惯于 Kubernetes,您也可以从SkyWalking-kubernetes找到相应部署方式。\n请注意,无论使用哪种部署方式,请确保OAP和UI的版本为9.4.0以上,并且需要开放12801端口。\n下面是一个使用helm指令部署的示例:\nexport SKYWALKING_RELEASE_VERSION=4.3.0 export SKYWALKING_RELEASE_NAME=skywalking export SKYWALKING_RELEASE_NAMESPACE=default helm install \u0026quot;${SKYWALKING_RELEASE_NAME}\u0026quot; \\ oci://registry-1.docker.io/apache/skywalking-helm \\ --version \u0026quot;${SKYWALKING_RELEASE_VERSION}\u0026quot; \\ 
-n \u0026quot;${SKYWALKING_RELEASE_NAMESPACE}\u0026quot; \\ --set oap.image.tag=9.4.0 \\ --set oap.storageType=elasticsearch \\ --set ui.image.tag=9.4.0 \\ --set oap.ports.firehose=12801 开启对应AWS服务 terraform 配置文件如下(实例修改于Terraform Registry - kinesis_firehose_delivery_stream):\n terraform 配置文件  provider \u0026quot;aws\u0026quot; { region = \u0026quot;ap-northeast-1\u0026quot; access_key = \u0026quot;在这里填入您的access_key\u0026quot; secret_key = \u0026quot;在这里填入您的secret_key\u0026quot; } resource \u0026quot;aws_dynamodb_table\u0026quot; \u0026quot;basic-dynamodb-table\u0026quot; { name = \u0026quot;GameScores\u0026quot; billing_mode = \u0026quot;PROVISIONED\u0026quot; read_capacity = 20 write_capacity = 20 hash_key = \u0026quot;UserId\u0026quot; range_key = \u0026quot;GameTitle\u0026quot; attribute { name = \u0026quot;UserId\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;GameTitle\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;TopScore\u0026quot; type = \u0026quot;N\u0026quot; } ttl { attribute_name = \u0026quot;TimeToExist\u0026quot; enabled = true } global_secondary_index { name = \u0026quot;GameTitleIndex\u0026quot; hash_key = \u0026quot;GameTitle\u0026quot; range_key = \u0026quot;TopScore\u0026quot; write_capacity = 10 read_capacity = 10 projection_type = \u0026quot;INCLUDE\u0026quot; non_key_attributes = [\u0026quot;UserId\u0026quot;] } tags = { Name = \u0026quot;dynamodb-table-1\u0026quot; Environment = \u0026quot;production\u0026quot; } } resource \u0026quot;aws_cloudwatch_metric_stream\u0026quot; \u0026quot;main\u0026quot; { name = \u0026quot;my-metric-stream\u0026quot; role_arn = aws_iam_role.metric_stream_to_firehose.arn firehose_arn = aws_kinesis_firehose_delivery_stream.http_stream.arn output_format = \u0026quot;opentelemetry0.7\u0026quot; include_filter { namespace = \u0026quot;AWS/DynamoDB\u0026quot; } } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;streams_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;streams.metrics.cloudwatch.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;metric_stream_to_firehose_role\u0026quot; assume_role_policy = data.aws_iam_policy_document.streams_assume_role.json } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;firehose:PutRecord\u0026quot;, \u0026quot;firehose:PutRecordBatch\u0026quot;, ] resources = [aws_kinesis_firehose_delivery_stream.http_stream.arn] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.metric_stream_to_firehose.id policy = data.aws_iam_policy_document.metric_stream_to_firehose.json } resource \u0026quot;aws_s3_bucket\u0026quot; \u0026quot;bucket\u0026quot; { bucket = \u0026quot;metric-stream-test-bucket\u0026quot; } resource \u0026quot;aws_s3_bucket_acl\u0026quot; \u0026quot;bucket_acl\u0026quot; { bucket = aws_s3_bucket.bucket.id acl = \u0026quot;private\u0026quot; } data 
\u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;firehose.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { assume_role_policy = data.aws_iam_policy_document.firehose_assume_role.json } data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;s3:AbortMultipartUpload\u0026quot;, \u0026quot;s3:GetBucketLocation\u0026quot;, \u0026quot;s3:GetObject\u0026quot;, \u0026quot;s3:ListBucket\u0026quot;, \u0026quot;s3:ListBucketMultipartUploads\u0026quot;, \u0026quot;s3:PutObject\u0026quot;, ] resources = [ aws_s3_bucket.bucket.arn, \u0026quot;${aws_s3_bucket.bucket.arn}/*\u0026quot;, ] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.firehose_to_s3.id policy = data.aws_iam_policy_document.firehose_to_s3.json } resource \u0026quot;aws_kinesis_firehose_delivery_stream\u0026quot; \u0026quot;http_stream\u0026quot; { name = \u0026quot;metric-stream-test-stream\u0026quot; destination = \u0026quot;http_endpoint\u0026quot; http_endpoint_configuration { name = \u0026quot;test_http_endpoint\u0026quot; url = \u0026quot;这里填入Gateway的url\u0026quot; role_arn = aws_iam_role.firehose_to_s3.arn } s3_configuration { role_arn = aws_iam_role.firehose_to_s3.arn bucket_arn = aws_s3_bucket.bucket.arn } }  使用步骤:\n1.获取AWS账户的access_key以及secret_key。( 关于如何获取,请参考:create-access-key )\n2.将上一步中获取的access_key与secret_key填入对应位置,并将您的网关对应 url 填入 aws_kinesis_firehose_delivery_stream 配置的对应位置中。\n3.复制以上内容并保存到main.tf文件中。\n4.在对应路径下执行以下代码。\nterraform init terraform apply 至此,需要的AWS服务已全部建立成功,您可以检查您的控制台,查看服务是否成功创建。\n完成! 如果以上步骤全部成功,请耐心等待约五分钟。之后您可以访问SkyWalking UI,查看指标变动情况\n目前,SkyWalking 默认收集的指标展示如下:\n账户指标:\n表指标:\n现已支持的服务 目前SkyWalking官方支持EKS,S3,DynamoDB监控。 用户也参考 OpenTelemetry receiver 配置OTEL rules来收集,计算AWS其他服务的CloudWatch metrics,并且通过自定义dashboard展示。\n相关的资料  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","excerpt":"背景 Apache SkyWalking 是一个开源应用性能管理系统,帮助用户收集和聚合日志、追踪、指标和事件,并在 UI 上显示。从 OAP 9.4.0 开始,SkyWalking 新增了 AWS …","ref":"/zh/2023-03-13-skywalking-aws-dynamodb/","title":"使用SkyWalking监控DynamoDB"},{"body":"SKyWalking OAP\u0026rsquo;s existing OpenTelemetry receiver can receive metrics through the OTLP protocol, and use MAL to analyze related metrics in real time. Starting from OAP 9.4.0, SkyWalking has added an AWS Firehose receiver to receive and analyze CloudWatch metrics data. This article will take EKS and S3 as examples to introduce the process of SkyWalking OAP receiving and analyzing the indicator data of AWS services.\nEKS OpenTelemetry Collector OpenTelemetry (OTel) is a series of tools, APIs, and SDKs that can generate, collect, and export telemetry data, such as metrics, logs, and traces. OTel Collector is mainly responsible for collecting, processing, and exporting. For telemetry data, Collector consists of the following main components:\n Receiver: Responsible for obtaining telemetry data, different receivers support different data sources, such as prometheus, kafka, otlp. 
Processor: Processes data between the receiver and the exporter, such as adding or deleting attributes. Exporter: Responsible for sending data to different backends, such as kafka or SkyWalking OAP (via OTLP). Service: Defines which configured components are enabled and how they are wired into pipelines; only components listed here are enabled.  OpenTelemetry Protocol Specification (OTLP) OTLP mainly describes how telemetry data is delivered and received over the gRPC and HTTP protocols. The OpenTelemetry receiver of SkyWalking OAP implements the OTLP/gRPC protocol, so metric data can be exported to OAP through the OTLP/gRPC exporter. Usually the data flow of a Collector is as follows:\nMonitor EKS with OTel EKS monitoring is realized through OTel. You only need to deploy the OpenTelemetry Collector in the EKS cluster as a DaemonSet \u0026ndash; use the AWS Container Insights Receiver as the receiver, and set the address of the otlp exporter to the address of OAP. In addition, note that OAP uses the attribute job_name: aws-cloud-eks-monitoring to identify EKS metrics, so a processor must be configured in the collector to add this attribute.\nOTel Collector configuration demo
extensions:
  health_check:
receivers:
  awscontainerinsightreceiver:
processors:
  # To enable OAP to correctly identify EKS metrics, add the job_name attribute
  resource/job-name:
    attributes:
      - key: job_name
        value: aws-cloud-eks-monitoring
        action: insert
# Specify OAP as exporters
exporters:
  otlp:
    endpoint: oap-service:11800
    tls:
      insecure: true
  logging:
    loglevel: debug
service:
  pipelines:
    metrics:
      receivers: [awscontainerinsightreceiver]
      processors: [resource/job-name]
      exporters: [otlp,logging]
  extensions: [health_check]
By default, SkyWalking OAP aggregates network, disk, CPU and other related metric data across the three dimensions of Node, Pod, and Service.
Only part of the content is shown here.\nPod dimensions Service dimensions EKS monitoring complete configuration  Click here to view complete k8s resource configuration  apiVersion:v1kind:ServiceAccountmetadata:name:aws-otel-sanamespace:aws-otel-eks---kind:ClusterRoleapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-rolerules:- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;pods\u0026#34;,\u0026#34;nodes\u0026#34;,\u0026#34;endpoints\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;apps\u0026#34;]resources:[\u0026#34;replicasets\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;batch\u0026#34;]resources:[\u0026#34;jobs\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/proxy\u0026#34;]verbs:[\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/stats\u0026#34;,\u0026#34;configmaps\u0026#34;,\u0026#34;events\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;configmaps\u0026#34;]resourceNames:[\u0026#34;otel-container-insight-clusterleader\u0026#34;]verbs:[\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]- apiGroups:[\u0026#34;coordination.k8s.io\u0026#34;]resources:[\u0026#34;leases\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]---kind:ClusterRoleBindingapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-role-bindingsubjects:- kind:ServiceAccountname:aws-otel-sanamespace:aws-otel-eksroleRef:kind:ClusterRolename:aoc-agent-roleapiGroup:rbac.authorization.k8s.io---apiVersion:v1kind:ConfigMapmetadata:name:otel-agent-confnamespace:aws-otel-ekslabels:app:opentelemetrycomponent:otel-agent-confdata:otel-agent-config:|extensions: health_check: receivers: awscontainerinsightreceiver: processors: resource/job-name: attributes: - key: job_name value: aws-cloud-eks-monitoring action: insert exporters: otlp: endpoint: oap-service:11800 tls: insecure: true logging: loglevel: debug service: pipelines: metrics: receivers: [awscontainerinsightreceiver] processors: [resource/job-name] exporters: [otlp,logging] extensions: [health_check]---apiVersion:apps/v1kind:DaemonSetmetadata:name:aws-otel-eks-cinamespace:aws-otel-eksspec:selector:matchLabels:name:aws-otel-eks-citemplate:metadata:labels:name:aws-otel-eks-cispec:containers:- name:aws-otel-collectorimage:amazon/aws-otel-collector:v0.23.0env:# Specify region- name:AWS_REGIONvalue:\u0026#34;ap-northeast-1\u0026#34;- name:K8S_NODE_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:HOST_IPvalueFrom:fieldRef:fieldPath:status.hostIP- name:HOST_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:K8S_NAMESPACEvalueFrom:fieldRef:fieldPath:metadata.namespaceimagePullPolicy:Alwayscommand:- \u0026#34;/awscollector\u0026#34;- \u0026#34;--config=/conf/otel-agent-config.yaml\u0026#34;volumeMounts:- name:rootfsmountPath:/rootfsreadOnly:true- name:dockersockmountPath:/var/run/docker.sockreadOnly:true- name:varlibdockermountPath:/var/lib/dockerreadOnly:true- name:containerdsockmountPath:/run/containerd/containerd.sockreadOnly:true- name:sysmountPath:/sysreadOnly:true- name:devdiskmountPath:/dev/diskreadOnly:true- name:otel-agent-config-volmountPath:/conf- name:otel-output-vol mountPath:/otel-outputresources:limits:cpu:200mmemory:200Mirequests:cpu:200mmemory:200Mivolumes:- configMap:name:otel-agent-confitems:- 
key:otel-agent-configpath:otel-agent-config.yamlname:otel-agent-config-vol- name:rootfshostPath:path:/- name:dockersockhostPath:path:/var/run/docker.sock- name:varlibdockerhostPath:path:/var/lib/docker- name:containerdsockhostPath:path:/run/containerd/containerd.sock- name:syshostPath:path:/sys- name:devdiskhostPath:path:/dev/disk/- name:otel-output-vol hostPath:path:/otel-outputserviceAccountName:aws-otel-sa S3 Amazon CloudWatch Amazon CloudWatch is a monitoring service provided by AWS. It is responsible for collecting indicator data of AWS services and resources. CloudWatch metrics stream is responsible for converting indicator data into stream processing data, and supports output in two formats: json and OTel v0.7.0.\nAmazon Kinesis Data Firehose (Firehose) Firehose is an extract, transform, load (ETL) service that reliably captures, transforms, and serves streaming data into data lakes, data stores (such as S3), and analytics services.\nTo ensure that external services can correctly receive indicator data, AWS provides Kinesis Data Firehose HTTP Endpoint Delivery Request and Response Specifications (Firehose Specifications) . Firhose pushes Json data by POST\nJson data example { \u0026#34;requestId\u0026#34;: \u0026#34;ed4acda5-034f-9f42-bba1-f29aea6d7d8f\u0026#34;, \u0026#34;timestamp\u0026#34;: 1578090901599 \u0026#34;records\u0026#34;: [ { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8=\u0026#34; }, { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8gd29ybGQ=\u0026#34; } ] }  requestId: Request id, which can achieve deduplication and debugging purposes. timestamp: Firehose generated the timestamp of the request (in milliseconds). records: Actual delivery records  data: The delivered data, encoded in base64, can be in json or OTel v0.7.0 format, depending on the format of CloudWatch data (described later). Skywalking currently supports OTel v0.7.0 format.    aws-firehose-receiver aws-firehose-receiver provides an HTTP Endpoint that implements Firehose Specifications: /aws/firehose/metrics. The figure below shows the data flow of monitoring DynamoDB, S3 and other services through CloudWatch, and using Firehose to send indicator data to SKywalking OAP.\nStep-by-step setup of S3 monitoring  Enter the S3 console and create a filter forRequest metrics: Amazon S3 \u0026gt;\u0026gt; Buckets \u0026gt;\u0026gt; (Your Bucket) \u0026gt;\u0026gt; Metrics \u0026gt;\u0026gt; metrics \u0026gt;\u0026gt; View additional charts \u0026gt;\u0026gt; Request metrics  Enter the Amazon Kinesis console, create a delivery stream, Source select Direct PUT, Destination select HTTP Endpoint. And set HTTP endpoint URL to https://your_domain/aws/firehose/metrics. Other configuration items:   Buffer hints: Set the size and period of the cache Access key just matches the AccessKey in aws-firehose-receiver Retry duration: Retry period Backup settings: Backup settings, optionally backup the posted data to S3 at the same time.  Enter the CloudWatch console Streams and click Create CloudWatch Stream. And Select your Kinesis Data Firehose stream configure the delivery stream created in the second step in the item. Note that it needs to be set Change output format to OpenTelemetry v0.7.0.  At this point, the S3 monitoring configuration settings are complete. The S3 metrics currently collected by SkyWalking by default are shown below:\nOther service Currently SkyWalking officially supports EKS, S3, DynamoDB monitoring. 
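As a short aside: the records[].data values in the Firehose delivery request shown earlier are plain base64, so a receiver (or a debugging script) can decode them directly. A minimal, illustrative Python sketch using the two sample records from the specification example above:

import base64

# The two sample values come from the Firehose request example above.
records = [{'data': 'aGVsbG8='}, {'data': 'aGVsbG8gd29ybGQ='}]

for record in records:
    payload = base64.b64decode(record['data'])
    # Real deliveries carry OTel v0.7.0 (or JSON) payloads rather than text;
    # these samples decode to b'hello' and b'hello world'.
    print(payload)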
Users also refer to the OpenTelemetry receiver to configure OTel rules to collect and analyze CloudWatch metrics of other AWS services, and display them through a custom dashboard.\nMaterial  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","excerpt":"SKyWalking OAP\u0026rsquo;s existing OpenTelemetry receiver can receive metrics through the OTLP …","ref":"/blog/2023-03-12-skywalking-aws-s3-eks/","title":"Monitoring AWS EKS and S3 with SkyWalking"},{"body":"SKyWalking OAP 现有的 OpenTelemetry receiver 可以通过OTLP协议接收指标(metrics),并且使用MAL实时分析相关指标。从OAP 9.4.0开始,SkyWalking 新增了AWS Firehose receiver,用来接收,分析CloudWatch metrics数据。本文将以EKS和S3为例介绍SkyWalking OAP 接收,分析 AWS 服务的指标数据的过程\nEKS OpenTelemetry Collector OpenTelemetry (OTel) 是一系列tools,API,SDK,可以生成,收集,导出遥测数据,比如 指标(metrics),日志(logs)和链路信息(traces),而OTel Collector主要负责收集、处理和导出遥测数据,Collector由以下主要组件组成:\n receiver: 负责获取遥测数据,不同的receiver支持不同的数据源,比如prometheus ,kafka,otlp, processor:在receiver和exporter之间处理数据,比如增加或者删除attributes, exporter:负责发送数据到不同的后端,比如kafka,SkyWalking OAP(通过OTLP) service: 作为一个单元配置启用的组件,只有配置的组件才会被启用  OpenTelemetry Protocol Specification(OTLP) OTLP 主要描述了如何通过gRPC,HTTP协议接收(拉取)指标数据。SKyWalking OAP的 OpenTelemetry receiver 实现了OTLP/gRPC协议,通过OTLP/gRPC exporter可以将指标数据导出到OAP。通常一个Collector的数据流向如下:\n使用OTel监控EKS EKS的监控就是通过OTel实现的,只需在EKS集群中以DaemonSet  的方式部署 OpenTelemetry Collector,使用 AWS Container Insights Receiver 作为receiver,并且设置otlp exporter的地址为OAP的的地址即可。另外需要注意的是OAP根据attribute job_name : aws-cloud-eks-monitoring 作为EKS metrics的标识,所以还需要再collector中配置一个processor来增加这个属性\nOTel Collector配置demo extensions:health_check:receivers:awscontainerinsightreceiver:processors:# 为了OAP能够正确识别EKS metrics,增加job_name attributeresource/job-name:attributes:- key:job_name value:aws-cloud-eks-monitoringaction:insert # 指定OAP作为 exportersexporters:otlp:endpoint:oap-service:11800 tls:insecure:truelogging:loglevel:debug service:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]SkyWalking OAP 默认统计 Node,Pod,Service 三个维度的网络、磁盘、CPU等相关的指标数据,这里仅展示了部分内容\nPod 维度 Service 维度 EKS监控完整配置  Click here to view complete k8s resource configuration  apiVersion:v1kind:ServiceAccountmetadata:name:aws-otel-sanamespace:aws-otel-eks---kind:ClusterRoleapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-rolerules:- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;pods\u0026#34;,\u0026#34;nodes\u0026#34;,\u0026#34;endpoints\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;apps\u0026#34;]resources:[\u0026#34;replicasets\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;batch\u0026#34;]resources:[\u0026#34;jobs\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/proxy\u0026#34;]verbs:[\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/stats\u0026#34;,\u0026#34;configmaps\u0026#34;,\u0026#34;events\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;configmaps\u0026#34;]resourceNames:[\u0026#34;otel-container-insight-clusterleader\u0026#34;]verbs:[\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]- 
apiGroups:[\u0026#34;coordination.k8s.io\u0026#34;]resources:[\u0026#34;leases\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]---kind:ClusterRoleBindingapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-role-bindingsubjects:- kind:ServiceAccountname:aws-otel-sanamespace:aws-otel-eksroleRef:kind:ClusterRolename:aoc-agent-roleapiGroup:rbac.authorization.k8s.io---apiVersion:v1kind:ConfigMapmetadata:name:otel-agent-confnamespace:aws-otel-ekslabels:app:opentelemetrycomponent:otel-agent-confdata:otel-agent-config:|extensions: health_check: receivers: awscontainerinsightreceiver: processors: resource/job-name: attributes: - key: job_name value: aws-cloud-eks-monitoring action: insert exporters: otlp: endpoint: oap-service:11800 tls: insecure: true logging: loglevel: debug service: pipelines: metrics: receivers: [awscontainerinsightreceiver] processors: [resource/job-name] exporters: [otlp,logging] extensions: [health_check]---apiVersion:apps/v1kind:DaemonSetmetadata:name:aws-otel-eks-cinamespace:aws-otel-eksspec:selector:matchLabels:name:aws-otel-eks-citemplate:metadata:labels:name:aws-otel-eks-cispec:containers:- name:aws-otel-collectorimage:amazon/aws-otel-collector:v0.23.0env:# Specify region- name:AWS_REGIONvalue:\u0026#34;ap-northeast-1\u0026#34;- name:K8S_NODE_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:HOST_IPvalueFrom:fieldRef:fieldPath:status.hostIP- name:HOST_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:K8S_NAMESPACEvalueFrom:fieldRef:fieldPath:metadata.namespaceimagePullPolicy:Alwayscommand:- \u0026#34;/awscollector\u0026#34;- \u0026#34;--config=/conf/otel-agent-config.yaml\u0026#34;volumeMounts:- name:rootfsmountPath:/rootfsreadOnly:true- name:dockersockmountPath:/var/run/docker.sockreadOnly:true- name:varlibdockermountPath:/var/lib/dockerreadOnly:true- name:containerdsockmountPath:/run/containerd/containerd.sockreadOnly:true- name:sysmountPath:/sysreadOnly:true- name:devdiskmountPath:/dev/diskreadOnly:true- name:otel-agent-config-volmountPath:/conf- name:otel-output-vol mountPath:/otel-outputresources:limits:cpu:200mmemory:200Mirequests:cpu:200mmemory:200Mivolumes:- configMap:name:otel-agent-confitems:- key:otel-agent-configpath:otel-agent-config.yamlname:otel-agent-config-vol- name:rootfshostPath:path:/- name:dockersockhostPath:path:/var/run/docker.sock- name:varlibdockerhostPath:path:/var/lib/docker- name:containerdsockhostPath:path:/run/containerd/containerd.sock- name:syshostPath:path:/sys- name:devdiskhostPath:path:/dev/disk/- name:otel-output-vol hostPath:path:/otel-outputserviceAccountName:aws-otel-sa S3 Amazon CloudWatch Amazon CloudWatch 是AWS提供的监控服务,负责收集AWS 服务,资源的指标数据,CloudWatch metrics stream 负责将指标数据转换为流式处理数据,支持输出json,OTel v0.7.0 两种格式。\nAmazon Kinesis Data Firehose (Firehose) Firehose 是一项提取、转换、加载(ETL)服务,可以将流式处理数据以可靠方式捕获、转换和提供到数据湖、数据存储(比如S3)和分析服务中。\n为了确保外部服务能够正确地接收指标数据, AWS提供了 Kinesis Data Firehose HTTP Endpoint Delivery Request and Response Specifications (Firehose Specifications)。Firhose以POST的方式推送Json数据\nJson数据示例 { \u0026#34;requestId\u0026#34;: \u0026#34;ed4acda5-034f-9f42-bba1-f29aea6d7d8f\u0026#34;, \u0026#34;timestamp\u0026#34;: 1578090901599 \u0026#34;records\u0026#34;: [ { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8=\u0026#34; }, { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8gd29ybGQ=\u0026#34; } ] }  requestId: 请求id,可以实现去重,debug目的 timestamp: Firehose 产生该请求的时间戳(毫秒) records: 实际投递的记录  data: 投递的数据,以base64编码数据,可以是json或者OTel v0.7.0格式,取决于CloudWatch数据数据的格式(稍后会有描述)。Skywalking目前支持OTel v0.7.0格式    
aws-firehose-receiver aws-firehose-receiver 就是提供了一个实现了Firehose Specifications的HTTP Endpoint:/aws/firehose/metrics。下图展示了通过CloudWatch监控DynamoDB,S3等服务,并利用Firehose将指标数据发送到SKywalking OAP的数据流向\n从上图可以看到 aws-firehose-receiver 将数据转换后交由 OpenTelemetry-receiver处理 ,所以 OpenTelemetry receiver 中配置的 otel-rules 同样可以适用CloudWatch metrics\n注意  因为 Kinesis Data Firehose 要求,必须在AWS Firehose receiver 前放置一个Gateway用来建立HTTPS链接。aws-firehose-receiver 将从v9.5.0开始支持HTTPS协议 TLS 证书必须是CA签发的  逐步设置S3监控  进入 S3控制台,通过 Amazon S3 \u0026gt;\u0026gt; Buckets \u0026gt;\u0026gt; (Your Bucket) \u0026gt;\u0026gt; Metrics \u0026gt;\u0026gt; metrics \u0026gt;\u0026gt; View additional charts \u0026gt;\u0026gt; Request metrics 为 Request metrics 创建filter  进入Amazon Kinesis 控制台,创建一个delivery stream, Source选择 Direct PUT, Destination 选择 HTTP Endpoint. 并且设置HTTP endpoint URL 为 https://your_domain/aws/firehose/metrics。其他配置项:  Buffer hints: 设置缓存的大小和周期 Access key 与aws-firehose-receiver中的AccessKey一致即可 Retry duration: 重试周期 Backup settings: 备份设置,可选地将投递的数据同时备份到S3。    进入 CloudWatch控制台,Streams 标签创建CloudWatch Stream。并且在Select your Kinesis Data Firehose stream项中配置第二步创建的delivery stream。注意需要设置Change output format 为 OpenTelemetry v0.7.0。  至此,S3监控配置设置完成。目前SkyWalking默认收集的S3 metrics 展示如下\n其他服务 目前SkyWalking官方支持EKS,S3,DynamoDB监控。 用户也参考 OpenTelemetry receiver 配置OTel rules来收集,分析AWS其他服务的CloudWatch metrics,并且通过自定义dashboard展示\n资料  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","excerpt":"SKyWalking OAP 现有的 OpenTelemetry receiver 可以通过OTLP协议接收指标(metrics),并且使用MAL实时分析相关指标。从OAP 9.4.0开 …","ref":"/zh/2023-03-12-skywalking-aws-s3-eks/","title":"使用SkyWalking监控AWS EKS和S3"},{"body":"SkyWalking Rust 0.6.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Refactor span object api to make it more friendly. by @jmjoy in https://github.com/apache/skywalking-rust/pull/52 Refactor management report and keep alive api. by @jmjoy in https://github.com/apache/skywalking-rust/pull/53 Use stream and completed for a bulk to collect for grpc reporter. by @jmjoy in https://github.com/apache/skywalking-rust/pull/54 Add sub components licenses in dist material. by @jmjoy in https://github.com/apache/skywalking-rust/pull/55 Bump to 0.6.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/56  ","excerpt":"SkyWalking Rust 0.6.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-6-0/","title":"Release Apache SkyWalking Rust 0.6.0"},{"body":"SkyWalking 9.4.0 is released. Go to downloads page to find release tars.\nPromQL and Grafana Support Zipkin Lens UI Bundled AWS S3 and DynamoDB monitoring Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom version to 29. Bump up Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Adds Micrometer as a new component.(ID=141) Refactor session cache in MetricsPersistentWorker. Cache enhancement - don\u0026rsquo;t read new metrics from database in minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. 
// 1.3 the OAP cluster is rebalanced due to scaling // (2) the metrics are from the time after the timeOfLatestStabilitySts // (3) the metrics don't exist in the cache // the kernel should NOT try to load it from the database. // // Notice, about condition (2), // for the specific minute of booted successfully, the metrics are expected to load from database when // it doesn't exist in the cache.  Remove the offset of metric session timeout according to worker creation sequence. Correct MetricsExtension annotations declarations in manual entities. Support component IDs' priority in process relation metrics. Remove abandon logic in MergableBufferedData, which caused unexpected no-update. Fix miss set LastUpdateTimestamp that caused the metrics session to expire. Rename MAL rule spring-sleuth.yaml to spring-micrometer.yaml. Fix memory leak in Zipkin API. Remove the dependency of refresh_interval of ElasticSearch indices from elasticsearch/flushInterval config. Now, it uses core/persistentPeriod + 5s as refresh_interval for all indices instead. Change elasticsearch/flushInterval to 5s(was 15s). Optimize flushInterval of ElasticSearch BulkProcessor to avoid extra periodical flush in the continuous bulk streams. An unexpected dot is added when exp is a pure metric name and expPrefix != null. Support monitoring MariaDB. Remove measure/stream specific interval settings in BanyanDB. Add global-specific settings used to override global configurations (e.g segmentIntervalDays, blockIntervalHours) in BanyanDB. Use TTL-driven interval settings for the measure-default group in BanyanDB. Fix wrong group of non time-relative metadata in BanyanDB. Refactor StorageData#id to the new StorageID object from a String type. Support multiple component IDs in the service topology level. Add ElasticSearch.Keyword annotation to declare the target field type as keyword. [Breaking Change] Column component_id of service_relation_client_side and service_relation_server_side have been replaced by component_ids. Support priority definition in the component-libraries.yml. Enhance service topology query. When there are multiple components detected from the server side, the component type of the node would be determined by the priority, which was random in the previous release. Remove component_id from service_instance_relation_client_side and service_instance_relation_server_side. Make the satellite E2E test more stable. Add Istio 1.16 to test matrix. Register ValueColumn as Tag for Record in BanyanDB storage plugin. Bump up Netty to 4.1.86. Remove unnecessary additional columns when storage is in logical sharding mode. The cluster coordinator support watch mechanism for notifying RemoteClientManager and ServerStatusService. Fix ServiceMeshServiceDispatcher overwrite ServiceDispatcher debug file when open SW_OAL_ENGINE_DEBUG. Use groupBy and in operators to optimize topology query for BanyanDB storage plugin. Support server status watcher for MetricsPersistentWorker to check the metrics whether required initialization. Fix the meter value are not correct when using sumPerMinLabeld or sumHistogramPercentile MAL function. Fix cannot display attached events when using Zipkin Lens UI query traces. Remove time_bucket for both Stream and Measure kinds in BanyanDB plugin. Merge TIME_BUCKET of Metrics and Record into StorageData. Support no layer in the listServices query. Fix time_bucket of ServiceTraffic not set correctly in slowSql of MAL. Correct the TopN record query DAO of BanyanDB. Tweak interval settings of BanyanDB. 
Support monitoring AWS Cloud EKS. Bump BanyanDB Java client to 0.3.0-rc1. Remove id tag from measures. Add Banyandb.MeasureField to mark a column as a BanyanDB Measure field. Add BanyanDB.StoreIDTag to store a process\u0026rsquo;s id for searching. [Breaking Change] The supported version of ShardingSphere-Proxy is upgraded from 5.1.2 to 5.3.1. Due to the changes of ShardingSphere\u0026rsquo;s API, versions before 5.3.1 are not compatible. Add the eBPF network profiling E2E Test in the per storage. Fix TCP service instances are lack of instance properties like pod and namespace, which causes Pod log not to work for TCP workloads. Add Python HBase happybase module component ID(94). Fix gRPC alarm cannot update settings from dynamic configuration source. Add batchOfBytes configuration to limit the size of bulk flush. Add Python Websocket module component ID(7018). [Optional] Optimize single trace query performance by customizing routing in ElasticSearch. SkyWalking trace segments and Zipkin spans are using trace ID for routing. This is OFF by default, controlled by storage/elasticsearch/enableCustomRouting. Enhance OAP HTTP server to support HTTPS Remove handler scan in otel receiver, manual initialization instead Add aws-firehose-receiver to support collecting AWS CloudWatch metric(OpenTelemetry format). Notice, no HTTPS/TLS setup support. By following AWS Firehose request, it uses proxy request (https://... instead of /aws/firehose/metrics), there must be a proxy(Nginx, Envoy, etc.). Avoid Antlr dependencies' versions might be different in compile time and runtime. Now PrometheusMetricConverter#escapedName also support converting / to _. Add missing TCP throughput metrics. Refactor @Column annotation, swap Column#name and ElasticSearch.Column#columnAlias and rename ElasticSearch.Column#columnAlias to ElasticSearch.Column#legacyName. Add Python HTTPX module component ID(7019). Migrate tests from junit 4 to junit 5. Refactor http-based alarm plugins and extract common logic to HttpAlarmCallback. Support Amazon Simple Storage Service (Amazon S3) metrics monitoring Support process Sum metrics with AGGREGATION_TEMPORALITY_DELTA case Support Amazon DynamoDB monitoring. Support prometheus HTTP API and promQL. Scope in the Entity of Metrics query v1 protocol is not required and automatical correction. The scope is determined based on the metric itself. Add explicit ReadTimeout for ConsulConfigurationWatcher to avoid IllegalArgumentException: Cache watchInterval=10sec \u0026gt;= networkClientReadTimeout=10000ms. Fix DurationUtils.getDurationPoints exceed, when startTimeBucket equals endTimeBucket. Support process OpenTelemetry ExponentialHistogram metrics Add FreeRedis component ID(3018).  UI  Add Zipkin Lens UI to webapp, and proxy it to context path /zipkin. Migrate the build tool from vue cli to Vite4. Fix Instance Relation and Endpoint Relation dashboards show up. Add Micrometer icon. Update MySQL UI to support MariaDB. Add AWS menu for supporting AWS monitoring. Add missing FastAPI logo. Update the log details page to support the formatted display of JSON content. Fix build config. Avoid being unable to drag process nodes for the first time. Add node folder into ignore list. Add ElPopconfirm to component types. Add an iframe widget for zipkin UI. Optimize graph tooltips to make them more friendly. Bump json5 from 1.0.1 to 1.0.2. Add websockets icon. Implement independent mode for widgets. Bump http-cache-semantics from 4.1.0 to 4.1.1. Update menus for OpenFunction. 
Add auto fresh to widgets independent mode. Fix: clear trace ID on the Log and Trace widgets after using association. Fix: reset duration for query conditions after time range changes. Add AWS S3 menu. Refactor: optimize side bar component to make it more friendly. Fix: remove duplicate popup message for query result. Add logo for HTTPX. Refactor: optimize the attached events visualization in the trace widget. Update BanyanDB client to 0.3.1. Add AWS DynamoDB menu. Fix: add auto period to the independent mode for widgets. Optimize menus and add Windows monitoring menu. Add a calculation for the cpm5dAvg. add a cpm5d calculation. Fix data processing error in the eBPF profiling widget. Support for double quotes in SlowSQL statements. Fix: the wrong position of the menu when clicking the topology node.  Documentation  Remove Spring Sleuth docs, and add Spring MicroMeter Observations Analysis with the latest Java agent side enhancement. Update monitoring MySQL document to add the MariaDB part. Reorganize the protocols docs to a more clear API docs. Add documentation about replacing Zipkin server with SkyWalking OAP. Add Lens UI relative docs in Zipkin trace section. Add Profiling APIs. Fix backend telemetry doc and so11y dashboard doc as the OAP Prometheus fetcher was removed since 9.3.0  All issues and pull requests are here\n","excerpt":"SkyWalking 9.4.0 is released. Go to downloads page to find release tars.\nPromQL and Grafana Support …","ref":"/events/release-apache-skywalking-apm-9.4.0/","title":"Release Apache SkyWalking APM 9.4.0"},{"body":"SkyWalking BanyanDB 0.3.1 is released. Go to downloads page to find release tars.\nBugs  Fix the broken of schema chain. Add a timeout to all go leaking checkers.  Chores  Bump golang.org/x/net from 0.2.0 to 0.7.0.  ","excerpt":"SkyWalking BanyanDB 0.3.1 is released. Go to downloads page to find release tars.\nBugs  Fix the …","ref":"/events/release-apache-skywalking-banyandb-0-3-1/","title":"Release Apache SkyWalking BanyanDB 0.3.1"},{"body":"SkyWalking Python 1.0.0 is released! Go to downloads page to find release tars.\nPyPI Wheel: https://pypi.org/project/apache-skywalking/1.0.0/\nDockerHub Image: https://hub.docker.com/r/apache/skywalking-python\n  Important Notes and Breaking Changes:\n The new PVM metrics reported from Python agent requires SkyWalking OAP v9.3.0 to show out-of-the-box. BREAKING: Python 3.6 is no longer supported and may not function properly, Python 3.11 support is added and tested. BREAKING: A number of common configuration options and environment variables are renamed to follow the convention of Java agent, please check with the latest official documentation before upgrading. (#273, #282) https://skywalking.apache.org/docs/skywalking-python/v1.0.0/en/setup/configuration/ BREAKING: All agent core capabilities are now covered by test cases and enabled by default (Trace, Log, PVM runtime metrics, Profiler) BREAKING: DockerHub Python agent images since v1.0.0 will no longer include the run part in ENTRYPOINT [\u0026quot;sw-python\u0026quot;, \u0026quot;run\u0026quot;], user should prefix their command with [-d/--debug] run [-p/--prefork] \u0026lt;Command\u0026gt; for extra flexibility. 
Packaged wheel now provides a extra [all] option to support all three report protocols    Feature:\n Add support for Python 3.11 (#285) Add MeterReportService (gRPC, Kafka reporter) (default:enabled) (#231, #236, #241, #243) Add reporter for PVM runtime metrics (default:enabled) (#238, #247) Add Greenlet profiler (#246) Add test and support for Python Slim base images (#249) Add support for the tags of Virtual Cache for Redis (#263) Add a new configuration kafka_namespace to prefix the kafka topic names (#277) Add log reporter support for loguru (#276) Add experimental support for explicit os.fork(), restarts agent in forked process (#286) Add experimental sw-python CLI sw-python run [-p] flag (-p/\u0026ndash;prefork) to enable non-intrusive uWSGI and Gunicorn postfork support (#288)    Plugins:\n Add aioredis, aiormq, amqp, asyncpg, aio-pika, kombu RMQ plugins (#230 Missing test coverage) Add Confluent Kafka plugin (#233 Missing test coverage) Add HBase plugin Python HappyBase model (#266) Add FastAPI plugin websocket protocol support (#269) Add Websockets (client) plugin (#269) Add HTTPX plugin (#283)    Fixes:\n Allow RabbitMQ BlockingChannel.basic_consume() to link with outgoing spans (#224) Fix RabbitMQ basic_get bug (#225, #226) Fix case when tornado socket name is None (#227) Fix misspelled text \u0026ldquo;PostgreSLQ\u0026rdquo; -\u0026gt; \u0026ldquo;PostgreSQL\u0026rdquo; in Postgres-related plugins (#234) Make sure span.component initialized as Unknown rather than 0 (#242) Ignore websocket connections inside fastapi temporarily (#244, issue#9724) Fix Kafka-python plugin SkyWalking self reporter ignore condition (#249) Add primary endpoint in tracing context and endpoint info to log reporter (#261) Enforce tag class type conversion (#262) Fix sw_logging (log reporter) potentially throw exception leading to traceback confusion (#267) Avoid reporting meaningless tracecontext with logs when there\u0026rsquo;s no active span, UI will now show empty traceID (#272) Fix exception handler in profile_context (#273) Add namespace suffix to service name (#275) Add periodical instance property report to prevent data loss (#279) Fix sw_logging when Logger.disabled is true (#281)    Docs:\n New documentation on how to test locally (#222) New documentation on the newly added meter reporter feature (#240) New documentation on the newly added greenlet profiler and the original threading profiler (#250) Overhaul documentation on development setup and testing (#249) Add tables to state currently supported features of Python agent. 
(#271) New configuration documentation generator (#273)    Others:\n Pin CI SkyWalking License Eye (#221) Fix dead link due to the \u0026lsquo;next\u0026rsquo; url change (#235) Pin CI SkyWalking Infra-E2E (#251) Sync OAP, SWCTL versions in E2E and fix test cases (#249) Overhaul development flow with Poetry (#249) Fix grpcio-tools generated message type (#253) Switch plugin tests to use slim Python images (#268) Add unit tests to sw_filters (#269)    New Contributors  @ZEALi made their first contribution in https://github.com/apache/skywalking-python/pull/242 @westarest made their first contribution in https://github.com/apache/skywalking-python/pull/246 @Jedore made their first contribution in https://github.com/apache/skywalking-python/pull/263 @alidisi made their first contribution in https://github.com/apache/skywalking-python/pull/266 @SheltonZSL made their first contribution in https://github.com/apache/skywalking-python/pull/275 @XinweiLyu made their first contribution in https://github.com/apache/skywalking-python/pull/283  Full Changelog: https://github.com/apache/skywalking-python/compare/v0.8.0...v1.0.0\n","excerpt":"SkyWalking Python 1.0.0 is released! Go to downloads page to find release tars.\nPyPI Wheel: …","ref":"/events/release-apache-skywalking-python-1-0-0/","title":"Release Apache SkyWalking Python 1.0.0"},{"body":"SkyWalking BanyanDB 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support 64-bit float type. Web Application. Close components in tsdb gracefully. Add TLS for the HTTP server. Use the table builder to compress data.  Bugs  Open blocks concurrently. Sync index writing and shard closing. TimestampRange query throws an exception if no data in this time range.  Chores  Fixes issues related to leaked goroutines. Add validations to APIs.  ","excerpt":"SkyWalking BanyanDB 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skywalking-banyandb-0-3-0/","title":"Release Apache SkyWalking BanyanDB 0.3.0"},{"body":"SkyWalking PHP 0.3.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Make explicit rust version requirement by @wu-sheng in https://github.com/apache/skywalking-php/pull/35 Update dependencies version limitation. by @jmjoy in https://github.com/apache/skywalking-php/pull/36 Startup 0.3.0 by @heyanlong in https://github.com/apache/skywalking-php/pull/37 Support PHP 8.2 by @heyanlong in https://github.com/apache/skywalking-php/pull/38 Fix php-fpm freeze after large amount of request. by @jmjoy in https://github.com/apache/skywalking-php/pull/39 Lock develop rust version to 1.65, upgrade deps. by @jmjoy in https://github.com/apache/skywalking-php/pull/41 Fix worker unexpected shutdown. by @jmjoy in https://github.com/apache/skywalking-php/pull/42 Update docs about installing rust. by @jmjoy in https://github.com/apache/skywalking-php/pull/43 Retry cargo test when failed in CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/44 Hack dtor for mysqli to cleanup resources. by @jmjoy in https://github.com/apache/skywalking-php/pull/45 Report instance properties and keep alive. by @jmjoy in https://github.com/apache/skywalking-php/pull/46 Add configuration option skywalking_agent.runtime_dir. by @jmjoy in https://github.com/apache/skywalking-php/pull/47 Add authentication support. by @jmjoy in https://github.com/apache/skywalking-php/pull/48 Support TLS. 
by @jmjoy in https://github.com/apache/skywalking-php/pull/49 Periodic reporting instance properties. by @jmjoy in https://github.com/apache/skywalking-php/pull/50 Bump to 0.3.0. by @jmjoy in https://github.com/apache/skywalking-php/pull/51  Breaking  Remove http:// scheme in skywalking_agent.server_addr.  New Contributors  @wu-sheng made their first contribution in https://github.com/apache/skywalking-php/pull/35  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.2.0...v0.3.0\nPECL https://pecl.php.net/package/skywalking_agent/0.3.0\n","excerpt":"SkyWalking PHP 0.3.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skwaylking-php-0-3-0/","title":"Release Apache SkyWalking PHP 0.3.0"},{"body":"SkyWalking Java Agent 8.14.0 is released. Go to downloads page to find release tars. Changes by Version\n8.14.0  Polish test framework to support arm64/v8 platforms Fix wrong config name plugin.toolkit.use_qualified_name_as_operation_name, and system variable name SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME:false. They were toolit. Rename JDBI to JDBC Support collecting dubbo thread pool metrics Bump up byte-buddy to 1.12.19 Upgrade agent test tools [Breaking Change] Compatible with 3.x and 4.x RabbitMQ Client, rename rabbitmq-5.x-plugin to rabbitmq-plugin Polish JDBC plugins to make DBType accurate Report the agent version to OAP as an instance attribute Polish jedis-4.x-plugin to change command to lowercase, which is consistent with jedis-2.x-3.x-plugin Add micronauthttpclient,micronauthttpserver,memcached,ehcache,guavacache,jedis,redisson plugin config properties to agent.config Add Micrometer Observation support Add tags mq.message.keys and mq.message.tags for RocketMQ producer span Clean the trace context which injected into Pulsar MessageImpl after the instance recycled Fix In the higher version of mysql-connector-java 8x, there is an error in the value of db.instance. Add support for KafkaClients 3.x. Support to customize the collect period of JVM relative metrics. Upgrade netty-codec-http2 to 4.1.86.Final. Put Agent-Version property reading in the premain stage to avoid deadlock when using jarsigner. Add a config agent.enable(default: true) to support disabling the agent through system property -Dskywalking.agent.disable=false or system environment variable setting SW_AGENT_ENABLE=false. Enhance redisson plugin to adopt uniform tags.  Documentation  Update Plugin-test.md, support string operators start with and end with Polish agent configurations doc to fix type error  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.14.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-14-0/","title":"Release Apache SkyWalking Java Agent 8.14.0"},{"body":"Background Apache SkyWalking is an open-source Application Performance Management system that helps users collect and aggregate logs, traces, metrics, and events for display on a UI. In the previous article, we introduced how to use Apache SkyWalking Rover to analyze the network performance issue in the service mesh environment. However, in business scenarios, users often rely on mature layer 7 protocols, such as HTTP, for interactions between systems. 
In this article, we will discuss how to use eBPF techniques to analyze performance bottlenecks of layer 7 protocols and how to enhance the tracing system using network sampling.
This article will show how to use Apache SkyWalking with eBPF to enhance metrics and traces in HTTP observability.
HTTP Protocol Analysis
HTTP is one of the most common Layer 7 protocols and is usually used to provide services to external parties and for inter-system communication. In the following sections, we will show how to identify and analyze the HTTP/1.x protocol.
Protocol Identification
In HTTP/1.x, the client and server communicate through a single file descriptor (FD) on each side. Figure 1 shows the communication process, which involves the following steps:
- Connect/accept: The client establishes a connection with the HTTP server, or the server accepts a connection from the client.
- Read/write (multiple times): The client or server reads and writes HTTP requests and responses. A single request-response pair occurs within the same connection on each side.
- Close: The client and server close the connection.
To obtain HTTP content, it is necessary to read it from the second step of this process. As defined in the RFC, the content is contained within the data of the Layer 4 protocol and can be obtained by parsing that data. A request and its response can be correlated because they both occur within the same connection on each side.
Figure 1: HTTP communication timeline.
HTTP Pipeline
HTTP pipelining is a feature of HTTP/1.1 that enables multiple HTTP requests to be sent over a single TCP connection without waiting for the corresponding responses. This feature matters for protocol analysis because the server must send its responses in the same order in which the requests arrived.
Figure 2 illustrates how this works. Consider the following scenario: an HTTP client sends multiple requests to a server, and the server responds by sending the HTTP responses in the same order as the requests. This means that the first request sent by the client will receive the first response from the server, the second request will receive the second response, and so on.
When designing HTTP parsing, we should follow this principle by adding request data to a list and removing the first item when parsing a response. This ensures that the responses are processed in the correct order.
Figure 2: HTTP/1.1 pipeline.
Metrics
Based on the identification of the HTTP content and the process topology diagram mentioned in the previous article, we can combine the two to generate process-to-process metrics data.
Figure 3 shows the metrics that are currently supported for analysis between two processes.
Based on the HTTP request and response data, we can analyze the following data:

| Metric Name | Type | Unit | Description |
|---|---|---|---|
| Request CPM (Calls Per Minute) | Counter | count | The HTTP request count |
| Response Status CPM (Calls Per Minute) | Counter | count | The count per HTTP response status code |
| Request Package Size | Counter/Histogram | Byte | The request package size |
| Response Package Size | Counter/Histogram | Byte | The response package size |
| Client Duration | Counter/Histogram | Millisecond | The duration of a single HTTP response on the client side |
| Server Duration | Counter/Histogram | Millisecond | The duration of a single HTTP response on the server side |

Figure 3: Process-to-process metrics.
HTTP and Trace
During the HTTP process, if we unpack the HTTP requests and responses from raw data, we can use this data to correlate with the existing tracing system.
Trace Context Identification
In order to track the flow of requests between multiple services, the trace system usually creates a trace context when a request enters a service and passes it along to other services during the request-response process. For example, when an HTTP request is sent to another server, the trace context is included in the request header.
Figure 4 displays the raw content of an HTTP request intercepted by Wireshark. The trace context information generated by the Zipkin tracing system can be identified by the “X-B3” prefix in the header. By using eBPF to intercept the trace context in the HTTP header, we can connect the current request with the trace system.
Figure 4: View of HTTP headers in Wireshark.
Trace Event
We have added the concept of an event to traces. An event can be attached to a span and consists of start and end times, tags, and summaries, allowing us to attach any desired information to the trace.
When performing eBPF network profiling, two events can be generated based on the request-response data. Figure 5 illustrates what happens when a service performs an HTTP request with profiling. The trace system generates trace context information and sends it in the request. When the service executes in the kernel, we can generate an event for the corresponding trace span from the request-response data and its execution time in kernel space.
Previously, we could only observe the execution status in user space. By combining traces and eBPF technologies, we can now also get more information about the current trace in kernel space, with less performance impact on the target service than collecting the same information in a tracing SDK or agent.
Figure 5: Logical view of profiling an HTTP request and response.
Sampling
To ensure efficient data storage and minimize unnecessary data sampling, we use a sampling mechanism for traces in our system. This mechanism triggers sampling only when certain conditions are met. We also provide a list of the top N traces, which allows users to quickly access the relevant request information for a specific trace.
To help users easily identify and analyze relevant events, we offer three different sampling rules:
- Slow Traces: Sampling is triggered when the response time for a request exceeds a specified threshold.
- Response Status [400, 500): Sampling is triggered when the response status code is greater than or equal to 400 and less than 500.
- Response Status [500, 600): Sampling is triggered when the response status code is greater than or equal to 500 and less than 600.
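To make the three rules concrete, the decision for each request/response pair boils down to a few comparisons. The following Go snippet is a minimal sketch of that logic; the function and constant names are hypothetical and do not reflect SkyWalking Rover's actual code or configuration keys.

```go
package sampling

import "time"

// Reason records which of the three rules matched, if any.
type Reason string

const (
	ReasonNone      Reason = ""
	ReasonSlowTrace Reason = "slow_trace"
	ReasonStatus4xx Reason = "status_4xx"
	ReasonStatus5xx Reason = "status_5xx"
)

// ShouldSample applies the rules described above: slow traces beyond a
// latency threshold, responses in [400, 500), and responses in [500, 600).
func ShouldSample(duration time.Duration, statusCode int, slowThreshold time.Duration) Reason {
	switch {
	case slowThreshold > 0 && duration >= slowThreshold:
		return ReasonSlowTrace
	case statusCode >= 400 && statusCode < 500:
		return ReasonStatus4xx
	case statusCode >= 500 && statusCode < 600:
		return ReasonStatus5xx
	default:
		return ReasonNone
	}
}
```

A pair that matches any rule is kept, and its sampled raw HTTP data is attached to the corresponding trace span as events.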
In addition, we recognize that not all request or response raw data may be necessary for analysis. For example, users may be more interested in requesting data when trying to identify performance issues, while they may be more interested in response data when troubleshooting errors. As such, we also provide configuration options for request or response events to allow users to specify which type of data they would like to sample.\nProfiling in a Service Mesh The SkyWalking and SkyWalking Rover projects have already implemented the HTTP protocol analyze and trace associations. How do they perform when running in a service mesh environment?\nDeployment Figure 6 demonstrates the deployment of SkyWalking and SkyWalking Rover in a service mesh environment. SkyWalking Rover is deployed as a DaemonSet on each machine where a service is located and communicates with the SkyWalking backend cluster. It automatically recognizes the services on the machine and reports metadata information to the SkyWalking backend cluster. When a new network profiling task arises, SkyWalking Rover senses the task and analyzes the designated processes, collecting and aggregating network data before ultimately reporting it back to the SkyWalking backend service.\nFigure 6: SkyWalking rover deployment topology in a service mesh.\nTracing Systems Starting from version 9.3.0, the SkyWalking backend fully supports all functions in the Zipkin server. Therefore, the SkyWalking backend can collect traces from both the SkyWalking and Zipkin protocols. Similarly, SkyWalking Rover can identify and analyze trace context in both the SkyWalking and Zipkin trace systems. In the following two sections, network analysis results will be displayed in the SkyWalking and Zipkin UI respectively.\nSkyWalking When SkyWalking performs network profiling, similar to the TCP metrics in the previous article, the SkyWalking UI will first display the topology between processes. When you open the dashboard of the line representing the traffic metrics between processes, you can see the metrics of HTTP traffic from the “HTTP/1.x” tab and the sampled HTTP requests with tracing in the “HTTP Requests” tab.\nAs shown in Figure 7, there are three lists in the tab, each corresponding to a condition in the event sampling rules. Each list displays the traces that meet the pre-specified conditions. When you click on an item in the trace list, you can view the complete trace.\nFigure 7: Sampled HTTP requests within tracing context.\nWhen you click on an item in the trace list, you can quickly view the specified trace. In Figure 8, we can see that in the current service-related span, there is a tag with a number indicating how many HTTP events are related to that trace span.\nSince we are in a service mesh environment, each service involves interacting with Envoy. Therefore, the current span includes Envoy’s request and response information. Additionally, since the current service has both incoming and outgoing requests, there are events in the corresponding span.\nFigure 8: Events in the trace detail.\nWhen the span is clicked, the details of the span will be displayed. If there are events in the current span, the relevant event information will be displayed on a time axis. As shown in Figure 9, there are a total of 6 related events in the current Span. Each event represents a data sample of an HTTP request/response. One of the events spans multiple time ranges, indicating a longer system call time. 
It may be due to a blocked system call, depending on the implementation details of the HTTP request in different languages. This can also help us query the possible causes of errors.\nFigure 9: Events in one trace span.\nFinally, we can click on a specific event to see its complete information. As shown in Figure 10, it displays the sampling information of a request, including the SkyWalking trace context protocol contained in the request header from the HTTP raw data. The raw request data allows you to quickly re-request the request to solve any issues.\nFigure 10: The detail of the event.\nZipkin Zipkin is one of the most widely used distributed tracing systems in the world. SkyWalking can function as an alternative server to provide advanced features for Zipkin users. Here, we use this way to bring the feature into the Zipkin ecosystem out-of-box. The new events would also be treated as a kind of Zipkin’s tags and annotations.\nTo add events to a Zipkin span, we need to do the following:\n Split the start and end times of each event into two annotations with a canonical name. Add the sampled HTTP raw data from the event to the Zipkin span tags, using the same event name for corresponding purposes.  Figures 11 and 12 show annotations and tags in the same span. In these figures, we can see that the span includes at least two events with the same event name and sequence suffix (e.g., “Start/Finished HTTP Request/Response Sampling-x” in the figure). Both events have separate timestamps to represent their relative times within the span. In the tags, the data content of the corresponding event is represented by the event name and sequence number, respectively.\nFigure 11: Event timestamp in the Zipkin span annotation.\nFigure 12: Event raw data in the Zipkin span tag.\nDemo In this section, we demonstrate how to perform network profiling in a service mesh and complete metrics collection and HTTP raw data sampling. To follow along, you will need a running Kubernetes environment.\nDeploy SkyWalking Showcase SkyWalking Showcase contains a complete set of example services and can be monitored using SkyWalking. For more information, please check the official documentation.\nIn this demo, we only deploy service, the latest released SkyWalking OAP, and UI.\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.3.0 export SW_UI_IMAGE=apache/skywalking-ui:9.3.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.4.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes After deployment is complete, please run the following script to open SkyWalking UI: http://localhost:8080/.\nkubectl port-forward svc/ui 8080:8080 --namespace default Start Network Profiling Task Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn figure 13, we have selected an instance with a list of tasks in the network profiling tab.\nFigure 13: Network Profiling tab in the Data Plane.\nWhen we click the Start button, as shown in Figure 14, we need to specify the sampling rules for the profiling task. The sampling rules consist of one or more rules, each of which is distinguished by a different URI regular expression. When the HTTP request URI matches the regular expression, the rule is used. If the URI regular expression is empty, the default rule is used. 
Using multiple rules can help us make different sampling configurations for different requests.\nEach rule has three parameters to determine if sampling is needed:\n Minimal Request Duration (ms): requests with a response time exceeding the specified time will be sampled. Sampling response status code between 400 and 499: all status codes in the range [400-499) will be sampled. Sampling response status code between 500 and 599: all status codes in the range [500-599) will be sampled.  Once the sampling configuration is complete, we can create the task.\nFigure 14: Create network profiling task page.\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nWhen you click on the line between processes, you can view the data between the two processes, which is divided into three tabs:\n TCP: displays TCP-related metrics. HTTP/1.x: displays metrics in the HTTP 1 protocol. HTTP Requests: displays the analyzed request and saves it to a list according to the sampling rule.  Figure 16: TCP metrics in a network profiling task.\nFigure 17: HTTP/1.x metrics in a network profiling task.\nFigure 18: HTTP sampled requests in a network profiling task.\nConclusion In this article, we detailed the overview of how to analyze the Layer 7 HTTP/1.x protocol in network analysis, and how to associate it with existing trace systems. This allows us to extend the scope of data we can observe from just user space to also include kernel-space data.\nIn the future, we will delve further into the analysis of kernel data, such as collecting information on TCP packet size, transmission frequency, network card, and help on enhancing distributed tracing from another perspective.\nAdditional Resources  SkyWalking Github Repo › SkyWalking Rover Github Repo › SkyWalking Rover Documentation › Diagnose Service Mesh Network Performance with eBPF blog post \u0026gt; SkyWalking Profiling Documentation \u0026gt; SkyWalking Trace Context Propagation \u0026gt; Zipkin Trace Context Propagation \u0026gt; RFC - Hypertext Transfer Protocol – HTTP/1.1 \u0026gt;  ","excerpt":"Background Apache SkyWalking is an open-source Application Performance Management system that helps …","ref":"/blog/ebpf-enhanced-http-observability-l7-metrics-and-tracing/","title":"eBPF enhanced HTTP observability - L7 metrics and tracing"},{"body":"","excerpt":"","ref":"/tags/http/","title":"HTTP"},{"body":"","excerpt":"","ref":"/tags/trace/","title":"Trace"},{"body":"背景 Apache SkyWalking 是一个开源应用性能管理系统,帮助用户收集和聚合日志、追踪、指标和事件,并在 UI 上显示。在上一篇文章中,我们介绍了如何使用 Apache SkyWalking Rover 分析服务网格环境中的网络性能问题。但是,在商业场景中,用户通常依靠成熟的第 7 层协议(如 HTTP)来进行系统之间的交互。在本文中,我们将讨论如何使用 eBPF 技术来分析第 7 层协议的性能瓶颈,以及如何使用网络采样来增强追踪系统。\n本文将演示如何使用 Apache SkyWalking 与 eBPF 来增强 HTTP 可观察性中的指标和追踪。\nHTTP 协议分析 HTTP 是最常用的 7 层协议之一,通常用于为外部方提供服务和进行系统间通信。在下面的章节中,我们将展示如何识别和分析 HTTP/1.x 协议。\n协议识别 在 HTTP/1.x 中,客户端和服务器通过两端的单个文件描述符(File Descriptor)进行通信。图 1 显示了涉及以下步骤的通信过程:\n Connect/Accept:客户端与 HTTP 服务器建立连接,或者服务器接受客户端的连接。 Read/Write(多次):客户端或服务器读取和写入 HTTPS 请求和响应。单个请求 - 响应对在每边的同一连接内发生。 Close:客户端和服务器关闭连接。  为了获取 HTTP 内容,必须从此过程的第二步读取它。根据 RFC 定义,内容包含在 4 层协议的数据中,可以通过解析数据来获取。请求和响应对可以相关联,因为它们都在两端的同一连接内发生。\n图 1:HTTP 通信时间线。\nHTTP 管线化 HTTP 管线化(Pipelining)是 HTTP/1.1 的一个特性,允许在等待对应的响应的情况下在单个 TCP 连接上发送多个 HTTP 请求。这个特性很重要,因为它确保了服务器端的响应顺序必须与请求的顺序匹配。\n图 2 说明了这是如何工作的,考虑以下情况:HTTP 客户端向服务器发送多个请求,服务器通过按照请求的顺序发送 HTTP 响应来响应。这意味着客户端发送的第一个请求将收到服务器的第一个响应,第二个请求将收到第二个响应,以此类推。\n在设计 HTTP 解析时,我们应该遵循这个原则,将请求数据添加到列表中,并在解析响应时删除第一个项目。这可以确保响应按正确的顺序处理。\n图 2: HTTP/1.1 管道。\n指标 根据前文提到的 HTTP 内容和流程拓扑图的识别,我们可以将这两者结合起来生成进程间的指标数据。\n图 
3 显示了目前支持两个进程间分析的指标。基于 HTTP 请求和响应数据,可以分析以下数据:\n   指标名称 类型 单位 描述     请求 CPM(Call Per Minute) 计数器 计数 HTTP 请求计数   响应状态 CPM (Call Per Minute) 计数器 计数 每个 HTTP 响应状态码的计数   请求包大小 计数器 / 直方图 字节 请求包大小   响应包大小 计数器 / 直方图 字节 响应包大小   客户端持续时间 计数器 / 直方图 毫秒 客户端单个 HTTP 响应的持续时间   服务器持续时间 计数器 / 直方图 毫秒 服务器端单个 HTTP 响应的持续时间    图 3:进程到进程指标。\nHTTP 和追踪 在 HTTP 过程中,如果我们能够从原始数据中解包 HTTP 请求和响应,就可以使用这些数据与现有的追踪系统进行关联。\n追踪上下文标识 为了追踪多个服务之间的请求流,追踪系统通常在请求进入服务时创建追踪上下文,并在请求 - 响应过程中将其传递给其他服务。例如,当 HTTP 请求发送到另一个服务器时,追踪上下文包含在请求头中。\n图 4 显示了 Wireshark 拦截的 HTTP 请求的原始内容。由 Zipkin Tracing 系统生成的追踪上下文信息可以通过头中的 “X-B3” 前缀进行标识。通过使用 eBPF 拦截 HTTP 头中的追踪上下文,可以将当前请求与追踪系统连接起来。\n图 4:Wireshark 中的 HTTP Header 视图。\nTrace 事件 我们已经将事件这个概念加入了追踪中。事件可以附加到跨度上,并包含起始和结束时间、标签和摘要,允许我们将任何所需的信息附加到追踪中。\n在执行 eBPF 网络分析时,可以根据请求 - 响应数据生成两个事件。图 5 说明了在带分析的情况下执行 HTTP 请求时发生的情况。追踪系统生成追踪上下文信息并将其发送到请求中。当服务在内核中执行时,我们可以通过与内核空间中的请求 - 响应数据和执行时间交互,为相应的追踪跨度生成事件。\n以前,我们只能观察用户空间的执行状态。现在,通过结合追踪和 eBPF 技术,我们还可以在内核空间获取更多关于当前追踪的信息,如果我们在追踪 SDK 和代理中执行类似的操作,将对目标服务的性能产生较小的影响。\n图 5:分析 HTTP 请求和响应的逻辑视图。\n抽样 该机制仅在满足特定条件时触发抽样。我们还提供了前 N 条追踪的列表,允许用户快速访问特定追踪的相关请求信息。为了帮助用户轻松识别和分析相关事件,我们提供了三种不同的抽样规则:\n 慢速追踪:当请求的响应时间超过指定阈值时触发抽样。 响应状态 [400,500):当响应状态代码大于或等于 400 且小于 500 时触发抽样。 响应状态 [500,600):当响应状态代码大于或等于 500 且小于 600 时触发抽样。  此外,我们认识到分析时可能并不需要所有请求或响应的原始数据。例如,当试图识别性能问题时,用户可能更感兴趣于请求数据,而在解决错误时,他们可能更感兴趣于响应数据。因此,我们还提供了请求或响应事件的配置选项,允许用户指定要抽样的数据类型。\n服务网格中的分析 SkyWalking Rover 项目已经实现了 HTTP 协议的分析和追踪关联。当在服务网格环境中运行时它们的表现如何?\n部署 图 6 演示了 SkyWalking 和 SkyWalking Rover 在服务网格环境中的部署方式。SkyWalking Rover 作为一个 DaemonSet 部署在每台服务所在的机器上,并与 SkyWalking 后端集群通信。它会自动识别机器上的服务并向 SkyWalking 后端集群报告元数据信息。当出现新的网络分析任务时,SkyWalking Rover 会感知该任务并对指定的进程进行分析,在最终将数据报告回 SkyWalking 后端服务之前,收集和聚合网络数据。\n图 6:服务网格中的 SkyWalking rover 部署拓扑。\n追踪系统 从版本 9.3.0 开始,SkyWalking 后端完全支持 Zipkin 服务器中的所有功能。因此,SkyWalking 后端可以收集来自 SkyWalking 和 Zipkin 协议的追踪。同样,SkyWalking Rover 可以在 SkyWalking 和 Zipkin 追踪系统中识别和分析追踪上下文。在接下来的两节中,网络分析结果将分别在 SkyWalking 和 Zipkin UI 中显示。\nSkyWalking 当 SkyWalking 执行网络分析时,与前文中的 TCP 指标类似,SkyWalking UI 会首先显示进程间的拓扑图。当打开代表进程间流量指标的线的仪表板时,您可以在 “HTTP/1.x” 选项卡中看到 HTTP 流量的指标,并在 “HTTP Requests” 选项卡中看到带追踪的抽样的 HTTP 请求。\n如图 7 所示,选项卡中有三个列表,每个列表对应事件抽样规则中的一个条件。每个列表显示符合预先规定条件的追踪。当您单击追踪列表中的一个项目时,就可以查看完整的追踪。\n图 7:Tracing 上下文中的采样 HTTP 请求。\n当您单击追踪列表中的一个项目时,就可以快速查看指定的追踪。在图 8 中,我们可以看到在当前的服务相关的跨度中,有一个带有数字的标签,表示与该追踪跨度相关的 HTTP 事件数。\n由于我们在服务网格环境中,每个服务都涉及与 Envoy 交互。因此,当前的跨度包括 Envoy 的请求和响应信息。此外,由于当前的服务有传入和传出的请求,因此相应的跨度中有事件。\n图 8:Tracing 详细信息中的事件。\n当单击跨度时,将显示跨度的详细信息。如果当前跨度中有事件,则相关事件信息将在时间轴上显示。如图 9 所示,当前跨度中一共有 6 个相关事件。每个事件代表一个 HTTP 请求 / 响应的数据样本。其中一个事件跨越多个时间范围,表示较长的系统调用时间。这可能是由于系统调用被阻塞,具体取决于不同语言中的 HTTP 请求的实现细节。这也可以帮助我们查询错误的可能原因。\n图 9:一个 Tracing 范围内的事件。\n最后,我们可以单击特定的事件查看它的完整信息。如图 10 所示,它显示了一个请求的抽样信息,包括从 HTTP 原始数据中的请求头中包含的 SkyWalking 追踪上下文协议。原始请求数据允许您快速重新请求以解决任何问题。\n图 10:事件的详细信息。\nZipkin Zipkin 是世界上广泛使用的分布式追踪系统。SkyWalking 可以作为替代服务器,提供高级功能。在这里,我们使用这种方式将功能无缝集成到 Zipkin 生态系统中。新事件也将被视为 Zipkin 的标签和注释的一种。\n为 Zipkin 跨度添加事件,需要执行以下操作:\n 将每个事件的开始时间和结束时间分别拆分为两个具有规范名称的注释。 将抽样的 HTTP 原始数据从事件添加到 Zipkin 跨度标签中,使用相同的事件名称用于相应的目的。  图 11 和图 12 显示了同一跨度中的注释和标签。在这些图中,我们可以看到跨度包含至少两个具有相同事件名称和序列后缀的事件(例如,图中的 “Start/Finished HTTP Request/Response Sampling-x”)。这两个事件均具有单独的时间戳,用于表示其在跨度内的相对时间。在标签中,对应事件的数据内容分别由事件名称和序列号表示。\n图 11:Zipkin span 注释中的事件时间戳。\n图 12:Zipkin span 标签中的事件原始数据。\n演示 在本节中,我们将演示如何在服务网格中执行网络分析,并完成指标收集和 HTTP 原始数据抽样。要进行操作,您需要一个运行中的 Kubernetes 环境。\n部署 SkyWalking Showcase SkyWalking Showcase 包含一套完整的示例服务,可以使用 SkyWalking 进行监控。有关详细信息,请参阅官方文档。\n在本演示中,我们只部署了服务、最新发布的 SkyWalking OAP 和 UI。\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.3.0 export SW_UI_IMAGE=apache/skywalking-ui:9.3.0 export 
SW_ROVER_IMAGE=apache/skywalking-rover:0.4.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes 部署完成后,运行下面的脚本启动 SkyWalking UI:http://localhost:8080/。\nkubectl port-forward svc/ui 8080:8080 --namespace default 启动网络分析任务 目前,我们可以通过单击服务网格面板中的 Data Plane 项和 Kubernetes 面板中的 Service 项来选择要监视的特定实例。\n在图 13 中,我们已在网络分析选项卡中选择了一个具有任务列表的实例。\n图 13:数据平面中的网络分析选项卡。\n当我们单击 “开始” 按钮时,如图 14 所示,我们需要为分析任务指定抽样规则。抽样规则由一个或多个规则组成,每个规则都由不同的 URI 正则表达式区分。当 HTTP 请求的 URI 与正则表达式匹配时,将使用该规则。如果 URI 正则表达式为空,则使用默认规则。使用多个规则可以帮助我们为不同的请求配置不同的抽样配置。\n每个规则都有三个参数来确定是否需要抽样:\n 最小请求持续时间(毫秒):响应时间超过指定时间的请求将被抽样。 在 400 和 499 之间的抽样响应状态代码:范围 [400-499) 中的所有状态代码将被抽样。 在 500 和 599 之间的抽样响应状态代码:范围 [500-599) 中的所有状态码将被抽样。  抽样配置完成后,我们就可以创建任务了。\n图 14:创建网络分析任务页面。\n完成 几秒钟后,你会看到页面的右侧出现进程拓扑结构。\n图 15:网络分析任务中的流程拓扑。\n当您单击进程之间的线时,您可以查看两个过程之间的数据,它被分为三个选项卡:\n TCP:显示与 TCP 相关的指标。 HTTP/1.x:显示 HTTP 1 协议中的指标。 HTTP 请求:显示已分析的请求,并根据抽样规则保存到列表中。  图 16:网络分析任务中的 TCP 指标。\n图 17:网络分析任务中的 HTTP/1.x 指标。\n图 18:网络分析任务中的 HTTP 采样请求。\n总结 在本文中,我们详细介绍了如何在网络分析中分析 7 层 HTTP/1.x 协议,以及如何将其与现有追踪系统相关联。这使我们能够将我们能够观察到的数据从用户空间扩展到内核空间数据。\n在未来,我们将进一步探究内核数据的分析,例如收集 TCP 包大小、传输频率、网卡等信息,并从另一个角度提升分布式追踪。\n其他资源  SkyWalking Github Repo › SkyWalking Rover Github Repo › SkyWalking Rover Documentation › Diagnose Service Mesh Network Performance with eBPF blog post \u0026gt; SkyWalking Profiling Documentation \u0026gt; SkyWalking Trace Context Propagation \u0026gt; Zipkin Trace Context Propagation \u0026gt; RFC - Hypertext Transfer Protocol – HTTP/1.1 \u0026gt;  ","excerpt":"背景 Apache SkyWalking 是一个开源应用性能管理系统,帮助用户收集和聚合日志、追踪、指标和事件,并在 UI 上显示。在上一篇文章中,我们介绍了如何使用 Apache …","ref":"/zh/ebpf-enhanced-http-observability-l7-metrics-and-tracing/","title":"使用 eBPF 提升 HTTP 可观测性 - L7 指标和追踪"},{"body":"SkyWalking Rust 0.5.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Add management support. by @jmjoy in https://github.com/apache/skywalking-rust/pull/48 Add missing_docs lint and supply documents. by @jmjoy in https://github.com/apache/skywalking-rust/pull/49 Add authentication and custom intercept support. by @jmjoy in https://github.com/apache/skywalking-rust/pull/50 Bump to 0.5.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/51  ","excerpt":"SkyWalking Rust 0.5.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-5-0/","title":"Release Apache SkyWalking Rust 0.5.0"},{"body":"SkyWalking Satellite 1.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support transmit the OpenTelemetry Metrics protocol. Upgrade to GO 1.18. Add Docker images for arm64 architecture. Support transmit Span Attached Event protocol data. Support dotnet CLRMetric forward.  Bug Fixes  Fix the missing return data when receive metrics in batch mode. Fix CVE-2022-21698, CVE-2022-27664.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 1.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skwaylking-satellite-1-1-0/","title":"Release Apache SkyWalking Satellite 1.1.0"},{"body":"Apache SkyWalking is an open-source APM for a distributed system, Apache Software Foundation top-level project.\nOn Jan. 3rd, 2023, we received reports about Aliyun Trace Analysis Service. 
It provides a cloud service compatible with SkyWalking trace APIs and agents.
On their product page, there is a best-practice document describing that their service is not SkyWalking OAP but can work with SkyWalking agents to support SkyWalking's In-Process (Trace) Profiling.
BUT, they copied the whole page of SkyWalking's profiling UI, including the page layout, wording, and profiling task setup. The only difference is the color scheme.
SkyWalking UI
Aliyun Trace Analysis UI on their document page
The UI visualization is part of the copyrighted work. Aliyun has repeatedly declared on their website that their backend is NOT a redistribution of SkyWalking, and they never mentioned that this page is actually copied from upstream.
This is a LICENSE issue, violating SkyWalking's copyright and the Apache 2.0 License. They do not respect the Apache Software Foundation's and Apache SkyWalking's IP and branding.
","excerpt":"Apache SkyWalking is an open-source APM for a distributed system, Apache Software Foundation …","ref":"/blog/2023-01-03-aliyun-copy-page/","title":"[License Issue] Aliyun(阿里云)'s trace analysis service copied SkyWalking's trace profiling page."},{"body":"","excerpt":"","ref":"/tags/license/","title":"License"},{"body":"SkyWalking Rover 0.4.0 is released. Go to downloads page to find release tars.
Features
- Enhancing the render context for the Kubernetes process.
- Simplify the logic of network protocol analysis.
- Upgrade Go library to 1.18, eBPF library to 0.9.3.
- Make the Profiling module compatible with more Linux systems.
- Support monitoring HTTP/1.x in the NETWORK profiling.
Bug Fixes
Documentation
- Add documentation of supported Linux versions.
Issues and PR
- All issues are here
- All pull requests are here
","excerpt":"SkyWalking Rover 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Enhancing …","ref":"/events/release-apache-skwaylking-rover-0-4-0/","title":"Release Apache SkyWalking Rover 0.4.0"},{"body":"Observability for modern distributed applications is critical for understanding how they behave under a variety of conditions and for troubleshooting and resolving issues when they arise. Traces, metrics, and logs are regarded as fundamental parts of the observability stack. Traces are the footprints of distributed system executions, while metrics measure system performance as numbers over time. Essentially, they measure performance from two different dimensions. Being able to quickly visualize the connection between traces and corresponding metrics makes it possible to quickly diagnose which process flows are correlated to potentially pathological behavior. This powerful new capability is now available in SkyWalking 9.3.0.
The SkyWalking project started with tracing only, and has focused on 100% sampling-based metrics and topology analysis since 2018. When users face anomalous trends in time-series metrics, like a peak on a line chart or a histogram showing a larger gap between p99 and p95, the immediate question is: why is this happening? One of SkyWalking's latest features, the trace-metric association, makes it much easier to answer that question and to address the root cause.
How Are Metrics Generated?
SkyWalking provides three ways to calculate metrics:
- Metrics built from trace spans, depending on the span's layer, kind, and tags.
- Metrics extracted from logs, a kind of keyword- and tag-based metrics extraction.
Metrics reported from mature and mainstream metrics/meter systems, such as OpenTelemetry, Prometheus, and Zabbix.  Tracing tracks the processes of requests between an application\u0026rsquo;s services. Most systems that generate traffic and performance-related metrics also generate tracing data, either from server-side trace-based aggregations or through client SDKs.\nUse SkyWalking to Reduce the Traditional Cost of Trace Indexing Tracing data and visualization are critical troubleshooting tools for both developers and operators alike because of how helpful they are in locating issue boundaries. But, because it has traditionally been difficult to find associations between metrics and traces, teams have added increasingly more tags into the spans, and search through various combinations. This trend of increased instrumentation and searching has required increased infrastructure investment to support this kind of search. SkyWalking\u0026rsquo;s metrics and tracing association capabilities can help reduce the cost of indexing and searching that data.\nFind the Associated Trace When looking for association between metrics and traces, the kind of metrics we\u0026rsquo;re dealing with determines their relationships to traces. Let’s review the standard request rate, error, and duration (RED) metrics to see how it works.\nSuccess Rate Metrics The success rate is determined by the return code, RPC response code, or exceptions of the process. When the success rate decreases, looking for errors in the traces of this service or pod are the first place to look to find clues.\nFigure 1: The success rate graph from SkyWalking\u0026rsquo;s 9.3.0 dashboard with the option to view related traces at a particular time.\nDrilling down from the peak of the success rate, SkyWalking lists all traces and their error status that were collected in this particular minute (Figure 2):\nFigure 2: SkyWalking shows related traces with an error status.\nRequests to /test can be located from the trace, and the span’s tag indicates a 404 response code of the HTTP request.\nFigure 3: A detail view of a request to http://frontend/test showing that the URI doesn\u0026rsquo;t exist.\nBy looking at the trace data, it becomes immediately clear that the drop in success rate is caused by requests to a nonexistent URI.\nAverage Response Time The average response time metric provides a general overview of service performance. When average response time is unstable, this usually means that the system is facing serious performance impacts.\nFigure 4: SkyWalking\u0026rsquo;s query UI for searching for related traces showing traces for requests that exceed a particular duration threshold.\nWhen you drill down from this metric, this query condition (Figure 4) will reveal the slowest traces of the service in this specific minute. Notice, at least 168ms is added as a condition automatically, to avoid scanning a large number of rows in the Database.\nApdex Apdex—the Application Performance Index—is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times (Figure 5). The response time is measured from an asset request to completed delivery back to the requestor.\nFigure 5: The Apdex formula\nA user defines a response time tolerating threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. 
All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nWhen the Apdex score decreases, we need to find related traces from two perspectives: slow traces and error status traces. SkyWalking\u0026rsquo;s new related tracing features offers a quick way to view both (Figure 6) directly from the Apdex graph.\nFigure 6: Show slow trace and error status traces from the Apdex graph\nService Response Time Percentile MetricThe percentile graph (Figure 7) provides p50, p75, p90, p95, and p99 latency ranks to measure the long-tail issues of service performance.\nFigure 7: The service response time percentile graph helps to highlight long-tail issues of service performance.\nThis percentile graph shows a typical long-tail issue. P99 latency is four times slower than the P95. When we use the association, we see the traces with latency between P95 - P99 and P99 - Infinity.\nThe traces of requests causing this kind of long-tail phenomena are automatically listing from there.\nFigure 8: Query parameters to search for traces based on latency.\nAre More Associations Available? SkyWalking provides more than just associations between between traces and metrics to help you find possible causal relationships and to avoid looking for the proverbial needle in a haystack.\nCurrently, SkyWalking 9.3.0 offers two more associations: metric-to-metric associations and event-to-metric associations.\nMetric-to-metric Associations There are dozens of metrics on the dashboard—which is great for getting a complete picture of application behavior. During a typical performance issue, the peaks of multiple metrics are affected simultaneously. But, trying to correlate peaks across all of these graphs can be difficult\u0026hellip;\nNow in SkyWalking 9.3.0, when you click the peak of one graph, the pop-out box lets you see associated metrics.\nFigure 9: SkyWalking\u0026rsquo;s option to view associated metrics.\nWhen you choose that option, all associated metrics graphs will show axis pointers (the dotted vertical lines) in all associated graphs like in Figure 10. This makes it easier to correlate the peaks in different graphs with each other. Often, these correlated peaks with have the same root cause.\nFigure 10: Axis pointers (vertical dotted lines) show associations between peaks across multiple metrics graphs.\nEvent-to-Metric Associations SkyWalking provides the event concept to associate possible service performance impacted by the infrastructure, such as new deployment even from k8s. Or, the anomaly had been detected by alerting or integrated AIOps engine.\nThe event to metrics association is also automatically, it could cover the time range of the event on the metric graphs(blue areas). If the area of event and peaks are matched, most likely this event covered this anomaly.\nFigure 11: SkyWalking\u0026rsquo;s event to metric association view.\nSkyWalking Makes it Easier and Faster to Find Root Causes SkyWalking now makes it easy to find associations between metrics, events, and traces, ultimately making it possible to identify root causes and fix problems fast. 
The associations we\u0026rsquo;ve discussed in this article are available out-of-box in the SkyWalking 9.3.0 release.\nFigure 12: Just click on the dots to see related traces and metrics associations.\nClick the dots on any metric graph, and you will see a View Related Traces item pop-out if this metric has logical mapping traces.\nConclusion In this blog, we took a look at the newly-added association feature between metrics and traces. With this new visualization, it\u0026rsquo;s now much easier to find key traces to identify root cause of issues.Associations in SkyWalking can go even deeper. Associations from metrics to traces is not the end of diagnosing system bottleneck. In the next post, we will introduce an eBPF powered trace enhancement where you’ll be able to see HTTP request and response details associated with tracing spans from network profiling. Stay tuned.\n","excerpt":"Observability for modern distributed applications work is critical for understanding how they behave …","ref":"/blog/boost-root-cause-analysis-quickly-with-skywalking-new-trace-metrics-association-feature/","title":"Boost Root Cause Analysis Quickly With SkyWalking’s New Trace-Metrics Association Feature"},{"body":"现代分布式应用程序工作的可观测性对于了解它们在各种条件下的行为方式以及在出现问题时进行故障排除和解决至关重要。追踪、指标和日志被视为可观测性堆栈的基本部分。Trace 是分布式系统执行的足迹,而 metric 则是用时间轴上的数字衡量系统性能。本质上,它们从两个维度衡量性能。能够快速可视化追踪和相应指标之间的联系,可以快速诊断哪些流程与潜在的异常相关。SkyWalking 9.3.0 现在提供了这一强大的新功能。\nSkyWalking 项目从 tracing 开始,从 2018 年开始专注于 100% 基于采样的指标和拓扑分析。当用户面对时间序列指标的异常趋势时,比如折线图上的峰值,或者直方图显示 p95 和 p95 之间的差距较大,直接的问题是,为什么会出现这种情况?SkyWalking 的最新功能之一,trace 与 metric 关联,使得回答这个问题和解决根本原因更加容易。\n指标是如何生成的? SkyWalking 提供了三种计算指标的方式:\n 根据追踪跨度构建的指标,具体取决于跨度的层、种类和标签。 从日志中提取指标—— 一种基于关键词和标签的指标提取。 从成熟和主流的指标 / 仪表系统报告的指标,例如 OpenTelemetry、Prometheus 和 Zabbix。  Tracing 追踪应用程序服务之间的请求过程。大多数生成流量和性能相关指标的系统也会生成追踪数据,这些数据来自服务器端基于追踪的聚合或通过客户端 SDK。\n使用 SkyWalking 降低追踪索引的传统成本 Trace 数据和可视化对于开发人员和运维人员来说都是至关重要的故障排除工具,因为它们在定位问题边界方面非常有帮助。但是,由于传统上很难找到指标和痕迹之间的关联,团队已经将越来越多的标签添加到跨度中,并搜索各种组合。这种增加仪器和搜索的趋势需要增加基础设施投资来支持这种搜索。SkyWalking 的指标和追踪关联功能有助于降低索引和搜索该数据的成本。\n查找关联的 trace 在寻找 metric 和 trace 之间的关联时,我们处理的指标类型决定了它们与 trace 的关系。让我们回顾一下标准请求*率、错误和持续时间(RED)*指标,看看它是如何工作的。\n成功率指标 成功率由返回码、RPC 响应码或进程异常决定。当成功率下降时,在这个服务或 Pod 的 trace 中寻找错误是第一个寻找线索的地方。\n图 1:SkyWalking 9.3.0 仪表板的成功率图表,带有在特定时间查看相关 trace 的选项。\n从成功率的峰值向下探索,SkyWalking 列出了在这一特定分钟内收集的所有 trace 及其错误状态(图 2):\n图 2:SkyWalking 显示具有错误状态的相关追踪。\n可以从 trace 中找到对 /test 的请求,并且 span 的标记指示 HTTP 请求的 404 响应代码。\n图 3:显示 URI 不存在的 http://frontend/test 请求的详细视图。\n通过查看 trace 数据,很明显成功率的下降是由对不存在的 URI 的请求引起的。\n平均响应时间 平均响应时间指标提供了服务性能的一般概览。当平均响应时间不稳定时,这通常意味着系统面临严重的性能影响。\n图 4:SkyWalking 用于搜索相关 trace 的查询 UI,显示超过特定持续时间阈值的请求的 trace。\n当您从该指标向下探索时,该查询条件(图 4)将揭示该特定分钟内服务的最慢 trace。请注意,至少 168ms 作为条件自动添加,以避免扫描数据库中的大量行。\nApdex Apdex(应用程序性能指数)是根据设定的阈值衡量响应时间的指标。它测量令人满意的响应时间与不令人满意的响应时间的比率(图 5)。响应时间是从资产请求到完成交付回请求者的时间。\n图 5:Apdex 公式\n用户定义响应时间容忍阈值 T。在 T 或更短时间内处理的所有响应都使用户满意。\n例如,如果 T 为 1.2 秒,响应在 0.5 秒内完成,则用户会感到满意。所有大于 1.2 秒的响应都会让用户不满意。超过 4.8 秒的响应会让用户感到沮丧。\n当 Apdex 分数下降时,我们需要从两个角度寻找相关的 trace:慢速和错误状态的 trace。SkyWalking 的新相关追踪功能提供了一种直接从 Apdex 图表查看两者(图 6)的快速方法。\n图 6:显示 Apdex 图中的慢速 trace 和错误状态 trace\n服务响应时间 百分位指标百分位图(图 7)提供 p50、p75、p90、p95 和 p99 延迟排名,以衡量服务性能的长尾问题。\n图 7:服务响应时间百分位图有助于突出服务性能的长尾问题。\n这个百分位数图显示了一个典型的长尾问题。P99 延迟比 P95 慢四倍。当我们使用关联时,我们会看到 P95 - P99 和 P99 - Infinity 之间具有延迟的 trace。\n造成这种长尾现象的请求 trace,就是从那里自动列出来的。\n图 8:用于根据延迟搜索 trace 的查询参数。\n是否有更多关联可用? 
SkyWalking 提供的不仅仅是 trace 和 metric 之间的关联,还可以帮助您找到可能的因果关系,避免大海捞针。\n目前,SkyWalking 9.3.0 提供了两种关联:metric-to-metric 关联和 event-to-metric 关联。\nMetric-to-metric 关联 仪表板上有许多指标 —— 这对于全面了解应用程序行为非常有用。在典型的性能问题中,多个指标的峰值会同时受到影响。但是,尝试关联所有这些图表中的峰值可能很困难……\n现在在 SkyWalking 9.3.0 中,当你点击一个图表的峰值时,弹出框可以让你看到相关的指标。\n图 9:SkyWalking 用于查看相关指标的选项。\n当您选择该选项时,所有关联的指标图表将在所有关联的图表中显示轴指针(垂直虚线),如图 10 所示。这使得将不同图表中的峰值相互关联起来变得更加容易。通常,这些相关的峰值具有相同的根本原因。\n图 10:轴指针(垂直虚线)显示多个指标图中峰值之间的关联。\nEvent-to-metric 关联 SkyWalking 提供了事件概念来关联可能受基础设施影响的服务性能,例如来自 Kubernetes 的新部署。或者,已通过警报或集成 AIOps 引擎检测到异常。\n事件到指标的关联也是自动的,它可以覆盖指标图上事件的时间范围(蓝色区域)。如果事件区域和峰值匹配,则很可能该事件覆盖了该异常。\n图 11:SkyWalking 的事件与指标关联视图。\nSkyWalking 使查找根本原因变得更加容易和快速 SkyWalking 现在可以轻松找到指标、事件和追踪之间的关联,最终可以确定根本原因并快速解决问题。我们在本文中讨论的关联在 SkyWalking 9.3.0 版本中开箱即用。\n图 12:只需单击圆点即可查看相关 trace 和 metric 关联。\n单击任何指标图上的点,如果该指标具有逻辑映射,您将看到一个查看相关 trace 弹出窗口。\n结论 在这篇博客中,我们了解了 metric 和 trace 之间新增的关联功能。有了这个新的可视化,现在可以更容易地找到关键 trace 来识别问题的根本原因。SkyWalking 中的关联可以更深入。从 metric 到 trace 的关联并不是诊断系统瓶颈的终点。在下一篇文章中,我们将介绍 eBPF 支持的追踪增强功能,您将看到与网络分析中的追踪跨度相关的 HTTP 请求和响应详细信息。敬请关注。\n","excerpt":"现代分布式应用程序工作的可观测性对于了解它们在各种条件下的行为方式以及在出现问题时进行故障排除和解决至关重要。追踪、指标和日志被视为可观测性堆栈的基本部分。Trace 是分布式系统执行的足迹, …","ref":"/zh/boost-root-cause-analysis-quickly-with-skywalking-new-trace-metrics-association-feature/","title":"SkyWalking 推出 trace-metric 关联功能助力快速根源问题排查"},{"body":"In cloud native applications, a request often needs to be processed through a series of APIs or backend services, some of which are parallel and some serial and located on different platforms or nodes. How do we determine the service paths and nodes a call goes through to help us troubleshoot the problem? This is where distributed tracing comes into play.\nThis article covers:\n How distributed tracing works How to choose distributed tracing software How to use distributed tracing in Istio How to view distributed tracing data using Bookinfo and SkyWalking as examples  Distributed Tracing Basics Distributed tracing is a method for tracing requests in a distributed system to help users better understand, control, and optimize distributed systems. There are two concepts used in distributed tracing: TraceID and SpanID. You can see them in Figure 1 below.\n TraceID is a globally unique ID that identifies the trace information of a request. All traces of a request belong to the same TraceID, and the TraceID remains constant throughout the trace of the request. SpanID is a locally unique ID that identifies a request’s trace information at a certain time. A request generates different SpanIDs at different periods, and SpanIDs are used to distinguish trace information for a request at different periods.  TraceID and SpanID are the basis of distributed tracing. They provide a uniform identifier for request tracing in distributed systems and facilitate users’ ability to query, manage, and analyze the trace information of requests.\nFigure 1: Trace and span\nThe following is the process of distributed tracing:\n When a system receives a request, the distributed tracing system assigns a TraceID to the request, which is used to chain together the entire chain of invocations. The distributed trace system generates a SpanID and ParentID for each service call within the system for the request, which is used to record the parent-child relationship of the call; a Span without a ParentID is used as the entry point of the call chain. TraceID and SpanID are to be passed during each service call. When viewing a distributed trace, query the full process of a particular request by TraceID.  
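To make the propagation steps above concrete, the following is a minimal, self-contained Python sketch (not Envoy, Istio, or SkyWalking code) showing how the TraceID stays constant while each hop creates a new SpanID and records the caller's SpanID as its ParentID, encoded as B3-style headers; the uuid-based IDs and the helper functions are purely illustrative.

import uuid

def new_trace_context():
    # Entry point of the call chain: a fresh TraceID, a root span, no ParentID.
    return {"trace_id": uuid.uuid4().hex, "span_id": uuid.uuid4().hex, "parent_id": None}

def child_context(incoming):
    # Every downstream call keeps the TraceID and records the caller's SpanID as its ParentID.
    return {"trace_id": incoming["trace_id"], "span_id": uuid.uuid4().hex, "parent_id": incoming["span_id"]}

def to_b3_headers(ctx):
    # Encode the context as B3-style headers (the x-b3-* headers listed in the next section).
    headers = {"x-b3-traceid": ctx["trace_id"], "x-b3-spanid": ctx["span_id"]}
    if ctx["parent_id"] is not None:
        headers["x-b3-parentspanid"] = ctx["parent_id"]
    return headers

# Simulate gateway -> service A -> service B for one request.
root = new_trace_context()      # assigned when the request enters the system
span_a = child_context(root)    # service A handles the request
span_b = child_context(span_a)  # service A calls service B
print(to_b3_headers(span_b))    # headers attached to the outgoing call to service B

In a mesh, Envoy generates headers like these at the ingress gateway; the application only has to copy them from the incoming request onto its outgoing requests, which is exactly what the Bookinfo Productpage code shown in the next section does.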
How Istio Implements Distributed Tracing Istio’s distributed tracing is based on information collected by the Envoy proxy in the data plane. After a service request is intercepted by Envoy, Envoy adds tracing information as headers to the request forwarded to the destination workload. The following headers are relevant for distributed tracing:\n As TraceID: x-request-id Used to establish parent-child relationships for Span in the LightStep trace: x-ot-span-context\u0026lt;/li Used for Zipkin, also for Jaeger, SkyWalking, see b3-propagation:  x-b3-traceid x-b3-traceid x-b3-spanid x-b3-parentspanid x-b3-sampled x-b3-flags b3   For Datadog:  x-datadog-trace-id x-datadog-parent-id x-datadog-sampling-priority   For SkyWalking: sw8 For AWS X-Ray: x-amzn-trace-id  For more information on how to use these headers, please see the Envoy documentation.\nRegardless of the language of your application, Envoy will generate the appropriate tracing headers for you at the Ingress Gateway and forward these headers to the upstream cluster. However, in order to utilize the distributed tracing feature, you must modify your application code to attach the tracing headers to upstream requests. Since neither the service mesh nor the application can automatically propagate these headers, you can integrate the agent for distributed tracing into the application or manually propagate these headers in the application code itself. Once the tracing headers are propagated to all upstream requests, Envoy will send the tracing data to the tracer’s back-end processing, and then you can view the tracing data in the UI.\nFor example, look at the code of the Productpage service in the Bookinfo application. You can see that it integrates the Jaeger client library and synchronizes the header generated by Envoy with the HTTP requests to the Details and Reviews services in the getForwardHeaders (request) function.\ndef getForwardHeaders(request): headers = {} # Using Jaeger agent to get the x-b3-* headers span = get_current_span() carrier = {} tracer.inject( span_context=span.context, format=Format.HTTP_HEADERS, carrier=carrier) headers.update(carrier) # Dealing with the non x-b3-* header manually if \u0026#39;user\u0026#39; in session: headers[\u0026#39;end-user\u0026#39;] = session[\u0026#39;user\u0026#39;] incoming_headers = [ \u0026#39;x-request-id\u0026#39;, \u0026#39;x-ot-span-context\u0026#39;, \u0026#39;x-datadog-trace-id\u0026#39;, \u0026#39;x-datadog-parent-id\u0026#39;, \u0026#39;x-datadog-sampling-priority\u0026#39;, \u0026#39;traceparent\u0026#39;, \u0026#39;tracestate\u0026#39;, \u0026#39;x-cloud-trace-context\u0026#39;, \u0026#39;grpc-trace-bin\u0026#39;, \u0026#39;sw8\u0026#39;, \u0026#39;user-agent\u0026#39;, \u0026#39;cookie\u0026#39;, \u0026#39;authorization\u0026#39;, \u0026#39;jwt\u0026#39;, ] for ihdr in incoming_headers: val = request.headers.get(ihdr) if val is not None: headers[ihdr] = val return headers For more information, the Istio documentation provides answers to frequently asked questions about distributed tracing in Istio.\nHow to Choose A Distributed Tracing System Distributed tracing systems are similar in principle. There are many such systems on the market, such as Apache SkyWalking, Jaeger, Zipkin, Lightstep, Pinpoint, and so on. For our purposes here, we will choose three of them and compare them in several dimensions. Here are our inclusion criteria:\n They are currently the most popular open-source distributed tracing systems. All are based on the OpenTracing specification. 
They support integration with Istio and Envoy.     Items Apache SkyWalking Jaeger Zipkin     Implementations Language-based probes, service mesh probes, eBPF agent, third-party instrumental libraries (Zipkin currently supported) Language-based probes Language-based probes   Database ES, H2, MySQL, TiDB, Sharding-sphere, BanyanDB ES, MySQL, Cassandra, Memory ES, MySQL, Cassandra, Memory   Supported Languages Java, Rust, PHP, NodeJS, Go, Python, C++, .Net, Lua Java, Go, Python, NodeJS, C#, PHP, Ruby, C++ Java, Go, Python, NodeJS, C#, PHP, Ruby, C++   Initiator Personal Uber Twitter   Governance Apache Foundation CNCF CNCF   Version 9.3.0 1.39.0 2.23.19   Stars 20.9k 16.8k 15.8k    Although Apache SkyWalking’s agent does not support as many languages as Jaeger and Zipkin, SkyWalking’s implementation is richer and compatible with Jaeger and Zipkin trace data, and development is more active, so it is one of the best choices for building a telemetry platform.\nDemo Refer to the Istio documentation to install and configure Apache SkyWalking.\nEnvironment Description The following is the environment for our demo:\n Kubernetes 1.24.5 Istio 1.16 SkyWalking 9.1.0  Install Istio Before installing Istio, you can check the environment for any problems:\n$ istioctl experimental precheck ✔ No issues found when checking the cluster. Istio is safe to install or upgrade! To get started, check out https://istio.io/latest/docs/setup/getting-started/ Then install Istio and configure the destination for sending tracing messages as SkyWalking:\n# Initial Istio Operator istioctl operator init # Configure tracing destination kubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: install.istio.io/v1alpha1 kind: IstioOperator metadata: namespace: istio-system name: istio-with-skywalking spec: meshConfig: defaultProviders: tracing: - \u0026#34;skywalking\u0026#34; enableTracing: true extensionProviders: - name: \u0026#34;skywalking\u0026#34; skywalking: service: tracing.istio-system.svc.cluster.local port: 11800 EOF Deploy Apache SkyWalking Istio 1.16 supports distributed tracing using Apache SkyWalking. Install SkyWalking by executing the following code:\nkubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.16/samples/addons/extras/skywalking.yaml It will install the following components under the istio-system namespace:\n SkyWalking Observability Analysis Platform (OAP): Used to receive trace data, supports SkyWalking native data formats, Zipkin v1 and v2 and Jaeger format. UI: Used to query distributed trace data.  For more information about SkyWalking, please refer to the SkyWalking documentation.\nDeploy the Bookinfo Application Execute the following command to install the bookinfo application:\nkubectl label namespace default istio-injection=enabled kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml Launch the SkyWalking UI:\nistioctl dashboard skywalking Figure 2 shows all the services available in the bookinfo application:\nFigure 2: SkyWalking General Service page\nYou can also see information about instances, endpoints, topology, tracing, etc. For example, Figure 3 shows the service topology of the bookinfo application:\nFigure 3: Topology diagram of the Bookinfo application\nTracing views in SkyWalking can be displayed in a variety of formats, including list, tree, table, and statistics. 
See Figure 4:\nFigure 4: SkyWalking General Service trace supports multiple display formats\nTo facilitate our examination, set the sampling rate of the trace to 100%:\nkubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: name: mesh-default namespace: istio-system spec: tracing: - randomSamplingPercentage: 100.00 EOF  Important: It’s generally not good practice to set the sampling rate to 100% in a production environment. To avoid the overhead of generating too many trace logs in production, please adjust the sampling strategy (sampling percentage).\n Uninstall After experimenting, uninstall Istio and SkyWalking by executing the following command.\nsamples/bookinfo/platform/kube/cleanup.sh istioctl unintall --purge kubectl delete namespace istio-system Understanding the Bookinfo Tracing Information Navigate to the General Service tab in the Apache SkyWalking UI, and you can see the trace information for the most recent istio-ingressgateway service, as shown in Figure 5. Click on each span to see the details.\nFigure 5: The table view shows the basic information about each span.\nSwitching to the list view, you can see the execution order and duration of each span, as shown in Figure 6:\nFigure 6: List display\nYou might want to know why such a straightforward application generates so much span data. Because after we inject the Envoy proxy into the pod, every request between services will be intercepted and processed by Envoy, as shown in Figure 7:\nFigure 7: Envoy intercepts requests to generate a span\nThe tracing process is shown in Figure 8:\nFigure 8: Trace of the Bookinfo application\nWe give each span a label with a serial number, and the time taken is indicated in parentheses. For illustration purposes, we have summarized all spans in the table below.\n   No. Endpoint Total Duration (ms) Component Duration (ms) Current Service Description     1 /productpage 190 0 istio-ingressgateway Envoy Outbound   2 /productpage 190 1 istio-ingressgateway Ingress -\u0026gt; Productpage network transmission   3 /productpage 189 1 productpage Envoy Inbound   4 /productpage 188 21 productpage Application internal processing   5 /details/0 8 1 productpage Envoy Outbound   6 /details/0 7 3 productpage Productpage -\u0026gt; Details network transmission   7 /details/0 4 0 details Envoy Inbound   8 /details/0 4 4 details Application internal processing   9 /reviews/0 159 0 productpage Envoy Outbound   10 /reviews/0 159 14 productpage Productpage -\u0026gt; Reviews network transmission   11 /reviews/0 145 1 reviews Envoy Inbound   12 /reviews/0 144 109 reviews Application internal processing   13 /ratings/0 35 2 reviews Envoy Outbound   14 /ratings/0 33 16 reviews Reviews -\u0026gt; Ratings network transmission   15 /ratings/0 17 1 ratings Envoy Inbound   16 /ratings/0 16 16 ratings Application internal processing    From the above information, it can be seen that:\n The total time consumed for this request is 190 ms. In Istio sidecar mode, each traffic flow in and out of the application container must pass through the Envoy proxy once, each time taking 0 to 2 ms. Network requests between Pods take between 1 and 16ms. This is because the data itself has errors and the start time of the Span is not necessarily equal to the end time of the parent Span. We can see that the most time-consuming part is the Reviews application, which takes 109 ms so that we can optimize it for that application.  
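To make the bottleneck analysis above concrete, here is a tiny, purely illustrative Python sketch that ranks the services by their application-internal time, using the component durations copied from rows 4, 8, 12, and 16 of the table; it is not part of SkyWalking, just a restatement of the reasoning.

# Rows copied from the span table above: (endpoint, component duration in ms, service)
internal_spans = [
    ("/productpage", 21, "productpage"),
    ("/details/0", 4, "details"),
    ("/reviews/0", 109, "reviews"),
    ("/ratings/0", 16, "ratings"),
]

# Rank services by the time spent inside the application itself; the largest
# self time points at the first candidate for optimization.
for endpoint, self_ms, service in sorted(internal_spans, key=lambda row: row[1], reverse=True):
    print(f"{service:12s} {endpoint:14s} internal processing: {self_ms} ms")

Running it prints reviews first with 109 ms, matching the conclusion drawn above.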
Summary Distributed tracing is an indispensable tool for analyzing performance and troubleshooting modern distributed applications. In this tutorial, we’ve seen how, with just a few minor changes to your application code to propagate tracing headers, Istio makes distributed tracing simple to use. We’ve also reviewed Apache SkyWalking as one of the best distributed tracing systems that Istio supports. It is a fully functional platform for cloud native application analytics, with features such as metrics and log collection, alerting, Kubernetes monitoring, service mesh performance diagnosis using eBPF, and more.\n If you’re new to service mesh and Kubernetes security, we have a bunch of free online courses available at Tetrate Academy that will quickly get you up to speed with Istio and Envoy.\nIf you’re looking for a fast way to get to production with Istio, check out Tetrate Istio Distribution (TID). TID is Tetrate’s hardened, fully upstream Istio distribution, with FIPS-verified builds and support available. It’s a great way to get started with Istio knowing you have a trusted distribution to begin with, have an expert team supporting you, and also have the option to get to FIPS compliance quickly if you need to.\nOnce you have Istio up and running, you will probably need simpler ways to manage and secure your services beyond what’s available in Istio, that’s where Tetrate Service Bridge comes in. You can learn more about how Tetrate Service Bridge makes service mesh more secure, manageable, and resilient here, or contact us for a quick demo.\n","excerpt":"In cloud native applications, a request often needs to be processed through a series of APIs or …","ref":"/blog/how-to-use-skywalking-for-distributed-tracing-in-istio/","title":"How to Use SkyWalking for Distributed Tracing in Istio?"},{"body":"","excerpt":"","ref":"/tags/istio/","title":"Istio"},{"body":"","excerpt":"","ref":"/tags/service-mesh/","title":"Service Mesh"},{"body":"在云原生应用中,一次请求往往需要经过一系列的 API 或后台服务处理才能完成,这些服务有些是并行的,有些是串行的,而且位于不同的平台或节点。那么如何确定一次调用的经过的服务路径和节点以帮助我们进行问题排查?这时候就需要使用到分布式追踪。\n本文将向你介绍:\n 分布式追踪的原理 如何选择分布式追踪软件 在 Istio 中如何使用分布式追踪 以 Bookinfo 和 SkyWalking 为例说明如何查看分布式追踪数据  分布式追踪基础 分布式追踪是一种用来跟踪分布式系统中请求的方法,它可以帮助用户更好地理解、控制和优化分布式系统。分布式追踪中用到了两个概念:TraceID 和 SpanID。\n TraceID 是一个全局唯一的 ID,用来标识一个请求的追踪信息。一个请求的所有追踪信息都属于同一个 TraceID,TraceID 在整个请求的追踪过程中都是不变的; SpanID 是一个局部唯一的 ID,用来标识一个请求在某一时刻的追踪信息。一个请求在不同的时间段会产生不同的 SpanID,SpanID 用来区分一个请求在不同时间段的追踪信息;  TraceID 和 SpanID 是分布式追踪的基础,它们为分布式系统中请求的追踪提供了一个统一的标识,方便用户查询、管理和分析请求的追踪信息。\n下面是分布式追踪的过程:\n 当一个系统收到请求后,分布式追踪系统会为该请求分配一个 TraceID,用于串联起整个调用链; 分布式追踪系统会为该请求在系统内的每一次服务调用生成一个 SpanID 和 ParentID,用于记录调用的父子关系,没有 ParentID 的 Span 将作为调用链的入口; 每个服务调用过程中都要传递 TraceID 和 SpanID; 在查看分布式追踪时,通过 TraceID 查询某次请求的全过程;  Istio 如何实现分布式追踪 Istio 中的分布式追踪是基于数据平面中的 Envoy 代理实现的。服务请求在被劫持到 Envoy 中后,Envoy 在转发请求时会附加大量 Header,其中与分布式追踪相关的有:\n 作为 TraceID:x-request-id 用于在 LightStep 追踪系统中建立 Span 的父子关系:x-ot-span-context 用于 Zipkin,同时适用于 Jaeger、SkyWalking,详见 b3-propagation:  x-b3-traceid x-b3-spanid x-b3-parentspanid x-b3-sampled x-b3-flags b3   用于 Datadog:  x-datadog-trace-id x-datadog-parent-id x-datadog-sampling-priority   用于 SkyWalking:sw8 用于 AWS X-Ray:x-amzn-trace-id  关于这些 Header 的详细用法请参考 Envoy 文档 。\nEnvoy 会在 Ingress Gateway 中为你产生用于追踪的 Header,不论你的应用程序使用何种语言开发,Envoy 都会将这些 Header 转发到上游集群。但是,你还要对应用程序代码做一些小的修改,才能为使用分布式追踪功能。这是因为应用程序无法自动传播这些 Header,可以在程序中集成分布式追踪的 Agent,或者在代码中手动传播这些 Header。Envoy 会将追踪数据发送到 tracer 后端处理,然后就可以在 UI 中查看追踪数据了。\n例如在 Bookinfo 应用中的 Productpage 服务,如果你查看它的代码可以发现,其中集成了 Jaeger 客户端库,并在 getForwardHeaders (request) 方法中将 
Envoy 生成的 Header 同步给对 Details 和 Reviews 服务的 HTTP 请求:\ndef getForwardHeaders(request): headers = {} # 使用 Jaeger agent 获取 x-b3-* header span = get_current_span() carrier = {} tracer.inject( span_context=span.context, format=Format.HTTP_HEADERS, carrier=carrier) headers.update(carrier) # 手动处理非 x-b3-* header if \u0026#39;user\u0026#39; in session: headers[\u0026#39;end-user\u0026#39;] = session[\u0026#39;user\u0026#39;] incoming_headers = [ \u0026#39;x-request-id\u0026#39;, \u0026#39;x-ot-span-context\u0026#39;, \u0026#39;x-datadog-trace-id\u0026#39;, \u0026#39;x-datadog-parent-id\u0026#39;, \u0026#39;x-datadog-sampling-priority\u0026#39;, \u0026#39;traceparent\u0026#39;, \u0026#39;tracestate\u0026#39;, \u0026#39;x-cloud-trace-context\u0026#39;, \u0026#39;grpc-trace-bin\u0026#39;, \u0026#39;sw8\u0026#39;, \u0026#39;user-agent\u0026#39;, \u0026#39;cookie\u0026#39;, \u0026#39;authorization\u0026#39;, \u0026#39;jwt\u0026#39;, ] for ihdr in incoming_headers: val = request.headers.get(ihdr) if val is not None: headers[ihdr] = val return headers 关于 Istio 中分布式追踪的常见问题请见 Istio 文档 。\n分布式追踪系统如何选择 分布式追踪系统的原理类似,市面上也有很多这样的系统,例如 Apache SkyWalking 、Jaeger 、Zipkin 、LightStep 、Pinpoint 等。我们将选择其中三个,从多个维度进行对比。之所以选择它们是因为:\n 它们是当前最流行的开源分布式追踪系统; 都是基于 OpenTracing 规范; 都支持与 Istio 及 Envoy 集成;     类别 Apache SkyWalking Jaeger Zipkin     实现方式 基于语言的探针、服务网格探针、eBPF agent、第三方指标库(当前支持 Zipkin) 基于语言的探针 基于语言的探针   数据存储 ES、H2、MySQL、TiDB、Sharding-sphere、BanyanDB ES、MySQL、Cassandra、内存 ES、MySQL、Cassandra、内存   支持语言 Java、Rust、PHP、NodeJS、Go、Python、C++、.NET、Lua Java、Go、Python、NodeJS、C#、PHP、Ruby、C++ Java、Go、Python、NodeJS、C#、PHP、Ruby、C++   发起者 个人 Uber Twitter   治理方式 Apache Foundation CNCF CNCF   版本 9.3.0 1.39.0 2.23.19   Star 数量 20.9k 16.8k 15.8k    分布式追踪系统对比表(数据截止时间 2022-12-07)\n虽然 Apache SkyWalking 的 Agent 支持的语言没有 Jaeger 和 Zipkin 多,但是 SkyWalking 的实现方式更丰富,并且与 Jaeger、Zipkin 的追踪数据兼容,开发更为活跃,且为国人开发,中文资料丰富,是构建遥测平台的最佳选择之一。\n实验 参考 Istio 文档 来安装和配置 Apache SkyWalking。\n环境说明 以下是我们实验的环境:\n Kubernetes 1.24.5 Istio 1.16 SkyWalking 9.1.0  安装 Istio 安装之前可以先检查下环境是否有问题:\n$ istioctl experimental precheck ✔ No issues found when checking the cluster. Istio is safe to install or upgrade! 
To get started, check out https://istio.io/latest/docs/setup/getting-started/ 然后安装 Istio 同时配置发送追踪信息的目的地为 SkyWalking:\n# 初始化 Istio Operator istioctl operator init # 安装 Istio 并配置使用 SkyWalking kubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: install.istio.io/v1alpha1 kind: IstioOperator metadata: namespace: istio-system name: istio-with-skywalking spec: meshConfig: defaultProviders: tracing: - \u0026#34;skywalking\u0026#34; enableTracing: true extensionProviders: - name: \u0026#34;skywalking\u0026#34; skywalking: service: tracing.istio-system.svc.cluster.local port: 11800 EOF 部署 Apache SkyWalking Istio 1.16 支持使用 Apache SkyWalking 进行分布式追踪,执行下面的代码安装 SkyWalking:\nkubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.16/samples/addons/extras/skywalking.yaml 它将在 istio-system 命名空间下安装:\n SkyWalking OAP (Observability Analysis Platform) :用于接收追踪数据,支持 SkyWalking 原生数据格式,Zipkin v1 和 v2 以及 Jaeger 格式。 UI :用于查询分布式追踪数据。  关于 SkyWalking 的详细信息请参考 SkyWalking 文档 。\n部署 Bookinfo 应用 执行下面的命令安装 bookinfo 示例:\nkubectl label namespace default istio-injection=enabled kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml 打开 SkyWalking UI:\nistioctl dashboard skywalking SkyWalking 的 General Service 页面展示了 bookinfo 应用中的所有服务。\n你还可以看到实例、端点、拓扑、追踪等信息。例如下图展示了 bookinfo 应用的服务拓扑。\nSkyWalking 的追踪视图有多种显示形式,如列表、树形、表格和统计。\nSkyWalking 通用服务追踪支持多种显示样式\n为了方便我们检查,将追踪的采样率设置为 100%:\nkubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: name: mesh-default namespace: istio-system spec: tracing: - randomSamplingPercentage: 100.00 EOF 卸载 在实验完后,执行下面的命令卸载 Istio 和 SkyWalking:\nsamples/bookinfo/platform/kube/cleanup.sh istioctl unintall --purge kubectl delete namespace istio-system Bookinfo demo 追踪信息说明 在 Apache SkyWalking UI 中导航到 General Service 分页,查看最近的 istio-ingressgateway 服务的追踪信息,表视图如下所示。图中展示了此次请求所有 Span 的基本信息,点击每个 Span 可以查看详细信息。\n切换为列表视图,可以看到每个 Span 的执行顺序及持续时间,如下图所示。\n你可能会感到困惑,为什么这么简单的一个应用会产生如此多的 Span 信息?因为我们为 Pod 注入了 Envoy 代理之后,每个服务间的请求都会被 Envoy 拦截和处理,如下图所示。\n整个追踪流程如下图所示。\n图中给每一个 Span 标记了序号,并在括号里注明了耗时。为了便于说明我们将所有 Span 汇总在下面的表格中。\n   序号 方法 总耗时(ms) 组件耗时(ms) 当前服务 说明     1 /productpage 190 0 istio-ingressgateway Envoy Outbound   2 /productpage 190 1 istio-ingressgateway Ingress -\u0026gt; Productpage 网络传输   3 /productpage 189 1 productpage Envoy Inbound   4 /productpage 188 21 productpage 应用内部处理   5 /details/0 8 1 productpage Envoy Outbound   6 /details/0 7 3 productpage Productpage -\u0026gt; Details 网络传输   7 /details/0 4 0 details Envoy Inbound   8 /details/0 4 4 details 应用内部   9 /reviews/0 159 0 productpage Envoy Outbound   10 /reviews/0 159 14 productpage Productpage -\u0026gt; Reviews 网络传输   11 /reviews/0 145 1 reviews Envoy Inbound   12 /reviews/0 144 109 reviews 应用内部处理   13 /ratings/0 35 2 reviews Envoy Outbound   14 /ratings/0 33 16 reviews Reviews -\u0026gt; Ratings 网络传输   15 /ratings/0 17 1 ratings Envoy Inbound   16 /ratings/0 16 16 ratings 应用内部处理    从以上信息可以发现:\n 本次请求总耗时 190ms; 在 Istio sidecar 模式下,每次流量在进出应用容器时都需要经过一次 Envoy 代理,每次耗时在 0 到 2 ms; 在 Pod 间的网络请求耗时在 1 到 16ms 之间; 将耗时做多的调用链 Ingress Gateway -\u0026gt; Productpage -\u0026gt; Reviews -\u0026gt; Ratings 上的所有耗时累计 182 ms,小于请求总耗时 190ms,这是因为数据本身有误差,以及 Span 的开始时间并不一定等于父 Span 的结束时间,如果你在 SkyWalking 的追踪页面,选择「列表」样式查看追踪数据(见图 2)可以更直观的发现这个问题; 我们可以查看到最耗时的部分是 Reviews 应用,耗时 109ms,因此我们可以针对该应用进行优化;  总结 只要对应用代码稍作修改就可以在 Istio 很方便的使用分布式追踪功能。在 Istio 支持的众多分布式追踪系统中,Apache SkyWalking 是其中的佼佼者。它不仅支持分布式追踪,还支持指标和日志收集、报警、Kubernetes 和服务网格监控,使用 eBPF 诊断服务网格性能 
等功能,是一个功能完备的云原生应用分析平台。本文中为了方便演示,将追踪采样率设置为了 100%,在生产使用时请根据需要调整采样策略(采样百分比),防止产生过多的追踪日志。\n 如果您不熟悉服务网格和 Kubernetes 安全性,我们在 Tetrate Academy 提供了一系列免费在线课程,可以让您快速了解 Istio 和 Envoy。\n如果您正在寻找一种快速将 Istio 投入生产的方法,请查看 Tetrate Istio Distribution (TID)。TID 是 Tetrate 的强化、完全上游的 Istio 发行版,具有经过 FIPS 验证的构建和支持。这是开始使用 Istio 的好方法,因为您知道您有一个值得信赖的发行版,有一个支持您的专家团队,并且如果需要,还可以选择快速获得 FIPS 合规性。\n一旦启动并运行 Istio,您可能需要更简单的方法来管理和保护您的服务,而不仅仅是 Istio 中可用的方法,这就是 Tetrate Service Bridge 的用武之地。您可以在这里详细了解 Tetrate Service Bridge 如何使服务网格更安全、更易于管理和弹性,或联系我们进行快速演示。\n","excerpt":"在云原生应用中,一次请求往往需要经过一系列的 API 或后台服务处理才能完成,这些服务有些是并行的,有些是串行的,而且位于不同的平台或节点。那么如何确定一次调用的经过的服务路径和节点以帮助我们进行问题 …","ref":"/zh/how-to-use-skywalking-for-distributed-tracing-in-istio/","title":"如何在 Istio 中使用 SkyWalking 进行分布式追踪?"},{"body":"","excerpt":"","ref":"/tags/aurora/","title":"Aurora"},{"body":"","excerpt":"","ref":"/tags/aws/","title":"AWS"},{"body":"","excerpt":"","ref":"/tags/demo/","title":"Demo"},{"body":"Introduction Apache SkyWalking is an open source APM tool for monitoring and troubleshooting distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. It provides distributed tracing, service mesh observability, metric aggregation and visualization, and alarm.\nIn this article, I will introduce how to quickly set up Apache SkyWalking on AWS EKS and RDS/Aurora, as well as a couple of sample services, monitoring services to observe SkyWalking itself.\nPrerequisites  AWS account AWS CLI Terraform kubectl  We can use the AWS web console or CLI to create all resources needed in this tutorial, but it can be too tedious and hard to debug when something goes wrong. So in this artical I will use Terraform to create all AWS resources, deploy SkyWalking, sample services, and load generator services (Locust).\nArchitecture The demo architecture is as follows:\ngraph LR subgraph AWS subgraph EKS subgraph istio-system namespace direction TB OAP[[SkyWalking OAP]] UI[[SkyWalking UI]] Istio[[istiod]] end subgraph sample namespace Service0[[Service0]] Service1[[Service1]] ServiceN[[Service ...]] end subgraph locust namespace LocustMaster[[Locust Master]] LocustWorkers0[[Locust Worker 0]] LocustWorkers1[[Locust Worker 1]] LocustWorkersN[[Locust Worker ...]] end end RDS[[RDS/Aurora]] end OAP --\u0026gt; RDS Service0 -. telemetry data -.-\u0026gt; OAP Service1 -. telemetry data -.-\u0026gt; OAP ServiceN -. telemetry data -.-\u0026gt; OAP UI --query--\u0026gt; OAP LocustWorkers0 -- traffic --\u0026gt; Service0 LocustWorkers1 -- traffic --\u0026gt; Service0 LocustWorkersN -- traffic --\u0026gt; Service0 Service0 --\u0026gt; Service1 --\u0026gt; ServiceN LocustMaster --\u0026gt; LocustWorkers0 LocustMaster --\u0026gt; LocustWorkers1 LocustMaster --\u0026gt; LocustWorkersN User --\u0026gt; LocustMaster As shown in the architecture diagram, we need to create the following AWS resources:\n EKS cluster RDS instance or Aurora cluster  Sounds simple, but there are a lot of things behind the scenes, such as VPC, subnets, security groups, etc. You have to configure them correctly to make sure the EKS cluster can connect to RDS instance/Aurora cluster otherwise the SkyWalking won\u0026rsquo;t work. 
Luckily, Terraform can help us to create and destroy all these resources automatically.\nI have created a Terraform module to create all AWS resources needed in this tutorial, you can find it in the GitHub repository.\nCreate AWS resources First, we need to clone the GitHub repository and cd into the folder:\ngit clone https://github.com/kezhenxu94/oap-load-test.git Then, we need to create a file named terraform.tfvars to specify the AWS region and other variables:\ncat \u0026gt; terraform.tfvars \u0026lt;\u0026lt;EOF aws_access_key = \u0026#34;\u0026#34; aws_secret_key = \u0026#34;\u0026#34; cluster_name = \u0026#34;skywalking-on-aws\u0026#34; region = \u0026#34;ap-east-1\u0026#34; db_type = \u0026#34;rds-postgresql\u0026#34; EOF If you have already configured the AWS CLI, you can skip the aws_access_key and aws_secret_key variables. To install SkyWalking with RDS postgresql, set the db_type to rds-postgresql, to install SkyWalking with Aurora postgresql, set the db_type to aurora-postgresql.\nThere are a lot of other variables you can configure, such as tags, sample services count, replicas, etc., you can find them in the variables.tf.\nThen, we can run the following commands to initialize the Terraform module and download the required providers, then create all AWS resources:\nterraform init terraform apply -var-file=terraform.tfvars Type yes to confirm the creation of all AWS resources, or add the -auto-approve flag to the terraform apply to skip the confirmation:\nterraform apply -var-file=terraform.tfvars -auto-approve Now what you need to do is to wait for the creation of all AWS resources to complete, it may take a few minutes. You can check the progress of the creation in the AWS web console, and check the deployment progress of the services inside the EKS cluster.\nGenerate traffic Besides creating necessary AWS resources, the Terraform module also deploys SkyWalking, sample services, and Locust load generator services to the EKS cluster.\nYou can access the Locust web UI to generate traffic to the sample services:\nopen http://$(kubectl get svc -n locust -l app=locust-master -o jsonpath=\u0026#39;{.items[0].status.loadBalancer.ingress[0].hostname}\u0026#39;):8089 The command opens the browser to the Locust web UI, you can configure the number of users and hatch rate to generate traffic.\nObserve SkyWalking You can access the SkyWalking web UI to observe the sample services.\nFirst you need to forward the SkyWalking UI port to local\nkubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=skywalking -l component=ui -o name) 8080:8080 And then open the browser to http://localhost:8080 to access the SkyWalking web UI.\nObserve RDS/Aurora You can also access the RDS/Aurora web console to observe the performance of RDS/Aurora instance/Aurora cluste.\nTest Results Test 1: SkyWalking with EKS and RDS PostgreSQL Service Traffic RDS Performance SkyWalking Performance Test 2: SkyWalking with EKS and Aurora PostgreSQL Service Traffic RDS Performance SkyWalking Performance Clean up When you are done with the demo, you can run the following command to destroy all AWS resources:\nterraform destroy -var-file=terraform.tfvars -auto-approve ","excerpt":"Introduction Apache SkyWalking is an open source APM tool for monitoring and troubleshooting …","ref":"/blog/2022-12-13-how-to-run-apache-skywalking-on-aws-eks-rds/","title":"How to run Apache SkyWalking on AWS EKS and 
RDS/Aurora"},{"body":"","excerpt":"","ref":"/tags/observability/","title":"Observability"},{"body":"","excerpt":"","ref":"/tags/rds/","title":"RDS"},{"body":"","excerpt":"","ref":"/tags/skywalking/","title":"SkyWalking"},{"body":"介绍 Apache SkyWalking 是一个开源的 APM 工具,用于监控分布式系统和排除故障,特别是为微服务、云原生和基于容器(Docker、Kubernetes、Mesos)的架构而设计。它提供分布式跟踪、服务网格可观测性、指标聚合和可视化以及警报。\n在本文中,我将介绍如何在 AWS EKS 和 RDS/Aurora 上快速设置 Apache SkyWalking,以及几个示例服务,监控服务以观察 SkyWalking 本身。\n先决条件  AWS 账号 AWS CLI Terraform kubectl  我们可以使用 AWS Web 控制台或 CLI 来创建本教程所需的所有资源,但是当出现问题时,它可能过于繁琐且难以调试。因此,在本文中,我将使用 Terraform 创建所有 AWS 资源、部署 SkyWalking、示例服务和负载生成器服务 (Locust)。\n架构 演示架构如下:\ngraph LR subgraph AWS subgraph EKS subgraph istio-system namespace direction TB OAP[[SkyWalking OAP]] UI[[SkyWalking UI]] Istio[[istiod]] end subgraph sample namespace Service0[[Service0]] Service1[[Service1]] ServiceN[[Service ...]] end subgraph locust namespace LocustMaster[[Locust Master]] LocustWorkers0[[Locust Worker 0]] LocustWorkers1[[Locust Worker 1]] LocustWorkersN[[Locust Worker ...]] end end RDS[[RDS/Aurora]] end OAP --\u0026gt; RDS Service0 -. telemetry data -.-\u0026gt; OAP Service1 -. telemetry data -.-\u0026gt; OAP ServiceN -. telemetry data -.-\u0026gt; OAP UI --query--\u0026gt; OAP LocustWorkers0 -- traffic --\u0026gt; Service0 LocustWorkers1 -- traffic --\u0026gt; Service0 LocustWorkersN -- traffic --\u0026gt; Service0 Service0 --\u0026gt; Service1 --\u0026gt; ServiceN LocustMaster --\u0026gt; LocustWorkers0 LocustMaster --\u0026gt; LocustWorkers1 LocustMaster --\u0026gt; LocustWorkersN User --\u0026gt; LocustMaster 如架构图所示,我们需要创建以下 AWS 资源:\n EKS 集群 RDS 实例或 Aurora 集群  听起来很简单,但背后有很多东西,比如 VPC、子网、安全组等。你必须正确配置它们以确保 EKS 集群可以连接到 RDS 实例 / Aurora 集群,否则 SkyWalking 不会不工作。幸运的是,Terraform 可以帮助我们自动创建和销毁所有这些资源。\n我创建了一个 Terraform 模块来创建本教程所需的所有 AWS 资源,您可以在 GitHub 存储库中找到它。\n创建 AWS 资源 首先,我们需要将 GitHub 存储库克隆 cd 到文件夹中:\ngit clone https://github.com/kezhenxu94/oap-load-test.git 然后,我们需要创建一个文件 terraform.tfvars 来指定 AWS 区域和其他变量:\ncat \u0026gt; terraform.tfvars \u0026lt;\u0026lt;EOF aws_access_key = \u0026#34;\u0026#34; aws_secret_key = \u0026#34;\u0026#34; cluster_name = \u0026#34;skywalking-on-aws\u0026#34; region = \u0026#34;ap-east-1\u0026#34; db_type = \u0026#34;rds-postgresql\u0026#34; EOF 如果您已经配置了 AWS CLI,则可以跳过 aws_access_key 和 aws_secret_key 变量。要使用 RDS postgresql 安装 SkyWalking,请将 db_type 设置为 rds-postgresql,要使用 Aurora postgresql 安装 SkyWalking,请将 db_type 设置为 aurora-postgresql。\n您可以配置许多其他变量,例如标签、示例服务计数、副本等,您可以在 variables.tf 中找到它们。\n然后,我们可以运行以下命令来初始化 Terraform 模块并下载所需的提供程序,然后创建所有 AWS 资源:\nterraform init terraform apply -var-file=terraform.tfvars 键入 yes 以确认所有 AWS 资源的创建,或将标志 -auto-approve 添加到 terraform apply 以跳过确认:\nterraform apply -var-file=terraform.tfvars -auto-approve 现在你需要做的就是等待所有 AWS 资源的创建完成,这可能需要几分钟的时间。您可以在 AWS Web 控制台查看创建进度,也可以查看 EKS 集群内部服务的部署进度。\n产生流量 除了创建必要的 AWS 资源外,Terraform 模块还将 SkyWalking、示例服务和 Locust 负载生成器服务部署到 EKS 集群。\n您可以访问 Locust Web UI 以生成到示例服务的流量:\nopen http://$(kubectl get svc -n locust -l app=locust-master -o jsonpath=\u0026#39;{.items[0].status.loadBalancer.ingress[0].hostname}\u0026#39;):8089 该命令将浏览器打开到 Locust web UI,您可以配置用户数量和孵化率以生成流量。\n观察 SkyWalking 您可以访问 SkyWalking Web UI 来观察示例服务。\n首先需要将 SkyWalking UI 端口转发到本地:\nkubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=skywalking -l component=ui -o name) 8080:8080 然后在浏览器中打开 http://localhost:8080 访问 SkyWalking web UI。\n观察 RDS/Aurora 您也可以访问 RDS/Aurora web 控制台,观察 RDS/Aurora 实例 / Aurora 集群的性能。\n试验结果 测试 1:使用 EKS 和 RDS PostgreSQL 的 SkyWalking 服务流量 RDS 性能 SkyWalking 性能 测试 2:使用 EKS 和 
Aurora PostgreSQL 的 SkyWalking 服务流量 RDS 性能 SkyWalking 性能 清理 完成演示后,您可以运行以下命令销毁所有 AWS 资源:\nterraform destroy -var-file=terraform.tfvars -auto-approve ","excerpt":"介绍 Apache SkyWalking 是一个开源的 APM 工具,用于监控分布式系统和排除故障,特别是为微服务、云原生和基于容器(Docker、Kubernetes、Mesos)的架构而设计。它提 …","ref":"/zh/2022-12-13-how-to-run-apache-skywalking-on-aws-eks-rds/","title":"如何在 AWS EKS 和 RDS/Aurora 上运行 Apache SkyWalking"},{"body":"","excerpt":"","ref":"/tags/sharding-sphere/","title":"Sharding-Sphere"},{"body":"","excerpt":"","ref":"/tags/sharding-sphere-proxy/","title":"Sharding-Sphere-proxy"},{"body":"As an application performance monitoring tool for distributed systems, Apache SkyWalking observes metrics, logs, traces, and events in the service mesh.\nSkyWalking OAP’s dataflow processing architecture boasts high performance and is capable of dealing with massive data traffic in real-time. However, storing, updating, and querying massive amounts of data poses a great challenge to its backend storage system.\nBy default, SkyWalking provides storage methods including H2, OpenSearch, ElasticSearch, MySQL, TiDB, PostgreSQL, and BanyanDB. Among them, MySQL storage is suited to a single machine and table (MySQL cluster capability depends on your technology selection). Nevertheless, in the context of high-traffic business systems, the storage of monitoring data is put under great pressure and query performance is lowered.\nBased on MySQL storage, SkyWalking v9.3.0 provides a new storage method: MySQL-Sharding. It supports database and table sharding features thanks to ShardingSphere-Proxy, which is a mature solution for dealing with relational databases’ massive amounts of data.\n1. Architecture Deployment  SkyWalking will only interact with ShardingSphere-Proxy instead of directly connecting to the database. The connection exposed by each MySQL node is a data source managed by ShardingSphere-Proxy. ShardingSphere-Proxy will establish a virtual logical database based on the configuration and then carry out database and table sharding and routing according to the OAP provided data sharding rules. SkyWalking OAP creates data sharding rules and performs DDL and DML on a virtual logical database just like it does with MySQL.  2. Application Scenario Applicable to scenarios where MySQL is used for storage, but the single-table mode cannot meet the performance requirements created by business growth.\n3. How Does Data Sharding Work with SkyWalking? Data sharding defines the data Model in SkyWalking with the annotation @SQLDatabase.Sharding.\n@interface Sharding { ShardingAlgorithm shardingAlgorithm(); String dataSourceShardingColumn() default \u0026#34;\u0026#34;; String tableShardingColumn() default \u0026#34;\u0026#34;; } Note:\n shardingAlgorithm: Table sharding algorithm dataSourceShardingColumn: Database sharding key tableShardingColumn: Table sharding key\n SkyWalking selects database sharding key, table sharding key and table sharding algorithm based on @SQLDatabase.Sharding, in order to dynamically generate sharding rules for each table. Next, it performs rule definition by operating ShardingSphere-Proxy via DistSQL. ShardingSphere-Proxy carries out data sharding based on the rule definition.\n3.1 Database Sharding Method SkyWalking adopts a unified method to carry out database sharding. The number of databases that need to be sharded requires modulo by the hash value of the database sharding key, which should be the numeric suffix of the routing target database. 
Therefore, the routing target database is:\nds_{dataSourceShardingColumn.hashcode() % dataSourceList.size()} For example, we now have dataSourceList = ds_0…ds_n. If {dataSourceShardingColumn.hashcode() % dataSourceList.size() = 2}, all the data will be routed to the data source node ds_2.\n3.2 Table Sharding Method The table sharding algorithm mainly shards according to the data owing to the TTL mechanism. According to TTL, there will be one sharding table per day:\n{tableName = logicTableName_timeSeries (data)} To ensure that data within the TTL can be written and queried, the time series will generate the current date:\n{timeSeries = currentDate - TTL +1...currentDate + 1} For example, if TTL=3 and currentDate=20220907, sharding tables will be: logicTableName_20220905 logicTableName_20220906 logicTableName_20220907 logicTableName_20220908\nSkyWalking provides table sharding algorithms for different data models:\n   Algorithm Name Sharding Description Time Precision Requirements for Sharding Key Typical Application Data Model     NO_SHARDING No table sharding and single-table mode is maintained. N/A Data model with a small amount of data and no need for sharding.   TIME_RELATIVE_ID_SHARDING_ALGORITHM Shard by day using time_bucket in the ID column. time_bucket can be accurate to seconds, minutes, hours, or days in the same table. Various metrics.   TIME_SEC_RANGE_SHARDING_ALGORITHM Shard by day using time_bucket column. time_bucket must be accurate to seconds. SegmentRecordLogRecord, etc.   TIME_MIN_RANGE_SHARDING_ALGORITHM Shard by day using time_bucket column. time_bucket must be accurate to minutes. EndpointTraffic   TIME_BUCKET_SHARDING_ALGORITHM Shard by day using time_bucket column. time_bucket can be accurate to seconds, minutes, hours, and days in the same table. Service, Instance, Endpoint and other call relations such as ServiceRelationServerSideMetrics    4. TTL Mechanism   For sharding tables, delete the physical table deadline \u0026gt;= timeSeries according to TTL.\n{deadline = new DateTime().plusDays(-ttl)}   TTL timer will delete the expired tables according to the current date while updating sharding rules according to the new date and informing ShardingSphere-Proxy to create new sharding tables.\n  For a single table, use the previous method and delete the row record of deadline \u0026gt;=time_bucket.\n  5. Examples of Sharding Data Storage Next, we’ll take segment (Record type) and service_resp_time (Metrics type) as examples to illustrate the data storage logic and physical distribution. Here, imagine MySQL has two nodes ds_0 and ds_1.\nNote:\n The following storage table structure is just a simplified version as an example, and does not represent the real SkyWalking table structure.\n 5.1 segment The sharding configuration is as follows:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_SEC_RANGE_SHARDING_ALGORITHM, dataSourceShardingColumn = service_id, tableShardingColumn = time_bucket) The logical database, table structures and actual ones are as follows:\n5.2 service_resp_time The sharding configuration is as follows:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_RELATIVE_ID_SHARDING_ALGORITHM, tableShardingColumn = id, dataSourceShardingColumn = entity_id) The logical database and table structures and actual ones are as follows:\n6. How to Use ShardingSphere-Proxy? 6.1 Manual Deployment Here we take the deployment of a single-node SkyWalking OAP and ShardingSphere-Proxy 5.1.2 as an example. 
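Before walking through the deployment, here is a small, hypothetical Python sketch of the routing rules described in sections 3.1 and 3.2 above. SkyWalking OAP implements this logic in Java and pushes the resulting rules to ShardingSphere-Proxy via DistSQL; Python's built-in hash() merely stands in for Java's String.hashCode(), and the function names are illustrative only.

from datetime import date, timedelta

def route_datasource(sharding_value: str, datasource_count: int) -> str:
    # Section 3.1: data source suffix = hash(database sharding column) modulo the data source count.
    return f"ds_{hash(sharding_value) % datasource_count}"

def tables_within_ttl(logic_table: str, ttl_days: int, today: date) -> list:
    # Section 3.2: one physical table per day, covering currentDate - TTL + 1 ... currentDate + 1,
    # so that every write and query inside the TTL window has a table to land in.
    days = [today + timedelta(days=offset) for offset in range(-ttl_days + 1, 2)]
    return [f"{logic_table}_{d.strftime('%Y%m%d')}" for d in days]

print(route_datasource("service-a-instance-1", 2))
# -> ds_0 or ds_1, depending on the hash of the sharding column value

print(tables_within_ttl("segment", 3, date(2022, 9, 7)))
# -> ['segment_20220905', 'segment_20220906', 'segment_20220907', 'segment_20220908'],
#    matching the TTL=3 / currentDate=20220907 example above

With rules along these lines in place, ShardingSphere-Proxy can route each write and query issued against the logical table to the corresponding ds_N node and daily physical table.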
Please refer to the relevant documentation for the cluster deployment.\n Prepare the MySQL cluster. Deploy, install and configure ShardingSphere-Proxy:    conf/server.yaml and props.proxy-hint-enabled must be true. Refer to the link for the complete configuration.\n  conf/config-sharding.yaml configures logical database and dataSources list. The dataSource name must be prefixed with ds_ and start with ds_0. For details about the configuration, please refer to this page.\n   Deploy, install and configure SkyWalking OAP:    Set up OAP environment variables: ${SW_STORAGE:mysql-sharding},\n  Configure the connection information based on the actual deployment: ${SW_JDBC_URL} ${SW_DATA_SOURCE_USER} ${SW_DATA_SOURCE_PASSWORD}\n  Note:\n Connection information must correspond to ShardingSphere-Proxy virtual database.\n Configure the data source name configured by conf/config-sharding.yaml in ShardingSphere-Proxy to ${SW_JDBC_SHARDING_DATA_SOURCES} and separate names with commas.   Start the MySQL cluster. Start ShardingSphere-Proxy. Start SkyWalking OAP.  6.2 Running Demo with Docker Our GitHub repository provides a complete and operational demo based on Docker, allowing you to quickly grasp the operation’s effectiveness. The deployment includes the following:\n One OAP service. The TTL of Metrics and Record data set to 2 days. One sharding-proxy service with version 5.1.2. Its external port is 13307 and the logical database name is swtest. Two MySQL services. Their external ports are 3306 and 3307 respectively and they are configured as ds_0 and ds_1 in sharding-proxy’s conf/config-sharding.yaml. One provider service (simulated business programs used to verify trace and metrics and other data). Its external port is 9090. One consumer service (simulated business programs used to verify trace and metrics and other data). Its external port is 9092.  Download the demo program locally and run it directly in the directory skywalking-mysql-sharding-demo.\ndocker-compose up -d Note:\n The first startup may take some time to pull images and create all the tables.\n Once all the services are started, database tools can be used to check the creation of sharding-proxy logical tables and the actual physical sharding table in the two MySQL databases. Additionally, you can also connect the sharding-proxy logical database to view the data query routing. For example:\nPREVIEW SELECT * FROM SEGMENT The result is as follows:\nThe simulated business program provided by the demo can simulate business requests by requesting the consumer service to verify various types of data distribution:\ncurl http://127.0.0.1:9092/info 7. Conclusion In this blog, we introduced SkyWalking’s new storage feature, MySQL sharding, which leverage ShardingSphere-Proxy and covered details of its deployment architecture, application scenarios, sharding logic, and TTL mechanism. We’ve also provided sample data and deployment steps to help get started.\nSkyWalking offers a variety of storage options to fit many use cases. If you need a solution to store large volumes of telemetry data in a relational database, the new MySQL sharding feature is worth a look. 
For more information on the SkyWalking 9.3.0 release and where to get it, check out the release notes.\n","excerpt":"As an application performance monitoring tool for distributed systems, Apache SkyWalking observes …","ref":"/blog/skywalkings-new-storage-feature-based-on-shardingsphere-proxy-mysql-sharding/","title":"SkyWalking's New Storage Feature Based on ShardingSphere-Proxy: MySQL-Sharding"},{"body":"SkyWalking NodeJS 0.6.0 is released. Go to downloads page to find release tars.\n Add missing build doc by @kezhenxu94 in https://github.com/apache/skywalking-nodejs/pull/92 Fix invalid url error in axios plugin by @kezhenxu94 in https://github.com/apache/skywalking-nodejs/pull/93 Ignore no requests if ignoreSuffix is empty by @michaelzangl in https://github.com/apache/skywalking-nodejs/pull/94 Escape HTTP method in regexp by @michaelzangl in https://github.com/apache/skywalking-nodejs/pull/95 docs: grammar improvements by @BFergerson in https://github.com/apache/skywalking-nodejs/pull/97 fix: entry span url in endponts using Express middleware/router objects by @BFergerson in https://github.com/apache/skywalking-nodejs/pull/96 chore: use openapi format for endpoint uris by @BFergerson in https://github.com/apache/skywalking-nodejs/pull/98 AWS DynamoDB, Lambda, SQS and SNS plugins, webpack by @tom-pytel in https://github.com/apache/skywalking-nodejs/pull/100 Fix nits by @wu-sheng in https://github.com/apache/skywalking-nodejs/pull/101 Update AxiosPlugin for v1.0+ by @tom-pytel in https://github.com/apache/skywalking-nodejs/pull/102  ","excerpt":"SkyWalking NodeJS 0.6.0 is released. Go to downloads page to find release tars.\n Add missing build …","ref":"/events/release-apache-skywalking-nodejs-0-6-0/","title":"Release Apache SkyWalking for NodeJS 0.6.0"},{"body":"SkyWalking 9.3.0 is released. Go to downloads page to find release tars.\nMetrics Association    Dashboard Pop-up Trace Query          APISIX Dashboard Use Sharding MySQL as the Database Virtual Cache Performance Virtual MQ Performance Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. 
Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. 
[Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. 
Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. 
Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.3.0 is released. Go to downloads page to find release tars.\nMetrics Association …","ref":"/events/release-apache-skywalking-apm-9.3.0/","title":"Release Apache SkyWalking APM 9.3.0"},{"body":"","excerpt":"","ref":"/zh_tags/shardingsphere/","title":"ShardingSphere"},{"body":"","excerpt":"","ref":"/zh_tags/shardingsphere-proxy/","title":"ShardingSphere-proxy"},{"body":"Apache SkyWalking 作为一个分布式系统的应用性能监控工具,它观察服务网格中的指标、日志、痕迹和事件。其中 SkyWalking OAP 高性能的数据流处理架构能够实时处理庞大的数据流量,但是这些海量数据的存储更新和后续查询对后端存储系统带来了挑战。\nSkyWalking 默认已经提供了多种存储支持包括 H2、OpenSearch、ElasticSearch、MySQL、TiDB、PostgreSQL、BanyanDB。其中 MySQL 存储提供的是针对单机和单表的存储方式(MySQL 的集群能力需要自己选型提供),在面对高流量的业务系统时,监控数据的存储存在较大压力,同时影响查询性能。\n在 MySQL 存储基础上 SkyWalking v9.3.0 提供了一种新的存储方式 MySQL-Sharding,它提供了基于 ShardingSphere-Proxy 的分库分表特性,而分库分表是关系型数据库面对大数据量处理的成熟解决方案。\n部署架构 SkyWalking 使用 ShardingSphere-Proxy 的部署方式如下图所示。\n SkyWalking OAP 由直连数据库的方式变成只与 ShardingSphere-Proxy 进行交互; 每一个 MySQL 节点暴露的连接都是一个数据源,由 ShardingSphere-Proxy 进行统一管理; ShardingSphere-Proxy 会根据配置建立一个虚拟逻辑数据库,根据 OAP 提供的分库分表规则进行库表分片和路由; SkyWalking OAP 负责生成分库分表规则并且像操作 MySQL 一样对虚拟逻辑库执行 DDL 和 DML;  适用场景 希望使用 MySQL 作为存储,随着业务规模的增长,单表模式已经无法满足性能需要。\nSkyWalking 分库分表逻辑 分库分表逻辑通过注解 @SQLDatabase.Sharding 对 SkyWalking 中的数据模型 Model 进行定义:\n@interface Sharding { ShardingAlgorithm shardingAlgorithm(); String dataSourceShardingColumn() default \u0026#34;\u0026#34;; String tableShardingColumn() default \u0026#34;\u0026#34;; } 其中:\n  shardingAlgorithm:表分片算法\n  dataSourceShardingColumn:分库键\n  tableShardingColumn:分表键\n  SkyWalking 根据注解 @SQLDatabase.Sharding 选择分库键、分表键以及表分片算法对每个表动态生成分片规则通过 DistSQL 操作 Shardingsphere-Proxy 执行规则定义 Shardingsphere-Proxy 根据规则定义进行数据分片。\n分库方式 SkyWalking 对于分库采用统一的方式,路由目标库的数字后缀使用分库键的哈希值取模需要分库的数据库数量,所以路由目标库为:\nds_{dataSourceShardingColumn.hashcode() % dataSourceList.size()} 例如我们有 dataSourceList = 
ds_0...ds_n,如果\n{dataSourceShardingColumn.hashcode() % dataSourceList.size() = 2} 那么所有数据将会路由到 ds_2 这个数据源节点上。\n分表方式 由于 TTL 机制的存在,分表算法主要根据时间的日期进行分片,分片表的数量是根据 TTL 每天一个表:\n分片表名 = 逻辑表名_时间序列(日期):{tableName =logicTableName_timeSeries}\n为保证在 TTL 有效期内的数据能够被写入和查询,时间序列将生成当前日期\n{timeSeries = currentDate - TTL +1...currentDate + 1} 例如:如果 TTL=3, currentDate = 20220907,则分片表为:\nlogicTableName_20220905 logicTableName_20220906 logicTableName_20220907 logicTableName_20220908 SkyWalking 提供了多种不同的分表算法用于不同的数据模型:\n   算法名称 分片说明 分片键时间精度要求 典型应用数据模型     NO_SHARDING 不做任何表分片,保持单表模式 / 数据量小无需分片的数据模型   TIME_RELATIVE_ID_SHARDING_ALGORITHM 使用 ID 列中的 time_bucket 按天分片 time_bucket 的精度可以是同一表中的秒、分、小时和天 各类 Metrics 指标   TIME_SEC_RANGE_SHARDING_ALGORITHM 使用 time_bucket 列按天分片 time_bucket 的精度必须是秒 SegmentRecordLogRecord 等   TIME_MIN_RANGE_SHARDING_ALGORITHM 使用 time_bucket 列按天分片 time_bucket 的精度必须是分钟 EndpointTraffic   TIME_BUCKET_SHARDING_ALGORITHM 使用 time_bucket 列按天分片 time_bucket 的精度可以是同一个表中的秒、分、小时和天 Service、Instance、Endpoint 调用关系等如 ServiceRelationServerSideMetrics    TTL 机制  对于进行分片的表根据 TTL 直接删除 deadline \u0026gt;= timeSeries 的物理表 {deadline = new DateTime().plusDays(-ttl)} TTL 定时器在根据当前日期删除过期表的同时也会根据新日期更新分片规则,通知 ShardingSphere-Proxy 创建新的分片表 对于单表的延续之前的方式,删除 deadline \u0026gt;= time_bucket 的行记录  分片数据存储示例 下面以 segment(Record 类型)和 service_resp_time(Metrics 类型)两个为例说明数据存储的逻辑和物理分布。这里假设 MySQL 为 ds_0 和 ds_1 两个节点。\n注意:以下的存储表结构仅为简化后的存储示例,不表示 SkyWalking 真实的表结构。\nsegment 分片配置为:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_SEC_RANGE_SHARDING_ALGORITHM, dataSourceShardingColumn = service_id, tableShardingColumn = time_bucket) 逻辑库表结构和实际库表如下图:\nservice_resp_time 分片配置为:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_RELATIVE_ID_SHARDING_ALGORITHM, tableShardingColumn = id, dataSourceShardingColumn = entity_id) 逻辑库表结构和实际库表如下图:\n如何使用 你可以选择手动或使用 Docker 来运行 Demo。\n手动部署 这里以单节点 SkyWalking OAP 和 Shardingsphere-Proxy 5.1.2 部署为例,集群部署请参考其他相关文档。\n  准备好 MySQL 集群\n  部署安装并配置 Shardingsphere-Proxy:\n conf/server.yaml,props.proxy-hint-enabled 必须为 true,完整配置可参考这里。 conf/config-sharding.yaml,配置逻辑数据库和 dataSources 列表,dataSource 的名称必须以 ds_为前缀,并且从 ds_0 开始,完整配置可参考这里。    部署安装并配置 SkyWalking OAP:\n 设置 OAP 环境变量 ${SW_STORAGE:mysql-sharding} 根据实际部署情况配置连接信息: ${SW_JDBC_URL} ${SW_DATA_SOURCE_USER} ${SW_DATA_SOURCE_PASSWORD}  注意:连接信息需对应 Shardingsphere-Proxy 虚拟数据库。\n  将 Shardingsphere-Proxy 中 conf/config-sharding.yaml 配置的数据源名称配置在 ${SW_JDBC_SHARDING_DATA_SOURCES} 中,用 , 分割\n  启动 MySQL 集群\n  启动 Shardingsphere-Proxy\n  启动 SkyWalking OAP\n  使用 Docker 运行 Demo GitHub 资源库提供了一个基于 Docker 完整可运行的 demo:skywalking-mysql-sharding-demo,可以快速尝试实际运行效果。\n其中部署包含:\n oap 服务 1 个,Metrics 和 Record 数据的 TTL 均设为 2 天 sharding-proxy 服务 1 个版本为 5.1.2,对外端口为 13307,创建的逻辑库名称为 swtest mysql 服务 2 个,对外端口分别为 3306,3307,在 sharding-proxy 的 conf/config-sharding.yaml 中配置为 ds_0 和 ds_1 provider 服务 1 个(模拟业务程序用于验证 trace 和 metrics 等数据),对外端口为 9090 consumer 服务 1 个(模拟业务程序用于验证 trace 和 metrics 等数据),对外端口为 9092  将 Demo 程序获取到本地后,在 skywalking-mysql-sharding-demo 目录下直接运行:\ndocker-compose up -d 注意:初次启动由于拉取镜像和新建所有表可能需要一定的时间。\n所有服务启动完成之后可以通过数据库工具查看 sharding-proxy 逻辑表创建情况,以及两个 MySQL 库中实际的物理分片表创建情况。也可以连接 sharding-proxy 逻辑库 swtest 查看数据查询路由情况,如:\nPREVIEW SELECT * FROM SEGMENT 显示结果如下:\nDemo 提供的模拟业务程序可以通过请求 consumer 服务模拟业务请求,用于验证各类型数据分布:\ncurl http://127.0.0.1:9092/info 总结 在这篇文章中我们详细介绍了 SkyWalking 基于 ShardingSphere-Proxy 的 MySQL-Sharding 存储特性的部署架构、适应场景、核心分库分表逻辑以及 TTL 机制,并提供了运行后的数据存储示例和详细部署配置步骤以便大家快速理解上手。SkyWalking 提供了多种存储方式以供选择,如果你目前的需求如本文所述,欢迎使用该新特性。\n","excerpt":"Apache SkyWalking 
作为一个分布式系统的应用性能监控工具,它观察服务网格中的指标、日志、痕迹和事件。其中 SkyWalking OAP 高性能的数据流处理架构能够实时处理庞大的数据流 …","ref":"/zh/skywalking-shardingsphere-proxy/","title":"SkyWalking 基于 ShardingSphere-Proxy 的 MySQL-Sharding 分库分表的存储特性介绍"},{"body":"SkyWalking Kubernetes Helm Chart 4.4.0 is released. Go to downloads page to find release tars.\n [Breaking Change]: remove .Values.oap.initEs, there is no need to use this to control whether to run init job anymore, SkyWalking Helm Chart automatically delete the init job when installing/upgrading. [Breaking Change]: remove files/config.d mechanism and use values.yaml files to put the configurations to override default config files in the /skywalking/config folder, using files/config.d is very limited and you have to clone the source codes if you want to use this mechanism, now you can simply use our Docker Helm Chart to install. Refactor oap init job, and support postgresql storage. Upgrade ElasticSearch Helm Chart dependency version.  ","excerpt":"SkyWalking Kubernetes Helm Chart 4.4.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-kubernetes-helm-chart-4.4.0/","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.4.0"},{"body":"SkyWalking PHP 0.2.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Update PECL user by @heyanlong in https://github.com/apache/skywalking-php/pull/12 Start up 0.2.0 by @heyanlong in https://github.com/apache/skywalking-php/pull/13 Update compiling project document. by @jmjoy in https://github.com/apache/skywalking-php/pull/14 Add PDO plugin, and switch unix datagram to stream. by @jmjoy in https://github.com/apache/skywalking-php/pull/15 Update readme about creating issue. by @jmjoy in https://github.com/apache/skywalking-php/pull/17 Fix package.xml role error by @heyanlong in https://github.com/apache/skywalking-php/pull/16 Add swoole support. by @jmjoy in https://github.com/apache/skywalking-php/pull/19 Add .fleet to .gitignore by @heyanlong in https://github.com/apache/skywalking-php/pull/20 [Feature] Add Mysql Improved Extension by @heyanlong in https://github.com/apache/skywalking-php/pull/18 Add predis plugin. by @jmjoy in https://github.com/apache/skywalking-php/pull/21 Take care of PDO false and DSN tailing semicolons. by @phanalpha in https://github.com/apache/skywalking-php/pull/22 Add container by @heyanlong in https://github.com/apache/skywalking-php/pull/23 Save PDO exceptions. by @phanalpha in https://github.com/apache/skywalking-php/pull/24 Update minimal supported PHP version to 7.2. by @jmjoy in https://github.com/apache/skywalking-php/pull/25 Utilize UnixListener for the worker process to accept reports. by @phanalpha in https://github.com/apache/skywalking-php/pull/26 Kill the worker on module shutdown. by @phanalpha in https://github.com/apache/skywalking-php/pull/28 Add plugin for memcached. by @jmjoy in https://github.com/apache/skywalking-php/pull/27 Upgrade rust mini version to 1.65. by @jmjoy in https://github.com/apache/skywalking-php/pull/30 Add plugin for phpredis. by @jmjoy in https://github.com/apache/skywalking-php/pull/29 Add missing request_id. by @jmjoy in https://github.com/apache/skywalking-php/pull/31 Adapt virtual cache. by @jmjoy in https://github.com/apache/skywalking-php/pull/32 Fix permission denied of unix socket. by @jmjoy in https://github.com/apache/skywalking-php/pull/33 Bump to 0.2.0. 
by @jmjoy in https://github.com/apache/skywalking-php/pull/34  New Contributors  @phanalpha made their first contribution in https://github.com/apache/skywalking-php/pull/22  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.1.0...v0.2.0\nPECL https://pecl.php.net/package/skywalking_agent/0.2.0\n","excerpt":"SkyWalking PHP 0.2.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skwaylking-php-0-2-0/","title":"Release Apache SkyWalking PHP 0.2.0"},{"body":"This is an official annoucement from SkyWalking team.\nDue to the Plan to End-of-life(EOL) all v8 releases in Nov. 2022 had been posted in 3 months, SkyWalking community doesn\u0026rsquo;t received any objection or a proposal about releasing a new patch version.\nNow, it is time to end the v8 series. All documents of v8 are not going to be hosted on the website. You only could find the artifacts and source codes from the Apache\u0026rsquo;s archive repository. The documents of each version are included in /docs/ folder in the source tars.\nThe SkyWalking community would reject the bug reports and release proposal due to its End-of-life(EOL) status. v9 provides more powerful features and covers all capabilities of the latest v8. Recommend upgrading to the latest.\nV8 was a memorable and significative release series, which makes the project globally adopted. It brought dev community scale up to over 500 contributors.\nWe want to highlight and thank all those contributors and end users again. You made today\u0026rsquo;s SkyWalking.\nWelcome more contributors and users to join the community, to contribute your ideas, experiences, and feedback. We need you to improve and enhance the project to a higher level.\n","excerpt":"This is an official annoucement from SkyWalking team.\nDue to the Plan to End-of-life(EOL) all v8 …","ref":"/events/v8-eol/","title":"SkyWalking v8 OAP server End-of-life(EOL)"},{"body":"SkyWalking BanyanDB 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Command line tool: bydbctl. Retention controller. Full-text searching. TopN aggregation. Add RESTFul style APIs based on gRPC gateway. Add \u0026ldquo;exists\u0026rdquo; endpoints to the schema registry. Support tag-based CRUD of the property. Support index-only tags. Support logical operator(and \u0026amp; or) for the query.  Bugs  \u0026ldquo;metadata\u0026rdquo; syncing pipeline complains about an \u0026ldquo;unknown group\u0026rdquo;. \u0026ldquo;having\u0026rdquo; semantic inconsistency. \u0026ldquo;tsdb\u0026rdquo; leaked goroutines.  Chores  \u0026ldquo;tsdb\u0026rdquo; structure optimization.  Merge the primary index into the LSM-based index Remove term metadata.   Memory parameters optimization. Bump go to 1.19.  ","excerpt":"SkyWalking BanyanDB 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Command …","ref":"/events/release-apache-skywalking-banyandb-0-2-0/","title":"Release Apache SkyWalking BanyanDB 0.2.0"},{"body":"SkyWalking Java Agent 8.13.0 is released. Go to downloads page to find release tars. Changes by Version\n8.13.0 This release begins to adopt SkyWalking 9.3.0+ Virtual Cache Analysis,Virtual MQ Analysis\n Support set-type in the agent or plugin configurations Optimize ConfigInitializer to output warning messages when the config value is truncated. Fix the default value of the Map field would merge rather than override by new values in the config. Support to set the value of Map/List field to an empty map/list. 
Add plugin to support Impala JDBC 2.6.x. Update guava-cache, jedis, memcached, ehcache plugins to adopt uniform tags. Fix Apache ShenYu plugin traceId empty string value. Add plugin to support brpc-java-3.x Update compose-start-script.template to make compatible with new version docker compose Bump up grpc to 1.50.0 to fix CVE-2022-3171 Polish up nats plugin to unify MQ related tags Correct the duration of the transaction span for Neo4J 4.x. Plugin-test configuration.yml dependencies support docker service command field Polish up rabbitmq-5.x plugin to fix missing broker tag on consumer side Polish up activemq plugin to fix missing broker tag on consumer side Enhance MQ plugin relative tests to check key tags not blank. Add RocketMQ test scenarios for version 4.3 - 4.9. No 4.0 - 4.2 release images for testing. Support mannual propagation of tracing context to next operators for webflux. Add MQ_TOPIC and MQ_BROKER tags for RocketMQ consumer\u0026rsquo;s span. Polish up Pulsar plugins to remove unnecessary dynamic value , set peer at consumer side Polish Kafka plugin to set peer at the consumer side. Polish NATS plugin to set peer at the consumer side. Polish ActiveMQ plugin to set peer at the consumer side. Polish RabbitMQ plugin to set peer at the consumer side.  Documentation  Update configuration doc about overriding default value as empty map/list accordingly. Update plugin dev tags for cache relative tags. Add plugin dev docs for virtual database tags. Add plugin dev docs for virtual MQ tags. Add doc about kafka plugin Manual APIs.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.13.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-13-0/","title":"Release Apache SkyWalking Java Agent 8.13.0"},{"body":"SkyWalking Client JS 0.9.0 is released. Go to downloads page to find release tars.\n Fix custom configurations when the page router changed for SPA. Fix reporting data by navigator.sendbeacon when pages is closed. Bump dependencies. Add Security Notice. Support adding custom tags to spans. Validate custom parameters for register.  ","excerpt":"SkyWalking Client JS 0.9.0 is released. Go to downloads page to find release tars.\n Fix custom …","ref":"/events/release-apache-skywalking-client-js-0-9-0/","title":"Release Apache SkyWalking Client JS 0.9.0"},{"body":"I am excited to announce a new SkyWalking committer, Yueqin Zhang(GitHub ID, yswdqz). Yueqin entered the SkyWalking community on Jul. 3rd[1], 2022, for the first time. Later, I knew he was invited by Yihao Chen, our committer, who is running an open-source program for students who can\u0026rsquo;t join Summer 2022 due to SkyWalking having limited slots.\nHis first PR[2] for Issue #7420 took 20 days to propose. I believe he took incredibly hard work in his own time. For every PMC member, we all were there. Purely following documents and existing codes to build a new feature is always not easy to start.\nAfter that, we had several private talks, he asked for more possible directions to join the community deeper. 
Then, I was honored to witness a great extension of the SkyWalking feature territory: SkyWalking adopts OpenTelemetry features quickly, powered by our powerful MAL and the v9 kernel/UI, and he built MySQL and PostgreSQL server monitoring, with metrics and slow SQL collection (through enhancing LAL with a new layer concept), under a new menu.\nIt is unbelievable to see his contributions in the main repo: 8 PRs[3], with 4,857 lines of code added and 1,627 removed.\nMeanwhile, the story continues: he is trying to build a lightweight and APM-oriented SQL parser module[4] under my mentoring. This is another challenging idea, but also very useful for enhancing the existing virtual database performance analysis.\nI believe this moment will not be the end of the story between SkyWalking and him.\nWelcome to join the team.\nReferrer \u0026amp; PMC member, Sheng Wu.\n [1] https://github.com/apache/skywalking/issues/7420#issuecomment-1173061870 [2] https://github.com/apache/skywalking-java/pull/286 [3] https://github.com/apache/skywalking/commits?author=yswdqz [4] https://github.com/apache/skywalking/issues/9661  ","excerpt":"I am excited to announce a new SkyWalking committer, Yueqin Zhang(GitHub ID, yswdqz). Yueqin entered …","ref":"/events/welcome-yueqin-zhang-as-new-committer/","title":"Welcome Yueqin Zhang as a new committer"},{"body":"SkyWalking PHP 0.1.0 is released. Go to downloads page to find release tars.\nWhat's Changed  [docs] Update README by @heyanlong in https://github.com/apache/skywalking-php/pull/1 Remove the CI limit first, in order to run CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/3 Setup CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/5 Implementation, with curl support. by @jmjoy in https://github.com/apache/skywalking-php/pull/4 Turn off Swoole support, and fix Makefile. by @jmjoy in https://github.com/apache/skywalking-php/pull/6 Update docs by @heyanlong in https://github.com/apache/skywalking-php/pull/7 Add PECL support. by @jmjoy in https://github.com/apache/skywalking-php/pull/8 Support macOS by replacing ipc-channel with socket pair, upgrade dependencies and improve CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/9 Add compile and release docs. by @jmjoy in https://github.com/apache/skywalking-php/pull/10 Update official documentation link. by @jmjoy in https://github.com/apache/skywalking-php/pull/11  New Contributors  @heyanlong made their first contribution in https://github.com/apache/skywalking-php/pull/1 @jmjoy made their first contribution in https://github.com/apache/skywalking-php/pull/3  Full Changelog: https://github.com/apache/skywalking-php/commits/v0.1.0\nPECL https://pecl.php.net/package/skywalking_agent/0.1.0\n","excerpt":"SkyWalking PHP 0.1.0 is released. Go to downloads page to find release tars.\nWhat's Changed  [docs] …","ref":"/events/release-apache-skwaylking-php-0-1-0/","title":"Release Apache SkyWalking PHP 0.1.0"},{"body":"Yanlong He (GitHub: heyanlong) is a SkyWalking committer for years. He was working on skyapm-php for years to support the SkyWalking ecosystem. That PHP agent made significant contributions to SkyWalking\u0026rsquo;s user adoption in the PHP landscape. Yanlong remains active in supporting and maintaining the project to help the community.\nJiemin Xia (GitHub: jmjoy) is a new committer voted in July 2022. He has been super active this year. He took over maintenance responsibility from Rei Shimizu, who is too busy with his daily work. 
He leads on the Rust SDK, and is also a release manager for the Rust SDK.\nRecently, both of them are working with Yanlong He to build a new skywalking PHP agent.\nWe are having our PHP agent v0.1.0 for the community.\nSkyWalking PHP Agent\nNotice, SkyAPM PHP is going to be archived and replaced by SkyWalking PHP agent according to its project maintainer, Yanlong He. Our community would work more closely forward the new PHP agent together.\nLet\u0026rsquo;s welcome and congrats to our 31st and 32nd PMC members, Yanlong He and Jiemin Xia. We are honored to have you.\n","excerpt":"Yanlong He (GitHub: heyanlong) is a SkyWalking committer for years. He was working on skyapm-php for …","ref":"/events/welcome-heyanlong-xiajiemin-join-the-pmc/","title":"Welcome Yanlong He and Jiemin Xia to join the PMC"},{"body":"Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. 
Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  
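To make the syscall-level interception described above concrete, here is a rough, hypothetical sketch using bpftrace. This is not SkyWalking Rover's implementation (Rover ships its own eBPF programs); it only illustrates the kprobe/tracepoint style of hooking send/receive syscalls to surface the bytes and execution-time data points listed above:

```shell
# Hypothetical illustration only; requires bpftrace and root privileges.
sudo bpftrace -e '
// Bytes handed to sendto(), summed per process name (the "bytes" data point).
tracepoint:syscalls:sys_enter_sendto { @bytes_out[comm] = sum(args->len); @start[tid] = nsecs; }
// Duration of each sendto() call as a microsecond histogram (the "execution time" data point).
tracepoint:syscalls:sys_exit_sendto /@start[tid]/ { @send_us[comm] = hist((nsecs - @start[tid]) / 1000); delete(@start[tid]); }
// Count of recvfrom() calls per process name.
tracepoint:syscalls:sys_enter_recvfrom { @reads[comm] = count(); }'
```

A real collector such as Rover goes further: it maps the socket file descriptor back to the peer address and inspects the buffer to detect the application protocol, which a one-liner like this does not attempt.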
At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. 
If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. 
The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\nGet Started with Istio To get started with service mesh today, Tetrate Istio Distro is the easiest way to install, manage, and upgrade Istio. It provides a vetted upstream distribution of Istio that\u0026rsquo;s tested and optimized for specific platforms by Tetrate plus a CLI that facilitates acquiring, installing, and configuring multiple Istio versions. 
Tetrate Istio Distro also offers FIPS certified Istio builds for FedRAMP environments.\nFor enterprises that need a unified and consistent way to secure and manage services and traditional workloads across complex, heterogeneous deployment environments, we offer Tetrate Service Bridge, our flagship edge-to-workload application connectivity platform built on Istio and Envoy.\nContact us to learn more.\nAdditional Resources  SkyWalking Github Repo SkyWalking Rover Github Repo SkyWalking Rover Documentation Pinpoint Service Mesh Critical Performance impact by using eBPF blog post Apache SkyWalking with Native eBPF Agent presentation eBPF hook overview  ","excerpt":"Background This article will show how to use Apache SkyWalking with eBPF to make network …","ref":"/blog/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"","excerpt":"","ref":"/zh_tags/ebpf/","title":"eBPF"},{"body":"","excerpt":"","ref":"/zh_tags/performance/","title":"Performance"},{"body":"","excerpt":"","ref":"/tags/performance/","title":"Performance"},{"body":"本文将展示如何利用 Apache SkyWalking 与 eBPF,使服务网格下的网络故障排除更加容易。\nApache SkyWalking 是一个分布式系统的应用性能监控工具。它观察服务网格中的指标、日志、痕迹和事件,并使用这些数据来生成 pod 和服务的依赖图。这个依赖关系图可以帮助你快速系统,尤其是在出现问题的时候。\n然而,在排除 SkyWalking 服务拓扑中的网络问题时,确定错误的实际位置有时候并不容易。造成这种困难的原因有两个:\n 通过 Envoy sidecar 的流量并不容易观察:来自 Envoy 的访问日志服务(ALS)的数据显示了服务之间的流量(sidecar-to-sidecar),但没有关于 Envoy sidecar 和它代理的服务之间的通信指标。如果没有这些信息,就很难理解 sidecar 的影响。 缺乏来自传输层(OSI 第 4 层)通信的数据:由于服务通常使用应用层(OSI 第 7 层)协议,如 HTTP,可观测性数据通常被限制在应用层通信中。然而,根本原因可能实际上是在传输层,而传输层对可观测性工具来说通常是不透明的。  获取 Envoy-to-service 和传输层通信的指标,可以更容易诊断服务问题。为此,SkyWalking 需要收集和分析 Kubernetes pod 内进程之间的传输层指标 —— 这项任务很适合 eBPF。我们调查了为此目的使用 eBPF 的情况,并在下面介绍了我们的结果和演示。\n用 eBPF 监控 Kubernetes 网络 eBPF 起源于 Extended Berkeley Packet Filter,是一种通用的机制,可以在 Linux 内核中注入和运行自己的代码,是监测 Kubernetes Pod 中网络流量的优秀工具。在接下来的几节中,我们将概述如何使用 eBPF 进行网络监控,作为介绍 Skywalking Rover 的背景,这是一个由 eBPF 驱动的指标收集器和分析器,用于诊断 CPU 和网络性能。\n应用程序和网络如何相互作用 应用程序和网络之间的互动一般可分为以下步骤,从较高的抽象层次到较低的抽象层次:\n 用户代码:应用程序代码使用应用程序堆栈中的高级网络库,在网络上交换数据,如发送和接收 HTTP 请求。 网络库:当网络库收到网络请求时,它与语言 API 进行交互以发送网络数据。 语言 API:每种语言都提供了一个操作网络、系统等的 API。当收到一个请求时,它与系统的 API 进行交互。在 Linux 中,这个 API 被称为系统调用(syscalls)。 Linux API:当 Linux 内核通过 API 收到请求时,它与套接字进行通信以发送数据,这通常更接近于 OSI 第四层协议,如 TCP、UDP 等。 Socket Ops:向 / 从网卡发送或接收数据。  我们的假设是,eBPF 可以监控网络。有两种方法可以实现拦截:用户空间(uprobe)或内核空间(kprobe)。下表总结了两者的区别。\n   方式 优点 缺点     uprobe • 获取更多与应用相关的上下文,例如当前请求是 HTTP 还是 HTTPS。 • 请求和响应可以通过一个方法来截获。 • 数据结构可能是不稳定的,所以更难获得所需的数据。 • 不同语言/库版本的实现可能不同。 • 在没有符号表的应用程序中不起作用。   kprobe • 可用于所有语言。 • 数据结构和方法很稳定,不需要太多调整。 • 更容易与底层数据相关联,如获得 TCP 的目标地址、OSI 第四层协议指标等。 • 一个单一的请求和响应可能被分割成多个 probe。 • 对于有状态的请求,上下文信息不容易得到。例如 HTTP/2 中的头压缩。    对于一般的网络性能监控,我们选择使用 kprobe(拦截系统调用),原因如下:\n 它可用于用任何编程语言编写的应用程序,而且很稳定,所以可以节省大量的开发 / 适应成本。 它可以与系统层面的指标相关联,这使得故障排除更加容易。 由于一个请求和响应被分割成多个 probe,我们可以利用技术将它们关联起来。 对于背景信息,它通常用于 OSI 第七层协议网络分析。因此,如果我们只是监测网络性能,那么它们可以被忽略。  Kprobes 和网络监控 按照 Linux 文档中的网络系统调用,我们可以通过两类拦截方法实现网络监控:套接字操作和发送 / 接收方法。\n套接字操作 当接受或与另一个套接字连接时,我们可以得到以下信息:\n 连接信息:包括来自连接的远程地址,这有助于我们了解哪个 pod 被连接。 连接统计 :包括来自套接字的基本指标,如往返时间(RTT)、TCP 的丢包数等。 套接字和文件描述符(FD)的映射:包括 Linux 文件描述符和套接字对象之间的关系。在通过 Linux 文件描述符发送和接收数据时,它很有用。  发送 / 接收 与发送或接收数据有关的接口是性能分析的重点。它主要包含以下参数:\n Socket 文件描述符:当前操作对应的套接字的文件描述符。 缓冲区:发送或接收的数据,以字节数组形式传递。  基于上述参数,我们可以分析以下数据:\n 字节:数据包的大小,以字节为单位。 协议:根据缓冲区的数据进行协议分析,如 HTTP、MySQL 等。 执行时间:发送 / 接收数据所需的时间。  在这一点上(图 1),我们可以分析出连接的整个生命周期的以下步骤:\n 连接 / 接受:当连接被创建时。 转化:在连接上发送和接收数据。 关闭:当连接被关闭时。  图 1\n协议和 TLS 上一节描述了如何使用发送或接收缓冲区数据来分析连接。例如,遵循 HTTP/1.1 消息规范来分析连接。然而,这对 TLS 请求 / 响应不起作用。\n图 2\n当使用 TLS 时,Linux 
内核在用户空间中传输加密的数据。在上图中,应用程序通常通过第三方库(如 OpenSSL)传输 SSL 数据。对于这种情况,Linux API 只能得到加密的数据,所以它不能识别任何高层协议。为了在 eBPF 内部解密,我们需要遵循以下步骤:\n 通过 uprobe 读取未加密的数据:兼容多种语言,使用 uprobe 来捕获发送前或接收后没有加密的数据。通过这种方式,我们可以获得原始数据并将其与套接字联系起来。 与套接字关联:我们可以将未加密的数据与套接字关联。  OpenSSL 用例 例如,发送 / 接收 SSL 数据最常见的方法是使用 OpenSSL 作为共享库,特别是 SSL_read 和 SSL_write 方法,以提交缓冲区数据与套接字。\n按照文档,我们可以截获这两种方法,这与 Linux 中的 API 几乎相同。OpenSSL 中 SSL 结构的源代码显示, Socket FD 存在于 SSL 结构的 BIO 对象中,我们可以通过 offset 得到它。\n综上所述,通过对 OpenSSL 工作原理的了解,我们可以在一个 eBPF 函数中读取未加密的数据。\nSkyWalking Rover—— 基于 eBPF 的指标收集器和分析器 SkyWalking Rover 在 SkyWalking 生态系统中引入了 eBPF 网络分析功能。目前已在 Kubernetes 环境中得到支持,所以必须在 Kubernetes 集群内部署。部署完成后,SkyWalking Rover 可以监控特定 Pod 内所有进程的网络。基于监测数据,SkyWalking 可以生成进程之间的拓扑关系图和指标。\n拓扑结构图 拓扑图可以帮助我们了解同一 Pod 内的进程之间以及进程与外部环境(其他 Pod 或服务)之间的网络访问情况。此外,它还可以根据线路的流动方向来确定流量的数据方向。\n在下面的图 3 中,六边形内的所有节点都是一个 Pod 的内部进程,六边形外的节点是外部关联的服务或 Pod。节点由线连接,表示节点之间的请求或响应方向(客户端或服务器)。线条上标明了协议,它是 HTTP (S)、TCP 或 TCP (TLS)。另外,我们可以在这个图中看到,Envoy 和 Python 应用程序之间的线是双向的,因为 Envoy 拦截了所有的应用程序流量。\n图 3\n度量 一旦我们通过拓扑结构认识到进程之间的网络调用关系,我们就可以选择一个特定的线路,查看两个进程之间的 TCP 指标。\n下图(图4)显示了两个进程之间网络监控的指标。每行有四个指标。左边的两个是在客户端,右边的两个是在服务器端。如果远程进程不在同一个 Pod 中,则只显示一边的指标。\n图 4\n有以下两种度量类型。\n 计数器(Counter):记录一定时期内的数据总数。每个计数器包含以下数据。  计数:执行次数。 字节:数据包大小,以字节为单位。 执行时间:执行时间。   柱状图(Histogram):记录数据在桶中的分布。  基于上述数据类型,暴露了以下指标:\n   名称 类型 单位 描述     Write 计数器和柱状图 毫秒 套接字写计数器。   Read 计数器和柱状图 毫秒 套接字读计数器。   Write RTT 计数器和柱状图 微秒 套接字写入往返时间(RTT)计数器。   Connect 计数器和柱状图 毫秒 套接字连接/接受另一个服务器/客户端的计数器。   Close 计数器和柱状图 毫秒 有其他套接字的计数器。   Retransmit 计数器 毫秒 套接字重发包计数器   Drop 计数器 毫秒 套接字掉包计数器。    演示 在本节中,我们将演示如何在服务网格中执行网络分析。要跟上进度,你需要一个正在运行的 Kubernetes 环境。\n注意:所有的命令和脚本都可以在这个 GitHub 资源库中找到。\n安装 Istio Istio是最广泛部署的服务网格,并附带一个完整的演示应用程序,我们可以用来测试。要安装 Istio 和演示应用程序,请遵循以下步骤:\n 使用演示配置文件安装 Istio。 标记 default 命名空间,所以当我们要部署应用程序时,Istio 会自动注入 Envoy 的 sidecar 代理。 将 bookinfo 应用程序部署到集群上。 部署流量生成器,为应用程序生成一些流量。  export ISTIO_VERSION=1.13.1 # 安装 istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # 部署 bookinfo 应用程序 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # 产生流量 kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml 安装 SkyWalking 下面将安装 SkyWalking 所需的存储、后台和用户界面。\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; 安装 SkyWalking Rover SkyWalking Rover 部署在 Kubernetes 的每个节点上,它自动检测 Kubernetes 集群中的服务。网络剖析功能已经在 SkyWalking Rover 的 0.3.0 版本中发布。当网络监控任务被创建时,SkyWalking Rover 会将数据发送到 SkyWalking 后台。\nkubectl apply -f 
https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml 启动网络分析任务 一旦所有部署完成,我们必须在 SkyWalking UI 中为服务的特定实例创建一个网络分析任务。\n要打开 SkyWalking UI,请运行:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system 目前,我们可以通过点击服务网格面板中的数据平面项目和 Kubernetes 面板中的服务项目来选择我们想要监控的特定实例。\n在下图中,我们选择了一个实例,在网络剖析标签里有一个任务列表。当我们点击启动按钮时,SkyWalking Rover 开始监测这个实例的网络。\n图 5\n完成 几秒钟后,你会看到页面的右侧出现进程拓扑结构。\n图 6\n当你点击进程之间的线时,你可以看到两个进程之间的 TCP 指标。\n图 7\n总结 在这篇文章中,我们详细介绍了一个使服务网格故障排除困难的问题:网络堆栈中各层之间缺乏上下文。这些情况下,当现有的服务网格 /envoy 不能时,eBPF 开始真正帮助调试 / 生产。然后,我们研究了如何将 eBPF 应用于普通的通信,如 TLS。最后,我们用 SkyWalking Rover 演示了这个过程的实现。\n目前,我们已经完成了对 OSI 第四层(主要是 TCP)的性能分析。在未来,我们还将介绍对 OSI 第 7 层协议的分析,如 HTTP。\n开始使用 Istio 开始使用服务网格,Tetrate Istio Distro 是安装、管理和升级 Istio 的最简单方法。它提供了一个经过审查的 Istio 上游发布,由 Tetrate 为特定平台进行测试和优化,加上一个 CLI,方便获取、安装和配置多个 Istio 版本。Tetrate Istio Distro 还为 FedRAMP 环境提供 FIPS 认证的 Istio 构建。\n对于需要以统一和一致的方式在复杂的异构部署环境中保护和管理服务和传统工作负载的企业,我们提供 Tetrate Service Bridge,这是我们建立在 Istio 和 Envoy 上的旗舰工作负载应用连接平台。\n联系我们以了解更多。\n其他资源  SkyWalking Github Repo SkyWalking Rover Github Repo SkyWalking Rover 文件 通过使用 eBPF 博文准确定位服务网格关键性能影响 Apache SkyWalking 与本地 eBPF 代理的介绍 eBPF hook概述  ","excerpt":"本文将展示如何利用 Apache SkyWalking 与 eBPF,使服务网格下的网络故障排除更加容易。\nApache SkyWalking 是一个分布式系统的应用性能监控工具。它观察服务网格中的指 …","ref":"/zh/diagnose-service-mesh-network-performance-with-ebpf/","title":"使用 eBPF 诊断服务网格网络性能"},{"body":"SkyWalking CLI 0.11.0 is released. Go to downloads page to find release tars.\n Add .github/scripts to release source tarball by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/140 Let the eBPF profiling could performs by service level by @mrproliu in https://github.com/apache/skywalking-cli/pull/141 Add the sub-command for estimate the process scale by @mrproliu in https://github.com/apache/skywalking-cli/pull/142 feature: update install.sh version regex by @Alexxxing in https://github.com/apache/skywalking-cli/pull/143 Update the commands relate to the process by @mrproliu in https://github.com/apache/skywalking-cli/pull/144 Add layer to event related commands by @fgksgf in https://github.com/apache/skywalking-cli/pull/145 Add layer to events.graphql by @fgksgf in https://github.com/apache/skywalking-cli/pull/146 Add layer field to alarms.graphql by @fgksgf in https://github.com/apache/skywalking-cli/pull/147 Upgrade crypto lib to fix cve by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/148 Remove layer field in the instance and process commands by @mrproliu in https://github.com/apache/skywalking-cli/pull/149 Remove duration flag in profiling ebpf schedules by @mrproliu in https://github.com/apache/skywalking-cli/pull/150 Remove total field in trace list and logs list commands by @mrproliu in https://github.com/apache/skywalking-cli/pull/152 Remove total field in event list, browser logs, alarm list commands. 
by @mrproliu in https://github.com/apache/skywalking-cli/pull/153 Add aggregate flag in profiling ebpf analysis commands by @mrproliu in https://github.com/apache/skywalking-cli/pull/154 event: fix event query should query all types by default by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/155 Fix a possible lint error and update CI lint version by @JarvisG495 in https://github.com/apache/skywalking-cli/pull/156 Add commands for support network profiling by @mrproliu in https://github.com/apache/skywalking-cli/pull/158 Add the components field in the process relation by @mrproliu in https://github.com/apache/skywalking-cli/pull/159 Trim license headers in query string by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/160 Bump up dependency swck version to fix CVE by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/161 Bump up swck dependency for transitive dep upgrade by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/162 Add the sub-commands for query sorted metrics/records by @mrproliu in https://github.com/apache/skywalking-cli/pull/163 Add compatibility documentation by @mrproliu in https://github.com/apache/skywalking-cli/pull/164 Overhaul licenses, prepare for 0.11.0 by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/165  ","excerpt":"SkyWalking CLI 0.11.0 is released. Go to downloads page to find release tars.\n Add .github/scripts …","ref":"/events/release-apache-skywalking-cli-0-11-0/","title":"Release Apache SkyWalking CLI 0.11.0"},{"body":"SkyWalking Kubernetes Helm Chart 4.3.0 is released. Go to downloads page to find release tars.\n Fix hasSuffix replace hasPrefix by @geffzhang in https://github.com/apache/skywalking-kubernetes/pull/86 Add \u0026ldquo;pods/log\u0026rdquo; permission to OAP so on-demand Pod log can work by @kezhenxu94 in https://github.com/apache/skywalking-kubernetes/pull/87 add .Values.oap.initEs to work with ES initial by @williamyao1982 in https://github.com/apache/skywalking-kubernetes/pull/88 Remove Istio adapter, add changelog for 4.3.0 by @kezhenxu94 in https://github.com/apache/skywalking-kubernetes/pull/89 Bump up helm chart version by @kezhenxu94 in https://github.com/apache/skywalking-kubernetes/pull/90  ","excerpt":"SkyWalking Kubernetes Helm Chart 4.3.0 is released. Go to downloads page to find release tars.\n Fix …","ref":"/events/release-apache-skywalking-kubernetes-helm-chart-4.3.0/","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.3.0"},{"body":"SkyWalking Cloud on Kubernetes 0.7.0 is released. Go to downloads page to find release tars.\nFeatures  Replace go-bindata with embed lib. Add the OAPServerConfig CRD, webhooks and controller. Add the OAPServerDynamicConfig CRD, webhooks and controller. Add the SwAgent CRD, webhooks and controller. [Breaking Change] Remove the way to configure the agent through Configmap.  Bugs  Fix the error in e2e testing. Fix status inconsistent with CI. Bump up prometheus client version to fix cve.  Chores  Bump several dependencies of adapter. Update license eye version. Bump up SkyWalking OAP to 9.0.0. Bump up the k8s api of the e2e environment to v1.21.10.  ","excerpt":"SkyWalking Cloud on Kubernetes 0.7.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-7-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.7.0"},{"body":"SkyWalking Rover 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support NETWORK Profiling. 
Let the logger as a configurable module. Support analyze the data of OpenSSL, BoringSSL library, GoTLS, NodeTLS in NETWORK Profiling. Enhancing the kubernetes process finder.  Bug Fixes  Fixed reading process paths incorrect when running as a container. Fix the crash caused by multiple profiling tasks.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Rover 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skwaylking-rover-0-3-0/","title":"Release Apache SkyWalking Rover 0.3.0"},{"body":"SkyWalking Java Agent 8.12.0 is released. Go to downloads page to find release tars. Changes by Version\n8.12.0  Fix Shenyu plugin\u0026rsquo;s NPE in reading trace ID when IgnoredTracerContext is used in the context. Update witness class in elasticsearch-6.x-plugin, avoid throw NPE. Fix onHalfClose using span operation name /Request/onComplete instead of the wrong name /Request/onHalfClose. Add plugin to support RESTeasy 4.x. Add plugin to support hutool-http 5.x. Add plugin to support Tomcat 10.x. Save http status code regardless of it\u0026rsquo;s status. Upgrade byte-buddy to 1.12.13, and adopt byte-buddy APIs changes. Upgrade gson to 2.8.9. Upgrade netty-codec-http2 to 4.1.79.Final. Fix race condition causing agent to not reconnect after network error Force the injected high-priority classes in order to avoid NoClassDefFoundError. Plugin to support xxl-job 2.3.x. Add plugin to support Micronaut(HTTP Client/Server) 3.2.x-3.6.x Add plugin to support NATS Java client 2.14.x-2.15.x Remove inappropriate dependency from elasticsearch-7.x-plugin Upgrade jedis plugin to support 3.x(stream),4.x  Documentation  Add a section in Bootstrap-plugins doc, introducing HttpURLConnection Plugin compatibility. Update Plugin automatic test framework, fix inconsistent description about configuration.yml. Update Plugin automatic test framework, add expected data format of the log items.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.12.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-12-0/","title":"Release Apache SkyWalking Java Agent 8.12.0"},{"body":"This is an official annoucement from SkyWalking team.\nSkyWalking backend server and UI released significant 9.2.0 at Sep. 2nd, 2022. With the new added Layer concept, the ebpf agent, wider middleware server monitoring(Such as MySQL and PostgreSQL servers) powered by OpenTelemetry ecosystem, SkyWalking v9 has been much more powerful than the last v8 version(8.9.1).\nFrom now, we have resolved all found critical bugs since 9.0.0 release which could block the v8 users to upgrade. v9 releases also provide the as same compatibility as the 8.9.1 release. So, end users would not have a block when they apply to upgrade. (We don\u0026rsquo;t provide storage structure compatibility as usually, users should use an empty database to initialize for a new version.)\nAnd more importantly, we are confident that, v9 could provide a stable and higher performance APM in the product environment.\nThe 8.9.1 release was released at Dec., 2021. Since then, there is no one contributed any code, and there is no committer requested to begin a new iteration or plan to run a patch release. 
From the project management committee perspective, the 8.x had became inactive.\nWe are going to wait for another 3 month to official end 8.x series' life.\nNotice, this could be changed if there are at least 3 committers supporting to work on further 8.x releases officially, and provide a release plan.\n","excerpt":"This is an official annoucement from SkyWalking team.\nSkyWalking backend server and UI released …","ref":"/events/deprecate-v8/","title":"Plan to End-of-life(EOL) all v8 releases in Nov. 2022"},{"body":"SkyWalking 9.2.0 is released. Go to downloads page to find release tars.\neBPF Network Profiling for K8s Pod Event and Metrics Association MySQL Server Monitoring PostgreSQL Server Monitoring Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. 
More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. [Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a URL parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MySQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. 
Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the UI doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.2.0 is released. Go to downloads page to find release tars.\neBPF Network Profiling for …","ref":"/events/release-apache-skywalking-apm-9.2.0/","title":"Release Apache SkyWalking APM 9.2.0"},{"body":"SkyWalking Rust 0.4.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Publish release doc. by @wu-sheng in https://github.com/apache/skywalking-rust/pull/31 Set up CI and approval requirements by @wu-sheng in https://github.com/apache/skywalking-rust/pull/32 Move skywalking_proto mod to single files. by @jmjoy in https://github.com/apache/skywalking-rust/pull/33 Polish the release doc. by @wu-sheng in https://github.com/apache/skywalking-rust/pull/34 Add serde support for protobuf generated struct. by @jmjoy in https://github.com/apache/skywalking-rust/pull/35 Improve LogReporter and fix tests. by @jmjoy in https://github.com/apache/skywalking-rust/pull/36 Split tracer inner segment sender and receiver into traits. by @jmjoy in https://github.com/apache/skywalking-rust/pull/37 Switch to use nightly rustfmt. by @jmjoy in https://github.com/apache/skywalking-rust/pull/38 Change Span to refer to SpanStack, rather than TracingContext. by @jmjoy in https://github.com/apache/skywalking-rust/pull/39 Adjust the trace structure. by @jmjoy in https://github.com/apache/skywalking-rust/pull/40 Add logging. by @jmjoy in https://github.com/apache/skywalking-rust/pull/41 Upgrade dependencies. by @jmjoy in https://github.com/apache/skywalking-rust/pull/42 Add feature vendored, to auto build protoc. by @jmjoy in https://github.com/apache/skywalking-rust/pull/43 Add metrics. by @jmjoy in https://github.com/apache/skywalking-rust/pull/44 Add more GH labels as new supports by @wu-sheng in https://github.com/apache/skywalking-rust/pull/45 Bump to 0.4.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/46 Fix trace id is not transmitted. by @jmjoy in https://github.com/apache/skywalking-rust/pull/47  ","excerpt":"SkyWalking Rust 0.4.0 is released. 
Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-4-0/","title":"Release Apache SkyWalking Rust 0.4.0"},{"body":"目录  开篇 为什么需要全链路监控 为什么选择SkyWalking 预研 POC 优化 未来  1、开篇 自从SkyWalking开始在公司推广,时不时会在排查问题的人群中听到这样的话:“你咋还没接SkyWalking?接入后,一眼就看出是哪儿的问题了\u0026hellip;\u0026quot;,正如同事所说的,在许多情况下,SkyWalking就是这么秀。作为实践者,我非常感谢SkyWalking,因为这款国产全链路监控产品给公司的的伙伴们带来了实实在在的帮助;也特别感谢公司的领导和同事们,正因为他们的支持和帮助,才让这套SkyWalking(V8.5.0)系统从起初的有用进化到现在的好用;从几十亿的Segment储能上限、几十秒的查询耗时,优化到千亿级的Segment储能、毫秒级的查询耗时。\n小提示:\n SkyWalking迭代速度很快,公司使用的是8.5.0版本,其新版本的性能肯定有改善。 Segment是SkyWalking中提出的概念,表示一次请求在某个服务内的执行链路片段的合集,一个请求在多个服务中先后产生的Segment串起来构成一个完整的Trace,如下图所示:  SkyWalking的这次实践,截止到现在有一年多的时间,回顾总结一下这段历程中的些许积累和收获,愿能反哺社区,给有需求的道友提供个案例借鉴;也希望能收获到专家们的指导建议,把项目做得更好。因为安全约束,要把有些内容和谐掉,但也努力把这段历程中那些**靓丽的风景,**尽可能完整的呈现给大家。\n2、为什么需要全链路监控 随着微服务架构的演进,单体应用按照服务维度进行拆分,组织架构也随之演进以横向、纵向维度拆分;一个业务请求的执行轨迹,也从单体应用时期一个应用实例内一个接口,变成多个服务实例的多个接口;对应到组织架构,可能跨越多个BU、多个Owner。虽然微服务架构高内聚低耦合的优势是不言而喻的,但是低耦合也有明显的副作用,它在现实中给跨部门沟通、协作带来额外的不可控的开销;因此开发者尤其是终端业务侧的架构师、管理者,特别需要一些可以帮助理解系统拓扑和用于分析性能问题的工具,便于在架构调整、性能检测和发生故障时,缩减沟通协作方面的精力和时间耗费,快速定位并解决问题。\n我所在的平安健康互联网股份有限公司(文中简称公司),是微服务架构的深度实践者。公司用互联网技术搭建医疗服务平台,致力于构筑专业的医患桥梁,提供专业、全面、高品质、一站式企业健康管理服务。为了进一步提高系统服务质量、提升问题响应效率,部门在21年结合自身的一些情况,决定对现行的全链路监控系统进行升级,目的与以下网络中常见的描述基本一致:\n 快速发现问题 判断故障影响范围 梳理服务依赖并判断依赖的合理性 分析链路性能并实施容量规划  3、为什么选择SkyWalking 在做技术选型时,网络中搜集的资料显示,谷歌的 Dapper系统,算是链路追踪领域的始祖。受其公开论文中提出的概念和理念的影响,一些优秀的企业、个人先后做出不少非常nice的产品,有些还在社区开源共建,如:韩国的Pinpoint,Twitter的Zipkin,Uber的Jaeger及中国的SkyWalking 等,我司选型立项的过程中综合考虑的因素较多,这里只归纳一下SkyWalking吸引我们的2个优势:\n  产品的完善度高:\n java生态,功能丰富 社区活跃,迭代迅速    链路追踪、拓扑分析的能力强:\n 插件丰富,探针无侵入。 采用先进的流式拓扑分析设计    “好东西不需要多说,实际行动告诉你“,这句话我个人非常喜欢,关于SkyWalking的众多的优点,网络上可以找到很多,此处先不逐一比较、赘述了。\n4、预研 当时最新版本8.5.0,梳理分析8.x的发布记录后,评估此版本的核心功能是蛮稳定的,于是基于此版本开始了SkyWalking的探索之旅。当时的认知是有限的,串行思维模型驱使我将关注的问题聚焦在架构原理是怎样、有什么副作用这2个方面:\n  架构和原理:\n agent端 主要关注 Java Agent的机制、SkyWalking Agent端的配置、插件的工作机制、数据采集及上报的机制。 服务端 主要关注 角色和职责、模块和配置、数据接收的机制、指标构建的机制、指标聚合的机制及指标存储的机制。 存储端 主要关注 数据量,存储架构要求以及资源评估。    副作用:\n 功能干扰 性能损耗    4.1 架构和原理 SkyWalking社区很棒,官网文档和官方出版的书籍有较系统化的讲解,因为自己在APM系统以及Java Agent方面有一些相关的经验沉淀,通过在这两个渠道的学习,对Agent端和OAP(服务端)很快便有了较系统化的认知。在做系统架构选型时,评估数据量会比较大(成千上万的JVM实例数,每天采集的Segment数量可能是50-100亿的级别),所以传输通道选择Kafka、存储选择Elasticsearch,如此简易版的架构以及数据流转如下图所示:\n这里有几处要解释一下:\n Agent上报数据给OAP端,有grpc通道和kafka通道,当时就盲猜grpc通道可能撑不住,所以选择kafka通道来削峰;kafka通道是在8.x里加入的。 千亿级的数据用ES来做存储肯定是可以的。 图中L1聚合的意思是:SkyWalking OAP服务端 接收数据后,构建metric并完成metric 的Level-1聚合,这里简称L1聚合。 图中L2聚合的意思是:服务端 基于metric的Level-1聚合结果,再做一次聚合,即Level-2聚合,这里简称L2聚合。后续把纯Mixed角色的集群拆成了两个集群。  4.2 副作用 对于质量团队和接入方来说,他们最关注的问题是,接入SkyWalking后:\n 是否对应用有功能性干扰 在运行期能带来哪些性能损耗  这两个问题从3个维度来得到答案:\n  网络资料显示:\n Agent带来的性能损耗在5%以内 未搜到功能性干扰相关的资料(盲猜没有这方面问题)    实现机制评估:\n 字节码增强机制是JVM提供的机制,SkyWalking使用的字节码操控框架ByteBuddy也是成熟稳定的;通过自定义ClassLoader来加载管理插件类,不会产生冲突和污染。 Agent内插件开发所使用的AOP机制是基于模板方法模式实现的,风控很到位,即使插件的实现逻辑有异常也不影响用户逻辑的执行; 插件采集数据跟上报逻辑之间用了一个轻量级的无锁环形队列进行解耦,算是一种保护机制;这个队列在MPSC场景下性能还不错;队列采用满时丢弃的策略,不会有积压阻塞和OOM。    性能测试验证\n 测试的老师针对dubbo、http 这两种常规RPC通信场景,进行压力测试和稳定性测试,结果与网络资料描述一致,符合预期。    5、POC 在POC阶段,接入几十个种子应用,在非生产环境试点观察,同时完善插件补全链路,对接公司的配置中心,对接发布系统,完善自监控.全面准备达到推广就绪状态。\n5.1 对接发布系统 为了对接公司的发布系统,方便系统的发布,将SkyWalking应用拆分为4个子应用:\n   应用 介绍     Webapp Skywalking的web端   Agent Skywalking的Agent端   OAP-Receiver skywakling的服务端,角色是Mixed或Receiver   OAP-Aggregator skywalking的服务端,角色是Aggregator    这里有个考虑,暂定先使用纯Mixed角色的单集群,有性能问题时就试试 Receiver+Aggregator双角色集群模式,最终选哪种视效果而定。\nSkyWalking Agent端是基于Java Agent机制实现的,采用的是启动挂载模式;启动挂载需在启动脚本里加入挂载Java Agent的逻辑,发布系统实现这个功能需要注意2点:\n 启动脚本挂载SkyWalking Agent的环节,尽量让用户无感知。 
发布系统在挂载Agent的时候,给Agent指定应用名称和所属分组信息。  SkyWalking Agent的发布和升级也由发布系统来负责;Agent的升级采用了灰度管控的方案,控制的粒度是应用级和实例级两种:\n 按照应用灰度,可给应用指定使用什么版本的Agent 按照应用的实例灰度,可给应用指定其若干实例使用什么版本的Agent  5.2 完善插件补全链路 针对公司OLTP技术栈,量身定制了插件套,其中大部分在开源社区的插件库中有,缺失的部分通过自研快速补齐。\n这些插件给各组件的核心环节埋点,采集数据上报给SkyWalking后,Web端的【追踪】页面就能勾勒出丰满完美的请求执行链路;这对架构师理解真实架构,测试同学验证逻辑变更和分析性能损耗,开发同学精准定位问题都非常的有帮助。这里借官方在线Demo的截图一用(抱歉后端程序员,五毛特效都没做出来,丰满画面还请自行脑补)\n友情小提示:移除不用的插件对程序编译打包和减少应用启动耗时很有帮助。\n5.3压测稳测 测试的老师,针对SkyWalking Agent端的插件套,设计了丰富的用例,压力测试和稳定性测试的结果都符合预期;每家公司的标准不尽一致,此处不再赘述。\n5.4 对接自研的配置中心 把应用中繁杂的配置交给配置中心来管理是非常必要的,配置中心既能提供启动时的静态配置,又能管理运行期的动态配置,而且外部化配置的机制特别容易满足容器场景下应用的无状态化要求。啰嗦一下,举2个例子:\n 调优时,修改参数的值不用来一遍开发到测试再到生产的发布。 观测系统状态,修改日志配置后不需要来一遍开发到测试再到生产的发布。  Skywaling在外接配置中心这块儿,适配了市面中主流的配置中心产品。而公司的配置中心是自研的,需要对接一下,得益于SkyWalking提供的模块化管理机制,只用扩展一个模块即可。\n在POC阶段,梳理服务端各模块的功能,能感受到其配置化做的不错,配置项很丰富,管控的粒度也很细;在POC阶段几乎没有变动,除了对Webapp模块的外部化配置稍作改造,与配置中心打通以便在配置中心管理 Webapp模块中Ribbon和Hystrix的相关配置。\n5.5完善自监控 自监控是说监控SkyWalking系统内各模块的运转情况:\n   组件 监控方案 说明     kafka kafka-manager 它俩是老搭档了   Agent端 Skywalking Agent端会发心跳信息给服务端,可在Web端看到Agent的信息   OAP集群 prometheus 指标还算丰富,感觉缺的可以自己补充   ES集群 prometheus 指标还算丰富    完善自监控后的架构如下图所示:\n5.6 自研Native端SDK 公司移动端的应用很核心,也要使用链路追踪的功能,社区缺了这块,于是基于SkyWalking的协议,移动端的伙伴们自研了一套SDK,弥补了Native端链路数据的缺失,也在后来的秒开页面指标统计中发挥了作用。随着口口相传,不断有团队提出需求、加入建设,所以也在持续迭代中;内容很多,这里先不展开。\n5.7 小结 POC阶段数据量不大,主要是发现系统的各种功能性问题,查缺补漏。\n6、优化 SkyWalking的正式推广采用的是城市包围农村的策略;公司的核心应用作为第一批次接入,这个策略有几个好处:\n 核心应用的监管是重中之重,优先级默认最高。 核心应用的上下游应用,会随着大家对SkyWalking依赖的加深,而逐步自主接入。  当然安全是第一位的,无论新系统多好、多厉害,其引入都需遵守安全稳定的前提要求。既要安全又要快速还要方便,于是基于之前Agent灰度接入的能力,在发布系统中增加应用Owner自助式灰度接入和快速卸载SkyWalking Agent的能力,即应用负责人可自主选择哪个应用接入,接入几个实例,倘若遇到问题仅通过重启即可完成快速卸载;这个能力在推广的前期发挥了巨大的作用;毕竟安全第一,信任也需逐步建立。\n随着应用的接入、使用,我们也逐渐遇到了一些问题,这里按照时间递增的顺序将问题和优化效果快速的介绍给大家,更多技术原理的内容计划在【SkyWalking(v8.5.0)调优系列】补充。开始之前有几个事项要说明:\n 下文中提到的数字仅代表我司的情况,标注的Segment数量是处理这个问题的那段时间的情况,并不是说达到这个数量才开始出现这个现象。 这些数值以及当时的现象,受到宿主机配置、Segment数据的大小、存储处理能力等多种因素的影响;请关注调整的过程和效果,不必把数字和现象对号入座哈。  6.1 启动耗时: 问题: 有同事反馈应用启动变慢,排查发现容器中多数应用启动的总耗时,在接入SkyWalking前是2秒,接入后变成了16秒以上,公司很多核心应用的实例数很多,这样的启动损耗对它们的发布影响太大。\n优化:  记录启动耗时并随着其他启动数据上报到服务端,方便查看对比。 优化Kafka Reporter的启动过程,将启动耗时减少了3-4秒。 优化类匹配和增强环节(重点)后,容器中的应用启动总耗时从之前16秒以上降低到了3秒内。 梳理Kafka 启动和上报的过程中,顺带调整了Agent端的数据上报到kafka的分区选择策略,将一个JVM实例中的数据全部发送到同一个的分区中,如此在L1层的聚合就完成了JVM实例级的Metric聚合,需注意调整Kafka分片数来保证负载均衡。  6.2 kafka积压-6亿segment/天 问题: SkyWalking OAP端消费慢,导致Kafka中Segment积压。未能达到能用的目标。\n优化: 从SkyWalking OAP端的监控指标中没有定位出哪个环节的问题,把服务端单集群拆为双集群,即把 Mixed角色的集群 ,修改为 Receiver 角色(接收和L1聚合)的集群 ,并加入 Aggregation角色(L2聚合)的集群,调整成了双集群模式,数据流传如下图所示:\n6.3 kafka积压-8亿segment/天 问题: SkyWalking OAP端消费慢,导致Kafka中Segment积压,监控指标能看出是在ES存储环节慢,未能达到能用的目标。\n优化:  优化segment保存到ES的批处理过程,调整BulkProcessor的线程数和批处理大小。 优化metrics保存到ES的批处理过程,调整批处理的时间间隔、线程数、批处理大小以及刷盘时间。  6.4 kafka积压-20亿segment/天 问题: Aggregation集群的实例持续Full GC,Receiver集群通过grpc 给Aggregation集群发送metric失败。未能达到能用的目标。\n优化:  增加ES节点、分片,效果不明显。 ES集群有压力,但无法精准定位出是什么数据的什么操作引发的。采用分治策略,尝试将数据拆分,从OAP服务端读写逻辑调整,将ES单集群拆分为 trace集群 和 metric集群;之后对比ES的监控指标明确看出是metric集群读写压力太大。  优化Receiver集群metric的L1聚合,完成1分钟的数据聚合后,再提交给Aggregation集群做L2聚合。 Aggregation集群metric的L2 聚合是基于db实现的,会有 空读-写-再读-累加-更新写 这样的逻辑,每次写都会有读,调整逻辑是:提升读的性能,优化缓存机制减少读的触发;调整间隔,避免触发累加和更新。 将metric批量写ES操作调整成BulkProcessor。 ES的metric集群 使用SSD存储,增加节点数和分片数。  这一次的持续优化具有里程碑式的意义,Kafka消费很快,OAP各机器的Full GC没了,ES的各方面指标也很稳定;接下来开始优化查询,提升易用性。\n6.5 trace查询慢-25亿segment/天 问题: Web端【追踪】页中的查询都很慢,仅保存了15天的数据,按照traceId查询耗时要20多秒,按照条件查询trace列表的耗时更糟糕;这给人的感受就是“一肚子墨水倒不出来”,未能达到好用的目标。\n优化: ES查询优化方面的信息挺多,但通过百度筛选出解决此问题的有效方案,就要看咱家爱犬的品类了;当时搜集整理了并尝试了N多优化条款,可惜没有跟好运偶遇,结论是颜值不可靠。言归正传,影响读写性能的基本要素有3个:读写频率,数据规模,硬件性能;trace的情况从这三个维度来套一套模板:\n   要素 trace的情况 备注     读写频率 
宏观来看是写多读少的状况    数据规模 按照每天50亿个segment来算,半个月是750亿,1个月是1500亿。    硬件性能 普通硬盘速度一般     这个分析没有得出具有指导意义的结论,读写频率这里粒度太粗,用户的使用情况跟时间也有紧密的关系,情况大概是:\n 当天的数据是读多写多(当天不断有新数据写入,基于紧急响应的需求,问题出现时可能是近实时的排查处理)。 前一天的数据是读多写少(一般也会有问题隔天密集上报的情况,0点后会有前一天数据延迟到达的情况)。 再早的话无新数据写入,数据越早被读的概率也越小。  基于以上分析,增加时间维度并细化更多的参考因素后,分析模型变成了这样:\n   要素 当天 当天-1 当天-2 ~ 当天-N     写频率 多 少 无   读(查询)频率 多 多 少   读响应速度要求 快 快 慢点也行   数据规模 50亿 50亿 50亿* (N-2)   宿主机性能要求 高 高 次高   硬盘速度要求 高(SSD) 高(SSD) 次高(机械)   硬件成本 高 高 次高   期望成本 低 低 低    从上表可以看出,整体呈现出hot-warm数据架构的需求之势,近1-2天为hot数据,之前的为warm数据;恰好ES7提供了hot-warm架构支持,按照hot-warm改造后架构如下图所示:\n 恰逢公司ES中台调优版的ES发布,其内置的ZSTD压缩算法 空间压缩效果非常显著。 对 trace集群进行hot-warm架构调整,查询耗时从20多秒变成了2-3秒,效果是非常明显的。 从查询逻辑进一步调整,充分利用ES的数据分片、路由机制,把全量检索调整为精准检索,即降低检索时需要扫描的数据量,把2-3秒优化到毫秒。  这里要炫一个5毛特效,这套机制下,Segment数据即使是保留半年的,按照TraceId查询的耗时也是毫秒。\n至此完成了查询千亿级Trace数据只要毫秒级耗时的阶段性优化。\n6.6 仪表盘和拓扑查询慢 问题: Web端的【拓扑】页,在开始只有几十个应用的时候,虽然很慢,但还是能看到数据,随着应用增多后,【拓扑】页面数据请求一直是超时(配置的60s超时)的,精力有限,先通过功能降级把这个页面隐藏了;【仪表盘】的指标查询也非常的慢,未能达到好用的目标。\n优化: Web端的【仪表盘】页和【拓扑】页是对SkyWalking里metric数据的展现,metric数据同trace数据一样满足hot-warm的特征。\n metric集群采用hot-warm架构调整,之后仪表盘中的查询耗时也都减小为毫秒级。 【拓扑】页接口依然是超时(60s),对拓扑这里做了几个针对性的调整:  把内部的循环调用合并,压缩调用次数。 去除非必要的查询。 拆分隔离通用索引中的数据,避免互相干扰。 全量检索调整为精准检索,即降低检索时需要扫描的数据量。    至此完成了拓扑页数据查询毫秒级耗时的阶段性优化。\n6.7 小结 SkyWalking调优这个阶段,恰逢上海疫情封城,既要为生存抢菜,又要翻阅学习着各种ES原理、调优的文档资料,一行一行反复的品味思考SkyWalking相关的源码,尝试各种方案去优化它,梦中都在努力提升它的性能。疫情让很多人变得焦虑烦躁,但以我的感受来看在系统的性能压力下疫情不值一提。凡事贵在坚持,时间搞定了诸多困难,调优的效果是很显著的。\n可能在业务价值驱动的价值观中这些技术优化不产生直接业务价值,顶多是五毛特效,但从其他维度来看它价值显著:\n 对个人来说,技术有提升。 对团队来说,实战练兵提升战力,团队协作加深友情;特别感谢ES中台这段时间的鼎力支持! 对公司来说,易用性的提升将充分发挥SkyWalking的价值,在问题发生时,给到同事们切实、高效的帮助,使得问题可以被快速响应;须知战争拼的是保障。  这期间其实也是有考虑过其他的2个方案的:\n 使用降低采样率的兜底方案;但为了得到更准确的指标数据,以及后续其他的规划而坚持了全采样。 采用ClickHouse优化存储;因为公司有定制优化的ES版本,所以就继续在ES上做存储优化,刚好借此机会验证一下。后续【全链路结构化日志】的存储会使用ClickHouse。  这个章节将内容聚焦在落地推广时期技术层面的准备和调优,未描述团队协调、推广等方面的情况;因每个公司情况不同,所以并未提及;但其实对多数公司来说,有些项目的推广比技术本身可能难度更大,这个项目也遇到过一些困难,PM去推广是既靠能力又靠颜值, 以后有机会再与大家探讨。\n7、未来 H5、Native以及后端应用都在持续接入中,相应的SDK也在不断的迭代;目前正在基于已建立的链路通道,完善【全链路业务状态追踪】和【全链路结构化日志追踪】,旨在给运营、客服、运维、开发等服务在一线的同事们提供多视角一站式的观测平台,全方位提升系统服务质量、提高问题响应速度。\n","excerpt":"目录  开篇 为什么需要全链路监控 为什么选择SkyWalking 预研 POC 优化 未来  1、开篇 自从SkyWalking开始在公司推广,时不时会在排查问题的人群中听到这样的话:“你咋还没 …","ref":"/zh/2022-08-30-pingan-jiankang/","title":"SkyWalking on the way - 平安健康千亿级的全链路追踪系统的建设与实践"},{"body":"Observability essential when working with distributed systems. Built on 3 pillars of metrics, logging and tracing, having the right tools in place to quickly identify and determine the root cause of an issue in production is imperative. In this Kongcast interview, we explore the benefits of having observability and demo the use of Apache SkyWalking. We walk through the capabilities that SkyWalking offers out of the box and debug a common HTTP 500 error using the tool.\nAndrew Kew is interviewed by Viktor Gamov, a developer advocate at Kong Inc\nAndrew is a highly passionate technologist with over 16 valuable years experience in building server side and cloud applications. Having spent the majority of his time in the Financial Services domain, his meritocratic rise to CTO of an Algorithmic Trading firm allowed him to not only steer the business from a technology standpoint, but build robust and scalable trading algorithms. 
His mantra is \u0026ldquo;right first time\u0026rdquo;, thus ensuring the projects or clients he is involved in are left in a better place than they were before he arrived.\nHe is the founder of a boutique software consultancy in the United Kingdom, QuadCorps Ltd, working in the API and Integration Ecosystem space and is currently on a residency programme at Kong Inc as a senior field engineer and technical account manager working across many of their enterprise strategic accounts.\n  ","excerpt":"Observability essential when working with distributed systems. Built on 3 pillars of metrics, …","ref":"/blog/2022-08-11-kongcast-20-distributed-tracing-using-skywalking-kong/","title":"[Video] Distributed tracing demo using Apache SkyWalking and Kong API Gateway"},{"body":"","excerpt":"","ref":"/tags/conference/","title":"Conference"},{"body":"","excerpt":"","ref":"/tags/kong/","title":"Kong"},{"body":"SkyWalking Rust 0.3.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Update README.md by @wu-sheng in https://github.com/apache/skywalking-rust/pull/24 Improve errors. by @jmjoy in https://github.com/apache/skywalking-rust/pull/25 Add tracer. by @jmjoy in https://github.com/apache/skywalking-rust/pull/26 Move e2e to workspace. by @jmjoy in https://github.com/apache/skywalking-rust/pull/27 Auto finalize context and span when dropped. by @jmjoy in https://github.com/apache/skywalking-rust/pull/28 Add context capture and continued methods. by @jmjoy in https://github.com/apache/skywalking-rust/pull/29 Bump to 0.3.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/30  ","excerpt":"SkyWalking Rust 0.3.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-3-0/","title":"Release Apache SkyWalking Rust 0.3.0"},{"body":"SkyWalking NodeJS 0.5.1 is released. Go to downloads page to find release tars.\nSkyWalking NodeJS 0.5.1 is a patch release that fixed a vulnerability(CVE-2022-36127) in all previous versions \u0026lt;=0.5.0, we recommend all users who are using versions \u0026lt;=0.5.0 should upgrade to this version.\nThe vulnerability could cause NodeJS services that has this agent installed to be unavailable if the header includes an illegal SkyWalking header, such as\n OAP is unhealthy and the downstream service\u0026rsquo;s agent can\u0026rsquo;t establish the connection. Some sampling mechanism is activated in downstream agents.  ","excerpt":"SkyWalking NodeJS 0.5.1 is released. Go to downloads page to find release tars.\nSkyWalking NodeJS …","ref":"/events/release-apache-skywalking-nodejs-0-5-1/","title":"[CVE-2022-36127] Release Apache SkyWalking for NodeJS 0.5.1"},{"body":"SkyWalking Eyes 0.4.0 is released. Go to downloads page to find release tars.\n Reorganize GHA by header and dependency. (#123) Add rust cargo support for dep command. (#121) Support license expression in dep check. 
(#120) Prune npm packages before listing all dependencies (#119) Add support for multiple licenses in the header config section (#118) Add excludes to license resolve config (#117) maven: set group:artifact as dependency name and extend functions in summary template (#116) Stablize summary context to perform consistant output (#115) Add custom license urls for identification (#114) Lazy initialize GitHub client for comment (#111) Make license identifying threshold configurable (#110) Use Google\u0026rsquo;s licensecheck to identify licenses (#107) dep: short circuit if user declare dep license (#108)  ","excerpt":"SkyWalking Eyes 0.4.0 is released. Go to downloads page to find release tars.\n Reorganize GHA by …","ref":"/events/release-apache-skywalking-eyes-0-4-0/","title":"Release Apache SkyWalking Eyes 0.4.0"},{"body":"SkyWalking NodeJS 0.5.0 is released. Go to downloads page to find release tars.\n Bump up grpc-node to 1.6.7 to fix CVE-2022-25878 (#85) Fix issue #9165 express router entry duplicated (#84) Fix skywalking s3 upload error #8824 (#82) Improved ignore path regex (#81) Upgrade data collect protocol (#78) Fix wrong instance properties (#77) Fix wrong command in release doc (#76)  ","excerpt":"SkyWalking NodeJS 0.5.0 is released. Go to downloads page to find release tars.\n Bump up grpc-node …","ref":"/events/release-apache-skywalking-nodejs-0-5-0/","title":"Release Apache SkyWalking for NodeJS 0.5.0"},{"body":"SkyWalking Infra E2E 1.2.0 is released. Go to downloads page to find release tars.\nFeatures  Expand kind file path with system environment. Support shutdown service during setup phase in compose mode. Expand kind file path with system environment. Support arbitrary os and arch. Support docker-compose v2 container naming. Support installing via go install and add install doc. Add retry when delete kind cluster. Upgrade to go1.18.  Bug Fixes  Fix the problem of parsing verify.retry.interval without setting value.  Documentation  Make trigger.times parameter doc more clear.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Infra E2E 1.2.0 is released. Go to downloads page to find release tars.\nFeatures  Expand …","ref":"/events/release-apache-skywalking-infra-e2e-1-2-0/","title":"Release Apache SkyWalking Infra E2E 1.2.0"},{"body":"SkyWalking Python 0.8.0 is released. Go to downloads page to find release tars.\n  Feature:\n Update mySQL plugin to support two different parameter keys. 
(#186) Add a SW_AGENT_LOG_REPORTER_SAFE_MODE option to control the HTTP basic auth credential filter (#200)    Plugins:\n Add Psycopg(3.x) support (#168) Add MySQL support (#178) Add FastAPI support (#181) Drop support for flask 1.x due to dependency issue in Jinja2 and EOL (#195) Add Bottle support (#214)    Fixes:\n Spans now correctly reference finished parents (#161) Remove potential password leak from Aiohttp outgoing url (#175) Handle error when REMOTE_PORT is missing in Flask (#176) Fix sw-rabbitmq TypeError when there are no headers (#182) Fix agent bootstrap traceback not shown in sw-python CLI (#183) Fix local log stack depth overridden by agent log formatter (#192) Fix typo that cause user sitecustomize.py not loaded (#193) Fix instance property wrongly shown as UNKNOWN in OAP (#194) Fix multiple components inconsistently named on SkyWalking UI (#199) Fix SW_AGENT_LOGGING_LEVEL not properly set during startup (#196) Unify the http tag name with other agents (#208) Remove namespace to instance properties and add pid property (#205) Fix the properties are not set correctly (#198) Improved ignore path regex (#210) Fix sw_psycopg2 register_type() (#211) Fix psycopg2 register_type() second arg default (#212) Enhance Traceback depth (#206) Set spans whose http code \u0026gt; 400 to error (#187)    Docs:\n Add a FAQ doc on how to use with uwsgi (#188)    Others:\n Refactor current Python agent docs to serve on SkyWalking official website (#162) Refactor SkyWalking Python to use the CLI for CI instead of legacy setup (#165) Add support for Python 3.10 (#167) Move flake configs all together (#169) Introduce another set of flake8 extensions (#174) Add E2E test coverage for trace and logging (#199) Now Log reporter cause_exception_depth traceback limit defaults to 10 Enable faster CI by categorical parallelism (#170)    ","excerpt":"SkyWalking Python 0.8.0 is released. Go to downloads page to find release tars.\n  Feature:\n Update …","ref":"/events/release-apache-skywalking-python-0-8-0/","title":"Release Apache SkyWalking Python 0.8.0"},{"body":"SkyWalking Satellite 1.0.1 is released. Go to downloads page to find release tars.\nFeatures Bug Fixes  Fix metadata messed up when transferring Log data.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 1.0.1 is released. Go to downloads page to find release tars.\nFeatures Bug …","ref":"/events/release-apache-skwaylking-satellite-1-0-1/","title":"Release Apache SkyWalking Satellite 1.0.1"},{"body":"Content Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. 
However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. 
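To make the execve hook described earlier more concrete, here is a minimal libbpf-style sketch of a BPF program attached to the sys_enter_execve tracepoint. It is an illustration of the general mechanism rather than SkyWalking Rover source code; the program name handle_execve and the build command in the comment are assumptions made for this example, and you still need a recent kernel, the libbpf headers, and a small user-space loader (for example bpftool or a libbpf skeleton) to attach it.

/* Minimal illustrative sketch, not SkyWalking Rover source code.          */
/* Assumed build: clang -O2 -g -target bpf -c execve_trace.bpf.c -o execve_trace.bpf.o */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_execve")
int handle_execve(void *ctx)
{
    /* Collect runtime information about the process that triggered execve
       through BPF helper calls, then emit it to the kernel trace pipe. */
    char comm[16];
    __u32 pid = bpf_get_current_pid_tgid() >> 32;

    bpf_get_current_comm(comm, sizeof(comm));
    bpf_printk("execve: pid=%d comm=%s", pid, comm);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";

Once attached, every execve in the system triggers the program and the collected process information shows up in /sys/kernel/debug/tracing/trace_pipe, which is enough to see the event-driven nature of eBPF instrumentation.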
Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. 
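As a rough sketch of how frequency-based on-CPU sampling is typically wired up with eBPF (an illustration of the common technique, not SkyWalking Rover's actual implementation), a perf_event program can capture the user and kernel stack ids on every sample and count how often each pair is observed. The map names, sizes, and the 99 Hz figure mentioned below are assumptions for the example; a user-space component still has to open a CPU-clock perf event on every CPU at the chosen frequency, attach the program, and later resolve the recorded addresses to symbols (which is one reason symbol-rich static languages such as C, C++, Go, and Rust are the easiest targets).

/* Illustrative sketch of on-CPU sampling with a perf_event BPF program.   */
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>   /* struct bpf_perf_event_data */
#include <bpf/bpf_helpers.h>

#define MAX_STACK_DEPTH 127

struct stack_count_key {
    __u32 pid;
    __s32 user_stack_id;
    __s32 kernel_stack_id;
};

/* Raw stack traces, referenced by the ids stored in stack_count_key. */
struct {
    __uint(type, BPF_MAP_TYPE_STACK_TRACE);
    __uint(max_entries, 16384);
    __uint(key_size, sizeof(__u32));
    __uint(value_size, MAX_STACK_DEPTH * sizeof(__u64));
} stack_traces SEC(".maps");

/* Sample counter per (pid, user stack, kernel stack); more hits mean
   more time spent on-CPU in that stack. */
struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 16384);
    __type(key, struct stack_count_key);
    __type(value, __u64);
} sample_counts SEC(".maps");

SEC("perf_event")
int do_on_cpu_sample(struct bpf_perf_event_data *ctx)
{
    struct stack_count_key key = {};
    __u64 one = 1, *count;

    key.pid = bpf_get_current_pid_tgid() >> 32;
    /* Stack ids point into stack_traces; user space walks them later
       to build the flame graph. */
    key.user_stack_id = bpf_get_stackid(ctx, &stack_traces, BPF_F_USER_STACK);
    key.kernel_stack_id = bpf_get_stackid(ctx, &stack_traces, 0);

    count = bpf_map_lookup_elem(&sample_counts, &key);
    if (count)
        __sync_fetch_and_add(count, 1);
    else
        /* Benign race if two CPUs insert the same key at once; fine for a sketch. */
        bpf_map_update_elem(&sample_counts, &key, &one, BPF_ANY);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";

Attaching this program to a software CPU-clock perf event opened at, say, 99 Hz per CPU, then aggregating sample_counts and resolving the stack ids in user space, is essentially what produces the flame graphs shown in the rest of this section.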
If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. 
Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Content Background Apache SkyWalking observes metrics, logs, traces, and events for services …","ref":"/blog/2022-07-05-pinpoint-service-mesh-critical-performance-impact-by-using-ebpf/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"SkyWalking Rust 0.2.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  add a description to compile in README.md by @Shikugawa in https://github.com/apache/skywalking-rust/pull/16 Update NOTICE to 2022 by @wu-sheng in https://github.com/apache/skywalking-rust/pull/17 fix ignore /e2e/target folder by @tisonkun in https://github.com/apache/skywalking-rust/pull/18 Remove Cargo.lock, update dependencies, update submodule, disable build grpc server api. by @jmjoy in https://github.com/apache/skywalking-rust/pull/19 Enhance Trace Context machenism. by @jmjoy in https://github.com/apache/skywalking-rust/pull/20 chore(typo): fix typo in context/propagation/context.rs by @CherishCai in https://github.com/apache/skywalking-rust/pull/21 Feature(tonic-build): set tonic-build.build_server(false), do not build Server code. by @CherishCai in https://github.com/apache/skywalking-rust/pull/22 Rename crate name skywalking_rust to skywalking? by @jmjoy in https://github.com/apache/skywalking-rust/pull/23  ","excerpt":"SkyWalking Rust 0.2.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-2-0/","title":"Release Apache SkyWalking Rust 0.2.0"},{"body":"B站视频地址\n","excerpt":"B站视频地址","ref":"/zh/2022-06-23-more-than-tracing-logging-metrics/","title":"阿里云 - 可观测技术峰会 2022 - More than Tracing Logging Metrics"},{"body":"SkyWalking Java Agent 8.11.0 is released. 
Go to downloads page to find release tars. Changes by Version\n8.11.0  Fix cluster and namespace value duplicated(namespace value) in properties report. Add layer field to event when reporting. Remove redundant shade.package property. Add servicecomb-2.x plugin and Testcase. Fix NPE in gateway plugin when the timer triggers webflux webclient call. Add an optional plugin, trace-sampler-cpu-policy-plugin, which could disable trace collecting in high CPU load. Change the dateformat of logs to yyyy-MM-dd HH:mm:ss.SSS(was yyyy-MM-dd HH:mm:ss:SSS). Fix NPE in elasticsearch plugin. Grpc plugin support trace client async generic call(without grpc stubs), support Method type: UNARY、SERVER_STREAMING. Enhance Apache ShenYu (incubating) plugin: support trace grpc,sofarpc,motan,tars rpc proxy. Add primary endpoint name to log events. Fix Span not finished in gateway plugin when the gateway request timeout. Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report. Fix tcnative libraries relocation for aarch64. Add plugin.jdbc.trace_sql_parameters into Configuration Discovery Service. Fix argument type name of Array in postgresql-8.x-plugin from java.lang.String[] to [Ljava.lang.String; Add type name checking in ArgumentTypeNameMatch and ReturnTypeNameMatch Highlight ArgumentTypeNameMatch and ReturnTypeNameMatch type naming rule in docs/en/setup/service-agent/java-agent/Java-Plugin-Development-Guide.md Fix FileWriter scheduled task NPE Optimize gRPC Log reporter to set service name for the first element in the streaming.(No change for Kafka reporter)  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.11.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-11-0/","title":"Release Apache SkyWalking Java Agent 8.11.0"},{"body":"SkyWalking Rover 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Support OFF_CPU Profiling. Introduce the BTFHub module. Update to using frequency mode to ON_CPU Profiling. Add logs in the profiling module logical.  Bug Fixes  Fix docker based process could not be detected.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Rover 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skwaylking-rover-0-2-0/","title":"Release Apache SkyWalking Rover 0.2.0"},{"body":"SkyWalking 9.1.0 is released. Go to downloads page to find release tars.\n eBPF agent(skywalking rover) is integrated in the first time  BanyanDB(skywalking native database) is integrated and passed MVP phase. On-demand logs are provided first time in skywalking for all mesh services and k8s deployment as a zero cost log solution  Zipkin alternative is being official, and Zipkin\u0026rsquo;s HTTP APIs are supported as well as lens UI.  Changes by Version Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. 
Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). 
SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.1.0 is released. Go to downloads page to find release tars.\n eBPF agent(skywalking …","ref":"/events/release-apache-skywalking-apm-9.1.0/","title":"Release Apache SkyWalking APM 9.1.0"},{"body":"SkyWalking BanyanDB 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  BanyanD is the server of BanyanDB  TSDB module. It provides the primary time series database with a key-value data module. Stream module. It implements the stream data model\u0026rsquo;s writing. Measure module. 
It implements the measure data model's writing. Metadata module. It implements resource registering and property CRUD. Query module. It handles the querying requests of stream and measure. Liaison module. It's the gateway to other modules and provides access endpoints to clients.   gRPC based APIs Document  API reference Installation instructions Basic concepts   Testing  UT E2E with Java Client and OAP    ","excerpt":"SkyWalking BanyanDB 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  BanyanD …","ref":"/events/release-apache-skywalking-banyandb-0-1-0/","title":"Release Apache SkyWalking BanyanDB 0.1.0"},{"body":"SkyWalking BanyanDB Java Client 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support Measure, Stream and Property Query and Write APIs Support Metadata Management APIs for Measure, Stream, IndexRule and IndexRuleBinding  Chores  Set up GitHub Actions to check code styles, licenses, and tests.  ","excerpt":"SkyWalking BanyanDB Java Client 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skywalking-banyandb-java-client-0-1-0/","title":"Release Apache SkyWalking BanyanDB Java Client 0.1.0"},{"body":"SkyWalking Rover 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support detecting processes in scanner or Kubernetes mode. Support profiling C, C++, Golang, and Rust services.  Bug Fixes Issues and PR  All issues are here All pull requests are here  ","excerpt":"SkyWalking Rover 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skwaylking-rover-0-1-0/","title":"Release Apache SkyWalking Rover 0.1.0"},{"body":"SkyWalking Satellite 1.0.0 is released. Go to downloads page to find release tars.\nFeatures  Add the compat protocol receiver for old versions of agents. Support transmitting the native eBPF Process and Profiling protocols. Change the names of plugins that were not well named.  Bug Fixes  Fix metadata loss in the Native Meter protocol.  Issues and PR  All issues are here All pull requests are here  ","excerpt":"SkyWalking Satellite 1.0.0 is released. Go to downloads page to find release tars.\nFeatures  Add the …","ref":"/events/release-apache-skwaylking-satellite-1-0-0/","title":"Release Apache SkyWalking Satellite 1.0.0"},{"body":"SkyWalking Eyes 0.3.0 is released. 
Go to downloads page to find release tars.\n  Dependency License\n Fix license check in go library testify (#93)    License Header\n fix command supports more languages:  Add comment style for cmake language (#86) Add comment style for hcl (#89) Add mpl-2.0 header template (#87) Support fix license header for tcl files (#102) Add python docstring comment style (#100) Add comment style for makefile \u0026amp; editorconfig (#90)   Support config license header comment style (#97) Trim leading and trailing newlines before rewrite license header cotent (#94) Replace already existing license header based on pattern (#98) [docs] add the usage for config the license header comment style (#99)    Project\n Obtain default github token in github actions (#82) Add tests for bare spdx license header content (#92) Add github action step summary for better experience (#104) Adds an option to the action to run in fix mode (#84) Provide --summary flag to generate the license summary file (#103) Add .exe suffix to windows binary (#101) Fix wrong file path and exclude binary files in src release (#81) Use t.tempdir to create temporary test directory (#95) Config: fix incorrect log message (#91) [docs] correct spelling mistakes (#96)    ","excerpt":"SkyWalking Eyes 0.3.0 is released. Go to downloads page to find release tars.\n  Dependency License …","ref":"/events/release-apache-skywalking-eyes-0-3-0/","title":"Release Apache SkyWalking Eyes 0.3.0"},{"body":"","excerpt":"","ref":"/zh_tags/apache-shenyu-incubating/","title":"Apache ShenYu (incubating)"},{"body":"","excerpt":"","ref":"/tags/apache-shenyu-incubating/","title":"Apache ShenYu (incubating)"},{"body":"目录  SkyWalking和ShenYu介绍 ApacheShenYu插件实现原理 给gRPC插件增加泛化调用追踪并保持兼容 ShenYu网关可观测性实践 总结  1.SkyWalking和ShenYu介绍 1.1 SkyWalking SkyWalking是一个针对微服务、分布式系统、云原生的应用性能监控(APM)和可观测性分析平台(OAP), 拥有强大的功能,提供了多维度应用性能分析手段,包含分布式拓扑图、应用性能指标、分布式链路追踪、日志关联分析和告警。同时还拥有非常丰富的生态。广泛应用于各个公司和开源项目。\n1.2 Apache ShenYu (incubating) Apache ShenYu (incubating)是一个高性能,多协议,易扩展,响应式的API网关。 兼容各种主流框架体系,支持热插拔,用户可以定制化开发,满足用户各种场景的现状和未来需求,经历过大规模场景的锤炼。 支持丰富的协议:Http、Spring Cloud、gRPC、Dubbo、SOFARPC、Motan、Tars等等。\n2.ApacheShenYu插件实现原理 ShenYu的异步和以往接触的异步有一点不一样,是一种全链路异步,每一个插件的执行都是异步的,并且线程切换并不是单一固定的情况(和各个插件实现有关)。 网关会发起各种协议类型的服务调用,现有的SkyWalking插件发起服务调用的时候会创建ExitSpan(同步或异步). 网关接收到请求会创建异步的EntrySpan。 异步的EntrySpan需要和同步或异步的ExitSpan串联起来,否则链路会断。 串联方案有2种:\n 快照传递: 将创建EntrySpan之后的快照通过某种方式传递到创建ExitSpan的线程中。\n目前这种方式应用在异步的WebClient插件中,该插件能接收异步快照。ShenYu代理Http服务或SpringCloud服务便是通过快照传递实现span串联。 LocalSpan中转: 其它RPC类插件不像异步WebClient那样可以接收快照实现串联。尽管你可以改动其它RPC插件让其接收快照实现串联,但不推荐也没必要, 因为可以通过在创建ExitSpan的线程中,创建一个LocalSpan就可以实现和ExitSpan串联,然后将异步的EntrySpan和LocalSpan通过快照传递的方式串联。这样实现完全可以不改动原先插件的代码。  span连接如下图所示:\n也许你会问是否可以在一个通用的插件里面创建LocalSpan,而不是ShenYu RPC插件分别创建一个? 答案是不行,因为需要保证LocalSpan和ExitSpan在同一个线程,而ShenYu是全链路异步. 在实现上创建LocalSpan的代码是复用的。\n3. 给gRPC插件增加泛化调用追踪并保持兼容 现有的SkyWalking gRPC插件只支持通过存根的方式发起的调用。而对于网关而言并没有proto文件,网关采取的是泛化调用(不通过存根),所以追踪rpc请求,你会发现链路会在网关节点断掉。 在这种情况下,需要让gRPC插件支持泛化调用,而同时需要保持兼容,不影响原先的追踪方式。实现上通过判断请求参数是否是动态消息(DynamicMessage),如果不是则走原先通过存根的追踪逻辑, 如果是则走泛化调用追踪逻辑。另外的兼容则是在gRPC新旧版本的差异,以及获取服务端IP各种情况的兼容,感兴趣的可以看看源码。\n4. 
ShenYu网关可观测性实践 上面讲解了SkyWalking ShenYu插件的实现原理,下面部署应用看下效果。SkyWalking功能强大,除了了链路追踪需要开发插件外,其它功能强大功能开箱即用。 这里只描述链路追踪和应用性能剖析部分,如果想体验SkyWalking功能的强大,请参考SkyWalking官方文档。\n版本说明:\n skywalking-java: 8.11.0-SNAPSHOT源码构建。说明:shenyu插件会在8.11.0版本发布,可能会在5月或6月初步发布它。Java代理正处于常规发布阶段。 skywalking: 9.0.0 V9 版本  用法说明:\nSkyWalking的设计非常易用,配置和激活插件请参考官方文档。\n SkyWalking Documentation SkyWalking Java Agent Documentation  4.1 向网关发起请求 通过postman客户端或者其它方式向网关发起各种服务请求\n4.2 请求拓扑图  4.3 请求链路(以gRPC为例) 正常链路: 异常链路: 点击链路节点变可以看到对应的节点信息和异常信息\n服务提供者span 网关请求span 4.4 服务指标监控 服务指标监控 4.5 网关后台指标监控 数据库监控: 线程池和连接池监控 4.6 JVM监控 4.7 接口分析 4.8 异常日志和异常链路分析 日志配置见官方文档\n日志监控 异常日志对应的分布式链路追踪详情 5. 总结 SkyWalking在可观测性方面对指标、链路追踪、日志有着非常全面的支持,功能强大,简单易用,专为大型分布式系统、微服务、云原生、容器架构而设计,拥有丰富的生态。 使用SkyWalking为Apache ShenYu (incubating)提供强大的可观测性支持,让ShenYu如虎添翼。最后,如果你对高性能响应式网关感兴趣,可以关注 Apache ShenYu (incubating) 。 同时感谢SkyWalking这么优秀的开源软件对行业所作的贡献。\n","excerpt":"目录  SkyWalking和ShenYu介绍 ApacheShenYu插件实现原理 给gRPC插件增加泛化调用追踪并保持兼容 ShenYu网关可观测性实践 总结  1.SkyWalking …","ref":"/zh/2022-05-08-apache-shenyuincubating-integrated-skywalking-practice-observability/","title":"Apache ShenYu (incubating)插件实现原理和可观测性实践"},{"body":"Content  Introduction of SkyWalking and ShenYu Apache ShenYu plugin implementation principle Adding generalized call tracking to the gRPC plugin and keeping it compatible ShenYu Gateway Observability Practice Summary  1. Introduction of SkyWalking and ShenYu 1.1 SkyWalking SkyWalking is an Application Performance Monitoring (APM) and Observability Analysis Platform (OAP) for microservices, distributed systems, and cloud natives, Has powerful features that provide a multi-dimensional means of application performance analysis, including distributed topology diagrams, application performance metrics, distributed link tracing, log correlation analysis and alerts. Also has a very rich ecology. Widely used in various companies and open source projects.\n1.2 Apache ShenYu (incubating) Apache ShenYu (incubating) High-performance,multi-protocol,extensible,responsive API Gateway. Compatible with a variety of mainstream framework systems, support hot plug, users can customize the development, meet the current situation and future needs of users in a variety of scenarios, experienced the temper of large-scale scenes. Rich protocol support: Http, Spring Cloud, gRPC, Dubbo, SOFARPC, Motan, Tars, etc.\n2. Apache ShenYu plugin implementation principle ShenYu\u0026rsquo;s asynchrony is a little different from previous exposure to asynchrony, it is a full-link asynchrony, the execution of each plug-in is asynchronous, and thread switching is not a single fixed situation (and the individual plug-in implementation is related). The gateway initiates service calls of various protocol types, and the existing SkyWalking plugins create ExitSpan (synchronous or asynchronous) when they initiate service calls. The gateway receives the request and creates an asynchronous EntrySpan. The asynchronous EntrySpan needs to be concatenated with the synchronous or asynchronous ExitSpan, otherwise the link will be broken.\nThere are 2 types of tandem solutions:\n Snapshot Delivery:\nPass the snapshot after creating the EntrySpan to the thread that created the ExitSpan in some way.\nCurrently this approach is used in the asynchronous WebClient plugin, which can receive asynchronous snapshots. shenYu proxy Http service or SpringCloud service is to achieve span concatenation through snapshot passing. 
LocalSpan transit:\nOther RPC-type plugins cannot receive snapshots for concatenation the way the asynchronous WebClient plugin does. Although you could modify the other RPC plugins to accept snapshots, it is neither recommended nor necessary: you can instead create a LocalSpan in the thread where the ExitSpan is created, and then connect the asynchronous EntrySpan and the LocalSpan by snapshot passing. This can be done without changing the original plugin code.  The span connection is shown below:\nYou may ask whether it is possible to create the LocalSpan inside one generic plugin, instead of creating it separately in each ShenYu RPC plugin. The answer is no, because the LocalSpan and the ExitSpan must be in the same thread, and ShenYu is asynchronous across the whole chain. In the implementation, the code that creates the LocalSpan is reused.\n3. Adding generalized call tracking to the gRPC plugin and keeping it compatible The existing SkyWalking gRPC plugin only supports calls initiated through stubs. A gateway, however, has no proto files and uses generalized (stub-less) calls, so when tracing RPC requests you will find that the trace breaks at the gateway node. In this case, the gRPC plugin needs to support generalized calls while remaining compatible and not affecting the original tracing method. This is achieved by checking whether the request parameter is a DynamicMessage: if it is not, the original stub-based tracing logic is used; if it is, the generalized call tracing logic is used. The remaining compatibility work covers differences between old and new gRPC versions, as well as the various ways of obtaining the server-side IP; those interested can read the source code.\n4. ShenYu Gateway Observability Practice The sections above explain how the SkyWalking ShenYu plugin is implemented; next, we deploy an application to see the effect. SkyWalking is powerful: apart from distributed tracing, which required developing this plugin, its other features work out of the box. This section only covers distributed tracing and application performance profiling; to experience the full power of SkyWalking, please refer to the official SkyWalking documentation.\nVersion description:\n skywalking-java: 8.11.0-SNAPSHOT, built from source. Note: the ShenYu plugin will be released in version 8.11.0, which will probably see an initial release in May or June; the Java agent follows its regular release cycle. skywalking: 9.0.0 (V9)  Usage instructions:\nSkyWalking is designed to be very easy to use. 
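Before moving on to configuration, here is a minimal sketch of the EntrySpan -> LocalSpan -> ExitSpan bridging described in section 2. The actual ShenYu plugin is implemented against the SkyWalking Java agent; this sketch instead uses the Go go2sky API that appears elsewhere in this collection, purely as an illustration. The handler path, operation names and the peer address (tomcat-service:8080) are assumptions made for the sketch, and the propagated context plays the role of the snapshot.

package main

import (
	"context"
	"log"
	"net/http"
	"sync"

	"github.com/SkyAPM/go2sky"
	"github.com/SkyAPM/go2sky/reporter"
)

func main() {
	// Reporter and tracer, exactly as in the quick-start example in this collection.
	r, err := reporter.NewGRPCReporter("oap-skywalking:11800")
	if err != nil {
		log.Fatalf("new reporter error %v", err)
	}
	defer r.Close()
	tracer, err := go2sky.NewTracer("gateway-demo", go2sky.WithReporter(r))
	if err != nil {
		log.Fatalf("new tracer error %v", err)
	}

	http.HandleFunc("/proxy", func(w http.ResponseWriter, req *http.Request) {
		// EntrySpan: created when the request enters the gateway; the incoming
		// sw8 header (if any) is read through the extractor callback.
		entrySpan, ctx, err := tracer.CreateEntrySpan(req.Context(), "/proxy", func(key string) (string, error) {
			return req.Header.Get(key), nil
		})
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		var wg sync.WaitGroup
		wg.Add(1)
		// The downstream call runs on another goroutine, mimicking ShenYu's fully
		// asynchronous pipeline; the propagated ctx plays the role of the snapshot.
		go func(ctx context.Context) {
			defer wg.Done()
			// LocalSpan: created on the same execution path that will create the
			// ExitSpan, which is exactly the bridging trick described in section 2.
			localSpan, ctx, err := tracer.CreateLocalSpan(ctx)
			if err != nil {
				return
			}
			defer localSpan.End()

			upstream, _ := http.NewRequest("GET", "http://tomcat-service:8080/service/validate", nil)
			// ExitSpan: the span context is injected into the outgoing request headers.
			exitSpan, err := tracer.CreateExitSpan(ctx, "/service/validate", "tomcat-service:8080", func(key, value string) error {
				upstream.Header.Set(key, value)
				return nil
			})
			if err != nil {
				return
			}
			defer exitSpan.End()
			exitSpan.Tag(go2sky.TagHTTPMethod, upstream.Method)
			exitSpan.Tag(go2sky.TagURL, upstream.URL.String())
			// http.DefaultClient.Do(upstream) would perform the real call here.
		}(ctx)

		wg.Wait()
		entrySpan.End()
		w.WriteHeader(http.StatusOK)
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}

The point of the sketch is only the ordering: the LocalSpan is created on the same execution path as the ExitSpan, and the asynchronously created EntrySpan is connected to them through the propagated context.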
Please refer to the official documentation for configuring and activating the shenyu plugin.\n SkyWalking Documentation SkyWalking Java Agent Documentation  4.1 Sending requests to the gateway Initiate various service requests to the gateway via the postman client or other means.\n4.2 Request Topology Diagram   4.3 Request Trace (in the case of gRPC) Normal Trace: Abnormal Trace: Click on the link node to see the corresponding node information and exception information\nService Provider Span Gateway request span 4.4 Service Metrics Monitoring 4.5 Gateway background metrics monitoring Database Monitoring: Thread pool and connection pool monitoring: 4.6 JVM Monitoring 4.7 Endpoint Analysis 4.8 Exception log and exception link analysis See official documentation for log configuration\nLog monitoring Distributed link trace details corresponding to exception logs 5. Summary SkyWalking has very comprehensive support for metrics, link tracing, and logging in observability, and is powerful, easy to use, and designed for large distributed systems, microservices, cloud-native, container architectures, and has a rich ecosystem. Using SkyWalking to provide powerful observability support for Apache ShenYu (incubating) gives ShenYu a boost. Finally, if you are interested in high-performance responsive gateways, you can follow Apache ShenYu (incubating). Also, thanks to SkyWalking such an excellent open source software to the industry contributions.\n","excerpt":"Content  Introduction of SkyWalking and ShenYu Apache ShenYu plugin implementation principle Adding …","ref":"/blog/2022-05-08-apache-shenyuincubating-integrated-skywalking-practice-observability/","title":"Apache ShenYu(incubating) plugin implementation principles and observability practices"},{"body":"","excerpt":"","ref":"/zh_tags/logging/","title":"Logging"},{"body":"","excerpt":"","ref":"/zh_tags/metrics/","title":"Metrics"},{"body":"","excerpt":"","ref":"/zh_tags/observability/","title":"Observability"},{"body":"","excerpt":"","ref":"/zh_tags/skywalking/","title":"SkyWalking"},{"body":"","excerpt":"","ref":"/zh_tags/tracing/","title":"Tracing"},{"body":"SkyWalking Kubernetes Event Exporter 1.0.0 is released. Go to downloads page to find release tars.\n Add Apache SkyWalking exporter to export events into SkyWalking OAP. Add console exporter for debugging purpose.  ","excerpt":"SkyWalking Kubernetes Event Exporter 1.0.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-kubernetes-event-exporter-1.0.0/","title":"Release Apache SkyWalking Kubernetes Event Exporter 1.0.0"},{"body":"content:  Introduction Features Install SWCK Deploy a demo application Verify the injector Concluding remarks  1. Introduction 1.1 What\u0026rsquo;s SWCK? SWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nIn fact, SWCK is an operator developed based on kubebuilder, providing users with Custom Resources ( CR ) and controllers for managing resources ( Controller ), all CustomResourceDefinitions(CRDs)are as follows:\n JavaAgent OAP UI Storage Satellite Fetcher  1.2 What\u0026rsquo;s the java agent injector? For a java application, users need to inject the java agent into the application to get metadata and send it to the SkyWalking backend. To make users use the java agent more natively, we propose the java agent injector to inject the java agent sidecar into a pod. 
The java agent injector is actually a Kubernetes Mutation Webhook Controller. The controller intercepts pod events and applies mutations to the pod if annotations exist within the request.\n2. Features   Transparent. User’s applications generally run in normal containers while the java agent runs in the init container, and both belong to the same pod. Each container in the pod mounts a shared memory volume that provides a storage path for the java agent. When the pod starts, the java agent in the init container will run before the application container, and the injector will store the java agent file in the shared memory volume. When the application container starts, the injector injects the agent file into the application by setting the JVM parameter. Users can inject the java agent in this way without rebuilding the container image containing the java agent.\n  Configurability. The injector provides two ways to configure the java agent: global configuration and custom configuration. The default global configuration is stored in the configmap, you can update it as your own global configuration, such as backend_service. In addition, you can also set custom configuration for some applications via annotation, such as “service_name”. For more information, please see java-agent-injector.\n  Observability. For each injected java agent, we provide CustomDefinitionResources called JavaAgent to observe the final agent configuration. Please refer to javaagent to get more details.\n  3. Install SWCK In the next steps, we will show how to build a stand-alone Kubernetes cluster and deploy the 0.6.1 version of SWCK on the platform.\n3.1 Tool Preparation Firstly, you need to install some tools as follows:\n kind, which is used to create a stand-alone Kubernetes cluster. kubectl, which is used to communicate with the Kubernetes cluster.  3.2 Install stand-alone Kubernetes cluster After installing kind , you could use the following command to create a stand-alone Kubernetes cluster.\n Notice! If your terminal is configured with a proxy, you need to close it before the cluster is created to avoid some errors.\n $ kind create cluster --image=kindest/node:v1.19.1 After creating a cluster, you can get the pods as below.\n$ kubectl get pod -A NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-f9fd979d6-57xpc 1/1 Running 0 7m16s kube-system coredns-f9fd979d6-8zj8h 1/1 Running 0 7m16s kube-system etcd-kind-control-plane 1/1 Running 0 7m23s kube-system kindnet-gc9gt 1/1 Running 0 7m16s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 7m23s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 7m23s kube-system kube-proxy-6zbtb 1/1 Running 0 7m16s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 7m23s local-path-storage local-path-provisioner-78776bfc44-jwwcs 1/1 Running 0 7m16s 3.3 Install certificates manger(cert-manger) The certificates of SWCK are distributed and verified by the certificate manager. 
You need to install the cert-manager through the following command.\n$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml Verify whether cert-manager is installed successfully.\n$ kubectl get pod -n cert-manager NAME READY STATUS RESTARTS AGE cert-manager-7dd5854bb4-slcmd 1/1 Running 0 73s cert-manager-cainjector-64c949654c-tfmt2 1/1 Running 0 73s cert-manager-webhook-6bdffc7c9d-h8cfv 1/1 Running 0 73s 3.4 Install SWCK The java agent injector is a component of the operator, so please follow the next steps to install the operator first.\n Get the deployment yaml file of SWCK and deploy it.  $ curl -Ls https://archive.apache.org/dist/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz | tar -zxf - -O ./config/operator-bundle.yaml | kubectl apply -f - Check SWCK as below.  $ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-7f64f996fc-qh8s9 2/2 Running 0 94s 3.5 Install Skywalking components — OAPServer and UI  Deploy the OAPServer and UI in the default namespace.  $ kubectl apply -f https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml Check the OAPServer.  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS default 1 1 default-oap.default Check the UI.  $ kubectl get ui NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS default 1 1 default-ui.default [80] 4. Deploy a demo application In the third step, we have installed SWCK and related Skywalking components. Next, we will show how to use the java agent injector in SWCK through two java application examples in two ways: global configuration and custom configuration.\n4.1 Set the global configuration When we have installed SWCK, the default configuration is the configmap in the system namespace, we can get it as follows.\n$ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. In the cluster created by kind, the backend_service may not be correct, we need to use the real OAPServer\u0026rsquo;s address default-oap.default to replace the default 127.0.0.1, so we can edit the configmap as follow.\n$ kubectl edit configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system configmap/skywalking-swck-java-agent-configmap edited $ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:default-oap.default:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. 4.2 Set the custom configuration In some cases, we need to use the Skywalking component to monitor different java applications, so the agent configuration of different applications may be different, such as the name of the application, and the plugins that the application needs to use, etc. 
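Conceptually, the injector overlays any per-application annotation values on top of the global configmap defaults, but only when the overlay switch is enabled. The following Go snippet is a hypothetical sketch of that merge rule, not SWCK's actual implementation; the annotation keys mirror the ones used in the manifests below.

package main

import (
	"fmt"
	"strings"
)

const (
	overlaySwitch = "strategy.skywalking.apache.org/agent.Overlay"
	agentPrefix   = "agent.skywalking.apache.org/"
)

// mergeAgentConfig overlays per-pod annotation overrides on the global defaults.
// Hypothetical illustration only; SWCK's real controller is more involved.
func mergeAgentConfig(global, annotations map[string]string) map[string]string {
	effective := make(map[string]string, len(global))
	for k, v := range global {
		effective[k] = v
	}
	// Overrides apply only when the overlay switch is explicitly enabled.
	if annotations[overlaySwitch] != "true" {
		return effective
	}
	for k, v := range annotations {
		if strings.HasPrefix(k, agentPrefix) {
			// "agent.skywalking.apache.org/agent.service_name" -> "agent.service_name"
			effective[strings.TrimPrefix(k, agentPrefix)] = v
		}
	}
	return effective
}

func main() {
	global := map[string]string{
		"agent.service_name":        "${SW_AGENT_NAME:Your_ApplicationName}",
		"collector.backend_service": "${SW_AGENT_COLLECTOR_BACKEND_SERVICES:default-oap.default:11800}",
	}
	podAnnotations := map[string]string{
		overlaySwitch:                      "true",
		agentPrefix + "agent.service_name": "backend-service",
	}
	fmt.Println(mergeAgentConfig(global, podAnnotations))
}

With the sample values, agent.service_name is overridden to backend-service while collector.backend_service keeps the global default, which matches the JavaAgent resource queried later in this walkthrough.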
Next, we will take two simple java applications developed based on spring boot and spring cloud gateway as examples for a detailed description. You can use the source code to build the image.\n# build the springboot and springcloudgateway image  $ git clone https://github.com/dashanji/swck-spring-cloud-k8s-demo $ cd swck-spring-cloud-k8s-demo \u0026amp;\u0026amp; make # check the image $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE gateway v0.0.1 51d16251c1d5 48 minutes ago 723MB app v0.0.1 62f4dbcde2ed 48 minutes ago 561MB # load the image into the cluster $ kind load docker-image app:v0.0.1 \u0026amp;\u0026amp; kind load docker-image gateway:v0.0.1 4.3 deploy spring boot application  Create the springboot-system namespace.  $ kubectl create namespace springboot-system Label the springboot-systemnamespace to enable the java agent injector.  $ kubectl label namespace springboot-system swck-injection=enabled Deploy the corresponding deployment file springboot.yaml for the spring boot application, which uses annotation to override the default agent configuration, such as service_name.   Notice! Before using the annotation to override the agent configuration, you need to add strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; to make the override take effect.\n apiVersion:apps/v1kind:Deploymentmetadata:name:demo-springbootnamespace:springboot-systemspec:selector:matchLabels:app:demo-springboottemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;# enable the java agent injectorapp:demo-springbootannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;# enable the agent overlayagent.skywalking.apache.org/agent.service_name:\u0026#34;backend-service\u0026#34;spec:containers:- name:springbootimagePullPolicy:IfNotPresentimage:app:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:demonamespace:springboot-systemspec:type:ClusterIPports:- name:8085-tcpport:8085protocol:TCPtargetPort:8085selector:app:demo-springbootDeploy a spring boot application in the springboot-system namespace.  $ kubectl apply -f springboot.yaml Check for deployment.  $ kubectl get pod -n springboot-system NAME READY STATUS RESTARTS AGE demo-springboot-7c89f79885-dvk8m 1/1 Running 0 11s Get the finnal injected java agent configuration through JavaAgent.  $ kubectl get javaagent -n springboot-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-springboot-javaagent app=demo-springboot backend-service default-oap.default:11800 4.4 deploy spring cloud gateway application  Create the gateway-system namespace.  $ kubectl create namespace gateway-system Label the gateway-systemnamespace to enable the java agent injector.  $ kubectl label namespace gateway-system swck-injection=enabled Deploy the corresponding deployment file springgateway.yaml for the spring cloud gateway application, which uses annotation to override the default agent configuration, such as service_name. In addition, when using spring cloud gateway, we need to add the spring cloud gateway plugin to the agent configuration.   Notice! 
Before using the annotation to override the agent configuration, you need to add strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; to make the override take effect.\n apiVersion:apps/v1kind:Deploymentmetadata:labels:app:demo-gatewayname:demo-gatewaynamespace:gateway-systemspec:selector:matchLabels:app:demo-gatewaytemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo-gatewayannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;gateway-service\u0026#34;optional.skywalking.apache.org:\u0026#34;cloud-gateway-3.x\u0026#34;# add spring cloud gateway pluginspec:containers:- image:gateway:v0.0.1name:gatewaycommand:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/gateway.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:service-gatewaynamespace:gateway-systemspec:type:ClusterIPports:- name:9999-tcpport:9999protocol:TCPtargetPort:9999selector:app:demo-gatewayDeploy a spring cloud gateway application in the gateway-system namespace.  $ kubectl apply -f springgateway.yaml Check for deployment.  $ kubectl get pod -n gateway-system NAME READY STATUS RESTARTS AGE demo-gateway-5bb77f6d85-9j7c6 1/1 Running 0 15s Get the finnal injected java agent configuration through JavaAgent.  $ kubectl get javaagent -n gateway-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-gateway-javaagent app=demo-gateway gateway-service default-oap.default:11800 5. Verify the injector  After completing the above steps, we can view detailed state of the injected pod, like the injected agent container.  # get all injected pod $ kubectl get pod -A -lswck-java-agent-injected=true NAMESPACE NAME READY STATUS RESTARTS AGE gateway-system demo-gateway-5bb77f6d85-lt4z7 1/1 Running 0 69s springboot-system demo-springboot-7c89f79885-lkb5j 1/1 Running 0 75s # view detailed state of the injected pod [demo-springboot] $ kubectl describe pod -l app=demo-springboot -n springboot-system ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- ... Normal Created 91s kubelet,kind-control-plane Created container inject-skywalking-agent Normal Started 91s kubelet,kind-control-plane Started container inject-skywalking-agent ... Normal Created 90s kubelet,kind-control-plane Created container springboot Normal Started 90s kubelet,kind-control-plane Started container springboot # view detailed state of the injected pod [demo-gateway]  $ kubectl describe pod -l app=demo-gateway -n gateway-system ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- ... Normal Created 2m20s kubelet,kind-control-plane Created container inject-skywalking-agent Normal Started 2m20s kubelet,kind-control-plane Started container inject-skywalking-agent ... Normal Created 2m20s kubelet,kind-control-plane Created container gateway Normal Started 2m20s kubelet,kind-control-plane Started container gateway Now we can expose the service and watch the data displayed on the web. First of all, we need to get the gateway service and the ui service as follows.  $ kubectl get service service-gateway -n gateway-system NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service-gateway ClusterIP 10.99.181.145 \u0026lt;none\u0026gt; 9999/TCP 9m19s $ kubectl get service default-ui NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default-ui ClusterIP 10.111.39.250 \u0026lt;none\u0026gt; 80/TCP 82m Then open two terminals to expose the service: service-gateway、default-ui.  
$ kubectl port-forward service/service-gateway -n gateway-system 9999:9999 Forwarding from 127.0.0.1:9999 -\u0026gt; 9999 Forwarding from [::1]:9999 -\u0026gt; 9999 $ kubectl port-forward service/default-ui 8090:80 Forwarding from 127.0.0.1:8090 -\u0026gt; 8080 Forwarding from [::1]:8090 -\u0026gt; 8080 Use the following commands to access the spring boot demo 10 times through the spring cloud gateway service.  $ for i in {1..10}; do curl http://127.0.0.1:9999/gateway/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! We can see the Dashboard by accessing http://127.0.0.1:8090.  All services' topology is shown below.  We can see the trace information of gateway-service.  We can see the trace information of backend-service.  6. Concluding remarks If your application is deployed in the Kubernetes platform and requires Skywalking to provide monitoring services, SWCK can help you deploy, upgrade and maintain the Skywalking components in the Kubernetes cluster. In addition to this blog, you can also view swck document and Java agent injector documentation for more information. If you find this project useful, please give SWCK a star! If you have any questions, welcome to ask in Issues or Discussions.\n","excerpt":"content:  Introduction Features Install SWCK Deploy a demo application Verify the injector …","ref":"/blog/2022-04-19-how-to-use-the-java-agent-injector/","title":"How to use the java agent injector?"},{"body":"","excerpt":"","ref":"/zh_tags/user-manual/","title":"User Manual"},{"body":"","excerpt":"","ref":"/tags/user-manual/","title":"User Manual"},{"body":"目录  介绍 主要特点 安装SWCK 部署demo应用 验证注入器 结束语  1. 介绍 1.1 SWCK 是什么? SWCK是部署在 Kubernetes 环境中,为 Skywalking 用户提供服务的平台,用户可以基于该平台使用、升级和维护 SkyWalking 相关组件。\n实际上,SWCK 是基于 kubebuilder 开发的Operator,为用户提供自定义资源( CR )以及管理资源的控制器( Controller ),所有的自定义资源定义(CRD)如下所示:\n JavaAgent OAP UI Storage Satellite Fetcher  1.2 java 探针注入器是什么? 对于 java 应用来说,用户需要将 java 探针注入到应用程序中获取元数据并发送到 Skywalking 后端。为了让用户在 Kubernetes 平台上更原生地使用 java 探针,我们提供了 java 探针注入器,该注入器能够将 java 探针通过 sidecar 方式注入到应用程序所在的 pod 中。 java 探针注入器实际上是一个Kubernetes Mutation Webhook控制器,如果请求中存在 annotations ,控制器会拦截 pod 事件并将其应用于 pod 上。\n2. 主要特点  透明性。用户应用一般运行在普通容器中而 java 探针则运行在初始化容器中,且两者都属于同一个 pod 。该 pod 中的每个容器都会挂载一个共享内存卷,为 java 探针提供存储路径。在 pod 启动时,初始化容器中的 java 探针会先于应用容器运行,由注入器将其中的探针文件存放在共享内存卷中。在应用容器启动时,注入器通过设置 JVM 参数将探针文件注入到应用程序中。用户可以通过这种方式实现 java 探针的注入,而无需重新构建包含 java 探针的容器镜像。 可配置性。注入器提供两种方式配置 java 探针:全局配置和自定义配置。默认的全局配置存放在 configmap 中,用户可以根据需求修改全局配置,比如修改 backend_service 的地址。此外,用户也能通过 annotation 为特定应用设置自定义的一些配置,比如不同服务的 service_name 名称。详情可见 java探针说明书。 可观察性。每个 java 探针在被注入时,用户可以查看名为 JavaAgent 的 CRD 资源,用于观测注入后的 java 探针配置。详情可见 JavaAgent说明。  3. 
安装SWCK 在接下来的几个步骤中,我们将演示如何从0开始搭建单机版的 Kubernetes 集群,并在该平台部署0.6.1版本的 SWCK。\n3.1 工具准备 首先,你需要安装一些必要的工具,如下所示:\n kind,用于创建单机版 Kubernetes集群。 kubectl,用于和Kubernetes 集群交互。  3.2 搭建单机版 Kubernetes集群 在安装完 kind 工具后,可通过如下命令创建一个单机集群。\n 注意!如果你的终端配置了代理,在运行以下命令之前最好先关闭代理,防止一些意外错误的发生。\n $ kind create cluster --image=kindest/node:v1.19.1 在集群创建完毕后,可获得如下的pod信息。\n$ kubectl get pod -A NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-f9fd979d6-57xpc 1/1 Running 0 7m16s kube-system coredns-f9fd979d6-8zj8h 1/1 Running 0 7m16s kube-system etcd-kind-control-plane 1/1 Running 0 7m23s kube-system kindnet-gc9gt 1/1 Running 0 7m16s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 7m23s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 7m23s kube-system kube-proxy-6zbtb 1/1 Running 0 7m16s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 7m23s local-path-storage local-path-provisioner-78776bfc44-jwwcs 1/1 Running 0 7m16s 3.3 安装证书管理器(cert-manger) SWCK 的证书都是由证书管理器分发和验证,需要先通过如下命令安装证书管理器cert-manger。\n$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml 验证 cert-manger 是否安装成功。\n$ kubectl get pod -n cert-manager NAME READY STATUS RESTARTS AGE cert-manager-7dd5854bb4-slcmd 1/1 Running 0 73s cert-manager-cainjector-64c949654c-tfmt2 1/1 Running 0 73s cert-manager-webhook-6bdffc7c9d-h8cfv 1/1 Running 0 73s 3.4 安装SWCK java 探针注入器是 SWCK 中的一个组件,首先需要按照如下步骤安装 SWCK:\n 输入如下命令获取 SWCK 的 yaml 文件并部署在 Kubernetes 集群中。  $ curl -Ls https://archive.apache.org/dist/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz | tar -zxf - -O ./config/operator-bundle.yaml | kubectl apply -f - 检查 SWCK 是否正常运行。  $ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-7f64f996fc-qh8s9 2/2 Running 0 94s 3.5 安装 Skywalking 组件 — OAPServer 和 UI  在 default 命名空间中部署 OAPServer 组件和 UI 组件。  $ kubectl apply -f https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml 查看 OAPServer 组件部署情况。  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS default 1 1 default-oap.default 查看 UI 组件部署情况。  $ kubectl get ui NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS default 1 1 default-ui.default [80] 4. 部署demo应用 在第3个步骤中,我们已经安装好 SWCK 以及相关的 Skywalking 组件,接下来按照全局配置以及自定义配置两种方式,通过两个 java 应用实例,分别演示如何使用 SWCK 中的 java 探针注入器。\n4.1 设置全局配置 当 SWCK 安装完成后,默认的全局配置就会以 configmap 的形式存储在系统命令空间中,可通过如下命令查看。\n$ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. 在 kind 创建的 Kubernetes 集群中, SkyWalking 后端地址和 configmap 中指定的地址可能不同,我们需要使用真正的 OAPServer 组件的地址 default-oap.default 来代替默认的 127.0.0.1 ,可通过修改 configmap 实现。\n$ kubectl edit configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system configmap/skywalking-swck-java-agent-configmap edited $ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. 
collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:default-oap.default:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. 4.2 设置自定义配置 在实际使用场景中,我们需要使用 Skywalking 组件监控不同的 java 应用,因此不同应用的探针配置可能有所不同,比如应用的名称、应用需要使用的插件等。为了支持自定义配置,注入器提供 annotation 来覆盖默认的全局配置。接下来我们将分别以基于 spring boot 以及 spring cloud gateway 开发的两个简单java应用为例进行详细说明,你可以使用这两个应用的源代码构建镜像。\n# build the springboot and springcloudgateway image  $ git clone https://github.com/dashanji/swck-spring-cloud-k8s-demo $ cd swck-spring-cloud-k8s-demo \u0026amp;\u0026amp; make # check the image $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE gateway v0.0.1 51d16251c1d5 48 minutes ago 723MB app v0.0.1 62f4dbcde2ed 48 minutes ago 561MB # load the image into the cluster $ kind load docker-image app:v0.0.1 \u0026amp;\u0026amp; kind load docker-image gateway:v0.0.1 4.3 部署 spring boot 应用  创建 springboot-system 命名空间。  $ kubectl create namespace springboot-system 给 springboot-system 命名空间打上标签使能 java 探针注入器。  $ kubectl label namespace springboot-system swck-injection=enabled 接下来为 spring boot 应用对应的部署文件 springboot.yaml ,其中使用了 annotation 覆盖默认的探针配置,比如 service_name ,将其覆盖为 backend-service 。   需要注意的是,在使用 annotation 覆盖探针配置之前,需要增加 strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; 来使覆盖生效。\n apiVersion:apps/v1kind:Deploymentmetadata:name:demo-springbootnamespace:springboot-systemspec:selector:matchLabels:app:demo-springboottemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;# enable the java agent injectorapp:demo-springbootannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;# enable the agent overlayagent.skywalking.apache.org/agent.service_name:\u0026#34;backend-service\u0026#34;spec:containers:- name:springbootimagePullPolicy:IfNotPresentimage:app:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:demonamespace:springboot-systemspec:type:ClusterIPports:- name:8085-tcpport:8085protocol:TCPtargetPort:8085selector:app:demo-springboot在 springboot-system 命名空间中部署 spring boot 应用。  $ kubectl apply -f springboot.yaml 查看部署情况。  $ kubectl get pod -n springboot-system NAME READY STATUS RESTARTS AGE demo-springboot-7c89f79885-dvk8m 1/1 Running 0 11s 通过 JavaAgent 查看最终注入的 java 探针配置。  $ kubectl get javaagent -n springboot-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-springboot-javaagent app=demo-springboot backend-service default-oap.default:11800 4.4 部署 spring cloud gateway 应用  创建 gateway-system 命名空间。  $ kubectl create namespace gateway-system 给 gateway-system 命名空间打上标签使能 java 探针注入器。  $ kubectl label namespace gateway-system swck-injection=enabled 接下来为 spring cloud gateway 应用对应的部署文件 springgateway.yaml ,其中使用了 annotation 覆盖默认的探针配置,比如 service_name ,将其覆盖为 gateway-service 。此外,在使用 spring cloud gateway 时,我们需要在探针配置中添加 spring cloud gateway 插件。   需要注意的是,在使用 annotation 覆盖探针配置之前,需要增加 strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; 来使覆盖生效。\n 
apiVersion:apps/v1kind:Deploymentmetadata:labels:app:demo-gatewayname:demo-gatewaynamespace:gateway-systemspec:selector:matchLabels:app:demo-gatewaytemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo-gatewayannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;gateway-service\u0026#34;optional.skywalking.apache.org:\u0026#34;cloud-gateway-3.x\u0026#34;# add spring cloud gateway pluginspec:containers:- image:gateway:v0.0.1name:gatewaycommand:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/gateway.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:service-gatewaynamespace:gateway-systemspec:type:ClusterIPports:- name:9999-tcpport:9999protocol:TCPtargetPort:9999selector:app:demo-gateway在 gateway-system 命名空间中部署 spring cloud gateway 应用。  $ kubectl apply -f springgateway.yaml 查看部署情况。  $ kubectl get pod -n gateway-system NAME READY STATUS RESTARTS AGE demo-gateway-758899c99-6872s 1/1 Running 0 15s 通过 JavaAgent 获取最终注入的java探针配置。  $ kubectl get javaagent -n gateway-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-gateway-javaagent app=demo-gateway gateway-service default-oap.default:11800 5. 验证注入器  当完成上述步骤后,我们可以查看被注入pod的详细状态,比如被注入的agent容器。  # get all injected pod $ kubectl get pod -A -lswck-java-agent-injected=true NAMESPACE NAME READY STATUS RESTARTS AGE gateway-system demo-gateway-5bb77f6d85-lt4z7 1/1 Running 0 69s springboot-system demo-springboot-7c89f79885-lkb5j 1/1 Running 0 75s # view detailed state of the injected pod [demo-springboot] $ kubectl describe pod -l app=demo-springboot -n springboot-system ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- ... Normal Created 91s kubelet,kind-control-plane Created container inject-skywalking-agent Normal Started 91s kubelet,kind-control-plane Started container inject-skywalking-agent ... Normal Created 90s kubelet,kind-control-plane Created container springboot Normal Started 90s kubelet,kind-control-plane Started container springboot # view detailed state of the injected pod [demo-gateway]  $ kubectl describe pod -l app=demo-gateway -n gateway-system ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- ... Normal Created 2m20s kubelet,kind-control-plane Created container inject-skywalking-agent Normal Started 2m20s kubelet,kind-control-plane Started container inject-skywalking-agent ... 
Normal Created 2m20s kubelet,kind-control-plane Created container gateway Normal Started 2m20s kubelet,kind-control-plane Started container gateway 现在我们可以将服务绑定在某个端口上并通过 web 浏览器查看采样数据。首先,我们需要通过以下命令获取gateway服务和ui服务的信息。  $ kubectl get service service-gateway -n gateway-system NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service-gateway ClusterIP 10.99.181.145 \u0026lt;none\u0026gt; 9999/TCP 9m19s $ kubectl get service default-ui NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default-ui ClusterIP 10.111.39.250 \u0026lt;none\u0026gt; 80/TCP 82m 接下来分别启动2个终端将service-gateway 以及 default-ui 绑定到本地端口上,如下所示:  $ kubectl port-forward service/service-gateway -n gateway-system 9999:9999 Forwarding from 127.0.0.1:9999 -\u0026gt; 9999 Forwarding from [::1]:9999 -\u0026gt; 9999 $ kubectl port-forward service/default-ui 8090:80 Forwarding from 127.0.0.1:8090 -\u0026gt; 8080 Forwarding from [::1]:8090 -\u0026gt; 8080 使用以下命令通过spring cloud gateway 网关服务暴露的端口来访问 spring boot 应用服务。  $ for i in {1..10}; do curl http://127.0.0.1:9999/gateway/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! 我们可以在 web 浏览器中输入 http://127.0.0.1:8090 来访问探针采集到的数据。  所有服务的拓扑图如下所示。  查看 gateway-service 网关服务的 trace 信息。  查看 backend-service 应用服务的 trace 信息。  6. 结束语 如果你的应用部署在 Kubernetes 平台中,且需要 Skywalking 提供监控服务, SWCK 能够帮助你部署、升级和维护 Kubernetes 集群中的 Skywalking 组件。除了本篇博客外,你还可以查看 SWCK文档 以及 java探针注入器文档 获取更多的信息。如果你觉得这个项目好用,请给 SWCK 一个star! 如果你有任何疑问,欢迎在Issues或者Discussions中提出。\n","excerpt":"目录  介绍 主要特点 安装SWCK 部署demo应用 验证注入器 结束语  1. 介绍 1.1 SWCK 是什么? SWCK是部署在 Kubernetes 环境中,为 Skywalking 用户提供 …","ref":"/zh/2022-04-19-how-to-use-the-java-agent-injector/","title":"如何使用java探针注入器?"},{"body":"Apache SkyWalking 是中国首个,也是目前唯一的个人开源的 Apache 顶级项目。\n作为一个针对分布式系统的应用性能监控 APM 和可观测性分析平台, SkyWalking 提供了媲美商业APM/监控的功能。\nCSDN云原生系列在线峰会第4期,特邀SkyWalking创始人、Apache基金会首位中国董事、Tetrate创始工程师吴晟担任出品人,推出SkyWalking峰会。\nSkyWalking峰会在解读SkyWalking v9新特性的同时,还将首发解密APM的专用数据库BanyanDB,以及分享SkyWalking在原生eBPF探针、监控虚拟机和Kubernetes、云原生函数计算可观测性等方面的应用实践。\n峰会议程:\n14:00-14:30 开场演讲:SkyWalking v9解析 吴晟 Tetrate 创始工程师、Apache 基金会首位中国董事\n14:30-15:00 首发解密:APM的专用数据库BanyanDB\n高洪涛 Tetrate 创始工程师\n15:00-15:30 SkyWalking 原生eBPF探针展示\n刘晗 Tetrate 工程师\n15:30-16:00 Apache SkyWalking MAL实践-监控虚拟机和Kubernetes\n万凯 Tetrate 工程师\n16:00-16:30 SkyWalking助力云原生函数计算可观测\n霍秉杰 青云科技 资深架构师\n峰会视频 B站视频地址\n","excerpt":"Apache SkyWalking 是中国首个,也是目前唯一的个人开源的 Apache 顶级项目。\n作为一个针对分布式系统的应用性能监控 APM 和可观测性分析平台, SkyWalking 提供了媲美 …","ref":"/zh/2022-04-18-meeting/","title":"Apache SkyWalking 2022 峰会"},{"body":"SkyWalking Java Agent 8.10.0 is released. Go to downloads page to find release tars. Changes by Version\n8.10.0  [Important] Namespace represents a subnet, such as kubernetes namespace, or 172.10... Make namespace concept as a part of service naming format. [Important] Add cluster concept, also as a part of service naming format. The cluster name would be  Add as {@link #SERVICE_NAME} suffix. Add as exit span\u0026rsquo;s peer, ${CLUSTER} / original peer Cross Process Propagation Header\u0026rsquo;s value addressUsedAtClient[index=8] (Target address of this request used on the client end).   Support Undertow thread pool metrics collecting. Support Tomcat thread pool metric collect. Remove plugin for ServiceComb Java Chassis 0.x Add Guava EventBus plugin. Fix Dubbo 3.x plugin\u0026rsquo;s tracing problem. Fix the bug that maybe generate multiple trace when invoke http request by spring webflux webclient. 
Support Druid Connection pool metrics collecting. Support HikariCP Connection pool metrics collecting. Support Dbcp2 Connection pool metrics collecting. Ignore the synthetic constructor created by the agent in the Spring patch plugin. Add witness class for vertx-core-3.x plugin. Add witness class for graphql plugin. Add vertx-core-4.x plugin. Renamed graphql-12.x-plugin to graphql-12.x-15.x-plugin and graphql-12.x-scenario to graphql-12.x-15.x-scenario. Add graphql-16plus plugin. [Test] Support to configure plugin test base images. [Breaking Change] Remove deprecated agent.instance_properties configuration. Recommend agent.instance_properties_json. The namespace and cluster would be reported as instance properties, keys are namespace and cluster. Notice, if instance_properties_json includes these two keys, they would be overrided by the agent core. [Breaking Change] Remove the namespace from cross process propagation key. Make sure the parent endpoint in tracing context from existing first ENTRY span, rather than first span only. Fix the bug that maybe causing memory leak and repeated traceId when use gateway-2.1.x-plugin or gateway-3.x-plugin. Fix Grpc 1.x plugin could leak context due to gRPC cancelled. Add JDK ThreadPoolExecutor Plugin. Support default database(not set through JDBC URL) in mysql-5.x plugin.  Documentation  Add link about java agent injector. Update configurations doc, remove agent.instance_properties[key]=value. Update configurations doc, add agent.cluster and update agent.namespace.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.10.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-10-0/","title":"Release Apache SkyWalking Java Agent 8.10.0"},{"body":"Introduction  The most profound technologies are those that disappear. They weave themselves into the fabric of everyday life until they are indistinguishable from it. - Mark Weiser\n Mark Weiser prophetically argued in the late 1980s, that the most far-reaching technologies are those which vanish into thin air. According to Weiser, \u0026ldquo;Whenever people learn something sufficiently well, they cease to be aware of it.\u0026rdquo; This disappearing act, as Weiser claimed, is not limited to technology but rather human psychology. It is this very experience that allows us to escape lower-level thinking into higher-level thinking. For once we are no longer impeded by mundane details, we are then free to focus on new goals.\nThis realization becomes more relevant as APMs become increasingly popular. As more applications are deployed with APMs, the number of abstract representations of the underlying source code also increases. While this provides great value to many non-development roles within an organization, it does pose additional challenges to those in development roles who must translate these representations into concepts they can work with (i.e. source code). Weiser sums this difficultly up rather succinctly when he states that \u0026ldquo;Programmers should no more be asked to work without access to source code than auto-mechanics should be asked to work without looking at the engine.\u0026rdquo;\nStill, APMs collect more information only to produce a plethora of new abstract representations. 
In this article, we will introduce a new concept in Source++, the open-source live-coding platform, specifically designed to allow developers to monitor production applications more intuitively.\nLive Views  And we really don\u0026rsquo;t understand even yet, hundreds of metrics later, what make a program easier to understand or modify or reuse or borrow. I don\u0026rsquo;t think we\u0026rsquo;ll find out by looking away from programs to their abstract interfaces. The answers are in the source code. - Mark Weiser\n As APMs move from the \u0026ldquo;nice to have\u0026rdquo; category to the \u0026ldquo;must-have\u0026rdquo; category, there is a fundamental feature holding them back from ubiquity. They must disappear from consciousness. As developers, we should feel no impulse to open our browsers to better understand the underlying source code. The answers are literally in the source code. Instead, we should improve our tools so the source code conveniently tells us what we need to know. Think of how simple life could be if failing code always indicated how and why it failed. This is the idea behind Source++.\nIn our last blog post, we discussed Extending Apache SkyWalking with non-breaking breakpoints. In that post, we introduced a concept called Live Instruments, which developers can use to easily debug live production applications without leaving their IDE. Today, we will discuss how existing SkyWalking installations can be integrated into your IDE via a new concept called Live Views. Unlike Live Instruments, which are designed for debugging live applications, Live Views are designed for increasing application comprehension and awareness. This is accomplished through a variety of commands which are input into the Live Command Palette.\nLive Command Palette The Live Command Palette (LCP) is a contextual command prompt, included in the Source++ JetBrains Plugin, that allows developers to control and query live applications from their IDE. Opened via keyboard shortcut (Ctrl+Shift+S), the LCP allows developers to easily view metrics relevant to the source code they\u0026rsquo;re currently viewing. The following Live View commands are currently supported:\nCommand: view (overview/activity/traces/logs) The view commands display contextual popups with live operational data of the current source code. These commands allow developers to view traditional SkyWalking operational data filtered down to the relevant metrics.\nCommand: watch log The watch log command allows developers to follow individual log statements of a running application in real-time. This command allows developers to negate the need for manually scrolling through the logs to find instances of a specific log statement.\nCommand: (show/hide) quick stats The show quick stats command displays live endpoint metrics for a quick idea of an endpoint\u0026rsquo;s activity. Using this command, developers can quickly assess the status of an endpoint and determine if the endpoint is performing as expected.\nFuture Work  A good tool is an invisible tool. By invisible, I mean that the tool does not intrude on your consciousness; you focus on the task, not the tool. Eyeglasses are a good tool \u0026ndash; you look at the world, not the eyeglasses. - Mark Weiser\n Source++ aims to extend SkyWalking in such a way that SkyWalking itself becomes invisible. To accomplish this, we plan to support custom developer commands. Developers will be able to build customized commands for themselves, as well as commands to share with their team. 
These commands will recognize context, types, and conditions allowing for a wide possibility of operations. As more commands are added, developers will be able to expose everything SkyWalking has to offer while focusing on what matters most, the source code.\nIf you find these features useful, please consider giving Source++ a try. You can install the plugin directly from your JetBrains IDE, or through the JetBrains Marketplace. If you have any issues or questions, please open an issue. Feedback is always welcome!\n","excerpt":"Introduction  The most profound technologies are those that disappear. They weave themselves into …","ref":"/blog/2022-04-14-integrating-skywalking-with-source-code/","title":"Integrating Apache SkyWalking with source code"},{"body":"Read this post in original language: English\n介绍  最具影响力的技术是那些消失的技术。他们交织在日常生活中,直到二者完全相融。 - 马克韦瑟\n 马克韦瑟在 1980 年代后期预言,影响最深远的技术是那些消失在空气中的技术。\n“当人们足够熟知它,就不会再意识到它。”\n正如韦瑟所说,这种消失的现象不只源于技术,更是人类的心理。 正是这种经验使我们能够摆脱对底层的考量,进入更高层次的思考。 一旦我们不再被平凡的细枝末节所阻碍,我们就可以自如地专注于新的目标。\n随着 APM(应用性能管理系统) 变得越来越普遍,这种认识变得更加重要。随着更多的应用程序开始使用 APM 部署,底层源代码抽象表示的数量也在同步增加。 虽然这为组织内的许多非开发角色提供了巨大的价值,但它确实也对开发人员提出了额外的挑战 - 他们必须将这些表示转化为可操作的概念(即源代码)。 对此,韦瑟相当简洁的总结道,“就像不应要求汽车机械师在不查看引擎的情况下工作一样,我们不应要求程序员在不访问源代码的情况下工作”。\n尽管如此,APM 收集更多信息只是为了产生充足的新抽象表示。 在本文中,我们将介绍开源实时编码平台 Source++ 中的一个新概念,旨在让开发人员更直观地监控生产应用程序。\n实时查看  我们尚且不理解在收集了数百个指标之后,是什么让程序更容易理解、修改、重复使用或借用。 我不认为我们能够通过原理程序本身而到它们的抽象接口中找到答案。答案就在源代码之中。 - 马克韦瑟\n 随着 APM 从“有了更好”转变为“必须拥有”,有一个基本特性阻碍了它们的普及。 它们必须从意识中消失。作为开发人员,我们不应急于打开浏览器以更好地理解底层源代码,答案就在源代码中。 相反,我们应该改进我们的工具,以便源代码直观地告诉我们需要了解的内容。 想想如果失败的代码总是表明它是如何以及为什么失败的,生活会多么简单。这就是 Source++ 背后的理念。\n在我们的上一篇博客中,我们讨论了不间断断点 Extending Apache SkyWalking。 我们介绍了一个名为 Live Instruments(实时埋点) 的概念,开发人员可以使用它轻松调试实时生产应用程序,而无需离开他们的开发环境。 而今天,我们将讨论如何通过一个名为 Live Views(实时查看)的新概念将现有部署的 SkyWalking 集成到您的 IDE 中。 与专为调试实时应用程序而设计的 Live Instruments (实时埋点) 不同,Live Views(实时查看)旨在提高对应用程序的理解和领悟。 这将通过输入到 Live Command Palette (实时命令面板) 中的各种命令来完成。\n实时命令面板 Live Command Palette (LCP) 是一个当前上下文场景下的命令行面板,这个组件包含在 Source++ JetBrains 插件中,它允许开发人员从 IDE 中直接控制和对实时应用程序发起查询。\nLCP 通过键盘快捷键 (Ctrl+Shift+S) 打开,允许开发人员轻松了解与他们当前正在查看的源代码相关的运行指标。\n目前 LCP 支持以下实时查看命令:\n命令:view(overview/activity/traces/Logs)- 查看 总览/活动/追踪/日志 view 查看命令会展示一个与当前源码的实时运维数据关联的弹窗。 这些命令允许开发人员查看根据相关指标过滤的传统 SkyWalking 的运维数据。\n命令:watch log - 实时监听日志 本日志命令允许开发人员实时跟踪正在运行的应用程序的每一条日志。 通过此命令开发人员无需手动查阅大量日志就可以查找特定日志语句的实例。\n命令:(show/hide) quick stats (显示/隐藏)快速统计 show quick stats 显示快速统计命令显示实时端点指标,以便快速了解端点的活动。 使用此命令,开发人员可以快速评估端点的状态并确定端点是否按预期正常运行。\n未来的工作  好工具是无形的。我所指的无形,是指这个工具不会侵入你的意识; 你专注于任务,而不是工具。 眼镜就是很好的工具——你看的是世界,而不是眼镜。 - 马克韦瑟\n Source++ 旨在扩展 SkyWalking,使 SkyWalking 本身变得无需感知。 为此,我们计划支持自定义的开发人员命令。 开发人员将能够构建自定义命令,以及与团队共享的命令。 这些命令将识别上下文、类型和条件,从而允许广泛的操作。 随着更多命令的添加,开发人员将能够洞悉 SkyWalking 所提供的所有功能,同时专注于最重要的源码。\n如果您觉得这些功能有用,请考虑尝试使用 Source++。 您可以通过 JetBrains Marketplace 或直接从您的 JetBrains IDE 安装插件。 如果您有任何疑问,请到这提 issue。\n欢迎随时反馈!\n","excerpt":"Read this post in original language: English\n介绍  最具影响力的技术是那些消失的技术。他们交织在日常生活中,直到二者完全相融。 - …","ref":"/zh/2022-04-14-integrating-skywalking-with-source-code/","title":"将 Apache SkyWalking 与源代码集成"},{"body":"随着无人驾驶在行业的不断发展和技术的持续革新,规范化、常态化的真无人运营逐渐成为事实标准,而要保障各个场景下的真无人业务运作,一个迫切需要解决的现状就是业务链路长,出现问题难以定位。本文由此前于 KubeSphere 直播上的分享整理而成,主要介绍 SkyWalking 的基本概念和使用方法,以及在无人驾驶领域的一系列实践。\nB站视频地址\n行业背景 驭势科技(UISEE)是国内领先的无人驾驶公司。致力于为全行业、全场景提供 AI 驾驶服务,做赋能出行和物流新生态的 AI 驾驶员。早在三年前, 驭势科技已在机场和厂区领域实现了“去安全员” 
无人驾驶常态化运营的重大突破,落地“全场景、真无人、全天候”的自动驾驶技术,并由此迈向大规模商用。要保证各个场景下没有安全员参与的业务运作,我们在链路追踪上做了一系列实践。\n对于无人驾驶来说,从云端到车端的链路长且复杂,任何一层出问题都会导致严重的后果;然而在如下图所示的链路中,准确迅速地定位故障服务并不容易,经常遇到多个服务层层排查的情况。我们希望做到的事情,就是在出现问题以后,能够尽快定位到源头,从而快速解决问题,以绝后患。\n前提条件 SkyWalking 简介 Apache SkyWalking 是一个开源的可观察性平台,用于收集、分析、聚集和可视化来自服务和云原生基础设施的数据。SkyWalking 通过简单的方法,提拱了分布式系统的清晰视图,甚至跨云。它是一个现代的 APM(Application Performence Management),专门为云原生、基于容器的分布式系统设计。它在逻辑上被分成四个部分。探针、平台后端、存储和用户界面。\n 探针收集数据并根据 SkyWalking 的要求重新格式化(不同的探针支持不同的来源)。 平台后端支持数据聚合、分析以及从探针接收数据流的过程,包括 Tracing、Logging、Metrics。 存储系统通过一个开放/可插拔接口容纳 SkyWalking 数据。用户可以选择一个现有的实现,如 ElasticSearch、H2、MySQL、TiDB、InfluxDB,或实现自定义的存储。 UI是一个高度可定制的基于网络的界面,允许 SkyWalking 终端用户可视化和管理 SkyWalking 数据。  综合考虑了对各语言、各框架的支持性、可观测性的全面性以及社区环境等因素,我们选择了 SkyWalking 进行链路追踪。\n链路追踪简介 关于链路追踪的基本概念,可以参看吴晟老师翻译的 OpenTracing 概念和术语 以及 OpenTelemetry。在这里,择取几个重要的概念供大家参考:\n Trace:代表一个潜在的分布式的存在并行数据或者并行执行轨迹的系统。一个 Trace 可以认为是多个 Span 的有向无环图(DAG)。简单来说,在微服务体系下,一个 Trace 代表从第一个服务到最后一个服务经历的一系列的服务的调用链。   Span:在服务中埋点时,最需要关注的内容。一个 Span 代表系统中具有开始时间和执行时长的逻辑运行单元。举例来说,在一个服务发出请求时,可以认为是一个 Span 的开始;在这个服务接收到上游服务的返回值时,可以认为是这个 Span 的结束。Span 之间通过嵌套或者顺序排列建立逻辑因果关系。在 SkyWalking 中,Span 被区分为:  LocalSpan:服务内部调用方法时创建的 Span 类型 EntrySpan:请求进入服务时会创建的 Span 类型(例如处理其他服务对于本服务接口的调用) ExitSpan:请求离开服务时会创建的 Span 类型(例如调用其他服务的接口)   TraceSegment:SkyWalking 中的概念,介于 Trace 和 Span 之间,是一条 Trace 的一段,可以包含多个 Span。一个 TraceSegment 记录了一个线程中的执行过程,一个 Trace 由一个或多个 TraceSegment 组成,一个 TraceSegment 又由一个或多个 Span 组成。 SpanContext:代表跨越进程上下文,传递到下级 Span 的状态。一般包含 Trace ID、Span ID 等信息。 Baggage:存储在 SpanContext 中的一个键值对集合。它会在一条追踪链路上的所有 Span 内全局传输,包含这些 Span 对应的 SpanContext。Baggage 会随着 Trace 一同传播。  SkyWalking 中,上下文数据通过名为 sw8 的头部项进行传递,值中包含 8 个字段,由 - 进行分割(包括 Trace ID,Parent Span ID 等等) 另外 SkyWalking 中还提供名为 sw8-correlation 的扩展头部项,可以传递一些自定义的信息    快速上手 以 Go 为例,介绍如何使用 SkyWalking 在服务中埋点。\n部署 我们选择使用 Helm Chart 在 Kubernetes 中进行部署。\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 埋点 部署完以后,需要在服务中进行埋点,以生成 Span 数据:主要的方式即在服务的入口和出口创建 Span。在代码中,首先我们会创建一个 Reporter,用于向 SkyWalking 后端发送数据。接下来,我们需要创建一个名为 \u0026quot;example\u0026quot; 的 Tracer 实例。此时,我们就可以使用 Tracer 实例来创建 Span。 在 Go 中,主要利用 context.Context 来创建以及传递 Span。\nimport \u0026#34;github.com/SkyAPM/go2sky\u0026#34; // configure to export to OAP server r, err := reporter.NewGRPCReporter(\u0026#34;oap-skywalking:11800\u0026#34;) if err != nil { log.Fatalf(\u0026#34;new reporter error %v \\n\u0026#34;, err) } defer r.Close() tracer, err := go2sky.NewTracer(\u0026#34;example\u0026#34;, go2sky.WithReporter(r)) 服务内部 在下面的代码片段中,通过 context.background() 生成的 Context 创建了一个 Root Span,同时在创建该 Span 的时候,也会产生一个跟这 个 Span 相关联的 Context。利用这个新的 Context,就可以创建一个与 Root Span 相关联的 Child Span。\n// create root span span, ctx, err := tracer.CreateLocalSpan(context.Background()) // create sub span w/ context above subSpan, newCtx, err := tracer.CreateLocalSpan(ctx) 服务间通信 在服务内部,我们会利用 Context 传的递来进行 Span 的创建。但是如果是服务间通信的话,这也是链路追踪最为广泛的应用场景,肯定是没有办法直接传递 Context 参数的。这种情况下,应该怎么做呢?一般来说,SkyWalking 会把 Context 中与当前 Span 相关的键值对进行编码,后续在服务通信时进行传递。例如,在 HTTP 协议中,一般利用请求头进行链路传递。再例如 gRPC 协议,一般想到的就是利用 Metadata 
进行传递。\n在服务间通信的时候,我们会利用 EntrySpan 和 ExitSpan 进行链路的串联。以 HTTP 请求为例,在创建 EntrySpan 时,会从请求头中获取到 Span 上下文信息。而在 ExitSpan 中,则在请求中注入了上下文。这里的上下文是经过了 SkyWalking 编码后的字符串,以便在服务间进行传递。除了传递 Span 信息,也可以给 Span 打上 Tag 进行标记。例如,记录 HTTP 请求的方法,URL 等等,以便于后续数据的可视化。\n//Extract context from HTTP request header `sw8` span, ctx, err := tracer.CreateEntrySpan(r.Context(), \u0026#34;/api/login\u0026#34;, func(key string) (string, error) { return r.Header.Get(key), nil }) // Some operation ... // Inject context into HTTP request header `sw8` span, err := tracer.CreateExitSpan(req.Context(), \u0026#34;/service/validate\u0026#34;, \u0026#34;tomcat-service:8080\u0026#34;, func(key, value string) error { req.Header.Set(key, value) return nil }) // tags span.Tag(go2sky.TagHTTPMethod, req.Method) span.Tag(go2sky.TagURL, req.URL.String()) 但是,我们可能也会用到一些不那么常用的协议,比如说 MQTT 协议。在这些情况下,应该如何传递上下文呢?关于这个问题,我们在自定义插件的部分做了实践。\nUI 经过刚才的埋点以后,就可以在 SkyWalking 的 UI 界面看到调用链。SkyWalking 官方提供了一个 Demo 页面,有兴趣可以一探究竟:\n UI http://demo.skywalking.apache.org\nUsername skywalking Password skywalking\n 插件体系 如上述埋点的方式,其实是比较麻烦的。好在 SkyWalking 官方提供了很多插件,一般情况下,直接接入插件便能达到埋点效果。SkyWalking 官方为多种语言都是提供了丰富的插件,对一些主流框架都有插件支持。由于我们部门使用的主要是 Go 和 Python 插件,下文中便主要介绍这两种语言的插件。同时,由于我们的链路复杂,用到的协议较多,不可避免的是也需要开发一些自定义插件。下图中整理了 Go 与 Python 插件的主要思想,以及我们开发的各框架协议自定义插件的研发思路。\n官方插件 Go · Gin 插件 Gin 是 Go 的 Web 框架,利用其中间件,可以进行链路追踪。由于是接收请求,所以需要在中间件中,创建一个 EntrySpan,同时从请求头中获取 Span 的上下文的信息。获取到上下文信息以后,还需要再进行一步操作:把当前请求请求的上下文 c.Request.Context(), 设置成为刚才创建完 EntrySpan 时生成的 Context。这样一来,这个请求的 Context 就会携带有 Span 上下文信息,可以用于在后续的请求处理中进行后续传递。\nfunc Middleware(engine *gin.Engine, tracer *go2sky.Tracer) gin.HandlerFunc { return func(c *gin.Context) { span, ctx, err := tracer.CreateEntrySpan(c.Request.Context(), getOperationName(c), func(key string) (string, error) { return c.Request.Header.Get(key), nil }) // some operation \tc.Request = c.Request.WithContext(ctx) c.Next() span.End() } } Python · requests Requests 插件会直接修改 Requests 库中的request函数,把它替换成 SkyWalking 自定义的_sw_request函数。在这个函数中,创建了 ExitSpan,并将 ExitSpan 上下文注入到请求头中。在服务安装该插件后,实际调用 Requests 库进行请求的时候,就会携带带有上下文的请求体进行请求。\ndef install(): from requests import Session _request = Session.request def _sw_request(this: Session, method, url, other params...): span = get_context().new_exit_span(op=url_param.path or \u0026#39;/\u0026#39;, peer=url_param.netloc, component=Component.Requests) with span: carrier = span.inject() span.layer = Layer.Http if headers is None: headers = {} for item in carrier: headers[item.key] = item.val span.tag(TagHttpMethod(method.upper())) span.tag(TagHttpURL(url_param.geturl())) res = _request(this, method, url, , other params...n) # some operation return res Session.request = _sw_request 自定义插件 Go · Gorm Gorm 框架是 Go 的 ORM 框架。我们自己在开发的时候经常用到这个框架,因此希望能对通过 Gorm 调用数据库的链路进行追踪。\nGorm 有自己的插件体系,会在数据库的操作前调用BeforeCallback函数,数据库的操作后调用AfterCallback函数。于是在BeforeCallback中,我们创建 ExitSpan,并在AfterCallback里结束先前在BeforeCallback中创建的 ExitSpan。\nfunc (s *SkyWalking) BeforeCallback(operation string) func(db *gorm.DB) { // some operation  return func(db *gorm.DB) { tableName := db.Statement.Table operation := fmt.Sprintf(\u0026#34;%s/%s\u0026#34;, tableName, operation) span, err := tracer.CreateExitSpan(db.Statement.Context, operation, peer, func(key, value string) error { return nil }) // set span from db instance\u0026#39;s context to pass span  db.Set(spanKey, span) } } 需要注意的是,因为 Gorm 的插件分为 Before 与 After 两个 Callback,所以需要在两个回调函数间传递 Span,这样我们才可以在AfterCallback中结束当前的 Span。\nfunc (s *SkyWalking) AfterCallback() func(db *gorm.DB) { // some operation  return func(db 
*gorm.DB) { // get span from db instance\u0026#39;s context  spanInterface, _ := db.Get(spanKey) span, ok := spanInterface.(go2sky.Span) if !ok { return } defer span.End() // some operation  } } Python · MQTT 在 IoT 领域,MQTT 是非常常用的协议,无人驾驶领域自然也相当依赖这个协议。\n以 Publish 为例,根据官方插件的示例,我们直接修改 paho.mqtt 库中的publish函数,改为自己定义的_sw_publish函数。在自定义函数中,创建 ExitSpan,并将上下文注入到 MQTT 的 Payload 中。\ndef install(): from paho.mqtt.client import Client _publish = Client.publish Client.publish = _sw_publish_func(_publish) def _sw_publish_func(_publish): def _sw_publish(this, topic, payload=None, qos=0, retain=False, properties=None): # some operation with get_context().new_exit_span(op=\u0026#34;EMQX/Topic/\u0026#34; + topic + \u0026#34;/Producer\u0026#34; or \u0026#34;/\u0026#34;, peer=peer) as span: carrier = span.inject() span.layer = Layer.MQ span.component = Component.RabbitmqProducer payload = {} if payload is None else json.loads(payload) payload[\u0026#39;headers\u0026#39;] = {} for item in carrier: payload[\u0026#39;headers\u0026#39;][item.key] = item.val # ... return _sw_publish 可能这个方式不是特别优雅:因为我们目前使用 MQTT 3.1 版本,此时尚未引入 Properties 属性(类似于请求头)。直到 MQTT 5.0,才对此有相关支持。我们希望在升级到 MQTT 5.0 以后,能够将上下文注入到 Properties 中进行传递。\n无人驾驶领域的实践 虽然这些插件基本上涵盖了所有的场景,但是链路追踪并不是只要接入插件就万事大吉。在一些复杂场景下,尤其无人驾驶领域的链路追踪,由于微服务架构中涉及的语言环境、中间件种类以及业务诉求通常都比较丰富,导致在接入全链路追踪的过程中,难免遇到各种主观和客观的坑。下面选取了几个典型例子和大家分享。\n【问题一】Kong 网关的插件链路接入 我们的请求在进入服务之前,都会通过 API 网关 Kong,同时我们在 Kong 中定义了一个自定义权限插件,这个插件会调用权限服务接口进行授权。如果只是单独单纯地接入 SkyWalking Kong 插件,对于权限服务的调用无法在调用链中体现。所以我们的解决思路是,直接地在权限插件里进行埋点,而不是使用官方的插件,这样就可以把对于权限服务的调用也纳入到调用链中。\n【问题二】 Context 传递 我们有这样一个场景:一个服务,使用 Gin Web 框架,同时在处理 HTTP 请求时调用上游服务的 gRPC 接口。起初以为只要接入 Gin 的插件以及 gRPC 的插件,这个场景的链路就会轻松地接上。但是结果并不如预期。\n最后发现,Gin 提供一个 Contextc;同时对于某一个请求,可以通过c.Request.Context()获取到请求的 ContextreqCtx,二者不一致;接入 SkyWalking 提供的 Gin 插件后,修改的是reqCtx,使其包含 Span 上下文信息;而现有服务,在 gRPC 调用时传入的 Context 是c,所以一开始 HTTP -\u0026gt; gRPC 无法连接。最后通过一个工具函数,复制了reqCtx的键值对到c后,解决了这个问题。\n【问题三】官方 Python·Redis 插件 Pub/Sub 断路 由于官方提供了 Python ·Redis 插件,所以一开始认为,安装了 Redis 插件,对于一切 Redis 操作,都能互相连接。但是实际上,对于 Pub/Sub 操作,链路会断开。\n查看代码后发现,对于所有的 Redis 操作,插件都创建一个 ExitSpan;也就是说该插件其实仅适用于 Redis 作缓存等情况;但是在我们的场景中,需要进行 Pub/Sub 操作。这导致两个操作都会创建 ExitSpan,而使链路无法相连。通过改造插件,在 Pub 时创建 ExitSpan,在 Sub 时创建 EntrySpan 后,解决该问题。\n【问题四】MQTT Broker 的多种 DataBridge 接入 一般来说,对 MQTT 的追踪链路是 Publisher -\u0026gt; Subscriber,但是在我们的使用场景中,存在 MQTT broker 接收到消息后,通过规则引擎调用其他服务接口这种特殊场景。这便不是 Publisher -\u0026gt; Subscriber,而是 Publisher -\u0026gt; HTTP。\n我们希望能够从 MQTT Payload 中取出 Span 上下文,再注入到 HTTP 的请求头中。然而规则引擎调用接口时,没有办法自定义请求头,所以我们最后的做法是,约定好参数名称,将上下文放到请求体中,在服务收到请求后,从请求体中提取 Context。\n【问题五】Tracing 与 Logging 如何结合 很多时候,只有 Tracing 信息,对于问题排查来说可能还是不充分的,我们非常的期望也能够把 Tracing 和 Logging 进行结合。\n如上图所示,我们会把所有服务的 Tracing 的信息发送到 SkyWalking,同时也会把这个服务产生的日志通过 Fluent Bit 以及 Fluentd 发送到 ElasticSearch。对于这种情况,我们只需要在日志中去记录 Span 的上下文,比如记录 Trace ID 或者 Span ID 等,就可以在 Kibana 里面去进行对于 Trace ID 的搜索,来快速的查看同一次调用链中的日志。\n当然,SkyWalking 它本身也提供了自己的日志收集和分析机制,可以利用 Fluentd 或者 Fluent Bit 等向 SkyWalking 后端发送日志(我们选用了 Fluentd)。当然,像 SkyWalking 后端发送日志的时候,也要符合其日志协议,即可在 UI 上查看相应日志。\n本文介绍了 SkyWalking 的使用方法、插件体系以及实践踩坑等,希望对大家有所帮助。总结一下,SkyWalking 的使用的确是有迹可循的,一般来说我们只要接入插件,基本上可以涵盖大部分的场景,达到链路追踪的目的。但是也要注意,很多时候需要具体问题具体分析,尤其是在链路复杂的情况下,很多地方还是需要根据不同场景来进行一些特殊处理。\n最后,我们正在使用的 FaaS 平台 OpenFunction 近期也接入了 SkyWalking 作为其 链路追踪的解决方案:\nOpenFunction 提供了插件体系,并预先定义了 SkyWalking pre/post 插件;编写函数时,用户无需手动埋点,只需在 OpenFunction 配置文件中简单配置,即可开启 SkyWalking 插件,达到链路追踪的目的。\n 在感叹 OpenFunction 动作迅速的同时,也能够看到 SkyWalking 已成为链路追踪领域的首要选择之一。\n参考资料  OpenTracing 文档:https://wu-sheng.gitbooks.io/opentracing-io/content/pages/spec.html 
SkyWalking 文档:https://skywalking.apache.org/docs/main/latest/readme/ SkyWalking GitHub:https://github.com/apache/skywalking SkyWalking go2sky GitHub:https://github.com/SkyAPM/go2sky SkyWalking Python GitHub:https://github.com/apache/skywalking-python SkyWalking Helm Chart:https://github.com/apache/skywalking-kubernetes SkyWalking Solution for OpenFunction https://openfunction.dev/docs/best-practices/skywalking-solution-for-openfunction/  ","excerpt":"随着无人驾驶在行业的不断发展和技术的持续革新,规范化、常态化的真无人运营逐渐成为事实标准,而要保障各个场景下的真无人业务运作,一个迫切需要解决的现状就是业务链路长,出现问题难以定位。 …","ref":"/zh/2022-04-13-skywalking-in-autonomous-driving/","title":"SkyWalking 在无人驾驶领域的实践"},{"body":"SkyWalking Client JS 0.8.0 is released. Go to downloads page to find release tars.\n Fix fmp metric. Add e2e tese based on skywaling-infra-e2e. Update metric and events. Remove ServiceTag by following SkyWalking v9 new layer model.  ","excerpt":"SkyWalking Client JS 0.8.0 is released. Go to downloads page to find release tars.\n Fix fmp metric. …","ref":"/events/release-apache-skywalking-client-js-0-8-0/","title":"Release Apache SkyWalking Client JS 0.8.0"},{"body":"SkyWalking 9.0.0 is released. Go to downloads page to find release tars.\nSkyWalking v9 is the next main stream of the OAP and UI.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nRocketBot UI has officially been replaced by the Booster UI.\nChanges by Version Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  
OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. 
Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. 
Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.0.0 is released. Go to downloads page to find release tars.\nSkyWalking v9 is the next …","ref":"/events/release-apache-skywalking-apm-9.0.0/","title":"Release Apache SkyWalking APM 9.0.0"},{"body":"SkyWalking CLI 0.10.0 is released. Go to downloads page to find release tars.\nFeatures  Allow setting start and end with relative time (#128) Add some commands for the browser (#126) Add the sub-command service layer to query services according to layer (#133) Add the sub-command layer list to query layer list (#133) Add the sub-command instance get to query single instance (#134) Add the sub-command endpoint get to query single endpoint info (#134) Change the GraphQL method to the v9 version according to the server version (#134) Add normal field to Service entity (#136) Add the command process for query Process metadata (#137) Add the command profiling ebpf for process ebpf profiling (#138) Support getprofiletasklogs query (#125) Support query list alarms (#127) [Breaking Change] Update the command profile as a sub-command profiling trace, and update profiled-analyze command to analysis (#138) profiling ebpf/trace analysis generates the profiling graph HTML on default and saves it to the current work directory (#138)  Bug Fixes  Fix quick install (#131) Set correct go version in publishing snapshot docker image (#124) Stop build kit container after finishing (#130)  Chores  Add cross platform build targets (#129) Update download host (#132)  ","excerpt":"SkyWalking CLI 0.10.0 is released. Go to downloads page to find release tars.\nFeatures  Allow …","ref":"/events/release-apache-skywalking-cli-0-10-0/","title":"Release Apache SkyWalking CLI 0.10.0"},{"body":"SkyWalking is an open-source APM system, including monitoring, tracing, and diagnosing capabilities for distributed systems in Cloud Native architecture. 
It covers monitoring for Linux, Kubernetes, Service Mesh, Serverless/Function-as-a-Service, agent-attached services, and browsers. With data covering traces, metrics, logs, and events, SkyWalking is a full-stack observability APM system.\nOpen Source Promotion Plan is a summer program organized and long-term supported by Open Source Software Supply Chain Promotion Plan. It aims to encourage college students to actively participate in developing and maintaining open-source software and promote the vigorous development of an excellent open-source software community.\nApache SkyWalking has been accepted in OSPP 2022\n   Project Description Difficulty Mentor / E-mail Expectation Tech. Requirements Repository     SkyAPM-PHP Add switches for monitoring items Advanced Level Yanlong He / heyanlong@apache.org Complete project development work C++, GO, PHP https://github.com/SkyAPM/SkyAPM-php-sdk   SkyWalking-Infra-E2E Optimize verifier Normal Level Huaxi Jiang / hoshea@apache.org 1. Continue to verify cases when other cases fail  2. Merge retry outputs  3. Prettify verify results' output Go https://github.com/apache/skywalking-infra-e2e   SkyWalking Metrics anomaly detection with machine learning Advanced Level Yihao Chen / yihaochen@apache.org An MVP version of ML-powered metrics anomaly detection using dynamic baselines and thresholds Python, Java https://github.com/apache/skywalking   SkyWalking Python Collect PVM metrics and send the metrics to OAP backend, configure dashboard in UI Normal Level Zhenxu Ke / kezhenxu94@apache.org Core Python VM metrics should be collected and displayed in SkyWalking. Python https://github.com/apache/skywalking-python issue   SkyWalking BanyanDB Command line tools for BanyanDB Normal Level Hongtao Gao / hanahmily@apache.org Command line tools should access relevant APIs to manage resources and online data. Go https://github.com/apache/skywalking-banyandb   SkyWalking SWCK CRD and controller for BanyanDB Advance Level Ye Cao / dashanji@apache.org CRD and controller provision BanyanDB as the native Storage resource. Go https://github.com/apache/skywalking-swck   SkyAPM-Go2sky Collect golang metrics such as gc, goroutines and threads, and send the the metrics to OAP backend, configure dashboard in UI Normal Level Wei Zhang / zhangwei24@apache.org Core golang metrics should be collected and displayed in SkyWalking. Go https://github.com/SkyAPM/go2sky   SkyWalking Collect system metrics such as system_load, cpu_usage, mem_usage from telegraf and send the metrics to OAP backend, configure dashboard in UI Normal Level Haoyang Liu / liuhaoyangzz@apache.org System metrics should be collected and displayed in SkyWalking. Java https://github.com/apache/skywalking    Mentors could submit pull requests to update the above list.\nContact the community You could send emails to mentor\u0026rsquo;s personal email to talk about the project and details. The official mail list of the community is dev@skywalking.apache.org. You need to subscribe to the mail list to get all replies. 
Send mail to dev-suscribe@skywalking.apache.org and follow the replies.\n","excerpt":"SkyWalking is an open-source APM system, including monitoring, tracing, and diagnosing capabilities …","ref":"/events/summer-ospp-2022/readme/","title":"Open Source Promotion Plan 2022 -- Project List"},{"body":"如果要讨论提高自己系统设计能力的方式,我想大多数人都会选择去阅读优秀开源项目的源代码。近年来我参与了多个监控服务的开发工作,并在工作中大量地使用了 SkyWalking 并对其进行二次开发。在这个过程中,我发现 SkyWalking 天然的因其国产的身份,整套源代码地组织和设计非常符合国人的编程思维。由此我录制了本套课程,旨在和大家分享我的一些浅薄的心得和体会。\n本套课程分为两个阶段,分别讲解 Agent 端和 OAP 端地设计和实现。每个阶段的内容都是以启动流程作为讲解主线,逐步展开相关的功能模块。除了对 SKyWalking 本身内容进行讲解,课程还针对 SKyWalking 使用到的一些较为生僻的知识点进行了补充讲解(如 synthetic、NBAC 机制、自定义类加载器等),以便于大家更清晰地掌握课程内容。\nSkyWalking8.7.0 源码分析 - 视频课程直达链接\n目前课程已更新完 Agent 端的讲解,目录如下:\n 01-开篇和源码环境准备 02-Agent 启动流程 03-Agent 配置加载流程 04-自定义类加载器 AgentClassLoader 05-插件定义体系 07-插件加载 06-定制 Agent 08-什么是 synthetic 09-NBAC 机制 10-服务加载 11-witness 组件版本识别 12-Transform 工作流程 13-静态方法插桩 14-构造器和实例方法插桩 15-插件拦截器加载流程(非常重要) 16-运行时插件效果的字节码讲解 17-JDK 类库插件工作原理 18-服务-GRPCChanelService 19-服务-ServiceManagementClient 20-服务-CommandService 21-服务-SamplingService 22-服务-JVMService 23-服务-KafkaXxxService 24-服务-StatusCheckService 25-链路基础知识 26-链路 ID 生成 27-TraceSegment 28-Span 基本概念 29-Span 完整模型 30-StackBasedTracingSpan 31-ExitSpan 和 LocalSpan 32-链路追踪上下文 TracerContext 33-上下文适配器 ContextManager 34-DataCarrier-Buffer 35-DataCarrier-全解 36-链路数据发送到 OAP  B站视频地址\n","excerpt":"如果要讨论提高自己系统设计能力的方式,我想大多数人都会选择去阅读优秀开源项目的源代码。近年来我参与了多个监控服务的开发工作,并在工作中大量地使用了 SkyWalking 并对其进行二次开发。在这个过程 …","ref":"/zh/2022-03-25-skywalking-source-code-analyzation/","title":"[视频] SkyWalking 8.7.0 源码分析"},{"body":"","excerpt":"","ref":"/zh_tags/course/","title":"Course"},{"body":"SkyWalking NodeJS 0.4.0 is released. Go to downloads page to find release tars.\n Fix mysql2 plugin install error. (#74) Update IORedis Plugin, fill dbinstance tag as host if condition.select doesn\u0026rsquo;t exist. (#73) Experimental AWS Lambda Function support. (#70) Upgrade dependencies to fix vulnerabilities. (#68) Add lint pre-commit hook and migrate to eslint. (#66, #67) Bump up gRPC version, and use its new release repository. (#65) Regard baseURL when in Axios Plugin. (#63) Add an API to access the trace id. (#60) Use agent test tool snapshot Docker image instead of building in CI. (#59) Wrapped IORedisPlugin call in try/catch. (#58)  ","excerpt":"SkyWalking NodeJS 0.4.0 is released. 
Go to downloads page to find release tars.\n Fix mysql2 plugin …","ref":"/events/release-apache-skywalking-nodejs-0-4-0/","title":"Release Apache SkyWalking for NodeJS 0.4.0"},{"body":"大约二十年前我刚开始进入互联网的世界的时候,支撑起整个网络的基础设施,就包括了 Apache 软件基金会(ASF)治下的软件。\nApache Httpd 是开启这个故事的软件,巅峰时期有超过七成的市场占有率,即使是在今天 NGINX 等新技术蓬勃发展的时代,也有三成左右的市场占有率。由 Linux、Apache Httpd、MySQL 和 PHP 组成的 LAMP 技术栈,是开源吞噬软件应用的第一场大型胜利。\n我从 2018 年参与 Apache Flink 开始正式直接接触到成立于 1999 年,如今已经有二十年以上历史的 Apache 软件基金会,并在一年后的 2019 年成为 Apache Flink 项目 Committer 队伍的一员,2020 年成为 Apache Curator 项目 PMC(项目管理委员会)的一员。今年,经由姜宁老师推荐,成为了 Apache Members 之一,也就是 Apache 软件基金会层面的正式成员。\n我想系统性地做一个开源案例库已经很久了。无论怎么分类筛选优秀的开源共同体,The Apache Community 都是无法绕开的。然而,拥有三百余个开源软件项目的 Apache 软件基金会,并不是一篇文章就能讲清楚的案例。本文也没有打算写成一篇长文顾及方方面面,而是启发于自己的新角色,回顾过去近五年在 Apache Community 当中的经历和体验,简单讨论 Apache 的理念,以及这些理念是如何落实到基金会组织、项目组织以及每一个参与者的日常生活事务当中的。\n不过,尽管对讨论的对象做了如此大幅度的缩减,由我自己来定义什么是 Apache 的理念未免也太容易有失偏颇。幸运的是,Apache Community 作为优秀的开源共同体,当然做到了我在《共同创造价值》一文中提到的回答好“我能为你做什么”以及“我应该怎么做到”的问题。Apache Community 的理念之一就是 Open Communications 即开放式讨论,由此产生的公开材料以及基于公开材料整理的文档汗牛充栋。这既是研究 Apache Community 的珍贵材料,也为还原和讨论一个真实的 Apache Community 提出了不小的挑战。\n无论如何,本文将以 Apache 软件基金会在 2020 年发布的纪录片 Trillions and Trillions Served 为主线,结合其他文档和文字材料来介绍 Apache 的理念。\n以人为本 纪录片一开始就讲起了 Apache Httpd 项目的历史,当初的 Apache Group 是基于一个源代码共享的 Web Server 建立起来的邮件列表上的一群人。软件开发当初的印象如同科学研究,因此交流源码在近似科学共同体的开源共同体当中是非常自然的。\n如同 ASF 的联合创始人 Brian Behlendorf 所说,每当有人解决了一个问题或者实现了一个新功能,他出于一种朴素的分享精神,也就是“为什么不把补丁提交回共享的源代码当中呢”的念头,基于开源软件的协作就这样自然发生了。纪录片中有一位提到,她很喜欢 Apache 这个词和 a patchy software 的谐音,共享同一个软件的补丁(patches)就是开源精神最早诞生的形式。\n这是 Apache Community 的根基,我们将会看到这种朴素精神经过发展形成了一个怎样的共同体,在共同体的发展过程当中,这样的根基又是如何深刻地影响了 Apache 理念的方方面面。\nApache Group 的工作模式还有一个重要的特征,那就是每个人都是基于自己的需求修复缺陷或是新增功能,在邮件列表上交流和提交补丁的个人,仅仅只是代表他个人,而没有一个“背后的组织”或者“背后的公司”。因此,ASF 的 How it Works 文档中一直强调,在基金会当中的个体,都只是个体(individuals),或者称之为志愿者(volunteers)。\n我在某公司的分享当中提到过,商业产品可以基于开源软件打造,但是当公司的雇员出现在社群当中的时候,他应该保持自己志愿者的身份。这就像是开源软件可以被用于生产环境或者严肃场景,例如航空器的发射和运行离不开 Linux 操作系统,但是开源软件本身是具有免责条款的。商业公司或专业团队提供服务保障,而开源软件本身是 AS IS 的。同样,社群成员本人可以有商业公司雇员的身份,但是他在社群当中,就是一个志愿者。\n毫无疑问,这种论调当即受到了质疑,因为通常的认知里,我就是拿了公司的钱,就是因为在给这家公司打工,才会去关注这个项目,你非要说我是一个志愿者,我还就真不是一个志愿者,你怎么说?\n其实这个问题,同样在 How it Works 文档中已经有了解答。\n All participants in ASF projects are volunteers and nobody (not even members or officers) is paid directly by the foundation to do their job. There are many examples of committers who are paid to work on projects, but never by the foundation itself. 
Rather, companies or institutions that use the software and want to enhance it or maintain it provide the salary.\n 我当时基于这样的认识,给到质疑的回答是,如果你不想背负起因为你是员工,因此必须响应社群成员的 issue 或 PR 等信息,那么你可以试着把自己摆在一个 volunteer 的角度来观察和参与社群。实际上,你并没有这样的义务,即使公司要求你必须回答,那也是公司的规定,而不是社群的要求。如果你保持着这样的认识和心态,那么社群于你而言,才有可能是一个跨越职业生涯不同阶段的归属地,而不是工作的附庸。\n社群从来不会从你这里索取什么,因为你的参与本身也是自愿的。其他社群成员会感谢你的参与,并且如果相处得好,这会是一个可爱的去处。社群不是你的敌人,不要因为公司下达了离谱的社群指标而把怒火发泄在社群和社群成员身上。压力来源于公司,作为社群成员的你本来可以不用承受这些。\nApache Community 对个体贡献者组成社群这点有多么重视呢?只看打印出来不过 10 页 A4 纸的 How it Works 文档,volunteer 和 individuals 两个词加起来出现了 19 次。The Apache Way 文档中强调的社群特征就包括了 Independence 一条,唯一并列的另一个是经常被引用的 Community over code 原则。甚至,有一个专门的 Project independence 文档讨论了 ASF 治下的项目如何由个体志愿者开发和维护,又为何因此是中立和非商业性的。\nINDIVIDUALS COMPOSE THE ASF 集中体现了 ASF 以人为本的理念。实际上,不止上面提到的 Independence 强调了社群成员个体志愿者的属性,Community over code 这一原则也在强调 ASF 关注围绕开源软件聚集起来的人,包括开发者、用户和其他各种形式的参与者。人是维持社群常青的根本,在后面具体讨论 The Apache Way 的内容的时候还会展开。\n上善若水 众所周知,Apache License 2.0 (APL-2.0) 是所谓的宽容式软件协议。也就是说,不同于 GPL 3.0 这样的 Copyleft 软件协议要求衍生作品需要以相同的条款发布,其中包括开放源代码和自由修改从而使得软件源代码总是可以获取和修改的,Apache License 在协议内容当中仅保留了著作权和商标,并要求保留软件作者的任何声明(NOTICE)。\nASF 在软件协议上的理念是赋予最大程度的使用自由,鼓励用户和开发者参与到共同体当中来,鼓励与上游共同创造价值,共享补丁。“鼓励”而不是“要求”,是 ASF 和自由软件基金会(Free Software Foundation, FSF)最主要的区别。\n这一倾向可以追溯到 Apache Group 建立的基础。Apache Httpd 派生自伊利诺伊大学的 NCSA Httpd 项目,由于使用并开发这个 web server 的人以邮件列表为纽带聚集在一起,通过交换补丁来开发同一个项目。在项目的发起人 Robert McCool 等大学生毕业以后,Apache Group 的发起人们接过这个软件的维护和开发工作。当时他们看到的软件协议,就是一个 MIT License 精神下的宽容式软件协议。自然而然地,Apache Group 维护 Apache Httpd 的时候,也就继承了这个协议。\n后来,Apache Httpd 打下了 web server 的半壁江山,也验证了这一模式的可靠性。虽然有些路径依赖的嫌疑,但是 ASF 凭借近似“上善若水”的宽容理念,在二十年间成功创造了数以百亿计美元价值的三百多个软件项目。\n纪录片中 ASF 的元老 Ted Dunning 提到,在他早期创造的软件当中,他会在宽容式软件协议之上,添加一个商用的例外条款。这就像是著名开源领域律师 Heather Meeker 起草的 The Commons Clause 附加条款。\n Without limiting other conditions in the License, the grant of rights under the License will not include, and the License does not grant to you, the right to Sell the Software.\n 附加 The Commons Clause 条款的软件都不是符合 OSD 定义的开源软件,也不再是原来的协议了。NebulaGraph 曾经在附加 The Commons Clause 条款的情况下声称自己是 APL-2.0 协议许可的软件,当时的 ASF 董事吴晟就提 issue (vesoft-inc/nebula#3247) 指出这一问题。NebulaGraph 于是删除了所有 The Commons Clause 的字样,保证无误地以 APL-2.0 协议许可该软件。\nTed Dunning 随后提到,这样的附加条款实际上严重影响了软件的采用。他意识到自己实际上并不想为此打官司,因此加上这样的条款对他而言是毫无意义的。Ted Dunning 于是去掉了附加条款,而这使得使用他的软件的条件能够简单的被理解,从而需要这些软件的用户能够大规模的采用。“水利万物而不争”,反而是不去强迫和约束用户行为的做法,为软件赢得了更多贡献。\n我仍然很敬佩采用 GPL 系列协议发布高质量软件的开发者,Linux 和 GCC 这样的软件的成功改变了世人对软件领域的自由的认识。然而,FSF 自己也认识到需要提出修正的 LGPL 来改进应用程序以外的软件的发布和采用,例如基础库。\nAPL-2.0 的思路与之不同,它允许任何人以任何形式使用、修改和分发软件,因此 ASF 治下的项目,以及 Linux Foundation 治下采用 APL-2.0 的项目,以及更多个人或组织采用 APL-2.0 的项目,共同构成了强大的开源软件生态,涵盖了应用软件,基础库,开发工具和框架等等各个方面。事实证明,“鼓励”而不是“要求”用户秉持 upstream first 的理念,尽可能参与到开源共同体并交换知识和补丁,共同创造价值,是能够制造出高质量的软件,构建出繁荣的社群和生态的。\n匠人精神 Apache Community 关注开发者的需要。\nApache Group 成立 ASF 的原因,是在 Apache Httpd 流行起来以后,商业公司和社会团体开始寻求和这个围绕项目形成的群体交流。然而,缺少一个正式的法律实体让组织之间的往来缺乏保障和流程。因此,如同纪录片当中提到的,ASF 成立的主要原因,是为了支撑 Apache Httpd 项目。只不过当初的创始成员们很难想到的是,ASF 最终支撑了数百个开源项目。\n不同于 Linux Foundation 是行业联盟,主要目的是为了促进其成员的共同商业利益,ASF 主要服务于开发者,由此支撑开源项目的开发以及开源共同体的发展。\n举例来说,进入 ASF 孵化器的项目都能够在 ASF Infra 的支持下运行自己的 apache.org 域名的网站,将代码托管在 ASF 仓库中上,例如 Apache GitBox Repositories 和 Apache GitHub Organization 等。这些仓库上运行着自由取用的开发基础设施,例如持续集成和持续发布的工具和资源等等。ASF 还维护了自己的邮件列表和文件服务器等一系列资源,以帮助开源项目建立起自己的共同体和发布自己的构件。\n反观 Linux Foundation 的主要思路,则是关注围绕项目聚集起来的供应商,以行业联盟的形式举办联合市场活动扩大影响,协调谈判推出行业标准等等。典型地,例如 CNCF 一直致力于定义云上应用开发的标准,容器虚拟化技术的标准。上述 ASF Infra 关注的内容和资源,则大多需要项目开发者自己解决,这些开发者往往主要为一个或若干个供应商工作,他们解决的方式通常也是依赖供应商出力。\n当然,上面的对比只是为了说明区别,并无优劣之分,也不相互对立。ASF 的创始成员 Brian Behlendorf 同时是 Linux Foundation 下 Open 
Source Security Foundation 的经理,以及 Hyperledger 的执行董事。\nASF 关注开发者的需要,体现出 Apache Community 及其成员对开发者的人文关怀。纪录片中谈到 ASF 治下项目的开发体验时,几乎每个人的眼里都有光。他们谈论着匠人精神,称赞知识分享,与人合作,以及打磨技艺的愉快经历。实际上,要想从 Apache 孵化器中成功毕业,相当部分的 mentor 关注的是围绕开源软件形成的共同体,能否支撑开源软件长久的发展和采用,这其中就包括共同体成员是否能够沉下心来做技术,而不是追求花哨的数字指标和人头凑数。\n讲几个具体的开发者福利。\n每个拥有 @apache.org 邮箱的人,即成为 ASF 治下项目 Committer 或 ASF Member 的成员,JetBrains 会提供免费的全家桶订阅授权码。我从 2019 年成为 Apache Flink 项目的 Committer 以后,已经三年沉浸在 IDEA 和 CLion 的包容下,成为彻底使用 IDE 主力开发的程序员了。\nApache GitHub Organization 下的 GitHub Actions 资源是企业级支持,这部分开销也是由 ASF 作为非营利组织募资和运营得到的资金支付的。基本上,如果你的项目成为 Apache 孵化器项目或顶级项目,那么和 GitHub Actions 集成的 CI 体验是非常顺畅的。Apache SkyWalking 只算主仓库就基于 GitHub Actions 运行了十多个端到端测试作业,Apache Pulsar 也全面基于 GitHub Actions 集成了自己的 CI 作业。\n提到匠人精神,一个隐形的开发者福利,其实是 ASF 的成员尤其是孵化器的 mentor 大多是经验非常丰富的开发者。软件开发不只是写代码,Apache Community 成员之间相互帮助,能够帮你跟上全世界最前沿的开发实践。如何提问题,如何做项目管理,如何发布软件,这些平日里在学校在公司很难有机会接触的知识和实践机会,在 Apache Community 当中只要你积极承担责任,都是触手可得的。\n当然,如何写代码也是开发当中最常交流的话题。我深入接触 Maven 开始于跟 Flink Community 的 Chesnay Schepler 的交流。我对 Java 开发的理解,分布式系统开发的知识,很大程度上也得到了 Apache Flink 和 Apache ZooKeeper 等项目的成员的帮助,尤其是 Till Rohrmann 和 Enrico Olivelli 几位。上面提到的 Ted Dunning 开始攻读博士的时候,我还没出生。但是我在项目当中用到 ZooKeeper 的 multi 功能并提出疑问和改进想法的时候,也跟他有过一系列的讨论。\n谈到技艺就会想起人,这也是 ASF 一直坚持以人为本带来的社群风气。\n我跟姜宁老师在一年前认识,交流 The Apache Way 期间萌生出相互认同。姜宁老师在 Apache 孵化器当中帮助众多项目理解 The Apache Way 并予以实践,德高望重。在今年的 ASF Members 年会当中,姜宁老师也被推举为 ASF Board 的一员。\n我跟吴晟老师在去年认识。他经常会强调开发者尤其是没有强烈公司背景的开发者的视角,多次提到这些开发者是整个开源生态的重要组成部分。他作为 PMC Chair 的 Apache SkyWalking 项目相信“没有下一个版本的计划,只知道会有下一个版本”,这是最佳实践的传播,也是伴随技术的文化理念的传播。SkyWalking 项目出于自己需要,也出于为开源世界添砖加瓦的动机创建的 SkyWalking Eyes 项目,被广泛用在不止于 ASF 治下项目,而是整个开源世界的轻量级的软件协议审计和 License Header 检查上。\n主要贡献在 Apache APISIX 的琚致远同学今年也被推选成为 Apache Members 的一员。他最让我印象深刻的是在 APISIX 社群当中积极讨论社群建设的议题,以及作为 APISIX 发布的 GSoC 项目的 mentor 帮助在校学生接触开源,实践开源,锻炼技艺。巧合的是,他跟我年龄相同,于是我痛失 Youngest Apache Member 的噱头,哈哈。\n或许,参与 Apache Community 就是这样的一种体验。并不是什么复杂的叙事,只是找到志同道合的人做出好的软件。我希望能够为提升整个软件行业付出自己的努力,希望我(参与)制造的软件创造出更大的价值,这里的人看起来大都也有相似的想法,这很好。仅此而已。\n原本还想聊聊 The Apache Way 的具体内容,还有介绍 Apache Incubator 这个保持 Apache Community 理念常青,完成代际传承的重要机制,但是到此为止似乎也很好。Apache Community 的故事和经验很难用一篇文章讲完,这两个话题就留待以后再写吧。\n","excerpt":"大约二十年前我刚开始进入互联网的世界的时候,支撑起整个网络的基础设施,就包括了 Apache 软件基金会(ASF)治下的软件。\nApache Httpd 是开启这个故事的软件,巅峰时期有超过七成的市场 …","ref":"/zh/2022-03-14-the-apache-community/","title":"我眼中的 The Apache Way"},{"body":"SkyWalking Client Rust 0.1.0 is released. Go to downloads page to find release tars.\n","excerpt":"SkyWalking Client Rust 0.1.0 is released. Go to downloads page to find release tars.","ref":"/events/release-apache-skywalking-client-rust-0-1-0/","title":"Release Apache SkyWalking Client Rust 0.1.0"},{"body":"SkyWalking Java Agent 8.9.0 is released. Go to downloads page to find release tars. Changes by Version\n8.9.0  Support Transaction and fix duplicated methods enhancements for jedis-2.x plugin. Add ConsumerWrapper/FunctionWrapper to support CompletableFuture.x.thenAcceptAsync/thenApplyAsync. Build CLI from Docker instead of source codes, add alpine based Docker image. Support set instance properties in json format. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Add doc about system environment variables to configurations.md Avoid ProfileTaskChannelService.addProfilingSnapshot throw IllegalStateException(Queue full) Increase ProfileTaskChannelService.snapshotQueue default size from 50 to 4500 Support 2.8 and 2.9 of pulsar client. Add dubbo 3.x plugin. 
Fix TracePathMatcher should match pattern \u0026ldquo;**\u0026rdquo; with paths end by \u0026ldquo;/\u0026rdquo; Add support returnedObj expression for apm-customize-enhance-plugin Fix the bug that httpasyncclient-4.x-plugin puts the dirty tracing context in the connection context Compatible with the versions after dubbo-2.7.14 Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Support ZGC GC time and count metric collect. (Require 9.0.0 OAP) Support configuration for collecting redis parameters for jedis-2.x and redisson-3.x plugin. Migrate base images to Temurin and add images for ARM. (Plugin Test) Fix compiling issues in many plugin tests due to they didn\u0026rsquo;t lock the Spring version, and Spring 3 is incompatible with 2.x APIs and JDK8 compiling. Support ShardingSphere 5.0.0 Bump up gRPC to 1.44.0, fix relative CVEs.  Documentation  Add a FAQ, Why is -Djava.ext.dirs not supported?.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.9.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-9-0/","title":"Release Apache SkyWalking Java Agent 8.9.0"},{"body":"Apache SkyWalking is an open-source APM for a distributed system, Apache Software Foundation top-level project.\nOn Jan. 28th, we received a License violation report from one of the committers (anonymously). They have a cloud service called Application Performance Monitoring - Distributed Tracing (应用性能监控全链路版). At the Java service monitoring section, it provides this agent download link\n wget https://datarangers.com.cn/apminsight/repo/v2/download/java-agent/apminsight-java-agent_latest.tar.gz\n We downloaded it at 23:15 Jan. 28th UTC+8(Beijing), and archived it at here\nWe have confirmed this is a distribution of SkyWalking Java agent.\nWe listed several pieces of evidence to prove this here, every reader could compare with the official SkyWalking source codes\n The first and the easiest one is agent.config file, which is using the same config keys, and the same config format.  This is the Volcengine\u0026rsquo;s version, and check SkyWalking agent.config In the apmplus-agent.jar, Volcengine\u0026rsquo;s agent core jar, you could easily find several core classes exactly as same as SkyWalking\u0026rsquo;s.  The ComponentsDefine class is unchanged, even with component ID and name. This is Volcengine\u0026rsquo;s version, and check SkyWalking\u0026rsquo;s version\nThe whole code names, package names, and hierarchy structure are all as same as SkyWalking 6.x version.  This is the Volcengine package hierarchy structure, and check the SkyWalking\u0026rsquo;s version\n Volcengine Inc.\u0026rsquo;s team changed all package names, removed the Apache Software Foundation\u0026rsquo;s header, and don\u0026rsquo;t keep Apache Software Foundation and Apache SkyWalking\u0026rsquo;s LICENSE and NOTICE file in their redistribution.\nAlso, we can\u0026rsquo;t find anything on their website to declare they are distributing SkyWalking.\nAll above have proved they are violating the Apache 2.0 License, and don\u0026rsquo;t respect Apache Software Foundation and Apache SkyWalking\u0026rsquo;s IP and Branding.\nWe have contacted their legal team, and wait for their official response.\nResolution On Jan. 30th night, UTC+8, 2022. We received a response from Volcengine\u0026rsquo;s APMPlus team. 
They admitted their violation behaviors, and made the following changes.\n Volcengine\u0026rsquo;s APMPlus service page was updated on January 30th and stated that the agent is a fork version(re-distribution) of Apache SkyWalking agent. Below is the screenshot of Volcengine\u0026rsquo;s APMPlus product page.  Volcengine\u0026rsquo;s APMPlus agent distributions were also updated and include SkyWalking\u0026rsquo;s License and NOTICE now. Below is the screenshot of Volcengine\u0026rsquo;s APMPlus latest agent, you could download from the product page. We keep a copy of their Jan. 30th 2022 at here.  Volcengine\u0026rsquo;s APMPlus team had restored all license headers of SkyWalking in the agent, and the modifications of the project files are also listed in \u0026ldquo;SkyWalking-NOTICE\u0026rdquo;, which you could download from the product page.  We have updated the status to the PMC mail list. This license violation issue has been resolved for now.\n Appendix Inquiries of committers Q: I hope Volcengine Inc. can give a reason for this license issue, not just an afterthought PR. This will not only let us know where the issue is but also avoid similar problems in the future.\nA(apmplus apmplus@volcengine.com):\nThe developers neglected this repository during submitting compliance assessment. Currently, APMPlus team had introduced advanced tools provided by the company for compliance assessment, and we also strengthened training for our developers. In the future, the compliance assessment process will be further improved from tool assessment and manual assessment. ","excerpt":"Apache SkyWalking is an open-source APM for a distributed system, Apache Software Foundation …","ref":"/blog/2022-01-28-volcengine-violates-aplv2/","title":"[Resolved][License Issue] Volcengine Inc.(火山引擎) violates the Apache 2.0 License when using SkyWalking."},{"body":"Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  
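For readers who have not seen the client-side approach in practice, here is a minimal sketch (not taken from the original post) of what it looks like with grpc-go: the DNS resolver returns every backend address behind a name, and the built-in round_robin policy picks a connection per RPC. The target name used below is only a hypothetical OAP service address.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Client-side load balancing sketch: the "dns:///" resolver discovers all
	// addresses behind the name, and round_robin spreads RPCs across them.
	// The target below is a hypothetical OAP service name, not a real endpoint.
	conn, err := grpc.Dial(
		"dns:///oap.skywalking.svc.cluster.local:11800",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
	// Report-service stubs would be created on top of conn here.
}
```

Keeping an equivalent resolver, balancer, and health-check setup inside every data source is exactly the client complexity weighed in the comparison below.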
From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. 
This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. 
There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. 
For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. 
One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","excerpt":"Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data …","ref":"/blog/2022-01-24-scaling-with-apache-skywalking/","title":"Scaling with Apache SkyWalking"},{"body":"SkyWalking Cloud on Kubernetes 0.6.1 is released. Go to downloads page to find release tars.\n Bugs  Fix could not deploy metrics adapter to GKE    ","excerpt":"SkyWalking Cloud on Kubernetes 0.6.1 is released. 
Go to downloads page to find release tars.\n Bugs …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-6-1/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.6.1"},{"body":"随着业务与用户量的持续发展,系统的瓶颈也逐渐出现。尤其在一些节假日、突发的营销活动中,访问量激增可能会导致系统性能下降,甚至造成系统瘫痪。 全链路压测可以很好的帮助我们预先演练高峰流量,从而提前模拟出系统的执行情况,帮助我们预估系统容量。当流量真正来临时,也可以更从容面对。 Apache SkyWalking 联合 Apache APISIX 及 Apache ShardingSphere,三大顶级开源社区通力合作,共同打造生产级可用的全链路压测解决方案,CyborgFlow。\n介绍 CyborgFlow 是一款面向生产级可用的全链路压测解决方案。总共由三个组件组成,如下图所示。\n Flow Gateway: 压测流量网关。当流量到达该组件时,则会将请求认定为压测流量,并将压测流量标识传递至上游服务。 Database Shadow: 数据库中间件。当数据库中间件感知到当前流量为压测流量时,则会将数据库操作路由至影子表中进行操作。 Agent/Dashboard: 分布式监控系统。与业务系统紧密结合,当感知到压测请求后,自动将其标识传递至上游,无需业务代码改造。并且利用分析能力,构建Dashboard来便于查看流量情况。  以此,便覆盖了单个请求的完整生命周期,在网关层构建压测标识,到业务系统透传标识,最终将请求与影子表交互。同时整个流程拥有完整的监控分析。\n原理 依托于三大社区合作,让这一切变得简单易用。下图为全链路压测系统的运行原理,橙色和蓝色分别代表正常流量和压测流量。\nFlow Gateway Flow Gateway 作为压测流量网关,主要负责接收流量,并传递压测流量表示至上游。\n 添加 skywalking插件 构建链路入口。 依据 proxy-rewrite插件 将压测流量标识注入到上游的请求头中。  Agent/Dashboard 该组件中则分为两部分内容说明。\nAgent Agent与业务程序拥有相同生命周期,负责压测流量标识在各个业务系统之间传递,并与 Database Shadow 交互。\n SkyWalking Agent通过读取从Flow Gateway传递的压测流量标识,利用 透传协议 将该标识在应用之间传递。 当准备进行数据库调用时,则通过判断是否包含压测流量标识来决定是否SQL调用时追加压测流量标识(/* cyborg-flow: true */)。 当检测到当前请求包含压测流量标识后,将该数据与Trace绑定,用于Dashboard数据分析。  Dashboard Dashboard 用于压测过程进行中的监控数据分析,并最终以图表的方式进行展示。\n 接收来自Agent中上报的Trace数据,并依据OAL中的Tag过滤器(.filter(tags contain \u0026quot;cyborg-flow:true\u0026quot;))来生成压测与非压测的指标数据。 利用指标数据便可以在Dashboard中创建图表进行观察。  Database Shadow Database Shadow 作为 Proxy 在业务程序与数据库中间完成数据交互,当检测到压测流量时则会将SQL传递至影子表中处理。\n 检测下游传递的数据库语句中是否包含压测流量标识(/* cyborg-flow: true */),存在时则将SQL交给由用户配置的影子表中处理。  快速上手 下面将带你快速将Cyborg Flow集成至你的项目中。相关组件的下载请至 Github Release 中下载,目前已发布 0.1.0 版本。\n部署 Database Shadow  解压缩cyborg-database-shadow.tar.gz。 将 conf/config-shadow.yaml 文件中的业务数据库与影子数据库配置为自身业务中的配置。 启动 Database Shadow服务,启动脚本位于bin/start.sh中。  如需了解更详细的部署参数配置,请参考 官方文档 。\n部署 Cyborg Dashboard  解压缩cyborg-dashboard.tar.gz。 启动后端与UI界面服务,用于链路数据解析与界面展示,启动脚本位于bin/startup.sh中。 接下来就可以通过打开浏览器并访问http://localhost:8080/,此页面为Cyborg Dashboard界面,由于目前尚未部署任何业务程序,所以暂无任何数据。  如需了解更详细的部署参数配置,请参考 后端服务 与 UI界面服务 的安装文档。\n部署 Cyborg Agent 到业务程序中  解压缩cyborg-agent.tar.gz. 修改config/agent.config中的collector.backend_service为 Cyborg Dashboard 中后端地址(默认为11800端口),用于将监控数据上报至 Cyborg Dashboard 。 修改业务程序中与数据库的链接,将其更改为 Database Shadow 中的配置。默认访问端口为3307,用户名密码均为root。 当程序启动时,增加该参数到启动命令中:-jar path/to/cyborg-agent/skywalking-agent.jar。  如需了解更详细的部署参数配置,请参考 Agent安装文档 。\n部署 Flow Gateway  参考 Flow Gateway 快速开始 进行下载 Apache APISIX 并配置相关插件。 基于 APISIX 创建路由文档 进行路由创建。  完成! 最后,通过Flow Gateway访问业务系统资源,便完成了一次压测流量请求。\n 压测流量最终访问至影子表进行数据操作。 如下图所示,通过观察 Cyborg Dashboard 便可以得知压测与非压测请求的执行情况。  总结 在本文中,我们详细介绍了Cyborg Flow中的各个组件的功能、原理,最终搭配快速上手来快速将该系统与自己的业务系统结合。 如果在使用中有任何问题,欢迎来共同讨论。\n","excerpt":"随着业务与用户量的持续发展,系统的瓶颈也逐渐出现。尤其在一些节假日、突发的营销活动中,访问量激增可能会导致系统性能下降,甚至造成系统瘫痪。 全链路压测可以很好的帮助我们预先演练高峰流量,从而提前模拟出 …","ref":"/zh/2022-01-18-cyborg-flow/","title":"Cyborg Flow X SkyWalking: 生产环境全链路压测"},{"body":"SkyWalking Cloud on Kubernetes 0.6.0 is released. Go to downloads page to find release tars.\n Features  Add the Satellite CRD, webhooks and controller   Bugs  Update release images to set numeric user id Fix the satellite config not support number error Use env JAVA_TOOL_OPTIONS to replace AGENT_OPTS   Chores  Add stabilization windows feature in satellite HPA documentation    ","excerpt":"SkyWalking Cloud on Kubernetes 0.6.0 is released. Go to downloads page to find release tars. 
…","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-6-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.6.0"},{"body":"SkyWalking Kong Agent 0.2.0 is released. Go to downloads page to find release tars.\n Establish the SkyWalking Kong Agent.  ","excerpt":"SkyWalking Kong Agent 0.2.0 is released. Go to downloads page to find release tars.\n Establish the …","ref":"/events/release-apache-skywalking-kong-0-2-0/","title":"Release Apache SkyWalking Kong 0.2.0"},{"body":"SkyWalking Satellite 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  Make the gRPC client client_pem_path and client_key_path as an optional config. Remove prometheus-server sharing server plugin. Support let the telemetry metrics export to prometheus or metricsService. Add the resource limit when gRPC server accept connection.  Bug Fixes  Fix the gRPC server enable TLS failure. Fix the native meter protocol message load balance bug.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  Make …","ref":"/events/release-apache-skwaylking-satellite-0-5-0/","title":"Release Apache SkyWalking Satellite 0.5.0"},{"body":"SkyWalking LUA Nginx 0.6.0 is released. Go to downloads page to find release tars.\n fix: skywalking_tracer:finish() will not be called in some case such as upstream timeout.  ","excerpt":"SkyWalking LUA Nginx 0.6.0 is released. Go to downloads page to find release tars.\n fix: …","ref":"/events/release-apache-skywalking-lua-nginx-0.6.0/","title":"Release Apache SkyWalking LUA Nginx 0.6.0"},{"body":"","excerpt":"","ref":"/tags/chaos-engineering/","title":"Chaos Engineering"},{"body":"","excerpt":"","ref":"/tags/chaos-mesh/","title":"Chaos Mesh"},{"body":"Chaos Mesh is an open-source cloud-native chaos engineering platform. You can use Chaos Mesh to conveniently inject failures and simulate abnormalities that might occur in reality, so you can identify potential problems in your system. Chaos Mesh also offers a Chaos Dashboard which allows you to monitor the status of a chaos experiment. However, this dashboard cannot let you observe how the failures in the experiment impact the service performance of applications. This hinders us from further testing our systems and finding potential problems.\n Apache SkyWalking is an open-source application performance monitor (APM), specially designed to monitor, track, and diagnose cloud native, container-based distributed systems. It collects events that occur and then displays them on its dashboard, allowing you to observe directly the type and number of events that have occurred in your system and how different events impact the service performance.\nWhen you use SkyWalking and Chaos Mesh together during chaos experiments, you can observe how different failures impact the service performance.\nThis tutorial will show you how to configure SkyWalking and Chaos Mesh. You’ll also learn how to leverage the two systems to monitor events and observe in real time how chaos experiments impact applications’ service performance.\nPreparation Before you start to use SkyWalking and Chaos Mesh, you have to:\n Set up a SkyWalking cluster according to the SkyWalking configuration guide. Deploy Chao Mesh using Helm. Install JMeter or other Java testing tools (to increase service loads). Configure SkyWalking and Chaos Mesh according to this guide if you just want to run a demo.  
Now, you are fully prepared, and we can cut to the chase.\nStep 1: Access the SkyWalking cluster After you install the SkyWalking cluster, you can access its user interface (UI). However, no service is running at this point, so before you start monitoring, you have to add one and set the agents.\nIn this tutorial, we take Spring Boot, a lightweight microservice framework, as an example to build a simplified demo environment.\n Create a SkyWalking demo in Spring Boot by referring to this document. Execute the command kubectl apply -f demo-deployment.yaml -n skywalking to deploy the demo.  After you finish deployment, you can observe the real-time monitoring results at the SkyWalking UI.\nNote: Spring Boot and SkyWalking have the same default port number: 8080. Be careful when you configure the port forwarding; otherise, you may have port conflicts. For example, you can set Spring Boot’s port to 8079 by using a command like kubectl port-forward svc/spring-boot-skywalking-demo 8079:8080 -n skywalking to avoid conflicts.\nStep 2: Deploy SkyWalking Kubernetes Event Exporter SkyWalking Kubernetes Event Exporter is able to watch, filter, and send Kubernetes events into the SkyWalking backend. SkyWalking then associates the events with the system metrics and displays an overview about when and how the metrics are affected by the events.\nIf you want to deploy SkyWalking Kubernetes Event Explorer with one line of commands, refer to this document to create configuration files in YAML format and then customize the parameters in the filters and exporters. Now, you can use the command kubectl apply to deploy SkyWalking Kubernetes Event Explorer.\nStep 3: Use JMeter to increase service loads To better observe the change in service performance, you need to increase the service loads on Spring Boot. In this tutorial, we use JMeter, a widely adopted Java testing tool, to increase the service loads.\nPerform a stress test on localhost:8079 using JMeter and add five threads to continuously increase the service loads.\nOpen the SkyWalking Dashboard. You can see that the access rate is 100%, and that the service loads reach about 5,300 calls per minute (CPM).\nStep 4: Inject failures via Chaos Mesh and observe results After you finish the three steps above, you can use the Chaos Dashboard to simulate stress scenarios and observe the change in service performance during chaos experiments.\nThe following sections describe how service performance varies under the stress of three chaos conditions:\n  CPU load: 10%; memory load: 128 MB\nThe first chaos experiment simulates low CPU usage. To display when a chaos experiment starts and ends, click the switching button on the right side of the dashboard. 
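The first experiment (10% CPU load, 128 MB of memory stress) can be described as a StressChaos resource along these lines. This is only a sketch: the resource name, namespace, mode, and selector labels are assumptions and must match your own demo deployment.

apiVersion: chaos-mesh.org/v1alpha1
kind: StressChaos
metadata:
  name: demo-cpu10-mem128
  namespace: skywalking
spec:
  mode: all
  selector:
    labelSelectors:
      app: spring-boot-skywalking-demo   # assumed label on the demo pods
  stressors:
    cpu:
      workers: 1
      load: 10          # percentage of CPU occupied per worker
    memory:
      workers: 1
      size: 128MB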
To learn whether the experiment is Applied to the system or Recovered from the system, move your cursor onto the short, green line.\nDuring the time period between the two short, green lines, the service load decreases to 4,929 CPM, but returns to normal after the chaos experiment ends.\n  CPU load: 50%; memory load: 128 MB\nWhen the application’s CPU load increases to 50%, the service load decreases to 4,307 CPM.\n  CPU load: 100%; memory load: 128 MB\nWhen the CPU usage is at 100%, the service load decreases to only 40% of what it would be if no chaos experiments were taking place.\nBecause the process scheduling under the Linux system does not allow a process to occupy the CPU all the time, the deployed Spring Boot Demo can still handle 40% of the access requests even in the extreme case of a full CPU load.\n  Summary By combining SkyWalking and Chaos Mesh, you can clearly observe when and to what extent chaos experiments affect application service performance. This combination of tools lets you observe the service performance in various extreme conditions, thus boosting your confidence in your services.\nChaos Mesh has grown a lot in 2021 thanks to the unremitting efforts of all PingCAP engineers and community contributors. In order to continue to upgrade our support for our wide variety of users and learn more about users’ experience in Chaos Engineering, we’d like to invite you to takethis survey and give us your valuable feedback.\nIf you want to know more about Chaos Mesh, you’re welcome to join the Chaos Mesh community on GitHub or our Slack discussions (#project-chaos-mesh). If you find any bugs or missing features when using Chaos Mesh, you can submit your pull requests or issues to our GitHub repository.\n","excerpt":"Chaos Mesh is an open-source cloud-native chaos engineering platform. You can use Chaos Mesh to …","ref":"/blog/2021-12-21-better-observability-for-chaos-engineering/","title":"Chaos Mesh + SkyWalking: Better Observability for Chaos Engineering"},{"body":"","excerpt":"","ref":"/tags/tutorial/","title":"Tutorial"},{"body":"SkyWalking Cloud on Kubernetes 0.5.0 is released. Go to downloads page to find release tars.\n Features  Add E2E test cases to verify OAPServer, UI, Java agent and Storage components.   Bugs  Fix operator role patch issues Fix invalid CSR signername Fix bug in the configmap controller   Chores  Bump up KubeBuilder to V3 Bump up metric adapter server to v1.21.0 Split mono-project to two independent projects    ","excerpt":"SkyWalking Cloud on Kubernetes 0.5.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-5-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.5.0"},{"body":"We Can integrate Skywalking to Java Application by Java Agent TEC., In typical application, the system runs Java Web applications at the backend of the load balancer, and the most commonly used load balancer is nginx. What should we do if we want to bring it under surveillance? Fortunately, skywalking has provided Nginx agent。 During the integration process, it is found that the examples on the official website only support openresty. For openresty, common modules such as luajit and Lua nginx module have been integrated. Adding skywalking related configurations according to the examples on the official website can take effect. However, when configured for nginx startup, many errors will be reported. We may not want to change a load balancer (nginx to openresty) in order to use skywalking. 
Therefore, we must solve the integration problem between skywalking and nginx.\nNote: openresty is a high-performance web development platform based on nginx + Lua, which solves the short board that is not easy to program in nginx.\nBased on Skywalking-8.7.0 and Nginx-1.20.1\nUpgrade of nginx: The agent plug-in of nginx is written based on Lua, so nginx needs to add support for Lua, Lua nginx module It just provides this function. The Lua nginx module depends on luajit Therefore, first we need to install luajit. In the environment, it is best to choose version 2.1.\nFor nginx, you need to compile the necessary modules yourself. It depends on the following two modules:\nlua-nginx-module The version is lua-nginx-module-0.10.21rc1\nngx_devel_kit The version using ngx_devel_kit-0.3.1\nCompile nginx parameters\nconfigure arguments: --add-module=/path/to/ngx_devel_kit-0.3.1 --add-module=/path/to/lua-nginx-module-0.10.21rc1 --with-ld-opt=-Wl,-rpath,/usr/local/LuaJIT/lib The following is for skywalking-nginx-lua-0.3.0 and 0.3.0+ are described separately.\nskywalking-nginx-lua-0.3.0 After testing, skywalking-nginx-lua-0.3.0 requires the following Lua related modules\nlua-resty-core https://github.com/openresty/lua-resty-core lua-resty-lrucache https://github.com/openresty/lua-resty-lrucache lua-cjson https://github.com/openresty/lua-cjson The dependent Lua modules are as follows:\nlua_package_path \u0026#34;/path/to/lua-resty-core/lua-resty-core-master/lib/?.lua;/path/to/lua-resty-lrucache-0.11/lib/?.lua;/path/to/skywalking-nginx-lua-0.3.0/lib/?.lua;;\u0026#34;; In the process of make \u0026amp; \u0026amp; make install, Lua cjson needs to pay attention to:\nModify a path in makefile\nLUA_INCLUDE_DIR ?= /usr/local/LuaJIT/include/luajit-2.0\nReference:https://blog.csdn.net/ymeputer/article/details/50146143 \nskywalking-nginx-lua-0.3.0+ For skywalking-nginx-lua-0.3.0+, tablepool support needs to be added, but it seems that cjson is not required\nlua-resty-core https://github.com/openresty/lua-resty-core lua-resty-lrucache https://github.com/openresty/lua-resty-lrucache lua-tablepool https://github.com/openresty/lua-tablepool lua_ package_ path \u0026#34;/path/to/lua-resty-core/lua-resty-core-master/lib/?.lua;/path/to/lua-resty-lrucache-0.11/lib/?.lua;/path/to/lua-tablepool-master/lib/?.lua;/path/to/skywalking-nginx-lua-master/lib/?.lua;;\u0026#34;; tablepool introduces two APIs according to its official documents table new and table. Clear requires luajit2.1, there is a paragraph in the skywalking-nginx-lua document that says you can use \u0026lsquo;require (\u0026ldquo;skywalking. 
Util\u0026rdquo;) disable_ Tablepool() ` disable tablepool\nWhen you start nginx, you will be prompted to install openresty\u0026rsquo;s own [luajit version]( https://github.com/openresty/luajit2 )\ndetected a LuaJIT version which is not OpenResty\u0026#39;s; many optimizations will be disabled and performance will be compromised (see https://github.com/openresty/luajit2 for OpenResty\u0026#39;s LuaJIT or, even better, consider using the OpenResty releases from https://openresty.org/en/download.html ) here is successful configuration:\nhttp { lua_package_path \u0026#34;/path/to/lua-resty-core/lua-resty-core-master/lib/?.lua;/path/to/lua-resty-lrucache-0.11/lib/?.lua;/path/to/lua-tablepool-master/lib/?.lua;/path/to/skywalking-nginx-lua-master/lib/?.lua;;\u0026#34;; # Buffer represents the register inform and the queue of the finished segment lua_shared_dict tracing_buffer 100m; # Init is the timer setter and keeper # Setup an infinite loop timer to do register and trace report. init_worker_by_lua_block { local metadata_buffer = ngx.shared.tracing_buffer -- Set service name metadata_buffer:set(\u0026#39;serviceName\u0026#39;, \u0026#39;User Service Name\u0026#39;) -- Instance means the number of Nginx deployment, does not mean the worker instances metadata_buffer:set(\u0026#39;serviceInstanceName\u0026#39;, \u0026#39;User Service Instance Name\u0026#39;) -- type \u0026#39;boolean\u0026#39;, mark the entrySpan include host/domain metadata_buffer:set(\u0026#39;includeHostInEntrySpan\u0026#39;, false) -- set random seed require(\u0026#34;skywalking.util\u0026#34;).set_randomseed() require(\u0026#34;skywalking.client\u0026#34;):startBackendTimer(\u0026#34;http://127.0.0.1:12800\u0026#34;) -- If there is a bug of this `tablepool` implementation, we can -- disable it in this way -- require(\u0026#34;skywalking.util\u0026#34;).disable_tablepool() skywalking_tracer = require(\u0026#34;skywalking.tracer\u0026#34;) } server { listen 8090; location /ingress { default_type text/html; rewrite_by_lua_block { ------------------------------------------------------ -- NOTICE, this should be changed manually -- This variable represents the upstream logic address -- Please set them as service logic name or DNS name -- -- Currently, we can not have the upstream real network address ------------------------------------------------------ skywalking_tracer:start(\u0026#34;upstream service\u0026#34;) -- If you want correlation custom data to the downstream service -- skywalking_tracer:start(\u0026#34;upstream service\u0026#34;, {custom = \u0026#34;custom_value\u0026#34;}) } -- Target upstream service proxy_pass http://127.0.0.1:8080/backend; body_filter_by_lua_block { if ngx.arg[2] then skywalking_tracer:finish() end } log_by_lua_block { skywalking_tracer:prepareForReport() } } } } Original post:https://www.cnblogs.com/kebibuluan/p/14440228.html\n","excerpt":"We Can integrate Skywalking to Java Application by Java Agent TEC., In typical application, the …","ref":"/blog/2021-12-13-skywalking-nginx-agent-integration/","title":"How to integrate skywalking-nginx-lua to Nginx?"},{"body":"SkyWalking 8.9.1 is released. Go to downloads page to find release tars.\nChanges by Version\nProject  Upgrade log4j2 to 2.15.0 for CVE-2021-44228. This CVE only effects on JDK versions below 6u211, 7u201, 8u191 and 11.0.1 according to the post. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true also avoids CVE if your JRE opened JNDI in default.  ","excerpt":"SkyWalking 8.9.1 is released. 
Go to downloads page to find release tars.\nChanges by Version\nProject …","ref":"/events/release-apache-skywalking-apm-8-9-1/","title":"Release Apache SkyWalking APM 8.9.1"},{"body":"In the field of observability, the three main directions of data collection and analysis, Metrics, Logger and Tracing, are usually used to achieve insight into the operational status of applications.\nApache APISIX has integrated Apache SkyWaling Tracing capabilities as early as version 1.4, with features such as error logging and access log collection added in subsequent versions. Now with Apache SkyWalking\u0026rsquo;s support for Metrics, it enables Apache APISIX to implement a one-stop observable solution in integrated mode, covering both logging, metrics and call tracing.\nFeature Development Background Those of you who are familiar with Apache APISIX should know that Apache APISIX produces two types of logs during operation, namely the access log and the error log.\nAccess logs record detailed information about each request and are logs generated within the scope of the request, so they can be directly associated with Tracing. Error logs, on the other hand, are Apache APISIX runtime output log messages, which are application-wide logs, but cannot be 100% associated with requests.\nAt present, Apache APISIX provides very rich log processing plug-ins, including TCP/HTTP/Kafka and other collection and reporting plug-ins, but they are weakly associated with Tracing. Take Apache SkyWalking as an example. We extract the SkyWalking Tracing Conetxt Header from the log records of Apache APISIX and export it to the file system, and then use the log processing framework (fluentbit) to convert the logs into a log format acceptable to SkyWalking. The Tracing Context is then parsed and extracted to obtain the Tracing ID to establish a connection with the Trace.\nObviously, the above way of handling the process is tedious and complicated, and requires additional conversion of log formats. For this reason, in PR#5500 we have implemented the Apache SkyWalking access log into the Apache APISIX plug-in ecosystem to make it easier for users to collect and process logs using Apache SkyWalking in Apache APISIX.\nIntroduction of the New Plugins SkyWalking Logger Pulgin The SkyWalking Logger plugin parses the SkyWalking Tracing Context Header and prints the relevant Tracing Context information to the log, thus enabling the log to be associated with the call chain.\nBy using this plug-in, Apache APISIX can get the SkyWalking Tracing Context and associate it with Tracing even if the SkyWalking Tracing plug-in is not turned on, if Apache SkyWalking is already integrated downstream.\nThe above Content is the log content, where the Apache APISIX metadata configuration is used to collect request-related information. You can later modify the Log Format to customize the log content by Plugin Metadata, please refer to the official documentation.\nHow to Use When using this plugin, since the SkyWalking plugin is \u0026ldquo;not enabled\u0026rdquo; by default, you need to manually modify the plugins section in the conf/default-apisix.yaml file to enable the plugin.\nplugins:...- error-log-logger...Then you can use the SkyWalking Tracing plug-in to get the tracing data directly, so you can verify that the Logging plug-in-related features are enabled and working properly.\nStep 1: Create a route Next, create a route and bind the SkyWalking Tracing plugin and the SkyWalking Logging plugin. 
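Both plugin names used in the route must appear in the enabled plugins list; note that the snippet above shows error-log-logger, which is the plugin covered in the error-log section later in this post. A sketch of the relevant entries follows (defaults vary by APISIX version, so check which plugins are already enabled):

plugins:
  # ... keep the other enabled plugins ...
  - skywalking          # tracing plugin, disabled by default
  - skywalking-logger   # access-log plugin described in this section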
More details of the plugin configuration can be found in the official Apache APISIX documentation.\ncurl -X PUT \u0026#39;http://192.168.0.108:9080/apisix/admin/routes/1001\u0026#39; \\ -H \u0026#39;X-API-KEY: edd1c9f034335f136f87ad84b625c8f1\u0026#39; \\ -H \u0026#39;Content-Type: application/json\u0026#39; \\ -d \u0026#39;{ \u0026#34;uri\u0026#34;: \u0026#34;/get\u0026#34;, \u0026#34;plugins\u0026#34;: { \u0026#34;skywalking\u0026#34;: { \u0026#34;sample_ratio\u0026#34;: 1 }, \u0026#34;skywalking-logger\u0026#34;: { \u0026#34;endpoint_addr\u0026#34;: \u0026#34;http://127.0.0.1:12800\u0026#34; } }, \u0026#34;upstream\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;roundrobin\u0026#34;, \u0026#34;nodes\u0026#34;: { \u0026#34;httpbin.org:80\u0026#34;: 1 } } }\u0026#39; Step 2: Log Processing On the Apache SkyWalking side, you can use LAL (Logger Analysis Language) scripts for log processing, such as Tag extraction, SkyWalking metadata correction, and so on.\nThe main purpose of Tag extraction here is to facilitate subsequent retrieval and to add dependencies to the Metrics statistics. The following code can be used to configure the SkyWalking LAL script to complete the Tag extraction. For more information on how to use the SkyWalking LAL script, please refer to the official Apache SkyWalking documentation.\n# The default LAL script to save all logs, behaving like the versions before 8.5.0.rules:- name:defaultdsl:|filter { json { abortOnFailure false } extractor { tag routeId: parsed.route_id tag upstream: parsed.upstream tag clientIp: parsed.client_ip tag latency: parsed.latency } sink { } }After configuring the above LAL script in SkyWalking OAP Server the following log will be displayed.\nDetails of the expanded log are as follows.\nAs you can see from the above, displaying routeId, upstream and clientIp as key-value pairs is much easier than searching directly in the log body. This is because the Tag format not only supports log display format and search, but also generates information such as Metrics using MAL statistics.\nSkyWalking Error Logger Plugin The error-log-logger plug-in now supports the SkyWalking log format, and you can now use the http-error-log plug-in to quickly connect Apache APISIX error logs to Apache SkyWalking. Currently, error logs do not have access to SkyWalking Tracing Context information, and therefore cannot be directly associated with SkyWalking Tracing.\nThe main reason for the error log to be integrated into SkyWalking is to centralize the Apache APISIX log data and to make it easier to view all observable data within SkyWalking.\nHow to Use Since the error-log-logger plugin is \u0026ldquo;not enabled\u0026rdquo; by default, you still need to enable the plugin in the way mentioned above.\nplugins:...- error-log-logger...Step 1: Bind the route After enabling, you need to bind the plugin to routes or global rules. Here we take \u0026ldquo;bind routes\u0026rdquo; as an example.\ncurl -X PUT \u0026#39;http://192.168.0.108:9080/apisix/admin/plugin_metadata/error-log-logger\u0026#39; \\ -H \u0026#39;X-API-KEY: edd1c9f034335f136f87ad84b625c8f1\u0026#39; \\ -H \u0026#39;Content-Type: application/json\u0026#39; \\ -d \u0026#39;{ \u0026#34;inactive_timeout\u0026#34;: 10, \u0026#34;level\u0026#34;: \u0026#34;ERROR\u0026#34;, \u0026#34;skywalking\u0026#34;: { \u0026#34;endpoint_addr\u0026#34;: \u0026#34;http://127.0.0.1:12800/v3/logs\u0026#34; } }\u0026#39;  Note that the endpoint_addr is the SkyWalking OAP Server address and needs to have the URI (i.e. 
/v3/logs).\n Step 2: LAL Processing In much the same way as the Access Log processing, the logs are also processed by LAL when they reach SkyWalking OAP Server. Therefore, we can still use the SkyWalking LAL script to analyze and process the log messages.\nIt is important to note that the Error Log message body is in text format. If you are extracting tags, you will need to use regular expressions to do this. Unlike Access Log, which handles the message body in a slightly different way, Acces Log uses JSON format and can directly reference the fields of the JSON object using JSON parsing, but the rest of the process is largely the same.\nTags can also be used to optimize the display and retrieval for subsequent metrics calculations using SkyWalking MAL.\nrules: - name: apisix-errlog dsl: | filter { text { regexp \u0026#34;(?\u0026lt;datetime\u0026gt;\\\\d{4}/\\\\d{2}/\\\\d{2} \\\\d{2}:\\\\d{2}:\\\\d{2}) \\\\[(?\u0026lt;level\u0026gt;\\\\w+)\\\\] \\\\d+\\\\#\\\\d+:( \\\\*\\\\d+ \\\\[(?\u0026lt;module\u0026gt;\\\\w+)\\\\] (?\u0026lt;position\u0026gt;.*\\\\.lua:\\\\d+): (?\u0026lt;function\u0026gt;\\\\w+\\\\(\\\\)):)* (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level if (parsed?.module) { tag module: parsed.module tag position: parsed.position tag function: parsed.function } } sink { } } After the LAL script used by SkyWalking OAP Server, some of the Tags will be extracted from the logs, as shown below.\nSummary This article introduces two logging plug-ins for Apache APISIX that integrate with SkyWalking to provide a more convenient operation and environment for logging in Apache APISIX afterwards.\nWe hope that through this article, you will have a fuller understanding of the new features and be able to use Apache APISIX for centralized management of observable data more conveniently in the future.\n","excerpt":"In the field of observability, the three main directions of data collection and analysis, Metrics, …","ref":"/blog/2021-12-08-apisix-integrate-skywalking-plugin/apisix-integrate-skywalking-plugin/","title":"Apache APISIX Integrates with SkyWalking to Create a Full Range of Log Processing"},{"body":"","excerpt":"","ref":"/tags/apisix/","title":"APISIX"},{"body":"","excerpt":"","ref":"/tags/iotdb/","title":"IoTDB"},{"body":"","excerpt":"","ref":"/tags/storage/","title":"Storage"},{"body":"This document is one of the outcomes of Apache IoTDB - Apache SkyWalking Adapter in Summer 2021 of Open Source Promotion Plan. The design and development work is under the guidance of @jixuan1989 from IoTDB and @wu-sheng from SkyWalking. Thanks for their guidance and the help from community.\nStart with SkyWalking Showcase Before using SkyWalking Showcase to quick start with IoTDB, please ensure your have make installed and Docker daemon running.\nPlease run the command below.\ngit clone https://github.com/LIU-WEI-git/skywalking-showcase.git cd skywalking-showcase make deploy.docker FEATURE_FLAGS=single-node.iotdb,agent The former variable single-node.iotdb will deploy only one single node of SkyWalking OAP-v8.9.0, and SkyWalking RocketBot UI-v8.9.0, IoTDB-v0.12.3 as storage. The latter variable agent will deploy micro-services with SkyWalking agent enabled, which include agents for Java, NodeJS server, browser, Python.\nThese shell command maybe take a long while. After pulling and running docker image, please visit http://localhost:9999/. 
Then you will see the SkyWalking UI and data from OAP backend.\nIf you want to use more functions of SkyWalking Showcase, please visit its official document and clone official repository.\nStart Manually If you want to download and run IoTDB and SkyWalking manually, here is the guidance.\nInstall and Run IoTDB Apache IoTDB (Database for Internet of Things) is an IoT native database with high performance for data management and analysis, deployable on the edge and the cloud. It is a time-series database storage option for SkyWalking now. Please ensure your IoTDB server version \u0026gt;= 0.12.3 and a single node version is sufficient. For more installation details, please see official document: IoTDB Quick Start and IoTDB Download Page. You could download it from Docker Hub as well.\nThere is some connection tools for IoTDB\n Command Line Interface(CLI)\nIf iotdb-cli connects successfully, you will see   _____ _________ ______ ______ |_ _| | _ _ ||_ _ `.|_ _ \\ | | .--.|_/ | | \\_| | | `. \\ | |_) | | | / .'`\\ \\ | | | | | | | __'. _| |_| \\__. | _| |_ _| |_.' /_| |__) | |_____|'.__.' |_____| |______.'|_______/ version x.x.x IoTDB\u0026gt; login successfully IoTDB\u0026gt;  IoTDB-Grafana\nIoTDB-Grafana is a connector which we developed to show time series data in IoTDB by reading data from IoTDB and sends to Grafana.  Zeppelin-IoTDB\nYou could enable Zeppelin to operate IoTDB via SQL.   For more ecosystem integration, please visit official documents.\nWe will use iotdb-cli in the next examples.\nRun SkyWalking OAP Server There are some SkyWalking official documents which will help you start. Please ensure your SkyWalking version \u0026gt;= 8.9.0. We recommend you download SkyWalking OAP distributions from its official download page or pull docker images.\n SkyWalking Download Page SkyWalking Backend Setup SkyWalking UI Setup  Before starting SkyWalking backend, please edit /config/application.yml, set storage.selector: ${SW_STORAGE:iotdb} or set environment variable SW_STORAGE=iotdb. All config options about IoTDB is following, please edit it or not according to your local environment:\nstorage:selector:${SW_STORAGE:iotdb}iotdb:host:${SW_STORAGE_IOTDB_HOST:127.0.0.1}rpcPort:${SW_STORAGE_IOTDB_RPC_PORT:6667}username:${SW_STORAGE_IOTDB_USERNAME:root}password:${SW_STORAGE_IOTDB_PASSWORD:root}storageGroup:${SW_STORAGE_IOTDB_STORAGE_GROUP:root.skywalking}sessionPoolSize:${SW_STORAGE_IOTDB_SESSIONPOOL_SIZE:16}fetchTaskLogMaxSize:${SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE:1000}# the max number of fetch task log in a requestVisit IoTDB Server and Query SkyWalking Data There are some official document about data model and IoTDB-SQL language:\n Data Model and Terminology DDL (Data Definition Language) DML (Data Manipulation Language) Maintenance Command  Example Model and Insert SQL Before giving any example, we set time display type as long (CLI: set time_display_type=long).\nIn our design, we choose id, entity_id, node_type, service_id, service_group, trace_id as indexes and fix their appearance order. The value of these indexed fields store in the path with double quotation mark wrapping, just like \u0026quot;value\u0026quot;.\nThere is a model named service_traffic with fields id, time_bucket, name, node_type, service_group. In order to see its data, we could use a query SQL: select * from root.skywalking.service_traffic align by device. root.skywalking is the default storage group and align by device could return a more friendly result. 
The query result is following:\n   Time Device name     1637919540000 root.skywalking.service_traffic.\u0026ldquo;YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo;.\u0026ldquo;0\u0026rdquo;.\u0026quot;\u0026quot; application-demo   1637919600000 root.skywalking.service_traffic.\u0026ldquo;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026rdquo;.\u0026ldquo;0\u0026rdquo;.\u0026quot;\u0026quot; application-demo-mysql    Another example model is service_cpm which has fields id, service_id, total, value. Query its data with select * from root.skywalking.service_cpm align by device. The result is following:\n   Time Device total value     1637919540000 root.skywalking.service_cpm.\u0026ldquo;202111261739_YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo;.\u0026ldquo;YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo; 2 2   1637919600000 root.skywalking.service_cpm.\u0026ldquo;202111261740_YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026rdquo;.\u0026ldquo;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026rdquo; 1 1   1637917200000 root.skywalking.service_cpm.\u0026ldquo;2021112617_YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo;.\u0026ldquo;YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo; 2 0    For the first data of service_traffic, the mapping between fields and values is following. Notice, all time_bucket are converted to timestamp(also named time in IoTDB) and the value of all indexed fields are stored in the Device path.\n   Field Value     id(indexed) YXBwbGljYXRpb24tZGVtbw==.1   time(converted from time_bucket) 1637919540000   name application-demo   node_type(indexed) 0   service_group(indexed) (empty string)    You could use the SQL below to insert example data.\ncreate storage group root.skywalking insert into root.skywalking.service_traffic.\u0026#34;YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;.\u0026#34;0\u0026#34;.\u0026#34;\u0026#34;(timestamp, name) values(1637919540000, \u0026#34;application-demo\u0026#34;) insert into root.skywalking.service_traffic.\u0026#34;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026#34;.\u0026#34;0\u0026#34;.\u0026#34;\u0026#34;(timestamp, name) values(1637919600000, \u0026#34;application-demo-mysql\u0026#34;) insert into root.skywalking.service_cpm.\u0026#34;202111261739_YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;.\u0026#34;YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;(timestamp, total, value) values(1637919540000, 2, 2) insert into root.skywalking.service_cpm.\u0026#34;202111261740_YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026#34;.\u0026#34;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026#34;(timestamp, total, value) values(1637919600000, 1, 1) insert into root.skywalking.service_cpm.\u0026#34;2021112617_YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;.\u0026#34;YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;(timestamp, total, value) values(1637917200000, 2, 0) Query SQL Now, let\u0026rsquo;s show some query examples.\n  Filter Query\n If you want to query name field of service_traffic, the query SQL is select name from root.skywalking.service_traffic align by device. If you want to query service_traffic with id = YXBwbGljYXRpb24tZGVtbw==.1, the query SQL is select * from root.skywalking.service_traffic.\u0026quot;YXBwbGljYXRpb24tZGVtbw==.1\u0026quot; align by device. If you want to query service_traffic with name = application-demo, the query SQL is select * from root.skywalking.service_traffic where name = \u0026quot;application-demo\u0026quot; align by device. Combining the above three, the query SQL is select name from root.skywalking.service_traffic.\u0026quot;YXBwbGljYXRpb24tZGVtbw==.1\u0026quot; where name = \u0026quot;application-demo\u0026quot; align by device.    
Fuzzy Query\n If you want to query service_traffic with name contains application, the query SQL is select * from root.skywalking.service_traffic.*.*.* where name like '%application%' align by device.    Aggregate Query\nIoTDB only supports group by time and group by level. The former please refer to Down-Frequency Aggregate Query and the latter please refer to Aggregation By Level. Here is an example about group by level: select sum(total) from root.skywalking.service_cpm.*.* group by level = 3. We couldn\u0026rsquo;t get a expected result since our design make the data of one model spread across multiple devices. So we don\u0026rsquo;t recommend using group by level to query SkyWalking backend data. You could refer to the Discussion #3907 in IoTDB community for more details.\n  Sort Query\nIoTDB only supports order by time, but we could use its select function which contains top_k and bottom_k to get top/bottom k data. For example, select top_k(total, \u0026quot;k\u0026quot;=\u0026quot;3\u0026quot;) from root.skywalking.service_cpm.*.*. We don\u0026rsquo;t recommend using this to query SkyWalking backend data since its result is not friendly. You could refer to the Discussion #3888 in IoTDB community for more details.\n  Pagination Query\nWe could use limit and offset to paginate the query result. Please refer to Row and Column Control over Query Results.\n  Delete\n Delete storage group:  delete storage group root.skywalking   Delete timeseries:  delete timeseries root.skywalking.service_cpm.*.*.total delete timeseries root.skywalking.service_cpm.\u0026quot;202111261739_YXBwbGljYXRpb24tZGVtbw==.1\u0026quot;.\u0026quot;YXBwbGljYXRpb24tZGVtbw==.1\u0026quot;.total   Delete data:  delete from root.skywalking.service_traffic delete from root.skywalking.service_traffic where time \u0026lt; 1637919540000      ","excerpt":"This document is one of the outcomes of Apache IoTDB - Apache SkyWalking Adapter in Summer 2021 of …","ref":"/blog/2021-12-08-application-guide-of-iotdb-storage-option/","title":"The Application Guide of Apache IoTDB Storage Option"},{"body":"Non-breaking breakpoints are breakpoints specifically designed for live production environments. With non-breaking breakpoints, reproducing production bugs locally or in staging is conveniently replaced with capturing them directly in production.\nLike regular breakpoints, non-breaking breakpoints can be:\n placed almost anywhere added and removed at will set to fire on specific conditions expose internal application state persist as long as desired (even between application reboots)  The last feature is especially useful given non-breaking breakpoints can be left in production for days, weeks, and even months at a time while waiting to capture behavior that happens rarely and unpredictably.\nHow do non-breaking breakpoints work? If you\u0026rsquo;re familiar with general distributed tracing concepts, such as \u0026ldquo;traces\u0026rdquo; and \u0026ldquo;spans\u0026rdquo;, then you\u0026rsquo;re already broadly familiar with how non-breaking breakpoints work. Put simply, non-breaking breakpoints are small fragments of code added during runtime that, upon the proper conditions, save a portion of the application\u0026rsquo;s current state, and resume normal execution. In SkyWalking, this can be implemented by simply opening a new local span, adding some tags, and closing the local span.\nWhile this process is relatively simple, the range of functionality that can be achieved through this technique is quite impressive. 
Save the current and global variables to create a non-breaking breakpoint; add the ability to format log messages to create just-in-time logging; add the ability to trigger metric telemetry to create real-time KPI monitoring. If you keep moving in this direction, you eventually enter the realm of live debugging/coding, and this is where Source++ comes in.\nLive Coding Platform Source++ is an open-source live coding platform designed for production environments, powered by Apache SkyWalking. Using Source++, developers can add breakpoints, logs, metrics, and distributed tracing to live production software in real-time on-demand, right from their IDE or CLI. While capable of stand-alone deployment, the latest version of Source++ makes it easier than ever to integrate into existing Apache SkyWalking installations. This process can be completed in a few minutes and is easy to customize for your specific needs.\nFor a better idea of how Source++ works, take a look at the following diagram:\nIn this diagram, blue components represent existing SkyWalking architecture, black components represent new Source++ architecture, and the red arrows show how non-breaking breakpoints make their way from production to IDEs. A process that is facilitated by Source++ components: Live Probe, Live Processors, Live Platform, and Live Interface.\nLive Probe The Live Probe is currently available for JVM and Python applications. It runs alongside the SkyWalking agent and is responsible for dynamically adding and removing code fragments based on valid instrumentation requests from developers. These code fragments in turn make use of the SkyWalking agent\u0026rsquo;s internal APIs to facilitate production instrumentation.\nLive Processors Live Processors are responsible for finding, extracting, and transforming data found in distributed traces produced via live probes. They run alongside SkyWalking collectors and implement additional post-processing logic, such as PII redaction. Live processors work via uniquely identifiable tags (prefix spp.) added previously by live probes.\nOne could easily view a non-breaking breakpoint ready for processing using Rocketbot, however, it will look like this:\nEven though the above does not resemble what\u0026rsquo;s normally thought of as a breakpoint, the necessary information is there. With live processors added to your SkyWalking installation, this data is refined and may be viewed more traditionally via live interfaces.\nLive Platform The Live Platform is the core part of the Source++ architecture. Unlike the live probe and processors, the live platform does not have a direct correlation with SkyWalking components. It is a standalone server responsible for validating and distributing production breakpoints, logs, metrics, and traces. Each component of the Source++ architecture (probes, processors, interfaces) communicates with each other through the live platform. It is important to ensure the live platform is accessible to all of these components.\nLive Interface Finally, with all the previous parts installed, we\u0026rsquo;re now at the component software developers will find the most useful. A Live Interface is what developers use to create, manage, and view non-breaking breakpoints, and so on. There are a few live interfaces available:\n JetBrains Plugin CLI  With the Live Instrument Processor enabled, and the JetBrains Plugin installed, non-breaking breakpoints appear as such:\nThe above should be a sight far more familiar to software developers. 
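As a rough illustration of the local-span technique just described, the snippet below uses the SkyWalking Java agent tracing toolkit to open a local span and attach captured values as tags. It is only a sketch of the idea, not the code Source++ injects at runtime; the class, method, and tag names are made up for the example.

import org.apache.skywalking.apm.toolkit.trace.ActiveSpan;
import org.apache.skywalking.apm.toolkit.trace.Trace;

public class CheckoutService {  // illustrative class, not part of Source++

    // @Trace asks the attached SkyWalking agent to open a local span around
    // this method and close it when the method returns.
    @Trace(operationName = "CheckoutService/placeOrder")
    public void placeOrder(String orderId, int itemCount) {
        // Tagging the active local span records application state without
        // pausing execution, which is the essence of a non-breaking breakpoint.
        ActiveSpan.tag("order.id", orderId);
        ActiveSpan.tag("item.count", String.valueOf(itemCount));
        // ... normal business logic continues here ...
    }
}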
Beyond the fact that you can\u0026rsquo;t step through execution, non-breaking breakpoints look and feel just like regular breakpoints.\n For more details and complete setup instructions, please visit:\n https://github.com/sourceplusplus/deploy-skywalking  ","excerpt":"Non-breaking breakpoints are breakpoints specifically designed for live production environments. …","ref":"/blog/2021-12-06-extend-skywalking-with-nbb/","title":"Extending Apache SkyWalking with non-breaking breakpoints"},{"body":"SkyWalking Kubernetes Helm Chart 4.2.0 is released. Go to downloads page to find release tars.\n Fix Can\u0026rsquo;t evaluate field Capabilities in type interface{}. Update the document let that all docker images use the latest version. Fix missing nodes resource permission when the OAP using k8s-mesh analyzer. Fix bug that customized config files are not loaded into es-init job. Add skywalking satellite support.  ","excerpt":"SkyWalking Kubernetes Helm Chart 4.2.0 is released. Go to downloads page to find release tars.\n Fix …","ref":"/events/release-apache-skywalking-kubernetes-helm-chart-4.2.0/","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.2.0"},{"body":"SkyWalking Satellite 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Support partition queue. Using byte array to transmit the ALS streaming, Native tracing segment and log, reducing en/decoding cpu usage. Support using the new ALS protocol to transmit the Envoy accesslog. Support transmit the Native Meter Batch protocol.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skwaylking-satellite-0-4-0/","title":"Release Apache SkyWalking Satellite 0.4.0"},{"body":"SkyWalking 8.9.0 is released. Go to downloads page to find release tars.\nChanges by Version\nProject  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. 
Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.9.0 is released. 
Go to downloads page to find release tars.\nChanges by Version\nProject …","ref":"/events/release-apache-skywalking-apm-8-9-0/","title":"Release Apache SkyWalking APM 8.9.0"},{"body":"Chaos Mesh 是一个开源的云原生混沌工程平台,借助 Chaos Mesh,用户可以很方便地对服务注入异常故障,并配合 Chaos Dashboard 实现对整个混沌实验运行状况的监测 。然而,对混沌实验运行情况的监控并不能告诉我们应用服务性能的变化。从系统可观测性的角度来说,我们可能无法单纯通过混沌实验的动态了解故障的全貌,这也阻碍了我们对系统和故障的进一步了解,调试。\nApache SkyWalking 是一个开源的 APM (Application Performance Monitor) 系统,可以对云原生服务提供监控、跟踪、诊断等功能。SkyWalking 支持收集 Event(事件),可在 Dashboard 中查看分布式系统中发生了哪些事件,并可以直观地观测到不同 Event 对服务性能造成的影响,和 Chaos Mesh 结合使用,便可为混沌实验造成的服务影响提供监控。\n本教程将分享如何通过将 SkyWalking 和 Chaos Mesh 结合,运用 Event 信息监控,实时了解混沌实验对应用服务性能造成的影响。\n准备工作  创建 Skywalking 集群,具体可以参考 SkyWalking Readme。 部署 Chaos Mesh,推荐使用 helm 安装。 安装 Java 测试工具 JMeter (其他工具亦可,仅用于增加服务负载) 如果仅作为 Demo 使用,可以参考 chaos-mesh-on-skywalking 这个仓库进行配置  Step 1 - 访问 SkyWalking 集群 安装 SkyWalking 后,就可以访问它的UI了,但因为还没有服务进行监控,这里还需要添加服务并进行 Agent 埋点设置。本文选用轻量级微服务框架 Spring Boot 作为埋点对象搭建一个简易 Demo 环境。\n可以参考 chaos-mesh-on-skywalking 仓库中的 demo-deployment.yaml 文件创建。之后使用 kubectl apply -f demo-deployment.yaml -n skywalking 进行部署。部署成功后即可在SkyWalking-UI 中看到实时监控的服务信息。\n注意:因为 Spring Boot 的端口也是8080,在端口转发时要避免和 **SkyWalking **的端口冲突,比如使用 kubectl port-forward svc/spring-boot-skywalking-demo 8079:8080 -n skywalking 。\nStep 2 - 部署 SkyWalking Kubernetes Event Exporter SkyWalking Kubernetes Event Exporter 可以用来监控和过滤 Kubernetes 集群中的 Event ,通过设置过滤条件筛选出需要的 Event,并将这些 Event 发送到 SkyWalking 后台, 这样就可以通过 SkyWalking 观察到你的 Kubernetes 集群中的Event 何时影响到服务的各项指标了。如果想要一条命令部署,可以参考此配置创建 yaml 文件 ,设置 filters 和 exporters 的参数后,使用 kubectl apply 进行部署。\nStep 3 - 使用 JMeter 对服务加压 为了达到更好的观察效果,需要先对 Spring Boot 增加服务负载,本文选择使用 JMeter 这一使用广泛的 Java 压力测试工具来对服务加压。\n通过 JMeter 对 localhost:8079 进行压测,添加5个线程持续进行加压。 通过 SkyWalking Dashboard 可以看到,目前访问成功率为100%,服务负载大约在5300 CPM (Calls Per Minute)。\nStep 4 - Chaos Mesh 注入故障,观察效果 做好了这些准备工便可以使用 Chaos Dashboard 进行压力场景模拟,并在实验进程中观察服务性能的变化。\n以下使用不同 Stress Chaos 配置,观测对应服务性能变化:\n  CPU 负载10%,内存负载128 MB 。\n混沌实验开始和结束的时间点标记可以通过右侧开关显示在在图表中,将鼠标移至短线出可以看到是实验的 Applied 或 Recovered。可以看到两个绿色短线之间的时间段里,服务处理调用的的性能降低,为4929 CPM,在实验结束后,性能恢复正常。\n  CPU load 增加到50%,发现服务负载进一步降低至4307 CPM。\n  极端情况下 CPU 负载达到100%,服务负载降至无混沌实验时的40% 。\n  因为 Linux 系统下的进程调度并不会让某个进程一直占据 CPU,所以即使实在 CPU 满载的极端情况下,该部署的 Spring Boot Demo 仍可以处理40%的访问请求。\n小结 通过 SkyWalking 与 Chaos Mesh 的结合,我们可以清晰的观察到服务在何时受到混沌实验的影响,在注入混沌后服务的表现性能又将如何。SkyWalking 与 Chaos Mesh 的结合使得我们轻松地观察到了服务在各种极端情况下的表现,增强了我们对服务的信心。\nChaos Mesh 在 2021 年成长了许多。为了更多地了解用户在实践混沌工程方面的经验,以便持续完善和提升对用户的支持,社区发起了 Chaos Mesh 用户问卷调查,点击【阅读原文】参与调查,谢谢!\nhttps://www.surveymonkey.com/r/X78WQPC\n欢迎大家加入 Chaos Mesh 社区,加入 CNCF Slack (slack.cncf.io) 底下的 Chaos Mesh 频道: project-chaos-mesh,一起参与到项目的讨论与开发中来!大家在使用过程发现 Bug 或缺失什么功能,也可以直接在 GitHub (https://github.com/chaos-mesh) 上提 Issue 或 PR。\n","excerpt":"Chaos Mesh 是一个开源的云原生混沌工程平台,借助 Chaos Mesh,用户可以很方便地对服务注入异常故障,并配合 Chaos Dashboard 实现对整个混沌实验运行状况的监测 。然而, …","ref":"/zh/2021-11-29-better-observability-for-chaos-engineering/","title":"Chaos Mesh X SkyWalking: 可观测的混沌工程"},{"body":"This plugin is one of the outcomes of Apache IoTDB - Apache SkyWalking Adapter in Summer 2021 of Open Source Promotion Plan. The design and development work is under the guidance of @jixuan1989 from IoTDB and @wu-sheng from SkyWalking. Thanks for their guidance and the help from community.\nIoTDB Storage Plugin Setup IoTDB is a time-series database from Apache, which is one of the storage plugin options. If you want to use iotdb as SkyWalking backend storage, please refer to the following configuration.\nIoTDB storage plugin is still in progress. 
Its efficiency will improve in the future.\nstorage:selector:${SW_STORAGE:iotdb}iotdb:host:${SW_STORAGE_IOTDB_HOST:127.0.0.1}rpcPort:${SW_STORAGE_IOTDB_RPC_PORT:6667}username:${SW_STORAGE_IOTDB_USERNAME:root}password:${SW_STORAGE_IOTDB_PASSWORD:root}storageGroup:${SW_STORAGE_IOTDB_STORAGE_GROUP:root.skywalking}sessionPoolSize:${SW_STORAGE_IOTDB_SESSIONPOOL_SIZE:16}fetchTaskLogMaxSize:${SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE:1000}# the max number of fetch task log in a requestAll connection related settings, including host, rpcPort, username, and password are found in application.yml. Please ensure the IoTDB version \u0026gt;= 0.12.3.\nIoTDB Introduction Apache IoTDB (Database for Internet of Things) is an IoT native database with high performance for data management and analysis, deployable on the edge and the cloud. It is a time-series database donated by Tsinghua University to Apache Foundation.\nThe Data Model of IoTDB We can use the tree structure to understand the data model of iotdb. If divided according to layers, from high to low is: Storage Group \u0026ndash; (LayerName) \u0026ndash; Device \u0026ndash; Measurement. From the top layer to a certain layer below it is called a Path. The top layer is Storage Group (must start with root), the penultimate layer is Device, and the bottom layer is Measurement. There can be many layers in the middle, and each layer is called a LayerName. For more information, please refer to the Data Model and Terminology in the official document of the version 0.12.x.\nThe Design of IoTDB Storage Plugin The Data Model of SkyWalking Each storage model of SkyWalking can be considered as a Model, which contains multiple Columns. Each Column has ColumnName and ColumnType attributes, representing the name and type of Column respectively. Each Column named ColumnName stores multiple Value of the ColumnType. From a relational database perspective, Model is a relational table and Column is the field in a relational table.\nSchema Design Since each LayerName of IoTDB is stored in memory, it can be considered as an index, and this feature can be fully utilized to improve IoTDB query performance. The default storage group is root.skywalking, it will occupy the first and the second layer of the path. The model name is stored at the next layer of the storage group (the third layer of the path), such as root.skywalking.model_name.\nSkyWalking has its own index requirement, but it isn\u0026rsquo;t applicable to IoTDB. Considering query frequency and referring to the implementation of the other storage options, we choose id, entity_id, node_type, service_id, service_group, trace_id as indexes and fix their appearance order in the path. The value of these indexed columns will occupy the last few layers of the path. If we don\u0026rsquo;t fix their order, we cannot map their value to column, since we only store their value in the path but don\u0026rsquo;t store their column name. The other columns are treated as Measurements.\nThe mapping from SkyWalking data model to IoTDB data model is below.\n   SkyWalking IoTDB     Database Storage Group (1st and 2nd layer of the path)   Model LayerName (3rd layer of the path)   Indexed Column stored in memory through hard-code   Indexed Column Value LayerName (after 3rd layer of the path)   Non-indexed Column Measurement   Non-indexed Value the value of Measurement    For general example There are model1(column11, column12), model2(column21, column22, column23), model3(column31). 
Underline indicates that the column requires to be indexed. In this example, modelx_name refers to the name of modelx, columnx_name refers to the name of columnx and columnx_value refers to the value of columnx.\nBefore these 3 model storage schema, here are some points we need to know.\n In order to avoid the value of indexed column contains dot(.), all of them should be wrapped in double quotation mark since IoTDB use dot(.) as the separator in the path. We use align by device in query SQL to get a more friendly result. For more information about align by device, please see DML (Data Manipulation Language) and Query by device alignment.  The path of them is following:\n The Model with index:  root.skywalking.model1_name.column11_value.column12_name root.skywalking.model2_name.column21_value.column22_value.column23_name   The Model without index:  root.skywalking.model3_name.column31_Name    Use select * from root.skywalking.modelx_name align by device respectively to get their schema and data. The SQL result is following:\n   Time Device column12_name     1637494020000 root.skywalking.model1_name.\u0026ldquo;column11_value\u0026rdquo; column12_value       Time Device column23_name     1637494020000 root.skywalking.model2_name.\u0026ldquo;column21_value\u0026rdquo;.\u0026ldquo;column22_value\u0026rdquo; column23_value       Time Device column31_name     1637494020000 root.skywalking.model3_name column31_value    For specific example Before 5 typical examples, here are some points we need to know.\n The indexed columns and their order: id, entity_id, node_type, service_id, service_group, trace_id. Other columns are treated as non indexed and stored as Measurement. The storage entity extends Metrics or Record contains a column time_bucket. The time_bucket column in SkyWalking Model can be converted to the timestamp of IoTDB when inserting data. We don\u0026rsquo;t need to store time_bucket separately. In the next examples, we won\u0026rsquo;t list time_bucket anymore. The Time in query result corresponds to the timestamp in insert SQL and API.   Metadata: service_traffic\nservice_traffic entity has 4 columns: id, name, node_type, service_group. When service_traffic entity includes a row with timestamp 1637494020000, the row should be as following: (Notice: the value of service_group is null.)     id name node_type service_group     ZTJlLXNlcnZpY2UtcHJvdmlkZXI=.1 e2e-service-provider 0     And the row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.service_traffic align by device)\n   Time Device name     1637494020000 root.skywalking.service_traffic.\u0026ldquo;ZTJlLXNlcnZpY2UtcHJvdmlkZXI=.1\u0026rdquo;.\u0026ldquo;0\u0026rdquo;.\u0026ldquo;null\u0026rdquo; e2e-service-provider    The value of id, node_type and service_group are stored in the path in the specified order. 
Notice: If those index value is null, it will be transformed to a string \u0026ldquo;null\u0026rdquo;.\nMetrics: service_cpm\nservice_cpm entity has 4 columns: id, service_id, total, value.\nWhen service_cpm entity includes a row with timestamp 1637494020000, the row should be as following:     id service_id total value     202111211127_ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1 ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1 4 4    And the row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.service_cpm align by device)\n   Time Device total value     1637494020000 root.skywalking.service_cpm.\u0026ldquo;202111211127_ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1\u0026rdquo;.\u0026ldquo;ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1\u0026rdquo; 4 4    The value of id and service_id are stored in the path in the specified order.\nTrace segment: segment\nsegment entity has 10 columns at least: id, segment_id, trace_id, service_id, service_instance_id, endpoint_id, start_time, latency, is_error, data_binary. In addition, it could have variable number of tags.\nWhen segment entity includes 2 rows with timestamp 1637494106000 and 1637494134000, these rows should be as following. The db.type and db.instance are two tags. The first data has two tags, and the second data doesn\u0026rsquo;t have tag.     id segment_id trace_id service_id service_instance_id endpoint_id start_time latency is_error data_binary db.type db.instance     id_1 segment_id_1 trace_id_1 service_id_1 service_instance_id_1 endpoint_id_1 1637494106515 1425 0 data_binary_1 sql testdb   id_2 segment_id_2 trace_id_2 service_id_2 service_instance_id_2 endpoint_id_2 2637494106765 1254 0 data_binary_2      And these row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.segment align by device)\n   Time Device start_time data_binary latency endpoint_id is_error service_instance_id segment_id \u0026ldquo;db.type\u0026rdquo; \u0026ldquo;db.instance\u0026rdquo;     1637494106000 root.skywalking.segment.\u0026ldquo;id_1\u0026rdquo;.\u0026ldquo;service_id_1\u0026rdquo;.\u0026ldquo;trace_id_1\u0026rdquo; 1637494106515 data_binary_1 1425 endpoint_id_1 0 service_instance_id_1 segment_id_1 sql testdb   1637494106000 root.skywalking.segment.\u0026ldquo;id_2\u0026rdquo;.\u0026ldquo;service_id_2\u0026rdquo;.\u0026ldquo;trace_id_2\u0026rdquo; 1637494106765 data_binary_2 1254 endpoint_id_2 0 service_instance_id_2 segment_id_2 null null    The value of id, service_id and trace_id are stored in the path in the specified order. Notice: If the measurement contains dot(.), it will be wrapped in double quotation mark since IoTDB doesn\u0026rsquo;t allow it. In order to align, IoTDB will append null value for those data without tag in some models.\nLog\nlog entity has 12 columns at least: id, unique_id, service_id, service_instance_id, endpoint_id, trace_id, trace_segment_id, span_id, content_type, content, tags_raw_data, timestamp. In addition, it could have variable number of tags. When log entity includes a row with timestamp 1637494052000, the row should be as following and the level is a tag.     
id unique_id service_id service_instance_id endpoint_id trace_id trace_segment_id span_id content_type content tags_raw_data timestamp level     id_1 unique_id_1 service_id_1 service_instance_id_1 endpoint_id_1 trace_id_1 trace_segment_id_1 0 1 content_1 tags_raw_data_1 1637494052118 INFO    And the row stored in IoTDB should be as follows: (Query SQL: select * from root.skywalking.log align by device)\n   Time Device unique_id content_type span_id tags_raw_data \u0026ldquo;timestamp\u0026rdquo; level service_instance_id content trace_segment_id     1637494052000 root.skywalking.\u0026ldquo;id_1\u0026rdquo;.\u0026ldquo;service_id_1\u0026rdquo;.\u0026ldquo;trace_id_1\u0026rdquo; unique_id_1 1 0 tags_raw_data_1 1637494052118 INFO service_instance_id_1 content_1 trace_segment_id_1    The values of id, service_id and trace_id are stored in the path in the specified order. Notice: if a measurement is named timestamp, it will be wrapped in double quotation marks, since IoTDB doesn\u0026rsquo;t allow that name directly.\nProfiling snapshots: profile_task_segment_snapshot\nprofile_task_segment_snapshot entity has 6 columns: id, task_id, segment_id, dump_time, sequence, stack_binary. When profile_task_segment_snapshot includes a row with timestamp 1637494131000, the row should be as follows.     id task_id segment_id dump_time sequence stack_binary     id_1 task_id_1 segment_id_1 1637494131153 0 stack_binary_1    And the row stored in IoTDB should be as follows: (Query SQL: select * from root.skywalking.profile_task_segment_snapshot align by device)\n   Time Device sequence dump_time stack_binary task_id segment_id     1637494131000 root.skywalking.profile_task_segment_snapshot.\u0026ldquo;id_1\u0026rdquo; 0 1637494131153 stack_binary_1 task_id_1 segment_id_1    The value of id is stored in the path in the specified order.\nQuery In this design, part of the data is stored in memory through LayerName, so data from the same Model is spread across multiple devices. Queries often need to cross multiple devices. However, IoTDB\u0026rsquo;s support for cross-device aggregation, sorting, and pagination queries is not yet complete. In some cases, we have to fall back to a brute-force approach: query all data that meets the condition, then aggregate, sort, or paginate it ourselves, which might not be efficient. For detailed descriptions, please refer to the discussions submitted to the IoTDB community below.\n Discussion:  一个有关排序查询的问题(A problem about sort query)#3888 一个有关聚合查询的问题(A problem about aggregation query)#3907    Query SQL for the general example above:\n-- query all data in model1\nselect * from root.skywalking.model1_name align by device;\n-- query the data in model2 with column22_value=\u0026#34;test\u0026#34;\nselect * from root.skywalking.model2_name.*.\u0026#34;test\u0026#34; align by device;\n-- query the sum of column23 in model2 and group by column21\nselect sum(column23) from root.skywalking.model2_name.*.* group by level = 3;\niotdb-cli is a useful tool for connecting to and interacting with the IoTDB server. For more information, please refer to Command Line Interface (CLI)\n","excerpt":"This plugin is one of the outcomes of Apache IoTDB - Apache SkyWalking Adapter in Summer 2021 of …","ref":"/blog/2021-11-23-design-of-iotdb-storage-option/","title":"The Design of Apache IoTDB Storage Option"},{"body":"SkyWalking Infra E2E 1.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support using setup.init-system-environment to import environment. Support body and headers in http trigger. Add install target in makefile. 
Stop trigger when cleaning up. Change interval setting to Duration style. Add reasonable default cleanup.on. Support float value compare when type not match Support reuse verify.cases. Ignore trigger when not set. Support export KUBECONFIG to the environment. Support using setup.kind.import-images to load local docker images. Support using setup.kind.expose-ports to declare the resource port for host access. Support save pod/container std log on the Environment.  Bug Fixes  Fix that trigger is not continuously triggered when running e2e trigger. Migrate timeout config to Duration style and wait for node ready in KinD setup. Remove manifest only could apply the default namespace resource.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Infra E2E 1.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skywalking-infra-e2e-1-1-0/","title":"Release Apache SkyWalking Infra E2E 1.1.0"},{"body":"SkyWalking Cloud on Kubernetes 0.4.0 is released. Go to downloads page to find release tars.\n  Support special characters in the metric selector of HPA metric adapter.\n  Add the namespace to HPA metric name.\n  Features\n Add Java agent injector. Add JavaAgent and Storage CRDs of the operator.    Vulnerabilities\n CVE-2021-3121: An issue was discovered in GoGo Protobuf before 1.3.2. plugin/unmarshal/unmarshal.go lacks certain index validation CVE-2020-29652: A nil pointer dereference in the golang.org/x/crypto/ssh component through v0.0.0-20201203163018-be400aefbc4c for Go allows remote attackers to cause a denial of service against SSH servers.    Chores\n Bump up GO to 1.17. Bump up k8s api to 0.20.11. Polish documents. Bump up SkyWalking OAP to 8.8.1.    ","excerpt":"SkyWalking Cloud on Kubernetes 0.4.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-4-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.4.0"},{"body":"SkyWalking Satellite 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support load-balance GRPC client with the static server list. Support load-balance GRPC client with the Kubernetes selector. Support transmit Envoy ALS v2/v3 protocol. Support transmit Envoy Metrics v2/v3 protocol.  Bug Fixes  Fix errors when converting meter data from histogram and summary.#75  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skwaylking-satellite-0-3-0/","title":"Release Apache SkyWalking Satellite 0.3.0"},{"body":"SkyWalking Java Agent 8.8.0 is released. Go to downloads page to find release tars. Changes by Version\n8.8.0  Split Java agent from the main monorepo. It is a separate repository and going to release separately. Support JDK 8-17 through upgrading byte-buddy to 1.11.18. Upgrade JDK 11 in dockerfile and remove unused java_opts. DataCarrier changes a #consume API to add properties as a parameter to initialize consumer when use Class\u0026lt;? extends IConsumer\u0026lt;T\u0026gt;\u0026gt; consumerClass. Support Multiple DNS period resolving mechanism Modify Tags.STATUS_CODE field name to Tags.HTTP_RESPONSE_STATUS_CODE and type from StringTag to IntegerTag, add Tags.RPC_RESPONSE_STATUS_CODE field to hold rpc response code value. 
Fix kafka-reporter-plugin shade package conflict Add all config items to agent.conf file for convenient containerization use cases. Advanced Kafka Producer configuration enhancement. Support mTLS for gRPC channel. fix the bug that plugin record wrong time elapse for lettuce plugin fix the bug that the wrong db.instance value displayed on Skywalking-UI when existing multi-database-instance on same host port pair. Add thrift plugin support thrift TMultiplexedProcessor. Add benchmark result for exception-ignore plugin and polish plugin guide. Provide Alibaba Druid database connection pool plugin. Provide HikariCP database connection pool plugin. Fix NumberFormat exception in jdbc-commons plugin when MysqlURLParser parser jdbcurl Provide Alibaba Fastjson parser/generator plugin. Provide Jackson serialization and deserialization plugin. Fix a tracing context leak of SpringMVC plugin, when an internal exception throws due to response can\u0026rsquo;t be found. Make GRPC log reporter sharing GRPC channel with other reporters of agent. Remove config items of agent.conf, plugin.toolkit.log.grpc.reporter.server_host, plugin.toolkit.log.grpc.reporter.server_port, and plugin.toolkit.log.grpc.reporter.upstream_timeout. rename plugin.toolkit.log.grpc.reporter.max_message_size to log.max_message_size. Implement Kafka Log Reporter. Add config item of agnt.conf, plugin.kafka.topic_logging. Add plugin to support Apache HttpClient 5. Format SpringMVC \u0026amp; Tomcat EntrySpan operation name to METHOD:URI. Make HTTP method in the operation name according to runtime, rather than previous code-level definition, which used to have possibilities including multiple HTTP methods. Fix the bug that httpasyncclient-4.x-plugin does not take effect every time. Add plugin to support ClickHouse JDBC driver. Fix version compatibility for JsonRPC4J plugin. Add plugin to support Apache Kylin-jdbc 2.6.x 3.x 4.x Fix instrumentation v2 API doesn\u0026rsquo;t work for constructor instrumentation. Add plugin to support okhttp 2.x Optimize okhttp 3.x 4.x plugin to get span time cost precisely Adapt message header properties of RocketMQ 4.9.x  Documentation All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.8.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-8-0/","title":"Release Apache SkyWalking Java Agent 8.8.0"},{"body":"SkyWalking CLI 0.9.0 is released. Go to downloads page to find release tars.\nFeatures  Add the sub-command dependency instance to query instance relationships (#117)  Bug Fixes  fix: multiple-linear command\u0026rsquo;s labels type can be string type (#122) Add missing dest-service-id dest-service-name to metrics linear command (#121) Fix the wrong name when getting destInstance flag (#118)  Chores  Upgrade Go version to 1.16 (#120) Migrate tests to infra-e2e, overhaul the flags names (#119) Publish Docker snapshot images to ghcr (#116) Remove dist directory when build release source tar (#115)  ","excerpt":"SkyWalking CLI 0.9.0 is released. Go to downloads page to find release tars.\nFeatures  Add the …","ref":"/events/release-apache-skywalking-cli-0-9-0/","title":"Release Apache SkyWalking CLI 0.9.0"},{"body":"SkyWalking Eyes 0.2.0 is released. 
Go to downloads page to find release tars.\n  Dependency License\n Support resolving go.mod for Go Support resolving pom.xml for maven (#50) Support resolving jars' licenses (#53) Support resolving npm dependencies' licenses (#48) Support saving dependencies' licenses (#69) Add dependency check to check dependencies license compatibilities (#58)    License Header\n fix command supports more languages:  Add support for plantuml (#42) Add support for PHP (#40) Add support for Twig template language (#39) Add support for Smarty template language (#38) Add support for MatLab files (#37) Add support for TypeScript language files (#73) Add support for nextflow files (#65) Add support for perl files (#63) Add support for ini extension (#24) Add support for R files (#64) Add support for .rst files and allow fixing header of a single file (#25) Add support for Rust files (#29) Add support for bat files (#32)   Remove .tsx from XML language extensions Honor Python\u0026rsquo;s coding directive (#68) Fix file extension conflict between RenderScript and Rust (#66) Add comment type to cython declaration (#62) header fix: respect user configured license content (#60) Expose license-location-threshold as config item (#34) Fix infinite recursive calls when containing symbolic files (#33) defect: avoid crash when no comment style is found (#23)    Project\n Enhance license identification (#79) Support installing via go install (#76) Speed up the initialization phase (#75) Resolve absolute path in .gitignore to relative path (#67) Reduce img size and add npm env (#59) Make the config file and log level in GitHub Action configurable (#56, #57) doc: add a PlantUML activity diagram of header fixing mechanism (#41) Fix bug: license file is not found but reported message is nil (#49) Add all well-known licenses and polish normalizers (#47) Fix compatibility issues in Windows (#44) feature: add reasonable default config to allow running in a new repo without copying config file (#28) chore: only build linux binary when building inside docker (#26) chore: upgrade to go 1.16 and remove go-bindata (#22) Add documentation about how to use via docker image (#20)    ","excerpt":"SkyWalking Eyes 0.2.0 is released. Go to downloads page to find release tars.\n  Dependency License …","ref":"/events/release-apache-skywalking-eyes-0-2-0/","title":"Release Apache SkyWalking Eyes 0.2.0"},{"body":"SkyWalking Client JS 0.7.0 is released. Go to downloads page to find release tars.\n Support setting time interval to report segments. Fix segments report only send once. Fix apache/skywalking#7335. Fix apache/skywalking#7793. Fix firstReportedError for SPA.  ","excerpt":"SkyWalking Client JS 0.7.0 is released. Go to downloads page to find release tars.\n Support setting …","ref":"/events/release-apache-skywalking-client-js-0-7-0/","title":"Release Apache SkyWalking Client JS 0.7.0"},{"body":"SkyWalking 8.8.1 is released. Go to downloads page to find release tars.\nThis is a bugfix version that fixes several important bugs in previous version 8.8.0.\nChanges OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.8.1 is released. 
Go to downloads page to find release tars.\nThis is a bugfix version …","ref":"/events/release-apache-skywalking-apm-8-8-1/","title":"Release Apache SkyWalking APM 8.8.1"},{"body":"Kai Wan has been involved in SkyWalking for over half a year since the first PR(Dec 21, 2020). He mainly focuses on the Service Mesh and the metrics analysis engine (MAL), and recently added support for the OpenAPI specification to SkyWalking.\nHe learns fast, dedicates hours every day to the project, and has finished 37 PRs, 11,168 LOC++, 1,586 LOC\u0026ndash;. These days, he is working with the PMC and the infra-e2e team to upgrade our main repository\u0026rsquo;s test framework to NGET (the Next Generation E2E Test framework).\nIt is our honor to have him join the team.\n","excerpt":"Kai Wan has been involved in SkyWalking for over half a year since the first PR(Dec 21, 2020). He …","ref":"/events/welcome-kai-wan-to-join-the-pmc/","title":"Welcome Kai Wan (万凯) to join the PMC"},{"body":"SkyWalking 8.8.0 is released. Go to downloads page to find release tars.\nThis is the first OAP server + UI release; the Java agent will be released independently. Check the latest compatibility document to find suitable agent releases.\nChanges by Version\nProject  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. 
Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.8.0 is released. 
Go to downloads page to find release tars.\nThis is a first OAP server …","ref":"/events/release-apache-skywalking-apm-8-8-0/","title":"Release Apache SkyWalking APM 8.8.0"},{"body":"SkyWalking CLI 0.8.0 is released. Go to downloads page to find release tars.\n  Features\n Add profile command Add logs command Add dependency command Support query events protocol Support auto-completion for bash and powershell    Bug Fixes\n Fix missing service instance name in trace command    Chores\n Optimize output by adding color to help information Set display style explicitly for commands in the test script Set different default display style for different commands Add scripts for quick install Update release doc and add scripts for release split into multiple workflows to speed up CI    ","excerpt":"SkyWalking CLI 0.8.0 is released. Go to downloads page to find release tars.\n  Features\n Add profile …","ref":"/events/release-apache-skywalking-cli-0-8-0/","title":"Release Apache SkyWalking CLI 0.8.0"},{"body":"SkyWalking Satellite 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Set MAXPROCS according to real cpu quota. Update golangci-lint version to 1.39.0. Update protoc-gen-go version to 1.26.0. Add prometheus-metrics-fetcher plugin. Add grpc client plugin. Add nativelog-grpc-forwarder plugin. Add meter-grpc-forwarder plugin. Support native management protocol. Support native tracing protocol. Support native profile protocol. Support native CDS protocol. Support native JVM protocol. Support native Meter protocol. Support native Event protocol. Support native protocols E2E testing. Add Prometheus service discovery in Kubernetes.  Bug Fixes  Fix the data race in mmap queue. Fix channel blocking in sender module. Fix pipes.sender.min_flush_events config could not support min number. Remove service name and instance name labels from Prometheus fetcher.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Set …","ref":"/events/release-apache-skwaylking-satellite-0-2-0/","title":"Release Apache SkyWalking Satellite 0.2.0"},{"body":"SkyWalking Python 0.7.0 is released. Go to downloads page to find release tars.\n  Feature:\n Support collecting and reporting logs to backend (#147) Support profiling Python method level performance (#127 Add a new sw-python CLI that enables agent non-intrusive integration (#156) Add exponential reconnection backoff strategy when OAP is down (#157) Support ignoring traces by http method (#143) NoopSpan on queue full, propagation downstream (#141) Support agent namespace. (#126) Support secure connection option for GRPC and HTTP (#134)    Plugins:\n Add Falcon Plugin (#146) Update sw_pymongo.py to be compatible with cluster mode (#150) Add Python celery plugin (#125) Support tornado5+ and tornado6+ (#119)    Fixes:\n Remove HTTP basic auth credentials from log, stacktrace, segment (#152) Fix @trace decorator not work (#136) Fix grpc disconnect, add SW_AGENT_MAX_BUFFER_SIZE to control buffer queue size (#138)    Others:\n Chore: bump up requests version to avoid license issue (#142) Fix module wrapt as normal install dependency (#123) Explicit component inheritance (#132) Provide dockerfile \u0026amp; images for easy integration in containerized scenarios (#159)    ","excerpt":"SkyWalking Python 0.7.0 is released. 
Go to downloads page to find release tars.\n  Feature:\n Support …","ref":"/events/release-apache-skywalking-python-0-7-0/","title":"Release Apache SkyWalking Python 0.7.0"},{"body":"","excerpt":"","ref":"/tags/python/","title":"Python"},{"body":"SkyWalking Infra E2E 1.0.0 is released. Go to downloads page to find release tars.\nFeatures  Support using docker-compose to setup the environment. Support using the HTTP request as trigger. Support verify test case by command-line or file with retry strategy. Support GitHub Action.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Infra E2E 1.0.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skywalking-infra-e2e-1-0-0/","title":"Release Apache SkyWalking Infra E2E 1.0.0"},{"body":"The Java Agent of Apache SkyWalking has supported profiling since v7.0.0, and it enables users to troubleshoot the root cause of performance issues, and now we bring it into Python Agent. In this blog, we will show you how to use it, and we will introduce the mechanism of profiling.\nHow to use profiling in Python Agent This feature is released in Python Agent at v0.7.0. It is turned on by default, so you don\u0026rsquo;t need any extra configuration to use it. You can find the environment variables about it here.\nHere are the demo codes of an intentional slow application.\nimport time def method1(): time.sleep(0.02) return \u0026#39;1\u0026#39; def method2(): time.sleep(0.02) return method1() def method3(): time.sleep(0.02) return method2() if __name__ == \u0026#39;__main__\u0026#39;: import socketserver from http.server import BaseHTTPRequestHandler class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): def do_POST(self): method3() time.sleep(0.5) self.send_response(200) self.send_header(\u0026#39;Content-Type\u0026#39;, \u0026#39;application/json\u0026#39;) self.end_headers() self.wfile.write(\u0026#39;{\u0026#34;song\u0026#34;: \u0026#34;Despacito\u0026#34;, \u0026#34;artist\u0026#34;: \u0026#34;Luis Fonsi\u0026#34;}\u0026#39;.encode(\u0026#39;ascii\u0026#39;)) PORT = 19090 Handler = SimpleHTTPRequestHandler with socketserver.TCPServer((\u0026#34;\u0026#34;, PORT), Handler) as httpd: httpd.serve_forever() We can start it with SkyWalking Python Agent CLI without changing any application code now, which is also the latest feature of v0.7.0. We just need to add sw-python run before our start command(i.e. sw-python run python3 main.py), to start the application with python agent attached. More information about sw-python can be found there.\nThen, we should add a new profile task for the / endpoint from the SkyWalking UI, as shown below.\nWe can access it by curl -X POST http://localhost:19090/, after that, we can view the result of this profile task on the SkyWalking UI.\nThe mechanism of profiling When a request lands on an application with the profile function enabled, the agent begins the profiling automatically if the request’s URI is as required by the profiling task. A new thread is spawned to fetch the thread dump periodically until the end of request.\nThe agent sends these thread dumps, called ThreadSnapshot, to SkyWalking OAPServer, and the OAPServer analyzes those ThreadSnapshot(s) and gets the final result. 
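As a rough illustration of that sampling loop, and emphatically not the agent's real code, the Python sketch below shows how a background thread can capture another thread's call stack at a fixed interval; the helper name sample_stacks, the 10ms interval, and the toy slow_work function are assumptions made for this example:

```python
# Illustrative sketch only; NOT the SkyWalking Python agent's implementation.
# A background loop wakes up every `interval` seconds and records the target
# thread's current call stack (file, line, function), mirroring the periodic
# ThreadSnapshot collection described above.
import sys
import threading
import time
import traceback

def sample_stacks(target_ident, interval=0.01, duration=0.3):
    """Collect (timestamp, stack summary) snapshots of one thread."""
    snapshots = []
    deadline = time.time() + duration
    while time.time() < deadline:
        frame = sys._current_frames().get(target_ident)
        if frame is not None:
            snapshots.append((time.time(), traceback.extract_stack(frame)))
        time.sleep(interval)
    return snapshots

if __name__ == "__main__":
    def slow_work():
        time.sleep(0.2)  # stand-in for the slow endpoint being profiled

    worker = threading.Thread(target=slow_work)
    worker.start()
    # Sample the worker roughly every 10 ms while it runs.
    for ts, stack in sample_stacks(worker.ident, interval=0.01, duration=0.2):
        print(ts, [f"{f.name}:{f.lineno}" for f in stack])
    worker.join()
```

Each snapshot keeps the frames' code signatures, which is what the OAP-side analysis described next compares across snapshots to estimate per-method time.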
It will take a method invocation with the same stack depth and code signature as the same operation, and estimate the execution time of each method from this.\nLet\u0026rsquo;s demonstrate how this analysis works through the following example. Suppose we have such a program below and we profile it at 10ms intervals.\ndef main(): methodA() def methodA(): methodB() def methodB(): methodC() methodD() def methodC(): time.sleep(0.04) def methodD(): time.sleep(0.06) The agent collects a total of 10 ThreadSnapShot(s) over the entire time period(Diagram A). The first 4 snapshots represent the thread dumps during the execution of function C, and the last 6 snapshots represent the thread dumps during the execution of function D. After the analysis of OAPServer, we can see the result of this profile task on the SkyWalking Rocketbot UI as shown in the right of the diagram. With this result, we can clearly see the function call relationship and the time consumption situation of this program.\nDiagram A You can read more details of profiling theory from this blog.\nWe hope you enjoy the profile in the Python Agent, and if so, you can give us a star on Python Agent and SkyWalking on GitHub.\n","excerpt":"The Java Agent of Apache SkyWalking has supported profiling since v7.0.0, and it enables users to …","ref":"/blog/2021-09-12-skywalking-python-profiling/","title":"SkyWalking Python Agent Supports Profiling Now"},{"body":"SkyWalking Kubernetes Helm Chart 4.1.0 is released. Go to downloads page to find release tars.\n Add missing service account to init job. Improve notes.txt and nodePort configuration. Improve ingress compatibility. Fix bug that customized config files are not loaded into es-init job. Add imagePullSecrets and node selector. Fix istio adapter description. Enhancement: allow mounting binary data files.  ","excerpt":"SkyWalking Kubernetes Helm Chart 4.1.0 is released. Go to downloads page to find release tars.\n Add …","ref":"/events/release-apache-skywalking-kubernetes-helm-chart-4.1.0/","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.1.0"},{"body":"GOUP hosted a webinar, and invited Sheng Wu to introduce Apache SkyWalking. This is a 1.5 hours presentation including the full landscape of Apache SkyWalking 8.x.\nChapter04 Session10 - Apache Skywalking by Sheng Wu   ","excerpt":"GOUP hosted a webinar, and invited Sheng Wu to introduce Apache SkyWalking. This is a 1.5 hours …","ref":"/blog/2021-08-01-skywalking-8-intro/","title":"[Webinar] SkyWalking 8.x Introduction"},{"body":"SkyWalking 8.7.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. 
Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. 
Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. 
Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.7.0 is released. Go to downloads page to find release tars. Changes by Version\nProject …","ref":"/events/release-apache-skywalking-apm-8-7-0/","title":"Release Apache SkyWalking APM 8.7.0"},{"body":"SkyWalking Client JS 0.6.0 is released. Go to downloads page to find release tars.\n Separate production and development environments when building. Upgrade packages to fix vulnerabilities. Fix headers could be null . Fix catching errors for http requests. Fix the firstReportedError is calculated with more types of errors.  ","excerpt":"SkyWalking Client JS 0.6.0 is released. Go to downloads page to find release tars.\n Separate …","ref":"/events/release-apache-skywalking-client-js-0-6-0/","title":"Release Apache SkyWalking Client JS 0.6.0"},{"body":"SkyWalking is an open source APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based architectures.\nFrom 2020, it has dominated the open source APM market in China, and expanded aggressively in North American, Europe and Asia\u0026rsquo;s other countries.\nWith over 6 years (2015-2021) of development, driven by the global open source community, SkyWalking now provides full stack observability covering metrics, tracing and logging, plus event detector, which are built based on various native and ecosystem solutions.\n Language agent-based(Java, Dot Net, Golang, PHP, NodeJS, Python, C++, LUA) in-process monitoring, is as powerful as commercial APM vendors' agents. Mostly auto-instrumentation, and good interactivity. Service Mesh Observability, working closely with Envoy and Istio teams. Transparent integration of popular metrics ecosystem. Accept metrics from Prometheus SDK, OpenTelemetry collectors, Zabbix agents, etc. Log collection with analysis capability from FluentD, Fluent-bit, Filebeat, etc. agents. Infrastructure monitoring, such as Linux and k8s, is out of the box.  The SkyWalking ecosystem was started by very few people. The community drives the project to cover real scenarios, from tracing to the whole APM field. 
Even today, more professional open source developers, powered by the vendors behind them, are bringing the project to a different level.\nTypically and most attractively, SkyWalking is going to build the first known open source APM specific database in the world, at least providing\n Time series-based database engine. Support traces/logs and metrics in the database core level. High performance with cluster mode and HPA. Reasonable resource cost.  We nearly doubled the number of contributors in the last year, from ~300 to over 500. The whole community is very energetic. Here, we want to thank our 47 committers(28 PMC members included), listed here, and over 400 other contributors.\nWe together built this humongous Apache Top Level project, and proved the stronge competitiveness of an open-source project.\nThis is a hard-won and impressive achievement. We won\u0026rsquo;t stop here. The trend is there, the ground is solid. We are going to build the top-level APM system relying on our open-source community.\n500 Contributors List    GitHub         1095071913 182148432** 295198088** 394102339** 437376068**   50168383 55846420** 826245622** 844067874 Ahoo-Wang   AirTrioa AlexanderWert AlseinX AngryMills Ax1an   BFergerson BZFYS CalvinKirs CharlesMaster ChaunceyLin5152   CommissarXia Cvimer DeadLion Doublemine Du-fei   ElderJames EvanLjp FatihErdem FeynmanZhou Fine0830   FingerLiu FrankyXu Gallardot GerryYuan HackerRookie   HarryFQ Heguoya Hen1ng HendSame Humbertzhang   IanCao IluckySi Indifer J-Cod3r JaredTan95   Jargon96 Jijun JoeKerouac JohnNiang Johor03   Jozdortraz Jtrust Just-maple KangZhiDong LazyLei   LiWenGu Lin1997 Linda-pan LiteSun Liu-XinYuan   MiracleDx Miss-you MoGuGuai-hzr MrYzys O-ll-O   Patrick0308 QHWG67 Qiliang QuanjieDeng RandyAbernethy   RedzRedz Runrioter SataQiu ScienJus SevenBlue2018   ShaoHans Shikugawa SoberChina SummerOfServenteen Switch-vov   TJ666 Technoboy- TerrellChen TeslaCN TheRealHaui   TinyAllen TomMD ViberW Videl WALL-E   WeihanLi WildWolfBang WillemJiang Wooo0 XhangUeiJong   Xlinlin YczYanchengzhe Yebemeto YoungHu YunaiV   YunfengGao Z-Beatles ZS-Oliver ZhHong ZhuoSiChen   a198720 a1vin-tian a526672351 acurtain adamni135   adermxzs adriancole** aeolusheath agile6v aix3   aiyanbo ajanthan alexkarezin alonelaval amogege   amwyyyy andyliyuze andyzzl aoxls arugal   ascrutae ascrutae** augustowebd aviaviavi bai-yang   beckhampu beckjin beiwangnull bigflybrother bootsrc   bostin brucewu-fly buxingzhe buzuotaxuan bwh12398**   c feng c1ay candyleer carllhw carlvine500   carrypann cheenursn cheetah012 chenbeitang chenglei**   chengshiwen chenmudu chenpengfei chenvista chess-equality   chestarss chidaodezhongsheng chopin-d clevertension clk1st   cngdkxw cnlangzi codeglzhang codelipenghui coder-yqj   coki230 compilerduck constanine coolbeevip crystaldust   cui-liqiang cuiweiwei cutePanda123 cyberdak cyejing   cyhii dafu-wu dagmom dalekliuhan** darcydai   dengliming devkanro devon-ye dickens7 dimaaan   dingdongnigetou dio divyakumarjain dmsolr dominicqi   donbing007 dsc6636926 dvsv2 dzx2018 echooymxq   efekaptan elk-g emschu eoeac evanljp**   evanxuhe feelwing1314 fgksgf fredster33 fuhuo   fulmicoton fushiqinghuan111 geektcp geomonlin ggndnn   gitter-badger givingwu glongzh gnr163 gonedays   grissom-grissom grissomsh guodongq guyukou gxthrj   gy09535 gzshilu hailin0 hanahmily haotian2015   haoyann hardzhang harvies heihaozi hepyu   heyanlong hi-sb honganan horber hsoftxl   huangyoje huliangdream huohuanhuan iluckysi innerpeacez   itsvse jasper-zsh jbampton jialong121 
jinlongwang   jjlu521016 jjtyro jmjoy jsbxyyx justeene   juzhiyuan jy00464346 kaanid kagaya85 karott   kayleyang kevinyyyy kezhenxu94 kikupotter kilingzhang   killGC kkl129 klboke ksewen kuaikuai   kun-song kylixs landonzeng langke93 langyan1022   langyizhao lazycathome leemove leizhiyuan libinglong   lijial lilien1010 limfriend linkinshi linliaoy   liqiangz liu-junchi liufei** liuhaoXD liuhaoyang   liuweiyi** liuyanggithup liuzhengyang liweiv lixin40**   lizl9** lkxiaolou llissery louis-zhou lpcy   lpf32 lsyf lucperkins lujiajing1126 lunamagic1978   lunchboxav lxin96** lxliuxuankb lytscu lyzhang1999   mage3k makefriend8 makingtime mantuliu maolie   margauxcabrera masterxxo maxiaoguang64 me** membphis   mestarshine mgsheng michaelsembwever mikkeschiren ming_flycash**   minquan.chen** misaya momo0313 moonming mrproliu   mrproliu** muyun12 nacx neatlife neeuq   nic-chen nickwongwong nikitap492 nileblack nisiyong   novayoung oatiz oflebbe olzhy onecloud360   osiriswd panniyuyu peng-yongsheng pengweiqhca potiuk   probeyang purgeyao qijianbo010 qinhang3 qiuyu-d   qjgszzx qq362220083 qqeasonchen qxo ralphgj   raybi-asus refactor2 remicollet rlenferink rootsongjc   rovast ruibaby s00373198 scolia sdanzo   seifeHu sergicastro shiluo34 sikelangya simonlei   sk163 snakorse songzhendong songzhian songzhian**   sonxy spacewander stalary stenio2011 stevehu   stone-wlg sungitly surechen swartz-k sxzaihua   tangxqa tanjunchen tankilo tanzhen** taskmgr   tbdpmi terranhu terrymanu tevahp thanq   thebouv tianyk tianyuak tincopper tinyu0   tom-pytel tristaZero tristan-tsl trustin tsuilouis   tuohai666 tzsword-2020 tzy1316106836 vcjmhg viktoryi   vision-ken viswaramamoorthy wallezhang wang-yeliang wang_weihan**   wangrzneu wankai123 wbpcode web-xiaxia webb2019   weiqiang-w weiqiang333 wendal wengangJi wenjianzhang   whfjam whl12345 willseeyou wilsonwu wind2008hxy   wingwong-knh withlin wl4g wqr2016 wu-sheng   wuguangkuo wujun8 wuwen5 wuxingye x22x22   xbkaishui xcaspar xdRight xiaoweiyu** xiaoxiangmoe   xiaoy00 xinfeingxia85 xingren23 xinzhuxiansheng xonze   xuanyu66 xuchangjunjx xudianyang yanbw yanfch   yang-xiaodong yangxb2010000 yanickxia yanmaipian yanmingbi   yantaowu yaojingguo yaowenqiang yazong ychandu   ycoe yimeng yu199195 yuqichou yushuqiang**   yuyujulin yxudong yymoth zaunist zaygrzx   zcai2 zeaposs zhang98722 zhanghao001 zhangjianweibj   zhangkewei zhangsean zhangxin** zhaoyuguang zhe1926   zhentaoJin zhongjianno1** zhousiliang163 zhuCheer zhyyu   zifeihan zijin-m zkscpqm zoidbergwill zoumingzm   zouyx zpf1989 zshit zxbu zygfengyuwuzu    ","excerpt":"SkyWalking is an open source APM (application performance monitor) system, especially designed for …","ref":"/blog/2021-07-12-500-contributors-mark/","title":"[Community win] SkyWalking achieved 500 contributors milestone."},{"body":"时间:2021 年 6 月 26 日\n地点:北京市海淀区西格玛大厦 B1 多功能厅\n视频回放:见 Bilibili\nApache SkyWalking Landscape  吴晟 Sheng Wu. Tetrate Founding Engineer, Apache Software Foundation board director. SkyWalking founder.  SkyWalking 2020-2021 年发展和后续计划\n微服务可观测性分析平台的探索与实践  凌若川 腾讯高级工程师  可观测性分析平台作为云原生时代微服务系统基础组件,开放性与性能是决定平台价值的核心要素。 复杂微服务应用场景与海量多维链路数据,对可观测性分析平台在开放性设计和各环节高性能实现带来诸多挑战。 本次分享中将重点梳理腾讯云微服务团队在构建云原生可观测性分析平台过程中遇到的挑战,介绍我们在架构设计与实现方面的探索与实践。\n 云原生时代微服务可观测性平台面临的性能与可用性挑战 腾讯云在构建高性能微服务可观测性分析平台的探索与实践 微服务可观测性分析平台架构的下一阶段演进方向展望  BanyanDB 数据模型背后的逻辑  高洪涛 Hongtao Gao. Tetrate SRE, SkyWalking PMC, Apache ShardingSphere PMC.  
BanyanDB 作为为处理 Apache SkyWalking 产生的 trace,log 和 metric 的数据而特别设计的数据库,其背后数据模型的抉择是非常与众不同的。 在本次分享中,我将根据 RUM 猜想来讨论为什么 BanyanDB 使用的数据模型对于 APM 数据而言是更加高效和可靠的。\n通过本次分享,观众可以:\n 理解数据库设计的取舍 了解 BanyanDB 的数据模型 认识到该模型对于 APM 类数据有特定的优势  Apache SkyWalking 如何做前端监控  范秋霞 Qiuxia Fan,Tetrate FE SRE,SkyWalking PMC.  Apache SkyWalking 对前端进行了监控与跟踪,分别有 Metric, Log, Trace 三部分。本次分享我会介绍页面性能指标的收集与计算,同时用案列进行分析,也会讲解 Log 的采集方法以及 Source Map 错误定位的实施。最后介绍浏览器端 Requets 的跟踪方法。\n通过本次分享,观众可以:\n 了解页面的性能指标以及收集计算方法 了解前端如何做错误日志收集 如何对页面请求进行跟踪以及跟踪的好处  一名普通工程师,该如何正确的理解开源精神?  王晔倞 Yeliang Wang. API7 Partner / Product VP.  开源精神,那也许是一种给于和获取的平衡,有给于才能有获取,有获取才会有给于的动力。无需指责别人只会获取,我们应该懂得开源是一种创造方式,一个没有创造欲和创造力的人加入开源也是无用的。\n通过本次分享,观众可以:\n 为什么国内一些程序员会对开源产生误解? 了解 “开源≠自由≠非商业” 的来龙去脉。 一名普通工程师,如何高效地向开源社区做贡献?  可观测性技术生态和 OpenTelemetry 原理及实践  陈一枭 腾讯. OpenTelemetry docs-cn maintainer、Tencent OpenTelemetry OTeam 创始人  综述云原生可观测性技术生态,介绍 OpenTracing,OpenMetrics,OpenTelemetry 等标准演进。介绍 OpenTelemetry 存在价值意义,介绍 OpenTelemetry 原理及其整体生态规划。介绍腾讯在 OpenTelemetry 方面的实践。\n本次分享内容如下:\n 云原生可观测性技术简介 OpenTelemetry 及其它规范简介 OpenTelemetry 原理 OpenTelemetry 在腾讯的应用及实践  Apache SkyWalking 事件采集系统更快定位故障  柯振旭 Zhenxu Ke,Tetrate SRE, Apache SkyWalking PMC. Apache Incubator PMC. Apache Dubbo committer.  通过本次分享,听众可以:\n 了解 SkyWalking 的事件采集系统; 了解上报事件至 SkyWalking 的多种方式; 学习如何利用 SkyWalking 采集的事件结合 metrics,分析目标系统的性能问题;  可观测性自动注入技术原理探索与实践  詹启新 Tencnet OpenTelemetry Oteam PMC  在可观测领域中自动注入已经成为重要的组成部分之一,其优异简便的使用方式并且可同时覆盖到链路、指标、日志,大大降低了接入成本及运维成本,属于友好的一种接入方式; 本次分享将介绍 Java 中的字节码注入技术原理,及在可观测领域的应用实践\n 常用的自动注入技术原理简介 介绍可观测性在 Java 落地的要点 opentelemetry-java-instrumentation 的核心原理及实现 opentelemetry 自动注入的应用实践  如何利用 Apache APISIX 提升 Nginx 的可观测性  金卫 Wei Jin, API7 Engineer Apache SkyWalking committer. Apache apisix-ingress-controller Founder. Apache APISIX PMC.  在云原生时代,动态和可观测性是 API 网关的标准特性。Apache APISIX 不仅覆盖了 Nginx 的传统功能,在可观测性上也和 SkyWalking 深度合作,大大提升了服务治理能力。本次分享会介绍如何无痛的提升 Nginx 的可观测性和 APISIX 在未来可观测性方面的规划。\n通过本次分享,观众可以:\n 通过 Apache APISIX 实现观测性的几种手段. 了解 Apache APISIX 高效且易用的秘诀. 结合 Apache skywalking 进一步提升可观测性.  ","excerpt":"时间:2021 年 6 月 26 日\n地点:北京市海淀区西格玛大厦 B1 多功能厅\n视频回放:见 Bilibili\nApache SkyWalking Landscape  吴晟 Sheng Wu. …","ref":"/zh/skywalking-day-2021/","title":"[视频] SkyWalking Day 2021 演讲视频"},{"body":"SkyWalking CLI 0.7.0 is released. Go to downloads page to find release tars.\n  Features\n Add GitHub Action for integration of event reporter    Bug Fixes\n Fix metrics top can\u0026rsquo;t infer the scope automatically    Chores\n Upgrade dependency crypto Refactor project to use goapi Move parseScope to pkg Update release doc    ","excerpt":"SkyWalking CLI 0.7.0 is released. Go to downloads page to find release tars.\n  Features\n Add GitHub …","ref":"/events/release-apache-skywalking-cli-0-7-0/","title":"Release Apache SkyWalking CLI 0.7.0"},{"body":"SkyWalking 8.6.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. 
Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics infomation during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  
Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.6.0 is released. Go to downloads page to find release tars. Changes by Version\nProject …","ref":"/events/release-apache-skywalking-apm-8-6-0/","title":"Release Apache SkyWalking APM 8.6.0"},{"body":"Abstract Apache SkyWalking hosts SkyWalkingDay Conference 2021 in June 26th, jointly with Tencent and Tetrate.\nWe are going to share SkyWalking\u0026rsquo;s roadmap, features, product experiences and open source culture.\nWelcome to join us.\nVenue Addr./地址 北京市海淀区西格玛大厦B1多功能厅\nDate June 26th.\nRegistration For Free Register for onsite or online\nSessions 10:00 - 10:20 Apache SkyWalking Landscape  吴晟 Sheng Wu. Tetrate Founding Engineer, Apache Software Foundation board director. SkyWalking founder.  SkyWalking 2020-2021年发展和后续计划\n10:20 - 10:50 微服务可观测性分析平台的探索与实践  凌若川 腾讯高级工程师  可观测性分析平台作为云原生时代微服务系统基础组件,开放性与性能是决定平台价值的核心要素。 复杂微服务应用场景与海量多维链路数据,对可观测性分析平台在开放性设计和各环节高性能实现带来诸多挑战。 本次分享中将重点梳理腾讯云微服务团队在构建云原生可观测性分析平台过程中遇到的挑战,介绍我们在架构设计与实现方面的探索与实践。\n 云原生时代微服务可观测性平台面临的性能与可用性挑战 腾讯云在构建高性能微服务可观测性分析平台的探索与实践 微服务可观测性分析平台架构的下一阶段演进方向展望  10:50 - 11:20 BanyanDB数据模型背后的逻辑  高洪涛 Hongtao Gao. Tetrate SRE, SkyWalking PMC, Apache ShardingSphere PMC.  BanyanDB作为为处理Apache SkyWalking产生的trace,log和metric的数据而特别设计的数据库,其背后数据模型的抉择是非常与众不同的。 在本次分享中,我将根据RUM猜想来讨论为什么BanyanDB使用的数据模型对于APM数据而言是更加高效和可靠的。\n通过本次分享,观众可以:\n 理解数据库设计的取舍 了解BanyanDB的数据模型 认识到该模型对于APM类数据有特定的优势  11:20 - 11:50 Apache SkyWalking 如何做前端监控  范秋霞 Qiuxia Fan,Tetrate FE SRE,SkyWalking PMC.  Apache SkyWalking对前端进行了监控与跟踪,分别有Metric, Log, Trace三部分。本次分享我会介绍页面性能指标的收集与计算,同时用案列进行分析,也会讲解Log的采集方法以及Source Map错误定位的实施。最后介绍浏览器端Requets的跟踪方法。\n通过本次分享,观众可以:\n 了解页面的性能指标以及收集计算方法 了解前端如何做错误日志收集 如何对页面请求进行跟踪以及跟踪的好处  午休 13:30 - 14:00 一名普通工程师,该如何正确的理解开源精神?  王晔倞 Yeliang Wang. API7 Partner / Product VP.  开源精神,那也许是一种给于和获取的平衡,有给于才能有获取,有获取才会有给于的动力。无需指责别人只会获取,我们应该懂得开源是一种创造方式,一个没有创造欲和创造力的人加入开源也是无用的。\n通过本次分享,观众可以:\n 为什么国内一些程序员会对开源产生误解? 了解 “开源≠自由≠非商业” 的来龙去脉。 一名普通工程师,如何高效地向开源社区做贡献?  14:00 - 14:30 可观测性技术生态和OpenTelemetry原理及实践  陈一枭 腾讯. OpenTelemetry docs-cn maintainer、Tencent OpenTelemetry OTeam创始人  综述云原生可观测性技术生态,介绍OpenTracing,OpenMetrics,OpenTelemetry等标准演进。介绍OpenTelemetry存在价值意义,介绍OpenTelemetry原理及其整体生态规划。介绍腾讯在OpenTelemetry方面的实践。\n本次分享内容如下:\n 云原生可观测性技术简介 OpenTelemetry及其它规范简介 OpenTelemetry原理 OpenTelemetry在腾讯的应用及实践  14:30 - 15:10 利用 Apache SkyWalking 事件采集系统更快定位故障  柯振旭 Zhenxu Ke,Tetrate SRE, Apache SkyWalking PMC. Apache Incubator PMC. Apache Dubbo committer.  通过本次分享,听众可以:\n 了解 SkyWalking 的事件采集系统; 了解上报事件至 SkyWalking 的多种方式; 学习如何利用 SkyWalking 采集的事件结合 metrics,分析目标系统的性能问题;  15:10 - 15:30 茶歇 15:30 - 16:00 可观测性自动注入技术原理探索与实践  詹启新 Tencnet OpenTelemetry Oteam PMC  在可观测领域中自动注入已经成为重要的组成部分之一,其优异简便的使用方式并且可同时覆盖到链路、指标、日志,大大降低了接入成本及运维成本,属于友好的一种接入方式; 本次分享将介绍Java中的字节码注入技术原理,及在可观测领域的应用实践\n 常用的自动注入技术原理简介 介绍可观测性在Java落地的要点 opentelemetry-java-instrumentation的核心原理及实现 opentelemetry自动注入的应用实践  16:00 - 16:30 如何利用 Apache APISIX 提升 Nginx 的可观测性  金卫 Wei Jin, API7 Engineer Apache SkyWalking committer. Apache apisix-ingress-controller Founder. Apache APISIX PMC.  在云原生时代,动态和可观测性是 API 网关的标准特性。Apache APISIX 不仅覆盖了 Nginx 的传统功能,在可观测性上也和 SkyWalking 深度合作,大大提升了服务治理能力。本次分享会介绍如何无痛的提升 Nginx 的可观测性和 APISIX 在未来可观测性方面的规划。\n通过本次分享,观众可以:\n 通过 Apache APISIX 实现观测性的几种手段. 了解 Apache APISIX 高效且易用的秘诀. 结合 Apache skywalking 进一步提升可观测性.  
16:35 抽奖,结束 Sponsors  Tencent Tetrate SegmentFault 思否  Anti-harassment policy SkyWalkingDay is dedicated to providing a harassment-free experience for everyone. We do not tolerate harassment of participants in any form. Sexual language and imagery will also not be tolerated in any event venue. Participants violating these rules may be sanctioned or expelled without a refund, at the discretion of the event organizers. Our anti-harassment policy can be found at the Apache website.\nContact Us Send mail to dev@skywalking.apache.org.\n","excerpt":"Abstract Apache SkyWalking hosts SkyWalkingDay Conference 2021 in June 26th, jointly with Tencent …","ref":"/events/skywalkingday-2021/","title":"SkyWalkingDay Conference 2021, relocating at Beijing"},{"body":"SkyWalking NodeJS 0.3.0 is released. Go to downloads page to find release tars.\n Add ioredis plugin. (#53) Endpoint cold start detection and marking. (#52) Add mysql2 plugin. (#54) Add AzureHttpTriggerPlugin. (#51) Add Node 15 into test matrix. (#45) Segment reference and reporting overhaul. (#50) Add http ignore by method. (#49) Add secure connection option. (#48) BugFix: wrong context during many async spans. (#46) Add Node Mongoose Plugin. (#44)  ","excerpt":"SkyWalking NodeJS 0.3.0 is released. Go to downloads page to find release tars.\n Add ioredis plugin. …","ref":"/events/release-apache-skywalking-nodejs-0-3-0/","title":"Release Apache SkyWalking for NodeJS 0.3.0"},{"body":"SkyWalking Client JS 0.5.1 is released. Go to downloads page to find release tars.\n Add noTraceOrigins option. Fix wrong URL when using relative path. Catch frames errors. Get response.body as a stream with the fetch API. Support reporting multiple logs. Support typescript project.  ","excerpt":"SkyWalking Client JS 0.5.1 is released. Go to downloads page to find release tars.\n Add …","ref":"/events/release-apache-skywalking-client-js-0-5-1/","title":"Release Apache SkyWalking Client JS 0.5.1"},{"body":"SkyWalking Kong Agent 0.1.1 is released. Go to downloads page to find release tars.\n Establish the SkyWalking Kong Agent.  ","excerpt":"SkyWalking Kong Agent 0.1.1 is released. Go to downloads page to find release tars.\n Establish the …","ref":"/events/release-apache-skywalking-kong-0-1-1/","title":"Release Apache SkyWalking Kong 0.1.1"},{"body":"B站视频地址\n","excerpt":"B站视频地址","ref":"/zh/2021-05-09-summer-2021-asf20/","title":"[视频] 大咖说开源 第二季 第4期 | Apache软件基金会20年"},{"body":"We posted our Response to Elastic 2021 License Change blog 4 months ago. It doesn’t have a big impact in the short term, but because of the incompatibility between SSPL and Apache 2.0, we lost the chance to upgrade the storage server, which concerns the community and our users. So, we have to keep looking for a new option as a replacement.\nThere was an open source project, Open Distro for Elasticsearch, maintained by the AWS team. It is an Apache 2.0-licensed distribution of Elasticsearch enhanced with enterprise security, alerting, SQL, and more. After Elastic relicensed its projects, we talked with their team, and they had plans to take over community leadership and keep maintaining Elasticsearch as it was licensed under Apache 2.0. So, they were in a good position to fork and continue.\nOn April 12th, 2021, AWS announced the new project, OpenSearch, driven by the community and initiated by people from AWS, Red Hat, SAP, Capital One, and Logz.io. 
Read this Introducing OpenSearch blog for more detail.\nOnce this news was public, we began planning the process of evaluating and testing OpenSearch as SkyWalking’s storage option. Read our issue.\nToday, we are glad to ANNOUNCE that OpenSearch can replace ElasticSearch as the storage, and it is still licensed under Apache 2.0.\nThis has been merged into the main branch, and you can find it in the dev doc already.\nOpenSearch OpenSearch storage shares the same configuration as Elasticsearch 7. In order to activate it as storage, set the storage provider to elasticsearch7. Please download apache-skywalking-bin-es7.tar.gz if you want to use OpenSearch as storage.\nThe SkyWalking community will keep an eye on the OpenSearch project, and we look forward to its first GA release.\n NOTE: we have to add a warning NOTICE to the Elasticsearch storage doc:\nNOTICE: Elastic announced through their blog that Elasticsearch will be moving over to the Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose a suitable Elasticsearch version according to your usage.\n","excerpt":"We posted our Response to Elastic 2021 License Change blog 4 months ago. It doesn’t have a big …","ref":"/blog/2021-05-09-opensearch-supported/","title":"OpenSearch, a new storage option to avoid ElasticSearch's SSPL"},{"body":"Hailin Wang (GitHub ID: hailin0) began his SkyWalking journey on Aug 23rd, 2020.\nHe is very active in code contributions and has brought several important features into the SkyWalking ecosystem.\nHe ranks 33rd among the contributors to the main repository[1], focusing on plugin contributions and logging ecosystem integration; see his code contributions[2]. He also started a new and better way for other open-source projects to integrate with SkyWalking.\nHe spent over 2 months making the SkyWalking agent and its plugins a part of Apache DolphinScheduler’s default binary distribution[3]; see this PR[4]. This kind of example has influenced further community development. Our PMC member, Yuguang Zhao, is using the same approach to ship our agent and plugins into the Seata project[5]. As SkyWalking grows, I have no doubt that there will be more integrations of this kind.\nThe SkyWalking project has accepted him as a new committer.\nWelcome, Hailin Wang, to the committer team.\n[1] https://github.com/apache/skywalking/graphs/contributors [2] https://github.com/apache/skywalking/commits?author=hailin0 [3] https://github.com/apache/dolphinscheduler/tree/1.3.6-prepare/ext/skywalking [4] https://github.com/apache/incubator-dolphinscheduler/pull/4852 [5] https://github.com/seata/seata/pull/3652\n","excerpt":"Hailin Wang (GitHub ID: hailin0) began his SkyWalking journey on Aug 23rd, 2020.\nHe is very active …","ref":"/events/welcome-hailin-wang-as-new-committer/","title":"Welcome Hailin Wang as new committer"},{"body":"SkyWalking LUA Nginx 0.5.0 is released. Go to downloads page to find release tars.\n Adapt to Kong agent. Correct the version format luarock.  ","excerpt":"SkyWalking LUA Nginx 0.5.0 is released. Go to downloads page to find release tars.\n Adapt to Kong …","ref":"/events/release-apache-skywalking-lua-nginx-0.5.0/","title":"Release Apache SkyWalking LUA Nginx 0.5.0"},{"body":"SkyWalking 8.5.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Incompatible Change. 
Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. 
Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.5.0 is released. Go to downloads page to find release tars. Changes by Version\nProject …","ref":"/events/release-apache-skywalking-apm-8-5-0/","title":"Release Apache SkyWalking APM 8.5.0"},{"body":"SkyWalking Cloud on Kubernetes 0.3.0 is released. Go to downloads page to find release tars.\n Support special characters in the metric selector of HPA metric adapter. Add the namespace to HPA metric name.  ","excerpt":"SkyWalking Cloud on Kubernetes 0.3.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-3-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.3.0"},{"body":"SkyWalking NodeJS 0.2.0 is released. Go to downloads page to find release tars.\n Add AMQPLib plugin (RabbitMQ). (#34) Add MongoDB plugin. (#33) Add PgPlugin - PosgreSQL. (#31) Add MySQLPlugin to plugins. (#30) Add http protocol of host to http plugins. (#28) Add tag http.method to plugins. (#26) Bugfix: child spans created on immediate cb from op. (#41) Bugfix: async and preparing child entry/exit. (#36) Bugfix: tsc error of dist lib. (#24) Bugfix: AxiosPlugin async() / resync(). (#21) Bugfix: some requests of express / axios are not close correctly. 
(#20) Express plugin uses http wrap explicitly if http plugin disabled. (#42)  ","excerpt":"SkyWalking NodeJS 0.2.0 is released. Go to downloads page to find release tars.\n Add AMQPLib plugin …","ref":"/events/release-apache-skywalking-nodejs-0-2-0/","title":"Release Apache SkyWalking for NodeJS 0.2.0"},{"body":"SkyWalking Python 0.6.0 is released. Go to downloads page to find release tars.\n Fixes:  Segment data loss when gRPC timing out. (#116) sw_tornado plugin async handler status set correctly. (#115) sw_pymysql error when connection haven\u0026rsquo;t db. (#113)    ","excerpt":"SkyWalking Python 0.6.0 is released. Go to downloads page to find release tars.\n Fixes:  Segment …","ref":"/events/release-apache-skywalking-python-0-6-0/","title":"Release Apache SkyWalking Python 0.6.0"},{"body":"","excerpt":"","ref":"/tags/apm/","title":"APM"},{"body":" Origin: End-User Tracing in a SkyWalking-Observed Browser - The New Stack\n Apache SkyWalking: an APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based (Docker, Kubernetes, Mesos) architectures.\nskywalking-client-js: a lightweight client-side JavaScript exception, performance, and tracing library. It provides metrics and error collection to the SkyWalking backend. It also makes the browser the starting point for distributed tracing.\nBackground Web application performance affects the retention rate of users. If a page load time is too long, the user will give up. So we need to monitor the web application to understand performance and ensure that servers are stable, available and healthy. SkyWalking is an APM tool and the skywalking-client-js extends its monitoring to include the browser, providing performance metrics and error collection to the SkyWalking backend.\nPerformance Metrics The skywalking-client-js uses [window.performance] (https://developer.mozilla.org/en-US/docs/Web/API/Window/performance) for performance data collection. From the MDN doc, the performance interface provides access to performance-related information for the current page. It\u0026rsquo;s part of the High Resolution Time API, but is enhanced by the Performance Timeline API, the Navigation Timing API, the User Timing API, and the Resource Timing API. In skywalking-client-js, all performance metrics are calculated according to the Navigation Timing API defined in the W3C specification. We can get a PerformanceTiming object describing our page using the window.performance.timing property. 
The PerformanceTiming interface contains properties that offer performance timing information for various events that occur during the loading and use of the current page.\nWe can better understand these attributes when we see them together in the figure below from W3C:\nThe following list contains the performance metrics in skywalking-client-js, in the form metric name (description): formula, with notes where applicable.
redirectTime (Page redirection time): redirectEnd - redirectStart. Note: if the current document and the document that is redirected to are not from the same origin, redirectStart and redirectEnd are set to 0.
ttfbTime (Time to First Byte): responseStart - requestStart. Note: according to Google Developers.
dnsTime (Time to DNS query): domainLookupEnd - domainLookupStart.
tcpTime (Time to TCP link): connectEnd - connectStart.
transTime (Time to content transfer): responseEnd - responseStart.
sslTime (Time to SSL secure connection): connectEnd - secureConnectionStart. Note: only supports HTTPS.
resTime (Time to resource loading): loadEventStart - domContentLoadedEventEnd. Note: represents synchronously loaded resources in pages.
fmpTime (Time to First Meaningful Paint): no single formula; listen for changes in page elements, traverse each new element, and calculate the total score of these elements. If the element is visible, the score is 1 * weight; if the element is not visible, the score is 0.
domAnalysisTime (Time to DOM analysis): domInteractive - responseEnd.
fptTime (First Paint Time): responseEnd - fetchStart.
domReadyTime (Time to DOM ready): domContentLoadedEventEnd - fetchStart.
loadPageTime (Page full load time): loadEventStart - fetchStart.
ttlTime (Time to interact): domInteractive - fetchStart.
firstPackTime (Time to first package): responseStart - domainLookupStart.
Skywalking-client-js collects those performance metrics and sends them to the OAP (Observability Analysis Platform) server, which aggregates data on the back-end side that is then shown in visualizations on the UI side. Users can optimize the page according to this data.\nException Metrics There are five kinds of errors that can be caught in skywalking-client-js:\n Resource loading errors are captured by window.addEventListener('error', callback, true). window.onerror catches JS execution errors. window.addEventListener('unhandledrejection', callback) is used to catch promise errors. Vue errors are captured by Vue.config.errorHandler. Ajax errors are captured by addEventListener('error', callback); addEventListener('abort', callback); addEventListener('timeout', callback); in the send callback.  Skywalking-client-js traces error data to the OAP server, finally visualizing the data on the UI side. For an error overview of the App, there are several metrics for basic statistics and trends of errors, including the following metrics.\n App Error Count, the total number of errors in the selected time period. App JS Error Rate, the proportion of PV with JS errors in a selected time period to total PV. All of Apps Error Count, Top N Apps error count ranking. All of Apps JS Error Rate, Top N Apps JS error rate ranking. Error Count of Versions in the Selected App, Top N Error Count of Versions in the Selected App ranking. Error Rate of Versions in the Selected App, Top N JS Error Rate of Versions in the Selected App ranking. Error Count of the Selected App, Top N Error Count of the Selected App ranking. Error Rate of the Selected App, Top N JS Error Rate of the Selected App ranking.  
For pages, we use several metrics for basic statistics and trends of errors, including the following metrics:\n Top Unstable Pages / Error Rate, Top N Error Count pages of the Selected version ranking. Top Unstable Pages / Error Count, Top N Error Count pages of the Selected version ranking. Page Error Count Layout, data display of different errors in a period of time.  User Metrics SkyWalking browser monitoring also provides metrics about how the visitors use the monitored websites, such as PV (page views), UV (unique visitors), top N PV (page views), etc.\nIn SPAs (single page applications), the page will be refreshed only once. The traditional method only reports PV once after the page loads, but cannot count the PV of each sub-page, and cannot make other types of logs aggregate by sub-page.\nSkyWalking browser monitoring provides two processing methods for SPA pages:\n  Enable SPA automatic parsing. This method is suitable for most single page application scenarios with the URL hash as the route. In the initialized configuration item, set enableSPA to true, which will turn on the page’s hashchange event listener (triggering re-reporting of PV), and use the URL hash as the page field in other data reporting.\n  Manual reporting. This method can be used in all single page application scenarios, including when the first method is not usable. The following example provides a way to manually update the page name when data is reported. When this method is called, the page PV will be re-reported by default:\n  app.on('routeChange', function (to) { ClientMonitor.setPerformance({ collector: 'http://127.0.0.1:8080', service: 'browser-app', serviceVersion: '1.0.0', pagePath: to.path, autoTracePerf: true, enableSPA: true, }); }); Let’s take a look at the result in the following image. It shows the most popular applications and versions, and the changes of PV over a period of time.\nMake the browser the starting point for distributed tracing SkyWalking browser monitoring intercepts HTTP requests to trace segments and spans. It supports tracking the following kinds of HTTP requests: XMLHttpRequest and fetch. It also supports tracking libraries and tools based on XMLHttpRequest and fetch - such as Axios, SuperAgent, OpenApi, and so on.\nLet’s see how the SkyWalking browser monitoring intercepts HTTP requests:\nAfter this, use window.addEventListener('xhrReadyStateChange', callback) and, according to the readyState value, set sw8 = xxxx in the request header. At the same time, request information is reported to the back-end side. Finally, we can view trace data on the trace page. The following graphic is from the trace page:\nTo see how we listen for fetch requests, let’s look at the source code of fetch (the whatwg-fetch polyfill).\nAs you can see, it creates a promise and a new XMLHttpRequest object. Because the native fetch is built into the browser, simply adding event listeners cannot monitor the code inside fetch. So, to be able to monitor its execution, let’s rewrite fetch:\nimport { fetch } from 'whatwg-fetch'; window.fetch = fetch; In this way, we can intercept fetch requests through the method described above.\nAdditional Resources  End-User Tracing in a SkyWalking-Observed Browser.  
","excerpt":"Origin: End-User Tracing in a SkyWalking-Observed Browser - The New Stack\n Apache SkyWalking: an APM …","ref":"/blog/end-user-tracing-in-a-skywalking-observed-browser/","title":"End-User Tracing in a SkyWalking-Observed Browser"},{"body":"","excerpt":"","ref":"/tags/web-performance/","title":"Web-performance"},{"body":"","excerpt":"","ref":"/tags/design/","title":"Design"},{"body":"","excerpt":"","ref":"/tags/logs/","title":"Logs"},{"body":"SourceMarker is an open-source continuous feedback IDE plugin built on top of Apache SkyWalking, a popular open-source APM system with monitoring, tracing, and diagnosing capabilities for distributed software systems. SkyWalking, a truly holistic system, provides the means for automatically producing, storing, and querying software operation metrics. It requires little to no code changes to implement and is lightweight enough to be used in production. By itself, SkyWalking is a formidable force in the realm of continuous monitoring technology.\nSourceMarker, leveraging the continuous monitoring functionality provided by SkyWalking, creates continuous feedback technology by automatically linking software operation metrics to source code and displaying feedback directly inside of the IDE. While currently only supporting JetBrains-based IDEs and JVM-based programming languages, SourceMarker may be extended to support any number of programming languages and IDEs. Using SourceMarker, software developers can understand and validate software operation inside of their IDE. Instead of charts that indicate the health of the application, software developers can view the health of individual source code components and interpret software operation metrics from a much more familiar perspective. Such capabilities improve productivity as time spent continuously context switching from development to monitoring would be eliminated.\nLogging The benefits of continuous feedback technology are immediately apparent with the ability to view and search logs directly from source code. Instead of tailing log files or viewing logs through the browser, SourceMarker allows software developers to navigate production logs just as easily as they navigate source code. By using the source code as the primary perspective for navigating logs, SourceMarker allows software developers to view logs specific to any package, class, method, or line directly from the context of the source code which resulted in those logs.\nTracing Furthermore, continuous feedback technology offers software developers a deeper understanding of software by explicitly tying the implicit software operation to source code. Instead of visualizing software traces as Gantt charts, SourceMarker allows software developers to step through trace stacks while automatically resolving trace tags and logs. With SourceMarker, software developers can navigate production software traces in much the same way one debugs local applications.\nAlerting Most importantly, continuous feedback technology keeps software developers aware of production software operation. Armed with an APM-powered IDE, every software developer can keep track of the behavior of any method, class, package, and even the entire application itself. 
Moreover, this allows for source code to be the medium through which production bugs are made evident, thereby creating the feasibility of source code with the ability to self-diagnose and convey its own health.\n Download SourceMarker SourceMarker aims to bridge the theoretical and empirical practices of software development through continuous feedback. The goal is to make developing software with empirical data feel natural and intuitive, creating more complete software developers that understand the entire software development cycle.\n https://github.com/sourceplusplus/sourcemarker  This project is still early in its development, so if you think of any ways to improve SourceMarker, please let us know.\n","excerpt":"SourceMarker is an open-source continuous feedback IDE plugin built on top of Apache SkyWalking, a …","ref":"/blog/2021-03-16-continuous-feedback/","title":"SourceMarker: Continuous Feedback for Developers"},{"body":"SkyWalking LUA Nginx 0.4.1 is released. Go to downloads page to find release tars.\n fix: missing constants in the rockspsec.  ","excerpt":"SkyWalking LUA Nginx 0.4.1 is released. Go to downloads page to find release tars.\n fix: missing …","ref":"/events/release-apache-skywalking-lua-nginx-0.4.1/","title":"Release Apache SkyWalking LUA Nginx 0.4.1"},{"body":"SkyWalking LUA Nginx 0.4.0 is released. Go to downloads page to find release tars.\n Add a global field \u0026lsquo;includeHostInEntrySpan\u0026rsquo;, type \u0026lsquo;boolean\u0026rsquo;, mark the entrySpan include host/domain. Add destroyBackendTimer to stop reporting metrics. Doc: set random seed in init_worker phase. Local cache some variables and reuse them in Lua module. Enable local cache and use tablepool to reuse the temporary table.  ","excerpt":"SkyWalking LUA Nginx 0.4.0 is released. Go to downloads page to find release tars.\n Add a global …","ref":"/events/release-apache-skywalking-lua-nginx-0.4.0/","title":"Release Apache SkyWalking LUA Nginx 0.4.0"},{"body":"SkyWalking Client JS 0.4.0 is released. Go to downloads page to find release tars.\n Update stack and message in logs. Fix wrong URL when using relative path in xhr.  ","excerpt":"SkyWalking Client JS 0.4.0 is released. Go to downloads page to find release tars.\n Update stack and …","ref":"/events/release-apache-skywalking-client-js-0-4-0/","title":"Release Apache SkyWalking Client JS 0.4.0"},{"body":"SkyWalking Satellite 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Build the Satellite core structure. Add prometheus self telemetry. Add kafka client plugin. Add none-fallbacker plugin. Add timer-fallbacker plugin. Add nativelog-kafka-forwarder plugin. Add memory-queue plugin. Add mmap-queue plugin. Add grpc-nativelog-receiver plugin. Add http-nativelog-receiver plugin. Add grpc-server plugin. Add http-server plugin. Add prometheus-server plugin.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Build …","ref":"/events/release-apache-skwaylking-satellite-0-1-0/","title":"Release Apache SkyWalking Satellite 0.1.0"},{"body":"Juntao Zhang leads and finished the re-build process of the whole skywalking website. Immigrate to the whole automatic website update, super friendly to users. 
Within the re-building process, he took several months contributions to bring the document of our main repository to host on the SkyWalking website, which is also available for host documentations of other repositories. We were waiting for this for years.\nJust in the website repository, he has 3800 LOC contributions through 26 commits.\nWe are honored to have him on the PMC team.\n","excerpt":"Juntao Zhang leads and finished the re-build process of the whole skywalking website. Immigrate to …","ref":"/events/welcome-juntao-zhang-to-join-the-pmc/","title":"Welcome Juntao Zhang (张峻滔) to join the PMC"},{"body":" Origin: Observe VM Service Meshes with Apache SkyWalking and the Envoy Access Log Service - The New Stack\n Apache SkyWalking: an APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based (Docker, Kubernetes, Mesos) architectures.\nEnvoy Access Log Service: Access Log Service (ALS) is an Envoy extension that emits detailed access logs of all requests going through Envoy.\nBackground In the previous post, we talked about the observability of service mesh under Kubernetes environment, and applied it to the bookinfo application in practice. We also mentioned that, in order to map the IP addresses into services, SkyWalking needs access to the service metadata from a Kubernetes cluster, which is not available for services deployed in virtual machines (VMs). In this post, we will introduce a new analyzer in SkyWalking that leverages Envoy’s metadata exchange mechanism to decouple with Kubernetes. The analyzer is designed to work in Kubernetes environments, VM environments, and hybrid environments. If there are virtual machines in your service mesh, you might want to try out this new analyzer for better observability, which we will demonstrate in this tutorial.\nHow it works The mechanism of how the analyzer works is the same as what we discussed in the previous post. What makes VMs different from Kubernetes is that, for VM services, there are no places where we can fetch the metadata to map the IP addresses into services.\nThe basic idea we present in this article is to carry the metadata along with Envoy’s access logs, which is called metadata-exchange mechanism in Envoy. When Istio pilot-agent starts an Envoy proxy as a sidecar of a service, it collects the metadata of that service from the Kubernetes platform, or a file on the VM where that service is deployed, and injects the metadata into the bootstrap configuration of Envoy. Envoy will carry the metadata transparently when emitting access logs to the SkyWalking receiver.\nBut how does Envoy compose a piece of a complete access log that involves the client side and server side? When a request goes out from Envoy, a plugin of istio-proxy named \u0026ldquo;metadata-exchange\u0026rdquo; injects the metadata into the http headers (with a prefix like x-envoy-downstream-), and the metadata is propagated to the server side. The Envoy sidecar of the server side receives the request and parses the headers into metadata, and puts the metadata into the access log, keyed by wasm.downstream_peer. The server side Envoy also puts its own metadata into the access log keyed by wasm.upstream_peer. 
Hence the two sides of a single request are completed.\nWith the metadata-exchange mechanism, we can use the metadata directly without any extra query.\nExample In this tutorial, we will use another demo application Online Boutique that consists of 10+ services so that we can deploy some of them in VMs and make them communicate with other services deployed in Kubernetes.\nTopology of Online Boutique In order to cover as many cases as possible, we will deploy CheckoutService and PaymentService on VM and all the other services on Kubernetes, so that we can cover the cases like Kubernetes → VM (e.g. Frontend → CheckoutService), VM → Kubernetes (e.g. CheckoutService → ShippingService), and VM → VM ( e.g. CheckoutService → PaymentService).\nNOTE: All the commands used in this tutorial are accessible on GitHub.\ngit clone https://github.com/SkyAPMTest/sw-als-vm-demo-scripts cd sw-als-vm-demo-scripts Make sure to init the gcloud SDK properly before moving on. Modify the GCP_PROJECT in file env.sh to your own project name. Most of the other variables should be OK to work if you keep them intact. If you would like to use ISTIO_VERSION \u0026gt;/= 1.8.0, please make sure this patch is included.\n  Prepare Kubernetes cluster and VM instances 00-create-cluster-and-vms.sh creates a new GKE cluster and 2 VM instances that will be used through the entire tutorial, and sets up some necessary firewall rules for them to communicate with each other.\n  Install Istio and SkyWalking 01a-install-istio.sh installs Istio Operator with spec resources/vmintegration.yaml. In the YAML file, we enable the meshExpansion that supports VM in mesh. We also enable the Envoy access log service and specify the address skywalking-oap.istio-system.svc.cluster.local:11800 to which Envoy emits the access logs. 01b-install-skywalking.sh installs Apache SkyWalking and sets the analyzer to mx-mesh.\n  Create files to initialize the VM 02-create-files-to-transfer-to-vm.sh creates necessary files that will be used to initialize the VMs. 03-copy-work-files-to-vm.sh securely transfers the generated files to the VMs with gcloud scp command. Now use ./ssh.sh checkoutservice and ./ssh.sh paymentservice to log into the two VMs respectively, and cd to the ~/work directory, execute ./prep-checkoutservice.sh on checkoutservice VM instance and ./prep-paymentservice.sh on paymentservice VM instance. The Istio sidecar should be installed and started properly. To verify that, use tail -f /var/logs/istio/istio.log to check the Istio logs. 
The output should be something like:\n2020-12-12T08:07:07.348329Z\tinfo\tsds\tresource:default new connection 2020-12-12T08:07:07.348401Z\tinfo\tsds\tSkipping waiting for gateway secret 2020-12-12T08:07:07.348401Z\tinfo\tsds\tSkipping waiting for gateway secret 2020-12-12T08:07:07.568676Z\tinfo\tcache\tRoot cert has changed, start rotating root cert for SDS clients 2020-12-12T08:07:07.568718Z\tinfo\tcache\tGenerateSecret default 2020-12-12T08:07:07.569398Z\tinfo\tsds\tresource:default pushed key/cert pair to proxy 2020-12-12T08:07:07.949156Z\tinfo\tcache\tLoaded root cert from certificate ROOTCA 2020-12-12T08:07:07.949348Z\tinfo\tsds\tresource:ROOTCA pushed root cert to proxy 2020-12-12T20:12:07.384782Z\tinfo\tsds\tresource:default pushed key/cert pair to proxy 2020-12-12T20:12:07.384832Z\tinfo\tsds\tDynamic push for secret default The dnsmasq configuration address=/.svc.cluster.local/{ISTIO_SERVICE_IP_STUB} also resolves the domain names ended with .svc.cluster.local to Istio service IP, so that you are able to access the Kubernetes services in the VM by fully qualified domain name (FQDN) such as httpbin.default.svc.cluster.local.\n  Deploy demo application Because we want to deploy CheckoutService and PaymentService manually on VM, resources/google-demo.yaml removes the two services from the original YAML . 04a-deploy-demo-app.sh deploys the other services on Kubernetes. Then log into the 2 VMs, run ~/work/deploy-checkoutservice.sh and ~/work/deploy-paymentservice.sh respectively to deploy CheckoutService and PaymentService.\n  Register VMs to Istio Services on VMs can access the services on Kubernetes by FQDN, but that’s not the case when the Kubernetes services want to talk to the VM services. The mesh has no idea where to forward the requests such as checkoutservice.default.svc.cluster.local because checkoutservice is isolated in the VM. Therefore, we need to register the services to the mesh. 04b-register-vm-with-istio.sh registers the VM services to the mesh by creating a \u0026ldquo;dummy\u0026rdquo; service without running Pods, and a WorkloadEntry to bridge the \u0026ldquo;dummy\u0026rdquo; service with the VM service.\n  Done! The demo application contains a load generator service that performs requests repeatedly. We only need to wait a few seconds, and then open the SkyWalking web UI to check the results.\nexport POD_NAME=$(kubectl get pods --namespace istio-system -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o jsonpath=\u0026quot;{.items[0].metadata.name}\u0026quot;) echo \u0026quot;Visit http://127.0.0.1:8080 to use your application\u0026quot; kubectl port-forward $POD_NAME 8080:8080 --namespace istio-system Navigate the browser to http://localhost:8080 . The metrics, topology should be there.\nTroubleshooting If you face any trouble when walking through the steps, here are some common problems and possible solutions:\n  VM service cannot access Kubernetes services? It’s likely the DNS on the VM doesn’t correctly resolve the fully qualified domain names. Try to verify that with nslookup istiod.istio-system.svc.cluster.local. If it doesn’t resolve to the Kubernetes CIDR address, recheck the step in prep-checkoutservice.sh and prep-paymentservice.sh. If the DNS works correctly, try to verify that Envoy has fetched the upstream clusters from the control plane with curl http://localhost:15000/clusters. If it doesn’t contain the target service, recheck prep-checkoutservice.sh.\n  Services are normal but nothing on SkyWalking WebUI? 
Check the SkyWalking OAP logs via kubectl -n istio-system logs -f $(kubectl get pod -A -l "app=skywalking,release=skywalking,component=oap" -o name) and WebUI logs via kubectl -n istio-system logs -f $(kubectl get pod -A -l "app=skywalking,release=skywalking,component=ui" -o name) to see whether there are any error logs. Also, make sure the time zone at the bottom-right of the browser is set to UTC +0.\n  Additional Resources  Observe a Service Mesh with Envoy ALS.  ","excerpt":"Origin: Observe VM Service Meshes with Apache SkyWalking and the Envoy Access Log Service - The New …","ref":"/blog/obs-service-mesh-vm-with-sw-and-als/","title":"Observe VM Service Meshes with Apache SkyWalking and the Envoy Access Log Service"},{"body":"When using the SkyWalking Java agent, people usually propagate context easily. They do not even need to change the business code. However, it becomes harder when you want to propagate context between threads with a ThreadPoolExecutor. You can use the RunnableWrapper in the Maven artifact org.apache.skywalking:apm-toolkit-trace, but this way you must change your code. Development managers usually don’t like this because there may be lots of projects, or lots of Runnable code. If they stop using SkyWalking some day, the added code becomes superfluous and inelegant.\nIs there a way to propagate context without changing the business code? Yes.\nThe SkyWalking Java agent enhances a class by adding a field and implementing an interface. ThreadPoolExecutor is a special class that is used widely; we don’t even know when and where it is loaded. Most JVMs do not allow changes to the class file format for classes that have been loaded previously, so SkyWalking cannot successfully enhance ThreadPoolExecutor by retransforming it once it has been loaded. However, we can apply an advice to the ThreadPoolExecutor#execute method and wrap the Runnable param using our own agent, then let the SkyWalking Java agent enhance the wrapper class. An advice does not change the layout of a class.\nNow we should decide how to do this. You can use the RunnableWrapper in the Maven artifact org.apache.skywalking:apm-toolkit-trace to wrap the param, but then you face another problem. This RunnableWrapper has a plugin whose activation condition is checking for the @TraceCrossThread annotation. The agent core uses net.bytebuddy.pool.TypePool.Default.WithLazyResolution.LazyTypeDescription to find the annotations of a class. The LazyTypeDescription finds annotations by using a URLClassLoader with no URLs if the classloader is null (the bootstrap classloader). So it cannot find the @TraceCrossThread class unless you change the LocationStrategy of the SkyWalking Java agent builder.\nIn this project, I write my own wrapper class, and simply add a plugin with a name-match condition. Next, let me show you how these two agents work together.\n  Move the plugin to the SkyWalking “plugins” directory.\n  Add this agent after the SkyWalking agent, since the wrapper class should not be loaded before the SkyWalking agent instrumentation has finished. For example,\n java -javaagent:/path/to/skywalking-agent.jar -javaagent:/path/to/skywalking-tool-agent-v1.0.0.jar …\n   When our application runs:\n The SkyWalking Java agent adds a transformer, by parsing the plugin, for enhancing the wrapper class in the tool agent. The tool agent loads the wrapper class into the bootstrap classloader. This triggers the previous transformer. The tool agent applies an advice to the ThreadPoolExecutor class, wrapping the java.lang.Runnable param of the “execute” method with the wrapper class (a sketch of such a wrapper is shown below). Now SkyWalking propagates the context with the wrapper class.
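For illustration, the following is a minimal sketch of the kind of bootstrap-loaded wrapper class this post describes. The class name SwRunnableWrapper is made up for this example, and the SkyWalking plugin that matches the wrapper by name and continues the trace context inside run() is not shown; treat it as a sketch of the idea rather than the project's actual code.

// Hypothetical delegating wrapper. The tool agent substitutes it for the
// original Runnable in ThreadPoolExecutor#execute; a SkyWalking plugin with a
// name-match condition then enhances run() so that the trace context captured
// on the submitting thread is continued in the worker thread.
public class SwRunnableWrapper implements Runnable {

    private final Runnable delegate;

    public SwRunnableWrapper(Runnable delegate) {
        this.delegate = delegate;
    }

    @Override
    public void run() {
        // At runtime this method is intercepted by the SkyWalking plugin;
        // the class itself only delegates to the original task.
        delegate.run();
    }
}

By contrast, the manual approach mentioned at the beginning of the post would look like executor.execute(RunnableWrapper.of(task)) at every submission site, which is exactly the business-code change this two-agent setup avoids.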
Enjoy tracing with ThreadPoolExecutor in SkyWalking!\n","excerpt":"When using SkyWalking java agent, people usually propagate context easily. They even do not need to …","ref":"/blog/2021-02-09-skywalking-trace-threadpool/","title":"Apache SkyWalking: How to propagate context between threads when using ThreadPoolExecutor"},{"body":"","excerpt":"","ref":"/tags/java/","title":"Java"},{"body":"SkyWalking CLI 0.6.0 is released. Go to downloads page to find release tars.\n  Features\n Support authorization when connecting to the OAP Add install command and manifest sub-command Add event command and report sub-command    Bug Fixes\n Fix the bug that can’t query JVM instance metrics    Chores\n Set up a simple test with GitHub Actions Reorganize the project layout Update year in NOTICE Add missing license of swck Use license-eye to check license header    ","excerpt":"SkyWalking CLI 0.6.0 is released. Go to downloads page to find release tars.\n  Features\n Support …","ref":"/events/release-apache-skywalking-cli-0-6-0/","title":"Release Apache SkyWalking CLI 0.6.0"},{"body":"","excerpt":"","ref":"/tags/infrastructure-monitoring/","title":"Infrastructure Monitoring"},{"body":" Origin: Tetrate.io blog\n Background Apache SkyWalking, the APM tool for distributed systems, has historically focused on providing observability around tracing and metrics, but service performance is often affected by the host. The newest release, SkyWalking 8.4.0, introduces a new feature for monitoring virtual machines. Users can easily detect possible problems from the dashboard, for example, when CPU usage is overloaded, when there’s not enough memory or disk space, or when the network status is unhealthy, etc.\nHow it works SkyWalking leverages Prometheus and OpenTelemetry for collecting metrics data, as we did for Istio control plane metrics; Prometheus is mature and widely used, and we expect to see increased adoption of the new CNCF project, OpenTelemetry. The SkyWalking OAP Server receives the metrics data in OpenCensus format from OpenTelemetry. The process is as follows:\n Prometheus Node Exporter collects metrics data from the VMs. OpenTelemetry Collector fetches metrics from Node Exporters via the Prometheus Receiver, and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter. The SkyWalking OAP Server parses the expressions with MAL to filter/calculate/aggregate and store the results. The expression rules are in /config/otel-oc-rules/vm.yaml. We can now see the data on the SkyWalking WebUI dashboard.  
What to monitor SkyWalking provides default monitoring metrics including:\n CPU Usage (%) Memory RAM Usage (MB) Memory Swap Usage (MB) CPU Average Used CPU Load Memory RAM (total/available/used MB) Memory Swap (total/free MB) File System Mount point Usage (%) Disk R/W (KB/s) Network Bandwidth Usage (receive/transmit KB/s) Network Status (tcp_curr_estab/tcp_tw/tcp_alloc/sockets_used/udp_inuse) File fd Allocated  The following is how it looks when we monitor Linux:\nHow to use To enable this feature, we need to install Prometheus Node Exporter and OpenTelemetry Collector and activate the VM monitoring rules in SkyWalking OAP Server.\nInstall Prometheus Node Exporter wget https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-amd64.tar.gz tar xvfz node_exporter-1.0.1.linux-amd64.tar.gz cd node_exporter-1.0.1.linux-amd64 ./node_exporter In linux Node Exporter exposes metrics on port 9100 by default. When it is running, we can get the metrics from the /metrics endpoint. Use a web browser or command curl to verify.\ncurl http://localhost:9100/metrics We should see all the metrics from the output like:\n# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile=\u0026#34;0\u0026#34;} 7.7777e-05 go_gc_duration_seconds{quantile=\u0026#34;0.25\u0026#34;} 0.000113756 go_gc_duration_seconds{quantile=\u0026#34;0.5\u0026#34;} 0.000127199 go_gc_duration_seconds{quantile=\u0026#34;0.75\u0026#34;} 0.000147778 go_gc_duration_seconds{quantile=\u0026#34;1\u0026#34;} 0.000371894 go_gc_duration_seconds_sum 0.292994058 go_gc_duration_seconds_count 2029 ... Note: We only need to install Node Exporter, rather than Prometheus server. If you want to get more information about Prometheus Node Exporter see: https://prometheus.io/docs/guides/node-exporter/\nInstall OpenTelemetry Collector We can quickly install a OpenTelemetry Collector instance by using docker-compose with the following steps:\n Create a directory to store the configuration files, like /usr/local/otel. 
Create docker-compose.yaml and otel-collector-config.yaml in this directory as represented below:  docker-compose.yaml
version: "2"
services:
  # Collector
  otel-collector:
    # Specify the image to start the container from
    image: otel/opentelemetry-collector:0.19.0
    # Set the otel-collector config file
    command: ["--config=/etc/otel-collector-config.yaml"]
    # Map the config file to the host directory
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "13133:13133" # health_check extension
      - "55678"       # OpenCensus receiver
otel-collector-config.yaml
extensions:
  health_check:
# A receiver is how data gets into the OpenTelemetry Collector
receivers:
  # Set the Prometheus Receiver to collect metrics from targets
  # It supports the full set of Prometheus configuration
  prometheus:
    config:
      scrape_configs:
        - job_name: 'otel-collector'
          scrape_interval: 10s
          static_configs:
            # Replace the IPs with the IPs of your VMs that have Node Exporter installed
            - targets: ['vm1:9100']
            - targets: ['vm2:9100']
            - targets: ['vm3:9100']
processors:
  batch:
# An exporter is how data gets sent to different systems/back-ends
exporters:
  # Exports metrics via gRPC using OpenCensus format
  opencensus:
    endpoint: "docker.for.mac.host.internal:11800" # The OAP Server address
    insecure: true
  logging:
    logLevel: debug
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [logging, opencensus]
  extensions: [health_check]
In this directory, use the docker-compose command to start up the container:  docker-compose up -d
After the container is up and running, you should see metrics already exported in the logs:
... Metric #165
Descriptor:
     -> Name: node_network_receive_compressed_total
     -> Description: Network device statistic receive_compressed.
     -> Unit:
     -> DataType: DoubleSum
     -> IsMonotonic: true
     -> AggregationTemporality: AGGREGATION_TEMPORALITY_CUMULATIVE
DoubleDataPoints #0
Data point labels:
     -> device: ens4
StartTime: 1612234754364000000
Timestamp: 1612235563448000000
Value: 0.000000
DoubleDataPoints #1
Data point labels:
     -> device: lo
StartTime: 1612234754364000000
Timestamp: 1612235563448000000
Value: 0.000000
... If you want to get more information about the OpenTelemetry Collector see: https://opentelemetry.io/docs/collector/\nSet up SkyWalking OAP Server To activate the oc handler and the vm relevant rules, set your environment variables:\nSW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OC_RULES=vm Note: If there are other rules already activated, you can add vm using , as a separator.\nSW_OTEL_RECEIVER_ENABLED_OC_RULES=vm,oap Start the SkyWalking OAP Server.\nDone! After all of the above steps are completed, check out the SkyWalking WebUI. Dashboard VM provides the default metrics of all observed virtual machines. Note: Clear the browser local cache if you used it to access deployments of previous SkyWalking versions.\nAdditional Resources  Read more about the SkyWalking 8.4 release highlights. Get more SkyWalking updates on Twitter.  ","excerpt":"Origin: Tetrate.io blog\n Background Apache SkyWalking, the APM tool for distributed …","ref":"/blog/2021-02-07-infrastructure-monitoring/","title":"SkyWalking 8.4 provides infrastructure monitoring"},{"body":" Origin: Tetrate.io blog\n The Apache SkyWalking team today announced that the 8.4 release is generally available. 
This release fills the gap between all previous versions of SkyWalking and the logging domain area. The release also advances SkyWalking’s capabilities for infrastructure observability, starting with virtual machine monitoring.\nBackground SkyWalking has historically focused on the tracing and metrics fields of observability. As its features for tracing, metrics and service level monitoring have become more and more powerful and stable, the SkyWalking team has started to explore new scenarios covered by observability. Because service performance is reflected in the logs, and is highly impacted by the infrastructure on which it runs, SkyWalking brings these two fields into the 8.4 release. This release blog briefly introduces the two new features as well as some other notable changes.\nLogs Metrics, tracing, and logging are considered the three pillars of observability [1]. SkyWalking had the full features of metrics and tracing prior to 8.4; today, as 8.4 is released, the last piece of the jigsaw is now in place.\nFigure 1: Logs Collected By SkyWalking\nFigure 2: Logs Collected By SkyWalking\nThe Java agent firstly provides SDKs to enhance the widely-used logging frameworks, log4j (1.x and 2.x) [2] and logback [3], and send the logs to the SkyWalking backend (OAP). The latter is able to collect logs from wherever the protocol is implemented. This is not a big deal, but when it comes to the correlation between logs and traces, the traditional solution is to print the trace IDs in the logs, and pick the IDs in the error logs to query the related traces. SkyWalking just simplifies the workflow by correlating the logs and traces natively. Navigating between traces and their related logs is as simple as clicking a button.\nFigure 3: Correlation Between Logs and Traces\nInfrastructure Monitoring SkyWalking is known as an application performance monitoring tool. One of the most important factors that impacts the application’s performance is the infrastructure on which the application runs. In the 8.4 release, we added the monitoring metrics of virtual machines into the dashboard.\nFigure 4: VM Metrics\nFundamental metrics such as CPU Used, Memory Used, Disk Read / Write and Network Usage are available on the dashboard. And as usual, those metrics are also available to be configured as alarm triggers when needed.\nDynamic Configurations at Agent Side Dynamic configuration at the backend side has long existed in SkyWalking for several versions. Now, it finally comes to the agent side! Prior to 8.4, you’d have to restart the target services when you modify some configuration items of the agent \u0026ndash; for instance, sampling rate (agent side), ignorable endpoint paths, etc. Now, say goodbye to rebooting. Modifying configurations is not the only usage of the dynamic configuration mechanism. The latter gives countless possibilities to the agent side in terms of dynamic behaviours, e.g. enabling / disabling plugins, enabling / disabling the whole agent, etc. Just imagine!\nGrouped Service Topology This enhancement is from the UI. SkyWalking backend supports grouping the services by user-defined dimensions. In a real world use case, the services are usually grouped by business group or department. When a developer opens the topology map, out of hundreds of services, he or she may just want to focus on the services in charge. 
The grouped service topology comes to the rescue: one can now choose to display only services belonging to a specified group.\nFigure 5: Grouped Service Topology\nOther Notable Enhancements  Agent: resolves domain names to look up backend service IP addresses. Backend: meter receiver supports meter analysis language (MAL). Backend: several CVE fixes. Backend: supports Envoy {AccessLog,Metrics}Service API V3 and adopts MAL.  Links  [1] https://peter.bourgon.org/blog/2017/02/21/metrics-tracing-and-logging.html [2] https://logging.apache.org/log4j/2.x/ [3] http://logback.qos.ch  Additional Resources  Read more about the SkyWalking 8.4 release highlights. Get more SkyWalking updates on Twitter.  ","excerpt":"Origin: Tetrate.io blog\n The Apache SkyWalking team today announced the 8.4 release is generally …","ref":"/blog/skywalking8-4-release/","title":"Apache SkyWalking 8.4: Logs, VM Monitoring, and Dynamic Configurations at Agent Side"},{"body":"","excerpt":"","ref":"/tags/release-blog/","title":"Release Blog"},{"body":"SkyWalking 8.4.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. 
Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. 
Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.4.0 is released. Go to downloads page to find release tars. Changes by Version\nProject …","ref":"/events/release-apache-skywalking-apm-8-4-0/","title":"Release Apache SkyWalking APM 8.4.0"},{"body":"Background The verifier is an important part of the next generation End-to-End Testing framework (NGE2E), which is responsible for verifying whether the actual output satisfies the expected template.\nDesign Thinking We will implement the verifier with Go template, plus some enhancements. Firstly, users need to write a Go template file with provided functions and actions to describe how the expected data looks like. Then the verifer renders the template with the actual data object. Finally, the verifier compares the rendered output with the actual data. If the rendered output is not the same with the actual output, it means the actual data is inconsist with the expected data. Otherwise, it means the actual data match the expected data. On failure, the verifier will also print out what are different between expected and actual data.\nBranches / Actions The verifier inherits all the actions from the standard Go template, such as if, with, range, etc. 
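For example, the standard range action can be combined with the helper functions introduced below to assert that every element of a list meets some conditions. The following is only a hedged sketch (the metrics field names mirror the examples later in this post, and the rendered layout must mirror the actual data exactly for the comparison to pass):

# expected.data.yaml (fragment)
metrics:
{{- range .metrics }}
- name: {{ notEmpty .name }}
  id: {{ notEmpty .id }}
  value: {{ gt .value 0 }}
{{- end }}

Unlike the contains action introduced next, this plain range loop requires every element of metrics, in order, to have a non-empty name and id and a value greater than 0.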
In addition, we also provide some custom actions to satisfy our own needs.\nList Elements Match contains checks whether the actual list contains elements that match the given template.\nExamples:\nmetrics:{{- contains .metrics }}- name:{{notEmpty .name }}id:{{notEmpty .id }}value:{{gt .value 0 }}{{- end }}It means that the list metrics must contain an element whose name and id are not empty, and whose value is greater than 0.\nmetrics:{{- contains .metrics }}- name:p95value:{{gt .value 0 }}- name:p99value:{{gt .value 0 }}{{- end }}This means that the list metrics must contain an element named p95 with a value greater than 0, and an element named p99 with a value greater than 0. Besides these two elements, the list metrics may or may not have other arbitrary elements.\nFunctions Users can use the provided functions in the template to describe the expected data.\nNot Empty notEmpty checks that the string s is not empty.\nExample:\nid:{{notEmpty .id }}Regexp match regexp checks that the string s matches the regular expression pattern.\nExamples:\nlabel:{{regexp .label \u0026#34;ratings.*\u0026#34; }}Base64 b64enc s returns the Base64-encoded string of s.\nExamples:\nid:{{b64enc \u0026#34;User\u0026#34; }}.static-suffix# this evaluates to the Base64-encoded string of \u0026#34;User\u0026#34;, concatenated with the static suffix \u0026#34;.static-suffix\u0026#34;Result:\nid:VXNlcg==.static-suffixFull Example Here is an example of expected data:\n# expected.data.yamlnodes:- id:{{b64enc \u0026#34;User\u0026#34; }}.0name:Usertype:USERisReal:false- id:{{b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1name:Your_ApplicationNametype:TomcatisReal:true- id:{{$h2ID := (index .nodes 2).id }}{{ notEmpty $h2ID }}# We assert that nodes[2].id is not empty and save it to variable `h2ID` for later usename:localhost:-1type:H2isReal:falsecalls:- id:{{notEmpty (index .calls 0).id }}source:{{b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1target:{{$h2ID }}# We use the previously assigned variable `h2ID` to assert that the `target` is equal to the `id` of nodes[2]detectPoints:- CLIENT- id:{{b64enc \u0026#34;User\u0026#34; }}.0-{{ b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1source:{{b64enc \u0026#34;User\u0026#34; }}.0target:{{b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1detectPoints:- SERVERwill validate this data:\n# actual.data.yamlnodes:- id:VXNlcg==.0name:Usertype:USERisReal:false- id:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1name:Your_ApplicationNametype:TomcatisReal:true- id:bG9jYWxob3N0Oi0x.0name:localhost:-1type:H2isReal:falsecalls:- id:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1-bG9jYWxob3N0Oi0x.0source:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1detectPoints:- CLIENTtarget:bG9jYWxob3N0Oi0x.0- id:VXNlcg==.0-WW91cl9BcHBsaWNhdGlvbk5hbWU=.1source:VXNlcg==.0detectPoints:- SERVERtarget:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1# expected.data.yamlmetrics:{{- contains .metrics }}- name:{{notEmpty .name }}id:{{notEmpty .id }}value:{{gt .value 0 }}{{- end }}will validate this data:\n# actual.data.yamlmetrics:- name:business-zone::projectAid:YnVzaW5lc3Mtem9uZTo6cHJvamVjdEE=.1value:1- name:system::load balancer1id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMQ==.1value:0- name:system::load balancer2id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMg==.1value:0and will report an error when validating this data, because there is no element with a value greater than 0:\n# actual.data.yamlmetrics:- name:business-zone::projectAid:YnVzaW5lc3Mtem9uZTo6cHJvamVjdEE=.1value:0- name:system::load balancer1id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMQ==.1value:0- name:system::load 
balancer2id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMg==.1value:0The contains action does an unordered list verification; in order to do an ordered list verification, you can simply use the basic rules like this:\n# expected.data.yamlmetrics:- name:p99value:{{gt (index .metrics 0).value 0 }}- name:p95value:{{gt (index .metrics 1).value 0 }}which expects the actual metrics list to be exactly ordered, with the first element named p99 and a value greater than 0, and the second element named p95 and a value greater than 0.\n","excerpt":"Background The verifier is an important part of the next generation End-to-End Testing framework …","ref":"/blog/2021-02-01-e2e-verifier-design/","title":"[Design] The Verifier of NGE2E"},{"body":"","excerpt":"","ref":"/tags/testing/","title":"Testing"},{"body":"SkyWalking Cloud on Kubernetes 0.2.0 is released. Go to downloads page to find release tars.\n Introduce custom metrics adapter to SkyWalking OAP cluster for Kubernetes HPA autoscaling. Add RBAC files and service account to support Kubernetes coordination. Add default and validation webhooks to operator controllers. Add UI CRD to deploy skywalking UI server. Add Fetcher CRD to fetch metrics from other telemetry system, for example, Prometheus.  ","excerpt":"SkyWalking Cloud on Kubernetes 0.2.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-2-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.2.0"},{"body":"Apache SkyWalking is an open source APM for distributed systems and an Apache Software Foundation top-level project.\nOn Jan. 11th, 2021, we noticed the Tencent Cloud service, Tencent Service Watcher (TSW), for the first time. Due to the similar short name (SkyWalking is also called SW in the community), we connected with the service team of Tencent Cloud and kindly asked about it.\nThey initially replied that TSW is purely developed by the Tencent team itself and doesn\u0026rsquo;t have any code dependency on SkyWalking. We didn\u0026rsquo;t push further at that time.\nBut one week later, on Jan. 18th, 2021, our V.P., Sheng, received another report from Haoyang, a SkyWalking PMC member, through WeChat DM (direct message). He provided complete evidence to prove that TSW actually re-distributed SkyWalking\u0026rsquo;s Java agent. We keep one copy of their agent\u0026rsquo;s distribution (as of Jan. 18th), which can be downloaded here.\nSome typical pieces of evidence are listed here:\n  ServiceManager is copied and its package name changed in the TSW agent.   ContextManager is copied and its package name changed in the TSW agent.   At the same time, we checked their tsw-client-package.zip; it didn\u0026rsquo;t include SkyWalking\u0026rsquo;s LICENSE and NOTICE. Also, they didn\u0026rsquo;t mention on their website that the TSW agent is a re-distribution of SkyWalking.\nWith all the above information, we had enough reason to believe, from the technical perspective, that they were violating the Apache 2.0 License.\nStarting from Jan. 18th, 2021, we sent the mail [Apache 2.0 License Violation] Tencent Cloud TSW service doesn't follow the Apache 2.0 License to brief the SkyWalking PMC, and took the following actions to connect with Tencent.\n Made a direct call to the Tencent Open Source Office. Connected with the Tencent Cloud TVP program committee, as Sheng Wu (our V.P.) is a Tencent Cloud TVP. Talked with the Tencent Cloud team lead.  Through all the above channels, we provided the evidence of the copy-and-redistribute behaviors, requested them to re-evaluate the statements on their website, and asked them to follow the License\u0026rsquo;s requirements.\nResolution On the night of Jan. 
19th (UTC+8), 2021, we received a response from the Tencent Cloud team. They admitted the violation and made the following changes:\n  The Tencent Cloud TSW service page now states that the agent is a fork (re-distribution) of the Apache SkyWalking agent.   TSW agent distributions now include SkyWalking\u0026rsquo;s LICENSE and NOTICE. Below is the screenshot; you can download the distribution from their product page. We keep a copy of their Jan. 19th, 2021 distribution here.   We have updated the status on the PMC mailing list. This license violation issue has been resolved for now.\nThe SkyWalking community and project management committee will keep our eyes on Tencent TSW. ","excerpt":"Apache SkyWalking is an open source APM for distributed systems and an Apache Software Foundation top-level …","ref":"/blog/2021-01-23-tencent-cloud-violates-aplv2/","title":"[Resolved][License Issue] Tencent Cloud TSW service violates the Apache 2.0 License when using SkyWalking."},{"body":" 第一节:开篇介绍 第二节:数字游戏(Number Game) 第三节:社区原则(Community “Principles”) 第四节:基金会原则(For public good) 第五节:一些不太好的事情  B站视频地址\n","excerpt":"第一节:开篇介绍 第二节:数字游戏(Number Game) 第三节:社区原则(Community “Principles”) 第四节:基金会原则(For public good) 第五节:一些不太好 …","ref":"/zh/2021-01-21-educate-community/","title":"[视频] 开放原子开源基金会2020年度峰会 - Educate community Over Support community"},{"body":"Elastic announced their license change, Upcoming licensing changes to Elasticsearch and Kibana.\n We are moving our Apache 2.0-licensed source code in Elasticsearch and Kibana to be dual licensed under Server Side Public License (SSPL) and the Elastic License, giving users the choice of which license to apply. This license change ensures our community and customers have free and open access to use, modify, redistribute, and collaborate on the code. It also protects our continued investment in developing products that we distribute for free and in the open by restricting cloud service providers from offering Elasticsearch and Kibana as a service without contributing back. This will apply to all maintained branches of these two products and will take place before our upcoming 7.11 release. Our releases will continue to be under the Elastic License as they have been for the last three years.\n Also, they provide the FAQ page for more information about the impact on users, developers, and vendors.\nFrom the perspective of the Apache Software Foundation, SSPL has been confirmed as a Category X license (https://www.apache.org/legal/resolved.html#category-x), which means a hard dependency on it as a part of the core is not allowed. With that, we can\u0026rsquo;t rely on Elasticsearch alone anymore; we need to consider other storage options. Right now, we still have InfluxDB, TiDB, and the H2 server as storage options that are Apache 2.0 licensed.\nAs Elasticsearch remains an optional storage plugin, we need to focus on the client driver license. Right now, we are only using the ElasticSearch 7.5.0 and 6.3.2 drivers, which are both Apache 2.0 licensed. So, we are safe. For further upgrades, here is their announcement. They answer the following typical cases in the FAQ page.\n  I build a SaaS application using Elasticsearch as the backend, how does this affect me?\n This source code license change should not affect you - you can use our default distribution or develop applications on top of it for free, under the Elastic License. This source-available license does not contain any copyleft provisions and the default functionality is free of charge. 
For a specific example, you can see our response to a question around this at Magento.\nOur users still could use, redistribute, sale the products/services, based on SkyWalking, even they are using self hosting Elastic Search unmodified server.\n  I\u0026rsquo;m using Elasticsearch via APIs, how does this change affect me?\n This change does not affect how you use client libraries to access Elasticsearch. Our client libraries remain licensed under Apache 2.0, with the exception of our Java High Level Rest Client (Java HLRC). The Java HLRC has dependencies on the core of Elasticsearch, and as a result this client library will be licensed under the Elastic License. Over time, we will eliminate this dependency and move the Java HLRC to be licensed under Apache 2.0. Until that time, for the avoidance of doubt, we do not consider using the Java HLRC as a client library in development of an application or library used to access Elasticsearch to constitute a derivative work under the Elastic License, and this will not have any impact on how you license the source code of your application using this client library or how you distribute it.\nThe client driver license incompatible issue will exist, we can\u0026rsquo;t upgrade the driver(s) until they release the Apache 2.0 licensed driver jars. But users are still safe to upgrade the drivers by themselves.\n Apache SkyWalking will discuss the further actions here. If you have any question, welcome to ask. In the later 2021, we will begin to invest the posibility of creating SkyWalking\u0026rsquo;s observability database implementation.\n","excerpt":"Elastic announced their license change, Upcoming licensing changes to Elasticsearch and Kibana.\n We …","ref":"/blog/2021-01-17-elastic-change-license/","title":"Response to Elastic 2021 License Change"},{"body":"SkyWalking Client JS 0.3.0 is released. Go to downloads page to find release tars.\n Support tracing starting at the browser. Add traceSDKInternal SDK for tracing SDK internal RPC. Add detailMode SDK for tracing http method and url as tags in spans. Fix conditions of http status.  ","excerpt":"SkyWalking Client JS 0.3.0 is released. Go to downloads page to find release tars.\n Support tracing …","ref":"/events/release-apache-skywalking-client-js-0-3-0/","title":"Release Apache SkyWalking Client JS 0.3.0"},{"body":"SkyWalking Eyes 0.1.0 is released. Go to downloads page to find release tars.\n License Header  Add check and fix command. check results can be reported to pull request as comments. fix suggestions can be filed on pull request as edit suggestions.    ","excerpt":"SkyWalking Eyes 0.1.0 is released. Go to downloads page to find release tars.\n License Header  Add …","ref":"/events/release-apache-skywalking-eyes-0-1-0/","title":"Release Apache SkyWalking Eyes 0.1.0"},{"body":"SkyWalking NodeJS 0.1.0 is released. Go to downloads page to find release tars.\n Initialize project core codes. Built-in http/https plugin. Express plugin. Axios plugin.  ","excerpt":"SkyWalking NodeJS 0.1.0 is released. Go to downloads page to find release tars.\n Initialize project …","ref":"/events/release-apache-skywalking-nodejs-0-1-0/","title":"Release Apache SkyWalking for NodeJS 0.1.0"},{"body":"SkyWalking Python 0.5.0 is released. 
Go to downloads page to find release tars.\n  New plugins\n Pyramid Plugin (#102) AioHttp Plugin (#101) Sanic Plugin (#91)    API and enhancements\n @trace decorator supports async functions Supports async task context Optimized path trace ignore Moved exception check to Span.__exit__ Moved Method \u0026amp; Url tags before requests    Fixes:\n BaseExceptions not recorded as errors Allow pending data to send before exit sw_flask general exceptions handled Make skywalking logging Non-global    Chores and tests\n Make tests really run on specified Python version Deprecate 3.5 as it\u0026rsquo;s EOL    ","excerpt":"SkyWalking Python 0.5.0 is released. Go to downloads page to find release tars.\n  New plugins …","ref":"/events/release-apache-skywalking-python-0-5-0/","title":"Release Apache SkyWalking Python 0.5.0"},{"body":"Apache SkyWalking is an open source APM for distributed system. Provide tracing, service mesh observability, metrics analysis, alarm and visualization.\nJust 11 months ago, on Jan. 20th, 2020, SkyWalking hit the 200 contributors mark. With the growth of the project and the community, SkyWalking now includes over 20 sub(ecosystem) projects covering multiple language agents and service mesh, integration with mature open source projects, like Prometheus, Spring(Sleuth), hundreds of libraries to support all tracing/metrics/logs fields. In the past year, the number of contributors grows super astoundingly , and all its metrics point to its community vibrancy. Many corporate titans are already using SkyWalking in a large-scale production environment, including, Alibaba, Huawei, Baidu, Tencent, etc.\nRecently, our SkyWalking main repository overs 300 contributors.\nOur website has thousands of views from most countries in the world every week.\nAlthough we know that, the metrics like GitHub stars and the numbers of open users and contributors, are not a determinant of vibrancy, they do show the trend, we are very proud to share the increased numbers here, too.\nWe double those numbers and are honored with the development of our community.\nThank you, all of our contributors. Not just these 300 contributors of the main repository, or nearly 400 contributors in all repositories, counted by GitHub. There are countless people contributing codes to SkyWalking\u0026rsquo;s subprojects, ecosystem projects, and private fork versions; writing blogs and guidances, translating documents, books, and presentations; setting up learning sessions for new users; convincing friends to join the community as end-users, contributors, even committers. Companies behinds those contributors support their employees to work with the community to provide feedback and contribute the improvements and features upstream. Conference organizers share the stages with speakers from the SkyWalking community.\nSkyWalking can’t make this happen without your help. You made this community extraordinary.\nAt this crazy distributed computing and cloud native age, we as a community could make DEV, OPS, and SRE teams' work easier by locating the issue(s) in the haystack quicker than before, like why we named the project as SkyWalking, we will have a clear site line when you stand on the glass bridge Skywalk at Grand Canyon West.\n 376 Contributors counted by GitHub account are following. Dec. 22st, 2020. 
Generated by a tool deveoped by Yousa\n 1095071913 50168383 Ahoo-Wang AirTrioa AlexanderWert AlseinX Ax1an BFergerson BZFYS CharlesMaster ChaunceyLin5152 CommissarXia Cvimer Doublemine ElderJames EvanLjp FatihErdem FeynmanZhou Fine0830 FingerLiu Gallardot GerryYuan HackerRookie Heguoya Hen1ng Humbertzhang IanCao IluckySi Indifer J-Cod3r JaredTan95 Jargon96 Jijun JohnNiang Jozdortraz Jtrust Just-maple KangZhiDong LazyLei LiWenGu Liu-XinYuan Miss-you O-ll-O Patrick0308 QHWG67 Qiliang RandyAbernethy RedzRedz Runrioter SataQiu ScienJus SevenPointOld ShaoHans Shikugawa SoberChina SummerOfServenteen TJ666 TerrellChen TheRealHaui TinyAllen TomMD ViberW Videl WALL-E WeihanLi WildWolfBang WillemJiang Wooo0 XhangUeiJong Xlinlin YczYanchengzhe YoungHu YunaiV ZhHong ZhuoSiChen ZS-Oliver a198720 a526672351 acurtain adamni135 adermxzs adriancole aeolusheath agile6v aix3 aiyanbo ajanthan alexkarezin alonelaval amogege amwyyyy arugal ascrutae augustowebd bai-yang beckhampu beckjin beiwangnull bigflybrother bostin brucewu-fly c1ay candyleer carlvine500 carrypann cheenursn cheetah012 chenpengfei chenvista chess-equality chestarss chidaodezhongsheng chopin-d clevertension clk1st cngdkxw codeglzhang codelipenghui coder-yqj coki230 coolbeevip crystaldust cui-liqiang cuiweiwei cyberdak cyejing dagmom dengliming devkanro devon-ye dimaaan dingdongnigetou dio dmsolr dominicqi donbing007 dsc6636926 duotai dvsv2 dzx2018 echooymxq efekaptan eoeac evanxuhe feelwing1314 fgksgf fuhuo geektcp geomonlin ggndnn gitter-badger glongzh gnr163 gonedays grissom-grissom grissomsh guodongq guyukou gxthrj gzshilu hailin0 hanahmily haotian2015 haoyann hardzhang harvies hepyu heyanlong hi-sb honganan hsoftxl huangyoje huliangdream huohuanhuan innerpeacez itsvse jasonz93 jialong121 jinlongwang jjlu521016 jjtyro jmjoy jsbxyyx justeene juzhiyuan jy00464346 kaanid karott kayleyang kevinyyyy kezhenxu94 kikupotter kilingzhang killGC klboke ksewen kuaikuai kun-song kylixs landonzeng langke93 langyan1022 langyizhao lazycathome leemove leizhiyuan libinglong lilien1010 limfriend linkinshi linliaoy liuhaoXD liuhaoyang liuyanggithup liuzhengyang liweiv lkxiaolou llissery louis-zhou lpf32 lsyf lucperkins lujiajing1126 lunamagic1978 lunchboxav lxliuxuankb lytscu lyzhang1999 magic-akari makingtime maolie masterxxo maxiaoguang64 membphis mestarshine mgsheng michaelsembwever mikkeschiren mm23504570 momo0313 moonming mrproliu muyun12 nacx neatlife neeuq nic-chen nikitap492 nileblack nisiyong novayoung oatiz oflebbe olzhy onecloud360 osiriswd peng-yongsheng pengweiqhca potiuk purgeyao qijianbo010 qinhang3 qiuyu-d qqeasonchen qxo raybi-asus refactor2 remicollet rlenferink rootsongjc rovast scolia sdanzo seifeHu shiluo34 sikelangya simonlei sk163 snakorse songzhendong songzhian sonxy spacewander stalary stenio2011 stevehu stone-wlg sungitly surechen swartz-k sxzaihua tanjunchen tankilo taskmgr tbdpmi terranhu terrymanu tevahp thanq thebouv tianyuak tincopper tinyu0 tom-pytel tristaZero tristan-tsl trustin tsuilouis tuohai666 tzsword-2020 tzy1316106836 vcjmhg vision-ken viswaramamoorthy wankai123 wbpcode web-xiaxia webb2019 weiqiang333 wendal wengangJi wenjianzhang whfjam wind2008hxy withlin wqr2016 wu-sheng wuguangkuo wujun8 wuxingye x22x22 xbkaishui xcaspar xiaoxiangmoe xiaoy00 xinfeingxia85 xinzhuxiansheng xudianyang yanbw yanfch yang-xiaodong yangxb2010000 yanickxia yanmaipian yanmingbi yantaowu yaowenqiang yazong ychandu ycoe yimeng yu199195 yuqichou yuyujulin yymoth zaunist zaygrzx zcai2 zeaposs zhang98722 zhanghao001 zhangjianweibj zhangkewei 
zhangsean zhaoyuguang zhentaoJin zhousiliang163 zhuCheer zifeihan zkscpqm zoidbergwill zoumingzm zouyx zshit zxbu zygfengyuwuzu  ","excerpt":"Apache SkyWalking is an open source APM for distributed system. Provide tracing, service mesh …","ref":"/blog/2021-01-01-300-contributors-mark/","title":"Celebrate SkyWalking single repository hits the 300 contributors mark"},{"body":"","excerpt":"","ref":"/zh_tags/open-source-contribution/","title":"Open Source Contribution"},{"body":"","excerpt":"","ref":"/zh_tags/open-source-promotion-plan/","title":"Open Source Promotion Plan"},{"body":"Ke Zhang (a.k.a. HumbertZhang) mainly focuses on the SkyWalking Python agent, he had participated in the \u0026ldquo;Open Source Promotion Plan - Summer 2020\u0026rdquo; and completed the project smoothly, and won the award \u0026ldquo;Most Potential Students\u0026rdquo; that shows his great willingness to continuously contribute to our community.\nUp to date, he has submitted 8 PRs in the Python agent repository, 7 PRs in the main repo, all in total include ~2000 LOC.\nAt Dec. 13th, 2020, the project management committee (PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome to join the committer team, Ke Zhang!\n","excerpt":"Ke Zhang (a.k.a. HumbertZhang) mainly focuses on the SkyWalking Python agent, he had participated in …","ref":"/events/welcome-ke-zhang-as-new-committer/","title":"Welcome Ke Zhang (张可) as new committer"},{"body":"今年暑假期间我参加了开源软件供应链点亮计划—暑期 2020 的活动,在这个活动中,我主要参加了 Apache SkyWalking 的 Python Agent 的开发,最终项目顺利结项并获得了”最具潜力奖“,今天我想分享一下我参与这个活动以及开源社区的感受与收获。\n缘起 其实我在参加暑期 2020 活动之前就听说过 SkyWalking 了。我研究生的主要研究方向是微服务和云原生,组里的学长们之前就在使用 SkyWalking 进行一些研究工作,也是通过他们,我了解到了 OpenTracing, SkyWalking 等与微服务相关的 Tracing 工具以及 APM 等,当时我就在想如果有机会可以深度参加这些开源项目就好了。 巧的是,也正是在差不多的时候,本科的一个学长发给了我暑期 2020 活动的链接,我在其中惊喜的发现了 SkyWalking 项目。\n虽然说想要参与 SkyWalking 的开发,但是真的有了机会我却有一些不自信——这可是 Star 上万的 Apache 顶级项目。万幸的是在暑期 2020 活动中,每一个社区都提供了很多题目以供选择,想参与的同学可以提前对要做的事情有所了解,并可以提前做一些准备。我当时也仔细地浏览了项目列表,最终决定申请为 Python Agent 支持 Flask 或 Django 埋点的功能。当时主要考虑的是,我对 Python 语言比较熟悉,同时也有使用 Flask 等 web 框架进行开发的经验,我认为应该可以完成项目要求。为了能让心里更有底一些,我阅读了 Python Agent 的源码,写下了对项目需要做的工作的理解,并向项目的导师柯振旭发送了自荐邮件,最终被选中去完成这个项目。\n过程 被选中后我很激动,也把这份激动化作了参与开源的动力。我在进一步阅读源码,搭建本地环境后,用了三周左右的时间完成了 Django 项目的埋点插件的开发,毕竟我选择的项目是一个低难度的项目,而我在 Python web 方面也有一些经验。在这之后,我的导师和我进行了沟通,在我表达了想要继续做贡献的意愿之后,他给我建议了一些可以进一步进行贡献的方向,我也就继续参与 Python Agent 的开发。接下来,我陆续完成了 PyMongo 埋点插件, 插件版本检查机制, 支持使用 kafka 协议进行数据上报等功能。在提交了暑期 2020 活动的结项申请书后,我又继续参与了在端到端测试中增加对百分位数的验证等功能。\n在整个过程中,我遇到过很多问题,包括对问题认识不够清晰,功能的设计不够完善等等,但是通过与导师的讨论以及 Code Review,这些问题最终都迎刃而解了。此外他还经常会和我交流项目进一步发展方向,并给我以鼓励和肯定,在这里我想特别感谢我的导师在整个项目过程中给我的各种帮助。\n收获 参加暑期 2020 的活动带给我了很多收获,主要有以下几点:\n第一是让我真正参与到了开源项目中。在之前我只向在项目代码或文档中发现的 typo 发起过一些 Pull Request,但是暑期 2020 活动通过列出项目 + 导师指导的方式,明确了所要做的事情,并提供了相应的指导,降低了参与开源的门槛,使得我们学生可以参与到项目的开发中来。\n第二是对我的专业研究方向也有很多启发,我的研究方向就是微服务与云原生相关,通过参与到 SkyWalking 的开发中使得我可以更好地理解研究问题中的一些概念,也让我更得心应手得使用 SkyWalking 来解决一些实际的问题。\n第三是通过参与 SkyWalking Python Agent 以及其他部分的开发,我的贡献得到了社区的承认,并在最近被邀请作为 Committer 加入了社区,这对我而言是很高的认可,也提升了我的自信心。\n​\t第四点就是我通过这个活动认识了不少新朋友,同时也开拓了我的视野,使得我对于开源项目与开源社区有了很多新的认识。\n建议 最后同样是我对想要参与开源社区,想要参与此类活动的同学们的一些建议:\n 虽然奖金很吸引人,但是还是希望大家能抱着长期为项目进行贡献的心态来参与开源项目,以这样的心态参与开源可以让你更好地理解开源社区的运作方式,也可以让你更有机会参与完成激动人心的功能,你在一个东西上付出的时间精力越多,你能收获的往往也越多。 在申请项目的时候,可以提前阅读一下相关功能的源码,并结合自己的思考去写一份清晰明了的 proposal ,这样可以帮助你在申请人中脱颖而出。 在开始着手去完成一个功能之前,首先理清思路,并和自己的导师或了解这一部分的人进行沟通与确认,从而尽量避免在错误的方向上浪费太多时间。  ","excerpt":"今年暑假期间我参加了开源软件供应链点亮计划—暑期 2020 的活动,在这个活动中,我主要参加了 Apache SkyWalking 的 Python Agent 的开发,最终项目顺利结项并获得了”最具 
…","ref":"/zh/2020-12-20-summer2020-activity-sharing2/","title":"暑期 2020 活动学生(张可)心得分享"},{"body":"背景 我是一个热爱编程、热爱技术的人,⼀直以来都向往着能参与到开源项⽬中锻炼⾃⼰,但当我面对庞大而复杂的项目代码时,却感到手足无措,不知该从何开始。⽽此次的“开源软件供应链点亮计划-暑期2020”活动则正好提供了这样⼀个机会:清晰的任务要求、开源社区成员作为导师提供指导以及一笔丰厚的奖金,让我顺利地踏上了开源这条道路。\n回顾 在“暑期2020”活动的这两个多月里,我为 SkyWalking 的命令行工具实现了一个 dashboard,此外在阅读项目源码的过程中,还发现并修复了几个 bug。到活动结束时,我共提交了11个 PR,贡献了两千多行改动,对 SkyWalking CLI 项目的贡献数量排名第二,还获得了“最具潜力奖”。\n我觉得之所以能够如此顺利地完成这个项⽬主要有两个原因。一方面,我选择的 SkyWalking CLI 项⽬当时最新的版本号为0.3.0,还处于起步阶段,代码量相对较少,⽽且项⽬结构非常清晰,文档也较为详细,这对于我理解整个项⽬⾮常有帮助,从⽽能够更快地上⼿。另一方面,我的项目导师非常认真负责,每次我遇到问题,导师都会及时地为我解答,然后我提交的 PR 也能够很快地被 review。⽽且导师不时会给予我肯定的评论与⿎励,这极⼤地提⾼了我的成就感,让我更加积极地投⼊到下⼀阶段的⼯作,形成⼀个正向的循环。\n收获 回顾整个参与过程,觉得自己收获颇多:\n首先,我学习到了很多可能在学校里接触不到的新技术,了解了开源项目是如何进行协作,开源社区是如何运转治理的,以及开源文化、Apache way 等知识,仿佛进入了一个崭新而精彩的世界。\n其次,我的编程能力得到了锻炼。因为开源项目对于代码的质量有较高的要求,因此我会在编程时有意识地遵守相关的规范,培养良好的编码习惯。然后在导师的 code review 中也学习到了一些编程技巧。\n此外,参与开源为我的科研带来了不少灵感。因为我的研究方向是智能软件工程,旨在将人工智能技术应用在软件工程的各个环节中,这需要我在实践中发现实际问题。而开源则提供了这样一个窗口,让我足不出户即可参与到软件项目的设计、开发、测试和发布等环节。\n最后也是本次活动最大的一个收获,我的贡献得到了社区的认可,被提名成为了 SkyWalking 社区的第一位学生 committer。\n建议 最后,对于将来想要参加此类活动的同学,附上我的一些建议:\n第一,选择活跃、知名的社区。社区对你的影响将是极其深远的,好的社区意味着成熟的协作流程、良好的氛围、严谨的代码规范,以及有更大几率遇到优秀的导师,这些对于你今后在开源方面的发展都是非常有帮助的。\n第二,以兴趣为导向来选择项目,同时要敢于走出舒适区。我最初在选择项目时,初步确定了两个,一个是低难度的 Python 项目,另一个是中等难度的 Go 项目。当时我很纠结:因为我对 Python 语言比较熟悉,选择一个低难度的项目是比较稳妥的,但是项目的代码我看的并不是很懂,具体要怎么做我完全没有头绪;而 Go 项目是一个命令行工具,我对这个比较感兴趣,且有一个大致的思路,但是我对 Go 语言并不是很熟悉,实践经验为零。最后凭借清晰具体的 proposal 我成功申请到了 Go 项目并顺利地完成了,还在实践中快速掌握了一门新的编程语言。\n这次的“暑期2020”活动虽已圆满结束,但我的开源之路才刚刚开始。\n","excerpt":"背景 我是一个热爱编程、热爱技术的人,⼀直以来都向往着能参与到开源项⽬中锻炼⾃⼰,但当我面对庞大而复杂的项目代码时,却感到手足无措,不知该从何开始。⽽此次的“开源软件供应链点亮计划-暑期2020”活动 …","ref":"/zh/2020-12-19-summer2020-activity-sharing/","title":"暑期2020活动心得分享"},{"body":"NGE2E is the next generation End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It\u0026rsquo;s built based on the lessons learnt from tens of hundreds of test cases in the SkyWalking main repo.\nGoal  Keep the feature parity with the existing E2E framework in SkyWalking main repo; Support both docker-compose and KinD to orchestrate the tested services under different environments; Get rid of the heavy Java/Maven stack, which exists in the current E2E; be language independent as much as possible, users only need to configure YAMLs and run commands, without writing codes;  Non-Goal  This framework is not involved with the build process, i.e. 
it won\u0026rsquo;t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn\u0026rsquo;t take the plugin tests into account, at least for now; This project doesn\u0026rsquo;t mean to add/remove any new/existing test case to/from the main repo; This documentation won\u0026rsquo;t cover too much technical details of how to implement the framework, that should go into an individual documentation;  Design Before diving into the design details, let\u0026rsquo;s take a quick look at how the end user might use NGE2E.\n All the following commands are mock, and are open to debate.\n To run a test case in a directory /path/to/the/case/directory\ne2e run /path/to/the/case/directory # or cd /path/to/the/case/directory \u0026amp;\u0026amp; e2e run This will run the test case in the specified directory, this command is a wrapper that glues all the following commands, which can be executed separately, for example, to debug the case:\nNOTE: because all the options can be loaded from a configuration file, so as long as a configuration file (say e2e.yaml) is given in the directory, every command should be able to run in bare mode (without any option explicitly specified in the command line);\nSet Up e2e setup --env=compose --file=docker-compose.yaml --wait-for=service/health e2e setup --env=kind --file=kind.yaml --manifests=bookinfo.yaml,gateway.yaml --wait-for=pod/ready e2e setup # If configuration file e2e.yaml is present  --env: the environment, may be compose or kind, represents docker-compose and KinD respectively; --file: the docker-compose.yaml or kind.yaml file that declares how to set up the environment; --manifests: for KinD, the resources files/directories to apply (using kubectl apply -f); --command: a command to run after the environment is started, this may be useful when users need to install some extra tools or apply resources from command line, like istioctl install --profile=demo; --wait-for: can be specified multiple times to give a list of conditions to be met; wait until the given conditions are met; the most frequently-used strategy should be --wait-for=service/health, --wait-for=deployments/available, etc. that make the e2e setup command to wait for all conditions to be met; other possible strategies may be something like --wait-for=\u0026quot;log:Started Successfully\u0026quot;, --wait-for=\u0026quot;http:localhost:8080/healthcheck\u0026quot;, etc. if really needed;  Trigger Inputs e2e trigger --interval=3s --times=0 --action=http --url=\u0026#34;localhost:8080/users\u0026#34; e2e trigger --interval=3s --times=0 --action=cmd --cmd=\u0026#34;curl localhost:8080/users\u0026#34; e2e trigger # If configuration file e2e.yaml is present  --interval=3s: trigger the action every 3 seconds; --times=0: how many times to trigger the action, 0=infinite; --action=http: the action of the trigger, i.e. \u0026ldquo;perform an http request as an input\u0026rdquo;; --action=cmd: the action of the trigger, i.e. 
\u0026ldquo;execute the cmd as an input\u0026rdquo;;  Query Output swctl service ls this is a project-specific step, different project may use different tools to query the actual output, for SkyWalking, it uses swctl to query the actual output.\nVerify e2e verify --actual=actual.data.yaml --expected=expected.data.yaml e2e verify --query=\u0026#34;swctl service ls\u0026#34; --expected=expected.data.yaml e2e verify # If configuration file e2e.yaml is present   --actual: the actual data file, only YAML file format is supported;\n  --expected: the expected data file, only YAML file format is supported;\n  --query: the query to get the actual data, the query result must have the same format as --actual and --expected;\n The --query option will get the output into a temporary file and use the --actual under the hood;\n   Cleanup e2e cleanup --env=compose --file=docker-compose.yaml e2e cleanup --env=kind --file=kind.yaml --resources=bookinfo.yaml,gateway.yaml e2e cleanup # If configuration file e2e.yaml is present This step requires the same options in the setup step so that it can clean up all things necessarily.\nSummarize To summarize, the directory structure of a test case might be\ncase-name ├── agent-service # optional, an arbitrary project that is used in the docker-compose.yaml if needed │ ├── Dockerfile │ ├── pom.xml │ └── src ├── docker-compose.yaml ├── e2e.yaml # see a sample below └── testdata ├── expected.endpoints.service1.yaml ├── expected.endpoints.service2.yaml └── expected.services.yaml or\ncase-name ├── kind.yaml ├── bookinfo │ ├── bookinfo.yaml │ └── bookinfo-gateway.yaml ├── e2e.yaml # see a sample below └── testdata ├── expected.endpoints.service1.yaml ├── expected.endpoints.service2.yaml └── expected.services.yaml a sample of e2e.yaml may be\nsetup:env:kindfile:kind.yamlmanifests:- path:bookinfo.yamlwait:# you can have multiple conditions to wait- namespace:bookinfolabel-selector:app=productfor:deployment/available- namespace:reviewslabel-selector:app=productfor:deployment/available- namespace:ratingslabel-selector:app=productfor:deployment/availablerun:- command:|# it can be a shell script or anything executableistioctl install --profile=demo -ykubectl label namespace default istio-injection=enabledwait:- namespace:istio-systemlabel-selector:app=istiodfor:deployment/available# OR# env: compose# file: docker-compose.yamltrigger:action:httpinterval:3stimes:0url:localhost:9090/usersverify:- query:swctl service lsexpected:expected.services.yaml- query:swctl endpoint ls --service=\u0026#34;YnVzaW5lc3Mtem9uZTo6cHJvamVjdEM=.1\u0026#34;expected:expected.projectC.endpoints.yamlthen a single command should do the trick.\ne2e run Modules This project is divided into the following modules.\nController A controller command (e2e run) composes all the steps declared in the e2e.yaml, it should be progressive and clearly display which step is currently running. If it failed in a step, the error message should be as much comprehensive as possible. An example of the output might be\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? 
Generating Traffic - http localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up env (by executing cleanup command) no mater what status other commands are, even if they are failed, the controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Initializer The initializer is responsible for\n  When env==compose\n Start the docker-compose services; Check the services' healthiness; Wait until all services are ready according to the interval, etc.;    When env==kind\n Start the KinD cluster according to the config files; Apply the resources files (--manifests) or/and run the custom init command (--commands); Check the pods' readiness; Wait until all pods are ready according to the interval, etc.;    Verifier According to scenarios we have at the moment, the must-have features are:\n  Matchers\n Exact match Not null Not empty Greater than 0 Regexp match At least one of list element match    Functions\n Base64 encode/decode    in order to help to identify simple bugs from the GitHub Actions workflow, there are some \u0026ldquo;nice to have\u0026rdquo; features:\n Printing the diff content when verification failed is a super helpful bonus proved in the Python agent repo;  Logging When a test case failed, all the necessary logs should be collected into a dedicated directory, which could be uploaded to the GitHub Artifacts for downloading and analysis;\nLogs through the entire process of a test case are:\n KinD clusters logs; Containers/pods logs; The logs from the NGE2E itself;  More Planned Debugging Debugging the E2E locally has been a strong requirement and time killer that we haven\u0026rsquo;t solve up to date, though we have enhancements like https://github.com/apache/skywalking/pull/5198 , but in this framework, we will adopt a new method to \u0026ldquo;really\u0026rdquo; support debugging locally.\nThe most common case when debugging is to run the E2E tests, with one or more services forwarded into the host machine, where the services are run in the IDE or in debug mode.\nFor example, you may run the SkyWalking OAP server in an IDE and run e2e run, expecting the other services (e.g. agent services, SkyWalking WebUI, etc.) 
inside the containers to connect to your local OAP, instead of the one declared in docker-compose.yaml.\nFor Docker Desktop Mac/Windows, we can access the services running on the host machine inside containers via host.docker.internal, for Linux, it\u0026rsquo;s 172.17.0.1.\nOne possible solution is to add an option --debug-services=oap,other-service-name that rewrites all the router rules inside the containers from oap to host.docker.internal/172.17.0.1.\nCodeGen When adding new test case, a code generator would be of great value to eliminate the repeated labor and copy-pasting issues.\ne2e new \u0026lt;case-name\u0026gt; ","excerpt":"NGE2E is the next generation End-to-End Testing framework that aims to help developers to set up, …","ref":"/blog/e2e-design/","title":"[Design] NGE2E - Next Generation End-to-End Testing Framework"},{"body":"这篇文章暂时不讲告警策略, 直接看默认情况下激活的告警目标以及钉钉上的告警效果\nSkyWalking内置了很多默认的告警策略, 然后根据告警策略生成告警目标, 我们可以很容易的在界面上看到\n当我们想去让这些告警目标通知到我们时, 由于SkyWalking目前版本(8.3)已经自带了, 只需要简单配置一下即可\n我们先来钉钉群中创建机器人并勾选加签\n然后再修改告警部分的配置文件, 如果你是默认的配置文件(就像我一样), 你可以直接执行以下命令, 反之你也可以手动修改configs/alarm-settings.yml文件\ntee \u0026lt;your_skywalking_path\u0026gt;/configs/alarm-settings.yml \u0026lt;\u0026lt;-'EOF' dingtalkHooks: textTemplate: |- { \u0026quot;msgtype\u0026quot;: \u0026quot;text\u0026quot;, \u0026quot;text\u0026quot;: { \u0026quot;content\u0026quot;: \u0026quot;Apache SkyWalking Alarm: \\n %s.\u0026quot; } } webhooks: - url: https://oapi.dingtalk.com/robot/send?access_token=\u0026lt;access_token\u0026gt; secret: \u0026lt;加签值\u0026gt; EOF 最终效果如下\n参考文档:\nhttps://github.com/apache/skywalking/blob/master/docs/en/setup/backend/backend-alarm.md\nhttps://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq/uKPlK\n谢谢观看, 后续我会在SkyWalking告警这块写更多实战文章\n","excerpt":"这篇文章暂时不讲告警策略, 直接看默认情况下激活的告警目标以及钉钉上的告警效果\nSkyWalking内置了很多默认的告警策略, 然后根据告警策略生成告警目标, 我们可以很容易的在界面上看到\n当我们想去 …","ref":"/zh/2020-12-13-skywalking-alarm/","title":"SkyWalking报警发送到钉钉群"},{"body":"Gui Cao began the code contributions since May 3, 2020. In the past 6 months, his 23 pull requests(GitHub, zifeihan[1]) have been accepted, which includes 5k+ lines of codes.\nMeanwhile, he took part in the tech discussion, and show the interests to contribute more to the project.\nAt Dec. 4th, 2020, the project management committee(PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome Gui Cao join the committer team.\n[1] https://github.com/apache/skywalking/commits?author=zifeihan\n","excerpt":"Gui Cao began the code contributions since May 3, 2020. In the past 6 months, his 23 pull …","ref":"/events/welcome-gui-cao-as-new-committer/","title":"Welcome Gui Cao as new committer"},{"body":" Author: Zhenxu Ke, Sheng Wu, and Tevah Platt. tetrate.io Original link, Tetrate.io blog Dec. 03th, 2020  Apache SkyWalking: an APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based (Docker, Kubernetes, Mesos) architectures.\nEnvoy Access Log Service: Access Log Service (ALS) is an Envoy extension that emits detailed access logs of all requests going through Envoy.\nBackground Apache SkyWalking has long supported observability in service mesh with Istio Mixer adapter. But since v1.5, Istio began to deprecate Mixer due to its poor performance in large scale clusters. Mixer’s functionalities have been moved into the Envoy proxies, and is supported only through the 1.7 Istio release. 
On the other hand, Sheng Wu and Lizan Zhou presented a better solution based on the Apache SkyWalking and Envoy ALS on KubeCon China 2019, to reduce the performance impact brought by Mixer, while retaining the same observability in service mesh. This solution was initially implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman at Tetrate.io. If you are looking for a more efficient solution to observe your service mesh instead of using a Mixer-based solution, this is exactly what you need. In this tutorial, we will explain a little bit how the new solution works, and apply it to the bookinfo application in practice.\nHow it works From a perspective of observability, Envoy can be typically deployed in 2 modes, sidecar, and router. As a sidecar, Envoy mostly represents a single service to receive and send requests (2 and 3 in the picture below). While as a proxy, Envoy may represent many services (1 in the picture below).\nIn both modes, the logs emitted by ALS include a node identifier. The identifier starts with router~ (or ingress~) in router mode and sidecar~ in sidecar proxy mode.\nApart from the node identifier, there are several noteworthy properties in the access logs that will be used in this solution:\n  downstream_direct_remote_address: This field is the downstream direct remote address on which the request from the user was received. Note: This is always the physical peer, even if the remote address is inferred from for example the x-forwarded-for header, proxy protocol, etc.\n  downstream_remote_address: The remote/origin address on which the request from the user was received.\n  downstream_local_address: The local/destination address on which the request from the user was received.\n  upstream_remote_address: The upstream remote/destination address that handles this exchange.\n  upstream_local_address: The upstream local/origin address that handles this exchange.\n  upstream_cluster: The upstream cluster that upstream_remote_address belongs to.\n  We will discuss more about the properties in the following sections.\nSidecar When serving as a sidecar, Envoy is deployed alongside a service, and delegates all the incoming/outgoing requests to/from the service.\n  Delegating incoming requests: in this case, Envoy acts as a server side sidecar, and sets the upstream_cluster in form of inbound|portNumber|portName|Hostname[or]SidecarScopeID.\nThe SkyWalking analyzer checks whether either downstream_remote_address can be mapped to a Kubernetes service:\na. If there is a service (say Service B) whose implementation is running in this IP(and port), then we have a service-to-service relation, Service B -\u0026gt; Service A, which can be used to build the topology. Together with the start_time and duration fields in the access log, we have the latency metrics now.\nb. If there is no service that can be mapped to downstream_remote_address, then the request may come from a service out of the mesh. Since SkyWalking cannot identify the source service where the requests come from, it simply generates the metrics without source service, according to the topology analysis method. The topology can be built as accurately as possible, and the metrics detected from server side are still correct.\n  Delegating outgoing requests: in this case, Envoy acts as a client-side sidecar, and sets the upstream_cluster in form of outbound|\u0026lt;port\u0026gt;|\u0026lt;subset\u0026gt;|\u0026lt;serviceFQDN\u0026gt;.\nClient side detection is relatively simpler than (1. Delegating incoming requests). 
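To make the client-side case above more concrete, here is a hedged sketch of the handful of fields SkyWalking reads from a single outbound access log entry of the bookinfo application (the addresses and timing values are purely illustrative; the real payload is the protobuf message streamed over the Envoy ALS gRPC API):

# simplified view of one client-side (outbound) ALS entry
upstream_cluster: "outbound|9080||reviews.default.svc.cluster.local"   # destination service FQDN and port
downstream_remote_address: "10.244.1.10:52344"                         # the local productpage workload that issued the request
upstream_remote_address: "10.244.1.12:9080"                            # a reviews pod, mapped back to the reviews service
start_time: "2020-12-03T10:15:30Z"                                     # used, together with the duration, for latency metrics
duration: "42ms"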
If upstream_remote_address is another sidecar or proxy, we simply get the mapped service name and generate the topology and metrics. Otherwise, we have no idea what it is and consider it an UNKNOWN service.\n  Proxy role When Envoy is deployed as a proxy, it is an independent service itself and doesn\u0026rsquo;t represent any other service like a sidecar does. Therefore, we can build client-side metrics as well as server-side metrics.\nExample In this section, we will use the typical bookinfo application to demonstrate how Apache SkyWalking 8.3.0+ (the latest version up to Nov. 30th, 2020) works together with Envoy ALS to observe a service mesh.\nInstalling Kubernetes SkyWalking 8.3.0 supports the Envoy ALS solution under both Kubernetes environment and virtual machines (VM) environment, in this tutorial, we’ll only focus on the Kubernetes scenario, for VM solution, please stay tuned for our next blog, so we need to install Kubernetes before taking further steps.\nIn this tutorial, we will use the Minikube tool to quickly set up a local Kubernetes(v1.17) cluster for testing. In order to run all the needed components, including the bookinfo application, the SkyWalking OAP and WebUI, the cluster may need up to 4GB RAM and 2 CPU cores.\nminikube start --memory=4096 --cpus=2 Next, run kubectl get pods --namespace=kube-system --watch to check whether all the Kubernetes components are ready. If not, wait for the readiness before going on.\nInstalling Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The built-in configuration profiles free us from lots of manual operations. So, for demonstration purposes, we will use Istio through this tutorial.\nexport ISTIO_VERSION=1.7.1 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ istioctl install --set profile=demo kubectl label namespace default istio-injection=enabled Run kubectl get pods --namespace=istio-system --watch to check whether all the Istio components are ready. If not, wait for the readiness before going on.\nEnabling ALS The demo profile doesn’t enable ALS by default. We need to reconfigure it to enable ALS via some configuration.\nistioctl manifest install \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 The example command --set meshConfig.enableEnvoyAccessLogService=true enables the Envoy access log service in the mesh. And as we said earlier, ALS is essentially a gRPC service that emits requests logs. The config meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 tells this gRPC service where to emit the logs, say skywalking-oap.istio-system:11800, where we will deploy the SkyWalking ALS receiver later.\nNOTE: You can also enable the ALS when installing Istio so that you don’t need to restart Istio after installation:\nistioctl install --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 kubectl label namespace default istio-injection=enabled Deploying Apache SkyWalking The SkyWalking community provides a Helm Chart to make it easier to deploy SkyWalking and its dependent services in Kubernetes. 
The Helm Chart can be found at the GitHub repository.\n# Install Helm curl -sSLO https://get.helm.sh/helm-v3.0.0-linux-amd64.tar.gz sudo tar xz -C /usr/local/bin --strip-components=1 linux-amd64/helm -f helm-v3.0.0-linux-amd64.tar.gz # Clone SkyWalking Helm Chart git clone https://github.com/apache/skywalking-kubernetes cd skywalking-kubernetes/chart git reset --hard dd749f25913830c47a97430618cefc4167612e75 # Update dependencies helm dep up skywalking # Deploy SkyWalking helm -n istio-system install skywalking skywalking \\  --set oap.storageType=\u0026#39;h2\u0026#39;\\  --set ui.image.tag=8.3.0 \\  --set oap.image.tag=8.3.0-es7 \\  --set oap.replicas=1 \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh \\  --set oap.env.JAVA_OPTS=\u0026#39;-Dmode=\u0026#39; \\  --set oap.envoy.als.enabled=true \\  --set elasticsearch.enabled=false We deploy SkyWalking to the namespace istio-system, so that SkyWalking OAP service can be accessed by skywalking-oap.istio-system:11800, to which we told ALS to emit their logs, in the previous step.\nWe also enable the ALS analyzer in the SkyWalking OAP: oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh. The analyzer parses the access logs and maps the IP addresses in the logs to the real service names in the Kubernetes, to build a topology.\nIn order to retrieve the metadata (such as Pod IP and service names) from a Kubernetes cluster for IP mappings, we also set oap.envoy.als.enabled=true, to apply for a ClusterRole that has access to the metadata.\nexport POD_NAME=$(kubectl get pods -A -l \u0026#34;app=skywalking,release=skywalking,component=ui\u0026#34; -o name) echo $POD_NAME kubectl -n istio-system port-forward $POD_NAME 8080:8080 Now navigate your browser to http://localhost:8080 . You should be able to see the SkyWalking dashboard. The dashboard is empty for now, but after we deploy the demo application and generate traffic, it should be filled up later.\nDeploying Bookinfo application Run:\nexport ISTIO_VERSION=1.7.1 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s minikube tunnel Then navigate your browser to http://localhost/productpage. You should be able to see the typical bookinfo application. Refresh the webpage several times to generate enough access logs.\nDone! And you’re all done! Check out the SkyWalking WebUI again. You should see the topology of the bookinfo application, as well the metrics of each individual service of the bookinfo application.\nTroubleshooting  Check all pods status: kubectl get pods -A. SkyWalking OAP logs: kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=oap\u0026quot; -o name). SkyWalking WebUI logs: kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o name). Make sure the time zone at the bottom-right of the WebUI is set to UTC +0.  Customizing Service Names The SkyWalking community brought more improvements to the ALS solution in the 8.3.0 version. You can decide how to compose the service names when mapping from the IP addresses, with variables service and pod. 
For instance, configuring K8S_SERVICE_NAME_RULE to the expression ${service.metadata.name}-${pod.metadata.labels.version} gets service names with version label such as reviews-v1, reviews-v2, and reviews-v3, instead of a single service reviews, see the PR.\nWorking ALS with VM Kubernetes is popular, but what about VMs? From what we discussed above, in order to map the IPs to services, SkyWalking needs access to the Kubernetes cluster, fetching service metadata and Pod IPs. But in a VM environment, there is no source from which we can fetch those metadata. In the next post, we will introduce another ALS analyzer based on the Envoy metadata exchange mechanism. With this analyzer, you are able to observe a service mesh in the VM environment. Stay tuned! If you want to have commercial support for the ALS solution or hybrid mesh observability, Tetrate Service Bridge, TSB is another good option out there.\nAdditional Resources  KubeCon 2019 Recorded Video. Get more SkyWalking updates on the official website.  Apache SkyWalking founder Sheng Wu, SkyWalking core maintainer Zhenxu Ke are Tetrate engineers, and Tevah Platt is a content writer for Tetrate. Tetrate helps organizations adopt open source service mesh tools, including Istio, Envoy, and Apache SkyWalking, so they can manage microservices, run service mesh on any infrastructure, and modernize their applications.\n","excerpt":"Author: Zhenxu Ke, Sheng Wu, and Tevah Platt. tetrate.io Original link, Tetrate.io blog Dec. 03th, …","ref":"/blog/2020-12-03-obs-service-mesh-with-sw-and-als/","title":"Observe Service Mesh with SkyWalking and Envoy Access Log Service"},{"body":"","excerpt":"","ref":"/zh_tags/service-mesh/","title":"Service Mesh"},{"body":" 如果你正在寻找在 Mixer 方案以外观察服务网格的更优解,本文正符合你的需要。\n Apache Skywalking︰特别为微服务、云原生和容器化(Docker、Kubernetes、Mesos)架构而设计的 APM(应用性能监控)系统。\nEnvoy 访问日志服务︰访问日志服务(ALS)是 Envoy 的扩展组件,会将所有通过 Envoy 的请求的详细访问日志发送出来。\n背景 Apache SkyWalking 一直通过 Istio Mixer 的适配器,支持服务网格的可观察性。不过自从 v1.5 版本,由于 Mixer 在大型集群中差强人意的表现,Istio 开始弃用 Mixer。Mixer 的功能现已迁至 Envoy 代理,并获 Istio 1.7 版本支持。\n在去年的中国 KubeCon 中,吴晟和周礼赞基于 Apache SkyWalking 和 Envoy ALS,发布了新的方案:不再受制于 Mixer 带来的性能影响,也同时保持服务网格中同等的可观察性。这个方案最初是由吴晟、高洪涛、周礼赞和 Dhi Aurrahman 在 Tetrate.io 实现的。\n如果你正在寻找在 Mixer 方案之外,为你的服务网格进行观察的最优解,本文正是你当前所需的。在这个教程中,我们会解释此方案的运作逻辑,并将它实践到 bookinfo 应用上。\n运作逻辑 从可观察性的角度来说,Envoy 一般有两种部署模式︰Sidecar 和路由模式。 Envoy 代理可以代表多项服务(见下图之 1),或者当它作为 Sidecar 时,一般是代表接收和发送请求的单项服务(下图之 2 和 3)。\n在两种模式中,ALS 发放的日志都会带有一个节点标记符。该标记符在路由模式时,以 router~ (或 ingress~)开头,而在 Sidecar 代理模式时,则以 sidecar~ 开头。\n除了节点标记符之外,这个方案[1]所采用的访问日志也有几个值得一提的字段︰\ndownstream_direct_remote_address︰此字段是下游的直接远程地址,用作接收来自用户的请求。注意︰它永远是对端实体的地址,即使远程地址是从 x-forwarded-for header、代理协议等推断出来的。\ndownstream_remote_address︰远程或原始地址,用作接收来自用户的请求。\ndownstream_local_address︰本地或目标地址,用作接收来自用户的请求。\nupstream_remote_address︰上游的远程或目标地址,用作处理本次交换。\nupstream_local_address︰上游的本地或原始地址,用作处理本次交换。\nupstream_cluster︰upstream_remote_address 所属的上游集群。\n我们会在下面详细讲解各个字段。\nSidecar 当 Envoy 作为 Sidecar 的时候,会搭配服务一起部署,并代理来往服务的传入或传出请求。\n  代理传入请求︰在此情况下,Envoy 会作为服务器端的 Sidecar,以 inbound|portNumber|portName|Hostname[or]SidecarScopeID 格式设定 upstream_cluster。\nSkyWalking 分析器会检查 downstream_remote_address 是否能够找到对应的 Kubernetes 服务。\n如果在此 IP(和端口)中有一个服务(例如服务 B)正在运行,那我们就会建立起服务对服务的关系(即服务 B → 服务 A),帮助建立拓扑。再配合访问日志中的 start_time 和 duration 两个字段,我们就可以获得延迟的指标数据了。\n如果没有任何服务可以和 downstream_remote_address 相对应,那请求就有可能来自网格以外的服务。由于 SkyWalking 无法识别请求的服务来源,在没有源服务的情况下,它简单地根据拓扑分析方法生成数据。拓扑依然可以准确地建立,而从服务器端侦测出来的指标数据也依然是正确的。\n  代理传出请求︰在此情况下,Envoy 会作为客户端的 Sidecar,以 
outbound|\u0026lt;port\u0026gt;|\u0026lt;subset\u0026gt;|\u0026lt;serviceFQDN\u0026gt; 格式设定 upstream_cluster。\n客户端的侦测相对来说比代理传入请求容易。如果 upstream_remote_address 是另一个 Sidecar 或代理的话,我们只需要获得它相应的服务名称,便可生成拓扑和指标数据。否则,我们没有办法理解它,只能把它当作 UNKNOWN 服务。\n  代理角色 当 Envoy 被部署为前端代理时,它是独立的服务,并不会像 Sidecar 一样,代表任何其他的服务。所以,我们可以建立客户端以及服务器端的指标数据。\n演示范例 在本章,我们会使用典型的 bookinfo 应用,来演示 Apache SkyWalking 8.3.0+ (截至 2020 年 11 月 30 日的最新版本)如何与 Envoy ALS 合作,联手观察服务网格。\n安装 Kubernetes 在 Kubernetes 和虚拟机器(VM)的环境下,SkyWalking 8.3.0 均支持 Envoy ALS 的方案。在本教程中,我们只会演示在 Kubernetes 的情境,至于 VM 方案,请耐心期待我们下一篇文章。所以在进行下一步之前,我们需要先安装 Kubernetes。\n在本教程中,我们会使用 Minikube 工具来快速设立本地的 Kubernetes(v1.17 版本)集群用作测试。要运行所有必要组件,包括 bookinfo 应用、SkyWalking OAP 和 WebUI,集群需要动用至少 4GB 内存和 2 个 CPU 的核心。\nminikube start --memory=4096 --cpus=2 然后,运行 kubectl get pods --namespace=kube-system --watch,检查所有 Kubernetes 的组件是否已准备好。如果还没,在进行下一步前,请耐心等待准备就绪。\n安装 Istio Istio 为配置 Envoy 代理和实现访问日志服务提供了一个非常方便的方案。内建的配置设定档为我们省去了不少手动的操作。所以,考虑到演示的目的,我们会在本教程全程使用 Istio。\nexport ISTIO_VERSION=1.7.1 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ istioctl install --set profile=demo kubectl label namespace default istio-injection=enabled 然后,运行 kubectl get pods --namespace=istio-system --watch,检查 Istio 的所有组件是否已准备好。如果还没,在进行下一步前,请耐心等待准备就绪。\n启动访问日志服务 演示的设定档没有预设启动 ALS,我们需要重新配置才能够启动 ALS。\nistioctl manifest install \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 范例指令 --set meshConfig.enableEnvoyAccessLogService=true 会在网格中启动访问日志服务。正如之前提到,ALS 本质上是一个会发放请求日志的 gRPC 服务。配置 meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 会告诉这个gRPC 服务往哪里发送日志,这里是往 skywalking-oap.istio-system:11800 发送,稍后我们会部署 SkyWalking ALS 接收器到这个地址。\n注意︰\n你也可以在安装 Istio 时启动 ALS,那就不需要在安装后重新启动 Istio︰\nistioctl install --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 kubectl label namespace default istio-injection=enabled 部署 Apache SkyWalking SkyWalking 社区提供了 Helm Chart ,让你更轻易地在 Kubernetes 中部署 SkyWalking 以及其依赖服务。 Helm Chart 可以在 GitHub 仓库找到。\n# Install Helm curl -sSLO https://get.helm.sh/helm-v3.0.0-linux-amd64.tar.gz sudo tar xz -C /usr/local/bin --strip-components=1 linux-amd64/helm -f helm-v3.0.0-linux-amd64.tar.gz # Clone SkyWalking Helm Chart git clone https://github.com/apache/skywalking-kubernetes cd skywalking-kubernetes/chart git reset --hard dd749f25913830c47a97430618cefc4167612e75 # Update dependencies helm dep up skywalking # Deploy SkyWalking helm -n istio-system install skywalking skywalking \\  --set oap.storageType=\u0026#39;h2\u0026#39;\\  --set ui.image.tag=8.3.0 \\  --set oap.image.tag=8.3.0-es7 \\  --set oap.replicas=1 \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh \\  --set oap.env.JAVA_OPTS=\u0026#39;-Dmode=\u0026#39; \\  --set oap.envoy.als.enabled=true \\  --set elasticsearch.enabled=false 我们在 istio-system 的命名空间内部署 SkyWalking,使 SkyWalking OAP 服务可以使用地址 skywalking-oap.istio-system:11800 访问,在上一步中,我们曾告诉过 ALS 应往此处发放它们的日志。\n我们也在 SkyWalking OAP 中启动 ALS 分析器︰oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh。分析器会对访问日志进行分析,并解析日志中的 IP 地址和 Kubernetes 中的真实服务名称,以建立拓扑。\n为了从 Kubernetes 集群处获取元数据(例如 Pod IP 和服务名称),以识别相应的 IP 地址,我们还会设定 oap.envoy.als.enabled=true,用来申请一个对元数据有访问权的 ClusterRole。\nexport POD_NAME=$(kubectl get pods -A -l \u0026#34;app=skywalking,release=skywalking,component=ui\u0026#34; -o name) echo 
$POD_NAME kubectl -n istio-system port-forward $POD_NAME 8080:8080 现在到你的浏览器上访问 http://localhost:8080。你应该会看到 SkyWalking 的 Dashboard。 Dashboard 现在应该是空的,但稍后部署应用和生成流量后,它就会被填满。\n部署 Bookinfo 应用 运行︰\nexport ISTIO_VERSION=1.7.1 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s minikube tunnel 现在到你的浏览器上进入 http://localhost/productpage。你应该会看到典型的 bookinfo 应用画面。重新整理该页面几次,以生成足够的访问日志。\n完成了! 这样做,你就成功完成设置了!再查看 SkyWalking 的 WebUI,你应该会看到 bookinfo 应用的拓扑,以及它每一个单独服务的指标数据。\n疑难解答  检查所有 pod 的状态︰kubectl get pods -A。 SkyWalking OAP 的日志︰kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=oap\u0026quot; -o name)。 SkyWalking WebUI 的日志︰kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o name)。 确保 WebUI 右下方的时区设定在 UTC +0。  自定义服务器名称 SkyWalking 社区在 ALS 方案的 8.3.0 版本中,作出了许多改善。你现在可以在映射 IP 地址时,决定如何用 service 和 pod 变量去自定义服务器的名称。例如,将 K8S_SERVICE_NAME_RULE 设置为 ${service.metadata.name}-${pod.metadata.labels.version},就可以使服务名称带上版本的标签,类似 reviews-v1、reviews-v2 和 reviews- v3,而不再是单个服务 review[2]。\n在 VM 上使用 ALS Kubernetes 很受欢迎,可是 VM 呢?正如我们之前所说,为了替 IP 找到对应的服务,SkyWalking 需要对 Kubernetes 集群有访问权,以获得服务的元数据和 Pod 的 IP。可是在 VM 环境中,我们并没有来源去收集这些元数据。\n在下一篇文章,我们会介绍另外一个 ALS 分析器,它是建立于 Envoy 的元数据交换机制。有了这个分析器,你就可以在 VM 环境中观察服务网格了。万勿错过!\n如果你希望在 ALS 方案或是混合式网格可观察性上获得商业支持,TSB 会是一个好选项。\n额外资源\n KubeCon 2019 的录影视频。 在官方网站上获得更多有关 SkyWalking 的最新消息吧。  如有任何问题或反馈,发送邮件至 learn@tetrate.io。\nApache SkyWalking 创始人吴晟和 SkyWalking 的核心贡献者柯振旭都是 Tetrate 的工程师。 Tetrate 的内容创造者编辑与贡献于本文章。 Tetrate 帮助企业采用开源服务网格工具,包括 Istio、Envoy 和 Apache SkyWalking,让它们轻松管理微服务,在任何架构上运行服务网格,以至现代化他们的应用。\n[1]https://github.com/envoyproxy/envoy/blob/549164c42cae84b59154ca4c36009e408aa10b52/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto\n[2]https://github.com/apache/skywalking/pull/5722\n","excerpt":"如果你正在寻找在 Mixer 方案以外观察服务网格的更优解,本文正符合你的需要。\n Apache Skywalking︰特别为微服务、云原生和容器化(Docker、Kubernetes、Mesos)架 …","ref":"/zh/observe-service-mesh-with-skywalking-and-envoy-access-log-service/","title":"使用 SkyWalking 和 Envoy 访问日志服务对服务网格进行观察"},{"body":"SkyWalking 8.3.0 is released. Go to downloads page to find release tars.\nProject  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. 
Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.3.0 is released. Go to downloads page to find release tars.\nProject  Test: …","ref":"/events/release-apache-skwaylking-apm-8-3-0/","title":"Release Apache SkyWalking APM 8.3.0"},{"body":"Python 作为一门功能强大的编程语言,被广泛的应用于计算机行业之中; 在微服务系统架构盛行的今天,Python 以其丰富的软件生态和灵活的语言特性在服务端编程领域也占有重要的一席之地。 本次分享将阐述 Apache SkyWalking 在微服务架构中要解决的问题,展示如何使用 Apache SkyWalking 来近乎自动化地监控 Python 后端应用服务,并对 Apache SkyWalking 的 Python 语言探针的实现技术进行解读。\nB站视频地址\n","excerpt":"Python 作为一门功能强大的编程语言,被广泛的应用于计算机行业之中; 在微服务系统架构盛行的今天,Python 以其丰富的软件生态和灵活的语言特性在服务端编程领域也占有重要的一席之地。 本次分享将 …","ref":"/zh/2020-11-30-pycon/","title":"[视频] PyCon China 2020 - Python 微服务应用性能监控"},{"body":"SkyWalking CLI 0.5.0 is released. 
Go to downloads page to find release tars.\n  Features\n Use template files in yaml format instead Refactor metrics command to adopt metrics-v2 protocol Use goroutine to speed up dashboard global command Add metrics list command    Bug Fixes\n Add flags of instance, endpoint and normal for metrics command Fix the problem of unable to query database metrics    Chores\n Update release guide doc Add screenshots for use cases in README.md Introduce generated codes into codebase    ","excerpt":"SkyWalking CLI 0.5.0 is released. Go to downloads page to find release tars.\n  Features\n Use …","ref":"/events/release-apache-skywalking-cli-0-5-0/","title":"Release Apache SkyWalking CLI 0.5.0"},{"body":"","excerpt":"","ref":"/tags/satellite/","title":"Satellite"},{"body":" Author: Jiapeng Liu. Baidu. skywalking-satellite: The Sidecar Project of Apache SkyWalking Nov. 25th, 2020  A lightweight collector/sidecar which can be deployed close to the target monitored system, to collect metrics, traces, and logs. It also provides advanced features, such as local cache, format transformation, and sampling.\nDesign Thinking Satellite is a 2 level system to collect observability data from other core systems. So, the core element of the design is to guarantee data stability during Pod startup all the way to Pod shutdown avoiding alarm loss. All modules are designed as plugins, and if you have other ideas, you can add them yourself.\nSLO  Single gatherer supports \u0026gt; 1000 ops (Based 0.5 Core,50M) At least once delivery.(Optional) Data stability: 99.999%.(Optional)  Because they are influenced by the choice of plugins, some items in SLO are optional.\nRole Satellite would be running as a Sidecar. Although Daemonset mode would take up fewer resources, it will cause more troubles to the forwarding of agents. So we also want to use Sidecar mode by reducing the costs. But Daemonset mode would be also supported in the future plan.\nCore Modules The Satellite has 3 core modules which are Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing the data to Queue. The Processor module is responsible for reading data from the queue and processing data by a series of filter chains. The Sender module is responsible for async processing and forwarding the data to the external services in the batch mode. After sending success, Sender would also acknowledge the offset of Queue in Gatherer.  Detailed Structure The overall design is shown in detail in the figure below. We will explain the specific components one by one.\nGatherer Concepts The Gatherer has 4 components to support the data collection, which are Input, Collector, Worker, and Queue. There are 2 roles in the Worker, which are Fetcher and Receiver.\n The Input is an abstraction of the input source, which is usually mapped to a configuration file. The Collector is created by the Source, but many collectors could be created by the same Source. For example, when a log path has been configured as the /var/*.log in an Input, the number of collectors is the same as the file number in this path. The Fetcher and Receiver is the real worker to collect data. The receiver interface is an abstraction, which has multiple implementations, such as gRPC receiver and HTTP receiver.Here are some specific use cases:  Trace Receiver is a gRPC server for receiving trace data created by Skywalking agents. Log Receiver is also a gRPC server for receiving log data which is collected by Skywalking agents. 
(In the future we want Skywalking Agent to support log sending, and RPC-based log sending is more efficient and needs fewer resources than file reading. For example, the way of file reading will bring IO pressure and performance cost under multi-line splicing.) Log Fetcher is like Filebeat, which fits the common log collection scenario. This fetcher will have more responsibility than any other workers because it needs to record the offset and process the multi-line splicing. This feature will be implemented in the future. Prometheus Fetcher supports a new way to fetch Prometheus data and push the data to the upstream. \u0026hellip;\u0026hellip;   The Queue is a buffer module to decouple collection and transmission. In the 1st release version, we will use persistent storage to ensure data stability. But the implementation is a plug-in design that can support pure memory queues later.   The data flow We use the Trace Receiver as an example to introduce the data flow. Queue MmapQueue We have simplified the design of MmapQueue to reduce the resources cost on the memory and disk.\nConcepts There are 2 core concepts in MmapQueue.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  Segment One MmapQueue has a directory to store the whole data. The Queue directory is made up with many segments and 1 meta file. The number of the segments would be computed by 2 params, which are the max cost of the Queue and the cost of each segment. For example, If the max cost is 512M and each segment cost is 256K, the directory can hold up to 2000 files. Once capacity is exceeded, an coverage policy is adopted that means the 2000th would override the first file.\nEach segment in Queue will be N times the size of the page cache and will be read and written in an appended sequence rather than randomly. These would improve the performance of Queue. For example, each Segment is a 128k file, as shown in the figure below.\nMeta The Meta is a mmap file that only contains 56Bit. There are 5 concepts in the Meta.\n Version: A version flag. Watermark Offset: Point to the current writing space.  ID: SegmentID Offset: The offset in Segment.   Writed Offset: Point to the latest refreshed data, that would be overridden by the write offset after period refresh.  ID: SegmentID Offset: The offset in Segment.   Reading Offset: Point to the current reading space.  ID: SegmentID Offset: The offset in Segment.   Committed Offset: Point to the latest committed offset , that is equal to the latest acked offset plus one.  ID: SegmentID Offset: The offset in Segment.    The following diagram illustrates the transformation process.\n The publisher receives data and wants to write to Queue.  The publisher would read Writing Offset to find a space and do plus one. After this, the publisher will write the data to the space.   The consumer wants to read the data from Queue.  The consumer would read Reading Offset to find the current read offset and do plus one. After this, the consumer will read the data from the space.   On period flush, the flusher would override Watermark Offset by using Writing Offset. When the ack operation is triggered, Committed Offset would plus the batch size in the ack batch. 
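Before looking at crash handling, here is a rough Go sketch of that offset bookkeeping. It is a hypothetical model, not the actual skywalking-satellite code: the real Meta stores each offset as a (segment ID, offset-in-segment) pair, which is simplified to a plain counter here.

package main

import "fmt"

// meta models the four offsets kept in the mmap-backed meta file.
type meta struct {
	writing   int // Writing Offset: next slot the publisher writes to
	watermark int // Watermark Offset: writing progress persisted by the periodic flush
	reading   int // Reading Offset: next slot the consumer reads from
	committed int // Committed Offset: last acknowledged position, advanced on ack
}

// write is called by the publisher for every item pushed into the Queue.
func (m *meta) write() int {
	slot := m.writing
	m.writing++
	return slot
}

// read is called by the consumer for every item pulled from the Queue.
func (m *meta) read() int {
	slot := m.reading
	m.reading++
	return slot
}

// flush runs periodically and makes the writing progress durable.
func (m *meta) flush() { m.watermark = m.writing }

// ack is called after the Sender ships a batch successfully.
func (m *meta) ack(batchSize int) { m.committed += batchSize }

// recover is the crash-recovery rule: only the durable offsets survive, so
// reading restarts from the committed position (at-least-once delivery) and
// writing restarts from the watermark.
func (m *meta) recover() {
	m.reading = m.committed
	m.writing = m.watermark
}

func main() {
	m := &meta{}
	m.write()
	m.write()
	m.flush()
	m.read()
	m.ack(1)
	m.recover()
	fmt.Println(m.writing, m.reading) // 2 1
}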
When facing crash, Writing Offset and Reading Offset would be overridden by Watermark Offset and Committed Offset. That is because the Reading Offset and Writing Offset cannot guarantee at least once delivery.  Mmap Performance Test The test is to verify the efficiency of mmap in low memory cost.\n The rate of data generation: 7.5K/item 1043 item/s (Based on Aifanfan online pod.) The test structure is based on Bigqueue because of similar structure. Test tool: Go Benchmark Test Command: go test -bench BenchmarkEnqueue -run=none -cpu=1 Result On Mac(15-inch, 2018,16 GB 2400 MHz DDR4, 2.2 GHz Intel Core i7 SSD):  BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-384KB 66501 21606 ns/op 68 B/op 1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-1.25MB 72348 16649 ns/op 67 B/op 1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-16KB/MaxMem-1.25MB 39996 33199 ns/op 103 B/op 1 allocs/op   Result On Linux(INTEL Xeon E5-2450 V2 8C 2.5GHZ2,INVENTEC PC3L-10600 16G8,INVENTEC SATA 4T 7.2K*8):  BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-384KB 126662\t12070 ns/op\t62 B/op\t1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-1.25MB 127393\t12097 ns/op\t62 B/op\t1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-16KB/MaxMem-1.25MB 63292\t23806 ns/op\t92 B/op\t1 allocs/op   Conclusion: Based on the above tests, mmap is both satisfied at the write speed and at little memory with very low consumption when running as a sidecar.  Processor The Processor has 3 core components, which are Consumer, Filter, and Context.\n The Consumer is created by the downstream Queue. The consumer has its own read offset and committed offset, which is similar to the offset concept of Spark Streaming. Due to the particularity of APM data preprocessing, Context is a unique concept in the Satellite filter chain, which supports storing the intermediate event because the intermediate state event also needs to be sent in sometimes. The Filter is the core data processing part, which is similar to the processor of beats. Due to the context, the upstream/downstream filters would be logically coupling.  Sender  BatchConverter decouples the Processor and Sender by staging the Buffer structure, providing parallelization. But if BatchBuffer is full, the downstream processors would be blocked. Follower is a real send worker that has a client, such as a gRPC client or Kafka client, and a fallback strategy. Fallback strategy is an interface, we can add more strategies to resolve the abnormal conditions, such as Instability in the network, upgrade the oap cluster. When sent success, Committed Offset in Queue would plus the number of this batch.  High Performance The scenario using Satellite is to collect a lot of APM data collection. We guarantee high performance by the following ways.\n Shorten transmission path, that means only join 2 components,which are Queue and Processor, between receiving and forwarding. High Performance Queue. MmapQueue provides a big, fast and persistent queue based on memory mapped file and ring structure. Processor maintains a linear design, that could be functional processed in one go-routine to avoid too much goroutines switching.  Stability Stability is a core point in Satellite. 
Stability can be considered from several angles: stable resource cost, stable running, and crash recovery.\nStable resource cost In terms of resource cost, memory and CPU are the main concerns.\nOn the CPU side, we keep a sequential structure to avoid a large number of retries when facing network congestion. Satellite also avoids busy polling when the Queue is empty, based on the offset design of the Queue.\nOn the memory side, we guarantee that there is only one data cache in Satellite, which is the Queue. The queue size is kept fixed based on the ring structure to maintain a stable memory cost. Also, MmapQueue is designed to minimize memory consumption and provide persistence while keeping the speed as fast as possible. In the future, it may support strategies to dynamically control the size of MmapQueue to handle more extreme conditions.\nStable running There are many cases of network congestion, such as a network problem on the host node, the OAP cluster being upgraded, or an unstable Kafka cluster. When facing these cases, the Follower runs its fallback strategy and blocks the downstream processes. Once the failure strategy is finished, whether the batch was eventually sent successfully or given up, the Follower processes the next batch.\nCrash Recovery Crash recovery only works when the user selects MmapQueue in the Gatherer, because of its persistent file design. When facing a crash, Reading Offset is overridden by Committed Offset, which ensures at-least-once delivery. And the Writed (flushed) Offset overrides Writing Offset, which ensures the consumer always works properly and avoids encountering uncrossable defective data blocks.\nBuffer pool The Queue stores objects with a fixed structure, so an object buffer pool is an efficient way to reuse memory and avoid GC pressure.\n ackChan batch convertor  Some metrics Satellite should also collect its own monitoring metrics. The following metrics are necessary for Satellite.\n cpu memory go routine number gatherer_writing_offset gatherer_watermark_offset processor_reading_count sender_committed_offset sender_abandoned_count sender_retry_count  Input and Output We will reuse this diagram to explain the input and output.\n Input  Because both push and pull modes are supported, the Queue is a core component. The Queue is designed as a ring with fixed capacity, which means the oldest data would be overridden by the latest data. If users find data loss, they should raise the capacity ceiling of the memory Queue. MmapQueue generally doesn\u0026rsquo;t face this problem unless the Sender transport is congested.   Output  If the BatchBuffer is full, the processor would be blocked. If the Channel is full, the downstream components would be blocked, such as the BatchConverter and the Processor. When the SenderWorker fails to send, the batch data goes through a failure strategy that blocks pulling data from the Channel. The strategy is a part of the Sender, and the operation mode is synchronous. Once the failure strategy is finished, whether the batch was eventually sent successfully or given up, the SenderWorker keeps pulling data from the Channel.    Questions How to avoid busy polling when the Queue is empty? If Watermark Offset is less than or equal to Reading Offset, a signal is sent to the consumer to avoid continuous polling.\nWhy reuse files in the Queue? 
The unified model is a ring in Queue, that limits fixed resources cost in memory or disk.In Mmap Queue, reusing files turns the delete operations into an overwrite operations, effectively reducing the creation and deletion behavior in files.\nWhat are the strategies for file creation and deletion in MmapQueue? As Satellite running, the number of the files in MmapQueue would keep growing until up to the maximum capacity. After this, the old files will be overridden by the new data to avoid file deletion. When the Pod died, all resources were recycled.\n","excerpt":"Author: Jiapeng Liu. Baidu. skywalking-satellite: The Sidecar Project of Apache SkyWalking Nov. …","ref":"/blog/2020-11-25-skywalking-satellite-0.1.0-design/","title":"The first design of Satellite 0.1.0"},{"body":"SkyWalking Python 0.4.0 is released. Go to downloads page to find release tars.\n Feature: Support Kafka reporter protocol (#74) BugFix: Move generated packages into skywalking namespace to avoid conflicts (#72) BugFix: Agent cannot reconnect after server is down (#79) Test: Mitigate unsafe yaml loading (#76)  ","excerpt":"SkyWalking Python 0.4.0 is released. Go to downloads page to find release tars.\n Feature: Support …","ref":"/events/release-apache-skywalking-python-0-4-0/","title":"Release Apache SkyWalking Python 0.4.0"},{"body":"活动介绍 Apache SkyWalking 2020 开发者线下活动,社区创始人,PMC成员和Committer会亲临现场,和大家交流和分享项目中的使用经验。 以及邀请Apache Local Community 北京的成员一起分享Apache文化和Apache之道。\n日程安排 开场演讲 09:30-09:50 SkyWalking\u0026rsquo;s 2019-2020 and beyond\n吴晟,Tetrate.io创始工程师,Apache SkyWalking创始人\nB站视频地址\n 上午 09:55-10:30 贝壳全链路跟踪实践\n赵禹光,赵禹光,贝壳找房监控技术负责人,Apache SkyWalking PMC成员\n10:35-11:15 SkyWalking在百度爱番番部门实践\n刘嘉鹏,百度,SkyWalking contributor\n11:15-11:55 非计算机背景的同学如何贡献开源\n缘于一位本科在读的社会学系的同学的问题,这让我反思我们开源community的定位和Open的程度,于是,适兕从生产、分发、消费的软件供应的角度,根据涉及到的角色,然后再反观现代大学教育体系的专业,进一步对一个开源项目和community需要的专业背景多样性进行一个阐述和探究。并以ALC Beijing为例进行一个事例性的说明。\n适兕,开源布道师,ALC Beijing member,开源之道主创,开源社教育组成员。\nB站视频地址\n 下午 13:30-14:10 如何从 Apache SkyWalking 社区学习 Apache Way\n温铭,支流科技联合创始人&CEO,Apache APISIX 项目 VP, Apache SkyWalking Committer\n14:10-14:50 Apache SkyWalking 在小米公司的应用\n宋振东,小米公司小米信息技术部 skywalking 研发负责人\n14:50-15:30 Istio全生命周期监控\n高洪涛,Tetrate.io创始工程师,Apache SkyWalking PMC成员\n15:30-15:45 茶歇\n15:45-16:25 针对HikariCP数据库连接池的监控\n张鑫 Apache SkyWalking PMC 成员\n16:25-17:00 SkyWalking 与 Nginx 的优化实践\n王院生 深圳支流科技创始人兼 CTO,Apache APISIX 创始人 \u0026amp; PMC成员\nB站视频地址\n","excerpt":"活动介绍 Apache SkyWalking 2020 开发者线下活动,社区创始人,PMC成员和Committer会亲临现场,和大家交流和分享项目中的使用经验。 以及邀请Apache Local …","ref":"/zh/2020-11-23-devcon/","title":"[视频] SkyWalking DevCon 2020"},{"body":"The APM system provides the tracing or metrics for distributed systems or microservice architectures. Back to APM themselves, they always need backend storage to store the necessary massive data. What are the features required for backend storage? Simple, fewer dependencies, widely used query language, and the efficiency could be into your consideration. Based on that, traditional SQL databases (like MySQL) or NoSQL databases would be better choices. However, this topic will present another backend storage solution for the APM system viewing from NewSQL. Taking Apache Skywalking for instance, this talking will share how to make use of Apache ShardingSphere, a distributed database middleware ecosystem to extend the APM system\u0026rsquo;s storage capability.\nAs a senior DBA worked at JD.com, the responsibility is to develop the distributed database and middleware, and the automated management platform for database clusters. 
As a PMC of Apache ShardingSphere, I am willing to contribute to the OS community and explore the area of distributed databases and NewSQL.\n  ","excerpt":"The APM system provides the tracing or metrics for distributed systems or microservice …","ref":"/blog/2020-11-21-apachecon-obs-shardingsphere/","title":"[Video] Another backend storage solution for the APM system"},{"body":"Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open-source and scalable platform for all your APIs and microservices. Apache SkyWalking: an APM(application performance monitor) system, especially designed for microservices, cloud-native and container-based (Docker, Kubernetes, Mesos) architectures. Through the powerful plug-in mechanism of Apache APISIX, Apache Skywalking is quickly supported, so that we can see the complete life cycle of requests from the edge to the internal service. Monitor and manage each request in a visual way, and improve the observability of the service.\n  ","excerpt":"Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, …","ref":"/blog/2020-11-21-apachecon-obs-apisix/","title":"[Video] Improve Apache APISIX observability with Apache SkyWalking"},{"body":"Today\u0026rsquo;s monitoring solutions are geared towards operational tasks, displaying behavior as time-series graphs inside dashboards and other abstractions. These abstractions are immensely useful but are largely designed for software operators, whose responsibilities require them to think in systems, rather than the underlying source code. This is problematic given that an ongoing trend of software development is the blurring boundaries between building and operating software. This trend makes it increasingly necessary for programming environments to not just support development-centric activities, but operation-centric activities as well. Such is the goal of the feedback-driven development approach. By combining IDE and APM technology, software developers can intuitively explore multiple dimensions of their software simultaneously with continuous feedback about their software from inception to production.\nBrandon Fergerson is an open-source software developer who does not regard himself as a specialist in the field of programming, but rather as someone who is a devoted admirer. He discovered the beauty of programming at a young age and views programming as an art and those who do it well to be artists. He has an affinity towards getting meta and combining that with admiration of programming, has found source code analysis to be exceptionally interesting. Lately, his primary focus involves researching and building AI-based pair programming technology.\n  ","excerpt":"Today\u0026rsquo;s monitoring solutions are geared towards operational tasks, displaying behavior as …","ref":"/blog/2020-11-21-apachecon-obs-sourcemarker/","title":"[Video] SourceMarker - Continuous Feedback for Developers"},{"body":"Over the past few years, and coupled with the growing adoption of microservices, distributed tracing has emerged as one of the most commonly used monitoring and troubleshooting methodologies. New tracing tools are increasingly being introduced, driving adoption even further. One of these tools is Apache SkyWalking, a popular open-source tracing, and APM platform. 
This talk explores the history of the SkyWalking storage module, shows the evolution of distributed tracing storage layers, from the traditional relational database to document-based search engine. I hope that this talk contributes to the understanding of history and also that it helps to clarify the different types of storage that are available to organizations today.\nHongtao Gao is the engineer of tetrate.io and the former Huawei Cloud expert. One of PMC members of Apache SkyWalking and participates in some popular open-source projects such as Apache ShardingSphere and Elastic-Job. He has an in-depth understanding of distributed databases, container scheduling, microservices, ServicMesh, and other technologies.\n  ","excerpt":"Over the past few years, and coupled with the growing adoption of microservices, distributed tracing …","ref":"/blog/2020-11-21-apachecon-obs-storage/","title":"[Video] The history of distributed tracing storage"},{"body":" 作者: 赵禹光 原文链接: 亲临百人盛况的Apache SkyWalking 2020 DevCon,看见了什么? 2020 年 10 月 29 日  活动现场 2020年11月14日Apache SkyWalking 2020 DevCon由贝壳找房和tetrate赞助,Apache SkyWalking、云原生、Apache APISIX、Apache Pulsar 和 ALC Beijing 五大社区合作,在贝壳找房一年级会议室盛大举行,本次活动主要面对Apache SkyWalking的使用者、开发者和潜在用户。线上线下共有230多人报名。经统计,实际参加活动人数超过130人,近60%的人愿意抽出自己的休息时间,来交流学习Apache SkyWalking和开源文化。不难看见,在可预见的未来,中国的开源项目很快将进入下一个维度,那必定是更广的社区人员参与,更高技术知识体现,更强的线上稳定性和及时修复能力。\n活动历程: 09:30-09:50 SkyWalking\u0026rsquo;s 2019-2020 and beyond 吴晟老师本次分享:回顾2020年度SkyWalking发布的重要的新特性,出版的《Apache SkyWalking实战》图书,社区的进展,开源爱好者如何参与SkyWalking建设,和已知社区在主导的SkyWalking2021年孵化中的新特性。\n09:55-10:30 贝壳全链路跟踪实践 赵禹光老师(作者)本次分享:回顾了贝壳找房2018年至今,贝壳找房的全链路跟踪项目与SkyWalking的渊源,分享了SkyWalking在实践中遇到的问题,和解决方案。以及SkyWalking近10%的Committer都曾经或正在贝壳人店平台签中研发部,工作过的趣事。\n10:35-11:15 刘嘉鹏老师分享 SkyWalking在百度爱番番部门实践 刘嘉鹏老师本次分享:回顾了百度爱番番部门在使用SkyWalking的发展历程\u0026amp;现状,CRM SAAS产品在近1年使用SkyWalking实践经验,以及如何参与SkyWalking的贡献,并成为的Apache Committer。\n11:15-11:55 适兕老师分享 非计算机背景的同学如何贡献开源 适兕是国内很有名的开源布道师,本次分享从生产、分发、消费的软件供应的角度,根据涉及到的角色,然后再反观现代大学教育体系的专业,进一步对一个开源项目和community需要的专业背景多样性进行一个阐述和探究。并以ALC Beijing为例进行一个事例性的说明,非计算机背景的同学如何贡献开源。\n13:30-14:10 如何从 Apache SkyWalking 社区学习 Apache Way 14:10-14:50 Apache SkyWalking 在小米公司的应用 宋振东老师是小米信息技术部分布式链路追踪系统研发负责人,分别以小米公司,业务开发、架构师、SRE、Leader和QA等多个视角,回顾了SkyWalking在小米公司的应用实践。从APM的产品选型到实际落地,对其他公司准备使用SkyWalking落地,非常有借鉴意义。\n14:50-15:30 Istio全生命周期监控 高洪涛老师本次分享了SkyWalking和可观测云原生等非常前沿的知识布道,其中有,云原生在Logging、Metrics和Tracing的相关知识,Istio,K8S等方面的实践。对一些公司在前沿技术的落地,非常有借鉴意义。\n15:45-16:25 针对HikariCP数据库连接池的监控 张鑫老师本次分享了,以一个SkyWalking无法Tracing的实际线上故障的故事出发,讲述如何定位,和补充SkyWalking插件的不足,并将最后的实践贡献到社区。对大家参与开源很有帮助。\n16:25-17:00 SkyWalking 与 Nginx 的优化实践 王院生老师本次分享SkyWalking社区和APISIX社区合作,在Nginx插件的实践过程,对社区之间的如何开展合作,非常有借鉴意义,院生老师的工作\u0026amp;开源态度,很好的诠释Geek精神,也是我们互联网从业者需要学习恪守的。\nApache SkyWalking 2020 DevCon 讲师PPT Apache SkyWalking 2020 DevCon 讲师 PPT\nSkyWalking 后续发展计划 正如吴晟老师所说:No plan, open to the community,Apache SkyWalking是没有RoadMap。社区的后续发展,依赖于每个人在社区的贡献。与其期待,不如大胆设想,将自己的设计按照Apache Way贡献到SkyWalking,你就是下一个Apache SkyWalking Commiter,加入Member of SkyWalking大家庭,让社区因为你,而更加有活力。\n","excerpt":"作者: 赵禹光 原文链接: 亲临百人盛况的Apache SkyWalking 2020 DevCon,看见了什么? 2020 年 10 月 29 日  活动现场 2020年11月14日Apache …","ref":"/zh/2020-11-21-what-do-we-see-at-the-apache-skywalking-2020-devcon-event/","title":"亲临百人盛况的Apache SkyWalking 2020 DevCon,看见了什么?"},{"body":"Sheng Wu is a founding engineer at tetrate.io, leads the observability for service mesh and hybrid cloud. A searcher, evangelist, and developer in the observability, distributed tracing, and APM. He is a member of the Apache Software Foundation. 
Love open source software and culture. Created the Apache SkyWalking project and being its VP and PMC member. Co-founder and PMC member of Apache ShardingSphere. Also as a PMC member of Apache Incubator and APISIX. He is awarded as Microsoft MVP, Alibaba Cloud MVP, Tencent Cloud TVP.\nIn the Apache FY2020 report, China is on the top of the download statistics. More China initiated projects joined the incubator, and some of them graduated as the Apache TLP. Sheng joined the Apache community since 2017, in the past 3 years, he witnessed the growth of the open-source culture and Apache way in China. Many developers have joined the ASF as new contributors, committers, foundation members. Chinese enterprises and companies paid more attention to open source contributions, rather than simply using the project like before. In the keynote, he would share the progress about China embracing the Apache culture, and willing of enhancing the whole Apache community.\n  ","excerpt":"Sheng Wu is a founding engineer at tetrate.io, leads the observability for service mesh and hybrid …","ref":"/blog/2020-11-21-apachecon-keynote/","title":"[Video] Apache grows in China"},{"body":"SkyWalking Client JS 0.2.0 is released. Go to downloads page to find release tars.\n Bug Fixes  Fixed a bug in sslTime calculate. Fixed a bug in server response status judgment.    ","excerpt":"SkyWalking Client JS 0.2.0 is released. Go to downloads page to find release tars.\n Bug Fixes  Fixed …","ref":"/events/release-apache-skywalking-client-js-0-2-0/","title":"Release Apache SkyWalking Client JS 0.2.0"},{"body":"SkyWalking Cloud on Kubernetes 0.1.0 is released. Go to downloads page to find release tars.\n Add OAPServer CRDs and controller.  ","excerpt":"SkyWalking Cloud on Kubernetes 0.1.0 is released. Go to downloads page to find release tars.\n Add …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0.1.0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.1.0"},{"body":"Based on his continuous contributions, Jiapeng Liu (a.k.a evanljp) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Jiapeng Liu (a.k.a evanljp) has been voted as a new …","ref":"/events/welcome-jiapeng-liu-as-new-committer/","title":"Welcome Jiapeng Liu as new committer"},{"body":"SkyWalking Kubernetes Helm Chart 4.0.0 is released. Go to downloads page to find release tars.\n Allow overriding configurations files under /skywalking/config. Unify the usages of different SkyWalking versions. Add Values for init container in case of using private regestry. Add services, endpoints resources in ClusterRole.  ","excerpt":"SkyWalking Kubernetes Helm Chart 4.0.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-kubernetes-helm-chart-4.0.0/","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.0.0"},{"body":"SkyWalking Client JS 0.1.0 is released. Go to downloads page to find release tars.\n Support Browser Side Monitoring. Require SkyWalking APM 8.2+.  ","excerpt":"SkyWalking Client JS 0.1.0 is released. Go to downloads page to find release tars.\n Support Browser …","ref":"/events/release-apache-skywalking-client-js-0-1-0/","title":"Release Apache SkyWalking Client JS 0.1.0"},{"body":"","excerpt":"","ref":"/tags/browser/","title":"Browser"},{"body":" Author: Zhenxu Ke, Sheng Wu, Hongtao Gao, and Tevah Platt. tetrate.io Original link, Tetrate.io blog Oct. 
29th, 2020  Apache SkyWalking, the observability platform, and open-source application performance monitor (APM) project, today announced the general availability of its 8.2 release. The release extends Apache SkyWalking’s functionalities and monitoring boundary to the browser side.\nBackground SkyWalking is an observability platform and APM tool that works with or without a service mesh, providing automatic instrumentation for microservices, cloud-native and container-based applications. The top-level Apache project is supported by a global community and is used by Alibaba, Huawei, Tencent, Baidu, ByteDance, and scores of others.\nBrowser side monitoring APM helps SRE and Engineering teams to diagnose system failures, or optimize the systems before they become intolerably slow. But is it enough to always make the users happy?\nIn 8.2.0, SkyWalking extends its monitoring boundary to the browser side, e.g., Chrome, or the network between Chrome and the backend service, or the codes running in the browser. With this, not only can we monitor the backend services and requests sent by the browser as usual, but also the front end rendering speed, error logs, etc., which are the most efficient metrics for capturing the experiences of our end users. (This does not currently extend to IoT devices, but this feature moves SkyWalking a step in that direction).\nWhat\u0026rsquo;s more, SkyWalking browser monitoring also provides data about how the users use products, such as PV(page views), UV(unique visitors), top N PV(page views), etc., which can give a product team clues for optimizing their products.\nQuery traces by tags In SkyWalking\u0026rsquo;s Span data model, there are many important fields that are already indexed and can be queried by the users, but for the sake of performance, querying by Span tags was not supported until now. In SkyWalking 8.2.0, we allow users to query traces by specified tags, which is extremely useful. For example, SRE engineers running tests on the product environment can tag the synthetic traffic and query by this tag later.\nMeter Analysis Language In 8.2.0, the meter system provides a functional analysis language called MAL(Meter Analysis Language) that allows users to analyze and aggregate meter data in the OAP streaming system. The result of an expression can be ingested by either the agent analyzer or OpenTelemetry/Prometheus analyzer.\nComposite Alert Rules Alerting is a good way to discover system failures in time. A common problem is that we configure too many triggers just to avoid missing any possible issue. Nobody likes to be woken up by alert messages at midnight, only to find out that the trigger is too sensitive. These kinds of alerts become noisy and don\u0026rsquo;t help at all.\nIn 8.2.0, users can now configure composite alert rules, where composite rules take multiple metrics dimensions into account. With composite alert rules, we can leverage as many metrics as needed to more accurately determine whether there’s a real problem or just an occasional glitch.\nCommon scenarios like successful rate \u0026lt; 90% but there are only 1~2 requests can now be resolved by a composite rule, such as traffic(calls per minute) \u0026gt; n \u0026amp;\u0026amp; successful rate \u0026lt; m%.\nOther Notable Enhancements  The agent toolkit exposes some APIs for users to send customizable metrics. The agent exclude_plugins allows you to exclude some plugins; mount enables you to load a new set of plugins. More than 10 new plugins have been contributed to the agent. 
The alert system natively supports sending alert messages to Slack, WeChat, DingTalk.  Additional Resources  Read more about the SkyWalking 8.2 release highlights. Get more SkyWalking updates on Twitter.  ","excerpt":"Author: Zhenxu Ke, Sheng Wu, Hongtao Gao, and Tevah Platt. tetrate.io Original link, Tetrate.io blog …","ref":"/blog/2020-10-29-skywalking8-2-release/","title":"Features in SkyWalking 8.2: Browser Side Monitoring; Query Traces by Tags; Meter Analysis Language"},{"body":"","excerpt":"","ref":"/zh_tags/release-blog/","title":"Release Blog"},{"body":" 作者: 柯振旭, 吴晟, 高洪涛, Tevah Platt. tetrate.io 原文链接: What\u0026rsquo;s new with Apache SkyWalking 8.2? Browser monitoring and more 2020 年 10 月 29 日  Apache SkyWalking,一个可观测性平台,也是一个开源的应用性能监视器(APM)项目,今日宣布 8.2 发行版全面可用。该发行版拓展了核心功能,并将其监控边界拓展到浏览器端。\n背景 SkyWalking 是一个观测平台和 APM 工具。它可以选择性的与 Service Mesh 协同工作,为微服务、云原生和基于容器的应用提供自动的指标。该项目是全球社区支持的 Apache 顶级项目,阿里巴巴、华为、腾讯、百度、字节跳动等许多公司都在使用。\n浏览器端监控 APM 可以帮助 SRE 和工程团队诊断系统故障,也能在系统异常缓慢之前优化它。但它是否足以让用户总是满意呢?\n在 8.2.0 版本中, SkyWalking 将它的监控边界拓展到了浏览器端,比如 Chrome ,或者 Chrome 和后端服务之间的网络。这样,我们不仅可以像以前一样监控浏览器发送给后端服务的与请求,还能看到前端的渲染速度、错误日志等信息——这些信息是获取最终用户体验的最有效指标。(目前此功能尚未拓展到物联网设备中,但这项功能使得 SkyWalking 向着这个方向前进了一步)\n此外,SkyWalking浏览器监视也提供以下数据: PV(page views,页面浏览量), UV(unique visitors,独立访客数),浏览量前 N 的页面(Top N Page Views)等。这些数据可以为产品队伍优化他们的产品提供线索。\n按标签 (tag) 查询链路数据 在 SkyWalking 的 Span 数据模型中,已经有了许多被索引并可供用户查询的重要字段。但出于性能考虑,使用 Span 标签查询链路数据的功能直到现在才正式提供。在 SkyWalking 8.2.0 中,我们允许用户查询被特定标签标记的链路,这非常有用。SRE 工程师可以在生产环境中运行测试,将其打上仿真流量的标签,并稍后通过该标签查找它。\n指标分析语言 在 8.2.0 中,仪表系统提供了一项名为MAL(Meter Analysis Language,指标分析语言)的强大分析语言。该语言允许用户在 OAP 流系统中分析并聚合(aggregate)指标数据。 表达式的结果可以被 Agent 分析器或 OpenTelemetry/Prometheus 分析器获取。\n复合警报规则 警报是及时发现系统失效的有效方式。一个常见的问题是,为了避免错过任何可能的问题,我们通常会配置过多的触发器(triggers)。没有人喜欢半夜被警报叫醒,结果只是因为触发系统太敏感。这种警报很嘈杂并毫无帮助。\n在 8.2.0 版本中,用户选择可以配置考虑了多个度量维度的复合警报规则。使用复合报警规则,我们可以根据需要添加尽可能多的指标来更精确地判断是否存在真正的问题,或者只是一个偶发的小问题。\n一些常见的情况,如 成功率 \u0026lt; 90% 但只有 1~2 个请求,现在可以通过复合规则解决,如流量(即每分钟调用数) \u0026gt; n \u0026amp;\u0026amp; 成功率 \u0026lt; m%。\n其它值得注意的功能增强  agent-toolkit SDK 公开了某些 API,供用户发送自定义指标。 Agent exclude_plgins 配置允许您排除某些插件(plugins); mount 配置使您能够加载一套新的插件。 社区贡献了超过 10 个新 Agent 插件。 报警系统原生支持发送消息到 Slack,企业微信,钉钉。  附加资源   阅读更多关于SkyWalkng 8.2 发行版重点.\n  在推特上获取更多关于 SkyWalking 的更新。\n  Apache SkyWalking DevCon 报名信息 Apache SkyWalking DevCon 2020 开始报名了。 2020 年 11 月 14 日,欢迎大家来线下参加活动和交流, 或者报名观看线上直播。\n","excerpt":"作者: 柯振旭, 吴晟, 高洪涛, Tevah Platt. tetrate.io 原文链接: What\u0026rsquo;s new with Apache SkyWalking 8.2? Browser …","ref":"/zh/2020-10-29-skywalking8-2-release/","title":"SkyWalking 8.2.0 中的新特性: 浏览器端监控; 使用标签查询; 指标分析语言"},{"body":"SkyWalking 8.2.0 is released. Go to downloads page to find release tars.\nProject  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. 
Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.2.0 is released. 
Go to downloads page to find release tars.\nProject  Support Browser …","ref":"/events/release-apache-skywalking-apm-8-2-0/","title":"Release Apache SkyWalking APM 8.2.0"},{"body":"高洪涛 美国ServiceMesh服务商tetrate创始工程师。原华为软件开发云技术专家。目前为Apache SkyWalking核心贡献者,参与该开源项目在软件开发云的商业化进程。曾任职当当网系统架构师,开源达人,曾参与Apache ShardingSphere,Elastic-Job等知名开源项目。对分布式数据库,容器调度,微服务,ServicMesh等技术有深入的了解。\n议题简介 定制化Operator模式在面向Kubernetes的云化平台建构中变得越来越流行。Apache SkyWalking社区已经开始尝试使用Operator模式去构建基于Kubernetes平台的PaaS云组件。本次分享给将会给听众带来该项目的初衷,实现与未来演进等相关内容。分享的内容包含:\n 项目动机与设计理念 核心功能展示,包含SkyWalking核心组件的发布,更新与维护。 观测ServiceMesh,包含于Istio的自动集成。 目前的工作进展和对未来的规划。  B站视频地址\n","excerpt":"高洪涛 美国ServiceMesh服务商tetrate创始工程师。原华为软件开发云技术专家。目前为Apache SkyWalking核心贡献者,参与该开源项目在软件开发云的商业化进程。曾任职当当网系统 …","ref":"/zh/2020-10-25-coscon20-swck/","title":"[视频] Apache SkyWalking Cloud on Kubernetes"},{"body":"SkyWalking LUA Nginx 0.3.0 is released. Go to downloads page to find release tars.\n Load the base64 module in utils, different ENV use different library. Add prefix skywalking, avoid conflicts with other lua libraries. Chore: only expose the method of setting random seed, it is optional. Coc: use correct code block type. CI: add upstream_status to tag http.status Add http.status  ","excerpt":"SkyWalking LUA Nginx 0.3.0 is released. Go to downloads page to find release tars.\n Load the base64 …","ref":"/events/release-apache-skywalking-lua-nginx-0.3.0/","title":"Release Apache SkyWalking LUA Nginx 0.3.0"},{"body":"SkyWalking CLI 0.4.0 is released. Go to downloads page to find release tars.\n Features  Add dashboard global command with auto-refresh Add dashboard global-metrics command Add traces search Refactor metrics thermodynamic command to adopt the new query protocol   Bug Fixes  Fix wrong golang standard time    ","excerpt":"SkyWalking CLI 0.4.0 is released. Go to downloads page to find release tars.\n Features  Add …","ref":"/events/release-apache-skywalking-cli-0-4-0/","title":"Release Apache SkyWalking CLI 0.4.0"},{"body":"Huaxi Jiang (江华禧) (a.k.a. fgksgf) mainly focuses on the SkyWalking CLI project, he had participated in the \u0026ldquo;Open Source Promotion Plan - Summer 2020\u0026rdquo; and completed the project smoothly, and won the award \u0026ldquo;Most Potential Students\u0026rdquo; that shows his great willingness to continuously contribute to our community.\nUp to date, he has submitted 26 PRs in the CLI repository, 3 PRs in the main repo, all in total include ~4000 LOC.\nAt Sep. 28th, 2020, the project management committee (PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome to join the committer team, Huaxi!\n","excerpt":"Huaxi Jiang (江华禧) (a.k.a. fgksgf) mainly focuses on the SkyWalking CLI project, he had participated …","ref":"/events/welcome-huaxi-jiang-as-new-committer/","title":"Welcome Huaxi Jiang (江华禧) as new committer"},{"body":"SkyWalking Python 0.3.0 is released. 
Go to downloads page to find release tars.\n  New plugins\n Urllib3 Plugin (#69) Elasticsearch Plugin (#64) PyMongo Plugin (#60) Rabbitmq Plugin (#53) Make plugin compatible with Django (#52)    API\n Add process propagation (#67) Add tags to decorators (#65) Add Check version of packages when install plugins (#63) Add thread propagation (#62) Add trace ignore (#59) Support snapshot context (#56) Support correlation context (#55)    Chores and tests\n Test: run multiple versions of supported libraries (#66) Chore: add pull request template for plugin (#61) Chore: add dev doc and reorganize the structure (#58) Test: update test health check (#57) Chore: add make goal to package release tar ball (#54)    ","excerpt":"SkyWalking Python 0.3.0 is released. Go to downloads page to find release tars.\n  New plugins …","ref":"/events/release-apache-skywalking-python-0-3-0/","title":"Release Apache SkyWalking Python 0.3.0"},{"body":"吴晟 吴晟,Apache 基金会会员,Apache SkyWalking 创始人、项目 VP 和 PMC 成员,Apache 孵化器 PMC 成员,Apache ShardingSphere PMC成员,Apache APISIX PMC 成员,Apache ECharts (incubating) 和Apache DolphinScheduler (incubating) 孵化器导师,Zipkin 成员和贡献者。\n分享大纲  分布式追踪兴起的背景 SkyWalking和其他分布式追踪的异同 定位问题的流程和方法 性能剖析的由来、用途和优势  听众收获 听众能够全面的了解分布式追踪的技术背景,和技术原理。以及为什么这些年,分布式追踪和基于分布式追踪的APM系统,Apache SkyWalking,得到了广泛的使用、集成,甚至云厂商的支持。同时,除了针对追踪数据,我们应该关注更多的是,如何利用其产生的监控数据,定位系统的性能问题。以及它有哪些短板,应该如何弥补。\nB站视频地址\n","excerpt":"吴晟 吴晟,Apache 基金会会员,Apache SkyWalking 创始人、项目 VP 和 PMC 成员,Apache 孵化器 PMC 成员,Apache ShardingSphere PMC成 …","ref":"/zh/2020-08-13-cloud-native-academy/","title":"[视频] 云原生学院 - 后分布式追踪时代的性能问题定位——方法级性能剖析"},{"body":"SkyWalking Chart 3.1.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 8.1.0 Support enable oap dynamic configuration through k8s configmap  ","excerpt":"SkyWalking Chart 3.1.0 is released. Go to downloads page to find release tars.\n Support SkyWalking …","ref":"/events/release-apache-skywalking-chart-3-1-0-for-skywalking-8-1-0/","title":"Release Apache SkyWalking Chart 3.1.0 for SkyWalking 8.1.0"},{"body":" Author: Sheng Wu Original link, Tetrate.io blog  SkyWalking, a top-level Apache project, is the open source APM and observability analysis platform that is solving the problems of 21st-century systems that are increasingly large, distributed, and heterogenous. It\u0026rsquo;s built for the struggles system admins face today: To identify and locate needles in a haystack of interdependent services, to get apples-to-apples metrics across polyglot apps, and to get a complete and meaningful view of performance.\nSkyWalking is a holistic platform that can observe microservices on or off a mesh, and can provide consistent monitoring with a lightweight payload.\nLet\u0026rsquo;s take a look at how SkyWalking evolved to address the problem of observability at scale, and grew from a pure tracing system to a feature-rich observability platform that is now used to analyze deployments that collect tens of billions of traces per day.\nDesigning for scale When SkyWalking was first initialized back in 2015, its primary use case was monitoring the first-generation distributed core system of China Top Telecom companies, China Unicom and China Mobile. In 2013-2014, the telecom companies planned to replace their old traditional monolithic applications with a distributed system. Supporting a super-large distributed system and scaleablity were the high-priority design goals from Day one. So, what matters at scale?\nPull vs. push Pull and push modes relate to the direction of data flow. 
If the agent collects data and pushes them to the backend for further analysis, we call it \u0026ldquo;push\u0026rdquo; mode. Debate over pull vs. push has gone on for a long time. The key for an observability system is to minimize the cost of the agent, and to be generally suitable for different kinds of observability data.\nThe agent would send the data out a short period after it is collected. Then, we would have less concern about overloading the local cache. One typical case would be endpoint (URI of HTTP, service of gRPC) metrics. Any service could easily have hundreds, even thousands of endpoints. An APM system must have these metrics analysis capabilities.\nFurthermore, metrics aren\u0026rsquo;t the only thing in the observability landscape; traces and logs are important too. SkyWalking is designed to provide a 100% sampling rate tracing capability in the production environment. Clearly, push mode is the only solution.\nAt the same time, using push mode natively doesn\u0026rsquo;t mean SkyWalking can\u0026rsquo;t do data pulling. In recent 8.x releases, SkyWalking supports fetching data from Prometheus-instrumented services for reducing the Non-Recurring Engineering of the end users. Also, pull mode is popular in the MQ based transport, typically as a Kafka consumer. The SkyWalking agent side uses the push mode, and the OAP server uses the pull mode.\nThe conclusion: push mode is the native way, but pull mode works in some special cases too.\nMetrics analysis isn\u0026rsquo;t just mathematical calculation Metrics rely on mathematical theories and calculations. Percentile is a good measure for identifying the long tail issue, and reasonable average response time and successful rate are good SLO(s). But those are not all. Distributed tracing provides not just traces with detailed information, but high values metrics that can be analyzed.\nThe service topology map is required from Ops and SRE teams for the NOC dashboard and confirmation of system data flow. SkyWalking uses the STAM (Streaming Topology Analysis Method) to analyze topology from the traces, or based on ALS (Envoy Access Log Service) in the service mesh environment. This topology and metrics of nodes (services) and lines (service relationships) can\u0026rsquo;t be pulled from simple metrics SDKs.\nAs with fixing the limitation of endpoint metrics collection, SkyWalking needs to do endpoint dependency analysis from trace data too. Endpoint dependency analysis provides more important and specific information, including upstream and downstream. Those dependency relationships and metrics help the developer team to locate the boundaries of a performance issue, to specific code blocks.\nPre-calculation vs. query stage calculation? Query stage calculation provides flexibility. Pre-calculation, in the analysis stage, provides better and much more stable performance. Recall our design principle: SkyWalking targets a large-scale distributed system. Query stage calculation was very limited in scope, and most metrics calculations need to be pre-defined and pre-calculated. The key of supporting large datasets is reducing the size of datasets in the design level. Pre-calculation allows the original data to be merged into aggregated results downstream, to be used in a query or even for an alert check.\nTTL of metrics is another important business enabler. 
With the near linear performance offered by queries because of pre-calculation, with a similar query infrastructure, organizations can offer higher TTL, thereby providing extended visibility of performance.\nSpeaking of alerts, query-stage calculation also means the alerting query is required to be based on the query engine. But in this case, when the dataset increasing, the query performance could be inconsistent. The same thing happens in a different metrics query.\nCases today Today, SkyWalking is monitoring super large-scale distributed systems in many large enterprises, including Alibaba, Huawei, Tencent, Baidu, China Telecom, and various banks and insurance companies. The online service companies have more traffic than the traditional companies, like banks and telecom suppliers.\nSkyWalking is the observability platform used for a variety of use cases for distributed systems that are super-large by many measures:\n Lagou.com, an online job recruitment platform  SkyWalking is observing \u0026gt;100 services, 500+ JVM instances SkyWalking collects and analyzes 4+ billion traces per day to analyze performance data, including metrics of 300k+ endpoints and dependencies Monitoring \u0026gt;50k traffic per second in the whole cluster   Yonghui SuperMarket, online service  SkyWalking analyzes at least 10+ billion (3B) traces with metrics per day SkyWalking\u0026rsquo;s second, smaller deployment, analyzes 200+ million traces per day   Baidu, internet and AI company, Kubernetes deployment  SkyWalking collects 1T+ traces a day from 1,400+ pods of 120+ services Continues to scale out as more services are added   Beike Zhaofang(ke.com), a Chinese online property brokerage backed by Tencent Holdings and SoftBank Group  Has used SkyWalking from its very beginning, and has two members in the PMC team. Deployments collect 16+ billion traces per day   Ali Yunxiao, DevOps service on the Alibaba Cloud,  SkyWalking collects and analyzes billions of spans per day SkyWalking keeps AliCloud\u0026rsquo;s 45 services and ~300 instances stable   A department of Alibaba TMall, one of the largest business-to-consumer online retailers, spun off from Taobao  A customized version of SkyWalking monitors billions of traces per day At the same time, they are building a load testing platform based on SkyWalking\u0026rsquo;s agent tech stack, leveraging its tracing and context propagation cabilities    Conclusion SkyWalking\u0026rsquo;s approach to observability follows these principles:\n Understand the logic model: don\u0026rsquo;t treat observability as a mathematical tool. Identify dependencies first, then their metrics. Scaling should be accomplished easily and natively. Maintain consistency across different architectures, and in the performance of APM itself.  Resources  Read about the SkyWalking 8.1 release highlights. Get more SkyWalking updates on Twitter. Sign up to hear more about SkyWalking and observability from Tetrate.  
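As a concrete sketch of the agent-push / OAP-pull split described above, the Kafka transport added in the 8.x line is wired up roughly as follows. The property and module names are assumptions based on the 8.x documentation, so verify them against the agent.config and application.yml shipped with your release, and note that the agent's optional Kafka reporter plugin has to be activated per the agent docs.

# Agent side (agent.config): the agent keeps pushing, but into Kafka topics
plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:kafka-broker1:9092,kafka-broker2:9092}

# OAP side (application.yml): the backend pulls the same topics as a Kafka consumer
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:kafka-broker1:9092}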
","excerpt":"Author: Sheng Wu Original link, Tetrate.io blog  SkyWalking, a top-level Apache project, is the open …","ref":"/blog/2020-08-11-observability-at-scale/","title":"Observability at Scale: SkyWalking it is"},{"body":" 作者:吴晟 翻译:董旭 金蝶医疗 原文链接:Tetrate.io blog  SkyWalking做为Apache的顶级项目,是一个开源的APM和可观测性分析平台,它解决了21世纪日益庞大、分布式和异构的系统的问题。它是为应对当前系统管理所面临的困难而构建的:就像大海捞针,SkyWalking可以在服务依赖复杂且多语言环境下,获取服务对应的指标,以及完整而有意义的性能视图。\nSkyWalking是一个非常全面的平台,无论你的微服务是否在服务网格(Service Mesh)架构下,它都可以提供高性能且一致性的监控。\n让我们来看看,SkyWalking是如何解决大规模集群的可观测性问题,并从一个纯粹的链路跟踪系统,发展成为一个每天分析百亿级跟踪数据,功能丰富的可观测性平台。\n为超大规模而生 SkyWalking的诞生,时间要追溯到2015年,当时它主要应用于监控顶级电信公司(例如:中国联通和中国移动)的第一代分布式核心系统。2013-2014年,这些电信公司计划用分布式系统取代传统的单体架构应用。从诞生那天开始,SkyWalking首要的设计目标,就是能够支持超大型分布式系统,并具有很好可扩展性。那么支撑超大规模系统要考虑什么呢?\n拉取vs推送 与数据流向息息相关的:拉取模式和推送模式。Agent(客户端)收集数据并将其推送到后端,再对数据进一步分析,我们称之为“推送”模式。究竟应该使用拉取还是推送?这个话题已经争论已久。关键因素取决于可观测性系统的目标,即:在Agent端花最小的成本,使其适配不同类型的可观测性数据。\nAgent收集数据后,可以在短时间内发送出去。这样,我们就不必担心本地缓存压力过大。举一个典型的例子,任意服务都可以轻松地拥有数百个甚至数千个端点指标(如:HTTP的URI,gRPC的服务)。那么APM系统就必须具有分析这些数量庞大指标的能力。\n此外,度量指标并不是可观测性领域中的唯一关注点,链路跟踪和日志也很重要。在生产环境下,SkyWalking为了能提供100%采样率的跟踪能力,数据推送模式是唯一可行的解决方案。\nSkyWalking即便使用了推送模式,同时也可进行数据拉取。在最近的8.x的发版本中,SkyWalking支持从已经集成Prometheus的服务中获取终端用户的数据,避免重复工程建设,减少资源浪费。另外,比较常见的是基于MQ的传输构建拉取模式,Kafka消费者就是一个比较典型的例子。SkyWalking的Agent端使用推送模式,OAP服务器端使用拉取模式。\n结论:SkyWalking的推送模式是原生方式,但拉取式模式也适用于某些特殊场景。\n度量指标分析并不仅仅是数学统计 度量指标依赖于数学理论和计算。Percentile(百分位数)是用于反映响应时间的长尾效应。服务具备合理的平均响应时间和成功率,说明服务的服务等级目标(SLO)很好。除此之外,分布式跟踪还为跟踪提供了详细的信息,以及可分析的高价值指标。\n运维团队(OPS)和系统稳定性(SRE)团队通过服务拓扑图,用来观察网络情况(当做NOC dashboard使用)、确认系统数据流。SkyWalking依靠trace(跟踪数据),使用STAM(Streaming Topology Analysis Method)方法进行分析拓扑结构。在服务网格环境下,使用ALS(Envoy Access Log Service)进行拓扑分析。节点(services)和线路(service relationships)的拓扑结构和度量指标数据,无法通过sdk轻而易举的拿到。\n为了解决端点度量指标收集的局限性,SkyWalking还要从跟踪数据中分析端点依赖关系,从而拿到链路上游、下游这些关键具体的信息。这些依赖关系和度量指标信息,有助于开发团队定位引起性能问题的边界,甚至代码块。\n预计算还是查询时计算? 
相比查询时计算的灵活性,预计算可以提供更好、更稳定的性能,这在分析场景下尤为重要。回想一下我们的设计原则:SkyWalking是为了一个大规模的分布式系统而设计。查询时计算的使用范围非常有限,大多数度量计算都需要预先定义和预先计算。支持大数据集的关键是:在设计阶段,要减小数据集。预计算允许将原始数据合并到下游的聚合结果中,用于查询,甚至用于警报检查。\n使用SkyWalking的另一个重要因素是:指标的有效期,TTL(Time To Live)。由于采用了预先计算,查询提供了近似线性的高性能。这也帮助“查询系统”这类基础设施系统,提供更好的性能扩展。\n关于警报,使用查询时计算方案,也意味着警报查询需要基于查询引擎。但在这种情况下,随着数据集增加,查询性能会随之下降,其他指标查询也是一样的结果。\n目前使用案例 如今,SkyWalking在许多大型企业的超大规模分布式系统中使用,包括阿里巴巴、华为、腾讯、百度、中国通讯企业以及多家银行和保险公司。上线SkyWalking公司的流量,比银行和电信运营商这种传统公司还要大。\n在很多行业中,SkyWalking是被应用于超大型分布式系统各种场景下的一个可观测性平台:\n  拉勾网\n  SkyWalking正在观测超过100个服务,500多个JVM实例\n  SkyWalking每天收集和分析40多亿个跟踪数据,用来分析性能,其中包括30万个端点和依赖关系的指标\n  在整个群集中监控\u0026gt;50k流量/秒\n    永辉超市\n  SkyWalking每天分析至少100多亿(3B)的跟踪数据\n  其次,SkyWalking用较小的部署,每天分析2亿多个跟踪数据\n    百度\n  SkyWalking每天从1400多个pod中,从120多个服务收集1T以上的跟踪数据\n  随着更多服务的增加,规模会持续增大\n    贝壳找房(ke.com)\n  很早就使用了SkyWalking,有两名成员已经成为PMC\n  Deployments每天收集160多亿个跟踪数据\n    阿里云效\n  SkyWalking每天收集和分析数十亿个span\n  SkyWalking使阿里云的45项服务和~300个实例保持稳定\n    阿里巴巴天猫\n  SkyWalking个性化定制版,每天监控数十亿跟踪数据\n  与此同时,他们基于SkyWalking的Agent技术栈,利用其跟踪和上下文传播能力,正在构建一个全链路压测平台\n    结论 SkyWalking针对可观测性遵循以下原则:\n 理解逻辑模型:不要把可观测性当作数学统计工具。 首先确定依赖关系,然后确定它们的度量指标。 原生和方便的支撑大规模增长。 在不同的架构情况下,APM各方面表现依然保持稳定和一致。  资源  阅读SkyWalking 8.1发布亮点。 在Twitter上获取更多SkyWalking更新。 注册Tetrate以了解更多有关SkyWalking可观测性的信息。  ","excerpt":"作者:吴晟 翻译:董旭 金蝶医疗 原文链接:Tetrate.io blog  SkyWalking做为Apache的顶级项目,是一个开源的APM和可观测性分析平台,它解决了21世纪日益庞大、分布式和异 …","ref":"/zh/2020-08-11-observability-at-scale-skywalking-it-is/","title":"SkyWalking 为超大规模而生"},{"body":"","excerpt":"","ref":"/zh_tags/use-case/","title":"Use Case"},{"body":" Author: Sheng Wu, Hongtao Gao, and Tevah Platt(Tetrate) Original link, Tetrate.io blog  Apache SkyWalking, the observability platform, and open-source application performance monitor (APM) project, today announced the general availability of its 8.1 release that extends its functionalities and provides a transport layer to maintain the lightweight of the platform that observes data continuously.\nBackground SkyWalking is an observability platform and APM tool that works with or without a service mesh, providing automatic instrumentation for microservices, cloud-native and container-based applications. The top-level Apache project is supported by a global community and is used by Alibaba, Huawei, Tencent, Baidu, and scores of others.\nTransport traces For a long time, SkyWalking has used gRPC and HTTP to transport traces, metrics, and logs. They provide good performance and are quite lightweight, but people kept asking about the MQ as a transport layer because they want to keep the observability data continuously as much as possible. From SkyWalking’s perspective, the MQ based transport layer consumes more resources required in the deployment and the complexity of deployment and maintenance but brings more powerful throughput capacity between the agent and backend.\nIn 8.1.0, SkyWalking officially provides the typical MQ implementation, Kafka, to transport all observability data, including traces, metrics, logs, and profiling data. At the same time, the backend can support traditional gRPC and HTTP receivers, with the new Kafka consumer at the same time. Different users could choose the transport layer(s) according to their own requirements. Also, by referring to this implementation, the community could contribute various transport plugins for Apache Pulsar, RabbitMQ.\nAutomatic endpoint dependencies detection The 8.1 SkyWalking release offers automatic detection of endpoint dependencies. 
SkyWalking has long offered automatic endpoint detection, but endpoint dependencies, including upstream and downstream endpoints, are critical for Ops and SRE teams’ performance analysis. The APM system is expected to detect the relationships powered by the distributed tracing. While SkyWalking has been designed to include this important information at the beginning the latest 8.1 release offers a cool visualization about the dependency and metrics between dependent endpoints. It provides a new drill-down angle from the topology. Once you have the performance issue from the service level, you could check on instance and endpoint perspectives:\nSpringSleuth metrics detection In the Java field, the Spring ecosystem is one of the most widely used. Micrometer, the metrics API lib included in the Spring Boot 2.0, is now adopted by SkyWalking’s native meter system APIs and agent. For applications using Micrometer with the SkyWalking agent installed, all Micrometer collected metrics could then be shipped into SkyWalking OAP. With some configurations in the OAP and UI, all metrics are analyzed and visualized in the SkyWalking UI, with all other metrics detected by SkyWalking agents automatically.\nNotable enhancements The Java agent core is enhanced in this release. It could work better in the concurrency class loader case and is more compatible with another agent solution, such as Alibaba’s Arthas.\n With the logic endpoint supported, the local span can be analyzed to get metrics. One span could carry the raw data of more than one endpoint’s performance. GraphQL, InfluxDB Java Client, and Quasar fiber libs are supported to be observed automatically. Kubernetes Configmap can now for the first time be used as the dynamic configuration center– a more cloud-native solution for k8s deployment environments. OAP supports health checks, especially including the storage health status. If the storage (e.g., ElasticSearch) is not available, you could get the unhealth status with explicit reasons through the health status query. Opencensus receiver supports ingesting OpenTelemetry/OpenCensus agent metrics by meter-system.  Additional resources  Read more about the SkyWalking 8.1 release highlights. Read more about SkyWalking from Tetrate on our blog. Get more SkyWalking updates on Twitter. Sign up to hear more about SkyWalking and observability from Tetrate.  ","excerpt":"Author: Sheng Wu, Hongtao Gao, and Tevah Platt(Tetrate) Original link, Tetrate.io blog  Apache …","ref":"/blog/2020-08-03-skywalking8-1-release/","title":"Features in SkyWalking 8.1: SpringSleuth metrics, endpoint dependency detection, Kafka transport traces and metrics"},{"body":"","excerpt":"","ref":"/tags/kafka/","title":"Kafka"},{"body":"SkyWalking APM 8.1.0 is release. Go to downloads page to find release tars.\nProject  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. 
Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","excerpt":"SkyWalking APM 8.1.0 is release. Go to downloads page to find release tars.\nProject  Support Kafka …","ref":"/events/release-apache-skywalking-apm-8-1-0/","title":"Release Apache SkyWalking APM 8.1.0"},{"body":"","excerpt":"","ref":"/tags/spring/","title":"Spring"},{"body":"Based on his continuous contributions, Wei Hua (a.k.a alonelaval) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Wei Hua (a.k.a alonelaval) has been voted as a new committer.","ref":"/events/welcome-wei-hua-as-new-committer/","title":"Welcome Wei Hua as new committer"},{"body":"SkyWalking Python 0.2.0 is released. Go to downloads page to find release tars.\n  Plugins:\n Kafka Plugin (#50) Tornado Plugin (#48) Redis Plugin (#44) Django Plugin (#37) PyMsql Plugin (#35) Flask plugin (#31)    API\n Add ignore_suffix Config (#40) Add missing log method and simplify test codes (#34) Add content equality of SegmentRef (#30) Validate carrier before using it (#29)    Chores and tests\n Test: print the diff list when validation failed (#46) Created venv builders for linux/windows and req flashers + use documentation (#38)    ","excerpt":"SkyWalking Python 0.2.0 is released. 
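Relating back to the Micrometer and Spring Sleuth meter adoption covered in the 8.1 material earlier, a minimal sketch of the application side is shown below. Only the plain Micrometer API appears here; shipping these meters to the OAP relies on the SkyWalking Java agent plus its Micrometer meter toolkit, whose exact artifact and registry class are not restated here, so treat that wiring as an assumption and check the agent toolkit documentation.

import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

public class OrderMetrics {
    public static void main(String[] args) {
        // Plain Micrometer registry; with the SkyWalking agent and meter toolkit in place,
        // a SkyWalking-backed registry would be used here instead so meters reach the OAP.
        MeterRegistry registry = new SimpleMeterRegistry();

        // A hypothetical business metric, labeled so the meter system can group it
        Counter ordersCreated = Counter.builder("orders_created")
                .tag("region", "cn-east")
                .register(registry);

        ordersCreated.increment(); // count one business event
    }
}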
Go to downloads page to find release tars.\n  Plugins:\n Kafka …","ref":"/events/release-apache-skywalking-python-0-2-0/","title":"Release Apache SkyWalking Python 0.2.0"},{"body":"SkyWalking CLI 0.3.0 is released. Go to downloads page to find release tars.\n Command: health check command Command: Add trace command BugFix: Fix wrong metrics graphql path  ","excerpt":"SkyWalking CLI 0.3.0 is released. Go to downloads page to find release tars.\n Command: health check …","ref":"/events/release-apache-skywalking-cli-0-3-0/","title":"Release Apache SkyWalking CLI 0.3.0"},{"body":" Author: Srinivasan Ramaswamy, tetrate Original link, Tetrate.io blog  Asking How are you is more profound than What are your symptoms Background Recently I visited my preferred doctor. Whenever I visit, the doctor greets me with a series of light questions: How’s your day? How about the week before? Any recent trips? Did I break my cycling record? How’s your workout regimen? _Finally _he asks, “Do you have any problems?\u0026quot; On those visits when I didn\u0026rsquo;t feel ok, I would say something like, \u0026ldquo;I\u0026rsquo;m feeling dull this week, and I\u0026rsquo;m feeling more tired towards noon….\u0026quot; It\u0026rsquo;s at this point that he takes out his stethoscope, his pulse oximeter, and blood pressure apparatus. Then, if he feels he needs a more in-depth insight, he starts listing out specific tests to be made.\nWhen I asked him if the first part of the discussion was just an ice-breaker, he said, \u0026ldquo;That\u0026rsquo;s the essential part. It helps me find out how you feel, rather than what your symptoms are.\u0026quot; So, despite appearances, our opening chat about life helped him structure subsequent questions on symptoms, investigations and test results.\nOn the way back, I couldn\u0026rsquo;t stop asking myself, \u0026ldquo;Shouldn\u0026rsquo;t we be managing our mesh this way, too?\u0026quot;\nIf I strike parallels between my own health check and a health check, “tests” would be log analysis, “investigations” would be tracing, and “symptoms” would be the traditional RED (Rate, Errors and Duration) metrics. That leaves the “essential part,” which is what we are talking about here: the Wellness Factor, primarily the health of our mesh.\nHealth in the context of service mesh We can measure the performance of any observed service through RED metrics. RED metrics offer immense value in understanding the performance, reliability, and throughput of every service. Compelling visualizations of these metrics across the mesh make monitoring the entire mesh standardized and scalable. Also, setting alerts based on thresholds for each of these metrics helps to detect anomalies as and when they arise.\nTo establish the context of any service and observe them, it\u0026rsquo;s ideal to visualize the mesh as a topology.\nA topology visualization of the mesh not only allows for picking any service and watching its metrics, but also gives vital information about service dependencies and the potential impact of a given service on the mesh.\nWhile RED metrics of each service offer tremendous insights, the user is more concerned with the overall responsiveness of the mesh rather than each of these services in isolation.\nTo describe the performance of any service, right from submitting the request to receiving a completed http response, we’d be measuring the user\u0026rsquo;s perception of responsiveness. This measure of response time compared with a set threshold is called Apdex. 
This Apdex is an indicator of the health of a service in the mesh.\nApdex Apdex is a measure of response time considered against a set threshold**. **It is the ratio of satisfactory response times and unsatisfactory response times to total response times.\nApdex is an industry standard to measure the satisfaction of users based on the response time of applications and services. It measures how satisfied your users are with your services, as traditional metrics such as average response time could get skewed quickly.\nSatisfactory response time indicates the number of times when the roundtrip response time of a particular service was less than this threshold. Unsatisfactory response time while meaning the opposite, is further categorized as Tolerating and Frustrating. Tolerating accommodates any performance that is up to four times the threshold, and anything over that or any errors encountered is considered Frustrating. The threshold mentioned here is an ideal roundtrip performance that we expect from any service. We could even start with an organization-wide limit of say, 500ms.\nThe Apdex score is a ratio of satisfied and tolerating requests to the total requests made.\nEach satisfied request counts as one request, while each tolerating request counts as half a satisfied request.\nAn Apdex score takes values from 0 to 1, with 0 being the worst possible score indicating that users were always frustrated, and ‘1’ as the best possible score (100% of response times were Satisfactory).\nA percentage representation of this score also serves as the Health Indicator of the service.\nThe Math The actual computation of this Apdex score is achieved through the following formula.\n\tSatisfiedCount + ( ToleratingCount / 2 ) Apdex Score = ------------------------------------------------------ TotalSamples A percentage representation of this score is known as the Health Indicator of a service.\nExample Computation During a 2-minute period, a host handles 200 requests.\nThe Apdex threshold T = 0.5 seconds (500ms).\n 170 of the requests were handled within 500ms, so they are classified as Satisfied. 20 of the requests were handled between 500ms and 2 seconds (2000 ms), so they are classified as Tolerating. The remaining 10 were not handled properly or took longer than 2 seconds, so they are classified as Frustrated.  The resulting Apdex score is 0.9: (170 + (20/2))/200 = 0.9.\nThe next level At the next level, we can attempt to improve our topology visualization by coloring nodes based on their health. Also, we can include health as a part of the information we show when the user taps on a service.\nApdex specifications recommend the following Apdex Quality Ratings by classifying Apdex Score as Excellent (0.94 - 1.00), Good (0.85 - 0.93), Fair (0.70 - 0.84), Poor (0.50 - 0.69) and Unacceptable (0.00 - 0.49).\nTo visualize this, let’s look at our topology using traffic light colors, marking our nodes as Healthy, At-Risk and Unhealthy, where Unhealthy indicates health that falls below 80%. A rate between 80% and 95% indicates At-Risk, and health at 95% and above is termed Healthy.\nLet’s incorporate this coloring into our topology visualization and take its usability to the next level. If implemented, we will be looking at something like this.\nMoving further Apdex provides tremendous visibility into customer satisfaction on the responsiveness of our services. 
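To make the arithmetic above concrete, here is a minimal, self-contained sketch of the Apdex calculation. The threshold and the sample distribution mirror the worked example (T = 500ms, 170 satisfied, 20 tolerating, 10 frustrated); nothing here is SkyWalking API, it is just the formula.

public class ApdexSketch {
    /** Apdex = (satisfied + tolerating / 2) / total, where tolerating means (T, 4T]. */
    static double apdex(long[] responseTimesMs, long thresholdMs) {
        long satisfied = 0, tolerating = 0;
        for (long t : responseTimesMs) {
            if (t <= thresholdMs) satisfied++;
            else if (t <= 4 * thresholdMs) tolerating++;
            // anything slower (or any error, not modelled here) counts as frustrated
        }
        return (satisfied + tolerating / 2.0) / responseTimesMs.length;
    }

    public static void main(String[] args) {
        long[] samples = new long[200];
        for (int i = 0; i < 170; i++) samples[i] = 300;    // satisfied: within 500ms
        for (int i = 170; i < 190; i++) samples[i] = 1500; // tolerating: 500ms to 2000ms
        for (int i = 190; i < 200; i++) samples[i] = 5000; // frustrated: beyond 2000ms
        System.out.println(apdex(samples, 500));           // prints 0.9
    }
}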
Even more, by extending the implementation to the edges calling this service we get further insight into the health of the mesh itself.\nTwo services with similar Apdex scores offer the same customer satisfaction to the customer. However, the size of traffic that flows into the service can be of immense help in prioritizing between services to address. A service with higher traffic flow is an indication that this experience is impacting a significant number of users on the mesh.\nWhile health relates to a service, we can also analyze the interactions between two services and calculate the health of the interaction. This health calculation of every interaction on the mesh helps us establish a critical path, based on the health of all interactions in the entire topology.\nIn a big mesh, showing traffic as yet another number will make it more challenging to visualize and monitor. We can, with a bit of creativity, improve the entire visualization by rendering the edges that connect services with different thickness depending on the throughput of the service.\nAn unhealthy service participating in a high throughput transaction could lead to excessive consumption of resources. On the other hand, this visualization also offers a great tip to maximize investment in tuning services.\nTuning service that is a part of a high throughput transaction offers exponential benefits when compared to tuning an occasionally used service.\nIf we look at implementing such a visualization, which includes the health of interactions and throughput of such interactions, we would be looking at something like below :\nThe day is not far These capabilities are already available to users today as one of the UI features of Tetrate’s service mesh platform, using the highly configurable and performant observability and performance management framework: Apache SkyWalking (https://skywalking.apache.org), which monitors traffic across the mesh, aggregates RED metrics for both services and their interactions, continuously computes and monitors health of the services, and enables users to configure alerts and notifications when services cross specific thresholds, thereby having a comprehensive health visibility of the mesh.\nWith such tremendous visibility into our mesh performance, the day is not far when we at our NOC (Network Operations Center) for the mesh have this topology as our HUD (Heads Up Display).\nThis HUD, with the insights and patterns gathered over time, would predict situations and proactively prompt us on potential focus areas to improve customer satisfaction.\nThe visualization with rich historical data can also empower the Network Engineers to go back in time and look at the performance of the mesh on a similar day in the past.\nAn earnest implementation of such a visualization would be something like below :\nTo conclude With all the discussion so far, the health of a mesh is more about how our users feel, and what we can proactively do as service providers to sustain, if not enhance, the experience of our users.\nAs the world advances toward personalized medicine, we\u0026rsquo;re not far from a day when my doctor will text me: \u0026ldquo;How about feasting yourself with ice cream today and take the Gray Butte Trail to Mount Shasta!\u0026rdquo; Likewise, we can do more for our customers by having better insight into their overall wellness.\nTetrate’s approach to “service mesh health” is not only to offer management, monitoring and support but to make infrastructure healthy from the start to reduce the 
probability of incidents. Powered by the Istio, Envoy, and SkyWalking, Tetrate\u0026rsquo;s solutions enable consistent end-to-end observability, runtime security, and traffic management for any workload in any environment.\nOur customers deserve healthy systems! Please do share your thoughts on making service mesh an exciting and robust experience for our customers.\nReferences  https://en.wikipedia.org/wiki/Apdex https://www.apdex.org/overview.html https://www.apdex.org/index.php/specifications/ https://skywalking.apache.org/  ","excerpt":"Author: Srinivasan Ramaswamy, tetrate Original link, Tetrate.io blog  Asking How are you is more …","ref":"/blog/2020-07-26-apdex-and-skywalking/","title":"The Apdex Score for Measuring Service Mesh Health"},{"body":" 作者: Srinivasan Ramaswamy, tetrate 翻译:唐昊杰,南京大学在读学生 校对:吴晟 Original link, Tetrate.io blog July. 26th, 2020  \u0026ldquo;你感觉怎么样\u0026rdquo; 比 \u0026ldquo;你的症状是什么\u0026rdquo; 更重要 背景 最近我拜访了我的医生。每次去看病,医生都会首先问我一连串轻快的问题,比如:你今天过得怎么样?上周过的怎么样?最近有什么出行吗?你打破了自己的骑车记录吗?你的锻炼计划实施如何?最后他会问:“你有什么麻烦吗?”如果这个时候我感觉自己不太好,我会说:“我这周感觉很沉闷,临近中午的时候感觉更累。”这时他就会拿出听诊器、脉搏血氧仪和血压仪。然后,如果他觉得自己需要更深入的了解情况,他就开始列出我需要做的具体检查。\n当我问他,最开始的讨论是否只是为了缓和氛围。他说:“这是必不可少的部分。它帮助我发现你感觉如何,而不是你的症状是什么。\u0026quot;。我们这样关于生活的开场聊天,帮助他组织了后续关于症状、调查和测试结果的问题。\n在回来的路上,我不停地问自己:“我们是不是也应该用这种方式管理我们的网格(service mesh)?”\n如果我把自己的健康检查和网格的健康检查进行类比,“医疗检查”就是日志分析,“调查”就是追踪,“症状”就是传统的RED指标(请求速率、请求错误和请求耗时)。那么根本的问题,就是我们在这里讨论的:健康因素(主要是网格的健康)。\n服务网格中的健康状况 我们可以通过RED指标来衡量任何被观察到的服务的性能。RED指标在了解每个服务的性能、可靠性和吞吐量方面提供了巨大的价值。这些指标在网格上的令人信服的可视化使得监控全部网格变得标准化和可扩展。此外,根据这些指标的阈值设置警报有助于在指标值异常的时候进行异常检测。\n为了建立任何服务的上下文环境并观察它们,理想的做法是将网格可视化为一个拓扑结构。\n网格的拓扑结构可视化不仅允许使用者挑选任意服务并观察其指标,还可以提供有关服务依赖和特定服务在网格上的潜在影响这些重要信息。\n虽然每个服务的RED指标为使用者提供了深刻的洞察能力,但使用者更关心网格的整体响应性,而非每个单独出来的服务的响应性。\n为了描述任意服务的性能(即从提交请求到收到完成了的http响应这段时间内的表现),我们会测量用户对响应性的感知。这种将响应时间与设定的阈值进行比较的衡量标准叫做Apdex。Apdex是衡量一个服务在网格中的健康程度的指标。\nApdex Apdex是根据设定的阈值和响应时间结合考虑的衡量标准。它是满意响应时间和不满意响应时间相对于总响应时间的比率。\nApdex是根据应用和服务的响应时间来衡量使用者满意程度的行业标准。它衡量的是用户对你的服务的满意程度,因为传统的指标(如平均响应时间)可能很快就会容易形成偏差。\n基于满意度的响应时间,表示特定服务的往返响应时间小于设定的阈值的次数。不满意响应时间虽然意思相反,但又进一步分为容忍型和失望型。容忍型包括了了任何响应时间不超过四倍阈值的表现,而任何超过四倍阈值或遇到了错误的表现都被认为是失望型。这里提到的阈值是我们对任意服务所期望的理想响应表现。我们可以设置一个全局范围的阈值,如,500ms。\nApdex得分是满意型请求和容忍型请求与做出的总请求的比率。\n每个_满意的请求_算作一个请求,而每个_容忍的请求_算作半个_满意_的请求。\n一个Apdex得分从0到1的范围内取值。0是最差的分数,表示用户总是感到失望;而'1\u0026rsquo;是最好的分数(100%的响应时间是令人满意的)。\n这个分数的百分比表示也可以用作服务的健康指标。\n数学表示 Apdex得分的实际计算是通过以下公式实现的:\n\t满意请求数 + ( 容忍请求数 / 2 ) Apdex 得分 = ------------------------------------------------------ 总请求数 此公示得到的百分率,即可视为服务的健康度。\n样例计算 在两分钟的采样时间内,主机处理200个请求。\nApdex阈值T设置为0.5秒(500ms)。\n*.\t170个请求在500ms内被处理完成,它们被分类为满意型。 *.\t20个请求在500ms和2秒间被处理,它们被分类为容忍型。 *.\t剩余的10个请求没有被正确处理或者处理时间超过了2秒,所以它们被分类为失望型。\n最终的Apdex得分是0.9,即(170 + (20 / 2))/ 200。\n深入使用 在接下来的层次,我们可以尝试通过根据节点的健康状况来着色节点以改进我们的拓扑可视化。此外,我们还可以在用户点击服务时将健康状况作为我们展示的信息的一部分。\nApdex规范推荐了以下Apdex质量评级,将Apdex得分分为优秀(0.94 - 1.00)、良好(0.85 - 0.93)、一般(0.70 - 0.84)、差(0.50 - 0.69)和不可接受(0.00 - 0.49)。\n为了可视化网格的健康状况,我们用交通灯的颜色将我们的节点标记为健康、有风险和不健康,其中不健康表示健康率低于80%。健康率在80%到95%之间的表示有风险,健康率在95%及以上的称为健康。\n让我们将这种着色融入到我们的拓扑可视化中,并将其可用性提升到一个新的水平。如果实施,我们将看到下图所示的情况。\n更进一步 
Apdex为客户对我们服务响应性的满意度提供了可见性。更有甚者,通过将实施范围扩展到调用该服务的调用关系,我们可以进一步了解网格本身的健康状况。\n两个有着相似Apdex分数的服务,为客户提供了相同的客户满意度。然而,流入服务的流量大小对于优先处理哪一服务有着巨大的帮助。流量较高的服务表明这种服务体验影响了网格上更大量的使用者。\n虽然健康程度与单个服务有关,但我们也可以分析两个服务之间的交互并计算交互过程的健康程度。这种对网格上每一个交互的健康程度的计算,可以帮助我们根据整个拓扑结构中所有交互的健康程度,建立一个关键路径。\n在一个大的网格中,将流量展示为另一个数字将使可视化和监控更具挑战性。我们可以根据服务的吞吐量,通过用不同的粗细程度渲染连接服务的边来改善整个可视化的效果。\n一个位于高吞吐量事务的不健康的服务可能会导致资源的过度消耗。另一方面,这种可视化也为调整服务时获取最大化投资效果提供了一个很好的提示。\n与调整一个偶尔使用的服务相比,调整作为高吞吐量事务的一部分的那些服务会带来指数级的收益。\n实施这种包括了交互的健康状况和吞吐量的可视化,我们会看到下图所示的情况:\n这一天即将到来 目前,这些功能已经作为Tetrate服务网格平台的UI功能之一来提供给用户。该平台使用了高速可配置化、高性能的可观测性和监控性能管理平台:Apache SkyWalking (https://skywalking.apache.org),SkyWalking可以监控整个网格的流量,为服务及它们的交互合计RED指标,持续计算和监控服务的健康状况,并使用户能够在服务超过特定阈值时配置报警和通知。这些功能使得SkyWalking对网格拥有全面的健康状况可见性。\n有了这样强大的网格性能可视性,我们将可以在为网格准备的网络运营中心使用这种拓扑结构作为我们的HUD(Heads Up Display)。\nHUD随着时间的推移收集了解到的信息和模式,并将预测各种情况和主动提示我们潜在的重点领域以提高客户满意度。\n丰富的历史数据的可视化也可以使网络工程师能够看看过去中类似的一天的网格表现。\n可视化效果如下图所示。\n总结 综合到目前为止的所有讨论,网格的健康状况更多地是关于用户的感受,以及我们作为服务提供商可以采取积极行动来维持(如果不能增强)用户的体验。\n着个人化医学的发展,现在距离我的医生给我发这样短信的日子并不遥远:“要不今天享用冰淇淋并且沿着灰色小山步道到达沙斯塔山!”相似的,我们可以通过更好地了解客户的整体健康状况为他们做更多的事情。\nTetrate的“服务网格健康程度”方法不仅提供了管理,监视和支持,而且从一开始就使基础架构保持健康以减少事故发生的可能性。在Istio,Envoy和SkyWalking的支持下,Tetrate的解决方案可为任何环境中的任何工作负载提供持续的端到端可观察性,运行时安全性和流量管理。\n我们的客户应该拥有健康的系统!请分享您对使用服务网格为我们的客户带来令人兴奋和强健的体验的想法。\n引用  https://en.wikipedia.org/wiki/Apdex https://www.apdex.org/overview.html https://www.apdex.org/index.php/specifications/ https://skywalking.apache.org/  ","excerpt":"作者: Srinivasan Ramaswamy, tetrate 翻译:唐昊杰,南京大学在读学生 校对:吴晟 Original link, Tetrate.io blog July. 26th, …","ref":"/zh/2020-07-26-apdex-and-skywalking/","title":"度量服务网格健康度——Apdex得分"},{"body":"SkyWalking Python 0.1.0 is released. Go to downloads page to find release tars.\n API: agent core APIs, check the APIs and the examples Plugin: built-in libraries http, urllib.request and third-party library requests are supported. Test: agent test framework is setup, and the corresponding tests of aforementioned plugins are also added.  ","excerpt":"SkyWalking Python 0.1.0 is released. Go to downloads page to find release tars.\n API: agent core …","ref":"/events/release-apache-skywalking-python-0-1-0/","title":"Release Apache SkyWalking Python 0.1.0"},{"body":"SkyWalking Chart 3.0.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 8.0.1  ","excerpt":"SkyWalking Chart 3.0.0 is released. Go to downloads page to find release tars.\n Support SkyWalking …","ref":"/events/release-apache-skywalking-chart-3-0-0-for-skywalking-8-0-1/","title":"Release Apache SkyWalking Chart 3.0.0 for SkyWalking 8.0.1"},{"body":"Apache SkyWalking 8.0.1 已发布。SkyWalking 是观察性分析平台和应用性能管理系统,提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案,支持 Java, .Net Core, PHP, NodeJS, Golang, LUA 语言探针,支持 Envoy + Istio 构建的 Service Mesh。\n与 8.0.0 相比,此版本包含一个热修复程序。\nOAP-Backend\n 修复 no-init 模式在 Elasticsearch 存储中无法运行的错误  8.0.0 值得关注的变化:\n 添加并实现了 v3 协议,旧版本与 8.x 不兼容 移除服务、实例、端点注册机制和 inventory 存储实体 (inventory storage entities) 提供新的 GraphQL 查询协议,同时支持旧协议(计划在今年年底移除) 支持 Prometheus 网络协议,可将 Prometheus 格式的指标传输到 SkyWalking 中 提供 Python agent 移除所有 inventory 缓存 提供 Apache ShardingSphere (4.0.0, 4.1.1) agent 插件 UI dashboard 100% 可配置,可采用后台定义的新指标 修复 H2/MySQL 实现中的 SQL 注入漏洞 Upgrade Nacos to avoid the FastJson CVE in high frequency. 
升级 Nacos 以避免 FastJson CVE 升级 jasckson-databind 至 2.9.10  下载地址:http://skywalking.apache.org/downloads/\n","excerpt":"Apache SkyWalking 8.0.1 已发布。SkyWalking 是观察性分析平台和应用性能管理系统,提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案,支持 Java, …","ref":"/zh/2020-06-21-skywalking8-0-1-release/","title":"Apache SkyWalking 8.0.1 发布"},{"body":"SkyWalking Nginx LUA 0.2.0 is release. Go to downloads page to find release tars.\n Adapt the new v3 protocol. Implement correlation protocol. Support batch segment report.  ","excerpt":"SkyWalking Nginx LUA 0.2.0 is release. Go to downloads page to find release tars.\n Adapt the new v3 …","ref":"/events/release-apache-skywalking-nginx-lua-0-2-0/","title":"Relase Apache SkyWalking Nginx LUA 0.2.0"},{"body":"SkyWalking APM 8.0.0 is release. Go to downloads page to find release tars.\nProject  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy procotol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","excerpt":"SkyWalking APM 8.0.0 is release. 
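For readers wondering what the "Support Prometheus network protocol" item listed above looks like operationally, the sketch below shows the OAP-side fetcher being enabled and pointed at a Prometheus endpoint. The module and field names are assumptions drawn from 8.x application.yml and fetcher rule files, and the target URL is purely illustrative, so check them against your own distribution before relying on them.

# application.yml: enable the Prometheus fetcher module (sketch)
prometheus-fetcher:
  selector: ${SW_PROMETHEUS_FETCHER:default}
  default:
    active: ${SW_PROMETHEUS_FETCHER_ACTIVE:true}

# config/fetcher-prom-rules/example.yaml: scrape a Prometheus-instrumented target (sketch)
fetcherInterval: PT15S
fetcherTimeout: PT10S
metricsPath: /metrics
staticConfig:
  targets:
    - url: http://localhost:9100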
Go to downloads page to find release tars.\nProject  v3 protocol is …","ref":"/events/release-apache-skywalking-apm-8-0-0/","title":"Release Apache SkyWalking APM 8.0.0"},{"body":"可观察性平台和开源应用程序性能监控(APM)项目 Apache SkyWalking,今天刚宣布 8.0 的发布版本。素以强劲指标、追踪与服务网格能力见称的 SkyWalking ,在最新版本中的功能性延展到用户渴求已久的功能 —— 将指标功能和包括 Prometheus 的其他指标收集系统进行了融合。\n什么是 Apache SkyWalking? SkyWalking 是可观察性平台和 APM 工具,可以选择是否搭载服务网格的使用,为微服务、云原生和容器化应用提供自动度量功能。顶尖的 Apache 项目由来自世界各地的社区人员支持,应用在阿里巴巴、华为、腾讯、百度和大量其他企业。SkyWalking 提供记录、监控和追踪功能,同时也得力于其架构而拥有数据收集终端、分析平台,还有用户界面。\n值得关注的优化包括:  用户界面 Dashboard 上提供百分百的自由度,用户可以任意进行配置,采用后台新定义的指标。 支持 Prometheus 导出格式。Prometheus 格式的指标可以转换至 SkyWalking。 SkyWalking 现已可以自主监控服务网格,为 Istio 和 Envoy 提供指标。 服务、实例、终端地址的注册机制,和库存存储实体已经被移除了。  无须修改原始码的前提下,为用户界面加入新的指标 对于 SkyWalking 的用户,8.0 版本的亮点将会是数据模型的更新,而且传播格式也针对更多语言进行优化。再加上引进了新的 MeterSystem ,除了可以同步运行传统追踪模式,用户还可自定义需要收集的指标。追踪和服务网格专注在拓扑和服务流量的指标上,而 MeterSystem 则汇报用户感兴趣的业务指标,例如是数据库存取性能、圣诞节期间的下单率,或者用户注册或下单的百分比。这些指标数据会在 SkyWalking 的用户界面 Dashboard 上以图像显示。指标的面板数据和拓扑图可以通过 Envoy 的指标绘制,而追踪分析也可以支持 Istio 的遥测。Dashboard 还支持以 JSON 格式导入、导出,而 Dashboard 上的自定义指标也支持设定指标名称、实体种类(服务、实例、终端地址或全部)、标记值等。用户界面模板上已详细描述了用户界面的逻辑和原型配置,以及它的 Dashboard、tab 和组件。\n观察任何配备了 Prometheus 的应用 在这次最新的社区发布中,SkyWalking 可以观察任何配备了 Prometheus 或者提供了 Prometheus 终端地址的应用。这项更新为很多想采用 SkyWalking 指标和追踪的用户节省了不少时间,现在你不再需要重新设置指标工具,就可以获得 Prometheus 数据。因为 Prometheus 更简单、更为人熟悉,是不少用户的不二选择。有了 8.0 版本,Prometheus 网络协议就能够读取所有已设定在 API 上的数据,另外 Prometheus 格式的指标也可转换至 SkyWalking 上。如此一来,通过图像方式展示,所有的指标和拓扑都能一目了然。同时,也支持 Prometheus 的 fetcher。\n监控你的网格 SkyWalking 现在不再只是监控服务或平台,而是监控整个网格。有了 8.0 版本,你除了能获取关于你的网格的指标(包括 Istio 和 Envoy 在内),同时也能通过 SkyWalking 监控自身的性能。因为当监控服务在观察业务集群的同时,它也能实现自我观察,确保运维团队拥有稳定可靠的平台。\n性能优化 最后,8.0 发布移除了注册机制,也不再需要使用独一无二的整数来代表实体。这项改变将大幅优化性能。想了解完整的更新功能列表,可以阅读在 SkyWalking 社区发布的公告页面。\n额外资源  追踪 Twitter 获取更多 SkyWalking 最新资讯 SkyWalking 未来的发布会加入原生指标 API 和融合 Micrometer (Sleuth) 指标集合。  ","excerpt":"可观察性平台和开源应用程序性能监控(APM)项目 Apache SkyWalking,今天刚宣布 8.0 的发布版本。素以强劲指标、追踪与服务网格能力见称的 SkyWalking ,在最新版本中的功能 …","ref":"/zh/whats-new-in-skywalking-metersystem-and-mesh-monitoring-in-8-0/","title":"SkyWalking 的最新动向?8.0 版本的 MeterSystem 和网格监控"},{"body":"作者:宋净超、张伟\n日前,云原生网络代理 MOSN v0.12.0 发布,观察性分析平台和应用性能管理系统 SkyWalking 完成了与 MOSN 的集成,作为 MOSN 中的支持的分布式追踪系统之一,旨在实现在微服务和 Service Mesh 中的更强大的可观察性。\n背景 相比传统的巨石(Monolith)应用,微服务的一个主要变化是将应用中的不同模块拆分为了独立的进程。在微服务架构下,原来进程内的方法调用成为了跨进程的远程方法调用。相对于单一进程内的方法调用而言,跨进程调用的调试和故障分析是非常困难的,难以使用传统的代码调试程序或者日志打印来对分布式的调用过程进行查看和分析。\n如上图右边所示,微服务架构中系统中各个微服务之间存在复杂的调用关系。\n一个来自客户端的请求在其业务处理过程中经过了多个微服务进程。我们如果想要对该请求的端到端调用过程进行完整的分析,则必须将该请求经过的所有进程的相关信息都收集起来并关联在一起,这就是“分布式追踪”。\n以上关于分布式追踪的介绍引用自 Istio Handbook。\nMOSN 中 tracing 的架构 MOSN 的 tracing 框架由 Driver、Tracer 和 Span 三个部分组成。\nDriver 是 Tracer 的容器,管理注册的 Tracer 实例,Tracer 是 tracing 的入口,根据请求信息创建一个 Span,Span 存储当前跨度的链路信息。\n目前 MOSN tracing 有 SOFATracer 和 SkyWalking 两种实现。SOFATracer 支持 http1 和 xprotocol 协议的链路追踪,将 trace 数据写入本地日志文件中。SkyWalking 支持 http1 协议的链路追踪,使用原生的 Go 语言探针 go2sky 将 trace 数据通过 gRPC 上报到 SkyWalking 后端服务。\n快速开始 下面将使用 Docker 和 docker-compose 来快速开始运行一个集成了 SkyWalking 的分布式追踪示例,该示例代码请见 MOSN GitHub。\n准备 安装 docker 和 docker-compose。\n  安装 docker\n  安装 docker-compose\n  需要一个编译好的 MOSN 程序,您可以下载 MOSN 源码自行编译,或者直接下载 MOSN v0.12.0 发行版以获取 MOSN 的运行时二进制文件。\n下面将以源码编译的方式演示 MOSN 如何与 SkyWalking 集成。\ncd ${projectpath}/cmd/mosn/main go build 获取示例代码目录。\n${targetpath} = ${projectpath}/examples/codes/trace/skywalking/http/ 将编译好的程序移动到示例代码目录。\nmv main ${targetpath}/ cd ${targetpath} 目录结构 下面是 SkyWalking 的目录结构。\n* skywalking └─── http │ main # 编译完成的 MOSN 程序 | server.go # 模拟的 Http Server | clint.go # 模拟的 Http Client | config.json # MOSN 
配置 | skywalking-docker-compose.yaml # skywalking docker-compose 运行说明 启动 SkyWalking oap \u0026amp; ui。\ndocker-compose -f skywalking-docker-compose.yaml up -d 启动一个 HTTP Server。\ngo run server.go 启动 MOSN。\n./main start -c config.json 启动一个 HTTP Client。\ngo run client.go 打开 http://127.0.0.1:8080 查看 SkyWalking-UI,SkyWalking Dashboard 界面如下图所示。\n在打开 Dashboard 后请点击右上角的 Auto 按钮以使页面自动刷新。\nDemo 视频 下面来看一下该 Demo 的操作视频。\n\n清理 要想销毁 SkyWalking 后台运行的 docker 容器只需要下面的命令。\ncd ${projectpath}/examples/codes/trace/skywalking/http/ docker-compose -f skywalking-docker-compose.yaml down 未来计划 在今年五月份,SkyWalking 8.0 版本会进行一次全面升级,采用新的探针协议和分析逻辑,探针将更具互感知能力,更好的在 Service Mesh 下使用探针进行监控。同时,SkyWalking 将开放之前仅存在于内核中的 metrics 指标分析体系。Prmoetheus、Spring Cloud Sleuth、Zabbix 等常用的 metrics 监控方式,都会被统一的接入进来,进行分析。此外, SkyWalking 与 MOSN 社区将继续合作:支持追踪 Dubbo 和 SOFARPC,同时适配 sidecar 模式下的链路追踪。\n关于 MOSN MOSN 是一款使用 Go 语言开发的网络代理软件,由蚂蚁金服开源并经过几十万容器的生产级验证。 MOSN 作为云原生的网络数据平面,旨在为服务提供多协议、模块化、智能化、安全的代理能力。 MOSN 是 Modular Open Smart Network 的简称。 MOSN 可以与任何支持 xDS API 的 Service Mesh 集成,亦可以作为独立的四、七层负载均衡,API Gateway、云原生 Ingress 等使用。\n GitHub:https://github.com/mosn/mosn 官网:https://mosn.io  关于 Skywalking SkyWalking 是观察性分析平台和应用性能管理系统。提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案。支持 Java、.Net Core、PHP、NodeJS、Golang、LUA 语言探针,支持 Envoy/MOSN + Istio 构建的 Service Mesh。\n GitHub:https://github.com/apache/skywalking 官网:https://skywalking.apache.org  关于本文中的示例请参考 MOSN GitHub 和 MOSN 官方文档。\n","excerpt":"作者:宋净超、张伟\n日前,云原生网络代理 MOSN v0.12.0 发布,观察性分析平台和应用性能管理系统 SkyWalking 完成了与 MOSN 的集成,作为 MOSN 中的支持的分布式追踪系统之 …","ref":"/zh/2020-04-28-skywalking-and-mosn/","title":"SkyWalking 支持云原生网络代理 MOSN 做分布式追踪"},{"body":"Based on his continuous contributions, Wei Zhang (a.k.a arugal) has been invited to join the PMC. Welcome aboard.\n","excerpt":"Based on his continuous contributions, Wei Zhang (a.k.a arugal) has been invited to join the PMC. …","ref":"/events/welcome-wei-zhang-to-join-the-pmc/","title":"Welcome Wei Zhang to join the PMC"},{"body":"目录:\n 1. 概述 2. 搭建 SkyWalking 单机环境 3. 搭建 SkyWalking 集群环境 4. 告警 5. 注意事项 6. Spring Boot 使用示例 6. Spring Cloud 使用示例    作者:芋道源码 原文地址   1. 概述 1.1 概念 SkyWalking 是什么?\n FROM http://skywalking.apache.org/\n分布式系统的应用程序性能监视工具,专为微服务、云原生架构和基于容器(Docker、K8s、Mesos)架构而设计。\n提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案。\n 1.2 功能列表 SkyWalking 有哪些功能?\n FROM http://skywalking.apache.org/\n 多种监控手段。可以通过语言探针和 service mesh 获得监控是数据。 多个语言自动探针。包括 Java,.NET Core 和 Node.JS。 轻量高效。无需大数据平台,和大量的服务器资源。 模块化。UI、存储、集群管理都有多种机制可选。 支持告警。 优秀的可视化解决方案。   1.3 整体架构 SkyWalking 整体架构如何?\n FROM http://skywalking.apache.org/\n 整个架构,分成上、下、左、右四部分:\n 考虑到让描述更简单,我们舍弃掉 Metric 指标相关,而着重在 Tracing 链路相关功能。\n  上部分 Agent :负责从应用中,收集链路信息,发送给 SkyWalking OAP 服务器。目前支持 SkyWalking、Zikpin、Jaeger 等提供的 Tracing 数据信息。而我们目前采用的是,SkyWalking Agent 收集 SkyWalking Tracing 数据,传递给服务器。 下部分 SkyWalking OAP :负责接收 Agent 发送的 Tracing 数据信息,然后进行分析(Analysis Core) ,存储到外部存储器( Storage ),最终提供查询( Query )功能。 右部分 Storage :Tracing 数据存储。目前支持 ES、MySQL、Sharding Sphere、TiDB、H2 多种存储器。而我们目前采用的是 ES ,主要考虑是 SkyWalking 开发团队自己的生产环境采用 ES 为主。 左部分 SkyWalking UI :负责提供控台,查看链路等等。  1.4 官方文档 在 https://github.com/apache/skywalking/tree/master/docs 地址下,提供了 SkyWalking 的英文文档。\n考虑到大多数胖友的英语水平和艿艿不相伯仲,再加上胖友一开始对 SkyWalking 比较陌生,所以比较推荐先阅读 https://github.com/SkyAPM/document-cn-translation-of-skywalking 地址,提供了 SkyWalking 的中文文档。\n考虑到胖友使用 SkyWalking 的目的,是实现分布式链路追踪的功能,所以最好去了解下相关的知识。这里推荐阅读两篇文章:\n 《OpenTracing 官方标准 —— 中文版》 Google 论文 《Dapper,大规模分布式系统的跟踪系统》  2. 
搭建 SkyWalking 单机环境 考虑到让胖友更快的入门,我们来搭建一个 SkyWalking 单机环境,步骤如下:\n 第一步,搭建一个 Elasticsearch 服务。 第二步,下载 SkyWalking 软件包。 第三步,搭建一个 SkyWalking OAP 服务。 第四步,启动一个 Spring Boot 应用,并配置 SkyWalking Agent。 第五步,搭建一个 SkyWalking UI 服务。  仅仅五步,按照艿艿标题党的性格,应该给本文取个《10 分钟快速搭建 SkyWalking 服务》标题才对,哈哈哈。\n2.1 Elasticsearch 搭建  FROM https://www.elastic.co/cn/products/elasticsearch\nElasticsearch 是一个分布式、RESTful 风格的搜索和数据分析引擎,能够解决不断涌现出的各种用例。 作为 Elastic Stack 的核心,它集中存储您的数据,帮助您发现意料之中以及意料之外的情况。\n 参考《Elasticsearch 极简入门》的「1. 单机部署」小节,搭建一个 Elasticsearch 单机服务。\n不过要注意,本文使用的是 Elasticsearch 7.5.1 版本。因为 SkyWalking 6.6.0 版本,增加了对 Elasticsearch 7.X 版本的支持。当然,如果胖友使用 Elasticsearch 6.X 版本也是可以的。\n2.2 下载 SkyWalking 软件包 对于 SkyWalking 的软件包,有两种方式获取:\n 手动编译 官方包  一般情况下,我们建议使用官方包。手动编译,更多是尝鲜或者等着急修复的 BUG 的版本。\n2.2.1 官方包 在 http://skywalking.apache.org/downloads/ 下,我们下载操作系统对应的发布版。\n这里,我们选择 Binary Distribution for ElasticSearch 7 (Linux) 版本,因为艿艿是 Mac 环境,再加上想使用 Elasticsearch 7.X 版本作为存储。如果胖友想用 Elasticsearch 6.X 版本作为存储,记得下载 Binary Distribution (Linux) 版本。\n① 下载:\n# 创建目录 $ mkdir -p /Users/yunai/skywalking $ cd /Users/yunai/skywalking # 下载 $ wget http://mirror.bit.edu.cn/apache/skywalking/6.6.0/apache-skywalking-apm-es7-6.6.0.tar.gz ② 解压:\n# 解压 $ tar -zxvf apache-skywalking-apm-es7-6.6.0.tar.gz $ cd apache-skywalking-apm-bin-es7 $ ls -ls 4 drwxr-xr-x 8 root root 4096 Sep 9 15:09 agent # SkyWalking Agent 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 bin # 执行脚本 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 config # SkyWalking OAP Server 配置文件 32 -rwxr-xr-x 1 root root 28903 Sep 9 14:32 LICENSE 4 drwxr-xr-x 3 root root 4096 Sep 9 15:44 licenses 32 -rwxr-xr-x 1 root root 31850 Sep 9 14:32 NOTICE 16 drwxr-xr-x 2 root root 16384 Sep 9 15:22 oap-libs # SkyWalking OAP Server 4 -rw-r--r-- 1 root root 1978 Sep 9 14:32 README.txt 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 webapp # SkyWalking UI 2.2.2 手动编译  友情提示:如果胖友没有编译 SkyWalking 源码的诉求,可以跳过本小节。\n 参考 How to build project 文章。\n需要前置安装如下:\n GIT JDK 8+ Maven  ① 克隆代码:\n$ git clone https://github.com/apache/skywalking.git  因为网络问题,可能克隆会有点久。  ② 初始化子模块:\n$ cd skywalking $ git submodule init $ git submodule update ③ 编译\n$ ./mvnw clean package -DskipTests  编译过程,如果机子比较差,花费时间会比较久。  ④ 查看编译结果\n$ cd apm-dist # 编译结果目录 $ cd target $ tar -zxvf apache-skywalking-apm-bin.tar.gz # 解压 Linux 包 $ cd apache-skywalking-apm-bin $ ls -ls 4 drwxr-xr-x 8 root root 4096 Sep 9 15:09 agent # SkyWalking Agent 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 bin # 执行脚本 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 config # SkyWalking OAP Server 配置文件 32 -rwxr-xr-x 1 root root 28903 Sep 9 14:32 LICENSE 4 drwxr-xr-x 3 root root 4096 Sep 9 15:44 licenses 32 -rwxr-xr-x 1 root root 31850 Sep 9 14:32 NOTICE 16 drwxr-xr-x 2 root root 16384 Sep 9 15:22 oap-libs # SkyWalking OAP Server 4 -rw-r--r-- 1 root root 1978 Sep 9 14:32 README.txt 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 webapp # SkyWalking UI 2.3 SkyWalking OAP 搭建 ① 修改 OAP 配置文件\n 友情提示:如果配置文件,适合 SkyWalking 6.X 版本。\n $ vi config/application.ymlstorage:elasticsearch7:nameSpace:${SW_NAMESPACE:\u0026#34;elasticsearch\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}# trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}# trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}# Those data TTL settings will 
override the same settings in core module.recordDataTTL:${SW_STORAGE_ES_RECORD_DATA_TTL:7}# Unit is dayotherMetricsDataTTL:${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45}# Unit is daymonthMetricsDataTTL:${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18}# Unit is month# Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.htmlbulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the bulk every 1000 requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}# h2:# driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}# url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}# user: ${SW_STORAGE_H2_USER:sa}# metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000} storage.elasticsearch7 配置项,设置使用 Elasticsearch 7.X 版本作为存储器。  这里,我们打开注释,并记得通过 nameSpace 设置 Elasticsearch 集群名。   storage.elasticsearch 配置项,设置使用 Elasticsearch 6.X 版本作为存储器。  这里,我们无需做任何改动。 如果胖友使用 Elasticsearch 6.X 版本作为存储器,记得设置这个配置项,而不是 storage.elasticsearch7 配置项。   storage.h2 配置项,设置使用 H2 作为存储器。  这里,我们需要手动注释掉,因为 H2 是默认配置的存储器。     友情提示:如果配置文件,适合 SkyWalking 7.X 版本。\n  重点修改 storage 配置项,通过 storage.selector 配置项来设置具体使用的存储器。 storage.elasticsearch 配置项,设置使用 Elasticsearch 6.X 版本作为存储器。胖友可以主要修改 nameSpace、clusterNodes 两个配置项即可,设置使用的 Elasticsearch 的集群和命名空间。 storage.elasticsearch7 配置项,设置使用 Elasticsearch 7.X 版本作为存储器。 还有 MySQL、H2、InfluxDB 等等存储器的配置可以选择,胖友自己根据需要去选择哈~  ② 启动 SkyWalking OAP 服务\n$ bin/oapService.sh SkyWalking OAP started successfully! 是否真正启动成功,胖友打开 logs/skywalking-oap-server.log 日志文件,查看是否有错误日志。首次启动时,因为 SkyWalking OAP 会创建 Elasticsearch 的索引,所以会“疯狂”的打印日志。最终,我们看到如下日志,基本可以代表 SkyWalking OAP 服务启动成功:\n 友情提示:因为首次启动会创建 Elasticsearch 索引,所以可能会比较慢。\n 2020-01-02 18:22:53,635 - org.eclipse.jetty.server.Server - 444 [main] INFO [] - Started @35249ms 2.4 SkyWalking UI 搭建 ① 启动 SkyWalking UI 服务\nbin/webappService.sh SkyWalking Web Application started successfully! 
是否真正启动成功,胖友打开 logs/logs/webapp.log 日志文件,查看是否有错误日志。最终,我们看到如下日志,基本可以代表 SkyWalking UI 服务启动成功:\n2020-01-02 18:27:02.824 INFO 48250 --- [main] o.a.s.apm.webapp.ApplicationStartUp : Started ApplicationStartUp in 7.774 seconds (JVM running for 8.316) 如果想要修改 SkyWalking UI 服务的参数,可以编辑 webapp/webapp.yml 配置文件。例如说:\n server.port :SkyWalking UI 服务端口。 collector.ribbon.listOfServers :SkyWalking OAP 服务地址数组。因为 SkyWalking UI 界面的数据,是通过请求 SkyWalking OAP 服务来获得的。  ② 访问 UI 界面:\n浏览器打开 http://127.0.0.1:8080 。界面如下图:2.5 SkyWalking Agent 大多数情况下,我们在启动项目的 Shell 脚本上,通过 -javaagent 参数进行配置 SkyWalking Agent 。我们在 「2.3.1 Shell」 小节来看。\n考虑到偶尔我们需要在 IDE 中,也希望使用 SkyWalking Agent ,所以我们在 「2.3.2 IDEA」 小节来看。\n2.3.1 Shell ① Agent 软件包\n我们需要将 apache-skywalking-apm-bin/agent 目录,拷贝到 Java 应用所在的服务器上。这样,Java 应用才可以配置使用该 SkyWalking Agent。我们来看看 Agent 目录下有哪些:\n$ ls -ls total 35176 0 drwxr-xr-x@ 7 yunai staff 224 Dec 24 14:20 activations 0 drwxr-xr-x@ 4 yunai staff 128 Dec 24 14:21 bootstrap-plugins 0 drwxr-xr-x@ 3 yunai staff 96 Dec 24 14:12 config # SkyWalking Agent 配置 0 drwxr-xr-x@ 3 yunai staff 96 Jan 2 19:29 logs # SkyWalking Agent 日志 0 drwxr-xr-x@ 13 yunai staff 416 Dec 24 14:22 optional-plugins # 可选插件 0 drwxr-xr-x@ 68 yunai staff 2176 Dec 24 14:20 plugins # 插件 35176 -rw-r--r--@ 1 yunai staff 18006420 Dec 24 14:12 skywalking-agent.jar # SkyWalking Agent  关于 SkyWalking Agent 提供的插件列表,可以看看《SkyWalking 文档 —— 插件支持列表》。  因为艿艿是在本机测试,所以无需拷贝,SkyWalking Agent 目录是 /Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/。\n考虑到方便胖友,艿艿这里提供了一个最简的 Spring Boot 应用 lab-39-demo-2.2.2.RELEASE.jar。对应 Github 仓库是 lab-39-demo。\n② 配置 Java 启动脚本\n# SkyWalking Agent 配置 export SW_AGENT_NAME=demo-application # 配置 Agent 名字。一般来说,我们直接使用 Spring Boot 项目的 `spring.application.name` 。 export SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800 # 配置 Collector 地址。 export SW_AGENT_SPAN_LIMIT=2000 # 配置链路的最大 Span 数量。一般情况下,不需要配置,默认为 300 。主要考虑,有些新上 SkyWalking Agent 的项目,代码可能比较糟糕。 export JAVA_AGENT=-javaagent:/Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/skywalking-agent.jar # SkyWalking Agent jar 地址。 # Jar 启动 java -jar $JAVA_AGENT -jar lab-39-demo-2.2.2.RELEASE.jar  通过环境变量,进行配置。 更多的变量,可以在 /work/programs/skywalking/apache-skywalking-apm-bin/agent/config/agent.config 查看。要注意,可能有些变量是被注释掉的,例如说 SW_AGENT_SPAN_LIMIT 对应的 agent.span_limit_per_segment 。  ③ 执行脚本:\n直接执行上述的 Shell 脚本,启动 Java 项目。在启动日志中,我们可以看到 SkyWalking Agent 被加载的日志。日志示例如下:\nDEBUG 2020-01-02 19:29:29:400 main AgentPackagePath : The beacon class location is jar:file:/Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/skywalking-agent.jar!/org/apache/skywalking/apm/agent/core/boot/AgentPackagePath.class. INFO 2020-01-02 19:29:29:402 main SnifferConfigInitializer : Config file found in /Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/config/agent.config. 同时,也可以在 /Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/agent/logs/skywalking-api.log 查看对应的 SkyWalking Agent 日志。日志示例如下:\nDEBUG 2020-01-02 19:37:22:539 SkywalkingAgent-5-ServiceAndEndpointRegisterClient-0 ServiceAndEndpointRegisterClient : ServiceAndEndpointRegisterClient running, status:CONNECTED.  
这里,我们看到 status:CONNECTED ,表示 SkyWalking Agent 连接 SkyWalking OAP 服务成功。  ④ 简单测试\n完事,可以去 SkyWalking UI 查看是否链路收集成功。\n1、首先,使用浏览器,访问下 http://127.0.0.1:8079/demo/echo 地址,请求下 Spring Boot 应用提供的 API。因为,我们要追踪下该链路。\n2、然后,继续使用浏览器,打开 http://127.0.0.1:8080/ 地址,进入 SkyWalking UI 界面。如下图所示:这里,我们会看到 SkyWalking 中非常重要的三个概念:\n  服务(Service) :表示对请求提供相同行为的一系列或一组工作负载。在使用 Agent 或 SDK 的时候,你可以定义服务的名字。如果不定义的话,SkyWalking 将会使用你在平台(例如说 Istio)上定义的名字。\n 这里,我们可以看到 Spring Boot 应用的服务为 \u0026quot;demo-application\u0026quot;,就是我们在环境变量 SW_AGENT_NAME 中所定义的。\n   服务实例(Service Instance) :上述的一组工作负载中的每一个工作负载称为一个实例。就像 Kubernetes 中的 pods 一样, 服务实例未必就是操作系统上的一个进程。但当你在使用 Agent 的时候, 一个服务实例实际就是操作系统上的一个真实进程。\n 这里,我们可以看到 Spring Boot 应用的服务为 {agent_name}-pid:{pid}@{hostname},由 Agent 自动生成。关于它,我们在「5.1 hostname」小节中,有进一步的讲解,胖友可以瞅瞅。\n   端点(Endpoint) :对于特定服务所接收的请求路径, 如 HTTP 的 URI 路径和 gRPC 服务的类名 + 方法签名。\n 这里,我们可以看到 Spring Boot 应用的一个端点,为 API 接口 /demo/echo。\n   3、之后,点击「拓扑图」菜单,进入查看拓扑图的界面。如下图所示:4、再之后,点击「追踪」菜单,进入查看链路数据的界面。如下图所示:2.3.2 IDEA 我们统一使用 IDEA 作为开发 IDE ,所以忽略 Eclipse 的配置方式。\n具体参考下图,比较简单:3. 搭建 SkyWalking 集群环境 在生产环境下,我们一般推荐搭建 SkyWalking 集群环境。😈 当然,如果公司比较抠门,也可以在生产环境下使用 SkyWalking 单机环境,毕竟 SkyWalking 挂了之后,不影响业务的正常运行。\n搭建一个 SkyWalking 集群环境,步骤如下:\n 第一步,搭建一个 Elasticsearch 服务的集群。 第二步,搭建一个注册中心的集群。目前 SkyWalking 支持 Zookeeper、Kubernetes、Consul、Nacos 作为注册中心。 第三步,搭建一个 SkyWalking OAP 服务的集群,同时参考《SkyWalking 文档 —— 集群管理》,将 SkyWalking OAP 服务注册到注册中心上。 第四步,启动一个 Spring Boot 应用,并配置 SkyWalking Agent。另外,在设置 SkyWaling Agent 的 SW_AGENT_COLLECTOR_BACKEND_SERVICES 地址时,需要设置多个 SkyWalking OAP 服务的地址数组。 第五步,搭建一个 SkyWalking UI 服务的集群,同时使用 Nginx 进行负载均衡。另外,在设置 SkyWalking UI 的 collector.ribbon.listOfServers 地址时,也需要设置多个 SkyWalking OAP 服务的地址数组。  😈 具体的搭建过程,并不复杂,胖友自己去尝试下。\n4. 告警 在 SkyWaling 中,已经提供了告警功能,具体可见《SkyWalking 文档 —— 告警》。\n默认情况下,SkyWalking 已经内置告警规则。同时,我们可以参考告警规则,进行自定义。\n在满足 SkyWalking 告警规则的触发规则时,我们在 SkyWaling UI 的告警界面,可以看到告警内容。如下图所示:同时,我们自定义 Webhook ,对接 SkyWalking 的告警请求。而具体的邮箱、钉钉等告警方式,需要自己进行开发。至于自定义 WebHook 如何实现,可以参考:\n Java 语言:  《基于 SkyWalking 的分布式跟踪系统 - 异常告警》   Go 语言:  dingding-notify-for-skywalking infra-skywalking-webhook    5. 注意事项 5.1 hostname 配置 在 SkyWalking 中,每个被监控的实例的名字,会包含 hostname 。格式为:{agent_name}-pid:{pid}@{hostname} ,例如说:\u0026quot;scrm-scheduler-pid:27629@iZbp1e2xlyvr7fh67qi59oZ\u0026quot; 。\n因为有些服务器未正确设置 hostname ,所以我们一定要去修改,不然都不知道是哪个服务器上的实例(😈 鬼知道 \u0026quot;iZbp1e2xlyvr7fh67qi59oZ\u0026quot; 一串是哪个服务器啊)。\n修改方式如下:\n1、修改 /etc/hosts 的 hostname :\n127.0.0.1 localhost ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 10.80.62.151 pre-app-01 # 就是这个,其中 10.80.62.151 是本机内网 IP ,pre-app-01 是 hostname 。 2、修改本机 hostname :\n参考 《CentOS7 修改主机名(hostname)》\n$ hostname pre-app-01 # 其中 pre-app-01 就是你希望的 hostname 。 $ hostnamectl set-hostname pre-app-01 # 其中 pre-app-01 就是你希望的 hostname 。 6. Spring Boot 使用示例 在 《芋道 Spring Boot 链路追踪 SkyWalking 入门》 中,我们来详细学习如何在 Spring Boot 中,整合并使用 SkyWalking 收集链路数据。😈 相比「2.5 SkyWaling Agent」来说,我们会提供更加丰富的示例哟。\n7. Spring Cloud 使用示例 在 《芋道 Spring Cloud 链路追踪 SkyWalking 入门》 中,我们来详细学习如何在 Spring Cloud 中,整合并使用 SkyWalking 收集链路数据。😈 相比「2.5 SkyWaling Agent」来说,我们会提供更加丰富的示例哟。\n666. 彩蛋 本文仅仅是简单的 SkyWalking 入门文章,如果胖友想要更好的使用 SkyWalking,推荐通读下《SkyWalking 文档》。\n想要进一步深入的胖友,也可以阅读如下资料:\n 《SkyWalking 源码解析》 《APM 巅峰对决:Apache Skywalking P.K. Pinpoint》 《SkyWalking 官方 —— 博客合集》  😈 最后弱弱的问一句,上完 SkyWaling 之后,有没发现自己系统各种地方慢慢慢!嘻嘻。\n","excerpt":"目录:\n 1. 概述 2. 搭建 SkyWalking 单机环境 3. 搭建 SkyWalking 集群环境 4. 告警 5. 注意事项 6. Spring Boot 使用示例 6. 
Spring …","ref":"/zh/2020-04-19-skywalking-quick-start/","title":"SkyWalking 极简入门"},{"body":"This post originally appears on The New Stack\nThis post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. 
The key for distributed tracing is the tracing context, the identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider method invocations with the same stack depth and signature (method, line number, etc.) to be the same operation. We derive span timestamps from the thread dumps that contain the same operation. Let’s put this visually:\nThe figure above represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped, but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling with Apache SkyWalking 7 Distributed profiling is built into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of a performance issue.\nfinal CountDownLatch countDownLatch = new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who made them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: the slowdown only happens when the lock condition is hit. In SkyWalking APM, we have metrics for endpoint p99/p95 latency, so it is easy to see that the p99 of this endpoint is far from the average response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result says.\nThis is the profile analysis stack view. We see the stack element names and durations (including/excluding children), and the slowest methods are highlighted. It clearly shows that “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic.
If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason: a method called many times can also end up captured in each thread dump. Even so, the profile did what it was designed for. It still helps the OPS/SRE team to locate the code causing the issue.\nThe second consideration is overhead: the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nHow to Try This Everything we discussed, including the Apache SkyWalking Java Agent, profile analysis code, and UI, can be found in our GitHub repository. We hope you enjoyed this new profile method, and love Apache SkyWalking. If so, give us a star on GitHub to encourage us.\nSkyWalking 7 has just been released. You can contact the project team through the following channels:\n Follow SkyWalking on Twitter. Subscribe to the mailing list: dev@skywalking.apache.org. Send an email to dev-subscribe@skywalking.apache.org to subscribe.  Co-author Sheng Wu is a Tetrate founding engineer and the founder and VP of Apache SkyWalking. He is solving the problem of observability for large-scale service meshes in hybrid and multi-cloud environments.\nAdrian Cole works in the Spring Cloud team at VMware, mostly on Zipkin.\nHan Liu is a tech expert at Lagou. He is an Apache SkyWalking committer.\n","excerpt":"This post originally appears on The New Stack\nThis post introduces a way to automatically profile …","ref":"/blog/2020-04-13-apache-skywalking-profiling/","title":"Apache SkyWalking: Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"SkyWalking Chart 2.0.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 7.0.0 Support setting ES user/password Add CI for release  ","excerpt":"SkyWalking Chart 2.0.0 is released. Go to downloads page to find release tars.\n Support SkyWalking …","ref":"/events/release-apache-skywalking-chart-2-0-0-for-skywalking-7-0-0/","title":"Release Apache SkyWalking Chart 2.0.0 for SkyWalking 7.0.0"},{"body":"SkyWalking APM 7.0.0 is released. Go to downloads page to find release tars.\n Upgrade JDK minimal JDK requirement to JDK8 Support code-level performance profiling No longer supports the SkyWalking v5 agent in-wire and out-wire protocol; v6 is required.  ","excerpt":"SkyWalking APM 7.0.0 is released. 
Go to downloads page to find release tars.\n Upgrade JDK minimal JDK …","ref":"/events/release-apache-skywalking-apm-7-0-0/","title":"Release Apache SkyWalking APM 7.0.0"},{"body":"","excerpt":"","ref":"/zh_tags/agent/","title":"Agent"},{"body":"","excerpt":"","ref":"/zh_tags/java/","title":"Java"},{"body":"","excerpt":"","ref":"/zh_tags/profiling/","title":"Profiling"},{"body":" 作者:吴晟,刘晗 原文地址  在本文中,我们详细介绍了代码级的性能剖析方法,以及我们在 Apache SkyWalking 中的实践。希望能够帮助大家在线定位系统性能短板,缓解系统压力。\n分布式链路追踪的局限性 在传统的监控系统中,我们如果想要得知系统中的业务是否正常,会采用进程监控、日志收集分析等方式来对系统进行监控。当机器或者服务出现问题时,则会触发告警及时通知负责人。通过这种方式,我们可以得知具体哪些服务出现了问题。但是这时我们并不能得知具体的错误原因出在了哪里,开发人员或者运维人员需要到日志系统里面查看错误日志,甚至需要到真实的业务服务器上查看执行情况来解决问题。\n如此一来,仅仅是发现问题的阶段,可能就会耗费相当长的时间;另外,发现问题但是并不能追溯到问题产生具体原因的情况,也常有发生。这样反反复复极其耗费时间和精力,为此我们便有了基于分布式追踪的 APM 系统。\n通过将业务系统接入分布式追踪中,我们就像是给程序增加了一个放大镜功能,可以清晰看到真实业务请求的整体链路,包括请求时间、请求路径,甚至是操作数据库的语句都可以看得一清二楚。通过这种方式,我们结合告警便可以快速追踪到真实用户请求的完整链路信息,并且这些数据信息完全是持久化的,可以随时进行查询,复盘错误的原因。\n然而随着我们对服务监控理解的加深,我们发现事情并没有那么简单。在分布式链路追踪中我们有这样的两个流派:代码埋点和字节码增强。无论使用哪种方式,底层逻辑一定都逃不过面向切面这个基础逻辑。因为只有这样才可以做到大面积的使用。这也就决定了它只能做到框架级别和 RPC 粒度的监控。这时我们可能依旧会遇到程序执行缓慢或者响应时间不稳定等情况,但无法具体查询到原因。这时候,大家很自然的会考虑到增加埋点粒度,比如对所有的 Spring Bean 方法、甚至主要的业务层方法都加上埋点。但是这种思路会遇到不小的挑战:\n第一,增加埋点时系统开销大,埋点覆盖不够全面。通过这种方式我们确实可以做到具体业务场景具体分析。但随着业务不断迭代上线,弊端也很明显:大量的埋点无疑会加大系统资源的开销,造成 CPU、内存使用率增加,更有可能拖慢整个链路的执行效率。虽然每个埋点消耗的性能很小,在微秒级别,但是因为数量的增加,甚至因为业务代码重用造成重复埋点或者循环使用,此时的性能开销已经无法忽略。\n第二,动态埋点作为一项埋点技术,和手动埋点的性能消耗上十分类似,只是减少的代码修改量,但是因为通用技术的特别,上一个挑战中提到的循环埋点和重复使用的场景甚至更为严重。比如选择所有方法或者特定包下的所有方法埋点,很可能造成系统性能彻底崩溃。\n第三,即使我们通过合理设计和埋点,解决了上述问题,但是 JDK 函数是广泛使用的,我们很难限制对 JDK API 的使用场景。对 JDK 过多方法、特别是非 RPC 方法的监控会造成系统的巨大延迟风险。而且有一些基础类型和底层工具类,是很难通过字节码进行增强的。当我们的 SDK 使用不当或者出现 bug 时,我们无法具体得知真实的错误原因。\n代码级性能剖析方法 方法介绍 基于以上问题,在系统性能监控方法上,我们提出了代码级性能剖析这种在线诊断方法。这种方法基于一个高级语言编程模型共性,即使再复杂的系统,再复杂的业务逻辑,都是基于线程去进行执行的,而且多数逻辑是在单个线程状态下执行的。\n代码级性能剖析就是利用方法栈快照,并对方法执行情况进行分析和汇总。并结合有限的分布式追踪 span 上下文,对代码执行速度进行估算。\n性能剖析激活时,会对指定线程周期性的进行线程栈快照,并将所有的快照进行汇总分析,如果两个连续的快照含有同样的方法栈,则说明此栈中的方法大概率在这个时间间隔内都处于执行状态。从而,通过这种连续快照的时间间隔累加成为估算的方法执行时间。时间估算方法如下图所示:\n在上图中,d0-d10 代表 10 次连续的内存栈快照,实际方法执行时间在 d3-d4 区间,结束时间在 d8-d9 之间。性能剖析无法告诉你方法的准确执行时间,但是他会估算出方法执行时间为 d4-d8 的 4 个快照采集间隔时间之和,这已经是非常的精确的时间估算了。\n而这个过程因为不涉及代码埋点,所以自然性能消耗是稳定和可控的,也无需担心是否被埋点,是否是 JDK 方法等问题。同时,由于上层已经在分布式追踪之下,性能剖析方法可以明确地确定分析开始和结束时间,减少不必要的性能开销。\n性能剖析可以很好的对线程的堆栈信息进行监控,主要有以下几点优势:\n 精确的问题定位,直接到代码方法和代码行; 无需反复的增删埋点,大大减少了人力开发成本; 不用承担过多埋点对目标系统和监控系统的压力和性能风险; 按需使用,平时对系统无消耗,使用时的消耗稳定可能。  SkyWalking 实践实例 我们首先在 Apache SkyWalking APM 中实现此技术方法,下面我们就以一个真实的例子来说明此方法的执行效果。\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedExceptione) { } 这是我们故意加入的问题代码,我们使用 CountDownLanth 设置了两个任务完成后方法执行结束,Task1 和 Task2 是两个执行时间不稳定的任务,所以主任务也会执行速度不稳定。但对于运维和监控团队来说,很难定位到这个方法片段。\n针对于这种情况,我们看看性能剖析会怎样直接定位此问题。\n上图所示的就是我们在进行链路追踪时所看到的真实执行情况,其中我们可以看到在 service/processWithThreadPool 执行速度缓慢,这正是我们植入问题代码的方法。此时在这个调用中没有后续链路了,所以并没有更细致的原因,我们也不打算去 review 代码,从而增加新埋点。这时,我们可以对 HelloService 进行性能剖析,并执行只剖析响应速度大于 500 毫秒的请求。\n注意,指定特定响应时间的剖析是保证剖析有效性的重要特性,如果方法在平均响应时间上已经出现问题,往往通过分布式链路可以快速定位,因为此时链路总时间长,新埋点带来的性能影响相对可控。但是方法性能抖动是不容易用新增埋点来解决的,而且往往只发生在生产环境。\n上图就是我们进行性能剖析后的真实结果图。从左到右分别表示:栈帧名称、该栈帧总计耗时(包含其下面所有自栈帧)、当前栈帧自身耗时和监控次数。我们可以在最后一行看到,线程卡在了 sun.misc.Unsafe.park 中了。如果你熟悉 Java 就可以知道此时进行了锁等待,我们继续按照树的结构向上推,便可以看到线程真正是卡在了 CountDownLatch.await 方法中。\n方法局限性 当然任何的方法都不是万能的,性能剖析也有一些局限性。\n第一, 对于高频反复执行的方法,如循环调用,可能会误报为缓慢方法。但这并不是大问题,因为如果反复执行的耗时较长,必然是系统需要关注的性能瓶颈。\n第二, 由于性能栈快照有一定的性能消耗,所以采集周期不宜过密,如 SkyWalking 实践中,不支持小于 10ms 的采集间隔。所以如果问题方法执行时间过小(比如在 10 
毫秒内波动),此方法并不适用。我们也再此强调,方法论和工具的强大,始终不能代替程序员。\n","excerpt":"作者:吴晟,刘晗 原文地址  在本文中,我们详细介绍了代码级的性能剖析方法,以及我们在 Apache SkyWalking 中的实践。希望能够帮助大家在线定位系统性能短板,缓解系统压力。\n分布式链路追 …","ref":"/zh/2020-03-23-using-profiling-to-fix-the-blind-spot-of-distributed-tracing/","title":"在线代码级性能剖析,补全分布式追踪的最后一块“短板”"},{"body":"SkyWalking CLI 0.2.0 is released. Go to downloads page to find release tars.\n Support visualization of heat map Support top N entities, swctl metrics top 5 --name service_sla Support thermodynamic metrics, swctl metrics thermodynamic --name all_heatmap Support multiple linear metrics, swctl --display=graph --debug metrics multiple-linear --name all_percentile  ","excerpt":"SkyWalking CLI 0.2.0 is released. Go to downloads page to find release tars.\n Support visualization …","ref":"/events/release-apache-skywalking-cli-0-2-0/","title":"Release Apache SkyWalking CLI 0.2.0"},{"body":"SkyWalking Chart 1.1.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 6.6.0 Support deploy Elasticsearch 7 The official helm repo was changed to the official Elasticsearch repo (https://helm.elastic.co/)  ","excerpt":"SkyWalking Chart 1.1.0 is released. Go to downloads page to find release tars.\n Support SkyWalking …","ref":"/events/release-apache-skywalking-chart-1-1-0-for-skywalking-6-6-0/","title":"Release Apache SkyWalking Chart 1.1.0 for SkyWalking 6.6.0"},{"body":"Support tracing and collect metrics from Nginx server. Require SkyWalking APM 7.0+.\n","excerpt":"Support tracing and collect metrics from Nginx server. Require SkyWalking APM 7.0+.","ref":"/events/skywalking-nginx-lua-0-1-0-release/","title":"SkyWalking Nginx LUA 0.1.0 release"},{"body":"Based on his continuous contributions, Ming Wen (a.k.a moonming) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Ming Wen (a.k.a moonming) has been voted as a new committer.","ref":"/events/welcome-ming-wen-as-new-committer/","title":"Welcome Ming Wen as new committer"},{"body":"Based on his continuous contributions, Haochao Zhuang (a.k.a dmsolr) has been invited to join the PMC. Welcome aboard.\n","excerpt":"Based on his continuous contributions, Haochao Zhuang (a.k.a dmsolr) has been invited to join the …","ref":"/events/welcome-haochao-zhuang-to-join-the-pmc/","title":"Welcome Haochao Zhuang to join the PMC"},{"body":"Based on his continuous contributions, Zhusheng Xu (a.k.a aderm) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Zhusheng Xu (a.k.a aderm) has been voted as a new committer.","ref":"/events/welcome-zhusheng-xu-as-new-committer/","title":"Welcome Zhusheng Xu as new committer"},{"body":"Based on his continuous contributions, Han Liu (a.k.a mrproliu) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Han Liu (a.k.a mrproliu) has been voted as a new committer.","ref":"/events/welcome-han-liu-as-new-committer/","title":"Welcome Han Liu as new committer"},{"body":" Author: Wu Sheng, tetrate.io, SkyWalking original creator, SkyWalking V.P. GitHub, Twitter, Linkedin  The SkyWalking project provides distributed tracing, topology map analysis, service mesh telemetry analysis, metrics analysis and a super cool visualization targeting distributed systems in k8s or traditional VM deployments.\nThe project is widely used in Alibaba, Huawei, Tencent, DiDi, xiaomi, Pingan, China’s top 3 telecom companies (China Mobile, China telecom, China Unicom), airlines, banks and more. 
It has over 140 company users listed on our powered by page.\nToday, we welcome and celebrate reaching 200 code contributors on our main repo. We hereby mark this milestone as official today: Jan. 20th, 2020.\nAt this great moment, I would like to share SkyWalking’s 4-year open source journey.\nI wrote the first line of code on Nov. 1st, 2015, aiming to guide people in understanding a distributed system, just as micro-services and distributed architecture were becoming popular. In the first 2 years, I never thought it would become such a big and active community. I didn’t even expect it would be an open source project. Initially, the goal was primarily to teach others about distributed tracing and analysis.\nIt was a typical open source project in obscurity in its first two years. But people still showed up, asked questions, and tried to improve the project. I got several invitations to share the project at local meetups. All of this made me realize people really needed a good open source APM project.\nIn 2017, I decided to dedicate myself as much as possible to making the project successful, and it became my day job. To be honest, I had no clue about how to do that; at that time in China, it was rare to have this kind of job. So, I began to ask friends around me, “Do you want to collaborate on the open source APM with me?” Most people were busy and gave a clear NO, but two of them agreed to help: Xin Zhang and Yongsheng Peng. We built SkyWalking 3.x and shared the 3.2 release at GOPS Shanghai, China.\nIt became the first version to be adopted in production.\nCompared to today’s SkyWalking, it was a toy prototype, but it had the same tracing design, protocol and analysis method.\nThat year the contributor team was 15-20, and the project had obvious potential to expand. I began to consider bringing the project into a worldwide, top-level open source foundation. Thanks to our initial incubator mentors, Michael Semb Wever, William Jiang, and Luke Han, this really worked. At the end of 2017, SkyWalking joined the Apache Incubator, and kept following the Apache Way to build community. More contributors joined the community.\nWith more people spending time collaborating on the project, including code, tests, blogs, conference talks, books and uses of the project, a chemical reaction happens. New developers begin to provide bug fixes, new feature requests and new proposals. At the moment of graduation in spring 2019, the project had 100 contributors. Now, only 9 months later, it has quickly surged to 200. They enhance the project and extend it to frontiers we never imagined: 5 popular language agents, service mesh adoption, a CLI tool, super cool visualization. We are even moving on to thread profiling, browser performance and Nginx tracing now.\nOver this whole 4+ year open source journey, we have had support from leaders in the open source tracing community around the world, including Adrian Cole, William Jiang, Luke Han, Michael Semb Wever, Ben Sigelman, and Jonah Kowall. And we’ve had critical help from foundations, especially the Apache Software Foundation and the Cloud Native Computing Foundation.\nOur contributors also have support from their employers, including, to the best of my knowledge, Alibaba, Huawei, China Mobile, ke.com, DaoCloud, Lizhi.fm, Yonghui Supermarket, and dangdang.com. I also have support from my employers, tetrate.io, Huawei, and OneAPM.\nThanks to our 200+ contributors and the companies behind them.
You make this magic happen.\n","excerpt":"Author: Wu Sheng, tetrate.io, SkyWalking original creator, SkyWalking V.P. GitHub, Twitter, Linkedin …","ref":"/blog/2020-01-20-celebrate-200th-contributor/","title":"SkyWalking hits 200 contributors mark"},{"body":"Based on his continuous contributions, Hongwei Zhai (a.k.a innerpeacez) has been invited to join the PMC. Welcome aboard.\n","excerpt":"Based on his continuous contributions, Hongwei Zhai (a.k.a innerpeacez) has been invited to join the …","ref":"/events/welcome-hongwei-zhai-to-join-the-pmc/","title":"Welcome Hongwei Zhai to join the PMC"},{"body":"Apache APM 6.6.0 release. Go to downloads page to find release tars.\n Service Instance dependency detection are available. Support ElasticSearch 7 as a storage option. Reduce the register load.  ","excerpt":"Apache APM 6.6.0 release. Go to downloads page to find release tars.\n Service Instance dependency …","ref":"/events/release-apache-skywalking-apm-6-6-0/","title":"Release Apache SkyWalking APM 6.6.0"},{"body":"SkyWalking Chart 1.0.0 is released. Go to downloads page to find release tars.\n Deploy SkyWalking 6.5.0 by Chart. Elasticsearch deploy optional.  ","excerpt":"SkyWalking Chart 1.0.0 is released. Go to downloads page to find release tars.\n Deploy SkyWalking …","ref":"/events/release-apache-skywalking-chart-1-0-0-for-skywalking-6-5-0/","title":"Release Apache SkyWalking Chart 1.0.0 for SkyWalking 6.5.0"},{"body":"SkyWalking CLI 0.1.0 is released. Go to downloads page to find release tars.\n Add command swctl service to list services Add command swctl instance and swctl search to list and search instances of service. Add command swctl endpoint to list endpoints of service. Add command swctl linear-metrics to query linear metrics and plot the metrics in Ascii Graph mode. Add command swctl single-metrics to query single-value metrics.  ","excerpt":"SkyWalking CLI 0.1.0 is released. Go to downloads page to find release tars.\n Add command swctl …","ref":"/events/release-apache-skywalking-cli-0-1-0/","title":"Release Apache SkyWalking CLI 0.1.0"},{"body":"Based on his continuous contributions, Weiyi Liu (a.k.a wayilau) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Weiyi Liu (a.k.a wayilau) has been voted as a new committer.","ref":"/events/welcome-weiyi-liu-as-new-committer/","title":"Welcome Weiyi Liu as new committer"},{"body":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome aboard.\n","excerpt":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome …","ref":"/events/welcome-lang-li-as-a-new-committer/","title":"Welcome Lang Li as a new committer"},{"body":"Based on her continuous contributions, Qiuxia Fan (a.k.a Fine0830) has been voted as a new committer.\n","excerpt":"Based on her continuous contributions, Qiuxia Fan (a.k.a Fine0830) has been voted as a new …","ref":"/events/welcome-qiuxia-fan-as-new-committer/","title":"Welcome Qiuxia Fan as new committer"},{"body":"6.5.0 release. Go to downloads page to find release tars.\n New metrics comparison view in UI. Dynamic Alert setting supported. JDK9-12 supported in backend.  ","excerpt":"6.5.0 release. Go to downloads page to find release tars.\n New metrics comparison view in UI. 
…","ref":"/events/release-apache-skywalking-apm-6-5-0/","title":"Release Apache SkyWalking APM 6.5.0"},{"body":"Based on his continuous contributions, Wei Zhang (a.k.a arugal) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Wei Zhang (a.k.a arugal) has been voted as a new committer.","ref":"/events/welcome-wei-zhang-as-new-committer/","title":"Welcome Wei Zhang as new committer"},{"body":"PS:本文仅仅是在我的测试环境实验过,如果有问题,请自行优化调整\n前记:记得skywlking还是6.0版本的时候我就在试用,当时是skywalking基本在两三天左右就会监控数据完全查不出来,elasticsearch日志报错,由于当时也算是初用es,主要用来日志收集,并且时间有限,没有继续深入研究,最近空闲,更新到最新的6.5.0(开发版本)还是会出现同样的问题,下定决心解决下,于是有了本文的浅知拙见\n本次调优环境 skywalking: 6.5.0 elasticsearch:6.3.2(下文用es代替)\n调优过程   当然是百度了,百度后其实翻来翻去就找到一个相关的文章https://my.oschina.net/keking/blog/3025303 ,参考之。\n  调整skywalking的这两个参数试试 bulkActions: 4000 # Execute the bulk every 2000 requests  bulkSize: 60 # flush the bulk every 20mb 然后es还是继续挂,继续频繁的重启\n  继续看这个文章,发现了另外一篇https://www.easyice.cn/archives/207 ,继续参考之\n  这篇文章发现每一个字我都认识,看起来也能懂,但是对于es小白的我来说,着实不知道怎么调整这些参数,姑且先加到es的配置文件里边试试看吧,于是就加了,然后重启es的时候说发现index参数配置,自从5.0之后就不支持这样配置了,还给调了个es的接口去设置,但是设置失败(真够不错的),朝着这个思路去百度,百度到快放弃,后来就寻思,再试试看吧,(百度的结果是知道了index有静态参数和动态参数,动态的参数是可以随时设置,静态的只能创建或者关闭状态的索引才可以设置) 然鹅并不知道怎么关闭索引,继续百度,(怎么全特么百度,好吧不百度了,直接来干货)\n 关闭索引(我的skywalking索引命名空间是dry_trace) curl -XPOST \u0026quot;http://localhost:9200/dry_trace*/_close\u0026quot; 设置参数 curl -XPUT 'http://localhost:9200/dry_trace*/_settings?preserve_existing=true' -H 'Content-type:application/json' -d '{ \u0026quot;index.refresh_interval\u0026quot; : \u0026quot;10s\u0026quot;, \u0026quot;index.translog.durability\u0026quot; : \u0026quot;async\u0026quot;, \u0026quot;index.translog.flush_threshold_size\u0026quot; : \u0026quot;1024mb\u0026quot;, \u0026quot;index.translog.sync_interval\u0026quot; : \u0026quot;120s\u0026quot; }'  打开索引 curl -XPOST \u0026quot;http://localhost:9200/dry_trace*/_open\u0026quot;    还有一点,第四步的方式只适用于现有的索引设置,那么新的索引设置呢,总不能每天重复下第四步吧。当然不需要,来干货 首先登陆kinaba控制台找到开发工具 贴入以下代码\n   PUT /_template/dry_trace_tmp { \u0026quot;index_patterns\u0026quot;: \u0026quot;dry_trace*\u0026quot;, \u0026quot;order\u0026quot;: 1, \u0026quot;settings\u0026quot;: { \u0026quot;index\u0026quot;: { \u0026quot;refresh_interval\u0026quot;: \u0026quot;30s\u0026quot;, \u0026quot;translog\u0026quot;: { \u0026quot;flush_threshold_size\u0026quot;: \u0026quot;1GB\u0026quot;, \u0026quot;sync_interval\u0026quot;: \u0026quot;60s\u0026quot;, \u0026quot;durability\u0026quot;: \u0026quot;async\u0026quot; } } } } 截止目前为止运行一周,还未发现挂掉,一切看起来正常   完结\u0026mdash; 于 2019年11月\n","excerpt":"PS:本文仅仅是在我的测试环境实验过,如果有问题,请自行优化调整\n前记:记得skywlking还是6.0版本的时候我就在试用,当时是skywalking基本在两三天左右就会监控数据完全查不出 …","ref":"/zh/2019-11-07-skywalking-elasticsearch-storage-optimization/","title":"SkyWalking 使用 ElasticSearch 存储的优化"},{"body":"Based on his continuous contributions, Haochao Zhuang (a.k.a dmsolr) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Haochao Zhuang (a.k.a dmsolr) has been voted as a new …","ref":"/events/welcome-haochao-zhuang-as-new-committer/","title":"Welcome Haochao Zhuang as new committer"},{"body":" 作者:innerpeacez 原文地址  本文主要讲述的是如何使用 Helm Charts 将 SkyWalking 部署到 Kubernetes 集群中,相关文档可以参考skywalking-kubernetes 和 backend-k8s 文档 。\n目前推荐的四种方式:\n 使用 helm 2 提供的 helm serve 启动本地 helm repo 使用本地 chart 文件部署 使用 harbor 提供的 repo 功能 直接从官方 repo 进行部署  注意:目前 skywalking 的 chart 还没有提交到官方仓库,请先参照前三种方式进行部署\nHelm 2 提供的 helm serve 打包对应版本的 skywalking chart 1.配置 helm 环境,参考 Helm 环境配置 ,如果你要部署 helm2 相关 chart 可以直接配置 helm2 的相关环境\n2.克隆/下载ZIP skywalking-kubernetes 
这个仓库,仓库关于chart的目录结构如下\n helm-chart\n helm2  6.0.0-GA 6.1.0   helm3  6.3.0 6.4.0     克隆/下载ZIP 完成后进入指定目录打包对应版本的chart\ncd skywalking-kubernetes/helm-chart/\u0026lt;helm-version\u0026gt;/\u0026lt;skywalking-version\u0026gt; 注意:helm-version 为对应的 helm 版本目录,skywalking-version 为对应的 skywalking 版本目录,下面以helm3 和 skywalking 6.3.0 为例\ncd skywalking-kubernetes/helm-chart/helm3/6.3.0 3.由于skywalking 依赖 elasticsearch 作为存储库,执行以下命令更新依赖,默认会从官方repo进行拉取\nhelm dep up skywalking  Hang tight while we grab the latest from your chart repositories\u0026hellip; \u0026hellip;Successfully got an update from the \u0026ldquo;stable\u0026rdquo; chart repository Update Complete. ⎈Happy Helming!⎈ Saving 1 charts Downloading elasticsearch from repo https://kubernetes-charts.storage.googleapis.com/ Deleting outdated charts\n 如果官方 repo 不存在,请先添加官方仓库\nhelm repo add stable https://kubernetes-charts.storage.googleapis.com  \u0026ldquo;stable\u0026rdquo; has been added to your repositories\n 4.打包 skywalking , 执行以下命令\nhelm package skywalking/  Successfully packaged chart and saved it to: C:\\code\\innerpeacez_github\\skywalking-kubernetes\\helm-chart\\helm3\\6.3.0\\skywalking-0.1.0.tgz\n 打包完成后会在当前目录的同级目录生成 .tgz 文件\n ls  skywalking/ skywalking-0.1.0.tgz\n 启动 helm serve 由于上文配置的 helm 为 helm3 ,但是 helm 3中移除了 helm serve 的相关命令,所以需要另外一个环境配置helm2 的相关环境,下载 helm 2.14.3 的二进制文件,配置基本上没有大的差别,不在赘述\n初始化 helm\nhelm init 将上文生成的 skywalking-0.1.0.tgz 文件复制到 helm 相关目录 /root/.helm/repository/local,启动 serve\nhelm serve --address \u0026lt;ip\u0026gt;:8879 --repo-path /root/.helm/repository/local 注意: ip 为要能够被上文配置 helm 3 环境的机器访问到\n可以访问一下看看服务 serve 是否启动成功\ncurl ip:8879 部署 skywalking 1.在helm3 环境中添加启动的本地 repo\nhelm repo add local http://\u0026lt;ip\u0026gt;:8879 2.查看 skywalking chart 是否存在于本地仓库中\nhelm search skywalking  NAME CHART VERSION\tAPP VERSION\tDESCRIPTION local/skywalking 0.1.0 6.3.0 Apache SkyWalking APM System\n 3.部署\nhelm -n test install skywalking local/skywalking 这样 skywalking 就部署到了 k8s 集群中的 test 命名空间了,至此本地安装skywalking 就完成了。\n本地文件部署 如果你不想存储到 chart 到仓库中也可以直接使用本地文件部署 skywalking,按照上面的步骤将skywalking chart 打包完成之后,直接使用以下命令进行部署\nhelm -n test install skywalking skywalking-0.1.0.tgz harbor 作为 repo 存储 charts harbor 目前已经提供了,charts repo 的能力,这样就可以将 docker 镜像和 chart 存储在一个仓库中了,方便维护,具体harbor 的部署方法参考 Harbor 作为存储仓库存储 chart\n官方 repo 部署 目前没有发布到官方 repo 中,后续发布完成后,只需要执行下面命令即可\nhelm install -n test stable/skywalking 总结 四种方式都可以进行部署,如果你想要自定义 chart ,需要使用上述两种本地方法及 harbor 存储的方式,以便你修改好 chart 之后进行部署.\n","excerpt":"作者:innerpeacez 原文地址  本文主要讲述的是如何使用 Helm Charts 将 SkyWalking 部署到 Kubernetes 集群中,相关文档可以参 …","ref":"/zh/2019-10-08-how-to-use-sw-chart/","title":"使用 chart 部署 SkyWalking"},{"body":" Author: Wei Qiang GitHub  Background SkyWalking backend provides the alarm function, we can define some Alarm rules, call webhook after the rule is triggered. I share my implementation\nDemonstration SkyWalking alarm UI\ndingtalk message body\nIntroduction  install  go get -u github.com/weiqiang333/infra-skywalking-webhook cd $GOPATH/src/github.com/weiqiang333/infra-skywalking-webhook/ bash build/build.sh ./bin/infra-skywalking-webhook help  Configuration  main configs file:configs/production.ymldingtalk:p3:token... 
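For illustration only, a rough sketch of what such a mapping might look like; the actual schema of configs/production.yml is defined by the infra-skywalking-webhook project, and the nesting shown here plus the placeholder value are assumptions rather than the real configuration:
dingtalk:
  p3:
    token: https://oapi.dingtalk.com/robot/send?access_token=YOUR_ROBOT_TOKEN  # hypothetical placeholder for a DingTalk robot webhook
A per-priority key such as p3 would let the service route alerts of different severities to different robots or channels, which matches the collaboration note later in the post.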
Example  ./bin/infra-skywalking-webhook --config configs/production.yml --address 0.0.0.0:8000  SkyWalking backend alarm settings  webhooks:- http://127.0.0.1:8000/dingtalkCollaboration Hope that we can improve together webhook\nSkyWalking alarm rules may add more metric names (eg priority name), we can send different channels by locating different levels of alerts (dingtalk / SMS / phone)\nThanks.\n","excerpt":"Author: Wei Qiang GitHub  Background SkyWalking backend provides the alarm function, we can define …","ref":"/blog/2019-09-25-alarm-webhook-share/","title":"SkyWalking alarm webhook sharing"},{"body":"作者: SkyWalking committer,Kdump\n本文介绍申请Apache SkyWalking Committer流程, 流程包括以下步骤\n 与PMC成员表达想成为committer的意愿(主动/被动) PMC内部投票 PMC正式邮件邀请 填写Apache iCLA申请表 设置ApacheID和邮箱 设置GitHub加入Apache组织 GitHub其它一些不重要设置  前期过程  与PMC成员表达想成为committer的意愿(主动/被动) PMC内部投票  当你对项目的贡献活跃度足够高或足够多时, Skywalking项目的PMC(项目管理委员会)会找到你并询问你是否有意愿成为项目的Committer, 或者也可以主动联系项目的PMC表达自己的意向, 在此之后PMC们会进行内部讨论和投票并告知你是否可以进入下一个环节.这个过程可能需要一周. 如果PMC主动邀请你进行非正式的意愿咨询, 你可以选择接受或拒绝.\nPS:PMC会向你索要你的个人邮箱, 建议提供Gmail, 因为后期绑定Apache邮箱需要用到, 其它邮箱我不确定是否能绑定.\nPS:从Apache官方的流程来讲, 现有的PMC会在没有通知候选人的情况下先进行候选人投票, 但是Skywalking项目的PMC有可能更倾向于先得到候选人的意愿再进行投票.\n正式阶段   PMC正式邮件邀请\n 当你收到PMC正式的邀请邮件时, 恭喜你, 你已经通过了PMC的内部投票, 你需要用英文回答接受邀请或者拒绝邀请, 记住回复的时候一定要选择全部回复.    填写Apache iCLA申请表\n  在你收到的PMC邮件中, 有几个ASF官方链接需要你去浏览, 重点的内容是查看CLAs, 并填写Individual Contributor License Agreement, 你可以将icla.pdf文件下载到本地, 使用PDF工具填写里面所需的信息, 并打印出来签名(一定要手写签名, 否则会被要求重新签名), 再扫描(或手机拍照)成电子文档(需要回复PDF格式, 文件名建议重命名为你的名字-icla.pdf), 使用gpg对电子文档进行签名(参考[HOW-TO: SUBMITTING LICENSE AGREEMENTS AND GRANTS\n](http://www.apache.org/licenses/contributor-agreements.html#submitting)), Window可以使用GnuPG或者Gpg4win.\n  完成gpg签名后, 请将你签名用的公钥上送到pool.sks-keyservers.net服务器, 并在这个页面中验证你的公钥是否可以被搜索到, 搜索关键词可以是你秘钥中填写的名字或者邮箱地址.\n  gpg签名后, 会生成.pdf.asc的文件, 需要将你的你的名字-icla.pdf和你的名字-icla.pdf.asc以附件的方式一起发送到secretary@apache.org, 并抄送给private@skywalking.apache.org.\n    设置ApacheID和邮箱\n 大概5个工作日内, 你会收到一封来至于root@apache.org的邮件, 主题为Welcome to the Apache Software Foundation (ASF)!, 恭喜你, 你已经获得了ApacheID, 这时候你需要根据邮件内容的提示去设置你的ApacheID密码, 密码设置完成后, 需要在Apache Account Utility页面中重点设置Forwarding email address和Your GitHub Username两个信息.保存信息的时候需要你填写当前的ApacheID的密码. 现在进入Gmail, 选择右上角的齿轮-\u0026gt;设置-\u0026gt;账号和导入-\u0026gt;添加其他电子邮件地址-\u0026gt;参考Sending email from your apache.org email address给出的信息根据向导填写Apache邮箱.    设置GitHub加入Apache组织\n 进入Welcome to the GitBox Account Linking Utility!, 按照顺序将Apache Account和GitHub Account点绿, 想点绿MFA Status, 需要去GitHub开启2FA, 请参考配置双重身份验证完成2FA的功能. 等待1~2小时后登陆自己的GitHub的dashboard界面, 你应该会看到一条Apache组织邀请你加入的通知, 这个时候接受即可享有Skywalking相关GitHub项目权限了.    其它提示  GitHub其它一些不重要设置  在GitHub首页展示Apache组织的logo: 进入Apache GitHub组织-\u0026gt;People-\u0026gt;搜索自己的GitHubID-\u0026gt;将Private改成Public    ","excerpt":"作者: SkyWalking committer,Kdump\n本文介绍申请Apache SkyWalking Committer流程, 流程包括以下步骤\n 与PMC成员表达想成为committer的意 …","ref":"/zh/2019-09-12-apache-skywalking-committer-apply-process/","title":"Apache SkyWalking Committer申请流程"},{"body":"Based on his contributions to the skywalking ui project, Weijie Zou (a.k.a Kdump) has been accepted as a new committer.\n","excerpt":"Based on his contributions to the skywalking ui project, Weijie Zou (a.k.a Kdump) has been accepted …","ref":"/events/welcome-weijie-zou-as-a-new-committer/","title":"Welcome Weijie Zou as a new committer"},{"body":"6.4.0 release. Go to downloads page to find release tars.\n Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  
Read changelog for the details.\n","excerpt":"6.4.0 release. Go to downloads page to find release tars.\n Highly recommend to upgrade due to Pxx …","ref":"/events/release-apache-skywalking-apm-6-4-0/","title":"Release Apache SkyWalking APM 6.4.0"},{"body":"  作者:innerpeacez 原文地址   如果你还不知道 Skywalking agent 是什么,请点击这里查看 Probe 或者这里查看快速了解agent,由于我这边大部分都是 JAVA 服务,所以下文以 Java 中使用 agent 为例,提供了以下三种方式供你选择\n三种方式:  使用官方提供的基础镜像 将 agent 包构建到已经存在的基础镜像中 sidecar 模式挂载 agent  1.使用官方提供的基础镜像 查看官方 docker hub 提供的基础镜像,只需要在你构建服务镜像是 From 这个镜像即可,直接集成到 Jenkins 中可以更加方便\n2.将 agent 包构建到已经存在的基础镜像中 提供这种方式的原因是:官方的镜像属于精简镜像,并且是 openjdk ,可能很多命令没有,需要自己二次安装,以下是我构建的过程\n  下载 oracle jdk\n这个现在 oracle 有点恶心了,wget 各种不行,然后我放弃了,直接从官网下载了\n  下载 skywalking 官方发行包,并解压(以6.3.0为例)\nwget https://www.apache.org/dyn/closer.cgi/skywalking/6.3.0/apache-skywalking-apm-6.3.0.tar.gz \u0026amp;\u0026amp; tar -zxvf apache-skywalking-apm-6.3.0.tar.gz   通过以下 dockerfile 构建基础镜像\nFROMalpine:3.8  ENV LANG=C.UTF-8 RUN set -eux \u0026amp;\u0026amp; \\  apk update \u0026amp;\u0026amp; apk upgrade \u0026amp;\u0026amp; \\  wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub \u0026amp;\u0026amp;\\  wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.30-r0/glibc-2.30-r0.apk \u0026amp;\u0026amp;\\  apk --no-cache add unzip vim curl git bash ca-certificates glibc-2.30-r0.apk file \u0026amp;\u0026amp; \\  rm -rf /var/lib/apk/* \u0026amp;\u0026amp;\\  mkdir -p /usr/skywalking/agent/ # A streamlined jreADD jdk1.8.0_221/ /usr/java/jdk1.8.0_221/ADD apache-skywalking-apm-bin/agent/ /usr/skywalking/agent/ # set envENV JAVA_HOME /usr/java/jdk1.8.0_221ENV PATH ${PATH}:${JAVA_HOME}/bin # run container with base path:/WORKDIR/ CMD bash  这里由于 alpine 是基于mini lib 的,但是 java 需要 glibc ,所以加入了 glibc 相关的东西,最后构建出的镜像大小在 490M 左右,因为加了挺多命令还是有点大,仅供参考,同样构建出的镜像也可以直接配置到 jenkins 中。\n3.sidecar 模式挂载 agent 如果你们的服务是部署在 Kubernetes 中,你还可以使用这种方式来使用 Skywalking Agent ,这种方式的好处在与不需要修改原来的基础镜像,也不用重新构建新的服务镜像,而是以sidecar 模式,通过共享volume的方式将agent 所需的相关文件挂载到已经存在的服务镜像中\n构建 skywalking agent sidecar 镜像的方法\n  下载skywalking 官方发行包,并解压\nwget https://www.apache.org/dyn/closer.cgi/skywalking/6.3.0/apache-skywalking-apm-6.3.0.tar.gz \u0026amp;\u0026amp; tar -zxvf apache-skywalking-apm-6.3.0.tar.gz   通过以下 dockerfile 进行构建\nFROMbusybox:latest  ENV LANG=C.UTF-8 RUN set -eux \u0026amp;\u0026amp; mkdir -p /usr/skywalking/agent/ ADD apache-skywalking-apm-bin/agent/ /usr/skywalking/agent/ WORKDIR/  注意:这里我没有在dockerfile中下载skywalking 发行包是因为保证构建出的 sidecar 镜像保持最小,bosybox 只有700 k左右,加上 agent 最后大小小于20M\n如何使用 sidecar 呢?\napiVersion:apps/v1kind:Deploymentmetadata:labels:name:demo-swname:demo-swspec:replicas:1selector:matchLabels:name:demo-swtemplate:metadata:labels:name:demo-swspec:initContainers:- image:innerpeacez/sw-agent-sidecar:latestname:sw-agent-sidecarimagePullPolicy:IfNotPresentcommand:[\u0026#39;sh\u0026#39;]args:[\u0026#39;-c\u0026#39;,\u0026#39;mkdir -p /skywalking/agent \u0026amp;\u0026amp; cp -r /usr/skywalking/agent/* /skywalking/agent\u0026#39;]volumeMounts:- mountPath:/skywalking/agentname:sw-agentcontainers:- image:nginx:1.7.9name:nginxvolumeMounts:- mountPath:/usr/skywalking/agentname:sw-agentports:- containerPort:80volumes:- name:sw-agentemptyDir:{}以上是挂载 sidecar 的 deployment.yaml 文件,以nginx 作为服务为例,主要是通过共享 volume 的方式挂载 agent,首先 initContainers 通过 sw-agent 卷挂载了 sw-agent-sidecar 中的 /skywalking/agent ,并且将上面构建好的镜像中的 agent 目录 cp 到了 /skywalking/agent 目录,完成之后 nginx 启动时也挂载了 sw-agent 卷,并将其挂载到了容器的 /usr/skywalking/agent 目录,这样就完成了共享过程。\n总结 这样除去 ServiceMesh 以外,我能想到的方式就介绍完了,希望可以帮助到你。最后给 
Skywalking 一个 Star 吧,国人的骄傲。\n","excerpt":"作者:innerpeacez 原文地址   如果你还不知道 Skywalking agent 是什么,请点击这里查看 Probe 或者这里查看快速了解agent,由于我这边大部分都是 JAVA 服务, …","ref":"/zh/2019-08-30-how-to-use-skywalking-agent/","title":"如何使用 SkyWalking Agent ?"},{"body":"Based on his continuous contributions, Yuguang Zhao (a.k.a zhaoyuguang) has been invited to join the PMC. Welcome aboard.\n","excerpt":"Based on his continuous contributions, Yuguang Zhao (a.k.a zhaoyuguang) has been invited to join the …","ref":"/events/welcome-yuguang-zhao-to-join-the-pmc/","title":"Welcome Yuguang Zhao to join the PMC"},{"body":"Based on his continuous contributions, Zhenxu Ke (a.k.a kezhenxu94) has been invited to join the PMC. Welcome aboard.\n","excerpt":"Based on his continuous contributions, Zhenxu Ke (a.k.a kezhenxu94) has been invited to join the …","ref":"/events/welcome-zhenxu-ke-to-join-the-pmc/","title":"Welcome Zhenxu Ke to join the PMC"},{"body":"Based on his contributions to the skywalking PHP project, Yanlong He (a.k.a heyanlong has been accepted as a new committer.\n","excerpt":"Based on his contributions to the skywalking PHP project, Yanlong He (a.k.a heyanlong has been …","ref":"/events/welcome-yanlong-he-as-a-new-committer/","title":"Welcome Yanlong He as a new committer"},{"body":"6.3.0 release. Go to downloads page to find release tars.\n Improve ElasticSearch storage implementation performance again. OAP backend re-install w/o agent reboot required.  Read changelog for the details.\n","excerpt":"6.3.0 release. Go to downloads page to find release tars.\n Improve ElasticSearch storage …","ref":"/events/release-apache-skywalking-apm-6-3-0/","title":"Release Apache SkyWalking APM 6.3.0"},{"body":"6.2.0 release. Go to downloads page to find release tars. ElasticSearch storage implementation changed, high reduce payload to ElasticSearch cluster.\nRead changelog for the details.\n","excerpt":"6.2.0 release. Go to downloads page to find release tars. ElasticSearch storage implementation …","ref":"/events/release-apache-skywalking-apm-6-2-0/","title":"Release Apache SkyWalking APM 6.2.0"},{"body":"Based on his continuous contributions, Zhenxu Ke (a.k.a kezhenxu94) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Zhenxu Ke (a.k.a kezhenxu94) has been voted as a new …","ref":"/events/welcome-zhenxu-ke-as-a-new-committer/","title":"Welcome Zhenxu Ke as a new committer"},{"body":"6.1.0 release. Go to downloads page to find release tars. This is the first top level project version.\nKey updates\n RocketBot UI OAP performance improvement  ","excerpt":"6.1.0 release. Go to downloads page to find release tars. This is the first top level project …","ref":"/events/release-apache-skywalking-apm-6-1-0/","title":"Release Apache SkyWalking APM 6.1.0"},{"body":"Apache SkyWalking PMC accept the RocketBot UI contributions. After IP clearance, it will be released in SkyWalking 6.1 soon.\n","excerpt":"Apache SkyWalking PMC accept the RocketBot UI contributions. 
After IP clearance, it will be released …","ref":"/events/rocketbot-ui-has-been-accepted-as-skywalking-primary-ui/","title":"RocketBot UI has been accepted as SkyWalking primary UI"},{"body":"Apache board approved SkyWalking graduated as TLP at April 17th 2019.\n","excerpt":"Apache board approved SkyWalking graduated as TLP at April 17th 2019.","ref":"/events/skywalking-graduated-as-apache-top-level-project/","title":"SkyWalking graduated as Apache Top Level Project"},{"body":"Based on his continuous contributions, he has been accepted as a new committer.\n","excerpt":"Based on his continuous contributions, he has been accepted as a new committer.","ref":"/events/welcome-yuguang-zhao-as-a-new-committer/","title":"Welcome Yuguang Zhao as a new committer"},{"body":"APM和调用链跟踪 随着企业经营规模的扩大,以及对内快速诊断效率和对外SLA(服务品质协议,service-level agreement)的追求,对于业务系统的掌控度的要求越来越高,主要体现在:\n 对于第三方依赖的监控,实时/准实时了解第三方的健康状况/服务品质,降低第三方依赖对于自身系统的扰动(服务降级、故障转移) 对于容器的监控,实时/准实时的了解应用部署环境(CPU、内存、进程、线程、网络、带宽)情况,以便快速扩容/缩容、流量控制、业务迁移 业务方对于自己的调用情况,方便作容量规划,同时对于突发的请求也能进行异常告警和应急准备 自己业务的健康、性能监控,实时/准实时的了解自身的业务运行情况,排查业务瓶颈,快速诊断和定位异常,增加对自己业务的掌控力  同时,对于企业来说,能够更精确的了解资源的使用情况,对于成本核算和控制也有非常大的裨益。\n在这种情况下,一般都会引入APM(Application Performance Management \u0026amp; Monitoring)系统,通过各种探针采集数据,收集关键指标,同时搭配数据呈现和监控告警,能够解决上述的大部分问题。\n然而随着RPC框架、微服务、云计算、大数据的发展,同时业务的规模和深度相比过往也都增加了很多,一次业务可能横跨多个模块/服务/容器,依赖的中间件也越来越多,其中任何一个节点出现异常,都可能导致业务出现波动或者异常,这就导致服务质量监控和异常诊断/定位变得异常复杂,于是催生了新的业务监控模式:调用链跟踪\n 能够分布式的抓取多个节点的业务记录,并且通过统一的业务id(traceId,messageId,requestId等)将一次业务在各个节点的记录串联起来,方便排查业务的瓶颈或者异常点  产品对比 APM和调用链跟踪均不是新诞生事务,很多公司已经有了大量的实践,不过开源的并且能够开箱即用的产品并不多,这里主要选取了Pinpoint,Skywalking,CAT来进行对比(当然也有其他的例如Zipkin,Jaeger等产品,不过总体来说不如前面选取的3个完成度高),了解一下APM和调用链跟踪在开源方面的发展状态。\nPinpoint Pinpoint是一个比较早并且成熟度也非常高的APM+调用链监控的项目,在全世界范围内均有用户使用,支持Java和PHP的探针,数据容器为HBase,其界面参考:\nSkywalking Skywalking是一个新晋的项目,最近一两年发展非常迅猛,本身支持OpenTracing规范,优秀的设计提供了良好的扩展性,支持Java、PHP、.Net、NodeJs探针,数据容器为ElasticSearch,其界面参考:\nCAT CAT是由美团开源的一个APM项目,也历经了多年的迭代升级,拥有大量的企业级用户,对于监控和报警整合比较紧密,支持Java、C/C++、.Net、Python、Go、NodeJs,不过CAT目前主要通过侵入性的方式接入,数据容器包括HDFS(存储原始数据)和mysql(二次统计),其界面参考:\n横向对比 上面只是做了一个简介,那这三个项目各自有什么特色或者优势/劣势呢(三者的主要产品均针对Java,这里也主要针对Java的特性)?\n Pinpoint  优势  大企业/长时间验证,稳定性和完成度高 探针收集的数据粒度比较细 HBase的数据密度较大,支持PB级别下的数据查询 代码设计考虑的扩展性较弱,二次开发难度较大(探针为插件式,开发比较简单) 拥有完整的APM和调用链跟踪功能   劣势  代码针对性强,扩展较难 容器为HBase,查询功能较弱(主要为时间维度) 探针的额外消耗较多(探针采集粒度细,大概10%~20%) 项目趋于成熟,而扩展难度较大,目前社区活跃度偏低,基本只进行探针的增加或者升级 缺少自定义指标的设计     Skywalking  优势  数据容器为ES,查询支持的维度较多并且扩展潜力大 项目设计采用微内核+插件,易读性和扩展性都比较强 主要的研发人员为华人并且均比较活跃,能够进行更加直接的沟通 拥有完整的APM和调用链跟踪功能   劣势  项目发展非常快,稳定性有待验证 ES数据密度较小,在PB级别可能会有性能压力 缺少自定义指标的设计     CAT  优势  大企业/长时间验证,稳定性和完成度高 采用手动数据埋点而不是探针,数据采集的灵活性更强 支持自定义指标 代码设计考虑的扩展性较弱,并且数据结构复杂,二次开发难度较大 拥有完善的监控告警机制   劣势  代码针对性强,扩展较难 需要手动接入埋点,代码侵入性强 APM功能完善,但是不支持调用链跟踪      基本组件 如果分别去看Pinpoint/Skywalking/CAT的整体设计,我们会发现三者更像是一个规范的三种实现,虽然各自有不同的机制和特性,但是从模块划分和功能基本是一致的:\n当然也有一些微小的区别:\n Pinpoint基本没有aggregator,同时query和alarm集成在了web中,只有agent,collector和web Skywalking则是把collector、aggregator、alarm集成为OAP(Observability Analysis Platform),并且可以通过集群部署,不同的实例可以分别承担collector或者aggregator+alarm的角色 CAT则和Skywalking类似,把collector、aggregator、alarm集成为cat-consumer,而由于CAT有比较复杂的配置管理,所以query和配置一起集成为cat-home 当然最大的区别是Pinpoint和Skywalking均是通过javaagent做字节码的扩展,通过切面编程采集数据,类似于探针,而CAT的agent则更像是一个工具集,用于手动埋点  Skywalking 前戏这么多,终于开始进入主题,介绍今天的主角:Skywalking,不过通过之前的铺垫,我们基本都知道了Skywalking期望解决的问题以及总体的结构,下面我们则从细节来看Skywalking是怎么一步一步实现的。\n模块构成 首先,Skywalking进行了精准的领域模型划分:\n整个系统分为三部分:\n agent:采集tracing(调用链数据)和metric(指标)信息并上报 OAP:收集tracing和metric信息通过analysis core模块将数据放入持久化容器中(ES,H2(内存数据库),mysql等等),并进行二次统计和监控告警 
webapp:前后端分离,前端负责呈现,并将查询请求封装为graphQL提交给后端,后端通过ribbon做负载均衡转发给OAP集群,再将查询结果渲染展示  而整个Skywalking(包括agent和OAP,而webapp后端业务非常简单主要就是认证和请求转发)均通过微内核+插件式的模式进行编码,代码结构和扩展性均非常强,具体设计可以参考: 从Skywalking看如何设计一个微核+插件式扩展的高扩展框架 ,Spring Cloud Gateway的GatewayFilterFactory的扩展也是通过这种plugin define的方式来实现的。\nSkywalking也提供了其他的一些特性:\n 配置重载:支持通过jvm参数覆写默认配置,支持动态配置管理 集群管理:这个主要体现在OAP,通过集群部署分担数据上报的流量压力和二次计算的计算压力,同时集群也可以通过配置切换角色,分别面向数据采集(collector)和计算(aggregator,alarm),需要注意的是agent目前不支持多collector负载均衡,而是随机从集群中选择一个实例进行数据上报 支持k8s和mesh 支持数据容器的扩展,例如官方主推是ES,通过扩展接口,也可以实现插件去支持其他的数据容器 支持数据上报receiver的扩展,例如目前主要是支持gRPC接受agent的上报,但是也可以实现插件支持其他类型的数据上报(官方默认实现了对Zipkin,telemetry和envoy的支持) 支持客户端采样和服务端采样,不过服务端采样最有意义 官方制定了一个数据查询脚本规范:OAL(Observability Analysis Language),语法类似Linq,以简化数据查询扩展的工作量 支持监控预警,通过OAL获取数据指标和阈值进行对比来触发告警,支持webhook扩展告警方式,支持统计周期的自定义,以及告警静默防止重复告警  数据容器 由于Skywalking并没有自己定制的数据容器或者使用多种数据容器增加复杂度,而是主要使用ElasticSearch(当然开源的基本上都是这样来保持简洁,例如Pinpoint也只使用了HBase),所以数据容器的特性以及自己数据结构基本上就限制了业务的上限,以ES为例:\n ES查询功能异常强大,在数据筛选方面碾压其他所有容器,在数据筛选潜力巨大(Skywalking默认的查询维度就比使用HBase的Pinpoint强很多) 支持sharding分片和replicas数据备份,在高可用/高性能/大数据支持都非常好 支持批量插入,高并发下的插入性能大大增强 数据密度低,源于ES会提前构建大量的索引来优化搜索查询,这是查询功能强大和性能好的代价,但是链路跟踪往往有非常多的上下文需要记录,所以Skywalking把这些上下文二进制化然后通过Base64编码放入data_binary字段并且将字段标记为not_analyzed来避免进行预处理建立查询索引  总体来说,Skywalking尽量使用ES在大数据和查询方面的优势,同时尽量减少ES数据密度低的劣势带来的影响,从目前来看,ES在调用链跟踪方面是不二的数据容器,而在数据指标方面,ES也能中规中矩的完成业务,虽然和时序数据库相比要弱一些,但在PB级以下的数据支持也不会有太大问题。\n数据结构 如果说数据容器决定了上限,那么数据结构则决定了实际到达的高度。Skywalking的数据结构主要为:\n 数据维度(ES索引为skywalking_*_inventory)  service:服务 instance:实例 endpoint:接口 network_adress:外部依赖   数据内容  原始数据  调用链跟踪数据(调用链的trace信息,ES索引为skywalking_segment,Skywalking主要的数据消耗都在这里) 指标(主要是jvm或者envoy的运行时指标,例如ES索引skywalking_instance_jvm_cpu)   二次统计指标  指标(按维度/时间二次统计出来的例如pxx、sla等指标,例如ES索引skywalking_database_access_p75_month) 数据库慢查询记录(数据库索引:skywalking_top_n_database_statement)   关联关系(维度/指标之间的关联关系,ES索引为skywalking_*_relation_*) 特别记录  告警信息(ES索引为skywalking_alarm_record) 并发控制(ES索引为skywalking_register_lock)      其中数量占比最大的就是调用链跟踪数据和各种指标,而这些数据均可以通过OAP设置过期时间,以降低历史数据的对磁盘占用和查询效率的影响。\n调用链跟踪数据 作为Skywalking的核心数据,调用链跟踪数据(skywalking_segment)基本上奠定了整个系统的基础,而如果要详细的了解调用链跟踪的话,就不得不提到openTracing。\nopenTracing基本上是目前开源调用链跟踪系统的一个事实标准,它制定了调用链跟踪的基本流程和基本的数据结构,同时也提供了各个语言的实现。如果用一张图来表现openTracing,则是如下:\n其中:\n SpanContext:一个类似于MDC(Slfj)或者ThreadLocal的组件,负责整个调用链数据采集过程中的上下文保持和传递 Trace:一次调用的完整记录  Span:一次调用中的某个节点/步骤,类似于一层堆栈信息,Trace是由多个Span组成,Span和Span之间也有父子或者并列的关系来标志这个节点/步骤在整个调用中的位置  Tag:节点/步骤中的关键信息 Log:节点/步骤中的详细记录,例如异常时的异常堆栈   Baggage:和SpanContext一样并不属于数据结构而是一种机制,主要用于跨Span或者跨实例的上下文传递,Baggage的数据更多是用于运行时,而不会进行持久化    以一个Trace为例:\n首先是外部请求调用A,然后A依次同步调用了B和C,而B被调用时会去同步调用D,C被调用的时候会依次同步调用E和F,F被调用的时候会通过异步调用G,G则会异步调用H,最终完成一次调用。\n上图是通过Span之间的依赖关系来表现一个Trace,而在时间线上,则可以有如下的表达:\n当然,如果是同步调用的话,父Span的时间占用是包括子Span的时间消耗的。\n而落地到Skywalking中,我们以一条skywalking_segment的记录为例:\n{ \u0026quot;trace_id\u0026quot;: \u0026quot;52.70.15530767312125341\u0026quot;, \u0026quot;endpoint_name\u0026quot;: \u0026quot;Mysql/JDBI/Connection/commit\u0026quot;, \u0026quot;latency\u0026quot;: 0, \u0026quot;end_time\u0026quot;: 1553076731212, \u0026quot;endpoint_id\u0026quot;: 96142, \u0026quot;service_instance_id\u0026quot;: 52, \u0026quot;version\u0026quot;: 2, \u0026quot;start_time\u0026quot;: 1553076731212, \u0026quot;data_binary\u0026quot;: \u0026quot;CgwKCjRGnPvp5eikyxsSXhD///////////8BGMz62NSZLSDM+tjUmS0wju8FQChQAVgBYCF6DgoHZGIudHlwZRIDc3FsehcKC2RiLmluc3RhbmNlEghyaXNrZGF0YXoOCgxkYi5zdGF0ZW1lbnQYAiA0\u0026quot;, \u0026quot;service_id\u0026quot;: 2, \u0026quot;time_bucket\u0026quot;: 20190320181211, \u0026quot;is_error\u0026quot;: 0, 
\u0026quot;segment_id\u0026quot;: \u0026quot;52.70.15530767312125340\u0026quot; } 其中:\n trace_id:本次调用的唯一id,通过snowflake模式生成 endpoint_name:被调用的接口 latency:耗时 end_time:结束时间戳 endpoint_id:被调用的接口的唯一id service_instance_id:被调用的实例的唯一id version:本数据结构的版本号 start_time:开始时间戳 data_binary:里面保存了本次调用的所有Span的数据,序列化并用Base64编码,不会进行分析和用于查询 service_id:服务的唯一id time_bucket:调用所处的时段 is_error:是否失败 segment_id:数据本身的唯一id,类似于主键,通过snowflake模式生成  这里可以看到,目前Skywalking虽然相较于Pinpoint来说查询的维度要多一些,但是也很有限,而且除了endPoint,并没有和业务有关联的字段,只能通过时间/服务/实例/接口/成功标志/耗时来进行非业务相关的查询,如果后续要增强业务相关的搜索查询的话,应该还需要增加一些用于保存动态内容(如messageId,orderId等业务关键字)的字段用于快速定位。\n指标 指标数据相对于Tracing则要简单得多了,一般来说就是指标标志、时间戳、指标值,而Skywalking中的指标有两种:一种是采集的原始指标值,例如jvm的各种运行时指标(例如cpu消耗、内存结构、GC信息等);一种是各种二次统计指标(例如tp性能指标、SLA等,当然也有为了便于查询的更高时间维度的指标,例如基于分钟、小时、天、周、月)\n例如以下是索引skywalking_endpoint_cpm_hour中的一条记录,用于标志一个小时内某个接口的cpm指标:\n{ \u0026quot;total\u0026quot;: 8900, \u0026quot;service_id\u0026quot;: 5, \u0026quot;time_bucket\u0026quot;: 2019031816, \u0026quot;service_instance_id\u0026quot;: 5, \u0026quot;entity_id\u0026quot;: \u0026quot;7\u0026quot;, \u0026quot;value\u0026quot;: 148 } 各个字段的释义如下:\n total:一分钟内的调用总量 service_id:所属服务的唯一id time_bucket:统计的时段 service_instance_id:所属实例的唯一id entity_id:接口(endpoint)的唯一id value:cpm的指标值(cpm=call per minute,即total/60)  工程实现 Skywalking的工程实现堪比Dubbo,框架设计和代码质量都达到非常高的水准,以dubbo为例,即使2012年发布的老版本放到当今,其设计和编码看起来也依然赏心悦目,设计简洁但是覆盖了所有的核心需求,同时又具备非常强的扩展性,二次开发非常简单,然而却又不会像Spring那样过度封装(当然Spring作为一个更加高度通用的框架,更高的封装也是有必要的)导致代码阅读异常困难。\nagent agent(apm-sniffer)是Skywalking的Java探针实现,主要负责:\n 采集应用实例的jvm指标 通过切向编程进行数据埋点,采集调用链数据 通过RPC将采集的数据上报  当然,agent还实现了客户端采样,不过在APM监控系统里进行客户端数据采样都是没有灵魂的,所以这里就不再赘述了。\n首先,agent通过 org.apache.skywalking.apm.agent.core.boot.BootService 实现了整体的插件化,agent启动会加载所有的BootService实现,并通过 ServiceManager 来管理这些插件的生命周期,采集jvm指标、gRPC连接管理、调用链数据维护、数据上报OAP这些服务均是通过这种方式扩展。\n然后,agent还通过bytebuddy以javaagent的模式,通过字节码增强的机制来构造AOP环境,再提供PluginDefine的规范方便探针的开发,最终实现非侵入性的数据埋点,采集调用链数据。\n最终落地到代码上则异常清晰:\n//通过bytebuddy的AgentBuilder构造javaagent增强classLoader new AgentBuilder.Default(byteBuddy) .ignore( //忽略这些包的内容,不进行增强 nameStartsWith(\u0026quot;net.bytebuddy.\u0026quot;) .or(nameStartsWith(\u0026quot;org.slf4j.\u0026quot;)) .or(nameStartsWith(\u0026quot;org.apache.logging.\u0026quot;)) .or(nameStartsWith(\u0026quot;org.groovy.\u0026quot;)) .or(nameContains(\u0026quot;javassist\u0026quot;)) .or(nameContains(\u0026quot;.asm.\u0026quot;)) .or(nameStartsWith(\u0026quot;sun.reflect\u0026quot;)) .or(allSkyWalkingAgentExcludeToolkit()) .or(ElementMatchers.\u0026lt;TypeDescription\u0026gt;isSynthetic())) //通过pluginFinder加载所有的探针扩展,并获取所有可以增强的class .type(pluginFinder.buildMatch()) //按照pluginFinder的实现,去改变字节码增强类 .transform(new Transformer(pluginFinder)) //通过listener订阅增强的操作记录,方便调试 .with(new Listener()) .installOn(instrumentation); try { //加载所有的service实现并启动 ServiceManager.INSTANCE.boot(); } catch (Exception e) { logger.error(e, \u0026quot;Skywalking agent boot failure.\u0026quot;); } agent也提供了非常简单的扩展实现机制,以增强一个普通类的方法为例,首先你需要定义一个切向点:\npublic interface InstanceMethodsInterceptPoint { //定义切向方法的适配器,符合适配器的class将被增强 ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); //增强的具体实现类,classReference String getMethodsInterceptor(); //是否重写参数 boolean isOverrideArgs(); } 然后你还需要一个增强的实现类:\npublic interface InstanceMethodsAroundInterceptor { //方法真正执行前执行 void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; //方法真正执行后执行 Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, 
Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; //当异常发生时执行 void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } 一般在执行前和执行后进行数据埋点,就可以采集到想要的数据,当然实际编程要稍微复杂一点,不过官方也实现了对应的abstract类和数据埋点工具类,所以探针的二次开发在Skywalking这个级别确实是非常简单,只需要处理好资源占用和并发问题即可。真正的难点是要对需要增强的对象非常了解,熟悉其运作机制,才能找准切向点,既要所有的流程都需要经过这个点,又可以抓取到期望抓取的上下文信息。同时,多版本的适配和测试也是非常大的工作量,官方虽然提供witness的机制(通过验证某个class是否存在来验证版本),但是作为影响全局的探针,开发和测试都是需要慎之又慎的。\nOAP 同agent类似,OAP作为Skywalking最核心的模块,也实现了自己的扩展机制,不过在这里叫做Module,具体可以参考library-module,在module的机制下,Skywalking实现了自己必须核心组件:\n core:整个OAP核心业务(remoting、cluster、storage、analysis、query、alarm)的规范和接口 cluster:集群管理的具体实现 storage:数据容器的具体实现 query:为前端提供的查询接口的具体实现 receiver:接收探针上报数据的接收器的具体实现 alarm:监控告警的具体实现  以及一个可选组件:\n telemetry:用于监控OAP自身的健康状况  而前面提到的OAP的高扩展性则体现在核心业务的规范均定义在了core中,如果有需要自己扩展的,只需要自己单独做自己的实现,而不需要做侵入式的改动,最典型的示例则是官方支持的storage,不仅支持单机demo的内存数据库H2和经典的ES,连目前开源的Tidb都可以接入。\n初步实践 对于Skywalking的实践我们经历了三个阶段\n 线下测试 第一次生产环境小规模测试 第二次生产环境小规模测试+全量接入  线下测试 环境 由于是线下测试,所以我们直接使用物理机(E5-2680v2 x2, 128G)虚拟了一个集群(实际性能相比云服务器应该偏好一些):\n ES:单机实例,v6.5,4C8G,jvm内存分配为4G OAP:单机实例,v6.1.0-SNAPSHOT,4C8G,jvm内存分配为4G 应用:基于SpringCloud的4个测试实例,调用关系为A-\u0026gt;B-\u0026gt;C-\u0026gt;D,QPS为200  测试结果 拓扑图:\nOAP机器监控:\nES机器监控:\n服务监控面板:\n其中一个调用链记录:\n可以看出,Skywalking非常依赖CPU(不论是OAP还是ES),同时对于网络IO也有一定的要求,至于ES的文件IO在可接受范围内,毕竟确实有大量内容需要持久化。测试结果也基本达到预期要求,调用链和各个指标的监控都工作良好。\n第一次生产环境测试 在线下测试之后,我们再进行了一次基于实际业务针对探针的测试,测试没有发现探针的异常问题,也没有影响业务的正常运作,同时对于jvm实例影响也不是很大,CPU大概提高了5%左右,并不很明显。在这个基础上我们选择了线上的一台服务器,进行了我们第一次生产环境的测试。\n环境  ES:基于现有的一个ES集群,node x 3,v6.0 OAP:2C4G x 2,v6.1.0-SNAPSHOT,jvm内存分配为2G 应用:两个jvm实例  测试时间:03.11-03.16\n测试结果 业务机器负载情况:\n从最敏感的CPU指标上来看,增加agent并没有导致可见的CPU使用率的变化,而其他的内存、网络IO、连接数也基本没有变化。\nOAP负载情况:\n可以看到机器的CPU和网络均有较大的波动,但是也都没有真正打爆服务器,但是我们的实例却经常出现两种日志:\n One trace segment has been abandoned, cause by buffer is full.\n  Collector traceSegment service doesn\u0026rsquo;t response in xxx seconds.\n 通过阅读源码发现:\n agent和OAP只会使用一个长连接阻塞式的交换数据,如果某次数据交换没有得到响应,则会阻塞后续的上报流程(一般长连接的RPC请求会在数据传输期间互相阻塞,但是不会在等待期间互相阻塞,当然这也是源于agent并没有并发上报的机制),所以一旦OAP在接收数据的过程中发生阻塞,就会导致agent本地的缓冲区满,最终只能将监控数据直接丢弃防止内存泄漏  而导致OAP没有及时响应的一方面是OAP本身性能不够(OAP需要承担大量的二次统计工作,通过Jstack统计,长期有超过几十个线程处于RUNNABLE状态,据吴晟描述目前OAP都是高性能模式,后续将会提供配置来支持低性能模式),另一方面可能是ES批量插入效率不够,因此我们修改了OAP的批量插入参数来增加插入频率,降低单次插入数量:\n bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:2000 -\u0026gt; 20} # Execute the bulk every 2000 requests bulkSize: ${SW_STORAGE_ES_BULK_SIZE:20 -\u0026gt; 2} # flush the bulk every 20mb flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10 -\u0026gt; 2} # flush the bulk every 10 seconds whatever the number of requests  虽然 service doesn\u0026rsquo;t response 出现的频率明显降低,但是依然还是会偶尔出现,而每一次出现都会伴随大量的 trace segment has been abandoned ,推测OAP和ES可能都存在性能瓶颈(应该进行更进一步的诊断确定问题,不过当时直接和吴晟沟通,确认确实OAP非常消耗CPU资源,考虑到当时部署只是2C,并且还部署有其他业务,就没有进一步的测试)。\n同时,在频繁的数据丢弃过程中,也偶发了一个bug:当agent上报数据超时并且大量丢弃数据之后,即使后续恢复正常也能通过日志看到数据正常上报,在查询界面查询的时候,会查不到这个实例上报的数据,不过在重启OAP和agent之后,之前上报的数据又能查询到,这个也和吴晟沟通过,没有其他的案例,后续想重现却也一直没有成功。\n而同时还发现两个更加严重的问题:\n 我们使用的是线上已经部署好的ES集群,其版本只有6.0,而新的Skywalking使用了6.3的查询特性,导致很多查询执行报错,只能使用最简单的查询 我们的kafka集群版本也非常古老,不支持v1或者更高版本的header,而kafka的探针强依赖header来传输上下文信息,导致kafka客户端直接报错影响业务,所以也立即移除了kafka的探针  在这一次测试中,我们基本确认了agent对于应用的影响,同时也发现了一些我们和Skywalking的一些问题,留待后续测试确认。\n第二次生产环境测试 为了排除性能和ES版本的影响,测试Skywalking本身的可用性,参考吴晟的建议(这也是在最初技术选型的时候没有选择Pinpoint和CAT的部分原因:一方面Skywalking的功能符合我们的要求,更重要的是有更加直接和效率的和项目维护者直接沟通的渠道),所以这一次我们新申请了ES集群和OAP机器。\n环境  ES:腾讯云托管ES集群,4C16G x 3 SSD,v6.4 OAP:16C32G,standalone,jvm分配24G 应用:2~8个jvm实例  测试时间:03.18-至今\n测试结果 
OAP负载情况:\nES集群负载:\n测试过程中,我们先接入了一台机器上的两个实例,完全没有遇到一测中的延迟或者数据丢弃的问题,三天后我们又接入了另外两台机器的4个实例,这之后两天我们又接入了另外两台机器的2个实例。依然没有遇到一测中的延迟或者数据丢弃的问题。\n而ES负载的监控也基本验证了一测延迟的问题,Skywalking由于较高的并发插入,对于ES的性能压力很大(批量插入时需要针对每条数据分析并且构建查询索引),大概率是ES批量插入性能不够导致延迟,考虑到我们仅仅接入了8个实例,日均segment插入量大概5000万条(即日均5000万次独立调用),如果想支持更大规模的监控,对于ES容量规划势必要留够足够的冗余。同时OAP和ES集群的网络开销也不容忽视,在支撑大规模的监控时,需要集群并且receiver和aggregattor分离部署来分担网络IO的压力。\n而在磁盘容量占用上,我们设置的原始数据7天过期,目前刚刚开始滚动过期,目前segment索引已经累计了314757240条记录总计158G数据,当然我们目前异常记录较少,如果异常记录较多的话,其磁盘开销将会急剧增加(span中会记录异常堆栈信息)。而由于选择的SSD,磁盘的写入和查询性能都很高,即使只有3个节点,也完全没有任何压力。\n而在新版本的ES集群下,Skywalking的所有查询功能都变得可用,和我们之前自己的单独编写的异常指标监控都能完美对照。当然我们也遇到一个问题:Skywalking仅采集了调用记录,但是对于调用过程中的过程数据,除了异常堆栈其他均没有采集,导致真的出现异常也缺少充足的上下文信息还原现场,于是我们扩展了Skywalking的两个探针(我们项目目前重度依赖的组件):OkHttp(增加对requestBody和responseBody的采集)和SpringMVC(增加了对requestBody的采集),目前工作正常,如果进一步的增加其他的探针,采集到足够的数据,那么我们基本可以脱离ELK了。\n而OAP方面,CPU和内存的消耗远远低于预期的估计,CPU占用率一直较低,而分配的24G内存也仅使用了10+G,完全可以支持更大规模的接入量,不过在网络IO方面可能存在一定的风险,推测应该8C16G的容器就足以支持十万CPM级别的数据接入。\n当然我们在查询也遇到了一些瓶颈,最大的问题就是无法精确的命中某一条调用记录,就如前面的分析,因为segment的数据结构问题,无法进行面向业务的查询(例如messageId、requestId、orderId等),所以如果想精确匹配某一次调用请求,需要通过各个维度的条件约束慢慢缩小范围最后定位。\nSkywalking展望 通过上述对Skywalking的剖析和实践,Skywalking确实是一个优秀的APM+调用链跟踪监控系统,能够覆盖大部分使用场景,让研发和运维能够更加实时/准实时的了解线上服务的运行情况。当然Skywailking也不是尽善尽美,例如下面就是个人觉得目前可见的不满足我们期望的:\n 数据准实时通过gRPC上报,本地缓存的瓶颈(当然官方主要是为了简化模型,减少依赖,否则Skywalking还依赖ELK就玩得有点大了)  缓存队列的长度,过长占据内存,过短容易buffer满丢弃数据 优雅停机同时又不丢失缓存   数据上报需要在起点上报,链路回传的时候需要携带SPAN及子SPAN的信息,当链路较长或者SPAN保存的信息较多时,会额外消耗一定的带宽 skywalking更多是一个APM系统而不是分布式调用链跟踪系统  在整个链路的探针上均缺少输入输出的抓取 在调用链的筛查上并没用进行增强,并且体现在数据结构的设计,例如TAG信息均保存在SPAN信息中,而SPAN信息均被BASE64编码作为数据保存,无法检索,最终trace的筛查只能通过时间/traceId/service/endPoint/state进行非业务相关的搜索   skywalking缺少对三方接口依赖的指标,这个对于系统稳定往往非常重要  而作为一个初级的使用者,个人觉得我们可以使用有限的人力在以下方向进行扩展:\n 增加receiver:整合ELK,通过日志采集采集数据,降低异构系统的采集开发成本 优化数据结构,提供基于业务关键数据的查询接口 优化探针,采集更多的业务数据,争取代替传统的ELK日志简单查询,绝大部分异常诊断和定位均可以通过Skywalking即可完成 增加业务指标监控的模式,能够自定义业务指标(目前官方已经在实现 Metric Exporter )  ","excerpt":"APM和调用链跟踪 随着企业经营规模的扩大,以及对内快速诊断效率和对外SLA(服务品质协议,service-level agreement)的追求,对于业务系统的掌控度的要求越来越高,主要体现在:\n  …","ref":"/zh/2019-03-29-introduction-of-skywalking-and-simple-practice/","title":"SkyWalking调研与初步实践"},{"body":"前言 首先描述下问题的背景,博主有个习惯,每天上下班的时候看下skywalking的trace页面的error情况。但是某天突然发现生产环境skywalking页面没有任何数据了,页面也没有显示任何的异常,有点慌,我们线上虽然没有全面铺开对接skywalking,但是也有十多个应用。看了应用agent端日志后,其实也不用太担心,对应用毫无影响。大概情况就是这样,但是问题还是要解决,下面就开始排查skywalking不可用的问题。\n使用到的工具arthas Arthas是阿里巴巴开源的一款在线诊断java应用程序的工具,是greys工具的升级版本,深受开发者喜爱。当你遇到以下类似问题而束手无策时,Arthas可以帮助你解决:\n 这个类从哪个 jar 包加载的?为什么会报各种类相关的 Exception? 我改的代码为什么没有执行到?难道是我没 commit?分支搞错了? 遇到问题无法在线上 debug,难道只能通过加日志再重新发布吗? 线上遇到某个用户的数据处理有问题,但线上同样无法 debug,线下无法重现! 是否有一个全局视角来查看系统的运行状况? 有什么办法可以监控到JVM的实时运行状态? 
Arthas采用命令行交互模式,同时提供丰富的 Tab 自动补全功能,进一步方便进行问题的定位和诊断。  项目地址:https://github.com/alibaba/arthas\n先定位问题一 查看skywalking-oap-server.log的日志,发现会有一条异常疯狂的在输出,异常详情如下:\n2019-03-01 09:12:11,578 - org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker -3264081149 [DataCarrier.IndicatorPersistentWorker.endpoint_inventory.Consumser.0.Thread] ERROR [] - Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; org.elasticsearch.action.ActionRequestValidationException: Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; at org.elasticsearch.action.ValidateActions.addValidationError(ValidateActions.java:26) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.action.index.IndexRequest.validate(IndexRequest.java:183) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:515) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequestAndParseEntity(RestHighLevelClient.java:508) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.index(RestHighLevelClient.java:348) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient.forceInsert(ElasticSearchClient.java:141) ~[library-client-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.storage.plugin.elasticsearch.base.RegisterEsDAO.forceInsert(RegisterEsDAO.java:66) ~[storage-elasticsearch-plugin-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.lambda$onWork$0(RegisterPersistentWorker.java:83) ~[server-core-6.0.0-alpha.jar:6.0.0-alpha] at java.util.HashMap$Values.forEach(HashMap.java:981) [?:1.8.0_201] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.onWork(RegisterPersistentWorker.java:74) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.access$100(RegisterPersistentWorker.java:35) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker$PersistentConsumer.consume(RegisterPersistentWorker.java:120) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.consume(ConsumerThread.java:101) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.run(ConsumerThread.java:68) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] 2019-03-01 09:12:11,627 - org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker -3264081198 [DataCarrier.IndicatorPersistentWorker.endpoint_inventory.Consumser.0.Thread] ERROR [] - Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; org.elasticsearch.action.ActionRequestValidationException: Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; at org.elasticsearch.action.ValidateActions.addValidationError(ValidateActions.java:26) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.action.index.IndexRequest.validate(IndexRequest.java:183) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:515) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at 
org.elasticsearch.client.RestHighLevelClient.performRequestAndParseEntity(RestHighLevelClient.java:508) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.index(RestHighLevelClient.java:348) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient.forceInsert(ElasticSearchClient.java:141) ~[library-client-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.storage.plugin.elasticsearch.base.RegisterEsDAO.forceInsert(RegisterEsDAO.java:66) ~[storage-elasticsearch-plugin-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.lambda$onWork$0(RegisterPersistentWorker.java:83) ~[server-core-6.0.0-alpha.jar:6.0.0-alpha] at java.util.HashMap$Values.forEach(HashMap.java:981) [?:1.8.0_201] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.onWork(RegisterPersistentWorker.java:74) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.access$100(RegisterPersistentWorker.java:35) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker$PersistentConsumer.consume(RegisterPersistentWorker.java:120) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.consume(ConsumerThread.java:101) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.run(ConsumerThread.java:68) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] 可以看到,上面的异常输出的时间节点,以这种频率在疯狂的刷新。通过异常message,得知到是因为skywalking在写elasticsearch时,索引的id太长了。下面是elasticsearch的源码:\nif (id != null \u0026amp;\u0026amp; id.getBytes(StandardCharsets.UTF_8).length \u0026gt; 512) { validationException = addValidationError(\u0026#34;id is too long, must be no longer than 512 bytes but was: \u0026#34; + id.getBytes(StandardCharsets.UTF_8).length, validationException); } 具体可见:elasticsearch/action/index/IndexRequest.java#L240\n问题一: 通过日志,初步定位是哪个系统的url太长,skywalking在注册url数据时触发elasticsearch针对索引id校验的异常,而skywalking注册失败后会不断的重试,所以才有了上面日志不断刷的现象。\n问题解决: elasticsearch client在写es前通过硬编码的方式写死了索引id的长度不能超过512字节大小。也就是我们不能通过从ES侧找解决方案了。回到异常的message,只能看到提示id太长,并没有写明id具体是什么,这个异常提示其实是不合格的,博主觉得应该把id的具体内容抛出来,问题就简单了。因为异常没有明确提示,系统又比较多,不能十多个系统依次关闭重启来验证到底是哪个系统的哪个url有问题。这个时候Arthas就派上用场了,在不重启应用不开启debug模式下,查看实例中的属性对象。下面通过Arthas找到具体的url。\n从异常中得知,org.elasticsearch.action.index.IndexRequest这个类的validate方法触发的,这个方法是没有入参的,校验的id属性其实是对象本身的属性,那么我们使用Arthas的watch指令来看下这个实例id属性。先介绍下watch的用法:\n功能说明 让你能方便的观察到指定方法的调用情况。能观察到的范围为:返回值、抛出异常、入参,通过编写 \u0008OGNL 表达式进行对应变量的查看。\n参数说明 watch 的参数比较多,主要是因为它能在 4 个不同的场景观察对象\n   参数名称 参数说明     class-pattern 类名表达式匹配   method-pattern 方法名表达式匹配   express 观察表达式   condition-express 条件表达式   [b] 在方法调用之前观察   [e] 在方法异常之后观察   [s] 在方法返回之后观察   [f] 在方法结束之后(正常返回和异常返回)观察   [E] 开启正则表达式匹配,默认为通配符匹配   [x:] 指定输出结果的属性遍历深度,默认为 1    从上面的用法说明结合异常信息,我们得到了如下的指令脚本:\nwatch org.elasticsearch.action.index.IndexRequest validate \u0026ldquo;target\u0026rdquo;\n执行后,就看到了我们希望了解到的内容,如:\n索引id的具体内容看到后,就好办了。我们暂时把定位到的这个应用启动脚本中的的skywalking agent移除后(计划后面重新设计下接口)重启了下系统验证下。果然疯狂输出的日志停住了,但是问题并没完全解决,skywalking页面上的数据还是没有恢复。\n定位问题二 skywalking数据存储使用了elasticsearch,页面没有数据,很有可能是elasticsearch出问题了。查看elasticsearch日志后,发现elasticsearch正在疯狂的GC,日志如:\n: 139939K-\u0026gt;3479K(153344K), 0.0285655 secs] 473293K-\u0026gt;336991K(5225856K), 0.0286918 secs] [Times: 
user=0.05 sys=0.00, real=0.03 secs] 2019-02-28T20:05:38.276+0800: 3216940.387: Total time for which application threads were stopped: 0.0301495 seconds, Stopping threads took: 0.0001549 seconds 2019-02-28T20:05:38.535+0800: 3216940.646: [GC (Allocation Failure) 2019-02-28T20:05:38.535+0800: 3216940.646: [ParNew Desired survivor size 8716288 bytes, new threshold 6 (max 6) - age 1: 1220136 bytes, 1220136 total - age 2: 158496 bytes, 1378632 total - age 3: 88200 bytes, 1466832 total - age 4: 46240 bytes, 1513072 total - age 5: 126584 bytes, 1639656 total - age 6: 159224 bytes, 1798880 total : 139799K-\u0026gt;3295K(153344K), 0.0261667 secs] 473311K-\u0026gt;336837K(5225856K), 0.0263158 secs] [Times: user=0.06 sys=0.00, real=0.03 secs] 2019-02-28T20:05:38.562+0800: 3216940.673: Total time for which application threads were stopped: 0.0276971 seconds, Stopping threads took: 0.0001030 seconds 2019-02-28T20:05:38.901+0800: 3216941.012: [GC (Allocation Failure) 2019-02-28T20:05:38.901+0800: 3216941.012: [ParNew Desired survivor size 8716288 bytes, new threshold 6 (max 6) 问题二: 查询后得知,elasticsearch的内存配置偏大了,GC时间太长,导致elasticsearch脱离服务了。elasticsearch所在主机的内存是8G的实际内存7.6G,刚开始配置了5G的堆内存大小,可能Full GC的时候耗时太久了。查询elasticsearch官方文档后,得到如下的jvm优化建议:\n 将最小堆大小(Xms)和最大堆大小(Xmx)设置为彼此相等。 Elasticsearch可用的堆越多,它可用于缓存的内存就越多。但请注意,过多的堆可能会使您陷入长时间的垃圾收集暂停。 设置Xmx为不超过物理RAM的50%,以确保有足够的物理RAM用于内核文件系统缓存。 不要设置Xmx为JVM用于压缩对象指针(压缩oops)的截止值之上; 确切的截止值变化但接近32 GB。  详情见:https://www.elastic.co/guide/en/elasticsearch/reference/6.5/heap-size.html\n问题解决: 根据Xmx不超过物理RAM的50%上面的jvm优化建议。后面将Xms和Xmx都设置成了3G。然后先停掉skywalking(由于skywalking中会缓存部分数据,如果直接先停ES,会报索引找不到的类似异常,这个大部分skywalking用户应该有遇到过),清空skywalking缓存目录下的内容,如:\n在重启elasticsearch,接着启动skywalking后页面终于恢复了\n结语 整个问题排查到解决大概花了半天时间,幸好一点也不影响线上应用的使用,这个要得益于skywalking的设计,不然就是大灾难了。然后要感谢下Arthas的技术团队,写了这么好用的一款产品并且开源了,如果没有Arthas,这个问题真的不好定位,甚至一度想到了换掉elasticsearch,采用mysql来解决索引id过长的问题。Arthas真的是线上找问题的利器,博主在Arthas刚面世的时候就关注了,并且一直在公司推广使用,在这里在硬推一波。\n作者简介: 陈凯玲,2016年5月加入凯京科技。曾任职高级研发和项目经理,现任凯京科技研发中心架构\u0026amp;运维部负责人。pmp项目管理认证,阿里云MVP。热爱开源,先后开源过多个热门项目。热爱分享技术点滴,独立博客KL博客(http://www.kailing.pub)博主。\n","excerpt":"前言 首先描述下问题的背景,博主有个习惯,每天上下班的时候看下skywalking的trace页面的error情况。但是某天突然发现生产环境skywalking页面没有任何数据了,页面也没有显示任何的 …","ref":"/zh/2019-03-01-skywalking-troubleshoot/","title":"SkyWalking线上问题排查定位"},{"body":" 作者:王振飞, 写于:2019-02-24 说明:此文是个人所写,版本归属作者,代表个人观点,仅供参考,不代表skywalking官方观点。 说明:本次对比基于skywalking-6.0.0-GA和Pinpoint-1.8.2(截止2019-02-19最新版本)。另外,我们这次技术选型直接否定了Zipkin,其最大原因是它对代码有侵入性,CAT也是一样。这是我们所完全无法接受的。\n 这应该是目前最优秀的两款开源APM产品了,而且两款产品都通过字节码注入的方式,实现了对代码完全无任何侵入,他们的对比信息如下:\nOAP说明: skywalking6.x才有OAP这个概念,skywalking5.x叫collector。\n接下来,对每个PK项进行深入分析和对比。更多精彩和首发内容请关注公众号:【阿飞的博客】。\n社区比较\n这一点上面skywalking肯定完胜。一方面,skywalking已经进入apache孵化,社区相当活跃。而且项目发起人是中国人,我们能够进入官方群(Apache SkyWalking交流群:392443393)和项目发起人吴晟零距离沟通,很多问题能第一时间得到大家的帮助(玩过开源的都知道,这个价值有多大)。 而Pinpoint是韩国人开发的,免不了有沟通障碍。至于github上最近一年的commit频率,skywalking和Pinpoint旗鼓相当,都是接近20的水平: 所以,社区方面,skywalking更胜一筹。\n支持语言比较 Pinpoint只支持Java和PHP,而skywalking支持5种语言:Java, C#, PHP, Node.js, Go。如果公司的服务涉及到多个开发语言,那么skywalking会是你更好的选择。并且,如果你要实现自己的探针(比如python语言),skywalking的二次开发成本也比Pinpoint更低。\n 说明:Github上有开发者为Pinpoint贡献了对Node.js的支持,请戳链接:https://github.com/peaksnail/pinpoint-node-agent。但是已经停止维护,几年没更新了!\n 所以,支持语言方面,skywalking更胜一筹。\n协议比较 SkyWalking支持gRPC和http,不过建议使用gRPC,skywalking6.x版本已经不提供http方式(但是还会保留接收5.x的数据),以后会考虑删除。 而Pinpoint使用的是thrift协议。 协议本身没有谁好谁坏。\n存储比较(重要) 
笔者认为,存储是skywalking和Pinpoint最大的差异所在,因为底层存储决定了上层功能。\nPinpoint只支持HBase,且扩展代价较大。这就意味着,如果选择Pinpoint,还要有能力hold住一套HBase集群(daocloud从Pinpoint切换到skywalking就是因为HBase的维护代价有点大)。在这方面,skywalking支持的存储就多很多,这样的话,技术选型时可以根据团队技术特点选择合适的存储,而且还可以自行扩展(不过生产环境上应该大部分是以es存储为主)。\nPinpoint只支持HBase的另一个缺陷就是,HBase本身查询能力有限(HBase只能支持三种方式查询:RowKey精确查找,SCAN范围查找,全表扫描)限制了Pinpoint的查询能力,所以其支持的查询一定是在时间的基础上(Pinpoint通过鼠标圈定一个时间范围后查看这个范围内的Trace信息)。而skywalking可以多个维度任意组合查询,例如:时间范围,服务名,Trace状态,请求路径,TraceId等。\n另外,Pinpoint和skywalking都支持TTL,即历史数据保留策略。skywalking是在OAP模块的application.yml中配置从而指定保留时间。而Pinpoint是通过HBase的ttl功能实现,通过Pinpoint提供的hbase脚本https://github.com/naver/pinpoint/blob/master/hbase/scripts/hbase-create.hbase可以看到:ApplicationTraceIndex配置了TTL =\u0026gt; 5184000,SqlMetaData_Ver2配合了TTL =\u0026gt; 15552000,单位是秒。\n 说明:es并不是完全碾压HBase,es和HBase没有绝对的好和坏。es强在检索能力,存储能力偏弱(千亿以下,es还是完全有能力hold的住的)。HBase强在存储能力,检索能力偏弱。如果搜集的日志量非常庞大,那么es存储就比较吃力。当然,没有蹩脚的中间件,只有蹩脚的程序员,无论是es还是HBase,调优才是最关键的。同样的,如果对检索能力有一定的要求,那么HBase肯定满足不了你。所以,又到了根据你的业务和需求决定的时刻了,trade-off真是无所不在。\n UI比较 Pinpoint的UI确实比skywalking稍微好些,尤其是服务的拓扑图展示。不过daocloud根据Pinpoint的风格为skywalking定制了一款UI。请戳链接:https://github.com/TinyAllen/rocketbot,项目介绍是:rocketbot: A UI for Skywalking。截图如下所示; 所以,只比较原生UI的话,Pinpoint更胜一筹。\n扩展性比较 Pinpoint好像设计之初就没有过多考虑扩展性,无论是底层的存储,还是自定义探针实现等。而skywalking核心设计目标之一就是Pluggable,即可插拔。\n以存储为例,pinpoint完全没有考虑扩展性,而skywalking如果要自定义实现一套存储,只需要定义一个类实现接口org.apache.skywalking.oap.server.library.module.ModuleProvider,然后实现一些DAO即可。至于Pinpoint则完全没有考虑过扩展底层存储。\n再以实现一个自己的探针为例(比如我要实现python语言的探针),Pinpoint选择thrift作为数据传输协议标准,而且为了节省数据传输大小,在传递常量的时候也尽量使用数据参考字典,传递一个数字而不是直接传递字符串等等。这些优化也增加了系统的复杂度:包括使用 Thrift 接口的难度、UDP 数据传输的问题、以及数据常量字典的注册问题等等。Pinpoint发展这么年才支持Java和PHP,可见一斑。而skywalking的数据接口就标准很多,并且支持OpenTracing协议,除了官方支持Java以外,C#、PHP和Node.js的支持都是由社区开发并维护。\n还有后面会提到的告警,skywalking的可扩展性也要远好于Pinpoint。\n最后,Pinpoint和skywalking都支持插件开发,Pinpoint插件开发参考:http://naver.github.io/pinpoint/1.8.2/plugindevguide.html。skywalking插件开发参考:https://github.com/apache/incubator-skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md。\n所以,扩展性方面skywalking更胜一筹。\n告警比较 Pinpoint和skywalking都支持自定义告警规则。\n但是恼人的是,Pinpoint如果要配置告警规则,还需要安装MySQL(配置告警时的用户,用户组信息以及告警规则都持久化保存在MySQL中),这就导致Pinpoint的维护成本又高了一些,既要维护HBase又要维护MySQL。\nPinpoint支持的告警规则有:SLOW COUNT|RATE, ERROR COUNT|RATE, TOTAL COUNT, SLOW COUNT|RATE TO CALLEE, ERROR COUNT|RATE TO CALLEE, ERROR RATE TO CALLEE, HEAP USAGE RATE, JVM CPU USAGE RATE, DATASOURCE CONNECTION USAGE RATE。\nPinpoint每3分钟周期性检查过去5分钟的数据,如果有符合规则的告警,就会发送sms/email给用户组下的所有用户。需要说明的是,实现发送sms/email的逻辑需要自己实现,Pinpoint只提供了接口com.navercorp.pinpoint.web.alarm.AlarmMessageSender。并且Pinpoint发现告警持续时,会递增发送sms/email的时间间隔 3min -\u0026gt; 6min -\u0026gt; 12min -\u0026gt; 24min,防止sms/email狂刷。\n Pinpoint告警参考:http://naver.github.io/pinpoint/1.8.2/alarm.html\n skywalking配置告警不需要引入任何其他存储。skywalking在config/alarm-settings.xml中可以配置告警规则,告警规则支持自定义。\nskywalking支持的告警规则(配置项中的名称是indicator-name)有:service_resp_time, service_sla, service_cpm, service_p99, service_p95, service_p90, service_p75, service_p50, service_instance_sla, service_instance_resp_time, service_instance_cpm, endpoint_cpm, endpoint_avg, endpoint_sla, endpoint_p99, endpoint_p95, endpoint_p90, endpoint_p75, endpoint_p50。\nSkywalking通过HttpClient的方式远程调用在配置项webhooks中定义的告警通知服务地址。skywalking也支持silence-period配置,假设在TN这个时间点触发了告警,那么TN -\u0026gt; TN+period 这段时间内不会再重复发送该告警。\n skywalking告警参考:https://github.com/apache/incubator-skywalking/blob/master/docs/en/setup/backend/backend-alarm.md。目前只支持official_analysis.oal脚本中Service, Service Instance, Endpoint scope的metric,其他scope的metric需要等待后续扩展。\n 
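为了更直观地说明 webhooks 的接入方式,下面给出一个最简化的告警回调接收端示意(仅为示意:假设回调服务基于 Spring MVC 实现,/alarm/receive 这个路径是自行约定并配置在 webhooks 中的地址;告警消息体这里按原始 JSON 字符串处理,不假设具体字段,实际格式请以上面的官方 backend-alarm 文档为准):

import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;

// 仅为示意:接收 skywalking 告警回调的最小实现
@RestController
public class AlarmWebhookController {

    // skywalking 会通过 HTTP POST 把告警消息(JSON)推送到 webhooks 中配置的这个地址
    @PostMapping("/alarm/receive")
    public void onAlarm(@RequestBody String alarmMessageJson) {
        // 这里可以先解析 JSON,再按自己的通知体系(短信/邮件/微信等)进行分发
        System.out.println("received alarm: " + alarmMessageJson);
    }
}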
Pinpoint和skywalking都支持常用的告警规则配置,但是skywalking采用webhooks的方式就灵活很多:短信通知,邮件通知,微信通知都是可以支持的。而Pinpoint只能sms/email通知,并且还需要引入MySQL存储,增加了整个系统复杂度。所以,告警方面,skywalking更胜一筹。\nJVM监控 skywalking支持监控:Heap, Non-Heap, GC(YGC和FGC)。 Pinpoint能够监控的指标主要有:Heap, Non-Heap, FGC, DirectBufferMemory, MappedBufferMemory,但是没有YGC。另外,Pinpoint还支持多个指标同一时间点查看的功能。如下图所示:\n所以,对JVM的监控方面,Pinpoint更胜一筹。\n服务监控 包括操作系统,和部署的服务实例的监控。 Pinpoint支持的维度有:CPU使用率,Open File Descriptor,数据源,活动线程数,RT,TPS。 skywalking支持的维度有:CPU使用率,SLA,RT,CPM(Call Per Minutes)。 所以,这方面两者旗鼓相当,没有明显的差距。\n跟踪粒度比较 Pinpoint在这方面做的非常好,跟踪粒度非常细。如下图所示,是Pinpoint对某个接口的trace信息: 而同一个接口skywalking的trace信息如下图所示:  备注: 此截图是skywalking加载了插件apm-spring-annotation-plugin-6.0.0-GA.jar(这个插件允许跟踪加了@Bean, @Service, @Component and @Repository注解的spring context中的bean的方法)。\n 通过对比发现,在跟踪粒度方面,Pinpoint更胜一筹。\n过滤追踪 Pinpoint和skywalking都可以实现,而且配置的表达式都是基于ant风格。 Pinpoint在Web UI上配置 filter wizard 即可自定义过滤追踪。 skywalking通过加载apm-trace-ignore-plugin插件就能自定义过滤跟踪,skywalking这种方式更灵活,比如一台高配服务器上有若干个服务,在共用的agent配置文件apm-trace-ignore-plugin.config中可以配置通用的过滤规则,然后通过-D的方式为每个服务配置个性化过滤。\n所以,在过滤追踪方面,skywalking更胜一筹。\n性能损耗 由于Pinpoint采集信息太过详细,所以,它对性能的损耗最大。而skywalking默认策略比较保守,对性能损耗很小。 有网友做过压力测试,对比如下:\n 图片来源于:https://juejin.im/post/5a7a9e0af265da4e914b46f1\n 所以,在性能损耗方面,skywalking更胜一筹。\n发布包比较 skywalking与时俱进,全系标配jar包,部署只需要执行start.sh脚本即可。而Pinpoint的collector和web还是war包,部署时依赖web容器(比如Tomcat)。拜托,都9012年了。\n所以,在发布包方面,skywalking更胜一筹。\n支持组件比较 skywalking和Pinpoint支持的中间件对比说明:\n WEB容器说明:Pinpoint支持几乎所有的WEB容器,包括开源和商业的。而wkywalking只支持开源的WEB容器,对2款大名鼎鼎的商业WEB容器Weblogic和Wevsphere都不支持。 RPC框架说明:对RPC框架的支持,skywalking简直秒杀Pinpoint。连小众的motan和sofarpc都支持。 MQ说明:skywalking比Pinpoint多支持一个国产的MQ中间件RocketMQ,毕竟RocketMQ在国内名气大,而在国外就一般了。加之skywalking也是国产的。 RDBMS/NoSQL说明:Pinpoint对RDBMS和NoSQL的支持都要略好于skywalking,RDBMS方面,skywalking不支持MSSQL和MariaDB。而NoSQL方面,skywalking不支持Cassandra和HBase。至于Pinpoint不支持的H2,完全不是问题,毕竟生产环境是肯定不会使用H2作为底层存储的。 Redis客户端说明:虽然skywalking和Pinpoint都支持Redis,但是skywalking支持三种流行的Redis客户端:Jedis,Redisson,Lettuce。而Pinpoint只支持Jedis和Lettuce,再一次,韩国人开发的Pinpoint无视了目前中国人开发的GitHub上star最多的Redis Client \u0026ndash; Redisson。 日志框架说明:Pinpoint居然不支持log4j2?但是已经有人开发了相关功能,详情请戳链接:log4j plugin support log4j2 or not? https://github.com/naver/pinpoint/issues/3055  通过对skywalking和Pinpoint支持中间件的对比我们发现,skywalking对国产软件的支持真的是全方位秒杀Pinpoint,比如小众化的RPC框架:motan(微博出品),sofarpc,阿里的RocketMQ,Redis客户端Redisson,以及分布式任务调度框架elastic-job等。当然也从另一方面反应国产开源软件在世界上的影响力还很小。\n这方面没有谁好谁坏,毕竟每个公司使用的技术栈不一样。如果你对RocketMQ有强需求,那么skywalking是你的最佳选择。如果你对es有强需求,那么skywalking也是你的最佳选择。如果HBase是你的强需求,那么Pinpoint就是你的最佳选择。如果MSSQL是你的强需求,那么Pinpoint也是你的最佳选择。总之,这里完全取决你的项目了。\n总结 经过前面对skywalking和Pinpoint全方位对比后我们发现,对于两款非常优秀的APM软件,有一种既生瑜何生亮的感觉。Pinpoint的优势在于:追踪数据粒度非常细、功能强大的用户界面,以及使用HBase作为存储带来的海量存储能力。而skywalking的优势在于:非常活跃的中文社区,支持多种语言的探针,对国产开源软件非常全面的支持,以及使用es作为底层存储带来的强大的检索能力,并且skywalking的扩展性以及定制化要更优于Pinpoint:\n 如果你有海量的日志存储需求,推荐Pinpoint。 如果你更看重二次开发的便捷性,推荐skywalking。  最后,参考上面的对比,结合你的需求,哪些不能妥协,哪些可以舍弃,从而更好的选择一款最适合你的APM软件。\n参考链接  参考[1]. https://github.com/apache/incubator-skywalking/blob/master/docs/en/setup/service-agent/java-agent/Supported-list.md 参考[2]. http://naver.github.io/pinpoint/1.8.2/main.html#supported-modules 参考[3]. https://juejin.im/post/5a7a9e0af265da4e914b46f1    如果觉得本文不错,请关注作者公众号:【阿飞的博客】,多谢!\n ","excerpt":"作者:王振飞, 写于:2019-02-24 说明:此文是个人所写,版本归属作者,代表个人观点,仅供参考,不代表skywalking官方观点。 说明:本次对比基于skywalking-6.0.0-GA …","ref":"/zh/2019-02-24-skywalking-pk-pinpoint/","title":"APM巅峰对决:SkyWalking P.K. 
Pinpoint"},{"body":"According to the Apache Software Foundation branding policy, all Docker images of Apache SkyWalking should be transferred from skywalking to apache with a prefix skywalking-. The transfer details are as follows:\n skywalking/base -\u0026gt; apache/skywalking-base skywalking/oap -\u0026gt; apache/skywalking-oap-server skywalking/ui -\u0026gt; apache/skywalking-ui  All repositories under skywalking will be removed after one week.\n","excerpt":"According to the Apache Software Foundation branding policy, all Docker images of Apache SkyWalking …","ref":"/events/transfer-docker-images-to-apache-official-repository/","title":"Transfer Docker Images to Apache Official Repository"},{"body":"6.0.0-GA release. Go to the downloads page to find release tars. This is an important milestone version; we recommend all users upgrade to this version.\nKey updates\n Bugs fixed Register bug fix, refactor and performance improvement New trace UI  ","excerpt":"6.0.0-GA release. Go to the downloads page to find release tars. This is an important milestone version; …","ref":"/events/release-apache-skywalking-apm-6-0-0-ga/","title":"Release Apache SkyWalking APM 6.0.0-GA"},{"body":"Based on his contributions to the project, he has been accepted as a SkyWalking PPMC member. Welcome aboard.\n","excerpt":"Based on his contributions to the project, he has been accepted as a SkyWalking PPMC member. Welcome aboard.","ref":"/events/welcome-jian-tan-as-a-new-ppmc/","title":"Welcome Jian Tan as a new PPMC"},{"body":" Author: Hongtao Gao, Apache SkyWalking \u0026amp; ShardingSphere PMC GitHub, Twitter, Linkedin  The service mesh receiver was first introduced in Apache SkyWalking 6.0.0-beta. It is designed to provide a common entrance for receiving telemetry data from service mesh frameworks, for instance, Istio, Linkerd, Envoy, etc. What is a service mesh? According to Istio’s explanation:\nThe term service mesh is used to describe the network of microservices that make up such applications and the interactions between them.\nAs a PMC member of Apache SkyWalking, I have tested the trace receiver and understand the performance of collectors in the trace scenario well. I would also like to figure out the performance of the service mesh receiver.\nDifferences between trace and service mesh The following chart presents a typical trace map:\nYou can find a variety of elements in it, such as web services, local methods, databases, caches, MQ and so on. But for now, a service mesh only collects service network telemetry data that contains the entrance and exit data of a service (more elements, such as databases, will be imported soon). A smaller quantity of data is sent to the service mesh receiver than to the trace receiver.\nBut using a sidecar is a little different. The client requesting “A” will send a segment to the service mesh receiver from “A”’s sidecar. If “A” depends on “B”, another segment will be sent from “A”’s sidecar. But for a trace system, only one segment is received by the collector. The sidecar model splits one segment into smaller segments, which increases the network overhead of the service mesh receiver.\nDeployment Architecture In this test, I pick two different backend deployments. One is called the mini unit, consisting of one collector and one Elasticsearch instance. The other is a standard production cluster, containing three collectors and three Elasticsearch instances.\nThe mini unit is a suitable architecture for a dev or test environment. It saves your time and VM resources, and speeds up the deployment process.\nThe standard cluster provides good performance and HA for a production scenario. 
Though you will pay more money and have to take care of the cluster carefully, the reliability of the cluster will be a good reward.\nI pick 8-CPU, 16GB VMs to set up the test environment. This test targets the performance of normal usage scenarios, so that choice is reasonable. The cluster is built on Google Kubernetes Engine (GKE), and every node is linked to the others through a VPC network. Because running a collector is a CPU-intensive task, the resource request of the collector deployment should be 8 CPU, which means every collector instance occupies a VM node.\nTesting Process The number of mesh fragments received per second (MPS) depends on the following variables.\n Ingress queries per second (QPS) The topology of a microservice cluster Service mesh mode (proxy or sidecar)  In this test, I use the Bookinfo app as a demo cluster.\nSo every request will touch at most 4 nodes. Plus, picking the sidecar mode (every request sends two pieces of telemetry data), the MPS will be QPS * 4 * 2.\nThere are also some important metrics that should be explained:\n Client Query Latency: GraphQL API query response time heatmap. Client Mesh Sender: mesh segments sent per second. The total line represents the total send amount and the error line is the total number of failed sends. Mesh telemetry latency: service mesh receiver data handling heatmap. Mesh telemetry received: received mesh telemetry data per second.  Mini Unit You can see that the collector can process up to 25k data points per second. The CPU usage is about 4 cores. Most of the query latency is less than 50ms. After logging in to the VM on which the collector instance is running, I can see that the system load is reaching the limit (max is 8).\nAccording to the previous formula, a single collector instance could process 3k QPS of Bookinfo traffic.\nStandard Cluster Compared to the mini unit, the cluster’s throughput increases linearly. Three instances provide a total of 80k-per-second processing power. Query latency increases slightly, but it’s still very small (less than 500ms). I also checked the system load of every collector instance; all reached the limit. 10k QPS of Bookinfo telemetry data can be processed by the cluster.\nConclusion Let’s wrap up. There are some important things you can get from this test.\n MPS varies with these three variables, so the concrete test results in this blog are not the point; users should pick proper values according to their own systems. The collector cluster’s processing power can scale out. The collector is a CPU-intensive application, so you should provide sufficient CPU resources to it.  This blog gives people a general method to evaluate the throughput of the service mesh receiver. 
Users could use this to design their Apache Skywalking backend deployment architecture.\n","excerpt":"Author: Hongtao Gao, Apache SkyWalking \u0026amp; ShardingShpere PMC GitHub, Twitter, Linkedin  Service …","ref":"/blog/2019-01-25-mesh-loadtest/","title":"SkyWalking performance in Service Mesh scenario"},{"body":"","excerpt":"","ref":"/zh_tags/development/","title":"Development"},{"body":"ps:本文仅写给菜鸟,以及不知道如何远程调试的程序员,并且仅仅适用skywalking的远程调试\n概述 远程调试的目的是为了解决代码或者说程序包部署在服务器上运行,只能通过log来查看问题,以及不能跟在本地IDE运行debug那样查找问题,观看程序运行流程\u0026hellip; 想想当你的程序运行在服务器上,你在本地的IDE随时debug,是不是很爽的感觉。\n好了不废话,切入正题。\n环境篇 IDE:推荐 IntelliJ IDEA\n开发语言: 本文仅限于java,其他语言请自行询问google爸爸或者baidu娘娘\n源代码:自行从github下载,并且确保你运行的skywalking包也源代码的一致,(也就是说你自己从源代码编译打包运行,虽然不一样也可以调试,但是你想想你在本地开发,更改完代码,没有重新运行,debug出现的诡异情况)\n场景篇 假定有如下三台机器\n   IP 用途 备注     10.193.78.1 oap-server skywalking 的oap服务(或者说collector所在的服务器)   10.193.78.2 agent skywalking agent运行所在的服务器   10.193.78.0 IDE 你自己装IDE也就是IntelliJ IDEA的机器    以上环境,场景请自行安装好,并确认正常运行。本文不在赘述\n废话终于说完了\n操作篇 首要条件,下载源码后,先用maven 打包编译。然后使用Idea打开源码的父目录,整体结构大致如下图 1 :agent调试 1)Idea 配置部分 点击Edit Configurations 在弹出窗口中依次找到(红色线框的部分)并点击 打开的界面如下 修改Name值,自己随意,好记即可 然后Host输入10.193.78.2 Port默认或者其他的,重要的是这个端口在10.193.78.2上没有被占用\n然后找到Use module classpath 选择 apm-agent 最终的结果如下: 注意选择目标agent运行的jdk版本,很重要\n然后点击Apply,并找到如下内容,并且复制待用 2)agent配置部分 找到agent配置的脚本,并打开,找到配置agent的地方, 就这个地方,在这个后边加上刚才复制的内容 最终的结果如下 提供一个我配置的weblogic的配置(仅供参考) 然后重启应用(agent)\n3)调试 回到Idea中找到这个地方,并点击debug按钮,你没看错,就是红色圈住的地方 然后控制台如果出现以下字样: 那么恭喜你,可以愉快的加断点调试了。 ps:需要注意的是agent的、 service instance的注册可能不能那么愉快的调试。因为这个注册比较快,而且是在agent启动的时候就发生的, 而远程调试也需要agent打开后才可以调试,所以,如果你手快当我没说这句话。\n2 :oap-server的调试(也就是collector的调试) 具体过程不在赘述,和上一步的agent调试大同小异,不同的是 Use module classpath需要选择oap-server\n","excerpt":"ps:本文仅写给菜鸟,以及不知道如何远程调试的程序员,并且仅仅适用skywalking的远程调试\n概述 远程调试的目的是为了解决代码或者说程序包部署在服务器上运行,只能通过log来查看问题,以及不能跟 …","ref":"/zh/2019-01-24-skywalking-remote-debug/","title":"SkyWalking的远程调试"},{"body":"引言 《SkyWalking Java 插件贡献实践》:本文将基于SkyWalking 6.0.0-GA-SNAPSHOT版本,以编写Redis客户端Lettuce的SkyWalking Java Agent 插件为例,与大家分享我贡献PR的过程,希望对大家了解SkyWalking Java Agent插件有所帮助。\n基础概念 OpenTracing和SkyWalking链路模块几个很重要的语义概念。\n  Span:可理解为一次方法调用,一个程序块的调用,或一次RPC/数据库访问。只要是一个具有完整时间周期的程序访问,都可以被认为是一个span。SkyWalking Span对象中的重要属性\n   属性 名称 备注     component 组件 插件的组件名称,如:Lettuce,详见:ComponentsDefine.Class。   tag 标签 k-v结构,关键标签,key详见:Tags.Class。   peer 对端资源 用于拓扑图,若DB组件,需记录集群信息。   operationName 操作名称 若span=0,operationName将会搜索的下拉列表。   layer 显示 在链路页显示,详见SpanLayer.Class。      Trace:调用链,通过归属于其的Span来隐性的定义。一条Trace可被认为是一个由多个Span组成的有向无环图(DAG图),在SkyWalking链路模块你可以看到,Trace又由多个归属于其的trace segment组成。\n  Trace segment:Segment是SkyWalking中的一个概念,它应该包括单个OS进程中每个请求的所有范围,通常是基于语言的单线程。由多个归属于本线程操作的Span组成。\n  核心API 跨进程ContextCarrier核心API  为了实现分布式跟踪,需要绑定跨进程的跟踪,并且应该传播上下文 整个过程。 这就是ContextCarrier的职责。 以下是实现有关跨进程传播的步骤:  在客户端,创建一个新的空的ContextCarrier,将ContextCarrier所有信息放到HTTP heads、Dubbo attachments 或者Kafka messages。 通过服务调用,将ContextCarrier传递到服务端。 在服务端,在对应组件的heads、attachments或messages获取ContextCarrier所有消息。将服务端和客户端的链路信息绑定。    跨线程ContextSnapshot核心API  除了跨进程,跨线程也是需要支持的,例如异步线程(内存中的消息队列)和批处理在Java中很常见,跨进程和跨线程十分相似,因为都是需要传播 上下文。 唯一的区别是,不需要跨线程序列化。 以下是实现有关跨线程传播的步骤:  使用ContextManager#capture获取ContextSnapshot对象。 让子线程以任何方式,通过方法参数或由现有参数携带来访问ContextSnapshot。 在子线程中使用ContextManager#continued。    详尽的核心API相关知识,可点击阅读 《插件开发指南-中文版本》\n插件实践 Lettuce操作redis代码 @PostMapping(\u0026#34;/ping\u0026#34;) public String ping(HttpServletRequest request) throws ExecutionException, InterruptedException { RedisClient redisClient = RedisClient.create(\u0026#34;redis://\u0026#34; + \u0026#34;127.0.0.1\u0026#34; + 
\u0026#34;:6379\u0026#34;); StatefulRedisConnection\u0026lt;String, String\u0026gt; connection0 = redisClient.connect(); RedisAsyncCommands\u0026lt;String, String\u0026gt; asyncCommands0 = connection0.async(); AsyncCommand\u0026lt;String, String, String\u0026gt; future = (AsyncCommand\u0026lt;String, String, String\u0026gt;)asyncCommands0.set(\u0026#34;key_a\u0026#34;, \u0026#34;value_a\u0026#34;); future.onComplete(s -\u0026gt; OkHttpClient.call(\u0026#34;http://skywalking.apache.org\u0026#34;)); future.get(); connection0.close(); redisClient.shutdown(); return \u0026#34;pong\u0026#34;; } 插件源码架构 Lettuce对Redis封装与Redisson Redisson 类似,目的均是实现简单易用,且无学习曲线的Java的Redis客户端。所以要是先对Redis操作的拦截,需要学习对应客户端的源码。\n设计插件 理解插件实现过程,找到最佳InterceptPoint位置是实现插件融入SkyWalking的核心所在。\n代码实现 PR的url:Support lettuce plugin\n实践中遇到的问题  多线程编程使用debug断点会将链路变成同步,建议使用run模式增加log,或者远程debug来解决。 多线程编程,需要使用跨线程ContextSnapshot核心API,否则链路会断裂。 CompleteableCommand.onComplete方法有时会同步执行,这个和内部机制有关,有时候不分离线程。 插件编译版本若为1.7+,需要将插件放到可选插件中。因为sniffer支持的版本是1.6。  插件兼容 为了插件得到插件最终的兼容兼容版本,我们需要使用docker对所有插件版本的测试,具体步骤如下:\n 编写测试用例:关于如何编写测试用例,请按照如何编写文档来实现。 提供自动测试用例。 如:Redisson插件testcase 确保本地几个流行的插件版本,在本地运行起来是和自己的预期是一致的。 在提供自动测试用例并在CI中递交测试后,插件提交者会批准您的插件。 最终得到完整的插件测试报告。  Pull Request 提交PR 提交PR的时候,需要简述自己对插件的设计,这样有助于与社区的贡献者讨论完成codereview。\n申请自动化测试 测试用例编写完成后,可以申请自动化测试,在自己的PR中会生成插件兼容版本的报告。\n插件文档 插件文档需要更新:Supported-list.md相关插件信息的支持。\n插件如果为可选插件需要在agent-optional-plugins可选插件文档中增加对应的描述。\n注释 Lettuce是一个完全无阻塞的Redis客户端,使用netty构建,提供反应,异步和同步数据访问。了解细节可点击阅读 lettuce.io;\nOpenTracing是一个跨编程语言的标准,了解细节可点击阅读 《OpenTracing语义标准》;\nspan:org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan接口定义了所有Span实现需要完成的方法;\nRedisson是一个非常易用Java的Redis客户端, 它没有学习曲线,无需知道任何Redis命令即可开始使用它。了解细节可点击阅读 redisson.org;\n","excerpt":"引言 《SkyWalking Java 插件贡献实践》:本文将基于SkyWalking 6.0.0-GA-SNAPSHOT版本,以编写Redis客户端Lettuce的SkyWalking Java …","ref":"/zh/2019-01-21-agent-plugin-practice/","title":"SkyWalking Java 插件贡献实践"},{"body":"Jinlin Fu has contributed 4 new plugins, including gson, activemq, rabbitmq and canal, which made SkyWalking supporting all mainstream OSS MQ. Also provide several documents and bug fixes. The SkyWalking PPMC based on these, promote him as new committer. 
Welcome on board.\n","excerpt":"Jinlin Fu has contributed 4 new plugins, including gson, activemq, rabbitmq and canal, which made …","ref":"/events/welcome-jinlin-fu-as-new-committer/","title":"Welcome Jinlin Fu as new committer"},{"body":" 作者:赵瑞栋 原文地址  引言 微服务框架落地后,分布式部署架构带来的问题就会迅速凸显出来。服务之间的相互调用过程中,如果业务出现错误或者异常,如何快速定位问题?如何跟踪业务调用链路?如何分析解决业务瓶颈?\u0026hellip;本文我们来看看如何解决以上问题。\n一、SkyWalking初探 Skywalking 简介 Skywalking是一款国内开源的应用性能监控工具,支持对分布式系统的监控、跟踪和诊断。\n它提供了如下的主要功能特性: Skywalking 技术架构 SW总体可以分为四部分:\n1.Skywalking Agent:使用Javaagent做字节码植入,无侵入式的收集,并通过HTTP或者gRPC方式发送数据到Skywalking Collector。\nSkywalking Collector :链路数据收集器,对agent传过来的数据进行整合分析处理并落入相关的数据存储中。 Storage:Skywalking的存储,时间更迭,sw已经开发迭代到了6.x版本,在6.x版本中支持以ElasticSearch、Mysql、TiDB、H2、作为存储介质进行数据存储。 UI :Web可视化平台,用来展示落地的数据。  Skywalking Agent配置 通过了解配置,可以对一个组件功能有一个大致的了解。让我们一起看一下skywalking的相关配置。\n解压开skywalking的压缩包,在agent/config文件夹中可以看到agent的配置文件。\n从skywalking支持环境变量配置加载,在启动的时候优先读取环境变量中的相关配置。\n agent.namespace: 跨进程链路中的header,不同的namespace会导致跨进程的链路中断 agent.service_name:一个服务(项目)的唯一标识,这个字段决定了在sw的UI上的关于service的展示名称 agent.sample_n_per_3_secs: 客户端采样率,默认是-1代表全采样 agent.authentication: 与collector进行通信的安全认证,需要同collector中配置相同 agent.ignore_suffix: 忽略特定请求后缀的trace collecttor.backend_service: agent需要同collector进行数据传输的IP和端口 logging.level: agent记录日志级别  skywalking agent使用javaagent无侵入式的配合collector实现对分布式系统的追踪和相关数据的上下文传递。\nSkywalking Collector关键配置 Collector支持集群部署,zookeeper、kubernetes(如果你的应用是部署在容器中的)、consul(GO语言开发的服务发现工具)是sw可选的集群管理工具,结合大家具体的部署方式进行选择。详细配置大家可以去Skywalking官网下载介质包进行了解。\nCollector端口设置\n downsampling: 采样汇总统计维度,会分别按照分钟、【小时、天、月】(可选)来统计各项指标数据。 通过设置TTL相关配置项可以对数据进行自动清理。  Skywalking 在6.X中简化了配置。collector提供了gRPC和HTTP两种通信方式。\nUI使用rest http通信,agent在大多数场景下使用grpc方式通信,在语言不支持的情况下会使用http通信。\n关于绑定IP和端口需要注意的一点是,通过绑定IP,agent和collector必须配置对应ip才可以正常通信。\nCollector存储配置\n在application.yml中配置的storage模块配置中选择要使用的数据库类型,并填写相关的配置信息。\nCollector Receiver\nReceiver是Skywalking在6.x提出的新的概念,负责从被监控的系统中接受指标数据。用户完全可以参照OpenTracing规范来上传自定义的监控数据。Skywalking官方提供了service-mesh、istio、zipkin的相关能力。\n现在Skywalking支持服务端采样,配置项为sampleRate,比例采样,如果配置为5000则采样率就是50%。\n关于采样设置的一点注意事项\n关于服务采样配置的一点建议,如果Collector以集群方式部署,比如:Acollector和Bcollector,建议Acollector.sampleRate = Bcollector.sampleRate。如果采样率设置不相同可能会出现数据丢失问题。\n假设Agent端将所有数据发送到后端Collector处,A采样率设置为30%,B采样率为50%。\n假设有30%的数据,发送到A上,这些数据被全部正确接受并存储,极端情况(与期望的采样数据量相同)下,如果剩下20%待采样的数据发送到了B,这个时候一切都是正常的,如果这20%中有一部分数据被送到了A那么,这些数据将是被忽略的,由此就会造成数据丢失。\n二、业务调用链路监控 Service Topology监控 调用链路监控可以从两个角度去看待。我们先从整体上来认识一下我们所监控的系统。\n通过给服务添加探针并产生实际的调用之后,我们可以通过Skywalking的前端UI查看服务之间的调用关系。\n我们简单模拟一次服务之间的调用。新建两个服务,service-provider以及service-consumer,服务之间简单的通过Feign Client 来模拟远程调用。\n从图中可以看到:\n 有两个服务节点:provider \u0026amp; consumer 有一个数据库节点:localhost【mysql】 一个注册中心节点  consumer消费了provider提供出来的接口。\n一个系统的拓扑图让我们清晰的认识到系统之间的应用的依赖关系以及当前状态下的业务流转流程。细心的可能发现图示节点consumer上有一部分是红色的,红色是什么意思呢?\n红色代表当前流经consumer节点的请求有一断时间内是响应异常的。当节点全部变红的时候证明服务现阶段内就彻底不可用了。运维人员可以通过Topology迅速发现某一个服务潜在的问题,并进行下一步的排查并做到预防。\nSkywalking Trace监控 Skywalking通过业务调用监控进行依赖分析,提供给我们了服务之间的服务调用拓扑关系、以及针对每个endpoint的trace记录。\n我们在之前看到consumer节点服务中发生了错误,让我们一起来定位下错误是发生在了什么地方又是什么原因呢?\n在每一条trace的信息中都可以看到当前请求的时间、GloableId、以及请求被调用的时间。我们分别看一看正确的调用和异常的调用。\nTrace调用链路监控 图示展示的是一次正常的响应,这条响应总耗时19ms,它有4个span:\n span1 /getStore = 19ms 响应的总流转时间 span2 /demo2/stores = 14ms feign client 开始调用远程服务后的响应的总时间 span3 /stores = 14ms 接口服务响应总时间 span4 Mysql = 1ms 服务提供端查询数据库的时间  这里span2和span3的时间表现相同,其实是不同的,因为这里时间取了整。\n在每个Span中可以查看当前Span的相关属性。\n 组件类型: SpringMVC、Feign Span状态: false HttpMethod: GET Url: http://192.168.16.125:10002/demo2/stores  
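为了方便对照上面的 span 信息,这里补充一个与该示例类似的消费端调用代码示意(仅为示意:StoreClient、ConsumerController 等命名以及服务名 service-provider 均为根据上文示例推测的假设,实际请以自己的工程为准;同时假设应用是已开启 Feign 的 Spring Cloud 工程):

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

// 仅为示意:service-consumer 通过 Feign Client 远程调用 service-provider
// 说明:@FeignClient 的具体包路径取决于所用的 Spring Cloud 版本
@FeignClient(name = "service-provider")
interface StoreClient {
    // 对应上文 span 中的 Url:/demo2/stores
    @GetMapping("/demo2/stores")
    String stores();
}

@RestController
class ConsumerController {

    @Autowired
    private StoreClient storeClient;

    // 对应调用链入口端点 /getStore,SkyWalking 探针会自动为这次调用生成上面看到的各个 span
    @GetMapping("/getStore")
    public String getStore() {
        return storeClient.stores();
    }
}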
这是一次正常的请求调用Trace日志,可能我们并不关心正常的时候,毕竟一切正常不就是我们期待的么!\n我们再来看下,异常状态下我们的Trace以及Span又是什么样的呢。\n发生错误的调用链中Span中的is error标识变为true,并且在名为Logs的TAB中可以看到错误发生的具体原因。根据异常情况我们就可以轻松定位到影响业务的具体原因,从而快速定位问题,解决问题。\n通过Log我们看到连接被拒,那么可能是我们的网络出现了问题(可能性小,因为实际情况如果网络出现问题我们连这个trace都看不到了),也有可能是服务端配置问题无法正确建立连接。通过异常日志,我们迅速就找到了问题的关键。\n实际情况是,我把服务方停掉了,做了一次简单的模拟。可见,通过拓扑图示我们可以清晰的看到众多服务中哪个服务是出现了问题的,通过trace日志我们可以很快就定位到问题所在,在最短的时间内解决问题。\n三、服务性能指标监控 Skywalking还可以查看具体Service的性能指标,根据相关的性能指标可以分析系统的瓶颈所在并提出优化方案。\nSkywalking 性能监控 在服务调用拓扑图上点击相应的节点我们可以看到该服务的\n SLA: 服务可用性(主要是通过请求成功与失败次数来计算) CPM: 每分钟调用次数 Avg Response Time: 平均响应时间  从应用整体外部来看我们可以监测到应用在一定时间段内的\n 服务可用性指标SLA 每分钟平均响应数 平均响应时间 服务进程PID 服务所在物理机的IP、HostName、Operation System  Service JVM信息监控 还可以监控到Service运行时的CPU、堆内存、非堆内存使用率、以及GC情况。这些信息来源于JVM。注意这里的数据可不是机器本身的数据。\n四、服务告警 前文我们提到了通过查看拓扑图以及调用链路可以定位问题,可是运维人员又不可能一直盯着这些数据,那么我们就需要告警能力,在异常达到一定阈值的时候主动的提示我们去查看系统状态。\n在Sywalking 6.x版本中新增了对服务状态的告警能力。它通过webhook的方式让我们可以自定义我们告警信息的通知方式。诸如:邮件通知、微信通知、短信通知等。\nSkywalking 服务告警 先来看一下告警的规则配置。在alarm-settings.xml中可以配置告警规则,告警规则支持自定义。\n一份告警配置由以下几部分组成:\n service_resp_time_rule:告警规则名称 ***_rule (规则名称可以自定义但是必须以’_rule’结尾 indicator-name:指标数据名称: 定义参见http://t.cn/EGhfbmd op: 操作符: \u0026gt; , \u0026lt; , = 【当然你可以自己扩展开发其他的操作符】 threshold:目标值:指标数据的目标数据 如sample中的1000就是服务响应时间,配合上操作符就是大于1000ms的服务响应 period: 告警检查周期:多久检查一次当前的指标数据是否符合告警规则 counts: 达到告警阈值的次数 silence-period:忽略相同告警信息的周期 message:告警信息 webhooks:服务告警通知服务地址  Skywalking通过HttpClient的方式远程调用在配置项webhooks中定义的告警通知服务地址。\n了解了SW所传送的数据格式我们就可以对告警信息进行接收处理,实现我们需要的告警通知服务啦!\n我们将一个服务停掉,并将另外一个服务的某个对外暴露的接口让他休眠一定的时间。然后调用一定的次数观察服务的状态信息以及告警情况。\n总结 本文简单的通过skwaylking的配置来对skywlaking的功能进行一次初步的了解,对skwaylking新提出的概念以及新功能进行简单的诠释,方便大家了解和使用。通过使用APM工具,可以让我们方便的查看微服务架构中系统瓶颈以及性能问题等。\n精选提问 问1:想问问选型的时候用pinpoint还是SK好?\n答:选型问题\n 要结合具体的业务场景, 比如你的代码运行环境 是java、php、net还是什么。 pinpoint在安装部署上要比skywalking略微复杂 pinpoint和sw支持的组件列表是不同的。 https://github.com/apache/incubator-skywalking/blob/master/docs/en/setup/service-agent/java-agent/Supported-list.md你可以参照这里的支持列表对比下pinpoint的支持对象做一个简单对比。 sw经过测试在并发量较高的情况下比pinpoint的吞吐量更好一些。  问2:有没有指标统计,比如某个url 的top10 请求、响应最慢的10个请求?某个服务在整个链条中的耗时占比?\n答:1.sw自带有响应最慢的请求top10统计针对所有的endpoint的统计。 2.针对每个url的top10统计,sw本身没有做统计,数据都是现成的通过简单的检索就可以搜到你想要的结果。 3.没有具体的耗时占比,但是有具体总链路时间统计以及某个服务的耗时统计,至于占比自己算吧,可以看ppt中的调用链路监控的span时间解释。\n问3:能不能具体说一下在你们系统中的应用?\n答:EOS8LA版本中,我们整合sw对应用提供拓扑、调用链路、性能指标的监控、并在sw数据的基础上增加系统的维度。 当服务数很庞大的时候,整体的拓扑其实就是一张密密麻麻的蜘蛛网。我们可以通过系统来选择具体某个系统下的应用。 8LA中SW是5.0.0alpha版本,受限于sw功能,我们并没有提供告警能力,这在之后会是我们的考虑目标。\n问4:业务访问日志大概每天100G,kubernetes 环境中部署,使用稳定吗?\n答:监控数据没有长时间的存储必要,除非你有特定的需求。它有一定的时效性,你可以设置ttl自动清除过时信息。100g,es集群还是能轻松支撑的。\n问5:和pinpoint相比有什么优势吗?\n答:\n 部署方式、使用方式简单 功能特性支持的更多 高并发性能会更好一些  问6:skywalking的侵入式追踪功能方便进行单服务链的服务追踪。但是跨多台服务器多项目的整体服务链追踪是否有整体设计考虑?\n答:sw本身特性就是对分布式系统的追踪,他是无侵入式的。无关你的应用部署在多少台服务器上。\n问7:应用在加上代理之后性能会下降。请问您有什么解决方法吗?\n答:性能下降是在所难免的,但是据我了解,以及官方的测试,他的性能影响是很低的。这是sw的测试数据供你参考。 https://skywalkingtest.github.io/Agent-Benchmarks/README_zh.html。\n问8:有异构系统需求的话可以用sw吗?\n答:只要skywalking的探针支持的应该都是可以的。\n问9:sw对于商用的web中间件,如bes、tongweb、websphere、weblogic的支持如何?\n答:商业组件支持的比较少,因为涉及到相关license的问题,sw项目组需要获得他们的支持来进行数据上报,据我了解,支持不是很好。\n","excerpt":"作者:赵瑞栋 原文地址  引言 微服务框架落地后,分布式部署架构带来的问题就会迅速凸显出来。服务之间的相互调用过程中,如果业务出现错误或者异常,如何快速定位问题?如何跟踪业务调用链路?如何分析解决业务 …","ref":"/zh/2019-01-03-monitor-microservice/","title":"SkyWalking 微服务监控分析"},{"body":"","excerpt":"","ref":"/zh_tags/elasticsearch/","title":"ElasticSearch"},{"body":"SkyWalking 依赖 elasticsearch 集群,如果 elasticsearch 安装有 x-pack 插件的话,那么就会存在一个 Basic 认证,导致 skywalking 无法调用 elasticsearch, 解决方法是使用 nginx 做代理,让 nginx 来做这个 Basic 认证,那么这个问题就自然解决了。\n方法如下:\n 安装 nginx   yum install -y nginx\n 配置 nginx  
server { listen 9200 default_server; server_name _; location / { proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_pass http://localhost:9200; #Basic字符串就是使用你的用户名(admin),密码(12345)编码后的值 #注意:在进行Basic加密的时候要使用如下格式如:admin:123456 注意中间有个冒号 proxy_set_header Authorization \u0026#34;Basic YWRtaW4gMTIzNDU2\u0026#34;; } } 验证   curl localhost:9200\n { \u0026#34;name\u0026#34; : \u0026#34;Yd0rCp9\u0026#34;, \u0026#34;cluster_name\u0026#34; : \u0026#34;es-cn-4590xv9md0009doky\u0026#34;, \u0026#34;cluster_uuid\u0026#34; : \u0026#34;jAPLrqY5R6KWWgHnGCWOAA\u0026#34;, \u0026#34;version\u0026#34; : { \u0026#34;number\u0026#34; : \u0026#34;6.3.2\u0026#34;, \u0026#34;build_flavor\u0026#34; : \u0026#34;default\u0026#34;, \u0026#34;build_type\u0026#34; : \u0026#34;tar\u0026#34;, \u0026#34;build_hash\u0026#34; : \u0026#34;053779d\u0026#34;, \u0026#34;build_date\u0026#34; : \u0026#34;2018-07-20T05:20:23.451332Z\u0026#34;, \u0026#34;build_snapshot\u0026#34; : false, \u0026#34;lucene_version\u0026#34; : \u0026#34;7.3.1\u0026#34;, \u0026#34;minimum_wire_compatibility_version\u0026#34; : \u0026#34;5.6.0\u0026#34;, \u0026#34;minimum_index_compatibility_version\u0026#34; : \u0026#34;5.0.0\u0026#34; }, \u0026#34;tagline\u0026#34; : \u0026#34;You Know, for Search\u0026#34; } 看到如上结果那么恭喜你成功了。\n","excerpt":"SkyWalking 依赖 elasticsearch 集群,如果 elasticsearch 安装有 x-pack 插件的话,那么就会存在一个 Basic 认证,导致 skywalking …","ref":"/zh/2019-01-02-skywalking-elasticsearch-basic/","title":"关于 ElasticSearch 因 basic 认证导致 SkyWalking 无法正常调用接口问题"},{"body":" 作者: Wu Sheng, tetrate, SkyWalking original creator GitHub, Twitter, Linkedin 翻译: jjlu521016  背景 在当前的微服务架构中分布式链路追踪是很有必要的一部分,但是对于一些用户来说如何去理解和使用分布式链路追踪的相关数据是不清楚的。 这个博客概述了典型的分布式跟踪用例,以及Skywalking的V6版本中新的可视化功能。我们希望新的用户通过这些示例来更好的理解。\n指标和拓扑图 跟踪数据支持两个众所周知的分析特性:指标和拓扑图\n指标: 每个service, service instance, endpoint的指标都是从跟踪中的入口span派生的。指标代表响应时间的性能。所以可以有一个平均响应时间,99%的响应时间,成功率等。它们按service, service instance, endpoint进行分解。\n拓扑图: 拓扑表示服务之间的链接,是分布式跟踪最有吸引力的特性。拓扑结构允许所有用户理解分布式服务关系和依赖关系,即使它们是不同的或复杂的。这一点很重要,因为它为所有相关方提供了一个单一的视图,无论他们是开发人员、设计者还是操作者。\n这里有一个拓扑图的例子包含了4个项目,包括kafka和两个外部依赖。\n-在skywalking的可选择UI0RocketBot的拓扑图-\nTrace 在分布式链路追踪系统中,我们花费大量资源(CPU、内存、磁盘和网络)来生成、传输和持久跟踪数据。让我们试着回答为什么要这样做?我们可以用跟踪数据回答哪些典型的诊断和系统性能问题?\nSkywalking v6包含两种追踪视图:\n   TreeMode: 第一次提供,帮助您更容易识别问题。    ListMode: 常规的时间线视图,通常也出现在其他跟踪系统中,如Zipkin。    发生错误 在trace视图,最简单的部分是定位错误,可能是由代码异常或网络故障引起的。通过span详情提供的细节,ListMode和TreeMode都能够找到错误 -ListMode 错误span-\n-TreeMode 错误span-\n慢span 一个高优先级的特性是识别跟踪中最慢的span。这将使用应用程序代理捕获的执行持续时间。在旧的ListMode跟踪视图中,由于嵌套,父span几乎总是包括子span的持续时间。换句话说,一个缓慢的span通常会导致它的父节点也变慢,在Skywalking 6中,我们提供了 最慢的前5个span 过滤器来帮助你您直接定位span。\n-最慢的前5个span-\n太多子span 在某些情况下,个别持续时间很快,但跟踪速度仍然很慢,如: -没有慢span的追踪-\n如果要了解根问题是否与太多操作相关,请使用子范围号的Top 5 of children span number,筛选器显示每个span的子级数量,突出显示前5个。 -13个数据库访问相关的span-\n在这个截图中,有一个包含13个子项的span,这些子项都是数据库访问。另外,当您看到跟踪的概述时,这个2000ms跟踪的数据库花费了1380ms。 -1380ms花费在数据库访问-\n在本例中,根本原因是数据库访问太多。这在其他场景中也很常见,比如太多的RPC或缓存访问。\n链路深度 跟踪深度也与延迟有关。像太多子span的场景一样,每个span延迟看起来不错,但整个链路追踪的过程很慢。 -链路深度-\n上图所示,最慢的span小鱼500ms,对于2000毫秒的跟踪来说,速度并不太慢。当您看到第一行时,有四种不同的颜色表示这个分布式跟踪中涉及的四个services。每一个都需要100~400ms,这四个都需要近2000ms,从这里我们知道这个缓慢的跟踪是由一个序列中的3个RPC造成的。\n结束语 分布式链路追踪和APM 工具帮助我们确定造成问题的根源,允许开发和操作团队进行相应的优化。我们希望您喜欢这一点,并且喜欢Apache Skywalking和我们的新链路追踪可视化界面。如果你喜欢的话,在github上面给我们加start来鼓励我们\nSkywakling 6计划在2019年的1月底完成release。您可以通过以下渠道联系项目团队成员\n 关注 skywalking推特 订阅邮件:dev@skywalking.apache.org。发送邮件到 dev-subscribe@kywalking.apache.org 来订阅. 
加入Gitter聊天室  ","excerpt":"作者: Wu Sheng, tetrate, SkyWalking original creator GitHub, Twitter, Linkedin 翻译: jjlu521016  背景 在当前的 …","ref":"/zh/2019-01-02-understand-trace-trans2cn/","title":"更容易理解将要到来的分布式链路追踪 6.0GA (翻译)"},{"body":"Background Distributed tracing is a necessary part of modern microservices architecture, but how to understand or use distributed tracing data is unclear to some end users. This blog overviews typical distributed tracing use cases with new visualization features in SkyWalking v6. We hope new users will understand more through these examples.\nMetric and topology Trace data underpins in two well known analysis features: metric and topology\nMetric of each service, service instance, endpoint are derived from entry spans in trace. Metrics represent response time performance. So, you could have average response time, 99% response time, success rate, etc. These are broken down by service, service instance, endpoint.\nTopology represents links between services and is distributed tracing\u0026rsquo;s most attractive feature. Topologies allows all users to understand distributed service relationships and dependencies even when they are varied or complex. This is important as it brings a single view to all interested parties, regardless of if they are a developer, designer or operator.\nHere\u0026rsquo;s an example topology of 4 projects, including Kafka and two outside dependencies.\nTopology in SkyWalking optional UI, RocketBot\nTrace In a distributed tracing system, we spend a lot of resources(CPU, Memory, Disk and Network) to generate, transport and persistent trace data. Let\u0026rsquo;s try to answer why we do this? What are the typical diagnosis and system performance questions we can answer with trace data?\nSkyWalking v6 includes two trace views:\n TreeMode: The first time provided. Help you easier to identify issues. ListMode: Traditional view in time line, also usually seen in other tracing system, such as Zipkin.  Error occurred In the trace view, the easiest part is locating the error, possibly caused by a code exception or network fault. Both ListMode and TreeMode can identify errors, while the span detail screen provides details.\nListMode error span\nTreeMode error span\nSlow span A high priority feature is identifying the slowest spans in a trace. This uses execution duration captured by application agents. In the old ListMode trace view, parent span almost always includes the child span\u0026rsquo;s duration, due to nesting. In other words, a slow span usually causes its parent to also become slow. In SkyWalking 6, we provide Top 5 of slow span filter to help you locate the spans directly.\nTop 5 slow span\nThe above screenshot highlights the top 5 slow spans, excluding child span duration. Also, this shows all spans' execution time, which helps identify the slowest ones.\nToo many child spans In some cases, individual durations are quick, but the trace is still slow, like this one:\nTrace with no slow span\nTo understand if the root problem is related to too many operations, use Top 5 of children span number. This filter shows the amount of children each span has, highlighting the top 5.\n13 database accesses of a span\nIn this screenshot, there is a span with 13 children, which are all Database accesses. Also, when you see overview of trace, database cost 1380ms of this 2000ms trace.\n1380ms database accesses\nIn this example, the root cause is too many database accesses. 
This is also typical in other scenarios like too many RPCs or cache accesses.\nTrace depth Trace depth is also related to latency. Like the too many child spans scenario, each span's latency looks good, but the whole trace is slow.\nTrace depth\nHere, the slowest spans are less than 500ms, which are not too slow for a 2000ms trace. When you see the first line, there are four different colors representing four services involved in this distributed trace. Every one of them costs 100~400ms. Across all four, that is nearly 2000ms. From here, we know this slow trace is caused by 3 RPCs in a serial sequence.\nAt the end Distributed tracing and APM tools help users identify root causes, allowing development and operation teams to optimize accordingly. We hope you enjoyed this, and love Apache SkyWalking and our new trace visualization. If so, give us a star on GitHub to encourage us.\nSkyWalking 6 is scheduled to be released at the end of January 2019. You can contact the project team through the following channels:\n Follow SkyWalking on Twitter Subscribe to the mailing list: dev@skywalking.apache.org. Send to dev-subscribe@skywalking.apache.org to subscribe to the mailing list. Join the Gitter room.  ","excerpt":"Background Distributed tracing is a necessary part of modern microservices architecture, but how to …","ref":"/blog/2019-01-01-understand-trace/","title":"Understand distributed trace easier in the incoming 6-GA"},{"body":"6.0.0-beta release. Go to the downloads page to find release tars.\nKey updates\n Bugs fixed, close to GA New protocols provided, old still compatible. Spring 5 supported MySQL and TiDB as optional storage  ","excerpt":"6.0.0-beta release. Go to the downloads page to find release tars.\nKey updates\n Bugs fixed, close to GA …","ref":"/events/release-apache-skywalking-apm-6-0-0-beta/","title":"Release Apache SkyWalking APM 6.0.0-beta"},{"body":"Based on his contributions. 
Including created RocketBot as our secondary UI, new website and very …","ref":"/events/welcome-yao-wang-as-a-new-ppmc/","title":"Welcome Yao Wang as a new PPMC"},{"body":"导读  SkyWalking 中 Java 探针是使用 JavaAgent 的两大字节码操作工具之一的 Byte Buddy(另外是 Javassist)实现的。项目还包含.Net core 和 Nodejs 自动探针,以及 Service Mesh Istio 的监控。总体上,SkyWalking 是一个多语言,多场景的适配,特别为微服务、云原生和基于容器架构设计的可观测性分析平台(Observability Analysis Platform)。 本文基于 SkyWalking 5.0.0-RC2 和 Byte Buddy 1.7.9 版本,会从以下几个章节,让大家掌握 SkyWalking Java 探针的使用,进而让 SkyWalking 在自己公司中的二次开发变得触手可及。  Byte Buddy 实现 JavaAgent 项目 迭代 JavaAgent 项目的方法论 SkyWalking agent 项目如何 Debug SkyWalking 插件开发实践   文章底部有 SkyWalking 和 Byte Buddy 相应的学习资源。  Byte Buddy 实现  首先如果你对 JavaAgent 还不是很了解可以先百度一下,或在公众号内看下《JavaAgent 原理与实践》简单入门下。 SpringMVC 分发请求的关键方法相信已经不用我在赘述了,那我们来编写 Byte Buddy JavaAgent 代码吧。  public class AgentMain { public static void premain(String agentOps, Instrumentation instrumentation) { new AgentBuilder.Default() .type(ElementMatchers.named(\u0026#34;org.springframework.web.servlet.DispatcherServlet\u0026#34;)) .transform((builder, type, classLoader, module) -\u0026gt; builder.method(ElementMatchers.named(\u0026#34;doDispatch\u0026#34;)) .intercept(MethodDelegation.to(DoDispatchInterceptor.class))) .installOn(instrumentation); } }  编写 DispatcherServlet doDispatch 拦截器代码(是不是跟 AOP 如出一辙)  public class DoDispatchInterceptor { @RuntimeType public static Object intercept(@Argument(0) HttpServletRequest request, @SuperCall Callable\u0026lt;?\u0026gt; callable) { final StringBuilder in = new StringBuilder(); if (request.getParameterMap() != null \u0026amp;\u0026amp; request.getParameterMap().size() \u0026gt; 0) { request.getParameterMap().keySet().forEach(key -\u0026gt; in.append(\u0026#34;key=\u0026#34; + key + \u0026#34;_value=\u0026#34; + request.getParameter(key) + \u0026#34;,\u0026#34;)); } long agentStart = System.currentTimeMillis(); try { return callable.call(); } catch (Exception e) { System.out.println(\u0026#34;Exception :\u0026#34; + e.getMessage()); return null; } finally { System.out.println(\u0026#34;path:\u0026#34; + request.getRequestURI() + \u0026#34; 入参:\u0026#34; + in + \u0026#34; 耗时:\u0026#34; + (System.currentTimeMillis() - agentStart)); } } }  resources/META-INF/MANIFEST.MF  Manifest-Version: 1.0 Premain-Class: com.z.test.agent.AgentMain Can-Redefine-Classes: true  pom.xml 文件  dependencies +net.bytebuddy.byte-buddy +javax.servlet.javax.servlet-api *scope=provided plugins +maven-jar-plugin *manifestFile=src/main/resources/META-INF/MANIFEST.MF +maven-shade-plugin *include:net.bytebuddy:byte-buddy:jar: +maven-compiler-plugin  小结:没几十行代码就完成了,通过 Byte Buddy 实现应用组件 SpringMVC 记录请求路径、入参、执行时间 JavaAgent 项目,是不是觉得自己很优秀。  持续迭代 JavaAgent  本章节主要介绍 JavaAgent 如何 Debug,以及持续集成的方法论。 首先我的 JavaAgent 项目目录结构如图所示: 应用项目是用几行代码实现的 SpringBootWeb 项目:  @SpringBootApplication(scanBasePackages = {\u0026#34;com\u0026#34;}) public class TestBootWeb { public static void main(String[] args) { SpringApplication.run(TestBootWeb.class, args); } @RestController public class ApiController { @PostMapping(\u0026#34;/ping\u0026#34;) public String ping(HttpServletRequest request) { return \u0026#34;pong\u0026#34;; } } }  下面是关键 JavaAgent 项目如何持续迭代与集成:  VM options增加:-JavaAgent:{$HOME}/Code/github/z_my_test/test-agent/target/test-agent-1.0-SNAPSHOT.jar=args Before launch 在Build之前增加: Working directory:{$HOME}/Code/github/incubator-skywalking Command line:-T 1C -pl test-agent -am clean package -Denforcer.skip=true -Dmaven.test.skip=true -Dmaven.compile.fork=true  小结:看到这里的将 JavaAgent 持续迭代集成方法,是不是瞬间觉得自己手心已经发痒起来,很想编写一个自己的 agent 
项目了呢,等等还有一个好消息:test-demo 这 10 几行的代码实现的 Web 服务,居然有 5k 左右的类可以使用 agent 增强。 注意 mvn 编译加速的命令是 maven3 + 版本以上才支持的哈。  SkyWalking Debug  峰回路转,到了文章的主题《SkyWalking 之高级用法》的正文啦。首先,JavaAgent 项目想 Debug,还需要将 agent 代码与接入 agent 项目至少在同一个工作空间内,网上方法有很多,这里我推荐大家一个最简单的方法。File-\u0026gt;New-\u0026gt;Module from Exisiting Sources… 引入 skywalking-agent 源码即可 详细的 idea 编辑器配置: 优化 SkyWalking agent 编译时间,我的集成时间优化到 30 秒左右:  VM options增加:-JavaAgent:-JavaAgent:{$HOME}/Code/github/incubator-skywalking/skywalking-agent/skywalking-agent.jar:不要用dist里面的skywalking-agent.jar,具体原因大家可以看看源码:apm-sniffer/apm-agent/pom.xml中的maven插件的使用。 Before launch 在Build之前增加: Working directory:{$HOME}/Code/github/incubator-skywalking Command line:-T 1C -pl apm-sniffer/apm-sdk-plugin -amd clean package -Denforcer.skip=true -Dmaven.test.skip=true -Dmaven.compile.fork=true: 这里我针对插件包,因为紧接着下文要开发插件 另外根pom注释maven-checkstyle-plugin也可加速编译 kob 之 SkyWalking 插件编写  kob(贝壳分布式作业调度框架)是贝壳找房项目微服务集群中的基础组件,通过编写贝壳分布式作业调度框架的 SkyWalking 插件,可以实时收集作业调度任务的执行链路信息,从而及时得到基础组件的稳定性,了解细节可点击阅读《贝壳分布式调度框架简介》。想详细了解 SkyWalking 插件编写可在文章底部参考链接中,跳转至对应的官方资源,好话不多说,代码一把唆起来。 apm-sdk-plugin pom.xml 增加自己的插件 model  \u0026lt;artifactId\u0026gt;apm-sdk-plugin\u0026lt;/artifactId\u0026gt; \u0026lt;modules\u0026gt; \u0026lt;module\u0026gt;kob-plugin\u0026lt;/module\u0026gt; ... \u0026lt;modules\u0026gt;  resources.skywalking-plugin.def 增加自己的描述  kob=org.apache.skywalking.apm.plugin.kob.KobInstrumentation  在 SkyWalking 的项目中,通过继承 ClassInstanceMethodsEnhancePluginDefine 可以定义需要拦截的类和增强的方法,编写作业调度方法的 instrumentation  public class KobInstrumentation extends ClassInstanceMethodsEnhancePluginDefine { private static final String ENHANCE_CLASS = \u0026#34;com.ke.kob.client.spring.core.TaskDispatcher\u0026#34;; private static final String INTERCEPT_CLASS = \u0026#34;org.apache.skywalking.apm.plugin.kob.KobInterceptor\u0026#34;; @Override protected ClassMatch enhanceClass() { return NameMatch.byName(ENHANCE_CLASS); } @Override protected ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return null; } @Override protected InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[] { new InstanceMethodsInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher() { return named(\u0026#34;dispatcher1\u0026#34;); } @Override public String getMethodsInterceptor() { return INTERCEPT_CLASS; } @Override public boolean isOverrideArgs() { return false; } } }; } }  通过实现 InstanceMethodsAroundInterceptor 后,定义 beforeMethod、afterMethod 和 handleMethodException 的实现方法,可以环绕增强指定目标方法,下面自定义 interceptor 实现 span 的跟踪(这里需要注意 SkyWalking 中 span 的生命周期,在 afterMethod 方法中结束 span)  public class KobInterceptor implements InstanceMethodsAroundInterceptor { @Override public void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable { final ContextCarrier contextCarrier = new ContextCarrier(); com.ke.kob.client.spring.model.TaskContext context = (TaskContext) allArguments[0]; CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(JSON.toJSONString(context.getUserParam())); } AbstractSpan span = ContextManager.createEntrySpan(\u0026#34;client:\u0026#34;+allArguments[1]+\u0026#34;,task:\u0026#34;+context.getTaskKey(), contextCarrier); span.setComponent(ComponentsDefine.TRANSPORT_CLIENT); SpanLayer.asRPCFramework(span); } @Override public Object afterMethod(EnhancedInstance objInst, Method 
method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable { ContextManager.stopSpan(); return ret; } @Override public void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t) { } }  实现效果,将操作名改成任务执行节点 + 任务执行方法,实现 kob 的 SkyWalking 的插件编写,加上报警体系,可以进一步增加公司基础组件的稳定性。  参考链接  Apache SkyWalking Byte Buddy(runtime code generation for the Java virtual machine)  ","excerpt":"导读  SkyWalking 中 Java 探针是使用 JavaAgent 的两大字节码操作工具之一的 Byte Buddy(另外是 Javassist)实现的。项目还包含.Net core …","ref":"/zh/2018-12-21-skywalking-apm-sniffer-beginning/","title":"SkyWalking apm-sniffer 原理学习与插件编写"},{"body":"搭建调试环境 阅读 SkyWalking 源码,从配置调试环境开始。\n一定一定一定不要干读代码,而是通过调试的方式。\n 01 通过 Skywalking-5.x 版本的源码构建并运行 👉:哔哩哔哩 | 腾讯视频 02 通过 Skywalking-6.x 版本的源码构建并运行 👉:哔哩哔哩 | 腾讯视频 03 Java 应用(探针)接入 Skywalking[6.x] 👉:哔哩哔哩 | 腾讯视频  SkyWalking 3.X 源码解析合集 虽然是基于 3.X 版本的源码解析,但是对于阅读 SkyWalking Java Agent 和插件部分,同样适用。\n对于 SkyWalking Collector 部分,可以作为一定的参考。\n 《SkyWalking 源码分析 —— 调试环境搭建》 《SkyWalking 源码分析 —— Agent 初始化》 《SkyWalking 源码分析 —— Agent 插件体系》 《SkyWalking 源码分析 —— Collector 初始化》 《SkyWalking 源码分析 —— Collector Cluster 集群管理》 《SkyWalking 源码分析 —— Collector Client Component 客户端组件》 《SkyWalking 源码分析 —— Collector Server Component 服务器组件》 《SkyWalking 源码分析 —— Collector Jetty Server Manager》 《SkyWalking 源码分析 —— Collector gRPC Server Manager》 《SkyWalking 源码分析 —— Collector Naming Server 命名服务》 《SkyWalking 源码分析 —— Collector Queue 队列组件》 《SkyWalking 源码分析 —— Collector Storage 存储组件》 《SkyWalking 源码分析 —— Collector Streaming Computing 流式处理(一)》 《SkyWalking 源码分析 —— Collector Streaming Computing 流式处理(二)》 《SkyWalking 源码分析 —— Collector Cache 缓存组件》 《SkyWalking 源码分析 —— Collector Remote 远程通信服务》 《SkyWalking 源码分析 —— DataCarrier 异步处理库》 《SkyWalking 源码分析 —— Agent Remote 远程通信服务》 《SkyWalking 源码分析 —— 应用于应用实例的注册》 《SkyWalking 源码分析 —— Agent DictionaryManager 字典管理》 《SkyWalking 源码分析 —— Agent 收集 Trace 数据》 《SkyWalking 源码分析 —— Agent 发送 Trace 数据》 《SkyWalking 源码分析 —— Collector 接收 Trace 数据》 《SkyWalking 源码分析 —— Collector 存储 Trace 数据》 《SkyWalking 源码分析 —— JVM 指标的收集与存储》 《SkyWalking 源码分析 —— 运维界面(一)之应用视角》 《SkyWalking 源码分析 —— 运维界面(二)之应用实例视角》 《SkyWalking 源码分析 —— 运维界面(三)之链路追踪视角》 《SkyWalking 源码分析 —— 运维界面(四)之操作视角》 《SkyWalking 源码分析 —— @Trace 注解想要追踪的任何方法》 《SkyWalking 源码分析 —— traceId 集成到日志组件》 《SkyWalking 源码分析 —— Agent 插件(一)之 Tomcat》 《SkyWalking 源码分析 —— Agent 插件(二)之 Dubbo》 《SkyWalking 源码分析 —— Agent 插件(三)之 SpringMVC》 《SkyWalking 源码分析 —— Agent 插件(四)之 MongoDB》  SkyWalking 6.X 源码解析合集  《SkyWalking 6.x 源码分析 —— 调试环境搭建》  ","excerpt":"搭建调试环境 阅读 SkyWalking 源码,从配置调试环境开始。\n一定一定一定不要干读代码,而是通过调试的方式。\n 01 通过 Skywalking-5.x 版本的源码构建并运行 👉:哔哩哔哩 | …","ref":"/zh/2018-12-21-skywalking-source-code-read/","title":"SkyWalking 源码解析合集"},{"body":"","excerpt":"","ref":"/zh_tags/source-code/","title":"Source Code"},{"body":"版本选择 我们采用的是 5.0.0-RC2 的版本,SkyWalking 的版本信息可以参考 https://github.com/apache/incubator-skywalking/blob/5.x/CHANGES.md\n那么为什么我们没有采用 5.1.0 版本呢,这是因为我们公司内部需要支持 es x-pack,但是在官方发布里面,没有支持 xpack 的版本。\n在 Apache SkyWalking 官方文档 https://github.com/CharlesMaster/incubator-skywalking/tree/master/docs/others/cn 中有提到,SkyWalking 5.x 仍受社区支持。\n对于用户计划从 5.x 升级到 6.x,您应该知道关于有一些概念的定义的变更。最重要的两个改变了的概念是:\n Application(在 5.x 中)更改为 Service(在 6.x 中),Application Instance 也更改为 Service Instance。 Service(在 5.x 中)更改为 Endpoint(在 6.x 中)。  图文详解 Apache SkyWalking 的监控界面由 Monitor 和 Trace 两者构成,Monitor 菜单又包括 Dashbord、Topology、Application、Service、Alarm 五个子菜单构成。本文就是围绕这些菜单分别逐一进行介绍。\nMonitor 当用户通过 SkyWalking 登陆界面使用用户名、密码登陆以后,就会默认进入到 SkyWalking 的 
Monitor 下的 Dashboard 界面\nDashboard 下图就是用户登陆之后都会看到的关键 Dashboard 页面,在这个页面的下方的关键指标,图中都做了详细的解释。\n上图中 app 需要强调的是,52 个 app 并不代表 52 个应用,比如 paycenter 有两台 paycenter1 和 paycenter2 就算了 2 个 app,当然还有一些应用是 3 个以上的。在我们公司,paycenter1、paycenter2 这些运维都和我们跳板机管理平台上的名称设置的一样,约定大于配置,开发人员可以更加便捷的排查问题。\n 再次修正一下,关于 dashboard 页面的 app 数,语言类探针,是探针的 app_code 来决定的。比如我们公司的线上配置就是 agent.application_code=auth-center-1\n 上图中需要解释两个概念:\n cpm 代表每分钟请求次数 SLA=(TRANSACTION_CALLS- TRANSACTION_ERROR_CALLS ) * 10000 ) / TRANSACTION_CALLS  该页面主要支持四个跳转:\n一、在上图中,App 板块上的帮助选项是可以直接跳转到 Application 监控页面的。 二、 Service 板块上的帮助选项是可以直接跳转到 Service 监控页面的。\n三、 Slow Service 列表中的每一个慢服务点击以后都会进入到其专项的 Service 监控页面。\n四、 Application Throughput 列表中的每一个 Application 点击以后也都是可以进入到其专项的 Application 监控页面。\n 关于 Application 和 Service 的详细介绍我们后续会展开\n 在 Dashboard 的页面上部分,还有一个选择功能模块: 左侧部分可以定期 refresh Dashboard 的数据,右侧则可以调整整体的查询区间。\nTopology 点击 Monitor 菜单下的 Topology 你会看到下面这张拓扑图\n当然这张图太过于夸张了,如果接入 SkyWalking 的应用并不是很多,会如下图所示: 左侧的三个小按钮可以调整你的视图,支持拖拽。右侧可以输入你所关心的应用名。比如我们输入一个支付和订单两个应用,左侧的拓扑图会变得更加清晰:\n另外,上图中的绿色圆圈都是可以点击的,如果你点击以后,还会出现节点信息: Application 点击 Monitor 菜单下的 Application 你会看到下面这张图,这张图里你可以看到的东西都做了注解。\n这张图里有一个惊喜,就是如果你点开 More Server Details,你可以看到更多的信息\n是的,除了 Host、IPv4、Pid、OS 以外,你还可以看到 CPU、Heap、Non-Heap、GC(Young GC、Old GC)等详细监控信息。\nService 点击 Monitor 菜单下的 Service 你会看到下面这张图,这张图里你可以看到的同样都做了注解。 关于 Dependency Map 这张图我们再补充一下,鼠标悬停可以看到每个阶段的执行时间,这是 Service 下的功能 我们点开图中该图中 Top 20 Slow Traces 下面的被我马赛克掉的 trace 的按钮框,可以看到如下更加详细的信息:\n这些信息可以帮助我们知道每一个方法在哪个阶段那个具体实现耗时了多久。\n如上图所示,每一行基本都是可以打开的,每一行都包含了 Tags、Logs 等监控内容\nAlarm 点击 Monitor 菜单下的 Alarm 你会看到告警菜单。目前 5.X 版本的还没有接入邮件、短信等告警方式,后续 6 支持 webhook,用户可以自己去接短信和邮件。\n告警内容中你可以看到 Applicaion、Server 和 Service 三个层面的告警内容\nTrace Trace 是一个非常实用的功能,用户可以根据精确的 TraceId 去查找\n也可以设定时间段去查找\n我在写使用手册时候,非常巧的是,看到了上图三起异常,于是我们往下拉列表看到了具体的数据\n点击进去,我们可以看到具体的失败原因 当然用户也可以直接将 Trace State 调整为 Error 级别进行查询\n再回顾一遍 一、首先我们进入首页:\n二、点击一下首页的 Slow Service 的 projectC,可以看到如下信息:\n三、如果点击首页的 Appliation Throughput 中的 projectD,可以看到如下信息:\n四、继续点进去右下角的这个 slow service 里的 Consumer,我们可以看到下图:\n参考资料  https://twitter.com/AsfSkyWalking/status/1013616673218179072 https://twitter.com/AsfSkyWalking/status/1013617100143800320  ","excerpt":"版本选择 我们采用的是 5.0.0-RC2 的版本,SkyWalking …","ref":"/zh/2018-12-18-apache-skywalking-5-0-userguide/","title":"Apache SkyWalking 5.0 中文版图文详解使用手册"},{"body":"","excerpt":"","ref":"/zh_tags/web-ui/","title":"Web UI"},{"body":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome aboard.\n","excerpt":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome …","ref":"/events/welcome-yixiong-cao-as-a-new-committer/","title":"Welcome Yixiong Cao as a new committer"},{"body":"Original link, Tetrate.io blog\nContext The integration of SkyWalking and Istio Service Mesh yields an essential open-source tool for resolving the chaos created by the proliferation of siloed, cloud-based services.\nApache SkyWalking is an open, modern performance management tool for distributed services, designed especially for microservices, cloud native and container-based (Docker, K8s, Mesos) architectures. We at Tetrate believe it is going to be an important project for understanding the performance of microservices. The recently released v6 integrates with Istio Service Mesh and focuses on metrics and tracing. It natively understands the most common language runtimes (Java, .Net, and NodeJS). 
With its new core code, SkyWalking v6 also supports Istio telemetry data formats, providing consistent analysis, persistence, and visualization.\nSkyWalking has evolved into an Observability Analysis Platform that enables observation and monitoring of hundreds of services all at once. It promises solutions for some of the trickiest problems faced by system administrators using complex arrays of abundant services: identifying why and where a request is slow, distinguishing normal from deviant system performance, comparing apples-to-apples metrics across apps regardless of programming language, and attaining a complete and meaningful view of performance.\nSkyWalking History Launched in China by Wu Sheng in 2015, SkyWalking started as just a distributed tracing system, like Zipkin, but with auto instrumentation from a Java agent. This enabled JVM users to see distributed traces without any change to their source code. In the last two years, it has been used for research and production by more than 50 companies. With its expanded capabilities, we expect to see it adopted more globally.\nWhat\u0026rsquo;s new Service Mesh Integration Istio has picked up a lot of steam as the framework of choice for distributed services. Based on all the interest in the Istio project, and community feedback, some SkyWalking (P)PMC members decided to integrate with Istio Service Mesh to move SkyWalking to a higher level.\nSo now you can use SkyWalking to get metrics and understand the topology of your applications. This works not just for Java, .NET and Node using our language agents, but also for microservices running under the Istio service mesh. You can get a full topology of both kinds of applications.\nObservability analysis platform With its roots in tracing, SkyWalking is now transitioning into an open-standards based Observability Analysis Platform, which means the following:\n It can accept different kinds and formats of telemetry data from a service mesh, like Istio telemetry. Its agents support various popular software technologies and frameworks like Tomcat, Spring, Kafka. The whole supported framework list is here. It can accept data from other compliant sources like Zipkin-formatted traces reported from Zipkin, Jaeger, or OpenCensus clients.  SkyWalking is logically split into four parts: Probes, Platform Backend, Storage and UI:\nThere are two kinds of probes:\n Language agents or SDKs following SkyWalking across-thread propagation formats and trace formats, run in the user’s application process. The Istio mixer adaptor, which collects telemetry from the Service Mesh.  The platform backend provides gRPC and RESTful HTTP endpoints for all SkyWalking-supported trace and metric telemetry data. For example, you can stream these metrics into an analysis system.\nStorage supports multiple implementations such as ElasticSearch, H2 (alpha), MySQL, and Apache ShardingSphere for MySQL Cluster. TiDB will be supported in the next release.\nSkyWalking’s built-in UI with a GraphQL endpoint for data allows intuitive, customizable integration.\nSome examples of SkyWalking’s UI:\n Observe a Spring app using the SkyWalking JVM-agent   Observe on Istio without any agent, no matter what language the service is written in   See fine-grained metrics like request/Call per Minute, P99/95/90/75/50 latency, avg response time, heatmap   Service dependencies and metrics  Service Focused At Tetrate, we are focused on discovery, reliability, and security of your running services. 
This is why we are embracing SkyWalking, which makes service performance observable.\nBehind this admittedly cool UI, the aggregation logic is very easy to understand, making it easy to customize SkyWalking in its Observability Analysis Language (OAL) script.\nWe’ll post more about OAL for developers looking to customize SkyWalking, and you can read the official OAL introduction document.\nScripts are based on three core concepts:\n  Service represents a group of workloads that provide the same behaviours for incoming requests. You can define the service name whether you are using instrument agents or SDKs. Otherwise, SkyWalking uses the name you defined in the underlying platform, such as Istio.\n  Service Instance Each workload in the Service group is called an instance. Like Pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process. If you are using an instrument agent, an instance does map to one OS process.\n  Endpoint is a path in a certain service that handles incoming requests, such as HTTP paths or a gRPC service + method. Mesh telemetry and trace data are formatted as source objects (aka scope). These are the input for the aggregation, with the script describing how to aggregate, including input, conditions, and the resulting metric name.\n  Core Features The other core features in SkyWalking v6 are:\n Service, service instance, endpoint metrics analysis. Consistent visualization in Service Mesh and no mesh. Topology discovery, Service dependency analysis. Distributed tracing. Slow services and endpoints detected. Alarms.  Of course, SkyWalking has some more upgrades from v5, such as:\n ElasticSearch 6 as storage is supported. H2 storage implementor is back. Kubernetes cluster management is provided. You don’t need Zookeeper to keep the backend running in cluster mode. Totally new alarm core. Easier configuration. More cloud native style. MySQL will be supported in the next release.  Please: Test and Provide Feedback! We would love everyone to try and test our new version. You can find everything you need in our Apache repository, read the document for further details. You can contact the project team through the following channels:\n Submit an issue on GitHub repository Mailing list: dev@skywalking.apache.org. Send to dev-subscribe@skywalking.apache.org to subscribe to the mailing list. Gitter Project twitter  Oh, and one last thing! If you like our project, don\u0026rsquo;t forget to give us a star on GitHub.\n","excerpt":"Original link, Tetrate.io blog\nContext The integration of SkyWalking and Istio Service Mesh yields …","ref":"/blog/2018-12-12-skywalking-service-mesh-ready/","title":"SkyWalking v6 is Service Mesh ready"},{"body":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome aboard.\n","excerpt":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome …","ref":"/events/welcome-jian-tan-as-a-new-committer/","title":"Welcome Jian Tan as a new committer"},{"body":"APM consistently compatible in language agent(Java, .Net, NodeJS), 3rd party format(Zipkin) and service mesh telemetry(Istio). Go to downloads page to find release tars.\n","excerpt":"APM consistently compatible in language agent(Java, .Net, NodeJS), 3rd party format(Zipkin) and …","ref":"/events/release-apache-skywalking-6-0-0-alpha/","title":"Release Apache SkyWalking 6.0.0-alpha"},{"body":"A stable version of 5.x release. 
Go to downloads page to find release tars.","ref":"/events/release-apache-skywalking-5-0-0-ga/","title":"Release Apache SkyWalking 5.0.0-GA"},{"body":"5.0.0-RC2 release. Go to downloads page to find release tars.\n","excerpt":"5.0.0-RC2 release. Go to downloads page to find release tars.","ref":"/events/release-apache-skywalking-5-0-0-rc2/","title":"Release Apache SkyWalking 5.0.0-RC2"},{"body":"5.0.0-beta2 release. Go to downloads page to find release tars.\n","excerpt":"5.0.0-beta2 release. Go to downloads page to find release tars.","ref":"/events/release-apache-skywalking-5-0-0-beta2/","title":"Release Apache SkyWalking 5.0.0-beta2"},{"body":"Translated by Sheng Wu.\nIn many big systems, distributed and especially microservice architectures become more and more popular. With the increase of modules and services, one incoming request could cross dozens of services. How to pinpoint issues in the online system, and find the bottleneck of the whole distributed system? This became a very important problem that must be resolved.\nTo resolve these problems in distributed systems, Google published the paper “Dapper, a Large-Scale Distributed Systems Tracing Infrastructure”, which described the design and ideas behind building a distributed tracing system. Many projects created in the last 10 years have been inspired by it. In 2015, Apache SkyWalking was created by Wu Sheng, initially as a simple distributed tracing system, and was open sourced. Through almost 3 years of development, by 2018, with its 5.0.0-alpha/beta releases, it had already become a cool open source APM system for cloud native, container-based systems.\nEarly this year, I was trying to build the Butterfly open source APM in .NET Core, and that is when I met the Apache SkyWalking team and its creator. I decided to join them, and cooperate with them, to provide a .NET Core agent natively compatible with SkyWalking. In April, I released the first version of the .NET Core agent, 0.1.0. After several weeks of iteration, we released 0.2.0, improving stability and adding HttpClient and database driver support.\nBefore using the .NET Core agent, we need to deploy the SkyWalking collector, UI and ElasticSearch 5.x. You can download the releases here: http://skywalking.apache.org/downloads/ and follow the docs (Deploy-backend-in-standalone-mode, Deploy-backend-in-cluster-mode) to set up the backend.\nHere is a quick start showing how to monitor a demo distributed .NET Core application. I can say, it is easy.\n git clone https://github.com/OpenSkywalking/skywalking-netcore.git\n  cd skywalking-netcore\n  dotnet restore\n  dotnet run -p sample/SkyWalking.Sample.Backend\n  dotnet run -p sample/SkyWalking.Sample.Frontend\n Now you can open http://localhost:5001/api/values to access the demo application. 
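A quick way to generate a little sample traffic before opening the UI (a minimal sketch, not part of the original quick start; it assumes the sample frontend still listens on the default port 5001 and exposes the /api/values endpoint used above):\n # Send 20 requests so that a few traces get reported to the collector.\n for i in $(seq 1 20); do curl -s -o /dev/null http://localhost:5001/api/values; sleep 1; done\n 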
Then you can open SkyWalking WebUI http://localhost:8080\n  Overview of the whole distributed system   Topology of distributed system   Application view   Trace query   Span’s tags, logs and related traces   GitHub  Website: http://skywalking.apache.org/ SkyWalking Github Repo: https://github.com/apache/incubator-skywalking SkyWalking-NetCore Github Repo: https://github.com/OpenSkywalking/skywalking-netcore  ","excerpt":"Translated by Sheng Wu.\nIn many big systems, distributed and especially microservice architectures …","ref":"/blog/2018-05-24-skywalking-net/","title":"Apache SkyWalking provides open source APM and distributed tracing in .NET Core field"},{"body":"在大型网站系统设计中,随着分布式架构,特别是微服务架构的流行,我们将系统解耦成更小的单元,通过不断的添加新的、小的模块或者重用已经有的模块来构建复杂的系统。随着模块的不断增多,一次请求可能会涉及到十几个甚至几十个服务的协同处理,那么如何准确快速的定位到线上故障和性能瓶颈,便成为我们不得不面对的棘手问题。\n为解决分布式架构中复杂的服务定位和性能问题,Google 在论文《Dapper, a Large-Scale Distributed Systems Tracing Infrastructure》中提出了分布式跟踪系统的设计和构建思路。在这样的背景下,Apache SkyWalking 创建于 2015 年,参考 Dapper 论文实现分布式追踪功能,并逐渐进化为一个完整功能的 Application Performance Management 系统,用于追踪、监控和诊断大型分布式系统,尤其是容器和云原生下的微服务系统。\n今年初我在尝试使用.NET Core 构建分布式追踪系统 Butterfly 时接触到 SkyWalking 团队,开始和 SkyWalking 团队合作探索 SkyWalking 对.NET Core 的支持,并于 4 月发布 SkyWalking .NET Core 探针的 第一个版本,同时我也有幸加入 SkyWalking 团队共同进行 SkyWalking 在多语言生态的推动。在.NET Core 探针 v0.1 版本发布之后,得到了一些同学的尝鲜使用,也得到诸多改进的建议。经过几周的迭代,SkyWalking .NET Core 探针于今天发布 v0.2 release,在 v0.1 的基础上增加了\u0008稳定性和 HttpClient 及数据库驱动的追踪支持。\n在使用 SkyWalking 对.NET Core 应用追踪之前,我们需要先部署 SkyWalking Collector 收集分析 Trace 和 Elasticsearch 作为 Trace 数据存储。SkyWalking 支持 5.x 的 ES,所以我们需要下载安装对应版本的 ES,并配置 ES 的 cluster.name 为 CollectorDBCluster。然后部署 SkyWalking 5.0 beta 或更高版本 (下载地址:http://skywalking.apache.org/downloads/)。更详细的 Collector 部署文档,请参考 Deploy-backend-in-standalone-mode 和 Deploy-backend-in-cluster-mode。\n最后我们使用示例项目来演示在.NET Core 应用中使用 SkyWalking 进行追踪和监控,克隆 SkyWalking-NetCore 项目到本地:\ngit clone https://github.com/OpenSkywalking/skywalking-netcore.git 进入 skywalking-netcore 目录:\ncd skywalking-netcore 还原 nuget package:\ndotnet restore 启动示例项目:\ndotnet run -p sample/SkyWalking.Sample.Backend dotnet run -p sample/SkyWalking.Sample.Frontend 访问示例应用:\n打开 SkyWalking WebUI 即可看到我们的应用监控面板 http://localhost:8080\nDashboard 视图\nTopologyMap 视图\nApplication 视图\nTrace 视图\nTraceDetails 视图\nGitHub  SkyWalking Github Repo:https://github.com/apache/incubator-skywalking SkyWalking-NetCore Github Repo:https://github.com/OpenSkywalking/skywalking-netcore  ","excerpt":"在大型网站系统设计中,随着分布式架构,特别是微服务架构的流行,我们将系统解耦成更小的单元,通过不断的添加新的、小的模块或者重用已经有的模块来构建复杂的系统。随着模块的不断增多,一次请求可能会涉及到十几 …","ref":"/zh/2018-05-24-skywalking-net/","title":"Apache SkyWalking 为.NET Core带来开箱即用的分布式追踪和应用性能监控"},{"body":"","excerpt":"","ref":"/zh_tags/dotnetcore/","title":"DotNetCore"},{"body":"","excerpt":"","ref":"/tags/dotnetcore/","title":"DotNetCore"},{"body":"5.0.0-beta release. Go to downloads page to find release tars.\n","excerpt":"5.0.0-beta release. Go to downloads page to find release tars.","ref":"/events/release-apache-skywalking-5-0-0-beta/","title":"Release Apache SkyWalking 5.0.0-beta"},{"body":"5.0.0-alpha release. Go to downloads page to find release tars.\n","excerpt":"5.0.0-alpha release. Go to downloads page to find release tars.","ref":"/events/release-apache-skywalking-apm-5-0-0-alpha/","title":"Release Apache SkyWalking APM 5.0.0-alpha"},{"body":"","excerpt":"","ref":"/index.json","title":""},{"body":"10.0.0 Project  Support Java 21 runtime. Support oap-java21 image for Java 21 runtime. Upgrade OTEL collector version to 0.92.0 in all e2e tests. Switch CI macOS runner to m1. 
Upgrade PostgreSQL driver to 42.4.4 to fix CVE-2024-1597. Remove CLI(swctl) from the image. Remove CLI_VERSION variable from Makefile build. Add BanyanDB to docker-compose quickstart. Bump up Armeria, jackson, netty, jetcd and grpc to fix CVEs.  OAP Server  Add layer parameter to the global topology graphQL query. Add is_present function in MQE for check if the list metrics has a value or not. Remove unreasonable default configurations for gRPC thread executor. Remove gRPCThreadPoolQueueSize (SW_RECEIVER_GRPC_POOL_QUEUE_SIZE) configuration. Allow excluding ServiceEntries in some namespaces when looking up ServiceEntries as a final resolution method of service metadata. Set up the length of source and dest IDs in relation entities of service, instance, endpoint, and process to 250(was 200). Support build Service/Instance Hierarchy and query. Change the string field in Elasticsearch storage from keyword type to text type if it set more than 32766 length. [Break Change] Change the configuration field of ui_template and ui_menu in Elasticsearch storage from keyword type to text. Support Service Hierarchy auto matching, add auto matching layer relationships (upper -\u0026gt; lower) as following:  MESH -\u0026gt; MESH_DP MESH -\u0026gt; K8S_SERVICE MESH_DP -\u0026gt; K8S_SERVICE GENERAL -\u0026gt; K8S_SERVICE   Add namespace suffix for K8S_SERVICE_NAME_RULE/ISTIO_SERVICE_NAME_RULE and metadata-service-mapping.yaml as default. Allow using a dedicated port for ALS receiver. Fix log query by traceId in JDBCLogQueryDAO. Support handler eBPF access log protocol. Fix SumPerMinFunctionTest error function. Remove unnecessary annotations and functions from Meter Functions. Add max and min functions for MAL down sampling. Fix critical bug of uncontrolled memory cost of TopN statistics. Change topN group key from StorageId to entityId + timeBucket. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  MYSQL -\u0026gt; K8S_SERVICE POSTGRESQL -\u0026gt; K8S_SERVICE SO11Y_OAP -\u0026gt; K8S_SERVICE VIRTUAL_DATABASE -\u0026gt; MYSQL VIRTUAL_DATABASE -\u0026gt; POSTGRESQL   Add Golang as a supported language for AMQP. Support available layers of service in the topology. Add count aggregation function for MAL Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  NGINX -\u0026gt; K8S_SERVICE APISIX -\u0026gt; K8S_SERVICE GENERAL -\u0026gt; APISIX   Add Golang as a supported language for RocketMQ. Support Apache RocketMQ server monitoring. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  ROCKETMQ -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; ROCKETMQ   Fix ServiceInstance in query. Mock /api/v1/status/buildinfo for PromQL API. Fix table exists check in the JDBC Storage Plugin. Fix day-based table rolling time range strategy in JDBC storage. Add maxInboundMessageSize (SW_DCS_MAX_INBOUND_MESSAGE_SIZE) configuration to change the max inbound message size of DCS. Fix Service Layer when building Events in the EventHookCallback. Add Golang as a supported language for Pulsar. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  RABBITMQ -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; RABBITMQ   Remove Column#function mechanism in the kernel. Make query readMetricValue always return the average value of the duration. 
Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  KAFKA -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; KAFKA   Support ClickHouse server monitoring. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  CLICKHOUSE -\u0026gt; K8S_SERVICE VIRTUAL_DATABASE -\u0026gt; CLICKHOUSE   Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  PULSAR -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; PULSAR   Add Golang as a supported language for Kafka. Support displaying the port services listen to from OAP and UI during server start. Refactor data-generator to support generating metrics. Fix AvgHistogramPercentileFunction legacy name. [Break Change] Labeled Metrics support multiple labels.  Storage: store all label names and values instead of only the values. MQE:  Support querying by multiple labels(name and value) instead using _ as the anonymous label name. aggregate_labels function support aggregate by specific labels. relabels function require target label and rename label name and value.   PromQL:  Support querying by multiple labels(name and value) instead using lables as the anonymous label name. Remove general labels labels/relabels/label function. API /api/v1/labels and /api/v1/label/\u0026lt;label_name\u0026gt;/values support return matched metrics labels.   OAL:  Deprecate percentile function and introduce percentile2 function instead.     Bump up Kafka to fix CVE. Fix NullPointerException in Istio ServiceEntry registry. Remove unnecessary componentIds as series ID in the ServiceRelationClientSideMetrics and ServiceRelationServerSideMetrics entities. Fix not throw error when part of expression not matched any expression node in the MQE and `PromQL. Remove kafka-fetcher/default/createTopicIfNotExist as the creation is automatically since #7326 (v8.7.0). Fix inaccuracy nginx service metrics. Fix/Change Windows metrics name(Swap -\u0026gt; Virtual Memory)  memory_swap_free -\u0026gt; memory_virtual_memory_free memory_swap_total -\u0026gt; memory_virtual_memory_total memory_swap_percentage -\u0026gt; memory_virtual_memory_percentage   Fix/Change UI init setting for Windows Swap -\u0026gt; Virtual Memory Fix Memory Swap Usage/Virtual Memory Usage display with UI init.(Linux/Windows) Fix inaccurate APISIX metrics. Fix inaccurate MongoDB Metrics. Support Apache ActiveMQ server monitoring. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  ACTIVEMQ -\u0026gt; K8S_SERVICE   Calculate Nginx service HTTP Latency by MQE. MQE query: make metadata not return null. MQE labeled metrics Binary Operation: return empty value if the labels not match rather than report error. Fix inaccurate Hierarchy of RabbitMQ Server monitoring metrics. Fix inaccurate MySQL/MariaDB, Redis, PostgreSQL metrics. Support DoubleValue,IntValue,BoolValue in OTEL metrics attributes. [Break Change] gGRPC metrics exporter unified the metric value type and support labeled metrics. Add component definition(ID=152) for c3p0(JDBC3 Connection and Statement Pooling). Fix MQE top_n global query. Fix inaccurate Pulsar and Bookkeeper metrics.  UI  Fix the mismatch between the unit and calculation of the \u0026ldquo;Network Bandwidth Usage\u0026rdquo; widget in Linux-Service Dashboard. Add theme change animation. Implement the Service and Instance hierarchy topology. Support Tabs in the widget visible when MQE expressions. Support search on Marketplace. Fix default route. 
Fix layout on the Log widget. Fix Trace associates with Log widget. Add isDefault to the dashboard configuration. Add expressions to dashboard configurations on the dashboard list page. Update Kubernetes related UI templates for adapt data from eBPF access log. Fix dashboard K8S-Service-Root metrics expression. Add dashboards for Service/Instance Hierarchy. Fix MQE in dashboards when using Card widget. Optimize tooltips style. Fix resizing window causes the trace graph to display incorrectly. Add the not found page(404). Enhance VNode logic and support multiple Trace IDs in span\u0026rsquo;s ref. Add the layers filed and associate layers dashboards for the service topology nodes. Fix Nginx-Instance metrics to instance level. Update tabs of the Kubernetes service page. Add Airflow menu i18n. Add Support for dragging in the trace panel. Add workflow icon. Metrics support multiple labels. Support the SINGLE_VALUE for table widgets. Remove the General metric mode and related logical code. Remove metrics for unreal nodes in the topology. Enhance the Trace widget for batch consuming spans. Clean the unused elements in the UI-templates.  Documentation  Update the release doc to remove the announcement as the tests are through e2e rather than manually. Update the release notification mail a little. Polish docs structure. Move customization docs separately from the introduction docs. Add webhook/gRPC hooks settings example for backend-alarm.md. Begin the process of SWIP - SkyWalking Improvement Proposal. Add SWIP-1 Create and detect Service Hierarchy Relationship. Add SWIP-2 Collecting and Gathering Kubernetes Monitoring Data. Update the Overview docs to add the Service Hierarchy Relationship section. Fix incorrect words for backend-bookkeeper-monitoring.md and backend-pulsar-monitoring.md Document a new way to load balance OAP. Add SWIP-3 Support RocketMQ monitoring. Add OpenTelemetry SkyWalking Exporter deprecated warning doc. Update i18n for rocketmq monitoring. Fix: remove click event after unmounted. Fix: end loading without query results. Update nanoid version to 3.3.7. Update postcss version to 8.4.33. Fix kafka topic name in exporter doc. Fix query-protocol.md, make it consistent with the GraphQL query protocol. Add SWIP-5 Support ClickHouse Monitoring. Remove OpenTelemetry Exporter support from meter doc, as this has been flagged as unmaintained on OTEL upstream. Add doc of one-line quick start script for different storage types. Add FAQ for Why is Clickhouse or Loki or xxx not supported as a storage option?. Add SWIP-8 Support ActiveMQ Monitoring.  All issues and pull requests are here\n","excerpt":"10.0.0 Project  Support Java 21 runtime. Support oap-java21 image for Java 21 runtime. Upgrade OTEL …","ref":"/docs/main/next/en/changes/changes/","title":"10.0.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  
5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. 
Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/latest/en/changes/changes-5.x/","title":"5.1.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  
5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. 
Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  
Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/next/en/changes/changes-5.x/","title":"5.1.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. 
Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. 
Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/v9.1.0/en/changes/changes-5.x/","title":"5.1.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. 
Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  
Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/v9.2.0/en/changes/changes-5.x/","title":"5.1.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. 
,{"body":"6.6.0 Project  [IMPORTANT] Local spans and exit spans are no longer treated as endpoints detected at the client or locally. Only the entry span is the endpoint. This reduces the register load and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS builds in GitHub Action CI. Support ElasticSearch 7 in the official dist. Hundreds of plugin cases have been added to the GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support setting the operation name in an async span. Enhance the webflux plugin, related to the Spring Gateway plugin. The webflux plugin is optional, as it requires JDK8. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in the upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix Spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client-side endpoint register in service mesh. Service instance dependency and related metrics. 
Support min func in OAL. Support apdex func in OAL. Support custom ES config settings at the index level. Envoy ALS proto upgraded. Update JODA lib due to bugs in UTC +13/+14. Make the topN sample period configurable. Ignore DB operations without a statement in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage.  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on the topo map. Service instance dependency view and related metrics. Support using URL parameters in the trace query page. Support apdex score in the service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests have begun to be migrated into the main repo, and this is in progress. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple JDK versions: JDK 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configuring alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limits. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Make the scope of alarm messages more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Use application.yml instead of datasource-settings.properties for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher JDK versions (#3541) Sync latest ALS from Envoy API (#3507) Set telemetry instance id for Etcd and Nacos plugins (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before the streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: when jvm-maxheap & jvm-maxnonheap are -1, free shows no value. Fix bug: time select operation not taking effect. Fix bug: language initialization failed. Fix bug: instance language not shown. Feature: support exporting the trace list display as PNG. Feature: Metrics comparison view. BugFix: Fix dashboard top throughput copy.  Java Agent  Optimize Spring async scenario (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL SQL query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Support RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Fix Spring-cloud-gateway traceId not transmitted, #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may not be unique (#3510) Add feature to control the maximum agent log files (#3475) Agent supports custom properties. 
(#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall with typical users in the readme page (#3719) Update istio docs according to the latest istio release (#3646) Use chart to deploy sw docs (#3573) Reorganize the doc, and provide a catalog (#3563) Committer vote and set up document. (#3496) Update ALS setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommended to upgrade due to the Pxx metrics calculation bug. Make the agent work in the JDK9+ Module system.  Java Agent  Make the agent work in the JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into the logstash appender in logback. Add OperationName (including endpoint name) max length threshold. Support using Regex to group operation names. Support Undertow routing handler. RestTemplate plugin supports operation name grouping. Fix ClassCastException in Webflux plugin. Order the zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatibility issue. Fix MySQL 5 plugin issue. Make the log writer cached. Optimize Spring Cloud Gateway plugin. Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from the agent.  Backend  Fix Pxx (p50, p75, p90, p95, p99) metrics func bug. (Critical) Support Gateway in backend analysis, even when it doesn't have a suitable language agent. Support using HTTPS SSL to access ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Set up slow SQL length threshold. Fix TTL settings not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces are abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calendar js bug. Fix the bug that the topo-services component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  The Java agent supports JDK 9 - 12, but doesn't support Java Modules yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin. [Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform to other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender printing tid. Fix gRPC reconnect bug. Fix trace segment service not reporting onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. 
Backend stream flow refactor. High dimensionality metrics (Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistence of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Not compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer loss. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove the IDs can't be null log, to avoid misleading. Fix provider being initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh not working after endpoint and instance changed. Fix endpoint selector bug. Fix wrong copy value in slow traces. Fix trace not shown when it is broken partially (because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do the upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collecting SQL parameters in MySQL plugin. [Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x [Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM caused by empty exception stack. Fix wrong cause exception of stack in span log. Fix unclear running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stopping when doing HTTP forward. Fix lettuce plugin async command bug and NPE. Fix webflux plugin cast exception. [CI] Support import check.  Backend  Support time series ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provides multiple implementations, DCS (gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edges. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider being loaded repeatedly. Change TOP slow SQL storage in ES from Keyword to Text, due to too-long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug. (No feature provided yet) Filter pods not in 'Running' phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Code refactor.  UI  Fix some ID is null query(s). 
Page refactor, especially the time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edges. Change all P99/95/90/75/50 chart styles. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from the document. Add FAQ for WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as an Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5, 8 plugins. Support setting the instance id manually (optional). Support customized enhance trace plugin in the optional list. Support setting peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin creating an unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+. Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can't run in JVM 6/7, remove module-info.class. Fix agent can't work well if there is whitespace in the agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environments. 3x improvement in the service mesh scenario (no trace) on an 8C16G VM. Significantly less CPU cost at low payload.\n Support database metrics and SLOW SQL detection. Support setting the max size of metadata query. And change the default to 5000 from 100. Support ElasticSearch template for new features in the future. Support shutting down Zipkin trace analysis, because it doesn't fit the production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET CLR receiver. Support Jaeger trace format, no analysis. Support grouping endpoint names by regex rules in the mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support >, <, >=, <= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support setting host/port manually at the cluster coordinator, rather than based on core settings. Make sure OAP shuts down when it faces a startup error. Support setting separated gRPC/Jetty ip:port for receivers, default still uses core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search not working as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET. Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bound in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirements. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed; only considered to be provided by volunteers outside of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin (optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. 
Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix segment lost when the operation name of local and exit spans is in ref. Fix the index names not showing right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor of the register API in the storage module. Fix H2 and MySQL endpoint dependency map missing the upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and remove unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019. User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2. Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in a compatible way. Need to declare open explicitly. Support SpringMVC 5. Support webflux. Support a new way to override agent.config by system env. Span tags can be overridden in an explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader deadlock in certain situations. Fix agent log typo. Fix wrong component id in resttemplate plugin. Fix use of transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarms. Support namespace in the Istio receiver. Support service throughput (cpm), successful rate (sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in the cluster module. The OAL generate tool has been integrated into the main repo, in the maven compile stage. Optimize trace paging query. Fix trace query not using fuzzy query in ElasticSearch storage. Fix alarm not being activated in the right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero. Fix segment TTL not working.  UI  Support service throughput (cpm), successful rate (sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link not working right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is a totally new milestone for the project. At this point, we are not just a distributed tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform (OAP).\nThe core and most important features in v6 are\n Support collecting telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. 
Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/next/en/changes/changes-6.x/","title":"6.6.0"},{"body":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. 
(#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. 
Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. 
Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. 
Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. 
Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/v9.1.0/en/changes/changes-6.x/","title":"6.6.0"},{"body":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. 
Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. 
BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. 
Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. 
Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. 
Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. 
Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/v9.2.0/en/changes/changes-6.x/","title":"6.6.0"},{"body":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  
Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. 
Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. 
Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer loss. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector bug. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially (because of agent sampling or fail-safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add Windows compile support in CI.  Java Agent  Support collecting SQL parameters in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. Fix wrong cause exception of stack in span log. Fix the running context not being cleared in the SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async command bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time-series ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provides multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Code refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ for WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. 
Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. 
Fix and remove unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019. User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2. Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in a compatible way. Need to declare it open explicitly. Support SpringMVC 5. Support webflux. Support a new way to override agent.config by system env. Span tags can be overridden in an explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader deadlock in certain situations. Fix agent log typo. Fix wrong component id in RestTemplate plugin. Fix using transform ignore() in the wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generation tool has been integrated into the main repo, in the maven compile stage. Optimize trace paging query. Fix trace query doesn\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be activated in the right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero. Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is a totally new milestone for the project. At this point, we are not just a distributed tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform (OAP).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available at the core level, although not provided in this release. Provide Observability Analysis Language (OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics. 
All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/v9.3.0/en/changes/changes-6.x/","title":"6.6.0"},
{"body":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic.
(#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. 
Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. 
Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. 
Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. 
Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/v9.6.0/en/changes/changes-6.x/","title":"6.6.0"},{"body":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. 
Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. 
BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. 
Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. 
Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. 
Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. 
Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/v9.7.0/en/changes/changes-6.x/","title":"6.6.0"},{"body":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. 
Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","excerpt":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The …","ref":"/docs/main/latest/en/changes/changes-7.0.0/","title":"7.0.0"},{"body":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","excerpt":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The …","ref":"/docs/main/next/en/changes/changes-7.0.0/","title":"7.0.0"},{"body":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. 
Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","excerpt":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The …","ref":"/docs/main/v9.1.0/en/changes/changes-7.0.0/","title":"7.0.0"},{"body":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  
OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","excerpt":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The …","ref":"/docs/main/v9.2.0/en/changes/changes-7.0.0/","title":"7.0.0"},{"body":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  
UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","excerpt":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The …","ref":"/docs/main/v9.3.0/en/changes/changes-7.0.0/","title":"7.0.0"},{"body":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","excerpt":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The …","ref":"/docs/main/v9.4.0/en/changes/changes-7.0.0/","title":"7.0.0"},{"body":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. 
Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","excerpt":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The …","ref":"/docs/main/v9.5.0/en/changes/changes-7.0.0/","title":"7.0.0"},{"body":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. 
Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","excerpt":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The …","ref":"/docs/main/v9.6.0/en/changes/changes-7.0.0/","title":"7.0.0"},{"body":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  
All issues and pull requests are here\n","excerpt":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The …","ref":"/docs/main/v9.7.0/en/changes/changes-7.0.0/","title":"7.0.0"},{"body":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","excerpt":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x …","ref":"/docs/main/latest/en/changes/changes-8.0.0/","title":"8.0.0"},{"body":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. 
Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","excerpt":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x …","ref":"/docs/main/next/en/changes/changes-8.0.0/","title":"8.0.0"},{"body":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. 
Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","excerpt":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x …","ref":"/docs/main/v9.1.0/en/changes/changes-8.0.0/","title":"8.0.0"},{"body":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. 
Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","excerpt":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x …","ref":"/docs/main/v9.2.0/en/changes/changes-8.0.0/","title":"8.0.0"},{"body":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  
All issues and pull requests are here\n","excerpt":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x …","ref":"/docs/main/v9.3.0/en/changes/changes-8.0.0/","title":"8.0.0"},{"body":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","excerpt":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x …","ref":"/docs/main/v9.4.0/en/changes/changes-8.0.0/","title":"8.0.0"},{"body":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. 
Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","excerpt":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x …","ref":"/docs/main/v9.5.0/en/changes/changes-8.0.0/","title":"8.0.0"},{"body":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. 
Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","excerpt":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x …","ref":"/docs/main/v9.6.0/en/changes/changes-8.0.0/","title":"8.0.0"},{"body":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. 
Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","excerpt":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x …","ref":"/docs/main/v9.7.0/en/changes/changes-8.0.0/","title":"8.0.0"},{"body":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","excerpt":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","ref":"/docs/main/latest/en/changes/changes-8.0.1/","title":"8.0.1"},{"body":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","excerpt":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","ref":"/docs/main/next/en/changes/changes-8.0.1/","title":"8.0.1"},{"body":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","excerpt":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","ref":"/docs/main/v9.1.0/en/changes/changes-8.0.1/","title":"8.0.1"},{"body":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","excerpt":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","ref":"/docs/main/v9.2.0/en/changes/changes-8.0.1/","title":"8.0.1"},{"body":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","excerpt":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","ref":"/docs/main/v9.3.0/en/changes/changes-8.0.1/","title":"8.0.1"},{"body":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","excerpt":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","ref":"/docs/main/v9.4.0/en/changes/changes-8.0.1/","title":"8.0.1"},{"body":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","excerpt":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","ref":"/docs/main/v9.5.0/en/changes/changes-8.0.1/","title":"8.0.1"},{"body":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","excerpt":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","ref":"/docs/main/v9.6.0/en/changes/changes-8.0.1/","title":"8.0.1"},{"body":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","excerpt":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","ref":"/docs/main/v9.7.0/en/changes/changes-8.0.1/","title":"8.0.1"},{"body":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. 
[Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","excerpt":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system …","ref":"/docs/main/latest/en/changes/changes-8.1.0/","title":"8.1.0"},{"body":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. 
Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","excerpt":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system …","ref":"/docs/main/next/en/changes/changes-8.1.0/","title":"8.1.0"},{"body":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  
OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","excerpt":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system …","ref":"/docs/main/v9.1.0/en/changes/changes-8.1.0/","title":"8.1.0"},{"body":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. 
Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","excerpt":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system …","ref":"/docs/main/v9.2.0/en/changes/changes-8.1.0/","title":"8.1.0"},{"body":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. 
Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","excerpt":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system …","ref":"/docs/main/v9.3.0/en/changes/changes-8.1.0/","title":"8.1.0"},{"body":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. 
Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","excerpt":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system …","ref":"/docs/main/v9.4.0/en/changes/changes-8.1.0/","title":"8.1.0"},{"body":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. 
Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","excerpt":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system …","ref":"/docs/main/v9.5.0/en/changes/changes-8.1.0/","title":"8.1.0"},{"body":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  
All issues and pull requests are here\n","excerpt":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system …","ref":"/docs/main/v9.6.0/en/changes/changes-8.1.0/","title":"8.1.0"},{"body":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","excerpt":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system …","ref":"/docs/main/v9.7.0/en/changes/changes-8.1.0/","title":"8.1.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  
Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. 
Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/latest/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. 
Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/next/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. 
Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.1.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. 
Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.2.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. 
Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  
Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.3.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. 
Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.4.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. 
Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.5.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. 
Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.6.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. 
Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. 
Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.7.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. 
Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","excerpt":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up …","ref":"/docs/main/latest/en/changes/changes-8.3.0/","title":"8.3.0"},{"body":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. 
Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","excerpt":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up …","ref":"/docs/main/next/en/changes/changes-8.3.0/","title":"8.3.0"},{"body":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. 
Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","excerpt":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up …","ref":"/docs/main/v9.1.0/en/changes/changes-8.3.0/","title":"8.3.0"},{"body":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. 
Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","excerpt":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up …","ref":"/docs/main/v9.2.0/en/changes/changes-8.3.0/","title":"8.3.0"},{"body":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. 
Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","excerpt":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up …","ref":"/docs/main/v9.3.0/en/changes/changes-8.3.0/","title":"8.3.0"},{"body":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. 
Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","excerpt":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up …","ref":"/docs/main/v9.4.0/en/changes/changes-8.3.0/","title":"8.3.0"},{"body":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. 
,{"body":"8.4.0 Project  Incompatible with previous releases when using H2/MySQL/TiDB storage options, due to supporting multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files to prevent polluting the top-level namespace in some languages; the OAP server supports previous agent releases, whereas the previous OAP server (<=8.3.0) won't recognize newer agents since this version (>= 8.4.0). Add ElasticSearch 7.10 to the test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify the Envoy V3 protocol. Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of the quartz-scheduler plugin has been changed to the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugins not overwriting the old trace header. Add interceptors for methods (analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) in elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when the intermediate service does not mount the agent. Fix thrift plugin collecting wrong args when the method has no parameters. Fix DataCarrier's org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation not being activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer's useless IF_POSSIBLE mode list. Support building a gRPC TLS channel; the CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still work when the trace ignores plug-in activation. Fix mssql-plugin throwing ClassCastException when calling the method that returns generated keys. The operation name of the dubbo and dubbo-2.7.x plugins has been changed to the groupValue/className.methodName format. Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with the toolkit. Fix RestTemplate plugin recording the url tag with the wrong port. Support collecting logs and forwarding them through gRPC. Support changing the config agent.sample_n_per_3_secs at runtime. Support changing the config agent.ignore_suffix at runtime. Support DNS periodic resolving mechanism to update the backend service. Support changing the config agent.trace.ignore_path at runtime. Added support for transmitting logback 1.x and log4j 2.x formatted & un-formatted messages via gRPC.  OAP-Backend  Make the meter receiver support MAL. Support InfluxDB connection response format option. Fix errors when using JSON as the InfluxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters.
Add the rule name field to the alarm record storage entity as part of the ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name not being queryable in GraphQL. Fix potential gRPC connection leak (not closed) for the channels among OAP instances. Filter OAP instances (unassigned in the booting stage) with an empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues. Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister not being updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in the Envoy metrics service analyzer. Fix the priority setting of the ALS analyzers not working. Fix bug that endpoint-name-grouping.yml is not customizable in the Dockerized case. Fix bug that the istio version metric type on the UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrent read-write performance. Fix bug that NumberFormatException may occur when using JSON as InfluxDB.ResponseFormat. Fix timeBucket not taking effect in the EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig's property not being correct in application.yml, contextPath -> restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug that endpoint name grouping does not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to Feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the OpenCensus proto to the labels of the samples, to support the identification of the source of the metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying logs in the ES storage. Chore: Remove duplicate codes in the Envoy ALS handler. Remove the strict rule of the OAL disable statement parameter. Fix a legal metric query adoption bug. Don't support global level metric query. Add VM MAL and ui-template configuration, supporting Prometheus node-exporter VM metrics pushed from the OpenTelemetry collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on the single value component. Don't allow a negative value as the refresh period. Fix style issue in trace table view. Separate Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as the default parameter for the services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology rendering all service nodes when groups changed. Fix rk-footer utc input's width. Update rk-icon and rewrite rk-header svg tags with rk-icon.
Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of the unhealthy cube. Fix: use icons to replace buttons for the task list in profile. Fix: support = in the tag value on the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on the UI. Change the flask icon to the light version for a better view of the topology dark theme. Implement viewing logs on the trace page. Fix update props of the date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search UI for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about fixing compiling on Mac M1 chip.  All issues and pull requests are here\n","excerpt":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to …","ref":"/docs/main/latest/en/changes/changes-8.4.0/","title":"8.4.0"}
All issues and pull requests are here\n","excerpt":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to …","ref":"/docs/main/v9.5.0/en/changes/changes-8.4.0/","title":"8.4.0"},{"body":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. 
Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. 
Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","excerpt":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to …","ref":"/docs/main/v9.6.0/en/changes/changes-8.4.0/","title":"8.4.0"},{"body":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. 
Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. 
Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","excerpt":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to …","ref":"/docs/main/v9.7.0/en/changes/changes-8.4.0/","title":"8.4.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. 
Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. 
Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/latest/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. 
Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. 
Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/next/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. 
Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.1.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. 
Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. 
Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.2.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. 
Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. 
Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.3.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. 
Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.4.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. 
Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. 
Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.5.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. 
Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. 
Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.6.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. 
Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. 
Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.7.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. 
Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/latest/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. 
perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/next/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. 
Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. 
Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/v9.1.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. 
When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/v9.2.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. 
Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/v9.3.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. 
Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. 
…","ref":"/docs/main/v9.4.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. 
Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/v9.5.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. 
fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/v9.6.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. 
Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/v9.7.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. 
DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. 
Create PrepareRequest concurrently when persisting metrics. Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high-traffic scenarios (reduces ~1 CPU at ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex patterns and results, optimize string concatenation in the Envoy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance the persistent session mechanism by differentiating cache timeouts for metrics of different dimensionalities. The cache timeout for minute- and hour-level metrics has been prolonged to ~5 min. Performance: add an L1 aggregation flush period, which reduces the CPU load and helps young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement the storage session mechanism: cached metrics are removed only according to their last access timestamp, rather than the first access time. This makes sure hot data never gets removed unexpectedly. Make the session expiry threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace the zuul proxy with Spring Cloud Gateway 2.x in the webapp module. Upgrade the etcd cluster coordinator and dynamic configuration to v3.x. Configuration: allow configuring the server maximum request header size and the ES index template order. Add thread state metrics and class-loaded info metrics to JVMMetric. Performance: compile the LAL DSL statically and run it type-checked. Add pagination to the event query protocol. Performance: optimize Envoy error log persistence performance. Support Envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from the batch ElasticSearch DAO, because the enhanced persistent session mechanism no longer requires data to be queryable immediately after insert and update. Performance: share the flushInterval setting for both metrics and record data, since the synchronous persistence mechanism was removed; the record flush interval used to be hardcoded as 10s. Remove syncBulkActions in the ElasticSearch storage option. Increase the default bulkActions (env SW_STORAGE_ES_BULK_ACTIONS) to 5000 (from 1000). Increase the flush interval of ElasticSearch indices to 15s (from 10s); see the configuration sketch after this entry. Provide distinct for elements of metadata lists; due to the more aggressive asynchronous flush, metadata lists have more chances of including duplicate elements, which was not an issue before. Reduce the flush period of hour- and day-level metrics to run only once every 4 regular persistent periods. This means the default flush period of hour- and day-level metrics is 25s * 4. Performance: optimize IDs read of the ElasticSearch storage options (6 and 7). Use the physical index rather than the template alias name. Adjust the index refresh period to INT(flushInterval * 2/3); it used to be the same as the bulk flush period. In the edge case of low traffic (traffic < bulkActions in the whole period), 2 periods of bulks could be included in one index refresh rebuild operation, which could cause version conflicts. This case can't be fixed through core/persistentPeriod because the bulk flush is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting (added in 8.5.0) is removed because metrics persistence is fully asynchronous.
The core/syncThreads setting (added in 8.5.0) is removed because metrics persistence is fully asynchronous. Optimization: the concurrency mode of the execution stage for metrics (added in 8.5.0) is removed. Only concurrency of the prepare stage is meaningful and kept. Fix the issue that the -meters metrics topic isn't created with the namespace. Enhance the persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, a new timeout mechanism is designed to avoid this specific case. Fix the issue that Kafka transport topics are created duplicated, with and without the namespace. Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter possibly throwing an IllegalArgumentException when converting metrics to SampleFamily. Filter NaN value samples when building SampleFamily. Add Thread and ClassLoader metrics for self-observability and otel-oc-rules. Simple optimization of the trace SQL query statement to avoid the “select *” query method. Introduce dynamic logging to update the log configuration at runtime. Fix: the Kubernetes ConfigMap configuration center doesn't send delete events. Breaking Change: remove qps and add rpm in LAL.  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile UI. Fix the error of server response time in the topology. Fix chart types for setting metrics configuration. Fix logs page number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0. Add Self Observability service discovery (k8s). Add an example of sending Envoy metrics to OAP in Envoy 1.19 and bump up to the Envoy V3 API.  All issues and pull requests are here\n","excerpt":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier …","ref":"/docs/main/latest/en/changes/changes-8.7.0/","title":"8.7.0"},
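The DataCarrier change listed in the Project section above introduces a nothingToConsume notification for queue consumers. The following is a minimal, self-contained Java sketch of the idea; the interface shape and every name here are illustrative assumptions, not the actual SkyWalking DataCarrier API.

```java
import java.util.List;

// Sketch of the consumer callback described above; the real interface lives in the
// SkyWalking agent's DataCarrier module and may differ in naming and methods.
interface QueueConsumer<T> {
    void consume(List<T> data);

    // New notification in 8.7.0: called when the queue has no element to consume,
    // so a consumer can, for example, flush a partially filled local buffer.
    default void nothingToConsume() {
    }
}

public class LogBatchConsumer implements QueueConsumer<String> {
    private final StringBuilder buffer = new StringBuilder();

    @Override
    public void consume(List<String> data) {
        // accumulate queued items into a local batch
        data.forEach(line -> buffer.append(line).append('\n'));
    }

    @Override
    public void nothingToConsume() {
        // queue drained: push whatever is buffered instead of waiting for more data
        if (buffer.length() > 0) {
            System.out.print(buffer);   // stand-in for "send the pending batch"
            buffer.setLength(0);
        }
    }

    public static void main(String[] args) {
        LogBatchConsumer consumer = new LogBatchConsumer();
        consumer.consume(List.of("span A reported", "span B reported"));
        consumer.nothingToConsume();
    }
}
```

A consumer written this way can flush a partial batch as soon as the queue goes idle, rather than leaving data sitting in its buffer until the next consume cycle.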
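Several of the ElasticSearch storage changes above (bulkActions default raised to 5000, flush interval raised to 15s, refresh period derived as INT(flushInterval * 2/3)) interact with each other. The sketch below shows how they might be wired together in the OAP storage configuration; only SW_STORAGE_ES_BULK_ACTIONS, the new defaults, and the formula come from the notes above, while the YAML layout and the other environment-variable names are assumptions for illustration.

```yaml
# Hypothetical excerpt of the OAP storage settings discussed above.
storage:
  elasticsearch7:
    connectTimeout: ${SW_STORAGE_ES_CONNECT_TIMEOUT:500}    # new connect timeout setting (name assumed)
    socketTimeout: ${SW_STORAGE_ES_SOCKET_TIMEOUT:30000}    # new socket timeout setting (name assumed)
    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:5000}         # default raised from 1000 to 5000
    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:15}       # seconds; shared by metrics and records, raised from 10 to 15
# The index refresh period is then derived as INT(flushInterval * 2/3),
# e.g. INT(15 * 2/3) = 10 seconds, instead of matching the bulk flush period.
```

With the 15s default, the refresh period works out to 10s, so an index refresh no longer coincides exactly with a bulk flush, which is what used to trigger version conflicts in the low-traffic edge case described above.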
Fix the error of server response time in the topology. Fix chart types for the metrics configuration settings. Fix the logs page number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0. Add Self Observability service discovery (k8s). Add an example of sending Envoy Metrics to OAP in Envoy 1.19 and bump up to the Envoy V3 API.  All issues and pull requests are here\n","excerpt":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier …","ref":"/docs/main/v9.5.0/en/changes/changes-8.7.0/","title":"8.7.0"},
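For reference, a minimal sketch of how the ElasticSearch storage settings mentioned in the 8.7.0 notes above might be expressed in the OAP's application.yml. Only bulkActions (env var SW_STORAGE_ES_BULK_ACTIONS), the 5000/1000 and 15s/10s defaults, and the INT(flushInterval * 2/3) refresh rule are stated in the notes; the remaining property names, environment variables, and default values are assumptions for illustration and should be verified against the configuration file shipped with the release.

  storage:
    selector: ${SW_STORAGE:elasticsearch7}
    elasticsearch7:
      clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}  # assumed pre-existing setting
      connectTimeout: ${SW_STORAGE_ES_CONNECT_TIMEOUT:500}         # new in 8.7.0; name and default assumed
      socketTimeout: ${SW_STORAGE_ES_SOCKET_TIMEOUT:30000}         # new in 8.7.0; name and default assumed
      bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:5000}              # default raised from 1000 to 5000 per the notes
      flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:15}            # default raised from 10s to 15s per the notes

With these defaults, the derived index refresh period is INT(15 * 2/3) = 10s, which keeps a single index refresh from spanning two bulk flush periods under normal traffic.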
{"body":"8.8.0 Project  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. Upgrade the kubernetes java client from 12.0.1 to 13.0.0. Add an event HTTP receiver. Support the Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segments won't be recognized as a TopN sample service; this was added through #4694 experimentally, but it caused a performance impact. Remove version and endTime from the segment entity. Reduce the indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for the Go-Kratos framework. [Break Change] Remove endpoint name from the trace query condition. Only support querying by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), macOS. [Break Change] Remove page path from the browser log query condition. Only support querying by page path id. [Break Change] Remove endpoint name from the backend log query condition. Only support querying by endpoint id. [Break Change] Fix typo for the column page_path_id (was pate_path_id) of the storage entity browser_error_log. Add component id for the Python falcon plugin. Add rpcStatusCode for the rpc.status_code tag. The responseCode field is marked as deprecated and replaced by the httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test the log analysis language. Harden the security of the Groovy-based DSLs, MAL and LAL. Fix distinct not working in Service/Instance/Endpoint queries. Support collection types in the dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations.
Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO. Support configuring the sampling policy dynamically via the configuration module, and statically via the configuration file trace-sampling-policy-settings.yml, for the service dimension on the backend side (a sketch of the file follows this 8.8.0 entry). Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix the dynamic configuration watch implementation's current value not being null when the config is deleted. Fix LoggingConfigWatcher returning a watch.value that is not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() possibly causing an NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support filtering of int type values in the filter function. Support mTLS for the gRPC channel. Add a yaml file suffix limit when reading UI templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService not supporting auto grouping. Rebuild the ElasticSearch client on top of its REST API. Fix the ElasticSearch storage plugin not working when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unify the config word namespace in the project. Switch the JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopting a wrong sort function. Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO not sorting data by Event.START_TIME and using a wrong pagination query. Fix the LogHandler of kafka-fetcher-plugin not recognizing the namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when the IP is the node IP in k8s-mesh. Support dynamic configurations for the openAPI endpoint name grouping rule. Add component definitions for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not being accurate, due to the cache read-then-clear mechanism conflicting with the lowered metrics flush period added in 8.7.0. Fix Slow SQL sampling not being accurate, due to TopN conflicting with the cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity (service/instance/endpoint) names in the MAL system (prometheus, native meter, open census, envoy metric service) not being controlled by the core's naming-control mechanism. Upgrade netty version to 4.1.68.Final to avoid CVE-2021-37136.  UI  Fix not-found error when refreshing the UI. Update endpointName to endpointId in the query trace condition. Add the Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fix the graph parameter of the query for topology metrics.  Documentation  Add a section in the Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in the otel-receiver doc about how to activate the default receiver. Reorganize the dynamic configuration doc. Add more description about meter configurations in the backend-meter doc. Fix typo in the endpoint-grouping-rules doc.
All issues and pull requests are here\n","excerpt":"8.8.0 Project  Split javaagent into skywalking-java repository. …","ref":"/docs/main/latest/en/changes/changes-8.8.0/","title":"8.8.0"},
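For reference, a minimal sketch of the trace-sampling-policy-settings.yml file mentioned in the 8.8.0 notes above, which replaces agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold (statically via agent-analyzer.default.traceSamplingPolicySettingsFile, dynamically via agent-analyzer.default.traceSamplingPolicy). The field names below simply mirror the two settings being replaced and are illustrative only, not the exact schema of the shipped file.

  # trace-sampling-policy-settings.yml -- illustrative sketch; field names assumed, not the shipped schema
  default:
    sampleRate: 10000                # mirrors the old sampleRate, where 10000 conventionally means sample everything
    slowTraceSegmentThreshold: -1    # mirrors the old slowTraceSegmentThreshold; a negative value deactivates it
  services:                          # hypothetical per-service overrides for the service dimension mentioned above
    - name: some-service
      sampleRate: 1000
      slowTraceSegmentThreshold: 5000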
Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","excerpt":"8.8.0 Project  Split javaagent into skywalking-java repository. …","ref":"/docs/main/v9.6.0/en/changes/changes-8.8.0/","title":"8.8.0"},{"body":"8.8.0 Project  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. 
[Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. 
Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","excerpt":"8.8.0 Project  Split javaagent into skywalking-java repository. …","ref":"/docs/main/v9.7.0/en/changes/changes-8.8.0/","title":"8.8.0"},{"body":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  Documentation All issues and pull requests are here\n","excerpt":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that …","ref":"/docs/main/latest/en/changes/changes-8.8.1/","title":"8.8.1"},{"body":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  Documentation All issues and pull requests are here\n","excerpt":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that …","ref":"/docs/main/next/en/changes/changes-8.8.1/","title":"8.8.1"},{"body":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  Documentation All issues and pull requests are here\n","excerpt":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that …","ref":"/docs/main/v9.1.0/en/changes/changes-8.8.1/","title":"8.8.1"},{"body":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  Documentation All issues and pull requests are here\n","excerpt":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that …","ref":"/docs/main/v9.2.0/en/changes/changes-8.8.1/","title":"8.8.1"},{"body":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  
UI  Delete duplicate calls for endpoint dependency.  Documentation All issues and pull requests are here\n","excerpt":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that …","ref":"/docs/main/v9.3.0/en/changes/changes-8.8.1/","title":"8.8.1"},{"body":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  Documentation All issues and pull requests are here\n","excerpt":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that …","ref":"/docs/main/v9.4.0/en/changes/changes-8.8.1/","title":"8.8.1"},{"body":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  Documentation All issues and pull requests are here\n","excerpt":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that …","ref":"/docs/main/v9.5.0/en/changes/changes-8.8.1/","title":"8.8.1"},{"body":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  Documentation All issues and pull requests are here\n","excerpt":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that …","ref":"/docs/main/v9.6.0/en/changes/changes-8.8.1/","title":"8.8.1"},{"body":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  Documentation All issues and pull requests are here\n","excerpt":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that …","ref":"/docs/main/v9.7.0/en/changes/changes-8.8.1/","title":"8.8.1"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. 
Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  
All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/latest/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. 
Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/next/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. 
Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/v9.1.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. 
Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. 
Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/v9.2.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. 
Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/v9.3.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. 
Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. 
Add Docker images for arm64 …","ref":"/docs/main/v9.4.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. 
Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/v9.5.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. 
Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/v9.6.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. 
Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. 
{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/latest/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/next/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.1.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.2.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.3.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.4.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.5.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.6.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.7.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. These CVEs only affect JDKs where JNDI is enabled by default. Note that using the JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids the CVEs. Upgrade maven-wrapper to 3.1.0 and maven to 3.8.4 for performance improvements and more native ARM support. Exclude unnecessary libs when building under JDK 9+. Migrate the base Docker image to eclipse-temurin, as adoptopenjdk is deprecated. Add an E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to the E2E test matrix for verification. Upgrade the Apache parent pom version to 25. Use the plugin versions defined by the Apache maven parent:  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0.
Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix an incompatibility with IntelliJ IDEA 2021.3.2 on M1. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for the Booster UI build. Improve CI with the new GHA “run failed jobs” feature. Fix ./mvnw compile not working if ./mvnw install has not been executed at least once. Add JD_PRESERVE_LINE_FEEDS=true to the official code style file. Upgrade OAP dependencies: gson (2.9.0), guava (31.1), jackson (2.13.2), protobuf-java (3.18.4), commons-io (2.7), postgresql (42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies (not used before). Upgrade webapp dependencies: gson (2.9.0), spring boot (2.6.6), jackson (2.13.2.2), spring cloud (2021.0.1), Apache httpclient (4.5.13).  OAP Server  Fix a potential NPE in OAL string match and a bug when the right-hand-side variable includes double quotes. Bump the Armeria version to 1.14.1 to fix a CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in the Satellite MetricsService. Fix the Can't split endpoint id into 2 parts bug for endpoint IDs: in TCP service mesh observability, the endpoint name doesn't exist in TCP traffic. Upgrade the H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend the column name override mechanism to work for ValueColumnMetadata. Introduce the new concept Layer and remove NodeType; for more details refer to v9-version-upgrade. Fix query sort metrics failure in H2 storage. Bump grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relations. Follow the protocol grammar and fix GCPhrase -> GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust the e2e case for the V9 core. Support ZGC GC time and count metric collecting. Sync proto buffer files from upstream Envoy (related to https://github.com/envoyproxy/envoy/pull/18955). Bump GraphQL-related dependencies to the latest versions. Add normal to the V9 service meta query. Support the scope=ALL catalog for metrics. Bump H2 to 2.1.210 to fix CVE-2022-23221. E2E: add the normal field to Service. Add the FreeSql component ID (3017) of the dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self-observing metrics. Add the env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for the cluster selectors zookeeper, consul, etcd and nacos. Doc update: configuration-vocabulary and backend-cluster, covering the env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add the Python MysqlClient component ID (7013) with mapping information. Support Java thread pool metrics analysis. Fix the IoTDB storage option inserting a null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump iotdb-session to 0.12.4. Bump the PostgreSQL driver to fix a CVE. Add the Guava EventBus component ID (123) of the Java agent. Add the OpenFunction component ID (5013). Expose the responseTimeout configuration of the ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same as in other scopes. (This may break 3rd-party integrations and existing alarm rule settings.) Add the Python FastAPI component ID (7014). Support all metrics from the MAL engine in the alarm core, including Prometheus, OC receiver and meter receiver. Allow updating non-metrics templates when the structure changed. Set the default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests.
Disable indexing for the binary-type field alarm_record.tags_raw_data in ElasticSearch storage. Fix the Zipkin receiver's wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify the module name receiver_zipkin to receiver-zipkin, and remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list queries by using the ElasticSearch scrolling API, and add metadataQueryBatchSize to configure the scrolling page size. Change the default value of metadataQueryMaxSize from 5000 to 10000. Replace the deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement the v9 UI template management protocol. Implement the process metadata query protocol. Expose more ElasticSearch health-check-related logs to help diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to the SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL sources. [Breaking Change] Remove SRC_ALL: 'All' from the OAL grammar tree. Remove the all_heatmap and all_percentile metrics. Fix ElasticSearch normal indices not being able to apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which gives the OAP server better performance when activating all analyzers by default. Activate the receiver-otel#enabledOcRules receiver with the k8s-node, oap and vm rules by default. Activate satellite and spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles by default. Activate the receiver-zabbix receiver with the agent rule by default. Replace the HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove the configuration restAcceptorPriorityDelta (env vars: SW_RECEIVER_SHARING_JETTY_DELTA, SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove the configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add the storage column attribute indexOnly, supporting ElasticSearch only indexing and not storing some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags and AbstractLogRecord.tags to reduce unnecessary storage. [Breaking Change] Remove the configuration restMinThreads (env vars: SW_CORE_REST_JETTY_MIN_THREADS, SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism: a new storage plugin can implement its own converter and get rid of the hard requirement of using a HashMap to communicate between the data object and the database's native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove the hard requirement of BASE64 encoding for binary fields. Add a complexity limitation for GraphQL queries to avoid malicious queries. Add Column.shardingKeyIdx to the column definition for BanyanDB (see the illustrative sketch after this entry).  The sharding key is used to group time series data per metric of one entity in one place (the same shard and/or the same row for a column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes the following timestamp values, so it should be sharded by service ID: [ServiceA (encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4]. BanyanDB is the first storage implementation supporting this. It keeps continuous time series metrics stored closely and compressed better. NOTICE: this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source updates. Implement the eBPF profiling query and data collection protocol.
[Breaking Change] Remove the deprecated responseCode from sources, including Service, ServiceInstance and Endpoint. Enhance endpoint dependency analysis to support cross-thread cases. Refactor span analysis code structures. Remove the isNotNormal service requirement when using an alias to merge the service topology from the client side; all RPC peer services seen from the client side are always normal services, which caused the topology to not be merged correctly. Fix the event type of exported data being incorrect; it was always EventType.TOTAL. Reduce redundant ThreadLocal usage in the MAL core. Improve MAL performance. Trim the tag's key and value in log queries. Refactor the IoTDB storage plugin, add IoTDBDataConverter and fix a ModifyCollectionInEnhancedForLoop bug. Bump iotdb-session to 0.12.5. Fix the configuration of the Aggregation and GC Count metrics for OAP self-observability. E2E: add verification of OAP eBPF Profiling. Let multiGet query without a tag value in the InfluxDB storage plugin. Adjust MAL for V9: remove some groups and add a new Service function for the custom delimiter. Add the service catalog DatabaseSlowStatement. Add the Error Prone Annotations dependency to suppress warnings that are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI templates have been redesigned totally; the GraphQL query is minimally compatible for metadata and metrics queries. Remove unused jars (log4j-api.jar) from the classpath. Bump the netty version to fix a CVE. Add a Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add the environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control whether users can edit UI templates. Add the self-observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruct the doc menu for v9. Update the backend-alarm.md doc: change the op “=” to “==”. Update the backend-meter.md doc. Add the <STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System> paper. Add an Academy menu for recommended articles. Remove the All source related documents and examples. Update Booster UI's dependency licenses. Add a profiling doc, and remove the service mesh intro doc (not necessary). Add a doc for the virtual database. Rewrite the UI introduction. Update the k8s-monitoring, backend-telemetry and v9-version-upgrade docs for v9.  All issues and pull requests are here\n","excerpt":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and …","ref":"/docs/main/latest/en/changes/changes-9.0.0/","title":"9.0.0"},
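As referenced in the 9.0.0 entry above, the sharding key is declared per column via shardingKeyIdx. The following is only a hedged, self-contained sketch of that idea: it defines a simplified stand-in for the storage Column annotation rather than the real OAP one, and the metric class and field names are assumptions for illustration.

```java
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

public class ShardingKeySketch {

    // Simplified stand-in for the storage @Column annotation (assumption, not the OAP definition).
    @Target(ElementType.FIELD)
    @Retention(RetentionPolicy.RUNTIME)
    @interface Column {
        String columnName();
        // -1 means "not part of the sharding key"; 0, 1, 2 ... define the key order.
        int shardingKeyIdx() default -1;
    }

    // Hypothetical metric model: rows sharing the same serviceId land in the same shard/row group,
    // so consecutive minutes of one service's gauge stay physically close in a BanyanDB-like store.
    static class ServiceCpmMetrics {
        @Column(columnName = "service_id", shardingKeyIdx = 0)
        String serviceId;

        @Column(columnName = "time_bucket")
        long timeBucket;

        @Column(columnName = "value")
        long value;
    }

    public static void main(String[] args) throws Exception {
        Column column = ServiceCpmMetrics.class
                .getDeclaredField("serviceId")
                .getAnnotation(Column.class);
        // Prints the sharding key position declared for service_id (0 in this sketch).
        System.out.println("sharding key index of service_id: " + column.shardingKeyIdx());
    }
}
```

The point of the design is that the key groups one entity's time series per metric, not that it spreads data across database instances or files.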
{"body":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics.
,{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options; check the details here. Remove the converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0 and msgpack 0.8.16 dependencies. Remove InfluxDB- and IoTDB-related code and E2E tests. Upgrade OAP dependencies: zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java to 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies: spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11. [IMPORTANT] Add the BanyanDB storage implementation. Note that BanyanDB is currently under active development and SHOULD NOT be used in a production cluster.  OAP Server  Add the component definition (ID=127) for Apache ShenYu (incubating). Fix the Zipkin receiver: decode spans error, missing Layer for V9, and wrong time bucket when generating Service and Endpoint. [Refactor] Move SQLDatabase (H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support the BanyanDB global index for entities; the Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in the columns of templates; many were added due to the analyzer's default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level; please delete the index/table ebpf_profiling_task and process_traffic. Fix events that can't split the service ID into 2 parts. Fix the OAP self-observability metric GC Time calculation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000. Webapp module (for UI): enable compression. [Breaking Change] Add the layer field to event; reporting an event without a layer is not allowed. Fix the ES flush thread stopping when the flush schedule task throws an exception, such as an ElasticSearch flush failure. Fix the ES BulkProcessor in BatchProcessEsDAO being initialized multiple times and creating multiple ES flush schedule tasks. HTTPServer supports handler registration with allowed HTTP methods. [Critical] Revert "Enhance DataCarrier#MultipleChannelsConsumer to add priority" to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect, due to the override order, when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove the build time from the OAP version. Add the data-generator module to run OAP in testing mode, generating mock data for testing. Support receiving Kubernetes processes from the gRPC protocol. Fix the problem that ES indices (TimeSeriesTable, e.g. endpoint_traffic, alarm_record) weren't created even after rerunning with init-mode; this problem caused the OAP server to fail to start when it had been down for more than a day. Support autocomplete tags in the traces query. [Breaking Change] Replace all **_JETTY_** configurations with **_REST_**. Add the "support eBPF profiling" field to the process entity. E2E: fix the log test missing verification of LAL and metrics. Enhance the Converter mechanism at the kernel level to make BanyanDB native features more effective. Add the TermsAggregation properties collect_mode and execution_hint. Add "execution_hint": "map", "collect_mode": "breadth_first" for aggregation and topology queries to improve performance 5-10x (a small illustrative sketch follows this entry). Clean up scroll contexts after use. Support autocomplete tags in the logs query. Enhance the deprecated MetricQuery(v1) getValues querying to an asynchronous concurrent query. Fix the pod match error when the service has multiple selectors in a Kubernetes environment. VM monitoring adapts to opentelemetry-collector 0.50.0. Add Envoy internal cost metrics. Remove the Layer concept from ServiceInstance. Remove unnecessary onCompleted on the gRPC onError callback. Remove the Layer concept from Process. Update to list all eBPF profiling schedulers without duration. Storage (ElasticSearch): add search options to tolerate non-existent indices. Fix the problem that MQ has the wrong Layer type. Fix the NoneStream model having wrong downsampling (was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support creating additional tables from a model. [Breaking Change] SQL Database: remove the SQL Database configs maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move the Tags list from Segment, Logs, Alarms to their additional table. [Breaking Change] Remove the total field in Trace, Log, Event, Browser log, and alarm list queries. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build to use SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch and Postgres storage options to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E testing. Fix searchable tags to rpc.status_code and http.status_code; status_code had been removed. Fix a scroll query failure exception. Add the profileDataQueryBatchSize config in ElasticSearch storage. Add APIs to query Pod logs on demand. Remove OAL for events. Simplify the index-name formatting logic in ES storage. Add an instance properties extractor in MAL. Support Zipkin trace collection and the Zipkin traces query API. [Breaking Change] The Zipkin receiver mechanism changes; traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview; fix the JVM GC Count calculation. Add the Apache ShenYu (incubating) component LOGO. Show more metrics on the service/instance/endpoint lists on the dashboards. Support average values of metrics on the service/instance/endpoint table widgets, with a pop-up linear graph. Fix the viewLogs button querying no data. Fix UTC when the page loads. Implement the eBPF profile widget on the dashboard. Optimize the trace widget. Avoid invalid queries for topology metrics. Add the alarm and log tag tips. Fix span details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set the dropdown for the Tab widget, init instance/endpoint relation selectors, update the sankey graph. Add the eBPF Profiling widget into the General service, Service Mesh and Kubernetes tabs. Fix jumping to the endpoint-relation dashboard template. Fix setting graph options.
Remove the Layer field from the Instance and Process. Fix the date time picker display when the hour is set to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page not needing to be re-rendered when the URL changes. Remove unexpected data when exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add the Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish translation. Visualize the OFF CPU eBPF profiling. Add the Spanish language to the UI. Sort spans by startTime or spanId in a segment. Visualize an on-demand log widget. Fix activating the correct tab index after renaming a Tabs name. FaaS dashboard supports on-demand logs (OpenFunction/functions-framework-go version > 0.3.0).  Documentation  Add the eBPF agent to the probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/latest/en/changes/changes-9.1.0/","title":"9.1.0"}
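The TermsAggregation entry above refers to two standard ElasticSearch terms-aggregation parameters. The sketch below only illustrates where they sit in a search request body; the field name and sizes are placeholders, not the OAP's actual topology query.

```java
// Illustrative only: shows the "execution_hint": "map" and
// "collect_mode": "breadth_first" parameters mentioned in the entry above.
// The field name and sizes are placeholders.
public final class TermsAggregationSketch {
    static final String AGG_REQUEST_BODY = """
        {
          "size": 0,
          "aggs": {
            "by_entity": {
              "terms": {
                "field": "entity_id",
                "size": 1000,
                "execution_hint": "map",
                "collect_mode": "breadth_first"
              }
            }
          }
        }
        """;

    public static void main(String[] args) {
        // In practice a storage client would POST this body to /<index>/_search;
        // printing it keeps the sketch self-contained and runnable.
        System.out.println(AGG_REQUEST_BODY);
    }
}
```

In ElasticSearch, breadth_first computes the top buckets before expanding sub-aggregations, and the map execution hint aggregates on term values in a map instead of using global ordinals; the entry reports that this combination speeds up aggregation and topology queries considerably.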
Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.1.0/en/changes/changes/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. 
Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. 
Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.2.0/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. 
[Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. 
[Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.3.0/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. 
Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. 
Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.4.0/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. 
Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. 
Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.5.0/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. 
Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. 
Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.6.0/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). 
Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. 
[Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.7.0/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. 
Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. [Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. 
SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","excerpt":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch …","ref":"/docs/main/latest/en/changes/changes-9.2.0/","title":"9.2.0"},{"body":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. 
Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. [Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. 
SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","excerpt":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch …","ref":"/docs/main/next/en/changes/changes-9.2.0/","title":"9.2.0"},{"body":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. 
{"body":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for the impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs (no function change). Bump up snakeyaml to 1.32 for fixing a CVE. Fix DurationUtils.convertToTimeBucket missing date format verification. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format (add a layer property).
Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache-related spans to provide metrics and slow commands for cache services from the client side. Optimize virtual database; fix a dynamic config watcher NPE when the default value is null. Remove the physical index existence check and keep only the template existence check, to avoid meaningless retry waits in no-init mode. Make sure the instance list is ordered in the TTL processor to avoid the TTL timer never running. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by ShardingSphere-Proxy. SQL-Database requires removing the tables log_tag/segment_tag/zipkin_query before OAP starts, if bumping up from previous releases. Fix the meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflicts when downsampling. Sort the readLabeledMetricsValues result forcibly in case the storage (database) doesn't return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which caused heavy traffic to the API server in some Kubernetes clusters; we should use the Get State and Start at Most Recent semantic instead of Start at Exact because we don't need the change history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify the time range condition of query services and DAO code to Duration. [Breaking Change] Remove the prometheus-fetcher plugin; please use OpenTelemetry to scrape Prometheus metrics and set up the SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0; (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds); (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds). Support Golang runtime metrics analysis. Add APISIX metrics monitoring. Support skywalking-client-js reporting an empty service version and page path; set the default version as latest and the default page path as / (root). Fix the error fetching data (/browser_app_page_pv0): Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tags' key=value: set the max length of column tags in tables log_tag/segment_tag/alarm_record_tag, column query in zipkin_query, and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bumping up from previous releases. Optimize the creation conditions of profiling tasks. Lazy-load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes; this is perfect when there are deployment changes and SkyWalking can react to the changes in real time. However, when the cluster has many events (such as in a large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking become unpredictable, i.e. SkyWalking might send massive requests to the Kubernetes API server, causing heavy load on the API server. This PR switches from the watcher mechanism to a polling mechanism: SkyWalking polls the metadata at a specified interval, so that the requests sent to the API server are predictable (~10 requests every interval, 3 minutes) and the request count is constant regardless of the cluster's changes. However, with this change SkyWalking can't react to the cluster changes in time, but the delay is acceptable in our case.
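To make the polling approach described above concrete, here is a minimal Java sketch using only the JDK scheduler. It is illustrative only and not the SkyWalking implementation; the fetchMetadata placeholder and the 3-minute interval are assumptions taken from the note above.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class MetadataPollerSketch {
    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // Instead of keeping a watch connection open and reacting to every cluster event,
        // poll the API server at a fixed interval (3 minutes per the change note above),
        // so the request rate stays constant regardless of how busy the cluster is.
        scheduler.scheduleWithFixedDelay(
            MetadataPollerSketch::fetchMetadata, 0, 3, TimeUnit.MINUTES);
    }

    private static void fetchMetadata() {
        // Hypothetical placeholder: list pods/services/endpoints via the Kubernetes
        // client and refresh the local metadata cache here.
        System.out.println("polling Kubernetes metadata...");
    }
}
```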
Optimize the query time of tasks in ProfileTaskCache. Fix metrics being put into the wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson-databind, snakeyaml, and gRPC dependencies. Support exporting Trace and Log through Kafka. Add a new config initialization mechanism for module providers. This is a ModuleManager lib kernel-level change. [Breaking Change] Support the new records query protocol; rename the column named service_id to entity_id to support different entities. Please re-create the top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler (for the Kafka channel). The gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules (support patterns). Move k8s-related MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics into individual definitions. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from the original Service, ServiceInstance, ServiceRelation, ServiceInstanceRelation. [Breaking Change] TCP-related source names and fields are changed; please refer to the latest oal/tcp.oal file. Do not log errors when failing to create an ElasticSearch index because the index is already created. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of a LAL script. (Optimization) Reduce the buffer size (queue) of MAL (only) metric streams: set the L1 queue size to 1/20 and the L2 queue size to 1/2. Support monitoring MySQL/PostgreSQL in cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0: adopt the new OR logical operator for the MeasureIDs query, BanyanDBProfileThreadSnapshotQueryDAO query, multiple Event conditions query, and Metrics query; simplify Group check and creation; partially apply UITemplate changes; support index_only; return CompletableFuture<Void> directly from the BanyanDB client; optimize data binary parse methods in *LogQueryDAO; support different indexType; support configuration for TTL and (block|segment) intervals. Elasticsearch storage: Provide the system environment variable SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS and support specifying the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support updating index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis: use the entry span's peer from the consumer side as the source service when there is no producer instrumentation (no cross-process reference). Refactor JDBC storage implementations to reuse logic. Fix ClassCastException in LoggingConfigWatcher. Support the span attached event concept in Zipkin and SkyWalking trace queries. Support span attached events on the Zipkin Lens UI. Force UTF-8 encoding in JsonLogHandler of the kafka-fetcher-plugin. Fix the max length of entity, instance and endpoint IDs to 512 in trace, log, profiling, and topN tables (JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up the Kafka client to 2.8.1 to fix CVE-2021-38153.
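For the MAL histogram bugfix quoted earlier (treating bucket boundaries as OpenTelemetry style rather than Prometheus style), the stated intervals translate into a simple bucket-assignment rule. The sketch below is a self-contained Java illustration of those boundaries only, under the interval definitions given in the note; it is not SkyWalking's internal code.

```java
import java.util.Arrays;

public class OtelBucketSketch {
    /**
     * Returns the bucket index for a value using the OpenTelemetry-style boundaries
     * quoted in the bugfix note above:
     *   i == 0                 -> (-infinity, bounds[0]]
     *   0 < i < bounds.length  -> (bounds[i-1], bounds[i]]
     *   i == bounds.length     -> (bounds[bounds.length-1], +infinity)
     */
    static int bucketIndex(double value, double[] explicitBounds) {
        for (int i = 0; i < explicitBounds.length; i++) {
            if (value <= explicitBounds[i]) {   // upper bound is inclusive
                return i;
            }
        }
        return explicitBounds.length;           // overflow bucket
    }

    public static void main(String[] args) {
        double[] bounds = {10, 50, 100};
        System.out.println("bounds = " + Arrays.toString(bounds));
        System.out.println(bucketIndex(10, bounds));   // 0: falls in (-inf, 10]
        System.out.println(bucketIndex(10.5, bounds)); // 1: falls in (10, 50]
        System.out.println(bucketIndex(500, bounds));  // 3: overflow bucket
    }
}
```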
Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support persisting longer logs as a text type in ElasticSearch, instead of a keyword, to avoid the length limitation. Fix the wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix the not-time-series model blocking OAP boot in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoking the backend with the missing parameter serviceIds. Changed the system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage-related variables. Fix ESEventQueryDAO missing the metric_table boolQuery criteria. Add a default entity name (_blank) if absent to avoid an NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamically configuring the sampling strategy in network profiling. Zipkin module supports BanyanDB storage. Zipkin traces query API: sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible (readable) from the database; once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations: due to the insert delay, the JDBC storage implementation would otherwise still generate another new insert statement.   [Breaking Change] Remove the core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record provides the timestamp (milliseconds) for BanyanDB, since the BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add a new column timestamp for tables profile_task_log/top_n_database_statement; this requires altering this column or removing these tables before OAP starts, if bumping up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add the specific analyzer to the template before index creation to avoid an update index error. Internal API: remove undocumented ElasticSearch API usage and use the documented one. Fix the BanyanDB.ShardingKey annotation missing in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing the transformation to the real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics read from the DB or cached. Dashboard: Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, the column's property indexOnly is not applied and cannot be updated. Update the trace_id field as storage only (cannot be queried) in the top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command indices.  UI  Fix: tab activated incorrectly when clicking the tab space. Add impala icon for the impala JDBC Java agent plugin. (Webapp) Bump up snakeyaml to 1.31 for fixing CVE-2022-25857. [Breaking Change] Migrate from Spring Web to Armeria; now you should use the environment variable SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and the environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don't take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes.
UI-template: Fix metrics calculation in the general-service/mesh-service/faas-function top-list dashboards. Update the MySQL dashboard to visualize collected slow SQLs. Add a virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples, to avoid users' confusion. Remove All from the endpoints selector. Enhance menu configurations to make them easier to change. Update the PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and CPU/memory usage rate panels in the General-Instance dashboard. Add the gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and CPU/memory usage rate panels in the FaaS-Instance dashboard. Revert logs on the trace widget. Add a sub-menu for virtual MQ. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove an unused icon. Support labeled values on the service/instance/endpoint list widgets. Add a menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and CPU/memory utilization panels to the General-Instance and FaaS-Instance dashboards. Enhance the legend of the metrics graph widget with the summary table. Add the Apache EventMesh logo file. Fix conditions for trace profiling. Fix the tag keys list and duration condition. Fix a typo. Fix condition logic for trace tree data. Enhance the tags component to search tags with the input value. Fix topology loading style. Fix the update metric processor for readRecords and remove readSampledRecords from the metrics selector. Add trace association for FaaS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement the task creation UI for the network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add a metadata-uid setup doc about the Kubernetes coordinator in cluster management. Add a doc for adding menus to Booster UI. Move general good-read blogs from Agent Introduction to Academy. Add a re-post for the blog Scaling with Apache SkyWalking in the academy list. Add a re-post for the blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add the Security Notice doc. Add new docs for the Report Span Attached Events data collecting protocol. Add new docs for the Record query protocol. Update Server Agents and Compatibility for the PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","excerpt":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component …","ref":"/docs/main/latest/en/changes/changes-9.3.0/","title":"9.3.0"},{"body":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for the impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs (no function change). Bump up snakeyaml to 1.32 for fixing a CVE. Fix DurationUtils.convertToTimeBucket missing date format verification. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format (add a layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs.
Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. 
Optimize the query time of tasks in ProfileTaskCache. Fix metrics being put into the wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson-databind, snakeyaml, and gRPC dependencies. Support exporting Trace and Log data through Kafka. Add a new config initialization mechanism for module providers. This is a kernel-level change of the ModuleManager lib. [Breaking Change] Support the new records query protocol; rename the column service_id to entity_id to support different entities. Please re-create the top_n_database_statement index/table. Remove improper self-observability metrics in JvmMetricsHandler (for the Kafka channel). The gRPC stream-canceling code is not logged as an error when the client cancels the stream; the client cancels the stream when the pod is terminated. [Breaking Change] Change the way MAL rules are loaded (support patterns). Move k8s-related MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics into individual definitions. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from the original Service, ServiceInstance, ServiceRelation, ServiceInstanceRelation. [Breaking Change] TCP-related source names and fields of TCP-related sources are changed; please refer to the latest oal/tcp.oal file. Do not log errors when creating an ElasticSearch index fails because the index already exists. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of a LAL script. (Optimization) Reduce the buffer (queue) size of MAL-only metric streams: set the L1 queue size to 1/20 and the L2 queue size to 1/2. Support monitoring MySQL/PostgreSQL in cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt the new OR logical operator for: MeasureIDs query, BanyanDBProfileThreadSnapshotQueryDAO query, multiple Event conditions query, Metrics query. Simplify Group check and creation. Partially apply UITemplate changes. Support index_only. Return CompletableFuture<Void> directly from the BanyanDB client. Optimize data binary parse methods in *LogQueryDAO. Support different indexType. Support configuration for TTL and (block|segment) intervals.  Elasticsearch storage: Provide the system environment variable SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS and support specifying the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support updating index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ topology analysis: use the entry span's peer from the consumer side as the source service when there is no producer instrumentation (no cross-process reference). Refactor JDBC storage implementations to reuse logic. Fix ClassCastException in LoggingConfigWatcher. Support the span attached event concept in Zipkin and SkyWalking trace queries. Support span attached events on the Zipkin Lens UI. Force UTF-8 encoding in JsonLogHandler of the kafka-fetcher-plugin. Fix the max length of entity, instance, and endpoint IDs to 512 in trace, log, profiling, and topN tables (JDBC storages); the value was 200 by default. Add component IDs (135, 136, 137) for EventMesh server and client-side plugins. Bump up the Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected.
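For the gRPC stream-canceling item above, the intent is to stop logging client-side cancellation (e.g. a terminating pod) at error level. A hedged sketch of that idea using the standard io.grpc.Status API; the class and handler names are made up for illustration:

```java
import io.grpc.Status;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative only: treat client-side cancellation as expected, keep error logging for the rest.
class StreamErrorHandlerSketch {
    private static final Logger LOG = LoggerFactory.getLogger(StreamErrorHandlerSketch.class);

    void onError(Throwable t) {
        Status status = Status.fromThrowable(t);
        if (status.getCode() == Status.Code.CANCELLED) {
            LOG.debug("gRPC stream cancelled by the client: {}", status.getDescription());
        } else {
            LOG.error("gRPC stream failed", t);
        }
    }
}
```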
Add LongText to support persisting longer logs as a text type in ElasticSearch, instead of a keyword, to avoid the length limitation. Fix the wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI; it was opaenapi. Fix non-time-series models blocking OAP boot in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoking the backend with the missing parameter serviceIds. Change system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage-related variables. Fix ESEventQueryDAO missing the metric_table boolQuery criteria. Add a default entity name (_blank) if absent, to avoid an NPE in decoding; this caused Can't split xxx id into 2 parts. Support dynamically configuring the sampling strategy in network profiling. Zipkin module supports BanyanDB storage. Zipkin traces query API: sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric was accessible (readable) from the database; once the insert execution is delayed due to scale, the cache loses efficacy. It only works for the last update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations; due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove the core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record provides the timestamp (in milliseconds) for BanyanDB, since a BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add a new column timestamp to tables profile_task_log/top_n_database_statement; this requires altering the column or removing these tables before OAP starts, if bumping up from previous releases. Fix Elasticsearch storage: in No-Sharding Mode, add the specific analyzer to the template before index creation to avoid an update-index error. Internal API: remove undocumented ElasticSearch API usage and use the documented one. Fix the BanyanDB.ShardingKey annotation missing in the generated OAL metrics classes. Fix Elasticsearch storage: the sortMetrics query missing the transformation to the real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics read from the DB or the cache. Dashboard: Metrics Persistent Cache Count. Self-Observability: Fix GC time calculation. Fix Elasticsearch storage: in No-Sharding Mode, the column property indexOnly is not applied and cannot be updated. Update the trace_id field as storage-only (cannot be queried) in the top_n_database_statement, top_n_cache_read_command, and top_n_cache_write_command indices.  UI  Fix: tab activated incorrectly when clicking the tab space. Add Impala icon for the Impala JDBC Java agent plugin. (Webapp) Bump up snakeyaml to 1.31 to fix CVE-2022-25857. [Breaking Change] Migrate from Spring Web to Armeria; now you should use the environment variable SW_OAP_ADDRESS to change the OAP backend service addresses, e.g. SW_OAP_ADDRESS=localhost:12800,localhost:12801, and the environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations no longer take effect. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in the general-service/mesh-service/faas-function top-list dashboards.
Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples, to avoid users' confusion. Remove All from the endpoints selector. Enhance menu configurations to make them easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and CPU/memory usage rate panels to the General-Instance dashboard. Add gateway APISIX menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and CPU/memory usage rate panels to the FaaS-Instance dashboard. Revert logs on the trace widget. Add a sub-menu for virtual MQ. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove an unused icon. Support labeled values on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and CPU/memory utilization panels to the General-Instance and FaaS-Instance dashboards. Enhance the legend of the metrics graph widget with the summary table. Add Apache EventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance the tags component to search tags with the input value. Fix topology loading style. Fix the update metric processor for readRecords and remove readSampledRecords from the metrics selector. Add trace association for FaaS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement the task creation UI for the network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about the Kubernetes coordinator in cluster management. Add a doc for adding menus to the booster UI. Move general good-read blogs from Agent Introduction to Academy. Add a re-post of the blog Scaling with Apache SkyWalking to the academy list. Add a re-post of the blog Diagnose Service Mesh Network Performance with eBPF to the academy list. Add Security Notice doc. Add new docs for the Report Span Attached Events data collecting protocol. Add new docs for the Record query protocol. Update Server Agents and Compatibility docs for the PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","excerpt":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component …","ref":"/docs/main/next/en/changes/changes-9.3.0/","title":"9.3.0"},{"body":"9.3.0 Project  Bump up the embedded swctl version in the OAP Docker image.  OAP Server  Add component ID (133) for the Impala JDBC Java agent plugin and component ID (134) for the Impala server. Use prepareStatement in H2SQLExecutor#getByIDs (no functional change). Bump up snakeyaml to 1.32 to fix a CVE. Fix DurationUtils.convertToTimeBucket missing date format verification. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format (add a layer property). Adapt ElasticSearch 8.1+, migrating from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs.
Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. 
Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. 
Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. 
Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","excerpt":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component …","ref":"/docs/main/v9.3.0/en/changes/changes/","title":"9.3.0"},{"body":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. 
Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. 
Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. 
Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. 
Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","excerpt":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component …","ref":"/docs/main/v9.4.0/en/changes/changes-9.3.0/","title":"9.3.0"},{"body":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. 
Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. 
Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. 
Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. 
Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","excerpt":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component …","ref":"/docs/main/v9.5.0/en/changes/changes-9.3.0/","title":"9.3.0"},{"body":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. 
Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. 
Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. 
Add LongText to support persisting longer logs as a text type in ElasticSearch, instead of a keyword, to avoid the length limitation. Fix the wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix the not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoking the backend with the missing parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage-related variables. Fix ESEventQueryDAO missing the metric_table boolQuery criteria. Add a default entity name (_blank) if absent to avoid an NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamically configuring the sampling strategy in network profiling. Zipkin module supports BanyanDB storage. Zipkin traces query API: sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process. This cache only worked when the metric is accessible (readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement (see the sketch below). [Breaking Change] Remove the core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record provides the timestamp (milliseconds) for BanyanDB, since the BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add the new column timestamp for tables profile_task_log/top_n_database_statement; requires altering this column or removing these tables before OAP starts, if bumping up from previous releases. Fix Elasticsearch storage: in No-Sharding Mode, add the specific analyzer to the template before index creation to avoid an update index error. Internal API: remove undocumented ElasticSearch API usage and use the documented one. Fix BanyanDB.ShardingKey annotation missing in the generated OAL metrics classes. Fix Elasticsearch storage: the sortMetrics query missed transforming to the real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: add counters for metrics read from the DB or cached. Dashboard: Metrics Persistent Cache Count. Self-Observability: fix GC Time calculation. Fix Elasticsearch storage: in No-Sharding Mode, a column's property indexOnly is not applied and cannot be updated. Update the trace_id field as storage only (cannot be queried) in the top_n_database_statement, top_n_cache_read_command, top_n_cache_write_command indices.  UI  Fix: tab activated incorrectly when clicking the tab space. Add an Impala icon for the Impala JDBC Java agent plugin. (Webapp) Bump up snakeyaml to 1.31 for fixing CVE-2022-25857. [Breaking Change]: migrate from Spring Web to Armeria; now you should use the environment variable SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use the environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don't take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: fix metrics calculation in the general-service/mesh-service/faas-function top-list dashboards. 
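A simplified, hypothetical sketch of the session-cache idea behind the ID-conflict fix above (the names are illustrative, not the actual MetricsPersistentWorker code): a metric ID already seen in the current session becomes an UPDATE even if the earlier INSERT has not been flushed yet, so the delayed batch cannot insert the same ID twice.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class MetricsSessionCache {
    enum Action { INSERT, UPDATE }

    // Metrics seen in this session, keyed by storage ID, with their last-touch timestamp.
    private final Map<String, Long> lastUpdateTimestamps = new ConcurrentHashMap<>();

    // Decide whether a metric should be inserted or updated in the next batch.
    Action prepare(String storageId, long now) {
        return lastUpdateTimestamps.put(storageId, now) == null ? Action.INSERT : Action.UPDATE;
    }

    // Expire entries not touched within the timeout so the session stays bounded.
    void expire(long now, long timeoutMillis) {
        lastUpdateTimestamps.values().removeIf(ts -> now - ts > timeoutMillis);
    }
}
```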
Update the MySQL dashboard to visualize collected slow SQLs. Add a virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples, to avoid the user's confusion. Remove All from the endpoints selector. Enhance menu configurations to make them easier to change. Update the PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels to the General-Instance dashboard. Add the gateway APISIX menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels to the FaaS-Instance dashboard. Revert logs on the trace widget. Add a sub-menu for virtual MQ. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove an unused icon. Support labeled values on the service/instance/endpoint list widgets. Add a menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to the General-Instance and FaaS-Instance dashboards. Enhance the legend of the metrics graph widget with the summary table. Add the Apache EventMesh logo file. Fix conditions for trace profiling. Fix the tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance the tags component to search tags with the input value. Fix topology loading style. Fix the update metric processor for readRecords and remove readSampledRecords from the metrics selector. Add trace association for FaaS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement a task creation UI for the network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add a metadata-uid setup doc about the Kubernetes coordinator in cluster management. Add a doc for adding menus to Booster UI. Move general good-read blogs from Agent Introduction to Academy. Add a re-post for the blog Scaling with Apache SkyWalking in the academy list. Add a re-post for the blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add the Security Notice doc. Add new docs for the Report Span Attached Events data collecting protocol. Add new docs for the Record query protocol. Update Server Agents and Compatibility for the PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","excerpt":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component …","ref":"/docs/main/v9.6.0/en/changes/changes-9.3.0/","title":"9.3.0"},{"body":"9.3.0 Project  Bump up the embedded swctl version in the OAP Docker image.  OAP Server  Add component ID(133) for the Impala JDBC Java agent plugin and component ID(134) for the Impala server. Use prepareStatement in H2SQLExecutor#getByIDs (no function change; see the sketch below). Bump up snakeyaml to 1.32 for fixing a CVE. Fix DurationUtils.convertToTimeBucket missing date format verification. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format (add the layer property). Adapt ElasticSearch 8.1+; migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. 
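The H2SQLExecutor#getByIDs item above replaces concatenated SQL with a parameterized query; a minimal JDBC sketch of that pattern, with a placeholder table and column rather than the actual executor code:

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class GetByIdsSketch {
    // Query rows whose id is in the given list, binding one placeholder per value.
    static List<String> getByIds(Connection conn, List<String> ids) throws SQLException {
        String placeholders = String.join(",", Collections.nCopies(ids.size(), "?"));
        String sql = "SELECT id FROM metrics_table WHERE id IN (" + placeholders + ")";
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            for (int i = 0; i < ids.size(); i++) {
                ps.setString(i + 1, ids.get(i)); // bind values instead of concatenating them
            }
            List<String> result = new ArrayList<>();
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    result.add(rs.getString("id"));
                }
            }
            return result;
        }
    }
}
```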
Update the MySQL dashboard to visualize collected slow SQLs. Add a virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples, to avoid the user's confusion. Remove All from the endpoints selector. Enhance menu configurations to make them easier to change. Update the PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels to the General-Instance dashboard. Add the gateway APISIX menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels to the FaaS-Instance dashboard. Revert logs on the trace widget. Add a sub-menu for virtual MQ. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove an unused icon. Support labeled values on the service/instance/endpoint list widgets. Add a menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to the General-Instance and FaaS-Instance dashboards. Enhance the legend of the metrics graph widget with the summary table. Add the Apache EventMesh logo file. Fix conditions for trace profiling. Fix the tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance the tags component to search tags with the input value. Fix topology loading style. Fix the update metric processor for readRecords and remove readSampledRecords from the metrics selector. Add trace association for FaaS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement a task creation UI for the network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add a metadata-uid setup doc about the Kubernetes coordinator in cluster management. Add a doc for adding menus to Booster UI. Move general good-read blogs from Agent Introduction to Academy. Add a re-post for the blog Scaling with Apache SkyWalking in the academy list. Add a re-post for the blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add the Security Notice doc. Add new docs for the Report Span Attached Events data collecting protocol. Add new docs for the Record query protocol. Update Server Agents and Compatibility for the PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","excerpt":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component …","ref":"/docs/main/v9.7.0/en/changes/changes-9.3.0/","title":"9.3.0"},{"body":"9.4.0 Project  Bump up the Zipkin and Zipkin Lens UI dependency to 2.24.0. Bump up the Apache parent pom version to 29. Bump up the Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up the Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Add Micrometer as a new component (ID=141). Refactor the session cache in MetricsPersistentWorker. Cache enhancement - don't read new metrics from the database in minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. 
// 1.3 the OAP cluster is rebalanced due to scaling // (2) the metrics are from the time after the timeOfLatestStabilitySts // (3) the metrics don't exist in the cache // the kernel should NOT try to load it from the database. // // Notice, about condition (2), // for the specific minute of booted successfully, the metrics are expected to load from database when // it doesn't exist in the cache. (See the sketch below.)  Remove the offset of metric session timeout according to worker creation sequence. Correct MetricsExtension annotation declarations in manual entities. Support component IDs' priority in process relation metrics. Remove abandoned logic in MergableBufferedData, which caused an unexpected no-update. Fix missing LastUpdateTimestamp setting that caused the metrics session to expire. Rename MAL rule spring-sleuth.yaml to spring-micrometer.yaml. Fix a memory leak in the Zipkin API. Remove the dependency of the refresh_interval of ElasticSearch indices on the elasticsearch/flushInterval config. Now it uses core/persistentPeriod + 5s as the refresh_interval for all indices instead. Change elasticsearch/flushInterval to 5s (was 15s). Optimize the flushInterval of the ElasticSearch BulkProcessor to avoid extra periodical flushes in continuous bulk streams. Fix: an unexpected dot is added when exp is a pure metric name and expPrefix != null. Support monitoring MariaDB. Remove measure/stream-specific interval settings in BanyanDB. Add global-specific settings used to override global configurations (e.g. segmentIntervalDays, blockIntervalHours) in BanyanDB. Use TTL-driven interval settings for the measure-default group in BanyanDB. Fix the wrong group of non time-relative metadata in BanyanDB. Refactor StorageData#id to the new StorageID object from a String type. Support multiple component IDs at the service topology level. Add the ElasticSearch.Keyword annotation to declare the target field type as keyword. [Breaking Change] Column component_id of service_relation_client_side and service_relation_server_side has been replaced by component_ids. Support priority definition in the component-libraries.yml. Enhance service topology query: when there are multiple components detected from the server side, the component type of the node is determined by the priority, which was random in the previous release. Remove component_id from service_instance_relation_client_side and service_instance_relation_server_side. Make the satellite E2E test more stable. Add Istio 1.16 to the test matrix. Register ValueColumn as Tag for Record in the BanyanDB storage plugin. Bump up Netty to 4.1.86. Remove unnecessary additional columns when the storage is in logical sharding mode. The cluster coordinator supports a watch mechanism for notifying RemoteClientManager and ServerStatusService. Fix ServiceMeshServiceDispatcher overwriting the ServiceDispatcher debug file when SW_OAL_ENGINE_DEBUG is enabled. Use groupBy and in operators to optimize topology query for the BanyanDB storage plugin. Support a server status watcher for MetricsPersistentWorker to check whether the metrics require initialization. Fix incorrect meter values when using the sumPerMinLabeld or sumHistogramPercentile MAL functions. Fix attached events not displayed when querying traces in the Zipkin Lens UI. Remove time_bucket for both Stream and Measure kinds in the BanyanDB plugin. Merge TIME_BUCKET of Metrics and Record into StorageData. Support no layer in the listServices query. Fix time_bucket of ServiceTraffic not set correctly in slowSql of MAL. Correct the TopN record query DAO of BanyanDB. Tweak interval settings of BanyanDB. 
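A hedged restatement of the quoted cache conditions as a small predicate; only timeOfLatestStabilitySts follows the comment above, the other names are hypothetical and this is not the actual MetricsPersistentWorker logic:

```java
public class MinuteCachePolicy {
    // Decide whether a minute-dimension metric needs to be loaded from the database.
    // Once the OAP is stable and the metric's time bucket is strictly after the latest
    // stability point, a cache miss means the metric is new and the DB read is skipped.
    static boolean shouldReadFromDatabase(boolean oapBooted,
                                          boolean minuteDimension,
                                          long timeOfLatestStabilitySts,
                                          long metricTimeBucket,
                                          boolean existsInCache) {
        boolean stabilityKnown = oapBooted && minuteDimension && timeOfLatestStabilitySts > 0;
        boolean newSinceStable = metricTimeBucket > timeOfLatestStabilitySts;
        if (stabilityKnown && newSinceStable && !existsInCache) {
            return false; // treat it as a brand-new metric; skip the DB round trip
        }
        return !existsInCache; // otherwise fall back to reading on a cache miss
    }
}
```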
Support monitoring AWS Cloud EKS. Bump the BanyanDB Java client to 0.3.0-rc1. Remove the id tag from measures. Add Banyandb.MeasureField to mark a column as a BanyanDB Measure field. Add BanyanDB.StoreIDTag to store a process's id for searching. [Breaking Change] The supported version of ShardingSphere-Proxy is upgraded from 5.1.2 to 5.3.1. Due to the changes of ShardingSphere's API, versions before 5.3.1 are not compatible. Add the eBPF network profiling E2E test for each storage. Fix TCP service instances lacking instance properties like pod and namespace, which causes Pod log not to work for TCP workloads. Add Python HBase happybase module component ID(94). Fix gRPC alarm not being able to update settings from the dynamic configuration source. Add batchOfBytes configuration to limit the size of the bulk flush. Add Python Websocket module component ID(7018). [Optional] Optimize single trace query performance by customizing routing in ElasticSearch. SkyWalking trace segments and Zipkin spans use the trace ID for routing. This is OFF by default, controlled by storage/elasticsearch/enableCustomRouting. Enhance the OAP HTTP server to support HTTPS. Remove the handler scan in the otel receiver; use manual initialization instead. Add aws-firehose-receiver to support collecting AWS CloudWatch metrics (OpenTelemetry format). Notice, there is no HTTPS/TLS setup support. Following the AWS Firehose request, it uses a proxy request (https://... instead of /aws/firehose/metrics), so there must be a proxy (Nginx, Envoy, etc.). Avoid Antlr dependencies' versions being different at compile time and runtime. Now PrometheusMetricConverter#escapedName also supports converting / to _ (see the sketch below). Add missing TCP throughput metrics. Refactor the @Column annotation: swap Column#name and ElasticSearch.Column#columnAlias and rename ElasticSearch.Column#columnAlias to ElasticSearch.Column#legacyName. Add Python HTTPX module component ID(7019). Migrate tests from JUnit 4 to JUnit 5. Refactor HTTP-based alarm plugins and extract common logic into HttpAlarmCallback. Support Amazon Simple Storage Service (Amazon S3) metrics monitoring. Support processing Sum metrics with the AGGREGATION_TEMPORALITY_DELTA case. Support Amazon DynamoDB monitoring. Support the Prometheus HTTP API and PromQL. Scope in the Entity of the Metrics query v1 protocol is not required and is corrected automatically; the scope is determined based on the metric itself. Add an explicit ReadTimeout for ConsulConfigurationWatcher to avoid IllegalArgumentException: Cache watchInterval=10sec >= networkClientReadTimeout=10000ms. Fix DurationUtils.getDurationPoints exceeding when startTimeBucket equals endTimeBucket. Support processing OpenTelemetry ExponentialHistogram metrics. Add FreeRedis component ID(3018).  UI  Add the Zipkin Lens UI to the webapp, and proxy it to context path /zipkin. Migrate the build tool from vue cli to Vite4. Fix Instance Relation and Endpoint Relation dashboards not showing up. Add a Micrometer icon. Update the MySQL UI to support MariaDB. Add an AWS menu for supporting AWS monitoring. Add the missing FastAPI logo. Update the log details page to support formatted display of JSON content. Fix build config. Avoid being unable to drag process nodes for the first time. Add the node folder into the ignore list. Add ElPopconfirm to component types. Add an iframe widget for the Zipkin UI. Optimize graph tooltips to make them more friendly. Bump json5 from 1.0.1 to 1.0.2. Add a websockets icon. Implement independent mode for widgets. Bump http-cache-semantics from 4.1.0 to 4.1.1. Update menus for OpenFunction. 
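A hypothetical sketch of the kind of metric-name escaping PrometheusMetricConverter#escapedName is described as doing above; the real converter's character set may differ:

```java
import java.util.regex.Pattern;

public class MetricNameEscaper {
    private static final Pattern ILLEGAL = Pattern.compile("[^a-zA-Z0-9_]");

    // Replace characters that are not valid in a metric identifier (now including '/') with '_'.
    static String escapedName(String name) {
        return ILLEGAL.matcher(name).replaceAll("_");
    }

    public static void main(String[] args) {
        System.out.println(escapedName("node/cpu.usage-total")); // node_cpu_usage_total
    }
}
```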
Add auto fresh to widgets independent mode. Fix: clear trace ID on the Log and Trace widgets after using association. Fix: reset duration for query conditions after time range changes. Add AWS S3 menu. Refactor: optimize side bar component to make it more friendly. Fix: remove duplicate popup message for query result. Add logo for HTTPX. Refactor: optimize the attached events visualization in the trace widget. Update BanyanDB client to 0.3.1. Add AWS DynamoDB menu. Fix: add auto period to the independent mode for widgets. Optimize menus and add Windows monitoring menu. Add a calculation for the cpm5dAvg. add a cpm5d calculation. Fix data processing error in the eBPF profiling widget. Support for double quotes in SlowSQL statements. Fix: the wrong position of the menu when clicking the topology node.  Documentation  Remove Spring Sleuth docs, and add Spring MicroMeter Observations Analysis with the latest Java agent side enhancement. Update monitoring MySQL document to add the MariaDB part. Reorganize the protocols docs to a more clear API docs. Add documentation about replacing Zipkin server with SkyWalking OAP. Add Lens UI relative docs in Zipkin trace section. Add Profiling APIs. Fix backend telemetry doc and so11y dashboard doc as the OAP Prometheus fetcher was removed since 9.3.0  All issues and pull requests are here\n","excerpt":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom …","ref":"/docs/main/latest/en/changes/changes-9.4.0/","title":"9.4.0"},{"body":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom version to 29. Bump up Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Adds Micrometer as a new component.(ID=141) Refactor session cache in MetricsPersistentWorker. Cache enhancement - don\u0026rsquo;t read new metrics from database in minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. // 1.3 the OAP cluster is rebalanced due to scaling // (2) the metrics are from the time after the timeOfLatestStabilitySts // (3) the metrics don't exist in the cache // the kernel should NOT try to load it from the database. // // Notice, about condition (2), // for the specific minute of booted successfully, the metrics are expected to load from database when // it doesn't exist in the cache.  Remove the offset of metric session timeout according to worker creation sequence. Correct MetricsExtension annotations declarations in manual entities. Support component IDs' priority in process relation metrics. Remove abandon logic in MergableBufferedData, which caused unexpected no-update. Fix miss set LastUpdateTimestamp that caused the metrics session to expire. Rename MAL rule spring-sleuth.yaml to spring-micrometer.yaml. Fix memory leak in Zipkin API. Remove the dependency of refresh_interval of ElasticSearch indices from elasticsearch/flushInterval config. Now, it uses core/persistentPeriod + 5s as refresh_interval for all indices instead. Change elasticsearch/flushInterval to 5s(was 15s). Optimize flushInterval of ElasticSearch BulkProcessor to avoid extra periodical flush in the continuous bulk streams. An unexpected dot is added when exp is a pure metric name and expPrefix != null. 
Support monitoring MariaDB. Remove measure/stream specific interval settings in BanyanDB. Add global-specific settings used to override global configurations (e.g segmentIntervalDays, blockIntervalHours) in BanyanDB. Use TTL-driven interval settings for the measure-default group in BanyanDB. Fix wrong group of non time-relative metadata in BanyanDB. Refactor StorageData#id to the new StorageID object from a String type. Support multiple component IDs in the service topology level. Add ElasticSearch.Keyword annotation to declare the target field type as keyword. [Breaking Change] Column component_id of service_relation_client_side and service_relation_server_side have been replaced by component_ids. Support priority definition in the component-libraries.yml. Enhance service topology query. When there are multiple components detected from the server side, the component type of the node would be determined by the priority, which was random in the previous release. Remove component_id from service_instance_relation_client_side and service_instance_relation_server_side. Make the satellite E2E test more stable. Add Istio 1.16 to test matrix. Register ValueColumn as Tag for Record in BanyanDB storage plugin. Bump up Netty to 4.1.86. Remove unnecessary additional columns when storage is in logical sharding mode. The cluster coordinator support watch mechanism for notifying RemoteClientManager and ServerStatusService. Fix ServiceMeshServiceDispatcher overwrite ServiceDispatcher debug file when open SW_OAL_ENGINE_DEBUG. Use groupBy and in operators to optimize topology query for BanyanDB storage plugin. Support server status watcher for MetricsPersistentWorker to check the metrics whether required initialization. Fix the meter value are not correct when using sumPerMinLabeld or sumHistogramPercentile MAL function. Fix cannot display attached events when using Zipkin Lens UI query traces. Remove time_bucket for both Stream and Measure kinds in BanyanDB plugin. Merge TIME_BUCKET of Metrics and Record into StorageData. Support no layer in the listServices query. Fix time_bucket of ServiceTraffic not set correctly in slowSql of MAL. Correct the TopN record query DAO of BanyanDB. Tweak interval settings of BanyanDB. Support monitoring AWS Cloud EKS. Bump BanyanDB Java client to 0.3.0-rc1. Remove id tag from measures. Add Banyandb.MeasureField to mark a column as a BanyanDB Measure field. Add BanyanDB.StoreIDTag to store a process\u0026rsquo;s id for searching. [Breaking Change] The supported version of ShardingSphere-Proxy is upgraded from 5.1.2 to 5.3.1. Due to the changes of ShardingSphere\u0026rsquo;s API, versions before 5.3.1 are not compatible. Add the eBPF network profiling E2E Test in the per storage. Fix TCP service instances are lack of instance properties like pod and namespace, which causes Pod log not to work for TCP workloads. Add Python HBase happybase module component ID(94). Fix gRPC alarm cannot update settings from dynamic configuration source. Add batchOfBytes configuration to limit the size of bulk flush. Add Python Websocket module component ID(7018). [Optional] Optimize single trace query performance by customizing routing in ElasticSearch. SkyWalking trace segments and Zipkin spans are using trace ID for routing. This is OFF by default, controlled by storage/elasticsearch/enableCustomRouting. 
Enhance OAP HTTP server to support HTTPS Remove handler scan in otel receiver, manual initialization instead Add aws-firehose-receiver to support collecting AWS CloudWatch metric(OpenTelemetry format). Notice, no HTTPS/TLS setup support. By following AWS Firehose request, it uses proxy request (https://... instead of /aws/firehose/metrics), there must be a proxy(Nginx, Envoy, etc.). Avoid Antlr dependencies' versions might be different in compile time and runtime. Now PrometheusMetricConverter#escapedName also support converting / to _. Add missing TCP throughput metrics. Refactor @Column annotation, swap Column#name and ElasticSearch.Column#columnAlias and rename ElasticSearch.Column#columnAlias to ElasticSearch.Column#legacyName. Add Python HTTPX module component ID(7019). Migrate tests from junit 4 to junit 5. Refactor http-based alarm plugins and extract common logic to HttpAlarmCallback. Support Amazon Simple Storage Service (Amazon S3) metrics monitoring Support process Sum metrics with AGGREGATION_TEMPORALITY_DELTA case Support Amazon DynamoDB monitoring. Support prometheus HTTP API and promQL. Scope in the Entity of Metrics query v1 protocol is not required and automatical correction. The scope is determined based on the metric itself. Add explicit ReadTimeout for ConsulConfigurationWatcher to avoid IllegalArgumentException: Cache watchInterval=10sec \u0026gt;= networkClientReadTimeout=10000ms. Fix DurationUtils.getDurationPoints exceed, when startTimeBucket equals endTimeBucket. Support process OpenTelemetry ExponentialHistogram metrics Add FreeRedis component ID(3018).  UI  Add Zipkin Lens UI to webapp, and proxy it to context path /zipkin. Migrate the build tool from vue cli to Vite4. Fix Instance Relation and Endpoint Relation dashboards show up. Add Micrometer icon. Update MySQL UI to support MariaDB. Add AWS menu for supporting AWS monitoring. Add missing FastAPI logo. Update the log details page to support the formatted display of JSON content. Fix build config. Avoid being unable to drag process nodes for the first time. Add node folder into ignore list. Add ElPopconfirm to component types. Add an iframe widget for zipkin UI. Optimize graph tooltips to make them more friendly. Bump json5 from 1.0.1 to 1.0.2. Add websockets icon. Implement independent mode for widgets. Bump http-cache-semantics from 4.1.0 to 4.1.1. Update menus for OpenFunction. Add auto fresh to widgets independent mode. Fix: clear trace ID on the Log and Trace widgets after using association. Fix: reset duration for query conditions after time range changes. Add AWS S3 menu. Refactor: optimize side bar component to make it more friendly. Fix: remove duplicate popup message for query result. Add logo for HTTPX. Refactor: optimize the attached events visualization in the trace widget. Update BanyanDB client to 0.3.1. Add AWS DynamoDB menu. Fix: add auto period to the independent mode for widgets. Optimize menus and add Windows monitoring menu. Add a calculation for the cpm5dAvg. add a cpm5d calculation. Fix data processing error in the eBPF profiling widget. Support for double quotes in SlowSQL statements. Fix: the wrong position of the menu when clicking the topology node.  Documentation  Remove Spring Sleuth docs, and add Spring MicroMeter Observations Analysis with the latest Java agent side enhancement. Update monitoring MySQL document to add the MariaDB part. Reorganize the protocols docs to a more clear API docs. Add documentation about replacing Zipkin server with SkyWalking OAP. 
Add Lens UI relative docs in Zipkin trace section. Add Profiling APIs. Fix backend telemetry doc and so11y dashboard doc as the OAP Prometheus fetcher was removed since 9.3.0  All issues and pull requests are here\n","excerpt":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom …","ref":"/docs/main/next/en/changes/changes-9.4.0/","title":"9.4.0"},{"body":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom version to 29. Bump up Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Adds Micrometer as a new component.(ID=141) Refactor session cache in MetricsPersistentWorker. Cache enhancement - don\u0026rsquo;t read new metrics from database in minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. // 1.3 the OAP cluster is rebalanced due to scaling // (2) the metrics are from the time after the timeOfLatestStabilitySts // (3) the metrics don't exist in the cache // the kernel should NOT try to load it from the database. // // Notice, about condition (2), // for the specific minute of booted successfully, the metrics are expected to load from database when // it doesn't exist in the cache.  Remove the offset of metric session timeout according to worker creation sequence. Correct MetricsExtension annotations declarations in manual entities. Support component IDs' priority in process relation metrics. Remove abandon logic in MergableBufferedData, which caused unexpected no-update. Fix miss set LastUpdateTimestamp that caused the metrics session to expire. Rename MAL rule spring-sleuth.yaml to spring-micrometer.yaml. Fix memory leak in Zipkin API. Remove the dependency of refresh_interval of ElasticSearch indices from elasticsearch/flushInterval config. Now, it uses core/persistentPeriod + 5s as refresh_interval for all indices instead. Change elasticsearch/flushInterval to 5s(was 15s). Optimize flushInterval of ElasticSearch BulkProcessor to avoid extra periodical flush in the continuous bulk streams. An unexpected dot is added when exp is a pure metric name and expPrefix != null. Support monitoring MariaDB. Remove measure/stream specific interval settings in BanyanDB. Add global-specific settings used to override global configurations (e.g segmentIntervalDays, blockIntervalHours) in BanyanDB. Use TTL-driven interval settings for the measure-default group in BanyanDB. Fix wrong group of non time-relative metadata in BanyanDB. Refactor StorageData#id to the new StorageID object from a String type. Support multiple component IDs in the service topology level. Add ElasticSearch.Keyword annotation to declare the target field type as keyword. [Breaking Change] Column component_id of service_relation_client_side and service_relation_server_side have been replaced by component_ids. Support priority definition in the component-libraries.yml. Enhance service topology query. When there are multiple components detected from the server side, the component type of the node would be determined by the priority, which was random in the previous release. Remove component_id from service_instance_relation_client_side and service_instance_relation_server_side. Make the satellite E2E test more stable. 
Add Istio 1.16 to test matrix. Register ValueColumn as Tag for Record in BanyanDB storage plugin. Bump up Netty to 4.1.86. Remove unnecessary additional columns when storage is in logical sharding mode. The cluster coordinator support watch mechanism for notifying RemoteClientManager and ServerStatusService. Fix ServiceMeshServiceDispatcher overwrite ServiceDispatcher debug file when open SW_OAL_ENGINE_DEBUG. Use groupBy and in operators to optimize topology query for BanyanDB storage plugin. Support server status watcher for MetricsPersistentWorker to check the metrics whether required initialization. Fix the meter value are not correct when using sumPerMinLabeld or sumHistogramPercentile MAL function. Fix cannot display attached events when using Zipkin Lens UI query traces. Remove time_bucket for both Stream and Measure kinds in BanyanDB plugin. Merge TIME_BUCKET of Metrics and Record into StorageData. Support no layer in the listServices query. Fix time_bucket of ServiceTraffic not set correctly in slowSql of MAL. Correct the TopN record query DAO of BanyanDB. Tweak interval settings of BanyanDB. Support monitoring AWS Cloud EKS. Bump BanyanDB Java client to 0.3.0-rc1. Remove id tag from measures. Add Banyandb.MeasureField to mark a column as a BanyanDB Measure field. Add BanyanDB.StoreIDTag to store a process\u0026rsquo;s id for searching. [Breaking Change] The supported version of ShardingSphere-Proxy is upgraded from 5.1.2 to 5.3.1. Due to the changes of ShardingSphere\u0026rsquo;s API, versions before 5.3.1 are not compatible. Add the eBPF network profiling E2E Test in the per storage. Fix TCP service instances are lack of instance properties like pod and namespace, which causes Pod log not to work for TCP workloads. Add Python HBase happybase module component ID(94). Fix gRPC alarm cannot update settings from dynamic configuration source. Add batchOfBytes configuration to limit the size of bulk flush. Add Python Websocket module component ID(7018). [Optional] Optimize single trace query performance by customizing routing in ElasticSearch. SkyWalking trace segments and Zipkin spans are using trace ID for routing. This is OFF by default, controlled by storage/elasticsearch/enableCustomRouting. Enhance OAP HTTP server to support HTTPS Remove handler scan in otel receiver, manual initialization instead Add aws-firehose-receiver to support collecting AWS CloudWatch metric(OpenTelemetry format). Notice, no HTTPS/TLS setup support. By following AWS Firehose request, it uses proxy request (https://... instead of /aws/firehose/metrics), there must be a proxy(Nginx, Envoy, etc.). Avoid Antlr dependencies' versions might be different in compile time and runtime. Now PrometheusMetricConverter#escapedName also support converting / to _. Add missing TCP throughput metrics. Refactor @Column annotation, swap Column#name and ElasticSearch.Column#columnAlias and rename ElasticSearch.Column#columnAlias to ElasticSearch.Column#legacyName. Add Python HTTPX module component ID(7019). Migrate tests from junit 4 to junit 5. Refactor http-based alarm plugins and extract common logic to HttpAlarmCallback. Support Amazon Simple Storage Service (Amazon S3) metrics monitoring Support process Sum metrics with AGGREGATION_TEMPORALITY_DELTA case Support Amazon DynamoDB monitoring. Support prometheus HTTP API and promQL. Scope in the Entity of Metrics query v1 protocol is not required and automatical correction. The scope is determined based on the metric itself. 
Add explicit ReadTimeout for ConsulConfigurationWatcher to avoid IllegalArgumentException: Cache watchInterval=10sec \u0026gt;= networkClientReadTimeout=10000ms. Fix DurationUtils.getDurationPoints exceed, when startTimeBucket equals endTimeBucket. Support process OpenTelemetry ExponentialHistogram metrics Add FreeRedis component ID(3018).  UI  Add Zipkin Lens UI to webapp, and proxy it to context path /zipkin. Migrate the build tool from vue cli to Vite4. Fix Instance Relation and Endpoint Relation dashboards show up. Add Micrometer icon. Update MySQL UI to support MariaDB. Add AWS menu for supporting AWS monitoring. Add missing FastAPI logo. Update the log details page to support the formatted display of JSON content. Fix build config. Avoid being unable to drag process nodes for the first time. Add node folder into ignore list. Add ElPopconfirm to component types. Add an iframe widget for zipkin UI. Optimize graph tooltips to make them more friendly. Bump json5 from 1.0.1 to 1.0.2. Add websockets icon. Implement independent mode for widgets. Bump http-cache-semantics from 4.1.0 to 4.1.1. Update menus for OpenFunction. Add auto fresh to widgets independent mode. Fix: clear trace ID on the Log and Trace widgets after using association. Fix: reset duration for query conditions after time range changes. Add AWS S3 menu. Refactor: optimize side bar component to make it more friendly. Fix: remove duplicate popup message for query result. Add logo for HTTPX. Refactor: optimize the attached events visualization in the trace widget. Update BanyanDB client to 0.3.1. Add AWS DynamoDB menu. Fix: add auto period to the independent mode for widgets. Optimize menus and add Windows monitoring menu. Add a calculation for the cpm5dAvg. add a cpm5d calculation. Fix data processing error in the eBPF profiling widget. Support for double quotes in SlowSQL statements. Fix: the wrong position of the menu when clicking the topology node.  Documentation  Remove Spring Sleuth docs, and add Spring MicroMeter Observations Analysis with the latest Java agent side enhancement. Update monitoring MySQL document to add the MariaDB part. Reorganize the protocols docs to a more clear API docs. Add documentation about replacing Zipkin server with SkyWalking OAP. Add Lens UI relative docs in Zipkin trace section. Add Profiling APIs. Fix backend telemetry doc and so11y dashboard doc as the OAP Prometheus fetcher was removed since 9.3.0  All issues and pull requests are here\n","excerpt":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom …","ref":"/docs/main/v9.4.0/en/changes/changes/","title":"9.4.0"},{"body":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom version to 29. Bump up Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Adds Micrometer as a new component.(ID=141) Refactor session cache in MetricsPersistentWorker. Cache enhancement - don\u0026rsquo;t read new metrics from database in minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. 
Add auto fresh to widgets independent mode. Fix: clear trace ID on the Log and Trace widgets after using association. Fix: reset duration for query conditions after time range changes. Add AWS S3 menu. Refactor: optimize side bar component to make it more friendly. Fix: remove duplicate popup message for query result. Add logo for HTTPX. Refactor: optimize the attached events visualization in the trace widget. Update BanyanDB client to 0.3.1. Add AWS DynamoDB menu. Fix: add auto period to the independent mode for widgets. Optimize menus and add Windows monitoring menu. Add a calculation for the cpm5dAvg. add a cpm5d calculation. Fix data processing error in the eBPF profiling widget. Support for double quotes in SlowSQL statements. Fix: the wrong position of the menu when clicking the topology node.  Documentation  Remove Spring Sleuth docs, and add Spring MicroMeter Observations Analysis with the latest Java agent side enhancement. Update monitoring MySQL document to add the MariaDB part. Reorganize the protocols docs to a more clear API docs. Add documentation about replacing Zipkin server with SkyWalking OAP. Add Lens UI relative docs in Zipkin trace section. Add Profiling APIs. Fix backend telemetry doc and so11y dashboard doc as the OAP Prometheus fetcher was removed since 9.3.0  All issues and pull requests are here\n","excerpt":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom …","ref":"/docs/main/v9.5.0/en/changes/changes-9.4.0/","title":"9.4.0"},{"body":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom version to 29. Bump up Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Adds Micrometer as a new component.(ID=141) Refactor session cache in MetricsPersistentWorker. Cache enhancement - don\u0026rsquo;t read new metrics from database in minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. // 1.3 the OAP cluster is rebalanced due to scaling // (2) the metrics are from the time after the timeOfLatestStabilitySts // (3) the metrics don't exist in the cache // the kernel should NOT try to load it from the database. // // Notice, about condition (2), // for the specific minute of booted successfully, the metrics are expected to load from database when // it doesn't exist in the cache.  Remove the offset of metric session timeout according to worker creation sequence. Correct MetricsExtension annotations declarations in manual entities. Support component IDs' priority in process relation metrics. Remove abandon logic in MergableBufferedData, which caused unexpected no-update. Fix miss set LastUpdateTimestamp that caused the metrics session to expire. Rename MAL rule spring-sleuth.yaml to spring-micrometer.yaml. Fix memory leak in Zipkin API. Remove the dependency of refresh_interval of ElasticSearch indices from elasticsearch/flushInterval config. Now, it uses core/persistentPeriod + 5s as refresh_interval for all indices instead. Change elasticsearch/flushInterval to 5s(was 15s). Optimize flushInterval of ElasticSearch BulkProcessor to avoid extra periodical flush in the continuous bulk streams. An unexpected dot is added when exp is a pure metric name and expPrefix != null. 
{"body":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP is running in no-init mode. Support binding TLS status as a part of the component for service topology. Fix component ID priority bug. Fix component ID of topology overlapping due to storage layer bugs. [Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. [Breaking Change] Sharding-MySQL implementations and tests are removed because the day-based rolling mechanism is now the default. Fix otel k8s-cluster rule to add the namespace dimension for MAL aggregation calculation (Deployment Status, Deployment Spec Replicas). Support continuous profiling feature. Support collecting process-level related metrics. 
Fix K8sRetag reading the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing the service label. Fix possible NPE when initializing IntList. Support parsing PromQL expressions that have empty labels in the braces for metadata queries. Support alarm metric OP !=. Support a metrics query flag indicating whether value == 0 represents actual zero or no data. Fix NPE when querying non-existent series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: Remove empty values from the query result; fix /api/v1/metadata param limit that could cause out of bound. Support monitoring the total-number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix CVE. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for the aws-firehose receiver. Bump up armeria to 1.23.1. Support Elasticsearch Monitoring. Fix PromQL HTTP API /api/v1/series response missing the service label when matching a metric. Support ServerSide TopN for BanyanDB. Add component ID for Jersey. Remove OpenCensus support and the related codes and docs, as it's sunsetting. Support dynamic configuration of searchableTracesTags. Support exportErrorStatusTraceOnly for exporting the error status trace segments through the Kafka channel. Add component ID for Grizzly. Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when querying continuous profiling task records. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression (MQE), which allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test cases. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex-based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to the fabric8 client. Envoy ALS generated relation metrics consider HTTP status codes >= 400 as an error at the client side. Add cause message field when querying a continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from the backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical (an illustrative ordering sketch follows this entry).  Choose User as the first node. If the User node is absent, choose the busiest node (the one with the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when the ReadRecords metric associates with a trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point show on the Line graph. Please clean the ui_template index in elasticsearch storage or the table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jersey icon. 
Fix: set endpoint and instance selectors with URL parameters correctly. Bump up dependency versions: icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23. Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression (MQE), which allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates. Add Continuous Profiling tab on Mesh layer UI-templates.  Documentation  Add Profiling related documentation. Add SUM_PER_MIN to the MAL documentation. Make the log related docs clearer, and easier to extend for supporting more formats. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","excerpt":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of …","ref":"/docs/main/latest/en/changes/changes-9.5.0/","title":"9.5.0"},
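The Topology widget refactor in the 9.5.0 UI list above is described as a small set of ordering rules: User first, otherwise the busiest node, then a left-to-right flow with alphabetical order within each level. Purely as an illustration of those rules, and not the widget's actual code (which lives in the UI project, not in Java), here is a minimal sketch with an assumed Node record:

// Illustrative only: the node-selection and level-ordering rules of the refactored
// Topology widget, reduced to a standalone sketch. Node and its call count are assumptions.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class TopologyOrderingSketch {
    record Node(String name, long calls) {}

    // Rules 1 and 2: "User" first if present, otherwise the busiest node (most calls).
    static Node pickRoot(List<Node> nodes) {
        return nodes.stream()
                .filter(n -> "User".equals(n.name()))
                .findFirst()
                .orElseGet(() -> nodes.stream()
                        .max(Comparator.comparingLong(Node::calls))
                        .orElseThrow());
    }

    // Rules 3 and 4: within one level of the left-to-right flow, list nodes alphabetically.
    static List<Node> orderLevel(List<Node> level) {
        List<Node> sorted = new ArrayList<>(level);
        sorted.sort(Comparator.comparing(Node::name));
        return sorted;
    }

    public static void main(String[] args) {
        List<Node> nodes = List.of(new Node("gateway", 120), new Node("db", 30), new Node("auth", 55));
        System.out.println(pickRoot(nodes));   // busiest node, since there is no "User"
        System.out.println(orderLevel(nodes)); // alphabetical within the level
    }
}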
{"body":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib being listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for the automatic release CI.  OAP Server  Add Neo4j component ID(112), language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap the deleteProperty API into the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add a maximum of 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline URI caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric does not exist. 
Remove E2E tests for Istio < 1.15 and ElasticSearch < 7.16.3; they might still work but are not supported as planned. Scroll all results in ElasticSearch storage and refactor scrolling logic, including Service, Instance, Endpoint, Process, etc. Improve the Kubernetes coordinator to remove Terminating OAP Pods in the cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the periods of training and syncing HTTP URI recognition patterns, and shorten the default periods to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with the name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add a comment for docker/.env to explain the usage. Fix the wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to the right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in the JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000 (was 1000). Accept length=4000 parameter values of the event (was 2000). Tolerate parameter values in illegal JSON format. Update BanyanDB Java Client to 0.4.0. Support aggregating Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed '&' symbols from shell scripts to avoid the OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data, we should return an array with 0s, but in #10616 an empty array is returned. Cache all service entities in memory for query (an illustrative sketch follows this entry). Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015), language: Golang. Fix index out of bounds exception in the aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configuration values in logs to avoid leaking sensitive info. Create the latest index before retrieving indexes by aliases to avoid the 404 exception; this just prevents some interference from manual operations. Add more Go VM metrics, as the new skywalking-go agent provides them since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in alarm-settings.yml. Support multiple configs for each hook type and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017), language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for the latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for the apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule when it's not explicitly set. Improve PromQL to allow multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other types of value results. 
Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not being refreshed. Fix wrong classpath that might cause OOM on startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. In the Alarm Rules configuration (alarm-settings.yml), add the expression field, remove the metrics-name/count/threshold/op/only-as-condition fields, and remove the composite-rules configuration. Check results in ALS per downstream/upstream instead of per log. Fix GraphQL query listInstances not using the endTime query. Do not start the server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for the endpoint list in the General-Service dashboard. Implement customized menus and a marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of the instance list and notices in the continuous profiling. Add a link to explain the expression metric, add units in the continuous profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed '&' symbols from shell scripts to avoid the web application server process running as a background process. Reset chart label. Fix service associated instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates. Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates. Apply MQE on Infra-Linux layer UI-templates. Apply MQE on Infra-Windows layer UI-templates. Apply MQE on Browser layer UI-templates. Implement MQE on the topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add the Go agent into the server agent documentation. Add a data unit description in the configuration of the continuous profiling policy. Remove the storage extension doc, as it is expired. Remove the how-to-add-a-menu doc, as SkyWalking supports the marketplace and the new backend-based setup. Separate contribution docs into a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix outdated docs about the Kafka fetcher. Remove 3rd-party fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","excerpt":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. …","ref":"/docs/main/latest/en/changes/changes-9.6.0/","title":"9.6.0"},
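The "Cache all service entities in memory for query" item in the 9.6.0 entry above implies a periodically rebuilt in-memory view that queries are served from. The following is a rough, hedged sketch of that idea only; the ServiceEntity record, the 60-second refresh, and the loader are assumptions for the sketch, not the OAP's actual classes or configuration.

// Illustrative only: an in-memory cache of service entities rebuilt on a schedule,
// so metadata queries never have to hit storage directly.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class ServiceEntityCacheSketch {
    record ServiceEntity(String id, String name, String layer) {}

    private final Map<String, ServiceEntity> cache = new ConcurrentHashMap<>();
    private final Supplier<Map<String, ServiceEntity>> loader;

    ServiceEntityCacheSketch(Supplier<Map<String, ServiceEntity>> loader) {
        this.loader = loader;
        // Rebuild the whole cache on a fixed schedule (60s is an assumed period).
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleWithFixedDelay(this::refresh, 0, 60, TimeUnit.SECONDS);
    }

    private void refresh() {
        Map<String, ServiceEntity> latest = loader.get();
        cache.keySet().retainAll(latest.keySet()); // drop services that no longer exist
        cache.putAll(latest);
    }

    ServiceEntity get(String serviceId) {
        return cache.get(serviceId); // queries are answered from memory only
    }
}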
Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","excerpt":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. …","ref":"/docs/main/v9.6.0/en/changes/changes/","title":"9.6.0"},{"body":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. 
Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. 
Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  
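To make the alarm-related breaking changes listed above more concrete (the new MQE expression field replacing metrics-name/count/threshold/op/only-as-condition, and the restructured hooks section), a post-9.6.0 alarm-settings.yml might look roughly like the sketch below. This is an illustration only: the rule name, threshold, webhook URL, and the exact hook field names are assumptions and should be verified against the alarm-settings.yml shipped with the release.

    rules:
      service_resp_time_rule:
        # MQE expression replaces the old metrics-name/threshold/op fields
        expression: sum(service_resp_time > 1000) >= 3
        period: 10
        silence-period: 5
        message: Response time of service {name} was over 1s at least 3 times in the last 10 minutes.
        # Reference hooks by "<type>.<name>"; omit to fall back to the default hooks
        hooks:
          - "webhook.default"

    hooks:
      webhook:
        default:
          is-default: true
          urls:
            - http://127.0.0.1/alarm/notify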
All issues and pull requests are here\n","excerpt":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. …","ref":"/docs/main/v9.7.0/en/changes/changes-9.6.0/","title":"9.6.0"},{"body":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. 
Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","excerpt":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy …","ref":"/docs/main/latest/en/changes/changes/","title":"9.7.0"},{"body":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. 
Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","excerpt":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy …","ref":"/docs/main/next/en/changes/changes-9.7.0/","title":"9.7.0"},{"body":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. 
Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","excerpt":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. 
Update Groovy …","ref":"/docs/main/v9.7.0/en/changes/changes/","title":"9.7.0"},{"body":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the fundamental theory of SkyWalking tracing models.\n  Blog about Scaling SkyWalking server automatically in kubernetes.\n  Blog about Use Profiling to Fix the Blind Spot of Distributed Tracing.\n  Blog about observing Istio + Envoy service mesh with ALS solution.\n  Blog about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).\n  ","excerpt":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the …","ref":"/docs/main/v9.0.0/en/academy/list/","title":"Academy"},{"body":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the fundamental theory of SkyWalking tracing models.\n  Blog about Scaling SkyWalking server automatically in kubernetes.\n  Blog about Use Profiling to Fix the Blind Spot of Distributed Tracing.\n  Blog about observing Istio + Envoy service mesh with ALS solution.\n  Blog about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).\n  ","excerpt":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the …","ref":"/docs/main/v9.1.0/en/academy/list/","title":"Academy"},{"body":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the fundamental theory of SkyWalking tracing models.\n  Blog about Scaling SkyWalking server automatically in kubernetes.\n  Blog about Use Profiling to Fix the Blind Spot of Distributed Tracing.\n  Blog about observing Istio + Envoy service mesh with ALS solution.\n  Blog about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).\n  Blog about using eBPF Profiling to pinpoint service mesh critical performance Impact.\n  ","excerpt":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the …","ref":"/docs/main/v9.2.0/en/academy/list/","title":"Academy"},{"body":"ActiveMQ classic monitoring SkyWalking leverages jmx prometheus exporter for collecting metrics data from ActiveMQ classic. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  ActiveMQ classic has extensive support for JMX to allow you to monitor and control the behavior of the broker via the JMX MBeans. The jmx prometheus exporter collects metrics data from ActiveMQ classic, this exporter is intended to be run as a Java Agent, exposing a HTTP server and serving metrics of the local JVM. OpenTelemetry Collector fetches metrics from jmx prometheus exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Enable JMX in activemq.xml, the JMX remote port defaults to 1616, you can change it through ACTIVEMQ_SUNJMX_START. The example for ActiveMQ configuration, refer to here. Set up jmx prometheus exporter which runs as a Java Agent(recommended) of ActiveMQ classic. If you work with docker, you also can set up a single server for exporter, refer to here(note the configuration of includeObjectNames). Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. 
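To illustrate the ActiveMQ data flow described above, a minimal OpenTelemetry Collector configuration along the following lines scrapes the JMX Prometheus exporter and pushes the metrics to the OAP's OTLP gRPC port; the host names, ports, and job name are placeholders, so treat this as a sketch rather than the documented configuration. On the OAP side, the matching activemq rules are enabled through the receiver-otel module (see the SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES variable mentioned in the changelog above).

    receivers:
      prometheus:
        config:
          scrape_configs:
            - job_name: activemq
              scrape_interval: 10s
              static_configs:
                - targets: ['activemq-host:8080']   # HTTP port exposed by the JMX Prometheus exporter

    processors:
      batch: {}

    exporters:
      otlp:
        endpoint: oap-host:11800   # SkyWalking OAP OTLP gRPC receiver
        tls:
          insecure: true

    service:
      pipelines:
        metrics:
          receivers: [prometheus]
          processors: [batch]
          exporters: [otlp]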
Config SkyWalking OpenTelemetry receiver.  ActiveMQ classic Monitoring ActiveMQ classic monitoring provides multidimensional metrics monitoring of ActiveMQ Exporter as Layer: ActiveMQ Service in the OAP. In each cluster, the broker is represented as Instance and the destination is represented as Endpoint.\nActiveMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     System Load Average Count meter_activemq_cluster_system_load_average The average system load, range:[0, 10000]. JMX Prometheus Exporter   Thread Count Count meter_activemq_cluster_thread_count Threads currently used by the JVM. JMX Prometheus Exporter   Init Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_init The initial amount of heap memory available. JMX Prometheus Exporter   Committed Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_committed The memory is guaranteed to be available for the JVM to use. JMX Prometheus Exporter   Used Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_used The amount of JVM heap memory currently in use. JMX Prometheus Exporter   Max Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_max The maximum possible size of the heap memory. JMX Prometheus Exporter   GC G1 Old Collection Count Count meter_activemq_cluster_gc_g1_old_collection_count The gc count of G1 Old Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Count Count meter_activemq_cluster_gc_g1_young_collection_count The gc count of G1 Young Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Old Collection Time ms meter_activemq_cluster_gc_g1_old_collection_time The gc time spent in G1 Old Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Time ms meter_activemq_cluster_gc_g1_young_collection_time The gc time spent in G1 Young Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC Parallel Old Collection Count Count meter_activemq_cluster_gc_parallel_old_collection_count The gc count of Parallel Old Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Count Count meter_activemq_cluster_gc_parallel_young_collection_count The gc count of Parallel Young Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Old Collection Time ms meter_activemq_cluster_gc_parallel_old_collection_time The gc time spent in Parallel Old Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Time ms meter_activemq_cluster_gc_parallel_young_collection_time The gc time spent in Parallel Young Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   Enqueue Rate Count/s meter_activemq_cluster_enqueue_rate Number of messages that have been sent to the cluster per second(JDK[6,8]). JMX Prometheus Exporter   Dequeue Rate Count/s meter_activemq_cluster_dequeue_rate Number of messages that have been acknowledged or discarded on the cluster per second. JMX Prometheus Exporter   Dispatch Rate Count/s meter_activemq_cluster_dispatch_rate Number of messages that has been delivered to consumers per second. JMX Prometheus Exporter   Expired Rate Count/s meter_activemq_cluster_expired_rate Number of messages that have been expired per second. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_cluster_average_enqueue_time The average time a message was held on this cluster. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_cluster_max_enqueue_time The max time a message was held on this cluster. 
JMX Prometheus Exporter    ActiveMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime sec meter_activemq_broker_uptime Uptime of the broker in day. JMX Prometheus Exporter   State  meter_activemq_broker_state If slave broker 1 else 0. JMX Prometheus Exporter   Current Connections Count meter_activemq_broker_current_connections The number of clients connected to the broker currently. JMX Prometheus Exporter   Current Producer Count Count meter_activemq_broker_current_producer_count The number of producers currently attached to the broker. JMX Prometheus Exporter   Current Consumer Count Count meter_activemq_broker_current_consumer_count The number of consumers consuming messages from the broker. JMX Prometheus Exporter   Producer Count Count meter_activemq_broker_producer_count Number of message producers active on destinations. JMX Prometheus Exporter   Consumer Count Count meter_activemq_broker_consumer_count Number of message consumers subscribed to destinations. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_broker_enqueue_count The total number of messages sent to the broker. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_broker_dequeue_count The total number of messages the broker has delivered to consumers. JMX Prometheus Exporter   Enqueue Rate Count/sec meter_activemq_broker_enqueue_rate The total number of messages sent to the broker per second. JMX Prometheus Exporter   Dequeue Rate Count/sec meter_activemq_broker_dequeue_rate The total number of messages the broker has delivered to consumers per second. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_broker_memory_percent_usage Percentage of configured memory used by the broker. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_broker_memory_percent_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Limit Bytes meter_activemq_broker_memory_limit Memory limited used for holding undelivered messages before paging to temporary storage. JMX Prometheus Exporter   Store Percent Usage % meter_activemq_broker_store_percent_usage Percentage of available disk space used for persistent message storage. JMX Prometheus Exporter   Store Limit Bytes meter_activemq_broker_store_limit Disk limited used for persistent messages before producers are blocked. JMX Prometheus Exporter   Temp Percent Usage Bytes meter_activemq_broker_temp_percent_usage Percentage of available disk space used for non-persistent message storage. JMX Prometheus Exporter   Temp Limit Bytes meter_activemq_broker_temp_limit Disk limited used for non-persistent messages and temporary data before producers are blocked. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_broker_average_message_size Average message size on this broker. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_broker_max_message_size Max message size on this broker. JMX Prometheus Exporter   Queue Size Count meter_activemq_broker_queue_size Number of messages on this broker that have been dispatched but not acknowledged. JMX Prometheus Exporter    ActiveMQ Destination Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Producer Count Count meter_activemq_destination_producer_count Number of producers attached to this destination. JMX Prometheus Exporter   Consumer Count Count meter_activemq_destination_consumer_count Number of consumers subscribed to this destination. 
JMX Prometheus Exporter   Topic Consumer Count Count meter_activemq_destination_topic_consumer_count Number of consumers subscribed to the topics. JMX Prometheus Exporter   Queue Size Count meter_activemq_destination_queue_size The number of messages that have not been acknowledged by a consumer. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_destination_memory_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_destination_memory_percent_usage Percentage of configured memory used by the destination. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_destination_enqueue_count The number of messages sent to the destination. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_destination_dequeue_count The number of messages the destination has delivered to consumers. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_destination_average_enqueue_time The average time a message was held on this destination. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_destination_max_enqueue_time The max time a message was held on this destination. JMX Prometheus Exporter   Dispatch Count Count meter_activemq_destination_dispatch_count Number of messages that has been delivered to consumers. JMX Prometheus Exporter   Expired Count Count meter_activemq_destination_expired_count Number of messages that have been expired. JMX Prometheus Exporter   Inflight Count Count meter_activemq_destination_inflight_count Number of messages that have been dispatched to but not acknowledged by consumers. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_destination_average_message_size Average message size on this destination. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_destination_max_message_size Max message size on this destination. JMX Prometheus Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/activemq/activemq-cluster.yaml, otel-rules/activemq/activemq-broker.yaml, otel-rules/activemq/activemq-destination.yaml. The ActiveMQ dashboard panel configurations are found in ui-initialized-templates/activemq.\n","excerpt":"ActiveMQ classic monitoring SkyWalking leverages jmx prometheus exporter for collecting metrics data …","ref":"/docs/main/next/en/setup/backend/backend-activemq-monitoring/","title":"ActiveMQ classic monitoring"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. 
There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment to do …","ref":"/docs/main/latest/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment to do …","ref":"/docs/main/next/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster …","ref":"/docs/main/v9.0.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. 
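As a rough sketch of the role-separated deployment described here (assuming the standard SW_CLUSTER, SW_CORE_ROLE, and SW_CLUSTER_K8S_LABEL settings; the names, labels, service account, and image tag are placeholders), the receiver Deployment could look like the fragment below, with a second, otherwise identical Deployment carrying role: aggregator labels and SW_CORE_ROLE=Aggregator. Both Deployments point the cluster label selector at the aggregator Pods so that L1 results are forwarded to the Aggregator role only.

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: oap-receiver
    spec:
      replicas: 2
      selector:
        matchLabels: {app: oap, role: receiver}
      template:
        metadata:
          labels: {app: oap, role: receiver}
        spec:
          serviceAccountName: skywalking-oap   # needs permission to list/watch OAP Pods
          containers:
            - name: oap
              image: apache/skywalking-oap-server:9.6.0
              env:
                - {name: SW_CLUSTER, value: kubernetes}
                - {name: SW_CORE_ROLE, value: Receiver}
                # Aggregator role selection: only Pods matching this label receive L2 traffic
                - {name: SW_CLUSTER_K8S_LABEL, value: "app=oap,role=aggregator"}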
They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster …","ref":"/docs/main/v9.1.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster …","ref":"/docs/main/v9.2.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster …","ref":"/docs/main/v9.3.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment. 
In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster …","ref":"/docs/main/v9.4.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment to do …","ref":"/docs/main/v9.5.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. 
L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment to do …","ref":"/docs/main/v9.6.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment to do …","ref":"/docs/main/v9.7.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. 
traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","excerpt":"Advanced Features  Set the settings through system properties for config file override. Read setting …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/advanced-features/","title":"Advanced Features"},{"body":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","excerpt":"Advanced Features  Set the settings through system properties for config file override. Read setting …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-features/","title":"Advanced Features"},{"body":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","excerpt":"Advanced Features  Set the settings through system properties for config file override. Read setting …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/advanced-features/","title":"Advanced Features"},{"body":"Advanced Features  Set the settings through system properties for config file override. Read setting override. 
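As a small illustration of the manual APIs referenced in this list (it assumes the apm-toolkit-trace dependency is on the classpath and the SkyWalking Java agent is attached at runtime; the class, method, and tag key are invented for the example), reading the current trace ID and tagging the active span looks roughly like:

    import org.apache.skywalking.apm.toolkit.trace.ActiveSpan;
    import org.apache.skywalking.apm.toolkit.trace.Trace;
    import org.apache.skywalking.apm.toolkit.trace.TraceContext;

    public class OrderService {

        @Trace(operationName = "OrderService/placeOrder")   // wraps the method in a local span
        public void placeOrder(String orderId) {
            // Correlate application logs with the current trace
            String traceId = TraceContext.traceId();
            System.out.println("handling order " + orderId + " in trace " + traceId);

            // Attach a business tag to the span that is currently active
            ActiveSpan.tag("order.id", orderId);
        }
    }

The toolkit classes are designed to degrade to harmless no-ops when no agent is attached, so such calls can stay in production code paths.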
Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","excerpt":"Advanced Features  Set the settings through system properties for config file override. Read setting …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/advanced-features/","title":"Advanced Features"},{"body":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","excerpt":"Advanced Features  Set the settings through system properties for config file override. Read setting …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/advanced-features/","title":"Advanced Features"},{"body":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. 
Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. 
*/ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","excerpt":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/advanced-reporters/","title":"Advanced Reporters"},{"body":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. 
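For readability, the inline decoder example above can be laid out as a complete class. This is the same package, interface and method shown in the documentation; only formatting and descriptive comments are added.

package org.apache.skywalking.apm.agent.sample;

import java.util.Map;

import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension;

/**
 * Custom decode class from the example above: it receives the raw values of
 * plugin.kafka.producer_config / plugin.kafka.producer_config_json and returns
 * the decoded (for example, decrypted) producer configuration.
 */
public class DecodeUtil implements KafkaConfigExtension {

    @Override
    public Map<String, String> decode(Map<String, String> config) {
        // Implement the custom decode logic here; this pass-through mirrors the documented example.
        return config;
    }
}

As described above, package this class (with its dependencies) as a jar, drop it into agent/plugins, and point plugin.kafka.decrypt_class (or SW_KAFKA_DECRYPT_CLASS) at it.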
Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. */ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","excerpt":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-reporters/","title":"Advanced Reporters"},{"body":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. 
Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. 
*/ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","excerpt":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/advanced-reporters/","title":"Advanced Reporters"},{"body":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. 
Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. */ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","excerpt":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/advanced-reporters/","title":"Advanced Reporters"},{"body":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. 
Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. 
*/ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","excerpt":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/advanced-reporters/","title":"Advanced Reporters"},{"body":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","excerpt":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern …","ref":"/docs/main/latest/en/setup/ai-pipeline/introduction/","title":"AI Pipeline"},{"body":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","excerpt":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern …","ref":"/docs/main/next/en/setup/ai-pipeline/introduction/","title":"AI Pipeline"},{"body":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. 
This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","excerpt":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern …","ref":"/docs/main/v9.5.0/en/setup/ai-pipeline/introduction/","title":"AI Pipeline"},{"body":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","excerpt":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern …","ref":"/docs/main/v9.6.0/en/setup/ai-pipeline/introduction/","title":"AI Pipeline"},{"body":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","excerpt":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern …","ref":"/docs/main/v9.7.0/en/setup/ai-pipeline/introduction/","title":"AI Pipeline"},{"body":"Alarm Alarm core is driven by a collection of rules, which are defined in config/alarm-settings.yml. There are three parts in alarm rule definition.\n Alarm rules. They define how metrics alarm should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after the alarm is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after the alarm is triggered.  
Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alarm rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can be also configured as the source of alarm, please refer to the event doc for more details. Include names. Entity names which are included in this rule. Please follow the entity name definitions. Exclude names. Entity names which are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. If both exclude-name list and exclude-name regex are set, both rules will take effect. Include labels. Metric labels which are included in this rule. Exclude labels. Metric labels which are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. A regex that exclude labels. If both the exclude-label list and exclude-label regex are set, both rules will take effect. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you would like to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags, or through system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter-system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the alarm by one or more of the values.\nFor example in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. 
Indicates if the rule can send notifications, or if it simply serves as an condition of the composite rule. Silence period. After the alarm is triggered in Time-N, there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Composite rules NOTE: Composite rules are only applicable to alarm rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. 
Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingtalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. For security purposes, you can config an optional secret for an individual webhook URL. The alarm message will be sent through HTTP post by application/json content type if you have configured Dingtalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. For security purposes, you can config an optional secret for an individual webhook URL. If you would like to direct a text to a user, you can config ats which is the feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. 
\u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotUpdate the settings dynamically Since 6.5.0, the alarm settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alarm rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alarm rule. If any attribute (metrics-name, op, threshold, period, count, etc.) of a rule is changed, the sliding window will be destroyed and re-created, causing the alarm of this specific rule to restart again.\n","excerpt":"Alarm Alarm core is driven by a collection of rules, which are defined in config/alarm-settings.yml. …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-alarm/","title":"Alarm"},{"body":"Alarm The alarm core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alarm rule definitions.\n Alarm rules. They define how metrics alarm should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alarm is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alarm is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alarm rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. 
If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Composite rules NOTE: Composite rules are only applicable to alarm rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. 
The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotUpdate the settings dynamically Since 6.5.0, the alarm settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alarm rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alarm rule. If any attribute (metrics-name, op, threshold, period, count, etc.) of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\n","excerpt":"Alarm The alarm core is driven by a collection of rules defined in config/alarm-settings.yml. There …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-alarm/","title":"Alarm"},{"body":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alerting is triggered.  
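Putting those two parts together, a minimal alarm-settings.yml skeleton could look like the sketch below. This is only an illustration under assumptions: the rule name demo_resp_time_rule, its expression and message, and the webhook URL are made up for the example and are not the shipped defaults; the field names follow the Rules and Hooks sections documented below.

rules:
  demo_resp_time_rule:
    # MQE expression; the root operation must be a Compare Operation (see Rules below)
    expression: avg(service_resp_time) > 1000
    # evaluate the metrics of the last 10 minutes
    period: 10
    message: Response time of service {name} is over 1s
    tags:
      level: WARNING
    # optional: route this rule to specific hooks instead of the global default hooks
    hooks:
      - "slack.default"
hooks:
  slack:
    default:
      is-default: true
      text-template: |-
        {
          "type": "section",
          "text": {
            "type": "mrkdwn",
            "text": ":alarm_clock: *Apache Skywalking Alarm* \n **%s**."
          }
        }
      webhooks:
        - https://hooks.slack.com/services/x/y/z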
Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. A MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate the request latency is slower than 1s. The typical illegal expressions are  avg(service_resp_time \u0026gt; 1000) + 1 expression root doesn\u0026rsquo;t use Compare Operation service_resp_time \u0026gt; 1000 expression return a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.    The metrics names in the expression could be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Hooks. Binding the specific names of the hooks when the alarm is triggered. The name format is {hookType}.{hookName} (slack.custom1 e.g.) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Expression is calculated based on the metric values(Value1 ~ Value7). 
For example, expression avg(service_resp_time) \u0026gt; 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) \u0026gt; 1000 and the result would be 1(true). Then the alarm would be triggered. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  NOTE:\n If the expression includes labeled metrics and the result has multiple labeled values(e.g. sum(service_percentile{_='0,1'} \u0026gt; 1000) \u0026gt;= 3), the alarm will be triggered if any of the labeled value results matches the condition 3 times(P50 \u0026gt; 1000 or P75 \u0026gt; 1000). One alarm rule targets a single entity level, such as a service-level expression (avg(service_resp_time) \u0026gt; 1000). Set entity names (Include/Exclude names\u0026hellip;) according to the metrics entity level, and do not mix metrics of different entity levels in the same expression, such as service metrics and endpoint metrics.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# A MQE expression and the root operation of the expression must be a Compare Operation.expression:sum((endpoint_sla / 100) \u0026lt; 75) \u0026gt;= 3# The length of time to evaluate the metricsperiod:10# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10message:Successful rate of endpoint {name} is lower than 75%tags:level:WARNINGservice_percent_rule:expression:sum((service_sla / 100) \u0026lt; 85) \u0026gt;= 4# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_cperiod:10message:Service {name} successful rate is less than 85%service_resp_time_percentile_rule:expression:sum(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} \u0026gt; 1000) \u0026gt;= 3period:10silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000meter_service_status_code_rule:expression:sum(aggregate_labels(meter_status_code{_=\u0026#39;4xx,5xx\u0026#39;},sum) \u0026gt; 10) \u0026gt; 3period:10count:3silence-period:5message:The request number of entity {name} 4xx and 5xx status is more than expected.hooks:- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;comp_rule:expression:(avg(service_sla / 100) \u0026gt; 80) * (avg(service_percentile{_=\u0026#39;0\u0026#39;}) \u0026gt; 1000) == 1period:10message:Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.tags:level:CRITICALhooks:- \u0026#34;slack.default\u0026#34;- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;Default alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-settings.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes. Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  
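As a rough illustration of how one of these defaults maps onto the MQE style shown above (the shipped alarm-settings.yml is authoritative; the rule name and exact values here are assumptions for the example), the first rule in the list could be expressed as:

service_resp_time_rule:
  # average response time of the service is over 1s (1000 ms) across the last 3 minutes
  expression: avg(service_resp_time) > 1000
  period: 3
  message: Average response time of service {name} is more than 1s in the last 3 minutes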
List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nHooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type after you have set up Webhook hooks as follows:\nwebhook:default:is-default:trueurls:- http://ip:port/xxx- http://ip:port/yyyThe JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type after you have set up gRPC hooks as follows:\ngRPC:default:is-default:truetarget-host:iptarget-port:portThe message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalk:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishu:default:is-default:truetext-template:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelink:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client-id:\u0026#34;dummy_client_id\u0026#34;client-secret:dummy_secret_keyaccess-token-url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage-url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group-ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot-name:robotPagerDuty The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerduty:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integration-keys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscord:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Expression expression string MQE expression   Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Tags tags key-value pair    Period Period int    Silence period silence-period int    Message message string    Hooks hooks string array     ","excerpt":"Alerting Alerting mechanism measures system performance according to the metrics of …","ref":"/docs/main/latest/en/setup/backend/backend-alarm/","title":"Alerting"},{"body":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. A MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate the request latency is slower than 1s. The typical illegal expressions are  avg(service_resp_time \u0026gt; 1000) + 1 expression root doesn\u0026rsquo;t use Compare Operation service_resp_time \u0026gt; 1000 expression return a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.    The metrics names in the expression could be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. 
If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Hooks. Binding the specific names of the hooks when the alarm is triggered. The name format is {hookType}.{hookName} (slack.custom1 e.g.) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Expression is calculated based on the metric values(Value1 ~ Value7). For example, expression avg(service_resp_time) \u0026gt; 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) \u0026gt; 1000 and the result would be 1(true). Then the alarm would be triggered. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  NOTE:\n If the expression includes labeled metrics and the result has multiple labeled values(e.g. sum(service_percentile{p='50,75'} \u0026gt; 1000) \u0026gt;= 3), the alarm will be triggered if any of the labeled value results matches the condition 3 times(P50 \u0026gt; 1000 or P75 \u0026gt; 1000). One alarm rule targets a single entity level, such as a service-level expression (avg(service_resp_time) \u0026gt; 1000). Set entity names (Include/Exclude names\u0026hellip;) according to the metrics entity level, and do not mix metrics of different entity levels in the same expression, such as service metrics and endpoint metrics.  
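For instance, a hypothetical labeled rule like the sketch below (the rule name is made up; the expression is the one from the note above) watches the P50 and P75 series at once and fires as soon as either series exceeds 1000 ms three times within the 10-minute window:

service_percentile_p50_p75_rule:
  # multiple labeled values: any single label (p50 or p75) matching 3 times triggers the alarm
  expression: sum(service_percentile{p='50,75'} > 1000) >= 3
  period: 10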
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# A MQE expression and the root operation of the expression must be a Compare Operation.expression:sum((endpoint_sla / 100) \u0026lt; 75) \u0026gt;= 3# The length of time to evaluate the metricsperiod:10# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10message:Successful rate of endpoint {name} is lower than 75%tags:level:WARNINGservice_percent_rule:expression:sum((service_sla / 100) \u0026lt; 85) \u0026gt;= 4# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_cperiod:10message:Service {name} successful rate is less than 85%service_resp_time_percentile_rule:expression:sum(service_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;} \u0026gt; 1000) \u0026gt;= 3period:10silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000meter_service_status_code_rule:expression:sum(aggregate_labels(meter_status_code{_=\u0026#39;4xx,5xx\u0026#39;},sum) \u0026gt; 10) \u0026gt; 3period:10count:3silence-period:5message:The request number of entity {name} 4xx and 5xx status is more than expected.hooks:- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;comp_rule:expression:(avg(service_sla / 100) \u0026gt; 80) * (avg(service_percentile{_=\u0026#39;0\u0026#39;}) \u0026gt; 1000) == 1period:10message:Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.tags:level:CRITICALhooks:- \u0026#34;slack.default\u0026#34;- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;Default alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nHooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. 
Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type after you have set up Webhook hooks as follows:\nwebhook:default:is-default:trueurls:- http://ip:port/xxx- http://ip:port/yyyThe JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type after you have set up gRPC hooks as follows:\ngRPC:default:is-default:truetarget-host:iptarget-port:portThe message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalk:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishu:default:is-default:truetext-template:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelink:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client-id:\u0026#34;dummy_client_id\u0026#34;client-secret:dummy_secret_keyaccess-token-url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage-url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group-ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot-name:robotPagerDuty The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerduty:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integration-keys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscord:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Expression expression string MQE expression   Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Tags tags key-value pair    Period Period int    Silence period silence-period int    Message message string    Hooks hooks string array     ","excerpt":"Alerting Alerting mechanism measures system performance according to the metrics of …","ref":"/docs/main/next/en/setup/backend/backend-alarm/","title":"Alerting"},{"body":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alerting is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. 
Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Count\u0026rsquo;s value(N) represents there are N values in the window matched the operator and threshold. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  Composite rules NOTE: Composite rules are only applicable to alerting rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. 
The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotPagerDuty Hook The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerDutyHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integrationKeys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Hook Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscordHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (metrics-name, op, threshold, period, count, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Include labels include-labels string array    Exclude labels exclude-labels string array    Include labels regex include-labels-regex string Java regex Pattern   Exclude labels regex exclude-labels-regex string Java regex Pattern   Tags tags key-value pair    Threshold threshold number    OP op operator example: \u0026gt;, \u0026gt;=   Period Period int    Count count int    Only as condition only-as-condition boolean    Silence period silence-period int    Message message string     ","excerpt":"Alerting Alerting mechanism measures system performance according to the metrics of …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-alarm/","title":"Alerting"},{"body":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alerting is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. 
ref: /docs/main/v9.2.0/en/setup/backend/backend-alarm/ (Alerting)
ref: /docs/main/v9.3.0/en/setup/backend/backend-alarm/ (Alerting)
ref: /docs/main/v9.4.0/en/setup/backend/backend-alarm/ (Alerting)
ref: /docs/main/v9.5.0/en/setup/backend/backend-alarm/ (Alerting)

Alerting
Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. The alerting kernel is an in-memory, time-window based queue.
The alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are two parts to alerting rule definitions.

- alerting rules. They define how metrics alerting should be triggered and what conditions should be considered.
- hooks. The list of hooks, which should be called after an alerting is triggered. (Both parts are sketched together just below.)
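A structural sketch of how those two parts sit side by side in config/alarm-settings.yml. The rule and the hook below are copies of the examples that appear later on this page, shown together here for orientation; they are not additional defaults.

rules:
  # Part 1: alarm rule definitions (see "Rules" below).
  service_percent_rule:
    expression: sum((service_sla / 100) < 85) >= 4
    period: 10
    message: Service {name} successful rate is less than 85%
hooks:
  # Part 2: notification hooks, grouped by hook type and then by hook name (see "Hooks" below).
  slack:
    default:
      is-default: true
      text-template: |-
        { "type": "section", "text": { "type": "mrkdwn", "text": ":alarm_clock: *Apache Skywalking Alarm* \n **%s**." } }
      webhooks:
        - https://hooks.slack.com/services/x/y/z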
Entity name
Defines the relation between scope and entity name.

- Service: Service name
- Instance: {Instance name} of {Service name}
- Endpoint: {Endpoint name} in {Service name}
- Service Relation: {Source service name} to {Dest service name}
- Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name}
- Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}

Rules
An alerting rule is made up of the following elements:

- Rule name. A unique name shown in the alarm message. It must end with _rule.
- Expression. An MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE, and the root operation of the expression must be a Compare Operation, which produces a 1 (true) or 0 (false) result. When the result is 1 (true), the alarm will be triggered. For example, avg(service_resp_time / 1000) > 1 is a valid expression to indicate the request latency is slower than 1s. Typical illegal expressions are:
  - avg(service_resp_time > 1000) + 1: the expression root doesn't use a Compare Operation.
  - service_resp_time > 1000: the expression returns a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.
  The metrics names in the expression can be found in the list of all potential metrics name doc.
- Include names. Entity names that are included in this rule. Please follow the entity name definitions.
- Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions.
- Include names regex. A regex that includes entity names. If both the include-name list and include-name regex are set, both rules will take effect.
- Exclude names regex. A regex that excludes entity names. Both rules will take effect if both the exclude-name list and exclude-name regex are set.
- Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.
- Period. The size of the metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time.
- Hooks. Binds the specific hook names to call when the alarm is triggered. The name format is {hookType}.{hookName} (e.g. slack.custom1), and the hooks must be defined in the hooks section of the alarm-settings.yml file. If no hook name is specified, the global default hook will be used.
- Silence period. After the alarm is triggered at Time-N (TN), there will be silence during TN -> TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.

For example, for a metric there is a sliding window like the following at T7.

T1 | T2 | T3 | T4 | T5 | T6 | T7
Value1 | Value2 | Value3 | Value4 | Value5 | Value6 | Value7

- Period (time points T1 ~ T7) is a series of continuous data points, one per minute. Notice, alerts are not supported above minute-by-minute periods, as they would not be efficient.
- Values (Value1 ~ Value7) are the values or labeled values for every time point.
- The expression is calculated based on the metric values (Value1 ~ Value7). For example, for the expression avg(service_resp_time) > 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) > 1000 and the result is 1 (true). Then the alarm would be triggered.
- In every minute, the window shifts automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.
NOTE:
- If the expression includes labeled metrics and the result has multiple labeled values (e.g. sum(service_percentile{_='0,1'} > 1000) >= 3), the alarm will be triggered if any of the labeled value results matches the condition 3 times (P50 > 1000 or P75 > 1000).
- One alarm rule targets a single entity level, such as a service-level expression (avg(service_resp_time) > 1000). Set entity names (include/exclude names, etc.) according to the metrics entity level; do not mix metrics of different entity levels in the same expression, such as service metrics and endpoint metrics.

rules:
  # Rule unique name, must be ended with `_rule`.
  endpoint_percent_rule:
    # An MQE expression; the root operation of the expression must be a Compare Operation.
    expression: sum((endpoint_sla / 100) < 75) >= 3
    # The length of time to evaluate the metrics
    period: 10
    # For how many checks the alarm keeps silent after it is triggered; defaults to the same as period.
    silence-period: 10
    message: Successful rate of endpoint {name} is lower than 75%
    tags:
      level: WARNING
  service_percent_rule:
    expression: sum((service_sla / 100) < 85) >= 4
    # [Optional] Default is to match all services in this metric.
    include-names:
      - service_a
      - service_b
    exclude-names:
      - service_c
    period: 10
    message: Service {name} successful rate is less than 85%
  service_resp_time_percentile_rule:
    expression: sum(service_percentile{_='0,1,2,3,4'} > 1000) >= 3
    period: 10
    silence-period: 5
    message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 > 1000, p75 > 1000, p90 > 1000, p95 > 1000, p99 > 1000
  meter_service_status_code_rule:
    expression: sum(aggregate_labels(meter_status_code{_='4xx,5xx'},sum) > 10) > 3
    period: 10
    count: 3
    silence-period: 5
    message: The request number of entity {name} 4xx and 5xx status is more than expected.
    hooks:
      - "slack.custom1"
      - "pagerduty.custom1"
  comp_rule:
    expression: (avg(service_sla / 100) > 80) * (avg(service_percentile{_='0'}) > 1000) == 1
    period: 10
    message: Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.
    tags:
      level: CRITICAL
    hooks:
      - "slack.default"
      - "slack.custom1"
      - "pagerduty.custom1"

Default alarm rules
For convenience's sake, we have provided a default alarm-settings.yml in our release. It includes the following rules (the first of these is sketched in the expression form below):

- Service average response time over 1s in the last 3 minutes.
- Service success rate lower than 80% in the last 2 minutes.
- Percentile of service response time over 1s in the last 3 minutes.
- Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex.
- Endpoint average response time over 1s in the last 2 minutes.
- Database access average response time over 1s in the last 2 minutes.
- Endpoint relation average response time over 1s in the last 2 minutes.
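For instance, the first default could be written roughly like this in the expression form. This is a sketch based on the description above, not the literal content of the bundled alarm-settings.yml.

rules:
  service_resp_time_rule:
    # The average of the per-minute response-time points in the 3-minute window is above 1s.
    expression: avg(service_resp_time) > 1000
    period: 3
    message: Average response time of service {name} is more than 1s in the last 3 minutes.
    tags:
      level: WARNING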
SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalk:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishu:default:is-default:truetext-template:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelink:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client-id:\u0026#34;dummy_client_id\u0026#34;client-secret:dummy_secret_keyaccess-token-url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage-url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group-ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot-name:robotPagerDuty The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerduty:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integration-keys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscord:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Expression expression string MQE expression   Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Tags tags key-value pair    Period period int    Silence period silence-period int    Message message string    Hooks hooks string array     ","excerpt":"Alerting Alerting mechanism measures system performance according to the metrics of …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-alarm/","title":"Alerting"},{"body":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. The alerting kernel is an in-memory, time-window-based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are two parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alarm is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. An MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides a 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate that the request latency is slower than 1s. The typical illegal expressions are: avg(service_resp_time \u0026gt; 1000) + 1 (the expression root doesn\u0026rsquo;t use a Compare Operation) and service_resp_time \u0026gt; 1000 (the expression returns a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value). The metrics names in the expression can be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both the include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both the exclude-name list and exclude-name regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.
If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default. Period. The size of the metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Hooks. Binding the specific names of the hooks when the alarm is triggered. The name format is {hookType}.{hookName} (e.g. slack.custom1) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same alarm (having the same ID in the same metrics name) may only be triggered once within a period. For example, for a metric there is a shifting window as follows at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period (time points T1 ~ T7) is a series of continuous minute-level data points. Note that alerts are not supported above minute-by-minute periods, as they would not be efficient. Values (Value1 ~ Value7) are the values or labeled values for every time point. The expression is calculated based on the metric values (Value1 ~ Value7). For example, for the expression avg(service_resp_time) \u0026gt; 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) \u0026gt; 1000 and the result would be 1(true). Then the alarm would be triggered. Every minute, the window shifts automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  NOTE:\n If the expression includes labeled metrics and the result has multiple labeled values (e.g. sum(service_percentile{_='0,1'} \u0026gt; 1000) \u0026gt;= 3), the alarm will be triggered if any of the labeled value results matches the condition 3 times (P50 \u0026gt; 1000 or P75 \u0026gt; 1000). One alarm rule targets a single entity level, such as a service-level expression (avg(service_resp_time) \u0026gt; 1000). Set entity names (Include/Exclude names\u0026hellip;) according to the metrics entity level; do not mix metrics of different entity levels in the same expression, such as service metrics and endpoint metrics.
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# A MQE expression and the root operation of the expression must be a Compare Operation.expression:sum((endpoint_sla / 100) \u0026lt; 75) \u0026gt;= 3# The length of time to evaluate the metricsperiod:10# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10message:Successful rate of endpoint {name} is lower than 75%tags:level:WARNINGservice_percent_rule:expression:sum((service_sla / 100) \u0026lt; 85) \u0026gt;= 4# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_cperiod:10message:Service {name} successful rate is less than 85%service_resp_time_percentile_rule:expression:sum(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} \u0026gt; 1000) \u0026gt;= 3period:10silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000meter_service_status_code_rule:expression:sum(aggregate_labels(meter_status_code{_=\u0026#39;4xx,5xx\u0026#39;},sum) \u0026gt; 10) \u0026gt; 3period:10count:3silence-period:5message:The request number of entity {name} 4xx and 5xx status is more than expected.hooks:- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;comp_rule:expression:(avg(service_sla / 100) \u0026gt; 80) * (avg(service_percentile{_=\u0026#39;0\u0026#39;}) \u0026gt; 1000) == 1period:10message:Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.tags:level:CRITICALhooks:- \u0026#34;slack.default\u0026#34;- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;Default alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nHooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. 
Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type after you have set up Webhook hooks as follows:\nwebhook:default:is-default:trueurls:- http://ip:port/xxx- http://ip:port/yyyThe JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type after you have set up gRPC hooks as follows:\ngRPC:default:is-default:truetarget-host:iptarget-port:portThe message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalk:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishu:default:is-default:truetext-template:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelink:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client-id:\u0026#34;dummy_client_id\u0026#34;client-secret:dummy_secret_keyaccess-token-url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage-url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group-ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot-name:robotPagerDuty The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerduty:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integration-keys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscord:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Expression expression string MQE expression   Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Tags tags key-value pair    Period Period int    Silence period silence-period int    Message message string    Hooks hooks string array     ","excerpt":"Alerting Alerting mechanism measures system performance according to the metrics of …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-alarm/","title":"Alerting"},{"body":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the problem of unbalanced messages received by OAP.\nIn this case, we mainly use memory queues for intermediate data storage.\nDeference Envoy Count, OAP performance could impact the Satellite transmit performance.\n   Envoy Instance Concurrent User ALS OPS Satellite CPU Satellite Memory     150 100 ~50K 1.2C 0.5-1.0G   150 300 ~80K 1.8C 1.0-1.5G   300 100 ~50K 1.4C 0.8-1.2G   300 300 ~100K 2.2C 1.3-2.0G   800 100 ~50K 1.5C 0.9-1.5G   800 300 ~100K 2.6C 1.7-2.7G   1500 100 ~50K 1.7C 1.4-2.4G   1500 300 ~100K 2.7C 2.3-3.0G   2300 150 ~50K 1.8C 1.9-3.1G   2300 300 ~90K 2.5C 2.3-4.0G   2300 500 ~110K 3.2C 2.8-4.7G    Detail Environment Using GKE Environment, helm to build cluster.\n   Module Version Replicate Count CPU Limit Memory Limit Description     OAP 8.9.0 6 12C 32Gi Using ElasticSearch as Storage   Satellite 0.4.0 1 8C 16Gi    ElasticSearch 7.5.1 3 8 16Gi     Setting 800 Envoy, 100K QPS ALS.\n   Module Environment Config Use Value Default Value Description Recommend Value     Satellite SATELLITE_QUEUE_PARTITION 50 4 Support several goroutines concurrently to consume the queue Satellite CPU number * 4-6, It could help improve throughput, but the default value also could handle 800 Envoy Instance and 100K QPS ALS message.   Satellite SATELLITE_QUEUE_EVENT_BUFFER_SIZE 3000 1000 The size of the queue in each concurrency This is related to the number of Envoys. If the number of Envoys is large, it is recommended to increase the value.   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME 3000 1000 When the Satellite receives the message, how long(millisecond) will the ALS message be merged into an Event. 
If a certain time delay is accepted, the value can be adjusted larger, which can effectively reduce CPU usage and make the Satellite more stable   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_SENDER_FLUSH_TIME 3000 1000 How long(millisecond) is the memory queue data for each Goroutine to be summarized and sent to OAP This depends on the amount of data in your queue, you can keep it consistent with SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME   OAP SW_CORE_GRPC_MAX_CONCURRENT_CALL 50 4 A link between Satellite and OAP, how many requests parallelism is supported Same with SATELLITE_QUEUE_PARTITION in Satellite    ","excerpt":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the …","ref":"/docs/skywalking-satellite/latest/en/setup/performance/als-load-balance/readme/","title":"ALS Load Balance"},{"body":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the problem of unbalanced messages received by OAP.\nIn this case, we mainly use memory queues for intermediate data storage.\nDeference Envoy Count, OAP performance could impact the Satellite transmit performance.\n   Envoy Instance Concurrent User ALS OPS Satellite CPU Satellite Memory     150 100 ~50K 1.2C 0.5-1.0G   150 300 ~80K 1.8C 1.0-1.5G   300 100 ~50K 1.4C 0.8-1.2G   300 300 ~100K 2.2C 1.3-2.0G   800 100 ~50K 1.5C 0.9-1.5G   800 300 ~100K 2.6C 1.7-2.7G   1500 100 ~50K 1.7C 1.4-2.4G   1500 300 ~100K 2.7C 2.3-3.0G   2300 150 ~50K 1.8C 1.9-3.1G   2300 300 ~90K 2.5C 2.3-4.0G   2300 500 ~110K 3.2C 2.8-4.7G    Detail Environment Using GKE Environment, helm to build cluster.\n   Module Version Replicate Count CPU Limit Memory Limit Description     OAP 8.9.0 6 12C 32Gi Using ElasticSearch as Storage   Satellite 0.4.0 1 8C 16Gi    ElasticSearch 7.5.1 3 8 16Gi     Setting 800 Envoy, 100K QPS ALS.\n   Module Environment Config Use Value Default Value Description Recommend Value     Satellite SATELLITE_QUEUE_PARTITION 50 4 Support several goroutines concurrently to consume the queue Satellite CPU number * 4-6, It could help improve throughput, but the default value also could handle 800 Envoy Instance and 100K QPS ALS message.   Satellite SATELLITE_QUEUE_EVENT_BUFFER_SIZE 3000 1000 The size of the queue in each concurrency This is related to the number of Envoys. If the number of Envoys is large, it is recommended to increase the value.   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME 3000 1000 When the Satellite receives the message, how long(millisecond) will the ALS message be merged into an Event. 
If a certain time delay is accepted, the value can be adjusted larger, which can effectively reduce CPU usage and make the Satellite more stable   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_SENDER_FLUSH_TIME 3000 1000 How long(millisecond) is the memory queue data for each Goroutine to be summarized and sent to OAP This depends on the amount of data in your queue, you can keep it consistent with SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME   OAP SW_CORE_GRPC_MAX_CONCURRENT_CALL 50 4 A link between Satellite and OAP, how many requests parallelism is supported Same with SATELLITE_QUEUE_PARTITION in Satellite    ","excerpt":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the …","ref":"/docs/skywalking-satellite/next/en/setup/performance/als-load-balance/readme/","title":"ALS Load Balance"},{"body":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the problem of unbalanced messages received by OAP.\nIn this case, we mainly use memory queues for intermediate data storage.\nDeference Envoy Count, OAP performance could impact the Satellite transmit performance.\n   Envoy Instance Concurrent User ALS OPS Satellite CPU Satellite Memory     150 100 ~50K 1.2C 0.5-1.0G   150 300 ~80K 1.8C 1.0-1.5G   300 100 ~50K 1.4C 0.8-1.2G   300 300 ~100K 2.2C 1.3-2.0G   800 100 ~50K 1.5C 0.9-1.5G   800 300 ~100K 2.6C 1.7-2.7G   1500 100 ~50K 1.7C 1.4-2.4G   1500 300 ~100K 2.7C 2.3-3.0G   2300 150 ~50K 1.8C 1.9-3.1G   2300 300 ~90K 2.5C 2.3-4.0G   2300 500 ~110K 3.2C 2.8-4.7G    Detail Environment Using GKE Environment, helm to build cluster.\n   Module Version Replicate Count CPU Limit Memory Limit Description     OAP 8.9.0 6 12C 32Gi Using ElasticSearch as Storage   Satellite 0.4.0 1 8C 16Gi    ElasticSearch 7.5.1 3 8 16Gi     Setting 800 Envoy, 100K QPS ALS.\n   Module Environment Config Use Value Default Value Description Recommend Value     Satellite SATELLITE_QUEUE_PARTITION 50 4 Support several goroutines concurrently to consume the queue Satellite CPU number * 4-6, It could help improve throughput, but the default value also could handle 800 Envoy Instance and 100K QPS ALS message.   Satellite SATELLITE_QUEUE_EVENT_BUFFER_SIZE 3000 1000 The size of the queue in each concurrency This is related to the number of Envoys. If the number of Envoys is large, it is recommended to increase the value.   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME 3000 1000 When the Satellite receives the message, how long(millisecond) will the ALS message be merged into an Event. 
If a certain time delay is accepted, the value can be adjusted larger, which can effectively reduce CPU usage and make the Satellite more stable   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_SENDER_FLUSH_TIME 3000 1000 How long(millisecond) is the memory queue data for each Goroutine to be summarized and sent to OAP This depends on the amount of data in your queue, you can keep it consistent with SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME   OAP SW_CORE_GRPC_MAX_CONCURRENT_CALL 50 4 A link between Satellite and OAP, how many requests parallelism is supported Same with SATELLITE_QUEUE_PARTITION in Satellite    ","excerpt":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/performance/als-load-balance/readme/","title":"ALS Load Balance"},{"body":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and Service Mesh Traffic(Access Log in gRPC) are able to be analyzed by OAL, to build metrics of services, service instances and endpoints, and to build topology/dependency of services, service instances and endpoints(traces-oriented analysis only).\nThe spans of traces relative with RPC, such as HTTP, gRPC, Dubbo, RocketMQ, Kafka, would be converted to service input/output traffic, like access logs collected from service mesh. Both of those traffic would be cataloged as the defined sources in the Observability Analysis Language engine.\nThe metrics are customizable through Observability Analysis Language(OAL) scripts, and the topology/dependency is built by the SkyWalking OAP kernel automatically without explicit OAL scripts.\nObservability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nOAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically. Don\u0026rsquo;t expect to mount the changes of those scripts in the runtime. If your OAP servers are running in a cluster mode, these script defined metrics should be aligned.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. 
In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. 
The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. 
service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and …","ref":"/docs/main/latest/en/concepts-and-designs/oal/","title":"Analysis Native Streaming Traces and Service Mesh Traffic"},{"body":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and Service Mesh Traffic(Access Log in gRPC) are able to be analyzed by OAL, to build metrics of services, service instances and endpoints, and to build topology/dependency of services, service instances and endpoints(traces-oriented analysis only).\nThe spans of traces relative with RPC, such as HTTP, gRPC, Dubbo, RocketMQ, Kafka, would be converted to service input/output traffic, like access logs collected from service mesh. Both of those traffic would be cataloged as the defined sources in the Observability Analysis Language engine.\nThe metrics are customizable through Observability Analysis Language(OAL) scripts, and the topology/dependency is built by the SkyWalking OAP kernel automatically without explicit OAL scripts.\nObservability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nOAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically. Don\u0026rsquo;t expect to mount the changes of those scripts in the runtime. If your OAP servers are running in a cluster mode, these script defined metrics should be aligned.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   
instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile2(10);\n percentile (deprecated since 10.0.0) is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. percentile2 Since 10.0.0, the percentile function has been instead by percentile2. The percentile2 function is a labeled-value metric with default label name p and label values 50,75,90,95,99. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. 
In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile2(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. 
service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and …","ref":"/docs/main/next/en/concepts-and-designs/oal/","title":"Analysis Native Streaming Traces and Service Mesh Traffic"},{"body":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and Service Mesh Traffic(Access Log in gRPC) are able to be analyzed by OAL, to build metrics of services, service instances and endpoints, and to build topology/dependency of services, service instances and endpoints(traces-oriented analysis only).\nThe spans of traces relative with RPC, such as HTTP, gRPC, Dubbo, RocketMQ, Kafka, would be converted to service input/output traffic, like access logs collected from service mesh. Both of those traffic would be cataloged as the defined sources in the Observability Analysis Language engine.\nThe metrics are customizable through Observability Analysis Language(OAL) scripts, and the topology/dependency is built by the SkyWalking OAP kernel automatically without explicit OAL scripts.\nObservability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nOAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically. Don\u0026rsquo;t expect to mount the changes of those scripts in the runtime. If your OAP servers are running in a cluster mode, these script defined metrics should be aligned.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   
instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate is expressed as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic; the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and the denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the result is the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the heatmap of all incoming requests. Parameter (1) is the precision of the latency calculation; in the above case, 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, the 21 (param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, and 2000+ms.\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status (success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, introduced in 7.0.0. As a metric with multiple values, it can be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is the precision of the percentile calculation; in the above case, 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for the storage implementor, alarm and query modules. Type inference is supported by the core.\nGroup All metrics data will be grouped by Scope.ID and the min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is the same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of the source are statically typed. In some cases, the type required by the filter expression or aggregation function doesn\u0026rsquo;t match the type in the source; for example, a tag value in the source is a String, while most aggregation calculations require a numeric type.\nCast expressions are provided for this purpose.\n (str-\u0026gt;long) or (long), casts string type into long. (str-\u0026gt;int) or (int), casts string type into int.  
mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/oal/","title":"Analysis Native Streaming Traces and Service Mesh Traffic"},{"body":" COMMUNITY SkyWalking has received contributions from 882 individuals now.   Apache SkyWalking    Application performance monitor tool for distributed systems, especially designed for microservices, cloud native and container-based (Kubernetes) architectures.  
Quick start  Live demo  Username: skywalking Password: skywalking Go to native UI    Preview metrics on Grafana        Agent for Service       Topology       Trace       eBPF Profiling       Database       Kubernetes       Linux       Service Mesh          Agent for Service  Topology  Trace  eBPF Profiling  Database  Kubernetes  Linux  Service Mesh    All-in-one APM solution   Distributed Tracing  End-to-end distributed tracing. Service topology analysis, service-centric observability and APIs dashboards.   Agents for your stack  Java, .Net Core, PHP, NodeJS, Golang, LUA, Rust, C++, Client JavaScript and Python agents with active development and maintenance.   eBPF early adoption  Rover agent works as a monitor and profiler powered by eBPF to monitor Kubernetes deployments and diagnose CPU and network performance.    Scaling  100+ billion telemetry data could be collected and analyzed from one SkyWalking cluster.   Mature Telemetry Ecosystems Supported  Metrics, Traces, and Logs from mature ecosystems are supported, e.g. Zipkin, OpenTelemetry, Prometheus, Zabbix, Fluentd   Native APM Database  BanyanDB, an observability database, created in 2022, aims to ingest, analyze and store telemetry/observability data.    Consistent Metrics Aggregation  SkyWalking native meter format and widely known metrics format(OpenCensus, OTLP, Telegraf, Zabbix, e.g.) are processed through the same script pipeline.   Log Management Pipeline  Support log formatting, extract metrics, various sampling policies through script pipeline in high performance.   Alerting and Telemetry Pipelines  Support service-centric, deployment-centric, API-centric alarm rule setting. Support forwarding alarms and all telemetry data to 3rd party.      Events \u0026amp; Blogs  Welcome Zixin Zhou as new committer Mon, Apr 15, 2024 Zixin Zhou(GitHub ID, CodePrometheus[1]) began the code contributions since Oct 28, 2023. Up to …\n   Release Apache SkyWalking Eyes 0.6.0 Fri, Apr 12, 2024 SkyWalking Eyes 0.6.0 is released. Go to downloads page to find release tars. Add | as comment …\n   Release Apache SkyWalking Java Agent 9.2.0 Mon, Apr 1, 2024 SkyWalking Java Agent 9.2.0 is released. Go to downloads page to find release tars. Changes by …\n   Monitoring ActiveMQ through SkyWalking Fri, Apr 19, 2024 Introduction Apache ActiveMQ Classic is a popular and powerful open-source messaging and integration …\n   Monitoring Kubernetes network traffic by using eBPF Mon, Mar 18, 2024 Background Apache SkyWalking is an open-source Application Performance Management system that helps …\n   Monitoring Clickhouse Server through SkyWalking Tue, Mar 12, 2024 Background ClickHouse is an open-source column-oriented database management system that allows …\n    Ready to get started?  Run SkyWalking in a snap Try this demo music application to showcase features of Apache SkyWalking in action.\n Quick start     All releases    Stay tuned with SkyWalking   Questions/bugs? Features requests, questions or report bugs? Feel free to open a discussion or file an issue.\n  Join our slack workspace! Send \"Request to join SkyWalking slack\" mail to dev@skywalking.apache.org. We will invite you in.\n  Follow us on Twitter For announcement of latest features etc, stay tuned with @ASFSkyWalking.    ","excerpt":"COMMUNITY SkyWalking has received contributions from 882 individuals now.   
Apache SkyWalking …","ref":"/","title":"Apache SkyWalking"},{"body":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","excerpt":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/containerization/","title":"Apache SkyWalking Agent Containerized Scenarios"},{"body":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... 
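# A minimal sketch of the build steps that would follow the FROM line above; the jar path is an assumed example.
COPY target/app.jar /app.jar
# Start the application as usual; the base image enables the SkyWalking agent automatically, so no -javaagent option is needed.
ENTRYPOINT ["java", "-jar", "/app.jar"]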
build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","excerpt":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/containerization/","title":"Apache SkyWalking Agent Containerized Scenarios"},{"body":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","excerpt":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/containerization/","title":"Apache SkyWalking Agent Containerized Scenarios"},{"body":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. 
Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","excerpt":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/containerization/","title":"Apache SkyWalking Agent Containerized Scenarios"},{"body":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... 
build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","excerpt":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/containerization/","title":"Apache SkyWalking Agent Containerized Scenarios"},{"body":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release the SkyWalking BanyanDB in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-banyandb and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-banyandb \u0026amp;\u0026amp; cd skywalking-banyandb git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking BanyanDB $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release-assembly The skywalking-banyandb-${VERSION}-bin.tgz, skywalking-banyandb-${VERSION}-src.tgz, and their corresponding asc, sha512. 
In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.asc skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.sha512 skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cd skywalking/banyandb \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking BanyanDB release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking BanyanDB version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking BanyanDB version $VERSION. Release notes: * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-banyandb-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-banyandb-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-banyandb/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/docs/installation.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-banyandb-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.asc apache-skywalking-banyandb-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking BanyanDB version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   
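As a minimal sketch, the checks above can be run with the following commands, assuming VERSION is exported and the release candidate tarballs have been downloaded into the current directory:
curl https://www.apache.org/dist/skywalking/KEYS -o KEYS && gpg --import KEYS
for pkg in src bin; do
  shasum -c "apache-skywalking-banyandb-${pkg}-${VERSION}.tgz.sha512"
  gpg --batch --verify "apache-skywalking-banyandb-${pkg}-${VERSION}.tgz.asc" "apache-skywalking-banyandb-${pkg}-${VERSION}.tgz"
done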
Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking BanyanDB $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking BanyanDB $VERSION is now released. SkyWalking BanyanDB: An observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking BanyanDB Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-banyandb/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release …","ref":"/docs/skywalking-banyandb/latest/release/","title":"Apache SkyWalking BanyanDB release guide"},{"body":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release the SkyWalking BanyanDB in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-banyandb and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-banyandb \u0026amp;\u0026amp; cd skywalking-banyandb git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking BanyanDB $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release-assembly The skywalking-banyandb-${VERSION}-bin.tgz, skywalking-banyandb-${VERSION}-src.tgz, and their corresponding asc, sha512. 
In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.asc skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.sha512 skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cd skywalking/banyandb \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking BanyanDB release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking BanyanDB version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking BanyanDB version $VERSION. Release notes: * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-banyandb-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-banyandb-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-banyandb/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/docs/installation/binaries.md#Build-From-Source Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-banyandb-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.asc apache-skywalking-banyandb-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking BanyanDB version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   
Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking BanyanDB $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking BanyanDB $VERSION is now released. SkyWalking BanyanDB: An observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking BanyanDB Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-banyandb/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release …","ref":"/docs/skywalking-banyandb/next/release/","title":"Apache SkyWalking BanyanDB release guide"},{"body":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release the SkyWalking BanyanDB in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-banyandb and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-banyandb \u0026amp;\u0026amp; cd skywalking-banyandb git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking BanyanDB $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release-assembly The skywalking-banyandb-${VERSION}-bin.tgz, skywalking-banyandb-${VERSION}-src.tgz, and their corresponding asc, sha512. 
In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.asc skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.sha512 skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cd skywalking/banyandb \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking BanyanDB release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking BanyanDB version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking BanyanDB version $VERSION. Release notes: * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-banyandb-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-banyandb-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-banyandb/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/docs/installation.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-banyandb-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.asc apache-skywalking-banyandb-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking BanyanDB version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   
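A rough sketch of building the distribution from the source code package, reusing the build commands shown earlier in this guide; the name of the unpacked directory is an assumption and may differ:
tar -xzf "apache-skywalking-banyandb-src-${VERSION}.tgz"
cd "apache-skywalking-banyandb-src-${VERSION}"   # assumed directory name
make clean && make release-assembly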
Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking BanyanDB $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking BanyanDB $VERSION is now released. SkyWalking BanyanDB: An observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking BanyanDB Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-banyandb/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release …","ref":"/docs/skywalking-banyandb/v0.5.0/release/","title":"Apache SkyWalking BanyanDB release guide"},{"body":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to release the SkyWalking Cloud on Kubernetes in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-swck and skywalking, create a new milestone if needed. Update CHANGES.md. Update image tags of adapter and operator.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking Cloud on Kubernetes $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release The skywalking-swck-${VERSION}-bin.tgz, skywalking-swck-${VERSION}-src.tgz, and their corresponding asc, sha512. 
In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.asc skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.sha512 skywalking/swck/\u0026#34;$VERSION\u0026#34; cd skywalking/swck \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-SWCK release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list.\nSubject: [ANNOUNCEMENT] SkyWalking Cloud on Kubernetes $VERSION test build available Content: The test build of SkyWalking Cloud on Kubernetes $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/operator.md#build-from-sources * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/custom-metrics-adapter.md#use-kustomize-to-customise-your-deployment * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz Release Tag : * (Git Tag) $VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. 
[1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-swck-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-swck-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-swck-{src,bin}-$VERSION.tgz.asc apache-skywalking-swck-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Cloud on Kubernetes $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Cloud on Kubernetes $VERSION is now released. SkyWalking Cloud on Kubernetes: A bridge platform between Apache SkyWalking and Kubernetes. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. 
Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking Cloud on Kubernetes Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-swck/blob/$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to …","ref":"/docs/skywalking-swck/latest/release/","title":"Apache SkyWalking Cloud on Kubernetes release guide"},{"body":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to release the SkyWalking Cloud on Kubernetes in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-swck and skywalking, create a new milestone if needed. Update CHANGES.md. Update image tags of adapter and operator.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking Cloud on Kubernetes $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release The skywalking-swck-${VERSION}-bin.tgz, skywalking-swck-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.asc skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.sha512 skywalking/swck/\u0026#34;$VERSION\u0026#34; cd skywalking/swck \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-SWCK release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list.\nSubject: [ANNOUNCEMENT] SkyWalking Cloud on Kubernetes $VERSION test build available Content: The test build of SkyWalking Cloud on Kubernetes $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/operator.md#build-from-sources * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/custom-metrics-adapter.md#use-kustomize-to-customise-your-deployment * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz Release Tag : * (Git Tag) $VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-swck-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-swck-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-swck-{src,bin}-$VERSION.tgz.asc apache-skywalking-swck-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. 
When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Cloud on Kubernetes $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Cloud on Kubernetes $VERSION is now released. SkyWalking Cloud on Kubernetes: A bridge platform between Apache SkyWalking and Kubernetes. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking Cloud on Kubernetes Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-swck/blob/$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to …","ref":"/docs/skywalking-swck/next/release/","title":"Apache SkyWalking Cloud on Kubernetes release guide"},{"body":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to release the SkyWalking Cloud on Kubernetes in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-swck and skywalking, create a new milestone if needed. Update CHANGES.md. Update image tags of adapter and operator.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking Cloud on Kubernetes $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release The skywalking-swck-${VERSION}-bin.tgz, skywalking-swck-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.asc skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.sha512 skywalking/swck/\u0026#34;$VERSION\u0026#34; cd skywalking/swck \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-SWCK release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list.\nSubject: [ANNOUNCEMENT] SkyWalking Cloud on Kubernetes $VERSION test build available Content: The test build of SkyWalking Cloud on Kubernetes $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/operator.md#build-from-sources * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/custom-metrics-adapter.md#use-kustomize-to-customise-your-deployment * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. 
Wait at least 48 hours for test responses Any PMC member, committer, or contributor can test the features of the release candidate and give feedback. Based on that, the PMC will decide whether to start a vote.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz Release Tag : * (Git Tag) $VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md Voting will start now and will remain open for at least 72 hours; all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Feature tests. All artifacts in the staging repository are published with .asc, .md5, and .sha files. Source codes and distribution packages (apache-skywalking-swck-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION with their .asc and .sha512 files. LICENSE and NOTICE are in the source code and distribution packages. Check shasum -c apache-skywalking-swck-{src,bin}-$VERSION.tgz.sha512. Check the GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-swck-{src,bin}-$VERSION.tgz.asc apache-skywalking-swck-{src,bin}-$VERSION.tgz Build the distribution from the source code package by following the build guide. License header check.  The vote result should follow these rules:\n  A PMC vote counts as +1 binding; all other votes count as +1 non-binding.\n  If, within 72 hours, there are at least 3 +1 binding votes and more +1 than -1 votes, the vote passes.\n  Send the closing vote mail to announce the result. When counting the binding and non-binding votes, please list the names of the voters, for example:\n[RESULT][VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move the source code tarballs and distributions to https://dist.apache.org/repos/dist/release/skywalking/; you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck # .... # enter your apache password # ....   Remove the previously released tarballs from https://dist.apache.org/repos/dist/release/skywalking\n
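The removal command is not spelled out here; as an illustration only, assuming the previous release still sits under https://dist.apache.org/repos/dist/release/skywalking/swck/$PREVIOUS_VERSION (where $PREVIOUS_VERSION is a placeholder for the last released version), it could look like:
svn del -m \u0026#34;Remove the outdated SWCK release $PREVIOUS_VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/swck/$PREVIOUS_VERSION # svn delete against a URL commits immediately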
  Referring to the previous PR, update the news and links on the website. Seven files need to be modified.\n  Update the GitHub release page, following the previous convention.\n  Send the ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org; the sender should use his/her Apache email account. You can get the permalink of the vote thread here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Cloud on Kubernetes $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Cloud on Kubernetes $VERSION is now released. SkyWalking Cloud on Kubernetes: A bridge platform between Apache SkyWalking and Kubernetes. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking Cloud on Kubernetes Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-swck/blob/$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to …","ref":"/docs/skywalking-swck/v0.9.0/release/","title":"Apache SkyWalking Cloud on Kubernetes release guide"},{"body":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcomes all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, a new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. 
This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking project and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose an Apache ID that is not on the apache committers list page. Download the ICLA (if the new committer contributes to the project as a day job, a CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print it, sign it by hand, scan it as a PDF, and send it as an attachment to secretary@apache.org. (If an electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm that the ICLA (or CCLA) has been filed. The new committer and the PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P. The V.P. could request it through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming the creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to the Apache Account Utility Platform, create your password, and set up your personal mailbox (Forwarding email address) and GitHub account (Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authentication (2FA) on GitHub. If you set 2FA to \u0026ldquo;off\u0026rdquo;, you will be removed from the corresponding Apache committer write-permission group until you set it up again. 
(NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use the GitBox Account Linking Utility to obtain write permission to the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and set Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you can:\n Review and merge pull requests to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended that new committers ask senior committers to recheck the pull request. Create and push code to a new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 non-binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee A Project Management Committee (PMC) member does not have any special rights in code contributions; PMC members simply oversee the project and make sure that it follows the Apache requirements. The functions of the PMC include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using the private mailing list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... 
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t in most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","excerpt":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for …","ref":"/docs/main/latest/en/guides/asf/committer/","title":"Apache SkyWalking committer"},
When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  
If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... 
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","excerpt":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for …","ref":"/docs/main/next/en/guides/asf/committer/","title":"Apache SkyWalking committer"},{"body":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. 
When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  
If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... 
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","excerpt":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for …","ref":"/docs/main/v9.0.0/en/guides/asf/committer/","title":"Apache SkyWalking committer"},{"body":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. 
When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  
If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... 
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","excerpt":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for …","ref":"/docs/main/v9.1.0/en/guides/asf/committer/","title":"Apache SkyWalking committer"},{"body":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. 
When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  
If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... 
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","excerpt":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for …","ref":"/docs/main/v9.2.0/en/guides/asf/committer/","title":"Apache SkyWalking committer"},{"body":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. 
When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  
If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and set Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you can\n Review and merge pull requests into the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended that new committers ask senior committers to recheck their pull requests. Create new branches in the Apache repo and push code to them. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 non-binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee Project Management Committee (PMC) members do not have any special rights regarding code contributions. They simply oversee the project and make sure that it follows the Apache requirements. The PMC\u0026rsquo;s functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. Readiness can be demonstrated, for instance, by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe voting process for new PMC members should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures on the private mailing list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... 
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t in most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","excerpt":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for …","ref":"/docs/main/v9.3.0/en/guides/asf/committer/","title":"Apache SkyWalking committer"},{"body":"Apache SkyWalking Go Release Guide This documentation guides the release manager through releasing SkyWalking Go in the Apache Way, and also helps people check the release for the vote.\nPrerequisites  Close (if finished, or move to the next milestone otherwise) all issues in the current milestone from skywalking-go and skywalking, and create a new milestone if needed. Update CHANGES.md. 
Check the licenses of all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in to id.apache.org and submit your key fingerprint.\n  Add your GPG public key into the SkyWalking GPG KEYS file; you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existing KEYS file content; only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-go \u0026amp;\u0026amp; cd skywalking-go git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go v$VERSION\u0026#34; git tag -a \u0026#34;toolkit/v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go Toolkit v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-go-${VERSION}-bin.tgz, apache-skywalking-go-${VERSION}-src.tgz, and their corresponding .asc and .sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.asc skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.sha512 skywalking/go/\u0026#34;$VERSION\u0026#34; cd skywalking/go \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Go release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for a vote in dev@skywalking.apache.org; please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Go version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Go version $VERSION. Release notes: * https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-go-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-go-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-go/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-go/blob/v$VERSION/docs/en/development-and-contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Feature tests pass. All artifacts in the staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-go-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION with .asc and .sha512 files. LICENSE and NOTICE are in the source code and distribution packages. Check shasum -c skywalking-go-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-go-$VERSION-{src,bin}.tgz.asc skywalking-go-$VERSION-{src,bin}.tgz. 
Build the distribution from the source code package by following this command: make build.  The vote result should follow these rules:\n  A PMC vote is +1 binding; all other votes are +1 non-binding.\n  Within 72 hours, you get at least 3 (+1 binding) votes and more +1 than -1 votes. Then the vote passes.\n  Send the closing vote mail to announce the result. When counting the binding and non-binding votes, please list the names of the voters. An example looks like this:\n[RESULT][VOTE] Release Apache SkyWalking Go version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move the source code tarballs and distributions to https://dist.apache.org/repos/dist/release/skywalking/; you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION https://dist.apache.org/repos/dist/release/skywalking/go   Refer to the previous PR and update the event and download links on the website.\n  Update the GitHub release page, following the previous convention.\n  Send the ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org; the sender should use his/her Apache email account. Please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Go $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Go $VERSION is now released. SkyWalking Go: The Golang auto-instrument Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Golang projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Go Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-go/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases from the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/) if you don\u0026rsquo;t recommend users to choose those versions, for example, once you have removed their download and documentation links from the website. If users want old releases, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-go/latest/en/development-and-contribution/how-to-release/","title":"Apache SkyWalking Go Release Guide"},{
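To illustrate the Vote Check items in the release guide above, here is a minimal verification sketch. It is only a sketch, assuming curl, gpg, shasum and svn are installed, that VERSION is set to the release candidate version, and that the artifact names match the ones listed in the Vote Check section.
# Hedged sketch: fetch the release candidate and verify checksums and signatures locally.
curl -sSL https://dist.apache.org/repos/dist/release/skywalking/KEYS | gpg --import
svn co https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION skywalking-go-rc \u0026amp;\u0026amp; cd skywalking-go-rc
for f in skywalking-go-$VERSION-src.tgz skywalking-go-$VERSION-bin.tgz; do
  shasum -c $f.sha512     # the checksum must match
  gpg --verify $f.asc $f  # the signature must be good and made by a key from KEYS
done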
"body":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager through releasing SkyWalking Infra E2E in the Apache Way, and also helps people check the release for voting.\nPrerequisites  Close (if finished, or move to the next milestone otherwise) all issues in the current milestone from skywalking-infra-e2e and skywalking, and create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in to id.apache.org and submit your key fingerprint.\n  Add your GPG public key into the SkyWalking GPG KEYS file; you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-infra-e2e.git \u0026amp;\u0026amp; cd skywalking-infra-e2e git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Infra-E2E $VERSION\u0026#34; git push --tags make clean make test # this is optional, it runs sanity checks to verify the features make release Upload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e release/skywalking/infra-e2e mkdir -p release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.asc release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.sha512 release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add infra-e2e/$VERSION \u0026amp;\u0026amp; svn commit infra-e2e -m \u0026#34;Draft Apache SkyWalking-Infra-E2E release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Infra E2E version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Infra E2E version $VERSION. Release notes: * https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-e2e-$VERSION-bin.tgz - sha512xxxxyyyzzz skywalking-e2e-$VERSION-src.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-infra-e2e/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-infra-e2e/blob/main/docs/en/contribution/Release-Guidance.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, and sha files. Source codes and distribution packages (skywalking-e2e-$VERSION-src.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-e2e-$VERSION-src.tgz.sha512. Check gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz. Build distribution from source code package by following this the build guide.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Infra E2E version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Infra E2E $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Infra E2E $VERSION is now released. SkyWalking Infra E2E: An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Infra E2E Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release …","ref":"/docs/skywalking-infra-e2e/latest/en/contribution/release-guidance/","title":"Apache SkyWalking Infra E2E Release Guide"},{"body":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release the SkyWalking Infra E2E in the Apache Way, and also helps people to check the release for voting.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-infra-e2e and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-infra-e2e.git \u0026amp;\u0026amp; cd skywalking-infra-e2e git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Infra-E2E $VERSION\u0026#34; git push --tags make clean make test # this is optional, it runs sanity checks to verify the features make release Upload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e release/skywalking/infra-e2e mkdir -p release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.asc release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.sha512 release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add infra-e2e/$VERSION \u0026amp;\u0026amp; svn commit infra-e2e -m \u0026#34;Draft Apache SkyWalking-Infra-E2E release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Infra E2E version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Infra E2E version $VERSION. Release notes: * https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-e2e-$VERSION-bin.tgz - sha512xxxxyyyzzz skywalking-e2e-$VERSION-src.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-infra-e2e/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-infra-e2e/blob/main/docs/en/contribution/Release-Guidance.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, and sha files. Source codes and distribution packages (skywalking-e2e-$VERSION-src.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-e2e-$VERSION-src.tgz.sha512. Check gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz. Build distribution from source code package by following this the build guide.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Infra E2E version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Infra E2E $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Infra E2E $VERSION is now released. SkyWalking Infra E2E: An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Infra E2E Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release …","ref":"/docs/skywalking-infra-e2e/next/en/contribution/release-guidance/","title":"Apache SkyWalking Infra E2E Release Guide"},{"body":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release the SkyWalking Infra E2E in the Apache Way, and also helps people to check the release for voting.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-infra-e2e and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-infra-e2e.git \u0026amp;\u0026amp; cd skywalking-infra-e2e git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Infra-E2E $VERSION\u0026#34; git push --tags make clean make test # this is optional, it runs sanity checks to verify the features make release Upload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e release/skywalking/infra-e2e mkdir -p release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.asc release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.sha512 release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add infra-e2e/$VERSION \u0026amp;\u0026amp; svn commit infra-e2e -m \u0026#34;Draft Apache SkyWalking-Infra-E2E release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Infra E2E version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Infra E2E version $VERSION. Release notes: * https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-e2e-$VERSION-bin.tgz - sha512xxxxyyyzzz skywalking-e2e-$VERSION-src.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-infra-e2e/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-infra-e2e/blob/main/docs/en/contribution/Release-Guidance.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, and sha files. Source codes and distribution packages (skywalking-e2e-$VERSION-src.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-e2e-$VERSION-src.tgz.sha512. Check gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz. Build distribution from source code package by following this the build guide.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Infra E2E version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Infra E2E $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Infra E2E $VERSION is now released. SkyWalking Infra E2E: An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Infra E2E Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/contribution/release-guidance/","title":"Apache SkyWalking Infra E2E Release Guide"},{"body":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking SDK in The Apache Way and start the voting process by reading this document.\nRequirements  Rust(rustc) Cargo PHP(php, php-config) Pecl GPG shasum  Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file.(Notice, only PMC member could update this file) Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Draft a new release Open Create a new release page, choose the tag, and click the Generate release notes button, then copy the generated text to local /tmp/notes.txt.\nTest your settings and package ## Make sure local compiling passed \u0026gt; cargo build ## Create package.xml from package.xml.tpl \u0026gt; cargo run -p scripts --release -- create-package-xml --version x.y.z --notes \u0026#34;`cat /tmp/notes.txt`\u0026#34; ## Create local package. The skywalking_agent-x.y.z.tgz should be found in project root \u0026gt; pecl package Sign the package Tag the commit ID of this release as vx.y.z.\nAfter set the version in Cargo.toml with the release number, package locally. 
Then run the following commands to sign your package.\n\u0026gt; export RELEASE_VERSION=x.y.z ## The package should be signed by your Apache committer mail. \u0026gt; gpg --armor --detach-sig skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; shasum -a 512 skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; skywalking_agent-$RELEASE_VERSION.tgz.sha512 After these, the source tar with its signed asc and sha512 are ready.\nUpload to Apache SVN and tag a release  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/php. Create a folder and name it by the release version and round, such as: x.y.z Upload tar ball, asc, sha512 files to the new folder.  Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking PHP version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking PHP version x.y.z. Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z/ * sha512 checksums - xxxxxxxx skywalking_agent-x.y.z.tgz Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking-php/tree/{commit-id} Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-php/blob/master/docs/en/contribution/compiling.md Voting will start now (Date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release   Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.\n\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z https://dist.apache.org/repos/dist/release/skywalking/php .... enter your apache password ....   Pecl publish package on skywalking_agent.\nMake sure you have a PECL account, and list in package.tpl.xml as \u0026lt;developer\u0026gt;, or reach private@skywalking.apache.org if you are a committer/PMC but not listed.\nYou can request a PECL account via https://pecl.php.net/account-request.php.\n  Add an release event, update download and doc releases on the SkyWalking website.\n  Add the new release on ASF addrelease site.\n  Remove the old releases on https://dist.apache.org/repos/dist/release/skywalking/php/{previous-version}.\n  Send a release announcement Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.\nMail title: [ANNOUNCE] Apache SkyWalking PHP x.y.z released Mail content: Hi all, SkyWalking PHP Agent provides the native tracing abilities for PHP project. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team ","excerpt":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-php/latest/en/contribution/release-agent/","title":"Apache SkyWalking PHP Agent release guide"},{"body":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking SDK in The Apache Way and start the voting process by reading this document.\nRequirements  Rust(rustc) Cargo PHP(php, php-config) Pecl GPG shasum  Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file.(Notice, only PMC member could update this file) Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Draft a new release Open Create a new release page, choose the tag, and click the Generate release notes button, then copy the generated text to local /tmp/notes.txt.\nTest your settings and package ## Make sure local compiling passed \u0026gt; cargo build ## Create package.xml from package.xml.tpl \u0026gt; cargo run -p scripts --release -- create-package-xml --version x.y.z --notes \u0026#34;`cat /tmp/notes.txt`\u0026#34; ## Create local package. The skywalking_agent-x.y.z.tgz should be found in project root \u0026gt; pecl package Sign the package Tag the commit ID of this release as vx.y.z.\nAfter set the version in Cargo.toml with the release number, package locally. Then run the following commands to sign your package.\n\u0026gt; export RELEASE_VERSION=x.y.z ## The package should be signed by your Apache committer mail. \u0026gt; gpg --armor --detach-sig skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; shasum -a 512 skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; skywalking_agent-$RELEASE_VERSION.tgz.sha512 After these, the source tar with its signed asc and sha512 are ready.\nUpload to Apache SVN and tag a release  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/php. Create a folder and name it by the release version and round, such as: x.y.z Upload tar ball, asc, sha512 files to the new folder.  Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking PHP version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking PHP version x.y.z. Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z/ * sha512 checksums - xxxxxxxx skywalking_agent-x.y.z.tgz Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking-php/tree/{commit-id} Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-php/blob/master/docs/en/contribution/compiling.md Voting will start now (Date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... 
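Before casting a vote, reviewers can verify the candidate locally. The commands below are a minimal sketch rather than part of the official template; the version x.y.z and file names simply follow the patterns above:\n\u0026gt; curl -sSL https://dist.apache.org/repos/dist/release/skywalking/KEYS | gpg --import \u0026gt; svn co https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z \u0026amp;\u0026amp; cd x.y.z \u0026gt; shasum -a 512 -c skywalking_agent-x.y.z.tgz.sha512 \u0026gt; gpg --verify skywalking_agent-x.y.z.tgz.asc skywalking_agent-x.y.z.tgz 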
Vote Check The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release   Move the source code tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.\n\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z https://dist.apache.org/repos/dist/release/skywalking/php .... enter your Apache password ....   Pecl publish package on skywalking_agent.\nMake sure you have a PECL account and are listed in package.tpl.xml as \u0026lt;developer\u0026gt;, or reach private@skywalking.apache.org if you are a committer/PMC but not listed.\nYou can request a PECL account via https://pecl.php.net/account-request.php.\n  Add a release event, update download and doc releases on the SkyWalking website.\n  Add the new release on the ASF addrelease site.\n  Remove the old releases on https://dist.apache.org/repos/dist/release/skywalking/php/{previous-version}.\n  Send a release announcement Send the ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.\nMail title: [ANNOUNCE] Apache SkyWalking PHP x.y.z released Mail content: Hi all, SkyWalking PHP Agent provides the native tracing abilities for PHP projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c (last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Apache SkyWalking Team ","excerpt":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-php/next/en/contribution/release-agent/","title":"Apache SkyWalking PHP Agent release guide"},{"body":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking SDK in The Apache Way and start the voting process by reading this document.\nRequirements  Rust(rustc) Cargo PHP(php, php-config) Pecl GPG shasum  Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in to this svn, and update the file. Don\u0026rsquo;t override the existing file. (Notice: only PMC members can update this file.) Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  
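If your key is not yet in the KEYS file, the following is a minimal sketch of appending it; the checkout directory name and the YOUR_ID placeholder are illustrative, and only a PMC member can commit the change:\n\u0026gt; svn co --depth files https://dist.apache.org/repos/dist/release/skywalking/ sw-dist \u0026gt; (gpg --list-sigs YOUR_ID@apache.org \u0026amp;\u0026amp; gpg --armor --export YOUR_ID@apache.org) \u0026gt;\u0026gt; sw-dist/KEYS \u0026gt; svn commit sw-dist/KEYS -m \u0026#34;Add GPG key for YOUR_ID\u0026#34; 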
Draft a new release Open Create a new release page, choose the tag, and click the Generate release notes button, then copy the generated text to local /tmp/notes.txt.\nTest your settings and package ## Make sure local compiling passed \u0026gt; cargo build ## Create package.xml from package.xml.tpl \u0026gt; cargo run -p scripts --release -- create-package-xml --version x.y.z --notes \u0026#34;`cat /tmp/notes.txt`\u0026#34; ## Create local package. The skywalking_agent-x.y.z.tgz should be found in project root \u0026gt; pecl package Sign the package Tag the commit ID of this release as vx.y.z.\nAfter set the version in Cargo.toml with the release number, package locally. Then run the following commands to sign your package.\n\u0026gt; export RELEASE_VERSION=x.y.z ## The package should be signed by your Apache committer mail. \u0026gt; gpg --armor --detach-sig skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; shasum -a 512 skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; skywalking_agent-$RELEASE_VERSION.tgz.sha512 After these, the source tar with its signed asc and sha512 are ready.\nUpload to Apache SVN and tag a release  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/php. Create a folder and name it by the release version and round, such as: x.y.z Upload tar ball, asc, sha512 files to the new folder.  Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking PHP version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking PHP version x.y.z. Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z/ * sha512 checksums - xxxxxxxx skywalking_agent-x.y.z.tgz Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking-php/tree/{commit-id} Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-php/blob/master/docs/en/contribution/compiling.md Voting will start now (Date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release   Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.\n\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z https://dist.apache.org/repos/dist/release/skywalking/php .... enter your apache password ....   Pecl publish package on skywalking_agent.\nMake sure you have a PECL account, and list in package.tpl.xml as \u0026lt;developer\u0026gt;, or reach private@skywalking.apache.org if you are a committer/PMC but not listed.\nYou can request a PECL account via https://pecl.php.net/account-request.php.\n  Add an release event, update download and doc releases on the SkyWalking website.\n  Add the new release on ASF addrelease site.\n  Remove the old releases on https://dist.apache.org/repos/dist/release/skywalking/php/{previous-version}.\n  Send a release announcement Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. 
The sender should use the Apache email account.\nMail title: [ANNOUNCE] Apache SkyWalking PHP x.y.z released Mail content: Hi all, SkyWalking PHP Agent provides the native tracing abilities for PHP projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c (last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Apache SkyWalking Team ","excerpt":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-php/v0.7.0/en/contribution/release-agent/","title":"Apache SkyWalking PHP Agent release guide"},{"body":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build from source.\nThis image hosts the SkyWalking Python agent package on top of official Python base images (full \u0026amp; slim) providing support from Python 3.7 - 3.11.\nHow to use this image The images are hosted at Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\nskywalking.docker.scarf.sh/apache/skywalking-python\nBuild your Python application image on top of this image Start by pulling the skywalking-python image as the base of your application image. Refer to Docker Hub for the list of tags available.\nFROM apache/skywalking-python:0.7.0-grpc-py3.9\n# ... build your Python application\nYou could start your Python application with CMD. The Python image already sets an entry point ENTRYPOINT [\u0026quot;sw-python\u0026quot;].\nFor example - CMD ['run', '-p', 'gunicorn', 'app.wsgi'] -p is always needed when used with Gunicorn/uWSGI -\u0026gt; This will be translated to sw-python run -p gunicorn app.wsgi\nYou don\u0026rsquo;t need to care about enabling the SkyWalking Python agent manually; it is adopted and bootstrapped automatically through the sw-python CLI.\nEnvironment variables should be provided to customize the agent behavior.\nBuild an image from the dockerfile Provide the following arguments to build your own image from the dockerfile.\nBASE_PYTHON_IMAGE # the Python base image to build upon SW_PYTHON_AGENT_VERSION # agent version to be pulled from PyPI SW_PYTHON_AGENT_PROTOCOL # agent protocol - grpc/ http/ kafka ","excerpt":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but …","ref":"/docs/skywalking-python/latest/en/setup/container/","title":"Apache SkyWalking Python Agent dockerfile and images"},{"body":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but provided for convenience. 
Recommended usage is always to build from source.\nThis image hosts the SkyWalking Python agent package on top of official Python base images (full \u0026amp; slim) providing support from Python 3.7 - 3.11.\nHow to use this image The images are hosted at Docker Hub.\nThe images come with protocol variants (gRPC, Kafka, HTTP) and base Python variants (Full, Slim).\nBuild your Python application image on top of this image Start by pulling the skywalking-python image as the base of your application image. Refer to Docker Hub for the list of tags available.\nFROM apache/skywalking-python:1.1.0-grpc-py3.10\n# ... build your Python application\nYou could start your Python application with CMD. The Python image already sets an entry point ENTRYPOINT [\u0026quot;sw-python\u0026quot;].\nFor example - CMD ['run', '-p', 'gunicorn', 'app.wsgi'] -p is always needed when used with Gunicorn/uWSGI -\u0026gt; This will be translated to sw-python run -p gunicorn app.wsgi\nYou don\u0026rsquo;t need to care about enabling the SkyWalking Python agent manually; it is adopted and bootstrapped automatically through the sw-python CLI.\nEnvironment variables should be provided to customize the agent behavior.\nBuild an image from the dockerfile Provide the following arguments to build your own image from the dockerfile.\nBASE_PYTHON_IMAGE # the Python base image to build upon SW_PYTHON_AGENT_VERSION # agent version to be pulled from PyPI SW_PYTHON_AGENT_PROTOCOL # agent protocol - grpc/ http/ kafka ","excerpt":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but …","ref":"/docs/skywalking-python/next/en/setup/container/","title":"Apache SkyWalking Python Agent dockerfile and images"},{"body":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build from source.\nThis image hosts the SkyWalking Python agent package on top of official Python base images (full \u0026amp; slim) providing support from Python 3.7 - 3.11.\nHow to use this image The images are hosted at Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\nskywalking.docker.scarf.sh/apache/skywalking-python\nBuild your Python application image on top of this image Start by pulling the skywalking-python image as the base of your application image. Refer to Docker Hub for the list of tags available.\nFROM apache/skywalking-python:0.7.0-grpc-py3.9\n# ... build your Python application\nYou could start your Python application with CMD. 
The Python image already sets an entry point ENTRYPOINT [\u0026quot;sw-python\u0026quot;].\nFor example - CMD ['run', '-p', 'gunicorn', 'app.wsgi'] -p is always needed when used with Gunicorn/uWSGI -\u0026gt; This will be translated to sw-python run -p gunicorn app.wsgi\nYou don\u0026rsquo;t need to care about enabling the SkyWalking Python agent manually; it is adopted and bootstrapped automatically through the sw-python CLI.\nEnvironment variables should be provided to customize the agent behavior.\nBuild an image from the dockerfile Provide the following arguments to build your own image from the dockerfile.\nBASE_PYTHON_IMAGE # the Python base image to build upon SW_PYTHON_AGENT_VERSION # agent version to be pulled from PyPI SW_PYTHON_AGENT_PROTOCOL # agent protocol - grpc/ http/ kafka ","excerpt":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but …","ref":"/docs/skywalking-python/v1.0.1/en/setup/container/","title":"Apache SkyWalking Python Agent dockerfile and images"},{"body":"Apache SkyWalking Python Image Release Guide This documentation shows the way to build and push the SkyWalking Python images to DockerHub.\nPrerequisites Before building the latest release of images, make sure an official release is pushed to PyPI, which the dockerfile depends on.\nImages This process will generate a list of images covering most used Python versions and variations (grpc/http/kafka) of the Python agent.\nThe convenience images are published to Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\n skywalking.docker.scarf.sh/apache/skywalking-python (Docker Hub)  How to build Issue the following commands to build relevant docker images for the Python agent. The make command will generate three images (grpc, http, kafka) for each Python version supported.\nAt the root folder -\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image Or at the docker folder -\ncd docker export AGENT_VERSION=\u0026lt;version\u0026gt; make How to publish images After a SkyWalking Apache release for the Python agent and wheels have been pushed to PyPI:\n  Build images from the project root; this step pulls the agent wheel from PyPI and installs it:\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image   Verify the images built.\n  Push the built images to the Docker Hub repos:\nmake push-image   ","excerpt":"Apache SkyWalking Python Image Release Guide This documentation shows the way to build and push the …","ref":"/docs/skywalking-python/latest/en/contribution/how-to-release-docker/","title":"Apache SkyWalking Python Image Release Guide"},{"body":"Apache SkyWalking Python Image Release Guide The official process generating a list of images covering most used Python versions and variations (grpc/http/kafka) of the Python agent is deployed to our GitHub Actions and therefore does not rely on this documentation.\nThis documentation shows the way to build and push the SkyWalking Python images manually.\nHow to build manually Before building the latest release of images, make sure an official release is pushed to PyPI, which the dockerfile depends on.\nImages The process generating a list of images covering most used Python versions and variations (grpc/http/kafka) of the Python agent is deployed to our GitHub Actions.\nThe convenience images are published to DockerHub.\nHow to build Issue the following commands to build relevant docker images for the Python agent. 
The make command will generate three images(grpc, http, kafka) for each Python version supported.\nAt the root folder -\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image Or at the docker folder -\ncd docker export AGENT_VERSION=\u0026lt;version\u0026gt; make How to publish images After a SkyWalking Apache release for the Python agent and wheels have been pushed to PyPI:\n  Build images from the project root, this step pulls agent wheel from PyPI and installs it:\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image   Verify the images built.\n  Push built images to docker hub repos:\nmake push-image   ","excerpt":"Apache SkyWalking Python Image Release Guide The official process generating a list of images …","ref":"/docs/skywalking-python/next/en/contribution/how-to-release-docker/","title":"Apache SkyWalking Python Image Release Guide"},{"body":"Apache SkyWalking Python Image Release Guide This documentation shows the way to build and push the SkyWalking Python images to DockerHub.\nPrerequisites Before building the latest release of images, make sure an official release is pushed to PyPI where the dockerfile will depend on.\nImages This process wil generate a list of images covering most used Python versions and variations(grpc/http/kafka) of the Python agent.\nThe convenience images are published to Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\n skywalking.docker.scarf.sh/apache/skywalking-python (Docker Hub)  How to build Issue the following commands to build relevant docker images for the Python agent. The make command will generate three images(grpc, http, kafka) for each Python version supported.\nAt the root folder -\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image Or at the docker folder -\ncd docker export AGENT_VERSION=\u0026lt;version\u0026gt; make How to publish images After a SkyWalking Apache release for the Python agent and wheels have been pushed to PyPI:\n  Build images from the project root, this step pulls agent wheel from PyPI and installs it:\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image   Verify the images built.\n  Push built images to docker hub repos:\nmake push-image   ","excerpt":"Apache SkyWalking Python Image Release Guide This documentation shows the way to build and push the …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-release-docker/","title":"Apache SkyWalking Python Image Release Guide"},{"body":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the SkyWalking Python in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-python and skywalking, create a new milestone if needed. Update CHANGELOG.md and version in pyproject.toml.  Add your GPG public key to Apache SVN   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-python \u0026amp;\u0026amp; cd skywalking-python git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Python $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release Upload to Apache SVN svn co https://dist.apache.org/repos/dist/dev/skywalking/python release/skywalking/python mkdir -p release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz.asc release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking-python*.tgz.sha512 release/skywalking/python/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add python/$VERSION \u0026amp;\u0026amp; svn commit python -m \u0026#34;Draft Apache SkyWalking-Python release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email, the same below.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION test build available Content: The test build of Apache SkyWalking Python $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Python version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Python version $VERSION. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. 
[ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-python-src-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-python-src-$VERSION.tgz.sha512. Check gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz. Build the distribution from the source code package by following the build guide. License check: make license.  Vote result should follow these:\n  A PMC vote is +1 binding; all others are +1 non-binding.\n  If, within 72 hours, you get at least 3 (+1 binding) votes and more +1 than -1 votes, the vote passes.\n  Send the closing vote mail to announce the result. When counting the binding and non-binding votes, please list the names of the voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Python version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move the source code tarballs and distributions to https://dist.apache.org/repos/dist/release/skywalking/; you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/python/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR and update the news and links on the website. There are several files that need to be modified.\n  Update the GitHub release page, following the previous convention.\n  Send the ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org; the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Python $VERSION is now released. SkyWalking Python: The Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. 
Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Website: http://skywalking.apache.org/ SkyWalking Python Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-python/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-python/latest/en/contribution/how-to-release/","title":"Apache SkyWalking Python Release Guide"},{"body":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the SkyWalking Python in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-python and skywalking, create a new milestone if needed. Update CHANGELOG.md and version in pyproject.toml.  Add your GPG public key to Apache SVN   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-python \u0026amp;\u0026amp; cd skywalking-python git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Python $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release Upload to Apache SVN svn co https://dist.apache.org/repos/dist/dev/skywalking/python release/skywalking/python mkdir -p release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking*.tgz release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking*.tgz.asc release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python*.tgz.sha512 release/skywalking/python/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add python/$VERSION \u0026amp;\u0026amp; svn commit python -m \u0026#34;Draft Apache SkyWalking-Python release $VERSION\u0026#34; Make the internal announcement First, generate a sha512sum for the source code package generated in last step:\nsha512sum release/skywalking/python/\u0026#34;$VERSION\u0026#34;/skywalking-python-src-\u0026#34;$VERSION\u0026#34;.tgz Send an announcement email to dev@ mailing list, please check all links before sending the email, the same as below.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION test build available Content: The test build of Apache SkyWalking Python $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC member, committer, or contributor can test the features of the release candidate and give feedback. Based on that, the PMC will decide whether to start a vote.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Python version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Python version $VERSION. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in the staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-python-src-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION with .asc, .sha512. LICENSE and NOTICE are included in the source code and distribution packages. Check shasum -c skywalking-python-src-$VERSION.tgz.sha512. Check gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz. Build the distribution from the source code package by following the build guide. License check: make license.  The vote result should follow these rules:\n  A PMC vote counts as +1 binding; all other votes count as +1 non-binding.\n  If, within 72 hours, you get at least 3 (+1 binding) votes and more +1 than -1 votes, the vote passes.\n  Send the closing vote mail to announce the result. When counting the binding and non-binding votes, please list the names of the voters. An example looks like this:\n[RESULT][VOTE] Release Apache SkyWalking Python version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   
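As a convenience, a reviewer might script the vote checks above roughly as follows. This is only a sketch under the conventions described in this guide (the skywalking-python-src-$VERSION.tgz file naming, the dev SVN location, and the KEYS file URL); checking out the candidate directory with svn and importing the KEYS file with gpg are assumptions about local tooling, not steps mandated by the guide.

# Sketch: verify the release candidate before voting (assumes svn, curl, shasum, gpg, and tar are available)
export VERSION=x.y.z   # the version under vote
# check out the candidate artifacts and import the published signing keys
svn co https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION && cd $VERSION
curl -sSL https://dist.apache.org/repos/dist/release/skywalking/KEYS | gpg --import
# checksum and signature checks from the vote-check list
shasum -c skywalking-python-src-$VERSION.tgz.sha512
gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz
# confirm LICENSE and NOTICE are present in the source package
tar -tzf skywalking-python-src-$VERSION.tgz | grep -E '(^|/)(LICENSE|NOTICE)$'
# then build from the source package per the build guide and run the license check: make license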
Publish release   Move the source code tarballs and distributions to https://dist.apache.org/repos/dist/release/skywalking/; you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/python/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR and update the news and links on the website. There are several files that need to be modified.\n  Publish PyPI package After the official ASF release, we publish the packaged wheel to the PyPI index.\n Make sure the final upload is correct by first using the test PyPI index: make upload-test. Then upload the final artifacts by running make upload.  Publish Docker images After the release on GitHub, a GitHub Action will be triggered to build Docker images based on the latest code.\nImportant: We announce the new release by drafting one on the GitHub release page, following the previous convention.\nA GitHub Actions workflow will be triggered automatically by that release event to build and upload the Docker images to DockerHub.\nSee How-to-release-docker for a detailed description of the manual release process.\n Send the ANNOUNCEMENT email to dev@skywalking.apache.org and announce@apache.org; the sender should use their Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Python $VERSION is now released. SkyWalking Python: The Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Website: http://skywalking.apache.org/ SkyWalking Python Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-python/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-python/next/en/contribution/how-to-release/","title":"Apache SkyWalking Python Release Guide"},{"body":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the SkyWalking Python in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-python and skywalking, create a new milestone if needed. Update CHANGELOG.md and version in pyproject.toml.  Add your GPG public key to Apache SVN   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-python \u0026amp;\u0026amp; cd skywalking-python git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Python $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release Upload to Apache SVN svn co https://dist.apache.org/repos/dist/dev/skywalking/python release/skywalking/python mkdir -p release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz.asc release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking-python*.tgz.sha512 release/skywalking/python/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add python/$VERSION \u0026amp;\u0026amp; svn commit python -m \u0026#34;Draft Apache SkyWalking-Python release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email, the same below.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION test build available Content: The test build of Apache SkyWalking Python $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Python version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Python version $VERSION. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. 
[ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-python-src-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-python-src-$VERSION.tgz.sha512. Check gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz. Build distribution from source code package by following this the build guide. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Python version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/python/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Python $VERSION is now released. SkyWalking Python: The Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. 
Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Website: http://skywalking.apache.org/ SkyWalking Python Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-python/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-release/","title":"Apache SkyWalking Python Release Guide"},{"body":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the SkyWalking Rover in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-rover and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-rover \u0026amp;\u0026amp; cd skywalking-rover git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Rover v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-rover-${VERSION}-bin.tgz, apache-skywalking-rover-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz.asc skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking-rover*.tgz.sha512 skywalking/rover/\u0026#34;$VERSION\u0026#34; cd skywalking/rover \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Rover release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Rover version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Rover version $VERSION. 
Release notes: * https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-rover-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-rover-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-rover/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-rover/blob/v$VERSION/docs/en/guides/contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-rover-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-rover-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-rover-$VERSION-{src,bin}.tgz.asc skywalking-rover-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make container-generate build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Rover version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION https://dist.apache.org/repos/dist/release/skywalking/rover   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Rover $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Rover $VERSION is now released. SkyWalking Rover: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Rover Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-rover/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-rover/latest/en/guides/contribution/how-to-release/","title":"Apache SkyWalking Rover Release Guide"},{"body":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the SkyWalking Rover in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-rover and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-rover \u0026amp;\u0026amp; cd skywalking-rover git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Rover v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-rover-${VERSION}-bin.tgz, apache-skywalking-rover-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz.asc skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking-rover*.tgz.sha512 skywalking/rover/\u0026#34;$VERSION\u0026#34; cd skywalking/rover \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Rover release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Rover version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Rover version $VERSION. 
Release notes: * https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-rover-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-rover-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-rover/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-rover/blob/v$VERSION/docs/en/guides/contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-rover-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-rover-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-rover-$VERSION-{src,bin}.tgz.asc skywalking-rover-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make container-generate build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Rover version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION https://dist.apache.org/repos/dist/release/skywalking/rover   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Rover $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Rover $VERSION is now released. SkyWalking Rover: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Rover Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-rover/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-rover/next/en/guides/contribution/how-to-release/","title":"Apache SkyWalking Rover Release Guide"},{"body":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the SkyWalking Rover in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-rover and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-rover \u0026amp;\u0026amp; cd skywalking-rover git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Rover v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-rover-${VERSION}-bin.tgz, apache-skywalking-rover-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz.asc skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking-rover*.tgz.sha512 skywalking/rover/\u0026#34;$VERSION\u0026#34; cd skywalking/rover \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Rover release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Rover version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Rover version $VERSION. 
Release notes: * https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-rover-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-rover-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-rover/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-rover/blob/v$VERSION/docs/en/guides/contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-rover-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-rover-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-rover-$VERSION-{src,bin}.tgz.asc skywalking-rover-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make container-generate build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Rover version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION https://dist.apache.org/repos/dist/release/skywalking/rover   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Rover $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Rover $VERSION is now released. SkyWalking Rover: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Rover Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-rover/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-rover/v0.6.0/en/guides/contribution/how-to-release/","title":"Apache SkyWalking Rover Release Guide"},{"body":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release the SkyWalking Satellite in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-satellite and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-satellite \u0026amp;\u0026amp; cd skywalking-satellite git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Satellite v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-satellite-${VERSION}-bin.tgz, apache-skywalking-satellite-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz.asc skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking-satellite*.tgz.sha512 skywalking/satellite/\u0026#34;$VERSION\u0026#34; cd skywalking/satellite \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Satellite release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] SkyWalking Satellite $VERSION test build available Content: The test build of SkyWalking Satellite $VERSION is now available. 
We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-satellite-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-satellite-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/v$VERSION/docs/en/guides/contribution/How-to-release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Satellite version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Satellite version $VERSION. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-satellite-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-satellite-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/$VERSION/docs/en/guides/contribuation/How-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-satellite-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-satellite-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-satellite-$VERSION-{src,bin}.tgz.asc skywalking-satellite-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Satellite version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION https://dist.apache.org/repos/dist/release/skywalking/satellite   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Satellite $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Satellite $VERSION is now released. SkyWalking Satellite: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Satellite Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-satellite/blob/$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release …","ref":"/docs/skywalking-satellite/latest/en/guides/contribution/how-to-release/","title":"Apache SkyWalking Satellite Release Guide"},{"body":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release the SkyWalking Satellite in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-satellite and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-satellite \u0026amp;\u0026amp; cd skywalking-satellite git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Satellite v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-satellite-${VERSION}-bin.tgz, apache-skywalking-satellite-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz.asc skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking-satellite*.tgz.sha512 skywalking/satellite/\u0026#34;$VERSION\u0026#34; cd skywalking/satellite \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Satellite release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] SkyWalking Satellite $VERSION test build available Content: The test build of SkyWalking Satellite $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-satellite-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-satellite-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/v$VERSION/docs/en/guides/contribution/How-to-release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Satellite version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Satellite version $VERSION. 
Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-satellite-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-satellite-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/$VERSION/docs/en/guides/contribuation/How-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-satellite-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-satellite-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-satellite-$VERSION-{src,bin}.tgz.asc skywalking-satellite-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Satellite version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION https://dist.apache.org/repos/dist/release/skywalking/satellite   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Satellite $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Satellite $VERSION is now released. SkyWalking Satellite: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Satellite Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-satellite/blob/$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release …","ref":"/docs/skywalking-satellite/next/en/guides/contribution/how-to-release/","title":"Apache SkyWalking Satellite Release Guide"},{"body":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release the SkyWalking Satellite in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-satellite and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-satellite \u0026amp;\u0026amp; cd skywalking-satellite git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Satellite v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-satellite-${VERSION}-bin.tgz, apache-skywalking-satellite-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz.asc skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking-satellite*.tgz.sha512 skywalking/satellite/\u0026#34;$VERSION\u0026#34; cd skywalking/satellite \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Satellite release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] SkyWalking Satellite $VERSION test build available Content: The test build of SkyWalking Satellite $VERSION is now available. 
We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-satellite-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-satellite-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/v$VERSION/docs/en/guides/contribution/How-to-release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Satellite version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Satellite version $VERSION. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-satellite-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-satellite-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/$VERSION/docs/en/guides/contribuation/How-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-satellite-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-satellite-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-satellite-$VERSION-{src,bin}.tgz.asc skywalking-satellite-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Satellite version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move the source code tarballs and distributions to https://dist.apache.org/repos/dist/release/skywalking/; you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION https://dist.apache.org/repos/dist/release/skywalking/satellite   Refer to the previous PR and update the event and download links on the website.\n  Update the GitHub release page, following the previous convention.\n  Push the Docker image to Docker Hub; make sure you have the write permission to push the image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send the ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org; the sender should use their Apache email account. Please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Satellite $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Satellite $VERSION is now released. SkyWalking Satellite: A lightweight collector/sidecar that can be deployed close to the target monitored system, to collect metrics, traces, and logs. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Satellite Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-satellite/blob/$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those versions. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release …","ref":"/docs/skywalking-satellite/v1.2.0/en/guides/contribution/how-to-release/","title":"Apache SkyWalking Satellite Release Guide"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. 
The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/latest/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/next/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.0.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. 
The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.1.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.2.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. 
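For readability, a minimal service-apdex-threshold.yml matching the inline snippet quoted in these entries (which loses its line breaks in this index) would look roughly like the following sketch; the service names are only the illustrative examples from the snippet:
# default threshold is 500ms
default: 500
# example:
# the threshold of service tomcat is 1s
# tomcat: 1000
# the threshold of service springboot1 is 50ms
# springboot1: 50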
The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.3.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.4.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.5.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. 
The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.6.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.7.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. 
Meanwhile, the instances would be recognized as LAYER: APISIX instances. The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared 
dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/latest/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
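The OTEL Collector snippets quoted inline in these APISIX monitoring entries likewise lose their indentation here. Restored to regular YAML, the Prometheus scrape config that attaches the skywalking_service label would look roughly like the sketch below; the target address and service name are illustrative placeholders:
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: apisix-monitoring
          static_configs:
            - targets:
                - apisix:9091
              labels:
                skywalking_service: example_service_name # Specify SkyWalking Service name
and the equivalent resource processor that inserts the same attribute:
processors:
  resource/skywalking-service:
    attributes:
      - key: skywalking_service
        value: example_service_name # Specify SkyWalking Service name
        action: insert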
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/next/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","excerpt":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, …","ref":"/docs/main/latest/en/setup/backend/backend-aws-api-gateway-monitoring/","title":"AWS API Gateway monitoring"},{"body":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. 
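The Set up steps described below (enable CloudWatch metrics, create a Kinesis Data Firehose delivery stream whose HTTP endpoint points at the AWS Kinesis Data Firehose receiver, then create a CloudWatch metric stream) can also be captured as infrastructure-as-code. The following is a hedged CloudFormation sketch of the metric-stream step only, not something taken from this page; every name and ARN is a placeholder, and the delivery stream and IAM role are assumed to exist already.

```yaml
# Sketch of step 3 only: stream AWS/ApiGateway metrics in OpenTelemetry 0.7 format
# into the Firehose delivery stream that forwards to SkyWalking.
Resources:
  ApiGatewayMetricStream:
    Type: AWS::CloudWatch::MetricStream
    Properties:
      Name: apigateway-to-skywalking                                                # placeholder
      FirehoseArn: arn:aws:firehose:us-east-1:111111111111:deliverystream/example   # placeholder
      RoleArn: arn:aws:iam::111111111111:role/metric-stream-to-firehose             # placeholder
      OutputFormat: opentelemetry0.7
      IncludeFilters:
        - Namespace: AWS/ApiGateway
```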
SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","excerpt":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, …","ref":"/docs/main/next/en/setup/backend/backend-aws-api-gateway-monitoring/","title":"AWS API Gateway monitoring"},{"body":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. 
The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","excerpt":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-aws-api-gateway-monitoring/","title":"AWS API Gateway monitoring"},{"body":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","excerpt":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-aws-api-gateway-monitoring/","title":"AWS API Gateway monitoring"},{"body":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. 
SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","excerpt":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-aws-api-gateway-monitoring/","title":"AWS API Gateway monitoring"},{"body":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
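For readability, the resource/job-name processor and the full "OTEL Configuration Sample With AWS Container Insights Receiver" that appear in flattened form in the snippets that follow are reproduced here as standard YAML; oap-service:11800 is the sample's own example OAP endpoint.

```yaml
extensions:
  health_check:
receivers:
  awscontainerinsightreceiver:
processors:
  resource/job-name:
    attributes:
      - key: job_name
        value: aws-cloud-eks-monitoring   # required, or the OAP ignores the metrics
        action: insert
exporters:
  otlp:
    endpoint: oap-service:11800
    tls:
      insecure: true
  logging:
    loglevel: debug
service:
  pipelines:
    metrics:
      receivers: [awscontainerinsightreceiver]
      processors: [resource/job-name]
      exporters: [otlp, logging]
  extensions: [health_check]
```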
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","excerpt":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights …","ref":"/docs/main/latest/en/setup/backend/backend-aws-eks-monitoring/","title":"AWS Cloud EKS monitoring"},{"body":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
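The "Config SkyWalking OpenTelemetry receiver" step in the Set up lists above is done on the OAP side rather than in the collector. A minimal sketch, assuming a recent OAP release where the module is named receiver-otel with an otlp-metrics handler; the exact option names should be verified against the application.yml shipped with your version.

```yaml
# Hypothetical application.yml fragment; option names assumed, verify per release.
receiver-otel:
  selector: default
  default:
    enabledHandlers: otlp-metrics   # accept OTLP metrics from the collector's otlp exporter
```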
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","excerpt":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights …","ref":"/docs/main/next/en/setup/backend/backend-aws-eks-monitoring/","title":"AWS Cloud EKS monitoring"},{"body":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  EKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","excerpt":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-aws-eks-monitoring/","title":"AWS Cloud EKS monitoring"},{"body":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","excerpt":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-aws-eks-monitoring/","title":"AWS Cloud EKS monitoring"},{"body":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","excerpt":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-aws-eks-monitoring/","title":"AWS Cloud EKS monitoring"},{"body":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
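For reference, the configuration given under "OTEL Configuration Sample With AWS Container Insights Receiver" below, unflattened into readable YAML (every key and value is taken verbatim from that sample):

extensions:
  health_check:
receivers:
  awscontainerinsightreceiver:
processors:
  resource/job-name:
    attributes:
      - key: job_name
        value: aws-cloud-eks-monitoring
        action: insert
exporters:
  otlp:
    endpoint: oap-service:11800
    tls:
      insecure: true
  logging:
    loglevel: debug
service:
  pipelines:
    metrics:
      receivers: [awscontainerinsightreceiver]
      processors: [resource/job-name]
      exporters: [otlp, logging]
  extensions: [health_check]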
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","excerpt":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-aws-eks-monitoring/","title":"AWS Cloud EKS monitoring"},{"body":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","excerpt":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. …","ref":"/docs/main/latest/en/setup/backend/backend-aws-s3-monitoring/","title":"AWS Cloud S3 monitoring"},{"body":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","excerpt":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. …","ref":"/docs/main/next/en/setup/backend/backend-aws-s3-monitoring/","title":"AWS Cloud S3 monitoring"},{"body":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  S3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","excerpt":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-aws-s3-monitoring/","title":"AWS Cloud S3 monitoring"},{"body":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","excerpt":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-aws-s3-monitoring/","title":"AWS Cloud S3 monitoring"},{"body":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","excerpt":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-aws-s3-monitoring/","title":"AWS Cloud S3 monitoring"},{"body":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","excerpt":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-aws-s3-monitoring/","title":"AWS Cloud S3 monitoring"},{"body":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","excerpt":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to …","ref":"/docs/main/latest/en/setup/backend/backend-aws-dynamodb-monitoring/","title":"AWS DynamoDb monitoring"},{"body":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  
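To give a flavour of the MAL step just described, here is a hypothetical sketch of a rule file in the /config/otel-rules/aws-dynamodb style mentioned under Customizations; the layer comes from this page, while the label, source metric name, and expression are placeholders rather than the shipped rules:

metricPrefix: aws_dynamodb                                    # prefix for the generated meter names
expSuffix: service(['ACCOUNT_LABEL'], Layer.AWS_DYNAMODB)     # the AWS user id is cataloged as the Service; ACCOUNT_LABEL is a placeholder label
metricsRules:
  - name: consumed_read_capacity_units                        # would feed the "Read Usage" panel
    exp: SOURCE_CLOUDWATCH_METRIC.sum(['ACCOUNT_LABEL'])      # SOURCE_CLOUDWATCH_METRIC is a placeholder name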
Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. 
The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","excerpt":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to …","ref":"/docs/main/next/en/setup/backend/backend-aws-dynamodb-monitoring/","title":"AWS DynamoDb monitoring"},{"body":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  DynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. 
Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","excerpt":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-aws-dynamodb-monitoring/","title":"AWS DynamoDb monitoring"},{"body":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. 
Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","excerpt":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-aws-dynamodb-monitoring/","title":"AWS DynamoDb monitoring"},{"body":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. 
Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","excerpt":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-aws-dynamodb-monitoring/","title":"AWS DynamoDb monitoring"},{"body":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. 
Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. 
The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","excerpt":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-aws-dynamodb-monitoring/","title":"AWS DynamoDb monitoring"},{"body":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
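A hypothetical sketch of how the three TLS properties named in the second option above might be set on the OAP side (the surrounding module layout is assumed, not stated on this page; the file paths are placeholders):

aws-firehose:
  enableTLS: true
  tlsKeyPath: /path/to/server.key               # placeholder path to the private key
  tlsCertChainPath: /path/to/server-chain.crt   # placeholder path to the certificate chain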
The AWS Firehose receiver supports setting an accessKey for Kinesis Data Firehose; please refer to the configuration vocabulary.  ","excerpt":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an …","ref":"/docs/main/latest/en/setup/backend/aws-firehose-receiver/","title":"AWS Firehose receiver"},{"body":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications. You can leverage the receiver to collect AWS CloudWatch metrics and analyze them through MAL, as the receiver is based on the OpenTelemetry receiver.\nSetup (S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry format) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver (OAP) --\u0026gt; OpenTelemetry receiver (OAP) The following blogs demonstrate the complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatch Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatch Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatch Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatch Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatch Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to the HTTPS requirement of AWS Firehose (refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications), users have two options:   A proxy (e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath on the OAP side to accept requests from Firehose directly.  
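The two TLS-related options and the accessKey setting mentioned above are properties of the aws-firehose module in config/application.yml. A minimal sketch follows; the environment-variable names and the firehoseAccessKey property name are assumptions, so confirm them against the configuration vocabulary before relying on them.

```yaml
# Hedged sketch of the aws-firehose receiver module in config/application.yml.
# Env var names and firehoseAccessKey are assumed names; verify in the configuration vocabulary.
aws-firehose:
  selector: ${SW_RECEIVER_AWS_FIREHOSE:default}
  default:
    host: ${SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST:0.0.0.0}
    port: ${SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT:12801}
    # Option 2 from the notice above: let OAP terminate TLS itself.
    enableTLS: ${SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS:true}
    tlsKeyPath: ${SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH:/path/to/tls.key}
    tlsCertChainPath: ${SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH:/path/to/tls.crt}
    # Access key that the Firehose delivery stream sends with each request (assumed property name).
    firehoseAccessKey: ${SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY:}
```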
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","excerpt":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an …","ref":"/docs/main/next/en/setup/backend/aws-firehose-receiver/","title":"AWS Firehose receiver"},{"body":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443 (refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications. 
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","excerpt":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an …","ref":"/docs/main/v9.4.0/en/setup/backend/aws-firehose-receiver/","title":"AWS Firehose receiver"},{"body":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","excerpt":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an …","ref":"/docs/main/v9.5.0/en/setup/backend/aws-firehose-receiver/","title":"AWS Firehose receiver"},{"body":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","excerpt":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an …","ref":"/docs/main/v9.6.0/en/setup/backend/aws-firehose-receiver/","title":"AWS Firehose receiver"},{"body":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
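For the first (recommended) option above, any TLS-terminating proxy works. As a minimal sketch only, assuming the OAP service is reachable as skywalking-oap on port 12801 and using placeholder certificate paths, an Envoy front proxy could look roughly like this:

```yaml
# Minimal sketch (not a production config): an Envoy front proxy that terminates
# HTTPS on :443 and forwards Firehose requests to the OAP Firehose receiver.
# Hostname, certificate paths, and cluster names are placeholders.
static_resources:
  listeners:
    - name: firehose_https
      address:
        socket_address: { address: 0.0.0.0, port_value: 443 }
      filter_chains:
        - transport_socket:
            name: envoy.transport_sockets.tls
            typed_config:
              "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
              common_tls_context:
                tls_certificates:
                  - certificate_chain: { filename: /etc/envoy/certs/tls.crt }
                    private_key: { filename: /etc/envoy/certs/tls.key }
          filters:
            - name: envoy.filters.network.http_connection_manager
              typed_config:
                "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
                stat_prefix: aws_firehose
                route_config:
                  name: firehose_routes
                  virtual_hosts:
                    - name: oap
                      domains: ["*"]
                      routes:
                        - match: { prefix: "/aws/firehose/metrics" }
                          route: { cluster: oap_firehose }
                http_filters:
                  - name: envoy.filters.http.router
                    typed_config:
                      "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
  clusters:
    - name: oap_firehose
      type: STRICT_DNS
      load_assignment:
        cluster_name: oap_firehose
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address: { address: skywalking-oap, port_value: 12801 }
```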
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","excerpt":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an …","ref":"/docs/main/v9.7.0/en/setup/backend/aws-firehose-receiver/","title":"AWS Firehose receiver"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/latest/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly by default, the OAP server cluster would face the problem of load imbalance. This issue becomes severe in high-traffic load scenarios. In this doc, we will introduce two means to solve the problem.\nSkyWalking Satellite Project SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\nEnvoy Filter to Limit Connections Per OAP Instance If you don\u0026rsquo;t want to deploy skywalking-satellite, you can enable Istio sidecar injection for SkyWalking OAP Pods,\nkubectl label namespace $SKYWALKING_NAMESPACE istio-injection=enabled kubectl -n $SKYWALKING_NAMESPACE rollout restart -l app=skywalking,component=oap and apply an EnvoyFilter to limit the connections per OAP instance, so that each of the OAP instance can have similar amount of gRPC connections.\nBefore that, you need to calculate the number of connections for each OAP instance as follows:\nNUMBER_OF_SERVICE_PODS=\u0026lt;the-number-of-service-pods-that-are-monitored-by-skywalking\u0026gt; # Each service Pod has 2 connections to OAP NUMBER_OF_TOTAL_CONNECTIONS=$((NUMBER_OF_SERVICE_PODS * 2)) # Divide the total connections by the replicas of OAP NUMBER_OF_CONNECTIONS_PER_OAP=$((NUMBER_OF_TOTAL_CONNECTIONS / $NUMBER_OF_OAP_REPLICAS)) And you can apply an EnvoyFilter to limit connections:\nkubectl -n $SKYWALKING_NAMESPACE apply -f - \u0026lt;\u0026lt;EOF apiVersion: networking.istio.io/v1alpha3 kind: EnvoyFilter metadata: name: oap-limit-connections namespace: istio-system spec: configPatches: - applyTo: NETWORK_FILTER match: context: ANY listener: filterChain: filter: name: envoy.filters.network.http_connection_manager portNumber: 11800 patch: operation: INSERT_BEFORE value: name: envoy.filters.network.ConnectionLimit typed_config: \u0026#39;@type\u0026#39;: type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit max_connections: $NUMBER_OF_CONNECTIONS_PER_OAP stat_prefix: envoy_filters_network_connection_limit workloadSelector: labels: app: oap EOF By this approach, we can limit the connections to port 11800 per OAP instance, but there is another corner case when the amount of service Pods are huge. 
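For readability, the EnvoyFilter heredoc above is shown again here laid out as YAML (content unchanged), with the connection arithmetic worked through in the comments for hypothetical numbers (600 monitored service Pods, 4 OAP replicas).

```yaml
# Same EnvoyFilter as in the kubectl heredoc above, re-laid-out for readability.
# Worked example of the calculation (hypothetical numbers):
#   600 service Pods x 2 connections each = 1200 total connections
#   1200 / 4 OAP replicas = 300 -> NUMBER_OF_CONNECTIONS_PER_OAP
apiVersion: networking.istio.io/v1alpha3
kind: EnvoyFilter
metadata:
  name: oap-limit-connections
  namespace: istio-system
spec:
  configPatches:
    - applyTo: NETWORK_FILTER
      match:
        context: ANY
        listener:
          filterChain:
            filter:
              name: envoy.filters.network.http_connection_manager
          portNumber: 11800
      patch:
        operation: INSERT_BEFORE
        value:
          name: envoy.filters.network.ConnectionLimit
          typed_config:
            '@type': type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit
            max_connections: 300   # $NUMBER_OF_CONNECTIONS_PER_OAP from the calculation above
            stat_prefix: envoy_filters_network_connection_limit
  workloadSelector:
    labels:
      app: oap
```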
Because the limiting is on connection level, and each service Pod has 2 connections to OAP port 11800, one for Envoy ALS to send access log, the other one for Envoy metrics, and because the traffic of the 2 connections can vary very much, if the number of service Pods is large enough, an extreme case might happen that one OAP instance is serving all Envoy metrics connections and the other OAP instance is serving all Envoy ALS connections, which in turn might be unbalanced again, to solve this, we can split the ALS connections to a dedicated port, and limit the connections to that port only.\nYou can set the environment variable SW_ALS_GRPC_PORT to a port number other than 0 when deploying skywalking, and limit connections to that port only in the EnvoyFilter:\nexport SW_ALS_GRPC_PORT=11802 kubectl -n $SKYWALKING_NAMESPACE apply -f - \u0026lt;\u0026lt;EOF apiVersion: networking.istio.io/v1alpha3 kind: EnvoyFilter metadata: name: oap-limit-connections namespace: istio-system spec: configPatches: - applyTo: NETWORK_FILTER match: context: ANY listener: filterChain: filter: name: envoy.filters.network.http_connection_manager portNumber: $SW_ALS_GRPC_PORT patch: operation: INSERT_BEFORE value: name: envoy.filters.network.ConnectionLimit typed_config: \u0026#39;@type\u0026#39;: type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit max_connections: $NUMBER_OF_CONNECTIONS_PER_OAP stat_prefix: envoy_filters_network_connection_limit workloadSelector: labels: app: oap EOF ","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly by …","ref":"/docs/main/next/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When set the Agent or Envoy connecting to OAP server directly as in default, OAP server cluster would face the problem of OAP load imbalance. This issue would be very serious in high traffic load scenarios. Satellite is recommended to be used as a native gateway proxy, to provide load balancing capabilities for data content before the data from Agent/Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that, Satellite would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details how to set up load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When set the Agent or Envoy connecting to OAP server directly as in default, …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 or JDK17.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\nKey Parameters In The Booting Logs After the OAP booting process completed, you should be able to see all important parameters listed in the logs.\n2023-11-06 21:10:45,988 org.apache.skywalking.oap.server.starter.OAPServerBootstrap 67 [main] INFO [] - The key booting parameters of Apache SkyWalking OAP are listed as following. 
Running Mode | null TTL.metrics | 7 TTL.record | 3 Version | 9.7.0-SNAPSHOT-92af797 module.agent-analyzer.provider | default module.ai-pipeline.provider | default module.alarm.provider | default module.aws-firehose.provider | default module.cluster.provider | standalone module.configuration-discovery.provider | default module.configuration.provider | none module.core.provider | default module.envoy-metric.provider | default module.event-analyzer.provider | default module.log-analyzer.provider | default module.logql.provider | default module.promql.provider | default module.query.provider | graphql module.receiver-browser.provider | default module.receiver-clr.provider | default module.receiver-ebpf.provider | default module.receiver-event.provider | default module.receiver-jvm.provider | default module.receiver-log.provider | default module.receiver-meter.provider | default module.receiver-otel.provider | default module.receiver-profile.provider | default module.receiver-register.provider | default module.receiver-sharing-server.provider | default module.receiver-telegraf.provider | default module.receiver-trace.provider | default module.service-mesh.provider | default module.storage.provider | h2 module.telemetry.provider | none oap.external.grpc.host | 0.0.0.0 oap.external.grpc.port | 11800 oap.external.http.host | 0.0.0.0 oap.external.http.port | 12800 oap.internal.comm.host | 0.0.0.0 oap.internal.comm.port | 11800  oap.external.grpc.host:oap.external.grpc.port is for reporting telemetry data through gRPC channel, including native agents, OTEL. oap.external.http.host:oap.external.http.port is for reporting telemetry data through HTTP channel and query, including native GraphQL(UI), PromQL, LogQL. oap.internal.comm.host:oap.internal.comm.port is for OAP cluster internal communication via gRPC/HTTP2 protocol. The default host(0.0.0.0) is not suitable for the cluster mode, unless in k8s deployment. Please read Cluster Doc to understand how to set up the SkyWalking backend in the cluster mode.  application.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
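The Example that follows is flattened onto a single line in this index. Reconstructed as YAML (values copied from the example; the exact indentation, such as metadataQueryMaxSize sitting at the provider level rather than under properties, is an assumption), it reads:

```yaml
storage:
  selector: mysql  # the mysql storage will actually be activated, while the h2 storage takes no effect
  h2:
    properties:
      jdbcUrl: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}
      dataSource.user: ${SW_STORAGE_H2_USER:sa}
    metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
  mysql:
    properties:
      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true"}
      dataSource.user: ${SW_DATA_SOURCE_USER:root}
      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
    # other configurations
```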
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/latest/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. 
All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: Java 11/17/21.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\nKey Parameters In The Booting Logs After the OAP booting process completed, you should be able to see all important parameters listed in the logs.\n2023-11-06 21:10:45,988 org.apache.skywalking.oap.server.starter.OAPServerBootstrap 67 [main] INFO [] - The key booting parameters of Apache SkyWalking OAP are listed as following. 
Running Mode | null TTL.metrics | 7 TTL.record | 3 Version | 9.7.0-SNAPSHOT-92af797 module.agent-analyzer.provider | default module.ai-pipeline.provider | default module.alarm.provider | default module.aws-firehose.provider | default module.cluster.provider | standalone module.configuration-discovery.provider | default module.configuration.provider | none module.core.provider | default module.envoy-metric.provider | default module.event-analyzer.provider | default module.log-analyzer.provider | default module.logql.provider | default module.promql.provider | default module.query.provider | graphql module.receiver-browser.provider | default module.receiver-clr.provider | default module.receiver-ebpf.provider | default module.receiver-event.provider | default module.receiver-jvm.provider | default module.receiver-log.provider | default module.receiver-meter.provider | default module.receiver-otel.provider | default module.receiver-profile.provider | default module.receiver-register.provider | default module.receiver-sharing-server.provider | default module.receiver-telegraf.provider | default module.receiver-trace.provider | default module.service-mesh.provider | default module.storage.provider | h2 module.telemetry.provider | none oap.external.grpc.host | 0.0.0.0 oap.external.grpc.port | 11800 oap.external.http.host | 0.0.0.0 oap.external.http.port | 12800 oap.internal.comm.host | 0.0.0.0 oap.internal.comm.port | 11800  oap.external.grpc.host:oap.external.grpc.port is for reporting telemetry data through gRPC channel, including native agents, OTEL. oap.external.http.host:oap.external.http.port is for reporting telemetry data through HTTP channel and query, including native GraphQL(UI), PromQL, LogQL. oap.internal.comm.host:oap.internal.comm.port is for OAP cluster internal communication via gRPC/HTTP2 protocol. The default host(0.0.0.0) is not suitable for the cluster mode, unless in k8s deployment. Please read Cluster Doc to understand how to set up the SkyWalking backend in the cluster mode.  application.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/next/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. 
All dependencies of the backend can be found in it.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and ip/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n All native agents and probes, either language based or mesh probe, use the gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the Jetty service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend also in Jetty service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn about other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the setting file will help you read this document. The core concept behind this setting file is that the SkyWalking collector is based on pure modular design. End users can switch or assemble the collector features according to their own requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If there is only one provider listed, the selector is optional and can be omitted. Level 3. Settings of the provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:driver:${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}url:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. 
default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against trying to change the APIs of those modules, unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like YYYYMMDDHHmm in minute dimension metrics, which is timezone related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. If you want to override it, please follow the Java and OS documents.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, in order to reduce memory, network and storage space usages, SkyWalking saves based64-encoded ID(s) only in metrics entities. But these tools usually don\u0026rsquo;t support nested query, and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
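Regarding the "query the storage directly from a 3rd party tool" FAQ above: the switch lives under core/default in application.yml. A minimal sketch, assuming the SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS environment-variable name (the property name activeExtraModelColumns comes from the FAQ; the variable name is an assumption):

```yaml
core:
  selector: ${SW_CORE:default}
  default:
    # Store readable name columns next to the base64-encoded IDs so 3rd-party tools
    # (e.g. Kibana over ElasticSearch) can query the storage directly.
    # For 3rd-party integration only; adds no features to native SkyWalking use cases.
    activeExtraModelColumns: ${SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS:true}
```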
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:driver:${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}url:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. 
driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:driver:${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}url:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. 
driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. 
driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. 
driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. 
driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. 
driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 or JDK17.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\nKey Parameters In The Booting Logs After the OAP booting process completed, you should be able to see all important parameters listed in the logs.\n2023-11-06 21:10:45,988 org.apache.skywalking.oap.server.starter.OAPServerBootstrap 67 [main] INFO [] - The key booting parameters of Apache SkyWalking OAP are listed as following. Running Mode | null TTL.metrics | 7 TTL.record | 3 Version | 9.7.0-SNAPSHOT-92af797 module.agent-analyzer.provider | default module.ai-pipeline.provider | default module.alarm.provider | default module.aws-firehose.provider | default module.cluster.provider | standalone module.configuration-discovery.provider | default module.configuration.provider | none module.core.provider | default module.envoy-metric.provider | default module.event-analyzer.provider | default module.log-analyzer.provider | default module.logql.provider | default module.promql.provider | default module.query.provider | graphql module.receiver-browser.provider | default module.receiver-clr.provider | default module.receiver-ebpf.provider | default module.receiver-event.provider | default module.receiver-jvm.provider | default module.receiver-log.provider | default module.receiver-meter.provider | default module.receiver-otel.provider | default module.receiver-profile.provider | default module.receiver-register.provider | default module.receiver-sharing-server.provider | default module.receiver-telegraf.provider | default module.receiver-trace.provider | default module.service-mesh.provider | default module.storage.provider | h2 module.telemetry.provider | none oap.external.grpc.host | 0.0.0.0 oap.external.grpc.port | 11800 oap.external.http.host | 0.0.0.0 oap.external.http.port | 12800 oap.internal.comm.host | 0.0.0.0 oap.internal.comm.port | 11800  oap.external.grpc.host:oap.external.grpc.port is for reporting telemetry data through gRPC channel, including native agents, OTEL. 
oap.external.http.host:oap.external.http.port is for reporting telemetry data through HTTP channel and query, including native GraphQL(UI), PromQL, LogQL. oap.internal.comm.host:oap.internal.comm.port is for OAP cluster internal communication via gRPC/HTTP2 protocol. The default host(0.0.0.0) is not suitable for the cluster mode, unless in k8s deployment. Please read Cluster Doc to understand how to set up the SkyWalking backend in the cluster mode.  application.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  
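As an illustration of the three configuration levels and of the none provider and - selector described above, here is a minimal application.yml sketch. It is not copied from the distribution; the prometheus provider and the SW_TELEMETRY_* setting names are assumed for the example only.

telemetry:                         # Level 1: module name
  selector: ${SW_TELEMETRY:none}   # Level 2: the selector picks one provider; the env var overrides the default
  none:                            # the none provider is an empty shell with no actual logic
  prometheus:                      # Level 3: settings of a provider take effect only when it is selected
    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}   # assumed setting name, for illustration
    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}      # assumed setting name, for illustration
  # Setting the selector to - (for example, SW_TELEMETRY=-) excludes the whole module at runtime.

Because each value uses the ${ENV_VAR:default} placeholder form shown in the storage example above, it can be overridden by exporting the environment variable before running bin/startup.sh, without editing application.yml.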
FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Queries and storage at each time dimension (minute, hour, day, and month metrics indexes) are timezone-related when formatting time.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL and its compatible databases PostgreSQL and its compatible databases BanyanDB(alpha stage)  H2 is the default storage option in the distribution package. It is recommended to use H2 for testing and development ONLY. Elasticsearch and OpenSearch are recommended for production environments, especially for large-scale deployments. MySQL and PostgreSQL are recommended for medium-scale production deployments, especially with low trace and log sampling rates. Some of their compatible databases, such as TiDB and AWS Aurora, may support a larger scale better.\nBanyanDB is going to be our next-generation storage solution. It is still in the alpha stage, but it has shown great potential for performance improvement: less than 50% CPU usage and 50% memory usage, with 40% of the disk volume, compared to Elasticsearch at the same scale with 100% sampling. We are looking for early adopters, and it is expected to become our first-class recommended storage option from 2024.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/latest/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL and its compatible databases PostgreSQL and its compatible databases BanyanDB(alpha stage)  H2 is the default storage option in the distribution package. It is recommended to use H2 for testing and development ONLY. Elasticsearch and OpenSearch are recommended for production environments, especially for large-scale deployments. MySQL and PostgreSQL are recommended for medium-scale production deployments, especially with low trace and log sampling rates. Some of their compatible databases, such as TiDB and AWS Aurora, may support a larger scale better.\nBanyanDB is going to be our next-generation storage solution. It is still in the alpha stage, but it has shown great potential for performance improvement: less than 50% CPU usage and 50% memory usage, with 40% of the disk volume, compared to Elasticsearch at the same scale with 100% sampling. We are looking for early adopters, and it is expected to become our first-class recommended storage option from 2024.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. 
We have provided the following storage …","ref":"/docs/main/next/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allows you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL TiDB InfluxDB PostgreSQL IoTDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set storage provider to elasticsearch.\nElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage.\nSince 8.8.0, SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selector for different ElasticSearch server side version anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_SUPERDATASET_STORAGE_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin and Jaeger traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more), if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). 
This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are commended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overrided.\nAdvanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face query error at trace page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official document, since they have a direct impact on the performance of ElasticSearch.\nElasticSearch with Zipkin trace extension This implementation is very similar to elasticsearch, except that it extends to support Zipkin span storage. The configurations are largely the same.\nstorage:selector:${SW_STORAGE:zipkin-elasticsearch}zipkin-elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}# Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.htmlbulkActions:${SW_STORAGE_ES_BULK_ACTIONS:2000}# Execute the bulk every 2000 requestsbulkSize:${SW_STORAGE_ES_BULK_SIZE:20}# flush the bulk every 20mbflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsAbout Namespace When namespace is set, all index names in ElasticSearch will use it as prefix.\nMySQL Active MySQL as storage, set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. Only part of the settings are listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nTiDB Tested TiDB Server 4.0.8 version and MySQL Client driver 8.0.13 version are currently available. Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nInfluxDB InfluxDB storage provides a time-series database as a new storage option.\nstorage:selector:${SW_STORAGE:influxdb}influxdb:url:${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}user:${SW_STORAGE_INFLUXDB_USER:root}password:${SW_STORAGE_INFLUXDB_PASSWORD:}database:${SW_STORAGE_INFLUXDB_DATABASE:skywalking}actions:${SW_STORAGE_INFLUXDB_ACTIONS:1000}# the number of actions to collectduration:${SW_STORAGE_INFLUXDB_DURATION:1000}# the time to wait at most (milliseconds)fetchTaskLogMaxSize:${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000}# the max number of fetch task log in a requestAll connection related settings, including URL link, username, and password are found in application.yml. For metadata storage provider settings, refer to the configurations of H2/MySQL above.\nPostgreSQL PostgreSQL jdbc driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. 
Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. Only part of the settings are listed here. Please follow HikariCP connection pool document for full settings.\nIoTDB IoTDB is a time-series database from Apache, which is one of the storage plugin options.\nIoTDB storage plugin is still in progress. Its efficiency will improve in the future.\nstorage:selector:${SW_STORAGE:iotdb}iotdb:host:${SW_STORAGE_IOTDB_HOST:127.0.0.1}rpcPort:${SW_STORAGE_IOTDB_RPC_PORT:6667}username:${SW_STORAGE_IOTDB_USERNAME:root}password:${SW_STORAGE_IOTDB_PASSWORD:root}storageGroup:${SW_STORAGE_IOTDB_STORAGE_GROUP:root.skywalking}sessionPoolSize:${SW_STORAGE_IOTDB_SESSIONPOOL_SIZE:8}# If it\u0026#39;s zero, the SessionPool size will be 2*CPU_CoresfetchTaskLogMaxSize:${SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE:1000}# the max number of fetch task log in a requestAll connection related settings, including host, rpcPort, username, and password are found in application.yml. Please ensure the IoTDB version \u0026gt;= 0.12.3.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. 
This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage.\nSince 8.8.0, SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_SUPERDATASET_STORAGE_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin and Jaeger traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. 
In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nAdvanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. 
To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nTiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
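The activation described in the next sentence boils down to pointing the storage selector at banyandb; a minimal sketch, reusing the host and port defaults that appear in the full fragment below:

```yaml
storage:
  selector: ${SW_STORAGE:banyandb}
  banyandb:
    host: ${SW_STORAGE_BANYANDB_HOST:127.0.0.1}
    port: ${SW_STORAGE_BANYANDB_PORT:17912}
```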
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requestFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nSince 9.2.0, SkyWalking provides no-sharding/one-index mode to merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. After merge all indices, the following indices are available:\n sw_ui_template sw_metrics-all-${day-format} sw_log-${day-format} sw_segment-${day-format} sw_browser_error_log-${day-format} sw_zipkin_span-${day-format} sw_records-all-${day-format}   Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING). 
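Besides exporting that environment variable when launching the OAP, the same switch can be written directly in application.yml; the line below is the one that appears in the configuration fragment further down (default false):

```yaml
storage:
  elasticsearch:
    # ......
    logicSharding: ${SW_STORAGE_ES_LOGIC_SHARDING:false}
```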
Set it to true could shard metrics indices into multi-physical indices as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.\n Since 8.8.0, SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_SUPERDATASET_STORAGE_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin and Jaeger traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. 
This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nAdvanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. 
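The properties block appears to be what feeds the HikariCP pool (the next sentence points at the HikariCP documentation for the full list), so other standard HikariCP options can presumably be added alongside jdbcUrl in the same way; a hedged sketch (the two pool options below are ordinary HikariCP property names, and their effect has not been verified against this exact release):

```yaml
storage:
  mysql:
    properties:
      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true"}
      # Assumed to be passed through to HikariCP; tune to your environment.
      maximumPoolSize: 30
      connectionTimeout: 30000
```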
See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nTiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requestFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL MySQL-Sharding(Shardingsphere-Proxy 5.1.2) TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.6 2.4.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. 
slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. 
Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nMySQL-Sharding MySQL-Sharding plugin provides the MySQL database sharding and table sharding, this feature leverage Shardingsphere-Proxy to manage the JDBC between OAP and multi-database instances, and according to the sharding rules do routing to the database and table sharding.\nTested Shardingsphere-Proxy 5.1.2 version, and MySQL Client driver 8.0.13 version is currently available. Activate MySQL and Shardingsphere-Proxy as storage, and set storage provider to mysql-sharding.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql-sharding}mysql-sharding:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:13307/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}# The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml# The dataSource name should include the prefix \u0026#34;ds_\u0026#34; and separated by \u0026#34;,\u0026#34;dataSources:${SW_JDBC_SHARDING_DATA_SOURCES:ds_0,ds_1}TiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. 
Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL MySQL-Sharding(Shardingsphere-Proxy 5.3.1) TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.6 2.4.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. 
If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
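For reference, the HTTPS example shown earlier in this entry reads as follows when indented (a sketch of the same settings; the doubled SW_SW_ prefix on the JKS variables is copied as documented):

storage:
  selector: ${SW_STORAGE:elasticsearch}
  elasticsearch:
    namespace: ${SW_NAMESPACE:""}
    user: ${SW_ES_USER:""}          # needed when HTTP Basic authentication is enabled
    password: ${SW_ES_PASSWORD:""}
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}
    trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
    trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"https"}
    # ... remaining settings unchanged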
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
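As a small sketch of wiring up the secrets management described above, secretsManagementFile points at the watched properties file containing user, password, and trustStorePass; the path below is purely illustrative and not part of the original text:

storage:
  elasticsearch:
    # ...
    # absolute path to a properties file with user=..., password=..., trustStorePass=...
    # (example path only; the OAP server watches this file for changes)
    secretsManagementFile: ${SW_ES_SECRETS_MANAGEMENT_FILE:/etc/skywalking/es-credentials.properties}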
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
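The recommended server-side settings mentioned above, written one per line as they could appear in elasticsearch.yml (these are the documented suggestions; tune them for your environment):

# elasticsearch.yml (server side); in tracing scenarios consider values at least this high
thread_pool.index.queue_size: 1000   # only suitable for ElasticSearch 6
thread_pool.write.queue_size: 1000   # suitable for ElasticSearch 6 and 7
index.max_result_window: 1000000     # check this when the traces page reports a query error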
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nMySQL-Sharding MySQL-Sharding plugin provides the MySQL database sharding and table sharding, this feature leverage Shardingsphere-Proxy to manage the JDBC between OAP and multi-database instances, and according to the sharding rules do routing to the database and table sharding.\nTested Shardingsphere-Proxy 5.3.1 version, and MySQL Client driver 8.0.13 version is currently available. Activate MySQL and Shardingsphere-Proxy as storage, and set storage provider to mysql-sharding.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql-sharding}mysql-sharding:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:13307/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}# The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml# The dataSource name should include the prefix \u0026#34;ds_\u0026#34; and separated by \u0026#34;,\u0026#34;dataSources:${SW_JDBC_SHARDING_DATA_SOURCES:ds_0,ds_1}TiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. 
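Written out with indentation, the mysql-sharding fragment above reads as follows (a sketch of the same settings; the nesting is approximated):

storage:
  selector: ${SW_STORAGE:mysql-sharding}
  mysql-sharding:
    properties:
      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:13307/swtest?rewriteBatchedStatements=true"}
      dataSource.user: ${SW_DATA_SOURCE_USER:root}
      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root}
    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
    maxSizeOfBatchSql: ${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}
    asyncBatchPersistentPoolSize: ${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}
    # data sources are defined in the ShardingSphere-Proxy config-sharding.yaml;
    # names must carry the "ds_" prefix and be comma-separated
    dataSources: ${SW_JDBC_SHARDING_DATA_SOURCES:ds_0,ds_1}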
Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
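A readable sketch of the banyandb block shown in the fragment that follows, using the same documented variables and defaults; the selector line is added here only to reflect the activation step described in the text:

storage:
  selector: ${SW_STORAGE:banyandb}    # activation step described in the text
  banyandb:
    host: ${SW_STORAGE_BANYANDB_HOST:127.0.0.1}
    port: ${SW_STORAGE_BANYANDB_PORT:17912}
    maxBulkSize: ${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}
    flushInterval: ${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}
    metricsShardsNumber: ${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}
    recordShardsNumber: ${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}
    superDatasetShardsFactor: ${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}
    concurrentWriteThreads: ${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}
    profileTaskQueryMaxSize: ${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}  # max number of fetched tasks per request
    streamBlockInterval: ${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}       # unit is hour
    streamSegmentInterval: ${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}  # unit is hour
    measureBlockInterval: ${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}     # unit is hour
    measureSegmentInterval: ${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24} # unit is hour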
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.6 2.4.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. 
slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
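Every value above follows the ${ENV_VAR:default} pattern, so deployments usually override them with environment variables instead of editing application.yml. A hypothetical docker-compose fragment illustrating this; the image name, node addresses, and credentials are examples only:

services:
  oap:
    image: apache/skywalking-oap-server   # illustrative image reference
    environment:
      SW_STORAGE: elasticsearch
      SW_STORAGE_ES_CLUSTER_NODES: es-node-1:9200,es-node-2:9200   # illustrative addresses
      SW_ES_USER: elastic          # illustrative credentials
      SW_ES_PASSWORD: changeme
      SW_STORAGE_DAY_STEP: "1"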
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
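The index settings block just listed, indented for readability (same variables and defaults; the nesting is approximated):

storage:
  elasticsearch:
    # ...
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}
    specificIndexSettings: ${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:""}
    superDatasetIndexShardsFactor: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}
    superDatasetIndexReplicasNumber: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}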
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
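The translog example shown above, indented for readability; the advanced value is a JSON string that is passed through to the index settings:

storage:
  elasticsearch:
    # ...
    advanced: ${SW_STORAGE_ES_ADVANCED:"{\"index.translog.durability\":\"request\",\"index.translog.sync_interval\":\"5s\"}"}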
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
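For readability, the mysql fragment above reads as follows when indented; this sketch is trimmed to the connection-related keys, and the nesting is approximated:

storage:
  selector: ${SW_STORAGE:mysql}
  mysql:
    properties:
      # rewriteBatchedStatements=true batches inserts on the client side;
      # allowMultiQueries=true lets one statement string carry multiple queries
      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true&allowMultiQueries=true"}
      dataSource.user: ${SW_DATA_SOURCE_USER:root}
      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
    maxSizeOfBatchSql: ${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}
    asyncBatchPersistentPoolSize: ${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}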
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. 
If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL and its compatible databases PostgreSQL and its compatible databases BanyanDB(alpha stage)  H2 is the default storage option in the distribution package. It is recommended to use H2 for testing and development ONLY. Elasticsearch and OpenSearch are recommended for production environments, specially for large scale deployments. MySQL and PostgreSQL are recommended for production environments for medium scale deployments, especially for low trace and log sampling rate. Some of their compatible databases may support larger scale better, such as TiDB and AWS Aurora.\nBanyanDB is going to be our next generation storage solution. It is still in alpha stage. It has shown high potential performance improvement. Less than 50% CPU usage and 50% memory usage with 40% disk volume compared to Elasticsearch in the same scale with 100% sampling. We are looking for early adoption, and it would be our first-class recommended storage option since 2024.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Background Write Ahead Logging (WAL) is a technique used in databases to ensure that data is not lost due to system crashes or other failures. The basic idea of WAL is to log changes to a database in a separate file before applying them to the database itself. This way, if there is a system failure, the database can be recovered by replaying the log of changes from the WAL file. BanyanDB leverages the WAL to enhance the data buffer for schema resource writing. In such a system, write operations are first written to the WAL file before being applied to the interval buffer. This ensures that the log is written to disk before the actual data is written. 
Hence the term \u0026ldquo;write ahead\u0026rdquo;.\nFormat A segment refers to a block of data in the WAL file that contains a sequence of database changes. Once rotate is invoked, a new segment is created to continue logging subsequent changes. A \u0026ldquo;WALEntry\u0026rdquo; is a data unit representing a series of changes to a Series. Each WALEntry is written to a segment.\nA WALEntry contains the following fields:\n Length:8 bytes, which means the length of a WALEntry. Series ID:8 bytes, the same as the request Series ID. Count:4 bytes, the number of binary/timestamp values in one WALEntry. Timestamp:8 bytes. Binary Length:2 bytes. Binary: value in the write request.  Write process The writing process in WAL is as follows:\n First, the changes are written to the write buffer. Those with the same series ID will go to the identical WALEntry. When the buffer is full, the WALEntry is created, then flushed to the disk. WAL can optionally use the compression algorithm snappy to compress the data on disk. Each WALEntry is appended to the tail of the WAL file on the disk.  When entries in the buffer are flushed to the disk, the callback function returned by the write operation is invoked. You can ignore this function to improve the writing performance, but it risks losing data.\nRead WAL A client could read a single segment by a segment id. When opening the segment file, the reader will decompress the WAL file if the data was compressed during writing.\nRotation WAL supports a rotation operation to switch to a new segment. The operation closes the currently open segment and opens a new one, returning the closed segment details.\nDelete A client could delete a segment closed by the rotate operation.\nConfiguration BanyanDB WAL has the following configuration options:\n   Name Default Value Introduction     wal_compression true Compress the WAL entry or not   wal_file_size 64MB The size of the WAL file   wal_buffer_size 16kB The size of the WAL buffer    ","excerpt":"Background Write Ahead Logging (WAL) is a technique used in databases to ensure that data is not …","ref":"/docs/skywalking-banyandb/latest/concept/wal/","title":"Background"},{"body":"Background Write Ahead Logging (WAL) is a technique used in databases to ensure that data is not lost due to system crashes or other failures. The basic idea of WAL is to log changes to a database in a separate file before applying them to the database itself. This way, if there is a system failure, the database can be recovered by replaying the log of changes from the WAL file. BanyanDB leverages the WAL to enhance the data buffer for schema resource writing. In such a system, write operations are first written to the WAL file before being applied to the interval buffer. This ensures that the log is written to disk before the actual data is written. Hence the term \u0026ldquo;write ahead\u0026rdquo;.\nFormat A segment refers to a block of data in the WAL file that contains a sequence of database changes. Once rotate is invoked, a new segment is created to continue logging subsequent changes. A \u0026ldquo;WALEntry\u0026rdquo; is a data unit representing a series of changes to a Series. Each WALEntry is written to a segment.\nA WALEntry contains the following fields:\n Length:8 bytes, which means the length of a WALEntry. Series ID:8 bytes, the same as the request Series ID. Count:4 bytes, the number of binary/timestamp values in one WALEntry. Timestamp:8 bytes. Binary Length:2 bytes. Binary: value in the write request.  
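To make the field list above more concrete, here is an illustrative sketch of how a single WALEntry is laid out, expressed in YAML. The field names and the assumption that the timestamp/binary record repeats Count times are inferred from the description above, not taken from BanyanDB source code.

# Illustrative WALEntry layout (sketch, not actual BanyanDB code)
WALEntry:
  Length: 8 bytes          # total length of this WALEntry
  SeriesID: 8 bytes        # same as the request Series ID
  Count: 4 bytes           # number of timestamp/binary records that follow
  Values:                  # assumed to repeat Count times
    - Timestamp: 8 bytes
      BinaryLength: 2 bytes
      Binary: BinaryLength bytes   # value from the write request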
Write process The writing process in WAL is as follows:\n Firstly, the changes are first written to the write buffer. Those with the same series ID will go to the identical WALEntry. When the buffer is full, the WALEntry is created, then flushed to the disk. WAL can optionally use the compression algorithm snappy to compress the data on disk. Each WALEntry is appended to the tail of the WAL file on the disk.  When entries in the buffer are flushed to the disk, the callback function returned by the write operation is invoked. You can ignore this function to improve the writing performance, but it risks losing data.\nRead WAL A client could read a single segment by a segment id. When opening the segment file, the reader will decompress the WAL file if the writing compresses the data.\nRotation WAL supports rotation operation to switch to a new segment. The operation closes the currently open segment and opens a new one, returning the closed segment details.\nDelete A client could delete a segment closed by the rotate operation.\nconfiguration BanyanDB WAL has the following configuration options:\n   Name Default Value Introduction     wal_compression true Compress the WAL entry or not   wal_file_size 64MB The size of the WAL file   wal_buffer_size 16kB The size of WAL buffer.    ","excerpt":"Background Write Ahead Logging (WAL) is a technique used in databases to ensure that data is not …","ref":"/docs/skywalking-banyandb/v0.5.0/concept/wal/","title":"Background"},{"body":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nThe OAP requires BanyanDB 0.5 server. As BanyanDB is still in the beta phase, we don\u0026rsquo;t provide any compatibility besides the required version.\nstorage:banyandb:targets:${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","excerpt":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the …","ref":"/docs/main/latest/en/setup/backend/storages/banyandb/","title":"BanyanDB"},{"body":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nThe OAP requires BanyanDB 0.5 server. 
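For readability, the banyandb storage block quoted in the entries above and below is shown here with its application.yml indentation restored. This is a sketch: the values are the defaults from the flattened text, and the exact indentation and comment placement are assumptions.

storage:
  banyandb:
    targets: ${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}
    maxBulkSize: ${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}
    flushInterval: ${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}
    metricsShardsNumber: ${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}
    recordShardsNumber: ${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}
    superDatasetShardsFactor: ${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}
    concurrentWriteThreads: ${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}
    profileTaskQueryMaxSize: ${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200} # the max number of fetch tasks in a request
    streamBlockInterval: ${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4} # unit is hour
    streamSegmentInterval: ${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24} # unit is hour
    measureBlockInterval: ${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4} # unit is hour
    measureSegmentInterval: ${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24} # unit is hour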
As BanyanDB is still in the beta phase, we don\u0026rsquo;t provide any compatibility besides the required version.\nstorage:banyandb:targets:${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","excerpt":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the …","ref":"/docs/main/next/en/setup/backend/storages/banyandb/","title":"BanyanDB"},{"body":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nThe OAP requires BanyanDB 0.5 server. As BanyanDB is still in the beta phase, we don\u0026rsquo;t provide any compatibility besides the required version.\nstorage:banyandb:targets:${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","excerpt":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the …","ref":"/docs/main/v9.7.0/en/setup/backend/storages/banyandb/","title":"BanyanDB"},{"body":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises \u0026ldquo;Liaison Nodes\u0026rdquo;, \u0026ldquo;Data Nodes\u0026rdquo;, and \u0026ldquo;Meta Nodes\u0026rdquo;. This structure allows for effectively distributing and managing time-series data within the system.\n1. Architectural Overview A BanyanDB installation includes three distinct types of nodes: Data Nodes, Meta Nodes, and Liaison Nodes.\n1.1 Data Nodes Data Nodes hold all the raw time series data, metadata, and indexed data. 
They handle the storage and management of data, including streams and measures, tag keys and values, as well as field keys and values.\nData Nodes also handle the local query execution. When a query is made, it is directed to a Liaison, which then interacts with Data Nodes to execute the distributed query and return results.\nIn addition to persistent raw data, Data Nodes also handle TopN aggregation calculation or other computational tasks.\n1.2 Meta Nodes Meta Nodes is implemented by etcd. They are responsible for maintaining high-level metadata of the cluster, which includes:\n All nodes in the cluster All database schemas  1.3 Liaison Nodes Liaison Nodes serve as gateways, routing traffic to Data Nodes. In addition to routing, they also provide authentication, TTL, and other security services to ensure secure and effective communication without the cluster.\nLiaison Nodes are also responsible for handling computational tasks associated with distributed querying the database. They build query tasks and search for data from Data Nodes.\n1.4 Standalone Mode BanyanDB integrates multiple roles into a single process in the standalone mode, making it simpler and faster to deploy. This mode is especially useful for scenarios with a limited number of data points or for testing and development purposes.\nIn this mode, the single process performs the roles of the Liaison Node, Data Node, and Meta Node. It receives requests, maintains metadata, processes queries, and handles data, all within a unified setup.\n2. Communication within a Cluster All nodes within a BanyanDB cluster communicate with other nodes according to their roles:\n Meta Nodes share high-level metadata about the cluster. Data Nodes store and manage the raw time series data and communicate with Meta Nodes. Liaison Nodes distribute incoming data to the appropriate Data Nodes. They also handle distributed query execution and communicate with Meta Nodes.  Nodes Discovery All nodes in the cluster are discovered by the Meta Nodes. When a node starts up, it registers itself with the Meta Nodes. The Meta Nodes then share this information with the Liaison Nodes which use it to route requests to the appropriate nodes.\n3. Data Organization Different nodes in BanyanDB are responsible for different parts of the database, while Query and Liaison Nodes manage the routing and processing of queries.\n3.1 Meta Nodes Meta Nodes store all high-level metadata that describes the cluster. This data is kept in an etcd-backed database on disk, including information about the shard allocation of each Data Node. This information is used by the Liaison Nodes to route data to the appropriate Data Nodes, based on the sharding key of the data.\nBy storing shard allocation information, Meta Nodes help ensure that data is routed efficiently and accurately across the cluster. This information is constantly updated as the cluster changes, allowing for dynamic allocation of resources and efficient use of available capacity.\n3.2 Data Nodes Data Nodes store all raw time series data, metadata, and indexed data. On disk, the data is organized by \u0026lt;group\u0026gt;/shard-\u0026lt;shard_id\u0026gt;/\u0026lt;segment_id\u0026gt;/. The segment is designed to support retention policy.\n3.3 Liaison Nodes Liaison Nodes do not store data but manage the routing of incoming requests to the appropriate Query or Data Nodes. 
They also provide authentication, TTL, and other security services.\nThey also handle the computational tasks associated with data queries, interacting directly with Data Nodes to execute queries and return results.\n4. Determining Optimal Node Counts When creating a BanyanDB cluster, choosing the appropriate number of each node type to configure and connect is crucial. The number of Meta Nodes should always be odd, for instance, “3”. The number of Data Nodes scales based on your storage and query needs. The number of Liaison Nodes depends on the expected query load and routing complexity.\nIf the write and read load is from different sources, it is recommended to separate the Liaison Nodes for write and read. For instance, if the write load is from metrics, trace or log collectors and the read load is from a web application, it is recommended to separate the Liaison Nodes for write and read.\nThis separation allows for more efficient routing of requests and better performance. It also allows for scaling out of the cluster based on the specific needs of each type of request. For instance, if the write load is high, you can scale out the write Liaison Nodes to handle the increased load.\nThe BanyanDB architecture allows for efficient clustering, scaling, and high availability, making it a robust choice for time series data management.\n5. Writes in a Cluster In BanyanDB, writing data in a cluster is designed to take advantage of the robust capabilities of underlying storage systems, such as Google Compute Persistent Disk or Amazon S3(TBD). These platforms ensure high levels of data durability, making them an optimal choice for storing raw time series data.\n5.1 Data Replication Unlike some other systems, BanyanDB does not support application-level replication, which can consume significant disk space. Instead, it delegates the task of replication to these underlying storage systems. This approach simplifies the BanyanDB architecture and reduces the complexity of managing replication at the application level. This approach also results in significant data savings.\nThe comparison between using a storage system and application-level replication boils down to several key factors: reliability, scalability, and complexity.\nReliability: A storage system provides built-in data durability by automatically storing data across multiple systems. It\u0026rsquo;s designed to deliver 99.999999999% durability, ensuring data is reliably stored and available when needed. While replication can increase data availability, it\u0026rsquo;s dependent on the application\u0026rsquo;s implementation. Any bugs or issues in the replication logic can lead to data loss or inconsistencies.\nScalability: A storage system is highly scalable by design and can store and retrieve any amount of data from anywhere. As your data grows, the system grows with you. You don\u0026rsquo;t need to worry about outgrowing your storage capacity. Scaling application-level replication can be challenging. As data grows, so does the need for more disk space and compute resources, potentially leading to increased costs and management complexity.\nComplexity: With the storage system handling replication, the complexity is abstracted away from the user. The user need not concern themselves with the details of how replication is handled. Managing replication at the application level can be complex. 
It requires careful configuration, monitoring, and potentially significant engineering effort to maintain.\nFuthermore, the storage system might be cheaper. For instance, S3 can be more cost-effective because it eliminates the need for additional resources required for application-level replication. Application-level replication also requires ongoing maintenance, potentially increasing operational costs.\n5.2 Data Sharding Data distribution across the cluster is determined based on the shard_num setting for a group and the specified entity in each resource, be it a stream or measure. The resource’s name with its entity is the sharding key, guiding data distribution to the appropriate Data Node during write operations.\nLiaison Nodes retrieve shard mapping information from Meta Nodes to achieve efficient data routing. This information is used to route data to the appropriate Data Nodes based on the sharding key of the data.\nThis sharding strategy ensures the write load is evenly distributed across the cluster, enhancing write performance and overall system efficiency. BanyanDB uses a hash algorithm for sharding. The hash function maps the sharding key (resource name and entity) to a node in the cluster. Each shard is assigned to the node returned by the hash function.\n5.3 Data Write Path Here\u0026rsquo;s a text-based diagram illustrating the data write path in BanyanDB:\nUser | | API Request (Write) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Routes Request | (Identifies relevant Data Nodes | | and dispatches write request) | ------------------------------------ | v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request is a write request, containing the data to be written to the database. The Liaison Node, which is stateless, identifies the relevant Data Nodes that will store the data based on the entity specified in the request. The write request is executed across the identified Data Nodes. Each Data Node writes the data to its shard.  This architecture allows BanyanDB to execute write requests efficiently across a distributed system, leveraging the stateless nature and routing/writing capabilities of the Liaison Node, and the distributed storage of Data Nodes.\n6. Queries in a Cluster BanyanDB utilizes a distributed architecture that allows for efficient query processing. When a query is made, it is directed to a Liaison Node.\n6.1 Query Routing Liaison Nodes do not use shard mapping information from Meta Nodes to execute distributed queries. Instead, they access all Data Nodes to retrieve the necessary data for queries. As the query load is lower, it is practical for liaison nodes to access all data nodes for this purpose. It may increase network traffic, but simplifies scaling out of the cluster.\nCompared to the write load, the query load is relatively low. For instance, in a time series database, the write load is typically 100x higher than the query load. This is because the write load is driven by the number of devices sending data to the database, while the query load is driven by the number of users accessing the data.\nThis strategy enables scaling out of the cluster. When the cluster scales out, the liaison node can access all data nodes without any mapping info changes. 
It eliminates the need to backup previous shard mapping information, reducing complexity of scaling out.\n6.2 Query Execution Parallel execution significantly enhances the efficiency of data retrieval and reduces the overall query processing time. It allows for faster response times as the workload of the query is shared across multiple shards, each working on their part of the problem simultaneously. This feature makes BanyanDB particularly effective for large-scale data analysis tasks.\nIn summary, BanyanDB\u0026rsquo;s approach to querying leverages its unique distributed architecture, enabling high-performance data retrieval across multiple shards in parallel.\n6.3 Query Path User | | API Request (Query) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Distributes Query | (Access all Data nodes to | | execute distributed queries) | ------------------------------------ | | | v v v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request may be a query for specific data. The Liaison Node builds a distributed query to select all data nodes. The query is executed in parallel across all Data Nodes. Each Data Node execute a local query plan to process the data stored in its shard concurrently with the others. The results from each shard are then returned to the Liaison Node, which consolidates them into a single response to the user.  This architecture allows BanyanDB to execute queries efficiently across a distributed system, leveraging the distributed query capabilities of the Liaison Node and the parallel processing of Data Nodes.\n","excerpt":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises …","ref":"/docs/skywalking-banyandb/latest/concept/clustering/","title":"BanyanDB Clustering"},{"body":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises \u0026ldquo;Liaison Nodes\u0026rdquo;, \u0026ldquo;Data Nodes\u0026rdquo;, and \u0026ldquo;Meta Nodes\u0026rdquo;. This structure allows for effectively distributing and managing time-series data within the system.\n1. Architectural Overview A BanyanDB installation includes three distinct types of nodes: Data Nodes, Meta Nodes, and Liaison Nodes.\n1.1 Data Nodes Data Nodes hold all the raw time series data, metadata, and indexed data. They handle the storage and management of data, including streams and measures, tag keys and values, as well as field keys and values.\nData Nodes also handle the local query execution. When a query is made, it is directed to a Liaison, which then interacts with Data Nodes to execute the distributed query and return results.\nIn addition to persistent raw data, Data Nodes also handle TopN aggregation calculation or other computational tasks.\n1.2 Meta Nodes Meta Nodes is implemented by etcd. They are responsible for maintaining high-level metadata of the cluster, which includes:\n All nodes in the cluster All database schemas  1.3 Liaison Nodes Liaison Nodes serve as gateways, routing traffic to Data Nodes. In addition to routing, they also provide authentication, TTL, and other security services to ensure secure and effective communication without the cluster.\nLiaison Nodes are also responsible for handling computational tasks associated with distributed querying the database. 
They build query tasks and search for data from Data Nodes.\n1.4 Standalone Mode BanyanDB integrates multiple roles into a single process in the standalone mode, making it simpler and faster to deploy. This mode is especially useful for scenarios with a limited number of data points or for testing and development purposes.\nIn this mode, the single process performs the roles of the Liaison Node, Data Node, and Meta Node. It receives requests, maintains metadata, processes queries, and handles data, all within a unified setup.\n2. Communication within a Cluster All nodes within a BanyanDB cluster communicate with other nodes according to their roles:\n Meta Nodes share high-level metadata about the cluster. Data Nodes store and manage the raw time series data and communicate with Meta Nodes. Liaison Nodes distribute incoming data to the appropriate Data Nodes. They also handle distributed query execution and communicate with Meta Nodes.  Nodes Discovery All nodes in the cluster are discovered by the Meta Nodes. When a node starts up, it registers itself with the Meta Nodes. The Meta Nodes then share this information with the Liaison Nodes which use it to route requests to the appropriate nodes.\nIf data nodes are unable to connect to the meta nodes due to network partition or other issues, they will be removed from the meta nodes. However, the liaison nodes will not remove the data nodes from their routing list until the data nodes are also unreachable from the liaison nodes' perspective. This approach ensures that the system can continue to function even if some data nodes are temporarily unavailable from the meta nodes.\n3. Data Organization Different nodes in BanyanDB are responsible for different parts of the database, while Query and Liaison Nodes manage the routing and processing of queries.\n3.1 Meta Nodes Meta Nodes store all high-level metadata that describes the cluster. This data is kept in an etcd-backed database on disk, including information about the shard allocation of each Data Node. This information is used by the Liaison Nodes to route data to the appropriate Data Nodes, based on the sharding key of the data.\nBy storing shard allocation information, Meta Nodes help ensure that data is routed efficiently and accurately across the cluster. This information is constantly updated as the cluster changes, allowing for dynamic allocation of resources and efficient use of available capacity.\n3.2 Data Nodes Data Nodes store all raw time series data, metadata, and indexed data. On disk, the data is organized by \u0026lt;group\u0026gt;/shard-\u0026lt;shard_id\u0026gt;/\u0026lt;segment_id\u0026gt;/. The segment is designed to support retention policy.\n3.3 Liaison Nodes Liaison Nodes do not store data but manage the routing of incoming requests to the appropriate Query or Data Nodes. They also provide authentication, TTL, and other security services.\nThey also handle the computational tasks associated with data queries, interacting directly with Data Nodes to execute queries and return results.\n4. Determining Optimal Node Counts When creating a BanyanDB cluster, choosing the appropriate number of each node type to configure and connect is crucial. The number of Meta Nodes should always be odd, for instance, “3”. The number of Data Nodes scales based on your storage and query needs. 
The number of Liaison Nodes depends on the expected query load and routing complexity.\nIf the write and read load is from different sources, it is recommended to separate the Liaison Nodes for write and read. For instance, if the write load is from metrics, trace or log collectors and the read load is from a web application, it is recommended to separate the Liaison Nodes for write and read.\nThis separation allows for more efficient routing of requests and better performance. It also allows for scaling out of the cluster based on the specific needs of each type of request. For instance, if the write load is high, you can scale out the write Liaison Nodes to handle the increased load.\nThe BanyanDB architecture allows for efficient clustering, scaling, and high availability, making it a robust choice for time series data management.\n5. Writes in a Cluster In BanyanDB, writing data in a cluster is designed to take advantage of the robust capabilities of underlying storage systems, such as Google Compute Persistent Disk or Amazon S3(TBD). These platforms ensure high levels of data durability, making them an optimal choice for storing raw time series data.\n5.1 Data Replication Unlike some other systems, BanyanDB does not support application-level replication, which can consume significant disk space. Instead, it delegates the task of replication to these underlying storage systems. This approach simplifies the BanyanDB architecture and reduces the complexity of managing replication at the application level. This approach also results in significant data savings.\nThe comparison between using a storage system and application-level replication boils down to several key factors: reliability, scalability, and complexity.\nReliability: A storage system provides built-in data durability by automatically storing data across multiple systems. It\u0026rsquo;s designed to deliver 99.999999999% durability, ensuring data is reliably stored and available when needed. While replication can increase data availability, it\u0026rsquo;s dependent on the application\u0026rsquo;s implementation. Any bugs or issues in the replication logic can lead to data loss or inconsistencies.\nScalability: A storage system is highly scalable by design and can store and retrieve any amount of data from anywhere. As your data grows, the system grows with you. You don\u0026rsquo;t need to worry about outgrowing your storage capacity. Scaling application-level replication can be challenging. As data grows, so does the need for more disk space and compute resources, potentially leading to increased costs and management complexity.\nComplexity: With the storage system handling replication, the complexity is abstracted away from the user. The user need not concern themselves with the details of how replication is handled. Managing replication at the application level can be complex. It requires careful configuration, monitoring, and potentially significant engineering effort to maintain.\nFuthermore, the storage system might be cheaper. For instance, S3 can be more cost-effective because it eliminates the need for additional resources required for application-level replication. Application-level replication also requires ongoing maintenance, potentially increasing operational costs.\n5.2 Data Sharding Data distribution across the cluster is determined based on the shard_num setting for a group and the specified entity in each resource, be it a stream or measure. 
The resource’s name with its entity is the sharding key, guiding data distribution to the appropriate Data Node during write operations.\nLiaison Nodes retrieve shard mapping information from Meta Nodes to achieve efficient data routing. This information is used to route data to the appropriate Data Nodes based on the sharding key of the data.\nThis sharding strategy ensures the write load is evenly distributed across the cluster, enhancing write performance and overall system efficiency. BanyanDB uses a hash algorithm for sharding. The hash function maps the sharding key (resource name and entity) to a node in the cluster. Each shard is assigned to the node returned by the hash function.\n5.3 Data Write Path Here\u0026rsquo;s a text-based diagram illustrating the data write path in BanyanDB:\nUser | | API Request (Write) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Routes Request | (Identifies relevant Data Nodes | | and dispatches write request) | ------------------------------------ | v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request is a write request, containing the data to be written to the database. The Liaison Node, which is stateless, identifies the relevant Data Nodes that will store the data based on the entity specified in the request. The write request is executed across the identified Data Nodes. Each Data Node writes the data to its shard.  This architecture allows BanyanDB to execute write requests efficiently across a distributed system, leveraging the stateless nature and routing/writing capabilities of the Liaison Node, and the distributed storage of Data Nodes.\n6. Queries in a Cluster BanyanDB utilizes a distributed architecture that allows for efficient query processing. When a query is made, it is directed to a Liaison Node.\n6.1 Query Routing Liaison Nodes do not use shard mapping information from Meta Nodes to execute distributed queries. Instead, they access all Data Nodes to retrieve the necessary data for queries. As the query load is lower, it is practical for liaison nodes to access all data nodes for this purpose. It may increase network traffic, but simplifies scaling out of the cluster.\nCompared to the write load, the query load is relatively low. For instance, in a time series database, the write load is typically 100x higher than the query load. This is because the write load is driven by the number of devices sending data to the database, while the query load is driven by the number of users accessing the data.\nThis strategy enables scaling out of the cluster. When the cluster scales out, the liaison node can access all data nodes without any mapping info changes. It eliminates the need to backup previous shard mapping information, reducing complexity of scaling out.\n6.2 Query Execution Parallel execution significantly enhances the efficiency of data retrieval and reduces the overall query processing time. It allows for faster response times as the workload of the query is shared across multiple shards, each working on their part of the problem simultaneously. 
This feature makes BanyanDB particularly effective for large-scale data analysis tasks.\nIn summary, BanyanDB\u0026rsquo;s approach to querying leverages its unique distributed architecture, enabling high-performance data retrieval across multiple shards in parallel.\n6.3 Query Path User | | API Request (Query) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Distributes Query | (Access all Data nodes to | | execute distributed queries) | ------------------------------------ | | | v v v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request may be a query for specific data. The Liaison Node builds a distributed query to select all data nodes. The query is executed in parallel across all Data Nodes. Each Data Node executes a local query plan to process the data stored in its shard concurrently with the others. The results from each shard are then returned to the Liaison Node, which consolidates them into a single response to the user.  This architecture allows BanyanDB to execute queries efficiently across a distributed system, leveraging the distributed query capabilities of the Liaison Node and the parallel processing of Data Nodes.\n7. Failover BanyanDB is designed to be highly available and fault-tolerant.\nIn case of a Data Node failure, the system can automatically recover and continue to operate.\nLiaison Nodes have a built-in mechanism to detect the failure of a Data Node. When a Data Node fails, the Liaison Node will automatically route requests to other available Data Nodes with the same shard. This ensures that the system remains operational even in the face of node failures. Thanks to the query mode, which allows Liaison Nodes to access all Data Nodes, the system can continue to function even if some Data Nodes are unavailable. When the failed data nodes are restored, the system won\u0026rsquo;t replay data to them since the data is still retrieved from other nodes.\nIn the case of a Liaison Node failure, the system can be configured to have multiple Liaison Nodes for redundancy. If one Liaison Node fails, the other Liaison Nodes can take over its responsibilities, ensuring that the system remains available.\n Please note that any write request that triggers the failover process will be rejected, and the client should re-send the request.\n ","excerpt":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises …","ref":"/docs/skywalking-banyandb/next/concept/clustering/","title":"BanyanDB Clustering"},{"body":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises \u0026ldquo;Liaison Nodes\u0026rdquo;, \u0026ldquo;Data Nodes\u0026rdquo;, and \u0026ldquo;Meta Nodes\u0026rdquo;. This structure allows for effectively distributing and managing time-series data within the system.\n1. Architectural Overview A BanyanDB installation includes three distinct types of nodes: Data Nodes, Meta Nodes, and Liaison Nodes.\n1.1 Data Nodes Data Nodes hold all the raw time series data, metadata, and indexed data.
When a query is made, it is directed to a Liaison, which then interacts with Data Nodes to execute the distributed query and return results.\nIn addition to persistent raw data, Data Nodes also handle TopN aggregation calculation or other computational tasks.\n1.2 Meta Nodes Meta Nodes is implemented by etcd. They are responsible for maintaining high-level metadata of the cluster, which includes:\n All nodes in the cluster All database schemas  1.3 Liaison Nodes Liaison Nodes serve as gateways, routing traffic to Data Nodes. In addition to routing, they also provide authentication, TTL, and other security services to ensure secure and effective communication without the cluster.\nLiaison Nodes are also responsible for handling computational tasks associated with distributed querying the database. They build query tasks and search for data from Data Nodes.\n1.4 Standalone Mode BanyanDB integrates multiple roles into a single process in the standalone mode, making it simpler and faster to deploy. This mode is especially useful for scenarios with a limited number of data points or for testing and development purposes.\nIn this mode, the single process performs the roles of the Liaison Node, Data Node, and Meta Node. It receives requests, maintains metadata, processes queries, and handles data, all within a unified setup.\n2. Communication within a Cluster All nodes within a BanyanDB cluster communicate with other nodes according to their roles:\n Meta Nodes share high-level metadata about the cluster. Data Nodes store and manage the raw time series data and communicate with Meta Nodes. Liaison Nodes distribute incoming data to the appropriate Data Nodes. They also handle distributed query execution and communicate with Meta Nodes.  Nodes Discovery All nodes in the cluster are discovered by the Meta Nodes. When a node starts up, it registers itself with the Meta Nodes. The Meta Nodes then share this information with the Liaison Nodes which use it to route requests to the appropriate nodes.\n3. Data Organization Different nodes in BanyanDB are responsible for different parts of the database, while Query and Liaison Nodes manage the routing and processing of queries.\n3.1 Meta Nodes Meta Nodes store all high-level metadata that describes the cluster. This data is kept in an etcd-backed database on disk, including information about the shard allocation of each Data Node. This information is used by the Liaison Nodes to route data to the appropriate Data Nodes, based on the sharding key of the data.\nBy storing shard allocation information, Meta Nodes help ensure that data is routed efficiently and accurately across the cluster. This information is constantly updated as the cluster changes, allowing for dynamic allocation of resources and efficient use of available capacity.\n3.2 Data Nodes Data Nodes store all raw time series data, metadata, and indexed data. On disk, the data is organized by \u0026lt;group\u0026gt;/shard-\u0026lt;shard_id\u0026gt;/\u0026lt;segment_id\u0026gt;/. The segment is designed to support retention policy.\n3.3 Liaison Nodes Liaison Nodes do not store data but manage the routing of incoming requests to the appropriate Query or Data Nodes. They also provide authentication, TTL, and other security services.\nThey also handle the computational tasks associated with data queries, interacting directly with Data Nodes to execute queries and return results.\n4. 
Determining Optimal Node Counts When creating a BanyanDB cluster, choosing the appropriate number of each node type to configure and connect is crucial. The number of Meta Nodes should always be odd, for instance, “3”. The number of Data Nodes scales based on your storage and query needs. The number of Liaison Nodes depends on the expected query load and routing complexity.\nIf the write and read load is from different sources, it is recommended to separate the Liaison Nodes for write and read. For instance, if the write load is from metrics, trace or log collectors and the read load is from a web application, it is recommended to separate the Liaison Nodes for write and read.\nThis separation allows for more efficient routing of requests and better performance. It also allows for scaling out of the cluster based on the specific needs of each type of request. For instance, if the write load is high, you can scale out the write Liaison Nodes to handle the increased load.\nThe BanyanDB architecture allows for efficient clustering, scaling, and high availability, making it a robust choice for time series data management.\n5. Writes in a Cluster In BanyanDB, writing data in a cluster is designed to take advantage of the robust capabilities of underlying storage systems, such as Google Compute Persistent Disk or Amazon S3(TBD). These platforms ensure high levels of data durability, making them an optimal choice for storing raw time series data.\n5.1 Data Replication Unlike some other systems, BanyanDB does not support application-level replication, which can consume significant disk space. Instead, it delegates the task of replication to these underlying storage systems. This approach simplifies the BanyanDB architecture and reduces the complexity of managing replication at the application level. This approach also results in significant data savings.\nThe comparison between using a storage system and application-level replication boils down to several key factors: reliability, scalability, and complexity.\nReliability: A storage system provides built-in data durability by automatically storing data across multiple systems. It\u0026rsquo;s designed to deliver 99.999999999% durability, ensuring data is reliably stored and available when needed. While replication can increase data availability, it\u0026rsquo;s dependent on the application\u0026rsquo;s implementation. Any bugs or issues in the replication logic can lead to data loss or inconsistencies.\nScalability: A storage system is highly scalable by design and can store and retrieve any amount of data from anywhere. As your data grows, the system grows with you. You don\u0026rsquo;t need to worry about outgrowing your storage capacity. Scaling application-level replication can be challenging. As data grows, so does the need for more disk space and compute resources, potentially leading to increased costs and management complexity.\nComplexity: With the storage system handling replication, the complexity is abstracted away from the user. The user need not concern themselves with the details of how replication is handled. Managing replication at the application level can be complex. It requires careful configuration, monitoring, and potentially significant engineering effort to maintain.\nFuthermore, the storage system might be cheaper. For instance, S3 can be more cost-effective because it eliminates the need for additional resources required for application-level replication. 
Application-level replication also requires ongoing maintenance, potentially increasing operational costs.\n5.2 Data Sharding Data distribution across the cluster is determined based on the shard_num setting for a group and the specified entity in each resource, be it a stream or measure. The resource’s name with its entity is the sharding key, guiding data distribution to the appropriate Data Node during write operations.\nLiaison Nodes retrieve shard mapping information from Meta Nodes to achieve efficient data routing. This information is used to route data to the appropriate Data Nodes based on the sharding key of the data.\nThis sharding strategy ensures the write load is evenly distributed across the cluster, enhancing write performance and overall system efficiency. BanyanDB uses a hash algorithm for sharding. The hash function maps the sharding key (resource name and entity) to a node in the cluster. Each shard is assigned to the node returned by the hash function.\n5.3 Data Write Path Here\u0026rsquo;s a text-based diagram illustrating the data write path in BanyanDB:\nUser | | API Request (Write) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Routes Request | (Identifies relevant Data Nodes | | and dispatches write request) | ------------------------------------ | v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request is a write request, containing the data to be written to the database. The Liaison Node, which is stateless, identifies the relevant Data Nodes that will store the data based on the entity specified in the request. The write request is executed across the identified Data Nodes. Each Data Node writes the data to its shard.  This architecture allows BanyanDB to execute write requests efficiently across a distributed system, leveraging the stateless nature and routing/writing capabilities of the Liaison Node, and the distributed storage of Data Nodes.\n6. Queries in a Cluster BanyanDB utilizes a distributed architecture that allows for efficient query processing. When a query is made, it is directed to a Liaison Node.\n6.1 Query Routing Liaison Nodes do not use shard mapping information from Meta Nodes to execute distributed queries. Instead, they access all Data Nodes to retrieve the necessary data for queries. As the query load is lower, it is practical for liaison nodes to access all data nodes for this purpose. It may increase network traffic, but simplifies scaling out of the cluster.\nCompared to the write load, the query load is relatively low. For instance, in a time series database, the write load is typically 100x higher than the query load. This is because the write load is driven by the number of devices sending data to the database, while the query load is driven by the number of users accessing the data.\nThis strategy enables scaling out of the cluster. When the cluster scales out, the liaison node can access all data nodes without any mapping info changes. It eliminates the need to backup previous shard mapping information, reducing complexity of scaling out.\n6.2 Query Execution Parallel execution significantly enhances the efficiency of data retrieval and reduces the overall query processing time. 
It allows for faster response times as the workload of the query is shared across multiple shards, each working on their part of the problem simultaneously. This feature makes BanyanDB particularly effective for large-scale data analysis tasks.\nIn summary, BanyanDB\u0026rsquo;s approach to querying leverages its unique distributed architecture, enabling high-performance data retrieval across multiple shards in parallel.\n6.3 Query Path User | | API Request (Query) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Distributes Query | (Access all Data nodes to | | execute distributed queries) | ------------------------------------ | | | v v v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request may be a query for specific data. The Liaison Node builds a distributed query to select all data nodes. The query is executed in parallel across all Data Nodes. Each Data Node executes a local query plan to process the data stored in its shard concurrently with the others. The results from each shard are then returned to the Liaison Node, which consolidates them into a single response to the user.  This architecture allows BanyanDB to execute queries efficiently across a distributed system, leveraging the distributed query capabilities of the Liaison Node and the parallel processing of Data Nodes.\n","excerpt":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises …","ref":"/docs/skywalking-banyandb/v0.5.0/concept/clustering/","title":"BanyanDB Clustering"},{"body":"This is the blog section. It has two categories: News and Releases.\nFiles in these directories will be listed in reverse chronological order.\n","excerpt":"This is the blog section. It has two categories: News and Releases.\nFiles in these directories will …","ref":"/blog/","title":"Blog"},{"body":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from BookKeeper and leverages OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System. BookKeeper is modeled as a Service entity in the OAP, on Layer: BOOKKEEPER.\nData flow  BookKeeper exposes metrics through a Prometheus endpoint. OpenTelemetry Collector fetches metrics from the BookKeeper cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up BookKeeper Cluster. Set up OpenTelemetry Collector. For an example OpenTelemetry Collector configuration, refer to here. Configure the SkyWalking OpenTelemetry receiver.  BookKeeper Monitoring BookKeeper monitoring provides multidimensional metrics monitoring of the BookKeeper cluster as a Layer: BOOKKEEPER Service in the OAP. In each cluster, the nodes are represented as Instance.\nBookKeeper Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Bookie Ledgers Count meter_bookkeeper_bookie_ledgers_count The number of the bookie ledgers. Bookkeeper Cluster   Bookie Ledger Writable Dirs meter_bookkeeper_bookie_ledger_writable_dirs The number of writable directories in the bookie.
Bookkeeper Cluster   Bookie Ledger Dir Usage meter_bookkeeper_bookie_ledger_dir_data_bookkeeper_ledgers_usage The number of successfully created connections. Bookkeeper Cluster   Bookie Entries Count meter_bookkeeper_bookie_entries_count The number of the bookie write entries. Bookkeeper Cluster   Bookie Write Cache Size meter_bookkeeper_bookie_write_cache_size The size of the bookie write cache (MB). Bookkeeper Cluster   Bookie Write Cache Entry Count meter_bookkeeper_bookie_write_cache_count The entry count in the bookie write cache. Bookkeeper Cluster   Bookie Read Cache Size meter_bookkeeper_bookie_read_cache_size The size of the bookie read cache (MB). Bookkeeper Cluster   Bookie Read Cache Entry Count meter_bookkeeper_bookie_read_cache_count The entry count in the bookie read cache. Bookkeeper Cluster   Bookie Read Rate meter_bookkeeper_bookie_read_rate The bookie read rate (bytes/s). Bookkeeper Cluster   Bookie Write Rate meter_bookkeeper_bookie_write_rate The bookie write rate (bytes/s). Bookkeeper Cluster    BookKeeper Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     JVM Memory Pool Used meter_bookkeeper_node_jvm_memory_pool_used The usage of the broker jvm memory pool. Bookkeeper Bookie   JVM Memory meter_bookkeeper_node_jvm_memory_used meter_bookkeeper_node_jvm_memory_committed meter_bookkeeper_node_jvm_memory_init The usage of the broker jvm memory. Bookkeeper Bookie   JVM Threads meter_bookkeeper_node_jvm_threads_current meter_bookkeeper_node_jvm_threads_daemon meter_bookkeeper_node_jvm_threads_peak meter_bookkeeper_node_jvm_threads_deadlocked The count of the jvm threads. Bookkeeper Bookie   GC Time meter_bookkeeper_node_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Bookkeeper Bookie   GC Count meter_bookkeeper_node_jvm_gc_collection_seconds_count The count of a given JVM garbage. Bookkeeper Bookie   Thread Executor Completed meter_bookkeeper_node_thread_executor_completed The count of the executor thread. Bookkeeper Bookie   Thread Executor Tasks meter_bookkeeper_node_thread_executor_tasks_completed meter_bookkeeper_node_thread_executor_tasks_rejected meter_bookkeeper_node_thread_executor_tasks_failed The count of the executor tasks. Bookkeeper Bookie   Pooled Threads meter_bookkeeper_node_high_priority_threads meter_bookkeeper_node_read_thread_pool_threads The count of the pooled thread. Bookkeeper Bookie   Pooled Threads Max Queue Size meter_bookkeeper_node_high_priority_thread_max_queue_size meter_bookkeeper_node_read_thread_pool_max_queue_size The count of the pooled threads max queue size. Bookkeeper Bookie    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/bookkeeper/bookkeeper-cluster.yaml, otel-rules/bookkeeper/bookkeeper-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/bookkeeper.\n","excerpt":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the …","ref":"/docs/main/latest/en/setup/backend/backend-bookkeeper-monitoring/","title":"BookKeeper monitoring"},{"body":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the BookKeeper and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. 
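The Setup steps above mention an example OpenTelemetry Collector configuration. The following is a minimal sketch of such a pipeline; the scrape target, the OAP address, and the port are illustrative assumptions, not values taken from the SkyWalking repository.

receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: bookkeeper
          scrape_interval: 10s
          static_configs:
            - targets: ["bookkeeper-bookie:8000"] # assumed bookie Prometheus endpoint
exporters:
  otlp:
    endpoint: oap.skywalking.svc:11800 # assumed SkyWalking OAP OTLP/gRPC address
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]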
Kafka entity as a Service in OAP and on the `Layer: BOOKKEEPER.\nData flow  BookKeeper exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from BookKeeper cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up BookKeeper Cluster. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  BookKeeper Monitoring Bookkeeper monitoring provides multidimensional metrics monitoring of BookKeeper cluster as Layer: BOOKKEEPER Service in the OAP. In each cluster, the nodes are represented as Instance.\nBookKeeper Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Bookie Ledgers Count meter_bookkeeper_bookie_ledgers_count The number of the bookie ledgers. Bookkeeper Cluster   Bookie Ledger Writable Dirs meter_bookkeeper_bookie_ledger_writable_dirs The number of writable directories in the bookie. Bookkeeper Cluster   Bookie Ledger Dir Usage meter_bookkeeper_bookie_ledger_dir_data_bookkeeper_ledgers_usage The number of successfully created connections. Bookkeeper Cluster   Bookie Entries Count meter_bookkeeper_bookie_entries_count The number of the bookie write entries. Bookkeeper Cluster   Bookie Write Cache Size meter_bookkeeper_bookie_write_cache_size The size of the bookie write cache (MB). Bookkeeper Cluster   Bookie Write Cache Entry Count meter_bookkeeper_bookie_write_cache_count The entry count in the bookie write cache. Bookkeeper Cluster   Bookie Read Cache Size meter_bookkeeper_bookie_read_cache_size The size of the bookie read cache (MB). Bookkeeper Cluster   Bookie Read Cache Entry Count meter_bookkeeper_bookie_read_cache_count The entry count in the bookie read cache. Bookkeeper Cluster   Bookie Read Rate meter_bookkeeper_bookie_read_rate The bookie read rate (bytes/s). Bookkeeper Cluster   Bookie Write Rate meter_bookkeeper_bookie_write_rate The bookie write rate (bytes/s). Bookkeeper Cluster    BookKeeper Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     JVM Memory Pool Used meter_bookkeeper_node_jvm_memory_pool_used The usage of the broker jvm memory pool. Bookkeeper Bookie   JVM Memory meter_bookkeeper_node_jvm_memory_used meter_bookkeeper_node_jvm_memory_committed meter_bookkeeper_node_jvm_memory_init The usage of the broker jvm memory. Bookkeeper Bookie   JVM Threads meter_bookkeeper_node_jvm_threads_current meter_bookkeeper_node_jvm_threads_daemon meter_bookkeeper_node_jvm_threads_peak meter_bookkeeper_node_jvm_threads_deadlocked The count of the jvm threads. Bookkeeper Bookie   GC Time meter_bookkeeper_node_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Bookkeeper Bookie   GC Count meter_bookkeeper_node_jvm_gc_collection_seconds_count The count of a given JVM garbage. Bookkeeper Bookie   Thread Executor Completed meter_bookkeeper_node_thread_executor_completed The count of the executor thread. Bookkeeper Bookie   Thread Executor Tasks meter_bookkeeper_node_thread_executor_tasks_completed meter_bookkeeper_node_thread_executor_tasks_rejected meter_bookkeeper_node_thread_executor_tasks_failed The count of the executor tasks. Bookkeeper Bookie   Pooled Threads meter_bookkeeper_node_high_priority_threads meter_bookkeeper_node_read_thread_pool_threads The count of the pooled thread. 
Bookkeeper Bookie   Pooled Threads Max Queue Size meter_bookkeeper_node_high_priority_thread_max_queue_size meter_bookkeeper_node_read_thread_pool_max_queue_size The count of the pooled threads max queue size. Bookkeeper Bookie    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/bookkeeper/bookkeeper-cluster.yaml, otel-rules/bookkeeper/bookkeeper-node.yaml. The Bookkeeper dashboard panel configurations are found in /config/ui-initialized-templates/bookkeeper.\n","excerpt":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the …","ref":"/docs/main/next/en/setup/backend/backend-bookkeeper-monitoring/","title":"BookKeeper monitoring"},{"body":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the BookKeeper and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the `Layer: BOOKKEEPER.\nData flow  BookKeeper exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from BookKeeper cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up BookKeeper Cluster. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  BookKeeper Monitoring Bookkeeper monitoring provides multidimensional metrics monitoring of BookKeeper cluster as Layer: BOOKKEEPER Service in the OAP. In each cluster, the nodes are represented as Instance.\nBookKeeper Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Bookie Ledgers Count meter_bookkeeper_bookie_ledgers_count The number of the bookie ledgers. Bookkeeper Cluster   Bookie Ledger Writable Dirs meter_bookkeeper_bookie_ledger_writable_dirs The number of writable directories in the bookie. Bookkeeper Cluster   Bookie Ledger Dir Usage meter_bookkeeper_bookie_ledger_dir_data_bookkeeper_ledgers_usage The number of successfully created connections. Bookkeeper Cluster   Bookie Entries Count meter_bookkeeper_bookie_entries_count The number of the bookie write entries. Bookkeeper Cluster   Bookie Write Cache Size meter_bookkeeper_bookie_write_cache_size The size of the bookie write cache (MB). Bookkeeper Cluster   Bookie Write Cache Entry Count meter_bookkeeper_bookie_write_cache_count The entry count in the bookie write cache. Bookkeeper Cluster   Bookie Read Cache Size meter_bookkeeper_bookie_read_cache_size The size of the bookie read cache (MB). Bookkeeper Cluster   Bookie Read Cache Entry Count meter_bookkeeper_bookie_read_cache_count The entry count in the bookie read cache. Bookkeeper Cluster   Bookie Read Rate meter_bookkeeper_bookie_read_rate The bookie read rate (bytes/s). Bookkeeper Cluster   Bookie Write Rate meter_bookkeeper_bookie_write_rate The bookie write rate (bytes/s). Bookkeeper Cluster    BookKeeper Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     JVM Memory Pool Used meter_bookkeeper_node_jvm_memory_pool_used The usage of the broker jvm memory pool. 
Bookkeeper Bookie   JVM Memory meter_bookkeeper_node_jvm_memory_used meter_bookkeeper_node_jvm_memory_committed meter_bookkeeper_node_jvm_memory_init The usage of the broker jvm memory. Bookkeeper Bookie   JVM Threads meter_bookkeeper_node_jvm_threads_current meter_bookkeeper_node_jvm_threads_daemon meter_bookkeeper_node_jvm_threads_peak meter_bookkeeper_node_jvm_threads_deadlocked The count of the jvm threads. Bookkeeper Bookie   GC Time meter_bookkeeper_node_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Bookkeeper Bookie   GC Count meter_bookkeeper_node_jvm_gc_collection_seconds_count The count of a given JVM garbage. Bookkeeper Bookie   Thread Executor Completed meter_bookkeeper_node_thread_executor_completed The count of the executor thread. Bookkeeper Bookie   Thread Executor Tasks meter_bookkeeper_node_thread_executor_tasks_completed meter_bookkeeper_node_thread_executor_tasks_rejected meter_bookkeeper_node_thread_executor_tasks_failed The count of the executor tasks. Bookkeeper Bookie   Pooled Threads meter_bookkeeper_node_high_priority_threads meter_bookkeeper_node_read_thread_pool_threads The count of the pooled thread. Bookkeeper Bookie   Pooled Threads Max Queue Size meter_bookkeeper_node_high_priority_thread_max_queue_size meter_bookkeeper_node_read_thread_pool_max_queue_size The count of the pooled threads max queue size. Bookkeeper Bookie    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/bookkeeper/bookkeeper-cluster.yaml, otel-rules/bookkeeper/bookkeeper-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/bookkeeper.\n","excerpt":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-bookkeeper-monitoring/","title":"BookKeeper monitoring"},{"body":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","excerpt":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/bootstrap-plugins/","title":"Bootstrap class plugins"},{"body":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. 
For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","excerpt":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/bootstrap-plugins/","title":"Bootstrap class plugins"},{"body":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","excerpt":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/bootstrap-plugins/","title":"Bootstrap class plugins"},{"body":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   
16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","excerpt":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/bootstrap-plugins/","title":"Bootstrap class plugins"},{"body":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","excerpt":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/bootstrap-plugins/","title":"Bootstrap class plugins"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/latest/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 
10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/next/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. No browser plugin required. A simple JavaScript library. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.0.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.1.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.2.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. 
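For readability, the receiver-browser block of application.yml quoted below expands to roughly the following; the values shown are the documented defaults:

```yaml
receiver-browser:
  selector: ${SW_RECEIVER_BROWSER:default}
  default:
    # The sample rate precision is 1/10000. 10000 means 100% sample in default.
    sampleRate: ${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}
```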
It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.3.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.4.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.5.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.6.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. 
Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.7.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/latest/en/api/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/next/en/api/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.0.0/en/protocols/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  
","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.1.0/en/protocols/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.2.0/en/protocols/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.3.0/en/protocols/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.4.0/en/api/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  
","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.5.0/en/api/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.6.0/en/api/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.7.0/en/api/browser-protocol/","title":"Browser Protocol"},{"body":"Build and use the Agent from source codes When you want to build and use the Agent from source code, please follow these steps.\nInstall SkyWalking Go Use go get to import the skywalking-go program.\n// latest or any commit ID go get github.com/apache/skywalking-go@latest Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; Build the Agent When building the project, you need to clone the project and build it.\n// git clone the same version(tag or commit ID) as your dependency version. git clone https://github.com/apache/skywalking-go.git cd skywalking-go \u0026amp;\u0026amp; make build Next, you would find several versions of the Go Agent program for different systems in the bin directory of the current project. When you need to compile the program, please add the following statement with the agent program which matches your system:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  
If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a ","excerpt":"Build and use the Agent from source codes When you want to build and use the Agent from source code, …","ref":"/docs/skywalking-go/latest/en/development-and-contribution/build-and-use-agent/","title":"Build and use the Agent from source codes"},{"body":"Build and use the Agent from source codes When you want to build and use the Agent from source code, please follow these steps.\nInstall SkyWalking Go Use go get to import the skywalking-go program.\n// latest or any commit ID go get github.com/apache/skywalking-go@latest Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; Build the Agent When building the project, you need to clone the project and build it.\n// git clone the same version(tag or commit ID) as your dependency version. git clone https://github.com/apache/skywalking-go.git cd skywalking-go \u0026amp;\u0026amp; make build Next, you would find several versions of the Go Agent program for different systems in the bin directory of the current project. When you need to compile the program, please add the following statement with the agent program which matches your system:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a ","excerpt":"Build and use the Agent from source codes When you want to build and use the Agent from source code, …","ref":"/docs/skywalking-go/next/en/development-and-contribution/build-and-use-agent/","title":"Build and use the Agent from source codes"},{"body":"Build and use the Agent from source codes When you want to build and use the Agent from source code, please follow these steps.\nInstall SkyWalking Go Use go get to import the skywalking-go program.\n// latest or any commit ID go get github.com/apache/skywalking-go@latest Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; Build the Agent When building the project, you need to clone the project and build it.\n// git clone the same version(tag or commit ID) as your dependency version. git clone https://github.com/apache/skywalking-go.git cd skywalking-go \u0026amp;\u0026amp; make build Next, you would find several versions of the Go Agent program for different systems in the bin directory of the current project. When you need to compile the program, please add the following statement with the agent program which matches your system:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  
If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a ","excerpt":"Build and use the Agent from source codes When you want to build and use the Agent from source code, …","ref":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/build-and-use-agent/","title":"Build and use the Agent from source codes"},{"body":"Building This document will help you compile and build the project in golang environment.\nPlatform Linux, macOS, and Windows are supported in SkyWalking Infra E2E.\nCommand git clone https://github.com/apache/skywalking-infra-e2e.git cd skywalking-infra-e2e make build After these commands, the e2e execute file path is bin/$PLATFORM/e2e.\n","excerpt":"Building This document will help you compile and build the project in golang environment.\nPlatform …","ref":"/docs/skywalking-infra-e2e/latest/en/contribution/compiling-guidance/","title":"Building"},{"body":"Building This document will help you compile and build the project in golang environment.\nPlatform Linux, macOS, and Windows are supported in SkyWalking Infra E2E.\nCommand git clone https://github.com/apache/skywalking-infra-e2e.git cd skywalking-infra-e2e make build After these commands, the e2e execute file path is bin/$PLATFORM/e2e.\n","excerpt":"Building This document will help you compile and build the project in golang environment.\nPlatform …","ref":"/docs/skywalking-infra-e2e/next/en/contribution/compiling-guidance/","title":"Building"},{"body":"Building This document will help you compile and build the project in golang environment.\nPlatform Linux, macOS, and Windows are supported in SkyWalking Infra E2E.\nCommand git clone https://github.com/apache/skywalking-infra-e2e.git cd skywalking-infra-e2e make build After these commands, the e2e execute file path is bin/$PLATFORM/e2e.\n","excerpt":"Building This document will help you compile and build the project in golang environment.\nPlatform …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/contribution/compiling-guidance/","title":"Building"},{"body":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. 
false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","excerpt":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configuration-discovery/","title":"CDS - Configuration Discovery Service"},{"body":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","excerpt":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/configuration-discovery/","title":"CDS - Configuration Discovery Service"},{"body":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  
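Putting the configuration format and the key table together, the content pushed for a single service might look roughly like the following sketch; the service name and values are hypothetical examples, and the accepted keys remain whatever the agent side defines:

```yaml
configurations:
  serviceA:                                   # hypothetical service name registered by the agent
    agent.sample_n_per_3_secs: "6"            # sample 6 traces per 3 seconds
    agent.ignore_suffix: ".txt,.log"
    agent.span_limit_per_segment: "500"
    plugin.jdbc.trace_sql_parameters: "true"  # collect SQL parameters (java.sql.PreparedStatement)
```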
","excerpt":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/configuration-discovery/","title":"CDS - Configuration Discovery Service"},{"body":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","excerpt":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/configuration-discovery/","title":"CDS - Configuration Discovery Service"},{"body":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  
","excerpt":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/configuration-discovery/","title":"CDS - Configuration Discovery Service"},{"body":"ClickHouse monitoring ClickHouse server performance from built-in metrics data SkyWalking leverages ClickHouse built-in metrics data since v20.1.2.4. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  Configure ClickHouse to expose metrics data for scraping from Prometheus. OpenTelemetry Collector fetches metrics from ClickeHouse server through Prometheus endpoint, and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up built-in prometheus endpoint . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  ClickHouse Monitoring ClickHouse monitoring provides monitoring of the metrics 、events and asynchronous_metrics of the ClickHouse server. ClickHouse cluster is cataloged as a Layer: CLICKHOUSE Service in OAP. Each ClickHouse server is cataloged as an Instance in OAP.\nClickHouse Instance Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CpuUsage count meter_clickhouse_instance_cpu_usage CPU time spent seen by OS per second(according to ClickHouse.system.dashboard.CPU Usage (cores)). ClickHouse   MemoryUsage percentage meter_clickhouse_instance_memory_usage Total amount of memory (bytes) allocated by the server/ total amount of OS memory. ClickHouse   MemoryAvailable percentage meter_clickhouse_instance_memory_available Total amount of memory (bytes) available for program / total amount of OS memory. ClickHouse   Uptime sec meter_clickhouse_instance_uptime The server uptime in seconds. It includes the time spent for server initialization before accepting connections. ClickHouse   Version string meter_clickhouse_instance_version Version of the server in a single integer number in base-1000. ClickHouse   FileOpen count meter_clickhouse_instance_file_open Number of files opened. ClickHouse    ClickHouse Network Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     TcpConnections count meter_clickhouse_instance_tcp_connectionsmeter_clickhouse_tcp_connections Number of connections to TCP server. ClickHouse   MysqlConnections count meter_clickhouse_instance_mysql_connectionsmeter_clickhouse_mysql_connections Number of client connections using MySQL protocol. ClickHouse   HttpConnections count meter_clickhouse_instance_http_connectionsmeter_clickhouse_mysql_connections Number of connections to HTTP server. ClickHouse   InterserverConnections count meter_clickhouse_instance_interserver_connectionsmeter_clickhouse_interserver_connections Number of connections from other replicas to fetch parts. ClickHouse   PostgresqlConnections count meter_clickhouse_instance_postgresql_connectionsmeter_clickhouse_postgresql_connections Number of client connections using PostgreSQL protocol. ClickHouse   ReceiveBytes bytes meter_clickhouse_instance_network_receive_bytesmeter_clickhouse_network_receive_bytes Total number of bytes received from network. 
ClickHouse   SendBytes bytes meter_clickhouse_instance_network_send_bytesmeter_clickhouse_network_send_bytes Total number of bytes send to network. ClickHouse    ClickHouse Query Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     QueryCount count meter_clickhouse_instance_querymeter_clickhouse_query Number of executing queries. ClickHouse   SelectQueryCount count meter_clickhouse_instance_query_selectmeter_clickhouse_query_select Number of executing queries, but only for SELECT queries. ClickHouse   InsertQueryCount count meter_clickhouse_instance_query_insertmeter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   SelectQueryRate count/sec meter_clickhouse_instance_query_select_ratemeter_clickhouse_query_select_rate Number of SELECT queries per second. ClickHouse   InsertQueryRate count/sec meter_clickhouse_instance_query_insert_ratemeter_clickhouse_query_insert_rate Number of INSERT queries per second. ClickHouse   Querytime microsec meter_clickhouse_instance_querytime_microsecondsmeter_clickhouse_querytime_microseconds Total time of all queries. ClickHouse   SelectQuerytime microsec meter_clickhouse_instance_querytime_select_microsecondsmeter_clickhouse_querytime_select_microseconds Total time of SELECT queries. ClickHouse   InsertQuerytime microsec meter_clickhouse_instance_querytime_insert_microsecondsmeter_clickhouse_querytime_insert_microseconds Total time of INSERT queries. ClickHouse   OtherQuerytime microsec meter_clickhouse_instance_querytime_other_microsecondsmeter_clickhouse_querytime_other_microseconds Total time of queries that are not SELECT or INSERT. ClickHouse   QuerySlowCount count meter_clickhouse_instance_query_slowmeter_clickhouse_query_slow Number of reads from a file that were slow. ClickHouse    ClickHouse Insertion Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     InsertQueryCount count meter_clickhouse_instance_query_insertmeter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   InsertedRowCount count meter_clickhouse_instance_inserted_rowsmeter_clickhouse_inserted_rows Number of rows INSERTed to all tables. ClickHouse   InsertedBytes bytes meter_clickhouse_instance_inserted_bytesmeter_clickhouse_inserted_bytes Number of bytes INSERTed to all tables. ClickHouse   DelayedInsertCount count meter_clickhouse_instance_delayed_insertmeter_clickhouse_delayed_insert Number of times the INSERT of a block to a MergeTree table was throttled due to high number of active data parts for partition. ClickHouse    ClickHouse Replicas Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     ReplicatedChecks count meter_clickhouse_instance_replicated_checksmeter_clickhouse_replicated_checks Number of data parts checking for consistency. ClickHouse   ReplicatedFetch count meter_clickhouse_instance_replicated_fetchmeter_clickhouse_replicated_fetch Number of data parts being fetched from replica. ClickHouse   ReplicatedSend count meter_clickhouse_instance_replicated_sendmeter_clickhouse_replicated_send Number of data parts being sent to replicas. ClickHouse    ClickHouse MergeTree Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     BackgroundMergeCount count meter_clickhouse_instance_background_mergemeter_clickhouse_background_merge Number of executing background merges. 
ClickHouse   MergeRows count meter_clickhouse_instance_merge_rowsmeter_clickhouse_merge_rows Rows read for background merges. This is the number of rows before merge. ClickHouse   MergeUncompressedBytes bytes meter_clickhouse_instance_merge_uncompressed_bytesmeter_clickhouse_merge_uncompressed_bytes Uncompressed bytes (for columns as they are stored in memory) that were read for background merges. This is the number before merge. ClickHouse   MoveCount count meter_clickhouse_instance_movemeter_clickhouse_move Number of currently executing moves. ClickHouse   PartsActive count meter_clickhouse_instance_parts_activemeter_clickhouse_parts_active Active data part, used by current and upcoming SELECTs. ClickHouse   MutationsCount count meter_clickhouse_instance_mutationsmeter_clickhouse_mutations Number of mutations (ALTER DELETE/UPDATE). ClickHouse    ClickHouse Kafka Table Engine Supported Metrics When the table engine works with Apache Kafka.\nKafka lets you:\n Publish or subscribe to data flows. Organize fault-tolerant storage. Process streams as they become available.     Monitoring Panel Unit Metric Name Description Data Source     KafkaMessagesRead count meter_clickhouse_instance_kafka_messages_readmeter_clickhouse_kafka_messages_read Number of Kafka messages already processed by ClickHouse. ClickHouse   KafkaWrites count meter_clickhouse_instance_kafka_writesmeter_clickhouse_kafka_writes Number of writes (inserts) to Kafka tables. ClickHouse   KafkaConsumers count meter_clickhouse_instance_kafka_consumersmeter_clickhouse_kafka_consumers Number of active Kafka consumers. ClickHouse   KafkaProducers count meter_clickhouse_instance_kafka_producersmeter_clickhouse_kafka_producers Number of active Kafka producers created. ClickHouse    ClickHouse ZooKeeper Supported Metrics ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted.\n   Monitoring Panel Unit Metric Name Description Data Source     ZookeeperSession count meter_clickhouse_instance_zookeeper_sessionmeter_clickhouse_zookeeper_session Number of sessions (connections) to ZooKeeper. ClickHouse   ZookeeperWatch count meter_clickhouse_instance_zookeeper_watchmeter_clickhouse_zookeeper_watch Number of watches (event subscriptions) in ZooKeeper. ClickHouse   ZookeeperBytesSent bytes meter_clickhouse_instance_zookeeper_bytes_sentmeter_clickhouse_zookeeper_bytes_sent Number of bytes sent over the network while communicating with ZooKeeper. ClickHouse   ZookeeperBytesReceive bytes meter_clickhouse_instance_zookeeper_bytes_receivedmeter_clickhouse_zookeeper_bytes_received Number of bytes received over the network while communicating with ZooKeeper. ClickHouse    ClickHouse Keeper Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     KeeperAliveConnections count meter_clickhouse_instance_keeper_connections_alivemeter_clickhouse_keeper_connections_alive Number of alive connections for embedded ClickHouse Keeper. ClickHouse   KeeperOutstandingRequests count meter_clickhouse_instance_keeper_outstanding_requestsmeter_clickhouse_keeper_outstanding_requests Number of outstanding requests for embedded ClickHouse Keeper. ClickHouse    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/clickhouse. 
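For orientation only, the rule files in that directory follow the MAL OTEL-rule layout sketched below; the expression, label, and source metric names here are placeholders, so copy the real ones from the shipped clickhouse rules rather than from this sketch:

```yaml
# Sketch of the MAL rule layout; the exp and label names below are placeholders.
expSuffix: tag({tags -> tags.host_name = 'clickhouse::' + tags.host_name}).service(['host_name'], Layer.CLICKHOUSE)
metricPrefix: meter_clickhouse
metricsRules:
  - name: instance_file_open                     # becomes meter_clickhouse_instance_file_open
    exp: ClickHouseProfileEvents_FileOpen.sum(['host_name'])
```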
The ClickHouse dashboard panel configurations are found in /config/ui-initialized-templates/clickhouse.\n","excerpt":"ClickHouse monitoring ClickHouse server performance from built-in metrics data SkyWalking leverages …","ref":"/docs/main/next/en/setup/backend/backend-clickhouse-monitoring/","title":"ClickHouse monitoring"},{"body":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC server and delivery the data to it.\nDefaultConfig # The gRPC client finder typefinder_type:\u0026#34;static\u0026#34;# The gRPC server address (default localhost:11800), multiple addresses are split by \u0026#34;,\u0026#34;.server_addr:localhost:11800# The gRPC kubernetes server address finderkubernetes_config:# The kind of resourcekind:pod# The resource namespacesnamespaces:- default# How to get the address exported portextra_port:# Resource target portport:11800# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:true# The auth value when send requestauthentication:\u0026#34;\u0026#34;# How frequently to check the connection(second)check_period:5# The gRPC send request timeouttimeout:# The timeout for unary single requestunary:5s# The timeout for unary stream requeststream:20sConfiguration    Name Type Description     finder_type string The gRPC server address finder type, support \u0026ldquo;static\u0026rdquo; and \u0026ldquo;kubernetes\u0026rdquo;   server_addr string The gRPC server address, only works for \u0026ldquo;static\u0026rdquo; address finder   kubernetes_config *resolvers.KubernetesConfig The kubernetes config to lookup addresses, only works for \u0026ldquo;kubernetes\u0026rdquo; address finder   kubernetes_config.api_server string The kubernetes API server address, If not define means using in kubernetes mode to connect   kubernetes_config.basic_auth *resolvers.BasicAuth The HTTP basic authentication credentials for the targets.   kubernetes_config.basic_auth.username string    kubernetes_config.basic_auth.password resolvers.Secret    kubernetes_config.basic_auth.password_file string    kubernetes_config.bearer_token resolvers.Secret The bearer token for the targets.   kubernetes_config.bearer_token_file string The bearer token file for the targets.   kubernetes_config.proxy_url string HTTP proxy server to use to connect to the targets.   kubernetes_config.tls_config resolvers.TLSConfig TLSConfig to use to connect to the targets.   kubernetes_config.namespaces []string Support to lookup namespaces   kubernetes_config.kind string The kind of api   kubernetes_config.selector resolvers.Selector The kind selector   kubernetes_config.extra_port resolvers.ExtraPort How to get the address exported port   enable_TLS bool Enable TLS connect to server   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   
insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   authentication string The auth value when send request   check_period int How frequently to check the connection(second)   timeout grpc.TimeoutConfig The gRPC send request timeout    ","excerpt":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/client_grpc-client/","title":"Client/grpc-client"},{"body":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC server and delivery the data to it.\nDefaultConfig # The gRPC client finder typefinder_type:\u0026#34;static\u0026#34;# The gRPC server address (default localhost:11800), multiple addresses are split by \u0026#34;,\u0026#34;.server_addr:localhost:11800# The gRPC kubernetes server address finderkubernetes_config:# The kind of resourcekind:pod# The resource namespacesnamespaces:- default# How to get the address exported portextra_port:# Resource target portport:11800# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:true# The auth value when send requestauthentication:\u0026#34;\u0026#34;# How frequently to check the connection(second)check_period:5# The gRPC send request timeouttimeout:# The timeout for unary single requestunary:5s# The timeout for unary stream requeststream:20sConfiguration    Name Type Description     finder_type string The gRPC server address finder type, support \u0026ldquo;static\u0026rdquo; and \u0026ldquo;kubernetes\u0026rdquo;   server_addr string The gRPC server address, only works for \u0026ldquo;static\u0026rdquo; address finder   kubernetes_config *resolvers.KubernetesConfig The kubernetes config to lookup addresses, only works for \u0026ldquo;kubernetes\u0026rdquo; address finder   kubernetes_config.api_server string The kubernetes API server address, If not define means using in kubernetes mode to connect   kubernetes_config.basic_auth *resolvers.BasicAuth The HTTP basic authentication credentials for the targets.   kubernetes_config.basic_auth.username string    kubernetes_config.basic_auth.password resolvers.Secret    kubernetes_config.basic_auth.password_file string    kubernetes_config.bearer_token resolvers.Secret The bearer token for the targets.   kubernetes_config.bearer_token_file string The bearer token file for the targets.   kubernetes_config.proxy_url string HTTP proxy server to use to connect to the targets.   kubernetes_config.tls_config resolvers.TLSConfig TLSConfig to use to connect to the targets.   kubernetes_config.namespaces []string Support to lookup namespaces   kubernetes_config.kind string The kind of api   kubernetes_config.selector resolvers.Selector The kind selector   kubernetes_config.extra_port resolvers.ExtraPort How to get the address exported port   enable_TLS bool Enable TLS connect to server   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   
client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   authentication string The auth value when send request   check_period int How frequently to check the connection(second)   timeout grpc.TimeoutConfig The gRPC send request timeout    ","excerpt":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/client_grpc-client/","title":"Client/grpc-client"},{"body":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC server and delivery the data to it.\nDefaultConfig # The gRPC client finder typefinder_type:\u0026#34;static\u0026#34;# The gRPC server address (default localhost:11800), multiple addresses are split by \u0026#34;,\u0026#34;.server_addr:localhost:11800# The gRPC kubernetes server address finderkubernetes_config:# The kind of resourcekind:pod# The resource namespacesnamespaces:- default# How to get the address exported portextra_port:# Resource target portport:11800# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:true# The auth value when send requestauthentication:\u0026#34;\u0026#34;# How frequently to check the connection(second)check_period:5# The gRPC send request timeouttimeout:# The timeout for unary single requestunary:5s# The timeout for unary stream requeststream:20sConfiguration    Name Type Description     finder_type string The gRPC server address finder type, support \u0026ldquo;static\u0026rdquo; and \u0026ldquo;kubernetes\u0026rdquo;   server_addr string The gRPC server address, only works for \u0026ldquo;static\u0026rdquo; address finder   kubernetes_config *resolvers.KubernetesConfig The kubernetes config to lookup addresses, only works for \u0026ldquo;kubernetes\u0026rdquo; address finder   kubernetes_config.api_server string The kubernetes API server address, If not define means using in kubernetes mode to connect   kubernetes_config.basic_auth *resolvers.BasicAuth The HTTP basic authentication credentials for the targets.   kubernetes_config.basic_auth.username string    kubernetes_config.basic_auth.password resolvers.Secret    kubernetes_config.basic_auth.password_file string    kubernetes_config.bearer_token resolvers.Secret The bearer token for the targets.   kubernetes_config.bearer_token_file string The bearer token file for the targets.   kubernetes_config.proxy_url string HTTP proxy server to use to connect to the targets.   kubernetes_config.tls_config resolvers.TLSConfig TLSConfig to use to connect to the targets.   
kubernetes_config.namespaces []string Support to lookup namespaces   kubernetes_config.kind string The kind of api   kubernetes_config.selector resolvers.Selector The kind selector   kubernetes_config.extra_port resolvers.ExtraPort How to get the address exported port   enable_TLS bool Enable TLS connect to server   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   authentication string The auth value when send request   check_period int How frequently to check the connection(second)   timeout grpc.TimeoutConfig The gRPC send request timeout    ","excerpt":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/client_grpc-client/","title":"Client/grpc-client"},{"body":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the Kafka brokers and delivery the data to it.\nDefaultConfig # The Kafka broker addresses (default localhost:9092). Multiple values are separated by commas.brokers:localhost:9092# The Kafka version should follow this pattern, which is major_minor_veryMinor_patch (default 1.0.0.0).version:1.0.0.0# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).required_acks:1# The producer max retry times (default 3).producer_max_retry:3# The meta max retry times (default 3).meta_max_retry:3# How long to wait for the cluster to settle between retries (default 100ms). Time unit is ms.retry_backoff:100# The max message bytes.max_message_bytes:1000000# If enabled, the producer will ensure that exactly one copy of each message is written (default false).idempotent_writes:false# A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes (default Satellite).client_id:Satellite# Compression codec represents the various compression codecs recognized by Kafka in messages. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTDcompression_codec:0# How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. The unit is minute.refresh_period:10# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:trueConfiguration    Name Type Description     brokers string The Kafka broker addresses (default localhost:9092).   version string The version should follow this pattern, which is major.minor.veryMinor.patch.   enable_TLS bool The TLS switch (default false).   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. 
The config only works when opening the TLS switch.   required_acks int16 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).   producer_max_retry int The producer max retry times (default 3).   meta_max_retry int The meta max retry times (default 3).   retry_backoff int How long to wait for the cluster to settle between retries (default 100ms).   max_message_bytes int The max message bytes.   idempotent_writes bool Ensure that exactly one copy of each message is written when is true.   client_id string A user-provided string sent with every request to the brokers.   compression_codec int Represents the various compression codecs recognized by Kafka in messages.   refresh_period int How frequently to refresh the cluster metadata.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.    ","excerpt":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/client_kafka-client/","title":"Client/kafka-client"},{"body":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the Kafka brokers and delivery the data to it.\nDefaultConfig # The Kafka broker addresses (default localhost:9092). Multiple values are separated by commas.brokers:localhost:9092# The Kafka version should follow this pattern, which is major_minor_veryMinor_patch (default 1.0.0.0).version:1.0.0.0# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).required_acks:1# The producer max retry times (default 3).producer_max_retry:3# The meta max retry times (default 3).meta_max_retry:3# How long to wait for the cluster to settle between retries (default 100ms). Time unit is ms.retry_backoff:100# The max message bytes.max_message_bytes:1000000# If enabled, the producer will ensure that exactly one copy of each message is written (default false).idempotent_writes:false# A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes (default Satellite).client_id:Satellite# Compression codec represents the various compression codecs recognized by Kafka in messages. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTDcompression_codec:0# How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. The unit is minute.refresh_period:10# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:trueConfiguration    Name Type Description     brokers string The Kafka broker addresses (default localhost:9092).   version string The version should follow this pattern, which is major.minor.veryMinor.patch.   enable_TLS bool The TLS switch (default false).   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   
required_acks int16 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).   producer_max_retry int The producer max retry times (default 3).   meta_max_retry int The meta max retry times (default 3).   retry_backoff int How long to wait for the cluster to settle between retries (default 100ms).   max_message_bytes int The max message bytes.   idempotent_writes bool Ensure that exactly one copy of each message is written when is true.   client_id string A user-provided string sent with every request to the brokers.   compression_codec int Represents the various compression codecs recognized by Kafka in messages.   refresh_period int How frequently to refresh the cluster metadata.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.    ","excerpt":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/client_kafka-client/","title":"Client/kafka-client"},{"body":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the Kafka brokers and delivery the data to it.\nDefaultConfig # The Kafka broker addresses (default localhost:9092). Multiple values are separated by commas.brokers:localhost:9092# The Kafka version should follow this pattern, which is major_minor_veryMinor_patch (default 1.0.0.0).version:1.0.0.0# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).required_acks:1# The producer max retry times (default 3).producer_max_retry:3# The meta max retry times (default 3).meta_max_retry:3# How long to wait for the cluster to settle between retries (default 100ms). Time unit is ms.retry_backoff:100# The max message bytes.max_message_bytes:1000000# If enabled, the producer will ensure that exactly one copy of each message is written (default false).idempotent_writes:false# A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes (default Satellite).client_id:Satellite# Compression codec represents the various compression codecs recognized by Kafka in messages. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTDcompression_codec:0# How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. The unit is minute.refresh_period:10# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:trueConfiguration    Name Type Description     brokers string The Kafka broker addresses (default localhost:9092).   version string The version should follow this pattern, which is major.minor.veryMinor.patch.   enable_TLS bool The TLS switch (default false).   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   
required_acks int16 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).   producer_max_retry int The producer max retry times (default 3).   meta_max_retry int The meta max retry times (default 3).   retry_backoff int How long to wait for the cluster to settle between retries (default 100ms).   max_message_bytes int The max message bytes.   idempotent_writes bool Ensures that exactly one copy of each message is written when set to true.   client_id string A user-provided string sent with every request to the brokers.   compression_codec int Represents the various compression codecs recognized by Kafka in messages.   refresh_period int How frequently to refresh the cluster metadata.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.    ","excerpt":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/client_kafka-client/","title":"Client/kafka-client"},{"body":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The examples listed in this folder show how to use this command to create, update, read and delete schemas. Furthermore, bydbctl can help in querying data stored in streams, measures and properties.\nThere are several ways to install:\n Get binaries from download. 
Build from sources to get latest features.  The config file named .bydbctl.yaml will be created in $HOME folder after the first CRUD command is applied.\n\u0026gt; more ~/.bydbctl.yaml addr: http://127.0.0.1:64299 group: \u0026#34;\u0026#34; bydbctl leverages HTTP endpoints to retrieve data instead of gRPC.\nHTTP client Users could select any HTTP client to access the HTTP based endpoints. The default address is localhost:17913/api\nJava Client The java native client is hosted at skywalking-banyandb-java-client.\nWeb application The web application is hosted at skywalking-banyandb-webapp when you boot up the BanyanDB server.\ngRPC command-line tool Users have a chance to use any command-line tool to interact with the Banyand server\u0026rsquo;s gRPC endpoints. The only limitation is the CLI tool has to support file descriptor files since the database server does not support server reflection.\nBuf is a Protobuf building tooling the BanyanDB relies on. It can provide FileDescriptorSets usable by gRPC CLI tools like grpcurl\nBanyanDB recommends installing Buf by issuing\n$ make -C api generate Protobuf schema files are compiled Above command will compile *.proto after downloading buf into \u0026lt;project_root\u0026gt;/bin\nUsers could leverage buf\u0026rsquo;s internal compiler to generate the FileDescriptorSets\n$ cd api $ ../bin/buf build -o image.bin If grpcurl is the CLI tool to access the APIs of BanyanDb. To use image.bin with it on the fly:\n$ grpcurl -plaintext -protoset image.bin localhost:17912 ... ","excerpt":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The …","ref":"/docs/skywalking-banyandb/next/clients/","title":"Clients"},{"body":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The examples listed in this folder show how to use this command to create, update, read and delete schemas. Furthermore, bydbctl could help in querying data stored in streams, measures and properties.\nThese are several ways to install:\n Get binaries from download. Build from sources to get latest features.  The config file named .bydbctl.yaml will be created in $HOME folder after the first CRUD command is applied.\n\u0026gt; more ~/.bydbctl.yaml addr: http://127.0.0.1:64299 group: \u0026#34;\u0026#34; bydbctl leverages HTTP endpoints to retrieve data instead of gRPC.\nHTTP client Users could select any HTTP client to access the HTTP based endpoints. The default address is localhost:17913/api\nJava Client The java native client is hosted at skywalking-banyandb-java-client.\nWeb application The web application is hosted at skywalking-banyandb-webapp when you boot up the BanyanDB server.\ngRPC command-line tool Users have a chance to use any command-line tool to interact with the Banyand server\u0026rsquo;s gRPC endpoints. The only limitation is the CLI tool has to support file descriptor files since the database server does not support server reflection.\nBuf is a Protobuf building tooling the BanyanDB relies on. It can provide FileDescriptorSets usable by gRPC CLI tools like grpcurl\nBanyanDB recommends installing Buf by issuing\n$ make -C api generate Protobuf schema files are compiled Above command will compile *.proto after downloading buf into \u0026lt;project_root\u0026gt;/bin\nUsers could leverage buf\u0026rsquo;s internal compiler to generate the FileDescriptorSets\n$ cd api $ ../bin/buf build -o image.bin If grpcurl is the CLI tool to access the APIs of BanyanDb. 
To use image.bin with it on the fly:\n$ grpcurl -plaintext -protoset image.bin localhost:17912 ... ","excerpt":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The …","ref":"/docs/skywalking-banyandb/v0.5.0/clients/","title":"Clients"},{"body":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe etcd cluster can be setup by the etcd installation guide\nRole-base Banyand Cluster There is an example: The etcd cluster is spread across three nodes with the addresses 10.0.0.1:2379, 10.0.0.2:2379, and 10.0.0.3:2379.\nData nodes and liaison nodes are running as independent processes by\n$ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Node Discovery The node discovery is based on the etcd cluster. The etcd cluster is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe host is registered to the etcd cluster by the banyand-server automatically based on node-host-provider :\n node-host-provider=hostname : Default. The OS\u0026rsquo;s hostname is registered as the host part in the address. node-host-provider=ip : The OS\u0026rsquo;s the first non-loopback active IP address(IPv4) is registered as the host part in the address. node-host-provider=flag : node-host is registered as the host part in the address.  ","excerpt":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the …","ref":"/docs/skywalking-banyandb/latest/installation/cluster/","title":"Cluster Installation"},{"body":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe etcd cluster can be setup by the etcd installation guide\nRole-base Banyand Cluster There is an example: The etcd cluster is spread across three nodes with the addresses 10.0.0.1:2379, 10.0.0.2:2379, and 10.0.0.3:2379.\nData nodes and liaison nodes are running as independent processes by\n$ ./banyand-server-static storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server-static storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server-static storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Node Discovery The node discovery is based on the etcd cluster. 
The etcd cluster is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe host is registered to the etcd cluster by the banyand-server-static automatically based on node-host-provider :\n node-host-provider=hostname : Default. The OS\u0026rsquo;s hostname is registered as the host part in the address. node-host-provider=ip : The OS\u0026rsquo;s first non-loopback active IPv4 address is registered as the host part in the address. node-host-provider=flag : node-host is registered as the host part in the address.  Etcd Authentication etcd supports TLS certificates and RBAC-based authentication for client-to-server communication. This section helps users set up authentication for BanyanDB.\nAuthentication with username/password The etcd user can be set up by following the etcd authentication guide\nThe username/password is configured with the following flags:\n etcd-username: The username for etcd client authentication. etcd-password: The password for etcd client authentication.  Note: it is recommended to use environment variables to set the username/password for higher security.\n$ ./banyand-server-static storage --etcd-endpoints=your-endpoints --etcd-username=your-username --etcd-password=your-password \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=your-endpoints --etcd-username=your-username --etcd-password=your-password \u0026lt;flags\u0026gt; Transport security with HTTPS The etcd trusted certificate file can be set up by following the etcd transport security model\n etcd-tls-ca-file: The path of the trusted certificate file.  $ ./banyand-server-static storage --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=your-file-path \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=your-file-path \u0026lt;flags\u0026gt; Authentication with HTTPS client certificates The etcd client certificates can be set up by following the etcd transport security model\n etcd-tls-ca-file: The path of the trusted certificate file. etcd-tls-cert-file: Certificate used for SSL/TLS connections to etcd. When this option is set, advertise-client-urls can use the HTTPS schema. etcd-tls-key-file: Key for the certificate. Must be unencrypted.  
$ ./banyand-server-static storage --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=your-file-path --etcd-tls-cert-file=your-file-path --etcd-tls-key-file=your-file-path \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=your-file-path --etcd-tls-cert-file=your-file-path --etcd-tls-key-file=your-file-path \u0026lt;flags\u0026gt; ","excerpt":"Cluster Installation Setup Meta Nodes Meta nodes are an etcd cluster which is required for the …","ref":"/docs/skywalking-banyandb/next/installation/cluster/","title":"Cluster Installation"},{"body":"Cluster Installation Setup Meta Nodes Meta nodes are an etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe etcd cluster can be set up by following the etcd installation guide\nRole-based Banyand Cluster Here is an example: the etcd cluster is spread across three nodes with the addresses 10.0.0.1:2379, 10.0.0.2:2379, and 10.0.0.3:2379.\nData nodes and liaison nodes run as independent processes:\n$ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Node Discovery The node discovery is based on the etcd cluster. The etcd cluster is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe host is registered to the etcd cluster by the banyand-server automatically based on node-host-provider :\n node-host-provider=hostname : Default. The OS\u0026rsquo;s hostname is registered as the host part in the address. node-host-provider=ip : The OS\u0026rsquo;s first non-loopback active IPv4 address is registered as the host part in the address. node-host-provider=flag : node-host is registered as the host part in the address.  ","excerpt":"Cluster Installation Setup Meta Nodes Meta nodes are an etcd cluster which is required for the …","ref":"/docs/skywalking-banyandb/v0.5.0/installation/cluster/","title":"Cluster Installation"},{"body":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to set up CLUSTER management in a production environment. Otherwise, you would face inaccurate metrics.\ncore/gRPCHost listens on 0.0.0.0 for a quick start in single mode for most cases. Besides the Kubernetes coordinator, which uses the cloud-native mode to establish the cluster, all other coordinators require core/gRPCHost to be updated to real IP addresses, or refer to internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend that agents/probes use a gateway to load-balance access to OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. 
When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nCloud Native Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nTraditional Coordinator NOTICE In all the following coordinators, oap.internal.comm.host:oap.internal.comm.port is registered as the ID and address for the current OAP node. By default, because they are same in all OAP nodes, the registrations are conflicted, and (may) show as one registered node, which actually would be the node itself. In this case, the cluster mode is NOT working.\nPlease check the registered nodes on your coordinator servers, to make the registration information unique for every node. You could have two options\n Change core/gRPCHost(oap.internal.comm.host) and core/gRPCPort(oap.internal.comm.port) for internal, and setup external communication channels for data reporting and query. Use internalComHost and internalComPort in the config to provide a unique host and port for every OAP node. This host name port should be accessible for other OAP nodes.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. 
The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. 
The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  ","excerpt":"Cluster Management In many production environments, the backend needs to support distributed …","ref":"/docs/main/latest/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nCloud Native Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nTraditional Coordinator NOTICE In all the following coordinators, oap.internal.comm.host:oap.internal.comm.port is registered as the ID and address for the current OAP node. By default, because they are same in all OAP nodes, the registrations are conflicted, and (may) show as one registered node, which actually would be the node itself. In this case, the cluster mode is NOT working.\nPlease check the registered nodes on your coordinator servers, to make the registration information unique for every node. 
You could have two options\n Change core/gRPCHost(oap.internal.comm.host) and core/gRPCPort(oap.internal.comm.port) for internal, and setup external communication channels for data reporting and query. Use internalComHost and internalComPort in the config to provide a unique host and port for every OAP node. This host name port should be accessible for other OAP nodes.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  cluster:selector:${SW_CLUSTER:zookeeper}...zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}...consul:serviceName:${SW_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}# Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500hostPort:${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}aclToken:${SW_CLUSTER_CONSUL_ACLTOKEN:\u0026#34;\u0026#34;}internalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:\u0026#34;\u0026#34;}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:-1}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. 
internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  ","excerpt":"Cluster Management In many production environments, the backend needs to support distributed …","ref":"/docs/main/next/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many product environments, the backend needs to support high throughput and provide HA to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. 
You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must make sure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking and all permissions to the specified user will be granted. Simultaneously, znode grants the READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The require backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registed host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. 
Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the oap nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many product environments, the backend needs to support high throughput and …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. 
Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. 
The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many production environments, the backend needs to support high throughput and …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. 
If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. 
Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many production environments, the backend needs to support high throughput and …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  
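For instance, to turn on the digest ACL described in the note above, a deployment could override only the ACL-related keys roughly as follows (a sketch with placeholder server addresses; skywalking:skywalking mirrors the shipped placeholder credential and should be replaced, and the full zookeeper block with its defaults is shown next):
cluster:
  selector: zookeeper
  zookeeper:
    # placeholder Zookeeper servers in IP1:PORT1,IP2:PORT2 format
    hostPort: zk-1:2181,zk-2:2181,zk-3:2181
    # protect the /skywalking znode with the digest schema (user:password expression)
    enableACL: true
    schema: digest
    expression: skywalking:skywalking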
zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. 
Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many production environments, the backend needs to support high throughput and …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  
zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. 
Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many production environments, the backend needs to support high throughput and …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to set up CLUSTER management in product env. Otherwise, you would face inaccurate metrics.\ncore/gRPCHost listens on 0.0.0.0 for a quick start in the single-node mode for most cases. Besides the Kubernetes coordinator, which uses the cloud-native mode to establish the cluster, all other coordinators require core/gRPCHost to be updated to a real IP address, or refer to internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes use a gateway to load balance access to OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nKubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID, which is found at metadata.uid, as the value of the system environment variable SKYWALKING_COLLECTOR_UID.\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nZookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. 
expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  
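For example, an etcd cluster that requires authentication could be wired up roughly like this (a sketch; the endpoints and credentials are placeholders, and only keys already listed in the snippet above are used):
cluster:
  selector: etcd
  etcd:
    # placeholder etcd cluster nodes
    endpoints: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
    namespace: /skywalking
    serviceName: SkyWalking_OAP_Cluster
    # turn on authentication and supply illustrative credentials
    authentication: true
    user: skywalking
    password: changeme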
Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many production environments, the backend needs to support distributed …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nKubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nZookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. 
Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. 
The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many production environments, the backend needs to support distributed …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nCloud Native Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. 
Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID, which is found at metadata.uid, as the value of the system environment variable SKYWALKING_COLLECTOR_UID.\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nTraditional Coordinator NOTICE In all the following coordinators, oap.internal.comm.host:oap.internal.comm.port is registered as the ID and address for the current OAP node. By default, because these values are the same on all OAP nodes, the registrations conflict and may show up as a single registered node, which would actually be the node itself. In this case, the cluster mode is NOT working.\nPlease check the registered nodes on your coordinator servers to make the registration information unique for every node. You have two options:\n Change core/gRPCHost(oap.internal.comm.host) and core/gRPCPort(oap.internal.comm.port) for internal communication, and set up external communication channels for data reporting and query. Use internalComHost and internalComPort in the config to provide a unique host and port for every OAP node. This host name and port should be accessible to other OAP nodes.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is an expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes; for example, the default host (0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  
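For example, an OAP node whose LAN address is 10.0.2.15 could advertise a unique endpoint by overriding just these two keys (a sketch with illustrative values; each node would use its own address, and the full zookeeper block with its shipped defaults follows):
cluster:
  selector: zookeeper
  zookeeper:
    # the unique address this node registers for the other OAP nodes to reach it
    internalComHost: 10.0.2.15
    internalComPort: 11800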
zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  
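As a concrete sketch, enabling username/password authentication while pinning the advertised endpoint might look like the following (the credential and address values are placeholders, and nesting internalComHost/internalComPort under the nacos block is assumed to mirror the zookeeper example above):
cluster:
  selector: nacos
  nacos:
    # illustrative credentials; leave empty if the Nacos server needs no authentication
    username: nacos
    password: nacos
    # unique per-node address for OAP internal communication
    internalComHost: 10.0.2.16
    internalComPort: 11800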
","excerpt":"Cluster Management In many production environments, the backend needs to support distributed …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully utilize the clarity and performance boost brought by f-strings. Please do not use other styles - +, % or .format unless f-string is absolutely unfeasible in the context, or it is a logger message, which is optimized for the % style\nRun make dev-fix to invoke flynt to convert other formats to f-string, pay extra care to possible corner cases leading to a semantically different conversion.\nQuotes As we know both single quotes and double quotes are both acceptable in Python. For a better coding style, we enforce a check for using single quotes when possible.\nPlease only use double quotes on the outside when there are inevitable single quotes inside the string, or when there are nest quotes.\nFor example -\nfoo = f\u0026#34;I\u0026#39;m a string\u0026#34; bar = f\u0026#34;This repo is called \u0026#39;skywalking-python\u0026#39;\u0026#34; Run make dev-fix to invoke unify to deal with your quotes if flake8 complaints about it.\nDebug messages Please import the logger_debug_enabled variable and wrap your debug messages with a check.\nThis should be done for all performance critical components.\nif logger_debug_enabled: logger.debug(\u0026#39;Message - %s\u0026#39;, some_func()) Imports Please make sure the imports are placed in a good order, or flake8-isort will notify you of the violations.\nRun make dev-fix to automatically fix the sorting problem.\nNaming In PEP8 convention, we are required to use snake_case as the accepted style.\nHowever, there are special cases. For example, you are overriding/monkey-patching a method which happens to use the old style camelCase naming, then it is acceptable to have the original naming convention to preserve context.\nPlease mark the line with # noqa to avoid linting.\n","excerpt":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully …","ref":"/docs/skywalking-python/latest/en/contribution/codingstyle/","title":"Coding Style for SkyWalking Python"},{"body":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully utilize the clarity and performance boost brought by f-strings. Please do not use other styles - +, % or .format unless f-string is absolutely unfeasible in the context, or it is a logger message, which is optimized for the % style\nRun make dev-fix to invoke flynt to convert other formats to f-string, pay extra care to possible corner cases leading to a semantically different conversion.\nQuotes As we know both single quotes and double quotes are both acceptable in Python. 
For a better coding style, we enforce a check for using single quotes when possible.\nPlease only use double quotes on the outside when there are inevitable single quotes inside the string, or when there are nested quotes.\nFor example -\nfoo = f\u0026#34;I\u0026#39;m a string\u0026#34; bar = f\u0026#34;This repo is called \u0026#39;skywalking-python\u0026#39;\u0026#34; Run make dev-fix to invoke unify to deal with your quotes if flake8 complains about it.\nDebug messages Please import the logger_debug_enabled variable and wrap your debug messages with a check.\nThis should be done for all performance-critical components.\nif logger_debug_enabled: logger.debug(\u0026#39;Message - %s\u0026#39;, some_func()) Imports Please make sure the imports are placed in a good order, or flake8-isort will notify you of the violations.\nRun make dev-fix to automatically fix the sorting problem.\nNaming In PEP8 convention, we are required to use snake_case as the accepted style.\nHowever, there are special cases. For example, if you are overriding/monkey-patching a method which happens to use the old-style camelCase naming, then it is acceptable to keep the original naming convention to preserve context.\nPlease mark the line with # noqa to avoid linting.\n","excerpt":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully …","ref":"/docs/skywalking-python/next/en/contribution/codingstyle/","title":"Coding Style for SkyWalking Python"},{"body":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully utilize the clarity and performance boost brought by f-strings. Please do not use other styles - +, % or .format unless f-string is absolutely unfeasible in the context, or it is a logger message, which is optimized for the % style.\nRun make dev-fix to invoke flynt to convert other formats to f-string, and pay extra attention to possible corner cases leading to a semantically different conversion.\nQuotes As we know, both single quotes and double quotes are acceptable in Python. For a better coding style, we enforce a check for using single quotes when possible.\nPlease only use double quotes on the outside when there are inevitable single quotes inside the string, or when there are nested quotes.\nFor example -\nfoo = f\u0026#34;I\u0026#39;m a string\u0026#34; bar = f\u0026#34;This repo is called \u0026#39;skywalking-python\u0026#39;\u0026#34; Run make dev-fix to invoke unify to deal with your quotes if flake8 complains about it.\nDebug messages Please import the logger_debug_enabled variable and wrap your debug messages with a check.\nThis should be done for all performance-critical components.\nif logger_debug_enabled: logger.debug(\u0026#39;Message - %s\u0026#39;, some_func()) Imports Please make sure the imports are placed in a good order, or flake8-isort will notify you of the violations.\nRun make dev-fix to automatically fix the sorting problem.\nNaming In PEP8 convention, we are required to use snake_case as the accepted style.\nHowever, there are special cases. 
For example, you are overriding/monkey-patching a method which happens to use the old style camelCase naming, then it is acceptable to have the original naming convention to preserve context.\nPlease mark the line with # noqa to avoid linting.\n","excerpt":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/codingstyle/","title":"Coding Style for SkyWalking Python"},{"body":"Collecting and Gathering Kubernetes Monitoring Data Motivation SkyWalking has provided an access log collector based on the Agent layer and Service Mesh layer, and can generate corresponding topology maps and metrics based on the data. However, the Kubernetes Layer still lacks corresponding access log collector and analysis work.\nThis proposal is dedicated to collecting and analyzing network access logs in Kubernetes.\nArchitecture Graph There is no significant architecture-level change. Still using the Rover project to collect data and report it to SkyWalking OAP using the gRPC protocol.\nPropose Changes Based on the content in Motivation, if we want to ignore the application types(different program languages) and only monitor network logs, using eBPF is a good choice. It mainly reflects in the following aspects:\n Non-intrusive: When monitoring network access logs with eBPF, the application do not need to make any changes to be monitored. Language-unrestricted: Regardless of which programming language is used in the application, network data will ultimately be accessed through Linux Syscalls. Therefore, we can monitor network data by attaching eBPF to the syscalls layer, thus ignoring programming languages. Kernel interception: Since eBPF can attach to the kernel methods, it can obtain the execution status of each packet at L2-L4 layers and generate more detailed metrics.  Based on these reasons and collected data, they can be implemented in SkyWalking Rover and collected and monitored based on the following steps:\n Monitor the network execution status of all processes in Kubernetes when the Rover system starts. Periodically report data content via gRPC protocol to SkyWalking OAP. SkyWalking OAP parses network access logs and generates corresponding network topology, metrics, etc.  Limitation For content that uses TLS for data transmission, Rover will detect whether the current language uses libraries such as OpenSSL. If it is used, it will asynchronously intercept relevant OpenSSL methods when the process starts to perceive the original data content.\nHowever, this approach is not feasible for Java because Java does not use the OpenSSL library but performs encryption/decryption through Java code. Currently, eBPF cannot intercept Java method calls. Therefore, it results in an inability to perceive the TLS data protocol in Java.\nService with Istio sidecar scenario If the Service is deployed in Istio sidecar, it will still monitor each process. If the Service is a Java service and uses TLS, it can analyze the relevant traffic generated in the sidecar (envoy).\nImported Dependencies libs and their licenses. No new library is planned to be added to the codebase.\nCompatibility About the protocol, there should be no breaking changes, but enhancements only:\n Rover: adding a new gRPC data collection protocol for reporting the access logs. OAP: It should have no protocol updates. The existing query protocols are already sufficient for querying Kubernetes topology and metric data.  
Data Generation Entity  service_traffic     column data type value description     name string kubernetes service name   short_name string same with name   service_id string base64(name).1   group string empty string   layer string KUBERNETES     instance_traffic     column data type value description     service_id string base64(service_name).1   name string pod name   last_ping long last access log message timestamp(millisecond)   properties json empty string     endpoint_traffic     column data type value description     service_id string base64(service_name).1   name string access log endpoint name(for HTTP1, is URI)    Entity Relation All entity information is built on connections. If the target address is remote, the name will be resolved in the following order:\n If it is a pod IP, it will be resolved as pod information. If it is a service IP, it will be resolved as service information. If neither exists, only pod information will be displayed.  Different entities have different displays for remote addresses. Please refer to the following table.\n   table name remote info(display by following order)     service_relation service name, remote IP address   instance_relation pod name, remote IP address    NOTICE: If it is the internal data interaction within the pod, such as exchanging data between services and sidecar (envoy), no corresponding traffic will be generated. We only generate and interact with external pods.\nLimitation If the service IP is used to send requests to the upstream, we will use eBPF to perceive the real target PodIP by perceiving relevant conntrack records.\nHowever, if conntrack technology is not used, it is difficult to perceive the real target IP address. In this case, instance relation data of this kind will be dropped, but we will mark all discarded relationship generation counts through a metric for better understanding of the situation.\nMetrics Integrate the data into the OAL system and generate corresponding metrics through predefined data combined with OAL statements.\nGeneral usage docs This proposal will only add a module to Rover that explains the configuration of access logs, and changes in the Kubernetes module on the UI.\nIn the Kubernetes UI, users can see the following additions:\n Topology: A topology diagram showing the calling relationships between services, instances, and processes. Entity Metrics: Metric data for services, instances, and processes. Call Relationship Metrics: Metrics for call relationships between different entities.  ","excerpt":"Collecting and Gathering Kubernetes Monitoring Data Motivation SkyWalking has provided an access log …","ref":"/docs/main/next/en/swip/swip-2/","title":"Collecting and Gathering Kubernetes Monitoring Data"},{"body":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. 
Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","excerpt":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they …","ref":"/docs/main/latest/en/setup/backend/filelog-native/","title":"Collecting File Log"},{"body":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","excerpt":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they …","ref":"/docs/main/next/en/setup/backend/filelog-native/","title":"Collecting File Log"},{"body":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). 
Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","excerpt":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they …","ref":"/docs/main/v9.5.0/en/setup/backend/filelog-native/","title":"Collecting File Log"},{"body":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","excerpt":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they …","ref":"/docs/main/v9.6.0/en/setup/backend/filelog-native/","title":"Collecting File Log"},{"body":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). 
Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","excerpt":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they …","ref":"/docs/main/v9.7.0/en/setup/backend/filelog-native/","title":"Collecting File Log"},{"body":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","excerpt":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them …","ref":"/docs/main/latest/en/setup/backend/log-agent-native/","title":"Collecting Logs by Agents"},{"body":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","excerpt":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them …","ref":"/docs/main/next/en/setup/backend/log-agent-native/","title":"Collecting Logs by Agents"},{"body":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). 
When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","excerpt":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them …","ref":"/docs/main/v9.5.0/en/setup/backend/log-agent-native/","title":"Collecting Logs by Agents"},{"body":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","excerpt":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them …","ref":"/docs/main/v9.6.0/en/setup/backend/log-agent-native/","title":"Collecting Logs by Agents"},{"body":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). 
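The Python agent log reporter described above hooks the standard logging module, so application code needs nothing SkyWalking-specific. Below is a minimal sketch, assuming only the standard library; enabling and configuring the reporter itself is covered by the Log Reporter Doc.

```python
# Plain `logging` usage; per the docs above, the SkyWalking Python agent's
# log reporter attaches to this module once it is enabled, so these records
# are forwarded to the OAP (with trace context when a span is active),
# mirroring the Java toolkits.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('order-service')


def place_order(order_id: str) -> None:
    logger.info('order %s accepted', order_id)


if __name__ == '__main__':
    place_order('A-1001')
```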
When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","excerpt":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them …","ref":"/docs/main/v9.7.0/en/setup/backend/log-agent-native/","title":"Collecting Logs by Agents"},{"body":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment Key Description     logger.level INFO ROVER_LOGGER_LEVEL The lowest level of printing allowed.    Core Core is used to communicate with the backend server. It provides APIs for other modules to establish connections with the backend.\n   Name Default Environment Key Description     core.cluster_name  ROVER_CORE_CLUSTER_NAME The name of the cluster.   core.backend.addr localhost:11800 ROVER_BACKEND_ADDR The backend server address.   core.backend.enable_TLS false ROVER_BACKEND_ENABLE_TLS The TLS switch.   core.backend.client_pem_path client.pem ROVER_BACKEND_PEM_PATH The file path of client.pem. The config only works when opening the TLS switch.   core.backend.client_key_path client.key ROVER_BACKEND_KEY_PATH The file path of client.key. The config only works when opening the TLS switch.   core.backend.insecure_skip_verify false ROVER_BACKEND_INSECURE_SKIP_VERIFY InsecureSkipVerify controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   core.backend.ca_pem_path ca.pem ROVER_BACKEND_CA_PEM_PATH The file path oca.pem. The config only works when opening the TLS switch.   core.backend.check_period 5 ROVER_BACKEND_CHECK_PERIOD How frequently to check the connection(second).   core.backend.authentication  ROVER_BACKEND_AUTHENTICATION The auth value when send request.    ","excerpt":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment …","ref":"/docs/skywalking-rover/latest/en/setup/configuration/common/","title":"Common configuration"},{"body":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment Key Description     logger.level INFO ROVER_LOGGER_LEVEL The lowest level of printing allowed.    Core Core is used to communicate with the backend server. It provides APIs for other modules to establish connections with the backend.\n   Name Default Environment Key Description     core.cluster_name  ROVER_CORE_CLUSTER_NAME The name of the cluster.   core.backend.addr localhost:11800 ROVER_BACKEND_ADDR The backend server address.   core.backend.enable_TLS false ROVER_BACKEND_ENABLE_TLS The TLS switch.   core.backend.client_pem_path client.pem ROVER_BACKEND_PEM_PATH The file path of client.pem. The config only works when opening the TLS switch.   core.backend.client_key_path client.key ROVER_BACKEND_KEY_PATH The file path of client.key. The config only works when opening the TLS switch.   core.backend.insecure_skip_verify false ROVER_BACKEND_INSECURE_SKIP_VERIFY InsecureSkipVerify controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   
core.backend.ca_pem_path ca.pem ROVER_BACKEND_CA_PEM_PATH The file path oca.pem. The config only works when opening the TLS switch.   core.backend.check_period 5 ROVER_BACKEND_CHECK_PERIOD How frequently to check the connection(second).   core.backend.authentication  ROVER_BACKEND_AUTHENTICATION The auth value when send request.    ","excerpt":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment …","ref":"/docs/skywalking-rover/next/en/setup/configuration/common/","title":"Common configuration"},{"body":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment Key Description     logger.level INFO ROVER_LOGGER_LEVEL The lowest level of printing allowed.    Core Core is used to communicate with the backend server. It provides APIs for other modules to establish connections with the backend.\n   Name Default Environment Key Description     core.cluster_name  ROVER_CORE_CLUSTER_NAME The name of the cluster.   core.backend.addr localhost:11800 ROVER_BACKEND_ADDR The backend server address.   core.backend.enable_TLS false ROVER_BACKEND_ENABLE_TLS The TLS switch.   core.backend.client_pem_path client.pem ROVER_BACKEND_PEM_PATH The file path of client.pem. The config only works when opening the TLS switch.   core.backend.client_key_path client.key ROVER_BACKEND_KEY_PATH The file path of client.key. The config only works when opening the TLS switch.   core.backend.insecure_skip_verify false ROVER_BACKEND_INSECURE_SKIP_VERIFY InsecureSkipVerify controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   core.backend.ca_pem_path ca.pem ROVER_BACKEND_CA_PEM_PATH The file path oca.pem. The config only works when opening the TLS switch.   core.backend.check_period 5 ROVER_BACKEND_CHECK_PERIOD How frequently to check the connection(second).   core.backend.authentication  ROVER_BACKEND_AUTHENTICATION The auth value when send request.    ","excerpt":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/common/","title":"Common configuration"},{"body":"Common configuration The common configuration has 2 parts, which are logger configuration and the telemetry configuration.\nLogger    Config Default Description     log_pattern %time [%level][%field] - %msg The log format pattern configuration.   time_pattern 2006-01-02 15:04:05.000 The time format pattern configuration.   level info The lowest level of printing allowed.    Self Telemetry    Config Default Description     cluster default-cluster The space concept for the deployment, such as the namespace concept in the Kubernetes.   service default-service The group concept for the deployment, such as the service resource concept in the Kubernetes.   instance default-instance The minimum running unit, such as the pod concept in the Kubernetes.    ","excerpt":"Common configuration The common configuration has 2 parts, which are logger configuration and the …","ref":"/docs/skywalking-satellite/latest/en/setup/configuration/common/","title":"Common configuration"},{"body":"Common configuration The common configuration has 2 parts, which are logger configuration and the telemetry configuration.\nLogger    Config Default Description     log_pattern %time [%level][%field] - %msg The log format pattern configuration.   time_pattern 2006-01-02 15:04:05.000 The time format pattern configuration.   
level info The lowest level of printing allowed.    Self Telemetry    Config Default Description     cluster default-cluster The space concept for the deployment, such as the namespace concept in the Kubernetes.   service default-service The group concept for the deployment, such as the service resource concept in the Kubernetes.   instance default-instance The minimum running unit, such as the pod concept in the Kubernetes.    ","excerpt":"Common configuration The common configuration has 2 parts, which are logger configuration and the …","ref":"/docs/skywalking-satellite/next/en/setup/configuration/common/","title":"Common configuration"},{"body":"Common configuration The common configuration has 2 parts, which are logger configuration and the telemetry configuration.\nLogger    Config Default Description     log_pattern %time [%level][%field] - %msg The log format pattern configuration.   time_pattern 2006-01-02 15:04:05.000 The time format pattern configuration.   level info The lowest level of printing allowed.    Self Telemetry    Config Default Description     cluster default-cluster The space concept for the deployment, such as the namespace concept in the Kubernetes.   service default-service The group concept for the deployment, such as the service resource concept in the Kubernetes.   instance default-instance The minimum running unit, such as the pod concept in the Kubernetes.    ","excerpt":"Common configuration The common configuration has 2 parts, which are logger configuration and the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/common/","title":"Common configuration"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/latest/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/next/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the same versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.4.0   9.0.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.4.0    Ecosystem Agents All following agent implementations are a part of SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky PHP agent     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0 \u0026gt; = 3.0.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0    All these projects are maintained by their own communities, please reach them if you face any compatibility issue.\n All above compatibility are only references, if you face unimplemented error, it means you need to upgrade OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the same versions …","ref":"/docs/main/v9.0.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agnet) Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky PHP agent     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0 \u0026gt; = 3.0.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.1.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agnet) Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky PHP agent     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0 \u0026gt; = 3.0.0 \u0026amp;\u0026amp; \u0026lt; 5.0.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 5.0.0   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 5.0.0    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.2.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agent) Satellite PHP     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No All   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0 All    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.3.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agent) Satellite PHP     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No All   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0 All    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.4.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agent) Satellite PHP     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No All   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0 All   9.5.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.5.0 \u0026gt; = 1.2.0 All    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.5.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.6.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.7.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/latest/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/next/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.0.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.1.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.2.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.3.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.4.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.5.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.6.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.7.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux version \u0026gt;= 4.4, and dependency these tools:\n llvm \u0026gt;= 13. libbpf-dev.  MacOS or Windows Make sure it already has a docker environment.\nCommand git clone https://github.com/apache/skywalking-rover cd skywalking-rover # Linux platform make generate build # MacOS or Windows make container-generate build ","excerpt":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux …","ref":"/docs/skywalking-rover/latest/en/guides/compile/how-to-compile/","title":"Compiling"},{"body":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux version \u0026gt;= 4.4, and dependency these tools:\n llvm \u0026gt;= 13. libbpf-dev.  MacOS or Windows Make sure it already has a docker environment.\nCommand git clone https://github.com/apache/skywalking-rover cd skywalking-rover # Linux platform make generate build # MacOS or Windows make container-generate build ","excerpt":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux …","ref":"/docs/skywalking-rover/next/en/guides/compile/how-to-compile/","title":"Compiling"},{"body":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux version \u0026gt;= 4.4, and dependency these tools:\n llvm \u0026gt;= 13. libbpf-dev.  
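A minimal sketch of enabling the class cache feature described above, assuming the agent jar sits at /path/to/skywalking-agent/skywalking-agent.jar and the application is your-app.jar (both placeholder names):

java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar \
     -Dskywalking.agent.is_cache_enhanced_class=true \
     -Dskywalking.agent.class_cache_mode=MEMORY \
     -jar your-app.jar

Alternatively, keep the defaults in agent.conf (agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false}) and export SW_AGENT_CACHE_CLASS=true and, if desired, SW_AGENT_CLASS_CACHE_MODE=FILE so the placeholders resolve to the enabled values at startup.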
MacOS or Windows Make sure it already has a docker environment.\nCommand git clone https://github.com/apache/skywalking-rover cd skywalking-rover # Linux platform make generate build # MacOS or Windows make container-generate build ","excerpt":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux …","ref":"/docs/skywalking-rover/v0.6.0/en/guides/compile/how-to-compile/","title":"Compiling"},{"body":"Compiling Go version Go version 1.18 and 1.19 are supported for compilation.\nPlatform Linux, MacOS and Windows are supported in SkyWalking Satellite. However, some components don\u0026rsquo;t fit the Windows platform, including:\n mmap-queue  Command git clone https://github.com/apache/skywalking-satellite cd skywalking-satellite make build ","excerpt":"Compiling Go version Go version 1.18 and 1.19 are supported for compilation.\nPlatform Linux, MacOS …","ref":"/docs/skywalking-satellite/latest/en/guides/compile/how-to-compile/","title":"Compiling"},{"body":"Compiling Go version Go version 1.19 is required for compilation.\nPlatform Linux, MacOS and Windows are supported in SkyWalking Satellite. However, some components don\u0026rsquo;t fit the Windows platform, including:\n mmap-queue  Command git clone https://github.com/apache/skywalking-satellite cd skywalking-satellite make build ","excerpt":"Compiling Go version Go version 1.19 is required for compilation.\nPlatform Linux, MacOS and Windows …","ref":"/docs/skywalking-satellite/next/en/guides/compile/how-to-compile/","title":"Compiling"},{"body":"Compiling Go version Go version 1.18 and 1.19 are supported for compilation.\nPlatform Linux, MacOS and Windows are supported in SkyWalking Satellite. However, some components don\u0026rsquo;t fit the Windows platform, including:\n mmap-queue  Command git clone https://github.com/apache/skywalking-satellite cd skywalking-satellite make build ","excerpt":"Compiling Go version Go version 1.18 and 1.19 are supported for compilation.\nPlatform Linux, MacOS …","ref":"/docs/skywalking-satellite/v1.2.0/en/guides/compile/how-to-compile/","title":"Compiling"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. 
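Pulled together from the compile steps above, a quick reference for building Rover and Satellite from source (a Go 1.18+ toolchain is assumed to be on the PATH):

# SkyWalking Rover (Linux; use `make container-generate build` on macOS/Windows)
git clone https://github.com/apache/skywalking-rover
cd skywalking-rover
make generate build

# SkyWalking Satellite (the mmap-queue component is not available on Windows)
git clone https://github.com/apache/skywalking-satellite
cd skywalking-satellite
make build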
You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/latest/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/next/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. 
[ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.0.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. 
After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.1.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.2.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. 
[ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.3.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. 
After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.4.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.5.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. 
[ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.6.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. 
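For reference, the M1 workaround above simply appends the os classifier override to whatever Maven command is already in use, so the x86_64 protoc binary (which runs on Apple Silicon) is resolved instead of the missing osx-aarch_64 artifact:

./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64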
After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.7.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. make docker.push.alpine docker.push.java8 docker.push.java11   ","excerpt":"Compiling project This document will help you compile and build a project in your maven and set your …","ref":"/docs/skywalking-java/latest/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. 
make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. make docker.push.alpine docker.push.java8 docker.push.java11   ","excerpt":"Compiling project This document will help you compile and build a project in your maven and set your …","ref":"/docs/skywalking-java/next/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 8+.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. make docker.push.alpine docker.push.java8 docker.push.java11   ","excerpt":"Compiling project This document will help you compile and build a project in your maven and set your …","ref":"/docs/skywalking-java/v9.0.0/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. 
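Condensing the Java agent build steps above into one sequence (the required JDK depends on the branch: 17 or 21 for current releases, 8+ for v9.0.0):

git clone https://github.com/apache/skywalking-java.git
cd skywalking-java
./mvnw clean package -Pall
# the agent distribution is generated under the skywalking-agent/ folder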
make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. make docker.push.alpine docker.push.java8 docker.push.java11   ","excerpt":"Compiling project This document will help you compile and build a project in your maven and set your …","ref":"/docs/skywalking-java/v9.1.0/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. 
make docker.push.alpine docker.push.java8 docker.push.java11   ","excerpt":"Compiling project This document will help you compile and build a project in your maven and set your …","ref":"/docs/skywalking-java/v9.2.0/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and Rust environments.\nInstall PHP Environment For Debian user:\nsudo apt install php-cli php-dev For MacOS user:\nbrew install php Install Rust Environment Install Rust 1.65.0+.\nFor Linux user:\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh For MacOS user:\nbrew install rust Install requirement For Debian user:\nsudo apt install gcc make llvm-dev libclang-dev clang protobuf-compiler For MacOS user:\nbrew install protobuf Build and install Skywalking PHP Agent from archive file For Linux user:\nsudo pecl install skywalking_agent-x.y.z.tgz For MacOS user:\n Running the pecl install command with the php installed in brew may encounter the problem of mkdir, please refer to Installing PHP and PECL Extensions on MacOS.\n pecl install skywalking_agent-x.y.z.tgz The extension file skywalking_agent.so is generated in the php extension folder, get it by run php-config --extension-dir.\n","excerpt":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and …","ref":"/docs/skywalking-php/latest/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and Rust environments.\nInstall PHP Environment For Debian user:\nsudo apt install php-cli php-dev For MacOS user:\nbrew install php Install Rust Environment Install Rust 1.65.0+.\nFor Linux user:\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh For MacOS user:\nbrew install rust Install requirement For Debian user:\nsudo apt install gcc make llvm-dev libclang-dev clang protobuf-compiler For MacOS user:\nbrew install protobuf Build and install Skywalking PHP Agent from archive file For Linux user:\nsudo pecl install skywalking_agent-x.y.z.tgz For MacOS user:\n Running the pecl install command with the php installed in brew may encounter the problem of mkdir, please refer to Installing PHP and PECL Extensions on MacOS.\n pecl install skywalking_agent-x.y.z.tgz The extension file skywalking_agent.so is generated in the php extension folder, get it by run php-config --extension-dir.\n","excerpt":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and …","ref":"/docs/skywalking-php/next/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and Rust environments.\nInstall PHP Environment For Debian user:\nsudo apt install php-cli php-dev For MacOS user:\nbrew install php Install Rust Environment Install Rust 1.65.0+.\nFor Linux user:\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh For MacOS user:\nbrew install rust Install requirement For Debian user:\nsudo apt install gcc make llvm-dev libclang-dev clang protobuf-compiler For MacOS user:\nbrew install protobuf Build and install Skywalking PHP Agent from archive file For Linux user:\nsudo pecl install skywalking_agent-x.y.z.tgz For MacOS user:\n Running the pecl install command with the php installed in brew may encounter the problem of mkdir, 
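The Docker make targets described above compose as in this sketch; HUB and NAME are optional overrides for the registry and image name:

# build only the alpine, Java 8 and Java 11 variants
make docker.alpine docker.java8 docker.java11

# custom registry/name: produces gcr.io/skywalking/sw-agent:latest-alpine
make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent

# build and push the same variants
make docker.push.alpine docker.push.java8 docker.push.java11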
please refer to Installing PHP and PECL Extensions on MacOS.\n pecl install skywalking_agent-x.y.z.tgz The extension file skywalking_agent.so is generated in the php extension folder, get it by run php-config --extension-dir.\n","excerpt":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and …","ref":"/docs/skywalking-php/v0.7.0/en/contribution/compiling/","title":"Compiling project"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/latest/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  
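As a worked example of the PHP agent install flow above on a Debian-based host (skywalking_agent-x.y.z.tgz is a placeholder for the actual release archive name):

sudo apt install php-cli php-dev gcc make llvm-dev libclang-dev clang protobuf-compiler
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh   # Rust 1.65.0+
sudo pecl install skywalking_agent-x.y.z.tgz
php-config --extension-dir   # shows the extension folder containing skywalking_agent.so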
All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/next/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. 
The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.0.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.1.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  
All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.2.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. 
The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.3.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.4.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. 
Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.5.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. 
The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.6.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.7.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E and the landscape.\n What is SkyWalking Infra E2E?  Project Goals. Provides the goals, which SkyWalking Infra E2E is trying to focus on and provides features about them.    After you read the above documents, you should understand the basic goals of the SkyWalking Infra E2E. 
Now, you can choose which following parts you are interested, then dive in.\n Module Design  ","excerpt":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E …","ref":"/docs/skywalking-infra-e2e/latest/en/concepts-and-designs/readme/","title":"Concepts and Designs"},{"body":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E and the landscape.\n What is SkyWalking Infra E2E?  Project Goals. Provides the goals, which SkyWalking Infra E2E is trying to focus on and provides features about them.    After you read the above documents, you should understand the basic goals of the SkyWalking Infra E2E. Now, you can choose which following parts you are interested, then dive in.\n Module Design  ","excerpt":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E …","ref":"/docs/skywalking-infra-e2e/next/en/concepts-and-designs/readme/","title":"Concepts and Designs"},{"body":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E and the landscape.\n What is SkyWalking Infra E2E?  Project Goals. Provides the goals, which SkyWalking Infra E2E is trying to focus on and provides features about them.    After you read the above documents, you should understand the basic goals of the SkyWalking Infra E2E. Now, you can choose which following parts you are interested, then dive in.\n Module Design  ","excerpt":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/concepts-and-designs/readme/","title":"Concepts and Designs"},{"body":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite and the landscape.\n What is SkyWalking Satellite?  Overview and Core concepts. Provides a high-level description and introduction, including the problems the project solves. Project Goals. Provides the goals, which SkyWalking Satellite is trying to focus and provide features about them.    After you read the above documents, you should understand basic goals of the SkyWalking Satellite. Now, you can choose which following parts you are interested, then dive in.\n Module Design Plugin Mechanism Project Structure Memory mapped Queue  ","excerpt":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/readme/","title":"Concepts and Designs"},{"body":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite and the landscape.\n What is SkyWalking Satellite?  Overview and Core concepts. Provides a high-level description and introduction, including the problems the project solves. Project Goals. Provides the goals, which SkyWalking Satellite is trying to focus and provide features about them.    After you read the above documents, you should understand basic goals of the SkyWalking Satellite. 
Now, you can choose which following parts you are interested, then dive in.\n Module Design Plugin Mechanism Project Structure Memory mapped Queue  ","excerpt":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/readme/","title":"Concepts and Designs"},{"body":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite and the landscape.\n What is SkyWalking Satellite?  Overview and Core concepts. Provides a high-level description and introduction, including the problems the project solves. Project Goals. Provides the goals, which SkyWalking Satellite is trying to focus and provide features about them.    After you read the above documents, you should understand basic goals of the SkyWalking Satellite. Now, you can choose which following parts you are interested, then dive in.\n Module Design Plugin Mechanism Project Structure Memory mapped Queue  ","excerpt":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite …","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/readme/","title":"Concepts and Designs"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. 
SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. 
- 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. 
SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). 
This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. 
SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - targets Hosts with ports of the BanyanDB. SW_STORAGE_BANYANDB_TARGETS 127.0.0.1:17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. 
SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. 
Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. 
SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). 
SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   debugging-query default       - - keywords4MaskingSecretsOfConfig Include the list of keywords to filter configurations including secrets. Separate keywords by a comma. SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS user,password,token,accessKey,secretKey,authentication   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. 
By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max Thtread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 response SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS firhose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicate if enable HTTPS for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/latest/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. 
SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE Default to gRPC\u0026rsquo;s implementation, which is a cached thread pool that can grow infinitely.   
- - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   - - enableHierarchy If disable the hierarchy, the service and instance hierarchy relation will not be built. And the query of hierarchy will return empty result. All the hierarchy relations are defined in the hierarchy-definition.yml. Notice: some of the configurations only available for kubernetes environments. SW_CORE_ENABLE_HIERARCHY true   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. 
SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. 
Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. 
SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - targets Hosts with ports of the BanyanDB. SW_STORAGE_BANYANDB_TARGETS 127.0.0.1:17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. 
SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and RESTful servers for data collection. It also designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE Default to gRPC\u0026rsquo;s implementation, which is a cached thread pool that can grow infinitely.   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read MAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTP RESTful services that provide service, service instance and endpoint registration. - -    receiver-trace default gRPC and HTTP RESTful services that accept SkyWalking format traces. 
- -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}.${pod.metadata.namespace}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}.${serviceEntry.metadata.namespace}   - - istioServiceEntryIgnoredNamespaces When looking up service informations from the Istio ServiceEntries, some of the ServiceEntries might be created in several namespaces automatically by some components, and OAP will randomly pick one of them to build the service name, users can use this config to exclude ServiceEntries that they don\u0026rsquo;t want to be used. Comma separated. SW_ISTIO_SERVICE_ENTRY_IGNORED_NAMESPACES -   - - gRPCHost Binding IP of gRPC service for Envoy access log service. SW_ALS_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC service for Envoy access log service. SW_ALS_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_ALS_GRPC_THREAD_POOL_SIZE Default to gRPC\u0026rsquo;s implementation, which is a cached thread pool that can grow infinitely.   - - gRPCSslEnabled Activates SSL for gRPC services. SW_ALS_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_ALS_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_ALS_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. 
SW_ALS_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_ALS_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. 
SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. 
SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   - - buildInfoVersion Mock version for API buildInfo SW_PROMQL_BUILD_INFO_VERSION 2.45.0   - - buildInfoRevision Mock revision for API buildInfo SW_PROMQL_BUILD_INFO_REVISION    - - buildInfoBranch Mock branch for API buildInfo SW_PROMQL_BUILD_INFO_BRANCH    - - buildInfoBuildUser Mock build user for API buildInfo SW_PROMQL_BUILD_INFO_BUILD_USER    - - buildInfoBuildDate Mock build date for API buildInfo SW_PROMQL_BUILD_INFO_BUILD_DATE    - - buildInfoGoVersion Mock go version for API buildInfo SW_PROMQL_BUILD_INFO_GO_VERSION    alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - - maxInboundMessageSize The max inbound message size of gRPC. SW_DCS_MAX_INBOUND_MESSAGE_SIZE 4194304   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. 
SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   debugging-query default       - - keywords4MaskingSecretsOfConfig Include the list of keywords to filter configurations including secrets. Separate keywords by a comma. SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS user,password,token,accessKey,secretKey,authentication   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. 
- -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 request SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/next/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMinThreads Minimum thread number of RESTful services. SW_CORE_REST_JETTY_MIN_THREADS 1   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_JETTY_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_JETTY_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_JETTY_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. 
SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - enableDatabaseSession Cache metrics data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute. SW_CORE_ENABLE_DATABASE_SESSION true   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_TAG_KEYS http.method,status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. 
SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI true   - - maxDurationOfAnalyzeEBPFProfiling The maximum duration(in minute) of analyze the eBPF profiling data. - 10   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. 
SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_SUPERDATASET_STORAGE_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. 
SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - driver H2 JDBC driver. SW_STORAGE_H2_DRIVER org.h2.jdbcx.JdbcDataSource   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In H2, we use multiple physical columns to host the values: e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have the same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. 
Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5 SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - influxdb - InfluxDB storage. - -   - - url InfluxDB connection URL. SW_STORAGE_INFLUXDB_URL http://localhost:8086   - - user User name of InfluxDB. SW_STORAGE_INFLUXDB_USER root   - - password Password of InfluxDB. SW_STORAGE_INFLUXDB_PASSWORD -   - - database Database of InfluxDB. SW_STORAGE_INFLUXDB_DATABASE skywalking   - - actions The number of actions to collect. SW_STORAGE_INFLUXDB_ACTIONS 1000   - - duration The maximum waiting time (in milliseconds). SW_STORAGE_INFLUXDB_DURATION 1000   - - batchEnabled If true, write points with batch API. SW_STORAGE_INFLUXDB_BATCH_ENABLED true   - - fetchTaskLogMaxSize The maximum number of fetch task log in a request. SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE 5000   - - connectionResponseFormat The response format of connection to influxDB. It can only be MSGPACK or JSON. SW_STORAGE_INFLUXDB_CONNECTION_RESPONSE_FORMAT MSGPACK   - iotdb - IoTDB storage. - -   - - host The host of IoTDB server. SW_STORAGE_IOTDB_HOST 127.0.0.1   - - rpcPort The port listened by IoTDB server. SW_STORAGE_IOTDB_RPC_PORT 6667   - - username The username of IoTDB SW_STORAGE_IOTDB_USERNAME root   - - password The password of IoTDB SW_STORAGE_IOTDB_PASSWORD root   - - storageGroup The path of Storage Group and it must start with root. SW_STORAGE_IOTDB_STORAGE_GROUP root.skywalking   - - sessionPoolSize The connection pool size for IoTDB. If the value is 0, the size of SessionPool will be 2 * CPU_Cores SW_STORAGE_IOTDB_SESSIONPOOL_SIZE 8   - - fetchTaskLogMaxSize the max number of fetch task log in a request SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE 1000   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. 
SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS, FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can't collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by ",". SW_METER_ANALYZER_ACTIVE_FILES    receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection, and designates that the servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMinThreads Minimum thread number of RESTful services. SW_RECEIVER_SHARING_JETTY_MIN_THREADS 1   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_JETTY_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_JETTY_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_JETTY_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer.
SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOcRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OC_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_CONTEXT_PATH /   prometheus-fetcher default Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine. - -    - - enabledRules Enabled rules. SW_PROMETHEUS_FETCHER_ENABLED_RULES self   - - maxConvertWorker The maximize meter convert worker. SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER -1(by default, half the number of CPU core(s))   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. 
SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - consumePartitions Indicates which PartitionId(s) of the topics is/are assigned to the OAP server. Separated by commas if multiple. SW_KAFKA_FETCHER_CONSUME_PARTITIONS -   - - isSharding True when OAP Server is in cluster. SW_KAFKA_FETCHER_IS_SHARDING false   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 100   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. 
SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter grpc targetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - targetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. 
By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file. (Source: /docs/main/v9.0.0/en/setup/backend/configuration-vocabulary/) Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml. Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn't work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn't mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL >= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds).
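For readers mapping a row of this vocabulary back to the configuration file itself: each setting sits under its module and provider in application.yml, and the System Environment Variable and Default columns combine into a single ${VAR:default} placeholder. A minimal sketch under that assumption (the selector key and the exact nesting are assumptions about the shipped application.yml; the variable names and default values are taken from the table above, and this is not a complete file):

storage:
  # the selector is assumed to pick which provider block below is activated
  selector: ${SW_STORAGE:h2}
  elasticsearch:
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost}
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
  h2:
    driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
    url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
    user: ${SW_STORAGE_H2_USER:sa}

Setting the environment variable overrides the value after the colon; leaving it unset keeps the value shown in the Default column.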
SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - enableDatabaseSession Cache metrics data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute. SW_CORE_ENABLE_DATABASE_SESSION true   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. 
- System CPU core size   cluster standalone - Standalone mode runs on a single OAP node only and provides no cluster coordination. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum number of retries. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey.
SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_SUPERDATASET_STORAGE_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. 
SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - driver H2 JDBC driver. SW_STORAGE_H2_DRIVER org.h2.jdbcx.JdbcDataSource   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5 SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. 
SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum batch size of SQL execution. SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize The thread pool size for asynchronously flushing data into the database. SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush (in seconds). SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shard number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shard number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shard factor for a super dataset record, i.e. the shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS, FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can't collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by ",". SW_METER_ANALYZER_ACTIVE_FILES    receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection, and designates that the servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services.
SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. 
you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOcRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OC_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   prometheus-fetcher default Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine. - -    - - enabledRules Enabled rules. SW_PROMETHEUS_FETCHER_ENABLED_RULES self   - - maxConvertWorker The maximize meter convert worker. SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER -1(by default, half the number of CPU core(s))   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. 
- skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow users to add, disable, and update UI templates. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog On-demand Pod log: fetches the Pod logs on users' demand; the logs are fetched and displayed in real time and are not persisted in any way. This is helpful when users want to do some experiments and monitor the logs to see what's happening inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default; please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query zipkin - Zipkin query implementation. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millis. SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames. SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size. SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back for search traces, 15 minutes in millis. SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data.
SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter grpc targetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - targetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. 
By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file. (Source: /docs/main/v9.1.0/en/setup/backend/configuration-vocabulary/) Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml. Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn't work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn't mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL >= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds).
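Because, per the note above, the environment variable names are only bindings declared in application.yml, deployments typically override them at startup instead of editing the file. A hedged docker-compose fragment as an illustration (the service name, image name, and the SW_STORAGE selector variable are assumptions; the remaining variable names and values come from the table):

  oap:
    image: apache/skywalking-oap-server   # illustrative image name
    environment:
      SW_STORAGE: elasticsearch           # assumed selector for the storage module
      SW_STORAGE_ES_CLUSTER_NODES: "es-node:9200"
      SW_CORE_REST_PORT: "12800"
      SW_CORE_GRPC_PORT: "11800"

Any variable left unset falls back to the default recorded in the vocabulary.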
SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - enableDatabaseSession Cache metrics data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute. SW_CORE_ENABLE_DATABASE_SESSION true   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. 
- System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. 
SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_SUPERDATASET_STORAGE_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. 
SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - driver H2 JDBC driver. SW_STORAGE_H2_DRIVER org.h2.jdbcx.JdbcDataSource   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5 SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. 
SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. 
SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. 
you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OTEL_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   prometheus-fetcher default Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine. - -    - - enabledRules Enabled rules. SW_PROMETHEUS_FETCHER_ENABLED_RULES self   - - maxConvertWorker The maximize meter convert worker. SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER -1(by default, half the number of CPU core(s))   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. 
SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query graphql - GraphQL query implementation. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. 
SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). 
SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter grpc targetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - targetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.2.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. 
SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. 
SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. 
SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - mysql-sharding - Sharding-Proxy for MySQL properties. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. 
SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - - dataSources The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml.The dataSource name should include the prefix \u0026ldquo;ds_\u0026rdquo; and separated by \u0026ldquo;,\u0026rdquo; and start from ds_0 SW_JDBC_SHARDING_DATA_SOURCES ds_0,ds_1   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. 
SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. 
- -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OTEL_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. 
SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. 
SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query graphql - GraphQL query implementation. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. 
SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. 
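For instance, each setting above is normally read through a ${ENV_VAR:default} placeholder, so the environment variable listed in the table overrides the literal default at startup. A minimal sketch, assuming the conventional selector/provider layout of application.yml (the exact file shipped with each release may differ):

```yaml
# Illustrative excerpt only. Module, setting, and variable names are taken from the
# table above; the surrounding structure is the usual application.yml layout.
core:
  selector: ${SW_CORE:default}
  default:
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}          # SW_CORE_GRPC_PORT overrides the gRPC binding port
    recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3}   # record data lifecycle in days
```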
The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.3.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. 
Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. 
- -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. 
SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. 
SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - mysql-sharding - Sharding-Proxy for MySQL properties. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - - dataSources The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml.The dataSource name should include the prefix \u0026ldquo;ds_\u0026rdquo; and separated by \u0026ldquo;,\u0026rdquo; and start from ds_0 SW_JDBC_SHARDING_DATA_SOURCES ds_0,ds_1   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. 
In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. 
SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. 
-    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OTEL_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. 
SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. 
SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). 
SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max Thtread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 response SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS firhose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY     Note ¹ System Environment Variable name could be declared and changed in application.yml. 
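The same pattern applies to every module; for example, moving the cluster module from standalone to Zookeeper coordination is a matter of changing the module selector and the corresponding SW_* variables. A hedged sketch using only the setting names listed above, not a complete configuration:

```yaml
# Illustrative excerpt only; defaults mirror the table above
# (SW_CLUSTER_ZK_HOST_PORT, SW_CLUSTER_ZK_SLEEP_TIME, SW_CLUSTER_ZK_MAX_RETRIES).
cluster:
  selector: ${SW_CLUSTER:standalone}
  zookeeper:
    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000}
    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3}
```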
The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.4.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. 
SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. 
SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. 
SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. 
SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. 
SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. 
SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. 
SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. 
- skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow users to add, disable, and update UI templates. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog On-demand Pod log: fetch the Pod logs on users' demand; the logs are fetched and displayed in real time, and are not persisted in any way. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default; please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for the Zipkin query API and supports the zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. 
SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. 
SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 response SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. 
The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.5.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. 
SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M (based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data for the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration (in seconds) of querying the eBPF profiling data from the database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count for querying the eBPF profiling data from the database. - System CPU core size   - - uiMenuRefreshInterval The period (in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period (in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   cluster standalone - Standalone is not suitable for running on more than a single node. No configuration available. 
- -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). 
SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. 
SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. 
SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. 
Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. 
The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). 
- true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). 
SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). 
SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max Thtread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. 
SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 response SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS firhose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicate if enable HTTPS for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.6.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. 
The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. 
- 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. 
SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. 
SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   
- - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - targets Hosts with ports of the BanyanDB. SW_STORAGE_BANYANDB_TARGETS 127.0.0.1:17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. 
- -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. 
SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. 
SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). 
SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   debugging-query default       - - keywords4MaskingSecretsOfConfig Include the list of keywords to filter configurations including secrets. Separate keywords by a comma. SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS user,password,token,accessKey,secretKey,authentication   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. 
By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max Thtread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 response SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS firhose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicate if enable HTTPS for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.7.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Context injection If you want to fetch the SkyWalking Context in your PHP code, which is super helpful for debugging and observability, You can enable the configuration item skywalking_agent.inject_context.\nDescription skywalking_agent.inject_context\nWhether to enable automatic injection of skywalking context variables (such as SW_TRACE_ID). For php-fpm mode, it will be injected into the $_SERVER variable. 
For swoole mode, it will be injected into the $request-\u0026gt;server variable.\nConfiguration [skywalking_agent] extension = skywalking_agent.so skywalking_agent.inject_context = On Usage For php-fpm mode:\n\u0026lt;?php echo $_SERVER[\u0026#34;SW_SERVICE_NAME\u0026#34;]; // get service name echo $_SERVER[\u0026#34;SW_INSTANCE_NAME\u0026#34;]; // get instance name echo $_SERVER[\u0026#34;SW_TRACE_ID\u0026#34;]; // get trace id For swoole mode:\n\u0026lt;?php $http = new Swoole\\Http\\Server(\u0026#39;127.0.0.1\u0026#39;, 9501); $http-\u0026gt;on(\u0026#39;request\u0026#39;, function ($request, $response) { echo $request-\u0026gt;server[\u0026#34;SW_SERVICE_NAME\u0026#34;]; // get service name  echo $request-\u0026gt;server[\u0026#34;SW_INSTANCE_NAME\u0026#34;]; // get instance name  echo $request-\u0026gt;server[\u0026#34;SW_TRACE_ID\u0026#34;]; // get trace id }); ","excerpt":"Context injection If you want to fetch the SkyWalking Context in your PHP code, which is super …","ref":"/docs/skywalking-php/next/en/configuration/context-injection/","title":"Context injection"},{"body":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. 
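The module activation snippet shown above, receiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:, uses the same ${ENV_NAME:default} placeholder convention that the Configuration Vocabulary's note describes: a setting in application.yml names a system environment variable together with a fallback value. As a rough illustration only (this is not the OAP's actual configuration loader, and resolvePlaceholder is a made-up helper), resolving such a placeholder can be sketched like this:

```java
// Minimal sketch of the ${ENV_NAME:default} placeholder convention used in application.yml.
// Illustrative only; the real OAP configuration loading is more involved.
public final class PlaceholderSketch {

    /** Resolves strings such as "${SW_RECEIVER_EBPF:default}" against the process environment. */
    static String resolvePlaceholder(String value) {
        if (value == null || !value.startsWith("${") || !value.endsWith("}")) {
            return value; // plain literal, nothing to resolve
        }
        String body = value.substring(2, value.length() - 1); // e.g. "SW_RECEIVER_EBPF:default"
        int sep = body.indexOf(':');
        String envName = sep >= 0 ? body.substring(0, sep) : body;
        String fallback = sep >= 0 ? body.substring(sep + 1) : "";
        String fromEnv = System.getenv(envName);
        return fromEnv != null ? fromEnv : fallback;
    }

    public static void main(String[] args) {
        // Prints "default" unless SW_RECEIVER_EBPF is set in the environment.
        System.out.println(resolvePlaceholder("${SW_RECEIVER_EBPF:default}"));
    }
}
```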
The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","excerpt":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies …","ref":"/docs/main/latest/en/setup/backend/backend-continuous-profiling/","title":"Continuous Profiling"},{"body":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  
Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. 
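The "Threshold With Trigger" behaviour described above (a sliding window over the most recent Period cycles, with a task fired once at least Count of the collected samples break the Threshold) can be sketched as follows. This is only an illustration of the documented policy semantics, not the eBPF agent's actual implementation; the class and method names are hypothetical, and the exact comparison operators (>= vs >, above vs below the threshold) depend on the check item.

```java
import java.util.ArrayDeque;
import java.util.Deque;

// Hypothetical sketch of the sliding-window check described in "Threshold With Trigger":
// keep the most recent `period` samples; if at least `count` of them exceed `threshold`,
// a profiling task should be triggered.
final class ThresholdWindowSketch {
    private final int period;        // window length, in collection cycles
    private final int count;         // how many breaches within the window trigger a task
    private final double threshold;  // the configured threshold value
    private final Deque<Double> window = new ArrayDeque<>();

    ThresholdWindowSketch(int period, int count, double threshold) {
        this.period = period;
        this.count = count;
        this.threshold = threshold;
    }

    /** Adds one newly collected sample and reports whether a profiling task should start. */
    boolean offer(double sample) {
        window.addLast(sample);
        if (window.size() > period) {
            window.removeFirst(); // slide the window: drop the oldest cycle
        }
        long breaches = window.stream().filter(v -> v > threshold).count();
        return breaches >= count; // assumption: "count check is met" read as >=
    }

    public static void main(String[] args) {
        // e.g. process CPU usage: period = 10 cycles, count = 3 breaches, threshold = 75%
        ThresholdWindowSketch check = new ThresholdWindowSketch(10, 3, 75.0);
        double[] cpuSamples = {40, 80, 85, 60, 90, 50};
        for (double s : cpuSamples) {
            if (check.offer(s)) {
                System.out.println("trigger profiling task (sample=" + s + ")");
                break; // a real agent would also honour the silence period here
            }
        }
    }
}
```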
This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","excerpt":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies …","ref":"/docs/main/next/en/setup/backend/backend-continuous-profiling/","title":"Continuous Profiling"},{"body":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period for monitoring data, which can also be understood as the most recent duration. Count: The number of times the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration. Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. 
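For the "Causes" information described earlier (the reason reported alongside a triggered task), a minimal illustrative shape could be a simple value type. The field names follow the list in the text (Process, Monitor Type, Threshold, Current), but the type itself is hypothetical and is not the agent's actual reporting protocol message.

```java
// Hypothetical value type mirroring the "Causes" fields listed in the text;
// not the real eBPF agent reporting protocol.
record ProfilingTriggerCause(String process, String monitorType, double threshold, double current) {
    String describe() {
        return process + " triggered " + monitorType
                + " (threshold=" + threshold + ", current=" + current + ")";
    }
}
```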
If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","excerpt":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-continuous-profiling/","title":"Continuous Profiling"},{"body":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. 
URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","excerpt":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-continuous-profiling/","title":"Continuous Profiling"},{"body":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. 
This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. 
By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","excerpt":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-continuous-profiling/","title":"Continuous Profiling"},{"body":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations would guide you.\n  Compiling\n Compiling Guidance    Release\n Release Guidance    ","excerpt":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations …","ref":"/docs/skywalking-infra-e2e/latest/en/contribution/readme/","title":"Contribution"},{"body":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations would guide you.\n  Compiling\n Compiling Guidance    Release\n Release Guidance    ","excerpt":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations …","ref":"/docs/skywalking-infra-e2e/next/en/contribution/readme/","title":"Contribution"},{"body":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations would guide you.\n  Compiling\n Compiling Guidance    Release\n Release Guidance    ","excerpt":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/contribution/readme/","title":"Contribution"},{"body":" Contributor   Project Contributions Ranking  SkyWalking Showcase     kezhenxu94   109 1  wu-sheng   39 2  wankai123   34 3  mrproliu   33 4  Fine0830   6 5  JaredTan95   4 6  pg-yang   4 7  arugal   4 8  weixiang1862   3 9  dashanji   2 10  innerpeacez   2 11  yswdqz   2 12  peachisai   2 13  CodePrometheus   2 14  hanahmily   1 15  JohnDuncan5171   1 16  nisiyong   1 17  Superskyyy   1 18  azibhassan   1 19  chenxiaohu   1 20  jmjoy   1 21  sacloudy   1 22    SkyWalking Website     wu-sheng   405 1  Jtrust   133 2  kezhenxu94   83 3  mrproliu   50 4  hanahmily   33 5  rootsongjc   20 6  fgksgf   18 7  Superskyyy   18 8  jmjoy   16 9  JaredTan95   14 10  Fine0830   12 11  arugal   12 12  dmsolr   12 13  innerpeacez   11 14  BFergerson   11 15  zhaoyuguang   9 16  wankai123   9 17  dashanji   8 18  TinyAllen   7 19  weixiang1862   7 20  EvanLjp   5 21  peng-yongsheng   5 22  heyanlong   5 23  Humbertzhang   4 24  yswdqz   4 25  yanmaipian   4 26  lujiajing1126   4 27  FingerLeader   3 28  gxthrj   3 29  Ax1an   3 30  YunaiV   2 31  LIU-WEI-git   2 32  langyan1022   2 33  pg-yang   2 34  libinglong   2 35  alonelaval   2 36  nisiyong   2 37  x22x22   2 38  HHoflittlefish777   2 39  CzyerChen   2 40  
cheenursn   2 41  thebouv   2 42  Alipebt   2 43  PGDream   1 44  liuhaoyang   1 45  LiteSun   1 46  liqiangz   1 47  geomonlin   1 48  lijing-21   1 49  leimeng-ma   1 50  klboke   1 51  kehuili   1 52  JoeCqupt   1 53  jjlu521016   1 54  jacentsao   1 55  hutaishi   1 56  hailin0   1 57  fushiqinghuan111   1 58  chopin-d   1 59  apmplus   1 60  jxnu-liguobin   1 61  zhang98722   1 62  yimeng   1 63  xu1009   1 64  xiongshiyan   1 65  xdRight   1 66   bing**   1 67  weiqiang333   1 68  vcjmhg   1 69  tristan-tsl   1 70  tisonkun   1 71  tevahp   1 72  sebbASF   1 73  FeynmanZhou   1 74  peachisai   1 75  nic-chen   1 76  lucperkins   1 77  lilien1010   1 78  Dylan-beicheng   1 79  devkanro   1 80  Johor03   1 81  ButterBright   1 82  harshaskumar05   1 83  kylixs   1 84  crl228   1 85  Humbedooh   1 86  thisisgpy   1 87  CharlesMaster   1 88  andrewgkew   1 89  wayilau   1 90  feelwing1314   1 91  adriancole   1 92  agile6v   1 93   394102339**   1 94  YoungHu   1 95  wang-yeliang   1 96  withlin   1 97  moonming   1 98   983708408**   1 99    SkyWalking     wu-sheng   2967 1  peng-yongsheng   874 2  kezhenxu94   470 3   ascrutae**   381 4  ascrutae   352 5  acurtain   251 6  wankai123   211 7  mrproliu   176 8  hanahmily   176 9  Fine0830   133 10  JaredTan95   100 11  dmsolr   83 12  arugal   76 13  zhaoyuguang   65 14  lytscu   64 15  wingwong-knh   53 16   zhangxin**   47 17  BFergerson   45 18  pg-yang   28 19   ascrutae**   28 20  lujiajing1126   28 21  Ax1an   27 22  yswdqz   26 23  wayilau   26 24  EvanLjp   25 25  zifeihan   25 26  IanCao   23 27   295198088**   22 28  weixiang1862   22 29  x22x22   22 30  innerpeacez   21 31   394102339**   20 32  Superskyyy   19 33  clevertension   17 34  liuhaoyang   17 35  withlin   17 36  liqiangz   16 37  xbkaishui   16 38   renliangbu**   16 39  carlvine500   15 40  candyleer   15 41  peachisai   14 42  hailin0   12 43  zhangkewei   11 44  bai-yang   11 45  heyanlong   11 46  tom-pytel   10 47  TinyAllen   10 48  adermxzs   10 49  songzhendong   10 50   55846420**   10 51  wallezhang   10 52  Jtrust   9 53  IluckySi   9 54  qxo   9 55  smartboy37597   9 56  CzyerChen   9 57  alonelaval   8 58  heihaozi   8 59  wendal   8 60  LIU-WEI-git   8 61  CodePrometheus   8 62  Humbertzhang   8 63  toffentoffen   8 64  CalvinKirs   8 65  tristaZero   7 66   liufei**   6 67  zhyyu   6 68  stalary   6 69  honganan   6 70   lxin96**   6 71  jjtyro   6 72  xuanyu66   6 73  J-Cod3r   6 74  YunaiV   5 75  langyan1022   5 76  Liu-XinYuan   5 77  SataQiu   5 78  Cool-Coding   5 79  harvies   5 80  xu1009   5 81  wuwen5   5 82   55846420**   5 83  tuohai666   5 84  flycash   5 85  JohnNiang   5 86  yaojingguo   5 87  fgksgf   5 88  adriancole   5 89  codeglzhang   4 90  yu199195   4 91  yangyiweigege   4 92  VictorZeng   4 93  TeslaCN   4 94  LiWenGu   4 95  haoyann   4 96  chidaodezhongsheng   4 97  xinzhuxiansheng   4 98  aiyanbo   4 99  darcyda1   4 100  sN0wpeak   4 101  FatihErdem   4 102  chenhaipeng   4 103  nisiyong   4 104  Z-Beatles   4 105  YczYanchengzhe   4 106  cyberdak   4 107  dagmom   4 108  codelipenghui   4 109  dominicqi   4 110  dio   3 111  libinglong   3 112  liuzc9   3 113   lizl9**   3 114  neeuq   3 115  snakorse   3 116  xiaospider   3 117  xiaoy00   3 118  Indifer   3 119  huangyoje   3 120  s00373198   3 121  cyejing   3 122  Ahoo-Wang   3 123  yanfch   3 124  devkanro   3 125  oflebbe   3 126  rabajaj0509   3 127  Shikugawa   3 128  LinuxSuRen   3 129  ScienJus   3 130  liu-junchi   3 131  WillemJiang   3 132  chenpengfei   3 133 
 gnr163   3 134  jiang1997   3 135  jmjoy   2 136  viswaramamoorthy   2 137  vcjmhg   2 138  tzy1316106836   2 139  terranhu   2 140  scolia   2 141  osiriswd   2 142   2278966200**   2 143  novayoung   2 144  muyun12   2 145  mgsheng   2 146  makingtime   2 147  klboke   2 148  katelei6   2 149  karott   2 150  jinlongwang   2 151  hutaishi   2 152  Hen1ng   2 153  kuaikuai   2 154  lkxiaolou   2 155  purgeyao   2 156  michaelsembwever   2 157   bwh12398**   2 158  YunfengGao   2 159  WildWolfBang   2 160  juzhiyuan   2 161  SoberChina   2 162  KangZhiDong   2 163  mufiye   2 164   yushuqiang**   2 165  zxbu   2 166  yazong   2 167  xzyJavaX   2 168  xcaspar   2 169  wuguangkuo   2 170  webb2019   2 171  evanxuhe   2 172  yang-xiaodong   2 173  RaigorJiang   2 174  Qiliang   2 175  Oliverwqcwrw   2 176  buxingzhe   2 177  tsuilouis   2 178  leizhiyuan   2 179  Jargon9   2 180  potiuk   2 181   iluckysi   2 182  kim-up   2 183  HarryFQG   2 184  easonyipj   2 185  willseeyou   2 186  AlexanderWert   2 187  ajanthan   2 188  chen-ni   2 189  844067874   2 190  elk-g   2 191  dsc6636926   2 192  heihei180   2 193  amwyyyy   2 194  dengliming   2 195  cuiweiwei   2 196  coki230   2 197  coder-yqj   2 198  cngdkxw   2 199  chenmudu   2 200  beckhampu   2 201  cheetah012   2 202  ZhuWang1112   2 203  zaunist   2 204  shichaoyuan   2 205  XhangUeiJong   2 206  Switch-vov   2 207  SummerOfServenteen   2 208  maxiaoguang64   1 209  maclong1989   1 210  sourcelliu   1 211  margauxcabrera   1 212  Yebemeto   1 213  momo0313   1 214  Xlinlin   1 215   cheatbeater**   1 216  lxliuxuankb   1 217  lu-xiaoshuang   1 218  lpcy   1 219  louis-zhou   1 220  lngmountain   1 221   lixin40**   1 222  liuyanggithup   1 223  linliaoy   1 224   xlz35429674**   1 225   seiferhu**   1 226   seiferhu**   1 227   72372815\u0026#43;royal-dargon**   1 228   72775443\u0026#43;raybi-asus**   1 229  ralphgj   1 230  qiuyu-d   1 231  thanq   1 232  probeyang   1 233  carrypann   1 234  pkxiuluo   1 235  FeynmanZhou   1 236  ooi22   1 237  onecloud360   1 238  nileblack   1 239  chenyi19851209   1 240  neatlife   1 241  lijial   1 242  inversionhourglass   1 243  huliangdream   1 244  hsoftxl   1 245  hi-sb   1 246  Heguoya   1 247  hardzhang   1 248  haotian2015   1 249  gzlicanyi   1 250  guyukou   1 251  gy09535   1 252  guochen2   1 253  kylixs   1 254  gonedays   1 255  guodongq   1 256  ggndnn   1 257  GerryYuan   1 258  geekymv   1 259  geektcp   1 260  leemove   1 261  lazycathome   1 262  langke93   1 263  landonzeng   1 264  lagagain   1 265  ksewen   1 266  killGC   1 267  kikupotter   1 268  kevinyyyy   1 269  ken-duck   1 270  kayleyang   1 271  aeolusheath   1 272  justeene   1 273  jsbxyyx   1 274  zhangjianweibj   1 275  jianglin1008   1 276  jialong121   1 277  jjlu521016   1 278   zhousiliang163**   1 279   45602777\u0026#43;zhangzhanhong2**   1 280   zcai2**   1 281   zaygrzx**   1 282   yuyujulin**   1 283   yurunchuan**   1 284   182148432**   1 285   wu_yan_tao**   1 286   yanmingbi**   1 287   yangxb2010000**   1 288   yanbinwei2851**   1 289   978861768**   1 290   48479214\u0026#43;xuxiawei**   1 291   9313869\u0026#43;xuchangjunjx**   1 292   yexingren23**   1 293   1903636211**   1 294   xiaozheng**   1 295   281890899**   1 296   66098854\u0026#43;tangshan-brs**   1 297   88840672\u0026#43;wangwang89**   1 298   loushuiyifan**   1 299   305542043**   1 300   381321959**   1 301   zhangliang**   1 302   kzd666**   1 303   45203823\u0026#43;gzshilu**   1 304   28707699**   1 305   yqjdcyy**   1 306 
  tanjunchen20**   1 307   liuzhengyang**   1 308   hey.yanlong**   1 309   zygfengyuwuzu**   1 310   tmac.back**   1 311   xtha**   1 312   345434645**   1 313   zoidbergwill**   1 314   tbdp.hi**   1 315   tanzhen**   1 316   973117150**   1 317   89574863\u0026#43;4ydx3906**   1 318   sxzaihua**   1 319   hpy253215039**   1 320   814464284**   1 321   stone_wlg**   1 322   stenio**   1 323   hoolooday**   1 324   songzhe_fish**   1 325   wang-yaozheng**   1 326   sk163**   1 327   101088629\u0026#43;simonluo345**   1 328   simonlei**   1 329   41794887\u0026#43;sialais**   1 330   31874857\u0026#43;sikelangya**   1 331   mestarshine**   1 332   34833891\u0026#43;xdright**   1 333   bing**   1 334   23226334**   1 335   wujun8**   1 336   zzhxccw**   1 337   qrw_email**   1 338   wind2008hxy**   1 339   36367435\u0026#43;whl12345**   1 340   45580443\u0026#43;whfjam**   1 341   zwj777**   1 342   xiongchuang**   1 343   lyzhang1999**   1 344   52819067\u0026#43;weiqiang-w**   1 345   55177318\u0026#43;vcjmhg**   1 346   46754544\u0026#43;tristan-tsl**   1 347   wander4096**   1 348   136082619**   1 349   montecristosoul**   1 350  Lin1997   1 351  coolbeevip   1 352  LazyLei   1 353  leileiluoluo   1 354  lt5227   1 355  mostcool   1 356  Alipebt   1 357  zhentaoJin   1 358  kagaya85   1 359  augustowebd   1 360  j-s-3   1 361  JohnDuncan5171   1 362  jbampton   1 363  zouyx   1 364  JoeKerouac   1 365  Linda-pan   1 366  jim075960758   1 367  jiekun   1 368  c1ay   1 369   chenglei**   1 370   chenyao**   1 371  npmmirror   1 372  nikitap492   1 373  nickwongwong   1 374  ZhuoSiChen   1 375  mikechengwei   1 376  mikkeschiren   1 377  zeaposs   1 378  TheRealHaui   1 379  doddi   1 380  marcingrzejszczak   1 381  maolie   1 382  mahmoud-anwer   1 383  donotstopplz   1 384  liuhaoXD   1 385  linghengqian   1 386  darcydai   1 387  sdanzo   1 388  chanjarster   1 389  damonxue   1 390  cvimer   1 391  CommissarXia   1 392  ChengDaqi2023   1 393  CharlesMaster   1 394  shiluo34   1 395  brucewu-fly   1 396   qq327568824**   1 397  ArjenDavid-sjtu   1 398  AngryMills   1 399   andyzzlms**   1 400  AirTrioa   1 401  lunchboxav   1 402  50168383   1 403  1095071913   1 404  Jedore   1 405  mustangxu   1 406   zhongjianno1**   1 407  DeadLion   1 408  Lighfer   1 409  Henry75m39   1 410  onurccn   1 411  tankilo   1 412  Gallardot   1 413  AbelCha0   1 414  bootsrc   1 415  FingerLiu   1 416  Felixnoo   1 417  DuanYuePeng   1 418  efekaptan   1 419  qijianbo010   1 420  qqeasonchen   1 421  devon-ye   1 422   295198088**   1 423   c feng   1 424  buzuotaxuan   1 425  mmm9527   1 426  wolfboys   1 427  beiwangnull   1 428  amogege   1 429  alidisi   1 430  alexkarezin   1 431  aix3   1 432  adamni135   1 433  absorprofess   1 434  ZhengBing520   1 435  ZhHong   1 436  chenbeitang   1 437  ZS-Oliver   1 438  panniyuyu   1 439  fuhuo   1 440  ethan256   1 441  eoeac   1 442  echooymxq   1 443  dzx2018   1 444  IceSoda177   1 445  dvsv2   1 446  drgnchan   1 447  donbing007   1 448  dogblues   1 449  divyakumarjain   1 450  dd1k   1 451  dashanji   1 452  cutePanda123   1 453  cui-liqiang   1 454  cuishuang   1 455  crystaldust   1 456  wbpcode   1 457  TerrellChen   1 458  Technoboy-   1 459  StreamLang   1 460  stevehu   1 461  kun-song   1 462   826245622**   1 463  compilerduck   1 464  SheltonZSL   1 465  sergicastro   1 466  zhangsean   1 467  yymoth   1 468  ruibaby   1 469  rlenferink   1 470  remicollet   1 471  RandyAbernethy   1 472  QHWG67   1 473  pengyongqiang666   1 474  
Patrick0308   1 475  yuqichou   1 476  Miss-you   1 477  ycoe   1 478   me**   1 479  yanickxia   1 480  XinweiLyu   1 481  liangyepianzhou   1 482  Wooo0   1 483  ViberW   1 484  wilsonwu   1 485  moonming   1 486  wyt   1 487  victor-yi   1 488  Videl   1 489  trustin   1 490  TomMD   1 491  ThisSeanZhang   1 492  gitter-badger   1 493  Adrian Cole    494  github-actions[bot]    495  dependabot[bot]    496    Booster UI     Fine0830   425 1  wu-sheng   15 2  heyanlong   12 3  pg-yang   9 4  CzyerChen   3 5  yswdqz   3 6  techbirds   3 7  Superskyyy   2 8  peachisai   2 9  zhourunjie1988   2 10  xu1009   2 11  weixiang1862   2 12  lsq27   2 13  innerpeacez   2 14  horochx   2 15  drgnchan   2 16  smartboy37597   2 17  CodePrometheus   2 18  WitMiao   1 19  liuyib   1 20  arugal   1 21  wuwen5   1 22  songzhendong   1 23  pw151294   1 24  kezhenxu94   1 25  jiang1997   1 26  hutaishi   1 27  heihei180   1 28  hadesy   1 29  ZhuWang1112   1 30  XinweiLyu   1 31  liangyepianzhou   1 32  SimonHu1993   1 33  LinuxSuRen   1 34  binbin666   1 35  marcingrzejszczak   1 36  toffentoffen   1 37  mahmoud-anwer   1 38  donotstopplz   1 39  BFergerson   1 40    Plugin for Service Topology     Fine0830   63 1  wu-sheng   4 2  Superskyyy   1 3   fine**   1 4    Java Agent     wu-sheng   2747 1  peng-yongsheng   874 2   ascrutae**   381 3  ascrutae   352 4  kezhenxu94   275 5  acurtain   251 6  hanahmily   165 7  JaredTan95   96 8  dmsolr   87 9  mrproliu   68 10  arugal   66 11  zhaoyuguang   65 12  lytscu   64 13  Fine0830   53 14   zhangxin**   47 15  wingwong-knh   45 16  BFergerson   44 17  wankai123   31 18   ascrutae**   28 19  Ax1an   27 20  wayilau   26 21  zifeihan   26 22  EvanLjp   25 23  IanCao   23 24   295198088**   22 25  x22x22   22 26   394102339**   20 27  pg-yang   19 28  xu1009   19 29  clevertension   17 30  withlin   17 31  xbkaishui   16 32   renliangbu**   16 33  liuhaoyang   16 34  lujiajing1126   16 35  candyleer   15 36  carlvine500   15 37  liqiangz   13 38  nisiyong   13 39  hailin0   12 40  wallezhang   11 41  bai-yang   11 42  zhangkewei   11 43  heyanlong   10 44  xzyJavaX   10 45  songzhendong   10 46  adermxzs   10 47  TinyAllen   10 48  Jtrust   10 49   55846420**   10 50  heihaozi   9 51  IluckySi   9 52  qxo   9 53  wendal   8 54  alonelaval   8 55  CzyerChen   8 56  zhyyu   7 57  Humbertzhang   7 58  tristaZero   7 59  J-Cod3r   6 60  Cool-Coding   6 61  jjtyro   6 62  honganan   6 63  stalary   6 64  wuwen5   6 65   liufei**   6 66  gzlicanyi   6 67   lxin96**   6 68  tom-pytel   6 69  xuanyu66   6 70  devkanro   6 71  hutaishi   5 72  harvies   5 73  langyan1022   5 74  Liu-XinYuan   5 75  YunaiV   5 76  SataQiu   5 77  adriancole   5 78  darcyda1   5 79  yaojingguo   5 80  JohnNiang   5 81  flycash   5 82  tuohai666   5 83  cyberdak   5 84  codelipenghui   5 85  peachisai   5 86   55846420**   5 87  LiWenGu   4 88  kylixs   4 89  TeslaCN   4 90  haoyann   4 91  chidaodezhongsheng   4 92  xinzhuxiansheng   4 93  VictorZeng   4 94  xiaqi1210   4 95  yu199195   4 96  chanjarster   4 97  FatihErdem   4 98  aiyanbo   4 99  sN0wpeak   4 100  fgksgf   4 101  Oliverwqcwrw   4 102  Z-Beatles   4 103  alanlvle   4 104  dagmom   4 105  innerpeacez   4 106  dominicqi   4 107  weixiang1862   4 108  vcjmhg   3 109  cyejing   3 110  s00373198   3 111  huangyoje   3 112  Indifer   3 113  xiaoy00   3 114  snakorse   3 115  neeuq   3 116   lizl9**   3 117  libinglong   3 118  gnr163   3 119  chenpengfei   3 120  YczYanchengzhe   3 121  WillemJiang   3 122  liu-junchi   3 123  
ScienJus   3 124  oflebbe   3 125  yanfch   3 126  Ahoo-Wang   3 127  dio   3 128  codeglzhang   3 129  osiriswd   2 130  scolia   2 131  terranhu   2 132  tzy1316106836   2 133  viswaramamoorthy   2 134  webb2019   2 135  gglzf4   2 136  kuaikuai   2 137   2278966200**   2 138  novayoung   2 139  muyun12   2 140  mgsheng   2 141  makingtime   2 142  lpcy   2 143  klboke   2 144  karott   2 145  jinlongwang   2 146  Hen1ng   2 147  Superskyyy   2 148  seifeHu   2 149  lkxiaolou   2 150  purgeyao   2 151  PepoRobert   2 152  michaelsembwever   2 153  marcingrzejszczak   2 154   bwh12398**   2 155  YunfengGao   2 156  WildWolfBang   2 157  shichaoyuan   2 158  juzhiyuan   2 159  SoberChina   2 160  KangZhiDong   2 161   yushuqiang**   2 162  zxbu   2 163  yazong   2 164  xcaspar   2 165  wuguangkuo   2 166  geekymv   2 167  yang-xiaodong   2 168  Shikugawa   2 169  Qiliang   2 170  buxingzhe   2 171  tsuilouis   2 172  Leibnizhu   2 173  leizhiyuan   2 174  CalvinKirs   2 175  Jargon9   2 176  potiuk   2 177   iluckysi   2 178  2han9wen71an   2 179  844067874   2 180  HarryFQG   2 181  ForrestWang123   2 182  ajanthan   2 183  AlexanderWert   2 184  willseeyou   2 185  ArjenDavid-sjtu   2 186  evanxuhe   2 187  elk-g   2 188  dsc6636926   2 189  amwyyyy   2 190  dengliming   2 191  dashanji   2 192  cylx3126   2 193  cuiweiwei   2 194  coki230   2 195  SummerOfServenteen   2 196  Switch-vov   2 197  tjiuming   2 198  XhangUeiJong   2 199  zaunist   2 200  cheetah012   2 201  beckhampu   2 202  chenmudu   2 203  coder-yqj   2 204  cngdkxw   2 205  githubcheng2978   1 206  FeynmanZhou   1 207  onecloud360   1 208  nileblack   1 209  neatlife   1 210  Xlinlin   1 211  momo0313   1 212  Yebemeto   1 213  margauxcabrera   1 214  sourcelliu   1 215  maxiaoguang64   1 216  lxliuxuankb   1 217  lvxiao1   1 218  guodongq   1 219  louis-zhou   1 220   lixin40**   1 221  pkxiuluo   1 222  carrypann   1 223  probeyang   1 224  qiaoxingxing   1 225  thanq   1 226  qiuyu-d   1 227  ggndnn   1 228  ralphgj   1 229  raybi-asus   1 230  GerryYuan   1 231  geektcp   1 232  mestarshine   1 233   chenyao**   1 234  sikelangya   1 235  simonlei   1 236  sk163   1 237  zhangjianweibj   1 238  JoeCqupt   1 239  jialong121   1 240  jjlu521016   1 241  hyhyf   1 242  hxd123456   1 243  huliangdream   1 244  xiaomiusa87   1 245  hsoftxl   1 246  hi-sb   1 247  Heguoya   1 248  hardzhang   1 249  haotian2015   1 250  guyukou   1 251  gy09535   1 252  rechardguo   1 253  gonedays   1 254  liuyanggithup   1 255  linliaoy   1 256  lijial   1 257  leemove   1 258  lbc97   1 259  lazycathome   1 260  langke93   1 261  landonzeng   1 262  ksewen   1 263  killGC   1 264  kikupotter   1 265  kevinyyyy   1 266  kayleyang   1 267  aeolusheath   1 268  justeene   1 269  jsbxyyx   1 270  jmjoy   1 271   tmac.back**   1 272   345434645**   1 273   zoidbergwill**   1 274   zhousiliang163**   1 275   45602777\u0026#43;zhangzhanhong2**   1 276   zcai2**   1 277   zaygrzx**   1 278   yuyujulin**   1 279   yurunchuan**   1 280   74546965\u0026#43;yswdqz**   1 281   182148432**   1 282   wu_yan_tao**   1 283   yanmingbi**   1 284   yangxb2010000**   1 285   yanbinwei2851**   1 286   249021408**   1 287   9313869\u0026#43;xuchangjunjx**   1 288   xiongchuang**   1 289   cheatbeater**   1 290   66098854\u0026#43;tangshan-brs**   1 291   42414099\u0026#43;yanye666**   1 292   893979653**   1 293   88840672\u0026#43;wangwang89**   1 294   loushuiyifan**   1 295   lcbiao34**   1 296   305542043**   1 297   381321959**   1 298   orezsilence** 
  1 299   zhangliang**   1 300   kzd666**   1 301   45203823\u0026#43;gzshilu**   1 302   28707699**   1 303   tanjunchen20**   1 304   70845636\u0026#43;mufiye**   1 305   liuzhengyang**   1 306   zygfengyuwuzu**   1 307   lyzhang1999**   1 308   wqp1987**   1 309  w2dp   1 310  weiqiang-w   1 311  tristan-tsl   1 312  tincopper   1 313  angty   1 314  tedli   1 315  tbdpmi   1 316   tanzhen**   1 317  tangxqa   1 318  sxzaihua   1 319  hepyu   1 320  surechen   1 321  stone-wlg   1 322  stenio2011   1 323  zhe1926   1 324   xubinghaozs**   1 325   yexingren23**   1 326   1903636211**   1 327   1612202137**   1 328   281890899**   1 329   34833891\u0026#43;xdright**   1 330   bing**   1 331   23226334**   1 332   wujun8**   1 333   809697469**   1 334   zzhxccw**   1 335   qrw_email**   1 336   wind2008hxy**   1 337   63728367\u0026#43;will2020-power**   1 338   36367435\u0026#43;whl12345**   1 339   45580443\u0026#43;whfjam**   1 340   zwj777**   1 341   weihubeats**   1 342  augustowebd   1 343  jbampton   1 344  zouyx   1 345  JoeKerouac   1 346  Linda-pan   1 347  leihuazhe   1 348   zhongjianno1**   1 349  DeadLion   1 350  Lighfer   1 351  kim-up   1 352  hardy4yooz   1 353  onurccn   1 354  guillaume-alvarez   1 355  GuiSong01   1 356  tankilo   1 357  Gallardot   1 358  AbelCha0   1 359  nikitap492   1 360  nickwongwong   1 361  ZhuoSiChen   1 362  mikkeschiren   1 363  zeaposs   1 364  TheRealHaui   1 365  maolie   1 366  donotstopplz   1 367  liuhaoXD   1 368  lishuo5263   1 369  Lin1997   1 370  coolbeevip   1 371  LazyLei   1 372  leileiluoluo   1 373  lt5227   1 374  zhentaoJin   1 375  kagaya85   1 376  CharlesMaster   1 377  shiluo34   1 378  wapkch   1 379  thisisgpy   1 380  brucewu-fly   1 381  BigXin0109   1 382  bmk15897   1 383   qq327568824**   1 384  AngryMills   1 385   andyzzlms**   1 386  guoxiaod   1 387  adaivskenan   1 388  Alceatraz   1 389  AirTrioa   1 390  lunchboxav   1 391  50168383   1 392  1095071913   1 393  bootsrc   1 394  ForestWang123   1 395  FingerLiu   1 396  DuanYuePeng   1 397  efekaptan   1 398  qijianbo010   1 399  qqeasonchen   1 400  DominikHubacek   1 401  devon-ye   1 402  darknesstm   1 403  zhaoxiaojie0415   1 404  darcydai   1 405  sdanzo   1 406  dachuan9e   1 407  cvimer   1 408  CommissarXia   1 409  Chenfx-git   1 410  furaul   1 411  HScarb   1 412  c1ay   1 413   295198088**   1 414   c feng   1 415  buzuotaxuan   1 416  mmm9527   1 417  beiwangnull   1 418  andotorg   1 419  amogege   1 420  alexkarezin   1 421  aix3   1 422  adamni135   1 423  zimmem   1 424  ZhHong   1 425  chenbeitang   1 426  ZS-Oliver   1 427  panniyuyu   1 428  fuhuo   1 429  eoeac   1 430  life-   1 431  echooymxq   1 432  dzx2018   1 433  IceSoda177   1 434  dvsv2   1 435  drgnchan   1 436  donbing007   1 437  divyakumarjain   1 438  AlchemyDing   1 439  dd1k   1 440  cutePanda123   1 441  cui-liqiang   1 442  crystaldust   1 443  jinrongzhang   1 444  wbpcode   1 445  TerrellChen   1 446  Technoboy-   1 447  stevehu   1 448  kun-song   1 449   826245622**   1 450  compilerduck   1 451  sergicastro   1 452  zhangsean   1 453  yymoth   1 454  SWHHEART   1 455  ruibaby   1 456  rlenferink   1 457  RickyLau   1 458  RandyAbernethy   1 459  QHWG67   1 460  Patrick0308   1 461   chenglei**   1 462  yuqichou   1 463  yoyofx   1 464  Miss-you   1 465  ycoe   1 466   me**   1 467  yanickxia   1 468  yangyulely   1 469  Wooo0   1 470  ViberW   1 471  wilsonwu   1 472  moonming   1 473  victor-yi   1 474  Videl   1 475  trustin   1 476  TomMD   1 477  
ThisSeanZhang   1 478  gitter-badger   1 479    Python Agent     kezhenxu94   97 1  Superskyyy   63 2  tom-pytel   47 3  alonelaval   21 4  jiang1997   14 5  Humbertzhang   10 6  Jedore   5 7  ZEALi   4 8  katelei6   4 9  SheltonZSL   3 10  jaychoww   3 11  FAWC438   3 12  wu-sheng   2 13  probeyang   2 14  langyizhao   2 15  arcosx   2 16  zkscpqm   1 17  wuwen5   1 18  dafu-wu   1 19  VxCoder   1 20  taskmgr   1 21  Forstwith   1 22  fuhuo   1 23  dcryans   1 24   32413353\u0026#43;cooolr**   1 25  c1ay   1 26  chestarss   1 27  alidisi   1 28  XinweiLyu   1 29  TomMD   1 30  CodePrometheus   1 31  shenxiangzhuang   1 32  doddi   1 33  sungitly   1 34  wzy960520   1 35  JarvisG495   1 36  JaredTan95   1 37  fgksgf   1 38  zgfh   1 39    NodeJS Agent     kezhenxu94   61 1  tom-pytel   38 2  ruleeeer   4 3  BFergerson   3 4  wu-sheng   3 5  michaelzangl   2 6  alanlvle   2 7  tianyk   2 8  ErosZy   1 9  QuanjieDeng   1 10  TonyKingdom   1 11  liu-zhizhu   1 12   wxb17742006482**   1 13  nd-lqj   1 14  wuwen5   1 15    Go Agent     mrproliu   61 1  CodePrometheus   8 2  Alipebt   8 3  wu-sheng   6 4  LinuxSuRen   4 5  ShyunnY   2 6  IceSoda177   2 7  vearne   2 8  rfyiamcool   2 9  ethan256   2 10  jiekun   2 11  zheheBao   1 12  xuyue97   1 13  jarvis-u   1 14  icodeasy   1 15  YenchangChan   1 16  kikoroc   1 17  darknos   1 18  Ecostack   1 19  Ruff-nono   1 20  0o001   1 21  lujiajing1126   1 22  GlqEason   1 23    Rust Agent     jmjoy   39 1  wu-sheng   20 2  Shikugawa   5 3  tisonkun   4 4  CherishCai   2 5  dkkb   2 6  kezhenxu94   1 7    PHP Agent     jmjoy   74 1  heyanlong   12 2  phanalpha   4 3  wu-sheng   2 4  matikij   1 5    Client JavaScript     Fine0830   143 1  wu-sheng   33 2  arugal   19 3  Lighfer   2 4  kezhenxu94   2 5  tianyk   2 6  wuwen5   2 7  Leo555   1 8  qinhang3   1 9  min918   1 10  tthallos   1 11  i7guokui   1 12  aoxls   1 13  givingwu   1 14  Jtrust   1 15  JaredTan95   1 16  AliceTWu   1 17  airene   1 18    Nginx LUA Agent     wu-sheng   50 1  dmsolr   26 2  membphis   10 3  moonming   7 4  mrproliu   6 5  spacewander   3 6  kezhenxu94   2 7  WALL-E   2 8  arugal   2 9  wangrzneu   2 10  yxudong   2 11  JaredTan95   2 12  jeremie1112   1 13  dingdongnigetou   1 14  CalvinKirs   1 15  lilien1010   1 16  Jijun   1 17  Dofine-dufei   1 18  alonelaval   1 19  Frapschen   1 20  tzssangglass   1 21    Kong Agent     dmsolr   15 1  wu-sheng   4 2  kezhenxu94   2 3  CalvinKirs   1 4    SkyWalking Satellite     mrproliu   64 1  EvanLjp   32 2  kezhenxu94   20 3  gxthrj   7 4  wu-sheng   6 5  wangrzneu   2 6  BFergerson   1 7  fgksgf   1 8  CalvinKirs   1 9  guangdashao   1 10  inversionhourglass   1 11  nic-chen   1 12  arugal   1 13    Kubernetes Event Exporter     kezhenxu94   16 1  wu-sheng   6 2  fgksgf   4 3  dmsolr   2 4  CalvinKirs   1 5    SkyWalking Rover     mrproliu   92 1  wu-sheng   5 2  spacewander   3 3  jelipo   2 4  hkmdxlftjf   1 5  IluckySi   1 6  LinuxSuRen   1 7  caiwc   1 8  kezhenxu94   1 9    SkyWalking CLI     kezhenxu94   79 1  mrproliu   46 2  fgksgf   44 3  wu-sheng   11 4  hanahmily   6 5  try-agaaain   5 6  JarvisG495   4 7  arugal   4 8  alonelaval   3 9  BFergerson   2 10  heyanlong   2 11  Alexxxing   1 12  Superskyyy   1 13  clk1st   1 14  innerpeacez   1 15    Kubernetes Helm     innerpeacez   58 1  kezhenxu94   38 2  wu-sheng   32 3  hanahmily   19 4  mrproliu   6 5  JaredTan95   6 6  ButterBright   4 7  dashanji   3 8  rh-at   2 9  chengshiwen   2 10  eric-sailfish   1 11  geffzhang   1 12  glongzh   1 13  chenvista   1 14  
swartz-k   1 15  tristan-tsl   1 16  vision-ken   1 17   wang_weihan**   1 18  wayilau   1 19  williamyao1982   1 20  zshrine   1 21  aikin-vip   1 22  wankai123   1 23  SeanKilleen   1 24  ScribblerCoder   1 25  rabajaj0509   1 26  CalvinKirs   1 27  carllhw   1 28  zalintyre   1 29  Yangfisher1   1 30  aviaviavi   1 31    SkyWalking Cloud on Kubernetes     hanahmily   59 1  dashanji   26 2  kezhenxu94   8 3  mrproliu   5 4  weixiang1862   4 5  wu-sheng   3 6  ESonata   2 7  jichengzhi   2 8  heyanlong   1 9  hwzhuhao   1 10  SzyWilliam   1 11   rolandma**   1 12  robberphex   1 13  toffentoffen   1 14  CalvinKirs   1 15  fgksgf   1 16  Duncan-tree-zhou   1 17  ButterBright   1 18  BFergerson   1 19    Data Collect Protocol     wu-sheng   76 1  mrproliu   27 2  arugal   11 3  kezhenxu94   11 4  liuhaoyang   4 5  EvanLjp   3 6  Shikugawa   3 7  peng-yongsheng   2 8  zifeihan   2 9  Switch-vov   2 10  dmsolr   1 11  hanahmily   1 12  fgksgf   1 13  nacx   1 14  yaojingguo   1 15  SataQiu   1 16  stalary   1 17  Z-Beatles   1 18  liqiangz   1 19  snakorse   1 20  xu1009   1 21  heyanlong   1 22  Liu-XinYuan   1 23    Query Protocol     wu-sheng   99 1  mrproliu   39 2  wankai123   17 3  arugal   15 4  peng-yongsheng   11 5  kezhenxu94   10 6  hanahmily   9 7  x22x22   3 8  JaredTan95   3 9  BFergerson   1 10  MiracleDx   1 11  fgksgf   1 12  liuhaoyang   1 13  Fine0830   1 14  chenmudu   1 15  liqiangz   1 16  heyanlong   1 17    Go API     mrproliu   57 1  wu-sheng   17 2  kezhenxu94   6 3  arugal   3 4  fgksgf   2 5   dalekliuhan**   2 6  gxthrj   2 7  liqiangz   2 8  EvanLjp   2 9  JaredTan95   1 10  CalvinKirs   1 11   mrproliu**   1 12    BanyanDB     hanahmily   238 1  lujiajing1126   98 2  Fine0830   23 3  WuChuSheng1   21 4  ButterBright   16 5  wu-sheng   12 6  HHoflittlefish777   10 7  hailin0   9 8  zesiar0   6 9  sivasathyaseeelan   5 10  mikechengwei   5 11  Sylvie-Wxr   5 12  innerpeacez   4 13  sacloudy   4 14  caicancai   4 15  tisonkun   3 16  DevPJ9   2 17  LinuxSuRen   2 18  sksDonni   2 19  mrproliu   2 20  BFergerson   1 21  Muyu-art   1 22  CalvinKirs   1 23  qazxcdswe123   1 24  achintya-7   1 25  e1ijah1   1 26  kezhenxu94   1 27    BanyanDB Java Client     lujiajing1126   44 1  wu-sheng   21 2  hanahmily   14 3  kezhenxu94   2 4  hailin0   1 5    BanyanDB Helm     ButterBright   6 1  wu-sheng   5 2  hanahmily   3 3  wankai123   1 4  kezhenxu94   1 5    Agent Test Tool     dmsolr   13 1  kezhenxu94   6 2  mrproliu   5 3  wu-sheng   5 4  arugal   4 5  nisiyong   2 6  zhyyu   2 7  EvanLjp   1 8  yaojingguo   1 9  CalvinKirs   1 10  LeePui   1 11  marcingrzejszczak   1 12  Shikugawa   1 13  dagmom   1 14  harvies   1 15  alonelaval   1 16  jmjoy   1 17  pg-yang   1 18  OrezzerO   1 19    SkyWalking Eyes     kezhenxu94   108 1  fgksgf   19 2  wu-sheng   16 3  zooltd   7 4  emschu   6 5  tisonkun   5 6  jmjoy   5 7  keiranmraine   4 8  MoGuGuai-hzr   4 9  mrproliu   4 10  dongzl   3 11  spacewander   3 12  gdams   3 13  rovast   2 14  elijaholmos   2 15  ryanmrichard   2 16  freeqaz   2 17  heyanlong   1 18  zifeihan   1 19  mohammedtabish0   1 20  acelyc111   1 21  Xuanwo   1 22  xiaoyawei   1 23  stumins   1 24  steveklabnik   1 25  chengshiwen   1 26  crholm   1 27  fulmicoton   1 28  Two-Hearts   1 29  kevgo   1 30  halacs   1 31  FushuWang   1 32  Juneezee   1 33  ddlees   1 34  dave-tucker   1 35  antgamdia   1 36  guilload   1 37    SkyWalking Infra E2E     mrproliu   35 1  kezhenxu94   26 2  Humbertzhang   10 3  fgksgf   9 4  chunriyeqiongsaigao   8 5  ethan256   4 
6  Superskyyy   3 7  dashanji   3 8  lujiajing1126   2 9  JohnNiang   2 10  CalvinKirs   1 11  FeynmanZhou   1 12  arugal   1 13  heyanlong   1 14  wu-sheng   1 15    (Archived) Docker Files     hanahmily   34 1  wu-sheng   14 2  JaredTan95   8 3  kezhenxu94   4 4   lixin40**   2 5  aviaviavi   1 6  andrewgkew   1 7  carlvine500   1 8  kkl129   1 9  tristan-tsl   1 10  arugal   1 11  heyanlong   1 12    (Archived) Rocketbot UI     TinyAllen   179 1  Fine0830   126 2  x22x22   27 3  wu-sheng   20 4  JaredTan95   15 5  kezhenxu94   13 6  heihaozi   8 7  bigflybrother   8 8  Jtrust   7 9  dmsolr   5 10  zhaoyuguang   5 11  alonelaval   4 12  tom-pytel   4 13  hanahmily   3 14  aeolusheath   3 15  arugal   3 16  hailin0   2 17  Indifer   2 18   zhaoyuguang**   2 19  xuchangjunjx   2 20  wuguangkuo   2 21  whfjam   2 22  shiluo34   2 23  ruibaby   2 24  wilsonwu   2 25  constanine   2 26  horber   2 27  liqiangz   2 28  leemove   2 29  fuhuo   1 30   denghaobo**   1 31  jianglin1008   1 32  codelipenghui   1 33  lunamagic1978   1 34  novayoung   1 35  probeyang   1 36  dominicqi   1 37  stone-wlg   1 38  surechen   1 39  wallezhang   1 40  wuwen5   1 41   bing**   1 42  xu1009   1 43  huangyoje   1 44  heyanlong   1 45  llissery   1 46   437376068**   1 47  aiyanbo   1 48  BFergerson   1 49  efekaptan   1 50  yanfch   1 51  grissom-grissom   1 52  grissomsh   1 53  Humbertzhang   1 54  kagaya85   1 55  liuhaoyang   1 56  tsuilouis   1 57  masterxxo   1 58  zeaposs   1 59  QHWG67   1 60  Doublemine   1 61  zaunist   1 62  xiaoxiangmoe   1 63  c1ay   1 64  dagmom   1 65  fredster33   1 66    (Archived) Legacy UI     hanahmily   227 1  wu-sheng   123 2  peng-yongsheng   73 3  ascrutae   36 4  TinyAllen   18 5   zhangxin**   7 6   295198088**   5 7   qiu_jy**   5 8  zhaoyuguang   4 9  zuohl   4 10  wendal   3 11  jjlu521016   2 12  withlin   2 13  bai-yang   1 14  zhangkewei   1 15  wynn5a   1 16  clevertension   1 17  cloudgc   1 18   baiyang06**   1 19  WillemJiang   1 20  liuhaoyang   1 21  leizhiyuan   1 22  ajanthan   1 23    (Archived) OAL Generator     wu-sheng   64 1  peng-yongsheng   15 2    SkyAPM-dotnet     liuhaoyang   127 1  snakorse   28 2  wu-sheng   20 3  lu-xiaoshuang   8 4  ElderJames   7 5  yang-xiaodong   7 6  pengweiqhca   7 7  Ahoo-Wang   6 8  inversionhourglass   5 9  feiyun0112   4 10  sampsonye   4 11  KawhiWei   3 12  zeaposs   3 13  kaanid   3 14  qq362220083   3 15  withlin   2 16   xiaoweiyu**   2 17  witskeeper   2 18  beckjin   2 19  ShaoHans   2 20  misaya   1 21  itsvse   1 22  zhujinhu21   1 23  xclw2000   1 24  startewho   1 25  refactor2   1 26  rider11-dev   1 27  linkinshi   1 28  limfriend   1 29  guochen2   1 30  WeihanLi   1 31  SeanKilleen   1 32  cnlangzi   1 33  joesdu   1 34  SpringHgui   1 35  dimaaan   1 36  ChaunceyLin5152   1 37  catcherwong   1 38  BoydenYubin   1 39  andyliyuze   1 40  AlseinX   1 41    cpp2sky     Shikugawa   55 1   wbphub**   13 2  wuwen5   2 3  wu-sheng   2 4  makefriend8   2 5  wbpcode   2 6  JayInnn   1 7    SourceMarker     BFergerson   761 1  MrMineO5   9 2  voqaldev   3 3  chess-equality   3 4  javamak   2 5    Java Plugin Extensions     wu-sheng   17 1  ascrutae   8 2  JaredTan95   2 3  raybi-asus   2 4  zifeihan   2 5  nisiyong   1 6  bitray   1 7  li20020439   1 8  pg-yang   1 9    uranus     harvies   5 1  wu-sheng   1 2    (outdated) CN Documentations     kezhenxu94   23 1  SataQiu   8 2  wu-sheng   4 3  nikyotensai   2 4  ccccye123   2 5  Frapschen   2 6  shalk   2 7  wujun8   2 8  zhangnew   1 9  yazong   1 10  
xiaoping378   1 11  thelight1   1 12   lilulu**   1 13  Hen1ng   1 14  harvies   1 15  dagmom   1 16  alienwow   1 17  system-designer   1 18  Superskyyy   1 19  JaredTan95   1 20  fgksgf   1 21  xing-yin   1 22    (Retired) Transporter Plugins     codeglzhang   3 1  wu-sheng   3 2  dmsolr   2 3  Jargon9   2 4  kezhenxu94   1 5    (Retired) Go2Sky     arugal   27 1  wu-sheng   15 2  hanahmily   12 3  mrproliu   12 4  kagaya85   3 5  easonyipj   2 6  nacx   2 7  Luckyboys   2 8  fgksgf   1 9  Humbertzhang   1 10  JaredTan95   1 11  JJ-Jasmin   1 12  withlin   1 13  yaojingguo   1 14  Just-maple   1 15  kuaikuai   1 16  zhuCheer   1 17  chwjbn   1 18  kehuili   1 19  kezhenxu94   1 20  limfriend   1 21  matianjun1   1 22  lokichoggio   1 23   bing**   1 24  liweiv   1 25    (Retired) Go2Sky Plugins     arugal   15 1  kagaya85   7 2  mrproliu   6 3  wu-sheng   5 4  elza2   3 5  matianjun1   1 6  dgqypl   1 7  zaunist   1 8  kehuili   1 9  newyue588cc   1 10  royal-dargon   1 11    (Retired) SkyAPM PHP Agent     heyanlong   379 1   wangbo78978**   40 2  lpf32   30 3   songzhian**   17 4  songzhian   11 5  wu-sheng   9 6  jmjoy   8 7  remicollet   4 8  kilingzhang   3 9   songzhian**   3 10  xonze   3 11  iamif3000   2 12  mikkeschiren   2 13  anynone   2 14  lvxiao1   2 15  xinfeingxia85   2 16  cyhii   1 17  silverkorn   1 18  AlpherJang   1 19  LJX22222   1 20  MrYzys   1 21  rovast   1 22  SP-66666   1 23  tinyu0   1 24  xudianyang   1 25  huohuanhuan   1 26  kezhenxu94   1 27  limfriend   1 28  ljf-6666   1 29  qjgszzx   1 30  dickens7   1 31  xybingbing   1 32  yaowenqiang   1 33  az13js   1 34    (Retired) SkyAPM Node.js     ascrutae   74 1  kezhenxu94   13 2  wu-sheng   9 3  zouyx   4 4  Jozdortraz   1 5  a526672351   1 6  rovast   1 7  Runrioter   1 8  jasper-zsh   1 9  TJ666   1 10     Loading...  ","excerpt":"Contributor   Project Contributions Ranking  SkyWalking Showcase     kezhenxu94   109 1  wu-sheng …","ref":"/contributors/","title":"Contributors"},{"body":"Create and detect Service Hierarchy Relationship Motivation Service relationship is one of the most important parts of collaborating data in the APM. Service Map is supported for years from tracing to trace analysis. But still due to the means of the probs, a service could be detected from multiple methods, which is the same service in multiple layers. v9 proposal mentioned the concept of the layer. Through this proposal, we plan to establish a kernel-level concept to connect services detected in different layers.\nArchitecture Graph There is no significant architecture-level change.\nPropose Changes The data sources of SkyWalking APM have covered traditional agent installed service, VMs, cloud infra, k8s, etc.\nFor example, a Java service is built in a docker image and is going to be deployed in a k8s cluster, with a sidecar injected due to service mesh managed. The following services would be able to detect cross-layers\n Java service, detected as Java agent installed. A pod of k8s service is detected, due to k8s layer monitoring. Side car perspective service is detected. VM Linux monitoring for a general process, as the container of Java service is deployed on this specific k8s node. Virtual databases, caches, and queues conjectured by agents, and also monitored through k8s monitoring, even traffic monitored by service mesh.  
All these services have logic connections or are identical from a physical perspective, but currently, they may be just similar on name(s), no further metadata connection.\nBy those, we have a chance to move one step ahead to connect the dots of the whole infrastructure. This means, for the first time, we are going to establish the connections among services detected from various layers.\nIn the v10, I am proposing a new concept Service Hierarchy. Service Hierarchy defines the relationships of existing services in various layers. With more kinds of agent tech involved(such as eBPF) and deployment tools(such as operator and agent injector), we could inject relative service/instance metadata and try to build the connections, including,\n Agent injector injects the pod ID into the system env, then Java agent could report the relationship through system properties. Rover(eBPF agent) reveals its next iteration forward k8s monitoring rather than profiling. And add the capabilities to establish connections among k8s pods and service mesh srv.  Meanwhile, as usual with the new major version change, I would expect UI side changes as well. UI should have flexible capabilities to show hierarchy services from the service view and topology view. Also, we could consider a deeper view of the instance part as well.\nImported Dependencies libs and their licenses. No new library is planned to be added to the codebase.\nCompatibility About the protocol, there should be no breaking changes, but enhancements only. New query protocols( service-hierarchy and instance-hierarchy) are considered to be added, some new fields should be added on things like topology query and instance dependencies to list relative services/instances from other layers directly rather than an extra query.\nAbout the data structure, due to the new data concept is going to be created, service hierarchy relative data models are going to be added. If the user is using Elasticsearch and BanyanDB, this should be compatible, they just need to re-run init-mode OAP to extend the existing models. But for SQL database users(MySQL, PostgreSQL), this could require new tables.\nGraphQL query protocol New query protocol hierarchy.graphqls is going to be added.\ntypeHierarchyRelatedService{# The related service ID.id:ID!# The literal name of the #id.name:String!# The related service\u0026#39;s Layer name.layer:String!normal:Boolean!}typeHierarchyRelatedInstance{# The related instance ID.id:ID!# The literal name of the #id. Instance Name.name:String!# Service idserviceId:ID!# The literal name of the #serviceId.serviceName:String!# The service\u0026#39;s Layer name.# Service could have multiple layers, this is the layer of the service that the instance belongs to.layer:String!normal:Boolean!}typeHierarchyServiceRelation{upperService:HierarchyRelatedService!lowerService:HierarchyRelatedService!}typeHierarchyInstanceRelation{upperInstance:HierarchyRelatedInstance!lowerInstance:HierarchyRelatedInstance!}typeServiceHierarchy{relations:[HierarchyServiceRelation!]!}typeInstanceHierarchy{relations:[HierarchyInstanceRelation!]!}typeLayerLevel{# The layer name.layer:String!# The layer level.# The level of the upper service should greater than the level of the lower service.level:Int!}extendtypeQuery{# Query the service hierarchy, based on the given service. Will recursively return all related layers services in the hierarchy.getServiceHierarchy(serviceId:ID!,layer:String!):ServiceHierarchy!# Query the instance hierarchy, based on the given instance. 
Will return all direct related layers instances in the hierarchy, no recursive.getInstanceHierarchy(instanceId:ID!,layer:String!):InstanceHierarchy!# List layer hierarchy levels. The layer levels are defined in the `hierarchy-definition.yml`.listLayerLevels:[LayerLevel!]!}New data models   service_hierarchy_relation\n   Column name Data type Description     id String serviceId.servicelayer-relatedServiceId.relatedServiceLayer   service_id String upper service id   service_layer int upper service layer value   related_service_id String lower service id   related_service_layer int lower service layer value   time_bucket long       instance_hierarchy_relation\n   Column name Data type Description     id String instanceId.servicelayer-relateInstanceId.relatedServiceLayer   instance_id String upper instance id   service_layer int upper service layer value   related_instance_id String lower instance id   related_service_layer int lower service layer value   time_bucket long       Internal APIs Internal APIs should be exposed in the Core module to support building the hierarchy relationship.\npublic void toServiceHierarchyRelation(String upperServiceName, Layer upperServiceLayer, String lowerServiceName, Layer lowerServiceLayer); public void toInstanceHierarchyRelation(String upperInstanceName, String upperServiceName, Layer upperServiceLayer, String lowerInstanceName, String lowerServiceName, Layer lowerServiceLayer); Hierarchy Definition All layers hierarchy relations are defined in the hierarchy-definition.yml file. OAP will check the hierarchy relations before building and use the matching rules to auto match the relations. Here is an example:\n# Define the hierarchy of service layers, the layers under the specific layer are related lower of the layer.# The relation could have a matching rule for auto matching, which are defined in the `auto-matching-rules` section.# All the layers are defined in the file `org.apache.skywalking.oap.server.core.analysis.Layers.java`.hierarchy:MESH:MESH_DP:nameK8S_SERVICE:short-nameMESH_DP:K8S_SERVICE:short-nameGENERAL:K8S_SERVICE:lower-short-name-remove-nsMYSQL:K8S_SERVICE:~VIRTUAL_DATABASE:MYSQL:~# Use Groovy script to define the matching rules, the input parameters are the upper service(u) and the lower service(l) and the return value is a boolean.# which are used to match the relation between the upper service(u) and the lower service(l) on the different layers.auto-matching-rules:# the name of the upper service is equal to the name of the lower servicename:\u0026#34;{ (u, l) -\u0026gt; u.name == l.name }\u0026#34;# the short name of the upper service is equal to the short name of the lower serviceshort-name:\u0026#34;{ (u, l) -\u0026gt; u.shortName == l.shortName }\u0026#34;# remove the namespace from the lower service short namelower-short-name-remove-ns:\u0026#34;{ (u, l) -\u0026gt; u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf(\u0026#39;.\u0026#39;)) }\u0026#34;# The hierarchy level of the service layer, the level is used to define the order of the service layer for UI presentation,# The level of the upper service should greater than the level of the lower service in `hierarchy` section.layer-levels:MESH:3GENERAL:3VIRTUAL_DATABASE:3MYSQL:2MESH_DP:1K8S_SERVICE:0General usage docs This proposal doesn\u0026rsquo;t impact the end user in any way of using SkyWalking. The remarkable change will be in the UI. 
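As an illustration only (not part of the proposal text), a client such as the UI might fetch the hierarchy with a query along the following lines, assuming the hierarchy.graphqls schema sketched above; the operation name and the service ID value are placeholders, and the layer name GENERAL is taken from the hierarchy-definition example.
# Hypothetical usage sketch of the proposed getServiceHierarchy query.
query ServiceHierarchyExample {
  # serviceId is a placeholder; GENERAL is one of the layer names shown in hierarchy-definition.yml.
  getServiceHierarchy(serviceId: "<service-id>", layer: "GENERAL") {
    relations {
      upperService { id name layer normal }
      lowerService { id name layer normal }
    }
  }
}
The response would enumerate HierarchyServiceRelation pairs, each linking an upper-layer service to a lower-layer one, which is the shape the dashboard and topology views described below would consume.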
On the service dashboard and topology map, the user should be able to see the hierarchy relationship, which means other services in other layers are logically the same as the current one. UI would provide the link to jump to the relative service\u0026rsquo;s dashboard.\nNo Goal This proposal doesn\u0026rsquo;t cover all the logic about how to detect the service hierarchy structure. All those should be in a separate SWIP.\n","excerpt":"Create and detect Service Hierarchy Relationship Motivation Service relationship is one of the most …","ref":"/docs/main/next/en/swip/swip-1/","title":"Create and detect Service Hierarchy Relationship"},{"body":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. 
*/ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... 
SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","excerpt":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-tracer/","title":"Create Span"},{"body":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. 
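As a quick reference, the inject/extract round trip described in this section can be written out as the following self-contained sketch. It only uses the Tracer and carrier-item calls listed below; the class name, the header map, and how that map travels between processes (HTTP header, MQ property, RPC attachment) are illustrative assumptions. Remember that inject can be done only in an exit span.

import java.util.HashMap;
import java.util.Map;
import org.apache.skywalking.apm.toolkit.trace.Tracer;
// ContextCarrierRef and CarrierItemRef are assumed to sit in the same toolkit
// package as Tracer; this page only shows the Tracer import explicitly.
import org.apache.skywalking.apm.toolkit.trace.ContextCarrierRef;
import org.apache.skywalking.apm.toolkit.trace.CarrierItemRef;

class CarrierRoundTrip {
    // Sender side: inject the active context into a carrier and copy each
    // carrier item into a plain map that is sent as the message header/metadata.
    static Map<String, String> inject() {
        ContextCarrierRef carrier = new ContextCarrierRef();
        Tracer.inject(carrier);
        Map<String, String> header = new HashMap<>();
        CarrierItemRef item = carrier.items();
        while (item.hasNext()) {
            item = item.next();
            header.put(item.getHeadKey(), item.getHeadValue());
        }
        return header;
    }

    // Receiver side: fill a fresh carrier from the received header map,
    // then extract it so the local context joins the upstream trace.
    static void extract(Map<String, String> header) {
        ContextCarrierRef carrier = new ContextCarrierRef();
        CarrierItemRef item = carrier.items();
        while (item.hasNext()) {
            item = item.next();
            String value = header.get(item.getHeadKey());
            if (value != null) {
                item.setHeadValue(value);
            }
        }
        Tracer.extract(carrier);
    }
}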
The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... 
SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","excerpt":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-tracer/","title":"Create Span"},{"body":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEnteySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... 
SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... // Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); for (Map.Entry\u0026lt;String, String\u0026gt; entry : map.entrySet()) { if (next.hasNext()) { next = next.next(); if (entry.getKey().equals(next.getHeadKey())) next.setHeadValue(entry.getValue()); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... 
ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); for (Map.Entry\u0026lt;String, String\u0026gt; entry : map.entrySet()) { if (next.hasNext()) { next = next.next(); if (entry.getKey().equals(next.getHeadKey())) next.setHeadValue(entry.getValue()); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... 
spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","excerpt":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-tracer/","title":"Create Span"},{"body":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. 
Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... 
SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","excerpt":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-tracer/","title":"Create Span"},{"body":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... 
SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... 
ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... 
spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","excerpt":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-tracer/","title":"Create Span"},{"body":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a collection of a class of resources. Each resource has a name unique to a group.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new group to the database\u0026rsquo;s metadata registry repository. If the group does not currently exist, create operation will create the schema.\nExamples of creating $ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store group data points. Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nGet operation Get operation gets a group\u0026rsquo;s schema.\nExamples of getting $ bydbctl group get -g sw_metric Update operation Update operation updates a group\u0026rsquo;s schema.\nExamples of updating If we want to change the ttl of the data in this group to be 1 day, use the command:\n$ bydbctl group update -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 1 EOF Delete operation Delete operation deletes a group\u0026rsquo;s schema.\nExamples of deleting $ bydbctl group delete -g sw_metric List operation The list operation shows all groups' schema.\nExamples $ bydbctl group list API Reference GroupService v1\n","excerpt":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a …","ref":"/docs/skywalking-banyandb/latest/crud/group/","title":"CRUD Groups"},{"body":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a collection of a class of resources. 
Each resource has a name unique to a group.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new group to the database\u0026rsquo;s metadata registry repository. If the group does not currently exist, create operation will create the schema.\nExamples of creating $ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store group data points. Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nGet operation Get operation gets a group\u0026rsquo;s schema.\nExamples of getting $ bydbctl group get -g sw_metric Update operation Update operation updates a group\u0026rsquo;s schema.\nExamples of updating If we want to change the ttl of the data in this group to be 1 day, use the command:\n$ bydbctl group update -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 1 EOF Delete operation Delete operation deletes a group\u0026rsquo;s schema.\nExamples of deleting $ bydbctl group delete -g sw_metric List operation The list operation shows all groups' schema.\nExamples $ bydbctl group list API Reference GroupService v1\n","excerpt":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a …","ref":"/docs/skywalking-banyandb/next/crud/group/","title":"CRUD Groups"},{"body":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a collection of a class of resources. Each resource has a name unique to a group.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new group to the database\u0026rsquo;s metadata registry repository. If the group does not currently exist, create operation will create the schema.\nExamples of creating $ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store group data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nGet operation Get operation gets a group\u0026rsquo;s schema.\nExamples of getting $ bydbctl group get -g sw_metric Update operation Update operation updates a group\u0026rsquo;s schema.\nExamples of updating If we want to change the ttl of the data in this group to be 1 day, use the command:\n$ bydbctl group update -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 1 EOF Delete operation Delete operation deletes a group\u0026rsquo;s schema.\nExamples of deleting $ bydbctl group delete -g sw_metric List operation The list operation shows all groups' schema.\nExamples $ bydbctl group list API Reference GroupService v1\n","excerpt":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/group/","title":"CRUD Groups"},{"body":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index rule binding is a bridge to connect several index rules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule binding to the database\u0026rsquo;s metadata registry repository. If the index rule binding does not currently exist, create operation will create the schema.\nExamples An index rule binding belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a index rule binding. The subject(stream/measure) and index rule MUST live in the same group with the binding.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRuleBinding data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new indexRuleBinding:\n$ bydbctl indexRuleBinding create -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The YAML contains:\n rules: references to the name of index rules. subject: stream or measure\u0026rsquo;s name and catalog. begin_at and expire_at: the TTL of this binding.  
Get operation Get(Read) operation gets an index rule binding\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRuleBinding get -g sw_stream -n stream_binding Update operation Update operation update an index rule binding\u0026rsquo;s schema.\nExamples updating $ bydbctl indexRuleBinding update -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic # Remove this rule # - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The new YAML removed the index rule extended_tags\u0026rsquo;s binding.\nDelete operation Delete operation delete an index rule binding\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRuleBinding delete -g sw_stream -n stream_binding List operation List operation list all index rule bindings in a group.\nExamples of listing $ bydbctl indexRuleBinding list -g sw_stream API Reference indexRuleBindingService v1\n","excerpt":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index …","ref":"/docs/skywalking-banyandb/latest/crud/index_rule_binding/","title":"CRUD indexRuleBindings"},{"body":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index rule binding is a bridge to connect several index rules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule binding to the database\u0026rsquo;s metadata registry repository. If the index rule binding does not currently exist, create operation will create the schema.\nExamples An index rule binding belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a index rule binding. The subject(stream/measure) and index rule MUST live in the same group with the binding.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRuleBinding data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new indexRuleBinding:\n$ bydbctl indexRuleBinding create -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The YAML contains:\n rules: references to the name of index rules. subject: stream or measure\u0026rsquo;s name and catalog. begin_at and expire_at: the TTL of this binding.  
Get operation Get(Read) operation gets an index rule binding\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRuleBinding get -g sw_stream -n stream_binding Update operation Update operation update an index rule binding\u0026rsquo;s schema.\nExamples updating $ bydbctl indexRuleBinding update -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic # Remove this rule # - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The new YAML removed the index rule extended_tags\u0026rsquo;s binding.\nDelete operation Delete operation delete an index rule binding\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRuleBinding delete -g sw_stream -n stream_binding List operation List operation list all index rule bindings in a group.\nExamples of listing $ bydbctl indexRuleBinding list -g sw_stream API Reference indexRuleBindingService v1\n","excerpt":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index …","ref":"/docs/skywalking-banyandb/next/crud/index_rule_binding/","title":"CRUD indexRuleBindings"},{"body":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index rule binding is a bridge to connect several index rules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule binding to the database\u0026rsquo;s metadata registry repository. If the index rule binding does not currently exist, create operation will create the schema.\nExamples An index rule binding belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a index rule binding. The subject(stream/measure) and index rule MUST live in the same group with the binding.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRuleBinding data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new indexRuleBinding:\n$ bydbctl indexRuleBinding create -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The YAML contains:\n rules: references to the name of index rules. subject: stream or measure\u0026rsquo;s name and catalog. begin_at and expire_at: the TTL of this binding.  
Get operation Get(Read) operation gets an index rule binding\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRuleBinding get -g sw_stream -n stream_binding Update operation Update operation update an index rule binding\u0026rsquo;s schema.\nExamples updating $ bydbctl indexRuleBinding update -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic # Remove this rule # - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The new YAML removed the index rule extended_tags\u0026rsquo;s binding.\nDelete operation Delete operation delete an index rule binding\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRuleBinding delete -g sw_stream -n stream_binding List operation List operation list all index rule bindings in a group.\nExamples of listing $ bydbctl indexRuleBinding list -g sw_stream API Reference indexRuleBindingService v1\n","excerpt":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/index_rule_binding/","title":"CRUD indexRuleBindings"},{"body":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how to generate indices based on tags and the index type. IndexRule should bind to a subject(stream or measure) through an IndexRuleBinding to generate proper indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule to the database\u0026rsquo;s metadata registry repository. If the index rule does not currently exist, create operation will create the schema.\nExamples of creating An index rule belongs to its subjects' group. We should create such a group if there is no such group.\nThe command supposes that the index rule will bind to streams. So it creates a CATALOG_STREAM group here.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_stream catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRule data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the next command will create a new index rule:\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_TREE location: LOCATION_GLOBAL EOF This YAML creates an index rule which uses the tag trace_id to generate a TREE_TYPE index which is located at GLOBAL.\nGet operation Get(Read) operation gets an index rule\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRule get -g sw_stream -n trace_id Update operation Update operation updates an index rule\u0026rsquo;s schema.\nExamples of updating This example changes the type from TREE to INVERTED.\n$ bydbctl indexRule update -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED location: LOCATION_GLOBAL EOF Delete operation Delete operation deletes an index rule\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRule delete -g sw_stream -n trace_id List operation List operation list all index rules' schema in a group.\nExamples of listing $ bydbctl indexRule list -g sw_stream API Reference indexRuleService v1\n","excerpt":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how …","ref":"/docs/skywalking-banyandb/latest/crud/index_rule/","title":"CRUD IndexRules"},{"body":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how to generate indices based on tags and the index type. IndexRule should bind to a subject(stream or measure) through an IndexRuleBinding to generate proper indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule to the database\u0026rsquo;s metadata registry repository. If the index rule does not currently exist, create operation will create the schema.\nExamples of creating An index rule belongs to its subjects' group. We should create such a group if there is no such group.\nThe command supposes that the index rule will bind to streams. So it creates a CATALOG_STREAM group here.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_stream catalog: CATALOG_STREAM resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRule data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the next command will create a new index rule:\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED EOF This YAML creates an index rule which uses the tag trace_id to generate a TYPE_INVERTED index.\nGet operation Get(Read) operation gets an index rule\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRule get -g sw_stream -n trace_id Update operation Update operation updates an index rule\u0026rsquo;s schema.\nExamples of updating This example changes the type from TREE to INVERTED.\n$ bydbctl indexRule update -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED EOF Delete operation Delete operation deletes an index rule\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRule delete -g sw_stream -n trace_id List operation List operation list all index rules' schema in a group.\nExamples of listing $ bydbctl indexRule list -g sw_stream API Reference indexRuleService v1\n","excerpt":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how …","ref":"/docs/skywalking-banyandb/next/crud/index_rule/","title":"CRUD IndexRules"},{"body":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how to generate indices based on tags and the index type. IndexRule should bind to a subject(stream or measure) through an IndexRuleBinding to generate proper indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule to the database\u0026rsquo;s metadata registry repository. If the index rule does not currently exist, create operation will create the schema.\nExamples of creating An index rule belongs to its subjects' group. We should create such a group if there is no such group.\nThe command supposes that the index rule will bind to streams. So it creates a CATALOG_STREAM group here.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_stream catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRule data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the next command will create a new index rule:\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_TREE location: LOCATION_GLOBAL EOF This YAML creates an index rule which uses the tag trace_id to generate a TREE_TYPE index which is located at GLOBAL.\nGet operation Get(Read) operation gets an index rule\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRule get -g sw_stream -n trace_id Update operation Update operation updates an index rule\u0026rsquo;s schema.\nExamples of updating This example changes the type from TREE to INVERTED.\n$ bydbctl indexRule update -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED location: LOCATION_GLOBAL EOF Delete operation Delete operation deletes an index rule\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRule delete -g sw_stream -n trace_id List operation List operation list all index rules' schema in a group.\nExamples of listing $ bydbctl indexRule list -g sw_stream API Reference indexRuleService v1\n","excerpt":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/index_rule/","title":"CRUD IndexRules"},{"body":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new measure to the database\u0026rsquo;s metadata registry repository. If the measure does not currently exist, create operation will create the schema.\nExamples of creating A measure belongs to a unique group. We should create such a group with a catalog CATALOG_MEASURE before creating a measure.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store data points. 
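(A hedged variation, not from the upstream page: the Data Model chapter notes that the interval units can be HOUR as well as DAY, so a group holding very dense data could use a shorter segment. The group name sw_metric_dense and the 6-hour value are illustrative only.)\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric_dense catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_HOUR num: 6 ttl: unit: UNIT_DAY num: 7 EOF Returning to the sw_metric group created above: 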
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the below command will create a new measure:\n$ bydbctl measure create -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tag_families: - name: default tags: - name: id type: TAG_TYPE_STRING - name: entity_id type: TAG_TYPE_STRING fields: - name: total field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD - name: value field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD entity: tag_names: - entity_id interval: 1m EOF service_cpm_minute expects to ingest a series of data points with a minute interval.\nGet operation Get(Read) operation gets a measure\u0026rsquo;s schema.\nExamples of getting $ bydbctl measure get -g sw_metric -n service_cpm_minute Update operation Update operation changes a measure\u0026rsquo;s schema.\nExamples of updating $ bydbctl measure update -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tag_names: - entity_id EOF Delete operation Delete operation removes a measure\u0026rsquo;s schema.\nExamples of deleting $ bydbctl measure delete -g sw_metric -n service_cpm_minute List operation The list operation shows all measures' schema in a group.\nExamples of listing $ bydbctl measure list -g sw_metric API Reference MeasureService v1\n","excerpt":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line …","ref":"/docs/skywalking-banyandb/latest/crud/measure/schema/","title":"CRUD Measures"},{"body":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new measure to the database\u0026rsquo;s metadata registry repository. If the measure does not currently exist, create operation will create the schema.\nExamples of creating A measure belongs to a unique group. We should create such a group with a catalog CATALOG_MEASURE before creating a measure.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store data points. 
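(A hedged aside: retention is a per-group option, so keeping metrics for a month only requires a different ttl block. The group name sw_metric_longterm and the 30-day value are illustrative only.)\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric_longterm catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 30 EOF Returning to the sw_metric group created above: 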
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the below command will create a new measure:\n$ bydbctl measure create -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tag_families: - name: default tags: - name: id type: TAG_TYPE_STRING - name: entity_id type: TAG_TYPE_STRING fields: - name: total field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD - name: value field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD entity: tag_names: - entity_id interval: 1m EOF service_cpm_minute expects to ingest a series of data points with a minute interval.\nGet operation Get(Read) operation gets a measure\u0026rsquo;s schema.\nExamples of getting $ bydbctl measure get -g sw_metric -n service_cpm_minute Update operation Update operation changes a measure\u0026rsquo;s schema.\nExamples of updating $ bydbctl measure update -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tag_names: - entity_id EOF Delete operation Delete operation removes a measure\u0026rsquo;s schema.\nExamples of deleting $ bydbctl measure delete -g sw_metric -n service_cpm_minute List operation The list operation shows all measures' schema in a group.\nExamples of listing $ bydbctl measure list -g sw_metric API Reference MeasureService v1\n","excerpt":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line …","ref":"/docs/skywalking-banyandb/next/crud/measure/schema/","title":"CRUD Measures"},{"body":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new measure to the database\u0026rsquo;s metadata registry repository. If the measure does not currently exist, create operation will create the schema.\nExamples of creating A measure belongs to a unique group. We should create such a group with a catalog CATALOG_MEASURE before creating a measure.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the below command will create a new measure:\n$ bydbctl measure create -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tag_families: - name: default tags: - name: id type: TAG_TYPE_STRING - name: entity_id type: TAG_TYPE_STRING fields: - name: total field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD - name: value field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD entity: tag_names: - entity_id interval: 1m EOF service_cpm_minute expects to ingest a series of data points with a minute interval.\nGet operation Get(Read) operation gets a measure\u0026rsquo;s schema.\nExamples of getting $ bydbctl measure get -g sw_metric -n service_cpm_minute Update operation Update operation changes a measure\u0026rsquo;s schema.\nExamples of updating $ bydbctl measure update -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tag_names: - entity_id EOF Delete operation Delete operation removes a measure\u0026rsquo;s schema.\nExamples of deleting $ bydbctl measure delete -g sw_metric -n service_cpm_minute List operation The list operation shows all measures' schema in a group.\nExamples of listing $ bydbctl measure list -g sw_metric API Reference MeasureService v1\n","excerpt":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/measure/schema/","title":"CRUD Measures"},{"body":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user defined data.\nbydbctl is the command line tool in examples.\nApply (Create/Update) operation Apply creates a property if it\u0026rsquo;s absent, or updates an existed one based on a strategy. If the property does not currently exist, create operation will create the property.\nExamples of applying A property belongs to a unique group. 
We should create such a group before creating a property.\nThe group\u0026rsquo;s catalog should be empty.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw EOF Then, below command will create a new property:\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: name value: str: value: \u0026#34;hello\u0026#34; - key: state value: str: value: \u0026#34;succeed\u0026#34; EOF The operation supports updating partial tags.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; EOF TTL is supported in the operation.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; Get operation Get operation gets a property.\nExamples of getting $ bydbctl property get -g sw -n temp_data --id General-Service The operation could filter data by tags.\n$ bydbctl property get -g sw -n temp_data --id General-Service --tags state Delete operation Delete operation delete a property.\nExamples of deleting $ bydbctl property delete -g sw -n temp_data --id General-Service The delete operation could remove specific tags instead of the whole property.\n$ bydbctl property delete -g sw -n temp_data --id General-Service --tags state List operation List operation lists all properties in a group.\nExamples of listing in a group $ bydbctl property list -g sw List operation lists all properties in a group with a name.\nExamples of listing in a group with a name $ bydbctl property list -g sw -n temp_data TTL field in a property TTL field in a property is used to set the time to live of the property. The property will be deleted automatically after the TTL.\nThis functionality is supported by the lease mechanism. The readonly lease_id field is used to identify the lease of the property.\nExamples of setting TTL $ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; EOF The lease_id is returned in the response. You can use get operation to get the property with the lease_id as well.\n$ bydbctl property get -g sw -n temp_data --id General-Service The lease_id is used to keep the property alive. You can use keepalive operation to keep the property alive. When the keepalive operation is called, the property\u0026rsquo;s TTL will be reset to the original value.\n$ bydbctl property keepalive --lease_id 1 API Reference MeasureService v1\n","excerpt":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user …","ref":"/docs/skywalking-banyandb/latest/crud/property/","title":"CRUD Property"},{"body":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user defined data.\nbydbctl is the command line tool in examples.\nApply (Create/Update) operation Apply creates a property if it\u0026rsquo;s absent, or updates an existed one based on a strategy. If the property does not currently exist, create operation will create the property.\nExamples of applying A property belongs to a unique group. 
We should create such a group before creating a property.\nThe group\u0026rsquo;s catalog should be empty.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw EOF Then, below command will create a new property:\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: name value: str: value: \u0026#34;hello\u0026#34; - key: state value: str: value: \u0026#34;succeed\u0026#34; EOF The operation supports updating partial tags.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; EOF TTL is supported in the operation.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; Get operation Get operation gets a property.\nExamples of getting $ bydbctl property get -g sw -n temp_data --id General-Service The operation could filter data by tags.\n$ bydbctl property get -g sw -n temp_data --id General-Service --tags state Delete operation Delete operation delete a property.\nExamples of deleting $ bydbctl property delete -g sw -n temp_data --id General-Service The delete operation could remove specific tags instead of the whole property.\n$ bydbctl property delete -g sw -n temp_data --id General-Service --tags state List operation List operation lists all properties in a group.\nExamples of listing in a group $ bydbctl property list -g sw List operation lists all properties in a group with a name.\nExamples of listing in a group with a name $ bydbctl property list -g sw -n temp_data TTL field in a property TTL field in a property is used to set the time to live of the property. The property will be deleted automatically after the TTL.\nThis functionality is supported by the lease mechanism. The readonly lease_id field is used to identify the lease of the property.\nExamples of setting TTL $ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; EOF The lease_id is returned in the response. You can use get operation to get the property with the lease_id as well.\n$ bydbctl property get -g sw -n temp_data --id General-Service The lease_id is used to keep the property alive. You can use keepalive operation to keep the property alive. When the keepalive operation is called, the property\u0026rsquo;s TTL will be reset to the original value.\n$ bydbctl property keepalive --lease_id 1 API Reference MeasureService v1\n","excerpt":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user …","ref":"/docs/skywalking-banyandb/next/crud/property/","title":"CRUD Property"},{"body":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user defined data.\nbydbctl is the command line tool in examples.\nApply (Create/Update) operation Apply creates a property if it\u0026rsquo;s absent, or updates an existed one based on a strategy. If the property does not currently exist, create operation will create the property.\nExamples of applying A property belongs to a unique group. 
We should create such a group before creating a property.\nThe group\u0026rsquo;s catalog should be empty.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw EOF Then, below command will create a new property:\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: name value: str: value: \u0026#34;hello\u0026#34; - key: state value: str: value: \u0026#34;succeed\u0026#34; EOF The operation supports updating partial tags.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; EOF TTL is supported in the operation.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; Get operation Get operation gets a property.\nExamples of getting $ bydbctl property get -g sw -n temp_data --id General-Service The operation could filter data by tags.\n$ bydbctl property get -g sw -n temp_data --id General-Service --tags state Delete operation Delete operation delete a property.\nExamples of deleting $ bydbctl property delete -g sw -n temp_data --id General-Service The delete operation could remove specific tags instead of the whole property.\n$ bydbctl property delete -g sw -n temp_data --id General-Service --tags state List operation List operation lists all properties in a group.\nExamples of listing in a group $ bydbctl property list -g sw List operation lists all properties in a group with a name.\nExamples of listing in a group with a name $ bydbctl property list -g sw -n temp_data TTL field in a property TTL field in a property is used to set the time to live of the property. The property will be deleted automatically after the TTL.\nThis functionality is supported by the lease mechanism. The readonly lease_id field is used to identify the lease of the property.\nExamples of setting TTL $ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; EOF The lease_id is returned in the response. You can use get operation to get the property with the lease_id as well.\n$ bydbctl property get -g sw -n temp_data --id General-Service The lease_id is used to keep the property alive. You can use keepalive operation to keep the property alive. When the keepalive operation is called, the property\u0026rsquo;s TTL will be reset to the original value.\n$ bydbctl property keepalive --lease_id 1 API Reference MeasureService v1\n","excerpt":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/property/","title":"CRUD Property"},{"body":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line tool in examples.\nStream intends to store streaming data, for example, traces or logs.\nCreate operation Create operation adds a new stream to the database\u0026rsquo;s metadata registry repository. If the stream does not currently exist, create operation will create the schema.\nExamples of creating A stream belongs to a unique group. 
We should create such a group with a catalog CATALOG_STREAM before creating a stream.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store stream data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new stream:\n$ bydbctl stream create -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Get operation Get(Read) operation get a stream\u0026rsquo;s schema.\nExamples of getting $ bydbctl stream get -g default -n sw Update operation Update operation update a stream\u0026rsquo;s schema.\nExamples of updating bydbctl is the command line tool to update a stream in this example.\n$ bydbctl stream update -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Delete operation Delete operation delete a stream\u0026rsquo;s schema.\nExamples of deleting bydbctl is the command line tool to delete a stream in this example.\n$ bydbctl stream delete -g default -n sw List operation List operation list all streams' schema in a group.\nExamples of listing $ bydbctl stream list -g default API Reference StreamService v1\n","excerpt":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line …","ref":"/docs/skywalking-banyandb/latest/crud/stream/schema/","title":"CRUD Streams"},{"body":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line tool in examples.\nStream intends to store streaming data, for example, traces or logs.\nCreate operation Create operation adds a new stream to the database\u0026rsquo;s metadata registry repository. If the stream does not currently exist, create operation will create the schema.\nExamples of creating A stream belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a stream.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store stream data points. 
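(A hedged sketch, borrowing the IndexRuleBinding shape from the Data Model chapter: a binding like the one below is what would later make the trace_id tag of the sw stream created just below searchable. The binding name sw_binding is illustrative, and an index rule named trace_id would have to exist in the default group first.)\nmetadata: name: sw_binding group: default rules: - trace_id subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; Returning to the group just created: 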
Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new stream:\n$ bydbctl stream create -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Get operation Get(Read) operation get a stream\u0026rsquo;s schema.\nExamples of getting $ bydbctl stream get -g default -n sw Update operation Update operation update a stream\u0026rsquo;s schema.\nExamples of updating bydbctl is the command line tool to update a stream in this example.\n$ bydbctl stream update -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Delete operation Delete operation delete a stream\u0026rsquo;s schema.\nExamples of deleting bydbctl is the command line tool to delete a stream in this example.\n$ bydbctl stream delete -g default -n sw List operation List operation list all streams' schema in a group.\nExamples of listing $ bydbctl stream list -g default API Reference StreamService v1\n","excerpt":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line …","ref":"/docs/skywalking-banyandb/next/crud/stream/schema/","title":"CRUD Streams"},{"body":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line tool in examples.\nStream intends to store streaming data, for example, traces or logs.\nCreate operation Create operation adds a new stream to the database\u0026rsquo;s metadata registry repository. If the stream does not currently exist, create operation will create the schema.\nExamples of creating A stream belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a stream.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store stream data points. 
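(A hedged aside reusing the indexRule command from the IndexRules page: an additional index rule could be declared in this same group so that more tags become searchable. The duration tag name is taken from the binding example in the Data Model chapter and is illustrative here.)\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: duration group: default tags: - duration type: TYPE_INVERTED EOF Returning to the group just created: 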
Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new stream:\n$ bydbctl stream create -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Get operation Get(Read) operation get a stream\u0026rsquo;s schema.\nExamples of getting $ bydbctl stream get -g default -n sw Update operation Update operation update a stream\u0026rsquo;s schema.\nExamples of updating bydbctl is the command line tool to update a stream in this example.\n$ bydbctl stream update -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Delete operation Delete operation delete a stream\u0026rsquo;s schema.\nExamples of deleting bydbctl is the command line tool to delete a stream in this example.\n$ bydbctl stream delete -g default -n sw List operation List operation list all streams' schema in a group.\nExamples of listing $ bydbctl stream list -g default API Reference StreamService v1\n","excerpt":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/stream/schema/","title":"CRUD Streams"},{"body":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.9+.\nUse kustomize to customise your deployment  Clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Edit file adapter/config/adapter/kustomization.yaml file to change your preferences. If you prefer to your private docker image, a quick path to override ADAPTER_IMG environment variable : export ADAPTER_IMG=\u0026lt;private registry\u0026gt;/metrics-adapter:\u0026lt;tag\u0026gt;\n  Use make to generate the final manifests and deploy:\n  make -C adapter deploy Configuration The adapter takes the standard Kubernetes generic API server arguments (including those for authentication and authorization). By default, it will attempt to using Kubernetes in-cluster config to connect to the cluster.\nIt takes the following addition arguments specific to configuring how the adapter talks to SkyWalking OAP cluster:\n --oap-addr The address of OAP cluster. --metric-filter-regex A regular expression to filter metrics retrieved from OAP cluster. --refresh-interval This is the interval at which to update the cache of available metrics from OAP cluster. --namespace A prefix to which metrics are appended. The format is \u0026lsquo;namespace|metric_name\u0026rsquo;, defaults to skywalking.apache.org  HPA Configuration External metrics allow you to autoscale your cluster based on any metric available in OAP cluster. Just provide a metric block with a name and selector, and use the External metric type.\n- type:Externalexternal:metric:name:\u0026lt;metric_name\u0026gt;selector:matchLabels:\u0026lt;label_key\u0026gt;:\u0026lt;label_value\u0026gt;...target:.... metric_name: The name of metric generated by OAL or other subsystem. label: label_key is the entity name of skywalking metrics. 
if the label value contains special characters more than ., - and _, service.str.\u0026lt;number\u0026gt; represent the literal of label value, and service.byte.\u0026lt;number\u0026gt; could encode these special characters to hex bytes.  Supposing the service name is v1|productpage|bookinfo|demo, the matchLabels should be like the below piece:\nmatchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;// the hex byte of \u0026#34;|\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34; Caveats: byte label only accept a single character. That means || should be transformed to service.byte.0:\u0026quot;7c\u0026quot; and service.byte.1:\u0026quot;7c\u0026quot; instead of service.byte.0:\u0026quot;7c7c\u0026quot;\n The options of label keys are:\n service, service.str.\u0026lt;number\u0026gt; or service.byte.\u0026lt;number\u0026gt; The name of the service. instance, instance.str.\u0026lt;number\u0026gt; or instance.byte.\u0026lt;number\u0026gt; The name of the service instance. endpoint, endpoint.str.\u0026lt;number\u0026gt; or endpoint.byte.\u0026lt;number\u0026gt; The name of the endpoint. label, label.str.\u0026lt;number\u0026gt; or label.byte.\u0026lt;number\u0026gt; is optional, The labels you need to query, used for querying multi-labels metrics. Unlike swctl, this key only supports a single label due to the specification of the custom metrics API.  For example, if your application name is front_gateway, you could add the following section to your HorizontalPodAutoscaler manifest to specify that you need less than 80ms of 90th latency.\n- type:Externalexternal:metric:name:skywalking.apache.org|service_percentileselector:matchLabels:service:front_gateway# The index of [P50, P75, P90, P95, P99]. 2 is the index of P90(90%)label:\u0026#34;2\u0026#34;target:type:Valuevalue:80If the service is v1|productpage|bookinfo|demo|-:\n- type:Externalexternal:metric:name:skywalking.apache.org|service_cpmselector:matchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34;\u0026#34;service.byte.7\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.byte.8\u0026#34;: \u0026#34;2d\u0026#34;target:type:Valuevalue:80","excerpt":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is …","ref":"/docs/skywalking-swck/latest/custom-metrics-adapter/","title":"Custom metrics Adapter"},{"body":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.9+.\nUse kustomize to customise your deployment  Clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Edit file adapter/config/adapter/kustomization.yaml file to change your preferences. 
If you prefer to your private docker image, a quick path to override ADAPTER_IMG environment variable : export ADAPTER_IMG=\u0026lt;private registry\u0026gt;/metrics-adapter:\u0026lt;tag\u0026gt;\n  Use make to generate the final manifests and deploy:\n  make -C adapter deploy Configuration The adapter takes the standard Kubernetes generic API server arguments (including those for authentication and authorization). By default, it will attempt to using Kubernetes in-cluster config to connect to the cluster.\nIt takes the following addition arguments specific to configuring how the adapter talks to SkyWalking OAP cluster:\n --oap-addr The address of OAP cluster. --metric-filter-regex A regular expression to filter metrics retrieved from OAP cluster. --refresh-interval This is the interval at which to update the cache of available metrics from OAP cluster. --namespace A prefix to which metrics are appended. The format is \u0026lsquo;namespace|metric_name\u0026rsquo;, defaults to skywalking.apache.org  HPA Configuration External metrics allow you to autoscale your cluster based on any metric available in OAP cluster. Just provide a metric block with a name and selector, and use the External metric type.\n- type:Externalexternal:metric:name:\u0026lt;metric_name\u0026gt;selector:matchLabels:\u0026lt;label_key\u0026gt;:\u0026lt;label_value\u0026gt;...target:.... metric_name: The name of metric generated by OAL or other subsystem. label: label_key is the entity name of skywalking metrics. if the label value contains special characters more than ., - and _, service.str.\u0026lt;number\u0026gt; represent the literal of label value, and service.byte.\u0026lt;number\u0026gt; could encode these special characters to hex bytes.  Supposing the service name is v1|productpage|bookinfo|demo, the matchLabels should be like the below piece:\nmatchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;// the hex byte of \u0026#34;|\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34; Caveats: byte label only accept a single character. That means || should be transformed to service.byte.0:\u0026quot;7c\u0026quot; and service.byte.1:\u0026quot;7c\u0026quot; instead of service.byte.0:\u0026quot;7c7c\u0026quot;\n The options of label keys are:\n service, service.str.\u0026lt;number\u0026gt; or service.byte.\u0026lt;number\u0026gt; The name of the service. instance, instance.str.\u0026lt;number\u0026gt; or instance.byte.\u0026lt;number\u0026gt; The name of the service instance. endpoint, endpoint.str.\u0026lt;number\u0026gt; or endpoint.byte.\u0026lt;number\u0026gt; The name of the endpoint. label, label.str.\u0026lt;number\u0026gt; or label.byte.\u0026lt;number\u0026gt; is optional, The labels you need to query, used for querying multi-labels metrics. Unlike swctl, this key only supports a single label due to the specification of the custom metrics API.  
For example, if your application name is front_gateway, you could add the following section to your HorizontalPodAutoscaler manifest to specify that you need less than 80ms of 90th latency.\n- type:Externalexternal:metric:name:skywalking.apache.org|service_percentileselector:matchLabels:service:front_gateway# The index of [P50, P75, P90, P95, P99]. 2 is the index of P90(90%)label:\u0026#34;2\u0026#34;target:type:Valuevalue:80If the service is v1|productpage|bookinfo|demo|-:\n- type:Externalexternal:metric:name:skywalking.apache.org|service_cpmselector:matchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34;\u0026#34;service.byte.7\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.byte.8\u0026#34;: \u0026#34;2d\u0026#34;target:type:Valuevalue:80","excerpt":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is …","ref":"/docs/skywalking-swck/next/custom-metrics-adapter/","title":"Custom metrics Adapter"},{"body":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.9+.\nUse kustomize to customise your deployment  Clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Edit file adapter/config/adapter/kustomization.yaml file to change your preferences. If you prefer to your private docker image, a quick path to override ADAPTER_IMG environment variable : export ADAPTER_IMG=\u0026lt;private registry\u0026gt;/metrics-adapter:\u0026lt;tag\u0026gt;\n  Use make to generate the final manifests and deploy:\n  make -C adapter deploy Configuration The adapter takes the standard Kubernetes generic API server arguments (including those for authentication and authorization). By default, it will attempt to using Kubernetes in-cluster config to connect to the cluster.\nIt takes the following addition arguments specific to configuring how the adapter talks to SkyWalking OAP cluster:\n --oap-addr The address of OAP cluster. --metric-filter-regex A regular expression to filter metrics retrieved from OAP cluster. --refresh-interval This is the interval at which to update the cache of available metrics from OAP cluster. --namespace A prefix to which metrics are appended. The format is \u0026lsquo;namespace|metric_name\u0026rsquo;, defaults to skywalking.apache.org  HPA Configuration External metrics allow you to autoscale your cluster based on any metric available in OAP cluster. Just provide a metric block with a name and selector, and use the External metric type.\n- type:Externalexternal:metric:name:\u0026lt;metric_name\u0026gt;selector:matchLabels:\u0026lt;label_key\u0026gt;:\u0026lt;label_value\u0026gt;...target:.... metric_name: The name of metric generated by OAL or other subsystem. label: label_key is the entity name of skywalking metrics. if the label value contains special characters more than ., - and _, service.str.\u0026lt;number\u0026gt; represent the literal of label value, and service.byte.\u0026lt;number\u0026gt; could encode these special characters to hex bytes.  
Supposing the service name is v1|productpage|bookinfo|demo, the matchLabels should be like the below piece:\nmatchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;// the hex byte of \u0026#34;|\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34; Caveats: byte label only accept a single character. That means || should be transformed to service.byte.0:\u0026quot;7c\u0026quot; and service.byte.1:\u0026quot;7c\u0026quot; instead of service.byte.0:\u0026quot;7c7c\u0026quot;\n The options of label keys are:\n service, service.str.\u0026lt;number\u0026gt; or service.byte.\u0026lt;number\u0026gt; The name of the service. instance, instance.str.\u0026lt;number\u0026gt; or instance.byte.\u0026lt;number\u0026gt; The name of the service instance. endpoint, endpoint.str.\u0026lt;number\u0026gt; or endpoint.byte.\u0026lt;number\u0026gt; The name of the endpoint. label, label.str.\u0026lt;number\u0026gt; or label.byte.\u0026lt;number\u0026gt; is optional, The labels you need to query, used for querying multi-labels metrics. Unlike swctl, this key only supports a single label due to the specification of the custom metrics API.  For example, if your application name is front_gateway, you could add the following section to your HorizontalPodAutoscaler manifest to specify that you need less than 80ms of 90th latency.\n- type:Externalexternal:metric:name:skywalking.apache.org|service_percentileselector:matchLabels:service:front_gateway# The index of [P50, P75, P90, P95, P99]. 2 is the index of P90(90%)label:\u0026#34;2\u0026#34;target:type:Valuevalue:80If the service is v1|productpage|bookinfo|demo|-:\n- type:Externalexternal:metric:name:skywalking.apache.org|service_cpmselector:matchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34;\u0026#34;service.byte.7\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.byte.8\u0026#34;: \u0026#34;2d\u0026#34;target:type:Valuevalue:80","excerpt":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is …","ref":"/docs/skywalking-swck/v0.9.0/custom-metrics-adapter/","title":"Custom metrics Adapter"},{"body":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the high-level data organization data model data retrieval  You can also find examples of how to interact with BanyanDB using bydbctl, how to create and drop groups, or how to create, read, update and drop streams/measures.\nStructure of BanyanDB The hierarchy that data is organized into streams, measures and properties in groups.\nGroups Group does not provide a mechanism for isolating groups of resources within a single banyand-server but is the minimal unit to manage physical structures. Each group contains a set of options, like retention policy, shard number, etc. 
Several shards distribute in a group.\nmetadata:name:othersor\nmetadata:name:sw_metriccatalog:CATALOG_MEASUREresource_opts:shard_num:2block_interval:unit:UNIT_HOURnum:2segment_interval:unit:UNIT_DAYnum:1ttl:unit:UNIT_DAYnum:7The group creates two shards to store data points. Every day, it would create a segment that will generate a block every 2 hours. The available units are HOUR and DAY. The data in this group will keep 7 days.\nEvery other resource should belong to a group. The catalog indicates which kind of data model the group contains.\n UNSPECIFIED: Property or other data models. MEASURE: Measure. STREAM: Stream.  Group Registration Operations\nMeasures BanyanDB lets you define a measure as follows:\nmetadata:name:service_cpm_minutegroup:sw_metrictag_families:- name:defaulttags:- name:idtype:TAG_TYPE_STRING- name:entity_idtype:TAG_TYPE_STRINGfields:- name:totalfield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTD- name:valuefield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTDentity:tag_names:- entity_idinterval:1mMeasure consists of a sequence of data points. Each data point contains tags and fields.\nTags are key-value pairs. The database engine can index tag values by referring to the index rules and rule bindings, confining the query to filtering data points based on tags bound to an index rule.\nTags are grouped into unique tag_families which are the logical and physical grouping of tags.\nMeasure supports the following tag types:\n STRING : Text INT : 64 bits long integer STRING_ARRAY : A group of strings INT_ARRAY : A group of integers DATA_BINARY : Raw binary  A group of selected tags composite an entity that points out a specific time series the data point belongs to. The database engine has capacities to encode and compress values in the same time series. Users should select appropriate tag combinations to optimize the data size. Another role of entity is the sharding key of data points, determining how to fragment data between shards.\nFields are also key-value pairs like tags. But the value of each field is the actual value of a single data point. The database engine would encode and compress the field\u0026rsquo;s values in the same time series. The query operation is forbidden to filter data points based on a field\u0026rsquo;s value. You could apply aggregation functions to them.\nMeasure supports the following fields types:\n STRING : Text INT : 64 bits long integer DATA_BINARY : Raw binary FLOAT : 64 bits double-precision floating-point number  Measure supports the following encoding methods:\n GORILLA : GORILLA encoding is lossless. It is more suitable for a numerical sequence with similar values and is not recommended for sequence data with large fluctuations.  Measure supports the types of the following fields:\n ZSTD : Zstandard is a real-time compression algorithm, that provides high compression ratios. It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder. For BanyanDB focus on speed.  Another option named interval plays a critical role in encoding. It indicates the time range between two adjacent data points in a time series and implies that all data points belonging to the same time series are distributed based on a fixed interval. A better practice for the naming measure is to append the interval literal to the tail, for example, service_cpm_minute. 
It\u0026rsquo;s a parameter of GORILLA encoding method.\nMeasure Registration Operations\nTopNAggregation Find the Top-N entities from a dataset in a time range is a common scenario. We could see the diagrams like \u0026ldquo;Top 10 throughput endpoints\u0026rdquo;, and \u0026ldquo;Most slow 20 endpoints\u0026rdquo;, etc on SkyWalking\u0026rsquo;s UI. Exploring and analyzing the top entities can always reveal some high-value information.\nBanyanDB introduces the TopNAggregation, aiming to pre-calculate the top/bottom entities during the measure writing phase. In the query phase, BanyanDB can quickly retrieve the top/bottom records. The performance would be much better than top() function which is based on the query phase aggregation procedure.\n Caveat: TopNAggregation is an approximate realization, to use it well you need have a good understanding with the algorithm as well as the data distribution.\n ---metadata:name:endpoint_cpm_minute_top_bottomgroup:sw_metricsource_measure:name:endpoint_cpm_minutegroup:sw_metricfield_name:valuefield_value_sort:SORT_UNSPECIFIEDgroup_by_tag_names:- entity_idcounters_number:10000lru_size:10endpoint_cpm_minute_top_bottom is watching the data ingesting of the source measure endpoint_cpm_minute to generate both top 1000 and bottom 1000 entity cardinalities. If only Top 1000 or Bottom 1000 is needed, the field_value_sort could be DESC or ASC respectively.\n SORT_DESC: Top-N. In a series of 1,2,3...1000. Top10\u0026rsquo;s result is 1000,999...991. SORT_ASC: Bottom-N. In a series of 1,2,3...1000. Bottom10\u0026rsquo;s result is 1,2...10.  Tags in group_by_tag_names are used as dimensions. These tags can be searched (only equality is supported) in the query phase. Tags do not exist in group_by_tag_names will be dropped in the pre-calculating phase.\ncounters_number denotes the number of entity cardinality. As the above example shows, calculating the Top 100 among 10 thousands is easier than among 10 millions.\nlru_size is a late data optimizing flag. The higher the number, the more late data, but the more memory space is consumed.\nTopNAggregation Registration Operations\nStreams Stream shares many details with Measure except for abandoning field. Stream focuses on high throughput data collection, for example, tracing and logging. The database engine also supports compressing stream entries based on entity, but no encoding process is involved.\nStream Registration Operations\nProperties Property is a schema-less or schema-free data model. That means you DO NOT have to define a schema before writing a Property\nProperty is a standard key-value store. Users could store their metadata or items on a property and get a sequential consistency guarantee. BanyanDB\u0026rsquo;s motivation for introducing such a particular structure is to support most APM scenarios that need to store critical data, especially for a distributed database cluster.\nWe should create a group before creating a property.\nCreating group.\nmetadata:name:swCreating property.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;Property supports a three-level hierarchy, group/name/id, that is more flexible than schemaful data models.\nThe property supports the TTL mechanism. 
You could set the ttl field to specify the time to live.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;ttl:\u0026#34;1h\u0026#34;\u0026ldquo;General-Service\u0026rdquo; will be dropped after 1 hour. If you want to extend the TTL, you could use the \u0026ldquo;keepalive\u0026rdquo; operation. The \u0026ldquo;lease_id\u0026rdquo; is returned in the apply response. You can use get operation to get the property with the lease_id as well.\nlease_id:1\u0026ldquo;General-Service\u0026rdquo; lives another 1 hour.\nYou could Create, Read, Update and Drop a property, and update or drop several tags instead of the entire property.\nProperty Operations\nData Models Data models in BanyanDB derive from some classic data models.\nTimeSeries Model A time series is a series of data points indexed in time order. Most commonly, a time series is a sequence taken at successive equally spaced points in time. Thus it is a sequence of discrete-time data.\nYou can store time series data points through Stream or Measure. Examples of Stream are logs, traces and events. Measure could ingest metrics, profiles, etc.\nKey-Value Model The key-value data model is a subset of the Property data model. Every property has a key \u0026lt;group\u0026gt;/\u0026lt;name\u0026gt;/\u0026lt;id\u0026gt; that identifies a property within a collection. This key acts as the primary key to retrieve the data. You can set it when creating a key. It cannot be changed later because the attribute is immutable.\nThere are several Key-Value pairs in a property, named Tags. You could add, update and drop them based on the tag\u0026rsquo;s key.\nData Retrieval Queries and Writes are used to filter schemaful data models, Stream, Measure or TopNAggregation based on certain criteria, as well as to compute or store new data.\n MeasureService provides Write, Query and TopN StreamService provides Write, Query  IndexRule \u0026amp; IndexRuleBinding An IndexRule indicates which tags are indexed. An IndexRuleBinding binds an index rule to the target resources or the subject. There might be several rule bindings to a single resource, but their effective time range could NOT overlap.\nmetadata:name:trace_idgroup:sw_streamtags:- trace_idtype:TYPE_TREElocation:LOCATION_GLOBALIndexRule supports selecting two distinct kinds of index structures. The INVERTED index is the primary option when users set up an index rule. It\u0026rsquo;s suitable for most tag indexing due to a better memory usage ratio and query performance. When there are many unique tag values here, such as the ID tag and numeric duration tag, the TREE index could be better. This index saves much memory space with high-cardinality data sets.\nMost IndexRule\u0026rsquo;s location is LOCAL which places indices with their indexed data together. IndexRule also provides a GLOBAL location to place some indices on a higher layer of hierarchical structure. 
This option intends to optimize the full-scan operation for some querying cases of no time range specification, such as finding spans from a trace by trace_id.\nmetadata:name:stream_bindinggroup:sw_streamrules:- trace_id- duration- endpoint_id- status_code- http.method- db.instance- db.type- mq.broker- mq.queue- mq.topic- extended_tagssubject:catalog:CATALOG_STREAMname:swbegin_at:\u0026#39;2021-04-15T01:30:15.01Z\u0026#39;expire_at:\u0026#39;2121-04-15T01:30:15.01Z\u0026#39;IndexRuleBinding binds IndexRules to a subject, Stream or Measure. The time range between begin_at and expire_at is the effective time.\nIndexRule Registration Operations\nIndexRuleBinding Registration Operations\nIndex Granularity In BanyanDB, Stream and Measure have different levels of index granularity.\nFor Measure, the indexed target is a data point with specific tag values. The query processor uses the tag values defined in the entity field of the Measure to compose a series ID, which is used to find the several series that match the query criteria. The entity field is a set of tags that defines the unique identity of a time series, and it restricts the tags that can be used as indexed target.\nEach series contains a sequence of data points that share the same tag values. Once the query processor has identified the relevant series, it scans the data points between the desired time range in those series to find the data that matches the query criteria.\nFor example, suppose we have a Measure with the following entity field: {service, operation, instance}. If we get a data point with the following tag values: service=shopping, operation=search, and instance=prod-1, then the query processor would use those tag values to construct a series ID that uniquely identifies the series containing that data point. The query processor would then scan the relevant data points in that series to find the data that matches the query criteria.\nThe side effect of the measure index is that each indexed value has to represent a unique seriesID. This is because the series ID is constructed by concatenating the indexed tag values in the entity field. If two series have the same entity field, they would have the same series ID and would be indistinguishable from one another. This means that if you want to index a tag that is not part of the entity field, you would need to ensure that it is unique across all series. One way to do this would be to include the tag in the entity field, but this may not always be feasible or desirable depending on your use case.\nFor Stream, the indexed target is an element that is a combination of the series ID and timestamp. The Stream query processor uses the time range to find target files. The indexed result points to the target element. The processor doesn\u0026rsquo;t have to scan a series of elements in this time range, which reduces the query time.\nFor example, suppose we have a Stream with the following tags: service, operation, instance, and status_code. If we get a data point with the following tag values: service=shopping, operation=search, instance=prod-1, and status_code=200, and the data point\u0026rsquo;s time is 1:00pm on January 1st, 2022, then the series ID for this data point would be shopping_search_prod-1_200_1641052800, where 1641052800 is the Unix timestamp representing 1:00pm on January 1st, 2022.\nThe indexed target would be the combination of the series ID and timestamp, which in this case would be shopping_search_prod-1_200_1641052800. 
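(For contrast, a hedged sketch of the Measure side of the same example: with the entity {service, operation, instance}, the tag values shopping, search and prod-1 would identify the series on their own, giving an illustrative series ID of shopping_search_prod-1 with no timestamp appended, because Measure series are keyed only by the entity tags.)\n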
The Stream query processor would use the time range specified in the query to find target files and then search within those files for the indexed target.\nThe following is a comparison of the indexing granularity, performance, and flexibility of Stream and Measure indices:\n   Indexing Granularity Performance Flexibility     Measure indices are constructed for each series and are based on the entity field of the Measure. Each indexed value has to represent a unique seriesID. Measure index is faster than Stream index. Measure index is less flexible and requires more care when indexing tags that are not part of the entity field.   Stream indices are constructed for each element and are based on the series ID and timestamp. Stream index is slower than Measure index. Stream index is more flexible than Measure index and can index any tag value.    In general, Measure indices are faster and more efficient, but they require more care when indexing tags that are not part of the entity field. Stream indices, on the other hand, are slower and take up more space, but they can index any tag value and do not have the same side effects as Measure indices.\n","excerpt":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the …","ref":"/docs/skywalking-banyandb/latest/concept/data-model/","title":"Data Model"},{"body":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the high-level data organization data model data retrieval  You can also find examples of how to interact with BanyanDB using bydbctl, how to create and drop groups, or how to create, read, update and drop streams/measures.\nStructure of BanyanDB The hierarchy that data is organized into streams, measures and properties in groups.\nGroups Group does not provide a mechanism for isolating groups of resources within a single banyand-server but is the minimal unit to manage physical structures. Each group contains a set of options, like retention policy, shard number, etc. Several shards distribute in a group.\nmetadata:name:othersor\nmetadata:name:sw_metriccatalog:CATALOG_MEASUREresource_opts:shard_num:2segment_interval:unit:UNIT_DAYnum:1ttl:unit:UNIT_DAYnum:7The group creates two shards to store data points. Every day, it would create a segment that will generate a block every 2 hours. The available units are HOUR and DAY. The data in this group will keep 7 days.\nEvery other resource should belong to a group. The catalog indicates which kind of data model the group contains.\n UNSPECIFIED: Property or other data models. MEASURE: Measure. STREAM: Stream.  Group Registration Operations\nMeasures BanyanDB lets you define a measure as follows:\nmetadata:name:service_cpm_minutegroup:sw_metrictag_families:- name:defaulttags:- name:idtype:TAG_TYPE_STRING- name:entity_idtype:TAG_TYPE_STRINGfields:- name:totalfield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTD- name:valuefield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTDentity:tag_names:- entity_idinterval:1mMeasure consists of a sequence of data points. Each data point contains tags and fields.\nTags are key-value pairs. 
The database engine can index tag values by referring to the index rules and rule bindings, confining the query to filtering data points based on tags bound to an index rule.\nTags are grouped into unique tag_families, which are the logical and physical grouping of tags.\nMeasure supports the following tag types:\n STRING : Text INT : 64 bits long integer STRING_ARRAY : A group of strings INT_ARRAY : A group of integers DATA_BINARY : Raw binary  A group of selected tags composes an entity that identifies the specific time series a data point belongs to. The database engine is able to encode and compress values in the same time series. Users should select appropriate tag combinations to optimize the data size. Another role of entity is the sharding key of data points, determining how to fragment data between shards.\nFields are also key-value pairs like tags. But the value of each field is the actual value of a single data point. The database engine would encode and compress the field\u0026rsquo;s values in the same time series. The query operation is not allowed to filter data points based on a field\u0026rsquo;s value. You could apply aggregation functions to them.\nMeasure supports the following field types:\n STRING : Text INT : 64 bits long integer DATA_BINARY : Raw binary FLOAT : 64 bits double-precision floating-point number  Measure supports the following encoding methods:\n GORILLA : GORILLA encoding is lossless. It is more suitable for a numerical sequence with similar values and is not recommended for sequence data with large fluctuations.  Measure supports the following compression methods:\n ZSTD : Zstandard is a real-time compression algorithm that provides high compression ratios. It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder. For BanyanDB, the focus is on speed.  Another option named interval plays a critical role in encoding. It indicates the time range between two adjacent data points in a time series and implies that all data points belonging to the same time series are distributed based on a fixed interval. A good practice when naming a measure is to append the interval literal to the tail, for example, service_cpm_minute. It\u0026rsquo;s a parameter of the GORILLA encoding method.\nMeasure Registration Operations\nTopNAggregation Finding the Top-N entities from a dataset in a time range is a common scenario. We can see diagrams like \u0026ldquo;Top 10 throughput endpoints\u0026rdquo; and \u0026ldquo;20 slowest endpoints\u0026rdquo;, etc., on SkyWalking\u0026rsquo;s UI. Exploring and analyzing the top entities can always reveal some high-value information.\nBanyanDB introduces the TopNAggregation, aiming to pre-calculate the top/bottom entities during the measure writing phase. In the query phase, BanyanDB can quickly retrieve the top/bottom records. The performance is much better than the top() function, which is based on the query-phase aggregation procedure.\n Caveat: TopNAggregation is an approximate realization; to use it well, you need to have a good understanding of the algorithm as well as the data distribution.\n ---metadata:name:endpoint_cpm_minute_top_bottomgroup:sw_metricsource_measure:name:endpoint_cpm_minutegroup:sw_metricfield_name:valuefield_value_sort:SORT_UNSPECIFIEDgroup_by_tag_names:- entity_idcounters_number:10000lru_size:10endpoint_cpm_minute_top_bottom watches the data ingestion of the source measure endpoint_cpm_minute to generate both the top 1000 and bottom 1000 entity cardinalities.
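The TopNAggregation definition above is easier to read when re-indented. A sketch of the same resource (indentation illustrative; names and values from the example):

```yaml
---
metadata:
  name: endpoint_cpm_minute_top_bottom
  group: sw_metric
source_measure:
  name: endpoint_cpm_minute
  group: sw_metric
field_name: value
field_value_sort: SORT_UNSPECIFIED
group_by_tag_names:
  - entity_id
counters_number: 10000
lru_size: 10
```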
If only the Top 1000 or Bottom 1000 is needed, field_value_sort could be DESC or ASC, respectively.\n SORT_DESC: Top-N. In a series of 1,2,3...1000, Top10\u0026rsquo;s result is 1000,999...991. SORT_ASC: Bottom-N. In a series of 1,2,3...1000, Bottom10\u0026rsquo;s result is 1,2...10.  Tags in group_by_tag_names are used as dimensions. These tags can be searched (only equality is supported) in the query phase. Tags that do not exist in group_by_tag_names will be dropped in the pre-calculating phase.\ncounters_number denotes the entity cardinality to track. As the above example shows, calculating the Top 100 among 10 thousand entities is easier than among 10 million.\nlru_size is a flag for optimizing late-arriving data. The higher the number, the more late data can be handled, but the more memory space is consumed.\nTopNAggregation Registration Operations\nStreams Stream shares many details with Measure, except that it abandons fields. Stream focuses on high-throughput data collection, for example, tracing and logging. The database engine also supports compressing stream entries based on entity, but no encoding process is involved.\nStream Registration Operations\nProperties Property is a schema-less or schema-free data model. That means you DO NOT have to define a schema before writing a Property.\nProperty is a standard key-value store. Users could store their metadata or items on a property and get a sequential consistency guarantee. BanyanDB\u0026rsquo;s motivation for introducing such a particular structure is to support most APM scenarios that need to store critical data, especially for a distributed database cluster.\nA group should be created before creating a property.\nCreating a group.\nmetadata:name:swCreating a property.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;Property supports a three-level hierarchy, group/name/id, which is more flexible than schemaful data models.\nProperty supports the TTL mechanism. You could set the ttl field to specify the time to live.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;ttl:\u0026#34;1h\u0026#34;\u0026ldquo;General-Service\u0026rdquo; will be dropped after 1 hour. If you want to extend the TTL, you could use the \u0026ldquo;keepalive\u0026rdquo; operation. The \u0026ldquo;lease_id\u0026rdquo; is returned in the apply response.
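For readability, here is a sketch of the property-with-TTL example above, re-indented as YAML. The field names and values come from the example; the exact nesting (container holding group and name, with id alongside) is my reading of the run-together text, so treat the indentation as illustrative.

```yaml
metadata:
  container:
    group: sw
    name: temp_data
  id: General-Service
tags:
  - key: name
    value:
      str:
        value: "hello"
  - key: state
    value:
      str:
        value: "succeed"
ttl: "1h"
```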
You can set it when creating a key. It cannot be changed later because the attribute is immutable.\nThere are several Key-Value pairs in a property, named Tags. You could add, update and drop them based on the tag\u0026rsquo;s key.\nData Retrieval Queries and Writes are used to filter schemaful data models, Stream, Measure or TopNAggregation based on certain criteria, as well as to compute or store new data.\n MeasureService provides Write, Query and TopN StreamService provides Write, Query  IndexRule \u0026amp; IndexRuleBinding An IndexRule indicates which tags are indexed. An IndexRuleBinding binds an index rule to the target resources or the subject. There might be several rule bindings to a single resource, but their effective time range could NOT overlap.\nmetadata:name:trace_idgroup:sw_streamtags:- trace_idtype:TYPE_INVERTEDIndexRule supports selecting two distinct kinds of index structures. The INVERTED index is the primary option when users set up an index rule. It\u0026rsquo;s suitable for most tag indexing due to a better memory usage ratio and query performance.\nmetadata:name:stream_bindinggroup:sw_streamrules:- trace_id- duration- endpoint_id- status_code- http.method- db.instance- db.type- mq.broker- mq.queue- mq.topic- extended_tagssubject:catalog:CATALOG_STREAMname:swbegin_at:\u0026#39;2021-04-15T01:30:15.01Z\u0026#39;expire_at:\u0026#39;2121-04-15T01:30:15.01Z\u0026#39;IndexRuleBinding binds IndexRules to a subject, Stream or Measure. The time range between begin_at and expire_at is the effective time.\nIndexRule Registration Operations\nIndexRuleBinding Registration Operations\nIndex Granularity In BanyanDB, Stream and Measure have different levels of index granularity.\nFor Measure, the indexed target is a data point with specific tag values. The query processor uses the tag values defined in the entity field of the Measure to compose a series ID, which is used to find the several series that match the query criteria. The entity field is a set of tags that defines the unique identity of a time series, and it restricts the tags that can be used as indexed target.\nEach series contains a sequence of data points that share the same tag values. Once the query processor has identified the relevant series, it scans the data points between the desired time range in those series to find the data that matches the query criteria.\nFor example, suppose we have a Measure with the following entity field: {service, operation, instance}. If we get a data point with the following tag values: service=shopping, operation=search, and instance=prod-1, then the query processor would use those tag values to construct a series ID that uniquely identifies the series containing that data point. The query processor would then scan the relevant data points in that series to find the data that matches the query criteria.\nThe side effect of the measure index is that each indexed value has to represent a unique seriesID. This is because the series ID is constructed by concatenating the indexed tag values in the entity field. If two series have the same entity field, they would have the same series ID and would be indistinguishable from one another. This means that if you want to index a tag that is not part of the entity field, you would need to ensure that it is unique across all series. 
One way to do this would be to include the tag in the entity field, but this may not always be feasible or desirable depending on your use case.\nFor Stream, the indexed target is an element that is a combination of the series ID and timestamp. The Stream query processor uses the time range to find target files. The indexed result points to the target element. The processor doesn\u0026rsquo;t have to scan a series of elements in this time range, which reduces the query time.\nFor example, suppose we have a Stream with the following tags: service, operation, instance, and status_code. If we get a data point with the following tag values: service=shopping, operation=search, instance=prod-1, and status_code=200, and the data point\u0026rsquo;s time is 1:00pm on January 1st, 2022, then the series ID for this data point would be shopping_search_prod-1_200_1641052800, where 1641052800 is the Unix timestamp representing 1:00pm on January 1st, 2022.\nThe indexed target would be the combination of the series ID and timestamp, which in this case would be shopping_search_prod-1_200_1641052800. The Stream query processor would use the time range specified in the query to find target files and then search within those files for the indexed target.\nThe following is a comparison of the indexing granularity, performance, and flexibility of Stream and Measure indices:\n   Indexing Granularity Performance Flexibility     Measure indices are constructed for each series and are based on the entity field of the Measure. Each indexed value has to represent a unique seriesID. Measure index is faster than Stream index. Measure index is less flexible and requires more care when indexing tags that are not part of the entity field.   Stream indices are constructed for each element and are based on the series ID and timestamp. Stream index is slower than Measure index. Stream index is more flexible than Measure index and can index any tag value.    In general, Measure indices are faster and more efficient, but they require more care when indexing tags that are not part of the entity field. Stream indices, on the other hand, are slower and take up more space, but they can index any tag value and do not have the same side effects as Measure indices.\n","excerpt":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the …","ref":"/docs/skywalking-banyandb/next/concept/data-model/","title":"Data Model"},{"body":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the high-level data organization data model data retrieval  You can also find examples of how to interact with BanyanDB using bydbctl, how to create and drop groups, or how to create, read, update and drop streams/measures.\nStructure of BanyanDB The hierarchy that data is organized into streams, measures and properties in groups.\nGroups Group does not provide a mechanism for isolating groups of resources within a single banyand-server but is the minimal unit to manage physical structures. Each group contains a set of options, like retention policy, shard number, etc. Several shards distribute in a group.\nmetadata:name:othersor\nmetadata:name:sw_metriccatalog:CATALOG_MEASUREresource_opts:shard_num:2block_interval:unit:UNIT_HOURnum:2segment_interval:unit:UNIT_DAYnum:1ttl:unit:UNIT_DAYnum:7The group creates two shards to store data points. Every day, it would create a segment that will generate a block every 2 hours. The available units are HOUR and DAY. 
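The sw_metric group definition in this copy of the document includes block_interval as well; re-indented for readability (indentation illustrative, values from the example above):

```yaml
metadata:
  name: sw_metric
catalog: CATALOG_MEASURE
resource_opts:
  shard_num: 2
  block_interval:
    unit: UNIT_HOUR
    num: 2
  segment_interval:
    unit: UNIT_DAY
    num: 1
  ttl:
    unit: UNIT_DAY
    num: 7
```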
The data in this group will keep 7 days.\nEvery other resource should belong to a group. The catalog indicates which kind of data model the group contains.\n UNSPECIFIED: Property or other data models. MEASURE: Measure. STREAM: Stream.  Group Registration Operations\nMeasures BanyanDB lets you define a measure as follows:\nmetadata:name:service_cpm_minutegroup:sw_metrictag_families:- name:defaulttags:- name:idtype:TAG_TYPE_STRING- name:entity_idtype:TAG_TYPE_STRINGfields:- name:totalfield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTD- name:valuefield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTDentity:tag_names:- entity_idinterval:1mMeasure consists of a sequence of data points. Each data point contains tags and fields.\nTags are key-value pairs. The database engine can index tag values by referring to the index rules and rule bindings, confining the query to filtering data points based on tags bound to an index rule.\nTags are grouped into unique tag_families which are the logical and physical grouping of tags.\nMeasure supports the following tag types:\n STRING : Text INT : 64 bits long integer STRING_ARRAY : A group of strings INT_ARRAY : A group of integers DATA_BINARY : Raw binary  A group of selected tags composite an entity that points out a specific time series the data point belongs to. The database engine has capacities to encode and compress values in the same time series. Users should select appropriate tag combinations to optimize the data size. Another role of entity is the sharding key of data points, determining how to fragment data between shards.\nFields are also key-value pairs like tags. But the value of each field is the actual value of a single data point. The database engine would encode and compress the field\u0026rsquo;s values in the same time series. The query operation is forbidden to filter data points based on a field\u0026rsquo;s value. You could apply aggregation functions to them.\nMeasure supports the following fields types:\n STRING : Text INT : 64 bits long integer DATA_BINARY : Raw binary FLOAT : 64 bits double-precision floating-point number  Measure supports the following encoding methods:\n GORILLA : GORILLA encoding is lossless. It is more suitable for a numerical sequence with similar values and is not recommended for sequence data with large fluctuations.  Measure supports the types of the following fields:\n ZSTD : Zstandard is a real-time compression algorithm, that provides high compression ratios. It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder. For BanyanDB focus on speed.  Another option named interval plays a critical role in encoding. It indicates the time range between two adjacent data points in a time series and implies that all data points belonging to the same time series are distributed based on a fixed interval. A better practice for the naming measure is to append the interval literal to the tail, for example, service_cpm_minute. It\u0026rsquo;s a parameter of GORILLA encoding method.\nMeasure Registration Operations\nTopNAggregation Find the Top-N entities from a dataset in a time range is a common scenario. We could see the diagrams like \u0026ldquo;Top 10 throughput endpoints\u0026rdquo;, and \u0026ldquo;Most slow 20 endpoints\u0026rdquo;, etc on SkyWalking\u0026rsquo;s UI. 
Exploring and analyzing the top entities can always reveal some high-value information.\nBanyanDB introduces the TopNAggregation, aiming to pre-calculate the top/bottom entities during the measure writing phase. In the query phase, BanyanDB can quickly retrieve the top/bottom records. The performance would be much better than top() function which is based on the query phase aggregation procedure.\n Caveat: TopNAggregation is an approximate realization, to use it well you need have a good understanding with the algorithm as well as the data distribution.\n ---metadata:name:endpoint_cpm_minute_top_bottomgroup:sw_metricsource_measure:name:endpoint_cpm_minutegroup:sw_metricfield_name:valuefield_value_sort:SORT_UNSPECIFIEDgroup_by_tag_names:- entity_idcounters_number:10000lru_size:10endpoint_cpm_minute_top_bottom is watching the data ingesting of the source measure endpoint_cpm_minute to generate both top 1000 and bottom 1000 entity cardinalities. If only Top 1000 or Bottom 1000 is needed, the field_value_sort could be DESC or ASC respectively.\n SORT_DESC: Top-N. In a series of 1,2,3...1000. Top10\u0026rsquo;s result is 1000,999...991. SORT_ASC: Bottom-N. In a series of 1,2,3...1000. Bottom10\u0026rsquo;s result is 1,2...10.  Tags in group_by_tag_names are used as dimensions. These tags can be searched (only equality is supported) in the query phase. Tags do not exist in group_by_tag_names will be dropped in the pre-calculating phase.\ncounters_number denotes the number of entity cardinality. As the above example shows, calculating the Top 100 among 10 thousands is easier than among 10 millions.\nlru_size is a late data optimizing flag. The higher the number, the more late data, but the more memory space is consumed.\nTopNAggregation Registration Operations\nStreams Stream shares many details with Measure except for abandoning field. Stream focuses on high throughput data collection, for example, tracing and logging. The database engine also supports compressing stream entries based on entity, but no encoding process is involved.\nStream Registration Operations\nProperties Property is a schema-less or schema-free data model. That means you DO NOT have to define a schema before writing a Property\nProperty is a standard key-value store. Users could store their metadata or items on a property and get a sequential consistency guarantee. BanyanDB\u0026rsquo;s motivation for introducing such a particular structure is to support most APM scenarios that need to store critical data, especially for a distributed database cluster.\nWe should create a group before creating a property.\nCreating group.\nmetadata:name:swCreating property.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;Property supports a three-level hierarchy, group/name/id, that is more flexible than schemaful data models.\nThe property supports the TTL mechanism. You could set the ttl field to specify the time to live.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;ttl:\u0026#34;1h\u0026#34;\u0026ldquo;General-Service\u0026rdquo; will be dropped after 1 hour. If you want to extend the TTL, you could use the \u0026ldquo;keepalive\u0026rdquo; operation. The \u0026ldquo;lease_id\u0026rdquo; is returned in the apply response. 
You can use get operation to get the property with the lease_id as well.\nlease_id:1\u0026ldquo;General-Service\u0026rdquo; lives another 1 hour.\nYou could Create, Read, Update and Drop a property, and update or drop several tags instead of the entire property.\nProperty Operations\nData Models Data models in BanyanDB derive from some classic data models.\nTimeSeries Model A time series is a series of data points indexed in time order. Most commonly, a time series is a sequence taken at successive equally spaced points in time. Thus it is a sequence of discrete-time data.\nYou can store time series data points through Stream or Measure. Examples of Stream are logs, traces and events. Measure could ingest metrics, profiles, etc.\nKey-Value Model The key-value data model is a subset of the Property data model. Every property has a key \u0026lt;group\u0026gt;/\u0026lt;name\u0026gt;/\u0026lt;id\u0026gt; that identifies a property within a collection. This key acts as the primary key to retrieve the data. You can set it when creating a key. It cannot be changed later because the attribute is immutable.\nThere are several Key-Value pairs in a property, named Tags. You could add, update and drop them based on the tag\u0026rsquo;s key.\nData Retrieval Queries and Writes are used to filter schemaful data models, Stream, Measure or TopNAggregation based on certain criteria, as well as to compute or store new data.\n MeasureService provides Write, Query and TopN StreamService provides Write, Query  IndexRule \u0026amp; IndexRuleBinding An IndexRule indicates which tags are indexed. An IndexRuleBinding binds an index rule to the target resources or the subject. There might be several rule bindings to a single resource, but their effective time range could NOT overlap.\nmetadata:name:trace_idgroup:sw_streamtags:- trace_idtype:TYPE_TREElocation:LOCATION_GLOBALIndexRule supports selecting two distinct kinds of index structures. The INVERTED index is the primary option when users set up an index rule. It\u0026rsquo;s suitable for most tag indexing due to a better memory usage ratio and query performance. When there are many unique tag values here, such as the ID tag and numeric duration tag, the TREE index could be better. This index saves much memory space with high-cardinality data sets.\nMost IndexRule\u0026rsquo;s location is LOCAL which places indices with their indexed data together. IndexRule also provides a GLOBAL location to place some indices on a higher layer of hierarchical structure. This option intends to optimize the full-scan operation for some querying cases of no time range specification, such as finding spans from a trace by trace_id.\nmetadata:name:stream_bindinggroup:sw_streamrules:- trace_id- duration- endpoint_id- status_code- http.method- db.instance- db.type- mq.broker- mq.queue- mq.topic- extended_tagssubject:catalog:CATALOG_STREAMname:swbegin_at:\u0026#39;2021-04-15T01:30:15.01Z\u0026#39;expire_at:\u0026#39;2121-04-15T01:30:15.01Z\u0026#39;IndexRuleBinding binds IndexRules to a subject, Stream or Measure. The time range between begin_at and expire_at is the effective time.\nIndexRule Registration Operations\nIndexRuleBinding Registration Operations\nIndex Granularity In BanyanDB, Stream and Measure have different levels of index granularity.\nFor Measure, the indexed target is a data point with specific tag values. 
The query processor uses the tag values defined in the entity field of the Measure to compose a series ID, which is used to find the several series that match the query criteria. The entity field is a set of tags that defines the unique identity of a time series, and it restricts the tags that can be used as indexed target.\nEach series contains a sequence of data points that share the same tag values. Once the query processor has identified the relevant series, it scans the data points between the desired time range in those series to find the data that matches the query criteria.\nFor example, suppose we have a Measure with the following entity field: {service, operation, instance}. If we get a data point with the following tag values: service=shopping, operation=search, and instance=prod-1, then the query processor would use those tag values to construct a series ID that uniquely identifies the series containing that data point. The query processor would then scan the relevant data points in that series to find the data that matches the query criteria.\nThe side effect of the measure index is that each indexed value has to represent a unique seriesID. This is because the series ID is constructed by concatenating the indexed tag values in the entity field. If two series have the same entity field, they would have the same series ID and would be indistinguishable from one another. This means that if you want to index a tag that is not part of the entity field, you would need to ensure that it is unique across all series. One way to do this would be to include the tag in the entity field, but this may not always be feasible or desirable depending on your use case.\nFor Stream, the indexed target is an element that is a combination of the series ID and timestamp. The Stream query processor uses the time range to find target files. The indexed result points to the target element. The processor doesn\u0026rsquo;t have to scan a series of elements in this time range, which reduces the query time.\nFor example, suppose we have a Stream with the following tags: service, operation, instance, and status_code. If we get a data point with the following tag values: service=shopping, operation=search, instance=prod-1, and status_code=200, and the data point\u0026rsquo;s time is 1:00pm on January 1st, 2022, then the series ID for this data point would be shopping_search_prod-1_200_1641052800, where 1641052800 is the Unix timestamp representing 1:00pm on January 1st, 2022.\nThe indexed target would be the combination of the series ID and timestamp, which in this case would be shopping_search_prod-1_200_1641052800. The Stream query processor would use the time range specified in the query to find target files and then search within those files for the indexed target.\nThe following is a comparison of the indexing granularity, performance, and flexibility of Stream and Measure indices:\n   Indexing Granularity Performance Flexibility     Measure indices are constructed for each series and are based on the entity field of the Measure. Each indexed value has to represent a unique seriesID. Measure index is faster than Stream index. Measure index is less flexible and requires more care when indexing tags that are not part of the entity field.   Stream indices are constructed for each element and are based on the series ID and timestamp. Stream index is slower than Measure index. Stream index is more flexible than Measure index and can index any tag value.    
In general, Measure indices are faster and more efficient, but they require more care when indexing tags that are not part of the entity field. Stream indices, on the other hand, are slower and take up more space, but they can index any tag value and do not have the same side effects as Measure indices.\n","excerpt":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the …","ref":"/docs/skywalking-banyandb/v0.5.0/concept/data-model/","title":"Data Model"},{"body":"Define Service Hierarchy SkyWalking v10 introduces a new concept, Service Hierarchy, which defines the relationships of logically identical services across various layers. The concept and design can be found here.\nService Hierarchy Configuration All the relationships are defined in the config/hierarchy-definition.yml file. You can customize it according to your own needs. Here is an example:\nhierarchy:MESH:MESH_DP:nameK8S_SERVICE:short-nameMESH_DP:K8S_SERVICE:short-nameGENERAL:K8S_SERVICE:lower-short-name-remove-nsMYSQL:K8S_SERVICE:short-namePOSTGRESQL:K8S_SERVICE:short-nameSO11Y_OAP:K8S_SERVICE:short-nameVIRTUAL_DATABASE:MYSQL:lower-short-name-with-fqdnPOSTGRESQL:lower-short-name-with-fqdnauto-matching-rules:# the name of the upper service is equal to the name of the lower servicename:\u0026#34;{ (u, l) -\u0026gt; u.name == l.name }\u0026#34;# the short name of the upper service is equal to the short name of the lower serviceshort-name:\u0026#34;{ (u, l) -\u0026gt; u.shortName == l.shortName }\u0026#34;# remove the k8s namespace from the lower service short name# this rule is only works on k8s env.lower-short-name-remove-ns:\u0026#34;{ (u, l) -\u0026gt; { if(l.shortName.lastIndexOf(\u0026#39;.\u0026#39;) \u0026gt; 0) return u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf(\u0026#39;.\u0026#39;)); return false; } }\u0026#34;# the short name of the upper remove port is equal to the short name of the lower service with fqdn suffix# this rule is only works on k8s env.lower-short-name-with-fqdn:\u0026#34;{ (u, l) -\u0026gt; { if(u.shortName.lastIndexOf(\u0026#39;:\u0026#39;) \u0026gt; 0) return u.shortName.substring(0, u.shortName.lastIndexOf(\u0026#39;:\u0026#39;)) == l.shortName.concat(\u0026#39;.svc.cluster.local\u0026#39;); return false; } }\u0026#34;layer-levels:# The hierarchy level of the service layer, the level is used to define the order of the service layer for UI presentation.# The level of the upper service should greater than the level of the lower service in `hierarchy` section.MESH:3GENERAL:3SO11Y_OAP:3VIRTUAL_DATABASE:3MYSQL:2POSTGRESQL:2MESH_DP:1K8S_SERVICE:0Hierarchy  The hierarchy of service layers is defined in the hierarchy section. The layers listed under a specific layer are its related lower layers. A relation could have a matching rule for auto matching, which is defined in the auto-matching-rules section. A relation without a matching rule should be built through the internal API. All the layers are defined in the file org.apache.skywalking.oap.server.core.analysis.Layers.java. If the hierarchy is not defined, the service hierarchy relationship will not be built. If you want to add a new relationship, you should make sure it can be matched automatically by the Auto Matching Rules. Notice: some hierarchy relations and auto matching rules only work in a k8s environment.  Auto Matching Rules  The auto matching rules are defined in the auto-matching-rules section.
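For reference, a small formatted excerpt of the hierarchy-definition.yml shown above, covering a few relations, two matching rules, and a subset of the levels (indentation illustrative; keys, rules, and levels are taken from the example):

```yaml
hierarchy:
  MESH:
    MESH_DP: name
    K8S_SERVICE: short-name
  MESH_DP:
    K8S_SERVICE: short-name

auto-matching-rules:
  # the name of the upper service is equal to the name of the lower service
  name: "{ (u, l) -> u.name == l.name }"
  # the short name of the upper service is equal to the short name of the lower service
  short-name: "{ (u, l) -> u.shortName == l.shortName }"

layer-levels:
  MESH: 3
  MESH_DP: 1
  K8S_SERVICE: 0
```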
A Groovy script defines each matching rule; the input parameters are the upper service (u) and the lower service (l), and the return value is a boolean, which is used to match the relation between the upper service and the lower service on different layers. The default matching rules require the service names to be configured following the SkyWalking defaults and the Showcase. If you customize the service name in any layer, you should customize the related matching rules according to your service name rules.  Layer Levels  Define the hierarchy level of the service layer in the layer-levels section. The level is used to define the order of the service layer for UI presentation. The level of the upper service should be greater than the level of the lower service in the hierarchy section.  ","excerpt":"Define Service Hierarchy SkyWalking v10 introduces a new concept Service Hierarchy which defines the …","ref":"/docs/main/next/en/concepts-and-designs/service-hierarchy-configuration/","title":"Define Service Hierarchy"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to activate the SkyWalking tracer, log4j will output the traceId, if it exists. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   You only need to replace the pattern %T with %T{SW_CTX}.\n  When you use -javaagent to activate the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it exists. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC reporter can forward the collected logs to the SkyWalking OAP server, or the SkyWalking Satellite sidecar. Trace id, segment id, and span id will be attached to logs automatically.
You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. 
Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  
import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and a supplier function; this function needs to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents summarized sample observations with customized buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of the histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram; the default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add a value into the histogram, and automatically determine which bucket count needs to be incremented. Rule: the value counts into [step1, step2).  ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-meter/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use the TraceContext.traceId() API to obtain the traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use the TraceContext.segmentId() API to obtain the segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use the TraceContext.spanId() API to obtain the spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and value (Tag#value()); if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in conjunction with @Trace, see examples below.
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","excerpt":"Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/opentracing/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. 
Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  
import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-meter/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","excerpt":"Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/opentracing/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. 
Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  
import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-meter/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","excerpt":"Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/opentracing/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. 
Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  
import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-meter/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","excerpt":"Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/opentracing/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. 
Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  
import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-meter/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","excerpt":"Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/opentracing/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI to Kubernetes cluster with default settings specified by their Custom Resource Defines(CRD).\nInstall Operator Follow Operator installation instrument to install the operator.\nDeploy OAP server and UI with default setting Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f default.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] View the UI In order to view the UI from your browser, you should get the external address from the ingress generated by the UI custom resource firstly.\n$ kubectl get ingresses NAME HOSTS ADDRESS PORTS AGE default-ui demo.ui.skywalking \u0026lt;External_IP\u0026gt; 80 33h Edit your local /etc/hosts to append the following host-ip mapping.\ndemo.ui.skywalking \u0026lt;External_IP\u0026gt; Finally, navigate your browser to demo.ui.skywalking to access UI service.\nNotice, please install an ingress controller to your Kubernetes environment.\n","excerpt":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI …","ref":"/docs/skywalking-swck/latest/examples/default-backend/","title":"Deploy OAP server and UI with default settings"},{"body":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI to Kubernetes cluster with default settings specified by their Custom Resource Defines(CRD).\nInstall Operator Follow Operator installation instrument to install the operator.\nDeploy OAP server and UI with default setting Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f default.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] View the UI In order to view the UI from your browser, you should get the external address from the ingress generated by the UI custom resource firstly.\n$ kubectl get ingresses NAME HOSTS ADDRESS PORTS AGE default-ui demo.ui.skywalking \u0026lt;External_IP\u0026gt; 80 33h Edit your local /etc/hosts to append the following host-ip mapping.\ndemo.ui.skywalking 
\u0026lt;External_IP\u0026gt; Finally, navigate your browser to demo.ui.skywalking to access UI service.\nNotice, please install an ingress controller to your Kubernetes environment.\n","excerpt":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI …","ref":"/docs/skywalking-swck/next/examples/default-backend/","title":"Deploy OAP server and UI with default settings"},{"body":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI to Kubernetes cluster with default settings specified by their Custom Resource Defines(CRD).\nInstall Operator Follow Operator installation instrument to install the operator.\nDeploy OAP server and UI with default setting Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f default.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] View the UI In order to view the UI from your browser, you should get the external address from the ingress generated by the UI custom resource firstly.\n$ kubectl get ingresses NAME HOSTS ADDRESS PORTS AGE default-ui demo.ui.skywalking \u0026lt;External_IP\u0026gt; 80 33h Edit your local /etc/hosts to append the following host-ip mapping.\ndemo.ui.skywalking \u0026lt;External_IP\u0026gt; Finally, navigate your browser to demo.ui.skywalking to access UI service.\nNotice, please install an ingress controller to your Kubernetes environment.\n","excerpt":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI …","ref":"/docs/skywalking-swck/v0.9.0/examples/default-backend/","title":"Deploy OAP server and UI with default settings"},{"body":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment.\nStartup Kubernetes Make sure that you already have a Kubernetes cluster.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nDeploy Rover Please follow the rover-daemonset.yml to deploy the rover in your Kubernetes cluster. Update the comment in the file, which includes two configs:\n Rover docker image: You could use make docker to build an image and upload it to your private registry, or update from the public image. OAP address: Update the OAP address.  Then, you could use kubectl apply -f rover-daemonset.yml to deploy the skywalking-rover into your cluster. It would deploy in each node as a DaemonSet.\n","excerpt":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment. …","ref":"/docs/skywalking-rover/latest/en/setup/deployment/kubernetes/readme/","title":"Deploy on Kubernetes"},{"body":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment.\nStartup Kubernetes Make sure that you already have a Kubernetes cluster.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nDeploy Rover Please follow the rover-daemonset.yml to deploy the rover in your Kubernetes cluster. 
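As an aside to the DaemonSet deployment described above: after kubectl apply, a generic way to confirm that a rover pod is scheduled on every node is the standard listing below; the namespace and object names depend on the manifest you applied and are not taken from this page. kubectl get daemonset,pods -o wide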
Update the comment in the file, which includes two configs:\n Rover docker image: You could use make docker to build an image and upload it to your private registry, or update from the public image. OAP address: Update the OAP address.  Then, you could use kubectl apply -f rover-daemonset.yml to deploy the skywalking-rover into your cluster. It would deploy in each node as a DaemonSet.\n","excerpt":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment. …","ref":"/docs/skywalking-rover/next/en/setup/deployment/kubernetes/readme/","title":"Deploy on Kubernetes"},{"body":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment.\nStartup Kubernetes Make sure that you already have a Kubernetes cluster.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nDeploy Rover Please follow the rover-daemonset.yml to deploy the rover in your Kubernetes cluster. Update the comment in the file, which includes two configs:\n Rover docker image: You could use make docker to build an image and upload it to your private registry, or update from the public image. OAP address: Update the OAP address.  Then, you could use kubectl apply -f rover-daemonset.yml to deploy the skywalking-rover into your cluster. It would deploy in each node as a DaemonSet.\n","excerpt":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment. …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/deployment/kubernetes/readme/","title":"Deploy on Kubernetes"},{"body":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment.\nInstall We recommend install the Satellite by helm, follow command below, it could start the latest release version of SkyWalking Backend, UI and Satellite.\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 \\  --set satellite.enabled=true \\  --set satellite.image.tag=v0.4.0 Change Address After the Satellite and Backend started, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to this: skywalking-satellite.${SKYWALKING_RELEASE_NAMESPACE}:11800.\n","excerpt":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment. 
…","ref":"/docs/skywalking-satellite/latest/en/setup/examples/deploy/kubernetes/readme/","title":"Deploy on Kubernetes"},{"body":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment.\nInstall We recommend install the Satellite by helm, follow command below, it could start the latest release version of SkyWalking Backend, UI and Satellite.\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 \\  --set satellite.enabled=true \\  --set satellite.image.tag=v0.4.0 Change Address After the Satellite and Backend started, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to this: skywalking-satellite.${SKYWALKING_RELEASE_NAMESPACE}:11800.\n","excerpt":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment. …","ref":"/docs/skywalking-satellite/next/en/setup/examples/deploy/kubernetes/readme/","title":"Deploy on Kubernetes"},{"body":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment.\nInstall We recommend install the Satellite by helm, follow command below, it could start the latest release version of SkyWalking Backend, UI and Satellite.\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 \\  --set satellite.enabled=true \\  --set satellite.image.tag=v0.4.0 Change Address After the Satellite and Backend started, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to this: skywalking-satellite.${SKYWALKING_RELEASE_NAMESPACE}:11800.\n","excerpt":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment. 
…","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/deploy/kubernetes/readme/","title":"Deploy on Kubernetes"},{"body":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows instance.\nInstall Download Download the latest release version from SkyWalking Release Page.\nChange OAP Server addresses Update the OAP Server address in the config file, then satellite could connect to them and use round-robin policy for load-balance server before send each request.\nSupport two ways to locate the server list, using finder_type to change the type to find:\n static: Define the server address list. kubernetes: Define kubernetes pod/service/endpoint, it could be found addresses and dynamic update automatically.  Static server list You could see there define two server address and split by \u0026ldquo;,\u0026rdquo;.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:static}# The gRPC server address (default localhost:11800).server_addr:${SATELLITE_GRPC_CLIENT:127.0.0.1:11800,127.0.0.2:11800}# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Kubernetes selector Using kubernetes_config to define the address\u0026rsquo;s finder.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:kubernetes}# The kubernetes config to lookup addresseskubernetes_config:# The kubernetes API server address, If not define means using in kubernetes mode to connectapi_server:http://localhost:8001/# The kind of apikind:endpoints# Support to lookup namespacesnamespaces:- default# The kind selectorselector:label:app=productpage# How to get the address exported portextra_port:port:9080# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Start Satellite Execute the script bin/startup.sh(linux) or bin/startup.cmd(windows) to start. Then It could start these port:\n gRPC port(11800): listen the gRPC request, It could handle request from SkyWalking Agent protocol and Envoy ALS/Metrics protocol. Prometheus(1234): listen the HTTP request, It could get all SO11Y metrics from /metrics endpoint using Prometheus format.  Change Address After the satellite start, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to the satellite gRPC port.\n","excerpt":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows …","ref":"/docs/skywalking-satellite/latest/en/setup/examples/deploy/linux-windows/readme/","title":"Deploy on Linux and Windows"},{"body":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows instance.\nInstall Download Download the latest release version from SkyWalking Release Page.\nChange OAP Server addresses Update the OAP Server address in the config file, then satellite could connect to them and use round-robin policy for load-balance server before send each request.\nSupport two ways to locate the server list, using finder_type to change the type to find:\n static: Define the server address list. kubernetes: Define kubernetes pod/service/endpoint, it could be found addresses and dynamic update automatically.  Static server list You could see there define two server address and split by \u0026ldquo;,\u0026rdquo;.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:static}# The gRPC server address (default localhost:11800).server_addr:${SATELLITE_GRPC_CLIENT:127.0.0.1:11800,127.0.0.2:11800}# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Kubernetes selector Using kubernetes_config to define the address\u0026rsquo;s finder.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:kubernetes}# The kubernetes config to lookup addresseskubernetes_config:# The kubernetes API server address, If not define means using in kubernetes mode to connectapi_server:http://localhost:8001/# The kind of apikind:endpoints# Support to lookup namespacesnamespaces:- default# The kind selectorselector:label:app=productpage# How to get the address exported portextra_port:port:9080# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Start Satellite Execute the script bin/startup.sh(linux) or bin/startup.cmd(windows) to start. Then It could start these port:\n gRPC port(11800): listen the gRPC request, It could handle request from SkyWalking Agent protocol and Envoy ALS/Metrics protocol. Prometheus(1234): listen the HTTP request, It could get all SO11Y metrics from /metrics endpoint using Prometheus format.  Change Address After the satellite start, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to the satellite gRPC port.\n","excerpt":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows …","ref":"/docs/skywalking-satellite/next/en/setup/examples/deploy/linux-windows/readme/","title":"Deploy on Linux and Windows"},{"body":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows instance.\nInstall Download Download the latest release version from SkyWalking Release Page.\nChange OAP Server addresses Update the OAP Server address in the config file, then satellite could connect to them and use round-robin policy for load-balance server before send each request.\nSupport two ways to locate the server list, using finder_type to change the type to find:\n static: Define the server address list. kubernetes: Define kubernetes pod/service/endpoint, it could be found addresses and dynamic update automatically.  
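Because the server address in the static snippet below is read from the ${SATELLITE_GRPC_CLIENT:...} placeholder, one way to point the Satellite at several OAP instances without editing the file is an environment override before startup; the host names here are placeholders: export SATELLITE_GRPC_CLIENT=oap-1:11800,oap-2:11800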
Static server list You could see there define two server address and split by \u0026ldquo;,\u0026rdquo;.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:static}# The gRPC server address (default localhost:11800).server_addr:${SATELLITE_GRPC_CLIENT:127.0.0.1:11800,127.0.0.2:11800}# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Kubernetes selector Using kubernetes_config to define the address\u0026rsquo;s finder.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:kubernetes}# The kubernetes config to lookup addresseskubernetes_config:# The kubernetes API server address, If not define means using in kubernetes mode to connectapi_server:http://localhost:8001/# The kind of apikind:endpoints# Support to lookup namespacesnamespaces:- default# The kind selectorselector:label:app=productpage# How to get the address exported portextra_port:port:9080# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Start Satellite Execute the script bin/startup.sh(linux) or bin/startup.cmd(windows) to start. Then It could start these port:\n gRPC port(11800): listen the gRPC request, It could handle request from SkyWalking Agent protocol and Envoy ALS/Metrics protocol. Prometheus(1234): listen the HTTP request, It could get all SO11Y metrics from /metrics endpoint using Prometheus format.  Change Address After the satellite start, need to change the address from agent/node. 
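To make the re-pointing above concrete for the Java agent, the relevant agent.config line would look roughly like the sketch below; collector.backend_service comes from this page, while the environment variable name and the satellite-host placeholder are assumptions: collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:satellite-host:11800}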
Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to the satellite gRPC port.\n","excerpt":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/deploy/linux-windows/readme/","title":"Deploy on Linux and Windows"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/latest/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/next/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. 
Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. 
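To make the environment-variable point in these entries concrete, a hedged sketch of overriding one OAP setting at helm install time; the oap.env values path and the SW_CORE_RECORD_DATA_TTL variable are assumptions about the chart and OAP, not taken from this page: helm install skywalking skywalking/skywalking -n skywalking --set oap.env.SW_CORE_RECORD_DATA_TTL=3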
Most SkyWalking OAP settings are controlled through System environment variables when apply helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy oap and ui to a kubernetes cluster.\nPlease read the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in kubernetes"},{"body":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
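Before the V1 definitions below, an illustrative call to the V2 readMetricsValues operation listed above; the condition and duration field names follow the common query-protocol input types and are shown only as a sketch, with placeholder metric and service names:
query {
  readMetricsValues(
    condition: { name: "service_cpm", entity: { scope: Service, serviceName: "your-service", normal: true } },
    duration: { start: "2023-01-01 0000", end: "2023-01-01 0100", step: MINUTE }
  ) {
    label
    values { values { value } }
  }
}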
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","excerpt":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries …","ref":"/docs/main/latest/en/api/query-protocol-deprecated/","title":"Deprecated Query Protocol"},{"body":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nV1 APIs V1 APIs were introduced since 6.x. 
Now they are a shell to V2 APIs since 9.0.0.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
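As a companion sketch for the V1 style named in the list above, a getLinearIntValues call with placeholder arguments; the MetricCondition and Duration field names are assumptions from the query protocol, not definitions on this page:
query {
  getLinearIntValues(
    metric: { name: "service_cpm", id: "placeholder-service-id" },
    duration: { start: "2023-01-01 0000", end: "2023-01-01 0100", step: MINUTE }
  ) {
    values { value }
  }
}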
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","excerpt":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries …","ref":"/docs/main/next/en/api/query-protocol-deprecated/","title":"Deprecated Query Protocol"},{"body":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. 
You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","excerpt":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries …","ref":"/docs/main/v9.5.0/en/api/query-protocol-deprecated/","title":"Deprecated Query Protocol"},{"body":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. 
Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","excerpt":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries …","ref":"/docs/main/v9.6.0/en/api/query-protocol-deprecated/","title":"Deprecated Query Protocol"},{"body":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. 
You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
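For illustration, a minimal Go client could issue the deprecated getServiceTopN query defined in this schema against an OAP GraphQL endpoint. The endpoint address, the Duration/Order value formats, and the TopNEntity field names used here are assumptions made for the sketch, not details taken from this page:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// Sketch only: assumes the OAP GraphQL endpoint is reachable at
// http://localhost:12800/graphql, Duration is {start, end, step},
// Order accepts DES, and TopNEntity exposes name/value fields.
func main() {
	query := `
	query topN($duration: Duration!) {
	  getServiceTopN(name: "service_sla", topN: 10, duration: $duration, order: DES) {
	    name
	    value
	  }
	}`
	payload, _ := json.Marshal(map[string]interface{}{
		"query": query,
		"variables": map[string]interface{}{
			"duration": map[string]string{
				"start": "2024-01-01 0000",
				"end":   "2024-01-01 0100",
				"step":  "MINUTE",
			},
		},
	})
	resp, err := http.Post("http://localhost:12800/graphql", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out map[string]interface{}
	_ = json.NewDecoder(resp.Body).Decode(&out)
	fmt.Println(out) // e.g. {"data":{"getServiceTopN":[...]}}
}
```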
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","excerpt":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries …","ref":"/docs/main/v9.7.0/en/api/query-protocol-deprecated/","title":"Deprecated Query Protocol"},{"body":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One mmap-queue has a directory to store the whole data. The queue directory is made up of many segments and 1 metafile. This is originally implemented by bigqueue project, we changed it a little for fitting the Satellite project requirements.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  Meta Metadata only needs 80B to store the Metadata for the pipe. 
But for memory alignment, it takes at least one memory page size, which is generally 4K.\n[ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ] [metaVersion][ ID ][ offset][ ID ][ offset][ ID ][ offset][ ID ][ offset][capacity] [metaVersion][writing offset][watermark offset][committed offset][reading offset][capacity] Transforming BenchmarkTest Test machine: macbook pro 2018\nModel Name:\tMacBook Pro Model Identifier:\tMacBookPro15,1 Processor Name:\t6-Core Intel Core i7 Processor Speed:\t2.2 GHz Number of Processors:\t1 Total Number of Cores:\t6 L2 Cache (per Core):\t256 KB L3 Cache:\t9 MB Hyper-Threading Technology:\tEnabled Memory:\t16 GB System Firmware Version:\t1554.60.15.0.0 (iBridge: 18.16.13030.0.0,0 push operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueue BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 27585\t43559 ns/op\t9889 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 39326\t31773 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 56770\t22990 ns/op\t9816 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 43803\t29778 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 16870\t80576 ns/op\t18944 B/op\t10 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 36922\t39085 ns/op\t9889 B/op\t9 allocs/op PASS push and pop operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueueAndDequeue BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 21030\t60728 ns/op\t28774 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 30327\t41274 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 32738\t37923 ns/op\t28700 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 28209\t41169 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 14677\t89637 ns/op\t54981 B/op\t43 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 22228\t54963 ns/op\t28774 B/op\t42 allocs/op PASS ","excerpt":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/mmap-queue/","title":"Design"},{"body":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One mmap-queue has a directory to store the whole data. The queue directory is made up of many segments and 1 metafile. This is originally implemented by bigqueue project, we changed it a little for fitting the Satellite project requirements.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  
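To make the meta layout sketched above concrete, here is a small Go illustration of an 80-byte meta block padded to one 4K page. The field names, the 8-byte little-endian slot width, and the write order are assumptions drawn from the layout diagram, not the actual bigqueue/Satellite implementation:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

// Illustrative layout: ten 8-byte slots -- one version, four (segment ID, offset)
// pairs for the writing / watermark / committed / reading positions, and the
// capacity. 10 x 8 bytes = 80 bytes of data in a page-sized (4K) meta file.
type meta struct {
	Version                   int64
	WritingID, WritingOffset  int64
	WatermarkID, WatermarkOff int64
	CommittedID, CommittedOff int64
	ReadingID, ReadingOffset  int64
	Capacity                  int64
}

const pageSize = 4096 // padded to a memory page for alignment

func (m *meta) encode() []byte {
	buf := make([]byte, pageSize)
	fields := []int64{
		m.Version,
		m.WritingID, m.WritingOffset,
		m.WatermarkID, m.WatermarkOff,
		m.CommittedID, m.CommittedOff,
		m.ReadingID, m.ReadingOffset,
		m.Capacity,
	}
	for i, v := range fields {
		binary.LittleEndian.PutUint64(buf[i*8:], uint64(v))
	}
	return buf
}

func main() {
	m := meta{Version: 1, Capacity: 10000}
	if err := os.WriteFile("meta.dat", m.encode(), 0o644); err != nil {
		panic(err)
	}
	fmt.Println("meta block uses", 10*8, "bytes of a", pageSize, "byte page")
}
```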
Meta Metadata only needs 80B to store the Metadata for the pipe. But for memory alignment, it takes at least one memory page size, which is generally 4K.\n[ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ] [metaVersion][ ID ][ offset][ ID ][ offset][ ID ][ offset][ ID ][ offset][capacity] [metaVersion][writing offset][watermark offset][committed offset][reading offset][capacity] Transforming BenchmarkTest Test machine: macbook pro 2018\nModel Name:\tMacBook Pro Model Identifier:\tMacBookPro15,1 Processor Name:\t6-Core Intel Core i7 Processor Speed:\t2.2 GHz Number of Processors:\t1 Total Number of Cores:\t6 L2 Cache (per Core):\t256 KB L3 Cache:\t9 MB Hyper-Threading Technology:\tEnabled Memory:\t16 GB System Firmware Version:\t1554.60.15.0.0 (iBridge: 18.16.13030.0.0,0 push operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueue BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 27585\t43559 ns/op\t9889 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 39326\t31773 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 56770\t22990 ns/op\t9816 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 43803\t29778 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 16870\t80576 ns/op\t18944 B/op\t10 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 36922\t39085 ns/op\t9889 B/op\t9 allocs/op PASS push and pop operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueueAndDequeue BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 21030\t60728 ns/op\t28774 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 30327\t41274 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 32738\t37923 ns/op\t28700 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 28209\t41169 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 14677\t89637 ns/op\t54981 B/op\t43 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 22228\t54963 ns/op\t28774 B/op\t42 allocs/op PASS ","excerpt":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/mmap-queue/","title":"Design"},{"body":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One mmap-queue has a directory to store the whole data. The queue directory is made up of many segments and 1 metafile. This is originally implemented by bigqueue project, we changed it a little for fitting the Satellite project requirements.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. 
Meta: The purpose of meta is to find the data that the consumer needs.  Meta Metadata only needs 80B to store the Metadata for the pipe. But for memory alignment, it takes at least one memory page size, which is generally 4K.\n[ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ] [metaVersion][ ID ][ offset][ ID ][ offset][ ID ][ offset][ ID ][ offset][capacity] [metaVersion][writing offset][watermark offset][committed offset][reading offset][capacity] Transforming BenchmarkTest Test machine: macbook pro 2018\nModel Name:\tMacBook Pro Model Identifier:\tMacBookPro15,1 Processor Name:\t6-Core Intel Core i7 Processor Speed:\t2.2 GHz Number of Processors:\t1 Total Number of Cores:\t6 L2 Cache (per Core):\t256 KB L3 Cache:\t9 MB Hyper-Threading Technology:\tEnabled Memory:\t16 GB System Firmware Version:\t1554.60.15.0.0 (iBridge: 18.16.13030.0.0,0 push operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueue BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 27585\t43559 ns/op\t9889 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 39326\t31773 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 56770\t22990 ns/op\t9816 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 43803\t29778 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 16870\t80576 ns/op\t18944 B/op\t10 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 36922\t39085 ns/op\t9889 B/op\t9 allocs/op PASS push and pop operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueueAndDequeue BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 21030\t60728 ns/op\t28774 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 30327\t41274 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 32738\t37923 ns/op\t28700 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 28209\t41169 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 14677\t89637 ns/op\t54981 B/op\t43 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 22228\t54963 ns/op\t28774 B/op\t42 allocs/op PASS ","excerpt":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One …","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/mmap-queue/","title":"Design"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. 
Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/latest/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. 
(1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/next/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. 
SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. 
SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  
See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. 
Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. 
Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. 
By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. 
SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project.\n Support various E2E testing requirements in SkyWalking main repository with other ecosystem repositories. Support both docker-compose and KinD to orchestrate the tested services under different environments. Be language-independent as much as possible, users only need to configure YAMLs and run commands, without writing code.  Non-Goal  This framework is not involved with the build process, i.e. it won’t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn’t take the plugin tests into account, at least for now;  ","excerpt":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project. …","ref":"/docs/skywalking-infra-e2e/latest/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project.\n Support various E2E testing requirements in SkyWalking main repository with other ecosystem repositories. Support both docker-compose and KinD to orchestrate the tested services under different environments. Be language-independent as much as possible, users only need to configure YAMLs and run commands, without writing code.  Non-Goal  This framework is not involved with the build process, i.e. it won’t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn’t take the plugin tests into account, at least for now;  ","excerpt":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project. …","ref":"/docs/skywalking-infra-e2e/next/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project.\n Support various E2E testing requirements in SkyWalking main repository with other ecosystem repositories. 
Support both docker-compose and KinD to orchestrate the tested services under different environments. Be language-independent as much as possible, users only need to configure YAMLs and run commands, without writing code.  Non-Goal  This framework is not involved with the build process, i.e. it won’t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn’t take the plugin tests into account, at least for now;  ","excerpt":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project. …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light Weight. SkyWalking Satellite has a limited cost for resources and high-performance because of the requirements of the sidecar deployment model.\n  Pluggability. SkyWalking Satellite core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking Satellite can run in multiple environments, including:\n Use traditional deployment as a daemon process to collect data. Use cloud services as a sidecar, such as in the Kubernetes platform.    Interoperability. Observability is a big landscape, SkyWalking is impossible to support all, even by its community. So SkyWalking Satellite is compatible with many protocols, including:\n SkyWalking protocol (WIP) Prometheus protocol.    ","excerpt":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light Weight. SkyWalking Satellite has a limited cost for resources and high-performance because of the requirements of the sidecar deployment model.\n  Pluggability. SkyWalking Satellite core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking Satellite can run in multiple environments, including:\n Use traditional deployment as a daemon process to collect data. Use cloud services as a sidecar, such as in the Kubernetes platform.    Interoperability. Observability is a big landscape, SkyWalking is impossible to support all, even by its community. So SkyWalking Satellite is compatible with many protocols, including:\n SkyWalking protocol (WIP) Prometheus protocol.    ","excerpt":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light Weight. SkyWalking Satellite has a limited cost for resources and high-performance because of the requirements of the sidecar deployment model.\n  Pluggability. SkyWalking Satellite core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. 
SkyWalking Satellite can run in multiple environments, including:\n Use traditional deployment as a daemon process to collect data. Use cloud services as a sidecar, such as in the Kubernetes platform.    Interoperability. Observability is a big landscape, SkyWalking is impossible to support all, even by its community. So SkyWalking Satellite is compatible with many protocols, including:\n SkyWalking protocol (WIP) Prometheus protocol.    ","excerpt":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light …","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. 
When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. 
Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. 
If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-helm.git cd skywalking-helm cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. 
The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/latest/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  
Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  
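To make the kprobe approach above more concrete, the following is a minimal libbpf/CO-RE style sketch of intercepting the send path at the syscall level. It is only an illustration under stated assumptions: the map name, key layout, and the choice of the sendto syscall are ours rather than SkyWalking Rover's implementation, and a tracepoint on sys_enter_sendto is used for brevity where a kprobe on the syscall entry would capture the same arguments.

// Illustrative sketch only, not SkyWalking Rover's code: accumulate bytes written
// per (pid, socket fd), the raw material for the per-connection "Bytes" metric.
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct traffic_key {
    __u32 pid;   /* process id */
    __u32 fd;    /* socket file descriptor */
};

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 10240);
    __type(key, struct traffic_key);
    __type(value, __u64);            /* total bytes requested so far */
} sent_bytes SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_sendto")
int trace_sendto(struct trace_event_raw_sys_enter *ctx)
{
    struct traffic_key key = {};
    key.pid = bpf_get_current_pid_tgid() >> 32;
    key.fd  = (__u32)ctx->args[0];   /* first syscall argument: socket fd   */
    __u64 len = (__u64)ctx->args[2]; /* third syscall argument: buffer size */

    __u64 *total = bpf_map_lookup_elem(&sent_bytes, &key);
    if (total)
        __sync_fetch_and_add(total, len);
    else
        bpf_map_update_elem(&sent_bytes, &key, &len, BPF_ANY);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";

A user-space agent would read this map periodically; pairing the enter probe with the corresponding sys_exit probe additionally yields the bytes actually transferred and the execution time discussed below.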
Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-helm.git cd skywalking-helm cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/next/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. 
Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. 
However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  
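As a small illustration of how a single observation feeds both metric types, the plain C sketch below (not SkyWalking's implementation; all names are ours) updates a counter with count, bytes, and execution time, and places the latency into a power-of-two histogram bucket, the cheap bucketing scheme commonly used for eBPF histograms.

#include <stdint.h>
#include <stdio.h>

#define HIST_BUCKETS 16

struct write_metrics {
    uint64_t count;               /* Counter: execution count            */
    uint64_t bytes;               /* Counter: total packet size in bytes */
    uint64_t exec_time_us;        /* Counter: total execution time (us)  */
    uint64_t hist[HIST_BUCKETS];  /* Histogram: latency distribution     */
};

/* Map a latency to a power-of-two bucket: 0-1us, 1-2us, 2-4us, ... */
static unsigned int bucket_of(uint64_t latency_us)
{
    unsigned int b = 0;
    while (latency_us > 1 && b < HIST_BUCKETS - 1) {
        latency_us >>= 1;
        b++;
    }
    return b;
}

static void observe_write(struct write_metrics *m, uint64_t bytes, uint64_t latency_us)
{
    m->count += 1;
    m->bytes += bytes;
    m->exec_time_us += latency_us;
    m->hist[bucket_of(latency_us)] += 1;
}

int main(void)
{
    struct write_metrics m = {0};
    observe_write(&m, 512, 130);   /* one 512-byte write that took 130us */
    observe_write(&m, 2048, 950);
    printf("writes=%llu bytes=%llu time=%lluus\n",
           (unsigned long long)m.count,
           (unsigned long long)m.bytes,
           (unsigned long long)m.exec_time_us);
    return 0;
}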
Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. 
When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/v9.3.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  
Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  
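To illustrate the "easier correlation with underlying data" advantage listed above, here is a hedged sketch of a kprobe on the kernel's tcp_v4_connect function that reports the remote IPv4 address and port of each outgoing connection, the kind of data used to work out which pod a process talks to. The event layout and names are assumptions made for this example, not SkyWalking Rover's code.

// Illustrative sketch only: emit (pid, remote address, remote port) per connect().
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

struct connect_event {
    __u32 pid;
    __u32 daddr;   /* remote IPv4 address (network byte order) */
    __u16 dport;   /* remote port (network byte order)         */
};

struct {
    __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
} connect_events SEC(".maps");

/* Kernel prototype: int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); */
SEC("kprobe/tcp_v4_connect")
int BPF_KPROBE(trace_tcp_connect, struct sock *sk, struct sockaddr *uaddr)
{
    struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
    struct connect_event ev = {};

    ev.pid   = bpf_get_current_pid_tgid() >> 32;
    ev.daddr = BPF_CORE_READ(sin, sin_addr.s_addr);
    ev.dport = BPF_CORE_READ(sin, sin_port);

    bpf_perf_event_output(ctx, &connect_events, BPF_F_CURRENT_CPU, &ev, sizeof(ev));
    return 0;
}

char LICENSE[] SEC("license") = "GPL";

In practice a kretprobe on the same function is usually added as well, so the address is only reported once the connection attempt is known to have succeeded.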
Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/v9.4.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. 
Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. 
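As a rough sketch of what such buffer-based protocol detection can look like (an illustration only, not SkyWalking Rover code; the file payload.bin is a hypothetical capture of a send buffer), the first few bytes of the buffer are usually enough to guess at HTTP/1.x:
# Hypothetical check: does the captured buffer start like an HTTP/1.x message?
head -c 8 payload.bin | grep -qE '^(GET |POST|PUT |HEAD|HTTP/)' && echo "looks like HTTP/1.x" || echo "unknown protocol, keep plain TCP metrics"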
However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  
Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. 
When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/v9.5.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  
Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general-purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing SkyWalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps, from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network at these interaction points. There are two ways to implement the interception: user space (uprobe) or kernel space (kprobe). The differences can be summarized as follows:\n uprobe - Pros: gets more application-related context, such as whether the current request is HTTP or HTTPS, and a request and its response can be intercepted in a single method. Cons: data structures can be unstable, so it is harder to get the desired data; the implementation may differ between language/library versions; it does not work in applications without symbol tables. kprobe - Pros: available for all languages; the data structures and methods are stable and do not require much adaptation; easier correlation with underlying data, such as the TCP destination address and OSI Layer 4 protocol metrics. Cons: a single request and response may be split across multiple probes, and contextual information is not easy to get for stateful requests, for example header compression in HTTP/2.  For general network performance monitoring, we chose kprobes (intercepting the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. Although a single request and response may be split across multiple probes, we can correlate them afterwards. Contextual information is mainly needed for OSI Layer 7 protocol analysis, so it can be ignored when we only monitor network performance. 
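To make the kprobe approach concrete, the sketch below uses bpftrace (a standalone tracing tool, not part of SkyWalking Rover) to sum the bytes passed to the kernel's tcp_sendmsg function per process; the probe name and argument layout are the common ones on recent Linux kernels and may differ on yours:
# Hedged bpftrace sketch: per-process bytes handed to tcp_sendmsg (run as root; Ctrl-C prints totals)
bpftrace -e 'kprobe:tcp_sendmsg { @sent_bytes[comm] = sum(arg2); }'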
Kprobes and network monitoring Following the Linux documentation on network syscalls, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address of the connection, which helps us understand which pod is connected. Connection statistics: Includes basic metrics from the socket, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and the socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation, corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol inferred from the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1), we can analyze the following steps in the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data, for example by following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the data is encrypted in user space, so the Linux kernel only ever sees ciphertext. In the figure above, the application usually transmits SSL data through a third-party library (such as OpenSSL). In this case, the Linux API can only get the encrypted data, so it cannot recognize any higher-layer protocol. To capture the plaintext with eBPF, we need to follow these steps:\n Read unencrypted data through a uprobe: This is compatible with multiple languages; the uprobe captures the data before it is encrypted for sending, or after it is decrypted on receiving. In this way, we can get the original data. Associate with the socket: We can then associate the unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods, which carry the buffer data for a socket.\nFollowing the documentation, we can intercept these two methods, whose signatures are almost identical to the corresponding read/write API in Linux. The source code of the SSL structure in OpenSSL shows that the socket FD is stored in the BIO object of the SSL structure, and we can read it by its offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported only in Kubernetes environments, so it must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/v9.6.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. 
Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. 
However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  
Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-helm.git cd skywalking-helm cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. 
When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/v9.7.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","excerpt":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/how-to-disable-plugin/","title":"Disable plugins"},{"body":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... 
skywalking-agent.jar ","excerpt":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-disable-plugin/","title":"Disable plugins"},{"body":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","excerpt":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/how-to-disable-plugin/","title":"Disable plugins"},{"body":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","excerpt":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/how-to-disable-plugin/","title":"Disable plugins"},{"body":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","excerpt":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/how-to-disable-plugin/","title":"Disable plugins"},{"body":"","excerpt":"","ref":"/docs/","title":"Documentation"},{"body":"Download Use the links below to download Apache SkyWalking releases from one of our mirrors. Don’t forget to verify the files downloaded. Please note that only source code releases are official Apache releases, binary distributions are just for end user convenience. Foundations  Agents  Operation  Database  Tools  Archived Releases  Docker images   Foundations SkyWalking APM    SkyWalking is an Observability Analysis Platform and Application Performance Management system.\n Source   v9.7.0 | Nov. 28th, 2023 [src] [asc] [sha512]  v9.6.0 | Sep. 4th, 2023 [src] [asc] [sha512]  v9.5.0 | Jun. 16th, 2023 [src] [asc] [sha512]  v9.4.0 | Mar. 12th, 2023 [src] [asc] [sha512]  v9.3.0 | Dec. 4th, 2022 [src] [asc] [sha512]  v9.2.0 | Sep. 2nd, 2022 [src] [asc] [sha512]  v9.1.0 | Jun. 11th, 2022 [src] [asc] [sha512]  v9.0.0 | Apr. 9th, 2022 [src] [asc] [sha512]      Distribution   v9.7.0 | Nov. 28th, 2023 [tar] [asc] [sha512]  v9.6.0 | Sep. 4th, 2023 [tar] [asc] [sha512]  v9.5.0 | Jun. 16th, 2023 [tar] [asc] [sha512]  v9.4.0 | Mar. 12th, 2023 [tar] [asc] [sha512]  v9.3.0 | Dec. 4th, 2022 [tar] [asc] [sha512]  v9.2.0 | Sep. 2nd, 2022 [tar] [asc] [sha512]  v9.1.0 | Jun. 10th, 2022 [tar] [asc] [sha512]  v9.0.0 | Apr. 
9th, 2022 [tar] [asc] [sha512]        Booster UI    SkyWalking\u0026#39;s primary UI. All source codes have been included in the main repo release.\n Included in the main repo release     Grafana Plugins    SkyWalking Grafana Plugins provide extensions to visualize topology on Grafana.\n Source   0.1.0 | Sep. 12th, 2023 [src] [asc] [sha512]      Distribution   0.1.0 | Sep. 12th, 2023 [Install via Grafana Plugins]        SkyWalking Website    All source codes of https://skywalking.apache.org\n Deployed       Agents Java Agent    The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event/profiling abilities for Java projects.\n Source   9.2.0 | Apr. 1st, 2024 [src] [asc] [sha512]  9.1.0 | Dec. 4th, 2023 [src] [asc] [sha512]  9.0.0 | Aug. 31st, 2023 [src] [asc] [sha512]      Distribution   v9.2.0 | Apr. 1st, 2024 [tar] [asc] [sha512]  v9.1.0 | Dec. 4th, 2023 [tar] [asc] [sha512]  v9.0.0 | Aug. 31st, 2023 [tar] [asc] [sha512]        Python Agent    The Python Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/profiling abilities for Python projects.\n Source   v1.0.1 | Apr. 29th, 2023 [src] [asc] [sha512]      Distribution   v1.0.1 | Apr. 29th, 2023 [Install via pip]        Go Agent    The Go Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Golang projects.\n Source   v0.4.0 | Feb. 27th, 2024 [src] [asc] [sha512]      Distribution   v0.4.0 | Feb. 27th, 2024 [tar] [asc] [sha512]        NodeJS Agent    The NodeJS Agent for Apache SkyWalking, which provides the native tracing abilities for NodeJS projects.\n Source   v0.7.0 | Nov. 8th, 2023 [src] [asc] [sha512]      Distribution   v0.7.0 | Nov. 8th, 2023 [Install via npm]        SkyWalking Rust    The Rust Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Rust projects.\n Source   v0.8.0 | Aug. 2nd, 2023 [src] [asc] [sha512]      Distribution   v0.8.0 | Aug. 2nd, 2023 [Install via crates.io]        SkyWalking PHP    The PHP Agent for Apache SkyWalking, which provides the native tracing abilities for PHP projects.\n Source   v0.7.0 | Sep. 28th, 2023 [tar] [asc] [sha512]      Distribution   v0.7.0 | Sep. 28th, 2023 [Install via PECL]        Client JavaScript    Apache SkyWalking Client-side JavaScript exception and tracing library.\n Source   v0.11.0 | Mar. 18th, 2024 [src] [asc] [sha512]      Distribution   v0.11.0 | Mar. 18th, 2024 [Install via npm]        Nginx LUA Agent    SkyWalking Nginx Agent provides the native tracing capability for Nginx powered by Nginx LUA module.\n Source   v0.6.0 | Dec. 25th, 2021 [src] [asc] [sha512]      Distribution   v0.6.0 | Dec. 25th, 2021 [Install via luarocks]        Kong Agent    SkyWalking Kong Agent provides the native tracing capability.\n Source   v0.2.0 | Jan. 9th, 2022 [src] [asc] [sha512]      Distribution   v0.2.0 | Jan. 9th, 2022 [Install via luarocks]        SkyWalking Satellite    A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs.\n Source   v1.2.0 | Jun. 25th, 2023 [src] [asc] [sha512]      Distribution   v1.2.0 | Jun. 25th, 2023 [tar] [asc] [sha512]        Kubernetes Event Exporter    Watch, filter, and send Kubernetes events into Apache SkyWalking.\n Source   v1.0.0 | Apr. 25th, 2022 [src] [asc] [sha512]      Distribution   v1.0.0 | Apr. 
25th, 2022 [tar] [asc] [sha512]        SkyWalking Rover    Metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\n Source   v0.6.0 | Mar. 31th, 2024 [src] [asc] [sha512]      Distribution   v0.6.0 | Mar. 31th, 2024 [tar] [asc] [sha512]          Operation SkyWalking CLI    SkyWalking CLI is a command interaction tool for the SkyWalking user or OPS team.\n Source   v0.13.0 | Dec. 4th, 2023 [src] [asc] [sha512]      Distribution   v0.13.0 | Dec. 4th, 2023 [tar] [asc] [sha512]        Kubernetes Helm    SkyWalking Kubernetes Helm repository provides ways to install and configure SkyWalking in a Kubernetes cluster. The scripts are written in Helm 3.\n Source   v4.5.0 | Jul. 16th, 2023 [src] [asc] [sha512]         SkyWalking Cloud on Kubernetes    A bridge project between Apache SkyWalking and Kubernetes.\n Source   v0.9.0 | Mar. 4th, 2024 [src] [asc] [sha512]      Distribution   v0.9.0 | Mar. 4th, 2024 [tar] [asc] [sha512]          Database BanyanDB Server(BanyanD)    The BanyanDB Server\n Source   v0.5.0 | Oct 23th, 2023 [src] [asc] [sha512]      Distribution   v0.5.0 | Oct 23th, 2023 [tar] [asc] [sha512]        BanyanDB Java Client    The client implementation for SkyWalking BanyanDB in Java\n Source   v0.5.0 | Sep. 28th, 2023 [src] [asc] [sha512]      Distribution   v0.5.0 | Sep. 18th, 2023 [Install via maven]        BanyanDB Helm    BanyanDB Helm repository provides ways to install and configure BanyanDB. The scripts are written in Helm 3.\n Source   v0.1.0 | Sep. 25th, 2023 [src] [asc] [sha512]           Support tools for development and testing SkyWalking Eyes    A full-featured license tool to check and fix license headers and resolve dependencies\u0026#39; licenses.\n Source   v0.6.0 | Apr. 12th, 2024 [src] [asc] [sha512]      Distribution   v0.6.0 | Apr. 12th, 2024 [tar] [asc] [sha512]        SkyWalking Infra E2E    An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease.\n Source   v1.3.0 | Nov. 13th, 2023 [src] [asc] [sha512]      Distribution   v1.3.0 | Nov. 13th, 2023 [tar] [asc] [sha512]          Archived Releases    Older releases are not recommended for new users, because they are not maintained, but you still can find them(source codes and binaries) if you have specific reasons. Find all SkyWalking releases in the Archive repository.  Archive incubating repository hosts older releases when SkyWalking was an incubator project.  Docker Images for convenience SkyWalking OAP Server    This image would start up SkyWalking OAP server only.\nDocker Image     SkyWalking UI Image    This image would start up SkyWalking UI only.\nDocker Image     SkyWalking Kubernetes Helm    SkyWalking Kubernetes Helm repository provides ways to install and configure SkyWalking in a Kubernetes cluster. 
The scripts are written in Helm 3.\nDocker Image     SkyWalking Cloud on Kubernetes    A platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nDocker Image     SkyWalking Java Agent    The Docker image for Java users to conveniently use SkyWalking agent in containerized scenario.\nDocker Image     SkyWalking Python Agent    The Docker image for Python users to conveniently use SkyWalking agent in containerized scenario.\nDocker Image     SkyWalking Satellite    A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs.\nDocker Image     SkyWalking CLI    SkyWalking CLI is a command interaction tool for the SkyWalking user or OPS team.\nDocker Image     SkyWalking Eyes Image    A full-featured license tool to check and fix license headers and resolve dependencies\u0026#39; licenses.\nDocker Image     SkyWalking Kubernetes Event Exporter    Watch, filter, and send Kubernetes events into Apache SkyWalking backend.\nDocker Image     SkyWalking Rover    Metrics collector and ebpf-based profiler for C, C\u0026#43;\u0026#43;, Golang, and Rust.\nDocker Image         Verify the releases It is essential that you verify the integrity of the downloaded files using the PGP or SHA signatures. Please download the KEYS as well as the .asc/.sha512 signature files for relevant distribution. It is recommended to get these files from the main distribution directory and not from the mirrors.\n Verify using GPG/PGP Download PGP signatures KEYS, and the release with its .asc signature file. And then:\n# GPG verification gpg --import KEYS gpg --verify apache-skywalking-apm-***.asc apache-skywalking-apm-***   Verify using SHA512 Download the release with its .sha512 signature file. And then:\n# SHA-512 verification shasum -a 512 hadoop-X.Y.Z-src.tar.gz    ","excerpt":"Download Use the links below to download Apache SkyWalking releases from one of our mirrors. Don’t …","ref":"/downloads/","title":"Downloads"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. 
Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/latest/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   
alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/next/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables. At the same time, some of them support dynamic settings from upstream management system.\nCurrently, SkyWalking supports the 2 types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configVaule} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. 
Overrides receiver-trace/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key that corresponds to a group sub config items. A sub config item is a key value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kuberbetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configVaule} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. 
Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kuberbetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. 
Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. 
Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. 
Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. 
Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. 
The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. 
If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/next/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in apollo are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as Dynamic Configuration Center …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configVaule    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). 
To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/next/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 
1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /, see: https://www.consul.io/docs/dynamic-app-config/kv#using-consul-kv\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If use Consul UI we can see keys organized like folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as Dynamic Configuration Center …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /, see: https://www.consul.io/docs/dynamic-app-config/kv#using-consul-kv\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /, see: https://www.consul.io/docs/dynamic-app-config/kv#using-consul-kv\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 
1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/next/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as Dynamic Configuration Center (DCC). 
To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Only the v3 protocol is supported since 8.7.0.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as Dynamic Configuration Center …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). 
To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Kuberbetes Configmap Implementation configmap is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. 
Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. We can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can be separated into 2 configmaps as follows:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as Dynamic …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. 
These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. We can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can be separated into 2 configmaps as follows:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. 
we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/next/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
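Before the worked ConfigMap example that follows, here is a small illustrative sketch of the configKey.subItemKey convention described above. It is not SkyWalking's actual implementation; it only shows how, for a known group configKey, the flat data map of the selected ConfigMaps could be regrouped into sub-items:

```java
import java.util.HashMap;
import java.util.Map;

public class ConfigMapGroupDemo {
    /**
     * Collect the sub-items of one group config from flattened ConfigMap data,
     * where group entries use the "configKey.subItemKey" form described above.
     * Purely illustrative.
     */
    static Map<String, String> subItemsOf(String groupKey, Map<String, String> configMapData) {
        Map<String, String> subItems = new HashMap<>();
        String prefix = groupKey + ".";
        configMapData.forEach((key, value) -> {
            if (key.startsWith(prefix)) {
                subItems.put(key.substring(prefix.length()), value);
            }
        });
        return subItems;
    }

    public static void main(String[] args) {
        // Data items merged from the example ConfigMaps above.
        Map<String, String> data = new HashMap<>();
        data.put("core.default.endpoint-name-grouping-openapi.customerAPI-v1", "value of customerAPI-v1");
        data.put("core.default.endpoint-name-grouping-openapi.productAPI-v1", "value of productAPI-v1");
        data.put("core.default.endpoint-name-grouping-openapi.productAPI-v2", "value of productAPI-v2");
        data.put("agent-analyzer.default.slowDBAccessThreshold", "default:200,mongodb:50");

        // Prints {customerAPI-v1=..., productAPI-v1=..., productAPI-v2=...}
        System.out.println(subItemsOf("core.default.endpoint-name-grouping-openapi", data));
    }
}
```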
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/next/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking the config in nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace, if you set the config by Nacos UI each subItemkey should in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API each subItemkey should separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking the config in nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as Dynamic Configuration Center …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
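The Nacos group-config layout above can also be populated programmatically. The following is a minimal sketch using the Nacos Java client, assuming the default server address and the skywalking group shown in the configuration snippet; the worked example that follows shows the resulting entries. Error handling and checking the boolean return values are omitted for brevity.

```java
import java.util.Properties;

import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.exception.NacosException;

public class NacosGroupConfigPublisher {
    public static void main(String[] args) throws NacosException {
        Properties properties = new Properties();
        // Matches the SW_CONFIG_NACOS_SERVER_ADDR / SW_CONFIG_NACOS_SERVER_PORT defaults above.
        properties.put(PropertyKeyConst.SERVER_ADDR, "127.0.0.1:8848");
        ConfigService configService = NacosFactory.createConfigService(properties);

        String group = "skywalking";
        String groupKey = "core.default.endpoint-name-grouping-openapi";

        // The group's data id lists its sub-item keys, separated by \n (or \r\n).
        configService.publishConfig(groupKey, group,
                "customerAPI-v1\nproductAPI-v1\nproductAPI-v2");

        // Each sub-item key is published as its own data id holding the sub-item value.
        configService.publishConfig("customerAPI-v1", group, "value of customerAPI-v1");
        configService.publishConfig("productAPI-v1", group, "value of productAPI-v1");
        configService.publishConfig("productAPI-v2", group, "value of productAPI-v2");
    }
}
```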
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/next/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system), after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same not required to respond the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. 
The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. 
The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. 
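The uuid handling described for the DCS above (the server may skip re-sending the config table when the caller's uuid has not changed) can be sketched without the gRPC plumbing. This is an illustrative sketch only, not SkyWalking's implementation; the way the uuid is derived here is an assumption.

```java
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.Optional;
import java.util.TreeMap;
import java.util.UUID;

public class DcsChangeDetection {
    /** Derive a stable uuid from the current config table (sorted for determinism). */
    static String uuidOf(Map<String, String> configTable) {
        StringBuilder sb = new StringBuilder();
        new TreeMap<>(configTable).forEach((k, v) -> sb.append(k).append('=').append(v).append('\n'));
        return UUID.nameUUIDFromBytes(sb.toString().getBytes(StandardCharsets.UTF_8)).toString();
    }

    /**
     * If the uuid sent by the OAP matches the current one, the server can skip
     * responding with the config data; otherwise it returns the full table.
     */
    static Optional<Map<String, String>> respond(String requestUuid, Map<String, String> configTable) {
        return uuidOf(configTable).equals(requestUuid) ? Optional.empty() : Optional.of(configTable);
    }

    public static void main(String[] args) {
        Map<String, String> table = Map.of(
                "agent-analyzer.default.slowDBAccessThreshold", "default:200,mongodb:50");
        String currentUuid = uuidOf(table);
        System.out.println(respond(currentUuid, table).isPresent()); // false: unchanged, nothing to send
        System.out.println(respond("stale-uuid", table).isPresent()); // true: send the full table
    }
}
```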
The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/next/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as Dynamic Configuration …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. 
log4j2 supports …","ref":"/docs/main/latest/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/next/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to update the XML configuration file manually, which could be time-consuming and prone to manmade mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. 
In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP has started, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.peroid in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example on how to config dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.peroid in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. 
If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. 
Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. 
The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","excerpt":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring …","ref":"/docs/main/latest/en/setup/backend/backend-ebpf-profiling/","title":"eBPF Profiling"},{"body":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. 
When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. 
Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. 
Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two processes. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics are analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data is extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, the OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, the data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using the HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transmission, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then the eBPF agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data is collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content is not collected. Data Direction: The data transfer direction, either Ingress or Egress. 
Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The time the request or response was sent/received.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response are also collected, including the following information:\n Method Name: Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","excerpt":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring …","ref":"/docs/main/next/en/setup/backend/backend-ebpf-profiling/","title":"eBPF Profiling"},
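The receiver-ebpf activation snippet earlier in this entry is flattened by the search index. Assuming it follows the usual SkyWalking application.yml module layout, it likely expands to the sketch below (a reconstruction, not copied from the original file):

```yaml
# Sketch: likely expansion of the flattened "receiver-ebpf" snippet above,
# assuming the standard application.yml module layout. Keeping the selector
# at "default" (SW_RECEIVER_EBPF=default) activates the module.
receiver-ebpf:
  selector: ${SW_RECEIVER_EBPF:default}
  default:
```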
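For the On CPU Profiling analysis result described above (Id, Parent ID, Symbol, Stack Type, Dump Count), a minimal sketch of turning the flat element list into a flame-graph tree might look like the following Go program. The struct fields, element values, and the empty-string root parent are illustrative assumptions, not the actual SkyWalking query schema:

```go
package main

import "fmt"

// Element mirrors the fields returned by the On CPU Profiling analysis query
// (Id, Parent ID, Symbol, Stack Type, Dump Count). Names are illustrative.
type Element struct {
	ID        string
	ParentID  string
	Symbol    string
	StackType string // KERNEL_SPACE or USER_SPACE
	DumpCount int
}

// buildChildren groups elements by parent ID so a flame graph can be rendered
// by walking from the root elements downwards.
func buildChildren(elements []Element) map[string][]Element {
	children := make(map[string][]Element)
	for _, e := range elements {
		children[e.ParentID] = append(children[e.ParentID], e)
	}
	return children
}

// render prints the tree depth-first; a node's dump count relative to its
// siblings approximates the width of its flame-graph frame.
func render(children map[string][]Element, parentID string, depth int) {
	for _, e := range children[parentID] {
		fmt.Printf("%*s%s (%s, samples=%d)\n", depth*2, "", e.Symbol, e.StackType, e.DumpCount)
		render(children, e.ID, depth+1)
	}
}

func main() {
	// Hypothetical query result; in practice this comes from the OAP.
	elements := []Element{
		{ID: "1", ParentID: "", Symbol: "main", StackType: "USER_SPACE", DumpCount: 100},
		{ID: "2", ParentID: "1", Symbol: "handleRequest", StackType: "USER_SPACE", DumpCount: 80},
		{ID: "3", ParentID: "2", Symbol: "tcp_sendmsg", StackType: "KERNEL_SPACE", DumpCount: 30},
	}
	render(buildChildren(elements), "", 0)
}
```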
{"body":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You can add the following config to elasticsearch.yml, and set the values based on your environment.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","excerpt":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch …","ref":"/docs/main/latest/en/faq/es-server-faq/","title":"ElasticSearch"},
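The elasticsearch.yml snippet in the FAQ entry above is flattened by the search index; restored to its likely YAML form, it reads:

```yaml
# In the case of tracing, consider setting a value higher than this.
thread_pool.index.queue_size: 1000
thread_pool.write.queue_size: 1000
# When you face query errors on the trace page, remember to check this.
index.max_result_window: 1000000
```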
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","excerpt":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch …","ref":"/docs/main/v9.7.0/en/faq/es-server-faq/","title":"ElasticSearch"},{"body":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. The storage provider is elasticsearch. This storage option is recommended for a large scale production environment, such as more than 1000 services, 10000 endpoints, and 100000 traces per minute, and plan to 100% sampling rate for the persistent in the storage.\nOpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  Elasticsearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_management (All SkyWalking management data, e.g. 
UI dashboard settings, UI Menu, Continuous profiling policy) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
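As an aside, the flattened storage block quoted above is easier to follow when laid out as it would appear in application.yaml. This is only a sketch of the most commonly edited options, with the same environment-variable defaults as in the text (not the full list); for HTTPS, switch protocol to "https", point clusterNodes at the TLS port, and fill in the trust-store entries as in the SSL example:

storage:
  selector: ${SW_STORAGE:elasticsearch}
  elasticsearch:
    namespace: ${SW_NAMESPACE:""}
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
    user: ${SW_ES_USER:""}            # set when HTTP Basic authentication is enabled
    password: ${SW_ES_PASSWORD:""}
    trustStorePath: ${SW_STORAGE_ES_SSL_JKS_PATH:""}
    trustStorePass: ${SW_STORAGE_ES_SSL_JKS_PASS:""}
    dayStep: ${SW_STORAGE_DAY_STEP:1}                             # days covered by one index
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}     # shards for new indices
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1} # replicas for new indices
    superDatasetIndexShardsFactor: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}
    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000}    # execute the async bulk every N requests
    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10}  # flush the bulk every 10 seconds whatever the number of requests
    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}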
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\n","excerpt":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. 
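The per-index override shown above reads more easily with the JSON unescaped. As a sketch of the same example (logical index names, excluding the configured namespace prefix; only number_of_shards and number_of_replicas are supported):

{
  "metrics-all": { "number_of_shards": "3", "number_of_replicas": "2" },
  "segment":     { "number_of_shards": "6", "number_of_replicas": "1" }
}

In application.yaml the same value is passed, quotes escaped, through SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:

storage:
  elasticsearch:
    specificIndexSettings: ${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:"{\"metrics-all\":{\"number_of_shards\":\"3\",\"number_of_replicas\":\"2\"},\"segment\":{\"number_of_shards\":\"6\",\"number_of_replicas\":\"1\"}}"}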
The storage …","ref":"/docs/main/v9.7.0/en/setup/backend/storages/elasticsearch/","title":"Elasticsearch and OpenSearch"},{"body":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","excerpt":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the …","ref":"/docs/main/latest/en/faq/es-version-conflict/","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0"},{"body":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","excerpt":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the …","ref":"/docs/main/v9.4.0/en/faq/es-version-conflict/","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0"},{"body":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","excerpt":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the …","ref":"/docs/main/v9.5.0/en/faq/es-version-conflict/","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0"},{"body":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","excerpt":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the …","ref":"/docs/main/v9.6.0/en/faq/es-version-conflict/","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0"},{"body":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This dramatically reduces the persistence workload on the OAP server and the CPU load on Elasticsearch.\nAs a side effect, there is a small chance that you will see warnings like the following in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This does not affect the system much; it only means some metrics may be slightly inaccurate. If the warning does not show up frequently, you can safely ignore it.\nIf you see many logs like this, it is a signal that the refresh (flush) period of your Elasticsearch index template cannot keep up with your settings, or that the persistentPeriod is set to less than the flush period.\n","excerpt":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the …","ref":"/docs/main/v9.7.0/en/faq/es-version-conflict/","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0"},
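The last paragraph of this FAQ relates the OAP persistentPeriod to the Elasticsearch refresh interval. Below is a minimal, illustrative sketch of how that setting might look in the OAP application.yml; the exact key path, the default value, and the SW_CORE_PERSISTENT_PERIOD environment variable are assumptions to verify against your own deployment, not the authoritative configuration.

```yaml
# Hypothetical excerpt from the OAP application.yml (key names and defaults are illustrative).
core:
  default:
    # Period, in seconds, at which buffered metrics are flushed to storage.
    # Keeping this value no smaller than the refresh_interval of your Elasticsearch
    # index templates avoids bulk updates racing the index refresh, which is what
    # surfaces as version_conflict_engine_exception warnings.
    persistentPeriod: ${SW_CORE_PERSISTENT_PERIOD:25}
```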
{"body":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter to collect metrics data from Elasticsearch, and the OpenTelemetry Collector to transfer those metrics to the OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collects metrics data from Elasticsearch. The OpenTelemetry Collector fetches metrics from elasticsearch-exporter via the Prometheus Receiver and pushes them to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expressions with MAL to filter/calculate/aggregate the metrics and stores the results.  Setup  Set up elasticsearch-exporter. Set up the OpenTelemetry Collector. For an example OpenTelemetry Collector configuration, refer to here. Configure the SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as a Layer: ELASTICSEARCH Service in the OAP. In each cluster, the nodes are represented as Instances and the indices as Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block devices elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5 meter_elasticsearch_node_os_load15 Short-term, mid-term and long-term load average elasticsearch-exporter   JVM Memory Usage MB 
meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_used meter_elasticsearch_node_jvm_memory_heap_max JVM memory currently used by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytes meter_elasticsearch_node_network_receive_bytes Total number of bytes sent and received elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes written to disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  
meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. 
time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_rate meter_elasticsearch_index_stats_search_fetch_total_req_rate meter_elasticsearch_index_stats_search_scroll_total_req_rate meter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shard on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expressions/dashboard panels. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","excerpt":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data …","ref":"/docs/main/latest/en/setup/backend/backend-elasticsearch-monitoring/","title":"Elasticsearch monitoring"},
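The Setup steps in the entry above describe wiring elasticsearch-exporter to the OAP through the OpenTelemetry Collector. Below is a minimal sketch of such a Collector configuration; the exporter address elasticsearch-exporter:9114, the OAP endpoint oap:11800, and the skywalking_service label are illustrative assumptions rather than the documented example, so adjust them to your environment.

```yaml
# Illustrative OpenTelemetry Collector configuration; endpoints and labels are assumptions.
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: elasticsearch-monitoring
          scrape_interval: 30s
          static_configs:
            - targets: [elasticsearch-exporter:9114]  # assumed exporter address
              labels:
                skywalking_service: my-es-cluster     # assumed label naming the ELASTICSEARCH layer service

processors:
  batch: {}

exporters:
  otlp:
    endpoint: oap:11800   # assumed SkyWalking OAP OTLP/gRPC receiver address
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
```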
In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB 
meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  
meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. 
time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shards on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with only all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","excerpt":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data …","ref":"/docs/main/next/en/setup/backend/backend-elasticsearch-monitoring/","title":"Elasticsearch monitoring"},{"body":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collect metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup elasticsearch-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. 
In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB 
meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  
meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. 
time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shards on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with only all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","excerpt":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-elasticsearch-monitoring/","title":"Elasticsearch monitoring"},{"body":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collect metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup elasticsearch-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. 
In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB 
meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  
meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. 
time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shards on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with only all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","excerpt":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-elasticsearch-monitoring/","title":"Elasticsearch monitoring"},{"body":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collect metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup elasticsearch-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. 
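As a note on the Setup steps above: the OpenTelemetry Collector configuration they reference lives in the SkyWalking repository. Purely as an illustrative sketch (the addresses, job name, and scrape interval below are placeholders and not values taken from that repository), a Collector that scrapes elasticsearch-exporter via the Prometheus receiver and forwards the metrics to the OAP OTLP endpoint could look like this:
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: elasticsearch-monitoring        # arbitrary job label
          scrape_interval: 30s
          static_configs:
            - targets: ['elasticsearch-exporter:9114']   # placeholder address of elasticsearch-exporter
processors:
  batch: {}
exporters:
  otlp:
    endpoint: oap:11800        # placeholder address of the SkyWalking OAP OTLP/gRPC receiver
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
The bundled example and any label conventions expected by the /config/otel-rules/elasticsearch rules should be taken from the configuration referenced in the Setup section rather than from this sketch.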
In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB 
meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  
meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. 
time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shards on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with only all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","excerpt":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-elasticsearch-monitoring/","title":"Elasticsearch monitoring"},{"body":"Enable/Disable Channel Different channels mean that different protocols can be transparently transmitted to upstream services(OAP).\nConfig In the Satellite configuration, a channel is represented under the configured pipes. By default, we open all channels and process all known protocols.\nYou could delete the channel if you don\u0026rsquo;t want to receive and transmit in satellite.\nAfter restart the satellite service, then the channel what you delete is disable.\n","excerpt":"Enable/Disable Channel Different channels mean that different protocols can be transparently …","ref":"/docs/skywalking-satellite/latest/en/setup/examples/feature/enable-disable-channel/readme/","title":"Enable/Disable Channel"},{"body":"Enable/Disable Channel Different channels mean that different protocols can be transparently transmitted to upstream services(OAP).\nConfig In the Satellite configuration, a channel is represented under the configured pipes. 
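For illustration only, a pipe (channel) entry in the Satellite configuration roughly takes the shape below; the key and plugin names are assumptions that may differ between Satellite versions, so treat them as placeholders and consult the satellite_config.yaml shipped with your release.
pipes:
  - common:
      pipe_name: logpipe                           # hypothetical channel name
    gatherer:
      server_name: "grpc-server"
      receiver:
        plugin_name: "grpc-nativelog-receiver"     # assumed receiver plugin for this channel
    # processor and sender sections omitted for brevity
# Deleting (or commenting out) an entire entry under pipes disables that channel
# after the Satellite service is restarted.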
By default, we open all channels and process all known protocols.\nYou can delete a channel if you don\u0026rsquo;t want Satellite to receive and transmit that protocol.\nAfter restarting the Satellite service, the deleted channel is disabled.\n","excerpt":"Enable/Disable Channel Different channels mean that different protocols can be transparently …","ref":"/docs/skywalking-satellite/next/en/setup/examples/feature/enable-disable-channel/readme/","title":"Enable/Disable Channel"},{"body":"Enable/Disable Channel Different channels mean that different protocols can be transparently transmitted to upstream services (OAP).\nConfig In the Satellite configuration, a channel is represented under the configured pipes. By default, we open all channels and process all known protocols.\nYou can delete a channel if you don\u0026rsquo;t want Satellite to receive and transmit that protocol.\nAfter restarting the Satellite service, the deleted channel is disabled.\n","excerpt":"Enable/Disable Channel Different channels mean that different protocols can be transparently …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/feature/enable-disable-channel/readme/","title":"Enable/Disable Channel"},{"body":"End to End Tests (E2E) SkyWalking relies heavily on automated tests to perform software quality assurance, and E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E tests in SkyWalking always set up the OAP, monitored services, and related remote server dependencies in a real environment, and verify the data flow and the final query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we have migrated to e2e-v2, which leverages skywalking-infra-e2e and skywalking-cli to drive the whole E2E process: skywalking-infra-e2e controls the E2E process, and skywalking-cli interacts with the OAP, for example to request and check metrics from it.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these two are enough) and any other tools your cases need. You can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), in single-node mode as well as cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using docker-compose, which provides a simple file format (docker-compose.yml) for orchestrating the required containers and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. 
ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","excerpt":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality …","ref":"/docs/main/latest/en/guides/e2e/","title":"End to End Tests (E2E)"},{"body":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking is always setting the OAP, monitored services and relative remote server dependencies in a real environment, and verify the dataflow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. 
ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","excerpt":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality …","ref":"/docs/main/next/en/guides/e2e/","title":"End to End Tests (E2E)"},{"body":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking is always setting the OAP, monitored services and relative remote server dependencies in a real environment, and verify the dataflow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. 
ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","excerpt":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality …","ref":"/docs/main/v9.6.0/en/guides/e2e/","title":"End to End Tests (E2E)"},{"body":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking is always setting the OAP, monitored services and relative remote server dependencies in a real environment, and verify the dataflow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. 
ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","excerpt":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality …","ref":"/docs/main/v9.7.0/en/guides/e2e/","title":"End to End Tests (E2E)"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. 
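Putting the fields together, a reported Upgrade event could look roughly like the sketch below. The values are invented, and the field spellings simply follow the descriptions in this doc; the authoritative wire format is the one in the protocol repo.
uuid: abcdef12-3456-7890-abcd-ef1234567890   # invented UUID shared by the start and end reports
source:
  service: demo-service                      # placeholder service name
  serviceInstance: demo-instance-1           # placeholder instance name
name: Upgrade
type: Normal
message: Upgrade from v1.0.0 to v1.1.0
parameters:
  from_version: v1.0.0
  to_version: v1.1.0
startTime: 1700000000000                     # epoch milliseconds, invented
endTime: 1700000060000                       # may instead be sent later in a second report with the same UUID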
It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/latest/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. 
SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    
The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/next/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nHow to Configure Alarms for Events Events derive from metrics, and can be the source to trigger alarms. 
For example, if a specific event occurs a certain number of times within a period, alarms can be triggered and sent.\nEvery event has a default value = 1; when n events with the same name are reported, they are aggregated into value = n as follows.\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nwill be aggregated into\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} \u0026lt;value = 6\u0026gt;\nso you can configure the following alarm rule to trigger an alarm when the Unhealthy event occurs more than 5 times within 10 minutes.\nrules:\n  unhealthy_event_rule:\n    metrics-name: Unhealthy\n    # Healthiness check is usually a scheduled task,\n    # they may be unhealthy for the first few times,\n    # and can be unhealthy occasionally due to network jitter,\n    # please adjust the threshold as per your actual situation.\n    threshold: 5\n    op: \u0026#34;\u0026gt;\u0026#34;\n    period: 10\n    count: 1\n    message: Service instance has been unhealthy for 10 minutes\nFor more alarm configuration details, please refer to the alarm doc.\nNote that the Unhealthy event above is only for demonstration; it is not detected by default in SkyWalking. However, you can use the methods in How to Report Events to report this kind of event.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nBy default, SkyWalking also generates some metrics for events by using OAL. The default metrics list of events may change over time; you can find the complete list in event.oal. If you want to generate your own custom metrics from events, please refer to OAL about how to write OAL rules.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter; in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killed. Reported by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reported by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reported by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reported by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reported by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe fails. Reported by Kubernetes Event Exporter.    
The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nHow to Configure Alarms for Events Events derive from metrics, and can be the source to trigger alarms. 
For example, if a specific event occurs for a certain times in a period, alarms can be triggered and sent.\nEvery event has a default value = 1, when n events with the same name are reported, they are aggregated into value = n as follows.\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} will be aggregated into\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} \u0026lt;value = 6\u0026gt; so you can configure the following alarm rule to trigger alarm when Unhealthy event occurs more than 5 times within 10 minutes.\nrules:unhealthy_event_rule:metrics-name:Unhealthy# Healthiness check is usually a scheduled task,# they may be unhealthy for the first few times,# and can be unhealthy occasionally due to network jitter,# please adjust the threshold as per your actual situation.threshold:5op:\u0026#34;\u0026gt;\u0026#34;period:10count:1message:Service instance has been unhealthy for 10 minutesFor more alarm configuration details, please refer to the alarm doc.\nNote that the Unhealthy event above is only for demonstration, they are not detected by default in SkyWalking, however, you can use the methods in How to Report Events to report this kind of events.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. 
Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    
The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. 
This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. 
Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    
The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. 
For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. 
This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. 
Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   
Pulling Normal When a Docker image is being pulled for deployment. Reported by Kubernetes Event Exporter.   Pulled Normal When a Docker image is pulled for deployment. Reported by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reported by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reported by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe fails. Reported by Kubernetes Event Exporter.    The complete event list can be found in the Kubernetes codebase; please note that not all events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/event/","title":"Events"},{"body":"SkyWalking events.\n","excerpt":"SkyWalking events.","ref":"/events/","title":"Events"},{"body":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for the start of the event and the other for the end of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  
// This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the …","ref":"/docs/main/latest/en/api/event/","title":"Events Report Protocol"},{"body":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  
// Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the …","ref":"/docs/main/next/en/api/event/","title":"Events Report Protocol"},{"body":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  
string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the …","ref":"/docs/main/v9.4.0/en/api/event/","title":"Events Report Protocol"},{"body":"Events Report Protocol The protocol is used to report events to the backend. 
The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the …","ref":"/docs/main/v9.5.0/en/api/event/","title":"Events Report Protocol"},{"body":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  
string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the …","ref":"/docs/main/v9.6.0/en/api/event/","title":"Events Report Protocol"},{"body":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  
string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the …","ref":"/docs/main/v9.7.0/en/api/event/","title":"Events Report Protocol"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). 
All names must match the OAL/MAL script definition. Return an empty list if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For the metric value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace The Kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as an HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and configure the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true means that only trace segments with error status are exported through the Kafka channel.  Log Kafka Exporter Log The Kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and configure the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/latest/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. 
Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER:default} and ${SW_EXPORTER_ENABLE_GRPC_METRICS:true}, configure the target gRPC server address.\nexporter:selector:${SW_EXPORTER:default}default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER:default} and ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}, configure the Kafka server addresses.\nexporter:selector:${SW_EXPORTER:default}default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. 
Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER:default} and ${SW_EXPORTER_ENABLE_KAFKA_LOG:true}, configure the Kafka server addresses.\nexporter:selector:${SW_EXPORTER:default}default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/next/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics   Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. 
message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}...Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/v9.3.0/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics   Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. 
Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}...Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/v9.4.0/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. 
Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. 
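The log channel works the same way on the consuming side: read the skywalking-export-log topic and decode each record value into LogData, whose fields are listed in the surrounding entries. The sketch below mirrors the trace consumer above; the org.apache.skywalking.apm.network.logging.v3 package name is an assumption that may differ by protocol version.

```java
// Minimal sketch of a consumer for the Kafka log export channel; the Kafka settings mirror the
// trace consumer sketched earlier. Assumption: LogData is compiled from SkyWalking's Logging.proto;
// the package name below may differ depending on the protocol version you build against.
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.utils.Bytes;
import org.apache.skywalking.apm.network.logging.v3.LogData;

public class LogExportConsumer {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "log-export-consumer");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class.getName());

        try (KafkaConsumer<String, Bytes> consumer = new KafkaConsumer<>(props)) {
            // Topic name matches the default ${SW_EXPORTER_KAFKA_TOPIC_LOG} shown in the configuration.
            consumer.subscribe(Collections.singletonList("skywalking-export-log"));
            while (true) {
                for (ConsumerRecord<String, Bytes> record : consumer.poll(Duration.ofSeconds(1))) {
                    // The record key is the log record id; the value is the serialized LogData.
                    LogData log = LogData.parseFrom(record.value().get());
                    System.out.printf("key=%s service=%s endpoint=%s%n",
                        record.key(), log.getService(), log.getEndpoint());
                }
            }
        }
    }
}
```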
Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/v9.5.0/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. 
message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/v9.6.0/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. 
Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/v9.7.0/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. 
Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/latest/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/next/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  
Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.0.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.1.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. 
Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.2.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.3.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  
","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.4.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.5.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  
","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.6.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.7.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","excerpt":"Extend storage SkyWalking has already provided several storage solutions. In this document, you …","ref":"/docs/main/v9.0.0/en/guides/storage-extention/","title":"Extend storage"},{"body":"Extend storage SkyWalking has already provided several storage solutions. 
In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","excerpt":"Extend storage SkyWalking has already provided several storage solutions. In this document, you …","ref":"/docs/main/v9.1.0/en/guides/storage-extention/","title":"Extend storage"},{"body":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","excerpt":"Extend storage SkyWalking has already provided several storage solutions. In this document, you …","ref":"/docs/main/v9.2.0/en/guides/storage-extention/","title":"Extend storage"},{"body":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","excerpt":"Extend storage SkyWalking has already provided several storage solutions. In this document, you …","ref":"/docs/main/v9.3.0/en/guides/storage-extention/","title":"Extend storage"},{"body":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","excerpt":"Extend storage SkyWalking has already provided several storage solutions. In this document, you …","ref":"/docs/main/v9.4.0/en/guides/storage-extention/","title":"Extend storage"},{"body":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","excerpt":"Extend storage SkyWalking has already provided several storage solutions. In this document, you …","ref":"/docs/main/v9.5.0/en/guides/storage-extention/","title":"Extend storage"},{"body":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data. …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/fallbacker_none-fallbacker/","title":"Fallbacker/none-fallbacker"},{"body":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data. …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/fallbacker_none-fallbacker/","title":"Fallbacker/none-fallbacker"},{"body":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data. 
…","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/fallbacker_none-fallbacker/","title":"Fallbacker/none-fallbacker"},{"body":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward failure data.\nDefaultConfig # The forwarder max attempt times.max_attempts:3# The exponential_backoff is the standard retry duration, and the time for each retry is expanded# by 2 times until the number of retries reaches the maximum.(Time unit is millisecond.)exponential_backoff:2000# The max backoff time used in retrying, which would override the latency time when the latency time# with exponential increasing larger than it.(Time unit is millisecond.)max_backoff:5000Configuration    Name Type Description     max_attempts int    exponential_backoff int    max_backoff int     ","excerpt":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/fallbacker_timer-fallbacker/","title":"Fallbacker/timer-fallbacker"},{"body":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward failure data.\nDefaultConfig # The forwarder max attempt times.max_attempts:3# The exponential_backoff is the standard retry duration, and the time for each retry is expanded# by 2 times until the number of retries reaches the maximum.(Time unit is millisecond.)exponential_backoff:2000# The max backoff time used in retrying, which would override the latency time when the latency time# with exponential increasing larger than it.(Time unit is millisecond.)max_backoff:5000Configuration    Name Type Description     max_attempts int    exponential_backoff int    max_backoff int     ","excerpt":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/fallbacker_timer-fallbacker/","title":"Fallbacker/timer-fallbacker"},{"body":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward failure data.\nDefaultConfig # The forwarder max attempt times.max_attempts:3# The exponential_backoff is the standard retry duration, and the time for each retry is expanded# by 2 times until the number of retries reaches the maximum.(Time unit is millisecond.)exponential_backoff:2000# The max backoff time used in retrying, which would override the latency time when the latency time# with exponential increasing larger than it.(Time unit is millisecond.)max_backoff:5000Configuration    Name Type Description     max_attempts int    exponential_backoff int    max_backoff int     ","excerpt":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/fallbacker_timer-fallbacker/","title":"Fallbacker/timer-fallbacker"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  
Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/latest/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default? Why is Clickhouse or Loki or xxx not supported as a storage option?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/next/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. 
We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.0.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.1.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  
Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.2.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.3.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  
Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.4.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.5.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  
Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.6.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. 
We welcome you to contribute …","ref":"/docs/main/v9.7.0/en/faq/readme/","title":"FAQs"},{"body":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a Fetcher to fetch Istio control plane metrics, then push them to OAP server.\nInstall Operator Follow Operator installation instrument to install the operator.\nInstall Istio control plane Follow Install with istioctl to install a istiod.\nDeploy Fetcher, OAP server and UI with default settings Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f fetcher.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui,fetcher NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] NAME AGE fetcher.operator.skywalking.apache.org/istio-prod-cluster 36h View Istio Control Plane Dashboard from UI Follow View the UI to access the UI service.\nNavigate to Dashboard-\u0026gt;Istio Control Plane to view relevant metric diagrams.\n","excerpt":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a …","ref":"/docs/skywalking-swck/latest/examples/istio-controlplane/","title":"Fetch metrics from the Istio control plane(istiod)"},{"body":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a Fetcher to fetch Istio control plane metrics, then push them to OAP server.\nInstall Operator Follow Operator installation instrument to install the operator.\nInstall Istio control plane Follow Install with istioctl to install a istiod.\nDeploy Fetcher, OAP server and UI with default settings Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f fetcher.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui,fetcher NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] NAME AGE fetcher.operator.skywalking.apache.org/istio-prod-cluster 36h View Istio Control Plane Dashboard from UI Follow View the UI to access the UI service.\nNavigate to Dashboard-\u0026gt;Istio Control Plane to view relevant metric diagrams.\n","excerpt":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a …","ref":"/docs/skywalking-swck/next/examples/istio-controlplane/","title":"Fetch metrics from the Istio control plane(istiod)"},{"body":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a Fetcher to fetch Istio control plane metrics, then push them to OAP server.\nInstall Operator Follow Operator installation instrument to install the operator.\nInstall Istio control plane Follow Install with istioctl to install a istiod.\nDeploy Fetcher, OAP server and UI with default settings Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f fetcher.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui,fetcher NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 
default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] NAME AGE fetcher.operator.skywalking.apache.org/istio-prod-cluster 36h View Istio Control Plane Dashboard from UI Follow View the UI to access the UI service.\nNavigate to Dashboard-\u0026gt;Istio Control Plane to view relevant metric diagrams.\n","excerpt":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a …","ref":"/docs/skywalking-swck/v0.9.0/examples/istio-controlplane/","title":"Fetch metrics from the Istio control plane(istiod)"},{"body":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-als-v2-grpc-forwarder/","title":"Forwarder/envoy-als-v2-grpc-forwarder"},{"body":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-als-v2-grpc-forwarder/","title":"Forwarder/envoy-als-v2-grpc-forwarder"},{"body":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-als-v2-grpc-forwarder/","title":"Forwarder/envoy-als-v2-grpc-forwarder"},{"body":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-als-v3-grpc-forwarder/","title":"Forwarder/envoy-als-v3-grpc-forwarder"},{"body":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-als-v3-grpc-forwarder/","title":"Forwarder/envoy-als-v3-grpc-forwarder"},{"body":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-als-v3-grpc-forwarder/","title":"Forwarder/envoy-als-v3-grpc-forwarder"},{"body":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc forwarder with the Envoy 
metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-metrics-v2-grpc-forwarder/","title":"Forwarder/envoy-metrics-v2-grpc-forwarder"},{"body":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-metrics-v2-grpc-forwarder/","title":"Forwarder/envoy-metrics-v2-grpc-forwarder"},{"body":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-metrics-v2-grpc-forwarder/","title":"Forwarder/envoy-metrics-v2-grpc-forwarder"},{"body":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-metrics-v3-grpc-forwarder/","title":"Forwarder/envoy-metrics-v3-grpc-forwarder"},{"body":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-metrics-v3-grpc-forwarder/","title":"Forwarder/envoy-metrics-v3-grpc-forwarder"},{"body":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-metrics-v3-grpc-forwarder/","title":"Forwarder/envoy-metrics-v3-grpc-forwarder"},{"body":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-cds-grpc-forwarder/","title":"Forwarder/native-cds-grpc-forwarder"},{"body":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder 
with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-cds-grpc-forwarder/","title":"Forwarder/native-cds-grpc-forwarder"},{"body":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-cds-grpc-forwarder/","title":"Forwarder/native-cds-grpc-forwarder"},{"body":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-clr-grpc-forwarder/","title":"Forwarder/native-clr-grpc-forwarder"},{"body":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-clr-grpc-forwarder/","title":"Forwarder/native-clr-grpc-forwarder"},{"body":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-clr-grpc-forwarder/","title":"Forwarder/native-clr-grpc-forwarder"},{"body":"Forwarder/native-ebpf-accesslog-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native eBPF access log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-ebpf-accesslog-grpc-forwarder Description This is a synchronization grpc forwarder …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-ebpf-accesslog-grpc-forwarder/","title":"Forwarder/native-ebpf-accesslog-grpc-forwarder"},{"body":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-ebpf-profiling-grpc-forwarder/","title":"Forwarder/native-ebpf-profiling-grpc-forwarder"},{"body":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder 
…","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-ebpf-profiling-grpc-forwarder/","title":"Forwarder/native-ebpf-profiling-grpc-forwarder"},{"body":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-ebpf-profiling-grpc-forwarder/","title":"Forwarder/native-ebpf-profiling-grpc-forwarder"},{"body":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native event protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-event-grpc-forwarder/","title":"Forwarder/native-event-grpc-forwarder"},{"body":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native event protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-event-grpc-forwarder/","title":"Forwarder/native-event-grpc-forwarder"},{"body":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native event protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-event-grpc-forwarder/","title":"Forwarder/native-event-grpc-forwarder"},{"body":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-jvm-grpc-forwarder/","title":"Forwarder/native-jvm-grpc-forwarder"},{"body":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-jvm-grpc-forwarder/","title":"Forwarder/native-jvm-grpc-forwarder"},{"body":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the 
…","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-jvm-grpc-forwarder/","title":"Forwarder/native-jvm-grpc-forwarder"},{"body":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-log-grpc-forwarder/","title":"Forwarder/native-log-grpc-forwarder"},{"body":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-log-grpc-forwarder/","title":"Forwarder/native-log-grpc-forwarder"},{"body":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-log-grpc-forwarder/","title":"Forwarder/native-log-grpc-forwarder"},{"body":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the SkyWalking native log protocol.\nDefaultConfig # The remote topic. topic:\u0026#34;log-topic\u0026#34;Configuration    Name Type Description     topic string The forwarder topic.    ","excerpt":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-log-kafka-forwarder/","title":"Forwarder/native-log-kafka-forwarder"},{"body":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the SkyWalking native log protocol.\nDefaultConfig # The remote topic. topic:\u0026#34;log-topic\u0026#34;Configuration    Name Type Description     topic string The forwarder topic.    ","excerpt":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-log-kafka-forwarder/","title":"Forwarder/native-log-kafka-forwarder"},{"body":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the SkyWalking native log protocol.\nDefaultConfig # The remote topic. topic:\u0026#34;log-topic\u0026#34;Configuration    Name Type Description     topic string The forwarder topic.    
","excerpt":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-log-kafka-forwarder/","title":"Forwarder/native-log-kafka-forwarder"},{"body":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native management protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-management-grpc-forwarder/","title":"Forwarder/native-management-grpc-forwarder"},{"body":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native management protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-management-grpc-forwarder/","title":"Forwarder/native-management-grpc-forwarder"},{"body":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native management protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-management-grpc-forwarder/","title":"Forwarder/native-management-grpc-forwarder"},{"body":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder with the SkyWalking meter protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180Configuration    Name Type Description     routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","excerpt":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-meter-grpc-forwarder/","title":"Forwarder/native-meter-grpc-forwarder"},{"body":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder with the SkyWalking meter protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180Configuration    Name Type Description     routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    
","excerpt":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-meter-grpc-forwarder/","title":"Forwarder/native-meter-grpc-forwarder"},{"body":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder with the SkyWalking meter protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180Configuration    Name Type Description     routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","excerpt":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-meter-grpc-forwarder/","title":"Forwarder/native-meter-grpc-forwarder"},{"body":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-process-grpc-forwarder/","title":"Forwarder/native-process-grpc-forwarder"},{"body":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-process-grpc-forwarder/","title":"Forwarder/native-process-grpc-forwarder"},{"body":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-process-grpc-forwarder/","title":"Forwarder/native-process-grpc-forwarder"},{"body":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-profile-grpc-forwarder/","title":"Forwarder/native-profile-grpc-forwarder"},{"body":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with 
…","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-profile-grpc-forwarder/","title":"Forwarder/native-profile-grpc-forwarder"},{"body":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-profile-grpc-forwarder/","title":"Forwarder/native-profile-grpc-forwarder"},{"body":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native tracing protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-tracing-grpc-forwarder/","title":"Forwarder/native-tracing-grpc-forwarder"},{"body":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native tracing protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-tracing-grpc-forwarder/","title":"Forwarder/native-tracing-grpc-forwarder"},{"body":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native tracing protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-tracing-grpc-forwarder/","title":"Forwarder/native-tracing-grpc-forwarder"},{"body":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with the OpenTelemetry metrics v1 protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180# The label key of the routing data, multiple keys are split by \u0026#34;,\u0026#34;routing_label_keys:net.host.name,host.name,job,service.nameConfiguration    Name Type Description     routing_label_keys string The label key of the routing data, multiple keys are split by \u0026ldquo;,\u0026rdquo;   routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    
","excerpt":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_otlp-metrics-v1-grpc-forwarder/","title":"Forwarder/otlp-metrics-v1-grpc-forwarder"},{"body":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with the OpenTelemetry metrics v1 protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180# The label key of the routing data, multiple keys are split by \u0026#34;,\u0026#34;routing_label_keys:net.host.name,host.name,job,service.nameConfiguration    Name Type Description     routing_label_keys string The label key of the routing data, multiple keys are split by \u0026ldquo;,\u0026rdquo;   routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","excerpt":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_otlp-metrics-v1-grpc-forwarder/","title":"Forwarder/otlp-metrics-v1-grpc-forwarder"},{"body":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with the OpenTelemetry metrics v1 protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180# The label key of the routing data, multiple keys are split by \u0026#34;,\u0026#34;routing_label_keys:net.host.name,host.name,job,service.nameConfiguration    Name Type Description     routing_label_keys string The label key of the routing data, multiple keys are split by \u0026ldquo;,\u0026rdquo;   routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","excerpt":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_otlp-metrics-v1-grpc-forwarder/","title":"Forwarder/otlp-metrics-v1-grpc-forwarder"},{"body":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... 
--- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the complier\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls banyand/build/bin bydbctl ","excerpt":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries …","ref":"/docs/skywalking-banyandb/latest/installation/binaries/","title":"Get Binaries"},{"body":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.22 Node 20.12 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release or make -C banyand static builds out a static binary banyand-server-static for releasing. make -C banyand debug gives a binary for debugging without the complier\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  
Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static $ ls bydbctl/build/bin bydbctl bydbctl--darwin-amd64 bydbctl--darwin-arm64 bydbctl--linux-386 bydbctl--linux-amd64 bydbctl--linux-arm64 bydbctl--windows-386 bydbctl--windows-amd64 ","excerpt":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries …","ref":"/docs/skywalking-banyandb/next/installation/binaries/","title":"Get Binaries"},{"body":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the complier\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls banyand/build/bin bydbctl ","excerpt":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries …","ref":"/docs/skywalking-banyandb/v0.5.0/installation/binaries/","title":"Get Binaries"},{"body":"Getting Started This document introduces how to create a kubernetes cluster locally using kind and how to deploy the basic skywalking components to the cluster.\nPrerequisites  docker \u0026gt;= v20.10.6 kubectl \u0026gt;= v1.21.0 kind \u0026gt;= v0.20.0 swctl \u0026gt;= v0.10.0  Step1: Create a kubernetes cluster locally using kind  Note: If you have a kubernetes cluster (\u0026gt; v1.21.10) already, you can skip this step.\n Here we create a kubernetes cluster with 1 control-plane node and 1 worker nodes.\n$ cat \u0026lt;\u0026lt;EOF | kind create cluster --config=- kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane image: kindest/node:v1.21.10 - role: worker image: kindest/node:v1.21.10 EOF  Expected output Creating cluster \u0026#34;kind\u0026#34; ... 
✓ Ensuring node image (kindest/node:v1.21.10) 🖼 ✓ Preparing nodes 📦 📦 ✓ Writing configuration 📜 ✓ Starting control-plane 🕹️ ✓ Installing CNI 🔌 ✓ Installing StorageClass 💾 ✓ Joining worker nodes 🚜 Set kubectl context to \u0026#34;kind-kind\u0026#34; You can now use your cluster with: kubectl cluster-info --context kind-kind Not sure what to do next? 😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/  Check all pods in the cluster.\n$ kubectl get pods -A  Expected output NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-558bd4d5db-h5gxt 1/1 Running 0 106s kube-system coredns-558bd4d5db-lhnvz 1/1 Running 0 106s kube-system etcd-kind-control-plane 1/1 Running 0 116s kube-system kindnet-fxlkm 1/1 Running 0 106s kube-system kindnet-vmcvl 1/1 Running 0 91s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 116s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 116s kube-system kube-proxy-nr4f4 1/1 Running 0 91s kube-system kube-proxy-zl4h2 1/1 Running 0 106s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 116s local-path-storage local-path-provisioner-74567d47b4-kmtjh 1/1 Running 0 106s  Step2: Build the operator image Check into the root directory of SWCK and build the operator image as follows.\n$ cd operator # Build the operator image $ make docker-build You will get the operator image controller:latest as follows.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE controller latest 84da7509092a 22 seconds ago 53.6MB Load the operator image into the kind cluster or push the image to a registry that your kubernetes cluster can access.\n$ kind load docker-image controller or\n$ docker push $(YOUR_REGISTRY)/controller Step3: Deploy operator on the kubernetes cluster Install the CRDs as follows.\n$ make install Check the CRDs are installed successfully.\n Expected output kubectl get crd | grep skywalking banyandbs.operator.skywalking.apache.org 2023-11-05T03:30:43Z fetchers.operator.skywalking.apache.org 2023-11-05T03:30:43Z javaagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverdynamicconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapservers.operator.skywalking.apache.org 2023-11-05T03:30:43Z satellites.operator.skywalking.apache.org 2023-11-05T03:30:43Z storages.operator.skywalking.apache.org 2023-11-05T03:30:43Z swagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z uis.operator.skywalking.apache.org 2023-11-05T03:30:43Z  Deploy the SWCK operator to the cluster.\n$ make deploy Or deploy the SWCK operator to the cluster with your own image.\n$ make deploy OPERATOR_IMG=$(YOUR_REGISTRY)/controller Get the status of the SWCK operator pod.\n$ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-5f5bbd4fd-9wdw6 2/2 Running 0 34s Step4: Deploy skywalking componentes on the kubernetes cluster Create the skywalking-system namespace.\n$ kubectl create namespace skywalking-system Deploy the skywalking components to the cluster.\n$ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: OAPServer metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-oap-server:9.5.0 service: template: type: ClusterIP --- apiVersion: operator.skywalking.apache.org/v1alpha1 kind: UI metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: 
apache/skywalking-ui:9.5.0 OAPServerAddress: http://skywalking-system-oap.skywalking-system:12800 service: template: type: ClusterIP ingress: host: demo.ui.skywalking EOF Check the status of the skywalking components.\n$ kubectl get pod -n skywalking-system NAME READY STATUS RESTARTS AGE skywalking-system-oap-68bd877f57-fhzdz 1/1 Running 0 6m23s skywalking-system-ui-6db8579b47-rphtl 1/1 Running 0 6m23s Step5: Use the java agent injector to inject the java agent into the application pod Label the namespace where the application pod is located with swck-injection=enabled.\n$ kubectl label namespace skywalking-system swck-injection=enabled Create the application pod.\n Note: The application pod must be labeled with swck-java-agent-injected=true and the agent.skywalking.apache.org/collector.backend_service annotation must be set to the address of the OAP server. For more configurations, please refer to the guide.\n $ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: apps/v1 kind: Deployment metadata: name: demo namespace: skywalking-system spec: selector: matchLabels: app: demo template: metadata: labels: # enable the java agent injector swck-java-agent-injected: \u0026#34;true\u0026#34; app: demo annotations: agent.skywalking.apache.org/collector.backend_service: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34; spec: containers: - name: demo1 imagePullPolicy: IfNotPresent image: ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1 command: [\u0026#34;java\u0026#34;] args: [\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;] ports: - containerPort: 8085 readinessProbe: httpGet: path: /hello port: 8085 initialDelaySeconds: 3 periodSeconds: 3 failureThreshold: 10 --- apiVersion: v1 kind: Service metadata: name: demo namespace: skywalking-system spec: type: ClusterIP ports: - name: 8085-tcp port: 8085 protocol: TCP targetPort: 8085 selector: app: demo EOF Check the status of the application pod and make sure the java agent is injected into the application pod.\n$ kubectl get pod -n skywalking-system -l app=demo -ojsonpath=\u0026#39;{.items[0].spec.initContainers[0]}\u0026#39;  Expected output {\u0026#34;args\u0026#34;:[\u0026#34;-c\u0026#34;,\u0026#34;mkdir -p /sky/agent \\u0026\\u0026 cp -r /skywalking/agent/* /sky/agent\u0026#34;],\u0026#34;command\u0026#34;:[\u0026#34;sh\u0026#34;],\u0026#34;image\u0026#34;:\u0026#34;apache/skywalking-java-agent:8.16.0-java8\u0026#34;,\u0026#34;imagePullPolicy\u0026#34;:\u0026#34;IfNotPresent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;inject-skywalking-agent\u0026#34;,\u0026#34;resources\u0026#34;:{},\u0026#34;terminationMessagePath\u0026#34;:\u0026#34;/dev/termination-log\u0026#34;,\u0026#34;terminationMessagePolicy\u0026#34;:\u0026#34;File\u0026#34;,\u0026#34;volumeMounts\u0026#34;:[{\u0026#34;mountPath\u0026#34;:\u0026#34;/sky/agent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;sky-agent\u0026#34;},{\u0026#34;mountPath\u0026#34;:\u0026#34;/var/run/secrets/kubernetes.io/serviceaccount\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;kube-api-access-4qk26\u0026#34;,\u0026#34;readOnly\u0026#34;:true}]}  Also, you could check the final java agent configurations with the following command.\n$ kubectl get javaagent -n skywalking-system -l app=demo -oyaml  Expected output apiVersion: v1 items: - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2023-11-19T05:34:03Z\u0026#34; generation: 1 labels: app: demo name: app-demo-javaagent namespace: skywalking-system ownerReferences: - 
apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo-75d8d995cc uid: 8cb64abc-9b50-4f67-9304-2e09de476168 resourceVersion: \u0026#34;21515\u0026#34; uid: 6cbafb3d-9f43-4448-95e8-bda1f7c72bc3 spec: agentConfiguration: collector.backend_service: skywalking-system-oap.skywalking-system:11800 optional-plugin: webflux|cloud-gateway-2.1.x backendService: skywalking-system-oap.skywalking-system:11800 podSelector: app=demo serviceName: Your_ApplicationName status: creationTime: \u0026#34;2023-11-19T05:34:03Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2023-11-19T05:34:46Z\u0026#34; realInjectedNum: 1 kind: List metadata: resourceVersion: \u0026#34;\u0026#34; selfLink: \u0026#34;\u0026#34;  If you want to check the logs of the java agent, you can run the following command.\n$ kubectl logs -f -n skywalking-system -l app=demo -c inject-skywalking-agent Step6: Check the application metrics in the skywalking UI First, port-forward the demo service to your local machine.\n$ kubectl port-forward svc/demo 8085:8085 -n skywalking-system Then, trigger the application to generate some metrics.\n$ for i in {1..10}; do curl http://127.0.0.1:8085/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done After that, you can port-forward the skywalking UI to your local machine.\n$ kubectl port-forward svc/skywalking-system-ui 8080:80 -n skywalking-system Open the skywalking UI in your browser and navigate to http://127.0.0.1:8080 to check the application metrics.\n Expected output  Also, if you want to expose the external metrics to the kubernetes HPA, you can follow the guide to deploy the custom metrics adapter and you may get some inspiration from the e2e test.\n","excerpt":"Getting Started This document introduces how to create a kubernetes cluster locally using kind and …","ref":"/docs/skywalking-swck/next/getting-started/","title":"Getting Started"},{"body":"Getting Started This document introduces how to create a kubernetes cluster locally using kind and how to deploy the basic skywalking components to the cluster.\nPrerequisites  docker \u0026gt;= v20.10.6 kubectl \u0026gt;= v1.21.0 kind \u0026gt;= v0.20.0 swctl \u0026gt;= v0.10.0  Step1: Create a kubernetes cluster locally using kind  Note: If you have a kubernetes cluster (\u0026gt; v1.21.10) already, you can skip this step.\n Here we create a kubernetes cluster with 1 control-plane node and 1 worker nodes.\n$ cat \u0026lt;\u0026lt;EOF | kind create cluster --config=- kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane image: kindest/node:v1.21.10 - role: worker image: kindest/node:v1.21.10 EOF  Expected output Creating cluster \u0026#34;kind\u0026#34; ... ✓ Ensuring node image (kindest/node:v1.21.10) 🖼 ✓ Preparing nodes 📦 📦 ✓ Writing configuration 📜 ✓ Starting control-plane 🕹️ ✓ Installing CNI 🔌 ✓ Installing StorageClass 💾 ✓ Joining worker nodes 🚜 Set kubectl context to \u0026#34;kind-kind\u0026#34; You can now use your cluster with: kubectl cluster-info --context kind-kind Not sure what to do next? 
😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/  Check all pods in the cluster.\n$ kubectl get pods -A  Expected output NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-558bd4d5db-h5gxt 1/1 Running 0 106s kube-system coredns-558bd4d5db-lhnvz 1/1 Running 0 106s kube-system etcd-kind-control-plane 1/1 Running 0 116s kube-system kindnet-fxlkm 1/1 Running 0 106s kube-system kindnet-vmcvl 1/1 Running 0 91s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 116s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 116s kube-system kube-proxy-nr4f4 1/1 Running 0 91s kube-system kube-proxy-zl4h2 1/1 Running 0 106s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 116s local-path-storage local-path-provisioner-74567d47b4-kmtjh 1/1 Running 0 106s  Step2: Build the operator image Change into the root directory of SWCK and build the operator image as follows.\n$ cd operator # Build the operator image $ make docker-build You will get the operator image controller:latest as follows.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE controller latest 84da7509092a 22 seconds ago 53.6MB Load the operator image into the kind cluster or push the image to a registry that your kubernetes cluster can access.\n$ kind load docker-image controller or\n$ docker push $(YOUR_REGISTRY)/controller Step3: Deploy operator on the kubernetes cluster Install the CRDs as follows.\n$ make install Check that the CRDs are installed successfully.\n Expected output kubectl get crd | grep skywalking banyandbs.operator.skywalking.apache.org 2023-11-05T03:30:43Z fetchers.operator.skywalking.apache.org 2023-11-05T03:30:43Z javaagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverdynamicconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapservers.operator.skywalking.apache.org 2023-11-05T03:30:43Z satellites.operator.skywalking.apache.org 2023-11-05T03:30:43Z storages.operator.skywalking.apache.org 2023-11-05T03:30:43Z swagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z uis.operator.skywalking.apache.org 2023-11-05T03:30:43Z  Deploy the SWCK operator to the cluster.\n$ make deploy Or deploy the SWCK operator to the cluster with your own image.\n$ make deploy OPERATOR_IMG=$(YOUR_REGISTRY)/controller Get the status of the SWCK operator pod.\n$ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-5f5bbd4fd-9wdw6 2/2 Running 0 34s Step4: Deploy skywalking components on the kubernetes cluster Create the skywalking-system namespace.\n$ kubectl create namespace skywalking-system Deploy the skywalking components to the cluster.\n$ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: OAPServer metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-oap-server:9.5.0 service: template: type: ClusterIP --- apiVersion: operator.skywalking.apache.org/v1alpha1 kind: UI metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-ui:9.5.0 OAPServerAddress: http://skywalking-system-oap.skywalking-system:12800 service: template: type: ClusterIP ingress: host: demo.ui.skywalking EOF Check the status of the skywalking components.\n$ kubectl get pod -n skywalking-system NAME READY STATUS RESTARTS AGE skywalking-system-oap-68bd877f57-fhzdz 1/1 Running 0 6m23s 
skywalking-system-ui-6db8579b47-rphtl 1/1 Running 0 6m23s Step5: Use the java agent injector to inject the java agent into the application pod Label the namespace where the application pod is located with swck-injection=enabled.\n$ kubectl label namespace skywalking-system swck-injection=enabled Create the application pod.\n Note: The application pod must be labeled with swck-java-agent-injected=true and the agent.skywalking.apache.org/collector.backend_service annotation must be set to the address of the OAP server. For more configurations, please refer to the guide.\n $ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: apps/v1 kind: Deployment metadata: name: demo namespace: skywalking-system spec: selector: matchLabels: app: demo template: metadata: labels: # enable the java agent injector swck-java-agent-injected: \u0026#34;true\u0026#34; app: demo annotations: agent.skywalking.apache.org/collector.backend_service: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34; spec: containers: - name: demo1 imagePullPolicy: IfNotPresent image: ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1 command: [\u0026#34;java\u0026#34;] args: [\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;] ports: - containerPort: 8085 readinessProbe: httpGet: path: /hello port: 8085 initialDelaySeconds: 3 periodSeconds: 3 failureThreshold: 10 --- apiVersion: v1 kind: Service metadata: name: demo namespace: skywalking-system spec: type: ClusterIP ports: - name: 8085-tcp port: 8085 protocol: TCP targetPort: 8085 selector: app: demo EOF Check the status of the application pod and make sure the java agent is injected into the application pod.\n$ kubectl get pod -n skywalking-system -l app=demo -ojsonpath=\u0026#39;{.items[0].spec.initContainers[0]}\u0026#39;  Expected output {\u0026#34;args\u0026#34;:[\u0026#34;-c\u0026#34;,\u0026#34;mkdir -p /sky/agent \\u0026\\u0026 cp -r /skywalking/agent/* /sky/agent\u0026#34;],\u0026#34;command\u0026#34;:[\u0026#34;sh\u0026#34;],\u0026#34;image\u0026#34;:\u0026#34;apache/skywalking-java-agent:8.16.0-java8\u0026#34;,\u0026#34;imagePullPolicy\u0026#34;:\u0026#34;IfNotPresent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;inject-skywalking-agent\u0026#34;,\u0026#34;resources\u0026#34;:{},\u0026#34;terminationMessagePath\u0026#34;:\u0026#34;/dev/termination-log\u0026#34;,\u0026#34;terminationMessagePolicy\u0026#34;:\u0026#34;File\u0026#34;,\u0026#34;volumeMounts\u0026#34;:[{\u0026#34;mountPath\u0026#34;:\u0026#34;/sky/agent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;sky-agent\u0026#34;},{\u0026#34;mountPath\u0026#34;:\u0026#34;/var/run/secrets/kubernetes.io/serviceaccount\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;kube-api-access-4qk26\u0026#34;,\u0026#34;readOnly\u0026#34;:true}]}  Also, you could check the final java agent configurations with the following command.\n$ kubectl get javaagent -n skywalking-system -l app=demo -oyaml  Expected output apiVersion: v1 items: - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2023-11-19T05:34:03Z\u0026#34; generation: 1 labels: app: demo name: app-demo-javaagent namespace: skywalking-system ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo-75d8d995cc uid: 8cb64abc-9b50-4f67-9304-2e09de476168 resourceVersion: \u0026#34;21515\u0026#34; uid: 6cbafb3d-9f43-4448-95e8-bda1f7c72bc3 spec: agentConfiguration: collector.backend_service: skywalking-system-oap.skywalking-system:11800 optional-plugin: 
webflux|cloud-gateway-2.1.x backendService: skywalking-system-oap.skywalking-system:11800 podSelector: app=demo serviceName: Your_ApplicationName status: creationTime: \u0026#34;2023-11-19T05:34:03Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2023-11-19T05:34:46Z\u0026#34; realInjectedNum: 1 kind: List metadata: resourceVersion: \u0026#34;\u0026#34; selfLink: \u0026#34;\u0026#34;  If you want to check the logs of the java agent, you can run the following command.\n$ kubectl logs -f -n skywalking-system -l app=demo -c inject-skywalking-agent Step6: Check the application metrics in the skywalking UI First, port-forward the demo service to your local machine.\n$ kubectl port-forward svc/demo 8085:8085 -n skywalking-system Then, trigger the application to generate some metrics.\n$ for i in {1..10}; do curl http://127.0.0.1:8085/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done After that, you can port-forward the skywalking UI to your local machine.\n$ kubectl port-forward svc/skywalking-system-ui 8080:80 -n skywalking-system Open the skywalking UI in your browser and navigate to http://127.0.0.1:8080 to check the application metrics.\n Expected output  Also, if you want to expose the external metrics to the kubernetes HPA, you can follow the guide to deploy the custom metrics adapter and you may get some inspiration from the e2e test.\n","excerpt":"Getting Started This document introduces how to create a kubernetes cluster locally using kind and …","ref":"/docs/skywalking-swck/v0.9.0/getting-started/","title":"Getting Started"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. 
For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
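To make the match-priority rules above concrete, here is a minimal sketch; the paths are hypothetical and only illustrate which grouping each incoming endpoint name would receive under the default ${METHOD}:${PATH} match rule and name format.
paths:
  /products/inventory: {}        # an exact path
  /products/{id}: {}             # one path variable
  /products/{id}/abc: {}         # one variable plus a literal segment
  /products/{id}/{region}: {}    # two path variables
# GET:/products/inventory -> GET:/products/inventory      (rule 1: the exact path beats /products/{id})
# GET:/products/123/abc   -> GET:/products/{id}/abc       (rule 2: fewer variables than /products/{id}/{region})
# GET:/products/123/cn    -> GET:/products/{id}/{region}  (only the two-variable path still matches)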
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products/{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions; the endpoint grouping rules from OpenAPI will be re-created based on the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:\n  # Endpoint of the service would follow the following rules\n  - service-name: serviceA\n    rules:\n      # {var} represents any variable string in the URI.\n      - /prod/{var}","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/latest/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
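Before the examples, here is a minimal sketch of the x-sw-* header discussed above; serviceB is reused from the use cases below, the rest of the definition is omitted, and the comments show what the default and a custom name format would produce for one hypothetical request.
openapi: 3.0.0
x-sw-service-name: serviceB
x-sw-endpoint-name-match-rule: "${METHOD}:${PATH}"
# With the default name format ${METHOD}:${PATH}, GET /products/123 is grouped as GET:/products/{id}.
x-sw-endpoint-name-format: "${PATH}:<${METHOD}>"
# With this custom format, the same request is grouped as /products/{id}:<GET>.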
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/next/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path being matched. E.g. /products or /products/inventory The path which has less variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
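Jumping ahead to the custom configuration covered later on this page, a minimal endpoint-name-grouping.yml in the regex style used by this version might look as follows; serviceA and the /prod/{id} rule mirror the page's own configuration format example.
grouping:
  # Endpoints of serviceA follow the rules below.
  - service-name: serviceA
    rules:
      # Logic name used when the regex expression matches, e.g. /prod/ORDER123 becomes /prod/{id}.
      - endpoint-name: /prod/{id}
        regex: \/prod\/.+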
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpiont Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.0.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
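To spell out the third rule's tie-break, here is a short worked comparison reusing the example above; counting each variable as length 1 is the only assumption.
# Incoming endpoint name: GET:/products/abc/ef
# Candidate /products/abc/{var1}:     the literal segment abc has length 3, the variable counts as 1
# Candidate /products/{var12345}/ef:  the literal segment ef has length 2, the variable counts as 1
# Both candidates contain exactly one variable, so the longer path wins and the name is grouped as GET:/products/abc/{var1}.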
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpiont Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.1.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products/{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions; the endpoint grouping rules from OpenAPI will be re-created according to the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.2.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
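To make the priority rules above concrete, here is a small, hypothetical paths section together with the path each incoming endpoint name would match; the paths and names are illustrative only, echoing the examples in the list above:

```yaml
# Hypothetical path definitions illustrating the match priority described above.
paths:
  /products/inventory: {}        # an exact path always wins
  /products/{var1}/abc: {}       # fewer variables than the entry below
  /products/{var1}/{var2}: {}
  /products/abc/{var1}: {}       # same variable count: the longer literal part wins
  /products/{var12345}/ef: {}

# Incoming endpoint name        Matched path
# GET:/products/inventory  ->   /products/inventory      (exact match)
# GET:/products/123/abc    ->   /products/{var1}/abc     (fewer variables)
# GET:/products/abc/ef     ->   /products/abc/{var1}     ("abc" is longer than "ef")
```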
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.3.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
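The custom-configuration YAML (endpoint-name-grouping.yml) that closes this entry appears flattened further below; formatted, the same example reads as follows. This is only a re-layout of the snippet in the text, not additional configuration:

```yaml
grouping:
  # Endpoints of the service would follow the rules below.
  - service-name: serviceA
    rules:
      # Logic name used when the regex expression matches.
      - endpoint-name: /prod/{id}
        regex: \/prod\/.+
```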
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.4.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
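As a preview of the non-default formats used in the examples table that follows, a definition may keep the default match rule and override only the output format; the values below mirror one of the table rows, so the grouped name in the comment is taken from the text rather than invented:

```yaml
# Same matching as the default, but the grouped endpoint name is emitted
# as "<path>:<METHOD>" instead of "METHOD:<path>".
openapi: 3.0.0
x-sw-service-name: serviceB
x-sw-endpoint-name-format: "${PATH}:<${METHOD}>"
# Incoming GET:/products/123 is then grouped as /products/{id}:<GET>
```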
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products/{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions; the endpoint grouping rules from OpenAPI will be re-created according to the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.5.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
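In the newer entries of this document, the custom configuration uses a {var} placeholder instead of a regex; formatted, the example given at the end of this entry reads:

```yaml
grouping:
  # Endpoints of the service would follow the rules below.
  - service-name: serviceA
    rules:
      # {var} represents any variable string in the URI.
      - /prod/{var}
```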
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.6.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
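If OpenAPI-based grouping is not wanted, the toggle named above can be switched off wherever the OAP process receives its environment. A docker-compose style sketch, assuming the OAP runs from the official image and reads its settings from environment variables (the service and image references are illustrative, not taken from this document):

```yaml
services:
  oap:
    image: apache/skywalking-oap-server   # illustrative image reference
    environment:
      # Disables endpoint name grouping by OpenAPI definitions (see the toggle above).
      SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI: "false"
```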
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.7.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  
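For orientation, the server-side files listed above (server.pem, server.crt, and ca.crt) are referenced from the OAP configuration covered in the next section, while the client-side files are configured on the agent/Satellite side. Formatted as YAML, the relevant keys under application.yml core/default (paths are placeholders, and only the keys relevant here are shown) look like:

```yaml
core:
  default:
    # Server-side key and certificate chain, loaded by the OAP server.
    gRPCSslEnabled: true
    gRPCSslKeyPath: /path/to/server.pem
    gRPCSslCertChainPath: /path/to/server.crt
    # CA used by the in-node gRPC client to verify server certificates in cluster mode.
    gRPCSslTrustedCAPath: /path/to/ca.crt
```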
TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). 
You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/latest/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. 
But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/next/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. 
That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). 
You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.0.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. 
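Spelled out with normal YAML indentation, those receiver-sharing-server lines would look roughly like the sketch below. The nesting under receiver-sharing-server/default follows the section name given above; the paths are placeholders, and no trusted CA entry is needed on this server side:

```yaml
receiver-sharing-server:
  default:
    gRPCPort: ${SW_RECEIVER_GRPC_PORT:"changeMe"}  # set a real, non-zero port
    gRPCSslEnabled: true
    gRPCSslKeyPath: /path/to/server.pem
    gRPCSslCertChainPath: /path/to/server.crt
```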
But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.1.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  
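Once these files are generated, they are referenced from application.yml. As a quick orientation before the detailed sections that follow, a minimal core/default TLS fragment might look like this sketch (paths are placeholders; the exact options are explained next):

```yaml
core:
  default:
    gRPCSslEnabled: true
    gRPCSslKeyPath: /path/to/server.pem        # private key loaded by the OAP gRPC server
    gRPCSslCertChainPath: /path/to/server.crt  # certificate chain presented to clients
    gRPCSslTrustedCAPath: /path/to/ca.crt      # CA used by the OAP gRPC client in cluster mode
```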
TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). 
You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.2.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. 
But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.3.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  
TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). 
You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.4.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. 
But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.5.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  
TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). 
You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.6.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. 
But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.7.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","excerpt":"Guide  This section explains how to manage translations for internationalization of menu items. …","ref":"/docs/main/latest/en/guides/i18n/","title":"Guide"},{"body":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. 
The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","excerpt":"Guide  This section explains how to manage translations for internationalization of menu items. …","ref":"/docs/main/next/en/guides/i18n/","title":"Guide"},{"body":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","excerpt":"Guide  This section explains how to manage translations for internationalization of menu items. …","ref":"/docs/main/v9.6.0/en/guides/i18n/","title":"Guide"},{"body":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","excerpt":"Guide  This section explains how to manage translations for internationalization of menu items. …","ref":"/docs/main/v9.7.0/en/guides/i18n/","title":"Guide"},{"body":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","excerpt":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an …","ref":"/docs/main/latest/en/guides/community/","title":"Guides"},{"body":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. 
Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","excerpt":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an …","ref":"/docs/main/next/en/guides/community/","title":"Guides"},{"body":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. 
if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metris from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. 
set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe have a simple script to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Build a distribution package and unzip/untar it to folder dist. Run the script in the root directory. It will print out all new dependencies. Check the LICENSE and NOTICE of those dependencies to make sure that they can be included in an ASF project. Add them to the apm-dist/release-docs/{LICENSE,NOTICE} file. Add the names of these dependencies to the tools/dependencies/known-oap-backend-dependencies.txt file (in alphabetical order). check-LICENSE.sh should pass in the next run.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. 
The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","excerpt":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our …","ref":"/docs/main/v9.0.0/en/guides/readme/","title":"Guides"},{"body":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. 
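For illustration only, a minimal integration test following that naming convention could look like the sketch below. The package, class name, and assertion are invented for this example; the only thing taken from the text above is the IT prefix, which keeps the class out of maven-surefire-plugin and hands it to maven-failsafe-plugin. It assumes JUnit 5 (Jupiter) on the test classpath of the module.

package org.apache.skywalking.oap.server.example; // hypothetical package

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertTrue;

// The IT prefix in the class name is what defers this test to the verify phase.
public class ITExampleStorage {

    @Test
    public void shouldRoundTripOneRecord() {
        // A real IT would start its external dependency (database, message queue, ...)
        // and exercise the code against it; this placeholder only shows naming and structure.
        assertTrue(true);
    }
}

Such a class is executed by the IT commands described in this section, not by ./mvnw clean test.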
Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metris from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. 
For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe have a simple script to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Build a distribution package and unzip/untar it to folder dist. Run the script in the root directory. It will print out all new dependencies. Check the LICENSE and NOTICE of those dependencies to make sure that they can be included in an ASF project. Add them to the apm-dist/release-docs/{LICENSE,NOTICE} file. Add the names of these dependencies to the tools/dependencies/known-oap-backend-dependencies.txt file (in alphabetical order). check-LICENSE.sh should pass in the next run.  Profile The performance profile is an enhancement feature in the APM system. 
We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","excerpt":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our …","ref":"/docs/main/v9.1.0/en/guides/readme/","title":"Guides"},{"body":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. 
maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. 
Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. 
Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","excerpt":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our …","ref":"/docs/main/v9.2.0/en/guides/readme/","title":"Guides"},{"body":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. 
It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. 
Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document. If you would like to add a new root menu or sub-menu to booster UI, read the UI menu control document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. 
So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","excerpt":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our …","ref":"/docs/main/v9.3.0/en/guides/readme/","title":"Guides"},{"body":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  
Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. 
The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. 
OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document. If you would like to add a new root menu or sub-menu to booster UI, read the UI menu control document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","excerpt":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our …","ref":"/docs/main/v9.4.0/en/guides/readme/","title":"Guides"},{"body":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. 
Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. 
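As a concrete illustration of the previous paragraph, a new benchmark could look roughly like the following sketch. It is hypothetical: the class name and workload are invented, and only AbstractMicrobenchmark plus the standard JMH annotations come from the text; measurement, fork, and warmup settings are whatever the base class declares unless you override them.

package org.apache.skywalking.oap.server.microbench.example; // hypothetical package inside the microbench module

import org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.infra.Blackhole;

public class StringBuildBenchmark extends AbstractMicrobenchmark {

    // Each @Benchmark method is measured separately; the Blackhole keeps the JIT from
    // optimizing the work away.
    @Benchmark
    public void concatWithPlus(Blackhole bh) {
        bh.consume("service-" + 42 + "-instance");
    }

    @Benchmark
    public void concatWithFormat(Blackhole bh) {
        bh.consume(String.format("service-%d-instance", 42));
    }
}

The run options described next (the uber jar, -rf json, -DperfReportDir) apply to classes like this one.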
And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. 
However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document. If you would like to add a new root menu or sub-menu to booster UI, read the UI menu control document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","excerpt":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our …","ref":"/docs/main/v9.5.0/en/guides/readme/","title":"Guides"},{"body":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. 
Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","excerpt":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an …","ref":"/docs/main/v9.6.0/en/guides/community/","title":"Guides"},{"body":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","excerpt":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an …","ref":"/docs/main/v9.7.0/en/guides/community/","title":"Guides"},{"body":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide you.\n Contribution  How to contribute a module?   Compile  How to compile SkyWalking Rover?    ","excerpt":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide …","ref":"/docs/skywalking-rover/latest/en/guides/readme/","title":"Guides"},{"body":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide you.\n Contribution  How to contribute a module?   Compile  How to compile SkyWalking Rover?    ","excerpt":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide …","ref":"/docs/skywalking-rover/next/en/guides/readme/","title":"Guides"},{"body":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide you.\n Contribution  How to contribute a module?   Compile  How to compile SkyWalking Rover?    ","excerpt":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide …","ref":"/docs/skywalking-rover/v0.6.0/en/guides/readme/","title":"Guides"},{"body":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would guide you.\n Contribution  How to contribute a plugin? How to release SkyWalking Satellite?   Compile  How to compile SkyWalking Satellite?   Test  How to add unit test for a plugin?    ","excerpt":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would …","ref":"/docs/skywalking-satellite/latest/en/guides/readme/","title":"Guides"},{"body":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would guide you.\n Contribution  How to contribute a plugin? How to release SkyWalking Satellite?   Compile  How to compile SkyWalking Satellite?   Test  How to add unit test for a plugin?    
","excerpt":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would …","ref":"/docs/skywalking-satellite/next/en/guides/readme/","title":"Guides"},{"body":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would guide you.\n Contribution  How to contribute a plugin? How to release SkyWalking Satellite?   Compile  How to compile SkyWalking Satellite?   Test  How to add unit test for a plugin?    ","excerpt":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would …","ref":"/docs/skywalking-satellite/v1.2.0/en/guides/readme/","title":"Guides"},{"body":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}","excerpt":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the …","ref":"/docs/main/latest/en/setup/backend/storages/h2/","title":"H2"},{"body":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}","excerpt":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the …","ref":"/docs/main/next/en/setup/backend/storages/h2/","title":"H2"},{"body":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}","excerpt":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the …","ref":"/docs/main/v9.7.0/en/setup/backend/storages/h2/","title":"H2"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/latest/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/next/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to check the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. 
You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to check the health status of the OAP …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. 
It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. 
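For the GraphQL readiness check described above, a small script can post the checkHealth query directly and inspect the score. The following is a minimal, hedged sketch only: it assumes the OAP GraphQL endpoint is exposed at http://localhost:12800/graphql and uses the third-party requests library; neither detail comes from the quoted documentation.

```python
# Hedged sketch: poll the checkHealth GraphQL query described above.
# The endpoint URL (localhost:12800/graphql) is an assumption for this example.
import requests

QUERY = "query { checkHealth { score details } }"

def oap_is_healthy(endpoint="http://localhost:12800/graphql"):
    resp = requests.post(endpoint, json={"query": QUERY}, timeout=5)
    resp.raise_for_status()
    health = resp.json()["data"]["checkHealth"]
    # score == 0 means healthy; > 0 means unhealthy; < 0 means OAP has not started up.
    return health["score"] == 0, health["details"]

if __name__ == "__main__":
    healthy, details = oap_is_healthy()
    print("healthy" if healthy else f"unhealthy: {details}")
```

For the gRPC services, use the probing tools mentioned next rather than a GraphQL query.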
You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. 
storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and skywalking-python-profiling described how the threading-profiler works\nAnd this figure demonstrates how the profiler works as well:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread: get: /api/v1/user/ rect rgb(0,200,0) API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;-working thread: snapshot end working thread--\u0026gt;\u0026gt;-API: response It works well with threading mode because the whole process will be executed in the same thread, so the profiling thread can fetch the complete profiling info of the process of the API request.\nWhy doesn\u0026rsquo;t threading-profiler work in greenlet mode When the python program runs with gevent + greenlet, the process would be like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;-working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response In this circumstance, the snapshot of the working thread includes multi contexts of different greenlets, which will make skywalking confused to build the trace stack.\nFortunately, greenlet has an API for profiling, the doc is here. 
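As a rough illustration of that API, the sketch below registers a switch-trace callback with greenlet.settrace and snapshots the traceback of the greenlets being profiled. The helper names profiled_greenlets and record_snapshot are hypothetical, introduced only for this example; this is not the actual skywalking-python implementation.

```python
# Minimal sketch of greenlet's trace callback; not the real skywalking-python profiler.
# `profiled_greenlets` and `record_snapshot` are hypothetical names for illustration.
import traceback
import greenlet

profiled_greenlets = set()   # greenlets selected for profiling (hypothetical registry)

def record_snapshot(frames):
    # A real profiler would queue this and report it to the OAP backend.
    print("snapshot:", [f"{f.filename}:{f.lineno} {f.name}" for f in frames])

def switch_tracer(event, args):
    # greenlet invokes this on every context switch; args is (origin, target).
    if event not in ("switch", "throw"):
        return
    origin, target = args
    # Snapshot the profiled greenlet when it is swapped out (origin) or in (target).
    for g in (origin, target):
        if g in profiled_greenlets and g.gr_frame is not None:
            record_snapshot(traceback.extract_stack(g.gr_frame))

greenlet.settrace(switch_tracer)  # install the switch-trace callback
```

Each switch event corresponds to a swap-in or swap-out step in the sequence diagrams below, which is why this approach needs far fewer snapshots than a separate sampling thread.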
We can implement a greenlet profiler to solve this issue.\nHow the greenlet profiler works A greenlet profiler leverages the trace callback of greenlet, it works like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet and snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet and snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response We can set a callback function to the greenlet that we need to profiling, then when the greenlet.HUB switches the context in/out to the working thread, the callback will build a snapshot of the greenlet\u0026rsquo;s traceback and send it to skywalking.\nThe difference between these two profilers The greenlet profiler will significantly reduce the snapshot times of the profiling process, which means that it will cost less CPU time than the threading profiler.\n","excerpt":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and …","ref":"/docs/skywalking-python/latest/en/profiling/profiling/","title":"How does threading-profiler (the default mode) work"},{"body":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and skywalking-python-profiling described how the threading-profiler works\nAnd this figure demonstrates how the profiler works as well:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread: get: /api/v1/user/ rect rgb(0,200,0) API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;-working thread: snapshot end working thread--\u0026gt;\u0026gt;-API: response It works well with threading mode because the whole process will be executed in the same thread, so the profiling thread can fetch the complete profiling info of the process of the API request.\nWhy doesn\u0026rsquo;t threading-profiler work in greenlet mode When the python program runs with gevent + greenlet, the process would be like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;-working thread 1: snapshot working thread 
1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response In this circumstance, the snapshot of the working thread includes multi contexts of different greenlets, which will make skywalking confused to build the trace stack.\nFortunately, greenlet has an API for profiling, the doc is here. We can implement a greenlet profiler to solve this issue.\nHow the greenlet profiler works A greenlet profiler leverages the trace callback of greenlet, it works like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet and snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet and snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response We can set a callback function to the greenlet that we need to profiling, then when the greenlet.HUB switches the context in/out to the working thread, the callback will build a snapshot of the greenlet\u0026rsquo;s traceback and send it to skywalking.\nThe difference between these two profilers The greenlet profiler will significantly reduce the snapshot times of the profiling process, which means that it will cost less CPU time than the threading profiler.\n","excerpt":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and …","ref":"/docs/skywalking-python/next/en/profiling/profiling/","title":"How does threading-profiler (the default mode) work"},{"body":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and skywalking-python-profiling described how the threading-profiler works\nAnd this figure demonstrates how the profiler works as well:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread: get: /api/v1/user/ rect rgb(0,200,0) API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;-working thread: snapshot end working thread--\u0026gt;\u0026gt;-API: response It works well with threading mode because the whole process will be executed in the same thread, so the profiling thread can fetch the complete profiling info of the process of the API request.\nWhy doesn\u0026rsquo;t threading-profiler work in greenlet mode When the python program runs with gevent + greenlet, the process would be like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet profiling 
thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;-working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response In this circumstance, the snapshot of the working thread includes multi contexts of different greenlets, which will make skywalking confused to build the trace stack.\nFortunately, greenlet has an API for profiling, the doc is here. We can implement a greenlet profiler to solve this issue.\nHow the greenlet profiler works A greenlet profiler leverages the trace callback of greenlet, it works like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet and snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet and snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response We can set a callback function to the greenlet that we need to profiling, then when the greenlet.HUB switches the context in/out to the working thread, the callback will build a snapshot of the greenlet\u0026rsquo;s traceback and send it to skywalking.\nThe difference between these two profilers The greenlet profiler will significantly reduce the snapshot times of the profiling process, which means that it will cost less CPU time than the threading profiler.\n","excerpt":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and …","ref":"/docs/skywalking-python/v1.0.1/en/profiling/profiling/","title":"How does threading-profiler (the default mode) work"},{"body":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or sub-menu, you should add data to src/router/data/xx and add translation contents for the title to src/locales/lang/xx in booster UI.\n Create a new file called xxx.ts in src/router/data. Add configurations to the xxx.ts, configurations should be like this.  export default [ { // Add `Infrastructure` menu  path: \u0026#34;\u0026#34;, name: \u0026#34;Infrastructure\u0026#34;, meta: { title: \u0026#34;infrastructure\u0026#34;, icon: \u0026#34;scatter_plot\u0026#34;, hasGroup: true, }, redirect: \u0026#34;/linux\u0026#34;, children: [ // Add a sub menu of the `Infrastructure`  { path: \u0026#34;/linux\u0026#34;, name: \u0026#34;Linux\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, // If there are Tabs widgets in your dashboards, add following extra configuration to provide static links to the specific tab.  { path: \u0026#34;/linux/tab/:activeTabIndex\u0026#34;, name: \u0026#34;LinuxActiveTabIndex\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, notShow: true, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, ], }, ]; import configurations in src/router/data/index.ts.  
import name from \u0026#34;./xxx\u0026#34;; ","excerpt":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or …","ref":"/docs/main/v9.3.0/en/guides/how-to-add-menu/","title":"How to add a new root menu or sub-menu to booster UI"},{"body":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or sub-menu, you should add data to src/router/data/xx and add translation contents for the title to src/locales/lang/xx in booster UI.\n Create a new file called xxx.ts in src/router/data. Add configurations to the xxx.ts, configurations should be like this.  export default [ { // Add `Infrastructure` menu  path: \u0026#34;\u0026#34;, name: \u0026#34;Infrastructure\u0026#34;, meta: { title: \u0026#34;infrastructure\u0026#34;, icon: \u0026#34;scatter_plot\u0026#34;, hasGroup: true, }, redirect: \u0026#34;/linux\u0026#34;, children: [ // Add a sub menu of the `Infrastructure`  { path: \u0026#34;/linux\u0026#34;, name: \u0026#34;Linux\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, // If there are Tabs widgets in your dashboards, add following extra configuration to provide static links to the specific tab.  { path: \u0026#34;/linux/tab/:activeTabIndex\u0026#34;, name: \u0026#34;LinuxActiveTabIndex\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, notShow: true, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, ], }, ]; import configurations in src/router/data/index.ts.  import name from \u0026#34;./xxx\u0026#34;; ","excerpt":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or …","ref":"/docs/main/v9.4.0/en/guides/how-to-add-menu/","title":"How to add a new root menu or sub-menu to booster UI"},{"body":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or sub-menu, you should add data to src/router/data/xx and add translation contents for the title to src/locales/lang/xx in booster UI.\n Create a new file called xxx.ts in src/router/data. Add configurations to the xxx.ts, configurations should be like this.  export default [ { // Add `Infrastructure` menu  path: \u0026#34;\u0026#34;, name: \u0026#34;Infrastructure\u0026#34;, meta: { title: \u0026#34;infrastructure\u0026#34;, icon: \u0026#34;scatter_plot\u0026#34;, hasGroup: true, }, redirect: \u0026#34;/linux\u0026#34;, children: [ // Add a sub menu of the `Infrastructure`  { path: \u0026#34;/linux\u0026#34;, name: \u0026#34;Linux\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, // If there are Tabs widgets in your dashboards, add following extra configuration to provide static links to the specific tab.  { path: \u0026#34;/linux/tab/:activeTabIndex\u0026#34;, name: \u0026#34;LinuxActiveTabIndex\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, notShow: true, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, ], }, ]; import configurations in src/router/data/index.ts.  import name from \u0026#34;./xxx\u0026#34;; ","excerpt":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or …","ref":"/docs/main/v9.5.0/en/guides/how-to-add-menu/","title":"How to add a new root menu or sub-menu to booster UI"},{"body":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs and Controllers in SWCK.\n1. 
Install the kubebuilder  Notice, SWCK is built by kubebuilder v3.2.0, so you need to install it at first.\n SWCK is based on the kubebuilder, and you could download the kubebuilder by the script.\n2. Create CRD and Controller You can use kubebuilder create api to scaffold a new Kind and corresponding controller. Here we use the Demo as an example.\n$ cd operator \u0026amp;\u0026amp; kubebuilder create api --group operator --version v1alpha1 --kind Demo(Your CRD) Then you need to input twice y to create the Resource and Controller, and there will be some newly added files.\n$ git status On branch master Your branch is up to date with \u0026#39;origin/master\u0026#39;. Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: apis/operator/v1alpha1/zz_generated.deepcopy.go modified: config/crd/bases/operator.skywalking.apache.org_swagents.yaml modified: config/crd/kustomization.yaml modified: config/rbac/role.yaml modified: go.mod modified: go.sum modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_types.go config/crd/bases/operator.skywalking.apache.org_demoes.yaml config/crd/patches/cainjection_in_operator_demoes.yaml config/crd/patches/webhook_in_operator_demoes.yaml config/rbac/operator_demo_editor_role.yaml config/rbac/operator_demo_viewer_role.yaml config/samples/operator_v1alpha1_demo.yaml controllers/operator/demo_controller.go controllers/operator/suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) Next, we need to focus on the file apis/operator/v1alpha1/demo_types.go which defines your CRD, and the file controllers/operator/configuration_controller.go which defines the Controller. The others files are some configurations generated by the kubebuilder markers. Here are some references:\n  Kubebuilder project demo, in which you can understand the overall architecture.\n  How to add new-api, which you can find more details for oapserverconfig_types.go.\n  Controller-overview, where you can find more details about oapserverconfig_controller.go.\n  3. Create webhook If you want to fields or set defaults to CRs, creating webhooks is a good practice:\nkubebuilder create webhook --group operator --version v1alpha1 --kind Demo --defaulting --programmatic-validation The newly generated files are as follows.\n$ git status On branch master Your branch is ahead of \u0026#39;origin/master\u0026#39; by 1 commit. (use \u0026#34;git push\u0026#34; to publish your local commits) Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: config/webhook/manifests.yaml modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_webhook.go apis/operator/v1alpha1/webhook_suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) You can get more details through webhook-overview.\n4. 
Create the template Generally, a controller would generate a series of resources, such as workload, rbac, service, etc based on CRDs. SWCK is using the Go standard template engine to generate these resources. All template files are stored in the ./operator/pkg/operator/manifests. You could create a directory there such as demo to hold templates. The framework would transfer the CR as the arguments to these templates. More than CR, it supports passing custom rendering functions by setting up the TmplFunc. At last, you need to change the comment and add a field demo there to embed the template files into golang binaries.\n Notice, every file under the template directory can only contain one resource and we can\u0026rsquo;t use the --- to create multiple resources in a single file.\n 5. Build and Test SWCK needs to run in the k8s environment, so we highly recommend using the kind if you don\u0026rsquo;t have a cluster in hand. There are currently two ways to test your implementation.\n Before testing, please make sure you have the kind installed.\n  Test locally. After finishing your implementation, you could use the following steps to test locally:   Disable the webhook  export ENABLE_WEBHOOKS=false Run the main.go with the kubeconfig file.  go run main.go --kubeconfig=(use your kubeconfig file here, and the default is ~/.kube/config)  If you want to test the webhook, please refer the guide.\n  Test in-cluster.   Before testing the swck, please install cert-manager to provide the certificate for webhook in swck.  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml At first, you should build the swck image and load it into the kind cluster, and then you could install the crds and the operator as follows.  make docker-build \u0026amp;\u0026amp; kind load docker-image controller:latest \u0026amp;\u0026amp; make install \u0026amp;\u0026amp; make deploy After the swck is installed, and then you could use the following command to get the logs produced by the operator.  kubectl logs -f [skywalking-swck-controller-manager-*](../use the swck deployment name) -n skywalking-swck-system ","excerpt":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs …","ref":"/docs/skywalking-swck/latest/how-to-add-new-crd-and-controller/","title":"How to add CRD and Controller in SWCK?"},{"body":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs and Controllers in SWCK.\n1. Install the kubebuilder  Notice, SWCK is built by kubebuilder v3.2.0, so you need to install it at first.\n SWCK is based on the kubebuilder, and you could download the kubebuilder by the script.\n2. Create CRD and Controller You can use kubebuilder create api to scaffold a new Kind and corresponding controller. Here we use the Demo as an example.\n$ cd operator \u0026amp;\u0026amp; kubebuilder create api --group operator --version v1alpha1 --kind Demo(Your CRD) Then you need to input twice y to create the Resource and Controller, and there will be some newly added files.\n$ git status On branch master Your branch is up to date with \u0026#39;origin/master\u0026#39;. 
Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: apis/operator/v1alpha1/zz_generated.deepcopy.go modified: config/crd/bases/operator.skywalking.apache.org_swagents.yaml modified: config/crd/kustomization.yaml modified: config/rbac/role.yaml modified: go.mod modified: go.sum modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_types.go config/crd/bases/operator.skywalking.apache.org_demoes.yaml config/crd/patches/cainjection_in_operator_demoes.yaml config/crd/patches/webhook_in_operator_demoes.yaml config/rbac/operator_demo_editor_role.yaml config/rbac/operator_demo_viewer_role.yaml config/samples/operator_v1alpha1_demo.yaml controllers/operator/demo_controller.go controllers/operator/suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) Next, we need to focus on the file apis/operator/v1alpha1/demo_types.go which defines your CRD, and the file controllers/operator/configuration_controller.go which defines the Controller. The others files are some configurations generated by the kubebuilder markers. Here are some references:\n  Kubebuilder project demo, in which you can understand the overall architecture.\n  How to add new-api, which you can find more details for oapserverconfig_types.go.\n  Controller-overview, where you can find more details about oapserverconfig_controller.go.\n  3. Create webhook If you want to fields or set defaults to CRs, creating webhooks is a good practice:\nkubebuilder create webhook --group operator --version v1alpha1 --kind Demo --defaulting --programmatic-validation The newly generated files are as follows.\n$ git status On branch master Your branch is ahead of \u0026#39;origin/master\u0026#39; by 1 commit. (use \u0026#34;git push\u0026#34; to publish your local commits) Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: config/webhook/manifests.yaml modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_webhook.go apis/operator/v1alpha1/webhook_suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) You can get more details through webhook-overview.\n4. Create the template Generally, a controller would generate a series of resources, such as workload, rbac, service, etc based on CRDs. SWCK is using the Go standard template engine to generate these resources. All template files are stored in the ./operator/pkg/operator/manifests. You could create a directory there such as demo to hold templates. The framework would transfer the CR as the arguments to these templates. More than CR, it supports passing custom rendering functions by setting up the TmplFunc. 
At last, you need to change the comment and add a field demo there to embed the template files into golang binaries.\n Notice, every file under the template directory can only contain one resource and we can\u0026rsquo;t use the --- to create multiple resources in a single file.\n 5. Build and Test SWCK needs to run in the k8s environment, so we highly recommend using the kind if you don\u0026rsquo;t have a cluster in hand. There are currently two ways to test your implementation.\n Before testing, please make sure you have the kind installed.\n  Test locally. After finishing your implementation, you could use the following steps to test locally:   Disable the webhook  export ENABLE_WEBHOOKS=false Run the main.go with the kubeconfig file.  go run main.go --kubeconfig=(use your kubeconfig file here, and the default is ~/.kube/config)  If you want to test the webhook, please refer the guide.\n  Test in-cluster.   Before testing the swck, please install cert-manager to provide the certificate for webhook in swck.  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml At first, you should build the swck image and load it into the kind cluster, and then you could install the crds and the operator as follows.  make docker-build \u0026amp;\u0026amp; kind load docker-image controller:latest \u0026amp;\u0026amp; make install \u0026amp;\u0026amp; make deploy After the swck is installed, and then you could use the following command to get the logs produced by the operator.  kubectl logs -f [skywalking-swck-controller-manager-*](../use the swck deployment name) -n skywalking-swck-system ","excerpt":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs …","ref":"/docs/skywalking-swck/next/how-to-add-new-crd-and-controller/","title":"How to add CRD and Controller in SWCK?"},{"body":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs and Controllers in SWCK.\n1. Install the kubebuilder  Notice, SWCK is built by kubebuilder v3.2.0, so you need to install it at first.\n SWCK is based on the kubebuilder, and you could download the kubebuilder by the script.\n2. Create CRD and Controller You can use kubebuilder create api to scaffold a new Kind and corresponding controller. Here we use the Demo as an example.\n$ cd operator \u0026amp;\u0026amp; kubebuilder create api --group operator --version v1alpha1 --kind Demo(Your CRD) Then you need to input twice y to create the Resource and Controller, and there will be some newly added files.\n$ git status On branch master Your branch is up to date with \u0026#39;origin/master\u0026#39;. 
Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: apis/operator/v1alpha1/zz_generated.deepcopy.go modified: config/crd/bases/operator.skywalking.apache.org_swagents.yaml modified: config/crd/kustomization.yaml modified: config/rbac/role.yaml modified: go.mod modified: go.sum modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_types.go config/crd/bases/operator.skywalking.apache.org_demoes.yaml config/crd/patches/cainjection_in_operator_demoes.yaml config/crd/patches/webhook_in_operator_demoes.yaml config/rbac/operator_demo_editor_role.yaml config/rbac/operator_demo_viewer_role.yaml config/samples/operator_v1alpha1_demo.yaml controllers/operator/demo_controller.go controllers/operator/suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) Next, we need to focus on the file apis/operator/v1alpha1/demo_types.go which defines your CRD, and the file controllers/operator/configuration_controller.go which defines the Controller. The others files are some configurations generated by the kubebuilder markers. Here are some references:\n  Kubebuilder project demo, in which you can understand the overall architecture.\n  How to add new-api, which you can find more details for oapserverconfig_types.go.\n  Controller-overview, where you can find more details about oapserverconfig_controller.go.\n  3. Create webhook If you want to fields or set defaults to CRs, creating webhooks is a good practice:\nkubebuilder create webhook --group operator --version v1alpha1 --kind Demo --defaulting --programmatic-validation The newly generated files are as follows.\n$ git status On branch master Your branch is ahead of \u0026#39;origin/master\u0026#39; by 1 commit. (use \u0026#34;git push\u0026#34; to publish your local commits) Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: config/webhook/manifests.yaml modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_webhook.go apis/operator/v1alpha1/webhook_suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) You can get more details through webhook-overview.\n4. Create the template Generally, a controller would generate a series of resources, such as workload, rbac, service, etc based on CRDs. SWCK is using the Go standard template engine to generate these resources. All template files are stored in the ./operator/pkg/operator/manifests. You could create a directory there such as demo to hold templates. The framework would transfer the CR as the arguments to these templates. More than CR, it supports passing custom rendering functions by setting up the TmplFunc. 
At last, you need to change the comment and add a field demo there to embed the template files into golang binaries.\n Notice, every file under the template directory can only contain one resource and we can\u0026rsquo;t use the --- to create multiple resources in a single file.\n 5. Build and Test SWCK needs to run in the k8s environment, so we highly recommend using the kind if you don\u0026rsquo;t have a cluster in hand. There are currently two ways to test your implementation.\n Before testing, please make sure you have the kind installed.\n  Test locally. After finishing your implementation, you could use the following steps to test locally:   Disable the webhook  export ENABLE_WEBHOOKS=false Run the main.go with the kubeconfig file.  go run main.go --kubeconfig=(use your kubeconfig file here, and the default is ~/.kube/config)  If you want to test the webhook, please refer the guide.\n  Test in-cluster.   Before testing the swck, please install cert-manager to provide the certificate for webhook in swck.  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml At first, you should build the swck image and load it into the kind cluster, and then you could install the crds and the operator as follows.  make docker-build \u0026amp;\u0026amp; kind load docker-image controller:latest \u0026amp;\u0026amp; make install \u0026amp;\u0026amp; make deploy After the swck is installed, and then you could use the following command to get the logs produced by the operator.  kubectl logs -f [skywalking-swck-controller-manager-*](../use the swck deployment name) -n skywalking-swck-system ","excerpt":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs …","ref":"/docs/skywalking-swck/v0.9.0/how-to-add-new-crd-and-controller/","title":"How to add CRD and Controller in SWCK?"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK 11 or 17 (LTS versions), and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  
Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/latest/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK 11, 17, 21 (LTS versions), and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. 
There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/next/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... 
while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  
grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.0.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. 
If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.1.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. 
If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.2.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. 
All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.3.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK11+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. 
You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.4.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK11+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... 
while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  
grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.5.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK11+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. 
If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.6.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK 11 or 17 (LTS versions), and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. 
If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.7.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build from sources? Download the source tar from the official website, and run the following commands to build from source\nMake sure you have Python 3.7+ and the python3 command available\n$ tar -zxf skywalking-python-src-\u0026lt;version\u0026gt;.tgz $ cd skywalking-python-src-\u0026lt;version\u0026gt; $ make install If you want to build from the latest source codes from GitHub for some reasons, for example, you want to try the latest features that are not released yet, please clone the source codes from GitHub and make install it:\n$ git clone https://github.com/apache/skywalking-python $ cd skywalking-python $ git submodule update --init $ make install NOTE that only releases from the website are official Apache releases.\n","excerpt":"How to build from sources? Download the source tar from the official website, and run the following …","ref":"/docs/skywalking-python/latest/en/setup/faq/how-to-build-from-sources/","title":"How to build from sources?"},{"body":"How to build from sources? 
Download the source tar from the official website, and run the following commands to build from source\nMake sure you have Python 3.7+ and the python3 command available\n$ tar -zxf skywalking-python-src-\u0026lt;version\u0026gt;.tgz $ cd skywalking-python-src-\u0026lt;version\u0026gt; $ make install If you want to build from the latest source codes from GitHub for some reasons, for example, you want to try the latest features that are not released yet, please clone the source codes from GitHub and make install it:\n$ git clone https://github.com/apache/skywalking-python $ cd skywalking-python $ git submodule update --init $ make install NOTE that only releases from the website are official Apache releases.\n","excerpt":"How to build from sources? Download the source tar from the official website, and run the following …","ref":"/docs/skywalking-python/next/en/setup/faq/how-to-build-from-sources/","title":"How to build from sources?"},{"body":"How to build from sources? Download the source tar from the official website, and run the following commands to build from source\nMake sure you have Python 3.7+ and the python3 command available\n$ tar -zxf skywalking-python-src-\u0026lt;version\u0026gt;.tgz $ cd skywalking-python-src-\u0026lt;version\u0026gt; $ make install If you want to build from the latest source codes from GitHub for some reasons, for example, you want to try the latest features that are not released yet, please clone the source codes from GitHub and make install it:\n$ git clone https://github.com/apache/skywalking-python $ cd skywalking-python $ git submodule update --init $ make install NOTE that only releases from the website are official Apache releases.\n","excerpt":"How to build from sources? Download the source tar from the official website, and run the following …","ref":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-build-from-sources/","title":"How to build from sources?"},{"body":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. 
{{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","excerpt":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the …","ref":"/docs/main/latest/en/guides/how-to-bump-up-zipkin/","title":"How to bump up Zipkin Lens dependency"},{"body":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  
Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","excerpt":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the …","ref":"/docs/main/next/en/guides/how-to-bump-up-zipkin/","title":"How to bump up Zipkin Lens dependency"},{"body":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","excerpt":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the …","ref":"/docs/main/v9.4.0/en/guides/how-to-bump-up-zipkin/","title":"How to bump up Zipkin Lens dependency"},{"body":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  
ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","excerpt":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the …","ref":"/docs/main/v9.5.0/en/guides/how-to-bump-up-zipkin/","title":"How to bump up Zipkin Lens dependency"},{"body":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. 
{{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","excerpt":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the …","ref":"/docs/main/v9.6.0/en/guides/how-to-bump-up-zipkin/","title":"How to bump up Zipkin Lens dependency"},{"body":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","excerpt":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the …","ref":"/docs/main/v9.7.0/en/guides/how-to-bump-up-zipkin/","title":"How to bump up Zipkin Lens dependency"},{"body":"How to disable some plugins? 
You can find the plugin name in the list and disable one or more plugins by following methods.\nfrom skywalking import config config.agent_disable_plugins = [\u0026#39;sw_http_server\u0026#39;, \u0026#39;sw_urllib_request\u0026#39;] # can be also CSV format, i.e. \u0026#39;sw_http_server,sw_urllib_request\u0026#39; You can also disable the plugins via environment variables SW_AGENT_DISABLE_PLUGINS, please check the Environment Variables List for an explanation.\n","excerpt":"How to disable some plugins? You can find the plugin name in the list and disable one or more …","ref":"/docs/skywalking-python/latest/en/setup/faq/how-to-disable-plugin/","title":"How to disable some plugins?"},{"body":"How to disable some plugins? You can find the plugin name in the list and disable one or more plugins by following methods.\nfrom skywalking import config config.agent_disable_plugins = [\u0026#39;sw_http_server\u0026#39;, \u0026#39;sw_urllib_request\u0026#39;] # can be also CSV format, i.e. \u0026#39;sw_http_server,sw_urllib_request\u0026#39; You can also disable the plugins via environment variables SW_AGENT_DISABLE_PLUGINS, please check the Environment Variables List for an explanation.\n","excerpt":"How to disable some plugins? You can find the plugin name in the list and disable one or more …","ref":"/docs/skywalking-python/next/en/setup/faq/how-to-disable-plugin/","title":"How to disable some plugins?"},{"body":"How to disable some plugins? You can find the plugin name in the list and disable one or more plugins by following methods.\nfrom skywalking import config config.agent_disable_plugins = [\u0026#39;sw_http_server\u0026#39;, \u0026#39;sw_urllib_request\u0026#39;] # can be also CSV format, i.e. \u0026#39;sw_http_server,sw_urllib_request\u0026#39; You can also disable the plugins via environment variables SW_AGENT_DISABLE_PLUGINS, please check the Environment Variables List for an explanation.\n","excerpt":"How to disable some plugins? You can find the plugin name in the list and disable one or more …","ref":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-disable-plugin/","title":"How to disable some plugins?"},{"body":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","excerpt":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, …","ref":"/docs/skywalking-java/latest/en/faq/osgi/","title":"How to make SkyWalking agent works in `OSGI` environment?"},{"body":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","excerpt":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, …","ref":"/docs/skywalking-java/next/en/faq/osgi/","title":"How to make SkyWalking agent works in `OSGI` environment?"},{"body":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","excerpt":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, …","ref":"/docs/skywalking-java/v9.0.0/en/faq/osgi/","title":"How to make SkyWalking agent works in `OSGI` environment?"},{"body":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","excerpt":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, …","ref":"/docs/skywalking-java/v9.1.0/en/faq/osgi/","title":"How to make SkyWalking agent works in `OSGI` environment?"},{"body":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","excerpt":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, …","ref":"/docs/skywalking-java/v9.2.0/en/faq/osgi/","title":"How to make SkyWalking agent works in `OSGI` environment?"},{"body":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes.\nAfter cloning the repo, make sure you also have cloned the submodule for protocol. Otherwise, run the command below.\ngit submodule update --init Please first refer to the Developer Guide to set up a development environment.\nTL;DR: run make env. This will create virtual environments for python and generate the protocol folder needed for the agent.\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nBy now, you can do what you want. Let\u0026rsquo;s get to the topic of how to test.\nThe test process requires docker and docker-compose throughout. If you haven\u0026rsquo;t installed them, please install them first.\nThen run make test, which will generate a list of plugin versions based on the support_matrix variable in each Plugin and orchestrate the tests automatically. 
Remember to inspect the outcomes carefully to debug your plugin.\nAlternatively, you can run full tests via our GitHub action workflow on your own GitHub fork, it is usually easier since local environment can be tricky to setup for new contributors.\nTo do so, you need to fork this repo on GitHub and enable GitHub actions on your forked repo. Then, you can simply push your changes and open a Pull Request to the fork\u0026rsquo;s master branch.\nNote: GitHub automatically targets Pull Requests to the upstream repo, be careful when you open them to avoid accidental PRs to upstream.\n","excerpt":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes. …","ref":"/docs/skywalking-python/latest/en/contribution/how-to-test-locally/","title":"How to test locally?"},{"body":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes.\nAfter cloning the repo, make sure you also have cloned the submodule for protocol. Otherwise, run the command below.\ngit submodule update --init Please first refer to the Developer Guide to set up a development environment.\nTL;DR: run make env. This will create virtual environments for python and generate the protocol folder needed for the agent.\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nBy now, you can do what you want. Let\u0026rsquo;s get to the topic of how to test.\nThe test process requires docker and docker-compose throughout. If you haven\u0026rsquo;t installed them, please install them first.\nThen run make test, which will generate a list of plugin versions based on the support_matrix variable in each Plugin and orchestrate the tests automatically. Remember to inspect the outcomes carefully to debug your plugin.\nAlternatively, you can run full tests via our GitHub action workflow on your own GitHub fork, it is usually easier since local environment can be tricky to setup for new contributors.\nTo do so, you need to fork this repo on GitHub and enable GitHub actions on your forked repo. Then, you can simply push your changes and open a Pull Request to the fork\u0026rsquo;s master branch.\nNote: GitHub automatically targets Pull Requests to the upstream repo, be careful when you open them to avoid accidental PRs to upstream.\n","excerpt":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes. …","ref":"/docs/skywalking-python/next/en/contribution/how-to-test-locally/","title":"How to test locally?"},{"body":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes.\nAfter cloning the repo, make sure you also have cloned the submodule for protocol. Otherwise, run the command below.\ngit submodule update --init Please first refer to the Developer Guide to set up a development environment.\nTL;DR: run make env. This will create virtual environments for python and generate the protocol folder needed for the agent.\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nBy now, you can do what you want. Let\u0026rsquo;s get to the topic of how to test.\nThe test process requires docker and docker-compose throughout. 
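Put together, the local workflow described above boils down to the following commands (assuming docker and docker-compose are already installed, as noted):

```bash
# one-time: fetch the protocol submodule
git submodule update --init

# create the virtual environments and generate the protocol folder for the agent
make env

# build the plugin version matrix from each plugin's support_matrix and run the tests
make test
```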
If you haven\u0026rsquo;t installed them, please install them first.\nThen run make test, which will generate a list of plugin versions based on the support_matrix variable in each Plugin and orchestrate the tests automatically. Remember to inspect the outcomes carefully to debug your plugin.\nAlternatively, you can run full tests via our GitHub action workflow on your own GitHub fork, it is usually easier since local environment can be tricky to setup for new contributors.\nTo do so, you need to fork this repo on GitHub and enable GitHub actions on your forked repo. Then, you can simply push your changes and open a Pull Request to the fork\u0026rsquo;s master branch.\nNote: GitHub automatically targets Pull Requests to the upstream repo, be careful when you open them to avoid accidental PRs to upstream.\n","excerpt":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes. …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-test-locally/","title":"How to test locally?"},{"body":"How to tolerate custom exceptions In some codebases, exceptions are used as a way of controlling business flow. SkyWalking provides two ways to tolerate an exception that is traced in a span.\n 1) Set the names of exception classes in the agent config. 2) Use our annotation in the code.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. If an exception listed here is detected by the agent, the agent core would not flag the related span with the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... }   When the above exceptions are traced in some spans, the status is as follows.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After setting these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of the spans changes.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the code. If an exception has the @IgnoredException annotation, it wouldn\u0026rsquo;t be marked with the error status when traced. Because the annotation supports inheritance, it also affects the subclasses.\nDependency  Declare a dependency on the toolkit, e.g. via Maven or Gradle. Available since 8.2.0.  
\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception is traced in some spans, the status is as follows.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception is annotated, the status changes.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. SkyWalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths.\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. Each single check costs tens of nanoseconds (~30 nanoseconds in the report, though this may vary across hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this to more than 10, as that could cause a performance issue. A negative value or 0 would be ignored, which means every exception would make the span tagged with the error status.\n","excerpt":"How to tolerate custom exceptions In some codebases, exceptions are used as a way of controlling …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/","title":"How to tolerate custom exceptions"},{"body":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  
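As a concrete sketch of the config-based approach: the exception class below is purely illustrative (it is not part of SkyWalking), while the two agent.config properties in the trailing comment are the ones quoted above.

```java
package com.example.shop;

// A business exception used for flow control rather than to signal real failures
// (class and package names are illustrative only).
public class OutOfStockException extends RuntimeException {
    public OutOfStockException(String message) {
        super(message);
    }
}

// In agent.config:
//   statuscheck.ignored_exceptions=com.example.shop.OutOfStockException
//   statuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1}
// Spans tracing OutOfStockException (or its subclasses, per the hierarchy match shown
// above) would then no longer be flagged with the error status.
```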
Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... }   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","excerpt":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/","title":"How to tolerate custom exceptions"},{"body":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... 
}   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","excerpt":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/","title":"How to tolerate custom exceptions"},{"body":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... 
}   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","excerpt":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/","title":"How to tolerate custom exceptions"},{"body":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... 
}   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","excerpt":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/","title":"How to tolerate custom exceptions"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/latest/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start the storage, OAP and Booster UI with docker-compose As a quick start, you can use our one-liner script to start ElasticSearch or BanyanDB as the storage, OAP server and Booster UI, please make sure you have installed Docker.\nLinux, macOS, Windows (WSL)\nbash \u0026lt;(curl -sSL https://skywalking.apache.org/quickstart-docker.sh) Windows (Powershell)\nInvoke-Expression ([System.Text.Encoding]::UTF8.GetString((Invoke-WebRequest -Uri https://skywalking.apache.org/quickstart-docker.ps1 -UseBasicParsing).Content)) You will be prompted to choose the storage type, and then the script will start the backend cluster with the selected storage.\nTo tear down the cluster, run the following command:\ndocker compose --project-name=skywalking-quickstart down Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.7.0 Start a standalone container with BanyanDB as storage, whose address is banyandb:17912 docker run --name oap --restart always -d -e SW_STORAGE=banyandb -e SW_STORAGE_BANYANDB_TARGETS=banyandb:17912 apache/skywalking-oap-server:9.7.0 Start a standalone container with ElasticSearch 7 as storage, whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.7.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start the storage, OAP and Booster UI with docker-compose As a quick …","ref":"/docs/main/next/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standlone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:8.8.0 Start a standlone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:8.8.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden, otherwise, they will be added in /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
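A sketch of the extension mechanism described above as a single command: the host-side ./oap-ext-config and ./oap-ext-libs directories are arbitrary examples, while the container paths, environment variables, and image tag are the ones documented here.

```bash
docker run --name oap --restart always -d \
  -e SW_STORAGE=elasticsearch \
  -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 \
  -v "$(pwd)/oap-ext-config:/skywalking/ext-config" \
  -v "$(pwd)/oap-ext-libs:/skywalking/ext-libs" \
  apache/skywalking-oap-server:9.0.0
```

Files in ext-config override or extend /skywalking/config; jars in ext-libs are appended to the OAP classpath by the entrypoint script, but an existing jar on the classpath cannot be overridden.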
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standlone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standlone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standlone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standlone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. 
The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used in production. The state-of-the-art practice is to use Gunicorn as the process manager for ASGI applications such as FastAPI to get resilient \u0026amp; blazing fast services.\nSince Gunicorn is a prefork server, it will fork a new process for each worker, and the forked process will be the one that actually serves requests.\n Tired of understanding these complicated multiprocessing behaviors? Try the new sw-python run --prefork/-p support for Gunicorn first! 
You can always fall back to the manual approach (although it\u0026rsquo;s also non-intrusive for application).\n Automatic Injection Approach (Non-intrusive)  Caveat: Although E2E test passes for Python3.7, there\u0026rsquo;s a small chance that this approach won\u0026rsquo;t work on Python 3.7 if your application uses gPRC protocol AND subprocess AND fork together (you will immediately see service is not starting normally, not randomly breaking after)\nThis is due to an unfixed bug in gRPC core that leads to deadlock if Python 3.7 application involves subprocess (like debug mode). You should upgrade to Python 3.8+ soon since the EOL is approaching on 2023 June 27th, or fallback to manual approach should this case happen, or simply use HTTP/Kafka protocol.\n TL;DR: specify -p or --prefork in sw-python run -p and all Gunicorn workers and master will get their own working agent.\nImportant: if the call to gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 Long version: (notice this is different from how uWSGI equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, the agent_experimental_fork_support agent option will be turned on automatically.\nStartup flow: sw-python -\u0026gt; gunicorn -\u0026gt; master process (agent starts) -\u0026gt; fork -\u0026gt; worker process (agent restarts due to os.register_at_fork)\nThe master process will get its own agent, although it won\u0026rsquo;t report any trace, since obviously it doesn\u0026rsquo;t take requests, it still reports metrics that is useful for debugging\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) Limitation: Using normal postfork hook will not add observability to the master process, you could also define a prefork hook to start an agent in the master process, with a instance name like instance-name-master(\u0026lt;pid\u0026gt;)\nThe following is just an example, since Gunicorn\u0026rsquo;s automatic injection approach is likely to work in many situations.\n The manual approach should not be used together with the agent\u0026rsquo;s fork support. Otherwise, agent will be dual booted and raise an error saying that you should not do so.\n # Usage explained here: https://docs.gunicorn.org/en/stable/settings.html#post-fork bind = '0.0.0.0:8088' workers = 3 def post_fork(server, worker): # Important: The import of skywalking should be inside the post_fork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() Run Gunicorn normally without sw-python CLI:\ngunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 ","excerpt":"How to use with Gunicorn? 
Gunicorn is another popular process manager and prefork server widely used …","ref":"/docs/skywalking-python/latest/en/setup/faq/how-to-use-with-gunicorn/","title":"How to use with Gunicorn?"},{"body":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used in production. The state-of-the-art practice is to use Gunicorn as the process manager for ASGI applications such as FastAPI to get resilient \u0026amp; blazing fast services.\nSince Gunicorn is a prefork server, it will fork a new process for each worker, and the forked process will be the one that actually serves requests.\n Tired of understanding these complicated multiprocessing behaviors? Try the new sw-python run --prefork/-p support for Gunicorn first! You can always fall back to the manual approach (although it\u0026rsquo;s also non-intrusive for application).\n Automatic Injection Approach (Non-intrusive)  Caveat: Although E2E test passes for Python3.7, there\u0026rsquo;s a small chance that this approach won\u0026rsquo;t work on Python 3.7 if your application uses gPRC protocol AND subprocess AND fork together (you will immediately see service is not starting normally, not randomly breaking after)\nThis is due to an unfixed bug in gRPC core that leads to deadlock if Python 3.7 application involves subprocess (like debug mode). You should upgrade to Python 3.8+ soon since the EOL is approaching on 2023 June 27th, or fallback to manual approach should this case happen, or simply use HTTP/Kafka protocol.\n TL;DR: specify -p or --prefork in sw-python run -p and all Gunicorn workers and master will get their own working agent.\nImportant: if the call to gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 Long version: (notice this is different from how uWSGI equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, the agent_experimental_fork_support agent option will be turned on automatically.\nStartup flow: sw-python -\u0026gt; gunicorn -\u0026gt; master process (agent starts) -\u0026gt; fork -\u0026gt; worker process (agent restarts due to os.register_at_fork)\nThe master process will get its own agent, although it won\u0026rsquo;t report any trace, since obviously it doesn\u0026rsquo;t take requests, it still reports metrics that is useful for debugging\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) Limitation: Using normal postfork hook will not add observability to the master process, you could also define a prefork hook to start an agent in the master process, with a instance name like instance-name-master(\u0026lt;pid\u0026gt;)\nThe following is just an example, since Gunicorn\u0026rsquo;s automatic injection approach is likely to work in many situations.\n The manual approach should not be used together with the agent\u0026rsquo;s fork support. 
Otherwise, agent will be dual booted and raise an error saying that you should not do so.\n # Usage explained here: https://docs.gunicorn.org/en/stable/settings.html#post-fork bind = '0.0.0.0:8088' workers = 3 def post_fork(server, worker): # Important: The import of skywalking should be inside the post_fork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() Run Gunicorn normally without sw-python CLI:\ngunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 ","excerpt":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used …","ref":"/docs/skywalking-python/next/en/setup/faq/how-to-use-with-gunicorn/","title":"How to use with Gunicorn?"},{"body":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used in production. The state-of-the-art practice is to use Gunicorn as the process manager for ASGI applications such as FastAPI to get resilient \u0026amp; blazing fast services.\nSince Gunicorn is a prefork server, it will fork a new process for each worker, and the forked process will be the one that actually serves requests.\n Tired of understanding these complicated multiprocessing behaviors? Try the new sw-python run --prefork/-p support for Gunicorn first! You can always fall back to the manual approach (although it\u0026rsquo;s also non-intrusive for application).\n Automatic Injection Approach (Non-intrusive)  Caveat: Although E2E test passes for Python3.7, there\u0026rsquo;s a small chance that this approach won\u0026rsquo;t work on Python 3.7 if your application uses gPRC protocol AND subprocess AND fork together (you will immediately see service is not starting normally, not randomly breaking after)\nThis is due to an unfixed bug in gRPC core that leads to deadlock if Python 3.7 application involves subprocess (like debug mode). 
You should upgrade to Python 3.8+ soon since the EOL is approaching on 2023 June 27th, or fallback to manual approach should this case happen, or simply use HTTP/Kafka protocol.\n TL;DR: specify -p or --prefork in sw-python run -p and all Gunicorn workers and master will get their own working agent.\nImportant: if the call to gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 Long version: (notice this is different from how uWSGI equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, the agent_experimental_fork_support agent option will be turned on automatically.\nStartup flow: sw-python -\u0026gt; gunicorn -\u0026gt; master process (agent starts) -\u0026gt; fork -\u0026gt; worker process (agent restarts due to os.register_at_fork)\nThe master process will get its own agent, although it won\u0026rsquo;t report any trace, since obviously it doesn\u0026rsquo;t take requests, it still reports metrics that is useful for debugging\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) Limitation: Using normal postfork hook will not add observability to the master process, you could also define a prefork hook to start an agent in the master process, with a instance name like instance-name-master(\u0026lt;pid\u0026gt;)\nThe following is just an example, since Gunicorn\u0026rsquo;s automatic injection approach is likely to work in many situations.\n The manual approach should not be used together with the agent\u0026rsquo;s fork support. Otherwise, agent will be dual booted and raise an error saying that you should not do so.\n # Usage explained here: https://docs.gunicorn.org/en/stable/settings.html#post-fork bind = '0.0.0.0:8088' workers = 3 def post_fork(server, worker): # Important: The import of skywalking should be inside the post_fork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() Run Gunicorn normally without sw-python CLI:\ngunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 ","excerpt":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used …","ref":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-use-with-gunicorn/","title":"How to use with Gunicorn?"},{"body":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. 
It is a lightweight, fast, and easy-to-use web server.\nSince uWSGI is relatively old and offers multi-language support, it can get quite troublesome due to the usage of a system-level fork.\nSome of the original discussion can be found here:\n [Python] Apache Skywalking, flask uwsgi, no metrics send to server · Issue #6324 · apache/skywalking [Bug] skywalking-python not work with uwsgi + flask in master workers mode and threads mode · Issue #8566 · apache/skywalking   Tired of understanding these complicated multiprocessing behaviours? Try the new sw-python run --prefork/-p support for uWSGI first! You can always fall back to the manual approach. (although it\u0026rsquo;s also possible to pass postfork hook without changing code, which is essentially how sw-python is implemented)\n  Limitation: regardless of the approach used, uWSGI master process cannot be safely monitored. Since it doesn\u0026rsquo;t take any requests, it is generally acceptable. Alternatively, you could switch to Gunicorn, where its master process can be monitored properly along with all child workers.\n Important: The --enable-threads and --master option must be given to allow the usage of post_fork hooks and threading in workers. In the sw-python CLI, these two options will be automatically injected for you in addition to the post_fork hook.\nAutomatic Injection Approach (Non-intrusive) TL;DR: specify -p or --prefork in sw-python run -p and all uWSGI workers will get their own working agent.\nImportant: if the call to uwsgi is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p uwsgi --die-on-term \\  --http 0.0.0.0:9090 \\  --http-manage-expect \\  --master --workers 2 \\  --enable-threads \\  --threads 2 \\  --manage-script-name \\  --mount /=flask_consumer_prefork:app Long version: (notice this is different from how Gunicorn equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, a uwsgi_hook will be registered by the CLI by adding the environment variable into one of [\u0026lsquo;UWSGI_SHARED_PYTHON_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PYIMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PY_IMPORT\u0026rsquo;]. 
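In other words, the CLI arranges for uWSGI to import a small bootstrap module inside each worker. A hypothetical module of that kind could look like the sketch below (the module name and option values are illustrative only and do not reflect the actual internals of the CLI):

# sw_bootstrap.py -- hypothetical example of a module a uWSGI shared import could load
import os

from skywalking import agent, config

# Give every forked worker its own instance name, as recommended elsewhere in this FAQ.
config.init(agent_collector_backend_services='127.0.0.1:11800',
            agent_name='your awesome service',
            agent_instance_name=f'your-awesome-service-child({os.getpid()})')
agent.start()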
uWSGI will then import the module and start the agent in forked workers.\nStartup flow: sw-python -\u0026gt; uwsgi -\u0026gt; master process (agent doesn\u0026rsquo;t start here) -\u0026gt; fork -\u0026gt; worker process (agent starts due to post_fork hook)\nThe master process (which doesn\u0026rsquo;t accept requests) currently does not get its own agent as it can not be safely started and handled by os.register_at_fork() handlers.\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) If you get some problems when using SkyWalking Python agent, you can try to use the following manual method to call @postfork, the low-level API of uWSGI to initialize the agent.\nThe following is an example of the use of uWSGI and flask.\nImportant: Never directly start the agent in the app, forked workers are unlikely to work properly (even if they do, it\u0026rsquo;s out of luck) you should either add the following postfork, or try our new experimental automatic startup through sw-python CLI (see above).\n# main.py # Note: The --master uwsgi flag must be on, otherwise the decorators will not be available to import from uwsgidecorators import postfork @postfork def init_tracing(): # Important: The import of skywalking must be inside the postfork function from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello World!' if __name__ == '__main__': app.run() Run uWSGI normally without sw-python CLI:\nuwsgi --die-on-term \\  --http 0.0.0.0:5000 \\  --http-manage-expect \\  --master --workers 3 \\  --enable-threads \\  --threads 3 \\  --manage-script-name \\  --mount /=main:app ","excerpt":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and …","ref":"/docs/skywalking-python/latest/en/setup/faq/how-to-use-with-uwsgi/","title":"How to use with uWSGI?"},{"body":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and easy-to-use web server.\nSince uWSGI is relatively old and offers multi-language support, it can get quite troublesome due to the usage of a system-level fork.\nSome of the original discussion can be found here:\n [Python] Apache Skywalking, flask uwsgi, no metrics send to server · Issue #6324 · apache/skywalking [Bug] skywalking-python not work with uwsgi + flask in master workers mode and threads mode · Issue #8566 · apache/skywalking   Tired of understanding these complicated multiprocessing behaviours? Try the new sw-python run --prefork/-p support for uWSGI first! You can always fall back to the manual approach. (although it\u0026rsquo;s also possible to pass postfork hook without changing code, which is essentially how sw-python is implemented)\n  Limitation: regardless of the approach used, uWSGI master process cannot be safely monitored. Since it doesn\u0026rsquo;t take any requests, it is generally acceptable. 
Alternatively, you could switch to Gunicorn, where its master process can be monitored properly along with all child workers.\n Important: The --enable-threads and --master option must be given to allow the usage of post_fork hooks and threading in workers. In the sw-python CLI, these two options will be automatically injected for you in addition to the post_fork hook.\nAutomatic Injection Approach (Non-intrusive) TL;DR: specify -p or --prefork in sw-python run -p and all uWSGI workers will get their own working agent.\nImportant: if the call to uwsgi is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p uwsgi --die-on-term \\  --http 0.0.0.0:9090 \\  --http-manage-expect \\  --master --workers 2 \\  --enable-threads \\  --threads 2 \\  --manage-script-name \\  --mount /=flask_consumer_prefork:app Long version: (notice this is different from how Gunicorn equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, a uwsgi_hook will be registered by the CLI by adding the environment variable into one of [\u0026lsquo;UWSGI_SHARED_PYTHON_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PYIMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PY_IMPORT\u0026rsquo;]. uWSGI will then import the module and start the agent in forked workers.\nStartup flow: sw-python -\u0026gt; uwsgi -\u0026gt; master process (agent doesn\u0026rsquo;t start here) -\u0026gt; fork -\u0026gt; worker process (agent starts due to post_fork hook)\nThe master process (which doesn\u0026rsquo;t accept requests) currently does not get its own agent as it can not be safely started and handled by os.register_at_fork() handlers.\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) If you get some problems when using SkyWalking Python agent, you can try to use the following manual method to call @postfork, the low-level API of uWSGI to initialize the agent.\nThe following is an example of the use of uWSGI and flask.\nImportant: Never directly start the agent in the app, forked workers are unlikely to work properly (even if they do, it\u0026rsquo;s out of luck) you should either add the following postfork, or try our new experimental automatic startup through sw-python CLI (see above).\n# main.py # Note: The --master uwsgi flag must be on, otherwise the decorators will not be available to import from uwsgidecorators import postfork @postfork def init_tracing(): # Important: The import of skywalking must be inside the postfork function from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello World!' 
if __name__ == '__main__': app.run() Run uWSGI normally without sw-python CLI:\nuwsgi --die-on-term \\  --http 0.0.0.0:5000 \\  --http-manage-expect \\  --master --workers 3 \\  --enable-threads \\  --threads 3 \\  --manage-script-name \\  --mount /=main:app ","excerpt":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and …","ref":"/docs/skywalking-python/next/en/setup/faq/how-to-use-with-uwsgi/","title":"How to use with uWSGI?"},{"body":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and easy-to-use web server.\nSince uWSGI is relatively old and offers multi-language support, it can get quite troublesome due to the usage of a system-level fork.\nSome of the original discussion can be found here:\n [Python] Apache Skywalking, flask uwsgi, no metrics send to server · Issue #6324 · apache/skywalking [Bug] skywalking-python not work with uwsgi + flask in master workers mode and threads mode · Issue #8566 · apache/skywalking   Tired of understanding these complicated multiprocessing behaviours? Try the new sw-python run --prefork/-p support for uWSGI first! You can always fall back to the manual approach. (although it\u0026rsquo;s also possible to pass postfork hook without changing code, which is essentially how sw-python is implemented)\n  Limitation: regardless of the approach used, uWSGI master process cannot be safely monitored. Since it doesn\u0026rsquo;t take any requests, it is generally acceptable. Alternatively, you could switch to Gunicorn, where its master process can be monitored properly along with all child workers.\n Important: The --enable-threads and --master option must be given to allow the usage of post_fork hooks and threading in workers. In the sw-python CLI, these two options will be automatically injected for you in addition to the post_fork hook.\nAutomatic Injection Approach (Non-intrusive) TL;DR: specify -p or --prefork in sw-python run -p and all uWSGI workers will get their own working agent.\nImportant: if the call to uwsgi is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p uwsgi --die-on-term \\  --http 0.0.0.0:9090 \\  --http-manage-expect \\  --master --workers 2 \\  --enable-threads \\  --threads 2 \\  --manage-script-name \\  --mount /=flask_consumer_prefork:app Long version: (notice this is different from how Gunicorn equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, a uwsgi_hook will be registered by the CLI by adding the environment variable into one of [\u0026lsquo;UWSGI_SHARED_PYTHON_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PYIMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PY_IMPORT\u0026rsquo;]. 
uWSGI will then import the module and start the agent in forked workers.\nStartup flow: sw-python -\u0026gt; uwsgi -\u0026gt; master process (agent doesn\u0026rsquo;t start here) -\u0026gt; fork -\u0026gt; worker process (agent starts due to post_fork hook)\nThe master process (which doesn\u0026rsquo;t accept requests) currently does not get its own agent as it can not be safely started and handled by os.register_at_fork() handlers.\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) If you get some problems when using SkyWalking Python agent, you can try to use the following manual method to call @postfork, the low-level API of uWSGI to initialize the agent.\nThe following is an example of the use of uWSGI and flask.\nImportant: Never directly start the agent in the app, forked workers are unlikely to work properly (even if they do, it\u0026rsquo;s out of luck) you should either add the following postfork, or try our new experimental automatic startup through sw-python CLI (see above).\n# main.py # Note: The --master uwsgi flag must be on, otherwise the decorators will not be available to import from uwsgidecorators import postfork @postfork def init_tracing(): # Important: The import of skywalking must be inside the postfork function from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello World!' if __name__ == '__main__': app.run() Run uWSGI normally without sw-python CLI:\nuwsgi --die-on-term \\  --http 0.0.0.0:5000 \\  --http-manage-expect \\  --master --workers 3 \\  --enable-threads \\  --threads 3 \\  --manage-script-name \\  --mount /=main:app ","excerpt":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and …","ref":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-use-with-uwsgi/","title":"How to use with uWSGI?"},{"body":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following contents would guide you. Let\u0026rsquo;s use the profiling module as an example of how to write a module.\n Please read the Module Design to understand what is module. The module should be written in the skywalking-rover/pkg directory. So we create a new directory called profiling as the module codes space. Implement the interface in the skywalking-rover/pkg/module. Each module has 6 methods, which are Name, RequiredModules, Config, Start, NotifyStartSuccess, and Shutdown.  Name returns the unique name of the module, also this name is used to define in the configuration file. RequiredModules returns this needs depended on module names. In the profiling module, it needs to query the existing process and send snapshots to the backend, so it needs the core and process module. Config returns the config content of this module, which relate to the configuration file, and you could declare the tag(mapstructure) with the field to define the name in the configuration file. 
Start is triggered when the module needs to start. if this module start failure, please return the error. NotifyStartSuccess is triggered after all the active modules are Start method success. Shutdown   Add the configuration into the skywalking-rover/configs/rover_configs.yaml. It should same as the config declaration. Register the module into skywalking-rover/pkg/boot/register.go. Add the Unit test or E2E testing for testing the module is works well. Write the documentation under the skywalking-rover/docs/en directory and add it to the documentation index file skywalking-rover/docs/menu.yml.  ","excerpt":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following …","ref":"/docs/skywalking-rover/latest/en/guides/contribution/how-to-write-module/","title":"How to write a new module?"},{"body":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following contents would guide you. Let\u0026rsquo;s use the profiling module as an example of how to write a module.\n Please read the Module Design to understand what is module. The module should be written in the skywalking-rover/pkg directory. So we create a new directory called profiling as the module codes space. Implement the interface in the skywalking-rover/pkg/module. Each module has 6 methods, which are Name, RequiredModules, Config, Start, NotifyStartSuccess, and Shutdown.  Name returns the unique name of the module, also this name is used to define in the configuration file. RequiredModules returns this needs depended on module names. In the profiling module, it needs to query the existing process and send snapshots to the backend, so it needs the core and process module. Config returns the config content of this module, which relate to the configuration file, and you could declare the tag(mapstructure) with the field to define the name in the configuration file. Start is triggered when the module needs to start. if this module start failure, please return the error. NotifyStartSuccess is triggered after all the active modules are Start method success. Shutdown   Add the configuration into the skywalking-rover/configs/rover_configs.yaml. It should same as the config declaration. Register the module into skywalking-rover/pkg/boot/register.go. Add the Unit test or E2E testing for testing the module is works well. Write the documentation under the skywalking-rover/docs/en directory and add it to the documentation index file skywalking-rover/docs/menu.yml.  ","excerpt":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following …","ref":"/docs/skywalking-rover/next/en/guides/contribution/how-to-write-module/","title":"How to write a new module?"},{"body":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following contents would guide you. Let\u0026rsquo;s use the profiling module as an example of how to write a module.\n Please read the Module Design to understand what is module. The module should be written in the skywalking-rover/pkg directory. So we create a new directory called profiling as the module codes space. Implement the interface in the skywalking-rover/pkg/module. Each module has 6 methods, which are Name, RequiredModules, Config, Start, NotifyStartSuccess, and Shutdown.  Name returns the unique name of the module, also this name is used to define in the configuration file. RequiredModules returns this needs depended on module names. 
In the profiling module, it needs to query the existing processes and send snapshots to the backend, so it needs the core and process modules. Config returns the config content of this module, which relates to the configuration file; you can declare the tag (mapstructure) on a field to define its name in the configuration file. Start is triggered when the module needs to start; if the module fails to start, return the error. NotifyStartSuccess is triggered after the Start methods of all active modules have succeeded. Shutdown   Add the configuration into the skywalking-rover/configs/rover_configs.yaml. It should be the same as the config declaration. Register the module in skywalking-rover/pkg/boot/register.go. Add unit tests or E2E tests to verify that the module works well. Write the documentation under the skywalking-rover/docs/en directory and add it to the documentation index file skywalking-rover/docs/menu.yml.  ","excerpt":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following …","ref":"/docs/skywalking-rover/v0.6.0/en/guides/contribution/how-to-write-module/","title":"How to write a new module?"},{"body":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following content will guide you. Let\u0026rsquo;s use memory-queue as an example of how to write a plugin.\n  Choose the plugin category. As the memory-queue is a queue, the plugin should be written in the skywalking-satellite/plugins/queue directory. So we create a new directory called memory as the plugin\u0026rsquo;s code space.\n  Implement the interface in the skywalking-satellite/plugins/queue/api. Each plugin has 3 common methods, which are Name(), Description(), DefaultConfig().\n Name() returns the unique name in the plugin category. 
Description() returns the description of the plugin, which would be used to generate the plugin documentation. DefaultConfig() returns the default plugin config with yaml pattern, which would be used as the default value in the plugin struct and to generate the plugin documentation.  type Queue struct { config.CommonFields // config  EventBufferSize int `mapstructure:\u0026#34;event_buffer_size\u0026#34;` // The maximum buffer event size.  // components  buffer *goconcurrentqueue.FixedFIFO } func (q *Queue) Name() string { return Name } func (q *Queue) Description() string { return \u0026#34;this is a memory queue to buffer the input event.\u0026#34; } func (q *Queue) DefaultConfig() string { return ` # The maximum buffer event size. event_buffer_size: 5000   Add unit test.\n  Generate the plugin docs.\n  make gen-docs ","excerpt":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following …","ref":"/docs/skywalking-satellite/next/en/guides/contribution/how-to-write-plugin/","title":"How to write a new plugin?"},{"body":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following contents would guide you. Let\u0026rsquo;s use memory-queue as an example of how to write a plugin.\n  Choose the plugin category. As the memory-queue is a queue, the plugin should be written in the skywalking-satellite/plugins/queue directory. So we create a new directory called memory as the plugin codes space.\n  Implement the interface in the skywalking-satellite/plugins/queue/api. Each plugin has 3 common methods, which are Name(), Description(), DefaultConfig().\n Name() returns the unique name in the plugin category. Description() returns the description of the plugin, which would be used to generate the plugin documentation. DefaultConfig() returns the default plugin config with yaml pattern, which would be used as the default value in the plugin struct and to generate the plugin documentation.  type Queue struct { config.CommonFields // config  EventBufferSize int `mapstructure:\u0026#34;event_buffer_size\u0026#34;` // The maximum buffer event size.  // components  buffer *goconcurrentqueue.FixedFIFO } func (q *Queue) Name() string { return Name } func (q *Queue) Description() string { return \u0026#34;this is a memory queue to buffer the input event.\u0026#34; } func (q *Queue) DefaultConfig() string { return ` # The maximum buffer event size. event_buffer_size: 5000   Add unit test.\n  Generate the plugin docs.\n  make gen-docs ","excerpt":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following …","ref":"/docs/skywalking-satellite/v1.2.0/en/guides/contribution/how-to-write-plugin/","title":"How to write a new plugin?"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/latest/en/api/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
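As a quick smoke test, the perfData endpoint described below can be exercised with a few lines of Python (this sketch assumes an OAP receiver listening on localhost:12800, as in the URLs shown in this document; a 204 status confirms the payload was accepted):

import json
import urllib.request

perf_data = {
    "service": "web",
    "serviceVersion": "v0.0.1",
    "pagePath": "/index.html",
    "fmpTime": 10,
}

req = urllib.request.Request(
    "http://localhost:12800/browser/perfData",
    data=json.dumps(perf_data).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)

with urllib.request.urlopen(req) as resp:
    print(resp.status)   # 204 means the data was accepted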
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/next/en/api/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.0.0/en/protocols/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
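For example, a service instance ping against the management endpoint documented below can be issued with plain Python (assuming the default receiver address used throughout these examples); a successful call returns an empty JSON object:

import json
import urllib.request

ping = {
    "service": "User Service Name",
    "serviceInstance": "User Service Instance Name",
}

req = urllib.request.Request(
    "http://localhost:12800/v3/management/keepAlive",
    data=json.dumps(ping).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)

with urllib.request.urlopen(req) as resp:
    print(resp.read().decode())   # expected output: {}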
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [{ \u0026#34;language\u0026#34;: \u0026#34;Lua\u0026#34; }] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: 
\u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.0.0/en/protocols/http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.1.0/en/protocols/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
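Likewise, a minimal single-segment report against the /v3/segment endpoint described below can be sketched as follows (the span values mirror the sample payloads in this document, and the receiver address is assumed to be the default localhost:12800):

import json
import urllib.request

segment = {
    "traceId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
    "traceSegmentId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
    "service": "User_Service_Name",
    "serviceInstance": "User_Service_Instance_Name",
    "spans": [{
        "operationName": "/ingress",
        "startTime": 1588664577013,
        "endTime": 1588664577028,
        "spanType": "Entry",
        "spanId": 0,
        "parentSpanId": -1,
        "isError": False,
        "spanLayer": "Http",
        "componentId": 6000,
    }],
}

req = urllib.request.Request(
    "http://localhost:12800/v3/segment",
    data=json.dumps(segment).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)

urllib.request.urlopen(req)   # a successful call returns an empty response body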
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: 
[{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.1.0/en/protocols/http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
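The /v3/segment and /v3/segments trace endpoints shown earlier accept plain JSON over HTTP POST, so any HTTP client can act as a reporter. The following Go sketch is illustrative only and not part of the protocol definition; it reuses the field names and sample values from the segment objects above and assumes a local OAP receiver on port 12800:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// Span and Segment mirror the JSON fields used in the /v3/segments samples above.
type Span struct {
	OperationName string `json:"operationName"`
	StartTime     int64  `json:"startTime"`
	EndTime       int64  `json:"endTime"`
	SpanType      string `json:"spanType"`
	SpanID        int    `json:"spanId"`
	ParentSpanID  int    `json:"parentSpanId"`
	IsError       bool   `json:"isError"`
	ComponentID   int    `json:"componentId"`
	SpanLayer     string `json:"spanLayer"`
	Peer          string `json:"peer,omitempty"`
}

type Segment struct {
	TraceID         string `json:"traceId"`
	TraceSegmentID  string `json:"traceSegmentId"`
	Service         string `json:"service"`
	ServiceInstance string `json:"serviceInstance"`
	Spans           []Span `json:"spans"`
}

func main() {
	seg := Segment{
		TraceID:         "a12ff60b-5807-463b-a1f8-fb1c8608219e",
		TraceSegmentID:  "a12ff60b-5807-463b-a1f8-fb1c8608219e",
		Service:         "User_Service_Name",
		ServiceInstance: "User_Service_Instance_Name",
		Spans: []Span{
			{OperationName: "/ingress", StartTime: 1588664577013, EndTime: 1588664577028,
				SpanType: "Entry", SpanID: 0, ParentSpanID: -1, ComponentID: 6000, SpanLayer: "Http"},
			{OperationName: "/ingress", StartTime: 1588664577013, EndTime: 1588664577028,
				SpanType: "Exit", SpanID: 1, ParentSpanID: 0, ComponentID: 6000,
				SpanLayer: "Http", Peer: "upstream service"},
		},
	}

	// Bulk mode: /v3/segments accepts a JSON array of segment objects.
	body, _ := json.Marshal([]Segment{seg})
	resp, err := http.Post("http://localhost:12800/v3/segments",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("segments ->", resp.Status)
}
```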
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.2.0/en/protocols/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
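For the browser endpoints above, a reporter only needs to POST the JSON document and treat HTTP status 204 as success. A minimal Go sketch, using a trimmed payload with a few of the timing fields from the sample input and assuming a local receiver on port 12800:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Trimmed payload mirroring a few fields of the /browser/perfData input above.
	payload := []byte(`{
	  "service": "web",
	  "serviceVersion": "v0.0.1",
	  "pagePath": "/index.html",
	  "redirectTime": 10,
	  "dnsTime": 10,
	  "ttfbTime": 10
	}`)

	resp, err := http.Post("http://localhost:12800/browser/perfData",
		"application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The collector acknowledges accepted browser data with HTTP 204 (No Content).
	if resp.StatusCode == http.StatusNoContent {
		fmt.Println("perfData accepted")
	} else {
		fmt.Println("unexpected status:", resp.Status)
	}
}
```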
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: 
[{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.2.0/en/protocols/http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
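The management endpoints above work the same way: plain JSON over HTTP POST, with an empty JSON object as the response body. A small illustrative Go helper follows; the service and instance names are placeholders rather than values required by the protocol:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// oapBase is an assumed local OAP receiver address; adjust to your deployment.
const oapBase = "http://localhost:12800"

func post(path string, payload any) error {
	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	resp, err := http.Post(oapBase+path, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	fmt.Println(path, "->", resp.Status)
	return nil
}

func main() {
	// Report instance properties once at startup.
	_ = post("/v3/management/reportProperties", map[string]any{
		"service":         "User Service Name",
		"serviceInstance": "User Service Instance Name",
		"properties": []map[string]string{
			{"key": "language", "value": "Lua"},
		},
	})
	// Send a keep-alive ping periodically (shown once here for brevity).
	_ = post("/v3/management/keepAlive", map[string]any{
		"service":         "User Service Name",
		"serviceInstance": "User Service Instance Name",
	})
}
```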
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.3.0/en/protocols/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: 
[{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.3.0/en/protocols/http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.4.0/en/api/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.5.0/en/api/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.6.0/en/api/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.7.0/en/api/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. 
With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","excerpt":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful …","ref":"/docs/main/latest/en/setup/ai-pipeline/http-restful-uri-pattern/","title":"HTTP Restful URI recognition"},{"body":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  
rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","excerpt":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful …","ref":"/docs/main/next/en/setup/ai-pipeline/http-restful-uri-pattern/","title":"HTTP Restful URI recognition"},{"body":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. No configuration to set periods of feedRawData and fetchAllPatterns services.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","excerpt":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful …","ref":"/docs/main/v9.5.0/en/setup/ai-pipeline/http-restful-uri-pattern/","title":"HTTP Restful URI recognition"},{"body":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. 
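A third-party recognition backend has to expose these two RPCs as a gRPC server. The skeleton below is only a sketch: it assumes URIRecognition.proto has been compiled with protoc-gen-go and protoc-gen-go-grpc into a hypothetical local package (imported here as uripb), the listen port is arbitrary, and both handlers return empty results instead of real patterns:

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"

	// Hypothetical import path for code generated from URIRecognition.proto.
	uripb "example.com/ai/uri/v1"
)

// server implements HttpUriRecognitionService as defined in URIRecognition.proto.
type server struct {
	uripb.UnimplementedHttpUriRecognitionServiceServer
}

// FetchAllPatterns is polled by every OAP node to sync the recognized pattern dictionary.
func (s *server) FetchAllPatterns(ctx context.Context, req *uripb.HttpUriRecognitionSyncRequest) (*uripb.HttpUriRecognitionResponse, error) {
	// A real implementation would return the trained URI patterns per service.
	return &uripb.HttpUriRecognitionResponse{}, nil
}

// FeedRawData receives raw URI candidates pushed by OAP for training.
func (s *server) FeedRawData(ctx context.Context, req *uripb.HttpUriRecognitionRequest) (*emptypb.Empty, error) {
	// A real implementation would queue the candidates for model training.
	return &emptypb.Empty{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":17128") // example port only
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	uripb.RegisterHttpUriRecognitionServiceServer(s, &server{})
	log.Fatal(s.Serve(lis))
}
```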
With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","excerpt":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful …","ref":"/docs/main/v9.6.0/en/setup/ai-pipeline/http-restful-uri-pattern/","title":"HTTP Restful URI recognition"},{"body":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  
rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","excerpt":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful …","ref":"/docs/main/v9.7.0/en/setup/ai-pipeline/http-restful-uri-pattern/","title":"HTTP Restful URI recognition"},{"body":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s implementation.\nIt utilizes the -toolexec flag during Golang compilation to introduce custom programs that intercept all original files in the compilation stage. This allows for the modification or addition of files to be completed seamlessly.\nToolchain in Golang The -toolexec flag in Golang is a powerful feature that can be used during stages such as build, test, and others. When this flag is used, developers can provide a custom program or script to replace the default go tools functionality. This offers greater flexibility and control over the build, test, or analysis processes.\nWhen passing this flag during a go build, it can intercept the execution flow of commands such as compile, asm, and link, which are required during Golang\u0026rsquo;s compilation process. These commands are also referred to as the toolchain within Golang.\nInformation about the Toolchain The following command demonstrates the parameter information for the specified -toolexec program when it is invoked:\n/usr/bin/skywalking-go /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile -o /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/_pkg_.a -trimpath /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011=\u0026gt; -p runtime -std -+ -buildid zSeDyjJh0lgXlIqBZScI/zSeDyjJh0lgXlIqBZScI -goversion go1.19.2 -symabis /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/symabis -c=4 -nolocalimports -importcfg /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/importcfg -pack -asmhdr /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/go_asm.h /usr/local/opt/go/libexec/src/runtime/alg.go /usr/local/opt/go/libexec/src/runtime/asan0.go ... The code above demonstrates the parameters used when a custom program is executed, which mainly includes the following information:\n Current toolchain tool: In this example, it is a compilation tool with the path: /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile. Target file of the tool: The final target file that the current tool needs to generate. Package information: The module package path information being compiled, which is the parameter value of the -p flag. The current package path is runtime. 
Temporary directory address: For each compilation, the Go program would generate a corresponding temporary directory. This directory contains all the temporary files required for the compilation. Files to be compiled: Many .go file paths can be seen at the end of the command, which are the file path list of the module that needs to be compiled.  Toolchain with SkyWalking Go Agent SkyWalking Go Agent works by intercepting the compile program through the toolchain and making changes to the program based on the information above. The main parts include:\n AST: Using AST to parse and manipulate the codes. File copying/generation: Copy or generate files to the temporary directory required for the compilation, and add file path addresses when the compilation command is executed. Proxy command execution: After completing the modification of the specified package, the new codes are weaved into the target.  Hybrid Compilation After enhancing the program with SkyWalking Go Agent, the following parts of the program will be enhanced:\n SkyWalking Go: The agent core part of the code would be dynamically copied to the agent path for plugin use. Plugins: Enhance the specified framework code according to the enhancement rules of the plugins. Runtime: Enhance the runtime package in Go, including extensions for goroutines and other content. Main: Enhance the main package during system startup, for stating the system with Agent.  ","excerpt":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s …","ref":"/docs/skywalking-go/latest/en/concepts-and-designs/hybrid-compilation/","title":"Hybrid Compilation"},{"body":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s implementation.\nIt utilizes the -toolexec flag during Golang compilation to introduce custom programs that intercept all original files in the compilation stage. This allows for the modification or addition of files to be completed seamlessly.\nToolchain in Golang The -toolexec flag in Golang is a powerful feature that can be used during stages such as build, test, and others. When this flag is used, developers can provide a custom program or script to replace the default go tools functionality. This offers greater flexibility and control over the build, test, or analysis processes.\nWhen passing this flag during a go build, it can intercept the execution flow of commands such as compile, asm, and link, which are required during Golang\u0026rsquo;s compilation process. These commands are also referred to as the toolchain within Golang.\nInformation about the Toolchain The following command demonstrates the parameter information for the specified -toolexec program when it is invoked:\n/usr/bin/skywalking-go /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile -o /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/_pkg_.a -trimpath /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011=\u0026gt; -p runtime -std -+ -buildid zSeDyjJh0lgXlIqBZScI/zSeDyjJh0lgXlIqBZScI -goversion go1.19.2 -symabis /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/symabis -c=4 -nolocalimports -importcfg /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/importcfg -pack -asmhdr /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/go_asm.h /usr/local/opt/go/libexec/src/runtime/alg.go /usr/local/opt/go/libexec/src/runtime/asan0.go ... 
The code above demonstrates the parameters used when a custom program is executed, which mainly includes the following information:\n Current toolchain tool: In this example, it is a compilation tool with the path: /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile. Target file of the tool: The final target file that the current tool needs to generate. Package information: The module package path information being compiled, which is the parameter value of the -p flag. The current package path is runtime. Temporary directory address: For each compilation, the Go program would generate a corresponding temporary directory. This directory contains all the temporary files required for the compilation. Files to be compiled: Many .go file paths can be seen at the end of the command, which are the file path list of the module that needs to be compiled.  Toolchain with SkyWalking Go Agent SkyWalking Go Agent works by intercepting the compile program through the toolchain and making changes to the program based on the information above. The main parts include:\n AST: Using AST to parse and manipulate the codes. File copying/generation: Copy or generate files to the temporary directory required for the compilation, and add file path addresses when the compilation command is executed. Proxy command execution: After completing the modification of the specified package, the new codes are weaved into the target.  Hybrid Compilation After enhancing the program with SkyWalking Go Agent, the following parts of the program will be enhanced:\n SkyWalking Go: The agent core part of the code would be dynamically copied to the agent path for plugin use. Plugins: Enhance the specified framework code according to the enhancement rules of the plugins. Runtime: Enhance the runtime package in Go, including extensions for goroutines and other content. Main: Enhance the main package during system startup, for stating the system with Agent.  ","excerpt":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s …","ref":"/docs/skywalking-go/next/en/concepts-and-designs/hybrid-compilation/","title":"Hybrid Compilation"},{"body":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s implementation.\nIt utilizes the -toolexec flag during Golang compilation to introduce custom programs that intercept all original files in the compilation stage. This allows for the modification or addition of files to be completed seamlessly.\nToolchain in Golang The -toolexec flag in Golang is a powerful feature that can be used during stages such as build, test, and others. When this flag is used, developers can provide a custom program or script to replace the default go tools functionality. This offers greater flexibility and control over the build, test, or analysis processes.\nWhen passing this flag during a go build, it can intercept the execution flow of commands such as compile, asm, and link, which are required during Golang\u0026rsquo;s compilation process. 
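A toy interceptor makes the mechanism concrete. The sketch below is not the SkyWalking Go agent; it is a minimal stand-in that receives the real toolchain command as its arguments, reports which package is being compiled by reading the -p flag, and then executes the original tool unchanged. It would be built separately and passed with go build -toolexec:

```go
// toolexec-demo is a toy -toolexec interceptor: it prints which toolchain
// command and package are being compiled, then runs the original tool.
// Usage (illustrative): go build -toolexec /path/to/toolexec-demo ./...
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "expected the real tool command as arguments")
		os.Exit(1)
	}
	tool := os.Args[1]  // e.g. .../pkg/tool/<os_arch>/compile
	args := os.Args[2:] // the tool's own flags plus the .go files to compile

	// Look for the -p flag to learn which package is being built.
	for i, a := range args {
		if a == "-p" && i+1 < len(args) {
			fmt.Fprintf(os.Stderr, "[toolexec-demo] %s building package %s\n", tool, args[i+1])
		}
	}

	// An instrumenting agent would rewrite or add source files at this point
	// before delegating; this demo just runs the original tool untouched.
	cmd := exec.Command(tool, args...)
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		if ee, ok := err.(*exec.ExitError); ok {
			os.Exit(ee.ExitCode())
		}
		os.Exit(1)
	}
}
```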
These commands are also referred to as the toolchain within Golang.\nInformation about the Toolchain The following command demonstrates the parameter information for the specified -toolexec program when it is invoked:\n/usr/bin/skywalking-go /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile -o /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/_pkg_.a -trimpath /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011=\u0026gt; -p runtime -std -+ -buildid zSeDyjJh0lgXlIqBZScI/zSeDyjJh0lgXlIqBZScI -goversion go1.19.2 -symabis /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/symabis -c=4 -nolocalimports -importcfg /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/importcfg -pack -asmhdr /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/go_asm.h /usr/local/opt/go/libexec/src/runtime/alg.go /usr/local/opt/go/libexec/src/runtime/asan0.go ... The code above demonstrates the parameters used when a custom program is executed, which mainly includes the following information:\n Current toolchain tool: In this example, it is a compilation tool with the path: /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile. Target file of the tool: The final target file that the current tool needs to generate. Package information: The module package path information being compiled, which is the parameter value of the -p flag. The current package path is runtime. Temporary directory address: For each compilation, the Go program would generate a corresponding temporary directory. This directory contains all the temporary files required for the compilation. Files to be compiled: Many .go file paths can be seen at the end of the command, which are the file path list of the module that needs to be compiled.  Toolchain with SkyWalking Go Agent SkyWalking Go Agent works by intercepting the compile program through the toolchain and making changes to the program based on the information above. The main parts include:\n AST: Using AST to parse and manipulate the codes. File copying/generation: Copy or generate files to the temporary directory required for the compilation, and add file path addresses when the compilation command is executed. Proxy command execution: After completing the modification of the specified package, the new codes are weaved into the target.  Hybrid Compilation After enhancing the program with SkyWalking Go Agent, the following parts of the program will be enhanced:\n SkyWalking Go: The agent core part of the code would be dynamically copied to the agent path for plugin use. Plugins: Enhance the specified framework code according to the enhancement rules of the plugins. Runtime: Enhance the runtime package in Go, including extensions for goroutines and other content. Main: Enhance the main package during system startup, for stating the system with Agent.  ","excerpt":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s …","ref":"/docs/skywalking-go/v0.4.0/en/concepts-and-designs/hybrid-compilation/","title":"Hybrid Compilation"},{"body":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","excerpt":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community …","ref":"/docs/main/latest/en/faq/install_agent_on_websphere/","title":"IllegalStateException when installing Java agent on WebSphere"},{"body":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","excerpt":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community …","ref":"/docs/main/next/en/faq/install_agent_on_websphere/","title":"IllegalStateException when installing Java agent on WebSphere"},{"body":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occured because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","excerpt":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community …","ref":"/docs/main/v9.0.0/en/faq/install_agent_on_websphere/","title":"IllegalStateException when installing Java agent on WebSphere"},{"body":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occured because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","excerpt":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community …","ref":"/docs/main/v9.1.0/en/faq/install_agent_on_websphere/","title":"IllegalStateException when installing Java agent on WebSphere"},{"body":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","excerpt":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community …","ref":"/docs/main/v9.2.0/en/faq/install_agent_on_websphere/","title":"IllegalStateException when installing Java agent on WebSphere"},{"body":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","excerpt":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community …","ref":"/docs/main/v9.3.0/en/faq/install_agent_on_websphere/","title":"IllegalStateException when installing Java agent on WebSphere"},{"body":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","excerpt":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community …","ref":"/docs/main/v9.4.0/en/faq/install_agent_on_websphere/","title":"IllegalStateException when installing Java agent on WebSphere"},{"body":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","excerpt":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community …","ref":"/docs/main/v9.5.0/en/faq/install_agent_on_websphere/","title":"IllegalStateException when installing Java agent on WebSphere"},{"body":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","excerpt":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community …","ref":"/docs/main/v9.6.0/en/faq/install_agent_on_websphere/","title":"IllegalStateException when installing Java agent on WebSphere"},{"body":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","excerpt":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community …","ref":"/docs/main/v9.7.0/en/faq/install_agent_on_websphere/","title":"IllegalStateException when installing Java agent on WebSphere"},{"body":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description Default Value     skywalking_agent.enable Enable skywalking_agent extension or not. Off   skywalking_agent.log_file Log file path. /tmp/skywalking-agent.log   skywalking_agent.log_level Log level: one of OFF, TRACE, DEBUG, INFO, WARN, ERROR. INFO   skywalking_agent.runtime_dir Skywalking agent runtime directory. /tmp/skywalking-agent   skywalking_agent.server_addr Address of skywalking oap server. Only available when reporter_type is grpc. 127.0.0.1:11800   skywalking_agent.service_name Application service name. hello-skywalking   skywalking_agent.skywalking_version Skywalking version, 8 or 9. 8   skywalking_agent.authentication Skywalking authentication token, let it empty if the backend isn\u0026rsquo;t enabled. Only available when reporter_type is grpc.    
skywalking_agent.worker_threads Skywalking worker threads, 0 will auto set as the cpu core size. 0   skywalking_agent.enable_tls Wether to enable tls for gPRC, default is false. Only available when reporter_type is grpc. Off   skywalking_agent.ssl_trusted_ca_path The gRPC SSL trusted ca file. Only available when reporter_type is grpc.    skywalking_agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.heartbeat_period Agent heartbeat report period. Unit, second. 30   skywalking_agent.properties_report_period_factor The agent sends the instance properties to the backend every heartbeat_period * properties_report_period_factor seconds. 10   skywalking_agent.enable_zend_observer Whether to use zend observer instead of zend_execute_ex to hook the functions, this feature is only available for PHP8+. Off   skywalking_agent.reporter_type Reporter type, optional values are grpc and kafka. grpc   skywalking_agent.kafka_bootstrap_servers A list of host/port pairs to use for connect to the Kafka cluster. Only available when reporter_type is kafka.    skywalking_agent.kafka_producer_config Configure Kafka Producer configuration in JSON format {\u0026quot;key\u0026quot;: \u0026quot;value}. Only available when reporter_type is kafka. {}    ","excerpt":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description …","ref":"/docs/skywalking-php/latest/en/configuration/ini-settings/","title":"INI Settings"},{"body":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description Default Value     skywalking_agent.enable Enable skywalking_agent extension or not. Off   skywalking_agent.log_file Log file path. /tmp/skywalking-agent.log   skywalking_agent.log_level Log level: one of OFF, TRACE, DEBUG, INFO, WARN, ERROR. INFO   skywalking_agent.runtime_dir Skywalking agent runtime directory. /tmp/skywalking-agent   skywalking_agent.server_addr Address of skywalking oap server. Only available when reporter_type is grpc. 127.0.0.1:11800   skywalking_agent.service_name Application service name. hello-skywalking   skywalking_agent.skywalking_version Skywalking version, 8 or 9. 8   skywalking_agent.authentication Skywalking authentication token, let it empty if the backend isn\u0026rsquo;t enabled. Only available when reporter_type is grpc.    skywalking_agent.worker_threads Skywalking worker threads, 0 will auto set as the cpu core size. 0   skywalking_agent.enable_tls Wether to enable tls for gPRC, default is false. Only available when reporter_type is grpc. Off   skywalking_agent.ssl_trusted_ca_path The gRPC SSL trusted ca file. Only available when reporter_type is grpc.    skywalking_agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.heartbeat_period Agent heartbeat report period. Unit, second. 30   skywalking_agent.properties_report_period_factor The agent sends the instance properties to the backend every heartbeat_period * properties_report_period_factor seconds. 
10   skywalking_agent.enable_zend_observer Whether to use zend observer instead of zend_execute_ex to hook the functions, this feature is only available for PHP8+. Off   skywalking_agent.reporter_type Reporter type, optional values are grpc and kafka. grpc   skywalking_agent.kafka_bootstrap_servers A list of host/port pairs to use for connect to the Kafka cluster. Only available when reporter_type is kafka.    skywalking_agent.kafka_producer_config Configure Kafka Producer configuration in JSON format {\u0026quot;key\u0026quot;: \u0026quot;value}. Only available when reporter_type is kafka. {}   skywalking_agent.inject_context Whether to enable automatic injection of skywalking context variables (such as SW_TRACE_ID). For php-fpm mode, it will be injected into the $_SERVER variable. For swoole mode, it will be injected into the $request-\u0026gt;server variable. Off    ","excerpt":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description …","ref":"/docs/skywalking-php/next/en/configuration/ini-settings/","title":"INI Settings"},{"body":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description Default Value     skywalking_agent.enable Enable skywalking_agent extension or not. Off   skywalking_agent.log_file Log file path. /tmp/skywalking-agent.log   skywalking_agent.log_level Log level: one of OFF, TRACE, DEBUG, INFO, WARN, ERROR. INFO   skywalking_agent.runtime_dir Skywalking agent runtime directory. /tmp/skywalking-agent   skywalking_agent.server_addr Address of skywalking oap server. Only available when reporter_type is grpc. 127.0.0.1:11800   skywalking_agent.service_name Application service name. hello-skywalking   skywalking_agent.skywalking_version Skywalking version, 8 or 9. 8   skywalking_agent.authentication Skywalking authentication token, let it empty if the backend isn\u0026rsquo;t enabled. Only available when reporter_type is grpc.    skywalking_agent.worker_threads Skywalking worker threads, 0 will auto set as the cpu core size. 0   skywalking_agent.enable_tls Wether to enable tls for gPRC, default is false. Only available when reporter_type is grpc. Off   skywalking_agent.ssl_trusted_ca_path The gRPC SSL trusted ca file. Only available when reporter_type is grpc.    skywalking_agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.heartbeat_period Agent heartbeat report period. Unit, second. 30   skywalking_agent.properties_report_period_factor The agent sends the instance properties to the backend every heartbeat_period * properties_report_period_factor seconds. 10   skywalking_agent.enable_zend_observer Whether to use zend observer instead of zend_execute_ex to hook the functions, this feature is only available for PHP8+. Off   skywalking_agent.reporter_type Reporter type, optional values are grpc and kafka. grpc   skywalking_agent.kafka_bootstrap_servers A list of host/port pairs to use for connect to the Kafka cluster. Only available when reporter_type is kafka.    skywalking_agent.kafka_producer_config Configure Kafka Producer configuration in JSON format {\u0026quot;key\u0026quot;: \u0026quot;value}. Only available when reporter_type is kafka. 
{}    ","excerpt":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description …","ref":"/docs/skywalking-php/v0.7.0/en/configuration/ini-settings/","title":"INI Settings"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/latest/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/next/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up at first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would start up at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. 
This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only one single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. 
Most of them would …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. 
And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our website, currently we have pre-built binaries for macOS, Linux and Windows. 
Extract the tarball and add bin/\u0026lt;os\u0026gt;/e2e to you PATH environment variable.\nInstall from source codes If you want to try some features that are not released yet, you can compile from the source code.\nmkdir skywalking-infra-e2e \u0026amp;\u0026amp; cd skywalking-infra-e2e git clone https://github.com/apache/skywalking-infra-e2e.git . make build Then add the binary in bin/\u0026lt;os\u0026gt;/e2e to your PATH.\nInstall via go install If you already have Go SDK installed, you can also directly install e2e via go install.\ngo install github.com/apache/skywalking-infra-e2e/cmd/e2e@\u0026lt;revision\u0026gt; Note that installation via go install is only supported after Git commit 2a33478 so you can only go install a revision afterwards.\n","excerpt":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our …","ref":"/docs/skywalking-infra-e2e/latest/en/setup/install/","title":"Install SkyWalking Infra E2E"},{"body":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our website, currently we have pre-built binaries for macOS, Linux and Windows. Extract the tarball and add bin/\u0026lt;os\u0026gt;/e2e to you PATH environment variable.\nInstall from source codes If you want to try some features that are not released yet, you can compile from the source code.\nmkdir skywalking-infra-e2e \u0026amp;\u0026amp; cd skywalking-infra-e2e git clone https://github.com/apache/skywalking-infra-e2e.git . make build Then add the binary in bin/\u0026lt;os\u0026gt;/e2e to your PATH.\nInstall via go install If you already have Go SDK installed, you can also directly install e2e via go install.\ngo install github.com/apache/skywalking-infra-e2e/cmd/e2e@\u0026lt;revision\u0026gt; Note that installation via go install is only supported after Git commit 2a33478 so you can only go install a revision afterwards.\n","excerpt":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our …","ref":"/docs/skywalking-infra-e2e/next/en/setup/install/","title":"Install SkyWalking Infra E2E"},{"body":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our website, currently we have pre-built binaries for macOS, Linux and Windows. Extract the tarball and add bin/\u0026lt;os\u0026gt;/e2e to you PATH environment variable.\nInstall from source codes If you want to try some features that are not released yet, you can compile from the source code.\nmkdir skywalking-infra-e2e \u0026amp;\u0026amp; cd skywalking-infra-e2e git clone https://github.com/apache/skywalking-infra-e2e.git . make build Then add the binary in bin/\u0026lt;os\u0026gt;/e2e to your PATH.\nInstall via go install If you already have Go SDK installed, you can also directly install e2e via go install.\ngo install github.com/apache/skywalking-infra-e2e/cmd/e2e@\u0026lt;revision\u0026gt; Note that installation via go install is only supported after Git commit 2a33478 so you can only go install a revision afterwards.\n","excerpt":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/install/","title":"Install SkyWalking Infra E2E"},{"body":"Installation Banyand is the daemon server of the BanyanDB database. 
This section will show several paths installing it in your environment.\nGet Binaries Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the complier\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls banyand/build/bin bydbctl Setup Banyand Banyand shows its available commands and arguments by\n$ ./banyand-server ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ BanyanDB, as an observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data Usage: [command] Available Commands: completion generate the autocompletion script for the specified shell help Help about any command liaison Run as the liaison server meta Run as the meta server standalone Run as the standalone server storage Run as the storage server Flags: -h, --help help for this command -v, --version version for this command Use \u0026#34; [command] --help\u0026#34; for more information about a command. Banyand is running as a standalone server by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... 
***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 if no errors occurred.\nSetup Multiple Banyand as Cluster Firstly, you need to setup a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster. The etcd cluster can be setup by the etcd installation guide. The etcd version should be v3.1 or above.\nThen, you can start the metadata module by\nConsidering the etcd cluster is spread across three nodes with the addresses `10.0.0.1:2379`, `10.0.0.2:2379`, and `10.0.0.3:2379`, Data nodes and liaison nodes are running as independent processes by ```shell $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Docker \u0026amp; Kubernetes The docker image of banyandb is available on Docker Hub.\nIf you want to onboard banyandb to the Kubernetes, you can refer to the banyandb-helm.\n","excerpt":"Installation Banyand is the daemon server of the BanyanDB database. This section will show several …","ref":"/docs/skywalking-banyandb/latest/installation/","title":"Installation"},{"body":"Installation Banyand is the daemon server of the BanyanDB database. This section will show several paths installing it in your environment.\nGet Binaries Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the complier\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  
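The cluster startup quoted above is easier to read as a fenced block. A sketch assuming the three-node etcd cluster at 10.0.0.1–10.0.0.3:2379 is already running, and `<flags>` stands for whatever node-specific flags you pass:

```shell
ETCD="http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379"

# Three data (storage) nodes, typically one per host
./banyand-server storage --etcd-endpoints="$ETCD" <flags>
./banyand-server storage --etcd-endpoints="$ETCD" <flags>
./banyand-server storage --etcd-endpoints="$ETCD" <flags>

# One liaison node routing client traffic to the data nodes
./banyand-server liaison --etcd-endpoints="$ETCD" <flags>
```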
Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls banyand/build/bin bydbctl Setup Banyand Banyand shows its available commands and arguments by\n$ ./banyand-server ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ BanyanDB, as an observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data Usage: [command] Available Commands: completion generate the autocompletion script for the specified shell help Help about any command liaison Run as the liaison server meta Run as the meta server standalone Run as the standalone server storage Run as the storage server Flags: -h, --help help for this command -v, --version version for this command Use \u0026#34; [command] --help\u0026#34; for more information about a command. Banyand is running as a standalone server by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 if no errors occurred.\nSetup Multiple Banyand as Cluster Firstly, you need to setup a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster. The etcd cluster can be setup by the etcd installation guide. The etcd version should be v3.1 or above.\nThen, you can start the metadata module by\nConsidering the etcd cluster is spread across three nodes with the addresses `10.0.0.1:2379`, `10.0.0.2:2379`, and `10.0.0.3:2379`, Data nodes and liaison nodes are running as independent processes by ```shell $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Docker \u0026amp; Kubernetes The docker image of banyandb is available on Docker Hub.\nIf you want to onboard banyandb to the Kubernetes, you can refer to the banyandb-helm.\n","excerpt":"Installation Banyand is the daemon server of the BanyanDB database. This section will show several …","ref":"/docs/skywalking-banyandb/v0.5.0/installation/","title":"Installation"},{"body":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the SkyWalking Python agent via various ways described next.\n Already installed? 
Check out easy ways to start the agent in your application\n  Non-intrusive  | Intrusive  | Containerization\n  All available configurations are listed here\n Important Note on Different Reporter Protocols Currently only gRPC protocol fully supports all available telemetry capabilities in the Python agent.\nWhile gRPC is highly recommended, we provide alternative protocols to suit your production requirements.\nPlease refer to the table below before deciding which report protocol suits best for you.\n   Reporter Protocol Trace Reporter Log Reporter Meter Reporter Profiling     gRPC ✅ ✅ ✅ ✅   HTTP ✅ ✅ ❌ ❌   Kafka ✅ ✅ ✅ ❌    From PyPI  If you want to try out the latest features that are not released yet, please refer to this guide to build from sources.\n The Python agent module is published to PyPI, from where you can use pip to install:\n# Install the latest version, using the default gRPC protocol to report data to OAP pip install \u0026#34;apache-skywalking\u0026#34; # Install support for every protocol (gRPC, HTTP, Kafka) pip install \u0026#34;apache-skywalking[all]\u0026#34; # Install the latest version, using the http protocol to report data to OAP pip install \u0026#34;apache-skywalking[http]\u0026#34; # Install the latest version, using the kafka protocol to report data to OAP pip install \u0026#34;apache-skywalking[kafka]\u0026#34; # Install a specific version x.y.z # pip install apache-skywalking==x.y.z pip install apache-skywalking==0.1.0 # For example, install version 0.1.0 no matter what the latest version is From Docker Hub SkyWalking Python agent provides convenient dockerfile and images for easy integration utilizing its auto-bootstrap capability.\nSimply pull SkyWalking Python image from Docker Hub based on desired agent version, protocol and Python version.\nFROMapache/skywalking-python:0.8.0-grpc-py3.10# ... build your Python application# If you prefer compact images (built from official Python slim image)FROMapache/skywalking-python:0.8.0-grpc-py3.10-slim# ... build your Python applicationThen, You can build your Python application image based on our agent-enabled Python images and start your applications with SkyWalking agent enabled for you. Please refer to our Containerization Guide for further instructions on integration and configuring.\nFrom Source Code Please refer to the How-to-build-from-sources FAQ.\n","excerpt":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the …","ref":"/docs/skywalking-python/latest/en/setup/installation/","title":"Installation"},{"body":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the SkyWalking Python agent via various ways described next.\n Already installed? 
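The PyPI and Docker Hub variants listed above, collected in one place. The image tags shown (`0.8.0-grpc-py3.10`, `0.8.0-grpc-py3.10-slim`) are just the examples quoted in the docs; pick the tag matching your agent version, reporter protocol, and Python version:

```shell
# PyPI: default gRPC reporter, protocol extras, or a pinned release
pip install "apache-skywalking"
pip install "apache-skywalking[all]"        # gRPC + HTTP + Kafka support
pip install "apache-skywalking[http]"
pip install "apache-skywalking[kafka]"
pip install apache-skywalking==0.1.0        # pin a specific version

# Docker Hub: agent-enabled base images to build your application image on
docker pull apache/skywalking-python:0.8.0-grpc-py3.10
docker pull apache/skywalking-python:0.8.0-grpc-py3.10-slim
```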
Check out easy ways to start the agent in your application\n  Non-intrusive  | Intrusive  | Containerization\n  All available configurations are listed here\n Important Note on Different Reporter Protocols Currently only gRPC protocol fully supports all available telemetry capabilities in the Python agent.\nWhile gRPC is highly recommended, we provide alternative protocols to suit your production requirements.\nPlease refer to the table below before deciding which report protocol suits best for you.\n   Reporter Protocol Trace Reporter Log Reporter Meter Reporter Profiling     gRPC ✅ ✅ ✅ ✅   HTTP ✅ ✅ ❌ ❌   Kafka ✅ ✅ ✅ ❌    From PyPI  If you want to try out the latest features that are not released yet, please refer to this guide to build from sources.\n The Python agent module is published to PyPI, from where you can use pip to install:\n# Install the latest version, using the default gRPC protocol to report data to OAP pip install \u0026#34;apache-skywalking\u0026#34; # Install support for every protocol (gRPC, HTTP, Kafka) pip install \u0026#34;apache-skywalking[all]\u0026#34; # Install the latest version, using the http protocol to report data to OAP pip install \u0026#34;apache-skywalking[http]\u0026#34; # Install the latest version, using the kafka protocol to report data to OAP pip install \u0026#34;apache-skywalking[kafka]\u0026#34; # Install a specific version x.y.z # pip install apache-skywalking==x.y.z pip install apache-skywalking==0.1.0 # For example, install version 0.1.0 no matter what the latest version is From Docker Hub SkyWalking Python agent provides convenient dockerfile and images for easy integration utilizing its auto-bootstrap capability.\nSimply pull SkyWalking Python image from Docker Hub based on desired agent version, protocol and Python version.\nFROMapache/skywalking-python:0.8.0-grpc-py3.10# ... build your Python application# If you prefer compact images (built from official Python slim image)FROMapache/skywalking-python:0.8.0-grpc-py3.10-slim# ... build your Python applicationThen, You can build your Python application image based on our agent-enabled Python images and start your applications with SkyWalking agent enabled for you. Please refer to our Containerization Guide for further instructions on integration and configuring.\nFrom Source Code Please refer to the How-to-build-from-sources FAQ.\n","excerpt":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the …","ref":"/docs/skywalking-python/next/en/setup/installation/","title":"Installation"},{"body":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the SkyWalking Python agent via various ways described next.\n Already installed? 
Check out easy ways to start the agent in your application\n  Non-intrusive  | Intrusive  | Containerization\n  All available configurations are listed here\n Important Note on Different Reporter Protocols Currently only gRPC protocol fully supports all available telemetry capabilities in the Python agent.\nWhile gRPC is highly recommended, we provide alternative protocols to suit your production requirements.\nPlease refer to the table below before deciding which report protocol suits best for you.\n   Reporter Protocol Trace Reporter Log Reporter Meter Reporter Profiling     gRPC ✅ ✅ ✅ ✅   HTTP ✅ ✅ ❌ ❌   Kafka ✅ ✅ ✅ ❌    From PyPI  If you want to try out the latest features that are not released yet, please refer to this guide to build from sources.\n The Python agent module is published to PyPI, from where you can use pip to install:\n# Install the latest version, using the default gRPC protocol to report data to OAP pip install \u0026#34;apache-skywalking\u0026#34; # Install support for every protocol (gRPC, HTTP, Kafka) pip install \u0026#34;apache-skywalking[all]\u0026#34; # Install the latest version, using the http protocol to report data to OAP pip install \u0026#34;apache-skywalking[http]\u0026#34; # Install the latest version, using the kafka protocol to report data to OAP pip install \u0026#34;apache-skywalking[kafka]\u0026#34; # Install a specific version x.y.z # pip install apache-skywalking==x.y.z pip install apache-skywalking==0.1.0 # For example, install version 0.1.0 no matter what the latest version is From Docker Hub SkyWalking Python agent provides convenient dockerfile and images for easy integration utilizing its auto-bootstrap capability.\nSimply pull SkyWalking Python image from Docker Hub based on desired agent version, protocol and Python version.\nFROMapache/skywalking-python:0.8.0-grpc-py3.10# ... build your Python application# If you prefer compact images (built from official Python slim image)FROMapache/skywalking-python:0.8.0-grpc-py3.10-slim# ... build your Python applicationThen, You can build your Python application image based on our agent-enabled Python images and start your applications with SkyWalking agent enabled for you. Please refer to our Containerization Guide for further instructions on integration and configuring.\nFrom Source Code Please refer to the How-to-build-from-sources FAQ.\n","excerpt":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the …","ref":"/docs/skywalking-python/v1.0.1/en/setup/installation/","title":"Installation"},{"body":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. 
Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","excerpt":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the …","ref":"/docs/main/latest/en/guides/it-guide/","title":"Integration Tests"},{"body":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","excerpt":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the …","ref":"/docs/main/next/en/guides/it-guide/","title":"Integration Tests"},{"body":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. 
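The Maven invocations described above, as runnable commands (the `-Pbackend` profile is the oap-server example given in the guide):

```shell
# Unit tests only; ITs (classes named IT* / *IT) are excluded by maven-surefire-plugin
./mvnw clean test

# Integration tests of the oap-server modules, skipping UTs
./mvnw -Pbackend clean verify -DskipUTs=true

# All integration tests, skipping UTs
./mvnw clean integration-test -DskipUTs=true
```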
maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","excerpt":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the …","ref":"/docs/main/v9.6.0/en/guides/it-guide/","title":"Integration Tests"},{"body":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","excerpt":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the …","ref":"/docs/main/v9.7.0/en/guides/it-guide/","title":"Integration Tests"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). 
They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. The menu definition supports one and two levels items. The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
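The `menu.yaml` excerpt quoted above lost its line breaks when the page was flattened. Re-indented (indentation and line breaks are reconstructed assumptions; the names and layers are exactly those quoted), it reads roughly:

```yaml
menus:
  - name: GeneralService
    icon: general_service
    menus:
      - name: Services
        layer: GENERAL
      - name: VisualDatabase
        layer: VIRTUAL_DATABASE
      - name: VisualCache
        layer: VIRTUAL_CACHE
      - name: VisualMQ
        layer: VIRTUAL_MQ
  # ...
  - name: SelfObservability
    icon: self_observability
    menus:
      - name: SkyWalkingServer
        layer: SO11Y_OAP
      - name: Satellite
        layer: SO11Y_SATELLITE
```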
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/latest/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. 
The menu definition supports one and two levels items. The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/next/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack application.\nThe left side menu lists all available supported stack, with default dashboards.\nFollow Official Dashboards menu explores all default dashboards about how to monitor different tech stacks.\nCustom Dashboard Besides, official dashboards, Dashboards provides customization to end users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, log analysis are relative to OAL, MAL, and LAL engines in SkyWalking kernel. 
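As a worked reading of two rows in the Calculations table above: "Average" divides the sum of the sampled values by their count, and "Apdex + Avg-preview" additionally divides by the 10000 scale implied by the plain "Apdex" row (Value / 10000):

$$\text{Average}=\frac{\sum_{i=1}^{N}v_i}{N},\qquad \text{Apdex + Avg-preview}=\frac{\sum_{i=1}^{N}v_i}{N\times 10000}$$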
You should learn them first too.\nService and All entity type dashboard could be set as root(set this to root), which mean this dashboard would be used as the entrance of its layer. If you have multiple root dashboards, UI could choose one randomly(Don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release, set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory, closing tab would LOSE the change permanently.\nSettings Settings provide language, server time zone, and auto-fresh option. These settings are stored in browser local storage. Unless you clear them manually, those would not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.0.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nSettings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. 
If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.1.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nWidget The widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  
Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nSettings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.2.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nSettings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.3.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. 
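The SW_ENABLE_UPDATE_UI_TEMPLATE switch mentioned above must be present in the OAP server's environment before it starts. A minimal sketch, assuming the standard distribution's bin/oapService.sh startup script (an assumption; adjust for your own deployment):

```shell
# Allow dashboards to be edited on a release build. Remember that unsaved
# edits live only in browser memory and are lost when the tab closes.
export SW_ENABLE_UPDATE_UI_TEMPLATE=true
bin/oapService.sh
```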
Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. 
If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.4.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.5.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. 
The menu definition supports one and two levels items. The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.6.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. 
The menu definition supports one and two levels items. The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.7.0/en/ui/readme/","title":"Introduction to UI"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. 
For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/latest/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/next/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service, because gRPC may be not supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users who are not familiar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. 
The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. 
But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  
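For reference, the core-module snippet embedded above, rendered as readable YAML. The values are exactly the defaults quoted on this page; only the formatting is reconstructed:

```yaml
core:
  default:
    restHost: 0.0.0.0
    restPort: 12800
    restContextPath: /
    gRPCHost: 0.0.0.0
    gRPCPort: 11800
```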
Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector to inject the agent sidecar into a pod.\nWhen enabled in a pod\u0026rsquo;s namespace, the injector injects the java agent container at pod creation time using a mutating webhook admission controller. By rendering the java agent to a shared volume, containers within the pod can use the java agent.\nThe following sections describe how to configure the agent, if you want to try it directly, please see Usage for more details.\nInstall Injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nActive the java agent injection We have two granularities here: namespace and pod.\n   Resource Label Enabled value Disabled value     Namespace swck-injection enabled disabled   Pod swck-java-agent-injected \u0026ldquo;true\u0026rdquo; \u0026ldquo;false\u0026rdquo;    The injector is configured with the following logic:\n If either label is disabled, the pod is not injected. If two labels are enabled, the pod is injected.  Follow the next steps to active java agent injection.\n Label the namespace with swck-injection=enabled  $ kubectl label namespace default(your namespace) swck-injection=enabled  Add label swck-java-agent-injected: \u0026quot;true\u0026quot; to the pod, and get the result as below.  
$ kubectl get pod -l swck-java-agent-injected=true NAME READY STATUS RESTARTS AGE inject-demo 1/1 Running 0 2d2h The ways to configure the agent The java agent injector supports a precedence order to configure the agent:\n Annotations \u0026gt; SwAgent \u0026gt; Configmap (Deprecated) \u0026gt; Default Configmap (Deprecated)\nAnnotations Annotations are described in kubernetes annotations doc.\nWe support annotations in agent annotations and sidecar annotations.\nSwAgent SwAgent is a Customer Resource defined by SWCK.\nWe support SwAgent in SwAgent usage guide\nConfigmap (Deprecated) Configmap is described in kubernetes configmap doc.\nWe need to use configmap to set agent.config so that we can modify the agent configuration without entering the container.\nIf there are different configmap in the namepsace, you can choose a configmap by setting sidecar annotations; If there is no configmap, the injector will create a default configmap.\nDefault configmap (Deprecated) The injector will create the default configmap to overlay the agent.config in the agent container.\nThe default configmap is shown as below, one is agent.service_name and the string can\u0026rsquo;t be empty; the other is collector.backend_service and it needs to be a legal IP address and port, the other fields need to be guaranteed by users themselves. Users can change it as their default configmap.\ndata: agent.config: | # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. To avoid the default configmap deleting by mistake, we use a configmap controller to watch the default configmap. In addition, if the user applies an invalid configuration, such as a malformed backend_service, the controller will use the default configmap.\nConfigure the agent The injector supports two methods to configure agent:\n Only use the default configuration. Use annotations to overlay the default configuration.  Use the default agent configuration After activating the java agent injection, if not set the annotations, the injector will use the default agent configuration directly as below.\ninitContainers: - args: - -c - mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent command: - sh image: apache/skywalking-java-agent:8.16.0-java8 name: inject-skywalking-agent volumeMounts: - mountPath: /sky/agent name: sky-agent volumes: - emptyDir: {} name: sky-agent - configMap: name: skywalking-swck-java-agent-configmap name: java-agent-configmap-volume Use SwAgent to overlay default agent configuration The injector will read the SwAgent CR when pods creating.\nSwAgent CRD basic structure is like:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;There are three kind of configs in SwAgent CR.\n1. 
label selector and container matcher label selector and container matcher decides which pod and container should be injected.\n   key path description default value     spec.selector label selector for pods which should be effected during injection. if no label selector was set, SwAgent CR config will affect every pod during injection. no default value   spec.containerMatcher container matcher is used to decide which container to be inject during injection. regular expression is supported. default value \u0026lsquo;.*\u0026rsquo; would match any container name. .*    2. injection configuration injection configuration will affect on agent injection behaviour\n   key path description default value     javaSidecar javaSidecar is the configs for init container, which holds agent sdk and take agent sdk to the target containers.    javaSidecar.name the name of the init container. inject-skywalking-agent   javaSidecar.image the image of the init container. apache/skywalking-java-agent:8.16.0-java8   SharedVolumeName SharedVolume is the name of an empty volume which shared by initContainer and target containers. sky-agent   OptionalPlugins Select the optional plugin which needs to be moved to the directory(/plugins). Such as trace,webflux,cloud-gateway-2.1.x. no default value   OptionalReporterPlugins Select the optional reporter plugin which needs to be moved to the directory(/plugins). such as kafka. no default value    3. skywalking agent configuration skywalking agent configuration is for agent SDK.\n   key path description default value     javaSidecar.env the env list to be appended to target containers. usually we can use it to setup agent configuration at container level. no default value.    Use annotations to overlay default agent configuration The injector can recognize five kinds of annotations to configure the agent as below.\n1. strategy configuration The strategy configuration is the annotation as below.\n   Annotation key Description Annotation Default value     strategy.skywalking.apache.org/inject.Container Select the injected container, if not set, inject all containers. not set    2. agent configuration The agent configuration is the annotation like agent.skywalking.apache.org/{option}: {value}, and the option support agent.xxx 、osinfo.xxx 、collector.xxx 、 logging.xxx 、statuscheck.xxx 、correlation.xxx 、jvm.xxx 、buffer.xxx 、 profile.xxx 、 meter.xxx 、 log.xxx in agent.config, such as agent.skywalking.apache.org/agent.namespace, agent.skywalking.apache.org/meter.max_meter_size, etc.\n3. plugins configuration The plugins configuration is the annotation like plugins.skywalking.apache.org/{option}: {value}, and the option only support plugin.xxx in the agent.config, such as plugins.skywalking.apache.org/plugin.mount, plugins.skywalking.apache.org/plugin.mongodb.trace_param, etc.\n4. optional plugin configuration The optional plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional.skywalking.apache.org Select the optional plugin which needs to be moved to the directory(/plugins). Users can select several optional plugins by separating from |, such as trace|webflux|cloud-gateway-2.1.x. not set    5. optional reporter plugin configuration The optional reporter plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional-reporter.skywalking.apache.org Select the optional reporter plugin which needs to be moved to the directory(/plugins). 
Users can select several optional reporter plugins by separating from |, such as kafka. not set    Configure sidecar The injector can recognize the following annotations to configure the sidecar:\n   Annotation key Description Annotation Default value     sidecar.skywalking.apache.org/initcontainer.Name The name of the injected java agent container. inject-skywalking-agent   sidecar.skywalking.apache.org/initcontainer.Image The container image of the injected java agent container. apache/skywalking-java-agent:8.16.0-java8   sidecar.skywalking.apache.org/initcontainer.Command The command of the injected java agent container. sh   sidecar.skywalking.apache.org/initcontainer.args.Option The args option of the injected java agent container. -c   sidecar.skywalking.apache.org/initcontainer.args.Command The args command of the injected java agent container. mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent   sidecar.skywalking.apache.org/initcontainer.resources.limits The resources limits of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/initcontainer.resources.requests The resources requests of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/sidecarVolume.Name The name of sidecar Volume. sky-agent   sidecar.skywalking.apache.org/sidecarVolumeMount.MountPath Mount path of the agent directory in the injected container. /sky/agent   sidecar.skywalking.apache.org/env.Name Environment Name used by the injected container (application container). JAVA_TOOL_OPTIONS   sidecar.skywalking.apache.org/env.Value Environment variables used by the injected container (application container). -javaagent:/sky/agent/skywalking-agent.jar    The ways to get the final injected agent\u0026rsquo;s configuration Please see javaagent introduction for details.\n","excerpt":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector …","ref":"/docs/skywalking-swck/latest/java-agent-injector/","title":"Java agent injector Manual"},{"body":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector to inject the agent sidecar into a pod.\nWhen enabled in a pod\u0026rsquo;s namespace, the injector injects the java agent container at pod creation time using a mutating webhook admission controller. By rendering the java agent to a shared volume, containers within the pod can use the java agent.\nThe following sections describe how to configure the agent, if you want to try it directly, please see Usage for more details.\nInstall Injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nActive the java agent injection We have two granularities here: namespace and pod.\n   Resource Label Enabled value Disabled value     Namespace swck-injection enabled disabled   Pod swck-java-agent-injected \u0026ldquo;true\u0026rdquo; \u0026ldquo;false\u0026rdquo;    The injector is configured with the following logic:\n If either label is disabled, the pod is not injected. If two labels are enabled, the pod is injected.  
Follow the next steps to active java agent injection.\n Label the namespace with swck-injection=enabled  $ kubectl label namespace default(your namespace) swck-injection=enabled  Add label swck-java-agent-injected: \u0026quot;true\u0026quot; to the pod, and get the result as below.  $ kubectl get pod -l swck-java-agent-injected=true NAME READY STATUS RESTARTS AGE inject-demo 1/1 Running 0 2d2h The ways to configure the agent The java agent injector supports a precedence order to configure the agent:\n Annotations \u0026gt; SwAgent \u0026gt; Configmap (Deprecated) \u0026gt; Default Configmap (Deprecated)\nAnnotations Annotations are described in kubernetes annotations doc.\nWe support annotations in agent annotations and sidecar annotations.\nSwAgent SwAgent is a Customer Resource defined by SWCK.\nWe support SwAgent in SwAgent usage guide\nConfigmap (Deprecated) Configmap is described in kubernetes configmap doc.\nWe need to use configmap to set agent.config so that we can modify the agent configuration without entering the container.\nIf there are different configmap in the namepsace, you can choose a configmap by setting sidecar annotations; If there is no configmap, the injector will create a default configmap.\nDefault configmap (Deprecated) The injector will create the default configmap to overlay the agent.config in the agent container.\nThe default configmap is shown as below, one is agent.service_name and the string can\u0026rsquo;t be empty; the other is collector.backend_service and it needs to be a legal IP address and port, the other fields need to be guaranteed by users themselves. Users can change it as their default configmap.\ndata: agent.config: | # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. To avoid the default configmap deleting by mistake, we use a configmap controller to watch the default configmap. In addition, if the user applies an invalid configuration, such as a malformed backend_service, the controller will use the default configmap.\nConfigure the agent The injector supports two methods to configure agent:\n Only use the default configuration. Use annotations to overlay the default configuration.  
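As a quick orientation for the annotation-overlay method, a minimal sketch of a pod template fragment using annotation keys documented in the sections below. The label is the activation label from this page, and the annotation keys (agent.namespace, plugin.mongodb.trace_param) are the examples this page names; the concrete values are illustrative only:

```yaml
# Sketch: pod template fragment overlaying agent options via annotations
template:
  metadata:
    labels:
      swck-java-agent-injected: "true"
    annotations:
      agent.skywalking.apache.org/agent.namespace: "demo-namespace"      # illustrative value
      plugins.skywalking.apache.org/plugin.mongodb.trace_param: "true"   # illustrative value
```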
Use the default agent configuration After activating the java agent injection, if not set the annotations, the injector will use the default agent configuration directly as below.\ninitContainers: - args: - -c - mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent command: - sh image: apache/skywalking-java-agent:8.16.0-java8 name: inject-skywalking-agent volumeMounts: - mountPath: /sky/agent name: sky-agent volumes: - emptyDir: {} name: sky-agent - configMap: name: skywalking-swck-java-agent-configmap name: java-agent-configmap-volume Use SwAgent to overlay default agent configuration The injector will read the SwAgent CR when pods creating.\nSwAgent CRD basic structure is like:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;bootstrapPlugins:- \u0026#34;jdk-threading\u0026#34;There are three kind of configs in SwAgent CR.\n1. label selector and container matcher label selector and container matcher decides which pod and container should be injected.\n   key path description default value     spec.selector label selector for pods which should be effected during injection. if no label selector was set, SwAgent CR config will affect every pod during injection. no default value   spec.containerMatcher container matcher is used to decide which container to be inject during injection. regular expression is supported. default value \u0026lsquo;.*\u0026rsquo; would match any container name. .*    2. injection configuration injection configuration will affect on agent injection behaviour\n   key path description default value     javaSidecar javaSidecar is the configs for init container, which holds agent sdk and take agent sdk to the target containers.    javaSidecar.name the name of the init container. inject-skywalking-agent   javaSidecar.image the image of the init container. apache/skywalking-java-agent:8.16.0-java8   SharedVolumeName SharedVolume is the name of an empty volume which shared by initContainer and target containers. sky-agent   OptionalPlugins Select the optional plugin which needs to be moved to the directory(/plugins). Such as trace,webflux,cloud-gateway-2.1.x. no default value   OptionalReporterPlugins Select the optional reporter plugin which needs to be moved to the directory(/plugins). such as kafka. no default value   BootstrapPlugins Select the bootstrap plugin which needs to be moved to the directory(/plugins). such as jdk-threading. no default value    3. skywalking agent configuration skywalking agent configuration is for agent SDK.\n   key path description default value     javaSidecar.env the env list to be appended to target containers. usually we can use it to setup agent configuration at container level. no default value.    Use annotations to overlay default agent configuration The injector can recognize five kinds of annotations to configure the agent as below.\n1. 
strategy configuration The strategy configuration is the annotation as below.\n   Annotation key Description Annotation Default value     strategy.skywalking.apache.org/inject.Container Select the injected container, if not set, inject all containers. not set    2. agent configuration The agent configuration is the annotation like agent.skywalking.apache.org/{option}: {value}, and the option support agent.xxx 、osinfo.xxx 、collector.xxx 、 logging.xxx 、statuscheck.xxx 、correlation.xxx 、jvm.xxx 、buffer.xxx 、 profile.xxx 、 meter.xxx 、 log.xxx in agent.config, such as agent.skywalking.apache.org/agent.namespace, agent.skywalking.apache.org/meter.max_meter_size, etc.\n3. plugins configuration The plugins configuration is the annotation like plugins.skywalking.apache.org/{option}: {value}, and the option only support plugin.xxx in the agent.config, such as plugins.skywalking.apache.org/plugin.mount, plugins.skywalking.apache.org/plugin.mongodb.trace_param, etc.\n4. optional plugin configuration The optional plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional.skywalking.apache.org Select the optional plugin which needs to be moved to the directory(/plugins). Users can select several optional plugins by separating from |, such as trace|webflux|cloud-gateway-2.1.x. not set    5. optional reporter plugin configuration The optional reporter plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional-reporter.skywalking.apache.org Select the optional reporter plugin which needs to be moved to the directory(/plugins). Users can select several optional reporter plugins by separating from |, such as kafka. not set    6. bootstrap plugin configuration The bootstrap plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     bootstrap.skywalking.apache.org Select the bootstrap plugin which needs to be moved to the directory(/plugins). Users can select several bootstrap plugins by separating from |, such as jdk-threading. not set    Configure sidecar The injector can recognize the following annotations to configure the sidecar:\n   Annotation key Description Annotation Default value     sidecar.skywalking.apache.org/initcontainer.Name The name of the injected java agent container. inject-skywalking-agent   sidecar.skywalking.apache.org/initcontainer.Image The container image of the injected java agent container. apache/skywalking-java-agent:8.16.0-java8   sidecar.skywalking.apache.org/initcontainer.Command The command of the injected java agent container. sh   sidecar.skywalking.apache.org/initcontainer.args.Option The args option of the injected java agent container. -c   sidecar.skywalking.apache.org/initcontainer.args.Command The args command of the injected java agent container. mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent   sidecar.skywalking.apache.org/initcontainer.resources.limits The resources limits of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/initcontainer.resources.requests The resources requests of the injected java agent container. 
You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/sidecarVolume.Name The name of sidecar Volume. sky-agent   sidecar.skywalking.apache.org/sidecarVolumeMount.MountPath Mount path of the agent directory in the injected container. /sky/agent   sidecar.skywalking.apache.org/env.Name Environment Name used by the injected container (application container). JAVA_TOOL_OPTIONS   sidecar.skywalking.apache.org/env.Value Environment variables used by the injected container (application container). -javaagent:/sky/agent/skywalking-agent.jar    The ways to get the final injected agent\u0026rsquo;s configuration Please see javaagent introduction for details.\n","excerpt":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector …","ref":"/docs/skywalking-swck/next/java-agent-injector/","title":"Java agent injector Manual"},{"body":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector to inject the agent sidecar into a pod.\nWhen enabled in a pod\u0026rsquo;s namespace, the injector injects the java agent container at pod creation time using a mutating webhook admission controller. By rendering the java agent to a shared volume, containers within the pod can use the java agent.\nThe following sections describe how to configure the agent, if you want to try it directly, please see Usage for more details.\nInstall Injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nActive the java agent injection We have two granularities here: namespace and pod.\n   Resource Label Enabled value Disabled value     Namespace swck-injection enabled disabled   Pod swck-java-agent-injected \u0026ldquo;true\u0026rdquo; \u0026ldquo;false\u0026rdquo;    The injector is configured with the following logic:\n If either label is disabled, the pod is not injected. If two labels are enabled, the pod is injected.  Follow the next steps to active java agent injection.\n Label the namespace with swck-injection=enabled  $ kubectl label namespace default(your namespace) swck-injection=enabled  Add label swck-java-agent-injected: \u0026quot;true\u0026quot; to the pod, and get the result as below.  
$ kubectl get pod -l swck-java-agent-injected=true NAME READY STATUS RESTARTS AGE inject-demo 1/1 Running 0 2d2h The ways to configure the agent The java agent injector supports a precedence order to configure the agent:\n Annotations \u0026gt; SwAgent \u0026gt; Configmap (Deprecated) \u0026gt; Default Configmap (Deprecated)\nAnnotations Annotations are described in kubernetes annotations doc.\nWe support annotations in agent annotations and sidecar annotations.\nSwAgent SwAgent is a Customer Resource defined by SWCK.\nWe support SwAgent in SwAgent usage guide\nConfigmap (Deprecated) Configmap is described in kubernetes configmap doc.\nWe need to use configmap to set agent.config so that we can modify the agent configuration without entering the container.\nIf there are different configmap in the namepsace, you can choose a configmap by setting sidecar annotations; If there is no configmap, the injector will create a default configmap.\nDefault configmap (Deprecated) The injector will create the default configmap to overlay the agent.config in the agent container.\nThe default configmap is shown as below, one is agent.service_name and the string can\u0026rsquo;t be empty; the other is collector.backend_service and it needs to be a legal IP address and port, the other fields need to be guaranteed by users themselves. Users can change it as their default configmap.\ndata: agent.config: | # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. To avoid the default configmap deleting by mistake, we use a configmap controller to watch the default configmap. In addition, if the user applies an invalid configuration, such as a malformed backend_service, the controller will use the default configmap.\nConfigure the agent The injector supports two methods to configure agent:\n Only use the default configuration. Use annotations to overlay the default configuration.  
Use the default agent configuration After activating the java agent injection, if not set the annotations, the injector will use the default agent configuration directly as below.\ninitContainers: - args: - -c - mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent command: - sh image: apache/skywalking-java-agent:8.16.0-java8 name: inject-skywalking-agent volumeMounts: - mountPath: /sky/agent name: sky-agent volumes: - emptyDir: {} name: sky-agent - configMap: name: skywalking-swck-java-agent-configmap name: java-agent-configmap-volume Use SwAgent to overlay default agent configuration The injector will read the SwAgent CR when pods creating.\nSwAgent CRD basic structure is like:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;bootstrapPlugins:- \u0026#34;jdk-threading\u0026#34;There are three kind of configs in SwAgent CR.\n1. label selector and container matcher label selector and container matcher decides which pod and container should be injected.\n   key path description default value     spec.selector label selector for pods which should be effected during injection. if no label selector was set, SwAgent CR config will affect every pod during injection. no default value   spec.containerMatcher container matcher is used to decide which container to be inject during injection. regular expression is supported. default value \u0026lsquo;.*\u0026rsquo; would match any container name. .*    2. injection configuration injection configuration will affect on agent injection behaviour\n   key path description default value     javaSidecar javaSidecar is the configs for init container, which holds agent sdk and take agent sdk to the target containers.    javaSidecar.name the name of the init container. inject-skywalking-agent   javaSidecar.image the image of the init container. apache/skywalking-java-agent:8.16.0-java8   SharedVolumeName SharedVolume is the name of an empty volume which shared by initContainer and target containers. sky-agent   OptionalPlugins Select the optional plugin which needs to be moved to the directory(/plugins). Such as trace,webflux,cloud-gateway-2.1.x. no default value   OptionalReporterPlugins Select the optional reporter plugin which needs to be moved to the directory(/plugins). such as kafka. no default value   BootstrapPlugins Select the bootstrap plugin which needs to be moved to the directory(/plugins). such as jdk-threading. no default value    3. skywalking agent configuration skywalking agent configuration is for agent SDK.\n   key path description default value     javaSidecar.env the env list to be appended to target containers. usually we can use it to setup agent configuration at container level. no default value.    Use annotations to overlay default agent configuration The injector can recognize five kinds of annotations to configure the agent as below.\n1. 
strategy configuration The strategy configuration is the annotation as below.\n   Annotation key Description Annotation Default value     strategy.skywalking.apache.org/inject.Container Select the injected container, if not set, inject all containers. not set    2. agent configuration The agent configuration is the annotation like agent.skywalking.apache.org/{option}: {value}, and the option support agent.xxx 、osinfo.xxx 、collector.xxx 、 logging.xxx 、statuscheck.xxx 、correlation.xxx 、jvm.xxx 、buffer.xxx 、 profile.xxx 、 meter.xxx 、 log.xxx in agent.config, such as agent.skywalking.apache.org/agent.namespace, agent.skywalking.apache.org/meter.max_meter_size, etc.\n3. plugins configuration The plugins configuration is the annotation like plugins.skywalking.apache.org/{option}: {value}, and the option only support plugin.xxx in the agent.config, such as plugins.skywalking.apache.org/plugin.mount, plugins.skywalking.apache.org/plugin.mongodb.trace_param, etc.\n4. optional plugin configuration The optional plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional.skywalking.apache.org Select the optional plugin which needs to be moved to the directory(/plugins). Users can select several optional plugins by separating from |, such as trace|webflux|cloud-gateway-2.1.x. not set    5. optional reporter plugin configuration The optional reporter plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional-reporter.skywalking.apache.org Select the optional reporter plugin which needs to be moved to the directory(/plugins). Users can select several optional reporter plugins by separating from |, such as kafka. not set    6. bootstrap plugin configuration The bootstrap plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     bootstrap.skywalking.apache.org Select the bootstrap plugin which needs to be moved to the directory(/plugins). Users can select several bootstrap plugins by separating from |, such as jdk-threading. not set    Configure sidecar The injector can recognize the following annotations to configure the sidecar:\n   Annotation key Description Annotation Default value     sidecar.skywalking.apache.org/initcontainer.Name The name of the injected java agent container. inject-skywalking-agent   sidecar.skywalking.apache.org/initcontainer.Image The container image of the injected java agent container. apache/skywalking-java-agent:8.16.0-java8   sidecar.skywalking.apache.org/initcontainer.Command The command of the injected java agent container. sh   sidecar.skywalking.apache.org/initcontainer.args.Option The args option of the injected java agent container. -c   sidecar.skywalking.apache.org/initcontainer.args.Command The args command of the injected java agent container. mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent   sidecar.skywalking.apache.org/initcontainer.resources.limits The resources limits of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/initcontainer.resources.requests The resources requests of the injected java agent container. 
You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/sidecarVolume.Name The name of sidecar Volume. sky-agent   sidecar.skywalking.apache.org/sidecarVolumeMount.MountPath Mount path of the agent directory in the injected container. /sky/agent   sidecar.skywalking.apache.org/env.Name Environment Name used by the injected container (application container). JAVA_TOOL_OPTIONS   sidecar.skywalking.apache.org/env.Value Environment variables used by the injected container (application container). -javaagent:/sky/agent/skywalking-agent.jar    The ways to get the final injected agent\u0026rsquo;s configuration Please see javaagent introduction for details.\n","excerpt":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector …","ref":"/docs/skywalking-swck/v0.9.0/java-agent-injector/","title":"Java agent injector Manual"},{"body":"Java agent injector Usage In this example, you will learn how to use the java agent injector.\nInstall injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nDeployment Example Let\u0026rsquo;s take a demo deployment for example.\n# demo1.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:labels:app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Enable Injection for Namespace and Deployments/StatefulSets. 
Firstly, set the injection label in your namespace as below.\nkubectl label namespace default(your namespace) swck-injection=enabled Secondly, set the injection label for your target Deployment/StatefulSet.\nkubectl -n default patch deployment demo1 --patch \u0026#39;{ \u0026#34;spec\u0026#34;: { \u0026#34;template\u0026#34;: { \u0026#34;metadata\u0026#34;: { \u0026#34;labels\u0026#34;: { \u0026#34;swck-java-agent-injected\u0026#34;: \u0026#34;true\u0026#34; } } } } }\u0026#39; Then the pods create by the Deployments/StatefulSets would be recreated with agent injected.\nThe injected pods would be like this:\nspec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jarimage:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agentcommand:- shimage:apache/skywalking-java-agent:8.10.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 demo1 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-5fbb6fcd98-cq5ws 1/1 Running 0 54s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:09:34Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-7fdffc7b95 uid: 417c413f-0cc0-41f9-b6eb-0192eb8c8622 resourceVersion: \u0026#34;25067\u0026#34; uid: 1cdab012-784c-4efb-b5d2-c032eb2fb22a spec: backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026#34;2022-08-16T12:09:34Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:10:04Z\u0026#34; realInjectedNum: 1 Use SwAgent CR to setup override default configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nApply SwAgent CR with correct label selector and container matcher:\n# SwAgent.yamlapiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;kubectl -n default apply swagent.yaml You can also get SwAgent CR by:\nkubectl -n default get SwAgent NAME AGE swagent-demo 38s Now the pod is still the old one, because pod could not load the SwAgent config automatically.\nSo you need to recreate pod to load SwAgent config. 
For the pods created by Deployment/StatefulSet, you can just simply delete the old pod.\n# verify pods to be delete  kubectl -n default get pods -l app=demo1 # delete pods kubectl -n default delete pods -l app=demo1 After the pods recreated, we can get injected pod as below.\nkubectl -n default get pods -l app=demo1 spec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.service_name=demo1,collector.backend_service=skywalking-system-oap.skywalking-system:11800- name:SW_LOGGING_LEVELvalue:DEBUG- name:SW_AGENT_COLLECTOR_BACKEND_SERVICESvalue:skywalking-system-oap.default.svc:11800image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agent-demoinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:swagent-demovolumeMounts:- mountPath:/sky/agentname:sky-agent-demovolumes:- emptyDir:{}name:sky-agent-demoUse annotation to override sidecar configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nThen add agent configuration and sidecar configuration to annotations as below.\n# demo1_anno.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:annotations:strategy.skywalking.apache.org/inject.Container:\u0026#34;demo1\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;app\u0026#34;agent.skywalking.apache.org/agent.sample_n_per_3_secs:\u0026#34;6\u0026#34;agent.skywalking.apache.org/agent.class_cache_mode:\u0026#34;MEMORY\u0026#34;agent.skywalking.apache.org/agent.ignore_suffix:\u0026#34;\u0026#39;jpg,.jpeg\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mount:\u0026#34;\u0026#39;plugins,activations\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mongodb.trace_param:\u0026#34;true\u0026#34;plugins.skywalking.apache.org/plugin.influxdb.trace_influxql:\u0026#34;false\u0026#34;optional.skywalking.apache.org:\u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34;optional-reporter.skywalking.apache.org:\u0026#34;kafka\u0026#34;labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Then we can get injected pod as below:\nkubectl -n default get pods -l app=demo1 spec:containers:- image:nginx:1.16.1imagePullPolicy:IfNotPresentname:nginx- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.ignore_suffix=\u0026#39;jpg,.jpeg\u0026#39;,agent.service_name=app,agent.class_cache_mode=MEMORY,agent.sample_n_per_3_secs=6,plugin.mongodb.trace_param=true,plugin.influxdb.trace_influxql=false,plugin.mount=\u0026#39;plugins,activations\u0026#39;image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1ports:- 
containerPort:8085protocol:TCPreadinessProbe:failureThreshold:10httpGet:path:/helloport:8085scheme:HTTPinitialDelaySeconds:3periodSeconds:3successThreshold:1timeoutSeconds:1volumeMounts:- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/\u0026amp;\u0026amp;cd /sky/agent/optional-reporter-plugins/ \u0026amp;\u0026amp; ls | grep -E \u0026#34;kafka\u0026#34; | xargs-i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 app 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-d48b96467-p7zrv 1/1 Running 0 5m25s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:18:53Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-d48b96467 uid: 2b7f1ac4-b459-41cd-8568-ecd4578ca457 resourceVersion: \u0026#34;26187\u0026#34; uid: c2b2f3e2-9442-4465-9423-d24249b2c53b spec: agentConfiguration: agent.class_cache_mode: MEMORY agent.ignore_suffix: \u0026#39;\u0026#39;\u0026#39;jpg,.jpeg\u0026#39;\u0026#39;\u0026#39; agent.sample_n_per_3_secs: \u0026#34;6\u0026#34; agent.service_name: app optional-plugin: trace|webflux|cloud-gateway-2.1.x optional-reporter-plugin: kafka plugin.influxdb.trace_influxql: \u0026#34;false\u0026#34; plugin.mongodb.trace_param: \u0026#34;true\u0026#34; plugin.mount: \u0026#39;\u0026#39;\u0026#39;plugins,activations\u0026#39;\u0026#39;\u0026#39; backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: app status: creationTime: \u0026#34;2022-08-16T12:18:53Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:19:18Z\u0026#34; realInjectedNum: 1 ","excerpt":"Java agent injector Usage In this example, you will learn how to use the java agent injector. …","ref":"/docs/skywalking-swck/latest/examples/java-agent-injector-usage/","title":"Java agent injector Usage"},{"body":"Java agent injector Usage In this example, you will learn how to use the java agent injector.\nInstall injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nDeployment Example Let\u0026rsquo;s take a demo deployment for example.\n# demo1.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:labels:app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Enable Injection for Namespace and Deployments/StatefulSets. 
Firstly, set the injection label in your namespace as below.\nkubectl label namespace default(your namespace) swck-injection=enabled Secondly, set the injection label for your target Deployment/StatefulSet.\nkubectl -n default patch deployment demo1 --patch \u0026#39;{ \u0026#34;spec\u0026#34;: { \u0026#34;template\u0026#34;: { \u0026#34;metadata\u0026#34;: { \u0026#34;labels\u0026#34;: { \u0026#34;swck-java-agent-injected\u0026#34;: \u0026#34;true\u0026#34; } } } } }\u0026#39; Then the pods create by the Deployments/StatefulSets would be recreated with agent injected.\nThe injected pods would be like this:\nspec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jarimage:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agentcommand:- shimage:apache/skywalking-java-agent:8.10.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 demo1 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-5fbb6fcd98-cq5ws 1/1 Running 0 54s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:09:34Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-7fdffc7b95 uid: 417c413f-0cc0-41f9-b6eb-0192eb8c8622 resourceVersion: \u0026#34;25067\u0026#34; uid: 1cdab012-784c-4efb-b5d2-c032eb2fb22a spec: backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026#34;2022-08-16T12:09:34Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:10:04Z\u0026#34; realInjectedNum: 1 Use SwAgent CR to setup override default configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nApply SwAgent CR with correct label selector and container matcher:\n# SwAgent.yamlapiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;kubectl -n default apply swagent.yaml You can also get SwAgent CR by:\nkubectl -n default get SwAgent NAME AGE swagent-demo 38s Now the pod is still the old one, because pod could not load the SwAgent config automatically.\nSo you need to recreate pod to load SwAgent config. 
For the pods created by Deployment/StatefulSet, you can just simply delete the old pod.\n# verify pods to be delete  kubectl -n default get pods -l app=demo1 # delete pods kubectl -n default delete pods -l app=demo1 After the pods recreated, we can get injected pod as below.\nkubectl -n default get pods -l app=demo1 spec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.service_name=demo1,collector.backend_service=skywalking-system-oap.skywalking-system:11800- name:SW_LOGGING_LEVELvalue:DEBUG- name:SW_AGENT_COLLECTOR_BACKEND_SERVICESvalue:skywalking-system-oap.default.svc:11800image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agent-demoinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:swagent-demovolumeMounts:- mountPath:/sky/agentname:sky-agent-demovolumes:- emptyDir:{}name:sky-agent-demoUse annotation to override sidecar configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nThen add agent configuration and sidecar configuration to annotations as below.\n# demo1_anno.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:annotations:strategy.skywalking.apache.org/inject.Container:\u0026#34;demo1\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;app\u0026#34;agent.skywalking.apache.org/agent.sample_n_per_3_secs:\u0026#34;6\u0026#34;agent.skywalking.apache.org/agent.class_cache_mode:\u0026#34;MEMORY\u0026#34;agent.skywalking.apache.org/agent.ignore_suffix:\u0026#34;\u0026#39;jpg,.jpeg\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mount:\u0026#34;\u0026#39;plugins,activations\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mongodb.trace_param:\u0026#34;true\u0026#34;plugins.skywalking.apache.org/plugin.influxdb.trace_influxql:\u0026#34;false\u0026#34;optional.skywalking.apache.org:\u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34;optional-reporter.skywalking.apache.org:\u0026#34;kafka\u0026#34;labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Then we can get injected pod as below:\nkubectl -n default get pods -l app=demo1 spec:containers:- image:nginx:1.16.1imagePullPolicy:IfNotPresentname:nginx- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.ignore_suffix=\u0026#39;jpg,.jpeg\u0026#39;,agent.service_name=app,agent.class_cache_mode=MEMORY,agent.sample_n_per_3_secs=6,plugin.mongodb.trace_param=true,plugin.influxdb.trace_influxql=false,plugin.mount=\u0026#39;plugins,activations\u0026#39;image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1ports:- 
containerPort:8085protocol:TCPreadinessProbe:failureThreshold:10httpGet:path:/helloport:8085scheme:HTTPinitialDelaySeconds:3periodSeconds:3successThreshold:1timeoutSeconds:1volumeMounts:- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/\u0026amp;\u0026amp;cd /sky/agent/optional-reporter-plugins/ \u0026amp;\u0026amp; ls | grep -E \u0026#34;kafka\u0026#34; | xargs-i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 app 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-d48b96467-p7zrv 1/1 Running 0 5m25s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:18:53Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-d48b96467 uid: 2b7f1ac4-b459-41cd-8568-ecd4578ca457 resourceVersion: \u0026#34;26187\u0026#34; uid: c2b2f3e2-9442-4465-9423-d24249b2c53b spec: agentConfiguration: agent.class_cache_mode: MEMORY agent.ignore_suffix: \u0026#39;\u0026#39;\u0026#39;jpg,.jpeg\u0026#39;\u0026#39;\u0026#39; agent.sample_n_per_3_secs: \u0026#34;6\u0026#34; agent.service_name: app optional-plugin: trace|webflux|cloud-gateway-2.1.x optional-reporter-plugin: kafka plugin.influxdb.trace_influxql: \u0026#34;false\u0026#34; plugin.mongodb.trace_param: \u0026#34;true\u0026#34; plugin.mount: \u0026#39;\u0026#39;\u0026#39;plugins,activations\u0026#39;\u0026#39;\u0026#39; backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: app status: creationTime: \u0026#34;2022-08-16T12:18:53Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:19:18Z\u0026#34; realInjectedNum: 1 ","excerpt":"Java agent injector Usage In this example, you will learn how to use the java agent injector. …","ref":"/docs/skywalking-swck/next/examples/java-agent-injector-usage/","title":"Java agent injector Usage"},{"body":"Java agent injector Usage In this example, you will learn how to use the java agent injector.\nInstall injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nDeployment Example Let\u0026rsquo;s take a demo deployment for example.\n# demo1.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:labels:app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Enable Injection for Namespace and Deployments/StatefulSets. 
Firstly, set the injection label in your namespace as below.\nkubectl label namespace default(your namespace) swck-injection=enabled Secondly, set the injection label for your target Deployment/StatefulSet.\nkubectl -n default patch deployment demo1 --patch \u0026#39;{ \u0026#34;spec\u0026#34;: { \u0026#34;template\u0026#34;: { \u0026#34;metadata\u0026#34;: { \u0026#34;labels\u0026#34;: { \u0026#34;swck-java-agent-injected\u0026#34;: \u0026#34;true\u0026#34; } } } } }\u0026#39; Then the pods create by the Deployments/StatefulSets would be recreated with agent injected.\nThe injected pods would be like this:\nspec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jarimage:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agentcommand:- shimage:apache/skywalking-java-agent:8.10.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 demo1 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-5fbb6fcd98-cq5ws 1/1 Running 0 54s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:09:34Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-7fdffc7b95 uid: 417c413f-0cc0-41f9-b6eb-0192eb8c8622 resourceVersion: \u0026#34;25067\u0026#34; uid: 1cdab012-784c-4efb-b5d2-c032eb2fb22a spec: backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026#34;2022-08-16T12:09:34Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:10:04Z\u0026#34; realInjectedNum: 1 Use SwAgent CR to setup override default configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nApply SwAgent CR with correct label selector and container matcher:\n# SwAgent.yamlapiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;kubectl -n default apply swagent.yaml You can also get SwAgent CR by:\nkubectl -n default get SwAgent NAME AGE swagent-demo 38s Now the pod is still the old one, because pod could not load the SwAgent config automatically.\nSo you need to recreate pod to load SwAgent config. 
For the pods created by Deployment/StatefulSet, you can just simply delete the old pod.\n# verify pods to be delete  kubectl -n default get pods -l app=demo1 # delete pods kubectl -n default delete pods -l app=demo1 After the pods recreated, we can get injected pod as below.\nkubectl -n default get pods -l app=demo1 spec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.service_name=demo1,collector.backend_service=skywalking-system-oap.skywalking-system:11800- name:SW_LOGGING_LEVELvalue:DEBUG- name:SW_AGENT_COLLECTOR_BACKEND_SERVICESvalue:skywalking-system-oap.default.svc:11800image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agent-demoinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:swagent-demovolumeMounts:- mountPath:/sky/agentname:sky-agent-demovolumes:- emptyDir:{}name:sky-agent-demoUse annotation to override sidecar configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nThen add agent configuration and sidecar configuration to annotations as below.\n# demo1_anno.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:annotations:strategy.skywalking.apache.org/inject.Container:\u0026#34;demo1\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;app\u0026#34;agent.skywalking.apache.org/agent.sample_n_per_3_secs:\u0026#34;6\u0026#34;agent.skywalking.apache.org/agent.class_cache_mode:\u0026#34;MEMORY\u0026#34;agent.skywalking.apache.org/agent.ignore_suffix:\u0026#34;\u0026#39;jpg,.jpeg\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mount:\u0026#34;\u0026#39;plugins,activations\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mongodb.trace_param:\u0026#34;true\u0026#34;plugins.skywalking.apache.org/plugin.influxdb.trace_influxql:\u0026#34;false\u0026#34;optional.skywalking.apache.org:\u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34;optional-reporter.skywalking.apache.org:\u0026#34;kafka\u0026#34;labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Then we can get injected pod as below:\nkubectl -n default get pods -l app=demo1 spec:containers:- image:nginx:1.16.1imagePullPolicy:IfNotPresentname:nginx- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.ignore_suffix=\u0026#39;jpg,.jpeg\u0026#39;,agent.service_name=app,agent.class_cache_mode=MEMORY,agent.sample_n_per_3_secs=6,plugin.mongodb.trace_param=true,plugin.influxdb.trace_influxql=false,plugin.mount=\u0026#39;plugins,activations\u0026#39;image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1ports:- 
containerPort:8085protocol:TCPreadinessProbe:failureThreshold:10httpGet:path:/helloport:8085scheme:HTTPinitialDelaySeconds:3periodSeconds:3successThreshold:1timeoutSeconds:1volumeMounts:- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/\u0026amp;\u0026amp;cd /sky/agent/optional-reporter-plugins/ \u0026amp;\u0026amp; ls | grep -E \u0026#34;kafka\u0026#34; | xargs-i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 app 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-d48b96467-p7zrv 1/1 Running 0 5m25s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:18:53Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-d48b96467 uid: 2b7f1ac4-b459-41cd-8568-ecd4578ca457 resourceVersion: \u0026#34;26187\u0026#34; uid: c2b2f3e2-9442-4465-9423-d24249b2c53b spec: agentConfiguration: agent.class_cache_mode: MEMORY agent.ignore_suffix: \u0026#39;\u0026#39;\u0026#39;jpg,.jpeg\u0026#39;\u0026#39;\u0026#39; agent.sample_n_per_3_secs: \u0026#34;6\u0026#34; agent.service_name: app optional-plugin: trace|webflux|cloud-gateway-2.1.x optional-reporter-plugin: kafka plugin.influxdb.trace_influxql: \u0026#34;false\u0026#34; plugin.mongodb.trace_param: \u0026#34;true\u0026#34; plugin.mount: \u0026#39;\u0026#39;\u0026#39;plugins,activations\u0026#39;\u0026#39;\u0026#39; backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: app status: creationTime: \u0026#34;2022-08-16T12:18:53Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:19:18Z\u0026#34; realInjectedNum: 1 ","excerpt":"Java agent injector Usage In this example, you will learn how to use the java agent injector. …","ref":"/docs/skywalking-swck/v0.9.0/examples/java-agent-injector-usage/","title":"Java agent injector Usage"},{"body":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. 
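For example, a new micro-benchmark extending AbstractMicrobenchmark might look like the sketch below; the class name and measured payload are illustrative, and it assumes the base class only contributes the shared Measurement/Fork/Warmup settings mentioned above.

```java
import java.util.Base64;

import org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.infra.Blackhole;

// Illustrative benchmark; real benchmarks live under the microbench module.
public class Base64EncodeBenchmark extends AbstractMicrobenchmark {

    @Benchmark
    public void encode(Blackhole bh) {
        // The measured operation; consuming the result through the Blackhole
        // keeps the JIT from eliminating it as dead code.
        bh.consume(Base64.getEncoder().encodeToString("skywalking".getBytes()));
    }
}
```

Placed under the microbench module, such a class would be picked up by the benchmark jar built with the Maven command above.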
And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","excerpt":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing …","ref":"/docs/main/latest/en/guides/benchmark/","title":"Java Microbenchmark Harness (JMH)"},{"body":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","excerpt":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing …","ref":"/docs/main/next/en/guides/benchmark/","title":"Java Microbenchmark Harness (JMH)"},{"body":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. 
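When running them from an IDE in that way, JMH can also be driven programmatically; the sketch below (benchmark selector and output path are illustrative) writes a JSON report equivalent to passing -rf json on the command line.

```java
import org.openjdk.jmh.results.format.ResultFormatType;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

// Illustrative launcher: selects benchmarks by a regular expression and
// stores the results as a JSON report, like `-rf json` does on the CLI.
public class MicrobenchLauncher {
    public static void main(String[] args) throws RunnerException {
        Options options = new OptionsBuilder()
                .include(".*Benchmark.*")            // regex choosing which benchmark classes to run
                .resultFormat(ResultFormatType.JSON) // emit the report in JSON format
                .result("jmh-report.json")           // illustrative output path
                .build();
        new Runner(options).run();
    }
}
```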
And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","excerpt":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing …","ref":"/docs/main/v9.6.0/en/guides/benchmark/","title":"Java Microbenchmark Harness (JMH)"},{"body":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","excerpt":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing …","ref":"/docs/main/v9.7.0/en/guides/benchmark/","title":"Java Microbenchmark Harness (JMH)"},{"body":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a CustomDefinitionResource called JavaAgent.\nWhen the pod is injected, the pod will be labeled with sidecar.skywalking.apache.org/succeed, then the controller will watch the specific pod labeled with sidecar.skywalking.apache.org/succeed. After the pod is created, the controller will create JavaAgent(custom resource), which contains the final agent configuration as below.\nSpec    Field Name Description     podSelector We hope users can use workloads to create pods, the podSelector is the selector label of workload.   serviceName serviceName is an important attribute that needs to be printed.   backendService backendService is an important attribute that needs to be printed.   agentConfiguration agentConfiguration contains serviceName、backendService and covered agent configuration, other default configurations will not be displayed, please see agent.config for details.    Status    Field Name Description     creationTime The creation time of the JavaAgent   lastUpdateTime The last Update time of the JavaAgent   expectedInjectiedNum The number of the pod that need to be injected   realInjectedNum The real number of injected pods.    Demo This demo shows the usage of javaagent. 
If you want to see the complete process, please see java-agent-injector-usagefor details.\nWhen we use java-agent-injector, we can get custom resources as below.\n$ kubectl get javaagent -A NAMESPACE NAME PODSELECTOR SERVICENAME BACKENDSERVICE default app-demo1-javaagent app=demo1 Your_ApplicationName 127.0.0.1:11800 default app-demo2-javaagent app=demo2 Your_ApplicationName 127.0.0.1:11800 $ kubectl get pod -l app=demo1 NAME READY STATUS RESTARTS AGE demo1-bb97b8b4d-bkwm4 1/1 Running 0 28s demo1-bb97b8b4d-wxgs2 1/1 Running 0 28s $ kubectl get pod -l app=demo2 NAME READY STATUS RESTARTS AGE app2-0 1/1 Running 0 27s app2-1 1/1 Running 0 25s app2-2 1/1 Running 0 23s If we want to see more information, we can get the specific javaagent\u0026rsquo;s yaml as below.\n$ kubectl get javaagent app-demo1-javaagent -oyaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026quot;2021-10-14T07:07:12Z\u0026quot; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-bb97b8b4d uid: c712924f-4652-4c07-8332-b3938ad72392 resourceVersion: \u0026quot;330808\u0026quot; selfLink: /apis/operator.skywalking.apache.org/v1alpha1/namespaces/default/javaagents/app-demo1-javaagent uid: 9350338f-15a5-4832-84d1-530f8d0e1c3b spec: agentConfiguration: agent.namespace: default-namespace agent.service_name: Your_ApplicationName collector.backend_service: 127.0.0.1:11800 backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026quot;2021-10-14T07:07:12Z\u0026quot; expectedInjectiedNum: 2 lastUpdateTime: \u0026quot;2021-10-14T07:07:14Z\u0026quot; realInjectedNum: 2 ","excerpt":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a …","ref":"/docs/skywalking-swck/latest/javaagent/","title":"JavaAgent Introduction"},{"body":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a CustomDefinitionResource called JavaAgent.\nWhen the pod is injected, the pod will be labeled with sidecar.skywalking.apache.org/succeed, then the controller will watch the specific pod labeled with sidecar.skywalking.apache.org/succeed. After the pod is created, the controller will create JavaAgent(custom resource), which contains the final agent configuration as below.\nSpec    Field Name Description     podSelector We hope users can use workloads to create pods, the podSelector is the selector label of workload.   serviceName serviceName is an important attribute that needs to be printed.   backendService backendService is an important attribute that needs to be printed.   agentConfiguration agentConfiguration contains serviceName、backendService and covered agent configuration, other default configurations will not be displayed, please see agent.config for details.    Status    Field Name Description     creationTime The creation time of the JavaAgent   lastUpdateTime The last Update time of the JavaAgent   expectedInjectiedNum The number of the pod that need to be injected   realInjectedNum The real number of injected pods.    Demo This demo shows the usage of javaagent. 
If you want to see the complete process, please see java-agent-injector-usagefor details.\nWhen we use java-agent-injector, we can get custom resources as below.\n$ kubectl get javaagent -A NAMESPACE NAME PODSELECTOR SERVICENAME BACKENDSERVICE default app-demo1-javaagent app=demo1 Your_ApplicationName 127.0.0.1:11800 default app-demo2-javaagent app=demo2 Your_ApplicationName 127.0.0.1:11800 $ kubectl get pod -l app=demo1 NAME READY STATUS RESTARTS AGE demo1-bb97b8b4d-bkwm4 1/1 Running 0 28s demo1-bb97b8b4d-wxgs2 1/1 Running 0 28s $ kubectl get pod -l app=demo2 NAME READY STATUS RESTARTS AGE app2-0 1/1 Running 0 27s app2-1 1/1 Running 0 25s app2-2 1/1 Running 0 23s If we want to see more information, we can get the specific javaagent\u0026rsquo;s yaml as below.\n$ kubectl get javaagent app-demo1-javaagent -oyaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026quot;2021-10-14T07:07:12Z\u0026quot; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-bb97b8b4d uid: c712924f-4652-4c07-8332-b3938ad72392 resourceVersion: \u0026quot;330808\u0026quot; selfLink: /apis/operator.skywalking.apache.org/v1alpha1/namespaces/default/javaagents/app-demo1-javaagent uid: 9350338f-15a5-4832-84d1-530f8d0e1c3b spec: agentConfiguration: agent.namespace: default-namespace agent.service_name: Your_ApplicationName collector.backend_service: 127.0.0.1:11800 backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026quot;2021-10-14T07:07:12Z\u0026quot; expectedInjectiedNum: 2 lastUpdateTime: \u0026quot;2021-10-14T07:07:14Z\u0026quot; realInjectedNum: 2 ","excerpt":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a …","ref":"/docs/skywalking-swck/next/javaagent/","title":"JavaAgent Introduction"},{"body":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a CustomDefinitionResource called JavaAgent.\nWhen the pod is injected, the pod will be labeled with sidecar.skywalking.apache.org/succeed, then the controller will watch the specific pod labeled with sidecar.skywalking.apache.org/succeed. After the pod is created, the controller will create JavaAgent(custom resource), which contains the final agent configuration as below.\nSpec    Field Name Description     podSelector We hope users can use workloads to create pods, the podSelector is the selector label of workload.   serviceName serviceName is an important attribute that needs to be printed.   backendService backendService is an important attribute that needs to be printed.   agentConfiguration agentConfiguration contains serviceName、backendService and covered agent configuration, other default configurations will not be displayed, please see agent.config for details.    Status    Field Name Description     creationTime The creation time of the JavaAgent   lastUpdateTime The last Update time of the JavaAgent   expectedInjectiedNum The number of the pod that need to be injected   realInjectedNum The real number of injected pods.    Demo This demo shows the usage of javaagent. 
If you want to see the complete process, please see java-agent-injector-usagefor details.\nWhen we use java-agent-injector, we can get custom resources as below.\n$ kubectl get javaagent -A NAMESPACE NAME PODSELECTOR SERVICENAME BACKENDSERVICE default app-demo1-javaagent app=demo1 Your_ApplicationName 127.0.0.1:11800 default app-demo2-javaagent app=demo2 Your_ApplicationName 127.0.0.1:11800 $ kubectl get pod -l app=demo1 NAME READY STATUS RESTARTS AGE demo1-bb97b8b4d-bkwm4 1/1 Running 0 28s demo1-bb97b8b4d-wxgs2 1/1 Running 0 28s $ kubectl get pod -l app=demo2 NAME READY STATUS RESTARTS AGE app2-0 1/1 Running 0 27s app2-1 1/1 Running 0 25s app2-2 1/1 Running 0 23s If we want to see more information, we can get the specific javaagent\u0026rsquo;s yaml as below.\n$ kubectl get javaagent app-demo1-javaagent -oyaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026quot;2021-10-14T07:07:12Z\u0026quot; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-bb97b8b4d uid: c712924f-4652-4c07-8332-b3938ad72392 resourceVersion: \u0026quot;330808\u0026quot; selfLink: /apis/operator.skywalking.apache.org/v1alpha1/namespaces/default/javaagents/app-demo1-javaagent uid: 9350338f-15a5-4832-84d1-530f8d0e1c3b spec: agentConfiguration: agent.namespace: default-namespace agent.service_name: Your_ApplicationName collector.backend_service: 127.0.0.1:11800 backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026quot;2021-10-14T07:07:12Z\u0026quot; expectedInjectiedNum: 2 lastUpdateTime: \u0026quot;2021-10-14T07:07:14Z\u0026quot; realInjectedNum: 2 ","excerpt":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a …","ref":"/docs/skywalking-swck/v0.9.0/javaagent/","title":"JavaAgent Introduction"},{"body":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5; ZHEAP_USAGE = 6; COMPRESSED_CLASS_SPACE_USAGE = 7; CODEHEAP_NON_NMETHODS_USAGE = 8; CODEHEAP_PROFILED_NMETHODS_USAGE = 9; CODEHEAP_NON_PROFILED_NMETHODS_USAGE = 10;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","excerpt":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of …","ref":"/docs/main/latest/en/api/jvm-protocol/","title":"JVM Metrics APIs"},{"body":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5; ZHEAP_USAGE = 6; COMPRESSED_CLASS_SPACE_USAGE = 7; CODEHEAP_NON_NMETHODS_USAGE = 8; CODEHEAP_PROFILED_NMETHODS_USAGE = 9; CODEHEAP_NON_PROFILED_NMETHODS_USAGE = 10;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","excerpt":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of …","ref":"/docs/main/next/en/api/jvm-protocol/","title":"JVM Metrics APIs"},{"body":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","excerpt":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of …","ref":"/docs/main/v9.4.0/en/api/jvm-protocol/","title":"JVM Metrics APIs"},{"body":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","excerpt":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of …","ref":"/docs/main/v9.5.0/en/api/jvm-protocol/","title":"JVM Metrics APIs"},{"body":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","excerpt":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of …","ref":"/docs/main/v9.6.0/en/api/jvm-protocol/","title":"JVM Metrics APIs"},{"body":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5; ZHEAP_USAGE = 6; COMPRESSED_CLASS_SPACE_USAGE = 7; CODEHEAP_NON_NMETHODS_USAGE = 8; CODEHEAP_PROFILED_NMETHODS_USAGE = 9; CODEHEAP_NON_PROFILED_NMETHODS_USAGE = 10;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","excerpt":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of …","ref":"/docs/main/v9.7.0/en/api/jvm-protocol/","title":"JVM Metrics APIs"},{"body":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","excerpt":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, …","ref":"/docs/main/v9.0.0/en/protocols/jvm-protocol/","title":"JVM Metrics Service"},{"body":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","excerpt":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, …","ref":"/docs/main/v9.1.0/en/protocols/jvm-protocol/","title":"JVM Metrics Service"},{"body":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","excerpt":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, …","ref":"/docs/main/v9.2.0/en/protocols/jvm-protocol/","title":"JVM Metrics Service"},{"body":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","excerpt":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, …","ref":"/docs/main/v9.3.0/en/protocols/jvm-protocol/","title":"JVM Metrics Service"},{"body":"K8s monitoring SkyWalking leverages K8s kube-state-metrics and cAdvisor for collecting metrics data from K8s, and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. 
This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus GRPC Exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a full example of configuration and recommended version , you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  K8s Cluster Monitoring K8s cluster monitoring provide monitoring of the status and resources of the K8S Cluster, including the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as a Instance in OAP, and land on the Layer: K8S.\nK8s Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    K8s Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this 
node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    K8s Service Monitoring K8s Service Monitoring provide observe service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nK8s Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizing You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/k8s-cluster.yaml,/config/otel-oc-rules/k8s-node.yaml, /config/otel-oc-rules/k8s-service.yaml.\nThe K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"K8s monitoring SkyWalking leverages K8s kube-state-metrics and cAdvisor for collecting metrics data …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-k8s-monitoring/","title":"K8s monitoring"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. 
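As a rough illustration of the K8s monitoring data flow described above (kube-state-metrics / cAdvisor scraped by the OpenTelemetry Collector's Prometheus Receiver, then pushed to the OAP over the OpenCensus gRPC exporter), the sketch below wires those pieces together. The scrape target, the OAP address, and the exact exporter field layout are assumptions for illustration only; the showcase configuration referenced in the setup steps is the tested, version-matched reference.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: kube-state-metrics
          static_configs:
            # Assumed in-cluster address of the kube-state-metrics service.
            - targets: [ "kube-state-metrics.kube-system:8080" ]
        # A second job scraping cAdvisor (exposed by the kubelet) would be added here in the same way.
exporters:
  opencensus:
    # Assumed OAP address; 11800 is the default OAP gRPC port.
    endpoint: "skywalking-oap:11800"
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [ prometheus ]
      exporters: [ opencensus ]
```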
To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/latest/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
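The kafka-fetcher snippets above are shown without their original indentation. Re-indented as they would appear in the OAP configuration file, the minimal block that enables the fetcher is the following sketch, using the same keys and defaults quoted above:

```yaml
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    # Optional prefix applied to every skywalking-* topic; agents must set the
    # matching prefix via plugin.kafka.namespace in agent.config.
    namespace: ${SW_NAMESPACE:""}
```

Since the fetcher is disabled by default, enabling it amounts to pointing the selector at the default provider; everything nested under default is provider-level configuration.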
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/next/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agent is delivered. Check the agent documentation for details. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported. Kafka Fetcher can work with gRPC/HTTP Receivers at the same time for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure as follows.\nNamespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to topic name. You should also set namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. 
For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You could find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agent is …","ref":"/docs/main/v9.0.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. 
Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.1.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
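Putting the cluster-mode options together, the following re-indented sketch combines the topic settings (partitions, replicationFactor), the sharding options (isSharding, consumePartitions), and the pass-through Kafka consumer properties (kafkaConsumerConfig) that the flattened examples above show:

```yaml
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    namespace: ${SW_NAMESPACE:""}
    # Used when the OAP creates the skywalking-* topics itself.
    partitions: ${SW_KAFKA_FETCHER_PARTITIONS:3}
    replicationFactor: ${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}
    consumers: ${SW_KAFKA_FETCHER_CONSUMERS:1}
    # Cluster mode: each OAP node consumes only the partitions assigned to it.
    isSharding: ${SW_KAFKA_FETCHER_IS_SHARDING:true}
    consumePartitions: ${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}
    # Native Kafka consumer properties can be passed through here.
    kafkaConsumerConfig:
      enable.auto.commit: true
```

In cluster mode, each OAP node would list its own non-overlapping, comma-separated set of partitions under consumePartitions.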
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.2.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. 
You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.3.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). 
Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. 
You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.4.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. 
For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.5.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
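For the MirrorMaker 2.0 case, a short sketch of just the two extra options may help. Assuming MirrorMaker 2's default replication policy, a replicated topic is named sourceAlias + separator + topic, and these settings tell the fetcher to subscribe to those prefixed names; the alias and separator values below are illustrative and must match your MirrorMaker configuration.

```yaml
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    # Illustrative values; use the source cluster alias and separator
    # configured in your Kafka MirrorMaker 2.0 setup.
    mm2SourceAlias: ${SW_KAFKA_MM2_SOURCE_ALIAS:primary}
    mm2SourceSeparator: ${SW_KAFKA_MM2_SOURCE_SEPARATOR:.}
```

With these example values, the fetcher would consume topics such as primary.skywalking-segments rather than skywalking-segments.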
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.6.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.7.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the Layer: KAFKA.\nData flow  The prometheus_JMX_Exporter collect metrics data from Kafka. Note: Running the exporter as a Java agent. OpenTelemetry Collector fetches metrics from prometheus_JMX_Exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup prometheus_JMX_Exporter. This is an example for JMX Exporter configuration kafka-2_0_0.yml. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Kafka Monitoring Kafka monitoring provides multidimensional metrics monitoring of Kafka cluster as Layer: KAFKA Service in the OAP. In each cluster, the kafka brokers are represented as Instance.\nKafka Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Under-Replicated Partitions meter_kafka_under_replicated_partitions Number of under-replicated partitions in the broker. A higher number is a sign of potential issues. Prometheus JMX Exporter   Offline Partitions Count meter_kafka_offline_partitions_count Number of partitions that are offline. Non-zero values indicate a problem. Prometheus JMX Exporter   Partition Count meter_kafka_partition_count Total number of partitions on the broker. Prometheus JMX Exporter   Leader Count meter_kafka_leader_count Number of leader partitions on this broker. Prometheus JMX Exporter   Active Controller Count meter_kafka_active_controller_count The number of active controllers in the cluster. Typically should be 1. Prometheus JMX Exporter   Leader Election Rate meter_kafka_leader_election_rate The rate of leader elections per minute. High rate could be a sign of instability. 
Prometheus JMX Exporter   Unclean Leader Elections Per Second meter_kafka_unclean_leader_elections_per_second The rate of unclean leader elections per second. Non-zero values indicate a serious problem. Prometheus JMX Exporter   Max Lag meter_kafka_max_lag The maximum lag between the leader and followers in terms of messages still needed to be sent. Higher lag indicates delays. Prometheus JMX Exporter    Kafka Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_kafka_broker_cpu_time_total CPU usage in percentage Prometheus JMX Exporter   Memory Usage % meter_kafka_broker_memory_usage_percentage JVM heap memory usage in percentage Prometheus JMX Exporter   Incoming Messages Msg/sec meter_kafka_broker_messages_per_second Rate of incoming messages Prometheus JMX Exporter   Bytes In Bytes/sec meter_kafka_broker_bytes_in_per_second Rate of incoming bytes Prometheus JMX Exporter   Bytes Out Bytes/sec meter_kafka_broker_bytes_out_per_second Rate of outgoing bytes Prometheus JMX Exporter   Replication Bytes In Bytes/sec meter_kafka_broker_replication_bytes_in_per_second Rate of incoming bytes for replication Prometheus JMX Exporter   Replication Bytes Out Bytes/sec meter_kafka_broker_replication_bytes_out_per_second Rate of outgoing bytes for replication Prometheus JMX Exporter   Under-Replicated Partitions Count meter_kafka_broker_under_replicated_partitions Number of under-replicated partitions Prometheus JMX Exporter   Under Min ISR Partition Count Count meter_kafka_broker_under_min_isr_partition_count Number of partitions below the minimum ISR (In-Sync Replicas) Prometheus JMX Exporter   Partition Count Count meter_kafka_broker_partition_count Total number of partitions Prometheus JMX Exporter   Leader Count Count meter_kafka_broker_leader_count Number of partitions for which this broker is the leader Prometheus JMX Exporter   ISR Shrinks Count/sec meter_kafka_broker_isr_shrinks_per_second Rate of ISR (In-Sync Replicas) shrinking Prometheus JMX Exporter   ISR Expands Count/sec meter_kafka_broker_isr_expands_per_second Rate of ISR (In-Sync Replicas) expanding Prometheus JMX Exporter   Max Lag Count meter_kafka_broker_max_lag Maximum lag between the leader and follower for a partition Prometheus JMX Exporter   Purgatory Size Count meter_kafka_broker_purgatory_size Size of purgatory for Produce and Fetch operations Prometheus JMX Exporter   Garbage Collector Count Count/sec meter_kafka_broker_garbage_collector_count Rate of garbage collection cycles Prometheus JMX Exporter   Requests Per Second Req/sec meter_kafka_broker_requests_per_second Rate of requests to the broker Prometheus JMX Exporter   Request Queue Time ms meter_kafka_broker_request_queue_time_ms Average time a request spends in the request queue Prometheus JMX Exporter   Remote Time ms meter_kafka_broker_remote_time_ms Average time taken for a remote operation Prometheus JMX Exporter   Response Queue Time ms meter_kafka_broker_response_queue_time_ms Average time a response spends in the response queue Prometheus JMX Exporter   Response Send Time ms meter_kafka_broker_response_send_time_ms Average time taken to send a response Prometheus JMX Exporter   Network Processor Avg Idle % meter_kafka_broker_network_processor_avg_idle_percent Percentage of idle time for the network processor Prometheus JMX Exporter   Topic Messages In Total Count meter_kafka_broker_topic_messages_in_total Total number of messages per topic Prometheus JMX Exporter   Topic Bytes Out Per Second Bytes/sec 
meter_kafka_broker_topic_bytesout_per_second Rate of outgoing bytes per topic Prometheus JMX Exporter   Topic Bytes In Per Second Bytes/sec meter_kafka_broker_topic_bytesin_per_second Rate of incoming bytes per topic Prometheus JMX Exporter   Topic Fetch Requests Per Second Req/sec meter_kafka_broker_topic_fetch_requests_per_second Rate of fetch requests per topic Prometheus JMX Exporter   Topic Produce Requests Per Second Req/sec meter_kafka_broker_topic_produce_requests_per_second Rate of produce requests per topic Prometheus JMX Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/kafka/kafka-cluster.yaml, /config/otel-rules/kafka/kafka-node.yaml. The Kafka dashboard panel configurations are found in /config/ui-initialized-templates/kafka.\nReference For more details on monitoring Kafka and the metrics to focus on, see the following articles:\n Monitoring Kafka Streams Applications Kafka Monitoring  ","excerpt":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka …","ref":"/docs/main/latest/en/setup/backend/backend-kafka-monitoring/","title":"Kafka monitoring"},{"body":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the Layer: KAFKA.\nData flow  The prometheus_JMX_Exporter collect metrics data from Kafka. Note: Running the exporter as a Java agent. OpenTelemetry Collector fetches metrics from prometheus_JMX_Exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup prometheus_JMX_Exporter. This is an example for JMX Exporter configuration kafka-2_0_0.yml. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Kafka Monitoring Kafka monitoring provides multidimensional metrics monitoring of Kafka cluster as Layer: KAFKA Service in the OAP. In each cluster, the kafka brokers are represented as Instance.\nKafka Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Under-Replicated Partitions meter_kafka_under_replicated_partitions Number of under-replicated partitions in the broker. A higher number is a sign of potential issues. Prometheus JMX Exporter   Offline Partitions Count meter_kafka_offline_partitions_count Number of partitions that are offline. Non-zero values indicate a problem. Prometheus JMX Exporter   Partition Count meter_kafka_partition_count Total number of partitions on the broker. Prometheus JMX Exporter   Leader Count meter_kafka_leader_count Number of leader partitions on this broker. Prometheus JMX Exporter   Active Controller Count meter_kafka_active_controller_count The number of active controllers in the cluster. Typically should be 1. Prometheus JMX Exporter   Leader Election Rate meter_kafka_leader_election_rate The rate of leader elections per minute. High rate could be a sign of instability. Prometheus JMX Exporter   Unclean Leader Elections Per Second meter_kafka_unclean_leader_elections_per_second The rate of unclean leader elections per second. Non-zero values indicate a serious problem. 
Prometheus JMX Exporter   Max Lag meter_kafka_max_lag The maximum lag between the leader and followers in terms of messages still needed to be sent. Higher lag indicates delays. Prometheus JMX Exporter    Kafka Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_kafka_broker_cpu_time_total CPU usage in percentage Prometheus JMX Exporter   Memory Usage % meter_kafka_broker_memory_usage_percentage JVM heap memory usage in percentage Prometheus JMX Exporter   Incoming Messages Msg/sec meter_kafka_broker_messages_per_second Rate of incoming messages Prometheus JMX Exporter   Bytes In Bytes/sec meter_kafka_broker_bytes_in_per_second Rate of incoming bytes Prometheus JMX Exporter   Bytes Out Bytes/sec meter_kafka_broker_bytes_out_per_second Rate of outgoing bytes Prometheus JMX Exporter   Replication Bytes In Bytes/sec meter_kafka_broker_replication_bytes_in_per_second Rate of incoming bytes for replication Prometheus JMX Exporter   Replication Bytes Out Bytes/sec meter_kafka_broker_replication_bytes_out_per_second Rate of outgoing bytes for replication Prometheus JMX Exporter   Under-Replicated Partitions Count meter_kafka_broker_under_replicated_partitions Number of under-replicated partitions Prometheus JMX Exporter   Under Min ISR Partition Count Count meter_kafka_broker_under_min_isr_partition_count Number of partitions below the minimum ISR (In-Sync Replicas) Prometheus JMX Exporter   Partition Count Count meter_kafka_broker_partition_count Total number of partitions Prometheus JMX Exporter   Leader Count Count meter_kafka_broker_leader_count Number of partitions for which this broker is the leader Prometheus JMX Exporter   ISR Shrinks Count/sec meter_kafka_broker_isr_shrinks_per_second Rate of ISR (In-Sync Replicas) shrinking Prometheus JMX Exporter   ISR Expands Count/sec meter_kafka_broker_isr_expands_per_second Rate of ISR (In-Sync Replicas) expanding Prometheus JMX Exporter   Max Lag Count meter_kafka_broker_max_lag Maximum lag between the leader and follower for a partition Prometheus JMX Exporter   Purgatory Size Count meter_kafka_broker_purgatory_size Size of purgatory for Produce and Fetch operations Prometheus JMX Exporter   Garbage Collector Count Count/sec meter_kafka_broker_garbage_collector_count Rate of garbage collection cycles Prometheus JMX Exporter   Requests Per Second Req/sec meter_kafka_broker_requests_per_second Rate of requests to the broker Prometheus JMX Exporter   Request Queue Time ms meter_kafka_broker_request_queue_time_ms Average time a request spends in the request queue Prometheus JMX Exporter   Remote Time ms meter_kafka_broker_remote_time_ms Average time taken for a remote operation Prometheus JMX Exporter   Response Queue Time ms meter_kafka_broker_response_queue_time_ms Average time a response spends in the response queue Prometheus JMX Exporter   Response Send Time ms meter_kafka_broker_response_send_time_ms Average time taken to send a response Prometheus JMX Exporter   Network Processor Avg Idle % meter_kafka_broker_network_processor_avg_idle_percent Percentage of idle time for the network processor Prometheus JMX Exporter   Topic Messages In Total Count meter_kafka_broker_topic_messages_in_total Total number of messages per topic Prometheus JMX Exporter   Topic Bytes Out Per Second Bytes/sec meter_kafka_broker_topic_bytesout_per_second Rate of outgoing bytes per topic Prometheus JMX Exporter   Topic Bytes In Per Second Bytes/sec meter_kafka_broker_topic_bytesin_per_second Rate of incoming bytes 
per topic Prometheus JMX Exporter   Topic Fetch Requests Per Second Req/sec meter_kafka_broker_topic_fetch_requests_per_second Rate of fetch requests per topic Prometheus JMX Exporter   Topic Produce Requests Per Second Req/sec meter_kafka_broker_topic_produce_requests_per_second Rate of produce requests per topic Prometheus JMX Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/kafka/kafka-cluster.yaml, /config/otel-rules/kafka/kafka-node.yaml. The Kafka dashboard panel configurations are found in /config/ui-initialized-templates/kafka.\nReference For more details on monitoring Kafka and the metrics to focus on, see the following articles:\n Monitoring Kafka Streams Applications Kafka Monitoring  ","excerpt":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka …","ref":"/docs/main/next/en/setup/backend/backend-kafka-monitoring/","title":"Kafka monitoring"},{"body":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the Layer: KAFKA.\nData flow  The prometheus_JMX_Exporter collect metrics data from Kafka. Note: Running the exporter as a Java agent. OpenTelemetry Collector fetches metrics from prometheus_JMX_Exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup prometheus_JMX_Exporter. This is an example for JMX Exporter configuration kafka-2_0_0.yml. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Kafka Monitoring Kafka monitoring provides multidimensional metrics monitoring of Kafka cluster as Layer: KAFKA Service in the OAP. In each cluster, the kafka brokers are represented as Instance.\nKafka Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Under-Replicated Partitions meter_kafka_under_replicated_partitions Number of under-replicated partitions in the broker. A higher number is a sign of potential issues. Prometheus JMX Exporter   Offline Partitions Count meter_kafka_offline_partitions_count Number of partitions that are offline. Non-zero values indicate a problem. Prometheus JMX Exporter   Partition Count meter_kafka_partition_count Total number of partitions on the broker. Prometheus JMX Exporter   Leader Count meter_kafka_leader_count Number of leader partitions on this broker. Prometheus JMX Exporter   Active Controller Count meter_kafka_active_controller_count The number of active controllers in the cluster. Typically should be 1. Prometheus JMX Exporter   Leader Election Rate meter_kafka_leader_election_rate The rate of leader elections per minute. High rate could be a sign of instability. Prometheus JMX Exporter   Unclean Leader Elections Per Second meter_kafka_unclean_leader_elections_per_second The rate of unclean leader elections per second. Non-zero values indicate a serious problem. Prometheus JMX Exporter   Max Lag meter_kafka_max_lag The maximum lag between the leader and followers in terms of messages still needed to be sent. Higher lag indicates delays. 
Prometheus JMX Exporter    Kafka Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_kafka_broker_cpu_time_total CPU usage in percentage Prometheus JMX Exporter   Memory Usage % meter_kafka_broker_memory_usage_percentage JVM heap memory usage in percentage Prometheus JMX Exporter   Incoming Messages Msg/sec meter_kafka_broker_messages_per_second Rate of incoming messages Prometheus JMX Exporter   Bytes In Bytes/sec meter_kafka_broker_bytes_in_per_second Rate of incoming bytes Prometheus JMX Exporter   Bytes Out Bytes/sec meter_kafka_broker_bytes_out_per_second Rate of outgoing bytes Prometheus JMX Exporter   Replication Bytes In Bytes/sec meter_kafka_broker_replication_bytes_in_per_second Rate of incoming bytes for replication Prometheus JMX Exporter   Replication Bytes Out Bytes/sec meter_kafka_broker_replication_bytes_out_per_second Rate of outgoing bytes for replication Prometheus JMX Exporter   Under-Replicated Partitions Count meter_kafka_broker_under_replicated_partitions Number of under-replicated partitions Prometheus JMX Exporter   Under Min ISR Partition Count Count meter_kafka_broker_under_min_isr_partition_count Number of partitions below the minimum ISR (In-Sync Replicas) Prometheus JMX Exporter   Partition Count Count meter_kafka_broker_partition_count Total number of partitions Prometheus JMX Exporter   Leader Count Count meter_kafka_broker_leader_count Number of partitions for which this broker is the leader Prometheus JMX Exporter   ISR Shrinks Count/sec meter_kafka_broker_isr_shrinks_per_second Rate of ISR (In-Sync Replicas) shrinking Prometheus JMX Exporter   ISR Expands Count/sec meter_kafka_broker_isr_expands_per_second Rate of ISR (In-Sync Replicas) expanding Prometheus JMX Exporter   Max Lag Count meter_kafka_broker_max_lag Maximum lag between the leader and follower for a partition Prometheus JMX Exporter   Purgatory Size Count meter_kafka_broker_purgatory_size Size of purgatory for Produce and Fetch operations Prometheus JMX Exporter   Garbage Collector Count Count/sec meter_kafka_broker_garbage_collector_count Rate of garbage collection cycles Prometheus JMX Exporter   Requests Per Second Req/sec meter_kafka_broker_requests_per_second Rate of requests to the broker Prometheus JMX Exporter   Request Queue Time ms meter_kafka_broker_request_queue_time_ms Average time a request spends in the request queue Prometheus JMX Exporter   Remote Time ms meter_kafka_broker_remote_time_ms Average time taken for a remote operation Prometheus JMX Exporter   Response Queue Time ms meter_kafka_broker_response_queue_time_ms Average time a response spends in the response queue Prometheus JMX Exporter   Response Send Time ms meter_kafka_broker_response_send_time_ms Average time taken to send a response Prometheus JMX Exporter   Network Processor Avg Idle % meter_kafka_broker_network_processor_avg_idle_percent Percentage of idle time for the network processor Prometheus JMX Exporter   Topic Messages In Total Count meter_kafka_broker_topic_messages_in_total Total number of messages per topic Prometheus JMX Exporter   Topic Bytes Out Per Second Bytes/sec meter_kafka_broker_topic_bytesout_per_second Rate of outgoing bytes per topic Prometheus JMX Exporter   Topic Bytes In Per Second Bytes/sec meter_kafka_broker_topic_bytesin_per_second Rate of incoming bytes per topic Prometheus JMX Exporter   Topic Fetch Requests Per Second Req/sec meter_kafka_broker_topic_fetch_requests_per_second Rate of fetch requests per topic Prometheus JMX 
Exporter   Topic Produce Requests Per Second Req/sec meter_kafka_broker_topic_produce_requests_per_second Rate of produce requests per topic Prometheus JMX Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/kafka/kafka-cluster.yaml, /config/otel-rules/kafka/kafka-node.yaml. The Kafka dashboard panel configurations are found in /config/ui-initialized-templates/kafka.\nReference For more details on monitoring Kafka and the metrics to focus on, see the following articles:\n Monitoring Kafka Streams Applications Kafka Monitoring  ","excerpt":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-kafka-monitoring/","title":"Kafka monitoring"},{"body":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","excerpt":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-kafka/","title":"Kafka Poll And Invoke"},{"body":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  
public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","excerpt":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-kafka/","title":"Kafka Poll And Invoke"},{"body":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","excerpt":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-kafka/","title":"Kafka Poll And Invoke"},{"body":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  
public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","excerpt":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-kafka/","title":"Kafka Poll And Invoke"},{"body":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","excerpt":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-kafka/","title":"Kafka Poll And Invoke"},{"body":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means that the skywalking agent will report the traces, metrics, logs etc. to SkyWalking OAP Server by gPRC protocol.\nAt the same time, SkyWalking also supports kafka-fetcher, so you can report traces, metrics, logs, etc. 
by kafka.\nBut the skywalking agent does not compile the kafka-reporter feature by default, you need to enable the it.\nSteps   Compile the skywalking agent with feature kafka-reporter.\nFor pecl:\npecl install skywalking_agent Enable the kafka reporter interactively:\n68 source files, building running: phpize Configuring for: PHP Api Version: 20220829 Zend Module Api No: 20220829 Zend Extension Api No: 420220829 enable cargo debug? [no] : enable kafka reporter? [no] : yes Or, build from sources:\nphpize ./configure --enable-kafka-reporter make make install   Config php.ini.\nSwitch to use kafka reporter.\n[skywalking_agent] extension = skywalking_agent.so skywalking_agent.reporter_type = kafka skywalking_agent.kafka_bootstrap_servers = 127.0.0.1:9092,127.0.0.2:9092,127.0.0.3:9092 If you want to custom the kafka reporter properties, you can specify it by JSON format:\nskywalking_agent.kafka_producer_config = {\u0026#34;delivery.timeout.ms\u0026#34;: \u0026#34;12000\u0026#34;}   ","excerpt":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means …","ref":"/docs/skywalking-php/latest/en/reporter/kafka-reporter/","title":"Kafka reporter"},{"body":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means that the skywalking agent will report the traces, metrics, logs etc. to SkyWalking OAP Server by gPRC protocol.\nAt the same time, SkyWalking also supports kafka-fetcher, so you can report traces, metrics, logs, etc. by kafka.\nBut the skywalking agent does not compile the kafka-reporter feature by default, you need to enable the it.\nSteps   Compile the skywalking agent with feature kafka-reporter.\nFor pecl:\npecl install skywalking_agent Enable the kafka reporter interactively:\n68 source files, building running: phpize Configuring for: PHP Api Version: 20220829 Zend Module Api No: 20220829 Zend Extension Api No: 420220829 enable cargo debug? [no] : enable kafka reporter? [no] : yes Or, build from sources:\nphpize ./configure --enable-kafka-reporter make make install   Config php.ini.\nSwitch to use kafka reporter.\n[skywalking_agent] extension = skywalking_agent.so skywalking_agent.reporter_type = kafka skywalking_agent.kafka_bootstrap_servers = 127.0.0.1:9092,127.0.0.2:9092,127.0.0.3:9092 If you want to custom the kafka reporter properties, you can specify it by JSON format:\nskywalking_agent.kafka_producer_config = {\u0026#34;delivery.timeout.ms\u0026#34;: \u0026#34;12000\u0026#34;}   ","excerpt":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means …","ref":"/docs/skywalking-php/next/en/reporter/kafka-reporter/","title":"Kafka reporter"},{"body":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means that the skywalking agent will report the traces, metrics, logs etc. to SkyWalking OAP Server by gPRC protocol.\nAt the same time, SkyWalking also supports kafka-fetcher, so you can report traces, metrics, logs, etc. by kafka.\nBut the skywalking agent does not compile the kafka-reporter feature by default, you need to enable the it.\nSteps   Compile the skywalking agent with feature kafka-reporter.\nFor pecl:\npecl install skywalking_agent Enable the kafka reporter interactively:\n68 source files, building running: phpize Configuring for: PHP Api Version: 20220829 Zend Module Api No: 20220829 Zend Extension Api No: 420220829 enable cargo debug? [no] : enable kafka reporter? 
[no] : yes Or, build from sources:\nphpize ./configure --enable-kafka-reporter make make install   Configure php.ini.\nSwitch to use kafka reporter.\n[skywalking_agent] extension = skywalking_agent.so skywalking_agent.reporter_type = kafka skywalking_agent.kafka_bootstrap_servers = 127.0.0.1:9092,127.0.0.2:9092,127.0.0.3:9092 If you want to customize the kafka reporter properties, you can specify them in JSON format:\nskywalking_agent.kafka_producer_config = {\u0026#34;delivery.timeout.ms\u0026#34;: \u0026#34;12000\u0026#34;}   ","excerpt":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means …","ref":"/docs/skywalking-php/v0.7.0/en/reporter/kafka-reporter/","title":"Kafka reporter"},{"body":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help developers and end users more easily understand how the agent works.\nMethod Interceptor Method interception is particularly important in SkyWalking Go, as it enables the creation of plugins. In SkyWalking Go, method interception mainly involves the following key points:\n Finding Method: Using AST to find method information in the target code to be enhanced. Modifying Methods: Enhancing the specified methods and embedding interceptor code. Saving and Compiling: Updating the modified files in the compilation arguments.  Finding Method When looking for methods, the SkyWalking Go Agent needs to search according to the provided compilation arguments, which mainly include the following two parts:\n Package information: Based on the package name provided by the arguments, the Agent can find the specific plugin. Go files: When a matching plugin is found, the Agent reads the .go files and uses AST to parse the method information from these source files. When the method information matches the method information required by the plugin for the interception, the agent would consider the method found.  Modifying Methods After finding the method, the SkyWalking Go Agent needs to modify the method implementation and embed the interceptor code.\nChange Method Body When intercepting a method, the first thing to do is to modify the method and embed the template code. This code segment includes two method executions:\n Before method execution: Pass in the current method\u0026rsquo;s arguments, instances, and other information. After method execution: Using the defer method, intercept the result parameters after the code execution is completed.  Based on these two methods, the agent can intercept before and after method execution.\nIn order not to affect the line of code execution, this code segment will only be executed in the same line as the first statement in the method. This ensures that when an exception occurs in the framework code execution, the exact location can still be found without being affected by the enhanced code.\nWrite Delegator File After the agent enhances the method body, it needs to implement the above two methods and write them into a single file, called the delegator file. These two methods would do the following:\n Before method execution: Build by the template. Build the context for before and after interception, and pass the parameter information during execution to the interceptor in each plugin. After method execution: Build by the template. Pass the method return value to the interceptor and execute the method.  
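To make the before/after template described above concrete, here is a minimal Go sketch of the pattern, assuming a single hypothetical interceptor; the function names (_skywalking_before_Handle, _skywalking_after_Handle), the interceptor interface, and the Handle method are invented for illustration and are not the agent's actual generated code.

```go
package example

// Hypothetical interceptor shape; the real plugin API differs in detail.
type interceptor interface {
	Before(args []interface{})
	After(args []interface{}, results []interface{})
}

var registered interceptor // would be wired in when the plugin is loaded

// Generated "delegator" functions: one runs before the original body,
// one runs after it via defer, mirroring the two template methods above.
func _skywalking_before_Handle(args ...interface{}) []interface{} {
	if registered != nil {
		registered.Before(args)
	}
	return args
}

func _skywalking_after_Handle(args []interface{}, results ...interface{}) {
	if registered != nil {
		registered.After(args, results)
	}
}

// Enhanced framework method: the injected statements share the line of the
// original first statement, so panics still report the original location.
func Handle(req string) (resp string, err error) {
	args := _skywalking_before_Handle(req); defer func() { _skywalking_after_Handle(args, resp, err) }()
	// ... original method body ...
	return "ok", nil
}
```

Because the deferred call can read the named return values, the after-hook is where result interception happens in this sketch, matching the description above.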
Copy Files After completing the delegator file, the agent would perform the following copy operations:\n Plugin Code: Copy the Go files containing the interceptors in the plugin to the same level directory as the current framework. Plugin Development API Code: Copy the operation APIs required by the interceptors in the plugin to the same level directory as the current framework, such as tracing.  After copying the files, they cannot be immediately added to the compilation parameters, because they may have the same name as the existing framework code. Therefore, we need to perform some rewriting operations, which include the following parts:\n Types: Rename created structures, interfaces, methods, and other types by adding a unified prefix. Static Methods: Add a prefix to non-instance methods. Static methods do not need to be rewritten since they have already been processed in the types. Variables: Add a prefix to global variables. It\u0026rsquo;s not necessary to add a prefix to variables inside methods because they can ensure no conflicts would arise and are helpful for debugging.  In the Tracing API, we can see several methods, such as:\nvar ( errParameter = operator.NewError(\u0026#34;parameter are nil\u0026#34;) ) func CreateLocalSpan(operationName string, opts ...SpanOption) (s Span, err error) type SpanOption interface { Apply(interface{}) } After performed rewriting operations, they would become:\nvar ( skywalkingOperatorVarTracingerrParameter = skywalkingOperatorStaticMethodOperatorNewError(\u0026#34;parameter are nil\u0026#34;) ) func skywalkingOperatorStaticMethodTracingCreateLocalSpan(operationName string, opts ...skywalkingOperatorTypeTracingSpanOption) (s skywalkingOperatorTypeTracingSpan, err error) type skywalkingOperatorTypeTracingSpanOption interface { Apply(interface{}) } Saving and Compiling After the above steps are completed, the agent needs to save the modified files and add them to the compilation parameters.\nAt this point, when the framework executes the enhanced method, it can have the following capabilities:\n Execute Plugin Code: Custom code can be embedded before and after the method execution, and real-time parameter information can be obtained. Operate Agent: By calling the Agent API, interaction with the Agent Core can be achieved, enabling functions such as distributed tracing.  Propagation Context SkyWalking uses a new and internal mechanism to propagate context(e.g. tracing context) instead of relying on go native context.Context. This reduces the requirement for the target codes.\nContext Propagation between Methods In the agent, it would enhance the g structure in the runtime package. The g structure in Golang represents the internal data of the current goroutine. By enhancing this structure and using the runtime.getg() method, we can obtain the enhanced data in the current structure in real-time.\nEnhancement includes the following steps:\n Add Attributes to g: Add a new field to the g struct, and value as interface{}. Export Methods: Export methods for real-time setting and getting of custom field values in the current goroutine through go:linkname. Import methods: In the Agent Core, import the setting and getting methods for custom fields.  
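As a rough, self-contained model of the goroutine-local slot this enables, the sketch below uses an ordinary struct as a stand-in for the patched runtime g structure; the field name skywalkingGLS and the accessor names are invented, and in the real agent the accessors are bridged to the runtime via go:linkname and reached through runtime.getg(), so the value is genuinely per-goroutine rather than a single package variable as in this toy.

```go
package main

import "fmt"

// Stand-in for the patched runtime.g with the agent's extra field.
type g struct {
	skywalkingGLS interface{} // hypothetical field added at compile time
}

// Stand-in for runtime.getg(); in the real mechanism each goroutine has its own g.
var currentG = &g{}

// Stand-ins for the setter/getter that the agent core would import via go:linkname.
func setGLS(v interface{}) { currentG.skywalkingGLS = v }
func getGLS() interface{}  { return currentG.skywalkingGLS }

func main() {
	setGLS(map[string]string{"traceID": "abc123"}) // agent core stores tracing context
	fmt.Println(getGLS())                          // and reads it back later in the same goroutine
}
```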
Through these, the agent has a shared context in any place within the same goroutine, similar to Java\u0026rsquo;s Thread Local.\nContext Propagation between Goroutines Besides using g object as the in-goroutine context propagation, SkyWalking builds a mechanism to propagate context between Goroutines.\nWhen a new goroutine is started on an existing goroutine, the runtime.newproc1 method is called to create a new goroutine based on the existing one. The agent would do context-copy from the previous goroutine to the newly created goroutine. The new context in the goroutine only shares limited information to help continues tracing.\nThe specific operation process is as follows:\n Write the copy method: Create a method for copying data from the previous goroutine. Insert code into newproc1: Insert the defer code, intercept the g objects before and after the execution, and call the copy method to assign values to the custom fields' data.  Agent with Dependency Since SkyWalking Go Agent is based on compile-time enhancement, it cannot introduce third-party modules. For example, when SkyWalking Agent communicates with OAP, it needs to exchange data through the gRPC protocol. If the user does not introduce the gRPC module, it cannot be completed.\nDue to resolve this problem, users need to introduce relevant modules to complete the basic dependency functions. This is why import _ \u0026quot;github.com/apache/skywalking-go\u0026quot; is required. The main key modules that users currently need to introduce include:\n uuid: Used to generate UUIDs, mainly for TraceID generation. errors: To encapsulate error content. gRPC: The basic library used for communication between SkyWalking Go Agent and the Server. skywalking-goapi: The data protocol for communication between Agent and Server in SkyWalking.  Agent Core Copy To simplify the complexity of using Agent, the SkyWalking Go introduced by users only contains the user usage API and code import. The Agent Core code would be dynamically added during hybrid compilation, so when the Agent releases new features, users only need to upgrade the Agent enhancement program without modifying the references in the program.\nCode Import You can see a lot of imports.go files anywhere in the SkyWalking Go, such as imports.go in the root directory, but there is no actual code. This is because, during hybrid compilation, if the code to be compiled references other libraries, such as os, fmt, etc., they need to be referenced through the importcfg file during compilation.\nThe content of the importcfg file is shown below, which specifies the package dependency information required for all Go files to be compiled in the current package path.\npackagefile errors=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b006/_pkg_.a packagefile internal/itoa=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b027/_pkg_.a packagefile internal/oserror=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b035/_pkg_.a So when the file is copied and added to the compilation process, the relevant dependency libraries need to be declared in importcfg. Therefore, by predefining import in the project, the compiler can be forced to introduce the relevant libraries during compilation, thus completing the dynamic enhancement operation.\nPlugin with Agent Core As mentioned in the previous section, it is not possible to dynamically add dependencies between modules. 
Agent can only modify the importcfg file to reference dependencies if we are sure that the previous dependencies have already been loaded, but this is often impractical. For example, Agent cannot introduce dependencies from the plugin code into the Agent Core, because the plugin is unaware of the Agent\u0026rsquo;s existence. This raises a question: how can agent enable communication between plugins and Agent Core?\nCurrently, agent employ the following method: a global object is introduced in the runtime package, provided by Agent Core. When a plugin needs to interact with Agent Core, it simply searches for this global object from runtime package. The specific steps are as follows:\n Global object definition: Add a global variable when the runtime package is loaded and provide corresponding set and get methods. Set the variable when the Agent loads: When the Agent Core is copied and enhanced, import the method for setting the global variable and initialize the object in the global variable. Plugins: When the plugin is built, import the methods for reading the global variables and APIs. At this point, we can access the object set in Agent Core and use the defined interface for the plugin to access methods in Agent Core.  Limitation Since the communication between the plugin API and Agent Core is through an interface, and the plugin API is copied in each plugin, they can only transfer basic data types or any(interface{}) type. The reason is that when additional types are transferred, agent would be copied multiple times, so the types transferred in the plugin are not consistent with the types in Agent Core, as the types also need to be defined multiple times.\nTherefore, when communicating, they only pass structured data through any type, and when the Agent Core or plugin obtains the data, a type cast is simply required.\nDebugging Based on the introductions in the previous sections, both Agent Core and plugin code are dynamically copied/modified into the target package. So, how can we debug the program during development to identify issues?\nOur current approach consists of the following steps:\n Inform the source code location during flag: Enhance the debug parameters during compilation and inform the system path, for example: -toolexec \u0026quot;/path/to/agent -debug /path/to/code\u0026quot; Get the original file path: Find the absolute location of the source code of the file to be copied based on the rules. Introduce the //line directive: Add the //line directive to the copied target file to inform the compiler of the location of the original file after copying.  At this point, when the program is executed, developer can find the original file to be copied in the source code.\n","excerpt":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the …","ref":"/docs/skywalking-go/latest/en/concepts-and-designs/key-principles/","title":"Key Principle"},{"body":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the developers and end users understand how the agent works easier.\nMethod Interceptor Method interception is particularly important in SkyWalking Go, as it enables the creation of plugins. In SkyWalking Go, method interception mainly involves the following key points:\n Finding Method: Using AST to find method information in the target code to be enhanced. Modifying Methods: Enhancing the specified methods and embedding interceptor code. 
Saving and Compiling: Updating the modified files in the compilation arguments.  Finding Method When looking for methods, the SkyWalking Go Agent requires to search according to the provided compilation arguments, which mainly include the following two parts:\n Package information: Based on the package name provided by the arguments, the Agent can find the specific plugin. Go files: When a matching plugin is found, the Agent reads the .go files and uses AST to parse the method information from these source files. When the method information matches the method information required by the plugin for the interception, the agent would consider the method found.  Modifying Methods After finding the method, the SkyWalking Go Agent needs to modify the method implication and embed the interceptor code.\nChange Method Body When intercepting a method, the first thing to do is to modify the method and embed the template code. This code segment includes two method executions:\n Before method execution: Pass in the current method\u0026rsquo;s arguments, instances, and other information. After method execution: Using the defer method, intercept the result parameters after the code execution is completed.  Based on these two methods, the agent can intercept before and after method execution.\nIn order not to affect the line of code execution, this code segment will only be executed in the same line as the first statement in the method. This ensures that when an exception occurs in the framework code execution, the exact location can still be found without being affected by the enhanced code.\nWrite Delegator File After the agent enhances the method body, it needs to implement the above two methods and write them into a single file, called the delegator file. These two methods would do the following:\n Before method execution: Build by the template. Build the context for before and after interception, and pass the parameter information during execution to the interceptor in each plugin. After method execution: Build by the template. Pass the method return value to the interceptor and execute the method.  Copy Files After completing the delegator file, the agent would perform the following copy operations:\n Plugin Code: Copy the Go files containing the interceptors in the plugin to the same level directory as the current framework. Plugin Development API Code: Copy the operation APIs required by the interceptors in the plugin to the same level directory as the current framework, such as tracing.  After copying the files, they cannot be immediately added to the compilation parameters, because they may have the same name as the existing framework code. Therefore, we need to perform some rewriting operations, which include the following parts:\n Types: Rename created structures, interfaces, methods, and other types by adding a unified prefix. Static Methods: Add a prefix to non-instance methods. Static methods do not need to be rewritten since they have already been processed in the types. Variables: Add a prefix to global variables. It\u0026rsquo;s not necessary to add a prefix to variables inside methods because they can ensure no conflicts would arise and are helpful for debugging.  
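To give a feel for what one of the copied plugin files might contain, below is an illustrative Go sketch of an interceptor with before/after hooks; the Invocation type, the BeforeInvoke/AfterInvoke method names, and HTTPClientInterceptor are placeholders rather than the exact plugin development API, and the real identifiers would be prefixed by the rewrite step described above.

```go
package plugin

// Placeholder for the invocation object the agent would pass to interceptors.
type Invocation struct {
	Args    []interface{} // arguments of the intercepted method
	Returns []interface{} // return values, available in the after hook
	Context interface{}   // carries state (e.g. a span) from Before to After
}

type HTTPClientInterceptor struct{}

func (i *HTTPClientInterceptor) BeforeInvoke(inv *Invocation) error {
	// e.g. start an exit span here via the (prefixed) tracing API
	inv.Context = "span-placeholder"
	return nil
}

func (i *HTTPClientInterceptor) AfterInvoke(inv *Invocation) error {
	// e.g. read the response from inv.Returns and stop the span stored in inv.Context
	_ = inv.Context
	return nil
}
```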
In the Tracing API, we can see several methods, such as:\nvar ( errParameter = operator.NewError(\u0026#34;parameter are nil\u0026#34;) ) func CreateLocalSpan(operationName string, opts ...SpanOption) (s Span, err error) type SpanOption interface { Apply(interface{}) } After performed rewriting operations, they would become:\nvar ( skywalkingOperatorVarTracingerrParameter = skywalkingOperatorStaticMethodOperatorNewError(\u0026#34;parameter are nil\u0026#34;) ) func skywalkingOperatorStaticMethodTracingCreateLocalSpan(operationName string, opts ...skywalkingOperatorTypeTracingSpanOption) (s skywalkingOperatorTypeTracingSpan, err error) type skywalkingOperatorTypeTracingSpanOption interface { Apply(interface{}) } Saving and Compiling After the above steps are completed, the agent needs to save the modified files and add them to the compilation parameters.\nAt this point, when the framework executes the enhanced method, it can have the following capabilities:\n Execute Plugin Code: Custom code can be embedded before and after the method execution, and real-time parameter information can be obtained. Operate Agent: By calling the Agent API, interaction with the Agent Core can be achieved, enabling functions such as distributed tracing.  Propagation Context SkyWalking uses a new and internal mechanism to propagate context(e.g. tracing context) instead of relying on go native context.Context. This reduces the requirement for the target codes.\nContext Propagation between Methods In the agent, it would enhance the g structure in the runtime package. The g structure in Golang represents the internal data of the current goroutine. By enhancing this structure and using the runtime.getg() method, we can obtain the enhanced data in the current structure in real-time.\nEnhancement includes the following steps:\n Add Attributes to g: Add a new field to the g struct, and value as interface{}. Export Methods: Export methods for real-time setting and getting of custom field values in the current goroutine through go:linkname. Import methods: In the Agent Core, import the setting and getting methods for custom fields.  Through these, the agent has a shared context in any place within the same goroutine, similar to Java\u0026rsquo;s Thread Local.\nContext Propagation between Goroutines Besides using g object as the in-goroutine context propagation, SkyWalking builds a mechanism to propagate context between Goroutines.\nWhen a new goroutine is started on an existing goroutine, the runtime.newproc1 method is called to create a new goroutine based on the existing one. The agent would do context-copy from the previous goroutine to the newly created goroutine. The new context in the goroutine only shares limited information to help continues tracing.\nThe specific operation process is as follows:\n Write the copy method: Create a method for copying data from the previous goroutine. Insert code into newproc1: Insert the defer code, intercept the g objects before and after the execution, and call the copy method to assign values to the custom fields' data.  Agent with Dependency Since SkyWalking Go Agent is based on compile-time enhancement, it cannot introduce third-party modules. For example, when SkyWalking Agent communicates with OAP, it needs to exchange data through the gRPC protocol. If the user does not introduce the gRPC module, it cannot be completed.\nDue to resolve this problem, users need to introduce relevant modules to complete the basic dependency functions. 
This is why import _ \u0026quot;github.com/apache/skywalking-go\u0026quot; is required. The main key modules that users currently need to introduce include:\n uuid: Used to generate UUIDs, mainly for TraceID generation. errors: To encapsulate error content. gRPC: The basic library used for communication between SkyWalking Go Agent and the Server. skywalking-goapi: The data protocol for communication between Agent and Server in SkyWalking.  Agent Core Copy To simplify the complexity of using Agent, the SkyWalking Go introduced by users only contains the user usage API and code import. The Agent Core code would be dynamically added during hybrid compilation, so when the Agent releases new features, users only need to upgrade the Agent enhancement program without modifying the references in the program.\nCode Import You can see a lot of imports.go files anywhere in the SkyWalking Go, such as imports.go in the root directory, but there is no actual code. This is because, during hybrid compilation, if the code to be compiled references other libraries, such as os, fmt, etc., they need to be referenced through the importcfg file during compilation.\nThe content of the importcfg file is shown below, which specifies the package dependency information required for all Go files to be compiled in the current package path.\npackagefile errors=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b006/_pkg_.a packagefile internal/itoa=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b027/_pkg_.a packagefile internal/oserror=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b035/_pkg_.a So when the file is copied and added to the compilation process, the relevant dependency libraries need to be declared in importcfg. Therefore, by predefining import in the project, the compiler can be forced to introduce the relevant libraries during compilation, thus completing the dynamic enhancement operation.\nPlugin with Agent Core As mentioned in the previous section, it is not possible to dynamically add dependencies between modules. Agent can only modify the importcfg file to reference dependencies if we are sure that the previous dependencies have already been loaded, but this is often impractical. For example, Agent cannot introduce dependencies from the plugin code into the Agent Core, because the plugin is unaware of the Agent\u0026rsquo;s existence. This raises a question: how can agent enable communication between plugins and Agent Core?\nCurrently, agent employ the following method: a global object is introduced in the runtime package, provided by Agent Core. When a plugin needs to interact with Agent Core, it simply searches for this global object from runtime package. The specific steps are as follows:\n Global object definition: Add a global variable when the runtime package is loaded and provide corresponding set and get methods. Set the variable when the Agent loads: When the Agent Core is copied and enhanced, import the method for setting the global variable and initialize the object in the global variable. Plugins: When the plugin is built, import the methods for reading the global variables and APIs. At this point, we can access the object set in Agent Core and use the defined interface for the plugin to access methods in Agent Core.  Limitation Since the communication between the plugin API and Agent Core is through an interface, and the plugin API is copied in each plugin, they can only transfer basic data types or any(interface{}) type. 
The reason is that when additional types are transferred, agent would be copied multiple times, so the types transferred in the plugin are not consistent with the types in Agent Core, as the types also need to be defined multiple times.\nTherefore, when communicating, they only pass structured data through any type, and when the Agent Core or plugin obtains the data, a type cast is simply required.\nDebugging Based on the introductions in the previous sections, both Agent Core and plugin code are dynamically copied/modified into the target package. So, how can we debug the program during development to identify issues?\nOur current approach consists of the following steps:\n Inform the source code location during flag: Enhance the debug parameters during compilation and inform the system path, for example: -toolexec \u0026quot;/path/to/agent -debug /path/to/code\u0026quot; Get the original file path: Find the absolute location of the source code of the file to be copied based on the rules. Introduce the //line directive: Add the //line directive to the copied target file to inform the compiler of the location of the original file after copying.  At this point, when the program is executed, developer can find the original file to be copied in the source code.\n","excerpt":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the …","ref":"/docs/skywalking-go/next/en/concepts-and-designs/key-principles/","title":"Key Principle"},{"body":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the developers and end users understand how the agent works easier.\nMethod Interceptor Method interception is particularly important in SkyWalking Go, as it enables the creation of plugins. In SkyWalking Go, method interception mainly involves the following key points:\n Finding Method: Using AST to find method information in the target code to be enhanced. Modifying Methods: Enhancing the specified methods and embedding interceptor code. Saving and Compiling: Updating the modified files in the compilation arguments.  Finding Method When looking for methods, the SkyWalking Go Agent requires to search according to the provided compilation arguments, which mainly include the following two parts:\n Package information: Based on the package name provided by the arguments, the Agent can find the specific plugin. Go files: When a matching plugin is found, the Agent reads the .go files and uses AST to parse the method information from these source files. When the method information matches the method information required by the plugin for the interception, the agent would consider the method found.  Modifying Methods After finding the method, the SkyWalking Go Agent needs to modify the method implication and embed the interceptor code.\nChange Method Body When intercepting a method, the first thing to do is to modify the method and embed the template code. This code segment includes two method executions:\n Before method execution: Pass in the current method\u0026rsquo;s arguments, instances, and other information. After method execution: Using the defer method, intercept the result parameters after the code execution is completed.  Based on these two methods, the agent can intercept before and after method execution.\nIn order not to affect the line of code execution, this code segment will only be executed in the same line as the first statement in the method. 
This ensures that when an exception occurs in the framework code execution, the exact location can still be found without being affected by the enhanced code.\nWrite Delegator File After the agent enhances the method body, it needs to implement the above two methods and write them into a single file, called the delegator file. These two methods would do the following:\n Before method execution: Build by the template. Build the context for before and after interception, and pass the parameter information during execution to the interceptor in each plugin. After method execution: Build by the template. Pass the method return value to the interceptor and execute the method.  Copy Files After completing the delegator file, the agent would perform the following copy operations:\n Plugin Code: Copy the Go files containing the interceptors in the plugin to the same level directory as the current framework. Plugin Development API Code: Copy the operation APIs required by the interceptors in the plugin to the same level directory as the current framework, such as tracing.  After copying the files, they cannot be immediately added to the compilation parameters, because they may have the same name as the existing framework code. Therefore, we need to perform some rewriting operations, which include the following parts:\n Types: Rename created structures, interfaces, methods, and other types by adding a unified prefix. Static Methods: Add a prefix to non-instance methods. Static methods do not need to be rewritten since they have already been processed in the types. Variables: Add a prefix to global variables. It\u0026rsquo;s not necessary to add a prefix to variables inside methods because they can ensure no conflicts would arise and are helpful for debugging.  In the Tracing API, we can see several methods, such as:\nvar ( errParameter = operator.NewError(\u0026#34;parameter are nil\u0026#34;) ) func CreateLocalSpan(operationName string, opts ...SpanOption) (s Span, err error) type SpanOption interface { Apply(interface{}) } After performed rewriting operations, they would become:\nvar ( skywalkingOperatorVarTracingerrParameter = skywalkingOperatorStaticMethodOperatorNewError(\u0026#34;parameter are nil\u0026#34;) ) func skywalkingOperatorStaticMethodTracingCreateLocalSpan(operationName string, opts ...skywalkingOperatorTypeTracingSpanOption) (s skywalkingOperatorTypeTracingSpan, err error) type skywalkingOperatorTypeTracingSpanOption interface { Apply(interface{}) } Saving and Compiling After the above steps are completed, the agent needs to save the modified files and add them to the compilation parameters.\nAt this point, when the framework executes the enhanced method, it can have the following capabilities:\n Execute Plugin Code: Custom code can be embedded before and after the method execution, and real-time parameter information can be obtained. Operate Agent: By calling the Agent API, interaction with the Agent Core can be achieved, enabling functions such as distributed tracing.  Propagation Context SkyWalking uses a new and internal mechanism to propagate context(e.g. tracing context) instead of relying on go native context.Context. This reduces the requirement for the target codes.\nContext Propagation between Methods In the agent, it would enhance the g structure in the runtime package. The g structure in Golang represents the internal data of the current goroutine. 
By enhancing this structure and using the runtime.getg() method, we can obtain the enhanced data in the current structure in real-time.\nEnhancement includes the following steps:\n Add Attributes to g: Add a new field to the g struct, and value as interface{}. Export Methods: Export methods for real-time setting and getting of custom field values in the current goroutine through go:linkname. Import methods: In the Agent Core, import the setting and getting methods for custom fields.  Through these, the agent has a shared context in any place within the same goroutine, similar to Java\u0026rsquo;s Thread Local.\nContext Propagation between Goroutines Besides using g object as the in-goroutine context propagation, SkyWalking builds a mechanism to propagate context between Goroutines.\nWhen a new goroutine is started on an existing goroutine, the runtime.newproc1 method is called to create a new goroutine based on the existing one. The agent would do context-copy from the previous goroutine to the newly created goroutine. The new context in the goroutine only shares limited information to help continues tracing.\nThe specific operation process is as follows:\n Write the copy method: Create a method for copying data from the previous goroutine. Insert code into newproc1: Insert the defer code, intercept the g objects before and after the execution, and call the copy method to assign values to the custom fields' data.  Agent with Dependency Since SkyWalking Go Agent is based on compile-time enhancement, it cannot introduce third-party modules. For example, when SkyWalking Agent communicates with OAP, it needs to exchange data through the gRPC protocol. If the user does not introduce the gRPC module, it cannot be completed.\nDue to resolve this problem, users need to introduce relevant modules to complete the basic dependency functions. This is why import _ \u0026quot;github.com/apache/skywalking-go\u0026quot; is required. The main key modules that users currently need to introduce include:\n uuid: Used to generate UUIDs, mainly for TraceID generation. errors: To encapsulate error content. gRPC: The basic library used for communication between SkyWalking Go Agent and the Server. skywalking-goapi: The data protocol for communication between Agent and Server in SkyWalking.  Agent Core Copy To simplify the complexity of using Agent, the SkyWalking Go introduced by users only contains the user usage API and code import. The Agent Core code would be dynamically added during hybrid compilation, so when the Agent releases new features, users only need to upgrade the Agent enhancement program without modifying the references in the program.\nCode Import You can see a lot of imports.go files anywhere in the SkyWalking Go, such as imports.go in the root directory, but there is no actual code. 
This is because, during hybrid compilation, if the code to be compiled references other libraries, such as os, fmt, etc., they need to be referenced through the importcfg file during compilation.\nThe content of the importcfg file is shown below, which specifies the package dependency information required for all Go files to be compiled in the current package path.\npackagefile errors=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b006/_pkg_.a packagefile internal/itoa=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b027/_pkg_.a packagefile internal/oserror=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b035/_pkg_.a So when the file is copied and added to the compilation process, the relevant dependency libraries need to be declared in importcfg. Therefore, by predefining import in the project, the compiler can be forced to introduce the relevant libraries during compilation, thus completing the dynamic enhancement operation.\nPlugin with Agent Core As mentioned in the previous section, it is not possible to dynamically add dependencies between modules. Agent can only modify the importcfg file to reference dependencies if we are sure that the previous dependencies have already been loaded, but this is often impractical. For example, Agent cannot introduce dependencies from the plugin code into the Agent Core, because the plugin is unaware of the Agent\u0026rsquo;s existence. This raises a question: how can agent enable communication between plugins and Agent Core?\nCurrently, agent employ the following method: a global object is introduced in the runtime package, provided by Agent Core. When a plugin needs to interact with Agent Core, it simply searches for this global object from runtime package. The specific steps are as follows:\n Global object definition: Add a global variable when the runtime package is loaded and provide corresponding set and get methods. Set the variable when the Agent loads: When the Agent Core is copied and enhanced, import the method for setting the global variable and initialize the object in the global variable. Plugins: When the plugin is built, import the methods for reading the global variables and APIs. At this point, we can access the object set in Agent Core and use the defined interface for the plugin to access methods in Agent Core.  Limitation Since the communication between the plugin API and Agent Core is through an interface, and the plugin API is copied in each plugin, they can only transfer basic data types or any(interface{}) type. The reason is that when additional types are transferred, agent would be copied multiple times, so the types transferred in the plugin are not consistent with the types in Agent Core, as the types also need to be defined multiple times.\nTherefore, when communicating, they only pass structured data through any type, and when the Agent Core or plugin obtains the data, a type cast is simply required.\nDebugging Based on the introductions in the previous sections, both Agent Core and plugin code are dynamically copied/modified into the target package. 
Debugging Based on the introductions in the previous sections, both the Agent Core and the plugin code are dynamically copied/modified into the target package. So, how can we debug the program during development to identify issues?\nOur current approach consists of the following steps:\n Provide the source code location via a flag: Add the debug parameter to the compilation command and pass the source path, for example: -toolexec \u0026quot;/path/to/agent -debug /path/to/code\u0026quot; Get the original file path: Find the absolute location of the source file to be copied, based on the rules. Introduce the //line directive: Add the //line directive to the copied target file to inform the compiler of the location of the original file after copying.  At this point, when the program is executed, developers can locate the original source file behind each copied file.\n","excerpt":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the …","ref":"/docs/skywalking-go/v0.4.0/en/concepts-and-designs/key-principles/","title":"Key Principle"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages the OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. The OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via the Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to get meta info, and parses the expressions with MAL to filter/calculate/aggregate and store the results.  Setup  Set up kube-state-metrics. cAdvisor is integrated into kubelet by default. Set up the OpenTelemetry Collector. For details on the Prometheus Receiver in the OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of the configuration and recommended versions; you can refer to the showcase. Configure the SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides visibility into the status and resources of the whole cluster and each node.
K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s 
k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/latest/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring Kubernetes is an open-source container-orchestration system for automating computer application deployment, scaling, and management. It was originally designed by Google and is now maintained by the Cloud Native Computing Foundation. It aims to provide a \u0026ldquo;platform for automating deployment, scaling, and operations of application containers across clusters of hosts\u0026rdquo;. It works with a range of container tools, including Docker.\nNowadays, Kubernetes is the fundamental infrastructure for cloud native applications. SkyWalking provides the following ways to monitor deployments on Kubernetes.\n Use kube-state-metrics (KSM) and cAdvisor to collect metrics of Kubernetes resources, such as CPU, service, pod, and node. Read kube-state-metrics and cAdvisor setup guide for more details. Rover is a SkyWalking native eBPF agent to collect network Access Logs to support topology-aware and metrics analysis. Meanwhile, due to the power of eBPF, it could profile running services written by C++, Rust, Golang, etc. Read Rover setup guide for more details.  SkyWalking deeply integrates with Kubernetes to help users understand the status of their applications on Kubernetes. 
Cillium with Hubble is in our v10 plan.\n","excerpt":"Kubernetes (K8s) monitoring Kubernetes is an open-source container-orchestration system for …","ref":"/docs/main/next/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus GRPC Exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are 
currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/k8s-cluster.yaml,/config/otel-oc-rules/k8s-node.yaml, /config/otel-oc-rules/k8s-service.yaml.\nThe K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s 
kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s-cluster.yaml,/config/otel-rules/k8s-node.yaml, /config/otel-rules/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s 
kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s-cluster.yaml,/config/otel-rules/k8s-node.yaml, /config/otel-rules/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s 
kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related 
pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related 
pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related 
pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring from kube-state-metrics and cAdvisor SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services 
current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring from kube-state-metrics and cAdvisor SkyWalking leverages K8s …","ref":"/docs/main/next/en/setup/backend/backend-k8s-monitoring-metrics-cadvisor/","title":"Kubernetes (K8s) monitoring from kube-state-metrics and cAdvisor"},{"body":"Kubernetes (K8s) monitoring from Rover SkyWalking uses the SkyWalking Rover system to collect access logs from Kubernetes clusters and hands them over to the OAL system for metrics and entity analysis.\nData flow  SkyWalking Rover monitors access log data from K8s and sends it to the OAP. The SkyWalking OAP Server receives access logs from Rover through gRPC, analyzes them to generate entities, and uses OAL to generate metrics.  Setup  Set up Rover in Kubernetes and enable the access log service. Set up the eBPF receiver module with the following configuration.  receiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Generated Entities SkyWalking receives the access logs from Rover and analyzes the Kubernetes connection information to parse out the following corresponding entities:\n Service Service Instance Service Endpoint Service Relation Service Instance Relation Service Endpoint Relation  Generate Metrics For each of the above-mentioned entities, metrics such as connection, transmission, and protocol can be analyzed.\nConnection Metrics Record the relevant metrics for every service establishing/closing connections with other services.\n   Name Unit Description     Connect CPM Count Total Connect to other Service counts per minute.   Connect Duration Nanoseconds Total Connect to other Service use duration.   Connect Success CPM Count Success to connect to other Service counts per minute.   Accept CPM Count Accept new connection from other Service counts per minute.   Accept Duration Nanoseconds Total accept new connection from other Service use duration.   Close CPM Count Close one connection counts per minute.   Close Duration Nanoseconds Total Close connections use duration.    Transfer Metrics Record the basic information and L2-L4 layer details for each syscall made during network requests by every service to other services.\nRead Data from Connection    Name Unit Description     Read CPM Count Read from connection counts per minute.   Read Duration Nanoseconds Total read data use duration.   Read Package CPM Count Total read TCP Package count per minute.   Read Package Size Bytes Total read TCP package size per minute.   Read Layer 4 Duration Nanoseconds Total read data on the Layer 4 use duration.   Read Layer 3 Duration Nanoseconds Total read data on the Layer 3 use duration.   Read Layer 3 Recv Duration Nanoseconds Total read data on the Layer 3 receive use duration.   Read Layer 3 Local Duration Nanoseconds Total read data on the Layer 3 local use duration.   Read Package To Queue Duration Nanoseconds Total duration between TCP package received and send to Queue.   Read Package From Queue Duration Nanoseconds Total duration between send to Queue and receive from Queue.   Read Net Filter CPM Count Total Net Filtered count when read data.   Read Net Filter Duration Nanoseconds Total Net Filtered use duration.    Write Data to Connection    Name Unit Description     Write CPM Count Write to connection counts per minute.   Write Duration Nanoseconds Total write data to connection use duration.   Write Package CPM Count Total write TCP Package count per minute.   
Write Package Size Bytes Total write TCP Package size per minute.   Write L4 Duration Nanoseconds Total write data to connection Layer 4 use duration.   Write L3 Duration Nanoseconds Total write data to connection Layer 3 use duration.   Write L3 Local Duration Nanoseconds Total write data to the connection Layer 3 Local use duration.   Write L3 Output Duration Nanoseconds Total write data to the connection Layer 3 Output use duration.   Write L2 Duration Nanoseconds Total write data to connection Layer 2 use duration.   Write L2 Ready Send Duration Nanoseconds Total write data to the connection Layer 2 ready send data queue use duration.   Write L2 Send NetDevice Duration Nanoseconds Total write data to the connection Layer 2 send data to net device use duration.    Protocol Based on the analysis of each data transfer, extract the information of the Layer 7 network protocol.\nHTTP/1.x or HTTP/2.x    Name Unit Description     Call CPM Count HTTP Request calls per minute.   Duration Nanoseconds Total HTTP Response use duration.   Success CPM Count Total HTTP Response success (status \u0026lt; 500) count.   Request Header Size Bytes Total Request Header size.   Request Body Size Bytes Total Request Body size.   Response Header Size Bytes Total Response Header size.   Response Body Size Bytes Total Response Body size.    Customizations You can customize your own metrics/dashboard panel. The metrics definition and expression rules are found in /config/oal/ebpf.oal, please refer to the Scope Declaration Documentation. The K8s dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring from Rover SkyWalking uses the SkyWalking Rover system to collect access …","ref":"/docs/main/next/en/setup/backend/backend-k8s-monitoring-rover/","title":"Kubernetes (K8s) monitoring from Rover"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. 
Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/latest/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. 
When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/next/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature for collecting metrics data from the network. SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  
agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature for …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  
Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. 
When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. 
Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. 
Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    
","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. 
Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, which is by importing SkyWalking into your project and starting the agent.\nDefaults By default, SkyWalking Python agent uses gRPC protocol to report data to SkyWalking backend, in SkyWalking backend, the port of gRPC protocol is 11800, and the port of HTTP protocol is 12800,\nSee all default configuration values in the Configuration Vocabulary\nYou could configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) and set agent_protocol (or environment variable SW_AGENT_PROTOCOL to one of gprc, http or kafka according to the protocol you would like to use.\nReport data via gRPC protocol (Default) For example, if you want to use gRPC protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:11800, such as 127.0.0.1:11800:\nfrom skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:11800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via HTTP protocol However, if you want to use HTTP protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:12800, such as 127.0.0.1:12800, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to http):\n Remember you should install skywalking-python with extra requires http, pip install \u0026quot;apache-skywalking[http].\n from skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:12800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;http\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via Kafka protocol Please make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nFinally, if you want to use Kafka protocol to report data, configure kafka_bootstrap_servers (or environment variable SW_KAFKA_BOOTSTRAP_SERVERS) to kafka-brokers, such as 127.0.0.1:9200, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to kafka):\n Remember you should install skywalking-python with extra requires kafka, pip install \u0026quot;apache-skywalking[kafka]\u0026quot;.\n from skywalking import agent, config config.init(kafka_bootstrap_servers=\u0026#39;127.0.0.1:9200\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;kafka\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated 
uuid\u0026gt;\u0026#39;) agent.start() Alternatively, you can also pass the configurations via environment variables (such as SW_AGENT_NAME, SW_AGENT_COLLECTOR_BACKEND_SERVICES, etc.) so that you don\u0026rsquo;t need to call config.init.\nAll supported environment variables can be found in the Environment Variables List.\n","excerpt":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, …","ref":"/docs/skywalking-python/latest/en/setup/intrusive/","title":"Legacy Setup"},{"body":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, which is by importing SkyWalking into your project and starting the agent.\nDefaults By default, SkyWalking Python agent uses gRPC protocol to report data to SkyWalking backend, in SkyWalking backend, the port of gRPC protocol is 11800, and the port of HTTP protocol is 12800,\nSee all default configuration values in the Configuration Vocabulary\nYou could configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) and set agent_protocol (or environment variable SW_AGENT_PROTOCOL to one of gprc, http or kafka according to the protocol you would like to use.\nReport data via gRPC protocol (Default) For example, if you want to use gRPC protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:11800, such as 127.0.0.1:11800:\nfrom skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:11800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via HTTP protocol However, if you want to use HTTP protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:12800, such as 127.0.0.1:12800, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to http):\n Remember you should install skywalking-python with extra requires http, pip install \u0026quot;apache-skywalking[http].\n from skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:12800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;http\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via Kafka protocol Please make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nFinally, if you want to use Kafka protocol to report data, configure kafka_bootstrap_servers (or environment variable SW_KAFKA_BOOTSTRAP_SERVERS) to kafka-brokers, such as 127.0.0.1:9200, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to kafka):\n Remember you should install skywalking-python with extra requires kafka, pip install \u0026quot;apache-skywalking[kafka]\u0026quot;.\n from skywalking import agent, config config.init(kafka_bootstrap_servers=\u0026#39;127.0.0.1:9200\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;kafka\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Alternatively, you can also pass the configurations via 
environment variables (such as SW_AGENT_NAME, SW_AGENT_COLLECTOR_BACKEND_SERVICES, etc.) so that you don\u0026rsquo;t need to call config.init.\nAll supported environment variables can be found in the Environment Variables List.\n","excerpt":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, …","ref":"/docs/skywalking-python/next/en/setup/intrusive/","title":"Legacy Setup"},{"body":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, which is by importing SkyWalking into your project and starting the agent.\nDefaults By default, SkyWalking Python agent uses gRPC protocol to report data to SkyWalking backend, in SkyWalking backend, the port of gRPC protocol is 11800, and the port of HTTP protocol is 12800,\nSee all default configuration values in the Configuration Vocabulary\nYou could configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) and set agent_protocol (or environment variable SW_AGENT_PROTOCOL to one of gprc, http or kafka according to the protocol you would like to use.\nReport data via gRPC protocol (Default) For example, if you want to use gRPC protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:11800, such as 127.0.0.1:11800:\nfrom skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:11800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via HTTP protocol However, if you want to use HTTP protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:12800, such as 127.0.0.1:12800, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to http):\n Remember you should install skywalking-python with extra requires http, pip install \u0026quot;apache-skywalking[http].\n from skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:12800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;http\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via Kafka protocol Please make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nFinally, if you want to use Kafka protocol to report data, configure kafka_bootstrap_servers (or environment variable SW_KAFKA_BOOTSTRAP_SERVERS) to kafka-brokers, such as 127.0.0.1:9200, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to kafka):\n Remember you should install skywalking-python with extra requires kafka, pip install \u0026quot;apache-skywalking[kafka]\u0026quot;.\n from skywalking import agent, config config.init(kafka_bootstrap_servers=\u0026#39;127.0.0.1:9200\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;kafka\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Alternatively, you can also pass the configurations via environment variables (such as SW_AGENT_NAME, SW_AGENT_COLLECTOR_BACKEND_SERVICES, etc.) 
so that you don\u0026rsquo;t need to call config.init.\nAll supported environment variables can be found in the Environment Variables List.\n","excerpt":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, …","ref":"/docs/skywalking-python/v1.0.1/en/setup/intrusive/","title":"Legacy Setup"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/latest/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. 
The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. 
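For orientation, a rule in those files follows the Meter Analysis Language (MAL) layout: a shared expression suffix plus a list of named expressions. The snippet below is only a rough sketch of that layout under assumed values, not the shipped content of vm.yaml; the expression, metricPrefix and expSuffix shown here are illustrative.
```yaml
# Illustrative MAL-style rule sketch; NOT the shipped /config/otel-rules/vm.yaml.
# The expression is an assumption for demonstration; the node_memory_* series
# are exposed by Prometheus node-exporter.
expSuffix: tag({tags -> tags.host_name = 'vm::' + tags.host_name}).service(['host_name'], Layer.OS_LINUX)
metricPrefix: meter_vm
metricsRules:
  - name: memory_used   # exposed as meter_vm_memory_used
    exp: (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes).sum(['host_name'])
```
Editing or adding entries of this shape is typically how new metrics are introduced before they are wired into a dashboard panel.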
The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/next/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs, and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nVM entity as a Service in OAP, and on the Layer: OS_LINUX.\nData flow  The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/vm.yaml.\nThe dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs, …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-vm-monitoring/","title":"Linux 
Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nVM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow  The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/vm.yaml.\nThe dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow  The Prometheus node-exporter collects metrics data from the VMs. 
The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. 
The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. 
The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. 
The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. 
The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. 
The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. 
The dashboard panel configurations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property? By default, the agent will try to locate agent.config, which should be in the /config directory of the agent package. If the user sets a specified agent config file through system properties, the agent will try to load the file from there. This function has no conflict with Setting Override.\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as the default config.\nUse System.Properties(-D) to set the specified config path:\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","excerpt":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/specified-agent-config/","title":"Locate agent config file by system property"},{"body":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property? By default, the agent will try to locate agent.config, which should be in the /config directory of the agent package. If the user sets a specified agent config file through system properties, the agent will try to load the file from there. This function has no conflict with Setting Override.\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as the default config.\nUse System.Properties(-D) to set the specified config path:\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","excerpt":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/specified-agent-config/","title":"Locate agent config file by system property"},{"body":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property? By default, the agent will try to locate agent.config, which should be in the /config directory of the agent package. If the user sets a specified agent config file through system properties, the agent will try to load the file from there. 
By the way, This function has no conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content formats of the specified config must be same as the default config.\nUsing System.Properties(-D) to set the specified config path\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","excerpt":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/specified-agent-config/","title":"Locate agent config file by system property"},{"body":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property ? In Default. The agent will try to locate agent.config, which should be in the /config dictionary of agent package. If User sets the specified agent config file through system properties, The agent will try to load file from there. By the way, This function has no conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content formats of the specified config must be same as the default config.\nUsing System.Properties(-D) to set the specified config path\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","excerpt":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/specified-agent-config/","title":"Locate agent config file by system property"},{"body":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property ? In Default. The agent will try to locate agent.config, which should be in the /config dictionary of agent package. If User sets the specified agent config file through system properties, The agent will try to load file from there. By the way, This function has no conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content formats of the specified config must be same as the default config.\nUsing System.Properties(-D) to set the specified config path\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","excerpt":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/specified-agent-config/","title":"Locate agent config file by system property"},{"body":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","excerpt":"Log Analysis Log analyzer of OAP server supports native log data. 
OAP could use Log Analysis …","ref":"/docs/main/latest/en/setup/backend/log-analyzer/","title":"Log Analysis"},{"body":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","excerpt":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis …","ref":"/docs/main/next/en/setup/backend/log-analyzer/","title":"Log Analysis"},{"body":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","excerpt":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis …","ref":"/docs/main/v9.5.0/en/setup/backend/log-analyzer/","title":"Log Analysis"},{"body":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","excerpt":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis …","ref":"/docs/main/v9.6.0/en/setup/backend/log-analyzer/","title":"Log Analysis"},{"body":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","excerpt":"Log Analysis Log analyzer of OAP server supports native log data. 
OAP could use Log Analysis …","ref":"/docs/main/v9.7.0/en/setup/backend/log-analyzer/","title":"Log Analysis"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. 
For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe parameter of timestamp can be a millisecond:\nfilter { // ... parser  extractor { timestamp parsed.time as String } } or a datetime string with a specified pattern:\nfilter { // ... 
parser  extractor { timestamp parsed.time as String, \u0026#34;yyyy-MM-dd HH:mm:ss\u0026#34; } }  layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo-random probability (a percentage) of being sampled; the probability is generated by the Java random number generator and compared with the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExample 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Example 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or, if you have multiple filters, some of which only extract metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as error logs, even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/latest/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as correlate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink.
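On disk, such filters live inside the dsl block of a rule file under the lal directory. A minimal sketch of the file layout (the rules / name / dsl keys follow the bundled default rule files; verify them against your SkyWalking version):
rules:
  - name: my-rule
    dsl: |
      filter {
        json { }
        extractor {
          service parsed.service as String
        }
        sink { }
      }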
Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the endpoint name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe parameter of timestamp can be a timestamp in milliseconds:\nfilter { // ... parser  extractor { timestamp parsed.time as String } } or a datetime string with a specified pattern:\nfilter { // ... parser  extractor { timestamp parsed.time as String, \u0026#34;yyyy-MM-dd HH:mm:ss\u0026#34; } }  layer  layer extracts the layer from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and sets them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nfilter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics.
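Conceptually, each metrics block turns one matching log into one meter sample. For instance, under the log_count block shown in the example further below, an ERROR log from a (made-up) service order with instance order-1 would contribute a sample equivalent to:
log_count{level="ERROR", service="order", instance="order-1"} = 1   // at the log's timestamp
(Prometheus-style notation is used purely for illustration; it is not the meter system's storage format.)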
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/next/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. 
Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\nParser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certains performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. 
Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service / instance.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99])Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. 
A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as error logs, even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as correlate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the order they are declared.\nGlobal Functions Globally available functions may be used in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\nParser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs.
For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certains performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99])Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. 
A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as error logs, even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as correlate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the order they are declared.\nGlobal Functions Globally available functions may be used in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\nParser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs.
For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99])Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. 
A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. 
There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
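The metadata extractors are described one by one below. As a quick overview, a single extractor block may combine any of them; every parsed.* field name in this sketch is illustrative and depends on what your parser actually captured:

filter {
  // ... parser
  extractor {
    service parsed.serviceName    // illustrative: field holding the service name
    instance parsed.hostName      // illustrative: service instance name
    endpoint parsed.requestPath   // illustrative: endpoint name
    traceId parsed.traceId
    timestamp parsed.timestamp    // expected in milliseconds
    layer 'GENERAL'
    tag level: parsed.level
  }
  // ... sink
}

Each call simply copies a value from the parsed result into the LogData, as the per-extractor descriptions below explain.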
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. 
SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. 
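The supported strategies are listed next. One detail worth illustrating up front: rateLimit samplers are identified by an ID, and declarations that reuse the same ID, even in different filters, refer to one shared sampler instance and therefore one shared per-minute budget. A sketch with hypothetical sampler ID and rpm value:

filter { // filter A
  // ... parser
  sink {
    sampler {
      rateLimit("SharedSampler") {
        rpm 600
      }
    }
  }
}
filter { // filter B: same ID, so it shares the same sampler instance (and rpm budget) as filter A
  // ... parser
  sink {
    sampler {
      rateLimit("SharedSampler") {
        rpm 600
      }
    }
  }
}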
Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. 
You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
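Before continuing with the slow SQL report format below, here is the same configuration shown inline earlier in this section, laid out as ordinary indented YAML purely for readability; the keys, metric names, and expressions are unchanged:

# application.yml (excerpt)
log-analyzer:
  selector: ${SW_LOG_ANALYZER:default}
  default:
    lalFiles: ${SW_LOG_LAL_FILES:my-lal-config}   # files are under the "lal" directory
    malFiles: ${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}   # files are under the "log-mal-rules" directory

# log-mal-rules (excerpt): log count per logging level
metrics:
  - name: log_count_debug
    exp: log_count.tagEqual('level', 'DEBUG').sum(['service', 'instance']).increase('PT1M')
  - name: log_count_error
    exp: log_count.tagEqual('level', 'ERROR').sum(['service', 'instance']).increase('PT1M')

# log-mal-rules (excerpt): response time percentiles
metrics:
  - name: response_time_percentile
    exp: http_response_time.sum(['le', 'service', 'instance']).increase('PT5M').histogram().histogram_percentile([50,70,90,99])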
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. 
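Each of these three parts is explained in detail below. As a shape reference only, a minimal filter is simply the three parts declared in order; the field name parsed.serviceName and the sampler ID used here are illustrative, not required names:

filter {
  json {}                           // parser: turns the raw log into the "parsed" property
  extractor {
    service parsed.serviceName      // illustrative: copy a field from "parsed" into the LogData
  }
  sink {
    sampler {
      rateLimit("DefaultSampler") { // illustrative sampler ID
        rpm 600
      }
    }
  }
}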
Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. 
Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. 
Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe parameter of timestamp can be a millisecond:\nfilter { // ... parser  extractor { timestamp parsed.time as String } } or a datetime string with a specified pattern:\nfilter { // ... parser  extractor { timestamp parsed.time as String, \u0026#34;yyyy-MM-dd HH:mm:ss\u0026#34; } }  layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following filebeat config yaml as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. 
Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting, and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","excerpt":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog …","ref":"/docs/main/v9.0.0/en/setup/backend/log-analyzer/","title":"Log Collection and Analysis"},{"body":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. 
Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","excerpt":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog …","ref":"/docs/main/v9.1.0/en/setup/backend/log-analyzer/","title":"Log Collection and Analysis"},{"body":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). 
When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","excerpt":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog …","ref":"/docs/main/v9.2.0/en/setup/backend/log-analyzer/","title":"Log Collection and Analysis"},{"body":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. 
The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","excerpt":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog …","ref":"/docs/main/v9.3.0/en/setup/backend/log-analyzer/","title":"Log Collection and Analysis"},{"body":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. 
The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","excerpt":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog …","ref":"/docs/main/v9.4.0/en/setup/backend/log-analyzer/","title":"Log Collection and Analysis"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  
// The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/latest/en/api/log-data-protocol/","title":"Log Data 
Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. 
The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/next/en/api/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, 
\u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.0.0/en/protocols/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: 
\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.1.0/en/protocols/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative 
Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.2.0/en/protocols/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.3.0/en/protocols/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  
// Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.4.0/en/api/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. 
message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.5.0/en/api/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. 
message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.6.0/en/api/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. 
message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.7.0/en/api/log-data-protocol/","title":"Log Data Protocol"},{"body":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender 
name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Support logback AsyncAppender(MDC also support), No additional configuration is required. Refer to the demo of logback.xml below. For details: Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, logback will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %tid or %X{tid]} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to active the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. 
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  set LogstashEncoder of logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  set LoggingEventCompositeJsonEncoder of logstash in logback-spring.xml for custom json format  1.add converter for %tid or %sw_ctx as child of  node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2.add json encoder for custom json format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/","title":"logback plugin"},{"body":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; 
\u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Support logback AsyncAppender(MDC also support), No additional configuration is required. Refer to the demo of logback.xml below. For details: Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, logback will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %tid or %X{tid]} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to active the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. 
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  set LogstashEncoder of logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  set LoggingEventCompositeJsonEncoder of logstash in logback-spring.xml for custom json format  1.add converter for %tid or %sw_ctx as child of  node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2.add json encoder for custom json format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/","title":"logback plugin"},{"body":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; 
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/","title":"logback plugin"},{"body":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current service. It currently supports the recognition of Logrus and Zap frameworks. If neither of these frameworks is present, it would output logs using Std Error.\nYou can learn about the configuration details through the \u0026ldquo;log\u0026rdquo; configuration item in the default settings.\nLogging Detection Log detection means that the logging plugin would automatically detect the usage of logs in your application. When the log type is set to auto, it would choose the appropriate log based on the creation rules of different frameworks. The selection rules vary depending on the framework:\n Logrus: It automatically selects the current logger when executing functions such as logrus.New, logger.SetOutput, or logger.SetFormatter. Zap: It automatically selects the current logger when executing functions such as zap.New, zap.NewNop, zap.NewProduction, zap.NewDevelopment, or zap.NewExample.  If there are multiple different logging systems in your current application, the last-called logging system would be chosen.\nThe configuration information is as follows:\n   Name Environment Key Default Value Description     log.type SW_LOG_TYPE auto The type of logging system. It currently supports auto, logrus, zap, and std.    
Agent with Logging system The integration of the Agent with logs includes the following two parts.\n Integrating Agent logs into the Service: Integrating the logs from the Agent into the framework used by the service. Integrating Tracing information into the Service: Integrating the information from Tracing into the service logs.  Agent logs into the Service Agent logs output the current running status of the Agent system, most of which are execution exceptions, for example communication anomalies between the Agent and the backend service, plugin execution exceptions, etc.\nIntegrating Agent logs into the service\u0026rsquo;s logging system can effectively help users quickly troubleshoot whether there are issues with the current Agent execution.\nTracing information into the Service The Agent also enhances the existing logging system. When the service outputs a log, if the current goroutine contains Tracing data, it is output together with the log. This helps users quickly locate the trace based on the Tracing data.\nTracing data The Tracing data includes the following information:\n ServiceName: Current service name. ServiceInstanceName: Current service instance name. TraceID: The current Trace ID. If there is no active trace, it outputs N/A. SegmentID: The Segment ID in the current Trace. If there is no active trace, it outputs N/A. SpanID: The Span ID currently being operated on. If there is no active trace, it outputs -1.  The output format is as follows: [${ServiceName},${ServiceInstanceName},${TraceID},${SegmentID},${SpanID}].\nThe following is an example of a log output when using Zap.NewProduction:\n{\u0026quot;level\u0026quot;:\u0026quot;info\u0026quot;,\u0026quot;ts\u0026quot;:1683641507.052247,\u0026quot;caller\u0026quot;:\u0026quot;gin/main.go:45\u0026quot;,\u0026quot;msg\u0026quot;:\u0026quot;test log\u0026quot;,\u0026quot;SW_CTX\u0026quot;:\u0026quot;[Your_ApplicationName,681e4178ee7311ed864facde48001122@192.168.50.193,6f13069eee7311ed864facde48001122,6f13070cee7311ed864facde48001122,0]\u0026quot;} The configuration information is as follows:\n   Name Environment Key Default Value Description     log.tracing.enable SW_AGENT_LOG_TRACING_ENABLE true Whether to automatically integrate Tracing information into the logs.   log.tracing.key SW_AGENT_LOG_TRACING_KEY SW_CTX The key of the Tracing information in the log.    Log Upload The Agent reports the following two types of logs to the SkyWalking backend for storage and querying:\n Application Logs: Support for various logging frameworks; logs are reported along with the distributed tracing information related to the current request. Only logs matching the current system log level are output. Agent Logs: The logs generated by the Agent itself.  The current configuration options are as follows:\n   Name Environment Key Default Value Description     log.reporter.enable SW_LOG_REPORTER_ENABLE true Whether to enable log reporting.   log.reporter.label_keys SW_LOG_REPORTER_LABEL_KEYS  By default, no fields are reported. To specify the fields that need to be reported, provide a comma-separated list of configuration item keys.    ","excerpt":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current …","ref":"/docs/skywalking-go/latest/en/advanced-features/logging-setup/","title":"Logging Setup"},
{"body":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. The LogQL Service exposes the Loki Querying HTTP APIs, including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana) can obtain logs through the LogQL Service.\nBecause SkyWalking\u0026rsquo;s log mechanism is different from Loki\u0026rsquo;s (metric extraction, storage, etc.), the LogQL implemented by SkyWalking is not a full-featured LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compares it to the LogQL official documentation. 
If not mentioned, it is not supported by default.\nLog queries The picture below shows the LogQL syntax for log queries. The expression supported by LogQL is composed of the following parts (expressions marked with [✅] are implemented in SkyWalking):\n stream selector: The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: The label filter expression allows filtering log lines using their original and extracted labels. parser: The parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line format: The line format expression can rewrite the log line content by using the text/template format. label format: The label format expression can rename, modify or add labels. drop labels: The drop expression drops the given labels in the pipeline.  The stream selector operators supported by LogQL are the following (operators marked with [✅] are implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operators supported by LogQL are the following (operators marked with [✅] are implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log queries:\n# query service instance logs with a specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs that contain a keyword in the content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs that do not contain a keyword in the content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs that contain keyword A but not keyword B in the content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries are used to calculate metrics from logs in Loki. In SkyWalking, LAL (Log Analysis Language) is recommended instead, so metric-query LogQL is not supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. This differs from Loki: in Loki, this API queries all labels used in stream selectors, while in SkyWalking it only queries log tags. 
Other metadata (service, service_instance, endpoint) queries are provided by the PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query the values of a log tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with a LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit number of log lines returned in a query no   direction log order, FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","excerpt":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL …","ref":"/docs/main/latest/en/api/logql-service/","title":"LogQL Service"},
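As a usage sketch of the Range queries API above, here is a minimal, hypothetical Java 11+ HTTP client call; the base URL "http://oap-host:3100" is a placeholder assumption (use the address where your OAP server exposes the LogQL Service), and the query values mirror the example parameters from the doc:

import java.net.URI;
import java.net.URLEncoder;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;

public class LogQLRangeQuery {
    public static void main(String[] args) throws Exception {
        // Placeholder endpoint; replace with the OAP server's LogQL Service address.
        String base = "http://oap-host:3100";
        // LogQL stream selector, URL-encoded because it is passed as a query parameter.
        String query = URLEncoder.encode("{service=\"agent::songs\"}", StandardCharsets.UTF_8);
        String url = base + "/loki/api/v1/query_range"
                + "?query=" + query
                + "&start=1690947455457000000"   // start timestamp in nanoseconds
                + "&end=1690947671936000000"     // end timestamp in nanoseconds
                + "&limit=100"
                + "&direction=BACKWARD";

        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(URI.create(url)).GET().build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());

        // The response body is the "streams" result structure shown above.
        System.out.println(response.statusCode());
        System.out.println(response.body());
    }
}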
If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","excerpt":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL …","ref":"/docs/main/next/en/api/logql-service/","title":"LogQL Service"},{"body":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. 
If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","excerpt":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL …","ref":"/docs/main/v9.6.0/en/api/logql-service/","title":"LogQL Service"},{"body":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. 
If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
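For instance, once tag values such as ERROR or WARN have been discovered through this API, the line filter operators listed above can be used to grep for them in the log content. The selector and pattern below are only an illustration and are not taken from the official examples:\n{service=\u0026quot;$service\u0026quot;} |~ \u0026quot;ERROR|WARN\u0026quot;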
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","excerpt":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL …","ref":"/docs/main/v9.7.0/en/api/logql-service/","title":"LogQL Service"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  Below is the archived list.\n Go2Sky. Since Jun 14, 2023.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. 
…","ref":"/docs/main/latest/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  Below is the archived list.\n Go2Sky. Since Jun 14, 2023.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. …","ref":"/docs/main/next/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK. …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK. …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK. …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. 
…","ref":"/docs/main/v9.3.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  Below is the archived list.\n Go2Sky. Since Jun 14, 2023.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. 
But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/latest/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/next/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an important role in today\u0026rsquo;s distributed system, in order to reduce the length and latency of blocking RPC, and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 , to provide performance monitoring for Message Queue system.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an important …","ref":"/docs/main/v9.0.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. 
But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.1.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.2.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.3.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. 
But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.4.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.5.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.6.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. 
But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.7.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. 
Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
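As a sketch of the kind of MAL expression that typically fills the exp field in the schema below (the metric name, tag prefix and layer here are illustrative and not taken from a shipped rule file):\nnode_cpu_seconds_total.rate('PT1M').tag({tags -\u0026gt; tags.host_name = 'vm::' + tags.host_name}).service(['host_name'], Layer.OS_LINUX)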
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/latest/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions) count (calculate the count over dimensions, the last tag will be counted)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20  Note, aggregation operations affect the samples from one bulk only. If the metrics are reported parallel from multiple instances/nodes through different SampleFamily, this aggregation would NOT work.\nIn the best practice for this scenario, build the metric with labels that represent each instance/node. Then use the AggregateLabels Operation in MQE to aggregate the metrics.\n Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. 
le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN MAX MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. 
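Tying the histogram, histogram_percentile and metric level functions above together, a sketch of a complete pipeline (the metric and tag names are illustrative, not from a shipped rule):\nhttp_server_request_duration.sum(['le', 'service']).histogram(le: 'le').histogram_percentile([50, 90, 99]).service(['service'], Layer.GENERAL) Expressions of this shape are what the rule files described in this section contain.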
The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/next/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  
For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. 
Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-east and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate over all label dimensions or preserve distinct dimensions by inputting by parameter.\n\u0026lt;aggr-op\u0026gt;(by: \u0026lt;tag1, tag2, ...\u0026gt;) Example expression:\ninstance_trace_count.sum(by: ['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). 
Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nDown Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument.  More Examples Please refer to OAP Self-Observability\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  
For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. 
Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter.\n\u0026lt;aggr-op\u0026gt;(by: \u0026lt;tag1, tag2, ...\u0026gt;) Example expression:\ninstance_trace_count.sum(by: ['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). 
Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nDown Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument.  More Examples Please refer to OAP Self-Observability\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. 
For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. 
For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter.\n\u0026lt;aggr-op\u0026gt;(by: \u0026lt;tag1, tag2, ...\u0026gt;) Example expression:\ninstance_trace_count.sum(by: ['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. 
The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. 
endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  More Examples Please refer to OAP Self-Observability\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_label) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts the layer label from the fourth argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregating raw samples to the minute level, but also to expressing minute-level data at higher levels, such as hour and day.\nThe down sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function These functions extract level-relevant labels from metric labels, then inform the meter-system of the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from the Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from the Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from the Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_label) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts the layer label from the fourth argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint includes DetectPoint.CLIENT and DetectPoint.SERVER; extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from the Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts the DetectPoint label from the first argument (the label value should be client or server), extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels of the source and destination from the fourth and fifth arguments.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
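As another hedged illustration of the functions described above, the sketch below turns le-tagged histogram buckets into meter-system percentiles (the metric name, percentile list and Layer value are assumptions, not from a shipped rule):

metricsRules:
  # derives p50/p90/p99 from the le-tagged buckets and registers the result at service level
  - name: http_duration_percentile
    exp: http_request_duration_seconds_bucket.histogram(le: 'le').histogram_percentile([50, 90, 99]).service(['service'], Layer.GENERAL)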
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregating raw samples to the minute level, but also to expressing minute-level data at higher levels, such as hour and day.\nThe down sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function These functions extract level-relevant labels from metric labels, then inform the meter-system of the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from the Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from the Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from the Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_label) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts the layer label from the fourth argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint includes DetectPoint.CLIENT and DetectPoint.SERVER; extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from the Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts the DetectPoint label from the first argument (the label value should be client or server), extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels of the source and destination from the fourth and fifth arguments.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
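To illustrate the K8s tag manipulator and the rate function described above, a hypothetical rule might read as follows (the metric and tag names come from the retagByK8sMeta example above; the Layer value, duration and rule name are assumptions):

metricsRules:
  # per-second CPU usage over the last minute, regrouped by the owning K8s service
  - name: k8s_service_cpu_usage
    exp: container_cpu_usage_seconds_total.retagByK8sMeta('service', K8sRetagType.Pod2Service, 'pod', 'namespace').rate('PT1M').sum(['service']).service(['service'], Layer.K8S)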
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  
string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. Zabbix receiver  ","excerpt":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as …","ref":"/docs/main/latest/en/api/meter/","title":"Meter APIs"},{"body":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. 
OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. Zabbix receiver  ","excerpt":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as …","ref":"/docs/main/next/en/api/meter/","title":"Meter APIs"},{"body":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","excerpt":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as …","ref":"/docs/main/v9.4.0/en/api/meter/","title":"Meter APIs"},{"body":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","excerpt":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as …","ref":"/docs/main/v9.5.0/en/api/meter/","title":"Meter APIs"},{"body":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","excerpt":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as …","ref":"/docs/main/v9.6.0/en/api/meter/","title":"Meter APIs"},{"body":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","excerpt":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as …","ref":"/docs/main/v9.7.0/en/api/meter/","title":"Meter APIs"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/latest/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nThere are following known API libs to report meter telemetry data:\n SkyWalking Java Meter toolkit APIs Spring MicroMeter Observations APIs works with OAP MicroMeter Observations setup  Agents Bundled Meters All following agents and components have built-in meters reporting to the OAP through Meter APIs.\n Go agent for Go VM metrics Python agent for PVM metrics Java agent with Spring micrometer toolkit Java agent for datasource metrics Java agent for thread-pool metrics Rover(eBPF) agent for metrics used continues profiling Satellite proxy self-observability metrics  Configuration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  
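For readability, the agent-analyzer activation snippet shown earlier in this section would look roughly like this when laid out with normal YAML indentation (the file name is the same placeholder used above, not a shipped file):

agent-analyzer:
  selector: ${SW_AGENT_ANALYZER:default}
  default:
    # ... take care of other analyzers
    # one or more files from meter-analyzer-config, without the .yaml extension, separated by ","
    meterAnalyzerActiveFiles: ${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}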
","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/next/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by Manual Meter API. Custom metrics collected cannot be used directly, they should be configured in meter-analyzer-config configuration files, which is described in next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. 
If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. 
The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. 
…","ref":"/docs/main/v9.2.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. 
If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   
MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. 
The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. 
If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. 
Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/latest/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/next/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. 
The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. 
Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. 
In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. 
percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. 
The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
For example, test.meter will be sent as test_meter.\n  Milliseconds are used as the time unit.\n  Adapted Micrometer data conventions:\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same as the counter   Gauges Gauges name Gauges Same as the gauge   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Count of finished executions    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total duration of finished executions    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of finished executions    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execution duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Count of executing tasks    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter Total duration of executing tasks    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Duration of the current longest-running task   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Count of finished timer executions    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Total duration of finished timer executions   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Sum of recorded amounts    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max recorded amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the recorded amounts     Data conventions not adapted:     Micrometer data type Data type     LongTaskTimer Histogram    ","excerpt":"Metrics  Add the toolkit dependency, using Maven or Gradle:  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-micrometer/","title":"Metrics"},{"body":"Metrics  Add the toolkit dependency, using Maven or Gradle:  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry; it forwards the metrics collected by Micrometer to the OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If some counters should be reported as rates on the agent side, list their names in SkywalkingConfig SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); SkywalkingMeterRegistry rateRegistry = new SkywalkingMeterRegistry(config); // A composite registry can combine multiple meter registries, e.g. report to both SkyWalking and Prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Snake case is used as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","excerpt":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer/","title":"Metrics"},{"body":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","excerpt":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-micrometer/","title":"Metrics"},{"body":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","excerpt":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-micrometer/","title":"Metrics"},{"body":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","excerpt":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-micrometer/","title":"Metrics"},{"body":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and analysis. In the real world, many may want to forward their data to a 3rd party system for an in-depth analysis or otherwise. Metrics Exporter has made that possible.\nMetrics exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporters:\n gRPC exporter  gRPC exporter gRPC exporter uses SkyWalking\u0026rsquo;s native exporter service definition. Here is the proto definition.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}message ExportMetricValue { string metricName = 1; string entityName = 2; string entityId = 3; ValueType type = 4; int64 timeBucket = 5; int64 longValue = 6; double doubleValue = 7; repeated int64 longValues = 8;}message SubscriptionsResp { repeated SubscriptionMetric metrics = 1;}message SubscriptionMetric { string metricName = 1; EventType eventType = 2;}enum ValueType { LONG = 0; DOUBLE = 1; MULTI_LONG = 2;}enum EventType { // The metrics aggregated in this bulk, not include the existing persistent data.  INCREMENT = 0; // Final result of the metrics at this moment.  TOTAL = 1;}message SubscriptionReq {}message ExportResponse {}To activate the exporter, you should add this into your application.yml\nexporter:grpc:targetHost:127.0.0.1targetPort:9870 targetHost:targetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  
Target exporter service Subscription implementation Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\nExport implementation Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n","excerpt":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and …","ref":"/docs/main/v9.0.0/en/setup/backend/metrics-exporter/","title":"Metrics Exporter"},{"body":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and analysis. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Metrics Exporter has made that possible.\nThe metrics exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporters:\n gRPC exporter  gRPC exporter gRPC exporter uses SkyWalking\u0026rsquo;s native exporter service definition. Here is the proto definition.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}message ExportMetricValue { string metricName = 1; string entityName = 2; string entityId = 3; ValueType type = 4; int64 timeBucket = 5; int64 longValue = 6; double doubleValue = 7; repeated int64 longValues = 8;}message SubscriptionsResp { repeated SubscriptionMetric metrics = 1;}message SubscriptionMetric { string metricName = 1; EventType eventType = 2;}enum ValueType { LONG = 0; DOUBLE = 1; MULTI_LONG = 2;}enum EventType { // The metrics aggregated in this bulk, not include the existing persistent data.  INCREMENT = 0; // Final result of the metrics at this moment.  TOTAL = 1;}message SubscriptionReq {}message ExportResponse {}To activate the exporter, you should add this into your application.yml\nexporter:grpc:targetHost:127.0.0.1targetPort:9870 targetHost:targetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service Subscription implementation Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\nExport implementation Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n","excerpt":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and …","ref":"/docs/main/v9.1.0/en/setup/backend/metrics-exporter/","title":"Metrics Exporter"},{"body":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and analysis. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. 
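To make the receiver side concrete, here is a minimal sketch of a target exporter service built from the proto definition above. The MetricExportServiceGrpc base class, the message builders, and the package layout are assumptions based on standard protoc-gen-grpc-java output, so adjust them to whatever your code generation actually produces.

import io.grpc.Server;
import io.grpc.ServerBuilder;
import io.grpc.stub.StreamObserver;

public class MetricReceiver extends MetricExportServiceGrpc.MetricExportServiceImplBase {

    @Override
    public void subscription(SubscriptionReq request, StreamObserver<SubscriptionsResp> responseObserver) {
        // Returning an empty metric list means: export all metrics, in the incremental event type.
        responseObserver.onNext(SubscriptionsResp.newBuilder().build());
        responseObserver.onCompleted();
    }

    @Override
    public StreamObserver<ExportMetricValue> export(StreamObserver<ExportResponse> responseObserver) {
        // When the OAP runs as a cluster this is called concurrently, so keep the handler thread-safe.
        return new StreamObserver<ExportMetricValue>() {
            @Override
            public void onNext(ExportMetricValue value) {
                // Follow #type to choose #longValue or #doubleValue (MULTI_LONG omitted for brevity).
                double v = value.getType() == ValueType.DOUBLE ? value.getDoubleValue() : value.getLongValue();
                System.out.printf("%s %s %d %f%n",
                        value.getMetricName(), value.getEntityName(), value.getTimeBucket(), v);
            }

            @Override
            public void onError(Throwable t) {
                // Log and drop in this sketch.
            }

            @Override
            public void onCompleted() {
                responseObserver.onNext(ExportResponse.newBuilder().build());
                responseObserver.onCompleted();
            }
        };
    }

    public static void main(String[] args) throws Exception {
        // Listen on the port configured as targetPort in application.yml (9870 in the snippet above).
        Server server = ServerBuilder.forPort(9870).addService(new MetricReceiver()).build().start();
        server.awaitTermination();
    }
}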
Metrics Exporter has made that possible.\nThe metrics exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporters:\n gRPC exporter  gRPC exporter gRPC exporter uses SkyWalking\u0026rsquo;s native exporter service definition. Here is the proto definition.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}message ExportMetricValue { string metricName = 1; string entityName = 2; string entityId = 3; ValueType type = 4; int64 timeBucket = 5; int64 longValue = 6; double doubleValue = 7; repeated int64 longValues = 8;}message SubscriptionsResp { repeated SubscriptionMetric metrics = 1;}message SubscriptionMetric { string metricName = 1; EventType eventType = 2;}enum ValueType { LONG = 0; DOUBLE = 1; MULTI_LONG = 2;}enum EventType { // The metrics aggregated in this bulk, not include the existing persistent data.  INCREMENT = 0; // Final result of the metrics at this moment.  TOTAL = 1;}message SubscriptionReq {}message ExportResponse {}To activate the exporter, you should add this into your application.yml\nexporter:grpc:targetHost:127.0.0.1targetPort:9870 targetHost:targetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service Subscription implementation Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\nExport implementation Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n","excerpt":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and …","ref":"/docs/main/v9.2.0/en/setup/backend/metrics-exporter/","title":"Metrics Exporter"},{"body":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics For now, we only have a single anonymous label with multi label values in a labeled metric. To be able to use it in expressions, define _ as the anonymous label name (key).\nExpression:\n\u0026lt;metric_name\u0026gt;{_=\u0026#39;\u0026lt;label_value_1\u0026gt;,...\u0026#39;} {_='\u0026lt;label_value_1\u0026gt;,...'} is the selected label value of the metric. 
If it is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, we can use the following expression:\nservice_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES, with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists whether the different result types of the input expressions can be combined by this operation, and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions on both sides of the operator are TIME_SERIES_VALUES with labels, they should have the same labels for calculation (the sketch below illustrates the point-by-point pairing).\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. 
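The following sketch illustrates, in plain Java, the point-by-point semantics just described: a binary operator pairs two time series value by value, and a compare operator yields 1 for true and 0 for false at each point. It mirrors the documented behaviour (including the service_resp_time greater-than-3000 sample further down) but is an illustration only, not OAP code; the class and method names are made up for the example.

import java.util.LinkedHashMap;
import java.util.Map;

public class MqePointwiseDemo {
    // A series maps a time bucket id to a value, e.g. "1691658000000" -> 2500.0.
    static Map<String, Double> subtract(Map<String, Double> left, Map<String, Double> right) {
        Map<String, Double> out = new LinkedHashMap<>();
        // Binary operation: combine the two series point by point on matching time buckets.
        left.forEach((time, v) -> {
            Double r = right.get(time);
            if (r != null) {
                out.put(time, v - r);
            }
        });
        return out;
    }

    static Map<String, Integer> greaterThan(Map<String, Double> series, double threshold) {
        Map<String, Integer> out = new LinkedHashMap<>();
        // Compare operation: 1 means true, 0 means false.
        series.forEach((time, v) -> out.put(time, v > threshold ? 1 : 0));
        return out;
    }

    public static void main(String[] args) {
        Map<String, Double> respTime = new LinkedHashMap<>();
        respTime.put("1691658000000", 2500.0);
        respTime.put("1691661600000", 3500.0);
        // Prints {1691658000000=0, 1691661600000=1}, matching the compare example further down.
        System.out.println(greaterThan(respTime, 3000));
    }
}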
The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the 
ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results.\nExpression:\nrelabel(Expression, _=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) _ is the new label of the metric after the label is relabeled, the order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, and rename the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;}, _=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, parameter)    parameter Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value):\naggregate_labels(total_commands_rate, SUM) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nTrend Operation Trend Operation takes an expression and performs a trend calculation on its results.\nExpression:\n\u0026lt;Trend-Operator\u0026gt;(Metrics Expression, time_range) time_range is the positive int of the calculated range. 
The unit will automatically align with to the query Step, for example, if the query Step is MINUTE, the unit of time_range is minute.\n   Operator Definition ExpressionResultType     increase returns the increase in the time range in the time series TIME_SERIES_VALUES   rate returns the per-second average rate of increase in the time range in the time series TIME_SERIES_VALUES    For example: If we want to query the increase value of the service_cpm metric in 2 minute(assume the query Step is MINUTE), we can use the following expression:\nincrease(service_cpm, 2) If the query duration is 3 minutes, from (T1 to T3) and the metric has values in time series:\nV(T1-2), V(T1-1), V(T1), V(T2), V(T3) then the expression result is:\nV(T1)-V(T1-2), V(T2)-V(T1-1), V(T3)-V(T1) Note:\n If the calculated metric value is empty, the result will be empty. Assume in the T3 point, the increase value = V(T3)-V(T1), If the metric V(T3) or V(T1) is empty, the result value in T3 will be empty.  Result Type TIME_SERIES_VALUES.\nExpression Query Example Labeled Value Metrics service_percentile{_=\u0026#39;0,1\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: 
null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the percentile and the average value, the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} - avg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - service_percentile{_=\u0026#39;0,1\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, 
\u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","excerpt":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each …","ref":"/docs/main/latest/en/api/metrics-query-expression/","title":"Metrics Query Expression(MQE) Syntax"},{"body":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics Since v10.0.0, SkyWalking supports multiple labels metrics. We could query the specific labels of the metric by the following expression.\nExpression:\n\u0026lt;metric_name\u0026gt;{\u0026lt;label1_name\u0026gt;=\u0026#39;\u0026lt;label1_value_1\u0026gt;,...\u0026#39;, \u0026lt;label2_name\u0026gt;=\u0026#39;\u0026lt;label2_value_1\u0026gt;,...\u0026#39;,\u0026lt;label2...} {\u0026lt;label1_name\u0026gt;='\u0026lt;label_value_1\u0026gt;,...'} is the selected label name/value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: The k8s_cluster_deployment_status metric has labels namespace, deployment and status. If we want to query all deployment metric value with namespace=skywalking-showcase and status=true, we can use the following expression:\nk8s_cluster_deployment_status{namespace=\u0026#39;skywalking-showcase\u0026#39;, status=\u0026#39;true\u0026#39;} We also could query the label with multiple values by separating the values with ,: If we want to query the service_percentile metric with the label name p and values 50,75,90,95,99, we can use the following expression:\nservice_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. 
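For completeness, here is a rough sketch of submitting one of the labeled expressions above to the OAP GraphQL endpoint; the execExpression field and the selected result fields mirror the response samples shown on this page, while the endpoint URL, port, Entity/Duration argument shapes, and the time format are assumptions to verify against the metrics-v3 query protocol before use.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class MqeQueryDemo {
    public static void main(String[] args) throws Exception {
        // GraphQL document; the query string below contains no characters that need JSON escaping.
        String query = "query ($expression: String!, $entity: Entity!, $duration: Duration!) {"
                + " execExpression(expression: $expression, entity: $entity, duration: $duration) {"
                + " type error results { metric { labels { key value } } values { id value } } } }";

        // Entity and Duration field names/formats are assumptions; adjust to your OAP version.
        String variables = "{"
                + "\"expression\": \"service_percentile{p='50,75,90,95,99'}\","
                + "\"entity\": {\"scope\": \"Service\", \"serviceName\": \"your-service\", \"normal\": true},"
                + "\"duration\": {\"start\": \"2023-08-10 0900\", \"end\": \"2023-08-10 1000\", \"step\": \"MINUTE\"}"
                + "}";

        String body = "{\"query\": \"" + query + "\", \"variables\": " + variables + "}";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12800/graphql")) // default OAP HTTP port; assumption
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}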
The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions result on both sides of the operator are with labels, they should have the same labels for calculation. If the labels match, will reserve left expression result labels and the calculated value. Otherwise, will return empty value.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result 
SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result SINGLE_VALUE   max select the maximum from the result SINGLE_VALUE   min select the minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators affect the ExpressionResultType; please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follows the input expression   ceil returns the smallest integer value that is greater than or equal to the result  follows the input expression   floor returns the largest integer value that is less than or equal to the result  follows the input expression   round returns the result rounded to specific decimal places places: a positive integer specifying the decimal places of the result follows the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators affect the ExpressionResultType; please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs a calculation to get the TopN of Services/Instances/Endpoints. The result depends on the entity condition in the query.\n Global TopN:  The entity is empty. The result is the topN Services/Instances/Endpoints in the whole traffic. Notice: If querying an Endpoints metric, the global candidate set could be huge; please use it carefully.   Service\u0026rsquo;s Instances/Endpoints TopN:  The serviceName in the entity is not empty. The result is the topN Instances/Endpoints of the service.    Expression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results; it should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the current service\u0026rsquo;s top 10 instances with the highest service_instance_cpm metric value, we can use the following expression under a specific service:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results. 
Since v10.0.0, SkyWalking supports relabel multiple labels.\nExpression:\nrelabel(Expression, \u0026lt;target_label_name\u0026gt;=\u0026#39;\u0026lt;origin_label_value_1\u0026gt;,...\u0026#39;, \u0026lt;new_label_name\u0026gt;=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) The order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 50,75,90,95,99, and rename the label name to percentile and the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;}, p=\u0026#39;50,75,90,95,99\u0026#39;, percentile=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, AggregateType\u0026lt;Optional\u0026gt;(\u0026lt;label1_name\u0026gt;,\u0026lt;label2_name\u0026gt;...))  AggregateType is the type of the aggregation operation. \u0026lt;label1_name\u0026gt;,\u0026lt;label2_name\u0026gt;... is the label names that need to be aggregated. If not specified, all labels will be aggregated.     AggregateType Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value): Aggregating all the labels:\naggregate_labels(total_commands_rate, sum) Also, we can aggregate by the cmd label:\naggregate_labels(total_commands_rate, sum(cmd)) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nIsPresent Operation IsPresent operation represents that in a list of metrics, if any expression has a value, it would return 1 in the result; otherwise, it would return 0.\nExpression:\nis_present([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: When the meter does not exist or the metrics has no value, it would return 0. 
However, if the metrics list contains meter with values, it would return 1.\nis_present(not_existing, existing_without_value, existing_with_value) Result Type The result type is SINGLE_VALUE, and the result(1 or 0) in the first value.\nTrend Operation Trend Operation takes an expression and performs a trend calculation on its results.\nExpression:\n\u0026lt;Trend-Operator\u0026gt;(Metrics Expression, time_range) time_range is the positive int of the calculated range. The unit will automatically align with to the query Step, for example, if the query Step is MINUTE, the unit of time_range is minute.\n   Operator Definition ExpressionResultType     increase returns the increase in the time range in the time series TIME_SERIES_VALUES   rate returns the per-second average rate of increase in the time range in the time series TIME_SERIES_VALUES    For example: If we want to query the increase value of the service_cpm metric in 2 minute(assume the query Step is MINUTE), we can use the following expression:\nincrease(service_cpm, 2) If the query duration is 3 minutes, from (T1 to T3) and the metric has values in time series:\nV(T1-2), V(T1-1), V(T1), V(T2), V(T3) then the expression result is:\nV(T1)-V(T1-2), V(T2)-V(T1-1), V(T3)-V(T1) Note:\n If the calculated metric value is empty, the result will be empty. Assume in the T3 point, the increase value = V(T3)-V(T1), If the metric V(T3) or V(T1) is empty, the result value in T3 will be empty.  Result Type TIME_SERIES_VALUES.\nExpression Query Example Labeled Value Metrics service_percentile{p=\u0026#39;50,95\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{p=\u0026#39;50,75\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, 
\u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{p=\u0026#39;50,75\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the percentile and the average value, the expression is:\nservice_percentile{p=\u0026#39;50,75\u0026#39;} - avg(service_percentile{p=\u0026#39;50,75\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - 
service_percentile{p=\u0026#39;50,75\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","excerpt":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each …","ref":"/docs/main/next/en/api/metrics-query-expression/","title":"Metrics Query Expression(MQE) Syntax"},{"body":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics Expression:\n\u0026lt;metric_name\u0026gt;{label=\u0026#39;\u0026lt;label_1\u0026gt;,...\u0026#39;} label is the selected label of the metric. If label is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the labels 0,1,2,3,4, we can use the following expression:\nservice_percentile{label=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the labels to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation Binary Operation is an operation that takes two expressions and performs a calculation on their results. 
The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type The result type of the expression please refer to the following table.\nBinary Operation Rules The following table listed if the difference result types of the input expressions could do this operation and the result type after the operation. The expression could on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Aggregation Operation Aggregation Operation takes an expression and performs aggregate calculation on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operator could impact the ExpressionResultType, please refer to the above table.\nFunction Operation Function Operation takes an expression and performs function calculation on its results.\nExpression:\n\u0026lt;Function-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operator could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. 
The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replace the labels to new labels on its results.\nExpression:\nrelabel(Expression, label=\u0026#39;\u0026lt;new_label_1\u0026gt;,...\u0026#39;) label is the new labels of the metric after the label is relabeled, the order of the new labels should be the same as the order of the labels in the input expression result.\nFor example: If we want to query the service_percentile metric with the labels 0,1,2,3,4, and rename the labels to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{label=\u0026#39;0,1,2,3,4\u0026#39;}, label=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\n","excerpt":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each …","ref":"/docs/main/v9.5.0/en/api/metrics-query-expression/","title":"Metrics Query Expression(MQE) Syntax"},{"body":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics For now, we only have a single anonymous label with multi label values in a labeled metric. To be able to use it in expressions, define _ as the anonymous label name (key).\nExpression:\n\u0026lt;metric_name\u0026gt;{_=\u0026#39;\u0026lt;label_value_1\u0026gt;,...\u0026#39;} {_='\u0026lt;label_value_1\u0026gt;,...'} is the selected label value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, we can use the following expression:\nservice_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. 
The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result 
SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results.\nExpression:\nrelabel(Expression, _=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) _ is the new label of the metric after the label is relabeled, the order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, and rename the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;}, _=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. 
It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, parameter)    parameter Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value):\naggregate_labels(total_commands_rate, SUM) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nExpression Query Example Labeled Value Metrics service_percentile{_=\u0026#39;0,1\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, 
\u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the percentile and the average value, the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} - avg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression 
is:\nservice_resp_time - service_percentile{_=\u0026#39;0,1\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","excerpt":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each …","ref":"/docs/main/v9.6.0/en/api/metrics-query-expression/","title":"Metrics Query Expression(MQE) Syntax"},{"body":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics For now, we only have a single anonymous label with multi label values in a labeled metric. To be able to use it in expressions, define _ as the anonymous label name (key).\nExpression:\n\u0026lt;metric_name\u0026gt;{_=\u0026#39;\u0026lt;label_value_1\u0026gt;,...\u0026#39;} {_='\u0026lt;label_value_1\u0026gt;,...'} is the selected label value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, we can use the following expression:\nservice_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. 
The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result 
SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results.\nExpression:\nrelabel(Expression, _=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) _ is the new label of the metric after the label is relabeled, the order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, and rename the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;}, _=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. 
It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, parameter)    parameter Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value):\naggregate_labels(total_commands_rate, SUM) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nTrend Operation Trend Operation takes an expression and performs a trend calculation on its results.\nExpression:\n\u0026lt;Trend-Operator\u0026gt;(Metrics Expression, time_range) time_range is the positive int of the calculated range. The unit will automatically align with to the query Step, for example, if the query Step is MINUTE, the unit of time_range is minute.\n   Operator Definition ExpressionResultType     increase returns the increase in the time range in the time series TIME_SERIES_VALUES   rate returns the per-second average rate of increase in the time range in the time series TIME_SERIES_VALUES    For example: If we want to query the increase value of the service_cpm metric in 2 minute(assume the query Step is MINUTE), we can use the following expression:\nincrease(service_cpm, 2) If the query duration is 3 minutes, from (T1 to T3) and the metric has values in time series:\nV(T1-2), V(T1-1), V(T1), V(T2), V(T3) then the expression result is:\nV(T1)-V(T1-2), V(T2)-V(T1-1), V(T3)-V(T1) Note:\n If the calculated metric value is empty, the result will be empty. Assume in the T3 point, the increase value = V(T3)-V(T1), If the metric V(T3) or V(T1) is empty, the result value in T3 will be empty.  
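As an illustrative numeric sketch of the increase calculation above (the sample values are hypothetical, not from the original page): assume the query Step is MINUTE and service_cpm has the values 100, 120, 130, 150, 170 at T1-2, T1-1, T1, T2, T3. Then increase(service_cpm, 2) yields 30 (130-100) at T1, 30 (150-120) at T2 and 40 (170-130) at T3. Following the per-second definition given for rate, rate(service_cpm, 2) would presumably divide each increase by the 2-minute window in seconds (120s), giving roughly 0.25, 0.25 and 0.33.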
Result Type TIME_SERIES_VALUES.\nExpression Query Example Labeled Value Metrics service_percentile{_=\u0026#39;0,1\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference 
between the percentile and the average value, the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} - avg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - service_percentile{_=\u0026#39;0,1\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","excerpt":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. 
Each …","ref":"/docs/main/v9.7.0/en/api/metrics-query-expression/","title":"Metrics Query Expression(MQE) Syntax"},{"body":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains …","ref":"/docs/main/latest/en/setup/backend/micrometer-observations/","title":"MicroMeter Observations setup"},{"body":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the SkyWalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. 
JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains …","ref":"/docs/main/next/en/setup/backend/micrometer-observations/","title":"MicroMeter Observations setup"},{"body":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains …","ref":"/docs/main/v9.4.0/en/setup/backend/micrometer-observations/","title":"MicroMeter Observations setup"},{"body":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. 
Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains …","ref":"/docs/main/v9.5.0/en/setup/backend/micrometer-observations/","title":"MicroMeter Observations setup"},{"body":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains …","ref":"/docs/main/v9.6.0/en/setup/backend/micrometer-observations/","title":"MicroMeter Observations setup"},{"body":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. 
If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains …","ref":"/docs/main/v9.7.0/en/setup/backend/micrometer-observations/","title":"MicroMeter Observations setup"},{"body":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. 
/mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. 
generatedList.size() == size.\nitem (object) item is a template that will be used as a prototype to generate the list items; for example, when generating a list of Tag, item should be the prototype of a Tag, which can itself be composed from the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed string value.\nsequence (long) The sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence produces strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they increase. Adding the fluctuation property to the generator adds a random number \u0026gt;= -fluctuation and \u0026lt;= fluctuation to each sequence element.\nFor example, min = 10, max = 15, step = 1 generates the sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate the sequence [10, 12, 11, 14, 13, 15].\n","excerpt":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for …","ref":"/docs/main/latest/en/setup/backend/backend-data-generator/","title":"Mock data generator for testing"},
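For illustration only, the sketch below shows how the generators documented above could be combined into a template and submitted as a size-bound task. The traceId, error, and tags fields and their generator options are the ones shown in this document; the file name my-template.json is a placeholder, and this is not the complete segment template schema — refer to the segment template in the repository for the full structure.

```shell
# Sketch only: a partial generator template combining the generators documented above.
# The real segment template requires more fields; see segment-template.json in the repository.
cat > my-template.json <<'EOF'
{
  "traceId": { "type": "uuid", "changingFrequency": "5" },
  "error":   { "type": "randomBool", "possibility": "0.9" },
  "tags": {
    "type": "randomList",
    "size": 5,
    "item": {
      "key":   { "type": "randomString", "length": "10", "prefix": "test_tag_",   "letters": true, "numbers": true, "domainSize": 10 },
      "value": { "type": "randomString", "length": "10", "prefix": "test_value_", "letters": true, "numbers": true }
    }
  }
}
EOF

# Submit a size-bound task: generate 20 segments and then finish.
curl -XPOST 'http://localhost:12800/mock-data/segments/tasks?size=20' \
     -H 'Content-Type: application/json' -d @my-template.json
```

If the server acknowledges the task, the returned requestId can later be passed to the DELETE endpoints shown above to cancel it.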
the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence produces strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding the fluctuation property to the generator adds a random number \u0026gt;= -fluctuation and \u0026lt;= fluctuation to each sequence element.\nFor example, min = 10, max = 15, step = 1 generates the sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate the sequence [10, 12, 11, 14, 13, 15].\n","excerpt":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-data-generator/","title":"Mock data generator for testing"},{"body":"Module Design Controller The controller composes all the steps declared in the configuration file, runs them progressively, and displays which step is currently running. If a step fails, the error message is shown as comprehensively as possible. An example of the output might be:\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? Generating Traffic - HTTP localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up the environment (by executing the cleanup command) no matter how the other steps end, even if they fail. The controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Steps According to the Controller above, E2E testing can be divided into the following steps.\nSetup Start the environment required for this E2E testing, such as the database, back-end process, API, etc.\nTwo ways to set up the environment are supported:\n compose:  Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute commands to set up the testing environment or help with verification, such as yq to evaluate the YAML format.   kind:  Start the KinD cluster according to the config files, or run on an existing Kubernetes cluster. Apply the resource files (--manifests) and/or run the custom init commands (--commands). Check the pods' readiness. Wait until all pods are ready according to the interval, etc.    Trigger Generate traffic by triggering the action; it could access an HTTP API or execute commands at an interval.\nIt could have these settings:\n interval: How frequently to trigger the action. times: How many times the action is triggered before aborting, on the condition that the trigger has always failed. 0=infinite. action: The action of the trigger.  Verify Verify that the data content matches the expected results, similar to a unit test assertion.\nIt could have these settings:\n actual: The actual data file. query: The query to get the actual data; it can run shell commands to generate the data. expected: The expected data file, which can specify matching rules to verify the actual content.  Cleanup This step requires the same options as the setup step so that it can clean up everything as needed. 
Such as destroy the environment, etc.\n","excerpt":"Module Design Controller The controller means composing all the steps declared in the configuration …","ref":"/docs/skywalking-infra-e2e/latest/en/concepts-and-designs/module-design/","title":"Module Design"},{"body":"Module Design Controller The controller means composing all the steps declared in the configuration file, it progressive and display which step is currently running. If it failed in a step, the error message could be shown, as much comprehensive as possible. An example of the output might be.\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? Generating Traffic - HTTP localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up the environment (by executing the cleanup command) no matter what status other commands are, even if they are failed, the controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Steps According to the content in the Controller, E2E Testing can be divided into the following steps.\nSetup Start the environment required for this E2E Testing, such as database, back-end process, API, etc.\nSupport two ways to set up the environment:\n compose:  Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify, such as yq help to eval the YAML format.   kind:  Start the KinD cluster according to the config files or Start on an existing kubernetes cluster. Apply the resources files (--manifests) or/and run the custom init command (--commands). Check the pods' readiness. Wait until all pods are ready according to the interval, etc.    Trigger Generate traffic by trigger the action, It could access HTTP API or execute commands with interval.\nIt could have these settings:\n interval: How frequency to trigger the action. times: How many times the operation is triggered before aborting on the condition that the trigger had failed always. 0=infinite. action: The action of the trigger.  Verify Verify that the data content is matching with the expected results. such as unit test assert, etc.\nIt could have these settings:\n actual: The actual data file. query: The query to get the actual data, could run shell commands to generate the data. expected: The expected data file, could specify some matching rules to verify the actual content.  Cleanup This step requires the same options in the setup step so that it can clean up all things necessarily. Such as destroy the environment, etc.\n","excerpt":"Module Design Controller The controller means composing all the steps declared in the configuration …","ref":"/docs/skywalking-infra-e2e/next/en/concepts-and-designs/module-design/","title":"Module Design"},{"body":"Module Design Controller The controller means composing all the steps declared in the configuration file, it progressive and display which step is currently running. If it failed in a step, the error message could be shown, as much comprehensive as possible. 
An example of the output might be.\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? Generating Traffic - HTTP localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up the environment (by executing the cleanup command) no matter what status other commands are, even if they are failed, the controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Steps According to the content in the Controller, E2E Testing can be divided into the following steps.\nSetup Start the environment required for this E2E Testing, such as database, back-end process, API, etc.\nSupport two ways to set up the environment:\n compose:  Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify, such as yq help to eval the YAML format.   kind:  Start the KinD cluster according to the config files or Start on an existing kubernetes cluster. Apply the resources files (--manifests) or/and run the custom init command (--commands). Check the pods' readiness. Wait until all pods are ready according to the interval, etc.    Trigger Generate traffic by trigger the action, It could access HTTP API or execute commands with interval.\nIt could have these settings:\n interval: How frequency to trigger the action. times: How many times the operation is triggered before aborting on the condition that the trigger had failed always. 0=infinite. action: The action of the trigger.  Verify Verify that the data content is matching with the expected results. such as unit test assert, etc.\nIt could have these settings:\n actual: The actual data file. query: The query to get the actual data, could run shell commands to generate the data. expected: The expected data file, could specify some matching rules to verify the actual content.  Cleanup This step requires the same options in the setup step so that it can clean up all things necessarily. Such as destroy the environment, etc.\n","excerpt":"Module Design Controller The controller means composing all the steps declared in the configuration …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/concepts-and-designs/module-design/","title":"Module Design"},{"body":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to process the telemetry data(metrics/traces/logs). Two pipes are not sharing data.\n Satellite --------------------------------------------------------------------- | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | --------------------------------------------------------------------- Modules Module is the core workers in Satellite. Module is constituted by the specific extension plugins. 
There are 3 modules in one namespace, which are Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing the data to Queue. So there are 2 kinds of Gatherer, which are ReceiverGatherer and FetcherGatherer. The Processor module is responsible for reading data from the queue and processing data by a series of filter chains. The Sender module is responsible for async processing and forwarding the data to the external services in the batch mode. After sending success, Sender would also acknowledge the offset of Queue in Gatherer.   Pipe -------------------------------------------------------------------- | ---------- ----------- -------- | | | Gatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ---------- ----------- -------- | -------------------------------------------------------------------- LifeCycle\n Prepare: Prepare phase is to do some preparation works, such as register the client status listener to the client in ReceiverGatherer. Boot: Boot phase is to start the current module until receives a close signal. ShutDown: ShutDown phase is to close the used resources.  Plugins Plugin is the minimal components in the module. Satellite has 2 plugin catalogs, which are sharing plugins and normal plugins.\n a sharing plugin instance could be sharing with multiple modules in the different pipes. a normal plugin instance is only be used in a fixed module of the fixed pipes.  Sharing plugin Nowadays, there are 2 kinds of sharing plugins in Satellite, which are server plugins and client plugins. The reason why they are sharing plugins is to reduce the resource cost in connection. Server plugins are sharing with the ReceiverGatherer modules in the different pipes to receive the external requests. And the client plugins is sharing with the Sender modules in the different pipes to connect with external services, such as Kafka and OAP.\n Sharing Server Sharing Client -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- Normal plugin There are 7 kinds of normal plugins in Satellite, which are Receiver, Fetcher, Queue, Parser, Filter, Forwarder, and Fallbacker.\n Receiver: receives the input APM data from the request. Fetcher: fetch the APM data by fetching. Queue: store the APM data to ensure the data stability. Parser: supports some ways to parse data, such parse a csv file. Filter: processes the APM data. Forwarder: forwards the APM data to the external receiver, such as Kafka and OAP. Fallbacker: supports some fallback strategies, such as timer retry strategy.   
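To illustrate how these plugin kinds compose into a pipe, the following is a purely hypothetical configuration sketch; the keys and plugin names below are invented for illustration and do not reproduce the actual Satellite configuration schema or plugin catalog.

pipes:
  - gatherer:
      receiver: grpc-log-receiver          # Receiver plugin: accepts pushed APM data
      queue: memory-queue                  # Queue plugin: buffers events before processing
    processor:
      filters:                             # Filter plugins run as an ordered chain
        - sampling-filter
        - metadata-enrich-filter
    sender:
      forwarder: kafka-log-forwarder       # Forwarder plugin: ships events to Kafka/OAP
      fallbacker: timer-retry-fallbacker   # Fallbacker plugin: retry strategy on send failure

Refer to the Satellite setup documentation for the real schema and the list of built-in plugins.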
Gatherer Processor ------------------------------- ------------------------------------------- | ----------- --------- | | ----------- ----------- | | | Receiver | ==\u0026gt; | Queue | |==\u0026gt;| | Filter | ==\u0026gt; ... ==\u0026gt; | Filter | | | | /Fetcher | | Mem/File | | | ----------- ----------- | | ----------- ---------- | | || || | -------------------------------- | \\/\t\\/ | | --------------------------------------- | | | OutputEventContext | | | --------------------------------------- | ------------------------------------------- || \\/ Sender ------------------------------------------ | --- --- | | | B | | D | ----------------- | | | A | | I | |Segment Forwarder| | | | T | | S | | (Fallbacker) | | | | C | | P | ----------------- | | | H | =\u0026gt; | A | | ===\u0026gt; Kafka/OAP | | B | | T | =\u0026gt; ...... | | | U | | C | | | | F | | H | ----------------- | | | F | | E | | Meter Forwarder| | | | E | | R | | (Fallbacker | | | | R | | | ----------------- | | --- --- | ------------------------------------------ 1. The Fetcher/Receiver plugin would fetch or receive the input data. 2. The Parser plugin would parse the input data to SerializableEvent that is supported to be stored in Queue. 3. The Queue plugin stores the SerializableEvent. However, whether serializing depends on the Queue implements. For example, the serialization is unnecessary when using a Memory Queue. Once an event is pulled by the consumer of Queue, the event will be processed by the filters in Processor. 4. The Filter plugin would process the event to create a new event. Next, the event is passed to the next filter to do the same things until the whole filters are performed. All created events would be stored in the OutputEventContext. However, only the events labeled with RemoteEvent type would be forwarded by Forwarder. 5. After processing, the events in OutputEventContext would be stored in the BatchBuffer. When the timer is triggered or the capacity limit is reached, the events in BatchBuffer would be partitioned by EventType and sent to the different Forwarders, such as Segment Forwarder and Meter Forwarder. 6. The Follower in different Senders would share with the remote client to avoid make duplicate connections and have the same Fallbacker(FallBack strategy) to process data. When all forwarders send success or process success in Fallbacker, the dispatcher would also ack the batch is a success. ============================================================================================ ","excerpt":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/module_design/","title":"Module Design"},{"body":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to process the telemetry data(metrics/traces/logs). Two pipes are not sharing data.\n Satellite --------------------------------------------------------------------- | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | --------------------------------------------------------------------- Modules Module is the core workers in Satellite. Module is constituted by the specific extension plugins. 
There are 3 modules in one namespace, which are Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing the data to Queue. So there are 2 kinds of Gatherer, which are ReceiverGatherer and FetcherGatherer. The Processor module is responsible for reading data from the queue and processing data by a series of filter chains. The Sender module is responsible for async processing and forwarding the data to the external services in the batch mode. After sending success, Sender would also acknowledge the offset of Queue in Gatherer.   Pipe -------------------------------------------------------------------- | ---------- ----------- -------- | | | Gatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ---------- ----------- -------- | -------------------------------------------------------------------- LifeCycle\n Prepare: Prepare phase is to do some preparation works, such as register the client status listener to the client in ReceiverGatherer. Boot: Boot phase is to start the current module until receives a close signal. ShutDown: ShutDown phase is to close the used resources.  Plugins Plugin is the minimal components in the module. Satellite has 2 plugin catalogs, which are sharing plugins and normal plugins.\n a sharing plugin instance could be sharing with multiple modules in the different pipes. a normal plugin instance is only be used in a fixed module of the fixed pipes.  Sharing plugin Nowadays, there are 2 kinds of sharing plugins in Satellite, which are server plugins and client plugins. The reason why they are sharing plugins is to reduce the resource cost in connection. Server plugins are sharing with the ReceiverGatherer modules in the different pipes to receive the external requests. And the client plugins is sharing with the Sender modules in the different pipes to connect with external services, such as Kafka and OAP.\n Sharing Server Sharing Client -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- Normal plugin There are 7 kinds of normal plugins in Satellite, which are Receiver, Fetcher, Queue, Parser, Filter, Forwarder, and Fallbacker.\n Receiver: receives the input APM data from the request. Fetcher: fetch the APM data by fetching. Queue: store the APM data to ensure the data stability. Parser: supports some ways to parse data, such parse a csv file. Filter: processes the APM data. Forwarder: forwards the APM data to the external receiver, such as Kafka and OAP. Fallbacker: supports some fallback strategies, such as timer retry strategy.   
Gatherer Processor ------------------------------- ------------------------------------------- | ----------- --------- | | ----------- ----------- | | | Receiver | ==\u0026gt; | Queue | |==\u0026gt;| | Filter | ==\u0026gt; ... ==\u0026gt; | Filter | | | | /Fetcher | | Mem/File | | | ----------- ----------- | | ----------- ---------- | | || || | -------------------------------- | \\/\t\\/ | | --------------------------------------- | | | OutputEventContext | | | --------------------------------------- | ------------------------------------------- || \\/ Sender ------------------------------------------ | --- --- | | | B | | D | ----------------- | | | A | | I | |Segment Forwarder| | | | T | | S | | (Fallbacker) | | | | C | | P | ----------------- | | | H | =\u0026gt; | A | | ===\u0026gt; Kafka/OAP | | B | | T | =\u0026gt; ...... | | | U | | C | | | | F | | H | ----------------- | | | F | | E | | Meter Forwarder| | | | E | | R | | (Fallbacker | | | | R | | | ----------------- | | --- --- | ------------------------------------------ 1. The Fetcher/Receiver plugin would fetch or receive the input data. 2. The Parser plugin would parse the input data to SerializableEvent that is supported to be stored in Queue. 3. The Queue plugin stores the SerializableEvent. However, whether serializing depends on the Queue implements. For example, the serialization is unnecessary when using a Memory Queue. Once an event is pulled by the consumer of Queue, the event will be processed by the filters in Processor. 4. The Filter plugin would process the event to create a new event. Next, the event is passed to the next filter to do the same things until the whole filters are performed. All created events would be stored in the OutputEventContext. However, only the events labeled with RemoteEvent type would be forwarded by Forwarder. 5. After processing, the events in OutputEventContext would be stored in the BatchBuffer. When the timer is triggered or the capacity limit is reached, the events in BatchBuffer would be partitioned by EventType and sent to the different Forwarders, such as Segment Forwarder and Meter Forwarder. 6. The Follower in different Senders would share with the remote client to avoid make duplicate connections and have the same Fallbacker(FallBack strategy) to process data. When all forwarders send success or process success in Fallbacker, the dispatcher would also ack the batch is a success. ============================================================================================ ","excerpt":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/module_design/","title":"Module Design"},{"body":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to process the telemetry data(metrics/traces/logs). Two pipes are not sharing data.\n Satellite --------------------------------------------------------------------- | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | --------------------------------------------------------------------- Modules Module is the core workers in Satellite. Module is constituted by the specific extension plugins. 
There are 3 modules in one namespace, which are Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing the data to Queue. So there are 2 kinds of Gatherer, which are ReceiverGatherer and FetcherGatherer. The Processor module is responsible for reading data from the queue and processing data by a series of filter chains. The Sender module is responsible for async processing and forwarding the data to the external services in the batch mode. After sending success, Sender would also acknowledge the offset of Queue in Gatherer.   Pipe -------------------------------------------------------------------- | ---------- ----------- -------- | | | Gatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ---------- ----------- -------- | -------------------------------------------------------------------- LifeCycle\n Prepare: Prepare phase is to do some preparation works, such as register the client status listener to the client in ReceiverGatherer. Boot: Boot phase is to start the current module until receives a close signal. ShutDown: ShutDown phase is to close the used resources.  Plugins Plugin is the minimal components in the module. Satellite has 2 plugin catalogs, which are sharing plugins and normal plugins.\n a sharing plugin instance could be sharing with multiple modules in the different pipes. a normal plugin instance is only be used in a fixed module of the fixed pipes.  Sharing plugin Nowadays, there are 2 kinds of sharing plugins in Satellite, which are server plugins and client plugins. The reason why they are sharing plugins is to reduce the resource cost in connection. Server plugins are sharing with the ReceiverGatherer modules in the different pipes to receive the external requests. And the client plugins is sharing with the Sender modules in the different pipes to connect with external services, such as Kafka and OAP.\n Sharing Server Sharing Client -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- Normal plugin There are 7 kinds of normal plugins in Satellite, which are Receiver, Fetcher, Queue, Parser, Filter, Forwarder, and Fallbacker.\n Receiver: receives the input APM data from the request. Fetcher: fetch the APM data by fetching. Queue: store the APM data to ensure the data stability. Parser: supports some ways to parse data, such parse a csv file. Filter: processes the APM data. Forwarder: forwards the APM data to the external receiver, such as Kafka and OAP. Fallbacker: supports some fallback strategies, such as timer retry strategy.   
Gatherer Processor ------------------------------- ------------------------------------------- | ----------- --------- | | ----------- ----------- | | | Receiver | ==\u0026gt; | Queue | |==\u0026gt;| | Filter | ==\u0026gt; ... ==\u0026gt; | Filter | | | | /Fetcher | | Mem/File | | | ----------- ----------- | | ----------- ---------- | | || || | -------------------------------- | \\/\t\\/ | | --------------------------------------- | | | OutputEventContext | | | --------------------------------------- | ------------------------------------------- || \\/ Sender ------------------------------------------ | --- --- | | | B | | D | ----------------- | | | A | | I | |Segment Forwarder| | | | T | | S | | (Fallbacker) | | | | C | | P | ----------------- | | | H | =\u0026gt; | A | | ===\u0026gt; Kafka/OAP | | B | | T | =\u0026gt; ...... | | | U | | C | | | | F | | H | ----------------- | | | F | | E | | Meter Forwarder| | | | E | | R | | (Fallbacker | | | | R | | | ----------------- | | --- --- | ------------------------------------------ 1. The Fetcher/Receiver plugin would fetch or receive the input data. 2. The Parser plugin would parse the input data to SerializableEvent that is supported to be stored in Queue. 3. The Queue plugin stores the SerializableEvent. However, whether serializing depends on the Queue implements. For example, the serialization is unnecessary when using a Memory Queue. Once an event is pulled by the consumer of Queue, the event will be processed by the filters in Processor. 4. The Filter plugin would process the event to create a new event. Next, the event is passed to the next filter to do the same things until the whole filters are performed. All created events would be stored in the OutputEventContext. However, only the events labeled with RemoteEvent type would be forwarded by Forwarder. 5. After processing, the events in OutputEventContext would be stored in the BatchBuffer. When the timer is triggered or the capacity limit is reached, the events in BatchBuffer would be partitioned by EventType and sent to the different Forwarders, such as Segment Forwarder and Meter Forwarder. 6. The Follower in different Senders would share with the remote client to avoid make duplicate connections and have the same Fallbacker(FallBack strategy) to process data. When all forwarders send success or process success in Fallbacker, the dispatcher would also ack the batch is a success. ============================================================================================ ","excerpt":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to …","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/module_design/","title":"Module Design"},{"body":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  
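To make the OpenTelemetry Collector step concrete, a minimal sketch of its configuration could look like the following, assuming mongodb-exporter is reachable at localhost:9216 (its default port) and the OAP OTLP/gRPC receiver listens on oap:11800; the addresses and job name are assumptions to adjust for your environment.

receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: mongodb-monitoring      # hypothetical job name
          scrape_interval: 30s
          static_configs:
            - targets: ['localhost:9216']   # mongodb-exporter endpoint (assumed)
exporters:
  otlp:
    endpoint: oap:11800                     # SkyWalking OAP address (assumed)
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]

The example configuration referred to above remains the authoritative reference.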
MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Avg QPS  meter_mongodb_cluster_document_avg_qps Avg document operations rate of nodes mongodb-exporter   Operation Avg QPS  meter_mongodb_cluster_operation_avg_qps Avg operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Avg  meter_mongodb_cluster_cursor_avg Avg Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Avg Data Size Per Shard (GB) GB meter_mongodb_cluster_db_data_size Avg data size per shard (replSet) of every database mongodb-exporter   DB Avg Index Size Per Shard (GB) GB meter_mongodb_cluster_db_index_size Avg index size per shard (replSet) of every database mongodb-exporter   DB Avg Collection Count Per Shard  meter_mongodb_cluster_db_collection_count Avg collection count per shard (replSet) of every database mongodb-exporter   DB Avg Index Count Per Shard  meter_mongodb_cluster_db_index_count Avg index count per shard (replSet) of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_latency Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  
meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_operation_latency Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","excerpt":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. …","ref":"/docs/main/latest/en/setup/backend/backend-mongodb-monitoring/","title":"MongoDB monitoring"},{"body":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. 
In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Total QPS  meter_mongodb_cluster_document_avg_qps Total document operations rate of nodes mongodb-exporter   Operation Total QPS  meter_mongodb_cluster_operation_avg_qps Total operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Total  meter_mongodb_cluster_cursor_avg Total Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Total Data Size (GB) GB meter_mongodb_cluster_db_data_size Total data size of every database mongodb-exporter   DB Total Index Size (GB) GB meter_mongodb_cluster_db_index_size Total index size per of every database mongodb-exporter   DB Total Collection Count  meter_mongodb_cluster_db_collection_count Total collection count of every database mongodb-exporter   DB Total Index Count  meter_mongodb_cluster_db_index_count Total index count of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_op_ratemeter_mongodb_node_latency_rate Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per second mongodb-exporter   Operation Latency (µs) µs 
meter_mongodb_node_op_ratemeter_mongodb_node_latency_rate Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","excerpt":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. …","ref":"/docs/main/next/en/setup/backend/backend-mongodb-monitoring/","title":"MongoDB monitoring"},{"body":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. 
In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Avg QPS  meter_mongodb_cluster_document_avg_qps Avg document operations rate of nodes mongodb-exporter   Operation Avg QPS  meter_mongodb_cluster_operation_avg_qps Avg operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Avg  meter_mongodb_cluster_cursor_avg Avg Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Avg Data Size Per Shard (GB) GB meter_mongodb_cluster_db_data_size Avg data size per shard (replSet) of every database mongodb-exporter   DB Avg Index Size Per Shard (GB) GB meter_mongodb_cluster_db_index_size Avg index size per shard (replSet) of every database mongodb-exporter   DB Avg Collection Count Per Shard  meter_mongodb_cluster_db_collection_count Avg collection count per shard (replSet) of every database mongodb-exporter   DB Avg Index Count Per Shard  meter_mongodb_cluster_db_index_count Avg index count per shard (replSet) of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_latency Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per 
second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_operation_latency Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","excerpt":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-mongodb-monitoring/","title":"MongoDB monitoring"},{"body":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. 
In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Avg QPS  meter_mongodb_cluster_document_avg_qps Avg document operations rate of nodes mongodb-exporter   Operation Avg QPS  meter_mongodb_cluster_operation_avg_qps Avg operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Avg  meter_mongodb_cluster_cursor_avg Avg Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Avg Data Size Per Shard (GB) GB meter_mongodb_cluster_db_data_size Avg data size per shard (replSet) of every database mongodb-exporter   DB Avg Index Size Per Shard (GB) GB meter_mongodb_cluster_db_index_size Avg index size per shard (replSet) of every database mongodb-exporter   DB Avg Collection Count Per Shard  meter_mongodb_cluster_db_collection_count Avg collection count per shard (replSet) of every database mongodb-exporter   DB Avg Index Count Per Shard  meter_mongodb_cluster_db_index_count Avg index count per shard (replSet) of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_latency Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per 
second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_operation_latency Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","excerpt":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-mongodb-monitoring/","title":"MongoDB monitoring"},{"body":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document for more details.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. 
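For instance (a hypothetical value; TiDB speaks the MySQL protocol on port 4000 by default, and tidb-host is a placeholder), the jdbcUrl property could be set to:

jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://tidb-host:4000/swtest?rewriteBatchedStatements=true&allowMultiQueries=true"}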
Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\n","excerpt":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT …","ref":"/docs/main/latest/en/setup/backend/storages/mysql/","title":"MySQL"},{"body":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document for more details.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\n","excerpt":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT …","ref":"/docs/main/next/en/setup/backend/storages/mysql/","title":"MySQL"},{"body":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document for more details.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. 
Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\n","excerpt":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT …","ref":"/docs/main/v9.7.0/en/setup/backend/storages/mysql/","title":"MySQL"},{"body":"MySQL monitoring SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data from MySQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL Monitoring MySQL monitoring provides monitoring of the status and resources of the MySQL server. MySQL server as a Service in OAP, and land on the Layer: MYSQL.\nMySQL Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql.yaml. 
The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL monitoring SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data from …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-mysql-monitoring/","title":"MySQL monitoring"},{"body":"MySQL monitoring MySQL server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL Monitoring MySQL monitoring provides monitoring of the status and resources of the MySQL server. MySQL cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql.yaml. 
The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL.\nData flow  fluentbit agent collects slow sql logs from MySQL. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config MySQL to enable slow log.example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL server. MySQL server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL monitoring MySQL server performance from prometheus/mysqld_exporter SkyWalking leverages …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-mysql-monitoring/","title":"MySQL monitoring"},{"body":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. 
mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking …","ref":"/docs/main/latest/en/setup/backend/backend-mysql-monitoring/","title":"MySQL/MariaDB monitoring"},{"body":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_max_connections\nmeter_mysql_status_thread_connected\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  
Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking …","ref":"/docs/main/next/en/setup/backend/backend-mysql-monitoring/","title":"MySQL/MariaDB monitoring"},{"body":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. 
mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-mysql-monitoring/","title":"MySQL/MariaDB monitoring"},{"body":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  
Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-mysql-monitoring/","title":"MySQL/MariaDB monitoring"},{"body":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. 
mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-mysql-monitoring/","title":"MySQL/MariaDB monitoring"},{"body":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  
Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-mysql-monitoring/","title":"MySQL/MariaDB monitoring"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/latest/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. 
It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/next/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   
Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/v9.2.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/v9.3.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  
The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/v9.4.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/v9.5.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. 
OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/v9.6.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   
Notice: Users can still choose to adjust ElasticSearch\u0026rsquo;s shard number (SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/v9.7.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua library that can be used with Nginx to collect metrics and expose them on a separate web page. To use this library, you will need Nginx with lua-nginx-module, or OpenResty directly.\nSkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  nginx-lua-prometheus collects metrics from Nginx and exposes them at an endpoint. OpenTelemetry Collector fetches metrics from the endpoint exposed above via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Collect Nginx metrics and expose the following four metrics by nginx-lua-prometheus. For details on metrics definition, refer to here.   histogram: nginx_http_latency gauge: nginx_http_connections counter: nginx_http_size_bytes counter: nginx_http_requests_total  Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Nginx Monitoring SkyWalking observes the status, payload, and latency of the Nginx server, which is cataloged as a LAYER: Nginx Service in the OAP, and instances are recognized as LAYER: Nginx instances.\nThe granularity of LAYER: Nginx endpoints depends on how precisely you want to monitor Nginx. 
We do not recommend exposing metrics for every request path, because that would cause an explosion of endpoint metrics data.\nYou can collect host metrics:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_sent), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or grouped URLs and upstream metrics:\nupstream backend { server ip:port; } server { location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_sent), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_sent), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } Nginx Service Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_service_http_requests Service The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_service_http_latency Service The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_service_bandwidth Service The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_service_http_connections Service The average number of connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_service_http_status Service The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_service_http_4xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_service_http_5xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 5xx status of HTTP requests nginx-lua-prometheus    Nginx Instance Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_instance_http_requests Instance The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_instance_http_latency Instance The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_instance_http_connections Instance The average number of connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_instance_http_status Instance The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 
4xx Percent % meter_nginx_instance_http_4xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_instance_http_5xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 5xx status of HTTP requests nginx-lua-prometheus    Nginx Endpoint Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_endpoint_http_requests Endpoint The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_endpoint_http_latency Endpoint The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_endpoint_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Status Trend  meter_nginx_endpoint_http_status Endpoint The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_endpoint_http_4xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_endpoint_http_5xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 5xx status of HTTP requests nginx-lua-prometheus    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-rules/nginx-service.yaml, /config/otel-rules/nginx-instance.yaml, /config/otel-rules/nginx-endpoint.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\nCollect nginx access and error log SkyWalking leverages fluentbit or other log agents for collecting the access log and error log of Nginx.\nData flow  fluentbit agent collects access log and error log from Nginx. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Install fluentbit. Config fluent bit with fluent-bit.conf, refer to here.  Error Log Monitoring Error Log monitoring provides monitoring of the error.log of the Nginx server.\nSupported Metrics    Monitoring Panel Metric Name Catalog Description Data Source     Service Error Log Count meter_nginx_service_error_log_count Service The count of each log level in the nginx error.log fluent bit   Instance Error Log Count meter_nginx_instance_error_log_count Instance The count of each log level in the nginx error.log fluent bit    Customizations You can customize your own metrics/expression/dashboard panel.\nThe log collection and analysis rules are found in /config/lal/nginx.yaml, /config/log-mal-rules/nginx.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\n","excerpt":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua …","ref":"/docs/main/latest/en/setup/backend/backend-nginx-monitoring/","title":"Nginx monitoring"},{"body":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua library that can be used with Nginx to collect metrics and expose them on a separate web page. 
To use this library, you will need Nginx with lua-nginx-module or directly OpenResty.\nSkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  nginx-lua-prometheus collects metrics from Nginx and expose them to an endpoint. OpenTelemetry Collector fetches metrics from the endpoint expose above via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Collect Nginx metrics and expose the following four metrics by nginx-lua-prometheus. For details on metrics definition, refer to here.   histogram: nginx_http_latency gauge: nginx_http_connections counter: nginx_http_size_bytes counter: nginx_http_requests_total  Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Nginx Monitoring SkyWalking observes the status, payload, and latency of the Nginx server, which is cataloged as a LAYER: Nginx Service in the OAP and instances would be recognized as LAYER: Nginx instance.\nAbout LAYER: Nginx endpoint, it depends on how precision you want to monitor the nginx. We do not recommend expose every request path metrics, because it will cause explosion of metrics endpoint data.\nYou can collect host metrics:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or grouped urls and upstream metrics:\nupstream backend { server ip:port; } server { location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } Nginx Service Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_service_http_requests Service The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_service_http_latency Service The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_service_bandwidth Service The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_service_http_connections Service The avg number of the connections 
nginx-lua-prometheus   HTTP Status Trend  meter_nginx_service_http_status Service The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_service_http_4xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_service_http_5xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Instance Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_instance_http_requests Instance The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_instance_http_latency Instance The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_instance_http_connections Instance The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_instance_http_status Instance The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_instance_http_4xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_instance_http_5xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Endpoint Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_endpoint_http_requests Endpoint The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_endpoint_http_latency Endpoint The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_endpoint_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Status Trend  meter_nginx_endpoint_http_status Endpoint The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_endpoint_http_4xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_endpoint_http_5xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-rules/nginx-service.yaml, /config/otel-rules/nginx-instance.yaml, /config/otel-rules/nginx-endpoint.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\nCollect nginx access and error log SkyWalking leverages fluentbit or other log agents for collecting access log and error log of Nginx.\nData flow  fluentbit agent collects access log and error log from Nginx. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Install fluentbit. 
Config fluent bit with fluent-bit.conf, refer to here.  Error Log Monitoring Error Log monitoring provides monitoring of the error.log of the Nginx server.\nSupported Metrics    Monitoring Panel Metric Name Catalog Description Data Source     Service Error Log Count meter_nginx_service_error_log_count Service The count of log level of nginx error.log fluent bit   Instance Error Log Count meter_nginx_instance_error_log_count Instance The count of log level of nginx error.log fluent bit    Customizations You can customize your own metrics/expression/dashboard panel.\nThe log collect and analyse rules are found in /config/lal/nginx.yaml, /config/log-mal-rules/nginx.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\n","excerpt":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua …","ref":"/docs/main/next/en/setup/backend/backend-nginx-monitoring/","title":"Nginx monitoring"},{"body":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua library that can be used with Nginx to collect metrics and expose them on a separate web page. To use this library, you will need Nginx with lua-nginx-module or directly OpenResty.\nSkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  nginx-lua-prometheus collects metrics from Nginx and expose them to an endpoint. OpenTelemetry Collector fetches metrics from the endpoint expose above via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Collect Nginx metrics and expose the following four metrics by nginx-lua-prometheus. For details on metrics definition, refer to here.   histogram: nginx_http_latency gauge: nginx_http_connections counter: nginx_http_size_bytes counter: nginx_http_requests_total  Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Nginx Monitoring SkyWalking observes the status, payload, and latency of the Nginx server, which is cataloged as a LAYER: Nginx Service in the OAP and instances would be recognized as LAYER: Nginx instance.\nAbout LAYER: Nginx endpoint, it depends on how precision you want to monitor the nginx. 
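For the "Set up OpenTelemetry Collector" step above, the collector needs a Prometheus receiver that scrapes the endpoint exposed by nginx-lua-prometheus and an exporter that pushes the result to the OAP gRPC receiver. A minimal sketch only; the job name, scrape target, and OAP address are placeholders rather than the official otel-collector configuration:

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: nginx-monitoring
          scrape_interval: 10s
          static_configs:
            - targets: ['nginx:9145']   # endpoint exposed by nginx-lua-prometheus
exporters:
  otlp:
    endpoint: oap:11800                 # SkyWalking OAP gRPC receiver
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]
```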
We do not recommend expose every request path metrics, because it will cause explosion of metrics endpoint data.\nYou can collect host metrics:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or grouped urls and upstream metrics:\nupstream backend { server ip:port; } server { location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } Nginx Service Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_service_http_requests Service The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_service_http_latency Service The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_service_bandwidth Service The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_service_http_connections Service The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_service_http_status Service The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_service_http_4xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_service_http_5xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Instance Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_instance_http_requests Instance The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_instance_http_latency Instance The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_instance_http_connections Instance The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_instance_http_status Instance The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 
4xx Percent % meter_nginx_instance_http_4xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_instance_http_5xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Endpoint Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_endpoint_http_requests Endpoint The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_endpoint_http_latency Endpoint The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_endpoint_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Status Trend  meter_nginx_endpoint_http_status Endpoint The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_endpoint_http_4xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_endpoint_http_5xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-rules/nginx-service.yaml, /config/otel-rules/nginx-instance.yaml, /config/otel-rules/nginx-endpoint.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\nCollect nginx access and error log SkyWalking leverages fluentbit or other log agents for collecting access log and error log of Nginx.\nData flow  fluentbit agent collects access log and error log from Nginx. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Install fluentbit. Config fluent bit with fluent-bit.conf, refer to here.  Error Log Monitoring Error Log monitoring provides monitoring of the error.log of the Nginx server.\nSupported Metrics    Monitoring Panel Metric Name Catalog Description Data Source     Service Error Log Count meter_nginx_service_error_log_count Service The count of log level of nginx error.log fluent bit   Instance Error Log Count meter_nginx_instance_error_log_count Instance The count of log level of nginx error.log fluent bit    Customizations You can customize your own metrics/expression/dashboard panel.\nThe log collect and analyse rules are found in /config/lal/nginx.yaml, /config/log-mal-rules/nginx.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\n","excerpt":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-nginx-monitoring/","title":"Nginx monitoring"},{"body":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. 
So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","excerpt":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server …","ref":"/docs/main/latest/en/guides/dependencies/","title":"OAP backend dependency management"},{"body":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","excerpt":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server …","ref":"/docs/main/next/en/guides/dependencies/","title":"OAP backend dependency management"},{"body":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. 
Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","excerpt":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server …","ref":"/docs/main/v9.6.0/en/guides/dependencies/","title":"OAP backend dependency management"},{"body":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","excerpt":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server …","ref":"/docs/main/v9.7.0/en/guides/dependencies/","title":"OAP backend dependency management"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
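The "Set up" step above amounts to two pieces: the OAP exposing its own metrics in Prometheus format (SW_TELEMETRY=prometheus, the same setting shown in the operator demo later in this index), and a collector scraping that endpoint and pushing the metrics back through the OpenTelemetry receiver. A sketch of the scrape side only, with the host name and telemetry port as assumptions; the exporter/pipeline section has the same shape as the Nginx collector sketch earlier on this page:

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: skywalking-so11y
          scrape_interval: 10s
          static_configs:
            - targets: ['oap:1234']   # OAP telemetry endpoint when SW_TELEMETRY=prometheus
```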
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. 
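If you do customize these rules, it may help to know the overall shape of a MAL rule file. The fragment below is purely illustrative and reuses the instance()/sum() syntax from the log-MAL demo later in this index; the metric name is a placeholder, not the actual contents of config/otel-rules/oap.yaml:

```yaml
expSuffix: instance(['service'], ['instance'], Layer.SO11Y_OAP)
metricPrefix: meter_oap
metricsRules:
  # Hypothetical rule: aggregate a scraped Prometheus metric per OAP instance.
  - name: instance_jvm_memory_bytes_used
    exp: jvm_memory_bytes_used.sum(['service', 'instance'])
```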
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/latest/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   
Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/next/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Self Observability. (Optional) Set up OpenTelemetry Collector .. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. 
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/v9.3.0/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency 
(ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/v9.4.0/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. 
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/v9.5.0/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   
Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/v9.6.0/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/v9.7.0/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs:\n OAPServerConfig: The CRD holds all static configuration, including environment variable and file configuration. OAPServerDynamicConfig: The CRD holds all dynamic configuration.  
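Before the field-by-field tables below, it helps to see the overall shape of one of these resources. The following is a minimal OAPServerDynamicConfig, re-indented from the single-configuration demo later on this page (the indentation itself is an editorial reconstruction):

```yaml
apiVersion: operator.skywalking.apache.org/v1alpha1
kind: OAPServerDynamicConfig
metadata:
  name: agent-analyzer.default
spec:
  # The version of OAPServer
  version: 9.5.0
  # Must match the labelSelector used by OAPServerConfig
  labelSelector: app=collector,release=skywalking
  data:
    - name: slowDBAccessThreshold
      value: default:200,mongodb:50
```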
Spec of OAPServerConfig    Field Name Description     Version The version of OAP server, the default value is 9.5.0   Env The environment variable of OAP server   File The static file in OAP Server, which contains three fieldsfile.path、file.name and file.data. The file.path plus the file.name is the real file that needs to be replaced in the container image, and the file.data is the final data in the specific file.    Status of OAPServerConfig    Field Name Description     Desired The number of oapserver that need to be configured   Ready The number of oapserver that configured successfully   CreationTime The time the OAPServerConfig was created.   LastUpdateTime The last time this condition was updated.    Demo of OAPServerConfig  When using the file, please don\u0026rsquo;t set the same name\n # static configuration of OAPServerapiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerConfigmetadata:name:oapserverconfig-samplenamespace:skywalking-systemspec:# The version of OAPServerversion:9.5.0# The env configuration of OAPServerenv:- name:JAVA_OPTSvalue:-Xmx2048M- name:SW_CLUSTERvalue:kubernetes- name:SW_CLUSTER_K8S_NAMESPACEvalue:skywalking-system# enable the dynamic configuration- name:SW_CONFIGURATIONvalue:k8s-configmap# set the labelselector of the dynamic configuration- name:SW_CLUSTER_K8S_LABELvalue:app=collector,release=skywalking- name:SW_TELEMETRYvalue:prometheus- name:SW_HEALTH_CHECKERvalue:default- name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uid- name:SW_LOG_LAL_FILESvalue:test1- name:SW_LOG_MAL_FILESvalue:test2# The file configuration of OAPServer# we should avoid setting the same file name in the filefile:- name:test1.yamlpath:/skywalking/config/laldata:|rules: - name: example dsl: | filter { text { abortOnFailure false // for test purpose, we want to persist all logs regexp $/(?s)(?\u0026lt;timestamp\u0026gt;\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3}) \\[TID:(?\u0026lt;tid\u0026gt;.+?)] \\[(?\u0026lt;thread\u0026gt;.+?)] (?\u0026lt;level\u0026gt;\\w{4,}) (?\u0026lt;logger\u0026gt;.{1,36}) (?\u0026lt;msg\u0026gt;.+)/$ } extractor { metrics { timestamp log.timestamp as Long labels level: parsed.level, service: log.service, instance: log.serviceInstance name \u0026#34;log_count\u0026#34; value 1 } } sink { } }- name:test2.yamlpath:/skywalking/config/log-mal-rulesdata:|expSuffix: instance([\u0026#39;service\u0026#39;], [\u0026#39;instance\u0026#39;], Layer.GENERAL) metricPrefix: log metricsRules: - name: count_info exp: log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;INFO\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).downsampling(SUM)Spec of OAPServerDynamicConfig    Field Name Description     Version The version of the OAP server, the default value is 9.5.0   LabelSelector The label selector of the specific configmap, the default value is \u0026ldquo;app=collector,release=skywalking\u0026rdquo;   Data All configurations' key and value    Status of OAPServerDynamicConfig    Field Name Description     State The state of dynamic configuration, running or stopped   CreationTime All configurations in one CR, the default value is false   LastUpdateTime The last time this condition was updated    Usage of OAPServerDynamicConfig  Notice, the CR\u0026rsquo;s name cannot contain capital letters.\n Users can split all configurations into several CRs. 
when using the OAPServerDynamicConfig, users can not only put some configurations in a CR, but also put a configuration in a CR, and the spec.data.name in CR represents one dynamic configuration.\nDemo of Global configuration apiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:oapserverdynamicconfig-samplespec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:agent-analyzer.default.slowDBAccessThresholdvalue:default:200,mongodb:50- name:alarm.default.alarm-settingsvalue:|-rules: # Rule unique name, must be ended with `_rule`. service_resp_time_rule: metrics-name: service_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 3 silence-period: 5 message: Response time of service {name} is more than 1000ms in 3 minutes of last 10 minutes. service_sla_rule: # Metrics value need to be long, double or int metrics-name: service_sla op: \u0026#34;\u0026lt;\u0026#34; threshold: 8000 # The length of time to evaluate the metrics period: 10 # How many times after the metrics match the condition, will trigger alarm count: 2 # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period. silence-period: 3 message: Successful rate of service {name} is lower than 80% in 2 minutes of last 10 minutes service_resp_time_percentile_rule: # Metrics value need to be long, double or int metrics-name: service_percentile op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000,1000,1000,1000,1000 period: 10 count: 3 silence-period: 5 message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000 service_instance_resp_time_rule: metrics-name: service_instance_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 2 silence-period: 5 message: Response time of service instance {name} is more than 1000ms in 2 minutes of last 10 minutes database_access_resp_time_rule: metrics-name: database_access_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of database access {name} is more than 1000ms in 2 minutes of last 10 minutes endpoint_relation_resp_time_rule: metrics-name: endpoint_relation_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of endpoint relation {name} is more than 1000ms in 2 minutes of last 10 minutes # Active endpoint related metrics alarm will cost more memory than service and service instance metrics alarm. # Because the number of endpoint is much more than service and instance. 
# # endpoint_resp_time_rule: # metrics-name: endpoint_resp_time # op: \u0026#34;\u0026gt;\u0026#34; # threshold: 1000 # period: 10 # count: 2 # silence-period: 5 # message: Response time of endpoint {name} is more than 1000ms in 2 minutes of last 10 minutes webhooks: # - http://127.0.0.1/notify/ # - http://127.0.0.1/go-wechat/- name:core.default.apdexThresholdvalue:|-default: 500 # example: # the threshold of service \u0026#34;tomcat\u0026#34; is 1s # tomcat: 1000 # the threshold of service \u0026#34;springboot1\u0026#34; is 50ms # springboot1: 50- name:agent-analyzer.default.uninstrumentedGatewaysvalue:|-#gateways: # - name: proxy0 # instances: # - host: 127.0.0.1 # the host/ip of this gateway instance # port: 9099 # the port of this gateway instance, defaults to 80Demo of Single configuration Set the dynamic configuration agent-analyzer.default.slowDBAccessThreshold as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:agent-analyzer.defaultspec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:slowDBAccessThresholdvalue:default:200,mongodb:50Set the dynamic configuration core.default.endpoint-name-grouping-openapi.customerAPI-v1 and core.default.endpoint-name-grouping-openapi.productAPI-v1 as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:core.default.endpoint-name-grouping-openapispec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:customerAPI-v1value:value of customerAPI-v1- name:productAPI-v1value:value of productAPI-v1","excerpt":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs: …","ref":"/docs/skywalking-swck/latest/oapserver-configuration/","title":"OAPSever Configuration Introduction"},{"body":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs:\n OAPServerConfig: The CRD holds all static configuration, including environment variable and file configuration. OAPServerDynamicConfig: The CRD holds all dynamic configuration.  Spec of OAPServerConfig    Field Name Description     Version The version of OAP server, the default value is 9.5.0   Env The environment variable of OAP server   File The static file in OAP Server, which contains three fieldsfile.path、file.name and file.data. The file.path plus the file.name is the real file that needs to be replaced in the container image, and the file.data is the final data in the specific file.    Status of OAPServerConfig    Field Name Description     Desired The number of oapserver that need to be configured   Ready The number of oapserver that configured successfully   CreationTime The time the OAPServerConfig was created.   LastUpdateTime The last time this condition was updated.    
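As a quick illustration of the Status fields listed above, the status reported on an OAPServerConfig resource might look roughly like the sketch below; the camelCase field names and all values are assumptions for illustration, not taken from the operator's actual output.
# hypothetical status block; field names mirror the Status table above,
# camelCase serialization and the concrete values are assumptions
status:
  desired: 1
  ready: 1
  creationTime: 2024-01-01T00:00:00Z
  lastUpdateTime: 2024-01-01T00:05:00Z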
Demo of OAPServerConfig  When using the file, please don\u0026rsquo;t set the same name\n # static configuration of OAPServerapiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerConfigmetadata:name:oapserverconfig-samplenamespace:skywalking-systemspec:# The version of OAPServerversion:9.5.0# The env configuration of OAPServerenv:- name:JAVA_OPTSvalue:-Xmx2048M- name:SW_CLUSTERvalue:kubernetes- name:SW_CLUSTER_K8S_NAMESPACEvalue:skywalking-system# enable the dynamic configuration- name:SW_CONFIGURATIONvalue:k8s-configmap# set the labelselector of the dynamic configuration- name:SW_CLUSTER_K8S_LABELvalue:app=collector,release=skywalking- name:SW_TELEMETRYvalue:prometheus- name:SW_HEALTH_CHECKERvalue:default- name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uid- name:SW_LOG_LAL_FILESvalue:test1- name:SW_LOG_MAL_FILESvalue:test2# The file configuration of OAPServer# we should avoid setting the same file name in the filefile:- name:test1.yamlpath:/skywalking/config/laldata:|rules: - name: example dsl: | filter { text { abortOnFailure false // for test purpose, we want to persist all logs regexp $/(?s)(?\u0026lt;timestamp\u0026gt;\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3}) \\[TID:(?\u0026lt;tid\u0026gt;.+?)] \\[(?\u0026lt;thread\u0026gt;.+?)] (?\u0026lt;level\u0026gt;\\w{4,}) (?\u0026lt;logger\u0026gt;.{1,36}) (?\u0026lt;msg\u0026gt;.+)/$ } extractor { metrics { timestamp log.timestamp as Long labels level: parsed.level, service: log.service, instance: log.serviceInstance name \u0026#34;log_count\u0026#34; value 1 } } sink { } }- name:test2.yamlpath:/skywalking/config/log-mal-rulesdata:|expSuffix: instance([\u0026#39;service\u0026#39;], [\u0026#39;instance\u0026#39;], Layer.GENERAL) metricPrefix: log metricsRules: - name: count_info exp: log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;INFO\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).downsampling(SUM)Spec of OAPServerDynamicConfig    Field Name Description     Version The version of the OAP server, the default value is 9.5.0   LabelSelector The label selector of the specific configmap, the default value is \u0026ldquo;app=collector,release=skywalking\u0026rdquo;   Data All configurations' key and value    Status of OAPServerDynamicConfig    Field Name Description     State The state of dynamic configuration, running or stopped   CreationTime All configurations in one CR, the default value is false   LastUpdateTime The last time this condition was updated    Usage of OAPServerDynamicConfig  Notice, the CR\u0026rsquo;s name cannot contain capital letters.\n Users can split all configurations into several CRs. when using the OAPServerDynamicConfig, users can not only put some configurations in a CR, but also put a configuration in a CR, and the spec.data.name in CR represents one dynamic configuration.\nDemo of Global configuration apiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:oapserverdynamicconfig-samplespec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:agent-analyzer.default.slowDBAccessThresholdvalue:default:200,mongodb:50- name:alarm.default.alarm-settingsvalue:|-rules: # Rule unique name, must be ended with `_rule`. 
service_resp_time_rule: metrics-name: service_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 3 silence-period: 5 message: Response time of service {name} is more than 1000ms in 3 minutes of last 10 minutes. service_sla_rule: # Metrics value need to be long, double or int metrics-name: service_sla op: \u0026#34;\u0026lt;\u0026#34; threshold: 8000 # The length of time to evaluate the metrics period: 10 # How many times after the metrics match the condition, will trigger alarm count: 2 # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period. silence-period: 3 message: Successful rate of service {name} is lower than 80% in 2 minutes of last 10 minutes service_resp_time_percentile_rule: # Metrics value need to be long, double or int metrics-name: service_percentile op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000,1000,1000,1000,1000 period: 10 count: 3 silence-period: 5 message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000 service_instance_resp_time_rule: metrics-name: service_instance_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 2 silence-period: 5 message: Response time of service instance {name} is more than 1000ms in 2 minutes of last 10 minutes database_access_resp_time_rule: metrics-name: database_access_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of database access {name} is more than 1000ms in 2 minutes of last 10 minutes endpoint_relation_resp_time_rule: metrics-name: endpoint_relation_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of endpoint relation {name} is more than 1000ms in 2 minutes of last 10 minutes # Active endpoint related metrics alarm will cost more memory than service and service instance metrics alarm. # Because the number of endpoint is much more than service and instance. 
# # endpoint_resp_time_rule: # metrics-name: endpoint_resp_time # op: \u0026#34;\u0026gt;\u0026#34; # threshold: 1000 # period: 10 # count: 2 # silence-period: 5 # message: Response time of endpoint {name} is more than 1000ms in 2 minutes of last 10 minutes webhooks: # - http://127.0.0.1/notify/ # - http://127.0.0.1/go-wechat/- name:core.default.apdexThresholdvalue:|-default: 500 # example: # the threshold of service \u0026#34;tomcat\u0026#34; is 1s # tomcat: 1000 # the threshold of service \u0026#34;springboot1\u0026#34; is 50ms # springboot1: 50- name:agent-analyzer.default.uninstrumentedGatewaysvalue:|-#gateways: # - name: proxy0 # instances: # - host: 127.0.0.1 # the host/ip of this gateway instance # port: 9099 # the port of this gateway instance, defaults to 80Demo of Single configuration Set the dynamic configuration agent-analyzer.default.slowDBAccessThreshold as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:agent-analyzer.defaultspec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:slowDBAccessThresholdvalue:default:200,mongodb:50Set the dynamic configuration core.default.endpoint-name-grouping-openapi.customerAPI-v1 and core.default.endpoint-name-grouping-openapi.productAPI-v1 as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:core.default.endpoint-name-grouping-openapispec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:customerAPI-v1value:value of customerAPI-v1- name:productAPI-v1value:value of productAPI-v1","excerpt":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs: …","ref":"/docs/skywalking-swck/next/oapserver-configuration/","title":"OAPSever Configuration Introduction"},{"body":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs:\n OAPServerConfig: The CRD holds all static configuration, including environment variable and file configuration. OAPServerDynamicConfig: The CRD holds all dynamic configuration.  Spec of OAPServerConfig    Field Name Description     Version The version of OAP server, the default value is 9.5.0   Env The environment variable of OAP server   File The static file in OAP Server, which contains three fieldsfile.path、file.name and file.data. The file.path plus the file.name is the real file that needs to be replaced in the container image, and the file.data is the final data in the specific file.    Status of OAPServerConfig    Field Name Description     Desired The number of oapserver that need to be configured   Ready The number of oapserver that configured successfully   CreationTime The time the OAPServerConfig was created.   LastUpdateTime The last time this condition was updated.    
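Before the demo that follows, here is a small sketch of how one file entry resolves to a concrete path in the container image, per the file.path plus file.name rule in the Spec table above; the entry values are taken from the demo, and the resulting path shown in the comment is only an illustration.
# this entry targets /skywalking/config/lal/test1.yaml in the image
# (illustrative; the path and name come from the demo that follows)
file:
  - name: test1.yaml
    path: /skywalking/config/lal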
Demo of OAPServerConfig  When using the file, please don\u0026rsquo;t set the same name\n # static configuration of OAPServerapiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerConfigmetadata:name:oapserverconfig-samplenamespace:skywalking-systemspec:# The version of OAPServerversion:9.5.0# The env configuration of OAPServerenv:- name:JAVA_OPTSvalue:-Xmx2048M- name:SW_CLUSTERvalue:kubernetes- name:SW_CLUSTER_K8S_NAMESPACEvalue:skywalking-system# enable the dynamic configuration- name:SW_CONFIGURATIONvalue:k8s-configmap# set the labelselector of the dynamic configuration- name:SW_CLUSTER_K8S_LABELvalue:app=collector,release=skywalking- name:SW_TELEMETRYvalue:prometheus- name:SW_HEALTH_CHECKERvalue:default- name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uid- name:SW_LOG_LAL_FILESvalue:test1- name:SW_LOG_MAL_FILESvalue:test2# The file configuration of OAPServer# we should avoid setting the same file name in the filefile:- name:test1.yamlpath:/skywalking/config/laldata:|rules: - name: example dsl: | filter { text { abortOnFailure false // for test purpose, we want to persist all logs regexp $/(?s)(?\u0026lt;timestamp\u0026gt;\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3}) \\[TID:(?\u0026lt;tid\u0026gt;.+?)] \\[(?\u0026lt;thread\u0026gt;.+?)] (?\u0026lt;level\u0026gt;\\w{4,}) (?\u0026lt;logger\u0026gt;.{1,36}) (?\u0026lt;msg\u0026gt;.+)/$ } extractor { metrics { timestamp log.timestamp as Long labels level: parsed.level, service: log.service, instance: log.serviceInstance name \u0026#34;log_count\u0026#34; value 1 } } sink { } }- name:test2.yamlpath:/skywalking/config/log-mal-rulesdata:|expSuffix: instance([\u0026#39;service\u0026#39;], [\u0026#39;instance\u0026#39;], Layer.GENERAL) metricPrefix: log metricsRules: - name: count_info exp: log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;INFO\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).downsampling(SUM)Spec of OAPServerDynamicConfig    Field Name Description     Version The version of the OAP server, the default value is 9.5.0   LabelSelector The label selector of the specific configmap, the default value is \u0026ldquo;app=collector,release=skywalking\u0026rdquo;   Data All configurations' key and value    Status of OAPServerDynamicConfig    Field Name Description     State The state of dynamic configuration, running or stopped   CreationTime All configurations in one CR, the default value is false   LastUpdateTime The last time this condition was updated    Usage of OAPServerDynamicConfig  Notice, the CR\u0026rsquo;s name cannot contain capital letters.\n Users can split all configurations into several CRs. when using the OAPServerDynamicConfig, users can not only put some configurations in a CR, but also put a configuration in a CR, and the spec.data.name in CR represents one dynamic configuration.\nDemo of Global configuration apiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:oapserverdynamicconfig-samplespec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:agent-analyzer.default.slowDBAccessThresholdvalue:default:200,mongodb:50- name:alarm.default.alarm-settingsvalue:|-rules: # Rule unique name, must be ended with `_rule`. 
service_resp_time_rule: metrics-name: service_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 3 silence-period: 5 message: Response time of service {name} is more than 1000ms in 3 minutes of last 10 minutes. service_sla_rule: # Metrics value need to be long, double or int metrics-name: service_sla op: \u0026#34;\u0026lt;\u0026#34; threshold: 8000 # The length of time to evaluate the metrics period: 10 # How many times after the metrics match the condition, will trigger alarm count: 2 # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period. silence-period: 3 message: Successful rate of service {name} is lower than 80% in 2 minutes of last 10 minutes service_resp_time_percentile_rule: # Metrics value need to be long, double or int metrics-name: service_percentile op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000,1000,1000,1000,1000 period: 10 count: 3 silence-period: 5 message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000 service_instance_resp_time_rule: metrics-name: service_instance_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 2 silence-period: 5 message: Response time of service instance {name} is more than 1000ms in 2 minutes of last 10 minutes database_access_resp_time_rule: metrics-name: database_access_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of database access {name} is more than 1000ms in 2 minutes of last 10 minutes endpoint_relation_resp_time_rule: metrics-name: endpoint_relation_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of endpoint relation {name} is more than 1000ms in 2 minutes of last 10 minutes # Active endpoint related metrics alarm will cost more memory than service and service instance metrics alarm. # Because the number of endpoint is much more than service and instance. 
# # endpoint_resp_time_rule: # metrics-name: endpoint_resp_time # op: \u0026#34;\u0026gt;\u0026#34; # threshold: 1000 # period: 10 # count: 2 # silence-period: 5 # message: Response time of endpoint {name} is more than 1000ms in 2 minutes of last 10 minutes webhooks: # - http://127.0.0.1/notify/ # - http://127.0.0.1/go-wechat/- name:core.default.apdexThresholdvalue:|-default: 500 # example: # the threshold of service \u0026#34;tomcat\u0026#34; is 1s # tomcat: 1000 # the threshold of service \u0026#34;springboot1\u0026#34; is 50ms # springboot1: 50- name:agent-analyzer.default.uninstrumentedGatewaysvalue:|-#gateways: # - name: proxy0 # instances: # - host: 127.0.0.1 # the host/ip of this gateway instance # port: 9099 # the port of this gateway instance, defaults to 80Demo of Single configuration Set the dynamic configuration agent-analyzer.default.slowDBAccessThreshold as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:agent-analyzer.defaultspec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:slowDBAccessThresholdvalue:default:200,mongodb:50Set the dynamic configuration core.default.endpoint-name-grouping-openapi.customerAPI-v1 and core.default.endpoint-name-grouping-openapi.productAPI-v1 as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:core.default.endpoint-name-grouping-openapispec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:customerAPI-v1value:value of customerAPI-v1- name:productAPI-v1value:value of productAPI-v1","excerpt":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs: …","ref":"/docs/skywalking-swck/v0.9.0/oapserver-configuration/","title":"OAPSever Configuration Introduction"},{"body":"Observability This document outlines the observability features of BanyanDB, which include metrics, profiling, and tracing. These features help monitor and understand the performance, behavior, and overall health of BanyanDB.\nMetrics BanyanDB has built-in support for metrics collection through the use of build tags. The metrics provider can be enabled by specifying the build tag during the compilation process.\nCurrently, there is only one supported metrics provider: Prometheus. To use Prometheus as the metrics client, include the prometheus build tag when building BanyanDB:\nBUILD_TAGS=prometheus make -C banyand banyand-server\nIf no build tag is specified, the metrics server will not be started, and no metrics will be collected:\nmake -C banyand banyand-server\nWhen the Prometheus metrics provider is enabled, the metrics server listens on port 2121. This allows Prometheus to scrape metrics data from BanyanDB for monitoring and analysis.\nThe Docker image is tagged as \u0026ldquo;prometheus\u0026rdquo; to facilitate cloud-native operations and simplify deployment on Kubernetes. This allows users to directly deploy the Docker image onto their Kubernetes cluster without having to rebuild it with the \u0026ldquo;prometheus\u0026rdquo; tag.\nProfiling Banyand, the server of BanyanDB, supports profiling automatically. 
The profiling data is collected by the pprof package and can be accessed through the /debug/pprof endpoint. The port of the profiling server is 2122 by default.\nTracing TODO: Add details about the tracing support in BanyanDB, such as how to enable tracing, available tracing tools, and how to analyze tracing data.\n","excerpt":"Observability This document outlines the observability features of BanyanDB, which include metrics, …","ref":"/docs/skywalking-banyandb/latest/observability/","title":"Observability"},{"body":"Observability This document outlines the observability features of BanyanDB, which include metrics, profiling, and tracing. These features help monitor and understand the performance, behavior, and overall health of BanyanDB.\nMetrics BanyanDB has built-in support for metrics collection through the use of build tags. The metrics provider can be enabled by specifying the build tag during the compilation process.\nCurrently, there is only one supported metrics provider: Prometheus. To use Prometheus as the metrics client, include the prometheus build tag when building BanyanDB:\nBUILD_TAGS=prometheus make -C banyand banyand-server\nIf no build tag is specified, the metrics server will not be started, and no metrics will be collected:\nmake -C banyand banyand-server\nWhen the Prometheus metrics provider is enabled, the metrics server listens on port 2121. This allows Prometheus to scrape metrics data from BanyanDB for monitoring and analysis.\nThe Docker image is tagged as \u0026ldquo;prometheus\u0026rdquo; to facilitate cloud-native operations and simplify deployment on Kubernetes. This allows users to directly deploy the Docker image onto their Kubernetes cluster without having to rebuild it with the \u0026ldquo;prometheus\u0026rdquo; tag.\nProfiling Banyand, the server of BanyanDB, supports profiling automatically. The profiling data is collected by the pprof package and can be accessed through the /debug/pprof endpoint. The port of the profiling server is 2122 by default.\nTracing TODO: Add details about the tracing support in BanyanDB, such as how to enable tracing, available tracing tools, and how to analyze tracing data.\n","excerpt":"Observability This document outlines the observability features of BanyanDB, which include metrics, …","ref":"/docs/skywalking-banyandb/next/observability/","title":"Observability"},{"body":"Observability This document outlines the observability features of BanyanDB, which include metrics, profiling, and tracing. These features help monitor and understand the performance, behavior, and overall health of BanyanDB.\nMetrics BanyanDB has built-in support for metrics collection through the use of build tags. The metrics provider can be enabled by specifying the build tag during the compilation process.\nCurrently, there is only one supported metrics provider: Prometheus. To use Prometheus as the metrics client, include the prometheus build tag when building BanyanDB:\nBUILD_TAGS=prometheus make -C banyand banyand-server\nIf no build tag is specified, the metrics server will not be started, and no metrics will be collected:\nmake -C banyand banyand-server\nWhen the Prometheus metrics provider is enabled, the metrics server listens on port 2121. This allows Prometheus to scrape metrics data from BanyanDB for monitoring and analysis.\nThe Docker image is tagged as \u0026ldquo;prometheus\u0026rdquo; to facilitate cloud-native operations and simplify deployment on Kubernetes. 
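For reference, a minimal Prometheus scrape configuration for that metrics server could look like the sketch below; the job name, the target address, and the assumption that metrics are exposed on the default /metrics path are illustrative and not taken from the BanyanDB documentation.
# minimal scrape sketch for the BanyanDB metrics server on port 2121;
# the job name and target host are placeholders
scrape_configs:
  - job_name: banyandb
    static_configs:
      - targets:
          - banyandb.default.svc.cluster.local:2121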
This allows users to directly deploy the Docker image onto their Kubernetes cluster without having to rebuild it with the \u0026ldquo;prometheus\u0026rdquo; tag.\nProfiling Banyand, the server of BanyanDB, supports profiling automatically. The profiling data is collected by the pprof package and can be accessed through the /debug/pprof endpoint. The port of the profiling server is 2122 by default.\nTracing TODO: Add details about the tracing support in BanyanDB, such as how to enable tracing, available tracing tools, and how to analyze tracing data.\n","excerpt":"Observability This document outlines the observability features of BanyanDB, which include metrics, …","ref":"/docs/skywalking-banyandb/v0.5.0/observability/","title":"Observability"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   
browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time. Before 7.0.0, p99, p95, p90, p75, p50 func(s) are used to calculate metrics separately. They are still supported in 7.x, but they are no longer recommended and are not included in the current official OAL script.\n service_p99 = from(Service.latency).p99(10);\n In this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. 
.longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. 
METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. 
See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time. Before 7.0.0, p99, p95, p90, p75, p50 func(s) are used to calculate metrics separately. They are still supported in 7.x, but they are no longer recommended and are not included in the current official OAL script.\n service_p99 = from(Service.latency).p99(10);\n In this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. 
endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   
instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  
mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). 
OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   
service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. 
endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   
instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. 
In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. 
service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   
browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. 
This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. 
METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. 
See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. 
endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, and Zipkin traces of v1 and v2 formats are supported. Metrics. SkyWalking supports mature metrics formats, including native meter format, OTEL metrics format, and Telegraf format. SkyWalking integrates with Service Mesh platforms, typically Istio and Envoy, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/latest/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking OAP and UI provides dozens of features to support observability analysis for your services, cloud infrastructure, open-source components, and more.\nBesides those out-of-box features for monitoring, users could leverage the powerful and flexible analysis language to build their own analysis and visualization.\nThere are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data to build metrics of entity and topology map. 
Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on analyzing log contents to format and label them, and extract metrics from them to feed Meter Analysis Language for further analysis.  SkyWalking community is willing to accept your monitoring extension powered by these languages, if the monitoring targets are public and general usable.\n","excerpt":"Observability Analysis Platform SkyWalking OAP and UI provides dozens of features to support …","ref":"/docs/main/next/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. 
Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  
","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  
","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, and Zipkin traces of v1 and v2 formats are supported. Metrics. SkyWalking supports mature metrics formats, including native meter format, OTEL metrics format, and Telegraf format. SkyWalking integrates with Service Mesh platforms, typically Istio and Envoy, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  
","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  // Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","excerpt":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/","title":"Observations"},{"body":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  
// Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","excerpt":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/","title":"Observations"},{"body":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  // Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","excerpt":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/","title":"Observations"},{"body":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  
// Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","excerpt":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/","title":"Observations"},{"body":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  // Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","excerpt":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/","title":"Observations"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. 
In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-helm.git cd skywalking-helm/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. 
This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/latest/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. 
For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-helm.git cd skywalking-helm/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
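A sketch of how the two pieces mentioned in this note could be wired together in application.yml is shown below; the SW_LOG_ANALYZER selector variable name is assumed here, and the exact defaults in your release may differ:

```yaml
log-analyzer:
  selector: ${SW_LOG_ANALYZER:default}          # selector variable name assumed
  default:
    lalFiles: ${SW_LOG_LAL_FILES:envoy-als}     # activate the envoy-als LAL rule
envoy-metric:
  selector: ${SW_ENVOY_METRIC:default}
  default:
    alsHTTPAnalysis: ${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:mx-mesh,persistence}   # a mapping analyzer first, persistence last
```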
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/next/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example on installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. 
You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes cluster, hence in this analyzer OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works, and a step-by-step tutorial to apply it into the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works, and a step-by-step tutorial to apply it into the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.0.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. 
(activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. 
SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.1.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. 
alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.2.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. 
You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.3.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. 
(activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. 
SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.4.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. 
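When the OAP is deployed with the Helm chart (as in the Example that follows), these variables can also be kept in a values override instead of individual --set flags; a sketch, assuming the chart keys used in that Example:

```yaml
# values.yaml override for the skywalking chart (keys mirror the --set flags in the Example)
fullnameOverride: skywalking
oap:
  envoy:
    als:
      enabled: true
  env:
    SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS: mx-mesh
    SW_ENVOY_METRIC_ALS_TCP_ANALYSIS: mx-mesh
```

Pass it to the helm install command with -f values.yaml.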
For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.5.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. 
You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.6.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. 
(activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-helm.git cd skywalking-helm/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. 
SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.7.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","excerpt":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from …","ref":"/docs/main/latest/en/setup/zipkin/tracing/","title":"Observe Service Mesh through Zipkin traces"},{"body":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
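Reconstructed from the flattened snippets above, the two OAP toggles described on this page would look roughly like this in application.yml (the bodies of the default: sections are elided):

```yaml
receiver-zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:default}
  default:
    # Other configurations...
query-zipkin:
  selector: ${SW_QUERY_ZIPKIN:default}
  default:
    # Other configurations...
```

With both enabled, the OAP listens on port 9411 for Zipkin trace reporting and on port 9412 for the Zipkin query APIs, as noted above.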
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","excerpt":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from …","ref":"/docs/main/next/en/setup/zipkin/tracing/","title":"Observe Service Mesh through Zipkin traces"},{"body":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","excerpt":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from …","ref":"/docs/main/v9.4.0/en/setup/zipkin/tracing/","title":"Observe Service Mesh through Zipkin traces"},{"body":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
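The istioctl flags shown on these pages can equivalently be kept in an IstioOperator manifest and applied with istioctl install -f; a sketch mirroring that command, with the sampling rate and Zipkin address taken from the example above:

```yaml
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  profile: demo
  meshConfig:
    enableTracing: true
    defaultConfig:
      tracing:
        sampling: 100
        zipkin:
          address: oap.istio-system.svc.cluster.local:9411
```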
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","excerpt":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from …","ref":"/docs/main/v9.5.0/en/setup/zipkin/tracing/","title":"Observe Service Mesh through Zipkin traces"},{"body":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","excerpt":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from …","ref":"/docs/main/v9.6.0/en/setup/zipkin/tracing/","title":"Observe Service Mesh through Zipkin traces"},{"body":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","excerpt":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from …","ref":"/docs/main/v9.7.0/en/setup/zipkin/tracing/","title":"Observe Service Mesh through Zipkin traces"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  
","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/latest/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/next/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  
","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.0.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.1.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  
","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.2.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.3.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  
","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.4.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.5.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  
","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.6.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.7.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/latest/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/next/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.1.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.2.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.3.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.4.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.5.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.6.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.7.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  
","excerpt":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the …","ref":"/docs/main/latest/en/setup/backend/log-otlp/","title":"OpenTelemetry Logging Format"},{"body":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E Recommend to use OpenTelemetry OTLP Exporter to forward collected logs to OAP server in OTLP format, and SkyWalking OAP is responsible for transforming the data format into native log format with analysis support powered by LAL script.\n Deprecated: unmaintained and not recommended to use, will be removed.\nOpenTelemetry SkyWalking Exporter was first added into open-telemetry/opentelemetry-collector-contrib before OAP OTLP support. It transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Currently, from OTLP community, it is not well maintained, and already being marked as unmaintained, and may be removed in 2024.\n OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data.  And several attributes are optional as add-on information for the logs before analyzing.\n service.layer: the layer of the service that generates the logs. The default value is GENERAL layer, which is 100% sampled defined by LAL general rule service.instance: the instance name that generates the logs. The default value is empty.  Note, that these attributes should be set manually through OpenTelemetry SDK or through attribute#insert in OpenTelemetry Collector.\n","excerpt":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the …","ref":"/docs/main/next/en/setup/backend/log-otlp/","title":"OpenTelemetry Logging Format"},{"body":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  
OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","excerpt":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the …","ref":"/docs/main/v9.5.0/en/setup/backend/log-otlp/","title":"OpenTelemetry Logging Format"},{"body":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","excerpt":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the …","ref":"/docs/main/v9.6.0/en/setup/backend/log-otlp/","title":"OpenTelemetry Logging Format"},{"body":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] 
D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","excerpt":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the …","ref":"/docs/main/v9.7.0/en/setup/backend/log-otlp/","title":"OpenTelemetry Logging Format"},{"body":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. 
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Linux OS otel-rules/vm.yaml prometheus/node_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Windows OS otel-rules/windows.yaml prometheus-community/windows_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml 
AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-cluster.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-index.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-node.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-service.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-instance.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-cluster.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-node.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-cluster.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-node.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-clusteryaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-broker.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.      ","excerpt":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by …","ref":"/docs/main/latest/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry Metrics Format"},{"body":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. 
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\nNotice: In the resource scope, dots (.) in the attributes' key names are converted to underscores (_), whereas in the metrics scope, they are not converted.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Linux OS otel-rules/vm.yaml prometheus/node_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Windows OS otel-rules/windows.yaml prometheus-community/windows_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver 
-\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-cluster.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-index.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-node.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-service.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-instance.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-cluster.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-node.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-cluster.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-node.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-cluster.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-broker.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of ClickHouse otel-rules/clickhouse/clickhouse-instance.yaml ClickHouse(embedded prometheus endpoint) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of ClickHouse otel-rules/clickhouse/clickhouse-service.yaml ClickHouse(embedded prometheus endpoint) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RocketMQ otel-rules/rocketmq/rocketmq-cluster.yaml rocketmq-exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RocketMQ otel-rules/rocketmq/rocketmq-broker.yaml rocketmq-exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RocketMQ otel-rules/rocketmq/rocketmq-topic.yaml rocketmq-exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    
","excerpt":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by …","ref":"/docs/main/next/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry Metrics Format"},{"body":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix 
prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by …","ref":"/docs/main/v9.6.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry Metrics Format"},{"body":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Linux OS otel-rules/vm.yaml prometheus/node_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Windows OS otel-rules/windows.yaml prometheus-community/windows_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking 
OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-cluster.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-index.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-node.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-service.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-instance.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-cluster.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-node.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-cluster.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-node.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector 
\u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-cluster.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-broker.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.      ","excerpt":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by …","ref":"/docs/main/v9.7.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry Metrics Format"},{"body":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-\u0026lt;handler\u0026gt;-rules. E.g. The oc handler loads rules from $CLASSPATH/otel-oc-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in prometheus-fetcher. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc\u0026#34;}enabledOcRules:${SW_OTEL_RECEIVER_ENABLED_OC_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds labels with key = node_identifier_host_name and key = node_identifier_pid to the collected data samples, and values from Node.identifier.host_name and Node.identifier.pid defined in OpenCensus Agent Proto, for identification of the metric data.\n   Rule Name Description Configuration File Data Source     istio-controlplane Metrics of Istio Control Plane otel-oc-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   oap Metrics of SkyWalking OAP server itself otel-oc-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   vm Metrics of VMs otel-oc-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-cluster Metrics of K8s cluster otel-oc-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-node Metrics of K8s cluster otel-oc-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-service Metrics of K8s cluster otel-oc-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use 
OpenTelemetry exporter to directly transport the metrics to SkyWalking OAP. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. …","ref":"/docs/main/v9.0.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry receiver"},{"body":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-\u0026lt;handler\u0026gt;-rules. E.g. The oc handler loads rules from $CLASSPATH/otel-oc-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in prometheus-fetcher. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc\u0026#34;}enabledOcRules:${SW_OTEL_RECEIVER_ENABLED_OC_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds labels with key = node_identifier_host_name and key = node_identifier_pid to the collected data samples, and values from Node.identifier.host_name and Node.identifier.pid defined in OpenCensus Agent Proto, for identification of the metric data.\n   Rule Name Description Configuration File Data Source     istio-controlplane Metrics of Istio Control Plane otel-oc-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   oap Metrics of SkyWalking OAP server itself otel-oc-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   vm Metrics of VMs otel-oc-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-cluster Metrics of K8s cluster otel-oc-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-node Metrics of K8s cluster otel-oc-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-service Metrics of K8s cluster otel-oc-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. …","ref":"/docs/main/v9.1.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry receiver"},{"body":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. 
If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler. otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in prometheus-fetcher. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc,otlp\u0026#34;}enabledOtelRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from Node.identifier.host_name defined in OpenCensus Agent Proto, or net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Rule Name Description Configuration File Data Source     istio-controlplane Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   oap Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   vm Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-cluster Metrics of K8s cluster otel-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-node Metrics of K8s cluster otel-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-service Metrics of K8s cluster otel-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   mysql Metrics of MYSQL otel-rules/mysql.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   postgresql Metrics of PostgreSQL otel-rules/postgresql.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. …","ref":"/docs/main/v9.2.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry receiver"},{"body":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. 
The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler. otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc,otlp\u0026#34;}enabledOtelRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from Node.identifier.host_name defined in OpenCensus Agent Proto, or net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. …","ref":"/docs/main/v9.3.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry receiver"},{"body":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. 
The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler. otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc,otlp\u0026#34;}enabledOtelRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from Node.identifier.host_name defined in OpenCensus Agent Proto, or net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP 
exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. …","ref":"/docs/main/v9.4.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry receiver"},{"body":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml 
prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. …","ref":"/docs/main/v9.5.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry receiver"},{"body":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml to configure zipkin.  Setup Query and Lens UI Please read the deploy Lens UI documentation to query OTLP traces.\n","excerpt":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them …","ref":"/docs/main/latest/en/setup/backend/otlp-trace/","title":"OpenTelemetry Trace Format"},{"body":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml to configure zipkin.  
Setup Query and Lens UI Please read the deploy Lens UI documentation to query OTLP traces.\n","excerpt":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them …","ref":"/docs/main/next/en/setup/backend/otlp-trace/","title":"OpenTelemetry Trace Format"},{"body":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml to configure zipkin.  Setup Query and Lens UI Please read the deploy Lens UI documentation to query OTLP traces.\n","excerpt":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them …","ref":"/docs/main/v9.6.0/en/setup/backend/otlp-trace/","title":"OpenTelemetry Trace Format"},{"body":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml to configure zipkin.  Setup Query and Lens UI Please read the deploy Lens UI documentation to query OTLP traces.\n","excerpt":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them …","ref":"/docs/main/v9.7.0/en/setup/backend/otlp-trace/","title":"OpenTelemetry Trace Format"},{"body":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released package or scratch The core CRDs the operator supports  Operator Deployment You could provision the operator from a binary package or build from sources.\nBinary Package  Go to the download page to download the latest release binary, skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin.tgz. Unarchive the package to a folder named skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin To install the operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin/config/operator-bundle.yaml Build from sources  Download released source package or clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Build docker image from scratch. If you prefer to use your private docker image, a quick path is to override the OPERATOR_IMG environment variable: export OPERATOR_IMG=\u0026lt;private registry\u0026gt;/controller:\u0026lt;tag\u0026gt;  export OPERATOR_IMG=controller make -C operator docker-build Then, push this image controller:latest to a repository where the operator\u0026rsquo;s pod could pull from. If you use a local KinD cluster:\nkind load docker-image controller   Customize resource configurations based on the templates laid in operator/config. 
We use kustomize to build them, please refer to kustomize in case you aren\u0026rsquo;t familiar with its syntax.\n  Install the CRDs to Kubernetes:\n  make -C operator install  Use make to generate the final manifests and deploy:  make -C operator deploy Test your deployment  Deploy a sample OAP server, this will create an OAP server in the default namespace:  curl https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml | kubectl apply -f -  Check the OAP server in Kubernetes:  kubectl get oapserver  Check the UI server in Kubernetes:  kubectl get ui Troubleshooting If you encounter any issue, you can check the log of the controller by pulling it from Kubernetes:\n# get the pod name of your controller kubectl --namespace skywalking-swck-system get pods # pull the logs kubectl --namespace skywalking-swck-system logs -f [name_of_the_controller_pod] Custom Resource Definition (CRD) The custom resources that the operator introduced are:\nJavaAgent The JavaAgent custom resource definition (CRD) declaratively defines a view to tracing the injection result.\nThe java-agent-injector creates JavaAgents once it injects agents into some workloads. Refer to Java Agent for more details.\nOAP The OAP custom resource definition (CRD) declaratively defines a desired OAP setup to run in a Kubernetes cluster. It provides options to configure environment variables and how to connect a Storage.\nUI The UI custom resource definition (CRD) declaratively defines a desired UI setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nStorage The Storage custom resource definition (CRD) declaratively defines a desired storage setup to run in a Kubernetes cluster. The Storage could be managed instances onboarded by the operator or an external service. The OAP has options to select which Storage it would connect to.\n Caveat: Storage only supports Elasticsearch.\n Satellite The Satellite custom resource definition (CRD) declaratively defines a desired Satellite setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nFetcher The Fetcher custom resource definition (CRD) declaratively defines a desired Fetcher setup to run in a Kubernetes cluster. It provides options to configure OpenTelemetry collector, which fetches metrics to the deployed OAP.\nExamples of the Operator There are some instant examples to represent the functions or features of the Operator.\n Deploy OAP server and UI with default settings Fetch metrics from the Istio control plane(istiod) Inject the java agent to pods Deploy a storage Deploy a Satellite  ","excerpt":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released …","ref":"/docs/skywalking-swck/latest/operator/","title":"Operator Usage Guide"},{"body":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released package or scratch The core CRDs the operator supports  Operator Deployment You could provision the operator from a binary package or build from sources.\nBinary Package  Go to the download page to download the latest release binary, skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin.tgz. Unarchive the package to a folder named skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin To install the operator in an existing cluster, make sure you have cert-manager installed. 
Apply the manifests for the Controller and CRDs in config:  kubectl apply -f skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin/config/operator-bundle.yaml Build from sources  Download released source package or clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Build docker image from scratch. If you prefer to use your private docker image, a quick path is to override the OPERATOR_IMG environment variable: export OPERATOR_IMG=\u0026lt;private registry\u0026gt;/controller:\u0026lt;tag\u0026gt;  export OPERATOR_IMG=controller make -C operator docker-build Then, push this image controller:latest to a repository where the operator\u0026rsquo;s pod could pull from. If you use a local KinD cluster:\nkind load docker-image controller   Customize resource configurations based on the templates laid in operator/config. We use kustomize to build them, please refer to kustomize in case you aren\u0026rsquo;t familiar with its syntax.\n  Install the CRDs to Kubernetes:\n  make -C operator install  Use make to generate the final manifests and deploy:  make -C operator deploy Test your deployment  Deploy a sample OAP server, this will create an OAP server in the default namespace:  curl https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml | kubectl apply -f -  Check the OAP server in Kubernetes:  kubectl get oapserver  Check the UI server in Kubernetes:  kubectl get ui Troubleshooting If you encounter any issue, you can check the log of the controller by pulling it from Kubernetes:\n# get the pod name of your controller kubectl --namespace skywalking-swck-system get pods # pull the logs kubectl --namespace skywalking-swck-system logs -f [name_of_the_controller_pod] Custom Resource Definition (CRD) The custom resources that the operator introduced are:\nJavaAgent The JavaAgent custom resource definition (CRD) declaratively defines a view to tracing the injection result.\nThe java-agent-injector creates JavaAgents once it injects agents into some workloads. Refer to Java Agent for more details.\nOAP The OAP custom resource definition (CRD) declaratively defines a desired OAP setup to run in a Kubernetes cluster. It provides options to configure environment variables and how to connect a Storage.\nUI The UI custom resource definition (CRD) declaratively defines a desired UI setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nStorage The Storage custom resource definition (CRD) declaratively defines a desired storage setup to run in a Kubernetes cluster. The Storage could be managed instances onboarded by the operator or an external service. The OAP has options to select which Storage it would connect to.\n Caveat: Storage only supports Elasticsearch.\n Satellite The Satellite custom resource definition (CRD) declaratively defines a desired Satellite setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nFetcher The Fetcher custom resource definition (CRD) declaratively defines a desired Fetcher setup to run in a Kubernetes cluster. 
It provides options to configure OpenTelemetry collector, which fetches metrics to the deployed OAP.\nExamples of the Operator There are some instant examples to represent the functions or features of the Operator.\n Deploy OAP server and UI with default settings Fetch metrics from the Istio control plane(istiod) Inject the java agent to pods Deploy a storage Deploy a Satellite  ","excerpt":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released …","ref":"/docs/skywalking-swck/next/operator/","title":"Operator Usage Guide"},{"body":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released package or scratch The core CRDs the operator supports  Operator Deployment You could provision the operator from a binary package or build from sources.\nBinary Package  Go to the download page to download the latest release binary, skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin.tgz. Unarchive the package to a folder named skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin To install the operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin/config/operator-bundle.yaml Build from sources  Download released source package or clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Build docker image from scratch. If you prefer to use your private docker image, a quick path is to override the OPERATOR_IMG environment variable: export OPERATOR_IMG=\u0026lt;private registry\u0026gt;/controller:\u0026lt;tag\u0026gt;  export OPERATOR_IMG=controller make -C operator docker-build Then, push this image controller:latest to a repository where the operator\u0026rsquo;s pod could pull from. If you use a local KinD cluster:\nkind load docker-image controller   Customize resource configurations based on the templates laid in operator/config. We use kustomize to build them, please refer to kustomize in case you aren\u0026rsquo;t familiar with its syntax.\n  Install the CRDs to Kubernetes:\n  make -C operator install  Use make to generate the final manifests and deploy:  make -C operator deploy Test your deployment  Deploy a sample OAP server, this will create an OAP server in the default namespace:  curl https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml | kubectl apply -f -  Check the OAP server in Kubernetes:  kubectl get oapserver  Check the UI server in Kubernetes:  kubectl get ui Troubleshooting If you encounter any issue, you can check the log of the controller by pulling it from Kubernetes:\n# get the pod name of your controller kubectl --namespace skywalking-swck-system get pods # pull the logs kubectl --namespace skywalking-swck-system logs -f [name_of_the_controller_pod] Custom Resource Definition (CRD) The custom resources that the operator introduced are:\nJavaAgent The JavaAgent custom resource definition (CRD) declaratively defines a view to tracing the injection result.\nThe java-agent-injector creates JavaAgents once it injects agents into some workloads. Refer to Java Agent for more details.\nOAP The OAP custom resource definition (CRD) declaratively defines a desired OAP setup to run in a Kubernetes cluster. It provides options to configure environment variables and how to connect a Storage.\nUI The UI custom resource definition (CRD) declaratively defines a desired UI setup to run in a Kubernetes cluster. 
It provides options for how to connect an OAP.\nStorage The Storage custom resource definition (CRD) declaratively defines a desired storage setup to run in a Kubernetes cluster. The Storage could be managed instances onboarded by the operator or an external service. The OAP has options to select which Storage it would connect to.\n Caveat: Storage only supports Elasticsearch.\n Satellite The Satellite custom resource definition (CRD) declaratively defines a desired Satellite setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nFetcher The Fetcher custom resource definition (CRD) declaratively defines a desired Fetcher setup to run in a Kubernetes cluster. It provides options to configure OpenTelemetry collector, which fetches metrics to the deployed OAP.\nExamples of the Operator There are some instant examples to represent the functions or features of the Operator.\n Deploy OAP server and UI with default settings Fetch metrics from the Istio control plane(istiod) Inject the java agent to pods Deploy a storage Deploy a Satellite  ","excerpt":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released …","ref":"/docs/skywalking-swck/v0.9.0/operator/","title":"Operator Usage Guide"},{"body":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins and expired-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known 2 kinds of optional plugins.\nOptional Level 2 Plugins These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x and 4.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. 
The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  Optional Level 3 Plugins. Expired Plugins These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\nWarning, there is no guarantee of working and maintenance. The committer team may remove them from the agent package in the future without further notice.\n Plugin of Spring Impala 2.6.x was tested through parrot-stream released images. The images are not available since Mar. 2024. This plugin is expired due to lack of testing.  ","excerpt":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/optional-plugins/","title":"Optional Plugins"},{"body":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins and expired-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known 2 kinds of optional plugins.\nOptional Level 2 Plugins These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. 
The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x and 4.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  Optional Level 3 Plugins. Expired Plugins These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. 
removed from DockerHub).\nWarning, there is no guarantee of working and maintenance. The committer team may remove them from the agent package in the future without further notice.\n Plugin of Spring Impala 2.6.x was tested through parrot-stream released images. The images are not available since Mar. 2024. This plugin is expired due to lack of testing.  ","excerpt":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/optional-plugins/","title":"Optional Plugins"},{"body":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known optional plugins.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. 
Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network.  ","excerpt":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/optional-plugins/","title":"Optional Plugins"},{"body":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known optional plugins.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. 
Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  ","excerpt":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/optional-plugins/","title":"Optional Plugins"},{"body":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins and expired-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known 2 kinds of optional plugins.\nOptional Level 2 Plugins These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x and 4.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. 
The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  Optional Level 3 Plugins. Expired Plugins These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\nWarning, there is no guarantee of working and maintenance. The committer team may remove them from the agent package in the future without further notice.\n Plugin of Spring Impala 2.6.x was tested through parrot-stream released images. The images are not available since Mar. 2024. This plugin is expired due to lack of testing.  ","excerpt":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/optional-plugins/","title":"Optional Plugins"},{"body":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","excerpt":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/","title":"Oracle and Resin plugins"},{"body":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. 
If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","excerpt":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/","title":"Oracle and Resin plugins"},{"body":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","excerpt":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/","title":"Oracle and Resin plugins"},{"body":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","excerpt":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/","title":"Oracle and Resin plugins"},{"body":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","excerpt":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/","title":"Oracle and Resin plugins"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. 
SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using Zipkin. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry, Telegraf.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/latest/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nSkyWalking covers all the observability needs in Cloud Native world, including:\n Tracing. 
SkyWalking native data formats, and Zipkin traces of v1 and v2 formats are supported. Metrics. SkyWalking supports mature metrics formats, including native meter format, OTEL metrics format, and Telegraf format. SkyWalking integrates with Service Mesh platforms, typically Istio and Envoy, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content. Profiling. Profiling is a powerful tool to help developers understand the performance of their applications from lines of codes perspective. SkyWalking provides profiling feature bundled in native language agents and independent ebpf agents. Event. Event is a special kind of data, which is used to record the important moments in the system, such as version upgrade, configuration change, etc. Linking the events with metrics could help on explain the peaks or valleys in the metrics, and linking the events with traces and logs could help on troubleshooting root cause.  Why use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Python, and manually SDKs for C++, Rust, and Nginx LUA. In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy, allowing users to understand the entire distributed system. Powered by eBPF stack, SkyWalking provides k8s monitoring. Also, by adopting OpenTelemetry, Telegraf, Zabbix, Zipkin, Prometheus, SkyWalking can integrate with other distributed tracing, metrics and logging systems and build a unified APM system to host all data.\nBesides the support of various kinds of telemetry formats, the hierarchy structure of objects in SkyWalking is defined as service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), and Kubernetes(k8s layer). A layer is an abstract collection of services. A service typically only belongs to one layer, but in some scenarios, a service could belong to multiple layers. For example, a service could be deployed in an Istio service mesh, it could belong to mesh and mesh-dp(mesh data plane) layer. Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. 
An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, also detect API dependencies in the distributed environment if you use our native agents.\nBesides topology map, SkyWalking provides Service Hierarchy Relationship, which defines the relationships of existing logically same services in various layers. For example, a service could be deployed in a Kubernetes cluster with Istio mesh, services are detected by k8s monitoring and Istio mesh, this hierarchy relationship could connect the services in k8s layer and mesh layer.\nArchitecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/next/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. 
Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect data and reformat them for SkyWalking requirements (different probes support different sources). Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, and logs. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, InfluxDB, or implement your own. Patches for new storage implementors welcome! UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. 
Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect data and reformat them for SkyWalking requirements (different probes support different sources). Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, and logs. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, InfluxDB, or implement your own. Patches for new storage implementors welcome! UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. 
SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? 
SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  
","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. 
You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin and Jaeger. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  
Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). 
All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin and Jaeger. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  
SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using Zipkin. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry, Telegraf.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking Rover is an open-source collector, which provides a eBPF-based monitor and profiler in the Kubernetes.\nWhy use SkyWalking Rover? On the Kubernetes platform, we could collect a lot of telemetry data. Rover could collect them based on the eBPF technology, and upload them to the SkyWalking backend for analysis, aggregate, and visualize them.\n EBPF-based profiling for C, C++, Golang, and Rust. Network profiling for L4(TCP) and L7(HTTP) traffic, including with TLS. Tracing enhancement. Collect extra information from OS level as attached events for the existing tracing system, such as attach raw data of HTTP request and response. Network monitoring for generating network access logs.  Architecture  Process represents the data monitored by Rover. Rover is deployed in the VM instance, collects data in VM and Process, and reports it to the OAP cluster. OAP collect data from the rover side, analysis, and stores them.  ","excerpt":"Overview SkyWalking Rover is an open-source collector, which provides a eBPF-based monitor and …","ref":"/docs/skywalking-rover/latest/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking Rover is an open-source collector, which provides a eBPF-based monitor and profiler in the Kubernetes.\nWhy use SkyWalking Rover? On the Kubernetes platform, we could collect a lot of telemetry data. Rover could collect them based on the eBPF technology, and upload them to the SkyWalking backend for analysis, aggregate, and visualize them.\n EBPF-based profiling for C, C++, Golang, and Rust. Network profiling for L4(TCP) and L7(HTTP) traffic, including with TLS. Tracing enhancement. 
Collect extra information from the OS level as attached events for the existing tracing system, such as attaching the raw data of HTTP requests and responses. Network monitoring for generating network access logs.  Architecture  Process represents the data monitored by Rover. Rover is deployed in the VM instance, collects data from the VM and its processes, and reports it to the OAP cluster. OAP collects the data from the Rover side, analyzes it, and stores it.  ","excerpt":"Overview SkyWalking Rover is an open-source collector, which provides an eBPF-based monitor and …","ref":"/docs/skywalking-rover/next/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking Rover is an open-source collector, which provides an eBPF-based monitor and profiler in Kubernetes.\nWhy use SkyWalking Rover? On the Kubernetes platform, we can collect a lot of telemetry data. Rover collects it based on eBPF technology and uploads it to the SkyWalking backend for analysis, aggregation, and visualization.\n eBPF-based profiling for C, C++, Golang, and Rust. Network profiling for L4(TCP) and L7(HTTP) traffic, including with TLS. Tracing enhancement. Collect extra information from the OS level as attached events for the existing tracing system, such as attaching the raw data of HTTP requests and responses. Network monitoring for generating network access logs.  Architecture  Process represents the data monitored by Rover. Rover is deployed in the VM instance, collects data from the VM and its processes, and reports it to the OAP cluster. OAP collects the data from the Rover side, analyzes it, and stores it.  ","excerpt":"Overview SkyWalking Rover is an open-source collector, which provides an eBPF-based monitor and …","ref":"/docs/skywalking-rover/v0.6.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking Satellite: an open-source agent designed for cloud-native infrastructures, which provides a low-cost, high-efficiency, and more secure way to collect telemetry data, such as Trace Segments, Logs, or Metrics.\nWhy use SkyWalking Satellite? Observability is the solution to the complex scenarios of cloud-native services. However, we may encounter different telemetry data scenarios, different language services, big data analysis, etc. Satellite provides a unified data collection layer for cloud-native services. You can easily use it to connect to the SkyWalking ecosystem and enhance the capacity of SkyWalking. Satellite adds the following enhancements:\n Provides a unified data collection layer to collect logs, traces, and metrics. Provides a safer local cache to reduce the memory cost of the service. Provides a unified transfer method that shields the functional differences between different language libs, such as MQ. Provides preprocessing functions, such as sampling, to ensure the accuracy of metrics.  Architecture SkyWalking Satellite is logically split into three parts: Gatherer, Processor, and Sender.\n Gatherer collects data and reformats it to meet SkyWalking requirements. Processor processes the input data to generate new data for observability. Sender transfers the downstream data to the SkyWalking OAP with different protocols.  
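To illustrate how the three parts could be composed, here is a minimal, hypothetical Go sketch of a Gatherer -> Processor -> Sender pipeline. The interfaces, the run helper, and the trivial implementations are invented for illustration and do not correspond to SkyWalking Satellite's actual plugin API.

```go
package main

import "fmt"

// Hypothetical event type standing in for a trace segment, log, or metric.
type Event struct{ Payload string }

// Illustrative interfaces only; Satellite's real plugin API differs.
type Gatherer interface{ Gather() []Event }              // collect and reformat input data
type Processor interface{ Process(in []Event) []Event }  // derive new data for observability
type Sender interface{ Send(out []Event) error }         // forward downstream data to OAP

// run wires the three stages together in order.
func run(g Gatherer, p Processor, s Sender) error {
	return s.Send(p.Process(g.Gather()))
}

// Trivial in-memory implementations so the sketch runs end to end.
type logGatherer struct{}

func (logGatherer) Gather() []Event { return []Event{{Payload: "access log line"}} }

type samplingProcessor struct{}

// A real processor might sample or aggregate here; this one passes data through.
func (samplingProcessor) Process(in []Event) []Event { return in }

type stdoutSender struct{}

func (stdoutSender) Send(out []Event) error { fmt.Println(out); return nil }

func main() {
	if err := run(logGatherer{}, samplingProcessor{}, stdoutSender{}); err != nil {
		fmt.Println("send failed:", err)
	}
}
```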
","excerpt":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, which provides a low-cost, high-efficient, and more secure way to collect telemetry data, such that Trace Segments, Logs, or Metrics.\nWhy use SkyWalking Satellite? Observability is the solution to the complex scenario of cloud-native services. However, we may encounter different telemetry data scenarios, different language services, big data analysis, etc. Satellite provides a unified data collection layer for cloud-native services. You can easily use it to connect to the SkyWalking ecosystem and enhance the capacity of SkyWalking. There are some enhance features on the following when using Satellite.\n Provide a unified data collection layer to collect logs, traces, and metrics. Provide a safer local cache to reduce the memory cost of the service. Provide the unified transfer way shields the functional differences in the different language libs, such as MQ. Provides the preprocessing functions to ensure accuracy of the metrics, such as sampling.  Architecture SkyWalking Satellite is logically split into three parts: Gatherer, Processor, and Sender.\n Gatherer collect data and reformat them for SkyWalking requirements. Processor processes the input data to generate the new data for Observability. Sender would transfer the downstream data to the SkyWalking OAP with different protocols.  ","excerpt":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, which provides a low-cost, high-efficient, and more secure way to collect telemetry data, such that Trace Segments, Logs, or Metrics.\nWhy use SkyWalking Satellite? Observability is the solution to the complex scenario of cloud-native services. However, we may encounter different telemetry data scenarios, different language services, big data analysis, etc. Satellite provides a unified data collection layer for cloud-native services. You can easily use it to connect to the SkyWalking ecosystem and enhance the capacity of SkyWalking. There are some enhance features on the following when using Satellite.\n Provide a unified data collection layer to collect logs, traces, and metrics. Provide a safer local cache to reduce the memory cost of the service. Provide the unified transfer way shields the functional differences in the different language libs, such as MQ. Provides the preprocessing functions to ensure accuracy of the metrics, such as sampling.  Architecture SkyWalking Satellite is logically split into three parts: Gatherer, Processor, and Sender.\n Gatherer collect data and reformat them for SkyWalking requirements. Processor processes the input data to generate the new data for Observability. Sender would transfer the downstream data to the SkyWalking OAP with different protocols.  
","excerpt":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, …","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Performance best practices  Following changes are expected in the next official release (v1.1.0).\n The Python agent currently uses a number of threads to communicate with SkyWalking OAP, it is planned to be refactored using AsyncIO (Uvloop) along with an async version of gRPC(aio-client)/HTTP(aiohttp/httpx)/Kafka(aio-kafka) to further minimize the cost of thread switching and IO time.\nFor now, we still have a few points to mention to keep the overhead to your application minimal.\n When using the gRPC protocol to report data, a higher version of gRPC is always recommended. Please also make sure that:  By running python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._implementation_type)\u0026quot;, or python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._default_implementation_type)\u0026quot; you should either see upb or cpp as the returned value. It means the Protobuf library is using a much faster implementation than Python native. If not, try setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION='cpp' or 'upb' or upgrade the gRPC dependency (SkyWalking Python will use whatever version your application uses).   Though HTTP is provided as an alternative, it could be slower compared to other protocols, Kafka is often a good choice when gRPC is not suitable. When some features are not needed in your use case, you could turn them off either via config.init(agent_some_reporter_active=False) or environment variables. Use ignore_path, ignore_method, and log filters to avoid reporting less valuable data that is of large amount. Log reporter safe mode is designed for situations where HTTP basic auth info could be visible in traceback and logs but shouldn\u0026rsquo;t be reported to OAP. You should keep the option as OFF if it\u0026rsquo;s not your case because frequent regular expression searches will inevitably introduce overhead to the CPU. Do not turn on sw-python CLI or agent debug logging in production, otherwise large amount of log will be produced.  sw-python CLI debug mode will automatically turn on agent debug log (override from sitecustomize.py).    ","excerpt":"Performance best practices  Following changes are expected in the next official release (v1.1.0). …","ref":"/docs/skywalking-python/latest/en/setup/faq/performance/","title":"Performance best practices"},{"body":"Performance best practices  Following changes are expected in the next official release (v1.1.0).\n The Python agent currently uses a number of threads to communicate with SkyWalking OAP, it is planned to be refactored using AsyncIO (Uvloop) along with an async version of gRPC(aio-client)/HTTP(aiohttp/httpx)/Kafka(aio-kafka) to further minimize the cost of thread switching and IO time.\nFor now, we still have a few points to mention to keep the overhead to your application minimal.\n When using the gRPC protocol to report data, a higher version of gRPC is always recommended. 
Please also make sure that:  By running python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._implementation_type)\u0026quot;, or python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._default_implementation_type)\u0026quot; you should either see upb or cpp as the returned value. It means the Protobuf library is using a much faster implementation than Python native. If not, try setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION='cpp' or 'upb' or upgrade the gRPC dependency (SkyWalking Python will use whatever version your application uses).   Though HTTP is provided as an alternative, it could be slower compared to other protocols, Kafka is often a good choice when gRPC is not suitable. When some features are not needed in your use case, you could turn them off either via config.init(agent_some_reporter_active=False) or environment variables. Use ignore_path, ignore_method, and log filters to avoid reporting less valuable data that is of large amount. Log reporter safe mode is designed for situations where HTTP basic auth info could be visible in traceback and logs but shouldn\u0026rsquo;t be reported to OAP. You should keep the option as OFF if it\u0026rsquo;s not your case because frequent regular expression searches will inevitably introduce overhead to the CPU. Do not turn on sw-python CLI or agent debug logging in production, otherwise large amount of log will be produced.  sw-python CLI debug mode will automatically turn on agent debug log (override from sitecustomize.py).    ","excerpt":"Performance best practices  Following changes are expected in the next official release (v1.1.0). …","ref":"/docs/skywalking-python/next/en/setup/faq/performance/","title":"Performance best practices"},{"body":"Performance best practices  Following changes are expected in the next official release (v1.1.0).\n The Python agent currently uses a number of threads to communicate with SkyWalking OAP, it is planned to be refactored using AsyncIO (Uvloop) along with an async version of gRPC(aio-client)/HTTP(aiohttp/httpx)/Kafka(aio-kafka) to further minimize the cost of thread switching and IO time.\nFor now, we still have a few points to mention to keep the overhead to your application minimal.\n When using the gRPC protocol to report data, a higher version of gRPC is always recommended. Please also make sure that:  By running python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._implementation_type)\u0026quot;, or python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._default_implementation_type)\u0026quot; you should either see upb or cpp as the returned value. It means the Protobuf library is using a much faster implementation than Python native. If not, try setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION='cpp' or 'upb' or upgrade the gRPC dependency (SkyWalking Python will use whatever version your application uses).   Though HTTP is provided as an alternative, it could be slower compared to other protocols, Kafka is often a good choice when gRPC is not suitable. When some features are not needed in your use case, you could turn them off either via config.init(agent_some_reporter_active=False) or environment variables. Use ignore_path, ignore_method, and log filters to avoid reporting less valuable data that is of large amount. 
Log reporter safe mode is designed for situations where HTTP basic auth info could be visible in tracebacks and logs but shouldn\u0026rsquo;t be reported to OAP. You should keep the option OFF if that is not your case, because frequent regular expression searches will inevitably introduce CPU overhead. Do not turn on sw-python CLI or agent debug logging in production, otherwise a large amount of log output will be produced.  sw-python CLI debug mode will automatically turn on the agent debug log (override from sitecustomize.py).    ","excerpt":"Performance best practices  Following changes are expected in the next official release (v1.1.0). …","ref":"/docs/skywalking-python/v1.0.1/en/setup/faq/performance/","title":"Performance best practices"},{"body":"Performance Tests Performance testing is used to verify the impact on application performance when using SkyWalking Go.\nTest Objective By launching both the agent and non-agent compiled applications, we subject them to the same QPS under stress testing, evaluating the CPU, memory, and network latency of the machine during the testing period.\nThe application has been saved and submitted to the test/benchmark-codebase directory, with the following topology:\ntraffic generator -\u0026gt; consumer -\u0026gt; provider The payload (traffic) generator uses multithreading to send HTTP requests to the consumer service. When the consumer receives a request, it sends three requests to the provider service to obtain return data results. Based on these network requests, when using SkyWalking Go, the consumer service generates four Spans (1 Entry Span, 3 Exit Spans).\nApplication The application\u0026rsquo;s integration with SkyWalking Go follows the same process as other applications. For more information, please refer to the documentation.\nIn the application, we use loops and mathematical calculations (math.Log) to simulate the execution of the business program. This consumes a certain amount of CPU, preventing idle processing during service stress testing and amplifying the impact of the Agent program on the business application.\nStress Testing Service We use the Vegeta service for stress testing, which launches traffic at a specified QPS to the application. It is based on the Go language and uses goroutines to provide a more efficient stress testing solution.\nTest Environment A total of 4 GCP machines are launched; all instances run on 4C8G VMs.\n traffic generator: Used for generating traffic to the consumer machine. consumer: Used for deploying the consumer service. provider: Used for deploying the provider service. skywalking: Used for deploying the SkyWalking backend cluster, providing a standalone OAP node (in-memory H2 storage) and a UI interface.  
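For readers who want a feel for the topology described above, below is a minimal, hypothetical Go sketch of the consumer behaviour (one incoming request fans out into three calls to the provider, plus a math.Log loop to simulate business work). It is not the code from the test/benchmark-codebase directory, and the flag wiring is assumed for illustration.

```go
package main

import (
	"flag"
	"fmt"
	"io"
	"log"
	"math"
	"net/http"
)

// The benchmark passes the provider address via a "provider" flag; the default here is assumed.
var providerURL = flag.String("provider", "http://provider:8080/provider", "provider endpoint")

// burnCPU imitates the business-logic simulation (loops + math.Log) described above.
func burnCPU() float64 {
	sum := 0.0
	for i := 1; i < 10000; i++ {
		sum += math.Log(float64(i))
	}
	return sum
}

func consumerHandler(w http.ResponseWriter, r *http.Request) {
	burnCPU()
	// One incoming request triggers three provider calls, which is why the agent-enabled
	// build produces four spans per request (1 entry span + 3 exit spans).
	for i := 0; i < 3; i++ {
		resp, err := http.Get(*providerURL)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadGateway)
			return
		}
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}
	fmt.Fprintln(w, "ok")
}

func main() {
	flag.Parse()
	http.HandleFunc("/consumer", consumerHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```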
Each service is deployed on a separate machine to ensure there is no interference with one another.\nTest Process Preparation Phase The preparation phase is used to ensure that all machines and test case preparations are completed.\nTraffic Generator Install the Vegeta service on the stress testing instance and create the following file(request.txt) to simulate traffic usage.\nGET http://${CONSUMER_IP}:8080/consumer Sw8: 1-MWYyZDRiZjQ3YmY3MTFlYWI3OTRhY2RlNDgwMDExMjI=-MWU3YzIwNGE3YmY3MTFlYWI4NThhY2RlNDgwMDExMjI=-0-c2VydmljZQ==-aW5zdGFuY2U=-cHJvcGFnYXRpb24=-cHJvcGFnYXRpb246NTU2Ng== Please replace the above CONSUMER_IP with the real IP address of the consumer instance.\nConsumer and Provider Install the skywalking-go service on the machines to be tested, and compile with and without the Agent.\nModify the machine\u0026rsquo;s file limit to prevent the inability to create new connections due to excessive handles: ulimit -n 65536.\nStart the provider service(without Agent) and obtain the provider machine\u0026rsquo;s IP address. Please provide this address when starting the consumer machine later.\nSkyWalking Download the SkyWalking service, modify the SkyWalking OAP startup script to increase the memory size, preventing OAP crashes due to insufficient memory.\nTesting without Agent  Start the Consumer service without the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Testing with Agent The only difference in the test without the Agent is the version of the consumer that is compiled and launched.\n Add the SW_AGENT_REPORTER_GRPC_BACKEND_SERVICE environment variables to the consumer service, for setting the IP address of the SkyWalking OAP service. Start the Consumer service with the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Test Results In the tests, we used 1000 QPS as a benchmark to stress test both the Consumer services with and without the Agent.\n In the non-Agent version, the CPU usage was around 74%, memory usage was 2.53%, and the average response time for a single request was 4.18ms. In the Agent-compiled version, the CPU usage was around 81%, memory usage was 2.61%, and the average response time for a single request was 4.32ms.  From these results, we can conclude that after adding the Agent, the CPU usage increased by about 9%, memory usage experienced almost no growth, and the average response time for requests increased by approximately 0.15ms.\nExplanation, approximately 0.15ms is the in-band cost. 
The most of CPU(extra 9%) cost are due to the amount of out of band data being sent to the collectors from the application(consumer), which is 4000 spans/s in our test case.\n","excerpt":"Performance Tests Performance testing is used to verify the impact on application performance when …","ref":"/docs/skywalking-go/latest/en/agent/performance-tests/","title":"Performance Tests"},{"body":"Performance Tests Performance testing is used to verify the impact on application performance when using SkyWalking Go.\nTest Objective By launching both the agent and non-agent compiled applications, we subject them to the same QPS under stress testing, evaluating the CPU, memory, and network latency of the machine during the testing period.\nThe application has been saved and submitted to the test/benchmark-codebase directory, with the following topology:\ntraffic generator -\u0026gt; consumer -\u0026gt; provider The payload(traffic) generator uses multithreading to send HTTP requests to the consumer service. When the consumer receives a request, it sends three requests to the provider service to obtain return data results. Based on these network requests, when using SkyWalking Go, the consumer service generates four Spans (1 Entry Span, 3 Exit Spans).\nApplication The application\u0026rsquo;s integration with SkyWalking Go follows the same process as other applications. For more information, please refer to the documentation.\nIn the application, we use loops and mathematical calculations (math.Log) to simulate the execution of the business program. This consumes a certain amount of CPU usage, preventing idle processing during service stress testing and amplifying the impact of the Agent program on the business application.\nStress Testing Service We use the Vegeta service for stress testing, which launches traffic at a specified QPS to the application. It is based on the Go language and uses goroutines to provide a more efficient stress testing solution.\nTest Environment A total of 4 GCP machines are launched, all instances are running on tbe 4C8G VM.\n traffic generator: Used for deploying traffic to the consumer machine. consumer: Used for deploying the consumer service. provider: Used for deploying the provider service. skywalking: Used for deploying the SkyWalking backend cluster, providing a standalone OAP node (in-memory H2 storage) and a UI interface.  Each service is deployed on a separate machine to ensure there is no interference with one another.\nTest Process Preparation Phase The preparation phase is used to ensure that all machines and test case preparations are completed.\nTraffic Generator Install the Vegeta service on the stress testing instance and create the following file(request.txt) to simulate traffic usage.\nGET http://${CONSUMER_IP}:8080/consumer Sw8: 1-MWYyZDRiZjQ3YmY3MTFlYWI3OTRhY2RlNDgwMDExMjI=-MWU3YzIwNGE3YmY3MTFlYWI4NThhY2RlNDgwMDExMjI=-0-c2VydmljZQ==-aW5zdGFuY2U=-cHJvcGFnYXRpb24=-cHJvcGFnYXRpb246NTU2Ng== Please replace the above CONSUMER_IP with the real IP address of the consumer instance.\nConsumer and Provider Install the skywalking-go service on the machines to be tested, and compile with and without the Agent.\nModify the machine\u0026rsquo;s file limit to prevent the inability to create new connections due to excessive handles: ulimit -n 65536.\nStart the provider service(without Agent) and obtain the provider machine\u0026rsquo;s IP address. 
Please provide this address when starting the consumer machine later.\nSkyWalking Download the SkyWalking service, modify the SkyWalking OAP startup script to increase the memory size, preventing OAP crashes due to insufficient memory.\nTesting without Agent  Start the Consumer service without the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Testing with Agent The only difference in the test without the Agent is the version of the consumer that is compiled and launched.\n Add the SW_AGENT_REPORTER_GRPC_BACKEND_SERVICE environment variables to the consumer service, for setting the IP address of the SkyWalking OAP service. Start the Consumer service with the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Test Results In the tests, we used 1000 QPS as a benchmark to stress test both the Consumer services with and without the Agent.\n In the non-Agent version, the CPU usage was around 74%, memory usage was 2.53%, and the average response time for a single request was 4.18ms. In the Agent-compiled version, the CPU usage was around 81%, memory usage was 2.61%, and the average response time for a single request was 4.32ms.  From these results, we can conclude that after adding the Agent, the CPU usage increased by about 9%, memory usage experienced almost no growth, and the average response time for requests increased by approximately 0.15ms.\nExplanation, approximately 0.15ms is the in-band cost. The most of CPU(extra 9%) cost are due to the amount of out of band data being sent to the collectors from the application(consumer), which is 4000 spans/s in our test case.\n","excerpt":"Performance Tests Performance testing is used to verify the impact on application performance when …","ref":"/docs/skywalking-go/next/en/agent/performance-tests/","title":"Performance Tests"},{"body":"Performance Tests Performance testing is used to verify the impact on application performance when using SkyWalking Go.\nTest Objective By launching both the agent and non-agent compiled applications, we subject them to the same QPS under stress testing, evaluating the CPU, memory, and network latency of the machine during the testing period.\nThe application has been saved and submitted to the test/benchmark-codebase directory, with the following topology:\ntraffic generator -\u0026gt; consumer -\u0026gt; provider The payload(traffic) generator uses multithreading to send HTTP requests to the consumer service. When the consumer receives a request, it sends three requests to the provider service to obtain return data results. 
Based on these network requests, when using SkyWalking Go, the consumer service generates four Spans (1 Entry Span, 3 Exit Spans).\nApplication The application\u0026rsquo;s integration with SkyWalking Go follows the same process as other applications. For more information, please refer to the documentation.\nIn the application, we use loops and mathematical calculations (math.Log) to simulate the execution of the business program. This consumes a certain amount of CPU usage, preventing idle processing during service stress testing and amplifying the impact of the Agent program on the business application.\nStress Testing Service We use the Vegeta service for stress testing, which launches traffic at a specified QPS to the application. It is based on the Go language and uses goroutines to provide a more efficient stress testing solution.\nTest Environment A total of 4 GCP machines are launched, all instances are running on tbe 4C8G VM.\n traffic generator: Used for deploying traffic to the consumer machine. consumer: Used for deploying the consumer service. provider: Used for deploying the provider service. skywalking: Used for deploying the SkyWalking backend cluster, providing a standalone OAP node (in-memory H2 storage) and a UI interface.  Each service is deployed on a separate machine to ensure there is no interference with one another.\nTest Process Preparation Phase The preparation phase is used to ensure that all machines and test case preparations are completed.\nTraffic Generator Install the Vegeta service on the stress testing instance and create the following file(request.txt) to simulate traffic usage.\nGET http://${CONSUMER_IP}:8080/consumer Sw8: 1-MWYyZDRiZjQ3YmY3MTFlYWI3OTRhY2RlNDgwMDExMjI=-MWU3YzIwNGE3YmY3MTFlYWI4NThhY2RlNDgwMDExMjI=-0-c2VydmljZQ==-aW5zdGFuY2U=-cHJvcGFnYXRpb24=-cHJvcGFnYXRpb246NTU2Ng== Please replace the above CONSUMER_IP with the real IP address of the consumer instance.\nConsumer and Provider Install the skywalking-go service on the machines to be tested, and compile with and without the Agent.\nModify the machine\u0026rsquo;s file limit to prevent the inability to create new connections due to excessive handles: ulimit -n 65536.\nStart the provider service(without Agent) and obtain the provider machine\u0026rsquo;s IP address. Please provide this address when starting the consumer machine later.\nSkyWalking Download the SkyWalking service, modify the SkyWalking OAP startup script to increase the memory size, preventing OAP crashes due to insufficient memory.\nTesting without Agent  Start the Consumer service without the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Testing with Agent The only difference in the test without the Agent is the version of the consumer that is compiled and launched.\n Add the SW_AGENT_REPORTER_GRPC_BACKEND_SERVICE environment variables to the consumer service, for setting the IP address of the SkyWalking OAP service. Start the Consumer service with the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. 
Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Test Results In the tests, we used 1000 QPS as a benchmark to stress test both the Consumer services with and without the Agent.\n In the non-Agent version, the CPU usage was around 74%, memory usage was 2.53%, and the average response time for a single request was 4.18ms. In the Agent-compiled version, the CPU usage was around 81%, memory usage was 2.61%, and the average response time for a single request was 4.32ms.  From these results, we can conclude that after adding the Agent, the CPU usage increased by about 9%, memory usage experienced almost no growth, and the average response time for requests increased by approximately 0.15ms.\nExplanation, approximately 0.15ms is the in-band cost. The most of CPU(extra 9%) cost are due to the amount of out of band data being sent to the collectors from the application(consumer), which is 4000 spans/s in our test case.\n","excerpt":"Performance Tests Performance testing is used to verify the impact on application performance when …","ref":"/docs/skywalking-go/v0.4.0/en/agent/performance-tests/","title":"Performance Tests"},{"body":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including write-ahead logging(WAL), index, and data collected from skywalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the need of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. By exposing necessary interfaces, upper components do not need to care how persistence is implemented and avoid dealing with differences between different operating systems.\nArchitecture BanyanDB uses third-party storage for actual storage, and the file system shields the differences between different platforms and storage systems, allowing developers to operate files as easily as the local file system without worrying about specific details.\nFor different data models, stored in different locations, such as for meta and wal data, BanyanDB uses a local file system for storage. For index and data, the architecture of the file system is divided into three layers.\n The first layer is the API interface, which developers only need to care about how to operate the remote file system. The second layer is the storage system adapter, which is used to mask the differences between different storage systems. The last layer is the actual storage system. With the use of remote storage architecture, the local system can still play its role and can borrow the local system to speed up reading and writing.  IO Mode Persistence storage offers a range of IO modes to cater to various throughput requirements. The interface can be accessed by developers and can be configured through settings, which can be set in the configuration file.\nIo_uring Io_uring is a new feature in Linux 5.1, which is fully asynchronous and offers high throughput. In the scene of massive storage, io_uring can bring significant benefits. The following is the diagram about how io_uring works. 
If the user sets io_uring for use, the read and write requests will first be placed in the submission queue buffer when calling the operation API. When the threshold is reached, batch submissions will be made to the SQ. After the kernel threads complete execution, the requests will be placed in the CQ, and the user can obtain the request results.\nSynchronous IO The most common IO mode is Synchronous IO, but it has a relatively low throughput. BanyanDB provides a nonblocking mode that is compatible with lower Linux versions.\nOperation Directory Create Create the specified directory and return the file descriptor; an error will occur if the directory already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\npermission: Permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. You can use it as Mode.Read.\nCreateDirectory(name String, permission Mode) (error)\nOpen Open the directory and return an error if the file descriptor does not exist. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\nreturn: Directory pointer, you can use it for various operations.\nOpenDirectory(name String) (*Dir, error)\nDelete Delete the directory and all its files; return an error if the directory does not exist or is not readable or writable. The following is the pseudocode that calls the API in the go style.\nDir.DeleteDirectory() (error)\nRename Rename the directory and return an error if a directory with the new name already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nnewName: The new name of the directory.\nDir.RenameDirectory(newName String) (error)\nRead Get the list of files or child directories in the directory; return an error if the directory does not exist. The following is the pseudocode that calls the API in the go style.\nreturn: List of files belonging to the directory.\nDir.ReadDirectory() (FileList, error)\nPermission When creating a directory, the default owner is the user who created the directory. The owner can specify the read and write permissions of the directory. If not specified, the default is read and write permissions, which include permissions for all files in the directory. The following is the pseudocode that calls the API in the go style.\nparam:\npermission: Permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. You can use it as Mode.Read.\nDir.SetDirectoryPermission(permission Mode) (error)\nFile Create Create the specified file and return the file descriptor; an error will occur if the file already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\npermission: Permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. You can use it as Mode.Read.\nCreateFile(name String, permission Mode) (error)\nOpen Open the file and return an error if the file descriptor does not exist. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\nreturn: File pointer, you can use it for various operations.\nOpenFile(name String) (*File, error)\nWrite BanyanDB provides two methods for writing files. Append mode, which adds new data to the end of a file. This mode is typically used for WAL. BanyanDB also supports vector Append mode, which appends consecutive buffers to the end of the file. Flush mode, which flushes all data to one file. 
It will return an error when writing a directory, the file does not exist or there is not enough space, and the incomplete file will be discarded. The flush operation is atomic, which means the file won\u0026rsquo;t be created if an error happens during the flush process. The following is the pseudocode that calls the API in the go style.\nFor append mode:\nparam:\nbuffer: The data append to the file.\nFile.AppendWriteFile(buffer []byte) (error)\nFor vector append mode:\nparam:\niov: The data in consecutive buffers.\nFile.AppendWritevFile(iov *[][]byte) (error)\nFor flush mode:\nparam:\nbuffer: The data append to the file.\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nreturn: File pointer, you can use it for various operations.\nFlushWriteFile(buffer []byte, permission Mode) (*File, error)\nDelete BanyanDB provides the deleting operation, which can delete a file at once. it will return an error if the directory does not exist or the file not reading or writing.\nThe following is the pseudocode that calls the API in the go style.\nFile.DeleteFile() (error)\nRead For reading operation, two read methods are provided: Reading a specified location of data, which relies on a specified offset and a buffer. And BanyanDB supports reading contiguous regions of a file and dispersing them into discontinuous buffers. Read the entire file, BanyanDB provides stream reading, which can use when the file is too large, the size gets each time can be set when using stream reading. If entering incorrect parameters such as incorrect offset or non-existent file, it will return an error. The following is the pseudocode that calls the API in the go style.\nFor reading specified location of data:\nparam:\noffset: Read begin location of the file.\nbuffer: The read length is the same as the buffer length.\nFile.ReadFile(offset int, buffer []byte) (error)\nFor vector reading:\nparam:\niov: Discontinuous buffers in memory.\nFile.ReadvFile(iov *[][]byte) (error)\nFor stream reading:\nparam:\noffset: Read begin location of the file.\nbuffer: Every read length in the stream is the same as the buffer length.\nreturn: A Iterator, the size of each iteration is the length of the buffer.\nFile.StreamReadFile(offset int, buffer []byte) (*iter, error)\nRename Rename the file and return an error if the directory exists in this directory. The following is the pseudocode that calls the API in the go style.\nparam:\nnewName: The new name of the file.\nFile.RenameFile(newName String) (error)\nGet size Get the file written data\u0026rsquo;s size and return an error if the file does not exist. The unit of file size is Byte. The following is the pseudocode that calls the API in the go style.\nreturn: the file written data\u0026rsquo;s size.\nFile.GetFileSize() (int, error)\nPermission When creating a file, the default owner is the user who created the file. The owner can specify the read and write permissions of the file. If not specified, the default is read and write permissions. The following is the pseudocode that calls the API in the go style.\nparam:\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. 
you can use it as Mode.Read.\nFile.SetFilePermission(permission Mode) (error)\n","excerpt":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including …","ref":"/docs/skywalking-banyandb/latest/concept/persistence-storage/","title":"Persistence Storage"},{"body":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including index, and data collected from skywalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the need of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. By exposing necessary interfaces, upper components do not need to care how persistence is implemented and avoid dealing with differences between different operating systems.\nArchitecture BanyanDB uses third-party storage for actual storage, and the file system shields the differences between different platforms and storage systems, allowing developers to operate files as easily as the local file system without worrying about specific details.\nFor different data models, stored in different locations, such as for meta data, BanyanDB uses a local file system for storage. For index and data, the architecture of the file system is divided into three layers.\n The first layer is the API interface, which developers only need to care about how to operate the remote file system. The second layer is the storage system adapter, which is used to mask the differences between different storage systems. The last layer is the actual storage system. With the use of remote storage architecture, the local system can still play its role and can borrow the local system to speed up reading and writing.  IO Mode Persistence storage offers a range of IO modes to cater to various throughput requirements. The interface can be accessed by developers and can be configured through settings, which can be set in the configuration file.\nIo_uring Io_uring is a new feature in Linux 5.1, which is fully asynchronous and offers high throughput. In the scene of massive storage, io_uring can bring significant benefits. The following is the diagram about how io_uring works. If the user sets io_uring for use, the read and write requests will first be placed in the submission queue buffer when calling the operation API. When the threshold is reached, batch submissions will be made to SQ. After the kernel threads complete execution, the requests will be placed in the CQ, and the user can obtain the request results.\nSynchronous IO The most common IO mode is Synchronous IO, but it has a relatively low throughput. BanyanDB provides a nonblocking mode that is compatible with lower Linux versions.\nOperation File Create Create the specified file and return the file descriptor, the error will happen if the file already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nreturn: The file instance, can be used for various file operations.\nCreateFile(name String, permission Mode) (File, error)\nWrite BanyanDB provides two methods for writing files. Append mode, which adds new data to the end of a file. BanyanDB also supports vector Append mode, which supports appending consecutive buffers to the end of the file. Flush mode, which flushes all data to one file. 
It will return an error when writing a directory, the file does not exist or there is not enough space, and the incomplete file will be discarded. The flush operation is atomic, which means the file won\u0026rsquo;t be created if an error happens during the flush process. The following is the pseudocode that calls the API in the go style.\nFor append mode:\nparam:\nbuffer: The data append to the file.\nActual length of written data.\nFile.Write(buffer []byte) (int, error)\nFor vector append mode:\nparam:\niov: The data in consecutive buffers.\nreturn: Actual length of written data.\nFile.Writev(iov *[][]byte) (int, error)\nFor flush mode:\nparam:\nbuffer: The data append to the file.\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nreturn: Actual length of flushed data.\nWrite(buffer []byte, permission Mode) (int, error)\nDelete BanyanDB provides the deleting operation, which can delete a file at once. it will return an error if the directory does not exist or the file not reading or writing.\nThe following is the pseudocode that calls the API in the go style.\nDeleteFile(name string) (error)\nRead For reading operation, two read methods are provided: Reading a specified location of data, which relies on a specified offset and a buffer. And BanyanDB supports reading contiguous regions of a file and dispersing them into discontinuous buffers. Read the entire file, BanyanDB provides stream reading, which can use when the file is too large, the size gets each time can be set when using stream reading. If entering incorrect parameters such as incorrect offset or non-existent file, it will return an error. The following is the pseudocode that calls the API in the go style.\nFor reading specified location of data:\nparam:\noffset: Read begin location of the file.\nbuffer: The read length is the same as the buffer length.\nreturn: Actual length of reading data.\nFile.Read(offset int64, buffer []byte) (int, error)\nFor vector reading:\nparam:\niov: Discontinuous buffers in memory.\nreturn: Actual length of reading data.\nFile.Readv(iov *[][]byte) (int, error)\nFor stream reading:\nparam:\nbuffer: Every read length in the stream is the same as the buffer length.\nreturn: A Iterator, the size of each iteration is the length of the buffer.\nFile.StreamRead(buffer []byte) (*iter, error)\nGet size Get the file written data\u0026rsquo;s size and return an error if the file does not exist. The unit of file size is Byte. The following is the pseudocode that calls the API in the go style.\nreturn: the file written data\u0026rsquo;s size.\nFile.Size() (int, error)\nClose Close File.The following is the pseudocode that calls the API in the go style.\nFile.Close() error\n","excerpt":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including …","ref":"/docs/skywalking-banyandb/next/concept/persistence-storage/","title":"Persistence Storage"},{"body":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including write-ahead logging(WAL), index, and data collected from skywalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the need of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. 
By exposing necessary interfaces, upper components do not need to care how persistence is implemented and avoid dealing with differences between different operating systems.\nArchitecture BanyanDB uses third-party storage for actual storage, and the file system shields the differences between different platforms and storage systems, allowing developers to operate files as easily as the local file system without worrying about specific details.\nFor different data models, stored in different locations, such as for meta and wal data, BanyanDB uses a local file system for storage. For index and data, the architecture of the file system is divided into three layers.\n The first layer is the API interface, which developers only need to care about how to operate the remote file system. The second layer is the storage system adapter, which is used to mask the differences between different storage systems. The last layer is the actual storage system. With the use of remote storage architecture, the local system can still play its role and can borrow the local system to speed up reading and writing.  IO Mode Persistence storage offers a range of IO modes to cater to various throughput requirements. The interface can be accessed by developers and can be configured through settings, which can be set in the configuration file.\nIo_uring Io_uring is a new feature in Linux 5.1, which is fully asynchronous and offers high throughput. In the scene of massive storage, io_uring can bring significant benefits. The following is the diagram about how io_uring works. If the user sets io_uring for use, the read and write requests will first be placed in the submission queue buffer when calling the operation API. When the threshold is reached, batch submissions will be made to SQ. After the kernel threads complete execution, the requests will be placed in the CQ, and the user can obtain the request results.\nSynchronous IO The most common IO mode is Synchronous IO, but it has a relatively low throughput. BanyanDB provides a nonblocking mode that is compatible with lower Linux versions.\nOperation Directory Create Create the specified directory and return the file descriptor, the error will happen if the directory already exists. The following is the pseudocode that calls the API in the go style.、\nparam:\nname: The name of the directory.\npermisson: Permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nCreateDirectory(name String, permission Mode) (error)\nOpen Open the directory and return an error if the file descriptor does not exist. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\nreturn: Directory pointer, you can use it for various operations.\nOpenDirectory(name String) (*Dir, error)\nDelete Delete the directory and all files and return an error if the directory does not exist or the directory not reading or writing. The following is the pseudocode that calls the API in the go style.\nDir.DeleteDirectory() (error)\nRename Rename the directory and return an error if the directory already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\nDir.RenameDirectory(newName String) (error)\nRead Get all lists of files or children\u0026rsquo;s directories in the directory and an error if the directory does not exist. 
The following is the pseudocode that calls the API in the go style.\nreturn: List of files belonging to the directory.\nDir.ReadDirectory() (FileList, error)\nPermission When creating a file, the default owner is the user who created the directory. The owner can specify read and write permissions of the directory. If not specified, the default is read and write permissions, which include permissions for all files in the directory. The following is the pseudocode that calls the API in the go style.\nparam:\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nDir.SetDirectoryPermission(permission Mode) (error)\nFile Create Create the specified file and return the file descriptor, the error will happen if the file already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nCreateFile(name String, permission Mode) (error)\nOpen Open the file and return an error if the file descriptor does not exist. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\nreturn: File pointer, you can use it for various operations.\nOpenFile(name String) (*File, error)\nWrite BanyanDB provides two methods for writing files. Append mode, which adds new data to the end of a file. This mode is typically used for WAL. And BanyanDB supports vector Append mode, which supports appending consecutive buffers to the end of the file. Flush mode, which flushes all data to one file. It will return an error when writing a directory, the file does not exist or there is not enough space, and the incomplete file will be discarded. The flush operation is atomic, which means the file won\u0026rsquo;t be created if an error happens during the flush process. The following is the pseudocode that calls the API in the go style.\nFor append mode:\nparam:\nbuffer: The data append to the file.\nFile.AppendWriteFile(buffer []byte) (error)\nFor vector append mode:\nparam:\niov: The data in consecutive buffers.\nFile.AppendWritevFile(iov *[][]byte) (error)\nFor flush mode:\nparam:\nbuffer: The data append to the file.\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nreturn: File pointer, you can use it for various operations.\nFlushWriteFile(buffer []byte, permission Mode) (*File, error)\nDelete BanyanDB provides the deleting operation, which can delete a file at once. it will return an error if the directory does not exist or the file not reading or writing.\nThe following is the pseudocode that calls the API in the go style.\nFile.DeleteFile() (error)\nRead For reading operation, two read methods are provided: Reading a specified location of data, which relies on a specified offset and a buffer. And BanyanDB supports reading contiguous regions of a file and dispersing them into discontinuous buffers. Read the entire file, BanyanDB provides stream reading, which can use when the file is too large, the size gets each time can be set when using stream reading. If entering incorrect parameters such as incorrect offset or non-existent file, it will return an error. 
The following is the pseudocode that calls the API in the go style.\nFor reading specified location of data:\nparam:\noffset: Read begin location of the file.\nbuffer: The read length is the same as the buffer length.\nFile.ReadFile(offset int, buffer []byte) (error)\nFor vector reading:\nparam:\niov: Discontinuous buffers in memory.\nFile.ReadvFile(iov *[][]byte) (error)\nFor stream reading:\nparam:\noffset: Read begin location of the file.\nbuffer: Every read length in the stream is the same as the buffer length.\nreturn: A Iterator, the size of each iteration is the length of the buffer.\nFile.StreamReadFile(offset int, buffer []byte) (*iter, error)\nRename Rename the file and return an error if the directory exists in this directory. The following is the pseudocode that calls the API in the go style.\nparam:\nnewName: The new name of the file.\nFile.RenameFile(newName String) (error)\nGet size Get the file written data\u0026rsquo;s size and return an error if the file does not exist. The unit of file size is Byte. The following is the pseudocode that calls the API in the go style.\nreturn: the file written data\u0026rsquo;s size.\nFile.GetFileSize() (int, error)\nPermission When creating a file, the default owner is the user who created the file. The owner can specify the read and write permissions of the file. If not specified, the default is read and write permissions. The following is the pseudocode that calls the API in the go style.\nparam:\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nFile.SetFilePermission(permission Mode) (error)\n","excerpt":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including …","ref":"/docs/skywalking-banyandb/v0.5.0/concept/persistence-storage/","title":"Persistence Storage"},{"body":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). 
Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. 
For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. 
Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. 
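As a purely conceptual illustration of the two off-CPU aggregation dimensions described above (switch count and switch duration), the following Go sketch folds hypothetical context-switch events into per-stack statistics. The event and field names are assumptions made for illustration only, not Rover's actual data model.

// Hypothetical helper package; not part of SkyWalking Rover.
package profiling

import "time"

// switchEvent is a hypothetical record of one off-CPU period for a given thread stack.
type switchEvent struct {
	stackID string        // identifier of the dumped thread stack
	offCPU  time.Duration // how long the thread was off-CPU before being switched back in
}

// offCPUStats holds the two aggregation dimensions used for off-CPU profiling.
type offCPUStats struct {
	switchCount    int           // number of completed context switches for this stack
	switchDuration time.Duration // total time this stack spent off-CPU
}

// aggregate folds raw events into per-stack statistics: a stack with a higher
// switch count context-switches more often, and a stack with a higher switch
// duration spends more time off-CPU.
func aggregate(events []switchEvent) map[string]offCPUStats {
	out := make(map[string]offCPUStats)
	for _, e := range events {
		s := out[e.stackID]
		s.switchCount++
		s.switchDuration += e.offCPU
		out[e.stackID] = s
	}
	return out
}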
We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking …","ref":"/docs/main/latest/en/concepts-and-designs/ebpf-cpu-profiling/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    
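To illustrate the core idea behind Trace Profiling mentioned above, namely periodically sampling thread stacks, here is a small, self-contained Go sketch. It is not SkyWalking's agent implementation (which targets Java and Python); it only shows, under those assumptions, what interval-based stack sampling looks like in code.

// Conceptual illustration of periodic stack sampling; not SkyWalking's agent code.
package profiling

import (
	"runtime"
	"time"
)

// sampleStacks dumps all goroutine stacks every interval and sends each snapshot
// to out, until stop is closed. A profiler would aggregate these snapshots to see
// which code paths appear most often and therefore consume the most time.
func sampleStacks(interval time.Duration, stop <-chan struct{}, out chan<- []byte) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	buf := make([]byte, 64*1024)
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			n := runtime.Stack(buf, true) // capture stacks of all goroutines
			snapshot := make([]byte, n)
			copy(snapshot, buf[:n])
			out <- snapshot
		}
	}
}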
Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. 
The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking …","ref":"/docs/main/next/en/concepts-and-designs/ebpf-cpu-profiling/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/ebpf-cpu-profiling/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/ebpf-cpu-profiling/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/ebpf-cpu-profiling/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
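For JIT-based languages such as Java, whose symbols the eBPF profiler cannot resolve directly (see the limitation above), the process-local symbol map has to be produced separately. A hedged sketch using perf-map-agent follows; the exact script name and location should be checked against that project's README:

# Writes /tmp/perf-<pid>.map so that sampled JIT addresses can be symbolized.
cd perf-map-agent && bin/create-java-perf-map.sh <java-pid>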
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
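The QPS figures quoted in this comparison come from driving the demo environment with an HTTP load generator. Any such tool will do; wrk is used here purely as an assumed example, and the demo repository may ship different scripts:

# 4 threads, 64 connections, 60 seconds against the Envoy-fronted Nginx service.
wrk -t4 -c64 -d60s http://<envoy-ingress-address>/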
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/ebpf-cpu-profiling/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
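Since the only hard prerequisite noted below is an eBPF-capable kernel (above 4.4, per the limitations section), a quick per-node check before rolling Rover out is simply:

uname -r    # kernel must be newer than 4.4 for eBPF; later kernels expose more data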
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/ebpf-cpu-profiling/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
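The flame graphs referenced throughout this walkthrough can also be reproduced outside of Rover with the classic perf plus FlameGraph tooling popularized by Brendan Gregg. This is an assumed, generic recipe rather than Rover's internal mechanism:

# Sample the Envoy process at 99 Hz for 30 seconds, then render an on-CPU flame graph.
perf record -F 99 -g -p <envoy-pid> -- sleep 30
perf script | ./stackcollapse-perf.pl | ./flamegraph.pl > envoy-oncpu.svg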
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/ebpf-cpu-profiling/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe configuration has 5 parts, which are common_config, gatherer, processor and the sender.\ncommon_config    Config Description     pipe_name The unique collect space name.    Gatherer The gatherer has 2 roles, which are the receiver and fetcher.\nReceiver Role    Config Description     server_name The server name in the sharing pipe, which would be used in the receiver plugin.   receiver The receiver configuration. Please read the doc to find all receiver plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    Fetcher Role    Config Description     fetch_interval The time interval between two fetch operations. The time unit is millisecond.   fetcher The fetcher configuration. Please read the doc to find all fetcher plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    processor The filter configuration. Please read the doc to find all filter plugins.\nsender    Config Description     flush_time The time interval between two flush operations. And the time unit is millisecond.   max_buffer_size The maximum buffer elements.   min_flush_events The minimum flush elements.   client_name The client name used in the forwarders of the sharing pipe.   forwarders The forwarder plugin list. Please read the doc to find all forwarders plugins.   fallbacker The fallbacker plugin. Please read the doc to find all fallbacker plugins.    Example pipes:- common_config:pipe_name:pipe1gatherer:server_name:\u0026#34;grpc-server\u0026#34;receiver:plugin_name:\u0026#34;grpc-native-log-receiver\u0026#34;queue:plugin_name:\u0026#34;mmap-queue\u0026#34;segment_size:${SATELLITE_MMAP_QUEUE_SIZE:524288}max_in_mem_segments:${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}queue_dir:\u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;processor:filters:sender:fallbacker:plugin_name:none-fallbackerflush_time:${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}max_buffer_size:${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}min_flush_events:${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}client_name:kafka-clientforwarders:- plugin_name:native-log-kafka-forwardertopic:${SATELLITE_NATIVELOG-TOPIC:log-topic}","excerpt":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe …","ref":"/docs/skywalking-satellite/latest/en/setup/configuration/pipe-plugins/","title":"Pipe Plugins"},{"body":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe configuration has 5 parts, which are common_config, gatherer, processor and the sender.\ncommon_config    Config Description     pipe_name The unique collect space name.    Gatherer The gatherer has 2 roles, which are the receiver and fetcher.\nReceiver Role    Config Description     server_name The server name in the sharing pipe, which would be used in the receiver plugin.   receiver The receiver configuration. Please read the doc to find all receiver plugins.   
queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    Fetcher Role    Config Description     fetch_interval The time interval between two fetch operations. The time unit is millisecond.   fetcher The fetcher configuration. Please read the doc to find all fetcher plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    processor The filter configuration. Please read the doc to find all filter plugins.\nsender    Config Description     flush_time The time interval between two flush operations. And the time unit is millisecond.   max_buffer_size The maximum buffer elements.   min_flush_events The minimum flush elements.   client_name The client name used in the forwarders of the sharing pipe.   forwarders The forwarder plugin list. Please read the doc to find all forwarders plugins.   fallbacker The fallbacker plugin. Please read the doc to find all fallbacker plugins.    Example pipes:- common_config:pipe_name:pipe1gatherer:server_name:\u0026#34;grpc-server\u0026#34;receiver:plugin_name:\u0026#34;grpc-native-log-receiver\u0026#34;queue:plugin_name:\u0026#34;mmap-queue\u0026#34;segment_size:${SATELLITE_MMAP_QUEUE_SIZE:524288}max_in_mem_segments:${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}queue_dir:\u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;processor:filters:sender:fallbacker:plugin_name:none-fallbackerflush_time:${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}max_buffer_size:${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}min_flush_events:${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}client_name:kafka-clientforwarders:- plugin_name:native-log-kafka-forwardertopic:${SATELLITE_NATIVELOG-TOPIC:log-topic}","excerpt":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe …","ref":"/docs/skywalking-satellite/next/en/setup/configuration/pipe-plugins/","title":"Pipe Plugins"},{"body":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe configuration has 5 parts, which are common_config, gatherer, processor and the sender.\ncommon_config    Config Description     pipe_name The unique collect space name.    Gatherer The gatherer has 2 roles, which are the receiver and fetcher.\nReceiver Role    Config Description     server_name The server name in the sharing pipe, which would be used in the receiver plugin.   receiver The receiver configuration. Please read the doc to find all receiver plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    Fetcher Role    Config Description     fetch_interval The time interval between two fetch operations. The time unit is millisecond.   fetcher The fetcher configuration. Please read the doc to find all fetcher plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    processor The filter configuration. Please read the doc to find all filter plugins.\nsender    Config Description     flush_time The time interval between two flush operations. And the time unit is millisecond.   max_buffer_size The maximum buffer elements.   min_flush_events The minimum flush elements.   client_name The client name used in the forwarders of the sharing pipe.   forwarders The forwarder plugin list. Please read the doc to find all forwarders plugins.   fallbacker The fallbacker plugin. Please read the doc to find all fallbacker plugins.    
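The example that closes this page is rendered with its indentation collapsed. The same pipe definition, re-indented as YAML purely for readability (field names, plugin names, and environment variables unchanged; the nesting is assumed from the sections above), looks roughly like this:

pipes:
  - common_config:
      pipe_name: pipe1
    gatherer:
      server_name: "grpc-server"
      receiver:
        plugin_name: "grpc-native-log-receiver"
      queue:
        plugin_name: "mmap-queue"
        segment_size: ${SATELLITE_MMAP_QUEUE_SIZE:524288}
        max_in_mem_segments: ${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}
        queue_dir: "pipe1-log-grpc-receiver-queue"
    processor:
      filters:
    sender:
      fallbacker:
        plugin_name: none-fallbacker
      flush_time: ${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}
      max_buffer_size: ${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}
      min_flush_events: ${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}
      client_name: kafka-client
      forwarders:
        - plugin_name: native-log-kafka-forwarder
          topic: ${SATELLITE_NATIVELOG-TOPIC:log-topic}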
Example pipes:- common_config:pipe_name:pipe1gatherer:server_name:\u0026#34;grpc-server\u0026#34;receiver:plugin_name:\u0026#34;grpc-native-log-receiver\u0026#34;queue:plugin_name:\u0026#34;mmap-queue\u0026#34;segment_size:${SATELLITE_MMAP_QUEUE_SIZE:524288}max_in_mem_segments:${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}queue_dir:\u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;processor:filters:sender:fallbacker:plugin_name:none-fallbackerflush_time:${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}max_buffer_size:${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}min_flush_events:${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}client_name:kafka-clientforwarders:- plugin_name:native-log-kafka-forwardertopic:${SATELLITE_NATIVELOG-TOPIC:log-topic}","excerpt":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/pipe-plugins/","title":"Pipe Plugins"},{"body":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... 
|- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. (HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. 
You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   
parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. 
All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. --\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... \u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  
+-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  
Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml. 
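For readability, the workflow fragment quoted below in flattened form corresponds roughly to the following layout; only the single line carrying your scenario directory name is added to the existing case matrix:

jobs:
  PluginsTest:
    name: Plugin
    runs-on: ubuntu-latest
    timeout-minutes: 90
    strategy:
      fail-fast: true
      matrix:
        case:
          # ... existing scenarios, kept in alphabetical order ...
          - <your scenario test directory name>
          # ...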
If a test case required to run in JDK 21 environment, please add you test case into file plugins-jdk21-test.\u0026lt;n\u0026gt;.yaml.\njobs:PluginsTest:name:Pluginruns-on:ubuntu-latesttimeout-minutes:90strategy:fail-fast:truematrix:case:# ...- \u0026lt;your scenario test directory name\u0026gt;# ...","excerpt":"Plugin automatic test framework The plugin test framework is designed to verify the function and …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/plugin-test/","title":"Plugin automatic test framework"},{"body":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... 
|- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. (HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. 
The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    
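To make the flattened span notation above easier to scan, a single entry-span item carrying one ref might be laid out as in the sketch below; the values mirror the HttpClient example discussed later on this page, so treat the endpoint names and component id as illustrative:

- segmentId: not null                 # string operator: any non-null id passes
  spans:
    - operationName: /httpclient-case/case/context-propagate
      parentSpanId: -1                # first span of the segment
      spanId: 0
      startTime: nq 0                 # exact timestamps cannot be predicted, so "not equal 0"
      endTime: nq 0
      isError: false
      spanType: Entry
      spanLayer: Http
      componentId: 1
      tags:
        - {key: http.method, value: GET}
      logs: []
      peer: null
      refs:
        - {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: 'localhost:8080',
           refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null,
           parentServiceInstance: not null, parentService: not null, traceId: not null}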
Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. 
All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. --\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... \u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  
+-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  
Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml. 
If a test case required to run in JDK 21 environment, please add you test case into file plugins-jdk21-test.\u0026lt;n\u0026gt;.yaml.\njobs:PluginsTest:name:Pluginruns-on:ubuntu-latesttimeout-minutes:90strategy:fail-fast:truematrix:case:# ...- \u0026lt;your scenario test directory name\u0026gt;# ...","excerpt":"Plugin automatic test framework The plugin test framework is designed to verify the function and …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-test/","title":"Plugin automatic test framework"},{"body":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... 
|- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. (HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. 
The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    
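As a compact illustration of how the numeric and string operators described above combine in practice, a minimal segmentItems skeleton might read as follows; the service name and segment count are illustrative:

segmentItems:
  - serviceName: httpclient-case
    segmentSize: ge 2          # numeric operator: at least two segments, tolerating extra health-check segments
    segments:
      - segmentId: not null    # string operator: any non-null segment id is accepted
        spans:
          # span items follow the span format documented above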
Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. 
All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. --\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... \u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  
+-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  
Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml.\njobs:PluginsTest:name:Pluginruns-on:ubuntu-latesttimeout-minutes:90strategy:fail-fast:truematrix:case:# ...- \u0026lt;your scenario test directory name\u0026gt;# ...","excerpt":"Plugin automatic test framework The plugin test framework is designed to verify the function and …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/plugin-test/","title":"Plugin automatic test framework"},{"body":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. 
As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. 
(HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. 
DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. 
The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. 
--\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... \u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  +-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  
segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  
meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml. If a test case required to run in JDK 21 environment, please add you test case into file plugins-jdk21-test.\u0026lt;n\u0026gt;.yaml.\njobs:PluginsTest:name:Pluginruns-on:ubuntu-latesttimeout-minutes:90strategy:fail-fast:truematrix:case:# ...- \u0026lt;your scenario test directory name\u0026gt;# ...","excerpt":"Plugin automatic test framework The plugin test framework is designed to verify the function and …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/plugin-test/","title":"Plugin automatic test framework"},{"body":"Plugin Configurations    key environment key default value description     http.server_collect_parameters SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS false Collect the parameters of the HTTP request on the server side.   mongo.collect_statement SW_AGENT_PLUGIN_CONFIG_MONGO_COLLECT_STATEMENT false Collect the statement of the MongoDB request.   sql.collect_parameter SW_AGENT_PLUGIN_CONFIG_SQL_COLLECT_PARAMETER false Collect the parameter of the SQL request.   redis.max_args_bytes SW_AGENT_PLUGIN_CONFIG_REDIS_MAX_ARGS_BYTES 1024 Limit the bytes size of redis args request.   reporter.discard SW_AGENT_REPORTER_DISCARD false Discard the reporter.    
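For readers wondering how the keys in the table above are consumed inside a Go agent plugin, the Plugin Development Guide further down this page describes a //skywalking:config declaration; the following is a minimal sketch of that pattern for the first key, http.server_collect_parameters. The package name httpplugin and the helper function are hypothetical and for illustration only; the environment-variable form is applied by the agent's configuration loading, not by plugin code.

package httpplugin

//skywalking:config http
var config struct {
	// corresponds to http.server_collect_parameters / SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS above
	ServerCollectParameters bool `config:"server_collect_parameters"`
}

// shouldCollectParameters reports whether server-side HTTP request parameters should be collected.
func shouldCollectParameters() bool {
	return config.ServerCollectParameters
}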
","excerpt":"Plugin Configurations    key environment key default value description …","ref":"/docs/skywalking-go/latest/en/agent/plugin-configurations/","title":"Plugin Configurations"},{"body":"Plugin Configurations    key environment key default value description     http.server_collect_parameters SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS false Collect the parameters of the HTTP request on the server side.   mongo.collect_statement SW_AGENT_PLUGIN_CONFIG_MONGO_COLLECT_STATEMENT false Collect the statement of the MongoDB request.   sql.collect_parameter SW_AGENT_PLUGIN_CONFIG_SQL_COLLECT_PARAMETER false Collect the parameter of the SQL request.   redis.max_args_bytes SW_AGENT_PLUGIN_CONFIG_REDIS_MAX_ARGS_BYTES 1024 Limit the bytes size of redis args request.   reporter.discard SW_AGENT_REPORTER_DISCARD false Discard the reporter.   gin.collect_request_headers SW_AGENT_PLUGIN_CONFIG_GIN_COLLECT_REQUEST_HEADERS  Collect the http header of gin request.   gin.header_length_threshold SW_AGENT_PLUGIN_CONFIG_GIN_HEADER_LENGTH_THRESHOLD 2048 Controlling the length limitation of all header values.    ","excerpt":"Plugin Configurations    key environment key default value description …","ref":"/docs/skywalking-go/next/en/agent/plugin-configurations/","title":"Plugin Configurations"},{"body":"Plugin Configurations    key environment key default value description     http.server_collect_parameters SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS false Collect the parameters of the HTTP request on the server side.   mongo.collect_statement SW_AGENT_PLUGIN_CONFIG_MONGO_COLLECT_STATEMENT false Collect the statement of the MongoDB request.   sql.collect_parameter SW_AGENT_PLUGIN_CONFIG_SQL_COLLECT_PARAMETER false Collect the parameter of the SQL request.   redis.max_args_bytes SW_AGENT_PLUGIN_CONFIG_REDIS_MAX_ARGS_BYTES 1024 Limit the bytes size of redis args request.   reporter.discard SW_AGENT_REPORTER_DISCARD false Discard the reporter.    ","excerpt":"Plugin Configurations    key environment key default value description …","ref":"/docs/skywalking-go/v0.4.0/en/agent/plugin-configurations/","title":"Plugin Configurations"},{"body":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll plugins must follow these steps:\n Create a new plugin module: Create a new project in the specified directory and import the plugin API module. Define the enhancement object: Define the description for the plugin. Invoke the plugin API: Call the API provided by the core to complete the core invocation. Import the plugin module: Import the plugin into the management module for users to use.  Create a new plugin module The plugin must create a new module, which is currently stored in the project\u0026rsquo;s plugins directory.\nPlugins can import the following two modules:\n Agent core: This module provides all the dependencies needed for the plugin, including the plugin API, enhancement declaration objects, etc. Agent core plugin should be github.com/apache/skywalking-go/plugins/core and replaced by the relative location. Framework to be enhanced: Import the framework you wish to enhance.  Note: Plugins should NOT import and use any other modules, as this may cause compilation issues for users. If certain tools are needed, they should be provided by the agent core.\nDefine the enhancement object In the root directory of the project, create a new go file to define the basic information of the plugin. 
The basic information includes the following methods, corresponding to the Instrument interface:\n Name: The name of the plugin. Please keep this name consistent with the newly created project name. The reason will be explained later. Base Package: Declare which package this plugin intercepts. For example, if you want to intercept gin, you can write: \u0026ldquo;github.com/gin-gonic/gin\u0026rdquo;. Version Checker: This method passes the version number to the enhancement object to verify whether the specified version of the framework is supported. If not, the enhancement program will not be executed. Points: A plugin can define one or more enhancement points. This will be explained in more detail in the following sections. File System: Use //go:embed * in the current file to import all files in this module, which will be used for file copying during the mixed compilation process.  Note: Please declare //skywalking:nocopy at any position in this file to indicate that the file would not be copied. This file is only used for guidance during hybrid compilation. Also, this file involves the use of the embed package, and if the target framework does not import the package embed, a compilation error may occur.\nManage Instrument and Interceptor codes in hierarchy structure Instrument and interceptor codes are placed in root by default. In complex instrumentation scenarios, there could be dozens of interceptors, we provide PluginSourceCodePath to build a hierarchy folder structure to manage those codes.\nNotice: The instrumentation still works without proper setting of this, but the debug tool would lose the location of the source codes.\nExample For example, the framework needs to enhance two packages, as shown in the following directory structure:\n- plugins - test - go.mod - package1 - instrument.go - interceptor.go - package2 - instrument.go - interceptor.go ... In the above directory structure, the test framework needs to provide multiple different enhancement objects. In this case, a PluginSourceCodePath Source Code Path** method needs to be added for each enhancement object, the values of this method should be package1 and package2.\nInstrument Point Instrument points are used to declare that which methods and structs in the current package should be instrumented. They mainly include the following information:\n Package path: If the interception point that needs to be intercepted is not in the root directory of the current package, you need to fill in the relative path to the package. For example, if this interception point wants to instrument content in the github.com/gin-gonic/gin/render directory, you need to fill in render here. Package Name(optional): Define the package name of the current package. If the package name is not defined, the package name of the current package would be used by default. It\u0026rsquo;s used when the package path and package name are not same, such as the name of github.com/emicklei/go-restful/v3 is restful. Matcher(At): Specify which eligible content in the current package path needs to be enhanced. Interceptor: If the current method is being intercepted (whether it\u0026rsquo;s a static method or an instance method), the name of the interceptor must be specified.  Method Matcher Method matchers are used to intercept both static and non-static methods. The specific definitions are as follows:\n// NewStaticMethodEnhance creates a new EnhanceMatcher for static method. 
// name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewStaticMethodEnhance(name string, filters ...MethodFilterOption) // NewMethodEnhance creates a new EnhanceMatcher for method. // receiver: receiver type name of method needs to be enhanced. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewMethodEnhance(receiver, name string, filters ...MethodFilterOption) Filter Option Filter Options are used to validate the parameters or return values in the method. If the method name matches but the Options validation fails, the enhancement would not be performed.\n// WithArgsCount filter methods with specific count of arguments. func WithArgsCount(argsCount int) // WithResultCount filter methods with specific count of results. func WithResultCount(resultCount int) // WithArgType filter methods with specific type of the index of the argument. func WithArgType(argIndex int, dataType string) // WithResultType filter methods with specific type of the index of the result. func WithResultType(argIndex int, dataType string) Demo For example, if you have the following method that needs to be intercepted:\nfunc (c *Context) HandleMethod(name string) bool you can describe it using this condition:\ninstrument.NewMethodEnhance(\u0026#34;*Context\u0026#34;, \u0026#34;HandleMethod\u0026#34;, instrument.WithArgsCount(1), instrument.WithArgType(0, \u0026#34;string\u0026#34;), instrument.WithResultCount(1), instrument.WithResultType(0, \u0026#34;bool\u0026#34;)) Struct Matcher Enhancement structures can embed enhanced fields within specified structs. After the struct is instantiated, custom data content can be added to the specified struct in the method interceptor.\nStruct matchers are used to intercept struct methods. The specific definitions are as follows:\n// NewStructEnhance creates a new EnhanceMatcher for struct. // name: struct name needs to be enhanced.(Public and private structs are supported) // filters: filters for struct. func NewStructEnhance(name string, filters ...StructFilterOption) Filter Option Filter Options are used to validate the fields in the structure.\n// WithFieldExists filter the struct has the field with specific name. func WithFieldExists(fieldName string) // WithFiledType filter the struct has the field with specific name and type. func WithFiledType(filedName, filedType string) Enhanced Instance After completing the definition of the struct enhancement, you can convert the specified instance into the following interface when intercepting methods, and get or set custom field information. 
The interface definition is as follows:\ntype EnhancedInstance interface { // GetSkyWalkingDynamicField get the customized data from instance \tGetSkyWalkingDynamicField() interface{} // SetSkyWalkingDynamicField set the customized data into the instance \tSetSkyWalkingDynamicField(interface{}) } Demo For example, if you have the following struct that needs to be enhanced:\ntype Test struct { value *Context } you can describe it using this condition:\ninstrument.NewStructEnhance(\u0026#34;Test\u0026#34;, instrument.WithFieldExists(\u0026#34;value\u0026#34;), instrument.WithFiledType(\u0026#34;value\u0026#34;, \u0026#34;*Context\u0026#34;)) Next, you can set custom content for the specified enhanced instance when intercepting methods.\nins := testInstance.(instrument.EnhancedInstance) // setting custom content ins.SetSkyWalkingDynamicField(\u0026#34;custom content\u0026#34;) // getting custom content res := ins.GetSkyWalkingDynamicField() Interceptor Interceptors are used to define custom business logic before and after method execution, allowing you to access data from before and after method execution and interact with the Agent Core by using the Agent API.\nThe interceptor definition is as follows, you need to create a new structure and implement it:\ntype Interceptor interface { // BeforeInvoke would be called before the target method invocation.  BeforeInvoke(invocation Invocation) error // AfterInvoke would be called after the target method invocation.  AfterInvoke(invocation Invocation, result ...interface{}) error } Within the interface, you can see the Invocation interface, which defines the context of an interception. The specific definition is as follows:\ntype Invocation interface { // CallerInstance is the instance of the caller, nil if the method is static method.  CallerInstance() interface{} // Args is get the arguments of the method, please cast to the specific type to get more information.  Args() []interface{} // ChangeArg is change the argument value of the method  ChangeArg(int, interface{}) // IsContinue is the flag to control the method invocation, if it is true, the target method would not be invoked.  IsContinue() bool // DefineReturnValues are defined the return value of the method, and continue the method invoked  DefineReturnValues(...interface{}) // SetContext is the customized context of the method invocation, it should be propagated the tracing span.  SetContext(interface{}) // GetContext is get the customized context of the method invocation  GetContext() interface{} } Thread safe The Interceptor instance would define new instance at the current package level, rather than creating a new instance each time a method is intercepted.\nTherefore, do not declare objects in the interceptor, and instead use Invocation.Context to pass data.\nPackage Path If the method you want to intercept is not located in the root directory of the framework, place your interceptor code in the relative location within the plugin. The Agent would only copy files from the same package directory.\nFor example, if you want to intercept a method in github.com/gin-gonic/gin/render, create a render directory in the root of your plugin, and put the interceptor inside it. This ensures that the interceptor is properly included during the copy operation and can be correctly applied to the target package.\nPlugin Configuration Plugin configuration is used to add custom configuration parameters to a specified plugin. 
When users specify configuration items, the plugin can dynamically adapt its behavior according to the user\u0026rsquo;s configuration.\nDeclaration Declare the configuration variable you need in the package where you want to use it. Declare it using var, and add the //skywalking:config directive to indicate that this variable requires dynamic updating.\nBy default, the configuration item belongs to the configuration of the current plugin. For example, if the name of the current plugin is gin, then this configuration item is under the gin plugin. You can also bind it to another plugin, such as http, to reference that plugin\u0026rsquo;s configuration; in that case, specify it as //skywalking:config http.\nItem Each configuration item needs a config tag, which specifies the name of the configuration entry. By default, the name is derived from the field name by lowercasing all letters and inserting an _ before each originally uppercase letter.\nCurrently, it supports basic data types and struct types, and it also supports obtaining values through environment variables.\nDemo For example, the following configuration item is declared:\n//skywalking:config http var config struct { ServerCollectParameters bool `config:\u0026#34;server_collect_parameters\u0026#34;` Client struct{ CollectParameters bool `config:\u0026#34;collect_parameters\u0026#34;` } `config:\u0026#34;client\u0026#34;` } The above example creates a plugin configuration for http, which includes two configuration items.\n config.ServerCollectParameters: Its configuration is located at http.server_collect_parameters. config.Client.CollectParameters: Its configuration is located at http.client.collect_parameters.  When the plugin needs a value, it can be read directly from the config variable.\nAgent API The Agent API is used to interact with the Agent Core when a method is intercepted.\nTracing API The Tracing API is used for building distributed tracing, and currently supports the following methods:\n// CreateEntrySpan creates a new entry span. // operationName is the name of the span. // extractor is the extractor to extract the context from the carrier. // opts is the options to create the span. func CreateEntrySpan(operationName string, extractor Extractor, opts ...SpanOption) // CreateLocalSpan creates a new local span. // operationName is the name of the span. // opts is the options to create the span. func CreateLocalSpan(operationName string, opts ...SpanOption) // CreateExitSpan creates a new exit span. // operationName is the name of the span. // peer is the peer address of the span. // injector is the injector to inject the context into the carrier. // opts is the options to create the span. func CreateExitSpan(operationName, peer string, injector Injector, opts ...SpanOption) // ActiveSpan returns the current active span in the current goroutine. // If the current goroutine is not in the context of a span, it will return nil. // If the span is obtained from another goroutine, it can only be read but not operated on. func ActiveSpan() // GetRuntimeContextValue returns the value of the key in the runtime context, which is bound to the current goroutine. // The value can also be read from goroutines created by the current goroutine func GetRuntimeContextValue(key string) // SetRuntimeContextValue sets the value of the key in the runtime context. 
func SetRuntimeContextValue(key string, val interface{}) Context Carrier The context carrier is used to pass the context between different applications.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\n// Extractor is a tool specification which defines how to // extract the trace parent context from the propagation context type Extractor func(headerKey string) (string, error) // Injector is a tool specification which defines how to // inject the trace context into the propagation context type Injector func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request tracing.CreateEntrySpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request tracing.CreateExitSpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Span Option Span Options can be passed when creating a Span to configure the information in the Span.\nThe following options are currently supported:\n// WithLayer sets the SpanLayer of the Span func WithLayer(layer SpanLayer) // WithComponent sets the component id of the Span func WithComponent(componentID int32) // WithTag sets the Tag of the Span func WithTag(key Tag, value string) Span Component The Component ID in the Span is used to identify the current component; its data is defined in the SkyWalking OAP component libraries. If the framework you are writing a plugin for does not exist there, please submit a PR in the SkyWalking project to add a definition for this plugin.\nSpan Operation After creating a Span, you can perform additional operations on it.\n// Span for plugin API type Span interface { // AsyncSpan for the async API \tAsyncSpan // Tag sets the Tag of the Span \tTag(Tag, string) // SetSpanLayer sets the SpanLayer of the Span \tSetSpanLayer(SpanLayer) // SetOperationName re-sets the operation name of the Span \tSetOperationName(string) // SetPeer re-sets the peer address of the Span \tSetPeer(string) // Log adds a log to the Span \tLog(...string) // Error adds an error log to the Span \tError(...string) // End ends the Span \tEnd() } Async Span There is a set of advanced APIs in Span specifically designed for async use cases. When the name, tags, logs, or other properties of the span (including ending it) need to be set in another goroutine, you should use these APIs.\ntype AsyncSpan interface { // PrepareAsync finishes the span in the current tracing context, but the span stays alive until AsyncFinish is called  PrepareAsync() // AsyncFinish finishes the current async span  AsyncFinish() } Following the API definition above, use these steps to use the async API:\n Call span.PrepareAsync() to prepare the span to do any operation in another goroutine. Use Span.End() in the original goroutine when your job in the current goroutine is complete. Propagate the span to any other goroutine in your plugin. Once the above steps are all set, call span.AsyncFinish() in any goroutine. When span.AsyncFinish() has completed for all spans, the spans are finished and reported to the backend.  
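As a rough illustration of these steps, here is a minimal sketch of the async workflow, written as it might appear inside an interceptor method. It assumes the plugin reaches the tracing API through the same tracing package used in the snippets above, and that CreateLocalSpan returns the span together with an error; the comments above omit the return values, so check the actual signatures in the agent core before relying on this sketch.

span, err := tracing.CreateLocalSpan("example/asyncJob") // hypothetical operation name
if err != nil {
	return err
}
span.PrepareAsync() // step 1: mark the span as async while still in the original goroutine
go func() {
	span.Log("async work finished") // step 3: the span can still be modified in this goroutine
	span.AsyncFinish()              // step 4: finish the async part once the background work is done
}()
span.End() // step 2: end the span in the original goroutine; it is reported only after AsyncFinish

The key point is that both Span.End() and span.AsyncFinish() must be called before the span is reported, so the background goroutine must always reach AsyncFinish, even on error paths.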
Tracing Context Operation In the Go Agent, the trace context propagates across goroutines automatically by default. However, in some cases goroutines share context because they are scheduled by a pool mechanism. Use these advanced APIs to manipulate the context and switch the current context.\n// CaptureContext captures the tracing context of the current goroutine. func CaptureContext() ContextSnapshot // ContinueContext continues the tracing context in the current goroutine. func ContinueContext(ctx ContextSnapshot) // CleanContext cleans the tracing context in the current goroutine. func CleanContext() Typically, use the APIs as follows to control or switch the context:\n Use tracing.CaptureContext() to get the ContextSnapshot object. Propagate the snapshot context to any other goroutine in your plugin. Use tracing.ContinueContext(snapshot) to continue the snapshot context in the target goroutine.  Meter API The Meter API is used to record the metrics of the target program, and currently supports the following methods:\n// NewCounter creates a new counter metrics. // name is the name of the metrics // opts is the options for the metrics func NewCounter(name string, opts ...Opt) Counter // NewGauge creates a new gauge metrics. // name is the name of the metrics // getter is the function to get the value of the gauge meter // opts is the options for the metrics func NewGauge(name string, getter func() float64, opts ...Opt) Gauge // NewHistogram creates a new histogram metrics. // name is the name of the metrics // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogram(name string, steps []float64, opts ...Opt) Histogram // NewHistogramWithMinValue creates a new histogram metrics. // name is the name of the metrics // minVal is the min value of the histogram bucket // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogramWithMinValue(name string, minVal float64, steps []float64, opts ...Opt) Histogram // RegisterBeforeCollectHook registers a hook function which will be called before metrics are collected. func RegisterBeforeCollectHook(f func()) Meter Option Meter Options can be passed when creating a Meter to configure the information in the Meter.\n// WithLabel adds a label to the metrics. func WithLabel(key, value string) Opt Meter Type Counter Counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase.\ntype Counter interface { // Get returns the current value of the counter. \tGet() float64 // Inc increments the counter by the given value. \tInc(val float64) } Gauge Gauge is a metric that represents a single numerical value that can arbitrarily go up and down.\ntype Gauge interface { // Get returns the current value of the gauge.  Get() float64 } Histogram Histogram is a metric that represents the distribution of a set of values.\ntype Histogram interface { // Observe finds the bucket associated with the value and adds 1. \tObserve(val float64) // ObserveWithCount finds the bucket associated with the value and adds the specific count. \tObserveWithCount(val float64, count int64) } Import Plugin Once you have finished developing the plugin, you need to import the completed module into the Agent program and define it in the corresponding file.\nAt this point, your plugin development process is complete. 
When the Agent performs hybrid compilation on the target program, your plugin will be executed as expected.\n","excerpt":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll …","ref":"/docs/skywalking-go/latest/en/development-and-contribution/development-guide/","title":"Plugin Development Guide"},
{"body":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing.\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither an MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of a service or an MQ producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across processes. This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initialize the ContextCarrier. Place all items of ContextCarrier into headers (e.g. HTTP headers), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the headers, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  
See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or by carrying it in existing arguments. Use ContextManager#continued in the sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri), the new ContextCarrier and the peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which is defined in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. * @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. 
For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of a slow statement is defined by agent-analyzer/default/slowDBAccessThreshold. Check the Slow Database Statement document of the OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires statistics. The value of x-le should be in JSON format. There are two options:\n Define a separate logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analyzes database (SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records the database type, such as sql, cassandra, Elasticsearch. db.statement records the SQL statement of the database access.  Read the backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analyzes cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type; usually it is the official name of the cache (e.g. Redis). cache.cmd indicates the cache command that is sent to the cache server (e.g. setnx). cache.op indicates whether the command is a write or a read operation; usually the value is derived from the command. cache.key indicates the cache key that is sent to the cache server; this tag may be null, as usually only string-typed keys are collected.  To decide flexibly which commands map to which op, it is better to make this mapping a config property. 
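As a hedged illustration (not part of the original guide), the following sketch shows how a cache plugin interceptor might fill these tags through the AbstractSpan#tag(String, String) API listed above; assume span is the ExitSpan the plugin created for the cache call, and the Redis command, op mapping and key shown are purely illustrative:
span.tag(\u0026#34;cache.type\u0026#34;, \u0026#34;Redis\u0026#34;); // official name of the cache
span.tag(\u0026#34;cache.cmd\u0026#34;, \u0026#34;setnx\u0026#34;); // the command sent to the cache server
span.tag(\u0026#34;cache.op\u0026#34;, \u0026#34;write\u0026#34;); // derived from the command; ideally driven by a plugin config property
span.tag(\u0026#34;cache.key\u0026#34;, \u0026#34;user:1001\u0026#34;); // may be omitted when the key is not a plain string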
Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analyzes MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates the MQ queue name. mq.topic indicates the MQ topic name; it is optional, as some MQs don\u0026rsquo;t have the concept of a topic. transmission.latency records the transmission latency from producer to consumer. Usually you don\u0026rsquo;t need to record this tag manually; instead, call contextCarrier.extensionInjector().injectSendingTimestamp(); to record the sendingTimestamp on the producer side, and SkyWalking will record this tag on the consumer side if the sw8-x context carrier (from the producer side) contains sendingTimestamp.  Notice: you should set the peer on both sides (producer and consumer), and the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #asyncFinish is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic way to trace is to intercept a Java method, using byte code manipulation technology and the AOP concept. SkyWalking has packaged the byte code manipulation technology and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. 
Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because ThirdPartyClass does not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but they don\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using the full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, use fully qualified class name string literals instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend using a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexpected methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClass() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs if you want to change the argument ref in the interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn SkyWalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. 
For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. 
* * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE: if this annotation is moved to the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin A Java agent plugin could use the meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the rate of increase is reported to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increments the count of the Counter; the value should be positive.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; (double) threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary of sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1d, 5d, 10d)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the maximum value of every histogram bucket. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. The default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds a value to the histogram and automatically determines which bucket count needs to be incremented. Rule: the value is counted into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou can learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub-modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include the supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc. Send a pull request and ask for a review. The plugin committers will approve your plugin once the plugin CI-with-IT, e2e, and plugin tests have passed. The plugin is then accepted by SkyWalking.  ","excerpt":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin. …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/java-plugin-development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to those of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither an MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of a service or an MQ producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. 
This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. 
* @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. 
setnx) cache.op indicates the command is used for write or read operation , usually the value is converting from command cache.key indicates the cache key that would be sent to cache server , this tag maybe null , as string type key would be collected usually.  In order to decide which op should be converted to flexibly , It\u0026rsquo;s better that providing config property . Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analysis MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates MQ queue name mq.topic indicates MQ topic name , It\u0026rsquo;s optional as some MQ don\u0026rsquo;t hava concept of topic transmission.latency The transmission latency from consumer to producer. Usually you needn\u0026rsquo;t to record this tag manually, instead to call contextCarrier.extensionInjector().injectSendingTimestamp(); to record tag sendingTimestamp on producer side , and SkyWalking would record this tag on consumer side if sw8-x context carrier(from producer side) contains sendingTimestamp  Notice , you should set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation tech and AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  
Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC(MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because of the fact that ThirdPartyClass dose not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but it doesn\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, Use Full Qualified Class Name String Literature Instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexcepted methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClassName() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs, if you want to change the argument ref in interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn Skywalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . 
+ class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. 
*/ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","excerpt":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin. …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/java-plugin-development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. 
This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. 
* @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. 
setnx). cache.op indicates whether the command is used for a write or a read operation; the value is usually derived from the command. cache.key indicates the cache key that is sent to the cache server; this tag may be null, as usually only string-typed keys are collected.  To decide flexibly which commands map to which operation, it is better to provide a config property. See the Jedis-4.x-plugin for reference.\nVirtual Message Queue (MQ) Related Tags SkyWalking analyzes MQ-related performance metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates the MQ queue name. mq.topic indicates the MQ topic name; it is optional, as some MQs don\u0026rsquo;t have the concept of a topic. transmission.latency is the transmission latency from the producer to the consumer. Usually you don\u0026rsquo;t need to record this tag manually; instead, call contextCarrier.extensionInjector().injectSendingTimestamp(); on the producer side to record the sendingTimestamp, and SkyWalking will record transmission.latency on the consumer side if the sw8-x context carrier (from the producer side) contains sendingTimestamp.  Notice that you should set the peer on both sides (producer and consumer), and the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including the end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finishes in the current tracing context, but the span itself stays alive until {@link #asyncFinish} * is called. * * This method must be called\u0026lt;br/\u0026gt; * 1. in the original thread (tracing context), * 2. while the current span is the active span. * * While the span is alive, its tags, logs and attributes can be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span that it can be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #asyncFinish has been called on every span that called #prepareForAsync, the tracing context will be finished and will report to the backend (based on the count of API executions). A minimal sketch of this flow is shown below.
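The following is a minimal, hedged sketch of that flow. The operation name, tag and executor are hypothetical; only the ContextManager and AbstractSpan calls are taken from the APIs described in this guide.

AbstractSpan span = ContextManager.createLocalSpan("/demo/async/operation");
span.prepareForAsync();            // 1. mark the span as async while still in the original thread
ContextManager.stopSpan();         // 2. stop it in the original context; the span itself stays alive
executor.submit(() -> {            // 3. hand the span object over to another thread (executor is illustrative)
    try {
        span.tag("demo.result", "ok");   // tags/logs may still be added while the span is alive
    } finally {
        span.asyncFinish();        // 4. finish it; calls to prepareForAsync and asyncFinish must match
    }
});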
Develop a plugin Abstract The basic method of tracing is to intercept a Java method, using byte code manipulation technology and the AOP concept. SkyWalking has packaged the byte code manipulation and tracing context propagation, so you simply have to define the intercept point (a.k.a. the aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructors, instance methods and class methods.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which can propagate context through the MIC (MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class name (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class) or byName(ThirdPartyClass.class.getName()), because ThirdPartyClass does not necessarily exist in the target application and this will break the agent. We have import checks to assist in checking this in CI, but they don\u0026rsquo;t cover all scenarios of this limitation, so never try to work around it by using the fully-qualified class name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, use the fully qualified class name as a string literal instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still do not use *.class.getName() to get the class name String. We recommend using a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexpected methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClass() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must be instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true from isOverrideArgs if you want to change the argument references in the interceptor. Please refer to bytebuddy for details of defining an ElementMatcher.\nIn SkyWalking, we provide 3 classes to facilitate ElementMatcher definitions:\n AnnotationTypeNameMatch: Check whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + .
+ class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. 
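* Note: the same MethodInvocationContext instance is handed to afterMethod and handleMethodException for this invocation; it is not shared across invocations.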
*/ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","excerpt":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin. …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/java-plugin-development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. 
This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. 
* @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. 
setnx) cache.op indicates the command is used for write or read operation , usually the value is converting from command cache.key indicates the cache key that would be sent to cache server , this tag maybe null , as string type key would be collected usually.  In order to decide which op should be converted to flexibly , It\u0026rsquo;s better that providing config property . Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analysis MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates MQ queue name mq.topic indicates MQ topic name , It\u0026rsquo;s optional as some MQ don\u0026rsquo;t hava concept of topic transmission.latency The transmission latency from consumer to producer. Usually you needn\u0026rsquo;t to record this tag manually, instead to call contextCarrier.extensionInjector().injectSendingTimestamp(); to record tag sendingTimestamp on producer side , and SkyWalking would record this tag on consumer side if sw8-x context carrier(from producer side) contains sendingTimestamp  Notice , you should set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation tech and AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  
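Before the detailed walk-through that follows, here is a hedged skeleton of a v1 definition extending ClassInstanceMethodsEnhancePluginDefine. The target class, method and interceptor names are hypothetical, and static imports of byName (the ClassMatch helper) and ElementMatchers.named (Byte Buddy) are assumed.

public class DemoClientInstrumentation extends ClassInstanceMethodsEnhancePluginDefine {
    @Override
    protected ClassMatch enhanceClass() {
        // match the target class by its fully qualified name, given as a string literal
        return byName("com.example.demo.DemoClient");
    }

    @Override
    public ConstructorInterceptPoint[] getConstructorsInterceptPoints() {
        return new ConstructorInterceptPoint[0];    // no constructor interception in this sketch
    }

    @Override
    public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() {
        return new InstanceMethodsInterceptPoint[] {
            new InstanceMethodsInterceptPoint() {
                @Override
                public ElementMatcher<MethodDescription> getMethodsMatcher() {
                    return named("execute");        // the instance method to intercept
                }

                @Override
                public String getMethodsInterceptor() {
                    // the interceptor is referenced by its name as a string literal
                    return "com.example.demo.DemoClientInterceptor";
                }

                @Override
                public boolean isOverrideArgs() {
                    return false;                   // we do not change the method arguments
                }
            }
        };
    }
}

Such a definition still needs an entry in the skywalking-plugin.def file, which is covered below.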
Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC(MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because of the fact that ThirdPartyClass dose not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but it doesn\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, Use Full Qualified Class Name String Literature Instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexcepted methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClassName() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs, if you want to change the argument ref in interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn Skywalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . 
+ class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. 
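* Implementations typically create or stop spans here via ContextManager, and may use this context to carry per-invocation state to afterMethod.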
*/ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","excerpt":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin. …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/java-plugin-development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. 
This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. 
* @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. 
setnx) cache.op indicates the command is used for write or read operation , usually the value is converting from command cache.key indicates the cache key that would be sent to cache server , this tag maybe null , as string type key would be collected usually.  In order to decide which op should be converted to flexibly , It\u0026rsquo;s better that providing config property . Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analysis MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates MQ queue name mq.topic indicates MQ topic name , It\u0026rsquo;s optional as some MQ don\u0026rsquo;t hava concept of topic transmission.latency The transmission latency from consumer to producer. Usually you needn\u0026rsquo;t to record this tag manually, instead to call contextCarrier.extensionInjector().injectSendingTimestamp(); to record tag sendingTimestamp on producer side , and SkyWalking would record this tag on consumer side if sw8-x context carrier(from producer side) contains sendingTimestamp  Notice , you should set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation tech and AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  
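The definitions above only select where to intercept; the actual tracing work happens in the interceptor class named by getMethodsInterceptor, whose interface is shown later in this section. Here is a hedged sketch of such an interceptor for a hypothetical remote client method; the operation name, peer and header handling are illustrative, and ContextManager.activeSpan() is assumed from the agent core API.

public class DemoClientInterceptor implements InstanceMethodsAroundInterceptor {
    @Override
    public void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments,
                             Class<?>[] argumentsTypes, MethodInterceptResult result) throws Throwable {
        ContextCarrier contextCarrier = new ContextCarrier();
        AbstractSpan span = ContextManager.createExitSpan("/demo/remote/call", contextCarrier, "demo-host:8080");
        span.setLayer(SpanLayer.HTTP);
        // also set the component via span.setComponent(...) with a constant reserved in ComponentsDefine
        CarrierItem next = contextCarrier.items();
        while (next.hasNext()) {
            next = next.next();
            // copy next.getHeadKey()/next.getHeadValue() into the outgoing request headers (library-specific)
        }
    }

    @Override
    public Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments,
                              Class<?>[] argumentsTypes, Object ret) throws Throwable {
        ContextManager.stopSpan();
        return ret;
    }

    @Override
    public void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments,
                                      Class<?>[] argumentsTypes, Throwable t) {
        ContextManager.activeSpan().errorOccurred().log(t);
    }
}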
Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which can propagate context through the MIC (MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class) or byName(ThirdPartyClass.class.getName()), because ThirdPartyClass does not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but they don\u0026rsquo;t cover all scenarios of this limitation, so never try to work around it by using the fully-qualified class name (FQCN): takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, use the fully qualified class name as a string literal instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend using a string literal. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexpected methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClass() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must be an instance of InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true from isOverrideArgs if you want to change the argument references in the interceptor. Please refer to bytebuddy for details on defining an ElementMatcher.\nIn SkyWalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . 
+ class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. 
*/ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","excerpt":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin. …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/java-plugin-development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide You can always take the existing plugins as examples, while there are some general ideas for all plugins.\n  A plugin is a module under the directory skywalking/plugins with an install method;\n  Inside the install method, you find out the relevant method(s) of the libraries that you plan to instrument, and create/close spans before/after those method(s).\n  You should also provide version rules in the plugin module, which means the version of package your plugin aim to test.\nAll below variables will be used by the tools/plugin_doc_gen.py to produce a latest Plugin Doc.\nlink_vector = [\u0026#39;https://www.python-httpx.org/\u0026#39;] # This should link to the official website/doc of this lib # The support matrix is for scenarios where some libraries don\u0026#39;t work for certain Python versions # Therefore, we use the matrix to instruct the CI testing pipeline to skip over plugin test for such Python version # The right side versions, should almost always use A.B.* to test the latest minor version of two recent major versions.  support_matrix = { \u0026#39;httpx\u0026#39;: { \u0026#39;\u0026gt;=3.7\u0026#39;: [\u0026#39;0.23.*\u0026#39;, \u0026#39;0.22.*\u0026#39;] } } # The note will be used when generating the plugin documentation for users. 
note = \u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;   Every plugin requires a corresponding test under tests/plugin before it can be merged, refer to the Plugin Test Guide when writing a plugin test.\n  Add the corresponding configuration options added/modified by the new plugin to the config.py and add new comments for each, then regenerate the configuration.md by make doc-gen.\n  Steps after coding If your PR introduces the need for a new non-standard library which needs to be pulled via pip or if it removes the need for a previously-used library:\n Run poetry add library --group plugins to pin the dependency to the plugins group, Do not add it to the main dependency! Run make doc-gen to generate a test matrix documentation for the plugin.  ","excerpt":"Plugin Development Guide You can always take the existing plugins as examples, while there are some …","ref":"/docs/skywalking-python/latest/en/contribution/how-to-develop-plugin/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide You can always take the existing plugins as examples, while there are some general ideas for all plugins.\n  A plugin is a module under the directory skywalking/plugins with an install method;\n  Inside the install method, you find out the relevant method(s) of the libraries that you plan to instrument, and create/close spans before/after those method(s).\n  You should also provide version rules in the plugin module, which means the version of package your plugin aim to test.\nAll below variables will be used by the tools/plugin_doc_gen.py to produce a latest Plugin Doc.\nlink_vector = [\u0026#39;https://www.python-httpx.org/\u0026#39;] # This should link to the official website/doc of this lib # The support matrix is for scenarios where some libraries don\u0026#39;t work for certain Python versions # Therefore, we use the matrix to instruct the CI testing pipeline to skip over plugin test for such Python version # The right side versions, should almost always use A.B.* to test the latest minor version of two recent major versions.  support_matrix = { \u0026#39;httpx\u0026#39;: { \u0026#39;\u0026gt;=3.7\u0026#39;: [\u0026#39;0.23.*\u0026#39;, \u0026#39;0.22.*\u0026#39;] } } # The note will be used when generating the plugin documentation for users. note = \u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;   Every plugin requires a corresponding test under tests/plugin before it can be merged, refer to the Plugin Test Guide when writing a plugin test.\n  Add the corresponding configuration options added/modified by the new plugin to the config.py and add new comments for each, then regenerate the configuration.md by make doc-gen.\n  Steps after coding If your PR introduces the need for a new non-standard library which needs to be pulled via pip or if it removes the need for a previously-used library:\n Run poetry add library --group plugins to pin the dependency to the plugins group, Do not add it to the main dependency! Run make doc-gen to generate a test matrix documentation for the plugin.  
","excerpt":"Plugin Development Guide You can always take the existing plugins as examples, while there are some …","ref":"/docs/skywalking-python/next/en/contribution/how-to-develop-plugin/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide You can always take the existing plugins as examples, while there are some general ideas for all plugins.\n  A plugin is a module under the directory skywalking/plugins with an install method;\n  Inside the install method, you find out the relevant method(s) of the libraries that you plan to instrument, and create/close spans before/after those method(s).\n  You should also provide version rules in the plugin module, which means the version of package your plugin aim to test.\nAll below variables will be used by the tools/plugin_doc_gen.py to produce a latest Plugin Doc.\nlink_vector = [\u0026#39;https://www.python-httpx.org/\u0026#39;] # This should link to the official website/doc of this lib # The support matrix is for scenarios where some libraries don\u0026#39;t work for certain Python versions # Therefore, we use the matrix to instruct the CI testing pipeline to skip over plugin test for such Python version # The right side versions, should almost always use A.B.* to test the latest minor version of two recent major versions.  support_matrix = { \u0026#39;httpx\u0026#39;: { \u0026#39;\u0026gt;=3.7\u0026#39;: [\u0026#39;0.23.*\u0026#39;, \u0026#39;0.22.*\u0026#39;] } } # The note will be used when generating the plugin documentation for users. note = \u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;   Every plugin requires a corresponding test under tests/plugin before it can be merged, refer to the Plugin Test Guide when writing a plugin test.\n  Add the corresponding configuration options added/modified by the new plugin to the config.py and add new comments for each, then regenerate the configuration.md by make doc-gen.\n  Steps after coding If your PR introduces the need for a new non-standard library which needs to be pulled via pip or if it removes the need for a previously-used library:\n Run poetry add library --group plugins to pin the dependency to the plugins group, Do not add it to the main dependency! Run make doc-gen to generate a test matrix documentation for the plugin.  ","excerpt":"Plugin Development Guide You can always take the existing plugins as examples, while there are some …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-develop-plugin/","title":"Plugin Development Guide"},{"body":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific plugins, through their names. Consequently, the codes of these excluded plugins will not be weaved in, then, no relative tracing and metrics.\nConfiguration plugin:# List the names of excluded plugins, multiple plugin names should be splitted by \u0026#34;,\u0026#34;# NOTE: This parameter only takes effect during the compilation phase.excluded:${SW_AGENT_PLUGIN_EXCLUDES:}This configuration option is also located in the existing configuration files and supports configuration based on environment variables. 
However, this environment variable only takes effect during the compilation phase.\nThe plugins name please refer to the Support Plugins Documentation.\n","excerpt":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific …","ref":"/docs/skywalking-go/latest/en/advanced-features/plugin-exclusion/","title":"Plugin Exclusion"},{"body":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific plugins, through their names. Consequently, the codes of these excluded plugins will not be weaved in, then, no relative tracing and metrics.\nConfiguration plugin:# List the names of excluded plugins, multiple plugin names should be splitted by \u0026#34;,\u0026#34;# NOTE: This parameter only takes effect during the compilation phase.excluded:${SW_AGENT_PLUGIN_EXCLUDES:}This configuration option is also located in the existing configuration files and supports configuration based on environment variables. However, this environment variable only takes effect during the compilation phase.\nThe plugins name please refer to the Support Plugins Documentation.\n","excerpt":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific …","ref":"/docs/skywalking-go/next/en/advanced-features/plugin-exclusion/","title":"Plugin Exclusion"},{"body":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific plugins, through their names. Consequently, the codes of these excluded plugins will not be weaved in, then, no relative tracing and metrics.\nConfiguration plugin:# List the names of excluded plugins, multiple plugin names should be splitted by \u0026#34;,\u0026#34;# NOTE: This parameter only takes effect during the compilation phase.excluded:${SW_AGENT_PLUGIN_EXCLUDES:}This configuration option is also located in the existing configuration files and supports configuration based on environment variables. 
However, this environment variable only takes effect during the compilation phase.\nThe plugins name please refer to the Support Plugins Documentation.\n","excerpt":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific …","ref":"/docs/skywalking-go/v0.4.0/en/advanced-features/plugin-exclusion/","title":"Plugin Exclusion"},{"body":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker   Fetcher Filter Forwarder  Envoy ALS v2 GRPC Forwarder Envoy ALS v3 GRPC Forwarder Envoy Metrics v2 GRPC Forwarder Envoy Metrics v3 GRPC Forwarder Native CDS GRPC Forwarder Native EBPF Profiling GRPC Forwarder Native Event GRPC Forwarder Native JVM GRPC Forwarder Native CLR GRPC Forwarder Native Log GRPC Forwarder Native Log Kafka Forwarder Native Management GRPC Forwarder Native Meter GRPC Forwarder Native Process GRPC Forwarder Native Profile GRPC Forwarder Native Tracing GRPC Forwarder OpenTelemetry Metrics v1 GRPC Forwarder   Parser Queue  Memory Queue Mmap Queue None Queue   Receiver  GRPC Envoy ALS v2 Receiver GRPC Envoy ALS v3 Receiver GRPC Envoy Metrics v2 Receiver GRPC Envoy Metrics v3 Receiver GRPC Native CDS Receiver GRPC Native EBFP Profiling Receiver GRPC Native Event Receiver GRPC Native JVM Receiver GRPC Native CLR Receiver GRPC Native Log Receiver GRPC Native Management Receiver GRPC Native Meter Receiver GRPC Native Process Receiver GRPC Native Profile Receiver GRPC Native Tracing Receiver GRPC OpenTelemetry Metrics v1 Receiver HTTP Native Log Receiver   Server  GRPC Server HTTP Server    ","excerpt":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/plugin-list/","title":"Plugin List"},{"body":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker   Fetcher Filter Forwarder  Envoy ALS v2 GRPC Forwarder Envoy ALS v3 GRPC Forwarder Envoy Metrics v2 GRPC Forwarder Envoy Metrics v3 GRPC Forwarder Native CDS GRPC Forwarder Native CLR GRPC Forwarder GRPC Native EBFP Access Log Forwarder Native EBPF Profiling GRPC Forwarder Native Event GRPC Forwarder Native JVM GRPC Forwarder Native Log GRPC Forwarder Native Log Kafka Forwarder Native Management GRPC Forwarder Native Meter GRPC Forwarder Native Process GRPC Forwarder Native Profile GRPC Forwarder Native Tracing GRPC Forwarder OpenTelemetry Metrics v1 GRPC Forwarder   Parser Queue  Memory Queue Mmap Queue None Queue   Receiver  GRPC Envoy ALS v2 Receiver GRPC Envoy ALS v3 Receiver GRPC Envoy Metrics v2 Receiver GRPC Envoy Metrics v3 Receiver GRPC Native CDS Receiver GRPC Native CLR Receiver GRPC Native EBFP Accesslog Receiver GRPC Native EBFP Profiling Receiver GRPC Native Event Receiver GRPC Native JVM Receiver GRPC Native Log Receiver GRPC Native Management Receiver GRPC Native Meter Receiver GRPC Native Process Receiver GRPC Native Profile Receiver GRPC Native Tracing Receiver GRPC OpenTelemetry Metrics v1 Receiver HTTP Native Log Receiver   Server  GRPC Server HTTP Server    ","excerpt":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/plugin-list/","title":"Plugin List"},{"body":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker   Fetcher Filter Forwarder  Envoy ALS v2 GRPC Forwarder Envoy ALS v3 GRPC Forwarder Envoy Metrics v2 GRPC Forwarder Envoy Metrics v3 GRPC Forwarder Native 
CDS GRPC Forwarder Native EBPF Profiling GRPC Forwarder Native Event GRPC Forwarder Native JVM GRPC Forwarder Native CLR GRPC Forwarder Native Log GRPC Forwarder Native Log Kafka Forwarder Native Management GRPC Forwarder Native Meter GRPC Forwarder Native Process GRPC Forwarder Native Profile GRPC Forwarder Native Tracing GRPC Forwarder OpenTelemetry Metrics v1 GRPC Forwarder   Parser Queue  Memory Queue Mmap Queue None Queue   Receiver  GRPC Envoy ALS v2 Receiver GRPC Envoy ALS v3 Receiver GRPC Envoy Metrics v2 Receiver GRPC Envoy Metrics v3 Receiver GRPC Native CDS Receiver GRPC Native EBFP Profiling Receiver GRPC Native Event Receiver GRPC Native JVM Receiver GRPC Native CLR Receiver GRPC Native Log Receiver GRPC Native Management Receiver GRPC Native Meter Receiver GRPC Native Process Receiver GRPC Native Profile Receiver GRPC Native Tracing Receiver GRPC OpenTelemetry Metrics v1 Receiver HTTP Native Log Receiver   Server  GRPC Server HTTP Server    ","excerpt":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/plugin-list/","title":"Plugin List"},{"body":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins.\nRegistration mechanism The Plugin registration mechanism in Satellite is similar to the SPI registration mechanism of Java. Plugin registration mechanism supports to register an interface and its implementation, that means different interfaces have different registration spaces. We can easily find the type of a specific plugin according to the interface and the plugin name and initialize it according to the type.\nstructure:\n code: map[reflect.Type]map[string]reflect.Value meaning: map[interface type]map[plugin name] plugin type  Initialization mechanism Users can easily find a plugin type and initialize an empty plugin instance according to the previous registration mechanism. For setting up the configuration of the extension convenience, we define the initialization mechanism in Plugin structure.\nIn the initialization mechanism, the plugin category(interface) and the init config is required.\nInitialize processing is like the following.\n Find the plugin name in the input config according to the fixed key plugin_name. Find plugin type according to the plugin category(interface) and the plugin name. Create an empty plugin. Initialize the plugin according to the merged config, which is created by the input config and the default config.  Plugin usage in Satellite Nowadays, the numbers of the Plugin categories is 2. One is the sharing Plugin, and another is the other normal Plugin.\n Extension Plugins:  sharing plugins  Server Plugin Client Plugin   normal plugins  Receiver Plugin Fetcher Plugin Parser Plugin Queue Plugin Filter Plugin Fallbacker Plugin Forwarder Plugin      ","excerpt":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins. …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/plugin_mechanism/","title":"plugin structure"},{"body":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins.\nRegistration mechanism The Plugin registration mechanism in Satellite is similar to the SPI registration mechanism of Java. Plugin registration mechanism supports to register an interface and its implementation, that means different interfaces have different registration spaces. 
We can easily find the type of a specific plugin according to the interface and the plugin name and initialize it according to the type.\nstructure:\n code: map[reflect.Type]map[string]reflect.Value meaning: map[interface type]map[plugin name] plugin type  Initialization mechanism Users can easily find a plugin type and initialize an empty plugin instance according to the previous registration mechanism. For setting up the configuration of the extension convenience, we define the initialization mechanism in Plugin structure.\nIn the initialization mechanism, the plugin category(interface) and the init config is required.\nInitialize processing is like the following.\n Find the plugin name in the input config according to the fixed key plugin_name. Find plugin type according to the plugin category(interface) and the plugin name. Create an empty plugin. Initialize the plugin according to the merged config, which is created by the input config and the default config.  Plugin usage in Satellite Nowadays, the numbers of the Plugin categories is 2. One is the sharing Plugin, and another is the other normal Plugin.\n Extension Plugins:  sharing plugins  Server Plugin Client Plugin   normal plugins  Receiver Plugin Fetcher Plugin Parser Plugin Queue Plugin Filter Plugin Fallbacker Plugin Forwarder Plugin      ","excerpt":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins. …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/plugin_mechanism/","title":"plugin structure"},{"body":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins.\nRegistration mechanism The Plugin registration mechanism in Satellite is similar to the SPI registration mechanism of Java. Plugin registration mechanism supports to register an interface and its implementation, that means different interfaces have different registration spaces. We can easily find the type of a specific plugin according to the interface and the plugin name and initialize it according to the type.\nstructure:\n code: map[reflect.Type]map[string]reflect.Value meaning: map[interface type]map[plugin name] plugin type  Initialization mechanism Users can easily find a plugin type and initialize an empty plugin instance according to the previous registration mechanism. For setting up the configuration of the extension convenience, we define the initialization mechanism in Plugin structure.\nIn the initialization mechanism, the plugin category(interface) and the init config is required.\nInitialize processing is like the following.\n Find the plugin name in the input config according to the fixed key plugin_name. Find plugin type according to the plugin category(interface) and the plugin name. Create an empty plugin. Initialize the plugin according to the merged config, which is created by the input config and the default config.  Plugin usage in Satellite Nowadays, the numbers of the Plugin categories is 2. One is the sharing Plugin, and another is the other normal Plugin.\n Extension Plugins:  sharing plugins  Server Plugin Client Plugin   normal plugins  Receiver Plugin Fetcher Plugin Parser Plugin Queue Plugin Filter Plugin Fallbacker Plugin Forwarder Plugin      ","excerpt":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins. 
…","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/plugin_mechanism/","title":"plugin structure"},{"body":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the master branch. Specify a support matrix in each plugin in the skywalking/plugins folder, along with their website links, the matrix and links will be used for plugin support table documentation generation for this doc Plugins.md.\nUse make doc-gen to generate a table and paste into Plugins.md after all test passes.\nSkyWalking Agent Test Tool (Mock Collector) SkyWalking Agent Test Tool respects the same protocol as the SkyWalking backend, and thus receives the report data from the agent side, besides, it also exposes some HTTP endpoints for verification.\nTested Service A tested service is a service involving the plugin that is to be tested, and exposes some endpoints to trigger the instrumented code and report log/trace/meter data to the mock collector.\nDocker Compose docker-compose is used to orchestrate the mock collector and the tested service(s), the docker-compose.yml should be able to run with docker-compose -f docker-compose.yml up in standalone mode, which can be used in debugging too.\nExpected Data The expected.data.yml file contains the expected segment/log/meter data after we have triggered the instrumentation and report to mock collector.\nOnce the mock collector receives data, we post the expected data to the mock collector and verify whether they match.\nThis can be done through the /dataValidate of the mock collector, say http://collector:12800/dataValidate, for example.\nExample If we want to test the plugin for the built-in library http, we will:\n Build a tested service, which sets up an HTTP server by http library, and exposes an HTTP endpoint to be triggered in the test codes, say /trigger, take this provider service as example. Compose a docker-compose.yml file, orchestrating the service built in step 1 and the mock collector, take this docker-compose.yml as an example. Write test codes to trigger the endpoint in step 1, and send the expected data file to the mock collector to verify, take this test as example.  ","excerpt":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the …","ref":"/docs/skywalking-python/latest/en/contribution/how-to-test-plugin/","title":"Plugin Test"},{"body":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the master branch. 
Specify a support matrix in each plugin in the skywalking/plugins folder, along with their website links, the matrix and links will be used for plugin support table documentation generation for this doc Plugins.md.\nUse make doc-gen to generate a table and paste into Plugins.md after all test passes.\nSkyWalking Agent Test Tool (Mock Collector) SkyWalking Agent Test Tool respects the same protocol as the SkyWalking backend, and thus receives the report data from the agent side, besides, it also exposes some HTTP endpoints for verification.\nTested Service A tested service is a service involving the plugin that is to be tested, and exposes some endpoints to trigger the instrumented code and report log/trace/meter data to the mock collector.\nDocker Compose docker-compose is used to orchestrate the mock collector and the tested service(s), the docker-compose.yml should be able to run with docker-compose -f docker-compose.yml up in standalone mode, which can be used in debugging too.\nExpected Data The expected.data.yml file contains the expected segment/log/meter data after we have triggered the instrumentation and report to mock collector.\nOnce the mock collector receives data, we post the expected data to the mock collector and verify whether they match.\nThis can be done through the /dataValidate of the mock collector, say http://collector:12800/dataValidate, for example.\nExample If we want to test the plugin for the built-in library http, we will:\n Build a tested service, which sets up an HTTP server by http library, and exposes an HTTP endpoint to be triggered in the test codes, say /trigger, take this provider service as example. Compose a docker-compose.yml file, orchestrating the service built in step 1 and the mock collector, take this docker-compose.yml as an example. Write test codes to trigger the endpoint in step 1, and send the expected data file to the mock collector to verify, take this test as example.  ","excerpt":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the …","ref":"/docs/skywalking-python/next/en/contribution/how-to-test-plugin/","title":"Plugin Test"},{"body":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the master branch. 
Specify a support matrix in each plugin in the skywalking/plugins folder, along with their website links, the matrix and links will be used for plugin support table documentation generation for this doc Plugins.md.\nUse make doc-gen to generate a table and paste into Plugins.md after all test passes.\nSkyWalking Agent Test Tool (Mock Collector) SkyWalking Agent Test Tool respects the same protocol as the SkyWalking backend, and thus receives the report data from the agent side, besides, it also exposes some HTTP endpoints for verification.\nTested Service A tested service is a service involving the plugin that is to be tested, and exposes some endpoints to trigger the instrumented code and report log/trace/meter data to the mock collector.\nDocker Compose docker-compose is used to orchestrate the mock collector and the tested service(s), the docker-compose.yml should be able to run with docker-compose -f docker-compose.yml up in standalone mode, which can be used in debugging too.\nExpected Data The expected.data.yml file contains the expected segment/log/meter data after we have triggered the instrumentation and report to mock collector.\nOnce the mock collector receives data, we post the expected data to the mock collector and verify whether they match.\nThis can be done through the /dataValidate of the mock collector, say http://collector:12800/dataValidate, for example.\nExample If we want to test the plugin for the built-in library http, we will:\n Build a tested service, which sets up an HTTP server by http library, and exposes an HTTP endpoint to be triggered in the test codes, say /trigger, take this provider service as example. Compose a docker-compose.yml file, orchestrating the service built in step 1 and the mock collector, take this docker-compose.yml as an example. Write test codes to trigger the endpoint in step 1, and send the expected data file to the mock collector to verify, take this test as example.  ","excerpt":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-test-plugin/","title":"Plugin Test"},{"body":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\n","excerpt":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. 
Activate …","ref":"/docs/main/latest/en/setup/backend/storages/postgresql/","title":"PostgreSQL"},{"body":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\n","excerpt":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate …","ref":"/docs/main/next/en/setup/backend/storages/postgresql/","title":"PostgreSQL"},{"body":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\n","excerpt":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate …","ref":"/docs/main/v9.7.0/en/setup/backend/storages/postgresql/","title":"PostgreSQL"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/latest/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. 
Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/next/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  
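As a sketch of the collector side of this set-up (not the shipped configuration), the OpenTelemetry Collector config below scrapes postgres-exporter with the Prometheus Receiver and pushes the metrics to the OAP over OTLP gRPC. The host names, the job name, and the host_name label are illustrative assumptions; the full example in the SkyWalking repository also attaches the labels the bundled MAL rules expect, so treat this only as the overall shape:

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: postgresql-monitoring      # illustrative job name
          scrape_interval: 10s
          static_configs:
            - targets: ["postgres-exporter:9187"]   # postgres-exporter default port
              labels:
                host_name: pg-server-1              # assumed label; check the bundled rules/example

exporters:
  otlp:
    endpoint: oap:11800   # OAP gRPC endpoint (11800 is the default gRPC port)
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]
```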
PostgreSQL Monitoring PostgreSQL monitoring provides monitoring of the status and resources of the PostgreSQL server. The PostgreSQL server is cataloged as a Service in OAP, and lands on the Layer: POSTGRESQL.\nPostgreSQL Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The cache hit rate postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of the total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql.yaml. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring SkyWalking leverages postgres-exporter for collecting metrics data from …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL monitoring provides monitoring of the status and resources of the PostgreSQL server.PostgreSQL server as a Service in OAP, and land on the Layer: POSTGRESQL.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. 
postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql.yaml. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. 
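To make the Fluent Bit step above concrete, one possible shape (Fluent Bit's YAML configuration format) is sketched below: tail the PostgreSQL log file and post the records to the OAP's native log HTTP API. It assumes slow-statement logging is already enabled in postgresql.conf (for example log_min_duration_statement = 2000 with logging_collector = on); the log path, host, port, and URI are assumptions to verify against the linked example, which also reshapes each record into the log format the LAL rule expects before sending:

```yaml
pipeline:
  inputs:
    - name: tail
      # Placeholder path; point at the log files produced by logging_collector.
      path: /var/lib/postgresql/data/log/*.log
      tag: postgresql.slowsql
  outputs:
    - name: http
      match: postgresql.slowsql
      # Assumed OAP native log HTTP API; 12800 is the default OAP REST port.
      host: oap
      port: 12800
      uri: /v3/logs
      format: json
```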
Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL monitoring provides monitoring of the status and resources of the PostgreSQL server.PostgreSQL server as a Service in OAP, and land on the Layer: POSTGRESQL.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. 
postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. 
The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. 
The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. 
The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. 
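For orientation, a LAL rule file follows the general shape sketched below; this is only an illustrative skeleton (the DSL body is reduced to comments), and the shipped rule referenced next is the authoritative version:

```yaml
rules:
  - name: pgsql-slowsql      # illustrative rule name
    layer: POSTGRESQL
    dsl: |
      filter {
        // 1. parse the incoming slow-SQL log line (text/json parser)
        // 2. extractor: map the database, statement, and latency into a
        //    top_n_database_statement record
        // 3. sink: persist the record
      }
```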
The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another example is SkyWalking agent, which leverage Golang compiling mechanism to weaves codes in the compiling time. For some static compilation languages, such as C++, manual library is the only choice. As you can see, these agents are based on languages and libraries, no matter we provide auto instrument or manual agents.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. SkyWalking community is connected closely with Zipkin community, it could work as an alternative server for both v1 and v2 Zipkin traces. Also, OTEL trace format in gRPC is supported, and converted to Zipkin format inside SkyWalking. As an alternative Zipkin server, Zipkin lens UI could be used to visualize accepted traces when they are in Zipkin format. See Receiver for Zipkin traces and Receiver for OTEL traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t have to install all probes to make SkyWalking up and running. There are several recommended ways on how to use these probes:\n Use Language based native agent only to build topology and metrics for your business application. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe if you prefer Service Mesh stack and don\u0026rsquo;t want to use native agents. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in pure tracing status. (Advanced usage) Use eBPF agent only if you only want to profile on demand and/or activating automatic performance analysis. Use eBPF agent with Language based native agent collaboratively. Enhance the traces with the eBPF agent to collect extra information.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. 
In pure tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, but doesn\u0026rsquo;t run the metrics analysis from traces. As a result, there would not have data of service/instance/endpoint metrics and relationships.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/latest/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another example is SkyWalking agent, which leverage Golang compiling mechanism to weaves codes in the compiling time. For some static compilation languages, such as C++, manual library is the only choice. As you can see, these agents are based on languages and libraries, no matter we provide auto instrument or manual agents.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. SkyWalking community is connected closely with Zipkin community, it could work as an alternative server for both v1 and v2 Zipkin traces. Also, OTEL trace format in gRPC is supported, and converted to Zipkin format inside SkyWalking. As an alternative Zipkin server, Zipkin lens UI could be used to visualize accepted traces when they are in Zipkin format. See Receiver for Zipkin traces and Receiver for OTEL traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t have to install all probes to make SkyWalking up and running. There are several recommended ways on how to use these probes:\n Use Language based native agent only to build topology and metrics for your business application. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe if you prefer Service Mesh stack and don\u0026rsquo;t want to use native agents. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in pure tracing status. (Advanced usage) Use eBPF agent only if you only want to profile on demand and/or activating automatic performance analysis. Use eBPF agent with Language based native agent collaboratively. 
Enhance the traces with the eBPF agent to collect extra information.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In pure tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, but doesn\u0026rsquo;t run the metrics analysis from traces. As a result, there would not have data of service/instance/endpoint metrics and relationships.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/next/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are three typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage)  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. 
In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and proifiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. 
After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  
","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  
","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  
","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  
","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  
","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another example is SkyWalking agent, which leverage Golang compiling mechanism to weaves codes in the compiling time. For some static compilation languages, such as C++, manual library is the only choice. As you can see, these agents are based on languages and libraries, no matter we provide auto instrument or manual agents.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. SkyWalking community is connected closely with Zipkin community, it could work as an alternative server for both v1 and v2 Zipkin traces. Also, OTEL trace format in gRPC is supported, and converted to Zipkin format inside SkyWalking. As an alternative Zipkin server, Zipkin lens UI could be used to visualize accepted traces when they are in Zipkin format. See Receiver for Zipkin traces and Receiver for OTEL traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t have to install all probes to make SkyWalking up and running. There are several recommended ways on how to use these probes:\n Use Language based native agent only to build topology and metrics for your business application. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe if you prefer Service Mesh stack and don\u0026rsquo;t want to use native agents. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in pure tracing status. (Advanced usage) Use eBPF agent only if you only want to profile on demand and/or activating automatic performance analysis. Use eBPF agent with Language based native agent collaboratively. Enhance the traces with the eBPF agent to collect extra information.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In pure tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, but doesn\u0026rsquo;t run the metrics analysis from traces. 
As a result, there would not have data of service/instance/endpoint metrics and relationships.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, and Zabbix.\nThe native metrics format definition could be found here. Typically, the agent meter plugin (e.g. Java Meter Plugin) and Satellite Prometheus fetcher would convert metrics into native format and forward them to SkyWalking OAP server.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
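Complementing the events endpoint mentioned above, here is a hedged sketch of reporting one event over the HTTP API with the JDK's built-in HttpClient; the OAP address is a placeholder, and the record body follows the JSON example shown immediately below.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class EventReportExample {
    public static void main(String[] args) throws Exception {
        // JSON array with a single event record, matching the documented format below.
        String body = "[{"
                + "\"uuid\":\"f498b3c0-8bca-438d-a5b0-3701826ae21c\","
                + "\"source\":{\"service\":\"SERVICE-A\",\"instance\":\"INSTANCE-1\"},"
                + "\"name\":\"Reboot\",\"type\":\"Normal\",\"message\":\"App reboot.\","
                + "\"parameters\":{},"
                + "\"startTime\":1628044330000,\"endTime\":1628044331000}]";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://oap-address:12800/v3/events")) // replace oap-address with your OAP host
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();

        // A 2xx status code indicates the OAP server accepted the event.
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode());
    }
}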
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, …","ref":"/docs/main/v9.0.0/en/protocols/readme/","title":"Probe Protocols"},{"body":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, and Zabbix.\nThe native metrics format definition could be found here. Typically, the agent meter plugin (e.g. Java Meter Plugin) and Satellite Prometheus fetcher would convert metrics into native format and forward them to SkyWalking OAP server.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, …","ref":"/docs/main/v9.1.0/en/protocols/readme/","title":"Probe Protocols"},{"body":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, OpenTelemetry, and Zabbix.\nThe native metrics format definition could be found here. Typically, the agent meter plugin (e.g. Java Meter Plugin) and Satellite Prometheus fetcher would convert metrics into native format and forward them to SkyWalking OAP server.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
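The Metrics section of the Probe Protocols entries above notes that the agent meter plugin (e.g. the Java Meter Plugin) reports metrics in SkyWalking's native format. A hedged sketch of what that can look like on the application side, assuming the Java agent's apm-toolkit-meter API; the metric name and tag are illustrative, and the event record example for this entry continues right after this sketch.

import org.apache.skywalking.apm.toolkit.meter.Counter;
import org.apache.skywalking.apm.toolkit.meter.MeterFactory;

public class OrderMetrics {
    // A counter in SkyWalking's native meter format; the agent's meter plugin
    // collects it and forwards it to the OAP server periodically.
    private static final Counter ORDERS_CREATED = MeterFactory
            .counter("order_created_count")   // illustrative metric name
            .tag("region", "eu-west")         // illustrative tag
            .build();

    public void onOrderCreated() {
        ORDERS_CREATED.increment(1d);
    }
}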
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, …","ref":"/docs/main/v9.2.0/en/protocols/readme/","title":"Probe Protocols"},{"body":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3.1 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, OpenTelemetry, and Zabbix.\nThe native metrics format definition could be found here. The agent meter plugin (e.g. Java Meter Plugin) uses the native metric format to report metrics.\nOpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to MAL engine.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, …","ref":"/docs/main/v9.3.0/en/protocols/readme/","title":"Probe Protocols"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/latest/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. 
Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/latest/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/latest/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/latest/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. 
Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/latest/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/latest/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/latest/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
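The Kafka consumer FAQ entries above and below explain that only the poll action is traced automatically, so for a native client the poll and the record processing have to be wrapped into one traced unit manually. A hedged sketch of such a wrapper, assuming the @KafkaPollAndInvoke annotation from apm-toolkit-kafka is placed on the method that both polls and processes; the package name, polling interval, and business logic are illustrative assumptions, not taken from this page.

import java.time.Duration;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.skywalking.apm.toolkit.kafka.KafkaPollAndInvoke;

public class OrderConsumer {
    private final KafkaConsumer<String, String> consumer;

    public OrderConsumer(KafkaConsumer<String, String> consumer) {
        this.consumer = consumer;
    }

    // Annotating the method that polls AND processes keeps the processing work
    // inside the trace context created for the poll (assumption based on the FAQ).
    @KafkaPollAndInvoke
    public void pollAndHandle() {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
        for (ConsumerRecord<String, String> record : records) {
            handle(record); // illustrative processing
        }
    }

    private void handle(ConsumerRecord<String, String> record) {
        // business logic goes here
    }
}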
","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/next/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/next/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/next/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/next/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/next/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/next/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/next/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.0.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.0.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. 
the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.0.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.0.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.0.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. 
That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.0.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.0.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
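The thread-pool FAQ entries on this page resolve the TraceSegment leak by enhancing the task with RunnableWrapper. Expanded into a self-contained sketch (assuming RunnableWrapper comes from the Java agent's across-thread toolkit, apm-toolkit-trace; the task body is illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.skywalking.apm.toolkit.trace.RunnableWrapper;

public class CrossThreadExample {
    public static void main(String[] args) {
        ExecutorService executorService = Executors.newFixedThreadPool(1);

        // Wrapping the Runnable lets the agent carry the current trace context
        // into the worker thread, so the TraceSegment created there is reported
        // instead of being held in memory unreferenced.
        executorService.execute(RunnableWrapper.of(() -> {
            // your business code
        }));

        executorService.shutdown();
    }
}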
","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.1.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.1.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.1.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.1.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.1.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.1.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.1.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.2.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.2.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. 
the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.2.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.2.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.2.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. 
That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.2.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.2.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.3.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.3.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.3.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.3.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.3.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.3.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.3.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.4.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.4.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. 
the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.4.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.4.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.4.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. 
That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.4.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.4.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.5.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.5.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.5.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.5.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.5.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.5.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.5.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.6.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.6.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. 
the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.6.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.6.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.6.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. 
That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.6.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.6.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.7.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.7.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.7.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.7.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.7.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.7.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. 
The traces can be seen, but no …","ref":"/docs/main/v9.7.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. 
Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/latest/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... 
SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/next/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! 
stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.0.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! 
stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. 
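For readability, here is the same Find/Replace edit from Resolution 1 written out as plain XML (the indentation is illustrative only; the element shown is the one that passes arguments to the npm install run in apm-webapp):

Find:
<configuration>
  <arguments>install --registry=https://registry.npmjs.org/</arguments>
</configuration>

Replace it with:
<configuration>
  <arguments>install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/</arguments>
</configuration>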
","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.1.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. 
For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.2.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. 
SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.3.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! 
stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.4.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! 
stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. 
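A side note on the Reason above: the SyntaxError in the log comes from node-gyp probing the interpreter with a Python 2 style print statement, which Python 3 rejects. A minimal illustration follows; only the first line is what the log shows node-gyp running, and the second is included purely for comparison and is not part of the build.

# Python 2 form used by node-gyp's version probe; a SyntaxError on Python 3
import sys; print "%s.%s.%s" % sys.version_info[:3]
# Python 3 equivalent of the same probe, shown for comparison only
import sys; print("%s.%s.%s" % sys.version_info[:3])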
","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.5.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. 
For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.6.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. 
SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.7.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. 
Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/latest/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. 
Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  
Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/next/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided two ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. 
Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\n","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided two ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. 
It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\n","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. 
The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. 
HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  
Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. 
The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. 
HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the snapshot to the backend server.\nConfiguration    Name Default Environment Key Description     profiling.active true ROVER_PROFILING_ACTIVE Is active the process profiling.   profiling.check_interval 10s ROVER_PROFILING_CHECK_INTERVAL Check the profiling task interval.   profiling.flush_interval 5s ROVER_PROFILING_FLUSH_INTERVAL Combine existing profiling data and report to the backend interval.   profiling.task.on_cpu.dump_period 9ms ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD The profiling stack dump period.   profiling.task.network.report_interval 2s ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_REPORT_INTERVAL The interval of send metrics to the backend.   profiling.task.network.meter_prefix rover_net_p ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_METER_PREFIX The prefix of network profiling metrics name.   profiling.task.network.protocol_analyze.per_cpu_buffer 400KB ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   profiling.task.network.protocol_analyze.parallels 2 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   profiling.task.network.protocol_analyze.queue_size 5000 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyzer queue.   profiling.task.network.protocol_analyze.sampling.http.default_request_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_REQUEST_ENCODING The default body encoding when sampling the request.   profiling.task.network.protocol_analyze.sampling.http.default_response_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_RESPONSE_ENCODING The default body encoding when sampling the response.   profiling.continuous.meter_prefix rover_con_p ROVER_PROFILING_CONTINUOUS_METER_PREFIX The continuous related meters prefix name.   profiling.continuous.fetch_interval 1s ROVER_PROFILING_CONTINUOUS_FETCH_INTERVAL The interval of fetch metrics from the system, such as Process CPU, System Load, etc.   profiling.continuous.check_interval 5s ROVER_PROFILING_CONTINUOUS_CHECK_INTERVAL The interval of check metrics is reach the thresholds.   profiling.continuous.trigger.execute_duration 10m ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION The duration of the profiling task.   profiling.continuous.trigger.silence_duration 20m ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION The minimal duration between the execution of the same profiling task.    
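As an aside on the two trigger durations in the configuration table above: one reasonable reading is that a task runs for execute_duration and may not be re-triggered until the silence window has also passed. The short Go sketch below illustrates that reading only; the trigger type, its field names, and the gating rule are assumptions for this example, not Rover's actual implementation.
package main

import (
    "fmt"
    "time"
)

// trigger mirrors the two duration settings from the configuration table above.
type trigger struct {
    executeDuration time.Duration // profiling.continuous.trigger.execute_duration
    silenceDuration time.Duration // profiling.continuous.trigger.silence_duration
    lastTriggered   time.Time     // zero value means the task has never been triggered
}

// shouldTrigger reports whether a new task may start at time now: under this
// interpretation, the previous execution plus the silence window must have elapsed.
func (t *trigger) shouldTrigger(now time.Time) bool {
    if t.lastTriggered.IsZero() {
        return true
    }
    return now.Sub(t.lastTriggered) >= t.executeDuration+t.silenceDuration
}

func main() {
    execute, _ := time.ParseDuration("10m") // defaults from the table above
    silence, _ := time.ParseDuration("20m")
    tr := &trigger{executeDuration: execute, silenceDuration: silence}
    fmt.Println(tr.shouldTrigger(time.Now())) // true: never triggered before
}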
Prepare service Before profiling your service, please make sure the service binary still contains its symbol data, so the stack symbols can be resolved. This can be checked in either of the following ways:\n objdump: Using objdump --syms path/to/service. readelf: Using readelf --syms path/to/service.  Profiling Type All the profiling tasks use the Linux perf_event_open function, kprobe, or uprobe to open perf events and attach the eBPF program that dumps stacks.\nOn CPU The On CPU Profiling task uses PERF_COUNT_SW_CPU_CLOCK to profile the process against the CPU clock; a minimal sketch of opening such a perf event appears after the Labels table below.\nOff CPU The Off CPU Profiling task attaches a kprobe to finish_task_switch to profile the process.\nNetwork The Network Profiling task intercepts I/O-related syscalls and uprobes in the process to identify the network traffic and generate the metrics. The following protocols can be analyzed through the OpenSSL library, BoringSSL library, GoTLS, NodeTLS, or plaintext:\n HTTP/1.x HTTP/2 MySQL CQL(The Cassandra Query Language) MongoDB Kafka DNS  Collecting data Network profiling sends metrics and logs to the backend service.\nData Type Network profiling customizes the following types of metrics to represent the network data:\n Counter: Records the total amount of data in a certain period of time. Each counter contains the following data:  Count: The count of the executions. Bytes: The package size of the executions. Exe Time: The consumed time(nanosecond) of the executions.   Histogram: Records the distribution of the data in buckets. TopN: Records the highest-latency data in a certain period of time.  Labels Each metric contains the following labels to identify the process relationship:\n   Name Type Description     client_process_id or server_process_id string The ID of the current process, which is determined by the role of the current process in the connection as server or client.   client_local or server_local boolean Whether the remote process is a local process.   client_address or server_address string The remote process address. ex: IP:port.   side enum The current process is either \u0026ldquo;client\u0026rdquo; or \u0026ldquo;server\u0026rdquo; in this connection.   protocol string The protocol identified based on the package data content.   is_ssl bool Whether the current connection uses SSL.    
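The On CPU task described above is built on a software perf event. Below is a minimal, Linux-only Go sketch of opening a PERF_COUNT_SW_CPU_CLOCK event via golang.org/x/sys/unix; the target pid, the single CPU, and the sample period (chosen here to echo the 9ms dump_period default) are example values, and a real profiler would additionally attach an eBPF program to the returned file descriptor.
package main

import (
    "fmt"
    "unsafe"

    "golang.org/x/sys/unix"
)

func main() {
    pid := 1234 // hypothetical target process ID
    cpu := 0    // profile CPU 0 only; an agent would open one event per CPU

    attr := unix.PerfEventAttr{
        Type:   unix.PERF_TYPE_SOFTWARE,
        Config: unix.PERF_COUNT_SW_CPU_CLOCK,
        Size:   uint32(unsafe.Sizeof(unix.PerfEventAttr{})),
        Sample: 9 * 1000 * 1000, // sample period in CPU-clock units (ns), roughly a 9ms dump period
    }

    // Open the software CPU-clock event for the target pid on one CPU.
    fd, err := unix.PerfEventOpen(&attr, pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC)
    if err != nil {
        fmt.Println("perf_event_open failed:", err)
        return
    }
    defer unix.Close(fd)

    // A real On CPU task would now attach an eBPF program to this fd to dump stacks.
    fmt.Println("perf event fd:", fd)
}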
Layer-4 Data Based on the above two data types, the following metrics are provided.\n   Name Type Unit Description     write Counter nanosecond The socket write counter   read Counter nanosecond The socket read counter   write RTT Counter microsecond The socket write RTT counter   connect Counter nanosecond The socket connect/accept with other server/client counter   close Counter nanosecond The socket close counter   retransmit Counter nanosecond The socket retransmit package counter   drop Counter nanosecond The socket drop package counter   write RTT Histogram microsecond The socket write RTT execute time histogram   write execute time Histogram nanosecond The socket write data execute time histogram   read execute time Histogram nanosecond The socket read data execute time histogram   connect execute time Histogram nanosecond The socket connect/accept with other server/client execute time histogram   close execute time Histogram nanosecond The socket close execute time histogram    HTTP/1.x Data Metrics    Name Type Unit Description     http1_request_cpm Counter count The HTTP request counter   http1_response_status_cpm Counter count The count of per HTTP response code   http1_request_package_size Histogram Byte size The request package size   http1_response_package_size Histogram Byte size The response package size   http1_client_duration Histogram millisecond The duration of single HTTP response on the client side   http1_server_duration Histogram millisecond The duration of single HTTP response on the server side    Logs    Name Type Unit Description     slow_traces TopN millisecond The Top N slow trace(id)s   status_4xx TopN millisecond The Top N trace(id)s with response status in 400-499   status_5xx TopN millisecond The Top N trace(id)s with response status in 500-599    Span Attached Event    Name Description     HTTP Request Sampling Complete information about the HTTP request, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   HTTP Response Sampling Complete information about the HTTP response, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   Syscall xxx The methods to use when the process invoke with the network-related syscall method. It\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.    Continuous Profiling The continuous profiling feature monitors low-power target process information, including process CPU usage and network requests, based on configuration passed from the backend. When a threshold is met, it automatically initiates a profiling task(on/off CPU, Network) to provide more detailed analysis.\nMonitor Type System Load Monitor the average system load for the last minute, which is equivalent to using the first value of the load average in the uptime command.\nProcess CPU The target process utilizes a certain percentage of the CPU on the current host.\nProcess Thread Count The real-time number of threads in the target process.\nNetwork Network monitoring uses eBPF technology to collect real-time performance data of the current process responding to requests. Requests sent upstream are not monitored by the system.\nCurrently, network monitoring supports parsing of the HTTP/1.x protocol and supports the following types of monitoring:\n Error Rate: The percentage of network request errors, such as HTTP status codes within the range of [500-600), is considered as erroneous. Avg Response Time: Average response time(ms) for specified URI.  
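To make the monitor types above slightly more concrete, here is a small, illustrative Go sketch of the kind of check a continuous-profiling trigger performs: every sample collected in a check window must exceed the threshold before a task starts. The function name, window contents, and threshold are invented for the example and do not mirror Rover's internals.
package main

import "fmt"

// overThresholdForWindow reports whether every sample in the window exceeds the
// threshold, which is the shape of check continuous profiling performs before
// starting an on/off CPU or network profiling task.
func overThresholdForWindow(samples []float64, threshold float64) bool {
    if len(samples) == 0 {
        return false
    }
    for _, v := range samples {
        if v <= threshold {
            return false
        }
    }
    return true
}

func main() {
    // Example: five consecutive process CPU readings (percent) against a 75% threshold.
    cpu := []float64{80, 82, 79, 91, 85}
    fmt.Println(overThresholdForWindow(cpu, 75)) // true: a profiling task could be triggered
}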
Metrics Rover would periodically send collected monitoring data to the backend using the Native Meter Protocol.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    ","excerpt":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the …","ref":"/docs/skywalking-rover/latest/en/setup/configuration/profiling/","title":"Profiling"},{"body":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the snapshot to the backend server.\nConfiguration    Name Default Environment Key Description     profiling.active true ROVER_PROFILING_ACTIVE Is active the process profiling.   profiling.check_interval 10s ROVER_PROFILING_CHECK_INTERVAL Check the profiling task interval.   profiling.flush_interval 5s ROVER_PROFILING_FLUSH_INTERVAL Combine existing profiling data and report to the backend interval.   profiling.task.on_cpu.dump_period 9ms ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD The profiling stack dump period.   profiling.task.network.report_interval 2s ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_REPORT_INTERVAL The interval of send metrics to the backend.   profiling.task.network.meter_prefix rover_net_p ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_METER_PREFIX The prefix of network profiling metrics name.   profiling.task.network.protocol_analyze.per_cpu_buffer 400KB ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   profiling.task.network.protocol_analyze.parallels 2 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   profiling.task.network.protocol_analyze.queue_size 5000 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyzer queue.   profiling.task.network.protocol_analyze.sampling.http.default_request_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_REQUEST_ENCODING The default body encoding when sampling the request.   profiling.task.network.protocol_analyze.sampling.http.default_response_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_RESPONSE_ENCODING The default body encoding when sampling the response.   profiling.continuous.meter_prefix rover_con_p ROVER_PROFILING_CONTINUOUS_METER_PREFIX The continuous related meters prefix name.   profiling.continuous.fetch_interval 1s ROVER_PROFILING_CONTINUOUS_FETCH_INTERVAL The interval of fetch metrics from the system, such as Process CPU, System Load, etc.   profiling.continuous.check_interval 5s ROVER_PROFILING_CONTINUOUS_CHECK_INTERVAL The interval of check metrics is reach the thresholds.   profiling.continuous.trigger.execute_duration 10m ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION The duration of the profiling task.   profiling.continuous.trigger.silence_duration 20m ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION The minimal duration between the execution of the same profiling task.    Prepare service Before profiling your service, please make sure your service already has the symbol data inside the binary file. So we could locate the stack symbol, It could be checked following these ways:\n objdump: Using objdump --syms path/to/service. 
readelf: Using readelf --syms path/to/service.  Profiling Type All the profiling tasks are using the Linux Official Function and kprobe or uprobe to open perf event, and attach the eBPF Program to dump stacks.\nOn CPU On CPU Profiling task is using PERF_COUNT_SW_CPU_CLOCK to profiling the process with the CPU clock.\nOff CPU Off CPU Profiling task is attach the finish_task_switch in krobe to profiling the process.\nNetwork Network Profiling task is intercept IO-related syscall and urprobe in process to identify the network traffic and generate the metrics. Also, the following protocol are supported for analyzing using OpenSSL library, BoringSSL library, GoTLS, NodeTLS or plaintext:\n HTTP/1.x HTTP/2 MySQL CQL(The Cassandra Query Language) MongoDB Kafka DNS  Collecting data Network profiling uses metrics, logs send to the backend service.\nData Type The network profiling has customized the following two types of metrics to represent the network data:\n Counter: Records the total number of data in a certain period of time. Each counter containers the following data:  Count: The count of the execution. Bytes: The package size of the execution. Exe Time: The consumed time(nanosecond) of the execution.   Histogram: Records the distribution of the data in the bucket. TopN: Record the highest latency data in a certain period of time.  Labels Each metric contains the following labels to identify the process relationship:\n   Name Type Description     client_process_id or server_process_id string The ID of the current process, which is determined by the role of the current process in the connection as server or client.   client_local or server_local boolean The remote process is a local process.   client_address or server_address string The remote process address. ex: IP:port.   side enum The current process is either \u0026ldquo;client\u0026rdquo; or \u0026ldquo;server\u0026rdquo; in this connection.   protocol string Identification the protocol based on the package data content.   is_ssl bool Is the current connection using SSL.    
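For the Off CPU task described above, attaching a kprobe to finish_task_switch can be pictured with the following hedged Go sketch based on the github.com/cilium/ebpf library; the object file name off_cpu.o and the program name trace_switch are placeholders rather than Rover's real artifacts, and the eBPF program itself (which would record per-stack off-CPU time) is omitted.
package main

import (
    "fmt"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

func main() {
    // Load a pre-compiled eBPF object; "off_cpu.o" and "trace_switch" are example names.
    coll, err := ebpf.LoadCollection("off_cpu.o")
    if err != nil {
        fmt.Println("load collection:", err)
        return
    }
    defer coll.Close()

    prog := coll.Programs["trace_switch"]
    if prog == nil {
        fmt.Println("program not found in object file")
        return
    }

    // Attach the program to the kernel scheduler switch point used by Off CPU profiling.
    kp, err := link.Kprobe("finish_task_switch", prog, nil)
    if err != nil {
        fmt.Println("attach kprobe:", err)
        return
    }
    defer kp.Close()

    fmt.Println("kprobe attached; the eBPF program can now observe context switches")
}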
Layer-4 Data Based on the above two data types, the following metrics are provided.\n   Name Type Unit Description     write Counter nanosecond The socket write counter   read Counter nanosecond The socket read counter   write RTT Counter microsecond The socket write RTT counter   connect Counter nanosecond The socket connect/accept with other server/client counter   close Counter nanosecond The socket close counter   retransmit Counter nanosecond The socket retransmit package counter   drop Counter nanosecond The socket drop package counter   write RTT Histogram microsecond The socket write RTT execute time histogram   write execute time Histogram nanosecond The socket write data execute time histogram   read execute time Histogram nanosecond The socket read data execute time histogram   connect execute time Histogram nanosecond The socket connect/accept with other server/client execute time histogram   close execute time Histogram nanosecond The socket close execute time histogram    HTTP/1.x Data Metrics    Name Type Unit Description     http1_request_cpm Counter count The HTTP request counter   http1_response_status_cpm Counter count The count of per HTTP response code   http1_request_package_size Histogram Byte size The request package size   http1_response_package_size Histogram Byte size The response package size   http1_client_duration Histogram millisecond The duration of single HTTP response on the client side   http1_server_duration Histogram millisecond The duration of single HTTP response on the server side    Logs    Name Type Unit Description     slow_traces TopN millisecond The Top N slow trace(id)s   status_4xx TopN millisecond The Top N trace(id)s with response status in 400-499   status_5xx TopN millisecond The Top N trace(id)s with response status in 500-599    Span Attached Event    Name Description     HTTP Request Sampling Complete information about the HTTP request, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   HTTP Response Sampling Complete information about the HTTP response, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   Syscall xxx The methods to use when the process invoke with the network-related syscall method. It\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.    Continuous Profiling The continuous profiling feature monitors low-power target process information, including process CPU usage and network requests, based on configuration passed from the backend. When a threshold is met, it automatically initiates a profiling task(on/off CPU, Network) to provide more detailed analysis.\nMonitor Type System Load Monitor the average system load for the last minute, which is equivalent to using the first value of the load average in the uptime command.\nProcess CPU The target process utilizes a certain percentage of the CPU on the current host.\nProcess Thread Count The real-time number of threads in the target process.\nNetwork Network monitoring uses eBPF technology to collect real-time performance data of the current process responding to requests. Requests sent upstream are not monitored by the system.\nCurrently, network monitoring supports parsing of the HTTP/1.x protocol and supports the following types of monitoring:\n Error Rate: The percentage of network request errors, such as HTTP status codes within the range of [500-600), is considered as erroneous. Avg Response Time: Average response time(ms) for specified URI.  
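As a small companion to the Error Rate definition just above, the following Go sketch computes the percentage of responses whose status code falls in [500-600); the sample status codes are made up for illustration.
package main

import "fmt"

// errorRatePercent returns the share of responses with status in [500, 600), as a percentage.
func errorRatePercent(statuses []int) float64 {
    if len(statuses) == 0 {
        return 0
    }
    errors := 0
    for _, s := range statuses {
        if s >= 500 && s < 600 {
            errors++
        }
    }
    return float64(errors) / float64(len(statuses)) * 100
}

func main() {
    statuses := []int{200, 200, 502, 200, 500}
    fmt.Printf("%.1f%%\n", errorRatePercent(statuses)) // 40.0%
}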
Metrics Rover would periodically send collected monitoring data to the backend using the Native Meter Protocol.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    ","excerpt":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the …","ref":"/docs/skywalking-rover/next/en/setup/configuration/profiling/","title":"Profiling"},{"body":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the snapshot to the backend server.\nConfiguration    Name Default Environment Key Description     profiling.active true ROVER_PROFILING_ACTIVE Is active the process profiling.   profiling.check_interval 10s ROVER_PROFILING_CHECK_INTERVAL Check the profiling task interval.   profiling.flush_interval 5s ROVER_PROFILING_FLUSH_INTERVAL Combine existing profiling data and report to the backend interval.   profiling.task.on_cpu.dump_period 9ms ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD The profiling stack dump period.   profiling.task.network.report_interval 2s ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_REPORT_INTERVAL The interval of send metrics to the backend.   profiling.task.network.meter_prefix rover_net_p ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_METER_PREFIX The prefix of network profiling metrics name.   profiling.task.network.protocol_analyze.per_cpu_buffer 400KB ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   profiling.task.network.protocol_analyze.parallels 2 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   profiling.task.network.protocol_analyze.queue_size 5000 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyzer queue.   profiling.task.network.protocol_analyze.sampling.http.default_request_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_REQUEST_ENCODING The default body encoding when sampling the request.   profiling.task.network.protocol_analyze.sampling.http.default_response_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_RESPONSE_ENCODING The default body encoding when sampling the response.   profiling.continuous.meter_prefix rover_con_p ROVER_PROFILING_CONTINUOUS_METER_PREFIX The continuous related meters prefix name.   profiling.continuous.fetch_interval 1s ROVER_PROFILING_CONTINUOUS_FETCH_INTERVAL The interval of fetch metrics from the system, such as Process CPU, System Load, etc.   profiling.continuous.check_interval 5s ROVER_PROFILING_CONTINUOUS_CHECK_INTERVAL The interval of check metrics is reach the thresholds.   profiling.continuous.trigger.execute_duration 10m ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION The duration of the profiling task.   profiling.continuous.trigger.silence_duration 20m ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION The minimal duration between the execution of the same profiling task.    Prepare service Before profiling your service, please make sure your service already has the symbol data inside the binary file. So we could locate the stack symbol, It could be checked following these ways:\n objdump: Using objdump --syms path/to/service. 
readelf: Using readelf --syms path/to/service.  Profiling Type All the profiling tasks are using the Linux Official Function and kprobe or uprobe to open perf event, and attach the eBPF Program to dump stacks.\nOn CPU On CPU Profiling task is using PERF_COUNT_SW_CPU_CLOCK to profiling the process with the CPU clock.\nOff CPU Off CPU Profiling task is attach the finish_task_switch in krobe to profiling the process.\nNetwork Network Profiling task is intercept IO-related syscall and urprobe in process to identify the network traffic and generate the metrics. Also, the following protocol are supported for analyzing using OpenSSL library, BoringSSL library, GoTLS, NodeTLS or plaintext:\n HTTP/1.x HTTP/2 MySQL CQL(The Cassandra Query Language) MongoDB Kafka DNS  Collecting data Network profiling uses metrics, logs send to the backend service.\nData Type The network profiling has customized the following two types of metrics to represent the network data:\n Counter: Records the total number of data in a certain period of time. Each counter containers the following data:  Count: The count of the execution. Bytes: The package size of the execution. Exe Time: The consumed time(nanosecond) of the execution.   Histogram: Records the distribution of the data in the bucket. TopN: Record the highest latency data in a certain period of time.  Labels Each metric contains the following labels to identify the process relationship:\n   Name Type Description     client_process_id or server_process_id string The ID of the current process, which is determined by the role of the current process in the connection as server or client.   client_local or server_local boolean The remote process is a local process.   client_address or server_address string The remote process address. ex: IP:port.   side enum The current process is either \u0026ldquo;client\u0026rdquo; or \u0026ldquo;server\u0026rdquo; in this connection.   protocol string Identification the protocol based on the package data content.   is_ssl bool Is the current connection using SSL.    
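For readers who prefer code to prose, the Counter data type described above can be pictured as a small struct like the Go sketch below; the type and field names are illustrative only, not the eBPF agent's actual types.
package main

import "fmt"

// networkCounter mirrors the three values listed for a Counter:
// execution count, transferred bytes, and consumed time in nanoseconds.
type networkCounter struct {
    Count   uint64 // how many times the operation (e.g. a socket write) executed
    Bytes   uint64 // total package size of those executions
    ExeTime uint64 // total consumed time of those executions, in nanoseconds
}

// merge folds one reporting period into another, which is how periodic flushes
// (cf. profiling.flush_interval) could combine existing profiling data before reporting.
func (c *networkCounter) merge(other networkCounter) {
    c.Count += other.Count
    c.Bytes += other.Bytes
    c.ExeTime += other.ExeTime
}

func main() {
    w := networkCounter{Count: 2, Bytes: 4096, ExeTime: 1500000}
    w.merge(networkCounter{Count: 1, Bytes: 512, ExeTime: 300000})
    fmt.Printf("%+v\n", w)
}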
Layer-4 Data Based on the above two data types, the following metrics are provided.\n   Name Type Unit Description     write Counter nanosecond The socket write counter   read Counter nanosecond The socket read counter   write RTT Counter microsecond The socket write RTT counter   connect Counter nanosecond The socket connect/accept with other server/client counter   close Counter nanosecond The socket close counter   retransmit Counter nanosecond The socket retransmit package counter   drop Counter nanosecond The socket drop package counter   write RTT Histogram microsecond The socket write RTT execute time histogram   write execute time Histogram nanosecond The socket write data execute time histogram   read execute time Histogram nanosecond The socket read data execute time histogram   connect execute time Histogram nanosecond The socket connect/accept with other server/client execute time histogram   close execute time Histogram nanosecond The socket close execute time histogram    HTTP/1.x Data Metrics    Name Type Unit Description     http1_request_cpm Counter count The HTTP request counter   http1_response_status_cpm Counter count The count of per HTTP response code   http1_request_package_size Histogram Byte size The request package size   http1_response_package_size Histogram Byte size The response package size   http1_client_duration Histogram millisecond The duration of single HTTP response on the client side   http1_server_duration Histogram millisecond The duration of single HTTP response on the server side    Logs    Name Type Unit Description     slow_traces TopN millisecond The Top N slow trace(id)s   status_4xx TopN millisecond The Top N trace(id)s with response status in 400-499   status_5xx TopN millisecond The Top N trace(id)s with response status in 500-599    Span Attached Event    Name Description     HTTP Request Sampling Complete information about the HTTP request, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   HTTP Response Sampling Complete information about the HTTP response, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   Syscall xxx The methods to use when the process invoke with the network-related syscall method. It\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.    Continuous Profiling The continuous profiling feature monitors low-power target process information, including process CPU usage and network requests, based on configuration passed from the backend. When a threshold is met, it automatically initiates a profiling task(on/off CPU, Network) to provide more detailed analysis.\nMonitor Type System Load Monitor the average system load for the last minute, which is equivalent to using the first value of the load average in the uptime command.\nProcess CPU The target process utilizes a certain percentage of the CPU on the current host.\nProcess Thread Count The real-time number of threads in the target process.\nNetwork Network monitoring uses eBPF technology to collect real-time performance data of the current process responding to requests. Requests sent upstream are not monitored by the system.\nCurrently, network monitoring supports parsing of the HTTP/1.x protocol and supports the following types of monitoring:\n Error Rate: The percentage of network request errors, such as HTTP status codes within the range of [500-600), is considered as erroneous. Avg Response Time: Average response time(ms) for specified URI.  
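The slow_traces, status_4xx, and status_5xx logs listed above are TopN records. A minimal Go sketch of keeping the N slowest trace IDs within a reporting period could look like the following; the struct, function name, and sample data are illustrative only.
package main

import (
    "fmt"
    "sort"
)

type traceLatency struct {
    TraceID string
    Millis  int64
}

// keepTopN keeps only the n slowest traces, which is the shape of the slow_traces TopN log.
func keepTopN(entries []traceLatency, n int) []traceLatency {
    sort.Slice(entries, func(i, j int) bool { return entries[i].Millis > entries[j].Millis })
    if len(entries) > n {
        entries = entries[:n]
    }
    return entries
}

func main() {
    entries := []traceLatency{
        {"trace-a", 120}, {"trace-b", 950}, {"trace-c", 430}, {"trace-d", 610},
    }
    for _, e := range keepTopN(entries, 2) {
        fmt.Println(e.TraceID, e.Millis, "ms")
    }
}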
Metrics Rover would periodically send collected monitoring data to the backend using the Native Meter Protocol.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    ","excerpt":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/profiling/","title":"Profiling"},{"body":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  
rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","excerpt":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its …","ref":"/docs/main/latest/en/api/profiling-protocol/","title":"Profiling APIs"},{"body":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","excerpt":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its …","ref":"/docs/main/next/en/api/profiling-protocol/","title":"Profiling APIs"},{"body":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","excerpt":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its …","ref":"/docs/main/v9.4.0/en/api/profiling-protocol/","title":"Profiling APIs"},{"body":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","excerpt":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its …","ref":"/docs/main/v9.5.0/en/api/profiling-protocol/","title":"Profiling APIs"},{"body":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","excerpt":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its …","ref":"/docs/main/v9.6.0/en/api/profiling-protocol/","title":"Profiling APIs"},{"body":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","excerpt":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its …","ref":"/docs/main/v9.7.0/en/api/profiling-protocol/","title":"Profiling APIs"},{"body":"Project Structure  agent: The agent core files copied during hybrid compilation. bin: The binary files of the Go agent program. docs: The documentation of the Go agent. log: The log configuration for adapting the Golang agent. plugins: The plugins for adapting the frameworks.  core: Agent core and API for the SkyWalking Agent; the plugins should import this module. xxx: The plugin for adapting the corresponding framework.   reporter: The reporter for adapting to the SkyWalking backend. tools/go-agent: The Golang Agent enhancement program.  cmd: The agent starter. config: The application register configuration for the agent. instrument: Performs enhancement on different packages during hybrid compilation.  agentcore: When compiling SkyWalking Go, enhance its code, mainly for Agent Core file copying. api: The API of the instrument. entry: When compiling the main package, enhance its code, mainly focusing on starting the Agent system. plugins: When detecting a framework that requires enhancement, enhance its code. For specific operation details, please refer to the Key Principle document. reporter: When compiling the reporter package under agent, enhance its code, mainly focusing on starting the reporter. runtime: When compiling the runtime package, enhance its code. For specific operation details, please refer to the Key Principle document.   tools: Helpers for building the agent.    ","excerpt":"Project Structure  agent: The agent core files copied during hybrid compilation. bin: The binary files …","ref":"/docs/skywalking-go/latest/en/concepts-and-designs/project-structure/","title":"Project Structure"},{"body":"Project Structure  agent: The agent core files copied during hybrid compilation. bin: The binary files of the Go agent program. docs: The documentation of the Go agent. log: The log configuration for adapting the Golang agent. 
plugins: The plugins for adapting the frameworks.  core: Agent core and API for the SkyWalking Agent; the plugins should import this module. xxx: The plugin for adapting the corresponding framework.   reporter: The reporter for adapting to the SkyWalking backend. tools/go-agent: The Golang Agent enhancement program.  cmd: The agent starter. config: The application register configuration for the agent. instrument: Performs enhancement on different packages during hybrid compilation.  agentcore: When compiling SkyWalking Go, enhance its code, mainly for Agent Core file copying. api: The API of the instrument. entry: When compiling the main package, enhance its code, mainly focusing on starting the Agent system. plugins: When detecting a framework that requires enhancement, enhance its code. For specific operation details, please refer to the Key Principle document. reporter: When compiling the reporter package under agent, enhance its code, mainly focusing on starting the reporter. runtime: When compiling the runtime package, enhance its code. For specific operation details, please refer to the Key Principle document.   tools: Helpers for building the agent.    ","excerpt":"Project Structure  agent: The agent core files copied during hybrid compilation. bin: The binary files …","ref":"/docs/skywalking-go/next/en/concepts-and-designs/project-structure/","title":"Project Structure"},{"body":"Project Structure  agent: The agent core files copied during hybrid compilation. bin: The binary files of the Go agent program. docs: The documentation of the Go agent. log: The log configuration for adapting the Golang agent. plugins: The plugins for adapting the frameworks.  core: Agent core and API for the SkyWalking Agent; the plugins should import this module. xxx: The plugin for adapting the corresponding framework.   reporter: The reporter for adapting to the SkyWalking backend. tools/go-agent: The Golang Agent enhancement program.  cmd: The agent starter. config: The application register configuration for the agent. instrument: Performs enhancement on different packages during hybrid compilation.  agentcore: When compiling SkyWalking Go, enhance its code, mainly for Agent Core file copying. api: The API of the instrument. entry: When compiling the main package, enhance its code, mainly focusing on starting the Agent system. plugins: When detecting a framework that requires enhancement, enhance its code. For specific operation details, please refer to the Key Principle document. reporter: When compiling the reporter package under agent, enhance its code, mainly focusing on starting the reporter. runtime: When compiling the runtime package, enhance its code. For specific operation details, please refer to the Key Principle document.   tools: Helpers for building the agent.    ","excerpt":"Project Structure  agent: The agent core files copied during hybrid compilation. bin: The binary files …","ref":"/docs/skywalking-go/v0.4.0/en/concepts-and-designs/project-structure/","title":"Project Structure"},{"body":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, and common utils.  internal/pkg: Sharing with Core and Plugins, such as api and utils. internal/satellite: The core of Satellite.   plugins: Contains all plugins.  plugins/{type}: Contains the plugins of this {type}. Satellite has 9 plugin types. plugins/{type}/api: Contains the plugin definition and initializer. plugins/{type}/{plugin-name}: Contains the specific plugin. init.go: Register the plugins to the plugin registry.    . 
├── CHANGES.md ├── cmd ├── configs ├── docs ├── go.sum ├── internal │ ├── pkg │ └── satellite ├── plugins │ ├── client │ ├── fallbacker │ ├── fetcher │ ├── filter │ ├── forwarder │ ├── init.go │ ├── parser │ ├── queue │ ├── receiver │ └── server ","excerpt":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/project_structue/","title":"Project Structure"},{"body":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, and common utils.  internal/pkg: Sharing with Core and Plugins, such as api and utils. internal/satellite: The core of Satellite.   plugins: Contains all plugins.  plugins/{type}: Contains the plugins of this {type}. Satellite has 9 plugin types. plugins/{type}/api: Contains the plugin definition and initializer. plugins/{type}/{plugin-name}: Contains the specific plugin. init.go: Register the plugins to the plugin registry.    . ├── CHANGES.md ├── cmd ├── configs ├── docs ├── go.sum ├── internal │ ├── pkg │ └── satellite ├── plugins │ ├── client │ ├── fallbacker │ ├── fetcher │ ├── filter │ ├── forwarder │ ├── init.go │ ├── parser │ ├── queue │ ├── receiver │ └── server ","excerpt":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/project_structue/","title":"Project Structure"},{"body":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, and common utils.  internal/pkg: Sharing with Core and Plugins, such as api and utils. internal/satellite: The core of Satellite.   plugins: Contains all plugins.  plugins/{type}: Contains the plugins of this {type}. Satellite has 9 plugin types. plugins/{type}/api: Contains the plugin definition and initializer. plugins/{type}/{plugin-name}: Contains the specific plugin. init.go: Register the plugins to the plugin registry.    . ├── CHANGES.md ├── cmd ├── configs ├── docs ├── go.sum ├── internal │ ├── pkg │ └── satellite ├── plugins │ ├── client │ ├── fallbacker │ ├── fetcher │ ├── filter │ ├── forwarder │ ├── init.go │ ├── parser │ ├── queue │ ├── receiver │ └── server ","excerpt":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, …","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/project_structue/","title":"Project Structure"},{"body":"Prometheus Fetcher Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine.\nConfiguration file Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching services and their instances, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/fetcher-prom-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;duration\u0026gt;: This is parsed into a textual representation of a duration. The formats accepted are based on the ISO-8601 duration format PnDTnHnMn.nS with days considered to be exactly 24 hours. 
\u0026lt;labelname\u0026gt;: A string matching the regular expression [a-zA-Z_][a-zA-Z0-9_]*. \u0026lt;labelvalue\u0026gt;: A string of unicode characters. \u0026lt;host\u0026gt;: A valid string consisting of a hostname or IP followed by an optional port number. \u0026lt;path\u0026gt;: A valid URL path. \u0026lt;string\u0026gt;: A regular string.  # How frequently to fetch targets.fetcherInterval:\u0026lt;duration\u0026gt;# Per-fetch timeout when fetching this target.fetcherTimeout:\u0026lt;duration\u0026gt;# The HTTP resource path on which to fetch metrics from targets.metricsPath:\u0026lt;path\u0026gt;#Statically configured targets.staticConfig:# The targets specified by the static config.targets:[- \u0026lt;target\u0026gt; ]# Labels assigned to all metrics fetched from the targets.labels:[ \u0026lt;labelname\u0026gt;:\u0026lt;labelvalue\u0026gt; ... ]# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ] # The url of target exporter. the format should be complied with \u0026#34;java.net.URI\u0026#34;url:\u0026lt;string\u0026gt;# The path of root CA file.sslCaFilePath:\u0026lt;string\u0026gt;\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;To know more about MAL, please refer to mal.md\nActive Fetcher Rules Suppose you want to enable some metric-custom.yaml files stored at fetcher-prom-rules, append its name to enabledRules of prometheus-fetcher as follows:\nprometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self,metric-custom\u0026#34;}","excerpt":"Prometheus Fetcher Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the …","ref":"/docs/main/v9.0.0/en/setup/backend/prometheus-metrics/","title":"Prometheus Fetcher"},{"body":"Prometheus Fetcher Prometheus fetcher reads metrics from the Prometheus endpoint and transfers the metrics into SkyWalking native format for the MAL engine.\nConfiguration file Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching services and their instances, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/fetcher-prom-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;duration\u0026gt;: This is parsed into a textual representation of a duration. The accepted formats are based on the ISO-8601 duration format PnDTnHnMn.nS with days of exactly 24 hours. \u0026lt;labelname\u0026gt;: A string matching the regular expression [a-zA-Z_][a-zA-Z0-9_]*. \u0026lt;labelvalue\u0026gt;: A string of Unicode characters. 
\u0026lt;host\u0026gt;: A valid string consisting of a hostname or IP followed by an optional port number. \u0026lt;path\u0026gt;: A valid URL path. \u0026lt;string\u0026gt;: A regular string.  # How frequently to fetch targets.fetcherInterval:\u0026lt;duration\u0026gt;# Per-fetch timeout when fetching this target.fetcherTimeout:\u0026lt;duration\u0026gt;# The HTTP resource path on which to fetch metrics from targets.metricsPath:\u0026lt;path\u0026gt;#Statically configured targets.staticConfig:# The targets specified by the static config.targets:[- \u0026lt;target\u0026gt; ]# Labels assigned to all metrics fetched from the targets.labels:[ \u0026lt;labelname\u0026gt;:\u0026lt;labelvalue\u0026gt; ... ]# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ] # The url of target exporter. the format should be complied with \u0026#34;java.net.URI\u0026#34;url:\u0026lt;string\u0026gt;# The path of root CA file.sslCaFilePath:\u0026lt;string\u0026gt;\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;To know more about MAL, please refer to mal.md\nActive Fetcher Rules Suppose you want to enable some metric-custom.yaml files stored at fetcher-prom-rules, append its name to enabledRules of prometheus-fetcher as follows:\nprometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self,metric-custom\u0026#34;}","excerpt":"Prometheus Fetcher Prometheus fetcher reads metrics from the Prometheus endpoint and transfers the …","ref":"/docs/main/v9.1.0/en/setup/backend/prometheus-metrics/","title":"Prometheus Fetcher"},{"body":"Prometheus Fetcher Prometheus fetcher reads metrics from the Prometheus endpoint and transfers the metrics into SkyWalking native format for the MAL engine.\nConfiguration file Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching services and their instances, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/fetcher-prom-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;duration\u0026gt;: This is parsed into a textual representation of a duration. The accepted formats are based on the ISO-8601 duration format PnDTnHnMn.nS with days of exactly 24 hours. \u0026lt;labelname\u0026gt;: A string matching the regular expression [a-zA-Z_][a-zA-Z0-9_]*. \u0026lt;labelvalue\u0026gt;: A string of Unicode characters. \u0026lt;host\u0026gt;: A valid string consisting of a hostname or IP followed by an optional port number. \u0026lt;path\u0026gt;: A valid URL path. 
\u0026lt;string\u0026gt;: A regular string.  # How frequently to fetch targets.fetcherInterval:\u0026lt;duration\u0026gt;# Per-fetch timeout when fetching this target.fetcherTimeout:\u0026lt;duration\u0026gt;# The HTTP resource path on which to fetch metrics from targets.metricsPath:\u0026lt;path\u0026gt;#Statically configured targets.staticConfig:# The targets specified by the static config.targets:[- \u0026lt;target\u0026gt; ]# Labels assigned to all metrics fetched from the targets.labels:[ \u0026lt;labelname\u0026gt;:\u0026lt;labelvalue\u0026gt; ... ]# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics execute other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expressions in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rules allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ] # The URL of the target exporter. The format must comply with \u0026#34;java.net.URI\u0026#34;url:\u0026lt;string\u0026gt;# The path of the root CA file.sslCaFilePath:\u0026lt;string\u0026gt;\u0026lt;metric_rules\u0026gt; # The name of the rule, which is combined with the prefix \u0026#39;meter_\u0026#39; to form the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;To know more about MAL, please refer to mal.md\nActive Fetcher Rules Suppose you want to enable some metric-custom.yaml files stored at fetcher-prom-rules, append its name to enabledRules of prometheus-fetcher as follows:\nprometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self,metric-custom\u0026#34;}","excerpt":"Prometheus Fetcher Prometheus fetcher reads metrics from the Prometheus endpoint and transfers the …","ref":"/docs/main/v9.2.0/en/setup/backend/prometheus-metrics/","title":"Prometheus Fetcher"},{"body":"PromQL Service PromQL (Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs, including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana) can obtain metrics through the PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc., the PromQL Service supports only a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compares it to the official PromQL documentation. 
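For orientation, here is a minimal client-side sketch (Python, using the requests library) of the instant-query endpoint described later on this page. The OAP base URL/port and the example service and layer are deployment-specific assumptions, not part of the protocol itself.

import requests

# Hypothetical OAP PromQL address; adjust the host and port to your deployment.
PROMQL_BASE = 'http://localhost:9090'

def instant_query(expr, time=None):
    # GET /api/v1/query with a PromQL expression; 'time' is optional and,
    # when omitted, the default look-back window of 2 minutes applies.
    params = {'query': expr}
    if time is not None:
        params['time'] = time
    resp = requests.get(PROMQL_BASE + '/api/v1/query', params=params)
    resp.raise_for_status()
    return resp.json()

print(instant_query("service_cpm{service='agent::songs', layer='GENERAL'}"))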
If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. 
no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    
For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  
Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. --top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  
top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","excerpt":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs …","ref":"/docs/main/latest/en/api/promql-service/","title":"PromQL Service"},{"body":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. 
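As a rough, hedged sketch of the range-query endpoint covered later in this entry (again assuming a locally reachable PromQL service; host and port vary by deployment), start and end are passed as Unix-second timestamps per the parameter table below:

import time
import requests

PROMQL_BASE = 'http://localhost:9090'  # assumed OAP PromQL service address

def range_query(expr, start, end):
    # GET /api/v1/query_range; SkyWalking derives the step (DAY/HOUR/MINUTE)
    # from start and end, so no step parameter needs to be sent.
    params = {'query': expr, 'start': start, 'end': end}
    resp = requests.get(PROMQL_BASE + '/api/v1/query_range', params=params)
    resp.raise_for_status()
    return resp.json()

end_ts = int(time.time())
result = range_query("service_cpm{service='agent::songs', layer='GENERAL'}", end_ts - 300, end_ts)
for series in result['data']['result']:
    print(series['metric'], series['values'])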
If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. 
no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp, if end time is not present, use current time as default end time yes yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector 
yes yes   start start timestamp no yes   end end timestamp, if end time is not present, use current time as default end time yes yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
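For instance, every expression example on this page pins layer explicitly; as a hedged illustration reusing values that already appear in this document, service_cpm{service='agent::songs', layer='GENERAL'} addresses the series reported under the GENERAL layer, while the same selector with layer='VIRTUAL_DATABASE' (the layer used further down for database records) would address a different set of series.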
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --metric labels: Used to filter the value labels to be returned  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, p=\u0026#39;50,75,90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;p\u0026#34;: \u0026#34;50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;p\u0026#34;: \u0026#34;75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;p\u0026#34;: \u0026#34;90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","excerpt":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs …","ref":"/docs/main/next/en/api/promql-service/","title":"PromQL Service"},{"body":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana) could obtain metrics through the PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc., the PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compares it to the official PromQL documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm where the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm where the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;scope\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) All scopes are not supported completely, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
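As a practical aside on the HTTP endpoints above: when they are called from a shell rather than from a PromQL-aware client such as Grafana, the expression has to be URL-encoded. A minimal sketch, assuming the OAP's PromQL service is reachable at oap-host:9090 (host and port are deployment-specific assumptions, not values taken from this page): curl -G 'http://oap-host:9090/api/v1/query' --data-urlencode "query=service_cpm{service='agent::songs', layer='GENERAL'}". The same pattern applies to /api/v1/query_range, /api/v1/series and the other endpoints listed above.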
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","excerpt":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs …","ref":"/docs/main/v9.4.0/en/api/promql-service/","title":"PromQL Service"},{"body":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
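Note that the sorting labels described under Sort Metrics below (top_n and order) are passed alongside these general labels; a hedged variation of the example given later on this page, ordered ascending to surface the least-active instances instead of the busiest ones: service_instance_cpm{parent_service='agent::songs', layer='GENERAL', top_n='3', order='ASC'}.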
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","excerpt":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs …","ref":"/docs/main/v9.5.0/en/api/promql-service/","title":"PromQL Service"},{"body":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
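For labeled value metrics (see Labeled Value Metrics below), the labels and relabels query labels must line up one-to-one in count and order; a hedged variation of the percentile example shown later, keeping only two of the three buckets: service_percentile{service='agent::songs', layer='GENERAL', labels='0,2', relabels='P50,P90'}.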
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","excerpt":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs …","ref":"/docs/main/v9.6.0/en/api/promql-service/","title":"PromQL Service"},{"body":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
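For reference, a minimal Python sketch of calling the instant and range query endpoints described above. This is not part of the original page: the base URL (http://localhost:9090), the service name `agent::songs`, and the layer `GENERAL` are assumptions for illustration; only the paths `/api/v1/query` and `/api/v1/query_range` and the `query`/`time`/`start`/`end` parameters come from the tables above.

```python
# Minimal sketch of querying the PromQL Service HTTP API (assumed to listen on
# http://localhost:9090 -- adjust to your OAP deployment).
import json
import urllib.parse
import urllib.request
from typing import Optional

BASE = "http://localhost:9090"  # assumed PromQL Service address


def promql_instant(expr: str, time_sec: Optional[int] = None) -> dict:
    """GET /api/v1/query -- latest value for the given expression."""
    params = {"query": expr}
    if time_sec is not None:
        params["time"] = str(time_sec)
    url = f"{BASE}/api/v1/query?{urllib.parse.urlencode(params)}"
    with urllib.request.urlopen(url) as resp:
        return json.load(resp)


def promql_range(expr: str, start_sec: int, end_sec: int) -> dict:
    """GET /api/v1/query_range -- values between start and end (seconds)."""
    params = {"query": expr, "start": str(start_sec), "end": str(end_sec)}
    url = f"{BASE}/api/v1/query_range?{urllib.parse.urlencode(params)}"
    with urllib.request.urlopen(url) as resp:
        return json.load(resp)


if __name__ == "__main__":
    # Only the '=' label matcher is supported, as noted above.
    result = promql_instant("service_cpm{service='agent::songs', layer='GENERAL'}")
    # Response shape follows the documented {"status", "data": {"resultType", ...}} layout.
    print(result["status"], result["data"]["resultType"])
```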
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","excerpt":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs …","ref":"/docs/main/v9.7.0/en/api/promql-service/","title":"PromQL Service"},{"body":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest\n  SendResponse\n  Service\n    banyandb/common/v1/common.proto\n  Group\n  IntervalRule\n  Metadata\n  ResourceOpts\n  Catalog\n  IntervalRule.Unit\n    banyandb/database/v1/database.proto\n  Node\n  Shard\n  Role\n    banyandb/model/v1/common.proto\n  FieldValue\n  Float\n  Int\n  IntArray\n  Str\n  StrArray\n  TagFamilyForWrite\n  TagValue\n  AggregationFunction\n    banyandb/model/v1/query.proto\n  Condition\n  Criteria\n  LogicalExpression\n  QueryOrder\n  Tag\n  TagFamily\n  TagProjection\n  TagProjection.TagFamily\n  TimeRange\n  Condition.BinaryOp\n  LogicalExpression.LogicalOp\n  Sort\n    banyandb/database/v1/schema.proto\n  Entity\n  FieldSpec\n  IndexRule\n  IndexRuleBinding\n  Measure\n  Stream\n  Subject\n  TagFamilySpec\n  TagSpec\n  TopNAggregation\n  CompressionMethod\n  EncodingMethod\n  FieldType\n  IndexRule.Analyzer\n  IndexRule.Location\n  IndexRule.Type\n  TagType\n    banyandb/database/v1/rpc.proto\n  GroupRegistryServiceCreateRequest\n  GroupRegistryServiceCreateResponse\n  GroupRegistryServiceDeleteRequest\n  GroupRegistryServiceDeleteResponse\n  GroupRegistryServiceExistRequest\n  GroupRegistryServiceExistResponse\n  GroupRegistryServiceGetRequest\n  GroupRegistryServiceGetResponse\n  GroupRegistryServiceListRequest\n  GroupRegistryServiceListResponse\n  GroupRegistryServiceUpdateRequest\n  GroupRegistryServiceUpdateResponse\n  IndexRuleBindingRegistryServiceCreateRequest\n  IndexRuleBindingRegistryServiceCreateResponse\n  IndexRuleBindingRegistryServiceDeleteRequest\n  IndexRuleBindingRegistryServiceDeleteResponse\n  IndexRuleBindingRegistryServiceExistRequest\n  IndexRuleBindingRegistryServiceExistResponse\n  IndexRuleBindingRegistryServiceGetRequest\n  IndexRuleBindingRegistryServiceGetResponse\n  IndexRuleBindingRegistryServiceListRequest\n  IndexRuleBindingRegistryServiceListResponse\n  IndexRuleBindingRegistryServiceUpdateRequest\n  IndexRuleBindingRegistryServiceUpdateResponse\n  IndexRuleRegistryServiceCreateRequest\n  IndexRuleRegistryServiceCreateResponse\n  IndexRuleRegistryServiceDeleteRequest\n  IndexRuleRegistryServiceDeleteResponse\n  IndexRuleRegistryServiceExistRequest\n  IndexRuleRegistryServiceExistResponse\n  IndexRuleRegistryServiceGetRequest\n  IndexRuleRegistryServiceGetResponse\n  IndexRuleRegistryServiceListRequest\n  IndexRuleRegistryServiceListResponse\n  IndexRuleRegistryServiceUpdateRequest\n  IndexRuleRegistryServiceUpdateResponse\n  MeasureRegistryServiceCreateRequest\n  MeasureRegistryServiceCreateResponse\n  MeasureRegistryServiceDeleteRequest\n  MeasureRegistryServiceDeleteResponse\n  MeasureRegistryServiceExistRequest\n  MeasureRegistryServiceExistResponse\n  MeasureRegistryServiceGetRequest\n  MeasureRegistryServiceGetResponse\n  MeasureRegistryServiceListRequest\n  MeasureRegistryServiceListResponse\n  MeasureRegistryServiceUpdateRequest\n  MeasureRegistryServiceUpdateResponse\n  StreamRegistryServiceCreateRequest\n  StreamRegistryServiceCreateResponse\n  StreamRegistryServiceDeleteRequest\n  
StreamRegistryServiceDeleteResponse\n  StreamRegistryServiceExistRequest\n  StreamRegistryServiceExistResponse\n  StreamRegistryServiceGetRequest\n  StreamRegistryServiceGetResponse\n  StreamRegistryServiceListRequest\n  StreamRegistryServiceListResponse\n  StreamRegistryServiceUpdateRequest\n  StreamRegistryServiceUpdateResponse\n  TopNAggregationRegistryServiceCreateRequest\n  TopNAggregationRegistryServiceCreateResponse\n  TopNAggregationRegistryServiceDeleteRequest\n  TopNAggregationRegistryServiceDeleteResponse\n  TopNAggregationRegistryServiceExistRequest\n  TopNAggregationRegistryServiceExistResponse\n  TopNAggregationRegistryServiceGetRequest\n  TopNAggregationRegistryServiceGetResponse\n  TopNAggregationRegistryServiceListRequest\n  TopNAggregationRegistryServiceListResponse\n  TopNAggregationRegistryServiceUpdateRequest\n  TopNAggregationRegistryServiceUpdateResponse\n  GroupRegistryService\n  IndexRuleBindingRegistryService\n  IndexRuleRegistryService\n  MeasureRegistryService\n  StreamRegistryService\n  TopNAggregationRegistryService\n    banyandb/measure/v1/query.proto\n DataPoint DataPoint.Field QueryRequest QueryRequest.Aggregation QueryRequest.FieldProjection QueryRequest.GroupBy QueryRequest.Top QueryResponse    banyandb/measure/v1/topn.proto\n TopNList TopNList.Item TopNRequest TopNResponse    banyandb/model/v1/write.proto\n Status    banyandb/measure/v1/write.proto\n DataPointValue InternalWriteRequest WriteRequest WriteResponse    banyandb/measure/v1/rpc.proto\n MeasureService    banyandb/property/v1/property.proto\n Metadata Property    banyandb/property/v1/rpc.proto\n  ApplyRequest\n  ApplyResponse\n  DeleteRequest\n  DeleteResponse\n  GetRequest\n  GetResponse\n  KeepAliveRequest\n  KeepAliveResponse\n  ListRequest\n  ListResponse\n  ApplyRequest.Strategy\n  PropertyService\n    banyandb/stream/v1/query.proto\n Element QueryRequest QueryResponse    banyandb/stream/v1/write.proto\n ElementValue InternalWriteRequest WriteRequest WriteResponse    banyandb/stream/v1/rpc.proto\n StreamService    Scalar Value Types\n  \nTop\nbanyandb/cluster/v1/rpc.proto \nSendRequest    Field Type Label Description     topic string     message_id uint64     body google.protobuf.Any      \nSendResponse    Field Type Label Description     message_id uint64     error string     body google.protobuf.Any      \nService    Method Name Request Type Response Type Description     Send SendRequest stream SendResponse stream     \nTop\nbanyandb/common/v1/common.proto \nGroup Group is an internal object for Group management\n   Field Type Label Description     metadata Metadata  metadata define the group's identity   catalog Catalog  catalog denotes which type of data the group contains   resource_opts ResourceOpts  resourceOpts indicates the structure of the underlying kv storage   updated_at google.protobuf.Timestamp  updated_at indicates when resources of the group are updated    \nIntervalRule IntervalRule is a structured duration\n   Field Type Label Description     unit IntervalRule.Unit  unit can only be UNIT_HOUR or UNIT_DAY   num uint32      \nMetadata Metadata is for multi-tenant, multi-model use\n   Field Type Label Description     group string  group contains a set of options, like retention policy, max   name string  name of the entity   id uint32     create_revision int64  readonly. create_revision is the revision of last creation on this key.   mod_revision int64  readonly. mod_revision is the revision of last modification on this key.    
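To make the common.proto messages above more concrete, here is an illustrative Python sketch (not a real client call) of a Group object shaped after the Group, Metadata, and IntervalRule tables. The group name "sw_metric", the shard count, and the interval/TTL values are assumptions; field names follow the documented messages.

```python
import json

# Hypothetical Group definition mirroring banyandb/common/v1/common.proto.
group = {
    "metadata": {"name": "sw_metric"},        # Metadata: identity of the group
    "catalog": "CATALOG_MEASURE",             # which type of data the group contains
    "resource_opts": {                        # structure of the underlying storage
        "shard_num": 2,
        "segment_interval": {"unit": "UNIT_DAY", "num": 1},  # IntervalRule (HOUR or DAY only)
        "ttl": {"unit": "UNIT_DAY", "num": 7},                # how long data is retained
    },
}

print(json.dumps(group, indent=2))
```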
\nResourceOpts    Field Type Label Description     shard_num uint32  shard_num is the number of shards   block_interval IntervalRule  block_interval indicates the length of a block block_interval should be less than or equal to segment_interval   segment_interval IntervalRule  segment_interval indicates the length of a segment   ttl IntervalRule  ttl indicates time to live, how long the data will be cached    \nCatalog    Name Number Description     CATALOG_UNSPECIFIED 0    CATALOG_STREAM 1    CATALOG_MEASURE 2     \nIntervalRule.Unit    Name Number Description     UNIT_UNSPECIFIED 0    UNIT_HOUR 1    UNIT_DAY 2     \nTop\nbanyandb/database/v1/database.proto \nNode    Field Type Label Description     metadata banyandb.common.v1.Metadata     roles Role repeated    grpc_address string     http_address string     created_at google.protobuf.Timestamp      \nShard    Field Type Label Description     id uint64     metadata banyandb.common.v1.Metadata     catalog banyandb.common.v1.Catalog     node string     total uint32     updated_at google.protobuf.Timestamp     created_at google.protobuf.Timestamp      \nRole    Name Number Description     ROLE_UNSPECIFIED 0    ROLE_META 1    ROLE_DATA 2    ROLE_LIAISON 3     \nTop\nbanyandb/model/v1/common.proto \nFieldValue    Field Type Label Description     null google.protobuf.NullValue     str Str     int Int     binary_data bytes     float Float      \nFloat    Field Type Label Description     value double      \nInt    Field Type Label Description     value int64      \nIntArray    Field Type Label Description     value int64 repeated     \nStr    Field Type Label Description     value string      \nStrArray    Field Type Label Description     value string repeated     \nTagFamilyForWrite    Field Type Label Description     tags TagValue repeated     \nTagValue    Field Type Label Description     null google.protobuf.NullValue     str Str     str_array StrArray     int Int     int_array IntArray     binary_data bytes      \nAggregationFunction    Name Number Description     AGGREGATION_FUNCTION_UNSPECIFIED 0    AGGREGATION_FUNCTION_MEAN 1    AGGREGATION_FUNCTION_MAX 2    AGGREGATION_FUNCTION_MIN 3    AGGREGATION_FUNCTION_COUNT 4    AGGREGATION_FUNCTION_SUM 5     \nTop\nbanyandb/model/v1/query.proto \nCondition Condition consists of the query condition with a single binary operator to be imposed For 1:1 BinaryOp, values in condition must be an array with length = 1, while for 1:N BinaryOp, values can be an array with length \u0026gt;= 1.\n   Field Type Label Description     name string     op Condition.BinaryOp     value TagValue      \nCriteria tag_families are indexed.\n   Field Type Label Description     le LogicalExpression     condition Condition      \nLogicalExpression LogicalExpression supports logical operation\n   Field Type Label Description     op LogicalExpression.LogicalOp  op is a logical operation   left Criteria     right Criteria      \nQueryOrder QueryOrder means a Sort operation to be done for a given index rule. The index_rule_name refers to the name of a index rule bound to the subject.\n   Field Type Label Description     index_rule_name string     sort Sort      \nTag Pair is the building block of a record which is equivalent to a key-value pair. In the context of Trace, it could be metadata of a trace such as service_name, service_instance, etc. Besides, other tags are organized in key-value pair in the underlying storage layer. 
One should notice that the values can be a multi-value.\n   Field Type Label Description     key string     value TagValue      \nTagFamily    Field Type Label Description     name string     tags Tag repeated     \nTagProjection TagProjection is used to select the names of keys to be returned.\n   Field Type Label Description     tag_families TagProjection.TagFamily repeated     \nTagProjection.TagFamily    Field Type Label Description     name string     tags string repeated     \nTimeRange TimeRange is a range query for uint64, the range here follows left-inclusive and right-exclusive rule, i.e. [begin, end) if both edges exist\n   Field Type Label Description     begin google.protobuf.Timestamp     end google.protobuf.Timestamp      \nCondition.BinaryOp BinaryOp specifies the operation imposed to the given query condition For EQ, NE, LT, GT, LE and GE, only one operand should be given, i.e. one-to-one relationship. HAVING and NOT_HAVING allow multi-value to be the operand such as array/vector, i.e. one-to-many relationship. For example, \u0026quot;keyA\u0026quot; contains \u0026quot;valueA\u0026quot; and \u0026quot;valueB\u0026quot; MATCH performances a full-text search if the tag is analyzed. The string value applies to the same analyzer as the tag, but string array value does not. Each item in a string array is seen as a token instead of a query expression.\n   Name Number Description     BINARY_OP_UNSPECIFIED 0    BINARY_OP_EQ 1    BINARY_OP_NE 2    BINARY_OP_LT 3    BINARY_OP_GT 4    BINARY_OP_LE 5    BINARY_OP_GE 6    BINARY_OP_HAVING 7    BINARY_OP_NOT_HAVING 8    BINARY_OP_IN 9    BINARY_OP_NOT_IN 10    BINARY_OP_MATCH 11     \nLogicalExpression.LogicalOp    Name Number Description     LOGICAL_OP_UNSPECIFIED 0    LOGICAL_OP_AND 1    LOGICAL_OP_OR 2     \nSort    Name Number Description     SORT_UNSPECIFIED 0    SORT_DESC 1    SORT_ASC 2     \nTop\nbanyandb/database/v1/schema.proto \nEntity    Field Type Label Description     tag_names string repeated     \nFieldSpec FieldSpec is the specification of field\n   Field Type Label Description     name string  name is the identity of a field   field_type FieldType  field_type denotes the type of field value   encoding_method EncodingMethod  encoding_method indicates how to encode data during writing   compression_method CompressionMethod  compression_method indicates how to compress data during writing    \nIndexRule IndexRule defines how to generate indices based on tags and the index type IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata define the rule's identity   tags string repeated tags are the combination that refers to an indexed object If the elements in tags are more than 1, the object will generate a multi-tag index Caveat: All tags in a multi-tag MUST have an identical IndexType   type IndexRule.Type  type is the IndexType of this IndexObject.   location IndexRule.Location  location indicates where to store index.   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRule is updated   analyzer IndexRule.Analyzer  analyzer analyzes tag value to support the full-text searching for TYPE_INVERTED indices.    
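As an illustration of the IndexRule message just described, the following Python sketch shapes a hypothetical rule after the documented fields; the group, rule name, and tag name are assumptions, while the enum values are taken from the IndexRule.Type, IndexRule.Location, and IndexRule.Analyzer tables further below.

```python
import json

# Hypothetical IndexRule mirroring banyandb/database/v1/schema.proto.
index_rule = {
    "metadata": {"group": "sw_stream", "name": "db.statement"},
    "tags": ["db.statement"],          # single-tag index; multi-tag rules must share one IndexType
    "type": "TYPE_INVERTED",
    "location": "LOCATION_SERIES",     # where the index is stored
    "analyzer": "ANALYZER_STANDARD",   # enables full-text search on the tag value
}

print(json.dumps(index_rule, indent=2))
```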
\nIndexRuleBinding IndexRuleBinding is a bridge to connect severalIndexRules to a subject This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of this binding   rules string repeated rules refers to the IndexRule   subject Subject  subject indicates the subject of binding action   begin_at google.protobuf.Timestamp  begin_at_nanoseconds is the timestamp, after which the binding will be active   expire_at google.protobuf.Timestamp  expire_at_nanoseconds it the timestamp, after which the binding will be inactive expire_at_nanoseconds must be larger than begin_at_nanoseconds   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRuleBinding is updated    \nMeasure Measure intends to store data point\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a measure   tag_families TagFamilySpec repeated tag_families are for filter measures   fields FieldSpec repeated fields denote measure values   entity Entity  entity indicates which tags will be to generate a series and shard a measure   interval string  interval indicates how frequently to send a data point valid time units are \u0026quot;ns\u0026quot;, \u0026quot;us\u0026quot; (or \u0026quot;µs\u0026quot;), \u0026quot;ms\u0026quot;, \u0026quot;s\u0026quot;, \u0026quot;m\u0026quot;, \u0026quot;h\u0026quot;, \u0026quot;d\u0026quot;.   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nStream Stream intends to store streaming data, for example, traces or logs\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a trace series   tag_families TagFamilySpec repeated tag_families   entity Entity  entity indicates how to generate a series and shard a stream   updated_at google.protobuf.Timestamp  updated_at indicates when the stream is updated    \nSubject Subject defines which stream or measure would generate indices\n   Field Type Label Description     catalog banyandb.common.v1.Catalog  catalog is where the subject belongs to todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   name string  name refers to a stream or measure in a particular catalog    \nTagFamilySpec    Field Type Label Description     name string     tags TagSpec repeated tags defines accepted tags    \nTagSpec    Field Type Label Description     name string     type TagType     indexed_only bool  indexed_only indicates whether the tag is stored True: It's indexed only, but not stored False: it's stored and indexed    \nTopNAggregation TopNAggregation generates offline TopN statistics for a measure's TopN approximation\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of an aggregation   source_measure banyandb.common.v1.Metadata  source_measure denotes the data source of this aggregation   field_name string  field_name is the name of field used for ranking   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN + bottomN todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   group_by_tag_names string repeated group_by_tag_names groups data points into statistical counters   criteria banyandb.model.v1.Criteria  criteria 
select partial data points from measure   counters_number int32  counters_number sets the number of counters to be tracked. The default value is 1000   lru_size int32  lru_size defines how much entry is allowed to be maintained in the memory   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nCompressionMethod    Name Number Description     COMPRESSION_METHOD_UNSPECIFIED 0    COMPRESSION_METHOD_ZSTD 1     \nEncodingMethod    Name Number Description     ENCODING_METHOD_UNSPECIFIED 0    ENCODING_METHOD_GORILLA 1     \nFieldType    Name Number Description     FIELD_TYPE_UNSPECIFIED 0    FIELD_TYPE_STRING 1    FIELD_TYPE_INT 2    FIELD_TYPE_DATA_BINARY 3    FIELD_TYPE_FLOAT 4     \nIndexRule.Analyzer    Name Number Description     ANALYZER_UNSPECIFIED 0    ANALYZER_KEYWORD 1 Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.   ANALYZER_STANDARD 2 Standard analyzer provides grammar based tokenization   ANALYZER_SIMPLE 3 Simple analyzer breaks text into tokens at any non-letter character, such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, and changes uppercase to lowercase.    \nIndexRule.Location    Name Number Description     LOCATION_UNSPECIFIED 0    LOCATION_SERIES 1    LOCATION_GLOBAL 2     \nIndexRule.Type Type determine the index structure under the hood\n   Name Number Description     TYPE_UNSPECIFIED 0    TYPE_TREE 1    TYPE_INVERTED 2     \nTagType    Name Number Description     TAG_TYPE_UNSPECIFIED 0    TAG_TYPE_STRING 1    TAG_TYPE_INT 2    TAG_TYPE_STRING_ARRAY 3    TAG_TYPE_INT_ARRAY 4    TAG_TYPE_DATA_BINARY 5     \nTop\nbanyandb/database/v1/rpc.proto \nGroupRegistryServiceCreateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceCreateResponse \nGroupRegistryServiceDeleteRequest    Field Type Label Description     group string      \nGroupRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nGroupRegistryServiceExistRequest    Field Type Label Description     group string      \nGroupRegistryServiceExistResponse    Field Type Label Description     has_group bool      \nGroupRegistryServiceGetRequest    Field Type Label Description     group string      \nGroupRegistryServiceGetResponse    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceListRequest \nGroupRegistryServiceListResponse    Field Type Label Description     group banyandb.common.v1.Group repeated     \nGroupRegistryServiceUpdateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceUpdateResponse \nIndexRuleBindingRegistryServiceCreateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceCreateResponse \nIndexRuleBindingRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleBindingRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule_binding bool      \nIndexRuleBindingRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceGetResponse    Field Type Label Description 
    index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleBindingRegistryServiceListResponse    Field Type Label Description     index_rule_binding IndexRuleBinding repeated     \nIndexRuleBindingRegistryServiceUpdateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceUpdateResponse \nIndexRuleRegistryServiceCreateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceCreateResponse \nIndexRuleRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule bool      \nIndexRuleRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceGetResponse    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleRegistryServiceListResponse    Field Type Label Description     index_rule IndexRule repeated     \nIndexRuleRegistryServiceUpdateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceUpdateResponse \nMeasureRegistryServiceCreateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nMeasureRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nMeasureRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_measure bool      \nMeasureRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceGetResponse    Field Type Label Description     measure Measure      \nMeasureRegistryServiceListRequest    Field Type Label Description     group string      \nMeasureRegistryServiceListResponse    Field Type Label Description     measure Measure repeated     \nMeasureRegistryServiceUpdateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceCreateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nStreamRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_stream bool      \nStreamRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata 
     \nStreamRegistryServiceGetResponse    Field Type Label Description     stream Stream      \nStreamRegistryServiceListRequest    Field Type Label Description     group string      \nStreamRegistryServiceListResponse    Field Type Label Description     stream Stream repeated     \nStreamRegistryServiceUpdateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nTopNAggregationRegistryServiceCreateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceCreateResponse \nTopNAggregationRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nTopNAggregationRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_top_n_aggregation bool      \nTopNAggregationRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceGetResponse    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceListRequest    Field Type Label Description     group string      \nTopNAggregationRegistryServiceListResponse    Field Type Label Description     top_n_aggregation TopNAggregation repeated     \nTopNAggregationRegistryServiceUpdateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceUpdateResponse \nGroupRegistryService    Method Name Request Type Response Type Description     Create GroupRegistryServiceCreateRequest GroupRegistryServiceCreateResponse    Update GroupRegistryServiceUpdateRequest GroupRegistryServiceUpdateResponse    Delete GroupRegistryServiceDeleteRequest GroupRegistryServiceDeleteResponse    Get GroupRegistryServiceGetRequest GroupRegistryServiceGetResponse    List GroupRegistryServiceListRequest GroupRegistryServiceListResponse    Exist GroupRegistryServiceExistRequest GroupRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nIndexRuleBindingRegistryService    Method Name Request Type Response Type Description     Create IndexRuleBindingRegistryServiceCreateRequest IndexRuleBindingRegistryServiceCreateResponse    Update IndexRuleBindingRegistryServiceUpdateRequest IndexRuleBindingRegistryServiceUpdateResponse    Delete IndexRuleBindingRegistryServiceDeleteRequest IndexRuleBindingRegistryServiceDeleteResponse    Get IndexRuleBindingRegistryServiceGetRequest IndexRuleBindingRegistryServiceGetResponse    List IndexRuleBindingRegistryServiceListRequest IndexRuleBindingRegistryServiceListResponse    Exist IndexRuleBindingRegistryServiceExistRequest IndexRuleBindingRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. 
Please use HEAD method to touch Get instead    \nIndexRuleRegistryService    Method Name Request Type Response Type Description     Create IndexRuleRegistryServiceCreateRequest IndexRuleRegistryServiceCreateResponse    Update IndexRuleRegistryServiceUpdateRequest IndexRuleRegistryServiceUpdateResponse    Delete IndexRuleRegistryServiceDeleteRequest IndexRuleRegistryServiceDeleteResponse    Get IndexRuleRegistryServiceGetRequest IndexRuleRegistryServiceGetResponse    List IndexRuleRegistryServiceListRequest IndexRuleRegistryServiceListResponse    Exist IndexRuleRegistryServiceExistRequest IndexRuleRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nMeasureRegistryService    Method Name Request Type Response Type Description     Create MeasureRegistryServiceCreateRequest MeasureRegistryServiceCreateResponse    Update MeasureRegistryServiceUpdateRequest MeasureRegistryServiceUpdateResponse    Delete MeasureRegistryServiceDeleteRequest MeasureRegistryServiceDeleteResponse    Get MeasureRegistryServiceGetRequest MeasureRegistryServiceGetResponse    List MeasureRegistryServiceListRequest MeasureRegistryServiceListResponse    Exist MeasureRegistryServiceExistRequest MeasureRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nStreamRegistryService    Method Name Request Type Response Type Description     Create StreamRegistryServiceCreateRequest StreamRegistryServiceCreateResponse    Update StreamRegistryServiceUpdateRequest StreamRegistryServiceUpdateResponse    Delete StreamRegistryServiceDeleteRequest StreamRegistryServiceDeleteResponse    Get StreamRegistryServiceGetRequest StreamRegistryServiceGetResponse    List StreamRegistryServiceListRequest StreamRegistryServiceListResponse    Exist StreamRegistryServiceExistRequest StreamRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTopNAggregationRegistryService    Method Name Request Type Response Type Description     Create TopNAggregationRegistryServiceCreateRequest TopNAggregationRegistryServiceCreateResponse    Update TopNAggregationRegistryServiceUpdateRequest TopNAggregationRegistryServiceUpdateResponse    Delete TopNAggregationRegistryServiceDeleteRequest TopNAggregationRegistryServiceDeleteResponse    Get TopNAggregationRegistryServiceGetRequest TopNAggregationRegistryServiceGetResponse    List TopNAggregationRegistryServiceListRequest TopNAggregationRegistryServiceListResponse    Exist TopNAggregationRegistryServiceExistRequest TopNAggregationRegistryServiceExistResponse     \nTop\nbanyandb/measure/v1/query.proto \nDataPoint DataPoint is stored in Measures\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamily repeated tag_families contains tags selected in the projection   fields DataPoint.Field repeated fields contains fields selected in the projection    \nDataPoint.Field    Field Type Label Description     name string     value banyandb.model.v1.FieldValue      \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   criteria banyandb.model.v1.Criteria  tag_families are indexed.   
tag_projection banyandb.model.v1.TagProjection  tag_projection can be used to select tags of the data points in the response   field_projection QueryRequest.FieldProjection  field_projection can be used to select fields of the data points in the response   group_by QueryRequest.GroupBy  group_by groups data points based on their field value for a specific tag and use field_name as the projection name   agg QueryRequest.Aggregation  agg aggregates data points based on a field   top QueryRequest.Top  top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by's output   offset uint32  offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top's output   limit uint32  limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top's output   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a tag.    \nQueryRequest.Aggregation    Field Type Label Description     function banyandb.model.v1.AggregationFunction     field_name string  field_name must be one of files indicated by the field_projection    \nQueryRequest.FieldProjection    Field Type Label Description     names string repeated     \nQueryRequest.GroupBy    Field Type Label Description     tag_projection banyandb.model.v1.TagProjection  tag_projection must be a subset of the tag_projection of QueryRequest   field_name string  field_name must be one of fields indicated by field_projection    \nQueryRequest.Top    Field Type Label Description     number int32  number set the how many items should be returned   field_name string  field_name must be one of files indicated by the field_projection   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     data_points DataPoint repeated data_points are the actual data returned    \nTop\nbanyandb/measure/v1/topn.proto \nTopNList TopNList contains a series of topN items\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   items TopNList.Item repeated items contains top-n items in a list    \nTopNList.Item    Field Type Label Description     entity banyandb.model.v1.Tag repeated    value banyandb.model.v1.FieldValue      \nTopNRequest TopNRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   top_n int32  top_n set the how many items should be returned in each list.   agg banyandb.model.v1.AggregationFunction  agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only   conditions banyandb.model.v1.Condition repeated criteria select counters. Only equals are acceptable.   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields    \nTopNResponse TopNResponse is the response for a query to the Query module.\n   Field Type Label Description     lists TopNList repeated lists contain a series topN lists ranked by timestamp if agg_func in query request is specified, lists' size should be one.    
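To tie the measure query messages above together, here is a Python sketch of a QueryRequest shaped after the documented fields (metadata, time_range, criteria, projections, top). The group/measure names, tag and field names, and time range are hypothetical; the structure and enum values follow the tables above.

```python
import json

# Hypothetical measure QueryRequest mirroring banyandb/measure/v1/query.proto.
query_request = {
    "metadata": {"group": "sw_metric", "name": "service_cpm"},
    "time_range": {                      # [begin, end), per the TimeRange message
        "begin": "2023-02-27T00:00:00Z",
        "end": "2023-02-27T01:00:00Z",
    },
    "criteria": {                        # a single equality condition on an indexed tag
        "condition": {
            "name": "service_id",
            "op": "BINARY_OP_EQ",
            "value": {"str": {"value": "agent::songs"}},
        }
    },
    "tag_projection": {
        "tag_families": [{"name": "default", "tags": ["service_id"]}],
    },
    "field_projection": {"names": ["value"]},
    "top": {                             # top 10 data points by the "value" field, descending
        "number": 10,
        "field_name": "value",
        "field_value_sort": "SORT_DESC",
    },
}

print(json.dumps(query_request, indent=2))
```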
\nTop\nbanyandb/model/v1/write.proto \nStatus Status is the response status for write\n   Name Number Description     STATUS_UNSPECIFIED 0    STATUS_SUCCEED 1    STATUS_INVALID_TIMESTAMP 2    STATUS_NOT_FOUND 3    STATUS_EXPIRED_SCHEMA 4    STATUS_INTERNAL_ERROR 5     \nTop\nbanyandb/measure/v1/write.proto \nDataPointValue DataPointValue is the data point for writing. It only contains values.\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the measure schema   fields banyandb.model.v1.FieldValue repeated the order of fields match the measure schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest WriteRequest is the request contract for write\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   data_point DataPointValue  the data_point is required.   message_id uint64  the message_id is required.    \nWriteResponse WriteResponse is the response contract for write\n   Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/measure/v1/rpc.proto \nMeasureService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream    TopN TopNRequest TopNResponse     \nTop\nbanyandb/property/v1/property.proto \nMetadata Metadata is for multi-tenant use\n   Field Type Label Description     container banyandb.common.v1.Metadata  container is created when it receives the first property   id string  id identifies a property    \nProperty Property stores the user defined data\n   Field Type Label Description     metadata Metadata  metadata is the identity of a property   tags banyandb.model.v1.Tag repeated tag stores the content of a property   updated_at google.protobuf.Timestamp  updated_at indicates when the property is updated   lease_id int64  readonly. lease_id is the ID of the lease that attached to key.   ttl string  ttl indicates the time to live of the property. It's a string in the format of \u0026quot;1h\u0026quot;, \u0026quot;2m\u0026quot;, \u0026quot;3s\u0026quot;, \u0026quot;1500ms\u0026quot;. It defaults to 0s, which means the property never expires. The minimum allowed ttl is 1s.    \nTop\nbanyandb/property/v1/rpc.proto \nApplyRequest    Field Type Label Description     property Property     strategy ApplyRequest.Strategy  strategy indicates how to update a property. It defaults to STRATEGY_MERGE    \nApplyResponse    Field Type Label Description     created bool  created indicates whether the property existed. True: the property is absent. False: the property existed.   
tags_num uint32     lease_id int64      \nDeleteRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nDeleteResponse    Field Type Label Description     deleted bool     tags_num uint32      \nGetRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nGetResponse    Field Type Label Description     property Property      \nKeepAliveRequest    Field Type Label Description     lease_id int64      \nKeepAliveResponse \nListRequest    Field Type Label Description     container banyandb.common.v1.Metadata     ids string repeated    tags string repeated     \nListResponse    Field Type Label Description     property Property repeated     \nApplyRequest.Strategy    Name Number Description     STRATEGY_UNSPECIFIED 0    STRATEGY_MERGE 1    STRATEGY_REPLACE 2     \nPropertyService    Method Name Request Type Response Type Description     Apply ApplyRequest ApplyResponse Apply creates a property if it's absent, or update a existed one based on a strategy.   Delete DeleteRequest DeleteResponse    Get GetRequest GetResponse    List ListRequest ListResponse    KeepAlive KeepAliveRequest KeepAliveResponse     \nTop\nbanyandb/stream/v1/query.proto \nElement Element represents (stream context) a Span defined in Google Dapper paper or equivalently a Segment in Skywalking. (Log context) a log\n   Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp represents a millisecond 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamily repeated fields contains all indexed Field. Some typical names, - stream_id - duration - service_name - service_instance_id - end_time_milliseconds    \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds. In the context of stream, it represents the range of the startTime for spans/segments, while in the context of Log, it means the range of the timestamp(s) for logs. it is always recommended to specify time range for performance reason   offset uint32  offset is used to support pagination, together with the following limit   limit uint32  limit is used to impose a boundary on the number of records being returned   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a field. So far, only fields in the type of Integer are supported   criteria banyandb.model.v1.Criteria  tag_families are indexed.   projection banyandb.model.v1.TagProjection  projection can be used to select the key names of the element in the response    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     elements Element repeated elements are the actual data returned    \nTop\nbanyandb/stream/v1/write.proto \nElementValue    Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds. 
It represents 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the stream schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   element ElementValue  the element is required.   message_id uint64  the message_id is required.    \nWriteResponse    Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/stream/v1/rpc.proto \nStreamService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream     Scalar Value Types    .proto Type Notes C++ Java Python Go C# PHP Ruby     double  double double float float64 double float Float   float  float float float float32 float float Float   int32 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. int32 int int int32 int integer Bignum or Fixnum (as required)   int64 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. int64 long int/long int64 long integer/string Bignum   uint32 Uses variable-length encoding. uint32 int int/long uint32 uint integer Bignum or Fixnum (as required)   uint64 Uses variable-length encoding. uint64 long int/long uint64 ulong integer/string Bignum or Fixnum (as required)   sint32 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. int32 int int int32 int integer Bignum or Fixnum (as required)   sint64 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. int64 long int/long int64 long integer/string Bignum   fixed32 Always four bytes. More efficient than uint32 if values are often greater than 2^28. uint32 int int uint32 uint integer Bignum or Fixnum (as required)   fixed64 Always eight bytes. More efficient than uint64 if values are often greater than 2^56. uint64 long int/long uint64 ulong integer/string Bignum   sfixed32 Always four bytes. int32 int int int32 int integer Bignum or Fixnum (as required)   sfixed64 Always eight bytes. int64 long int/long int64 long integer/string Bignum   bool  bool boolean boolean bool bool boolean TrueClass/FalseClass   string A string must always contain UTF-8 encoded or 7-bit ASCII text. string String str/unicode string string string String (UTF-8)   bytes May contain any arbitrary sequence of bytes. 
string ByteString str []byte ByteString string String (ASCII-8BIT)    ","excerpt":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest …","ref":"/docs/skywalking-banyandb/latest/api-reference/","title":"Protocol Documentation"},{"body":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest\n  SendResponse\n  Service\n    banyandb/common/v1/common.proto\n  Group\n  IntervalRule\n  Metadata\n  ResourceOpts\n  Catalog\n  IntervalRule.Unit\n    banyandb/database/v1/database.proto\n  Node\n  Shard\n  Role\n    banyandb/model/v1/common.proto\n  FieldValue\n  Float\n  Int\n  IntArray\n  Str\n  StrArray\n  TagFamilyForWrite\n  TagValue\n  AggregationFunction\n    banyandb/model/v1/query.proto\n  Condition\n  Criteria\n  LogicalExpression\n  QueryOrder\n  Tag\n  TagFamily\n  TagProjection\n  TagProjection.TagFamily\n  TimeRange\n  Condition.BinaryOp\n  LogicalExpression.LogicalOp\n  Sort\n    banyandb/database/v1/schema.proto\n  Entity\n  FieldSpec\n  IndexRule\n  IndexRuleBinding\n  Measure\n  Stream\n  Subject\n  TagFamilySpec\n  TagSpec\n  TopNAggregation\n  CompressionMethod\n  EncodingMethod\n  FieldType\n  IndexRule.Analyzer\n  IndexRule.Type\n  TagType\n    banyandb/database/v1/rpc.proto\n  GroupRegistryServiceCreateRequest\n  GroupRegistryServiceCreateResponse\n  GroupRegistryServiceDeleteRequest\n  GroupRegistryServiceDeleteResponse\n  GroupRegistryServiceExistRequest\n  GroupRegistryServiceExistResponse\n  GroupRegistryServiceGetRequest\n  GroupRegistryServiceGetResponse\n  GroupRegistryServiceListRequest\n  GroupRegistryServiceListResponse\n  GroupRegistryServiceUpdateRequest\n  GroupRegistryServiceUpdateResponse\n  IndexRuleBindingRegistryServiceCreateRequest\n  IndexRuleBindingRegistryServiceCreateResponse\n  IndexRuleBindingRegistryServiceDeleteRequest\n  IndexRuleBindingRegistryServiceDeleteResponse\n  IndexRuleBindingRegistryServiceExistRequest\n  IndexRuleBindingRegistryServiceExistResponse\n  IndexRuleBindingRegistryServiceGetRequest\n  IndexRuleBindingRegistryServiceGetResponse\n  IndexRuleBindingRegistryServiceListRequest\n  IndexRuleBindingRegistryServiceListResponse\n  IndexRuleBindingRegistryServiceUpdateRequest\n  IndexRuleBindingRegistryServiceUpdateResponse\n  IndexRuleRegistryServiceCreateRequest\n  IndexRuleRegistryServiceCreateResponse\n  IndexRuleRegistryServiceDeleteRequest\n  IndexRuleRegistryServiceDeleteResponse\n  IndexRuleRegistryServiceExistRequest\n  IndexRuleRegistryServiceExistResponse\n  IndexRuleRegistryServiceGetRequest\n  IndexRuleRegistryServiceGetResponse\n  IndexRuleRegistryServiceListRequest\n  IndexRuleRegistryServiceListResponse\n  IndexRuleRegistryServiceUpdateRequest\n  IndexRuleRegistryServiceUpdateResponse\n  MeasureRegistryServiceCreateRequest\n  MeasureRegistryServiceCreateResponse\n  MeasureRegistryServiceDeleteRequest\n  MeasureRegistryServiceDeleteResponse\n  MeasureRegistryServiceExistRequest\n  MeasureRegistryServiceExistResponse\n  MeasureRegistryServiceGetRequest\n  MeasureRegistryServiceGetResponse\n  MeasureRegistryServiceListRequest\n  MeasureRegistryServiceListResponse\n  MeasureRegistryServiceUpdateRequest\n  MeasureRegistryServiceUpdateResponse\n  StreamRegistryServiceCreateRequest\n  StreamRegistryServiceCreateResponse\n  StreamRegistryServiceDeleteRequest\n  StreamRegistryServiceDeleteResponse\n  StreamRegistryServiceExistRequest\n  StreamRegistryServiceExistResponse\n  StreamRegistryServiceGetRequest\n  StreamRegistryServiceGetResponse\n  
StreamRegistryServiceListRequest\n  StreamRegistryServiceListResponse\n  StreamRegistryServiceUpdateRequest\n  StreamRegistryServiceUpdateResponse\n  TopNAggregationRegistryServiceCreateRequest\n  TopNAggregationRegistryServiceCreateResponse\n  TopNAggregationRegistryServiceDeleteRequest\n  TopNAggregationRegistryServiceDeleteResponse\n  TopNAggregationRegistryServiceExistRequest\n  TopNAggregationRegistryServiceExistResponse\n  TopNAggregationRegistryServiceGetRequest\n  TopNAggregationRegistryServiceGetResponse\n  TopNAggregationRegistryServiceListRequest\n  TopNAggregationRegistryServiceListResponse\n  TopNAggregationRegistryServiceUpdateRequest\n  TopNAggregationRegistryServiceUpdateResponse\n  GroupRegistryService\n  IndexRuleBindingRegistryService\n  IndexRuleRegistryService\n  MeasureRegistryService\n  StreamRegistryService\n  TopNAggregationRegistryService\n    banyandb/measure/v1/query.proto\n DataPoint DataPoint.Field QueryRequest QueryRequest.Aggregation QueryRequest.FieldProjection QueryRequest.GroupBy QueryRequest.Top QueryResponse    banyandb/measure/v1/topn.proto\n TopNList TopNList.Item TopNRequest TopNResponse    banyandb/model/v1/write.proto\n Status    banyandb/measure/v1/write.proto\n DataPointValue InternalWriteRequest WriteRequest WriteResponse    banyandb/measure/v1/rpc.proto\n MeasureService    banyandb/property/v1/property.proto\n Metadata Property    banyandb/property/v1/rpc.proto\n  ApplyRequest\n  ApplyResponse\n  DeleteRequest\n  DeleteResponse\n  GetRequest\n  GetResponse\n  KeepAliveRequest\n  KeepAliveResponse\n  ListRequest\n  ListResponse\n  ApplyRequest.Strategy\n  PropertyService\n    banyandb/stream/v1/query.proto\n Element QueryRequest QueryResponse    banyandb/stream/v1/write.proto\n ElementValue InternalWriteRequest WriteRequest WriteResponse    banyandb/stream/v1/rpc.proto\n StreamService    Scalar Value Types\n  \nTop\nbanyandb/cluster/v1/rpc.proto \nSendRequest    Field Type Label Description     topic string     message_id uint64     body google.protobuf.Any     batch_mod bool      \nSendResponse    Field Type Label Description     message_id uint64     error string     body google.protobuf.Any      \nService    Method Name Request Type Response Type Description     Send SendRequest stream SendResponse stream     \nTop\nbanyandb/common/v1/common.proto \nGroup Group is an internal object for Group management\n   Field Type Label Description     metadata Metadata  metadata define the group's identity   catalog Catalog  catalog denotes which type of data the group contains   resource_opts ResourceOpts  resourceOpts indicates the structure of the underlying kv storage   updated_at google.protobuf.Timestamp  updated_at indicates when resources of the group are updated    \nIntervalRule IntervalRule is a structured duration\n   Field Type Label Description     unit IntervalRule.Unit  unit can only be UNIT_HOUR or UNIT_DAY   num uint32      \nMetadata Metadata is for multi-tenant, multi-model use\n   Field Type Label Description     group string  group contains a set of options, like retention policy, max   name string  name of the entity   id uint32  id is the unique identifier of the entity if id is not set, the system will generate a unique id   create_revision int64  readonly. create_revision is the revision of last creation on this key.   mod_revision int64  readonly. mod_revision is the revision of last modification on this key.    
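To make the common.proto building blocks above concrete, here is a minimal sketch of a Group definition written as a plain dictionary that mirrors the documented field names. It is illustrative only: the group name, shard count, and interval values are invented, and the ResourceOpts fields it uses (shard_num, segment_interval, ttl) are the ones described in the next table.

import json

# Illustrative sketch only: field names are copied from the Group, Metadata,
# IntervalRule and ResourceOpts tables in this document; every concrete value
# (group name, shard count, durations) is made up.
group = {
    "metadata": {"name": "sw_metrics"},                      # Metadata: the group's identity
    "catalog": "CATALOG_MEASURE",                            # which type of data the group contains
    "resource_opts": {
        "shard_num": 2,                                      # number of shards
        "segment_interval": {"unit": "UNIT_DAY", "num": 1},  # IntervalRule: a structured duration
        "ttl": {"unit": "UNIT_DAY", "num": 7},               # how long the data will be cached
    },
}

print(json.dumps(group, indent=2))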
\nResourceOpts    Field Type Label Description     shard_num uint32  shard_num is the number of shards   segment_interval IntervalRule  segment_interval indicates the length of a segment   ttl IntervalRule  ttl indicates time to live, how long the data will be cached    \nCatalog    Name Number Description     CATALOG_UNSPECIFIED 0    CATALOG_STREAM 1    CATALOG_MEASURE 2     \nIntervalRule.Unit    Name Number Description     UNIT_UNSPECIFIED 0    UNIT_HOUR 1    UNIT_DAY 2     \nTop\nbanyandb/database/v1/database.proto \nNode    Field Type Label Description     metadata banyandb.common.v1.Metadata     roles Role repeated    grpc_address string     http_address string     created_at google.protobuf.Timestamp      \nShard    Field Type Label Description     id uint64     metadata banyandb.common.v1.Metadata     catalog banyandb.common.v1.Catalog     node string     total uint32     updated_at google.protobuf.Timestamp     created_at google.protobuf.Timestamp      \nRole    Name Number Description     ROLE_UNSPECIFIED 0    ROLE_META 1    ROLE_DATA 2    ROLE_LIAISON 3     \nTop\nbanyandb/model/v1/common.proto \nFieldValue    Field Type Label Description     null google.protobuf.NullValue     str Str     int Int     binary_data bytes     float Float      \nFloat    Field Type Label Description     value double      \nInt    Field Type Label Description     value int64      \nIntArray    Field Type Label Description     value int64 repeated     \nStr    Field Type Label Description     value string      \nStrArray    Field Type Label Description     value string repeated     \nTagFamilyForWrite    Field Type Label Description     tags TagValue repeated     \nTagValue    Field Type Label Description     null google.protobuf.NullValue     str Str     str_array StrArray     int Int     int_array IntArray     binary_data bytes      \nAggregationFunction    Name Number Description     AGGREGATION_FUNCTION_UNSPECIFIED 0    AGGREGATION_FUNCTION_MEAN 1    AGGREGATION_FUNCTION_MAX 2    AGGREGATION_FUNCTION_MIN 3    AGGREGATION_FUNCTION_COUNT 4    AGGREGATION_FUNCTION_SUM 5     \nTop\nbanyandb/model/v1/query.proto \nCondition Condition consists of the query condition with a single binary operator to be imposed For 1:1 BinaryOp, values in condition must be an array with length = 1, while for 1:N BinaryOp, values can be an array with length \u0026gt;= 1.\n   Field Type Label Description     name string     op Condition.BinaryOp     value TagValue      \nCriteria tag_families are indexed.\n   Field Type Label Description     le LogicalExpression     condition Condition      \nLogicalExpression LogicalExpression supports logical operation\n   Field Type Label Description     op LogicalExpression.LogicalOp  op is a logical operation   left Criteria     right Criteria      \nQueryOrder QueryOrder means a Sort operation to be done for a given index rule. The index_rule_name refers to the name of a index rule bound to the subject.\n   Field Type Label Description     index_rule_name string     sort Sort      \nTag Pair is the building block of a record which is equivalent to a key-value pair. In the context of Trace, it could be metadata of a trace such as service_name, service_instance, etc. Besides, other tags are organized in key-value pair in the underlying storage layer. 
One should notice that the values can be a multi-value.\n   Field Type Label Description     key string     value TagValue      \nTagFamily    Field Type Label Description     name string     tags Tag repeated     \nTagProjection TagProjection is used to select the names of keys to be returned.\n   Field Type Label Description     tag_families TagProjection.TagFamily repeated     \nTagProjection.TagFamily    Field Type Label Description     name string     tags string repeated     \nTimeRange TimeRange is a range query for uint64, the range here follows left-inclusive and right-exclusive rule, i.e. [begin, end) if both edges exist\n   Field Type Label Description     begin google.protobuf.Timestamp     end google.protobuf.Timestamp      \nCondition.BinaryOp BinaryOp specifies the operation imposed to the given query condition For EQ, NE, LT, GT, LE and GE, only one operand should be given, i.e. one-to-one relationship. HAVING and NOT_HAVING allow multi-value to be the operand such as array/vector, i.e. one-to-many relationship. For example, \u0026quot;keyA\u0026quot; contains \u0026quot;valueA\u0026quot; and \u0026quot;valueB\u0026quot; MATCH performances a full-text search if the tag is analyzed. The string value applies to the same analyzer as the tag, but string array value does not. Each item in a string array is seen as a token instead of a query expression.\n   Name Number Description     BINARY_OP_UNSPECIFIED 0    BINARY_OP_EQ 1    BINARY_OP_NE 2    BINARY_OP_LT 3    BINARY_OP_GT 4    BINARY_OP_LE 5    BINARY_OP_GE 6    BINARY_OP_HAVING 7    BINARY_OP_NOT_HAVING 8    BINARY_OP_IN 9    BINARY_OP_NOT_IN 10    BINARY_OP_MATCH 11     \nLogicalExpression.LogicalOp    Name Number Description     LOGICAL_OP_UNSPECIFIED 0    LOGICAL_OP_AND 1    LOGICAL_OP_OR 2     \nSort    Name Number Description     SORT_UNSPECIFIED 0    SORT_DESC 1    SORT_ASC 2     \nTop\nbanyandb/database/v1/schema.proto \nEntity    Field Type Label Description     tag_names string repeated     \nFieldSpec FieldSpec is the specification of field\n   Field Type Label Description     name string  name is the identity of a field   field_type FieldType  field_type denotes the type of field value   encoding_method EncodingMethod  encoding_method indicates how to encode data during writing   compression_method CompressionMethod  compression_method indicates how to compress data during writing    \nIndexRule IndexRule defines how to generate indices based on tags and the index type IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata define the rule's identity   tags string repeated tags are the combination that refers to an indexed object If the elements in tags are more than 1, the object will generate a multi-tag index Caveat: All tags in a multi-tag MUST have an identical IndexType   type IndexRule.Type  type is the IndexType of this IndexObject.   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRule is updated   analyzer IndexRule.Analyzer  analyzer analyzes tag value to support the full-text searching for TYPE_INVERTED indices.    
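As a concrete reading of the Condition, Criteria and LogicalExpression tables above, the following sketch nests two Conditions under a LogicalExpression to express service_name == 'svc_a' AND duration > 100. Field and enum names are taken from those tables; the tag names and literal values are invented for illustration.

import json

# Illustrative criteria tree: (service_name == "svc_a") AND (duration > 100).
# Structure and enum names follow the Criteria/Condition/LogicalExpression and
# Condition.BinaryOp tables above; the tag names and values are made up.
criteria = {
    "le": {                                   # Criteria.le: a LogicalExpression node
        "op": "LOGICAL_OP_AND",
        "left": {
            "condition": {                    # Criteria.condition: a leaf Condition
                "name": "service_name",
                "op": "BINARY_OP_EQ",
                "value": {"str": {"value": "svc_a"}},   # TagValue.str -> Str.value
            }
        },
        "right": {
            "condition": {
                "name": "duration",
                "op": "BINARY_OP_GT",
                "value": {"int": {"value": 100}},       # TagValue.int -> Int.value
            }
        },
    }
}

print(json.dumps(criteria, indent=2))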
\nIndexRuleBinding IndexRuleBinding is a bridge to connect several IndexRules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, which provides flexible strategies to control how to generate time series indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of this binding   rules string repeated rules refer to the IndexRule   subject Subject  subject indicates the subject of binding action   begin_at google.protobuf.Timestamp  begin_at_nanoseconds is the timestamp, after which the binding will be active   expire_at google.protobuf.Timestamp  expire_at_nanoseconds is the timestamp, after which the binding will be inactive. expire_at_nanoseconds must be larger than begin_at_nanoseconds   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRuleBinding is updated    \nMeasure Measure intends to store data points\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a measure   tag_families TagFamilySpec repeated tag_families are for filtering measures   fields FieldSpec repeated fields denote measure values   entity Entity  entity indicates which tags will be used to generate a series and shard a measure   interval string  interval indicates how frequently to send a data point. Valid time units are \u0026quot;ns\u0026quot;, \u0026quot;us\u0026quot; (or \u0026quot;µs\u0026quot;), \u0026quot;ms\u0026quot;, \u0026quot;s\u0026quot;, \u0026quot;m\u0026quot;, \u0026quot;h\u0026quot;, \u0026quot;d\u0026quot;.   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nStream Stream intends to store streaming data, for example, traces or logs\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a trace series   tag_families TagFamilySpec repeated tag_families   entity Entity  entity indicates how to generate a series and shard a stream   updated_at google.protobuf.Timestamp  updated_at indicates when the stream is updated    \nSubject Subject defines which stream or measure would generate indices\n   Field Type Label Description     catalog banyandb.common.v1.Catalog  catalog is where the subject belongs to todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   name string  name refers to a stream or measure in a particular catalog    \nTagFamilySpec    Field Type Label Description     name string     tags TagSpec repeated tags define accepted tags    \nTagSpec    Field Type Label Description     name string     type TagType     indexed_only bool  indexed_only indicates whether the tag is stored. True: it's indexed only, but not stored. False: it's stored and indexed    \nTopNAggregation TopNAggregation generates offline TopN statistics for a measure's TopN approximation\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of an aggregation   source_measure banyandb.common.v1.Metadata  source_measure denotes the data source of this aggregation   field_name string  field_name is the name of the field used for ranking   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields. ASC: bottomN; DESC: topN; UNSPECIFIED: topN + bottomN todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   group_by_tag_names string repeated group_by_tag_names groups data points into statistical counters   criteria banyandb.model.v1.Criteria  criteria 
select partial data points from measure   counters_number int32  counters_number sets the number of counters to be tracked. The default value is 1000   lru_size int32  lru_size defines how much entry is allowed to be maintained in the memory   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nCompressionMethod    Name Number Description     COMPRESSION_METHOD_UNSPECIFIED 0    COMPRESSION_METHOD_ZSTD 1     \nEncodingMethod    Name Number Description     ENCODING_METHOD_UNSPECIFIED 0    ENCODING_METHOD_GORILLA 1     \nFieldType    Name Number Description     FIELD_TYPE_UNSPECIFIED 0    FIELD_TYPE_STRING 1    FIELD_TYPE_INT 2    FIELD_TYPE_DATA_BINARY 3    FIELD_TYPE_FLOAT 4     \nIndexRule.Analyzer    Name Number Description     ANALYZER_UNSPECIFIED 0    ANALYZER_KEYWORD 1 Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.   ANALYZER_STANDARD 2 Standard analyzer provides grammar based tokenization   ANALYZER_SIMPLE 3 Simple analyzer breaks text into tokens at any non-letter character, such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, and changes uppercase to lowercase.    \nIndexRule.Type Type determine the index structure under the hood\n   Name Number Description     TYPE_UNSPECIFIED 0    TYPE_INVERTED 1     \nTagType    Name Number Description     TAG_TYPE_UNSPECIFIED 0    TAG_TYPE_STRING 1    TAG_TYPE_INT 2    TAG_TYPE_STRING_ARRAY 3    TAG_TYPE_INT_ARRAY 4    TAG_TYPE_DATA_BINARY 5     \nTop\nbanyandb/database/v1/rpc.proto \nGroupRegistryServiceCreateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceCreateResponse \nGroupRegistryServiceDeleteRequest    Field Type Label Description     group string      \nGroupRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nGroupRegistryServiceExistRequest    Field Type Label Description     group string      \nGroupRegistryServiceExistResponse    Field Type Label Description     has_group bool      \nGroupRegistryServiceGetRequest    Field Type Label Description     group string      \nGroupRegistryServiceGetResponse    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceListRequest \nGroupRegistryServiceListResponse    Field Type Label Description     group banyandb.common.v1.Group repeated     \nGroupRegistryServiceUpdateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceUpdateResponse \nIndexRuleBindingRegistryServiceCreateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceCreateResponse \nIndexRuleBindingRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleBindingRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule_binding bool      \nIndexRuleBindingRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceGetResponse    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceListRequest    Field Type Label Description     group 
string      \nIndexRuleBindingRegistryServiceListResponse    Field Type Label Description     index_rule_binding IndexRuleBinding repeated     \nIndexRuleBindingRegistryServiceUpdateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceUpdateResponse \nIndexRuleRegistryServiceCreateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceCreateResponse \nIndexRuleRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule bool      \nIndexRuleRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceGetResponse    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleRegistryServiceListResponse    Field Type Label Description     index_rule IndexRule repeated     \nIndexRuleRegistryServiceUpdateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceUpdateResponse \nMeasureRegistryServiceCreateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nMeasureRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nMeasureRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_measure bool      \nMeasureRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceGetResponse    Field Type Label Description     measure Measure      \nMeasureRegistryServiceListRequest    Field Type Label Description     group string      \nMeasureRegistryServiceListResponse    Field Type Label Description     measure Measure repeated     \nMeasureRegistryServiceUpdateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceCreateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nStreamRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_stream bool      \nStreamRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceGetResponse    Field Type Label Description     stream Stream      \nStreamRegistryServiceListRequest   
 Field Type Label Description     group string      \nStreamRegistryServiceListResponse    Field Type Label Description     stream Stream repeated     \nStreamRegistryServiceUpdateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nTopNAggregationRegistryServiceCreateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceCreateResponse \nTopNAggregationRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nTopNAggregationRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_top_n_aggregation bool      \nTopNAggregationRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceGetResponse    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceListRequest    Field Type Label Description     group string      \nTopNAggregationRegistryServiceListResponse    Field Type Label Description     top_n_aggregation TopNAggregation repeated     \nTopNAggregationRegistryServiceUpdateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceUpdateResponse \nGroupRegistryService    Method Name Request Type Response Type Description     Create GroupRegistryServiceCreateRequest GroupRegistryServiceCreateResponse    Update GroupRegistryServiceUpdateRequest GroupRegistryServiceUpdateResponse    Delete GroupRegistryServiceDeleteRequest GroupRegistryServiceDeleteResponse    Get GroupRegistryServiceGetRequest GroupRegistryServiceGetResponse    List GroupRegistryServiceListRequest GroupRegistryServiceListResponse    Exist GroupRegistryServiceExistRequest GroupRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nIndexRuleBindingRegistryService    Method Name Request Type Response Type Description     Create IndexRuleBindingRegistryServiceCreateRequest IndexRuleBindingRegistryServiceCreateResponse    Update IndexRuleBindingRegistryServiceUpdateRequest IndexRuleBindingRegistryServiceUpdateResponse    Delete IndexRuleBindingRegistryServiceDeleteRequest IndexRuleBindingRegistryServiceDeleteResponse    Get IndexRuleBindingRegistryServiceGetRequest IndexRuleBindingRegistryServiceGetResponse    List IndexRuleBindingRegistryServiceListRequest IndexRuleBindingRegistryServiceListResponse    Exist IndexRuleBindingRegistryServiceExistRequest IndexRuleBindingRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. 
Please use HEAD method to touch Get instead    \nIndexRuleRegistryService    Method Name Request Type Response Type Description     Create IndexRuleRegistryServiceCreateRequest IndexRuleRegistryServiceCreateResponse    Update IndexRuleRegistryServiceUpdateRequest IndexRuleRegistryServiceUpdateResponse    Delete IndexRuleRegistryServiceDeleteRequest IndexRuleRegistryServiceDeleteResponse    Get IndexRuleRegistryServiceGetRequest IndexRuleRegistryServiceGetResponse    List IndexRuleRegistryServiceListRequest IndexRuleRegistryServiceListResponse    Exist IndexRuleRegistryServiceExistRequest IndexRuleRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nMeasureRegistryService    Method Name Request Type Response Type Description     Create MeasureRegistryServiceCreateRequest MeasureRegistryServiceCreateResponse    Update MeasureRegistryServiceUpdateRequest MeasureRegistryServiceUpdateResponse    Delete MeasureRegistryServiceDeleteRequest MeasureRegistryServiceDeleteResponse    Get MeasureRegistryServiceGetRequest MeasureRegistryServiceGetResponse    List MeasureRegistryServiceListRequest MeasureRegistryServiceListResponse    Exist MeasureRegistryServiceExistRequest MeasureRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nStreamRegistryService    Method Name Request Type Response Type Description     Create StreamRegistryServiceCreateRequest StreamRegistryServiceCreateResponse    Update StreamRegistryServiceUpdateRequest StreamRegistryServiceUpdateResponse    Delete StreamRegistryServiceDeleteRequest StreamRegistryServiceDeleteResponse    Get StreamRegistryServiceGetRequest StreamRegistryServiceGetResponse    List StreamRegistryServiceListRequest StreamRegistryServiceListResponse    Exist StreamRegistryServiceExistRequest StreamRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTopNAggregationRegistryService    Method Name Request Type Response Type Description     Create TopNAggregationRegistryServiceCreateRequest TopNAggregationRegistryServiceCreateResponse    Update TopNAggregationRegistryServiceUpdateRequest TopNAggregationRegistryServiceUpdateResponse    Delete TopNAggregationRegistryServiceDeleteRequest TopNAggregationRegistryServiceDeleteResponse    Get TopNAggregationRegistryServiceGetRequest TopNAggregationRegistryServiceGetResponse    List TopNAggregationRegistryServiceListRequest TopNAggregationRegistryServiceListResponse    Exist TopNAggregationRegistryServiceExistRequest TopNAggregationRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTop\nbanyandb/measure/v1/query.proto \nDataPoint DataPoint is stored in Measures\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamily repeated tag_families contains tags selected in the projection   fields DataPoint.Field repeated fields contains fields selected in the projection    \nDataPoint.Field    Field Type Label Description     name string     value banyandb.model.v1.FieldValue      \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     groups string repeated groups indicate where the data points are stored.   name string  name is the identity of a measure.   
time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   criteria banyandb.model.v1.Criteria  tag_families are indexed.   tag_projection banyandb.model.v1.TagProjection  tag_projection can be used to select tags of the data points in the response   field_projection QueryRequest.FieldProjection  field_projection can be used to select fields of the data points in the response   group_by QueryRequest.GroupBy  group_by groups data points based on their field value for a specific tag and uses field_name as the projection name   agg QueryRequest.Aggregation  agg aggregates data points based on a field   top QueryRequest.Top  top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by's output   offset uint32  offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top's output   limit uint32  limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top's output   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a tag.    \nQueryRequest.Aggregation    Field Type Label Description     function banyandb.model.v1.AggregationFunction     field_name string  field_name must be one of the fields indicated by the field_projection    \nQueryRequest.FieldProjection    Field Type Label Description     names string repeated     \nQueryRequest.GroupBy    Field Type Label Description     tag_projection banyandb.model.v1.TagProjection  tag_projection must be a subset of the tag_projection of QueryRequest   field_name string  field_name must be one of the fields indicated by field_projection    \nQueryRequest.Top    Field Type Label Description     number int32  number sets how many items should be returned   field_name string  field_name must be one of the fields indicated by the field_projection   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields. ASC: bottomN; DESC: topN; UNSPECIFIED: topN    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     data_points DataPoint repeated data_points are the actual data returned    \nTop\nbanyandb/measure/v1/topn.proto \nTopNList TopNList contains a series of topN items\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   items TopNList.Item repeated items contains the top-n items in a list    \nTopNList.Item    Field Type Label Description     entity banyandb.model.v1.Tag repeated    value banyandb.model.v1.FieldValue      \nTopNRequest TopNRequest is the request contract for query.\n   Field Type Label Description     groups string repeated groups indicate where the data points are stored.   name string  name is the identity of a measure.   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   top_n int32  top_n sets how many items should be returned in each list.   agg banyandb.model.v1.AggregationFunction  agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only   conditions banyandb.model.v1.Condition repeated conditions select counters. Only equals are acceptable.   
field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields    \nTopNResponse TopNResponse is the response for a query to the Query module.\n   Field Type Label Description     lists TopNList repeated lists contain a series topN lists ranked by timestamp if agg_func in query request is specified, lists' size should be one.    \nTop\nbanyandb/model/v1/write.proto \nStatus Status is the response status for write\n   Name Number Description     STATUS_UNSPECIFIED 0    STATUS_SUCCEED 1    STATUS_INVALID_TIMESTAMP 2    STATUS_NOT_FOUND 3    STATUS_EXPIRED_SCHEMA 4    STATUS_INTERNAL_ERROR 5     \nTop\nbanyandb/measure/v1/write.proto \nDataPointValue DataPointValue is the data point for writing. It only contains values.\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the measure schema   fields banyandb.model.v1.FieldValue repeated the order of fields match the measure schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest WriteRequest is the request contract for write\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   data_point DataPointValue  the data_point is required.   message_id uint64  the message_id is required.    \nWriteResponse WriteResponse is the response contract for write\n   Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/measure/v1/rpc.proto \nMeasureService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream    TopN TopNRequest TopNResponse     \nTop\nbanyandb/property/v1/property.proto \nMetadata Metadata is for multi-tenant use\n   Field Type Label Description     container banyandb.common.v1.Metadata  container is created when it receives the first property   id string  id identifies a property    \nProperty Property stores the user defined data\n   Field Type Label Description     metadata Metadata  metadata is the identity of a property   tags banyandb.model.v1.Tag repeated tag stores the content of a property   updated_at google.protobuf.Timestamp  updated_at indicates when the property is updated   lease_id int64  readonly. lease_id is the ID of the lease that attached to key.   ttl string  ttl indicates the time to live of the property. It's a string in the format of \u0026quot;1h\u0026quot;, \u0026quot;2m\u0026quot;, \u0026quot;3s\u0026quot;, \u0026quot;1500ms\u0026quot;. It defaults to 0s, which means the property never expires. The minimum allowed ttl is 1s.    \nTop\nbanyandb/property/v1/rpc.proto \nApplyRequest    Field Type Label Description     property Property     strategy ApplyRequest.Strategy  strategy indicates how to update a property. It defaults to STRATEGY_MERGE    \nApplyResponse    Field Type Label Description     created bool  created indicates whether the property existed. True: the property is absent. False: the property existed.   
tags_num uint32     lease_id int64      \nDeleteRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nDeleteResponse    Field Type Label Description     deleted bool     tags_num uint32      \nGetRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nGetResponse    Field Type Label Description     property Property      \nKeepAliveRequest    Field Type Label Description     lease_id int64      \nKeepAliveResponse \nListRequest    Field Type Label Description     container banyandb.common.v1.Metadata     ids string repeated    tags string repeated     \nListResponse    Field Type Label Description     property Property repeated     \nApplyRequest.Strategy    Name Number Description     STRATEGY_UNSPECIFIED 0    STRATEGY_MERGE 1    STRATEGY_REPLACE 2     \nPropertyService    Method Name Request Type Response Type Description     Apply ApplyRequest ApplyResponse Apply creates a property if it's absent, or update a existed one based on a strategy.   Delete DeleteRequest DeleteResponse    Get GetRequest GetResponse    List ListRequest ListResponse    KeepAlive KeepAliveRequest KeepAliveResponse     \nTop\nbanyandb/stream/v1/query.proto \nElement Element represents (stream context) a Span defined in Google Dapper paper or equivalently a Segment in Skywalking. (Log context) a log\n   Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp represents a millisecond 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamily repeated fields contains all indexed Field. Some typical names, - stream_id - duration - service_name - service_instance_id - end_time_milliseconds    \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     groups string repeated groups indicate where the elements are stored.   name string  name is the identity of a stream.   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds. In the context of stream, it represents the range of the startTime for spans/segments, while in the context of Log, it means the range of the timestamp(s) for logs. it is always recommended to specify time range for performance reason   offset uint32  offset is used to support pagination, together with the following limit   limit uint32  limit is used to impose a boundary on the number of records being returned   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a field. So far, only fields in the type of Integer are supported   criteria banyandb.model.v1.Criteria  tag_families are indexed.   projection banyandb.model.v1.TagProjection  projection can be used to select the key names of the element in the response    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     elements Element repeated elements are the actual data returned    \nTop\nbanyandb/stream/v1/write.proto \nElementValue    Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds. 
It represents 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the stream schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   element ElementValue  the element is required.   message_id uint64  the message_id is required.    \nWriteResponse    Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/stream/v1/rpc.proto \nStreamService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream     Scalar Value Types    .proto Type Notes C++ Java Python Go C# PHP Ruby     double  double double float float64 double float Float   float  float float float float32 float float Float   int32 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. int32 int int int32 int integer Bignum or Fixnum (as required)   int64 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. int64 long int/long int64 long integer/string Bignum   uint32 Uses variable-length encoding. uint32 int int/long uint32 uint integer Bignum or Fixnum (as required)   uint64 Uses variable-length encoding. uint64 long int/long uint64 ulong integer/string Bignum or Fixnum (as required)   sint32 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. int32 int int int32 int integer Bignum or Fixnum (as required)   sint64 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. int64 long int/long int64 long integer/string Bignum   fixed32 Always four bytes. More efficient than uint32 if values are often greater than 2^28. uint32 int int uint32 uint integer Bignum or Fixnum (as required)   fixed64 Always eight bytes. More efficient than uint64 if values are often greater than 2^56. uint64 long int/long uint64 ulong integer/string Bignum   sfixed32 Always four bytes. int32 int int int32 int integer Bignum or Fixnum (as required)   sfixed64 Always eight bytes. int64 long int/long int64 long integer/string Bignum   bool  bool boolean boolean bool bool boolean TrueClass/FalseClass   string A string must always contain UTF-8 encoded or 7-bit ASCII text. string String str/unicode string string string String (UTF-8)   bytes May contain any arbitrary sequence of bytes. 
string ByteString str []byte ByteString string String (ASCII-8BIT)    ","excerpt":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest …","ref":"/docs/skywalking-banyandb/next/api-reference/","title":"Protocol Documentation"},{"body":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest\n  SendResponse\n  Service\n    banyandb/common/v1/common.proto\n  Group\n  IntervalRule\n  Metadata\n  ResourceOpts\n  Catalog\n  IntervalRule.Unit\n    banyandb/database/v1/database.proto\n  Node\n  Shard\n  Role\n    banyandb/model/v1/common.proto\n  FieldValue\n  Float\n  Int\n  IntArray\n  Str\n  StrArray\n  TagFamilyForWrite\n  TagValue\n  AggregationFunction\n    banyandb/model/v1/query.proto\n  Condition\n  Criteria\n  LogicalExpression\n  QueryOrder\n  Tag\n  TagFamily\n  TagProjection\n  TagProjection.TagFamily\n  TimeRange\n  Condition.BinaryOp\n  LogicalExpression.LogicalOp\n  Sort\n    banyandb/database/v1/schema.proto\n  Entity\n  FieldSpec\n  IndexRule\n  IndexRuleBinding\n  Measure\n  Stream\n  Subject\n  TagFamilySpec\n  TagSpec\n  TopNAggregation\n  CompressionMethod\n  EncodingMethod\n  FieldType\n  IndexRule.Analyzer\n  IndexRule.Location\n  IndexRule.Type\n  TagType\n    banyandb/database/v1/rpc.proto\n  GroupRegistryServiceCreateRequest\n  GroupRegistryServiceCreateResponse\n  GroupRegistryServiceDeleteRequest\n  GroupRegistryServiceDeleteResponse\n  GroupRegistryServiceExistRequest\n  GroupRegistryServiceExistResponse\n  GroupRegistryServiceGetRequest\n  GroupRegistryServiceGetResponse\n  GroupRegistryServiceListRequest\n  GroupRegistryServiceListResponse\n  GroupRegistryServiceUpdateRequest\n  GroupRegistryServiceUpdateResponse\n  IndexRuleBindingRegistryServiceCreateRequest\n  IndexRuleBindingRegistryServiceCreateResponse\n  IndexRuleBindingRegistryServiceDeleteRequest\n  IndexRuleBindingRegistryServiceDeleteResponse\n  IndexRuleBindingRegistryServiceExistRequest\n  IndexRuleBindingRegistryServiceExistResponse\n  IndexRuleBindingRegistryServiceGetRequest\n  IndexRuleBindingRegistryServiceGetResponse\n  IndexRuleBindingRegistryServiceListRequest\n  IndexRuleBindingRegistryServiceListResponse\n  IndexRuleBindingRegistryServiceUpdateRequest\n  IndexRuleBindingRegistryServiceUpdateResponse\n  IndexRuleRegistryServiceCreateRequest\n  IndexRuleRegistryServiceCreateResponse\n  IndexRuleRegistryServiceDeleteRequest\n  IndexRuleRegistryServiceDeleteResponse\n  IndexRuleRegistryServiceExistRequest\n  IndexRuleRegistryServiceExistResponse\n  IndexRuleRegistryServiceGetRequest\n  IndexRuleRegistryServiceGetResponse\n  IndexRuleRegistryServiceListRequest\n  IndexRuleRegistryServiceListResponse\n  IndexRuleRegistryServiceUpdateRequest\n  IndexRuleRegistryServiceUpdateResponse\n  MeasureRegistryServiceCreateRequest\n  MeasureRegistryServiceCreateResponse\n  MeasureRegistryServiceDeleteRequest\n  MeasureRegistryServiceDeleteResponse\n  MeasureRegistryServiceExistRequest\n  MeasureRegistryServiceExistResponse\n  MeasureRegistryServiceGetRequest\n  MeasureRegistryServiceGetResponse\n  MeasureRegistryServiceListRequest\n  MeasureRegistryServiceListResponse\n  MeasureRegistryServiceUpdateRequest\n  MeasureRegistryServiceUpdateResponse\n  StreamRegistryServiceCreateRequest\n  StreamRegistryServiceCreateResponse\n  StreamRegistryServiceDeleteRequest\n  StreamRegistryServiceDeleteResponse\n  StreamRegistryServiceExistRequest\n  StreamRegistryServiceExistResponse\n  StreamRegistryServiceGetRequest\n  
StreamRegistryServiceGetResponse\n  StreamRegistryServiceListRequest\n  StreamRegistryServiceListResponse\n  StreamRegistryServiceUpdateRequest\n  StreamRegistryServiceUpdateResponse\n  TopNAggregationRegistryServiceCreateRequest\n  TopNAggregationRegistryServiceCreateResponse\n  TopNAggregationRegistryServiceDeleteRequest\n  TopNAggregationRegistryServiceDeleteResponse\n  TopNAggregationRegistryServiceExistRequest\n  TopNAggregationRegistryServiceExistResponse\n  TopNAggregationRegistryServiceGetRequest\n  TopNAggregationRegistryServiceGetResponse\n  TopNAggregationRegistryServiceListRequest\n  TopNAggregationRegistryServiceListResponse\n  TopNAggregationRegistryServiceUpdateRequest\n  TopNAggregationRegistryServiceUpdateResponse\n  GroupRegistryService\n  IndexRuleBindingRegistryService\n  IndexRuleRegistryService\n  MeasureRegistryService\n  StreamRegistryService\n  TopNAggregationRegistryService\n    banyandb/measure/v1/query.proto\n DataPoint DataPoint.Field QueryRequest QueryRequest.Aggregation QueryRequest.FieldProjection QueryRequest.GroupBy QueryRequest.Top QueryResponse    banyandb/measure/v1/topn.proto\n TopNList TopNList.Item TopNRequest TopNResponse    banyandb/model/v1/write.proto\n Status    banyandb/measure/v1/write.proto\n DataPointValue InternalWriteRequest WriteRequest WriteResponse    banyandb/measure/v1/rpc.proto\n MeasureService    banyandb/property/v1/property.proto\n Metadata Property    banyandb/property/v1/rpc.proto\n  ApplyRequest\n  ApplyResponse\n  DeleteRequest\n  DeleteResponse\n  GetRequest\n  GetResponse\n  KeepAliveRequest\n  KeepAliveResponse\n  ListRequest\n  ListResponse\n  ApplyRequest.Strategy\n  PropertyService\n    banyandb/stream/v1/query.proto\n Element QueryRequest QueryResponse    banyandb/stream/v1/write.proto\n ElementValue InternalWriteRequest WriteRequest WriteResponse    banyandb/stream/v1/rpc.proto\n StreamService    Scalar Value Types\n  \nTop\nbanyandb/cluster/v1/rpc.proto \nSendRequest    Field Type Label Description     topic string     message_id uint64     body google.protobuf.Any      \nSendResponse    Field Type Label Description     message_id uint64     error string     body google.protobuf.Any      \nService    Method Name Request Type Response Type Description     Send SendRequest stream SendResponse stream     \nTop\nbanyandb/common/v1/common.proto \nGroup Group is an internal object for Group management\n   Field Type Label Description     metadata Metadata  metadata define the group's identity   catalog Catalog  catalog denotes which type of data the group contains   resource_opts ResourceOpts  resourceOpts indicates the structure of the underlying kv storage   updated_at google.protobuf.Timestamp  updated_at indicates when resources of the group are updated    \nIntervalRule IntervalRule is a structured duration\n   Field Type Label Description     unit IntervalRule.Unit  unit can only be UNIT_HOUR or UNIT_DAY   num uint32      \nMetadata Metadata is for multi-tenant, multi-model use\n   Field Type Label Description     group string  group contains a set of options, like retention policy, max   name string  name of the entity   id uint32     create_revision int64  readonly. create_revision is the revision of last creation on this key.   mod_revision int64  readonly. mod_revision is the revision of last modification on this key.    
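The cluster-level Service.Send call above is a bidirectional stream of SendRequest/SendResponse envelopes that route internal messages by topic and carry an Any-packed body. The sketch below only illustrates the envelope shape implied by the SendRequest table; the topic name and payload are invented, and matching responses to requests via message_id is an assumption drawn from the surrounding write contracts, not something this table states.

import json

# Illustrative SendRequest envelope. Field names follow the SendRequest table
# above; the topic and payload are hypothetical, and the Any body is shown in
# the generic type_url/value shape rather than any BanyanDB-specific encoding.
send_request = {
    "topic": "example-internal-topic",        # hypothetical routing topic
    "message_id": 42,                         # caller-chosen id, assumed to correlate with SendResponse.message_id
    "body": {
        "type_url": "type.example.com/ExamplePayload",   # hypothetical packed message type
        "value": "<serialized payload bytes>",           # placeholder for the packed bytes
    },
}

print(json.dumps(send_request, indent=2))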
\nResourceOpts    Field Type Label Description     shard_num uint32  shard_num is the number of shards   block_interval IntervalRule  block_interval indicates the length of a block block_interval should be less than or equal to segment_interval   segment_interval IntervalRule  segment_interval indicates the length of a segment   ttl IntervalRule  ttl indicates time to live, how long the data will be cached    \nCatalog    Name Number Description     CATALOG_UNSPECIFIED 0    CATALOG_STREAM 1    CATALOG_MEASURE 2     \nIntervalRule.Unit    Name Number Description     UNIT_UNSPECIFIED 0    UNIT_HOUR 1    UNIT_DAY 2     \nTop\nbanyandb/database/v1/database.proto \nNode    Field Type Label Description     metadata banyandb.common.v1.Metadata     roles Role repeated    grpc_address string     http_address string     created_at google.protobuf.Timestamp      \nShard    Field Type Label Description     id uint64     metadata banyandb.common.v1.Metadata     catalog banyandb.common.v1.Catalog     node string     total uint32     updated_at google.protobuf.Timestamp     created_at google.protobuf.Timestamp      \nRole    Name Number Description     ROLE_UNSPECIFIED 0    ROLE_META 1    ROLE_DATA 2    ROLE_LIAISON 3     \nTop\nbanyandb/model/v1/common.proto \nFieldValue    Field Type Label Description     null google.protobuf.NullValue     str Str     int Int     binary_data bytes     float Float      \nFloat    Field Type Label Description     value double      \nInt    Field Type Label Description     value int64      \nIntArray    Field Type Label Description     value int64 repeated     \nStr    Field Type Label Description     value string      \nStrArray    Field Type Label Description     value string repeated     \nTagFamilyForWrite    Field Type Label Description     tags TagValue repeated     \nTagValue    Field Type Label Description     null google.protobuf.NullValue     str Str     str_array StrArray     int Int     int_array IntArray     binary_data bytes      \nAggregationFunction    Name Number Description     AGGREGATION_FUNCTION_UNSPECIFIED 0    AGGREGATION_FUNCTION_MEAN 1    AGGREGATION_FUNCTION_MAX 2    AGGREGATION_FUNCTION_MIN 3    AGGREGATION_FUNCTION_COUNT 4    AGGREGATION_FUNCTION_SUM 5     \nTop\nbanyandb/model/v1/query.proto \nCondition Condition consists of the query condition with a single binary operator to be imposed For 1:1 BinaryOp, values in condition must be an array with length = 1, while for 1:N BinaryOp, values can be an array with length \u0026gt;= 1.\n   Field Type Label Description     name string     op Condition.BinaryOp     value TagValue      \nCriteria tag_families are indexed.\n   Field Type Label Description     le LogicalExpression     condition Condition      \nLogicalExpression LogicalExpression supports logical operation\n   Field Type Label Description     op LogicalExpression.LogicalOp  op is a logical operation   left Criteria     right Criteria      \nQueryOrder QueryOrder means a Sort operation to be done for a given index rule. The index_rule_name refers to the name of a index rule bound to the subject.\n   Field Type Label Description     index_rule_name string     sort Sort      \nTag Pair is the building block of a record which is equivalent to a key-value pair. In the context of Trace, it could be metadata of a trace such as service_name, service_instance, etc. Besides, other tags are organized in key-value pair in the underlying storage layer. 
One should notice that the values can be a multi-value.\n   Field Type Label Description     key string     value TagValue      \nTagFamily    Field Type Label Description     name string     tags Tag repeated     \nTagProjection TagProjection is used to select the names of keys to be returned.\n   Field Type Label Description     tag_families TagProjection.TagFamily repeated     \nTagProjection.TagFamily    Field Type Label Description     name string     tags string repeated     \nTimeRange TimeRange is a range query for uint64, the range here follows left-inclusive and right-exclusive rule, i.e. [begin, end) if both edges exist\n   Field Type Label Description     begin google.protobuf.Timestamp     end google.protobuf.Timestamp      \nCondition.BinaryOp BinaryOp specifies the operation imposed to the given query condition For EQ, NE, LT, GT, LE and GE, only one operand should be given, i.e. one-to-one relationship. HAVING and NOT_HAVING allow multi-value to be the operand such as array/vector, i.e. one-to-many relationship. For example, \u0026quot;keyA\u0026quot; contains \u0026quot;valueA\u0026quot; and \u0026quot;valueB\u0026quot; MATCH performances a full-text search if the tag is analyzed. The string value applies to the same analyzer as the tag, but string array value does not. Each item in a string array is seen as a token instead of a query expression.\n   Name Number Description     BINARY_OP_UNSPECIFIED 0    BINARY_OP_EQ 1    BINARY_OP_NE 2    BINARY_OP_LT 3    BINARY_OP_GT 4    BINARY_OP_LE 5    BINARY_OP_GE 6    BINARY_OP_HAVING 7    BINARY_OP_NOT_HAVING 8    BINARY_OP_IN 9    BINARY_OP_NOT_IN 10    BINARY_OP_MATCH 11     \nLogicalExpression.LogicalOp    Name Number Description     LOGICAL_OP_UNSPECIFIED 0    LOGICAL_OP_AND 1    LOGICAL_OP_OR 2     \nSort    Name Number Description     SORT_UNSPECIFIED 0    SORT_DESC 1    SORT_ASC 2     \nTop\nbanyandb/database/v1/schema.proto \nEntity    Field Type Label Description     tag_names string repeated     \nFieldSpec FieldSpec is the specification of field\n   Field Type Label Description     name string  name is the identity of a field   field_type FieldType  field_type denotes the type of field value   encoding_method EncodingMethod  encoding_method indicates how to encode data during writing   compression_method CompressionMethod  compression_method indicates how to compress data during writing    \nIndexRule IndexRule defines how to generate indices based on tags and the index type IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata define the rule's identity   tags string repeated tags are the combination that refers to an indexed object If the elements in tags are more than 1, the object will generate a multi-tag index Caveat: All tags in a multi-tag MUST have an identical IndexType   type IndexRule.Type  type is the IndexType of this IndexObject.   location IndexRule.Location  location indicates where to store index.   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRule is updated   analyzer IndexRule.Analyzer  analyzer analyzes tag value to support the full-text searching for TYPE_INVERTED indices.    
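\nAs a hedged illustration only (the group, rule name and tag are invented), an IndexRule built from the fields documented above might be expressed with the proto3 JSON mapping as:
# Hypothetical sketch: an IndexRule that builds an inverted index on a single
# tag. Enum value names follow the tables in this reference; all identifiers
# here are examples, not prescribed values.
index_rule = {
    "metadata": {"group": "default", "name": "trace_id"},
    "tags": ["trace_id"],           # the tag(s) the index is generated from
    "type": "TYPE_INVERTED",        # IndexRule.Type
    "location": "LOCATION_SERIES",  # IndexRule.Location
    "analyzer": "ANALYZER_UNSPECIFIED",
}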
\nIndexRuleBinding IndexRuleBinding is a bridge to connect several IndexRules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, which provides flexible strategies to control how to generate time series indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of this binding   rules string repeated rules refers to the IndexRule   subject Subject  subject indicates the subject of binding action   begin_at google.protobuf.Timestamp  begin_at_nanoseconds is the timestamp, after which the binding will be active   expire_at google.protobuf.Timestamp  expire_at_nanoseconds is the timestamp, after which the binding will be inactive. expire_at_nanoseconds must be larger than begin_at_nanoseconds   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRuleBinding is updated    \nMeasure Measure intends to store data points\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a measure   tag_families TagFamilySpec repeated tag_families are for filtering measures   fields FieldSpec repeated fields denote measure values   entity Entity  entity indicates which tags will be used to generate a series and shard a measure   interval string  interval indicates how frequently to send a data point. Valid time units are \u0026quot;ns\u0026quot;, \u0026quot;us\u0026quot; (or \u0026quot;µs\u0026quot;), \u0026quot;ms\u0026quot;, \u0026quot;s\u0026quot;, \u0026quot;m\u0026quot;, \u0026quot;h\u0026quot;, \u0026quot;d\u0026quot;.   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nStream Stream intends to store streaming data, for example, traces or logs\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a trace series   tag_families TagFamilySpec repeated tag_families   entity Entity  entity indicates how to generate a series and shard a stream   updated_at google.protobuf.Timestamp  updated_at indicates when the stream is updated    \nSubject Subject defines which stream or measure would generate indices\n   Field Type Label Description     catalog banyandb.common.v1.Catalog  catalog is where the subject belongs to todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   name string  name refers to a stream or measure in a particular catalog    \nTagFamilySpec    Field Type Label Description     name string     tags TagSpec repeated tags define accepted tags    \nTagSpec    Field Type Label Description     name string     type TagType     indexed_only bool  indexed_only indicates whether the tag is stored. True: it's indexed only, but not stored. False: it's stored and indexed    \nTopNAggregation TopNAggregation generates offline TopN statistics for a measure's TopN approximation\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of an aggregation   source_measure banyandb.common.v1.Metadata  source_measure denotes the data source of this aggregation   field_name string  field_name is the name of the field used for ranking   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN + bottomN todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   group_by_tag_names string repeated group_by_tag_names groups data points into statistical counters   criteria banyandb.model.v1.Criteria  criteria 
select partial data points from measure   counters_number int32  counters_number sets the number of counters to be tracked. The default value is 1000   lru_size int32  lru_size defines how much entry is allowed to be maintained in the memory   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nCompressionMethod    Name Number Description     COMPRESSION_METHOD_UNSPECIFIED 0    COMPRESSION_METHOD_ZSTD 1     \nEncodingMethod    Name Number Description     ENCODING_METHOD_UNSPECIFIED 0    ENCODING_METHOD_GORILLA 1     \nFieldType    Name Number Description     FIELD_TYPE_UNSPECIFIED 0    FIELD_TYPE_STRING 1    FIELD_TYPE_INT 2    FIELD_TYPE_DATA_BINARY 3    FIELD_TYPE_FLOAT 4     \nIndexRule.Analyzer    Name Number Description     ANALYZER_UNSPECIFIED 0    ANALYZER_KEYWORD 1 Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.   ANALYZER_STANDARD 2 Standard analyzer provides grammar based tokenization   ANALYZER_SIMPLE 3 Simple analyzer breaks text into tokens at any non-letter character, such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, and changes uppercase to lowercase.    \nIndexRule.Location    Name Number Description     LOCATION_UNSPECIFIED 0    LOCATION_SERIES 1    LOCATION_GLOBAL 2     \nIndexRule.Type Type determine the index structure under the hood\n   Name Number Description     TYPE_UNSPECIFIED 0    TYPE_TREE 1    TYPE_INVERTED 2     \nTagType    Name Number Description     TAG_TYPE_UNSPECIFIED 0    TAG_TYPE_STRING 1    TAG_TYPE_INT 2    TAG_TYPE_STRING_ARRAY 3    TAG_TYPE_INT_ARRAY 4    TAG_TYPE_DATA_BINARY 5     \nTop\nbanyandb/database/v1/rpc.proto \nGroupRegistryServiceCreateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceCreateResponse \nGroupRegistryServiceDeleteRequest    Field Type Label Description     group string      \nGroupRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nGroupRegistryServiceExistRequest    Field Type Label Description     group string      \nGroupRegistryServiceExistResponse    Field Type Label Description     has_group bool      \nGroupRegistryServiceGetRequest    Field Type Label Description     group string      \nGroupRegistryServiceGetResponse    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceListRequest \nGroupRegistryServiceListResponse    Field Type Label Description     group banyandb.common.v1.Group repeated     \nGroupRegistryServiceUpdateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceUpdateResponse \nIndexRuleBindingRegistryServiceCreateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceCreateResponse \nIndexRuleBindingRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleBindingRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule_binding bool      \nIndexRuleBindingRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceGetResponse    Field Type Label Description 
    index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleBindingRegistryServiceListResponse    Field Type Label Description     index_rule_binding IndexRuleBinding repeated     \nIndexRuleBindingRegistryServiceUpdateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceUpdateResponse \nIndexRuleRegistryServiceCreateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceCreateResponse \nIndexRuleRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule bool      \nIndexRuleRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceGetResponse    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleRegistryServiceListResponse    Field Type Label Description     index_rule IndexRule repeated     \nIndexRuleRegistryServiceUpdateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceUpdateResponse \nMeasureRegistryServiceCreateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nMeasureRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nMeasureRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_measure bool      \nMeasureRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceGetResponse    Field Type Label Description     measure Measure      \nMeasureRegistryServiceListRequest    Field Type Label Description     group string      \nMeasureRegistryServiceListResponse    Field Type Label Description     measure Measure repeated     \nMeasureRegistryServiceUpdateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceCreateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nStreamRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_stream bool      \nStreamRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata 
     \nStreamRegistryServiceGetResponse    Field Type Label Description     stream Stream      \nStreamRegistryServiceListRequest    Field Type Label Description     group string      \nStreamRegistryServiceListResponse    Field Type Label Description     stream Stream repeated     \nStreamRegistryServiceUpdateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nTopNAggregationRegistryServiceCreateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceCreateResponse \nTopNAggregationRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nTopNAggregationRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_top_n_aggregation bool      \nTopNAggregationRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceGetResponse    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceListRequest    Field Type Label Description     group string      \nTopNAggregationRegistryServiceListResponse    Field Type Label Description     top_n_aggregation TopNAggregation repeated     \nTopNAggregationRegistryServiceUpdateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceUpdateResponse \nGroupRegistryService    Method Name Request Type Response Type Description     Create GroupRegistryServiceCreateRequest GroupRegistryServiceCreateResponse    Update GroupRegistryServiceUpdateRequest GroupRegistryServiceUpdateResponse    Delete GroupRegistryServiceDeleteRequest GroupRegistryServiceDeleteResponse    Get GroupRegistryServiceGetRequest GroupRegistryServiceGetResponse    List GroupRegistryServiceListRequest GroupRegistryServiceListResponse    Exist GroupRegistryServiceExistRequest GroupRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nIndexRuleBindingRegistryService    Method Name Request Type Response Type Description     Create IndexRuleBindingRegistryServiceCreateRequest IndexRuleBindingRegistryServiceCreateResponse    Update IndexRuleBindingRegistryServiceUpdateRequest IndexRuleBindingRegistryServiceUpdateResponse    Delete IndexRuleBindingRegistryServiceDeleteRequest IndexRuleBindingRegistryServiceDeleteResponse    Get IndexRuleBindingRegistryServiceGetRequest IndexRuleBindingRegistryServiceGetResponse    List IndexRuleBindingRegistryServiceListRequest IndexRuleBindingRegistryServiceListResponse    Exist IndexRuleBindingRegistryServiceExistRequest IndexRuleBindingRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. 
Please use HEAD method to touch Get instead    \nIndexRuleRegistryService    Method Name Request Type Response Type Description     Create IndexRuleRegistryServiceCreateRequest IndexRuleRegistryServiceCreateResponse    Update IndexRuleRegistryServiceUpdateRequest IndexRuleRegistryServiceUpdateResponse    Delete IndexRuleRegistryServiceDeleteRequest IndexRuleRegistryServiceDeleteResponse    Get IndexRuleRegistryServiceGetRequest IndexRuleRegistryServiceGetResponse    List IndexRuleRegistryServiceListRequest IndexRuleRegistryServiceListResponse    Exist IndexRuleRegistryServiceExistRequest IndexRuleRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nMeasureRegistryService    Method Name Request Type Response Type Description     Create MeasureRegistryServiceCreateRequest MeasureRegistryServiceCreateResponse    Update MeasureRegistryServiceUpdateRequest MeasureRegistryServiceUpdateResponse    Delete MeasureRegistryServiceDeleteRequest MeasureRegistryServiceDeleteResponse    Get MeasureRegistryServiceGetRequest MeasureRegistryServiceGetResponse    List MeasureRegistryServiceListRequest MeasureRegistryServiceListResponse    Exist MeasureRegistryServiceExistRequest MeasureRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nStreamRegistryService    Method Name Request Type Response Type Description     Create StreamRegistryServiceCreateRequest StreamRegistryServiceCreateResponse    Update StreamRegistryServiceUpdateRequest StreamRegistryServiceUpdateResponse    Delete StreamRegistryServiceDeleteRequest StreamRegistryServiceDeleteResponse    Get StreamRegistryServiceGetRequest StreamRegistryServiceGetResponse    List StreamRegistryServiceListRequest StreamRegistryServiceListResponse    Exist StreamRegistryServiceExistRequest StreamRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTopNAggregationRegistryService    Method Name Request Type Response Type Description     Create TopNAggregationRegistryServiceCreateRequest TopNAggregationRegistryServiceCreateResponse    Update TopNAggregationRegistryServiceUpdateRequest TopNAggregationRegistryServiceUpdateResponse    Delete TopNAggregationRegistryServiceDeleteRequest TopNAggregationRegistryServiceDeleteResponse    Get TopNAggregationRegistryServiceGetRequest TopNAggregationRegistryServiceGetResponse    List TopNAggregationRegistryServiceListRequest TopNAggregationRegistryServiceListResponse    Exist TopNAggregationRegistryServiceExistRequest TopNAggregationRegistryServiceExistResponse     \nTop\nbanyandb/measure/v1/query.proto \nDataPoint DataPoint is stored in Measures\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamily repeated tag_families contains tags selected in the projection   fields DataPoint.Field repeated fields contains fields selected in the projection    \nDataPoint.Field    Field Type Label Description     name string     value banyandb.model.v1.FieldValue      \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   criteria banyandb.model.v1.Criteria  tag_families are indexed.   
tag_projection banyandb.model.v1.TagProjection  tag_projection can be used to select tags of the data points in the response   field_projection QueryRequest.FieldProjection  field_projection can be used to select fields of the data points in the response   group_by QueryRequest.GroupBy  group_by groups data points based on their field value for a specific tag and uses field_name as the projection name   agg QueryRequest.Aggregation  agg aggregates data points based on a field   top QueryRequest.Top  top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by's output   offset uint32  offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top's output   limit uint32  limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top's output   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a tag.    \nQueryRequest.Aggregation    Field Type Label Description     function banyandb.model.v1.AggregationFunction     field_name string  field_name must be one of the fields indicated by the field_projection    \nQueryRequest.FieldProjection    Field Type Label Description     names string repeated     \nQueryRequest.GroupBy    Field Type Label Description     tag_projection banyandb.model.v1.TagProjection  tag_projection must be a subset of the tag_projection of QueryRequest   field_name string  field_name must be one of the fields indicated by field_projection    \nQueryRequest.Top    Field Type Label Description     number int32  number sets how many items should be returned   field_name string  field_name must be one of the fields indicated by the field_projection   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     data_points DataPoint repeated data_points are the actual data returned    \nTop\nbanyandb/measure/v1/topn.proto \nTopNList TopNList contains a series of topN items\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   items TopNList.Item repeated items contains top-n items in a list    \nTopNList.Item    Field Type Label Description     entity banyandb.model.v1.Tag repeated    value banyandb.model.v1.FieldValue      \nTopNRequest TopNRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   top_n int32  top_n sets how many items should be returned in each list.   agg banyandb.model.v1.AggregationFunction  agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only   conditions banyandb.model.v1.Condition repeated criteria select counters. Only equals are acceptable.   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields    \nTopNResponse TopNResponse is the response for a query to the Query module.\n   Field Type Label Description     lists TopNList repeated lists contain a series of topN lists ranked by timestamp. If agg_func in the query request is specified, the lists' size should be one.    
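\nFor illustration only (the group, measure, tag and field names below are invented), a measure QueryRequest assembled from the fields documented above could look roughly like this in proto3 JSON form:
# Hypothetical sketch: query one hour of data points from a measure,
# projecting a single tag and a single field. TimeRange is [begin, end).
query_request = {
    "metadata": {"group": "sw_metric", "name": "service_cpm"},
    "time_range": {
        "begin": "2023-01-01T00:00:00Z",
        "end": "2023-01-01T01:00:00Z",
    },
    "tag_projection": {"tag_families": [{"name": "default", "tags": ["entity_id"]}]},
    "field_projection": {"names": ["value"]},
    "limit": 10,
}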
\nTop\nbanyandb/model/v1/write.proto \nStatus Status is the response status for write\n   Name Number Description     STATUS_UNSPECIFIED 0    STATUS_SUCCEED 1    STATUS_INVALID_TIMESTAMP 2    STATUS_NOT_FOUND 3    STATUS_EXPIRED_SCHEMA 4    STATUS_INTERNAL_ERROR 5     \nTop\nbanyandb/measure/v1/write.proto \nDataPointValue DataPointValue is the data point for writing. It only contains values.\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the measure schema   fields banyandb.model.v1.FieldValue repeated the order of fields match the measure schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest WriteRequest is the request contract for write\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   data_point DataPointValue  the data_point is required.   message_id uint64  the message_id is required.    \nWriteResponse WriteResponse is the response contract for write\n   Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/measure/v1/rpc.proto \nMeasureService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream    TopN TopNRequest TopNResponse     \nTop\nbanyandb/property/v1/property.proto \nMetadata Metadata is for multi-tenant use\n   Field Type Label Description     container banyandb.common.v1.Metadata  container is created when it receives the first property   id string  id identifies a property    \nProperty Property stores the user defined data\n   Field Type Label Description     metadata Metadata  metadata is the identity of a property   tags banyandb.model.v1.Tag repeated tag stores the content of a property   updated_at google.protobuf.Timestamp  updated_at indicates when the property is updated   lease_id int64  readonly. lease_id is the ID of the lease that attached to key.   ttl string  ttl indicates the time to live of the property. It's a string in the format of \u0026quot;1h\u0026quot;, \u0026quot;2m\u0026quot;, \u0026quot;3s\u0026quot;, \u0026quot;1500ms\u0026quot;. It defaults to 0s, which means the property never expires. The minimum allowed ttl is 1s.    \nTop\nbanyandb/property/v1/rpc.proto \nApplyRequest    Field Type Label Description     property Property     strategy ApplyRequest.Strategy  strategy indicates how to update a property. It defaults to STRATEGY_MERGE    \nApplyResponse    Field Type Label Description     created bool  created indicates whether the property existed. True: the property is absent. False: the property existed.   
tags_num uint32     lease_id int64      \nDeleteRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nDeleteResponse    Field Type Label Description     deleted bool     tags_num uint32      \nGetRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nGetResponse    Field Type Label Description     property Property      \nKeepAliveRequest    Field Type Label Description     lease_id int64      \nKeepAliveResponse \nListRequest    Field Type Label Description     container banyandb.common.v1.Metadata     ids string repeated    tags string repeated     \nListResponse    Field Type Label Description     property Property repeated     \nApplyRequest.Strategy    Name Number Description     STRATEGY_UNSPECIFIED 0    STRATEGY_MERGE 1    STRATEGY_REPLACE 2     \nPropertyService    Method Name Request Type Response Type Description     Apply ApplyRequest ApplyResponse Apply creates a property if it's absent, or update a existed one based on a strategy.   Delete DeleteRequest DeleteResponse    Get GetRequest GetResponse    List ListRequest ListResponse    KeepAlive KeepAliveRequest KeepAliveResponse     \nTop\nbanyandb/stream/v1/query.proto \nElement Element represents (stream context) a Span defined in Google Dapper paper or equivalently a Segment in Skywalking. (Log context) a log\n   Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp represents a millisecond 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamily repeated fields contains all indexed Field. Some typical names, - stream_id - duration - service_name - service_instance_id - end_time_milliseconds    \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds. In the context of stream, it represents the range of the startTime for spans/segments, while in the context of Log, it means the range of the timestamp(s) for logs. it is always recommended to specify time range for performance reason   offset uint32  offset is used to support pagination, together with the following limit   limit uint32  limit is used to impose a boundary on the number of records being returned   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a field. So far, only fields in the type of Integer are supported   criteria banyandb.model.v1.Criteria  tag_families are indexed.   projection banyandb.model.v1.TagProjection  projection can be used to select the key names of the element in the response    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     elements Element repeated elements are the actual data returned    \nTop\nbanyandb/stream/v1/write.proto \nElementValue    Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds. 
It represents 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the stream schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   element ElementValue  the element is required.   message_id uint64  the message_id is required.    \nWriteResponse    Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/stream/v1/rpc.proto \nStreamService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream     Scalar Value Types    .proto Type Notes C++ Java Python Go C# PHP Ruby     double  double double float float64 double float Float   float  float float float float32 float float Float   int32 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. int32 int int int32 int integer Bignum or Fixnum (as required)   int64 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. int64 long int/long int64 long integer/string Bignum   uint32 Uses variable-length encoding. uint32 int int/long uint32 uint integer Bignum or Fixnum (as required)   uint64 Uses variable-length encoding. uint64 long int/long uint64 ulong integer/string Bignum or Fixnum (as required)   sint32 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. int32 int int int32 int integer Bignum or Fixnum (as required)   sint64 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. int64 long int/long int64 long integer/string Bignum   fixed32 Always four bytes. More efficient than uint32 if values are often greater than 2^28. uint32 int int uint32 uint integer Bignum or Fixnum (as required)   fixed64 Always eight bytes. More efficient than uint64 if values are often greater than 2^56. uint64 long int/long uint64 ulong integer/string Bignum   sfixed32 Always four bytes. int32 int int int32 int integer Bignum or Fixnum (as required)   sfixed64 Always eight bytes. int64 long int/long int64 long integer/string Bignum   bool  bool boolean boolean bool bool boolean TrueClass/FalseClass   string A string must always contain UTF-8 encoded or 7-bit ASCII text. string String str/unicode string string string String (UTF-8)   bytes May contain any arbitrary sequence of bytes. string ByteString str []byte ByteString string String (ASCII-8BIT)    ","excerpt":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest …","ref":"/docs/skywalking-banyandb/v0.5.0/api-reference/","title":"Protocol Documentation"},{"body":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus format from the Pulsar and transfer the metrics to OpenTelemetry receiver and into the Meter System. 
Pulsar is treated as a Service entity in OAP, on Layer: PULSAR.\nData flow  Pulsar exposes metrics through a Prometheus endpoint. OpenTelemetry Collector fetches metrics from the Pulsar cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up Pulsar Cluster. (A Pulsar cluster includes a Pulsar broker cluster and a BookKeeper bookie cluster.) Set up OpenTelemetry Collector. For an example of the OpenTelemetry Collector configuration, refer to here. Configure the SkyWalking OpenTelemetry receiver.  Pulsar Monitoring Pulsar monitoring provides multidimensional metrics monitoring of the Pulsar cluster as a Layer: PULSAR Service in the OAP. In each cluster, the nodes are represented as Instance.\nPulsar Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Total Topics meter_pulsar_total_topics The number of Pulsar topics in this cluster. Pulsar Cluster   Total Subscriptions meter_pulsar_total_subscriptions The number of Pulsar subscriptions in this cluster. Pulsar Cluster   Total Producers meter_pulsar_total_producers The number of active producers connected to this cluster. Pulsar Cluster   Total Consumers meter_pulsar_total_consumers The number of active consumers connected to this cluster. Pulsar Cluster   Message Rate In meter_pulsar_message_rate_in The total message rate coming into this cluster (message per second). Pulsar Cluster   Message Rate Out meter_pulsar_message_rate_out The total message rate going out from this cluster (message per second). Pulsar Cluster   Throughput In meter_pulsar_throughput_in The total throughput coming into this cluster (byte per second). Pulsar Cluster   Throughput Out meter_pulsar_throughput_out The total throughput going out from this cluster (byte per second). Pulsar Cluster   Storage Size meter_pulsar_storage_size The total storage size of all topics in this broker (in bytes). Pulsar Cluster   Storage Logical Size meter_pulsar_storage_logical_size The storage size of all topics in this broker without replicas (in bytes). Pulsar Cluster   Storage Write Rate meter_pulsar_storage_write_rate The total message batches (entries) written to the storage for this broker (message batch per second). Pulsar Cluster   Storage Read Rate meter_pulsar_storage_read_rate The total message batches (entries) read from the storage for this broker (message batch per second). Pulsar Cluster    Pulsar Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     Active Connections meter_pulsar_broker_active_connections The number of active connections. Pulsar Broker   Total Connections meter_pulsar_broker_total_connections The total number of connections. Pulsar Broker   Connection Create Success Count meter_pulsar_broker_connection_create_success_count The number of successfully created connections. Pulsar Broker   Connection Create Fail Count meter_pulsar_broker_connection_create_fail_count The number of failed connections. Pulsar Broker   Connection Closed Total Count meter_pulsar_broker_connection_closed_total_count The total number of closed connections. Pulsar Broker   JVM Buffer Pool Used meter_pulsar_broker_jvm_buffer_pool_used_bytes The usage of jvm buffer pool. Pulsar Broker   JVM Memory Pool Used meter_pulsar_broker_jvm_memory_pool_used The usage of jvm memory pool. 
Pulsar Broker   JVM Memory meter_pulsar_broker_jvm_memory_init meter_pulsar_broker_jvm_memory_used meter_pulsar_broker_jvm_memory_committed The usage of jvm memory. Pulsar Broker   JVM Threads meter_pulsar_broker_jvm_threads_current meter_pulsar_broker_jvm_threads_daemon meter_pulsar_broker_jvm_threads_peak meter_pulsar_broker_jvm_threads_deadlocked The usage of jvm threads. Pulsar Broker   GC Time meter_pulsar_broker_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Pulsar Broker   GC Count meter_pulsar_broker_jvm_gc_collection_seconds_count The count of a given JVM garbage collector. Pulsar Broker    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/pulsar/pulsar-cluster.yaml, otel-rules/pulsar/pulsar-broker.yaml. The Pulsar dashboard panel configurations are found in ui-initialized-templates/pulsar.\n","excerpt":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus …","ref":"/docs/main/latest/en/setup/backend/backend-pulsar-monitoring/","title":"Pulsar monitoring"},{"body":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus format from Pulsar and transfers the metrics to the OpenTelemetry receiver and into the Meter System. Pulsar is treated as a Service entity in OAP, on Layer: PULSAR.\nData flow  Pulsar exposes metrics through a Prometheus endpoint. OpenTelemetry Collector fetches metrics from the Pulsar cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up Pulsar Cluster. (A Pulsar cluster includes a Pulsar broker cluster and a BookKeeper bookie cluster.) Set up OpenTelemetry Collector. For an example of the OpenTelemetry Collector configuration, refer to here. Configure the SkyWalking OpenTelemetry receiver.  Pulsar Monitoring Pulsar monitoring provides multidimensional metrics monitoring of the Pulsar cluster as a Layer: PULSAR Service in the OAP. In each cluster, the nodes are represented as Instance.\nPulsar Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Total Topics meter_pulsar_total_topics The number of Pulsar topics in this cluster. Pulsar Cluster   Total Subscriptions meter_pulsar_total_subscriptions The number of Pulsar subscriptions in this cluster. Pulsar Cluster   Total Producers meter_pulsar_total_producers The number of active producers connected to this cluster. Pulsar Cluster   Total Consumers meter_pulsar_total_consumers The number of active consumers connected to this cluster. Pulsar Cluster   Message Rate In meter_pulsar_message_rate_in The total message rate coming into this cluster (message per second). Pulsar Cluster   Message Rate Out meter_pulsar_message_rate_out The total message rate going out from this cluster (message per second). Pulsar Cluster   Throughput In meter_pulsar_throughput_in The total throughput coming into this cluster (byte per second). Pulsar Cluster   Throughput Out meter_pulsar_throughput_out The total throughput going out from this cluster (byte per second). Pulsar Cluster   Storage Size meter_pulsar_storage_size The total storage size of all topics in this broker (in bytes). Pulsar Cluster   Storage Logical Size meter_pulsar_storage_logical_size The storage size of all topics in this broker without replicas (in bytes). 
Pulsar Cluster   Storage Write Rate meter_pulsar_storage_write_rate The total message batches (entries) written to the storage for this broker (message batch per second). Pulsar Cluster   Storage Read Rate meter_pulsar_storage_read_rate The total message batches (entries) read from the storage for this broker (message batch per second). Pulsar Cluster    Pulsar Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     Active Connections meter_pulsar_broker_active_connections The number of active connections. Pulsar Broker   Total Connections meter_pulsar_broker_total_connections The total number of connections. Pulsar Broker   Connection Create Success Count meter_pulsar_broker_connection_create_success_count The number of successfully created connections. Pulsar Broker   Connection Create Fail Count meter_pulsar_broker_connection_create_fail_count The number of failed connections. Pulsar Broker   Connection Closed Total Count meter_pulsar_broker_connection_closed_total_count The total number of closed connections. Pulsar Broker   JVM Buffer Pool Used meter_pulsar_broker_jvm_buffer_pool_used_bytes The usage of jvm buffer pool. Pulsar Broker   JVM Memory Pool Used meter_pulsar_broker_jvm_memory_pool_used The usage of jvm memory pool. Pulsar Broker   JVM Memory meter_pulsar_broker_jvm_memory_init meter_pulsar_broker_jvm_memory_used meter_pulsar_broker_jvm_memory_committed The usage of jvm memory. Pulsar Broker   JVM Threads meter_pulsar_broker_jvm_threads_current meter_pulsar_broker_jvm_threads_daemon meter_pulsar_broker_jvm_threads_peak meter_pulsar_broker_jvm_threads_deadlocked The usage of jvm threads. Pulsar Broker   GC Time meter_pulsar_broker_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Pulsar Broker   GC Count meter_pulsar_broker_jvm_gc_collection_seconds_count The count of a given JVM garbage collector. Pulsar Broker    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/pulsar/pulsar-cluster.yaml, otel-rules/pulsar/pulsar-broker.yaml. The Pulsar dashboard panel configurations are found in ui-initialized-templates/pulsar.\n","excerpt":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus …","ref":"/docs/main/next/en/setup/backend/backend-pulsar-monitoring/","title":"Pulsar monitoring"},{"body":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus format from Pulsar and transfers the metrics to the OpenTelemetry receiver and into the Meter System. Pulsar is treated as a Service entity in OAP, on Layer: PULSAR.\nData flow  Pulsar exposes metrics through a Prometheus endpoint. OpenTelemetry Collector fetches metrics from the Pulsar cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up Pulsar Cluster. (A Pulsar cluster includes a Pulsar broker cluster and a BookKeeper bookie cluster.) Set up OpenTelemetry Collector. For an example of the OpenTelemetry Collector configuration, refer to here. Configure the SkyWalking OpenTelemetry receiver.  Pulsar Monitoring Pulsar monitoring provides multidimensional metrics monitoring of the Pulsar cluster as a Layer: PULSAR Service in the OAP. 
In each cluster, the nodes are represented as Instance.\nPulsar Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Total Topics meter_pulsar_total_topics The number of Pulsar topics in this cluster. Pulsar Cluster   Total Subscriptions meter_pulsar_total_subscriptions The number of Pulsar subscriptions in this cluster. Pulsar Cluster   Total Producers meter_pulsar_total_producers The number of active producers connected to this cluster. Pulsar Cluster   Total Consumers meter_pulsar_total_consumers The number of active consumers connected to this cluster. Pulsar Cluster   Message Rate In meter_pulsar_message_rate_in The total message rate coming into this cluster (message per second). Pulsar Cluster   Message Rate Out meter_pulsar_message_rate_out The total message rate going out from this cluster (message per second). Pulsar Cluster   Throughput In meter_pulsar_throughput_in The total throughput coming into this cluster (byte per second). Pulsar Cluster   Throughput Out meter_pulsar_throughput_out The total throughput going out from this cluster (byte per second). Pulsar Cluster   Storage Size meter_pulsar_storage_size The total storage size of all topics in this broker (in bytes). Pulsar Cluster   Storage Logical Size meter_pulsar_storage_logical_size The storage size of all topics in this broker without replicas (in bytes). Pulsar Cluster   Storage Write Rate meter_pulsar_storage_write_rate The total message batches (entries) written to the storage for this broker (message batch per second). Pulsar Cluster   Storage Read Rate meter_pulsar_storage_read_rate The total message batches (entries) read from the storage for this broker (message batch per second). Pulsar Cluster    Pulsar Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     Active Connections meter_pulsar_broker_active_connections The number of active connections. Pulsar Broker   Total Connections meter_pulsar_broker_total_connections The total number of connections. Pulsar Broker   Connection Create Success Count meter_pulsar_broker_connection_create_success_count The number of successfully created connections. Pulsar Broker   Connection Create Fail Count meter_pulsar_broker_connection_create_fail_count The number of failed connections. Pulsar Broker   Connection Closed Total Count meter_pulsar_broker_connection_closed_total_count The total number of closed connections. Pulsar Broker   JVM Buffer Pool Used meter_pulsar_broker_jvm_buffer_pool_used_bytes The usage of jvm buffer pool. Pulsar Broker   JVM Memory Pool Used meter_pulsar_broker_jvm_memory_pool_used The usage of jvm memory pool. Pulsar Broker   JVM Memory meter_pulsar_broker_jvm_memory_init meter_pulsar_broker_jvm_memory_used meter_pulsar_broker_jvm_memory_committed The usage of jvm memory. Pulsar Broker   JVM Threads meter_pulsar_broker_jvm_threads_current meter_pulsar_broker_jvm_threads_daemon meter_pulsar_broker_jvm_threads_peak meter_pulsar_broker_jvm_threads_deadlocked The usage of jvm threads. Pulsar Broker   GC Time meter_pulsar_broker_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Pulsar Broker   GC Count meter_pulsar_broker_jvm_gc_collection_seconds_count The count of a given JVM garbage collector. Pulsar Broker    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/pulsar/pulsar-cluster.yaml, otel-rules/pulsar/pulsar-broker.yaml. 
The Pulsar dashboard panel configurations are found in ui-initialized-templates/pulsar.\n","excerpt":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-pulsar-monitoring/","title":"Pulsar monitoring"},{"body":"Python Agent Asynchronous Enhancement Since 1.1.0, the Python agent supports asynchronous reporting of ALL telemetry data, including traces, metrics, logs and profile. This feature is disabled by default, since it is still in the experimental stage. You can enable it by setting the SW_AGENT_ASYNCIO_ENHANCEMENT environment variable to true. See the configuration document for more information.\nexport SW_AGENT_ASYNCIO_ENHANCEMENT=true Why we need this feature Before version 1.1.0, the SkyWalking Python agent only had an implementation based on the threading module to provide data reporters. Yet with the growth of the Python agent, it is now fully capable and requires more resources than when only tracing was supported (we start many threads and gRPC itself creates even more threads when streaming).\nAs is well known, the Global Interpreter Lock (GIL) in Python can limit the true parallel execution of threads. This issue also affects the Python agent, especially in network communication with the SkyWalking OAP (gRPC, HTTP and Kafka).\nTherefore, we have decided to implement the reporter code for the SkyWalking Python agent based on the asyncio library. asyncio is an officially supported asynchronous programming library in Python that operates on a single-threaded, coroutine-driven model. Currently, it enjoys widespread adoption and boasts a rich ecosystem, making it the preferred choice for enhancing asynchronous capabilities in many Python projects.\nHow it works To keep the API unchanged, we have completely rewritten a new class called SkyWalkingAgentAsync (with an interface identical to the SkyWalkingAgent class). We use the environment variable mentioned above, SW_AGENT_ASYNCIO_ENHANCEMENT, to control which class implements the agent\u0026rsquo;s interface.\nIn the SkyWalkingAgentAsync class, we have employed asyncio coroutines and their related functions to replace the Python threading implementation in nearly all instances, and we have applied asyncio enhancements to all three primary reporting protocols of the current SkyWalking Python agent:\n  gRPC: We use the grpc.aio module to replace the grpc module. Since the grpc.aio module is also officially supported and included in the grpc package, we can use it directly without any additional installation.\n  HTTP: We use the aiohttp module to replace the requests module.\n  Kafka: We use the aiokafka module to replace the kafka-python module.\n  Performance improvement We use wrk to stress-test the network throughput of the Python agents in a FastAPI application.\n gRPC  The performance has been improved by about 32.8%\n   gRPC QPS TPS Avg Latency     sync (original) 899.26 146.66KB 545.97ms   async (new) 1194.55 194.81KB 410.97ms     HTTP  The performance has been improved by about 9.8%\n   HTTP QPS TPS Avg Latency     sync (original) 530.95 86.59KB 1.53s   async (new) 583.37 95.14KB 1.44s     Kafka  The performance has been improved by about 89.6%\n   Kafka QPS TPS Avg Latency     sync (original) 345.89 56.41KB 1.09s   async (new) 655.67 106.93KB 1.24s     In fact, only the performance improvement of gRPC is of real reference value. 
Because the other two protocols use third-party libraries with completely different implementations, the performance improvement depends to a certain extent on the performance of these third-party libraries.\n For more details, see this PR .\nPotential problems We have shown that the asynchronous enhancement function improves the transmission efficiency of metrics, traces and logs, but it improves the performance of profile data very little and can even cause performance degradation.\nThis is mainly because a large part of the profile data comes from the monitoring and measurement of Python threads, which is exactly what we need to avoid in the asynchronous enhancement. Since operations on threads cannot be bypassed, we may need additional overhead to support cross-thread coroutine communication, which may lead to performance degradation instead of improvement.\nAsynchronous enhancements involve many code changes and introduce some new dependencies. Since this feature is relatively new, it may cause some unexpected errors and problems. If you encounter them, please feel free to contact us or submit issues and PRs!\n","excerpt":"Python Agent Asynchronous Enhancement Since 1.1.0, the Python agent supports asynchronous reporting …","ref":"/docs/skywalking-python/next/en/setup/advanced/asyncenhancement/","title":"Python Agent Asynchronous Enhancement"},{"body":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module (in theory, also logging libraries depending on the core logging module) and the loguru module.\nFrom Python agent 1.0.0, the log reporter is automatically enabled and can be disabled through agent_log_reporter_active=False or SW_AGENT_LOG_REPORTER_ACTIVE=False.\nThe log reporter supports all three protocols, including grpc, http and kafka, and shares the same config agent_protocol with the trace reporter.\nIf the http protocol is chosen, the logs will be batch-reported to the collector REST endpoint oap/v3/logs.\nIf the kafka protocol is chosen, please make sure to configure kafka-fetcher on the OAP side, and make sure the Python agent config kafka_bootstrap_servers points 
When enabled, the log reporter will collect logs disregarding your custom log filters.\nFor example, if you attach the filter below to the logger - the default behavior of log reporting aligns with the filter (not reporting any logs with a message starting with SW test)\nclass AppFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(\u0026#39;SW test\u0026#39;) logger.addFilter(AppFilter()) However, if you do would like to report those filtered logs, set the log_reporter_ignore_filter to True.\nFormatting Note that regardless of the formatting, Python agent will always report the following three tags -\nlevel - the logger level name\nlogger - the logger name\nthread - the thread name\nLimit stacktrace depth You can set the cause_exception_depth config entry to a desired level(defaults to 10), which limits the output depth of exception stacktrace in reporting.\nThis config limits agent to report up to limit stacktrace, please refer to Python traceback for more explanations.\nCustomize the reported log format You can choose to report collected logs in a custom layout.\nIf not set, the agent uses the layout below by default, else the agent uses your custom layout set in log_reporter_layout.\n'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s'\nIf the layout is set to None, the reported log content will only contain the pre-formatted LogRecord.message(msg % args) without any additional styles or extra fields, stacktrace will be attached if an exception was raised.\nTransmit un-formatted logs You can also choose to report the log messages without any formatting. It separates the raw log msg logRecord.msg and logRecord.args, then puts them into message content and tags starting from argument.0, respectively, along with an exception tag if an exception was raised.\nNote when you set log_reporter_formatted to False, it ignores your custom layout introduced above.\nAs an example, the following code:\nlogger.info(\u0026#34;SW test log %s%s%s\u0026#34;, \u0026#39;arg0\u0026#39;, \u0026#39;arg1\u0026#39;, \u0026#39;arg2\u0026#39;) Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;SW test log %s %s %s\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg0\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg2\u0026#34; } ] } ","excerpt":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module …","ref":"/docs/skywalking-python/latest/en/setup/advanced/logreporter/","title":"Python Agent Log Reporter"},{"body":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module (in theory, also logging libraries depending on the core logging module) and loguru module.\nFrom Python agent 1.0.0, the log reporter is automatically enabled and can be disabled through agent_log_reporter_active=False or SW_AGENT_LOG_REPORTER_ACTIVE=False.\nLog reporter supports all three protocols including grpc, http and kafka, which shares the same config agent_protocol with trace reporter.\nIf chosen http protocol, the logs will be batch-reported to the collector REST endpoint oap/v3/logs.\nIf chosen kafka protocol, please make sure to config kafka-fetcher on the OAP side, and make sure Python agent config kafka_bootstrap_servers points 
to your Kafka brokers.\nPlease make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nagent_log_reporter_active=True - Enables the log reporter.\nagent_log_reporter_max_buffer_size - The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.\nAlternatively, you can pass configurations through environment variables. Please refer to the Configuration Vocabulary for the list of environment variables associated with the log reporter.\nSpecify a logging level  [Important] Agent will only report logs that passes the default level threshold logging.getLogger().setLevel(logging.WARNING) For example, if your logger level is logging.INFO, agent will not report info logs even if you set agent_log_reporter_level to INFO\n Additional to the code level configuration, only the logs with a level equal to or higher than the specified configuration will be collected and reported.\nIn other words, the agent skips reporting some unwanted logs based on your level threshold even though they are still logged.\nlog_reporter_level - The string name of a logger level.\nNote that it also works with your custom logger levels, simply specify its string name in the config.\nIgnore log filters The following config is disabled by default. When enabled, the log reporter will collect logs disregarding your custom log filters.\nFor example, if you attach the filter below to the logger - the default behavior of log reporting aligns with the filter (not reporting any logs with a message starting with SW test)\nclass AppFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(\u0026#39;SW test\u0026#39;) logger.addFilter(AppFilter()) However, if you do would like to report those filtered logs, set the log_reporter_ignore_filter to True.\nFormatting Note that regardless of the formatting, Python agent will always report the following three tags -\nlevel - the logger level name\nlogger - the logger name\nthread - the thread name\nLimit stacktrace depth You can set the cause_exception_depth config entry to a desired level(defaults to 10), which limits the output depth of exception stacktrace in reporting.\nThis config limits agent to report up to limit stacktrace, please refer to Python traceback for more explanations.\nCustomize the reported log format You can choose to report collected logs in a custom layout.\nIf not set, the agent uses the layout below by default, else the agent uses your custom layout set in log_reporter_layout.\n'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s'\nIf the layout is set to None, the reported log content will only contain the pre-formatted LogRecord.message(msg % args) without any additional styles or extra fields, stacktrace will be attached if an exception was raised.\nTransmit un-formatted logs You can also choose to report the log messages without any formatting. 
It separates the raw log msg logRecord.msg and logRecord.args, then puts them into message content and tags starting from argument.0, respectively, along with an exception tag if an exception was raised.\nNote when you set log_reporter_formatted to False, it ignores your custom layout introduced above.\nAs an example, the following code:\nlogger.info(\u0026#34;SW test log %s%s%s\u0026#34;, \u0026#39;arg0\u0026#39;, \u0026#39;arg1\u0026#39;, \u0026#39;arg2\u0026#39;) Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;SW test log %s %s %s\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg0\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg2\u0026#34; } ] } Print trace ID in your logs To print out the trace IDs in the logs, simply add %(tid)s to the agent_log_reporter_layout.\nYou can take advantage of this feature to print out the trace IDs on any channel you desire, not limited to reporting logs to OAP, this can be achieved by using any formatter you prefer in your own application logic.\n","excerpt":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module …","ref":"/docs/skywalking-python/next/en/setup/advanced/logreporter/","title":"Python Agent Log Reporter"},{"body":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module (in theory, also logging libraries depending on the core logging module) and loguru module.\nFrom Python agent 1.0.0, the log reporter is automatically enabled and can be disabled through agent_log_reporter_active=False or SW_AGENT_LOG_REPORTER_ACTIVE=False.\nLog reporter supports all three protocols including grpc, http and kafka, which shares the same config agent_protocol with trace reporter.\nIf chosen http protocol, the logs will be batch-reported to the collector REST endpoint oap/v3/logs.\nIf chosen kafka protocol, please make sure to config kafka-fetcher on the OAP side, and make sure Python agent config kafka_bootstrap_servers points to your Kafka brokers.\nPlease make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nagent_log_reporter_active=True - Enables the log reporter.\nagent_log_reporter_max_buffer_size - The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.\nAlternatively, you can pass configurations through environment variables. 
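For instance, here is a minimal, unofficial sketch of that environment-variable style: SW_AGENT_LOG_REPORTER_ACTIVE is named earlier on this page, while SW_AGENT_LOG_REPORTER_LEVEL is assumed to follow the usual SW_ upper-case mapping and should be verified against the Configuration Vocabulary.
# Minimal sketch (assumptions noted above): set the variables before the agent starts.
import os
os.environ['SW_AGENT_LOG_REPORTER_ACTIVE'] = 'True'
os.environ['SW_AGENT_LOG_REPORTER_LEVEL'] = 'WARNING'  # assumed name, mirrors log_reporter_level

import logging
# Only records at or above both thresholds (the logger's own level and the
# reporter level configured above) are eligible for reporting.
logging.getLogger().setLevel(logging.WARNING)
logging.getLogger(__name__).warning('this record can be reported')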
Please refer to the Configuration Vocabulary for the list of environment variables associated with the log reporter.\nSpecify a logging level  [Important] Agent will only report logs that passes the default level threshold logging.getLogger().setLevel(logging.WARNING) For example, if your logger level is logging.INFO, agent will not report info logs even if you set agent_log_reporter_level to INFO\n Additional to the code level configuration, only the logs with a level equal to or higher than the specified configuration will be collected and reported.\nIn other words, the agent skips reporting some unwanted logs based on your level threshold even though they are still logged.\nlog_reporter_level - The string name of a logger level.\nNote that it also works with your custom logger levels, simply specify its string name in the config.\nIgnore log filters The following config is disabled by default. When enabled, the log reporter will collect logs disregarding your custom log filters.\nFor example, if you attach the filter below to the logger - the default behavior of log reporting aligns with the filter (not reporting any logs with a message starting with SW test)\nclass AppFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(\u0026#39;SW test\u0026#39;) logger.addFilter(AppFilter()) However, if you do would like to report those filtered logs, set the log_reporter_ignore_filter to True.\nFormatting Note that regardless of the formatting, Python agent will always report the following three tags -\nlevel - the logger level name\nlogger - the logger name\nthread - the thread name\nLimit stacktrace depth You can set the cause_exception_depth config entry to a desired level(defaults to 10), which limits the output depth of exception stacktrace in reporting.\nThis config limits agent to report up to limit stacktrace, please refer to Python traceback for more explanations.\nCustomize the reported log format You can choose to report collected logs in a custom layout.\nIf not set, the agent uses the layout below by default, else the agent uses your custom layout set in log_reporter_layout.\n'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s'\nIf the layout is set to None, the reported log content will only contain the pre-formatted LogRecord.message(msg % args) without any additional styles or extra fields, stacktrace will be attached if an exception was raised.\nTransmit un-formatted logs You can also choose to report the log messages without any formatting. 
It separates the raw log msg logRecord.msg and logRecord.args, then puts them into message content and tags starting from argument.0, respectively, along with an exception tag if an exception was raised.\nNote when you set log_reporter_formatted to False, it ignores your custom layout introduced above.\nAs an example, the following code:\nlogger.info(\u0026#34;SW test log %s%s%s\u0026#34;, \u0026#39;arg0\u0026#39;, \u0026#39;arg1\u0026#39;, \u0026#39;arg2\u0026#39;) Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;SW test log %s %s %s\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg0\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg2\u0026#34; } ] } ","excerpt":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module …","ref":"/docs/skywalking-python/v1.0.1/en/setup/advanced/logreporter/","title":"Python Agent Log Reporter"},{"body":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC and Kafka protocol, HTTP protocol is not implemented yet (requires additional handler on SkyWalking OAP side).\nEnabling the feature (default is enabled) PVM Reporter is also by default enabled, meaning useful Python metrics such as thread count/GC info will be shown in OAP General Services - Instance - PVM Tab) If you really don\u0026rsquo;t need such a feature, disable them through config.agent_pvm_meter_reporter_active or SW_AGENT_PVM_METER_REPORTER_ACTIVE\nconfig.agent_meter_reporter_active = True # Or os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;True\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=True Disable the feature os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;False\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=False Counter  Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT, ((\u0026#34;k1\u0026#34;, \u0026#34;v1\u0026#34;), (\u0026#34;k2\u0026#34;, \u0026#34;v2\u0026#34;))) # or this way # builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT).tag(\u0026#39;key1\u0026#39;, \u0026#39;value1\u0026#39;).tag(\u0026#39;key2\u0026#39;, \u0026#39;value2\u0026#39;) c = builder.build() c.increment(2) Syntactic sugars builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time the with-wrapped codes consumed with c.create_timer(): # some codes may consume a certain time builder = Counter.Builder(\u0026#39;c3\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by num once counter_decorator_test gets called @Counter.increase(name=\u0026#39;c3\u0026#39;, num=2) def counter_decorator_test(): # some codes builder = Counter.Builder(\u0026#39;c4\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time counter_decorator_test consumed @Counter.timer(name=\u0026#39;c4\u0026#39;) def counter_decorator_test(s): # some codes may consume a certain time  Counter.Builder(name, tags) Create a new counter builder with the meter name and optional tags. Counter.tag(key: str, value) Mark a tag key/value pair. 
Counter.mode(mode: CounterMode) Change the counter mode; RATE mode means reporting the rate to the backend. Counter.increment(count) Increment the Counter by count; it should be a positive value.  Gauge  Gauge API represents a single numerical value.  # producer: iterable object builder = Gauge.Builder(\u0026#39;g1\u0026#39;, producer, ((\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;))) g = Builder.build()  Gauge.Builder(name, tags) Create a new gauge builder with the meter name and an iterable object; this iterable object needs to produce numeric values. Gauge.tag(key: str, value) Mark a tag key/value pair. Gauge.build() Build a new Gauge which is collected and reported to the backend.  Histogram  Histogram API represents a summary of sample observations with customized buckets.  builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)], (\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;)) h = builder.build() Syntactic sugars builder = Histogram.Builder(\u0026#39;h3\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time the with-wrapped codes consumed with h.create_timer(): # some codes may consume a certain time builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time histogram_decorator_test consumed @Histogram.timer(name=\u0026#39;h2\u0026#39;) def histogram_decorator_test(s): time.sleep(s)  Histogram.Builder(name, tags) Create a new histogram builder with the meter name and optional tags. Histogram.tag(key: str, value) Mark a tag key/value pair. Histogram.minValue(value) Set up the minimal value of this histogram; the default is 0. Histogram.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(value) Add a value into the histogram, automatically analyzing which bucket count needs to be incremented; rule: the value counts into [step1, step2).  ","excerpt":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC …","ref":"/docs/skywalking-python/latest/en/setup/advanced/meterreporter/","title":"Python Agent Meter Reporter"},{"body":"Python Agent Meter Reporter Important Note: The meter reporter is currently available with the gRPC and Kafka protocols; the HTTP protocol is not implemented yet (it requires an additional handler on the SkyWalking OAP side).\nEnabling the feature (default is enabled) The PVM reporter is also enabled by default, meaning useful Python metrics such as thread count/GC info will be shown in OAP (General Services - Instance - PVM Tab). If you really don\u0026rsquo;t need such a feature, disable it through config.agent_pvm_meter_reporter_active or SW_AGENT_PVM_METER_REPORTER_ACTIVE\nconfig.agent_meter_reporter_active = True # Or os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;True\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=True Disable the feature os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;False\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=False Counter  Counter API represents a single monotonically increasing counter; it automatically collects data and reports to the backend.  
builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT, ((\u0026#34;k1\u0026#34;, \u0026#34;v1\u0026#34;), (\u0026#34;k2\u0026#34;, \u0026#34;v2\u0026#34;))) # or this way # builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT).tag(\u0026#39;key1\u0026#39;, \u0026#39;value1\u0026#39;).tag(\u0026#39;key2\u0026#39;, \u0026#39;value2\u0026#39;) c = builder.build() c.increment(2) Syntactic sugars builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time the with-wrapped codes consumed with c.create_timer(): # some codes may consume a certain time builder = Counter.Builder(\u0026#39;c3\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by num once counter_decorator_test gets called @Counter.increase(name=\u0026#39;c3\u0026#39;, num=2) def counter_decorator_test(): # some codes builder = Counter.Builder(\u0026#39;c4\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time counter_decorator_test consumed @Counter.timer(name=\u0026#39;c4\u0026#39;) def counter_decorator_test(s): # some codes may consume a certain time  Counter.Builder(name, tags) Create a new counter builder with the meter name and optional tags. Counter.tag(key: str, value) Mark a tag key/value pair. Counter.mode(mode: CounterMode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.increment(count) Increment count to the Counter, It could be a positive value.  Gauge  Gauge API represents a single numerical value.  # producer: iterable object builder = Gauge.Builder(\u0026#39;g1\u0026#39;, producer, ((\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;))) g = Builder.build()  Gauge.Builder(name, tags) Create a new gauge builder with the meter name and iterable object, this iterable object need to produce numeric value. Gauge.tag(key: str, value) Mark a tag key/value pair. Gauge.build() Build a new Gauge which is collected and reported to the backend.  Histogram  Histogram API represents a summary sample observations with customize buckets.  builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)], (\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;)) h = builder.build() Syntactic sugars builder = Histogram.Builder(\u0026#39;h3\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time the with-wprapped codes consumed with h.create_timer(): # some codes may consume a certain time builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time histogram_decorator_test consumed @Histogram.timer(name=\u0026#39;h2\u0026#39;) def histogram_decorator_test(s): time.sleep(s)  Histogram.Builder(name, tags) Create a new histogram builder with the meter name and optional tags. Histogram.tag(key: str, value) Mark a tag key/value pair. Histogram.minValue(value) Set up the minimal value of this histogram, default is 0. Histogram.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  
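To tie the pieces above together, here is a brief usage sketch (not an official snippet): the builder calls, tag, build, increment and addValue come from the API reference above, while the import paths and the agent bootstrap lines are assumptions to be checked against your agent version.
from skywalking import agent, config                       # assumed bootstrap, optional
from skywalking.meter.counter import Counter, CounterMode  # assumed import path
from skywalking.meter.histogram import Histogram           # assumed import path

config.init(agent_name='meter-demo', agent_collector_backend_services='127.0.0.1:11800')
agent.start()

# Counter: a monotonically increasing value with a tag.
requests = Counter.Builder('demo_requests', CounterMode.INCREMENT).tag('svc', 'demo').build()
requests.increment(1)

# Histogram: bucketed observations, e.g. request latency in seconds.
latency = Histogram.Builder('demo_latency', [0.1, 0.5, 1.0]).build()
latency.addValue(0.42)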
","excerpt":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC …","ref":"/docs/skywalking-python/next/en/setup/advanced/meterreporter/","title":"Python Agent Meter Reporter"},{"body":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC and Kafka protocol, HTTP protocol is not implemented yet (requires additional handler on SkyWalking OAP side).\nEnabling the feature (default is enabled) PVM Reporter is also by default enabled, meaning useful Python metrics such as thread count/GC info will be shown in OAP General Services - Instance - PVM Tab) If you really don\u0026rsquo;t need such a feature, disable them through config.agent_pvm_meter_reporter_active or SW_AGENT_PVM_METER_REPORTER_ACTIVE\nconfig.agent_meter_reporter_active = True # Or os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;True\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=True Disable the feature os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;False\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=False Counter  Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT, ((\u0026#34;k1\u0026#34;, \u0026#34;v1\u0026#34;), (\u0026#34;k2\u0026#34;, \u0026#34;v2\u0026#34;))) # or this way # builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT).tag(\u0026#39;key1\u0026#39;, \u0026#39;value1\u0026#39;).tag(\u0026#39;key2\u0026#39;, \u0026#39;value2\u0026#39;) c = builder.build() c.increment(2) Syntactic sugars builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time the with-wrapped codes consumed with c.create_timer(): # some codes may consume a certain time builder = Counter.Builder(\u0026#39;c3\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by num once counter_decorator_test gets called @Counter.increase(name=\u0026#39;c3\u0026#39;, num=2) def counter_decorator_test(): # some codes builder = Counter.Builder(\u0026#39;c4\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time counter_decorator_test consumed @Counter.timer(name=\u0026#39;c4\u0026#39;) def counter_decorator_test(s): # some codes may consume a certain time  Counter.Builder(name, tags) Create a new counter builder with the meter name and optional tags. Counter.tag(key: str, value) Mark a tag key/value pair. Counter.mode(mode: CounterMode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.increment(count) Increment count to the Counter, It could be a positive value.  Gauge  Gauge API represents a single numerical value.  # producer: iterable object builder = Gauge.Builder(\u0026#39;g1\u0026#39;, producer, ((\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;))) g = Builder.build()  Gauge.Builder(name, tags) Create a new gauge builder with the meter name and iterable object, this iterable object need to produce numeric value. Gauge.tag(key: str, value) Mark a tag key/value pair. Gauge.build() Build a new Gauge which is collected and reported to the backend.  Histogram  Histogram API represents a summary sample observations with customize buckets.  
builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)], (\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;)) h = builder.build() Syntactic sugars builder = Histogram.Builder(\u0026#39;h3\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time the with-wprapped codes consumed with h.create_timer(): # some codes may consume a certain time builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time histogram_decorator_test consumed @Histogram.timer(name=\u0026#39;h2\u0026#39;) def histogram_decorator_test(s): time.sleep(s)  Histogram.Builder(name, tags) Create a new histogram builder with the meter name and optional tags. Histogram.tag(key: str, value) Mark a tag key/value pair. Histogram.minValue(value) Set up the minimal value of this histogram, default is 0. Histogram.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","excerpt":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC …","ref":"/docs/skywalking-python/v1.0.1/en/setup/advanced/meterreporter/","title":"Python Agent Meter Reporter"},{"body":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve a series of data points between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These data points contain tags: id and entity_id that belong to a family default. 
They also choose fields: total and value.\n$ bydbctl measure query -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] timeRange: begin: 2022-10-15T22:32:48Z end: 2022-10-15T23:32:48Z EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl measure query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] EOF API Reference MeasureService v1\n","excerpt":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in …","ref":"/docs/skywalking-banyandb/latest/crud/measure/query/","title":"Query Measures"},{"body":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve a series of data points between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These data points contain tags: id and entity_id that belong to a family default. 
They also choose fields: total and value.\n$ bydbctl measure query -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] timeRange: begin: 2022-10-15T22:32:48Z end: 2022-10-15T23:32:48Z EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl measure query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] EOF API Reference MeasureService v1\n","excerpt":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in …","ref":"/docs/skywalking-banyandb/next/crud/measure/query/","title":"Query Measures"},{"body":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve a series of data points between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These data points contain tags: id and entity_id that belong to a family default. 
They also choose fields: total and value.\n$ bydbctl measure query -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] timeRange: begin: 2022-10-15T22:32:48Z end: 2022-10-15T23:32:48Z EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl measure query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] EOF API Reference MeasureService v1\n","excerpt":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/measure/query/","title":"Query Measures"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/latest/en/api/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nV2 APIs Provide Metadata V2 query APIs since 9.0.0, including Layer concept.\nextendtypeQuery{# Read all available layers# UI could use this list to determine available dashboards/panels# The available layers would change with time in the runtime, because new service could be detected in any time.# This list should be loaded periodically.listLayers:[String!]!# Read the service list according to layer.listServices(layer:String):[Service!]!# Find service according to given ID. Return null if not existing.getService(serviceId:String!):Service# Search and find service according to given name. Return null if not existing.findService(serviceName:String!):Service# Read service instance list.listInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Search and find service instance according to given ID. Return null if not existing.getInstance(instanceId:String!):ServiceInstance# Search and find matched endpoints according to given service and keyword(optional)# If no keyword, randomly choose endpoint based on `limit` value.findEndpoint(keyword:String,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topology# When layer is specified, the topology of this layer would be queriedgetGlobalTopology(duration:Duration!,layer:String):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology# Query the topology, based on the given instancegetProcessTopology(serviceInstanceId:ID!,duration:Duration!):ProcessTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!# Read the list of searchable keysqueryLogTagAutocompleteKeys(duration:Duration!):[String!]# Search the available value options of the given key.queryLogTagAutocompleteValues(tagKey:String!,duration:Duration!):[String!]}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{# Search segment list with given conditionsqueryBasicTraces(condition:TraceQueryCondition):TraceBrief# Read the specific trace ID with given trace IDqueryTrace(traceId:ID!):Trace# Read the list of searchable keysqueryTraceTagAutocompleteKeys(duration:Duration!):[String!]# Search the available value options of the given key.queryTraceTagAutocompleteValues(tagKey:String!,duration:Duration!):[String!]}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegments(taskID:ID!):[ProfiledTraceSegments!]!# analyze multiple profiled segments, start and end time use timestamp(millisecond)getSegmentsProfileAnalyze(queries:[SegmentProfileAnalyzeQuery!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task list# query `triggerType == FIXED_TIME` when triggerType is absentqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!],triggerType:EBPFProfilingTriggerType,duration:Duration):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. 
analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}On-Demand Pod Logs Provide APIs to query on-demand pod logs since 9.1.0.\nextendtypeQuery{listContainers(condition:OndemandContainergQueryCondition):PodContainersondemandPodLogs(condition:OndemandLogQueryCondition):Logs}Hierarchy Provide Hierarchy query APIs since 10.0.0, including service and instance hierarchy.\nextendtypeQuery{# Query the service hierarchy, based on the given service. Will recursively return all related layers services in the hierarchy.getServiceHierarchy(serviceId:ID!,layer:String!):ServiceHierarchy!# Query the instance hierarchy, based on the given instance. Will return all direct related layers instances in the hierarchy, no recursive.getInstanceHierarchy(instanceId:ID!,layer:String!):InstanceHierarchy!# List layer hierarchy levels. The layer levels are defined in the `hierarchy-definition.yml`.listLayerLevels:[LayerLevel!]!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/next/en/api/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple linears.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responsed, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.0.0/en/protocols/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. 
Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple linears.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responsed, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
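For instance, the TopN list of services mentioned here can be requested through the getServiceTopN operation defined in the aggregation schema that follows. The sketch below is illustrative only: the metric name service_sla, the Order value DES, and the id/name/value selection fields are assumptions.

query Top10ServicesBySla {
  getServiceTopN(
    # single-value metric to sort by; service_sla is an assumed example
    name: "service_sla"
    topN: 10
    duration: { start: "2017-11-01", end: "2017-11-08", step: DAY }
    # Order enum values are assumed to be ASC / DES
    order: DES
  ) {
    id
    name
    value
  }
}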
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.1.0/en/protocols/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. 
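A minimal metadata query against the schema that follows might look like the sketch below; the id/name selection fields of Service are assumptions, and the group value is purely illustrative (the argument is optional).

query ListServices {
  getAllServices(
    duration: { start: "2017-11-08 09", end: "2017-11-08 19", step: HOUR }
    # optional group filter; "default" is an illustrative value
    group: "default"
  ) {
    id
    name
  }
}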
You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. 
getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple linears.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responsed, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. 
Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.2.0/en/protocols/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. 
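For labeled metrics, the V2 readLabeledMetricsValues operation shown later in this entry takes an explicit label list. The sketch below is assumption-heavy: the metric name service_percentile, the convention that labels 0-4 map to p50/p75/p90/p95/p99, and the condition/selection fields are not defined on this page and should be verified against the schema.

query ReadServicePercentiles {
  readLabeledMetricsValues(
    condition: {
      # assumed labeled metric produced by the percentile OAL function
      name: "service_percentile"
      # assumed Entity fields
      entity: { scope: Service, serviceName: "your-service", normal: true }
    }
    # assumed label values for p50/p75/p90/p95/p99
    labels: ["0", "1", "2", "3", "4"]
    duration: { start: "2017-11-08 09", end: "2017-11-08 19", step: HOUR }
  ) {
    label
    values { values { value } }
  }
}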
You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.3.0/en/protocols/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
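To make the multiple-value case above concrete, a V1 percentile query could be sketched like this; the MetricCondition field name, the metric all_percentile, and the IntValues selection are assumptions. With percentile(50,75,90,95,99) in OAL, five IntValues are returned and p50 comes first.

query ReadAllPercentiles {
  getMultipleLinearIntValues(
    # assumed MetricCondition field and example metric name
    metric: { name: "all_percentile" }
    # five lines for percentile(50,75,90,95,99)
    numOfLinear: 5
    duration: { start: "2017-11-08 09", end: "2017-11-08 19", step: HOUR }
  ) {
    # assumed IntValues -> KVInt shape
    values { value }
  }
}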
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
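Coming back to the readRecords operation defined above, sampled slow statements might be fetched roughly as below. Everything outside the operation name is an assumption: the RecordCondition fields (name, parentEntity, topN, order), the metric name top_n_database_statement, and the Record selection fields should be checked against record.graphqls.

query ReadSlowStatements {
  readRecords(
    condition: {
      # assumed sampled-record metric name
      name: "top_n_database_statement"
      # assumed parent Entity fields
      parentEntity: { scope: Service, serviceName: "your-database", normal: true }
      topN: 20
      order: DES
    }
    duration: { start: "2017-11-08 09", end: "2017-11-08 19", step: HOUR }
  ) {
    id
    name
    value
  }
}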
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.4.0/en/api/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
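As a sketch of the MQE-based execExpression operation defined above: the type/results/error selection comes from the ExpressionResult definition on this page, while the Entity input fields, the inner MQEValues shape, and the expression avg(service_resp_time) are assumptions.

query ExecAvgRespTime {
  execExpression(
    # example MQE expression; any metric/function supported by MQE could be used
    expression: "avg(service_resp_time)"
    # assumed Entity fields
    entity: { scope: Service, serviceName: "your-service", normal: true }
    duration: { start: "2017-11-08 09", end: "2017-11-08 19", step: HOUR }
  ) {
    type
    results {
      # assumed MQEValues shape
      metric { labels { key value } }
      values { id value }
    }
    error
  }
}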
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.5.0/en/api/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
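Against the topology schema that follows, a global topology query might be sketched as below; the nodes/calls selection fields are assumptions about the Topology type and are not defined on this page.

query GlobalTopology {
  getGlobalTopology(
    duration: { start: "2017-11-08 09", end: "2017-11-08 19", step: HOUR }
  ) {
    # assumed Node fields
    nodes { id name type isReal }
    # assumed Call fields
    calls { id source target }
  }
}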
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
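The log operations defined above might be exercised as in the sketch below; keyword filtering only makes sense when supportQueryLogsByKeywords returns true. The LogQueryCondition fields (serviceId, queryDuration, paging, keywordsOfContent), the Pagination fields, and the Logs selection set are assumptions to verify against the schema.

query FuzzyQueryLogs {
  supportQueryLogsByKeywords
  queryLogs(
    condition: {
      # assumed LogQueryCondition fields
      serviceId: "your-service-id"
      queryDuration: { start: "2017-11-08 09", end: "2017-11-08 19", step: HOUR }
      paging: { pageNum: 1, pageSize: 15 }
      # only honored when the storage supports fuzzy query
      keywordsOfContent: ["error"]
    }
  ) {
    # assumed Logs/Log fields
    logs { serviceName timestamp content }
  }
}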
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.6.0/en/api/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
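To make the execExpression and Duration definitions above more concrete, here is a minimal sketch of calling the MQE API over the OAP GraphQL endpoint. The endpoint URL, the Entity fields (serviceName, normal) and the example expression are assumptions not shown in the schema above; only the execExpression signature, the ExpressionResult fields and the HOUR-step time format come from it.

# Minimal sketch: POST an MQE expression to the OAP GraphQL endpoint.
# Assumptions: OAP REST address, Entity field names and the example expression.
import json
import urllib.request

OAP_GRAPHQL = "http://localhost:12800/graphql"  # assumed default OAP address

QUERY = """
query ($expression: String!, $entity: Entity!, $duration: Duration!) {
  execExpression(expression: $expression, entity: $entity, duration: $duration) {
    type
    error
    # add a sub-selection on `results` (MQEValues) from the full schema
    # if the individual data points are needed
  }
}
"""

variables = {
    "expression": "avg(service_resp_time)",  # illustrative MQE expression
    "entity": {"serviceName": "your-service", "normal": True},  # assumed Entity fields
    # HOUR step uses the `yyyy-MM-dd HH` format described above
    "duration": {"start": "2017-11-08 09", "end": "2017-11-08 19", "step": "HOUR"},
}

req = urllib.request.Request(
    OAP_GRAPHQL,
    data=json.dumps({"query": QUERY, "variables": variables}).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.dumps(json.loads(resp.read()), indent=2))

The same request shape works for the other Query Protocol operations; only the query text and variables change.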
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.7.0/en/api/query-protocol/","title":"Query Protocol"},{"body":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve elements in a stream named sw between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. 
These elements also choose a tag trace_id which lives in a family named searchable.\n$ bydbctl stream query -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] timeRange: begin: 2022-10-15T22:32:48+08:00 end: 2022-10-15T23:32:48+08:00 EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl stream query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] EOF API Reference StreamService v1\n","excerpt":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in …","ref":"/docs/skywalking-banyandb/latest/crud/stream/query/","title":"Query Streams"},{"body":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve elements in a stream named sw between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. 
These elements also choose a tag trace_id which lives in a family named searchable.\n$ bydbctl stream query -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] timeRange: begin: 2022-10-15T22:32:48+08:00 end: 2022-10-15T23:32:48+08:00 EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl stream query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] EOF API Reference StreamService v1\n","excerpt":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in …","ref":"/docs/skywalking-banyandb/next/crud/stream/query/","title":"Query Streams"},{"body":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve elements in a stream named sw between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. 
These elements also choose a tag trace_id which lives in a family named searchable.\n$ bydbctl stream query -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] timeRange: begin: 2022-10-15T22:32:48+08:00 end: 2022-10-15T23:32:48+08:00 EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl stream query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] EOF API Reference StreamService v1\n","excerpt":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/stream/query/","title":"Query Streams"},{"body":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The maximum buffer event size.event_buffer_size:5000# The partition count of queue.partition:1Configuration    Name Type Description     event_buffer_size int configThe maximum buffer event size.   partition int The total partition count.    ","excerpt":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/queue_memory-queue/","title":"Queue/memory-queue"},{"body":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The maximum buffer event size.event_buffer_size:5000# The partition count of queue.partition:1Configuration    Name Type Description     event_buffer_size int configThe maximum buffer event size.   partition int The total partition count.    ","excerpt":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/queue_memory-queue/","title":"Queue/memory-queue"},{"body":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The maximum buffer event size.event_buffer_size:5000# The partition count of queue.partition:1Configuration    Name Type Description     event_buffer_size int configThe maximum buffer event size.   partition int The total partition count.    ","excerpt":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/queue_memory-queue/","title":"Queue/memory-queue"},{"body":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the input event. Please note that this plugin does not support Windows platform.\nDefaultConfig # The size of each segment. Default value is 256K. The unit is Byte.segment_size:262114# The max num of segments in memory. Default value is 10.max_in_mem_segments:10# The capacity of Queue = segment_size * queue_capacity_segments.queue_capacity_segments:2000# The period flush time. The unit is ms. Default value is 1 second.flush_period:1000# The max number in one flush time. Default value is 10000.flush_ceiling_num:10000# The max size of the input event. Default value is 20k.max_event_size:20480# The partition count of queue.partition:1Configuration    Name Type Description     segment_size int The size of each segment. The unit is byte.   
max_in_mem_segments int32 The max num of segments in memory.   queue_capacity_segments int The capacity of Queue = segment_size * queue_capacity_segments.   flush_period int The period flush time. The unit is ms.   flush_ceiling_num int The max number in one flush time.   max_event_size int The max size of the input event.   partition int The total partition count.    ","excerpt":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/queue_mmap-queue/","title":"Queue/mmap-queue"},{"body":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the input event. Please note that this plugin does not support Windows platform.\nDefaultConfig # The size of each segment. Default value is 256K. The unit is Byte.segment_size:262114# The max num of segments in memory. Default value is 10.max_in_mem_segments:10# The capacity of Queue = segment_size * queue_capacity_segments.queue_capacity_segments:2000# The period flush time. The unit is ms. Default value is 1 second.flush_period:1000# The max number in one flush time. Default value is 10000.flush_ceiling_num:10000# The max size of the input event. Default value is 20k.max_event_size:20480# The partition count of queue.partition:1Configuration    Name Type Description     segment_size int The size of each segment. The unit is byte.   max_in_mem_segments int32 The max num of segments in memory.   queue_capacity_segments int The capacity of Queue = segment_size * queue_capacity_segments.   flush_period int The period flush time. The unit is ms.   flush_ceiling_num int The max number in one flush time.   max_event_size int The max size of the input event.   partition int The total partition count.    ","excerpt":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/queue_mmap-queue/","title":"Queue/mmap-queue"},{"body":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the input event. Please note that this plugin does not support Windows platform.\nDefaultConfig # The size of each segment. Default value is 256K. The unit is Byte.segment_size:262114# The max num of segments in memory. Default value is 10.max_in_mem_segments:10# The capacity of Queue = segment_size * queue_capacity_segments.queue_capacity_segments:2000# The period flush time. The unit is ms. Default value is 1 second.flush_period:1000# The max number in one flush time. Default value is 10000.flush_ceiling_num:10000# The max size of the input event. Default value is 20k.max_event_size:20480# The partition count of queue.partition:1Configuration    Name Type Description     segment_size int The size of each segment. The unit is byte.   max_in_mem_segments int32 The max num of segments in memory.   queue_capacity_segments int The capacity of Queue = segment_size * queue_capacity_segments.   flush_period int The period flush time. The unit is ms.   flush_ceiling_num int The max number in one flush time.   max_event_size int The max size of the input event.   partition int The total partition count.    
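The sizing rule above ("The capacity of Queue = segment_size * queue_capacity_segments") is easy to sanity-check. A minimal sketch using the documented defaults of a 256K segment and 2000 segments; it uses 262144 (256 * 1024) bytes on the assumption that 256K is the intended segment size.

# Quick sanity check of the mmap-queue sizing rule, not part of the plugin docs.
segment_size = 256 * 1024          # bytes per segment, assuming 256K as documented
queue_capacity_segments = 2000     # default number of segments
capacity_bytes = segment_size * queue_capacity_segments
print(f"mmap queue capacity ~ {capacity_bytes / (1024 ** 2):.0f} MB")  # ~ 500 MB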
","excerpt":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/queue_mmap-queue/","title":"Queue/mmap-queue"},{"body":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as SkyWalking native configuration discovery service protocol.\nDefaultConfig # The partition count of queue.partition:1Configuration    Name Type Description     partition int The total partition count.    ","excerpt":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/queue_none-queue/","title":"Queue/none-queue"},{"body":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as SkyWalking native configuration discovery service protocol.\nDefaultConfig # The partition count of queue.partition:1Configuration    Name Type Description     partition int The total partition count.    ","excerpt":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/queue_none-queue/","title":"Queue/none-queue"},{"body":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as SkyWalking native configuration discovery service protocol.\nDefaultConfig # The partition count of queue.partition:1Configuration    Name Type Description     partition int The total partition count.    ","excerpt":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/queue_none-queue/","title":"Queue/none-queue"},{"body":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including setting up environments, testing and releasing.\nFirst you need to have the make command available:\n# ubuntu/wsl sudo apt-get update sudo apt-get -y install make or\n# windows powershell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time irm get.scoop.sh | iex scoop install make Poetry We have migrated from basic pip to Poetry to manage dependencies and package our project.\nOnce you have make ready, run make env, this will automatically install the right Poetry release, and create (plus manage) a .venv virtual environment for us based on the currently activated Python 3 version. 
Enjoy coding!\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nSwitching between Multiple Python Versions Do not develop/test on Python \u0026lt; 3.7, since Poetry and some other functionalities we implement rely on Python 3.7+\nIf you would like to test on multiple Python versions, run the following to switch and recreate virtual environment:\nWithout Python Version Tools poetry env use python3.x poetry install With Python Version Tools pyenv shell 3.9.11 poetry env use $(pyenv which python) poetry install Or try: virtualenvs.prefer-active-python, which is an experimental poetry feature that can be set to true so that it will automatically follow environment.\nNext Refer to the Plugin Development Guide to learn how to build a new plugin for a library.\n","excerpt":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including …","ref":"/docs/skywalking-python/latest/en/contribution/developer/","title":"Quick Start for Contributors"},{"body":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including setting up environments, testing and releasing.\nFirst you need to have the make command available:\n# ubuntu/wsl sudo apt-get update sudo apt-get -y install make or\n# windows powershell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time irm get.scoop.sh | iex scoop install make Poetry We have migrated from basic pip to Poetry to manage dependencies and package our project.\nOnce you have make ready, run make env, this will automatically install the right Poetry release, and create (plus manage) a .venv virtual environment for us based on the currently activated Python 3 version. 
Enjoy coding!\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nSwitching between Multiple Python Versions Do not develop/test on Python \u0026lt; 3.7, since Poetry and some other functionalities we implement rely on Python 3.7+\nIf you would like to test on multiple Python versions, run the following to switch and recreate virtual environment:\nWithout Python Version Tools poetry env use python3.x poetry install With Python Version Tools pyenv shell 3.9.11 poetry env use $(pyenv which python) poetry install Or try: virtualenvs.prefer-active-python, which is an experimental poetry feature that can be set to true so that it will automatically follow environment.\nNext Refer to the Plugin Development Guide to learn how to build a new plugin for a library.\n","excerpt":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including …","ref":"/docs/skywalking-python/next/en/contribution/developer/","title":"Quick Start for Contributors"},{"body":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including setting up environments, testing and releasing.\nFirst you need to have the make command available:\n# ubuntu/wsl sudo apt-get update sudo apt-get -y install make or\n# windows powershell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time irm get.scoop.sh | iex scoop install make Poetry We have migrated from basic pip to Poetry to manage dependencies and package our project.\nOnce you have make ready, run make env, this will automatically install the right Poetry release, and create (plus manage) a .venv virtual environment for us based on the currently activated Python 3 version. Enjoy coding!\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nSwitching between Multiple Python Versions Do not develop/test on Python \u0026lt; 3.7, since Poetry and some other functionalities we implement rely on Python 3.7+\nIf you would like to test on multiple Python versions, run the following to switch and recreate virtual environment:\nWithout Python Version Tools poetry env use python3.x poetry install With Python Version Tools pyenv shell 3.9.11 poetry env use $(pyenv which python) poetry install Or try: virtualenvs.prefer-active-python, which is an experimental poetry feature that can be set to true so that it will automatically follow environment.\nNext Refer to the Plugin Development Guide to learn how to build a new plugin for a library.\n","excerpt":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/developer/","title":"Quick Start for Contributors"},{"body":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). 
rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","excerpt":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from …","ref":"/docs/main/latest/en/setup/backend/backend-rabbitmq-monitoring/","title":"RabbitMQ monitoring"},{"body":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. 
OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. 
rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","excerpt":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from …","ref":"/docs/main/next/en/setup/backend/backend-rabbitmq-monitoring/","title":"RabbitMQ monitoring"},{"body":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. 
OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. 
rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","excerpt":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-rabbitmq-monitoring/","title":"RabbitMQ monitoring"},{"body":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. 
OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. 
rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","excerpt":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-rabbitmq-monitoring/","title":"RabbitMQ monitoring"},{"body":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. 
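The Customizations paragraph above points to /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml and /config/otel-rules/rabbitmq/rabbitmq-node.yaml, which are written in SkyWalking's Meter Analysis Language (MAL). The fragment below is only a sketch of that file layout; the source metric name, label, and expression are illustrative assumptions rather than lines copied from the bundled rules.

expSuffix: tag({tags -> tags.cluster = 'rabbitmq::' + tags.cluster}).service(['cluster'], Layer.RABBITMQ)
metricPrefix: meter_rabbitmq
metricsRules:
  # Hypothetical rule: aggregate a per-node counter into a cluster-level per-minute rate.
  - name: messages_published
    exp: rabbitmq_global_messages_received_total.sum(['cluster']).rate('PT1M')

With this layout, the result is stored as meter_rabbitmq_messages_published (metricPrefix plus the rule name), which is how the metric names listed in the tables above are produced.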
OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. 
rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","excerpt":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-rabbitmq-monitoring/","title":"RabbitMQ monitoring"},{"body":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... 
modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","excerpt":"Reading Context All following APIs provide readonly features for the tracing context from tracing …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/","title":"Reading Context"},{"body":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","excerpt":"Reading Context All following APIs provide readonly features for the tracing context from tracing …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/","title":"Reading Context"},{"body":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","excerpt":"Reading Context All following APIs provide readonly features for the tracing context from tracing …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/","title":"Reading Context"},{"body":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... 
modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","excerpt":"Reading Context All following APIs provide readonly features for the tracing context from tracing …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/","title":"Reading Context"},{"body":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","excerpt":"Reading Context All following APIs provide readonly features for the tracing context from tracing …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/","title":"Reading Context"},{"body":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/accesslog/v2/als.proto.\nSupport Forwarders  envoy-als-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-als-v2-receiver/","title":"Receiver/grpc-envoy-als-v2-receiver"},{"body":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/accesslog/v2/als.proto.\nSupport Forwarders  envoy-als-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-als-v2-receiver/","title":"Receiver/grpc-envoy-als-v2-receiver"},{"body":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/accesslog/v2/als.proto.\nSupport Forwarders  envoy-als-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. 
And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-als-v2-receiver/","title":"Receiver/grpc-envoy-als-v2-receiver"},{"body":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/3791753e94edbac8a90c5485c68136886c40e719/api/envoy/config/accesslog/v3/accesslog.proto.\nSupport Forwarders  envoy-als-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-als-v3-receiver/","title":"Receiver/grpc-envoy-als-v3-receiver"},{"body":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/3791753e94edbac8a90c5485c68136886c40e719/api/envoy/config/accesslog/v3/accesslog.proto.\nSupport Forwarders  envoy-als-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-als-v3-receiver/","title":"Receiver/grpc-envoy-als-v3-receiver"},{"body":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/3791753e94edbac8a90c5485c68136886c40e719/api/envoy/config/accesslog/v3/accesslog.proto.\nSupport Forwarders  envoy-als-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   
limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-als-v3-receiver/","title":"Receiver/grpc-envoy-als-v3-receiver"},{"body":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/metrics/v2/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-metrics-v2-receiver/","title":"Receiver/grpc-envoy-metrics-v2-receiver"},{"body":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/metrics/v2/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-metrics-v2-receiver/","title":"Receiver/grpc-envoy-metrics-v2-receiver"},{"body":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/metrics/v2/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-metrics-v2-receiver/","title":"Receiver/grpc-envoy-metrics-v2-receiver"},{"body":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/5f7d6efb5786ee3de31b1fb37c78fa281718b704/api/envoy/service/metrics/v3/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. 
And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-metrics-v3-receiver/","title":"Receiver/grpc-envoy-metrics-v3-receiver"},{"body":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/5f7d6efb5786ee3de31b1fb37c78fa281718b704/api/envoy/service/metrics/v3/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-metrics-v3-receiver/","title":"Receiver/grpc-envoy-metrics-v3-receiver"},{"body":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/5f7d6efb5786ee3de31b1fb37c78fa281718b704/api/envoy/service/metrics/v3/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   
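For readability, the DefaultConfig shown inline in the Envoy ALS and Metrics receiver entries above amounts to the following two settings (values as given in those entries):

# The time interval between two flush operations. And the time unit is millisecond.
flush_time: 1000
# The max cache count when receive the message.
limit_count: 500

Receiver entries whose Configuration table is empty ship with no plugin-specific options.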
limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-metrics-v3-receiver/","title":"Receiver/grpc-envoy-metrics-v3-receiver"},{"body":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration Discovery Service format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/ConfigurationDiscoveryService.proto.\nSupport Forwarders  native-cds-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-cds-receiver/","title":"Receiver/grpc-native-cds-receiver"},{"body":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration Discovery Service format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/ConfigurationDiscoveryService.proto.\nSupport Forwarders  native-cds-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-cds-receiver/","title":"Receiver/grpc-native-cds-receiver"},{"body":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration Discovery Service format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/ConfigurationDiscoveryService.proto.\nSupport Forwarders  native-cds-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-cds-receiver/","title":"Receiver/grpc-native-cds-receiver"},{"body":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/CLRMetric.proto.\nSupport Forwarders  native-clr-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-clr-receiver/","title":"Receiver/grpc-native-clr-receiver"},{"body":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/CLRMetric.proto.\nSupport Forwarders  native-clr-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-clr-receiver/","title":"Receiver/grpc-native-clr-receiver"},{"body":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr 
format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/CLRMetric.proto.\nSupport Forwarders  native-clr-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-clr-receiver/","title":"Receiver/grpc-native-clr-receiver"},{"body":"Receiver/grpc-native-ebpf-accesslog-receiver Description This is a receiver for SkyWalking native accesslog format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/accesslog.proto.\nSupport Forwarders  native-ebpf-accesslog-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-ebpf-accesslog-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-ebpf-accesslog-receiver/","title":"Receiver/grpc-native-ebpf-accesslog-receiver"},{"body":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-ebpf-profiling-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-ebpf-profiling-receiver/","title":"Receiver/grpc-native-ebpf-profiling-receiver"},{"body":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-ebpf-profiling-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-ebpf-profiling-receiver/","title":"Receiver/grpc-native-ebpf-profiling-receiver"},{"body":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-ebpf-profiling-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-ebpf-profiling-receiver/","title":"Receiver/grpc-native-ebpf-profiling-receiver"},{"body":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-event-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter 
…","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-event-receiver/","title":"Receiver/grpc-native-event-receiver"},{"body":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-event-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-event-receiver/","title":"Receiver/grpc-native-event-receiver"},{"body":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-event-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-event-receiver/","title":"Receiver/grpc-native-event-receiver"},{"body":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto.\nSupport Forwarders  native-jvm-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-jvm-receiver/","title":"Receiver/grpc-native-jvm-receiver"},{"body":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto.\nSupport Forwarders  native-jvm-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-jvm-receiver/","title":"Receiver/grpc-native-jvm-receiver"},{"body":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto.\nSupport Forwarders  native-jvm-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-jvm-receiver/","title":"Receiver/grpc-native-jvm-receiver"},{"body":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging 
…","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-log-receiver/","title":"Receiver/grpc-native-log-receiver"},{"body":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-log-receiver/","title":"Receiver/grpc-native-log-receiver"},{"body":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-log-receiver/","title":"Receiver/grpc-native-log-receiver"},{"body":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native management format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto.\nSupport Forwarders  native-management-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-management-receiver/","title":"Receiver/grpc-native-management-receiver"},{"body":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native management format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto.\nSupport Forwarders  native-management-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-management-receiver/","title":"Receiver/grpc-native-management-receiver"},{"body":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native management format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto.\nSupport Forwarders  native-management-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-management-receiver/","title":"Receiver/grpc-native-management-receiver"},{"body":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-meter-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    
","excerpt":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-meter-receiver/","title":"Receiver/grpc-native-meter-receiver"},{"body":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-meter-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-meter-receiver/","title":"Receiver/grpc-native-meter-receiver"},{"body":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-meter-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-meter-receiver/","title":"Receiver/grpc-native-meter-receiver"},{"body":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-process-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-process-receiver/","title":"Receiver/grpc-native-process-receiver"},{"body":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-process-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-process-receiver/","title":"Receiver/grpc-native-process-receiver"},{"body":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-process-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-process-receiver/","title":"Receiver/grpc-native-process-receiver"},{"body":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/profile/Profile.proto.\nSupport Forwarders  native-profile-grpc-forwarder  
DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-profile-receiver/","title":"Receiver/grpc-native-profile-receiver"},{"body":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/profile/Profile.proto.\nSupport Forwarders  native-profile-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-profile-receiver/","title":"Receiver/grpc-native-profile-receiver"},{"body":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/profile/Profile.proto.\nSupport Forwarders  native-profile-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-profile-receiver/","title":"Receiver/grpc-native-profile-receiver"},{"body":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing and span attached event format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto.\nSupport Forwarders  native-tracing-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-tracing-receiver/","title":"Receiver/grpc-native-tracing-receiver"},{"body":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing and span attached event format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto.\nSupport Forwarders  native-tracing-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-tracing-receiver/","title":"Receiver/grpc-native-tracing-receiver"},{"body":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing and span attached event format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto.\nSupport Forwarders  native-tracing-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-tracing-receiver/","title":"Receiver/grpc-native-tracing-receiver"},{"body":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 
format, which is defined at https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/collector/metrics/v1/metrics_service.proto.\nSupport Forwarders  otlp-metrics-v1-grpc-forwarder  DefaultConfig yaml \nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-otlp-metrics-v1-receiver/","title":"Receiver/grpc-otlp-metrics-v1-receiver"},{"body":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 format, which is defined at https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/collector/metrics/v1/metrics_service.proto.\nSupport Forwarders  otlp-metrics-v1-grpc-forwarder  DefaultConfig yaml \nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-otlp-metrics-v1-receiver/","title":"Receiver/grpc-otlp-metrics-v1-receiver"},{"body":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 format, which is defined at https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/collector/metrics/v1/metrics_service.proto.\nSupport Forwarders  otlp-metrics-v1-grpc-forwarder  DefaultConfig yaml \nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-otlp-metrics-v1-receiver/","title":"Receiver/grpc-otlp-metrics-v1-receiver"},{"body":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig # The native log request URI.uri:\u0026#34;/logging\u0026#34;# The request timeout seconds.timeout:5Configuration    Name Type Description     uri string config   timeout int     ","excerpt":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_http-native-log-receiver/","title":"Receiver/http-native-log-receiver"},{"body":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig # The native log request URI.uri:\u0026#34;/logging\u0026#34;# The request timeout seconds.timeout:5Configuration    Name Type Description     uri string config   timeout int     ","excerpt":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_http-native-log-receiver/","title":"Receiver/http-native-log-receiver"},{"body":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, which is defined at 
https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig # The native log request URI.uri:\u0026#34;/logging\u0026#34;# The request timeout seconds.timeout:5Configuration    Name Type Description     uri string config   timeout int     ","excerpt":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_http-native-log-receiver/","title":"Receiver/http-native-log-receiver"},{"body":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. 
The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","excerpt":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter …","ref":"/docs/main/latest/en/setup/backend/backend-redis-monitoring/","title":"Redis monitoring"},{"body":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. 
redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_used_bytes  meter_redis_memory_max_bytes Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration  meter_redis_commands_total The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","excerpt":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter …","ref":"/docs/main/next/en/setup/backend/backend-redis-monitoring/","title":"Redis monitoring"},{"body":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. 
Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","excerpt":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-redis-monitoring/","title":"Redis monitoring"},{"body":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. 
fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","excerpt":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-redis-monitoring/","title":"Redis monitoring"},{"body":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. 
redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","excerpt":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-redis-monitoring/","title":"Redis monitoring"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. 
This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/latest/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/next/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.0.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.1.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. 
This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.2.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.3.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.4.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.5.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. 
This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.6.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.7.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","excerpt":"Report service instance status   Service Instance Properties Service instance contains more …","ref":"/docs/main/latest/en/api/instance-properties/","title":"Report service instance status"},{"body":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","excerpt":"Report service instance status   Service Instance Properties Service instance contains more …","ref":"/docs/main/next/en/api/instance-properties/","title":"Report service instance status"},{"body":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","excerpt":"Report service instance status   Service Instance Properties Service instance contains more …","ref":"/docs/main/v9.4.0/en/api/instance-properties/","title":"Report service instance status"},{"body":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","excerpt":"Report service instance status   Service Instance Properties Service instance contains more …","ref":"/docs/main/v9.5.0/en/api/instance-properties/","title":"Report service instance status"},{"body":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","excerpt":"Report service instance status   Service Instance Properties Service instance contains more …","ref":"/docs/main/v9.6.0/en/api/instance-properties/","title":"Report service instance status"},{"body":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","excerpt":"Report service instance status   Service Instance Properties Service instance contains more …","ref":"/docs/main/v9.7.0/en/api/instance-properties/","title":"Report service instance status"},{"body":"RocketMQ monitoring SkyWalking leverages rocketmq-exporter for collecting metrics data from RocketMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rocketmq-exporter (https://github.com/apache/rocketmq-exporter?tab=readme-ov-file#readme) collects metrics data from RocketMQ, The RocketMQ version is required to be 4.3.2+. OpenTelemetry Collector fetches metrics from rocketmq-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rocketmq-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RocketMQ Monitoring RocketMQ monitoring provides multidimensional metrics monitoring of RocketMQ Exporter as Layer: RocketMQ Service in the OAP. In each cluster, the broker is represented as Instance and the topic is represented as Endpoint.\nRocketMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Messages Produced Today Count meter_rocketmq_cluster_messages_produced_today The number of cluster messages produced today. RocketMQ Exporter   Messages Consumed Today Count meter_rocketmq_cluster_messages_consumed_today The number of cluster messages consumed today. RocketMQ Exporter   Total Producer Tps Msg/sec meter_rocketmq_cluster_total_producer_tps The number of messages produced per second. RocketMQ Exporter   Total Consume Tps Msg/sec meter_rocketmq_cluster_total_consumer_tps The number of messages consumed per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_cluster_producer_message_size The max size of a message produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_cluster_consumer_message_size The max size of the consumed message per second. RocketMQ Exporter   Messages Produced Until Yesterday Count meter_rocketmq_cluster_messages_produced_until_yesterday The total number of messages put until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Messages Consumed Until Yesterday Count meter_rocketmq_cluster_messages_consumed_until_yesterday The total number of messages read until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Max Consumer Latency ms meter_rocketmq_cluster_max_consumer_latency The max number of consumer latency. 
RocketMQ Exporter   Max CommitLog Disk Ratio % meter_rocketmq_cluster_max_commitLog_disk_ratio The max utilization ratio of the commit log disk. RocketMQ Exporter   CommitLog Disk Ratio % meter_rocketmq_cluster_commitLog_disk_ratio The utilization ratio of the commit log disk per broker IP. RocketMQ Exporter   Pull ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_pull_threadPool_queue_head_wait_time The wait time in milliseconds for pulling threadPool queue per broker IP. RocketMQ Exporter   Send ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_send_threadPool_queue_head_wait_time The wait time in milliseconds for sending threadPool queue per broker IP. RocketMQ Exporter    RocketMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Produce TPS Msg/sec meter_rocketmq_broker_produce_tps The number of broker produces messages per second. RocketMQ Exporter   Consume QPS Msg/sec meter_rocketmq_broker_consume_qps The number of broker consumes messages per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_broker_producer_message_size The max size of the messages produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_broker_consumer_message_size The max size of the messages consumed per second. RocketMQ Exporter    RocketMQ Topic Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Max Producer Message Size Byte meter_rocketmq_topic_max_producer_message_size The maximum number of messages produced. RocketMQ Exporter   Max Consumer Message Size Byte meter_rocketmq_topic_max_consumer_message_size The maximum number of messages consumed. RocketMQ Exporter   Consumer Latency ms meter_rocketmq_topic_consumer_latency Consumption delay time of a consumer group. RocketMQ Exporter   Producer Tps Msg/sec meter_rocketmq_topic_producer_tps The number of messages produced per second. RocketMQ Exporter   Consumer Group Tps Msg/sec meter_rocketmq_topic_consumer_group_tps The number of messages consumed per second per consumer group. RocketMQ Exporter   Producer Offset Count meter_rocketmq_topic_producer_offset The max progress of a topic\u0026rsquo;s production message. RocketMQ Exporter   Consumer Group Offset Count meter_rocketmq_topic_consumer_group_offset The max progress of a topic\u0026rsquo;s consumption message per consumer group. RocketMQ Exporter   Producer Message Size Byte/sec meter_rocketmq_topic_producer_message_size The max size of messages produced per second. RocketMQ Exporter   Consumer Message Size Byte/sec meter_rocketmq_topic_consumer_message_size The max size of messages consumed per second. RocketMQ Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/rocketmq/rocketmq-cluster.yaml, otel-rules/rocketmq/rocketmq-broker.yaml, otel-rules/rocketmq/rocketmq-topic.yaml. The RocketMQ dashboard panel configurations are found in ui-initialized-templates/rocketmq.\n","excerpt":"RocketMQ monitoring SkyWalking leverages rocketmq-exporter for collecting metrics data from …","ref":"/docs/main/next/en/setup/backend/backend-rocketmq-monitoring/","title":"RocketMQ monitoring"},{"body":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your plugin logic. If you want to perform debugging, follow these steps:\n Write test code: Write a sample application that includes the framework content you need to test. 
Build the Agent: In the project root directory, run the make build command to compile the Agent program into a binary file. Adjust the test program\u0026rsquo;s Debug configuration: Modify the test program\u0026rsquo;s Debug configuration, which will be explained in more detail later. Launch the program and add breakpoints: Start your sample application and add breakpoints in your plugin code where you want to pause the execution and inspect the program state.  Write test code Please make sure that you have imported github.com/apache/skywalking-go in your test code. You can refer to the documentation on how to compile using go build for specific steps.\nAdjust the test program\u0026rsquo;s Debug configuration Please locate the following two paths:\n Go Agent: Locate the binary file generated through make build in the previous step. Current project path: Find the root directory of the current project, which will be used to search for source files in subsequent steps.  Then, please enter the following command in the tool arguments section of the debug configuration:\n-toolexec '/path/to/skywalking-go-agent -debug /path/to/current-project-path' -a\u0026quot;. ","excerpt":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your …","ref":"/docs/skywalking-go/latest/en/development-and-contribution/running-and-debugging/","title":"Running and Debugging"},{"body":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your plugin logic. If you want to perform debugging, follow these steps:\n Write test code: Write a sample application that includes the framework content you need to test. Build the Agent: In the project root directory, run the make build command to compile the Agent program into a binary file. Adjust the test program\u0026rsquo;s Debug configuration: Modify the test program\u0026rsquo;s Debug configuration, which will be explained in more detail later. Launch the program and add breakpoints: Start your sample application and add breakpoints in your plugin code where you want to pause the execution and inspect the program state.  Write test code Please make sure that you have imported github.com/apache/skywalking-go in your test code. You can refer to the documentation on how to compile using go build for specific steps.\nAdjust the test program\u0026rsquo;s Debug configuration Please locate the following two paths:\n Go Agent: Locate the binary file generated through make build in the previous step. Current project path: Find the root directory of the current project, which will be used to search for source files in subsequent steps.  Then, please enter the following command in the tool arguments section of the debug configuration:\n-toolexec '/path/to/skywalking-go-agent -debug /path/to/current-project-path' -a\u0026quot;. ","excerpt":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your …","ref":"/docs/skywalking-go/next/en/development-and-contribution/running-and-debugging/","title":"Running and Debugging"},{"body":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your plugin logic. If you want to perform debugging, follow these steps:\n Write test code: Write a sample application that includes the framework content you need to test. Build the Agent: In the project root directory, run the make build command to compile the Agent program into a binary file. 
Adjust the test program\u0026rsquo;s Debug configuration: Modify the test program\u0026rsquo;s Debug configuration, which will be explained in more detail later. Launch the program and add breakpoints: Start your sample application and add breakpoints in your plugin code where you want to pause the execution and inspect the program state.  Write test code Please make sure that you have imported github.com/apache/skywalking-go in your test code. You can refer to the documentation on how to compile using go build for specific steps.\nAdjust the test program\u0026rsquo;s Debug configuration Please locate the following two paths:\n Go Agent: Locate the binary file generated through make build in the previous step. Current project path: Find the root directory of the current project, which will be used to search for source files in subsequent steps.  Then, please enter the following command in the tool arguments section of the debug configuration:\n-toolexec '/path/to/skywalking-go-agent -debug /path/to/current-project-path' -a\u0026quot;. ","excerpt":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your …","ref":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/running-and-debugging/","title":"Running and Debugging"},{"body":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. 
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","excerpt":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in …","ref":"/docs/main/latest/en/setup/backend/dashboards-so11y-satellite/","title":"Satellite self observability dashboard"},{"body":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","excerpt":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in …","ref":"/docs/main/next/en/setup/backend/dashboards-so11y-satellite/","title":"Satellite self observability dashboard"},{"body":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","excerpt":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in …","ref":"/docs/main/v9.3.0/en/setup/backend/dashboards-so11y-satellite/","title":"Satellite self observability dashboard"},{"body":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. 
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","excerpt":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in …","ref":"/docs/main/v9.4.0/en/setup/backend/dashboards-so11y-satellite/","title":"Satellite self observability dashboard"},{"body":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","excerpt":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in …","ref":"/docs/main/v9.5.0/en/setup/backend/dashboards-so11y-satellite/","title":"Satellite self observability dashboard"},{"body":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","excerpt":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in …","ref":"/docs/main/v9.6.0/en/setup/backend/dashboards-so11y-satellite/","title":"Satellite self observability dashboard"},{"body":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","excerpt":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in …","ref":"/docs/main/v9.7.0/en/setup/backend/dashboards-so11y-satellite/","title":"Satellite self observability dashboard"},{"body":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install the Satellite component.\nInstall Operator And Backend  Follow Operator installation instrument to install the operator. 
Follow Deploy OAP server and UI to install the backend.  Deploy Satellite with default settings  Deploy the Satellite using the command below:  Clone this repo, then change the current directory to samples.\nIssue the command below to deploy the Satellite.\nkubectl apply -f satellite.yaml Check the Satellite in Kubernetes:  $ kubectl get satellite NAME INSTANCES RUNNING ADDRESS default 1 1 default-satellite.default Satellite With HPA  Follow Custom Metrics Adapter to install the metrics adapter. Update the config in the Satellite CRD and re-apply it to activate the metrics service in the Satellite.  config: - name: SATELLITE_TELEMETRY_EXPORT_TYPE value: metrics_service Update the config in the OAP CRD and re-apply it to activate the satellite MAL.  config: - name: SW_METER_ANALYZER_ACTIVE_FILES value: satellite Add the HorizontalPodAutoscaler CRD, and update the service and target in the config file to your expected configuration. It's recommended to set the stabilizationWindowSeconds and selectPolicy for scaling up in the HPA, which helps prevent continuous scaling up of pods due to metric delay fluctuations. Check the HorizontalPodAutoscaler in Kubernetes:  $ kubectl get HorizontalPodAutoscaler NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 2/1900, 5/75 1 3 1 92m ","excerpt":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install …","ref":"/docs/skywalking-swck/latest/examples/satellite/","title":"Satellite Usage"},{"body":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install the Satellite component.\nInstall Operator And Backend  Follow the Operator installation instructions to install the operator. Follow Deploy OAP server and UI to install the backend.  Deploy Satellite with default settings  Deploy the Satellite using the command below:  Clone this repo, then change the current directory to samples.\nIssue the command below to deploy the Satellite.\nkubectl apply -f satellite.yaml Check the Satellite in Kubernetes:  $ kubectl get satellite NAME INSTANCES RUNNING ADDRESS default 1 1 default-satellite.default Satellite With HPA  Follow Custom Metrics Adapter to install the metrics adapter. Update the config in the Satellite CRD and re-apply it to activate the metrics service in the Satellite.  config: - name: SATELLITE_TELEMETRY_EXPORT_TYPE value: metrics_service Update the config in the OAP CRD and re-apply it to activate the satellite MAL.  config: - name: SW_METER_ANALYZER_ACTIVE_FILES value: satellite Add the HorizontalPodAutoscaler CRD, and update the service and target in the config file to your expected configuration. It's recommended to set the stabilizationWindowSeconds and selectPolicy for scaling up in the HPA, which helps prevent continuous scaling up of pods due to metric delay fluctuations. Check the HorizontalPodAutoscaler in Kubernetes:  $ kubectl get HorizontalPodAutoscaler NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 2/1900, 5/75 1 3 1 92m ","excerpt":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install …","ref":"/docs/skywalking-swck/next/examples/satellite/","title":"Satellite Usage"},{"body":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install the Satellite component.\nInstall Operator And Backend  Follow the Operator installation instructions to install the operator. 
Follow Deploy OAP server and UI to install backend.  Deploy Satellite with default setting  Deploy the Storage use the below command:  Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f satellite.yaml Check the Satellite in Kubernetes:  $ kubectl get satellite NAME INSTANCES RUNNING ADDRESS default 1 1 default-satellite.default Satellite With HPA  Follow Custom Metrics Adapter to install the metrics adapter. Update the config in the Satellite CRD and re-apply it to activate the metrics service in satellite.  config: - name: SATELLITE_TELEMETRY_EXPORT_TYPE value: metrics_service Update the config in the OAP CRD and re-apply it to activate the satellite MAL.  config: - name: SW_METER_ANALYZER_ACTIVE_FILES value: satellite Add the HorizontalPodAutoScaler CRD, and update the config file the service and target to your excepted config. It\u0026rsquo;s recommend to set the stabilizationWindowSeconds and selectPolicy of scaling up in HPA, which would help prevent continuous scaling up of pods due to metric delay fluctuations. Check the HorizontalPodAutoScaler in the Kubernetes:  $ kubectl get HorizontalPodAutoscaler NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 2/1900, 5/75 1 3 1 92m ","excerpt":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install …","ref":"/docs/skywalking-swck/v0.9.0/examples/satellite/","title":"Satellite Usage"},{"body":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  
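For reference only, a minimal sketch of what the client-side option can look like in gRPC-Go: the client resolves every backend address itself (through a hypothetical DNS name in this sketch) and spreads RPCs with the built-in round_robin policy. This is not how SkyWalking agents behave today; it is included only to make the contrast with the proxy approach concrete.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Hypothetical headless-service name; every resolved address becomes a
	// candidate backend for the built-in round_robin balancing policy.
	conn, err := grpc.Dial(
		"dns:///oap.skywalking-system:11800",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// The SkyWalking report-service clients would be created on top of conn here.
}
```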
From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. 
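To make the routing discussion concrete, here is a minimal, illustrative Go sketch, not Satellite's actual code, of plain Round-Robin selection and of the hash-plus-LRU "Fixed" routing introduced just below. The golang-lru library is an assumed third-party helper used only for illustration.

```go
package main

import (
	"fmt"
	"hash/fnv"

	lru "github.com/hashicorp/golang-lru/v2"
)

type router struct {
	upstreams []string
	next      int
	cache     *lru.Cache[string, string]
}

func newRouter(upstreams []string) *router {
	cache, _ := lru.New[string, string](1024)
	return &router{upstreams: upstreams, cache: cache}
}

// roundRobin picks upstream nodes in order, so each node is selected an
// (almost) equal number of times.
func (r *router) roundRobin() string {
	node := r.upstreams[r.next%len(r.upstreams)]
	r.next++
	return node
}

// fixed routes the same identification (for example a service instance name)
// to the same upstream node, remembering the mapping in the LRU cache.
func (r *router) fixed(id string) string {
	if node, ok := r.cache.Get(id); ok {
		return node
	}
	h := fnv.New32a()
	h.Write([]byte(id))
	node := r.upstreams[int(h.Sum32()%uint32(len(r.upstreams)))]
	r.cache.Add(id, node)
	return node
}

func main() {
	r := newRouter([]string{"oap-0:11800", "oap-1:11800", "oap-2:11800"})
	fmt.Println(r.roundRobin(), r.roundRobin())               // spreads load in order
	fmt.Println(r.fixed("instance-A"), r.fixed("instance-A")) // always the same node
}
```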
The Weight Round-Robin is more suitable when the upstream server nodes do not all have the same machine configuration.\nThe Fixed algorithm is a hybrid algorithm. It ensures that the same data is routed to the same upstream server node, and when the upstream cluster scales out, it still routes to the same node; it only reroutes if that upstream node no longer exists. This algorithm is mainly used for the SkyWalking Meter protocol, because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible, so the amount of data stays controllable. Look up the upstream node for this identification in the LRU cache, and use it if it exists. Otherwise, generate the corresponding hash value from the identification and pick the upstream server node from the upstream list. Save the mapping between the upstream server node and the identification to the LRU cache.  The advantage of this algorithm is that it binds the data to an upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes a certain amount of memory to store the mapping.\nAs shown below, the image is divided into two parts:\n The left side shows that the same data content is always routed to the same server node. The right side shows the routing algorithm: derive a number from the data and use the remainder to obtain the position.  We choose a combination of the Round-Robin and Fixed algorithms for routing:\n The Fixed routing algorithm is used for specific protocols, mainly when passing metrics data over the SkyWalking Meter protocol. The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes should be as uniform as possible, so there is no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? The proxy still needs to deal with the load balancing problem from the client to itself, especially when deploying a proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects a connection when the load is unbalanced to let the client re-pick a proxy. Resource limit+HPA: Restrict the connection resources of each proxy and stop accepting new connections when the resource limit is reached, then use the HPA mechanism of Kubernetes to dynamically scale out the number of proxies.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensures that the number of connections in each proxy is relatively balanced Simple to use   Cons Each client needs to ensure that data is not lost; the client is required to accept GOAWAY responses May cause a sudden increase in traffic on some nodes; each client needs to ensure that data is not lost Traffic will not be particularly balanced across instances    We choose Limit+HPA for these reasons:\n Easy to configure and use the proxy, and easy to understand based on basic data metrics. No data loss due to broken connections. 
There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. 
For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. 
One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","excerpt":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains …","ref":"/docs/main/latest/en/academy/scaling-with-apache-skywalking/","title":"Scaling with Apache SkyWalking"},{"body":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. 
In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. 
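To make this concrete, below is a minimal sketch, not the Satellite implementation, of the asynchronous batch policy: events are buffered in a queue (a Go channel here) and flushed either when the batch is full or when the cycle timer fires. A single consumer is used for brevity, whereas the design described here consumes the queue in parallel.

```go
package main

import (
	"fmt"
	"time"
)

// Event wraps a piece of received data, as in step 1 of the policy above.
type Event struct{ Payload string }

// sender drains the queue and flushes a batch either when it is full or when
// the cycle timer fires, as in step 3.
func sender(queue <-chan Event, batchSize int, cycle time.Duration) {
	ticker := time.NewTicker(cycle)
	defer ticker.Stop()
	batch := make([]Event, 0, batchSize)
	flush := func() {
		if len(batch) == 0 {
			return
		}
		// A real proxy would send the batch to the upstream OAP here.
		fmt.Printf("sending %d events upstream\n", len(batch))
		batch = batch[:0]
	}
	for {
		select {
		case e, ok := <-queue:
			if !ok {
				flush()
				return
			}
			batch = append(batch, e)
			if len(batch) == batchSize {
				flush()
			}
		case <-ticker.C:
			flush()
		}
	}
}

func main() {
	queue := make(chan Event, 1024)
	go sender(queue, 100, 2*time.Second)
	for i := 0; i < 250; i++ {
		queue <- Event{Payload: fmt.Sprintf("segment-%d", i)} // step 2: enqueue
	}
	close(queue)
	time.Sleep(100 * time.Millisecond) // give the sender time to drain (demo only)
}
```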
The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      
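Before comparing the three options, here is a minimal sketch of the third one (resource limit): cap the number of concurrent connections a single proxy instance will accept; combined with the HPA described below, sustained demand beyond the cap is absorbed by newly scaled instances. The limit of 10 mirrors the HPA target used later in this article and is purely illustrative.

```go
package main

import (
	"log"
	"net"

	"golang.org/x/net/netutil"
	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":11800")
	if err != nil {
		log.Fatal(err)
	}
	// Accept at most 10 simultaneous connections; additional connections wait
	// until a slot is freed.
	limited := netutil.LimitListener(lis, 10)

	srv := grpc.NewServer()
	// Register the receiving services here in a real proxy.
	log.Fatal(srv.Serve(limited))
}
```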
Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  
# install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. 
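For reference, the Kubernetes HPA computes the desired replica count as desiredReplicas = ceil(currentReplicas × currentMetricValue / targetValue). With the per-Satellite target of 10 connections used in this example, a single replica observing 11 connections gives ceil(1 × 11 / 10) = 2 replicas, which matches the scale-up shown below.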
We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","excerpt":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains …","ref":"/docs/main/next/en/academy/scaling-with-apache-skywalking/","title":"Scaling with Apache SkyWalking"},{"body":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. 
In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. 
The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      
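As an illustration of the first option (connection management), the sketch below uses gRPC's built-in server keepalive parameters to cap connection age; aged connections receive a GOAWAY and the client reconnects, which re-balances load over time. This is analogous to, but not the same as, the max_connection proposal mentioned above.

```go
package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	lis, err := net.Listen("tcp", ":11800")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionAge:      10 * time.Minute, // force clients to reconnect periodically
		MaxConnectionAgeGrace: 30 * time.Second, // let in-flight RPCs finish first
	}))
	// Register the receiving services here in a real proxy.
	log.Fatal(srv.Serve(lis))
}
```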
How to balance the load balancer itself? The Proxy still needs to deal with the load balancing problem from the client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client side to specify the maximum duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy is cluster-aware and actively disconnects connections when the load is unbalanced, allowing the client to re-pick a proxy. Resource limit+HPA: Restrict the connection resources of each proxy, stop accepting new connections when the resource limit is reached, and use the HPA mechanism of Kubernetes to dynamically scale out the number of proxies.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use  Ensures that the number of connections in each proxy is relatively balanced  Simple to use   Cons Each client needs to ensure that data is not lost; the client is required to accept GOAWAY responses  May cause a sudden increase in traffic on some nodes; each client needs to ensure that data is not lost  Traffic will not be particularly balanced across instances    We choose Limit+HPA for these reasons:\n The proxy is easy to configure and use, and easy to understand based on basic data metrics. No data loss due to broken connections. There is no need for the client to implement any extra protocol to prevent data loss, which matters especially when the client is a commercial product. The connections across the nodes in the proxy cluster do not need to be particularly balanced, as long as each proxy node itself is high-performance.  A small sketch of such a connection limit is shown below.
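As a rough illustration of the resource-limit idea, the Go sketch below caps the number of concurrent gRPC connections one proxy instance will accept, using golang.org/x/net/netutil. The port, the limit of 10, and the empty gRPC server are assumptions made for the sketch; Satellite configures its real limits and the HPA metric through its own configuration rather than code like this.

package main

import (
	"log"
	"net"

	"golang.org/x/net/netutil"
	"google.golang.org/grpc"
)

func main() {
	// Listen on the gRPC receive port (11800 is the port used later in this demo).
	lis, err := net.Listen("tcp", ":11800")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}

	// Cap concurrent connections at 10; the 11th connection waits in Accept
	// until an existing one closes, giving the HPA time to add another replica.
	limited := netutil.LimitListener(lis, 10)

	// An empty gRPC server stands in for the real receiver services.
	srv := grpc.NewServer()
	if err := srv.Serve(limited); err != nil {
		log.Fatalf("serve: %v", err)
	}
}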
SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It sits between the Client and the SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite accepts the traffic from the Client, perceives all the OAP nodes through a Kubernetes Label Selector or manual configuration, and load balances the traffic to the upstream OAP nodes.\nAs shown below, a single client still maintains a connection with a single Satellite; the Satellite establishes a connection with each OAP and load balances messages to the OAP nodes.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for SkyWalking users that provisions, upgrades, and maintains the relevant SkyWalking components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps are performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scale the Satellite: Kubernetes HPA senses that the metric values are in line with expectations, so the Satellite is scaled automatically.  As shown below, the dotted line divides the two parts. HPA uses the SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA scales the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After the SkyWalking OAP scales, the traffic is automatically load balanced through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands can be accessed through GitHub.\nSkyWalking Scaling We will use the Bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The steps are:\n Install istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with the demo configuration profile, and enable the Envoy ALS. Transmit the ALS messages to the satellite, which we will deploy later. Add the label to the default namespace so Istio can automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enable envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK makes it convenient to deploy and upgrade SkyWalking-related components on Kubernetes. The automatic scaling of Satellite also mainly relies on SWCK. For more information, you can refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026\u0026 cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the SkyWalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on the SkyWalking WebUI. At this point, you can see that the Satellite is working!\nDeploy Monitor We need to install the OpenTelemetry Collector to collect metrics from the OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser, visit http://localhost:8080/, and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scale the number of OAPs via the Deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced across the OAPs.\nSatellite Scaling After we have completed the SkyWalking scaling, we carry out the Satellite scaling demo.\nDeploy SWCK HPA SWCK provides an adapter that implements the Kubernetes external metrics API for the HPA by reading the metrics in the SkyWalking OAP. 
We expose the metrics service in Satellite to the OAP and configure the HPA resource to auto-scale the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you can see we have 9 connections on one Satellite. One Envoy proxy may establish multiple connections to the Satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application Scaling the application establishes more connections to the Satellite, which verifies whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite is deployed as a single instance, and in this demo that single instance accepts 11 connections. The HPA resource limits one Satellite to 10 connections and uses a stabilization window so that the Satellite scales up stably. In this case, we deploy the Bookinfo application with 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after the HPA resource is running, the Satellite is automatically scaled up to 2 instances (following the standard Kubernetes HPA formula, desiredReplicas = ceil(currentReplicas * currentMetric / target) = ceil(1 * 11 / 10) = 2). You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we can see that when the number of gRPC connections on each Satellite exceeds 10, the Satellite automatically scales through the HPA rule. As a result, the connection number drops back to a normal status (in this example, less than 10).\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","excerpt":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains …","ref":"/docs/main/v9.3.0/en/academy/scaling-with-apache-skywalking/","title":"Scaling with Apache SkyWalking"},{"body":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through the SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, a data source transmits data by communicating with a single server node; only when the connection is broken is the reconnecting policy used, based on DNS round-robin mode. When new services are added at runtime, or the OAP load stays high due to increased traffic from the observed services, the OAP cluster needs to scale out to handle the increased traffic. The load of a new OAP node would be lower, because all existing agents are already connected to the previous nodes. Even without scaling, the load of the OAP nodes can be unbalanced, because each agent keeps the connection it picked randomly at the booting stage. 
In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. 
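To make the asynchronous batch policy more concrete, the following is a minimal Go sketch of the receive, wrap, enqueue, and flush cycle described above. It is an illustration only, not Satellite's actual implementation: the Event type, the queue capacity, the batch size, and the flush interval are assumptions chosen for the example, and the single consumer loop stands in for what Satellite does with multiple workers.

package main

import (
	"fmt"
	"time"
)

// Event wraps one piece of received telemetry data (hypothetical type).
type Event struct {
	Payload []byte
}

// batcher buffers events and flushes them either when a batch is full or
// when the flush interval elapses, mirroring the policy described above.
type batcher struct {
	queue         chan Event
	batchSize     int
	flushInterval time.Duration
	send          func([]Event) // sends one batch to an upstream OAP node
}

// Receive wraps incoming data as an Event and pushes it onto the queue.
func (b *batcher) Receive(payload []byte) {
	b.queue <- Event{Payload: payload}
}

// Run drains the queue, collecting events into a batch and sending the
// batch when it is full or when the ticker fires.
func (b *batcher) Run() {
	ticker := time.NewTicker(b.flushInterval)
	defer ticker.Stop()
	batch := make([]Event, 0, b.batchSize)
	flush := func() {
		if len(batch) > 0 {
			b.send(batch)
			batch = make([]Event, 0, b.batchSize)
		}
	}
	for {
		select {
		case ev, ok := <-b.queue:
			if !ok {
				flush()
				return
			}
			batch = append(batch, ev)
			if len(batch) >= b.batchSize {
				flush()
			}
		case <-ticker.C:
			flush()
		}
	}
}

func main() {
	b := &batcher{
		queue:         make(chan Event, 1024),
		batchSize:     100,
		flushInterval: 500 * time.Millisecond,
		send: func(events []Event) {
			// In Satellite this would be a gRPC call to an upstream OAP node.
			fmt.Printf("sending batch of %d events\n", len(events))
		},
	}
	go b.Run()
	for i := 0; i < 250; i++ {
		b.Receive([]byte("segment"))
	}
	time.Sleep(time.Second) // give the ticker time to flush the remainder
	close(b.queue)
}

In the real project the queue is also consumed by multiple workers to make fuller use of network I/O, which this single consumer loop omits for brevity.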
The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      
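As a rough illustration of the third option, here is a minimal Go sketch of capping the number of gRPC connections a single proxy instance will accept, using the LimitListener helper from golang.org/x/net/netutil. This is not Satellite's actual implementation; the port and the limit of 10 are example values, and the scaling half of the approach (an HPA rule driven by a connection-count metric) is configured separately in Kubernetes, as shown later in this article.

package main

import (
	"log"
	"net"

	"golang.org/x/net/netutil"
	"google.golang.org/grpc"
)

const maxConnections = 10 // per-instance connection limit (example value)

func main() {
	lis, err := net.Listen("tcp", ":11800")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	// LimitListener stops accepting new connections once maxConnections are
	// open; additional clients wait until a slot frees up. Combined with an
	// HPA rule on a connection-count metric, this caps the load on a single
	// proxy instance while extra instances are scaled out.
	limited := netutil.LimitListener(lis, maxConnections)

	srv := grpc.NewServer()
	// Register the receiving services (trace, log, ALS, ...) on srv here.
	if err := srv.Serve(limited); err != nil {
		log.Fatalf("serve: %v", err)
	}
}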
Connection management — Pros: simple to use. Cons: each client needs to ensure that data is not lost, and the client is required to handle GOAWAY responses. Cluster awareness — Pros: keeps the number of connections on each proxy relatively balanced. Cons: may cause a sudden increase in traffic on some nodes, and each client needs to ensure that data is not lost. Resource Limit+HPA — Pros: simple to use. Cons: traffic will not be perfectly balanced across instances.  We choose Resource Limit+HPA for these reasons:\n The proxy is easy to configure and use, and its behavior is easy to understand from basic metrics. No data loss due to broken connections. There is no need for the client to implement any extra protocol to prevent data loss, especially when the client is a commercial product. The connections across the proxy cluster do not need to be particularly balanced, as long as each proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this proxy in the SkyWalking-Satellite project. It sits between the client and the SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite accepts traffic from the client, discovers all OAP nodes through a Kubernetes Label Selector or manual configuration, and load balances the traffic to the upstream OAP nodes.\nAs shown below, a single client still maintains a connection with a single Satellite, while the Satellite establishes a connection with each OAP and load balances messages across the OAP nodes.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for SkyWalking users that provisions, upgrades, and maintains the relevant SkyWalking components and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps are performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics from the OAP. Scale the Satellite: when Kubernetes HPA sees that the metric values match the configured expectations, it scales the Satellite automatically.  As shown below, the dotted line divides the two parts: HPA uses the SWCK Adapter to read the metrics from the OAP, and when the threshold is met, HPA scales the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: after the SkyWalking OAP scales, traffic is automatically load balanced through Satellite. Satellite Scaling: load balancing of Satellite’s own traffic.  NOTE: All commands can be accessed through GitHub.\nSkyWalking Scaling We will use the Bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The steps are as follows:\n Install istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with the demo configuration profile, enable the Envoy ALS, and transmit the ALS messages to the Satellite (which we will deploy later). Add the label to the default namespace so that Istio can automatically inject Envoy sidecar proxies when you deploy your application later.  
# install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. 
We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","excerpt":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains …","ref":"/docs/main/v9.7.0/en/academy/scaling-with-apache-skywalking/","title":"Scaling with Apache SkyWalking"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  
long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  
long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  
bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  
long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  
string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  
boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/latest/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  
int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  
string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  
int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  
int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  
int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPES with K8S Prefix All metrics starting with K8S are derived from Kubernetes monitoring by Rover(eBPF agent).\nService, Service Instance and relations For all K8SService, K8SServiceInstance, K8SServiceRelation and K8SServiceInstanceRelation, they all have the following package/protocol level metric contents.\n   Name Remarks Group Key Type     type The metrics from log type, the following names should have the type prefix. The value may be connect, accept, close, write, read, protocol.  string   connect.duration Connect to other service use duration.  long(in nanoseconds)   connect.success The connect is success or not.  boolean   accept.duration Accept connection from client use duration.  long(in nanoseconds)   close.duration Close one connection use duration.  long(in nanoseconds)   close.success Close one connection is success or not.  boolean   write.duration Write data to the connection use duration.  long(in nanoseconds)   write.syscall Write data to the connection syscall name. The value should be Write, Writev, Send, SendTo, SendMsg, SendMmsg, SendFile, SendFile64.  string   write.l4.duration Write data to the connection use duration on Linux Layer 4.  long(in nanoseconds)   write.l4.transmitPackageCount Total package count on write data to the connection.  long   write.l4.retransmitPackageCount Total retransmit package count on write data to the connection.  long   write.l4.totalPackageSize Total transmit package size on write data to the connection.  
long(bytes)   write.l3.duration Write data to the connection use duration on Linux Layer 3.  long(in nanoseconds)   write.l3.localDuration Write data to the connection use local duration on Linux Layer 3.  long(in nanoseconds)   write.l3.outputDuration Write data to the connection use output duration on Linux Layer 3.  long(in nanoseconds)   write.l3.resolveMACCount Total resolve remote MAC address count on write data to the connection.  long   write.l3.resolveMACDuration Total resolve remote MAC address use duration on write data to the connection.  long(in nanoseconds)   write.l3.netFilterCount Total do net filtering count on write data to the connection.  long   write.l3.netFilterDuration Total do net filtering use duration on write data to the connection.  long(in nanoseconds)   write.l2.duration Write data to the connection use duration on Linux L2.  long(nanoseconds)   write.l2.networkDeviceName The network device name on write data to the connection.  string   write.l2.enterQueueBufferCount The write package count to the network device queue on write data to the connection.  long   write.l2.readySendDuration Total ready send buffer duration on write data to the connection.  long(in nanoseconds)   write.l2.networkDeviceSendDuration Total network send buffer use duration on write data to the connection.  long(in nanoseconds)   read.duration Read data from the connection use duration.  long(in nanoseconds)   read.syscall Read data from the connection syscall name. The value should Read, Readv, Recv, RecvFrom, RecvMsg, RecvMmsg.  string   read.l4.duration Read data to the connection use duration on Linux Layer 4.  long(in nanoseconds)   read.l3.duration Read data to the connection use duration on Linux Layer 3.  long(in nanoseconds)   read.l3.rcvDuration Read data to the connection use receive duration on Linux Layer 3.  long(in nanoseconds)   read.l3.localDuration Read data to the connection use local duration on Linux Layer 3.  long(in nanoseconds)   read.l3.netFilterCount Total do net filtering count on read data from the connection.  long   read.l3.netFilterDuration Total do net filtering use duration on read data from the connection.  long(in nanoseconds)   read.l2.netDeviceName The network device name on read data from the connection.  string   read.l2.packageCount Total read package count on the connection.  long   read.l2.totalPackageSize Total read package size on the connection.  long(bytes)   read.l2.packageToQueueDuration Total read package to the queue duration on the connection.  long(in nanoseconds)   read.l2.rcvPackageFromQueueDuration Total read package from the queue duration on the connection.  long(in nanoseconds)   protocol.type The protocol type name, the following names should have the type prefix. The value should be HTTP.  string   protocol.success This protocol request and response is success or not.  boolean   protocol.http.latency The latency of HTTP response.  long(in nanoseconds)   protocol.http.url The url path of HTTP request.  string   protocol.http.method The method name of HTTP request.  string   protocol.http.statusCode The response code of HTTP response.  int   protocol.http.sizeOfRequestHeader The header size of HTTP request.  long(bytes)   protocol.http.sizeOfRequestBody The body size of HTTP request.  long(bytes)   protocol.http.sizeOfResponseHeader The header size of HTTP response.  long(bytes)   protocol.http.sizeOfResponseBody The body size of HTTP response.  
long(bytes)    SCOPE K8SService    Name Remarks Group Key Type     name The service name in kubernetes.  string   layer The layer in kubernetes service.  string   detectPoint Where the relation is detected. The value may be client or server.  enum    SCOPE K8SServiceInstance    Name Remarks Group Key Type     serviceName The service name in kubernetes.  string   serviceInstanceName The pod name in kubernetes.  string   layer The layer of kubernetes service.  string   detectPoint Where the relation is detected. The value may be client or server.  enum    SCOPE K8SServiceRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceLayer The source layer service in kubernetes.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   tlsMode The TLS mode of relation. The value may be Plain or TLS.  enum   destServiceName The dest service name in kubernetes.  string   destLayer The dest layer service in kubernetes.  string    SCOPE K8SServiceInstanceRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceServiceInstanceName The source pod name in kubernetes.  string   sourceLayer The source layer service in kubernetes.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   tlsMode The TLS mode of relation. The value may be Plain or TLS.  enum   destServiceName The dest service name in kubernetes.  string   destServiceInstanceName The dest pod name in kubernetes.  string   destLayer The dest layer service in kubernetes.  string    Endpoint and Endpoint Relation For K8SEndpoint and K8SEndpointRelation, they only have the following protocol level metric contents.\n   Name Remarks Group Key Type     protocol.type The protocol type name, the following names should have the type prefix. The value should be HTTP.  string   protocol.success This protocol request and response is success or not.  boolean   protocol.http.latency The latency of HTTP response.  long(in nanoseconds)   protocol.http.url The url path of HTTP request.  string   protocol.http.method The method name of HTTP request.  string   protocol.http.statusCode The response code of HTTP response.  int   protocol.http.sizeOfRequestHeader The header size of HTTP request.  long(bytes)   protocol.http.sizeOfRequestBody The body size of HTTP request.  long(bytes)   protocol.http.sizeOfResponseHeader The header size of HTTP response.  long(bytes)   protocol.http.sizeOfResponseBody The body size of HTTP response.  long(bytes)    SCOPE K8SEndpoint    Name Remarks Group Key Type     serviceName The service name in kubernetes.  string   layer The layer in kubernetes service.  string   endpointName The endpoint name detect in kubernetes service.  
string   duration The duration of the service endpoint response latency.  long    SCOPE K8SEndpointRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceServiceName The layer in kubernetes source service.  string   sourceEndpointName The endpoint name detect in kubernetes source service.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   destServiceName The dest service name in kubernetes.  string   destServiceName The layer in kubernetes dest service.  string   destEndpointName The endpoint name detect in kubernetes dest service.  string    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/next/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  
long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. 
The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  
string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  
string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  
string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  
string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  
long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  
string    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  
long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  
long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  
long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  
string    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. 
The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  
long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  
string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  
enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  
int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  
string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  
string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  
long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  
string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  
int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  
boolean   operation Indicates this access is on Produce or Consume side  enum    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  
Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  
string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  
string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  
enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  
int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is a write or a read.  string    SCOPE CacheSlowAccess This calculates the metrics data from slow requests of the cache system, for either write or read operations.\n   Name Remarks Group Key Type     cacheServiceId The service ID of the virtual cache service.  string   command The cache command.  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access.  string   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is a write or a read.  string    SCOPE MQAccess This calculates the service-dimension metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     name The service name, usually the MQ address(es).  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    SCOPE MQEndpointAccess This calculates the endpoint-dimension metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name, usually a combination of queue and topic.  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},
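For illustration, a minimal sketch of how the scope fields listed above feed the Aggregation Function, assuming the same OAL syntax used in the tlsMode examples above; the metric names below are examples only, not definitions taken from these pages:
service_resp_time = from(Service.latency).longAvg(); // average of the Service scope's latency field per time bucket
service_sla = from(Service.*).percent(status == true); // percentage of requests whose status field is true
endpoint_cpm = from(Endpoint.*).cpm(); // calls per minute for each endpoint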
int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is a write or a read.  string    SCOPE CacheSlowAccess This calculates the metrics data from slow requests to a cache system, for both write and read operations.\n   Name Remarks Group Key Type     cacheServiceId The service ID of the virtual cache service.  string   command The cache command.  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access.  string   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is a write or a read.  string    SCOPE MQAccess This calculates the service-dimension metrics data from each request to the MQ system, on both the produce and consume sides.\n   Name Remarks Group Key Type     name The service name, usually the MQ address(es).  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    SCOPE MQEndpointAccess This calculates the endpoint-dimension metrics data from each request to the MQ system, on both the produce and consume sides.\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name, usually a combination of the queue and topic.  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scratch The OAP Config Dump SkyWalking OAP behavior is controlled through hundreds of configurations. It is hard to know the final configuration, because any of them can be overridden by system environment variables.\nThe core config file application.yml lists all the configurations and their default values. However, it is still hard to know the runtime values.\nScratch is a tool to dump the final configuration. It is provided within the OAP REST server and can be accessed through HTTP GET http://{core restHost}:{core restPort}/debugging/config/dump.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump cluster.provider=standalone core.provider=default core.default.prepareThreads=2 core.default.restHost=0.0.0.0 core.default.searchableLogsTags=level,http.status_code core.default.role=Mixed core.default.persistentPeriod=25 core.default.syncPeriodHttpUriRecognitionPattern=10 core.default.restIdleTimeOut=30000 core.default.dataKeeperExecutePeriod=5 core.default.topNReportPeriod=10 core.default.gRPCSslTrustedCAPath= core.default.downsampling=[Hour, Day] core.default.serviceNameMaxLength=70 core.default.gRPCSslEnabled=false core.default.restPort=12800 core.default.serviceCacheRefreshInterval=10 ... All booting configurations with their runtime values are listed, including the selected provider for each module.\nProtect The Secrets Some of the configurations contain sensitive values, such as usernames, passwords, and tokens. These values are masked in the dump result. 
For example, the storage.elasticsearch.password in the following configuration,\nstorage: selector: ${SW_STORAGE:h2} elasticsearch: password: ${SW_ES_PASSWORD:\u0026#34;\u0026#34;}\nwould be masked and shown as ******** in the dump result.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump ... storage.elasticsearch.password=******** ... By default, we mask the config keys through the following configuration.\n# Include the list of keywords to filter configurations including secrets. Separate keywords by a comma.\nkeywords4MaskingSecretsOfConfig: ${SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS:user,password,token,accessKey,secretKey,authentication}\nDisable The Config Dump Service By default, this service is open to help users debug and diagnose. If you want to disable it, you need to disable the whole debugging-query module by setting its selector to -.\ndebugging-query: selector: ${SW_DEBUGGING_QUERY:-}","excerpt":"Scratch The OAP Config Dump SkyWalking OAP behavior is controlled through hundreds of …","ref":"/docs/main/latest/en/debugging/config_dump/","title":"Scratch The OAP Config Dump"},{"body":"Scratch The OAP Config Dump SkyWalking OAP behavior is controlled through hundreds of configurations. It is hard to know the final configuration, because any of them can be overridden by system environment variables.\nThe core config file application.yml lists all the configurations and their default values. However, it is still hard to know the runtime values.\nScratch is a tool to dump the final configuration. It is provided within the OAP REST server and can be accessed through HTTP GET http://{core restHost}:{core restPort}/debugging/config/dump.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump cluster.provider=standalone core.provider=default core.default.prepareThreads=2 core.default.restHost=0.0.0.0 core.default.searchableLogsTags=level,http.status_code core.default.role=Mixed core.default.persistentPeriod=25 core.default.syncPeriodHttpUriRecognitionPattern=10 core.default.restIdleTimeOut=30000 core.default.dataKeeperExecutePeriod=5 core.default.topNReportPeriod=10 core.default.gRPCSslTrustedCAPath= core.default.downsampling=[Hour, Day] core.default.serviceNameMaxLength=70 core.default.gRPCSslEnabled=false core.default.restPort=12800 core.default.serviceCacheRefreshInterval=10 ... All booting configurations with their runtime values are listed, including the selected provider for each module.\nProtect The Secrets Some of the configurations contain sensitive values, such as usernames, passwords, and tokens. These values are masked in the dump result. For example, the storage.elasticsearch.password in the following configuration,\nstorage: selector: ${SW_STORAGE:h2} elasticsearch: password: ${SW_ES_PASSWORD:\u0026#34;\u0026#34;}\nwould be masked and shown as ******** in the dump result.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump ... storage.elasticsearch.password=******** ... By default, we mask the config keys through the following configuration.\n# Include the list of keywords to filter configurations including secrets. Separate keywords by a comma.\nkeywords4MaskingSecretsOfConfig: ${SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS:user,password,token,accessKey,secretKey,authentication}\nDisable The Config Dump Service By default, this service is open to help users debug and diagnose. 
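Before disabling it, note that the dump is plain key=value text, so the effective settings of a single module can be checked directly. A minimal sketch, assuming the default REST address 127.0.0.1:12800 used in the examples above and that the dump prints one configuration per line, with a standard grep:
\u0026gt; curl -s http://127.0.0.1:12800/debugging/config/dump | grep '^core\.'   # keep only the core module's effective values
\u0026gt; curl -s http://127.0.0.1:12800/debugging/config/dump | grep restPort    # confirm a single key, e.g. core.default.restPort=12800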
If you want to disable it, you need to disable the whole debugging-query module by setting its selector to -.\ndebugging-query: selector: ${SW_DEBUGGING_QUERY:-}","excerpt":"Scratch The OAP Config Dump SkyWalking OAP behavior is controlled through hundreds of …","ref":"/docs/main/next/en/debugging/config_dump/","title":"Scratch The OAP Config Dump"},{"body":"Scratch The OAP Config Dump SkyWalking OAP behavior is controlled through hundreds of configurations. It is hard to know the final configuration, because any of them can be overridden by system environment variables.\nThe core config file application.yml lists all the configurations and their default values. However, it is still hard to know the runtime values.\nScratch is a tool to dump the final configuration. It is provided within the OAP REST server and can be accessed through HTTP GET http://{core restHost}:{core restPort}/debugging/config/dump.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump cluster.provider=standalone core.provider=default core.default.prepareThreads=2 core.default.restHost=0.0.0.0 core.default.searchableLogsTags=level,http.status_code core.default.role=Mixed core.default.persistentPeriod=25 core.default.syncPeriodHttpUriRecognitionPattern=10 core.default.restIdleTimeOut=30000 core.default.dataKeeperExecutePeriod=5 core.default.topNReportPeriod=10 core.default.gRPCSslTrustedCAPath= core.default.downsampling=[Hour, Day] core.default.serviceNameMaxLength=70 core.default.gRPCSslEnabled=false core.default.restPort=12800 core.default.serviceCacheRefreshInterval=10 ... All booting configurations with their runtime values are listed, including the selected provider for each module.\nProtect The Secrets Some of the configurations contain sensitive values, such as usernames, passwords, and tokens. These values are masked in the dump result. For example, the storage.elasticsearch.password in the following configuration,\nstorage: selector: ${SW_STORAGE:h2} elasticsearch: password: ${SW_ES_PASSWORD:\u0026#34;\u0026#34;}\nwould be masked and shown as ******** in the dump result.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump ... storage.elasticsearch.password=******** ... By default, we mask the config keys through the following configuration.\n# Include the list of keywords to filter configurations including secrets. Separate keywords by a comma.\nkeywords4MaskingSecretsOfConfig: ${SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS:user,password,token,accessKey,secretKey,authentication}\nDisable The Config Dump Service By default, this service is open to help users debug and diagnose. If you want to disable it, you need to disable the whole debugging-query module by setting its selector to -.\ndebugging-query: selector: ${SW_DEBUGGING_QUERY:-}","excerpt":"Scratch The OAP Config Dump SkyWalking OAP behavior is controlled through hundreds of …","ref":"/docs/main/v9.7.0/en/debugging/config_dump/","title":"Scratch The OAP Config Dump"},{"body":"","excerpt":"","ref":"/search/","title":"Search Results"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operations team in the default deployment.\nAll telemetry data are trusted. 
The OAP server does not validate any field of the telemetry data, in order to avoid extra load on the server.\nIt is up to the operator (OPS team) whether to expose the OAP server, UI, or some agent deployments to an unsecured environment. The following security policies should be considered to secure your SkyWalking deployment.\n HTTPS and gRPC+TLS should be used between agents and OAP servers, as well as the UI. Set up token or username/password based authentication for the OAP server and UI through your gateway. Validate all fields of the traceable RPC (including HTTP 1/2 and MQ) headers (header names are sw8, sw8-x, and sw8-correlation) when requests come from outside the trusted zone, or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data (HTTP in raw text or encoded Protobuf format) should be validated, and malicious data should be rejected.  Without these protections, an attacker could embed executable JavaScript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor sensitive environments, consider limiting the telemetry report frequency to mitigate DoS/DDoS against exposed OAP and UI services.\nAppendix The SkyWalking client-js agent always runs outside the secured environment. Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/latest/en/security/readme/","title":"Security Notice"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operations team in the default deployment.\nAll telemetry data are trusted. The OAP server does not validate any field of the telemetry data, in order to avoid extra load on the server.\nIt is up to the operator (OPS team) whether to expose the OAP server, UI, or some agent deployments to an unsecured environment. The following security policies should be considered to secure your SkyWalking deployment.\n HTTPS and gRPC+TLS should be used between agents and OAP servers, as well as the UI. Set up token or username/password based authentication for the OAP server and UI through your gateway. Validate all fields of the traceable RPC (including HTTP 1/2 and MQ) headers (header names are sw8, sw8-x, and sw8-correlation) when requests come from outside the trusted zone, or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data (HTTP in raw text or encoded Protobuf format) should be validated, and malicious data should be rejected.  Without these protections, an attacker could embed executable JavaScript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor sensitive environments, consider limiting the telemetry report frequency to mitigate DoS/DDoS against exposed OAP and UI services.\nAppendix The SkyWalking client-js agent always runs outside the secured environment. Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/next/en/security/readme/","title":"Security Notice"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. 
OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/v9.3.0/en/security/readme/","title":"Security Notice"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. 
Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/v9.4.0/en/security/readme/","title":"Security Notice"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/v9.5.0/en/security/readme/","title":"Security Notice"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  
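As one illustration of the header policy above — a hypothetical sketch for an Envoy-based edge gateway, not taken from the SkyWalking documentation — the propagation headers could be stripped from traffic entering from outside the trusted zone; any gateway capable of removing request headers can apply the same policy:
route_config:
  virtual_hosts:
  - name: public-ingress            # traffic arriving from outside the trusted zone
    domains: ["*"]
    # Drop the SkyWalking propagation headers so external callers cannot inject trace context.
    request_headers_to_remove: ["sw8", "sw8-x", "sw8-correlation"]
    routes:
    - match: { prefix: "/" }
      route: { cluster: backend }   # "backend" is a placeholder cluster name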
Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/v9.6.0/en/security/readme/","title":"Security Notice"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/v9.7.0/en/security/readme/","title":"Security Notice"},{"body":"Send Envoy metrics to SkyWalking with / without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking does not only receive and store the metrics emitted by Envoy, but it also analyzes the topology of services and service instances.\nAttention: There are two versions of Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with / without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nIn order to let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics, since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics which need to be analyzed, in order to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\n","excerpt":"Send Envoy metrics to SkyWalking with / without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.0.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with / without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/latest/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/next/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.1.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.2.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.3.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.4.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
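Because the static Envoy configuration on this page loses its line breaks in this index, the following is the same stats_sinks and service_skywalking cluster definition re-indented for readability; the values are exactly those in the snippet that the next paragraph introduces, and only the indentation is an assumption.

stats_sinks:
  - name: envoy.metrics_service
    config:
      grpc_service:
        # Note: we can use the google_grpc implementation as well.
        envoy_grpc:
          cluster_name: service_skywalking
static_resources:
  # ...
  clusters:
    - name: service_skywalking
      connect_timeout: 5s
      type: LOGICAL_DNS
      http2_protocol_options: {}
      dns_lookup_family: V4_ONLY
      lb_policy: ROUND_ROBIN
      load_assignment:
        cluster_name: service_skywalking
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address:
                      address: skywalking
                      # This is the port where SkyWalking serves the Envoy Metrics Service gRPC stream.
                      port_value: 11800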
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.5.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
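The same two settings can also be scoped to a single workload rather than the whole mesh through Istio's proxy.istio.io/config pod annotation. The Deployment name, the OAP address, and the choice to apply this per workload are assumptions for illustration only; the specific expressions OAP actually consumes are listed in the command right after this sketch.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: some-app                                 # hypothetical workload
spec:
  # selector, replicas, and containers omitted for brevity
  template:
    metadata:
      annotations:
        proxy.istio.io/config: |
          envoyMetricsService:
            address: "skywalking-oap.skywalking.svc:11800"   # replace with your actual SkyWalking OAP address
          proxyStatsMatcher:
            inclusionRegexps:
              - .*upstream_cx_active.*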
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.6.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation to learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.7.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
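For orientation, the wiring that produces the log above amounts to two containers: an Envoy whose bootstrap carries the envoy.metrics_service stats sink, and the SkyWalking OAP server listening for the gRPC stream on port 11800. The compose file below is a rough sketch under those assumptions, not the repository's actual docker-compose.yml; image tags, service names, and volume paths are hypothetical.

version: "2.1"
services:
  skywalking:
    image: apache/skywalking-oap-server          # tag assumed
    expose:
      - "11800"                                  # Envoy Metrics Service gRPC stream
  envoy:
    image: envoyproxy/envoy                      # tag assumed
    volumes:
      - ./envoy.yaml:/etc/envoy/envoy.yaml       # bootstrap containing the stats sink described above
    depends_on:
      - skywalking                               # the cluster address skywalking resolves to this service

With a layout along these lines, docker-compose logs -f skywalking surfaces the DEBUG messages once Envoy starts streaming.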
$ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/latest/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value 
{ skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/next/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: 
\u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.0.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
$ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.1.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value 
{ skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.2.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: 
\u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.3.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
$ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.4.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value 
{ skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.5.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: 
\u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.6.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
$ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.7.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/latest/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. 
See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/next/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanism to integrate with target services. They support collecting traces, logs, metrics and events by using SkyWalking\u0026rsquo;s native format, and maximum the analysis capabilities of SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the rust agent in a rust service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols, but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project document for more details.\n  SkyAPM PHP agent. See PHP agent project document for more details.\n  SkyAPM Go SDK. See go2sky project document for more details.\n  SkyAPM C++ SDK. See cpp2sky project document for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.0.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM PHP agent. See PHP agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.1.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. 
They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM PHP agent. See PHP agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.2.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.3.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. 
Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.4.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.5.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. 
Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.6.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.7.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server.\nDefaultConfig # The address of grpc server. Default value is :11800address::11800# The network of grpc. Default value is :tcpnetwork:tcp# The max size of receiving log. Default value is 2M. The unit is Byte.max_recv_msg_size:2097152# The max concurrent stream channels.max_concurrent_streams:32# The TLS cert file path.tls_cert_file:\u0026#34;\u0026#34;# The TLS key file path.tls_key_file:\u0026#34;\u0026#34;# To Accept Connection Limiter when reach the resourceaccept_limit:# The max CPU utilization limitcpu_utilization:75# The max connection countconnection_count:4000Configuration    Name Type Description     address string The address of grpc server.   network string The network of grpc.   max_recv_msg_size int The max size of the received log.   max_concurrent_streams uint32 The max concurrent stream channels.   tls_cert_file string The TLS cert file path.   tls_key_file string The TLS key file path.   
accept_limit grpc.AcceptConnectionConfig To Accept Connection Limiter when reach the resource    ","excerpt":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server. …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/server_grpc-server/","title":"Server/grpc-server"},{"body":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server.\nDefaultConfig # The address of grpc server. Default value is :11800address::11800# The network of grpc. Default value is :tcpnetwork:tcp# The max size of receiving log. Default value is 2M. The unit is Byte.max_recv_msg_size:2097152# The max concurrent stream channels.max_concurrent_streams:32# The TLS cert file path.tls_cert_file:\u0026#34;\u0026#34;# The TLS key file path.tls_key_file:\u0026#34;\u0026#34;# To Accept Connection Limiter when reach the resourceaccept_limit:# The max CPU utilization limitcpu_utilization:75# The max connection countconnection_count:4000Configuration    Name Type Description     address string The address of grpc server.   network string The network of grpc.   max_recv_msg_size int The max size of the received log.   max_concurrent_streams uint32 The max concurrent stream channels.   tls_cert_file string The TLS cert file path.   tls_key_file string The TLS key file path.   accept_limit grpc.AcceptConnectionConfig To Accept Connection Limiter when reach the resource    ","excerpt":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server. …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/server_grpc-server/","title":"Server/grpc-server"},{"body":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server.\nDefaultConfig # The address of grpc server. Default value is :11800address::11800# The network of grpc. Default value is :tcpnetwork:tcp# The max size of receiving log. Default value is 2M. The unit is Byte.max_recv_msg_size:2097152# The max concurrent stream channels.max_concurrent_streams:32# The TLS cert file path.tls_cert_file:\u0026#34;\u0026#34;# The TLS key file path.tls_key_file:\u0026#34;\u0026#34;# To Accept Connection Limiter when reach the resourceaccept_limit:# The max CPU utilization limitcpu_utilization:75# The max connection countconnection_count:4000Configuration    Name Type Description     address string The address of grpc server.   network string The network of grpc.   max_recv_msg_size int The max size of the received log.   max_concurrent_streams uint32 The max concurrent stream channels.   tls_cert_file string The TLS cert file path.   tls_key_file string The TLS key file path.   accept_limit grpc.AcceptConnectionConfig To Accept Connection Limiter when reach the resource    ","excerpt":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server. …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/server_grpc-server/","title":"Server/grpc-server"},{"body":"Server/http-server Description This is a sharing plugin, which would start a http server.\nDefaultConfig # The http server address.address:\u0026#34;:12800\u0026#34;Configuration    Name Type Description     address string     ","excerpt":"Server/http-server Description This is a sharing plugin, which would start a http server. 
…","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/server_http-server/","title":"Server/http-server"},{"body":"Server/http-server Description This is a sharing plugin, which would start a http server.\nDefaultConfig # The http server address.address:\u0026#34;:12800\u0026#34;Configuration    Name Type Description     address string     ","excerpt":"Server/http-server Description This is a sharing plugin, which would start a http server. …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/server_http-server/","title":"Server/http-server"},{"body":"Server/http-server Description This is a sharing plugin, which would start a http server.\nDefaultConfig # The http server address.address:\u0026#34;:12800\u0026#34;Configuration    Name Type Description     address string     ","excerpt":"Server/http-server Description This is a sharing plugin, which would start a http server. …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/server_http-server/","title":"Server/http-server"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/latest/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/next/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. 
For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an option parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.0.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.1.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.2.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. 
For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.3.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.4.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.5.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. 
For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.6.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.7.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. 
But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/latest/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. 
Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/next/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. 
But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. 
That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. 
This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. 
But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. 
The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? 
Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. 
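The in-process propagation and cross-thread limitations above are easiest to see with a small, self-contained example. This is not SkyWalking agent code; it only demonstrates why a context stored per thread (here a plain ThreadLocal with invented names) does not follow a task into a thread pool unless a plugin or a manual API carries a snapshot across.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CrossThreadDemo {
    // Stand-in for a per-request tracing context held in the request thread.
    private static final ThreadLocal<String> TRACE_CONTEXT = new ThreadLocal<>();

    public static void main(String[] args) throws Exception {
        TRACE_CONTEXT.set("trace-id-123"); // set by the "request" thread
        ExecutorService pool = Executors.newSingleThreadExecutor();

        // Prints "in pool: null": the worker thread has its own, empty ThreadLocal slot.
        pool.submit(() -> System.out.println("in pool: " + TRACE_CONTEXT.get())).get();

        // Capturing a snapshot before handing the task over is what cross-thread
        // plugins or manual APIs must do on the developer's behalf.
        String snapshot = TRACE_CONTEXT.get();
        pool.submit(() -> System.out.println("with snapshot: " + snapshot)).get();

        pool.shutdown();
    }
}
```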
Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Discovery Service discovery is used to discover all Kubernetes services process in the current node and report them to backend services. After the process upload is completed, the other modules could perform more operations with the process, such as process profiling and collecting process metrics.\nConfiguration    Name Default Environment Key Description     process_discovery.heartbeat_period 20s ROVER_PROCESS_DISCOVERY_HEARTBEAT_PERIOD The period of report or keep-alive process to the backend.   process_discovery.properties_report_period 10 ROVER_PROCESS_DISCOVERY_PROPERTIES_REPORT_PERIOD The agent sends the process properties to the backend every: heartbeart period * properties report period.   process_discovery.kubernetes.active false ROVER_PROCESS_DISCOVERY_KUBERNETES_ACTIVE Is active the kubernetes process discovery.   process_discovery.kubernetes.node_name  ROVER_PROCESS_DISCOVERY_KUBERNETES_NODE_NAME Current deployed node name, it could be inject by spec.nodeName.   process_discovery.kubernetes.namespaces  ROVER_PROCESS_DISCOVERY_KUBERNETES_NAMESPACES Including pod by namespaces, if empty means including all namespaces. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;.   process_discovery.kubernetes.analyzers   Declare how to build the process. The istio and k8s resources are active by default.   process_discovery.kubernetes.analyzers.active   Set is active analyzer.   process_discovery.kubernetes.analyzers.filters   Define which process is match to current process builder.   process_discovery.kubernetes.analyzers.service_name   The Service Name of the process entity.   process_discovery.kubernetes.analyzers.instance_name   The Service Instance Name of the process entity, by default, the instance name is the host IP v4 address from \u0026ldquo;en0\u0026rdquo; net interface.   
process_discovery.kubernetes.analyzers.process_name   The Process Name of the process entity, by default, the process name is the executable name of the process.   process_discovery.kubernetes.analyzers.labels   The Process Labels, used to aggregate similar process from service entity. Multiple labels split by \u0026ldquo;,\u0026rdquo;.    Kubernetes Process Detector The Kubernetes process detector could detect any process under the Kubernetes container. If active the Kubernetes process detector, the rover must be deployed in the Kubernetes cluster. After finding the process, it would collect the metadata of the process when the report to the backend.\nProcess Analyze The process analysis declares which process could be profiled and how to build the process entity. The Istio and Kubernetes resources are active on default.\nFilter The filter provides an expression(go template) mechanism to match the process that can build the entity. Multiple expressions work together to determine whether the process can create the entity. Each expression must return the boolean value. Otherwise, the decision throws an error.\nThe context is similar to the entity builder. Using context could help the rover understand which process could build the entity.\nProcess Context Is the same with the process context in scanner, but doesn\u0026rsquo;t need to add the {{ and }} in prefix and suffix.\nPod Context Provide current pod information and judgments.\n   Name Argument Example Description     Name None eq .Pod.Name \u0026quot;test-pod-name\u0026quot; The name of the current pod. The example shows the pod name is equal to test-pod-name.   Namespace None eq .Pod.Namespace \u0026quot;test-namesapce\u0026quot; The name of the current pod namespace. The example shows the pod namespace name is equal to test-namespace.   Node None eq .Pod.Node \u0026quot;test-node\u0026quot; The name of the node deployed. The example shows the pod node name is equal to test-node.   LabelValue KeyNames eq .Pod.LavelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot; The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. The example shows the pod has anyone a or b label key, and the value matches to v.   ServiceName None eq .Pod.ServiceName \u0026quot;test-service\u0026quot; The service name of the pod. The example shows current pods matched service name is test-service.   HasContainer Container name .Pod.HasContainer \u0026quot;istio-proxy\u0026quot; The pod has the appointed container name.   LabelSelector selector .Pod.LabelSelector The pod is matches the label selector. For more details, please read the official documentation.   HasServiceName None .Pod.HasServiceName The pod has the matched service.   HasOwnerName kindNames .Pod.HasOwnerName \u0026quot;Service,Deployment\u0026quot; The pod has the matched owner name.    Container Context Provide current container(under the pod) information.\n   Name Argument Example Description     Name None eq .Container.Name \u0026quot;istio-proxy\u0026quot; The name of the current container under the pod. The examples show the container name is equal to istio-proxy.    
Entity The entity including layer, serviceName, instanceName, processName and labels properties.\nThe entity also could use expression to build(serviceName, instanceName and processName).\nRover Rover context provides the context of the rover process instance and VM data.\n   Name Argument Example Description     InstanceID None {{.Rover.InstanceID}} Get the Instance ID of the rover.   HostIPV4 The Interface name {{.Rover.HostIPV4 \u0026quot;en0\u0026quot;}} Get the ipv4 address from the appointed network interface name.   HostIPV6 The Interface name {{.Rover.HostIPV6 \u0026quot;en0\u0026quot;}} Get the ipv6 address from the appointed network interface name.   HostName None {{.Rover.HostName}} Get the host name of current machine.    Process Process context provides the context relate to which process is matched.\n   Name Argument Example Description     ExeFilePath None {{.Process.ExeFilePath}} The execute file path of process.   ExeName None {{.Process.ExeName}} The execute file name.   CommandLine None {{.Process.CommandLine}} The command line of process.   Pid None {{.Process.Pid}} The id of the process.   WorkDir None {{.Process.WorkDir}} The work directory path of the process.    Pod The information on the current pod.\n   Name Argument Example Description     Name None {{.Pod.Name}} The name of current pod.   Namespace None {{.Pod.Namespace}} The name of current pod namespace.   Node None {{.Pod.Node}} The name of the node deployed.   LabelValue KeyNames, Default {{.Pod.LabelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot;}} The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. If all keys don\u0026rsquo;t have value, then return the default value.   ServiceName None {{.Pod.ServiceName}} The service name of the pod. If the pod hasn\u0026rsquo;t matched service, then return an empty string.   FindContainer ContainerName {{.Pod.FindContainer \u0026quot;test\u0026quot;}} Find the Container context by container name.   OwnerName KindNames {{.Pod.OwnerName \u0026quot;Service,Deployment\u0026quot;}} Find the Owner name by owner kind name.    Container The information of the current container under the pod.\n   Name Argument Example Description     Name None {{.Container.Name}} The name of the current container under the pod.    ID None {{.Container.ID}} The id of the current container under the pod.   EnvValue KeyNames {{.Container.EnvValue \u0026quot;a,b\u0026quot;}} The environment value of the first non-value key in the provided candidates(Iterate from left to right).    ","excerpt":"Service Discovery Service discovery is used to discover all Kubernetes services process in the …","ref":"/docs/skywalking-rover/latest/en/setup/configuration/service-discovery/","title":"Service Discovery"},{"body":"Service Discovery Service discovery is used to discover all Kubernetes services process in the current node and report them to backend services. After the process upload is completed, the other modules could perform more operations with the process, such as process profiling and collecting process metrics.\nConfiguration    Name Default Environment Key Description     process_discovery.heartbeat_period 20s ROVER_PROCESS_DISCOVERY_HEARTBEAT_PERIOD The period of report or keep-alive process to the backend.   process_discovery.properties_report_period 10 ROVER_PROCESS_DISCOVERY_PROPERTIES_REPORT_PERIOD The agent sends the process properties to the backend every: heartbeart period * properties report period.   
process_discovery.kubernetes.active false ROVER_PROCESS_DISCOVERY_KUBERNETES_ACTIVE Is active the kubernetes process discovery.   process_discovery.kubernetes.node_name  ROVER_PROCESS_DISCOVERY_KUBERNETES_NODE_NAME Current deployed node name, it could be inject by spec.nodeName.   process_discovery.kubernetes.namespaces  ROVER_PROCESS_DISCOVERY_KUBERNETES_NAMESPACES Including pod by namespaces, if empty means including all namespaces. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;.   process_discovery.kubernetes.analyzers   Declare how to build the process. The istio and k8s resources are active by default.   process_discovery.kubernetes.analyzers.active   Set is active analyzer.   process_discovery.kubernetes.analyzers.filters   Define which process is match to current process builder.   process_discovery.kubernetes.analyzers.service_name   The Service Name of the process entity.   process_discovery.kubernetes.analyzers.instance_name   The Service Instance Name of the process entity, by default, the instance name is the host IP v4 address from \u0026ldquo;en0\u0026rdquo; net interface.   process_discovery.kubernetes.analyzers.process_name   The Process Name of the process entity, by default, the process name is the executable name of the process.   process_discovery.kubernetes.analyzers.labels   The Process Labels, used to aggregate similar process from service entity. Multiple labels split by \u0026ldquo;,\u0026rdquo;.    Kubernetes Process Detector The Kubernetes process detector could detect any process under the Kubernetes container. If active the Kubernetes process detector, the rover must be deployed in the Kubernetes cluster. After finding the process, it would collect the metadata of the process when the report to the backend.\nProcess Analyze The process analysis declares which process could be profiled and how to build the process entity. The Istio and Kubernetes resources are active on default.\nFilter The filter provides an expression(go template) mechanism to match the process that can build the entity. Multiple expressions work together to determine whether the process can create the entity. Each expression must return the boolean value. Otherwise, the decision throws an error.\nThe context is similar to the entity builder. Using context could help the rover understand which process could build the entity.\nProcess Context Is the same with the process context in scanner, but doesn\u0026rsquo;t need to add the {{ and }} in prefix and suffix.\nPod Context Provide current pod information and judgments.\n   Name Argument Example Description     Name None eq .Pod.Name \u0026quot;test-pod-name\u0026quot; The name of the current pod. The example shows the pod name is equal to test-pod-name.   Namespace None eq .Pod.Namespace \u0026quot;test-namesapce\u0026quot; The name of the current pod namespace. The example shows the pod namespace name is equal to test-namespace.   Node None eq .Pod.Node \u0026quot;test-node\u0026quot; The name of the node deployed. The example shows the pod node name is equal to test-node.   LabelValue KeyNames eq .Pod.LavelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot; The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. The example shows the pod has anyone a or b label key, and the value matches to v.   ServiceName None eq .Pod.ServiceName \u0026quot;test-service\u0026quot; The service name of the pod. 
The example shows current pods matched service name is test-service.   HasContainer Container name .Pod.HasContainer \u0026quot;istio-proxy\u0026quot; The pod has the appointed container name.   LabelSelector selector .Pod.LabelSelector The pod is matches the label selector. For more details, please read the official documentation.   HasServiceName None .Pod.HasServiceName The pod has the matched service.   HasOwnerName kindNames .Pod.HasOwnerName \u0026quot;Service,Deployment\u0026quot; The pod has the matched owner name.    Container Context Provide current container(under the pod) information.\n   Name Argument Example Description     Name None eq .Container.Name \u0026quot;istio-proxy\u0026quot; The name of the current container under the pod. The examples show the container name is equal to istio-proxy.    Entity The entity including layer, serviceName, instanceName, processName and labels properties.\nThe entity also could use expression to build(serviceName, instanceName and processName).\nRover Rover context provides the context of the rover process instance and VM data.\n   Name Argument Example Description     InstanceID None {{.Rover.InstanceID}} Get the Instance ID of the rover.   HostIPV4 The Interface name {{.Rover.HostIPV4 \u0026quot;en0\u0026quot;}} Get the ipv4 address from the appointed network interface name.   HostIPV6 The Interface name {{.Rover.HostIPV6 \u0026quot;en0\u0026quot;}} Get the ipv6 address from the appointed network interface name.   HostName None {{.Rover.HostName}} Get the host name of current machine.    Process Process context provides the context relate to which process is matched.\n   Name Argument Example Description     ExeFilePath None {{.Process.ExeFilePath}} The execute file path of process.   ExeName None {{.Process.ExeName}} The execute file name.   CommandLine None {{.Process.CommandLine}} The command line of process.   Pid None {{.Process.Pid}} The id of the process.   WorkDir None {{.Process.WorkDir}} The work directory path of the process.    Pod The information on the current pod.\n   Name Argument Example Description     Name None {{.Pod.Name}} The name of current pod.   Namespace None {{.Pod.Namespace}} The name of current pod namespace.   Node None {{.Pod.Node}} The name of the node deployed.   LabelValue KeyNames, Default {{.Pod.LabelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot;}} The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. If all keys don\u0026rsquo;t have value, then return the default value.   ServiceName None {{.Pod.ServiceName}} The service name of the pod. If the pod hasn\u0026rsquo;t matched service, then return an empty string.   FindContainer ContainerName {{.Pod.FindContainer \u0026quot;test\u0026quot;}} Find the Container context by container name.   OwnerName KindNames {{.Pod.OwnerName \u0026quot;Service,Deployment\u0026quot;}} Find the Owner name by owner kind name.    Container The information of the current container under the pod.\n   Name Argument Example Description     Name None {{.Container.Name}} The name of the current container under the pod.    ID None {{.Container.ID}} The id of the current container under the pod.   EnvValue KeyNames {{.Container.EnvValue \u0026quot;a,b\u0026quot;}} The environment value of the first non-value key in the provided candidates(Iterate from left to right).    
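The filter mechanism described above boils down to a conjunction: every configured expression must evaluate to true before rover builds a process entity. The sketch below restates that logic in Java purely for illustration; rover itself evaluates Go template expressions such as eq .Pod.Namespace "sample-services" against the contexts documented here, and the type, field, and value names in this snippet are invented.

```java
import java.util.List;
import java.util.function.Predicate;

public class FilterDemo {
    // Invented stand-in for the pod/process context the filters are evaluated against.
    record PodInfo(String name, String namespace, boolean hasIstioProxy) {}

    public static void main(String[] args) {
        // Roughly mirrors two filters: eq .Pod.Namespace "sample-services" and .Pod.HasContainer "istio-proxy"
        List<Predicate<PodInfo>> filters = List.of(
                pod -> pod.namespace().equals("sample-services"),
                PodInfo::hasIstioProxy
        );

        PodInfo pod = new PodInfo("songs-0", "sample-services", true);

        // The entity is built only if every expression passes (logical AND).
        boolean buildEntity = filters.stream().allMatch(f -> f.test(pod));
        System.out.println("build entity: " + buildEntity);
    }
}
```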
","excerpt":"Service Discovery Service discovery is used to discover all Kubernetes services process in the …","ref":"/docs/skywalking-rover/next/en/setup/configuration/service-discovery/","title":"Service Discovery"},{"body":"Service Discovery Service discovery is used to discover all Kubernetes services process in the current node and report them to backend services. After the process upload is completed, the other modules could perform more operations with the process, such as process profiling and collecting process metrics.\nConfiguration    Name Default Environment Key Description     process_discovery.heartbeat_period 20s ROVER_PROCESS_DISCOVERY_HEARTBEAT_PERIOD The period of report or keep-alive process to the backend.   process_discovery.properties_report_period 10 ROVER_PROCESS_DISCOVERY_PROPERTIES_REPORT_PERIOD The agent sends the process properties to the backend every: heartbeart period * properties report period.   process_discovery.kubernetes.active false ROVER_PROCESS_DISCOVERY_KUBERNETES_ACTIVE Is active the kubernetes process discovery.   process_discovery.kubernetes.node_name  ROVER_PROCESS_DISCOVERY_KUBERNETES_NODE_NAME Current deployed node name, it could be inject by spec.nodeName.   process_discovery.kubernetes.namespaces  ROVER_PROCESS_DISCOVERY_KUBERNETES_NAMESPACES Including pod by namespaces, if empty means including all namespaces. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;.   process_discovery.kubernetes.analyzers   Declare how to build the process. The istio and k8s resources are active by default.   process_discovery.kubernetes.analyzers.active   Set is active analyzer.   process_discovery.kubernetes.analyzers.filters   Define which process is match to current process builder.   process_discovery.kubernetes.analyzers.service_name   The Service Name of the process entity.   process_discovery.kubernetes.analyzers.instance_name   The Service Instance Name of the process entity, by default, the instance name is the host IP v4 address from \u0026ldquo;en0\u0026rdquo; net interface.   process_discovery.kubernetes.analyzers.process_name   The Process Name of the process entity, by default, the process name is the executable name of the process.   process_discovery.kubernetes.analyzers.labels   The Process Labels, used to aggregate similar process from service entity. Multiple labels split by \u0026ldquo;,\u0026rdquo;.    Kubernetes Process Detector The Kubernetes process detector could detect any process under the Kubernetes container. If active the Kubernetes process detector, the rover must be deployed in the Kubernetes cluster. After finding the process, it would collect the metadata of the process when the report to the backend.\nProcess Analyze The process analysis declares which process could be profiled and how to build the process entity. The Istio and Kubernetes resources are active on default.\nFilter The filter provides an expression(go template) mechanism to match the process that can build the entity. Multiple expressions work together to determine whether the process can create the entity. Each expression must return the boolean value. Otherwise, the decision throws an error.\nThe context is similar to the entity builder. 
Using context could help the rover understand which process could build the entity.\nProcess Context Is the same with the process context in scanner, but doesn\u0026rsquo;t need to add the {{ and }} in prefix and suffix.\nPod Context Provide current pod information and judgments.\n   Name Argument Example Description     Name None eq .Pod.Name \u0026quot;test-pod-name\u0026quot; The name of the current pod. The example shows the pod name is equal to test-pod-name.   Namespace None eq .Pod.Namespace \u0026quot;test-namesapce\u0026quot; The name of the current pod namespace. The example shows the pod namespace name is equal to test-namespace.   Node None eq .Pod.Node \u0026quot;test-node\u0026quot; The name of the node deployed. The example shows the pod node name is equal to test-node.   LabelValue KeyNames eq .Pod.LavelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot; The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. The example shows the pod has anyone a or b label key, and the value matches to v.   ServiceName None eq .Pod.ServiceName \u0026quot;test-service\u0026quot; The service name of the pod. The example shows current pods matched service name is test-service.   HasContainer Container name .Pod.HasContainer \u0026quot;istio-proxy\u0026quot; The pod has the appointed container name.   LabelSelector selector .Pod.LabelSelector The pod is matches the label selector. For more details, please read the official documentation.   HasServiceName None .Pod.HasServiceName The pod has the matched service.   HasOwnerName kindNames .Pod.HasOwnerName \u0026quot;Service,Deployment\u0026quot; The pod has the matched owner name.    Container Context Provide current container(under the pod) information.\n   Name Argument Example Description     Name None eq .Container.Name \u0026quot;istio-proxy\u0026quot; The name of the current container under the pod. The examples show the container name is equal to istio-proxy.    Entity The entity including layer, serviceName, instanceName, processName and labels properties.\nThe entity also could use expression to build(serviceName, instanceName and processName).\nRover Rover context provides the context of the rover process instance and VM data.\n   Name Argument Example Description     InstanceID None {{.Rover.InstanceID}} Get the Instance ID of the rover.   HostIPV4 The Interface name {{.Rover.HostIPV4 \u0026quot;en0\u0026quot;}} Get the ipv4 address from the appointed network interface name.   HostIPV6 The Interface name {{.Rover.HostIPV6 \u0026quot;en0\u0026quot;}} Get the ipv6 address from the appointed network interface name.   HostName None {{.Rover.HostName}} Get the host name of current machine.    Process Process context provides the context relate to which process is matched.\n   Name Argument Example Description     ExeFilePath None {{.Process.ExeFilePath}} The execute file path of process.   ExeName None {{.Process.ExeName}} The execute file name.   CommandLine None {{.Process.CommandLine}} The command line of process.   Pid None {{.Process.Pid}} The id of the process.   WorkDir None {{.Process.WorkDir}} The work directory path of the process.    Pod The information on the current pod.\n   Name Argument Example Description     Name None {{.Pod.Name}} The name of current pod.   Namespace None {{.Pod.Namespace}} The name of current pod namespace.   Node None {{.Pod.Node}} The name of the node deployed.   
LabelValue KeyNames, Default {{.Pod.LabelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot;}} The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. If all keys don\u0026rsquo;t have value, then return the default value.   ServiceName None {{.Pod.ServiceName}} The service name of the pod. If the pod hasn\u0026rsquo;t matched service, then return an empty string.   FindContainer ContainerName {{.Pod.FindContainer \u0026quot;test\u0026quot;}} Find the Container context by container name.   OwnerName KindNames {{.Pod.OwnerName \u0026quot;Service,Deployment\u0026quot;}} Find the Owner name by owner kind name.    Container The information of the current container under the pod.\n   Name Argument Example Description     Name None {{.Container.Name}} The name of the current container under the pod.    ID None {{.Container.ID}} The id of the current container under the pod.   EnvValue KeyNames {{.Container.EnvValue \u0026quot;a,b\u0026quot;}} The environment value of the first non-value key in the provided candidates(Iterate from left to right).    ","excerpt":"Service Discovery Service discovery is used to discover all Kubernetes services process in the …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/service-discovery/","title":"Service Discovery"},{"body":"Service Hierarchy SkyWalking v10 introduces a new concept Service Hierarchy which defines the relationships of existing logically same services in various layers. OAP will detect the services from different layers, and try to build the connections.\nDetect Service Hierarchy Connections There are 2 ways to detect the connections:\n Automatically matching through OAP internal mechanism, no extra work is required. Build the connections through specific agents.  Note: All the relationships and auto-matching rules should be defined in the config/hierarchy-definition.yml file. If you want to customize it according to your own needs, please refer to Service Hierarchy Configuration.\nAutomatically Matching    Upper layer Lower layer Matching rule     GENERAL K8S_SERVICE GENERAL On K8S_SERVICE   GENERAL APISIX GENERAL On APISIX   VIRTUAL_DATABASE MYSQL VIRTUAL_DATABASE On MYSQL   VIRTUAL_DATABASE POSTGRESQL VIRTUAL_DATABASE On POSTGRESQL   VIRTUAL_DATABASE CLICKHOUSE VIRTUAL_DATABASE On CLICKHOUSE   VIRTUAL_MQ RABBITMQ VIRTUAL_MQ On RABBITMQ   VIRTUAL_MQ ROCKETMQ VIRTUAL_MQ On ROCKETMQ   VIRTUAL_MQ KAFKA VIRTUAL_MQ On KAFKA   VIRTUAL_MQ PULSAR VIRTUAL_MQ On PULSAR   MESH MESH_DP MESH On MESH_DP   MESH K8S_SERVICE MESH On K8S_SERVICE   MESH_DP K8S_SERVICE MESH_DP On K8S_SERVICE   MYSQL K8S_SERVICE MYSQL On K8S_SERVICE   POSTGRESQL K8S_SERVICE POSTGRESQL On K8S_SERVICE   CLICKHOUSE K8S_SERVICE CLICKHOUSE On K8S_SERVICE   NGINX K8S_SERVICE NGINX On K8S_SERVICE   APISIX K8S_SERVICE APISIX On K8S_SERVICE   ROCKETMQ K8S_SERVICE ROCKETMQ On K8S_SERVICE   RABBITMQ K8S_SERVICE RABBITMQ On K8S_SERVICE   KAFKA K8S_SERVICE KAFKA On K8S_SERVICE   PULSAR K8S_SERVICE PULSAR On K8S_SERVICE   SO11Y_OAP K8S_SERVICE SO11Y_OAP On K8S_SERVICE     The following sections will describe the default matching rules in detail and use the upper-layer On lower-layer format. The example service names are based on SkyWalking Showcase default deployment. In SkyWalking the service name could be composed of group and short name with :: separator.  
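Before the per-rule details below, it may help to see what those one-line Groovy scripts actually compare. The Java sketch that follows is only an illustration (the real rules are Groovy closures in config/hierarchy-definition.yml); it reproduces two of the comparisons used by the rules described next, with the Showcase example names from this page.

```java
public class HierarchyRuleDemo {

    // lower-short-name-remove-ns: the upper short name equals the lower short name
    // with its trailing ".namespace" segment removed.
    static boolean lowerShortNameRemoveNs(String upperShortName, String lowerShortName) {
        return upperShortName.equals(lowerShortName.substring(0, lowerShortName.lastIndexOf('.')));
    }

    // lower-short-name-with-fqdn: the upper short name minus its ":port" suffix equals
    // the lower short name with ".svc.cluster.local" appended.
    static boolean lowerShortNameWithFqdn(String upperShortName, String lowerShortName) {
        return upperShortName.substring(0, upperShortName.lastIndexOf(':'))
                .equals(lowerShortName.concat(".svc.cluster.local"));
    }

    public static void main(String[] args) {
        // GENERAL "songs" on K8S_SERVICE "songs.sample-services" -> true
        System.out.println(lowerShortNameRemoveNs("songs", "songs.sample-services"));

        // VIRTUAL_DATABASE "mysql.skywalking-showcase.svc.cluster.local:3306"
        // on MYSQL "mysql.skywalking-showcase" -> true
        System.out.println(lowerShortNameWithFqdn(
                "mysql.skywalking-showcase.svc.cluster.local:3306", "mysql.skywalking-showcase"));
    }
}
```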
GENERAL On K8S_SERVICE  Rule name: lower-short-name-remove-ns Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf('.')) } Description: GENERAL.service.shortName == K8S_SERVICE.service.shortName without namespace Matched Example:  GENERAL.service.name: agent::songs K8S_SERVICE.service.name: skywalking-showcase::songs.sample-services    GENERAL On APISIX  Rule name: lower-short-name-remove-ns Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf('.')) } Description: GENERAL.service.shortName == APISIX.service.shortName without namespace Matched Example:  GENERAL.service.name: agent::frontend APISIX.service.name: APISIX::frontend.sample-services    VIRTUAL_DATABASE On MYSQL  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_DATABASE.service.shortName remove port == MYSQL.service.shortName with fqdn suffix Matched Example:  VIRTUAL_DATABASE.service.name: mysql.skywalking-showcase.svc.cluster.local:3306 MYSQL.service.name: mysql::mysql.skywalking-showcase    VIRTUAL_DATABASE On POSTGRESQL  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_DATABASE.service.shortName remove port == POSTGRESQL.service.shortName with fqdn suffix Matched Example:  VIRTUAL_DATABASE.service.name: psql.skywalking-showcase.svc.cluster.local:5432 POSTGRESQL.service.name: postgresql::psql.skywalking-showcase    VIRTUAL_DATABASE On CLICKHOUSE  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_DATABASE.service.shortName remove port == CLICKHOUSE.service.shortName with fqdn suffix Matched Example:  VIRTUAL_DATABASE.service.name: clickhouse.skywalking-showcase.svc.cluster.local:8123 CLICKHOUSE.service.name: clickhouse::clickhouse.skywalking-showcase    VIRTUAL_MQ On ROCKETMQ  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == ROCKETMQ.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: rocketmq.skywalking-showcase.svc.cluster.local:9876 ROCKETMQ.service.name: rocketmq::rocketmq.skywalking-showcase    VIRTUAL_MQ On RABBITMQ  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == RABBITMQ.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: rabbitmq.skywalking-showcase.svc.cluster.local:5672 RABBITMQ.service.name: rabbitmq::rabbitmq.skywalking-showcase     VIRTUAL_MQ On KAFKA  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == KAFKA.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: kafka.skywalking-showcase.svc.cluster.local:9092 KAFKA.service.name: kafka::rocketmq.skywalking-showcase    VIRTUAL_MQ On PULSAR 
 Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == PULSAR.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: pulsar.skywalking-showcase.svc.cluster.local:6650 PULSAR.service.name: pulsar::pulsar.skywalking-showcase    MESH On MESH_DP  Rule name: name Groovy script: { (u, l) -\u0026gt; u.name == l.name } Description: MESH.service.name == MESH_DP.service.name Matched Example:  MESH.service.name: mesh-svr::songs.sample-services MESH_DP.service.name: mesh-svr::songs.sample-services    MESH On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: MESH.service.shortName == K8S_SERVICE.service.shortName Matched Example:  MESH.service.name: mesh-svr::songs.sample-services K8S_SERVICE.service.name: skywalking-showcase::songs.sample-services    MESH_DP On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: MESH_DP.service.shortName == K8S_SERVICE.service.shortName Matched Example:  MESH_DP.service.name: mesh-svr::songs.sample-services K8S_SERVICE.service.name: skywalking-showcase::songs.sample-services    MYSQL On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: MYSQL.service.shortName == K8S_SERVICE.service.shortName Matched Example:  MYSQL.service.name: mysql::mysql.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::mysql.skywalking-showcase    POSTGRESQL On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: POSTGRESQL.service.shortName == K8S_SERVICE.service.shortName Matched Example:  POSTGRESQL.service.name: postgresql::psql.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::psql.skywalking-showcase    CLICKHOUSE On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: CLICKHOUSE.service.shortName == K8S_SERVICE.service.shortName Matched Example:  CLICKHOUSE.service.name: clickhouse::clickhouse.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::clickhouse.skywalking-showcase    NGINX On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: NGINX.service.shortName == K8S_SERVICE.service.shortName Matched Example:  NGINX.service.name: nginx::nginx.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::nginx.skywalking-showcase    APISIX On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: APISIX.service.shortName == K8S_SERVICE.service.shortName Matched Example:  APISIX.service.name: APISIX::frontend.sample-services K8S_SERVICE.service.name: skywalking-showcase::frontend.sample-services    ROCKETMQ On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: ROCKETMQ.service.shortName == K8S_SERVICE.service.shortName Matched Example:  ROCKETMQ.service.name: rocketmq::rocketmq.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::rocketmq.skywalking-showcase    RABBITMQ On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: RABBITMQ.service.shortName == K8S_SERVICE.service.shortName Matched 
Example:  RABBITMQ.service.name: rabbitmq::rabbitmq.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::rabbitmq.skywalking-showcase    KAFKA On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: KAFKA.service.shortName == K8S_SERVICE.service.shortName Matched Example:  KAFKA.service.name: kafka::kafka.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::kafka.skywalking-showcase    PULSAR On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: PULSAR.service.shortName == K8S_SERVICE.service.shortName Matched Example:  PULSAR.service.name: pulsar::pulsar.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::pulsar.skywalking-showcase    SO11Y_OAP On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: SO11Y_OAP.service.shortName == K8S_SERVICE.service.shortName Matched Example:  SO11Y_OAP.service.name: demo-oap.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::demo-oap.skywalking-showcase    Build Through Specific Agents Use agent tech involved(such as eBPF) and deployment tools(such as operator and agent injector) to detect the service hierarchy relations.\n   Upper layer Lower layer Agent    Instance Hierarchy Instance Hierarchy relationship follows the same definition as Service Hierarchy.\nAutomatically Matching If the service hierarchy is built, the instance hierarchy relationship could be detected automatically through the following rules:\n The upper instance name equals the lower instance name. The upper instance attribute pod/hostname equals the lower instance attribute pod/hostname. The upper instance attribute pod/hostname equals the lower instance name. The upper instance name equals the lower instance attribute pod/hostname.  Build Through Specific Agents ","excerpt":"Service Hierarchy SkyWalking v10 introduces a new concept Service Hierarchy which defines the …","ref":"/docs/main/next/en/concepts-and-designs/service-hierarchy/","title":"Service Hierarchy"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. 
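The ModuleName.ProviderName.SettingKey rule above is simple to sanity-check. The snippet below is only a sketch of the lookup that rule implies, not the OAP implementation: the -D property core.default.restHost overrides the restHost value nested under core: default: in application.yml.

```java
public class PropertyOverrideDemo {
    public static void main(String[] args) {
        // Run with: java -Dcore.default.restHost=172.0.4.12 PropertyOverrideDemo
        String key = String.join(".", "core", "default", "restHost"); // ModuleName.ProviderName.SettingKey
        String value = System.getProperty(key, "0.0.0.0"); // falls back to the default from application.yml
        System.out.println(key + " = " + value);
    }
}
```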
In this case, if the REST_HOST  environment variable does not exist, but the ANOTHER_REST_HOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/latest/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the ANOTHER_REST_HOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/next/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. 
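Because the nested placeholder form is easy to misread, here is a short sketch of the resolution order it describes. This is not the backend's actual parser, just the ${NAME:default} semantics spelled out in plain Java, where the default part may itself be another placeholder.

```java
public class PlaceholderDemo {

    // Resolve ${NAME:default}; the default may itself be another ${...} placeholder.
    static String resolve(String value) {
        if (value.startsWith("${") && value.endsWith("}")) {
            String inner = value.substring(2, value.length() - 1);
            int colon = inner.indexOf(':');
            String name = colon < 0 ? inner : inner.substring(0, colon);
            String fallback = colon < 0 ? "" : inner.substring(colon + 1);
            String env = System.getenv(name);
            // Use the environment variable if it exists, otherwise resolve the fallback.
            return env != null ? env : resolve(fallback);
        }
        return value;
    }

    public static void main(String[] args) {
        // With neither REST_HOST nor ANOTHER_REST_HOST set, this prints 127.0.0.1;
        // exporting ANOTHER_REST_HOST=172.0.4.12 (and not REST_HOST) prints 172.0.4.12.
        System.out.println(resolve("${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}"));
    }
}
```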
In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. 
In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. 
In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. 
In this case, if the REST_HOST  environment variable does not exist, but the ANOTHER_REST_HOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the ANOTHER_REST_HOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the default configuration options.\nThis configuration file is used during hybrid compilation to write the configuration information of the Agent into the program. When the program boots, the agent would read the pre-configured content.\nConfiguration Changes The values in the config file should be updated by following the user requirements. They are applied during the hybrid compilation process.\nFor missing configuration items in the custom file, the Agent would use the values from the default configuration.\nEnvironment Variables In the default configuration, you can see that most of the configurations are in the format ${xxx:config_value}. It means that when the program starts, the agent would first read the xxx from the system environment variables at runtime. 
If it cannot be found, the config_value would be used as the value.\nNote that the search for environment variables is at runtime, not compile time.\n","excerpt":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the …","ref":"/docs/skywalking-go/latest/en/advanced-features/settings-override/","title":"Setting Override"},{"body":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the default configuration options.\nThis configuration file is used during hybrid compilation to write the configuration information of the Agent into the program. When the program boots, the agent would read the pre-configured content.\nConfiguration Changes The values in the config file should be updated by following the user requirements. They are applied during the hybrid compilation process.\nFor missing configuration items in the custom file, the Agent would use the values from the default configuration.\nEnvironment Variables In the default configuration, you can see that most of the configurations are in the format ${xxx:config_value}. It means that when the program starts, the agent would first read the xxx from the system environment variables at runtime. If it cannot be found, the config_value would be used as the value.\nNote that the search for environment variables is at runtime, not compile time.\n","excerpt":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the …","ref":"/docs/skywalking-go/next/en/advanced-features/settings-override/","title":"Setting Override"},{"body":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the default configuration options.\nThis configuration file is used during hybrid compilation to write the configuration information of the Agent into the program. When the program boots, the agent would read the pre-configured content.\nConfiguration Changes The values in the config file should be updated by following the user requirements. They are applied during the hybrid compilation process.\nFor missing configuration items in the custom file, the Agent would use the values from the default configuration.\nEnvironment Variables In the default configuration, you can see that most of the configurations are in the format ${xxx:config_value}. It means that when the program starts, the agent would first read the xxx from the system environment variables at runtime. If it cannot be found, the config_value would be used as the value.\nNote that the search for environment variables is at runtime, not compile time.\n","excerpt":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the …","ref":"/docs/skywalking-go/v0.4.0/en/advanced-features/settings-override/","title":"Setting Override"},{"body":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","excerpt":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/setting-override/","title":"Setting Override"},{"body":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","excerpt":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/setting-override/","title":"Setting Override"},{"body":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","excerpt":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/setting-override/","title":"Setting Override"},{"body":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","excerpt":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/setting-override/","title":"Setting Override"},{"body":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","excerpt":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You could override the settings in rover_configs.yaml\nSystem environment variables   Example\nOverride core.backend.addr in this setting segment through environment variables\n  core:backend:addr:${ROVER_BACKEND_ADDR:localhost:11800}If the ROVER_BACKEND_ADDR  environment variable exists in your operating system and its value is oap:11800, then the value of core.backend.addr here will be overwritten to oap:11800, otherwise, it will be set to localhost:11800.\n","excerpt":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You …","ref":"/docs/skywalking-rover/latest/en/setup/configuration/override-settings/","title":"Setting Override"},{"body":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You could override the settings in rover_configs.yaml\nSystem environment variables   Example\nOverride core.backend.addr in this setting segment through environment variables\n  core:backend:addr:${ROVER_BACKEND_ADDR:localhost:11800}If the ROVER_BACKEND_ADDR  environment variable exists in your operating system and its value is oap:11800, then the value of core.backend.addr here will be overwritten to oap:11800, otherwise, it will be set to localhost:11800.\n","excerpt":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. 
You …","ref":"/docs/skywalking-rover/next/en/setup/configuration/override-settings/","title":"Setting Override"},{"body":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You could override the settings in rover_configs.yaml\nSystem environment variables   Example\nOverride core.backend.addr in this setting segment through environment variables\n  core:backend:addr:${ROVER_BACKEND_ADDR:localhost:11800}If the ROVER_BACKEND_ADDR  environment variable exists in your operating system and its value is oap:11800, then the value of core.backend.addr here will be overwritten to oap:11800, otherwise, it will be set to localhost:11800.\n","excerpt":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/override-settings/","title":"Setting Override"},{"body":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. You could override the settings in satellite_config.yaml\nSystem environment variables   Example\nOverride log_pattern in this setting segment through environment variables\n  logger:log_pattern:${SATELLITE_LOGGER_LOG_PATTERN:%time [%level][%field] - %msg}time_pattern:${SATELLITE_LOGGER_TIME_PATTERN:2006-01-02 15:04:05.000}level:${SATELLITE_LOGGER_LEVEL:info}If the SATELLITE_LOGGER_LOG_PATTERN  environment variable exists in your operating system and its value is %msg, then the value of log_pattern here will be overwritten to %msg, otherwise, it will be set to %time [%level][%field] - %msg.\n","excerpt":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. …","ref":"/docs/skywalking-satellite/latest/en/setup/configuration/override-settings/","title":"Setting Override"},{"body":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. You could override the settings in satellite_config.yaml\nSystem environment variables   Example\nOverride log_pattern in this setting segment through environment variables\n  logger:log_pattern:${SATELLITE_LOGGER_LOG_PATTERN:%time [%level][%field] - %msg}time_pattern:${SATELLITE_LOGGER_TIME_PATTERN:2006-01-02 15:04:05.000}level:${SATELLITE_LOGGER_LEVEL:info}If the SATELLITE_LOGGER_LOG_PATTERN  environment variable exists in your operating system and its value is %msg, then the value of log_pattern here will be overwritten to %msg, otherwise, it will be set to %time [%level][%field] - %msg.\n","excerpt":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. …","ref":"/docs/skywalking-satellite/next/en/setup/configuration/override-settings/","title":"Setting Override"},{"body":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. 
You could override the settings in satellite_config.yaml\nSystem environment variables   Example\nOverride log_pattern in this setting segment through environment variables\n  logger:log_pattern:${SATELLITE_LOGGER_LOG_PATTERN:%time [%level][%field] - %msg}time_pattern:${SATELLITE_LOGGER_TIME_PATTERN:2006-01-02 15:04:05.000}level:${SATELLITE_LOGGER_LEVEL:info}If the SATELLITE_LOGGER_LOG_PATTERN  environment variable exists in your operating system and its value is %msg, then the value of log_pattern here will be overwritten to %msg, otherwise, it will be set to %time [%level][%field] - %msg.\n","excerpt":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/override-settings/","title":"Setting Override"},{"body":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and command to execute. If you haven\u0026rsquo;t read the Module Design, recommend read this document first.\n Installation Configuration file Run E2E Tests  ","excerpt":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and …","ref":"/docs/skywalking-infra-e2e/latest/en/setup/readme/","title":"Setup"},{"body":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and command to execute. If you haven\u0026rsquo;t read the Module Design, recommend read this document first.\n Installation Configuration file Run E2E Tests  ","excerpt":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and …","ref":"/docs/skywalking-infra-e2e/next/en/setup/readme/","title":"Setup"},{"body":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and command to execute. If you haven\u0026rsquo;t read the Module Design, recommend read this document first.\n Installation Configuration file Run E2E Tests  ","excerpt":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/readme/","title":"Setup"},{"body":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by configs/rover_configs.yaml. Understanding the setting file will help you to read this document.\nFollow Deploy on Kubernetes document to run rover in your cluster.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demos. Usually, the process to be monitored is first declared.\nThen, you can use bin/startup.sh to start up the rover with their config.\nSkyWalking OAP Compatibility The SkyWalking Rover requires specialized protocols to communicate with SkyWalking OAP.\n   SkyWalking Rover Version SkyWalking OAP Notice     0.6.0+ \u0026gt; = 10.0.0 Only support Kubernetes.   0.1.0+ \u0026gt; = 9.1.0     Configuration  Common configurations about logs, backend address, cert files, etc. Service Discovery includes advanced setups about the ways of discovering services on your Kubernetes cluster. Access logs reports L2 to L4 network traffic relative information through access logs, to help OAP backend to do topology and metrics analysis. Profiling is an on-demand feature to enhance general observability besides access logs. 
It provides eBPF powered process ON_CPU, OFF_CPU profiling and network advanced profiling to link HTTP traffic with SkyWalking and Zipkin traces.  To adjust the configurations, refer to Overriding Setting document for more details.\nPrerequisites Currently, Linux operating systems are supported from version 4.9 and above, except for network profiling which requires version 4.16 or higher.\nThe following table lists currently supported/tested operating systems.\n   System Kernel Version On CPU Profiling Off CPU Profiling Network Profiling     CentOS 7 3.10.0 No No No   CentOS Stream 8 4.18.0 Yes Yes Yes   CentOS Stream 9 5.47.0 Yes Yes Yes   Debian 10 4.19.0 Yes Yes Yes   Debian 11 5.10.0 Yes Yes Yes(TCP Drop Monitor Excluded)   Fedora 35 5.14.10 Yes Yes Yes(TCP Drop Monitor Excluded)   RHEL 7 3.10.0 No No No   RHEL 8 4.18.0 Yes Yes Yes   RHEL 9 5.14.0 Yes Yes Yes   Rocky Linux 8 4.18.0 Yes Yes Yes   Rocky Linux 9 5.14.0 Yes Yes Yes   Ubuntu 1804 5.4.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.10 5.19.0 Yes Yes Yes   Ubuntu Pro 16.04 4.15.0 Yes Yes No   Ubuntu Pro 18.04 5.4.0 Yes Yes Yes   Ubuntu Pro 20.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes    ","excerpt":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by …","ref":"/docs/skywalking-rover/latest/en/setup/overview/","title":"Setup"},{"body":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by configs/rover_configs.yaml. Understanding the setting file will help you to read this document.\nFollow Deploy on Kubernetes document to run rover in your cluster.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demos. Usually, the process to be monitored is first declared.\nThen, you can use bin/startup.sh to start up the rover with their config.\nSkyWalking OAP Compatibility The SkyWalking Rover requires specialized protocols to communicate with SkyWalking OAP.\n   SkyWalking Rover Version SkyWalking OAP Notice     0.6.0+ \u0026gt; = 10.0.0 Only support Kubernetes.   0.1.0+ \u0026gt; = 9.1.0     Configuration  Common configurations about logs, backend address, cert files, etc. Service Discovery includes advanced setups about the ways of discovering services on your Kubernetes cluster. Access logs reports L2 to L4 network traffic relative information through access logs, to help OAP backend to do topology and metrics analysis. Profiling is an on-demand feature to enhance general observability besides access logs. It provides eBPF powered process ON_CPU, OFF_CPU profiling and network advanced profiling to link HTTP traffic with SkyWalking and Zipkin traces.  
To adjust the configurations, refer to Overriding Setting document for more details.\nPrerequisites Currently, Linux operating systems are supported from version 4.9 and above, except for network profiling which requires version 4.16 or higher.\nThe following table lists currently supported/tested operating systems.\n   System Kernel Version On CPU Profiling Off CPU Profiling Network Profiling     CentOS 7 3.10.0 No No No   CentOS Stream 8 4.18.0 Yes Yes Yes   CentOS Stream 9 5.47.0 Yes Yes Yes   Debian 10 4.19.0 Yes Yes Yes   Debian 11 5.10.0 Yes Yes Yes(TCP Drop Monitor Excluded)   Fedora 35 5.14.10 Yes Yes Yes(TCP Drop Monitor Excluded)   RHEL 7 3.10.0 No No No   RHEL 8 4.18.0 Yes Yes Yes   RHEL 9 5.14.0 Yes Yes Yes   Rocky Linux 8 4.18.0 Yes Yes Yes   Rocky Linux 9 5.14.0 Yes Yes Yes   Ubuntu 1804 5.4.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.10 5.19.0 Yes Yes Yes   Ubuntu Pro 16.04 4.15.0 Yes Yes No   Ubuntu Pro 18.04 5.4.0 Yes Yes Yes   Ubuntu Pro 20.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes    ","excerpt":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by …","ref":"/docs/skywalking-rover/next/en/setup/overview/","title":"Setup"},{"body":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by configs/rover_configs.yaml. Understanding the setting file will help you to read this document.\nFollow Deploy on Kubernetes document to run rover in your cluster.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demos. Usually, the process to be monitored is first declared.\nThen, you can use bin/startup.sh to start up the rover with their config.\nSkyWalking OAP Compatibility The SkyWalking Rover requires specialized protocols to communicate with SkyWalking OAP.\n   SkyWalking Rover Version SkyWalking OAP Notice     0.6.0+ \u0026gt; = 10.0.0 Only support Kubernetes.   0.1.0+ \u0026gt; = 9.1.0     Configuration  Common configurations about logs, backend address, cert files, etc. Service Discovery includes advanced setups about the ways of discovering services on your Kubernetes cluster. Access logs reports L2 to L4 network traffic relative information through access logs, to help OAP backend to do topology and metrics analysis. Profiling is an on-demand feature to enhance general observability besides access logs. It provides eBPF powered process ON_CPU, OFF_CPU profiling and network advanced profiling to link HTTP traffic with SkyWalking and Zipkin traces.  
To adjust the configurations, refer to Overriding Setting document for more details.\nPrerequisites Currently, Linux operating systems are supported from version 4.9 and above, except for network profiling which requires version 4.16 or higher.\nThe following table lists currently supported/tested operating systems.\n   System Kernel Version On CPU Profiling Off CPU Profiling Network Profiling     CentOS 7 3.10.0 No No No   CentOS Stream 8 4.18.0 Yes Yes Yes   CentOS Stream 9 5.47.0 Yes Yes Yes   Debian 10 4.19.0 Yes Yes Yes   Debian 11 5.10.0 Yes Yes Yes(TCP Drop Monitor Excluded)   Fedora 35 5.14.10 Yes Yes Yes(TCP Drop Monitor Excluded)   RHEL 7 3.10.0 No No No   RHEL 8 4.18.0 Yes Yes Yes   RHEL 9 5.14.0 Yes Yes Yes   Rocky Linux 8 4.18.0 Yes Yes Yes   Rocky Linux 9 5.14.0 Yes Yes Yes   Ubuntu 1804 5.4.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.10 5.19.0 Yes Yes Yes   Ubuntu Pro 16.04 4.15.0 Yes Yes No   Ubuntu Pro 18.04 5.4.0 Yes Yes Yes   Ubuntu Pro 20.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes    ","excerpt":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/overview/","title":"Setup"},{"body":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by configs/satellite_config.yaml. Understanding the setting file will help you to read this document.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nYou can use bin/startup.sh (or cmd) to start up the satellite with their default settings, set out as follows:\n Receive SkyWalking related protocols through grpc(listens on 0.0.0.0/11800) and transmit them to SkyWalking backend(to 0.0.0.0/11800). Expose Self-Observability telemetry data to Prometheus(listens on 0.0.0.0/1234)  Startup script Startup Script\nbin/startup.sh Examples You can quickly build your satellite according to the following examples:\nDeploy  Deploy on Linux and Windows Deploy on Kubernetes  More Use Cases  Transmit Log to Kafka Enable/Disable Channel Telemetry Exporter  satellite_config.yaml The core concept behind this setting file is, SkyWalking Satellite is based on pure modularization design. End user can switch or assemble the collector features by their own requirements.\nSo, in satellite_config.yaml, there are three parts.\n The common configurations. The sharing plugin configurations. The pipe plugin configurations.  Advanced feature document link list  Overriding settings in satellite_config.yaml is supported  Performance  ALS Load Balance.  ","excerpt":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by …","ref":"/docs/skywalking-satellite/latest/en/setup/readme/","title":"Setup"},{"body":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by configs/satellite_config.yaml. Understanding the setting file will help you to read this document.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. 
Performance and long-term running are not our goals.\nYou can use bin/startup.sh (or cmd) to start up the satellite with their default settings, set out as follows:\n Receive SkyWalking related protocols through grpc(listens on 0.0.0.0/11800) and transmit them to SkyWalking backend(to 0.0.0.0/11800). Expose Self-Observability telemetry data to Prometheus(listens on 0.0.0.0/1234)  Startup script Startup Script\nbin/startup.sh Examples You can quickly build your satellite according to the following examples:\nDeploy  Deploy on Linux and Windows Deploy on Kubernetes  More Use Cases  Transmit Log to Kafka Enable/Disable Channel Telemetry Exporter  satellite_config.yaml The core concept behind this setting file is, SkyWalking Satellite is based on pure modularization design. End user can switch or assemble the collector features by their own requirements.\nSo, in satellite_config.yaml, there are three parts.\n The common configurations. The sharing plugin configurations. The pipe plugin configurations.  Advanced feature document link list  Overriding settings in satellite_config.yaml is supported  Performance  ALS Load Balance.  ","excerpt":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by …","ref":"/docs/skywalking-satellite/next/en/setup/readme/","title":"Setup"},{"body":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by configs/satellite_config.yaml. Understanding the setting file will help you to read this document.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nYou can use bin/startup.sh (or cmd) to start up the satellite with their default settings, set out as follows:\n Receive SkyWalking related protocols through grpc(listens on 0.0.0.0/11800) and transmit them to SkyWalking backend(to 0.0.0.0/11800). Expose Self-Observability telemetry data to Prometheus(listens on 0.0.0.0/1234)  Startup script Startup Script\nbin/startup.sh Examples You can quickly build your satellite according to the following examples:\nDeploy  Deploy on Linux and Windows Deploy on Kubernetes  More Use Cases  Transmit Log to Kafka Enable/Disable Channel Telemetry Exporter  satellite_config.yaml The core concept behind this setting file is, SkyWalking Satellite is based on pure modularization design. End user can switch or assemble the collector features by their own requirements.\nSo, in satellite_config.yaml, there are three parts.\n The common configurations. The sharing plugin configurations. The pipe plugin configurations.  Advanced feature document link list  Overriding settings in satellite_config.yaml is supported  Performance  ALS Load Balance.  
","excerpt":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/readme/","title":"Setup"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/latest/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/next/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For Jetty serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMinThreads:${SW_RECEIVER_SHARING_JETTY_MIN_THREADS:1}restMaxThreads:${SW_RECEIVER_SHARING_JETTY_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_JETTY_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_JETTY_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup in build When you want to integrate the Agent using the original go build command, you need to follow these steps.\n1. Download Agent Download the Agent from the official website.\n2. Install SkyWalking Go SkyWalking Go offers two ways for integration into your project.\n2.1 Agent Injector Agent injector is recommended when you only want to include SkyWalking Go agent in the compiling pipeline or shell.\nPlease execute the following command, which would automatically import SkyWalking Go into your project.\n/path/to/agent -inject /path/to/your/project [-all]  /path/to/agent is the path to the agent which your downloaded. /path/to/your/project is the home path to your project, support absolute and related with current directory path. -all is the parameter for injecting all submodules in your project.  
2.2 Code Dependency Use go get to import the skywalking-go program.\ngo get github.com/apache/skywalking-go Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; NOTICE: Please ensure that the version of the Agent you downloaded is consistent with the version installed via go get in the previous section, to prevent errors such as missing package references during compilation.\n3. Build with SkyWalking Go Agent Add the following parameters in go build:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a Binary Output The binary would be weaved and instrumented by SkyWalking Go.\n","excerpt":"Setup in build When you want to integrate the Agent using the original go build command, you need to …","ref":"/docs/skywalking-go/latest/en/setup/gobuild/","title":"Setup in build"},{"body":"Setup in build When you want to integrate the Agent using the original go build command, you need to follow these steps.\n1. Download Agent Download the Agent from the official website.\n2. Install SkyWalking Go SkyWalking Go offers two ways for integration into your project.\n2.1 Agent Injector Agent injector is recommended when you only want to include SkyWalking Go agent in the compiling pipeline or shell.\nPlease execute the following command, which would automatically import SkyWalking Go into your project.\n/path/to/agent -inject /path/to/your/project [-all]  /path/to/agent is the path to the agent which your downloaded. /path/to/your/project is the home path to your project, support absolute and related with current directory path. -all is the parameter for injecting all submodules in your project.  2.2 Code Dependency Use go get to import the skywalking-go program.\ngo get github.com/apache/skywalking-go Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; NOTICE: Please ensure that the version of the Agent you downloaded is consistent with the version installed via go get in the previous section, to prevent errors such as missing package references during compilation.\n3. Build with SkyWalking Go Agent Add the following parameters in go build:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a Binary Output The binary would be weaved and instrumented by SkyWalking Go.\n","excerpt":"Setup in build When you want to integrate the Agent using the original go build command, you need to …","ref":"/docs/skywalking-go/next/en/setup/gobuild/","title":"Setup in build"},{"body":"Setup in build When you want to integrate the Agent using the original go build command, you need to follow these steps.\n1. Download Agent Download the Agent from the official website.\n2. 
Install SkyWalking Go SkyWalking Go offers two ways for integration into your project.\n2.1 Agent Injector Agent injector is recommended when you only want to include SkyWalking Go agent in the compiling pipeline or shell.\nPlease execute the following command, which would automatically import SkyWalking Go into your project.\n/path/to/agent -inject /path/to/your/project [-all]  /path/to/agent is the path to the agent which your downloaded. /path/to/your/project is the home path to your project, support absolute and related with current directory path. -all is the parameter for injecting all submodules in your project.  2.2 Code Dependency Use go get to import the skywalking-go program.\ngo get github.com/apache/skywalking-go Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; NOTICE: Please ensure that the version of the Agent you downloaded is consistent with the version installed via go get in the previous section, to prevent errors such as missing package references during compilation.\n3. Build with SkyWalking Go Agent Add the following parameters in go build:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a Binary Output The binary would be weaved and instrumented by SkyWalking Go.\n","excerpt":"Setup in build When you want to integrate the Agent using the original go build command, you need to …","ref":"/docs/skywalking-go/v0.4.0/en/setup/gobuild/","title":"Setup in build"},{"body":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container image.\nCustomized Dockerfile Using the SkyWalking Go provided image as the base image, perform file copying and other operations in the Dockerfile.\n# import the skywalking go base imageFROMapache/skywalking-go:\u0026lt;version\u0026gt;-go\u0026lt;go version\u0026gt;# Copy application codeCOPY /path/to/project /path/to/project# Inject the agent into the project or get dependencies by application selfRUN skywalking-go-agent -inject /path/to/project# Building the project including the agentRUN go build -toolexec=\u0026#34;skywalking-go-agent\u0026#34; -a /path/to/project# More operations...In the above code, we have performed the following actions:\n Used the SkyWalking Go provided image as the base image, which currently supports the following Go versions: 1.16, 1.17, 1.18, 1.19, 1.20. Copied the project into the Docker image. Installed SkyWalking Go and compiled the project, read this documentation for more detail. The SkyWalking Go agent is already installed in the /usr/local/bin directory with the name skywalking-go-agent.  
","excerpt":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container …","ref":"/docs/skywalking-go/latest/en/setup/docker/","title":"Setup in docker"},{"body":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container image.\nCustomized Dockerfile Using the SkyWalking Go provided image as the base image, perform file copying and other operations in the Dockerfile.\n# import the skywalking go base imageFROMapache/skywalking-go:\u0026lt;version\u0026gt;-go\u0026lt;go version\u0026gt;# Copy application codeCOPY /path/to/project /path/to/project# Inject the agent into the project or get dependencies by application selfRUN skywalking-go-agent -inject /path/to/project# Building the project including the agentRUN go build -toolexec=\u0026#34;skywalking-go-agent\u0026#34; -a /path/to/project# More operations...In the above code, we have performed the following actions:\n Used the SkyWalking Go provided image as the base image, which currently supports the following Go versions: 1.16, 1.17, 1.18, 1.19, 1.20. Copied the project into the Docker image. Installed SkyWalking Go and compiled the project, read this documentation for more detail. The SkyWalking Go agent is already installed in the /usr/local/bin directory with the name skywalking-go-agent.  ","excerpt":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container …","ref":"/docs/skywalking-go/next/en/setup/docker/","title":"Setup in docker"},{"body":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container image.\nCustomized Dockerfile Using the SkyWalking Go provided image as the base image, perform file copying and other operations in the Dockerfile.\n# import the skywalking go base imageFROMapache/skywalking-go:\u0026lt;version\u0026gt;-go\u0026lt;go version\u0026gt;# Copy application codeCOPY /path/to/project /path/to/project# Inject the agent into the project or get dependencies by application selfRUN skywalking-go-agent -inject /path/to/project# Building the project including the agentRUN go build -toolexec=\u0026#34;skywalking-go-agent\u0026#34; -a /path/to/project# More operations...In the above code, we have performed the following actions:\n Used the SkyWalking Go provided image as the base image, which currently supports the following Go versions: 1.16, 1.17, 1.18, 1.19, 1.20. Copied the project into the Docker image. Installed SkyWalking Go and compiled the project, read this documentation for more detail. The SkyWalking Go agent is already installed in the /usr/local/bin directory with the name skywalking-go-agent.  ","excerpt":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container …","ref":"/docs/skywalking-go/v0.4.0/en/setup/docker/","title":"Setup in docker"},{"body":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. 
New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- expired-plugins # Expired plugins are moved to this folder. No guarantee of working and maintenance. apm-impala-2.6.x-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","excerpt":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/readme/","title":"Setup java agent"},{"body":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- expired-plugins # Expired plugins are moved to this folder. No guarantee of working and maintenance. apm-impala-2.6.x-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  
set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","excerpt":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/readme/","title":"Setup java agent"},{"body":"Setup java agent  Agent is available for JDK 8 - 17. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  The agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","excerpt":"Setup java agent  Agent is available for JDK 8 - 17. Find agent folder in SkyWalking release package …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/readme/","title":"Setup java agent"},{"body":"Setup java agent  Agent is available for JDK 8 - 21. 
Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","excerpt":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/readme/","title":"Setup java agent"},{"body":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- expired-plugins # Expired plugins are moved to this folder. No guarantee of working and maintenance. apm-impala-2.6.x-plugin.jar ..... 
+-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","excerpt":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/readme/","title":"Setup java agent"},{"body":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini.  Requirements  GCC Rustc 1.65+ Cargo Libclang 9.0+ Make Protoc  Install dependencies For Debian-base OS sudo apt install gcc make llvm-13-dev libclang-13-dev protobuf-c-compiler protobuf-compiler For Alpine Linux apk add gcc make musl-dev llvm15-dev clang15-dev protobuf-c-compiler Install Rust globally The officially recommended way to install Rust is via rustup.\nBut because the source code toolchain is override by rust-toolchain.toml, so if you don\u0026rsquo;t need multi version Rust, we recommend to install Rust by these way:\n  Install through OS package manager (The Rust version in the source must be \u0026gt;= 1.65).\n  Through standalone installers.\nFor linux x86_64 user:\nwget https://static.rust-lang.org/dist/rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz tar zxvf rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz cd rust-1.65.0-x86_64-unknown-linux-gnu ./install.sh   Through rustup but set default-toolchain to none.\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none   Install  Notice: If you compile skywalking_agent in Alpine Linux, you have to disable crt-static, otherwise the problem will be throw: \u0026ldquo;the libclang shared library at /usr/lib/libclang.so.15.0.7 could not be opened: Dynamic loading not supported\u0026rdquo;.\nYou can disable crt-static by environment variable:\nexport RUSTFLAGS=\u0026#34;-C target-feature=-crt-static\u0026#34;  Install from pecl.net pecl install skywalking_agent Install from the source codes git clone --recursive https://github.com/apache/skywalking-php.git cd skywalking-php phpize ./configure make make install Configure Configure skywalking agent in your php.ini.\n[skywalking_agent] extension = skywalking_agent.so ; Enable skywalking_agent extension or not. skywalking_agent.enable = Off ; Log file path. skywalking_agent.log_file = /tmp/skywalking-agent.log ; Log level: one of `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. 
skywalking_agent.log_level = INFO ; Address of skywalking oap server. skywalking_agent.server_addr = 127.0.0.1:11800 ; Application service name. skywalking_agent.service_name = hello-skywalking Refer to the Configuration section for more configuration items.\n Notice: It is not recommended to enable skywalking_agent.enable by default globally, because skywalking agent will modify the hook function and fork a new process to be a worker. Enabling it by default will cause extra meaningless consumption when skywalking agent is not needed (such as simply executing a php script).\n Run Start php-fpm server:\nphp-fpm -F -d \u0026#34;skywalking_agent.enable=On\u0026#34;  Notice: It is necessary to keep the php-fpm process running in the foreground (by specifying the \u0026gt; -F parameter, etc.), running php-fpm as a daemon will cause the skywalking-agent reporter process immediately exit.\n ","excerpt":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini. …","ref":"/docs/skywalking-php/latest/en/setup/service-agent/php-agent/readme/","title":"Setup PHP Agent"},{"body":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini.  Requirements  GCC Rustc 1.65+ Cargo Libclang 9.0+ Make Protoc  Install dependencies For Debian-base OS sudo apt install gcc make llvm-13-dev libclang-13-dev protobuf-c-compiler protobuf-compiler For Alpine Linux apk add gcc make musl-dev llvm15-dev clang15-dev protobuf-c-compiler Install Rust globally The officially recommended way to install Rust is via rustup.\nBut because the source code toolchain is override by rust-toolchain.toml, so if you don\u0026rsquo;t need multi version Rust, we recommend to install Rust by these way:\n  Install through OS package manager (The Rust version in the source must be \u0026gt;= 1.65).\n  Through standalone installers.\nFor linux x86_64 user:\nwget https://static.rust-lang.org/dist/rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz tar zxvf rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz cd rust-1.65.0-x86_64-unknown-linux-gnu ./install.sh   Through rustup but set default-toolchain to none.\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none   Install  Notice: If you compile skywalking_agent in Alpine Linux, you have to disable crt-static, otherwise the problem will be throw: \u0026ldquo;the libclang shared library at /usr/lib/libclang.so.15.0.7 could not be opened: Dynamic loading not supported\u0026rdquo;.\nYou can disable crt-static by environment variable:\nexport RUSTFLAGS=\u0026#34;-C target-feature=-crt-static\u0026#34;  Install from pecl.net pecl install skywalking_agent Install from the source codes git clone --recursive https://github.com/apache/skywalking-php.git cd skywalking-php phpize ./configure make make install Configure Configure skywalking agent in your php.ini.\n[skywalking_agent] extension = skywalking_agent.so ; Enable skywalking_agent extension or not. skywalking_agent.enable = Off ; Log file path. skywalking_agent.log_file = /tmp/skywalking-agent.log ; Log level: one of `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. skywalking_agent.log_level = INFO ; Address of skywalking oap server. skywalking_agent.server_addr = 127.0.0.1:11800 ; Application service name. 
skywalking_agent.service_name = hello-skywalking Refer to the Configuration section for more configuration items.\n Notice: It is not recommended to enable skywalking_agent.enable by default globally, because skywalking agent will modify the hook function and fork a new process to be a worker. Enabling it by default will cause extra meaningless consumption when skywalking agent is not needed (such as simply executing a php script).\n Run Start php-fpm server:\nphp-fpm -F -d \u0026#34;skywalking_agent.enable=On\u0026#34;  Notice: It is necessary to keep the php-fpm process running in the foreground (by specifying the \u0026gt; -F parameter, etc.), running php-fpm as a daemon will cause the skywalking-agent reporter process immediately exit.\n ","excerpt":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini. …","ref":"/docs/skywalking-php/next/en/setup/service-agent/php-agent/readme/","title":"Setup PHP Agent"},{"body":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini.  Requirements  GCC Rustc 1.65+ Cargo Libclang 9.0+ Make Protoc  Install dependencies For Debian-base OS sudo apt install gcc make llvm-13-dev libclang-13-dev protobuf-c-compiler protobuf-compiler For Alpine Linux apk add gcc make musl-dev llvm15-dev clang15-dev protobuf-c-compiler Install Rust globally The officially recommended way to install Rust is via rustup.\nBut because the source code toolchain is override by rust-toolchain.toml, so if you don\u0026rsquo;t need multi version Rust, we recommend to install Rust by these way:\n  Install through OS package manager (The Rust version in the source must be \u0026gt;= 1.65).\n  Through standalone installers.\nFor linux x86_64 user:\nwget https://static.rust-lang.org/dist/rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz tar zxvf rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz cd rust-1.65.0-x86_64-unknown-linux-gnu ./install.sh   Through rustup but set default-toolchain to none.\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none   Install  Notice: If you compile skywalking_agent in Alpine Linux, you have to disable crt-static, otherwise the problem will be throw: \u0026ldquo;the libclang shared library at /usr/lib/libclang.so.15.0.7 could not be opened: Dynamic loading not supported\u0026rdquo;.\nYou can disable crt-static by environment variable:\nexport RUSTFLAGS=\u0026#34;-C target-feature=-crt-static\u0026#34;  Install from pecl.net pecl install skywalking_agent Install from the source codes git clone --recursive https://github.com/apache/skywalking-php.git cd skywalking-php phpize ./configure make make install Configure Configure skywalking agent in your php.ini.\n[skywalking_agent] extension = skywalking_agent.so ; Enable skywalking_agent extension or not. skywalking_agent.enable = Off ; Log file path. skywalking_agent.log_file = /tmp/skywalking-agent.log ; Log level: one of `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. skywalking_agent.log_level = INFO ; Address of skywalking oap server. skywalking_agent.server_addr = 127.0.0.1:11800 ; Application service name. skywalking_agent.service_name = hello-skywalking Refer to the Configuration section for more configuration items.\n Notice: It is not recommended to enable skywalking_agent.enable by default globally, because skywalking agent will modify the hook function and fork a new process to be a worker. 
Enabling it by default will cause extra meaningless consumption when skywalking agent is not needed (such as simply executing a php script).\n Run Start php-fpm server:\nphp-fpm -F -d \u0026#34;skywalking_agent.enable=On\u0026#34;  Notice: It is necessary to keep the php-fpm process running in the foreground (by specifying the \u0026gt; -F parameter, etc.), running php-fpm as a daemon will cause the skywalking-agent reporter process immediately exit.\n ","excerpt":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini. …","ref":"/docs/skywalking-php/v0.7.0/en/setup/service-agent/php-agent/readme/","title":"Setup PHP Agent"},{"body":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients and servers.\nCommon Configuration    Config Default Description     pipe_name sharing The group name of sharing plugins    Clients Clients have a series of client plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all client plugin configurations.\nServers Servers have a series of server plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all server plugin configurations.\nExample # The sharing plugins referenced by the specific plugins in the different pipes.sharing:common_config:pipe_name:sharingclients:- plugin_name:\u0026#34;kafka-client\u0026#34;brokers:${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}version:${SATELLITE_KAFKA_VERSION:\u0026#34;2.1.1\u0026#34;}servers:- plugin_name:\u0026#34;grpc-server\u0026#34;- plugin_name:\u0026#34;prometheus-server\u0026#34;address:${SATELLITE_PROMETHEUS_ADDRESS:\u0026#34;:8090\u0026#34;}","excerpt":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients …","ref":"/docs/skywalking-satellite/latest/en/setup/configuration/sharing-plugins/","title":"Sharing Plugins"},{"body":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients and servers.\nCommon Configuration    Config Default Description     pipe_name sharing The group name of sharing plugins    Clients Clients have a series of client plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all client plugin configurations.\nServers Servers have a series of server plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all server plugin configurations.\nExample # The sharing plugins referenced by the specific plugins in the different pipes.sharing:common_config:pipe_name:sharingclients:- plugin_name:\u0026#34;kafka-client\u0026#34;brokers:${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}version:${SATELLITE_KAFKA_VERSION:\u0026#34;2.1.1\u0026#34;}servers:- plugin_name:\u0026#34;grpc-server\u0026#34;- plugin_name:\u0026#34;prometheus-server\u0026#34;address:${SATELLITE_PROMETHEUS_ADDRESS:\u0026#34;:8090\u0026#34;}","excerpt":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients …","ref":"/docs/skywalking-satellite/next/en/setup/configuration/sharing-plugins/","title":"Sharing Plugins"},{"body":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients and servers.\nCommon Configuration    Config Default Description     pipe_name sharing The group name of sharing plugins    Clients Clients have a series of client plugins, which would be sharing with the plugins of the other pipes. 
Please read the doc to find all client plugin configurations.\nServers Servers have a series of server plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all server plugin configurations.\nExample # The sharing plugins referenced by the specific plugins in the different pipes.sharing:common_config:pipe_name:sharingclients:- plugin_name:\u0026#34;kafka-client\u0026#34;brokers:${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}version:${SATELLITE_KAFKA_VERSION:\u0026#34;2.1.1\u0026#34;}servers:- plugin_name:\u0026#34;grpc-server\u0026#34;- plugin_name:\u0026#34;prometheus-server\u0026#34;address:${SATELLITE_PROMETHEUS_ADDRESS:\u0026#34;:8090\u0026#34;}","excerpt":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/sharing-plugins/","title":"Sharing Plugins"},{"body":"SkyWalking 9.x showcase This showcase would follow the latest changes of SkyWalking 9.x, even before the official release.\nThis showcase repository includes an example music application and other manifests to demonstrate the main features of SkyWalking. The music application is composed of several microservices that are written in different programming languages. Here is the architecture:\n%% please read this doc in our official website, otherwise the graph is not correctly rendered. graph LR; loadgen[load generator] --\u0026gt; ui(\u0026quot;UI (React)\u0026quot;) --\u0026gt; Traffic1(\u0026quot;HTTP Request for backend serv\u0026quot;) --\u0026gt; apisix(\u0026quot;APISIX as UI container\u0026quot;) --\u0026gt; app(\u0026quot;app server (NodeJS)\u0026quot;) --\u0026gt; gateway(\u0026quot;gateway (Spring)\u0026quot;); ui(\u0026quot;UI (React)\u0026quot;) --\u0026gt; Traffic2(\u0026quot;HTTP Request for UI codes\u0026quot;) --\u0026gt; apisix(\u0026quot;APISIX with UI container\u0026quot;) gateway --\u0026gt; songs(\u0026quot;songs (Spring)\u0026quot;) \u0026amp; rcmd(\u0026quot;recommendations (Python)\u0026quot;); rcmd --\u0026gt; rating(\u0026quot;rating (Go)\u0026quot;); songs --\u0026gt; activeMQ activeMQ --\u0026gt; songs rcmd --\u0026gt; songs; songs --\u0026gt; db(\u0026quot;database (H2)\u0026quot;); Usage Please run the showcase in a brand new test cluster, otherwise the undeploy process may delete some resources that you have installed before running this showcase (for example cert-manager). If you don\u0026rsquo;t do this in a new test cluster, it\u0026rsquo;s all on your own risks!\nThe showcase uses GNU Make and Docker containers to run commands, so please make sure you have make installed and Docker daemon running.\nPrerequisites To deploy the full features of this showcase application, you may need up to 8 CPU cores and 32 GB memory, please increase the Docker daemon resources or Kubernetes cluster resources if you find containers / Pods failed to start up. 
Alternatively, you can also only deploy part of the features that interest you if you don\u0026rsquo;t want to increase the resources, via the guide in Customization.\nQuick Start Make sure you have a running Kubernetes cluster and kubectl can access to that cluster.\ngit clone https://github.com/apache/skywalking-showcase.git cd skywalking-showcase make deploy.kubernetes This will install SkyWalking components, including OAP in cluster mode with 2 nodes, SkyWalking UI, microservices with SkyWalking agent, microservices without SkyWalking agent but managed by Istio, 2 Pods to mimic virtual machines and export metrics to SkyWalking, and enable kubernetes cluster monitoring as well as SkyWalking self observability.\nFor more advanced deployments, check Customization documentation below.\nNotice, when run this showcase locally such as KinD, the images are downloaded inside the KinD, which could take over 10 mins(depend on local network). Rerun make deploy.kubernetes if some timeout errors break the process.\nCustomization The variables defined in Makefile.in can be overridden to customize the showcase, by specifying an environment variable with the same name, e.g.:\nexport ES_VERSION=7.14.0 make \u0026lt;target\u0026gt; or directly specifying in the make command, e.g.: make \u0026lt;target\u0026gt; ES_VERSION=7.14.0.\nRun make help to get more information.\nFeatures The showcase is composed of a set of scenarios with feature flags, you can deploy some of them that interest you by overriding the FEATURE_FLAGS variable defined in Makefile.in, as documented in Customization, e.g.:\nmake deploy.kubernetes FEATURE_FLAGS=single-node,agent Feature flags for different platforms (Kubernetes and Docker Compose) are not necessarily the same so make sure to specify the right feature flags.\nCurrently, the features supported are:\n   Name Description Note     java-agent-injector Use the java agent injector to inject the Skywalking Java agent and deploy microservices with other SkyWalking agent enabled. The microservices include agents for Java, NodeJS server, browser, Python.   agent Deploy microservices with SkyWalking agent pre-installed. In Kubernetes scenarios, please use java-agent-injector instead of this, if possible.   cluster Deploy SkyWalking OAP in cluster mode, with 2 nodes, and SkyWalking UI. Only one of cluster or single-node can be enabled.   single-node Deploy only one single node of SkyWalking OAP, and SkyWalking UI, ElasticSearch as storage. Only one of cluster or single-node can be enabled.   elasticsearch Deploy ElasticSearch as storage, you may want to disable this if you want to use your own ElasticSearch deployments.    postgresql Deploy PostgreSQL as storage, you may want to disable this if you want to use your own PostgreSQL deployments.    so11y Enable SkyWalking self observability. This is enabled by default for platform Docker Compose.   vm-monitor Start 2 virtual machines and export their metrics to SkyWalking. The \u0026ldquo;virtual machines\u0026rdquo; are mimicked by Docker containers or Pods.   als Start microservices WITHOUT SkyWalking agent enabled, and configure SkyWalking to analyze the topology and metrics from their access logs. Command istioctl is required to run this feature. The agentless microservices will be running at namespace ${NAMESPACE}-agentless   kubernetes-monitor Deploy OpenTelemetry and export Kubernetes monitoring metrics to SkyWalking for analysis and display on UI.    
istiod-monitor Deploy OpenTelemetry and export Istio control plane metrics to SkyWalking for analysis and display on UI.    event Deploy tools to trigger events, and SkyWalking Kubernetes event exporter to export events into SkyWalking.    satellite Deploy SkyWalking Satellite to load balance the monitoring data.    trace-profiling Deploy tools to submit trace profiling tasks. Only support deployment with SkyWalking agents installed, currently Java agent and Python agent support trace profiling.   rover Deploy SkyWalking Rover and detect the processes in the Kubernetes environment. Only support deployment in the Kubernetes environment, docker is not supported.   mysql-monitor Start a MySQL server and load generator to execute the sample SQLs periodically, set up fluent bit to fetch slow logs and export to OAP, and export their metrics to SkyWalking.    postgresql-monitor Start a PostgreSQL server, and load generator to execute the sample SQLs periodically, set up fluent bit to fetch slow logs and export to OAP, and export their metrics to SkyWalking.    elasticsearch-monitor Deploy OpenTelemetry and export Elasticsearch monitoring metrics to SkyWalking for analysis and display on UI.    mongodb-monitor Deploy OpenTelemetry and export MongoDB monitoring metrics to SkyWalking for analysis and display on UI.    nginx-monitor Deploy OpenTelemetry and export Nginx metrics and logs to SkyWalking for analysis and display on UI    apisix-monitor Deploy OpenTelemetry and export APISIX metrics to SkyWalking for analysis and display on UI    mesh-with-agent Deploy services with java agent in the service mesh environment. Only support deployment in the Kubernetes environment, docker is not supported.   grafana Deploy a Grafana to show SkyWalking metrics and logs on the Grafana UI. Feel free to modify the Grafana config when deploy your own environment.   r3 Deploy R3 as RESTful URL recognition service.    rocketmq-monitor Deploy OpenTelemetry and export RocketMQ monitoring metrics to SkyWalking for analysis and display on UI.    pulsar-monitor Deploy OpenTelemetry and export Pulsar monitoring metrics to SkyWalking for analysis and display on UI.    rabbitmq-monitor Deploy OpenTelemetry and export RabbitMQ monitoring metrics to SkyWalking for analysis and display on UI.     Kubernetes To deploy the example application in Kubernetes, please make sure that you have kubectl command available, and it can connect to the Kubernetes cluster successfully.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nRun kubectl get nodes to check the connectivity before going to next step. The typical error message that indicates your kubectl cannot connect to a cluster is:\nThe connection to the server localhost:8080 was refused - did you specify the right host or port? Deploy # Deploy make deploy.kubernetes # Undeploy make undeploy.kubernetes # Redeploy make redeploy.kubernetes # equivalent to make undeploy.kubernetes deploy.kubernetes Docker Compose Deploy # Deploy make deploy.docker # Undeploy make undeploy.docker # Redeploy make redeploy.docker # equivalent to make undeploy.docker deploy.docker Traffic Flow After deploy the showcase, the business system would send monitoring traffic to the OAP node, and one agent/sidecar connect to one OAP node directly.\nSatellite If the business traffic is unbalanced, it would cause the OAP node receive unbalanced monitoring data. So, you could add the Satellite component. 
After deploy the showcase with the satellite component, the monitoring traffic would send to the Satellite service, and satellite load balances the traffic to the OAP nodes.\n%% please read this doc in our official website, otherwise the graph is not correctly rendered. graph LR; agent[\u0026quot;business app(agent)\u0026quot;] --\u0026gt; satellite(\u0026quot;satellite\u0026quot;) --\u0026gt; oap(\u0026quot;oap\u0026quot;); envoy[\u0026quot;sidecar(envoy)\u0026quot;] --\u0026gt; satellite; Troubleshooting If you encounter any problems, please add DEBUG=true to the command line to get the output of the resources that will be applied.\nmake deploy.kubernetes DEBUG=true # this will print the resources that will be applied to Kubernetes make deploy.docker DEBUG=true # this will print the merged docker-compose.yaml content that will be used to run in Docker Compose ","excerpt":"SkyWalking 9.x showcase This showcase would follow the latest changes of SkyWalking 9.x, even before …","ref":"/docs/skywalking-showcase/next/readme/","title":"SkyWalking 9.x showcase"},{"body":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x 
undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x spring-cloud-gateway-4.x spring-webflux-6.x spring-webflux-6.x-webclient activemq-artemis-jakarta-client-2.x  ","excerpt":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/plugin-list/","title":"Skywalking Agent List"},{"body":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool 
jetty-server-11.x jetty-client-11.x websphere-liberty-23.x spring-cloud-gateway-4.x spring-webflux-6.x spring-webflux-6.x-webclient activemq-artemis-jakarta-client-2.x c3p0-0.9.x  ","excerpt":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-list/","title":"Skywalking Agent List"},{"body":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x  ","excerpt":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/plugin-list/","title":"Skywalking Agent List"},{"body":"Skywalking Agent List  aerospike activemq-5.x 
armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x  ","excerpt":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/plugin-list/","title":"Skywalking Agent List"},{"body":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus 
grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x spring-cloud-gateway-4.x spring-webflux-6.x spring-webflux-6.x-webclient activemq-artemis-jakarta-client-2.x  ","excerpt":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/plugin-list/","title":"Skywalking Agent List"},{"body":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes.\nSWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nFeatures  Java Agent Injector: Inject the java agent into the application pod natively. Operator: Provision and maintain SkyWalking backend components. Custom Metrics Adapter: Provides custom metrics come from SkyWalking OAP cluster for autoscaling by Kubernetes HPA  Build images Issue below instrument to get the docker image:\nmake or\nmake build To onboard operator or adapter, you should push the image to a registry where the kubernetes cluster can pull it.\nOnboard Java Agent Injector and Operator The java agent injector and operator share a same binary. 
To onboard them, you should follow:\n To install the java agent injector and operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f config/operator-bundle.yaml Onboard Custom Metrics Adapter  Deploy OAP server by referring to Operator Quick Start. Apply the manifests for an adapter in config:  kubectl apply -f config/adapter-bundle.yaml License Apache 2.0 License.\n","excerpt":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes. …","ref":"/docs/skywalking-swck/latest/binary-readme/","title":"SkyWalking Cloud on Kubernetes"},{"body":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes.\nSWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nFeatures  Java Agent Injector: Inject the java agent into the application pod natively. Operator: Provision and maintain SkyWalking backend components. Custom Metrics Adapter: Provides custom metrics come from SkyWalking OAP cluster for autoscaling by Kubernetes HPA  Build images Issue below instrument to get the docker image:\nmake or\nmake build To onboard operator or adapter, you should push the image to a registry where the kubernetes cluster can pull it.\nOnboard Java Agent Injector and Operator The java agent injector and operator share a same binary. To onboard them, you should follow:\n To install the java agent injector and operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f config/operator-bundle.yaml Onboard Custom Metrics Adapter  Deploy OAP server by referring to Operator Quick Start. Apply the manifests for an adapter in config:  kubectl apply -f config/adapter-bundle.yaml License Apache 2.0 License.\n","excerpt":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes. …","ref":"/docs/skywalking-swck/next/binary-readme/","title":"SkyWalking Cloud on Kubernetes"},{"body":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes.\nSWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nFeatures  Java Agent Injector: Inject the java agent into the application pod natively. Operator: Provision and maintain SkyWalking backend components. Custom Metrics Adapter: Provides custom metrics come from SkyWalking OAP cluster for autoscaling by Kubernetes HPA  Build images Issue below instrument to get the docker image:\nmake or\nmake build To onboard operator or adapter, you should push the image to a registry where the kubernetes cluster can pull it.\nOnboard Java Agent Injector and Operator The java agent injector and operator share a same binary. To onboard them, you should follow:\n To install the java agent injector and operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f config/operator-bundle.yaml Onboard Custom Metrics Adapter  Deploy OAP server by referring to Operator Quick Start. 
Apply the manifests for an adapter in config:  kubectl apply -f config/adapter-bundle.yaml License Apache 2.0 License.\n","excerpt":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes. …","ref":"/docs/skywalking-swck/v0.9.0/binary-readme/","title":"SkyWalking Cloud on Kubernetes"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process …","ref":"/docs/main/latest/en/api/x-process-correlation-headers-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  
","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process …","ref":"/docs/main/next/en/api/x-process-correlation-headers-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation …","ref":"/docs/main/v9.0.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation …","ref":"/docs/main/v9.1.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. 
Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation …","ref":"/docs/main/v9.2.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation …","ref":"/docs/main/v9.3.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. 
The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process …","ref":"/docs/main/v9.4.0/en/api/x-process-correlation-headers-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process …","ref":"/docs/main/v9.5.0/en/api/x-process-correlation-headers-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  
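As an illustration of the sw8-correlation format described above (a comma-separated list of base64(key):base64(value) pairs), here is a minimal, hypothetical Java sketch of how a tracer might encode a correlation context into the header value; the key/value names are placeholders and not part of the protocol.

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

// Illustrative sketch only: encodes a correlation context into the
// sw8-correlation value format (base64(key):base64(value), joined by commas).
public class CorrelationHeaderExample {
    static String encode(Map<String, String> context) {
        Base64.Encoder b64 = Base64.getEncoder();
        return context.entrySet().stream()
                .map(e -> b64.encodeToString(e.getKey().getBytes(StandardCharsets.UTF_8))
                        + ":" + b64.encodeToString(e.getValue().getBytes(StandardCharsets.UTF_8)))
                .collect(Collectors.joining(","));
    }

    public static void main(String[] args) {
        Map<String, String> context = new LinkedHashMap<>();
        context.put("tenant", "acme");   // sample keys/values, not defined by the protocol
        context.put("flag", "canary");
        // Prints: sw8-correlation: dGVuYW50:YWNtZQ==,ZmxhZw==:Y2FuYXJ5
        System.out.println("sw8-correlation: " + encode(context));
    }
}
```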
","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process …","ref":"/docs/main/v9.6.0/en/api/x-process-correlation-headers-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process …","ref":"/docs/main/v9.7.0/en/api/x-process-correlation-headers-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. 
Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/latest/en/api/x-process-propagation-headers-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. 
It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/next/en/api/x-process-propagation-headers-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. 
This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.0.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.1.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.2.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.3.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.4.0/en/api/x-process-propagation-headers-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.5.0/en/api/x-process-propagation-headers-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. 
SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.6.0/en/api/x-process-propagation-headers-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. 
You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
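To make the sw8 value layout above concrete, the following is a minimal, hypothetical Java sketch that composes the 8 dash-separated fields (sample flag, then BASE64-encoded trace ID, parent segment ID, plain-integer parent span ID, BASE64-encoded parent service, instance, endpoint, and target address); all field values are placeholders, not output from a real tracer.

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Illustrative sketch only: builds an sw8 header value from the 8 fields
// described in the protocol text above, using placeholder field values.
public class Sw8HeaderExample {
    static String b64(String s) {
        return Base64.getEncoder().encodeToString(s.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        String value = String.join("-",
                "1",                           // sample: 1 = report this trace
                b64("my-trace-id"),            // trace ID (globally unique string)
                b64("my-parent-segment-id"),   // parent trace segment ID
                "3",                           // parent span ID, a plain integer
                b64("order-service"),          // parent service
                b64("order-service-instance"), // parent service instance
                b64("/order/create"),          // parent endpoint (first entry span)
                b64("10.0.0.5:8080"));         // target address used on the client end
        System.out.println("sw8: " + value);
    }
}
```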
","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.7.0/en/api/x-process-propagation-headers-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/latest/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/next/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/v9.3.0/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/v9.4.0/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/v9.5.0/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/v9.6.0/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/v9.7.0/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the SkyWalking community!\nSkyWalking Go is an open-source Golang auto-instrument agent that provides support for distributed tracing across different frameworks within the Golang language.\nTo use SkyWalking Go, simply import the base dependencies into your code and take advantage of the -toolexec parameter in Golang to enable hybrid compilation capabilities for various frameworks in your application.\n","excerpt":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the …","ref":"/docs/skywalking-go/latest/readme/","title":"SkyWalking Go Agent"},{"body":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. 
Welcome to the SkyWalking community!\nSkyWalking Go is an open-source Golang auto-instrument agent that provides support for distributed tracing across different frameworks within the Golang language.\nTo use SkyWalking Go, simply import the base dependencies into your code and take advantage of the -toolexec parameter in Golang to enable hybrid compilation capabilities for various frameworks in your application.\n","excerpt":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the …","ref":"/docs/skywalking-go/next/readme/","title":"SkyWalking Go Agent"},{"body":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the SkyWalking community!\nSkyWalking Go is an open-source Golang auto-instrument agent that provides support for distributed tracing across different frameworks within the Golang language.\nTo use SkyWalking Go, simply import the base dependencies into your code and take advantage of the -toolexec parameter in Golang to enable hybrid compilation capabilities for various frameworks in your application.\n","excerpt":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the …","ref":"/docs/skywalking-go/v0.4.0/readme/","title":"SkyWalking Go Agent"},{"body":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step configuration content. You can see the sample configuration files for different environments in the examples directory.\nThere is a quick view about the configuration file, and using the yaml format.\nsetup:# set up the environmentcleanup:# clean up the environmenttrigger:# generate trafficverify:# test casesSetup Support two kinds of the environment to set up the system.\nKinD setup:env:kindfile:path/to/kind.yaml # Specified kinD manifest file pathkubeconfig:path/.kube/config # The path of kubeconfigtimeout:20m # timeout durationinit-system-environment:path/to/env # Import environment filesteps:# customize steps for prepare the environment- name:customize setups # step name# one of command line or kinD manifest filecommand:command lines # use command line to setup path:/path/to/manifest.yaml # the manifest file pathwait:# how to verify the manifest is set up finish- namespace:# The pod namespaceresource:# The pod resource namelabel-selector:# The resource label selectorfor:# The wait conditionkind:import-images:# import docker images to KinD- image:version # support using env to expand image, such as `${env_key}` or `$env_key`expose-ports:# Expose resource for host access- namespace:# The resource namespaceresource:# The resource name, such as `pod/foo` or `service/foo`port:# Want to expose port from resource NOTE: The fields file and kubeconfig are mutually exclusive.\n The KinD environment follow these steps:\n [optional]Start the KinD cluster according to the config file, expose KUBECONFIG to environment for help execute kubectl in the next steps. [optional]Setup the kubeconfig field for help execute kubectl in the next steps. Load docker images from kind.import-images if needed. Apply the resources files (--manifests) or/and run the custom init command (--commands) by steps. Wait until all steps are finished and all services are ready with the timeout(second). Expose all resource ports for host access.  Import docker image If you want to import docker image from private registries, there are several ways to do this:\n Using imagePullSecrets to pull images, please take reference from document. 
Using kind.import-images to load images from host. kind:import-images:- skywalking/oap:${OAP_HASH}# support using environment to expand the image name  Resource Export If you want to access the resource from host, should follow these steps:\n Declare which resource and ports need to be accessible from host. setup:kind:expose-ports:- namespace:default # Need to expose resource namespaceresource:pod/foo # Resource description, such as `pod/foo` or `service/foo`port:8080# Resource port want to expose, support `\u0026lt;resource_port\u0026gt;`, `\u0026lt;bind_to_host_port\u0026gt;:\u0026lt;resource_port\u0026gt;` Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mapped port, the resource name replace all `/` or `-` as `_`# host format: \u0026lt;resource_name\u0026gt;_host# port format: \u0026lt;resource_name\u0026gt;_\u0026lt;container_port\u0026gt;url:http://${pod_foo_host}:${pod_foo_8080}/  Log The console output of each pod could be found in ${workDir}/logs/${namespace}/${podName}.log.\nCompose setup:env:composefile:path/to/compose.yaml # Specified docker-compose file pathtimeout:20m # Timeout durationinit-system-environment:path/to/env # Import environment filesteps:# Customize steps for prepare the environment- name:customize setups # Step namecommand:command lines # Use command line to setup The docker-compose environment follow these steps:\n Import init-system-environment file for help build service and execute steps. Each line of the file content is an environment variable, and the key value is separate by \u0026ldquo;=\u0026rdquo;. Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify.  Service Export If you want to get the service host and port mapping, should follow these steps:\n declare the port in the docker-compose service ports config. oap:image:xx.xx:1.0.0ports:# define the port- 8080 Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mappinged porturl:http://${oap_host}:${oap_8080}/  Log The console output of each service could be found in ${workDir}/logs/{serviceName}/std.log.\nTrigger After the Setup step is finished, use the Trigger step to generate traffic.\ntrigger:action:http # The action of the trigger. support HTTP invoke.interval:3s # Trigger the action every 3 seconds.times:5# The retry count before the request success.url:http://apache.skywalking.com/# Http trigger url link.method:GET # Http trigger method.headers:\u0026#34;Content-Type\u0026#34;: \u0026#34;application/json\u0026#34;\u0026#34;Authorization\u0026#34;: \u0026#34;Basic whatever\u0026#34;body:\u0026#39;{\u0026#34;k1\u0026#34;:\u0026#34;v1\u0026#34;, \u0026#34;k2\u0026#34;:\u0026#34;v2\u0026#34;}\u0026#39;The Trigger executed successfully at least once, after success, the next stage could be continued. Otherwise, there is an error and exit.\nVerify After the Trigger step is finished, running test cases.\nverify:retry:# verify with retry strategycount:10# max retry countinterval:10s # the interval between two attempts, e.g. 10s, 1m.fail-fast:true# when a case fails, whether to stop verifying other cases. This property defaults to true.concurrency:false# whether to verify cases concurrently. 
This property defaults to false.cases:# verify test cases- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file path- includes:# including cases- path/to/cases.yaml # cases file pathThe test cases are executed in the order of declaration from top to bottom. When the execution of a case fails and the retry strategy is exceeded, it will stop verifying other cases if fail-fast is true. Otherwise, the process will continue to verify other cases.\nRetry strategy The retry strategy could retry automatically on the test case failure, and restart by the failed test case.\nCase source Support two kind source to verify, one case only supports one kind source type:\n source file: verify by generated yaml format file. command: use command line output as they need to verify content, also only support yaml format.  Excepted verify template After clarifying the content that needs to be verified, you need to write content to verify the real content and ensure that the data is correct.\nYou need to use the form of Go Template to write the verification file, and the data content to be rendered comes from the real data. By verifying whether the rendered data is consistent with the real data, it is verified whether the content is consistent. You could see many test cases in this directory.\nWe use go-cmp to show the parts where excepted do not match the actual data. - prefix represents the expected data content, + prefix represents the actual data content.\nWe have done a lot of extension functions for verification functions on the original Go Template.\nExtension functions Extension functions are used to help users quickly locate the problem content and write test cases that are easier to use.\nBasic Matches Verify that the number fits the range.\n   Function Description Grammar Verify success Verify failure     gt Verify the first param is greater than second param {{gt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   ge Verify the first param is greater than or equals second param {{ge param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   lt Verify the first param is less than second param {{lt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   le Verify the first param is less than or equals second param {{le param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   regexp Verify the first param matches the second regular expression {{regexp param1 param2}} param1 \u0026lt;\u0026quot;$param1\u0026quot; does not match the pattern $param2\u0026quot;\u0026gt;   notEmpty Verify The param is not empty {{notEmpty param}} param \u0026lt;\u0026quot;\u0026quot; is empty, wanted is not empty\u0026gt;   hasPrefix Verify The string param has the same prefix. {{hasPrefix param1 param2}} true false   hasSuffix Verify The string param has the same suffix. 
{{hasSuffix param1 param2}} true false    List Matches Verify the data in the condition list, Currently, it is only supported when all the conditions in the list are executed, it is considered as successful.\nHere is an example, It\u0026rsquo;s means the list values must have value is greater than 0, also have value greater than 1, Otherwise verify is failure.\n{{- contains .list }}- key:{{gt .value 0 }}- key:{{gt .value 1 }}{{- end }}Encoding In order to make the program easier for users to read and use, some code conversions are provided.\n   Function Description Grammar Result     b64enc Base64 encode {{ b64enc \u0026ldquo;Foo\u0026rdquo; }} Zm9v   sha256enc Sha256 encode {{ sha256enc \u0026ldquo;Foo\u0026rdquo; }} 1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa   sha512enc Sha512 encode {{ sha512enc \u0026ldquo;Foo\u0026rdquo; }} 4abcd2639957cb23e33f63d70659b602a5923fafcfd2768ef79b0badea637e5c837161aa101a557a1d4deacbd912189e2bb11bf3c0c0c70ef7797217da7e8207    Reuse cases You could include multiple cases into one single E2E verify, It\u0026rsquo;s helpful for reusing the same verify cases.\nHere is the reused verify cases, and using includes configuration item to include this into E2E config.\ncases:- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file pathCleanup After the E2E finished, how to clean up the environment.\ncleanup:on:always # Clean up strategyIf the on option under cleanup is not set, it will be automatically set to always if there is environment variable CI=true, which is present on many popular CI services, such as GitHub Actions, CircleCI, etc., otherwise it will be set to success, so the testing environment can be preserved when tests failed in your local machine.\nAll available strategies:\n always: No matter the execution result is success or failure, cleanup will be performed. success: Only when the execution succeeds. failure: Only when the execution failed. never: Never clean up the environment.  ","excerpt":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step …","ref":"/docs/skywalking-infra-e2e/latest/en/setup/configuration-file/","title":"SkyWalking Infra E2E Configuration Guide"},{"body":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step configuration content. 
You can see the sample configuration files for different environments in the examples directory.\nThere is a quick view about the configuration file, and using the yaml format.\nsetup:# set up the environmentcleanup:# clean up the environmenttrigger:# generate trafficverify:# test casesSetup Support two kinds of the environment to set up the system.\nKinD setup:env:kindfile:path/to/kind.yaml # Specified kinD manifest file pathkubeconfig:path/.kube/config # The path of kubeconfigtimeout:20m # timeout durationinit-system-environment:path/to/env # Import environment filesteps:# customize steps for prepare the environment- name:customize setups # step name# one of command line or kinD manifest filecommand:command lines # use command line to setup path:/path/to/manifest.yaml # the manifest file pathwait:# how to verify the manifest is set up finish- namespace:# The pod namespaceresource:# The pod resource namelabel-selector:# The resource label selectorfor:# The wait conditionkind:import-images:# import docker images to KinD- image:version # support using env to expand image, such as `${env_key}` or `$env_key`expose-ports:# Expose resource for host access- namespace:# The resource namespaceresource:# The resource name, such as `pod/foo` or `service/foo`port:# Want to expose port from resource NOTE: The fields file and kubeconfig are mutually exclusive.\n The KinD environment follow these steps:\n [optional]Start the KinD cluster according to the config file, expose KUBECONFIG to environment for help execute kubectl in the next steps. [optional]Setup the kubeconfig field for help execute kubectl in the next steps. Load docker images from kind.import-images if needed. Apply the resources files (--manifests) or/and run the custom init command (--commands) by steps. Wait until all steps are finished and all services are ready with the timeout(second). Expose all resource ports for host access.  Import docker image If you want to import docker image from private registries, there are several ways to do this:\n Using imagePullSecrets to pull images, please take reference from document. Using kind.import-images to load images from host. kind:import-images:- skywalking/oap:${OAP_HASH}# support using environment to expand the image name  Resource Export If you want to access the resource from host, should follow these steps:\n Declare which resource and ports need to be accessible from host. setup:kind:expose-ports:- namespace:default # Need to expose resource namespaceresource:pod/foo # Resource description, such as `pod/foo` or `service/foo`port:8080# Resource port want to expose, support `\u0026lt;resource_port\u0026gt;`, `\u0026lt;bind_to_host_port\u0026gt;:\u0026lt;resource_port\u0026gt;` Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). 
trigger:# trigger with specified mapped port, the resource name replace all `/` or `-` as `_`# host format: \u0026lt;resource_name\u0026gt;_host# port format: \u0026lt;resource_name\u0026gt;_\u0026lt;container_port\u0026gt;url:http://${pod_foo_host}:${pod_foo_8080}/  Log The console output of each pod could be found in ${workDir}/logs/${namespace}/${podName}.log.\nCompose setup:env:composefile:path/to/compose.yaml # Specified docker-compose file pathtimeout:20m # Timeout durationinit-system-environment:path/to/env # Import environment filesteps:# Customize steps for prepare the environment- name:customize setups # Step namecommand:command lines # Use command line to setup The docker-compose environment follow these steps:\n Import init-system-environment file for help build service and execute steps. Each line of the file content is an environment variable, and the key value is separate by \u0026ldquo;=\u0026rdquo;. Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify.  Service Export If you want to get the service host and port mapping, should follow these steps:\n declare the port in the docker-compose service ports config. oap:image:xx.xx:1.0.0ports:# define the port- 8080 Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mappinged porturl:http://${oap_host}:${oap_8080}/  Log The console output of each service could be found in ${workDir}/logs/{serviceName}/std.log.\nTrigger After the Setup step is finished, use the Trigger step to generate traffic.\ntrigger:action:http # The action of the trigger. support HTTP invoke.interval:3s # Trigger the action every 3 seconds.times:5# The retry count before the request success.url:http://apache.skywalking.com/# Http trigger url link.method:GET # Http trigger method.headers:\u0026#34;Content-Type\u0026#34;: \u0026#34;application/json\u0026#34;\u0026#34;Authorization\u0026#34;: \u0026#34;Basic whatever\u0026#34;body:\u0026#39;{\u0026#34;k1\u0026#34;:\u0026#34;v1\u0026#34;, \u0026#34;k2\u0026#34;:\u0026#34;v2\u0026#34;}\u0026#39;The Trigger executed successfully at least once, after success, the next stage could be continued. Otherwise, there is an error and exit.\nVerify After the Trigger step is finished, running test cases.\nverify:retry:# verify with retry strategycount:10# max retry countinterval:10s # the interval between two attempts, e.g. 10s, 1m.fail-fast:true# when a case fails, whether to stop verifying other cases. This property defaults to true.concurrency:false# whether to verify cases concurrently. This property defaults to false.cases:# verify test cases- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file path- includes:# including cases- path/to/cases.yaml # cases file pathThe test cases are executed in the order of declaration from top to bottom. When the execution of a case fails and the retry strategy is exceeded, it will stop verifying other cases if fail-fast is true. 
Otherwise, the process will continue to verify other cases.\nRetry strategy The retry strategy could retry automatically on the test case failure, and restart by the failed test case.\nCase source Support two kind source to verify, one case only supports one kind source type:\n source file: verify by generated yaml format file. command: use command line output as they need to verify content, also only support yaml format.  Excepted verify template After clarifying the content that needs to be verified, you need to write content to verify the real content and ensure that the data is correct.\nYou need to use the form of Go Template to write the verification file, and the data content to be rendered comes from the real data. By verifying whether the rendered data is consistent with the real data, it is verified whether the content is consistent. You could see many test cases in this directory.\nWe use go-cmp to show the parts where excepted do not match the actual data. - prefix represents the expected data content, + prefix represents the actual data content.\nWe have done a lot of extension functions for verification functions on the original Go Template.\nExtension functions Extension functions are used to help users quickly locate the problem content and write test cases that are easier to use.\nBasic Matches Verify that the number fits the range.\n   Function Description Grammar Verify success Verify failure     gt Verify the first param is greater than second param {{gt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   ge Verify the first param is greater than or equals second param {{ge param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   lt Verify the first param is less than second param {{lt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   le Verify the first param is less than or equals second param {{le param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   regexp Verify the first param matches the second regular expression {{regexp param1 param2}} param1 \u0026lt;\u0026quot;$param1\u0026quot; does not match the pattern $param2\u0026quot;\u0026gt;   notEmpty Verify The param is not empty {{notEmpty param}} param \u0026lt;\u0026quot;\u0026quot; is empty, wanted is not empty\u0026gt;   hasPrefix Verify The string param has the same prefix. {{hasPrefix param1 param2}} true false   hasSuffix Verify The string param has the same suffix. 
{{hasSuffix param1 param2}} true false    List Matches Verify the data in the condition list, Currently, it is only supported when all the conditions in the list are executed, it is considered as successful.\nHere is an example, It\u0026rsquo;s means the list values must have value is greater than 0, also have value greater than 1, Otherwise verify is failure.\n{{- contains .list }}- key:{{gt .value 0 }}- key:{{gt .value 1 }}{{- end }}Encoding In order to make the program easier for users to read and use, some code conversions are provided.\n   Function Description Grammar Result     b64enc Base64 encode {{ b64enc \u0026ldquo;Foo\u0026rdquo; }} Zm9v   sha256enc Sha256 encode {{ sha256enc \u0026ldquo;Foo\u0026rdquo; }} 1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa   sha512enc Sha512 encode {{ sha512enc \u0026ldquo;Foo\u0026rdquo; }} 4abcd2639957cb23e33f63d70659b602a5923fafcfd2768ef79b0badea637e5c837161aa101a557a1d4deacbd912189e2bb11bf3c0c0c70ef7797217da7e8207    Reuse cases You could include multiple cases into one single E2E verify, It\u0026rsquo;s helpful for reusing the same verify cases.\nHere is the reused verify cases, and using includes configuration item to include this into E2E config.\ncases:- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file pathCleanup After the E2E finished, how to clean up the environment.\ncleanup:on:always # Clean up strategyIf the on option under cleanup is not set, it will be automatically set to always if there is environment variable CI=true, which is present on many popular CI services, such as GitHub Actions, CircleCI, etc., otherwise it will be set to success, so the testing environment can be preserved when tests failed in your local machine.\nAll available strategies:\n always: No matter the execution result is success or failure, cleanup will be performed. success: Only when the execution succeeds. failure: Only when the execution failed. never: Never clean up the environment.  ","excerpt":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step …","ref":"/docs/skywalking-infra-e2e/next/en/setup/configuration-file/","title":"SkyWalking Infra E2E Configuration Guide"},{"body":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step configuration content. 
You can see the sample configuration files for different environments in the examples directory.\nThere is a quick view about the configuration file, and using the yaml format.\nsetup:# set up the environmentcleanup:# clean up the environmenttrigger:# generate trafficverify:# test casesSetup Support two kinds of the environment to set up the system.\nKinD setup:env:kindfile:path/to/kind.yaml # Specified kinD manifest file pathkubeconfig:path/.kube/config # The path of kubeconfigtimeout:20m # timeout durationinit-system-environment:path/to/env # Import environment filesteps:# customize steps for prepare the environment- name:customize setups # step name# one of command line or kinD manifest filecommand:command lines # use command line to setup path:/path/to/manifest.yaml # the manifest file pathwait:# how to verify the manifest is set up finish- namespace:# The pod namespaceresource:# The pod resource namelabel-selector:# The resource label selectorfor:# The wait conditionkind:import-images:# import docker images to KinD- image:version # support using env to expand image, such as `${env_key}` or `$env_key`expose-ports:# Expose resource for host access- namespace:# The resource namespaceresource:# The resource name, such as `pod/foo` or `service/foo`port:# Want to expose port from resource NOTE: The fields file and kubeconfig are mutually exclusive.\n The KinD environment follow these steps:\n [optional]Start the KinD cluster according to the config file, expose KUBECONFIG to environment for help execute kubectl in the next steps. [optional]Setup the kubeconfig field for help execute kubectl in the next steps. Load docker images from kind.import-images if needed. Apply the resources files (--manifests) or/and run the custom init command (--commands) by steps. Wait until all steps are finished and all services are ready with the timeout(second). Expose all resource ports for host access.  Import docker image If you want to import docker image from private registries, there are several ways to do this:\n Using imagePullSecrets to pull images, please take reference from document. Using kind.import-images to load images from host. kind:import-images:- skywalking/oap:${OAP_HASH}# support using environment to expand the image name  Resource Export If you want to access the resource from host, should follow these steps:\n Declare which resource and ports need to be accessible from host. setup:kind:expose-ports:- namespace:default # Need to expose resource namespaceresource:pod/foo # Resource description, such as `pod/foo` or `service/foo`port:8080# Resource port want to expose, support `\u0026lt;resource_port\u0026gt;`, `\u0026lt;bind_to_host_port\u0026gt;:\u0026lt;resource_port\u0026gt;` Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). 
trigger:# trigger with specified mapped port, the resource name replace all `/` or `-` as `_`# host format: \u0026lt;resource_name\u0026gt;_host# port format: \u0026lt;resource_name\u0026gt;_\u0026lt;container_port\u0026gt;url:http://${pod_foo_host}:${pod_foo_8080}/  Log The console output of each pod could be found in ${workDir}/logs/${namespace}/${podName}.log.\nCompose setup:env:composefile:path/to/compose.yaml # Specified docker-compose file pathtimeout:20m # Timeout durationinit-system-environment:path/to/env # Import environment filesteps:# Customize steps for prepare the environment- name:customize setups # Step namecommand:command lines # Use command line to setup The docker-compose environment follow these steps:\n Import init-system-environment file for help build service and execute steps. Each line of the file content is an environment variable, and the key value is separate by \u0026ldquo;=\u0026rdquo;. Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify.  Service Export If you want to get the service host and port mapping, should follow these steps:\n declare the port in the docker-compose service ports config. oap:image:xx.xx:1.0.0ports:# define the port- 8080 Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mappinged porturl:http://${oap_host}:${oap_8080}/  Log The console output of each service could be found in ${workDir}/logs/{serviceName}/std.log.\nTrigger After the Setup step is finished, use the Trigger step to generate traffic.\ntrigger:action:http # The action of the trigger. support HTTP invoke.interval:3s # Trigger the action every 3 seconds.times:5# The retry count before the request success.url:http://apache.skywalking.com/# Http trigger url link.method:GET # Http trigger method.headers:\u0026#34;Content-Type\u0026#34;: \u0026#34;application/json\u0026#34;\u0026#34;Authorization\u0026#34;: \u0026#34;Basic whatever\u0026#34;body:\u0026#39;{\u0026#34;k1\u0026#34;:\u0026#34;v1\u0026#34;, \u0026#34;k2\u0026#34;:\u0026#34;v2\u0026#34;}\u0026#39;The Trigger executed successfully at least once, after success, the next stage could be continued. Otherwise, there is an error and exit.\nVerify After the Trigger step is finished, running test cases.\nverify:retry:# verify with retry strategycount:10# max retry countinterval:10s # the interval between two attempts, e.g. 10s, 1m.fail-fast:true# when a case fails, whether to stop verifying other cases. This property defaults to true.concurrency:false# whether to verify cases concurrently. This property defaults to false.cases:# verify test cases- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file path- includes:# including cases- path/to/cases.yaml # cases file pathThe test cases are executed in the order of declaration from top to bottom. When the execution of a case fails and the retry strategy is exceeded, it will stop verifying other cases if fail-fast is true. 
Otherwise, the process will continue to verify other cases.\nRetry strategy The retry strategy could retry automatically on the test case failure, and restart by the failed test case.\nCase source Support two kind source to verify, one case only supports one kind source type:\n source file: verify by generated yaml format file. command: use command line output as they need to verify content, also only support yaml format.  Excepted verify template After clarifying the content that needs to be verified, you need to write content to verify the real content and ensure that the data is correct.\nYou need to use the form of Go Template to write the verification file, and the data content to be rendered comes from the real data. By verifying whether the rendered data is consistent with the real data, it is verified whether the content is consistent. You could see many test cases in this directory.\nWe use go-cmp to show the parts where excepted do not match the actual data. - prefix represents the expected data content, + prefix represents the actual data content.\nWe have done a lot of extension functions for verification functions on the original Go Template.\nExtension functions Extension functions are used to help users quickly locate the problem content and write test cases that are easier to use.\nBasic Matches Verify that the number fits the range.\n   Function Description Grammar Verify success Verify failure     gt Verify the first param is greater than second param {{gt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   ge Verify the first param is greater than or equals second param {{ge param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   lt Verify the first param is less than second param {{lt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   le Verify the first param is less than or equals second param {{le param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   regexp Verify the first param matches the second regular expression {{regexp param1 param2}} param1 \u0026lt;\u0026quot;$param1\u0026quot; does not match the pattern $param2\u0026quot;\u0026gt;   notEmpty Verify The param is not empty {{notEmpty param}} param \u0026lt;\u0026quot;\u0026quot; is empty, wanted is not empty\u0026gt;   hasPrefix Verify The string param has the same prefix. {{hasPrefix param1 param2}} true false   hasSuffix Verify The string param has the same suffix. 
{{hasSuffix param1 param2}} true false    List Matches Verify the data in the condition list, Currently, it is only supported when all the conditions in the list are executed, it is considered as successful.\nHere is an example, It\u0026rsquo;s means the list values must have value is greater than 0, also have value greater than 1, Otherwise verify is failure.\n{{- contains .list }}- key:{{gt .value 0 }}- key:{{gt .value 1 }}{{- end }}Encoding In order to make the program easier for users to read and use, some code conversions are provided.\n   Function Description Grammar Result     b64enc Base64 encode {{ b64enc \u0026ldquo;Foo\u0026rdquo; }} Zm9v   sha256enc Sha256 encode {{ sha256enc \u0026ldquo;Foo\u0026rdquo; }} 1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa   sha512enc Sha512 encode {{ sha512enc \u0026ldquo;Foo\u0026rdquo; }} 4abcd2639957cb23e33f63d70659b602a5923fafcfd2768ef79b0badea637e5c837161aa101a557a1d4deacbd912189e2bb11bf3c0c0c70ef7797217da7e8207    Reuse cases You could include multiple cases into one single E2E verify, It\u0026rsquo;s helpful for reusing the same verify cases.\nHere is the reused verify cases, and using includes configuration item to include this into E2E config.\ncases:- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file pathCleanup After the E2E finished, how to clean up the environment.\ncleanup:on:always # Clean up strategyIf the on option under cleanup is not set, it will be automatically set to always if there is environment variable CI=true, which is present on many popular CI services, such as GitHub Actions, CircleCI, etc., otherwise it will be set to success, so the testing environment can be preserved when tests failed in your local machine.\nAll available strategies:\n always: No matter the execution result is success or failure, cleanup will be performed. success: Only when the execution succeeds. failure: Only when the execution failed. never: Never clean up the environment.  ","excerpt":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/configuration-file/","title":"SkyWalking Infra E2E Configuration Guide"},{"body":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for local debugging and operation. GitHub Action: Suitable for automated execution in GitHub projects.  
Command Through commands, you can execute a complete Controller.\n# e2e.yaml configuration file in current directory e2e run # or  # Specified the e2e.yaml file path e2e run -c /path/to/the/test/e2e.yaml Also, could run the separate step in the command line, these commands are all done by reading the configuration.\ne2e setup e2e trigger e2e verify e2e cleanup GitHub Action To use skywalking-infra-e2e in GitHub Actions, add a step in your GitHub workflow.\nThe working directory could be uploaded to GitHub Action Artifact after the task is completed, which contains environment variables and container logs in the environment.\n- name:Run E2E Testuses:apache/skywalking-infra-e2e@main # always prefer to use a revision instead of `main`.with:e2e-file:e2e.yaml # (required)need to run E2E file pathlog-dir:/path/to/log/dir # (Optional)Use `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;_\u0026lt;matrix_value\u0026gt;`(if have GHA matrix) or `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;` in GHA, and output logs into `\u0026lt;work_dir\u0026gt;/logs` out of GHA env, such as running locally.If you want to upload the log directory to the GitHub Action Artifact when this E2E test failure, you could define the below content in your GitHub Action Job.\n- name:Upload E2E Loguses:actions/upload-artifact@v2if:${{ failure() }} # Only upload the artifact when E2E testing failurewith:name:e2e-logpath:\u0026#34;${{ env.SW_INFRA_E2E_LOG_DIR }}\u0026#34;# The SkyWalking Infra E2E action sets SW_INFRA_E2E_LOG_DIR automatically. ","excerpt":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for …","ref":"/docs/skywalking-infra-e2e/latest/en/setup/run-e2e-tests/","title":"SkyWalking Infra E2E Execute Guide"},{"body":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for local debugging and operation. GitHub Action: Suitable for automated execution in GitHub projects.  Command Through commands, you can execute a complete Controller.\n# e2e.yaml configuration file in current directory e2e run # or  # Specified the e2e.yaml file path e2e run -c /path/to/the/test/e2e.yaml Also, could run the separate step in the command line, these commands are all done by reading the configuration.\ne2e setup e2e trigger e2e verify e2e cleanup GitHub Action To use skywalking-infra-e2e in GitHub Actions, add a step in your GitHub workflow.\nThe working directory could be uploaded to GitHub Action Artifact after the task is completed, which contains environment variables and container logs in the environment.\n- name:Run E2E Testuses:apache/skywalking-infra-e2e@main # always prefer to use a revision instead of `main`.with:e2e-file:e2e.yaml # (required)need to run E2E file pathlog-dir:/path/to/log/dir # (Optional)Use `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;_\u0026lt;matrix_value\u0026gt;`(if have GHA matrix) or `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;` in GHA, and output logs into `\u0026lt;work_dir\u0026gt;/logs` out of GHA env, such as running locally.If you want to upload the log directory to the GitHub Action Artifact when this E2E test failure, you could define the below content in your GitHub Action Job.\n- name:Upload E2E Loguses:actions/upload-artifact@v2if:${{ failure() }} # Only upload the artifact when E2E testing failurewith:name:e2e-logpath:\u0026#34;${{ env.SW_INFRA_E2E_LOG_DIR }}\u0026#34;# The SkyWalking Infra E2E action sets SW_INFRA_E2E_LOG_DIR automatically. 
","excerpt":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for …","ref":"/docs/skywalking-infra-e2e/next/en/setup/run-e2e-tests/","title":"SkyWalking Infra E2E Execute Guide"},{"body":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for local debugging and operation. GitHub Action: Suitable for automated execution in GitHub projects.  Command Through commands, you can execute a complete Controller.\n# e2e.yaml configuration file in current directory e2e run # or  # Specified the e2e.yaml file path e2e run -c /path/to/the/test/e2e.yaml Also, could run the separate step in the command line, these commands are all done by reading the configuration.\ne2e setup e2e trigger e2e verify e2e cleanup GitHub Action To use skywalking-infra-e2e in GitHub Actions, add a step in your GitHub workflow.\nThe working directory could be uploaded to GitHub Action Artifact after the task is completed, which contains environment variables and container logs in the environment.\n- name:Run E2E Testuses:apache/skywalking-infra-e2e@main # always prefer to use a revision instead of `main`.with:e2e-file:e2e.yaml # (required)need to run E2E file pathlog-dir:/path/to/log/dir # (Optional)Use `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;_\u0026lt;matrix_value\u0026gt;`(if have GHA matrix) or `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;` in GHA, and output logs into `\u0026lt;work_dir\u0026gt;/logs` out of GHA env, such as running locally.If you want to upload the log directory to the GitHub Action Artifact when this E2E test failure, you could define the below content in your GitHub Action Job.\n- name:Upload E2E Loguses:actions/upload-artifact@v2if:${{ failure() }} # Only upload the artifact when E2E testing failurewith:name:e2e-logpath:\u0026#34;${{ env.SW_INFRA_E2E_LOG_DIR }}\u0026#34;# The SkyWalking Infra E2E action sets SW_INFRA_E2E_LOG_DIR automatically. ","excerpt":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/run-e2e-tests/","title":"SkyWalking Infra E2E Execute Guide"},{"body":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","excerpt":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the …","ref":"/docs/skywalking-java/latest/readme/","title":"SkyWalking Java Agent"},{"body":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","excerpt":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the …","ref":"/docs/skywalking-java/next/readme/","title":"SkyWalking Java Agent"},{"body":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. 
Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","excerpt":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the …","ref":"/docs/skywalking-java/v9.0.0/readme/","title":"SkyWalking Java Agent"},{"body":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","excerpt":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the …","ref":"/docs/skywalking-java/v9.1.0/readme/","title":"SkyWalking Java Agent"},{"body":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","excerpt":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the …","ref":"/docs/skywalking-java/v9.2.0/readme/","title":"SkyWalking Java Agent"},{"body":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. 
If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. 
Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 docker.push.java21 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  ","excerpt":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-java/latest/en/contribution/release-java-agent/","title":"SkyWalking Java Agent Release Guide"},{"body":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  
Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. 
(No binaries in the source code)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move the source code tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the Nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. The public KEYS file points to https://www.apache.org/dist/skywalking/KEYS. Update the website download page (http://skywalking.apache.org/downloads/). Add a new download source, distribution, sha512, asc, and document links. The links can be found by following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with the changelog or key features. Send the ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c (the last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 docker.push.java21 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  
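For convenience, here is a rough sketch of the shell commands behind the Vote Check and the clean-up steps above; x.y.z and x.y.w stand for the new and the previous release versions, and the commit message is only an example.
# Import the release manager keys, then verify the checksum and signature of the source tarball.
curl -s https://dist.apache.org/repos/dist/release/skywalking/KEYS | gpg --import
shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512
gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-java-agent-x.y.z-src.tgz
# License header check from the Vote Check list.
docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check
# Once x.y.z is published, remove the previous release from the dist mirror;
# it remains available under https://archive.apache.org/dist/skywalking.
svn delete -m 'Remove previous SkyWalking Java Agent release x.y.w' https://dist.apache.org/repos/dist/release/skywalking/java-agent/x.y.w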
","excerpt":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-java/next/en/contribution/release-java-agent/","title":"SkyWalking Java Agent Release Guide"},{"body":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  
apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. 
Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  ","excerpt":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-java/v9.0.0/en/contribution/release-java-agent/","title":"SkyWalking Java Agent Release Guide"},{"body":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. 
If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. 
Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  ","excerpt":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-java/v9.1.0/en/contribution/release-java-agent/","title":"SkyWalking Java Agent Release Guide"},{"body":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  
Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. 
(No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 docker.push.java21 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  
","excerpt":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-java/v9.2.0/en/contribution/release-java-agent/","title":"SkyWalking Java Agent Release Guide"},{"body":"SkyWalking Kubernetes Event Exporter User Guide SkyWalking Kubernetes Event Exporter is able to watch, filter, and send Kubernetes events into the Apache SkyWalking backend.\nDemo Step 1: Create a Local Kubernetes Cluster Please follow step 1 to 3 in getting started to create a cluster.\nStep 2: Deploy OAP server and Event Exporter Create the skywalking-system namespace.\n$ kubectl create namespace skywalking-system Deploy an OAP server and an event exporter.\ncat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: OAPServer metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-oap-server:9.5.0 service: template: type: ClusterIP --- apiVersion: operator.skywalking.apache.org/v1alpha1 kind: EventExporter metadata: name: skywalking-system namespace: skywalking-system spec: replicas: 1 config: | filters: - reason: \u0026#34;\u0026#34; message: \u0026#34;\u0026#34; minCount: 1 type: \u0026#34;\u0026#34; action: \u0026#34;\u0026#34; kind: \u0026#34;Pod|Service\u0026#34; namespace: \u0026#34;^skywalking-system$\u0026#34; name: \u0026#34;\u0026#34; service: \u0026#34;[^\\\\s]{1,}\u0026#34; exporters: - skywalking exporters: skywalking: template: source: service: \u0026#34;{{ .Service.Name }}\u0026#34; serviceInstance: \u0026#34;{{ .Pod.Name }}\u0026#34; endpoint: \u0026#34;\u0026#34; message: \u0026#34;{{ .Event.Message }}\u0026#34; address: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34; EOF Wait until both components are ready\u0026hellip;\n$ kubectl get pod -n skywalking-system NAME READY STATUS RESTARTS AGE skywalking-system-eventexporter-566db46fb6-npx8v 1/1 Running 0 50s skywalking-system-oap-68bd877f57-zs8hw 1/1 Running 0 50s Step 3: Check Reported Events We can verify k8s events is reported to the OAP server by using skywalking-cli.\nFirst, port-forward the OAP http service to your local machine.\n$ kubectl port-forward svc/skywalking-system-oap 12800:12800 -n skywalking-system Next, use swctl to list reported events in YAML format.\n$ swctl --display yaml event ls The output should contain k8s events of the OAP server.\nevents:- uuid:1d5bfe48-bc8d-4f5a-9680-188f59793459source:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Pulledtype:Normalmessage:Successfully pulled image \u0026#34;apache/skywalking-oap-server:9.5.0\u0026#34; in 6m4.108914335sparameters:[]starttime:1713793327000endtime:1713793327000layer:K8S- uuid:f576f6ad-748d-4cec -9260-6587c145550esource:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Createdtype:Normalmessage:Created container oapparameters:[]starttime:1713793327000endtime:1713793327000layer:K8S- uuid:0cec5b55-4cb0-4ff7-a670-a097609c531fsource:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Startedtype:Normalmessage:Started container oapparameters:[]starttime:1713793327000endtime:1713793327000layer:K8S- 
uuid:28f0d004-befe-4c27-a7b7-dfdc4dd755fasource:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Pullingtype:Normalmessage:Pulling image \u0026#34;apache/skywalking-oap-server:9.5.0\u0026#34;parameters:[]starttime:1713792963000endtime:1713792963000layer:K8S- uuid:6d766801-5057-42c0-aa63-93ce1e201418source:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Scheduledtype:Normalmessage:Successfully assigned skywalking-system/skywalking-system-oap-68bd877f57-cvkjbto kind-workerparameters:[]starttime:1713792963000endtime:1713792963000layer:K8SWe can also verify by checking logs of the event exporter.\nkubectl logs -f skywalking-system-eventexporter-566db46fb6-npx8v -n skywalking-system ... DEBUG done: rendered event is: uuid:\u0026#34;8d8c2bd1-1812-4b0c-8237-560688366280\u0026#34; source:{service:\u0026#34;skywalking-system-oap\u0026#34; serviceInstance:\u0026#34;skywalking-system-oap-68bd877f57-zs8hw\u0026#34;} name:\u0026#34;Started\u0026#34; message:\u0026#34;Started container oap\u0026#34; startTime:1713795214000 endTime:1713795214000 layer:\u0026#34;K8S\u0026#34; Spec    name description default value     image Docker image of the event exporter. apache/skywalking-kubernetes-event-exporter:latest   replicas Number of event exporter pods. 1   config Configuration of filters and exporters in YAML format. \u0026quot;\u0026quot;    Please note: if you ignore the config field, no filters or exporter will be created.\nThis is because the EventExporter controller creates a configMap for all config values and attach the configMap to the event exporter container as configuration file. Ignoring the config field means an empty configuration file (with content \u0026quot;\u0026quot;) is provided to the event exporter.\nStatus    name description     availableReplicas Total number of available event exporter pods.   conditions Latest available observations of the underlying deployment\u0026rsquo;s current state   configMapName Name of the underlying configMap.    Configuration The event exporter supports reporting specific events by different exporters. We can add filter configs to choose which events we are interested in, and include exporter names in each filter config to tell event exporter how to export filtered events.\nAn example configuration is listed below:\nfilters:- reason:\u0026#34;\u0026#34;message:\u0026#34;\u0026#34;minCount:1type:\u0026#34;\u0026#34;action:\u0026#34;\u0026#34;kind:\u0026#34;Pod|Service\u0026#34;namespace:\u0026#34;^default$\u0026#34;name:\u0026#34;\u0026#34;service:\u0026#34;[^\\\\s]{1,}\u0026#34;exporters:- skywalkingexporters:skywalking:template:source:service:\u0026#34;{{ .Service.Name }}\u0026#34;serviceInstance:\u0026#34;{{ .Pod.Name }}\u0026#34;endpoint:\u0026#34;\u0026#34;message:\u0026#34;{{ .Event.Message }}\u0026#34;address:\u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34;Filter Config    name description example     reason Filter events of the specified reason, regular expression like \u0026quot;Killing\\|Killed\u0026quot; is supported. \u0026quot;\u0026quot;   message Filter events of the specified message, regular expression like \u0026quot;Pulling container.*\u0026quot; is supported. \u0026quot;\u0026quot;   minCount Filter events whose count is \u0026gt;= the specified value. 1   type Filter events of the specified type, regular expression like \u0026quot;Normal\\|Error\u0026quot; is supported. 
\u0026quot;\u0026quot;   action Filter events of the specified action, regular expression is supported. \u0026quot;\u0026quot;   kind Filter events of the specified kind, regular expression like \u0026quot;Pod\\|Service\u0026quot; is supported. \u0026quot;Pod\\|Service\u0026quot;   namespace Filter events from the specified namespace, regular expression like \u0026quot;default\\|bookinfo\u0026quot; is supported, empty means all namespaces. \u0026quot;^default$\u0026quot;   name Filter events of the specified involved object name, regular expression like \u0026quot;.*bookinfo.*\u0026quot; is supported. \u0026quot;\u0026quot;   service Filter events belonging to services whose name is not empty. \u0026quot;[^\\\\s]{1,}\u0026quot;   exporters Events satisfy this filter can be exported into several exporters that are defined below. [\u0026quot;skywalking\u0026quot;]    Skywalking Exporter Config SkyWalking exporter exports the events into Apache SkyWalking OAP server using grpc.\n   name description example     address The SkyWalking backend address where this exporter will export to. \u0026quot;skywalking-system-oap.skywalking-system:11800\u0026quot;   enableTLS Whether to use TLS for grpc server connection validation.  If TLS is enabled, the trustedCertPath is required, but clientCertPath and clientKeyPath are optional. false   clientCertPath Path of the X.509 certificate file. \u0026quot;\u0026quot;   clientKeyPath Path of the X.509 private key file. \u0026quot;\u0026quot;   trustedCertPath Path of the root certificate file. \u0026quot;\u0026quot;   insecureSkipVerify Whether a client verifies the server\u0026rsquo;s certificate chain and host name. Check tls.Config for more details. false   template The event template of SkyWalking exporter, it can be composed of metadata like Event, Pod, and Service.    template.source Event source information.    template.source.service Service name, can be a template string. \u0026quot;{{ .Service.Name }}\u0026quot;   template.source.serviceInstance Service instance name, can be a template string. \u0026quot;{{ .Pod.Name }}\u0026quot;   template.source.endpoint Endpoint, can be a template string. \u0026quot;\u0026quot;   template.message Message format, can be a template string. \u0026quot;{{ .Event.Message }}\u0026quot;    Console Exporter Config Console exporter exports the events into console logs, this exporter is typically used for debugging.\n   name description example     template The event template of SkyWalking exporter, it can be composed of metadata like Event, Pod, and Service.    template.source Event source information.    template.source.service Service name, can be a template string. \u0026quot;{{ .Service.Name }}\u0026quot;   template.source.serviceInstance Service instance name, can be a template string. \u0026quot;{{ .Pod.Name }}\u0026quot;   template.source.endpoint Endpoint, can be a template string. \u0026quot;\u0026quot;   template.message Message format, can be a template string. \u0026quot;{{ .Event.Message }}\u0026quot;    ","excerpt":"SkyWalking Kubernetes Event Exporter User Guide SkyWalking Kubernetes Event Exporter is able to …","ref":"/docs/skywalking-swck/next/examples/event-exporter/","title":"SkyWalking Kubernetes Event Exporter User Guide"},{"body":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. 
Welcome to the SkyWalking community!\nIn here, you could learn how to set up PHP agent for the PHP services.\n","excerpt":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the …","ref":"/docs/skywalking-php/latest/readme/","title":"SkyWalking PHP Agent"},{"body":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the SkyWalking community!\nIn here, you could learn how to set up PHP agent for the PHP services.\n","excerpt":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the …","ref":"/docs/skywalking-php/next/readme/","title":"SkyWalking PHP Agent"},{"body":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the SkyWalking community!\nIn here, you could learn how to set up PHP agent for the PHP services.\n","excerpt":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the …","ref":"/docs/skywalking-php/v0.7.0/readme/","title":"SkyWalking PHP Agent"},{"body":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to the SkyWalking community!\nThe Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects.\nThis documentation covers a number of ways to set up the Python agent for various use cases.\n \nCapabilities The following table demonstrates the currently supported telemetry collection capabilities in SkyWalking Python agent:\n   Reporter Supported? Details     Trace ✅ (default: ON) Automatic instrumentation + Manual SDK   Log ✅ (default: ON) Direct reporter only. (Tracing context in log planned)   Meter ✅ (default: ON) Meter API + Automatic PVM metrics   Event ❌ (Planned) Report lifecycle events of your awesome Python application   Profiling ✅ (default: ON) Threading and Greenlet Profiler    Live Demo  Find the live demo with Python agent on our website. Follow the showcase to set up preview deployment quickly.  ","excerpt":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to …","ref":"/docs/skywalking-python/latest/readme/","title":"SkyWalking Python Agent"},{"body":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to the SkyWalking community!\nThe Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects.\nThis documentation covers a number of ways to set up the Python agent for various use cases.\n \nCapabilities The following table demonstrates the currently supported telemetry collection capabilities in SkyWalking Python agent:\n   Reporter Supported? Details     Trace ✅ (default: ON) Automatic instrumentation + Manual SDK   Log ✅ (default: ON) Direct reporter only. (Tracing context in log planned)   Meter ✅ (default: ON) Meter API + Automatic PVM metrics   Event ❌ (Planned) Report lifecycle events of your awesome Python application   Profiling ✅ (default: ON) Threading and Greenlet Profiler    Live Demo  Find the live demo with Python agent on our website. Follow the showcase to set up preview deployment quickly.  ","excerpt":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to …","ref":"/docs/skywalking-python/next/readme/","title":"SkyWalking Python Agent"},{"body":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. 
Welcome to the SkyWalking community!\nThe Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects.\nThis documentation covers a number of ways to set up the Python agent for various use cases.\n \nCapabilities The following table demonstrates the currently supported telemetry collection capabilities in SkyWalking Python agent:\n   Reporter Supported? Details     Trace ✅ (default: ON) Automatic instrumentation + Manual SDK   Log ✅ (default: ON) Direct reporter only. (Tracing context in log planned)   Meter ✅ (default: ON) Meter API + Automatic PVM metrics   Event ❌ (Planned) Report lifecycle events of your awesome Python application   Profiling ✅ (default: ON) Threading and Greenlet Profiler    Live Demo  Find the live demo with Python agent on our website. Follow the showcase to set up preview deployment quickly.  ","excerpt":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to …","ref":"/docs/skywalking-python/v1.0.1/readme/","title":"SkyWalking Python Agent"},{"body":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is the recommended way of running your application with Python agent, the CLI is well-tested and used by all agent E2E \u0026amp; Plugin tests.\nIn releases before 0.7.0, you would at least need to add the following lines to your applications to get the agent attached and running, this can be tedious in many cases due to large number of services, DevOps practices and can cause problem when used with prefork servers.\nfrom skywalking import agent, config config.init(SomeConfig) agent.start() The SkyWalking Python agent implements a command-line interface that can be utilized to attach the agent to your awesome applications during deployment without changing any application code, just like the SkyWalking Java Agent.\n The following feature is added in v1.0.0 as experimental flag, so you need to specify the -p flag to sw-python run -p. 
In the future, this flag will be removed and agent will automatically enable prefork/fork support in a more comprehensive manner.\n Especially with the new automatic postfork injection feature, you no longer have to worry about threading and forking incompatibility.\nCheck How to use with uWSGI and How to use with Gunicorn to understand the detailed background on what is post_fork, why you need them and how to easily overcome the trouble with sw-python CLI.\nYou should still read the legacy way to integrate agent in case the sw-python CLI is not working for you.\nUsage Upon successful installation of the SkyWalking Python agent via pip, a command-line script sw-python is installed in your environment (virtual env preferred).\n run sw-python to see if it is available, you will need to pass configuration by environment variables.\n For example: export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800\nThe run option The sw-python CLI provides a run option, which you can use to execute your applications (either begins with the python command or Python-based programs like gunicorn on your path) just like you invoke them normally, plus a prefix, the following example demonstrates the usage.\nIf your previous command to run your gunicorn/uwsgi application is:\ngunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nuwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nPlease change it to (the -p option starts one agent in each process, which is the correct behavior):\nImportant: if the call to uwsgi/gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nsw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nThe SkyWalking Python agent will start up along with all your application workers shortly.\nNote that sw-python also work with spawned subprocess (os.exec*/subprocess) as long as the PYTHONPATH is inherited.\nAdditionally, sw-python started agent works well with os.fork when your application forks workers, as long as the SW_AGENT_EXPERIMENTAL_FORK_SUPPORT is turned on. 
(It will be automatically turned on when gunicorn is detected)\nConfiguring the agent You would normally want to provide additional configurations other than the default ones.\nThrough environment variables The currently supported method is to provide the environment variables listed and explained in the Environment Variables List.\nThrough a sw-config.toml (TBD) Currently, only environment variable configuration is supported; an optional toml configuration is to be implemented.\nEnabling CLI DEBUG mode Note the CLI is a feature that manipulates the Python interpreter bootstrap behaviour, there could be unsupported cases.\nIf you encounter unexpected problems, please turn on the DEBUG mode by adding the -d or --debug flag to your sw-python command, as shown below.\nFrom: sw-python run command\nTo: sw-python -d run command\nPlease attach the debug logs to the SkyWalking Issues section if you believe it is a bug, idea discussions and pull requests are always welcomed.\nAdditional Remarks When executing commands with sw-python run command, your command\u0026rsquo;s Python interpreter will pick up the SkyWalking loader module.\nIt is not safe to attach SkyWalking Agent to those commands that resides in another Python installation because incompatible Python versions and mismatched SkyWalking versions can cause problems. Therefore, any attempt to pass a command that uses a different Python interpreter/ environment will not bring up SkyWalking Python Agent even if another SkyWalking Python agent is installed there(no matter the version), and will force exit with an error message indicating the reasoning.\nDisabling spawned processes from starting new agents Sometimes you don\u0026rsquo;t actually need the agent to monitor anything in a new process (when it\u0026rsquo;s not a web service worker). (here we mean process spawned by subprocess and os.exec*(), os.fork() is not controlled by this flag but experimental_fork_support)\nIf you do not need the agent to get loaded for application child processes, you can turn off the behavior by setting an environment variable.\nSW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE to False\nNote the auto bootstrap depends on the environment inherited by child processes, thus prepending a new sitecustomize path to or removing the loader path from the PYTHONPATH could also prevent the agent from loading in a child process.\nKnown limitations  The CLI may not work properly with arguments that involve double quotation marks in some shells. The CLI and bootstrapper stdout logs could get messy in Windows shells.  
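To wrap up, a typical invocation for a gunicorn service looks roughly like the sketch below. The service name, backend address, and application module are placeholders, and the exact variable names (such as SW_AGENT_NAME) should be checked against the Environment Variables List for your agent version.
# Agent configuration is passed through environment variables.
export SW_AGENT_NAME=your-awesome-service
export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800
# Optional: keep the agent out of spawned child processes that are not service workers.
export SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE=False
# -d turns on CLI debug logging, -p starts one agent per worker process (experimental flag).
sw-python -d run -p gunicorn your_app:app --workers 2 --bind 0.0.0.0:8088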
","excerpt":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is …","ref":"/docs/skywalking-python/latest/en/setup/cli/","title":"SkyWalking Python Agent Command Line Interface (sw-python CLI)"},{"body":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is the recommended way of running your application with Python agent, the CLI is well-tested and used by all agent E2E \u0026amp; Plugin tests.\nIn releases before 0.7.0, you would at least need to add the following lines to your applications to get the agent attached and running, this can be tedious in many cases due to large number of services, DevOps practices and can cause problem when used with prefork servers.\nfrom skywalking import agent, config config.init(SomeConfig) agent.start() The SkyWalking Python agent implements a command-line interface that can be utilized to attach the agent to your awesome applications during deployment without changing any application code, just like the SkyWalking Java Agent.\n The following feature is added in v1.0.0 as experimental flag, so you need to specify the -p flag to sw-python run -p. In the future, this flag will be removed and agent will automatically enable prefork/fork support in a more comprehensive manner.\n Especially with the new automatic postfork injection feature, you no longer have to worry about threading and forking incompatibility.\nCheck How to use with uWSGI and How to use with Gunicorn to understand the detailed background on what is post_fork, why you need them and how to easily overcome the trouble with sw-python CLI.\nYou should still read the legacy way to integrate agent in case the sw-python CLI is not working for you.\nUsage Upon successful installation of the SkyWalking Python agent via pip, a command-line script sw-python is installed in your environment (virtual env preferred).\n run sw-python to see if it is available, you will need to pass configuration by environment variables.\n For example: export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800\nThe run option The sw-python CLI provides a run option, which you can use to execute your applications (either begins with the python command or Python-based programs like gunicorn on your path) just like you invoke them normally, plus a prefix, the following example demonstrates the usage.\nIf your previous command to run your gunicorn/uwsgi application is:\ngunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nuwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nPlease change it to (the -p option starts one agent in each process, which is the correct behavior):\nImportant: if the call to uwsgi/gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nsw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nThe SkyWalking Python agent will start up along with all your application workers shortly.\nNote that sw-python also work with spawned subprocess (os.exec*/subprocess) as long as the PYTHONPATH is inherited.\nAdditionally, sw-python 
started agent works well with os.fork when your application forks workers, as long as the SW_AGENT_EXPERIMENTAL_FORK_SUPPORT is turned on. (It will be automatically turned on when gunicorn is detected)\nConfiguring the agent You would normally want to provide additional configurations other than the default ones.\nThrough environment variables The currently supported method is to provide the environment variables listed and explained in the Environment Variables List.\nThrough a sw-config.toml (TBD) Currently, only environment variable configuration is supported; an optional toml configuration is to be implemented.\nEnabling CLI DEBUG mode Note the CLI is a feature that manipulates the Python interpreter bootstrap behaviour, there could be unsupported cases.\nIf you encounter unexpected problems, please turn on the DEBUG mode by adding the -d or --debug flag to your sw-python command, as shown below.\nFrom: sw-python run command\nTo: sw-python -d run command\nPlease attach the debug logs to the SkyWalking Issues section if you believe it is a bug, idea discussions and pull requests are always welcomed.\nAdditional Remarks When executing commands with sw-python run command, your command\u0026rsquo;s Python interpreter will pick up the SkyWalking loader module.\nIt is not safe to attach SkyWalking Agent to those commands that resides in another Python installation because incompatible Python versions and mismatched SkyWalking versions can cause problems. Therefore, any attempt to pass a command that uses a different Python interpreter/ environment will not bring up SkyWalking Python Agent even if another SkyWalking Python agent is installed there(no matter the version), and will force exit with an error message indicating the reasoning.\nDisabling spawned processes from starting new agents Sometimes you don\u0026rsquo;t actually need the agent to monitor anything in a new process (when it\u0026rsquo;s not a web service worker). (here we mean process spawned by subprocess and os.exec*(), os.fork() is not controlled by this flag but experimental_fork_support)\nIf you do not need the agent to get loaded for application child processes, you can turn off the behavior by setting an environment variable.\nSW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE to False\nNote the auto bootstrap depends on the environment inherited by child processes, thus prepending a new sitecustomize path to or removing the loader path from the PYTHONPATH could also prevent the agent from loading in a child process.\nKnown limitations  The CLI may not work properly with arguments that involve double quotation marks in some shells. The CLI and bootstrapper stdout logs could get messy in Windows shells.  
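The interpreter check described under Additional Remarks can be pictured with a small, hypothetical sketch; this is not the CLI's actual implementation, merely an illustration of why a command resolved from a different Python environment is rejected.

# Hypothetical illustration of the environment check, not the real sw-python code.
import shutil
import sys

def same_python_environment(command: str) -> bool:
    resolved = shutil.which(command)
    # A command that is missing, or that lives under a different prefix
    # (e.g. another virtualenv), belongs to a different Python installation.
    return resolved is not None and resolved.startswith(sys.prefix)

if not same_python_environment("gunicorn"):
    sys.exit("gunicorn belongs to a different Python environment; the agent will not attach.")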
","excerpt":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is …","ref":"/docs/skywalking-python/next/en/setup/cli/","title":"SkyWalking Python Agent Command Line Interface (sw-python CLI)"},{"body":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is the recommended way of running your application with Python agent, the CLI is well-tested and used by all agent E2E \u0026amp; Plugin tests.\nIn releases before 0.7.0, you would at least need to add the following lines to your applications to get the agent attached and running, this can be tedious in many cases due to large number of services, DevOps practices and can cause problem when used with prefork servers.\nfrom skywalking import agent, config config.init(SomeConfig) agent.start() The SkyWalking Python agent implements a command-line interface that can be utilized to attach the agent to your awesome applications during deployment without changing any application code, just like the SkyWalking Java Agent.\n The following feature is added in v1.0.0 as experimental flag, so you need to specify the -p flag to sw-python run -p. In the future, this flag will be removed and agent will automatically enable prefork/fork support in a more comprehensive manner.\n Especially with the new automatic postfork injection feature, you no longer have to worry about threading and forking incompatibility.\nCheck How to use with uWSGI and How to use with Gunicorn to understand the detailed background on what is post_fork, why you need them and how to easily overcome the trouble with sw-python CLI.\nYou should still read the legacy way to integrate agent in case the sw-python CLI is not working for you.\nUsage Upon successful installation of the SkyWalking Python agent via pip, a command-line script sw-python is installed in your environment (virtual env preferred).\n run sw-python to see if it is available, you will need to pass configuration by environment variables.\n For example: export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800\nThe run option The sw-python CLI provides a run option, which you can use to execute your applications (either begins with the python command or Python-based programs like gunicorn on your path) just like you invoke them normally, plus a prefix, the following example demonstrates the usage.\nIf your previous command to run your gunicorn/uwsgi application is:\ngunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nuwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nPlease change it to (the -p option starts one agent in each process, which is the correct behavior):\nImportant: if the call to uwsgi/gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nsw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nThe SkyWalking Python agent will start up along with all your application workers shortly.\nNote that sw-python also work with spawned subprocess (os.exec*/subprocess) as long as the PYTHONPATH is inherited.\nAdditionally, sw-python 
started agent works well with os.fork when your application forks workers, as long as the SW_AGENT_EXPERIMENTAL_FORK_SUPPORT is turned on. (It will be automatically turned on when gunicorn is detected)\nConfiguring the agent You would normally want to provide additional configurations other than the default ones.\nThrough environment variables The currently supported method is to provide the environment variables listed and explained in the Environment Variables List.\nThrough a sw-config.toml (TBD) Currently, only environment variable configuration is supported; an optional toml configuration is to be implemented.\nEnabling CLI DEBUG mode Note the CLI is a feature that manipulates the Python interpreter bootstrap behaviour, there could be unsupported cases.\nIf you encounter unexpected problems, please turn on the DEBUG mode by adding the -d or --debug flag to your sw-python command, as shown below.\nFrom: sw-python run command\nTo: sw-python -d run command\nPlease attach the debug logs to the SkyWalking Issues section if you believe it is a bug, idea discussions and pull requests are always welcomed.\nAdditional Remarks When executing commands with sw-python run command, your command\u0026rsquo;s Python interpreter will pick up the SkyWalking loader module.\nIt is not safe to attach SkyWalking Agent to those commands that resides in another Python installation because incompatible Python versions and mismatched SkyWalking versions can cause problems. Therefore, any attempt to pass a command that uses a different Python interpreter/ environment will not bring up SkyWalking Python Agent even if another SkyWalking Python agent is installed there(no matter the version), and will force exit with an error message indicating the reasoning.\nDisabling spawned processes from starting new agents Sometimes you don\u0026rsquo;t actually need the agent to monitor anything in a new process (when it\u0026rsquo;s not a web service worker). (here we mean process spawned by subprocess and os.exec*(), os.fork() is not controlled by this flag but experimental_fork_support)\nIf you do not need the agent to get loaded for application child processes, you can turn off the behavior by setting an environment variable.\nSW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE to False\nNote the auto bootstrap depends on the environment inherited by child processes, thus prepending a new sitecustomize path to or removing the loader path from the PYTHONPATH could also prevent the agent from loading in a child process.\nKnown limitations  The CLI may not work properly with arguments that involve double quotation marks in some shells. The CLI and bootstrapper stdout logs could get messy in Windows shells.  
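For readers who end up on the legacy path, the post_fork idea referenced above can be sketched as a gunicorn.conf.py hook that starts one agent per worker after the fork. This is only a rough illustration built from the legacy snippet shown earlier; the zero-argument config.init() call assumes configuration is supplied via environment variables.

# gunicorn.conf.py -- rough sketch of the legacy, manual post_fork approach.
# Assumes configuration comes from environment variables such as
# SW_AGENT_COLLECTOR_BACKEND_SERVICES (an assumption; see the legacy setup docs).
def post_fork(server, worker):
    from skywalking import agent, config
    config.init()   # pull settings from defaults/environment variables
    agent.start()   # one agent per worker, started after the fork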
","excerpt":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is …","ref":"/docs/skywalking-python/v1.0.1/en/setup/cli/","title":"SkyWalking Python Agent Command Line Interface (sw-python CLI)"},{"body":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented automatically, SkyWalking also provides some APIs to enable manual instrumentation.\nCreate Spans The code snippet below shows how to create entry span, exit span and local span.\nfrom skywalking import Component from skywalking.trace.context import SpanContext, get_context from skywalking.trace.tags import Tag context: SpanContext = get_context() # get a tracing context # create an entry span, by using `with` statement, # the span automatically starts/stops when entering/exiting the context with context.new_entry_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.component = Component.Flask # the span automatically stops when exiting the `with` context class TagSinger(Tag): key = \u0026#39;Singer\u0026#39; with context.new_exit_span(op=\u0026#39;https://github.com/apache\u0026#39;, peer=\u0026#39;localhost:8080\u0026#39;, component=Component.Flask) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) with context.new_local_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) Decorators from time import sleep from skywalking import Component from skywalking.decorators import trace, runnable from skywalking.trace.context import SpanContext, get_context @trace() # the operation name is the method name(\u0026#39;some_other_method\u0026#39;) by default def some_other_method(): sleep(1) @trace(op=\u0026#39;awesome\u0026#39;) # customize the operation name to \u0026#39;awesome\u0026#39; def some_method(): some_other_method() @trace(op=\u0026#39;async_functions_are_also_supported\u0026#39;) async def async_func(): return \u0026#39;asynchronous\u0026#39; @trace() async def async_func2(): return await async_func() @runnable() # cross thread propagation def some_method(): some_other_method() from threading import Thread t = Thread(target=some_method) t.start() context: SpanContext = get_context() with context.new_entry_span(op=str(\u0026#39;https://github.com/apache/skywalking\u0026#39;)) as span: span.component = Component.Flask some_method() ","excerpt":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented …","ref":"/docs/skywalking-python/latest/en/setup/advanced/api/","title":"SkyWalking Python Instrumentation API"},{"body":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented automatically, SkyWalking also provides some APIs to enable manual instrumentation.\nCreate Spans The code snippet below shows how to create entry span, exit span and local span.\nfrom skywalking import Component from skywalking.trace.context import SpanContext, get_context from skywalking.trace.tags import Tag context: SpanContext = get_context() # get a tracing context # create an entry span, by using `with` statement, # the span automatically starts/stops when entering/exiting the context with context.new_entry_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.component = Component.Flask # the span automatically stops when exiting the `with` context class TagSinger(Tag): key = \u0026#39;Singer\u0026#39; with context.new_exit_span(op=\u0026#39;https://github.com/apache\u0026#39;, 
peer=\u0026#39;localhost:8080\u0026#39;, component=Component.Flask) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) with context.new_local_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) Decorators from time import sleep from skywalking import Component from skywalking.decorators import trace, runnable from skywalking.trace.context import SpanContext, get_context @trace() # the operation name is the method name(\u0026#39;some_other_method\u0026#39;) by default def some_other_method(): sleep(1) @trace(op=\u0026#39;awesome\u0026#39;) # customize the operation name to \u0026#39;awesome\u0026#39; def some_method(): some_other_method() @trace(op=\u0026#39;async_functions_are_also_supported\u0026#39;) async def async_func(): return \u0026#39;asynchronous\u0026#39; @trace() async def async_func2(): return await async_func() @runnable() # cross thread propagation def some_method(): some_other_method() from threading import Thread t = Thread(target=some_method) t.start() context: SpanContext = get_context() with context.new_entry_span(op=str(\u0026#39;https://github.com/apache/skywalking\u0026#39;)) as span: span.component = Component.Flask some_method() ","excerpt":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented …","ref":"/docs/skywalking-python/next/en/setup/advanced/api/","title":"SkyWalking Python Instrumentation API"},{"body":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented automatically, SkyWalking also provides some APIs to enable manual instrumentation.\nCreate Spans The code snippet below shows how to create entry span, exit span and local span.\nfrom skywalking import Component from skywalking.trace.context import SpanContext, get_context from skywalking.trace.tags import Tag context: SpanContext = get_context() # get a tracing context # create an entry span, by using `with` statement, # the span automatically starts/stops when entering/exiting the context with context.new_entry_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.component = Component.Flask # the span automatically stops when exiting the `with` context class TagSinger(Tag): key = \u0026#39;Singer\u0026#39; with context.new_exit_span(op=\u0026#39;https://github.com/apache\u0026#39;, peer=\u0026#39;localhost:8080\u0026#39;, component=Component.Flask) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) with context.new_local_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) Decorators from time import sleep from skywalking import Component from skywalking.decorators import trace, runnable from skywalking.trace.context import SpanContext, get_context @trace() # the operation name is the method name(\u0026#39;some_other_method\u0026#39;) by default def some_other_method(): sleep(1) @trace(op=\u0026#39;awesome\u0026#39;) # customize the operation name to \u0026#39;awesome\u0026#39; def some_method(): some_other_method() @trace(op=\u0026#39;async_functions_are_also_supported\u0026#39;) async def async_func(): return \u0026#39;asynchronous\u0026#39; @trace() async def async_func2(): return await async_func() @runnable() # cross thread propagation def some_method(): some_other_method() from threading import Thread t = Thread(target=some_method) t.start() context: SpanContext = get_context() with 
context.new_entry_span(op=str(\u0026#39;https://github.com/apache/skywalking\u0026#39;)) as span: span.component = Component.Flask some_method() ","excerpt":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented …","ref":"/docs/skywalking-python/v1.0.1/en/setup/advanced/api/","title":"SkyWalking Python Instrumentation API"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  
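As a side note, the "shasum 512" step performed by the script produces a detached checksum file next to the source tarball. A rough Python equivalent is shown below purely for illustration; the release itself must use create_source_release.sh.

# Illustrative equivalent of `shasum -a 512 file > file.sha512`; not part of the release tooling.
import hashlib

def write_sha512(path: str) -> None:
    digest = hashlib.sha512()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    with open(path + ".sha512", "w") as out:
        out.write(f"{digest.hexdigest()}  {path}\n")  # two spaces, as shasum prints

write_sha512("apache-skywalking-apm-x.y.z-src.tgz")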
apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/latest/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  
\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://skywalking.apache.org/docs/main/vx.y.z/en/changes/changes/ Apache SkyWalking website: http://skywalking.apache.org/ Downloads: https://skywalking.apache.org/downloads/#SkyWalkingAPM Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  
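Complementing the checksum generation sketched earlier, a downloaded artifact can also be cross-checked against its .sha512 file without shasum; the snippet below is purely illustrative, and the shasum -c / gpg --verify commands listed in the vote-check section remain the canonical procedure.

# Illustrative cross-check of a detached .sha512 file, mirroring `shasum -c <file>.sha512`.
import hashlib

def verify_sha512(artifact: str) -> bool:
    expected = open(artifact + ".sha512").read().split()[0]
    digest = hashlib.sha512()
    with open(artifact, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected

print(verify_sha512("apache-skywalking-apm-x.y.z-src.tgz"))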
","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/next/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  
apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.zip Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-rocketbot-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.zip Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-rocketbot-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. 
Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.0.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
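For reference, the dev-to-release move shown earlier can also be done with an inline commit message so no editor is needed; 9.0.0 below is only an illustrative version number:
# substitute the real x.y.z for 9.0.0
svn mv -m 'Release Apache SkyWalking 9.0.0' https://dist.apache.org/repos/dist/dev/skywalking/9.0.0 https://dist.apache.org/repos/dist/release/skywalking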
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.1.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... 
\u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. 
We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. 
LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. 
apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.2.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  
Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. 
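The sha512 lines quoted in that mail come straight from the checksum files; if you need to regenerate them, something like the following works (file names are illustrative):
shasum -a 512 apache-skywalking-apm-x.y.z-src.tgz \u0026gt; apache-skywalking-apm-x.y.z-src.tgz.sha512
shasum -a 512 apache-skywalking-apm-bin-x.y.z.tar.gz \u0026gt; apache-skywalking-apm-bin-x.y.z.tar.gz.sha512
# paste the digests (the first column of each file) into the announcement and vote mails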
Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. 
Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.3.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
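One possible way to stage the packages in that dev area is plain svn, sketched here with an illustrative working-copy name (the next steps describe the same upload):
svn co https://dist.apache.org/repos/dist/dev/skywalking/ sw-dist-dev
mkdir sw-dist-dev/x.y.z
# copy the source and binary packages plus their .asc and .sha512 files
cp apache-skywalking-*x.y.z*.tar.gz* sw-dist-dev/x.y.z/
cd sw-dist-dev
svn add x.y.z
svn commit -m 'Upload Apache SkyWalking x.y.z release candidate'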
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
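Once the artifacts have propagated, a downstream-style check against the published locations (URL patterns as listed above, file names illustrative) is a quick way to confirm nothing is missing:
curl -LO https://www.apache.org/dist/skywalking/KEYS
gpg --import KEYS
# download the binary package and its .asc from the mirror, then:
gpg --verify apache-skywalking-apm-bin-x.y.z.tar.gz.asc apache-skywalking-apm-bin-x.y.z.tar.gz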
Send the ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org. The sender should use their Apache email account. Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, The Apache SkyWalking Team is glad to announce the release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c (the last release). The notable changes since a.b.c include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow that automatically publishes the Docker images to Docker Hub after you set the version from pre-release to release. All you need to do is watch that workflow and check whether it succeeds; if it fails, you can use the following steps to publish the Docker images from your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to point to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.4.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set up the gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
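The gpg warm-up mentioned above can be as simple as signing a scratch file so the agent caches your passphrase before release:prepare runs; a purely illustrative sketch:
echo test \u0026gt; /tmp/gpg-warmup.txt
gpg --sign /tmp/gpg-warmup.txt
# enter the passphrase once; gpg-agent keeps it cached for the release run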
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
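For the license-header item in the vote checklist, the skywalking-eyes check is typically run from the root of the unpacked source package; a sketch in which the extracted directory name is an assumption:
tar -xzf apache-skywalking-x.y.z-src.tar.gz
cd apache-skywalking-x.y.z-src   # assumed directory name inside the tarball
docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check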
Send the ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org. The sender should use their Apache email account. Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, The Apache SkyWalking Team is glad to announce the release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c (the last release). The notable changes since a.b.c include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow that automatically publishes the Docker images to Docker Hub after you set the version from pre-release to release. All you need to do is watch that workflow and check whether it succeeds; if it fails, you can use the following steps to publish the Docker images from your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to point to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.5.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set up the gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
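Regarding the "encrypt passwords" step in the environment setup above: the encrypted values that go into the password elements of the settings.xml template are produced with Maven's built-in password encryption. A brief sketch of that flow (standard Maven behavior, not SkyWalking-specific):

```bash
# Generate an encrypted master password and store it in ~/.m2/settings-security.xml:
mvn --encrypt-master-password
#   copy the printed {...} value into:
#   <settingsSecurity><master>{...}</master></settingsSecurity>

# Then encrypt your Apache LDAP password; the printed {...} value is what goes
# into the <password> elements of ~/.m2/settings.xml shown above.
mvn --encrypt-password
```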
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
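Referring back to the "Upload to Apache svn" step above, the upload itself is an ordinary svn workflow. A sketch of one possible session (the checkout directory name and local file paths are placeholders):

```bash
# Check out the dev dist area and create the version folder.
svn co https://dist.apache.org/repos/dist/dev/skywalking/ skywalking-dist-dev
cd skywalking-dist-dev
mkdir x.y.z

# Copy the source and binary packages together with their .asc and .sha512 files.
cp /path/to/apache-skywalking-x.y.z-src.tar.gz*  x.y.z/
cp /path/to/apache-skywalking-bin-x.y.z.tar.gz*  x.y.z/

# Add and commit with your Apache ID.
svn add x.y.z
svn commit -m "Add Apache SkyWalking x.y.z release candidate" --username YOUR_APACHE_ID
```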
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.6.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.7.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  
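For the Kotlin coroutine plugin described above, "installing the plugin" (mentioned in the screenshots section that follows) means activating it in the Java agent. Since it is shipped as an optional plugin, that usually amounts to moving its jar from the agent's optional-plugins directory into plugins before starting the instrumented service. A sketch, where the exact jar file name is an assumption and should be checked against your agent distribution:

```bash
# Hedged sketch: enable the optional Kotlin coroutine plugin of the Java agent.
# The jar name pattern is an assumption; check the optional-plugins directory of
# your agent distribution for the actual file name.
cd skywalking-agent
mv optional-plugins/apm-kotlin-coroutine-plugin-*.jar plugins/
```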
Some screenshots Run without the plugin We run a Kotlin coroutine-based gRPC server without this coroutine plugin.\nYou can see that one call (client -\u0026gt; server1 -\u0026gt; server2) has been split into two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing any code manually, just install the plugin. We can see the spans are connected together, and we can get all the information of one client call.\n","excerpt":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/","title":"Skywalking with Kotlin coroutine"},{"body":"Skywalking with Kotlin coroutine This plugin provides auto-instrumentation support for Kotlin coroutines, based on context snapshots.\nDescription SkyWalking provides tracing context propagation within a thread. In order to support Kotlin coroutines, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutines switch the execution thread via CoroutineDispatcher.\n Create a snapshot of the current context before dispatching the continuation. Then create a coroutine span after the thread switch, and mark the span as continued with the snapshot. Every new span created in the new thread will be a child of this coroutine span, so we can link those spans together in one trace. After the original runnable has executed, we stop the coroutine span to clean up the thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine-based gRPC server without this coroutine plugin.\nYou can see that one call (client -\u0026gt; server1 -\u0026gt; server2) has been split into two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing any code manually, just install the plugin. We can see the spans are connected together, and we can get all the information of one client call.\n","excerpt":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/","title":"Skywalking with Kotlin coroutine"},{"body":"Skywalking with Kotlin coroutine This plugin provides auto-instrumentation support for Kotlin coroutines, based on context snapshots.\nDescription SkyWalking provides tracing context propagation within a thread. In order to support Kotlin coroutines, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutines switch the execution thread via CoroutineDispatcher.\n Create a snapshot of the current context before dispatching the continuation. Then create a coroutine span after the thread switch, and mark the span as continued with the snapshot. Every new span created in the new thread will be a child of this coroutine span, so we can link those spans together in one trace. After the original runnable has executed, we stop the coroutine span to clean up the thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine-based gRPC server without this coroutine plugin.\nYou can see that one call (client -\u0026gt; server1 -\u0026gt; server2) has been split into two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing any code manually, just install the plugin. We can see the spans are connected together.
We can get all info of one client call.\n","excerpt":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/","title":"Skywalking with Kotlin coroutine"},{"body":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. We can get all info of one client call.\n","excerpt":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/","title":"Skywalking with Kotlin coroutine"},{"body":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. 
We can get all the information of one client call.\n","excerpt":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/","title":"Skywalking with Kotlin coroutine"},{"body":"Slow Cache Command Slow cache commands are crucial for you to identify bottlenecks of a system which relies on a cache system.\nSlow cache commands are based on sampling. Right now, the core samples the top 50 slowest commands every 10 minutes. Note that the duration of these commands must be longer than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. The reserved cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your cache command access time is usually more than 1ms. The OAP server runs the statistics per service and only persists the top 50 every 10 minutes (controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/latest/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Cache Command Slow cache commands are crucial for you to identify bottlenecks of a system which relies on a cache system.\nSlow cache commands are based on sampling. Right now, the core samples the top 50 slowest commands every 10 minutes. Note that the duration of these commands must be longer than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. The reserved cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your cache command access time is usually more than 1ms. The OAP server runs the statistics per service and only persists the top 50 every 10 minutes (controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/next/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Cache Command Slow cache commands are crucial for you to identify bottlenecks of a system which relies on a cache system.\nSlow cache commands are based on sampling. Right now, the core samples the top 50 slowest commands every 10 minutes. Note that the duration of these commands must be longer than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. The reserved cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your cache command access time is usually more than 1ms.
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/v9.3.0/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/v9.4.0/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/v9.5.0/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/v9.6.0/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/v9.7.0/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/latest/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. 
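Both the slow-cache and slow-database thresholds described in these pages are plain comma-separated strings, and the sampling report period is the topNReportPeriod setting quoted above. A minimal sketch of overriding them through environment variables before starting the OAP server; SW_CORE_TOPN_REPORT_PERIOD is taken directly from the text above, while the two threshold variable names are assumptions that should be confirmed against the agent-analyzer section of your application.yml:

```bash
# Report the top-N slow samples every 30 minutes instead of the default 10.
export SW_CORE_TOPN_REPORT_PERIOD=30

# Assumed variable names -- confirm the ${...} placeholders in application.yml.
# The value format is the one described above: type:thresholdMs,type2:thresholdMs2
export SW_SLOW_DB_THRESHOLD="default:200,mongodb:100"
export SW_SLOW_CACHE_WRITE_THRESHOLD="default:20,redis:10"
```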
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/next/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial in order for you to identify bottlenecks of a system which relies on the database.\nSlow DB statements are based on sampling. Right now, the core samples the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote: The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms.\n","excerpt":"Slow Database Statement Slow Database statements are crucial in order for you to identify …","ref":"/docs/main/v9.0.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote: The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms.\n","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.1.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote: The threshold should not be set too small, like 1ms. 
Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms.\n","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.2.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.3.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.4.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.5.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.6.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.7.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/latest/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
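Once the new Source class has been added and the scope keyword registered in OALLexer.g4 and OALParser.g4 as described above, the "repackage the project" step is an ordinary Maven build so that the OAL runtime can pick the new scope up. A minimal sketch, using the project's Maven wrapper; skipping tests is optional:

```bash
# Rebuild the OAP server so the new source/scope and regenerated OAL classes are packaged.
./mvnw clean package -Dmaven.test.skip
```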
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/next/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.0.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.1.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
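After the steps above, the receiver side boils down to obtaining the raw metric data, building the source, and handing it to SourceReceiver. The sketch below assumes the module lookup pattern used by existing OAP receivers; the MyCacheAccess source and the method names are hypothetical, so verify the exact core-module API against your SkyWalking version.

// Hypothetical dispatcher inside a custom receiver module provider.
import org.apache.skywalking.oap.server.core.CoreModule;
import org.apache.skywalking.oap.server.core.source.SourceReceiver;
import org.apache.skywalking.oap.server.library.module.ModuleManager;

public class MyCacheAccessDispatcher {
    private final SourceReceiver sourceReceiver;

    public MyCacheAccessDispatcher(ModuleManager moduleManager) {
        // SourceReceiver is an internal service provided by the OAP core module.
        this.sourceReceiver = moduleManager.find(CoreModule.NAME)
                                           .provider()
                                           .getService(SourceReceiver.class);
    }

    // Called with the original metric data received by the new receiver.
    public void dispatch(String serviceName, String cacheName, int latencyMs, boolean success) {
        MyCacheAccess source = new MyCacheAccess(); // the hypothetical source sketched earlier
        source.setServiceName(serviceName);
        source.setCacheName(cacheName);
        source.setLatency(latencyMs);
        source.setStatus(success);
        sourceReceiver.receive(source);
    }
}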
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.2.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.3.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.4.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.5.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.6.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.7.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Spring annotation plugin This plugin allows to trace all methods of beans in Spring context, which are annotated with @Bean, @Service, @Component and @Repository.\n Why does this plugin optional?  Tracing all methods in Spring context all creates a lot of spans, which also spend more CPU, memory and network. 
Of course you want as many spans as possible, but please make sure your system can support the extra load.\n","excerpt":"Spring annotation plugin This plugin allows tracing all methods of beans in the Spring context that …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/","title":"Spring annotation plugin"},{"body":"Spring annotation plugin This plugin allows tracing all methods of beans in the Spring context that are annotated with @Bean, @Service, @Component and @Repository.\n Why is this plugin optional?  Tracing all methods in the Spring context creates a lot of spans, which also consume more CPU, memory and network. Of course you want as many spans as possible, but please make sure your system can support the extra load.\n","excerpt":"Spring annotation plugin This plugin allows tracing all methods of beans in the Spring context that …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/","title":"Spring annotation plugin"},{"body":"Spring annotation plugin This plugin allows tracing all methods of beans in the Spring context that are annotated with @Bean, @Service, @Component and @Repository.\n Why is this plugin optional?  Tracing all methods in the Spring context creates a lot of spans, which also consume more CPU, memory and network. Of course you want as many spans as possible, but please make sure your system can support the extra load.\n","excerpt":"Spring annotation plugin This plugin allows tracing all methods of beans in the Spring context that …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/","title":"Spring annotation plugin"},{"body":"Spring annotation plugin This plugin allows tracing all methods of beans in the Spring context that are annotated with @Bean, @Service, @Component and @Repository.\n Why is this plugin optional?  Tracing all methods in the Spring context creates a lot of spans, which also consume more CPU, memory and network. Of course you want as many spans as possible, but please make sure your system can support the extra load.\n","excerpt":"Spring annotation plugin This plugin allows tracing all methods of beans in the Spring context that …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/","title":"Spring annotation plugin"},{"body":"Spring annotation plugin This plugin allows tracing all methods of beans in the Spring context that are annotated with @Bean, @Service, @Component and @Repository.\n Why is this plugin optional?  Tracing all methods in the Spring context creates a lot of spans, which also consume more CPU, memory and network. Of course you want as many spans as possible, but please make sure your system can support the extra load.\n","excerpt":"Spring annotation plugin This plugin allows tracing all methods of beans in the Spring context that …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/","title":"Spring annotation plugin"},{"body":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates with Micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add the Micrometer and Skywalking meter registry dependencies into the project\u0026rsquo;s pom.xml file. 
You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Add UI dashboard   Open the dashboard view. Click edit button to edit the templates.\n  Create a new template. Template type: Standard -\u0026gt; Template Configuration: Spring -\u0026gt; Input the Template Name.\n  Click view button. You\u0026rsquo;ll see the spring sleuth dashboard.\n  Supported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. …","ref":"/docs/main/v9.0.0/en/setup/backend/spring-sleuth-setup/","title":"Spring sleuth setup"},{"body":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  
@Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Add UI dashboard   Open the dashboard view. Click edit button to edit the templates.\n  Create a new template. Template type: Standard -\u0026gt; Template Configuration: Spring -\u0026gt; Input the Template Name.\n  Click view button. You\u0026rsquo;ll see the spring sleuth dashboard.\n  Supported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. …","ref":"/docs/main/v9.1.0/en/setup/backend/spring-sleuth-setup/","title":"Spring sleuth setup"},{"body":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. 
If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. …","ref":"/docs/main/v9.2.0/en/setup/backend/spring-sleuth-setup/","title":"Spring sleuth setup"},{"body":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. 
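As a concrete illustration of such a customized meter on the application side, the sketch below registers additional Micrometer meters against the same registry. The meter names and tags are made up, and they must be matched by rules in the backend meter config file before SkyWalking analyzes them.

import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Timer;

// Hypothetical business metrics; names and tags are examples only.
public class OrderMetrics {
    private final Counter createdOrders;
    private final Timer checkoutLatency;

    public OrderMetrics(MeterRegistry registry) { // e.g. the SkywalkingMeterRegistry bean shown above
        this.createdOrders = Counter.builder("order_created_count")
                .tag("tenant", "default")
                .register(registry);
        this.checkoutLatency = Timer.builder("order_checkout_duration")
                .register(registry);
    }

    public void onOrderCreated() {
        createdOrders.increment();
    }

    public void recordCheckout(Runnable checkout) {
        // Times the checkout call and reports the duration through the registry.
        checkoutLatency.record(checkout);
    }
}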
Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. …","ref":"/docs/main/v9.3.0/en/setup/backend/spring-sleuth-setup/","title":"Spring sleuth setup"},{"body":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. 
Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships are logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, and many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationships, including service level and service instance level dependencies.\nA Service is a logical group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in a traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems, such as the Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both the client and server sides, where the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. So, this time window must be longer than the duration of this RPC request; otherwise, the conclusion will be lost. This condition means the analysis cannot react to dependency mutations at the second level; in production, the window duration sometimes has to be set to 3-5 minutes. Also, because of the window-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. To make the analysis as fast as possible, the analysis period is kept under 5 minutes. But some spans can’t match their parents or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left over from previous stages, some would still have to be abandoned to keep the dataset size and memory usage reasonable.\nIn STAM, we introduce new span and context propagation models, along with a new analysis method. These new models add the peer network address (IP or hostname) used at the client side, the client service instance name and the client service name into the context propagation model. This context is passed with the RPC call from client to server, just like the original trace id and span id in existing tracing systems, and is collected in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the data syncs across cluster nodes, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. 
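To make the model concrete, the following is an illustrative sketch (not SkyWalking's actual header layout) of the extra information the STAM context model carries from client to server along with the usual trace context; the field names and serialization format are assumptions.

// Illustrative only: field names and the serialization format are assumptions.
public final class StamContextCarrier {
    private final String traceId;
    private final String parentSpanId;
    // Fields added by STAM: they let the server-side (entry) span analysis emit the
    // client -> server relationship immediately, without waiting for the client span.
    private final String parentServiceName;
    private final String parentServiceInstanceName;
    private final String peerAddress; // the address the client used to reach this server

    public StamContextCarrier(String traceId, String parentSpanId, String parentServiceName,
                              String parentServiceInstanceName, String peerAddress) {
        this.traceId = traceId;
        this.parentSpanId = parentSpanId;
        this.parentServiceName = parentServiceName;
        this.parentServiceInstanceName = parentServiceInstanceName;
        this.peerAddress = peerAddress;
    }

    // Serialized into a single value carried in the RPC header (HTTP header, MQ property, etc.).
    public String toHeaderValue() {
        return String.join("-", traceId, parentSpanId,
                parentServiceName, parentServiceInstanceName, peerAddress);
    }
}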
By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. 
In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For the analysis of the client-side span (exit span), there could be three possibilities.\n The peer in the exit span already has the alias names established by the server-side span analysis from step (1). In that case, use the alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias cannot be found, simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name can be found, keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1. New Service Online or Auto Scale Out  New services could be added into the whole topology at any time by the developer team, or automatically by a container operation platform, such as Kubernetes [5], through a scale-out policy. In either case, the monitoring system cannot be notified manually. By using STAM, we can detect the new node automatically and also keep the analysis process unblocked and consistent with the detected nodes. In this case, a new service and network address (which could be an IP, a port, or both) are used. Since the peer network address \u0026lt;-\u0026gt; service mapping does not exist yet, the traffic of client service -\u0026gt; peer network address will be generated and persisted in the storage first. After the mapping is generated, further traffic of client-service to server-service can be identified, generated and aggregated in the analysis platform. To fill the gap for the small amount of traffic generated before the mapping exists, we do the peer network address \u0026lt;-\u0026gt; service mapping translation again at the query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the whole SkyWalking analysis platform deployment is fewer than 100 VMs, and syncing among them finishes in less than 10 seconds; in most cases it only takes 3-5 seconds. And at the query stage, the data has already been aggregated in minutes or seconds. The query merge performance is not related to how much traffic happens before the mapping is generated; it is only affected by the sync duration, here only about 3 seconds. Due to that, in a minute-level aggregation topology, it only adds 1 or 2 relationship records to the whole topology relationship dataset. Considering that a topology of over 100 services has over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high-load distributed system, as we don’t need to be concerned about its scaling capability. 
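The entry/exit span rules described in the New Topology Analysis Method section can be condensed into a small sketch like the one below. The type and method names are hypothetical, the multi-alias (proxy) case is omitted, and real SkyWalking aggregates these relationships as metrics rather than returning strings.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class StamTopologyAnalyzer {
    // peer network address -> server service name alias, synced across analysis nodes.
    private final Map<String, String> peerToService = new ConcurrentHashMap<>();

    // Entry (server-side) span: the propagated context carries the parent service and the peer it used.
    public String onEntrySpan(String parentService, String peerAddress, String currentService) {
        // Register the alias so later exit-span analysis can resolve the peer address.
        peerToService.put(peerAddress, currentService);
        // The relationship can be generated immediately; no time window is needed.
        return parentService + " -> " + currentService;
    }

    // Exit (client-side) span: resolve the peer through the alias map when possible.
    public String onExitSpan(String currentService, String peerAddress) {
        String alias = peerToService.get(peerAddress);
        // If no alias is known yet (server side uninstrumented, or the mapping has not
        // synced), fall back to the raw peer address; the query stage can merge it later.
        return currentService + " -> " + (alias != null ? alias : peerAddress);
    }
}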
And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. 
So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","excerpt":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System …","ref":"/docs/main/latest/en/papers/stam/","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System"},{"body":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. 
More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. 
So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. 
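To make the enhanced context model concrete, here is a minimal sketch of a context carrier in Java; the class name, header name and string encoding are illustrative assumptions, not SkyWalking's actual propagation format:

```java
// Illustrative only: the propagated context in STAM carries, besides the
// trace id and parent span id, the parent service name, parent service
// instance name, and the peer network address used by the exit span.
import java.util.function.BiConsumer;

final class ContextCarrier {
    String traceId;
    String parentSpanId;
    String parentServiceName;          // added by STAM
    String parentServiceInstanceName;  // added by STAM
    String peerNetworkAddress;         // address the client used to reach the server

    /** Inject the context into an outgoing RPC header (HTTP header, MQ property, ...). */
    void inject(BiConsumer<String, String> headerWriter) {
        headerWriter.accept("x-stam-context", String.join("|",
            traceId, parentSpanId,
            parentServiceName, parentServiceInstanceName, peerNetworkAddress));
    }

    /** Extract the context on the server side from the incoming header value. */
    static ContextCarrier extract(String headerValue) {
        String[] parts = headerValue.split("\\|", 5);
        ContextCarrier c = new ContextCarrier();
        c.traceId = parts[0];
        c.parentSpanId = parts[1];
        c.parentServiceName = parts[2];
        c.parentServiceInstanceName = parts[3];
        c.peerNetworkAddress = parts[4];
        return c;
    }
}
```

Because the entry span can read the parent service, parent instance and peer address straight from this header, it can emit the parent-to-current relationship on its own, without waiting for the matching exit span.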
Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. 
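Once the new service starts handling requests, its entry-span analysis (the server-side rules listed above) registers the missing mapping. A minimal sketch of that server-side step, using hypothetical types rather than SkyWalking's actual analysis classes, might look like this:

```java
// Hypothetical sketch of entry-span (server-side) analysis in STAM:
// 1) register the peer address used by the client as an alias of the
//    current service / service instance, and
// 2) generate parent -> current relationships directly from the
//    propagated context, without waiting for the client-side span.
import java.util.*;

final class EntrySpanAnalyzer {
    private final Map<String, String> peerToServiceAlias = new HashMap<>();
    private final Map<String, Set<String>> peerToInstanceAliases = new HashMap<>();
    private final List<String[]> relationships = new ArrayList<>();

    void analyze(String peerAddress,
                 String parentService, String parentInstance,
                 String currentService, String currentInstance) {
        // (1) alias registration; these mappings are synced to all analysis nodes.
        peerToServiceAlias.put(peerAddress, currentService);
        Set<String> instances =
            peerToInstanceAliases.computeIfAbsent(peerAddress, k -> new HashSet<>());
        instances.add(currentInstance);

        if (instances.size() == 1) {
            // (2) normal case: generate parent -> current relationships.
            relationships.add(new String[] {parentService, currentService});
            relationships.add(new String[] {parentInstance, currentInstance});
        } else {
            // Conflicting instance mappings for the same address (e.g. a proxy
            // in front): fall back to relationships against the peer address.
            relationships.add(new String[] {peerAddress, currentService});
            relationships.add(new String[] {peerAddress, currentInstance});
        }
    }
}
```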
After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. 
As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. 
https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","excerpt":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System …","ref":"/docs/main/next/en/papers/stam/","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System"},{"body":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. 
Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. 
By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. 
In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. 
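The client service-\u0026gt;peer network address records involved here are produced by the exit-span rules listed earlier in this section; a minimal, hypothetical sketch of that client-side analysis (not SkyWalking's actual classes) is:

```java
// Hypothetical sketch of exit-span (client-side) analysis in STAM,
// covering the three possibilities for the peer network address:
//   1) exactly one alias known -> use the alias service/instance names,
//   2) no alias known          -> fall back to the peer address,
//   3) several aliases known   -> keep using the peer address (proxy case).
import java.util.*;

final class ExitSpanAnalyzer {
    private final Map<String, String> peerToService;         // synced alias data
    private final Map<String, Set<String>> peerToInstances;  // synced alias data
    private final List<String[]> relationships = new ArrayList<>();

    ExitSpanAnalyzer(Map<String, String> peerToService,
                     Map<String, Set<String>> peerToInstances) {
        this.peerToService = peerToService;
        this.peerToInstances = peerToInstances;
    }

    void analyze(String currentService, String currentInstance, String peerAddress) {
        Set<String> instanceAliases =
            peerToInstances.getOrDefault(peerAddress, Collections.emptySet());
        if (instanceAliases.size() == 1) {
            // (1) a unique alias exists: replace the peer with the alias names.
            relationships.add(new String[] {currentService, peerToService.get(peerAddress)});
            relationships.add(new String[] {currentInstance, instanceAliases.iterator().next()});
        } else {
            // (2) no alias yet, or (3) multiple aliases: keep the raw peer address.
            relationships.add(new String[] {currentService, peerAddress});
            relationships.add(new String[] {currentInstance, peerAddress});
        }
    }
}
```

Possibility (3), keeping the raw peer address when several instance aliases exist, is what later lets the uninstrumented-proxy case be drawn correctly.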
And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration [14]. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. 
So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentatio Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway Envoy Route Configuration, https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/rds.proto.html?highlight=request_headers_to_  ","excerpt":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System …","ref":"/docs/main/v9.0.0/en/papers/stam/","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System"},{"body":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. 
More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. 
So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. 
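A compact way to picture the extended span model described above is the sketch below; the type and field names are illustrative only and do not mirror SkyWalking's internal span classes:

```java
// Illustrative sketch of the STAM span model: the traditional span fields
// plus the span type (entry / exit / local) and the peer network address.
import java.util.*;

enum SpanType { ENTRY, EXIT, LOCAL }

final class StamSpan {
    String traceId;
    String spanId;
    String operationName;
    long startTimestamp;
    long finishTimestamp;
    String serviceName;
    String serviceInstanceName;
    Map<String, String> tags = new HashMap<>();
    List<String> references = new ArrayList<>();  // parent span id + trace id

    // Fields added by STAM:
    SpanType spanType;          // ENTRY for server-side libraries, EXIT for client-side
    String peerNetworkAddress;  // required on ENTRY/EXIT spans for all RPC cases
}
```

The same parent service name, parent service instance name and peer fields are what the context model above injects into the RPC header.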
Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. 
After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. 
As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. 
Conclusion

This paper described STAM, which is, to the best of our knowledge, the most effective topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method used by tracing-based monitoring systems, permanently removing the disk and memory cost of time-window based analysis as well as the barriers to horizontal scaling. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production; some of these deployments generate over 100 TB of tracing data per day and build the topology of over 200 services in real time.

Acknowledgments

We thank all contributors of the Apache SkyWalking project for their suggestions, their code contributions implementing STAM, and their feedback from using STAM and SkyWalking in their production environments.

License

This paper and STAM are licensed under Apache 2.0.

References

[1] Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z
[2] Apache SkyWalking, http://skywalking.apache.org/
[3] Apache SkyWalking Open Users, https://skywalking.apache.org/users/
[4] Zipkin, https://zipkin.io/
[5] Kubernetes, Production-Grade Container Orchestration, https://kubernetes.io/
[6] OpenTracing Specification, https://github.com/opentracing/specification/blob/master/specification.md
[7] Apache Tomcat, http://tomcat.apache.org/
[8] Apache HttpComponents, https://hc.apache.org/
[9] Zipkin doc, 'Instrumenting a library' section, 'Communicating trace information' paragraph, https://zipkin.io/pages/instrumenting
[10] Jaeger Tracing, https://jaegertracing.io/
[11] Envoy Proxy, http://envoyproxy.io/
[12] Nginx, http://nginx.org/
[13] Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway
STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System

Sheng Wu 吴 晟 wusheng@apache.org

Editor's note: This paper was written by Sheng Wu, the project founder, in 2017 to describe the fundamental theory behind all current agent core concepts. Readers can learn why SkyWalking agents are significantly different from other tracing systems and from the description in the Dapper[1] paper.

Abstract

Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], with topology and metrics detected from the tracing data. One big limitation of today's topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependencies between services. This adds latency and memory use, because the client and server spans of every RPC must be matched among millions of randomly occurring requests in a highly distributed system.
More importantly, the match can fail entirely if the duration of the RPC between client and server is longer than the configured time window, or if it crosses two windows.

In this paper we present STAM, the Streaming Topology Analysis Method. In STAM, we can use an auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPCs at both the client side and the server side. In the auto instrumentation case, STAM manipulates application code at runtime, for example through a Java agent, so the monitoring system doesn't require any source code changes from the application development team or the RPC framework development team. STAM injects the RPC network address used at the client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as aliases for the network address used at the client side. Freed from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate an accurate topology.

STAM has been implemented in Apache SkyWalking[2], an open source APM (application performance monitoring) project of the Apache Software Foundation, which is widely used in many large enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in production. It reduces load and memory cost significantly, with better horizontal scaling capability.

Introduction

Monitoring a highly distributed system, especially one with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, cache and database accesses, sit behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and the first step toward observability of the whole distributed system. A distributed tracing system collects traces covering all distributed request paths, so dependency relationships are logically included in the trace data. A distributed tracing system such as Zipkin[4] or Jaeger Tracing[10] provides built-in dependency analysis features, and many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.

Strong timeliness is required to match the mutability of a distributed application system's dependency relationships, including service-level and service-instance-level dependencies.

A Service is a logical group of instances that run the same functions or code.

A Service Instance is usually an OS-level process, such as a JVM process. The relationships between services and instances are mutable, depending on configuration, code and network status; the dependencies can change over time.

Figure 1, Generated spans in a traditional Dapper-based tracing system.

The span model in the Dapper paper and in existing tracing systems, such as Zipkin's instrumenting mode[9], only propagates the span id to the server side. Because of this model, dependency analysis requires a certain time window: the tracing spans are collected at both the client and server sides, where the relationship is recorded, so the analysis process has to wait for the client and server spans to match within the same time window before it can output the result that Service A depends on Service B. This time window must therefore be longer than the duration of the RPC request; otherwise the conclusion is lost. As a consequence the analysis cannot react to dependency changes at second-level granularity; in production the window duration sometimes has to be set to 3-5 minutes. Also, because of the window-based design, consistent accuracy is hard to achieve when one side involves a long-running task: to keep the analysis fast, the analysis period is kept under 5 minutes, but some spans then can't match their parent or children if the analysis is incomplete or crosses two time windows. Even with an extra mechanism to process spans left over from previous stages, some spans still have to be abandoned to keep the dataset size and memory usage reasonable.

In STAM, we introduce new span and context propagation models together with a new analysis method. The new models add the peer network address (IP or hostname) used at the client side, the client service instance name and the client service name to the context propagation model. This information is passed with the RPC call from client to server, just like the original trace id and span id in existing tracing systems, and is collected in the server-side span. The new analysis method can then generate the client-server relationship directly, without waiting for the client span. It also registers the peer network address as an alias of the server service; after the data syncs across cluster nodes, the client-side span analysis can use this alias metadata to generate the client-server relationship directly as well. By using these new models and this method in Apache SkyWalking, we remove time-window based analysis permanently and fully use a streaming analysis mode with less than 5 seconds of latency and consistent accuracy.

New Span Model and Context Model

The traditional span of a tracing system includes the following fields [1][6][10]: a trace id to represent the whole trace; a span id to represent the current span; an operation name describing what operation this span did; a start timestamp; a finish timestamp; the Service and Service Instance names of the current span; a set of zero or more key:value Span Tags; a set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp; and references to zero or more causally related spans, where a reference includes the parent span id and trace id.

In the new span model of STAM we add the following fields to the span.

Span type. An enumeration of exit, local and entry. Entry and Exit spans are used in networking-related libraries: Entry spans represent a server-side networking library, such as Apache Tomcat[7], and Exit spans represent a client-side networking library, such as Apache HttpComponents[8].

Peer Network Address. The remote address, suitable for use in exit and entry spans. In Exit spans, the peer network address is the address used by the client library to access the server.

These fields are usually optional in many tracing systems, but in STAM we require them in all RPC cases.
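To illustrate the extended span model, the sketch below shows how the STAM fields might sit alongside the traditional ones. The class and field names are hypothetical and do not reproduce the actual SkyWalking agent data structures.

```java
// Illustrative sketch of a STAM-style span carrying the extra fields described
// above. Names are hypothetical and not the actual SkyWalking agent classes.
public class StamSpanSketch {

    // The new span type field: entry (server side), exit (client side), or local.
    enum SpanType { ENTRY, EXIT, LOCAL }

    static class Span {
        // Traditional fields.
        String traceId;
        String spanId;
        String parentSpanId;
        String operationName;
        long startTimestamp;
        long finishTimestamp;
        String serviceName;
        String serviceInstanceName;

        // Fields added by STAM.
        SpanType type;
        // In EXIT spans: the address the client library used to access the server.
        String peerNetworkAddress;
    }
}
```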
So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. 
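As a rough illustration only (not SkyWalking's actual header name, layout, or wire format), the enhanced propagation context could be flattened into a single RPC header as sketched below; all field and class names are hypothetical.

```java
// Hypothetical sketch of the enhanced STAM propagation context.
public class StamContextCarrier {
    public String traceId;
    public String parentSpanId;
    public String parentService;         // client service name, added by STAM
    public String parentServiceInstance; // client service instance name, added by STAM
    public String peerAddress;           // network address the client used, e.g. "10.0.0.5:8080"

    // Serialize into one header value, e.g. an HTTP or MQ header attached before the RPC is sent.
    public String encode() {
        return String.join("|",
            traceId, parentSpanId, parentService, parentServiceInstance, peerAddress);
    }

    // Rebuild on the server side so the entry span knows who called it and via which address.
    public static StamContextCarrier decode(String headerValue) {
        String[] parts = headerValue.split("\\|", 5);
        StamContextCarrier c = new StamContextCarrier();
        c.traceId = parts[0];
        c.parentSpanId = parts[1];
        c.parentService = parts[2];
        c.parentServiceInstance = parts[3];
        c.peerAddress = parts[4];
        return c;
    }
}
```

On the server side, the decoded carrier gives the entry span everything it needs to name its caller and the address the caller used.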
Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we designed a register mechanism to exchange unique IDs that represent these names. As a result, only 3 integers are added to the RPC context, so the increase in bandwidth is less than 1% in the production environment.\nThe changes to the two models eliminate the time windows in the analysis process. Server-side span analysis enhances the context-aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM processes spans in stream mode. The analysis of the server-side span, also named the entry span, includes the parent service name, parent service instance name and peer of the exit span. So the analysis process could establish the following results.\n Set the peer of the exit span as a client-side alias of the current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; service instance name aliases are created. These two will be synced to all analysis nodes and persisted in the storage, allowing other analysis processors to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless a different peer network address \u0026lt;-\u0026gt; service instance name mapping is found. In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; service instance name.  For the analysis of the client-side span (exit span), there are three possibilities (see the sketch below).\n The peer in the exit span already has the alias names established by the server-side span analysis from step (1). Then use the alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; service instance name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added to the whole topology at any time by the developer team, or automatically by a container orchestration platform through a scale-out policy, such as Kubernetes [5]. The monitoring system cannot be notified manually in either case. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with the detected nodes. In this case, a new service and network address (which could be an IP, a port, or both) are used. Since the peer network address \u0026lt;-\u0026gt; service mapping does not exist yet, the traffic of client service -\u0026gt; peer network address will be generated and persisted in the storage first. 
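The following sketch, assuming a shared alias store that is synced among analysis nodes, illustrates the streaming analysis above, including the fallback to the raw peer address; all class and method names are hypothetical simplifications for this paper.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical sketch of the STAM streaming topology analysis.
public class StamTopologyAnalyzer {
    // peer network address -> server service name alias, synced across analysis nodes and persisted.
    private final Map<String, String> serviceAliases = new ConcurrentHashMap<>();

    // Entry (server-side) span: the propagated context carries the parent service
    // and the peer network address the client used to reach this service.
    public void onEntrySpan(String currentService, String parentService, String peerAddressUsedByClient) {
        // Register the address the client used as an alias of the current (server) service.
        serviceAliases.put(peerAddressUsedByClient, currentService);
        // The client-server relationship is known immediately; no waiting for the client span.
        emitRelation(parentService, currentService);
    }

    // Exit (client-side) span: resolve the peer through the alias mapping when it already exists.
    public void onExitSpan(String currentService, String peerAddress) {
        String aliasService = serviceAliases.get(peerAddress);
        if (aliasService != null) {
            emitRelation(currentService, aliasService);  // e.g. "Service A -> Service B"
        } else {
            emitRelation(currentService, peerAddress);   // e.g. "Service A -> 10.0.0.5:8080"
        }
        // If several services ever claim the same address (the proxy case evaluated later),
        // the analysis keeps generating service -> peer address edges instead.
    }

    private void emitRelation(String from, String to) {
        System.out.println(from + " -> " + to);
    }
}
```

Note that the entry-span path never waits for the matching exit span, which is what removes the time window.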
After the mapping is generated, further traffic of client service -\u0026gt; server service could be identified, generated and aggregated in the analysis platform. To fill the gap left by the small amount of traffic generated before the mapping existed, we do the peer network address \u0026lt;-\u0026gt; service mapping translation again at query stage, to merge the client service-\u0026gt;peer network address and client service-\u0026gt;server service records. In production, the whole SkyWalking analysis platform deployment uses fewer than 100 VMs, and syncing the mapping among them finishes in less than 10 seconds, in most cases only 3-5 seconds. At query stage, the data has already been aggregated at minute or at least second granularity. The query merge performance is not related to how much traffic happened before the mapping was generated; it is only affected by the sync duration, here only about 3 seconds. Due to that, in a minute-level aggregation topology, the merge only adds 1 or 2 relationship records to the whole topology relationship dataset. Considering that a topology of over 100 services has over 500 relationship records per minute, the extra payload of this query merge is very limited and affordable. This feature is significant in a large and heavily loaded distributed system, as we don’t need to be concerned about its scaling capability. Some fork versions choose to update the existing client service-\u0026gt;peer network address records to client service-\u0026gt;server service after the new mapping for the peer is detected, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include: (1) Restriction of the technology. For applications written in Golang or C++, there is no easy way to do auto instrumentation through an agent, as there is in Java or .NET. So, the code may not be instrumented automatically. (2) The middleware, such as an MQ or database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t been instrumented, the server-side span wouldn’t get any reference through the RPC context, so it would simply use the peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need any special handling. The STAM analysis core simply keeps generating client service-\u0026gt;peer network address traffic. As no mapping for the peer network address is ever generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Existing Uninstrumented Nodes, there is one complex and special case: the uninstrumented node has the capability to propagate headers from downstream to upstream, which is typical of proxies such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. 
As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. 
https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","excerpt":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System …","ref":"/docs/main/v9.5.0/en/papers/stam/","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System"},{"body":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. 
Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. 
By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. 
In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. 
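As a rough, hypothetical sketch (not the actual SkyWalking implementation), the query-stage merge can be thought of as rewriting edge targets through the alias mapping and de-duplicating the results:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch of the query-stage merge: relationship records whose target is still
// a raw peer network address are rewritten through the alias mapping, then duplicates collapse.
public class TopologyQueryMerger {
    public List<String[]> merge(List<String[]> edges, Map<String, String> peerToService) {
        Map<String, String[]> merged = new HashMap<>();
        for (String[] edge : edges) {                                        // edge = {source, target}
            String target = peerToService.getOrDefault(edge[1], edge[1]);    // replace address by alias
            merged.putIfAbsent(edge[0] + "->" + target, new String[]{edge[0], target});
        }
        return new ArrayList<>(merged.values());
    }
}
```

For example, an edge pointing at a raw address such as 10.0.0.5:8080 and an edge pointing at the server service collapse into one record once that address's alias is known.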
And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. 
So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","excerpt":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System …","ref":"/docs/main/v9.6.0/en/papers/stam/","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System"},{"body":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. 
More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. 
So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. 
Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. 
After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. 
As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","excerpt":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System …","ref":"/docs/main/v9.7.0/en/papers/stam/","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System"},{"body":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the development and testing environment. 
The standalone mode is running as a standalone process by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 to access gRPC requests. if no errors occurred.\nAt the same time, the banyand-server would be listening on the 0.0.0.0:17913 to access HTTP requests. if no errors occurred. The HTTP server is used for CLI and Web UI.\n","excerpt":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the …","ref":"/docs/skywalking-banyandb/latest/installation/standalone/","title":"Standalone Mode"},{"body":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the development and testing environment. Once you unpack and extract the skywalking-banyandb-x.x.x-bin.tgz, you could startup BanyanDB server, the standalone mode is running as a standalone process.\n$ cd skywalking-banyandb-x.x.x-bin/bin $ ./banyand-server-static standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand server would be listening on the 0.0.0.0:17912 to access gRPC requests. if no errors occurred.\nAt the same time, the banyand server would be listening on the 0.0.0.0:17913 to access HTTP requests. if no errors occurred. The HTTP server is used for CLI and Web UI.\n","excerpt":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the …","ref":"/docs/skywalking-banyandb/next/installation/standalone/","title":"Standalone Mode"},{"body":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the development and testing environment. The standalone mode is running as a standalone process by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 to access gRPC requests. if no errors occurred.\nAt the same time, the banyand-server would be listening on the 0.0.0.0:17913 to access HTTP requests. if no errors occurred. The HTTP server is used for CLI and Web UI.\n","excerpt":"Standalone Mode The standalone mode is the simplest way to run Banyand. 
It is suitable for the …","ref":"/docs/skywalking-banyandb/v0.5.0/installation/standalone/","title":"Standalone Mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/latest/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/next/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode In different deployment tools, such as k8s, you may need different startup modes. We provide two other optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provide services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization, and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables), as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. 
Rather, it watches out for the ElasticSearch indexes, MySQL, and TiDB tables, starts to listen, and provide services. In other words, the OAP server would anticipate having another OAP server to carry out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode In different deployment tools, such as k8s, you may need different startup modes. We …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. 
You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. 
We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. 
We …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow Operator installation instrument to install the operator.\nDefine Storage with default setting  sample.yaml(use the internal type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:internalversion:7.5.1instances:3image:docker.elastic.co/elasticsearch/elasticsearch:7.5.1security:user:secretName:defaulttls:truesample.yaml(use the external type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:externaladdress:\u0026#34;https://elasticsearch\u0026#34;security:user:secretName:defaultDeploy Storage  Deploy the Storage use the below command:  $ kubectl apply -f sample.yaml Check the Storage in Kubernetes:   If you deploy the storage with the internal type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample 3 elasticsearch 7.5.1 internal  If you deploy the storage with the external type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample elasticsearch 7.5.1 external Check the Statefulset in Kubernetes:  $ kubectl get statefulset NAME READY AGE sample-elasticsearch 3/3 7s Specify Storage Name in OAP server Here we modify the default OAP server configuration file,the new yaml file as follows:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServermetadata:name:defaultspec:version:9.5.0instances:1image:apache/skywalking-oap-server:9.5.0service:template:type:ClusterIPstorage:name:sample Deploy the OAP server use the new yaml file:  $ kubectl apply -f oap.yaml Check the OAP server in Kubernetes:  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS sample 1 1 sample-oap.default Check whether the pod generated by OAP server is running correctly.  
$ kubectl get pod -l app=oap NAME READY STATUS RESTARTS AGE sample-oap-5bc79567b7-tkw6q 1/1 Running 0 6m31s ","excerpt":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow …","ref":"/docs/skywalking-swck/latest/examples/storage/","title":"Storage Usage"},{"body":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow Operator installation instrument to install the operator.\nDefine Storage with default setting  sample.yaml(use the internal type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:internalversion:7.5.1instances:3image:docker.elastic.co/elasticsearch/elasticsearch:7.5.1security:user:secretName:defaulttls:truesample.yaml(use the external type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:externaladdress:\u0026#34;https://elasticsearch\u0026#34;security:user:secretName:defaultDeploy Storage  Deploy the Storage use the below command:  $ kubectl apply -f sample.yaml Check the Storage in Kubernetes:   If you deploy the storage with the internal type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample 3 elasticsearch 7.5.1 internal  If you deploy the storage with the external type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample elasticsearch 7.5.1 external Check the Statefulset in Kubernetes:  $ kubectl get statefulset NAME READY AGE sample-elasticsearch 3/3 7s Specify Storage Name in OAP server Here we modify the default OAP server configuration file,the new yaml file as follows:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServermetadata:name:defaultspec:version:9.5.0instances:1image:apache/skywalking-oap-server:9.5.0service:template:type:ClusterIPstorage:name:sample Deploy the OAP server use the new yaml file:  $ kubectl apply -f oap.yaml Check the OAP server in Kubernetes:  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS sample 1 1 sample-oap.default Check whether the pod generated by OAP server is running correctly.  
$ kubectl get pod -l app=oap NAME READY STATUS RESTARTS AGE sample-oap-5bc79567b7-tkw6q 1/1 Running 0 6m31s ","excerpt":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow …","ref":"/docs/skywalking-swck/next/examples/storage/","title":"Storage Usage"},{"body":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow Operator installation instrument to install the operator.\nDefine Storage with default setting  sample.yaml(use the internal type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:internalversion:7.5.1instances:3image:docker.elastic.co/elasticsearch/elasticsearch:7.5.1security:user:secretName:defaulttls:truesample.yaml(use the external type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:externaladdress:\u0026#34;https://elasticsearch\u0026#34;security:user:secretName:defaultDeploy Storage  Deploy the Storage use the below command:  $ kubectl apply -f sample.yaml Check the Storage in Kubernetes:   If you deploy the storage with the internal type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample 3 elasticsearch 7.5.1 internal  If you deploy the storage with the external type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample elasticsearch 7.5.1 external Check the Statefulset in Kubernetes:  $ kubectl get statefulset NAME READY AGE sample-elasticsearch 3/3 7s Specify Storage Name in OAP server Here we modify the default OAP server configuration file,the new yaml file as follows:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServermetadata:name:defaultspec:version:9.5.0instances:1image:apache/skywalking-oap-server:9.5.0service:template:type:ClusterIPstorage:name:sample Deploy the OAP server use the new yaml file:  $ kubectl apply -f oap.yaml Check the OAP server in Kubernetes:  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS sample 1 1 sample-oap.default Check whether the pod generated by OAP server is running correctly.  $ kubectl get pod -l app=oap NAME READY STATUS RESTARTS AGE sample-oap-5bc79567b7-tkw6q 1/1 Running 0 6m31s ","excerpt":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow …","ref":"/docs/skywalking-swck/v0.9.0/examples/storage/","title":"Storage Usage"},{"body":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling backend cluster capacity up and down Changing backend cluster configuration Injecting configuration into the target cluster. Securing traffic between target clusters and backend cluster, or between backend cluster with TLS certificate  Motivation If the user of SkyWalking decided to deploy it into Kubernetes, there’re some critical challenges for them.\nFirst of them is the complex of deployment, it doesn’t only mean the OAP server and storage cluster, but also include configuring target cluster to send data to backend. Then they might struggle to keep all of them reliable. The size of the data transferred is very big and the cost of data stored is very high. 
The user usually faces some problems, for instance, OAP server stuck, Elasticsearch cluster GC rate sharply increases, the system load of some OAP instances is much more than others, and etc.\nWith the help of CRDs and the Controller, we can figure out the above problems and give users a more pleasing experience when using SWCK.\nProposal Production Design I proposed two crucial components for SWCK, backend operator and target injector. The first one intends to solve the problems of the backend operation, and another focus on simplifying the configuration of the target cluster.\nThey should be built as two separate binary/image, then are installed according to user’s requirements.\nBackend Operator The operator might be a GO application that manages and monitors other components, for example, OAP pods, storage pods(ES, MySQL, and etc.), ingress/entry and configuration.\nIt should be capable of HA, performance, and scalability.\nIt should also have the following capabilities:\n Defining CRDs for provisioning and configuring Provisioning backend automatically Splitting OAP instances according to their type(L1/L2), improving the ratio of them. Performance tuning of OAP and storage. Updating configuration dynamically, irrespectively it’s dynamic or not. Upgrading mirror version seamlessly. Health checking and failure recovery Collecting and analyzing metrics and logs, abnormal detection Horizontal scaling and scheduling tuning. Loadbalancing input gPRC stream and GraphQL querying. Supporting externally hosted storage service. Securing traffic  The above items should be accomplished in several versions/releases. The developer should sort the priority of them and grind the design.\nTarget injector The injector can inject agent lib and configuration into the target cluster automatically, enable/disable distributed tracing according to labels marked on resources or namespace.\nIt also integrates backend with service mesh platform, for example, Istio.\nIt should be a GO application and a GO lib to be invoked by swctl to generate pod YAMLs manually.\nTechnology Selection  Development Language: GO Operator dev tool: TBD Building tool: Make(Docker for windows) Installation: Helm3 chart Repository: github.com/apache/skywalking-swck CI: Github action  ","excerpt":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling …","ref":"/docs/skywalking-swck/latest/design/proposal/","title":"Summary"},{"body":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling backend cluster capacity up and down Changing backend cluster configuration Injecting configuration into the target cluster. Securing traffic between target clusters and backend cluster, or between backend cluster with TLS certificate  Motivation If the user of SkyWalking decided to deploy it into Kubernetes, there’re some critical challenges for them.\nFirst of them is the complex of deployment, it doesn’t only mean the OAP server and storage cluster, but also include configuring target cluster to send data to backend. Then they might struggle to keep all of them reliable. The size of the data transferred is very big and the cost of data stored is very high. 
The user usually faces some problems, for instance, OAP server stuck, Elasticsearch cluster GC rate sharply increases, the system load of some OAP instances is much more than others, and etc.\nWith the help of CRDs and the Controller, we can figure out the above problems and give users a more pleasing experience when using SWCK.\nProposal Production Design I proposed two crucial components for SWCK, backend operator and target injector. The first one intends to solve the problems of the backend operation, and another focus on simplifying the configuration of the target cluster.\nThey should be built as two separate binary/image, then are installed according to user’s requirements.\nBackend Operator The operator might be a GO application that manages and monitors other components, for example, OAP pods, storage pods(ES, MySQL, and etc.), ingress/entry and configuration.\nIt should be capable of HA, performance, and scalability.\nIt should also have the following capabilities:\n Defining CRDs for provisioning and configuring Provisioning backend automatically Splitting OAP instances according to their type(L1/L2), improving the ratio of them. Performance tuning of OAP and storage. Updating configuration dynamically, irrespectively it’s dynamic or not. Upgrading mirror version seamlessly. Health checking and failure recovery Collecting and analyzing metrics and logs, abnormal detection Horizontal scaling and scheduling tuning. Loadbalancing input gPRC stream and GraphQL querying. Supporting externally hosted storage service. Securing traffic  The above items should be accomplished in several versions/releases. The developer should sort the priority of them and grind the design.\nTarget injector The injector can inject agent lib and configuration into the target cluster automatically, enable/disable distributed tracing according to labels marked on resources or namespace.\nIt also integrates backend with service mesh platform, for example, Istio.\nIt should be a GO application and a GO lib to be invoked by swctl to generate pod YAMLs manually.\nTechnology Selection  Development Language: GO Operator dev tool: TBD Building tool: Make(Docker for windows) Installation: Helm3 chart Repository: github.com/apache/skywalking-swck CI: Github action  ","excerpt":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling …","ref":"/docs/skywalking-swck/next/design/proposal/","title":"Summary"},{"body":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling backend cluster capacity up and down Changing backend cluster configuration Injecting configuration into the target cluster. Securing traffic between target clusters and backend cluster, or between backend cluster with TLS certificate  Motivation If the user of SkyWalking decided to deploy it into Kubernetes, there’re some critical challenges for them.\nFirst of them is the complex of deployment, it doesn’t only mean the OAP server and storage cluster, but also include configuring target cluster to send data to backend. Then they might struggle to keep all of them reliable. The size of the data transferred is very big and the cost of data stored is very high. 
The user usually faces some problems, for instance, OAP server stuck, Elasticsearch cluster GC rate sharply increases, the system load of some OAP instances is much more than others, and etc.\nWith the help of CRDs and the Controller, we can figure out the above problems and give users a more pleasing experience when using SWCK.\nProposal Production Design I proposed two crucial components for SWCK, backend operator and target injector. The first one intends to solve the problems of the backend operation, and another focus on simplifying the configuration of the target cluster.\nThey should be built as two separate binary/image, then are installed according to user’s requirements.\nBackend Operator The operator might be a GO application that manages and monitors other components, for example, OAP pods, storage pods(ES, MySQL, and etc.), ingress/entry and configuration.\nIt should be capable of HA, performance, and scalability.\nIt should also have the following capabilities:\n Defining CRDs for provisioning and configuring Provisioning backend automatically Splitting OAP instances according to their type(L1/L2), improving the ratio of them. Performance tuning of OAP and storage. Updating configuration dynamically, irrespectively it’s dynamic or not. Upgrading mirror version seamlessly. Health checking and failure recovery Collecting and analyzing metrics and logs, abnormal detection Horizontal scaling and scheduling tuning. Loadbalancing input gPRC stream and GraphQL querying. Supporting externally hosted storage service. Securing traffic  The above items should be accomplished in several versions/releases. The developer should sort the priority of them and grind the design.\nTarget injector The injector can inject agent lib and configuration into the target cluster automatically, enable/disable distributed tracing according to labels marked on resources or namespace.\nIt also integrates backend with service mesh platform, for example, Istio.\nIt should be a GO application and a GO lib to be invoked by swctl to generate pod YAMLs manually.\nTechnology Selection  Development Language: GO Operator dev tool: TBD Building tool: Make(Docker for windows) Installation: Helm3 chart Repository: github.com/apache/skywalking-swck CI: Github action  ","excerpt":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling …","ref":"/docs/skywalking-swck/v0.9.0/design/proposal/","title":"Summary"},{"body":"Support ActiveMQ classic Monitoring Motivation Apache ActiveMQ Classic is a popular and powerful open source messaging and Integration Patterns server. 
It supports many Cross Language Clients and Protocols, comes with easy to use Enterprise Integration Patterns and many advanced features.\nNow I want to add ActiveMQ Classic monitoring via the OpenTelemetry Collector which fetches metrics from jmx prometheus exporter run as a Java Agent.\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes Apache ActiveMQ Classic has extensive support for JMX to allow you to monitor and control the behavior of the broker via the JMX MBeans.\nJmx prometheus exporter collects metrics data from ActiveMQ classic, this exporter is intended to be run as a Java Agent, exposing a HTTP server and serving metrics of the local JVM.\nUsing openTelemetry receiver to fetch these metrics to SkyWalking OAP server.\nActiveMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     System Load Average Count meter_activemq_cluster_system_load_average The average system load, range:[0, 10000]. JMX Prometheus Exporter   Thread Count Count meter_activemq_cluster_thread_count Threads currently used by the JVM. JMX Prometheus Exporter   Init Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_init The initial amount of heap memory available. JMX Prometheus Exporter   Committed Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_committed The memory is guaranteed to be available for the JVM to use. JMX Prometheus Exporter   Used Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_used The amount of JVM heap memory currently in use. JMX Prometheus Exporter   Max Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_max The maximum possible size of the heap memory. JMX Prometheus Exporter   GC G1 Old Collection Count Count meter_activemq_cluster_gc_g1_old_collection_count The gc count of G1 Old Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Count Count meter_activemq_cluster_gc_g1_young_collection_count The gc count of G1 Young Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Old Collection Time ms meter_activemq_cluster_gc_g1_old_collection_time The gc time spent in G1 Old Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Time ms meter_activemq_cluster_gc_g1_young_collection_time The gc time spent in G1 Young Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC Parallel Old Collection Count Count meter_activemq_cluster_gc_parallel_old_collection_count The gc count of Parallel Old Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Count Count meter_activemq_cluster_gc_parallel_young_collection_count The gc count of Parallel Young Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Old Collection Time ms meter_activemq_cluster_gc_parallel_old_collection_time The gc time spent in Parallel Old Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Time ms meter_activemq_cluster_gc_parallel_young_collection_time The gc time spent in Parallel Young Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   Enqueue Rate Count/s meter_activemq_cluster_enqueue_rate Number of messages that have been sent to the cluster per second(JDK[6,8]). JMX Prometheus Exporter   Dequeue Rate Count/s meter_activemq_cluster_dequeue_rate Number of messages that have been acknowledged or discarded on the cluster per second. 
JMX Prometheus Exporter   Dispatch Rate Count/s meter_activemq_cluster_dispatch_rate Number of messages that has been delivered to consumers per second. JMX Prometheus Exporter   Expired Rate Count/s meter_activemq_cluster_expired_rate Number of messages that have been expired per second. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_cluster_average_enqueue_time The average time a message was held on this cluster. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_cluster_max_enqueue_time The max time a message was held on this cluster. JMX Prometheus Exporter    ActiveMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime sec meter_activemq_broker_uptime Uptime of the broker in day. JMX Prometheus Exporter   State  meter_activemq_broker_state If slave broker 1 else 0. JMX Prometheus Exporter   Current Connections Count meter_activemq_broker_current_connections The number of clients connected to the broker currently. JMX Prometheus Exporter   Current Producer Count Count meter_activemq_broker_current_producer_count The number of producers currently attached to the broker. JMX Prometheus Exporter   Current Consumer Count Count meter_activemq_broker_current_consumer_count The number of consumers consuming messages from the broker. JMX Prometheus Exporter   Producer Count Count meter_activemq_broker_producer_count Number of message producers active on destinations. JMX Prometheus Exporter   Consumer Count Count meter_activemq_broker_consumer_count Number of message consumers subscribed to destinations. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_broker_enqueue_count The total number of messages sent to the broker. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_broker_dequeue_count The total number of messages the broker has delivered to consumers. JMX Prometheus Exporter   Enqueue Rate Count/sec meter_activemq_broker_enqueue_rate The total number of messages sent to the broker per second. JMX Prometheus Exporter   Dequeue Rate Count/sec meter_activemq_broker_dequeue_rate The total number of messages the broker has delivered to consumers per second. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_broker_memory_percent_usage Percentage of configured memory used by the broker. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_broker_memory_percent_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Limit Bytes meter_activemq_broker_memory_limit Memory limited used for holding undelivered messages before paging to temporary storage. JMX Prometheus Exporter   Store Percent Usage % meter_activemq_broker_store_percent_usage Percentage of available disk space used for persistent message storage. JMX Prometheus Exporter   Store Limit Bytes meter_activemq_broker_store_limit Disk limited used for persistent messages before producers are blocked. JMX Prometheus Exporter   Temp Percent Usage Bytes meter_activemq_broker_temp_percent_usage Percentage of available disk space used for non-persistent message storage. JMX Prometheus Exporter   Temp Limit Bytes meter_activemq_broker_temp_limit Disk limited used for non-persistent messages and temporary data before producers are blocked. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_broker_average_message_size Average message size on this broker. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_broker_max_message_size Max message size on this broker. 
JMX Prometheus Exporter   Queue Size Count meter_activemq_broker_queue_size Number of messages on this broker that have been dispatched but not acknowledged. JMX Prometheus Exporter    ActiveMQ Destination Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Producer Count Count meter_activemq_destination_producer_count Number of producers attached to this destination. JMX Prometheus Exporter   Consumer Count Count meter_activemq_destination_consumer_count Number of consumers subscribed to this destination. JMX Prometheus Exporter   Topic Consumer Count Count meter_activemq_destination_topic_consumer_count Number of consumers subscribed to the topics. JMX Prometheus Exporter   Queue Size Count meter_activemq_destination_queue_size The number of messages that have not been acknowledged by a consumer. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_destination_memory_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_destination_memory_percent_usage Percentage of configured memory used by the destination. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_destination_enqueue_count The number of messages sent to the destination. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_destination_dequeue_count The number of messages the destination has delivered to consumers. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_destination_average_enqueue_time The average time a message was held on this destination. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_destination_max_enqueue_time The max time a message was held on this destination. JMX Prometheus Exporter   Dispatch Count Count meter_activemq_destination_dispatch_count Number of messages that has been delivered to consumers. JMX Prometheus Exporter   Expired Count Count meter_activemq_destination_expired_count Number of messages that have been expired. JMX Prometheus Exporter   Inflight Count Count meter_activemq_destination_inflight_count Number of messages that have been dispatched to but not acknowledged by consumers. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_destination_average_message_size Average message size on this destination. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_destination_max_message_size Max message size on this destination. JMX Prometheus Exporter    Imported Dependencies libs and their licenses. No new dependency.\nCompatibility no breaking changes.\nGeneral usage docs ","excerpt":"Support ActiveMQ classic Monitoring Motivation Apache ActiveMQ Classic is a popular and powerful …","ref":"/docs/main/next/en/swip/swip-8/","title":"Support ActiveMQ classic Monitoring"},{"body":"Support available layers of service in the topology. Motivation UI could jump to the service dashboard and query service hierarchy from the topology node. For now topology node includes name and ID but without layer, as the service could have multiple layers, the limitation is that it is only works on the current layer which the topology represents:\n UI could not jump into another layer\u0026rsquo;s dashboard of the service. UI could not query the service hierarchy from the topology node if the node is not in current layer.  
Here are typical use cases: should have a chance to jump into another layer\u0026rsquo;s dashboard of the service:\n In the mesh topology, mesh(layer MESH) and mesh-dp(layer MESH_DP) share a similar topology, one node will have two layers. In the mesh topology, agent(layer GENERAL) + virtual database(layer VIRTUAL_DATABASE), the node is in different layers.  Both of these two cases have hybrid layer topology. If we could support that, we could have a better x-layer interaction.\nArchitecture Graph There is no significant architecture-level change.\nPropose Changes Add the layers info into topology node:\n When building the topology node fetch the layers info from the service according to the service id. Return layers info in the Node when query the topology.  Imported Dependencies libs and their licenses. No new library is planned to be added to the codebase.\nCompatibility About the protocol, there should be no breaking changes, but enhancements only. New field layers is going to be added to the Node in the query protocol topology.graphqls.\ntypeNode{# The service ID of the node.id:ID!# The literal name of the #id.name:String!# The type name may be# 1. The service provider/middleware tech, such as: Tomcat, SpringMVC# 2. Conjectural Service, e.g. MySQL, Redis, Kafkatype:String# It is a conjecture node or real node, to represent a service or endpoint.isReal:Boolean!# The layers of the service.layers:[String!]!}General usage docs This proposal doesn\u0026rsquo;t impact the end user in any way of using SkyWalking. The remarkable change will be in the UI topology map, users could jump into the proper layer\u0026rsquo;s service dashboard and query the service hierarchy from the topology node.\n","excerpt":"Support available layers of service in the topology. Motivation UI could jump to the service …","ref":"/docs/main/next/en/swip/swip-4/","title":"Support available layers of service in the topology."},{"body":"Support ClickHouse Monitoring Motivation ClickHouse is a high-performance, column-oriented SQL database management system (DBMS) for online analytical processing (OLAP). It is available as both an open-source software and a cloud offering.\nNow I want to add ClickHouse monitoring via the OpenTelemetry Collector which fetches metrics from it\u0026rsquo;s own HTTP endpoint to expose metrics data for Prometheus (since ClickHouse v20.1.2.4). Clickhouse Exporter used only for old ClickHouse versions, modern versions have embedded prometheus endpoint.\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes ClickHouse expose own metrics via HTTP endpoint to opentelemetry collector, using skyWalking openTelemetry receiver to fetch these metrics.\nThe exposed metrics are from the system.metrics table / the system.events table / the system.asynchronous_metrics table.\nClickHouse Instance Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CpuUsage count meter_clickhouse_instance_cpu_usage CPU time spent seen by OS per second(according to ClickHouse.system.dashboard.CPU Usage (cores)). ClickHouse   MemoryUsage percentage meter_clickhouse_instance_memory_usage Total amount of memory (bytes) allocated by the server/ total amount of OS memory. ClickHouse   MemoryAvailable percentage meter_clickhouse_instance_memory_available Total amount of memory (bytes) available for program / total amount of OS memory. ClickHouse   Uptime sec meter_clickhouse_instance_uptime The server uptime in seconds. 
It includes the time spent for server initialization before accepting connections. ClickHouse   Version string meter_clickhouse_instance_version Version of the server in a single integer number in base-1000. ClickHouse   FileOpen count meter_clickhouse_instance_file_open Number of files opened. ClickHouse    ClickHouse Network Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     TcpConnections count meter_clickhouse_instance_tcp_connectionsmeter_clickhouse_tcp_connections Number of connections to TCP server. ClickHouse   MysqlConnections count meter_clickhouse_instance_mysql_connectionsmeter_clickhouse_mysql_connections Number of client connections using MySQL protocol. ClickHouse   HttpConnections count meter_clickhouse_instance_http_connectionsmeter_clickhouse_mysql_connections Number of connections to HTTP server. ClickHouse   InterserverConnections count meter_clickhouse_instance_interserver_connectionsmeter_clickhouse_interserver_connections Number of connections from other replicas to fetch parts. ClickHouse   PostgresqlConnections count meter_clickhouse_instance_postgresql_connectionsmeter_clickhouse_postgresql_connections Number of client connections using PostgreSQL protocol. ClickHouse   ReceiveBytes bytes meter_clickhouse_instance_network_receive_bytesmeter_clickhouse_network_receive_bytes Total number of bytes received from network. ClickHouse   SendBytes bytes meter_clickhouse_instance_network_send_bytesmeter_clickhouse_network_send_bytes Total number of bytes send to network. ClickHouse    ClickHouse Query Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     QueryCount count meter_clickhouse_instance_querymeter_clickhouse_query Number of executing queries. ClickHouse   SelectQueryCount count meter_clickhouse_instance_query_selectmeter_clickhouse_query_select Number of executing queries, but only for SELECT queries. ClickHouse   InsertQueryCount count meter_clickhouse_instance_query_insertmeter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   SelectQueryRate count/sec meter_clickhouse_instance_query_select_ratemeter_clickhouse_query_select_rate Number of SELECT queries per second. ClickHouse   InsertQueryRate count/sec meter_clickhouse_instance_query_insert_ratemeter_clickhouse_query_insert_rate Number of INSERT queries per second. ClickHouse   Querytime microsec meter_clickhouse_instance_querytime_microsecondsmeter_clickhouse_querytime_microseconds Total time of all queries. ClickHouse   SelectQuerytime microsec meter_clickhouse_instance_querytime_select_microsecondsmeter_clickhouse_querytime_select_microseconds Total time of SELECT queries. ClickHouse   InsertQuerytime microsec meter_clickhouse_instance_querytime_insert_microsecondsmeter_clickhouse_querytime_insert_microseconds Total time of INSERT queries. ClickHouse   OtherQuerytime microsec meter_clickhouse_instance_querytime_other_microsecondsmeter_clickhouse_querytime_other_microseconds Total time of queries that are not SELECT or INSERT. ClickHouse   QuerySlowCount count meter_clickhouse_instance_query_slowmeter_clickhouse_query_slow Number of reads from a file that were slow. ClickHouse    ClickHouse Insertion Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     InsertQueryCount count meter_clickhouse_instance_query_insertmeter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. 
ClickHouse   InsertedRowCount count meter_clickhouse_instance_inserted_rowsmeter_clickhouse_inserted_rows Number of rows INSERTed to all tables. ClickHouse   InsertedBytes bytes meter_clickhouse_instance_inserted_bytesmeter_clickhouse_inserted_bytes Number of bytes INSERTed to all tables. ClickHouse   DelayedInsertCount count meter_clickhouse_instance_delayed_insertmeter_clickhouse_delayed_insert Number of times the INSERT of a block to a MergeTree table was throttled due to high number of active data parts for partition. ClickHouse    ClickHouse Replicas Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     ReplicatedChecks count meter_clickhouse_instance_replicated_checksmeter_clickhouse_replicated_checks Number of data parts checking for consistency. ClickHouse   ReplicatedFetch count meter_clickhouse_instance_replicated_fetchmeter_clickhouse_replicated_fetch Number of data parts being fetched from replica. ClickHouse   ReplicatedSend count meter_clickhouse_instance_replicated_sendmeter_clickhouse_replicated_send Number of data parts being sent to replicas. ClickHouse    ClickHouse MergeTree Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     BackgroundMergeCount count meter_clickhouse_instance_background_mergemeter_clickhouse_background_merge Number of executing background merges. ClickHouse   MergeRows count meter_clickhouse_instance_merge_rowsmeter_clickhouse_merge_rows Rows read for background merges. This is the number of rows before merge. ClickHouse   MergeUncompressedBytes bytes meter_clickhouse_instance_merge_uncompressed_bytesmeter_clickhouse_merge_uncompressed_bytes Uncompressed bytes (for columns as they stored in memory) that was read for background merges. This is the number before merge. ClickHouse   MoveCount count meter_clickhouse_instance_movemeter_clickhouse_move Number of currently executing moves. ClickHouse   PartsActive Count meter_clickhouse_instance_parts_activemeter_clickhouse_parts_active Active data part, used by current and upcoming SELECTs. ClickHouse   MutationsCount count meter_clickhouse_instance_mutationsmeter_clickhouse_mutations Number of mutations (ALTER DELETE/UPDATE). ClickHouse    ClickHouse Kafka Table Engine Supported Metrics When table engine works with Apache Kafka.\nKafka lets you:\n Publish or subscribe to data flows. Organize fault-tolerant storage. Process streams as they become available.     Monitoring Panel Unit Metric Name Description Data Source     KafkaMessagesRead count meter_clickhouse_instance_kafka_messages_readmeter_clickhouse_kafka_messages_read Number of Kafka messages already processed by ClickHouse. ClickHouse   KafkaWrites count meter_clickhouse_instance_kafka_writesmeter_clickhouse_kafka_writes Number of writes (inserts) to Kafka tables. ClickHouse   KafkaConsumers count meter_clickhouse_instance_kafka_consumersmeter_clickhouse_kafka_consumers Number of active Kafka consumers. ClickHouse   KafkProducers count meter_clickhouse_instance_kafka_producersmeter_clickhouse_kafka_producers Number of active Kafka producer created. ClickHouse    ClickHouse ZooKeeper Supported Metrics ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted.\n   Monitoring Panel Unit Metric Name Description Data Source     ZookeeperSession count meter_clickhouse_instance_zookeeper_sessionmeter_clickhouse_zookeeper_session Number of sessions (connections) to ZooKeeper. 
ClickHouse   ZookeeperWatch count meter_clickhouse_instance_zookeeper_watchmeter_clickhouse_zookeeper_watch Number of watches (event subscriptions) in ZooKeeper. ClickHouse   ZookeeperBytesSent bytes meter_clickhouse_instance_zookeeper_bytes_sentmeter_clickhouse_zookeeper_bytes_sent Number of bytes send over network while communicating with ZooKeeper. ClickHouse   ZookeeperBytesReceive bytes meter_clickhouse_instance_zookeeper_bytes_receivedmeter_clickhouse_zookeeper_bytes_received Number of bytes send over network while communicating with ZooKeeper. ClickHouse    ClickHouse Keeper Supported Metrics ClickHouse Keeper provides the coordination system for data replication and distributed DDL queries execution. ClickHouse Keeper is compatible with ZooKeeper.\nClickHouse Keeper can work in embedded mode or standalone cluster mode, the metrics below are for embedded mode.\n   Monitoring Panel Unit Metric Name Description Data Source     KeeperAliveConnections count meter_clickhouse_instance_keeper_connections_alivemeter_clickhouse_keeper_connections_alive Number of alive connections for embedded ClickHouse Keeper. ClickHouse   KeeperOutstandingRequets count meter_clickhouse_instance_keeper_outstanding_requestsmeter_clickhouse_keeper_outstanding_requests Number of outstanding requests for embedded ClickHouse Keeper. ClickHouse    Imported Dependencies libs and their licenses. No new dependency.\nCompatibility no breaking changes.\nGeneral usage docs ","excerpt":"Support ClickHouse Monitoring Motivation ClickHouse is a high-performance, column-oriented SQL …","ref":"/docs/main/next/en/swip/swip-5/","title":"Support ClickHouse Monitoring"},{"body":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. 
\u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; 
static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","excerpt":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/customize-enhance-trace/","title":"Support custom enhance"},{"body":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. 
The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag 
key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      
","excerpt":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/customize-enhance-trace/","title":"Support custom enhance"},{"body":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; 
operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. 
The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","excerpt":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/customize-enhance-trace/","title":"Support custom enhance"},{"body":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; 
\u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of 
the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","excerpt":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/customize-enhance-trace/","title":"Support custom enhance"},{"body":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. 
\u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; 
static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","excerpt":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/customize-enhance-trace/","title":"Support custom enhance"},{"body":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. 
You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","excerpt":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/","title":"Support custom trace ignore"},{"body":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","excerpt":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/","title":"Support custom trace ignore"},{"body":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. 
Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","excerpt":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/","title":"Support custom trace ignore"},{"body":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","excerpt":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/","title":"Support custom trace ignore"},{"body":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","excerpt":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/","title":"Support custom trace ignore"},{"body":"Support RocketMQ Monitoring Motivation RocketMQ is a cloud native messaging and streaming platform, making it simple to build event-driven applications. 
Now that Skywalking can monitor OpenTelemetry metrics, I want to add RocketMQ monitoring via the OpenTelemetry Collector, which fetches metrics from the RocketMQ Exporter\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes rocketmq-exporter collects metrics from RocketMQ and transport the data to OpenTelemetry collector, using SkyWalking openTelemetry receiver to receive these metrics。 Provide cluster, broker, and topic dimensions monitoring.\nRocketMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Messages Produced Today Count meter_rocketmq_cluster_messages_produced_today The number of cluster messages produced today. RocketMQ Exporter   Messages Consumed Today Count meter_rocketmq_cluster_messages_consumed_today The number of cluster messages consumed today. RocketMQ Exporter   Total Producer Tps Msg/sec meter_rocketmq_cluster_total_producer_tps The number of messages produced per second. RocketMQ Exporter   Total Consume Tps Msg/sec meter_rocketmq_cluster_total_consumer_tps The number of messages consumed per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_cluster_producer_message_size The max size of a message produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_cluster_consumer_message_size The max size of the consumed message per second. RocketMQ Exporter   Messages Produced Until Yesterday Count meter_rocketmq_cluster_messages_produced_until_yesterday The total number of messages put until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Messages Consumed Until Yesterday Count meter_rocketmq_cluster_messages_consumed_until_yesterday The total number of messages read until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Max Consumer Latency ms meter_rocketmq_cluster_max_consumer_latency The max number of consumer latency. RocketMQ Exporter   Max CommitLog Disk Ratio % meter_rocketmq_cluster_max_commitLog_disk_ratio The max utilization ratio of the commit log disk. RocketMQ Exporter   CommitLog Disk Ratio % meter_rocketmq_cluster_commitLog_disk_ratio The utilization ratio of the commit log disk per broker IP. RocketMQ Exporter   Pull ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_pull_threadPool_queue_head_wait_time The wait time in milliseconds for pulling threadPool queue per broker IP. RocketMQ Exporter   Send ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_send_threadPool_queue_head_wait_time The wait time in milliseconds for sending threadPool queue per broker IP. RocketMQ Exporter   Topic Count Count meter_rocketmq_cluster_topic_count The number of topics that received messages from the producer. RocketMQ Exporter   Broker Count Count meter_rocketmq_cluster_broker_count The number of brokers that received messages from the producer. RocketMQ Exporter    RocketMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Produce TPS Msg/sec meter_rocketmq_broker_produce_tps The number of broker produces messages per second. RocketMQ Exporter   Consume QPS Msg/sec meter_rocketmq_broker_consume_qps The number of broker consumes messages per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_broker_producer_message_size The max size of the messages produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_broker_consumer_message_size The max size of the messages consumed per second. 
RocketMQ Exporter    RocketMQ Topic Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Max Producer Message Size Byte meter_rocketmq_topic_max_producer_message_size The maximum number of messages produced. RocketMQ Exporter   Max Consumer Message Size Byte meter_rocketmq_topic_max_consumer_message_size The maximum number of messages consumed. RocketMQ Exporter   Consumer Latency ms meter_rocketmq_topic_consumer_latency Consumption delay time of a consumer group. RocketMQ Exporter   Producer Tps Msg/sec meter_rocketmq_topic_producer_tps The number of messages produced per second. RocketMQ Exporter   Consumer Group Tps Msg/sec meter_rocketmq_topic_consumer_group_tps The number of messages consumed per second per consumer group. RocketMQ Exporter   Producer Offset Count meter_rocketmq_topic_producer_offset The max progress of a topic\u0026rsquo;s production message. RocketMQ Exporter   Consumer Group Offset Count meter_rocketmq_topic_consumer_group_offset The max progress of a topic\u0026rsquo;s consumption message per consumer group. RocketMQ Exporter   Producer Message Size Byte/sec meter_rocketmq_topic_producer_message_size The max size of messages produced per second. RocketMQ Exporter   Consumer Message Size Byte/sec meter_rocketmq_topic_consumer_message_size The max size of messages consumed per second. RocketMQ Exporter   Consumer Group_Count Count meter_rocketmq_topic_consumer_group_count The number of consumer groups. RocketMQ Exporter   Broker Count Count meter_rocketmq_topic_broker_count The number of topics that received messages from the producer. RocketMQ Exporter    Imported Dependencies libs and their licenses. No new dependency.\nCompatibility no breaking changes.\nGeneral usage docs This feature is out of the box.\n","excerpt":"Support RocketMQ Monitoring Motivation RocketMQ is a cloud native messaging and streaming platform, …","ref":"/docs/main/next/en/swip/swip-3/","title":"Support RocketMQ Monitoring"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Enable TLS  Enable (m)TLS on the OAP server side, read more on this documentation. Following the configuration to enable (m)TLS on the agent side.     Name Environment Variable Required Type Description     reporter.grpc.tls.enable SW_AGENT_REPORTER_GRPC_TLS_ENABLE TLS/mTLS Enable (m)TLS on the gRPC reporter.   reporter.grpc.tls.ca_path SW_AGENT_REPORTER_GRPC_TLS_CA_PATH TLS The path of the CA certificate file. eg: /path/to/ca.cert.   
reporter.grpc.tls.client.key_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_KEY_PATH mTLS The path of the client private key file, eg: /path/to/client.pem.   reporter.grpc.tls.client.client_cert_chain_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_CERT_CHAIN_PATH mTLS The path of the client certificate file, eg: /path/to/client.crt.   reporter.grpc.tls.insecure_skip_verify SW_AGENT_REPORTER_GRPC_TLS_INSECURE_SKIP_VERIFY TLS/mTLS Skip the server certificate and domain name verification.    ","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-go/latest/en/advanced-features/grpc-tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Enable TLS  Enable (m)TLS on the OAP server side, read more on this documentation. Following the configuration to enable (m)TLS on the agent side.     Name Environment Variable Required Type Description     reporter.grpc.tls.enable SW_AGENT_REPORTER_GRPC_TLS_ENABLE TLS/mTLS Enable (m)TLS on the gRPC reporter.   reporter.grpc.tls.ca_path SW_AGENT_REPORTER_GRPC_TLS_CA_PATH TLS The path of the CA certificate file. eg: /path/to/ca.cert.   reporter.grpc.tls.client.key_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_KEY_PATH mTLS The path of the client private key file, eg: /path/to/client.pem.   reporter.grpc.tls.client.client_cert_chain_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_CERT_CHAIN_PATH mTLS The path of the client certificate file, eg: /path/to/client.crt.   reporter.grpc.tls.insecure_skip_verify SW_AGENT_REPORTER_GRPC_TLS_INSECURE_SKIP_VERIFY TLS/mTLS Skip the server certificate and domain name verification.    ","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-go/next/en/advanced-features/grpc-tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). 
client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Enable TLS  Enable (m)TLS on the OAP server side, read more on this documentation. Following the configuration to enable (m)TLS on the agent side.     Name Environment Variable Required Type Description     reporter.grpc.tls.enable SW_AGENT_REPORTER_GRPC_TLS_ENABLE TLS/mTLS Enable (m)TLS on the gRPC reporter.   reporter.grpc.tls.ca_path SW_AGENT_REPORTER_GRPC_TLS_CA_PATH TLS The path of the CA certificate file. eg: /path/to/ca.cert.   reporter.grpc.tls.client.key_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_KEY_PATH mTLS The path of the client private key file, eg: /path/to/client.pem.   reporter.grpc.tls.client.client_cert_chain_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_CERT_CHAIN_PATH mTLS The path of the client certificate file, eg: /path/to/client.crt.   reporter.grpc.tls.insecure_skip_verify SW_AGENT_REPORTER_GRPC_TLS_INSECURE_SKIP_VERIFY TLS/mTLS Skip the server certificate and domain name verification.    ","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-go/v0.4.0/en/advanced-features/grpc-tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  
For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. 
In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. 
In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  
For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Supported Agent Configuration Options Below is the full list of supported configurations you can set to customize the agent behavior, please take some time to read the descriptions for what they can achieve.\n Usage: (Pass in intrusive setup)\n from skywalking import config, agent config.init(YourConfiguration=YourValue)) agent.start()  Usage: (Pass by environment variables)\n export SW_AGENT_YourConfiguration=YourValue Agent Core Configuration Options    Configuration Environment Variable Type Default Value Description     agent_collector_backend_services SW_AGENT_COLLECTOR_BACKEND_SERVICES \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; oap_host:oap_port The backend OAP server address, 11800 is default OAP gRPC port, 12800 is HTTP, Kafka ignores this option and uses kafka_bootstrap_servers option. This option should be changed accordingly with selected protocol   agent_protocol SW_AGENT_PROTOCOL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; grpc The protocol to communicate with the backend OAP, http, grpc or kafka, we highly suggest using grpc in production as it\u0026rsquo;s well optimized than http. The kafka protocol provides an alternative way to submit data to the backend.   agent_name SW_AGENT_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; Python Service Name The name of your awesome Python service   agent_instance_name SW_AGENT_INSTANCE_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; str(uuid.uuid1()).replace('-', \u0026lsquo;') The name of this particular awesome Python service instance   agent_namespace SW_AGENT_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The agent namespace of the Python service (available as tag and the suffix of service name)   kafka_bootstrap_servers SW_KAFKA_BOOTSTRAP_SERVERS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; localhost:9092 A list of host/port pairs to use for establishing the initial connection to your Kafka cluster. It is in the form of host1:port1,host2:port2,\u0026hellip; (used for Kafka reporter protocol)   kafka_namespace SW_KAFKA_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The kafka namespace specified by OAP side SW_NAMESPACE, prepends the following kafka topic names with a -.   
kafka_topic_management SW_KAFKA_TOPIC_MANAGEMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-managements Specifying Kafka topic name for service instance reporting and registering, this should be in sync with OAP   kafka_topic_segment SW_KAFKA_TOPIC_SEGMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-segments Specifying Kafka topic name for Tracing data, this should be in sync with OAP   kafka_topic_log SW_KAFKA_TOPIC_LOG \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-logs Specifying Kafka topic name for Log data, this should be in sync with OAP   kafka_topic_meter SW_KAFKA_TOPIC_METER \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-meters Specifying Kafka topic name for Meter data, this should be in sync with OAP   kafka_reporter_custom_configurations SW_KAFKA_REPORTER_CUSTOM_CONFIGURATIONS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The configs to init KafkaProducer, supports the basic arguments (whose type is either str, bool, or int) listed here This config only works from env variables, each one should be passed in SW_KAFKA_REPORTER_CONFIG_\u0026lt;KEY_NAME\u0026gt;   agent_force_tls SW_AGENT_FORCE_TLS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Use TLS for communication with SkyWalking OAP (no cert required)   agent_authentication SW_AGENT_AUTHENTICATION \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The authentication token to verify that the agent is trusted by the backend OAP, as for how to configure the backend, refer to the yaml.   agent_logging_level SW_AGENT_LOGGING_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; INFO The level of agent self-logs, could be one of CRITICAL, FATAL, ERROR, WARN(WARNING), INFO, DEBUG. Please turn on debug if an issue is encountered to find out what\u0026rsquo;s going on    Agent Core Danger Zone    Configuration Environment Variable Type Default Value Description     agent_collector_heartbeat_period SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 30 The agent will exchange heartbeat message with SkyWalking OAP backend every period seconds   agent_collector_properties_report_period_factor SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The agent will report service instance properties every factor * heartbeat period seconds default: 10*30 = 300 seconds   agent_instance_properties_json SW_AGENT_INSTANCE_PROPERTIES_JSON \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  A custom JSON string to be reported as service instance properties, e.g. {\u0026quot;key\u0026quot;: \u0026quot;value\u0026quot;}   agent_experimental_fork_support SW_AGENT_EXPERIMENTAL_FORK_SUPPORT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False The agent will restart itself in any os.fork()-ed child process. Important Note: it\u0026rsquo;s not suitable for short-lived processes as each one will create a new instance in SkyWalking dashboard in format of service_instance-child(pid). This feature may not work when a precise combination of gRPC + Python 3.7 + subprocess (not fork) is used together. The agent will output a warning log when using on Python 3.7 for such a reason.   agent_queue_timeout SW_AGENT_QUEUE_TIMEOUT \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1 DANGEROUS - This option controls the interval of each bulk report from telemetry data queues Do not modify unless you have evaluated its impact given your service load.    
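To make the tables above concrete, here is a small sketch of the intrusive (in-code) setup shown earlier, passing a handful of the core options to config.init before starting the agent. The OAP address, service name, instance name and logging level are illustrative values, not defaults.

```python
# Illustrative in-code setup of the Python agent using option names from the tables above.
# The values (OAP address, service/instance names, level) are examples, not defaults.
from skywalking import agent, config

config.init(
    agent_collector_backend_services='oap.example.internal:11800',  # gRPC endpoint of the OAP
    agent_protocol='grpc',
    agent_name='payment-service',
    agent_instance_name='payment-service-instance-01',
    agent_logging_level='INFO',
)
agent.start()
```

Options that are not passed keep the defaults listed in the tables; each of them can equally be supplied through the corresponding SW_* environment variable.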
SW_PYTHON Auto Instrumentation CLI    Configuration Environment Variable Type Default Value Description     agent_sw_python_bootstrap_propagate SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the child process agent bootstrap behavior in sw-python CLI, if set to False, a valid child process will not boot up a SkyWalking Agent. Please refer to the CLI Guide for details.   agent_sw_python_cli_debug_enabled SW_AGENT_SW_PYTHON_CLI_DEBUG_ENABLED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the CLI and agent logging debug mode, if set to True, the CLI and agent will print out debug logs. Please refer to the CLI Guide for details. Important: this config will set agent logging level to DEBUG as well, do not use it in production otherwise it will flood your logs. This normally shouldn\u0026rsquo;t be pass as a simple flag -d will be the same.    Trace Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_trace_reporter_max_buffer_size SW_AGENT_TRACE_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending the segment data to backend, segments beyond this are silently dropped   agent_trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. the current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?.   agent_ignore_suffix SW_AGENT_IGNORE_SUFFIX \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.   correlation_element_max_number SW_CORRELATION_ELEMENT_MAX_NUMBER \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 3 Max element count of the correlation context.   correlation_value_max_length SW_CORRELATION_VALUE_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 128 Max value length of correlation context element.    Profiling Configurations    Configuration Environment Variable Type Default Value Description     agent_profile_active SW_AGENT_PROFILE_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will enable profiler when user create a new profiling task.   agent_collector_get_profile_task_interval SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The number of seconds between two profile task query.   agent_profile_max_parallel SW_AGENT_PROFILE_MAX_PARALLEL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 5 The number of parallel monitor segment count.   agent_profile_duration SW_AGENT_PROFILE_DURATION \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The maximum monitor segment time(minutes), if current segment monitor time out of limit, then stop it.   
agent_profile_dump_max_stack_depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 500 The number of max dump thread stack depth   agent_profile_snapshot_transport_buffer_size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 50 The number of snapshot transport to backend buffer size    Log Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_log_reporter_active SW_AGENT_LOG_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected logs to the OAP or Satellite. Otherwise, it disables the feature.   agent_log_reporter_safe_mode SW_AGENT_LOG_REPORTER_SAFE_MODE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If True, Python agent will filter out HTTP basic auth information from log records. By default, it disables the feature due to potential performance impact brought by regular expression   agent_log_reporter_max_buffer_size SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.   agent_log_reporter_level SW_AGENT_LOG_REPORTER_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; WARNING This config specifies the logger levels of concern, any logs with a level below the config will be ignored.   agent_log_reporter_ignore_filter SW_AGENT_LOG_REPORTER_IGNORE_FILTER \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config customizes whether to ignore the application-defined logger filters, if True, all logs are reported disregarding any filter rules.   agent_log_reporter_formatted SW_AGENT_LOG_REPORTER_FORMATTED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, the log reporter will transmit the logs as formatted. Otherwise, puts logRecord.msg and logRecord.args into message content and tags(argument.n), respectively. Along with an exception tag if an exception was raised. Only applies to logging module.   agent_log_reporter_layout SW_AGENT_LOG_REPORTER_LAYOUT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; %(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s The log reporter formats the logRecord message based on the layout given. Only applies to logging module.   agent_cause_exception_depth SW_AGENT_CAUSE_EXCEPTION_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 This configuration is shared by log reporter and tracer. This config limits agent to report up to limit stacktrace, please refer to [Python traceback](../ https://docs.python.org/3/library/traceback.html#traceback.print_tb) for more explanations.    Meter Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_meter_reporter_active SW_AGENT_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected meters to the OAP or Satellite. Otherwise, it disables the feature.   agent_meter_reporter_max_buffer_size SW_AGENT_METER_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending meter data to backend, meters beyond this are silently dropped.   
agent_meter_reporter_period SW_AGENT_METER_REPORTER_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The interval in seconds between each meter data report   agent_pvm_meter_reporter_active SW_AGENT_PVM_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected Python Virtual Machine (PVM) meters to the OAP or Satellite. Otherwise, it disables the feature.    Plugin Related configurations    Configuration Environment Variable Type Default Value Description     agent_disable_plugins SW_AGENT_DISABLE_PLUGINS \u0026lt;class \u0026lsquo;list\u0026rsquo;\u0026gt; [''] The name patterns in comma-separated pattern, plugins whose name matches one of the pattern won\u0026rsquo;t be installed   plugin_http_http_params_length_threshold SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1024 When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.   plugin_http_ignore_method SW_PLUGIN_HTTP_IGNORE_METHOD \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  Comma-delimited list of http methods to ignore (GET, POST, HEAD, OPTIONS, etc\u0026hellip;)   plugin_sql_parameters_max_length SW_PLUGIN_SQL_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 0 The maximum length of the collected parameter, parameters longer than the specified length will be truncated, length 0 turns off parameter tracing   plugin_pymongo_trace_parameters SW_PLUGIN_PYMONGO_TRACE_PARAMETERS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Indicates whether to collect the filters of pymongo   plugin_pymongo_parameters_max_length SW_PLUGIN_PYMONGO_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of the collected filters, filters longer than the specified length will be truncated   plugin_elasticsearch_trace_dsl SW_PLUGIN_ELASTICSEARCH_TRACE_DSL \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false   plugin_flask_collect_http_params SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Flask plugin should collect the parameters of the request.   plugin_sanic_collect_http_params SW_PLUGIN_SANIC_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Sanic plugin should collect the parameters of the request.   plugin_django_collect_http_params SW_PLUGIN_DJANGO_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Django plugin should collect the parameters of the request.   plugin_fastapi_collect_http_params SW_PLUGIN_FASTAPI_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the FastAPI plugin should collect the parameters of the request.   plugin_bottle_collect_http_params SW_PLUGIN_BOTTLE_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Bottle plugin should collect the parameters of the request.   
plugin_celery_parameters_length SW_PLUGIN_CELERY_PARAMETERS_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of celery functions parameters, longer than this will be truncated, 0 turns off    ","excerpt":"Supported Agent Configuration Options Below is the full list of supported configurations you can set …","ref":"/docs/skywalking-python/latest/en/setup/configuration/","title":"Supported Agent Configuration Options"},{"body":"Supported Agent Configuration Options Below is the full list of supported configurations you can set to customize the agent behavior, please take some time to read the descriptions for what they can achieve.\n Usage: (Pass in intrusive setup)\n from skywalking import config, agent config.init(YourConfiguration=YourValue)) agent.start()  Usage: (Pass by environment variables)\n export SW_AGENT_YourConfiguration=YourValue Agent Core Configuration Options    Configuration Environment Variable Type Default Value Description     agent_collector_backend_services SW_AGENT_COLLECTOR_BACKEND_SERVICES \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; oap_host:oap_port The backend OAP server address, 11800 is default OAP gRPC port, 12800 is HTTP, Kafka ignores this option and uses kafka_bootstrap_servers option. This option should be changed accordingly with selected protocol   agent_protocol SW_AGENT_PROTOCOL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; grpc The protocol to communicate with the backend OAP, http, grpc or kafka, we highly suggest using grpc in production as it\u0026rsquo;s well optimized than http. The kafka protocol provides an alternative way to submit data to the backend.   agent_name SW_AGENT_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; Python Service Name The name of your awesome Python service   agent_instance_name SW_AGENT_INSTANCE_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; str(uuid.uuid1()).replace('-', \u0026lsquo;') The name of this particular awesome Python service instance   agent_namespace SW_AGENT_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The agent namespace of the Python service (available as tag and the suffix of service name)   kafka_bootstrap_servers SW_KAFKA_BOOTSTRAP_SERVERS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; localhost:9092 A list of host/port pairs to use for establishing the initial connection to your Kafka cluster. It is in the form of host1:port1,host2:port2,\u0026hellip; (used for Kafka reporter protocol)   kafka_namespace SW_KAFKA_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The kafka namespace specified by OAP side SW_NAMESPACE, prepends the following kafka topic names with a -.   
kafka_topic_management SW_KAFKA_TOPIC_MANAGEMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-managements Specifying Kafka topic name for service instance reporting and registering, this should be in sync with OAP   kafka_topic_segment SW_KAFKA_TOPIC_SEGMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-segments Specifying Kafka topic name for Tracing data, this should be in sync with OAP   kafka_topic_log SW_KAFKA_TOPIC_LOG \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-logs Specifying Kafka topic name for Log data, this should be in sync with OAP   kafka_topic_meter SW_KAFKA_TOPIC_METER \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-meters Specifying Kafka topic name for Meter data, this should be in sync with OAP   kafka_reporter_custom_configurations SW_KAFKA_REPORTER_CUSTOM_CONFIGURATIONS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The configs to init KafkaProducer, supports the basic arguments (whose type is either str, bool, or int) listed here This config only works from env variables, each one should be passed in SW_KAFKA_REPORTER_CONFIG_\u0026lt;KEY_NAME\u0026gt;   agent_force_tls SW_AGENT_FORCE_TLS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Use TLS for communication with SkyWalking OAP (no cert required)   agent_authentication SW_AGENT_AUTHENTICATION \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The authentication token to verify that the agent is trusted by the backend OAP, as for how to configure the backend, refer to the yaml.   agent_logging_level SW_AGENT_LOGGING_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; INFO The level of agent self-logs, could be one of CRITICAL, FATAL, ERROR, WARN(WARNING), INFO, DEBUG. Please turn on debug if an issue is encountered to find out what\u0026rsquo;s going on    Agent Core Danger Zone    Configuration Environment Variable Type Default Value Description     agent_collector_heartbeat_period SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 30 The agent will exchange heartbeat message with SkyWalking OAP backend every period seconds   agent_collector_properties_report_period_factor SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The agent will report service instance properties every factor * heartbeat period seconds default: 10*30 = 300 seconds   agent_instance_properties_json SW_AGENT_INSTANCE_PROPERTIES_JSON \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  A custom JSON string to be reported as service instance properties, e.g. {\u0026quot;key\u0026quot;: \u0026quot;value\u0026quot;}   agent_experimental_fork_support SW_AGENT_EXPERIMENTAL_FORK_SUPPORT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False The agent will restart itself in any os.fork()-ed child process. Important Note: it\u0026rsquo;s not suitable for short-lived processes as each one will create a new instance in SkyWalking dashboard in format of service_instance-child(pid). This feature may not work when a precise combination of gRPC + Python 3.7 + subprocess (not fork) is used together. The agent will output a warning log when using on Python 3.7 for such a reason.   agent_queue_timeout SW_AGENT_QUEUE_TIMEOUT \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1 DANGEROUS - This option controls the interval of each bulk report from telemetry data queues Do not modify unless you have evaluated its impact given your service load.   
agent_asyncio_enhancement SW_AGENT_ASYNCIO_ENHANCEMENT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Replace the threads to asyncio coroutines to report telemetry data to the OAP. This option is experimental and may not work as expected.    SW_PYTHON Auto Instrumentation CLI    Configuration Environment Variable Type Default Value Description     agent_sw_python_bootstrap_propagate SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the child process agent bootstrap behavior in sw-python CLI, if set to False, a valid child process will not boot up a SkyWalking Agent. Please refer to the CLI Guide for details.   agent_sw_python_cli_debug_enabled SW_AGENT_SW_PYTHON_CLI_DEBUG_ENABLED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the CLI and agent logging debug mode, if set to True, the CLI and agent will print out debug logs. Please refer to the CLI Guide for details. Important: this config will set agent logging level to DEBUG as well, do not use it in production otherwise it will flood your logs. This normally shouldn\u0026rsquo;t be pass as a simple flag -d will be the same.    Trace Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_trace_reporter_max_buffer_size SW_AGENT_TRACE_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending the segment data to backend, segments beyond this are silently dropped   agent_trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. the current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?.   agent_ignore_suffix SW_AGENT_IGNORE_SUFFIX \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.   correlation_element_max_number SW_CORRELATION_ELEMENT_MAX_NUMBER \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 3 Max element count of the correlation context.   correlation_value_max_length SW_CORRELATION_VALUE_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 128 Max value length of correlation context element.    Profiling Configurations    Configuration Environment Variable Type Default Value Description     agent_profile_active SW_AGENT_PROFILE_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will enable profiler when user create a new profiling task.   agent_collector_get_profile_task_interval SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The number of seconds between two profile task query.   agent_profile_max_parallel SW_AGENT_PROFILE_MAX_PARALLEL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 5 The number of parallel monitor segment count.   agent_profile_duration SW_AGENT_PROFILE_DURATION \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The maximum monitor segment time(minutes), if current segment monitor time out of limit, then stop it.   
agent_profile_dump_max_stack_depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 500 The number of max dump thread stack depth   agent_profile_snapshot_transport_buffer_size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 50 The number of snapshot transport to backend buffer size    Log Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_log_reporter_active SW_AGENT_LOG_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected logs to the OAP or Satellite. Otherwise, it disables the feature.   agent_log_reporter_safe_mode SW_AGENT_LOG_REPORTER_SAFE_MODE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If True, Python agent will filter out HTTP basic auth information from log records. By default, it disables the feature due to potential performance impact brought by regular expression   agent_log_reporter_max_buffer_size SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.   agent_log_reporter_level SW_AGENT_LOG_REPORTER_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; WARNING This config specifies the logger levels of concern, any logs with a level below the config will be ignored.   agent_log_reporter_ignore_filter SW_AGENT_LOG_REPORTER_IGNORE_FILTER \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config customizes whether to ignore the application-defined logger filters, if True, all logs are reported disregarding any filter rules.   agent_log_reporter_formatted SW_AGENT_LOG_REPORTER_FORMATTED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, the log reporter will transmit the logs as formatted. Otherwise, puts logRecord.msg and logRecord.args into message content and tags(argument.n), respectively. Along with an exception tag if an exception was raised. Only applies to logging module.   agent_log_reporter_layout SW_AGENT_LOG_REPORTER_LAYOUT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; %(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s The log reporter formats the logRecord message based on the layout given. Only applies to logging module.   agent_cause_exception_depth SW_AGENT_CAUSE_EXCEPTION_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 This configuration is shared by log reporter and tracer. This config limits agent to report up to limit stacktrace, please refer to [Python traceback](../ https://docs.python.org/3/library/traceback.html#traceback.print_tb) for more explanations.    Meter Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_meter_reporter_active SW_AGENT_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected meters to the OAP or Satellite. Otherwise, it disables the feature.   agent_meter_reporter_max_buffer_size SW_AGENT_METER_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending meter data to backend, meters beyond this are silently dropped.   
agent_meter_reporter_period SW_AGENT_METER_REPORTER_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The interval in seconds between each meter data report   agent_pvm_meter_reporter_active SW_AGENT_PVM_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected Python Virtual Machine (PVM) meters to the OAP or Satellite. Otherwise, it disables the feature.    Plugin Related configurations    Configuration Environment Variable Type Default Value Description     agent_disable_plugins SW_AGENT_DISABLE_PLUGINS \u0026lt;class \u0026lsquo;list\u0026rsquo;\u0026gt; [''] The name patterns in comma-separated pattern, plugins whose name matches one of the pattern won\u0026rsquo;t be installed   plugin_http_http_params_length_threshold SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1024 When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.   plugin_http_ignore_method SW_PLUGIN_HTTP_IGNORE_METHOD \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  Comma-delimited list of http methods to ignore (GET, POST, HEAD, OPTIONS, etc\u0026hellip;)   plugin_sql_parameters_max_length SW_PLUGIN_SQL_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 0 The maximum length of the collected parameter, parameters longer than the specified length will be truncated, length 0 turns off parameter tracing   plugin_pymongo_trace_parameters SW_PLUGIN_PYMONGO_TRACE_PARAMETERS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Indicates whether to collect the filters of pymongo   plugin_pymongo_parameters_max_length SW_PLUGIN_PYMONGO_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of the collected filters, filters longer than the specified length will be truncated   plugin_elasticsearch_trace_dsl SW_PLUGIN_ELASTICSEARCH_TRACE_DSL \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false   plugin_flask_collect_http_params SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Flask plugin should collect the parameters of the request.   plugin_sanic_collect_http_params SW_PLUGIN_SANIC_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Sanic plugin should collect the parameters of the request.   plugin_django_collect_http_params SW_PLUGIN_DJANGO_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Django plugin should collect the parameters of the request.   plugin_fastapi_collect_http_params SW_PLUGIN_FASTAPI_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the FastAPI plugin should collect the parameters of the request.   plugin_bottle_collect_http_params SW_PLUGIN_BOTTLE_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Bottle plugin should collect the parameters of the request.   
plugin_celery_parameters_length SW_PLUGIN_CELERY_PARAMETERS_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of celery functions parameters, longer than this will be truncated, 0 turns off    ","excerpt":"Supported Agent Configuration Options Below is the full list of supported configurations you can set …","ref":"/docs/skywalking-python/next/en/setup/configuration/","title":"Supported Agent Configuration Options"},{"body":"Supported Agent Configuration Options Below is the full list of supported configurations you can set to customize the agent behavior, please take some time to read the descriptions for what they can achieve.\n Usage: (Pass in intrusive setup)\n from skywalking import config, agent config.init(YourConfiguration=YourValue)) agent.start()  Usage: (Pass by environment variables)\n export SW_AGENT_YourConfiguration=YourValue Agent Core Configuration Options    Configuration Environment Variable Type Default Value Description     agent_collector_backend_services SW_AGENT_COLLECTOR_BACKEND_SERVICES \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; oap_host:oap_port The backend OAP server address, 11800 is default OAP gRPC port, 12800 is HTTP, Kafka ignores this option and uses kafka_bootstrap_servers option. This option should be changed accordingly with selected protocol   agent_protocol SW_AGENT_PROTOCOL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; grpc The protocol to communicate with the backend OAP, http, grpc or kafka, we highly suggest using grpc in production as it\u0026rsquo;s well optimized than http. The kafka protocol provides an alternative way to submit data to the backend.   agent_name SW_AGENT_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; Python Service Name The name of your awesome Python service   agent_instance_name SW_AGENT_INSTANCE_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; str(uuid.uuid1()).replace('-', \u0026lsquo;') The name of this particular awesome Python service instance   agent_namespace SW_AGENT_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The agent namespace of the Python service (available as tag and the suffix of service name)   kafka_bootstrap_servers SW_KAFKA_BOOTSTRAP_SERVERS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; localhost:9092 A list of host/port pairs to use for establishing the initial connection to your Kafka cluster. It is in the form of host1:port1,host2:port2,\u0026hellip; (used for Kafka reporter protocol)   kafka_namespace SW_KAFKA_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The kafka namespace specified by OAP side SW_NAMESPACE, prepends the following kafka topic names with a -.   
kafka_topic_management SW_KAFKA_TOPIC_MANAGEMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-managements Specifying Kafka topic name for service instance reporting and registering, this should be in sync with OAP   kafka_topic_segment SW_KAFKA_TOPIC_SEGMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-segments Specifying Kafka topic name for Tracing data, this should be in sync with OAP   kafka_topic_log SW_KAFKA_TOPIC_LOG \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-logs Specifying Kafka topic name for Log data, this should be in sync with OAP   kafka_topic_meter SW_KAFKA_TOPIC_METER \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-meters Specifying Kafka topic name for Meter data, this should be in sync with OAP   kafka_reporter_custom_configurations SW_KAFKA_REPORTER_CUSTOM_CONFIGURATIONS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The configs to init KafkaProducer, supports the basic arguments (whose type is either str, bool, or int) listed here This config only works from env variables, each one should be passed in SW_KAFKA_REPORTER_CONFIG_\u0026lt;KEY_NAME\u0026gt;   agent_force_tls SW_AGENT_FORCE_TLS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Use TLS for communication with SkyWalking OAP (no cert required)   agent_authentication SW_AGENT_AUTHENTICATION \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The authentication token to verify that the agent is trusted by the backend OAP, as for how to configure the backend, refer to the yaml.   agent_logging_level SW_AGENT_LOGGING_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; INFO The level of agent self-logs, could be one of CRITICAL, FATAL, ERROR, WARN(WARNING), INFO, DEBUG. Please turn on debug if an issue is encountered to find out what\u0026rsquo;s going on    Agent Core Danger Zone    Configuration Environment Variable Type Default Value Description     agent_collector_heartbeat_period SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 30 The agent will exchange heartbeat message with SkyWalking OAP backend every period seconds   agent_collector_properties_report_period_factor SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The agent will report service instance properties every factor * heartbeat period seconds default: 10*30 = 300 seconds   agent_instance_properties_json SW_AGENT_INSTANCE_PROPERTIES_JSON \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  A custom JSON string to be reported as service instance properties, e.g. {\u0026quot;key\u0026quot;: \u0026quot;value\u0026quot;}   agent_experimental_fork_support SW_AGENT_EXPERIMENTAL_FORK_SUPPORT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False The agent will restart itself in any os.fork()-ed child process. Important Note: it\u0026rsquo;s not suitable for short-lived processes as each one will create a new instance in SkyWalking dashboard in format of service_instance-child(pid). This feature may not work when a precise combination of gRPC + Python 3.7 + subprocess (not fork) is used together. The agent will output a warning log when using on Python 3.7 for such a reason.   agent_queue_timeout SW_AGENT_QUEUE_TIMEOUT \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1 DANGEROUS - This option controls the interval of each bulk report from telemetry data queues Do not modify unless you have evaluated its impact given your service load.    
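The same options can also be driven purely by environment variables, using the SW_* names from the tables, which is handy when the application code should stay untouched. A minimal sketch follows; it assumes the variables are visible before the skywalking package is first imported so the configuration picks them up, and the endpoint, service name and plugin names are placeholders taken from the tables rather than required values.

```python
# Illustrative environment-driven setup; equivalent to `export SW_AGENT_...` before launch.
# Assumption: the variables are set before `skywalking` is first imported.
import os

os.environ.setdefault('SW_AGENT_COLLECTOR_BACKEND_SERVICES', 'oap.example.internal:11800')
os.environ.setdefault('SW_AGENT_NAME', 'inventory-service')
os.environ.setdefault('SW_AGENT_LOG_REPORTER_ACTIVE', 'True')
# Comma-separated plugin-name patterns, e.g. names from the plugin support table.
os.environ.setdefault('SW_AGENT_DISABLE_PLUGINS', 'sw_django,sw_flask')

from skywalking import agent

agent.start()
```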
SW_PYTHON Auto Instrumentation CLI    Configuration Environment Variable Type Default Value Description     agent_sw_python_bootstrap_propagate SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the child process agent bootstrap behavior in sw-python CLI, if set to False, a valid child process will not boot up a SkyWalking Agent. Please refer to the CLI Guide for details.   agent_sw_python_cli_debug_enabled SW_AGENT_SW_PYTHON_CLI_DEBUG_ENABLED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the CLI and agent logging debug mode, if set to True, the CLI and agent will print out debug logs. Please refer to the CLI Guide for details. Important: this config will set agent logging level to DEBUG as well, do not use it in production otherwise it will flood your logs. This normally shouldn\u0026rsquo;t be pass as a simple flag -d will be the same.    Trace Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_trace_reporter_max_buffer_size SW_AGENT_TRACE_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending the segment data to backend, segments beyond this are silently dropped   agent_trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. the current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?.   agent_ignore_suffix SW_AGENT_IGNORE_SUFFIX \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.   correlation_element_max_number SW_CORRELATION_ELEMENT_MAX_NUMBER \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 3 Max element count of the correlation context.   correlation_value_max_length SW_CORRELATION_VALUE_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 128 Max value length of correlation context element.    Profiling Configurations    Configuration Environment Variable Type Default Value Description     agent_profile_active SW_AGENT_PROFILE_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will enable profiler when user create a new profiling task.   agent_collector_get_profile_task_interval SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The number of seconds between two profile task query.   agent_profile_max_parallel SW_AGENT_PROFILE_MAX_PARALLEL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 5 The number of parallel monitor segment count.   agent_profile_duration SW_AGENT_PROFILE_DURATION \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The maximum monitor segment time(minutes), if current segment monitor time out of limit, then stop it.   
agent_profile_dump_max_stack_depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 500 The number of max dump thread stack depth   agent_profile_snapshot_transport_buffer_size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 50 The number of snapshot transport to backend buffer size    Log Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_log_reporter_active SW_AGENT_LOG_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected logs to the OAP or Satellite. Otherwise, it disables the feature.   agent_log_reporter_safe_mode SW_AGENT_LOG_REPORTER_SAFE_MODE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If True, Python agent will filter out HTTP basic auth information from log records. By default, it disables the feature due to potential performance impact brought by regular expression   agent_log_reporter_max_buffer_size SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.   agent_log_reporter_level SW_AGENT_LOG_REPORTER_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; WARNING This config specifies the logger levels of concern, any logs with a level below the config will be ignored.   agent_log_reporter_ignore_filter SW_AGENT_LOG_REPORTER_IGNORE_FILTER \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config customizes whether to ignore the application-defined logger filters, if True, all logs are reported disregarding any filter rules.   agent_log_reporter_formatted SW_AGENT_LOG_REPORTER_FORMATTED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, the log reporter will transmit the logs as formatted. Otherwise, puts logRecord.msg and logRecord.args into message content and tags(argument.n), respectively. Along with an exception tag if an exception was raised. Only applies to logging module.   agent_log_reporter_layout SW_AGENT_LOG_REPORTER_LAYOUT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; %(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s The log reporter formats the logRecord message based on the layout given. Only applies to logging module.   agent_cause_exception_depth SW_AGENT_CAUSE_EXCEPTION_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 This configuration is shared by log reporter and tracer. This config limits agent to report up to limit stacktrace, please refer to [Python traceback](../ https://docs.python.org/3/library/traceback.html#traceback.print_tb) for more explanations.    Meter Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_meter_reporter_active SW_AGENT_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected meters to the OAP or Satellite. Otherwise, it disables the feature.   agent_meter_reporter_max_buffer_size SW_AGENT_METER_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending meter data to backend, meters beyond this are silently dropped.   
agent_meter_reporter_period SW_AGENT_METER_REPORTER_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The interval in seconds between each meter data report   agent_pvm_meter_reporter_active SW_AGENT_PVM_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected Python Virtual Machine (PVM) meters to the OAP or Satellite. Otherwise, it disables the feature.    Plugin Related configurations    Configuration Environment Variable Type Default Value Description     agent_disable_plugins SW_AGENT_DISABLE_PLUGINS \u0026lt;class \u0026lsquo;list\u0026rsquo;\u0026gt; [''] The name patterns in comma-separated pattern, plugins whose name matches one of the pattern won\u0026rsquo;t be installed   plugin_http_http_params_length_threshold SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1024 When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.   plugin_http_ignore_method SW_PLUGIN_HTTP_IGNORE_METHOD \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  Comma-delimited list of http methods to ignore (GET, POST, HEAD, OPTIONS, etc\u0026hellip;)   plugin_sql_parameters_max_length SW_PLUGIN_SQL_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 0 The maximum length of the collected parameter, parameters longer than the specified length will be truncated, length 0 turns off parameter tracing   plugin_pymongo_trace_parameters SW_PLUGIN_PYMONGO_TRACE_PARAMETERS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Indicates whether to collect the filters of pymongo   plugin_pymongo_parameters_max_length SW_PLUGIN_PYMONGO_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of the collected filters, filters longer than the specified length will be truncated   plugin_elasticsearch_trace_dsl SW_PLUGIN_ELASTICSEARCH_TRACE_DSL \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false   plugin_flask_collect_http_params SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Flask plugin should collect the parameters of the request.   plugin_sanic_collect_http_params SW_PLUGIN_SANIC_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Sanic plugin should collect the parameters of the request.   plugin_django_collect_http_params SW_PLUGIN_DJANGO_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Django plugin should collect the parameters of the request.   plugin_fastapi_collect_http_params SW_PLUGIN_FASTAPI_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the FastAPI plugin should collect the parameters of the request.   plugin_bottle_collect_http_params SW_PLUGIN_BOTTLE_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Bottle plugin should collect the parameters of the request.   
plugin_celery_parameters_length SW_PLUGIN_CELERY_PARAMETERS_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of celery functions parameters, longer than this will be truncated, 0 turns off    ","excerpt":"Supported Agent Configuration Options Below is the full list of supported configurations you can set …","ref":"/docs/skywalking-python/v1.0.1/en/setup/configuration/","title":"Supported Agent Configuration Options"},{"body":"Supported Libraries This document is automatically generated from the SkyWalking Python testing matrix.\nThe column of versions only indicates the set of library versions tested in a best-effort manner.\nIf you find newer major versions that are missing from the following table, and it\u0026rsquo;s not documented as a limitation, please PR to update the test matrix in the plugin.\nVersions marked as NOT SUPPORTED may be due to an incompatible version with Python in the original library or a limitation of SkyWalking auto-instrumentation (welcome to contribute!)\nPlugin Support Table    Library Python Version - Lib Version Plugin Name     aiohttp Python \u0026gt;=3.7 - [\u0026lsquo;3.7.*']; sw_aiohttp   aioredis Python \u0026gt;=3.7 - [\u0026lsquo;2.0.*']; sw_aioredis   aiormq Python \u0026gt;=3.7 - [\u0026lsquo;6.3\u0026rsquo;, \u0026lsquo;6.4\u0026rsquo;]; sw_aiormq   amqp Python \u0026gt;=3.7 - [\u0026lsquo;2.6.1\u0026rsquo;]; sw_amqp   asyncpg Python \u0026gt;=3.7 - [\u0026lsquo;0.25.0\u0026rsquo;]; sw_asyncpg   bottle Python \u0026gt;=3.7 - [\u0026lsquo;0.12.23\u0026rsquo;]; sw_bottle   celery Python \u0026gt;=3.7 - [\u0026lsquo;5.1\u0026rsquo;]; sw_celery   confluent_kafka Python \u0026gt;=3.7 - [\u0026lsquo;1.5.0\u0026rsquo;, \u0026lsquo;1.7.0\u0026rsquo;, \u0026lsquo;1.8.2\u0026rsquo;]; sw_confluent_kafka   django Python \u0026gt;=3.7 - [\u0026lsquo;3.2\u0026rsquo;]; sw_django   elasticsearch Python \u0026gt;=3.7 - [\u0026lsquo;7.13\u0026rsquo;, \u0026lsquo;7.14\u0026rsquo;, \u0026lsquo;7.15\u0026rsquo;]; sw_elasticsearch   hug Python \u0026gt;=3.11 - NOT SUPPORTED YET; Python \u0026gt;=3.10 - [\u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; Python \u0026gt;=3.7 - [\u0026lsquo;2.4.1\u0026rsquo;, \u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; sw_falcon   fastapi Python \u0026gt;=3.7 - [\u0026lsquo;0.89.\u0026rsquo;, \u0026lsquo;0.88.']; sw_fastapi   flask Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_flask   happybase Python \u0026gt;=3.7 - [\u0026lsquo;1.2.0\u0026rsquo;]; sw_happybase   http_server Python \u0026gt;=3.7 - ['*']; sw_http_server   werkzeug Python \u0026gt;=3.7 - [\u0026lsquo;1.0.1\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_http_server   httpx Python \u0026gt;=3.7 - [\u0026lsquo;0.23.\u0026rsquo;, \u0026lsquo;0.22.']; sw_httpx   kafka-python Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_kafka   loguru Python \u0026gt;=3.7 - [\u0026lsquo;0.6.0\u0026rsquo;, \u0026lsquo;0.7.0\u0026rsquo;]; sw_loguru   mysqlclient Python \u0026gt;=3.7 - [\u0026lsquo;2.1.*']; sw_mysqlclient   psycopg[binary] Python \u0026gt;=3.11 - [\u0026lsquo;3.1.']; Python \u0026gt;=3.7 - [\u0026lsquo;3.0.18\u0026rsquo;, \u0026lsquo;3.1.']; sw_psycopg   psycopg2-binary Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;2.9\u0026rsquo;]; sw_psycopg2   pymongo Python \u0026gt;=3.7 - [\u0026lsquo;3.11.*']; sw_pymongo   pymysql Python \u0026gt;=3.7 - [\u0026lsquo;1.0\u0026rsquo;]; sw_pymysql   pyramid Python \u0026gt;=3.7 - [\u0026lsquo;1.10\u0026rsquo;, 
\u0026lsquo;2.0\u0026rsquo;]; sw_pyramid   pika Python \u0026gt;=3.7 - [\u0026lsquo;1.2\u0026rsquo;]; sw_rabbitmq   redis Python \u0026gt;=3.7 - [\u0026lsquo;3.5.*\u0026rsquo;, \u0026lsquo;4.5.1\u0026rsquo;]; sw_redis   requests Python \u0026gt;=3.7 - [\u0026lsquo;2.26\u0026rsquo;, \u0026lsquo;2.25\u0026rsquo;]; sw_requests   sanic Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;20.12\u0026rsquo;]; sw_sanic   tornado Python \u0026gt;=3.7 - [\u0026lsquo;6.0\u0026rsquo;, \u0026lsquo;6.1\u0026rsquo;]; sw_tornado   urllib3 Python \u0026gt;=3.7 - [\u0026lsquo;1.26\u0026rsquo;, \u0026lsquo;1.25\u0026rsquo;]; sw_urllib3   urllib_request Python \u0026gt;=3.7 - ['*']; sw_urllib_request   websockets Python \u0026gt;=3.7 - [\u0026lsquo;10.3\u0026rsquo;, \u0026lsquo;10.4\u0026rsquo;]; sw_websockets    Notes  The celery server running with \u0026ldquo;celery -A \u0026hellip;\u0026rdquo; should be run with the HTTP protocol as it uses multiprocessing by default which is not compatible with the gRPC protocol implementation in SkyWalking currently. Celery clients can use whatever protocol they want. While Falcon is instrumented, only Hug is tested. Hug is believed to be abandoned project, use this plugin with a bit more caution. Instead of Hug, plugin test should move to test actual Falcon. The websocket instrumentation only traces client side connection handshake, the actual message exchange (send/recv) is not traced since injecting headers to socket message body is the only way to propagate the trace context, which requires customization of message structure and extreme care. (Feel free to add this feature by instrumenting the send/recv methods commented out in the code by either injecting sw8 headers or propagate the trace context in a separate message)  ","excerpt":"Supported Libraries This document is automatically generated from the SkyWalking Python testing …","ref":"/docs/skywalking-python/latest/en/setup/plugins/","title":"Supported Libraries"},{"body":"Supported Libraries This document is automatically generated from the SkyWalking Python testing matrix.\nThe column of versions only indicates the set of library versions tested in a best-effort manner.\nIf you find newer major versions that are missing from the following table, and it\u0026rsquo;s not documented as a limitation, please PR to update the test matrix in the plugin.\nVersions marked as NOT SUPPORTED may be due to an incompatible version with Python in the original library or a limitation of SkyWalking auto-instrumentation (welcome to contribute!)\nPlugin Support Table    Library Python Version - Lib Version Plugin Name     aiohttp Python \u0026gt;=3.7 - [\u0026lsquo;3.7.*']; sw_aiohttp   aioredis Python \u0026gt;=3.7 - [\u0026lsquo;2.0.*']; sw_aioredis   aiormq Python \u0026gt;=3.7 - [\u0026lsquo;6.3\u0026rsquo;, \u0026lsquo;6.4\u0026rsquo;]; sw_aiormq   amqp Python \u0026gt;=3.7 - [\u0026lsquo;2.6.1\u0026rsquo;]; sw_amqp   asyncpg Python \u0026gt;=3.7 - [\u0026lsquo;0.25.0\u0026rsquo;]; sw_asyncpg   bottle Python \u0026gt;=3.7 - [\u0026lsquo;0.12.23\u0026rsquo;]; sw_bottle   celery Python \u0026gt;=3.7 - [\u0026lsquo;5.1\u0026rsquo;]; sw_celery   confluent_kafka Python \u0026gt;=3.7 - [\u0026lsquo;1.5.0\u0026rsquo;, \u0026lsquo;1.7.0\u0026rsquo;, \u0026lsquo;1.8.2\u0026rsquo;]; sw_confluent_kafka   django Python \u0026gt;=3.7 - [\u0026lsquo;3.2\u0026rsquo;]; sw_django   elasticsearch Python \u0026gt;=3.7 - [\u0026lsquo;7.13\u0026rsquo;, \u0026lsquo;7.14\u0026rsquo;, \u0026lsquo;7.15\u0026rsquo;]; 
sw_elasticsearch   hug Python \u0026gt;=3.11 - NOT SUPPORTED YET; Python \u0026gt;=3.10 - [\u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; Python \u0026gt;=3.7 - [\u0026lsquo;2.4.1\u0026rsquo;, \u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; sw_falcon   fastapi Python \u0026gt;=3.7 - [\u0026lsquo;0.89.\u0026rsquo;, \u0026lsquo;0.88.']; sw_fastapi   flask Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_flask   happybase Python \u0026gt;=3.7 - [\u0026lsquo;1.2.0\u0026rsquo;]; sw_happybase   http_server Python \u0026gt;=3.7 - ['*']; sw_http_server   werkzeug Python \u0026gt;=3.7 - [\u0026lsquo;1.0.1\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_http_server   httpx Python \u0026gt;=3.7 - [\u0026lsquo;0.23.\u0026rsquo;, \u0026lsquo;0.22.']; sw_httpx   kafka-python Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_kafka   loguru Python \u0026gt;=3.7 - [\u0026lsquo;0.6.0\u0026rsquo;, \u0026lsquo;0.7.0\u0026rsquo;]; sw_loguru   mysqlclient Python \u0026gt;=3.7 - [\u0026lsquo;2.1.*']; sw_mysqlclient   neo4j Python \u0026gt;=3.7 - [\u0026lsquo;5.*']; sw_neo4j   psycopg[binary] Python \u0026gt;=3.11 - [\u0026lsquo;3.1.']; Python \u0026gt;=3.7 - [\u0026lsquo;3.0.18\u0026rsquo;, \u0026lsquo;3.1.']; sw_psycopg   psycopg2-binary Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;2.9\u0026rsquo;]; sw_psycopg2   pymongo Python \u0026gt;=3.7 - [\u0026lsquo;3.11.*']; sw_pymongo   pymysql Python \u0026gt;=3.7 - [\u0026lsquo;1.0\u0026rsquo;]; sw_pymysql   pyramid Python \u0026gt;=3.7 - [\u0026lsquo;1.10\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_pyramid   pika Python \u0026gt;=3.7 - [\u0026lsquo;1.2\u0026rsquo;]; sw_rabbitmq   redis Python \u0026gt;=3.7 - [\u0026lsquo;3.5.*\u0026rsquo;, \u0026lsquo;4.5.1\u0026rsquo;]; sw_redis   requests Python \u0026gt;=3.7 - [\u0026lsquo;2.26\u0026rsquo;, \u0026lsquo;2.25\u0026rsquo;]; sw_requests   sanic Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;20.12\u0026rsquo;]; sw_sanic   tornado Python \u0026gt;=3.7 - [\u0026lsquo;6.0\u0026rsquo;, \u0026lsquo;6.1\u0026rsquo;]; sw_tornado   urllib3 Python \u0026gt;=3.7 - [\u0026lsquo;1.26\u0026rsquo;, \u0026lsquo;1.25\u0026rsquo;]; sw_urllib3   urllib_request Python \u0026gt;=3.7 - ['*']; sw_urllib_request   websockets Python \u0026gt;=3.7 - [\u0026lsquo;10.3\u0026rsquo;, \u0026lsquo;10.4\u0026rsquo;]; sw_websockets    Notes  The celery server running with \u0026ldquo;celery -A \u0026hellip;\u0026rdquo; should be run with the HTTP protocol as it uses multiprocessing by default which is not compatible with the gRPC protocol implementation in SkyWalking currently. Celery clients can use whatever protocol they want. While Falcon is instrumented, only Hug is tested. Hug is believed to be abandoned project, use this plugin with a bit more caution. Instead of Hug, plugin test should move to test actual Falcon. The Neo4j plugin integrates neo4j python driver 5.x.x versions which support both Neo4j 5 and 4.4 DBMS. The websocket instrumentation only traces client side connection handshake, the actual message exchange (send/recv) is not traced since injecting headers to socket message body is the only way to propagate the trace context, which requires customization of message structure and extreme care. 
(Feel free to add this feature by instrumenting the send/recv methods commented out in the code by either injecting sw8 headers or propagate the trace context in a separate message)  ","excerpt":"Supported Libraries This document is automatically generated from the SkyWalking Python testing …","ref":"/docs/skywalking-python/next/en/setup/plugins/","title":"Supported Libraries"},{"body":"Supported Libraries This document is automatically generated from the SkyWalking Python testing matrix.\nThe column of versions only indicates the set of library versions tested in a best-effort manner.\nIf you find newer major versions that are missing from the following table, and it\u0026rsquo;s not documented as a limitation, please PR to update the test matrix in the plugin.\nVersions marked as NOT SUPPORTED may be due to an incompatible version with Python in the original library or a limitation of SkyWalking auto-instrumentation (welcome to contribute!)\nPlugin Support Table    Library Python Version - Lib Version Plugin Name     aiohttp Python \u0026gt;=3.7 - [\u0026lsquo;3.7.*']; sw_aiohttp   aioredis Python \u0026gt;=3.7 - [\u0026lsquo;2.0.*']; sw_aioredis   aiormq Python \u0026gt;=3.7 - [\u0026lsquo;6.3\u0026rsquo;, \u0026lsquo;6.4\u0026rsquo;]; sw_aiormq   amqp Python \u0026gt;=3.7 - [\u0026lsquo;2.6.1\u0026rsquo;]; sw_amqp   asyncpg Python \u0026gt;=3.7 - [\u0026lsquo;0.25.0\u0026rsquo;]; sw_asyncpg   bottle Python \u0026gt;=3.7 - [\u0026lsquo;0.12.23\u0026rsquo;]; sw_bottle   celery Python \u0026gt;=3.7 - [\u0026lsquo;5.1\u0026rsquo;]; sw_celery   confluent_kafka Python \u0026gt;=3.7 - [\u0026lsquo;1.5.0\u0026rsquo;, \u0026lsquo;1.7.0\u0026rsquo;, \u0026lsquo;1.8.2\u0026rsquo;]; sw_confluent_kafka   django Python \u0026gt;=3.7 - [\u0026lsquo;3.2\u0026rsquo;]; sw_django   elasticsearch Python \u0026gt;=3.7 - [\u0026lsquo;7.13\u0026rsquo;, \u0026lsquo;7.14\u0026rsquo;, \u0026lsquo;7.15\u0026rsquo;]; sw_elasticsearch   hug Python \u0026gt;=3.11 - NOT SUPPORTED YET; Python \u0026gt;=3.10 - [\u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; Python \u0026gt;=3.7 - [\u0026lsquo;2.4.1\u0026rsquo;, \u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; sw_falcon   fastapi Python \u0026gt;=3.7 - [\u0026lsquo;0.89.\u0026rsquo;, \u0026lsquo;0.88.']; sw_fastapi   flask Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_flask   happybase Python \u0026gt;=3.7 - [\u0026lsquo;1.2.0\u0026rsquo;]; sw_happybase   http_server Python \u0026gt;=3.7 - ['*']; sw_http_server   werkzeug Python \u0026gt;=3.7 - [\u0026lsquo;1.0.1\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_http_server   httpx Python \u0026gt;=3.7 - [\u0026lsquo;0.23.\u0026rsquo;, \u0026lsquo;0.22.']; sw_httpx   kafka-python Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_kafka   loguru Python \u0026gt;=3.7 - [\u0026lsquo;0.6.0\u0026rsquo;, \u0026lsquo;0.7.0\u0026rsquo;]; sw_loguru   mysqlclient Python \u0026gt;=3.7 - [\u0026lsquo;2.1.*']; sw_mysqlclient   psycopg[binary] Python \u0026gt;=3.11 - [\u0026lsquo;3.1.']; Python \u0026gt;=3.7 - [\u0026lsquo;3.0.18\u0026rsquo;, \u0026lsquo;3.1.']; sw_psycopg   psycopg2-binary Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;2.9\u0026rsquo;]; sw_psycopg2   pymongo Python \u0026gt;=3.7 - [\u0026lsquo;3.11.*']; sw_pymongo   pymysql Python \u0026gt;=3.7 - [\u0026lsquo;1.0\u0026rsquo;]; sw_pymysql   pyramid Python \u0026gt;=3.7 - [\u0026lsquo;1.10\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_pyramid   pika Python \u0026gt;=3.7 - 
[\u0026lsquo;1.2\u0026rsquo;]; sw_rabbitmq   redis Python \u0026gt;=3.7 - [\u0026lsquo;3.5.*\u0026rsquo;, \u0026lsquo;4.5.1\u0026rsquo;]; sw_redis   requests Python \u0026gt;=3.7 - [\u0026lsquo;2.26\u0026rsquo;, \u0026lsquo;2.25\u0026rsquo;]; sw_requests   sanic Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;20.12\u0026rsquo;]; sw_sanic   tornado Python \u0026gt;=3.7 - [\u0026lsquo;6.0\u0026rsquo;, \u0026lsquo;6.1\u0026rsquo;]; sw_tornado   urllib3 Python \u0026gt;=3.7 - [\u0026lsquo;1.26\u0026rsquo;, \u0026lsquo;1.25\u0026rsquo;]; sw_urllib3   urllib_request Python \u0026gt;=3.7 - ['*']; sw_urllib_request   websockets Python \u0026gt;=3.7 - [\u0026lsquo;10.3\u0026rsquo;, \u0026lsquo;10.4\u0026rsquo;]; sw_websockets    Notes  The celery server running with \u0026ldquo;celery -A \u0026hellip;\u0026rdquo; should be run with the HTTP protocol, as it uses multiprocessing by default, which is currently not compatible with the gRPC protocol implementation in SkyWalking. Celery clients can use whatever protocol they want. While Falcon is instrumented, only Hug is tested. Hug is believed to be an abandoned project, so use this plugin with a bit more caution. Instead of Hug, the plugin test should move to testing Falcon itself. The websocket instrumentation only traces the client-side connection handshake; the actual message exchange (send/recv) is not traced, since injecting headers into the socket message body is the only way to propagate the trace context, which requires customization of the message structure and extreme care. (Feel free to add this feature by instrumenting the send/recv methods commented out in the code, either by injecting sw8 headers or by propagating the trace context in a separate message.)  ","excerpt":"Supported Libraries This document is automatically generated from the SkyWalking Python testing …","ref":"/docs/skywalking-python/v1.0.1/en/setup/plugins/","title":"Supported Libraries"},{"body":"Supported SAPI, extension and library The following plugins provide the distributed tracing capability.\nSupported SAPI  PHP-FPM CLI under Swoole  Supported PHP extension  cURL PDO MySQL Improved Memcached phpredis MongoDB Memcache  Supported PHP library  predis php-amqplib for Message Queuing Producer  ","excerpt":"Supported SAPI, extension and library The following plugins provide the distributed tracing …","ref":"/docs/skywalking-php/latest/en/setup/service-agent/php-agent/supported-list/","title":"Supported SAPI, extension and library"},{"body":"Supported SAPI, extension and library The following plugins provide the distributed tracing capability.\nSupported SAPI  PHP-FPM CLI under Swoole  Supported PHP extension  cURL PDO MySQL Improved Memcached phpredis MongoDB Memcache  Supported PHP library  predis php-amqplib for Message Queuing Producer  ","excerpt":"Supported SAPI, extension and library The following plugins provide the distributed tracing …","ref":"/docs/skywalking-php/next/en/setup/service-agent/php-agent/supported-list/","title":"Supported SAPI, extension and library"},{"body":"Supported SAPI, extension and library The following plugins provide the distributed tracing capability.\nSupported SAPI  PHP-FPM CLI under Swoole  Supported PHP extension  cURL PDO MySQL Improved Memcached phpredis MongoDB Memcache  Supported PHP library  predis php-amqplib for Message Queuing Producer  ","excerpt":"Supported SAPI, extension and library The following plugins provide the distributed tracing 
…","ref":"/docs/skywalking-php/v0.7.0/en/setup/service-agent/php-agent/supported-list/","title":"Supported SAPI, extension and library"},{"body":"SWIP - SkyWalking Improvement Proposal SWIP - SkyWalking Improvement Proposal, is an official document to propose a new feature and/or feature improvement, which are relative to end users and developers.\nSkyWalking has been very stable since v9.x. We are getting over the rapid changing stage. The core concepts, protocols for reporting telemetry and query, 3rd party integration, and the streaming process kernel are very stable. From now(2024) on, SkyWalking community would focus more on improvement and controllable improvement. All major changes should be evaluated more seriously, and try as good as possible to avoid incompatible breaking changes.\nWhat is considered a major change? The catalogs of a major change are listed as follows\n New Feature. A feature doesn\u0026rsquo;t exist for the latest version. Any change of the network Interfaces, especially for Query Protocol, Data Collect Protocols, Dynamic Configuration APIs, Exporting APIs, AI pipeline APIs. Any change of storage structure.  Q: Is Agent side feature or change considered a SWIP?\nA: Right now, SWIP targets OAP and UI side changes. All agent side changes are pending on the reviews from the committers of those agents.\nSWIP Template The purpose of this template should not be considered a hard requirement. The major purpose of SWIP is helping the PMC and community member to understand the proposal better.\n# Title: SWIP-1234 xxxx  ## Motivation The description of new feature or improvement. ## Architecture Graph Describe the relationship between your new proposal part and existing components. ## Proposed Changes State your proposal in detail. ## Imported Dependencies libs and their licenses.  ## Compatibility Whether breaking configuration, storage structure, or protocols. ## General usage docs This doesn\u0026#39;t have to be a final version, but helps the reviewers to understand how to use this new feature. SWIP Process Here is the process for starting a SWIP.\n Start a SWIP discussion at GitHub Discussion Page with title [DISCUSS] xxxx. Fill in the sections as described above in SWIP Template. At least one SkyWalking committer commented on the discussion to show interest in adopting it. This committer could update this page to grant a SWIP ID, and update the title to [SWIP-ID NO.] [DISCUSS] xxxx. All further discussion could happen on the discussion page. Once the consensus is made by enough committer supporters, and/or through a mail list vote, this SWIP should be added here as SWIP-ID NO.md and listed in the below as Known SWIPs.  
All accepted and proposed SWIPs can be found here.\nKnown SWIPs Next SWIP Number: 8\nAccepted SWIPs  SWIP-8 Support ActiveMQ Monitoring SWIP-5 Support ClickHouse Monitoring SWIP-4 Support available layers of service in the topology SWIP-3 Support RocketMQ Monitoring SWIP-2 Collecting and Gathering Kubernetes Monitoring Data SWIP-1 Create and detect Service Hierarchy Relationship  ","excerpt":"SWIP - SkyWalking Improvement Proposal SWIP - SkyWalking Improvement Proposal is an official …","ref":"/docs/main/next/en/swip/readme/","title":"SWIP - SkyWalking Improvement Proposal"},{"body":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group; service instance nodes share the same code. Max length is 50 (UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, the OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default. SAMPLE_N_PER_3_SECS means sampling N TraceSegments in 3 seconds at most. SW_AGENT_SAMPLE Not set   agent.authentication Whether authentication is active is based on the backend setting; see application.yml for more details. For most scenarios, this needs backend extensions; only basic match auth is provided in the default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep the memory cost estimable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keeps your application's memory cost estimable. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, the skywalking agent will save all instrumented class files in the /debugging folder. The SkyWalking team may ask for these files in order to resolve compatibility problems. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance and should be unique in the service. If empty, the SkyWalking agent will generate a 32-bit uuid. By default, UUID@hostname is used as the instance name. Max length is 50 (UTF-8 char). SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in JSON format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How deep the agent goes when logging all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of gRPC, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length; setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even if the backend is not available when this value is true. 
SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. 
Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. 
SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. 
SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. 
SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Referenc Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Referenc Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. 
SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. 
SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.rocketmqclient.collect_message_keys If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.     plugin.rocketmqclient.collect_message_tags If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.            Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","excerpt":"Table of Agent Configuration Properties This is the properties list supported in …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/","title":"Table of Agent Configuration Properties"},{"body":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. 
SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. 
{theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. 
The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. 
will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. 
SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. 
SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Referenc Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Referenc Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. 
SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.rocketmqclient.collect_message_keys If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.     plugin.rocketmqclient.collect_message_tags If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.            Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","excerpt":"Table of Agent Configuration Properties This is the properties list supported in …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/configurations/","title":"Table of Agent Configuration Properties"},{"body":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. 
Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. 
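For the group name::logic name convention of agent.service_name mentioned above, a minimal sketch in agent/config/agent.config; the service and group names are hypothetical.
    # "mall" becomes the group in the service metadata, "order-service" the logic name
    agent.service_name=mall::order-service
    # the same value can instead come from the environment variable listed in the table
    # SW_AGENT_NAME=mall::order-service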
SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. 
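A small sketch of the statuscheck options described above, assuming a hypothetical business-flow exception class.
    # spans that throw this exception are not tagged as errors
    statuscheck.ignored_exceptions=org.example.BusinessFlowException
    # limit how deep the agent checks the traced exception (documented default is 1)
    statuscheck.max_recursive_depth=1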
SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. 
SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. 
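A sketch of the HTTP header collection settings just described; the header names are only examples, and 2048 is the documented default threshold.
    # collect these request headers; names follow the javax.servlet.http definition
    plugin.http.include_http_headers=Accept,Accept-Language
    # truncate combined header values to 2048 characters; negative keeps them complete
    plugin.http.http_headers_length_threshold=2048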
SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. sw_plugin_kafka_producer_config    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). 
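A minimal sketch of the Kafka reporter settings described above; the broker addresses and the producer keys are illustrative, not defaults.
    plugin.kafka.bootstrap_servers=kafka-1:9092,kafka-2:9092
    # map-style producer setting; it wins over the JSON form if the same key appears in both
    plugin.kafka.producer_config[max.request.size]=2097152
    plugin.kafka.producer_config_json={"batch.size":16384}
    # prefix topics (e.g. staging-skywalking-segments) to isolate this OAP deployment on a shared cluster
    plugin.kafka.namespace=staging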
SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Referenc Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Referenc Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. 
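A sketch of the Redis parameter collection options described above; 128 is the documented default length limit.
    # record Redis command parameters on spans created by the Lettuce plugin
    plugin.lettuce.trace_redis_parameters=true
    # truncate the collected parameters to 128 characters
    plugin.lettuce.redis_parameter_max_length=128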
SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp    Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. 
using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","excerpt":"Table of Agent Configuration Properties This is the properties list supported in …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/configurations/","title":"Table of Agent Configuration Properties"},{"body":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. 
SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. 
Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. 
SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. 
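A sketch of the request-parameter collection switches described above; for a Spring application running on Tomcat the doc suggests enabling only one of the two plugins.
    # collect request parameters via the SpringMVC plugin only
    plugin.springmvc.collect_http_params=true
    plugin.tomcat.collect_http_params=false
    # keep at most 1024 characters of parameters; negative values keep them complete
    plugin.http.http_params_length_threshold=1024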
SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. sw_plugin_kafka_producer_config    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. 
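A sketch of the Dubbo argument collection settings described above; 256 is the documented default threshold.
    # record consumer-side RPC arguments via Object#toString
    plugin.dubbo.collect_consumer_arguments=true
    # only the first 256 characters of the arguments are sent to the OAP backend
    plugin.dubbo.consumer_arguments_length_threshold=256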
SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Referenc Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Referenc Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. 
SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. 
SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/          Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","excerpt":"Table of Agent Configuration Properties This is the properties list supported in …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/configurations/","title":"Table of Agent Configuration Properties"},{"body":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. 
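The reset-to-empty syntax mentioned in the note above, shown as plain agent.config lines; plugin.kafka.topics and the producer_config key are the doc's own examples.
    # override a collection-type default such as plugin.kafka.topics=a,b,c,d with an empty collection
    plugin.kafka.topics=
    # override a map-type default such as plugin.kafka.producer_config[key]=value with an empty map
    plugin.kafka.producer_config[]=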
Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. 
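A sketch combining the collector address and TLS-related options listed above; the backend host name is hypothetical, and the CA path is the documented default.
    # OAP trace receiver address(es)
    collector.backend_service=oap.example.internal:11800
    # force TLS on the gRPC channel and point at the trusted CA file
    agent.force_tls=true
    agent.ssl_trusted_ca_path=/ca/ca.crt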
There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. 
SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. 
SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. 
SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Referenc Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. 
SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Referenc Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. 
SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.rocketmqclient.collect_message_keys If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.     plugin.rocketmqclient.collect_message_tags If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.            Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","excerpt":"Table of Agent Configuration Properties This is the properties list supported in …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/configurations/","title":"Table of Agent Configuration Properties"},{"body":"SkyWalking Team The SkyWalking team is comprised of Members and Contributors, and the growth has never stopped. Members have direct access to the source of SkyWalking project and actively evolve the code-base. 
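(Illustration for the agent configuration tables above.) A minimal sketch of the Collection/Map reset syntax in agent/config/agent.config, assuming the defaults quoted in the tables; the [key]=value pair is the generic placeholder from the table, not a recommended setting:
# reset a collection-type config (default plugin.kafka.topics=a,b,c,d) to an empty collection
plugin.kafka.topics=
# reset a map-type config to an empty map
plugin.kafka.producer_config[]=
# or override a single map entry
plugin.kafka.producer_config[key]=value
The agent generally also accepts the same properties as JVM system properties prefixed with skywalking. (for example -Dskywalking.plugin.kafka.topics=); verify against the agent override documentation before relying on it.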
Contributors improve the project through submission of patches and suggestions to the Members. All contributions to SkyWalking are appreciated The number of contributors to the project is unbounded. All contributions to SkyWalking are greatly appreciated, whether for trivial cleanups, big new features or other material rewards.\nGet started   Members Project Management Committee    Name Apache ID      Can Li lican  Candy198088   DongXue Si ilucky    Han Liu liuhan  dalek_zero   Haochao Zhuang daming    Haoyang Liu liuhaoyangzz    Hongtao Gao hanahmily    Hongwei Zhai innerpeacez    Ignasi Barrera nacx    Jiajing Lu lujiajing    Jian Tan tanjian    Jiaqi Lin linjiaqi    Jiemin Xia jmjoy    Jinlin Fu withlin    Juntao Zhang zhangjuntao    Kai Wan wankai  wankai123   Kai Wang wangkai    Lang Li lilang         Name Apache ID      Michael Semb Wever mck    Qiuxia Fan qiuxiafan    Sheng Wu (Project V.P.) wusheng  wusheng1108   Shinn Zhang zhangxin  ascrutae   Wei Zhang zhangwei24    Wenbing Wang wangwenbin    Willem Ning Jiang ningjiang    Yang Bai baiyang    Yanlong He heyanlong  YanlongHe   Yao Wang ywang    Ye Cao dashanji    Yihao Chen yihaochen  Superskyyy   Yixiong Cao caoyixiong    Yongsheng Peng pengys    Yuguang Zhao zhaoyuguang    Zhang Kewei zhangkewei    Zhenxu Ke kezhenxu94  kezhenxu94      Committer    Name Apache ID      Brandon Fergerson bfergerson    Gong Dewei kylixs    Gui Cao zifeihan  zifeihan007   Hailin Wang wanghailin    Huaxi Jiang hoshea  Zerone___01   Jiapeng Liu liujiapeng    JunXu Chen chenjunxu    Ke Zhang zhangke  Humbertttttt   Ming Wen wenming    Puguang Yang ypg    Qiang Li liqiang    Qiang Xu xuqiang         Name Apache ID      Ruixian Wang ax1an  Ax1anRISE   Sheng Wang wangsheng    Tomasz Pytel tompytel    Wei Hua alonelaval    Wei Jin kvn    Weijie Zou kdump  RootShellExp   Weiyi Liu wayilau    Xiang Wei weixiang1862    Yueqin Zhang yswdqz    Yuntao Li liyuntao    Zhusheng Xu aderm    Zixin Zhou zhouzixin  starry_loveee      Contributors 882  Search   SkyWalking Showcase    22     kezhenxu94    wu-sheng    wankai123    mrproliu    Fine0830    JaredTan95    pg-yang    arugal    weixiang1862    dashanji    innerpeacez    yswdqz    peachisai    CodePrometheus    hanahmily    JohnDuncan5171    nisiyong    Superskyyy    azibhassan    chenxiaohu    jmjoy    sacloudy     SkyWalking Website    99     wu-sheng    Jtrust    kezhenxu94    mrproliu    hanahmily    rootsongjc    fgksgf    Superskyyy    jmjoy    JaredTan95    Fine0830    arugal    dmsolr    innerpeacez    BFergerson    zhaoyuguang    wankai123    dashanji    TinyAllen    weixiang1862    EvanLjp    peng-yongsheng    heyanlong    Humbertzhang    yswdqz    yanmaipian    lujiajing1126    FingerLeader    gxthrj    Ax1an    YunaiV    LIU-WEI-git    langyan1022    pg-yang    libinglong    alonelaval    nisiyong    x22x22    HHoflittlefish777    CzyerChen    cheenursn    thebouv    Alipebt    PGDream    liuhaoyang    LiteSun    liqiangz    geomonlin    lijing-21    leimeng-ma    klboke    kehuili    JoeCqupt    jjlu521016    jacentsao    hutaishi    hailin0    fushiqinghuan111    chopin-d    apmplus    jxnu-liguobin    zhang98722    yimeng    xu1009    xiongshiyan    xdRight     bing**   weiqiang333    vcjmhg    tristan-tsl    tisonkun    tevahp    sebbASF    FeynmanZhou    peachisai    nic-chen    lucperkins    lilien1010    Dylan-beicheng    devkanro    Johor03    ButterBright    harshaskumar05    kylixs    crl228    Humbedooh    thisisgpy    CharlesMaster    andrewgkew    wayilau    feelwing1314    adriancole   
 agile6v     394102339**   YoungHu    wang-yeliang    withlin    moonming     983708408**     SkyWalking    492     wu-sheng    peng-yongsheng    kezhenxu94     ascrutae**   ascrutae    acurtain    wankai123    mrproliu    hanahmily    Fine0830    JaredTan95    dmsolr    arugal    zhaoyuguang    lytscu    wingwong-knh     zhangxin**   BFergerson    pg-yang     ascrutae**   lujiajing1126    Ax1an    yswdqz    wayilau    EvanLjp    zifeihan    IanCao     295198088**   weixiang1862    x22x22    innerpeacez     394102339**   Superskyyy    clevertension    liuhaoyang    withlin    liqiangz    xbkaishui     renliangbu**   carlvine500    candyleer    peachisai    hailin0    zhangkewei    bai-yang    heyanlong    tom-pytel    TinyAllen    adermxzs    songzhendong     55846420**   wallezhang    Jtrust    IluckySi    qxo    smartboy37597    CzyerChen    alonelaval    heihaozi    wendal    LIU-WEI-git    CodePrometheus    Humbertzhang    toffentoffen    CalvinKirs    tristaZero     liufei**   zhyyu    stalary    honganan     lxin96**   jjtyro    xuanyu66    J-Cod3r    YunaiV    langyan1022    Liu-XinYuan    SataQiu    Cool-Coding    harvies    xu1009    wuwen5     55846420**   tuohai666    flycash    JohnNiang    yaojingguo    fgksgf    adriancole    codeglzhang    yu199195    yangyiweigege    VictorZeng    TeslaCN    LiWenGu    haoyann    chidaodezhongsheng    xinzhuxiansheng    aiyanbo    darcyda1    sN0wpeak    FatihErdem    chenhaipeng    nisiyong    Z-Beatles    YczYanchengzhe    cyberdak    dagmom    codelipenghui    dominicqi    dio    libinglong    liuzc9     lizl9**   neeuq    snakorse    xiaospider    xiaoy00    Indifer    huangyoje    s00373198    cyejing    Ahoo-Wang    yanfch    devkanro    oflebbe    rabajaj0509    Shikugawa    LinuxSuRen    ScienJus    liu-junchi    WillemJiang    chenpengfei    gnr163    jiang1997    jmjoy    viswaramamoorthy    vcjmhg    tzy1316106836    terranhu    scolia    osiriswd     2278966200**   novayoung    muyun12    mgsheng    makingtime    klboke    katelei6    karott    jinlongwang    hutaishi    Hen1ng    kuaikuai    lkxiaolou    purgeyao    michaelsembwever     bwh12398**   YunfengGao    WildWolfBang    juzhiyuan    SoberChina    KangZhiDong    mufiye     yushuqiang**   zxbu    yazong    xzyJavaX    xcaspar    wuguangkuo    webb2019    evanxuhe    yang-xiaodong    RaigorJiang    Qiliang    Oliverwqcwrw    buxingzhe    tsuilouis    leizhiyuan    Jargon9    potiuk     iluckysi   kim-up    HarryFQG    easonyipj    willseeyou    AlexanderWert    ajanthan    chen-ni    844067874    elk-g    dsc6636926    heihei180    amwyyyy    dengliming    cuiweiwei    coki230    coder-yqj    cngdkxw    chenmudu    beckhampu    cheetah012    ZhuWang1112    zaunist    shichaoyuan    XhangUeiJong    Switch-vov    SummerOfServenteen    maxiaoguang64    maclong1989    sourcelliu    margauxcabrera    Yebemeto    momo0313    Xlinlin     cheatbeater**   lxliuxuankb    lu-xiaoshuang    lpcy    louis-zhou    lngmountain     lixin40**   liuyanggithup    linliaoy     xlz35429674**    seiferhu**    seiferhu**    72372815\u0026#43;royal-dargon**    72775443\u0026#43;raybi-asus**   ralphgj    qiuyu-d    thanq    probeyang    carrypann    pkxiuluo    FeynmanZhou    ooi22    onecloud360    nileblack    chenyi19851209    neatlife    lijial    inversionhourglass    huliangdream    hsoftxl    hi-sb    Heguoya    hardzhang    haotian2015    gzlicanyi    guyukou    gy09535    guochen2    kylixs    gonedays    guodongq    ggndnn    GerryYuan    geekymv    geektcp    leemove    lazycathome    
langke93    landonzeng    lagagain    ksewen    killGC    kikupotter    kevinyyyy    ken-duck    kayleyang    aeolusheath    justeene    jsbxyyx    zhangjianweibj    jianglin1008    jialong121    jjlu521016     zhousiliang163**    45602777\u0026#43;zhangzhanhong2**    zcai2**    zaygrzx**    yuyujulin**    yurunchuan**    182148432**    wu_yan_tao**    yanmingbi**    yangxb2010000**    yanbinwei2851**    978861768**    48479214\u0026#43;xuxiawei**    9313869\u0026#43;xuchangjunjx**    yexingren23**    1903636211**    xiaozheng**    281890899**    66098854\u0026#43;tangshan-brs**    88840672\u0026#43;wangwang89**    loushuiyifan**    305542043**    381321959**    zhangliang**    kzd666**    45203823\u0026#43;gzshilu**    28707699**    yqjdcyy**    tanjunchen20**    liuzhengyang**    hey.yanlong**    zygfengyuwuzu**    tmac.back**    xtha**    345434645**    zoidbergwill**    tbdp.hi**    tanzhen**    973117150**    89574863\u0026#43;4ydx3906**    sxzaihua**    hpy253215039**    814464284**    stone_wlg**    stenio**    hoolooday**    songzhe_fish**    wang-yaozheng**    sk163**    101088629\u0026#43;simonluo345**    simonlei**    41794887\u0026#43;sialais**    31874857\u0026#43;sikelangya**    mestarshine**    34833891\u0026#43;xdright**    bing**    23226334**    wujun8**    zzhxccw**    qrw_email**    wind2008hxy**    36367435\u0026#43;whl12345**    45580443\u0026#43;whfjam**    zwj777**    xiongchuang**    lyzhang1999**    52819067\u0026#43;weiqiang-w**    55177318\u0026#43;vcjmhg**    46754544\u0026#43;tristan-tsl**    wander4096**    136082619**    montecristosoul**   Lin1997    coolbeevip    LazyLei    leileiluoluo    lt5227    mostcool    Alipebt    zhentaoJin    kagaya85    augustowebd    j-s-3    JohnDuncan5171    jbampton    zouyx    JoeKerouac    Linda-pan    jim075960758    jiekun    c1ay     chenglei**    chenyao**   npmmirror    nikitap492    nickwongwong    ZhuoSiChen    mikechengwei    mikkeschiren    zeaposs    TheRealHaui    doddi    marcingrzejszczak    maolie    mahmoud-anwer    donotstopplz    liuhaoXD    linghengqian    darcydai    sdanzo    chanjarster    damonxue    cvimer    CommissarXia    ChengDaqi2023    CharlesMaster    shiluo34    brucewu-fly     qq327568824**   ArjenDavid-sjtu    AngryMills     andyzzlms**   AirTrioa    lunchboxav    50168383    1095071913    Jedore    mustangxu     zhongjianno1**   DeadLion    Lighfer    Henry75m39    onurccn    tankilo    Gallardot    AbelCha0    bootsrc    FingerLiu    Felixnoo    DuanYuePeng    efekaptan    qijianbo010    qqeasonchen    devon-ye     295198088**    c feng   buzuotaxuan    mmm9527    wolfboys    beiwangnull    amogege    alidisi    alexkarezin    aix3    adamni135    absorprofess    ZhengBing520    ZhHong    chenbeitang    ZS-Oliver    panniyuyu    fuhuo    ethan256    eoeac    echooymxq    dzx2018    IceSoda177    dvsv2    drgnchan    donbing007    dogblues    divyakumarjain    dd1k    dashanji    cutePanda123    cui-liqiang    cuishuang    crystaldust    wbpcode    TerrellChen    Technoboy-    StreamLang    stevehu    kun-song     826245622**   compilerduck    SheltonZSL    sergicastro    zhangsean    yymoth    ruibaby    rlenferink    remicollet    RandyAbernethy    QHWG67    pengyongqiang666    Patrick0308    yuqichou    Miss-you    ycoe     me**   yanickxia    XinweiLyu    liangyepianzhou    Wooo0    ViberW    wilsonwu    moonming    wyt    victor-yi    Videl    trustin    TomMD    ThisSeanZhang    gitter-badger    Adrian Cole    github-actions[bot]    dependabot[bot]     Booster UI    40     Fine0830   
 wu-sheng    heyanlong    pg-yang    CzyerChen    yswdqz    techbirds    Superskyyy    peachisai    zhourunjie1988    xu1009    weixiang1862    lsq27    innerpeacez    horochx    drgnchan    smartboy37597    CodePrometheus    WitMiao    liuyib    arugal    wuwen5    songzhendong    pw151294    kezhenxu94    jiang1997    hutaishi    heihei180    hadesy    ZhuWang1112    XinweiLyu    liangyepianzhou    SimonHu1993    LinuxSuRen    binbin666    marcingrzejszczak    toffentoffen    mahmoud-anwer    donotstopplz    BFergerson      Plugin for Service Topology    4     Fine0830    wu-sheng    Superskyyy     fine**     Java Agent    476     wu-sheng    peng-yongsheng     ascrutae**   ascrutae    kezhenxu94    acurtain    hanahmily    JaredTan95    dmsolr    mrproliu    arugal    zhaoyuguang    lytscu    Fine0830     zhangxin**   wingwong-knh    BFergerson    wankai123     ascrutae**   Ax1an    wayilau    zifeihan    EvanLjp    IanCao     295198088**   x22x22     394102339**   pg-yang    xu1009    clevertension    withlin    xbkaishui     renliangbu**   liuhaoyang    lujiajing1126    candyleer    carlvine500    liqiangz    nisiyong    hailin0    wallezhang    bai-yang    zhangkewei    heyanlong    xzyJavaX    songzhendong    adermxzs    TinyAllen    Jtrust     55846420**   heihaozi    IluckySi    qxo    wendal    alonelaval    CzyerChen    zhyyu    Humbertzhang    tristaZero    J-Cod3r    Cool-Coding    jjtyro    honganan    stalary    wuwen5     liufei**   gzlicanyi     lxin96**   tom-pytel    xuanyu66    devkanro    hutaishi    harvies    langyan1022    Liu-XinYuan    YunaiV    SataQiu    adriancole    darcyda1    yaojingguo    JohnNiang    flycash    tuohai666    cyberdak    codelipenghui    peachisai     55846420**   LiWenGu    kylixs    TeslaCN    haoyann    chidaodezhongsheng    xinzhuxiansheng    VictorZeng    xiaqi1210    yu199195    chanjarster    FatihErdem    aiyanbo    sN0wpeak    fgksgf    Oliverwqcwrw    Z-Beatles    alanlvle    dagmom    innerpeacez    dominicqi    weixiang1862    vcjmhg    cyejing    s00373198    huangyoje    Indifer    xiaoy00    snakorse    neeuq     lizl9**   libinglong    gnr163    chenpengfei    YczYanchengzhe    WillemJiang    liu-junchi    ScienJus    oflebbe    yanfch    Ahoo-Wang    dio    codeglzhang    osiriswd    scolia    terranhu    tzy1316106836    viswaramamoorthy    webb2019    gglzf4    kuaikuai     2278966200**   novayoung    muyun12    mgsheng    makingtime    lpcy    klboke    karott    jinlongwang    Hen1ng    Superskyyy    seifeHu    lkxiaolou    purgeyao    PepoRobert    michaelsembwever    marcingrzejszczak     bwh12398**   YunfengGao    WildWolfBang    shichaoyuan    juzhiyuan    SoberChina    KangZhiDong     yushuqiang**   zxbu    yazong    xcaspar    wuguangkuo    geekymv    yang-xiaodong    Shikugawa    Qiliang    buxingzhe    tsuilouis    Leibnizhu    leizhiyuan    CalvinKirs    Jargon9    potiuk     iluckysi   2han9wen71an    844067874    HarryFQG    ForrestWang123    ajanthan    AlexanderWert    willseeyou    ArjenDavid-sjtu    evanxuhe    elk-g    dsc6636926    amwyyyy    dengliming    dashanji    cylx3126    cuiweiwei    coki230    SummerOfServenteen    Switch-vov    tjiuming    XhangUeiJong    zaunist    cheetah012    beckhampu    chenmudu    coder-yqj    cngdkxw    githubcheng2978    FeynmanZhou    onecloud360    nileblack    neatlife    Xlinlin    momo0313    Yebemeto    margauxcabrera    sourcelliu    maxiaoguang64    lxliuxuankb    lvxiao1    guodongq    louis-zhou     lixin40**   pkxiuluo    carrypann    probeyang    
qiaoxingxing    thanq    qiuyu-d    ggndnn    ralphgj    raybi-asus    GerryYuan    geektcp    mestarshine     chenyao**   sikelangya    simonlei    sk163    zhangjianweibj    JoeCqupt    jialong121    jjlu521016    hyhyf    hxd123456    huliangdream    xiaomiusa87    hsoftxl    hi-sb    Heguoya    hardzhang    haotian2015    guyukou    gy09535    rechardguo    gonedays    liuyanggithup    linliaoy    lijial    leemove    lbc97    lazycathome    langke93    landonzeng    ksewen    killGC    kikupotter    kevinyyyy    kayleyang    aeolusheath    justeene    jsbxyyx    jmjoy     tmac.back**    345434645**    zoidbergwill**    zhousiliang163**    45602777\u0026#43;zhangzhanhong2**    zcai2**    zaygrzx**    yuyujulin**    yurunchuan**    74546965\u0026#43;yswdqz**    182148432**    wu_yan_tao**    yanmingbi**    yangxb2010000**    yanbinwei2851**    249021408**    9313869\u0026#43;xuchangjunjx**    xiongchuang**    cheatbeater**    66098854\u0026#43;tangshan-brs**    42414099\u0026#43;yanye666**    893979653**    88840672\u0026#43;wangwang89**    loushuiyifan**    lcbiao34**    305542043**    381321959**    orezsilence**    zhangliang**    kzd666**    45203823\u0026#43;gzshilu**    28707699**    tanjunchen20**    70845636\u0026#43;mufiye**    liuzhengyang**    zygfengyuwuzu**    lyzhang1999**    wqp1987**   w2dp    weiqiang-w    tristan-tsl    tincopper    angty    tedli    tbdpmi     tanzhen**   tangxqa    sxzaihua    hepyu    surechen    stone-wlg    stenio2011    zhe1926     xubinghaozs**    yexingren23**    1903636211**    1612202137**    281890899**    34833891\u0026#43;xdright**    bing**    23226334**    wujun8**    809697469**    zzhxccw**    qrw_email**    wind2008hxy**    63728367\u0026#43;will2020-power**    36367435\u0026#43;whl12345**    45580443\u0026#43;whfjam**    zwj777**    weihubeats**   augustowebd    jbampton    zouyx    JoeKerouac    Linda-pan    leihuazhe     zhongjianno1**   DeadLion    Lighfer    kim-up    hardy4yooz    onurccn    guillaume-alvarez    GuiSong01    tankilo    Gallardot    AbelCha0    nikitap492    nickwongwong    ZhuoSiChen    mikkeschiren    zeaposs    TheRealHaui    maolie    donotstopplz    liuhaoXD    lishuo5263    Lin1997    coolbeevip    LazyLei    leileiluoluo    lt5227    zhentaoJin    kagaya85    CharlesMaster    shiluo34    wapkch    thisisgpy    brucewu-fly    BigXin0109    bmk15897     qq327568824**   AngryMills     andyzzlms**   guoxiaod    adaivskenan    Alceatraz    AirTrioa    lunchboxav    50168383    1095071913    bootsrc    ForestWang123    FingerLiu    DuanYuePeng    efekaptan    qijianbo010    qqeasonchen    DominikHubacek    devon-ye    darknesstm    zhaoxiaojie0415    darcydai    sdanzo    dachuan9e    cvimer    CommissarXia    Chenfx-git    furaul    HScarb    c1ay     295198088**    c feng   buzuotaxuan    mmm9527    beiwangnull    andotorg    amogege    alexkarezin    aix3    adamni135    zimmem    ZhHong    chenbeitang    ZS-Oliver    panniyuyu    fuhuo    eoeac    life-    echooymxq    dzx2018    IceSoda177    dvsv2    drgnchan    donbing007    divyakumarjain    AlchemyDing    dd1k    cutePanda123    cui-liqiang    crystaldust    jinrongzhang    wbpcode    TerrellChen    Technoboy-    stevehu    kun-song     826245622**   compilerduck    sergicastro    zhangsean    yymoth    SWHHEART    ruibaby    rlenferink    RickyLau    RandyAbernethy    QHWG67    Patrick0308     chenglei**   yuqichou    yoyofx    Miss-you    ycoe     me**   yanickxia    yangyulely    Wooo0    ViberW    wilsonwu    moonming    victor-yi    Videl    
trustin    TomMD    ThisSeanZhang    gitter-badger     Python Agent    39     kezhenxu94    Superskyyy    tom-pytel    alonelaval    jiang1997    Humbertzhang    Jedore    ZEALi    katelei6    SheltonZSL    jaychoww    FAWC438    wu-sheng    probeyang    langyizhao    arcosx    zkscpqm    wuwen5    dafu-wu    VxCoder    taskmgr    Forstwith    fuhuo    dcryans     32413353\u0026#43;cooolr**   c1ay    chestarss    alidisi    XinweiLyu    TomMD    CodePrometheus    shenxiangzhuang    doddi    sungitly    wzy960520    JarvisG495    JaredTan95    fgksgf    zgfh     NodeJS Agent    15     kezhenxu94    tom-pytel    ruleeeer    BFergerson    wu-sheng    michaelzangl    alanlvle    tianyk    ErosZy    QuanjieDeng    TonyKingdom    liu-zhizhu     wxb17742006482**   nd-lqj    wuwen5     Go Agent    23     mrproliu    CodePrometheus    Alipebt    wu-sheng    LinuxSuRen    ShyunnY    IceSoda177    vearne    rfyiamcool    ethan256    jiekun    zheheBao    xuyue97    jarvis-u    icodeasy    YenchangChan    kikoroc    darknos    Ecostack    Ruff-nono    0o001    lujiajing1126    GlqEason     Rust Agent    7     jmjoy    wu-sheng    Shikugawa    tisonkun    CherishCai    dkkb    kezhenxu94     PHP Agent    5     jmjoy    heyanlong    phanalpha    wu-sheng    matikij     Client JavaScript    18     Fine0830    wu-sheng    arugal    Lighfer    kezhenxu94    tianyk    wuwen5    Leo555    qinhang3    min918    tthallos    i7guokui    aoxls    givingwu    Jtrust    JaredTan95    AliceTWu    airene     Nginx LUA Agent    21     wu-sheng    dmsolr    membphis    moonming    mrproliu    spacewander    kezhenxu94    WALL-E    arugal    wangrzneu    yxudong    JaredTan95    jeremie1112    dingdongnigetou    CalvinKirs    lilien1010    Jijun    Dofine-dufei    alonelaval    Frapschen    tzssangglass     Kong Agent    4     dmsolr    wu-sheng    kezhenxu94    CalvinKirs     SkyWalking Satellite    13     mrproliu    EvanLjp    kezhenxu94    gxthrj    wu-sheng    wangrzneu    BFergerson    fgksgf    CalvinKirs    guangdashao    inversionhourglass    nic-chen    arugal     Kubernetes Event Exporter    5     kezhenxu94    wu-sheng    fgksgf    dmsolr    CalvinKirs     SkyWalking Rover    9     mrproliu    wu-sheng    spacewander    jelipo    hkmdxlftjf    IluckySi    LinuxSuRen    caiwc    kezhenxu94      SkyWalking CLI    15     kezhenxu94    mrproliu    fgksgf    wu-sheng    hanahmily    try-agaaain    JarvisG495    arugal    alonelaval    BFergerson    heyanlong    Alexxxing    Superskyyy    clk1st    innerpeacez     Kubernetes Helm    31     innerpeacez    kezhenxu94    wu-sheng    hanahmily    mrproliu    JaredTan95    ButterBright    dashanji    rh-at    chengshiwen    eric-sailfish    geffzhang    glongzh    chenvista    swartz-k    tristan-tsl    vision-ken     wang_weihan**   wayilau    williamyao1982    zshrine    aikin-vip    wankai123    SeanKilleen    ScribblerCoder    rabajaj0509    CalvinKirs    carllhw    zalintyre    Yangfisher1    aviaviavi     SkyWalking Cloud on Kubernetes    19     hanahmily    dashanji    kezhenxu94    mrproliu    weixiang1862    wu-sheng    ESonata    jichengzhi    heyanlong    hwzhuhao    SzyWilliam     rolandma**   robberphex    toffentoffen    CalvinKirs    fgksgf    Duncan-tree-zhou    ButterBright    BFergerson      Data Collect Protocol    23     wu-sheng    mrproliu    arugal    kezhenxu94    liuhaoyang    EvanLjp    Shikugawa    peng-yongsheng    zifeihan    Switch-vov    dmsolr    hanahmily    fgksgf    nacx    yaojingguo    SataQiu    stalary    Z-Beatles    liqiangz    
snakorse    xu1009    heyanlong    Liu-XinYuan     Query Protocol    17     wu-sheng    mrproliu    wankai123    arugal    peng-yongsheng    kezhenxu94    hanahmily    x22x22    JaredTan95    BFergerson    MiracleDx    fgksgf    liuhaoyang    Fine0830    chenmudu    liqiangz    heyanlong     Go API    12     mrproliu    wu-sheng    kezhenxu94    arugal    fgksgf     dalekliuhan**   gxthrj    liqiangz    EvanLjp    JaredTan95    CalvinKirs     mrproliu**     BanyanDB    27     hanahmily    lujiajing1126    Fine0830    WuChuSheng1    ButterBright    wu-sheng    HHoflittlefish777    hailin0    zesiar0    sivasathyaseeelan    mikechengwei    Sylvie-Wxr    innerpeacez    sacloudy    caicancai    tisonkun    DevPJ9    LinuxSuRen    sksDonni    mrproliu    BFergerson    Muyu-art    CalvinKirs    qazxcdswe123    achintya-7    e1ijah1    kezhenxu94     BanyanDB Java Client    5     lujiajing1126    wu-sheng    hanahmily    kezhenxu94    hailin0     BanyanDB Helm    5     ButterBright    wu-sheng    hanahmily    wankai123    kezhenxu94      Agent Test Tool    19     dmsolr    kezhenxu94    mrproliu    wu-sheng    arugal    nisiyong    zhyyu    EvanLjp    yaojingguo    CalvinKirs    LeePui    marcingrzejszczak    Shikugawa    dagmom    harvies    alonelaval    jmjoy    pg-yang    OrezzerO     SkyWalking Eyes    37     kezhenxu94    fgksgf    wu-sheng    zooltd    emschu    tisonkun    jmjoy    keiranmraine    MoGuGuai-hzr    mrproliu    dongzl    spacewander    gdams    rovast    elijaholmos    ryanmrichard    freeqaz    heyanlong    zifeihan    mohammedtabish0    acelyc111    Xuanwo    xiaoyawei    stumins    steveklabnik    chengshiwen    crholm    fulmicoton    Two-Hearts    kevgo    halacs    FushuWang    Juneezee    ddlees    dave-tucker    antgamdia    guilload     SkyWalking Infra E2E    15     mrproliu    kezhenxu94    Humbertzhang    fgksgf    chunriyeqiongsaigao    ethan256    Superskyyy    dashanji    lujiajing1126    JohnNiang    CalvinKirs    FeynmanZhou    arugal    heyanlong    wu-sheng      (Archived) Docker Files    12     hanahmily    wu-sheng    JaredTan95    kezhenxu94     lixin40**   aviaviavi    andrewgkew    carlvine500    kkl129    tristan-tsl    arugal    heyanlong     (Archived) Rocketbot UI    66     TinyAllen    Fine0830    x22x22    wu-sheng    JaredTan95    kezhenxu94    heihaozi    bigflybrother    Jtrust    dmsolr    zhaoyuguang    alonelaval    tom-pytel    hanahmily    aeolusheath    arugal    hailin0    Indifer     zhaoyuguang**   xuchangjunjx    wuguangkuo    whfjam    shiluo34    ruibaby    wilsonwu    constanine    horber    liqiangz    leemove    fuhuo     denghaobo**   jianglin1008    codelipenghui    lunamagic1978    novayoung    probeyang    dominicqi    stone-wlg    surechen    wallezhang    wuwen5     bing**   xu1009    huangyoje    heyanlong    llissery     437376068**   aiyanbo    BFergerson    efekaptan    yanfch    grissom-grissom    grissomsh    Humbertzhang    kagaya85    liuhaoyang    tsuilouis    masterxxo    zeaposs    QHWG67    Doublemine    zaunist    xiaoxiangmoe    c1ay    dagmom    fredster33     (Archived) Legacy UI    23     hanahmily    wu-sheng    peng-yongsheng    ascrutae    TinyAllen     zhangxin**    295198088**    qiu_jy**   zhaoyuguang    zuohl    wendal    jjlu521016    withlin    bai-yang    zhangkewei    wynn5a    clevertension    cloudgc     baiyang06**   WillemJiang    liuhaoyang    leizhiyuan    ajanthan     (Archived) OAL Generator    2     wu-sheng    peng-yongsheng      SkyAPM-dotnet    41     liuhaoyang    snakorse    
wu-sheng    lu-xiaoshuang    ElderJames    yang-xiaodong    pengweiqhca    Ahoo-Wang    inversionhourglass    feiyun0112    sampsonye    KawhiWei    zeaposs    kaanid    qq362220083    withlin     xiaoweiyu**   witskeeper    beckjin    ShaoHans    misaya    itsvse    zhujinhu21    xclw2000    startewho    refactor2    rider11-dev    linkinshi    limfriend    guochen2    WeihanLi    SeanKilleen    cnlangzi    joesdu    SpringHgui    dimaaan    ChaunceyLin5152    catcherwong    BoydenYubin    andyliyuze    AlseinX     cpp2sky    7     Shikugawa     wbphub**   wuwen5    wu-sheng    makefriend8    wbpcode    JayInnn     SourceMarker    5     BFergerson    MrMineO5    voqaldev    chess-equality    javamak     Java Plugin Extensions    9     wu-sheng    ascrutae    JaredTan95    raybi-asus    zifeihan    nisiyong    bitray    li20020439    pg-yang     uranus    2     harvies    wu-sheng     (outdated) CN Documentations    22     kezhenxu94    SataQiu    wu-sheng    nikyotensai    ccccye123    Frapschen    shalk    wujun8    zhangnew    yazong    xiaoping378    thelight1     lilulu**   Hen1ng    harvies    dagmom    alienwow    system-designer    Superskyyy    JaredTan95    fgksgf    xing-yin      (Retired) Transporter Plugins    5     codeglzhang    wu-sheng    dmsolr    Jargon9    kezhenxu94     (Retired) Go2Sky    25     arugal    wu-sheng    hanahmily    mrproliu    kagaya85    easonyipj    nacx    Luckyboys    fgksgf    Humbertzhang    JaredTan95    JJ-Jasmin    withlin    yaojingguo    Just-maple    kuaikuai    zhuCheer    chwjbn    kehuili    kezhenxu94    limfriend    matianjun1    lokichoggio     bing**   liweiv     (Retired) Go2Sky Plugins    11     arugal    kagaya85    mrproliu    wu-sheng    elza2    matianjun1    dgqypl    zaunist    kehuili    newyue588cc    royal-dargon     (Retired) SkyAPM PHP Agent    33     heyanlong     wangbo78978**   lpf32     songzhian**   songzhian    wu-sheng    jmjoy    remicollet    kilingzhang     songzhian**   xonze    iamif3000    mikkeschiren    anynone    lvxiao1    xinfeingxia85    cyhii    silverkorn    AlpherJang    LJX22222    MrYzys    rovast    SP-66666    tinyu0    xudianyang    huohuanhuan    kezhenxu94    limfriend    ljf-6666    qjgszzx    dickens7    xybingbing    yaowenqiang    az13js     (Retired) SkyAPM Node.js    10     ascrutae    kezhenxu94    wu-sheng    zouyx    Jozdortraz    a526672351    rovast    Runrioter    jasper-zsh    TJ666         ← Team       ","excerpt":"SkyWalking Team The SkyWalking team is comprised of Members and Contributors, and the growth has …","ref":"/team/","title":"Team"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. 
Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/latest/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. 
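Putting the three telegraf.conf requirements above together, a minimal sketch of the output section might look like the following; the url value is only a placeholder for wherever the OAP exposes its Telegraf receiver endpoint (consult the http outputs documentation and your OAP setup for the actual address and path):
[[outputs.http]]
  # placeholder address/path for the OAP Telegraf receiver, not taken from this page
  url = "http://oap-host:12800/telegraf"
  # the receiver only processes Telegraf's JSON metrics format
  data_format = "json"
  # the receiver only handles second-precision timestamps
  json_timestamp_units = "1s"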
Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/next/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/v9.3.0/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. 
If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/v9.4.0/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. 
Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/v9.5.0/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/v9.6.0/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. 
If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/v9.7.0/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, metrics-service or pprof.\nMultiple export methods are supported simultaneously, separated by commas.\nPrometheus Start HTTP port to export the satellite telemetry metrics.\nWhen the following configuration is completed, then the satellite telemetry metrics export to: http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, and all the metrics contain the cluster, service and instance tag.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus} # Export telemetry data through Prometheus server, only works on \u0026#34;export_type=prometheus\u0026#34;. prometheus: # The prometheus server address. address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234} # The prometheus server metrics endpoint. 
endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics} Metrics Service Send the message to the gRPC service that supports SkyWalking\u0026rsquo;s native Meter protocol with interval.\nWhen the following configuration is completed, send the message to the specified grpc-client component at the specified time interval. Among them, service and instance will correspond to the services and service instances in SkyWalking.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:metrics_service} # Export telemetry data through native meter format to OAP backend, only works on \u0026#34;export_type=metrics_service\u0026#34;. metrics_service: # The grpc-client plugin name, using the SkyWalking native batch meter protocol client_name: ${SATELLITE_TELEMETRY_METRICS_SERVICE_CLIENT_NAME:grpc-client} # The interval second for sending metrics interval: ${SATELLITE_TELEMETRY_METRICS_SERVICE_INTERVAL:10} # The prefix of telemetry metric name metric_prefix: ${SATELLITE_TELEMETRY_METRICS_SERVICE_METRIC_PREFIX:sw_stl_} pprof pprof can provide HTTP services to allow remote viewing of service execution status, helping you discover performance issues.\n# The Satellite self telemetry configuration. telemetry: # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:pprof} # Export pprof service for detect performance issue pprof: # The pprof server address. address: ${SATELLITE_TELEMETRY_PPROF_ADDRESS::6060} ","excerpt":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, …","ref":"/docs/skywalking-satellite/latest/en/setup/examples/feature/telemetry-exporter/readme/","title":"Telemetry Exporter"},{"body":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, metrics-service or pprof.\nMultiple export methods are supported simultaneously, separated by commas.\nPrometheus Start HTTP port to export the satellite telemetry metrics.\nWhen the following configuration is completed, then the satellite telemetry metrics export to: http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, and all the metrics contain the cluster, service and instance tag.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. 
instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus} # Export telemetry data through Prometheus server, only works on \u0026#34;export_type=prometheus\u0026#34;. prometheus: # The prometheus server address. address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234} # The prometheus server metrics endpoint. endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics} Metrics Service Send the message to the gRPC service that supports SkyWalking\u0026rsquo;s native Meter protocol with interval.\nWhen the following configuration is completed, send the message to the specified grpc-client component at the specified time interval. Among them, service and instance will correspond to the services and service instances in SkyWalking.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:metrics_service} # Export telemetry data through native meter format to OAP backend, only works on \u0026#34;export_type=metrics_service\u0026#34;. metrics_service: # The grpc-client plugin name, using the SkyWalking native batch meter protocol client_name: ${SATELLITE_TELEMETRY_METRICS_SERVICE_CLIENT_NAME:grpc-client} # The interval second for sending metrics interval: ${SATELLITE_TELEMETRY_METRICS_SERVICE_INTERVAL:10} # The prefix of telemetry metric name metric_prefix: ${SATELLITE_TELEMETRY_METRICS_SERVICE_METRIC_PREFIX:sw_stl_} pprof pprof can provide HTTP services to allow remote viewing of service execution status, helping you discover performance issues.\n# The Satellite self telemetry configuration. telemetry: # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:pprof} # Export pprof service for detect performance issue pprof: # The pprof server address. address: ${SATELLITE_TELEMETRY_PPROF_ADDRESS::6060} ","excerpt":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, …","ref":"/docs/skywalking-satellite/next/en/setup/examples/feature/telemetry-exporter/readme/","title":"Telemetry Exporter"},{"body":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, metrics-service or pprof.\nMultiple export methods are supported simultaneously, separated by commas.\nPrometheus Start HTTP port to export the satellite telemetry metrics.\nWhen the following configuration is completed, then the satellite telemetry metrics export to: http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, and all the metrics contain the cluster, service and instance tag.\n# The Satellite self telemetry configuration. 
telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus} # Export telemetry data through Prometheus server, only works on \u0026#34;export_type=prometheus\u0026#34;. prometheus: # The prometheus server address. address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234} # The prometheus server metrics endpoint. endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics} Metrics Service Send the message to the gRPC service that supports SkyWalking\u0026rsquo;s native Meter protocol with interval.\nWhen the following configuration is completed, send the message to the specified grpc-client component at the specified time interval. Among them, service and instance will correspond to the services and service instances in SkyWalking.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:metrics_service} # Export telemetry data through native meter format to OAP backend, only works on \u0026#34;export_type=metrics_service\u0026#34;. metrics_service: # The grpc-client plugin name, using the SkyWalking native batch meter protocol client_name: ${SATELLITE_TELEMETRY_METRICS_SERVICE_CLIENT_NAME:grpc-client} # The interval second for sending metrics interval: ${SATELLITE_TELEMETRY_METRICS_SERVICE_INTERVAL:10} # The prefix of telemetry metric name metric_prefix: ${SATELLITE_TELEMETRY_METRICS_SERVICE_METRIC_PREFIX:sw_stl_} pprof pprof can provide HTTP services to allow remote viewing of service execution status, helping you discover performance issues.\n# The Satellite self telemetry configuration. telemetry: # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:pprof} # Export pprof service for detect performance issue pprof: # The pprof server address. address: ${SATELLITE_TELEMETRY_PPROF_ADDRESS::6060} ","excerpt":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/feature/telemetry-exporter/readme/","title":"Telemetry Exporter"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
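The Satellite telemetry configuration shown above is easier to follow re-indented. The sketch below reconstructs the Prometheus export variant from the collapsed text (comments abridged; the instance default deliberately mirrors the env var used in the original text):

# Satellite self-telemetry, Prometheus export
telemetry:
  cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster}      # deployment space, e.g. a Kubernetes namespace
  service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service}      # deployment group, e.g. a Kubernetes service
  instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance}    # minimum running unit, e.g. a pod
  export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus}     # prometheus, metrics_service, pprof or none
  prometheus:
    address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234}     # listen address
    endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics}

The metrics are then served at http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, tagged with cluster, service, and instance.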
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/latest/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/next/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
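Re-indented, the telemetry blocks quoted in the entries above look like this in application.yml (layout reconstructed from the flattened text). The first block is the shipped default with telemetry disabled; the second enables the Prometheus endpoint used for self-observability:

# default: telemetry disabled
telemetry:
  selector: ${SW_TELEMETRY:none}
  none:
  prometheus:
    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
    sslEnabled: ${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}
    sslKeyPath: ${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:""}
    sslCertChainPath: ${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:""}

# enable the Prometheus telemetry endpoint
telemetry:
  selector: ${SW_TELEMETRY:prometheus}
  prometheus:
    host: 127.0.0.1
    port: 1543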
To assist the Ops team, we provide the telemetry for the OAP backend itself.\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up prometheus fetcher.  prometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self\u0026#34;}Make sure config/fetcher-prom-rules/self.yaml exists.  Once you deploy an oap-server cluster, the target host should be replaced with a dedicated IP or hostname. For instances, there are three OAP servers in your cluster. Their host is service1, service2, and service3 respectively. You should update each self.yaml to switch the target host.\nservice1:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service1:1234labels:service:oap-server...service2:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service2:1234labels:service:oap-server...service3:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service3:1234labels:service:oap-server...Service discovery (k8s) If you deploy an oap-server cluster on k8s, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port - source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1 For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional, rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
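The prometheus-fetcher setup described in the entry above is likewise easier to read re-indented; this sketch reconstructs the module block and one per-instance self.yaml from the flattened text:

# application.yml: enable the fetcher with the bundled "self" rule
prometheus-fetcher:
  selector: ${SW_PROMETHEUS_FETCHER:default}
  default:
    enabledRules: ${SW_PROMETHEUS_FETCHER_ENABLED_RULES:"self"}

# config/fetcher-prom-rules/self.yaml on the node whose telemetry endpoint is service1:1234
fetcherInterval: PT15S
fetcherTimeout: PT10S
metricsPath: /metrics
staticConfig:
  # targets will be labeled as "instance"
  targets:
    - service1:1234
  labels:
    service: oap-server

Each OAP node in the cluster points its own self.yaml at its own host (service2:1234, service3:1234, and so on).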
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into the OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up Prometheus fetcher.  prometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self\u0026#34;}Make sure config/fetcher-prom-rules/self.yaml exists.  Once you deploy an OAP server cluster, the target host should be replaced with a dedicated IP or hostname. For instance, if there are three OAP servers in your cluster, their hosts are service1, service2, and service3, respectively. You should update each self.yaml to switch the target host.\nservice1:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service1:1234labels:service:oap-server...service2:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service2:1234labels:service:oap-server...service3:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service3:1234labels:service:oap-server...Service discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port - source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1 For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
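The OpenTelemetry Collector scrape job quoted above, re-indented (whitespace reconstructed from the collapsed text):

- job_name: 'skywalking-so11y' # make sure to use this in the so11y.yaml to filter only so11y metrics
  metrics_path: '/metrics'
  kubernetes_sd_configs:
    - role: pod
  relabel_configs:
    # keep only pods whose container is "oap" and whose port is named "prometheus-port"
    - source_labels: [__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]
      action: keep
      regex: oap;prometheus-port
    - source_labels: []
      target_label: service
      replacement: oap-server
    - source_labels: [__meta_kubernetes_pod_name]
      target_label: host_name
      regex: (.+)
      replacement: $$1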
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into the OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up Prometheus fetcher.  prometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self\u0026#34;}Make sure config/fetcher-prom-rules/self.yaml exists.  Once you deploy an OAP server cluster, the target host should be replaced with a dedicated IP or hostname. For instance, if there are three OAP servers in your cluster, their hosts are service1, service2, and service3, respectively. You should update each self.yaml to switch the target host.\nservice1:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service1:1234labels:service:oap-server...service2:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service2:1234labels:service:oap-server...service3:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service3:1234labels:service:oap-server...Service discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
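The environment variables listed in the steps above can be supplied through the pod spec when the OAP runs as a container; the env-list framing below is a hypothetical illustration, not taken from the chart, and the variable names match the v9.2.0-era entry above:

env:
  - name: SW_TELEMETRY
    value: prometheus
  - name: SW_OTEL_RECEIVER
    value: default
  - name: SW_OTEL_RECEIVER_ENABLED_OTEL_RULES
    value: oap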
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into the OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. 
The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
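Re-indented, the SSL-enabled telemetry block quoted above reads as follows (whitespace reconstructed; per the note above, the private key and cert-chain files are picked up again once changes are applied to them):

telemetry:
  selector: ${SW_TELEMETRY:prometheus}
  prometheus:
    host: 127.0.0.1
    port: 1543
    sslEnabled: true
    sslKeyPath: /etc/ssl/key.pem
    sslCertChainPath: /etc/ssl/cert-chain.pem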
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
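For teams that prefer a values file over a long --set list, the helm flags shown above map roughly to the sketch below. This assumes the Apache SkyWalking Kubernetes chart exposes oap.env, oap.ports, ui.image, and elasticsearch.* keys as implied by those flags, so verify against the chart version you actually use; $HUB and $TAG remain placeholders, as in the command above:

elasticsearch:
  replicas: 1
  minimumMasterNodes: 1
  imageTag: 7.5.1
oap:
  replicas: 2
  storageType: elasticsearch
  image:
    repository: $HUB/skywalking-oap   # placeholder, fill in your registry
    tag: $TAG
  ports:
    prometheus-port: 1234             # expose the self-observability metrics port
  env:
    SW_TELEMETRY: prometheus
    SW_OTEL_RECEIVER: default                              # enable the OTel receiver
    SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES: oap       # add the oap analyzer for OTel metrics
ui:
  image:
    repository: $HUB/skywalking-ui
    tag: $TAG

Usage would be the same helm install command with -f values.yaml replacing the --set flags.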
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. 
{"body":"The Logic Endpoint
By default, all the RPC server-side names that arrive as entry spans, such as RESTful API paths and gRPC service names, become endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.

- GraphQL Query and Mutation are logic endpoints, named after the query or mutation.
- Spring's ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}.
- Apache ShardingSphere ElasticJob's jobs are logic endpoints. The name format is ElasticJob/${jobName}.
- XXLJob's jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}.
- Quartz (optional plugin) jobs are logic endpoints. The name format is quartz-scheduler/${className}.

Users can also use SkyWalking's application toolkits to add a tag to a local span, labeling it as a logic endpoint in the analysis stage. The tag is key=x-le and value={"logic-span":true}.
","excerpt":"The Logic Endpoint By default, all the RPC server-side names that arrive as entry spans, such as RESTful API …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/logic-endpoint/","title":"The Logic Endpoint"}
The same page is also published for the next, v9.0.0, v9.1.0, and v9.2.0 versions of the Java agent documentation.
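For illustration, the snippet below is a minimal sketch of labeling a local span as a logic endpoint with the trace toolkit; the class and method names are hypothetical, and it assumes apm-toolkit-trace is on the classpath and the SkyWalking Java agent is attached.

import org.apache.skywalking.apm.toolkit.trace.ActiveSpan;
import org.apache.skywalking.apm.toolkit.trace.Trace;

public class NightlyReportJob {
    // @Trace creates a local span around this method when the agent is active.
    @Trace(operationName = "NightlyReportJob/run")
    public void run() {
        // Tag the active local span so the OAP analyzes it as a logic endpoint.
        ActiveSpan.tag("x-le", "{\"logic-span\":true}");
        // ... job logic ...
    }
}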
{"body":"Dependency the toolkit, such as using maven or gradle
Add Trace Toolkit
apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync, and asyncFinish. Add the toolkit dependency to your project:

<dependency>
  <groupId>org.apache.skywalking</groupId>
  <artifactId>apm-toolkit-trace</artifactId>
  <version>${skywalking.version}</version>
</dependency>
","excerpt":"Dependency the toolkit, such as using maven or gradle. Add Trace Toolkit apm-toolkit-trace provides …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-dependency/","title":"the toolkit, such as using maven or gradle"}
The same page is also published for the next, v9.0.0, v9.1.0, and v9.2.0 versions of the Java agent documentation.
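Once the dependency is in place, the toolkit APIs can be called directly from application code. The sketch below is illustrative only (the class and method are hypothetical) and assumes the SkyWalking Java agent is attached at runtime; without the agent, the toolkit calls take no effect.

import org.apache.skywalking.apm.toolkit.trace.ActiveSpan;
import org.apache.skywalking.apm.toolkit.trace.Trace;
import org.apache.skywalking.apm.toolkit.trace.TraceContext;

public class OrderService {
    // @Trace wraps this method in a local span.
    @Trace
    public void placeOrder(String orderId) {
        // Attach business context to the active span for later filtering.
        ActiveSpan.tag("order.id", orderId);
        // Correlate application logs with the current trace.
        System.out.println("traceId=" + TraceContext.traceId() + " order accepted");
    }
}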
{"body":"Thread dump merging mechanism
The performance profile is an enhancement feature of the APM system. We use thread dumps to estimate method execution time rather than adding multiple local spans; this way, the resource cost is much lower than using distributed tracing to locate slow methods, which makes the feature suitable for production environments. This document introduces how thread dumps are merged into the final report as a stack tree (or trees).

Thread analyst

Read data and transform
Read the data from the database and convert it to a gRPC data structure.

st=>start: Start
e=>end: End
op1=>operation: Load data using paging
op2=>operation: Transform data using parallel
st(right)->op1(right)->op2
op2(right)->e

Copy the code and paste it into this link to generate a flow chart.

- Use a stream to read data by page (50 records per page).
- Convert the data into gRPC data structures using parallel streams.
- Merge into a single list of data.
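To make the read-and-transform step concrete, here is a minimal sketch, assuming hypothetical DbRecord/GrpcSnapshot types and a paging DAO; it only mirrors the load-page, parallel-convert, merge flow described above and is not the OAP implementation.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

class ThreadSnapshotLoader {
    static final int PAGE_SIZE = 50;

    // Hypothetical storage record and gRPC message stand-ins.
    record DbRecord(String stackJson, long timestamp) {}
    record GrpcSnapshot(String stackJson, long timestamp) {
        static GrpcSnapshot from(DbRecord r) { return new GrpcSnapshot(r.stackJson(), r.timestamp()); }
    }
    interface SnapshotDao { List<DbRecord> readPage(int page, int size); }

    List<GrpcSnapshot> load(SnapshotDao dao, int totalPages) {
        return IntStream.range(0, totalPages)
                .mapToObj(page -> dao.readPage(page, PAGE_SIZE))   // load data using paging
                .flatMap(List::stream)
                .parallel()                                        // transform data in parallel
                .map(GrpcSnapshot::from)                           // convert to the gRPC structure
                .collect(Collectors.toList());                     // merge into a list of data
    }
}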
Data analysis
Use the group-by and collector modes of the Java parallel stream to group the database records by the first stack element, and use the collector to perform data aggregation. The result is a multi-root tree.

st=>start: Start
e=>end: End
op1=>operation: Group by first stack element
sup=>operation: Generate empty stack tree
acc=>operation: Accumulator data to stack tree
com=>operation: Combine stack trees
fin=>operation: Calculate durations and build result
st(right)->op1->sup(right)->acc
acc(right)->com(right)->fin->e

Copy the code and paste it into this link to generate a flow chart.

- Group by first stack element: Use the first-level element of each stack to group, ensuring that the stacks in a group share the same root node.
- Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that the original data can then be added in parallel without locking.
- Accumulator data to stack tree: Add every thread dump into the generated trees (see the sketch after this section).
  - Iterate through each element in the thread dump to check whether the parent element already has a child with the same code signature at the same stack depth. If not, add this element.
  - Keep the dump sequences and timestamps from the source in each node.
- Combine stack trees: Combine all tree structures into one, using the same rules as the accumulator.
  - Use LDR order to traverse the tree nodes. Use a Stack data structure to avoid recursive calls; each stack element represents a node that needs to be merged.
  - Merging two nodes means merging their lists of child nodes. If two children have the same code signature and the same parent, save the dump sequences and timestamps into one node. Otherwise, add the node to the target node as a new child.
- Calculate durations and build result: Calculate the relevant statistics and generate the response (see the duration sketch further below).
  - Use the same traversal logic as in the Combine stack trees step.
  - Convert to a GraphQL data structure, and put all nodes into a list for the subsequent duration calculations.
  - Calculate each node's duration in parallel. For each node, sort the sequences; whenever two sequences are consecutive, add the gap between their two timestamps to the duration.
  - Calculate each node's self execution time in parallel. For each node, deduct the time consumed by all of its children from the node's duration.

Profile data debugging
Please follow the exporter tool to package the profile data. Unzip the profile data and use the analyzer's main function to run it.
","excerpt":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. …","ref":"/docs/main/latest/en/setup/backend/backend-profile-thread-merging/","title":"Thread dump merging mechanism"}
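The accumulate/combine steps above boil down to merging children by code signature while collecting dump sequences on every node along the path. The following is a minimal sketch with hypothetical node types; it is not the OAP implementation.

import java.util.ArrayList;
import java.util.List;

// Illustrative stack-tree node: this node is a grouped root (the first stack
// element); children are merged by code signature, and dump sequences are
// collected so durations can be derived later.
class StackNode {
    final String codeSignature;
    final List<Integer> sequences = new ArrayList<>();
    final List<StackNode> children = new ArrayList<>();

    StackNode(String codeSignature) { this.codeSignature = codeSignature; }

    // Add one thread dump into this tree; framesBelowRoot are the remaining
    // frames from root to leaf, and sequence identifies the dump.
    void accumulate(List<String> framesBelowRoot, int sequence) {
        StackNode current = this;
        current.sequences.add(sequence);
        for (String signature : framesBelowRoot) {
            current = current.childFor(signature);
            current.sequences.add(sequence);
        }
    }

    // Find a child with the same code signature, or add it as a new child.
    private StackNode childFor(String signature) {
        for (StackNode child : children) {
            if (child.codeSignature.equals(signature)) {
                return child;
            }
        }
        StackNode created = new StackNode(signature);
        children.add(created);
        return created;
    }
}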
…","ref":"/docs/main/v9.6.0/en/setup/backend/backend-profile-thread-merging/","title":"Thread dump merging mechanism"},{"body":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. 
Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","excerpt":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-profile-thread-merging/","title":"Thread dump merging mechanism"},{"body":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large volumes of time-series data. One of the key features of TSDB is its ability to automatically manage data storage over time, optimize performance and ensure that the system can scale to handle large workloads. TSDB empowers Measure and Stream relevant data.\nShard In TSDB, the data in a group is partitioned into shards based on a configurable sharding scheme. Each shard is assigned to a specific set of storage nodes, and those nodes store and process the data within that shard. This allows BanyanDB to scale horizontally by adding more storage nodes to the cluster as needed.\nshard\n Buffer: It is typically implemented as an in-memory queue managed by a shard. When new time-series data is ingested into the system, it is added to the end of the queue, and when the buffer reaches a specific size, the data is flushed to disk in batches. SST: When a bucket of buffer becomes full or reaches a certain size threshold, it is flushed to disk as a new Sorted String Table (SST) file. This process is known as compaction. Segments and Blocks: Time-series data is stored in data segments/blocks within each shard. Blocks contain a fixed number of data points and are organized into time windows. Each data segment includes an index that efficiently retrieves data within the block. Block Cache: It manages the in-memory cache of data blocks, improving query performance by caching frequently accessed data blocks in memory.  Write Path The write path of TSDB begins when time-series data is ingested into the system. TSDB will consult the schema repository to check if the group exists, and if it does, then it will hash the SeriesID to determine which shard it belongs to.\nEach shard in TSDB is responsible for storing a subset of the time-series data, and it uses a write-ahead log to record incoming writes in a durable and fault-tolerant manner. The shard also holds an in-memory index allowing fast lookups of time-series data.\nWhen a shard receives a write request, the data is written to the buffer as a series of buckets. Each bucket is a fixed-size chunk of time-series data typically configured to be several minutes or hours long. As new data is written to the buffer, it is appended to the current bucket until it is full. Once the bucket is full, it is closed, and a new bucket is created to continue buffering writes.\nOnce a bucket is closed, it is stored as a single SST in a shard. The file is indexed and added to the index for the corresponding time range and resolution.\nRead Path The read path in TSDB retrieves time-series data from disk or memory and returns it to the query engine. The read path comprises several components: the buffer, cache, and SST file. The following is a high-level overview of how these components work together to retrieve time-series data in TSDB.\nThe first step in the read path is to perform an index lookup to determine which blocks contain the desired time range. 
The index contains metadata about each data block, including its start and end time and its location on disk.\nIf the requested data is present in the buffer (i.e., it has been recently written but not yet persisted to disk), the buffer is checked to see if the data can be returned directly from memory. The read path determines which bucket(s) contain the requested time range. If the data is not present in the buffer, the read path proceeds to the next step.\nIf the requested data is present in the cache (i.e., it has been recently read from disk and is still in memory), it is checked to see if the data can be returned directly from memory. The read path proceeds to the next step if the data is not in the cache.\nThe final step in the read path is to look up the appropriate SST file on disk. Files are the on-disk representation of data blocks and are organized by shard and time range. The read path determines which SST files contain the requested time range and reads the appropriate data blocks from the disk.\n","excerpt":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large …","ref":"/docs/skywalking-banyandb/latest/concept/tsdb/","title":"TimeSeries Database(TSDB)"},{"body":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large volumes of time-series data. One of the key features of TSDB is its ability to automatically manage data storage over time, optimize performance and ensure that the system can scale to handle large workloads. TSDB empowers Measure and Stream relevant data.\nShard In TSDB, the data in a group is partitioned into shards based on a configurable sharding scheme. Each shard is assigned to a specific set of storage nodes, and those nodes store and process the data within that shard. This allows BanyanDB to scale horizontally by adding more storage nodes to the cluster as needed.\nWithin each shard, data is stored in different segments based on time ranges. The series indexes are generated based on entities, and the indexes generated based on indexing rules of the Measure types are also stored under the shard.\nSegment Each segment is composed of multiple parts. Whenever SkyWalking sends a batch of data, BanyanDB writes this batch of data into a new part. For data of the Stream type, the inverted indexes generated based on the indexing rules are also stored in the segment. Since BanyanDB adopts a snapshot approach for data read and write operations, the segment also needs to maintain additional snapshot information to record the validity of the parts.\nPart Within a part, data is split into multiple files in a columnar manner. The timestamps are stored in the timestamps.bin file, tags are organized in persistent tag families as various files with the .tf suffix, and fields are stored separately in the fields.bin file.\nIn addition, each part maintains several metadata files. Among them, metadata.json is the metadata file for the part, storing descriptive information, such as start and end times, part size, etc.\nThe meta.bin is a skipping index file that serves as the entry file for the entire part, helping to index the primary.bin file.\nThe primary.bin file contains the index of each block. 
Through it, the actual data files or the tagFamily metadata files ending with .tfm can be indexed, which in turn helps to locate the data in blocks.\nNotably, for data of the Stream type, since there are no field columns, the fields.bin file does not exist, while the rest of the structure is entirely consistent with the Measure type.\nBlock Each block holds data with the same series ID. The max size of the measure block is controlled by data volume and the number of rows. Meanwhile, the max size of the stream block is controlled by data volume. The diagram below shows the detailed fields within each block. The block is the minimal unit of TSDB, which contains several rows of data. Due to the column-based design, each block is spread over several files.\nWrite Path The write path of TSDB begins when time-series data is ingested into the system. TSDB will consult the schema repository to check if the group exists, and if it does, then it will hash the SeriesID to determine which shard it belongs to.\nEach shard in TSDB is responsible for storing a subset of the time-series data. The shard also holds an in-memory index allowing fast lookups of time-series data.\nWhen a shard receives a write request, the data is written to the buffer as a memory part. Meanwhile, the series index and inverted index will also be updated. The worker in the background periodically flushes data, writing the memory part to the disk. After the flush operation is completed, it triggers a merge operation to combine the parts and remove invalid data.\nWhenever a new memory part is generated, or when a flush or merge operation is triggered, they initiate an update of the snapshot and delete outdated snapshots. The parts in a persistent snapshot could be accessible to the reader.\nRead Path The read path in TSDB retrieves time-series data from disk or memory, and returns it to the query engine. The read path comprises several components: the buffer and parts. The following is a high-level overview of how these components work together to retrieve time-series data in TSDB.\nThe first step in the read path is to perform an index lookup to determine which parts contain the desired time range. The index contains metadata about each data part, including its start and end time.\nIf the requested data is present in the buffer (i.e., it has been recently written but not yet persisted to disk), the buffer is checked to see if the data can be returned directly from memory. The read path determines which memory part(s) contain the requested time range. If the data is not present in the buffer, the read path proceeds to the next step.\nThe next step in the read path is to look up the appropriate parts on disk. Files are the on-disk representation of blocks and are organized by shard and time range. The read path determines which parts contain the requested time range and reads the appropriate blocks from the disk. Due to the column-based storage design, it may be necessary to read multiple data files.\n","excerpt":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large …","ref":"/docs/skywalking-banyandb/next/concept/tsdb/","title":"TimeSeries Database(TSDB)"},{"body":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large volumes of time-series data. One of the key features of TSDB is its ability to automatically manage data storage over time, optimize performance and ensure that the system can scale to handle large workloads. 
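For orientation, the per-part files described above could be pictured roughly as the following layout. This is illustrative only; the exact file set depends on the BanyanDB version and on whether the part stores Measure or Stream data.

  <part>/
    metadata.json   -- descriptive information: start and end times, part size, ...
    meta.bin        -- skipping index; entry file that indexes primary.bin
    primary.bin     -- per-block index pointing at the data files and the *.tfm files
    timestamps.bin  -- timestamps, stored column-wise
    fields.bin      -- field columns (not present for Stream data)
    *.tf            -- one file per persistent tag family
    *.tfm           -- tag-family metadata, referenced from primary.bin
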
TSDB empowers Measure and Stream relevant data.\nShard In TSDB, the data in a group is partitioned into shards based on a configurable sharding scheme. Each shard is assigned to a specific set of storage nodes, and those nodes store and process the data within that shard. This allows BanyanDB to scale horizontally by adding more storage nodes to the cluster as needed.\nshard\n Buffer: It is typically implemented as an in-memory queue managed by a shard. When new time-series data is ingested into the system, it is added to the end of the queue, and when the buffer reaches a specific size, the data is flushed to disk in batches. SST: When a bucket of buffer becomes full or reaches a certain size threshold, it is flushed to disk as a new Sorted String Table (SST) file. This process is known as compaction. Segments and Blocks: Time-series data is stored in data segments/blocks within each shard. Blocks contain a fixed number of data points and are organized into time windows. Each data segment includes an index that efficiently retrieves data within the block. Block Cache: It manages the in-memory cache of data blocks, improving query performance by caching frequently accessed data blocks in memory.  Write Path The write path of TSDB begins when time-series data is ingested into the system. TSDB will consult the schema repository to check if the group exists, and if it does, then it will hash the SeriesID to determine which shard it belongs to.\nEach shard in TSDB is responsible for storing a subset of the time-series data, and it uses a write-ahead log to record incoming writes in a durable and fault-tolerant manner. The shard also holds an in-memory index allowing fast lookups of time-series data.\nWhen a shard receives a write request, the data is written to the buffer as a series of buckets. Each bucket is a fixed-size chunk of time-series data typically configured to be several minutes or hours long. As new data is written to the buffer, it is appended to the current bucket until it is full. Once the bucket is full, it is closed, and a new bucket is created to continue buffering writes.\nOnce a bucket is closed, it is stored as a single SST in a shard. The file is indexed and added to the index for the corresponding time range and resolution.\nRead Path The read path in TSDB retrieves time-series data from disk or memory and returns it to the query engine. The read path comprises several components: the buffer, cache, and SST file. The following is a high-level overview of how these components work together to retrieve time-series data in TSDB.\nThe first step in the read path is to perform an index lookup to determine which blocks contain the desired time range. The index contains metadata about each data block, including its start and end time and its location on disk.\nIf the requested data is present in the buffer (i.e., it has been recently written but not yet persisted to disk), the buffer is checked to see if the data can be returned directly from memory. The read path determines which bucket(s) contain the requested time range. If the data is not present in the buffer, the read path proceeds to the next step.\nIf the requested data is present in the cache (i.e., it has been recently read from disk and is still in memory), it is checked to see if the data can be returned directly from memory. The read path proceeds to the next step if the data is not in the cache.\nThe final step in the read path is to look up the appropriate SST file on disk. 
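As a rough illustration of the lookup order just described (index lookup, then buffer, then block cache, then SST files on disk), here is a sketch in Java. BanyanDB itself is not written in Java, and every type and method name below is a hypothetical placeholder rather than its real API.

import java.util.List;
import java.util.Optional;

// Illustrative-only sketch of the read path: buffer -> block cache -> SST files on disk.
interface Block {}
interface BlockRef {}
interface TimeRange {}

interface WriteBuffer { Optional<List<Block>> find(String seriesId, TimeRange range); }
interface BlockCache  { Optional<List<Block>> find(List<BlockRef> refs); void put(List<BlockRef> refs, List<Block> blocks); }
interface SstIndex    { List<BlockRef> lookup(String seriesId, TimeRange range); List<Block> readBlocks(List<BlockRef> refs); }

class ShardReader {
    private final WriteBuffer buffer;  // recently written data, not yet persisted
    private final BlockCache cache;    // recently read blocks kept in memory
    private final SstIndex index;      // maps time ranges to blocks inside SST files

    ShardReader(WriteBuffer buffer, BlockCache cache, SstIndex index) {
        this.buffer = buffer;
        this.cache = cache;
        this.index = index;
    }

    List<Block> read(String seriesId, TimeRange range) {
        // 1. Index lookup: find the blocks whose metadata overlaps the requested range.
        List<BlockRef> candidates = index.lookup(seriesId, range);

        // 2. Buffer first: recently written data may be served straight from memory.
        Optional<List<Block>> buffered = buffer.find(seriesId, range);
        if (buffered.isPresent()) {
            return buffered.get();
        }

        // 3. Block cache next: recently read blocks avoid a disk round trip.
        Optional<List<Block>> cached = cache.find(candidates);
        if (cached.isPresent()) {
            return cached.get();
        }

        // 4. Finally read the blocks from the SST files on disk and warm the cache.
        List<Block> fromDisk = index.readBlocks(candidates);
        cache.put(candidates, fromDisk);
        return fromDisk;
    }
}
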
Files are the on-disk representation of data blocks and are organized by shard and time range. The read path determines which SST files contain the requested time range and reads the appropriate data blocks from the disk.\n","excerpt":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large …","ref":"/docs/skywalking-banyandb/v0.5.0/concept/tsdb/","title":"TimeSeries Database(TSDB)"},{"body":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need to know about Apache SkyWalking Cloud on Kubernetes(SWCK). This repository provides how to onboard and customize the agent injector, operator and adapter.\n Design. Some materials regarding the design decision under the hood. Setup. Several instruments to onboard the agent injector, operator and adapter. Examples. A number of examples of how to use SWCK.  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need …","ref":"/docs/skywalking-swck/latest/readme/","title":"to Apache SkyWalking Cloud on Kubernetes Document Repository"},{"body":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need to know about Apache SkyWalking Cloud on Kubernetes(SWCK). This repository provides how to onboard and customize the agent injector, operator and adapter.\n Design. Some materials regarding the design decision under the hood. Setup. Several instruments to onboard the agent injector, operator and adapter. Examples. A number of examples of how to use SWCK.  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need …","ref":"/docs/skywalking-swck/next/readme/","title":"to Apache SkyWalking Cloud on Kubernetes Document Repository"},{"body":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need to know about Apache SkyWalking Cloud on Kubernetes(SWCK). This repository provides how to onboard and customize the agent injector, operator and adapter.\n Design. Some materials regarding the design decision under the hood. Setup. Several instruments to onboard the agent injector, operator and adapter. Examples. A number of examples of how to use SWCK.  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need …","ref":"/docs/skywalking-swck/v0.9.0/readme/","title":"to Apache SkyWalking Cloud on Kubernetes Document Repository"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. 
On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/latest/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/next/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, which makes sure that a network can be trusted. 
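Note that the receiver-sharing-server snippet shown above is flattened by this page's rendering; in application.yml it is nested roughly as below. Only the authentication-related keys are shown, and the selector line is an assumption that should be checked against the application.yml shipped with your OAP release.

receiver-sharing-server:
  selector: ${SW_RECEIVER_SHARING_SERVER:default}
  default:
    # Leave empty to disable token authentication; set a token here to enable it.
    authentication: ${SW_AUTHENTICATION:""}
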
On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent, and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions on this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. 
On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. 
On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. 
On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. 
agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","excerpt":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/token-auth/","title":"Token Authentication"},{"body":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","excerpt":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/token-auth/","title":"Token Authentication"},{"body":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. 
But we appreciate someone contributes this feature.\n","excerpt":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/token-auth/","title":"Token Authentication"},{"body":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","excerpt":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/token-auth/","title":"Token Authentication"},{"body":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","excerpt":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/token-auth/","title":"Token Authentication"},{"body":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  
Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","excerpt":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/","title":"Trace Correlation Context"},{"body":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","excerpt":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/","title":"Trace Correlation Context"},{"body":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","excerpt":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/","title":"Trace Correlation Context"},{"body":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. 
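Putting the two calls above together, a hypothetical sketch might look like the following. The OrderHandler class and the tenantId key are illustrative assumptions; the import assumes the apm-toolkit-trace dependency that provides the TraceContext class referenced above.

import java.util.Optional;
import org.apache.skywalking.apm.toolkit.trace.TraceContext;

public class OrderHandler {
    public void onReceive(String tenantId) {
        // Attach custom data to the tracing context; SkyWalking propagates it in-wire automatically.
        Optional<String> previous = TraceContext.putCorrelation("tenantId", tenantId);
        previous.ifPresent(old -> System.out.println("replaced previous value: " + old));

        process();
    }

    private void process() {
        // Later in the same trace (even in a downstream service), read the value back.
        String tenant = TraceContext.getCorrelation("tenantId").orElse("unknown");
        System.out.println("processing order for tenant " + tenant);
    }
}
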
Sample codes only\n","excerpt":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/","title":"Trace Correlation Context"},{"body":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","excerpt":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/","title":"Trace Correlation Context"},{"body":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  
CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","excerpt":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/","title":"Trace Cross Thread"},{"body":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","excerpt":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/","title":"Trace Cross Thread"},{"body":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  
ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","excerpt":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/","title":"Trace Cross Thread"},{"body":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  
CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","excerpt":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/","title":"Trace Cross Thread"},{"body":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","excerpt":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/","title":"Trace Cross Thread"},{"body":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. 
Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  
int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  
// All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. 
// Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. 
There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, 
\u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","excerpt":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/latest/en/api/trace-data-protocol-v3/","title":"Trace Data Protocol"},{"body":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. 
// At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. 
// We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  
repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. // Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  
Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: 
\u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","excerpt":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/next/en/api/trace-data-protocol-v3/","title":"Trace Data Protocol"},{"body":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. 
Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  
int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  
// All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. 
// Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. 
There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, 
Output:
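As a usage illustration for the /v3/segment endpoint above, the following is a minimal sketch that posts the single-segment example with the JDK's built-in HTTP client. The address localhost:12800 and the payload come from the example; everything else is plain JDK 11+ API.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SegmentReportSketch {
    public static void main(String[] args) throws Exception {
        // The single-segment payload from the "Input" example above.
        String segmentJson = """
            {
              "traceId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
              "serviceInstance": "User_Service_Instance_Name",
              "service": "User_Service_Name",
              "traceSegmentId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
              "spans": [
                { "operationName": "/ingress", "startTime": 1588664577013, "endTime": 1588664577028,
                  "spanType": "Exit", "spanId": 1, "isError": false, "parentSpanId": 0,
                  "componentId": 6000, "peer": "upstream service", "spanLayer": "Http" },
                { "operationName": "/ingress", "startTime": 1588664577013, "endTime": 1588664577028,
                  "spanType": "Entry", "spanId": 0, "parentSpanId": -1, "isError": false,
                  "spanLayer": "Http", "componentId": 6000,
                  "tags": [ { "key": "http.method", "value": "GET" },
                            { "key": "http.params", "value": "http://localhost/ingress" } ] }
              ]
            }
            """;

        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12800/v3/segment"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(segmentJson))
                .build();

        // The Output section above is empty; check the HTTP status code to confirm the segment was accepted.
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode());
    }
}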
"excerpt":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/v9.4.0/en/api/trace-data-protocol-v3/","title":"Trace Data Protocol"},{"body":"Trace Data Protocol v3

Trace Data Protocol describes the data format between the SkyWalking agent/sniffer and the backend.

Overview

Trace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.

Report service instance status

Service Instance Properties
A service instance contains more information than just a name. In order for the agent to report service instance status, use the ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter.
The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Protocol Definition // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  
string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. 
Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefinited number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping defintions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more informance, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  
Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consuer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}","excerpt":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/v9.0.0/en/protocols/trace-data-protocol-v3/","title":"Trace Data Protocol v3"},{"body":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nOverview Trace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Protocol Definition // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. 
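For the gRPC path (`TraceSegmentReportService#collect`), the sketch below shows how a downstream service's segment can link back to its caller through the `SegmentReference` fields defined in this protocol. It assumes the Java classes that protoc generates from the messages on this page; the package name and every ID/service name here are assumptions and may differ by SkyWalking version.

```java
// Minimal sketch, assuming classes generated from the proto definitions on this page.
// The package below is an assumption; check the skywalking-data-collect-protocol bindings you use.
import org.apache.skywalking.apm.network.language.agent.v3.*;

public class SegmentSketch {
    public static SegmentObject buildChildSegment() {
        long now = System.currentTimeMillis();

        // Reference back to the parent segment on the caller side (CrossProcess).
        SegmentReference ref = SegmentReference.newBuilder()
                .setRefType(RefType.CrossProcess)
                .setTraceId("trace-id-from-sw8-header")
                .setParentTraceSegmentId("parent-segment-id")
                .setParentSpanId(1)                        // the Exit span on the client side
                .setParentService("User_Service_Name")
                .setParentServiceInstance("User_Service_Instance_Name")
                .setParentEndpoint("/ingress")
                .setNetworkAddressUsedAtPeer("127.0.11.8:913")
                .build();

        // The Entry span of this segment: spanId 0, parentSpanId -1 (root of the segment).
        SpanObject entry = SpanObject.newBuilder()
                .setSpanId(0)
                .setParentSpanId(-1)
                .setStartTime(now)
                .setEndTime(now + 15)
                .addRefs(ref)
                .setOperationName("/ingress")
                .setSpanType(SpanType.Entry)
                .setSpanLayer(SpanLayer.Http)
                .setComponentId(6000)
                .setIsError(false)
                .build();

        return SegmentObject.newBuilder()
                .setTraceId("trace-id-from-sw8-header")
                .setTraceSegmentId("a-new-unique-segment-id")
                .setService("Downstream_Service_Name")
                .setServiceInstance("Downstream_Instance_Name")
                .addSpans(entry)
                .build();
    }
}
```

The same structure, serialized to JSON, is what the HTTP report endpoints accept.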
// At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. 
// We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefinited number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping defintions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more informance, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  
repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consuer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}","excerpt":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/v9.1.0/en/protocols/trace-data-protocol-v3/","title":"Trace Data Protocol v3"},{"body":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nOverview Trace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. 
It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Protocol Definition // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  
string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  
// All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more informance, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consuer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. 
message SegmentCollection { repeated SegmentObject segments = 1;}","excerpt":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/v9.2.0/en/protocols/trace-data-protocol-v3/","title":"Trace Data Protocol v3"},{"body":"Trace Data Protocol v3.1 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and JVM metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. 
Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  
int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  
int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. // Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  
repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}","excerpt":"Trace Data Protocol v3.1 Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/v9.3.0/en/protocols/trace-data-protocol-v3/","title":"Trace Data Protocol v3.1"},{"body":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. 
This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. 
Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","excerpt":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to …","ref":"/docs/main/latest/en/setup/backend/backend-trace-profiling/","title":"Trace Profiling"},{"body":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. 
Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  
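As a rough, hypothetical illustration of how these fields relate (class names and numbers are invented, not taken from SkyWalking output), a merged thread-stack result could look like the sketch below, where the parent frame's total duration includes the time spent in its child frame:
# Hypothetical merged thread-stack result; names and values are examples only.
- id: 1
  parentId: 0
  codeSignature: com.example.OrderController.create()
  duration: 200                # total ms attributed to this frame
  durationChildExcluded: 50    # 200 ms minus the 150 ms spent in the child frame below
  count: 20                    # this frame appeared in 20 samples
- id: 2
  parentId: 1
  codeSignature: com.example.OrderRepository.save()
  duration: 150
  durationChildExcluded: 150   # no child frames, so nothing is excluded
  count: 15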
If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","excerpt":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to …","ref":"/docs/main/next/en/setup/backend/backend-trace-profiling/","title":"Trace Profiling"},{"body":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  
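For illustration only, the task-creation fields listed earlier in this section can be pictured as the sketch below. The service name and all values are hypothetical, and real tasks are created through the UI or CLI tool rather than through a YAML file:
# Hypothetical sketch of a Trace Profiling task; field names mirror the list above, values are examples only.
task:
  service: order-service                # hypothetical service name
  endpoint: 'POST:/path/to/request'     # endpoint to be profiled
  startTime: immediate                  # or a future timestamp
  duration: 10min                       # how long the task stays active
  minDurationThreshold: 500ms           # only profile requests slower than this
  dumpPeriod: 10ms                      # thread stack sampling period
  maxSamplingCount: 5                   # stop after this many traces are sampled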
Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  
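As a rough, hypothetical illustration of how these fields relate (class names and numbers are invented, not taken from SkyWalking output), a merged thread-stack result could look like the sketch below, where the parent frame's total duration includes the time spent in its child frame:
# Hypothetical merged thread-stack result; names and values are examples only.
- id: 1
  parentId: 0
  codeSignature: com.example.OrderController.create()
  duration: 200                # total ms attributed to this frame
  durationChildExcluded: 50    # 200 ms minus the 150 ms spent in the child frame below
  count: 20                    # this frame appeared in 20 samples
- id: 2
  parentId: 1
  codeSignature: com.example.OrderRepository.save()
  duration: 150
  durationChildExcluded: 150   # no child frames, so nothing is excluded
  count: 15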
If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","excerpt":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-trace-profiling/","title":"Trace Profiling"},{"body":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  
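For illustration only, the task-creation fields listed earlier in this section can be pictured as the sketch below. The service name and all values are hypothetical, and real tasks are created through the UI or CLI tool rather than through a YAML file:
# Hypothetical sketch of a Trace Profiling task; field names mirror the list above, values are examples only.
task:
  service: order-service                # hypothetical service name
  endpoint: 'POST:/path/to/request'     # endpoint to be profiled
  startTime: immediate                  # or a future timestamp
  duration: 10min                       # how long the task stays active
  minDurationThreshold: 500ms           # only profile requests slower than this
  dumpPeriod: 10ms                      # thread stack sampling period
  maxSamplingCount: 5                   # stop after this many traces are sampled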
Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  
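As a rough, hypothetical illustration of how these fields relate (class names and numbers are invented, not taken from SkyWalking output), a merged thread-stack result could look like the sketch below, where the parent frame's total duration includes the time spent in its child frame:
# Hypothetical merged thread-stack result; names and values are examples only.
- id: 1
  parentId: 0
  codeSignature: com.example.OrderController.create()
  duration: 200                # total ms attributed to this frame
  durationChildExcluded: 50    # 200 ms minus the 150 ms spent in the child frame below
  count: 20                    # this frame appeared in 20 samples
- id: 2
  parentId: 1
  codeSignature: com.example.OrderRepository.save()
  duration: 150
  durationChildExcluded: 150   # no child frames, so nothing is excluded
  count: 15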
If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","excerpt":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-trace-profiling/","title":"Trace Profiling"},{"body":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  
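For illustration only, the task-creation fields listed earlier in this section can be pictured as the sketch below. The service name and all values are hypothetical, and real tasks are created through the UI or CLI tool rather than through a YAML file:
# Hypothetical sketch of a Trace Profiling task; field names mirror the list above, values are examples only.
task:
  service: order-service                # hypothetical service name
  endpoint: 'POST:/path/to/request'     # endpoint to be profiled
  startTime: immediate                  # or a future timestamp
  duration: 10min                       # how long the task stays active
  minDurationThreshold: 500ms           # only profile requests slower than this
  dumpPeriod: 10ms                      # thread stack sampling period
  maxSamplingCount: 5                   # stop after this many traces are sampled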
Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  
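As a rough, hypothetical illustration of how these fields relate (class names and numbers are invented, not taken from SkyWalking output), a merged thread-stack result could look like the sketch below, where the parent frame's total duration includes the time spent in its child frame:
# Hypothetical merged thread-stack result; names and values are examples only.
- id: 1
  parentId: 0
  codeSignature: com.example.OrderController.create()
  duration: 200                # total ms attributed to this frame
  durationChildExcluded: 50    # 200 ms minus the 150 ms spent in the child frame below
  count: 20                    # this frame appeared in 20 samples
- id: 2
  parentId: 1
  codeSignature: com.example.OrderRepository.save()
  duration: 150
  durationChildExcluded: 150   # no child frames, so nothing is excluded
  count: 15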
If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","excerpt":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-trace-profiling/","title":"Trace Profiling"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration have a higher priority than default.rate and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. A further 20% (55% - 35%) of the trace segments reported to Backend-InstanceB will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/latest/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting in the file referenced by the traceSamplingPolicySettingsFile configuration.\nagent-analyzer:\n  default:\n    ...\n    # The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.\n    traceSamplingPolicySettingsFile: ${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}\n    forceSampleErrorSegment: ${SW_FORCE_SAMPLE_ERROR_SEGMENT:true} # When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.\nThe default trace-sampling-policy-settings.yml uses the following format. You can use dynamic configuration to update the settings at runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 
10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/next/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage. If you enable the trace sampling mechanism at the server side, you will find that the metrics of the service, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces into storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would do their best not to split the traces. 
See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism activated. The default value is `-1`, which means would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when sampling mechanism is activated. When sampling mechanism is activated, this config would cause the error status segment to be sampled, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments which are reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote also if most of the access have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.0.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.1.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 
10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.2.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. 
See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.3.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.4.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 
10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.5.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. 
See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.6.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
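For readability, here is how the default trace-sampling-policy-settings.yml described above might be laid out as a conventional YAML file. This is only a sketch of the documented defaults; the commented serverName entry is illustrative, not required.

```yaml
# trace-sampling-policy-settings.yml (sketch of the default format described above)
default:
  # Default sampling rate, replacing 'agent-analyzer.default.sampleRate'.
  # Precision is 1/10000; 10000 means 100% of traces are sampled.
  rate: 10000
  # Default trace latency threshold in milliseconds, replacing
  # 'agent-analyzer.default.slowTraceSegmentThreshold'. Segments slower than
  # this are always sampled; -1 disables slow-trace sampling.
  duration: -1
#services:
#  - name: serverName    # illustrative service-specific override
#    rate: 1000          # sampling rate for this specific service
#    duration: 10000     # latency threshold for this specific service
```

When the services block is uncommented, its per-service rate and duration apply to that service instead of the defaults, as the note below states.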
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.7.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) -\u0026gt; 6.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ Spring Webflux WebClient 5.x -\u0026gt; 6.x JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 4.1.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x (Optional³)   RPC Frameworks  Dubbo 2.5.4 
-\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x ActiveMQ-Artemis 2.30.0 -\u0026gt; 2.31.2 Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. 
So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n³These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\n","excerpt":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/supported-list/","title":"Tracing and Tracing based Metrics Analyze Plugins"},{"body":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) -\u0026gt; 6.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ Spring Webflux WebClient 5.x -\u0026gt; 6.x JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 4.1.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x (Optional³)   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x ActiveMQ-Artemis 2.30.0 -\u0026gt; 2.31.2 Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   
MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x C3P0 0.9.0 -\u0026gt; 0.10.0   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x   Connection Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x C3P0 0.9.0 -\u0026gt; 0.10.0     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n³These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. 
removed from DockerHub).\n","excerpt":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/supported-list/","title":"Tracing and Tracing based Metrics Analyze Plugins"},{"body":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 3.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 
3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. 
So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n","excerpt":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/supported-list/","title":"Tracing and Tracing based Metrics Analyze Plugins"},{"body":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 3.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 
1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. 
So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n","excerpt":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/supported-list/","title":"Tracing and Tracing based Metrics Analyze Plugins"},{"body":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) -\u0026gt; 6.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ Spring Webflux WebClient 5.x -\u0026gt; 6.x JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 4.1.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x (Optional³)   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x ActiveMQ-Artemis 2.30.0 -\u0026gt; 2.31.2 Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 
rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n³These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\n","excerpt":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/supported-list/","title":"Tracing and Tracing based Metrics Analyze Plugins"},{"body":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\nimport \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Use Native Tracing Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. 
When creating an Exit Span, you need to write the context carrier into the target RPC request.\ntype ExtractorRef func(headerKey string) (string, error) type InjectorRef func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request trace.CreateEntrySpan(\u0026#34;EntrySpan\u0026#34;, func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request trace.CreateExitSpan(\u0026#34;ExitSpan\u0026#34;, request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Create Span Use trace.CreateEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel.\n The first parameter is operation name of span the second parameter is InjectorRef.  spanRef, err := trace.CreateEntrySpan(\u0026#34;operationName\u0026#34;, InjectorRef) Use trace.CreateLocalSpan() API to create local span\n the only parameter is the operation name of span.  spanRef, err := trace.CreateLocalSpan(\u0026#34;operationName\u0026#34;) Use trace.CreateExitSpan() API to create exit span.\n the first parameter is the operation name of span the second parameter is the remote peer which means the peer address of exit operation. the third parameter is the ExtractorRef  spanRef, err := trace.CreateExitSpan(\u0026#34;operationName\u0026#34;, \u0026#34;peer\u0026#34;, ExtractorRef) Use trace.StopSpan() API to stop current span\ntrace.StopSpan() Add Span’s Tag and Log Use trace.AddLog() to record log in span.\nUse trace.SetTag() to add tag to span, the parameters of tag are two String which are key and value respectively.\ntrace.AddLog(...string) trace.SetTag(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;) Set ComponentID Use trace.SetComponent() to set the component id of the Span\n the type of parameter is int32.  trace.SetComponent(ComponentID) The Component ID in Span is used to identify the current component, which is declared in the component libraries YAML from the OAP server side.\nAsync Prepare/Finish SpanRef is the return value of CreateSpan.Use SpanRef.PrepareAsync() to make current span still alive until SpanRef.AsyncFinish() called.\n Call PrepareAsync(). Use trace.StopSpan() to stop span in the original goroutine. Propagate the SpanRef to any other goroutine. Call SpanRef.AsyncFinish() in any goroutine.  Capture/Continue Context Snapshot  Use trace.CaptureContext() to get the segment info and store it in ContextSnapshotRef. Propagate the snapshot context to any other goroutine. Use trace.ContinueContext(snapshotRef) to load the snapshotRef in the target goroutine.  Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n  Use trace.GetTraceID() API to get traceID.\ntraceID := trace.GetTraceID()   Use trace.GetSegmentID() API to get segmentID.\nsegmentID := trace.GetSegmentID()   Use trace.GetSpanID() API to get spanID.\nspanID := trace.GetSpanID()   Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. 
All the data in the context will be propagated with the in-wire process automatically.\nUse trace.SetCorrelation() API to set custom data in tracing context.\ntrace.SetCorrelation(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;)  Max element count in the correlation context is 3 Max value length of each element is 128  CorrelationContext will remove the key when the value is empty.\nUse trace.GetCorrelation() API to get custom data.\nvalue := trace.GetCorrelation(\u0026#34;key\u0026#34;) ","excerpt":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as …","ref":"/docs/skywalking-go/latest/en/advanced-features/manual-apis/toolkit-trace/","title":"Tracing APIs"},{"body":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\nimport \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Use Native Tracing Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\ntype ExtractorRef func(headerKey string) (string, error) type InjectorRef func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request trace.CreateEntrySpan(\u0026#34;EntrySpan\u0026#34;, func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request trace.CreateExitSpan(\u0026#34;ExitSpan\u0026#34;, request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Create Span Use trace.CreateEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel.\n The first parameter is operation name of span the second parameter is InjectorRef.  spanRef, err := trace.CreateEntrySpan(\u0026#34;operationName\u0026#34;, InjectorRef) Use trace.CreateLocalSpan() API to create local span\n the only parameter is the operation name of span.  spanRef, err := trace.CreateLocalSpan(\u0026#34;operationName\u0026#34;) Use trace.CreateExitSpan() API to create exit span.\n the first parameter is the operation name of span the second parameter is the remote peer which means the peer address of exit operation. the third parameter is the ExtractorRef  spanRef, err := trace.CreateExitSpan(\u0026#34;operationName\u0026#34;, \u0026#34;peer\u0026#34;, ExtractorRef) Use trace.StopSpan() API to stop current span\ntrace.StopSpan() Add Span’s Tag and Log Use trace.AddLog() to record log in span.\nUse trace.SetTag() to add tag to span, the parameters of tag are two String which are key and value respectively.\ntrace.AddLog(...string) trace.SetTag(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;) Set ComponentID Use trace.SetComponent() to set the component id of the Span\n the type of parameter is int32.  
trace.SetComponent(ComponentID) The Component ID in Span is used to identify the current component, which is declared in the component libraries YAML from the OAP server side.\nAsync Prepare/Finish SpanRef is the return value of CreateSpan.Use SpanRef.PrepareAsync() to make current span still alive until SpanRef.AsyncFinish() called.\n Call PrepareAsync(). Use trace.StopSpan() to stop span in the original goroutine. Propagate the SpanRef to any other goroutine. Call SpanRef.AsyncFinish() in any goroutine.  Capture/Continue Context Snapshot  Use trace.CaptureContext() to get the segment info and store it in ContextSnapshotRef. Propagate the snapshot context to any other goroutine. Use trace.ContinueContext(snapshotRef) to load the snapshotRef in the target goroutine.  Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n  Use trace.GetTraceID() API to get traceID.\ntraceID := trace.GetTraceID()   Use trace.GetSegmentID() API to get segmentID.\nsegmentID := trace.GetSegmentID()   Use trace.GetSpanID() API to get spanID.\nspanID := trace.GetSpanID()   Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\nUse trace.SetCorrelation() API to set custom data in tracing context.\ntrace.SetCorrelation(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;)  Max element count in the correlation context is 3 Max value length of each element is 128  CorrelationContext will remove the key when the value is empty.\nUse trace.GetCorrelation() API to get custom data.\nvalue := trace.GetCorrelation(\u0026#34;key\u0026#34;) ","excerpt":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as …","ref":"/docs/skywalking-go/next/en/advanced-features/manual-apis/toolkit-trace/","title":"Tracing APIs"},{"body":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\nimport \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Use Native Tracing Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\ntype ExtractorRef func(headerKey string) (string, error) type InjectorRef func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request trace.CreateEntrySpan(\u0026#34;EntrySpan\u0026#34;, func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request trace.CreateExitSpan(\u0026#34;ExitSpan\u0026#34;, request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Create Span Use trace.CreateEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel.\n The first parameter is operation name of span the second parameter is InjectorRef.  
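As a concrete illustration of the calls documented in this section (the per-API reference continues right after this sketch), here is a minimal net/http handler that combines the entry and exit span APIs. It follows the carrier demo above: the entry span reads propagation headers from the incoming request, and the exit span writes them into the outgoing one. The handler name, operation names, tag keys, and downstream URL are all illustrative.

```go
package main

import (
	"net/http"

	"github.com/apache/skywalking-go/toolkit/trace"
)

// handler is an illustrative example only: operation names, tag keys, and the
// downstream URL are made up, and error handling is kept minimal.
func handler(w http.ResponseWriter, r *http.Request) {
	// Entry span: extract the context carrier from the incoming request headers.
	if _, err := trace.CreateEntrySpan("GET /orders", func(headerKey string) (string, error) {
		return r.Header.Get(headerKey), nil
	}); err == nil {
		defer trace.StopSpan()
	}

	// Tag and log on the currently active span.
	trace.SetTag("order.source", "web")
	trace.AddLog("handling order request")

	out, err := http.NewRequest(http.MethodGet, "http://inventory:8080/stock", nil)
	if err == nil {
		// Exit span: inject the context carrier into the outgoing request headers.
		if _, spanErr := trace.CreateExitSpan("GET /stock", out.URL.Host, func(headerKey, headerValue string) error {
			out.Header.Add(headerKey, headerValue)
			return nil
		}); spanErr == nil {
			_, _ = http.DefaultClient.Do(out)
			trace.StopSpan()
		}
	}

	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/orders", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```

The same pattern applies with any framework: the extractor callback reads the propagation headers from whatever inbound carrier is available, and the injector callback writes them into the outbound carrier exposed by the RPC client.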
spanRef, err := trace.CreateEntrySpan(\u0026#34;operationName\u0026#34;, InjectorRef) Use trace.CreateLocalSpan() API to create local span\n the only parameter is the operation name of span.  spanRef, err := trace.CreateLocalSpan(\u0026#34;operationName\u0026#34;) Use trace.CreateExitSpan() API to create exit span.\n the first parameter is the operation name of span the second parameter is the remote peer which means the peer address of exit operation. the third parameter is the ExtractorRef  spanRef, err := trace.CreateExitSpan(\u0026#34;operationName\u0026#34;, \u0026#34;peer\u0026#34;, ExtractorRef) Use trace.StopSpan() API to stop current span\ntrace.StopSpan() Add Span’s Tag and Log Use trace.AddLog() to record log in span.\nUse trace.SetTag() to add tag to span, the parameters of tag are two String which are key and value respectively.\ntrace.AddLog(...string) trace.SetTag(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;) Set ComponentID Use trace.SetComponent() to set the component id of the Span\n the type of parameter is int32.  trace.SetComponent(ComponentID) The Component ID in Span is used to identify the current component, which is declared in the component libraries YAML from the OAP server side.\nAsync Prepare/Finish SpanRef is the return value of CreateSpan.Use SpanRef.PrepareAsync() to make current span still alive until SpanRef.AsyncFinish() called.\n Call PrepareAsync(). Use trace.StopSpan() to stop span in the original goroutine. Propagate the SpanRef to any other goroutine. Call SpanRef.AsyncFinish() in any goroutine.  Capture/Continue Context Snapshot  Use trace.CaptureContext() to get the segment info and store it in ContextSnapshotRef. Propagate the snapshot context to any other goroutine. Use trace.ContinueContext(snapshotRef) to load the snapshotRef in the target goroutine.  Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n  Use trace.GetTraceID() API to get traceID.\ntraceID := trace.GetTraceID()   Use trace.GetSegmentID() API to get segmentID.\nsegmentID := trace.GetSegmentID()   Use trace.GetSpanID() API to get spanID.\nspanID := trace.GetSpanID()   Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\nUse trace.SetCorrelation() API to set custom data in tracing context.\ntrace.SetCorrelation(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;)  Max element count in the correlation context is 3 Max value length of each element is 128  CorrelationContext will remove the key when the value is empty.\nUse trace.GetCorrelation() API to get custom data.\nvalue := trace.GetCorrelation(\u0026#34;key\u0026#34;) ","excerpt":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as …","ref":"/docs/skywalking-go/v0.4.0/en/advanced-features/manual-apis/toolkit-trace/","title":"Tracing APIs"},{"body":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  gin: Gin tested v1.7.0 to v1.9.0. http: Native HTTP tested go v1.17 to go v1.20. go-restfulv3: Go-Restful tested v3.7.1 to 3.10.2. mux: Mux tested v1.7.0 to v1.8.0. iris: Iris tested v12.1.0 to 12.2.5. fasthttp: FastHttp tested v1.10.0 to v1.50.0. fiber: Fiber tested v2.49.0 to v2.50.0. 
echov4: Echov4 tested v4.0.0 to v4.11.4   HTTP Client  http: Native HTTP tested go v1.17 to go v1.20. fasthttp: FastHttp tested v1.10.0 to v1.50.0.   RPC Frameworks  dubbo: Dubbo tested v3.0.1 to v3.0.5. kratosv2: Kratos tested v2.3.1 to v2.6.2. microv4: Go-Micro tested v4.6.0 to v4.10.2. grpc : gRPC tested v1.55.0 to v1.57.0.   Database Client  gorm: GORM tested v1.22.0 to v1.25.1.  MySQL Driver   mongo: Mongo tested v1.11.1 to v1.11.7. sql: Native SQL tested go v1.17 to go v1.20.  MySQL Driver tested v1.4.0 to v1.7.1.     Cache Client  go-redisv9: go-redis tested v9.0.3 to v9.0.5.   MQ Client  rocketMQ: rocketmq-client-go tested v2.1.2. amqp: AMQP tested v1.9.0.    Metrics Plugins The meter plugin provides the advanced metrics collections.\n runtimemetrics: Native Runtime Metrics tested go v1.17 to go v1.20.  Logging Plugins The logging plugin provides the advanced logging collections.\n logrus: Logrus tested v1.8.2 to v1.9.3. zap: Zap tested v1.17.0 to v1.24.0.  ","excerpt":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP …","ref":"/docs/skywalking-go/latest/en/agent/support-plugins/","title":"Tracing Plugins"},{"body":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  gin: Gin tested v1.7.0 to v1.9.0. http: Native HTTP tested go v1.17 to go v1.20. go-restfulv3: Go-Restful tested v3.7.1 to 3.10.2. mux: Mux tested v1.7.0 to v1.8.0. iris: Iris tested v12.1.0 to 12.2.5. fasthttp: FastHttp tested v1.10.0 to v1.50.0. fiber: Fiber tested v2.49.0 to v2.50.0. echov4: Echov4 tested v4.0.0 to v4.11.4   HTTP Client  http: Native HTTP tested go v1.17 to go v1.20. fasthttp: FastHttp tested v1.10.0 to v1.50.0.   RPC Frameworks  dubbo: Dubbo tested v3.0.1 to v3.0.5. kratosv2: Kratos tested v2.3.1 to v2.6.2. microv4: Go-Micro tested v4.6.0 to v4.10.2. grpc : gRPC tested v1.55.0 to v1.57.0.   Database Client  gorm: GORM tested v1.22.0 to v1.25.1.  MySQL Driver   mongo: Mongo tested v1.11.1 to v1.11.7. sql: Native SQL tested go v1.17 to go v1.20.  MySQL Driver tested v1.4.0 to v1.7.1.     Cache Client  go-redisv9: go-redis tested v9.0.3 to v9.0.5.   MQ Client  rocketMQ: rocketmq-client-go tested v2.1.2. amqp: AMQP tested v1.9.0. pulsar: pulsar-client-go tested v0.12.0. segmentio-kafka: segmentio-kafka tested v0.4.47.    Metrics Plugins The meter plugin provides the advanced metrics collections.\n runtimemetrics: Native Runtime Metrics tested go v1.17 to go v1.20.  Logging Plugins The logging plugin provides the advanced logging collections.\n logrus: Logrus tested v1.8.2 to v1.9.3. zap: Zap tested v1.17.0 to v1.24.0.  ","excerpt":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP …","ref":"/docs/skywalking-go/next/en/agent/support-plugins/","title":"Tracing Plugins"},{"body":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  gin: Gin tested v1.7.0 to v1.9.0. http: Native HTTP tested go v1.17 to go v1.20. go-restfulv3: Go-Restful tested v3.7.1 to 3.10.2. mux: Mux tested v1.7.0 to v1.8.0. iris: Iris tested v12.1.0 to 12.2.5. fasthttp: FastHttp tested v1.10.0 to v1.50.0. fiber: Fiber tested v2.49.0 to v2.50.0. echov4: Echov4 tested v4.0.0 to v4.11.4   HTTP Client  http: Native HTTP tested go v1.17 to go v1.20. fasthttp: FastHttp tested v1.10.0 to v1.50.0.   
RPC Frameworks  dubbo: Dubbo tested v3.0.1 to v3.0.5. kratosv2: Kratos tested v2.3.1 to v2.6.2. microv4: Go-Micro tested v4.6.0 to v4.10.2. grpc : gRPC tested v1.55.0 to v1.57.0.   Database Client  gorm: GORM tested v1.22.0 to v1.25.1.  MySQL Driver   mongo: Mongo tested v1.11.1 to v1.11.7. sql: Native SQL tested go v1.17 to go v1.20.  MySQL Driver tested v1.4.0 to v1.7.1.     Cache Client  go-redisv9: go-redis tested v9.0.3 to v9.0.5.   MQ Client  rocketMQ: rocketmq-client-go tested v2.1.2. amqp: AMQP tested v1.9.0.    Metrics Plugins The meter plugin provides the advanced metrics collections.\n runtimemetrics: Native Runtime Metrics tested go v1.17 to go v1.20.  Logging Plugins The logging plugin provides the advanced logging collections.\n logrus: Logrus tested v1.8.2 to v1.9.3. zap: Zap tested v1.17.0 to v1.24.0.  ","excerpt":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP …","ref":"/docs/skywalking-go/v0.4.0/en/agent/support-plugins/","title":"Tracing Plugins"},{"body":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to provide functionality for distributed tracing, metrics, and logging data. For a detailed list of supported plugins, please refer to the documentation. This document aims to provide you with some configuration information for your usage. Please ensure that you have followed the documentation to successfully install the SkyWalking Go Agent into your application.\nMetadata Mechanism The Go Agent would be identified by the SkyWalking backend after startup and maintain a heartbeat to keep alive.\n   Name Environment Key Default Value Description     agent.service_name SW_AGENT_NAME Your_Application_Name The name of the service which showed in UI.   agent.instance_env_name  SW_AGENT_INSTANCE_NAME To obtain the environment variable key for the instance name, if it cannot be obtained, an instance name will be automatically generated.    Tracing Distributed tracing is the most common form of plugin in the Go Agent, and it becomes active with each new incoming request. By default, all plugins are enabled. For a specific list of plugins, please refer to the documentation.\nIf you wish to disable a particular plugin to prevent enhancements related to that plugin, please consult the documentation on how to disable plugins.\nThe basic configuration is as follows:\n   Name Environment Key Default Value Description     agent.sampler SW_AGENT_SAMPLER 1 Sampling rate of tracing data, which is a floating-point value that must be between 0 and 1.   agent.ignore_suffix SW_AGENT_IGNORE_SUFFIX .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).    Metrics The metrics plugin can dynamically monitor the execution status of the current program and aggregate the data into corresponding metrics. Eventually, the data is reported to the SkyWalking backend at a specified interval. For a specific list of plugins, please refer to the documentation.\nThe current configuration information is as follows:\n   Name Environment Key Default Value Description     agent.meter.collect_interval SW_AGENT_METER_COLLECT_INTERVAL 20 The interval of collecting metrics, in seconds.    Logging The logging plugin in SkyWalking Go Agent are used to handle agent and application logs, as well as application log querying. 
They primarily consist of the following three functionalities:\n Agent Log Adaptation: The plugin detects the logging framework used in the current system and integrates the agent\u0026rsquo;s logs with the system\u0026rsquo;s logging framework. Distributed Tracing Enhancement: It combines the distributed tracing information from the current request with the application logs, allowing you to have real-time visibility into all log contents related to specific requests. Log Reporting: The plugin reports both application and agent logs to the SkyWalking backend for data retrieval and display purposes.  For more details, please refer to the documentation to learn more detail.\n","excerpt":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to …","ref":"/docs/skywalking-go/latest/en/agent/tracing-metrics-logging/","title":"Tracing, Metrics and Logging with Go Agent"},{"body":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to provide functionality for distributed tracing, metrics, and logging data. For a detailed list of supported plugins, please refer to the documentation. This document aims to provide you with some configuration information for your usage. Please ensure that you have followed the documentation to successfully install the SkyWalking Go Agent into your application.\nMetadata Mechanism The Go Agent would be identified by the SkyWalking backend after startup and maintain a heartbeat to keep alive.\n   Name Environment Key Default Value Description     agent.service_name SW_AGENT_NAME Your_Application_Name The name of the service which showed in UI.   agent.instance_env_name  SW_AGENT_INSTANCE_NAME To obtain the environment variable key for the instance name, if it cannot be obtained, an instance name will be automatically generated.    Tracing Distributed tracing is the most common form of plugin in the Go Agent, and it becomes active with each new incoming request. By default, all plugins are enabled. For a specific list of plugins, please refer to the documentation.\nIf you wish to disable a particular plugin to prevent enhancements related to that plugin, please consult the documentation on how to disable plugins.\nThe basic configuration is as follows:\n   Name Environment Key Default Value Description     agent.sampler SW_AGENT_SAMPLER 1 Sampling rate of tracing data, which is a floating-point value that must be between 0 and 1.   agent.ignore_suffix SW_AGENT_IGNORE_SUFFIX .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the suffix obtained by splitting the operation name by the last index of \u0026ldquo;.\u0026rdquo; in this set, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).   agent.trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH  If the operation name of the first span is matching, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).    Metrics The metrics plugin can dynamically monitor the execution status of the current program and aggregate the data into corresponding metrics. Eventually, the data is reported to the SkyWalking backend at a specified interval. For a specific list of plugins, please refer to the documentation.\nThe current configuration information is as follows:\n   Name Environment Key Default Value Description     agent.meter.collect_interval SW_AGENT_METER_COLLECT_INTERVAL 20 The interval of collecting metrics, in seconds.    
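Taken together, the metadata, tracing and metrics options above can be summarised in one place. The following is a minimal sketch only, assuming the dotted option names map to nested YAML keys and that each value falls back to its environment key with the documented default; the surrounding file layout used by the Go agent tooling is an assumption, and only the names, environment keys and defaults come from the tables above.

agent:
  service_name: ${SW_AGENT_NAME:Your_Application_Name}      # service name shown in the UI
  instance_env_name: ${SW_AGENT_INSTANCE_NAME:}              # env key used to look up the instance name
  sampler: ${SW_AGENT_SAMPLER:1}                             # tracing sample rate, a float between 0 and 1
  ignore_suffix: ${SW_AGENT_IGNORE_SUFFIX:.jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg}
  meter:
    collect_interval: ${SW_AGENT_METER_COLLECT_INTERVAL:20}  # metrics collection interval, in seconds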
Logging The logging plugin in SkyWalking Go Agent are used to handle agent and application logs, as well as application log querying. They primarily consist of the following three functionalities:\n Agent Log Adaptation: The plugin detects the logging framework used in the current system and integrates the agent\u0026rsquo;s logs with the system\u0026rsquo;s logging framework. Distributed Tracing Enhancement: It combines the distributed tracing information from the current request with the application logs, allowing you to have real-time visibility into all log contents related to specific requests. Log Reporting: The plugin reports both application and agent logs to the SkyWalking backend for data retrieval and display purposes.  For more details, please refer to the documentation to learn more detail.\n","excerpt":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to …","ref":"/docs/skywalking-go/next/en/agent/tracing-metrics-logging/","title":"Tracing, Metrics and Logging with Go Agent"},{"body":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to provide functionality for distributed tracing, metrics, and logging data. For a detailed list of supported plugins, please refer to the documentation. This document aims to provide you with some configuration information for your usage. Please ensure that you have followed the documentation to successfully install the SkyWalking Go Agent into your application.\nMetadata Mechanism The Go Agent would be identified by the SkyWalking backend after startup and maintain a heartbeat to keep alive.\n   Name Environment Key Default Value Description     agent.service_name SW_AGENT_NAME Your_Application_Name The name of the service which showed in UI.   agent.instance_env_name  SW_AGENT_INSTANCE_NAME To obtain the environment variable key for the instance name, if it cannot be obtained, an instance name will be automatically generated.    Tracing Distributed tracing is the most common form of plugin in the Go Agent, and it becomes active with each new incoming request. By default, all plugins are enabled. For a specific list of plugins, please refer to the documentation.\nIf you wish to disable a particular plugin to prevent enhancements related to that plugin, please consult the documentation on how to disable plugins.\nThe basic configuration is as follows:\n   Name Environment Key Default Value Description     agent.sampler SW_AGENT_SAMPLER 1 Sampling rate of tracing data, which is a floating-point value that must be between 0 and 1.   agent.ignore_suffix SW_AGENT_IGNORE_SUFFIX .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).    Metrics The metrics plugin can dynamically monitor the execution status of the current program and aggregate the data into corresponding metrics. Eventually, the data is reported to the SkyWalking backend at a specified interval. For a specific list of plugins, please refer to the documentation.\nThe current configuration information is as follows:\n   Name Environment Key Default Value Description     agent.meter.collect_interval SW_AGENT_METER_COLLECT_INTERVAL 20 The interval of collecting metrics, in seconds.    Logging The logging plugin in SkyWalking Go Agent are used to handle agent and application logs, as well as application log querying. 
They primarily consist of the following three functionalities:\n Agent Log Adaptation: The plugin detects the logging framework used in the current system and integrates the agent\u0026rsquo;s logs with the system\u0026rsquo;s logging framework. Distributed Tracing Enhancement: It combines the distributed tracing information from the current request with the application logs, allowing you to have real-time visibility into all log contents related to specific requests. Log Reporting: The plugin reports both application and agent logs to the SkyWalking backend for data retrieval and display purposes.  For more details, please refer to the documentation to learn more detail.\n","excerpt":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to …","ref":"/docs/skywalking-go/v0.4.0/en/agent/tracing-metrics-logging/","title":"Tracing, Metrics and Logging with Go Agent"},{"body":"Traffic The traffic is used to collecting the network access logs from services through the Service Discovery, and send access logs to the backend server for analyze.\nConfiguration    Name Default Environment Key Description     access_log.active false ROVER_ACCESS_LOG_ACTIVE Is active the access log monitoring.   access_log.exclude_namespaces istio-system,cert-manager,kube-system ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES Exclude processes in the specified Kubernetes namespace. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;   access_log.exclude_cluster  ROVER_ACCESS_LOG_EXCLUDE_CLUSTER Exclude processes in the specified cluster which defined in the process module. Multiple clusters split by \u0026ldquo;,\u0026rdquo;   access_log.flush.max_count 2000 ROVER_ACCESS_LOG_FLUSH_MAX_COUNT The max count of the access log when flush to the backend.   access_log.flush.period 5s ROVER_ACCESS_LOG_FLUSH_PERIOD The period of flush access log to the backend.   access_log_protocol_analyze.per_cpu_buffer 400KB ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   access_log.protocol_analyze.parallels 2 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   access_log.protocol_analyze.queue_size 5000 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyze queue.    Collectors Socket Connect/Accept/Close Monitor all socket connect, accept, and close events from monitored processes by attaching eBPF program to the respective trace points.\nSocket traffic Capture all socket traffic from monitored processes by attaching eBPF program to network syscalls.\nProtocol Data collection is followed by protocol analysis. Currently, the supported protocols include:\n HTTP/1.x HTTP/2  Note: As HTTP2 is a stateful protocol, it only supports monitoring processes that start after monitor. Processes already running at the time of monitoring may fail to provide complete data, leading to unsuccessful analysis.\nTLS When a process uses the TLS protocol for data transfer, Rover monitors libraries such as OpenSSL, BoringSSL, GoTLS, and NodeTLS to access the raw content. This feature is also applicable for protocol analysis.\nNote: the parsing of TLS protocols in Java is currently not supported.\nL2-L4 During data transmission, Rover records each packet\u0026rsquo;s through the network layers L2 to L4 using kprobes. 
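For the access-log collection described in the Configuration table above, a minimal sketch of the corresponding Rover settings, assuming the dotted names nest as YAML keys and follow the ${ENV_KEY:default} convention; the enclosing file structure is an assumption, while the names, environment keys and defaults are the documented ones (the per-CPU buffer row appears as access_log_protocol_analyze.per_cpu_buffer and is read here as access_log.protocol_analyze.per_cpu_buffer).

access_log:
  active: ${ROVER_ACCESS_LOG_ACTIVE:false}
  exclude_namespaces: ${ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES:istio-system,cert-manager,kube-system}
  exclude_cluster: ${ROVER_ACCESS_LOG_EXCLUDE_CLUSTER:}
  flush:
    max_count: ${ROVER_ACCESS_LOG_FLUSH_MAX_COUNT:2000}
    period: ${ROVER_ACCESS_LOG_FLUSH_PERIOD:5s}
  protocol_analyze:
    per_cpu_buffer: ${ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER:400KB}  # socket data buffer per CPU
    parallels: ${ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS:2}                # parallel protocol analyzers
    queue_size: ${ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE:5000}           # queue size per analyzer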
This approach enhances the understanding of each packet\u0026rsquo;s transmission process, facilitating easier localization and troubleshooting of network issues.\n","excerpt":"Traffic The traffic is used to collecting the network access logs from services through the Service …","ref":"/docs/skywalking-rover/latest/en/setup/configuration/traffic/","title":"Traffic"},{"body":"Traffic The traffic is used to collecting the network access logs from services through the Service Discovery, and send access logs to the backend server for analyze.\nConfiguration    Name Default Environment Key Description     access_log.active false ROVER_ACCESS_LOG_ACTIVE Is active the access log monitoring.   access_log.exclude_namespaces istio-system,cert-manager,kube-system ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES Exclude processes in the specified Kubernetes namespace. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;   access_log.exclude_cluster  ROVER_ACCESS_LOG_EXCLUDE_CLUSTER Exclude processes in the specified cluster which defined in the process module. Multiple clusters split by \u0026ldquo;,\u0026rdquo;   access_log.flush.max_count 2000 ROVER_ACCESS_LOG_FLUSH_MAX_COUNT The max count of the access log when flush to the backend.   access_log.flush.period 5s ROVER_ACCESS_LOG_FLUSH_PERIOD The period of flush access log to the backend.   access_log_protocol_analyze.per_cpu_buffer 400KB ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   access_log.protocol_analyze.parallels 2 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   access_log.protocol_analyze.queue_size 5000 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyze queue.    Collectors Socket Connect/Accept/Close Monitor all socket connect, accept, and close events from monitored processes by attaching eBPF program to the respective trace points.\nSocket traffic Capture all socket traffic from monitored processes by attaching eBPF program to network syscalls.\nProtocol Data collection is followed by protocol analysis. Currently, the supported protocols include:\n HTTP/1.x HTTP/2  Note: As HTTP2 is a stateful protocol, it only supports monitoring processes that start after monitor. Processes already running at the time of monitoring may fail to provide complete data, leading to unsuccessful analysis.\nTLS When a process uses the TLS protocol for data transfer, Rover monitors libraries such as OpenSSL, BoringSSL, GoTLS, and NodeTLS to access the raw content. This feature is also applicable for protocol analysis.\nNote: the parsing of TLS protocols in Java is currently not supported.\nL2-L4 During data transmission, Rover records each packet\u0026rsquo;s through the network layers L2 to L4 using kprobes. This approach enhances the understanding of each packet\u0026rsquo;s transmission process, facilitating easier localization and troubleshooting of network issues.\n","excerpt":"Traffic The traffic is used to collecting the network access logs from services through the Service …","ref":"/docs/skywalking-rover/next/en/setup/configuration/traffic/","title":"Traffic"},{"body":"Traffic The traffic is used to collecting the network access logs from services through the Service Discovery, and send access logs to the backend server for analyze.\nConfiguration    Name Default Environment Key Description     access_log.active false ROVER_ACCESS_LOG_ACTIVE Is active the access log monitoring.   
access_log.exclude_namespaces istio-system,cert-manager,kube-system ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES Exclude processes in the specified Kubernetes namespace. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;   access_log.exclude_cluster  ROVER_ACCESS_LOG_EXCLUDE_CLUSTER Exclude processes in the specified cluster which defined in the process module. Multiple clusters split by \u0026ldquo;,\u0026rdquo;   access_log.flush.max_count 2000 ROVER_ACCESS_LOG_FLUSH_MAX_COUNT The max count of the access log when flush to the backend.   access_log.flush.period 5s ROVER_ACCESS_LOG_FLUSH_PERIOD The period of flush access log to the backend.   access_log_protocol_analyze.per_cpu_buffer 400KB ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   access_log.protocol_analyze.parallels 2 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   access_log.protocol_analyze.queue_size 5000 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyze queue.    Collectors Socket Connect/Accept/Close Monitor all socket connect, accept, and close events from monitored processes by attaching eBPF program to the respective trace points.\nSocket traffic Capture all socket traffic from monitored processes by attaching eBPF program to network syscalls.\nProtocol Data collection is followed by protocol analysis. Currently, the supported protocols include:\n HTTP/1.x HTTP/2  Note: As HTTP2 is a stateful protocol, it only supports monitoring processes that start after monitor. Processes already running at the time of monitoring may fail to provide complete data, leading to unsuccessful analysis.\nTLS When a process uses the TLS protocol for data transfer, Rover monitors libraries such as OpenSSL, BoringSSL, GoTLS, and NodeTLS to access the raw content. This feature is also applicable for protocol analysis.\nNote: the parsing of TLS protocols in Java is currently not supported.\nL2-L4 During data transmission, Rover records each packet\u0026rsquo;s through the network layers L2 to L4 using kprobes. This approach enhances the understanding of each packet\u0026rsquo;s transmission process, facilitating easier localization and troubleshooting of network issues.\n","excerpt":"Traffic The traffic is used to collecting the network access logs from services through the Service …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/traffic/","title":"Traffic"},{"body":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and transport data to the Kafka Topic.\nConfig Here is config file, set out as follows:\n Declare gRPC server and kafka client to receive and transmit data. Declare the SkyWalking Log protocol gatherer and sender to transmit protocol via pipeline. Expose Self-Observability telemetry data to Prometheus.  ","excerpt":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and …","ref":"/docs/skywalking-satellite/latest/en/setup/examples/feature/transmit-log-to-kafka/readme/","title":"Transmit Log to Kafka"},{"body":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and transport data to the Kafka Topic.\nConfig Here is config file, set out as follows:\n Declare gRPC server and kafka client to receive and transmit data. Declare the SkyWalking Log protocol gatherer and sender to transmit protocol via pipeline. Expose Self-Observability telemetry data to Prometheus.  
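The Transmit Log to Kafka example above describes the pipeline only in prose. The sketch below illustrates the general shape such a Satellite config takes, using clearly hypothetical plugin names (grpc-server, grpc-log-receiver, kafka-client, kafka-log-forwarder); the real identifiers ship with the Satellite example configs, so treat everything here as a placeholder rather than a working file.

# All plugin names below are hypothetical placeholders; only the overall layout
# (gRPC in -> gatherer -> sender -> Kafka out, plus Prometheus self-observability)
# follows the description above.
telemetry:
  prometheus:
    address: :1234                        # expose self-observability metrics to Prometheus
sharing:
  servers:
    - plugin_name: grpc-server            # hypothetical gRPC server receiving agent data
      address: :11800
  clients:
    - plugin_name: kafka-client           # hypothetical Kafka client used by the sender
      brokers: kafka:9092
pipes:
  - gatherer:
      server_name: grpc-server
      receiver:
        plugin_name: grpc-log-receiver    # hypothetical SkyWalking log-protocol receiver
    sender:
      forwarders:
        - plugin_name: kafka-log-forwarder  # hypothetical forwarder writing to a Kafka topic
          topic: skywalking-logs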
","excerpt":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and …","ref":"/docs/skywalking-satellite/next/en/setup/examples/feature/transmit-log-to-kafka/readme/","title":"Transmit Log to Kafka"},{"body":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and transport data to the Kafka Topic.\nConfig Here is config file, set out as follows:\n Declare gRPC server and kafka client to receive and transmit data. Declare the SkyWalking Log protocol gatherer and sender to transmit protocol via pipeline. Expose Self-Observability telemetry data to Prometheus.  ","excerpt":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/feature/transmit-log-to-kafka/readme/","title":"Transmit Log to Kafka"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/latest/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/next/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.0.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. 
Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.1.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.2.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.3.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.4.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  
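The TTL snippet quoted in these entries is flattened into a single run; spaced out, the two settings read as follows, with values and environment keys exactly as documented.

# Records (traces, logs, topN sampled statements, alarm). Unit is day.
recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3}
# Metrics and metadata. Unit is day.
metricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7}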
These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.5.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.6.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.7.0/en/setup/backend/ttl/","title":"TTL"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/latest/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). 
UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/next/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings Settings file of UI is webapp/webapp.yml in distribution package. It has three parts.\n Listening port. Backend connect info.  server:port:8080spring:cloud:gateway:routes:- id:oap-routeuri:lb://oap-servicepredicates:- Path=/graphql/**discovery:client:simple:instances:oap-service:# Point to all backend\u0026#39;s restHost:restPort, split by URI arrays.- uri:http://127.0.0.1:12800- uri:http://instance-2:12800Start with Docker Image Start a container to connect oap server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of OAP server. Default value is http://127.0.0.1:12800.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.0.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  server:port:8080spring:cloud:gateway:routes:- id:oap-routeuri:lb://oap-servicepredicates:- Path=/graphql/**discovery:client:simple:instances:oap-service:# Point to all backend\u0026#39;s restHost:restPort, split by URI arrays.- uri:http://127.0.0.1:12800- uri:http://instance-2:12800Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. 
The default value is http://127.0.0.1:12800.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.1.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  server:port:8080spring:cloud:gateway:routes:- id:oap-routeuri:lb://oap-servicepredicates:- Path=/graphql/**discovery:client:simple:instances:oap-service:# Point to all backend\u0026#39;s restHost:restPort, split by URI arrays.- uri:http://127.0.0.1:12800- uri:http://instance-2:12800Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.2.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, without http:// prefix.oapServices:${SW_OAP_ADDRESS:-localhost:12800}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.3.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, without http:// prefix.oapServices:${SW_OAP_ADDRESS:-localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. 
The default value is http://127.0.0.1:9412.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.4.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.5.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.6.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  
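The webapp.yml snippet quoted in the newer UI entries is likewise flattened; spaced out it reads as follows, with the same ${...} fallbacks shown above.

serverPort: ${SW_SERVER_PORT:-8080}
# Comma separated list of OAP addresses, with `http://` or `https://` prefix.
oapServices: ${SW_OAP_ADDRESS:-http://localhost:12800}
zipkinServices: ${SW_ZIPKIN_ADDRESS:http://localhost:9412}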
serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.7.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/latest/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. 
For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/next/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by SkyWalking agent plugin when they are started, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there\u0026rsquo;re no suitable agent plugins. For example, there are no agent plugins for Nginx, haproxy, etc. So in order to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/ip of this gateway instanceport:9099# the port of this gateway instance, defaults to 80Note: The host of the instance must be the one that is actually used at client side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by SkyWalking agent …","ref":"/docs/main/v9.0.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.1.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. 
So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.2.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.3.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. 
For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.4.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.5.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.6.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. 
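The gateways.yml example embedded in the entries above loses its indentation in this index; reconstructed, it reads:

gateways:
  - name: proxy0            # the name is not used for now
    instances:
      - host: 127.0.0.1     # the host/IP of this gateway instance
        port: 9099          # the port of this gateway instance, defaults to 80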
So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.7.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a global test initializer to init the dependencies.\nimport ( _ \u0026quot;github.com/apache/skywalking-satellite/internal/satellite/test\u0026quot; ) ","excerpt":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a …","ref":"/docs/skywalking-satellite/latest/en/guides/test/how-to-unit-test/","title":"Unit Test"},{"body":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a global test initializer to init the dependencies.\nimport ( _ \u0026quot;github.com/apache/skywalking-satellite/internal/satellite/test\u0026quot; ) ","excerpt":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a …","ref":"/docs/skywalking-satellite/next/en/guides/test/how-to-unit-test/","title":"Unit Test"},{"body":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a global test initializer to init the dependencies.\nimport ( _ \u0026quot;github.com/apache/skywalking-satellite/internal/satellite/test\u0026quot; ) ","excerpt":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a …","ref":"/docs/skywalking-satellite/v1.2.0/en/guides/test/how-to-unit-test/","title":"Unit Test"},{"body":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... 
} Sample codes only\n","excerpt":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/","title":"Use annotation to mark the method you want to trace."},{"body":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","excerpt":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/","title":"Use annotation to mark the method you want to trace."},{"body":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","excerpt":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/","title":"Use annotation to mark the method you want to trace."},{"body":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. 
Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","excerpt":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/","title":"Use annotation to mark the method you want to trace."},{"body":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","excerpt":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/","title":"Use annotation to mark the method you want to trace."},{"body":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. 
We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. SkyWalking Data Source Before you start, please install the SkyWalking data source plugin. In the data source config panel, chose the SkyWalking and set the url to the OAP server graphql service address, the default port is 12800. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Topology Dashboards Dashboards Settings For now, SkyWalking support General Service and Service Mesh topology dashboards, the layer is GENERAL and MESH respectively. The following configuration can reuse the above General Service dashboard and add a new variable Plugin_SkyWalking for the dashboard: Add Topology Panel  Chose the Node Graph chart. Set Layer and Service by the variables. If you want to show all services in this layer, set Service empty. Set Node Metrics and Edge Metrics which you want to show on the topology.   Configure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. 
Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","excerpt":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. …","ref":"/docs/main/latest/en/setup/backend/ui-grafana/","title":"Use Grafana As The UI"},{"body":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. SkyWalking Data Source Before you start, please install the SkyWalking data source plugin. In the data source config panel, chose the SkyWalking and set the url to the OAP server graphql service address, the default port is 12800. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. 
Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Topology Dashboards Dashboards Settings For now, SkyWalking support General Service and Service Mesh topology dashboards, the layer is GENERAL and MESH respectively. The following configuration can reuse the above General Service dashboard and add a new variable Plugin_SkyWalking for the dashboard: Add Topology Panel  Chose the Node Graph chart. Set Layer and Service by the variables. If you want to show all services in this layer, set Service empty. Set Node Metrics and Edge Metrics which you want to show on the topology.   Configure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","excerpt":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. …","ref":"/docs/main/next/en/setup/backend/ui-grafana/","title":"Use Grafana As The UI"},{"body":"Use Grafana As The UI Since 9.4.0, SkyWalking provide PromQL Service. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. 
Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. Configure Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nPreview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","excerpt":"Use Grafana As The UI Since 9.4.0, SkyWalking provide PromQL Service. You can choose Grafana as the …","ref":"/docs/main/v9.4.0/en/setup/backend/ui-grafana/","title":"Use Grafana As The UI"},{"body":"Use Grafana As The UI Since 9.4.0, SkyWalking provide PromQL Service. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. 
Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. Configure Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nPreview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","excerpt":"Use Grafana As The UI Since 9.4.0, SkyWalking provide PromQL Service. You can choose Grafana as the …","ref":"/docs/main/v9.5.0/en/setup/backend/ui-grafana/","title":"Use Grafana As The UI"},{"body":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. 
We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. 
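For reference, the PromQL expressions referenced in the metric panel steps above might look roughly like the following against the SkyWalking PromQL service. This is a sketch under the assumption that `$service` and `$layer` are the dashboard variables configured earlier; exact metric and label names depend on your OAP version.

```promql
# Common value metric (scope: Service).
service_apdex{service='$service', layer='$layer'}

# Labeled value metric: filter the percentile labels and rename them for the legend.
service_percentile{service='$service', layer='$layer', labels='0,1,2,3,4', relabels='P50,P75,P90,P95,P99'}

# Sort metric: top 10 instances by CPM, descending.
service_instance_cpm{parent_service='$service', layer='$layer', top_n='10', order='DES'}
```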
You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","excerpt":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. …","ref":"/docs/main/v9.6.0/en/setup/backend/ui-grafana/","title":"Use Grafana As The UI"},{"body":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. SkyWalking Data Source Before you start, please install the SkyWalking data source plugin. In the data source config panel, chose the SkyWalking and set the url to the OAP server graphql service address, the default port is 12800. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. 
Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Topology Dashboards Dashboards Settings For now, SkyWalking support General Service and Service Mesh topology dashboards, the layer is GENERAL and MESH respectively. The following configuration can reuse the above General Service dashboard and add a new variable Plugin_SkyWalking for the dashboard: Add Topology Panel  Chose the Node Graph chart. Set Layer and Service by the variables. If you want to show all services in this layer, set Service empty. Set Node Metrics and Edge Metrics which you want to show on the topology.   Configure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","excerpt":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. …","ref":"/docs/main/v9.7.0/en/setup/backend/ui-grafana/","title":"Use Grafana As The UI"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? 
Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. 
Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. 
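For reference, the demo snippet shown in the "Implementing Precise Profiling" section above, rendered as a runnable sketch. The task bodies here are hypothetical placeholders that simply simulate unstable execution time; they are not the original Task1/Task2 implementations.

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ProfilingDemo {
    // Placeholder task standing in for Task1/Task2: does some "work" of unpredictable
    // length, then counts the latch down so the await() below can eventually return.
    static Runnable task(CountDownLatch latch) {
        return () -> {
            try {
                Thread.sleep((long) (Math.random() * 600));
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                latch.countDown();
            }
        };
    }

    public static void main(String[] args) {
        ExecutorService threadPool = Executors.newFixedThreadPool(2);
        final CountDownLatch countDownLatch = new CountDownLatch(2);
        threadPool.submit(task(countDownLatch));
        threadPool.submit(task(countDownLatch));
        try {
            // The caller waits for both tasks, but never longer than 500 ms.
            countDownLatch.await(500, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        threadPool.shutdown();
    }
}
```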
Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/latest/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/next/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. 
Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  
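To make the dump-merging idea concrete, the following is an illustrative sketch only, not SkyWalking's actual implementation: it estimates a method's duration from the periodic thread dumps whose stacks contain that method's frame (same signature and stack depth).

```java
import java.util.List;

public final class ProfileEstimator {
    /**
     * Estimate how long a method ran, given the timestamps (ms) of all periodic thread
     * dumps and the indexes of the dumps containing the method's frame, in capture order.
     * If the frame shows up in dumps 4..8, we assume the method started no later than
     * dump 4 and finished no earlier than dump 8.
     */
    public static long estimateDurationMillis(List<Long> dumpTimestamps,
                                               List<Integer> dumpsContainingFrame) {
        if (dumpsContainingFrame.isEmpty()) {
            return 0L;
        }
        int first = dumpsContainingFrame.get(0);
        int last = dumpsContainingFrame.get(dumpsContainingFrame.size() - 1);
        // The real start/end fall somewhere between adjacent dumps, so the error of this
        // estimate is bounded by roughly one dump period on each side.
        return dumpTimestamps.get(last) - dumpTimestamps.get(first);
    }
}
```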
Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedExceptione) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. 
Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedExceptione) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. 
Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. Instrumenting a method’s performance typically means changing code, even if only by adding an annotation, and that implies a re-deploy. Even if you have a way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app; otherwise, you risk GC pressure caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third-party library is hard and complex. It takes more time, and many won’t know how to do it. Usually, we don’t have code line numbers in distributed traces. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, diving deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction Reusing distributed tracing to achieve method-scope precision requires an understanding of the above limitations and a different approach. We call it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in them. The key for distributed tracing is the tracing context, the identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider method invocations with the same stack depth and signature (method, line number, etc.) to be the same operation. We derive span timestamps from the thread dumps the same operation appears in. Let’s put this visually:\nThe figure above represents 10 successive thread dumps. If a method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped, but the timestamps of the thread dumps are close enough.\nTo reduce the overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  
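As a minimal sketch of the dump-merging idea described above, the following Java snippet estimates per-operation durations from a list of periodic thread dumps. It is illustrative only, not the SkyWalking implementation: the ProfileMergeSketch and Operation names and the Set-per-dump representation are assumptions, and gaps (an operation leaving and re-entering the stack) are ignored for brevity.

import java.util.*;

final class ProfileMergeSketch {

    /** An "operation" is identified by its stack depth plus method signature. */
    record Operation(int stackDepth, String signature) {}

    /**
     * dumps.get(i) holds the operations seen in the i-th periodic thread dump.
     * Returns a rough duration estimate per operation: the span between the first
     * and last dump it appears in, plus one dump period to account for the start
     * and end falling between dumps.
     */
    static Map<Operation, Long> estimateDurations(List<Set<Operation>> dumps, long dumpPeriodMillis) {
        Map<Operation, int[]> firstAndLastDump = new HashMap<>();
        for (int i = 0; i < dumps.size(); i++) {
            for (Operation op : dumps.get(i)) {
                int[] range = firstAndLastDump.get(op);
                if (range == null) {
                    firstAndLastDump.put(op, new int[]{i, i}); // first dump this operation appears in
                } else {
                    range[1] = i;                              // extend to the latest dump it appears in
                }
            }
        }
        Map<Operation, Long> durations = new HashMap<>();
        for (Map.Entry<Operation, int[]> e : firstAndLastDump.entrySet()) {
            int[] range = e.getValue();
            durations.put(e.getKey(), (range[1] - range[0] + 1) * dumpPeriodMillis);
        }
        return durations;
    }
}

With the example above (a method seen in dumps 4 through 8 at a 10 ms period), this sketch would report roughly 50 ms, which is the same "close enough" estimate the post describes.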
Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. 
Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. 
Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  
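To make the "continuous thread dumps" idea concrete, here is a hedged Java sketch of periodic stack sampling for one traced request thread. It is not the SkyWalking agent's code; the class name, the 5-permit cap, and the 10 ms floor are assumptions that merely mirror the limits discussed in this post (a minimum dump period and a bound on how many requests are profiled in parallel).

import java.util.Queue;
import java.util.concurrent.*;

final class StackSamplerSketch {
    private static final long MIN_PERIOD_MS = 10;                        // dump period floor
    private static final Semaphore PARALLEL_PROFILES = new Semaphore(5); // cap on concurrent profiles

    /**
     * Samples the target thread's stack at a fixed period, appending each sample
     * to the given queue. Returns null when too many profiles are already running.
     */
    static ScheduledFuture<?> start(Thread target, long requestedPeriodMs,
                                    ScheduledExecutorService scheduler,
                                    Queue<StackTraceElement[]> samples) {
        if (!PARALLEL_PROFILES.tryAcquire()) {
            return null;
        }
        long period = Math.max(requestedPeriodMs, MIN_PERIOD_MS);
        return scheduler.scheduleAtFixedRate(
                () -> samples.add(target.getStackTrace()),  // one "thread dump" of the target thread
                0, period, TimeUnit.MILLISECONDS);
    }

    static void stop(ScheduledFuture<?> task) {
        if (task != null) {
            task.cancel(false);
            PARALLEL_PROFILES.release();                    // free the slot for the next profiled request
        }
    }
}

A caller would invoke start when a traced request matches the profile target and stop when that request finishes, then hand the collected samples to the merge step described above.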
Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. 
Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. 
Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  
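The walk-through that follows only shows the code that submits Task1 and Task2. As a hedged reconstruction, the sketch below fills in hypothetical Task1/Task2 bodies (a shared lock plus a random sleep) purely to make the demo self-contained; it is an assumption about what such faulty code could look like, not the original demo source.

import java.util.concurrent.*;

final class FaultyDemo {
    private static final Object SHARED_LOCK = new Object();
    private static final ExecutorService threadPool = Executors.newFixedThreadPool(2);

    record Task1(CountDownLatch latch) implements Runnable {
        public void run() { doSlowWork(latch); }
    }

    record Task2(CountDownLatch latch) implements Runnable {
        public void run() { doSlowWork(latch); }
    }

    // Hypothetical body: both tasks contend on the same lock and hold it for a
    // random time, so their combined execution time is unstable.
    private static void doSlowWork(CountDownLatch latch) {
        synchronized (SHARED_LOCK) {
            try {
                Thread.sleep(ThreadLocalRandom.current().nextLong(50, 400));
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
        latch.countDown();
    }

    static void processWithThreadPool() throws InterruptedException {
        final CountDownLatch countDownLatch = new CountDownLatch(2);
        threadPool.submit(new Task1(countDownLatch));
        threadPool.submit(new Task2(countDownLatch));
        // Waits up to 500 ms; when the tasks are slow, this is where the time goes,
        // which the profiler surfaces as sun.misc.Unsafe.park under CountDownLatch.await.
        countDownLatch.await(500, TimeUnit.MILLISECONDS);
    }
}

Because each task may hold the lock for up to ~400 ms, the combined delay sometimes exceeds the 500 ms timeout and sometimes does not, reproducing the "not always slow" behavior the demo relies on.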
Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. 
Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Our Users  Various companies and organizations use SkyWalking for research, production and commercial products.                                                                                                                                                         Users are encouraged to add themselves to this page. Send a pull request to add your company or organization information [here].   ","excerpt":"Our Users  Various companies and organizations use SkyWalking for research, production and …","ref":"/users/","title":"Users"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  
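To make step 2 of the canary release above concrete, here is a minimal agent.config sketch. The property names (collector.backend_service and agent.namespace) are the ones the steps reference; the address, namespace value, and port are hypothetical examples (11800 is only the common default OAP gRPC port), so adjust them to your deployment.

# agent.config for services that have been switched to the new cluster (values are examples)
agent.namespace=canary-upgrade
collector.backend_service=new-oap-cluster.internal:11800

Rebooting a monitored service with this configuration points its agent at the new OAP backend, while the namespace keeps its data separate from agents still reporting to the old cluster.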
The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have an agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this hot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/latest/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have an agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this hot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. 
As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/next/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.0.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. 
When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.1.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. 
As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.2.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.3.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. 
When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.4.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. 
As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.5.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.6.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. 
When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.7.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/latest/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/next/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. 
Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.0.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.1.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.2.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. 
This makes it incompatible …","ref":"/docs/main/v9.3.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.4.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.5.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.6.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. 
If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.7.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/latest/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. 
Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/next/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic, add layer column to table InstanceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic and InstanceTraffic tables before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services and instances when traffic comes. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/v9.0.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. 
All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/v9.1.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. 
A layer represents an …","ref":"/docs/main/v9.2.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/v9.3.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. 
These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/v9.4.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/v9.5.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  
Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/v9.6.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. 
A layer represents an …","ref":"/docs/main/v9.7.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/latest/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/next/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.0.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.1.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. 
When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.2.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.3.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.4.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.5.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. 
When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.6.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.7.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/latest/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. 
Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/next/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.3.0/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.4.0/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. 
The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.5.0/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.6.0/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. 
Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.7.0/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/latest/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/next/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represents the database nodes detected by server agents' plugins. The performance metrics of the databases are also from Database client side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, Mariadb, MSSQL) in the Java agent could detect the latency of SQL performance, as well as SQL statements. As a result, in this dashboard, SkyWalking would show database traffic, latency, success rate and sampled slow SQLs powered by backend analysis capabilities.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  ","excerpt":"Virtual Database Virtual databases represents the database nodes detected by server agents' plugins. 
…","ref":"/docs/main/v9.0.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  ","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.1.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  ","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.2.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.3.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. 
As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.4.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.5.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.6.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  
Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.7.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The …","ref":"/docs/main/latest/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The …","ref":"/docs/main/next/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. 
The …","ref":"/docs/main/v9.3.0/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.4.0/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.5.0/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. 
The …","ref":"/docs/main/v9.6.0/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.7.0/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","excerpt":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-webflux/","title":"Webflux Tracing Assistant APIs"},{"body":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","excerpt":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-webflux/","title":"Webflux Tracing Assistant APIs"},{"body":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","excerpt":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-webflux/","title":"Webflux Tracing Assistant APIs"},{"body":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","excerpt":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-webflux/","title":"Webflux Tracing Assistant APIs"},{"body":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","excerpt":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-webflux/","title":"Webflux Tracing Assistant APIs"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/latest/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 10. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking\u0026rsquo;s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. 
You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to install SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplinking telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 10. Welcome to the SkyWalking community! …","ref":"/docs/main/next/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.0.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. 
Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.1.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. 
Welcome to the SkyWalking community! …","ref":"/docs/main/v9.2.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.3.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. 
Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.4.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.5.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. 
The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.6.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.7.0/readme/","title":"Welcome"},{"body":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB.\n Installation. Instruments about how to download and onboard BanyanDB server, Banyand. Clients. Some native clients to access Banyand. Observability. Learn how to effectively monitor, diagnose and optimize Banyand. Concept. Learn the concepts of Banyand. Includes the architecture, data model, and so on. CRUD Operations. To create, read, update, and delete data points or entities on resources in the schema.  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. 
Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB. …","ref":"/docs/skywalking-banyandb/latest/readme/","title":"Welcome"},{"body":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB.\n Installation. Instruments about how to download and onboard BanyanDB server, Banyand. Clients. Some native clients to access Banyand. Observability. Learn how to effectively monitor, diagnose and optimize Banyand. Concept. Learn the concepts of Banyand. Includes the architecture, data model, and so on. CRUD Operations. To create, read, update, and delete data points or entities on resources in the schema.  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB. …","ref":"/docs/skywalking-banyandb/next/readme/","title":"Welcome"},{"body":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB.\n Installation. Instruments about how to download and onboard BanyanDB server, Banyand. Clients. Some native clients to access Banyand. Observability. Learn how to effectively monitor, diagnose and optimize Banyand. Concept. Learn the concepts of Banyand. Includes the architecture, data model, and so on. CRUD Operations. To create, read, update, and delete data points or entities on resources in the schema.  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB. …","ref":"/docs/skywalking-banyandb/v0.5.0/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra E2E is an End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It’s built based on the lessons learned from tens of hundreds of test cases in the SkyWalking main repo.\nFrom here you can learn all about SkyWalking Infra E2E\u0026rsquo;s architecture, how to set up E2E testing.\n  Concepts and Designs. The most important core ideas about SkyWalking Infra E2E. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up and running E2E testing.\n  Contribution. 
Introduce how to contribute SkyWalking Infra E2E.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through the pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra …","ref":"/docs/skywalking-infra-e2e/latest/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra E2E is an End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It’s built based on the lessons learned from tens of hundreds of test cases in the SkyWalking main repo.\nFrom here you can learn all about SkyWalking Infra E2E\u0026rsquo;s architecture, how to set up E2E testing.\n  Concepts and Designs. The most important core ideas about SkyWalking Infra E2E. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up and running E2E testing.\n  Contribution. Introduce how to contribute SkyWalking Infra E2E.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through the pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra …","ref":"/docs/skywalking-infra-e2e/next/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra E2E is an End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It’s built based on the lessons learned from tens of hundreds of test cases in the SkyWalking main repo.\nFrom here you can learn all about SkyWalking Infra E2E\u0026rsquo;s architecture, how to set up E2E testing.\n  Concepts and Designs. The most important core ideas about SkyWalking Infra E2E. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up and running E2E testing.\n  Contribution. Introduce how to contribute SkyWalking Infra E2E.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through the pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra …","ref":"/docs/skywalking-infra-e2e/v1.3.0/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Rover\u0026rsquo;s architecture, and how to deploy and use SkyWalking Rover.\n  Concepts and Designs. The most important core ideas about SkyWalking Rover. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Rover.\n  Guides. Guide users to develop or debug SkyWalking Rover.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. 
Or better yet, submit your contributions through a pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here …","ref":"/docs/skywalking-rover/latest/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Rover\u0026rsquo;s architecture, and how to deploy and use SkyWalking Rover.\n  Concepts and Designs. The most important core ideas about SkyWalking Rover. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Rover.\n  Guides. Guide users to develop or debug SkyWalking Rover.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through a pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here …","ref":"/docs/skywalking-rover/next/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Rover\u0026rsquo;s architecture, and how to deploy and use SkyWalking Rover.\n  Concepts and Designs. The most important core ideas about SkyWalking Rover. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Rover.\n  Guides. Guide users to develop or debug SkyWalking Rover.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through a pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here …","ref":"/docs/skywalking-rover/v0.6.0/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Satellite\u0026rsquo;s architecture, how to deploy and use SkyWalking Satellite.\n  Concepts and Designs. The most important core ideas about SkyWalking Satellite. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Satellite.\n  Guides. Guide users to develop or debug SkyWalking Satellite.\n  Protocols. Protocols show the communication ways between agents/probes, Satellite and SkyWalking. Anyone interested in uplink telemetry data should definitely read this.\n  Change logs. The feature records of the different versions.\n  FAQs. A manifest of already known setup problems, secondary developments experiments. When you are facing a problem, check here first.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your own contributions through pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Satellite official documentations. 
You\u0026rsquo;re welcome to join us.\nFrom …","ref":"/docs/skywalking-satellite/latest/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Satellite\u0026rsquo;s architecture, how to deploy and use SkyWalking Satellite.\n  Concepts and Designs. The most important core ideas about SkyWalking Satellite. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Satellite.\n  Guides. Guide users to develop or debug SkyWalking Satellite.\n  Protocols. Protocols show the communication ways between agents/probes, Satellite and SkyWalking. Anyone interested in uplink telemetry data should definitely read this.\n  Change logs. The feature records of the different versions.\n  FAQs. A manifest of already known setup problems, secondary developments experiments. When you are facing a problem, check here first.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your own contributions through pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom …","ref":"/docs/skywalking-satellite/next/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Satellite\u0026rsquo;s architecture, how to deploy and use SkyWalking Satellite.\n  Concepts and Designs. The most important core ideas about SkyWalking Satellite. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Satellite.\n  Guides. Guide users to develop or debug SkyWalking Satellite.\n  Protocols. Protocols show the communication ways between agents/probes, Satellite and SkyWalking. Anyone interested in uplink telemetry data should definitely read this.\n  Change logs. The feature records of the different versions.\n  FAQs. A manifest of already known setup problems, secondary developments experiments. When you are facing a problem, check here first.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your own contributions through pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom …","ref":"/docs/skywalking-satellite/v1.2.0/readme/","title":"Welcome"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. 
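The VNode FAQ above says the UI checks every span's parent reference (segment id + span id) in real time and creates a synthetic VNode when the referenced parent was never reported. The following is a minimal illustrative sketch of that idea only, not the actual SkyWalking UI code (which lives in the web app); the field names `segment_id`, `span_id`, `parent_segment_id`, and `parent_span_id` are assumptions made for the example.

```python
# Illustrative sketch of VNode detection: find parent references that point to
# spans that never arrived, and emit placeholder "VNode" spans for them.
def build_vnodes(spans):
    """Return placeholder 'VNode' spans for parents referenced but never reported."""
    known = {(s["segment_id"], s["span_id"]) for s in spans}
    vnodes = []
    for s in spans:
        parent = (s.get("parent_segment_id"), s.get("parent_span_id"))
        if parent == (None, None):
            continue  # root span, no parent reference to resolve
        if parent not in known:
            # The referenced parent span is missing -> the trace is incomplete,
            # so the UI would render a synthetic VNode in its place.
            vnodes.append({
                "segment_id": parent[0],
                "span_id": parent[1],
                "name": "VNode",
                "synthetic": True,
            })
            known.add(parent)
    return vnodes

# Example: the child references a parent (seg-a, 0) that was never reported.
spans = [
    {"segment_id": "seg-b", "span_id": 0,
     "parent_segment_id": "seg-a", "parent_span_id": 0, "name": "child"},
]
print(build_vnodes(spans))  # -> one placeholder span named "VNode"
```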
The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/latest/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/next/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.0.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? 
The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.1.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.2.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.3.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? 
On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.4.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.5.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  
In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.6.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.7.0/en/faq/vnode/","title":"What is VNode?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/latest/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/next/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  
No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.0.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.1.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.2.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.3.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  
No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.4.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.5.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.6.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.7.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. 
Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/latest/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/next/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.0.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.1.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.2.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. 
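The hour/day FAQ above recommends deleting the expired `*-day_xxxxx` and `*-hour_xxxxx` indexes after upgrading. A minimal sketch of doing that through the Elasticsearch REST API is shown below; it assumes Elasticsearch is reachable at `http://localhost:9200` and that only the naming pattern described in the FAQ identifies the obsolete indexes, so review the matched names before deleting anything.

```python
# Sketch: list index names via _cat/indices and delete the ones matching the
# obsolete -day_/-hour_ naming pattern described in the FAQ above.
import requests

ES = "http://localhost:9200"  # assumed Elasticsearch endpoint

def expired_indexes():
    # ?h=index returns one index name per line
    names = requests.get(f"{ES}/_cat/indices?h=index").text.split()
    return [n for n in names if "-day_" in n or "-hour_" in n]

for name in expired_indexes():
    print("deleting", name)
    requests.delete(f"{ES}/{name}")  # DELETE /<index> removes the index
```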
Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.3.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.4.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.5.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.6.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.7.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? 
This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? Log and trace exporters are using MQ as transport channel. And metrics exporter uses gRPC, as considering the scale.\n","excerpt":"Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default? This is …","ref":"/docs/main/next/en/faq/why_mq_not_involved/","title":"Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. 
Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/latest/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? 
Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.0.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.1.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. 
Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.2.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? 
Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.3.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. 
When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.4.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? 
This is often asked by those who are …","ref":"/docs/main/v9.5.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.6.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). 
Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.7.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","excerpt":"Why is -Djava.ext.dirs not supported? 
-Djava.ext.dirs provides the extension class loader mechanism …","ref":"/docs/skywalking-java/latest/en/faq/ext-dirs/","title":"Why is `-Djava.ext.dirs` not supported?"},{"body":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","excerpt":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism …","ref":"/docs/skywalking-java/next/en/faq/ext-dirs/","title":"Why is `-Djava.ext.dirs` not supported?"},{"body":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","excerpt":"Why is -Djava.ext.dirs not supported? 
-Djava.ext.dirs provides the extension class loader mechanism …","ref":"/docs/skywalking-java/v9.0.0/en/faq/ext-dirs/","title":"Why is `-Djava.ext.dirs` not supported?"},{"body":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","excerpt":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism …","ref":"/docs/skywalking-java/v9.1.0/en/faq/ext-dirs/","title":"Why is `-Djava.ext.dirs` not supported?"},{"body":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","excerpt":"Why is -Djava.ext.dirs not supported? 
-Djava.ext.dirs provides the extension class loader mechanism …","ref":"/docs/skywalking-java/v9.2.0/en/faq/ext-dirs/","title":"Why is `-Djava.ext.dirs` not supported?"},{"body":"Why is Clickhouse or Loki or xxx not supported as a storage option? Background In the past several years, community users have asked why Clickhouse, Loki, or some other storage is not supported in the upstream. We have repeated the answer many times, but the question keeps coming up, so here I would like to write down a summary to help people understand.\nPrevious Discussions All the following issues were about discussing new storage extension topics.\n Loki as storage  https://github.com/apache/skywalking/discussions/9836   ClickHouse  https://github.com/apache/skywalking/issues/11924 https://github.com/apache/skywalking/discussions/9011   Vertica  https://github.com/apache/skywalking/discussions/8817    Generally, all of these requests are about adding a new kind of storage.\nWhy don't they exist? First of all, WHY is not a suitable question. SkyWalking is a volunteer-driven community; the volunteers build this project, including bug fixes, maintenance work, and new features, out of their personal and employer interests. The current status you see is the combination of all those interests rather than of responsibilities. So, in SkyWalking, anything you see existing is/was someone's interest and was contributed upstream.\nThe same logic applies to this question: SkyWalking's active maintainers are focusing on the JDBC (MySQL and PostgreSQL ecosystem) databases and Elasticsearch for existing users, and are moving forward on BanyanDB as the native option. For now, we don't have people interested in ClickHouse or any other database. That is why they are not there.\nHow could one be added? To add a new feature, including a new storage plugin, you should go through the SWIP - SkyWalking Improvement Proposal workflow and have a full discussion with the maintenance team. SkyWalking has a pluggable storage system, so, ideally, a new storage option can be implemented as a new provider of the storage module. Meanwhile, in practice, a storage implementation should be high-performance and well optimized; considering our experiences with the JDBC and Elasticsearch implementations, some flags and annotations may need to be added at the kernel level and in the data model declarations.\nFurthermore, as the current maintainers are not fans of Clickhouse or the others (otherwise, you would have seen those implementations), they are not going to be involved in the code implementations, and they do not know, from a general perspective, which kind of implementation in that specific database would have better behavior and performance. So, if you want to propose this to the upstream, you should be very experienced in that database and have enough scale and environments to provide solid benchmarks.\nWhat happens next if the new implementation gets accepted/merged/released? Whoever proposed the new implementation (such as a ClickHouse storage) has to take responsibility for its maintenance. The maintenance means they need to\n Join storage-related discussions to make sure SkyWalking can move forward on kernel-level optimizations without being blocked by these specific storage options. Respond to this storage's questions, bugs, CVEs, and performance issues. Make the implementation's performance match the expectations of the original proposal. For example, regarding Clickhouse, people talk about how it is faster and more efficient than Elasticsearch for large-scale deployments; then we should always be able to see better benchmarks and production-side practice.  If the storage gets accepted/merged/released but no one takes the above responsibilities, or the community doesn't receive feedback and questions about those storages, the SkyWalking PMC (Project Management Committee) will start the process to remove the implementations. This happened before for the Apache IoTDB and InfluxDB storage options. Here is the last vote about this:\n https://github.com/apache/skywalking/discussions/9059  ","excerpt":"Why is Clickhouse or Loki or xxx not supported as a storage option? Background In the past several …","ref":"/docs/main/next/en/faq/why-clickhouse-not-supported/","title":"Why is Clickhouse or Loki or xxx not supported as a storage option?"},{"body":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from Windows hosts and leverages the OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System. A Windows host appears as a Service in OAP, on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Set up Prometheus windows_exporter. Set up OpenTelemetry Collector. This is an example of the OpenTelemetry Collector configuration otel-collector-config.yaml. Configure the SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml.
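As a reference for the Setup steps above, here is a minimal sketch of an otel-collector-config.yaml that scrapes windows_exporter and forwards the metrics to the OAP gRPC endpoint. The job name, the target host, and the oap.skywalking:11800 address are assumptions to adjust for your deployment (windows_exporter listens on port 9182 by default); how the service gets named is determined by the MAL rules in /config/otel-rules/windows.yaml, not by this snippet.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: windows-exporter            # hypothetical job name
          metrics_path: /metrics
          static_configs:
            - targets: ["windows-host-1:9182"]  # windows_exporter default port; replace with your hosts

exporters:
  otlp:
    endpoint: oap.skywalking:11800              # OAP gRPC service address (assumed; replace with yours)
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]
```

This mirrors the data flow described above: the Prometheus Receiver pulls from windows_exporter, and the OTLP gRPC exporter pushes to the SkyWalking OAP Server.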
The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","excerpt":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the …","ref":"/docs/main/latest/en/setup/backend/backend-win-monitoring/","title":"Windows Monitoring"},{"body":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","excerpt":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the …","ref":"/docs/main/next/en/setup/backend/backend-win-monitoring/","title":"Windows Monitoring"},{"body":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","excerpt":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-win-monitoring/","title":"Windows Monitoring"},{"body":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","excerpt":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-win-monitoring/","title":"Windows Monitoring"},{"body":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","excerpt":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-win-monitoring/","title":"Windows Monitoring"},{"body":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","excerpt":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-win-monitoring/","title":"Windows Monitoring"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. 
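The job configuration quoted in the Istio pages above is flattened by this index. Re-rendered as readable YAML, and keeping the placeholders your-cluster and oap.skywalking:11800 from the original, it looks roughly like this; the logging exporter definition is added here only so the pipeline reference resolves, as it is not spelled out in the flattened text.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'istiod-monitor'
          kubernetes_sd_configs:
            - role: endpoints
          relabel_configs:
            # keep only the istiod http-monitoring endpoints
            - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
              action: keep
              regex: istiod;http-monitoring
            - action: labelmap
              regex: __meta_kubernetes_service_label_(.+)
            # SkyWalking expects a cluster label on every metric
            - source_labels: []
              target_label: cluster
              replacement: your-cluster          # replace this with your cluster name

exporters:
  otlp:
    endpoint: oap.skywalking:11800               # replace this with the OAP gRPC service address
    tls:
      insecure: true
  logging: {}                                    # assumed definition for the logging exporter referenced below

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp, logging]
```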
You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/latest/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/next/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in the Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which is then processed and sent to SkyWalking backend.\nFollow the steps in Getting Started in OpenTelemetry Collector to deploy this collector. There are several components available in the collector, and they could be combined for different use cases. 
For the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNOTE: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.0.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. 
You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.1.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. 
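The two ways of attaching the required cluster label shown in the v9.x pages above are likewise flattened in this index. As a readable sketch, assuming a cluster named my-cluster, they look like this (use either one, not both):

```yaml
# Option 1: add the cluster label in the Prometheus receiver via relabeling
relabel_configs:
  - source_labels: []
    target_label: cluster
    replacement: my-cluster        # <cluster name> placeholder from the original text

# Option 2: add it through the OpenTelemetry Resource Processor instead
processors:
  resource:
    attributes:
      - key: cluster
        value: "my-cluster"
        action: upsert
```

Either way, every metric fetched by the receiver carries the cluster label the OAP expects, even when only a single cluster is being collected.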
You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.2.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.3.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. 
Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.4.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. 
Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.5.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. 
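The collector job configuration quoted above is likewise flattened in this index; a re-indented sketch follows. All values come from the text itself, except the empty `logging:` exporter definition, which is assumed here only so that the pipeline referencing it is self-contained.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'istiod-monitor'
          kubernetes_sd_configs:
            - role: endpoints
          relabel_configs:
            - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
              action: keep
              regex: istiod;http-monitoring
            - action: labelmap
              regex: __meta_kubernetes_service_label_(.+)
            - source_labels: []
              target_label: cluster
              replacement: your-cluster        # replace this with your cluster name

exporters:
  otlp:
    endpoint: oap.skywalking:11800             # replace this with the OAP gRPC service address
    tls:
      insecure: true
  logging: {}                                  # assumed definition; the pipeline below references it

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp, logging]
```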
You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.6.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.7.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running well across multiple versions. If you haven\u0026rsquo;t started developing your plugin yet, please read this Plugin Development Guide first.\nDeveloping a plugin involves the following steps:\n Create a new module: Please create a new module in the specified directory, and it is recommended to name the module the same as the plugin for easy reference. Write the configuration file: This file serves as the declaration file for the plugin, and test cases would be run based on this file. Write the test code: Simulate the actual service operation, including the plugin you want to test. Test execution: Check if the plugin is running properly.  Write Configuration File The configuration file is used to define the basic information of the test plugin. You can use the gin plugin configuration file as an example to write your own. 
It includes the following information:\n entry-service: The test HTTP service entry URL. When this address is accessed, the plugin code should be triggered. health-checker: Executed before the entry-service is accessed to ensure that the service starts without any issues. Status code of 200 is considered a successful service start. start-script: The script execution file path. Please compile and start the service in this file. framework: The access address of the current framework to be tested. During testing, this address would be used to switch between different framework versions. export-port: The port number for the external service entry. support-version: The version information supported by the current plugin.  go: The supported Golang language version for the current plugin. framework: A list of plugin version information. It would be used to switch between multiple framework versions.   dependencies: If your program relies on certain containers, please declare them here. The syntax is largely similar to the services in docker-compose.  image: The image name of service. hostname: The hostname of the container which deployed. port: The port list of the container which deployed. expose: The export port list of the container which deployed. environment: The environment variables of the container which deployed. command: The start command of the container. healthcheck: The health check command of the container. If the service defines a healthcheck, then the service being tested would depend on the current service\u0026rsquo;s service_healthy status. Otherwise, it depends on the service_started status.    URL Access When the service address is accessed, please use ${HTTP_HOST} and ${HTTP_PORT} to represent the domain name and port number to be accessed. The port number corresponds to the export-port field.\nStart Script The startup script is used to compile and execute the program.\nWhen starting, please add the ${GO_BUILD_OPTS} parameter, which specifies the Go Agent program information for hybrid compilation.\nWhen starting, just let the program keep running.\nVersion Matrix Multi-version support is a crucial step in plugin testing. It can test whether the plugin runs stably across multiple framework versions and go versions.\nPlugin testing would use the go get command to modify the plugin version. Please make sure you have filled in the correct framework and support-version.framework. The format is: ${framework}@${support-version.framework}\nDuring plugin execution, the specified official Golang image would be used, allowing the plugin to run in the designated Golang version.\nExcepted File For each plugin, you need to define the config/expected.yml file, which is used to define the observable data generated after the plugin runs. After the plugin runs, this file would be used to validate the data.\nPlease refer to the documentation to write this file.\nWrite Test Code In the test code, please start an HTTP service and expose the following two interfaces:\n Check service: Used to ensure that the service is running properly. This corresponds to the health-checker address in configuration. Entry service: Write the complete framework business logic at this address. Validate all the features provided by the plugin as much as possible. This corresponds to the entry-service address in configuration.  
The test code, like a regular program, needs to import the github.com/apache/skywalking-go package.\nTest Execution Once you have completed the plugin configuration and test code writing, you can proceed to test the framework. Please follow these steps:\n Build tools: Execute the make build command in the test/plugins directory. It would generate some tools needed for testing in the dist folder of this directory. Run the plugin locally: Start the plugin test program and iterate through all framework versions for testing on your local environment. Add to GitHub Action: Fill in the name of the test plugin in this file, and the plugin test would be executed and validated each time a pull request is submitted.  Run the Plugin Test Locally Please execute the run.sh script in the test/plugins directory and pass in the name of the plugin you wrote (the folder name). At this point, the script would read the configuration file of the plugin test and create a workspace directory in this location for temporarily storing files generated by each plugin. Finally, it would start the test code and validate the data sequentially according to the supported version information.\nThe script supports the following two parameters:\n \u0026ndash;clean: Clean up the files and containers generated by the current running environment. \u0026ndash;debug: Enable debug mode for plugin testing. In this mode, the content generated by each framework in the workspace would not be cleared, and the temporary files generated during hybrid compilation would be saved.  ","excerpt":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running …","ref":"/docs/skywalking-go/latest/en/development-and-contribution/write-plugin-testing/","title":"Write Plugin Test"},{"body":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running well across multiple versions. If you haven\u0026rsquo;t started developing your plugin yet, please read this Plugin Development Guide first.\nDeveloping a plugin involves the following steps:\n Create a new module: Please create a new module in the specified directory, and it is recommended to name the module the same as the plugin for easy reference. Write the configuration file: This file serves as the declaration file for the plugin, and test cases would be run based on this file. Write the test code: Simulate the actual service operation, including the plugin you want to test. Test execution: Check if the plugin is running properly.  Write Configuration File The configuration file is used to define the basic information of the test plugin. You can use the gin plugin configuration file as an example to write your own. It includes the following information:\n entry-service: The test HTTP service entry URL. When this address is accessed, the plugin code should be triggered. health-checker: Executed before the entry-service is accessed to ensure that the service starts without any issues. Status code of 200 is considered a successful service start. start-script: The script execution file path. Please compile and start the service in this file. framework: The access address of the current framework to be tested. During testing, this address would be used to switch between different framework versions. export-port: The port number for the external service entry. support-version: The version information supported by the current plugin.  go: The supported Golang language version for the current plugin. 
framework: A list of plugin version information. It would be used to switch between multiple framework versions.   dependencies: If your program relies on certain containers, please declare them here. The syntax is largely similar to the services in docker-compose.  image: The image name of service. hostname: The hostname of the container which deployed. port: The port list of the container which deployed. expose: The export port list of the container which deployed. environment: The environment variables of the container which deployed. command: The start command of the container. healthcheck: The health check command of the container. If the service defines a healthcheck, then the service being tested would depend on the current service\u0026rsquo;s service_healthy status. Otherwise, it depends on the service_started status.    URL Access When the service address is accessed, please use ${HTTP_HOST} and ${HTTP_PORT} to represent the domain name and port number to be accessed. The port number corresponds to the export-port field.\nStart Script The startup script is used to compile and execute the program.\nWhen starting, please add the ${GO_BUILD_OPTS} parameter, which specifies the Go Agent program information for hybrid compilation.\nWhen starting, just let the program keep running.\nVersion Matrix Multi-version support is a crucial step in plugin testing. It can test whether the plugin runs stably across multiple framework versions and go versions.\nPlugin testing would use the go get command to modify the plugin version. Please make sure you have filled in the correct framework and support-version.framework. The format is: ${framework}@${support-version.framework}\nDuring plugin execution, the specified official Golang image would be used, allowing the plugin to run in the designated Golang version.\nExcepted File For each plugin, you need to define the config/expected.yml file, which is used to define the observable data generated after the plugin runs. After the plugin runs, this file would be used to validate the data.\nPlease refer to the documentation to write this file.\nWrite Test Code In the test code, please start an HTTP service and expose the following two interfaces:\n Check service: Used to ensure that the service is running properly. This corresponds to the health-checker address in configuration. Entry service: Write the complete framework business logic at this address. Validate all the features provided by the plugin as much as possible. This corresponds to the entry-service address in configuration.  The test code, like a regular program, needs to import the github.com/apache/skywalking-go package.\nTest Execution Once you have completed the plugin configuration and test code writing, you can proceed to test the framework. Please follow these steps:\n Build tools: Execute the make build command in the test/plugins directory. It would generate some tools needed for testing in the dist folder of this directory. Run the plugin locally: Start the plugin test program and iterate through all framework versions for testing on your local environment. Add to GitHub Action: Fill in the name of the test plugin in this file, and the plugin test would be executed and validated each time a pull request is submitted.  Run the Plugin Test Locally Please execute the run.sh script in the test/plugins directory and pass in the name of the plugin you wrote (the folder name). 
At this point, the script would read the configuration file of the plugin test and create a workspace directory in this location for temporarily storing files generated by each plugin. Finally, it would start the test code and validate the data sequentially according to the supported version information.\nThe script supports the following two parameters:\n \u0026ndash;clean: Clean up the files and containers generated by the current running environment. \u0026ndash;debug: Enable debug mode for plugin testing. In this mode, the content generated by each framework in the workspace would not be cleared, and the temporary files generated during hybrid compilation would be saved.  ","excerpt":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running …","ref":"/docs/skywalking-go/next/en/development-and-contribution/write-plugin-testing/","title":"Write Plugin Test"},{"body":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running well across multiple versions. If you haven\u0026rsquo;t started developing your plugin yet, please read this Plugin Development Guide first.\nDeveloping a plugin involves the following steps:\n Create a new module: Please create a new module in the specified directory, and it is recommended to name the module the same as the plugin for easy reference. Write the configuration file: This file serves as the declaration file for the plugin, and test cases would be run based on this file. Write the test code: Simulate the actual service operation, including the plugin you want to test. Test execution: Check if the plugin is running properly.  Write Configuration File The configuration file is used to define the basic information of the test plugin. You can use the gin plugin configuration file as an example to write your own. It includes the following information:\n entry-service: The test HTTP service entry URL. When this address is accessed, the plugin code should be triggered. health-checker: Executed before the entry-service is accessed to ensure that the service starts without any issues. Status code of 200 is considered a successful service start. start-script: The script execution file path. Please compile and start the service in this file. framework: The access address of the current framework to be tested. During testing, this address would be used to switch between different framework versions. export-port: The port number for the external service entry. support-version: The version information supported by the current plugin.  go: The supported Golang language version for the current plugin. framework: A list of plugin version information. It would be used to switch between multiple framework versions.   dependencies: If your program relies on certain containers, please declare them here. The syntax is largely similar to the services in docker-compose.  image: The image name of service. hostname: The hostname of the container which deployed. port: The port list of the container which deployed. expose: The export port list of the container which deployed. environment: The environment variables of the container which deployed. command: The start command of the container. healthcheck: The health check command of the container. If the service defines a healthcheck, then the service being tested would depend on the current service\u0026rsquo;s service_healthy status. Otherwise, it depends on the service_started status.    
URL Access When the service address is accessed, please use ${HTTP_HOST} and ${HTTP_PORT} to represent the domain name and port number to be accessed. The port number corresponds to the export-port field.\nStart Script The startup script is used to compile and execute the program.\nWhen starting, please add the ${GO_BUILD_OPTS} parameter, which specifies the Go Agent program information for hybrid compilation.\nWhen starting, just let the program keep running.\nVersion Matrix Multi-version support is a crucial step in plugin testing. It can test whether the plugin runs stably across multiple framework versions and go versions.\nPlugin testing would use the go get command to modify the plugin version. Please make sure you have filled in the correct framework and support-version.framework. The format is: ${framework}@${support-version.framework}\nDuring plugin execution, the specified official Golang image would be used, allowing the plugin to run in the designated Golang version.\nExcepted File For each plugin, you need to define the config/expected.yml file, which is used to define the observable data generated after the plugin runs. After the plugin runs, this file would be used to validate the data.\nPlease refer to the documentation to write this file.\nWrite Test Code In the test code, please start an HTTP service and expose the following two interfaces:\n Check service: Used to ensure that the service is running properly. This corresponds to the health-checker address in configuration. Entry service: Write the complete framework business logic at this address. Validate all the features provided by the plugin as much as possible. This corresponds to the entry-service address in configuration.  The test code, like a regular program, needs to import the github.com/apache/skywalking-go package.\nTest Execution Once you have completed the plugin configuration and test code writing, you can proceed to test the framework. Please follow these steps:\n Build tools: Execute the make build command in the test/plugins directory. It would generate some tools needed for testing in the dist folder of this directory. Run the plugin locally: Start the plugin test program and iterate through all framework versions for testing on your local environment. Add to GitHub Action: Fill in the name of the test plugin in this file, and the plugin test would be executed and validated each time a pull request is submitted.  Run the Plugin Test Locally Please execute the run.sh script in the test/plugins directory and pass in the name of the plugin you wrote (the folder name). At this point, the script would read the configuration file of the plugin test and create a workspace directory in this location for temporarily storing files generated by each plugin. Finally, it would start the test code and validate the data sequentially according to the supported version information.\nThe script supports the following two parameters:\n \u0026ndash;clean: Clean up the files and containers generated by the current running environment. \u0026ndash;debug: Enable debug mode for plugin testing. In this mode, the content generated by each framework in the workspace would not be cleared, and the temporary files generated during hybrid compilation would be saved.  
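To make the field list above more concrete, here is a hypothetical plugin test declaration. Only the key names are taken from the description; the layout, the gin module path, the ports, and the version numbers are illustrative assumptions, not the actual file format.

```yaml
# Hypothetical plugin test declaration; key names follow the description above,
# all values and the exact nesting are assumptions for illustration only.
entry-service: "http://${HTTP_HOST}:${HTTP_PORT}/execute"   # accessing this should trigger the plugin code
health-checker: "http://${HTTP_HOST}:${HTTP_PORT}/health"   # HTTP 200 means the service started successfully
start-script: ./bin/startup.sh                              # compiles and starts the service under test
framework: github.com/gin-gonic/gin                         # module whose version the harness switches
export-port: 8080                                           # external service entry port
support-version:
  - go: "1.19"                                              # supported Golang version
    framework:                                              # roughly: go get github.com/gin-gonic/gin@v1.9.1
      - v1.8.0
      - v1.9.1
dependencies:                                               # syntax similar to docker-compose services
  redis:
    image: redis:7
    hostname: redis
    port:
      - 6379
    expose:
      - 6379
```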
","excerpt":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running …","ref":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/write-plugin-testing/","title":"Write Plugin Test"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/latest/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. 
If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/next/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
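The receiver-zabbix module definition quoted above reads more clearly when re-indented; the values below are verbatim from that block, with only the formatting and comment wording tidied.

```yaml
receiver-zabbix:
  selector: ${SW_RECEIVER_ZABBIX:default}
  default:
    # Exported TCP port; Zabbix agents connect here to transport data.
    port: 10051
    # Host to bind to.
    host: 0.0.0.0
    # Rule files enabled when an agent request is received.
    activeFiles: agent
```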
You could find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. 
The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. 
Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zend observer  Refer to: https://www.datadoghq.com/blog/engineering/php-8-observability-baked-right-in/#the-observability-landscape-before-php-8\n By default, skywalking-php hooks the zend_execute_internal and zend_execute_ex functions to implement auto instrumentation.\nBut there are some drawbacks:\n All PHP function calls are placed on the native C stack, which is limited by the value set in ulimit -s. Not compatible with the new JIT added in PHP 8.  The observer API in PHP 8+ Now, zend observer api is a new generation method, and it is also a method currently recommended by PHP8.\nThis method has no stack problem and will not affect JIT.\nConfiguration The following configuration example enables JIT in PHP8 and zend observer support in skywalking-php at the same time.\n[opcache] zend_extension = opcache ; Enable JIT opcache.jit = tracing [skywalking_agent] extension = skywalking_agent.so ; Switch to use zend observer api to implement auto instrumentation. skywalking_agent.enable_zend_observer = On ","excerpt":"Zend observer  Refer to: …","ref":"/docs/skywalking-php/latest/en/configuration/zend-observer/","title":"Zend observer"},{"body":"Zend observer  Refer to: https://www.datadoghq.com/blog/engineering/php-8-observability-baked-right-in/#the-observability-landscape-before-php-8\n By default, skywalking-php hooks the zend_execute_internal and zend_execute_ex functions to implement auto instrumentation.\nBut there are some drawbacks:\n All PHP function calls are placed on the native C stack, which is limited by the value set in ulimit -s. Not compatible with the new JIT added in PHP 8.  
The observer API in PHP 8+ Now, zend observer api is a new generation method, and it is also a method currently recommended by PHP8.\nThis method has no stack problem and will not affect JIT.\nConfiguration The following configuration example enables JIT in PHP8 and zend observer support in skywalking-php at the same time.\n[opcache] zend_extension = opcache ; Enable JIT opcache.jit = tracing [skywalking_agent] extension = skywalking_agent.so ; Switch to use zend observer api to implement auto instrumentation. skywalking_agent.enable_zend_observer = On ","excerpt":"Zend observer  Refer to: …","ref":"/docs/skywalking-php/next/en/configuration/zend-observer/","title":"Zend observer"},{"body":"Zend observer  Refer to: https://www.datadoghq.com/blog/engineering/php-8-observability-baked-right-in/#the-observability-landscape-before-php-8\n By default, skywalking-php hooks the zend_execute_internal and zend_execute_ex functions to implement auto instrumentation.\nBut there are some drawbacks:\n All PHP function calls are placed on the native C stack, which is limited by the value set in ulimit -s. Not compatible with the new JIT added in PHP 8.  The observer API in PHP 8+ Now, zend observer api is a new generation method, and it is also a method currently recommended by PHP8.\nThis method has no stack problem and will not affect JIT.\nConfiguration The following configuration example enables JIT in PHP8 and zend observer support in skywalking-php at the same time.\n[opcache] zend_extension = opcache ; Enable JIT opcache.jit = tracing [skywalking_agent] extension = skywalking_agent.so ; Switch to use zend observer api to implement auto instrumentation. skywalking_agent.enable_zend_observer = On ","excerpt":"Zend observer  Refer to: …","ref":"/docs/skywalking-php/v0.7.0/en/configuration/zend-observer/","title":"Zend observer"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. 
If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/latest/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. 
Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. 
Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/next/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation. It supports Zipkin v1/v2 formats through HTTP service. Make sure you use this with SW_STORAGE=zipkin-elasticsearch option to activate Zipkin storage implementation. Once this receiver and storage are activated, SkyWalking\u0026rsquo;s native traces would be ignored, and SkyWalking wouldn\u0026rsquo;t analyze topology, metrics, and endpoint dependency from Zipkin\u0026rsquo;s trace.\nUse the following config to activate it.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:-}default:host:${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}port:${SW_RECEIVER_ZIPKIN_PORT:9411}contextPath:${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}jettyMinThreads:${SW_RECEIVER_ZIPKIN_JETTY_MIN_THREADS:1}jettyMaxThreads:${SW_RECEIVER_ZIPKIN_JETTY_MAX_THREADS:200}jettyIdleTimeOut:${SW_RECEIVER_ZIPKIN_JETTY_IDLE_TIMEOUT:30000}jettyAcceptorPriorityDelta:${SW_RECEIVER_ZIPKIN_JETTY_DELTA:0}jettyAcceptQueueSize:${SW_RECEIVER_ZIPKIN_QUEUE_SIZE:0}NOTE: Zipkin receiver requires zipkin-elasticsearch storage implementation to be activated. Read this doc to learn about Zipkin as a storage option.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.0.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP service.\nUse the following config to activate it.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI. 
Notice: Zipkin query API implementation does not support BanyanDB yet.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.1.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI. 
Notice: Zipkin query API implementation does not support BanyanDB yet.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.2.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. 
It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.3.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. 
It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.4.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. 
If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.5.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. 
Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. 
Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.6.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. 
It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.7.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"","excerpt":"","ref":"/zh/","title":"博客"}]
\ No newline at end of file
+[{"body":"Apache SkyWalking从2015年开源到2024年,已经走过了9个年头,项目的规模和功能也得到了极大的丰富。 2024年4月至6月,SkyWalking社区联合纵目,举办线上的联合直播,分多个主题介绍SkyWalking的核心特性,也提供更多的答疑时间。\n2024年4月25日,SkyWalking创始人带来了第一次分享和Q\u0026amp;A\n 熟悉SkyWalking项目结构 介绍项目工程划分,边界,定位 SkyWalking文档使用,以及如何使用AI助手 Q\u0026amp;A  B站视频地址\n想参与直播的小伙伴,可以关注后续的直播安排和我们的B站直播预约\n","excerpt":"Apache SkyWalking从2015年开源到2024年,已经走过了9个年头,项目的规模和功能也得到了极大的丰富。 2024年4月至6月,SkyWalking社区联合纵目,举办线上的联合直播,分 …","ref":"/zh/2024-04-26-skywalking-in-practice-s01e01/","title":"SkyWalking从入门到精通 - 2024系列线上分享活动(第一讲)"},{"body":"","excerpt":"","ref":"/tags/activemq/","title":"ActiveMQ"},{"body":"Introduction Apache ActiveMQ Classic is a popular and powerful open-source messaging and integration pattern server. Founded in 2004, it has evolved into a mature and widely used open-source messaging middleware that complies with the Java Message Service (JMS). Today, with its stability and wide range of feature support, it still has a certain number of users of small and medium-sized enterprises. It‘s high-performance version Apache Artemis is developing rapidly and is also attracting attention from users of ActiveMQ.\nActiveMQ has broad support for JMX (Java Management Extensions), allowing to be monitored through JMX MBean. After enabling JMX, you can use JAVA\u0026rsquo;s built-in jconsole or VisualVM to view the metrics. In addition, some Collector components can also be used to convert JMX-style data into Prometheus-style data, which is suitable for more tools.\nOpenTelemetry as an industry-recognized, standardized solution that provides consistent and interoperable telemetry data collection, transmission, and analysis capabilities for distributed systems, and is also used here for data collection and transmission. Although it can directly accept JMX type data, the JMX indicators for collecting ActiveMQ are not in the standard library, and some versions are incompatible, so this article adopts two steps: convert JMX data into Prometheus-style indicator data, and then use OpenTelemetry to scrape HTTP endpoint data.\nSkyWalking as a one-stop distributed system monitoring solution, it accepts metrics from ActiveMQ and provides a basic monitoring dashboard.\nDeployment Please set up the following services:\n SkyWalking OAP, v10.0+. ActiveMQ v6.0.X+. JMX Exporter v0.20.0. If using docker, refer bitnami/jmx-exporter. OpenTelmetry-Collector v0.92.0.  Preparation The following describes how to deploy ActiveMQ with 2 single-node brokers and SkyWalking OAP with one single node. JMX Exporter runs in agent mode (recommended).\nConfiguration  Enable JMX in ActiveMQ, the JMX remote port defaults to 1616, you can change it through ACTIVEMQ_SUNJMX_START. Set up the exporter:  [Recommended] If run exporter in agent mode, need to append the startup parameter -DACTIVEMQ_OPTS=-javaagent:{activemqPath}/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:{activemqPath}/conf/config.yaml in ActiveMQ env, then exporter server starts at the same time. If run exporter in single server, refer here to deploy the server alone. 2345 is open HTTP port that can be customized. JMX\u0026rsquo;s metrics can be queried through http://localhost:2345/metrics.    
example of docker-compose.yml with agent exporter for ActiveMQ:\nversion:\u0026#39;3.8\u0026#39;services:amq1:image:apache/activemq-classic:latestcontainer_name:amq1hostname:amq1volumes:- ~/activemq1/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq1/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq1/conf/config.yaml:/opt/apache-activemq/conf/config.yamlports:- \u0026#34;61616:61616\u0026#34;- \u0026#34;8161:8161\u0026#34;- \u0026#34;2345:2345\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-1networks:- amqtest amq2:image:apache/activemq-classic:latestcontainer_name:amq2hostname:amq2volumes:- ~/activemq2/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq2/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq2/conf/config.yaml:/opt/apache-activemq/conf/config.yaml ports:- \u0026#34;61617:61616\u0026#34;- \u0026#34;8162:8161\u0026#34;- \u0026#34;2346:2346\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2346:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-2 networks:- amqtestotel-collector1:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector1command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config1.yaml:/etc/otel-collector-config.yamldepends_on:- amq1networks:- amqtest otel-collector2:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector2command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config2.yaml:/etc/otel-collector-config.yamldepends_on:- amq2networks:- amqtest networks:amqtest:example of otel-collector-config.yaml for OpenTelemetry:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;activemq-monitoring\u0026#39;scrape_interval:30sstatic_configs:- targets:[\u0026#39;amq1:2345\u0026#39;]labels:cluster:activemq-broker1processors:batch:exporters:otlp:endpoint:oap:11800tls:insecure:trueservice:pipelines:metrics:receivers:- prometheusprocessors:- batchexporters:- otlpexample of config.yaml for ActiveMQ Exporter:\n---startDelaySeconds:10username:adminpassword:activemqssl:falselowercaseOutputName:falselowercaseOutputLabelNames:falseincludeObjectNames:[\u0026#34;org.apache.activemq:*\u0026#34;,\u0026#34;java.lang:type=OperatingSystem\u0026#34;,\u0026#34;java.lang:type=GarbageCollector,*\u0026#34;,\u0026#34;java.lang:type=Threading\u0026#34;,\u0026#34;java.lang:type=Runtime\u0026#34;,\u0026#34;java.lang:type=Memory\u0026#34;,\u0026#34;java.lang:name=*\u0026#34;]excludeObjectNames:[\u0026#34;org.apache.activemq:type=ColumnFamily,*\u0026#34;]autoExcludeObjectNameAttributes:trueexcludeObjectNameAttributes:\u0026#34;java.lang:type=OperatingSystem\u0026#34;:- \u0026#34;ObjectName\u0026#34;\u0026#34;java.lang:type=Runtime\u0026#34;:- \u0026#34;ClassPath\u0026#34;- \u0026#34;SystemProperties\u0026#34;rules:- pattern:\u0026#34;.*\u0026#34;Steps  Start ActiveMQ, and the Exporter(agent) and the service start at the same time. Start SkyWalking OAP and SkyWalking UI. Start OpenTelmetry-Collector.  
After completion, node metrics will be captured and pushed to SkyWalking.\nMetrics Monitoring metrics fall into Cluster Metrics, Broker Metrics, and Destination Metrics.\n Cluster Metrics: including memory usage, rates of write/read, and average/max duration of write. Broker Metrics: including node state, number of connections, number of producers/consumers, and rate of write/read under the broker. Depending on the cluster mode, one cluster may include one or more brokers. Destination Metrics: including number of producers/consumers, messages in different states, queues, and enqueue duration in a queue/topic.  Cluster Metrics  System Load: range in [0, 100]. Thread Count: the number of threads currently used by the JVM. Heap Memory: capacity of heap memory. GC: memory of ActiveMQ is managed by Java\u0026rsquo;s garbage collection (GC) process. Enqueue/Dequeue/Dispatch/Expired Rate: growth rate of messages in different states. Average/Max Enqueue Time: time taken to join the queue.  Broker Metrics  Uptime: duration of the node. State: 1 = slave node, 0 = master node. Current Connections: number of connections. Current Producer/Consumer Count: number of current producers/consumers. Increased Producer/Consumer Count: number of increased producers/consumers. Enqueue/Dequeue Count: number of enqueue and dequeue operations. Enqueue/Dequeue Rate: rate of enqueue and dequeue operations. Memory Percent Usage: amount of memory space used by undelivered messages. Store Percent Usage: space used by pending persistent messages. Temp Percent Usage: space used by non-persistent messages. Average/Max Message Size: average/maximum size of messages. Queue Size: number of messages in the queue.  Destination Metrics  Producer/Consumer Count: number of producers/consumers. Queue Size: unacknowledged messages of the queue. Memory Usage: usage of memory. Enqueue/Dequeue/Dispatch/Expired/Inflight Count: number of messages in different states. Average/Max Message Size: average/maximum size of messages. Average/Max Enqueue Time: time taken to join the queue.  
Reference  ActiveMQ Classic clustering JMX Exporter Configuration JMX Exporter-Running the Standalone HTTP Server OpenTelemetry Collector Contrib Jmxreceiver  ","excerpt":"Introduction Apache ActiveMQ Classic is a popular and powerful open-source messaging and integration …","ref":"/blog/2024-04-19-monitoring-activemq-through-skywalking/","title":"Monitoring ActiveMQ through SkyWalking"},{"body":"","excerpt":"","ref":"/tags/","title":"Tags"},{"body":"引言 Apache ActiveMQ Classic 是一个流行且功能强大的开源消息传递和集成模式服务器。始于2004年,逐渐发展成为了一个成熟且广泛使用的开源消息中间件,符合Java消息服务(JMS)规范。 发展至今,凭借其稳定性和广泛的特性支持,仍然拥有一定数量的中小型企业的使用者。其高性能版本 Apache Artemis 目前处于快速发展阶段,也受到了 ActiveMQ 现有使用者的关注。\nActiveMQ 对 JMX(Java Management Extensions) 有广泛的支持,允许通过 JMX MBean 监视和控制代理的行为。 开启JMX之后,就可以使用 JAVA 自带的 jconsole 工具或者 VisualVM 等工具直观查看指标。此外也可以通过一些 Collector 组件,将 JMX 风格的数据转换为 prometheus 风格的数据,适配更多查询与展示工具。\nOpenTelemetry 作为业界公认的标准化解决方案,可为分布式系统提供一致且可互操作的遥测数据收集、传输和分析能力,这里也主要借助它实现数据的采集和传输。 它虽然可以直接接受 JMX 类型的数据,但是关于采集 ActiveMQ 的 JMX 指标并不在标准库,存在部分版本不兼容,因此本文采用两步:将 JMX 数据转换为 Prometheus 风格的指标数据,再使用 OpenTelemetry 传递。\nSkyWalking 作为一站式的分布式系统监控解决方案,接纳来自 ActiveMQ 的指标数据,并提供基础的指标监控面板。\n服务部署 请准备以下服务\n SkyWalking OAP, v10.0+。 ActiveMQ v6.0.X+。 JMX Exporter v0.20.0。如果你使用docker,参考使用 bitnami/jmx-exporter。 OpenTelmetry-Collector v0.92.0。  服务准备 以下通过 SkyWalking OAP 单节点、ActiveMQ 2个单节点服务的部署方式介绍。JMX Exporter 采用推荐的 agent 方式启动。\n配置流程  在 ActiveMQ 中开启JMX,其中 JMX 远程端口默认1616,如需修改可通过 ACTIVEMQ_SUNJMX_START 参数调整。 设置 Exporter:  如果采用推荐的 Agent 方式启动,需要追加启动参数 -DACTIVEMQ_OPTS=-javaagent:{activemqPath}/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:{activemqPath}/conf/config.yaml 如果采用单独服务的方式启动,可以参考这里独立部署 Exporter 服务。 其中 2345 为开放的 HTTP 端口可自定义。最终可通过访问 http://localhost:2345/metrics 查询到 JMX 的指标数据。    采用 Agent Exporter 方式的 docker-compose.yml 配置样例:\nversion:\u0026#39;3.8\u0026#39;services:amq1:image:apache/activemq-classic:latestcontainer_name:amq1hostname:amq1volumes:- ~/activemq1/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq1/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq1/conf/config.yaml:/opt/apache-activemq/conf/config.yamlports:- \u0026#34;61616:61616\u0026#34;- \u0026#34;8161:8161\u0026#34;- \u0026#34;2345:2345\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-1networks:- amqtest amq2:image:apache/activemq-classic:latestcontainer_name:amq2hostname:amq2volumes:- ~/activemq2/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq2/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq2/conf/config.yaml:/opt/apache-activemq/conf/config.yaml ports:- \u0026#34;61617:61616\u0026#34;- \u0026#34;8162:8161\u0026#34;- \u0026#34;2346:2346\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2346:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-2 networks:- amqtestotel-collector1:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector1command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config1.yaml:/etc/otel-collector-config.yamldepends_on:- amq1networks:- amqtest otel-collector2:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector2command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- 
./otel-collector-config2.yaml:/etc/otel-collector-config.yamldepends_on:- amq2networks:- amqtest networks:amqtest:OpenTelemetry otel-collector-config.yaml 配置样例:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;activemq-monitoring\u0026#39;scrape_interval:30sstatic_configs:- targets:[\u0026#39;amq1:2345\u0026#39;]labels:cluster:activemq-broker1processors:batch:exporters:otlp:endpoint:oap:11800tls:insecure:trueservice:pipelines:metrics:receivers:- prometheusprocessors:- batchexporters:- otlpActiveMQ Exporter config.yaml 配置样例:\n---startDelaySeconds:10username:adminpassword:activemqssl:falselowercaseOutputName:falselowercaseOutputLabelNames:falseincludeObjectNames:[\u0026#34;org.apache.activemq:*\u0026#34;,\u0026#34;java.lang:type=OperatingSystem\u0026#34;,\u0026#34;java.lang:type=GarbageCollector,*\u0026#34;,\u0026#34;java.lang:type=Threading\u0026#34;,\u0026#34;java.lang:type=Runtime\u0026#34;,\u0026#34;java.lang:type=Memory\u0026#34;,\u0026#34;java.lang:name=*\u0026#34;]excludeObjectNames:[\u0026#34;org.apache.activemq:type=ColumnFamily,*\u0026#34;]autoExcludeObjectNameAttributes:trueexcludeObjectNameAttributes:\u0026#34;java.lang:type=OperatingSystem\u0026#34;:- \u0026#34;ObjectName\u0026#34;\u0026#34;java.lang:type=Runtime\u0026#34;:- \u0026#34;ClassPath\u0026#34;- \u0026#34;SystemProperties\u0026#34;rules:- pattern:\u0026#34;.*\u0026#34;启动步骤  启动 ActiveMQ,Exporter 和服务同时启动。 启动 SkyWalking OAP 和 SkyWalking UI。 启动 OpenTelmetry-Collector。  以上步骤执行完成后,节点指标就会定时抓取后推送到 SkyWalking,经过分组聚合后前端页面可查看到 ActiveMQ 的面板数据。\n监控指标 监控指标主要分为3类:Cluster 指标、Broker 指标、Destination 指标\n Cluster 指标:主要关注集群的内存使用情况、数据写入与读取速率平均情况、平均与最大的写入时长等。 Broker 指标:主要关注 Broker 下节点状态、连接数、生产者消费者数量、写入读取速率等。根据集群形式不同,一个Cluster可能包括一个或多个Broker。 Destination 指标:主要关注 Queue/Topic 下的生产者消费者数量、不同状态消息数量、队列数量、入队时长等。  Cluster 指标  System Load:[0, 100]的值来反馈系统负载。 Thread Count:JVM 当前使用的线程数。 Heap Memory:堆内存的容量一定程度反映服务的处理性能。 GC:ActiveMQ 在 JVM 中运行,其内存由 Java 的垃圾回收 (GC) 进程管理,GC能直接反映服务的状态。 Enqueue/Dequeue/Dispatch/Expired Rate:不同状态信息的增长速率能直接反映生产活动。 Average/Max Enqueue Time:入队的耗时能一定程度影响生产者。  Broker 指标  Uptime:节点存活时长。 State:是否为从节点,1=从节点,0=主节点。 Current Connentions:目前的连接数。 Current Producer/Consumer Count:目前生产者消费者数量。 Increased Producer/Consumer Count:增长的生产者消费者数量。 Enqueue/Dequeue Count: 入队出队数量。 Enqueue/Dequeue Rate: 入队出队速率。 Memory Percent Usage:未送达消息使用的内存空间。 Store Percent Usage: 挂起的持久性消息占用的空间。 Temp Percent Usage:非持久化消息占用的空间。 Average/Max Message Size:消息量。 Queue Size:队列中消息量。  Destination 指标  Producer/Consumer Count:生产者/消费者数量。 Queue Size:队列的未消费数量。 Memory Usage:内存的使用。 Enqueue/Dequeue/Dispatch/Expired/Inflight Count:不同状态消息数。 Average/Max Enqueue Time:入队的耗时。 Average/Max Message Size:消息量。  参考文档  ActiveMQ Classic clustering JMX Exporter Configuration JMX Exporter-Running the Standalone HTTP Server OpenTelemetry Collector Contrib Jmxreceiver  ","excerpt":"引言 Apache ActiveMQ Classic 是一个流行且功能强大的开源消息传递和集成模式服务器。始于2004年,逐渐发展成为了一个成熟且广泛使用的开源消息中间件,符合Java消息服 …","ref":"/zh/2024-04-19-monitoring-activemq-through-skywalking/","title":"使用 SkyWalking 监控 ActiveMQ"},{"body":"Zixin Zhou(GitHub ID, CodePrometheus[1]) began the code contributions since Oct 28, 2023.\nUp to date, he has submitted 8 PRs in the Go agent repository, 7 PRs in the main repo, 1 PR in the UI repository and 2 PRs in the showcase repository.\nAt Apr 15th, 2024, the project management committee(PMC) passed the proposal of promoting him as a new committer. 
He has accepted the invitation at the same day.\nWelcome Zixin Zhou join the committer team.\n[1] https://github.com/CodePrometheus\n","excerpt":"Zixin Zhou(GitHub ID, CodePrometheus[1]) began the code contributions since Oct 28, 2023.\nUp to …","ref":"/events/welcome-zixin-zhou-as-new-committer/","title":"Welcome Zixin Zhou as new committer"},{"body":"SkyWalking Eyes 0.6.0 is released. Go to downloads page to find release tars.\n Add | as comment indicator by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/168 Correct the way of joining slack channels by @wu-sheng in https://github.com/apache/skywalking-eyes/pull/169 update: add weak-compatible to dependency check by @Two-Hearts in https://github.com/apache/skywalking-eyes/pull/171 feature: add support for Protocol Buffer by @spacewander in https://github.com/apache/skywalking-eyes/pull/172 feature: add support for OPA policy files by @spacewander in https://github.com/apache/skywalking-eyes/pull/174 add Eclipse Foundation specific Apache 2.0 license header by @gdams in https://github.com/apache/skywalking-eyes/pull/178 add instructions to fix header issues in markdown comment by @gdams in https://github.com/apache/skywalking-eyes/pull/179 bump action/setup-go to v5 by @gdams in https://github.com/apache/skywalking-eyes/pull/180 Draft release notes for 0.6.0 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/181  Full Changelog: https://github.com/apache/skywalking-eyes/compare/v0.5.0...v0.6.0\n","excerpt":"SkyWalking Eyes 0.6.0 is released. Go to downloads page to find release tars.\n Add | as comment …","ref":"/events/release-apache-skywalking-eyes-0-6-0/","title":"Release Apache SkyWalking Eyes 0.6.0"},{"body":"SkyWalking Java Agent 9.2.0 is released. Go to downloads page to find release tars. Changes by Version\n9.2.0  Fix NoSuchMethodError in mvc-annotation-commons and change deprecated method. Fix forkjoinpool plugin in JDK11. Support for tracing spring-cloud-gateway 4.x in gateway-4.x-plugin. Fix re-transform bug when plugin enhanced class proxy parent method. Fix error HTTP status codes not recording as SLA failures in Vert.x plugins. Support for HttpExchange request tracing. Support tracing for async producing, batch sync consuming, and batch async consuming in rocketMQ-client-java-5.x-plugin. Convert the Redisson span into an async span. Rename system env name from sw_plugin_kafka_producer_config to SW_PLUGIN_KAFKA_PRODUCER_CONFIG. Support for ActiveMQ-Artemis messaging tracing. Archive the expired plugins impala-jdbc-2.6.x-plugin. Fix a bug in Spring Cloud Gateway if HttpClientFinalizer#send does not invoke, the span created at NettyRoutingFilterInterceptor can not stop. Fix not tracing in HttpClient v5 when HttpHost(arg[0]) is null but RoutingSupport#determineHost works. Support across thread tracing for SOFA-RPC. Update Jedis 4.x plugin to support Sharding and Cluster models.  Documentation  Update docs to describe expired-plugins.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 9.2.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-9-2-0/","title":"Release Apache SkyWalking Java Agent 9.2.0"},{"body":"SkyWalking Rover 0.6.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance compatibility when profiling with SSL. Update LabelValue obtain pod information function to add default value parameter. Add HasOwnerName to judgement pod has owner name. 
Publish the latest Docker image tag. Improve the stability of Off CPU Profiling. Support collecting the access log from Kubernetes. Remove the scanner mode in the process discovery module. Upgrade Go library to 1.21, eBPF library to 0.13.2. Support using make docker.debug to build the debug docker image.  Bug Fixes Documentation  Update architecture diagram. Delete module design and project structure document. Adjust configuration modules during setup.  Issues and PR  All issues are here All pull requests are here  ","excerpt":"SkyWalking Rover 0.6.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance …","ref":"/events/release-apache-skwaylking-rover-0-6-0/","title":"Release Apache SkyWalking Rover 0.6.0"},{"body":"SkyWalking Cloud on Kubernetes 0.9.0 is released. Go to downloads page to find release tars.\n0.9.0 Features  Add a getting started document about how to deploy swck on the kubernetes cluster.  Bugs  Fix the bug that the java agent is injected twice when updating the pod.  Chores  Bump up custom-metrics-apiserver Bump up golang to v1.22 Bump up controller-gen to v0.14.0  ","excerpt":"SkyWalking Cloud on Kubernetes 0.9.0 is released. Go to downloads page to find release tars.\n0.9.0 …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-9-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.9.0"},{"body":"Background Apache SkyWalking is an open-source Application Performance Management system that helps users gather logs, traces, metrics, and events from various platforms and display them on the UI. With version 9.7.0, SkyWalking can collect access logs from probes in multiple languages and from Service Mesh, generating corresponding topologies, tracing, and other data. However, it could not initially collect and map access logs from applications in Kubernetes environments. This article explores how the 10.0.0 version of Apache SkyWalking employs eBPF technology to collect and store application access logs, addressing this limitation.\nWhy eBPF? To monitor the network traffic in Kubernetes, the following features must be supported:\n Cross Language: Applications deployed in Kubernetes may be written in any programming language, making support for diverse languages important. Non-Intrusiveness: It\u0026rsquo;s imperative to monitor network traffic without making any modifications to the applications, as direct intervention with applications in Kubernetes is not feasible. Kernel Metrics Monitoring: Often, diagnosing network issues by analyzing traffic performance at the user-space level is insufficient. A deeper analysis incorporating kernel-space network traffic metrics is frequently necessary. Support for Various Network Protocols: Applications may communicate using different transport protocols, necessitating support for a range of protocols.  Given these requirements, eBPF emerges as a capable solution. In the next section, we will delve into detailed explanations of how Apache SkyWalking Rover resolves these aspects.\nKernel Monitoring and Protocol Analysis In previous articles, we\u0026rsquo;ve discussed how to monitor network traffic from programs written in various languages. This technique remains essential for network traffic monitoring, allowing for the collection of traffic data without language limitations. 
However, due to the unique aspects of our monitoring trigger mechanism and the specific features of kernel monitoring, these two areas warrant separate explanations.\nKernel Monitoring Kernel monitoring allows users to gain insights into network traffic performance based on the execution at the kernel level, specifically from Layer 2 (Data Link) to Layer 4 (Transport) of the OSI model.\nNetwork monitoring at the kernel layer differs from the syscall (user-space) layer in terms of the metrics and identifiers used. While the syscalls layer can utilize file descriptors to correlate various operations, kernel layer network operations primarily use packets as unique identifiers. This discrepancy necessitates a mapping relationship that SkyWalking Rover can use to bind these two layers together for comprehensive monitoring.\nLet\u0026rsquo;s dive into the details of how data is monitored in both sending and receiving modes.\nObserve Sending When sending data, tracking the status and timing of each packet is crucial for understanding the state of each transmission. Within the kernel, operations progress from Layer 4 (L4) down to Layer 2 (L2), maintaining the same thread ID as during the syscalls layer, which simplifies data correlation.\nSkyWalking Rover monitors several key kernel functions to observe packet transmission dynamics, listed from L4 to L2:\n kprobe/tcp_sendmsg: Captures the time when a packet enters the L4 protocol stack for sending and the time it finishes processing. This function is essential for tracking the initial handling of packets at the transport layer. kprobe/tcp_transmit_skb: Records the total number of packet transmissions and the size of each packet sent. This function helps identify how many times a packet or a batch of packets is attempted to be sent, which is critical for understanding network throughput and congestion. tracepoint/tcp/tcp_retransmit_skb: Notes whether packet retransmission occurs, providing insights into network reliability and connection quality. Retransmissions can significantly impact application performance and user experience. tracepoint/skb/kfree_skb: Records packet loss during transmission and logs the reason for such occurrences. Understanding packet loss is crucial for diagnosing network issues and ensuring data integrity. kprobe/__ip_queue_xmit: Records the start and end times of processing by the L3 protocol. This function is vital for understanding the time taken for IP-level operations, including routing decisions. kprobe/nf_hook_slow: Records the total time and number of occurrences spent in Netfilter hooks, such as iptables rule evaluations. This monitoring point is important for assessing the impact of firewall rules and other filtering mechanisms on packet flow. kprobe/neigh_resolve_output: If resolving an unknown MAC address is necessary before sending a network request, this function records the occurrences and total time spent on this resolution. MAC address resolution times can affect the initial packet transmission delay. kprobe/__dev_queue_xmit: Records the start and end times of entering the L2 protocol stack, providing insights into the data link layer\u0026rsquo;s processing times. tracepoint/net/net_dev_start_xmit and tracepoint/net/net_dev_xmit: Record the actual time taken to transmit each packet at the network interface card (NIC). These functions are crucial for understanding the hardware-level performance and potential bottlenecks at the point of sending data to the physical network.  
According to the interception of the above method, Apache SkyWalking Rover can provide key execution time and metrics for each level when sending network data, from the application layer (Layer 7) to the transport layer (Layer 4), and finally to the data link layer (Layer 2).\nObserve Receiving When receiving data, the focus is often on the time it takes for packets to travel from the network interface card (NIC) to the user space. Unlike the process of sending data, data receiving in the kernel proceeds from the data link layer (Layer 2) up to the transport layer (Layer 4), until the application layer (Layer 7) retrieves the packet\u0026rsquo;s content. In SkyWalking Rover, monitors the following key system functions to observe this process, listed from L2 to L4:\n tracepoint/net/netif_receive_skb: Records the time when a packet is received by the network interface card. This tracepoint is crucial for understanding the initial point of entry for incoming data into the system. kprobe/ip_rcv: Records the start and end times of packet processing at the network layer (Layer 3). This probe provides insights into how long it takes for the IP layer to handle routing, forwarding, and delivering packets to the correct application. kprobe/nf_hook_slow: Records the total time and occurrences spent in Netfilter hooks, same with the sending traffic flow. kprobe/tcp_v4_rcv: Records the start and end times of packet processing at the transport layer (Layer 4). This probe is key to understanding the efficiency of TCP operations, including connection management, congestion control, and data flow. tracepoint/skb/skb_copy_datagram_iovec: When application layer protocols use the data, this tracepoint binds the packet to the syscall layer data at Layer 7. This connection is essential for correlating the kernel\u0026rsquo;s handling of packets with their consumption by user-space applications.  Based on the above methods, network monitoring can help you understand the complete execution process and execution time from when data is received by the network card to when it is used by the program.\nMetrics By intercepting the methods mentioned above, we can gather key metrics that provide insights into network performance and behavior. These metrics include:\n Packets: The size of the packets and the frequency of their transmission or reception. These metric offers a fundamental understanding of the network load and the efficiency of data movement between the sender and receiver. Connections: The number of connections established or accepted between services and the time taken for these connections to be set up. This metric is crucial for analyzing the efficiency of communication and connection management between different services within the network. L2-L4 Events: The time spent on key events within the Layer 2 to Layer 4 protocols. This metric sheds light on the processing efficiency and potential bottlenecks within the lower layers of the network stack, which are essential for data transmission and reception.  Protocol Analyzing In previous articles, we have discussed parsing HTTP/1.x protocols. However, with HTTP/2.x, the protocol\u0026rsquo;s stateful nature and the pre-established connections between services complicate network profiling. This complexity makes it challenging for Apache SkyWalking Rover to fully perceive the connection context, hindering protocol parsing operations.\nTransitioning network monitoring to Daemon mode offers a solution to this challenge. 
By continuously observing service operations around the clock, SkyWalking Rover can begin monitoring as soon as a service starts. This immediate initiation allows for the tracking of the complete execution context, making the observation of stateful protocols like HTTP/2.x feasible.\nProbes To detect when a process is started, monitoring a specific trace point (tracepoint/sched/sched_process_fork) is essential. This approach enables the system to be aware of process initiation events. Given the necessity to filter process traffic based on certain criteria such as the process\u0026rsquo;s namespace, Apache SkyWalking Rover follows a series of steps to ensure accurate and efficient monitoring. These steps include:\n Monitoring Activation: The process is immediately added to a monitoring whitelist upon detection. This step ensures that the process is considered for monitoring from the moment it starts, without delay. Push to Queue: The process\u0026rsquo;s PID (Process ID) is pushed into a monitoring confirmation queue. This queue holds the PIDs of newly detected processes that are pending further confirmation from a user-space program. This asynchronous approach allows for the separation of immediate detection and subsequent processing, optimizing the monitoring workflow. User-Space Program Confirmation: The user-space program retrieves process PIDs from the queue and assesses whether each process should continue to be monitored. If a process is deemed unnecessary for monitoring, it is removed from the whitelist.  This process ensures that SkyWalking Rover can dynamically adapt its monitoring scope based on real-time conditions and configurations, allowing for both comprehensive coverage and efficient resource use.\nLimitations The monitoring of stateful protocols like HTTP/2.x currently faces certain limitations:\n Inability to Observe Pre-existing Connections: Monitoring the complete request and response cycle requires that monitoring be initiated before any connections are established. This requirement means that connections set up before the start of monitoring cannot be observed. Challenges with TLS Requests: Observing TLS encrypted traffic is complex because it relies on asynchronously attaching uprobes (user-space attaching) for observation. If new requests are made before these uprobes are successfully attached, it becomes impossible to access the data before encryption or after decryption.  Demo Next, let’s quickly demonstrate the Kubernetes monitoring feature, so you can understand more specifically what it accomplishes.\nDeploy SkyWalking Showcase SkyWalking Showcase contains a complete set of example services and can be monitored using SkyWalking. For more information, please check the official documentation.\nIn this demo, we only deploy service, the latest released SkyWalking OAP, and UI.\nexport FEATURE_FLAGS=java-agent-injector,single-node,elasticsearch,rover make deploy.kubernetes After deployment is complete, please run the following script to open SkyWalking UI: http://localhost:8080/.\nkubectl port-forward svc/ui 8080:8080 --namespace default Done Once deployed, Apache SkyWalking Rover automatically begins monitoring traffic within the system upon startup. Then, reports this traffic data to SkyWalking OAP, where it is ultimately stored in a database.\nIn the Service Dashboard within Kubernetes, you can view a list of monitored Kubernetes services. 
If any of these services have HTTP traffic, this information would be displayed alongside them in the dashboard.\nFigure 1: Kubernetes Service List\nAdditionally, within the Topology Tab, you can observe the topology among related services. In each service or call relationship, there would display relevant TCP and HTTP metrics.\nFigure 2: Kubernetes Service Topology\nWhen you select a specific service from the Service list, you can view service metrics at both the TCP and HTTP levels for the chosen service.\nFigure 3: Kubernetes Service TCP Metrics\nFigure 4: Kubernetes Service HTTP Metrics\nFurthermore, by using the Endpoint Tab, you can see which URIs have been accessed for the current service.\nFigure 5: Kubernetes Service Endpoint List\nConclusion In this article, I\u0026rsquo;ve detailed how to utilize eBPF technology for network monitoring of services within a Kubernetes cluster, a capability that has been implemented in Apache SkyWalking Rover. This approach leverages the power of eBPF to provide deep insights into network traffic and service interactions, enhancing visibility and observability across the cluster.\n","excerpt":"Background Apache SkyWalking is an open-source Application Performance Management system that helps …","ref":"/blog/2024-03-18-monitor-kubernetes-network-by-ebpf/","title":"Monitoring Kubernetes network traffic by using eBPF"},{"body":"SkyWalking Client JS 0.11.0 is released. Go to downloads page to find release tars.\n Fixed the bug that navigator.sendBeacon sent json to backend report \u0026ldquo;No suitable request converter found for a @RequestObject List\u0026rdquo;. Fix reading property from null. Pin selenium version and update license CI. Bump dependencies. Update README.  ","excerpt":"SkyWalking Client JS 0.11.0 is released. Go to downloads page to find release tars.\n Fixed the bug …","ref":"/events/release-apache-skywalking-client-js-0-11-0/","title":"Release Apache SkyWalking Client JS 0.11.0"},{"body":"背景 Apache SkyWalking 是一个开源的应用性能管理系统,帮助用户从各种平台收集日志、跟踪、指标和事件,并在用户界面上展示它们。\n在9.7.0版本中,Apache SkyWalking 可以从多语言的探针和 Service Mesh 中收集访问日志,并生成相应的拓扑图、链路和其他数据。 但是对于Kubernetes环境,暂时无法提供对应用程序的访问日志进行采集并生成拓扑图。本文探讨了Apache SkyWalking 10.0.0版本如何采用eBPF技术来收集和存储应用访问日志,解决了这一限制。\n为什么使用 eBPF? 
为了在Kubernetes中监控网络流量,以下特性需得到支持:\n 跨语言: 在Kubernetes部署的应用可能使用任何编程语言编写,因此对多种语言的支持十分重要。 非侵入性: 监控网络流量时不对应用程序进行任何修改是必要的,因为直接干预Kubernetes中的应用程序是不可行的。 内核指标监控: 通常,仅通过分析用户空间级别的流量来诊断网络问题是不够的。经常需要深入分析,结合内核空间的网络流量指标。 支持多种网络协议: 应用程序可能使用不同的传输协议进行通信,这就需要支持一系列的协议。  鉴于这些要求,eBPF显现出作为一个有能力的解决方案。在下一节中,我们将深入讨论Apache SkyWalking Rover是如何解决这些方面作出更详细解释。\n内核监控与协议分析 在之前的文章中,我们讨论了如何对不同编程语言的程序进行网络流量获取。在网络流量监控中,我们仍然会使用该技术进行流量采集。 但是由于这次监控触发方式和内核监控方面的不同特性,所以这两部分会单独进行说明。\n内核监控 内核监控允许用户根据在内核层面的执行,洞察网络流量性能,特别是从OSI模型的第2层(数据链路层)到第4层(传输层)。\n内核层的网络监控与syscall(用户空间系统调用)层在关联指标不同。虽然syscall层可以利用文件描述符来关联各种操作,但内核层的网络操作主要使用数据包作为唯一标识符。 这种差异需要映射关系,Apache SkyWalking Rover可以使用它将这两层绑定在一起,进行全面监控。\n让我们深入了解数据在发送和接收模式下是如何被监控的。\n监控数据发送 在发送数据时,跟踪每个数据包的状态和时间对于理解每次传输的状态至关重要。在内核中,操作从第4层(L4)一直调用到第2层(L2),并且会保持与在syscall层相同的线程ID,这简化了数据的相关性分析。\nSkyWalking Rover监控了几个关键的内核函数,以观察数据包传输动态,顺序从L4到L2:\n kprobe/tcp_sendmsg: 记录数据包进入L4协议栈进行发送以及完成处理的时间。这个函数对于跟踪传输层对数据包的初始处理至关重要。 kprobe/tcp_transmit_skb: 记录数据包传输的总次数和每个发送的数据包的大小。这个函数有助于识别尝试发送一个数据包或一段时间内发送一批数据包的次数,这对于理解网络吞吐量和拥塞至关重要。 tracepoint/tcp/tcp_retransmit_skb: 记录是否发生数据包重传,提供网络可靠性和连接质量的见解。重传可以显著影响应用性能和用户体验。 tracepoint/skb/kfree_skb: 记录传输过程中的数据包丢失,并记录发生这种情况的原因。理解数据包丢失对于诊断网络问题和确保数据完整性至关重要。 kprobe/__ip_queue_xmit: 记录L3协议处理的开始和结束时间。这个功能对于理解IP级操作所需的时间至关重要,包括路由决策。 kprobe/nf_hook_slow: 记录在Netfilter钩子中花费的总时间和发生次数,例如 iptables 规则评估。这个函数对于评估防火墙规则和其他过滤机制对数据流的影响非常重要。 kprobe/neigh_resolve_output: 如果在发送网络请求之前需要解析未知的MAC地址,这个函数会记录发生的次数和在这个解析上花费的总时间。MAC地址解析时间可以影响初始数据包传输的延迟。 kprobe/__dev_queue_xmit: 记录进入L2协议栈的开始和结束时间,提供对数据链路层处理时间的见解。 tracepoint/net/net_dev_start_xmit and tracepoint/net/net_dev_xmit: 记录在网卡(NIC)上传输每个数据包所需的实际时间。这些功能对于理解硬件级性能和在将数据发送到物理网络时可能出现的瓶颈至关重要。  根据上述方法的拦截,Apache SkyWalking Rover可以在发送网络数据时为每个层级提供关键的执行时间和指标,从应用层(第7层)到传输层(第4层),最终到数据链路层(第2层)。\n监控数据接收 在接收数据时,通常关注的是数据包从网卡(NIC)到用户空间的传输时间。与发送数据的过程不同,在内核中接收数据是从数据链路层(第2层)开始,一直上升到传输层(第4层),直到应用层(第7层)检索到数据包的内容。\n在SkyWalking Rover中,监控以下关键系统功能以观察这一过程,顺序从L2到L4:\n tracepoint/net/netif_receive_skb: 记录网卡接收到数据包的时间。这个追踪点对于理解进入系统的传入数据的初始入口点至关重要。 kprobe/ip_rcv: 记录网络层(第3层)数据包处理的开始和结束时间。这个探针提供了IP层处理路由、转发和将数据包正确传递给应用程序所需时间的见解。 kprobe/nf_hook_slow: 记录在Netfilter钩子中花费的总时间和发生次数,与发送流量的情况相同。 kprobe/tcp_v4_rcv: 记录传输层(第4层)数据包处理的开始和结束时间。这个探针对于理解TCP操作的效率至关重要,包括连接管理、拥塞控制和数据流。 tracepoint/skb/skb_copy_datagram_iovec: 当应用层协议使用数据时,这个追踪点在第7层将数据包与syscall层的数据绑定。这种连接对于将内核对数据包的处理与用户空间应用程序的消费相关联是至关重要的。  基于上述方法,网络监控可以帮助您理解从网卡接收数据到程序使用数据的完整执行过程和执行时间。\n指标 通过拦截上述提到的方法,我们可以收集提供网络性能的关键指标。这些指标包括:\n 数据包: 数据包的大小及其传输或接收的频率。这些指标提供了对网络负载和数据在发送者与接收者之间传输效率的基本理解。 连接: 服务之间建立或接收的连接数量,以及设置这些连接所需的时间。这个指标对于分析网络内不同服务之间的通信效率和连接管理至关重要。 L2-L4 事件: 在第2层到第4层协议中关键事件上所花费的时间。这个指标揭示了网络堆栈较低层的处理效率和潜在瓶颈,这对于数据传输至关重要。  协议分析 在之前的文章中,我们已经讨论了解析 HTTP/1.x 协议。然而,对于 HTTP/2.x,协议的有状态性质和服务之间预先建立的连接使得网络分析变得复杂。 这种复杂性使得Apache SkyWalking Rover很难完全感知连接上下文,阻碍了协议解析操作。\n将网络监控转移到守护进程模式提供了一种解决这一挑战的方法。通过全天候不断观察服务,Apache SkyWalking Rover可以在服务启动时立即开始监控。 这种立即启动允许跟踪完整的执行上下文,使得观察像 HTTP/2.x 这样的有状态协议变得可行。\n追踪 为了检测到一个进程何时启动,监控一个特定的追踪点 (tracepoint/sched/sched_process_fork) 是必不可少的。这追踪点使系统能够意识到进程启动事件。\n鉴于需要根据某些标准(如进程的命名空间)过滤进程流量,Apache SkyWalking Rover遵循一系列步骤来确保准确和高效的监控。这些步骤包括:\n 启动监控: 一旦检测到进程,立即将其添加到监控白名单中。这一步确保从进程启动的那一刻起就考虑对其进行监控,不会有延迟。 推送队列: 进程的PID(进程ID)被推送到一个监控确认队列中。这个队列保存了新检测到的进程的PID,这些进程等待来自用户空间程序的进一步确认。这种异步方法对立即检测和后续处理进行分离,优化了监控工作流程。 用户态程序确认: 用户空间程序从队列中检索进程PID,并评估每个进程是否应该继续被监控。如果一个进程被认为不必要进行监控,它将被从白名单中移除。  这个过程确保了Apache SkyWalking Rover可以根据实时条件和配置动态调整其监控范围,允许既全面覆盖又有效的资源监控。\n限制 像 HTTP/2.x 这样的有状态协议的监控目前仍然面临一些限制:\n 无法观察现有连接: 要监控完整的请求和响应周期,需要在建立任何连接之前启动监控。这个要求意味着在监控开始之前建立的连接无法被观察到。 TLS请求的挑战: 
观察TLS加密流量是复杂的,因为它依赖于异步加载uprobes(用户空间加载)进行观察。如果在成功加载这些uprobes之前发出新的请求,那么在加密之前或解密之后访问数据就变得不可能。  演示 接下来,让我们快速演示Kubernetes监控功能,以便更具体地了解它的功能。\n部署 SkyWalking Showcase SkyWalking Showcase 包含完整的示例服务,并可以使用 SkyWalking 进行监视。有关详细信息,请查看官方文档。\n在此演示中,我们只部署服务、最新发布的 SkyWalking OAP,UI和Rover。\nexport FEATURE_FLAGS=java-agent-injector,single-node,elasticsearch,rover make deploy.kubernetes 部署完成后,请运行以下脚本以打开 SkyWalking UI:http://localhost:8080/ 。\nkubectl port-forward svc/ui 8080:8080 --namespace default 完成 一旦部署,Apache SkyWalking Rover在启动时会自动开始监控系统中的流量。然后,它将这些流量数据报告给SkyWalking OAP,并最终存储在数据库中。\n在Kubernetes中的服务仪表板中,您可以查看被监控的Kubernetes服务列表。如果其中任何服务具有HTTP流量,这些指标信息将在列表中显示。\n图 1: Kubernetes 服务列表\n此外,在拓扑图选项卡中,您可以观察相关服务之间的拓扑关系。在每个服务节点或服务之间调用关系中,将显示相关的TCP和HTTP指标。\n图 2: Kubernetes 服务拓扑图\n当您从服务列表中选择特定服务时,您可以查看所选服务在TCP和HTTP级别的服务指标。\n图 3: Kubernetes 服务 TCP 指标\n图 4: Kubernetes 服务 HTTP 指标\n此外,通过使用端点选项卡,您可以查看当前服务所访问的URI。\n图 5: Kubernetes 服务端点列表\n结论 在本文中,我详细介绍了如何利用eBPF技术对Kubernetes集群中的服务进行网络流量监控,这是Apache SkyWalking Rover中实现的一项功能。\n这项功能利用了eBPF的强大功能,提供了对网络流量和服务交互的深入洞察,增强了对整个集群的可观测性。\n","excerpt":"背景 Apache SkyWalking 是一个开源的应用性能管理系统,帮助用户从各种平台收集日志、跟踪、指标和事件,并在用户界面上展示它们。\n在9.7.0版本中,Apache SkyWalking  …","ref":"/zh/2024-03-18-monitor-kubernetes-network-by-ebpf/","title":"使用 eBPF 监控 Kubernetes 网络流量"},{"body":"","excerpt":"","ref":"/tags/clickhouse/","title":"ClickHouse"},{"body":"Background ClickHouse is an open-source column-oriented database management system that allows generating analytical data reports in real-time, so it is widely used for online analytical processing (OLAP).\nApache SkyWalking is an open-source APM system that provides monitoring, tracing and diagnosing capabilities for distributed systems in Cloud Native architectures. Increasingly, App Service architectures incorporate Skywalking as an essential monitoring component of a service or instance.\nBoth ClickHouse and Skywalking are popular frameworks, and it would be great to monitor your ClickHouse database through Skywalking. Next, let\u0026rsquo;s share how to monitor ClickHouse database with Skywalking.\nPrerequisites and configurations Make sure you\u0026rsquo;ve met the following prerequisites before you start onboarding your monitor.\nConfig steps:\n Exposing prometheus endpoint. Fetching ClickHouse metrics by OpenTelemetry. Exporting metrics to Skywalking OAP server.  Prerequisites for setup The monitoring for ClickHouse relies on the embedded prometheus endpoint of ClickHouse and will not be supported in previous versions starting from v20.1.2.4.\nYou can check the version of your server:\n:) select version(); SELECT version() Query id: 2d3773ca-c320-41f6-b2ac-7ebe37eddc58 ┌─version()───┐ │ 24.2.1.2248 │ └─────────────┘ If your ClickHouse version is earlier than v20.1.2.4, you need to set up ClickHouse-exporter to access data.\nExpose prometheus Endpoint The embedded prometheus endpoint will make it easy for data collection, you just need to open the required configuration in the core configuration file config.xml of ClickHouse. In addition to your original configuration, you only need to modify the configuration of Prometheus.\n/etc/clickhouse-server/config.xml:\n\u0026lt;clickhouse\u0026gt; ...... 
\u0026lt;prometheus\u0026gt; \u0026lt;endpoint\u0026gt;/metrics\u0026lt;/endpoint\u0026gt; \u0026lt;port\u0026gt;9363\u0026lt;/port\u0026gt; \u0026lt;metrics\u0026gt;true\u0026lt;/metrics\u0026gt; \u0026lt;events\u0026gt;true\u0026lt;/events\u0026gt; \u0026lt;asynchronous_metrics\u0026gt;true\u0026lt;/asynchronous_metrics\u0026gt; \u0026lt;errors\u0026gt;true\u0026lt;/errors\u0026gt; \u0026lt;/prometheus\u0026gt; \u0026lt;/clickhouse\u0026gt; Settings:\n endpoint – HTTP endpoint for scraping metrics by prometheus server. Start from ‘/’. port – Port for endpoint. metrics – Expose metrics from the system.metrics table. events – Expose metrics from the system.events table. asynchronous_metrics – Expose current metrics values from the system.asynchronous_metrics table. errors - Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the system.errors as well.  Save the config and restart the ClickHouse server.\nIt contains more than 1,000 metrics, covering services、networks、disk、MergeTree、errors and so on. For more details, after restarting the server, you can call curl 127.0.0.1:9363/metrics to know about the metrics.\nYou also can check the metrics by tables to make a contrast.\n:) select * from system.metrics limit 10 SELECT * FROM system.metrics LIMIT 10 Query id: af677622-960e-4589-b2ca-0b6a40c443aa ┌─metric───────────────────────────────┬─value─┬─description─────────────────────────────────────────────────────────────────────┐ │ Query │ 1 │ Number of executing queries │ │ Merge │ 0 │ Number of executing background merges │ │ Move │ 0 │ Number of currently executing moves │ │ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │ │ ReplicatedFetch │ 0 │ Number of data parts being fetched from replica │ │ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │ │ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │ │ BackgroundMergesAndMutationsPoolTask │ 0 │ Number of active merges and mutations in an associated background pool │ │ BackgroundMergesAndMutationsPoolSize │ 64 │ Limit on number of active merges and mutations in an associated background pool │ │ BackgroundFetchesPoolTask │ 0 │ Number of active fetches in an associated background pool │ └──────────────────────────────────────┴───────┴─────────────────────────────────────────────────────────────────────────────────┘ :) select * from system.events limit 10; SELECT * FROM system.events LIMIT 10 Query id: 32c618d0-037a-400a-92a4-59fde832e4e2 ┌─event────────────────────────────┬──value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ │ Query │ 7 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │ │ SelectQuery │ 7 │ Same as Query, but only for SELECT queries. │ │ InitialQuery │ 7 │ Same as Query, but only counts initial queries (see is_initial_query). │ │ QueriesWithSubqueries │ 40 │ Count queries with all subqueries │ │ SelectQueriesWithSubqueries │ 40 │ Count SELECT queries with all subqueries │ │ QueryTimeMicroseconds │ 202862 │ Total time of all queries. 
│ │ SelectQueryTimeMicroseconds │ 202862 │ Total time of SELECT queries. │ │ FileOpen │ 40473 │ Number of files opened. │ │ Seek │ 100 │ Number of times the \u0026#39;lseek\u0026#39; function was called. │ │ ReadBufferFromFileDescriptorRead │ 67995 │ Number of reads (read/pread) from a file descriptor. Does not include sockets. │ └──────────────────────────────────┴────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ Start up Opentelemetry-Collector Configure OpenTelemetry based on your own requirements. Following the example below:\notel-collector-config.yaml:\nreceivers: prometheus: config: scrape_configs: - job_name: \u0026#39;clickhouse-monitoring\u0026#39; scrape_interval: 15s static_configs: - targets: [\u0026#39;127.0.0.1:9363\u0026#39;,\u0026#39;127.0.0.1:9364\u0026#39;,\u0026#39;127.0.0.1:9365\u0026#39;] labels: host_name: prometheus-clickhouse processors: batch: exporters: otlp: endpoint: 127.0.0.1:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp Please ensure:\n job_name: 'clickhouse-monitoring' that marked the data from ClickHouse, If modified, it will be ignored. host_name defines the service name, you have to make one. endpoint point to the oap server address. the network between ClickHouse, OpenTelemetry Collector, and Skywalking OAP Server must be accessible.  If goes well, refresh the Skywalking-ui home page in a few seconds and you can see ClickHouse under the database menu.\nsuccess log:\n2024-03-12T03:57:39.407Z\tinfo\tservice@v0.93.0/telemetry.go:76\tSetting up own telemetry... 2024-03-12T03:57:39.412Z\tinfo\tservice@v0.93.0/telemetry.go:146\tServing metrics\t{\u0026quot;address\u0026quot;: \u0026quot;:8888\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;Basic\u0026quot;} 2024-03-12T03:57:39.416Z\tinfo\tservice@v0.93.0/service.go:139\tStarting otelcol...\t{\u0026quot;Version\u0026quot;: \u0026quot;0.93.0\u0026quot;, \u0026quot;NumCPU\u0026quot;: 4} 2024-03-12T03:57:39.416Z\tinfo\textensions/extensions.go:34\tStarting extensions... 2024-03-12T03:57:39.423Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:240\tStarting discovery manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:231\tScrape job added\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;, \u0026quot;jobName\u0026quot;: \u0026quot;clickhouse-monitoring\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tservice@v0.93.0/service.go:165\tEverything is ready. Begin running and processing data. 
2024-03-12T03:57:59.432Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:282\tStarting scrape manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} ClickHouse monitoring dashboard About the dashboard The dashboard includes the service dashboard and the instance dashboard.\nMetrics include servers, queries, networks, insertions, replicas, MergeTree, ZooKeeper and embedded ClickHouse Keeper.\nThe service dashboard displays the metrics of the entire cluster.\nThe instance dashboard displays the metrics of an instance.\nAbout the metrics Here are some meanings of ClickHouse Instance metrics, more here.\n   Monitoring Panel Unit Description Data Source     CpuUsage count CPU time spent seen by OS per second(according to ClickHouse.system.dashboard.CPU Usage (cores)). ClickHouse   MemoryUsage percentage Total amount of memory (bytes) allocated by the server/ total amount of OS memory. ClickHouse   MemoryAvailable percentage Total amount of memory (bytes) available for program / total amount of OS memory. ClickHouse   Uptime sec The server uptime in seconds. It includes the time spent for server initialization before accepting connections. ClickHouse   Version string Version of the server in a single integer number in base-1000. ClickHouse   FileOpen count Number of files opened. ClickHouse     metrics about ZooKeeper are valid when managing cluster by ZooKeeper metrics about embedded ClickHouse Keeper are valid when ClickHouse Keeper is enabled  References  ClickHouse prometheus endpoint ClickHouse built-in observability dashboard ClickHouse Keeper  ","excerpt":"Background ClickHouse is an open-source column-oriented database management system that allows …","ref":"/blog/2024-03-12-monitoring-clickhouse-through-skywalking/","title":"Monitoring Clickhouse Server through SkyWalking"},{"body":"背景介绍 ClickHouse 是一个开源的面向列的数据库管理系统,可以实时生成分析数据报告,因此被广泛用于在线分析处理(OLAP)。\nApache SkyWalking 是一个开源的 APM 系统,为云原生架构中的分布式系统提供监控、跟踪和诊断能力。应用服务体系越来越多地将 Skywalking 作为服务或实例的基本监视组件。\nClickHouse 和 Skywalking 框架都是当下流行的服务组件,通过 Skywalking 监控您的 ClickHouse 数据库将是一个不错的选择。接下来,就来分享一下如何使用 Skywalking 监控 ClickHouse 数据库。\n前提与配置 在开始接入监控之前,请先确认以下前提条件。\n配置步骤:\n 暴露 Prometheus 端点。 通过 OpenTelemetry 拉取 ClickHouse 的指标数据。 将指标数据发送到 Skywalking OAP server.  使用的前提 ClickHouse 的监控依赖于 ClickHouse 的内嵌 Prometheus 端点配置,配置从 v20.1.2.4 开始支持,因此之前的老版本将无法支持。\n您可以检查 ClickHouse 服务的版本:\n:) select version(); SELECT version() Query id: 2d3773ca-c320-41f6-b2ac-7ebe37eddc58 ┌─version()───┐ │ 24.2.1.2248 │ └─────────────┘ 如果您的 ClickHouse 版本低于 v20.1.2.4,则需要依靠 ClickHouse-exporter 获取数据。\n暴露 Prometheus 端点 内嵌的 Prometheus 端点简化了数据采集流程,您只需要在 ClickHouse 的核心配置文件 config.xml 打开所需的配置即可。除了您原来的配置,您只需要参考如下修改 Prometheus 的配置。\n/etc/clickhouse-server/config.xml:\n\u0026lt;clickhouse\u0026gt; ...... 
\u0026lt;prometheus\u0026gt; \u0026lt;endpoint\u0026gt;/metrics\u0026lt;/endpoint\u0026gt; \u0026lt;port\u0026gt;9363\u0026lt;/port\u0026gt; \u0026lt;metrics\u0026gt;true\u0026lt;/metrics\u0026gt; \u0026lt;events\u0026gt;true\u0026lt;/events\u0026gt; \u0026lt;asynchronous_metrics\u0026gt;true\u0026lt;/asynchronous_metrics\u0026gt; \u0026lt;errors\u0026gt;true\u0026lt;/errors\u0026gt; \u0026lt;/prometheus\u0026gt; \u0026lt;/clickhouse\u0026gt; 配置说明:\n endpoint – 通过 prometheus 服务器抓取指标的 HTTP 端点。从/开始。 port – 端点的端口。 metrics – 暴露 system.metrics 表中的指标。 events – 暴露 system.events 表中的指标。 asynchronous_metrics – 暴露 system.asynchronous_metrics 表中的当前指标值。 errors - 按错误代码暴露自上次服务器重新启动以来发生的错误数。此信息也可以从 system.errors 中获得。  保存配置并重启 ClickHouse 服务。\n端点数据包含1000多个指标,涵盖服务、网络、磁盘、MergeTree、错误等。想了解更多指标细节,在重启服务后,可以调用 curl 127.0.0.1:9363/metrics 看到具体指标的内容。\n您还可以通过数据库表的数据与端点数据进行检查对比。\n:) select * from system.metrics limit 10 SELECT * FROM system.metrics LIMIT 10 Query id: af677622-960e-4589-b2ca-0b6a40c443aa ┌─metric───────────────────────────────┬─value─┬─description─────────────────────────────────────────────────────────────────────┐ │ Query │ 1 │ Number of executing queries │ │ Merge │ 0 │ Number of executing background merges │ │ Move │ 0 │ Number of currently executing moves │ │ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │ │ ReplicatedFetch │ 0 │ Number of data parts being fetched from replica │ │ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │ │ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │ │ BackgroundMergesAndMutationsPoolTask │ 0 │ Number of active merges and mutations in an associated background pool │ │ BackgroundMergesAndMutationsPoolSize │ 64 │ Limit on number of active merges and mutations in an associated background pool │ │ BackgroundFetchesPoolTask │ 0 │ Number of active fetches in an associated background pool │ └──────────────────────────────────────┴───────┴─────────────────────────────────────────────────────────────────────────────────┘ :) select * from system.events limit 10; SELECT * FROM system.events LIMIT 10 Query id: 32c618d0-037a-400a-92a4-59fde832e4e2 ┌─event────────────────────────────┬──value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ │ Query │ 7 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │ │ SelectQuery │ 7 │ Same as Query, but only for SELECT queries. │ │ InitialQuery │ 7 │ Same as Query, but only counts initial queries (see is_initial_query). │ │ QueriesWithSubqueries │ 40 │ Count queries with all subqueries │ │ SelectQueriesWithSubqueries │ 40 │ Count SELECT queries with all subqueries │ │ QueryTimeMicroseconds │ 202862 │ Total time of all queries. │ │ SelectQueryTimeMicroseconds │ 202862 │ Total time of SELECT queries. │ │ FileOpen │ 40473 │ Number of files opened. │ │ Seek │ 100 │ Number of times the \u0026#39;lseek\u0026#39; function was called. │ │ ReadBufferFromFileDescriptorRead │ 67995 │ Number of reads (read/pread) from a file descriptor. Does not include sockets. 
│ └──────────────────────────────────┴────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ 启动 Opentelemetry-Collector 根据自身环境 配置 OpenTelemetry。 您可参照下面的例子:\notel-collector-config.yaml:\nreceivers: prometheus: config: scrape_configs: - job_name: \u0026#39;clickhouse-monitoring\u0026#39; scrape_interval: 15s static_configs: - targets: [\u0026#39;127.0.0.1:9363\u0026#39;,\u0026#39;127.0.0.1:9364\u0026#39;,\u0026#39;127.0.0.1:9365\u0026#39;] labels: host_name: prometheus-clickhouse processors: batch: exporters: otlp: endpoint: 127.0.0.1:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp 请着重关注:\n job_name: 'clickhouse-monitoring' 标记着来自 ClickHouse 的数据,如果自行修改,数据会被服务忽略。 host_name 定义服务的名称。 endpoint 指向您的 OAP 服务地址. ClickHouse、OpenTelemetry Collector 和 Skywalking OAP Server 之间的网络必须可访问。  如果进展顺利,几秒钟后刷新 Skywalking-ui 网页,您可以在数据库的菜单下看到 ClickHouse。\n启动成功日志样例:\n2024-03-12T03:57:39.407Z\tinfo\tservice@v0.93.0/telemetry.go:76\tSetting up own telemetry... 2024-03-12T03:57:39.412Z\tinfo\tservice@v0.93.0/telemetry.go:146\tServing metrics\t{\u0026quot;address\u0026quot;: \u0026quot;:8888\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;Basic\u0026quot;} 2024-03-12T03:57:39.416Z\tinfo\tservice@v0.93.0/service.go:139\tStarting otelcol...\t{\u0026quot;Version\u0026quot;: \u0026quot;0.93.0\u0026quot;, \u0026quot;NumCPU\u0026quot;: 4} 2024-03-12T03:57:39.416Z\tinfo\textensions/extensions.go:34\tStarting extensions... 2024-03-12T03:57:39.423Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:240\tStarting discovery manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:231\tScrape job added\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;, \u0026quot;jobName\u0026quot;: \u0026quot;clickhouse-monitoring\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tservice@v0.93.0/service.go:165\tEverything is ready. Begin running and processing data. 
2024-03-12T03:57:59.432Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:282\tStarting scrape manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} ClickHouse 监控面板 关于面板 这个仪表盘包含服务仪表盘和实例仪表盘。\n指标涵盖服务器、查询、网络、插入、副本、MergeTree、ZooKeeper 和内嵌 ClickHouse Keeper。\n服务仪表盘主要展示整个集群相关的指标。\n实例仪表盘主要展示单个实例相关的指标。\n关于指标 以下是ClickHouse实例指标的一些含义,前往了解完整的指标列表。\n   面板名称 单位 指标含义 数据源     CpuUsage count 操作系统每秒花费的 CPU 时间(根据 ClickHouse.system.dashboard.CPU 使用率(核心数))。 ClickHouse   MemoryUsage percentage 服务器分配的内存总量(字节)/操作系统内存总量。 ClickHouse   MemoryAvailable percentage 可用于程序的内存总量(字节)/操作系统内存总量。 ClickHouse   Uptime sec 服务器正常运行时间(以秒为单位)。它包括在接受连接之前进行服务器初始化所花费的时间。 ClickHouse   Version string 以 base-1000 样式展示的服务器版本。 ClickHouse   FileOpen count 打开的文件数。 ClickHouse     ZooKeeper 的指标在 ZooKeeper 管理集群时有效。 内嵌ClickHouse Keeper的指标在开启内嵌 ClickHouse Keeper 配置时有效。  参考文档  ClickHouse prometheus endpoint ClickHouse built-in observability dashboard ClickHouse Keeper  ","excerpt":"背景介绍 ClickHouse 是一个开源的面向列的数据库管理系统,可以实时生成分析数据报告,因此被广泛用于在线分析处理(OLAP)。\nApache SkyWalking 是一个开源的 APM 系统, …","ref":"/zh/2024-03-12-monitoring-clickhouse-through-skywalking/","title":"使用 SkyWalking 监控 ClickHouse Server"},{"body":"","excerpt":"","ref":"/tags/rocketmq/","title":"RocketMQ"},{"body":"背景介绍 Apache RocketMQ 是一个开源的低延迟、高并发、高可用、高可靠的分布式消息中间件, 从SkyWalking OAP 10.0 版本开始, 新增了 对 RocketMQ Server的监控面板。本文将展示并介绍如何使用 Skywalking来监控RocketMQ\n部署 流程 通过RocketMQ官方提供的RocketMQ exporter来采集RocketMQ Server数据,再通过opentelmetry-collector来拉取RocketMQ exporter并传输到skywalking oap服务来处理\nDataFlow: 准备  Skywalking oap服务,v10.0 + RocketMQ v4.3.2 + RocketMQ exporter v0.0.2+ Opentelmetry-collector v0.87+  启动顺序  启动 RocketMQ namesrv 和 broker 启动 skywalking oap 和 ui 启动 RocketMQ exporter 启动 opentelmetry-collector  具体如何启动和配置请参考以上链接中官方教程.\n需要注意下的是 opentelmetry-collector 的配置文件.\njob_name: \u0026quot;rocketmq-monitoring\u0026quot; 请不要修改,否则 skywalking 不会处理这部分数据.\nrocketmq-exporter 替换成RocketMQ exporter 的地址.\nreplacement: rocketmq-cluster 中的rocketmq-cluster如果想要使用下文介绍的服务分层功能,请自行定义为其他服务层级相匹配的名称.\noap 为 skywalking oap 地址,请自行替换.\nreceivers: prometheus: config: scrape_configs: - job_name: \u0026quot;rocketmq-monitoring\u0026quot; scrape_interval: 30s static_configs: - targets: ['rocketmq-exporter:5557'] relabel_configs: - source_labels: [ ] target_label: cluster replacement: rocketmq-cluster exporters: otlp: endpoint: oap:11800 tls: insecure: true processors: batch: service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp 监控指标 指标分为 三个维度, cluster,broker,topic\ncluster监控 cluster 主要是站在集群的角度来统计展示,比如\nMessages Produced Today 今日集群产生的消息数\nMax CommitLog Disk Ratio 展示集群中磁盘使用率最高的broker\nTotal Producer Tps 集群生产者tps\nbroker 监控 broker 主要是站在节点的角度来统计展示,比如\nProduce Tps 节点生产者tps\nProducer Message Size(MB)节点生产消息大小\ntopic 监控 topic 主要是站在主题的角度来统计展示,比如\nConsumer Group Count 消费该主题的消费者组个数\nConsumer Latency(s) 消费者组的消费延时时间\nBacklogged Messages 消费者组消费消息堆积\n注意:topic 维度是整个 topic 来聚合,并不是在一个 broker 上的 topic 聚合,在 dashboard 上你也可以看到 broker 跟 topic 是平级的。\n各个指标的含义可以在图标的 tip 上找到解释\n更多指标可以参考文档\ndemo 已经在 skywalking showcase 上线,可以在上面看到展示效果\n服务分层 skywalking 10 新增了重要功能Service Hierarchy,接收来自不同层级的服务数据,比如 java agent 上报,k8s 监控数据或者 otel 的监控数据. 
根据设置规则如果发现这些服务名称符合匹配规则,则可以将这些不同层级的服务联系起来。\n如下图所示:\nskywalking 采集部署在 k8s 的 RocketMQ 服务端的k8s 数据,并接收来自 otel 的 RocketMQ 服务端监控数据,根据匹配规则这些服务具有相同的服务名称,则可以在 ui 上观察到它们的联系\n","excerpt":"背景介绍 Apache RocketMQ 是一个开源的低延迟、高并发、高可用、高可靠的分布式消息中间件, 从SkyWalking OAP 10.0 版本开始, 新增了 对 RocketMQ …","ref":"/zh/2024-02-29-rocketmq-monitoring-by-skywalking/","title":"使用 SkyWalking 监控 RocketMQ Server"},{"body":"SkyWalking Go 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Add support ignore suffix for span name. Adding go 1.21 and 1.22 in docker image.  Plugins  Support setting a discard type of reporter. Add redis.max_args_bytes parameter for redis plugin. Changing intercept point for gin, make sure interfaces could be grouped when params defined in relativePath. Support RocketMQ MQ. Support AMQP MQ. support Echov4 framework.  Documentation Bug Fixes  Fix users can not use async api in toolkit-trace. Fix cannot enhance the vendor management project. Fix SW_AGENT_REPORTER_GRPC_MAX_SEND_QUEUE not working on metricsSendCh \u0026amp; logSendCh chans of gRPC reporter. Fix ParseVendorModule error for special case in vendor/modules.txt. Fix enhance method error when unknown parameter type. Fix wrong tracing context when trace have been sampled. Fix enhance param error when there are multiple params. Fix lost trace when multi middleware handlerFunc in gin plugin. Fix DBQueryContext execute error in sql plugin. Fix stack overflow as endless logs triggered.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Go 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Add support …","ref":"/events/release-apache-skwaylking-go-0.4.0/","title":"Release Apache SkyWalking Go 0.4.0"},{"body":"","excerpt":"","ref":"/tags/nginx/","title":"Nginx"},{"body":"背景介绍 在 Scala 中,纯函数式中主要使用 Fiber,而不是线程,诸如 Cats-Effect、ZIO 等 Effect 框架。 您可以将 Fiber 视为轻量级线程,它是一种并发模型,由框架本身掌控控制权,从而消除了上下文切换的开销。 基于这些 Effect 框架开发的 HTTP、gRCP、GraphQL 库而开发的应用,我们一般称为 纯函数式应用程序。\n我们以 ZIO 为切入点, 演示 SkyWalking Scala 如何支持 Effect 生态。\nZIO Trace 首先,我们想要实现 Fiber 上下文传递,而不是监控 Fiber 本身。对于一个大型应用来说,可能存在成千上万个 Fiber,监控 Fiber 本身的意义不大。\n虽然 Fiber 的 Span 是在活跃时才会创建,但难免会有目前遗漏的场景,所以提供了一个配置 plugin.ziov2.ignore_fiber_regexes。 它将使用正则去匹配 Fiber location,匹配上的 Fiber 将不会创建 Span。\nFiber Span的信息如下:\n下面是我们使用本 ZIO 插件,和一些官方插件(hikaricp、jdbc、pulsar)完成的 Trace:\n分析 在 ZIO 中,Fiber可以有两种方式被调度,它们都是 zio.Executor 的子类。当然您也可以使用自己的线程池,这样也需被 ZIO 包装,其实就类似下面的 blockingExecutor。\nabstract class Executor extends ExecutorPlatformSpecific { self =\u0026gt; def submit(runnable: Runnable)(implicit unsafe: Unsafe): Boolean } 一种是系统默认线程池 defaultExecutor:\nprivate[zio] trait RuntimePlatformSpecific { final val defaultExecutor: Executor = Executor.makeDefault() } 另一种是专用于阻塞 IO 的线程池 blockingExecutor:\nprivate[zio] trait RuntimePlatformSpecific { final val defaultBlockingExecutor: Executor = Blocking.blockingExecutor } 默认线程池 defaultExecutor 对于 defaultExecutor,其本身是很复杂的,但它就是一个 ZIO 的 Fiber 调度(执行)器:\n/** * A `ZScheduler` is an `Executor` that is optimized for running ZIO * applications. Inspired by \u0026#34;Making the Tokio Scheduler 10X Faster\u0026#34; by Carl * Lerche. 
[[https://tokio.rs/blog/2019-10-scheduler]] */ private final class ZScheduler extends Executor 由于它们都是 zio.Executor 的子类,我们只需要对其及其子类进行增强:\nfinal val ENHANCE_CLASS = LogicalMatchOperation.or( HierarchyMatch.byHierarchyMatch(\u0026#34;zio.Executor\u0026#34;), MultiClassNameMatch.byMultiClassMatch(\u0026#34;zio.Executor\u0026#34;) ) 它们都是线程池,我们只需要在 zio.Executor 的 submit 方法上进行类似 ThreadPoolExecutor 上下文捕获的操作,可以参考 jdk-threadpool-plugin\n这里需要注意,因为 Fiber 也是一种 Runnable:\nprivate[zio] trait FiberRunnable extends Runnable { def location: Trace def run(depth: Int): Unit } zio-v2x-plugin\n阻塞线程池 blockingExecutor 对于 blockingExecutor,其实它只是对 Java 线程池进行了一个包装:\nobject Blocking { val blockingExecutor: zio.Executor = zio.Executor.fromThreadPoolExecutor { val corePoolSize = 0 val maxPoolSize = Int.MaxValue val keepAliveTime = 60000L val timeUnit = TimeUnit.MILLISECONDS val workQueue = new SynchronousQueue[Runnable]() val threadFactory = new NamedThreadFactory(\u0026#34;zio-default-blocking\u0026#34;, true) val threadPool = new ThreadPoolExecutor( corePoolSize, maxPoolSize, keepAliveTime, timeUnit, workQueue, threadFactory ) threadPool } } 由于其本身是对 ThreadPoolExecutor 的封装,所以,当我们已经实现了 zio.Executor 的增强后,只需要使用官方 jdk-threadpool-plugin 插件即可。 这里我们还想要对代码进行定制修改和复用,所以重新使用 Scala 实现了一个 executors-plugin 插件。\n串连 Fiber 上下文 最后,上面谈到过,Fiber 也是一种 Runnable,因此还需要对 zio.internal.FiberRunnable 进行增强。大致分为两点,其实与 jdk-threading-plugin 是一样的。\n 每次创建 zio.internal.FiberRunnable 实例时,都需要保存 现场,即构造函数增强。 每次运行时创建一个过渡的 Span,将当前线程上下文与之前保存在构造函数中的上下文进行关联。Fiber 可能被不同线程执行,所以这是必须的。  zio-v2x-plugin\n说明 当我们完成了对 ZIO Fiber 的上下文传播处理后,任意基于 ZIO 的应用层框架都可以按照普通的 Java 插件思路去开发。 我们只需要找到一个全局切入点,这个切入点应该是每个请求都会调用的方法,然后对这个方法进行增强。\n要想激活插件,只需要在 Release Notes 下载插件,放到您的 skywalking-agent/plugins 目录,重新启动服务即可。\n如果您的项目使用 sbt assembly 打包,您可以参考这个 示例。该项目使用了下列技术栈:\nlibraryDependencies ++= Seq( \u0026#34;io.d11\u0026#34; %% \u0026#34;zhttp\u0026#34; % zioHttp2Version, \u0026#34;dev.zio\u0026#34; %% \u0026#34;zio\u0026#34; % zioVersion, \u0026#34;io.grpc\u0026#34; % \u0026#34;grpc-netty\u0026#34; % \u0026#34;1.50.1\u0026#34;, \u0026#34;com.thesamet.scalapb\u0026#34; %% \u0026#34;scalapb-runtime-grpc\u0026#34; % scalapb.compiler.Version.scalapbVersion ) ++ Seq( \u0026#34;dev.profunktor\u0026#34; %% \u0026#34;redis4cats-effects\u0026#34; % \u0026#34;1.3.0\u0026#34;, \u0026#34;dev.profunktor\u0026#34; %% \u0026#34;redis4cats-log4cats\u0026#34; % \u0026#34;1.3.0\u0026#34;, \u0026#34;dev.profunktor\u0026#34; %% \u0026#34;redis4cats-streams\u0026#34; % \u0026#34;1.3.0\u0026#34;, \u0026#34;org.typelevel\u0026#34; %% \u0026#34;log4cats-slf4j\u0026#34; % \u0026#34;2.5.0\u0026#34;, \u0026#34;dev.zio\u0026#34; %% \u0026#34;zio-interop-cats\u0026#34; % \u0026#34;23.0.03\u0026#34;, \u0026#34;ch.qos.logback\u0026#34; % \u0026#34;logback-classic\u0026#34; % \u0026#34;1.2.11\u0026#34;, \u0026#34;dev.zio\u0026#34; %% \u0026#34;zio-cache\u0026#34; % zioCacheVersion ) ","excerpt":"背景介绍 在 Scala 中,纯函数式中主要使用 Fiber,而不是线程,诸如 Cats-Effect、ZIO 等 Effect 框架。 您可以将 Fiber 视为轻量级线程,它是一种并发模型,由框架 …","ref":"/zh/2024-01-04-skywalking-for-scala-effect-runtime/","title":"SkyWalking 如何支持 ZIO 等 Scala Effect Runtime"},{"body":"Xiang Wei(GitHub ID, weixiang1862) made a lot of significant contributions to SkyWalking since 2023. 
He made dozens of pull requests to multiple SkyWalking repositories, including very important features, such as Loki LogQL support, Nginx monitoring, MongoDB monitoring, as well as bug fixes, blog posts, and showcase updates.\nHere are the complete pull request list grouped by repositories.\nskywalking  Support Nginx monitoring. (https://github.com/apache/skywalking/pull/11558) Fix JDBC Log query order. (https://github.com/apache/skywalking/pull/11544) Isolate MAL CounterWindow cache by metric name.(https://github.com/apache/skywalking/pull/11526) Support extract timestamp from patterned datetime string in LAL.(https://github.com/apache/skywalking/pull/11489) Adjust AlarmRecord alarmMessage column length to 512. (https://github.com/apache/skywalking/pull/11404) Use listening mode for Apollo configuration.(https://github.com/apache/skywalking/pull/11186) Support LogQL HTTP query APIs. (https://github.com/apache/skywalking/pull/11168) Support MongoDB monitoring (https://github.com/apache/skywalking/pull/11111) Support reduce aggregate function in MQE.(https://github.com/apache/skywalking/pull/11036) Fix instance query in JDBC implementation.(https://github.com/apache/skywalking/pull/11024) Fix metric session cache saving after batch insert when using mysql-connector-java.(https://github.com/apache/skywalking/pull/11012) Add component ID for WebSphere.(https://github.com/apache/skywalking/pull/10974) Support sumLabeled in MAL (https://github.com/apache/skywalking/pull/10916)  skywalking-java  Optimize plugin selector logic.(https://github.com/apache/skywalking-java/pull/651) Fix config length limitation.(https://github.com/apache/skywalking-java/pull/623) Optimize spring-cloud-gateway 2.1.x, 3.x witness class.(https://github.com/apache/skywalking-java/pull/610) Add WebSphere Liberty 23.x plugin.(https://github.com/apache/skywalking-java/pull/560)  skywalking-swck  Remove SwAgent default env JAVA_TOOL_OPTIONS.(https://github.com/apache/skywalking-swck/pull/106) Fix panic in storage reconciler.(https://github.com/apache/skywalking-swck/pull/94) Support inject java agent bootstrap-plugins.(https://github.com/apache/skywalking-swck/pull/91) Fix number env value format error in template yaml.(https://github.com/apache/skywalking-swck/pull/90)  skywalking-showcase  Nginx monitoring showcase.(https://github.com/apache/skywalking-showcase/pull/153) LogQL showcase. (https://github.com/apache/skywalking-showcase/pull/146) MongoDB monitoring showcase. (https://github.com/apache/skywalking-showcase/pull/144)##  skywalking-website  Add blog: monitoring-nginx-by-skywalking.(https://github.com/apache/skywalking-website/pull/666) Add blog: collect and analyse nginx access log by LAL.(https://github.com/apache/skywalking-website/pull/652) Add blog: integrating-skywalking-with-arthas.(https://github.com/apache/skywalking-website/pull/641)   At Dec. 28th, 2023, the project management committee (PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome to join the committer team, Xiang Wei! We are honored to have you in the team.\n","excerpt":"Xiang Wei(GitHub ID, weixiang1862) made a lot of significant contributions to SkyWalking since 2023. 
…","ref":"/events/welcome-xiang-wei-as-new-committer/","title":"Welcome Xiang Wei as new committer"},{"body":"Background Apache SkyWalking is an open-source application performance management system that helps users collect and aggregate logs, traces, metrics, and events, and display them on the UI.\nIn order to achieve monitoring capabilities for Nginx, we have introduced the Nginx monitoring dashboard in SkyWalking 9.7, and this article will demonstrate the use of this monitoring dashboard and introduce the meaning of related metrics.\nSetup Monitoring Dashboard Metric Define and Collection Since nginx-lua-prometheus is used to define and expose metrics, we need to install lua_nginx_module for Nginx, or use OpenResty directly.\nIn the following example, we define four metrics via nginx-lua-prometheus and expose the metrics interface via nginx ip:9145/metrics:\n histogram: nginx_http_latency,monitoring http latency gauge: nginx_http_connections,monitoring nginx http connections counter: nginx_http_size_bytes,monitoring http size of request and response counter: nginx_http_requests_total,monitoring total http request numbers  http { log_format main '$remote_addr - $remote_user [$time_local] \u0026quot;$request\u0026quot; ' '$status $body_bytes_sent \u0026quot;$http_referer\u0026quot; ' '\u0026quot;$http_user_agent\u0026quot; \u0026quot;$http_x_forwarded_for\u0026quot;'; access_log /var/log/nginx/access.log main; lua_shared_dict prometheus_metrics 10M; # lua_package_path \u0026quot;/path/to/nginx-lua-prometheus/?.lua;;\u0026quot;; init_worker_by_lua_block { prometheus = require(\u0026quot;prometheus\u0026quot;).init(\u0026quot;prometheus_metrics\u0026quot;) metric_bytes = prometheus:counter( \u0026quot;nginx_http_size_bytes\u0026quot;, \u0026quot;Total size of HTTP\u0026quot;, {\u0026quot;type\u0026quot;, \u0026quot;route\u0026quot;}) metric_requests = prometheus:counter( \u0026quot;nginx_http_requests_total\u0026quot;, \u0026quot;Number of HTTP requests\u0026quot;, {\u0026quot;status\u0026quot;, \u0026quot;route\u0026quot;}) metric_latency = prometheus:histogram( \u0026quot;nginx_http_latency\u0026quot;, \u0026quot;HTTP request latency\u0026quot;, {\u0026quot;route\u0026quot;}) metric_connections = prometheus:gauge( \u0026quot;nginx_http_connections\u0026quot;, \u0026quot;Number of HTTP connections\u0026quot;, {\u0026quot;state\u0026quot;}) } server { listen 8080; location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } } server { listen 9145; location /metrics { content_by_lua_block { metric_connections:set(ngx.var.connections_reading, {\u0026quot;reading\u0026quot;}) metric_connections:set(ngx.var.connections_waiting, {\u0026quot;waiting\u0026quot;}) metric_connections:set(ngx.var.connections_writing, {\u0026quot;writing\u0026quot;}) prometheus:collect() } } } } In the above example, we exposed the route-level metrics, and you can also choose to expose the host-level metrics according to the monitoring granularity:\nhttp { log_by_lua_block { 
metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or upstream-level metrics:\nupstream backend { server ip:port; } server { location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } After defining the metrics, we start nginx and opentelemetry-collector to collect the metrics and send them to the SkyWalking backend for analysis and storage.\nPlease ensure that job_name: 'nginx-monitoring', otherwise the reported data will be ignored by SkyWalking. If you have multiple Nginx instances, you can distinguish them using the service and service_instance_id labels:\nreceivers: prometheus: config: scrape_configs: - job_name: 'nginx-monitoring' scrape_interval: 5s metrics_path: \u0026quot;/metrics\u0026quot; static_configs: - targets: ['nginx:9145'] labels: service: nginx service_instance_id: nginx-instance processors: batch: exporters: otlp: endpoint: oap:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp If everything goes well, you will see the metric data reported by Nginx under the gateway menu of the skywalking-ui:\nAccess \u0026amp; Error Log Collection SkyWalking Nginx monitoring provides log collection and error log analysis. We can use fluent-bit to collect and report access logs and error logs to SkyWalking for analysis and storage.\nFluent-bit configuration below defines the log collection directory as /var/log/nginx/. 
The access and error logs will be reported through rest port 12800 of oap after being processed by rewrite_access_log and rewrite_error_log functions:\n[SERVICE] Flush 5 Daemon Off Log_Level warn [INPUT] Name tail Tag access Path /var/log/nginx/access.log [INPUT] Name tail Tag error Path /var/log/nginx/error.log [FILTER] Name lua Match access Script fluent-bit-script.lua Call rewrite_access_log [FILTER] Name lua Match error Script fluent-bit-script.lua Call rewrite_error_log [OUTPUT] Name stdout Match * Format json [OUTPUT] Name http Match * Host oap Port 12800 URI /v3/logs Format json In the fluent-bit-script.lua, we use LOG_KIND tag to distinguish between access logs and error logs.\nTo associate with the metrics, please ensure that the values of service and serviceInstance are consistent with the metric collection definition in the previous section.\nfunction rewrite_access_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ACCESS_LOG\u0026quot;}}} return 1, timestamp, newRecord end function rewrite_error_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ERROR_LOG\u0026quot; }}} return 1, timestamp, newRecord end After starting fluent-it, we can see the collected log information in the Log tab of the monitoring panel:\nMeaning of Metrics    Metric Name Unit Description Data Source     HTTP Request Trend  The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  The avg number of the connections nginx-lua-prometheus   HTTP Status Trend % The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % The percentage of 4xx status of HTTP requests nginx-lua-prometheus   Error Log Count  The count of log level of nginx error.log fluent-bit    References  nginx-lua-prometheus fluent-bit-lua-filter skywalking-apisix-monitoring  ","excerpt":"Background Apache SkyWalking is an open-source application performance management system that helps …","ref":"/blog/2023-12-23-monitoring-nginx-by-skywalking/","title":"Monitoring Nginx with SkyWalking"},{"body":"背景介绍 在前面的 Blog 使用 LAL 收集并分析 Nginx access log 中,我们以 Nginx access log 为切入点, 演示了 SkyWalking LAL 的日志分析能力。\n为了实现对 Nginx 更全面的监控能力,我们在 SkyWalking 9.7 中引入了 Nginx 监控面板,本文将演示该监控面板的使用,并介绍相关指标的含义。\n监控面板接入 Metric 定义与采集 由于使用了 nginx-lua-prometheus 来定义及暴露指标, 我们需要为 Nginx 安装 lua_nginx_module, 或者直接使用OpenResty。\n下面的例子中,我们通过 nginx-lua-prometheus 定义了四个指标,并通过 ip:9145/metrics 暴露指标接口:\n histogram: nginx_http_latency,监控 
http 延时 gauge: nginx_http_connections,监控 http 连接数 counter: nginx_http_size_bytes,监控 http 请求和响应大小 counter: nginx_http_requests_total,监控 http 请求次数  http { log_format main '$remote_addr - $remote_user [$time_local] \u0026quot;$request\u0026quot; ' '$status $body_bytes_sent \u0026quot;$http_referer\u0026quot; ' '\u0026quot;$http_user_agent\u0026quot; \u0026quot;$http_x_forwarded_for\u0026quot;'; access_log /var/log/nginx/access.log main; lua_shared_dict prometheus_metrics 10M; # lua_package_path \u0026quot;/path/to/nginx-lua-prometheus/?.lua;;\u0026quot;; init_worker_by_lua_block { prometheus = require(\u0026quot;prometheus\u0026quot;).init(\u0026quot;prometheus_metrics\u0026quot;) metric_bytes = prometheus:counter( \u0026quot;nginx_http_size_bytes\u0026quot;, \u0026quot;Total size of HTTP\u0026quot;, {\u0026quot;type\u0026quot;, \u0026quot;route\u0026quot;}) metric_requests = prometheus:counter( \u0026quot;nginx_http_requests_total\u0026quot;, \u0026quot;Number of HTTP requests\u0026quot;, {\u0026quot;status\u0026quot;, \u0026quot;route\u0026quot;}) metric_latency = prometheus:histogram( \u0026quot;nginx_http_latency\u0026quot;, \u0026quot;HTTP request latency\u0026quot;, {\u0026quot;route\u0026quot;}) metric_connections = prometheus:gauge( \u0026quot;nginx_http_connections\u0026quot;, \u0026quot;Number of HTTP connections\u0026quot;, {\u0026quot;state\u0026quot;}) } server { listen 8080; location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } } server { listen 9145; location /metrics { content_by_lua_block { metric_connections:set(ngx.var.connections_reading, {\u0026quot;reading\u0026quot;}) metric_connections:set(ngx.var.connections_waiting, {\u0026quot;waiting\u0026quot;}) metric_connections:set(ngx.var.connections_writing, {\u0026quot;writing\u0026quot;}) prometheus:collect() } } } } 上面的例子中,我们暴露了 route 级别的指标,你也可以根据监控粒度的需要,选择暴露 host 指标:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } 或者 upstream 指标:\nupstream backend { server ip:port; } server { location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } 完成指标定义后,我们启动 nginx 和 opentelemetry-collector,将指标采集到 SkyWalking 后端进行分析和存储。\n请确保job_name: 'nginx-monitoring',否则上报的数据将被 SkyWalking 忽略。如果你有多个 Nginx 实例,你可以通过service及service_instance_id这两个 label 进行区分:\nreceivers: prometheus: config: scrape_configs: - job_name: 
'nginx-monitoring' scrape_interval: 5s metrics_path: \u0026quot;/metrics\u0026quot; static_configs: - targets: ['nginx:9145'] labels: service: nginx service_instance_id: nginx-instance processors: batch: exporters: otlp: endpoint: oap:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp 如果一切顺利,你将在 skywalking-ui 的网关菜单下看到 nginx 上报的指标数据:\nAccess \u0026amp; Error Log 采集 SkyWalking Nginx 监控提供了日志采集及错误日志统计功能,我们可以借助 fluent-bit 采集并上报 access log、error log 给 SkyWalking 分析存储。\n下面 fluent-bit 配置定义了日志采集目录为/var/log/nginx/,access 和 error log 经过 rewrite_access_log 和 rewrite_error_log 处理后会通过 oap 12800 端口进行上报:\n[SERVICE] Flush 5 Daemon Off Log_Level warn [INPUT] Name tail Tag access Path /var/log/nginx/access.log [INPUT] Name tail Tag error Path /var/log/nginx/error.log [FILTER] Name lua Match access Script fluent-bit-script.lua Call rewrite_access_log [FILTER] Name lua Match error Script fluent-bit-script.lua Call rewrite_error_log [OUTPUT] Name stdout Match * Format json [OUTPUT] Name http Match * Host oap Port 12800 URI /v3/logs Format json 在 fluent-bit-script.lua 中,我们通过 LOG_KIND 来区分 access log 和 error log。\n为了能够关联上文采集的 metric,请确保 service 和 serviceInstance 值与上文中指标采集定义一致。\nfunction rewrite_access_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ACCESS_LOG\u0026quot;}}} return 1, timestamp, newRecord end function rewrite_error_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ERROR_LOG\u0026quot; }}} return 1, timestamp, newRecord end 启动 fluent-it 后,我们便可以在监控面板的 Log tab 看到采集到的日志信息:\n面板指标含义    面板名称 单位 指标含义 数据源     HTTP Request Trend  每秒钟平均请求数 nginx-lua-prometheus   HTTP Latency ms 平均响应延时 nginx-lua-prometheus   HTTP Bandwidth KB 请求响应流量 nginx-lua-prometheus   HTTP Connections  nginx http 连接数 nginx-lua-prometheus   HTTP Status Trend % 每分钟 http 状态码统计 nginx-lua-prometheus   HTTP Status 4xx Percent % 4xx状态码比例 nginx-lua-prometheus   HTTP Status 5xx Percent % 5xx状态码比例 nginx-lua-prometheus   Error Log Count  每分钟错误日志数统计 fluent-bit    参考文档  nginx-lua-prometheus fluent-bit-lua-filter skywalking-apisix-monitoring  ","excerpt":"背景介绍 在前面的 Blog 使用 LAL 收集并分析 Nginx access log 中,我们以 Nginx access log 为切入点, 演示了 SkyWalking LAL 的日志分析能 …","ref":"/zh/2023-12-23-monitoring-nginx-by-skywalking/","title":"使用 SkyWalking 监控 Nginx"},{"body":"🚀 Dive into the World of Cutting-Edge Technology with Apache\u0026rsquo;s Finest! 🌐 Join me today as we embark on an exhilarating journey with two of Apache\u0026rsquo;s most brilliant minds - Sheng Wu and Trista Pan. We\u0026rsquo;re exploring the realms of Apache SkyWalking and Apache ShardingSphere, two groundbreaking initiatives that are reshaping the landscape of open-source technology. 
🌟\nIn this exclusive session, we delve deep into Apache SkyWalking - an innovative observability platform that\u0026rsquo;s revolutionizing how we monitor and manage distributed systems in the cloud. Witness firsthand how SkyWalking is empowering developers and organizations to gain unparalleled insights into their applications, ensuring performance, reliability, and efficient troubleshooting. 🛰️🔍\nBut there\u0026rsquo;s more! We\u0026rsquo;re also unveiling the secrets of Apache ShardingSphere, a dynamic distributed database ecosystem. Learn how ShardingSphere is making waves in the world of big data, offering scalable, high-performance solutions for data sharding, encryption, and more. This is your gateway to understanding how these technologies are pivotal in handling massive data sets across various industries. 🌐💾\nWhether you\u0026rsquo;re a developer, tech enthusiast, or just curious about the future of open-source technology, this is a conversation you don\u0026rsquo;t want to miss! Get ready to be inspired and informed as we unlock new possibilities and applications of Apache SkyWalking and ShardingSphere. 🚀🌟\nJoin us, and let\u0026rsquo;s decode the future together!\n  Please join and follow Josh\u0026rsquo;s 龙之春 Youtube Coffee + Software with Josh Long Channel to learn more about technology and open source from telanted engineers and industry leads.\n","excerpt":"🚀 Dive into the World of Cutting-Edge Technology with Apache\u0026rsquo;s Finest! 🌐 Join me today as we …","ref":"/blog/2023-12-04-coffee+software-with-josh-long/","title":"[Video] Coffee + Software with Josh Long - Apache SkyWalking with Sheng Wu and Apache ShardingSphere with Trista Pan"},{"body":"SkyWalking CLI 0.13.0 is released. Go to downloads page to find release tars.\nFeatures  Add the sub-command menu get for get the ui menu items by @mrproliu in https://github.com/apache/skywalking-cli/pull/187  Bug Fixes  Fix the record list query does not support new OAP versions (with major version number \u0026gt; 9).  ","excerpt":"SkyWalking CLI 0.13.0 is released. Go to downloads page to find release tars.\nFeatures  Add the …","ref":"/events/release-apache-skywalking-cli-0-13-0/","title":"Release Apache SkyWalking CLI 0.13.0"},{"body":"SkyWalking Java Agent 9.1.0 is released. Go to downloads page to find release tars. Changes by Version\n9.1.0  Fix hbase onConstruct NPE in the file configuration scenario Fix the issue of createSpan failure caused by invalid request URL in HttpClient 4.x/5.x plugin Optimize ElasticSearch 6.x 7.x plugin compatibility Fix an issue with the httpasyncclient component where the isError state is incorrect. Support customization for the length limitation of string configurations Add max length configurations in agent.config file for service_name and instance_name Optimize spring-cloud-gateway 2.1.x, 3.x witness class. Support report MongoDB instance info in Mongodb 4.x plugin. To compatible upper and lower case Oracle TNS url parse. Support collecting ZGC memory pool metrics. Require OAP 9.7.0 to support these new metrics. Upgrade netty-codec-http2 to 4.1.100.Final Add a netty-http 4.1.x plugin to trace HTTP requests. Fix Impala Jdbc URL (including schema without properties) parsing exception. Optimize byte-buddy type description performance. Add eclipse-temurin:21-jre as another base image. Bump byte-buddy to 1.14.9 for JDK21 support. Add JDK21 plugin tests for Spring 6. Bump Lombok to 1.18.30 to adopt JDK21 compiling. Fix PostgreSQL Jdbc URL parsing exception. Bump up grpc version. 
Optimize plugin selector logic.  Documentation  Fix JDK requirement in the compiling docs. Add JDK21 support in the compiling docs.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 9.1.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-9-1-0/","title":"Release Apache SkyWalking Java Agent 9.1.0"},{"body":"","excerpt":"","ref":"/tags/video/","title":"Video"},{"body":"SkyWalking 9.7.0 is released. Go to downloads page to find release tars.\nDark Mode The dafult style mode is changed to the dark mode, and light mode is still available.\nNew Design Log View A new design for the log view is currently available. Easier to locate the logs, and more space for the raw text.\nProject  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. 
Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.7.0 is released. Go to downloads page to find release tars.\nDark Mode The dafult style …","ref":"/events/release-apache-skywalking-apm-9.7.0/","title":"Release Apache SkyWalking APM 9.7.0"},{"body":"","excerpt":"","ref":"/zh_tags/conference/","title":"Conference"},{"body":"SkyWalking Summit 2023 @ Shanghai 会议时间:2023年11月4日 全天 地点:上海大华虹桥假日酒店 赞助商:纵目科技,Tetrate\n会议议程 与 PDF SkyWalking V9 In 2023 - 5 featured releases  吴晟 PDF  B站视频地址\n使用 Terraform 与 Ansible 快速部署 SkyWalking 集群  柯振旭 PDF  B站视频地址\n基于SkyWalking构建全域一体化观测平台  陈修能 PDF  B站视频地址\n云原生可观测性数据库BanyanDB  高洪涛 PDF  B站视频地址\n基于 SkyWalking Agent 的性能剖析和实时诊断  陆家靖 PDF  B站视频地址\n太保科技-多云环境下Zabbix的运用实践  田川 PDF  B站视频地址\nKubeSphere 在可观测性领域的探索与实践  霍秉杰 PDF  B站视频地址\n大型跨国企业的微服务治理  张文杰 PDF  B站视频地址\n","excerpt":"SkyWalking Summit 2023 @ Shanghai 会议时间:2023年11月4日 全天 地点:上海大华虹桥假日酒店 赞助商:纵目科技,Tetrate\n会议议程 与 PDF …","ref":"/zh/2023-11-04-skywalking-summit-shanghai/","title":"SkyWalking Summit 2023 @ Shanghai 会议回顾"},{"body":"","excerpt":"","ref":"/zh_tags/video/","title":"Video"},{"body":"","excerpt":"","ref":"/zh_tags/","title":"Zh_tags"},{"body":"SkyWalking Infra E2E 1.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support sha256enc and sha512enc encoding in verify case. Support hasPrefix and hasSuffix string verifier in verify case. Bump up kind to v0.14.0. Add a field kubeconfig to support running e2e test on an existing kubernetes cluster. 
Support non-fail-fast execution of test cases support verify cases concurrently Add .exe suffix to windows build artifact Export the kubeconfig path during executing the following steps Automatically pull images before loading into KinD Support outputting the result of \u0026lsquo;verify\u0026rsquo; in YAML format and only outputting the summary of the result of \u0026lsquo;verify\u0026rsquo; Make e2e test itself in github action Support outputting the summary of \u0026lsquo;verify\u0026rsquo; in YAML format Make e2e output summary with numeric information Add \u0026lsquo;subtractor\u0026rsquo; function  Improvements  Bump up GHA to avoid too many warnings Leverage the built-in cache in setup-go@v4 Add batchOutput config to reduce outputs Disable batch mode by default, add it to GHA and enable by default Improve GitHub Actions usability and speed by using composite actions' new feature Migrate deprecated GitHub Actions command to recommended ones Bump up kind to v0.14.0 Optimization of the output information of verification verifier: notEmpty should be able to handle nil Remove invalid configuration in GitHub Actions  Bug Fixes  Fix deprecation warnings Ignore cancel error when copying container logs  Documentation  Add a doc to introduce how to use e2e to test itself  Issues and PR  All issues are here All pull requests are here  ","excerpt":"SkyWalking Infra E2E 1.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skywalking-infra-e2e-1-3-0/","title":"Release Apache SkyWalking Infra E2E 1.3.0"},{"body":"","excerpt":"","ref":"/tags/ospp/","title":"OSPP"},{"body":"Aapche SkyWalking PMC 和 committer团队参加了\u0026quot;开源之夏 2023\u0026quot;活动,作为导师,共获得了9个官方赞助名额。最终对学生开放如下任务\n SkyWalking 支持 GraalVM Skywalking Infra E2E 自测试 监控Apache Pulsar 统一BanyanDB的查询计划和查询执行器 使用Helm部署BanyanDB 编写go agent的gRPC插件 监控Kafka 集成SkyWalking PHP到SkyWalking E2E 测试 在线黄金指标异常检测  经过3个月的开发,上游评审,PMC成员评议,PMC Chair复议,OSPP官方委员会评审多个步骤,现公布项目参与人员与最终结果\n通过评审项目(共6个) SkyWalking 支持 GraalVM  学生:张跃骎 学校:辽宁大学 本科 合并PR:11354 后续情况说明:GraalVM因为复杂的生态,替代的代码将被分离到SkyWalking GraalVM Distro, 相关讨论,请参见Issue 11518  Skywalking Infra E2E 自测试  学生:王子忱 学校:华中师范大学 本科 合并PR:115, 116, 117, 118, 119 后续情况说明:此特性已经包含在发行版skywalking-infra-e2e v1.3.0中  统一BanyanDB的查询计划和查询执行器  学生:曾家华 学校:电子科技大学 本科 合并PR:343  使用Helm部署BanyanDB  学生:黄友亮 学校:北京邮电大学 硕士研究生 合并PR:1 情况说明:因为BanyanDB Helm为新项目,学生承接了项目初始化、功能提交、自动化测试,发布准备等多项任务。所参与功能包含在skywalking-banyandb-helm v0.1.0中  编写go agent的gRPC插件  学生:胡宇腾 学校:西安邮电大学 合并PR:88, 94 后续情况说明:该学生在开源之夏相关项目外,完成了feature: add support for iris #99和Go agent APIs功能开发。并发表文章SkyWalking Go Toolkit Trace 详解以及英文译本Detailed explanation of SkyWalking Go Toolkit Trace  监控Kafka  学生:王竹 学校:美国东北大学 ( Northeastern University) 合并PR:11282, UI 318  未通过评审项目(3个) 下列项目因为质量无法达到社区要求,违规等原因,将被标定为失败。 注:在开源之夏中失败的项目,其Pull Reqeust可能因为符合社区功能要求,也被接受合并。\n监控Apache Pulsar  学生:孟祥迎 学校:重庆邮电大学 本科 合并PR:11339 失败原因:项目申请成员,作为ASF Pulsar项目的Committer,在担任Pulsar开源之夏项目导师期间,但依然申请了学生参与项目。属于违规行为。SkyWalking PMC审查了此行为并通报开源之夏组委会。开源之夏组委会依据活动规则取消其结项奖金。  集成SkyWalking PHP到SkyWalking E2E 测试  学生:罗文 学校:San Jose State University B.S. 
合并PR:11330 失败原因:根据pull reqeust中的提交记录,SkyWalking PMC Chair审查了提交明细,学生参与代码数量大幅度小于导师的提交代码。并在考虑到这个项目难度以及明显低于SkyWalking 开源之夏项目的平均水平的情况下,通报给开源之夏组委会。经过组委会综合评定,项目不合格。  在线黄金指标异常检测  学生:黄颖 学校:同济大学 研究生 合并PR:无 失败原因:项目在进度延迟后实现较为简单且粗糙,并且没有提供算法评估结果和文档等。在 PR 开启后的为期一个月审核合并期间,学生并未能成功按预定计划改善实现的质量和文档。和导师以及 SkyWalking 社区缺少沟通。  结语 SkyWalking社区每年都有近10位PMC成员或Committer参与开源之夏中,帮助在校学生了解顶级开源项目、开源社区的运作方式。我们希望大家在每年经过3个月的时间,能够真正的帮助在校学生了解开源和参与开源。 因为,社区即使在考虑到学生能力的情况下,不会明显的降低pull request的接受标准。希望今后的学生,能够在早期,积极、主动和导师,社区其他成员保持高频率的沟通,对参与的项目有更深入、准确的了解。\n","excerpt":"Aapche SkyWalking PMC 和 committer团队参加了\u0026quot;开源之夏 2023\u0026quot;活动,作为导师,共获得了9个官方赞助名额。 …","ref":"/zh/2023-11-09-ospp-summary/","title":"开源之夏 2023 SkyWalking 社区项目情况公示"},{"body":"SkyWalking NodeJS 0.7.0 is released. Go to downloads page to find release tars.\n Add deadline config for trace request (#118)  ","excerpt":"SkyWalking NodeJS 0.7.0 is released. Go to downloads page to find release tars.\n Add deadline config …","ref":"/events/release-apache-skywalking-nodejs-0-7-0/","title":"Release Apache SkyWalking for NodeJS 0.7.0"},{"body":"","excerpt":"","ref":"/tags/lal/","title":"LAL"},{"body":"","excerpt":"","ref":"/tags/logging/","title":"Logging"},{"body":"背景介绍 Nginx access log 中包含了丰富的信息,例如:日志时间、状态码、响应时间、body 大小等。通过收集并分析 access log,我们可以实现对 Nginx 中接口状态的监控。\n在本案例中,将由 fluent-bit 收集 access log,并通过 HTTP 将日志信息发送给 SkyWalking OAP Server 进行进一步的分析。\n环境准备 实验需要的 Nginx 及 Fluent-bit 相关配置文件都被上传到了Github,有需要的读者可以自行 git clone 并通过 docker compose 启动,本文中将介绍配置文件中几个关键点。\nNginx日志格式配置 LAL 目前支持 JSON、YAML 及 REGEX 日志解析,为了方便获取到日志中的指标字段,我们将 Nginx 的日志格式定义为 JSON.\nhttp { ... ... log_format main '{\u0026quot;remote_addr\u0026quot;: \u0026quot;$remote_addr\u0026quot;,' '\u0026quot;remote_user\u0026quot;: \u0026quot;$remote_user\u0026quot;,' '\u0026quot;request\u0026quot;: \u0026quot;$request\u0026quot;,' '\u0026quot;time\u0026quot;: \u0026quot;$time_iso8601\u0026quot;,' '\u0026quot;status\u0026quot;: \u0026quot;$status\u0026quot;,' '\u0026quot;request_time\u0026quot;:\u0026quot;$request_time\u0026quot;,' '\u0026quot;body_bytes_sent\u0026quot;: \u0026quot;$body_bytes_sent\u0026quot;,' '\u0026quot;http_referer\u0026quot;: \u0026quot;$http_referer\u0026quot;,' '\u0026quot;http_user_agent\u0026quot;: \u0026quot;$http_user_agent\u0026quot;,' '\u0026quot;http_x_forwarded_for\u0026quot;: \u0026quot;$http_x_forwarded_for\u0026quot;}'; access_log /var/log/nginx/access.log main; ... ... 
} Fluent bit Filter 我们通过 Fluent bit 的 lua filter 进行日志格式的改写,将其调整为 SkyWalking 所需要的格式,record的各个字段含义如下:\n body:日志内容体 service:服务名称 serviceInstance:实例名称  function rewrite_body(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;body\u0026quot;] = { json = { json = record.log } } newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;localhost\u0026quot; return 1, timestamp, newRecord end OAP 日志分析 LAL定义 在 filter 中,我们通过条件判断,只处理 service=nginx::nginx 的服务,其他服务依旧走默认逻辑:\n第一步,使用 json 指令对日志进行解析,解析的结果会被存放到 parsed 字段中,通过 parsed 字段我们可以获取 json 日志中的字段信息。\n第二步,使用 timestamp 指令解析 parsed.time 并将其赋值给日志的 timestamp 字段,这里的 time 就是access log json 中的 time。\n第三步,使用 tag 指令给日志打上对应的标签,标签的值依然可以通过 parsed 字段获取。\n第四步,使用 metrics 指令从日志中提取出指标信息,我们共提取了四个指标:\n nginx_log_count:Nginx 每次请求都会生成一条 access log,该指标可以帮助我们统计 Nginx 当前的请求数。 nginx_request_time:access log 中会记录请求时间,该指标可以帮助我们统计上游接口的响应时长。 nginx_body_bytes_sent:body 大小指标可以帮助我们了解网关上的流量情况。 nginx_status_code:状态码指标可以实现对状态码的监控,如果出现异常上涨可以结合 alarm 进行告警。  rules:- name:defaultlayer:GENERALdsl:|filter { if (log.service == \u0026#34;nginx::nginx\u0026#34;) { json { abortOnFailure true }extractor {timestamp parsed.time as String, \u0026#34;yyyy-MM-dd\u0026#39;T\u0026#39;HH:mm:ssXXX\u0026#34;tag status:parsed.statustag remote_addr:parsed.remote_addrmetrics {timestamp log.timestamp as Longlabels service: log.service, instance:log.serviceInstancename \u0026#34;nginx_log_count\u0026#34;value 1}metrics {timestamp log.timestamp as Longlabels service: log.service, instance:log.serviceInstancename \u0026#34;nginx_request_time\u0026#34;value parsed.request_time as Double}metrics {timestamp log.timestamp as Longlabels service: log.service, instance:log.serviceInstancename \u0026#34;nginx_body_bytes_sent\u0026#34;value parsed.body_bytes_sent as Long}metrics {timestamp log.timestamp as Longlabels service: log.service, instance: log.serviceInstance, status:parsed.statusname \u0026#34;nginx_status_code\u0026#34;value 1}}}sink {}}经过 LAL 处理后,我们已经可以在日志面板看到日志信息了,接下来我们将对 LAL 中提取的指标进行进一步分析:\nMAL定义 在 MAL 中,我们可以对上一步 LAL 中提取的指标进行进一步的分析聚合,下面的例子里:\nnginx_log_count、nginx_request_time、nginx_status_code 使用 sum 聚合函数处理,并使用 SUM 方式 downsampling,\nnginx_request_time 使用 avg 聚合函数求平均值,默认使用 AVG 方式 downsampling。\n完成聚合分析后,SkyWalking Meter System 会完成对上述指标的持久化。\nexpSuffix:service([\u0026#39;service\u0026#39;], Layer.GENERAL)metricPrefix:nginxmetricsRules:- name:cpmexp:nginx_log_count.sum([\u0026#39;service\u0026#39;]).downsampling(SUM)- name:avg_request_timeexp:nginx_request_time.avg([\u0026#39;service\u0026#39;])- name:body_bytes_sent_countexp:nginx_body_bytes_sent.sum([\u0026#39;service\u0026#39;]).downsampling(SUM)- name:status_code_countexp:nginx_status_code.sum([\u0026#39;service\u0026#39;,\u0026#39;status\u0026#39;]).downsampling(SUM)最后,我们便可以来到 SkyWalking UI 页面新建 Nginx 仪表板,使用刚刚 MAL 中定义的指标信息创建 Nginx Dashboard(也可以通过上文提到仓库中的 dashboard.json 直接导入测试):\n参考文档  Fluent Bit lua Filter Log Analysis Language Meter Analysis Language  ","excerpt":"背景介绍 Nginx access log 中包含了丰富的信息,例如:日志时间、状态码、响应时间、body 大小等。通过收集并分析 access log,我们可以实现对 Nginx 中接口状态的监控。 …","ref":"/zh/2023-10-29-collect-and-analyse-nginx-accesslog-by-lal/","title":"使用 LAL 收集并分析 Nginx access log"},{"body":"SkyWalking BanyanDB 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  List all properties in a group. Implement Write-ahead Logging Document the clustering. Support multiple roles for banyand server. Support for recovery buffer using wal. 
Register the node role to the metadata registry. Implement the remote queue to spreading data to data nodes. Fix parse environment variables error Implement the distributed query engine. Add mod revision check to write requests. Add TTL to the property. Implement node selector (e.g. PickFirst Selector, Maglev Selector). Unified the buffers separated in blocks to a single buffer in the shard.  Bugs  BanyanDB ui unable to load icon. BanyanDB ui type error Fix timer not released BanyanDB ui misses fields when creating a group Fix data duplicate writing Syncing metadata change events from etcd instead of a local channel.  Chores  Bump several dependencies and tools. Drop redundant \u0026ldquo;discovery\u0026rdquo; module from banyand. \u0026ldquo;metadata\u0026rdquo; module is enough to play the node and shard discovery role.  ","excerpt":"SkyWalking BanyanDB 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  List all …","ref":"/events/release-apache-skywalking-banyandb-0-5-0/","title":"Release Apache SkyWalking BanyanDB 0.5.0"},{"body":"SkyWalking Go 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support manual tracing APIs for users.  Plugins  Support mux HTTP server framework. Support grpc server and client framework. Support iris framework.  Documentation  Add Tracing APIs document into Manual APIs.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Go 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support manual …","ref":"/events/release-apache-skwaylking-go-0.3.0/","title":"Release Apache SkyWalking Go 0.3.0"},{"body":"Background SkyWalking Go is an open-source, non-intrusive Golang agent used for monitoring, tracing, and data collection within distributed systems. It enables users to observe the flow and latency of requests within the system, collect performance data from various system components for performance monitoring, and troubleshoot issues by tracing the complete path of requests.\nIn version v0.3.0, Skywalking Go introduced the toolkit trace tool. Trace APIs allow users to include critical operations, functions, or services in the tracing scope in situations where plugins do not support them. This inclusion enables tracking and monitoring of these operations and can be used for fault analysis, diagnosis, and performance monitoring.\nBefore diving into this, you can learn how to use the Skywalking Go agent by referring to the SkyWalking Go Agent Quick Start Guide.\nThe following sections will explain how to use these interfaces in specific scenarios.\nIntroducing the Trace Toolkit Execute the following command in the project\u0026rsquo;s root directory:\ngo get github.com/apache/skywalking-go/toolkit To use the toolkit trace interface, you need to import the package into your project:\n\u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Manual Tracing A Span is the fundamental unit of an operation in Tracing. It represents an operation within a specific timeframe, such as a request, a function call, or a specific action. It records essential information about a particular operation, including start and end times, the operation\u0026rsquo;s name, tags (key-value pairs), and relationships between operations. 
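To make that concrete before the full walkthrough below, here is a minimal, hypothetical sketch that wraps a purely local function in a span using only the toolkit calls covered in this article (trace.CreateLocalSpan, trace.SetTag and trace.StopSpan); the function name doWork and the tag value are illustrative, and the consumer/provider example later in the post remains the reference.

package main

import (
	"time"

	_ "github.com/apache/skywalking-go"
	"github.com/apache/skywalking-go/toolkit/trace"
)

// doWork stands in for any local operation that no plugin instruments.
func doWork() {
	// Open a span covering this operation; it becomes the active span on the current goroutine.
	if _, err := trace.CreateLocalSpan("doWork"); err != nil {
		return
	}
	// Close the span when the operation finishes.
	defer trace.StopSpan()

	// Record a key-value tag on the active span (illustrative value).
	trace.SetTag("job.type", "demo")

	time.Sleep(10 * time.Millisecond) // the actual work
}

func main() {
	doWork()
	// Keep the process alive briefly so the agent can report the segment asynchronously.
	time.Sleep(3 * time.Second)
}

Built with the -toolexec go-agent flag shown later in this post, the doWork call would be reported as a LocalSpan; the trailing sleep only gives the agent time to ship data before the process exits, echoing the note below about programs that end too quickly.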
Multiple Spans can form a hierarchical structure.\nIn situations where Skywalking-go doesn\u0026rsquo;t support a particular framework, users can manually create Spans to obtain tracing information.\n(Here, I have removed the supported frameworks for the sake of the example. These are only examples. You should reference this when using the APIs in private and/or unsupported frameworks)\nFor example, when you need to trace an HTTP response, you can create a span using trace.CreateEntrySpan() within the method handling the request, and end the span using trace.StopSpan() after processing. When sending an HTTP request, use trace.CreateExitSpan() to create a span, and end the span after the request returns.\nHere are two HTTP services named consumer and provider. When a user accesses the consumer service, it receives the user\u0026rsquo;s request internally and then accesses the provider to obtain resources.\n// consumer.go package main import ( \u0026#34;io\u0026#34; \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func getProvider() (*http.Response, error) { // Create an HTTP request \treq, err := http.NewRequest(\u0026#34;GET\u0026#34;, \u0026#34;http://localhost:9998/provider\u0026#34;, http.NoBody) // Create an ExitSpan before sending the HTTP request. \ttrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { // Injector adds specific header information to the request. \treq.Header.Add(headerKey, headerValue) return nil }) // Finish the ExitSpan and ensure it executes when the function returns using defer. \tdefer trace.StopSpan() // Send the request. \tclient := \u0026amp;http.Client{} resp, err := client.Do(req) if err != nil { return nil, err } return resp, nil } func consumerHandler(w http.ResponseWriter, r *http.Request) { // Create an EntrySpan to trace the execution of the consumerHandler method. \ttrace.CreateEntrySpan(r.Method+\u0026#34;/consumer\u0026#34;, func(headerKey string) (string, error) { // Extractor retrieves the header information added to the request. \treturn r.Header.Get(headerKey), nil }) // Finish the EntrySpan. \tdefer trace.StopSpan() // Prepare to send an HTTP request. \tresp, err := getProvider() body, err := io.ReadAll(resp.Body) if err != nil { return } _, _ = w.Write(body) } func main() { http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } // provider.go package main import ( \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { //Create an EntrySpan to trace the execution of the providerHandler method. \ttrace.CreateEntrySpan(\u0026#34;GET:/provider\u0026#34;, func(headerKey string) (string, error) { return r.Header.Get(headerKey), nil }) // Finish the EntrySpan. 
\tdefer trace.StopSpan() _, _ = w.Write([]byte(\u0026#34;success from provider\u0026#34;)) } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) _ = http.ListenAndServe(\u0026#34;:9998\u0026#34;, nil) } Then, in the terminal, execute:\ngo build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o consumer ./consumer.go ./consumer go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o provider ./provider.go ./provider curl 127.0.0.1:9999/consumer At this point, the UI will display the span information you created.\nIf you need to trace methods that are executed only locally, you can use trace.CreateLocalSpan(). If you don\u0026rsquo;t need to monitor information or states from the other end, you can change ExitSpan and EntrySpan to LocalSpan.\nThe usage examples provided are for illustration purposes, and users can decide the tracing granularity and where in the program they need tracing.\nPlease note that if a program ends too quickly, it may cause tracing data to be unable to be asynchronously sent to the SkyWalking backend.\nPopulate The Span When there\u0026rsquo;s a necessity to record additional information, including creating/updating tags, appending logs, and setting a new operation name of the current traced Span, these APIs should be considered. These actions are used to enhance trace information, providing a more detailed and precise contextual description, which aids in better understanding the events or operations being traced.\nToolkit trace APIs provide a convenient way to access and manipulate trace data, including:\n Setting Tags: SetTag() Adding Logs: AddLog() Setting Span Names: SetOperationName() Getting various IDs: GetTraceID(), GetSegmentID(), GetSpanID()  For example, if you need to record the HTTP status code in a span, you can use the following interfaces while the span is not yet finished:\ntrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { r.Header.Add(headerKey, headerValue) return nil }) resp, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) trace.SetTag(\u0026#34;status_code\u0026#34;, fmt.Sprintf(\u0026#34;%d\u0026#34;, resp.StatusCode)) spanID := trace.GetSpanID() trace.StopSpan() It\u0026rsquo;s important to note that when making these method calls, the current thread should have an active span.\nAsync APIs Async APIs work for manipulating spans across Goroutines. These scenarios might include:\n Applications involving concurrency or multiple goroutines where operating on Spans across different execution contexts is necessary. Updating or logging information for a Span during asynchronous operations. Requiring a delayed completion of a Span.  To use it, follow these steps:\n Obtain the return value of CreateSpan, which is SpanRef. Call spanRef.PrepareAsync() to prepare for operations in another goroutine. When the current goroutine\u0026rsquo;s work is done, call trace.StopSpan() to end the span (affecting only in the current goroutine). Pass the spanRef to another goroutine. After the work is done in any goroutine, call spanRef.AsyncFinish().  
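As a hedged variation on the short example that follows, the sketch below adds a sync.WaitGroup plus a final sleep so the process cannot exit before AsyncFinish runs and the agent has reported the data; apart from the toolkit calls themselves (CreateLocalSpan, PrepareAsync, StopSpan, AsyncFinish), all names and timings are illustrative.

package main

import (
	"sync"
	"time"

	_ "github.com/apache/skywalking-go"
	"github.com/apache/skywalking-go/toolkit/trace"
)

func main() {
	// Create the span and keep its SpanRef for cross-goroutine use.
	spanRef, err := trace.CreateLocalSpan("AsyncLocalSpan")
	if err != nil {
		return
	}
	// Mark the span as asynchronous before handing it to another goroutine.
	spanRef.PrepareAsync()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(50 * time.Millisecond) // the asynchronous work
		// Finish the span from the worker goroutine.
		spanRef.AsyncFinish()
	}()

	// End the span on the current goroutine; it stays open until AsyncFinish is called.
	trace.StopSpan()

	wg.Wait()
	// Give the agent time to report before the process exits.
	time.Sleep(3 * time.Second)
}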
Here\u0026rsquo;s an example:\nspanRef, err := trace.CreateLocalSpan(\u0026#34;LocalSpan\u0026#34;) if err != nil { return } spanRef.PrepareAsync() go func(){ // some work  spanRef.AsyncFinish() }() // some work trace.StopSpan() Correlation Context Correlation Context is used to pass parameters within a Span, and the parent Span will pass the Correlation Context to all its child Spans. It allows the transmission of information between spans across different applications. The default number of elements in the Correlation Context is 3, and the content\u0026rsquo;s length cannot exceed 128 bytes.\nCorrelation Context is commonly applied in the following scenarios:\n Passing Information Between Spans: It facilitates the transfer of critical information between different Spans, enabling upstream and downstream Spans to understand the correlation and context between each other. Passing Business Parameters: In business scenarios, it involves transmitting specific parameters or information between different Spans, such as authentication tokens, business transaction IDs, and more.  Users can set the Correlation Context using trace.SetCorrelation(key, value) and then retrieve the corresponding value in downstream spans using value := trace.GetCorrelation(key).\nFor example, in the code below, we store the value in the tag of the span, making it easier to observe the result:\npackage main import ( _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; \u0026#34;net/http\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { ctxValue := trace.GetCorrelation(\u0026#34;key\u0026#34;) trace.SetTag(\u0026#34;result\u0026#34;, ctxValue) } func consumerHandler(w http.ResponseWriter, r *http.Request) { trace.SetCorrelation(\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;) _, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) if err != nil { return } } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } Then, in the terminal, execute:\nexport SW_AGENT_NAME=server go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o server ./server.go ./server curl 127.0.0.1:9999/consumer Finally, in the providerHandler() span, you will find the information from the Correlation Context:\nConclusion This article provides an overview of Skywalking Go\u0026rsquo;s Trace APIs and their practical application. 
These APIs empower users with the ability to customize tracing functionality according to their specific needs.\nFor detailed information about the interfaces, please refer to the documentation: Tracing APIs.\nWelcome everyone to try out the new version.\n","excerpt":"Background SkyWalking Go is an open-source, non-intrusive Golang agent used for monitoring, tracing, …","ref":"/blog/2023-10-18-skywalking-toolkit-trace/","title":"Detailed explanation of SkyWalking Go Toolkit Trace"},{"body":"背景介绍 SkyWalking Go是一个开源的非侵入式Golang代理程序,用于监控、追踪和在分布式系统中进行数据收集。它使用户能够观察系统内请求的流程和延迟,从各个系统组件收集性能数据以进行性能监控,并通过追踪请求的完整路径来解决问题。\n在版本v0.3.0中,Skywalking Go引入了 toolkit-trace 工具。Trace APIs 允许用户在插件不支持的情况下将关键操作、函数或服务添加到追踪范围。从而实现追踪和监控这些操作,并可用于故障分析、诊断和性能监控。\n在深入了解之前,您可以参考SkyWalking Go Agent快速开始指南来学习如何使用SkyWalking Go Agent。\n下面将会介绍如何在特定场景中使用这些接口。\n导入 Trace Toolkit 在项目的根目录中执行以下命令:\ngo get github.com/apache/skywalking-go/toolkit 使用 toolkit trace 接口前,需要将该包导入到您的项目中:\n\u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; 手动追踪 Span 是 Tracing 中单个操作的基本单元。它代表在特定时间范围内的操作,比如一个请求、一个函数调用或特定动作。Span记录了特定操作的关键信息,包括开始和结束时间、操作名称、标签(键-值对)以及操作之间的关系。多个 Span 可以形成层次结构。\n在遇到 Skywalking Go 不支持的框架的情况下,用户可以手动创建 Span 以获取追踪信息。\n(为了作为示例,我删除了已支持的框架。以下仅为示例。请在使用私有或不支持的框架的 API 时参考)\n例如,当需要追踪HTTP响应时,可以在处理请求的方法内部使用 trace.CreateEntrySpan() 来创建一个 span,在处理完成后使用 trace.StopSpan() 来结束这个 span。在发送HTTP请求时,使用 trace.CreateExitSpan() 来创建一个 span,在请求返回后结束这个 span。\n这里有两个名为 consumer 和 provider 的HTTP服务。当用户访问 consumer 服务时,它在内部接收用户的请求,然后访问 provider 以获取资源。\n// consumer.go package main import ( \u0026#34;io\u0026#34; \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func getProvider() (*http.Response, error) { // 新建 HTTP 请求 \treq, err := http.NewRequest(\u0026#34;GET\u0026#34;, \u0026#34;http://localhost:9998/provider\u0026#34;, http.NoBody) // 在发送 HTTP 请求之前创建 ExitSpan \ttrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { // Injector 向请求中添加特定的 header 信息 \treq.Header.Add(headerKey, headerValue) return nil }) // 结束 ExitSpan,使用 defer 确保在函数返回时执行 \tdefer trace.StopSpan() // 发送请求 \tclient := \u0026amp;http.Client{} resp, err := client.Do(req) if err != nil { return nil, err } return resp, nil } func consumerHandler(w http.ResponseWriter, r *http.Request) { // 创建 EntrySpan 来追踪 consumerHandler 方法的执行 \ttrace.CreateEntrySpan(r.Method+\u0026#34;/consumer\u0026#34;, func(headerKey string) (string, error) { // Extractor 获取请求中添加的 header 信息 \treturn r.Header.Get(headerKey), nil }) // 结束 EntrySpan \tdefer trace.StopSpan() // 准备发送 HTTP 请求 \tresp, err := getProvider() body, err := io.ReadAll(resp.Body) if err != nil { return } _, _ = w.Write(body) } func main() { http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } // provider.go package main import ( \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { // 创建 EntrySpan 来追踪 providerHandler 方法的执行 \ttrace.CreateEntrySpan(\u0026#34;GET:/provider\u0026#34;, func(headerKey string) (string, error) { return r.Header.Get(headerKey), nil }) // 结束 EntrySpan \tdefer trace.StopSpan() _, _ = w.Write([]byte(\u0026#34;success from provider\u0026#34;)) } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) _ = 
http.ListenAndServe(\u0026#34;:9998\u0026#34;, nil) } 然后中终端中执行:\ngo build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o consumer ./consumer.go ./consumer go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o provider ./provider.go ./provider curl 127.0.0.1:9999/consumer 此时 UI 中将会显示你所创建的span信息\n如果需要追踪仅在本地执行的方法,可以使用 trace.CreateLocalSpan()。如果不需要监控来自另一端的信息或状态,可以将 ExitSpan 和 EntrySpan 更改为 LocalSpan。\n以上方法仅作为示例,用户可以决定追踪的粒度以及程序中需要进行追踪的位置。\n注意,如果程序结束得太快,可能会导致 Tracing 数据无法异步发送到 SkyWalking 后端。\n填充 Span 当需要记录额外信息时,包括创建/更新标签、追加日志和设置当前被追踪 Span 的新操作名称时,可以使用这些API。这些操作用于增强追踪信息,提供更详细的上下文描述,有助于更好地理解被追踪的事件或操作。\nToolkit trace APIs 提供了一种简便的方式来访问和操作 Trace 数据:\n 设置标签:SetTag() 添加日志:AddLog() 设置 Span 名称:SetOperationName() 获取各种ID:GetTraceID(), GetSegmentID(), GetSpanID()  例如,如果需要在一个 Span 中记录HTTP状态码,就可以在 Span 未结束时调用以下接口:\ntrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { r.Header.Add(headerKey, headerValue) return nil }) resp, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) trace.SetTag(\u0026#34;status_code\u0026#34;, fmt.Sprintf(\u0026#34;%d\u0026#34;, resp.StatusCode)) spanID := trace.GetSpanID() trace.StopSpan() 在调用这些方法时,当前线程需要有正在活跃的 span。\n异步 APIs 异步API 用于跨 goroutines 操作 spans。包括以下情况:\n 包含多个 goroutines 的程序,需要在不同上下文中中操作 Span。 在异步操作时更新或记录 Span 的信息。 延迟结束 Span。  按照以下步骤使用:\n 获取 CreateSpan 的返回值 SpanRef。 调用 spanRef.PrepareAsync() ,准备在另一个 goroutine 中执行操作。 当前 goroutine 工作结束后,调用 trace.StopSpan() 结束该 span(仅影响当前 goroutine)。 将 spanRef 传递给另一个 goroutine。 完成工作后在任意 goroutine 中调用 spanRef.AsyncFinish()。  以下为示例:\nspanRef, err := trace.CreateLocalSpan(\u0026#34;LocalSpan\u0026#34;) if err != nil { return } spanRef.PrepareAsync() go func(){ // some work \tspanRef.AsyncFinish() }() // some work trace.StopSpan() Correlation Context Correlation Context 用于在 Span 间传递参数,父 Span 会把 Correlation Context 递给其所有子 Spans。它允许在不同应用程序的 spans 之间传输信息。Correlation Context 的默认元素个数为3,其内容长度不能超过128字节。\nCorrelation Context 通常用于以下等情况:\n 在 Spans 之间传递信息:它允许关键信息在不同 Span 之间传输,使上游和下游 Spans 能够获取彼此之间的关联和上下文。 传递业务参数:在业务场景中,涉及在不同 Span 之间传输特定参数或信息,如认证令牌、交易ID等。  用户可以使用 trace.SetCorrelation(key, value) 设置 Correlation Context ,并可以使用 value := trace.GetCorrelation(key) 在下游 spans 中获取相应的值。\n例如在下面的代码中,我们将值存储在 span 的标签中,以便观察结果:\npackage main import ( _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; \u0026#34;net/http\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { ctxValue := trace.GetCorrelation(\u0026#34;key\u0026#34;) trace.SetTag(\u0026#34;result\u0026#34;, ctxValue) } func consumerHandler(w http.ResponseWriter, r *http.Request) { trace.SetCorrelation(\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;) _, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) if err != nil { return } } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } 然后在终端执行:\nexport SW_AGENT_NAME=server go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o server ./server.go ./server curl 127.0.0.1:9999/consumer 最后在 providerHandler() 的 Span 中找到了 Correlation Context 的信息:\n总结 本文讲述了Skywalking Go的 Trace APIs 及其应用。它为用户提供了自定义追踪的功能。\n更多关于该接口的介绍见文档:Tracing APIs。\n欢迎大家来使用新版本。\n","excerpt":"背景介绍 SkyWalking Go是一个开源的非侵入式Golang代理程序,用于监控、追踪和在分布式系统中进行数据收集。它使用户能够观察系统内请求的流程和延迟,从各个系统组件收集性能数据以进行性能监 
…","ref":"/zh/2023-10-18-skywalking-toolkit-trace/","title":"SkyWalking Go Toolkit Trace 详解"},{"body":"","excerpt":"","ref":"/tags/agent/","title":"Agent"},{"body":"CommunityOverCode (原 ApacheCon) 是 Apache 软件基金会(ASF)的官方全球系列大会。自 1998 年以来\u0026ndash;在 ASF 成立之前 \u0026ndash; ApacheCon 已经吸引了各个层次的参与者,在 300 多个 Apache 项目及其不同的社区中探索 \u0026ldquo;明天的技术\u0026rdquo;。CommunityOverCode 通过动手实作、主题演讲、实际案例研究、培训、黑客松活动等方式,展示 Apache 项目的最新发展和新兴创新。\nCommunityOverCode 展示了无处不在的 Apache 项目的最新突破和 Apache 孵化器中即将到来的创新,以及开源开发和以 Apache 之道领导社区驱动的项目。与会者可以了解到独立于商业利益、企业偏见或推销话术之外的核心开源技术。\nSkyWalking的Golang自动探针实践 刘晗 分布式追踪技术在可观测领域尤为重要,促使各个语言的追踪探针的易用性获得了更多的关注。目前在golang语言探针方面大多为手动埋点探针,接入流程过于复杂,而且局限性很强。本次讨论的重点着重于简化golang语言探针的接入方式,创新性的使用了自动埋点技术,并且突破了很多框架中对于上下文信息的依赖限制。\nB站视频地址\nBanyanDB一个高扩展性的分布式追踪数据库 高洪涛 追踪数据是一种用于分析微服务系统性能和故障的重要数据源,它记录了系统中每个请求的调用链路和相关指标。随着微服务系统的规模和复杂度的增长,追踪数据的量级也呈指数级增长,给追踪数据的存储和查询带来了巨大的挑战。传统的关系型数据库或者时序数据库往往难以满足追踪数据的高效存储和灵活查询的需求。 BanyanDB是一个专为追踪数据而设计的分布式数据库,它具有高扩展性、高性能、高可用性和高灵活性的特点。BanyanDB采用了基于时间序列的分片策略,将追踪数据按照时间范围划分为多个分片,每个分片可以独立地进行存储、复制和负载均衡。BanyanDB还支持多维索引,可以根据不同的维度对追踪数据进行快速过滤和聚合。 在本次演讲中,我们将介绍BanyanDB的设计思想、架构和实现细节,以及它在实际场景中的应用和效果。我们也将展示BanyanDB与其他数据库的对比和优势,以及它未来的发展方向和计划。\nB站视频地址\n","excerpt":"CommunityOverCode (原 ApacheCon) 是 Apache 软件基金会(ASF)的官方全球系列大会。自 1998 年以来\u0026ndash;在 ASF 成立之前 \u0026ndash; …","ref":"/zh/2023-08-20-coc-asia-2023/","title":"CommunityOverCode Conference 2023 Asia"},{"body":"","excerpt":"","ref":"/tags/database/","title":"Database"},{"body":"","excerpt":"","ref":"/tags/golang/","title":"Golang"},{"body":"SkyWalking PHP 0.7.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Start 0.7.0 development. by @jmjoy in https://github.com/apache/skywalking-php/pull/90 Add more info for error log. by @jmjoy in https://github.com/apache/skywalking-php/pull/91 Fix amqplib and predis argument problems. by @jmjoy in https://github.com/apache/skywalking-php/pull/92 Add Memcache plugin. by @jmjoy in https://github.com/apache/skywalking-php/pull/93 Refactor mysqli plugin, support procedural api. by @jmjoy in https://github.com/apache/skywalking-php/pull/94 Fix target address in cross process header. by @jmjoy in https://github.com/apache/skywalking-php/pull/95 Release SkyWalking PHP 0.7.0 by @jmjoy in https://github.com/apache/skywalking-php/pull/96  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.7.0...v0.7.0\nPECL https://pecl.php.net/package/skywalking_agent/0.7.0\n","excerpt":"SkyWalking PHP 0.7.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-php-0-7-0/","title":"Release Apache SkyWalking PHP 0.7.0"},{"body":"SkyWalking BanyanDB Helm 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Deploy banyandb with standalone mode by Chart  ","excerpt":"SkyWalking BanyanDB Helm 0.1.0 is released. 
Go to downloads page to find release tars.\nFeatures …","ref":"/events/release-apache-skywalking-banyandb-helm-0-1-0/","title":"Release Apache SkyWalking BanyanDB Helm 0.1.0"},{"body":"","excerpt":"","ref":"/tags/arthas/","title":"Arthas"},{"body":"背景介绍 Arthas 是一款常用的 Java 诊断工具,我们可以在 SkyWalking 监控到服务异常后,通过 Arthas 进一步分析和诊断以快速定位问题。\n在 Arthas 实际使用中,通常由开发人员拷贝或者下载安装包到服务对应的VM或者容器中,attach 到对应的 Java 进程进行问题排查。这一过程不可避免的会造成服务器敏感运维信息的扩散, 而且在分秒必争的问题排查过程中,这些繁琐的操作无疑会浪费大量时间。\nSkyWalking Java Agent 伴随 Java 服务一起启动,并定期上报服务、实例信息给OAP Server。我们可以借助 SkyWalking Java Agent 的插件化能力,开发一个 Arthas 控制插件, 由该插件管理 Arthas 运行生命周期,通过页面化的方式,完成Arthas的启动与停止。最终实现效果可以参考下图:\n要完成上述功能,我们需要实现以下几个关键点:\n 开发 agent arthas-control-plugin,执行 arthas 的启动与停止命令 开发 oap arthas-controller-module ,下发控制命令给 arthas agent plugin 定制 skywalking-ui, 连接 arthas-tunnel-server,发送 arthas 命令并获取执行结果  以上各个模块之间的交互流程如下图所示:\nconnect disconnect 本文涉及的所有代码均已发布在 github skywalking-x-arthas 上,如有需要,大家可以自行下载代码测试。 文章后半部分将主要介绍代码逻辑及其中包含的SkyWalking扩展点。\nagent arthas-control-plugin 首先在 skywalking-java/apm-sniffer/apm-sdk-plugin 下创建一个 arthas-control-plugin, 该模块在打包后会成为 skywalking-agent/plugins 下的一个插件, 其目录结构如下:\narthas-control-plugin/ ├── pom.xml └── src └── main ├── java │ └── org │ └── apache │ └── skywalking │ └── apm │ └── plugin │ └── arthas │ ├── config │ │ └── ArthasConfig.java # 模块配置 │ ├── service │ │ └── CommandListener.java # boot service,监听 oap command │ └── util │ ├── ArthasCtl.java # 控制 arthas 的启动与停止 │ └── ProcessUtils.java ├── proto │ └── ArthasCommandService.proto # 与oap server通信的 grpc 协议定义 └── resources └── META-INF └── services # boot service spi service └── org.apache.skywalking.apm.agent.core.boot.BootService 16 directories, 7 files 在 ArthasConfig.java 中,我们定义了以下配置,这些参数将在 arthas 启动时传递。\n以下的配置可以通过 agent.config 文件、system prop、env variable指定。 关于 skywalking-agent 配置的初始化的具体流程,大家可以参考 SnifferConfigInitializer 。\npublic class ArthasConfig { public static class Plugin { @PluginConfig(root = ArthasConfig.class) public static class Arthas { // arthas 目录  public static String ARTHAS_HOME; // arthas 启动时连接的tunnel server  public static String TUNNEL_SERVER; // arthas 会话超时时间  public static Long SESSION_TIMEOUT; // 禁用的 arthas command  public static String DISABLED_COMMANDS; } } } 接着,我们看下 CommandListener.java 的实现,CommandListener 实现了 BootService 接口, 并通过 resources/META-INF/services 下的文件暴露给 ServiceLoader。\nBootService 的定义如下,共有prepare()、boot()、onComplete()、shutdown()几个方法,这几个方法分别对应插件生命周期的不同阶段。\npublic interface BootService { void prepare() throws Throwable; void boot() throws Throwable; void onComplete() throws Throwable; void shutdown() throws Throwable; default int priority() { return 0; } } 在 ServiceManager 类的 boot() 方法中, 定义了BootService 的 load 与启动流程,该方法 由SkyWalkingAgent 的 premain 调用,在主程序运行前完成初始化与启动:\npublic enum ServiceManager { INSTANCE; ... ... public void boot() { bootedServices = loadAllServices(); prepare(); startup(); onComplete(); } ... ... } 回到我们 CommandListener 的 boot 方法,该方法在 agent 启动之初定义了一个定时任务,这个定时任务会轮询 oap ,查询是否需要启动或者停止arthas:\npublic class CommandListener implements BootService, GRPCChannelListener { ... ... @Override public void boot() throws Throwable { getCommandFuture = Executors.newSingleThreadScheduledExecutor( new DefaultNamedThreadFactory(\u0026#34;CommandListener\u0026#34;) ).scheduleWithFixedDelay( new RunnableWithExceptionProtection( this::getCommand, t -\u0026gt; LOGGER.error(\u0026#34;get arthas command error.\u0026#34;, t) ), 0, 2, TimeUnit.SECONDS ); } ... ... 
} getCommand方法中定义了start、stop的处理逻辑,分别对应页面上的 connect 和 disconnect 操作。 这两个 command 有分别转给 ArthasCtl 的 startArthas 和 stopArthas 两个方法处理,用来控制 arthas 的启停。\n在 startArthas 方法中,启动arthas-core.jar 并使用 skywalking-agent 的 serviceName 和 instanceName 注册连接至配置文件中指定的arthas-tunnel-server。\nArthasCtl 逻辑参考自 Arthas 的 BootStrap.java ,由于不是本篇文章的重点,这里不再赘述,感兴趣的小伙伴可以自行查看。\nswitch (commandResponse.getCommand()) { case START: if (alreadyAttached()) { LOGGER.warn(\u0026#34;arthas already attached, no need start again\u0026#34;); return; } try { arthasTelnetPort = SocketUtils.findAvailableTcpPort(); ArthasCtl.startArthas(PidUtils.currentLongPid(), arthasTelnetPort); } catch (Exception e) { LOGGER.info(\u0026#34;error when start arthas\u0026#34;, e); } break; case STOP: if (!alreadyAttached()) { LOGGER.warn(\u0026#34;no arthas attached, no need to stop\u0026#34;); return; } try { ArthasCtl.stopArthas(arthasTelnetPort); arthasTelnetPort = null; } catch (Exception e) { LOGGER.info(\u0026#34;error when stop arthas\u0026#34;, e); } break; } 看完 arthas 的启动与停止控制逻辑,我们回到 CommandListener 的 statusChanged 方法, 由于要和 oap 通信,这里我们按照惯例监听 grpc channel 的状态,只有状态正常时才会执行上面的getCommand轮询。\npublic class CommandListener implements BootService, GRPCChannelListener { ... ... @Override public void statusChanged(final GRPCChannelStatus status) { if (GRPCChannelStatus.CONNECTED.equals(status)) { Object channel = ServiceManager.INSTANCE.findService(GRPCChannelManager.class).getChannel(); // DO NOT REMOVE Channel CAST, or it will throw `incompatible types: org.apache.skywalking.apm.dependencies.io.grpc.Channel  // cannot be converted to io.grpc.Channel` exception when compile due to agent core\u0026#39;s shade of grpc dependencies.  commandServiceBlockingStub = ArthasCommandServiceGrpc.newBlockingStub((Channel) channel); } else { commandServiceBlockingStub = null; } this.status = status; } ... ... } 上面的代码,细心的小伙伴可能会发现,getChannel() 的返回值被向上转型成了 Object, 而在下面的 newBlockingStub 方法中,又强制转成了 Channel。\n看似有点多此一举,其实不然,我们将这里的转型去掉,尝试编译就会收到下面的错误:\n[ERROR] Failed to execute goal org.apache.maven.plugins:maven-compiler-plugin:3.10.1:compile (default-compile) on project arthas-control-plugin: Compilation failure [ERROR] .../CommandListener.java:[59,103] 不兼容的类型: org.apache.skywalking.apm.dependencies.io.grpc.Channel无法转换为io.grpc.Channel 上面的错误提示 ServiceManager.INSTANCE.findService(GRPCChannelManager.class).getChannel() 的返回值类型是 org.apache.skywalking.apm.dependencies.io.grpc.Channel,无法被赋值给 io.grpc.Channel 引用。\n我们查看GRPCChannelManager的getChannel()方法代码会发现,方法定义的返回值明明是 io.grpc.Channel,为什么编译时会报上面的错误?\n其实这是skywalking-agent的一个小魔法,由于 agent-core 最终会被打包进 skywalking-agent.jar,启动时由系统类装载器(或者其他父级类装载器)直接装载, 为了防止所依赖的类库和被监控服务的类发生版本冲突,agent 核心代码在打包时使用了maven-shade-plugin, 该插件会在 maven package 阶段改变 grpc 依赖的包名, 我们在源代码里看到的是 io.grpc.Channel,其实在真正运行时已经被改成了 org.apache.skywalking.apm.dependencies.io.grpc.Channel,这便可解释上面编译报错的原因。\n除了grpc以外,其他一些 well-known 的 dependency 也会进行 shade 操作,详情大家可以参考 apm-agent-core pom.xml :\n\u0026lt;plugin\u0026gt; \u0026lt;artifactId\u0026gt;maven-shade-plugin\u0026lt;/artifactId\u0026gt; \u0026lt;executions\u0026gt; \u0026lt;execution\u0026gt; \u0026lt;phase\u0026gt;package\u0026lt;/phase\u0026gt; \u0026lt;goals\u0026gt; \u0026lt;goal\u0026gt;shade\u0026lt;/goal\u0026gt; \u0026lt;/goals\u0026gt; \u0026lt;configuration\u0026gt; ... ... 
\u0026lt;relocations\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.com.google.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.com.google.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.grpc.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.grpc.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.netty.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.netty.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.opencensus.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.opencensus.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.perfmark.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.perfmark.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.org.slf4j.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.org.slf4j.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;/relocations\u0026gt; ... ... \u0026lt;/configuration\u0026gt; \u0026lt;/execution\u0026gt; \u0026lt;/executions\u0026gt; \u0026lt;/plugin\u0026gt; 除了上面的注意点以外,我们来看一下另一个场景,假设我们需要在 agent plugin 的 interceptor 中使用 plugin 中定义的 BootService 会发生什么?\n我们回到 BootService 的加载逻辑,为了加载到 plugin 中定义的BootService,ServiceLoader 指定了类装载器为AgentClassLoader.getDefault(), (这行代码历史非常悠久,可以追溯到2018年:Allow use SkyWalking plugin to override service in Agent core. #1111 ), 由此可见,plugin 中定义的 BootService 的 classloader 是 AgentClassLoader.getDefault():\nvoid load(List\u0026lt;BootService\u0026gt; allServices) { for (final BootService bootService : ServiceLoader.load(BootService.class, AgentClassLoader.getDefault())) { allServices.add(bootService); } } 再来看下 interceptor 的加载逻辑,InterceptorInstanceLoader.java 的 load 方法规定了如果父加载器相同,plugin 中的 interceptor 将使用一个新创建的 AgentClassLoader (在绝大部分简单场景中,plugin 的 interceptor 都由同一个 AgentClassLoader 加载):\npublic static \u0026lt;T\u0026gt; T load(String className, ClassLoader targetClassLoader) throws IllegalAccessException, InstantiationException, ClassNotFoundException, AgentPackageNotFoundException { ... ... pluginLoader = EXTEND_PLUGIN_CLASSLOADERS.get(targetClassLoader); if (pluginLoader == null) { pluginLoader = new AgentClassLoader(targetClassLoader); EXTEND_PLUGIN_CLASSLOADERS.put(targetClassLoader, pluginLoader); } ... ... 
} 按照类装载器的委派机制,interceptor 中如果用到了 BootService,也会由当前的类的装载器去装载。 所以 ServiceManager 中装载的 BootService 和 interceptor 装载的 BootService 并不是同一个 (一个 class 文件被不同的 classloader 装载了两次),如果在 interceptor 中 调用 BootService 方法,同样会发生 cast 异常。 由此可见,目前的实现并不支持我们在interceptor中直接调用 plugin 中 BootService 的方法,如果需要调用,只能将 BootService 放到 agent-core 中,由更高级别的类装载器优先装载。\n这其实并不是 skywalking-agent 的问题,skywalking agent plugin 专注于自己的应用场景,只需要关注 trace、meter 以及默认 BootService 的覆盖就可以了。 只是我们如果有扩展 skywalking-agent 的需求,要对其类装载机制做到心中有数,否则可能会出现一些意想不到的问题。\noap arthas-controller-module 看完 agent-plugin 的实现,我们再来看看 oap 部分的修改,oap 同样是模块化的设计,我们可以很轻松的增加一个新的模块,在 /oap-server/ 目录下新建 arthas-controller 子模块:\narthas-controller/ ├── pom.xml └── src └── main ├── java │ └── org │ └── apache │ └── skywalking │ └── oap │ └── arthas │ ├── ArthasControllerModule.java # 模块定义 │ ├── ArthasControllerProvider.java # 模块逻辑实现者 │ ├── CommandQueue.java │ └── handler │ ├── CommandGrpcHandler.java # grpc handler,供 plugin 通信使用 │ └── CommandRestHandler.java # http handler,供 skywalking-ui 通信使用 ├── proto │ └── ArthasCommandService.proto └── resources └── META-INF └── services # 模块及模块实现的 spi service ├── org.apache.skywalking.oap.server.library.module.ModuleDefine └── org.apache.skywalking.oap.server.library.module.ModuleProvider 模块的定义非常简单,只包含一个模块名,由于我们新增的模块并不需要暴露service给其他模块调用,services 我们返回一个空数组\npublic class ArthasControllerModule extends ModuleDefine { public static final String NAME = \u0026#34;arthas-controller\u0026#34;; public ArthasControllerModule() { super(NAME); } @Override public Class\u0026lt;?\u0026gt;[] services() { return new Class[0]; } } 接着是模块实现者,实现者取名为 default,module 指定该 provider 所属模块,由于没有模块的自定义配置,newConfigCreator 我们返回null即可。 start 方法分别向 CoreModule 的 grpc 服务和 http 服务注册了两个 handler,grpc 服务和 http 服务就是我们熟知的 11800 和 12800 端口:\npublic class ArthasControllerProvider extends ModuleProvider { @Override public String name() { return \u0026#34;default\u0026#34;; } @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return ArthasControllerModule.class; } @Override public ConfigCreator\u0026lt;?\u0026gt; newConfigCreator() { return null; } @Override public void prepare() throws ServiceNotProvidedException { } @Override public void start() throws ServiceNotProvidedException, ModuleStartException { // grpc service for agent  GRPCHandlerRegister grpcService = getManager().find(CoreModule.NAME) .provider() .getService(GRPCHandlerRegister.class); grpcService.addHandler( new CommandGrpcHandler() ); // rest service for ui  HTTPHandlerRegister restService = getManager().find(CoreModule.NAME) .provider() .getService(HTTPHandlerRegister.class); restService.addHandler( new CommandRestHandler(), Collections.singletonList(HttpMethod.POST) ); } @Override public void notifyAfterCompleted() throws ServiceNotProvidedException { } @Override public String[] requiredModules() { return new String[0]; } } 最后在配置文件中注册本模块及模块实现者,下面的配置表示 arthas-controller 这个 module 由 default provider 提供实现:\narthas-controller:selector:defaultdefault:CommandGrpcHandler 和 CommandHttpHandler 的逻辑非常简单,CommandHttpHandler 定义了 connect 和 disconnect 接口, 收到请求后会放到一个 Queue 中供 CommandGrpcHandler 消费,Queue 的实现如下,这里不再赘述:\npublic class CommandQueue { private static final Map\u0026lt;String, Command\u0026gt; COMMANDS = new ConcurrentHashMap\u0026lt;\u0026gt;(); // produce by connect、disconnect public static void produceCommand(String serviceName, String instanceName, Command command) { COMMANDS.put(serviceName + instanceName, command); } // consume by agent getCommand task public static Optional\u0026lt;Command\u0026gt; consumeCommand(String serviceName, String instanceName) { return Optional.ofNullable(COMMANDS.remove(serviceName + instanceName)); } } skywalking-ui arthas console 完成了 agent 和 oap 的开发,我们再看下 ui 部分:\n connect:调用oap server connect 接口,并连接 arthas-tunnel-server disconnect:调用oap server disconnect 接口,并与 arthas-tunnel-server 断开连接 arthas 命令交互,这部分代码主要参考 arthas,大家可以查看 web-ui console 的实现  修改完skywalking-ui的代码后,我们可以直接通过 npm run dev 测试了。\n如果需要通过主项目打包,别忘了在apm-webapp 的 ApplicationStartUp.java 类中添加一条 arthas 的路由:\nServer .builder() .port(port, SessionProtocol.HTTP) .service(\u0026#34;/arthas\u0026#34;, oap) .service(\u0026#34;/graphql\u0026#34;, oap) .service(\u0026#34;/internal/l7check\u0026#34;, HealthCheckService.of()) .service(\u0026#34;/zipkin/config.json\u0026#34;, zipkin) .serviceUnder(\u0026#34;/zipkin/api\u0026#34;, zipkin) .serviceUnder(\u0026#34;/zipkin\u0026#34;, FileService.of( ApplicationStartUp.class.getClassLoader(), \u0026#34;/zipkin-lens\u0026#34;) .orElse(zipkinIndexPage)) .serviceUnder(\u0026#34;/\u0026#34;, FileService.of( ApplicationStartUp.class.getClassLoader(), \u0026#34;/public\u0026#34;) .orElse(indexPage)) .build() .start() .join(); 总结  BootService 启动及停止流程 如何利用 BootService 实现自定义逻辑 Agent Plugin 的类装载机制 maven-shade-plugin 的使用与注意点 如何利用 ModuleDefine 与 ModuleProvider 定义新的模块 如何向 GRPC、HTTP Service 添加新的 handler  如果你还有任何的疑问,欢迎大家与我交流 。\n","excerpt":"背景介绍 Arthas 是一款常用的 Java 诊断工具,我们可以在 SkyWalking 监控到服务异常后,通过 Arthas 进一步分析和诊断以快速定位问题。\n在 Arthas 实际使用中,通常由 …","ref":"/zh/2023-09-17-integrating-skywalking-with-arthas/","title":"将 Apache SkyWalking 与 Arthas 集成"},{"body":"SkyWalking Eyes 0.5.0 is released. Go to downloads page to find release tars.\n feat(header templates): add support for AGPL-3.0 by @elijaholmos in https://github.com/apache/skywalking-eyes/pull/125 Upgrade go version to 1.18 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/126 Add MulanPSL-2.0 support. 
by @jmjoy in https://github.com/apache/skywalking-eyes/pull/127 New Header Template: GPL-3.0-or-later by @ddlees in https://github.com/apache/skywalking-eyes/pull/128 Update README.md by @rovast in https://github.com/apache/skywalking-eyes/pull/129 Add more .env.[mode] support for VueJS project by @rovast in https://github.com/apache/skywalking-eyes/pull/130 Docker Multiple Architecture Support :fixes#9089 by @mohammedtabish0 in https://github.com/apache/skywalking-eyes/pull/132 Polish maven test for convenient debug by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/134 feat: list files by git when possible by @tisonkun in https://github.com/apache/skywalking-eyes/pull/133 Switch to npm ci for reliable builds by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/135 Fix optional dependencies are not excluded by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/136 Fix exclude not work for transitive dependencies and add recursive config by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/137 Add some tests for maven resovler by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/138 feat(header-fix): add Svelte support by @elijaholmos in https://github.com/apache/skywalking-eyes/pull/139 dep: do not write license files if they already exist by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/140 fix: not ignore *.txt to make sure files like CMakeLists.txt can be checked by @acelyc111 in https://github.com/apache/skywalking-eyes/pull/141 fix license header normalizer by @xiaoyawei in https://github.com/apache/skywalking-eyes/pull/142 Substitute variables in license content for header command by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/143 Correct indent in Apache-2.0 template by @tisonkun in https://github.com/apache/skywalking-eyes/pull/144 Add copyright-year configuration by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/145 dep/maven: use output file to store the dep tree for cleaner result by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/146 dep/maven: resolve dependencies before analysis by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/147 gha: switch to composite running mode and set up cache by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/149 gha: switch to composite running mode and set up cache by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/150 Fix GitHub Actions wrong path by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/151 Normalize license for cargo. by @jmjoy in https://github.com/apache/skywalking-eyes/pull/153 Remove space characters in license for cargo. 
by @jmjoy in https://github.com/apache/skywalking-eyes/pull/154 Bump up dependencies to fix CVE by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/155 Bump up GHA to depress warnings by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/156 Leverage the built-in cache in setup-go@v4 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/157 Dependencies check should report unknown licneses by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/158 Fix wrong indentation in doc by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/159 Add EPL-2.0 header template by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/160 Fix wrong indentation in doc about multi license config by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/161 dependency resolve with default template and specified output of license by @crholm in https://github.com/apache/skywalking-eyes/pull/163 Bump up go git to support .gitconfig user path by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/164 Draft release notes for 0.5.0 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/165 Remove \u0026ldquo;portions copyright\u0026rdquo; header normalizer by @antgamdia in https://github.com/apache/skywalking-eyes/pull/166  Full Changelog: https://github.com/apache/skywalking-eyes/compare/v0.4.0...v0.5.0\n","excerpt":"SkyWalking Eyes 0.5.0 is released. Go to downloads page to find release tars.\n feat(header …","ref":"/events/release-apache-skywalking-eyes-0-5-0/","title":"Release Apache SkyWalking Eyes 0.5.0"},{"body":"Abstract Apache SkyWalking hosts SkyWalking Summit 2023 on Nov. 4th, 2023, UTC+8, sponsored by ZMOps and Tetrate.\nWe are going to share SkyWalking\u0026rsquo;s roadmap, features, product experiences, and open-source culture.\nWelcome to join us.\nVenue Addr./地址 上海大华虹桥假日酒店\nDate 8:00 - 17:00, Nov 4th.\nRegister Register for IN-PERSON ticket\nCall For Proposals (CFP) The Call For Proposals open from now to 18:00 on Oct. 27th 2023, UTC+8. Submit your proposal at here\nWe have 1 open session and 8 sessions for the whole event.\n Open session is reserved for SkyWalking PMC members. 6 sessions are opened for CFP process. 2 sessions are reserved for sponsors.  Sponsors  ZMOps Inc. Tetrate Inc.  Anti-harassment policy SkyWalkingDay is dedicated to providing a harassment-free experience for everyone. We do not tolerate harassment of participants in any form. Sexual language and imagery will also not be tolerated in any event venue. Participants violating these rules may be sanctioned or expelled without a refund, at the discretion of the event organizers. Our anti-harassment policy can be found at Apache website.\nContact Us Send mail to dev@skywalking.apache.org.\n","excerpt":"Abstract Apache SkyWalking hosts SkyWalking Summit 2023 on Nov. 4th, 2023, UTC+8, sponsored by ZMOps …","ref":"/events/summit-23-cn/","title":"SkyWalking Summit 2023 @ Shanghai China"},{"body":"SkyWalking 9.6.0 is released. Go to downloads page to find release tars.\nNew Alerting Kernel  MQE(Metrics Query Expression) and a new notification mechanism are supported.  Support Loki LogQL  Newly added support for Loki LogQL and Grafana Loki Dashboard for SkyWalking collected logs  WARNING  ElasticSearch 6 storage relative tests are removed. It worked and is not promised due to end of life officially.  Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. 
Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. 
Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.6.0 is released. 
Go to downloads page to find release tars.\nNew Alerting Kernel …","ref":"/events/release-apache-skywalking-apm-9.6.0/","title":"Release Apache SkyWalking APM 9.6.0"},{"body":"SkyWalking Java Agent 9.0.0 is released. Go to downloads page to find release tars. Changes by Version\n9.0.0 Kernel Updates  Support re-transform/hot-swap classes with other java agents, and remove the obsolete cache enhanced class feature. Implement new naming policies for names of auxiliary type, interceptor delegate field, renamed origin method, method access name, method cache value field. All names are under sw$ name trait. They are predictable and unchanged after re-transform.  * SWAuxiliaryTypeNamingStrategy Auxiliary type name pattern: \u0026lt;origin_class_name\u0026gt;$\u0026lt;name_trait\u0026gt;$auxiliary$\u0026lt;auxiliary_type_instance_hash\u0026gt; * DelegateNamingResolver Interceptor delegate field name pattern: \u0026lt;name_trait\u0026gt;$delegate$\u0026lt;class_name_hash\u0026gt;$\u0026lt;plugin_define_hash\u0026gt;$\u0026lt;intercept_point_hash\u0026gt; * SWMethodNameTransformer Renamed origin method pattern: \u0026lt;name_trait\u0026gt;$original$\u0026lt;method_name\u0026gt;$\u0026lt;method_description_hash\u0026gt; * SWImplementationContextFactory Method cache value field pattern: cachedValue$\u0026lt;name_trait\u0026gt;$\u0026lt;origin_class_name_hash\u0026gt;$\u0026lt;field_value_hash\u0026gt; Accessor method name pattern: \u0026lt;renamed_origin_method\u0026gt;$accessor$\u0026lt;name_trait\u0026gt;$\u0026lt;origin_class_name_hash\u0026gt; Here is an example of manipulated enhanced class with new naming policies of auxiliary classes, fields, and methods\nimport sample.mybatis.controller.HotelController$sw$auxiliary$19cja42; import sample.mybatis.controller.HotelController$sw$auxiliary$p257su0; import sample.mybatis.domain.Hotel; import sample.mybatis.service.HotelService; @RequestMapping(value={\u0026#34;/hotel\u0026#34;}) @RestController public class HotelController implements EnhancedInstance { @Autowired @lazy private HotelService hotelService; private volatile Object _$EnhancedClassField_ws; // Interceptor delegate fields  public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$ain2do0$8im5jm1; public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$ain2do0$edkmf61; public static volatile /* synthetic */ ConstructorInter sw$delegate$td03673$ain2do0$qs9unv1; public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$fl4lnk1$m3ia3a2; public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$fl4lnk1$sufrvp1; public static volatile /* synthetic */ ConstructorInter sw$delegate$td03673$fl4lnk1$cteu7s1; // Origin method cache value field  private static final /* synthetic */ Method cachedValue$sw$td03673$g5sobj1; public HotelController() { this(null); sw$delegate$td03673$ain2do0$qs9unv1.intercept(this, new Object[0]); } private /* synthetic */ HotelController(sw.auxiliary.p257su0 p257su02) { } @GetMapping(value={\u0026#34;city/{cityId}\u0026#34;}) public Hotel selectByCityId(@PathVariable(value=\u0026#34;cityId\u0026#34;) int n) { // call interceptor with auxiliary type and parameters and origin method object  return (Hotel)sw$delegate$td03673$ain2do0$8im5jm1.intercept(this, new Object[]{n}, new HotelController$sw$auxiliary$19cja42(this, n), cachedValue$sw$td03673$g5sobj1); } // Renamed origin method  private /* synthetic */ Hotel sw$origin$selectByCityId$a8458p3(int cityId) { /*22*/ return 
this.hotelService.selectByCityId(cityId); } // Accessor of renamed origin method, calling from auxiliary type  final /* synthetic */ Hotel sw$origin$selectByCityId$a8458p3$accessor$sw$td03673(int n) { // Calling renamed origin method  return this.sw$origin$selectByCityId$a8458p3(n); } @OverRide public Object getSkyWalkingDynamicField() { return this._$EnhancedClassField_ws; } @OverRide public void setSkyWalkingDynamicField(Object object) { this._$EnhancedClassField_ws = object; } static { ClassLoader.getSystemClassLoader().loadClass(\u0026#34;org.apache.skywalking.apm.dependencies.net.bytebuddy.dynamic.Nexus\u0026#34;).getMethod(\u0026#34;initialize\u0026#34;, Class.class, Integer.TYPE).invoke(null, HotelController.class, -1072476370); // Method object  cachedValue$sw$td03673$g5sobj1 = HotelController.class.getMethod(\u0026#34;selectByCityId\u0026#34;, Integer.TYPE); } } Auxiliary type of Constructor :\nclass HotelController$sw$auxiliary$p257su0 { } Auxiliary type of selectByCityId method:\nclass HotelController$sw$auxiliary$19cja42 implements Runnable, Callable { private HotelController argument0; private int argument1; public Object call() throws Exception { return this.argument0.sw$origin$selectByCityId$a8458p3$accessor$sw$td03673(this.argument1); } @OverRide public void run() { this.argument0.sw$origin$selectByCityId$a8458p3$accessor$sw$td03673(this.argument1); } HotelController$sw$auxiliary$19cja42(HotelController hotelController, int n) { this.argument0 = hotelController; this.argument1 = n; } } Features and Bug Fixes  Support Jdk17 ZGC metric collect Support Jetty 11.x plugin Support access to the sky-walking tracer context in spring gateway filter Fix the scenario of using the HBase plugin with spring-data-hadoop. Add RocketMQ 5.x plugin Fix the conflict between the logging kernel and the JDK threadpool plugin. Fix the thread safety bug of finishing operation for the span named \u0026ldquo;SpringCloudGateway/sendRequest\u0026rdquo; Fix NPE in guava-eventbus-plugin. Add WebSphere Liberty 23.x plugin Add Plugin to support aerospike Java client Add ClickHouse parsing to the jdbc-common plugin. Support to trace redisson lock Upgrade netty-codec-http2 to 4.1.94.Final Upgrade guava to 32.0.1 Fix issue with duplicate enhancement by ThreadPoolExecutor Add plugin to support for RESTeasy 6.x. Fix the conditions for resetting UUID, avoid the same uuid causing the configuration not to be updated. Fix witness class in springmvc-annotation-5.x-plugin to avoid falling into v3 use cases. Fix Jedis-2.x plugin bug and add test for Redis cluster scene Merge two instrumentation classes to avoid duplicate enhancements in MySQL plugins. Support asynchronous invocation in jetty client 9.0 and 9.x plugin Add nacos-client 2.x plugin Staticize the tags for preventing synchronization in JDK 8 Add RocketMQ-Client-Java 5.x plugin Fix NullPointerException in lettuce-5.x-plugin.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 9.0.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-9-0-0/","title":"Release Apache SkyWalking Java Agent 9.0.0"},{"body":"SkyWalking PHP 0.6.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Polish doc about Swoole by @wu-sheng in https://github.com/apache/skywalking-php/pull/73 Start 0.6.0 development. 
by @jmjoy in https://github.com/apache/skywalking-php/pull/74 Fix hook for Doctrine PDO class by @matikij in https://github.com/apache/skywalking-php/pull/76 Log Exception in tracing span when throw. by @jmjoy in https://github.com/apache/skywalking-php/pull/75 Upgrade dependencies and adapt. by @jmjoy in https://github.com/apache/skywalking-php/pull/77 Fix required rust version and add runing php-fpm notice in docs. by @jmjoy in https://github.com/apache/skywalking-php/pull/78 Bump openssl from 0.10.48 to 0.10.55 by @dependabot in https://github.com/apache/skywalking-php/pull/79 Fix the situation where the redis port is string. by @jmjoy in https://github.com/apache/skywalking-php/pull/80 Optionally enable zend observer api for auto instrumentation. by @jmjoy in https://github.com/apache/skywalking-php/pull/81 Fix the empty span situation in redis after hook. by @jmjoy in https://github.com/apache/skywalking-php/pull/82 Add mongodb pluhgin. by @jmjoy in https://github.com/apache/skywalking-php/pull/83 Update rust nightly toolchain in CI and format. by @jmjoy in https://github.com/apache/skywalking-php/pull/84 Add notice document for skywalking_agent.enable. by @jmjoy in https://github.com/apache/skywalking-php/pull/85 Upgrade dependencies. by @jmjoy in https://github.com/apache/skywalking-php/pull/86 Fix docs by @heyanlong in https://github.com/apache/skywalking-php/pull/87 Add kafka reporter. by @jmjoy in https://github.com/apache/skywalking-php/pull/88 Release SkyWalking PHP Agent 0.6.0 by @jmjoy in https://github.com/apache/skywalking-php/pull/89  New Contributors  @matikij made their first contribution in https://github.com/apache/skywalking-php/pull/76  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.5.0...v0.6.0\nPECL https://pecl.php.net/package/skywalking_agent/0.6.0\n","excerpt":"SkyWalking PHP 0.6.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skwaylking-php-0-6-0/","title":"Release Apache SkyWalking PHP 0.6.0"},{"body":"On Aug. 10th, 2023, HashiCorp announced to adopt the Business Source License (BSL) from Mozilla Public License v2.0 (MPL 2.0), here is their post. They officially annouced they have changed the license for the ALL of their open-source products from the previous MPL 2.0 to a source-available license, BSL 1.1. Meanwhile, HashiCorp APIs, SDKs, and almost all other libraries will remain MPL 2.0.\nHashiCorp Inc. is one of the most important vendors in the cloud-native landscape, as well as Golang ecosystem. This kind of changes would have potential implications for SkyWalking, which is closely integrated with cloud-native technology stacks.\nConclusion First  What does that mean for SkyWalking users?  SkyWalking community has evaluated our dependencies from HashiCorp products and libraries, the current conclusion is\nSkyWalking users would NOT suffer any implication. 
All components of SkyWalking don\u0026rsquo;t have hard-dependency on BSL license affected codes.\nSkyWalking community have found out all following dependencies of all relative repositories, all licenses are TRUELY stayed unchanged, and compatible with Apache 2.0 License.\n OAP Server @kezhenxu94 @wu-sheng  consul-client Apache 2.0 Repo archived on Jul 27, 2023   BanyanDB @hanahmily @lujiajing1126  Server @hanahmily  hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0   CLI @hanahmily No HashiCorp Dependency   SkyWalking OAP CLI @kezhenxu94  github.com/hashicorp/hcl v1.0.0 MPL-2.0 All under swck as transitive dependencies   SWCK @hanahmily  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/go.net BSD-3 hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0   Go agent @mrproliu  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-hclog MIT hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-kms-wrapping/entropy MPL-2.0 hashicorp/go-kms-wrapping/entropy/v2 MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-plugin MPL-2.0 hashicorp/go-retryablehttp MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-secure-stdlib/base62 MPL-2.0 hashicorp/go-secure-stdlib/mlock MPL-2.0 hashicorp/go-secure-stdlib/parseutil MPL-2.0 hashicorp/go-secure-stdlib/password MPL-2.0 hashicorp/go-secure-stdlib/tlsutil MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/go-version MPL-2.0 hashicorp/go.net BSD-3-Clause hashicorp/golang-lru MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0 hashicorp/vault/api MPL-2.0 hashicorp/vault/sdk MPL-2.0 hashicorp/yamux MPL-2.0   SkyWalking eyes @kezhenxu94  none   SkyWalking Infra e2e @kezhenxu94  all under swck as transitive dependencies   SkyWalking rover(ebpf agent) @mrproliu  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-hclog MIT hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-retryablehttp MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0   SkyWalking satellite @mrproliu  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/go.net BSD-3-Clause hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0   SkyWalking Terraform (scripts) @kezhenxu94  No HashiCorp Dependency The scripts for Terraform users only. No hard requirement.    
The GitHub ID is listed about the PMC members did the evaluations.\nFAQ If I am using Consul to manage SkyWalking Cluster or configurations, does this license change bring an implication? YES, anyone using their server sides would be affected once you upgrade to later released versions after Aug. 10th, 2023.\nThis is HashiCorp\u0026rsquo;s statement\n End users can continue to copy, modify, and redistribute the code for all non-commercial and commercial use, except where providing a competitive offering to HashiCorp. Partners can continue to build integrations for our joint customers. We will continue to work closely with the cloud service providers to ensure deep support for our mutual technologies. Customers of enterprise and cloud-managed HashiCorp products will see no change as well. Vendors who provide competitive services built on our community products will no longer be able to incorporate future releases, bug fixes, or security patches contributed to our products.\n So, notice that, the implication about whether voilating BSL 1.1 is determined by the HashiCorp Inc about the status of the identified competitive relationship. We can\u0026rsquo;t provide any suggestions. Please refer to FAQs and contacts for the official explanations.\nWill SkyWalking continoue to use HashiCorp Consul as an optional cluster coordinator and/or an optional dynamic configuration server? For short term, YES, we will keep that part of codes, as the licenses of the SDK and the APIs are still in the MPL 2.0.\nBut, during the evaluation, we noticed the consul client we are using is rickfast/consul-client which had been archived by the owner on Jul 27, 2023. So, we are facing the issues that no maintaining and no version to upgrade. If there is not a new consul Java client lib available, we may have to remove this to avoid CVEs or version incompatible with new released servers.\n","excerpt":"On Aug. 10th, 2023, HashiCorp announced to adopt the Business Source License (BSL) from Mozilla …","ref":"/blog/2023-08-13-hashicorp-bsl/","title":"The Statement for SkyWalking users on HashiCorp license changes"},{"body":"SkyWalking Rust 0.8.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Add kafka reporter. by @jmjoy in https://github.com/apache/skywalking-rust/pull/61 Rename AbstractSpan to HandleSpanObject. by @jmjoy in https://github.com/apache/skywalking-rust/pull/62 Bump to 0.8.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/63  ","excerpt":"SkyWalking Rust 0.8.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-8-0/","title":"Release Apache SkyWalking Rust 0.8.0"},{"body":"SkyWalking Cloud on Kubernetes 0.8.0 is released. Go to downloads page to find release tars.\nFeatures  [Breaking Change] Remove the way to configure the agent through Configmap.  Bugs  Fix errors in banyandb e2e test.  Chores  Bump up golang to v1.20. Bump up golangci-lint to v1.53.3. Bump up skywalking-java-agent to v8.16.0. Bump up kustomize to v4.5.6. Bump up SkyWalking OAP to 9.5.0.  ","excerpt":"SkyWalking Cloud on Kubernetes 0.8.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-8-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.8.0"},{"body":"","excerpt":"","ref":"/tags/metrics/","title":"Metrics"},{"body":"Announcing Apache SkyWalking Go 0.2.0 I\u0026rsquo;m excited to announce the release of Apache SkyWalking Go 0.2.0! 
This version packs several awesome new features that I\u0026rsquo;ll overview below.\nLog Reporting The log reporting feature allows the Go agent to automatically collect log content from supported logging frameworks like logrus and zap. The logs are organized and sent to the SkyWalking backend for visualization. You can see how the logs appear for each service in the SkyWalking UI:\nMaking Logs Searchable You can configure certain log fields to make them searchable in SkyWalking. Set the SW_AGENT_LOG_REPORTER_LABEL_KEYS environment variable to include additional fields beyond the default log level.\nFor example, with logrus:\n# define log with fields logrus.WithField(\u0026#34;module\u0026#34;, \u0026#34;test-service\u0026#34;).Info(\u0026#34;test log\u0026#34;) Metrics Reporting The agent can now collect and report custom metrics data from runtime/metrics to the backend. Supported metrics are documented here.\nAutomatic Instrumentation In 0.1.0, you had to manually integrate the agent into your apps. Now, the new commands can automatically analyze and instrument projects at a specified path, no code changes needed! Try using the following command to import skywalking-go into your project:\n# inject to project at current path skywalking-go-agent -inject=./ -all Or you can still use the original manual approach if preferred.\nGet It Now! Check out the CHANGELOG for the full list of additions and fixes. I encourage you to try out SkyWalking Go 0.2.0 today! Let me know if you have any feedback.\n","excerpt":"Announcing Apache SkyWalking Go 0.2.0 I\u0026rsquo;m excited to announce the release of Apache SkyWalking …","ref":"/blog/2023-07-31-skywalking-go-0.2.0-release/","title":"New Features of SkyWalking Go 0.2.0"},{"body":"SkyWalking Go 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance the plugin rewrite ability to support switch and if/else in the plugin codes. Support inject the skywalking-go into project through agent. Support add configuration for plugin. Support metrics report API for plugin. Support report Golang runtime metrics. Support log reporter. Enhance the logrus logger plugin to support adapt without any settings method invoke. Disable sending observing data if the gRPC connection is not established for reducing the connection error log. Support enhance vendor management project. Support using base docker image to building the application.  Plugins  Support go-redis v9 redis client framework. Support collecting Native HTTP URI parameter on server side. Support Mongo database client framework. Support Native SQL database client framework with MySQL Driver. Support Logrus log report to the backend. Support Zap log report to the backend.  Documentation  Combine Supported Libraries and Performance Test into Plugins section. Add Tracing, Metrics and Logging document into Plugins section.  Bug Fixes  Fix throw panic when log the tracing context before agent core initialized. Fix plugin version matcher tryToFindThePluginVersion to support capital letters in module paths and versions.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Go 0.2.0 is released. 
Go to downloads page to find release tars.\nFeatures  Enhance the …","ref":"/events/release-apache-skwaylking-go-0.2.0/","title":"Release Apache SkyWalking Go 0.2.0"},{"body":"今年 COSCUP 2023 在国立台湾科技大学举办。 COSCUP 是由台湾开放原始码社群联合推动的年度研讨会,起源于2006年,是台湾自由软体运动 (FOSSM) 重要的推动者之一。活动包括有讲座、摊位、社团同乐会等,除了邀请国际的重量级演讲者之外,台湾本土的自由软体推动者也经常在此发表演说,会议的发起人、工作人员与演讲者都是志愿参与的志工。COSCUP 的宗旨在于提供一个连接开放原始码开发者、使用者与推广者的平台。希望借由每年一度的研讨会来推动自由及开放原始码软体 (FLOSS)。由于有许多赞助商及热心捐助者,所有议程都是免费参加。\n在Go语言中使用自动增强探针完成链路追踪以及监控 B站视频地址\n刘晗,Tetrate\n  讲师介绍 刘晗,Tetrate 工程师,Apache SkyWalking PMC 成员,专注于应用性能可观测性领域。\n  议题概要\n   为什么需要自动增强探针 Go Agent演示 实现原理 未来展望  ","excerpt":"今年 COSCUP 2023 在国立台湾科技大学举办。 COSCUP 是由台湾开放原始码社群联合推动的年度研讨会,起源于2006年,是台湾自由软体运动 (FOSSM) 重要的推动者之一。活动包括有讲 …","ref":"/zh/2023-07-30-complete-auto-instrumentation-go-agent-for-distributed-tracing-and-monitoring/","title":"[视频] 在Go语言中使用自动增强探针完成链路追踪以及监控 - COSCUP Taiwan 2023"},{"body":"SkyWalking Kubernetes Helm Chart 4.5.0 is released. Go to downloads page to find release tars.\n Add helm chart for swck v0.7.0. Add pprof port export in satellite. Trunc the resource name in swck\u0026rsquo;s helm chart to no more than 63 characters. Adding the configmap into cluster role for oap init mode. Add config to set Pod securityContext. Keep the job name prefix the same as OAP Deployment name. Use startup probe option for first initialization of application Allow setting env for UI deployment. Add Istio ServiceEntry permissions.  ","excerpt":"SkyWalking Kubernetes Helm Chart 4.5.0 is released. Go to downloads page to find release tars.\n Add …","ref":"/events/release-apache-skywalking-kubernetes-helm-chart-4.5.0/","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.5.0"},{"body":"SkyWalking BanyanDB 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Add TSDB concept document. [UI] Add YAML editor for inputting query criteria. Refactor TopN to support NULL group while keeping seriesID from the source measure. Add a sharded buffer to TSDB to replace Badger\u0026rsquo;s memtable. Badger KV only provides SST. Add a meter system to control the internal metrics. Add multiple metrics for measuring the storage subsystem. Refactor callback of TopNAggregation schema event to avoid deadlock and reload issue. Fix max ModRevision computation with inclusion of TopNAggregation Enhance meter performance Reduce logger creation frequency Add units to memory flags Introduce TSTable to customize the block\u0026rsquo;s structure Add /system endpoint to the monitoring server that displays a list of nodes' system information. Enhance the liaison module by implementing access logging. Add the Istio scenario stress test based on the data generated by the integration access log. Generalize the index\u0026rsquo;s docID to uint64. Remove redundant ID tag type. Improve granularity of index in measure by leveling up from data point to series. [UI] Add measure CRUD operations. [UI] Add indexRule CRUD operations. [UI] Add indexRuleBinding CRUD operations.  Bugs  Fix iterator leaks and ensure proper closure and introduce a closer to guarantee all iterators are closed Fix resource corrupts caused by update indexRule operation Set the maximum integer as the limit for aggregation or grouping operations when performing aggregation or grouping operations in a query plan.  Chores  Bump go to 1.20. 
Set KV\u0026rsquo;s minimum memtable size to 8MB [docs] Fix docs crud examples error Modified TestGoVersion to check for CPU architecture and Go Version Bump node to 18.16  ","excerpt":"SkyWalking BanyanDB 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Add TSDB …","ref":"/events/release-apache-skywalking-banyandb-0-4-0/","title":"Release Apache SkyWalking BanyanDB 0.4.0"},{"body":"Background In previous articles, We have discussed how to use SkyWalking and eBPF for performance problem detection within processes and networks. They are good methods to locate issues, but still there are some challenges:\n The timing of the task initiation: It\u0026rsquo;s always challenging to address the processes that require performance monitoring when problems occur. Typically, manual engagement is required to identify processes and the types of performance analysis necessary, which cause extra time during the crash recovery. The root cause locating and the time of crash recovery conflict with each other from time to time. In the real case, rebooting would be the first choice of recovery, meanwhile, it destroys the site of crashing. Resource consumption of tasks: The difficulties to determine the profiling scope. Wider profiling causes more resources than it should. We need a method to manage resource consumption and understand which processes necessitate performance analysis. Engineer capabilities: On-call is usually covered by the whole team, which have junior and senior engineers, even senior engineers have their understanding limitation of the complex distributed system, it is nearly impossible to understand the whole system by a single one person.  The Continuous Profiling is a new created mechanism to resolve the above issues.\nAutomate Profiling As profiling is resource costing and high experience required, how about introducing a method to narrow the scope and automate the profiling driven by polices creates by senior SRE engineer? So, in 9.5.0, SkyWalking first introduced preset policy rules for specific services to be monitored by the eBPF Agent in a low-energy manner, and run profiling when necessary automatically.\nPolicy Policy rules specify how to monitor target processes and determine the type of profiling task to initiate when certain threshold conditions are met.\nThese policy rules primarily consist of the following configuration information:\n Monitoring type: This specifies what kind of monitoring should be implemented on the target process. Threshold determination: This defines how to determine whether the target process requires the initiation of a profiling task. Trigger task: This specifies what kind of performance analysis task should be initiated.  Monitoring type The type of monitoring is determined by observing the data values of a specified process to generate corresponding metrics. These metric values can then facilitate subsequent threshold judgment operations. In eBPF observation, we believe the following metrics can most directly reflect the current performance of the program:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    
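To make the relationship between these pieces concrete, here is a minimal, illustrative Go sketch of how a policy rule could tie a monitor type, its threshold settings, and the profiling task to trigger together. The package, type, and field names are assumptions made for this article only; they are not the actual skywalking-rover data structures.

package policy

import "time"

// MonitorType enumerates the low-cost metrics a policy can watch.
// The values mirror the table above; the Go identifiers are illustrative.
type MonitorType string

const (
    SystemLoad          MonitorType = "SYSTEM_LOAD"
    ProcessCPU          MonitorType = "PROCESS_CPU"
    ProcessThreadCount  MonitorType = "PROCESS_THREAD_COUNT"
    HTTPErrorRate       MonitorType = "HTTP_ERROR_RATE"
    HTTPAvgResponseTime MonitorType = "HTTP_AVG_RESPONSE_TIME"
)

// TargetTask is the profiling task to start once the rule fires,
// e.g. on/off CPU profiling or network profiling.
type TargetTask string

// CheckItem is one threshold determination: within the last Period
// seconds, the metric must exceed Threshold at least Count times.
type CheckItem struct {
    Type      MonitorType
    Threshold float64
    Period    int // seconds of recent data to look at
    Count     int // how many of those seconds must breach the threshold
}

// PolicyRule binds one or more check items to the task to trigger and
// a cool-down that suppresses repeated triggering for the same process.
type PolicyRule struct {
    Items    []CheckItem
    Trigger  TargetTask
    CoolDown time.Duration
}

The following sections explain how each of these pieces is collected and evaluated.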
Network related monitoring Monitoring network type metrics is not as simple as obtaining basic process information. It requires the initiation of eBPF programs and attaching them to the target process for observation. This is similar to the principles of network profiling task we introduced in the previous article, except that we no longer collect the full content of the data packets. Instead, we only collect the content of messages that match specified HTTP prefixes.\nBy using this method, we can significantly reduce the number of times the kernel sends data to the user space, and the user-space program can parse the data content with less system resource usage. This ultimately helps in conserving system resources.\nMetrics collector The eBPF agent would report metrics of processes periodically as follows to indicate the process performance in time.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    Threshold determination For the threshold determination, the judgement is made by the eBPF Agent based on the target monitoring process in its own memory, rather than relying on calculations performed by the SkyWalking backend. The advantage of this approach is that it doesn\u0026rsquo;t have to wait for the results of complex backend computations, and it reduces potential issues brought about by complicated interactions.\nBy using this method, the eBPF Agent can swiftly initiate tasks immediately after conditions are met, without any delay.\nIt includes the following configuration items:\n Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started.  Trigger task When the eBPF Agent detects that the threshold determination in the specified policy meets the rules, it can initiate the corresponding task according to pre-configured rules. For each different target performance task, their task initiation parameters are different:\n On/Off CPU Profiling: It automatically performs performance analysis on processes that meet the conditions, defaulting to 10 minutes of monitoring. Network Profiling: It performs network performance analysis on all processes in the same Service Instance on the current machine, to prevent the cause of the issue from being unrealizable due to too few process being collected, defaulting to 10 minutes of monitoring.  Once the task is initiated, no new profiling tasks would be started for the current process for a certain period. The main reason for this is to prevent frequent task creation due to low threshold settings, which could affect program execution. 
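As a rough sketch of how this threshold determination and the trigger cool-down could be evaluated inside the agent, consider the following Go fragment. The function names and signatures are assumptions for illustration, not the actual skywalking-rover code.

package policy

import "time"

// shouldTrigger reports whether a check item fires: among the most
// recent `period` one-second samples, at least `count` of them exceed
// `threshold`. samples[len(samples)-1] is the newest second.
func shouldTrigger(samples []float64, threshold float64, period, count int) bool {
    if len(samples) < period {
        return false // not enough recent data collected yet
    }
    breaches := 0
    for _, v := range samples[len(samples)-period:] {
        if v > threshold {
            breaches++
        }
    }
    return breaches >= count
}

// startIfAllowed runs the trigger unless the process is still inside
// the cool-down window of a previously started profiling task.
func startIfAllowed(lastTriggered, now time.Time, coolDown time.Duration, trigger func()) time.Time {
    if !lastTriggered.IsZero() && now.Sub(lastTriggered) < coolDown {
        return lastTriggered // suppressed: a task was started recently
    }
    trigger()
    return now
}

Here coolDown stands for the suppression window applied after a task has been started.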
The default time period is 20 minutes.\nData Flow The figure 1 illustrates the data flow of the continuous profiling feature:\nFigure 1: Data Flow of Continuous Profiling\neBPF Agent with Process Firstly, we need to ensure that the eBPF Agent and the process to be monitored are deployed on the same host machine, so that we can collect relevant data from the process. When the eBPF Agent detects a threshold validation rule that conforms to the policy, it immediately triggers the profiling task for the target process, thereby reducing any intermediate steps and accelerating the ability to pinpoint performance issues.\nSliding window The sliding window plays a crucial role in the eBPF Agent\u0026rsquo;s threshold determination process, as illustrated in the figure 2:\nFigure 2: Sliding Window in eBPF Agent\nEach element in the array represents the data value for a specified second in time. When the sliding window needs to verify whether it is responsible for a rule, it fetches the content of each element from a certain number of recent elements (period parameter). If an element exceeds the threshold, it is marked in red and counted. If the number of red elements exceeds a certain number, it is deemed to trigger a task.\nUsing a sliding window offers the following two advantages:\n Fast retrieval of recent content: With a sliding window, complex calculations are unnecessary. You can know the data by simply reading a certain number of recent array elements. Solving data spikes issues: Validation through count prevents situations where a data point suddenly spikes and then quickly returns to normal. Verification with multiple values can reveal whether exceeding the threshold is frequent or occasional.  eBPF Agent with SkyWalking Backend The eBPF Agent communicates periodically with the SkyWalking backend, involving three most crucial operations:\n Policy synchronization: Through periodic policy synchronization, the eBPF Agent can keep processes on the local machine updated with the latest policy rules as much as possible. Metrics sending: For processes that are already being monitored, the eBPF Agent periodically sends the collected data to the backend program. This facilitates real-time query of current data values by users, who can also compare this data with historical values or thresholds when problems arise. Profiling task reporting: When the eBPF detects that a certain process has triggered a policy rule, it automatically initiates a performance task, collects relevant information from the current process, and reports it to the SkyWalking backend. This allows users to know when, why, and what type of profiling task was triggered from the interface.  Demo Next, let\u0026rsquo;s quickly demonstrate the continuous profiling feature, so you can understand more specifically what it accomplishes.\nDeploy SkyWalking Showcase SkyWalking Showcase contains a complete set of example services and can be monitored using SkyWalking. 
For more information, please check the official documentation.\nIn this demo, we only deploy service, the latest released SkyWalking OAP, and UI.\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.5.0 export SW_UI_IMAGE=apache/skywalking-ui:9.5.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.5.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes After deployment is complete, please run the following script to open SkyWalking UI: http://localhost:8080/.\nkubectl port-forward svc/ui 8080:8080 --namespace default Create Continuous Profiling Policy Currently, continues profiling feature is set by default in the Service Mesh panel at the Service level.\nFigure 3: Continuous Policy Tab\nBy clicking on the edit button aside from the Policy List, the polices of current service could be created or updated.\nFigure 4: Edit Continuous Profiling Policy\nMultiple polices are supported. Every policy has the following configurations.\n Target Type: Specifies the type of profiling task to be triggered when the threshold determination is met. Items: For profiling task of the same target, one or more validation items can be specified. As long as one validation item meets the threshold determination, the corresponding performance analysis task will be launched.  Monitor Type: Specifies the type of monitoring to be carried out for the target process. Threshold: Depending on the type of monitoring, you need to fill in the corresponding threshold to complete the verification work. Period: Specifies the number of recent seconds of data you want to monitor. Count: Determines the total number of seconds triggered within the recent period. URI Regex/List: This is applicable to HTTP monitoring types, allowing URL filtering.    Done After clicking the save button, you can see the currently created monitoring rules, as shown in the figure 5:\nFigure 5: Continuous Profiling Monitoring Processes\nThe data can be divided into the following parts:\n Policy list: On the left, you can see the rule list you have created. Monitoring Summary List: Once a rule is selected, you can see which pods and processes would be monitored by this rule. It also summarizes how many profiling tasks have been triggered in the last 48 hours by the current pod or process, as well as the last trigger time. This list is also sorted in descending order by the number of triggers to facilitate your quick review.  When you click on a specific process, a new dashboard would show to list metrics and triggered profiling results.\nFigure 6: Continuous Profiling Triggered Tasks\nThe current figure contains the following data contents:\n Task Timeline: It lists all profiling tasks in the past 48 hours. And when the mouse hovers over a task, it would also display detailed information:  Task start and end time: It indicates when the current performance analysis task was triggered. Trigger reason: It would display the reason why the current process was profiled and list out the value of the metric exceeding the threshold when the profiling was triggered. so you can quickly understand the reason.   Task Detail: Similar to the CPU Profiling and Network Profiling introduced in previous articles, this would display the flame graph or process topology map of the current task, depending on the profiling type.  
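The trigger reason shown in the task timeline is essentially a small cause record that the eBPF agent reports to the backend together with the task. A hypothetical Go sketch of such a record (the struct and field names are illustrative, not the actual reporting protocol) might look like this:

package policy

import "time"

// TriggerCause is a hypothetical record of why a profiling task was
// started: which monitor type fired, the observed value, and the
// configured threshold it exceeded.
type TriggerCause struct {
    Monitor     string    // e.g. "PROCESS_CPU" or "HTTP_ERROR_RATE"
    Observed    float64   // the metric value at trigger time
    Threshold   float64   // the configured threshold that was breached
    TriggeredAt time.Time // when the rule fired
    Message     string    // human-readable summary shown in the UI
}

Keeping the observed value and the configured threshold side by side is what lets the UI explain, at a glance, why a task was started.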
Meanwhile, on the Metrics tab, metrics relative to profiling policies are collected to retrieve the historical trend, in order to provide a comprehensive explanation of the trigger point about the profiling.\nFigure 7: Continuous Profiling Metrics\nConclusion In this article, I have detailed how the continuous profiling feature in SkyWalking and eBPF works. In general, it involves deploying the eBPF Agent service on the same machine where the process to be monitored resides, and monitoring the target process with low resource consumption. When it meets the threshold conditions, it would initiate more complex CPU Profiling and Network Profiling tasks.\nIn the future, we will offer even more features. Stay tuned!\n Twitter, ASFSkyWalking Slack. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. Subscribe to our medium list.  ","excerpt":"Background In previous articles, We have discussed how to use SkyWalking and eBPF for performance …","ref":"/blog/2023-06-25-intruducing-continuous-profiling-skywalking-with-ebpf/","title":"Activating Automatical Performance Analysis -- Continuous Profiling"},{"body":"","excerpt":"","ref":"/tags/ebpf/","title":"eBPF"},{"body":"","excerpt":"","ref":"/tags/profiling/","title":"Profiling"},{"body":"SkyWalking CLI 0.12.0 is released. Go to downloads page to find release tars.\n Add the sub-command records list for adapt the new record query API by @mrproliu in https://github.com/apache/skywalking-cli/pull/167 Add the attached events fields into the trace sub-command by @mrproliu in https://github.com/apache/skywalking-cli/pull/169 Add the sampling config file into the profiling ebpf create network sub-command by @mrproliu in https://github.com/apache/skywalking-cli/pull/171 Add the sub-command profiling continuous for adapt the new continuous profiling API by @mrproliu in https://github.com/apache/skywalking-cli/pull/173 Adapt the sub-command metrics for deprecate scope fron entity by @mrproliu in https://github.com/apache/skywalking-cli/pull/173 Add components in topology related sub-commands. @mrproliu in https://github.com/apache/skywalking-cli/pull/175 Add the sub-command metrics nullable for query the nullable metrics value. @mrproliu in https://github.com/apache/skywalking-cli/pull/176 Adapt the sub-command profiling trace for adapt the new trace profiling protocol. @mrproliu in https://github.com/apache/skywalking-cli/pull/177 Add isEmptyValue field in metrics related sub-commands. @mrproliu in https://github.com/apache/skywalking-cli/pull/180 Add the sub-command metrics execute for execute the metrics query. @mrproliu in https://github.com/apache/skywalking-cli/pull/182 Add the sub-command profiling continuous monitoring for query all continuous profiling monitoring instances. @mrproliu in https://github.com/apache/skywalking-cli/pull/182 Add continuousProfilingCauses.message field in the profiling ebpf list comamnds by @mrproliu in https://github.com/apache/skywalking-cli/pull/184  ","excerpt":"SkyWalking CLI 0.12.0 is released. Go to downloads page to find release tars.\n Add the sub-command …","ref":"/events/release-apache-skywalking-cli-0-12-0/","title":"Release Apache SkyWalking CLI 0.12.0"},{"body":"SkyWalking Rover 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance the protocol reader for support long socket data. Add the syscall level event to the trace. Support OpenSSL 3.0.x. Optimized the data structure in BPF. Support continuous profiling. 
Improve the performance when getting goid in eBPF. Support build multiple architecture docker image: x86_64, arm64.  Bug Fixes  Fix HTTP method name in protocol analyzer. Fixed submitting multiple network profiling tasks with the same uri causing the rover to restart.  Documentation Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Rover 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance the …","ref":"/events/release-apache-skwaylking-rover-0-5-0/","title":"Release Apache SkyWalking Rover 0.5.0"},{"body":"SkyWalking Satellite 1.2.0 is released. Go to downloads page to find release tars.\nFeatures  Introduce pprof module. Support export multiple telemetry service. Update the base docker image. Add timeout configuration for gRPC client. Reduce log print when the enqueue data to the pipeline error. Support transmit the Continuous Profiling protocol.  Bug Fixes  Fix CVE-2022-41721. Use Go 19 to build the Docker image to fix CVEs.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 1.2.0 is released. Go to downloads page to find release tars.\nFeatures …","ref":"/events/release-apache-skwaylking-satellite-1-2-0/","title":"Release Apache SkyWalking Satellite 1.2.0"},{"body":"","excerpt":"","ref":"/tags/tracing/","title":"Tracing"},{"body":"背景 在之前的文章中,我们讨论了如何使用 SkyWalking 和 eBPF 来检测性能问题,包括进程和网络。这些方法可以很好地定位问题,但仍然存在一些挑战:\n 任务启动的时间: 当需要进行性能监控时,解决需要性能监控的进程始终是一个挑战。通常需要手动参与,以标识进程和所需的性能分析类型,这会在崩溃恢复期间耗费额外的时间。根本原因定位和崩溃恢复时间有时会发生冲突。在实际情况中,重新启动可能是恢复的第一选择,同时也会破坏崩溃的现场。 任务的资源消耗: 确定分析范围的困难。过宽的分析范围会导致需要更多的资源。我们需要一种方法来管理资源消耗并了解哪些进程需要性能分析。 工程师能力: 通常由整个团队负责呼叫,其中有初级和高级工程师,即使是高级工程师也对复杂的分布式系统有其理解限制,单个人几乎无法理解整个系统。  持续剖析(Continuous Profiling) 是解决上述问题的新机制。\n自动剖析 由于性能分析的资源消耗和高经验要求,因此引入一种方法以缩小范围并由高级 SRE 工程师创建策略自动剖析。因此,在 9.5.0 中,SkyWalking 首先引入了预设策略规则,以低功耗方式监视特定服务的 eBPF 代理,并在必要时自动运行剖析。\n策略 策略规则指定了如何监视目标进程并确定在满足某些阈值条件时应启动何种类型的分析任务。\n这些策略规则主要包括以下配置信息:\n 监测类型: 这指定了应在目标进程上实施什么样的监测。 阈值确定: 这定义了如何确定目标进程是否需要启动分析任务。 触发任务: 这指定了应启动什么类型的性能分析任务。  监测类型 监测类型是通过观察指定进程的数据值来生成相应的指标来确定的。这些指标值可以促进后续的阈值判断操作。在 eBPF 观测中,我们认为以下指标最能直接反映程序的当前性能:\n   监测类型 单位 描述     系统负载 负载 在指定时间段内的系统负载平均值。   进程 CPU 百分比 进程的 CPU 使用率百分比。   进程线程计数 计数 进程中的线程数。   HTTP 错误率 百分比 导致错误响应(例如,4xx 或 5xx 状态代码)的 HTTP 请求的百分比。   HTTP 平均响应时间 毫秒 HTTP 请求的平均响应时间。    相关网络监测 监测网络类型的指标不像获取基本进程信息那么简单。它需要启动 eBPF 程序并将其附加到目标进程以进行观测。这类似于我们在先前文章中介绍的网络分析任务,不同的是我们不再收集数据包的完整内容。相反,我们仅收集与指定 HTTP 前缀匹配的消息的内容。\n通过使用此方法,我们可以大大减少内核向用户空间发送数据的次数,用户空间程序可以使用更少的系统资源来解析数据内容。这最终有助于节省系统资源。\n指标收集器 eBPF 代理会定期报告以下进程度量,以指示进程性能:\n   名称 单位 描述     process_cpu (0-100)% CPU 使用率百分比   process_thread_count 计数 进程中的线程数   system_load 计数 最近一分钟的平均系统负载,每个进程的值相同   http_error_rate (0-100)% 网络请求错误率百分比   http_avg_response_time 毫秒 网络平均响应持续时间    阈值确定 对于阈值的确定,eBPF 代理是基于其自身内存中的目标监测进程进行判断,而不是依赖于 SkyWalking 后端执行的计算。这种方法的优点在于,它不必等待复杂后端计算的结果,减少了复杂交互所带来的潜在问题。\n通过使用此方法,eBPF 代理可以在条件满足后立即启动任务,而无需任何延迟。\n它包括以下配置项:\n 阈值: 检查监测值是否符合指定的期望值。 周期: 监控数据的时间周期(秒),也可以理解为最近的持续时间。 计数: 检测期间触发阈值的次数(秒),也可以理解为最近持续时间内指定阈值规则触发的总次数(秒)。一旦满足计数检查,指定的分析任务将被开始。  触发任务 当 eBPF Agent 检测到指定策略中的阈值决策符合规则时,根据预配置的规则可以启动相应的任务。对于每个不同的目标性能任务,它们的任务启动参数都不同:\n On/Off CPU Profiling: 它会自动对符合条件的进程进行性能分析,缺省情况下监控时间为 10 分钟。 Network Profiling: 它会对当前机器上同一 Service Instance 中的所有进程进行网络性能分析,以防问题的原因因被收集进程太少而无法实现,缺省情况下监控时间为 10 分钟。  一旦任务启动,当前进程将在一定时间内不会启动新的剖析任务。主要原因是为了防止因低阈值设置而频繁创建任务,从而影响程序执行。缺省时间为 20 分钟。\n数据流 图 1 展示了持续剖析功能的数据流:\n图 1: 持续剖析的数据流\neBPF Agent进行进程跟踪 首先,我们需要确保 eBPF Agent 和要监测的进程部署在同一台主机上,以便我们可以从进程中收集相关数据。当 eBPF Agent 
检测到符合策略的阈值验证规则时,它会立即为目标进程触发剖析任务,从而减少任何中间步骤并加速定位性能问题的能力。\n滑动窗口 滑动窗口在 eBPF Agent 的阈值决策过程中发挥着至关重要的作用,如图 2 所示:\n图 2: eBPF Agent 中的滑动窗口\n数组中的每个元素表示指定时间内的数据值。当滑动窗口需要验证是否负责某个规则时,它从最近的一定数量的元素 (period 参数) 中获取每个元素的内容。如果一个元素超过了阈值,则标记为红色并计数。如果红色元素的数量超过一定数量,则被认为触发了任务。\n使用滑动窗口具有以下两个优点:\n 快速检索最近的内容:使用滑动窗口,无需进行复杂的计算。你可以通过简单地读取一定数量的最近数组元素来了解数据。 解决数据峰值问题:通过计数进行验证,可以避免数据点突然增加然后快速返回正常的情况。使用多个值进行验证可以揭示超过阈值是频繁还是偶然发生的。  eBPF Agent与OAP后端通讯 eBPF Agent 定期与 SkyWalking 后端通信,涉及三个最关键的操作:\n 策略同步:通过定期的策略同步,eBPF Agent 可以尽可能地让本地机器上的进程与最新的策略规则保持同步。 指标发送:对于已经被监视的进程,eBPF Agent 定期将收集到的数据发送到后端程序。这就使用户能够实时查询当前数据值,用户也可以在出现问题时将此数据与历史值或阈值进行比较。 剖析任务报告:当 eBPF 检测到某个进程触发了策略规则时,它会自动启动性能任务,从当前进程收集相关信息,并将其报告给 SkyWalking 后端。这使用户可以从界面了解何时、为什么和触发了什么类型的剖析任务。  演示 接下来,让我们快速演示持续剖析功能,以便你更具体地了解它的功能。\n部署 SkyWalking Showcase SkyWalking Showcase 包含完整的示例服务,并可以使用 SkyWalking 进行监视。有关详细信息,请查看官方文档。\n在此演示中,我们只部署服务、最新发布的 SkyWalking OAP 和 UI。\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.5.0 export SW_UI_IMAGE=apache/skywalking-ui:9.5.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.5.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes 部署完成后,请运行以下脚本以打开 SkyWalking UI:http://localhost:8080/。\nkubectl port-forward svc/ui 8080:8080 --namespace default 创建持续剖析策略 目前,持续剖析功能在 Service Mesh 面板的 Service 级别中默认设置。\n图 3: 持续策略选项卡\n通过点击 Policy List 旁边的编辑按钮,可以创建或更新当前服务的策略。\n图 4: 编辑持续剖析策略\n支持多个策略。每个策略都有以下配置。\n Target Type:指定符合阈值决策时要触发的剖析任务的类型。 Items:对于相同目标的剖析任务,可以指定一个或多个验证项目。只要一个验证项目符合阈值决策,就会启动相应的性能分析任务。  Monitor Type:指定要为目标进程执行的监视类型。 Threshold:根据监视类型的不同,需要填写相应的阈值才能完成验证工作。 Period:指定你要监测的最近几秒钟的数据数量。 Count:确定最近时间段内触发的总秒数。 URI 正则表达式/列表:这适用于 HTTP 监控类型,允许 URL 过滤。    完成 单击保存按钮后,你可以看到当前已创建的监控规则,如图 5 所示:\n图 5: 持续剖析监控进程\n数据可以分为以下几个部分:\n 策略列表:在左侧,你可以看到已创建的规则列表。 监测摘要列表:选择规则后,你可以看到哪些 pod 和进程将受到该规则的监视。它还总结了当前 pod 或进程在过去 48 小时内触发的性能分析任务数量,以及最后一个触发时间。该列表还按触发次数降序排列,以便你快速查看。  当你单击特定进程时,将显示一个新的仪表板以列出指标和触发的剖析结果。\n图 6: 持续剖析触发的任务\n当前图包含以下数据内容:\n 任务时间轴:它列出了过去 48 小时的所有剖析任务。当鼠标悬停在任务上时,它还会显示详细信息:  任务的开始和结束时间:它指示当前性能分析任务何时被触发。 触发原因:它会显示为什么会对当前进程进行剖析,并列出当剖析被触发时超过阈值的度量值,以便你快速了解原因。   任务详情:与前几篇文章介绍的 CPU 剖析和网络剖析类似,它会显示当前任务的火焰图或进程拓扑图,具体取决于剖析类型。  同时,在 Metrics 选项卡中,收集与剖析策略相关的指标以检索历史趋势,以便在剖析的触发点提供全面的解释。\n图 7: 持续剖析指标\n结论 在本文中,我详细介绍了 SkyWalking 和 eBPF 中持续剖析功能的工作原理。通常情况下,它涉及将 eBPF Agent 服务部署在要监视的进程所在的同一台计算机上,并以低资源消耗监测目标进程。当它符合阈值条件时,它会启动更复杂的 CPU 剖析和网络剖析任务。\n在未来,我们将提供更多功能。敬请期待!\n Twitter:ASFSkyWalking Slack:向邮件列表 (dev@skywalking.apache.org) 发送“Request to join SkyWalking Slack”,我们会邀请你加入。 订阅我们的 Medium 列表。  ","excerpt":"背景 在之前的文章中,我们讨论了如何使用 SkyWalking 和 eBPF 来检测性能问题,包括进程和网络。这些方法可以很好地定位问题,但仍然存在一些挑战:\n 任务启动的时间: 当需要进行性能监控 …","ref":"/zh/2023-06-25-intruducing-continuous-profiling-skywalking-with-ebpf/","title":"自动化性能分析——持续剖析"},{"body":"SkyWalking 9.5.0 is released. Go to downloads page to find release tars.\nNew Topology Layout Elasticsearch Server Monitoring Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP running in no-init mode. Support to bind TLS status as a part of component for service topology. Fix component ID priority bug. Fix component ID of topology overlap due to storage layer bugs. [Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. 
[Breaking Change] Sharding-MySQL implementations and tests get removed due to we have the day-based rolling mechanism by default Fix otel k8s-cluster rule add namespace dimension for MAL aggregation calculation(Deployment Status,Deployment Spec Replicas) Support continuous profiling feature. Support collect process level related metrics. Fix K8sRetag reads the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing service label. Fix possible NPE when initialize IntList. Support parse PromQL expression has empty labels in the braces for metadata query. Support alarm metric OP !=. Support metrics query indicates whether value == 0 represents actually zero or no data. Fix NPE when query the not exist series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: Remove empty values from the query result, fix /api/v1/metadata param limit could cause out of bound. Support monitoring the total number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix cve. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for aws-firehose receiver Bump up armeria to 1.23.1 Support Elasticsearch Monitoring. Fix PromQL HTTP API /api/v1/series response missing service label when matching metric. Support ServerSide TopN for BanyanDB. Add component ID for Jersey. Remove OpenCensus support, the related codes and docs as it\u0026rsquo;s sunsetting. Support dynamic configuration of searchableTracesTags Support exportErrorStatusTraceOnly for export the error status trace segments through the Kafka channel Add component ID for Grizzly. Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. 
Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.5.0 is released. Go to downloads page to find release tars.\nNew Topology Layout …","ref":"/events/release-apache-skywalking-apm-9.5.0/","title":"Release Apache SkyWalking APM 9.5.0"},{"body":"Celebrating 22k Stars! The Apache SkyWalking community is thrilled to reach the milestone of 22k stars on GitHub! This showcases its popularity and impact as an APM and observability tool.\nSince launching in 2016 to provide an open source APM solution, SkyWalking has evolved into a full stack observability platform with distributed tracing, metrics monitoring and alerting. It\u0026rsquo;s seeing widespread adoption globally, especially in Asia where APM needs are expanding rapidly.\nThe growing user base has enabled SkyWalking to achieve massive deployments demonstrating its ability to scale to extreme levels. There have been reported deployments collecting over 100TB of data from companies' complex distributed applications, monitoring over 8000 microservices and analyzing 100 billion distributed traces - providing end-to-end visibility, performance monitoring and issue troubleshooting for some of the largest distributed systems in the world.\nThis success and widespread adoption has attracted an active community of nearly 800 contributors, thanks in part to programs like GSoC and OSPP(Open Source Promotion Plan) that bring in university contributors. The SkyWalking team remains focused on building a reliable, performant platform to observe complex distributed systems. We\u0026rsquo;ll continue innovating with features like service mesh monitoring and metric analytics.Your ongoing support, feedback and contributions inspire us!\nThank you for helping SkyWalking reach 22k stars on GitHub! This is just the beginning - we have ambitious plans and can\u0026rsquo;t wait to have you along our journey!\n","excerpt":"Celebrating 22k Stars! 
The Apache SkyWalking community is thrilled to reach the milestone of 22k …","ref":"/blog/2023-06-13-celebrate-22k-stars/","title":"Celebrate 22k stars"},{"body":"本文演示如何将 Dubbo-Go 应用程序与 SkyWalking Go 集成,并在 SkyWalking UI 中查看结果。\n以前,如果你想要在 SkyWalking 中监控 Golang 应用程序,需要将项目与 go2sky 项目集成,并手动编写各种带有 go2sky 插件的框架。现在,我们有一个全新的项目( Skywalking Go ),允许你将 Golang 项目集成到 SkyWalking 中,几乎不需要编码,同时提供更大的灵活性和可扩展性。\n在本文中,我们将指导你快速将 skywalking-go 项目集成到 dubbo-go 项目中。\n演示包括以下步骤:\n 部署 SkyWalking:这涉及设置 SkyWalking 后端和 UI 程序,使你能够看到最终效果。 使用 SkyWalking Go 编译程序:在这里,你将把 SkyWalking Go Agent 编译到要监控的 Golang 程序中。 应用部署:你将导出环境变量并部署应用程序,以促进你的服务与 SkyWalking 后端之间的通信。 在 SkyWalking UI 上可视化:最后,你将发送请求并在 SkyWalking UI 中观察效果。  部署 SkyWalking 请从官方 SkyWalking 网站下载 SkyWalking APM 程序 。然后执行以下两个命令来启动服务:\n# 启动 OAP 后端 \u0026gt; bin/oapService.sh # 启动 UI \u0026gt; bin/webappService.sh 接下来,你可以访问地址 http://localhost:8080/ 。此时,由于尚未部署任何应用程序,因此你将看不到任何数据。\n使用 SkyWalking GO 编译 Dubbo Go 程序 这里将演示如何将 Dubbo-go 程序与SkyWalking Go Agent集成。请依次执行如下命令来创建一个新的项目:\n# 安装dubbo-go基础环境 \u0026gt; export GOPROXY=\u0026#34;https://goproxy.cn\u0026#34; \u0026gt; go install github.com/dubbogo/dubbogo-cli@latest \u0026gt; dubbogo-cli install all # 创建demo项目 \u0026gt; mkdir demo \u0026amp;\u0026amp; cd demo \u0026gt; dubbogo-cli newDemo . # 升级dubbo-go依赖到最新版本 \u0026gt; go get -u dubbo.apache.org/dubbo-go/v3 在项目的根目录中执行以下命令。此命令将下载 skywalking-go 所需的依赖项:\ngo get github.com/apache/skywalking-go 接下来,请分别在服务端和客户端的main包中引入。包含之后,代码将会更新为:\n// go-server/cmd/server.go package main import ( \u0026#34;context\u0026#34; ) import ( \u0026#34;dubbo.apache.org/dubbo-go/v3/common/logger\u0026#34; \u0026#34;dubbo.apache.org/dubbo-go/v3/config\u0026#34; _ \u0026#34;dubbo.apache.org/dubbo-go/v3/imports\u0026#34; \u0026#34;helloworld/api\u0026#34; // 引入skywalking-go \t_ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) type GreeterProvider struct { api.UnimplementedGreeterServer } func (s *GreeterProvider) SayHello(ctx context.Context, in *api.HelloRequest) (*api.User, error) { logger.Infof(\u0026#34;Dubbo3 GreeterProvider get user name = %s\\n\u0026#34;, in.Name) return \u0026amp;api.User{Name: \u0026#34;Hello \u0026#34; + in.Name, Id: \u0026#34;12345\u0026#34;, Age: 21}, nil } // export DUBBO_GO_CONFIG_PATH= PATH_TO_SAMPLES/helloworld/go-server/conf/dubbogo.yaml func main() { config.SetProviderService(\u0026amp;GreeterProvider{}) if err := config.Load(); err != nil { panic(err) } select {} } 在客户端代码中除了需要引入skywalking-go之外,还需要在main方法中的最后一行增加主携程等待语句,以防止因为客户端快速关闭而无法将Tracing数据异步发送到SkyWalking后端:\npackage main import ( \u0026#34;context\u0026#34; ) import ( \u0026#34;dubbo.apache.org/dubbo-go/v3/common/logger\u0026#34; \u0026#34;dubbo.apache.org/dubbo-go/v3/config\u0026#34; _ \u0026#34;dubbo.apache.org/dubbo-go/v3/imports\u0026#34; \u0026#34;helloworld/api\u0026#34; // 引入skywalking-go \t_ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) var grpcGreeterImpl = new(api.GreeterClientImpl) // export DUBBO_GO_CONFIG_PATH= PATH_TO_SAMPLES/helloworld/go-client/conf/dubbogo.yaml func main() { config.SetConsumerService(grpcGreeterImpl) if err := config.Load(); err != nil { panic(err) } logger.Info(\u0026#34;start to test dubbo\u0026#34;) req := \u0026amp;api.HelloRequest{ Name: \u0026#34;laurence\u0026#34;, } reply, err := grpcGreeterImpl.SayHello(context.Background(), req) if err != nil { logger.Error(err) } logger.Infof(\u0026#34;client response result: %v\\n\u0026#34;, reply) // 增加主携程等待语句 \tselect {} } 接下来,请从官方 SkyWalking 网站下载 Go Agent 程序 。当你使用 go build 命令进行编译时,请在 bin 目录中找到与当前操作系统匹配的代理程序,并添加 
-toolexec=\u0026quot;/path/to/go-agent -a 参数。例如,请使用以下命令:\n# 进入项目主目录 \u0026gt; cd demo # 分别编译服务端和客户端 # -toolexec 参数定义为go-agent的路径 # -a 参数用于强制重新编译所有依赖项 \u0026gt; cd go-server \u0026amp;\u0026amp; go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o go-server cmd/server.go \u0026amp;\u0026amp; cd .. \u0026gt; cd go-client \u0026amp;\u0026amp; go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o go-client cmd/client.go \u0026amp;\u0026amp; cd .. 应用部署 在开始部署应用程序之前,你可以通过环境变量更改 SkyWalking 中当前应用程序的服务名称。你还可以更改其配置,例如服务器端的地址。有关详细信息,请参阅文档 。\n在这里,我们分别启动两个终端窗口来分别启动服务端和客户端。\n在服务端,将服务的名称更改为dubbo-server:\n# 导出dubbo-go服务端配置文件路径 export DUBBO_GO_CONFIG_PATH=/path/to/demo/go-server/conf/dubbogo.yaml # 导出skywalking-go的服务名称 export SW_AGENT_NAME=dubbo-server ./go-server/go-server 在客户端,将服务的名称更改为dubbo-client:\n# 导出dubbo-go客户端配置文件路径 export DUBBO_GO_CONFIG_PATH=/path/to/demo/go-client/conf/dubbogo.yaml # 导出skywalking-go的服务名称 export SW_AGENT_NAME=dubbo-client ./go-client/go-client 在 SkyWalking UI 上可视化 现在,由于客户端会自动像服务器端发送请求,现在就可以在 SkyWalking UI 中观察结果。\n几秒钟后,重新访问 http://localhost:8080 的 SkyWalking UI。能够在主页上看到部署的 dubbo-server 和 dubbo-client 服务。\n此外,在追踪页面上,可以看到刚刚发送的请求。\n并可以在拓扑图页面中看到服务之间的关系。\n总结 在本文中,我们指导你快速开发dubbo-go服务,并将其与 SkyWalking Go Agent 集成。这个过程也适用于你自己的任意 Golang 服务。最终,可以在 SkyWalking 服务中查看显示效果。如果你有兴趣了解 SkyWalking Go 代理当前支持的框架,请参阅此文档 。\n将来,我们将继续扩展 SkyWalking Go 的功能,添加更多插件支持。所以,请继续关注!\n","excerpt":"本文演示如何将 Dubbo-Go 应用程序与 SkyWalking Go 集成,并在 SkyWalking UI 中查看结果。\n以前,如果你想要在 SkyWalking 中监控 Golang 应用程 …","ref":"/zh/2023-06-05-quick-start-using-skywalking-go-monitoring-dubbo-go/","title":"使用SkyWalking go agent快速实现Dubbo Go监控"},{"body":"SkyWalking Go 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Initialize the agent core and user import library. Support gRPC reporter for management, tracing protocols. Automatic detect the log frameworks and inject the log context.  Plugins  Support Gin framework. Support Native HTTP server and client framework. Support Go Restful v3 framework. Support Dubbo server and client framework. Support Kratos v2 server and client framework. Support Go-Micro v4 server and client framework. Support GORM v2 database client framework.  Support MySQL Driver detection.    Documentation  Initialize the documentation.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Go 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Initialize the …","ref":"/events/release-apache-skwaylking-go-0.1.0/","title":"Release Apache SkyWalking Go 0.1.0"},{"body":"SkyWalking Java Agent 8.16.0 is released. Go to downloads page to find release tars. Changes by Version\n8.16.0  Exclude synthetic methods for the WitnessMethod mechanism Support ForkJoinPool trace Support clickhouse-jdbc-plugin trace sql parameters Support monitor jetty server work thread pool metric Support Jersey REST framework Fix ClassCastException when SQLServer inserts data [Chore] Exclude org.checkerframework:checker-qual and com.google.j2objc:j2objc-annotations [Chore] Exclude proto files in the generated jar Fix Jedis-2.x plugin can not get host info in jedis 3.3.x+ Change the classloader to locate the agent path in AgentPackagePath, from SystemClassLoader to AgentPackagePath\u0026rsquo;s loader. Support Grizzly Trace Fix possible IllegalStateException when using Micrometer. Support Grizzly Work ThreadPool Metric Monitor Fix the gson dependency in the kafka-reporter-plugin. Fix deserialization of kafka producer json config in the kafka-reporter-plugin. 
Support to config custom decode methods for kafka configurations  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.16.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-16-0/","title":"Release Apache SkyWalking Java Agent 8.16.0"},{"body":"Background Previously, if you wanted to monitor a Golang application in SkyWalking, you would integrate your project with the go2sky project and manually write various frameworks with go2sky plugins. Now, we have a brand-new project (Skywalking Go) that allows you to integrate your Golang projects into SkyWalking with almost zero coding, while offering greater flexibility and scalability.\nIn this article, we will guide you quickly integrating the skywalking-go project into your Golang project.\nQuick start This demonstration will consist of the following steps:\n Deploy SkyWalking: This involves setting up the SkyWalking backend and UI programs, enabling you to see the final effect. Compile Golang with SkyWalking Go: Here, you\u0026rsquo;ll compile the SkyWalking Go Agent into the Golang program you wish to monitor. Application Deployment: You\u0026rsquo;ll export environment variables and deploy the application to facilitate communication between your service and the SkyWalking backend. Visualization on SkyWalking UI: Finally, you\u0026rsquo;ll send requests and observe the effects within the SkyWalking UI.  Deploy SkyWalking Please download the SkyWalking APM program from the official SkyWalking website. Then execute the following two commands to start the service:\n# startup the OAP backend \u0026gt; bin/oapService.sh # startup the UI \u0026gt; bin/webappService.sh Next, you can access the address at http://localhost:8080/. At this point, as no applications have been deployed yet, you will not see any data.\nCompile Golang with SkyWalking GO Here is a simple business application here that starts an HTTP service.\npackage main import \u0026#34;net/http\u0026#34; func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } Execute the following command in the project\u0026rsquo;s root directory. This command will download the dependencies required for skywalking-go:\ngo get github.com/apache/skywalking-go Also, include it in the main package of the project. After the inclusion, the code will update to:\npackage main import ( \u0026#34;net/http\u0026#34; // This is an important step. DON\u0026#39;T MISS IT. \t_ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } Next, please download the Go Agent program from the official SkyWalking website. When you compile with the go build command, find the agent program that matches your current operating system in the bin directory, and add the -toolexec=\u0026quot;/path/to/go-agent -a parameter. 
For example, use the following command:\n# Build application with SkyWalking go agent # -toolexec parameter define the path of go-agent # -a parameter is used to force rebuild all packages \u0026gt; go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o test . Application Deployment Before you start to deploy the application, you can change the service name of the current application in SkyWalking through environment variables. You can also change its configuration such as the address with the server-side. For specific details, please refer to the documentation.\nHere, we\u0026rsquo;re just changing the name of the current service to demo.\n# Change the service name \u0026gt; export SW_AGENT_NAME=demo Next, you can start the application:\n# Start the application \u0026gt; ./test Visualization on SkyWalking UI Now, you can send a request to the application and observe the results in the SkyWalking UI.\n# Send a request \u0026gt; curl http://localhost:8000/hello After a few seconds, you can revisit the SkyWalking UI at http://localhost:8080. You will be able to see the demo service you deployed on the homepage.\nMoreover, on the Trace page, you can see the request you just sent.\nConclusion In this article, we\u0026rsquo;ve guided you to quickly develop a demo service and integrate it with SkyWalking Go Agent. This process is also applicable to your own Golang services. Ultimately, you can view the display effect in the SkyWalking service. If you\u0026rsquo;re interested in learning which frameworks the SkyWalking Go agent currently supports, please refer to this documentation.\nIn the future, we will continue to expand the functionality of SkyWalking Go, adding more plugin support. So, stay tuned!\n","excerpt":"Background Previously, if you wanted to monitor a Golang application in SkyWalking, you would …","ref":"/blog/2023-06-01-quick-start-with-skywalking-go-agent/","title":"Quick start with SkyWalking Go Agent"},{"body":"本文演示如何将应用程序与 SkyWalking Go 集成,并在 SkyWalking UI 中查看结果。\n以前,如果你想要在 SkyWalking 中监控 Golang 应用程序,需要将项目与 go2sky 项目集成,并手动编写各种带有 go2sky 插件的框架。现在,我们有一个全新的项目(Skywalking Go ),允许你将 Golang 项目集成到 SkyWalking 中,几乎不需要编码,同时提供更大的灵活性和可扩展性。\n在本文中,我们将指导你快速将 skywalking-go 项目集成到 Golang 项目中。\n演示包括以下步骤:\n 部署 SkyWalking:这涉及设置 SkyWalking 后端和 UI 程序,使你能够看到最终效果。 使用 SkyWalking Go 编译 Golang:在这里,你将把 SkyWalking Go Agent 编译到要监控的 Golang 程序中。 应用部署:你将导出环境变量并部署应用程序,以促进你的服务与 SkyWalking 后端之间的通信。 在 SkyWalking UI 上可视化:最后,你将发送请求并在 SkyWalking UI 中观察效果。  部署 SkyWalking 请从官方 SkyWalking 网站下载 SkyWalking APM 程序 。然后执行以下两个命令来启动服务:\n# 启动 OAP 后端 \u0026gt; bin/oapService.sh # 启动 UI \u0026gt; bin/webappService.sh 接下来,你可以访问地址 http://localhost:8080/ 。此时,由于尚未部署任何应用程序,因此你将看不到任何数据。\n使用 SkyWalking GO 编译 Golang 这里有一个简单的业务应用程序,启动了一个 HTTP 服务。\npackage main import \u0026#34;net/http\u0026#34; func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } 在项目的根目录中执行以下命令。此命令将下载 skywalking-go 所需的依赖项:\ngo get github.com/apache/skywalking-go 接下来,请将其包含在项目的 main 包中。包含之后,代码将会更新为:\npackage main import ( \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } 
} 接下来,请从官方 SkyWalking 网站下载 Go Agent 程序 。当你使用 go build 命令进行编译时,请在 bin 目录中找到与当前操作系统匹配的代理程序,并添加 -toolexec=\u0026quot;/path/to/go-agent\u0026quot; -a 参数。例如,请使用以下命令:\ngo build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o test . 应用部署 在开始部署应用程序之前,你可以通过环境变量更改 SkyWalking 中当前应用程序的服务名称。你还可以更改其配置,例如服务器端的地址。有关详细信息,请参阅文档 。\n在这里,我们只是将当前服务的名称更改为 demo。\n接下来,你可以启动应用程序:\nexport SW_AGENT_NAME=demo ./test 在 SkyWalking UI 上可视化 现在,向应用程序发送请求并在 SkyWalking UI 中观察结果。\n几秒钟后,重新访问 http://localhost:8080 的 SkyWalking UI。能够在主页上看到部署的 demo 服务。\n此外,在追踪页面上,可以看到刚刚发送的请求。\n总结 在本文中,我们指导你快速开发 demo 服务,并将其与 SkyWalking Go Agent 集成。这个过程也适用于你自己的 Golang 服务。最终,可以在 SkyWalking 服务中查看显示效果。如果你有兴趣了解 SkyWalking Go 代理当前支持的框架,请参阅此文档 。\n将来,我们将继续扩展 SkyWalking Go 的功能,添加更多插件支持。所以,请继续关注!\n","excerpt":"本文演示如何将应用程序与 SkyWalking Go 集成,并在 SkyWalking UI 中查看结果。\n以前,如果你想要在 SkyWalking 中监控 Golang 应用程序, …","ref":"/zh/2023-06-01-quick-start-with-skywalking-go-agent/","title":"SkyWalking Go Agent 快速开始指南"},{"body":"SkyWalking Rust 0.7.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Obtain Span object without intermediary. by @jmjoy in https://github.com/apache/skywalking-rust/pull/57 Rename module skywalking_proto to proto. by @jmjoy in https://github.com/apache/skywalking-rust/pull/59 Add Span::prepare_for_async method and AbstractSpan trait. by @jmjoy in https://github.com/apache/skywalking-rust/pull/58 Bump to 0.7.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/60  ","excerpt":"SkyWalking Rust 0.7.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-7-0/","title":"Release Apache SkyWalking Rust 0.7.0"},{"body":"SkyWalking PHP 0.5.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Bump openssl from 0.10.45 to 0.10.48 by @dependabot in https://github.com/apache/skywalking-php/pull/60 Make the SKYWALKING_AGENT_ENABLE work in the request hook as well. by @jmjoy in https://github.com/apache/skywalking-php/pull/61 Support tracing curl_multi_* api. by @jmjoy in https://github.com/apache/skywalking-php/pull/62 Fix parent endpoint and peer in segment ref and tag url in entry span. by @jmjoy in https://github.com/apache/skywalking-php/pull/63 Bump h2 from 0.3.15 to 0.3.17 by @dependabot in https://github.com/apache/skywalking-php/pull/65 Add amqplib plugin for producer. by @jmjoy in https://github.com/apache/skywalking-php/pull/64 Upgrade and adapt phper. by @jmjoy in https://github.com/apache/skywalking-php/pull/66 Refactor script create_package_xml. by @jmjoy in https://github.com/apache/skywalking-php/pull/67 Refactor predis plugin to hook Client. by @jmjoy in https://github.com/apache/skywalking-php/pull/68 Canonicalize unknown. by @jmjoy in https://github.com/apache/skywalking-php/pull/69 Bump guzzlehttp/psr7 from 2.4.0 to 2.5.0 in /tests/php by @dependabot in https://github.com/apache/skywalking-php/pull/70 Enhance support for Swoole. by @jmjoy in https://github.com/apache/skywalking-php/pull/71 Bump to 0.5.0. by @jmjoy in https://github.com/apache/skywalking-php/pull/72  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.4.0...v0.5.0\nPECL https://pecl.php.net/package/skywalking_agent/0.5.0\n","excerpt":"SkyWalking PHP 0.5.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skwaylking-php-0-5-0/","title":"Release Apache SkyWalking PHP 0.5.0"},{"body":"SkyWalking Python 1.0.1 is released! 
Go to downloads page to find release tars.\nPyPI Wheel: https://pypi.org/project/apache-skywalking/1.0.1/\nDockerHub Image: https://hub.docker.com/r/apache/skywalking-python\n  Upgrading from v1.0.0 to v1.0.1 is strongly encouraged\n This is a critical performance-oriented patch to address a CPU surge reported in https://github.com/apache/skywalking/issues/10672    Feature:\n Add a new workflow to push docker images for arm64 and amd64 (#297)    Plugins:\n Optimize loguru reporter plugin.(#302)    Fixes:\n Fix sw8 loss when use aiohttp (#299, issue#10669) Critical: Fix a bug that leads to high cpu usage (#300, issue#10672)    Others:\n Use Kraft mode in E2E Kafka reporter tests (#303)    New Contributors  @Forstwith made their first contribution in https://github.com/apache/skywalking-python/pull/299 @FAWC438 made their first contribution in https://github.com/apache/skywalking-python/pull/300  Full Changelog: https://github.com/apache/skywalking-python/compare/v1.0.0...v1.0.1\n","excerpt":"SkyWalking Python 1.0.1 is released! Go to downloads page to find release tars.\nPyPI Wheel: …","ref":"/events/release-apache-skywalking-python-1-0-1/","title":"Release Apache SkyWalking Python 1.0.1"},{"body":"本次活动于 2023 年 4 月 22 日在北京奥加美术馆酒店举行。该会议旨在探讨和分享有关可观测性的最佳实践, 包括在云原生应用程序和基础架构中实现可观测性的最新技术和工具。与会者将有机会了解行业领袖的最新见解,并与同行们分享经验和知识。 我们期待这次会议能够给云原生社区带来更多的启发和动力,推动我们在可观测性方面的进一步发展。\n圆桌讨论:云原生应用可观测性现状及趋势 B站视频地址\n嘉宾\n 罗广明,主持人 吴晟,Tetrate 创始工程师 向阳,云杉科技研发 VP 乔新亮,原苏宁科技副总裁,现彩食鲜 CTO 董江,中国移动云能力中心高级系统架构专家  为 Apache SkyWalking 构建 Grafana dashboards \u0026ndash; 基于对原生 PromQL 的支持 B站视频地址\n万凯,Tetrate\n  讲师介绍 万凯,Tetrate 工程师,Apache SkyWalking PMC 成员,专注于应用性能可观测性领域。\n  议题概要 本次分享将介绍 Apache SkyWalking 的新特性 PromQL Service,它将为 SkyWalking 带来更广泛的生态集成能力: 什么是 PromQL SkyWalking 的 PromQL Service 是什么,能够做什么 SkyWalking 中的基本概念和 metrics 的特性 如何使用 PromQL Service 使用 PromQL Service 构建 Grafana dashboards 的实践\n  ","excerpt":"本次活动于 2023 年 4 月 22 日在北京奥加美术馆酒店举行。该会议旨在探讨和分享有关可观测性的最佳实践, 包括在云原生应用程序和基础架构中实现可观测性的最新技术和工具。与会者将有机会了解行业领 …","ref":"/zh/2023-04-23-obs-summit-china/","title":"[视频] 可观测性峰会2023 - Observability Summit"},{"body":"SkyWalking Client JS 0.10.0 is released. Go to downloads page to find release tars.\n Fix the ability of Fetch constructure. Update README. Bump up dependencies.  ","excerpt":"SkyWalking Client JS 0.10.0 is released. Go to downloads page to find release tars.\n Fix the ability …","ref":"/events/release-apache-skywalking-client-js-0-10-0/","title":"Release Apache SkyWalking Client JS 0.10.0"},{"body":"SkyWalking Java Agent 8.15.0 is released. Go to downloads page to find release tars. Changes by Version\n8.15.0  Enhance lettuce plugin to adopt uniform tags. Expose complete Tracing APIs in the tracing toolkit. Add plugin to trace Spring 6 and Resttemplate 6. Move the baseline to JDK 17 for development, the runtime baseline is still Java 8 compatible. Remove Powermock entirely from the test cases. Fix H2 instrumentation point Refactor pipeline in jedis-plugin. Add plugin to support ClickHouse JDBC driver (0.3.2.*). Refactor kotlin coroutine plugin with CoroutineContext. Fix OracleURLParser ignoring actual port when :SID is absent. Change gRPC instrumentation point to fix plugin not working for server side. Fix servicecomb plugin trace break. Adapt Armeria\u0026rsquo;s plugins to the latest version 1.22.x Fix tomcat-10x-plugin and add test case to support tomcat7.x-8.x-9.x. Fix thrift plugin generate duplicate traceid when sendBase error occurs Support keep trace profiling when cross-thread. 
Fix unexpected whitespace of the command catalogs in several Redis plugins. Fix a thread leak in SamplingService when updated sampling policy in the runtime. Support MySQL plugin tracing SQL parameters when useServerPrepStmts Update the endpoint name of Undertow plugin to Method:Path. Build a dummy(empty) javadoc of finagle and jdk-http plugins due to incompatibility.  Documentation  Update docs of Tracing APIs, reorganize the API docs into six parts. Correct missing package name in native manual API docs. Add a FAQ doc about \u0026ldquo;How to make SkyWalking agent works in OSGI environment?\u0026rdquo;  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.15.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-15-0/","title":"Release Apache SkyWalking Java Agent 8.15.0"},{"body":"SkyWalking PHP 0.4.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Bump tokio from 1.24.1 to 1.24.2 by @dependabot in https://github.com/apache/skywalking-php/pull/52 Bump to 0.4.0-dev by @heyanlong in https://github.com/apache/skywalking-php/pull/53 Avoid potential panic for logger. by @jmjoy in https://github.com/apache/skywalking-php/pull/54 Fix the curl plugin hook curl_setopt by mistake. by @jmjoy in https://github.com/apache/skywalking-php/pull/55 Update documents. by @jmjoy in https://github.com/apache/skywalking-php/pull/56 Upgrade dependencies and adapt the codes. by @jmjoy in https://github.com/apache/skywalking-php/pull/57 Add sub components licenses in dist material. by @jmjoy in https://github.com/apache/skywalking-php/pull/58 Bump to 0.4.0. by @jmjoy in https://github.com/apache/skywalking-php/pull/59  New Contributors  @dependabot made their first contribution in https://github.com/apache/skywalking-php/pull/52  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.3.0...v0.4.0\nPECL https://pecl.php.net/package/skywalking_agent/0.4.0\n","excerpt":"SkyWalking PHP 0.4.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skwaylking-php-0-4-0/","title":"Release Apache SkyWalking PHP 0.4.0"},{"body":"Background As an application performance monitoring tool for distributed systems, Apache SkyWalking provides monitoring, tracing, diagnosing capabilities for distributed system in Cloud Native architecture. Prometheus is an open-source systems monitoring and alerting toolkit with an active ecosystem. Especially Prometheus metrics receive widespread support through exporters and integrations. PromQL as Prometheus Querying Language containing a set of expressions and expose HTTP APIs to read metrics.\nSkyWalking supports to ingest Prometheus metrics through OpenTelemetry collector and through the aggregate calculation of these metrics to provide a variety of systems monitoring, such as Linux Monitoring and Kubernetes monitoring. SkyWalking already provides native UI and GraphQL API for users. But as designed to provide wider ecological integration capabilities, since 9.4.0, it provides PromQL Service, the third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through it. SkyWalking users will benefit from it when they integrate with different systems.\nWhat is PromQL Service in SkyWalking? 
PromQL Service is a query engine on the top of SkyWalking native GraphQL query, with additional query stage calculation capabilities powered by Prometheus expressions. It can accept PromQL HTTP API requests, parse Prometheus expressions, and transform between Prometheus metrics and SkyWalking metrics.\nThe PromQL Service follows all PromQL\u0026rsquo;s protocols and grammar and users can use it as they would with PromQL. As SkyWalking is fundamentally different from Prometheus in terms of metric classification, format, storage, etc. PromQL Service doesn\u0026rsquo;t have to implement the full PromQL feature. Refer to the documentation for the detail.\nSkyWalking Basic Concepts Here are some basic concepts and differences from Prometheus that users need to understand in order to use the PromQL service: Prometheus metrics specify the naming format and structure, the actual metric names and labels are determined by the client provider, and the details are stored. The user aggregates and calculates the metrics using the expression in PromQL. Unlike Prometheus, SkyWalking\u0026rsquo;s metric mechanism is built around the following core concepts with a hierarchical structure:\n Layer: represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This layer would be the owner of different services detected from different technologies. All Layers definitions can be found here. Service: Represents a set/group of workloads which provides the same behaviors for incoming requests. Service Instance: An individual workload in the Service group. Endpoint: A path in a service for incoming requests. Process: An operating system process. In some scenarios, a service instance is not a process, such as a pod Kubernetes could contain multiple processes.  The metric name and properties (labels) are configured by the SkyWalking OAP server based on the data source as well as OAL and MAL. SkyWalking provides the ability to down-sampling time series metrics, and generate different time bucket data (minute, hour, day).\nThe SkyWalking metric stream is as follows:\nTraffic  The metadata of the Service/ServiceRelation/Instance/ServiceInstanceRelation/Endpoint/EndpointRelation/Process/ProcessRelation. Include names, layers, properties, relations between them, etc.  Metric  Name: metric name, configuration from OAL and MAL. Entity: represents the metrics' belonging and used for the query. An Entity will contain the following information depending on the Scope: Scope represents the metrics level and in query stage represents the Scope catalog, Scope catalog provides high-dimension classifications for all scopes as a hierarchy structure.     Scope Entity Info     Service Service(include layer info)   ServiceInstance Service, ServiceInstance   Endpoint Service, Endpoint   ServiceRelation Service, DestService   ServiceInstanceRelation ServiceInstance, DestServiceInstance   EndpointRelation Endpoint, DestEndpoint   Process Service, ServiceInstance, Process   ProcessRelation Process, ServiceInstance, DestProcess     Value:   single value: long. labeled value: text, label1,value1|label2,value2|..., such as L2 aggregation,5000 | L1 aggregation,8000.   TimeBucket: the time is accurate to minute, hour, day.  How to use PromQL Service Setup PromQL Service is enabled by default after v9.4.0, so no additional configuration is required. 
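A quick way to confirm that the PromQL endpoint is reachable is to ask it for the metric names it exposes. This is only a sanity check and assumes the OAP server is running locally on the default PromQL port 9090; the query examples later in this article use port 9099, which simply reflects how the port is exposed in that particular deployment.
curl http://localhost:9090/api/v1/label/__name__/values
A healthy service responds with a JSON payload whose status field is success, in the same shape as the query results shown below.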
The default ports, for example, can be configured by using OAP environment variables:\nrestHost: ${SW_PROMQL_REST_HOST:0.0.0.0} restPort: ${SW_PROMQL_REST_PORT:9090} restContextPath: ${SW_PROMQL_REST_CONTEXT_PATH:/} restMaxThreads: ${SW_PROMQL_REST_MAX_THREADS:200} restIdleTimeOut: ${SW_PROMQL_REST_IDLE_TIMEOUT:30000} restAcceptQueueSize: ${SW_PROMQL_REST_QUEUE_SIZE:0} Use Prometheus expression PromQL matches metric through the Prometheus expression. Here is a typical Prometheus metric.\nTo match the metric, the Prometheus expression is as follows:\nIn the PromQL Service, these reserved labels would be parsed as the metric name and entity info fields with other labels for the query. The mappings are as follows.\n   SkyWalking Concepts Prometheus expression     Metric name Metric name   Layer Label   Service Label   ServiceInstance Label\u0026lt;service_instance\u0026gt;   Endpoint Label   \u0026hellip; \u0026hellip;    For example, the following expressions are used to match query metrics: service_cpm, service_instance_cpm, endpoint_cpm\nservice_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} service_instance_cpm{service=\u0026#39;agent::songs\u0026#39;, service_instance=\u0026#39;agent::songs_instance_1\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} endpoint_cpm{service=\u0026#39;agent::songs\u0026#39;, endpoint=\u0026#39;GET:/songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Typical Query Example At here, we take the SkyWalking Showcase deployment as the playground to demonstrate how to use PromQL for SkyWalking metrics.\nThe following examples can be used to query the metadata and metrics of services through PromQL Service.\nGet metrics names Query:\nhttp://localhost:9099/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, ... 
] } Select a metric and get the labels Query:\nhttp://localhost:9099/api/v1/labels?match[]=service_cpm Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34; ] } Get services from a specific layer Query:\nhttp://127.0.0.1:9099/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Query specific metric for a service Query:\nhttp://127.0.0.1:9099/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1679559960, \u0026#34;6\u0026#34; ] } ] } } About the range query and different metrics type for query can refer to the document here.\nBuild Grafana Dashboard From the above, we know the mechanism and how to query from PromQL Service, now we can build the Grafana Dashboard for the above service example. Note: All the following configurations are based on Grafana version 9.1.0.\nSkyWalking Showcase provides dashboards files such as services of General and Service Mesh layers, we can quickly create a dashboard for the General layer service by importing the dashboard JSON file.\nAfter the Grafana application is deployed, follow the steps below:\nConfigure Data Source First, we need to create a data source: In the data source config panel, chose Prometheus and set the URL to the OAP server address, the default port is 9090. 
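If you manage Grafana through provisioning files instead of the UI, the equivalent data source definition looks roughly like the sketch below. The file path and the OAP address are assumptions for a local setup and should be adjusted to your environment.
# illustrative provisioning file, e.g. provisioning/datasources/skywalking.yaml
apiVersion: 1
datasources:
  - name: SkyWalking
    type: prometheus
    access: proxy
    url: http://oap:9090   # replace with your OAP server address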
Here set the data source name SkyWalking in case there are multiple Prometheus data sources.\nImport Dashboard File   Create a dashboard folder named SkyWalking.\n  Import the dashboard file into Grafana, there are two ways to get the file:\n From SkyWalking Showcase. Go to SkyWaking Demo: Preview metrics on Grafana, and export it from the General Service dashboard.    Done! Now we can see the dashboard is working, the services are in the drop-down list and the metrics are displayed on the panels.\n  This is an easy way to build, but we need to know how it works if we want to customize it.\nHow the dashboard works Dashboard Settings Open the Settings-Variables we can see the following variables:\nLet\u0026rsquo;s look at what each variable does:\n  $DS_SkyWalking\nThis is a data source ty variable that specifies the Prometheus data source which was defined earlier as SkyWalking.\n  $layer\nThis is a constant type because in the \u0026lsquo;General Service\u0026rsquo; dashboard, all services belong to the \u0026lsquo;GENERAL\u0026rsquo; layer, so they can be used directly in each query Note When you customize other layers, this value must be defined in the Layer mentioned above.\n  $service\nQuery type variable, to get all service names under this layer for the drop-down list.\nQuery expression:\nlabel_values(service_traffic{layer=\u0026#39;$layer\u0026#39;}, service) The query expression will query HTTP API /api/v1/series for service metadata in $layer and fetch the service name according to the label(service).\n  $service_instance\nSame as the $service is a query variable that is used to select all instances of the service in the drop-down list.\nQuery expression:\nlabel_values(instance_traffic{layer=\u0026#39;$layer\u0026#39;, service=\u0026#39;$service\u0026#39;}, service_instance) The query expression here not only specifies the $layer but also contains the variable $service, which is used to correlate with the services for the drop-down list.\n  $endpoint\nSame as the $service is a query variable that is used to select all endpoints of the service in the drop-down list.\nQuery expression:\nlabel_values(endpoint_traffic{layer=\u0026#39;$layer\u0026#39;, service=\u0026#39;$service\u0026#39;, keyword=\u0026#39;$endpoint_keyword\u0026#39;, limit=\u0026#39;$endpoint_limit\u0026#39;}, endpoint) The query expression here specifies the $layer and $service which are used to correlate with the services for the drop-down list. And also accept variables $endpoint_keyword and $endpoint_limit as filtering condition.\n  $endpoint_keyword\nA text type variable that the user can input to filter the return value of $endpoint.\n  $endpoint_limit\nCustom type, which the user can select to limit the maximum number of returned endpoints.\n  Panel Configurations There are several typical metrics panels on this dashboard, let\u0026rsquo;s see how it\u0026rsquo;s configured.\nCommon Value Metrics Select Time series chart panel Service Apdex and click edit.  Query expression service_apdex{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 10000 The metric scope is Service, add labels service and layer for the match, and the label value used the variables configured above. The calculation Divided by 10000 is used for matching the result units. The document for the query can refer to here.\n Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. 
Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1 hour or 1 day SkyWalking returns the hour/day step metrics values.  Labeled Value Metrics Select Time series chart panel Service Response Time Percentile and click edit.  Query expression service_percentile{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;, labels=\u0026#39;0,1,2,3,4\u0026#39;, relabels=\u0026#39;P50,P75,P90,P95,P99\u0026#39;} The metric scope is Service, add labels service and layer for the match, and the label value used the variables configured above. Add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. The document for the query can refer to here.\n Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1 hour or 1 day SkyWalking returns the hour/day step metrics values. Set Legend to {{label}} for show up.  Sort Metrics Select Time series chart panel Service Response Time Percentile and click edit.  Query expression service_instance_cpm{parent_service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;} The expression is used for query the sore metrics under service, so add labels parent_service and layer for the match. Add top_n='10' and order='DES' filter the result. The document for the query can refer to here.\n Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Set the Calculation --\u0026gt; Latest*. Set Legend to {{service_instance}} for show up.  Conclusion In this article, we introduced what is the PromQL Service in SkyWalking and its background. Detailed how to use PromQL Service and the basic concepts related to SkyWalking, and show how to use PromQL Service to build Grafana dashboards for SkyWalking.\nIn the future, there will be more integrations by leveraging this protocol, such as CI/CD, HPA (scaling), etc.\n","excerpt":"Background As an application performance monitoring tool for distributed systems, Apache SkyWalking …","ref":"/blog/2023-03-17-build-grafana-dashboards-for-apache-skywalking-native-promql-support/","title":"Build Grafana dashboards for Apache SkyWalking -- Native PromQL Support"},{"body":"","excerpt":"","ref":"/tags/grafana/","title":"Grafana"},{"body":"","excerpt":"","ref":"/tags/metric/","title":"Metric"},{"body":"","excerpt":"","ref":"/tags/promql/","title":"PromQL"},{"body":"背景 Apache SkyWalking 作为分布式系统的应用性能监控工具,提供了对云原生架构下的分布式系统的监控、跟踪、诊断能力。Prometheus 是一个开源系统监控和警报工具包,具有活跃的生态系统。特别是 Prometheus 指标通过 导出器和集成 得到广泛支持。 PromQL 作为 Prometheus 查询语言,包含一组表达式并公开 HTTP API 以读取指标。\nSkyWalking 支持通过 OpenTelemetry 收集器 摄取 Prometheus 指标,并通过这些指标的聚合计算提供多种系统监控,例如 Linux 监控和 Kubernetes 监控。SkyWalking 已经为用户提供了 原生 UI 和 GraphQL API。但为了提供更广泛的生态整合能力,从 9.4.0 开始,它提供了 PromQL 服务,已经支持 PromQL 的第三方系统或可视化平台(如 Grafana),可以通过它获取指标。SkyWalking 用户在与不同系统集成时将从中受益。\nSkyWalking 中的 PromQL 服务是什么? 
PromQL 服务是 SkyWalking 原生 GraphQL 查询之上的查询引擎,具有由 Prometheus 表达式提供支持的附加查询阶段计算能力。它可以接受 PromQL HTTP API 请求,解析 Prometheus 表达式,并在 Prometheus 指标和 SkyWalking 指标之间进行转换。\nPromQL 服务遵循 PromQL 的所有协议和语法,用户可以像使用 PromQL 一样使用它。由于 SkyWalking 在度量分类、格式、存储等方面与 Prometheus 有根本不同,因此 PromQL 服务不必实现完整的 PromQL 功能。有关详细信息,请参阅文档。\nSkyWalking 基本概念 以下是用户使用 PromQL 服务需要了解的一些基本概念和与 Prometheus 的区别: Prometheus 指标指定命名格式和结构,实际指标名称和标签由客户端提供商确定,并存储详细信息。用户使用 PromQL 中的表达式聚合和计算指标。与 Prometheus 不同,SkyWalking 的度量机制是围绕以下具有层次结构的核心概念构建的:\n  层(Layer):表示计算机科学中的一个抽象框架,如 Operating System(OS_LINUX 层)、Kubernetes(k8s 层)。该层将是从不同技术检测到的不同服务的所有者。可以在此处\n找到所有层定义。\n  服务:表示一组 / 一组工作负载,它为传入请求提供相同的行为。\n  服务实例:服务组中的单个工作负载。\n  端点:传入请求的服务路径。\n  进程:操作系统进程。在某些场景下,service instance 不是一个进程,比如一个 Kubernetes Pod 可能包含多个进程。\n  Metric 名称和属性(标签)由 SkyWalking OAP 服务器根据数据源以及 OAL 和 MAL 配置。SkyWalking 提供了对时间序列指标进行下采样(down-sampling),并生成不同时间段数据(分钟、小时、天)的能力。\nSkyWalking 指标流如下:\n流量  Service/ServiceRelation/Instance/ServiceInstanceRelation/Endpoint/EndpointRelation/Process/ProcessRelation 的元数据。包括名称、层、属性、它们之间的关系等。  指标  名称(Name):指标名称,来自 OAL 和 MAL 的配置。 实体(Entity):表示指标的归属,用于查询。一个 Entity 根据 Scope 不同会包含如下信息: Scope 代表指标级别,在查询阶段代表 Scope catalog,Scope catalog 为所有的 scope 提供了高维的分类,层次结构。     Scope 实体信息     Service 服务(包括图层信息)   ServiceInstance 服务、服务实例   Endpoint 服务、端点   ServiceRelation 服务,目标服务   ServiceInstanceRelation 服务实例、目标服务实例   EndpointRelation 端点、目标端点   Process 服务、服务实例、流程   ProcessRelation 进程、服务实例、DestProcess     值:   单值:long 标签值:文本,label1,value1|label2,value2|... ,例如 L2 aggregation,5000 | L1 aggregation,8000   TimeBucket:时间精确到分钟、小时、天  如何使用 PromQL 服务 设置 PromQL 服务在 v9.4.0 之后默认开启,不需要额外配置。例如,可以使用 OAP 环境变量配置默认端口:\nrestHost: ${SW_PROMQL_REST_HOST:0.0.0.0} restPort: ${SW_PROMQL_REST_PORT:9090} restContextPath: ${SW_PROMQL_REST_CONTEXT_PATH:/} restMaxThreads: ${SW_PROMQL_REST_MAX_THREADS:200} restIdleTimeOut: ${SW_PROMQL_REST_IDLE_TIMEOUT:30000} restAcceptQueueSize: ${SW_PROMQL_REST_QUEUE_SIZE:0} 使用 Prometheus 表达式 PromQL 通过 Prometheus 表达式匹配指标。这是一个典型的 Prometheus 指标。\n为了匹配指标,Prometheus 表达式如下:\n在 PromQL 服务中,这些保留的标签将被解析为度量名称和实体信息字段以及用于查询的其他标签。映射如下。\n   SkyWalking 概念 Prometheus 表达     指标名称 指标名称   层 标签   服务 标签   服务实例 标签 \u0026lt;服务实例\u0026gt;   端点 标签   …… ……    例如,以下表达式用于匹配查询指标:service_cpm、service_instance_cpm、endpoint_cpm\nservice_cpm {service='agent::songs', layer='GENERAL'} service_instance_cpm {service='agent::songs', service_instance='agent::songs_instance_1', layer='GENERAL'} endpoint_cpm {service='agent::songs', endpoint='GET:/songs', layer='GENERAL'} 典型查询示例 在这里,我们将 SkyWalking Showcase 部署作为 Playground 来演示如何使用 PromQL 获取 SkyWalking 指标。\n以下示例可用于通过 PromQL 服务查询服务的元数据和指标。\n获取指标名称 查询:\nhttp://localhost:9099/api/v1/label/__name__/values 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, ... 
] } 选择一个指标并获取标签 查询:\nhttp://localhost:9099/api/v1/labels?match []=service_cpm 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34; ] } 从特定层获取服务 查询:\nhttp://127.0.0.1:9099/api/v1/series?match []=service_traffic {layer='GENERAL'}\u0026amp;start=1677479336\u0026amp;end=1677479636 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } 查询服务的特定指标 查询:\nhttp://127.0.0.1:9099/api/v1/query?query=service_cpm {service='agent::songs', layer='GENERAL'} 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ {\u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; },\u0026#34;value\u0026#34;: [ 1679559960, \u0026#34;6\u0026#34; ] } ] } } 关于range query和不同的metrics type for query 可以参考 这里的 文档。\n构建 Grafana Dashboard 从上面我们知道了 PromQL 服务的机制和查询方式,现在我们可以为上面的服务示例构建 Grafana Dashboard。注:以下所有配置均基于 Grafana 9.1.0 版本。\nSkyWalking Showcase 提供了 General Service 和 Service Mesh 层等 Dashboard 文件,我们可以通过导入 Dashboard JSON 文件快速为层服务创建 Dashboard。\n部署 Grafana 应用程序后,请按照以下步骤操作:\n配置数据源 首先,我们需要创建一个数据源: 在数据源配置面板中,选择 Prometheus 并设置 URL 为 OAP 服务器地址,默认端口为 9090。 SkyWalking 如果有多个 Prometheus 数据源,请在此处设置数据源名称。\n导入 Dashboard 文件   创建一个名为 SkyWalking 的 Dashboard 文件夹。\n  将 Dashboard 文件导入到 Grafana 中,有两种获取文件的方式:\n 来自 SkyWalking Showcase 转到 SkyWaking Demo:在 Grafana 上预览指标,并将其从 General Service Dashboard 导出。    完毕!现在我们可以看到 Dashboard 正在运行,服务位于下拉列表中,指标显示在面板上。\n  这是一种简单的构建方式,但是如果我们想要自定义它,我们需要知道它是如何工作的。\nDashboard 的工作原理 Dashboard 设置 打开 Settings-Variables 我们可以看到如下变量:\n让我们看看每个变量的作用:\n  $DS_SkyWalking\n这是一个数据源 ty 变量,它指定了之前定义为 SkyWalking 的 Prometheus 数据源。\n  $layer\n这是一个常量类型,因为在 \u0026lsquo;General Service\u0026rsquo; Dashboard 中,所有服务都属于 \u0026lsquo;GENERAL\u0026rsquo; 层,因此可以在每个查询中直接使用它们。注意,当您自定义其他层时,必须在 Layer 上面定义该值。\n  $service\n查询类型变量,为下拉列表获取该层下的所有服务名称。\n查询表达式:\nlabel_values (service_traffic {layer='$layer'}, service) 查询表达式将查询 HTTP API 
/api/v1/series,以获取 $layer 中服务元数据,并根据标签(服务)提取服务名称。\n  $service_instance\n与 $service 一样,是一个查询变量,用于在下拉列表中选择服务的所有实例。\n查询表达式:\nlabel_values (instance_traffic {layer='$layer', service='$service'}, service_instance) 这里的查询表达式不仅指定了 $layer 还包含 $service 变量,用于关联下拉列表的服务。\n  $endpoint\n与 $service 一样,是一个查询变量,用于在下拉列表中选择服务的所有端点。\n查询表达式:\nlabel_values (endpoint_traffic {layer='$layer', service='$service', keyword='$endpoint_keyword', limit='$endpoint_limit'}, endpoint) 此处的查询表达式指定 $layer 和 $service 用于与下拉列表的服务相关联的。并且还接受 $endpoint_keyword 和 $endpoint_limit 变量作为过滤条件。\n  $endpoint_keyword\n一个文本类型的变量,用户可以输入它来过滤 $endpoint 的返回值。\n  $endpoint_limit\n自定义类型,用户可以选择它以限制返回端点的最大数量。\n  Dashboard 配置 这个 Dashboard 上有几个典型的指标面板,让我们看看它是如何配置的。\n普通值指标 选择 Time series chart 面板 Service Apdex 并单击 edit。\n  查询表达式\nservice_apdex {service='$service', layer='$layer'} / 10000 指标范围为 Service,添加 service 和 layer 标签用于匹配,label 值使用上面配置的变量。该计算 Divided by 10000 用于匹配结果单位。查询文档可以参考 这里。\n  设置 Query options --\u0026gt; Min interval = 1m,因为 SkyWalking 中的指标最小时间段是 1m。\n  设置 Connect null values --\u0026gt; AlwaysShow points --\u0026gt; Always,因为当查询间隔大于 1 小时或 1 天时,SkyWalking 返回小时 / 天步长指标值。\n  标签值指标 选择 Time series chart 面板 Service Response Time Percentile 并单击 edit。\n  查询表达式\nservice_percentile {service='$service', layer='$layer', labels='0,1,2,3,4', relabels='P50,P75,P90,P95,P99'} 指标范围为 Service,添加 service 和 layer 标签用于匹配,label 值使用上面配置的变量。添加 labels='0,1,2,3,4' 过滤结果标签,并添加 relabels='P50,P75,P90,P95,P99' 重命名结果标签。查询文档可以参考 这里。\n  设置 Query options --\u0026gt; Min interval = 1m,因为 SkyWalking 中的指标最小时间段是 1m。\n  设置 Connect null values --\u0026gt; AlwaysShow points --\u0026gt; Always,因为当查询间隔 \u0026gt; 1 小时或 1 天时,SkyWalking 返回小时 / 天步长指标值。\n  设置 Legend 为 {{label}} 来展示。\n  排序指标 选择 Time series chart 面板 Service Response Time Percentile 并单击 edit。\n  查询表达式\nservice_instance_cpm {parent_service='$service', layer='$layer', top_n='10', order='DES'} 该表达式用于查询服务下的排序指标,因此添加标签 parent_service 和 layer 进行匹配。添加 top_n='10' 和 order='DES' 过滤结果。查询文档可以参考 这里。\n  设置 Query options --\u0026gt; Min interval = 1m,因为 SkyWalking 中的指标最小时间段是 1m。\n  设置 Calculation --\u0026gt; Latest*。\n  设置 Legend 为 {{service_instance}} 来展示。\n  结论 在这篇文章中,我们介绍了 SkyWalking 中的 PromQL 服务是什么以及它的背景。详细介绍了 PromQL 服务的使用方法和 SkyWalking 相关的基本概念,展示了如何使用 PromQL 服务为 SkyWalking 构建 Grafana Dashboard。\n未来,将会有更多的集成利用这个协议,比如 CI/CD、HPA(缩放)等。\n","excerpt":"背景 Apache SkyWalking 作为分布式系统的应用性能监控工具,提供了对云原生架构下的分布式系统的监控、跟踪、诊断能力。Prometheus 是一个开源系统监控和警报工具包,具有活跃的生态 …","ref":"/zh/2023-03-17-build-grafana-dashboards-for-apache-skywalking-native-promql-support/","title":"为 Apache SkyWalking 构建 Grafana Dashboard —— 原生 PromQL 支持"},{"body":"Background Apache SkyWalking is an open-source application performance management system that helps users collect and aggregate logs, traces, metrics, and events, and display them on the UI. Starting from OAP 9.4.0, SkyWalking has added AWS Firehose receiver, which is used to receive and calculate the data of CloudWatch metrics. In this article, we will take DynamoDB as an example to show how to use SkyWalking to receive and calculate CloudWatch metrics data for monitoring Amazon Web Services.\nWhat are Amazon CloudWatch and Amazon Kinesis Data Firehose? Amazon CloudWatch is a metrics repository, this tool can collect raw data from AWS (e.g. DynamoDB) and process it into readable metrics in near real-time. Also, we can use Metric Stream to continuously stream CloudWatch metrics to a selected target location for near real-time delivery and low latency. 
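If you prefer the AWS CLI over the console walkthrough shown later in this article, a Metric Stream can also be described in a single call. The sketch below is only illustrative: the stream name, the Firehose delivery stream ARN and the IAM role ARN are hypothetical placeholders to replace with your own resources, while the output format opentelemetry0.7 and the AWS/DynamoDB namespace filter match the settings used in this setup.
# Hypothetical example: create a CloudWatch Metric Stream that forwards
# AWS/DynamoDB metrics to an existing Kinesis Data Firehose delivery stream.
aws cloudwatch put-metric-stream \
    --name skywalking-dynamodb-stream \
    --firehose-arn arn:aws:firehose:us-east-1:123456789012:deliverystream/skywalking-demo \
    --role-arn arn:aws:iam::123456789012:role/metric-stream-to-firehose \
    --output-format opentelemetry0.7 \
    --include-filters Namespace=AWS/DynamoDB
Either way, the result is a Metric Stream that continuously pushes CloudWatch metrics toward a Firehose delivery stream.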
SkyWalking takes advantage of this feature to create metric streams and direct them to Amazon Kinesis Data Firehose delivery streams for further transport and processing.\nAmazon Kinesis Data Firehose is an extract, transform, and load (ETL) service that reliably captures, transforms, and delivers streaming data to data lakes, data stores, and analytics services. SkyWalking takes advantage of this feature to eventually direct the metrics stream to the aws-firehose-receiver, so that the OAP can calculate and ultimately display the metrics.\nThe flow chart is as follows.\nNotice  Due to Kinesis Data Firehose specifications, the URL of the HTTP endpoint must use the HTTPS protocol and must use port 443. Also, this URL must be proxied by a Gateway and forwarded to the real aws-firehose-receiver. The TLS certificate must be signed by a CA; a self-signed certificate will not be trusted by Kinesis Data Firehose.  Setting up DynamoDB monitoring Next, let\u0026rsquo;s take DynamoDB as an example to illustrate the necessary settings on the AWS side before using the OAP to collect CloudWatch metrics:\n Go to the Kinesis Console, create a delivery stream, and select Direct PUT for Source and HTTP Endpoint for Destination. Set the HTTP Endpoint URL to the Gateway URL. The rest of the configuration options can be configured as needed.  Go to the CloudWatch Console, select Metrics-Stream in the left control panel, and click Create metric stream. Select AWS/DynamoDB for the namespace; you can also add other namespaces as needed. For Kinesis Data Firehose, select the delivery stream created in the first step. Finally, set the output format to opentelemetry0.7. The rest of the configuration options can be configured as needed.  At this point, the AWS side of the DynamoDB monitoring configuration is set up.\nSkyWalking OAP metrics processing analysis SkyWalking uses the aws-firehose-receiver to receive and decode the AWS metrics streams forwarded by the Gateway, and sends them to the OpenTelemetry receiver to be processed and transformed into SkyWalking metrics. The metrics are then analyzed and aggregated by the Meter Analysis Language (MAL) and finally presented on the UI.\nBoth the MAL and UI parts of SkyWalking can be customized by users to display the metrics data in more diverse ways. For details, please refer to the MAL doc and UI doc.\nTypical metrics analysis Scope In SkyWalking, there is the concept of scope. By using scopes, we can classify and aggregate metrics more rationally. In the monitoring of DynamoDB, two of these scopes are used - Service and Endpoint.\nService represents a set of workloads that provide the same behavior for incoming requests. It is commonly used as a cluster-level scope for services, and in AWS a user account is the closest equivalent of a cluster, so SkyWalking uses the AWS account id as the key to map AWS accounts to the Service type.\nSimilarly, Endpoint represents a logical concept, often used in services for the path of incoming requests, such as an HTTP URI path or a gRPC service class + method signature; it can also represent a table in a database. So SkyWalking maps DynamoDB tables to the Endpoint type.\nMetrics    Metric Name Meaning     AccountMaxReads / AccountMaxWrites The maximum number of read/write capacity units that can be used by an account.   AccountMaxTableLevelReads / AccountMaxTableLevelWrites The maximum number of read/write capacity units that can be used by a table or global secondary index of an account.   
AccountProvisionedReadCapacityUtilization / AccountProvisionedWriteCapacityUtilization The percentage of provisioned read/write capacity units utilized by an account.   MaxProvisionedTableReadCapacityUtilization / MaxProvisionedTableWriteCapacityUtilization The percentage of provisioned read/write capacity utilized by the highest provisioned read table or global secondary index of an account.    Above are some common account metrics (Service scope). They reflect various pieces of configuration information in DynamoDB, and by monitoring them SkyWalking can show a complete picture of how the database configuration changes over time.\n   Metric Name Meaning     ConsumedReadCapacityUnits / ConsumedWriteCapacityUnits The number of read/write capacity units consumed over the specified time period.   ReturnedItemCount The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period.   SuccessfulRequestLatency The latency of successful requests to DynamoDB or Amazon DynamoDB Streams during the specified time period.   TimeToLiveDeletedItemCount The number of items deleted by Time to Live (TTL) during the specified time period.    The above are some common table metrics (Endpoint scope), which are also aggregated into account metrics. These metrics are generally used to analyze the performance of the database, and users can use them to judge whether the database configuration is at a reasonable level. For example, users can track how much of their provisioned throughput is used through ConsumedReadCapacityUnits / ConsumedWriteCapacityUnits to judge whether the provisioned throughput of a table or account is reasonable. For more information about provisioned throughput, see Provisioned Throughput Intro.\n   Metric Name Meaning     UserErrors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period.   SystemErrors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period.   ThrottledRequests Requests to DynamoDB that exceed the provisioned throughput limits on a resource.   TransactionConflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items.    The above are some common error metrics, among which UserErrors is an account-level metric and the rest are table-level metrics. Users can set alarms on these metrics; if warnings appear, it may indicate that there are problems with how the database is being used, which users need to check and verify themselves.\nNotice SkyWalking\u0026rsquo;s metrics selection for DynamoDB comes directly from CloudWatch metrics; you can also consult the CloudWatch metrics doc for metric details.\nDemo In this section, we will demonstrate how to use terraform to create a DynamoDB table and the other AWS services that can generate metrics streams, and deploy SkyWalking to complete the metrics collection.\nFirst, you need a running gateway instance, such as NGINX, which is responsible for receiving metrics streams from AWS and forwarding them to the aws-firehose-receiver. Note that the gateway needs to be configured with certificates to accept HTTPS requests.
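Before wiring AWS up to the gateway, it can be worth sending one request through it yourself to confirm that HTTPS on port 443 is accepted and proxied to the receiver path. The sketch below is only a sanity check, with your_domain standing in for the domain that fronts your gateway; an empty POST body will not produce valid metrics, but getting an answer back from the OAP (rather than a TLS or connection error) shows that the proxy path works:

# your_domain is a placeholder for the domain in front of your gateway.
# We only care that the request is accepted over HTTPS and forwarded,
# not that the empty body is a valid Firehose delivery.
curl -v -X POST 'https://your_domain/aws/firehose/metrics' \
  -H 'Content-Type: application/json' \
  -d '{}'

Below is an example configuration for NGINX.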
The configuration does not need to be identical, as long as it can send incoming HTTPS requests to oap host:12801/aws/firehose/metrics.\nserver { listen 443 ssl; ssl_certificate /crt/test.pem; ssl_certificate_key /crt/test.key; ssl_session_timeout 5m; ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4; ssl_protocols TLSv1 TLSv1.1 TLSv1.2; ssl_prefer_server_ciphers on; location /aws/firehose/metrics { proxy_pass http://test.xyz:12801/aws/firehose/metrics; } } Deploying SkyWalking There are various ways to deploy SkyWalking, and you can get them directly from the release page.\nOf course, if you are more comfortable with Kubernetes, you can also find the appropriate deployment method from SkyWalking-kubernetes.\nPlease note that no matter which deployment method you use, please make sure that the OAP and UI version is 9.4.0 or higher and that port 12801 needs to be open.\nThe following is an example of a deployment using the helm command.\nexport SKYWALKING_RELEASE_VERSION=4.3.0 export SKYWALKING_RELEASE_NAME=skywalking export SKYWALKING_RELEASE_NAMESPACE=default helm install \u0026quot;${SKYWALKING_RELEASE_NAME}\u0026quot; \\ oci://registry-1.docker.io/apache/skywalking-helm \\ --version \u0026quot;${SKYWALKING_RELEASE_VERSION}\u0026quot; \\ -n \u0026quot;${SKYWALKING_RELEASE_NAMESPACE}\u0026quot; \\ --set oap.image.tag=9.4.0 \\ --set oap.storageType=elasticsearch \\ --set ui.image.tag=9.4.0 \\ --set oap.ports.firehose=12801 Start the corresponding AWS service The terraform configuration file is as follows (example modified inTerraform Registry - kinesis_firehose_delivery_stream):\n terraform configuration file  provider \u0026quot;aws\u0026quot; { region = \u0026quot;ap-northeast-1\u0026quot; access_key = \u0026quot;[need change]your access_key\u0026quot; secret_key = \u0026quot;[need change]your secret_key\u0026quot; } resource \u0026quot;aws_dynamodb_table\u0026quot; \u0026quot;basic-dynamodb-table\u0026quot; { name = \u0026quot;GameScores\u0026quot; billing_mode = \u0026quot;PROVISIONED\u0026quot; read_capacity = 20 write_capacity = 20 hash_key = \u0026quot;UserId\u0026quot; range_key = \u0026quot;GameTitle\u0026quot; attribute { name = \u0026quot;UserId\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;GameTitle\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;TopScore\u0026quot; type = \u0026quot;N\u0026quot; } ttl { attribute_name = \u0026quot;TimeToExist\u0026quot; enabled = true } global_secondary_index { name = \u0026quot;GameTitleIndex\u0026quot; hash_key = \u0026quot;GameTitle\u0026quot; range_key = \u0026quot;TopScore\u0026quot; write_capacity = 10 read_capacity = 10 projection_type = \u0026quot;INCLUDE\u0026quot; non_key_attributes = [\u0026quot;UserId\u0026quot;] } tags = { Name = \u0026quot;dynamodb-table-1\u0026quot; Environment = \u0026quot;production\u0026quot; } } resource \u0026quot;aws_cloudwatch_metric_stream\u0026quot; \u0026quot;main\u0026quot; { name = \u0026quot;my-metric-stream\u0026quot; role_arn = aws_iam_role.metric_stream_to_firehose.arn firehose_arn = aws_kinesis_firehose_delivery_stream.http_stream.arn output_format = \u0026quot;opentelemetry0.7\u0026quot; include_filter { namespace = \u0026quot;AWS/DynamoDB\u0026quot; } } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;streams_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; 
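# Trust policy: lets the CloudWatch metric stream service (streams.metrics.cloudwatch.amazonaws.com)
# assume the metric_stream_to_firehose role defined below; the separate policy attached to that role
# grants the firehose:PutRecord / firehose:PutRecordBatch permissions the stream needs.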
principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;streams.metrics.cloudwatch.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;metric_stream_to_firehose_role\u0026quot; assume_role_policy = data.aws_iam_policy_document.streams_assume_role.json } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;firehose:PutRecord\u0026quot;, \u0026quot;firehose:PutRecordBatch\u0026quot;, ] resources = [aws_kinesis_firehose_delivery_stream.http_stream.arn] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.metric_stream_to_firehose.id policy = data.aws_iam_policy_document.metric_stream_to_firehose.json } resource \u0026quot;aws_s3_bucket\u0026quot; \u0026quot;bucket\u0026quot; { bucket = \u0026quot;metric-stream-test-bucket\u0026quot; } resource \u0026quot;aws_s3_bucket_acl\u0026quot; \u0026quot;bucket_acl\u0026quot; { bucket = aws_s3_bucket.bucket.id acl = \u0026quot;private\u0026quot; } data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;firehose.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { assume_role_policy = data.aws_iam_policy_document.firehose_assume_role.json } data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;s3:AbortMultipartUpload\u0026quot;, \u0026quot;s3:GetBucketLocation\u0026quot;, \u0026quot;s3:GetObject\u0026quot;, \u0026quot;s3:ListBucket\u0026quot;, \u0026quot;s3:ListBucketMultipartUploads\u0026quot;, \u0026quot;s3:PutObject\u0026quot;, ] resources = [ aws_s3_bucket.bucket.arn, \u0026quot;${aws_s3_bucket.bucket.arn}/*\u0026quot;, ] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.firehose_to_s3.id policy = data.aws_iam_policy_document.firehose_to_s3.json } resource \u0026quot;aws_kinesis_firehose_delivery_stream\u0026quot; \u0026quot;http_stream\u0026quot; { name = \u0026quot;metric-stream-test-stream\u0026quot; destination = \u0026quot;http_endpoint\u0026quot; http_endpoint_configuration { name = \u0026quot;test_http_endpoint\u0026quot; url = \u0026quot;[need change]Gateway url\u0026quot; role_arn = aws_iam_role.firehose_to_s3.arn } s3_configuration { role_arn = aws_iam_role.firehose_to_s3.arn bucket_arn = aws_s3_bucket.bucket.arn } }  Steps to use.\n  Get the access_key and secret_key of the AWS account.( For how to get them, please refer to create-access-key )\n  Fill in the access_key and secret_key you got in the previous step, and fill in the corresponding URL of your gateway in the corresponding location of aws_kinesis_firehose_delivery_stream configuration.\n  Copy the above content and save it to the main.tf file.\n  Execute the following code in the corresponding path.\n  terraform 
init terraform apply At this point, all the required AWS services have been successfully created, and you can check your console to see if the services were successfully created.\nDone! If all the above steps were successful, please wait for about five minutes. After that, you can visit the SkyWalking UI to see the metrics.\nCurrently, the metrics collected by SkyWalking by default are displayed as follows.\naccount metrics:\ntable metrics:\nOther services Currently, SkyWalking officially supports EKS, S3, DynamoDB monitoring. Users also refer to the OpenTelemetry receiver to configure OTel rules to collect and analyze CloudWatch metrics of other AWS services and display them through a custom dashboard.\nMaterial  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","excerpt":"Background Apache SkyWalking is an open-source application performance management system that helps …","ref":"/blog/2023-03-13-skywalking-aws-dynamodb/","title":"Monitoring DynamoDB with SkyWalking"},{"body":"背景 Apache SkyWalking 是一个开源应用性能管理系统,帮助用户收集和聚合日志、追踪、指标和事件,并在 UI 上显示。从 OAP 9.4.0 开始,SkyWalking 新增了 AWS Firehose receiver,用来接收,计算CloudWatch metrics的数据。本文将以DynamoDB为例,展示如何使用 SkyWalking接收并计算 CloudWatch metrics 数据,以监控Amazon Web Services。\n什么是 Amazon CloudWatch 与 Amazon Kinesis Data Firehose ? Amazon CloudWatch 是一个指标存储库, 此工具可从 AWS中 ( 如 DynamoDB ) 收集原始数据,近实时处理为可读取的指标。同时,我们也可以使用指标流持续地将 CloudWatch 指标流式传输到所选的目标位置,实现近实时传送和低延迟。SkyWalking 利用此特性,创建指标流并将其导向 Amazon Kinesis Data Firehose 传输流,并由后者进一步传输处理。\nAmazon Kinesis Data Firehose是一项提取、转换、加载服务,可以将流式处理数据以可靠方式捕获、转换和提供到数据湖、数据存储和分析服务中。SkyWalking利用此特性,将指标流最终导向 aws-firehose-receiver,交由OAP计算并最终展示指标。\n整体过程流程图如下:\n注意  由于 Kinesis Data Firehose 规定,HTTP端点的URL必须使用HTTPS协议,且必须使用443端口。同时,此URL必须由Gateway代理并转发到真正的aws-firehose-receiver。 TLS 证书必须由CA签发的,自签证书不会被 Kinesis Data Firehose 信任。  设置DynamoDB监控 接下来以DynamoDB为例说明使用OAP 收集CloudWatch metrics 前,aws中必要的设置:\n 进入 Kinesis 控制台,创建数据流, Source选择 Direct PUT, Destination 选择 HTTP Endpoint. 
并且设置HTTP Endpoint URL 为 Gateway对应URL。 其余配置选项可由需要自行配置。  进入 CloudWatch 控制台,在左侧控制面板中选择Metrics-Stream,点击Create metric stream。其中,namespace 选择 AWS/DynamoDB。同时,根据需要,也可以增加其他命名空间。 Kinesis Data Firehose选择在第一步中创建好的数据流。最后,设置输出格式为opentelemetry0.7。其余配置选项可由需要自行配置。  至此,DynamoDB监控配置的AWS方面设置完成。\nSkyWalking OAP 指标处理分析 SkyWalking 利用 aws-firehose-receiver 接收并解码由Gateway转发来的 AWS 指标流,交由Opentelemetry-receiver进行处理,转化为SkyWalking metrics。并由Meter Analysis Language (MAL)进行指标的分析与聚合,最终呈现在UI上。\n其中 MAL 部分以及 UI 部分,SkyWalking支持用户自由定制,从而更多样性的展示指标数据。详情请参考MAL doc 以及 UI doc。\n典型指标分析 作用域 SkyWalking中,有作用域 ( scope ) 的概念。通过作用域, 我们可以对指标进行更合理的分类与聚合。在对DynamoDB的监控中,使用到了其中两种作用域———Service和Endpoint。\nService表示一组工作负荷,这些工作负荷为传入请求提供相同的行为。常用作服务的集群级别作用域,在AWS中,用户的账户更接近集群的概念。 所以SkyWalking将AWS account id作为key,将AWS账户映射为Service类型。\n同理,Endpoint表示一种逻辑概念,常用于服务中用于传入请求的路径,例如 HTTP URI 路径或 gRPC 服务类 + 方法签名,也可以表示数据库中的表结构。所以SkyWalking将DynamoDB表映射为Endpoint类型。\n指标    指标名称 含义     AccountMaxReads / AccountMaxWrites 账户可以使用的最大 读取/写入 容量单位数。   AccountMaxTableLevelReads / AccountMaxTableLevelWrites 账户的表或全局二级索引可以使用的最大 读取/写入 容量单位数。   AccountProvisionedReadCapacityUtilization / AccountProvisionedWriteCapacityUtilization 账户使用的预置 读取/写入 容量单位百分比。   MaxProvisionedTableReadCapacityUtilization / MaxProvisionedTableWriteCapacityUtilization 账户的最高预调配 读取/写入 表或全局二级索引使用的预调配读取容量单位百分比。    以上为一些常用的账户指标(Serivce 作用域)。它们是DynamoDB中的各种配置信息,SkyWalking通过对这些指标的监控,可以完整的展示出数据库配置的变动情况。\n   指标名称 含义     ConsumedReadCapacityUnits / ConsumedWriteCapacityUnits 指定时间段内占用的 读取/写入 容量单位数   ReturnedItemCount Query、Scan 或 ExecuteStatement(可选择)操作在指定时段内返回的项目数。   SuccessfulRequestLatency 指定时间段内对于 DynamoDB 或 Amazon DynamoDB Streams 的成功请求的延迟。   TimeToLiveDeletedItemCount 指定时间段内按存活时间 (TTL) 删除的项目数。    以上为一些常用的表指标(Endpoint作用域),它们也会被聚合到账户指标中。这些指标一般用于分析数据库的性能,用户可以通过它们判断出数据库配置的合理程度。例如,用户可以通过ConsumedReadCapicityUnits / ConsumedReadCapicityUnits,跟踪预置吞吐量的使用,从而判断表或账户的预制吞吐量的合理性。关于预置吞吐量,请参见读/写容量模式。\n   指标名称 含义     UserErrors 在指定时间段内生成 HTTP 400 状态代码的对 DynamoDB 或 Amazon DynamoDB Streams 的请求。HTTP 400 通常表示客户端错误,如参数组合无效,尝试更新不存在的表或请求签名错误。   SystemErrors 在指定的时间段内生成 HTTP 500 状态代码的对 DynamoDB 或 Amazon DynamoDB Streams 的请求。HTTP 500 通常指示内部服务错误。   ThrottledRequests 超出资源(如表或索引)预置吞吐量限制的 DynamoDB 请求。   TransactionConflict 由于同一项目的并发请求之间的事务性冲突而被拒绝的项目级请求。    以上为一些常用的错误指标,其中UserErrors为用户级别指标,其余为表级别指标。用户可以在这些指标上设置告警,如果警告出现,那么可能说明数据库的使用出现了一些问题,需要用户自行查看验证。\n注意 SkyWalking对于DynamoDB的指标选取直接来源于CloudWatch metrics, 您也可以通过CloudWatch metrics doc来获取指标详细信息。\nDemo 在本节中,我们将演示如何利用terraform创建一个DynamoDB表,以及可以产生指标流的其他AWS服务,并部署Skywalking完成指标收集。\n首先,您需要一个正在运行的网关实例,例如 NGINX,它负责接收AWS传来的指标流并且转发到aws-firehose-receiver。注意, 网关需要配置证书以便接受HTTPS协议的请求。\n下面是一个NGINX的示例配置。配置不要求完全一致,只要能将收到的HTTPS请求发送到oap所在host:12801/aws/firehose/metrics即可。\nserver { listen 443 ssl; ssl_certificate /crt/test.pem; ssl_certificate_key /crt/test.key; ssl_session_timeout 5m; ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4; ssl_protocols TLSv1 TLSv1.1 TLSv1.2; ssl_prefer_server_ciphers on; location /aws/firehose/metrics { proxy_pass http://test.xyz:12801/aws/firehose/metrics; } } 部署SkyWalking SkyWalking的部署方式有很多种,您可以直接从release页面中直接获取。\n当然,如果您更习惯于 Kubernetes,您也可以从SkyWalking-kubernetes找到相应部署方式。\n请注意,无论使用哪种部署方式,请确保OAP和UI的版本为9.4.0以上,并且需要开放12801端口。\n下面是一个使用helm指令部署的示例:\nexport SKYWALKING_RELEASE_VERSION=4.3.0 export SKYWALKING_RELEASE_NAME=skywalking export SKYWALKING_RELEASE_NAMESPACE=default helm install \u0026quot;${SKYWALKING_RELEASE_NAME}\u0026quot; \\ oci://registry-1.docker.io/apache/skywalking-helm \\ --version \u0026quot;${SKYWALKING_RELEASE_VERSION}\u0026quot; \\ 
-n \u0026quot;${SKYWALKING_RELEASE_NAMESPACE}\u0026quot; \\ --set oap.image.tag=9.4.0 \\ --set oap.storageType=elasticsearch \\ --set ui.image.tag=9.4.0 \\ --set oap.ports.firehose=12801 开启对应AWS服务 terraform 配置文件如下(实例修改于Terraform Registry - kinesis_firehose_delivery_stream):\n terraform 配置文件  provider \u0026quot;aws\u0026quot; { region = \u0026quot;ap-northeast-1\u0026quot; access_key = \u0026quot;在这里填入您的access_key\u0026quot; secret_key = \u0026quot;在这里填入您的secret_key\u0026quot; } resource \u0026quot;aws_dynamodb_table\u0026quot; \u0026quot;basic-dynamodb-table\u0026quot; { name = \u0026quot;GameScores\u0026quot; billing_mode = \u0026quot;PROVISIONED\u0026quot; read_capacity = 20 write_capacity = 20 hash_key = \u0026quot;UserId\u0026quot; range_key = \u0026quot;GameTitle\u0026quot; attribute { name = \u0026quot;UserId\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;GameTitle\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;TopScore\u0026quot; type = \u0026quot;N\u0026quot; } ttl { attribute_name = \u0026quot;TimeToExist\u0026quot; enabled = true } global_secondary_index { name = \u0026quot;GameTitleIndex\u0026quot; hash_key = \u0026quot;GameTitle\u0026quot; range_key = \u0026quot;TopScore\u0026quot; write_capacity = 10 read_capacity = 10 projection_type = \u0026quot;INCLUDE\u0026quot; non_key_attributes = [\u0026quot;UserId\u0026quot;] } tags = { Name = \u0026quot;dynamodb-table-1\u0026quot; Environment = \u0026quot;production\u0026quot; } } resource \u0026quot;aws_cloudwatch_metric_stream\u0026quot; \u0026quot;main\u0026quot; { name = \u0026quot;my-metric-stream\u0026quot; role_arn = aws_iam_role.metric_stream_to_firehose.arn firehose_arn = aws_kinesis_firehose_delivery_stream.http_stream.arn output_format = \u0026quot;opentelemetry0.7\u0026quot; include_filter { namespace = \u0026quot;AWS/DynamoDB\u0026quot; } } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;streams_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;streams.metrics.cloudwatch.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;metric_stream_to_firehose_role\u0026quot; assume_role_policy = data.aws_iam_policy_document.streams_assume_role.json } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;firehose:PutRecord\u0026quot;, \u0026quot;firehose:PutRecordBatch\u0026quot;, ] resources = [aws_kinesis_firehose_delivery_stream.http_stream.arn] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.metric_stream_to_firehose.id policy = data.aws_iam_policy_document.metric_stream_to_firehose.json } resource \u0026quot;aws_s3_bucket\u0026quot; \u0026quot;bucket\u0026quot; { bucket = \u0026quot;metric-stream-test-bucket\u0026quot; } resource \u0026quot;aws_s3_bucket_acl\u0026quot; \u0026quot;bucket_acl\u0026quot; { bucket = aws_s3_bucket.bucket.id acl = \u0026quot;private\u0026quot; } data 
\u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;firehose.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { assume_role_policy = data.aws_iam_policy_document.firehose_assume_role.json } data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;s3:AbortMultipartUpload\u0026quot;, \u0026quot;s3:GetBucketLocation\u0026quot;, \u0026quot;s3:GetObject\u0026quot;, \u0026quot;s3:ListBucket\u0026quot;, \u0026quot;s3:ListBucketMultipartUploads\u0026quot;, \u0026quot;s3:PutObject\u0026quot;, ] resources = [ aws_s3_bucket.bucket.arn, \u0026quot;${aws_s3_bucket.bucket.arn}/*\u0026quot;, ] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.firehose_to_s3.id policy = data.aws_iam_policy_document.firehose_to_s3.json } resource \u0026quot;aws_kinesis_firehose_delivery_stream\u0026quot; \u0026quot;http_stream\u0026quot; { name = \u0026quot;metric-stream-test-stream\u0026quot; destination = \u0026quot;http_endpoint\u0026quot; http_endpoint_configuration { name = \u0026quot;test_http_endpoint\u0026quot; url = \u0026quot;这里填入Gateway的url\u0026quot; role_arn = aws_iam_role.firehose_to_s3.arn } s3_configuration { role_arn = aws_iam_role.firehose_to_s3.arn bucket_arn = aws_s3_bucket.bucket.arn } }  使用步骤:\n1.获取AWS账户的access_key以及secret_key。( 关于如何获取,请参考:create-access-key )\n2.将上一步中获取的access_key与secret_key填入对应位置,并将您的网关对应 url 填入 aws_kinesis_firehose_delivery_stream 配置的对应位置中。\n3.复制以上内容并保存到main.tf文件中。\n4.在对应路径下执行以下代码。\nterraform init terraform apply 至此,需要的AWS服务已全部建立成功,您可以检查您的控制台,查看服务是否成功创建。\n完成! 如果以上步骤全部成功,请耐心等待约五分钟。之后您可以访问SkyWalking UI,查看指标变动情况\n目前,SkyWalking 默认收集的指标展示如下:\n账户指标:\n表指标:\n现已支持的服务 目前SkyWalking官方支持EKS,S3,DynamoDB监控。 用户也参考 OpenTelemetry receiver 配置OTEL rules来收集,计算AWS其他服务的CloudWatch metrics,并且通过自定义dashboard展示。\n相关的资料  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","excerpt":"背景 Apache SkyWalking 是一个开源应用性能管理系统,帮助用户收集和聚合日志、追踪、指标和事件,并在 UI 上显示。从 OAP 9.4.0 开始,SkyWalking 新增了 AWS …","ref":"/zh/2023-03-13-skywalking-aws-dynamodb/","title":"使用SkyWalking监控DynamoDB"},{"body":"SKyWalking OAP\u0026rsquo;s existing OpenTelemetry receiver can receive metrics through the OTLP protocol, and use MAL to analyze related metrics in real time. Starting from OAP 9.4.0, SkyWalking has added an AWS Firehose receiver to receive and analyze CloudWatch metrics data. This article will take EKS and S3 as examples to introduce the process of SkyWalking OAP receiving and analyzing the indicator data of AWS services.\nEKS OpenTelemetry Collector OpenTelemetry (OTel) is a series of tools, APIs, and SDKs that can generate, collect, and export telemetry data, such as metrics, logs, and traces. OTel Collector is mainly responsible for collecting, processing, and exporting. For telemetry data, Collector consists of the following main components:\n Receiver: Responsible for obtaining telemetry data, different receivers support different data sources, such as prometheus, kafka, otlp. 
Processor: Processes data between the receiver and exporter, such as adding or deleting attributes. Exporter: Responsible for sending data to different backends, such as kafka, SkyWalking OAP (via OTLP). Service: Configures which components are enabled; only components configured here will be enabled.  OpenTelemetry Protocol Specification (OTLP) OTLP mainly describes how to receive (pull) metric data through the gRPC and HTTP protocols. The OpenTelemetry receiver of SkyWalking OAP implements the OTLP/gRPC protocol, so metric data can be exported to the OAP through the OTLP/gRPC exporter. The data flow of a typical Collector is as follows:\nMonitor EKS with OTel EKS monitoring is implemented through OTel: you only need to deploy the OpenTelemetry Collector in the EKS cluster as a DaemonSet  \u0026ndash; use the AWS Container Insights Receiver as the receiver, and set the address of the otlp exporter to the address of the OAP. In addition, note that the OAP uses the attribute job_name : aws-cloud-eks-monitoring as the identifier of EKS metrics, so a processor must be configured in the collector to add this attribute.\nOTel Collector configuration demo extensions:health_check:receivers:awscontainerinsightreceiver:processors:# To enable OAP to correctly identify EKS metrics, add the job_name attributeresource/job-name:attributes:- key:job_name value:aws-cloud-eks-monitoringaction:insert # Specify OAP as exportersexporters:otlp:endpoint:oap-service:11800 tls:insecure:truelogging:loglevel:debug service:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]By default, SkyWalking OAP collects network, disk, CPU and other related metric data across the three dimensions of Node, Pod, and Service. 
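If the dashboards stay empty, it may help to first confirm that the collector is actually exporting data. A minimal check, assuming the collector was deployed as the aws-otel-eks-ci DaemonSet in the aws-otel-eks namespace (as in the complete configuration further below), is to watch its logs: with the logging exporter at debug level, as in the demo config above, exported metrics are printed there, and export errors toward the OAP endpoint also show up:

# DaemonSet and namespace names come from the sample manifest below; adjust if yours differ.
kubectl -n aws-otel-eks logs daemonset/aws-otel-eks-ci --tail=100

Once metrics are flowing, the default EKS dashboards on the SkyWalking UI should begin to populate.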
Only part of the content is shown here.\nPod dimensions Service dimensions EKS monitoring complete configuration  Click here to view complete k8s resource configuration  apiVersion:v1kind:ServiceAccountmetadata:name:aws-otel-sanamespace:aws-otel-eks---kind:ClusterRoleapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-rolerules:- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;pods\u0026#34;,\u0026#34;nodes\u0026#34;,\u0026#34;endpoints\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;apps\u0026#34;]resources:[\u0026#34;replicasets\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;batch\u0026#34;]resources:[\u0026#34;jobs\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/proxy\u0026#34;]verbs:[\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/stats\u0026#34;,\u0026#34;configmaps\u0026#34;,\u0026#34;events\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;configmaps\u0026#34;]resourceNames:[\u0026#34;otel-container-insight-clusterleader\u0026#34;]verbs:[\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]- apiGroups:[\u0026#34;coordination.k8s.io\u0026#34;]resources:[\u0026#34;leases\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]---kind:ClusterRoleBindingapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-role-bindingsubjects:- kind:ServiceAccountname:aws-otel-sanamespace:aws-otel-eksroleRef:kind:ClusterRolename:aoc-agent-roleapiGroup:rbac.authorization.k8s.io---apiVersion:v1kind:ConfigMapmetadata:name:otel-agent-confnamespace:aws-otel-ekslabels:app:opentelemetrycomponent:otel-agent-confdata:otel-agent-config:|extensions: health_check: receivers: awscontainerinsightreceiver: processors: resource/job-name: attributes: - key: job_name value: aws-cloud-eks-monitoring action: insert exporters: otlp: endpoint: oap-service:11800 tls: insecure: true logging: loglevel: debug service: pipelines: metrics: receivers: [awscontainerinsightreceiver] processors: [resource/job-name] exporters: [otlp,logging] extensions: [health_check]---apiVersion:apps/v1kind:DaemonSetmetadata:name:aws-otel-eks-cinamespace:aws-otel-eksspec:selector:matchLabels:name:aws-otel-eks-citemplate:metadata:labels:name:aws-otel-eks-cispec:containers:- name:aws-otel-collectorimage:amazon/aws-otel-collector:v0.23.0env:# Specify region- name:AWS_REGIONvalue:\u0026#34;ap-northeast-1\u0026#34;- name:K8S_NODE_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:HOST_IPvalueFrom:fieldRef:fieldPath:status.hostIP- name:HOST_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:K8S_NAMESPACEvalueFrom:fieldRef:fieldPath:metadata.namespaceimagePullPolicy:Alwayscommand:- \u0026#34;/awscollector\u0026#34;- \u0026#34;--config=/conf/otel-agent-config.yaml\u0026#34;volumeMounts:- name:rootfsmountPath:/rootfsreadOnly:true- name:dockersockmountPath:/var/run/docker.sockreadOnly:true- name:varlibdockermountPath:/var/lib/dockerreadOnly:true- name:containerdsockmountPath:/run/containerd/containerd.sockreadOnly:true- name:sysmountPath:/sysreadOnly:true- name:devdiskmountPath:/dev/diskreadOnly:true- name:otel-agent-config-volmountPath:/conf- name:otel-output-vol mountPath:/otel-outputresources:limits:cpu:200mmemory:200Mirequests:cpu:200mmemory:200Mivolumes:- configMap:name:otel-agent-confitems:- 
key:otel-agent-configpath:otel-agent-config.yamlname:otel-agent-config-vol- name:rootfshostPath:path:/- name:dockersockhostPath:path:/var/run/docker.sock- name:varlibdockerhostPath:path:/var/lib/docker- name:containerdsockhostPath:path:/run/containerd/containerd.sock- name:syshostPath:path:/sys- name:devdiskhostPath:path:/dev/disk/- name:otel-output-vol hostPath:path:/otel-outputserviceAccountName:aws-otel-sa S3 Amazon CloudWatch Amazon CloudWatch is a monitoring service provided by AWS. It is responsible for collecting indicator data of AWS services and resources. CloudWatch metrics stream is responsible for converting indicator data into stream processing data, and supports output in two formats: json and OTel v0.7.0.\nAmazon Kinesis Data Firehose (Firehose) Firehose is an extract, transform, load (ETL) service that reliably captures, transforms, and serves streaming data into data lakes, data stores (such as S3), and analytics services.\nTo ensure that external services can correctly receive indicator data, AWS provides Kinesis Data Firehose HTTP Endpoint Delivery Request and Response Specifications (Firehose Specifications) . Firhose pushes Json data by POST\nJson data example { \u0026#34;requestId\u0026#34;: \u0026#34;ed4acda5-034f-9f42-bba1-f29aea6d7d8f\u0026#34;, \u0026#34;timestamp\u0026#34;: 1578090901599 \u0026#34;records\u0026#34;: [ { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8=\u0026#34; }, { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8gd29ybGQ=\u0026#34; } ] }  requestId: Request id, which can achieve deduplication and debugging purposes. timestamp: Firehose generated the timestamp of the request (in milliseconds). records: Actual delivery records  data: The delivered data, encoded in base64, can be in json or OTel v0.7.0 format, depending on the format of CloudWatch data (described later). Skywalking currently supports OTel v0.7.0 format.    aws-firehose-receiver aws-firehose-receiver provides an HTTP Endpoint that implements Firehose Specifications: /aws/firehose/metrics. The figure below shows the data flow of monitoring DynamoDB, S3 and other services through CloudWatch, and using Firehose to send indicator data to SKywalking OAP.\nStep-by-step setup of S3 monitoring  Enter the S3 console and create a filter forRequest metrics: Amazon S3 \u0026gt;\u0026gt; Buckets \u0026gt;\u0026gt; (Your Bucket) \u0026gt;\u0026gt; Metrics \u0026gt;\u0026gt; metrics \u0026gt;\u0026gt; View additional charts \u0026gt;\u0026gt; Request metrics  Enter the Amazon Kinesis console, create a delivery stream, Source select Direct PUT, Destination select HTTP Endpoint. And set HTTP endpoint URL to https://your_domain/aws/firehose/metrics. Other configuration items:   Buffer hints: Set the size and period of the cache Access key just matches the AccessKey in aws-firehose-receiver Retry duration: Retry period Backup settings: Backup settings, optionally backup the posted data to S3 at the same time.  Enter the CloudWatch console Streams and click Create CloudWatch Stream. And Select your Kinesis Data Firehose stream configure the delivery stream created in the second step in the item. Note that it needs to be set Change output format to OpenTelemetry v0.7.0.  At this point, the S3 monitoring configuration settings are complete. The S3 metrics currently collected by SkyWalking by default are shown below:\nOther service Currently SkyWalking officially supports EKS, S3, DynamoDB monitoring. 
Users also refer to the OpenTelemetry receiver to configure OTel rules to collect and analyze CloudWatch metrics of other AWS services, and display them through a custom dashboard.\nMaterial  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","excerpt":"SKyWalking OAP\u0026rsquo;s existing OpenTelemetry receiver can receive metrics through the OTLP …","ref":"/blog/2023-03-12-skywalking-aws-s3-eks/","title":"Monitoring AWS EKS and S3 with SkyWalking"},{"body":"SKyWalking OAP 现有的 OpenTelemetry receiver 可以通过OTLP协议接收指标(metrics),并且使用MAL实时分析相关指标。从OAP 9.4.0开始,SkyWalking 新增了AWS Firehose receiver,用来接收,分析CloudWatch metrics数据。本文将以EKS和S3为例介绍SkyWalking OAP 接收,分析 AWS 服务的指标数据的过程\nEKS OpenTelemetry Collector OpenTelemetry (OTel) 是一系列tools,API,SDK,可以生成,收集,导出遥测数据,比如 指标(metrics),日志(logs)和链路信息(traces),而OTel Collector主要负责收集、处理和导出遥测数据,Collector由以下主要组件组成:\n receiver: 负责获取遥测数据,不同的receiver支持不同的数据源,比如prometheus ,kafka,otlp, processor:在receiver和exporter之间处理数据,比如增加或者删除attributes, exporter:负责发送数据到不同的后端,比如kafka,SkyWalking OAP(通过OTLP) service: 作为一个单元配置启用的组件,只有配置的组件才会被启用  OpenTelemetry Protocol Specification(OTLP) OTLP 主要描述了如何通过gRPC,HTTP协议接收(拉取)指标数据。SKyWalking OAP的 OpenTelemetry receiver 实现了OTLP/gRPC协议,通过OTLP/gRPC exporter可以将指标数据导出到OAP。通常一个Collector的数据流向如下:\n使用OTel监控EKS EKS的监控就是通过OTel实现的,只需在EKS集群中以DaemonSet  的方式部署 OpenTelemetry Collector,使用 AWS Container Insights Receiver 作为receiver,并且设置otlp exporter的地址为OAP的的地址即可。另外需要注意的是OAP根据attribute job_name : aws-cloud-eks-monitoring 作为EKS metrics的标识,所以还需要再collector中配置一个processor来增加这个属性\nOTel Collector配置demo extensions:health_check:receivers:awscontainerinsightreceiver:processors:# 为了OAP能够正确识别EKS metrics,增加job_name attributeresource/job-name:attributes:- key:job_name value:aws-cloud-eks-monitoringaction:insert # 指定OAP作为 exportersexporters:otlp:endpoint:oap-service:11800 tls:insecure:truelogging:loglevel:debug service:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]SkyWalking OAP 默认统计 Node,Pod,Service 三个维度的网络、磁盘、CPU等相关的指标数据,这里仅展示了部分内容\nPod 维度 Service 维度 EKS监控完整配置  Click here to view complete k8s resource configuration  apiVersion:v1kind:ServiceAccountmetadata:name:aws-otel-sanamespace:aws-otel-eks---kind:ClusterRoleapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-rolerules:- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;pods\u0026#34;,\u0026#34;nodes\u0026#34;,\u0026#34;endpoints\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;apps\u0026#34;]resources:[\u0026#34;replicasets\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;batch\u0026#34;]resources:[\u0026#34;jobs\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/proxy\u0026#34;]verbs:[\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/stats\u0026#34;,\u0026#34;configmaps\u0026#34;,\u0026#34;events\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;configmaps\u0026#34;]resourceNames:[\u0026#34;otel-container-insight-clusterleader\u0026#34;]verbs:[\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]- 
apiGroups:[\u0026#34;coordination.k8s.io\u0026#34;]resources:[\u0026#34;leases\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]---kind:ClusterRoleBindingapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-role-bindingsubjects:- kind:ServiceAccountname:aws-otel-sanamespace:aws-otel-eksroleRef:kind:ClusterRolename:aoc-agent-roleapiGroup:rbac.authorization.k8s.io---apiVersion:v1kind:ConfigMapmetadata:name:otel-agent-confnamespace:aws-otel-ekslabels:app:opentelemetrycomponent:otel-agent-confdata:otel-agent-config:|extensions: health_check: receivers: awscontainerinsightreceiver: processors: resource/job-name: attributes: - key: job_name value: aws-cloud-eks-monitoring action: insert exporters: otlp: endpoint: oap-service:11800 tls: insecure: true logging: loglevel: debug service: pipelines: metrics: receivers: [awscontainerinsightreceiver] processors: [resource/job-name] exporters: [otlp,logging] extensions: [health_check]---apiVersion:apps/v1kind:DaemonSetmetadata:name:aws-otel-eks-cinamespace:aws-otel-eksspec:selector:matchLabels:name:aws-otel-eks-citemplate:metadata:labels:name:aws-otel-eks-cispec:containers:- name:aws-otel-collectorimage:amazon/aws-otel-collector:v0.23.0env:# Specify region- name:AWS_REGIONvalue:\u0026#34;ap-northeast-1\u0026#34;- name:K8S_NODE_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:HOST_IPvalueFrom:fieldRef:fieldPath:status.hostIP- name:HOST_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:K8S_NAMESPACEvalueFrom:fieldRef:fieldPath:metadata.namespaceimagePullPolicy:Alwayscommand:- \u0026#34;/awscollector\u0026#34;- \u0026#34;--config=/conf/otel-agent-config.yaml\u0026#34;volumeMounts:- name:rootfsmountPath:/rootfsreadOnly:true- name:dockersockmountPath:/var/run/docker.sockreadOnly:true- name:varlibdockermountPath:/var/lib/dockerreadOnly:true- name:containerdsockmountPath:/run/containerd/containerd.sockreadOnly:true- name:sysmountPath:/sysreadOnly:true- name:devdiskmountPath:/dev/diskreadOnly:true- name:otel-agent-config-volmountPath:/conf- name:otel-output-vol mountPath:/otel-outputresources:limits:cpu:200mmemory:200Mirequests:cpu:200mmemory:200Mivolumes:- configMap:name:otel-agent-confitems:- key:otel-agent-configpath:otel-agent-config.yamlname:otel-agent-config-vol- name:rootfshostPath:path:/- name:dockersockhostPath:path:/var/run/docker.sock- name:varlibdockerhostPath:path:/var/lib/docker- name:containerdsockhostPath:path:/run/containerd/containerd.sock- name:syshostPath:path:/sys- name:devdiskhostPath:path:/dev/disk/- name:otel-output-vol hostPath:path:/otel-outputserviceAccountName:aws-otel-sa S3 Amazon CloudWatch Amazon CloudWatch 是AWS提供的监控服务,负责收集AWS 服务,资源的指标数据,CloudWatch metrics stream 负责将指标数据转换为流式处理数据,支持输出json,OTel v0.7.0 两种格式。\nAmazon Kinesis Data Firehose (Firehose) Firehose 是一项提取、转换、加载(ETL)服务,可以将流式处理数据以可靠方式捕获、转换和提供到数据湖、数据存储(比如S3)和分析服务中。\n为了确保外部服务能够正确地接收指标数据, AWS提供了 Kinesis Data Firehose HTTP Endpoint Delivery Request and Response Specifications (Firehose Specifications)。Firhose以POST的方式推送Json数据\nJson数据示例 { \u0026#34;requestId\u0026#34;: \u0026#34;ed4acda5-034f-9f42-bba1-f29aea6d7d8f\u0026#34;, \u0026#34;timestamp\u0026#34;: 1578090901599 \u0026#34;records\u0026#34;: [ { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8=\u0026#34; }, { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8gd29ybGQ=\u0026#34; } ] }  requestId: 请求id,可以实现去重,debug目的 timestamp: Firehose 产生该请求的时间戳(毫秒) records: 实际投递的记录  data: 投递的数据,以base64编码数据,可以是json或者OTel v0.7.0格式,取决于CloudWatch数据数据的格式(稍后会有描述)。Skywalking目前支持OTel v0.7.0格式    
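As a small aside on the record format above, the data field is plain base64 text, so the two sample records from the example request can be decoded directly in a shell. The values here are the same placeholder strings shown in the example, not real payloads; an actual CloudWatch metric stream delivers base64-encoded OTel v0.7.0 data instead:

# Decode the sample 'data' values from the Firehose request example above.
echo 'aGVsbG8=' | base64 -d          # -> hello
echo 'aGVsbG8gd29ybGQ=' | base64 -d  # -> hello world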
aws-firehose-receiver aws-firehose-receiver 就是提供了一个实现了Firehose Specifications的HTTP Endpoint:/aws/firehose/metrics。下图展示了通过CloudWatch监控DynamoDB,S3等服务,并利用Firehose将指标数据发送到SKywalking OAP的数据流向\n从上图可以看到 aws-firehose-receiver 将数据转换后交由 OpenTelemetry-receiver处理 ,所以 OpenTelemetry receiver 中配置的 otel-rules 同样可以适用CloudWatch metrics\n注意  因为 Kinesis Data Firehose 要求,必须在AWS Firehose receiver 前放置一个Gateway用来建立HTTPS链接。aws-firehose-receiver 将从v9.5.0开始支持HTTPS协议 TLS 证书必须是CA签发的  逐步设置S3监控  进入 S3控制台,通过 Amazon S3 \u0026gt;\u0026gt; Buckets \u0026gt;\u0026gt; (Your Bucket) \u0026gt;\u0026gt; Metrics \u0026gt;\u0026gt; metrics \u0026gt;\u0026gt; View additional charts \u0026gt;\u0026gt; Request metrics 为 Request metrics 创建filter  进入Amazon Kinesis 控制台,创建一个delivery stream, Source选择 Direct PUT, Destination 选择 HTTP Endpoint. 并且设置HTTP endpoint URL 为 https://your_domain/aws/firehose/metrics。其他配置项:  Buffer hints: 设置缓存的大小和周期 Access key 与aws-firehose-receiver中的AccessKey一致即可 Retry duration: 重试周期 Backup settings: 备份设置,可选地将投递的数据同时备份到S3。    进入 CloudWatch控制台,Streams 标签创建CloudWatch Stream。并且在Select your Kinesis Data Firehose stream项中配置第二步创建的delivery stream。注意需要设置Change output format 为 OpenTelemetry v0.7.0。  至此,S3监控配置设置完成。目前SkyWalking默认收集的S3 metrics 展示如下\n其他服务 目前SkyWalking官方支持EKS,S3,DynamoDB监控。 用户也参考 OpenTelemetry receiver 配置OTel rules来收集,分析AWS其他服务的CloudWatch metrics,并且通过自定义dashboard展示\n资料  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","excerpt":"SKyWalking OAP 现有的 OpenTelemetry receiver 可以通过OTLP协议接收指标(metrics),并且使用MAL实时分析相关指标。从OAP 9.4.0开 …","ref":"/zh/2023-03-12-skywalking-aws-s3-eks/","title":"使用SkyWalking监控AWS EKS和S3"},{"body":"SkyWalking Rust 0.6.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Refactor span object api to make it more friendly. by @jmjoy in https://github.com/apache/skywalking-rust/pull/52 Refactor management report and keep alive api. by @jmjoy in https://github.com/apache/skywalking-rust/pull/53 Use stream and completed for a bulk to collect for grpc reporter. by @jmjoy in https://github.com/apache/skywalking-rust/pull/54 Add sub components licenses in dist material. by @jmjoy in https://github.com/apache/skywalking-rust/pull/55 Bump to 0.6.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/56  ","excerpt":"SkyWalking Rust 0.6.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-6-0/","title":"Release Apache SkyWalking Rust 0.6.0"},{"body":"SkyWalking 9.4.0 is released. Go to downloads page to find release tars.\nPromQL and Grafana Support Zipkin Lens UI Bundled AWS S3 and DynamoDB monitoring Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom version to 29. Bump up Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Adds Micrometer as a new component.(ID=141) Refactor session cache in MetricsPersistentWorker. Cache enhancement - don\u0026rsquo;t read new metrics from database in minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. 
// 1.3 the OAP cluster is rebalanced due to scaling // (2) the metrics are from the time after the timeOfLatestStabilitySts // (3) the metrics don't exist in the cache // the kernel should NOT try to load it from the database. // // Notice, about condition (2), // for the specific minute of booted successfully, the metrics are expected to load from database when // it doesn't exist in the cache.  Remove the offset of metric session timeout according to worker creation sequence. Correct MetricsExtension annotations declarations in manual entities. Support component IDs' priority in process relation metrics. Remove abandon logic in MergableBufferedData, which caused unexpected no-update. Fix miss set LastUpdateTimestamp that caused the metrics session to expire. Rename MAL rule spring-sleuth.yaml to spring-micrometer.yaml. Fix memory leak in Zipkin API. Remove the dependency of refresh_interval of ElasticSearch indices from elasticsearch/flushInterval config. Now, it uses core/persistentPeriod + 5s as refresh_interval for all indices instead. Change elasticsearch/flushInterval to 5s(was 15s). Optimize flushInterval of ElasticSearch BulkProcessor to avoid extra periodical flush in the continuous bulk streams. An unexpected dot is added when exp is a pure metric name and expPrefix != null. Support monitoring MariaDB. Remove measure/stream specific interval settings in BanyanDB. Add global-specific settings used to override global configurations (e.g segmentIntervalDays, blockIntervalHours) in BanyanDB. Use TTL-driven interval settings for the measure-default group in BanyanDB. Fix wrong group of non time-relative metadata in BanyanDB. Refactor StorageData#id to the new StorageID object from a String type. Support multiple component IDs in the service topology level. Add ElasticSearch.Keyword annotation to declare the target field type as keyword. [Breaking Change] Column component_id of service_relation_client_side and service_relation_server_side have been replaced by component_ids. Support priority definition in the component-libraries.yml. Enhance service topology query. When there are multiple components detected from the server side, the component type of the node would be determined by the priority, which was random in the previous release. Remove component_id from service_instance_relation_client_side and service_instance_relation_server_side. Make the satellite E2E test more stable. Add Istio 1.16 to test matrix. Register ValueColumn as Tag for Record in BanyanDB storage plugin. Bump up Netty to 4.1.86. Remove unnecessary additional columns when storage is in logical sharding mode. The cluster coordinator support watch mechanism for notifying RemoteClientManager and ServerStatusService. Fix ServiceMeshServiceDispatcher overwrite ServiceDispatcher debug file when open SW_OAL_ENGINE_DEBUG. Use groupBy and in operators to optimize topology query for BanyanDB storage plugin. Support server status watcher for MetricsPersistentWorker to check the metrics whether required initialization. Fix the meter value are not correct when using sumPerMinLabeld or sumHistogramPercentile MAL function. Fix cannot display attached events when using Zipkin Lens UI query traces. Remove time_bucket for both Stream and Measure kinds in BanyanDB plugin. Merge TIME_BUCKET of Metrics and Record into StorageData. Support no layer in the listServices query. Fix time_bucket of ServiceTraffic not set correctly in slowSql of MAL. Correct the TopN record query DAO of BanyanDB. Tweak interval settings of BanyanDB. 
Support monitoring AWS Cloud EKS. Bump BanyanDB Java client to 0.3.0-rc1. Remove id tag from measures. Add Banyandb.MeasureField to mark a column as a BanyanDB Measure field. Add BanyanDB.StoreIDTag to store a process\u0026rsquo;s id for searching. [Breaking Change] The supported version of ShardingSphere-Proxy is upgraded from 5.1.2 to 5.3.1. Due to the changes of ShardingSphere\u0026rsquo;s API, versions before 5.3.1 are not compatible. Add the eBPF network profiling E2E Test in the per storage. Fix TCP service instances are lack of instance properties like pod and namespace, which causes Pod log not to work for TCP workloads. Add Python HBase happybase module component ID(94). Fix gRPC alarm cannot update settings from dynamic configuration source. Add batchOfBytes configuration to limit the size of bulk flush. Add Python Websocket module component ID(7018). [Optional] Optimize single trace query performance by customizing routing in ElasticSearch. SkyWalking trace segments and Zipkin spans are using trace ID for routing. This is OFF by default, controlled by storage/elasticsearch/enableCustomRouting. Enhance OAP HTTP server to support HTTPS Remove handler scan in otel receiver, manual initialization instead Add aws-firehose-receiver to support collecting AWS CloudWatch metric(OpenTelemetry format). Notice, no HTTPS/TLS setup support. By following AWS Firehose request, it uses proxy request (https://... instead of /aws/firehose/metrics), there must be a proxy(Nginx, Envoy, etc.). Avoid Antlr dependencies' versions might be different in compile time and runtime. Now PrometheusMetricConverter#escapedName also support converting / to _. Add missing TCP throughput metrics. Refactor @Column annotation, swap Column#name and ElasticSearch.Column#columnAlias and rename ElasticSearch.Column#columnAlias to ElasticSearch.Column#legacyName. Add Python HTTPX module component ID(7019). Migrate tests from junit 4 to junit 5. Refactor http-based alarm plugins and extract common logic to HttpAlarmCallback. Support Amazon Simple Storage Service (Amazon S3) metrics monitoring Support process Sum metrics with AGGREGATION_TEMPORALITY_DELTA case Support Amazon DynamoDB monitoring. Support prometheus HTTP API and promQL. Scope in the Entity of Metrics query v1 protocol is not required and automatical correction. The scope is determined based on the metric itself. Add explicit ReadTimeout for ConsulConfigurationWatcher to avoid IllegalArgumentException: Cache watchInterval=10sec \u0026gt;= networkClientReadTimeout=10000ms. Fix DurationUtils.getDurationPoints exceed, when startTimeBucket equals endTimeBucket. Support process OpenTelemetry ExponentialHistogram metrics Add FreeRedis component ID(3018).  UI  Add Zipkin Lens UI to webapp, and proxy it to context path /zipkin. Migrate the build tool from vue cli to Vite4. Fix Instance Relation and Endpoint Relation dashboards show up. Add Micrometer icon. Update MySQL UI to support MariaDB. Add AWS menu for supporting AWS monitoring. Add missing FastAPI logo. Update the log details page to support the formatted display of JSON content. Fix build config. Avoid being unable to drag process nodes for the first time. Add node folder into ignore list. Add ElPopconfirm to component types. Add an iframe widget for zipkin UI. Optimize graph tooltips to make them more friendly. Bump json5 from 1.0.1 to 1.0.2. Add websockets icon. Implement independent mode for widgets. Bump http-cache-semantics from 4.1.0 to 4.1.1. Update menus for OpenFunction. 
Add auto fresh to widgets independent mode. Fix: clear trace ID on the Log and Trace widgets after using association. Fix: reset duration for query conditions after time range changes. Add AWS S3 menu. Refactor: optimize side bar component to make it more friendly. Fix: remove duplicate popup message for query result. Add logo for HTTPX. Refactor: optimize the attached events visualization in the trace widget. Update BanyanDB client to 0.3.1. Add AWS DynamoDB menu. Fix: add auto period to the independent mode for widgets. Optimize menus and add Windows monitoring menu. Add a calculation for the cpm5dAvg. add a cpm5d calculation. Fix data processing error in the eBPF profiling widget. Support for double quotes in SlowSQL statements. Fix: the wrong position of the menu when clicking the topology node.  Documentation  Remove Spring Sleuth docs, and add Spring MicroMeter Observations Analysis with the latest Java agent side enhancement. Update monitoring MySQL document to add the MariaDB part. Reorganize the protocols docs to a more clear API docs. Add documentation about replacing Zipkin server with SkyWalking OAP. Add Lens UI relative docs in Zipkin trace section. Add Profiling APIs. Fix backend telemetry doc and so11y dashboard doc as the OAP Prometheus fetcher was removed since 9.3.0  All issues and pull requests are here\n","excerpt":"SkyWalking 9.4.0 is released. Go to downloads page to find release tars.\nPromQL and Grafana Support …","ref":"/events/release-apache-skywalking-apm-9.4.0/","title":"Release Apache SkyWalking APM 9.4.0"},{"body":"SkyWalking BanyanDB 0.3.1 is released. Go to downloads page to find release tars.\nBugs  Fix the broken of schema chain. Add a timeout to all go leaking checkers.  Chores  Bump golang.org/x/net from 0.2.0 to 0.7.0.  ","excerpt":"SkyWalking BanyanDB 0.3.1 is released. Go to downloads page to find release tars.\nBugs  Fix the …","ref":"/events/release-apache-skywalking-banyandb-0-3-1/","title":"Release Apache SkyWalking BanyanDB 0.3.1"},{"body":"SkyWalking Python 1.0.0 is released! Go to downloads page to find release tars.\nPyPI Wheel: https://pypi.org/project/apache-skywalking/1.0.0/\nDockerHub Image: https://hub.docker.com/r/apache/skywalking-python\n  Important Notes and Breaking Changes:\n The new PVM metrics reported from Python agent requires SkyWalking OAP v9.3.0 to show out-of-the-box. BREAKING: Python 3.6 is no longer supported and may not function properly, Python 3.11 support is added and tested. BREAKING: A number of common configuration options and environment variables are renamed to follow the convention of Java agent, please check with the latest official documentation before upgrading. (#273, #282) https://skywalking.apache.org/docs/skywalking-python/v1.0.0/en/setup/configuration/ BREAKING: All agent core capabilities are now covered by test cases and enabled by default (Trace, Log, PVM runtime metrics, Profiler) BREAKING: DockerHub Python agent images since v1.0.0 will no longer include the run part in ENTRYPOINT [\u0026quot;sw-python\u0026quot;, \u0026quot;run\u0026quot;], user should prefix their command with [-d/--debug] run [-p/--prefork] \u0026lt;Command\u0026gt; for extra flexibility. 
Packaged wheel now provides a extra [all] option to support all three report protocols    Feature:\n Add support for Python 3.11 (#285) Add MeterReportService (gRPC, Kafka reporter) (default:enabled) (#231, #236, #241, #243) Add reporter for PVM runtime metrics (default:enabled) (#238, #247) Add Greenlet profiler (#246) Add test and support for Python Slim base images (#249) Add support for the tags of Virtual Cache for Redis (#263) Add a new configuration kafka_namespace to prefix the kafka topic names (#277) Add log reporter support for loguru (#276) Add experimental support for explicit os.fork(), restarts agent in forked process (#286) Add experimental sw-python CLI sw-python run [-p] flag (-p/\u0026ndash;prefork) to enable non-intrusive uWSGI and Gunicorn postfork support (#288)    Plugins:\n Add aioredis, aiormq, amqp, asyncpg, aio-pika, kombu RMQ plugins (#230 Missing test coverage) Add Confluent Kafka plugin (#233 Missing test coverage) Add HBase plugin Python HappyBase model (#266) Add FastAPI plugin websocket protocol support (#269) Add Websockets (client) plugin (#269) Add HTTPX plugin (#283)    Fixes:\n Allow RabbitMQ BlockingChannel.basic_consume() to link with outgoing spans (#224) Fix RabbitMQ basic_get bug (#225, #226) Fix case when tornado socket name is None (#227) Fix misspelled text \u0026ldquo;PostgreSLQ\u0026rdquo; -\u0026gt; \u0026ldquo;PostgreSQL\u0026rdquo; in Postgres-related plugins (#234) Make sure span.component initialized as Unknown rather than 0 (#242) Ignore websocket connections inside fastapi temporarily (#244, issue#9724) Fix Kafka-python plugin SkyWalking self reporter ignore condition (#249) Add primary endpoint in tracing context and endpoint info to log reporter (#261) Enforce tag class type conversion (#262) Fix sw_logging (log reporter) potentially throw exception leading to traceback confusion (#267) Avoid reporting meaningless tracecontext with logs when there\u0026rsquo;s no active span, UI will now show empty traceID (#272) Fix exception handler in profile_context (#273) Add namespace suffix to service name (#275) Add periodical instance property report to prevent data loss (#279) Fix sw_logging when Logger.disabled is true (#281)    Docs:\n New documentation on how to test locally (#222) New documentation on the newly added meter reporter feature (#240) New documentation on the newly added greenlet profiler and the original threading profiler (#250) Overhaul documentation on development setup and testing (#249) Add tables to state currently supported features of Python agent. 
(#271) New configuration documentation generator (#273)    Others:\n Pin CI SkyWalking License Eye (#221) Fix dead link due to the \u0026lsquo;next\u0026rsquo; url change (#235) Pin CI SkyWalking Infra-E2E (#251) Sync OAP, SWCTL versions in E2E and fix test cases (#249) Overhaul development flow with Poetry (#249) Fix grpcio-tools generated message type (#253) Switch plugin tests to use slim Python images (#268) Add unit tests to sw_filters (#269)    New Contributors  @ZEALi made their first contribution in https://github.com/apache/skywalking-python/pull/242 @westarest made their first contribution in https://github.com/apache/skywalking-python/pull/246 @Jedore made their first contribution in https://github.com/apache/skywalking-python/pull/263 @alidisi made their first contribution in https://github.com/apache/skywalking-python/pull/266 @SheltonZSL made their first contribution in https://github.com/apache/skywalking-python/pull/275 @XinweiLyu made their first contribution in https://github.com/apache/skywalking-python/pull/283  Full Changelog: https://github.com/apache/skywalking-python/compare/v0.8.0...v1.0.0\n","excerpt":"SkyWalking Python 1.0.0 is released! Go to downloads page to find release tars.\nPyPI Wheel: …","ref":"/events/release-apache-skywalking-python-1-0-0/","title":"Release Apache SkyWalking Python 1.0.0"},{"body":"SkyWalking BanyanDB 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support 64-bit float type. Web Application. Close components in tsdb gracefully. Add TLS for the HTTP server. Use the table builder to compress data.  Bugs  Open blocks concurrently. Sync index writing and shard closing. TimestampRange query throws an exception if no data in this time range.  Chores  Fixes issues related to leaked goroutines. Add validations to APIs.  ","excerpt":"SkyWalking BanyanDB 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skywalking-banyandb-0-3-0/","title":"Release Apache SkyWalking BanyanDB 0.3.0"},{"body":"SkyWalking PHP 0.3.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Make explicit rust version requirement by @wu-sheng in https://github.com/apache/skywalking-php/pull/35 Update dependencies version limitation. by @jmjoy in https://github.com/apache/skywalking-php/pull/36 Startup 0.3.0 by @heyanlong in https://github.com/apache/skywalking-php/pull/37 Support PHP 8.2 by @heyanlong in https://github.com/apache/skywalking-php/pull/38 Fix php-fpm freeze after large amount of request. by @jmjoy in https://github.com/apache/skywalking-php/pull/39 Lock develop rust version to 1.65, upgrade deps. by @jmjoy in https://github.com/apache/skywalking-php/pull/41 Fix worker unexpected shutdown. by @jmjoy in https://github.com/apache/skywalking-php/pull/42 Update docs about installing rust. by @jmjoy in https://github.com/apache/skywalking-php/pull/43 Retry cargo test when failed in CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/44 Hack dtor for mysqli to cleanup resources. by @jmjoy in https://github.com/apache/skywalking-php/pull/45 Report instance properties and keep alive. by @jmjoy in https://github.com/apache/skywalking-php/pull/46 Add configuration option skywalking_agent.runtime_dir. by @jmjoy in https://github.com/apache/skywalking-php/pull/47 Add authentication support. by @jmjoy in https://github.com/apache/skywalking-php/pull/48 Support TLS. 
by @jmjoy in https://github.com/apache/skywalking-php/pull/49 Periodic reporting instance properties. by @jmjoy in https://github.com/apache/skywalking-php/pull/50 Bump to 0.3.0. by @jmjoy in https://github.com/apache/skywalking-php/pull/51  Breaking  Remove http:// scheme in skywalking_agent.server_addr.  New Contributors  @wu-sheng made their first contribution in https://github.com/apache/skywalking-php/pull/35  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.2.0...v0.3.0\nPECL https://pecl.php.net/package/skywalking_agent/0.3.0\n","excerpt":"SkyWalking PHP 0.3.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skwaylking-php-0-3-0/","title":"Release Apache SkyWalking PHP 0.3.0"},{"body":"SkyWalking Java Agent 8.14.0 is released. Go to downloads page to find release tars. Changes by Version\n8.14.0  Polish test framework to support arm64/v8 platforms Fix wrong config name plugin.toolkit.use_qualified_name_as_operation_name, and system variable name SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME:false. They were toolit. Rename JDBI to JDBC Support collecting dubbo thread pool metrics Bump up byte-buddy to 1.12.19 Upgrade agent test tools [Breaking Change] Compatible with 3.x and 4.x RabbitMQ Client, rename rabbitmq-5.x-plugin to rabbitmq-plugin Polish JDBC plugins to make DBType accurate Report the agent version to OAP as an instance attribute Polish jedis-4.x-plugin to change command to lowercase, which is consistent with jedis-2.x-3.x-plugin Add micronauthttpclient,micronauthttpserver,memcached,ehcache,guavacache,jedis,redisson plugin config properties to agent.config Add Micrometer Observation support Add tags mq.message.keys and mq.message.tags for RocketMQ producer span Clean the trace context which injected into Pulsar MessageImpl after the instance recycled Fix In the higher version of mysql-connector-java 8x, there is an error in the value of db.instance. Add support for KafkaClients 3.x. Support to customize the collect period of JVM relative metrics. Upgrade netty-codec-http2 to 4.1.86.Final. Put Agent-Version property reading in the premain stage to avoid deadlock when using jarsigner. Add a config agent.enable(default: true) to support disabling the agent through system property -Dskywalking.agent.disable=false or system environment variable setting SW_AGENT_ENABLE=false. Enhance redisson plugin to adopt uniform tags.  Documentation  Update Plugin-test.md, support string operators start with and end with Polish agent configurations doc to fix type error  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.14.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-14-0/","title":"Release Apache SkyWalking Java Agent 8.14.0"},{"body":"Background Apache SkyWalking is an open-source Application Performance Management system that helps users collect and aggregate logs, traces, metrics, and events for display on a UI. In the previous article, we introduced how to use Apache SkyWalking Rover to analyze the network performance issue in the service mesh environment. However, in business scenarios, users often rely on mature layer 7 protocols, such as HTTP, for interactions between systems. 
In this article, we will discuss how to use eBPF techniques to analyze performance bottlenecks of layer 7 protocols and how to enhance the tracing system using network sampling.\nThis article will show how to use Apache SkyWalking with eBPF to enhance metrics and traces in HTTP observability.\nHTTP Protocol Analysis HTTP is one of the most common Layer 7 protocols and is usually used to provide services to external parties and for inter-system communication. In the following sections, we will show how to identify and analyze HTTP/1.x protocols.\nProtocol Identification In HTTP/1.x, the client and server communicate through a single file descriptor (FD) on each side. Figure 1 shows the process of communication involving the following steps:\n Connect/accept: The client establishes a connection with the HTTP server, or the server accepts a connection from the client. Read/write (multiple times): The client or server reads and writes HTTPS requests and responses. A single request-response pair occurs within the same connection on each side. Close: The client and server close the connection.  To obtain HTTP content, it’s necessary to read it from the second step of this process. As defined in the RFC, the content is contained within the data of the Layer 4 protocol and can be obtained by parsing the data. The request and response pair can be correlated because they both occur within the same connection on each side.\nFigure 1: HTTP communication timeline.\nHTTP Pipeline HTTP pipelining is a feature of HTTP/1.1 that enables multiple HTTP requests to be sent over a single TCP connection without waiting for the corresponding responses. This feature is important because it ensures that the order of the responses on the server side matches the order of the requests.\nFigure 2 illustrates how this works. Consider the following scenario: an HTTP client sends multiple requests to a server, and the server responds by sending the HTTP responses in the same order as the requests. This means that the first request sent by the client will receive the first response from the server, the second request will receive the second response, and so on.\nWhen designing HTTP parsing, we should follow this principle by adding request data to a list and removing the first item when parsing a response. This ensures that the responses are processed in the correct order.\nFigure 2: HTTP/1.1 pipeline.\nMetrics Based on the identification of the HTTP content and process topology diagram mentioned in the previous article, we can combine these two to generate process-to-process metrics data.\nFigure 3 shows the metrics that currently support the analysis between the two processes. 
Based on the HTTP request and response data, we can analyze the following data:\n   Metrics Name Type Unit Description     Request CPM(Call Per Minute) Counter count The HTTP request count   Response Status CPM(Call Per Minute) Counter count The count of per HTTP response status code   Request Package Size Counter/Histogram Byte The request package size   Response Package Size Counter/Histogram Byte The response package size   Client Duration Counter/Histogram Millisecond The duration of single HTTP response on the client side   Server Duration Counter/Histogram Millisecond The duration of single HTTP response on the server side    Figure 3: Process-to-process metrics.\nHTTP and Trace During the HTTP process, if we unpack the HTTP requests and responses from raw data, we can use this data to correlate with the existing tracing system.\nTrace Context Identification In order to track the flow of requests between multiple services, the trace system usually creates a trace context when a request enters a service and passes it along to other services during the request-response process. For example, when an HTTP request is sent to another server, the trace context is included in the request header.\nFigure 4 displays the raw content of an HTTP request intercepted by Wireshark. The trace context information generated by the Zipkin Tracing system can be identified by the “X-B3” prefix in the header. By using eBPF to intercept the trace context in the HTTP header, we can connect the current request with the trace system.\nFigure 4: View of HTTP headers in Wireshark.\nTrace Event We have added the concept of an event to traces. An event can be attached to a span and consists of start and end times, tags, and summaries, allowing us to attach any desired information to the Trace.\nWhen performing eBPF network profiling, two events can be generated based on the request-response data. Figure 5 illustrates what happens when a service performs an HTTP request with profiling. The trace system generates trace context information and sends it in the request. When the service executes in the kernel, we can generate an event for the corresponding trace span by interacting with the request-response data and execution time in the kernel space.\nPreviously, we could only observe the execution status in the user space. However, by combining traces and eBPF technologies, we can now also get more information about the current trace in the kernel space, which would impact less performance for the target service if we do similar things in the tracing SDK and agent.\nFigure 5: Logical view of profiling an HTTP request and response.\nSampling To ensure efficient data storage and minimize unnecessary data sampling, we use a sampling mechanism for traces in our system. This mechanism triggers sampling only when certain conditions are met. We also provide a list of the top N traces, which allows users to quickly access the relevant request information for a specific trace.\nTo help users easily identify and analyze relevant events, we offer three different sampling rules:\n Slow Traces: Sampling is triggered when the response time for a request exceeds a specified threshold. Response Status [400, 500): Sampling is triggered when the response status code is greater than or equal to 400 and less than 500. Response Status [500, 600): Sampling is triggered when the response status code is greater than or equal to 500 and less than 600.  
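To make these rule semantics concrete, the sketch below shows how a collector could classify a single sampled request/response pair against the three rules. It is only an illustration: the `HttpSample` type, the rule names, and the 500 ms threshold are hypothetical and are not SkyWalking Rover's actual API.

```python
# Hypothetical sketch of the three sampling rules described above; the type,
# rule names, and threshold are illustrative, not Rover's real implementation.
from dataclasses import dataclass
from typing import Optional

@dataclass
class HttpSample:
    uri: str
    status_code: int
    duration_ms: float

def match_rule(sample: HttpSample, min_duration_ms: float = 500.0) -> Optional[str]:
    """Return the name of the first matching sampling rule, or None to drop the sample."""
    if sample.duration_ms >= min_duration_ms:
        return "slow_trace"
    if 400 <= sample.status_code < 500:
        return "status_4xx"
    if 500 <= sample.status_code < 600:
        return "status_5xx"
    return None

# A fast request that returned 503 is still kept, under the 5xx rule.
print(match_rule(HttpSample("/checkout", 503, 30.0)))  # -> status_5xx
```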
In addition, we recognize that not all request or response raw data may be necessary for analysis. For example, users may be more interested in requesting data when trying to identify performance issues, while they may be more interested in response data when troubleshooting errors. As such, we also provide configuration options for request or response events to allow users to specify which type of data they would like to sample.\nProfiling in a Service Mesh The SkyWalking and SkyWalking Rover projects have already implemented the HTTP protocol analyze and trace associations. How do they perform when running in a service mesh environment?\nDeployment Figure 6 demonstrates the deployment of SkyWalking and SkyWalking Rover in a service mesh environment. SkyWalking Rover is deployed as a DaemonSet on each machine where a service is located and communicates with the SkyWalking backend cluster. It automatically recognizes the services on the machine and reports metadata information to the SkyWalking backend cluster. When a new network profiling task arises, SkyWalking Rover senses the task and analyzes the designated processes, collecting and aggregating network data before ultimately reporting it back to the SkyWalking backend service.\nFigure 6: SkyWalking rover deployment topology in a service mesh.\nTracing Systems Starting from version 9.3.0, the SkyWalking backend fully supports all functions in the Zipkin server. Therefore, the SkyWalking backend can collect traces from both the SkyWalking and Zipkin protocols. Similarly, SkyWalking Rover can identify and analyze trace context in both the SkyWalking and Zipkin trace systems. In the following two sections, network analysis results will be displayed in the SkyWalking and Zipkin UI respectively.\nSkyWalking When SkyWalking performs network profiling, similar to the TCP metrics in the previous article, the SkyWalking UI will first display the topology between processes. When you open the dashboard of the line representing the traffic metrics between processes, you can see the metrics of HTTP traffic from the “HTTP/1.x” tab and the sampled HTTP requests with tracing in the “HTTP Requests” tab.\nAs shown in Figure 7, there are three lists in the tab, each corresponding to a condition in the event sampling rules. Each list displays the traces that meet the pre-specified conditions. When you click on an item in the trace list, you can view the complete trace.\nFigure 7: Sampled HTTP requests within tracing context.\nWhen you click on an item in the trace list, you can quickly view the specified trace. In Figure 8, we can see that in the current service-related span, there is a tag with a number indicating how many HTTP events are related to that trace span.\nSince we are in a service mesh environment, each service involves interacting with Envoy. Therefore, the current span includes Envoy’s request and response information. Additionally, since the current service has both incoming and outgoing requests, there are events in the corresponding span.\nFigure 8: Events in the trace detail.\nWhen the span is clicked, the details of the span will be displayed. If there are events in the current span, the relevant event information will be displayed on a time axis. As shown in Figure 9, there are a total of 6 related events in the current Span. Each event represents a data sample of an HTTP request/response. One of the events spans multiple time ranges, indicating a longer system call time. 
It may be due to a blocked system call, depending on the implementation details of the HTTP request in different languages. This can also help us query the possible causes of errors.\nFigure 9: Events in one trace span.\nFinally, we can click on a specific event to see its complete information. As shown in Figure 10, it displays the sampling information of a request, including the SkyWalking trace context protocol contained in the request header from the HTTP raw data. The raw request data allows you to quickly re-request the request to solve any issues.\nFigure 10: The detail of the event.\nZipkin Zipkin is one of the most widely used distributed tracing systems in the world. SkyWalking can function as an alternative server to provide advanced features for Zipkin users. Here, we use this way to bring the feature into the Zipkin ecosystem out-of-box. The new events would also be treated as a kind of Zipkin’s tags and annotations.\nTo add events to a Zipkin span, we need to do the following:\n Split the start and end times of each event into two annotations with a canonical name. Add the sampled HTTP raw data from the event to the Zipkin span tags, using the same event name for corresponding purposes.  Figures 11 and 12 show annotations and tags in the same span. In these figures, we can see that the span includes at least two events with the same event name and sequence suffix (e.g., “Start/Finished HTTP Request/Response Sampling-x” in the figure). Both events have separate timestamps to represent their relative times within the span. In the tags, the data content of the corresponding event is represented by the event name and sequence number, respectively.\nFigure 11: Event timestamp in the Zipkin span annotation.\nFigure 12: Event raw data in the Zipkin span tag.\nDemo In this section, we demonstrate how to perform network profiling in a service mesh and complete metrics collection and HTTP raw data sampling. To follow along, you will need a running Kubernetes environment.\nDeploy SkyWalking Showcase SkyWalking Showcase contains a complete set of example services and can be monitored using SkyWalking. For more information, please check the official documentation.\nIn this demo, we only deploy service, the latest released SkyWalking OAP, and UI.\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.3.0 export SW_UI_IMAGE=apache/skywalking-ui:9.3.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.4.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes After deployment is complete, please run the following script to open SkyWalking UI: http://localhost:8080/.\nkubectl port-forward svc/ui 8080:8080 --namespace default Start Network Profiling Task Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn figure 13, we have selected an instance with a list of tasks in the network profiling tab.\nFigure 13: Network Profiling tab in the Data Plane.\nWhen we click the Start button, as shown in Figure 14, we need to specify the sampling rules for the profiling task. The sampling rules consist of one or more rules, each of which is distinguished by a different URI regular expression. When the HTTP request URI matches the regular expression, the rule is used. If the URI regular expression is empty, the default rule is used. 
Using multiple rules can help us make different sampling configurations for different requests.\nEach rule has three parameters to determine if sampling is needed:\n Minimal Request Duration (ms): requests with a response time exceeding the specified time will be sampled. Sampling response status code between 400 and 499: all status codes in the range [400-499) will be sampled. Sampling response status code between 500 and 599: all status codes in the range [500-599) will be sampled.  Once the sampling configuration is complete, we can create the task.\nFigure 14: Create network profiling task page.\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nWhen you click on the line between processes, you can view the data between the two processes, which is divided into three tabs:\n TCP: displays TCP-related metrics. HTTP/1.x: displays metrics in the HTTP 1 protocol. HTTP Requests: displays the analyzed request and saves it to a list according to the sampling rule.  Figure 16: TCP metrics in a network profiling task.\nFigure 17: HTTP/1.x metrics in a network profiling task.\nFigure 18: HTTP sampled requests in a network profiling task.\nConclusion In this article, we detailed the overview of how to analyze the Layer 7 HTTP/1.x protocol in network analysis, and how to associate it with existing trace systems. This allows us to extend the scope of data we can observe from just user space to also include kernel-space data.\nIn the future, we will delve further into the analysis of kernel data, such as collecting information on TCP packet size, transmission frequency, network card, and help on enhancing distributed tracing from another perspective.\nAdditional Resources  SkyWalking Github Repo › SkyWalking Rover Github Repo › SkyWalking Rover Documentation › Diagnose Service Mesh Network Performance with eBPF blog post \u0026gt; SkyWalking Profiling Documentation \u0026gt; SkyWalking Trace Context Propagation \u0026gt; Zipkin Trace Context Propagation \u0026gt; RFC - Hypertext Transfer Protocol – HTTP/1.1 \u0026gt;  ","excerpt":"Background Apache SkyWalking is an open-source Application Performance Management system that helps …","ref":"/blog/ebpf-enhanced-http-observability-l7-metrics-and-tracing/","title":"eBPF enhanced HTTP observability - L7 metrics and tracing"},{"body":"","excerpt":"","ref":"/tags/http/","title":"HTTP"},{"body":"","excerpt":"","ref":"/tags/trace/","title":"Trace"},{"body":"背景 Apache SkyWalking 是一个开源应用性能管理系统,帮助用户收集和聚合日志、追踪、指标和事件,并在 UI 上显示。在上一篇文章中,我们介绍了如何使用 Apache SkyWalking Rover 分析服务网格环境中的网络性能问题。但是,在商业场景中,用户通常依靠成熟的第 7 层协议(如 HTTP)来进行系统之间的交互。在本文中,我们将讨论如何使用 eBPF 技术来分析第 7 层协议的性能瓶颈,以及如何使用网络采样来增强追踪系统。\n本文将演示如何使用 Apache SkyWalking 与 eBPF 来增强 HTTP 可观察性中的指标和追踪。\nHTTP 协议分析 HTTP 是最常用的 7 层协议之一,通常用于为外部方提供服务和进行系统间通信。在下面的章节中,我们将展示如何识别和分析 HTTP/1.x 协议。\n协议识别 在 HTTP/1.x 中,客户端和服务器通过两端的单个文件描述符(File Descriptor)进行通信。图 1 显示了涉及以下步骤的通信过程:\n Connect/Accept:客户端与 HTTP 服务器建立连接,或者服务器接受客户端的连接。 Read/Write(多次):客户端或服务器读取和写入 HTTPS 请求和响应。单个请求 - 响应对在每边的同一连接内发生。 Close:客户端和服务器关闭连接。  为了获取 HTTP 内容,必须从此过程的第二步读取它。根据 RFC 定义,内容包含在 4 层协议的数据中,可以通过解析数据来获取。请求和响应对可以相关联,因为它们都在两端的同一连接内发生。\n图 1:HTTP 通信时间线。\nHTTP 管线化 HTTP 管线化(Pipelining)是 HTTP/1.1 的一个特性,允许在等待对应的响应的情况下在单个 TCP 连接上发送多个 HTTP 请求。这个特性很重要,因为它确保了服务器端的响应顺序必须与请求的顺序匹配。\n图 2 说明了这是如何工作的,考虑以下情况:HTTP 客户端向服务器发送多个请求,服务器通过按照请求的顺序发送 HTTP 响应来响应。这意味着客户端发送的第一个请求将收到服务器的第一个响应,第二个请求将收到第二个响应,以此类推。\n在设计 HTTP 解析时,我们应该遵循这个原则,将请求数据添加到列表中,并在解析响应时删除第一个项目。这可以确保响应按正确的顺序处理。\n图 2: HTTP/1.1 管道。\n指标 根据前文提到的 HTTP 内容和流程拓扑图的识别,我们可以将这两者结合起来生成进程间的指标数据。\n图 
3 显示了目前支持两个进程间分析的指标。基于 HTTP 请求和响应数据,可以分析以下数据:\n   指标名称 类型 单位 描述     请求 CPM(Call Per Minute) 计数器 计数 HTTP 请求计数   响应状态 CPM (Call Per Minute) 计数器 计数 每个 HTTP 响应状态码的计数   请求包大小 计数器 / 直方图 字节 请求包大小   响应包大小 计数器 / 直方图 字节 响应包大小   客户端持续时间 计数器 / 直方图 毫秒 客户端单个 HTTP 响应的持续时间   服务器持续时间 计数器 / 直方图 毫秒 服务器端单个 HTTP 响应的持续时间    图 3:进程到进程指标。\nHTTP 和追踪 在 HTTP 过程中,如果我们能够从原始数据中解包 HTTP 请求和响应,就可以使用这些数据与现有的追踪系统进行关联。\n追踪上下文标识 为了追踪多个服务之间的请求流,追踪系统通常在请求进入服务时创建追踪上下文,并在请求 - 响应过程中将其传递给其他服务。例如,当 HTTP 请求发送到另一个服务器时,追踪上下文包含在请求头中。\n图 4 显示了 Wireshark 拦截的 HTTP 请求的原始内容。由 Zipkin Tracing 系统生成的追踪上下文信息可以通过头中的 “X-B3” 前缀进行标识。通过使用 eBPF 拦截 HTTP 头中的追踪上下文,可以将当前请求与追踪系统连接起来。\n图 4:Wireshark 中的 HTTP Header 视图。\nTrace 事件 我们已经将事件这个概念加入了追踪中。事件可以附加到跨度上,并包含起始和结束时间、标签和摘要,允许我们将任何所需的信息附加到追踪中。\n在执行 eBPF 网络分析时,可以根据请求 - 响应数据生成两个事件。图 5 说明了在带分析的情况下执行 HTTP 请求时发生的情况。追踪系统生成追踪上下文信息并将其发送到请求中。当服务在内核中执行时,我们可以通过与内核空间中的请求 - 响应数据和执行时间交互,为相应的追踪跨度生成事件。\n以前,我们只能观察用户空间的执行状态。现在,通过结合追踪和 eBPF 技术,我们还可以在内核空间获取更多关于当前追踪的信息,如果我们在追踪 SDK 和代理中执行类似的操作,将对目标服务的性能产生较小的影响。\n图 5:分析 HTTP 请求和响应的逻辑视图。\n抽样 该机制仅在满足特定条件时触发抽样。我们还提供了前 N 条追踪的列表,允许用户快速访问特定追踪的相关请求信息。为了帮助用户轻松识别和分析相关事件,我们提供了三种不同的抽样规则:\n 慢速追踪:当请求的响应时间超过指定阈值时触发抽样。 响应状态 [400,500):当响应状态代码大于或等于 400 且小于 500 时触发抽样。 响应状态 [500,600):当响应状态代码大于或等于 500 且小于 600 时触发抽样。  此外,我们认识到分析时可能并不需要所有请求或响应的原始数据。例如,当试图识别性能问题时,用户可能更感兴趣于请求数据,而在解决错误时,他们可能更感兴趣于响应数据。因此,我们还提供了请求或响应事件的配置选项,允许用户指定要抽样的数据类型。\n服务网格中的分析 SkyWalking Rover 项目已经实现了 HTTP 协议的分析和追踪关联。当在服务网格环境中运行时它们的表现如何?\n部署 图 6 演示了 SkyWalking 和 SkyWalking Rover 在服务网格环境中的部署方式。SkyWalking Rover 作为一个 DaemonSet 部署在每台服务所在的机器上,并与 SkyWalking 后端集群通信。它会自动识别机器上的服务并向 SkyWalking 后端集群报告元数据信息。当出现新的网络分析任务时,SkyWalking Rover 会感知该任务并对指定的进程进行分析,在最终将数据报告回 SkyWalking 后端服务之前,收集和聚合网络数据。\n图 6:服务网格中的 SkyWalking rover 部署拓扑。\n追踪系统 从版本 9.3.0 开始,SkyWalking 后端完全支持 Zipkin 服务器中的所有功能。因此,SkyWalking 后端可以收集来自 SkyWalking 和 Zipkin 协议的追踪。同样,SkyWalking Rover 可以在 SkyWalking 和 Zipkin 追踪系统中识别和分析追踪上下文。在接下来的两节中,网络分析结果将分别在 SkyWalking 和 Zipkin UI 中显示。\nSkyWalking 当 SkyWalking 执行网络分析时,与前文中的 TCP 指标类似,SkyWalking UI 会首先显示进程间的拓扑图。当打开代表进程间流量指标的线的仪表板时,您可以在 “HTTP/1.x” 选项卡中看到 HTTP 流量的指标,并在 “HTTP Requests” 选项卡中看到带追踪的抽样的 HTTP 请求。\n如图 7 所示,选项卡中有三个列表,每个列表对应事件抽样规则中的一个条件。每个列表显示符合预先规定条件的追踪。当您单击追踪列表中的一个项目时,就可以查看完整的追踪。\n图 7:Tracing 上下文中的采样 HTTP 请求。\n当您单击追踪列表中的一个项目时,就可以快速查看指定的追踪。在图 8 中,我们可以看到在当前的服务相关的跨度中,有一个带有数字的标签,表示与该追踪跨度相关的 HTTP 事件数。\n由于我们在服务网格环境中,每个服务都涉及与 Envoy 交互。因此,当前的跨度包括 Envoy 的请求和响应信息。此外,由于当前的服务有传入和传出的请求,因此相应的跨度中有事件。\n图 8:Tracing 详细信息中的事件。\n当单击跨度时,将显示跨度的详细信息。如果当前跨度中有事件,则相关事件信息将在时间轴上显示。如图 9 所示,当前跨度中一共有 6 个相关事件。每个事件代表一个 HTTP 请求 / 响应的数据样本。其中一个事件跨越多个时间范围,表示较长的系统调用时间。这可能是由于系统调用被阻塞,具体取决于不同语言中的 HTTP 请求的实现细节。这也可以帮助我们查询错误的可能原因。\n图 9:一个 Tracing 范围内的事件。\n最后,我们可以单击特定的事件查看它的完整信息。如图 10 所示,它显示了一个请求的抽样信息,包括从 HTTP 原始数据中的请求头中包含的 SkyWalking 追踪上下文协议。原始请求数据允许您快速重新请求以解决任何问题。\n图 10:事件的详细信息。\nZipkin Zipkin 是世界上广泛使用的分布式追踪系统。SkyWalking 可以作为替代服务器,提供高级功能。在这里,我们使用这种方式将功能无缝集成到 Zipkin 生态系统中。新事件也将被视为 Zipkin 的标签和注释的一种。\n为 Zipkin 跨度添加事件,需要执行以下操作:\n 将每个事件的开始时间和结束时间分别拆分为两个具有规范名称的注释。 将抽样的 HTTP 原始数据从事件添加到 Zipkin 跨度标签中,使用相同的事件名称用于相应的目的。  图 11 和图 12 显示了同一跨度中的注释和标签。在这些图中,我们可以看到跨度包含至少两个具有相同事件名称和序列后缀的事件(例如,图中的 “Start/Finished HTTP Request/Response Sampling-x”)。这两个事件均具有单独的时间戳,用于表示其在跨度内的相对时间。在标签中,对应事件的数据内容分别由事件名称和序列号表示。\n图 11:Zipkin span 注释中的事件时间戳。\n图 12:Zipkin span 标签中的事件原始数据。\n演示 在本节中,我们将演示如何在服务网格中执行网络分析,并完成指标收集和 HTTP 原始数据抽样。要进行操作,您需要一个运行中的 Kubernetes 环境。\n部署 SkyWalking Showcase SkyWalking Showcase 包含一套完整的示例服务,可以使用 SkyWalking 进行监控。有关详细信息,请参阅官方文档。\n在本演示中,我们只部署了服务、最新发布的 SkyWalking OAP 和 UI。\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.3.0 export SW_UI_IMAGE=apache/skywalking-ui:9.3.0 export 
SW_ROVER_IMAGE=apache/skywalking-rover:0.4.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes 部署完成后,运行下面的脚本启动 SkyWalking UI:http://localhost:8080/。\nkubectl port-forward svc/ui 8080:8080 --namespace default 启动网络分析任务 目前,我们可以通过单击服务网格面板中的 Data Plane 项和 Kubernetes 面板中的 Service 项来选择要监视的特定实例。\n在图 13 中,我们已在网络分析选项卡中选择了一个具有任务列表的实例。\n图 13:数据平面中的网络分析选项卡。\n当我们单击 “开始” 按钮时,如图 14 所示,我们需要为分析任务指定抽样规则。抽样规则由一个或多个规则组成,每个规则都由不同的 URI 正则表达式区分。当 HTTP 请求的 URI 与正则表达式匹配时,将使用该规则。如果 URI 正则表达式为空,则使用默认规则。使用多个规则可以帮助我们为不同的请求配置不同的抽样配置。\n每个规则都有三个参数来确定是否需要抽样:\n 最小请求持续时间(毫秒):响应时间超过指定时间的请求将被抽样。 在 400 和 499 之间的抽样响应状态代码:范围 [400-499) 中的所有状态代码将被抽样。 在 500 和 599 之间的抽样响应状态代码:范围 [500-599) 中的所有状态码将被抽样。  抽样配置完成后,我们就可以创建任务了。\n图 14:创建网络分析任务页面。\n完成 几秒钟后,你会看到页面的右侧出现进程拓扑结构。\n图 15:网络分析任务中的流程拓扑。\n当您单击进程之间的线时,您可以查看两个过程之间的数据,它被分为三个选项卡:\n TCP:显示与 TCP 相关的指标。 HTTP/1.x:显示 HTTP 1 协议中的指标。 HTTP 请求:显示已分析的请求,并根据抽样规则保存到列表中。  图 16:网络分析任务中的 TCP 指标。\n图 17:网络分析任务中的 HTTP/1.x 指标。\n图 18:网络分析任务中的 HTTP 采样请求。\n总结 在本文中,我们详细介绍了如何在网络分析中分析 7 层 HTTP/1.x 协议,以及如何将其与现有追踪系统相关联。这使我们能够将我们能够观察到的数据从用户空间扩展到内核空间数据。\n在未来,我们将进一步探究内核数据的分析,例如收集 TCP 包大小、传输频率、网卡等信息,并从另一个角度提升分布式追踪。\n其他资源  SkyWalking Github Repo › SkyWalking Rover Github Repo › SkyWalking Rover Documentation › Diagnose Service Mesh Network Performance with eBPF blog post \u0026gt; SkyWalking Profiling Documentation \u0026gt; SkyWalking Trace Context Propagation \u0026gt; Zipkin Trace Context Propagation \u0026gt; RFC - Hypertext Transfer Protocol – HTTP/1.1 \u0026gt;  ","excerpt":"背景 Apache SkyWalking 是一个开源应用性能管理系统,帮助用户收集和聚合日志、追踪、指标和事件,并在 UI 上显示。在上一篇文章中,我们介绍了如何使用 Apache …","ref":"/zh/ebpf-enhanced-http-observability-l7-metrics-and-tracing/","title":"使用 eBPF 提升 HTTP 可观测性 - L7 指标和追踪"},{"body":"SkyWalking Rust 0.5.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Add management support. by @jmjoy in https://github.com/apache/skywalking-rust/pull/48 Add missing_docs lint and supply documents. by @jmjoy in https://github.com/apache/skywalking-rust/pull/49 Add authentication and custom intercept support. by @jmjoy in https://github.com/apache/skywalking-rust/pull/50 Bump to 0.5.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/51  ","excerpt":"SkyWalking Rust 0.5.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-5-0/","title":"Release Apache SkyWalking Rust 0.5.0"},{"body":"SkyWalking Satellite 1.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support transmit the OpenTelemetry Metrics protocol. Upgrade to GO 1.18. Add Docker images for arm64 architecture. Support transmit Span Attached Event protocol data. Support dotnet CLRMetric forward.  Bug Fixes  Fix the missing return data when receive metrics in batch mode. Fix CVE-2022-21698, CVE-2022-27664.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 1.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skwaylking-satellite-1-1-0/","title":"Release Apache SkyWalking Satellite 1.1.0"},{"body":"Apache SkyWalking is an open-source APM for a distributed system, Apache Software Foundation top-level project.\nOn Jan. 3rd, 2023, we received reports about Aliyun Trace Analysis Service. 
It provides a cloud service compatible with SkyWalking trace APIs and agents.\nOn their product page, there is a best-practice document explaining that their service is not SkyWalking OAP, but can work with SkyWalking agents to support SkyWalking\u0026rsquo;s In-Process(Trace) Profiling.\nBUT, they copied the whole page of SkyWalking\u0026rsquo;s profiling UI, including page layout, words, and profiling task setup. The only difference is the color schemes.\nSkyWalking UI Aliyun Trace Analysis UI on their document page  The UI visualization is a part of the copyrighted work. Aliyun has repeatedly declared on their website that their backend is NOT a re-distribution of SkyWalking, and they never mentioned that this page is actually copied from upstream.\nThis is a LICENSE issue, violating SkyWalking\u0026rsquo;s copyright and the Apache 2.0 License. They don\u0026rsquo;t respect the Apache Software Foundation\u0026rsquo;s and Apache SkyWalking\u0026rsquo;s IP and branding.\n","excerpt":"Apache SkyWalking is an open-source APM for a distributed system, Apache Software Foundation …","ref":"/blog/2023-01-03-aliyun-copy-page/","title":"[License Issue] Aliyun(阿里云)'s trace analysis service copied SkyWalking's trace profiling page."},{"body":"","excerpt":"","ref":"/tags/license/","title":"License"},{"body":"SkyWalking Rover 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Enhancing the render context for the Kubernetes process. Simplify the logic of network protocol analysis. Upgrade Go library to 1.18, eBPF library to 0.9.3. Make the Profiling module compatible with more Linux systems. Support monitoring HTTP/1.x in the NETWORK profiling.  Bug Fixes Documentation  Adding support version of Linux documentation.  Issues and PR  All issues are here All pull requests are here  ","excerpt":"SkyWalking Rover 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Enhancing …","ref":"/events/release-apache-skwaylking-rover-0-4-0/","title":"Release Apache SkyWalking Rover 0.4.0"},{"body":"Observability for modern distributed applications is critical for understanding how they behave under a variety of conditions and for troubleshooting and resolving issues when they arise. Traces, metrics, and logs are regarded as fundamental parts of the observability stack. Traces are the footprints of distributed system executions, while metrics measure system performance with numbers on a timeline. Essentially, they measure performance from two dimensions. Being able to quickly visualize the connection between traces and corresponding metrics makes it possible to quickly diagnose which process flows are correlated to potentially pathological behavior. This powerful new capability is now available in SkyWalking 9.3.0.\nThe SkyWalking project started only with tracing, with a focus on 100% sampling-based metrics and topology analysis since 2018. When users face anomaly trends in time-series metrics, like a peak on a line chart, or a histogram showing a larger gap between p95 and p99, the immediate question is, why is this happening? One of SkyWalking\u0026rsquo;s latest features, the trace-metric association, makes it much easier to answer that question and to address the root cause.\nHow Are Metrics Generated? SkyWalking provides three ways to calculate metrics:\n Metrics built from trace spans, depending on the span’s layer, kind, and tags. Metrics extracted from logs—a kind of keyword and tags-based metrics extraction. 
Metrics reported from mature and mainstream metrics/meter systems, such as OpenTelemetry, Prometheus, and Zabbix.  Tracing tracks the processes of requests between an application\u0026rsquo;s services. Most systems that generate traffic and performance-related metrics also generate tracing data, either from server-side trace-based aggregations or through client SDKs.\nUse SkyWalking to Reduce the Traditional Cost of Trace Indexing Tracing data and visualization are critical troubleshooting tools for both developers and operators alike because of how helpful they are in locating issue boundaries. But, because it has traditionally been difficult to find associations between metrics and traces, teams have added increasingly more tags into their spans and searched through various combinations. This trend of increased instrumentation and searching has required increased infrastructure investment to support this kind of search. SkyWalking\u0026rsquo;s metrics and tracing association capabilities can help reduce the cost of indexing and searching that data.\nFind the Associated Trace When looking for associations between metrics and traces, the kind of metrics we\u0026rsquo;re dealing with determines their relationships to traces. Let’s review the standard request rate, error, and duration (RED) metrics to see how it works.\nSuccess Rate Metrics The success rate is determined by the return code, RPC response code, or exceptions of the process. When the success rate decreases, looking for errors in the traces of this service or pod is the first place to find clues.\nFigure 1: The success rate graph from SkyWalking\u0026rsquo;s 9.3.0 dashboard with the option to view related traces at a particular time.\nDrilling down from the peak of the success rate, SkyWalking lists all traces and their error status that were collected in this particular minute (Figure 2):\nFigure 2: SkyWalking shows related traces with an error status.\nRequests to /test can be located from the trace, and the span’s tag indicates a 404 response code of the HTTP request.\nFigure 3: A detail view of a request to http://frontend/test showing that the URI doesn\u0026rsquo;t exist.\nBy looking at the trace data, it becomes immediately clear that the drop in success rate is caused by requests to a nonexistent URI.\nAverage Response Time The average response time metric provides a general overview of service performance. When average response time is unstable, this usually means that the system is facing serious performance impacts.\nFigure 4: SkyWalking\u0026rsquo;s query UI for searching for related traces showing traces for requests that exceed a particular duration threshold.\nWhen you drill down from this metric, this query condition (Figure 4) will reveal the slowest traces of the service in this specific minute. Notice that a minimum duration of at least 168 ms is added as a condition automatically, to avoid scanning a large number of rows in the database.\nApdex Apdex—the Application Performance Index—is a measure of response time against a set threshold. It measures the ratio of satisfactorily handled requests to all requests (Figure 5). The response time is measured from an asset request to completed delivery back to the requestor.\nFigure 5: The Apdex formula\nA user defines a response time tolerating threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\n
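The post does not spell out the formula behind Figure 5, so as a reference point, here is the standard Apdex definition worked through with the T = 1.2 s example above; the sample durations in the sketch are made up purely for illustration.

```python
# Standard Apdex score, assuming the usual definition:
# satisfied: duration <= T, tolerating: T < duration <= 4T, frustrated: > 4T.
def apdex(durations_s, t=1.2):
    satisfied = sum(1 for d in durations_s if d <= t)
    tolerating = sum(1 for d in durations_s if t < d <= 4 * t)
    return (satisfied + tolerating / 2) / len(durations_s)

# One satisfied (0.5 s), one tolerating (2.0 s), one frustrated (6.0 s) request:
print(apdex([0.5, 2.0, 6.0]))  # (1 + 0.5) / 3 = 0.5
```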
When the Apdex score decreases, we need to find related traces from two perspectives: slow traces and error status traces. SkyWalking\u0026rsquo;s new related-trace feature offers a quick way to view both (Figure 6) directly from the Apdex graph.\nFigure 6: Show slow traces and error status traces from the Apdex graph\nService Response Time Percentile Metric The percentile graph (Figure 7) provides p50, p75, p90, p95, and p99 latency ranks to measure the long-tail issues of service performance.\nFigure 7: The service response time percentile graph helps to highlight long-tail issues of service performance.\nThis percentile graph shows a typical long-tail issue. P99 latency is four times slower than the P95. When we use the association, we see the traces with latency between P95 - P99 and P99 - Infinity.\nThe traces of the requests causing this kind of long-tail phenomenon are automatically listed there.\nFigure 8: Query parameters to search for traces based on latency.\nAre More Associations Available? SkyWalking provides more than just associations between traces and metrics to help you find possible causal relationships and to avoid looking for the proverbial needle in a haystack.\nCurrently, SkyWalking 9.3.0 offers two more associations: metric-to-metric associations and event-to-metric associations.\nMetric-to-metric Associations There are dozens of metrics on the dashboard—which is great for getting a complete picture of application behavior. During a typical performance issue, the peaks of multiple metrics are affected simultaneously. But, trying to correlate peaks across all of these graphs can be difficult\u0026hellip;\nNow in SkyWalking 9.3.0, when you click the peak of one graph, the pop-out box lets you see associated metrics.\nFigure 9: SkyWalking\u0026rsquo;s option to view associated metrics.\nWhen you choose that option, axis pointers (the dotted vertical lines) will be shown in all associated metrics graphs, as in Figure 10. This makes it easier to correlate the peaks in different graphs with each other. Often, these correlated peaks will have the same root cause.\nFigure 10: Axis pointers (vertical dotted lines) show associations between peaks across multiple metrics graphs.\nEvent-to-Metric Associations SkyWalking provides the event concept to associate service performance impacts possibly caused by the infrastructure, such as a new deployment event from Kubernetes, or an anomaly detected by alerting or an integrated AIOps engine.\nThe event-to-metric association is also automatic: it covers the time range of the event on the metric graphs (blue areas). If the event area and the peaks match, the event most likely explains the anomaly.\nFigure 11: SkyWalking\u0026rsquo;s event to metric association view.\nSkyWalking Makes it Easier and Faster to Find Root Causes SkyWalking now makes it easy to find associations between metrics, events, and traces, ultimately making it possible to identify root causes and fix problems fast. 
The associations we\u0026rsquo;ve discussed in this article are available out-of-box in the SkyWalking 9.3.0 release.\nFigure 12: Just click on the dots to see related traces and metrics associations.\nClick the dots on any metric graph, and you will see a View Related Traces item pop-out if this metric has logical mapping traces.\nConclusion In this blog, we took a look at the newly-added association feature between metrics and traces. With this new visualization, it\u0026rsquo;s now much easier to find key traces to identify root cause of issues.Associations in SkyWalking can go even deeper. Associations from metrics to traces is not the end of diagnosing system bottleneck. In the next post, we will introduce an eBPF powered trace enhancement where you’ll be able to see HTTP request and response details associated with tracing spans from network profiling. Stay tuned.\n","excerpt":"Observability for modern distributed applications work is critical for understanding how they behave …","ref":"/blog/boost-root-cause-analysis-quickly-with-skywalking-new-trace-metrics-association-feature/","title":"Boost Root Cause Analysis Quickly With SkyWalking’s New Trace-Metrics Association Feature"},{"body":"现代分布式应用程序工作的可观测性对于了解它们在各种条件下的行为方式以及在出现问题时进行故障排除和解决至关重要。追踪、指标和日志被视为可观测性堆栈的基本部分。Trace 是分布式系统执行的足迹,而 metric 则是用时间轴上的数字衡量系统性能。本质上,它们从两个维度衡量性能。能够快速可视化追踪和相应指标之间的联系,可以快速诊断哪些流程与潜在的异常相关。SkyWalking 9.3.0 现在提供了这一强大的新功能。\nSkyWalking 项目从 tracing 开始,从 2018 年开始专注于 100% 基于采样的指标和拓扑分析。当用户面对时间序列指标的异常趋势时,比如折线图上的峰值,或者直方图显示 p95 和 p95 之间的差距较大,直接的问题是,为什么会出现这种情况?SkyWalking 的最新功能之一,trace 与 metric 关联,使得回答这个问题和解决根本原因更加容易。\n指标是如何生成的? SkyWalking 提供了三种计算指标的方式:\n 根据追踪跨度构建的指标,具体取决于跨度的层、种类和标签。 从日志中提取指标—— 一种基于关键词和标签的指标提取。 从成熟和主流的指标 / 仪表系统报告的指标,例如 OpenTelemetry、Prometheus 和 Zabbix。  Tracing 追踪应用程序服务之间的请求过程。大多数生成流量和性能相关指标的系统也会生成追踪数据,这些数据来自服务器端基于追踪的聚合或通过客户端 SDK。\n使用 SkyWalking 降低追踪索引的传统成本 Trace 数据和可视化对于开发人员和运维人员来说都是至关重要的故障排除工具,因为它们在定位问题边界方面非常有帮助。但是,由于传统上很难找到指标和痕迹之间的关联,团队已经将越来越多的标签添加到跨度中,并搜索各种组合。这种增加仪器和搜索的趋势需要增加基础设施投资来支持这种搜索。SkyWalking 的指标和追踪关联功能有助于降低索引和搜索该数据的成本。\n查找关联的 trace 在寻找 metric 和 trace 之间的关联时,我们处理的指标类型决定了它们与 trace 的关系。让我们回顾一下标准请求*率、错误和持续时间(RED)*指标,看看它是如何工作的。\n成功率指标 成功率由返回码、RPC 响应码或进程异常决定。当成功率下降时,在这个服务或 Pod 的 trace 中寻找错误是第一个寻找线索的地方。\n图 1:SkyWalking 9.3.0 仪表板的成功率图表,带有在特定时间查看相关 trace 的选项。\n从成功率的峰值向下探索,SkyWalking 列出了在这一特定分钟内收集的所有 trace 及其错误状态(图 2):\n图 2:SkyWalking 显示具有错误状态的相关追踪。\n可以从 trace 中找到对 /test 的请求,并且 span 的标记指示 HTTP 请求的 404 响应代码。\n图 3:显示 URI 不存在的 http://frontend/test 请求的详细视图。\n通过查看 trace 数据,很明显成功率的下降是由对不存在的 URI 的请求引起的。\n平均响应时间 平均响应时间指标提供了服务性能的一般概览。当平均响应时间不稳定时,这通常意味着系统面临严重的性能影响。\n图 4:SkyWalking 用于搜索相关 trace 的查询 UI,显示超过特定持续时间阈值的请求的 trace。\n当您从该指标向下探索时,该查询条件(图 4)将揭示该特定分钟内服务的最慢 trace。请注意,至少 168ms 作为条件自动添加,以避免扫描数据库中的大量行。\nApdex Apdex(应用程序性能指数)是根据设定的阈值衡量响应时间的指标。它测量令人满意的响应时间与不令人满意的响应时间的比率(图 5)。响应时间是从资产请求到完成交付回请求者的时间。\n图 5:Apdex 公式\n用户定义响应时间容忍阈值 T。在 T 或更短时间内处理的所有响应都使用户满意。\n例如,如果 T 为 1.2 秒,响应在 0.5 秒内完成,则用户会感到满意。所有大于 1.2 秒的响应都会让用户不满意。超过 4.8 秒的响应会让用户感到沮丧。\n当 Apdex 分数下降时,我们需要从两个角度寻找相关的 trace:慢速和错误状态的 trace。SkyWalking 的新相关追踪功能提供了一种直接从 Apdex 图表查看两者(图 6)的快速方法。\n图 6:显示 Apdex 图中的慢速 trace 和错误状态 trace\n服务响应时间 百分位指标百分位图(图 7)提供 p50、p75、p90、p95 和 p99 延迟排名,以衡量服务性能的长尾问题。\n图 7:服务响应时间百分位图有助于突出服务性能的长尾问题。\n这个百分位数图显示了一个典型的长尾问题。P99 延迟比 P95 慢四倍。当我们使用关联时,我们会看到 P95 - P99 和 P99 - Infinity 之间具有延迟的 trace。\n造成这种长尾现象的请求 trace,就是从那里自动列出来的。\n图 8:用于根据延迟搜索 trace 的查询参数。\n是否有更多关联可用? 
SkyWalking 提供的不仅仅是 trace 和 metric 之间的关联,还可以帮助您找到可能的因果关系,避免大海捞针。\n目前,SkyWalking 9.3.0 提供了两种关联:metric-to-metric 关联和 event-to-metric 关联。\nMetric-to-metric 关联 仪表板上有许多指标 —— 这对于全面了解应用程序行为非常有用。在典型的性能问题中,多个指标的峰值会同时受到影响。但是,尝试关联所有这些图表中的峰值可能很困难……\n现在在 SkyWalking 9.3.0 中,当你点击一个图表的峰值时,弹出框可以让你看到相关的指标。\n图 9:SkyWalking 用于查看相关指标的选项。\n当您选择该选项时,所有关联的指标图表将在所有关联的图表中显示轴指针(垂直虚线),如图 10 所示。这使得将不同图表中的峰值相互关联起来变得更加容易。通常,这些相关的峰值具有相同的根本原因。\n图 10:轴指针(垂直虚线)显示多个指标图中峰值之间的关联。\nEvent-to-metric 关联 SkyWalking 提供了事件概念来关联可能受基础设施影响的服务性能,例如来自 Kubernetes 的新部署。或者,已通过警报或集成 AIOps 引擎检测到异常。\n事件到指标的关联也是自动的,它可以覆盖指标图上事件的时间范围(蓝色区域)。如果事件区域和峰值匹配,则很可能该事件覆盖了该异常。\n图 11:SkyWalking 的事件与指标关联视图。\nSkyWalking 使查找根本原因变得更加容易和快速 SkyWalking 现在可以轻松找到指标、事件和追踪之间的关联,最终可以确定根本原因并快速解决问题。我们在本文中讨论的关联在 SkyWalking 9.3.0 版本中开箱即用。\n图 12:只需单击圆点即可查看相关 trace 和 metric 关联。\n单击任何指标图上的点,如果该指标具有逻辑映射,您将看到一个查看相关 trace 弹出窗口。\n结论 在这篇博客中,我们了解了 metric 和 trace 之间新增的关联功能。有了这个新的可视化,现在可以更容易地找到关键 trace 来识别问题的根本原因。SkyWalking 中的关联可以更深入。从 metric 到 trace 的关联并不是诊断系统瓶颈的终点。在下一篇文章中,我们将介绍 eBPF 支持的追踪增强功能,您将看到与网络分析中的追踪跨度相关的 HTTP 请求和响应详细信息。敬请关注。\n","excerpt":"现代分布式应用程序工作的可观测性对于了解它们在各种条件下的行为方式以及在出现问题时进行故障排除和解决至关重要。追踪、指标和日志被视为可观测性堆栈的基本部分。Trace 是分布式系统执行的足迹, …","ref":"/zh/boost-root-cause-analysis-quickly-with-skywalking-new-trace-metrics-association-feature/","title":"SkyWalking 推出 trace-metric 关联功能助力快速根源问题排查"},{"body":"In cloud native applications, a request often needs to be processed through a series of APIs or backend services, some of which are parallel and some serial and located on different platforms or nodes. How do we determine the service paths and nodes a call goes through to help us troubleshoot the problem? This is where distributed tracing comes into play.\nThis article covers:\n How distributed tracing works How to choose distributed tracing software How to use distributed tracing in Istio How to view distributed tracing data using Bookinfo and SkyWalking as examples  Distributed Tracing Basics Distributed tracing is a method for tracing requests in a distributed system to help users better understand, control, and optimize distributed systems. There are two concepts used in distributed tracing: TraceID and SpanID. You can see them in Figure 1 below.\n TraceID is a globally unique ID that identifies the trace information of a request. All traces of a request belong to the same TraceID, and the TraceID remains constant throughout the trace of the request. SpanID is a locally unique ID that identifies a request’s trace information at a certain time. A request generates different SpanIDs at different periods, and SpanIDs are used to distinguish trace information for a request at different periods.  TraceID and SpanID are the basis of distributed tracing. They provide a uniform identifier for request tracing in distributed systems and facilitate users’ ability to query, manage, and analyze the trace information of requests.\nFigure 1: Trace and span\nThe following is the process of distributed tracing:\n When a system receives a request, the distributed tracing system assigns a TraceID to the request, which is used to chain together the entire chain of invocations. The distributed trace system generates a SpanID and ParentID for each service call within the system for the request, which is used to record the parent-child relationship of the call; a Span without a ParentID is used as the entry point of the call chain. TraceID and SpanID are to be passed during each service call. When viewing a distributed trace, query the full process of a particular request by TraceID.  
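Before looking at how Istio handles this, a minimal sketch may help make the bookkeeping above concrete. The header names and ID formats below are placeholders only; real systems use well-defined propagation formats such as Zipkin's B3 headers, SkyWalking's sw8 header, or W3C traceparent, which are covered in the next section.

```python
# Illustrative-only trace context propagation; header names are placeholders.
import uuid

def start_trace():
    # The entry service assigns one TraceID for the whole request and
    # opens a root span that has no parent.
    return {"trace_id": uuid.uuid4().hex,
            "span_id": uuid.uuid4().hex[:16],
            "parent_id": None}

def child_span(parent):
    # Every downstream call keeps the same TraceID and records its caller as parent.
    return {"trace_id": parent["trace_id"],
            "span_id": uuid.uuid4().hex[:16],
            "parent_id": parent["span_id"]}

def inject_headers(ctx):
    # The IDs must travel with every outgoing request so the backend can
    # stitch the spans back into one trace.
    return {"x-example-trace-id": ctx["trace_id"],
            "x-example-span-id": ctx["span_id"],
            "x-example-parent-id": ctx["parent_id"] or ""}

root = start_trace()
child = child_span(root)
assert child["trace_id"] == root["trace_id"]   # same trace end to end
assert child["parent_id"] == root["span_id"]   # parent-child relationship
print(inject_headers(child))
```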
How Istio Implements Distributed Tracing Istio’s distributed tracing is based on information collected by the Envoy proxy in the data plane. After a service request is intercepted by Envoy, Envoy adds tracing information as headers to the request forwarded to the destination workload. The following headers are relevant for distributed tracing:\n As TraceID: x-request-id Used to establish parent-child relationships for Span in the LightStep trace: x-ot-span-context Used for Zipkin, also for Jaeger, SkyWalking, see b3-propagation:  x-b3-traceid x-b3-spanid x-b3-parentspanid x-b3-sampled x-b3-flags b3   For Datadog:  x-datadog-trace-id x-datadog-parent-id x-datadog-sampling-priority   For SkyWalking: sw8 For AWS X-Ray: x-amzn-trace-id  For more information on how to use these headers, please see the Envoy documentation.\nRegardless of the language of your application, Envoy will generate the appropriate tracing headers for you at the Ingress Gateway and forward these headers to the upstream cluster. However, in order to utilize the distributed tracing feature, you must modify your application code to attach the tracing headers to upstream requests. Since neither the service mesh nor the application can automatically propagate these headers, you can integrate the agent for distributed tracing into the application or manually propagate these headers in the application code itself. Once the tracing headers are propagated to all upstream requests, Envoy will send the tracing data to the tracer’s back-end processing, and then you can view the tracing data in the UI.\nFor example, look at the code of the Productpage service in the Bookinfo application. You can see that it integrates the Jaeger client library and synchronizes the header generated by Envoy with the HTTP requests to the Details and Reviews services in the getForwardHeaders (request) function.\ndef getForwardHeaders(request): headers = {} # Using Jaeger agent to get the x-b3-* headers span = get_current_span() carrier = {} tracer.inject( span_context=span.context, format=Format.HTTP_HEADERS, carrier=carrier) headers.update(carrier) # Dealing with the non x-b3-* header manually if \u0026#39;user\u0026#39; in session: headers[\u0026#39;end-user\u0026#39;] = session[\u0026#39;user\u0026#39;] incoming_headers = [ \u0026#39;x-request-id\u0026#39;, \u0026#39;x-ot-span-context\u0026#39;, \u0026#39;x-datadog-trace-id\u0026#39;, \u0026#39;x-datadog-parent-id\u0026#39;, \u0026#39;x-datadog-sampling-priority\u0026#39;, \u0026#39;traceparent\u0026#39;, \u0026#39;tracestate\u0026#39;, \u0026#39;x-cloud-trace-context\u0026#39;, \u0026#39;grpc-trace-bin\u0026#39;, \u0026#39;sw8\u0026#39;, \u0026#39;user-agent\u0026#39;, \u0026#39;cookie\u0026#39;, \u0026#39;authorization\u0026#39;, \u0026#39;jwt\u0026#39;, ] for ihdr in incoming_headers: val = request.headers.get(ihdr) if val is not None: headers[ihdr] = val return headers For more information, the Istio documentation provides answers to frequently asked questions about distributed tracing in Istio.\nHow to Choose A Distributed Tracing System Distributed tracing systems are similar in principle. There are many such systems on the market, such as Apache SkyWalking, Jaeger, Zipkin, Lightstep, Pinpoint, and so on. For our purposes here, we will choose three of them and compare them in several dimensions. Here are our inclusion criteria:\n They are currently the most popular open-source distributed tracing systems. All are based on the OpenTracing specification. 
They support integration with Istio and Envoy.     Items Apache SkyWalking Jaeger Zipkin     Implementations Language-based probes, service mesh probes, eBPF agent, third-party instrumental libraries (Zipkin currently supported) Language-based probes Language-based probes   Database ES, H2, MySQL, TiDB, Sharding-sphere, BanyanDB ES, MySQL, Cassandra, Memory ES, MySQL, Cassandra, Memory   Supported Languages Java, Rust, PHP, NodeJS, Go, Python, C++, .Net, Lua Java, Go, Python, NodeJS, C#, PHP, Ruby, C++ Java, Go, Python, NodeJS, C#, PHP, Ruby, C++   Initiator Personal Uber Twitter   Governance Apache Foundation CNCF CNCF   Version 9.3.0 1.39.0 2.23.19   Stars 20.9k 16.8k 15.8k    Although Apache SkyWalking’s agent does not support as many languages as Jaeger and Zipkin, SkyWalking’s implementation is richer and compatible with Jaeger and Zipkin trace data, and development is more active, so it is one of the best choices for building a telemetry platform.\nDemo Refer to the Istio documentation to install and configure Apache SkyWalking.\nEnvironment Description The following is the environment for our demo:\n Kubernetes 1.24.5 Istio 1.16 SkyWalking 9.1.0  Install Istio Before installing Istio, you can check the environment for any problems:\n$ istioctl experimental precheck ✔ No issues found when checking the cluster. Istio is safe to install or upgrade! To get started, check out https://istio.io/latest/docs/setup/getting-started/ Then install Istio and configure the destination for sending tracing messages as SkyWalking:\n# Initial Istio Operator istioctl operator init # Configure tracing destination kubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: install.istio.io/v1alpha1 kind: IstioOperator metadata: namespace: istio-system name: istio-with-skywalking spec: meshConfig: defaultProviders: tracing: - \u0026#34;skywalking\u0026#34; enableTracing: true extensionProviders: - name: \u0026#34;skywalking\u0026#34; skywalking: service: tracing.istio-system.svc.cluster.local port: 11800 EOF Deploy Apache SkyWalking Istio 1.16 supports distributed tracing using Apache SkyWalking. Install SkyWalking by executing the following code:\nkubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.16/samples/addons/extras/skywalking.yaml It will install the following components under the istio-system namespace:\n SkyWalking Observability Analysis Platform (OAP): Used to receive trace data, supports SkyWalking native data formats, Zipkin v1 and v2 and Jaeger format. UI: Used to query distributed trace data.  For more information about SkyWalking, please refer to the SkyWalking documentation.\nDeploy the Bookinfo Application Execute the following command to install the bookinfo application:\nkubectl label namespace default istio-injection=enabled kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml Launch the SkyWalking UI:\nistioctl dashboard skywalking Figure 2 shows all the services available in the bookinfo application:\nFigure 2: SkyWalking General Service page\nYou can also see information about instances, endpoints, topology, tracing, etc. For example, Figure 3 shows the service topology of the bookinfo application:\nFigure 3: Topology diagram of the Bookinfo application\nTracing views in SkyWalking can be displayed in a variety of formats, including list, tree, table, and statistics. 
See Figure 4:\nFigure 4: SkyWalking General Service trace supports multiple display formats\nTo facilitate our examination, set the sampling rate of the trace to 100%:\nkubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: name: mesh-default namespace: istio-system spec: tracing: - randomSamplingPercentage: 100.00 EOF  Important: It’s generally not good practice to set the sampling rate to 100% in a production environment. To avoid the overhead of generating too many trace logs in production, please adjust the sampling strategy (sampling percentage).\n Uninstall After experimenting, uninstall Istio and SkyWalking by executing the following command.\nsamples/bookinfo/platform/kube/cleanup.sh istioctl unintall --purge kubectl delete namespace istio-system Understanding the Bookinfo Tracing Information Navigate to the General Service tab in the Apache SkyWalking UI, and you can see the trace information for the most recent istio-ingressgateway service, as shown in Figure 5. Click on each span to see the details.\nFigure 5: The table view shows the basic information about each span.\nSwitching to the list view, you can see the execution order and duration of each span, as shown in Figure 6:\nFigure 6: List display\nYou might want to know why such a straightforward application generates so much span data. Because after we inject the Envoy proxy into the pod, every request between services will be intercepted and processed by Envoy, as shown in Figure 7:\nFigure 7: Envoy intercepts requests to generate a span\nThe tracing process is shown in Figure 8:\nFigure 8: Trace of the Bookinfo application\nWe give each span a label with a serial number, and the time taken is indicated in parentheses. For illustration purposes, we have summarized all spans in the table below.\n   No. Endpoint Total Duration (ms) Component Duration (ms) Current Service Description     1 /productpage 190 0 istio-ingressgateway Envoy Outbound   2 /productpage 190 1 istio-ingressgateway Ingress -\u0026gt; Productpage network transmission   3 /productpage 189 1 productpage Envoy Inbound   4 /productpage 188 21 productpage Application internal processing   5 /details/0 8 1 productpage Envoy Outbound   6 /details/0 7 3 productpage Productpage -\u0026gt; Details network transmission   7 /details/0 4 0 details Envoy Inbound   8 /details/0 4 4 details Application internal processing   9 /reviews/0 159 0 productpage Envoy Outbound   10 /reviews/0 159 14 productpage Productpage -\u0026gt; Reviews network transmission   11 /reviews/0 145 1 reviews Envoy Inbound   12 /reviews/0 144 109 reviews Application internal processing   13 /ratings/0 35 2 reviews Envoy Outbound   14 /ratings/0 33 16 reviews Reviews -\u0026gt; Ratings network transmission   15 /ratings/0 17 1 ratings Envoy Inbound   16 /ratings/0 16 16 ratings Application internal processing    From the above information, it can be seen that:\n The total time consumed for this request is 190 ms. In Istio sidecar mode, each traffic flow in and out of the application container must pass through the Envoy proxy once, each time taking 0 to 2 ms. Network requests between Pods take between 1 and 16ms. This is because the data itself has errors and the start time of the Span is not necessarily equal to the end time of the parent Span. We can see that the most time-consuming part is the Reviews application, which takes 109 ms so that we can optimize it for that application.  
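As a quick cross-check of the table above, the self durations in the Component Duration column can be grouped by what they represent. The grouping below is ours, but the numbers are copied straight from the table, and they add up to the 190 ms total, with application time in the Reviews service clearly dominating.

```python
# Self time ("Component Duration") per span from the table, grouped by role.
envoy_sidecars = [0, 1, 1, 0, 0, 1, 2, 1]   # Envoy inbound/outbound spans
network_hops   = [1, 3, 14, 16]             # pod-to-pod transmission spans
applications   = [21, 4, 109, 16]           # productpage, details, reviews, ratings

print(sum(envoy_sidecars))  # 6 ms in the sidecars
print(sum(network_hops))    # 34 ms on the network
print(sum(applications))    # 150 ms in application code, 109 ms of it in reviews
print(sum(envoy_sidecars) + sum(network_hops) + sum(applications))  # 190 ms in total
```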
Summary Distributed tracing is an indispensable tool for analyzing performance and troubleshooting modern distributed applications. In this tutorial, we’ve seen how, with just a few minor changes to your application code to propagate tracing headers, Istio makes distributed tracing simple to use. We’ve also reviewed Apache SkyWalking as one of the best distributed tracing systems that Istio supports. It is a fully functional platform for cloud native application analytics, with features such as metrics and log collection, alerting, Kubernetes monitoring, service mesh performance diagnosis using eBPF, and more.\n If you’re new to service mesh and Kubernetes security, we have a bunch of free online courses available at Tetrate Academy that will quickly get you up to speed with Istio and Envoy.\nIf you’re looking for a fast way to get to production with Istio, check out Tetrate Istio Distribution (TID). TID is Tetrate’s hardened, fully upstream Istio distribution, with FIPS-verified builds and support available. It’s a great way to get started with Istio knowing you have a trusted distribution to begin with, have an expert team supporting you, and also have the option to get to FIPS compliance quickly if you need to.\nOnce you have Istio up and running, you will probably need simpler ways to manage and secure your services beyond what’s available in Istio, that’s where Tetrate Service Bridge comes in. You can learn more about how Tetrate Service Bridge makes service mesh more secure, manageable, and resilient here, or contact us for a quick demo.\n","excerpt":"In cloud native applications, a request often needs to be processed through a series of APIs or …","ref":"/blog/how-to-use-skywalking-for-distributed-tracing-in-istio/","title":"How to Use SkyWalking for Distributed Tracing in Istio?"},{"body":"","excerpt":"","ref":"/tags/istio/","title":"Istio"},{"body":"","excerpt":"","ref":"/tags/service-mesh/","title":"Service Mesh"},{"body":"在云原生应用中,一次请求往往需要经过一系列的 API 或后台服务处理才能完成,这些服务有些是并行的,有些是串行的,而且位于不同的平台或节点。那么如何确定一次调用的经过的服务路径和节点以帮助我们进行问题排查?这时候就需要使用到分布式追踪。\n本文将向你介绍:\n 分布式追踪的原理 如何选择分布式追踪软件 在 Istio 中如何使用分布式追踪 以 Bookinfo 和 SkyWalking 为例说明如何查看分布式追踪数据  分布式追踪基础 分布式追踪是一种用来跟踪分布式系统中请求的方法,它可以帮助用户更好地理解、控制和优化分布式系统。分布式追踪中用到了两个概念:TraceID 和 SpanID。\n TraceID 是一个全局唯一的 ID,用来标识一个请求的追踪信息。一个请求的所有追踪信息都属于同一个 TraceID,TraceID 在整个请求的追踪过程中都是不变的; SpanID 是一个局部唯一的 ID,用来标识一个请求在某一时刻的追踪信息。一个请求在不同的时间段会产生不同的 SpanID,SpanID 用来区分一个请求在不同时间段的追踪信息;  TraceID 和 SpanID 是分布式追踪的基础,它们为分布式系统中请求的追踪提供了一个统一的标识,方便用户查询、管理和分析请求的追踪信息。\n下面是分布式追踪的过程:\n 当一个系统收到请求后,分布式追踪系统会为该请求分配一个 TraceID,用于串联起整个调用链; 分布式追踪系统会为该请求在系统内的每一次服务调用生成一个 SpanID 和 ParentID,用于记录调用的父子关系,没有 ParentID 的 Span 将作为调用链的入口; 每个服务调用过程中都要传递 TraceID 和 SpanID; 在查看分布式追踪时,通过 TraceID 查询某次请求的全过程;  Istio 如何实现分布式追踪 Istio 中的分布式追踪是基于数据平面中的 Envoy 代理实现的。服务请求在被劫持到 Envoy 中后,Envoy 在转发请求时会附加大量 Header,其中与分布式追踪相关的有:\n 作为 TraceID:x-request-id 用于在 LightStep 追踪系统中建立 Span 的父子关系:x-ot-span-context 用于 Zipkin,同时适用于 Jaeger、SkyWalking,详见 b3-propagation:  x-b3-traceid x-b3-spanid x-b3-parentspanid x-b3-sampled x-b3-flags b3   用于 Datadog:  x-datadog-trace-id x-datadog-parent-id x-datadog-sampling-priority   用于 SkyWalking:sw8 用于 AWS X-Ray:x-amzn-trace-id  关于这些 Header 的详细用法请参考 Envoy 文档 。\nEnvoy 会在 Ingress Gateway 中为你产生用于追踪的 Header,不论你的应用程序使用何种语言开发,Envoy 都会将这些 Header 转发到上游集群。但是,你还要对应用程序代码做一些小的修改,才能为使用分布式追踪功能。这是因为应用程序无法自动传播这些 Header,可以在程序中集成分布式追踪的 Agent,或者在代码中手动传播这些 Header。Envoy 会将追踪数据发送到 tracer 后端处理,然后就可以在 UI 中查看追踪数据了。\n例如在 Bookinfo 应用中的 Productpage 服务,如果你查看它的代码可以发现,其中集成了 Jaeger 客户端库,并在 getForwardHeaders (request) 方法中将 
Envoy 生成的 Header 同步给对 Details 和 Reviews 服务的 HTTP 请求:\ndef getForwardHeaders(request): headers = {} # 使用 Jaeger agent 获取 x-b3-* header span = get_current_span() carrier = {} tracer.inject( span_context=span.context, format=Format.HTTP_HEADERS, carrier=carrier) headers.update(carrier) # 手动处理非 x-b3-* header if \u0026#39;user\u0026#39; in session: headers[\u0026#39;end-user\u0026#39;] = session[\u0026#39;user\u0026#39;] incoming_headers = [ \u0026#39;x-request-id\u0026#39;, \u0026#39;x-ot-span-context\u0026#39;, \u0026#39;x-datadog-trace-id\u0026#39;, \u0026#39;x-datadog-parent-id\u0026#39;, \u0026#39;x-datadog-sampling-priority\u0026#39;, \u0026#39;traceparent\u0026#39;, \u0026#39;tracestate\u0026#39;, \u0026#39;x-cloud-trace-context\u0026#39;, \u0026#39;grpc-trace-bin\u0026#39;, \u0026#39;sw8\u0026#39;, \u0026#39;user-agent\u0026#39;, \u0026#39;cookie\u0026#39;, \u0026#39;authorization\u0026#39;, \u0026#39;jwt\u0026#39;, ] for ihdr in incoming_headers: val = request.headers.get(ihdr) if val is not None: headers[ihdr] = val return headers 关于 Istio 中分布式追踪的常见问题请见 Istio 文档 。\n分布式追踪系统如何选择 分布式追踪系统的原理类似,市面上也有很多这样的系统,例如 Apache SkyWalking 、Jaeger 、Zipkin 、LightStep 、Pinpoint 等。我们将选择其中三个,从多个维度进行对比。之所以选择它们是因为:\n 它们是当前最流行的开源分布式追踪系统; 都是基于 OpenTracing 规范; 都支持与 Istio 及 Envoy 集成;     类别 Apache SkyWalking Jaeger Zipkin     实现方式 基于语言的探针、服务网格探针、eBPF agent、第三方指标库(当前支持 Zipkin) 基于语言的探针 基于语言的探针   数据存储 ES、H2、MySQL、TiDB、Sharding-sphere、BanyanDB ES、MySQL、Cassandra、内存 ES、MySQL、Cassandra、内存   支持语言 Java、Rust、PHP、NodeJS、Go、Python、C++、.NET、Lua Java、Go、Python、NodeJS、C#、PHP、Ruby、C++ Java、Go、Python、NodeJS、C#、PHP、Ruby、C++   发起者 个人 Uber Twitter   治理方式 Apache Foundation CNCF CNCF   版本 9.3.0 1.39.0 2.23.19   Star 数量 20.9k 16.8k 15.8k    分布式追踪系统对比表(数据截止时间 2022-12-07)\n虽然 Apache SkyWalking 的 Agent 支持的语言没有 Jaeger 和 Zipkin 多,但是 SkyWalking 的实现方式更丰富,并且与 Jaeger、Zipkin 的追踪数据兼容,开发更为活跃,且为国人开发,中文资料丰富,是构建遥测平台的最佳选择之一。\n实验 参考 Istio 文档 来安装和配置 Apache SkyWalking。\n环境说明 以下是我们实验的环境:\n Kubernetes 1.24.5 Istio 1.16 SkyWalking 9.1.0  安装 Istio 安装之前可以先检查下环境是否有问题:\n$ istioctl experimental precheck ✔ No issues found when checking the cluster. Istio is safe to install or upgrade! 
To get started, check out https://istio.io/latest/docs/setup/getting-started/ 然后安装 Istio 同时配置发送追踪信息的目的地为 SkyWalking:\n# 初始化 Istio Operator istioctl operator init # 安装 Istio 并配置使用 SkyWalking kubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: install.istio.io/v1alpha1 kind: IstioOperator metadata: namespace: istio-system name: istio-with-skywalking spec: meshConfig: defaultProviders: tracing: - \u0026#34;skywalking\u0026#34; enableTracing: true extensionProviders: - name: \u0026#34;skywalking\u0026#34; skywalking: service: tracing.istio-system.svc.cluster.local port: 11800 EOF 部署 Apache SkyWalking Istio 1.16 支持使用 Apache SkyWalking 进行分布式追踪,执行下面的代码安装 SkyWalking:\nkubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.16/samples/addons/extras/skywalking.yaml 它将在 istio-system 命名空间下安装:\n SkyWalking OAP (Observability Analysis Platform) :用于接收追踪数据,支持 SkyWalking 原生数据格式,Zipkin v1 和 v2 以及 Jaeger 格式。 UI :用于查询分布式追踪数据。  关于 SkyWalking 的详细信息请参考 SkyWalking 文档 。\n部署 Bookinfo 应用 执行下面的命令安装 bookinfo 示例:\nkubectl label namespace default istio-injection=enabled kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml 打开 SkyWalking UI:\nistioctl dashboard skywalking SkyWalking 的 General Service 页面展示了 bookinfo 应用中的所有服务。\n你还可以看到实例、端点、拓扑、追踪等信息。例如下图展示了 bookinfo 应用的服务拓扑。\nSkyWalking 的追踪视图有多种显示形式,如列表、树形、表格和统计。\nSkyWalking 通用服务追踪支持多种显示样式\n为了方便我们检查,将追踪的采样率设置为 100%:\nkubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: name: mesh-default namespace: istio-system spec: tracing: - randomSamplingPercentage: 100.00 EOF 卸载 在实验完后,执行下面的命令卸载 Istio 和 SkyWalking:\nsamples/bookinfo/platform/kube/cleanup.sh istioctl unintall --purge kubectl delete namespace istio-system Bookinfo demo 追踪信息说明 在 Apache SkyWalking UI 中导航到 General Service 分页,查看最近的 istio-ingressgateway 服务的追踪信息,表视图如下所示。图中展示了此次请求所有 Span 的基本信息,点击每个 Span 可以查看详细信息。\n切换为列表视图,可以看到每个 Span 的执行顺序及持续时间,如下图所示。\n你可能会感到困惑,为什么这么简单的一个应用会产生如此多的 Span 信息?因为我们为 Pod 注入了 Envoy 代理之后,每个服务间的请求都会被 Envoy 拦截和处理,如下图所示。\n整个追踪流程如下图所示。\n图中给每一个 Span 标记了序号,并在括号里注明了耗时。为了便于说明我们将所有 Span 汇总在下面的表格中。\n   序号 方法 总耗时(ms) 组件耗时(ms) 当前服务 说明     1 /productpage 190 0 istio-ingressgateway Envoy Outbound   2 /productpage 190 1 istio-ingressgateway Ingress -\u0026gt; Productpage 网络传输   3 /productpage 189 1 productpage Envoy Inbound   4 /productpage 188 21 productpage 应用内部处理   5 /details/0 8 1 productpage Envoy Outbound   6 /details/0 7 3 productpage Productpage -\u0026gt; Details 网络传输   7 /details/0 4 0 details Envoy Inbound   8 /details/0 4 4 details 应用内部   9 /reviews/0 159 0 productpage Envoy Outbound   10 /reviews/0 159 14 productpage Productpage -\u0026gt; Reviews 网络传输   11 /reviews/0 145 1 reviews Envoy Inbound   12 /reviews/0 144 109 reviews 应用内部处理   13 /ratings/0 35 2 reviews Envoy Outbound   14 /ratings/0 33 16 reviews Reviews -\u0026gt; Ratings 网络传输   15 /ratings/0 17 1 ratings Envoy Inbound   16 /ratings/0 16 16 ratings 应用内部处理    从以上信息可以发现:\n 本次请求总耗时 190ms; 在 Istio sidecar 模式下,每次流量在进出应用容器时都需要经过一次 Envoy 代理,每次耗时在 0 到 2 ms; 在 Pod 间的网络请求耗时在 1 到 16ms 之间; 将耗时做多的调用链 Ingress Gateway -\u0026gt; Productpage -\u0026gt; Reviews -\u0026gt; Ratings 上的所有耗时累计 182 ms,小于请求总耗时 190ms,这是因为数据本身有误差,以及 Span 的开始时间并不一定等于父 Span 的结束时间,如果你在 SkyWalking 的追踪页面,选择「列表」样式查看追踪数据(见图 2)可以更直观的发现这个问题; 我们可以查看到最耗时的部分是 Reviews 应用,耗时 109ms,因此我们可以针对该应用进行优化;  总结 只要对应用代码稍作修改就可以在 Istio 很方便的使用分布式追踪功能。在 Istio 支持的众多分布式追踪系统中,Apache SkyWalking 是其中的佼佼者。它不仅支持分布式追踪,还支持指标和日志收集、报警、Kubernetes 和服务网格监控,使用 eBPF 诊断服务网格性能 
等功能,是一个功能完备的云原生应用分析平台。本文中为了方便演示,将追踪采样率设置为了 100%,在生产使用时请根据需要调整采样策略(采样百分比),防止产生过多的追踪日志。\n 如果您不熟悉服务网格和 Kubernetes 安全性,我们在 Tetrate Academy 提供了一系列免费在线课程,可以让您快速了解 Istio 和 Envoy。\n如果您正在寻找一种快速将 Istio 投入生产的方法,请查看 Tetrate Istio Distribution (TID)。TID 是 Tetrate 的强化、完全上游的 Istio 发行版,具有经过 FIPS 验证的构建和支持。这是开始使用 Istio 的好方法,因为您知道您有一个值得信赖的发行版,有一个支持您的专家团队,并且如果需要,还可以选择快速获得 FIPS 合规性。\n一旦启动并运行 Istio,您可能需要更简单的方法来管理和保护您的服务,而不仅仅是 Istio 中可用的方法,这就是 Tetrate Service Bridge 的用武之地。您可以在这里详细了解 Tetrate Service Bridge 如何使服务网格更安全、更易于管理和弹性,或联系我们进行快速演示。\n","excerpt":"在云原生应用中,一次请求往往需要经过一系列的 API 或后台服务处理才能完成,这些服务有些是并行的,有些是串行的,而且位于不同的平台或节点。那么如何确定一次调用的经过的服务路径和节点以帮助我们进行问题 …","ref":"/zh/how-to-use-skywalking-for-distributed-tracing-in-istio/","title":"如何在 Istio 中使用 SkyWalking 进行分布式追踪?"},{"body":"","excerpt":"","ref":"/tags/aurora/","title":"Aurora"},{"body":"","excerpt":"","ref":"/tags/aws/","title":"AWS"},{"body":"","excerpt":"","ref":"/tags/demo/","title":"Demo"},{"body":"Introduction Apache SkyWalking is an open source APM tool for monitoring and troubleshooting distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. It provides distributed tracing, service mesh observability, metric aggregation and visualization, and alarm.\nIn this article, I will introduce how to quickly set up Apache SkyWalking on AWS EKS and RDS/Aurora, as well as a couple of sample services, monitoring services to observe SkyWalking itself.\nPrerequisites  AWS account AWS CLI Terraform kubectl  We can use the AWS web console or CLI to create all resources needed in this tutorial, but it can be too tedious and hard to debug when something goes wrong. So in this artical I will use Terraform to create all AWS resources, deploy SkyWalking, sample services, and load generator services (Locust).\nArchitecture The demo architecture is as follows:\ngraph LR subgraph AWS subgraph EKS subgraph istio-system namespace direction TB OAP[[SkyWalking OAP]] UI[[SkyWalking UI]] Istio[[istiod]] end subgraph sample namespace Service0[[Service0]] Service1[[Service1]] ServiceN[[Service ...]] end subgraph locust namespace LocustMaster[[Locust Master]] LocustWorkers0[[Locust Worker 0]] LocustWorkers1[[Locust Worker 1]] LocustWorkersN[[Locust Worker ...]] end end RDS[[RDS/Aurora]] end OAP --\u0026gt; RDS Service0 -. telemetry data -.-\u0026gt; OAP Service1 -. telemetry data -.-\u0026gt; OAP ServiceN -. telemetry data -.-\u0026gt; OAP UI --query--\u0026gt; OAP LocustWorkers0 -- traffic --\u0026gt; Service0 LocustWorkers1 -- traffic --\u0026gt; Service0 LocustWorkersN -- traffic --\u0026gt; Service0 Service0 --\u0026gt; Service1 --\u0026gt; ServiceN LocustMaster --\u0026gt; LocustWorkers0 LocustMaster --\u0026gt; LocustWorkers1 LocustMaster --\u0026gt; LocustWorkersN User --\u0026gt; LocustMaster As shown in the architecture diagram, we need to create the following AWS resources:\n EKS cluster RDS instance or Aurora cluster  Sounds simple, but there are a lot of things behind the scenes, such as VPC, subnets, security groups, etc. You have to configure them correctly to make sure the EKS cluster can connect to RDS instance/Aurora cluster otherwise the SkyWalking won\u0026rsquo;t work. 
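If you do wire these pieces up by hand, a quick way to verify that pods in the EKS cluster can actually reach the database is to run a throwaway PostgreSQL client inside the cluster. This is only a sketch; the endpoint, user, and password are placeholders, not values from the demo module:
kubectl run pg-check --rm -it --restart=Never --image=postgres:14 \
  --env=PGPASSWORD=<db-password> -- \
  psql -h <rds-endpoint> -U <db-user> -d postgres -c 'SELECT 1'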
Luckily, Terraform can help us to create and destroy all these resources automatically.\nI have created a Terraform module to create all AWS resources needed in this tutorial, you can find it in the GitHub repository.\nCreate AWS resources First, we need to clone the GitHub repository and cd into the folder:\ngit clone https://github.com/kezhenxu94/oap-load-test.git Then, we need to create a file named terraform.tfvars to specify the AWS region and other variables:\ncat \u0026gt; terraform.tfvars \u0026lt;\u0026lt;EOF aws_access_key = \u0026#34;\u0026#34; aws_secret_key = \u0026#34;\u0026#34; cluster_name = \u0026#34;skywalking-on-aws\u0026#34; region = \u0026#34;ap-east-1\u0026#34; db_type = \u0026#34;rds-postgresql\u0026#34; EOF If you have already configured the AWS CLI, you can skip the aws_access_key and aws_secret_key variables. To install SkyWalking with RDS postgresql, set the db_type to rds-postgresql, to install SkyWalking with Aurora postgresql, set the db_type to aurora-postgresql.\nThere are a lot of other variables you can configure, such as tags, sample services count, replicas, etc., you can find them in the variables.tf.\nThen, we can run the following commands to initialize the Terraform module and download the required providers, then create all AWS resources:\nterraform init terraform apply -var-file=terraform.tfvars Type yes to confirm the creation of all AWS resources, or add the -auto-approve flag to the terraform apply to skip the confirmation:\nterraform apply -var-file=terraform.tfvars -auto-approve Now what you need to do is to wait for the creation of all AWS resources to complete, it may take a few minutes. You can check the progress of the creation in the AWS web console, and check the deployment progress of the services inside the EKS cluster.\nGenerate traffic Besides creating necessary AWS resources, the Terraform module also deploys SkyWalking, sample services, and Locust load generator services to the EKS cluster.\nYou can access the Locust web UI to generate traffic to the sample services:\nopen http://$(kubectl get svc -n locust -l app=locust-master -o jsonpath=\u0026#39;{.items[0].status.loadBalancer.ingress[0].hostname}\u0026#39;):8089 The command opens the browser to the Locust web UI, you can configure the number of users and hatch rate to generate traffic.\nObserve SkyWalking You can access the SkyWalking web UI to observe the sample services.\nFirst you need to forward the SkyWalking UI port to local\nkubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=skywalking -l component=ui -o name) 8080:8080 And then open the browser to http://localhost:8080 to access the SkyWalking web UI.\nObserve RDS/Aurora You can also access the RDS/Aurora web console to observe the performance of RDS/Aurora instance/Aurora cluste.\nTest Results Test 1: SkyWalking with EKS and RDS PostgreSQL Service Traffic RDS Performance SkyWalking Performance Test 2: SkyWalking with EKS and Aurora PostgreSQL Service Traffic RDS Performance SkyWalking Performance Clean up When you are done with the demo, you can run the following command to destroy all AWS resources:\nterraform destroy -var-file=terraform.tfvars -auto-approve ","excerpt":"Introduction Apache SkyWalking is an open source APM tool for monitoring and troubleshooting …","ref":"/blog/2022-12-13-how-to-run-apache-skywalking-on-aws-eks-rds/","title":"How to run Apache SkyWalking on AWS EKS and 
RDS/Aurora"},{"body":"","excerpt":"","ref":"/tags/observability/","title":"Observability"},{"body":"","excerpt":"","ref":"/tags/rds/","title":"RDS"},{"body":"","excerpt":"","ref":"/tags/skywalking/","title":"SkyWalking"},{"body":"介绍 Apache SkyWalking 是一个开源的 APM 工具,用于监控分布式系统和排除故障,特别是为微服务、云原生和基于容器(Docker、Kubernetes、Mesos)的架构而设计。它提供分布式跟踪、服务网格可观测性、指标聚合和可视化以及警报。\n在本文中,我将介绍如何在 AWS EKS 和 RDS/Aurora 上快速设置 Apache SkyWalking,以及几个示例服务,监控服务以观察 SkyWalking 本身。\n先决条件  AWS 账号 AWS CLI Terraform kubectl  我们可以使用 AWS Web 控制台或 CLI 来创建本教程所需的所有资源,但是当出现问题时,它可能过于繁琐且难以调试。因此,在本文中,我将使用 Terraform 创建所有 AWS 资源、部署 SkyWalking、示例服务和负载生成器服务 (Locust)。\n架构 演示架构如下:\ngraph LR subgraph AWS subgraph EKS subgraph istio-system namespace direction TB OAP[[SkyWalking OAP]] UI[[SkyWalking UI]] Istio[[istiod]] end subgraph sample namespace Service0[[Service0]] Service1[[Service1]] ServiceN[[Service ...]] end subgraph locust namespace LocustMaster[[Locust Master]] LocustWorkers0[[Locust Worker 0]] LocustWorkers1[[Locust Worker 1]] LocustWorkersN[[Locust Worker ...]] end end RDS[[RDS/Aurora]] end OAP --\u0026gt; RDS Service0 -. telemetry data -.-\u0026gt; OAP Service1 -. telemetry data -.-\u0026gt; OAP ServiceN -. telemetry data -.-\u0026gt; OAP UI --query--\u0026gt; OAP LocustWorkers0 -- traffic --\u0026gt; Service0 LocustWorkers1 -- traffic --\u0026gt; Service0 LocustWorkersN -- traffic --\u0026gt; Service0 Service0 --\u0026gt; Service1 --\u0026gt; ServiceN LocustMaster --\u0026gt; LocustWorkers0 LocustMaster --\u0026gt; LocustWorkers1 LocustMaster --\u0026gt; LocustWorkersN User --\u0026gt; LocustMaster 如架构图所示,我们需要创建以下 AWS 资源:\n EKS 集群 RDS 实例或 Aurora 集群  听起来很简单,但背后有很多东西,比如 VPC、子网、安全组等。你必须正确配置它们以确保 EKS 集群可以连接到 RDS 实例 / Aurora 集群,否则 SkyWalking 不会不工作。幸运的是,Terraform 可以帮助我们自动创建和销毁所有这些资源。\n我创建了一个 Terraform 模块来创建本教程所需的所有 AWS 资源,您可以在 GitHub 存储库中找到它。\n创建 AWS 资源 首先,我们需要将 GitHub 存储库克隆 cd 到文件夹中:\ngit clone https://github.com/kezhenxu94/oap-load-test.git 然后,我们需要创建一个文件 terraform.tfvars 来指定 AWS 区域和其他变量:\ncat \u0026gt; terraform.tfvars \u0026lt;\u0026lt;EOF aws_access_key = \u0026#34;\u0026#34; aws_secret_key = \u0026#34;\u0026#34; cluster_name = \u0026#34;skywalking-on-aws\u0026#34; region = \u0026#34;ap-east-1\u0026#34; db_type = \u0026#34;rds-postgresql\u0026#34; EOF 如果您已经配置了 AWS CLI,则可以跳过 aws_access_key 和 aws_secret_key 变量。要使用 RDS postgresql 安装 SkyWalking,请将 db_type 设置为 rds-postgresql,要使用 Aurora postgresql 安装 SkyWalking,请将 db_type 设置为 aurora-postgresql。\n您可以配置许多其他变量,例如标签、示例服务计数、副本等,您可以在 variables.tf 中找到它们。\n然后,我们可以运行以下命令来初始化 Terraform 模块并下载所需的提供程序,然后创建所有 AWS 资源:\nterraform init terraform apply -var-file=terraform.tfvars 键入 yes 以确认所有 AWS 资源的创建,或将标志 -auto-approve 添加到 terraform apply 以跳过确认:\nterraform apply -var-file=terraform.tfvars -auto-approve 现在你需要做的就是等待所有 AWS 资源的创建完成,这可能需要几分钟的时间。您可以在 AWS Web 控制台查看创建进度,也可以查看 EKS 集群内部服务的部署进度。\n产生流量 除了创建必要的 AWS 资源外,Terraform 模块还将 SkyWalking、示例服务和 Locust 负载生成器服务部署到 EKS 集群。\n您可以访问 Locust Web UI 以生成到示例服务的流量:\nopen http://$(kubectl get svc -n locust -l app=locust-master -o jsonpath=\u0026#39;{.items[0].status.loadBalancer.ingress[0].hostname}\u0026#39;):8089 该命令将浏览器打开到 Locust web UI,您可以配置用户数量和孵化率以生成流量。\n观察 SkyWalking 您可以访问 SkyWalking Web UI 来观察示例服务。\n首先需要将 SkyWalking UI 端口转发到本地:\nkubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=skywalking -l component=ui -o name) 8080:8080 然后在浏览器中打开 http://localhost:8080 访问 SkyWalking web UI。\n观察 RDS/Aurora 您也可以访问 RDS/Aurora web 控制台,观察 RDS/Aurora 实例 / Aurora 集群的性能。\n试验结果 测试 1:使用 EKS 和 RDS PostgreSQL 的 SkyWalking 服务流量 RDS 性能 SkyWalking 性能 测试 2:使用 EKS 和 
Aurora PostgreSQL 的 SkyWalking 服务流量 RDS 性能 SkyWalking 性能 清理 完成演示后,您可以运行以下命令销毁所有 AWS 资源:\nterraform destroy -var-file=terraform.tfvars -auto-approve ","excerpt":"介绍 Apache SkyWalking 是一个开源的 APM 工具,用于监控分布式系统和排除故障,特别是为微服务、云原生和基于容器(Docker、Kubernetes、Mesos)的架构而设计。它提 …","ref":"/zh/2022-12-13-how-to-run-apache-skywalking-on-aws-eks-rds/","title":"如何在 AWS EKS 和 RDS/Aurora 上运行 Apache SkyWalking"},{"body":"","excerpt":"","ref":"/tags/sharding-sphere/","title":"Sharding-Sphere"},{"body":"","excerpt":"","ref":"/tags/sharding-sphere-proxy/","title":"Sharding-Sphere-proxy"},{"body":"As an application performance monitoring tool for distributed systems, Apache SkyWalking observes metrics, logs, traces, and events in the service mesh.\nSkyWalking OAP’s dataflow processing architecture boasts high performance and is capable of dealing with massive data traffic in real-time. However, storing, updating, and querying massive amounts of data poses a great challenge to its backend storage system.\nBy default, SkyWalking provides storage methods including H2, OpenSearch, ElasticSearch, MySQL, TiDB, PostgreSQL, and BanyanDB. Among them, MySQL storage is suited to a single machine and table (MySQL cluster capability depends on your technology selection). Nevertheless, in the context of high-traffic business systems, the storage of monitoring data is put under great pressure and query performance is lowered.\nBased on MySQL storage, SkyWalking v9.3.0 provides a new storage method: MySQL-Sharding. It supports database and table sharding features thanks to ShardingSphere-Proxy, which is a mature solution for dealing with relational databases’ massive amounts of data.\n1. Architecture Deployment  SkyWalking will only interact with ShardingSphere-Proxy instead of directly connecting to the database. The connection exposed by each MySQL node is a data source managed by ShardingSphere-Proxy. ShardingSphere-Proxy will establish a virtual logical database based on the configuration and then carry out database and table sharding and routing according to the OAP provided data sharding rules. SkyWalking OAP creates data sharding rules and performs DDL and DML on a virtual logical database just like it does with MySQL.  2. Application Scenario Applicable to scenarios where MySQL is used for storage, but the single-table mode cannot meet the performance requirements created by business growth.\n3. How Does Data Sharding Work with SkyWalking? Data sharding defines the data Model in SkyWalking with the annotation @SQLDatabase.Sharding.\n@interface Sharding { ShardingAlgorithm shardingAlgorithm(); String dataSourceShardingColumn() default \u0026#34;\u0026#34;; String tableShardingColumn() default \u0026#34;\u0026#34;; } Note:\n shardingAlgorithm: Table sharding algorithm dataSourceShardingColumn: Database sharding key tableShardingColumn: Table sharding key\n SkyWalking selects database sharding key, table sharding key and table sharding algorithm based on @SQLDatabase.Sharding, in order to dynamically generate sharding rules for each table. Next, it performs rule definition by operating ShardingSphere-Proxy via DistSQL. ShardingSphere-Proxy carries out data sharding based on the rule definition.\n3.1 Database Sharding Method SkyWalking adopts a unified method to carry out database sharding. The number of databases that need to be sharded requires modulo by the hash value of the database sharding key, which should be the numeric suffix of the routing target database. 
Therefore, the routing target database is:
ds_{dataSourceShardingColumn.hashcode() % dataSourceList.size()}
For example, we now have dataSourceList = ds_0…ds_n. If {dataSourceShardingColumn.hashcode() % dataSourceList.size() = 2}, all the data will be routed to the data source node ds_2.
3.2 Table Sharding Method
Because of the TTL mechanism, the table sharding algorithm shards data mainly by date. According to the TTL, there will be one sharding table per day:
{tableName = logicTableName_timeSeries (date)}
To ensure that data within the TTL can be written and queried, the time series is generated around the current date:
{timeSeries = currentDate - TTL + 1...currentDate + 1}
For example, if TTL=3 and currentDate=20220907, the sharding tables will be: logicTableName_20220905, logicTableName_20220906, logicTableName_20220907, logicTableName_20220908.
SkyWalking provides table sharding algorithms for different data models:

| Algorithm Name | Sharding Description | Time Precision Requirements for Sharding Key | Typical Application Data Model |
|---|---|---|---|
| NO_SHARDING | No table sharding; single-table mode is maintained. | N/A | Data models with a small amount of data and no need for sharding. |
| TIME_RELATIVE_ID_SHARDING_ALGORITHM | Shard by day using time_bucket in the ID column. | time_bucket can be accurate to seconds, minutes, hours, or days in the same table. | Various metrics. |
| TIME_SEC_RANGE_SHARDING_ALGORITHM | Shard by day using the time_bucket column. | time_bucket must be accurate to seconds. | SegmentRecord, LogRecord, etc. |
| TIME_MIN_RANGE_SHARDING_ALGORITHM | Shard by day using the time_bucket column. | time_bucket must be accurate to minutes. | EndpointTraffic |
| TIME_BUCKET_SHARDING_ALGORITHM | Shard by day using the time_bucket column. | time_bucket can be accurate to seconds, minutes, hours, and days in the same table. | Service, Instance, Endpoint and other call relations such as ServiceRelationServerSideMetrics |

4. TTL Mechanism
 For sharding tables, the physical tables with deadline >= timeSeries are deleted according to the TTL, where {deadline = new DateTime().plusDays(-ttl)}.
 The TTL timer deletes the expired tables according to the current date, while updating the sharding rules according to the new date and informing ShardingSphere-Proxy to create the new sharding tables.
 For a single (non-sharded) table, the previous method is kept, and the row records with deadline >= time_bucket are deleted.
5. Examples of Sharding Data Storage
Next, we'll take segment (Record type) and service_resp_time (Metrics type) as examples to illustrate the data storage logic and physical distribution. Here, imagine MySQL has two nodes, ds_0 and ds_1.
Note: The following storage table structure is just a simplified version used as an example, and does not represent the real SkyWalking table structure.
5.1 segment
The sharding configuration is as follows:
@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_SEC_RANGE_SHARDING_ALGORITHM, dataSourceShardingColumn = service_id, tableShardingColumn = time_bucket)
The logical database and table structures and the actual ones are as follows:
5.2 service_resp_time
The sharding configuration is as follows:
@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_RELATIVE_ID_SHARDING_ALGORITHM, tableShardingColumn = id, dataSourceShardingColumn = entity_id)
The logical database and table structures and the actual ones are as follows:
6. How to Use ShardingSphere-Proxy?
6.1 Manual Deployment
Here we take the deployment of a single-node SkyWalking OAP and ShardingSphere-Proxy 5.1.2 as an example.
Please refer to the relevant documentation for the cluster deployment.
 1. Prepare the MySQL cluster.
 2. Deploy, install, and configure ShardingSphere-Proxy:
    In conf/server.yaml, props.proxy-hint-enabled must be true. Refer to the link for the complete configuration.
    conf/config-sharding.yaml configures the logical database and the dataSources list. The dataSource names must be prefixed with ds_ and start from ds_0. For details about the configuration, please refer to this page.
 3. Deploy, install, and configure SkyWalking OAP:
    Set the OAP storage environment variable: ${SW_STORAGE:mysql-sharding}.
    Configure the connection information based on the actual deployment: ${SW_JDBC_URL}, ${SW_DATA_SOURCE_USER}, ${SW_DATA_SOURCE_PASSWORD}.
    Note: the connection information must point to the ShardingSphere-Proxy virtual database.
    Set ${SW_JDBC_SHARDING_DATA_SOURCES} to the data source names configured in ShardingSphere-Proxy's conf/config-sharding.yaml, separated by commas.
 4. Start the MySQL cluster.
 5. Start ShardingSphere-Proxy.
 6. Start SkyWalking OAP.
6.2 Running Demo with Docker
Our GitHub repository provides a complete, runnable demo based on Docker, so you can quickly see how this works in practice. The deployment includes the following:
 One OAP service. The TTL of Metrics and Record data is set to 2 days.
 One sharding-proxy service, version 5.1.2. Its external port is 13307 and the logical database name is swtest.
 Two MySQL services. Their external ports are 3306 and 3307 respectively, and they are configured as ds_0 and ds_1 in sharding-proxy's conf/config-sharding.yaml.
 One provider service (a simulated business program used to verify traces, metrics, and other data). Its external port is 9090.
 One consumer service (a simulated business program used to verify traces, metrics, and other data). Its external port is 9092.
Download the demo program locally and run it directly in the skywalking-mysql-sharding-demo directory:
docker-compose up -d
Note: the first startup may take some time to pull images and create all the tables.
Once all the services are started, database tools can be used to check the creation of the sharding-proxy logical tables and the actual physical sharding tables in the two MySQL databases. Additionally, you can connect to the sharding-proxy logical database to view the data query routing. For example:
PREVIEW SELECT * FROM SEGMENT
The result is as follows:
The simulated business program provided by the demo can simulate business requests by calling the consumer service, which lets you verify the distribution of the various data types:
curl http://127.0.0.1:9092/info
7. Conclusion
In this blog, we introduced SkyWalking's new storage feature, MySQL-Sharding, which leverages ShardingSphere-Proxy, and covered details of its deployment architecture, application scenarios, sharding logic, and TTL mechanism. We've also provided sample data and deployment steps to help you get started.
SkyWalking offers a variety of storage options to fit many use cases. If you need a solution to store large volumes of telemetry data in a relational database, the new MySQL sharding feature is worth a look.
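As a quick recap of the manual deployment in section 6.1, the storage-related settings come down to a handful of OAP environment variables. The following is only a sketch: the host, user, and password are placeholders; the port and logical database name simply mirror the Docker demo above (13307 and swtest); and bin/oapService.sh is assumed to be the standard start script of the OAP distribution.
# Sketch: point the OAP at the ShardingSphere-Proxy virtual database (values are placeholders).
export SW_STORAGE=mysql-sharding
export SW_JDBC_URL="jdbc:mysql://<proxy-host>:13307/swtest"
export SW_DATA_SOURCE_USER=<user>
export SW_DATA_SOURCE_PASSWORD=<password>
# Must list the data sources defined in ShardingSphere-Proxy's conf/config-sharding.yaml.
export SW_JDBC_SHARDING_DATA_SOURCES=ds_0,ds_1
bin/oapService.sh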
For more information on the SkyWalking 9.3.0 release and where to get it, check out the release notes.\n","excerpt":"As an application performance monitoring tool for distributed systems, Apache SkyWalking observes …","ref":"/blog/skywalkings-new-storage-feature-based-on-shardingsphere-proxy-mysql-sharding/","title":"SkyWalking's New Storage Feature Based on ShardingSphere-Proxy: MySQL-Sharding"},{"body":"SkyWalking NodeJS 0.6.0 is released. Go to downloads page to find release tars.\n Add missing build doc by @kezhenxu94 in https://github.com/apache/skywalking-nodejs/pull/92 Fix invalid url error in axios plugin by @kezhenxu94 in https://github.com/apache/skywalking-nodejs/pull/93 Ignore no requests if ignoreSuffix is empty by @michaelzangl in https://github.com/apache/skywalking-nodejs/pull/94 Escape HTTP method in regexp by @michaelzangl in https://github.com/apache/skywalking-nodejs/pull/95 docs: grammar improvements by @BFergerson in https://github.com/apache/skywalking-nodejs/pull/97 fix: entry span url in endponts using Express middleware/router objects by @BFergerson in https://github.com/apache/skywalking-nodejs/pull/96 chore: use openapi format for endpoint uris by @BFergerson in https://github.com/apache/skywalking-nodejs/pull/98 AWS DynamoDB, Lambda, SQS and SNS plugins, webpack by @tom-pytel in https://github.com/apache/skywalking-nodejs/pull/100 Fix nits by @wu-sheng in https://github.com/apache/skywalking-nodejs/pull/101 Update AxiosPlugin for v1.0+ by @tom-pytel in https://github.com/apache/skywalking-nodejs/pull/102  ","excerpt":"SkyWalking NodeJS 0.6.0 is released. Go to downloads page to find release tars.\n Add missing build …","ref":"/events/release-apache-skywalking-nodejs-0-6-0/","title":"Release Apache SkyWalking for NodeJS 0.6.0"},{"body":"SkyWalking 9.3.0 is released. Go to downloads page to find release tars.\nMetrics Association    Dashboard Pop-up Trace Query          APISIX Dashboard Use Sharding MySQL as the Database Virtual Cache Performance Virtual MQ Performance Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. 
Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. 
[Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. 
Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. 
Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.3.0 is released. Go to downloads page to find release tars.\nMetrics Association …","ref":"/events/release-apache-skywalking-apm-9.3.0/","title":"Release Apache SkyWalking APM 9.3.0"},{"body":"","excerpt":"","ref":"/zh_tags/shardingsphere/","title":"ShardingSphere"},{"body":"","excerpt":"","ref":"/zh_tags/shardingsphere-proxy/","title":"ShardingSphere-proxy"},{"body":"Apache SkyWalking 作为一个分布式系统的应用性能监控工具,它观察服务网格中的指标、日志、痕迹和事件。其中 SkyWalking OAP 高性能的数据流处理架构能够实时处理庞大的数据流量,但是这些海量数据的存储更新和后续查询对后端存储系统带来了挑战。\nSkyWalking 默认已经提供了多种存储支持包括 H2、OpenSearch、ElasticSearch、MySQL、TiDB、PostgreSQL、BanyanDB。其中 MySQL 存储提供的是针对单机和单表的存储方式(MySQL 的集群能力需要自己选型提供),在面对高流量的业务系统时,监控数据的存储存在较大压力,同时影响查询性能。\n在 MySQL 存储基础上 SkyWalking v9.3.0 提供了一种新的存储方式 MySQL-Sharding,它提供了基于 ShardingSphere-Proxy 的分库分表特性,而分库分表是关系型数据库面对大数据量处理的成熟解决方案。\n部署架构 SkyWalking 使用 ShardingSphere-Proxy 的部署方式如下图所示。\n SkyWalking OAP 由直连数据库的方式变成只与 ShardingSphere-Proxy 进行交互; 每一个 MySQL 节点暴露的连接都是一个数据源,由 ShardingSphere-Proxy 进行统一管理; ShardingSphere-Proxy 会根据配置建立一个虚拟逻辑数据库,根据 OAP 提供的分库分表规则进行库表分片和路由; SkyWalking OAP 负责生成分库分表规则并且像操作 MySQL 一样对虚拟逻辑库执行 DDL 和 DML;  适用场景 希望使用 MySQL 作为存储,随着业务规模的增长,单表模式已经无法满足性能需要。\nSkyWalking 分库分表逻辑 分库分表逻辑通过注解 @SQLDatabase.Sharding 对 SkyWalking 中的数据模型 Model 进行定义:\n@interface Sharding { ShardingAlgorithm shardingAlgorithm(); String dataSourceShardingColumn() default \u0026#34;\u0026#34;; String tableShardingColumn() default \u0026#34;\u0026#34;; } 其中:\n  shardingAlgorithm:表分片算法\n  dataSourceShardingColumn:分库键\n  tableShardingColumn:分表键\n  SkyWalking 根据注解 @SQLDatabase.Sharding 选择分库键、分表键以及表分片算法对每个表动态生成分片规则通过 DistSQL 操作 Shardingsphere-Proxy 执行规则定义 Shardingsphere-Proxy 根据规则定义进行数据分片。\n分库方式 SkyWalking 对于分库采用统一的方式,路由目标库的数字后缀使用分库键的哈希值取模需要分库的数据库数量,所以路由目标库为:\nds_{dataSourceShardingColumn.hashcode() % dataSourceList.size()} 例如我们有 dataSourceList = 
ds_0...ds_n,如果\n{dataSourceShardingColumn.hashcode() % dataSourceList.size() = 2} 那么所有数据将会路由到 ds_2 这个数据源节点上。\n分表方式 由于 TTL 机制的存在,分表算法主要根据时间的日期进行分片,分片表的数量是根据 TTL 每天一个表:\n分片表名 = 逻辑表名_时间序列(日期):{tableName =logicTableName_timeSeries}\n为保证在 TTL 有效期内的数据能够被写入和查询,时间序列将生成当前日期\n{timeSeries = currentDate - TTL +1...currentDate + 1} 例如:如果 TTL=3, currentDate = 20220907,则分片表为:\nlogicTableName_20220905 logicTableName_20220906 logicTableName_20220907 logicTableName_20220908 SkyWalking 提供了多种不同的分表算法用于不同的数据模型:\n   算法名称 分片说明 分片键时间精度要求 典型应用数据模型     NO_SHARDING 不做任何表分片,保持单表模式 / 数据量小无需分片的数据模型   TIME_RELATIVE_ID_SHARDING_ALGORITHM 使用 ID 列中的 time_bucket 按天分片 time_bucket 的精度可以是同一表中的秒、分、小时和天 各类 Metrics 指标   TIME_SEC_RANGE_SHARDING_ALGORITHM 使用 time_bucket 列按天分片 time_bucket 的精度必须是秒 SegmentRecordLogRecord 等   TIME_MIN_RANGE_SHARDING_ALGORITHM 使用 time_bucket 列按天分片 time_bucket 的精度必须是分钟 EndpointTraffic   TIME_BUCKET_SHARDING_ALGORITHM 使用 time_bucket 列按天分片 time_bucket 的精度可以是同一个表中的秒、分、小时和天 Service、Instance、Endpoint 调用关系等如 ServiceRelationServerSideMetrics    TTL 机制  对于进行分片的表根据 TTL 直接删除 deadline \u0026gt;= timeSeries 的物理表 {deadline = new DateTime().plusDays(-ttl)} TTL 定时器在根据当前日期删除过期表的同时也会根据新日期更新分片规则,通知 ShardingSphere-Proxy 创建新的分片表 对于单表的延续之前的方式,删除 deadline \u0026gt;= time_bucket 的行记录  分片数据存储示例 下面以 segment(Record 类型)和 service_resp_time(Metrics 类型)两个为例说明数据存储的逻辑和物理分布。这里假设 MySQL 为 ds_0 和 ds_1 两个节点。\n注意:以下的存储表结构仅为简化后的存储示例,不表示 SkyWalking 真实的表结构。\nsegment 分片配置为:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_SEC_RANGE_SHARDING_ALGORITHM, dataSourceShardingColumn = service_id, tableShardingColumn = time_bucket) 逻辑库表结构和实际库表如下图:\nservice_resp_time 分片配置为:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_RELATIVE_ID_SHARDING_ALGORITHM, tableShardingColumn = id, dataSourceShardingColumn = entity_id) 逻辑库表结构和实际库表如下图:\n如何使用 你可以选择手动或使用 Docker 来运行 Demo。\n手动部署 这里以单节点 SkyWalking OAP 和 Shardingsphere-Proxy 5.1.2 部署为例,集群部署请参考其他相关文档。\n  准备好 MySQL 集群\n  部署安装并配置 Shardingsphere-Proxy:\n conf/server.yaml,props.proxy-hint-enabled 必须为 true,完整配置可参考这里。 conf/config-sharding.yaml,配置逻辑数据库和 dataSources 列表,dataSource 的名称必须以 ds_为前缀,并且从 ds_0 开始,完整配置可参考这里。    部署安装并配置 SkyWalking OAP:\n 设置 OAP 环境变量 ${SW_STORAGE:mysql-sharding} 根据实际部署情况配置连接信息: ${SW_JDBC_URL} ${SW_DATA_SOURCE_USER} ${SW_DATA_SOURCE_PASSWORD}  注意:连接信息需对应 Shardingsphere-Proxy 虚拟数据库。\n  将 Shardingsphere-Proxy 中 conf/config-sharding.yaml 配置的数据源名称配置在 ${SW_JDBC_SHARDING_DATA_SOURCES} 中,用 , 分割\n  启动 MySQL 集群\n  启动 Shardingsphere-Proxy\n  启动 SkyWalking OAP\n  使用 Docker 运行 Demo GitHub 资源库提供了一个基于 Docker 完整可运行的 demo:skywalking-mysql-sharding-demo,可以快速尝试实际运行效果。\n其中部署包含:\n oap 服务 1 个,Metrics 和 Record 数据的 TTL 均设为 2 天 sharding-proxy 服务 1 个版本为 5.1.2,对外端口为 13307,创建的逻辑库名称为 swtest mysql 服务 2 个,对外端口分别为 3306,3307,在 sharding-proxy 的 conf/config-sharding.yaml 中配置为 ds_0 和 ds_1 provider 服务 1 个(模拟业务程序用于验证 trace 和 metrics 等数据),对外端口为 9090 consumer 服务 1 个(模拟业务程序用于验证 trace 和 metrics 等数据),对外端口为 9092  将 Demo 程序获取到本地后,在 skywalking-mysql-sharding-demo 目录下直接运行:\ndocker-compose up -d 注意:初次启动由于拉取镜像和新建所有表可能需要一定的时间。\n所有服务启动完成之后可以通过数据库工具查看 sharding-proxy 逻辑表创建情况,以及两个 MySQL 库中实际的物理分片表创建情况。也可以连接 sharding-proxy 逻辑库 swtest 查看数据查询路由情况,如:\nPREVIEW SELECT * FROM SEGMENT 显示结果如下:\nDemo 提供的模拟业务程序可以通过请求 consumer 服务模拟业务请求,用于验证各类型数据分布:\ncurl http://127.0.0.1:9092/info 总结 在这篇文章中我们详细介绍了 SkyWalking 基于 ShardingSphere-Proxy 的 MySQL-Sharding 存储特性的部署架构、适应场景、核心分库分表逻辑以及 TTL 机制,并提供了运行后的数据存储示例和详细部署配置步骤以便大家快速理解上手。SkyWalking 提供了多种存储方式以供选择,如果你目前的需求如本文所述,欢迎使用该新特性。\n","excerpt":"Apache SkyWalking 
作为一个分布式系统的应用性能监控工具,它观察服务网格中的指标、日志、痕迹和事件。其中 SkyWalking OAP 高性能的数据流处理架构能够实时处理庞大的数据流 …","ref":"/zh/skywalking-shardingsphere-proxy/","title":"SkyWalking 基于 ShardingSphere-Proxy 的 MySQL-Sharding 分库分表的存储特性介绍"},{"body":"SkyWalking Kubernetes Helm Chart 4.4.0 is released. Go to downloads page to find release tars.\n [Breaking Change]: remove .Values.oap.initEs, there is no need to use this to control whether to run init job anymore, SkyWalking Helm Chart automatically delete the init job when installing/upgrading. [Breaking Change]: remove files/config.d mechanism and use values.yaml files to put the configurations to override default config files in the /skywalking/config folder, using files/config.d is very limited and you have to clone the source codes if you want to use this mechanism, now you can simply use our Docker Helm Chart to install. Refactor oap init job, and support postgresql storage. Upgrade ElasticSearch Helm Chart dependency version.  ","excerpt":"SkyWalking Kubernetes Helm Chart 4.4.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-kubernetes-helm-chart-4.4.0/","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.4.0"},{"body":"SkyWalking PHP 0.2.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Update PECL user by @heyanlong in https://github.com/apache/skywalking-php/pull/12 Start up 0.2.0 by @heyanlong in https://github.com/apache/skywalking-php/pull/13 Update compiling project document. by @jmjoy in https://github.com/apache/skywalking-php/pull/14 Add PDO plugin, and switch unix datagram to stream. by @jmjoy in https://github.com/apache/skywalking-php/pull/15 Update readme about creating issue. by @jmjoy in https://github.com/apache/skywalking-php/pull/17 Fix package.xml role error by @heyanlong in https://github.com/apache/skywalking-php/pull/16 Add swoole support. by @jmjoy in https://github.com/apache/skywalking-php/pull/19 Add .fleet to .gitignore by @heyanlong in https://github.com/apache/skywalking-php/pull/20 [Feature] Add Mysql Improved Extension by @heyanlong in https://github.com/apache/skywalking-php/pull/18 Add predis plugin. by @jmjoy in https://github.com/apache/skywalking-php/pull/21 Take care of PDO false and DSN tailing semicolons. by @phanalpha in https://github.com/apache/skywalking-php/pull/22 Add container by @heyanlong in https://github.com/apache/skywalking-php/pull/23 Save PDO exceptions. by @phanalpha in https://github.com/apache/skywalking-php/pull/24 Update minimal supported PHP version to 7.2. by @jmjoy in https://github.com/apache/skywalking-php/pull/25 Utilize UnixListener for the worker process to accept reports. by @phanalpha in https://github.com/apache/skywalking-php/pull/26 Kill the worker on module shutdown. by @phanalpha in https://github.com/apache/skywalking-php/pull/28 Add plugin for memcached. by @jmjoy in https://github.com/apache/skywalking-php/pull/27 Upgrade rust mini version to 1.65. by @jmjoy in https://github.com/apache/skywalking-php/pull/30 Add plugin for phpredis. by @jmjoy in https://github.com/apache/skywalking-php/pull/29 Add missing request_id. by @jmjoy in https://github.com/apache/skywalking-php/pull/31 Adapt virtual cache. by @jmjoy in https://github.com/apache/skywalking-php/pull/32 Fix permission denied of unix socket. by @jmjoy in https://github.com/apache/skywalking-php/pull/33 Bump to 0.2.0. 
by @jmjoy in https://github.com/apache/skywalking-php/pull/34
New Contributors
 @phanalpha made their first contribution in https://github.com/apache/skywalking-php/pull/22
Full Changelog: https://github.com/apache/skywalking-php/compare/v0.1.0...v0.2.0
PECL https://pecl.php.net/package/skywalking_agent/0.2.0
","excerpt":"SkyWalking PHP 0.2.0 is released. Go to downloads page to find release tars.\nWhat's Changed …","ref":"/events/release-apache-skwaylking-php-0-2-0/","title":"Release Apache SkyWalking PHP 0.2.0"},{"body":"This is an official announcement from the SkyWalking team.
The plan to end-of-life (EOL) all v8 releases in Nov. 2022 was posted three months ago, and the SkyWalking community has not received any objection or any proposal for releasing a new patch version.
Now, it is time to end the v8 series. The v8 documents will no longer be hosted on the website. You can only find the artifacts and source code in Apache's archive repository. The documents of each version are included in the /docs/ folder in the source tars.
The SkyWalking community will reject bug reports and release proposals for v8 due to its end-of-life (EOL) status. v9 provides more powerful features and covers all capabilities of the latest v8. We recommend upgrading to the latest version.
V8 was a memorable and significant release series, which made the project globally adopted. It grew the developer community to over 500 contributors.
We want to highlight and thank all those contributors and end users again. You made today's SkyWalking.
We welcome more contributors and users to join the community and contribute your ideas, experiences, and feedback. We need you to improve and enhance the project to a higher level.
","excerpt":"This is an official announcement from the SkyWalking team.\nThe plan to end-of-life (EOL) all v8 …","ref":"/events/v8-eol/","title":"SkyWalking v8 OAP server End-of-life(EOL)"},{"body":"SkyWalking BanyanDB 0.2.0 is released. Go to downloads page to find release tars.
Features
 Command line tool: bydbctl. Retention controller. Full-text searching. TopN aggregation. Add RESTful style APIs based on gRPC gateway. Add "exists" endpoints to the schema registry. Support tag-based CRUD of the property. Support index-only tags. Support logical operator (and & or) for the query.
Bugs
 "metadata" syncing pipeline complains about an "unknown group". "having" semantic inconsistency. "tsdb" leaked goroutines.
Chores
 "tsdb" structure optimization: merge the primary index into the LSM-based index; remove term metadata. Memory parameters optimization. Bump go to 1.19.
","excerpt":"SkyWalking BanyanDB 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Command …","ref":"/events/release-apache-skywalking-banyandb-0-2-0/","title":"Release Apache SkyWalking BanyanDB 0.2.0"},{"body":"SkyWalking Java Agent 8.13.0 is released. Go to downloads page to find release tars. Changes by Version
8.13.0 This release begins to adopt SkyWalking 9.3.0+ Virtual Cache Analysis, Virtual MQ Analysis
 Support set-type in the agent or plugin configurations. Optimize ConfigInitializer to output warning messages when the config value is truncated. Fix that the default value of a Map field would merge with, rather than be overridden by, new values in the config. Support setting the value of a Map/List field to an empty map/list.
Add plugin to support Impala JDBC 2.6.x. Update guava-cache, jedis, memcached, ehcache plugins to adopt uniform tags. Fix Apache ShenYu plugin traceId empty string value. Add plugin to support brpc-java-3.x. Update compose-start-script.template to make it compatible with the new version of docker compose. Bump up grpc to 1.50.0 to fix CVE-2022-3171. Polish up nats plugin to unify MQ related tags. Correct the duration of the transaction span for Neo4J 4.x. Plugin-test configuration.yml dependencies support docker service command field. Polish up rabbitmq-5.x plugin to fix missing broker tag on consumer side. Polish up activemq plugin to fix missing broker tag on consumer side. Enhance MQ plugin related tests to check that key tags are not blank. Add RocketMQ test scenarios for version 4.3 - 4.9. No 4.0 - 4.2 release images for testing. Support manual propagation of tracing context to next operators for webflux. Add MQ_TOPIC and MQ_BROKER tags for RocketMQ consumer\u0026rsquo;s span. Polish up Pulsar plugins to remove unnecessary dynamic value, set peer at consumer side. Polish Kafka plugin to set peer at the consumer side. Polish NATS plugin to set peer at the consumer side. Polish ActiveMQ plugin to set peer at the consumer side. Polish RabbitMQ plugin to set peer at the consumer side.  Documentation  Update configuration doc about overriding default value as empty map/list accordingly. Update plugin dev tags for cache relative tags. Add plugin dev docs for virtual database tags. Add plugin dev docs for virtual MQ tags. Add doc about kafka plugin Manual APIs.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.13.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-13-0/","title":"Release Apache SkyWalking Java Agent 8.13.0"},{"body":"SkyWalking Client JS 0.9.0 is released. Go to downloads page to find release tars.\n Fix custom configurations when the page router changed for SPA. Fix reporting data by navigator.sendbeacon when the page is closed. Bump dependencies. Add Security Notice. Support adding custom tags to spans. Validate custom parameters for register.  ","excerpt":"SkyWalking Client JS 0.9.0 is released. Go to downloads page to find release tars.\n Fix custom …","ref":"/events/release-apache-skywalking-client-js-0-9-0/","title":"Release Apache SkyWalking Client JS 0.9.0"},{"body":"I am excited to announce a new SkyWalking committer, Yueqin Zhang (GitHub ID: yswdqz). Yueqin entered the SkyWalking community for the first time on Jul. 3rd[1], 2022. Later, I learned he was invited by Yihao Chen, our committer, who is running an open-source program for students who couldn\u0026rsquo;t join Summer 2022 because SkyWalking had limited slots.\nHis first PR[2], for Issue #7420, took 20 days to propose. I believe he put in incredibly hard work in his own time. Every PMC member has been there: building a new feature purely by following the documents and existing code is never an easy start.\nAfter that, we had several private talks, and he asked about more possible directions to get more deeply involved in the community. 
Then, I was honored to witness a great extension of the SkyWalking feature landscape. SkyWalking adopts OpenTelemetry features quickly, powered by our powerful MAL and the v9 kernel/UI. He built MySQL and PostgreSQL server monitoring, metrics, and slow SQL collecting(through enhancing LAL with a new layer concept), under a new menu.\nIt is unbelievable to see his contributions in the main repo: 8 PRs[3], with 4,857 lines of code added and 1,627 removed.\nMeanwhile, this story continues: he is trying to build a lightweight and APM-oriented SQL parser module[4] under my mentoring. This would be another challenging idea, but also very useful for enhancing the existing virtual database performance analysis.\nI believe this is not the end of the story between SkyWalking and him.\nWelcome to the team.\nReferrer \u0026amp; PMC member, Sheng Wu.\n [1] https://github.com/apache/skywalking/issues/7420#issuecomment-1173061870 [2] https://github.com/apache/skywalking-java/pull/286 [3] https://github.com/apache/skywalking/commits?author=yswdqz [4] https://github.com/apache/skywalking/issues/9661  ","excerpt":"I am excited to announce a new SkyWalking committer, Yueqin Zhang (GitHub ID: yswdqz). Yueqin entered …","ref":"/events/welcome-yueqin-zhang-as-new-committer/","title":"Welcome Yueqin Zhang as a new committer"},{"body":"SkyWalking PHP 0.1.0 is released. Go to downloads page to find release tars.\nWhat's Changed  [docs] Update README by @heyanlong in https://github.com/apache/skywalking-php/pull/1 Remove the CI limit first, in order to run CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/3 Setup CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/5 Implementation, with curl support. by @jmjoy in https://github.com/apache/skywalking-php/pull/4 Turn off Swoole support, and fix Makefile. by @jmjoy in https://github.com/apache/skywalking-php/pull/6 Update docs by @heyanlong in https://github.com/apache/skywalking-php/pull/7 Add PECL support. by @jmjoy in https://github.com/apache/skywalking-php/pull/8 Support macOS by replacing ipc-channel with socket pair, upgrade dependencies and improve CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/9 Add compile and release docs. by @jmjoy in https://github.com/apache/skywalking-php/pull/10 Update official documentation link. by @jmjoy in https://github.com/apache/skywalking-php/pull/11  New Contributors  @heyanlong made their first contribution in https://github.com/apache/skywalking-php/pull/1 @jmjoy made their first contribution in https://github.com/apache/skywalking-php/pull/3  Full Changelog: https://github.com/apache/skywalking-php/commits/v0.1.0\nPECL https://pecl.php.net/package/skywalking_agent/0.1.0\n","excerpt":"SkyWalking PHP 0.1.0 is released. Go to downloads page to find release tars.\nWhat's Changed  [docs] …","ref":"/events/release-apache-skwaylking-php-0-1-0/","title":"Release Apache SkyWalking PHP 0.1.0"},{"body":"Yanlong He (GitHub: heyanlong) has been a SkyWalking committer for years. He worked on skyapm-php for years to support the SkyWalking ecosystem. That PHP agent contributed significantly to SkyWalking user adoption in the PHP landscape. Yanlong stays active in supporting and maintaining the project to help the community.\nJiemin Xia (GitHub: jmjoy) is a new committer elected in July 2022. He has been super active this year. He took over maintenance responsibility from Rei Shimizu, who is too busy with his daily work. 
He leads on the Rust SDK, and is also a release manager for the Rust SDK.\nRecently, both of them are working with Yanlong He to build a new skywalking PHP agent.\nWe are having our PHP agent v0.1.0 for the community.\nSkyWalking PHP Agent\nNotice, SkyAPM PHP is going to be archived and replaced by SkyWalking PHP agent according to its project maintainer, Yanlong He. Our community would work more closely forward the new PHP agent together.\nLet\u0026rsquo;s welcome and congrats to our 31st and 32nd PMC members, Yanlong He and Jiemin Xia. We are honored to have you.\n","excerpt":"Yanlong He (GitHub: heyanlong) is a SkyWalking committer for years. He was working on skyapm-php for …","ref":"/events/welcome-heyanlong-xiajiemin-join-the-pmc/","title":"Welcome Yanlong He and Jiemin Xia to join the PMC"},{"body":"Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. 
Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  
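As a side note, this kind of kprobe-based observation can be reproduced in isolation with a one-off tool such as bpftrace; the following is only a hedged illustration of the technique described above (it is not how SkyWalking Rover is implemented internally, and it assumes bpftrace is installed on the node):
bpftrace -e 'kprobe:tcp_sendmsg { @send_bytes = hist(arg2); }'    # arg2 is the size argument of tcp_sendmsg(sk, msg, size)
bpftrace -e 'kretprobe:tcp_recvmsg { @recv_bytes = hist(retval); }'    # retval is the number of bytes actually received
These one-liners only histogram the Bytes dimension mentioned above; a collector like SkyWalking Rover additionally correlates each call with its socket, peer address, and protocol to build the metrics discussed in the following sections.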
At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. 
If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. 
The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\nGet Started with Istio To get started with service mesh today, Tetrate Istio Distro is the easiest way to install, manage, and upgrade Istio. It provides a vetted upstream distribution of Istio that\u0026rsquo;s tested and optimized for specific platforms by Tetrate plus a CLI that facilitates acquiring, installing, and configuring multiple Istio versions. 
Tetrate Istio Distro also offers FIPS certified Istio builds for FedRAMP environments.\nFor enterprises that need a unified and consistent way to secure and manage services and traditional workloads across complex, heterogeneous deployment environments, we offer Tetrate Service Bridge, our flagship edge-to-workload application connectivity platform built on Istio and Envoy.\nContact us to learn more.\nAdditional Resources  SkyWalking Github Repo SkyWalking Rover Github Repo SkyWalking Rover Documentation Pinpoint Service Mesh Critical Performance impact by using eBPF blog post Apache SkyWalking with Native eBPF Agent presentation eBPF hook overview  ","excerpt":"Background This article will show how to use Apache SkyWalking with eBPF to make network …","ref":"/blog/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"","excerpt":"","ref":"/zh_tags/ebpf/","title":"eBPF"},{"body":"","excerpt":"","ref":"/zh_tags/performance/","title":"Performance"},{"body":"","excerpt":"","ref":"/tags/performance/","title":"Performance"},{"body":"本文将展示如何利用 Apache SkyWalking 与 eBPF,使服务网格下的网络故障排除更加容易。\nApache SkyWalking 是一个分布式系统的应用性能监控工具。它观察服务网格中的指标、日志、痕迹和事件,并使用这些数据来生成 pod 和服务的依赖图。这个依赖关系图可以帮助你快速系统,尤其是在出现问题的时候。\n然而,在排除 SkyWalking 服务拓扑中的网络问题时,确定错误的实际位置有时候并不容易。造成这种困难的原因有两个:\n 通过 Envoy sidecar 的流量并不容易观察:来自 Envoy 的访问日志服务(ALS)的数据显示了服务之间的流量(sidecar-to-sidecar),但没有关于 Envoy sidecar 和它代理的服务之间的通信指标。如果没有这些信息,就很难理解 sidecar 的影响。 缺乏来自传输层(OSI 第 4 层)通信的数据:由于服务通常使用应用层(OSI 第 7 层)协议,如 HTTP,可观测性数据通常被限制在应用层通信中。然而,根本原因可能实际上是在传输层,而传输层对可观测性工具来说通常是不透明的。  获取 Envoy-to-service 和传输层通信的指标,可以更容易诊断服务问题。为此,SkyWalking 需要收集和分析 Kubernetes pod 内进程之间的传输层指标 —— 这项任务很适合 eBPF。我们调查了为此目的使用 eBPF 的情况,并在下面介绍了我们的结果和演示。\n用 eBPF 监控 Kubernetes 网络 eBPF 起源于 Extended Berkeley Packet Filter,是一种通用的机制,可以在 Linux 内核中注入和运行自己的代码,是监测 Kubernetes Pod 中网络流量的优秀工具。在接下来的几节中,我们将概述如何使用 eBPF 进行网络监控,作为介绍 Skywalking Rover 的背景,这是一个由 eBPF 驱动的指标收集器和分析器,用于诊断 CPU 和网络性能。\n应用程序和网络如何相互作用 应用程序和网络之间的互动一般可分为以下步骤,从较高的抽象层次到较低的抽象层次:\n 用户代码:应用程序代码使用应用程序堆栈中的高级网络库,在网络上交换数据,如发送和接收 HTTP 请求。 网络库:当网络库收到网络请求时,它与语言 API 进行交互以发送网络数据。 语言 API:每种语言都提供了一个操作网络、系统等的 API。当收到一个请求时,它与系统的 API 进行交互。在 Linux 中,这个 API 被称为系统调用(syscalls)。 Linux API:当 Linux 内核通过 API 收到请求时,它与套接字进行通信以发送数据,这通常更接近于 OSI 第四层协议,如 TCP、UDP 等。 Socket Ops:向 / 从网卡发送或接收数据。  我们的假设是,eBPF 可以监控网络。有两种方法可以实现拦截:用户空间(uprobe)或内核空间(kprobe)。下表总结了两者的区别。\n   方式 优点 缺点     uprobe • 获取更多与应用相关的上下文,例如当前请求是 HTTP 还是 HTTPS。 • 请求和响应可以通过一个方法来截获。 • 数据结构可能是不稳定的,所以更难获得所需的数据。 • 不同语言/库版本的实现可能不同。 • 在没有符号表的应用程序中不起作用。   kprobe • 可用于所有语言。 • 数据结构和方法很稳定,不需要太多调整。 • 更容易与底层数据相关联,如获得 TCP 的目标地址、OSI 第四层协议指标等。 • 一个单一的请求和响应可能被分割成多个 probe。 • 对于有状态的请求,上下文信息不容易得到。例如 HTTP/2 中的头压缩。    对于一般的网络性能监控,我们选择使用 kprobe(拦截系统调用),原因如下:\n 它可用于用任何编程语言编写的应用程序,而且很稳定,所以可以节省大量的开发 / 适应成本。 它可以与系统层面的指标相关联,这使得故障排除更加容易。 由于一个请求和响应被分割成多个 probe,我们可以利用技术将它们关联起来。 对于背景信息,它通常用于 OSI 第七层协议网络分析。因此,如果我们只是监测网络性能,那么它们可以被忽略。  Kprobes 和网络监控 按照 Linux 文档中的网络系统调用,我们可以通过两类拦截方法实现网络监控:套接字操作和发送 / 接收方法。\n套接字操作 当接受或与另一个套接字连接时,我们可以得到以下信息:\n 连接信息:包括来自连接的远程地址,这有助于我们了解哪个 pod 被连接。 连接统计 :包括来自套接字的基本指标,如往返时间(RTT)、TCP 的丢包数等。 套接字和文件描述符(FD)的映射:包括 Linux 文件描述符和套接字对象之间的关系。在通过 Linux 文件描述符发送和接收数据时,它很有用。  发送 / 接收 与发送或接收数据有关的接口是性能分析的重点。它主要包含以下参数:\n Socket 文件描述符:当前操作对应的套接字的文件描述符。 缓冲区:发送或接收的数据,以字节数组形式传递。  基于上述参数,我们可以分析以下数据:\n 字节:数据包的大小,以字节为单位。 协议:根据缓冲区的数据进行协议分析,如 HTTP、MySQL 等。 执行时间:发送 / 接收数据所需的时间。  在这一点上(图 1),我们可以分析出连接的整个生命周期的以下步骤:\n 连接 / 接受:当连接被创建时。 转化:在连接上发送和接收数据。 关闭:当连接被关闭时。  图 1\n协议和 TLS 上一节描述了如何使用发送或接收缓冲区数据来分析连接。例如,遵循 HTTP/1.1 消息规范来分析连接。然而,这对 TLS 请求 / 响应不起作用。\n图 2\n当使用 TLS 时,Linux 
内核在用户空间中传输加密的数据。在上图中,应用程序通常通过第三方库(如 OpenSSL)传输 SSL 数据。对于这种情况,Linux API 只能得到加密的数据,所以它不能识别任何高层协议。为了在 eBPF 内部解密,我们需要遵循以下步骤:\n 通过 uprobe 读取未加密的数据:兼容多种语言,使用 uprobe 来捕获发送前或接收后没有加密的数据。通过这种方式,我们可以获得原始数据并将其与套接字联系起来。 与套接字关联:我们可以将未加密的数据与套接字关联。  OpenSSL 用例 例如,发送 / 接收 SSL 数据最常见的方法是使用 OpenSSL 作为共享库,特别是 SSL_read 和 SSL_write 方法,以提交缓冲区数据与套接字。\n按照文档,我们可以截获这两种方法,这与 Linux 中的 API 几乎相同。OpenSSL 中 SSL 结构的源代码显示, Socket FD 存在于 SSL 结构的 BIO 对象中,我们可以通过 offset 得到它。\n综上所述,通过对 OpenSSL 工作原理的了解,我们可以在一个 eBPF 函数中读取未加密的数据。\nSkyWalking Rover—— 基于 eBPF 的指标收集器和分析器 SkyWalking Rover 在 SkyWalking 生态系统中引入了 eBPF 网络分析功能。目前已在 Kubernetes 环境中得到支持,所以必须在 Kubernetes 集群内部署。部署完成后,SkyWalking Rover 可以监控特定 Pod 内所有进程的网络。基于监测数据,SkyWalking 可以生成进程之间的拓扑关系图和指标。\n拓扑结构图 拓扑图可以帮助我们了解同一 Pod 内的进程之间以及进程与外部环境(其他 Pod 或服务)之间的网络访问情况。此外,它还可以根据线路的流动方向来确定流量的数据方向。\n在下面的图 3 中,六边形内的所有节点都是一个 Pod 的内部进程,六边形外的节点是外部关联的服务或 Pod。节点由线连接,表示节点之间的请求或响应方向(客户端或服务器)。线条上标明了协议,它是 HTTP (S)、TCP 或 TCP (TLS)。另外,我们可以在这个图中看到,Envoy 和 Python 应用程序之间的线是双向的,因为 Envoy 拦截了所有的应用程序流量。\n图 3\n度量 一旦我们通过拓扑结构认识到进程之间的网络调用关系,我们就可以选择一个特定的线路,查看两个进程之间的 TCP 指标。\n下图(图4)显示了两个进程之间网络监控的指标。每行有四个指标。左边的两个是在客户端,右边的两个是在服务器端。如果远程进程不在同一个 Pod 中,则只显示一边的指标。\n图 4\n有以下两种度量类型。\n 计数器(Counter):记录一定时期内的数据总数。每个计数器包含以下数据。  计数:执行次数。 字节:数据包大小,以字节为单位。 执行时间:执行时间。   柱状图(Histogram):记录数据在桶中的分布。  基于上述数据类型,暴露了以下指标:\n   名称 类型 单位 描述     Write 计数器和柱状图 毫秒 套接字写计数器。   Read 计数器和柱状图 毫秒 套接字读计数器。   Write RTT 计数器和柱状图 微秒 套接字写入往返时间(RTT)计数器。   Connect 计数器和柱状图 毫秒 套接字连接/接受另一个服务器/客户端的计数器。   Close 计数器和柱状图 毫秒 有其他套接字的计数器。   Retransmit 计数器 毫秒 套接字重发包计数器   Drop 计数器 毫秒 套接字掉包计数器。    演示 在本节中,我们将演示如何在服务网格中执行网络分析。要跟上进度,你需要一个正在运行的 Kubernetes 环境。\n注意:所有的命令和脚本都可以在这个 GitHub 资源库中找到。\n安装 Istio Istio是最广泛部署的服务网格,并附带一个完整的演示应用程序,我们可以用来测试。要安装 Istio 和演示应用程序,请遵循以下步骤:\n 使用演示配置文件安装 Istio。 标记 default 命名空间,所以当我们要部署应用程序时,Istio 会自动注入 Envoy 的 sidecar 代理。 将 bookinfo 应用程序部署到集群上。 部署流量生成器,为应用程序生成一些流量。  export ISTIO_VERSION=1.13.1 # 安装 istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # 部署 bookinfo 应用程序 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # 产生流量 kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml 安装 SkyWalking 下面将安装 SkyWalking 所需的存储、后台和用户界面。\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; 安装 SkyWalking Rover SkyWalking Rover 部署在 Kubernetes 的每个节点上,它自动检测 Kubernetes 集群中的服务。网络剖析功能已经在 SkyWalking Rover 的 0.3.0 版本中发布。当网络监控任务被创建时,SkyWalking Rover 会将数据发送到 SkyWalking 后台。\nkubectl apply -f 
https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml 启动网络分析任务 一旦所有部署完成,我们必须在 SkyWalking UI 中为服务的特定实例创建一个网络分析任务。\n要打开 SkyWalking UI,请运行:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system 目前,我们可以通过点击服务网格面板中的数据平面项目和 Kubernetes 面板中的服务项目来选择我们想要监控的特定实例。\n在下图中,我们选择了一个实例,在网络剖析标签里有一个任务列表。当我们点击启动按钮时,SkyWalking Rover 开始监测这个实例的网络。\n图 5\n完成 几秒钟后,你会看到页面的右侧出现进程拓扑结构。\n图 6\n当你点击进程之间的线时,你可以看到两个进程之间的 TCP 指标。\n图 7\n总结 在这篇文章中,我们详细介绍了一个使服务网格故障排除困难的问题:网络堆栈中各层之间缺乏上下文。这些情况下,当现有的服务网格 /envoy 不能时,eBPF 开始真正帮助调试 / 生产。然后,我们研究了如何将 eBPF 应用于普通的通信,如 TLS。最后,我们用 SkyWalking Rover 演示了这个过程的实现。\n目前,我们已经完成了对 OSI 第四层(主要是 TCP)的性能分析。在未来,我们还将介绍对 OSI 第 7 层协议的分析,如 HTTP。\n开始使用 Istio 开始使用服务网格,Tetrate Istio Distro 是安装、管理和升级 Istio 的最简单方法。它提供了一个经过审查的 Istio 上游发布,由 Tetrate 为特定平台进行测试和优化,加上一个 CLI,方便获取、安装和配置多个 Istio 版本。Tetrate Istio Distro 还为 FedRAMP 环境提供 FIPS 认证的 Istio 构建。\n对于需要以统一和一致的方式在复杂的异构部署环境中保护和管理服务和传统工作负载的企业,我们提供 Tetrate Service Bridge,这是我们建立在 Istio 和 Envoy 上的旗舰工作负载应用连接平台。\n联系我们以了解更多。\n其他资源  SkyWalking Github Repo SkyWalking Rover Github Repo SkyWalking Rover 文件 通过使用 eBPF 博文准确定位服务网格关键性能影响 Apache SkyWalking 与本地 eBPF 代理的介绍 eBPF hook概述  ","excerpt":"本文将展示如何利用 Apache SkyWalking 与 eBPF,使服务网格下的网络故障排除更加容易。\nApache SkyWalking 是一个分布式系统的应用性能监控工具。它观察服务网格中的指 …","ref":"/zh/diagnose-service-mesh-network-performance-with-ebpf/","title":"使用 eBPF 诊断服务网格网络性能"},{"body":"SkyWalking CLI 0.11.0 is released. Go to downloads page to find release tars.\n Add .github/scripts to release source tarball by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/140 Let the eBPF profiling could performs by service level by @mrproliu in https://github.com/apache/skywalking-cli/pull/141 Add the sub-command for estimate the process scale by @mrproliu in https://github.com/apache/skywalking-cli/pull/142 feature: update install.sh version regex by @Alexxxing in https://github.com/apache/skywalking-cli/pull/143 Update the commands relate to the process by @mrproliu in https://github.com/apache/skywalking-cli/pull/144 Add layer to event related commands by @fgksgf in https://github.com/apache/skywalking-cli/pull/145 Add layer to events.graphql by @fgksgf in https://github.com/apache/skywalking-cli/pull/146 Add layer field to alarms.graphql by @fgksgf in https://github.com/apache/skywalking-cli/pull/147 Upgrade crypto lib to fix cve by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/148 Remove layer field in the instance and process commands by @mrproliu in https://github.com/apache/skywalking-cli/pull/149 Remove duration flag in profiling ebpf schedules by @mrproliu in https://github.com/apache/skywalking-cli/pull/150 Remove total field in trace list and logs list commands by @mrproliu in https://github.com/apache/skywalking-cli/pull/152 Remove total field in event list, browser logs, alarm list commands. 
by @mrproliu in https://github.com/apache/skywalking-cli/pull/153 Add aggregate flag in profiling ebpf analysis commands by @mrproliu in https://github.com/apache/skywalking-cli/pull/154 event: fix event query should query all types by default by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/155 Fix a possible lint error and update CI lint version by @JarvisG495 in https://github.com/apache/skywalking-cli/pull/156 Add commands for support network profiling by @mrproliu in https://github.com/apache/skywalking-cli/pull/158 Add the components field in the process relation by @mrproliu in https://github.com/apache/skywalking-cli/pull/159 Trim license headers in query string by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/160 Bump up dependency swck version to fix CVE by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/161 Bump up swck dependency for transitive dep upgrade by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/162 Add the sub-commands for query sorted metrics/records by @mrproliu in https://github.com/apache/skywalking-cli/pull/163 Add compatibility documentation by @mrproliu in https://github.com/apache/skywalking-cli/pull/164 Overhaul licenses, prepare for 0.11.0 by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/165  ","excerpt":"SkyWalking CLI 0.11.0 is released. Go to downloads page to find release tars.\n Add .github/scripts …","ref":"/events/release-apache-skywalking-cli-0-11-0/","title":"Release Apache SkyWalking CLI 0.11.0"},{"body":"SkyWalking Kubernetes Helm Chart 4.3.0 is released. Go to downloads page to find release tars.\n Fix hasSuffix replace hasPrefix by @geffzhang in https://github.com/apache/skywalking-kubernetes/pull/86 Add \u0026ldquo;pods/log\u0026rdquo; permission to OAP so on-demand Pod log can work by @kezhenxu94 in https://github.com/apache/skywalking-kubernetes/pull/87 add .Values.oap.initEs to work with ES initial by @williamyao1982 in https://github.com/apache/skywalking-kubernetes/pull/88 Remove Istio adapter, add changelog for 4.3.0 by @kezhenxu94 in https://github.com/apache/skywalking-kubernetes/pull/89 Bump up helm chart version by @kezhenxu94 in https://github.com/apache/skywalking-kubernetes/pull/90  ","excerpt":"SkyWalking Kubernetes Helm Chart 4.3.0 is released. Go to downloads page to find release tars.\n Fix …","ref":"/events/release-apache-skywalking-kubernetes-helm-chart-4.3.0/","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.3.0"},{"body":"SkyWalking Cloud on Kubernetes 0.7.0 is released. Go to downloads page to find release tars.\nFeatures  Replace go-bindata with embed lib. Add the OAPServerConfig CRD, webhooks and controller. Add the OAPServerDynamicConfig CRD, webhooks and controller. Add the SwAgent CRD, webhooks and controller. [Breaking Change] Remove the way to configure the agent through Configmap.  Bugs  Fix the error in e2e testing. Fix status inconsistent with CI. Bump up prometheus client version to fix cve.  Chores  Bump several dependencies of adapter. Update license eye version. Bump up SkyWalking OAP to 9.0.0. Bump up the k8s api of the e2e environment to v1.21.10.  ","excerpt":"SkyWalking Cloud on Kubernetes 0.7.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-7-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.7.0"},{"body":"SkyWalking Rover 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support NETWORK Profiling. 
Make the logger a configurable module. Support analyzing the data of the OpenSSL, BoringSSL, GoTLS, and NodeTLS libraries in NETWORK Profiling. Enhance the Kubernetes process finder.  Bug Fixes  Fix incorrect reading of process paths when running as a container. Fix the crash caused by multiple profiling tasks.  Issues and PR  All issues are here. All pull requests are here.  ","excerpt":"SkyWalking Rover 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skwaylking-rover-0-3-0/","title":"Release Apache SkyWalking Rover 0.3.0"},{"body":"SkyWalking Java Agent 8.12.0 is released. Go to downloads page to find release tars. Changes by Version\n8.12.0  Fix Shenyu plugin\u0026rsquo;s NPE in reading trace ID when IgnoredTracerContext is used in the context. Update witness class in elasticsearch-6.x-plugin to avoid throwing NPE. Fix onHalfClose using span operation name /Request/onComplete instead of the wrong name /Request/onHalfClose. Add plugin to support RESTeasy 4.x. Add plugin to support hutool-http 5.x. Add plugin to support Tomcat 10.x. Save http status code regardless of its status. Upgrade byte-buddy to 1.12.13, and adopt byte-buddy APIs changes. Upgrade gson to 2.8.9. Upgrade netty-codec-http2 to 4.1.79.Final. Fix race condition causing agent to not reconnect after network error. Force the injected high-priority classes in order to avoid NoClassDefFoundError. Add plugin to support xxl-job 2.3.x. Add plugin to support Micronaut(HTTP Client/Server) 3.2.x-3.6.x. Add plugin to support NATS Java client 2.14.x-2.15.x. Remove inappropriate dependency from elasticsearch-7.x-plugin. Upgrade jedis plugin to support 3.x(stream), 4.x.  Documentation  Add a section in Bootstrap-plugins doc, introducing HttpURLConnection Plugin compatibility. Update Plugin automatic test framework, fix inconsistent description about configuration.yml. Update Plugin automatic test framework, add expected data format of the log items.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.12.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-12-0/","title":"Release Apache SkyWalking Java Agent 8.12.0"},{"body":"This is an official announcement from the SkyWalking team.\nThe SkyWalking backend server and UI released the significant 9.2.0 version on Sep. 2nd, 2022. With the newly added Layer concept, the eBPF agent, and wider middleware server monitoring(such as MySQL and PostgreSQL servers) powered by the OpenTelemetry ecosystem, SkyWalking v9 is much more powerful than the last v8 version(8.9.1).\nBy now, we have resolved all critical bugs found since the 9.0.0 release which could block v8 users from upgrading. v9 releases also provide the same compatibility as the 8.9.1 release, so end users should not be blocked when they upgrade. (As usual, we don\u0026rsquo;t provide storage structure compatibility; users should initialize a new version with an empty database.)\nMore importantly, we are confident that v9 provides a stable and higher performance APM in the production environment.\nThe 8.9.1 release was published in Dec. 2021. Since then, no one has contributed any code, and no committer has requested to begin a new iteration or planned a patch release. 
From the project management committee perspective, the 8.x had became inactive.\nWe are going to wait for another 3 month to official end 8.x series' life.\nNotice, this could be changed if there are at least 3 committers supporting to work on further 8.x releases officially, and provide a release plan.\n","excerpt":"This is an official annoucement from SkyWalking team.\nSkyWalking backend server and UI released …","ref":"/events/deprecate-v8/","title":"Plan to End-of-life(EOL) all v8 releases in Nov. 2022"},{"body":"SkyWalking 9.2.0 is released. Go to downloads page to find release tars.\neBPF Network Profiling for K8s Pod Event and Metrics Association MySQL Server Monitoring PostgreSQL Server Monitoring Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. 
More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. [Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a URL parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MySQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. 
Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the UI doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.2.0 is released. Go to downloads page to find release tars.\neBPF Network Profiling for …","ref":"/events/release-apache-skywalking-apm-9.2.0/","title":"Release Apache SkyWalking APM 9.2.0"},{"body":"SkyWalking Rust 0.4.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Publish release doc. by @wu-sheng in https://github.com/apache/skywalking-rust/pull/31 Set up CI and approval requirements by @wu-sheng in https://github.com/apache/skywalking-rust/pull/32 Move skywalking_proto mod to single files. by @jmjoy in https://github.com/apache/skywalking-rust/pull/33 Polish the release doc. by @wu-sheng in https://github.com/apache/skywalking-rust/pull/34 Add serde support for protobuf generated struct. by @jmjoy in https://github.com/apache/skywalking-rust/pull/35 Improve LogReporter and fix tests. by @jmjoy in https://github.com/apache/skywalking-rust/pull/36 Split tracer inner segment sender and receiver into traits. by @jmjoy in https://github.com/apache/skywalking-rust/pull/37 Switch to use nightly rustfmt. by @jmjoy in https://github.com/apache/skywalking-rust/pull/38 Change Span to refer to SpanStack, rather than TracingContext. by @jmjoy in https://github.com/apache/skywalking-rust/pull/39 Adjust the trace structure. by @jmjoy in https://github.com/apache/skywalking-rust/pull/40 Add logging. by @jmjoy in https://github.com/apache/skywalking-rust/pull/41 Upgrade dependencies. by @jmjoy in https://github.com/apache/skywalking-rust/pull/42 Add feature vendored, to auto build protoc. by @jmjoy in https://github.com/apache/skywalking-rust/pull/43 Add metrics. by @jmjoy in https://github.com/apache/skywalking-rust/pull/44 Add more GH labels as new supports by @wu-sheng in https://github.com/apache/skywalking-rust/pull/45 Bump to 0.4.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/46 Fix trace id is not transmitted. by @jmjoy in https://github.com/apache/skywalking-rust/pull/47  ","excerpt":"SkyWalking Rust 0.4.0 is released. 
Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-4-0/","title":"Release Apache SkyWalking Rust 0.4.0"},{"body":"目录  开篇 为什么需要全链路监控 为什么选择SkyWalking 预研 POC 优化 未来  1、开篇 自从SkyWalking开始在公司推广,时不时会在排查问题的人群中听到这样的话:“你咋还没接SkyWalking?接入后,一眼就看出是哪儿的问题了\u0026hellip;\u0026quot;,正如同事所说的,在许多情况下,SkyWalking就是这么秀。作为实践者,我非常感谢SkyWalking,因为这款国产全链路监控产品给公司的的伙伴们带来了实实在在的帮助;也特别感谢公司的领导和同事们,正因为他们的支持和帮助,才让这套SkyWalking(V8.5.0)系统从起初的有用进化到现在的好用;从几十亿的Segment储能上限、几十秒的查询耗时,优化到千亿级的Segment储能、毫秒级的查询耗时。\n小提示:\n SkyWalking迭代速度很快,公司使用的是8.5.0版本,其新版本的性能肯定有改善。 Segment是SkyWalking中提出的概念,表示一次请求在某个服务内的执行链路片段的合集,一个请求在多个服务中先后产生的Segment串起来构成一个完整的Trace,如下图所示:  SkyWalking的这次实践,截止到现在有一年多的时间,回顾总结一下这段历程中的些许积累和收获,愿能反哺社区,给有需求的道友提供个案例借鉴;也希望能收获到专家们的指导建议,把项目做得更好。因为安全约束,要把有些内容和谐掉,但也努力把这段历程中那些**靓丽的风景,**尽可能完整的呈现给大家。\n2、为什么需要全链路监控 随着微服务架构的演进,单体应用按照服务维度进行拆分,组织架构也随之演进以横向、纵向维度拆分;一个业务请求的执行轨迹,也从单体应用时期一个应用实例内一个接口,变成多个服务实例的多个接口;对应到组织架构,可能跨越多个BU、多个Owner。虽然微服务架构高内聚低耦合的优势是不言而喻的,但是低耦合也有明显的副作用,它在现实中给跨部门沟通、协作带来额外的不可控的开销;因此开发者尤其是终端业务侧的架构师、管理者,特别需要一些可以帮助理解系统拓扑和用于分析性能问题的工具,便于在架构调整、性能检测和发生故障时,缩减沟通协作方面的精力和时间耗费,快速定位并解决问题。\n我所在的平安健康互联网股份有限公司(文中简称公司),是微服务架构的深度实践者。公司用互联网技术搭建医疗服务平台,致力于构筑专业的医患桥梁,提供专业、全面、高品质、一站式企业健康管理服务。为了进一步提高系统服务质量、提升问题响应效率,部门在21年结合自身的一些情况,决定对现行的全链路监控系统进行升级,目的与以下网络中常见的描述基本一致:\n 快速发现问题 判断故障影响范围 梳理服务依赖并判断依赖的合理性 分析链路性能并实施容量规划  3、为什么选择SkyWalking 在做技术选型时,网络中搜集的资料显示,谷歌的 Dapper系统,算是链路追踪领域的始祖。受其公开论文中提出的概念和理念的影响,一些优秀的企业、个人先后做出不少非常nice的产品,有些还在社区开源共建,如:韩国的Pinpoint,Twitter的Zipkin,Uber的Jaeger及中国的SkyWalking 等,我司选型立项的过程中综合考虑的因素较多,这里只归纳一下SkyWalking吸引我们的2个优势:\n  产品的完善度高:\n java生态,功能丰富 社区活跃,迭代迅速    链路追踪、拓扑分析的能力强:\n 插件丰富,探针无侵入。 采用先进的流式拓扑分析设计    “好东西不需要多说,实际行动告诉你“,这句话我个人非常喜欢,关于SkyWalking的众多的优点,网络上可以找到很多,此处先不逐一比较、赘述了。\n4、预研 当时最新版本8.5.0,梳理分析8.x的发布记录后,评估此版本的核心功能是蛮稳定的,于是基于此版本开始了SkyWalking的探索之旅。当时的认知是有限的,串行思维模型驱使我将关注的问题聚焦在架构原理是怎样、有什么副作用这2个方面:\n  架构和原理:\n agent端 主要关注 Java Agent的机制、SkyWalking Agent端的配置、插件的工作机制、数据采集及上报的机制。 服务端 主要关注 角色和职责、模块和配置、数据接收的机制、指标构建的机制、指标聚合的机制及指标存储的机制。 存储端 主要关注 数据量,存储架构要求以及资源评估。    副作用:\n 功能干扰 性能损耗    4.1 架构和原理 SkyWalking社区很棒,官网文档和官方出版的书籍有较系统化的讲解,因为自己在APM系统以及Java Agent方面有一些相关的经验沉淀,通过在这两个渠道的学习,对Agent端和OAP(服务端)很快便有了较系统化的认知。在做系统架构选型时,评估数据量会比较大(成千上万的JVM实例数,每天采集的Segment数量可能是50-100亿的级别),所以传输通道选择Kafka、存储选择Elasticsearch,如此简易版的架构以及数据流转如下图所示:\n这里有几处要解释一下:\n Agent上报数据给OAP端,有grpc通道和kafka通道,当时就盲猜grpc通道可能撑不住,所以选择kafka通道来削峰;kafka通道是在8.x里加入的。 千亿级的数据用ES来做存储肯定是可以的。 图中L1聚合的意思是:SkyWalking OAP服务端 接收数据后,构建metric并完成metric 的Level-1聚合,这里简称L1聚合。 图中L2聚合的意思是:服务端 基于metric的Level-1聚合结果,再做一次聚合,即Level-2聚合,这里简称L2聚合。后续把纯Mixed角色的集群拆成了两个集群。  4.2 副作用 对于质量团队和接入方来说,他们最关注的问题是,接入SkyWalking后:\n 是否对应用有功能性干扰 在运行期能带来哪些性能损耗  这两个问题从3个维度来得到答案:\n  网络资料显示:\n Agent带来的性能损耗在5%以内 未搜到功能性干扰相关的资料(盲猜没有这方面问题)    实现机制评估:\n 字节码增强机制是JVM提供的机制,SkyWalking使用的字节码操控框架ByteBuddy也是成熟稳定的;通过自定义ClassLoader来加载管理插件类,不会产生冲突和污染。 Agent内插件开发所使用的AOP机制是基于模板方法模式实现的,风控很到位,即使插件的实现逻辑有异常也不影响用户逻辑的执行; 插件采集数据跟上报逻辑之间用了一个轻量级的无锁环形队列进行解耦,算是一种保护机制;这个队列在MPSC场景下性能还不错;队列采用满时丢弃的策略,不会有积压阻塞和OOM。    性能测试验证\n 测试的老师针对dubbo、http 这两种常规RPC通信场景,进行压力测试和稳定性测试,结果与网络资料描述一致,符合预期。    5、POC 在POC阶段,接入几十个种子应用,在非生产环境试点观察,同时完善插件补全链路,对接公司的配置中心,对接发布系统,完善自监控.全面准备达到推广就绪状态。\n5.1 对接发布系统 为了对接公司的发布系统,方便系统的发布,将SkyWalking应用拆分为4个子应用:\n   应用 介绍     Webapp Skywalking的web端   Agent Skywalking的Agent端   OAP-Receiver skywakling的服务端,角色是Mixed或Receiver   OAP-Aggregator skywalking的服务端,角色是Aggregator    这里有个考虑,暂定先使用纯Mixed角色的单集群,有性能问题时就试试 Receiver+Aggregator双角色集群模式,最终选哪种视效果而定。\nSkyWalking Agent端是基于Java Agent机制实现的,采用的是启动挂载模式;启动挂载需在启动脚本里加入挂载Java Agent的逻辑,发布系统实现这个功能需要注意2点:\n 启动脚本挂载SkyWalking Agent的环节,尽量让用户无感知。 
发布系统在挂载Agent的时候,给Agent指定应用名称和所属分组信息。  SkyWalking Agent的发布和升级也由发布系统来负责;Agent的升级采用了灰度管控的方案,控制的粒度是应用级和实例级两种:\n 按照应用灰度,可给应用指定使用什么版本的Agent 按照应用的实例灰度,可给应用指定其若干实例使用什么版本的Agent  5.2 完善插件补全链路 针对公司OLTP技术栈,量身定制了插件套,其中大部分在开源社区的插件库中有,缺失的部分通过自研快速补齐。\n这些插件给各组件的核心环节埋点,采集数据上报给SkyWalking后,Web端的【追踪】页面就能勾勒出丰满完美的请求执行链路;这对架构师理解真实架构,测试同学验证逻辑变更和分析性能损耗,开发同学精准定位问题都非常的有帮助。这里借官方在线Demo的截图一用(抱歉后端程序员,五毛特效都没做出来,丰满画面还请自行脑补)\n友情小提示:移除不用的插件对程序编译打包和减少应用启动耗时很有帮助。\n5.3压测稳测 测试的老师,针对SkyWalking Agent端的插件套,设计了丰富的用例,压力测试和稳定性测试的结果都符合预期;每家公司的标准不尽一致,此处不再赘述。\n5.4 对接自研的配置中心 把应用中繁杂的配置交给配置中心来管理是非常必要的,配置中心既能提供启动时的静态配置,又能管理运行期的动态配置,而且外部化配置的机制特别容易满足容器场景下应用的无状态化要求。啰嗦一下,举2个例子:\n 调优时,修改参数的值不用来一遍开发到测试再到生产的发布。 观测系统状态,修改日志配置后不需要来一遍开发到测试再到生产的发布。  Skywaling在外接配置中心这块儿,适配了市面中主流的配置中心产品。而公司的配置中心是自研的,需要对接一下,得益于SkyWalking提供的模块化管理机制,只用扩展一个模块即可。\n在POC阶段,梳理服务端各模块的功能,能感受到其配置化做的不错,配置项很丰富,管控的粒度也很细;在POC阶段几乎没有变动,除了对Webapp模块的外部化配置稍作改造,与配置中心打通以便在配置中心管理 Webapp模块中Ribbon和Hystrix的相关配置。\n5.5完善自监控 自监控是说监控SkyWalking系统内各模块的运转情况:\n   组件 监控方案 说明     kafka kafka-manager 它俩是老搭档了   Agent端 Skywalking Agent端会发心跳信息给服务端,可在Web端看到Agent的信息   OAP集群 prometheus 指标还算丰富,感觉缺的可以自己补充   ES集群 prometheus 指标还算丰富    完善自监控后的架构如下图所示:\n5.6 自研Native端SDK 公司移动端的应用很核心,也要使用链路追踪的功能,社区缺了这块,于是基于SkyWalking的协议,移动端的伙伴们自研了一套SDK,弥补了Native端链路数据的缺失,也在后来的秒开页面指标统计中发挥了作用。随着口口相传,不断有团队提出需求、加入建设,所以也在持续迭代中;内容很多,这里先不展开。\n5.7 小结 POC阶段数据量不大,主要是发现系统的各种功能性问题,查缺补漏。\n6、优化 SkyWalking的正式推广采用的是城市包围农村的策略;公司的核心应用作为第一批次接入,这个策略有几个好处:\n 核心应用的监管是重中之重,优先级默认最高。 核心应用的上下游应用,会随着大家对SkyWalking依赖的加深,而逐步自主接入。  当然安全是第一位的,无论新系统多好、多厉害,其引入都需遵守安全稳定的前提要求。既要安全又要快速还要方便,于是基于之前Agent灰度接入的能力,在发布系统中增加应用Owner自助式灰度接入和快速卸载SkyWalking Agent的能力,即应用负责人可自主选择哪个应用接入,接入几个实例,倘若遇到问题仅通过重启即可完成快速卸载;这个能力在推广的前期发挥了巨大的作用;毕竟安全第一,信任也需逐步建立。\n随着应用的接入、使用,我们也逐渐遇到了一些问题,这里按照时间递增的顺序将问题和优化效果快速的介绍给大家,更多技术原理的内容计划在【SkyWalking(v8.5.0)调优系列】补充。开始之前有几个事项要说明:\n 下文中提到的数字仅代表我司的情况,标注的Segment数量是处理这个问题的那段时间的情况,并不是说达到这个数量才开始出现这个现象。 这些数值以及当时的现象,受到宿主机配置、Segment数据的大小、存储处理能力等多种因素的影响;请关注调整的过程和效果,不必把数字和现象对号入座哈。  6.1 启动耗时: 问题: 有同事反馈应用启动变慢,排查发现容器中多数应用启动的总耗时,在接入SkyWalking前是2秒,接入后变成了16秒以上,公司很多核心应用的实例数很多,这样的启动损耗对它们的发布影响太大。\n优化:  记录启动耗时并随着其他启动数据上报到服务端,方便查看对比。 优化Kafka Reporter的启动过程,将启动耗时减少了3-4秒。 优化类匹配和增强环节(重点)后,容器中的应用启动总耗时从之前16秒以上降低到了3秒内。 梳理Kafka 启动和上报的过程中,顺带调整了Agent端的数据上报到kafka的分区选择策略,将一个JVM实例中的数据全部发送到同一个的分区中,如此在L1层的聚合就完成了JVM实例级的Metric聚合,需注意调整Kafka分片数来保证负载均衡。  6.2 kafka积压-6亿segment/天 问题: SkyWalking OAP端消费慢,导致Kafka中Segment积压。未能达到能用的目标。\n优化: 从SkyWalking OAP端的监控指标中没有定位出哪个环节的问题,把服务端单集群拆为双集群,即把 Mixed角色的集群 ,修改为 Receiver 角色(接收和L1聚合)的集群 ,并加入 Aggregation角色(L2聚合)的集群,调整成了双集群模式,数据流传如下图所示:\n6.3 kafka积压-8亿segment/天 问题: SkyWalking OAP端消费慢,导致Kafka中Segment积压,监控指标能看出是在ES存储环节慢,未能达到能用的目标。\n优化:  优化segment保存到ES的批处理过程,调整BulkProcessor的线程数和批处理大小。 优化metrics保存到ES的批处理过程,调整批处理的时间间隔、线程数、批处理大小以及刷盘时间。  6.4 kafka积压-20亿segment/天 问题: Aggregation集群的实例持续Full GC,Receiver集群通过grpc 给Aggregation集群发送metric失败。未能达到能用的目标。\n优化:  增加ES节点、分片,效果不明显。 ES集群有压力,但无法精准定位出是什么数据的什么操作引发的。采用分治策略,尝试将数据拆分,从OAP服务端读写逻辑调整,将ES单集群拆分为 trace集群 和 metric集群;之后对比ES的监控指标明确看出是metric集群读写压力太大。  优化Receiver集群metric的L1聚合,完成1分钟的数据聚合后,再提交给Aggregation集群做L2聚合。 Aggregation集群metric的L2 聚合是基于db实现的,会有 空读-写-再读-累加-更新写 这样的逻辑,每次写都会有读,调整逻辑是:提升读的性能,优化缓存机制减少读的触发;调整间隔,避免触发累加和更新。 将metric批量写ES操作调整成BulkProcessor。 ES的metric集群 使用SSD存储,增加节点数和分片数。  这一次的持续优化具有里程碑式的意义,Kafka消费很快,OAP各机器的Full GC没了,ES的各方面指标也很稳定;接下来开始优化查询,提升易用性。\n6.5 trace查询慢-25亿segment/天 问题: Web端【追踪】页中的查询都很慢,仅保存了15天的数据,按照traceId查询耗时要20多秒,按照条件查询trace列表的耗时更糟糕;这给人的感受就是“一肚子墨水倒不出来”,未能达到好用的目标。\n优化: ES查询优化方面的信息挺多,但通过百度筛选出解决此问题的有效方案,就要看咱家爱犬的品类了;当时搜集整理了并尝试了N多优化条款,可惜没有跟好运偶遇,结论是颜值不可靠。言归正传,影响读写性能的基本要素有3个:读写频率,数据规模,硬件性能;trace的情况从这三个维度来套一套模板:\n   要素 trace的情况 备注     读写频率 
宏观来看是写多读少的状况    数据规模 按照每天50亿个segment来算,半个月是750亿,1个月是1500亿。    硬件性能 普通硬盘速度一般     这个分析没有得出具有指导意义的结论,读写频率这里粒度太粗,用户的使用情况跟时间也有紧密的关系,情况大概是:\n 当天的数据是读多写多(当天不断有新数据写入,基于紧急响应的需求,问题出现时可能是近实时的排查处理)。 前一天的数据是读多写少(一般也会有问题隔天密集上报的情况,0点后会有前一天数据延迟到达的情况)。 再早的话无新数据写入,数据越早被读的概率也越小。  基于以上分析,增加时间维度并细化更多的参考因素后,分析模型变成了这样:\n   要素 当天 当天-1 当天-2 ~ 当天-N     写频率 多 少 无   读(查询)频率 多 多 少   读响应速度要求 快 快 慢点也行   数据规模 50亿 50亿 50亿* (N-2)   宿主机性能要求 高 高 次高   硬盘速度要求 高(SSD) 高(SSD) 次高(机械)   硬件成本 高 高 次高   期望成本 低 低 低    从上表可以看出,整体呈现出hot-warm数据架构的需求之势,近1-2天为hot数据,之前的为warm数据;恰好ES7提供了hot-warm架构支持,按照hot-warm改造后架构如下图所示:\n 恰逢公司ES中台调优版的ES发布,其内置的ZSTD压缩算法 空间压缩效果非常显著。 对 trace集群进行hot-warm架构调整,查询耗时从20多秒变成了2-3秒,效果是非常明显的。 从查询逻辑进一步调整,充分利用ES的数据分片、路由机制,把全量检索调整为精准检索,即降低检索时需要扫描的数据量,把2-3秒优化到毫秒。  这里要炫一个5毛特效,这套机制下,Segment数据即使是保留半年的,按照TraceId查询的耗时也是毫秒。\n至此完成了查询千亿级Trace数据只要毫秒级耗时的阶段性优化。\n6.6 仪表盘和拓扑查询慢 问题: Web端的【拓扑】页,在开始只有几十个应用的时候,虽然很慢,但还是能看到数据,随着应用增多后,【拓扑】页面数据请求一直是超时(配置的60s超时)的,精力有限,先通过功能降级把这个页面隐藏了;【仪表盘】的指标查询也非常的慢,未能达到好用的目标。\n优化: Web端的【仪表盘】页和【拓扑】页是对SkyWalking里metric数据的展现,metric数据同trace数据一样满足hot-warm的特征。\n metric集群采用hot-warm架构调整,之后仪表盘中的查询耗时也都减小为毫秒级。 【拓扑】页接口依然是超时(60s),对拓扑这里做了几个针对性的调整:  把内部的循环调用合并,压缩调用次数。 去除非必要的查询。 拆分隔离通用索引中的数据,避免互相干扰。 全量检索调整为精准检索,即降低检索时需要扫描的数据量。    至此完成了拓扑页数据查询毫秒级耗时的阶段性优化。\n6.7 小结 SkyWalking调优这个阶段,恰逢上海疫情封城,既要为生存抢菜,又要翻阅学习着各种ES原理、调优的文档资料,一行一行反复的品味思考SkyWalking相关的源码,尝试各种方案去优化它,梦中都在努力提升它的性能。疫情让很多人变得焦虑烦躁,但以我的感受来看在系统的性能压力下疫情不值一提。凡事贵在坚持,时间搞定了诸多困难,调优的效果是很显著的。\n可能在业务价值驱动的价值观中这些技术优化不产生直接业务价值,顶多是五毛特效,但从其他维度来看它价值显著:\n 对个人来说,技术有提升。 对团队来说,实战练兵提升战力,团队协作加深友情;特别感谢ES中台这段时间的鼎力支持! 对公司来说,易用性的提升将充分发挥SkyWalking的价值,在问题发生时,给到同事们切实、高效的帮助,使得问题可以被快速响应;须知战争拼的是保障。  这期间其实也是有考虑过其他的2个方案的:\n 使用降低采样率的兜底方案;但为了得到更准确的指标数据,以及后续其他的规划而坚持了全采样。 采用ClickHouse优化存储;因为公司有定制优化的ES版本,所以就继续在ES上做存储优化,刚好借此机会验证一下。后续【全链路结构化日志】的存储会使用ClickHouse。  这个章节将内容聚焦在落地推广时期技术层面的准备和调优,未描述团队协调、推广等方面的情况;因每个公司情况不同,所以并未提及;但其实对多数公司来说,有些项目的推广比技术本身可能难度更大,这个项目也遇到过一些困难,PM去推广是既靠能力又靠颜值, 以后有机会再与大家探讨。\n7、未来 H5、Native以及后端应用都在持续接入中,相应的SDK也在不断的迭代;目前正在基于已建立的链路通道,完善【全链路业务状态追踪】和【全链路结构化日志追踪】,旨在给运营、客服、运维、开发等服务在一线的同事们提供多视角一站式的观测平台,全方位提升系统服务质量、提高问题响应速度。\n","excerpt":"目录  开篇 为什么需要全链路监控 为什么选择SkyWalking 预研 POC 优化 未来  1、开篇 自从SkyWalking开始在公司推广,时不时会在排查问题的人群中听到这样的话:“你咋还没 …","ref":"/zh/2022-08-30-pingan-jiankang/","title":"SkyWalking on the way - 平安健康千亿级的全链路追踪系统的建设与实践"},{"body":"Observability essential when working with distributed systems. Built on 3 pillars of metrics, logging and tracing, having the right tools in place to quickly identify and determine the root cause of an issue in production is imperative. In this Kongcast interview, we explore the benefits of having observability and demo the use of Apache SkyWalking. We walk through the capabilities that SkyWalking offers out of the box and debug a common HTTP 500 error using the tool.\nAndrew Kew is interviewed by Viktor Gamov, a developer advocate at Kong Inc\nAndrew is a highly passionate technologist with over 16 valuable years experience in building server side and cloud applications. Having spent the majority of his time in the Financial Services domain, his meritocratic rise to CTO of an Algorithmic Trading firm allowed him to not only steer the business from a technology standpoint, but build robust and scalable trading algorithms. 
His mantra is \u0026ldquo;right first time\u0026rdquo;, thus ensuring the projects or clients he is involved in are left in a better place than they were before he arrived.\nHe is the founder of a boutique software consultancy in the United Kingdom, QuadCorps Ltd, working in the API and Integration Ecosystem space and is currently on a residency programme at Kong Inc as a senior field engineer and technical account manager working across many of their enterprise strategic accounts.\n  ","excerpt":"Observability essential when working with distributed systems. Built on 3 pillars of metrics, …","ref":"/blog/2022-08-11-kongcast-20-distributed-tracing-using-skywalking-kong/","title":"[Video] Distributed tracing demo using Apache SkyWalking and Kong API Gateway"},{"body":"","excerpt":"","ref":"/tags/conference/","title":"Conference"},{"body":"","excerpt":"","ref":"/tags/kong/","title":"Kong"},{"body":"SkyWalking Rust 0.3.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Update README.md by @wu-sheng in https://github.com/apache/skywalking-rust/pull/24 Improve errors. by @jmjoy in https://github.com/apache/skywalking-rust/pull/25 Add tracer. by @jmjoy in https://github.com/apache/skywalking-rust/pull/26 Move e2e to workspace. by @jmjoy in https://github.com/apache/skywalking-rust/pull/27 Auto finalize context and span when dropped. by @jmjoy in https://github.com/apache/skywalking-rust/pull/28 Add context capture and continued methods. by @jmjoy in https://github.com/apache/skywalking-rust/pull/29 Bump to 0.3.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/30  ","excerpt":"SkyWalking Rust 0.3.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-3-0/","title":"Release Apache SkyWalking Rust 0.3.0"},{"body":"SkyWalking NodeJS 0.5.1 is released. Go to downloads page to find release tars.\nSkyWalking NodeJS 0.5.1 is a patch release that fixed a vulnerability(CVE-2022-36127) in all previous versions \u0026lt;=0.5.0, we recommend all users who are using versions \u0026lt;=0.5.0 should upgrade to this version.\nThe vulnerability could cause NodeJS services that has this agent installed to be unavailable if the header includes an illegal SkyWalking header, such as\n OAP is unhealthy and the downstream service\u0026rsquo;s agent can\u0026rsquo;t establish the connection. Some sampling mechanism is activated in downstream agents.  ","excerpt":"SkyWalking NodeJS 0.5.1 is released. Go to downloads page to find release tars.\nSkyWalking NodeJS …","ref":"/events/release-apache-skywalking-nodejs-0-5-1/","title":"[CVE-2022-36127] Release Apache SkyWalking for NodeJS 0.5.1"},{"body":"SkyWalking Eyes 0.4.0 is released. Go to downloads page to find release tars.\n Reorganize GHA by header and dependency. (#123) Add rust cargo support for dep command. (#121) Support license expression in dep check. 
(#120) Prune npm packages before listing all dependencies (#119) Add support for multiple licenses in the header config section (#118) Add excludes to license resolve config (#117) maven: set group:artifact as dependency name and extend functions in summary template (#116) Stablize summary context to perform consistant output (#115) Add custom license urls for identification (#114) Lazy initialize GitHub client for comment (#111) Make license identifying threshold configurable (#110) Use Google\u0026rsquo;s licensecheck to identify licenses (#107) dep: short circuit if user declare dep license (#108)  ","excerpt":"SkyWalking Eyes 0.4.0 is released. Go to downloads page to find release tars.\n Reorganize GHA by …","ref":"/events/release-apache-skywalking-eyes-0-4-0/","title":"Release Apache SkyWalking Eyes 0.4.0"},{"body":"SkyWalking NodeJS 0.5.0 is released. Go to downloads page to find release tars.\n Bump up grpc-node to 1.6.7 to fix CVE-2022-25878 (#85) Fix issue #9165 express router entry duplicated (#84) Fix skywalking s3 upload error #8824 (#82) Improved ignore path regex (#81) Upgrade data collect protocol (#78) Fix wrong instance properties (#77) Fix wrong command in release doc (#76)  ","excerpt":"SkyWalking NodeJS 0.5.0 is released. Go to downloads page to find release tars.\n Bump up grpc-node …","ref":"/events/release-apache-skywalking-nodejs-0-5-0/","title":"Release Apache SkyWalking for NodeJS 0.5.0"},{"body":"SkyWalking Infra E2E 1.2.0 is released. Go to downloads page to find release tars.\nFeatures  Expand kind file path with system environment. Support shutdown service during setup phase in compose mode. Expand kind file path with system environment. Support arbitrary os and arch. Support docker-compose v2 container naming. Support installing via go install and add install doc. Add retry when delete kind cluster. Upgrade to go1.18.  Bug Fixes  Fix the problem of parsing verify.retry.interval without setting value.  Documentation  Make trigger.times parameter doc more clear.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Infra E2E 1.2.0 is released. Go to downloads page to find release tars.\nFeatures  Expand …","ref":"/events/release-apache-skywalking-infra-e2e-1-2-0/","title":"Release Apache SkyWalking Infra E2E 1.2.0"},{"body":"SkyWalking Python 0.8.0 is released. Go to downloads page to find release tars.\n  Feature:\n Update mySQL plugin to support two different parameter keys. 
(#186) Add a SW_AGENT_LOG_REPORTER_SAFE_MODE option to control the HTTP basic auth credential filter (#200)    Plugins:\n Add Psycopg(3.x) support (#168) Add MySQL support (#178) Add FastAPI support (#181) Drop support for flask 1.x due to dependency issue in Jinja2 and EOL (#195) Add Bottle support (#214)    Fixes:\n Spans now correctly reference finished parents (#161) Remove potential password leak from Aiohttp outgoing url (#175) Handle error when REMOTE_PORT is missing in Flask (#176) Fix sw-rabbitmq TypeError when there are no headers (#182) Fix agent bootstrap traceback not shown in sw-python CLI (#183) Fix local log stack depth overridden by agent log formatter (#192) Fix typo that cause user sitecustomize.py not loaded (#193) Fix instance property wrongly shown as UNKNOWN in OAP (#194) Fix multiple components inconsistently named on SkyWalking UI (#199) Fix SW_AGENT_LOGGING_LEVEL not properly set during startup (#196) Unify the http tag name with other agents (#208) Remove namespace to instance properties and add pid property (#205) Fix the properties are not set correctly (#198) Improved ignore path regex (#210) Fix sw_psycopg2 register_type() (#211) Fix psycopg2 register_type() second arg default (#212) Enhance Traceback depth (#206) Set spans whose http code \u0026gt; 400 to error (#187)    Docs:\n Add a FAQ doc on how to use with uwsgi (#188)    Others:\n Refactor current Python agent docs to serve on SkyWalking official website (#162) Refactor SkyWalking Python to use the CLI for CI instead of legacy setup (#165) Add support for Python 3.10 (#167) Move flake configs all together (#169) Introduce another set of flake8 extensions (#174) Add E2E test coverage for trace and logging (#199) Now Log reporter cause_exception_depth traceback limit defaults to 10 Enable faster CI by categorical parallelism (#170)    ","excerpt":"SkyWalking Python 0.8.0 is released. Go to downloads page to find release tars.\n  Feature:\n Update …","ref":"/events/release-apache-skywalking-python-0-8-0/","title":"Release Apache SkyWalking Python 0.8.0"},{"body":"SkyWalking Satellite 1.0.1 is released. Go to downloads page to find release tars.\nFeatures Bug Fixes  Fix metadata messed up when transferring Log data.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 1.0.1 is released. Go to downloads page to find release tars.\nFeatures Bug …","ref":"/events/release-apache-skwaylking-satellite-1-0-1/","title":"Release Apache SkyWalking Satellite 1.0.1"},{"body":"Content Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. 
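To make the sampling idea concrete, the following is a minimal, hypothetical sketch of interval-based thread-stack sampling built on the JDK's ThreadMXBean: stacks that appear in more samples correspond to code paths that spend more time executing. It only illustrates the general mechanism under an assumed 10 ms interval and is not SkyWalking's actual profiler code; the class and variable names are invented for this example.

```java
// Minimal, hypothetical sketch of interval-based thread-stack sampling, the
// general idea behind Trace Profiling. This is NOT SkyWalking's agent code;
// the class name, the 10 ms interval, and the 1 s run time are invented here.
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class StackSamplerSketch {
    public static void main(String[] args) throws InterruptedException {
        ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        // Dump every thread's stack on a fixed interval. A real profiler would
        // buffer the samples and report them to a backend, which aggregates the
        // frames so that code appearing in many samples shows up as "hot".
        scheduler.scheduleAtFixedRate(() -> {
            for (ThreadInfo info : threadMXBean.dumpAllThreads(false, false)) {
                StackTraceElement[] frames = info.getStackTrace();
                if (frames.length > 0) {
                    System.out.println(info.getThreadName() + " @ " + frames[0]);
                }
            }
        }, 0, 10, TimeUnit.MILLISECONDS);

        Thread.sleep(1_000);   // sample for one second, then stop
        scheduler.shutdown();
    }
}
```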
However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. 
Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. 
If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. 
Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Content Background Apache SkyWalking observes metrics, logs, traces, and events for services …","ref":"/blog/2022-07-05-pinpoint-service-mesh-critical-performance-impact-by-using-ebpf/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"SkyWalking Rust 0.2.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  add a description to compile in README.md by @Shikugawa in https://github.com/apache/skywalking-rust/pull/16 Update NOTICE to 2022 by @wu-sheng in https://github.com/apache/skywalking-rust/pull/17 fix ignore /e2e/target folder by @tisonkun in https://github.com/apache/skywalking-rust/pull/18 Remove Cargo.lock, update dependencies, update submodule, disable build grpc server api. by @jmjoy in https://github.com/apache/skywalking-rust/pull/19 Enhance Trace Context machenism. by @jmjoy in https://github.com/apache/skywalking-rust/pull/20 chore(typo): fix typo in context/propagation/context.rs by @CherishCai in https://github.com/apache/skywalking-rust/pull/21 Feature(tonic-build): set tonic-build.build_server(false), do not build Server code. by @CherishCai in https://github.com/apache/skywalking-rust/pull/22 Rename crate name skywalking_rust to skywalking? by @jmjoy in https://github.com/apache/skywalking-rust/pull/23  ","excerpt":"SkyWalking Rust 0.2.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed …","ref":"/events/release-apache-skywalking-rust-0-2-0/","title":"Release Apache SkyWalking Rust 0.2.0"},{"body":"B站视频地址\n","excerpt":"B站视频地址","ref":"/zh/2022-06-23-more-than-tracing-logging-metrics/","title":"阿里云 - 可观测技术峰会 2022 - More than Tracing Logging Metrics"},{"body":"SkyWalking Java Agent 8.11.0 is released. 
Go to downloads page to find release tars. Changes by Version\n8.11.0  Fix cluster and namespace value duplicated(namespace value) in properties report. Add layer field to event when reporting. Remove redundant shade.package property. Add servicecomb-2.x plugin and Testcase. Fix NPE in gateway plugin when the timer triggers webflux webclient call. Add an optional plugin, trace-sampler-cpu-policy-plugin, which could disable trace collecting in high CPU load. Change the dateformat of logs to yyyy-MM-dd HH:mm:ss.SSS(was yyyy-MM-dd HH:mm:ss:SSS). Fix NPE in elasticsearch plugin. Grpc plugin support trace client async generic call(without grpc stubs), support Method type: UNARY、SERVER_STREAMING. Enhance Apache ShenYu (incubating) plugin: support trace grpc,sofarpc,motan,tars rpc proxy. Add primary endpoint name to log events. Fix Span not finished in gateway plugin when the gateway request timeout. Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report. Fix tcnative libraries relocation for aarch64. Add plugin.jdbc.trace_sql_parameters into Configuration Discovery Service. Fix argument type name of Array in postgresql-8.x-plugin from java.lang.String[] to [Ljava.lang.String; Add type name checking in ArgumentTypeNameMatch and ReturnTypeNameMatch Highlight ArgumentTypeNameMatch and ReturnTypeNameMatch type naming rule in docs/en/setup/service-agent/java-agent/Java-Plugin-Development-Guide.md Fix FileWriter scheduled task NPE Optimize gRPC Log reporter to set service name for the first element in the streaming.(No change for Kafka reporter)  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.11.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-11-0/","title":"Release Apache SkyWalking Java Agent 8.11.0"},{"body":"SkyWalking Rover 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Support OFF_CPU Profiling. Introduce the BTFHub module. Update to using frequency mode to ON_CPU Profiling. Add logs in the profiling module logical.  Bug Fixes  Fix docker based process could not be detected.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Rover 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skwaylking-rover-0-2-0/","title":"Release Apache SkyWalking Rover 0.2.0"},{"body":"SkyWalking 9.1.0 is released. Go to downloads page to find release tars.\n eBPF agent(skywalking rover) is integrated in the first time  BanyanDB(skywalking native database) is integrated and passed MVP phase. On-demand logs are provided first time in skywalking for all mesh services and k8s deployment as a zero cost log solution  Zipkin alternative is being official, and Zipkin\u0026rsquo;s HTTP APIs are supported as well as lens UI.  Changes by Version Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. 
Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). 
SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.1.0 is released. Go to downloads page to find release tars.\n eBPF agent(skywalking …","ref":"/events/release-apache-skywalking-apm-9.1.0/","title":"Release Apache SkyWalking APM 9.1.0"},{"body":"SkyWalking BanyanDB 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  BanyanD is the server of BanyanDB  TSDB module. It provides the primary time series database with a key-value data module. Stream module. It implements the stream data model\u0026rsquo;s writing. Measure module. 
It implements the measure data model\u0026rsquo;s writing. Metadata module. It implements resource registering and property CRUD. Query module. It handles the querying requests of stream and measure. Liaison module. It\u0026rsquo;s the gateway to other modules and provides access endpoints to clients.   gRPC based APIs Document  API reference Installation instrument Basic concepts   Testing  UT E2E with Java Client and OAP    ","excerpt":"SkyWalking BanyanDB 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  BanyanD …","ref":"/events/release-apache-skywalking-banyandb-0-1-0/","title":"Release Apache SkyWalking BanyanDB 0.1.0"},{"body":"SkyWalking BanyanDB 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support Measure, Stream and Property Query and Write APIs Support Metadata Management APIs for Measure, Stream, IndexRule and IndexRuleBinding  Chores  Set up GitHub actions to check code styles, licenses, and tests.  ","excerpt":"SkyWalking BanyanDB 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skywalking-banyandb-java-client-0-1-0/","title":"Release Apache SkyWalking BanyanDB Java Client 0.1.0"},{"body":"SkyWalking Rover 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support detect processes in scanner or kubernetes mode. Support profiling C, C++, Golang, and Rust service.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Rover 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skwaylking-rover-0-1-0/","title":"Release Apache SkyWalking Rover 0.1.0"},{"body":"SkyWalking Satellite 1.0.0 is released. Go to downloads page to find release tars.\nFeatures  Add the compat protocol receiver for the old version of agents. Support transmit the native eBPF Process and Profiling protocol. Change the name of plugin that is not well-named.  Bug Fixes  Fix Metadata lost in the Native Meter protocol.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 1.0.0 is released. Go to downloads page to find release tars.\nFeatures  Add the …","ref":"/events/release-apache-skwaylking-satellite-1-0-0/","title":"Release Apache SkyWalking Satellite 1.0.0"},{"body":"SkyWalking Eyes 0.3.0 is released. 
Go to downloads page to find release tars.\n  Dependency License\n Fix license check in go library testify (#93)    License Header\n fix command supports more languages:  Add comment style for cmake language (#86) Add comment style for hcl (#89) Add mpl-2.0 header template (#87) Support fix license header for tcl files (#102) Add python docstring comment style (#100) Add comment style for makefile \u0026amp; editorconfig (#90)   Support config license header comment style (#97) Trim leading and trailing newlines before rewrite license header cotent (#94) Replace already existing license header based on pattern (#98) [docs] add the usage for config the license header comment style (#99)    Project\n Obtain default github token in github actions (#82) Add tests for bare spdx license header content (#92) Add github action step summary for better experience (#104) Adds an option to the action to run in fix mode (#84) Provide --summary flag to generate the license summary file (#103) Add .exe suffix to windows binary (#101) Fix wrong file path and exclude binary files in src release (#81) Use t.tempdir to create temporary test directory (#95) Config: fix incorrect log message (#91) [docs] correct spelling mistakes (#96)    ","excerpt":"SkyWalking Eyes 0.3.0 is released. Go to downloads page to find release tars.\n  Dependency License …","ref":"/events/release-apache-skywalking-eyes-0-3-0/","title":"Release Apache SkyWalking Eyes 0.3.0"},{"body":"","excerpt":"","ref":"/zh_tags/apache-shenyu-incubating/","title":"Apache ShenYu (incubating)"},{"body":"","excerpt":"","ref":"/tags/apache-shenyu-incubating/","title":"Apache ShenYu (incubating)"},{"body":"目录  SkyWalking和ShenYu介绍 ApacheShenYu插件实现原理 给gRPC插件增加泛化调用追踪并保持兼容 ShenYu网关可观测性实践 总结  1.SkyWalking和ShenYu介绍 1.1 SkyWalking SkyWalking是一个针对微服务、分布式系统、云原生的应用性能监控(APM)和可观测性分析平台(OAP), 拥有强大的功能,提供了多维度应用性能分析手段,包含分布式拓扑图、应用性能指标、分布式链路追踪、日志关联分析和告警。同时还拥有非常丰富的生态。广泛应用于各个公司和开源项目。\n1.2 Apache ShenYu (incubating) Apache ShenYu (incubating)是一个高性能,多协议,易扩展,响应式的API网关。 兼容各种主流框架体系,支持热插拔,用户可以定制化开发,满足用户各种场景的现状和未来需求,经历过大规模场景的锤炼。 支持丰富的协议:Http、Spring Cloud、gRPC、Dubbo、SOFARPC、Motan、Tars等等。\n2.ApacheShenYu插件实现原理 ShenYu的异步和以往接触的异步有一点不一样,是一种全链路异步,每一个插件的执行都是异步的,并且线程切换并不是单一固定的情况(和各个插件实现有关)。 网关会发起各种协议类型的服务调用,现有的SkyWalking插件发起服务调用的时候会创建ExitSpan(同步或异步). 网关接收到请求会创建异步的EntrySpan。 异步的EntrySpan需要和同步或异步的ExitSpan串联起来,否则链路会断。 串联方案有2种:\n 快照传递: 将创建EntrySpan之后的快照通过某种方式传递到创建ExitSpan的线程中。\n目前这种方式应用在异步的WebClient插件中,该插件能接收异步快照。ShenYu代理Http服务或SpringCloud服务便是通过快照传递实现span串联。 LocalSpan中转: 其它RPC类插件不像异步WebClient那样可以接收快照实现串联。尽管你可以改动其它RPC插件让其接收快照实现串联,但不推荐也没必要, 因为可以通过在创建ExitSpan的线程中,创建一个LocalSpan就可以实现和ExitSpan串联,然后将异步的EntrySpan和LocalSpan通过快照传递的方式串联。这样实现完全可以不改动原先插件的代码。  span连接如下图所示:\n也许你会问是否可以在一个通用的插件里面创建LocalSpan,而不是ShenYu RPC插件分别创建一个? 答案是不行,因为需要保证LocalSpan和ExitSpan在同一个线程,而ShenYu是全链路异步. 在实现上创建LocalSpan的代码是复用的。\n3. 给gRPC插件增加泛化调用追踪并保持兼容 现有的SkyWalking gRPC插件只支持通过存根的方式发起的调用。而对于网关而言并没有proto文件,网关采取的是泛化调用(不通过存根),所以追踪rpc请求,你会发现链路会在网关节点断掉。 在这种情况下,需要让gRPC插件支持泛化调用,而同时需要保持兼容,不影响原先的追踪方式。实现上通过判断请求参数是否是动态消息(DynamicMessage),如果不是则走原先通过存根的追踪逻辑, 如果是则走泛化调用追踪逻辑。另外的兼容则是在gRPC新旧版本的差异,以及获取服务端IP各种情况的兼容,感兴趣的可以看看源码。\n4. 
ShenYu网关可观测性实践 上面讲解了SkyWalking ShenYu插件的实现原理,下面部署应用看下效果。SkyWalking功能强大,除了了链路追踪需要开发插件外,其它功能强大功能开箱即用。 这里只描述链路追踪和应用性能剖析部分,如果想体验SkyWalking功能的强大,请参考SkyWalking官方文档。\n版本说明:\n skywalking-java: 8.11.0-SNAPSHOT源码构建。说明:shenyu插件会在8.11.0版本发布,可能会在5月或6月初步发布它。Java代理正处于常规发布阶段。 skywalking: 9.0.0 V9 版本  用法说明:\nSkyWalking的设计非常易用,配置和激活插件请参考官方文档。\n SkyWalking Documentation SkyWalking Java Agent Documentation  4.1 向网关发起请求 通过postman客户端或者其它方式向网关发起各种服务请求\n4.2 请求拓扑图  4.3 请求链路(以gRPC为例) 正常链路: 异常链路: 点击链路节点变可以看到对应的节点信息和异常信息\n服务提供者span 网关请求span 4.4 服务指标监控 服务指标监控 4.5 网关后台指标监控 数据库监控: 线程池和连接池监控 4.6 JVM监控 4.7 接口分析 4.8 异常日志和异常链路分析 日志配置见官方文档\n日志监控 异常日志对应的分布式链路追踪详情 5. 总结 SkyWalking在可观测性方面对指标、链路追踪、日志有着非常全面的支持,功能强大,简单易用,专为大型分布式系统、微服务、云原生、容器架构而设计,拥有丰富的生态。 使用SkyWalking为Apache ShenYu (incubating)提供强大的可观测性支持,让ShenYu如虎添翼。最后,如果你对高性能响应式网关感兴趣,可以关注 Apache ShenYu (incubating) 。 同时感谢SkyWalking这么优秀的开源软件对行业所作的贡献。\n","excerpt":"目录  SkyWalking和ShenYu介绍 ApacheShenYu插件实现原理 给gRPC插件增加泛化调用追踪并保持兼容 ShenYu网关可观测性实践 总结  1.SkyWalking …","ref":"/zh/2022-05-08-apache-shenyuincubating-integrated-skywalking-practice-observability/","title":"Apache ShenYu (incubating)插件实现原理和可观测性实践"},{"body":"Content  Introduction of SkyWalking and ShenYu Apache ShenYu plugin implementation principle Adding generalized call tracking to the gRPC plugin and keeping it compatible ShenYu Gateway Observability Practice Summary  1. Introduction of SkyWalking and ShenYu 1.1 SkyWalking SkyWalking is an Application Performance Monitoring (APM) and Observability Analysis Platform (OAP) for microservices, distributed systems, and cloud natives, Has powerful features that provide a multi-dimensional means of application performance analysis, including distributed topology diagrams, application performance metrics, distributed link tracing, log correlation analysis and alerts. Also has a very rich ecology. Widely used in various companies and open source projects.\n1.2 Apache ShenYu (incubating) Apache ShenYu (incubating) High-performance,multi-protocol,extensible,responsive API Gateway. Compatible with a variety of mainstream framework systems, support hot plug, users can customize the development, meet the current situation and future needs of users in a variety of scenarios, experienced the temper of large-scale scenes. Rich protocol support: Http, Spring Cloud, gRPC, Dubbo, SOFARPC, Motan, Tars, etc.\n2. Apache ShenYu plugin implementation principle ShenYu\u0026rsquo;s asynchrony is a little different from previous exposure to asynchrony, it is a full-link asynchrony, the execution of each plug-in is asynchronous, and thread switching is not a single fixed situation (and the individual plug-in implementation is related). The gateway initiates service calls of various protocol types, and the existing SkyWalking plugins create ExitSpan (synchronous or asynchronous) when they initiate service calls. The gateway receives the request and creates an asynchronous EntrySpan. The asynchronous EntrySpan needs to be concatenated with the synchronous or asynchronous ExitSpan, otherwise the link will be broken.\nThere are 2 types of tandem solutions:\n Snapshot Delivery:\nPass the snapshot after creating the EntrySpan to the thread that created the ExitSpan in some way.\nCurrently this approach is used in the asynchronous WebClient plugin, which can receive asynchronous snapshots. shenYu proxy Http service or SpringCloud service is to achieve span concatenation through snapshot passing. 
LocalSpan transit:\nOther RPC class plugins do not receive snapshots for concatenation like Asynchronous WebClient. Although you can modify other RPC plugins to receive snapshots for concatenation, it is not recommended or necessary to do so. This can be achieved by creating a LocalSpan in the thread where the ExitSpan is created, and then connecting the asynchronous EntrySpan and LocalSpan by snapshot passing. This can be done without changing the original plugin code.  The span connection is shown below:\nYou may ask if it is possible to create LocalSpan inside a generic plugin, instead of creating one separately for ShenYu RPC plugin? The answer is no, because you need to ensure that LocalSpan and ExitSpan are in the same thread, and ShenYu is fully linked asynchronously. The code to create LocalSpan is reused in the implementation.\n3. Adding generalized call tracking to the gRPC plugin and keeping it compatible The existing SkyWalking gRPC plugin only supports calls initiated by way of stubs. For the gateway there is no proto file, the gateway takes generalized calls (not through stubs), so tracing RPC requests, you will find that the link will break at the gateway node. In this case, it is necessary to make the gRPC plugin support generalized calls, while at the same time needing to remain compatible and not affect the original tracing method. This is achieved by determining whether the request parameter is a DynamicMessage, and if it is not, then the original tracing logic through the stub is used. If not, then the original tracing logic via stubs is used, and if not, then the generalized call tracing logic is used. The other compatibility is the difference between the old and new versions of gRPC, as well as the compatibility of various cases of obtaining server-side IP, for those interested in the source code.\n4. ShenYu Gateway Observability Practice The above explains the principle of SkyWalking ShenYu plug-in implementation, the following deployment application to see the effect. SkyWalking powerful, in addition to the link tracking requires the development of plug-ins, other powerful features out of the box. Here only describe the link tracking and application performance analysis part, if you want to experience the power of SkyWalking features, please refer to the SkyWalking official documentation.\nVersion description:\n skywalking-java: 8.11.0-SNAPSHOT source code build. Note: The shenyu plugin will be released in version 8.11.0, and will probably release it initially in May or June. the Java agent is in the regular release phase. skywalking: 9.0.0 V9 version  Usage instructions:\nSkyWalking is designed to be very easy to use. 
Please refer to the official documentation for configuring and activating the shenyu plugin.\n SkyWalking Documentation SkyWalking Java Agent Documentation  4.1 Sending requests to the gateway Initiate various service requests to the gateway via the postman client or other means.\n4.2 Request Topology Diagram   4.3 Request Trace (in the case of gRPC) Normal Trace: Abnormal Trace: Click on the link node to see the corresponding node information and exception information\nService Provider Span Gateway request span 4.4 Service Metrics Monitoring 4.5 Gateway background metrics monitoring Database Monitoring: Thread pool and connection pool monitoring: 4.6 JVM Monitoring 4.7 Endpoint Analysis 4.8 Exception log and exception link analysis See official documentation for log configuration\nLog monitoring Distributed link trace details corresponding to exception logs 5. Summary SkyWalking has very comprehensive support for metrics, link tracing, and logging in observability, and is powerful, easy to use, and designed for large distributed systems, microservices, cloud-native, container architectures, and has a rich ecosystem. Using SkyWalking to provide powerful observability support for Apache ShenYu (incubating) gives ShenYu a boost. Finally, if you are interested in high-performance responsive gateways, you can follow Apache ShenYu (incubating). Also, thanks to SkyWalking such an excellent open source software to the industry contributions.\n","excerpt":"Content  Introduction of SkyWalking and ShenYu Apache ShenYu plugin implementation principle Adding …","ref":"/blog/2022-05-08-apache-shenyuincubating-integrated-skywalking-practice-observability/","title":"Apache ShenYu(incubating) plugin implementation principles and observability practices"},{"body":"","excerpt":"","ref":"/zh_tags/logging/","title":"Logging"},{"body":"","excerpt":"","ref":"/zh_tags/metrics/","title":"Metrics"},{"body":"","excerpt":"","ref":"/zh_tags/observability/","title":"Observability"},{"body":"","excerpt":"","ref":"/zh_tags/skywalking/","title":"SkyWalking"},{"body":"","excerpt":"","ref":"/zh_tags/tracing/","title":"Tracing"},{"body":"SkyWalking Kubernetes Event Exporter 1.0.0 is released. Go to downloads page to find release tars.\n Add Apache SkyWalking exporter to export events into SkyWalking OAP. Add console exporter for debugging purpose.  ","excerpt":"SkyWalking Kubernetes Event Exporter 1.0.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-kubernetes-event-exporter-1.0.0/","title":"Release Apache SkyWalking Kubernetes Event Exporter 1.0.0"},{"body":"content:  Introduction Features Install SWCK Deploy a demo application Verify the injector Concluding remarks  1. Introduction 1.1 What\u0026rsquo;s SWCK? SWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nIn fact, SWCK is an operator developed based on kubebuilder, providing users with Custom Resources ( CR ) and controllers for managing resources ( Controller ), all CustomResourceDefinitions(CRDs)are as follows:\n JavaAgent OAP UI Storage Satellite Fetcher  1.2 What\u0026rsquo;s the java agent injector? For a java application, users need to inject the java agent into the application to get metadata and send it to the SkyWalking backend. To make users use the java agent more natively, we propose the java agent injector to inject the java agent sidecar into a pod. 
The java agent injector is actually a Kubernetes Mutation Webhook Controller. The controller intercepts pod events and applies mutations to the pod if annotations exist within the request.\n2. Features   Transparent. User’s applications generally run in normal containers while the java agent runs in the init container, and both belong to the same pod. Each container in the pod mounts a shared memory volume that provides a storage path for the java agent. When the pod starts, the java agent in the init container will run before the application container, and the injector will store the java agent file in the shared memory volume. When the application container starts, the injector injects the agent file into the application by setting the JVM parameter. Users can inject the java agent in this way without rebuilding the container image containing the java agent.\n  Configurability. The injector provides two ways to configure the java agent: global configuration and custom configuration. The default global configuration is stored in the configmap, you can update it as your own global configuration, such as backend_service. In addition, you can also set custom configuration for some applications via annotation, such as “service_name”. For more information, please see java-agent-injector.\n  Observability. For each injected java agent, we provide CustomDefinitionResources called JavaAgent to observe the final agent configuration. Please refer to javaagent to get more details.\n  3. Install SWCK In the next steps, we will show how to build a stand-alone Kubernetes cluster and deploy the 0.6.1 version of SWCK on the platform.\n3.1 Tool Preparation Firstly, you need to install some tools as follows:\n kind, which is used to create a stand-alone Kubernetes cluster. kubectl, which is used to communicate with the Kubernetes cluster.  3.2 Install stand-alone Kubernetes cluster After installing kind , you could use the following command to create a stand-alone Kubernetes cluster.\n Notice! If your terminal is configured with a proxy, you need to close it before the cluster is created to avoid some errors.\n $ kind create cluster --image=kindest/node:v1.19.1 After creating a cluster, you can get the pods as below.\n$ kubectl get pod -A NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-f9fd979d6-57xpc 1/1 Running 0 7m16s kube-system coredns-f9fd979d6-8zj8h 1/1 Running 0 7m16s kube-system etcd-kind-control-plane 1/1 Running 0 7m23s kube-system kindnet-gc9gt 1/1 Running 0 7m16s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 7m23s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 7m23s kube-system kube-proxy-6zbtb 1/1 Running 0 7m16s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 7m23s local-path-storage local-path-provisioner-78776bfc44-jwwcs 1/1 Running 0 7m16s 3.3 Install certificates manger(cert-manger) The certificates of SWCK are distributed and verified by the certificate manager. 
You need to install the cert-manager through the following command.\n$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml Verify whether cert-manager is installed successfully.\n$ kubectl get pod -n cert-manager NAME READY STATUS RESTARTS AGE cert-manager-7dd5854bb4-slcmd 1/1 Running 0 73s cert-manager-cainjector-64c949654c-tfmt2 1/1 Running 0 73s cert-manager-webhook-6bdffc7c9d-h8cfv 1/1 Running 0 73s 3.4 Install SWCK The java agent injector is a component of the operator, so please follow the next steps to install the operator first.\n Get the deployment yaml file of SWCK and deploy it.  $ curl -Ls https://archive.apache.org/dist/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz | tar -zxf - -O ./config/operator-bundle.yaml | kubectl apply -f - Check SWCK as below.  $ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-7f64f996fc-qh8s9 2/2 Running 0 94s 3.5 Install Skywalking components — OAPServer and UI  Deploy the OAPServer and UI in the default namespace.  $ kubectl apply -f https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml Check the OAPServer.  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS default 1 1 default-oap.default Check the UI.  $ kubectl get ui NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS default 1 1 default-ui.default [80] 4. Deploy a demo application In the third step, we have installed SWCK and related Skywalking components. Next, we will show how to use the java agent injector in SWCK through two java application examples in two ways: global configuration and custom configuration.\n4.1 Set the global configuration When we have installed SWCK, the default configuration is the configmap in the system namespace, we can get it as follows.\n$ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. In the cluster created by kind, the backend_service may not be correct, we need to use the real OAPServer\u0026rsquo;s address default-oap.default to replace the default 127.0.0.1, so we can edit the configmap as follow.\n$ kubectl edit configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system configmap/skywalking-swck-java-agent-configmap edited $ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:default-oap.default:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. 4.2 Set the custom configuration In some cases, we need to use the Skywalking component to monitor different java applications, so the agent configuration of different applications may be different, such as the name of the application, and the plugins that the application needs to use, etc. 
Next, we will take two simple java applications developed based on spring boot and spring cloud gateway as examples for a detailed description. You can use the source code to build the image.\n# build the springboot and springcloudgateway image  $ git clone https://github.com/dashanji/swck-spring-cloud-k8s-demo $ cd swck-spring-cloud-k8s-demo \u0026amp;\u0026amp; make # check the image $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE gateway v0.0.1 51d16251c1d5 48 minutes ago 723MB app v0.0.1 62f4dbcde2ed 48 minutes ago 561MB # load the image into the cluster $ kind load docker-image app:v0.0.1 \u0026amp;\u0026amp; kind load docker-image gateway:v0.0.1 4.3 deploy spring boot application  Create the springboot-system namespace.  $ kubectl create namespace springboot-system Label the springboot-systemnamespace to enable the java agent injector.  $ kubectl label namespace springboot-system swck-injection=enabled Deploy the corresponding deployment file springboot.yaml for the spring boot application, which uses annotation to override the default agent configuration, such as service_name.   Notice! Before using the annotation to override the agent configuration, you need to add strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; to make the override take effect.\n apiVersion:apps/v1kind:Deploymentmetadata:name:demo-springbootnamespace:springboot-systemspec:selector:matchLabels:app:demo-springboottemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;# enable the java agent injectorapp:demo-springbootannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;# enable the agent overlayagent.skywalking.apache.org/agent.service_name:\u0026#34;backend-service\u0026#34;spec:containers:- name:springbootimagePullPolicy:IfNotPresentimage:app:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:demonamespace:springboot-systemspec:type:ClusterIPports:- name:8085-tcpport:8085protocol:TCPtargetPort:8085selector:app:demo-springbootDeploy a spring boot application in the springboot-system namespace.  $ kubectl apply -f springboot.yaml Check for deployment.  $ kubectl get pod -n springboot-system NAME READY STATUS RESTARTS AGE demo-springboot-7c89f79885-dvk8m 1/1 Running 0 11s Get the finnal injected java agent configuration through JavaAgent.  $ kubectl get javaagent -n springboot-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-springboot-javaagent app=demo-springboot backend-service default-oap.default:11800 4.4 deploy spring cloud gateway application  Create the gateway-system namespace.  $ kubectl create namespace gateway-system Label the gateway-systemnamespace to enable the java agent injector.  $ kubectl label namespace gateway-system swck-injection=enabled Deploy the corresponding deployment file springgateway.yaml for the spring cloud gateway application, which uses annotation to override the default agent configuration, such as service_name. In addition, when using spring cloud gateway, we need to add the spring cloud gateway plugin to the agent configuration.   Notice! 
4.4 Deploy the Spring Cloud Gateway application

Create the gateway-system namespace.

$ kubectl create namespace gateway-system

Label the gateway-system namespace to enable the java agent injector.

$ kubectl label namespace gateway-system swck-injection=enabled

Deploy the corresponding deployment file springgateway.yaml for the Spring Cloud Gateway application, which uses annotations to override the default agent configuration, such as service_name. In addition, when using Spring Cloud Gateway, we need to add the Spring Cloud Gateway plugin to the agent configuration.

Notice! Before using annotations to override the agent configuration, you need to add strategy.skywalking.apache.org/agent.Overlay: "true" to make the override take effect.

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: demo-gateway
  name: demo-gateway
  namespace: gateway-system
spec:
  selector:
    matchLabels:
      app: demo-gateway
  template:
    metadata:
      labels:
        swck-java-agent-injected: "true"
        app: demo-gateway
      annotations:
        strategy.skywalking.apache.org/agent.Overlay: "true"
        agent.skywalking.apache.org/agent.service_name: "gateway-service"
        optional.skywalking.apache.org: "cloud-gateway-3.x" # add spring cloud gateway plugin
    spec:
      containers:
        - image: gateway:v0.0.1
          name: gateway
          command: ["java"]
          args: ["-jar", "/gateway.jar"]
---
apiVersion: v1
kind: Service
metadata:
  name: service-gateway
  namespace: gateway-system
spec:
  type: ClusterIP
  ports:
    - name: 9999-tcp
      port: 9999
      protocol: TCP
      targetPort: 9999
  selector:
    app: demo-gateway

Deploy the Spring Cloud Gateway application in the gateway-system namespace.

$ kubectl apply -f springgateway.yaml

Check the deployment.

$ kubectl get pod -n gateway-system
NAME READY STATUS RESTARTS AGE
demo-gateway-5bb77f6d85-9j7c6 1/1 Running 0 15s

Get the final injected java agent configuration through the JavaAgent resource.

$ kubectl get javaagent -n gateway-system
NAME PODSELECTOR SERVICENAME BACKENDSERVICE
app-demo-gateway-javaagent app=demo-gateway gateway-service default-oap.default:11800

5. Verify the injector

After completing the steps above, we can view the detailed state of the injected pods, such as the injected agent container.

# get all injected pods
$ kubectl get pod -A -lswck-java-agent-injected=true
NAMESPACE NAME READY STATUS RESTARTS AGE
gateway-system demo-gateway-5bb77f6d85-lt4z7 1/1 Running 0 69s
springboot-system demo-springboot-7c89f79885-lkb5j 1/1 Running 0 75s

# view detailed state of the injected pod [demo-springboot]
$ kubectl describe pod -l app=demo-springboot -n springboot-system
...
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
...
Normal Created 91s kubelet,kind-control-plane Created container inject-skywalking-agent
Normal Started 91s kubelet,kind-control-plane Started container inject-skywalking-agent
...
Normal Created 90s kubelet,kind-control-plane Created container springboot
Normal Started 90s kubelet,kind-control-plane Started container springboot

# view detailed state of the injected pod [demo-gateway]
$ kubectl describe pod -l app=demo-gateway -n gateway-system
...
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
...
Normal Created 2m20s kubelet,kind-control-plane Created container inject-skywalking-agent
Normal Started 2m20s kubelet,kind-control-plane Started container inject-skywalking-agent
...
Normal Created 2m20s kubelet,kind-control-plane Created container gateway
Normal Started 2m20s kubelet,kind-control-plane Started container gateway

Now we can expose the services and watch the data displayed in the web UI. First of all, we need to get the gateway service and the UI service as follows.

$ kubectl get service service-gateway -n gateway-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service-gateway ClusterIP 10.99.181.145 <none> 9999/TCP 9m19s
$ kubectl get service default-ui
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default-ui ClusterIP 10.111.39.250 <none> 80/TCP 82m

Then open two terminals to expose the two services, service-gateway and default-ui.
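If you would rather stay in a single terminal, both forwards can also be started as background jobs; this is just an alternative sketch of the same step shown next, using the same service names and ports.

$ kubectl port-forward service/service-gateway -n gateway-system 9999:9999 > /dev/null &
$ kubectl port-forward service/default-ui 8090:80 > /dev/null &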
$ kubectl port-forward service/service-gateway -n gateway-system 9999:9999 Forwarding from 127.0.0.1:9999 -\u0026gt; 9999 Forwarding from [::1]:9999 -\u0026gt; 9999 $ kubectl port-forward service/default-ui 8090:80 Forwarding from 127.0.0.1:8090 -\u0026gt; 8080 Forwarding from [::1]:8090 -\u0026gt; 8080 Use the following commands to access the spring boot demo 10 times through the spring cloud gateway service.  $ for i in {1..10}; do curl http://127.0.0.1:9999/gateway/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! We can see the Dashboard by accessing http://127.0.0.1:8090.  All services' topology is shown below.  We can see the trace information of gateway-service.  We can see the trace information of backend-service.  6. Concluding remarks If your application is deployed in the Kubernetes platform and requires Skywalking to provide monitoring services, SWCK can help you deploy, upgrade and maintain the Skywalking components in the Kubernetes cluster. In addition to this blog, you can also view swck document and Java agent injector documentation for more information. If you find this project useful, please give SWCK a star! If you have any questions, welcome to ask in Issues or Discussions.\n","excerpt":"content:  Introduction Features Install SWCK Deploy a demo application Verify the injector …","ref":"/blog/2022-04-19-how-to-use-the-java-agent-injector/","title":"How to use the java agent injector?"},{"body":"","excerpt":"","ref":"/zh_tags/user-manual/","title":"User Manual"},{"body":"","excerpt":"","ref":"/tags/user-manual/","title":"User Manual"},{"body":"目录  介绍 主要特点 安装SWCK 部署demo应用 验证注入器 结束语  1. 介绍 1.1 SWCK 是什么? SWCK是部署在 Kubernetes 环境中,为 Skywalking 用户提供服务的平台,用户可以基于该平台使用、升级和维护 SkyWalking 相关组件。\n实际上,SWCK 是基于 kubebuilder 开发的Operator,为用户提供自定义资源( CR )以及管理资源的控制器( Controller ),所有的自定义资源定义(CRD)如下所示:\n JavaAgent OAP UI Storage Satellite Fetcher  1.2 java 探针注入器是什么? 对于 java 应用来说,用户需要将 java 探针注入到应用程序中获取元数据并发送到 Skywalking 后端。为了让用户在 Kubernetes 平台上更原生地使用 java 探针,我们提供了 java 探针注入器,该注入器能够将 java 探针通过 sidecar 方式注入到应用程序所在的 pod 中。 java 探针注入器实际上是一个Kubernetes Mutation Webhook控制器,如果请求中存在 annotations ,控制器会拦截 pod 事件并将其应用于 pod 上。\n2. 主要特点  透明性。用户应用一般运行在普通容器中而 java 探针则运行在初始化容器中,且两者都属于同一个 pod 。该 pod 中的每个容器都会挂载一个共享内存卷,为 java 探针提供存储路径。在 pod 启动时,初始化容器中的 java 探针会先于应用容器运行,由注入器将其中的探针文件存放在共享内存卷中。在应用容器启动时,注入器通过设置 JVM 参数将探针文件注入到应用程序中。用户可以通过这种方式实现 java 探针的注入,而无需重新构建包含 java 探针的容器镜像。 可配置性。注入器提供两种方式配置 java 探针:全局配置和自定义配置。默认的全局配置存放在 configmap 中,用户可以根据需求修改全局配置,比如修改 backend_service 的地址。此外,用户也能通过 annotation 为特定应用设置自定义的一些配置,比如不同服务的 service_name 名称。详情可见 java探针说明书。 可观察性。每个 java 探针在被注入时,用户可以查看名为 JavaAgent 的 CRD 资源,用于观测注入后的 java 探针配置。详情可见 JavaAgent说明。  3. 
安装SWCK 在接下来的几个步骤中,我们将演示如何从0开始搭建单机版的 Kubernetes 集群,并在该平台部署0.6.1版本的 SWCK。\n3.1 工具准备 首先,你需要安装一些必要的工具,如下所示:\n kind,用于创建单机版 Kubernetes集群。 kubectl,用于和Kubernetes 集群交互。  3.2 搭建单机版 Kubernetes集群 在安装完 kind 工具后,可通过如下命令创建一个单机集群。\n 注意!如果你的终端配置了代理,在运行以下命令之前最好先关闭代理,防止一些意外错误的发生。\n $ kind create cluster --image=kindest/node:v1.19.1 在集群创建完毕后,可获得如下的pod信息。\n$ kubectl get pod -A NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-f9fd979d6-57xpc 1/1 Running 0 7m16s kube-system coredns-f9fd979d6-8zj8h 1/1 Running 0 7m16s kube-system etcd-kind-control-plane 1/1 Running 0 7m23s kube-system kindnet-gc9gt 1/1 Running 0 7m16s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 7m23s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 7m23s kube-system kube-proxy-6zbtb 1/1 Running 0 7m16s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 7m23s local-path-storage local-path-provisioner-78776bfc44-jwwcs 1/1 Running 0 7m16s 3.3 安装证书管理器(cert-manger) SWCK 的证书都是由证书管理器分发和验证,需要先通过如下命令安装证书管理器cert-manger。\n$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml 验证 cert-manger 是否安装成功。\n$ kubectl get pod -n cert-manager NAME READY STATUS RESTARTS AGE cert-manager-7dd5854bb4-slcmd 1/1 Running 0 73s cert-manager-cainjector-64c949654c-tfmt2 1/1 Running 0 73s cert-manager-webhook-6bdffc7c9d-h8cfv 1/1 Running 0 73s 3.4 安装SWCK java 探针注入器是 SWCK 中的一个组件,首先需要按照如下步骤安装 SWCK:\n 输入如下命令获取 SWCK 的 yaml 文件并部署在 Kubernetes 集群中。  $ curl -Ls https://archive.apache.org/dist/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz | tar -zxf - -O ./config/operator-bundle.yaml | kubectl apply -f - 检查 SWCK 是否正常运行。  $ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-7f64f996fc-qh8s9 2/2 Running 0 94s 3.5 安装 Skywalking 组件 — OAPServer 和 UI  在 default 命名空间中部署 OAPServer 组件和 UI 组件。  $ kubectl apply -f https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml 查看 OAPServer 组件部署情况。  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS default 1 1 default-oap.default 查看 UI 组件部署情况。  $ kubectl get ui NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS default 1 1 default-ui.default [80] 4. 部署demo应用 在第3个步骤中,我们已经安装好 SWCK 以及相关的 Skywalking 组件,接下来按照全局配置以及自定义配置两种方式,通过两个 java 应用实例,分别演示如何使用 SWCK 中的 java 探针注入器。\n4.1 设置全局配置 当 SWCK 安装完成后,默认的全局配置就会以 configmap 的形式存储在系统命令空间中,可通过如下命令查看。\n$ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. 在 kind 创建的 Kubernetes 集群中, SkyWalking 后端地址和 configmap 中指定的地址可能不同,我们需要使用真正的 OAPServer 组件的地址 default-oap.default 来代替默认的 127.0.0.1 ,可通过修改 configmap 实现。\n$ kubectl edit configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system configmap/skywalking-swck-java-agent-configmap edited $ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. 
collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:default-oap.default:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. 4.2 设置自定义配置 在实际使用场景中,我们需要使用 Skywalking 组件监控不同的 java 应用,因此不同应用的探针配置可能有所不同,比如应用的名称、应用需要使用的插件等。为了支持自定义配置,注入器提供 annotation 来覆盖默认的全局配置。接下来我们将分别以基于 spring boot 以及 spring cloud gateway 开发的两个简单java应用为例进行详细说明,你可以使用这两个应用的源代码构建镜像。\n# build the springboot and springcloudgateway image  $ git clone https://github.com/dashanji/swck-spring-cloud-k8s-demo $ cd swck-spring-cloud-k8s-demo \u0026amp;\u0026amp; make # check the image $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE gateway v0.0.1 51d16251c1d5 48 minutes ago 723MB app v0.0.1 62f4dbcde2ed 48 minutes ago 561MB # load the image into the cluster $ kind load docker-image app:v0.0.1 \u0026amp;\u0026amp; kind load docker-image gateway:v0.0.1 4.3 部署 spring boot 应用  创建 springboot-system 命名空间。  $ kubectl create namespace springboot-system 给 springboot-system 命名空间打上标签使能 java 探针注入器。  $ kubectl label namespace springboot-system swck-injection=enabled 接下来为 spring boot 应用对应的部署文件 springboot.yaml ,其中使用了 annotation 覆盖默认的探针配置,比如 service_name ,将其覆盖为 backend-service 。   需要注意的是,在使用 annotation 覆盖探针配置之前,需要增加 strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; 来使覆盖生效。\n apiVersion:apps/v1kind:Deploymentmetadata:name:demo-springbootnamespace:springboot-systemspec:selector:matchLabels:app:demo-springboottemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;# enable the java agent injectorapp:demo-springbootannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;# enable the agent overlayagent.skywalking.apache.org/agent.service_name:\u0026#34;backend-service\u0026#34;spec:containers:- name:springbootimagePullPolicy:IfNotPresentimage:app:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:demonamespace:springboot-systemspec:type:ClusterIPports:- name:8085-tcpport:8085protocol:TCPtargetPort:8085selector:app:demo-springboot在 springboot-system 命名空间中部署 spring boot 应用。  $ kubectl apply -f springboot.yaml 查看部署情况。  $ kubectl get pod -n springboot-system NAME READY STATUS RESTARTS AGE demo-springboot-7c89f79885-dvk8m 1/1 Running 0 11s 通过 JavaAgent 查看最终注入的 java 探针配置。  $ kubectl get javaagent -n springboot-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-springboot-javaagent app=demo-springboot backend-service default-oap.default:11800 4.4 部署 spring cloud gateway 应用  创建 gateway-system 命名空间。  $ kubectl create namespace gateway-system 给 gateway-system 命名空间打上标签使能 java 探针注入器。  $ kubectl label namespace gateway-system swck-injection=enabled 接下来为 spring cloud gateway 应用对应的部署文件 springgateway.yaml ,其中使用了 annotation 覆盖默认的探针配置,比如 service_name ,将其覆盖为 gateway-service 。此外,在使用 spring cloud gateway 时,我们需要在探针配置中添加 spring cloud gateway 插件。   需要注意的是,在使用 annotation 覆盖探针配置之前,需要增加 strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; 来使覆盖生效。\n 
apiVersion:apps/v1kind:Deploymentmetadata:labels:app:demo-gatewayname:demo-gatewaynamespace:gateway-systemspec:selector:matchLabels:app:demo-gatewaytemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo-gatewayannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;gateway-service\u0026#34;optional.skywalking.apache.org:\u0026#34;cloud-gateway-3.x\u0026#34;# add spring cloud gateway pluginspec:containers:- image:gateway:v0.0.1name:gatewaycommand:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/gateway.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:service-gatewaynamespace:gateway-systemspec:type:ClusterIPports:- name:9999-tcpport:9999protocol:TCPtargetPort:9999selector:app:demo-gateway在 gateway-system 命名空间中部署 spring cloud gateway 应用。  $ kubectl apply -f springgateway.yaml 查看部署情况。  $ kubectl get pod -n gateway-system NAME READY STATUS RESTARTS AGE demo-gateway-758899c99-6872s 1/1 Running 0 15s 通过 JavaAgent 获取最终注入的java探针配置。  $ kubectl get javaagent -n gateway-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-gateway-javaagent app=demo-gateway gateway-service default-oap.default:11800 5. 验证注入器  当完成上述步骤后,我们可以查看被注入pod的详细状态,比如被注入的agent容器。  # get all injected pod $ kubectl get pod -A -lswck-java-agent-injected=true NAMESPACE NAME READY STATUS RESTARTS AGE gateway-system demo-gateway-5bb77f6d85-lt4z7 1/1 Running 0 69s springboot-system demo-springboot-7c89f79885-lkb5j 1/1 Running 0 75s # view detailed state of the injected pod [demo-springboot] $ kubectl describe pod -l app=demo-springboot -n springboot-system ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- ... Normal Created 91s kubelet,kind-control-plane Created container inject-skywalking-agent Normal Started 91s kubelet,kind-control-plane Started container inject-skywalking-agent ... Normal Created 90s kubelet,kind-control-plane Created container springboot Normal Started 90s kubelet,kind-control-plane Started container springboot # view detailed state of the injected pod [demo-gateway]  $ kubectl describe pod -l app=demo-gateway -n gateway-system ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- ... Normal Created 2m20s kubelet,kind-control-plane Created container inject-skywalking-agent Normal Started 2m20s kubelet,kind-control-plane Started container inject-skywalking-agent ... 
Normal Created 2m20s kubelet,kind-control-plane Created container gateway Normal Started 2m20s kubelet,kind-control-plane Started container gateway 现在我们可以将服务绑定在某个端口上并通过 web 浏览器查看采样数据。首先,我们需要通过以下命令获取gateway服务和ui服务的信息。  $ kubectl get service service-gateway -n gateway-system NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service-gateway ClusterIP 10.99.181.145 \u0026lt;none\u0026gt; 9999/TCP 9m19s $ kubectl get service default-ui NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default-ui ClusterIP 10.111.39.250 \u0026lt;none\u0026gt; 80/TCP 82m 接下来分别启动2个终端将service-gateway 以及 default-ui 绑定到本地端口上,如下所示:  $ kubectl port-forward service/service-gateway -n gateway-system 9999:9999 Forwarding from 127.0.0.1:9999 -\u0026gt; 9999 Forwarding from [::1]:9999 -\u0026gt; 9999 $ kubectl port-forward service/default-ui 8090:80 Forwarding from 127.0.0.1:8090 -\u0026gt; 8080 Forwarding from [::1]:8090 -\u0026gt; 8080 使用以下命令通过spring cloud gateway 网关服务暴露的端口来访问 spring boot 应用服务。  $ for i in {1..10}; do curl http://127.0.0.1:9999/gateway/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! 我们可以在 web 浏览器中输入 http://127.0.0.1:8090 来访问探针采集到的数据。  所有服务的拓扑图如下所示。  查看 gateway-service 网关服务的 trace 信息。  查看 backend-service 应用服务的 trace 信息。  6. 结束语 如果你的应用部署在 Kubernetes 平台中,且需要 Skywalking 提供监控服务, SWCK 能够帮助你部署、升级和维护 Kubernetes 集群中的 Skywalking 组件。除了本篇博客外,你还可以查看 SWCK文档 以及 java探针注入器文档 获取更多的信息。如果你觉得这个项目好用,请给 SWCK 一个star! 如果你有任何疑问,欢迎在Issues或者Discussions中提出。\n","excerpt":"目录  介绍 主要特点 安装SWCK 部署demo应用 验证注入器 结束语  1. 介绍 1.1 SWCK 是什么? SWCK是部署在 Kubernetes 环境中,为 Skywalking 用户提供 …","ref":"/zh/2022-04-19-how-to-use-the-java-agent-injector/","title":"如何使用java探针注入器?"},{"body":"Apache SkyWalking 是中国首个,也是目前唯一的个人开源的 Apache 顶级项目。\n作为一个针对分布式系统的应用性能监控 APM 和可观测性分析平台, SkyWalking 提供了媲美商业APM/监控的功能。\nCSDN云原生系列在线峰会第4期,特邀SkyWalking创始人、Apache基金会首位中国董事、Tetrate创始工程师吴晟担任出品人,推出SkyWalking峰会。\nSkyWalking峰会在解读SkyWalking v9新特性的同时,还将首发解密APM的专用数据库BanyanDB,以及分享SkyWalking在原生eBPF探针、监控虚拟机和Kubernetes、云原生函数计算可观测性等方面的应用实践。\n峰会议程:\n14:00-14:30 开场演讲:SkyWalking v9解析 吴晟 Tetrate 创始工程师、Apache 基金会首位中国董事\n14:30-15:00 首发解密:APM的专用数据库BanyanDB\n高洪涛 Tetrate 创始工程师\n15:00-15:30 SkyWalking 原生eBPF探针展示\n刘晗 Tetrate 工程师\n15:30-16:00 Apache SkyWalking MAL实践-监控虚拟机和Kubernetes\n万凯 Tetrate 工程师\n16:00-16:30 SkyWalking助力云原生函数计算可观测\n霍秉杰 青云科技 资深架构师\n峰会视频 B站视频地址\n","excerpt":"Apache SkyWalking 是中国首个,也是目前唯一的个人开源的 Apache 顶级项目。\n作为一个针对分布式系统的应用性能监控 APM 和可观测性分析平台, SkyWalking 提供了媲美 …","ref":"/zh/2022-04-18-meeting/","title":"Apache SkyWalking 2022 峰会"},{"body":"SkyWalking Java Agent 8.10.0 is released. Go to downloads page to find release tars. Changes by Version\n8.10.0  [Important] Namespace represents a subnet, such as kubernetes namespace, or 172.10... Make namespace concept as a part of service naming format. [Important] Add cluster concept, also as a part of service naming format. The cluster name would be  Add as {@link #SERVICE_NAME} suffix. Add as exit span\u0026rsquo;s peer, ${CLUSTER} / original peer Cross Process Propagation Header\u0026rsquo;s value addressUsedAtClient[index=8] (Target address of this request used on the client end).   Support Undertow thread pool metrics collecting. Support Tomcat thread pool metric collect. Remove plugin for ServiceComb Java Chassis 0.x Add Guava EventBus plugin. Fix Dubbo 3.x plugin\u0026rsquo;s tracing problem. Fix the bug that maybe generate multiple trace when invoke http request by spring webflux webclient. 
Support Druid Connection pool metrics collecting. Support HikariCP Connection pool metrics collecting. Support Dbcp2 Connection pool metrics collecting. Ignore the synthetic constructor created by the agent in the Spring patch plugin. Add witness class for vertx-core-3.x plugin. Add witness class for graphql plugin. Add vertx-core-4.x plugin. Renamed graphql-12.x-plugin to graphql-12.x-15.x-plugin and graphql-12.x-scenario to graphql-12.x-15.x-scenario. Add graphql-16plus plugin. [Test] Support to configure plugin test base images. [Breaking Change] Remove deprecated agent.instance_properties configuration. Recommend agent.instance_properties_json. The namespace and cluster would be reported as instance properties, keys are namespace and cluster. Notice, if instance_properties_json includes these two keys, they would be overrided by the agent core. [Breaking Change] Remove the namespace from cross process propagation key. Make sure the parent endpoint in tracing context from existing first ENTRY span, rather than first span only. Fix the bug that maybe causing memory leak and repeated traceId when use gateway-2.1.x-plugin or gateway-3.x-plugin. Fix Grpc 1.x plugin could leak context due to gRPC cancelled. Add JDK ThreadPoolExecutor Plugin. Support default database(not set through JDBC URL) in mysql-5.x plugin.  Documentation  Add link about java agent injector. Update configurations doc, remove agent.instance_properties[key]=value. Update configurations doc, add agent.cluster and update agent.namespace.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.10.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-10-0/","title":"Release Apache SkyWalking Java Agent 8.10.0"},{"body":"Introduction  The most profound technologies are those that disappear. They weave themselves into the fabric of everyday life until they are indistinguishable from it. - Mark Weiser\n Mark Weiser prophetically argued in the late 1980s, that the most far-reaching technologies are those which vanish into thin air. According to Weiser, \u0026ldquo;Whenever people learn something sufficiently well, they cease to be aware of it.\u0026rdquo; This disappearing act, as Weiser claimed, is not limited to technology but rather human psychology. It is this very experience that allows us to escape lower-level thinking into higher-level thinking. For once we are no longer impeded by mundane details, we are then free to focus on new goals.\nThis realization becomes more relevant as APMs become increasingly popular. As more applications are deployed with APMs, the number of abstract representations of the underlying source code also increases. While this provides great value to many non-development roles within an organization, it does pose additional challenges to those in development roles who must translate these representations into concepts they can work with (i.e. source code). Weiser sums this difficultly up rather succinctly when he states that \u0026ldquo;Programmers should no more be asked to work without access to source code than auto-mechanics should be asked to work without looking at the engine.\u0026rdquo;\nStill, APMs collect more information only to produce a plethora of new abstract representations. 
In this article, we will introduce a new concept in Source++, the open-source live-coding platform, specifically designed to allow developers to monitor production applications more intuitively.\nLive Views  And we really don\u0026rsquo;t understand even yet, hundreds of metrics later, what make a program easier to understand or modify or reuse or borrow. I don\u0026rsquo;t think we\u0026rsquo;ll find out by looking away from programs to their abstract interfaces. The answers are in the source code. - Mark Weiser\n As APMs move from the \u0026ldquo;nice to have\u0026rdquo; category to the \u0026ldquo;must-have\u0026rdquo; category, there is a fundamental feature holding them back from ubiquity. They must disappear from consciousness. As developers, we should feel no impulse to open our browsers to better understand the underlying source code. The answers are literally in the source code. Instead, we should improve our tools so the source code conveniently tells us what we need to know. Think of how simple life could be if failing code always indicated how and why it failed. This is the idea behind Source++.\nIn our last blog post, we discussed Extending Apache SkyWalking with non-breaking breakpoints. In that post, we introduced a concept called Live Instruments, which developers can use to easily debug live production applications without leaving their IDE. Today, we will discuss how existing SkyWalking installations can be integrated into your IDE via a new concept called Live Views. Unlike Live Instruments, which are designed for debugging live applications, Live Views are designed for increasing application comprehension and awareness. This is accomplished through a variety of commands which are input into the Live Command Palette.\nLive Command Palette The Live Command Palette (LCP) is a contextual command prompt, included in the Source++ JetBrains Plugin, that allows developers to control and query live applications from their IDE. Opened via keyboard shortcut (Ctrl+Shift+S), the LCP allows developers to easily view metrics relevant to the source code they\u0026rsquo;re currently viewing. The following Live View commands are currently supported:\nCommand: view (overview/activity/traces/logs) The view commands display contextual popups with live operational data of the current source code. These commands allow developers to view traditional SkyWalking operational data filtered down to the relevant metrics.\nCommand: watch log The watch log command allows developers to follow individual log statements of a running application in real-time. This command allows developers to negate the need for manually scrolling through the logs to find instances of a specific log statement.\nCommand: (show/hide) quick stats The show quick stats command displays live endpoint metrics for a quick idea of an endpoint\u0026rsquo;s activity. Using this command, developers can quickly assess the status of an endpoint and determine if the endpoint is performing as expected.\nFuture Work  A good tool is an invisible tool. By invisible, I mean that the tool does not intrude on your consciousness; you focus on the task, not the tool. Eyeglasses are a good tool \u0026ndash; you look at the world, not the eyeglasses. - Mark Weiser\n Source++ aims to extend SkyWalking in such a way that SkyWalking itself becomes invisible. To accomplish this, we plan to support custom developer commands. Developers will be able to build customized commands for themselves, as well as commands to share with their team. 
These commands will recognize context, types, and conditions allowing for a wide possibility of operations. As more commands are added, developers will be able to expose everything SkyWalking has to offer while focusing on what matters most, the source code.\nIf you find these features useful, please consider giving Source++ a try. You can install the plugin directly from your JetBrains IDE, or through the JetBrains Marketplace. If you have any issues or questions, please open an issue. Feedback is always welcome!\n","excerpt":"Introduction  The most profound technologies are those that disappear. They weave themselves into …","ref":"/blog/2022-04-14-integrating-skywalking-with-source-code/","title":"Integrating Apache SkyWalking with source code"},{"body":"Read this post in original language: English\n介绍  最具影响力的技术是那些消失的技术。他们交织在日常生活中,直到二者完全相融。 - 马克韦瑟\n 马克韦瑟在 1980 年代后期预言,影响最深远的技术是那些消失在空气中的技术。\n“当人们足够熟知它,就不会再意识到它。”\n正如韦瑟所说,这种消失的现象不只源于技术,更是人类的心理。 正是这种经验使我们能够摆脱对底层的考量,进入更高层次的思考。 一旦我们不再被平凡的细枝末节所阻碍,我们就可以自如地专注于新的目标。\n随着 APM(应用性能管理系统) 变得越来越普遍,这种认识变得更加重要。随着更多的应用程序开始使用 APM 部署,底层源代码抽象表示的数量也在同步增加。 虽然这为组织内的许多非开发角色提供了巨大的价值,但它确实也对开发人员提出了额外的挑战 - 他们必须将这些表示转化为可操作的概念(即源代码)。 对此,韦瑟相当简洁的总结道,“就像不应要求汽车机械师在不查看引擎的情况下工作一样,我们不应要求程序员在不访问源代码的情况下工作”。\n尽管如此,APM 收集更多信息只是为了产生充足的新抽象表示。 在本文中,我们将介绍开源实时编码平台 Source++ 中的一个新概念,旨在让开发人员更直观地监控生产应用程序。\n实时查看  我们尚且不理解在收集了数百个指标之后,是什么让程序更容易理解、修改、重复使用或借用。 我不认为我们能够通过原理程序本身而到它们的抽象接口中找到答案。答案就在源代码之中。 - 马克韦瑟\n 随着 APM 从“有了更好”转变为“必须拥有”,有一个基本特性阻碍了它们的普及。 它们必须从意识中消失。作为开发人员,我们不应急于打开浏览器以更好地理解底层源代码,答案就在源代码中。 相反,我们应该改进我们的工具,以便源代码直观地告诉我们需要了解的内容。 想想如果失败的代码总是表明它是如何以及为什么失败的,生活会多么简单。这就是 Source++ 背后的理念。\n在我们的上一篇博客中,我们讨论了不间断断点 Extending Apache SkyWalking。 我们介绍了一个名为 Live Instruments(实时埋点) 的概念,开发人员可以使用它轻松调试实时生产应用程序,而无需离开他们的开发环境。 而今天,我们将讨论如何通过一个名为 Live Views(实时查看)的新概念将现有部署的 SkyWalking 集成到您的 IDE 中。 与专为调试实时应用程序而设计的 Live Instruments (实时埋点) 不同,Live Views(实时查看)旨在提高对应用程序的理解和领悟。 这将通过输入到 Live Command Palette (实时命令面板) 中的各种命令来完成。\n实时命令面板 Live Command Palette (LCP) 是一个当前上下文场景下的命令行面板,这个组件包含在 Source++ JetBrains 插件中,它允许开发人员从 IDE 中直接控制和对实时应用程序发起查询。\nLCP 通过键盘快捷键 (Ctrl+Shift+S) 打开,允许开发人员轻松了解与他们当前正在查看的源代码相关的运行指标。\n目前 LCP 支持以下实时查看命令:\n命令:view(overview/activity/traces/Logs)- 查看 总览/活动/追踪/日志 view 查看命令会展示一个与当前源码的实时运维数据关联的弹窗。 这些命令允许开发人员查看根据相关指标过滤的传统 SkyWalking 的运维数据。\n命令:watch log - 实时监听日志 本日志命令允许开发人员实时跟踪正在运行的应用程序的每一条日志。 通过此命令开发人员无需手动查阅大量日志就可以查找特定日志语句的实例。\n命令:(show/hide) quick stats (显示/隐藏)快速统计 show quick stats 显示快速统计命令显示实时端点指标,以便快速了解端点的活动。 使用此命令,开发人员可以快速评估端点的状态并确定端点是否按预期正常运行。\n未来的工作  好工具是无形的。我所指的无形,是指这个工具不会侵入你的意识; 你专注于任务,而不是工具。 眼镜就是很好的工具——你看的是世界,而不是眼镜。 - 马克韦瑟\n Source++ 旨在扩展 SkyWalking,使 SkyWalking 本身变得无需感知。 为此,我们计划支持自定义的开发人员命令。 开发人员将能够构建自定义命令,以及与团队共享的命令。 这些命令将识别上下文、类型和条件,从而允许广泛的操作。 随着更多命令的添加,开发人员将能够洞悉 SkyWalking 所提供的所有功能,同时专注于最重要的源码。\n如果您觉得这些功能有用,请考虑尝试使用 Source++。 您可以通过 JetBrains Marketplace 或直接从您的 JetBrains IDE 安装插件。 如果您有任何疑问,请到这提 issue。\n欢迎随时反馈!\n","excerpt":"Read this post in original language: English\n介绍  最具影响力的技术是那些消失的技术。他们交织在日常生活中,直到二者完全相融。 - …","ref":"/zh/2022-04-14-integrating-skywalking-with-source-code/","title":"将 Apache SkyWalking 与源代码集成"},{"body":"随着无人驾驶在行业的不断发展和技术的持续革新,规范化、常态化的真无人运营逐渐成为事实标准,而要保障各个场景下的真无人业务运作,一个迫切需要解决的现状就是业务链路长,出现问题难以定位。本文由此前于 KubeSphere 直播上的分享整理而成,主要介绍 SkyWalking 的基本概念和使用方法,以及在无人驾驶领域的一系列实践。\nB站视频地址\n行业背景 驭势科技(UISEE)是国内领先的无人驾驶公司。致力于为全行业、全场景提供 AI 驾驶服务,做赋能出行和物流新生态的 AI 驾驶员。早在三年前, 驭势科技已在机场和厂区领域实现了“去安全员” 
无人驾驶常态化运营的重大突破,落地“全场景、真无人、全天候”的自动驾驶技术,并由此迈向大规模商用。要保证各个场景下没有安全员参与的业务运作,我们在链路追踪上做了一系列实践。\n对于无人驾驶来说,从云端到车端的链路长且复杂,任何一层出问题都会导致严重的后果;然而在如下图所示的链路中,准确迅速地定位故障服务并不容易,经常遇到多个服务层层排查的情况。我们希望做到的事情,就是在出现问题以后,能够尽快定位到源头,从而快速解决问题,以绝后患。\n前提条件 SkyWalking 简介 Apache SkyWalking 是一个开源的可观察性平台,用于收集、分析、聚集和可视化来自服务和云原生基础设施的数据。SkyWalking 通过简单的方法,提拱了分布式系统的清晰视图,甚至跨云。它是一个现代的 APM(Application Performence Management),专门为云原生、基于容器的分布式系统设计。它在逻辑上被分成四个部分。探针、平台后端、存储和用户界面。\n 探针收集数据并根据 SkyWalking 的要求重新格式化(不同的探针支持不同的来源)。 平台后端支持数据聚合、分析以及从探针接收数据流的过程,包括 Tracing、Logging、Metrics。 存储系统通过一个开放/可插拔接口容纳 SkyWalking 数据。用户可以选择一个现有的实现,如 ElasticSearch、H2、MySQL、TiDB、InfluxDB,或实现自定义的存储。 UI是一个高度可定制的基于网络的界面,允许 SkyWalking 终端用户可视化和管理 SkyWalking 数据。  综合考虑了对各语言、各框架的支持性、可观测性的全面性以及社区环境等因素,我们选择了 SkyWalking 进行链路追踪。\n链路追踪简介 关于链路追踪的基本概念,可以参看吴晟老师翻译的 OpenTracing 概念和术语 以及 OpenTelemetry。在这里,择取几个重要的概念供大家参考:\n Trace:代表一个潜在的分布式的存在并行数据或者并行执行轨迹的系统。一个 Trace 可以认为是多个 Span 的有向无环图(DAG)。简单来说,在微服务体系下,一个 Trace 代表从第一个服务到最后一个服务经历的一系列的服务的调用链。   Span:在服务中埋点时,最需要关注的内容。一个 Span 代表系统中具有开始时间和执行时长的逻辑运行单元。举例来说,在一个服务发出请求时,可以认为是一个 Span 的开始;在这个服务接收到上游服务的返回值时,可以认为是这个 Span 的结束。Span 之间通过嵌套或者顺序排列建立逻辑因果关系。在 SkyWalking 中,Span 被区分为:  LocalSpan:服务内部调用方法时创建的 Span 类型 EntrySpan:请求进入服务时会创建的 Span 类型(例如处理其他服务对于本服务接口的调用) ExitSpan:请求离开服务时会创建的 Span 类型(例如调用其他服务的接口)   TraceSegment:SkyWalking 中的概念,介于 Trace 和 Span 之间,是一条 Trace 的一段,可以包含多个 Span。一个 TraceSegment 记录了一个线程中的执行过程,一个 Trace 由一个或多个 TraceSegment 组成,一个 TraceSegment 又由一个或多个 Span 组成。 SpanContext:代表跨越进程上下文,传递到下级 Span 的状态。一般包含 Trace ID、Span ID 等信息。 Baggage:存储在 SpanContext 中的一个键值对集合。它会在一条追踪链路上的所有 Span 内全局传输,包含这些 Span 对应的 SpanContext。Baggage 会随着 Trace 一同传播。  SkyWalking 中,上下文数据通过名为 sw8 的头部项进行传递,值中包含 8 个字段,由 - 进行分割(包括 Trace ID,Parent Span ID 等等) 另外 SkyWalking 中还提供名为 sw8-correlation 的扩展头部项,可以传递一些自定义的信息    快速上手 以 Go 为例,介绍如何使用 SkyWalking 在服务中埋点。\n部署 我们选择使用 Helm Chart 在 Kubernetes 中进行部署。\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 埋点 部署完以后,需要在服务中进行埋点,以生成 Span 数据:主要的方式即在服务的入口和出口创建 Span。在代码中,首先我们会创建一个 Reporter,用于向 SkyWalking 后端发送数据。接下来,我们需要创建一个名为 \u0026quot;example\u0026quot; 的 Tracer 实例。此时,我们就可以使用 Tracer 实例来创建 Span。 在 Go 中,主要利用 context.Context 来创建以及传递 Span。\nimport \u0026#34;github.com/SkyAPM/go2sky\u0026#34; // configure to export to OAP server r, err := reporter.NewGRPCReporter(\u0026#34;oap-skywalking:11800\u0026#34;) if err != nil { log.Fatalf(\u0026#34;new reporter error %v \\n\u0026#34;, err) } defer r.Close() tracer, err := go2sky.NewTracer(\u0026#34;example\u0026#34;, go2sky.WithReporter(r)) 服务内部 在下面的代码片段中,通过 context.background() 生成的 Context 创建了一个 Root Span,同时在创建该 Span 的时候,也会产生一个跟这 个 Span 相关联的 Context。利用这个新的 Context,就可以创建一个与 Root Span 相关联的 Child Span。\n// create root span span, ctx, err := tracer.CreateLocalSpan(context.Background()) // create sub span w/ context above subSpan, newCtx, err := tracer.CreateLocalSpan(ctx) 服务间通信 在服务内部,我们会利用 Context 传的递来进行 Span 的创建。但是如果是服务间通信的话,这也是链路追踪最为广泛的应用场景,肯定是没有办法直接传递 Context 参数的。这种情况下,应该怎么做呢?一般来说,SkyWalking 会把 Context 中与当前 Span 相关的键值对进行编码,后续在服务通信时进行传递。例如,在 HTTP 协议中,一般利用请求头进行链路传递。再例如 gRPC 协议,一般想到的就是利用 Metadata 
进行传递。\n在服务间通信的时候,我们会利用 EntrySpan 和 ExitSpan 进行链路的串联。以 HTTP 请求为例,在创建 EntrySpan 时,会从请求头中获取到 Span 上下文信息。而在 ExitSpan 中,则在请求中注入了上下文。这里的上下文是经过了 SkyWalking 编码后的字符串,以便在服务间进行传递。除了传递 Span 信息,也可以给 Span 打上 Tag 进行标记。例如,记录 HTTP 请求的方法,URL 等等,以便于后续数据的可视化。\n//Extract context from HTTP request header `sw8` span, ctx, err := tracer.CreateEntrySpan(r.Context(), \u0026#34;/api/login\u0026#34;, func(key string) (string, error) { return r.Header.Get(key), nil }) // Some operation ... // Inject context into HTTP request header `sw8` span, err := tracer.CreateExitSpan(req.Context(), \u0026#34;/service/validate\u0026#34;, \u0026#34;tomcat-service:8080\u0026#34;, func(key, value string) error { req.Header.Set(key, value) return nil }) // tags span.Tag(go2sky.TagHTTPMethod, req.Method) span.Tag(go2sky.TagURL, req.URL.String()) 但是,我们可能也会用到一些不那么常用的协议,比如说 MQTT 协议。在这些情况下,应该如何传递上下文呢?关于这个问题,我们在自定义插件的部分做了实践。\nUI 经过刚才的埋点以后,就可以在 SkyWalking 的 UI 界面看到调用链。SkyWalking 官方提供了一个 Demo 页面,有兴趣可以一探究竟:\n UI http://demo.skywalking.apache.org\nUsername skywalking Password skywalking\n 插件体系 如上述埋点的方式,其实是比较麻烦的。好在 SkyWalking 官方提供了很多插件,一般情况下,直接接入插件便能达到埋点效果。SkyWalking 官方为多种语言都是提供了丰富的插件,对一些主流框架都有插件支持。由于我们部门使用的主要是 Go 和 Python 插件,下文中便主要介绍这两种语言的插件。同时,由于我们的链路复杂,用到的协议较多,不可避免的是也需要开发一些自定义插件。下图中整理了 Go 与 Python 插件的主要思想,以及我们开发的各框架协议自定义插件的研发思路。\n官方插件 Go · Gin 插件 Gin 是 Go 的 Web 框架,利用其中间件,可以进行链路追踪。由于是接收请求,所以需要在中间件中,创建一个 EntrySpan,同时从请求头中获取 Span 的上下文的信息。获取到上下文信息以后,还需要再进行一步操作:把当前请求请求的上下文 c.Request.Context(), 设置成为刚才创建完 EntrySpan 时生成的 Context。这样一来,这个请求的 Context 就会携带有 Span 上下文信息,可以用于在后续的请求处理中进行后续传递。\nfunc Middleware(engine *gin.Engine, tracer *go2sky.Tracer) gin.HandlerFunc { return func(c *gin.Context) { span, ctx, err := tracer.CreateEntrySpan(c.Request.Context(), getOperationName(c), func(key string) (string, error) { return c.Request.Header.Get(key), nil }) // some operation \tc.Request = c.Request.WithContext(ctx) c.Next() span.End() } } Python · requests Requests 插件会直接修改 Requests 库中的request函数,把它替换成 SkyWalking 自定义的_sw_request函数。在这个函数中,创建了 ExitSpan,并将 ExitSpan 上下文注入到请求头中。在服务安装该插件后,实际调用 Requests 库进行请求的时候,就会携带带有上下文的请求体进行请求。\ndef install(): from requests import Session _request = Session.request def _sw_request(this: Session, method, url, other params...): span = get_context().new_exit_span(op=url_param.path or \u0026#39;/\u0026#39;, peer=url_param.netloc, component=Component.Requests) with span: carrier = span.inject() span.layer = Layer.Http if headers is None: headers = {} for item in carrier: headers[item.key] = item.val span.tag(TagHttpMethod(method.upper())) span.tag(TagHttpURL(url_param.geturl())) res = _request(this, method, url, , other params...n) # some operation return res Session.request = _sw_request 自定义插件 Go · Gorm Gorm 框架是 Go 的 ORM 框架。我们自己在开发的时候经常用到这个框架,因此希望能对通过 Gorm 调用数据库的链路进行追踪。\nGorm 有自己的插件体系,会在数据库的操作前调用BeforeCallback函数,数据库的操作后调用AfterCallback函数。于是在BeforeCallback中,我们创建 ExitSpan,并在AfterCallback里结束先前在BeforeCallback中创建的 ExitSpan。\nfunc (s *SkyWalking) BeforeCallback(operation string) func(db *gorm.DB) { // some operation  return func(db *gorm.DB) { tableName := db.Statement.Table operation := fmt.Sprintf(\u0026#34;%s/%s\u0026#34;, tableName, operation) span, err := tracer.CreateExitSpan(db.Statement.Context, operation, peer, func(key, value string) error { return nil }) // set span from db instance\u0026#39;s context to pass span  db.Set(spanKey, span) } } 需要注意的是,因为 Gorm 的插件分为 Before 与 After 两个 Callback,所以需要在两个回调函数间传递 Span,这样我们才可以在AfterCallback中结束当前的 Span。\nfunc (s *SkyWalking) AfterCallback() func(db *gorm.DB) { // some operation  return func(db 
*gorm.DB) { // get span from db instance\u0026#39;s context  spanInterface, _ := db.Get(spanKey) span, ok := spanInterface.(go2sky.Span) if !ok { return } defer span.End() // some operation  } } Python · MQTT 在 IoT 领域,MQTT 是非常常用的协议,无人驾驶领域自然也相当依赖这个协议。\n以 Publish 为例,根据官方插件的示例,我们直接修改 paho.mqtt 库中的publish函数,改为自己定义的_sw_publish函数。在自定义函数中,创建 ExitSpan,并将上下文注入到 MQTT 的 Payload 中。\ndef install(): from paho.mqtt.client import Client _publish = Client.publish Client.publish = _sw_publish_func(_publish) def _sw_publish_func(_publish): def _sw_publish(this, topic, payload=None, qos=0, retain=False, properties=None): # some operation with get_context().new_exit_span(op=\u0026#34;EMQX/Topic/\u0026#34; + topic + \u0026#34;/Producer\u0026#34; or \u0026#34;/\u0026#34;, peer=peer) as span: carrier = span.inject() span.layer = Layer.MQ span.component = Component.RabbitmqProducer payload = {} if payload is None else json.loads(payload) payload[\u0026#39;headers\u0026#39;] = {} for item in carrier: payload[\u0026#39;headers\u0026#39;][item.key] = item.val # ... return _sw_publish 可能这个方式不是特别优雅:因为我们目前使用 MQTT 3.1 版本,此时尚未引入 Properties 属性(类似于请求头)。直到 MQTT 5.0,才对此有相关支持。我们希望在升级到 MQTT 5.0 以后,能够将上下文注入到 Properties 中进行传递。\n无人驾驶领域的实践 虽然这些插件基本上涵盖了所有的场景,但是链路追踪并不是只要接入插件就万事大吉。在一些复杂场景下,尤其无人驾驶领域的链路追踪,由于微服务架构中涉及的语言环境、中间件种类以及业务诉求通常都比较丰富,导致在接入全链路追踪的过程中,难免遇到各种主观和客观的坑。下面选取了几个典型例子和大家分享。\n【问题一】Kong 网关的插件链路接入 我们的请求在进入服务之前,都会通过 API 网关 Kong,同时我们在 Kong 中定义了一个自定义权限插件,这个插件会调用权限服务接口进行授权。如果只是单独单纯地接入 SkyWalking Kong 插件,对于权限服务的调用无法在调用链中体现。所以我们的解决思路是,直接地在权限插件里进行埋点,而不是使用官方的插件,这样就可以把对于权限服务的调用也纳入到调用链中。\n【问题二】 Context 传递 我们有这样一个场景:一个服务,使用 Gin Web 框架,同时在处理 HTTP 请求时调用上游服务的 gRPC 接口。起初以为只要接入 Gin 的插件以及 gRPC 的插件,这个场景的链路就会轻松地接上。但是结果并不如预期。\n最后发现,Gin 提供一个 Contextc;同时对于某一个请求,可以通过c.Request.Context()获取到请求的 ContextreqCtx,二者不一致;接入 SkyWalking 提供的 Gin 插件后,修改的是reqCtx,使其包含 Span 上下文信息;而现有服务,在 gRPC 调用时传入的 Context 是c,所以一开始 HTTP -\u0026gt; gRPC 无法连接。最后通过一个工具函数,复制了reqCtx的键值对到c后,解决了这个问题。\n【问题三】官方 Python·Redis 插件 Pub/Sub 断路 由于官方提供了 Python ·Redis 插件,所以一开始认为,安装了 Redis 插件,对于一切 Redis 操作,都能互相连接。但是实际上,对于 Pub/Sub 操作,链路会断开。\n查看代码后发现,对于所有的 Redis 操作,插件都创建一个 ExitSpan;也就是说该插件其实仅适用于 Redis 作缓存等情况;但是在我们的场景中,需要进行 Pub/Sub 操作。这导致两个操作都会创建 ExitSpan,而使链路无法相连。通过改造插件,在 Pub 时创建 ExitSpan,在 Sub 时创建 EntrySpan 后,解决该问题。\n【问题四】MQTT Broker 的多种 DataBridge 接入 一般来说,对 MQTT 的追踪链路是 Publisher -\u0026gt; Subscriber,但是在我们的使用场景中,存在 MQTT broker 接收到消息后,通过规则引擎调用其他服务接口这种特殊场景。这便不是 Publisher -\u0026gt; Subscriber,而是 Publisher -\u0026gt; HTTP。\n我们希望能够从 MQTT Payload 中取出 Span 上下文,再注入到 HTTP 的请求头中。然而规则引擎调用接口时,没有办法自定义请求头,所以我们最后的做法是,约定好参数名称,将上下文放到请求体中,在服务收到请求后,从请求体中提取 Context。\n【问题五】Tracing 与 Logging 如何结合 很多时候,只有 Tracing 信息,对于问题排查来说可能还是不充分的,我们非常的期望也能够把 Tracing 和 Logging 进行结合。\n如上图所示,我们会把所有服务的 Tracing 的信息发送到 SkyWalking,同时也会把这个服务产生的日志通过 Fluent Bit 以及 Fluentd 发送到 ElasticSearch。对于这种情况,我们只需要在日志中去记录 Span 的上下文,比如记录 Trace ID 或者 Span ID 等,就可以在 Kibana 里面去进行对于 Trace ID 的搜索,来快速的查看同一次调用链中的日志。\n当然,SkyWalking 它本身也提供了自己的日志收集和分析机制,可以利用 Fluentd 或者 Fluent Bit 等向 SkyWalking 后端发送日志(我们选用了 Fluentd)。当然,像 SkyWalking 后端发送日志的时候,也要符合其日志协议,即可在 UI 上查看相应日志。\n本文介绍了 SkyWalking 的使用方法、插件体系以及实践踩坑等,希望对大家有所帮助。总结一下,SkyWalking 的使用的确是有迹可循的,一般来说我们只要接入插件,基本上可以涵盖大部分的场景,达到链路追踪的目的。但是也要注意,很多时候需要具体问题具体分析,尤其是在链路复杂的情况下,很多地方还是需要根据不同场景来进行一些特殊处理。\n最后,我们正在使用的 FaaS 平台 OpenFunction 近期也接入了 SkyWalking 作为其 链路追踪的解决方案:\nOpenFunction 提供了插件体系,并预先定义了 SkyWalking pre/post 插件;编写函数时,用户无需手动埋点,只需在 OpenFunction 配置文件中简单配置,即可开启 SkyWalking 插件,达到链路追踪的目的。\n 在感叹 OpenFunction 动作迅速的同时,也能够看到 SkyWalking 已成为链路追踪领域的首要选择之一。\n参考资料  OpenTracing 文档:https://wu-sheng.gitbooks.io/opentracing-io/content/pages/spec.html 
SkyWalking 文档:https://skywalking.apache.org/docs/main/latest/readme/ SkyWalking GitHub:https://github.com/apache/skywalking SkyWalking go2sky GitHub:https://github.com/SkyAPM/go2sky SkyWalking Python GitHub:https://github.com/apache/skywalking-python SkyWalking Helm Chart:https://github.com/apache/skywalking-kubernetes SkyWalking Solution for OpenFunction https://openfunction.dev/docs/best-practices/skywalking-solution-for-openfunction/  ","excerpt":"随着无人驾驶在行业的不断发展和技术的持续革新,规范化、常态化的真无人运营逐渐成为事实标准,而要保障各个场景下的真无人业务运作,一个迫切需要解决的现状就是业务链路长,出现问题难以定位。 …","ref":"/zh/2022-04-13-skywalking-in-autonomous-driving/","title":"SkyWalking 在无人驾驶领域的实践"},{"body":"SkyWalking Client JS 0.8.0 is released. Go to downloads page to find release tars.\n Fix fmp metric. Add e2e tese based on skywaling-infra-e2e. Update metric and events. Remove ServiceTag by following SkyWalking v9 new layer model.  ","excerpt":"SkyWalking Client JS 0.8.0 is released. Go to downloads page to find release tars.\n Fix fmp metric. …","ref":"/events/release-apache-skywalking-client-js-0-8-0/","title":"Release Apache SkyWalking Client JS 0.8.0"},{"body":"SkyWalking 9.0.0 is released. Go to downloads page to find release tars.\nSkyWalking v9 is the next main stream of the OAP and UI.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nRocketBot UI has officially been replaced by the Booster UI.\nChanges by Version Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  
OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. 
Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. 
Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","excerpt":"SkyWalking 9.0.0 is released. Go to downloads page to find release tars.\nSkyWalking v9 is the next …","ref":"/events/release-apache-skywalking-apm-9.0.0/","title":"Release Apache SkyWalking APM 9.0.0"},{"body":"SkyWalking CLI 0.10.0 is released. Go to downloads page to find release tars.\nFeatures  Allow setting start and end with relative time (#128) Add some commands for the browser (#126) Add the sub-command service layer to query services according to layer (#133) Add the sub-command layer list to query layer list (#133) Add the sub-command instance get to query single instance (#134) Add the sub-command endpoint get to query single endpoint info (#134) Change the GraphQL method to the v9 version according to the server version (#134) Add normal field to Service entity (#136) Add the command process for query Process metadata (#137) Add the command profiling ebpf for process ebpf profiling (#138) Support getprofiletasklogs query (#125) Support query list alarms (#127) [Breaking Change] Update the command profile as a sub-command profiling trace, and update profiled-analyze command to analysis (#138) profiling ebpf/trace analysis generates the profiling graph HTML on default and saves it to the current work directory (#138)  Bug Fixes  Fix quick install (#131) Set correct go version in publishing snapshot docker image (#124) Stop build kit container after finishing (#130)  Chores  Add cross platform build targets (#129) Update download host (#132)  ","excerpt":"SkyWalking CLI 0.10.0 is released. Go to downloads page to find release tars.\nFeatures  Allow …","ref":"/events/release-apache-skywalking-cli-0-10-0/","title":"Release Apache SkyWalking CLI 0.10.0"},{"body":"SkyWalking is an open-source APM system, including monitoring, tracing, and diagnosing capabilities for distributed systems in Cloud Native architecture. 
It covers monitoring for Linux, Kubernetes, Service Mesh, Serverless/Function-as-a-Service, agent-attached services, and browsers. With data covering traces, metrics, logs, and events, SkyWalking is a full-stack observability APM system.\nOpen Source Promotion Plan is a summer program organized and long-term supported by Open Source Software Supply Chain Promotion Plan. It aims to encourage college students to actively participate in developing and maintaining open-source software and promote the vigorous development of an excellent open-source software community.\nApache SkyWalking has been accepted in OSPP 2022\n   Project Description Difficulty Mentor / E-mail Expectation Tech. Requirements Repository     SkyAPM-PHP Add switches for monitoring items Advanced Level Yanlong He / heyanlong@apache.org Complete project development work C++, GO, PHP https://github.com/SkyAPM/SkyAPM-php-sdk   SkyWalking-Infra-E2E Optimize verifier Normal Level Huaxi Jiang / hoshea@apache.org 1. Continue to verify cases when other cases fail  2. Merge retry outputs  3. Prettify verify results' output Go https://github.com/apache/skywalking-infra-e2e   SkyWalking Metrics anomaly detection with machine learning Advanced Level Yihao Chen / yihaochen@apache.org An MVP version of ML-powered metrics anomaly detection using dynamic baselines and thresholds Python, Java https://github.com/apache/skywalking   SkyWalking Python Collect PVM metrics and send the metrics to OAP backend, configure dashboard in UI Normal Level Zhenxu Ke / kezhenxu94@apache.org Core Python VM metrics should be collected and displayed in SkyWalking. Python https://github.com/apache/skywalking-python issue   SkyWalking BanyanDB Command line tools for BanyanDB Normal Level Hongtao Gao / hanahmily@apache.org Command line tools should access relevant APIs to manage resources and online data. Go https://github.com/apache/skywalking-banyandb   SkyWalking SWCK CRD and controller for BanyanDB Advance Level Ye Cao / dashanji@apache.org CRD and controller provision BanyanDB as the native Storage resource. Go https://github.com/apache/skywalking-swck   SkyAPM-Go2sky Collect golang metrics such as gc, goroutines and threads, and send the the metrics to OAP backend, configure dashboard in UI Normal Level Wei Zhang / zhangwei24@apache.org Core golang metrics should be collected and displayed in SkyWalking. Go https://github.com/SkyAPM/go2sky   SkyWalking Collect system metrics such as system_load, cpu_usage, mem_usage from telegraf and send the metrics to OAP backend, configure dashboard in UI Normal Level Haoyang Liu / liuhaoyangzz@apache.org System metrics should be collected and displayed in SkyWalking. Java https://github.com/apache/skywalking    Mentors could submit pull requests to update the above list.\nContact the community You could send emails to mentor\u0026rsquo;s personal email to talk about the project and details. The official mail list of the community is dev@skywalking.apache.org. You need to subscribe to the mail list to get all replies. 
Send mail to dev-suscribe@skywalking.apache.org and follow the replies.\n","excerpt":"SkyWalking is an open-source APM system, including monitoring, tracing, and diagnosing capabilities …","ref":"/events/summer-ospp-2022/readme/","title":"Open Source Promotion Plan 2022 -- Project List"},{"body":"如果要讨论提高自己系统设计能力的方式,我想大多数人都会选择去阅读优秀开源项目的源代码。近年来我参与了多个监控服务的开发工作,并在工作中大量地使用了 SkyWalking 并对其进行二次开发。在这个过程中,我发现 SkyWalking 天然的因其国产的身份,整套源代码地组织和设计非常符合国人的编程思维。由此我录制了本套课程,旨在和大家分享我的一些浅薄的心得和体会。\n本套课程分为两个阶段,分别讲解 Agent 端和 OAP 端地设计和实现。每个阶段的内容都是以启动流程作为讲解主线,逐步展开相关的功能模块。除了对 SKyWalking 本身内容进行讲解,课程还针对 SKyWalking 使用到的一些较为生僻的知识点进行了补充讲解(如 synthetic、NBAC 机制、自定义类加载器等),以便于大家更清晰地掌握课程内容。\nSkyWalking8.7.0 源码分析 - 视频课程直达链接\n目前课程已更新完 Agent 端的讲解,目录如下:\n 01-开篇和源码环境准备 02-Agent 启动流程 03-Agent 配置加载流程 04-自定义类加载器 AgentClassLoader 05-插件定义体系 07-插件加载 06-定制 Agent 08-什么是 synthetic 09-NBAC 机制 10-服务加载 11-witness 组件版本识别 12-Transform 工作流程 13-静态方法插桩 14-构造器和实例方法插桩 15-插件拦截器加载流程(非常重要) 16-运行时插件效果的字节码讲解 17-JDK 类库插件工作原理 18-服务-GRPCChanelService 19-服务-ServiceManagementClient 20-服务-CommandService 21-服务-SamplingService 22-服务-JVMService 23-服务-KafkaXxxService 24-服务-StatusCheckService 25-链路基础知识 26-链路 ID 生成 27-TraceSegment 28-Span 基本概念 29-Span 完整模型 30-StackBasedTracingSpan 31-ExitSpan 和 LocalSpan 32-链路追踪上下文 TracerContext 33-上下文适配器 ContextManager 34-DataCarrier-Buffer 35-DataCarrier-全解 36-链路数据发送到 OAP  B站视频地址\n","excerpt":"如果要讨论提高自己系统设计能力的方式,我想大多数人都会选择去阅读优秀开源项目的源代码。近年来我参与了多个监控服务的开发工作,并在工作中大量地使用了 SkyWalking 并对其进行二次开发。在这个过程 …","ref":"/zh/2022-03-25-skywalking-source-code-analyzation/","title":"[视频] SkyWalking 8.7.0 源码分析"},{"body":"","excerpt":"","ref":"/zh_tags/course/","title":"Course"},{"body":"SkyWalking NodeJS 0.4.0 is released. Go to downloads page to find release tars.\n Fix mysql2 plugin install error. (#74) Update IORedis Plugin, fill dbinstance tag as host if condition.select doesn\u0026rsquo;t exist. (#73) Experimental AWS Lambda Function support. (#70) Upgrade dependencies to fix vulnerabilities. (#68) Add lint pre-commit hook and migrate to eslint. (#66, #67) Bump up gRPC version, and use its new release repository. (#65) Regard baseURL when in Axios Plugin. (#63) Add an API to access the trace id. (#60) Use agent test tool snapshot Docker image instead of building in CI. (#59) Wrapped IORedisPlugin call in try/catch. (#58)  ","excerpt":"SkyWalking NodeJS 0.4.0 is released. 
Go to downloads page to find release tars.\n Fix mysql2 plugin …","ref":"/events/release-apache-skywalking-nodejs-0-4-0/","title":"Release Apache SkyWalking for NodeJS 0.4.0"},{"body":"大约二十年前我刚开始进入互联网的世界的时候,支撑起整个网络的基础设施,就包括了 Apache 软件基金会(ASF)治下的软件。\nApache Httpd 是开启这个故事的软件,巅峰时期有超过七成的市场占有率,即使是在今天 NGINX 等新技术蓬勃发展的时代,也有三成左右的市场占有率。由 Linux、Apache Httpd、MySQL 和 PHP 组成的 LAMP 技术栈,是开源吞噬软件应用的第一场大型胜利。\n我从 2018 年参与 Apache Flink 开始正式直接接触到成立于 1999 年,如今已经有二十年以上历史的 Apache 软件基金会,并在一年后的 2019 年成为 Apache Flink 项目 Committer 队伍的一员,2020 年成为 Apache Curator 项目 PMC(项目管理委员会)的一员。今年,经由姜宁老师推荐,成为了 Apache Members 之一,也就是 Apache 软件基金会层面的正式成员。\n我想系统性地做一个开源案例库已经很久了。无论怎么分类筛选优秀的开源共同体,The Apache Community 都是无法绕开的。然而,拥有三百余个开源软件项目的 Apache 软件基金会,并不是一篇文章就能讲清楚的案例。本文也没有打算写成一篇长文顾及方方面面,而是启发于自己的新角色,回顾过去近五年在 Apache Community 当中的经历和体验,简单讨论 Apache 的理念,以及这些理念是如何落实到基金会组织、项目组织以及每一个参与者的日常生活事务当中的。\n不过,尽管对讨论的对象做了如此大幅度的缩减,由我自己来定义什么是 Apache 的理念未免也太容易有失偏颇。幸运的是,Apache Community 作为优秀的开源共同体,当然做到了我在《共同创造价值》一文中提到的回答好“我能为你做什么”以及“我应该怎么做到”的问题。Apache Community 的理念之一就是 Open Communications 即开放式讨论,由此产生的公开材料以及基于公开材料整理的文档汗牛充栋。这既是研究 Apache Community 的珍贵材料,也为还原和讨论一个真实的 Apache Community 提出了不小的挑战。\n无论如何,本文将以 Apache 软件基金会在 2020 年发布的纪录片 Trillions and Trillions Served 为主线,结合其他文档和文字材料来介绍 Apache 的理念。\n以人为本 纪录片一开始就讲起了 Apache Httpd 项目的历史,当初的 Apache Group 是基于一个源代码共享的 Web Server 建立起来的邮件列表上的一群人。软件开发当初的印象如同科学研究,因此交流源码在近似科学共同体的开源共同体当中是非常自然的。\n如同 ASF 的联合创始人 Brian Behlendorf 所说,每当有人解决了一个问题或者实现了一个新功能,他出于一种朴素的分享精神,也就是“为什么不把补丁提交回共享的源代码当中呢”的念头,基于开源软件的协作就这样自然发生了。纪录片中有一位提到,她很喜欢 Apache 这个词和 a patchy software 的谐音,共享同一个软件的补丁(patches)就是开源精神最早诞生的形式。\n这是 Apache Community 的根基,我们将会看到这种朴素精神经过发展形成了一个怎样的共同体,在共同体的发展过程当中,这样的根基又是如何深刻地影响了 Apache 理念的方方面面。\nApache Group 的工作模式还有一个重要的特征,那就是每个人都是基于自己的需求修复缺陷或是新增功能,在邮件列表上交流和提交补丁的个人,仅仅只是代表他个人,而没有一个“背后的组织”或者“背后的公司”。因此,ASF 的 How it Works 文档中一直强调,在基金会当中的个体,都只是个体(individuals),或者称之为志愿者(volunteers)。\n我在某公司的分享当中提到过,商业产品可以基于开源软件打造,但是当公司的雇员出现在社群当中的时候,他应该保持自己志愿者的身份。这就像是开源软件可以被用于生产环境或者严肃场景,例如航空器的发射和运行离不开 Linux 操作系统,但是开源软件本身是具有免责条款的。商业公司或专业团队提供服务保障,而开源软件本身是 AS IS 的。同样,社群成员本人可以有商业公司雇员的身份,但是他在社群当中,就是一个志愿者。\n毫无疑问,这种论调当即受到了质疑,因为通常的认知里,我就是拿了公司的钱,就是因为在给这家公司打工,才会去关注这个项目,你非要说我是一个志愿者,我还就真不是一个志愿者,你怎么说?\n其实这个问题,同样在 How it Works 文档中已经有了解答。\n All participants in ASF projects are volunteers and nobody (not even members or officers) is paid directly by the foundation to do their job. There are many examples of committers who are paid to work on projects, but never by the foundation itself. 
Rather, companies or institutions that use the software and want to enhance it or maintain it provide the salary.\n 我当时基于这样的认识,给到质疑的回答是,如果你不想背负起因为你是员工,因此必须响应社群成员的 issue 或 PR 等信息,那么你可以试着把自己摆在一个 volunteer 的角度来观察和参与社群。实际上,你并没有这样的义务,即使公司要求你必须回答,那也是公司的规定,而不是社群的要求。如果你保持着这样的认识和心态,那么社群于你而言,才有可能是一个跨越职业生涯不同阶段的归属地,而不是工作的附庸。\n社群从来不会从你这里索取什么,因为你的参与本身也是自愿的。其他社群成员会感谢你的参与,并且如果相处得好,这会是一个可爱的去处。社群不是你的敌人,不要因为公司下达了离谱的社群指标而把怒火发泄在社群和社群成员身上。压力来源于公司,作为社群成员的你本来可以不用承受这些。\nApache Community 对个体贡献者组成社群这点有多么重视呢?只看打印出来不过 10 页 A4 纸的 How it Works 文档,volunteer 和 individuals 两个词加起来出现了 19 次。The Apache Way 文档中强调的社群特征就包括了 Independence 一条,唯一并列的另一个是经常被引用的 Community over code 原则。甚至,有一个专门的 Project independence 文档讨论了 ASF 治下的项目如何由个体志愿者开发和维护,又为何因此是中立和非商业性的。\nINDIVIDUALS COMPOSE THE ASF 集中体现了 ASF 以人为本的理念。实际上,不止上面提到的 Independence 强调了社群成员个体志愿者的属性,Community over code 这一原则也在强调 ASF 关注围绕开源软件聚集起来的人,包括开发者、用户和其他各种形式的参与者。人是维持社群常青的根本,在后面具体讨论 The Apache Way 的内容的时候还会展开。\n上善若水 众所周知,Apache License 2.0 (APL-2.0) 是所谓的宽容式软件协议。也就是说,不同于 GPL 3.0 这样的 Copyleft 软件协议要求衍生作品需要以相同的条款发布,其中包括开放源代码和自由修改从而使得软件源代码总是可以获取和修改的,Apache License 在协议内容当中仅保留了著作权和商标,并要求保留软件作者的任何声明(NOTICE)。\nASF 在软件协议上的理念是赋予最大程度的使用自由,鼓励用户和开发者参与到共同体当中来,鼓励与上游共同创造价值,共享补丁。“鼓励”而不是“要求”,是 ASF 和自由软件基金会(Free Software Foundation, FSF)最主要的区别。\n这一倾向可以追溯到 Apache Group 建立的基础。Apache Httpd 派生自伊利诺伊大学的 NCSA Httpd 项目,由于使用并开发这个 web server 的人以邮件列表为纽带聚集在一起,通过交换补丁来开发同一个项目。在项目的发起人 Robert McCool 等大学生毕业以后,Apache Group 的发起人们接过这个软件的维护和开发工作。当时他们看到的软件协议,就是一个 MIT License 精神下的宽容式软件协议。自然而然地,Apache Group 维护 Apache Httpd 的时候,也就继承了这个协议。\n后来,Apache Httpd 打下了 web server 的半壁江山,也验证了这一模式的可靠性。虽然有些路径依赖的嫌疑,但是 ASF 凭借近似“上善若水”的宽容理念,在二十年间成功创造了数以百亿计美元价值的三百多个软件项目。\n纪录片中 ASF 的元老 Ted Dunning 提到,在他早期创造的软件当中,他会在宽容式软件协议之上,添加一个商用的例外条款。这就像是著名开源领域律师 Heather Meeker 起草的 The Commons Clause 附加条款。\n Without limiting other conditions in the License, the grant of rights under the License will not include, and the License does not grant to you, the right to Sell the Software.\n 附加 The Commons Clause 条款的软件都不是符合 OSD 定义的开源软件,也不再是原来的协议了。NebulaGraph 曾经在附加 The Commons Clause 条款的情况下声称自己是 APL-2.0 协议许可的软件,当时的 ASF 董事吴晟就提 issue (vesoft-inc/nebula#3247) 指出这一问题。NebulaGraph 于是删除了所有 The Commons Clause 的字样,保证无误地以 APL-2.0 协议许可该软件。\nTed Dunning 随后提到,这样的附加条款实际上严重影响了软件的采用。他意识到自己实际上并不想为此打官司,因此加上这样的条款对他而言是毫无意义的。Ted Dunning 于是去掉了附加条款,而这使得使用他的软件的条件能够简单的被理解,从而需要这些软件的用户能够大规模的采用。“水利万物而不争”,反而是不去强迫和约束用户行为的做法,为软件赢得了更多贡献。\n我仍然很敬佩采用 GPL 系列协议发布高质量软件的开发者,Linux 和 GCC 这样的软件的成功改变了世人对软件领域的自由的认识。然而,FSF 自己也认识到需要提出修正的 LGPL 来改进应用程序以外的软件的发布和采用,例如基础库。\nAPL-2.0 的思路与之不同,它允许任何人以任何形式使用、修改和分发软件,因此 ASF 治下的项目,以及 Linux Foundation 治下采用 APL-2.0 的项目,以及更多个人或组织采用 APL-2.0 的项目,共同构成了强大的开源软件生态,涵盖了应用软件,基础库,开发工具和框架等等各个方面。事实证明,“鼓励”而不是“要求”用户秉持 upstream first 的理念,尽可能参与到开源共同体并交换知识和补丁,共同创造价值,是能够制造出高质量的软件,构建出繁荣的社群和生态的。\n匠人精神 Apache Community 关注开发者的需要。\nApache Group 成立 ASF 的原因,是在 Apache Httpd 流行起来以后,商业公司和社会团体开始寻求和这个围绕项目形成的群体交流。然而,缺少一个正式的法律实体让组织之间的往来缺乏保障和流程。因此,如同纪录片当中提到的,ASF 成立的主要原因,是为了支撑 Apache Httpd 项目。只不过当初的创始成员们很难想到的是,ASF 最终支撑了数百个开源项目。\n不同于 Linux Foundation 是行业联盟,主要目的是为了促进其成员的共同商业利益,ASF 主要服务于开发者,由此支撑开源项目的开发以及开源共同体的发展。\n举例来说,进入 ASF 孵化器的项目都能够在 ASF Infra 的支持下运行自己的 apache.org 域名的网站,将代码托管在 ASF 仓库中上,例如 Apache GitBox Repositories 和 Apache GitHub Organization 等。这些仓库上运行着自由取用的开发基础设施,例如持续集成和持续发布的工具和资源等等。ASF 还维护了自己的邮件列表和文件服务器等一系列资源,以帮助开源项目建立起自己的共同体和发布自己的构件。\n反观 Linux Foundation 的主要思路,则是关注围绕项目聚集起来的供应商,以行业联盟的形式举办联合市场活动扩大影响,协调谈判推出行业标准等等。典型地,例如 CNCF 一直致力于定义云上应用开发的标准,容器虚拟化技术的标准。上述 ASF Infra 关注的内容和资源,则大多需要项目开发者自己解决,这些开发者往往主要为一个或若干个供应商工作,他们解决的方式通常也是依赖供应商出力。\n当然,上面的对比只是为了说明区别,并无优劣之分,也不相互对立。ASF 的创始成员 Brian Behlendorf 同时是 Linux Foundation 下 Open 
Source Security Foundation 的经理,以及 Hyperledger 的执行董事。\nASF 关注开发者的需要,体现出 Apache Community 及其成员对开发者的人文关怀。纪录片中谈到 ASF 治下项目的开发体验时,几乎每个人的眼里都有光。他们谈论着匠人精神,称赞知识分享,与人合作,以及打磨技艺的愉快经历。实际上,要想从 Apache 孵化器中成功毕业,相当部分的 mentor 关注的是围绕开源软件形成的共同体,能否支撑开源软件长久的发展和采用,这其中就包括共同体成员是否能够沉下心来做技术,而不是追求花哨的数字指标和人头凑数。\n讲几个具体的开发者福利。\n每个拥有 @apache.org 邮箱的人,即成为 ASF 治下项目 Committer 或 ASF Member 的成员,JetBrains 会提供免费的全家桶订阅授权码。我从 2019 年成为 Apache Flink 项目的 Committer 以后,已经三年沉浸在 IDEA 和 CLion 的包容下,成为彻底使用 IDE 主力开发的程序员了。\nApache GitHub Organization 下的 GitHub Actions 资源是企业级支持,这部分开销也是由 ASF 作为非营利组织募资和运营得到的资金支付的。基本上,如果你的项目成为 Apache 孵化器项目或顶级项目,那么和 GitHub Actions 集成的 CI 体验是非常顺畅的。Apache SkyWalking 只算主仓库就基于 GitHub Actions 运行了十多个端到端测试作业,Apache Pulsar 也全面基于 GitHub Actions 集成了自己的 CI 作业。\n提到匠人精神,一个隐形的开发者福利,其实是 ASF 的成员尤其是孵化器的 mentor 大多是经验非常丰富的开发者。软件开发不只是写代码,Apache Community 成员之间相互帮助,能够帮你跟上全世界最前沿的开发实践。如何提问题,如何做项目管理,如何发布软件,这些平日里在学校在公司很难有机会接触的知识和实践机会,在 Apache Community 当中只要你积极承担责任,都是触手可得的。\n当然,如何写代码也是开发当中最常交流的话题。我深入接触 Maven 开始于跟 Flink Community 的 Chesnay Schepler 的交流。我对 Java 开发的理解,分布式系统开发的知识,很大程度上也得到了 Apache Flink 和 Apache ZooKeeper 等项目的成员的帮助,尤其是 Till Rohrmann 和 Enrico Olivelli 几位。上面提到的 Ted Dunning 开始攻读博士的时候,我还没出生。但是我在项目当中用到 ZooKeeper 的 multi 功能并提出疑问和改进想法的时候,也跟他有过一系列的讨论。\n谈到技艺就会想起人,这也是 ASF 一直坚持以人为本带来的社群风气。\n我跟姜宁老师在一年前认识,交流 The Apache Way 期间萌生出相互认同。姜宁老师在 Apache 孵化器当中帮助众多项目理解 The Apache Way 并予以实践,德高望重。在今年的 ASF Members 年会当中,姜宁老师也被推举为 ASF Board 的一员。\n我跟吴晟老师在去年认识。他经常会强调开发者尤其是没有强烈公司背景的开发者的视角,多次提到这些开发者是整个开源生态的重要组成部分。他作为 PMC Chair 的 Apache SkyWalking 项目相信“没有下一个版本的计划,只知道会有下一个版本”,这是最佳实践的传播,也是伴随技术的文化理念的传播。SkyWalking 项目出于自己需要,也出于为开源世界添砖加瓦的动机创建的 SkyWalking Eyes 项目,被广泛用在不止于 ASF 治下项目,而是整个开源世界的轻量级的软件协议审计和 License Header 检查上。\n主要贡献在 Apache APISIX 的琚致远同学今年也被推选成为 Apache Members 的一员。他最让我印象深刻的是在 APISIX 社群当中积极讨论社群建设的议题,以及作为 APISIX 发布的 GSoC 项目的 mentor 帮助在校学生接触开源,实践开源,锻炼技艺。巧合的是,他跟我年龄相同,于是我痛失 Youngest Apache Member 的噱头,哈哈。\n或许,参与 Apache Community 就是这样的一种体验。并不是什么复杂的叙事,只是找到志同道合的人做出好的软件。我希望能够为提升整个软件行业付出自己的努力,希望我(参与)制造的软件创造出更大的价值,这里的人看起来大都也有相似的想法,这很好。仅此而已。\n原本还想聊聊 The Apache Way 的具体内容,还有介绍 Apache Incubator 这个保持 Apache Community 理念常青,完成代际传承的重要机制,但是到此为止似乎也很好。Apache Community 的故事和经验很难用一篇文章讲完,这两个话题就留待以后再写吧。\n","excerpt":"大约二十年前我刚开始进入互联网的世界的时候,支撑起整个网络的基础设施,就包括了 Apache 软件基金会(ASF)治下的软件。\nApache Httpd 是开启这个故事的软件,巅峰时期有超过七成的市场 …","ref":"/zh/2022-03-14-the-apache-community/","title":"我眼中的 The Apache Way"},{"body":"SkyWalking Client Rust 0.1.0 is released. Go to downloads page to find release tars.\n","excerpt":"SkyWalking Client Rust 0.1.0 is released. Go to downloads page to find release tars.","ref":"/events/release-apache-skywalking-client-rust-0-1-0/","title":"Release Apache SkyWalking Client Rust 0.1.0"},{"body":"SkyWalking Java Agent 8.9.0 is released. Go to downloads page to find release tars. Changes by Version\n8.9.0  Support Transaction and fix duplicated methods enhancements for jedis-2.x plugin. Add ConsumerWrapper/FunctionWrapper to support CompletableFuture.x.thenAcceptAsync/thenApplyAsync. Build CLI from Docker instead of source codes, add alpine based Docker image. Support set instance properties in json format. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Add doc about system environment variables to configurations.md Avoid ProfileTaskChannelService.addProfilingSnapshot throw IllegalStateException(Queue full) Increase ProfileTaskChannelService.snapshotQueue default size from 50 to 4500 Support 2.8 and 2.9 of pulsar client. Add dubbo 3.x plugin. 
Fix TracePathMatcher should match pattern \u0026ldquo;**\u0026rdquo; with paths end by \u0026ldquo;/\u0026rdquo; Add support returnedObj expression for apm-customize-enhance-plugin Fix the bug that httpasyncclient-4.x-plugin puts the dirty tracing context in the connection context Compatible with the versions after dubbo-2.7.14 Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Support ZGC GC time and count metric collect. (Require 9.0.0 OAP) Support configuration for collecting redis parameters for jedis-2.x and redisson-3.x plugin. Migrate base images to Temurin and add images for ARM. (Plugin Test) Fix compiling issues in many plugin tests due to they didn\u0026rsquo;t lock the Spring version, and Spring 3 is incompatible with 2.x APIs and JDK8 compiling. Support ShardingSphere 5.0.0 Bump up gRPC to 1.44.0, fix relative CVEs.  Documentation  Add a FAQ, Why is -Djava.ext.dirs not supported?.  All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.9.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-9-0/","title":"Release Apache SkyWalking Java Agent 8.9.0"},{"body":"Apache SkyWalking is an open-source APM for a distributed system, Apache Software Foundation top-level project.\nOn Jan. 28th, we received a License violation report from one of the committers (anonymously). They have a cloud service called Application Performance Monitoring - Distributed Tracing (应用性能监控全链路版). At the Java service monitoring section, it provides this agent download link\n wget https://datarangers.com.cn/apminsight/repo/v2/download/java-agent/apminsight-java-agent_latest.tar.gz\n We downloaded it at 23:15 Jan. 28th UTC+8(Beijing), and archived it at here\nWe have confirmed this is a distribution of SkyWalking Java agent.\nWe listed several pieces of evidence to prove this here, every reader could compare with the official SkyWalking source codes\n The first and the easiest one is agent.config file, which is using the same config keys, and the same config format.  This is the Volcengine\u0026rsquo;s version, and check SkyWalking agent.config In the apmplus-agent.jar, Volcengine\u0026rsquo;s agent core jar, you could easily find several core classes exactly as same as SkyWalking\u0026rsquo;s.  The ComponentsDefine class is unchanged, even with component ID and name. This is Volcengine\u0026rsquo;s version, and check SkyWalking\u0026rsquo;s version\nThe whole code names, package names, and hierarchy structure are all as same as SkyWalking 6.x version.  This is the Volcengine package hierarchy structure, and check the SkyWalking\u0026rsquo;s version\n Volcengine Inc.\u0026rsquo;s team changed all package names, removed the Apache Software Foundation\u0026rsquo;s header, and don\u0026rsquo;t keep Apache Software Foundation and Apache SkyWalking\u0026rsquo;s LICENSE and NOTICE file in their redistribution.\nAlso, we can\u0026rsquo;t find anything on their website to declare they are distributing SkyWalking.\nAll above have proved they are violating the Apache 2.0 License, and don\u0026rsquo;t respect Apache Software Foundation and Apache SkyWalking\u0026rsquo;s IP and Branding.\nWe have contacted their legal team, and wait for their official response.\nResolution On Jan. 30th night, UTC+8, 2022. We received a response from Volcengine\u0026rsquo;s APMPlus team. 
They admitted their violation behaviors, and made the following changes.\n Volcengine\u0026rsquo;s APMPlus service page was updated on January 30th and stated that the agent is a fork version(re-distribution) of Apache SkyWalking agent. Below is the screenshot of Volcengine\u0026rsquo;s APMPlus product page.  Volcengine\u0026rsquo;s APMPlus agent distributions were also updated and include SkyWalking\u0026rsquo;s License and NOTICE now. Below is the screenshot of Volcengine\u0026rsquo;s APMPlus latest agent, you could download from the product page. We keep a copy of their Jan. 30th 2022 at here.  Volcengine\u0026rsquo;s APMPlus team had restored all license headers of SkyWalking in the agent, and the modifications of the project files are also listed in \u0026ldquo;SkyWalking-NOTICE\u0026rdquo;, which you could download from the product page.  We have updated the status to the PMC mail list. This license violation issue has been resolved for now.\n Appendix Inquiries of committers Q: I hope Volcengine Inc. can give a reason for this license issue, not just an afterthought PR. This will not only let us know where the issue is but also avoid similar problems in the future.\nA(apmplus apmplus@volcengine.com):\nThe developers neglected this repository during submitting compliance assessment. Currently, APMPlus team had introduced advanced tools provided by the company for compliance assessment, and we also strengthened training for our developers. In the future, the compliance assessment process will be further improved from tool assessment and manual assessment. ","excerpt":"Apache SkyWalking is an open-source APM for a distributed system, Apache Software Foundation …","ref":"/blog/2022-01-28-volcengine-violates-aplv2/","title":"[Resolved][License Issue] Volcengine Inc.(火山引擎) violates the Apache 2.0 License when using SkyWalking."},{"body":"Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  
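To make the two approaches concrete from a SkyWalking agent's point of view, here is a minimal sketch using the Java agent's standard SW_AGENT_COLLECTOR_BACKEND_SERVICES setting (the host names are placeholders, not values from this article): in the client-side approach the agent is handed every OAP address and picks among them itself, while in the proxy approach it only knows the proxy address and leaves the balancing to the proxy.
# Client-side style: the agent knows all OAP nodes and selects one of them itself.
export SW_AGENT_COLLECTOR_BACKEND_SERVICES=oap-0.example:11800,oap-1.example:11800,oap-2.example:11800
# Proxy style: the agent only knows the proxy (for example, SkyWalking Satellite),
# and the proxy is responsible for spreading the load across the OAP cluster.
export SW_AGENT_COLLECTOR_BACKEND_SERVICES=satellite.example:11800
Either way, the choice is transparent to the application code; only the agent-side address configuration changes.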
From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. 
This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. 
There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. 
For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. 
One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","excerpt":"Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data …","ref":"/blog/2022-01-24-scaling-with-apache-skywalking/","title":"Scaling with Apache SkyWalking"},{"body":"SkyWalking Cloud on Kubernetes 0.6.1 is released. Go to downloads page to find release tars.\n Bugs  Fix could not deploy metrics adapter to GKE    ","excerpt":"SkyWalking Cloud on Kubernetes 0.6.1 is released. 
Go to downloads page to find release tars.\n Bugs …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-6-1/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.6.1"},{"body":"随着业务与用户量的持续发展,系统的瓶颈也逐渐出现。尤其在一些节假日、突发的营销活动中,访问量激增可能会导致系统性能下降,甚至造成系统瘫痪。 全链路压测可以很好的帮助我们预先演练高峰流量,从而提前模拟出系统的执行情况,帮助我们预估系统容量。当流量真正来临时,也可以更从容面对。 Apache SkyWalking 联合 Apache APISIX 及 Apache ShardingSphere,三大顶级开源社区通力合作,共同打造生产级可用的全链路压测解决方案,CyborgFlow。\n介绍 CyborgFlow 是一款面向生产级可用的全链路压测解决方案。总共由三个组件组成,如下图所示。\n Flow Gateway: 压测流量网关。当流量到达该组件时,则会将请求认定为压测流量,并将压测流量标识传递至上游服务。 Database Shadow: 数据库中间件。当数据库中间件感知到当前流量为压测流量时,则会将数据库操作路由至影子表中进行操作。 Agent/Dashboard: 分布式监控系统。与业务系统紧密结合,当感知到压测请求后,自动将其标识传递至上游,无需业务代码改造。并且利用分析能力,构建Dashboard来便于查看流量情况。  以此,便覆盖了单个请求的完整生命周期,在网关层构建压测标识,到业务系统透传标识,最终将请求与影子表交互。同时整个流程拥有完整的监控分析。\n原理 依托于三大社区合作,让这一切变得简单易用。下图为全链路压测系统的运行原理,橙色和蓝色分别代表正常流量和压测流量。\nFlow Gateway Flow Gateway 作为压测流量网关,主要负责接收流量,并传递压测流量表示至上游。\n 添加 skywalking插件 构建链路入口。 依据 proxy-rewrite插件 将压测流量标识注入到上游的请求头中。  Agent/Dashboard 该组件中则分为两部分内容说明。\nAgent Agent与业务程序拥有相同生命周期,负责压测流量标识在各个业务系统之间传递,并与 Database Shadow 交互。\n SkyWalking Agent通过读取从Flow Gateway传递的压测流量标识,利用 透传协议 将该标识在应用之间传递。 当准备进行数据库调用时,则通过判断是否包含压测流量标识来决定是否SQL调用时追加压测流量标识(/* cyborg-flow: true */)。 当检测到当前请求包含压测流量标识后,将该数据与Trace绑定,用于Dashboard数据分析。  Dashboard Dashboard 用于压测过程进行中的监控数据分析,并最终以图表的方式进行展示。\n 接收来自Agent中上报的Trace数据,并依据OAL中的Tag过滤器(.filter(tags contain \u0026quot;cyborg-flow:true\u0026quot;))来生成压测与非压测的指标数据。 利用指标数据便可以在Dashboard中创建图表进行观察。  Database Shadow Database Shadow 作为 Proxy 在业务程序与数据库中间完成数据交互,当检测到压测流量时则会将SQL传递至影子表中处理。\n 检测下游传递的数据库语句中是否包含压测流量标识(/* cyborg-flow: true */),存在时则将SQL交给由用户配置的影子表中处理。  快速上手 下面将带你快速将Cyborg Flow集成至你的项目中。相关组件的下载请至 Github Release 中下载,目前已发布 0.1.0 版本。\n部署 Database Shadow  解压缩cyborg-database-shadow.tar.gz。 将 conf/config-shadow.yaml 文件中的业务数据库与影子数据库配置为自身业务中的配置。 启动 Database Shadow服务,启动脚本位于bin/start.sh中。  如需了解更详细的部署参数配置,请参考 官方文档 。\n部署 Cyborg Dashboard  解压缩cyborg-dashboard.tar.gz。 启动后端与UI界面服务,用于链路数据解析与界面展示,启动脚本位于bin/startup.sh中。 接下来就可以通过打开浏览器并访问http://localhost:8080/,此页面为Cyborg Dashboard界面,由于目前尚未部署任何业务程序,所以暂无任何数据。  如需了解更详细的部署参数配置,请参考 后端服务 与 UI界面服务 的安装文档。\n部署 Cyborg Agent 到业务程序中  解压缩cyborg-agent.tar.gz. 修改config/agent.config中的collector.backend_service为 Cyborg Dashboard 中后端地址(默认为11800端口),用于将监控数据上报至 Cyborg Dashboard 。 修改业务程序中与数据库的链接,将其更改为 Database Shadow 中的配置。默认访问端口为3307,用户名密码均为root。 当程序启动时,增加该参数到启动命令中:-jar path/to/cyborg-agent/skywalking-agent.jar。  如需了解更详细的部署参数配置,请参考 Agent安装文档 。\n部署 Flow Gateway  参考 Flow Gateway 快速开始 进行下载 Apache APISIX 并配置相关插件。 基于 APISIX 创建路由文档 进行路由创建。  完成! 最后,通过Flow Gateway访问业务系统资源,便完成了一次压测流量请求。\n 压测流量最终访问至影子表进行数据操作。 如下图所示,通过观察 Cyborg Dashboard 便可以得知压测与非压测请求的执行情况。  总结 在本文中,我们详细介绍了Cyborg Flow中的各个组件的功能、原理,最终搭配快速上手来快速将该系统与自己的业务系统结合。 如果在使用中有任何问题,欢迎来共同讨论。\n","excerpt":"随着业务与用户量的持续发展,系统的瓶颈也逐渐出现。尤其在一些节假日、突发的营销活动中,访问量激增可能会导致系统性能下降,甚至造成系统瘫痪。 全链路压测可以很好的帮助我们预先演练高峰流量,从而提前模拟出 …","ref":"/zh/2022-01-18-cyborg-flow/","title":"Cyborg Flow X SkyWalking: 生产环境全链路压测"},{"body":"SkyWalking Cloud on Kubernetes 0.6.0 is released. Go to downloads page to find release tars.\n Features  Add the Satellite CRD, webhooks and controller   Bugs  Update release images to set numeric user id Fix the satellite config not support number error Use env JAVA_TOOL_OPTIONS to replace AGENT_OPTS   Chores  Add stabilization windows feature in satellite HPA documentation    ","excerpt":"SkyWalking Cloud on Kubernetes 0.6.0 is released. Go to downloads page to find release tars. 
…","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-6-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.6.0"},{"body":"SkyWalking Kong Agent 0.2.0 is released. Go to downloads page to find release tars.\n Establish the SkyWalking Kong Agent.  ","excerpt":"SkyWalking Kong Agent 0.2.0 is released. Go to downloads page to find release tars.\n Establish the …","ref":"/events/release-apache-skywalking-kong-0-2-0/","title":"Release Apache SkyWalking Kong 0.2.0"},{"body":"SkyWalking Satellite 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  Make the gRPC client client_pem_path and client_key_path as an optional config. Remove prometheus-server sharing server plugin. Support let the telemetry metrics export to prometheus or metricsService. Add the resource limit when gRPC server accept connection.  Bug Fixes  Fix the gRPC server enable TLS failure. Fix the native meter protocol message load balance bug.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  Make …","ref":"/events/release-apache-skwaylking-satellite-0-5-0/","title":"Release Apache SkyWalking Satellite 0.5.0"},{"body":"SkyWalking LUA Nginx 0.6.0 is released. Go to downloads page to find release tars.\n fix: skywalking_tracer:finish() will not be called in some case such as upstream timeout.  ","excerpt":"SkyWalking LUA Nginx 0.6.0 is released. Go to downloads page to find release tars.\n fix: …","ref":"/events/release-apache-skywalking-lua-nginx-0.6.0/","title":"Release Apache SkyWalking LUA Nginx 0.6.0"},{"body":"","excerpt":"","ref":"/tags/chaos-engineering/","title":"Chaos Engineering"},{"body":"","excerpt":"","ref":"/tags/chaos-mesh/","title":"Chaos Mesh"},{"body":"Chaos Mesh is an open-source cloud-native chaos engineering platform. You can use Chaos Mesh to conveniently inject failures and simulate abnormalities that might occur in reality, so you can identify potential problems in your system. Chaos Mesh also offers a Chaos Dashboard which allows you to monitor the status of a chaos experiment. However, this dashboard cannot let you observe how the failures in the experiment impact the service performance of applications. This hinders us from further testing our systems and finding potential problems.\n Apache SkyWalking is an open-source application performance monitor (APM), specially designed to monitor, track, and diagnose cloud native, container-based distributed systems. It collects events that occur and then displays them on its dashboard, allowing you to observe directly the type and number of events that have occurred in your system and how different events impact the service performance.\nWhen you use SkyWalking and Chaos Mesh together during chaos experiments, you can observe how different failures impact the service performance.\nThis tutorial will show you how to configure SkyWalking and Chaos Mesh. You’ll also learn how to leverage the two systems to monitor events and observe in real time how chaos experiments impact applications’ service performance.\nPreparation Before you start to use SkyWalking and Chaos Mesh, you have to:\n Set up a SkyWalking cluster according to the SkyWalking configuration guide. Deploy Chao Mesh using Helm. Install JMeter or other Java testing tools (to increase service loads). Configure SkyWalking and Chaos Mesh according to this guide if you just want to run a demo.  
Now, you are fully prepared, and we can cut to the chase.\nStep 1: Access the SkyWalking cluster After you install the SkyWalking cluster, you can access its user interface (UI). However, no service is running at this point, so before you start monitoring, you have to add one and set the agents.\nIn this tutorial, we take Spring Boot, a lightweight microservice framework, as an example to build a simplified demo environment.\n Create a SkyWalking demo in Spring Boot by referring to this document. Execute the command kubectl apply -f demo-deployment.yaml -n skywalking to deploy the demo.  After you finish deployment, you can observe the real-time monitoring results at the SkyWalking UI.\nNote: Spring Boot and SkyWalking have the same default port number: 8080. Be careful when you configure the port forwarding; otherise, you may have port conflicts. For example, you can set Spring Boot’s port to 8079 by using a command like kubectl port-forward svc/spring-boot-skywalking-demo 8079:8080 -n skywalking to avoid conflicts.\nStep 2: Deploy SkyWalking Kubernetes Event Exporter SkyWalking Kubernetes Event Exporter is able to watch, filter, and send Kubernetes events into the SkyWalking backend. SkyWalking then associates the events with the system metrics and displays an overview about when and how the metrics are affected by the events.\nIf you want to deploy SkyWalking Kubernetes Event Explorer with one line of commands, refer to this document to create configuration files in YAML format and then customize the parameters in the filters and exporters. Now, you can use the command kubectl apply to deploy SkyWalking Kubernetes Event Explorer.\nStep 3: Use JMeter to increase service loads To better observe the change in service performance, you need to increase the service loads on Spring Boot. In this tutorial, we use JMeter, a widely adopted Java testing tool, to increase the service loads.\nPerform a stress test on localhost:8079 using JMeter and add five threads to continuously increase the service loads.\nOpen the SkyWalking Dashboard. You can see that the access rate is 100%, and that the service loads reach about 5,300 calls per minute (CPM).\nStep 4: Inject failures via Chaos Mesh and observe results After you finish the three steps above, you can use the Chaos Dashboard to simulate stress scenarios and observe the change in service performance during chaos experiments.\nThe following sections describe how service performance varies under the stress of three chaos conditions:\n  CPU load: 10%; memory load: 128 MB\nThe first chaos experiment simulates low CPU usage. To display when a chaos experiment starts and ends, click the switching button on the right side of the dashboard. 
To learn whether the experiment is Applied to the system or Recovered from the system, move your cursor onto the short, green line.\nDuring the time period between the two short, green lines, the service load decreases to 4,929 CPM, but returns to normal after the chaos experiment ends.\n  CPU load: 50%; memory load: 128 MB\nWhen the application’s CPU load increases to 50%, the service load decreases to 4,307 CPM.\n  CPU load: 100%; memory load: 128 MB\nWhen the CPU usage is at 100%, the service load decreases to only 40% of what it would be if no chaos experiments were taking place.\nBecause the process scheduling under the Linux system does not allow a process to occupy the CPU all the time, the deployed Spring Boot Demo can still handle 40% of the access requests even in the extreme case of a full CPU load.\n  Summary By combining SkyWalking and Chaos Mesh, you can clearly observe when and to what extent chaos experiments affect application service performance. This combination of tools lets you observe the service performance in various extreme conditions, thus boosting your confidence in your services.\nChaos Mesh has grown a lot in 2021 thanks to the unremitting efforts of all PingCAP engineers and community contributors. In order to continue to upgrade our support for our wide variety of users and learn more about users’ experience in Chaos Engineering, we’d like to invite you to takethis survey and give us your valuable feedback.\nIf you want to know more about Chaos Mesh, you’re welcome to join the Chaos Mesh community on GitHub or our Slack discussions (#project-chaos-mesh). If you find any bugs or missing features when using Chaos Mesh, you can submit your pull requests or issues to our GitHub repository.\n","excerpt":"Chaos Mesh is an open-source cloud-native chaos engineering platform. You can use Chaos Mesh to …","ref":"/blog/2021-12-21-better-observability-for-chaos-engineering/","title":"Chaos Mesh + SkyWalking: Better Observability for Chaos Engineering"},{"body":"","excerpt":"","ref":"/tags/tutorial/","title":"Tutorial"},{"body":"SkyWalking Cloud on Kubernetes 0.5.0 is released. Go to downloads page to find release tars.\n Features  Add E2E test cases to verify OAPServer, UI, Java agent and Storage components.   Bugs  Fix operator role patch issues Fix invalid CSR signername Fix bug in the configmap controller   Chores  Bump up KubeBuilder to V3 Bump up metric adapter server to v1.21.0 Split mono-project to two independent projects    ","excerpt":"SkyWalking Cloud on Kubernetes 0.5.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-5-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.5.0"},{"body":"We Can integrate Skywalking to Java Application by Java Agent TEC., In typical application, the system runs Java Web applications at the backend of the load balancer, and the most commonly used load balancer is nginx. What should we do if we want to bring it under surveillance? Fortunately, skywalking has provided Nginx agent。 During the integration process, it is found that the examples on the official website only support openresty. For openresty, common modules such as luajit and Lua nginx module have been integrated. Adding skywalking related configurations according to the examples on the official website can take effect. However, when configured for nginx startup, many errors will be reported. We may not want to change a load balancer (nginx to openresty) in order to use skywalking. 
Therefore, we must solve the integration problem between skywalking and nginx.\nNote: openresty is a high-performance web development platform based on nginx + Lua, which solves the short board that is not easy to program in nginx.\nBased on Skywalking-8.7.0 and Nginx-1.20.1\nUpgrade of nginx: The agent plug-in of nginx is written based on Lua, so nginx needs to add support for Lua, Lua nginx module It just provides this function. The Lua nginx module depends on luajit Therefore, first we need to install luajit. In the environment, it is best to choose version 2.1.\nFor nginx, you need to compile the necessary modules yourself. It depends on the following two modules:\nlua-nginx-module The version is lua-nginx-module-0.10.21rc1\nngx_devel_kit The version using ngx_devel_kit-0.3.1\nCompile nginx parameters\nconfigure arguments: --add-module=/path/to/ngx_devel_kit-0.3.1 --add-module=/path/to/lua-nginx-module-0.10.21rc1 --with-ld-opt=-Wl,-rpath,/usr/local/LuaJIT/lib The following is for skywalking-nginx-lua-0.3.0 and 0.3.0+ are described separately.\nskywalking-nginx-lua-0.3.0 After testing, skywalking-nginx-lua-0.3.0 requires the following Lua related modules\nlua-resty-core https://github.com/openresty/lua-resty-core lua-resty-lrucache https://github.com/openresty/lua-resty-lrucache lua-cjson https://github.com/openresty/lua-cjson The dependent Lua modules are as follows:\nlua_package_path \u0026#34;/path/to/lua-resty-core/lua-resty-core-master/lib/?.lua;/path/to/lua-resty-lrucache-0.11/lib/?.lua;/path/to/skywalking-nginx-lua-0.3.0/lib/?.lua;;\u0026#34;; In the process of make \u0026amp; \u0026amp; make install, Lua cjson needs to pay attention to:\nModify a path in makefile\nLUA_INCLUDE_DIR ?= /usr/local/LuaJIT/include/luajit-2.0\nReference:https://blog.csdn.net/ymeputer/article/details/50146143 \nskywalking-nginx-lua-0.3.0+ For skywalking-nginx-lua-0.3.0+, tablepool support needs to be added, but it seems that cjson is not required\nlua-resty-core https://github.com/openresty/lua-resty-core lua-resty-lrucache https://github.com/openresty/lua-resty-lrucache lua-tablepool https://github.com/openresty/lua-tablepool lua_ package_ path \u0026#34;/path/to/lua-resty-core/lua-resty-core-master/lib/?.lua;/path/to/lua-resty-lrucache-0.11/lib/?.lua;/path/to/lua-tablepool-master/lib/?.lua;/path/to/skywalking-nginx-lua-master/lib/?.lua;;\u0026#34;; tablepool introduces two APIs according to its official documents table new and table. Clear requires luajit2.1, there is a paragraph in the skywalking-nginx-lua document that says you can use \u0026lsquo;require (\u0026ldquo;skywalking. 
Util\u0026rdquo;) disable_ Tablepool() ` disable tablepool\nWhen you start nginx, you will be prompted to install openresty\u0026rsquo;s own [luajit version]( https://github.com/openresty/luajit2 )\ndetected a LuaJIT version which is not OpenResty\u0026#39;s; many optimizations will be disabled and performance will be compromised (see https://github.com/openresty/luajit2 for OpenResty\u0026#39;s LuaJIT or, even better, consider using the OpenResty releases from https://openresty.org/en/download.html ) here is successful configuration:\nhttp { lua_package_path \u0026#34;/path/to/lua-resty-core/lua-resty-core-master/lib/?.lua;/path/to/lua-resty-lrucache-0.11/lib/?.lua;/path/to/lua-tablepool-master/lib/?.lua;/path/to/skywalking-nginx-lua-master/lib/?.lua;;\u0026#34;; # Buffer represents the register inform and the queue of the finished segment lua_shared_dict tracing_buffer 100m; # Init is the timer setter and keeper # Setup an infinite loop timer to do register and trace report. init_worker_by_lua_block { local metadata_buffer = ngx.shared.tracing_buffer -- Set service name metadata_buffer:set(\u0026#39;serviceName\u0026#39;, \u0026#39;User Service Name\u0026#39;) -- Instance means the number of Nginx deployment, does not mean the worker instances metadata_buffer:set(\u0026#39;serviceInstanceName\u0026#39;, \u0026#39;User Service Instance Name\u0026#39;) -- type \u0026#39;boolean\u0026#39;, mark the entrySpan include host/domain metadata_buffer:set(\u0026#39;includeHostInEntrySpan\u0026#39;, false) -- set random seed require(\u0026#34;skywalking.util\u0026#34;).set_randomseed() require(\u0026#34;skywalking.client\u0026#34;):startBackendTimer(\u0026#34;http://127.0.0.1:12800\u0026#34;) -- If there is a bug of this `tablepool` implementation, we can -- disable it in this way -- require(\u0026#34;skywalking.util\u0026#34;).disable_tablepool() skywalking_tracer = require(\u0026#34;skywalking.tracer\u0026#34;) } server { listen 8090; location /ingress { default_type text/html; rewrite_by_lua_block { ------------------------------------------------------ -- NOTICE, this should be changed manually -- This variable represents the upstream logic address -- Please set them as service logic name or DNS name -- -- Currently, we can not have the upstream real network address ------------------------------------------------------ skywalking_tracer:start(\u0026#34;upstream service\u0026#34;) -- If you want correlation custom data to the downstream service -- skywalking_tracer:start(\u0026#34;upstream service\u0026#34;, {custom = \u0026#34;custom_value\u0026#34;}) } -- Target upstream service proxy_pass http://127.0.0.1:8080/backend; body_filter_by_lua_block { if ngx.arg[2] then skywalking_tracer:finish() end } log_by_lua_block { skywalking_tracer:prepareForReport() } } } } Original post:https://www.cnblogs.com/kebibuluan/p/14440228.html\n","excerpt":"We Can integrate Skywalking to Java Application by Java Agent TEC., In typical application, the …","ref":"/blog/2021-12-13-skywalking-nginx-agent-integration/","title":"How to integrate skywalking-nginx-lua to Nginx?"},{"body":"SkyWalking 8.9.1 is released. Go to downloads page to find release tars.\nChanges by Version\nProject  Upgrade log4j2 to 2.15.0 for CVE-2021-44228. This CVE only effects on JDK versions below 6u211, 7u201, 8u191 and 11.0.1 according to the post. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true also avoids CVE if your JRE opened JNDI in default.  ","excerpt":"SkyWalking 8.9.1 is released. 
Go to downloads page to find release tars.\nChanges by Version\nProject …","ref":"/events/release-apache-skywalking-apm-8-9-1/","title":"Release Apache SkyWalking APM 8.9.1"},{"body":"In the field of observability, the three main directions of data collection and analysis, Metrics, Logger and Tracing, are usually used to achieve insight into the operational status of applications.\nApache APISIX has integrated Apache SkyWaling Tracing capabilities as early as version 1.4, with features such as error logging and access log collection added in subsequent versions. Now with Apache SkyWalking\u0026rsquo;s support for Metrics, it enables Apache APISIX to implement a one-stop observable solution in integrated mode, covering both logging, metrics and call tracing.\nFeature Development Background Those of you who are familiar with Apache APISIX should know that Apache APISIX produces two types of logs during operation, namely the access log and the error log.\nAccess logs record detailed information about each request and are logs generated within the scope of the request, so they can be directly associated with Tracing. Error logs, on the other hand, are Apache APISIX runtime output log messages, which are application-wide logs, but cannot be 100% associated with requests.\nAt present, Apache APISIX provides very rich log processing plug-ins, including TCP/HTTP/Kafka and other collection and reporting plug-ins, but they are weakly associated with Tracing. Take Apache SkyWalking as an example. We extract the SkyWalking Tracing Conetxt Header from the log records of Apache APISIX and export it to the file system, and then use the log processing framework (fluentbit) to convert the logs into a log format acceptable to SkyWalking. The Tracing Context is then parsed and extracted to obtain the Tracing ID to establish a connection with the Trace.\nObviously, the above way of handling the process is tedious and complicated, and requires additional conversion of log formats. For this reason, in PR#5500 we have implemented the Apache SkyWalking access log into the Apache APISIX plug-in ecosystem to make it easier for users to collect and process logs using Apache SkyWalking in Apache APISIX.\nIntroduction of the New Plugins SkyWalking Logger Pulgin The SkyWalking Logger plugin parses the SkyWalking Tracing Context Header and prints the relevant Tracing Context information to the log, thus enabling the log to be associated with the call chain.\nBy using this plug-in, Apache APISIX can get the SkyWalking Tracing Context and associate it with Tracing even if the SkyWalking Tracing plug-in is not turned on, if Apache SkyWalking is already integrated downstream.\nThe above Content is the log content, where the Apache APISIX metadata configuration is used to collect request-related information. You can later modify the Log Format to customize the log content by Plugin Metadata, please refer to the official documentation.\nHow to Use When using this plugin, since the SkyWalking plugin is \u0026ldquo;not enabled\u0026rdquo; by default, you need to manually modify the plugins section in the conf/default-apisix.yaml file to enable the plugin.\nplugins:...- error-log-logger...Then you can use the SkyWalking Tracing plug-in to get the tracing data directly, so you can verify that the Logging plug-in-related features are enabled and working properly.\nStep 1: Create a route Next, create a route and bind the SkyWalking Tracing plugin and the SkyWalking Logging plugin. 
More details of the plugin configuration can be found in the official Apache APISIX documentation.\ncurl -X PUT \u0026#39;http://192.168.0.108:9080/apisix/admin/routes/1001\u0026#39; \\ -H \u0026#39;X-API-KEY: edd1c9f034335f136f87ad84b625c8f1\u0026#39; \\ -H \u0026#39;Content-Type: application/json\u0026#39; \\ -d \u0026#39;{ \u0026#34;uri\u0026#34;: \u0026#34;/get\u0026#34;, \u0026#34;plugins\u0026#34;: { \u0026#34;skywalking\u0026#34;: { \u0026#34;sample_ratio\u0026#34;: 1 }, \u0026#34;skywalking-logger\u0026#34;: { \u0026#34;endpoint_addr\u0026#34;: \u0026#34;http://127.0.0.1:12800\u0026#34; } }, \u0026#34;upstream\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;roundrobin\u0026#34;, \u0026#34;nodes\u0026#34;: { \u0026#34;httpbin.org:80\u0026#34;: 1 } } }\u0026#39; Step 2: Log Processing On the Apache SkyWalking side, you can use LAL (Logger Analysis Language) scripts for log processing, such as Tag extraction, SkyWalking metadata correction, and so on.\nThe main purpose of Tag extraction here is to facilitate subsequent retrieval and to add dependencies to the Metrics statistics. The following code can be used to configure the SkyWalking LAL script to complete the Tag extraction. For more information on how to use the SkyWalking LAL script, please refer to the official Apache SkyWalking documentation.\n# The default LAL script to save all logs, behaving like the versions before 8.5.0.rules:- name:defaultdsl:|filter { json { abortOnFailure false } extractor { tag routeId: parsed.route_id tag upstream: parsed.upstream tag clientIp: parsed.client_ip tag latency: parsed.latency } sink { } }After configuring the above LAL script in SkyWalking OAP Server the following log will be displayed.\nDetails of the expanded log are as follows.\nAs you can see from the above, displaying routeId, upstream and clientIp as key-value pairs is much easier than searching directly in the log body. This is because the Tag format not only supports log display format and search, but also generates information such as Metrics using MAL statistics.\nSkyWalking Error Logger Plugin The error-log-logger plug-in now supports the SkyWalking log format, and you can now use the http-error-log plug-in to quickly connect Apache APISIX error logs to Apache SkyWalking. Currently, error logs do not have access to SkyWalking Tracing Context information, and therefore cannot be directly associated with SkyWalking Tracing.\nThe main reason for the error log to be integrated into SkyWalking is to centralize the Apache APISIX log data and to make it easier to view all observable data within SkyWalking.\nHow to Use Since the error-log-logger plugin is \u0026ldquo;not enabled\u0026rdquo; by default, you still need to enable the plugin in the way mentioned above.\nplugins:...- error-log-logger...Step 1: Bind the route After enabling, you need to bind the plugin to routes or global rules. Here we take \u0026ldquo;bind routes\u0026rdquo; as an example.\ncurl -X PUT \u0026#39;http://192.168.0.108:9080/apisix/admin/plugin_metadata/error-log-logger\u0026#39; \\ -H \u0026#39;X-API-KEY: edd1c9f034335f136f87ad84b625c8f1\u0026#39; \\ -H \u0026#39;Content-Type: application/json\u0026#39; \\ -d \u0026#39;{ \u0026#34;inactive_timeout\u0026#34;: 10, \u0026#34;level\u0026#34;: \u0026#34;ERROR\u0026#34;, \u0026#34;skywalking\u0026#34;: { \u0026#34;endpoint_addr\u0026#34;: \u0026#34;http://127.0.0.1:12800/v3/logs\u0026#34; } }\u0026#39;  Note that the endpoint_addr is the SkyWalking OAP Server address and needs to have the URI (i.e. 
/v3/logs).\n Step 2: LAL Processing In much the same way as the Access Log processing, the logs are also processed by LAL when they reach SkyWalking OAP Server. Therefore, we can still use the SkyWalking LAL script to analyze and process the log messages.\nIt is important to note that the Error Log message body is in text format. If you are extracting tags, you will need to use regular expressions to do this. Unlike Access Log, which handles the message body in a slightly different way, Acces Log uses JSON format and can directly reference the fields of the JSON object using JSON parsing, but the rest of the process is largely the same.\nTags can also be used to optimize the display and retrieval for subsequent metrics calculations using SkyWalking MAL.\nrules: - name: apisix-errlog dsl: | filter { text { regexp \u0026#34;(?\u0026lt;datetime\u0026gt;\\\\d{4}/\\\\d{2}/\\\\d{2} \\\\d{2}:\\\\d{2}:\\\\d{2}) \\\\[(?\u0026lt;level\u0026gt;\\\\w+)\\\\] \\\\d+\\\\#\\\\d+:( \\\\*\\\\d+ \\\\[(?\u0026lt;module\u0026gt;\\\\w+)\\\\] (?\u0026lt;position\u0026gt;.*\\\\.lua:\\\\d+): (?\u0026lt;function\u0026gt;\\\\w+\\\\(\\\\)):)* (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level if (parsed?.module) { tag module: parsed.module tag position: parsed.position tag function: parsed.function } } sink { } } After the LAL script used by SkyWalking OAP Server, some of the Tags will be extracted from the logs, as shown below.\nSummary This article introduces two logging plug-ins for Apache APISIX that integrate with SkyWalking to provide a more convenient operation and environment for logging in Apache APISIX afterwards.\nWe hope that through this article, you will have a fuller understanding of the new features and be able to use Apache APISIX for centralized management of observable data more conveniently in the future.\n","excerpt":"In the field of observability, the three main directions of data collection and analysis, Metrics, …","ref":"/blog/2021-12-08-apisix-integrate-skywalking-plugin/apisix-integrate-skywalking-plugin/","title":"Apache APISIX Integrates with SkyWalking to Create a Full Range of Log Processing"},{"body":"","excerpt":"","ref":"/tags/apisix/","title":"APISIX"},{"body":"","excerpt":"","ref":"/tags/iotdb/","title":"IoTDB"},{"body":"","excerpt":"","ref":"/tags/storage/","title":"Storage"},{"body":"This document is one of the outcomes of Apache IoTDB - Apache SkyWalking Adapter in Summer 2021 of Open Source Promotion Plan. The design and development work is under the guidance of @jixuan1989 from IoTDB and @wu-sheng from SkyWalking. Thanks for their guidance and the help from community.\nStart with SkyWalking Showcase Before using SkyWalking Showcase to quick start with IoTDB, please ensure your have make installed and Docker daemon running.\nPlease run the command below.\ngit clone https://github.com/LIU-WEI-git/skywalking-showcase.git cd skywalking-showcase make deploy.docker FEATURE_FLAGS=single-node.iotdb,agent The former variable single-node.iotdb will deploy only one single node of SkyWalking OAP-v8.9.0, and SkyWalking RocketBot UI-v8.9.0, IoTDB-v0.12.3 as storage. The latter variable agent will deploy micro-services with SkyWalking agent enabled, which include agents for Java, NodeJS server, browser, Python.\nThese shell command maybe take a long while. After pulling and running docker image, please visit http://localhost:9999/. 
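While the images are being pulled and started, you can keep an eye on progress with plain Docker commands; for example (a sketch only, since the actual container names are defined by the showcase compose files): docker ps --format \u0026#39;{{.Names}}: {{.Status}}\u0026#39; should eventually list the OAP, UI, IoTDB and demo-service containers as Up.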
Then you will see the SkyWalking UI and data from OAP backend.\nIf you want to use more functions of SkyWalking Showcase, please visit its official document and clone official repository.\nStart Manually If you want to download and run IoTDB and SkyWalking manually, here is the guidance.\nInstall and Run IoTDB Apache IoTDB (Database for Internet of Things) is an IoT native database with high performance for data management and analysis, deployable on the edge and the cloud. It is a time-series database storage option for SkyWalking now. Please ensure your IoTDB server version \u0026gt;= 0.12.3 and a single node version is sufficient. For more installation details, please see official document: IoTDB Quick Start and IoTDB Download Page. You could download it from Docker Hub as well.\nThere is some connection tools for IoTDB\n Command Line Interface(CLI)\nIf iotdb-cli connects successfully, you will see   _____ _________ ______ ______ |_ _| | _ _ ||_ _ `.|_ _ \\ | | .--.|_/ | | \\_| | | `. \\ | |_) | | | / .'`\\ \\ | | | | | | | __'. _| |_| \\__. | _| |_ _| |_.' /_| |__) | |_____|'.__.' |_____| |______.'|_______/ version x.x.x IoTDB\u0026gt; login successfully IoTDB\u0026gt;  IoTDB-Grafana\nIoTDB-Grafana is a connector which we developed to show time series data in IoTDB by reading data from IoTDB and sends to Grafana.  Zeppelin-IoTDB\nYou could enable Zeppelin to operate IoTDB via SQL.   For more ecosystem integration, please visit official documents.\nWe will use iotdb-cli in the next examples.\nRun SkyWalking OAP Server There are some SkyWalking official documents which will help you start. Please ensure your SkyWalking version \u0026gt;= 8.9.0. We recommend you download SkyWalking OAP distributions from its official download page or pull docker images.\n SkyWalking Download Page SkyWalking Backend Setup SkyWalking UI Setup  Before starting SkyWalking backend, please edit /config/application.yml, set storage.selector: ${SW_STORAGE:iotdb} or set environment variable SW_STORAGE=iotdb. All config options about IoTDB is following, please edit it or not according to your local environment:\nstorage:selector:${SW_STORAGE:iotdb}iotdb:host:${SW_STORAGE_IOTDB_HOST:127.0.0.1}rpcPort:${SW_STORAGE_IOTDB_RPC_PORT:6667}username:${SW_STORAGE_IOTDB_USERNAME:root}password:${SW_STORAGE_IOTDB_PASSWORD:root}storageGroup:${SW_STORAGE_IOTDB_STORAGE_GROUP:root.skywalking}sessionPoolSize:${SW_STORAGE_IOTDB_SESSIONPOOL_SIZE:16}fetchTaskLogMaxSize:${SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE:1000}# the max number of fetch task log in a requestVisit IoTDB Server and Query SkyWalking Data There are some official document about data model and IoTDB-SQL language:\n Data Model and Terminology DDL (Data Definition Language) DML (Data Manipulation Language) Maintenance Command  Example Model and Insert SQL Before giving any example, we set time display type as long (CLI: set time_display_type=long).\nIn our design, we choose id, entity_id, node_type, service_id, service_group, trace_id as indexes and fix their appearance order. The value of these indexed fields store in the path with double quotation mark wrapping, just like \u0026quot;value\u0026quot;.\nThere is a model named service_traffic with fields id, time_bucket, name, node_type, service_group. In order to see its data, we could use a query SQL: select * from root.skywalking.service_traffic align by device. root.skywalking is the default storage group and align by device could return a more friendly result. 
The query result is following:\n   Time Device name     1637919540000 root.skywalking.service_traffic.\u0026ldquo;YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo;.\u0026ldquo;0\u0026rdquo;.\u0026quot;\u0026quot; application-demo   1637919600000 root.skywalking.service_traffic.\u0026ldquo;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026rdquo;.\u0026ldquo;0\u0026rdquo;.\u0026quot;\u0026quot; application-demo-mysql    Another example model is service_cpm which has fields id, service_id, total, value. Query its data with select * from root.skywalking.service_cpm align by device. The result is following:\n   Time Device total value     1637919540000 root.skywalking.service_cpm.\u0026ldquo;202111261739_YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo;.\u0026ldquo;YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo; 2 2   1637919600000 root.skywalking.service_cpm.\u0026ldquo;202111261740_YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026rdquo;.\u0026ldquo;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026rdquo; 1 1   1637917200000 root.skywalking.service_cpm.\u0026ldquo;2021112617_YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo;.\u0026ldquo;YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo; 2 0    For the first data of service_traffic, the mapping between fields and values is following. Notice, all time_bucket are converted to timestamp(also named time in IoTDB) and the value of all indexed fields are stored in the Device path.\n   Field Value     id(indexed) YXBwbGljYXRpb24tZGVtbw==.1   time(converted from time_bucket) 1637919540000   name application-demo   node_type(indexed) 0   service_group(indexed) (empty string)    You could use the SQL below to insert example data.\ncreate storage group root.skywalking insert into root.skywalking.service_traffic.\u0026#34;YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;.\u0026#34;0\u0026#34;.\u0026#34;\u0026#34;(timestamp, name) values(1637919540000, \u0026#34;application-demo\u0026#34;) insert into root.skywalking.service_traffic.\u0026#34;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026#34;.\u0026#34;0\u0026#34;.\u0026#34;\u0026#34;(timestamp, name) values(1637919600000, \u0026#34;application-demo-mysql\u0026#34;) insert into root.skywalking.service_cpm.\u0026#34;202111261739_YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;.\u0026#34;YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;(timestamp, total, value) values(1637919540000, 2, 2) insert into root.skywalking.service_cpm.\u0026#34;202111261740_YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026#34;.\u0026#34;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026#34;(timestamp, total, value) values(1637919600000, 1, 1) insert into root.skywalking.service_cpm.\u0026#34;2021112617_YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;.\u0026#34;YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;(timestamp, total, value) values(1637917200000, 2, 0) Query SQL Now, let\u0026rsquo;s show some query examples.\n  Filter Query\n If you want to query name field of service_traffic, the query SQL is select name from root.skywalking.service_traffic align by device. If you want to query service_traffic with id = YXBwbGljYXRpb24tZGVtbw==.1, the query SQL is select * from root.skywalking.service_traffic.\u0026quot;YXBwbGljYXRpb24tZGVtbw==.1\u0026quot; align by device. If you want to query service_traffic with name = application-demo, the query SQL is select * from root.skywalking.service_traffic where name = \u0026quot;application-demo\u0026quot; align by device. Combining the above three, the query SQL is select name from root.skywalking.service_traffic.\u0026quot;YXBwbGljYXRpb24tZGVtbw==.1\u0026quot; where name = \u0026quot;application-demo\u0026quot; align by device.    
Fuzzy Query\n If you want to query service_traffic with name contains application, the query SQL is select * from root.skywalking.service_traffic.*.*.* where name like '%application%' align by device.    Aggregate Query\nIoTDB only supports group by time and group by level. The former please refer to Down-Frequency Aggregate Query and the latter please refer to Aggregation By Level. Here is an example about group by level: select sum(total) from root.skywalking.service_cpm.*.* group by level = 3. We couldn\u0026rsquo;t get a expected result since our design make the data of one model spread across multiple devices. So we don\u0026rsquo;t recommend using group by level to query SkyWalking backend data. You could refer to the Discussion #3907 in IoTDB community for more details.\n  Sort Query\nIoTDB only supports order by time, but we could use its select function which contains top_k and bottom_k to get top/bottom k data. For example, select top_k(total, \u0026quot;k\u0026quot;=\u0026quot;3\u0026quot;) from root.skywalking.service_cpm.*.*. We don\u0026rsquo;t recommend using this to query SkyWalking backend data since its result is not friendly. You could refer to the Discussion #3888 in IoTDB community for more details.\n  Pagination Query\nWe could use limit and offset to paginate the query result. Please refer to Row and Column Control over Query Results.\n  Delete\n Delete storage group:  delete storage group root.skywalking   Delete timeseries:  delete timeseries root.skywalking.service_cpm.*.*.total delete timeseries root.skywalking.service_cpm.\u0026quot;202111261739_YXBwbGljYXRpb24tZGVtbw==.1\u0026quot;.\u0026quot;YXBwbGljYXRpb24tZGVtbw==.1\u0026quot;.total   Delete data:  delete from root.skywalking.service_traffic delete from root.skywalking.service_traffic where time \u0026lt; 1637919540000      ","excerpt":"This document is one of the outcomes of Apache IoTDB - Apache SkyWalking Adapter in Summer 2021 of …","ref":"/blog/2021-12-08-application-guide-of-iotdb-storage-option/","title":"The Application Guide of Apache IoTDB Storage Option"},{"body":"Non-breaking breakpoints are breakpoints specifically designed for live production environments. With non-breaking breakpoints, reproducing production bugs locally or in staging is conveniently replaced with capturing them directly in production.\nLike regular breakpoints, non-breaking breakpoints can be:\n placed almost anywhere added and removed at will set to fire on specific conditions expose internal application state persist as long as desired (even between application reboots)  The last feature is especially useful given non-breaking breakpoints can be left in production for days, weeks, and even months at a time while waiting to capture behavior that happens rarely and unpredictably.\nHow do non-breaking breakpoints work? If you\u0026rsquo;re familiar with general distributed tracing concepts, such as \u0026ldquo;traces\u0026rdquo; and \u0026ldquo;spans\u0026rdquo;, then you\u0026rsquo;re already broadly familiar with how non-breaking breakpoints work. Put simply, non-breaking breakpoints are small fragments of code added during runtime that, upon the proper conditions, save a portion of the application\u0026rsquo;s current state, and resume normal execution. In SkyWalking, this can be implemented by simply opening a new local span, adding some tags, and closing the local span.\nWhile this process is relatively simple, the range of functionality that can be achieved through this technique is quite impressive. 
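To make that concrete, here is a minimal sketch using the SkyWalking Java agent tracing toolkit (it assumes the apm-toolkit-trace dependency and a running Java agent, and Order is just a stand-in domain class; it illustrates the local-span-plus-tags idea rather than any Source++ internals): import org.apache.skywalking.apm.toolkit.trace.ActiveSpan; import org.apache.skywalking.apm.toolkit.trace.Trace; public class CheckoutService { /* the agent opens a local span when the annotated method starts and closes it when the method returns */ @Trace(operationName = \u0026#34;nbb/checkout-state\u0026#34;) public void captureState(Order order) { /* each tag becomes part of the reported span, i.e. the captured state */ ActiveSpan.tag(\u0026#34;order.id\u0026#34;, String.valueOf(order.getId())); ActiveSpan.tag(\u0026#34;order.total\u0026#34;, String.valueOf(order.getTotal())); } }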
Save the current and global variables to create a non-breaking breakpoint; add the ability to format log messages to create just-in-time logging; add the ability to trigger metric telemetry to create real-time KPI monitoring. If you keep moving in this direction, you eventually enter the realm of live debugging/coding, and this is where Source++ comes in.\nLive Coding Platform Source++ is an open-source live coding platform designed for production environments, powered by Apache SkyWalking. Using Source++, developers can add breakpoints, logs, metrics, and distributed tracing to live production software in real-time on-demand, right from their IDE or CLI. While capable of stand-alone deployment, the latest version of Source++ makes it easier than ever to integrate into existing Apache SkyWalking installations. This process can be completed in a few minutes and is easy to customize for your specific needs.\nFor a better idea of how Source++ works, take a look at the following diagram:\nIn this diagram, blue components represent existing SkyWalking architecture, black components represent new Source++ architecture, and the red arrows show how non-breaking breakpoints make their way from production to IDEs. A process that is facilitated by Source++ components: Live Probe, Live Processors, Live Platform, and Live Interface.\nLive Probe The Live Probe is currently available for JVM and Python applications. It runs alongside the SkyWalking agent and is responsible for dynamically adding and removing code fragments based on valid instrumentation requests from developers. These code fragments in turn make use of the SkyWalking agent\u0026rsquo;s internal APIs to facilitate production instrumentation.\nLive Processors Live Processors are responsible for finding, extracting, and transforming data found in distributed traces produced via live probes. They run alongside SkyWalking collectors and implement additional post-processing logic, such as PII redaction. Live processors work via uniquely identifiable tags (prefix spp.) added previously by live probes.\nOne could easily view a non-breaking breakpoint ready for processing using Rocketbot, however, it will look like this:\nEven though the above does not resemble what\u0026rsquo;s normally thought of as a breakpoint, the necessary information is there. With live processors added to your SkyWalking installation, this data is refined and may be viewed more traditionally via live interfaces.\nLive Platform The Live Platform is the core part of the Source++ architecture. Unlike the live probe and processors, the live platform does not have a direct correlation with SkyWalking components. It is a standalone server responsible for validating and distributing production breakpoints, logs, metrics, and traces. Each component of the Source++ architecture (probes, processors, interfaces) communicates with each other through the live platform. It is important to ensure the live platform is accessible to all of these components.\nLive Interface Finally, with all the previous parts installed, we\u0026rsquo;re now at the component software developers will find the most useful. A Live Interface is what developers use to create, manage, and view non-breaking breakpoints, and so on. There are a few live interfaces available:\n JetBrains Plugin CLI  With the Live Instrument Processor enabled, and the JetBrains Plugin installed, non-breaking breakpoints appear as such:\nThe above should be a sight far more familiar to software developers. 
Beyond the fact that you can\u0026rsquo;t step through execution, non-breaking breakpoints look and feel just like regular breakpoints.\n For more details and complete setup instructions, please visit:\n https://github.com/sourceplusplus/deploy-skywalking  ","excerpt":"Non-breaking breakpoints are breakpoints specifically designed for live production environments. …","ref":"/blog/2021-12-06-extend-skywalking-with-nbb/","title":"Extending Apache SkyWalking with non-breaking breakpoints"},{"body":"SkyWalking Kubernetes Helm Chart 4.2.0 is released. Go to downloads page to find release tars.\n Fix Can\u0026rsquo;t evaluate field Capabilities in type interface{}. Update the document let that all docker images use the latest version. Fix missing nodes resource permission when the OAP using k8s-mesh analyzer. Fix bug that customized config files are not loaded into es-init job. Add skywalking satellite support.  ","excerpt":"SkyWalking Kubernetes Helm Chart 4.2.0 is released. Go to downloads page to find release tars.\n Fix …","ref":"/events/release-apache-skywalking-kubernetes-helm-chart-4.2.0/","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.2.0"},{"body":"SkyWalking Satellite 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Support partition queue. Using byte array to transmit the ALS streaming, Native tracing segment and log, reducing en/decoding cpu usage. Support using the new ALS protocol to transmit the Envoy accesslog. Support transmit the Native Meter Batch protocol.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skwaylking-satellite-0-4-0/","title":"Release Apache SkyWalking Satellite 0.4.0"},{"body":"SkyWalking 8.9.0 is released. Go to downloads page to find release tars.\nChanges by Version\nProject  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. 
Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.9.0 is released. 
Go to downloads page to find release tars.\nChanges by Version\nProject …","ref":"/events/release-apache-skywalking-apm-8-9-0/","title":"Release Apache SkyWalking APM 8.9.0"},{"body":"Chaos Mesh is an open-source cloud-native chaos engineering platform. With Chaos Mesh, users can conveniently inject faults into their services and, together with the Chaos Dashboard, monitor how the whole chaos experiment is running. However, monitoring the chaos experiment itself does not tell us how the performance of the application changes. From the perspective of system observability, the progress of a chaos experiment alone may not give us the full picture of a failure, which also hinders further understanding and debugging of the system and the failure.\nApache SkyWalking is an open-source APM (Application Performance Monitor) system that provides monitoring, tracing and diagnosis capabilities for cloud-native services. SkyWalking can collect Events, show in its Dashboard which events happened in a distributed system, and visualize how different Events affect service performance. Combined with Chaos Mesh, it provides monitoring of the impact that chaos experiments have on services.\nThis tutorial shares how to combine SkyWalking with Chaos Mesh and use Event monitoring to understand, in real time, how chaos experiments affect application performance.\nPrerequisites  Create a SkyWalking cluster; see the SkyWalking Readme for details. Deploy Chaos Mesh; installation with helm is recommended. Install the Java testing tool JMeter (other tools also work; it is only used to add load to the service). If you only need a demo, the chaos-mesh-on-skywalking repository can be used as a reference configuration.  Step 1 - Access the SkyWalking cluster After installing SkyWalking, you can open its UI, but since no service is being monitored yet, you still need to add a service and instrument it with an agent. This article uses the lightweight microservice framework Spring Boot as the instrumented target to build a simple demo environment.\nYou can create it from the demo-deployment.yaml file in the chaos-mesh-on-skywalking repository and deploy it with kubectl apply -f demo-deployment.yaml -n skywalking. Once the deployment succeeds, the monitored service shows up in the SkyWalking UI in real time.\nNote: because Spring Boot also listens on port 8080, avoid conflicting with the SkyWalking port when forwarding, for example kubectl port-forward svc/spring-boot-skywalking-demo 8079:8080 -n skywalking.\nStep 2 - Deploy the SkyWalking Kubernetes Event Exporter The SkyWalking Kubernetes Event Exporter watches and filters Events in a Kubernetes cluster. You set filter conditions to select the Events you need and send them to the SkyWalking backend, so that SkyWalking can show when Events in your Kubernetes cluster affect the metrics of your services. To deploy it with a single command, create a YAML file based on this configuration, set the filters and exporters parameters, and deploy it with kubectl apply.\nStep 3 - Put load on the service with JMeter To make the effects easier to observe, first add load to the Spring Boot service. This article uses JMeter, a widely used Java load-testing tool, to stress the service.\nStress localhost:8079 with JMeter, using 5 threads that generate load continuously. The SkyWalking Dashboard shows that the success rate is currently 100% and the service load is about 5300 CPM (Calls Per Minute).\nStep 4 - Inject faults with Chaos Mesh and observe the effect With these preparations in place, you can use the Chaos Dashboard to simulate stress scenarios and watch how service performance changes while the experiment runs.\nThe following uses different Stress Chaos configurations and observes the corresponding changes in service performance:\n  CPU load 10%, memory load 128 MB.\nThe start and end points of the chaos experiment can be shown on the charts with the switch on the right side; hovering over a marker shows whether it is the Applied or Recovered point of the experiment. In the period between the two green markers, the rate at which the service handles calls drops to 4929 CPM, and performance returns to normal after the experiment ends.\n  With CPU load increased to 50%, the service throughput drops further, to 4307 CPM.\n  In the extreme case of 100% CPU load, throughput drops to 40% of the level seen without any chaos experiment.\n  Because process scheduling on Linux does not let a single process occupy the CPU all the time, even in the extreme case of a fully loaded CPU the deployed Spring Boot demo can still handle 40% of the requests.\nSummary By combining SkyWalking with Chaos Mesh, we can clearly see when a service is affected by a chaos experiment and how the service performs after chaos is injected. Together they make it easy to observe how a service behaves under all kinds of extreme conditions, which strengthens our confidence in the service.\nChaos Mesh has grown a lot in 2021. To learn more about how users practice chaos engineering, so that we can keep improving the support we provide, the community has launched a Chaos Mesh user survey; you are welcome to take part, thank you!\nhttps://www.surveymonkey.com/r/X78WQPC\nEveryone is welcome to join the Chaos Mesh community: join the Chaos Mesh channel project-chaos-mesh under the CNCF Slack (slack.cncf.io) and take part in the discussion and development of the project! If you find bugs or missing features while using it, you can also open an Issue or PR directly on GitHub (https://github.com/chaos-mesh).\n","excerpt":"Chaos Mesh is an open-source cloud-native chaos engineering platform. With Chaos Mesh, users can conveniently inject faults into their services and, together with the Chaos Dashboard, monitor how the whole chaos experiment is running. However, …","ref":"/zh/2021-11-29-better-observability-for-chaos-engineering/","title":"Chaos Mesh X SkyWalking: Observable Chaos Engineering"},{"body":"This plugin is one of the outcomes of Apache IoTDB - Apache SkyWalking Adapter in Summer 2021 of Open Source Promotion Plan. The design and development work is under the guidance of @jixuan1989 from IoTDB and @wu-sheng from SkyWalking. Thanks for their guidance and the help from the community.\nIoTDB Storage Plugin Setup IoTDB is a time-series database from Apache, which is one of the storage plugin options. If you want to use IoTDB as SkyWalking backend storage, please refer to the following configuration.\nIoTDB storage plugin is still in progress. 
Its efficiency will improve in the future.\nstorage:selector:${SW_STORAGE:iotdb}iotdb:host:${SW_STORAGE_IOTDB_HOST:127.0.0.1}rpcPort:${SW_STORAGE_IOTDB_RPC_PORT:6667}username:${SW_STORAGE_IOTDB_USERNAME:root}password:${SW_STORAGE_IOTDB_PASSWORD:root}storageGroup:${SW_STORAGE_IOTDB_STORAGE_GROUP:root.skywalking}sessionPoolSize:${SW_STORAGE_IOTDB_SESSIONPOOL_SIZE:16}fetchTaskLogMaxSize:${SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE:1000}# the max number of fetch task log in a requestAll connection related settings, including host, rpcPort, username, and password are found in application.yml. Please ensure the IoTDB version \u0026gt;= 0.12.3.\nIoTDB Introduction Apache IoTDB (Database for Internet of Things) is an IoT native database with high performance for data management and analysis, deployable on the edge and the cloud. It is a time-series database donated by Tsinghua University to Apache Foundation.\nThe Data Model of IoTDB We can use the tree structure to understand the data model of iotdb. If divided according to layers, from high to low is: Storage Group \u0026ndash; (LayerName) \u0026ndash; Device \u0026ndash; Measurement. From the top layer to a certain layer below it is called a Path. The top layer is Storage Group (must start with root), the penultimate layer is Device, and the bottom layer is Measurement. There can be many layers in the middle, and each layer is called a LayerName. For more information, please refer to the Data Model and Terminology in the official document of the version 0.12.x.\nThe Design of IoTDB Storage Plugin The Data Model of SkyWalking Each storage model of SkyWalking can be considered as a Model, which contains multiple Columns. Each Column has ColumnName and ColumnType attributes, representing the name and type of Column respectively. Each Column named ColumnName stores multiple Value of the ColumnType. From a relational database perspective, Model is a relational table and Column is the field in a relational table.\nSchema Design Since each LayerName of IoTDB is stored in memory, it can be considered as an index, and this feature can be fully utilized to improve IoTDB query performance. The default storage group is root.skywalking, it will occupy the first and the second layer of the path. The model name is stored at the next layer of the storage group (the third layer of the path), such as root.skywalking.model_name.\nSkyWalking has its own index requirement, but it isn\u0026rsquo;t applicable to IoTDB. Considering query frequency and referring to the implementation of the other storage options, we choose id, entity_id, node_type, service_id, service_group, trace_id as indexes and fix their appearance order in the path. The value of these indexed columns will occupy the last few layers of the path. If we don\u0026rsquo;t fix their order, we cannot map their value to column, since we only store their value in the path but don\u0026rsquo;t store their column name. The other columns are treated as Measurements.\nThe mapping from SkyWalking data model to IoTDB data model is below.\n   SkyWalking IoTDB     Database Storage Group (1st and 2nd layer of the path)   Model LayerName (3rd layer of the path)   Indexed Column stored in memory through hard-code   Indexed Column Value LayerName (after 3rd layer of the path)   Non-indexed Column Measurement   Non-indexed Value the value of Measurement    For general example There are model1(column11, column12), model2(column21, column22, column23), model3(column31). 
Underline indicates that the column requires to be indexed. In this example, modelx_name refers to the name of modelx, columnx_name refers to the name of columnx and columnx_value refers to the value of columnx.\nBefore these 3 model storage schema, here are some points we need to know.\n In order to avoid the value of indexed column contains dot(.), all of them should be wrapped in double quotation mark since IoTDB use dot(.) as the separator in the path. We use align by device in query SQL to get a more friendly result. For more information about align by device, please see DML (Data Manipulation Language) and Query by device alignment.  The path of them is following:\n The Model with index:  root.skywalking.model1_name.column11_value.column12_name root.skywalking.model2_name.column21_value.column22_value.column23_name   The Model without index:  root.skywalking.model3_name.column31_Name    Use select * from root.skywalking.modelx_name align by device respectively to get their schema and data. The SQL result is following:\n   Time Device column12_name     1637494020000 root.skywalking.model1_name.\u0026ldquo;column11_value\u0026rdquo; column12_value       Time Device column23_name     1637494020000 root.skywalking.model2_name.\u0026ldquo;column21_value\u0026rdquo;.\u0026ldquo;column22_value\u0026rdquo; column23_value       Time Device column31_name     1637494020000 root.skywalking.model3_name column31_value    For specific example Before 5 typical examples, here are some points we need to know.\n The indexed columns and their order: id, entity_id, node_type, service_id, service_group, trace_id. Other columns are treated as non indexed and stored as Measurement. The storage entity extends Metrics or Record contains a column time_bucket. The time_bucket column in SkyWalking Model can be converted to the timestamp of IoTDB when inserting data. We don\u0026rsquo;t need to store time_bucket separately. In the next examples, we won\u0026rsquo;t list time_bucket anymore. The Time in query result corresponds to the timestamp in insert SQL and API.   Metadata: service_traffic\nservice_traffic entity has 4 columns: id, name, node_type, service_group. When service_traffic entity includes a row with timestamp 1637494020000, the row should be as following: (Notice: the value of service_group is null.)     id name node_type service_group     ZTJlLXNlcnZpY2UtcHJvdmlkZXI=.1 e2e-service-provider 0     And the row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.service_traffic align by device)\n   Time Device name     1637494020000 root.skywalking.service_traffic.\u0026ldquo;ZTJlLXNlcnZpY2UtcHJvdmlkZXI=.1\u0026rdquo;.\u0026ldquo;0\u0026rdquo;.\u0026ldquo;null\u0026rdquo; e2e-service-provider    The value of id, node_type and service_group are stored in the path in the specified order. 
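For comparison, writing this example row by hand uses the same insert syntax shown in the application guide of this storage option (a sketch, assuming the root.skywalking storage group already exists; the timestamp and values are the ones from the table above): insert into root.skywalking.service_traffic.\u0026#34;ZTJlLXNlcnZpY2UtcHJvdmlkZXI=.1\u0026#34;.\u0026#34;0\u0026#34;.\u0026#34;null\u0026#34;(timestamp, name) values(1637494020000, \u0026#34;e2e-service-provider\u0026#34;)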
Notice: If those index value is null, it will be transformed to a string \u0026ldquo;null\u0026rdquo;.\nMetrics: service_cpm\nservice_cpm entity has 4 columns: id, service_id, total, value.\nWhen service_cpm entity includes a row with timestamp 1637494020000, the row should be as following:     id service_id total value     202111211127_ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1 ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1 4 4    And the row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.service_cpm align by device)\n   Time Device total value     1637494020000 root.skywalking.service_cpm.\u0026ldquo;202111211127_ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1\u0026rdquo;.\u0026ldquo;ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1\u0026rdquo; 4 4    The value of id and service_id are stored in the path in the specified order.\nTrace segment: segment\nsegment entity has 10 columns at least: id, segment_id, trace_id, service_id, service_instance_id, endpoint_id, start_time, latency, is_error, data_binary. In addition, it could have variable number of tags.\nWhen segment entity includes 2 rows with timestamp 1637494106000 and 1637494134000, these rows should be as following. The db.type and db.instance are two tags. The first data has two tags, and the second data doesn\u0026rsquo;t have tag.     id segment_id trace_id service_id service_instance_id endpoint_id start_time latency is_error data_binary db.type db.instance     id_1 segment_id_1 trace_id_1 service_id_1 service_instance_id_1 endpoint_id_1 1637494106515 1425 0 data_binary_1 sql testdb   id_2 segment_id_2 trace_id_2 service_id_2 service_instance_id_2 endpoint_id_2 2637494106765 1254 0 data_binary_2      And these row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.segment align by device)\n   Time Device start_time data_binary latency endpoint_id is_error service_instance_id segment_id \u0026ldquo;db.type\u0026rdquo; \u0026ldquo;db.instance\u0026rdquo;     1637494106000 root.skywalking.segment.\u0026ldquo;id_1\u0026rdquo;.\u0026ldquo;service_id_1\u0026rdquo;.\u0026ldquo;trace_id_1\u0026rdquo; 1637494106515 data_binary_1 1425 endpoint_id_1 0 service_instance_id_1 segment_id_1 sql testdb   1637494106000 root.skywalking.segment.\u0026ldquo;id_2\u0026rdquo;.\u0026ldquo;service_id_2\u0026rdquo;.\u0026ldquo;trace_id_2\u0026rdquo; 1637494106765 data_binary_2 1254 endpoint_id_2 0 service_instance_id_2 segment_id_2 null null    The value of id, service_id and trace_id are stored in the path in the specified order. Notice: If the measurement contains dot(.), it will be wrapped in double quotation mark since IoTDB doesn\u0026rsquo;t allow it. In order to align, IoTDB will append null value for those data without tag in some models.\nLog\nlog entity has 12 columns at least: id, unique_id, service_id, service_instance_id, endpoint_id, trace_id, trace_segment_id, span_id, content_type, content, tags_raw_data, timestamp. In addition, it could have variable number of tags. When log entity includes a row with timestamp 1637494052000, the row should be as following and the level is a tag.     
id unique_id service_id service_instance_id endpoint_id trace_id trace_segment_id span_id content_type content tags_raw_data timestamp level     id_1 unique_id_1 service_id_1 service_instance_id_1 endpoint_id_1 trace_id_1 trace_segment_id_1 0 1 content_1 tags_raw_data_1 1637494052118 INFO    And the row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.log align by device)\n   Time Device unique_id content_type span_id tags_raw_data \u0026ldquo;timestamp\u0026rdquo; level service_instance_id content trace_segment_id     1637494052000 root.skywalking.\u0026ldquo;id_1\u0026rdquo;.\u0026ldquo;service_id_1\u0026rdquo;.\u0026ldquo;trace_id_1\u0026rdquo; unique_id_1 1 0 tags_raw_data_1 1637494052118 INFO service_instance_id_1 content_1 trace_segment_id_1    The value of id, service_id and trace_id are stored in the path in the specified order. Notice: If the measurement named timestamp, it will be wrapped in double quotation mark since IoTDB doesn\u0026rsquo;t allow it.\nProfiling snapshots: profile_task_segment_snapshot\nprofile_task_segment_snapshot entity has 6 columns: id, task_id, segment_id, dump_time, sequence, stack_binary. When profile_task_segment_snapshot includes a row with timestamp 1637494131000, the row should be as following.     id task_id segment_id dump_time sequence stack_binary     id_1 task_id_1 segment_id_1 1637494131153 0 stack_binary_1    And the row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.profile_task_segment_snapshot align by device)\n   Time Device sequence dump_time stack_binary task_id segment_id     1637494131000 root.skywalking.profile_task_segment_snapshot.\u0026ldquo;id_1\u0026rdquo; 0 1637494131153 stack_binary_1 task_id_1 segment_id_1    The value of id is stored in the path in the specified order.\nQuery In this design, part of the data is stored in memory through LayerName, so data from the same Model is spread across multiple devices. Queries often need to cross multiple devices. But in this aspect, IoTDB\u0026rsquo;s support is not perfect in cross-device aggregation query, sort query and pagination query. In some cases, we have to use a violence method that query all data meets the condition and then aggregate, sort or paginate them. So it might not be efficient. For detailed descriptions, please refer to the Discussion submitted in IoTDB community below.\n Discussion:  一个有关排序查询的问题(A problem about sort query)#3888 一个有关聚合查询的问题(A problem about aggregation query)#3907    Query SQL for the general example above:\n-- query all data in model1 select * from root.skywalking.model1_name align by device; -- query the data in model2 with column22_value=\u0026#34;test\u0026#34; select * from root.skywalking.model2_name.*.\u0026#34;test\u0026#34; align by device; -- query the sum of column23 in model2 and group by column21 select sum(column23) from root.skywalking.model2_name.*.* group by level = 3; iotdb-cli is a useful tools to connect and visit IoTDB server. More information please refer Command Line Interface(CLI)\n","excerpt":"This plugin is one of the outcomes of Apache IoTDB - Apache SkyWalking Adapter in Summer 2021 of …","ref":"/blog/2021-11-23-design-of-iotdb-storage-option/","title":"The Design of Apache IoTDB Storage Option"},{"body":"SkyWalking Infra E2E 1.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support using setup.init-system-environment to import environment. Support body and headers in http trigger. Add install target in makefile. 
Stop trigger when cleaning up. Change interval setting to Duration style. Add reasonable default cleanup.on. Support float value compare when type not match Support reuse verify.cases. Ignore trigger when not set. Support export KUBECONFIG to the environment. Support using setup.kind.import-images to load local docker images. Support using setup.kind.expose-ports to declare the resource port for host access. Support save pod/container std log on the Environment.  Bug Fixes  Fix that trigger is not continuously triggered when running e2e trigger. Migrate timeout config to Duration style and wait for node ready in KinD setup. Remove manifest only could apply the default namespace resource.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Infra E2E 1.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skywalking-infra-e2e-1-1-0/","title":"Release Apache SkyWalking Infra E2E 1.1.0"},{"body":"SkyWalking Cloud on Kubernetes 0.4.0 is released. Go to downloads page to find release tars.\n  Support special characters in the metric selector of HPA metric adapter.\n  Add the namespace to HPA metric name.\n  Features\n Add Java agent injector. Add JavaAgent and Storage CRDs of the operator.    Vulnerabilities\n CVE-2021-3121: An issue was discovered in GoGo Protobuf before 1.3.2. plugin/unmarshal/unmarshal.go lacks certain index validation CVE-2020-29652: A nil pointer dereference in the golang.org/x/crypto/ssh component through v0.0.0-20201203163018-be400aefbc4c for Go allows remote attackers to cause a denial of service against SSH servers.    Chores\n Bump up GO to 1.17. Bump up k8s api to 0.20.11. Polish documents. Bump up SkyWalking OAP to 8.8.1.    ","excerpt":"SkyWalking Cloud on Kubernetes 0.4.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-4-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.4.0"},{"body":"SkyWalking Satellite 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support load-balance GRPC client with the static server list. Support load-balance GRPC client with the Kubernetes selector. Support transmit Envoy ALS v2/v3 protocol. Support transmit Envoy Metrics v2/v3 protocol.  Bug Fixes  Fix errors when converting meter data from histogram and summary.#75  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skwaylking-satellite-0-3-0/","title":"Release Apache SkyWalking Satellite 0.3.0"},{"body":"SkyWalking Java Agent 8.8.0 is released. Go to downloads page to find release tars. Changes by Version\n8.8.0  Split Java agent from the main monorepo. It is a separate repository and going to release separately. Support JDK 8-17 through upgrading byte-buddy to 1.11.18. Upgrade JDK 11 in dockerfile and remove unused java_opts. DataCarrier changes a #consume API to add properties as a parameter to initialize consumer when use Class\u0026lt;? extends IConsumer\u0026lt;T\u0026gt;\u0026gt; consumerClass. Support Multiple DNS period resolving mechanism Modify Tags.STATUS_CODE field name to Tags.HTTP_RESPONSE_STATUS_CODE and type from StringTag to IntegerTag, add Tags.RPC_RESPONSE_STATUS_CODE field to hold rpc response code value. 
Fix kafka-reporter-plugin shade package conflict Add all config items to agent.conf file for convenient containerization use cases. Advanced Kafka Producer configuration enhancement. Support mTLS for gRPC channel. fix the bug that plugin record wrong time elapse for lettuce plugin fix the bug that the wrong db.instance value displayed on Skywalking-UI when existing multi-database-instance on same host port pair. Add thrift plugin support thrift TMultiplexedProcessor. Add benchmark result for exception-ignore plugin and polish plugin guide. Provide Alibaba Druid database connection pool plugin. Provide HikariCP database connection pool plugin. Fix NumberFormat exception in jdbc-commons plugin when MysqlURLParser parser jdbcurl Provide Alibaba Fastjson parser/generator plugin. Provide Jackson serialization and deserialization plugin. Fix a tracing context leak of SpringMVC plugin, when an internal exception throws due to response can\u0026rsquo;t be found. Make GRPC log reporter sharing GRPC channel with other reporters of agent. Remove config items of agent.conf, plugin.toolkit.log.grpc.reporter.server_host, plugin.toolkit.log.grpc.reporter.server_port, and plugin.toolkit.log.grpc.reporter.upstream_timeout. rename plugin.toolkit.log.grpc.reporter.max_message_size to log.max_message_size. Implement Kafka Log Reporter. Add config item of agnt.conf, plugin.kafka.topic_logging. Add plugin to support Apache HttpClient 5. Format SpringMVC \u0026amp; Tomcat EntrySpan operation name to METHOD:URI. Make HTTP method in the operation name according to runtime, rather than previous code-level definition, which used to have possibilities including multiple HTTP methods. Fix the bug that httpasyncclient-4.x-plugin does not take effect every time. Add plugin to support ClickHouse JDBC driver. Fix version compatibility for JsonRPC4J plugin. Add plugin to support Apache Kylin-jdbc 2.6.x 3.x 4.x Fix instrumentation v2 API doesn\u0026rsquo;t work for constructor instrumentation. Add plugin to support okhttp 2.x Optimize okhttp 3.x 4.x plugin to get span time cost precisely Adapt message header properties of RocketMQ 4.9.x  Documentation All issues and pull requests are here\n","excerpt":"SkyWalking Java Agent 8.8.0 is released. Go to downloads page to find release tars. Changes by …","ref":"/events/release-apache-skywalking-java-agent-8-8-0/","title":"Release Apache SkyWalking Java Agent 8.8.0"},{"body":"SkyWalking CLI 0.9.0 is released. Go to downloads page to find release tars.\nFeatures  Add the sub-command dependency instance to query instance relationships (#117)  Bug Fixes  fix: multiple-linear command\u0026rsquo;s labels type can be string type (#122) Add missing dest-service-id dest-service-name to metrics linear command (#121) Fix the wrong name when getting destInstance flag (#118)  Chores  Upgrade Go version to 1.16 (#120) Migrate tests to infra-e2e, overhaul the flags names (#119) Publish Docker snapshot images to ghcr (#116) Remove dist directory when build release source tar (#115)  ","excerpt":"SkyWalking CLI 0.9.0 is released. Go to downloads page to find release tars.\nFeatures  Add the …","ref":"/events/release-apache-skywalking-cli-0-9-0/","title":"Release Apache SkyWalking CLI 0.9.0"},{"body":"SkyWalking Eyes 0.2.0 is released. 
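(For readers new to the project: SkyWalking Eyes ships a CLI named license-eye; a typical header run, sketched here and subject to the flags documented in the project README, is license-eye -c .licenserc.yaml header check to report files with missing license headers, followed by license-eye -c .licenserc.yaml header fix to add them.)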
Go to downloads page to find release tars.\n  Dependency License\n Support resolving go.mod for Go Support resolving pom.xml for maven (#50) Support resolving jars' licenses (#53) Support resolving npm dependencies' licenses (#48) Support saving dependencies' licenses (#69) Add dependency check to check dependencies license compatibilities (#58)    License Header\n fix command supports more languages:  Add support for plantuml (#42) Add support for PHP (#40) Add support for Twig template language (#39) Add support for Smarty template language (#38) Add support for MatLab files (#37) Add support for TypeScript language files (#73) Add support for nextflow files (#65) Add support for perl files (#63) Add support for ini extension (#24) Add support for R files (#64) Add support for .rst files and allow fixing header of a single file (#25) Add support for Rust files (#29) Add support for bat files (#32)   Remove .tsx from XML language extensions Honor Python\u0026rsquo;s coding directive (#68) Fix file extension conflict between RenderScript and Rust (#66) Add comment type to cython declaration (#62) header fix: respect user configured license content (#60) Expose license-location-threshold as config item (#34) Fix infinite recursive calls when containing symbolic files (#33) defect: avoid crash when no comment style is found (#23)    Project\n Enhance license identification (#79) Support installing via go install (#76) Speed up the initialization phase (#75) Resolve absolute path in .gitignore to relative path (#67) Reduce img size and add npm env (#59) Make the config file and log level in GitHub Action configurable (#56, #57) doc: add a PlantUML activity diagram of header fixing mechanism (#41) Fix bug: license file is not found but reported message is nil (#49) Add all well-known licenses and polish normalizers (#47) Fix compatibility issues in Windows (#44) feature: add reasonable default config to allow running in a new repo without copying config file (#28) chore: only build linux binary when building inside docker (#26) chore: upgrade to go 1.16 and remove go-bindata (#22) Add documentation about how to use via docker image (#20)    ","excerpt":"SkyWalking Eyes 0.2.0 is released. Go to downloads page to find release tars.\n  Dependency License …","ref":"/events/release-apache-skywalking-eyes-0-2-0/","title":"Release Apache SkyWalking Eyes 0.2.0"},{"body":"SkyWalking Client JS 0.7.0 is released. Go to downloads page to find release tars.\n Support setting time interval to report segments. Fix segments report only send once. Fix apache/skywalking#7335. Fix apache/skywalking#7793. Fix firstReportedError for SPA.  ","excerpt":"SkyWalking Client JS 0.7.0 is released. Go to downloads page to find release tars.\n Support setting …","ref":"/events/release-apache-skywalking-client-js-0-7-0/","title":"Release Apache SkyWalking Client JS 0.7.0"},{"body":"SkyWalking 8.8.1 is released. Go to downloads page to find release tars.\nThis is a bugfix version that fixes several important bugs in previous version 8.8.0.\nChanges OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.8.1 is released. 
Go to downloads page to find release tars.\nThis is a bugfix version …","ref":"/events/release-apache-skywalking-apm-8-8-1/","title":"Release Apache SkyWalking APM 8.8.1"},{"body":"Kai Wan has been involved in SkyWalking for over half a year since the first PR(Dec 21, 2020). He majorly focuses on the Service Mesh and metrics analysis engine(MAL). And recently add the support of OpenAPI specification into SkyWalking.\nHe learnd fast, and dedicates hours every day on the project, and has finished 37 PRs 11,168 LOC++ 1,586 LOC\u0026ndash;. In these days, he is working with PMC and infra-e2e team to upgrade our main repository\u0026rsquo;s test framework to the NGET(Next Generation E2E Test framework).\nIt is our honor to have him join the team.\n","excerpt":"Kai Wan has been involved in SkyWalking for over half a year since the first PR(Dec 21, 2020). He …","ref":"/events/welcome-kai-wan-to-join-the-pmc/","title":"Welcome Kai Wan (万凯) to join the PMC"},{"body":"SkyWalking 8.8.0 is released. Go to downloads page to find release tars.\nThis is a first OAP server + UI release, Java agent will be release independently. Check the latest compatibility document to find suitable agent releases.\nChanges by Version\nProject  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. 
Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.8.0 is released. 
Go to downloads page to find release tars.\nThis is a first OAP server …","ref":"/events/release-apache-skywalking-apm-8-8-0/","title":"Release Apache SkyWalking APM 8.8.0"},{"body":"SkyWalking CLI 0.8.0 is released. Go to downloads page to find release tars.\n  Features\n Add profile command Add logs command Add dependency command Support query events protocol Support auto-completion for bash and powershell    Bug Fixes\n Fix missing service instance name in trace command    Chores\n Optimize output by adding color to help information Set display style explicitly for commands in the test script Set different default display style for different commands Add scripts for quick install Update release doc and add scripts for release split into multiple workflows to speed up CI    ","excerpt":"SkyWalking CLI 0.8.0 is released. Go to downloads page to find release tars.\n  Features\n Add profile …","ref":"/events/release-apache-skywalking-cli-0-8-0/","title":"Release Apache SkyWalking CLI 0.8.0"},{"body":"SkyWalking Satellite 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Set MAXPROCS according to real cpu quota. Update golangci-lint version to 1.39.0. Update protoc-gen-go version to 1.26.0. Add prometheus-metrics-fetcher plugin. Add grpc client plugin. Add nativelog-grpc-forwarder plugin. Add meter-grpc-forwarder plugin. Support native management protocol. Support native tracing protocol. Support native profile protocol. Support native CDS protocol. Support native JVM protocol. Support native Meter protocol. Support native Event protocol. Support native protocols E2E testing. Add Prometheus service discovery in Kubernetes.  Bug Fixes  Fix the data race in mmap queue. Fix channel blocking in sender module. Fix pipes.sender.min_flush_events config could not support min number. Remove service name and instance name labels from Prometheus fetcher.  Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Set …","ref":"/events/release-apache-skwaylking-satellite-0-2-0/","title":"Release Apache SkyWalking Satellite 0.2.0"},{"body":"SkyWalking Python 0.7.0 is released. Go to downloads page to find release tars.\n  Feature:\n Support collecting and reporting logs to backend (#147) Support profiling Python method level performance (#127 Add a new sw-python CLI that enables agent non-intrusive integration (#156) Add exponential reconnection backoff strategy when OAP is down (#157) Support ignoring traces by http method (#143) NoopSpan on queue full, propagation downstream (#141) Support agent namespace. (#126) Support secure connection option for GRPC and HTTP (#134)    Plugins:\n Add Falcon Plugin (#146) Update sw_pymongo.py to be compatible with cluster mode (#150) Add Python celery plugin (#125) Support tornado5+ and tornado6+ (#119)    Fixes:\n Remove HTTP basic auth credentials from log, stacktrace, segment (#152) Fix @trace decorator not work (#136) Fix grpc disconnect, add SW_AGENT_MAX_BUFFER_SIZE to control buffer queue size (#138)    Others:\n Chore: bump up requests version to avoid license issue (#142) Fix module wrapt as normal install dependency (#123) Explicit component inheritance (#132) Provide dockerfile \u0026amp; images for easy integration in containerized scenarios (#159)    ","excerpt":"SkyWalking Python 0.7.0 is released. 
Go to downloads page to find release tars.\n  Feature:\n Support …","ref":"/events/release-apache-skywalking-python-0-7-0/","title":"Release Apache SkyWalking Python 0.7.0"},{"body":"","excerpt":"","ref":"/tags/python/","title":"Python"},{"body":"SkyWalking Infra E2E 1.0.0 is released. Go to downloads page to find release tars.\nFeatures  Support using docker-compose to setup the environment. Support using the HTTP request as trigger. Support verify test case by command-line or file with retry strategy. Support GitHub Action.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Infra E2E 1.0.0 is released. Go to downloads page to find release tars.\nFeatures  Support …","ref":"/events/release-apache-skywalking-infra-e2e-1-0-0/","title":"Release Apache SkyWalking Infra E2E 1.0.0"},{"body":"The Java Agent of Apache SkyWalking has supported profiling since v7.0.0, and it enables users to troubleshoot the root cause of performance issues, and now we bring it into Python Agent. In this blog, we will show you how to use it, and we will introduce the mechanism of profiling.\nHow to use profiling in Python Agent This feature is released in Python Agent at v0.7.0. It is turned on by default, so you don\u0026rsquo;t need any extra configuration to use it. You can find the environment variables about it here.\nHere are the demo codes of an intentional slow application.\nimport time def method1(): time.sleep(0.02) return \u0026#39;1\u0026#39; def method2(): time.sleep(0.02) return method1() def method3(): time.sleep(0.02) return method2() if __name__ == \u0026#39;__main__\u0026#39;: import socketserver from http.server import BaseHTTPRequestHandler class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): def do_POST(self): method3() time.sleep(0.5) self.send_response(200) self.send_header(\u0026#39;Content-Type\u0026#39;, \u0026#39;application/json\u0026#39;) self.end_headers() self.wfile.write(\u0026#39;{\u0026#34;song\u0026#34;: \u0026#34;Despacito\u0026#34;, \u0026#34;artist\u0026#34;: \u0026#34;Luis Fonsi\u0026#34;}\u0026#39;.encode(\u0026#39;ascii\u0026#39;)) PORT = 19090 Handler = SimpleHTTPRequestHandler with socketserver.TCPServer((\u0026#34;\u0026#34;, PORT), Handler) as httpd: httpd.serve_forever() We can start it with SkyWalking Python Agent CLI without changing any application code now, which is also the latest feature of v0.7.0. We just need to add sw-python run before our start command(i.e. sw-python run python3 main.py), to start the application with python agent attached. More information about sw-python can be found there.\nThen, we should add a new profile task for the / endpoint from the SkyWalking UI, as shown below.\nWe can access it by curl -X POST http://localhost:19090/, after that, we can view the result of this profile task on the SkyWalking UI.\nThe mechanism of profiling When a request lands on an application with the profile function enabled, the agent begins the profiling automatically if the request’s URI is as required by the profiling task. A new thread is spawned to fetch the thread dump periodically until the end of request.\nThe agent sends these thread dumps, called ThreadSnapshot, to SkyWalking OAPServer, and the OAPServer analyzes those ThreadSnapshot(s) and gets the final result. 
It will take a method invocation with the same stack depth and code signature as the same operation, and estimate the execution time of each method from this.\nLet\u0026rsquo;s demonstrate how this analysis works through the following example. Suppose we have such a program below and we profile it at 10ms intervals.\ndef main(): methodA() def methodA(): methodB() def methodB(): methodC() methodD() def methodC(): time.sleep(0.04) def methodD(): time.sleep(0.06) The agent collects a total of 10 ThreadSnapShot(s) over the entire time period(Diagram A). The first 4 snapshots represent the thread dumps during the execution of function C, and the last 6 snapshots represent the thread dumps during the execution of function D. After the analysis of OAPServer, we can see the result of this profile task on the SkyWalking Rocketbot UI as shown in the right of the diagram. With this result, we can clearly see the function call relationship and the time consumption situation of this program.\nDiagram A You can read more details of profiling theory from this blog.\nWe hope you enjoy the profile in the Python Agent, and if so, you can give us a star on Python Agent and SkyWalking on GitHub.\n","excerpt":"The Java Agent of Apache SkyWalking has supported profiling since v7.0.0, and it enables users to …","ref":"/blog/2021-09-12-skywalking-python-profiling/","title":"SkyWalking Python Agent Supports Profiling Now"},{"body":"SkyWalking Kubernetes Helm Chart 4.1.0 is released. Go to downloads page to find release tars.\n Add missing service account to init job. Improve notes.txt and nodePort configuration. Improve ingress compatibility. Fix bug that customized config files are not loaded into es-init job. Add imagePullSecrets and node selector. Fix istio adapter description. Enhancement: allow mounting binary data files.  ","excerpt":"SkyWalking Kubernetes Helm Chart 4.1.0 is released. Go to downloads page to find release tars.\n Add …","ref":"/events/release-apache-skywalking-kubernetes-helm-chart-4.1.0/","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.1.0"},{"body":"GOUP hosted a webinar, and invited Sheng Wu to introduce Apache SkyWalking. This is a 1.5 hours presentation including the full landscape of Apache SkyWalking 8.x.\nChapter04 Session10 - Apache Skywalking by Sheng Wu   ","excerpt":"GOUP hosted a webinar, and invited Sheng Wu to introduce Apache SkyWalking. This is a 1.5 hours …","ref":"/blog/2021-08-01-skywalking-8-intro/","title":"[Webinar] SkyWalking 8.x Introduction"},{"body":"SkyWalking 8.7.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. 
Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. 
Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. 
Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.7.0 is released. Go to downloads page to find release tars. Changes by Version\nProject …","ref":"/events/release-apache-skywalking-apm-8-7-0/","title":"Release Apache SkyWalking APM 8.7.0"},{"body":"SkyWalking Client JS 0.6.0 is released. Go to downloads page to find release tars.\n Separate production and development environments when building. Upgrade packages to fix vulnerabilities. Fix headers could be null . Fix catching errors for http requests. Fix the firstReportedError is calculated with more types of errors.  ","excerpt":"SkyWalking Client JS 0.6.0 is released. Go to downloads page to find release tars.\n Separate …","ref":"/events/release-apache-skywalking-client-js-0-6-0/","title":"Release Apache SkyWalking Client JS 0.6.0"},{"body":"SkyWalking is an open source APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based architectures.\nFrom 2020, it has dominated the open source APM market in China, and expanded aggressively in North American, Europe and Asia\u0026rsquo;s other countries.\nWith over 6 years (2015-2021) of development, driven by the global open source community, SkyWalking now provides full stack observability covering metrics, tracing and logging, plus event detector, which are built based on various native and ecosystem solutions.\n Language agent-based(Java, Dot Net, Golang, PHP, NodeJS, Python, C++, LUA) in-process monitoring, is as powerful as commercial APM vendors' agents. Mostly auto-instrumentation, and good interactivity. Service Mesh Observability, working closely with Envoy and Istio teams. Transparent integration of popular metrics ecosystem. Accept metrics from Prometheus SDK, OpenTelemetry collectors, Zabbix agents, etc. Log collection with analysis capability from FluentD, Fluent-bit, Filebeat, etc. agents. Infrastructure monitoring, such as Linux and k8s, is out of the box.  The SkyWalking ecosystem was started by very few people. The community drives the project to cover real scenarios, from tracing to the whole APM field. 
Even today, more professional open source developers, powered by the vendors behind them, are bringing the project to a different level.\nTypically and most attractively, SkyWalking is going to build the first known open source APM specific database in the world, at least providing\n Time series-based database engine. Support traces/logs and metrics in the database core level. High performance with cluster mode and HPA. Reasonable resource cost.  We nearly doubled the number of contributors in the last year, from ~300 to over 500. The whole community is very energetic. Here, we want to thank our 47 committers(28 PMC members included), listed here, and over 400 other contributors.\nWe together built this humongous Apache Top Level project, and proved the stronge competitiveness of an open-source project.\nThis is a hard-won and impressive achievement. We won\u0026rsquo;t stop here. The trend is there, the ground is solid. We are going to build the top-level APM system relying on our open-source community.\n500 Contributors List    GitHub         1095071913 182148432** 295198088** 394102339** 437376068**   50168383 55846420** 826245622** 844067874 Ahoo-Wang   AirTrioa AlexanderWert AlseinX AngryMills Ax1an   BFergerson BZFYS CalvinKirs CharlesMaster ChaunceyLin5152   CommissarXia Cvimer DeadLion Doublemine Du-fei   ElderJames EvanLjp FatihErdem FeynmanZhou Fine0830   FingerLiu FrankyXu Gallardot GerryYuan HackerRookie   HarryFQ Heguoya Hen1ng HendSame Humbertzhang   IanCao IluckySi Indifer J-Cod3r JaredTan95   Jargon96 Jijun JoeKerouac JohnNiang Johor03   Jozdortraz Jtrust Just-maple KangZhiDong LazyLei   LiWenGu Lin1997 Linda-pan LiteSun Liu-XinYuan   MiracleDx Miss-you MoGuGuai-hzr MrYzys O-ll-O   Patrick0308 QHWG67 Qiliang QuanjieDeng RandyAbernethy   RedzRedz Runrioter SataQiu ScienJus SevenBlue2018   ShaoHans Shikugawa SoberChina SummerOfServenteen Switch-vov   TJ666 Technoboy- TerrellChen TeslaCN TheRealHaui   TinyAllen TomMD ViberW Videl WALL-E   WeihanLi WildWolfBang WillemJiang Wooo0 XhangUeiJong   Xlinlin YczYanchengzhe Yebemeto YoungHu YunaiV   YunfengGao Z-Beatles ZS-Oliver ZhHong ZhuoSiChen   a198720 a1vin-tian a526672351 acurtain adamni135   adermxzs adriancole** aeolusheath agile6v aix3   aiyanbo ajanthan alexkarezin alonelaval amogege   amwyyyy andyliyuze andyzzl aoxls arugal   ascrutae ascrutae** augustowebd aviaviavi bai-yang   beckhampu beckjin beiwangnull bigflybrother bootsrc   bostin brucewu-fly buxingzhe buzuotaxuan bwh12398**   c feng c1ay candyleer carllhw carlvine500   carrypann cheenursn cheetah012 chenbeitang chenglei**   chengshiwen chenmudu chenpengfei chenvista chess-equality   chestarss chidaodezhongsheng chopin-d clevertension clk1st   cngdkxw cnlangzi codeglzhang codelipenghui coder-yqj   coki230 compilerduck constanine coolbeevip crystaldust   cui-liqiang cuiweiwei cutePanda123 cyberdak cyejing   cyhii dafu-wu dagmom dalekliuhan** darcydai   dengliming devkanro devon-ye dickens7 dimaaan   dingdongnigetou dio divyakumarjain dmsolr dominicqi   donbing007 dsc6636926 dvsv2 dzx2018 echooymxq   efekaptan elk-g emschu eoeac evanljp**   evanxuhe feelwing1314 fgksgf fredster33 fuhuo   fulmicoton fushiqinghuan111 geektcp geomonlin ggndnn   gitter-badger givingwu glongzh gnr163 gonedays   grissom-grissom grissomsh guodongq guyukou gxthrj   gy09535 gzshilu hailin0 hanahmily haotian2015   haoyann hardzhang harvies heihaozi hepyu   heyanlong hi-sb honganan horber hsoftxl   huangyoje huliangdream huohuanhuan iluckysi innerpeacez   itsvse jasper-zsh jbampton jialong121 
jinlongwang   jjlu521016 jjtyro jmjoy jsbxyyx justeene   juzhiyuan jy00464346 kaanid kagaya85 karott   kayleyang kevinyyyy kezhenxu94 kikupotter kilingzhang   killGC kkl129 klboke ksewen kuaikuai   kun-song kylixs landonzeng langke93 langyan1022   langyizhao lazycathome leemove leizhiyuan libinglong   lijial lilien1010 limfriend linkinshi linliaoy   liqiangz liu-junchi liufei** liuhaoXD liuhaoyang   liuweiyi** liuyanggithup liuzhengyang liweiv lixin40**   lizl9** lkxiaolou llissery louis-zhou lpcy   lpf32 lsyf lucperkins lujiajing1126 lunamagic1978   lunchboxav lxin96** lxliuxuankb lytscu lyzhang1999   mage3k makefriend8 makingtime mantuliu maolie   margauxcabrera masterxxo maxiaoguang64 me** membphis   mestarshine mgsheng michaelsembwever mikkeschiren ming_flycash**   minquan.chen** misaya momo0313 moonming mrproliu   mrproliu** muyun12 nacx neatlife neeuq   nic-chen nickwongwong nikitap492 nileblack nisiyong   novayoung oatiz oflebbe olzhy onecloud360   osiriswd panniyuyu peng-yongsheng pengweiqhca potiuk   probeyang purgeyao qijianbo010 qinhang3 qiuyu-d   qjgszzx qq362220083 qqeasonchen qxo ralphgj   raybi-asus refactor2 remicollet rlenferink rootsongjc   rovast ruibaby s00373198 scolia sdanzo   seifeHu sergicastro shiluo34 sikelangya simonlei   sk163 snakorse songzhendong songzhian songzhian**   sonxy spacewander stalary stenio2011 stevehu   stone-wlg sungitly surechen swartz-k sxzaihua   tangxqa tanjunchen tankilo tanzhen** taskmgr   tbdpmi terranhu terrymanu tevahp thanq   thebouv tianyk tianyuak tincopper tinyu0   tom-pytel tristaZero tristan-tsl trustin tsuilouis   tuohai666 tzsword-2020 tzy1316106836 vcjmhg viktoryi   vision-ken viswaramamoorthy wallezhang wang-yeliang wang_weihan**   wangrzneu wankai123 wbpcode web-xiaxia webb2019   weiqiang-w weiqiang333 wendal wengangJi wenjianzhang   whfjam whl12345 willseeyou wilsonwu wind2008hxy   wingwong-knh withlin wl4g wqr2016 wu-sheng   wuguangkuo wujun8 wuwen5 wuxingye x22x22   xbkaishui xcaspar xdRight xiaoweiyu** xiaoxiangmoe   xiaoy00 xinfeingxia85 xingren23 xinzhuxiansheng xonze   xuanyu66 xuchangjunjx xudianyang yanbw yanfch   yang-xiaodong yangxb2010000 yanickxia yanmaipian yanmingbi   yantaowu yaojingguo yaowenqiang yazong ychandu   ycoe yimeng yu199195 yuqichou yushuqiang**   yuyujulin yxudong yymoth zaunist zaygrzx   zcai2 zeaposs zhang98722 zhanghao001 zhangjianweibj   zhangkewei zhangsean zhangxin** zhaoyuguang zhe1926   zhentaoJin zhongjianno1** zhousiliang163 zhuCheer zhyyu   zifeihan zijin-m zkscpqm zoidbergwill zoumingzm   zouyx zpf1989 zshit zxbu zygfengyuwuzu    ","excerpt":"SkyWalking is an open source APM (application performance monitor) system, especially designed for …","ref":"/blog/2021-07-12-500-contributors-mark/","title":"[Community win] SkyWalking achieved 500 contributors milestone."},{"body":"时间:2021 年 6 月 26 日\n地点:北京市海淀区西格玛大厦 B1 多功能厅\n视频回放:见 Bilibili\nApache SkyWalking Landscape  吴晟 Sheng Wu. Tetrate Founding Engineer, Apache Software Foundation board director. SkyWalking founder.  SkyWalking 2020-2021 年发展和后续计划\n微服务可观测性分析平台的探索与实践  凌若川 腾讯高级工程师  可观测性分析平台作为云原生时代微服务系统基础组件,开放性与性能是决定平台价值的核心要素。 复杂微服务应用场景与海量多维链路数据,对可观测性分析平台在开放性设计和各环节高性能实现带来诸多挑战。 本次分享中将重点梳理腾讯云微服务团队在构建云原生可观测性分析平台过程中遇到的挑战,介绍我们在架构设计与实现方面的探索与实践。\n 云原生时代微服务可观测性平台面临的性能与可用性挑战 腾讯云在构建高性能微服务可观测性分析平台的探索与实践 微服务可观测性分析平台架构的下一阶段演进方向展望  BanyanDB 数据模型背后的逻辑  高洪涛 Hongtao Gao. Tetrate SRE, SkyWalking PMC, Apache ShardingSphere PMC.  
BanyanDB 作为为处理 Apache SkyWalking 产生的 trace,log 和 metric 的数据而特别设计的数据库,其背后数据模型的抉择是非常与众不同的。 在本次分享中,我将根据 RUM 猜想来讨论为什么 BanyanDB 使用的数据模型对于 APM 数据而言是更加高效和可靠的。\n通过本次分享,观众可以:\n 理解数据库设计的取舍 了解 BanyanDB 的数据模型 认识到该模型对于 APM 类数据有特定的优势  Apache SkyWalking 如何做前端监控  范秋霞 Qiuxia Fan,Tetrate FE SRE,SkyWalking PMC.  Apache SkyWalking 对前端进行了监控与跟踪,分别有 Metric, Log, Trace 三部分。本次分享我会介绍页面性能指标的收集与计算,同时用案列进行分析,也会讲解 Log 的采集方法以及 Source Map 错误定位的实施。最后介绍浏览器端 Requets 的跟踪方法。\n通过本次分享,观众可以:\n 了解页面的性能指标以及收集计算方法 了解前端如何做错误日志收集 如何对页面请求进行跟踪以及跟踪的好处  一名普通工程师,该如何正确的理解开源精神?  王晔倞 Yeliang Wang. API7 Partner / Product VP.  开源精神,那也许是一种给于和获取的平衡,有给于才能有获取,有获取才会有给于的动力。无需指责别人只会获取,我们应该懂得开源是一种创造方式,一个没有创造欲和创造力的人加入开源也是无用的。\n通过本次分享,观众可以:\n 为什么国内一些程序员会对开源产生误解? 了解 “开源≠自由≠非商业” 的来龙去脉。 一名普通工程师,如何高效地向开源社区做贡献?  可观测性技术生态和 OpenTelemetry 原理及实践  陈一枭 腾讯. OpenTelemetry docs-cn maintainer、Tencent OpenTelemetry OTeam 创始人  综述云原生可观测性技术生态,介绍 OpenTracing,OpenMetrics,OpenTelemetry 等标准演进。介绍 OpenTelemetry 存在价值意义,介绍 OpenTelemetry 原理及其整体生态规划。介绍腾讯在 OpenTelemetry 方面的实践。\n本次分享内容如下:\n 云原生可观测性技术简介 OpenTelemetry 及其它规范简介 OpenTelemetry 原理 OpenTelemetry 在腾讯的应用及实践  Apache SkyWalking 事件采集系统更快定位故障  柯振旭 Zhenxu Ke,Tetrate SRE, Apache SkyWalking PMC. Apache Incubator PMC. Apache Dubbo committer.  通过本次分享,听众可以:\n 了解 SkyWalking 的事件采集系统; 了解上报事件至 SkyWalking 的多种方式; 学习如何利用 SkyWalking 采集的事件结合 metrics,分析目标系统的性能问题;  可观测性自动注入技术原理探索与实践  詹启新 Tencnet OpenTelemetry Oteam PMC  在可观测领域中自动注入已经成为重要的组成部分之一,其优异简便的使用方式并且可同时覆盖到链路、指标、日志,大大降低了接入成本及运维成本,属于友好的一种接入方式; 本次分享将介绍 Java 中的字节码注入技术原理,及在可观测领域的应用实践\n 常用的自动注入技术原理简介 介绍可观测性在 Java 落地的要点 opentelemetry-java-instrumentation 的核心原理及实现 opentelemetry 自动注入的应用实践  如何利用 Apache APISIX 提升 Nginx 的可观测性  金卫 Wei Jin, API7 Engineer Apache SkyWalking committer. Apache apisix-ingress-controller Founder. Apache APISIX PMC.  在云原生时代,动态和可观测性是 API 网关的标准特性。Apache APISIX 不仅覆盖了 Nginx 的传统功能,在可观测性上也和 SkyWalking 深度合作,大大提升了服务治理能力。本次分享会介绍如何无痛的提升 Nginx 的可观测性和 APISIX 在未来可观测性方面的规划。\n通过本次分享,观众可以:\n 通过 Apache APISIX 实现观测性的几种手段. 了解 Apache APISIX 高效且易用的秘诀. 结合 Apache skywalking 进一步提升可观测性.  ","excerpt":"时间:2021 年 6 月 26 日\n地点:北京市海淀区西格玛大厦 B1 多功能厅\n视频回放:见 Bilibili\nApache SkyWalking Landscape  吴晟 Sheng Wu. …","ref":"/zh/skywalking-day-2021/","title":"[视频] SkyWalking Day 2021 演讲视频"},{"body":"SkyWalking CLI 0.7.0 is released. Go to downloads page to find release tars.\n  Features\n Add GitHub Action for integration of event reporter    Bug Fixes\n Fix metrics top can\u0026rsquo;t infer the scope automatically    Chores\n Upgrade dependency crypto Refactor project to use goapi Move parseScope to pkg Update release doc    ","excerpt":"SkyWalking CLI 0.7.0 is released. Go to downloads page to find release tars.\n  Features\n Add GitHub …","ref":"/events/release-apache-skywalking-cli-0-7-0/","title":"Release Apache SkyWalking CLI 0.7.0"},{"body":"SkyWalking 8.6.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. 
Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics infomation during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  
Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.6.0 is released. Go to downloads page to find release tars. Changes by Version\nProject …","ref":"/events/release-apache-skywalking-apm-8-6-0/","title":"Release Apache SkyWalking APM 8.6.0"},{"body":"Abstract Apache SkyWalking hosts SkyWalkingDay Conference 2021 in June 26th, jointly with Tencent and Tetrate.\nWe are going to share SkyWalking\u0026rsquo;s roadmap, features, product experiences and open source culture.\nWelcome to join us.\nVenue Addr./地址 北京市海淀区西格玛大厦B1多功能厅\nDate June 26th.\nRegistration For Free Register for onsite or online\nSessions 10:00 - 10:20 Apache SkyWalking Landscape  吴晟 Sheng Wu. Tetrate Founding Engineer, Apache Software Foundation board director. SkyWalking founder.  SkyWalking 2020-2021年发展和后续计划\n10:20 - 10:50 微服务可观测性分析平台的探索与实践  凌若川 腾讯高级工程师  可观测性分析平台作为云原生时代微服务系统基础组件,开放性与性能是决定平台价值的核心要素。 复杂微服务应用场景与海量多维链路数据,对可观测性分析平台在开放性设计和各环节高性能实现带来诸多挑战。 本次分享中将重点梳理腾讯云微服务团队在构建云原生可观测性分析平台过程中遇到的挑战,介绍我们在架构设计与实现方面的探索与实践。\n 云原生时代微服务可观测性平台面临的性能与可用性挑战 腾讯云在构建高性能微服务可观测性分析平台的探索与实践 微服务可观测性分析平台架构的下一阶段演进方向展望  10:50 - 11:20 BanyanDB数据模型背后的逻辑  高洪涛 Hongtao Gao. Tetrate SRE, SkyWalking PMC, Apache ShardingSphere PMC.  BanyanDB作为为处理Apache SkyWalking产生的trace,log和metric的数据而特别设计的数据库,其背后数据模型的抉择是非常与众不同的。 在本次分享中,我将根据RUM猜想来讨论为什么BanyanDB使用的数据模型对于APM数据而言是更加高效和可靠的。\n通过本次分享,观众可以:\n 理解数据库设计的取舍 了解BanyanDB的数据模型 认识到该模型对于APM类数据有特定的优势  11:20 - 11:50 Apache SkyWalking 如何做前端监控  范秋霞 Qiuxia Fan,Tetrate FE SRE,SkyWalking PMC.  Apache SkyWalking对前端进行了监控与跟踪,分别有Metric, Log, Trace三部分。本次分享我会介绍页面性能指标的收集与计算,同时用案列进行分析,也会讲解Log的采集方法以及Source Map错误定位的实施。最后介绍浏览器端Requets的跟踪方法。\n通过本次分享,观众可以:\n 了解页面的性能指标以及收集计算方法 了解前端如何做错误日志收集 如何对页面请求进行跟踪以及跟踪的好处  午休 13:30 - 14:00 一名普通工程师,该如何正确的理解开源精神?  王晔倞 Yeliang Wang. API7 Partner / Product VP.  开源精神,那也许是一种给于和获取的平衡,有给于才能有获取,有获取才会有给于的动力。无需指责别人只会获取,我们应该懂得开源是一种创造方式,一个没有创造欲和创造力的人加入开源也是无用的。\n通过本次分享,观众可以:\n 为什么国内一些程序员会对开源产生误解? 了解 “开源≠自由≠非商业” 的来龙去脉。 一名普通工程师,如何高效地向开源社区做贡献?  14:00 - 14:30 可观测性技术生态和OpenTelemetry原理及实践  陈一枭 腾讯. OpenTelemetry docs-cn maintainer、Tencent OpenTelemetry OTeam创始人  综述云原生可观测性技术生态,介绍OpenTracing,OpenMetrics,OpenTelemetry等标准演进。介绍OpenTelemetry存在价值意义,介绍OpenTelemetry原理及其整体生态规划。介绍腾讯在OpenTelemetry方面的实践。\n本次分享内容如下:\n 云原生可观测性技术简介 OpenTelemetry及其它规范简介 OpenTelemetry原理 OpenTelemetry在腾讯的应用及实践  14:30 - 15:10 利用 Apache SkyWalking 事件采集系统更快定位故障  柯振旭 Zhenxu Ke,Tetrate SRE, Apache SkyWalking PMC. Apache Incubator PMC. Apache Dubbo committer.  通过本次分享,听众可以:\n 了解 SkyWalking 的事件采集系统; 了解上报事件至 SkyWalking 的多种方式; 学习如何利用 SkyWalking 采集的事件结合 metrics,分析目标系统的性能问题;  15:10 - 15:30 茶歇 15:30 - 16:00 可观测性自动注入技术原理探索与实践  詹启新 Tencnet OpenTelemetry Oteam PMC  在可观测领域中自动注入已经成为重要的组成部分之一,其优异简便的使用方式并且可同时覆盖到链路、指标、日志,大大降低了接入成本及运维成本,属于友好的一种接入方式; 本次分享将介绍Java中的字节码注入技术原理,及在可观测领域的应用实践\n 常用的自动注入技术原理简介 介绍可观测性在Java落地的要点 opentelemetry-java-instrumentation的核心原理及实现 opentelemetry自动注入的应用实践  16:00 - 16:30 如何利用 Apache APISIX 提升 Nginx 的可观测性  金卫 Wei Jin, API7 Engineer Apache SkyWalking committer. Apache apisix-ingress-controller Founder. Apache APISIX PMC.  在云原生时代,动态和可观测性是 API 网关的标准特性。Apache APISIX 不仅覆盖了 Nginx 的传统功能,在可观测性上也和 SkyWalking 深度合作,大大提升了服务治理能力。本次分享会介绍如何无痛的提升 Nginx 的可观测性和 APISIX 在未来可观测性方面的规划。\n通过本次分享,观众可以:\n 通过 Apache APISIX 实现观测性的几种手段. 了解 Apache APISIX 高效且易用的秘诀. 结合 Apache skywalking 进一步提升可观测性.  
16:35 抽奖,结束 Sponsors  Tencent Tetrate SegmentFault 思否  Anti-harassment policy SkyWalkingDay is dedicated to providing a harassment-free experience for everyone. We do not tolerate harassment of participants in any form. Sexual language and imagery will also not be tolerated in any event venue. Participants violating these rules may be sanctioned or expelled without a refund, at the discretion of the event organizers. Our anti-harassment policy can be found at Apache website.\nContact Us Send mail to dev@skywalking.apache.org.\n","excerpt":"Abstract Apache SkyWalking hosts SkyWalkingDay Conference 2021 in June 26th, jointly with Tencent …","ref":"/events/skywalkingday-2021/","title":"SkyWalkingDay Conference 2021, relocating at Beijing"},{"body":"SkyWalking NodeJS 0.3.0 is released. Go to downloads page to find release tars.\n Add ioredis plugin. (#53) Endpoint cold start detection and marking. (#52) Add mysql2 plugin. (#54) Add AzureHttpTriggerPlugin. (#51) Add Node 15 into test matrix. (#45) Segment reference and reporting overhaul. (#50) Add http ignore by method. (#49) Add secure connection option. (#48) BugFix: wrong context during many async spans. (#46) Add Node Mongoose Plugin. (#44)  ","excerpt":"SkyWalking NodeJS 0.3.0 is released. Go to downloads page to find release tars.\n Add ioredis plugin. …","ref":"/events/release-apache-skywalking-nodejs-0-3-0/","title":"Release Apache SkyWalking for NodeJS 0.3.0"},{"body":"SkyWalking Client JS 0.5.1 is released. Go to downloads page to find release tars.\n Add noTraceOrigins option. Fix wrong URL when using relative path. Catch frames errors. Get response.body as a stream with the fetch API. Support reporting multiple logs. Support typescript project.  ","excerpt":"SkyWalking Client JS 0.5.1 is released. Go to downloads page to find release tars.\n Add …","ref":"/events/release-apache-skywalking-client-js-0-5-1/","title":"Release Apache SkyWalking Client JS 0.5.1"},{"body":"SkyWalking Kong Agent 0.1.1 is released. Go to downloads page to find release tars.\n Establish the SkyWalking Kong Agent.  ","excerpt":"SkyWalking Kong Agent 0.1.1 is released. Go to downloads page to find release tars.\n Establish the …","ref":"/events/release-apache-skywalking-kong-0-1-1/","title":"Release Apache SkyWalking Kong 0.1.1"},{"body":"B站视频地址\n","excerpt":"B站视频地址","ref":"/zh/2021-05-09-summer-2021-asf20/","title":"[视频] 大咖说开源 第二季 第4期 | Apache软件基金会20年"},{"body":"We posted our Response to Elastic 2021 License Change blog 4 months ago. It doesn\u0026rsquo;t have a big impact in the short term, but because of the incompatibility between SSPL and Apache 2.0, we lost the chance of upgrading the storage server, which concerns the community and our users. So, we have to keep looking for a new option as a replacement.\nThere was an open source project, Open Distro for Elasticsearch, maintained by the AWS team. It is an Apache 2.0-licensed distribution of Elasticsearch enhanced with enterprise security, alerting, SQL, and more. After Elastic relicensed its projects, we talked with their team, and they have an agenda to take over the community leadship and keep maintaining Elasticsearch, as it was licensed by Apache 2.0. So, they are good to fork and continue.\nOn April 12th, 2021, AWS announced the new project, OpenSearch, driven by the community, which is initialized from people of AWS, Red Hat, SAP, Capital One, and Logz.io. 
Read this Introducing OpenSearch blog for more detail.\nOnce we had this news in public, we begin to plan the process of evaluating and testing OpenSearch as SkyWalking\u0026rsquo;s storage option. Read our issue.\nToday, we are glad to ANNOUNCE, OpenSearch could replace ElastcSearch as the storage, and it is still licensed under Apache 2.0.\nThis has been merged in the main stream, and you can find it in the dev doc already.\nOpenSearch OpenSearch storage shares the same configurations as Elasticsearch 7. In order to activate Elasticsearch 7 as storage, set storage provider to elasticsearch7. Please download the apache-skywalking-bin-es7.tar.gz if you want to use OpenSearch as storage.\nSkyWalking community will keep our eyes on the OpenSearch project, and look forward to their first GA release.\n NOTE: we have to add a warning NOTICE to the Elasticsearch storage doc:\nNOTICE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable Elasticsearch version according to your usage.\n","excerpt":"We posted our Response to Elastic 2021 License Change blog 4 months ago. It doesn\u0026rsquo;t have a big …","ref":"/blog/2021-05-09-opensearch-supported/","title":"OpenSearch, a new storage option to avoid ElasticSearch's SSPL"},{"body":"Hailin Wang(GitHub ID, hailin0) began his SkyWalking journey since Aug 23rd, 2020.\nHe is very active on the code contributions and brought several important features into the SkyWalking ecosystem.\nHe is on the 33rd of the contributor in the main repository[1], focuses on plugin contributions, and logs ecosystem integration, see his code contributions[2]. And also, he started a new and better way to make other open-source projects integrating with SkyWalking.\nHe used over 2 months to make the SkyWalking agent and its plugins as a part of Apache DolphinScheduler\u0026rsquo;s default binary distribution[3], see this PR[4]. This kind of example has affected further community development. Our PMC member, Yuguang Zhao, is using this way to ship our agent and plugins into the Seata project[5]. With SkyWalking\u0026rsquo;s growing, I would not doubt that this kind of integration would be more.\nThe SkyWalking accepts him as a new committer.\nWelcome Hailin Wang join the committer team.\n[1] https://github.com/apache/skywalking/graphs/contributors [2] https://github.com/apache/skywalking/commits?author=hailin0 [3] https://github.com/apache/dolphinscheduler/tree/1.3.6-prepare/ext/skywalking [4] https://github.com/apache/incubator-dolphinscheduler/pull/4852 [5] https://github.com/seata/seata/pull/3652\n","excerpt":"Hailin Wang(GitHub ID, hailin0) began his SkyWalking journey since Aug 23rd, 2020.\nHe is very active …","ref":"/events/welcome-hailin-wang-as-new-committer/","title":"Welcome Hailin Wang as new committer"},{"body":"SkyWalking LUA Nginx 0.5.0 is released. Go to downloads page to find release tars.\n Adapt to Kong agent. Correct the version format luarock.  ","excerpt":"SkyWalking LUA Nginx 0.5.0 is released. Go to downloads page to find release tars.\n Adapt to Kong …","ref":"/events/release-apache-skywalking-lua-nginx-0.5.0/","title":"Release Apache SkyWalking LUA Nginx 0.5.0"},{"body":"SkyWalking 8.5.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Incompatible Change. 
Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. 
Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.5.0 is released. Go to downloads page to find release tars. Changes by Version\nProject …","ref":"/events/release-apache-skywalking-apm-8-5-0/","title":"Release Apache SkyWalking APM 8.5.0"},{"body":"SkyWalking Cloud on Kubernetes 0.3.0 is released. Go to downloads page to find release tars.\n Support special characters in the metric selector of HPA metric adapter. Add the namespace to HPA metric name.  ","excerpt":"SkyWalking Cloud on Kubernetes 0.3.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-3-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.3.0"},{"body":"SkyWalking NodeJS 0.2.0 is released. Go to downloads page to find release tars.\n Add AMQPLib plugin (RabbitMQ). (#34) Add MongoDB plugin. (#33) Add PgPlugin - PosgreSQL. (#31) Add MySQLPlugin to plugins. (#30) Add http protocol of host to http plugins. (#28) Add tag http.method to plugins. (#26) Bugfix: child spans created on immediate cb from op. (#41) Bugfix: async and preparing child entry/exit. (#36) Bugfix: tsc error of dist lib. (#24) Bugfix: AxiosPlugin async() / resync(). (#21) Bugfix: some requests of express / axios are not close correctly. 
(#20) Express plugin uses http wrap explicitly if http plugin disabled. (#42)  ","excerpt":"SkyWalking NodeJS 0.2.0 is released. Go to downloads page to find release tars.\n Add AMQPLib plugin …","ref":"/events/release-apache-skywalking-nodejs-0-2-0/","title":"Release Apache SkyWalking for NodeJS 0.2.0"},{"body":"SkyWalking Python 0.6.0 is released. Go to downloads page to find release tars.\n Fixes:  Segment data loss when gRPC timing out. (#116) sw_tornado plugin async handler status set correctly. (#115) sw_pymysql error when connection haven\u0026rsquo;t db. (#113)    ","excerpt":"SkyWalking Python 0.6.0 is released. Go to downloads page to find release tars.\n Fixes:  Segment …","ref":"/events/release-apache-skywalking-python-0-6-0/","title":"Release Apache SkyWalking Python 0.6.0"},{"body":"","excerpt":"","ref":"/tags/apm/","title":"APM"},{"body":" Origin: End-User Tracing in a SkyWalking-Observed Browser - The New Stack\n Apache SkyWalking: an APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based (Docker, Kubernetes, Mesos) architectures.\nskywalking-client-js: a lightweight client-side JavaScript exception, performance, and tracing library. It provides metrics and error collection to the SkyWalking backend. It also makes the browser the starting point for distributed tracing.\nBackground Web application performance affects the retention rate of users. If a page load time is too long, the user will give up. So we need to monitor the web application to understand performance and ensure that servers are stable, available and healthy. SkyWalking is an APM tool and the skywalking-client-js extends its monitoring to include the browser, providing performance metrics and error collection to the SkyWalking backend.\nPerformance Metrics The skywalking-client-js uses [window.performance] (https://developer.mozilla.org/en-US/docs/Web/API/Window/performance) for performance data collection. From the MDN doc, the performance interface provides access to performance-related information for the current page. It\u0026rsquo;s part of the High Resolution Time API, but is enhanced by the Performance Timeline API, the Navigation Timing API, the User Timing API, and the Resource Timing API. In skywalking-client-js, all performance metrics are calculated according to the Navigation Timing API defined in the W3C specification. We can get a PerformanceTiming object describing our page using the window.performance.timing property. 
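As a rough illustration of how these timings can be read, here is a minimal TypeScript sketch that derives a few of them directly from window.performance.timing. It is not the actual skywalking-client-js implementation; the arithmetic simply mirrors the formulas listed in the table below.

interface BasicTimings {
  ttfbTime: number;
  dnsTime: number;
  tcpTime: number;
  domReadyTime: number;
  loadPageTime: number;
}

function collectBasicTimings(): BasicTimings {
  // PerformanceTiming is deprecated in newer specs but still widely available in browsers.
  const t = window.performance.timing;
  return {
    ttfbTime: t.responseStart - t.requestStart,               // Time to First Byte
    dnsTime: t.domainLookupEnd - t.domainLookupStart,         // DNS query
    tcpTime: t.connectEnd - t.connectStart,                   // TCP link
    domReadyTime: t.domContentLoadedEventEnd - t.fetchStart,  // DOM ready
    loadPageTime: t.loadEventStart - t.fetchStart,            // full page load
  };
}

// Read the values after the load event, once the timing entries are populated.
window.addEventListener('load', () => {
  setTimeout(() => console.log(collectBasicTimings()), 0);
});
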
The PerformanceTiming interface contains properties that offer performance timing information for various events that occur during the loading and use of the current page.\nWe can better understand these attributes when we see them together in the figure below from W3C:\nThe following table contains performance metrics in skywalking-client-js.\n   Metrics Name Describe Calculating Formulae Note     redirectTime Page redirection time redirectEnd - redirectStart If the current document and the document that is redirected to are not from the same origin, set redirectStart, redirectEnd to 0   ttfbTime Time to First Byte responseStart - requestStart According to Google Development   dnsTime Time to DNS query domainLookupEnd - domainLookupStart    tcpTime Time to TCP link connectEnd - connectStart    transTime Time to content transfer responseEnd - responseStart    sslTime Time to SSL secure connection connectEnd - secureConnectionStart Only supports HTTPS   resTime Time to resource loading loadEventStart - domContentLoadedEventEnd Represents a synchronized load resource in pages   fmpTime Time to First Meaningful Paint - Listen for changes in page elements. Traverse each new element, and calculate the total score of these elements. If the element is visible, the score is 1 * weight; if the element is not visible, the score is 0   domAnalysisTime Time to DOM analysis domInteractive - responseEnd    fptTime First Paint Time responseEnd - fetchStart    domReadyTime Time to DOM ready domContentLoadedEventEnd - fetchStart    loadPageTime Page full load time loadEventStart - fetchStart    ttlTime Time to interact domInteractive - fetchStart    firstPackTime Time to first package responseStart - domainLookupStart     Skywalking-client-js collects those performance metrics and sends them to the OAP (Observability Analysis Platform) server , which aggregates data on the back-end side that is then shown in visualizations on the UI side. Users can optimize the page according to these data.\nException Metrics There are five kinds of errors that can be caught in skywalking-client-js:\n The resource loading error is captured by window.addeventlistener ('error ', callback, true) window.onerror catches JS execution errors window.addEventListener('unhandledrejection', callback) is used to catch the promise errors the Vue errors are captured by Vue.config.errorHandler the Ajax errors are captured by addEventListener('error', callback); addEventListener('abort', callback); addEventListener('timeout', callback);  in send callback.  The Skywalking-client-js traces error data to the OAP server, finally visualizing data on the UI side. For an error overview of the App, there are several metrics for basic statistics and trends of errors, including the following metrics.\n App Error Count, the total number of errors in the selected time period. App JS Error Rate, the proportion of PV with JS errors in a selected time period to total PV. All of Apps Error Count, Top N Apps error count ranking. All of Apps JS Error Rate, Top N Apps JS error rate ranking. Error Count of Versions in the Selected App, Top N Error Count of Versions in the Selected App ranking. Error Rate of Versions in the Selected App, Top N JS Error Rate of Versions in the Selected App ranking. Error Count of the Selected App, Top N Error Count of the Selected App ranking. Error Rate of the Selected App, Top N JS Error Rate of the Selected App ranking.  
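Before moving on to the page-level metrics, here is a minimal sketch of how the five capture hooks listed above can be registered in a browser application. It is illustrative only; skywalking-client-js registers its own handlers internally, and the reportError function below is a made-up placeholder:

// Illustrative sketch of the error-capture hooks described above; reportError is a placeholder.
function reportError(kind, detail) {
  console.log('report to OAP:', kind, detail);
}

// 1. Resource loading errors (capture phase).
window.addEventListener('error', function (event) { reportError('resource/js', event); }, true);
// 2. JS execution errors.
window.onerror = function (message, source, line, column, error) { reportError('js', error || message); };
// 3. Unhandled promise rejections.
window.addEventListener('unhandledrejection', function (event) { reportError('promise', event.reason); });
// 4. Vue errors would be captured via Vue.config.errorHandler in a Vue application.
// 5. Ajax errors are registered on each XMLHttpRequest before send():
//    xhr.addEventListener('error', cb); xhr.addEventListener('abort', cb); xhr.addEventListener('timeout', cb);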
For pages, we use several metrics for basic statistics and trends of errors, including the following:\n Top Unstable Pages / Error Rate, Top N Error Rate pages of the Selected version ranking. Top Unstable Pages / Error Count, Top N Error Count pages of the Selected version ranking. Page Error Count Layout, data display of different errors in a period of time.  User Metrics SkyWalking browser monitoring also provides metrics about how visitors use the monitored websites, such as PV (page views), UV (unique visitors), top N PV (page views), etc.\nIn SPAs (single page applications), the page is refreshed only once. The traditional method reports PV only once after the page loads, so it cannot count the PV of each sub-page, nor aggregate other types of logs by sub-page.\nSkyWalking browser monitoring provides two processing methods for SPA pages:\n  Enable SPA automatic parsing. This method is suitable for most single page application scenarios that use the URL hash as the route. In the initialization configuration item, set enableSPA to true, which turns on the page\u0026rsquo;s hashchange event listener (to trigger re-reporting of PV), and uses the URL hash as the page field in other reported data.\n  Manual reporting. This method can be used in all single page application scenarios, including when the first method is not usable. The following example provides a set-page method to manually update the page name when data is reported. When this method is called, the page PV is re-reported by default:\n  app.on(\u0026#39;routeChange\u0026#39;, function (to) { ClientMonitor.setPerformance({ collector: \u0026#39;http://127.0.0.1:8080\u0026#39;, service: \u0026#39;browser-app\u0026#39;, serviceVersion: \u0026#39;1.0.0\u0026#39;, pagePath: to.path, autoTracePerf: true, enableSPA: true, }); }); Let\u0026rsquo;s take a look at the result in the following image. It shows the most popular applications and versions, and the changes of PV over a period of time.\nMake the browser the starting point for distributed tracing SkyWalking browser monitoring intercepts HTTP requests to trace segments and spans. It supports tracking the following modes of HTTP requests: XMLHttpRequest and fetch. It also supports tracking libraries and tools based on XMLHttpRequest and fetch, such as Axios, SuperAgent, OpenApi, and so on.\nLet’s see how SkyWalking browser monitoring intercepts HTTP requests:\nAfter this, it uses window.addEventListener('xhrReadyStateChange', callback) and sets sw8 = xxxx in the request header, while reporting the request information to the back-end side. Finally, we can view trace data on the trace page. The following graphic is from the trace page:\nTo see how we listen for fetch requests, let’s look at the source code of fetch.\nAs you can see, it creates a promise and a new XMLHttpRequest object. Because fetch is built into the browser, we cannot monitor its internal code simply by adding event listeners; we have to monitor the code execution itself. To do that, we rewrite fetch:\nimport { fetch } from \u0026#39;whatwg-fetch\u0026#39;; window.fetch = fetch; In this way, we can intercept fetch requests through the method above.\nAdditional Resources  End-User Tracing in a SkyWalking-Observed Browser.  
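As a closing illustration of the interception described above, here is a simplified sketch of wrapping XMLHttpRequest so that every outgoing request carries the sw8 context header and can be reported afterwards. This is an assumption-laden illustration, not the real skywalking-client-js code; buildSw8Value and reportSegment are made-up placeholders:

// Simplified sketch of XHR interception; buildSw8Value/reportSegment are placeholders.
var originalOpen = XMLHttpRequest.prototype.open;
var originalSend = XMLHttpRequest.prototype.send;

XMLHttpRequest.prototype.open = function (method, url) {
  this._swUrl = url; // remember the URL for reporting
  return originalOpen.apply(this, arguments);
};

XMLHttpRequest.prototype.send = function () {
  var xhr = this;
  xhr.setRequestHeader('sw8', buildSw8Value());   // propagate the trace context downstream
  xhr.addEventListener('readystatechange', function () {
    if (xhr.readyState === 4) {
      reportSegment(xhr._swUrl, xhr.status);      // report the span to the OAP collector
    }
  });
  return originalSend.apply(this, arguments);
};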
","excerpt":"Origin: End-User Tracing in a SkyWalking-Observed Browser - The New Stack\n Apache SkyWalking: an APM …","ref":"/blog/end-user-tracing-in-a-skywalking-observed-browser/","title":"End-User Tracing in a SkyWalking-Observed Browser"},{"body":"","excerpt":"","ref":"/tags/web-performance/","title":"Web-performance"},{"body":"","excerpt":"","ref":"/tags/design/","title":"Design"},{"body":"","excerpt":"","ref":"/tags/logs/","title":"Logs"},{"body":"SourceMarker is an open-source continuous feedback IDE plugin built on top of Apache SkyWalking, a popular open-source APM system with monitoring, tracing, and diagnosing capabilities for distributed software systems. SkyWalking, a truly holistic system, provides the means for automatically producing, storing, and querying software operation metrics. It requires little to no code changes to implement and is lightweight enough to be used in production. By itself, SkyWalking is a formidable force in the realm of continuous monitoring technology.\nSourceMarker, leveraging the continuous monitoring functionality provided by SkyWalking, creates continuous feedback technology by automatically linking software operation metrics to source code and displaying feedback directly inside of the IDE. While currently only supporting JetBrains-based IDEs and JVM-based programming languages, SourceMarker may be extended to support any number of programming languages and IDEs. Using SourceMarker, software developers can understand and validate software operation inside of their IDE. Instead of charts that indicate the health of the application, software developers can view the health of individual source code components and interpret software operation metrics from a much more familiar perspective. Such capabilities improve productivity as time spent continuously context switching from development to monitoring would be eliminated.\nLogging The benefits of continuous feedback technology are immediately apparent with the ability to view and search logs directly from source code. Instead of tailing log files or viewing logs through the browser, SourceMarker allows software developers to navigate production logs just as easily as they navigate source code. By using the source code as the primary perspective for navigating logs, SourceMarker allows software developers to view logs specific to any package, class, method, or line directly from the context of the source code which resulted in those logs.\nTracing Furthermore, continuous feedback technology offers software developers a deeper understanding of software by explicitly tying the implicit software operation to source code. Instead of visualizing software traces as Gantt charts, SourceMarker allows software developers to step through trace stacks while automatically resolving trace tags and logs. With SourceMarker, software developers can navigate production software traces in much the same way one debugs local applications.\nAlerting Most importantly, continuous feedback technology keeps software developers aware of production software operation. Armed with an APM-powered IDE, every software developer can keep track of the behavior of any method, class, package, and even the entire application itself. 
Moreover, this allows for source code to be the medium through which production bugs are made evident, thereby creating the feasibility of source code with the ability to self-diagnose and convey its own health.\n Download SourceMarker SourceMarker aims to bridge the theoretical and empirical practices of software development through continuous feedback. The goal is to make developing software with empirical data feel natural and intuitive, creating more complete software developers that understand the entire software development cycle.\n https://github.com/sourceplusplus/sourcemarker  This project is still early in its development, so if you think of any ways to improve SourceMarker, please let us know.\n","excerpt":"SourceMarker is an open-source continuous feedback IDE plugin built on top of Apache SkyWalking, a …","ref":"/blog/2021-03-16-continuous-feedback/","title":"SourceMarker: Continuous Feedback for Developers"},{"body":"SkyWalking LUA Nginx 0.4.1 is released. Go to downloads page to find release tars.\n fix: missing constants in the rockspsec.  ","excerpt":"SkyWalking LUA Nginx 0.4.1 is released. Go to downloads page to find release tars.\n fix: missing …","ref":"/events/release-apache-skywalking-lua-nginx-0.4.1/","title":"Release Apache SkyWalking LUA Nginx 0.4.1"},{"body":"SkyWalking LUA Nginx 0.4.0 is released. Go to downloads page to find release tars.\n Add a global field \u0026lsquo;includeHostInEntrySpan\u0026rsquo;, type \u0026lsquo;boolean\u0026rsquo;, mark the entrySpan include host/domain. Add destroyBackendTimer to stop reporting metrics. Doc: set random seed in init_worker phase. Local cache some variables and reuse them in Lua module. Enable local cache and use tablepool to reuse the temporary table.  ","excerpt":"SkyWalking LUA Nginx 0.4.0 is released. Go to downloads page to find release tars.\n Add a global …","ref":"/events/release-apache-skywalking-lua-nginx-0.4.0/","title":"Release Apache SkyWalking LUA Nginx 0.4.0"},{"body":"SkyWalking Client JS 0.4.0 is released. Go to downloads page to find release tars.\n Update stack and message in logs. Fix wrong URL when using relative path in xhr.  ","excerpt":"SkyWalking Client JS 0.4.0 is released. Go to downloads page to find release tars.\n Update stack and …","ref":"/events/release-apache-skywalking-client-js-0-4-0/","title":"Release Apache SkyWalking Client JS 0.4.0"},{"body":"SkyWalking Satellite 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Build the Satellite core structure. Add prometheus self telemetry. Add kafka client plugin. Add none-fallbacker plugin. Add timer-fallbacker plugin. Add nativelog-kafka-forwarder plugin. Add memory-queue plugin. Add mmap-queue plugin. Add grpc-nativelog-receiver plugin. Add http-nativelog-receiver plugin. Add grpc-server plugin. Add http-server plugin. Add prometheus-server plugin.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","excerpt":"SkyWalking Satellite 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Build …","ref":"/events/release-apache-skwaylking-satellite-0-1-0/","title":"Release Apache SkyWalking Satellite 0.1.0"},{"body":"Juntao Zhang leads and finished the re-build process of the whole skywalking website. Immigrate to the whole automatic website update, super friendly to users. 
Within the re-building process, he took several months contributions to bring the document of our main repository to host on the SkyWalking website, which is also available for host documentations of other repositories. We were waiting for this for years.\nJust in the website repository, he has 3800 LOC contributions through 26 commits.\nWe are honored to have him on the PMC team.\n","excerpt":"Juntao Zhang leads and finished the re-build process of the whole skywalking website. Immigrate to …","ref":"/events/welcome-juntao-zhang-to-join-the-pmc/","title":"Welcome Juntao Zhang (张峻滔) to join the PMC"},{"body":" Origin: Observe VM Service Meshes with Apache SkyWalking and the Envoy Access Log Service - The New Stack\n Apache SkyWalking: an APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based (Docker, Kubernetes, Mesos) architectures.\nEnvoy Access Log Service: Access Log Service (ALS) is an Envoy extension that emits detailed access logs of all requests going through Envoy.\nBackground In the previous post, we talked about the observability of service mesh under Kubernetes environment, and applied it to the bookinfo application in practice. We also mentioned that, in order to map the IP addresses into services, SkyWalking needs access to the service metadata from a Kubernetes cluster, which is not available for services deployed in virtual machines (VMs). In this post, we will introduce a new analyzer in SkyWalking that leverages Envoy’s metadata exchange mechanism to decouple with Kubernetes. The analyzer is designed to work in Kubernetes environments, VM environments, and hybrid environments. If there are virtual machines in your service mesh, you might want to try out this new analyzer for better observability, which we will demonstrate in this tutorial.\nHow it works The mechanism of how the analyzer works is the same as what we discussed in the previous post. What makes VMs different from Kubernetes is that, for VM services, there are no places where we can fetch the metadata to map the IP addresses into services.\nThe basic idea we present in this article is to carry the metadata along with Envoy’s access logs, which is called metadata-exchange mechanism in Envoy. When Istio pilot-agent starts an Envoy proxy as a sidecar of a service, it collects the metadata of that service from the Kubernetes platform, or a file on the VM where that service is deployed, and injects the metadata into the bootstrap configuration of Envoy. Envoy will carry the metadata transparently when emitting access logs to the SkyWalking receiver.\nBut how does Envoy compose a piece of a complete access log that involves the client side and server side? When a request goes out from Envoy, a plugin of istio-proxy named \u0026ldquo;metadata-exchange\u0026rdquo; injects the metadata into the http headers (with a prefix like x-envoy-downstream-), and the metadata is propagated to the server side. The Envoy sidecar of the server side receives the request and parses the headers into metadata, and puts the metadata into the access log, keyed by wasm.downstream_peer. The server side Envoy also puts its own metadata into the access log keyed by wasm.upstream_peer. 
Hence the two sides of a single request are completed.\nWith the metadata-exchange mechanism, we can use the metadata directly without any extra query.\nExample In this tutorial, we will use another demo application Online Boutique that consists of 10+ services so that we can deploy some of them in VMs and make them communicate with other services deployed in Kubernetes.\nTopology of Online Boutique In order to cover as many cases as possible, we will deploy CheckoutService and PaymentService on VM and all the other services on Kubernetes, so that we can cover the cases like Kubernetes → VM (e.g. Frontend → CheckoutService), VM → Kubernetes (e.g. CheckoutService → ShippingService), and VM → VM ( e.g. CheckoutService → PaymentService).\nNOTE: All the commands used in this tutorial are accessible on GitHub.\ngit clone https://github.com/SkyAPMTest/sw-als-vm-demo-scripts cd sw-als-vm-demo-scripts Make sure to init the gcloud SDK properly before moving on. Modify the GCP_PROJECT in file env.sh to your own project name. Most of the other variables should be OK to work if you keep them intact. If you would like to use ISTIO_VERSION \u0026gt;/= 1.8.0, please make sure this patch is included.\n  Prepare Kubernetes cluster and VM instances 00-create-cluster-and-vms.sh creates a new GKE cluster and 2 VM instances that will be used through the entire tutorial, and sets up some necessary firewall rules for them to communicate with each other.\n  Install Istio and SkyWalking 01a-install-istio.sh installs Istio Operator with spec resources/vmintegration.yaml. In the YAML file, we enable the meshExpansion that supports VM in mesh. We also enable the Envoy access log service and specify the address skywalking-oap.istio-system.svc.cluster.local:11800 to which Envoy emits the access logs. 01b-install-skywalking.sh installs Apache SkyWalking and sets the analyzer to mx-mesh.\n  Create files to initialize the VM 02-create-files-to-transfer-to-vm.sh creates necessary files that will be used to initialize the VMs. 03-copy-work-files-to-vm.sh securely transfers the generated files to the VMs with gcloud scp command. Now use ./ssh.sh checkoutservice and ./ssh.sh paymentservice to log into the two VMs respectively, and cd to the ~/work directory, execute ./prep-checkoutservice.sh on checkoutservice VM instance and ./prep-paymentservice.sh on paymentservice VM instance. The Istio sidecar should be installed and started properly. To verify that, use tail -f /var/logs/istio/istio.log to check the Istio logs. 
The output should be something like:\n2020-12-12T08:07:07.348329Z\tinfo\tsds\tresource:default new connection 2020-12-12T08:07:07.348401Z\tinfo\tsds\tSkipping waiting for gateway secret 2020-12-12T08:07:07.348401Z\tinfo\tsds\tSkipping waiting for gateway secret 2020-12-12T08:07:07.568676Z\tinfo\tcache\tRoot cert has changed, start rotating root cert for SDS clients 2020-12-12T08:07:07.568718Z\tinfo\tcache\tGenerateSecret default 2020-12-12T08:07:07.569398Z\tinfo\tsds\tresource:default pushed key/cert pair to proxy 2020-12-12T08:07:07.949156Z\tinfo\tcache\tLoaded root cert from certificate ROOTCA 2020-12-12T08:07:07.949348Z\tinfo\tsds\tresource:ROOTCA pushed root cert to proxy 2020-12-12T20:12:07.384782Z\tinfo\tsds\tresource:default pushed key/cert pair to proxy 2020-12-12T20:12:07.384832Z\tinfo\tsds\tDynamic push for secret default The dnsmasq configuration address=/.svc.cluster.local/{ISTIO_SERVICE_IP_STUB} also resolves the domain names ended with .svc.cluster.local to Istio service IP, so that you are able to access the Kubernetes services in the VM by fully qualified domain name (FQDN) such as httpbin.default.svc.cluster.local.\n  Deploy demo application Because we want to deploy CheckoutService and PaymentService manually on VM, resources/google-demo.yaml removes the two services from the original YAML . 04a-deploy-demo-app.sh deploys the other services on Kubernetes. Then log into the 2 VMs, run ~/work/deploy-checkoutservice.sh and ~/work/deploy-paymentservice.sh respectively to deploy CheckoutService and PaymentService.\n  Register VMs to Istio Services on VMs can access the services on Kubernetes by FQDN, but that’s not the case when the Kubernetes services want to talk to the VM services. The mesh has no idea where to forward the requests such as checkoutservice.default.svc.cluster.local because checkoutservice is isolated in the VM. Therefore, we need to register the services to the mesh. 04b-register-vm-with-istio.sh registers the VM services to the mesh by creating a \u0026ldquo;dummy\u0026rdquo; service without running Pods, and a WorkloadEntry to bridge the \u0026ldquo;dummy\u0026rdquo; service with the VM service.\n  Done! The demo application contains a load generator service that performs requests repeatedly. We only need to wait a few seconds, and then open the SkyWalking web UI to check the results.\nexport POD_NAME=$(kubectl get pods --namespace istio-system -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o jsonpath=\u0026quot;{.items[0].metadata.name}\u0026quot;) echo \u0026quot;Visit http://127.0.0.1:8080 to use your application\u0026quot; kubectl port-forward $POD_NAME 8080:8080 --namespace istio-system Navigate the browser to http://localhost:8080 . The metrics, topology should be there.\nTroubleshooting If you face any trouble when walking through the steps, here are some common problems and possible solutions:\n  VM service cannot access Kubernetes services? It’s likely the DNS on the VM doesn’t correctly resolve the fully qualified domain names. Try to verify that with nslookup istiod.istio-system.svc.cluster.local. If it doesn’t resolve to the Kubernetes CIDR address, recheck the step in prep-checkoutservice.sh and prep-paymentservice.sh. If the DNS works correctly, try to verify that Envoy has fetched the upstream clusters from the control plane with curl http://localhost:15000/clusters. If it doesn’t contain the target service, recheck prep-checkoutservice.sh.\n  Services are normal but nothing on SkyWalking WebUI? 
Check the SkyWalking OAP logs via kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=oap\u0026quot; -o name) and the WebUI logs via kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o name) to see whether there are any error logs. Also, make sure the time zone at the bottom-right of the browser is set to UTC +0.\n  Additional Resources  Observe a Service Mesh with Envoy ALS.  ","excerpt":"Origin: Observe VM Service Meshes with Apache SkyWalking and the Envoy Access Log Service - The New …","ref":"/blog/obs-service-mesh-vm-with-sw-and-als/","title":"Observe VM Service Meshes with Apache SkyWalking and the Envoy Access Log Service"},{"body":"When using the SkyWalking Java agent, context is usually propagated easily, without any change to the business code. However, it becomes harder when you want to propagate context between threads that use a ThreadPoolExecutor. You can use the RunnableWrapper in the Maven artifact org.apache.skywalking:apm-toolkit-trace, but that way you must change your code. Development managers usually do not like this, because there may be lots of projects, or lots of Runnable code. If they stop using SkyWalking some day, the added code becomes superfluous and inelegant.\nIs there a way to propagate context without changing the business code? Yes.\nThe SkyWalking Java agent enhances a class by adding a field and implementing an interface. ThreadPoolExecutor is a special class that is used widely; we do not even know when and where it is loaded. Most JVMs do not allow changes in the class file format for classes that have been loaded previously, so SkyWalking cannot reliably enhance ThreadPoolExecutor by retransforming it once it has been loaded. However, we can apply advice to the ThreadPoolExecutor#execute method and wrap the Runnable parameter using our own agent, then let the SkyWalking Java agent enhance the wrapper class. Advice does not change the layout of a class.\nNow we need to decide how to do this. You could use the RunnableWrapper in the Maven artifact org.apache.skywalking:apm-toolkit-trace to wrap the parameter, but then you face another problem. The plugin that activates for RunnableWrapper checks for the @TraceCrossThread annotation as its activation condition. The agent core uses net.bytebuddy.pool.TypePool.Default.WithLazyResolution.LazyTypeDescription to find the annotations of a class. LazyTypeDescription finds annotations by using a URLClassLoader with no URLs if the class loader is null (the bootstrap class loader), so it cannot find the @TraceCrossThread annotation unless you change the LocationStrategy of the SkyWalking Java agent builder.\nIn this project, I wrote my own wrapper class and simply added a plugin with a name-match condition. Next, let me show you how these two agents work together.\n  Move the plugin to the SkyWalking \u0026ldquo;plugins\u0026rdquo; directory.\n  Add this agent after the SkyWalking agent, since the wrapper class should not be loaded before the SkyWalking agent instrumentation has finished. For example,\n java -javaagent:/path/to/skywalking-agent.jar -javaagent:/path/to/skywalking-tool-agent-v1.0.0.jar \u0026hellip;\n   When our application runs\n The SkyWalking Java agent parses the plugin and adds a transformer for enhancing the wrapper class in the tool agent. The tool agent loads the wrapper class into the bootstrap class loader. This triggers the previous transformer. 
The tool agent applies advice to the ThreadPoolExecutor class, wrapping the java.lang.Runnable parameter of the \u0026ldquo;execute\u0026rdquo; method with the wrapper class. Now SkyWalking propagates the context through the wrapper class.    Enjoy tracing with ThreadPoolExecutor in SkyWalking!\n","excerpt":"When using the SkyWalking Java agent, context is usually propagated easily, without any change to …","ref":"/blog/2021-02-09-skywalking-trace-threadpool/","title":"Apache SkyWalking: How to propagate context between threads when using ThreadPoolExecutor"},{"body":"","excerpt":"","ref":"/tags/java/","title":"Java"},{"body":"SkyWalking CLI 0.6.0 is released. Go to downloads page to find release tars.\n  Features\n Support authorization when connecting to the OAP Add install command and manifest sub-command Add event command and report sub-command    Bug Fixes\n Fix the bug that JVM instance metrics can\u0026rsquo;t be queried    Chores\n Set up a simple test with GitHub Actions Reorganize the project layout Update year in NOTICE Add missing license of swck Use license-eye to check license headers    ","excerpt":"SkyWalking CLI 0.6.0 is released. Go to downloads page to find release tars.\n  Features\n Support …","ref":"/events/release-apache-skywalking-cli-0-6-0/","title":"Release Apache SkyWalking CLI 0.6.0"},{"body":"","excerpt":"","ref":"/tags/infrastructure-monitoring/","title":"Infrastructure Monitoring"},{"body":" Origin: Tetrate.io blog\n Background Apache SkyWalking\u0026ndash; the APM tool for distributed systems\u0026ndash; has historically focused on providing observability around tracing and metrics, but service performance is often affected by the host. The newest release, SkyWalking 8.4.0, introduces a new feature for monitoring virtual machines. Users can easily detect possible problems from the dashboard\u0026ndash; for example, when CPU usage is overloaded, when there’s not enough memory or disk space, or when the network status is unhealthy, etc.\nHow it works SkyWalking leverages Prometheus and OpenTelemetry for collecting metrics data, as we did for Istio control plane metrics; Prometheus is mature and widely used, and we expect to see increased adoption of the new CNCF project, OpenTelemetry. The SkyWalking OAP Server receives these metrics in OpenCensus format from OpenTelemetry. The process is as follows:\n Prometheus Node Exporter collects metrics data from the VMs. OpenTelemetry Collector fetches metrics from Node Exporters via the Prometheus Receiver, and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter. The SkyWalking OAP Server parses the expressions with MAL to filter/calculate/aggregate and store the results. The expression rules are in /config/otel-oc-rules/vm.yaml. We can now see the data on the SkyWalking WebUI dashboard.  
What to monitor SkyWalking provides default monitoring metrics including:\n CPU Usage (%) Memory RAM Usage (MB) Memory Swap Usage (MB) CPU Average Used CPU Load Memory RAM (total/available/used MB) Memory Swap (total/free MB) File System Mount point Usage (%) Disk R/W (KB/s) Network Bandwidth Usage (receive/transmit KB/s) Network Status (tcp_curr_estab/tcp_tw/tcp_alloc/sockets_used/udp_inuse) File fd Allocated  The following is how it looks when we monitor Linux:\nHow to use To enable this feature, we need to install Prometheus Node Exporter and OpenTelemetry Collector and activate the VM monitoring rules in SkyWalking OAP Server.\nInstall Prometheus Node Exporter wget https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-amd64.tar.gz tar xvfz node_exporter-1.0.1.linux-amd64.tar.gz cd node_exporter-1.0.1.linux-amd64 ./node_exporter In linux Node Exporter exposes metrics on port 9100 by default. When it is running, we can get the metrics from the /metrics endpoint. Use a web browser or command curl to verify.\ncurl http://localhost:9100/metrics We should see all the metrics from the output like:\n# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile=\u0026#34;0\u0026#34;} 7.7777e-05 go_gc_duration_seconds{quantile=\u0026#34;0.25\u0026#34;} 0.000113756 go_gc_duration_seconds{quantile=\u0026#34;0.5\u0026#34;} 0.000127199 go_gc_duration_seconds{quantile=\u0026#34;0.75\u0026#34;} 0.000147778 go_gc_duration_seconds{quantile=\u0026#34;1\u0026#34;} 0.000371894 go_gc_duration_seconds_sum 0.292994058 go_gc_duration_seconds_count 2029 ... Note: We only need to install Node Exporter, rather than Prometheus server. If you want to get more information about Prometheus Node Exporter see: https://prometheus.io/docs/guides/node-exporter/\nInstall OpenTelemetry Collector We can quickly install a OpenTelemetry Collector instance by using docker-compose with the following steps:\n Create a directory to store the configuration files, like /usr/local/otel. 
Create docker-compose.yaml and otel-collector-config.yaml in this directory represented below:  docker-compose.yaml\nversion:\u0026#34;2\u0026#34;services:# Collectorotel-collector:# Specify the image to start the container fromimage:otel/opentelemetry-collector:0.19.0# Set the otel-collector configfile command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]# Mapping the configfile to host directoryvolumes:- ./otel-collector-config.yaml:/etc/otel-collector-config.yamlports:- \u0026#34;13133:13133\u0026#34;# health_check extension- \u0026#34;55678\u0026#34;# OpenCensus receiverotel-collector-config.yaml\nextensions:health_check:# A receiver is how data gets into the OpenTelemetry Collectorreceivers:# Set Prometheus Receiver to collects metrics from targets# It’s supports the full set of Prometheus configurationprometheus:config:scrape_configs:- job_name:\u0026#39;otel-collector\u0026#39;scrape_interval:10sstatic_configs:# Replace the IP to your VMs‘s IP which has installed Node Exporter- targets:[\u0026#39;vm1:9100\u0026#39;]- targets:[\u0026#39;vm2:9100\u0026#39;]- targets:[‘vm3:9100\u0026#39; ]processors:batch:# An exporter is how data gets sent to different systems/back-endsexporters:# Exports metrics via gRPC using OpenCensus formatopencensus:endpoint:\u0026#34;docker.for.mac.host.internal:11800\u0026#34;# The OAP Server addressinsecure:truelogging:logLevel:debugservice:pipelines:metrics:receivers:[prometheus]processors:[batch]exporters:[logging, opencensus]extensions:[health_check]In this directory use command docker-compose to start up the container:  docker-compose up -d After the container is up and running, you should see metrics already exported in the logs:\n... Metric #165 Descriptor: -\u0026gt; Name: node_network_receive_compressed_total -\u0026gt; Description: Network device statistic receive_compressed. -\u0026gt; Unit: -\u0026gt; DataType: DoubleSum -\u0026gt; IsMonotonic: true -\u0026gt; AggregationTemporality: AGGREGATION_TEMPORALITY_CUMULATIVE DoubleDataPoints #0 Data point labels: -\u0026gt; device: ens4 StartTime: 1612234754364000000 Timestamp: 1612235563448000000 Value: 0.000000 DoubleDataPoints #1 Data point labels: -\u0026gt; device: lo StartTime: 1612234754364000000 Timestamp: 1612235563448000000 Value: 0.000000 ... If you want to get more information about OpenTelemetry Collector see: https://opentelemetry.io/docs/collector/\nSet up SkyWalking OAP Server To activate the oc handler and vm relevant rules, set your environment variables:\nSW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OC_RULES=vm Note: If there are other rules already activated , you can add vm with use , as a separator.\nSW_OTEL_RECEIVER_ENABLED_OC_RULES=vm,oap Start the SkyWalking OAP Server.\nDone! After all of the above steps are completed, check out the SkyWalking WebUI. Dashboard VM provides the default metrics of all observed virtual machines. Note: Clear the browser local cache if you used it to access deployments of previous SkyWalking versions.\nAdditional Resources  Read more about the SkyWalking 8.4 release highlights. Get more SkyWalking updates on Twitter.  ","excerpt":"Origin: Tetrate.io blog\n Background Apache SkyWalking\u0026ndash; the APM tool for distributed …","ref":"/blog/2021-02-07-infrastructure-monitoring/","title":"SkyWalking 8.4 provides infrastructure monitoring"},{"body":" Origin: Tetrate.io blog\n The Apache SkyWalking team today announced the 8.4 release is generally available. 
This release fills the gap between all previous versions of SkyWalking and the logging domain area. The release also advances SkyWalking’s capabilities for infrastructure observability, starting with virtual machine monitoring.\nBackground SkyWalking has historically focused on the tracing and metrics fields of observability. As its features for tracing, metrics and service level monitoring have become more and more powerful and stable, the SkyWalking team has started to explore new scenarios covered by observability. Because service performance is reflected in the logs, and is highly impacted by the infrastructure on which it runs, SkyWalking brings these two fields into the 8.4 release. This release blog briefly introduces the two new features as well as some other notable changes.\nLogs Metrics, tracing, and logging are considered the three pillars of observability [1]. SkyWalking had the full features of metrics and tracing prior to 8.4; today, as 8.4 is released, the last piece of the jigsaw is now in place.\nFigure 1: Logs Collected By SkyWalking\nFigure 2: Logs Collected By SkyWalking\nThe Java agent firstly provides SDKs to enhance the widely-used logging frameworks, log4j (1.x and 2.x) [2] and logback [3], and send the logs to the SkyWalking backend (OAP). The latter is able to collect logs from wherever the protocol is implemented. This is not a big deal, but when it comes to the correlation between logs and traces, the traditional solution is to print the trace IDs in the logs, and pick the IDs in the error logs to query the related traces. SkyWalking just simplifies the workflow by correlating the logs and traces natively. Navigating between traces and their related logs is as simple as clicking a button.\nFigure 3: Correlation Between Logs and Traces\nInfrastructure Monitoring SkyWalking is known as an application performance monitoring tool. One of the most important factors that impacts the application’s performance is the infrastructure on which the application runs. In the 8.4 release, we added the monitoring metrics of virtual machines into the dashboard.\nFigure 4: VM Metrics\nFundamental metrics such as CPU Used, Memory Used, Disk Read / Write and Network Usage are available on the dashboard. And as usual, those metrics are also available to be configured as alarm triggers when needed.\nDynamic Configurations at Agent Side Dynamic configuration at the backend side has long existed in SkyWalking for several versions. Now, it finally comes to the agent side! Prior to 8.4, you’d have to restart the target services when you modify some configuration items of the agent \u0026ndash; for instance, sampling rate (agent side), ignorable endpoint paths, etc. Now, say goodbye to rebooting. Modifying configurations is not the only usage of the dynamic configuration mechanism. The latter gives countless possibilities to the agent side in terms of dynamic behaviours, e.g. enabling / disabling plugins, enabling / disabling the whole agent, etc. Just imagine!\nGrouped Service Topology This enhancement is from the UI. SkyWalking backend supports grouping the services by user-defined dimensions. In a real world use case, the services are usually grouped by business group or department. When a developer opens the topology map, out of hundreds of services, he or she may just want to focus on the services in charge. 
The grouped service topology comes to the rescue: one can now choose to display only services belonging to a specified group.\nFigure 5: Grouped Service Topology\nOther Notable Enhancements  Agent: resolves domain names to look up backend service IP addresses. Backend: meter receiver supports meter analysis language (MAL). Backend: several CVE fixes. Backend: supports Envoy {AccessLog,Metrics}Service API V3 and adopts MAL.  Links  [1] https://peter.bourgon.org/blog/2017/02/21/metrics-tracing-and-logging.html [2] https://logging.apache.org/log4j/2.x/ [3] http://logback.qos.ch  Additional Resources  Read more about the SkyWalking 8.4 release highlights. Get more SkyWalking updates on Twitter.  ","excerpt":"Origin: Tetrate.io blog\n The Apache SkyWalking team today announced the 8.4 release is generally …","ref":"/blog/skywalking8-4-release/","title":"Apache SkyWalking 8.4: Logs, VM Monitoring, and Dynamic Configurations at Agent Side"},{"body":"","excerpt":"","ref":"/tags/release-blog/","title":"Release Blog"},{"body":"SkyWalking 8.4.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. 
Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. 
Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.4.0 is released. Go to downloads page to find release tars. Changes by Version\nProject …","ref":"/events/release-apache-skywalking-apm-8-4-0/","title":"Release Apache SkyWalking APM 8.4.0"},{"body":"Background The verifier is an important part of the next generation End-to-End Testing framework (NGE2E), which is responsible for verifying whether the actual output satisfies the expected template.\nDesign Thinking We will implement the verifier with Go template, plus some enhancements. Firstly, users need to write a Go template file with provided functions and actions to describe how the expected data looks like. Then the verifer renders the template with the actual data object. Finally, the verifier compares the rendered output with the actual data. If the rendered output is not the same with the actual output, it means the actual data is inconsist with the expected data. Otherwise, it means the actual data match the expected data. On failure, the verifier will also print out what are different between expected and actual data.\nBranches / Actions The verifier inherits all the actions from the standard Go template, such as if, with, range, etc. 
In addition, we also provide some custom actions to satisfy our own needs.\nList Elements Match contains checks if the actual list contains elements that match the given template.\nExamples:\nmetrics:{{- contains .metrics }}- name:{{notEmpty .name }}id:{{notEmpty .id }}value:{{gt .value 0 }}{{- end }}It means that the list metrics must contain an element whose name and id are not empty, and value is greater than 0.\nmetrics:{{- contains .metrics }}- name:p95value:{{gt .value 0 }}- name:p99value:{{gt .value 0 }}{{- end }}This means that the list metrics must contain an element named p95 with a value greater than 0, and an element named p95 with a value greater than 0. Besides the two element, the list metrics may or may not have other random elements.\nFunctions Users can use these provided functions in the template to describe the expected data.\nNot Empty notEmpty checks if the string s is empty.\nExample:\nid:{{notEmpty .id }}Regexp match regexp checks if string s matches the regular expression pattern.\nExamples:\nlabel:{{regexp .label \u0026#34;ratings.*\u0026#34; }}Base64 b64enc s returns the Base64 encoded string of s.\nExamples:\nid:{{b64enc \u0026#34;User\u0026#34; }}.static-suffix# this evalutes the base64 encoded string of \u0026#34;User\u0026#34;, concatenated with a static suffix \u0026#34;.static-suffix\u0026#34;Result:\nid:VXNlcg==.static-suffixFull Example Here is an example of expected data:\n# expected.data.yamlnodes:- id:{{b64enc \u0026#34;User\u0026#34; }}.0name:Usertype:USERisReal:false- id:{{b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1name:Your_ApplicationNametype:TomcatisReal:true- id:{{$h2ID := (index .nodes 2).id }}{{ notEmpty $h2ID }}# We assert that nodes[2].id is not empty and save it to variable `h2ID` for later usename:localhost:-1type:H2isReal:falsecalls:- id:{{notEmpty (index .calls 0).id }}source:{{b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1target:{{$h2ID }}# We use the previously assigned variable `h2Id` to asert that the `target` is equal to the `id` of the nodes[2]detectPoints:- CLIENT- id:{{b64enc \u0026#34;User\u0026#34; }}.0-{{ b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1source:{{b64enc \u0026#34;User\u0026#34; }}.0target:{{b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1detectPoints:- SERVERwill validate this data:\n# actual.data.yamlnodes:- id:VXNlcg==.0name:Usertype:USERisReal:false- id:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1name:Your_ApplicationNametype:TomcatisReal:true- id:bG9jYWxob3N0Oi0x.0name:localhost:-1type:H2isReal:falsecalls:- id:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1-bG9jYWxob3N0Oi0x.0source:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1detectPoints:- CLIENTtarget:bG9jYWxob3N0Oi0x.0- id:VXNlcg==.0-WW91cl9BcHBsaWNhdGlvbk5hbWU=.1source:VXNlcg==.0detectPoints:- SERVERtarget:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1# expected.data.yamlmetrics:{{- contains .metrics }}- name:{{notEmpty .name }}id:{{notEmpty .id }}value:{{gt .value 0 }}{{- end }}will validate this data:\n# actual.data.yamlmetrics:- name:business-zone::projectAid:YnVzaW5lc3Mtem9uZTo6cHJvamVjdEE=.1value:1- name:system::load balancer1id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMQ==.1value:0- name:system::load balancer2id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMg==.1value:0and will report an error when validating this data, because there is no element with a value greater than 0:\n# actual.data.yamlmetrics:- name:business-zone::projectAid:YnVzaW5lc3Mtem9uZTo6cHJvamVjdEE=.1value:0- name:system::load balancer1id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMQ==.1value:0- name:system::load 
balancer2id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMg==.1value:0The contains action performs an unordered list verification; in order to do list verifications that include ordering, you can simply use the basic rules like this:\n# expected.data.yamlmetrics:- name:p99value:{{gt (index .metrics 0).value 0 }}- name:p95value:{{gt (index .metrics 1).value 0 }}which expects the actual metrics list to be exactly ordered, with the first element named p99 and a value greater than 0, and the second element named p95 and a value greater than 0.\n","excerpt":"Background The verifier is an important part of the next generation End-to-End Testing framework …","ref":"/blog/2021-02-01-e2e-verifier-design/","title":"[Design] The Verifier of NGE2E"},{"body":"","excerpt":"","ref":"/tags/testing/","title":"Testing"},{"body":"SkyWalking Cloud on Kubernetes 0.2.0 is released. Go to downloads page to find release tars.\n Introduce custom metrics adapter to SkyWalking OAP cluster for Kubernetes HPA autoscaling. Add RBAC files and service account to support Kubernetes coordination. Add default and validation webhooks to operator controllers. Add UI CRD to deploy skywalking UI server. Add Fetcher CRD to fetch metrics from other telemetry systems, for example, Prometheus.  ","excerpt":"SkyWalking Cloud on Kubernetes 0.2.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0-2-0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.2.0"},{"body":"Apache SkyWalking is an open source APM for distributed systems and an Apache Software Foundation top-level project.\nOn Jan. 11th, 2021, we noticed the Tencent Cloud service, Tencent Service Watcher - TSW, for the first time. Because of the similar short name (SkyWalking is also called SW in the community), we connected with the Tencent Cloud service team and kindly asked about it.\nThey replied that TSW is purely developed by the Tencent team itself and does not have any code dependency on SkyWalking. We did not push harder.\nBut one week later, on Jan. 18th, 2021, our V.P., Sheng, got the report again from Haoyang, a SkyWalking PMC member, through WeChat DM (direct message). He provided complete evidence proving that TSW actually re-distributed SkyWalking\u0026rsquo;s Java agent. We keep one copy of their agent\u0026rsquo;s distribution (as of Jan. 18th), which could be downloaded here.\nSome typical pieces of evidence are here\n  ServiceManager is copied and package-name changed in the TSW\u0026rsquo;s agent.   ContextManager is copied and package-name changed in the TSW\u0026rsquo;s agent.   At the same time, we checked their tsw-client-package.zip; it did not include SkyWalking\u0026rsquo;s LICENSE and NOTICE. Also, they did not mention on their website that the TSW agent is a re-distribution of SkyWalking.\nWith all the above information, we had enough reason to believe, from the technical perspective, that they were violating the Apache 2.0 License.\nFrom Jan. 18th, 2021, we sent the mail [Apache 2.0 License Violation] Tencent Cloud TSW service doesn't follow the Apache 2.0 License to brief the SkyWalking PMC, and took the following actions to connect with Tencent.\n Made a direct call to the Tencent Open Source Office. Connected with the Tencent Cloud TVP program committee, as Sheng Wu (our VP) is a Tencent Cloud TVP. Talked with the Tencent Cloud team lead.  In all the above channels, we provided the evidence of the copy-and-redistribute behavior, and requested them to re-evaluate the statements on their website and follow the License\u0026rsquo;s requirements.\nResolution On the night of Jan. 
19th, 2021 (UTC+8), we received a response from the Tencent Cloud team. They admitted the violation and made the following changes:\n  The Tencent Cloud TSW service page now states that the agent is a fork (re-distribution) of the Apache SkyWalking agent.   The TSW agent distributions include SkyWalking\u0026rsquo;s LICENSE and NOTICE. Below is the screenshot; you can download the distribution from their product page. We keep a copy of their Jan. 19th, 2021 distribution here.   We have updated the status on the PMC mailing list. This license violation issue has been resolved for now.\nThe SkyWalking community and project management committee will keep our eyes on Tencent TSW. ","excerpt":"Apache SkyWalking is an open source APM for distributed systems and an Apache Software Foundation top-level …","ref":"/blog/2021-01-23-tencent-cloud-violates-aplv2/","title":"[Resolved][License Issue] Tencent Cloud TSW service violates the Apache 2.0 License when using SkyWalking."},{"body":" Part 1: Opening introduction Part 2: The number game Part 3: Community “Principles” Part 4: Foundation principles (For public good) Part 5: Some not-so-good things  Bilibili video link\n","excerpt":"Part 1: Opening introduction Part 2: The number game Part 3: Community “Principles” Part 4: Foundation principles (For public good) Part 5: Some not-so-good …","ref":"/zh/2021-01-21-educate-community/","title":"[Video] OpenAtom Open Source Foundation 2020 Annual Summit - Educate community Over Support community"},{"body":"Elastic announced their license change, Upcoming licensing changes to Elasticsearch and Kibana.\n We are moving our Apache 2.0-licensed source code in Elasticsearch and Kibana to be dual licensed under Server Side Public License (SSPL) and the Elastic License, giving users the choice of which license to apply. This license change ensures our community and customers have free and open access to use, modify, redistribute, and collaborate on the code. It also protects our continued investment in developing products that we distribute for free and in the open by restricting cloud service providers from offering Elasticsearch and Kibana as a service without contributing back. This will apply to all maintained branches of these two products and will take place before our upcoming 7.11 release. Our releases will continue to be under the Elastic License as they have been for the last three years.\n They also provide a FAQ page with more information about the impact on users, developers, and vendors.\nFrom the perspective of the Apache Software Foundation, SSPL has been confirmed as a Category X LICENSE (https://www.apache.org/legal/resolved.html#category-x), which means a hard dependency as a part of the core is not allowed. With that, we cannot focus on it alone anymore; we need to consider other storage options. Right now, we still have InfluxDB, TiDB, and the H2 server as storage options that are still Apache 2.0 licensed.\nAs one optional plugin, we need to focus on the client driver license. Right now, we are only using the ElasticSearch 7.5.0 and 6.3.2 drivers, which are both Apache 2.0 licensed. So, we are safe. For further upgrades, here is their announcement. They answer these typical cases in the FAQ page.\n  I build a SaaS application using Elasticsearch as the backend, how does this affect me?\n This source code license change should not affect you - you can use our default distribution or develop applications on top of it for free, under the Elastic License. This source-available license does not contain any copyleft provisions and the default functionality is free of charge. 
For a specific example, you can see our response to a question around this at Magento.\nOur users could still use, redistribute, and sell products/services based on SkyWalking, even if they are using a self-hosted, unmodified Elasticsearch server.\n  I\u0026rsquo;m using Elasticsearch via APIs, how does this change affect me?\n This change does not affect how you use client libraries to access Elasticsearch. Our client libraries remain licensed under Apache 2.0, with the exception of our Java High Level Rest Client (Java HLRC). The Java HLRC has dependencies on the core of Elasticsearch, and as a result this client library will be licensed under the Elastic License. Over time, we will eliminate this dependency and move the Java HLRC to be licensed under Apache 2.0. Until that time, for the avoidance of doubt, we do not consider using the Java HLRC as a client library in development of an application or library used to access Elasticsearch to constitute a derivative work under the Elastic License, and this will not have any impact on how you license the source code of your application using this client library or how you distribute it.\nThe client driver license incompatibility issue will exist; we can\u0026rsquo;t upgrade the driver(s) until they release Apache 2.0 licensed driver jars. But users are still safe to upgrade the drivers by themselves.\n Apache SkyWalking will discuss further actions here. If you have any questions, you are welcome to ask. Later in 2021, we will begin to investigate the possibility of creating SkyWalking\u0026rsquo;s observability database implementation.\n","excerpt":"Elastic announced their license change, Upcoming licensing changes to Elasticsearch and Kibana.\n We …","ref":"/blog/2021-01-17-elastic-change-license/","title":"Response to Elastic 2021 License Change"},{"body":"SkyWalking Client JS 0.3.0 is released. Go to downloads page to find release tars.\n Support tracing starting at the browser. Add traceSDKInternal SDK for tracing SDK internal RPC. Add detailMode SDK for tracing http method and url as tags in spans. Fix conditions of http status.  ","excerpt":"SkyWalking Client JS 0.3.0 is released. Go to downloads page to find release tars.\n Support tracing …","ref":"/events/release-apache-skywalking-client-js-0-3-0/","title":"Release Apache SkyWalking Client JS 0.3.0"},{"body":"SkyWalking Eyes 0.1.0 is released. Go to downloads page to find release tars.\n License Header  Add check and fix command. check results can be reported to pull request as comments. fix suggestions can be filed on pull request as edit suggestions.    ","excerpt":"SkyWalking Eyes 0.1.0 is released. Go to downloads page to find release tars.\n License Header  Add …","ref":"/events/release-apache-skywalking-eyes-0-1-0/","title":"Release Apache SkyWalking Eyes 0.1.0"},{"body":"SkyWalking NodeJS 0.1.0 is released. Go to downloads page to find release tars.\n Initialize project core codes. Built-in http/https plugin. Express plugin. Axios plugin.  ","excerpt":"SkyWalking NodeJS 0.1.0 is released. Go to downloads page to find release tars.\n Initialize project …","ref":"/events/release-apache-skywalking-nodejs-0-1-0/","title":"Release Apache SkyWalking for NodeJS 0.1.0"},{"body":"SkyWalking Python 0.5.0 is released. 
Go to downloads page to find release tars.\n  New plugins\n Pyramid Plugin (#102) AioHttp Plugin (#101) Sanic Plugin (#91)    API and enhancements\n @trace decorator supports async functions Supports async task context Optimized path trace ignore Moved exception check to Span.__exit__ Moved Method \u0026amp; Url tags before requests    Fixes:\n BaseExceptions not recorded as errors Allow pending data to send before exit sw_flask general exceptions handled Make skywalking logging Non-global    Chores and tests\n Make tests really run on specified Python version Deprecate 3.5 as it\u0026rsquo;s EOL    ","excerpt":"SkyWalking Python 0.5.0 is released. Go to downloads page to find release tars.\n  New plugins …","ref":"/events/release-apache-skywalking-python-0-5-0/","title":"Release Apache SkyWalking Python 0.5.0"},{"body":"Apache SkyWalking is an open source APM for distributed systems. It provides tracing, service mesh observability, metrics analysis, alarming, and visualization.\nJust 11 months ago, on Jan. 20th, 2020, SkyWalking hit the 200 contributors mark. With the growth of the project and the community, SkyWalking now includes over 20 sub(ecosystem) projects covering multiple language agents and service mesh, integration with mature open source projects, like Prometheus and Spring(Sleuth), and hundreds of libraries to support all tracing/metrics/logs fields. In the past year, the number of contributors has grown astoundingly, and all its metrics point to its community vibrancy. Many corporate titans are already using SkyWalking in a large-scale production environment, including Alibaba, Huawei, Baidu, Tencent, etc.\nRecently, our SkyWalking main repository passed 300 contributors.\nOur website has thousands of views from most countries in the world every week.\nAlthough we know that metrics like GitHub stars and the numbers of open users and contributors are not a determinant of vibrancy, they do show the trend, so we are very proud to share the increased numbers here, too.\nWe have doubled those numbers and are honored by the development of our community.\nThank you, all of our contributors. Not just these 300 contributors of the main repository, or nearly 400 contributors in all repositories, counted by GitHub. There are countless people contributing codes to SkyWalking\u0026rsquo;s subprojects, ecosystem projects, and private fork versions; writing blogs and guidances, translating documents, books, and presentations; setting up learning sessions for new users; convincing friends to join the community as end-users, contributors, even committers. Companies behind those contributors support their employees to work with the community to provide feedback and contribute the improvements and features upstream. Conference organizers share the stages with speakers from the SkyWalking community.\nSkyWalking can’t make this happen without your help. You made this community extraordinary.\nIn this crazy distributed computing and cloud native age, we as a community could make DEV, OPS, and SRE teams' work easier by locating the issue(s) in the haystack quicker than before. That is why we named the project SkyWalking: you will have a clear sight line when you stand on the glass bridge Skywalk at Grand Canyon West.\n The 376 contributors counted by GitHub account are listed below (Dec. 22nd, 2020). 
Generated by a tool deveoped by Yousa\n 1095071913 50168383 Ahoo-Wang AirTrioa AlexanderWert AlseinX Ax1an BFergerson BZFYS CharlesMaster ChaunceyLin5152 CommissarXia Cvimer Doublemine ElderJames EvanLjp FatihErdem FeynmanZhou Fine0830 FingerLiu Gallardot GerryYuan HackerRookie Heguoya Hen1ng Humbertzhang IanCao IluckySi Indifer J-Cod3r JaredTan95 Jargon96 Jijun JohnNiang Jozdortraz Jtrust Just-maple KangZhiDong LazyLei LiWenGu Liu-XinYuan Miss-you O-ll-O Patrick0308 QHWG67 Qiliang RandyAbernethy RedzRedz Runrioter SataQiu ScienJus SevenPointOld ShaoHans Shikugawa SoberChina SummerOfServenteen TJ666 TerrellChen TheRealHaui TinyAllen TomMD ViberW Videl WALL-E WeihanLi WildWolfBang WillemJiang Wooo0 XhangUeiJong Xlinlin YczYanchengzhe YoungHu YunaiV ZhHong ZhuoSiChen ZS-Oliver a198720 a526672351 acurtain adamni135 adermxzs adriancole aeolusheath agile6v aix3 aiyanbo ajanthan alexkarezin alonelaval amogege amwyyyy arugal ascrutae augustowebd bai-yang beckhampu beckjin beiwangnull bigflybrother bostin brucewu-fly c1ay candyleer carlvine500 carrypann cheenursn cheetah012 chenpengfei chenvista chess-equality chestarss chidaodezhongsheng chopin-d clevertension clk1st cngdkxw codeglzhang codelipenghui coder-yqj coki230 coolbeevip crystaldust cui-liqiang cuiweiwei cyberdak cyejing dagmom dengliming devkanro devon-ye dimaaan dingdongnigetou dio dmsolr dominicqi donbing007 dsc6636926 duotai dvsv2 dzx2018 echooymxq efekaptan eoeac evanxuhe feelwing1314 fgksgf fuhuo geektcp geomonlin ggndnn gitter-badger glongzh gnr163 gonedays grissom-grissom grissomsh guodongq guyukou gxthrj gzshilu hailin0 hanahmily haotian2015 haoyann hardzhang harvies hepyu heyanlong hi-sb honganan hsoftxl huangyoje huliangdream huohuanhuan innerpeacez itsvse jasonz93 jialong121 jinlongwang jjlu521016 jjtyro jmjoy jsbxyyx justeene juzhiyuan jy00464346 kaanid karott kayleyang kevinyyyy kezhenxu94 kikupotter kilingzhang killGC klboke ksewen kuaikuai kun-song kylixs landonzeng langke93 langyan1022 langyizhao lazycathome leemove leizhiyuan libinglong lilien1010 limfriend linkinshi linliaoy liuhaoXD liuhaoyang liuyanggithup liuzhengyang liweiv lkxiaolou llissery louis-zhou lpf32 lsyf lucperkins lujiajing1126 lunamagic1978 lunchboxav lxliuxuankb lytscu lyzhang1999 magic-akari makingtime maolie masterxxo maxiaoguang64 membphis mestarshine mgsheng michaelsembwever mikkeschiren mm23504570 momo0313 moonming mrproliu muyun12 nacx neatlife neeuq nic-chen nikitap492 nileblack nisiyong novayoung oatiz oflebbe olzhy onecloud360 osiriswd peng-yongsheng pengweiqhca potiuk purgeyao qijianbo010 qinhang3 qiuyu-d qqeasonchen qxo raybi-asus refactor2 remicollet rlenferink rootsongjc rovast scolia sdanzo seifeHu shiluo34 sikelangya simonlei sk163 snakorse songzhendong songzhian sonxy spacewander stalary stenio2011 stevehu stone-wlg sungitly surechen swartz-k sxzaihua tanjunchen tankilo taskmgr tbdpmi terranhu terrymanu tevahp thanq thebouv tianyuak tincopper tinyu0 tom-pytel tristaZero tristan-tsl trustin tsuilouis tuohai666 tzsword-2020 tzy1316106836 vcjmhg vision-ken viswaramamoorthy wankai123 wbpcode web-xiaxia webb2019 weiqiang333 wendal wengangJi wenjianzhang whfjam wind2008hxy withlin wqr2016 wu-sheng wuguangkuo wujun8 wuxingye x22x22 xbkaishui xcaspar xiaoxiangmoe xiaoy00 xinfeingxia85 xinzhuxiansheng xudianyang yanbw yanfch yang-xiaodong yangxb2010000 yanickxia yanmaipian yanmingbi yantaowu yaowenqiang yazong ychandu ycoe yimeng yu199195 yuqichou yuyujulin yymoth zaunist zaygrzx zcai2 zeaposs zhang98722 zhanghao001 zhangjianweibj zhangkewei 
zhangsean zhaoyuguang zhentaoJin zhousiliang163 zhuCheer zifeihan zkscpqm zoidbergwill zoumingzm zouyx zshit zxbu zygfengyuwuzu  ","excerpt":"Apache SkyWalking is an open source APM for distributed system. Provide tracing, service mesh …","ref":"/blog/2021-01-01-300-contributors-mark/","title":"Celebrate SkyWalking single repository hits the 300 contributors mark"},{"body":"","excerpt":"","ref":"/zh_tags/open-source-contribution/","title":"Open Source Contribution"},{"body":"","excerpt":"","ref":"/zh_tags/open-source-promotion-plan/","title":"Open Source Promotion Plan"},{"body":"Ke Zhang (a.k.a. HumbertZhang) mainly focuses on the SkyWalking Python agent, he had participated in the \u0026ldquo;Open Source Promotion Plan - Summer 2020\u0026rdquo; and completed the project smoothly, and won the award \u0026ldquo;Most Potential Students\u0026rdquo; that shows his great willingness to continuously contribute to our community.\nUp to date, he has submitted 8 PRs in the Python agent repository, 7 PRs in the main repo, all in total include ~2000 LOC.\nAt Dec. 13th, 2020, the project management committee (PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome to join the committer team, Ke Zhang!\n","excerpt":"Ke Zhang (a.k.a. HumbertZhang) mainly focuses on the SkyWalking Python agent, he had participated in …","ref":"/events/welcome-ke-zhang-as-new-committer/","title":"Welcome Ke Zhang (张可) as new committer"},{"body":"今年暑假期间我参加了开源软件供应链点亮计划—暑期 2020 的活动,在这个活动中,我主要参加了 Apache SkyWalking 的 Python Agent 的开发,最终项目顺利结项并获得了”最具潜力奖“,今天我想分享一下我参与这个活动以及开源社区的感受与收获。\n缘起 其实我在参加暑期 2020 活动之前就听说过 SkyWalking 了。我研究生的主要研究方向是微服务和云原生,组里的学长们之前就在使用 SkyWalking 进行一些研究工作,也是通过他们,我了解到了 OpenTracing, SkyWalking 等与微服务相关的 Tracing 工具以及 APM 等,当时我就在想如果有机会可以深度参加这些开源项目就好了。 巧的是,也正是在差不多的时候,本科的一个学长发给了我暑期 2020 活动的链接,我在其中惊喜的发现了 SkyWalking 项目。\n虽然说想要参与 SkyWalking 的开发,但是真的有了机会我却有一些不自信——这可是 Star 上万的 Apache 顶级项目。万幸的是在暑期 2020 活动中,每一个社区都提供了很多题目以供选择,想参与的同学可以提前对要做的事情有所了解,并可以提前做一些准备。我当时也仔细地浏览了项目列表,最终决定申请为 Python Agent 支持 Flask 或 Django 埋点的功能。当时主要考虑的是,我对 Python 语言比较熟悉,同时也有使用 Flask 等 web 框架进行开发的经验,我认为应该可以完成项目要求。为了能让心里更有底一些,我阅读了 Python Agent 的源码,写下了对项目需要做的工作的理解,并向项目的导师柯振旭发送了自荐邮件,最终被选中去完成这个项目。\n过程 被选中后我很激动,也把这份激动化作了参与开源的动力。我在进一步阅读源码,搭建本地环境后,用了三周左右的时间完成了 Django 项目的埋点插件的开发,毕竟我选择的项目是一个低难度的项目,而我在 Python web 方面也有一些经验。在这之后,我的导师和我进行了沟通,在我表达了想要继续做贡献的意愿之后,他给我建议了一些可以进一步进行贡献的方向,我也就继续参与 Python Agent 的开发。接下来,我陆续完成了 PyMongo 埋点插件, 插件版本检查机制, 支持使用 kafka 协议进行数据上报等功能。在提交了暑期 2020 活动的结项申请书后,我又继续参与了在端到端测试中增加对百分位数的验证等功能。\n在整个过程中,我遇到过很多问题,包括对问题认识不够清晰,功能的设计不够完善等等,但是通过与导师的讨论以及 Code Review,这些问题最终都迎刃而解了。此外他还经常会和我交流项目进一步发展方向,并给我以鼓励和肯定,在这里我想特别感谢我的导师在整个项目过程中给我的各种帮助。\n收获 参加暑期 2020 的活动带给我了很多收获,主要有以下几点:\n第一是让我真正参与到了开源项目中。在之前我只向在项目代码或文档中发现的 typo 发起过一些 Pull Request,但是暑期 2020 活动通过列出项目 + 导师指导的方式,明确了所要做的事情,并提供了相应的指导,降低了参与开源的门槛,使得我们学生可以参与到项目的开发中来。\n第二是对我的专业研究方向也有很多启发,我的研究方向就是微服务与云原生相关,通过参与到 SkyWalking 的开发中使得我可以更好地理解研究问题中的一些概念,也让我更得心应手得使用 SkyWalking 来解决一些实际的问题。\n第三是通过参与 SkyWalking Python Agent 以及其他部分的开发,我的贡献得到了社区的承认,并在最近被邀请作为 Committer 加入了社区,这对我而言是很高的认可,也提升了我的自信心。\n​\t第四点就是我通过这个活动认识了不少新朋友,同时也开拓了我的视野,使得我对于开源项目与开源社区有了很多新的认识。\n建议 最后同样是我对想要参与开源社区,想要参与此类活动的同学们的一些建议:\n 虽然奖金很吸引人,但是还是希望大家能抱着长期为项目进行贡献的心态来参与开源项目,以这样的心态参与开源可以让你更好地理解开源社区的运作方式,也可以让你更有机会参与完成激动人心的功能,你在一个东西上付出的时间精力越多,你能收获的往往也越多。 在申请项目的时候,可以提前阅读一下相关功能的源码,并结合自己的思考去写一份清晰明了的 proposal ,这样可以帮助你在申请人中脱颖而出。 在开始着手去完成一个功能之前,首先理清思路,并和自己的导师或了解这一部分的人进行沟通与确认,从而尽量避免在错误的方向上浪费太多时间。  ","excerpt":"今年暑假期间我参加了开源软件供应链点亮计划—暑期 2020 的活动,在这个活动中,我主要参加了 Apache SkyWalking 的 Python Agent 的开发,最终项目顺利结项并获得了”最具 
…","ref":"/zh/2020-12-20-summer2020-activity-sharing2/","title":"暑期 2020 活动学生(张可)心得分享"},{"body":"背景 我是一个热爱编程、热爱技术的人,⼀直以来都向往着能参与到开源项⽬中锻炼⾃⼰,但当我面对庞大而复杂的项目代码时,却感到手足无措,不知该从何开始。⽽此次的“开源软件供应链点亮计划-暑期2020”活动则正好提供了这样⼀个机会:清晰的任务要求、开源社区成员作为导师提供指导以及一笔丰厚的奖金,让我顺利地踏上了开源这条道路。\n回顾 在“暑期2020”活动的这两个多月里,我为 SkyWalking 的命令行工具实现了一个 dashboard,此外在阅读项目源码的过程中,还发现并修复了几个 bug。到活动结束时,我共提交了11个 PR,贡献了两千多行改动,对 SkyWalking CLI 项目的贡献数量排名第二,还获得了“最具潜力奖”。\n我觉得之所以能够如此顺利地完成这个项⽬主要有两个原因。一方面,我选择的 SkyWalking CLI 项⽬当时最新的版本号为0.3.0,还处于起步阶段,代码量相对较少,⽽且项⽬结构非常清晰,文档也较为详细,这对于我理解整个项⽬⾮常有帮助,从⽽能够更快地上⼿。另一方面,我的项目导师非常认真负责,每次我遇到问题,导师都会及时地为我解答,然后我提交的 PR 也能够很快地被 review。⽽且导师不时会给予我肯定的评论与⿎励,这极⼤地提⾼了我的成就感,让我更加积极地投⼊到下⼀阶段的⼯作,形成⼀个正向的循环。\n收获 回顾整个参与过程,觉得自己收获颇多:\n首先,我学习到了很多可能在学校里接触不到的新技术,了解了开源项目是如何进行协作,开源社区是如何运转治理的,以及开源文化、Apache way 等知识,仿佛进入了一个崭新而精彩的世界。\n其次,我的编程能力得到了锻炼。因为开源项目对于代码的质量有较高的要求,因此我会在编程时有意识地遵守相关的规范,培养良好的编码习惯。然后在导师的 code review 中也学习到了一些编程技巧。\n此外,参与开源为我的科研带来了不少灵感。因为我的研究方向是智能软件工程,旨在将人工智能技术应用在软件工程的各个环节中,这需要我在实践中发现实际问题。而开源则提供了这样一个窗口,让我足不出户即可参与到软件项目的设计、开发、测试和发布等环节。\n最后也是本次活动最大的一个收获,我的贡献得到了社区的认可,被提名成为了 SkyWalking 社区的第一位学生 committer。\n建议 最后,对于将来想要参加此类活动的同学,附上我的一些建议:\n第一,选择活跃、知名的社区。社区对你的影响将是极其深远的,好的社区意味着成熟的协作流程、良好的氛围、严谨的代码规范,以及有更大几率遇到优秀的导师,这些对于你今后在开源方面的发展都是非常有帮助的。\n第二,以兴趣为导向来选择项目,同时要敢于走出舒适区。我最初在选择项目时,初步确定了两个,一个是低难度的 Python 项目,另一个是中等难度的 Go 项目。当时我很纠结:因为我对 Python 语言比较熟悉,选择一个低难度的项目是比较稳妥的,但是项目的代码我看的并不是很懂,具体要怎么做我完全没有头绪;而 Go 项目是一个命令行工具,我对这个比较感兴趣,且有一个大致的思路,但是我对 Go 语言并不是很熟悉,实践经验为零。最后凭借清晰具体的 proposal 我成功申请到了 Go 项目并顺利地完成了,还在实践中快速掌握了一门新的编程语言。\n这次的“暑期2020”活动虽已圆满结束,但我的开源之路才刚刚开始。\n","excerpt":"背景 我是一个热爱编程、热爱技术的人,⼀直以来都向往着能参与到开源项⽬中锻炼⾃⼰,但当我面对庞大而复杂的项目代码时,却感到手足无措,不知该从何开始。⽽此次的“开源软件供应链点亮计划-暑期2020”活动 …","ref":"/zh/2020-12-19-summer2020-activity-sharing/","title":"暑期2020活动心得分享"},{"body":"NGE2E is the next generation End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It\u0026rsquo;s built based on the lessons learnt from tens of hundreds of test cases in the SkyWalking main repo.\nGoal  Keep the feature parity with the existing E2E framework in SkyWalking main repo; Support both docker-compose and KinD to orchestrate the tested services under different environments; Get rid of the heavy Java/Maven stack, which exists in the current E2E; be language independent as much as possible, users only need to configure YAMLs and run commands, without writing codes;  Non-Goal  This framework is not involved with the build process, i.e. 
it won\u0026rsquo;t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn\u0026rsquo;t take the plugin tests into account, at least for now; This project doesn\u0026rsquo;t mean to add/remove any new/existing test case to/from the main repo; This documentation won\u0026rsquo;t cover too much technical details of how to implement the framework, that should go into an individual documentation;  Design Before diving into the design details, let\u0026rsquo;s take a quick look at how the end user might use NGE2E.\n All the following commands are mock, and are open to debate.\n To run a test case in a directory /path/to/the/case/directory\ne2e run /path/to/the/case/directory # or cd /path/to/the/case/directory \u0026amp;\u0026amp; e2e run This will run the test case in the specified directory, this command is a wrapper that glues all the following commands, which can be executed separately, for example, to debug the case:\nNOTE: because all the options can be loaded from a configuration file, so as long as a configuration file (say e2e.yaml) is given in the directory, every command should be able to run in bare mode (without any option explicitly specified in the command line);\nSet Up e2e setup --env=compose --file=docker-compose.yaml --wait-for=service/health e2e setup --env=kind --file=kind.yaml --manifests=bookinfo.yaml,gateway.yaml --wait-for=pod/ready e2e setup # If configuration file e2e.yaml is present  --env: the environment, may be compose or kind, represents docker-compose and KinD respectively; --file: the docker-compose.yaml or kind.yaml file that declares how to set up the environment; --manifests: for KinD, the resources files/directories to apply (using kubectl apply -f); --command: a command to run after the environment is started, this may be useful when users need to install some extra tools or apply resources from command line, like istioctl install --profile=demo; --wait-for: can be specified multiple times to give a list of conditions to be met; wait until the given conditions are met; the most frequently-used strategy should be --wait-for=service/health, --wait-for=deployments/available, etc. that make the e2e setup command to wait for all conditions to be met; other possible strategies may be something like --wait-for=\u0026quot;log:Started Successfully\u0026quot;, --wait-for=\u0026quot;http:localhost:8080/healthcheck\u0026quot;, etc. if really needed;  Trigger Inputs e2e trigger --interval=3s --times=0 --action=http --url=\u0026#34;localhost:8080/users\u0026#34; e2e trigger --interval=3s --times=0 --action=cmd --cmd=\u0026#34;curl localhost:8080/users\u0026#34; e2e trigger # If configuration file e2e.yaml is present  --interval=3s: trigger the action every 3 seconds; --times=0: how many times to trigger the action, 0=infinite; --action=http: the action of the trigger, i.e. \u0026ldquo;perform an http request as an input\u0026rdquo;; --action=cmd: the action of the trigger, i.e. 
\u0026ldquo;execute the cmd as an input\u0026rdquo;;  Query Output swctl service ls this is a project-specific step, different project may use different tools to query the actual output, for SkyWalking, it uses swctl to query the actual output.\nVerify e2e verify --actual=actual.data.yaml --expected=expected.data.yaml e2e verify --query=\u0026#34;swctl service ls\u0026#34; --expected=expected.data.yaml e2e verify # If configuration file e2e.yaml is present   --actual: the actual data file, only YAML file format is supported;\n  --expected: the expected data file, only YAML file format is supported;\n  --query: the query to get the actual data, the query result must have the same format as --actual and --expected;\n The --query option will get the output into a temporary file and use the --actual under the hood;\n   Cleanup e2e cleanup --env=compose --file=docker-compose.yaml e2e cleanup --env=kind --file=kind.yaml --resources=bookinfo.yaml,gateway.yaml e2e cleanup # If configuration file e2e.yaml is present This step requires the same options in the setup step so that it can clean up all things necessarily.\nSummarize To summarize, the directory structure of a test case might be\ncase-name ├── agent-service # optional, an arbitrary project that is used in the docker-compose.yaml if needed │ ├── Dockerfile │ ├── pom.xml │ └── src ├── docker-compose.yaml ├── e2e.yaml # see a sample below └── testdata ├── expected.endpoints.service1.yaml ├── expected.endpoints.service2.yaml └── expected.services.yaml or\ncase-name ├── kind.yaml ├── bookinfo │ ├── bookinfo.yaml │ └── bookinfo-gateway.yaml ├── e2e.yaml # see a sample below └── testdata ├── expected.endpoints.service1.yaml ├── expected.endpoints.service2.yaml └── expected.services.yaml a sample of e2e.yaml may be\nsetup:env:kindfile:kind.yamlmanifests:- path:bookinfo.yamlwait:# you can have multiple conditions to wait- namespace:bookinfolabel-selector:app=productfor:deployment/available- namespace:reviewslabel-selector:app=productfor:deployment/available- namespace:ratingslabel-selector:app=productfor:deployment/availablerun:- command:|# it can be a shell script or anything executableistioctl install --profile=demo -ykubectl label namespace default istio-injection=enabledwait:- namespace:istio-systemlabel-selector:app=istiodfor:deployment/available# OR# env: compose# file: docker-compose.yamltrigger:action:httpinterval:3stimes:0url:localhost:9090/usersverify:- query:swctl service lsexpected:expected.services.yaml- query:swctl endpoint ls --service=\u0026#34;YnVzaW5lc3Mtem9uZTo6cHJvamVjdEM=.1\u0026#34;expected:expected.projectC.endpoints.yamlthen a single command should do the trick.\ne2e run Modules This project is divided into the following modules.\nController A controller command (e2e run) composes all the steps declared in the e2e.yaml, it should be progressive and clearly display which step is currently running. If it failed in a step, the error message should be as much comprehensive as possible. An example of the output might be\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? 
Generating Traffic - http localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up env (by executing cleanup command) no mater what status other commands are, even if they are failed, the controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Initializer The initializer is responsible for\n  When env==compose\n Start the docker-compose services; Check the services' healthiness; Wait until all services are ready according to the interval, etc.;    When env==kind\n Start the KinD cluster according to the config files; Apply the resources files (--manifests) or/and run the custom init command (--commands); Check the pods' readiness; Wait until all pods are ready according to the interval, etc.;    Verifier According to scenarios we have at the moment, the must-have features are:\n  Matchers\n Exact match Not null Not empty Greater than 0 Regexp match At least one of list element match    Functions\n Base64 encode/decode    in order to help to identify simple bugs from the GitHub Actions workflow, there are some \u0026ldquo;nice to have\u0026rdquo; features:\n Printing the diff content when verification failed is a super helpful bonus proved in the Python agent repo;  Logging When a test case failed, all the necessary logs should be collected into a dedicated directory, which could be uploaded to the GitHub Artifacts for downloading and analysis;\nLogs through the entire process of a test case are:\n KinD clusters logs; Containers/pods logs; The logs from the NGE2E itself;  More Planned Debugging Debugging the E2E locally has been a strong requirement and time killer that we haven\u0026rsquo;t solve up to date, though we have enhancements like https://github.com/apache/skywalking/pull/5198 , but in this framework, we will adopt a new method to \u0026ldquo;really\u0026rdquo; support debugging locally.\nThe most common case when debugging is to run the E2E tests, with one or more services forwarded into the host machine, where the services are run in the IDE or in debug mode.\nFor example, you may run the SkyWalking OAP server in an IDE and run e2e run, expecting the other services (e.g. agent services, SkyWalking WebUI, etc.) 
inside the containers to connect to your local OAP, instead of the one declared in docker-compose.yaml.\nFor Docker Desktop Mac/Windows, we can access the services running on the host machine inside containers via host.docker.internal, for Linux, it\u0026rsquo;s 172.17.0.1.\nOne possible solution is to add an option --debug-services=oap,other-service-name that rewrites all the router rules inside the containers from oap to host.docker.internal/172.17.0.1.\nCodeGen When adding new test case, a code generator would be of great value to eliminate the repeated labor and copy-pasting issues.\ne2e new \u0026lt;case-name\u0026gt; ","excerpt":"NGE2E is the next generation End-to-End Testing framework that aims to help developers to set up, …","ref":"/blog/e2e-design/","title":"[Design] NGE2E - Next Generation End-to-End Testing Framework"},{"body":"这篇文章暂时不讲告警策略, 直接看默认情况下激活的告警目标以及钉钉上的告警效果\nSkyWalking内置了很多默认的告警策略, 然后根据告警策略生成告警目标, 我们可以很容易的在界面上看到\n当我们想去让这些告警目标通知到我们时, 由于SkyWalking目前版本(8.3)已经自带了, 只需要简单配置一下即可\n我们先来钉钉群中创建机器人并勾选加签\n然后再修改告警部分的配置文件, 如果你是默认的配置文件(就像我一样), 你可以直接执行以下命令, 反之你也可以手动修改configs/alarm-settings.yml文件\ntee \u0026lt;your_skywalking_path\u0026gt;/configs/alarm-settings.yml \u0026lt;\u0026lt;-'EOF' dingtalkHooks: textTemplate: |- { \u0026quot;msgtype\u0026quot;: \u0026quot;text\u0026quot;, \u0026quot;text\u0026quot;: { \u0026quot;content\u0026quot;: \u0026quot;Apache SkyWalking Alarm: \\n %s.\u0026quot; } } webhooks: - url: https://oapi.dingtalk.com/robot/send?access_token=\u0026lt;access_token\u0026gt; secret: \u0026lt;加签值\u0026gt; EOF 最终效果如下\n参考文档:\nhttps://github.com/apache/skywalking/blob/master/docs/en/setup/backend/backend-alarm.md\nhttps://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq/uKPlK\n谢谢观看, 后续我会在SkyWalking告警这块写更多实战文章\n","excerpt":"这篇文章暂时不讲告警策略, 直接看默认情况下激活的告警目标以及钉钉上的告警效果\nSkyWalking内置了很多默认的告警策略, 然后根据告警策略生成告警目标, 我们可以很容易的在界面上看到\n当我们想去 …","ref":"/zh/2020-12-13-skywalking-alarm/","title":"SkyWalking报警发送到钉钉群"},{"body":"Gui Cao began the code contributions since May 3, 2020. In the past 6 months, his 23 pull requests(GitHub, zifeihan[1]) have been accepted, which includes 5k+ lines of codes.\nMeanwhile, he took part in the tech discussion, and show the interests to contribute more to the project.\nAt Dec. 4th, 2020, the project management committee(PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome Gui Cao join the committer team.\n[1] https://github.com/apache/skywalking/commits?author=zifeihan\n","excerpt":"Gui Cao began the code contributions since May 3, 2020. In the past 6 months, his 23 pull …","ref":"/events/welcome-gui-cao-as-new-committer/","title":"Welcome Gui Cao as new committer"},{"body":" Author: Zhenxu Ke, Sheng Wu, and Tevah Platt. tetrate.io Original link, Tetrate.io blog Dec. 03th, 2020  Apache SkyWalking: an APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based (Docker, Kubernetes, Mesos) architectures.\nEnvoy Access Log Service: Access Log Service (ALS) is an Envoy extension that emits detailed access logs of all requests going through Envoy.\nBackground Apache SkyWalking has long supported observability in service mesh with Istio Mixer adapter. But since v1.5, Istio began to deprecate Mixer due to its poor performance in large scale clusters. Mixer’s functionalities have been moved into the Envoy proxies, and is supported only through the 1.7 Istio release. 
On the other hand, Sheng Wu and Lizan Zhou presented a better solution based on the Apache SkyWalking and Envoy ALS on KubeCon China 2019, to reduce the performance impact brought by Mixer, while retaining the same observability in service mesh. This solution was initially implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman at Tetrate.io. If you are looking for a more efficient solution to observe your service mesh instead of using a Mixer-based solution, this is exactly what you need. In this tutorial, we will explain a little bit how the new solution works, and apply it to the bookinfo application in practice.\nHow it works From a perspective of observability, Envoy can be typically deployed in 2 modes, sidecar, and router. As a sidecar, Envoy mostly represents a single service to receive and send requests (2 and 3 in the picture below). While as a proxy, Envoy may represent many services (1 in the picture below).\nIn both modes, the logs emitted by ALS include a node identifier. The identifier starts with router~ (or ingress~) in router mode and sidecar~ in sidecar proxy mode.\nApart from the node identifier, there are several noteworthy properties in the access logs that will be used in this solution:\n  downstream_direct_remote_address: This field is the downstream direct remote address on which the request from the user was received. Note: This is always the physical peer, even if the remote address is inferred from for example the x-forwarded-for header, proxy protocol, etc.\n  downstream_remote_address: The remote/origin address on which the request from the user was received.\n  downstream_local_address: The local/destination address on which the request from the user was received.\n  upstream_remote_address: The upstream remote/destination address that handles this exchange.\n  upstream_local_address: The upstream local/origin address that handles this exchange.\n  upstream_cluster: The upstream cluster that upstream_remote_address belongs to.\n  We will discuss more about the properties in the following sections.\nSidecar When serving as a sidecar, Envoy is deployed alongside a service, and delegates all the incoming/outgoing requests to/from the service.\n  Delegating incoming requests: in this case, Envoy acts as a server side sidecar, and sets the upstream_cluster in form of inbound|portNumber|portName|Hostname[or]SidecarScopeID.\nThe SkyWalking analyzer checks whether either downstream_remote_address can be mapped to a Kubernetes service:\na. If there is a service (say Service B) whose implementation is running in this IP(and port), then we have a service-to-service relation, Service B -\u0026gt; Service A, which can be used to build the topology. Together with the start_time and duration fields in the access log, we have the latency metrics now.\nb. If there is no service that can be mapped to downstream_remote_address, then the request may come from a service out of the mesh. Since SkyWalking cannot identify the source service where the requests come from, it simply generates the metrics without source service, according to the topology analysis method. The topology can be built as accurately as possible, and the metrics detected from server side are still correct.\n  Delegating outgoing requests: in this case, Envoy acts as a client-side sidecar, and sets the upstream_cluster in form of outbound|\u0026lt;port\u0026gt;|\u0026lt;subset\u0026gt;|\u0026lt;serviceFQDN\u0026gt;.\nClient side detection is relatively simpler than (1. Delegating incoming requests). 
If upstream_remote_address is another sidecar or proxy, we simply get the mapped service name and generate the topology and metrics. Otherwise, we have no idea what it is and consider it an UNKNOWN service.\n  Proxy role When Envoy is deployed as a proxy, it is an independent service itself and doesn\u0026rsquo;t represent any other service like a sidecar does. Therefore, we can build client-side metrics as well as server-side metrics.\nExample In this section, we will use the typical bookinfo application to demonstrate how Apache SkyWalking 8.3.0+ (the latest version up to Nov. 30th, 2020) works together with Envoy ALS to observe a service mesh.\nInstalling Kubernetes SkyWalking 8.3.0 supports the Envoy ALS solution under both Kubernetes environment and virtual machines (VM) environment, in this tutorial, we’ll only focus on the Kubernetes scenario, for VM solution, please stay tuned for our next blog, so we need to install Kubernetes before taking further steps.\nIn this tutorial, we will use the Minikube tool to quickly set up a local Kubernetes(v1.17) cluster for testing. In order to run all the needed components, including the bookinfo application, the SkyWalking OAP and WebUI, the cluster may need up to 4GB RAM and 2 CPU cores.\nminikube start --memory=4096 --cpus=2 Next, run kubectl get pods --namespace=kube-system --watch to check whether all the Kubernetes components are ready. If not, wait for the readiness before going on.\nInstalling Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The built-in configuration profiles free us from lots of manual operations. So, for demonstration purposes, we will use Istio through this tutorial.\nexport ISTIO_VERSION=1.7.1 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ istioctl install --set profile=demo kubectl label namespace default istio-injection=enabled Run kubectl get pods --namespace=istio-system --watch to check whether all the Istio components are ready. If not, wait for the readiness before going on.\nEnabling ALS The demo profile doesn’t enable ALS by default. We need to reconfigure it to enable ALS via some configuration.\nistioctl manifest install \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 The example command --set meshConfig.enableEnvoyAccessLogService=true enables the Envoy access log service in the mesh. And as we said earlier, ALS is essentially a gRPC service that emits requests logs. The config meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 tells this gRPC service where to emit the logs, say skywalking-oap.istio-system:11800, where we will deploy the SkyWalking ALS receiver later.\nNOTE: You can also enable the ALS when installing Istio so that you don’t need to restart Istio after installation:\nistioctl install --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 kubectl label namespace default istio-injection=enabled Deploying Apache SkyWalking The SkyWalking community provides a Helm Chart to make it easier to deploy SkyWalking and its dependent services in Kubernetes. 
The Helm Chart can be found at the GitHub repository.\n# Install Helm curl -sSLO https://get.helm.sh/helm-v3.0.0-linux-amd64.tar.gz sudo tar xz -C /usr/local/bin --strip-components=1 linux-amd64/helm -f helm-v3.0.0-linux-amd64.tar.gz # Clone SkyWalking Helm Chart git clone https://github.com/apache/skywalking-kubernetes cd skywalking-kubernetes/chart git reset --hard dd749f25913830c47a97430618cefc4167612e75 # Update dependencies helm dep up skywalking # Deploy SkyWalking helm -n istio-system install skywalking skywalking \\  --set oap.storageType=\u0026#39;h2\u0026#39;\\  --set ui.image.tag=8.3.0 \\  --set oap.image.tag=8.3.0-es7 \\  --set oap.replicas=1 \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh \\  --set oap.env.JAVA_OPTS=\u0026#39;-Dmode=\u0026#39; \\  --set oap.envoy.als.enabled=true \\  --set elasticsearch.enabled=false We deploy SkyWalking to the namespace istio-system, so that SkyWalking OAP service can be accessed by skywalking-oap.istio-system:11800, to which we told ALS to emit their logs, in the previous step.\nWe also enable the ALS analyzer in the SkyWalking OAP: oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh. The analyzer parses the access logs and maps the IP addresses in the logs to the real service names in the Kubernetes, to build a topology.\nIn order to retrieve the metadata (such as Pod IP and service names) from a Kubernetes cluster for IP mappings, we also set oap.envoy.als.enabled=true, to apply for a ClusterRole that has access to the metadata.\nexport POD_NAME=$(kubectl get pods -A -l \u0026#34;app=skywalking,release=skywalking,component=ui\u0026#34; -o name) echo $POD_NAME kubectl -n istio-system port-forward $POD_NAME 8080:8080 Now navigate your browser to http://localhost:8080 . You should be able to see the SkyWalking dashboard. The dashboard is empty for now, but after we deploy the demo application and generate traffic, it should be filled up later.\nDeploying Bookinfo application Run:\nexport ISTIO_VERSION=1.7.1 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s minikube tunnel Then navigate your browser to http://localhost/productpage. You should be able to see the typical bookinfo application. Refresh the webpage several times to generate enough access logs.\nDone! And you’re all done! Check out the SkyWalking WebUI again. You should see the topology of the bookinfo application, as well the metrics of each individual service of the bookinfo application.\nTroubleshooting  Check all pods status: kubectl get pods -A. SkyWalking OAP logs: kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=oap\u0026quot; -o name). SkyWalking WebUI logs: kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o name). Make sure the time zone at the bottom-right of the WebUI is set to UTC +0.  Customizing Service Names The SkyWalking community brought more improvements to the ALS solution in the 8.3.0 version. You can decide how to compose the service names when mapping from the IP addresses, with variables service and pod. 
For instance, configuring K8S_SERVICE_NAME_RULE to the expression ${service.metadata.name}-${pod.metadata.labels.version} gets service names with version label such as reviews-v1, reviews-v2, and reviews-v3, instead of a single service reviews, see the PR.\nWorking ALS with VM Kubernetes is popular, but what about VMs? From what we discussed above, in order to map the IPs to services, SkyWalking needs access to the Kubernetes cluster, fetching service metadata and Pod IPs. But in a VM environment, there is no source from which we can fetch those metadata. In the next post, we will introduce another ALS analyzer based on the Envoy metadata exchange mechanism. With this analyzer, you are able to observe a service mesh in the VM environment. Stay tuned! If you want to have commercial support for the ALS solution or hybrid mesh observability, Tetrate Service Bridge, TSB is another good option out there.\nAdditional Resources  KubeCon 2019 Recorded Video. Get more SkyWalking updates on the official website.  Apache SkyWalking founder Sheng Wu, SkyWalking core maintainer Zhenxu Ke are Tetrate engineers, and Tevah Platt is a content writer for Tetrate. Tetrate helps organizations adopt open source service mesh tools, including Istio, Envoy, and Apache SkyWalking, so they can manage microservices, run service mesh on any infrastructure, and modernize their applications.\n","excerpt":"Author: Zhenxu Ke, Sheng Wu, and Tevah Platt. tetrate.io Original link, Tetrate.io blog Dec. 03th, …","ref":"/blog/2020-12-03-obs-service-mesh-with-sw-and-als/","title":"Observe Service Mesh with SkyWalking and Envoy Access Log Service"},{"body":"","excerpt":"","ref":"/zh_tags/service-mesh/","title":"Service Mesh"},{"body":" 如果你正在寻找在 Mixer 方案以外观察服务网格的更优解,本文正符合你的需要。\n Apache Skywalking︰特别为微服务、云原生和容器化(Docker、Kubernetes、Mesos)架构而设计的 APM(应用性能监控)系统。\nEnvoy 访问日志服务︰访问日志服务(ALS)是 Envoy 的扩展组件,会将所有通过 Envoy 的请求的详细访问日志发送出来。\n背景 Apache SkyWalking 一直通过 Istio Mixer 的适配器,支持服务网格的可观察性。不过自从 v1.5 版本,由于 Mixer 在大型集群中差强人意的表现,Istio 开始弃用 Mixer。Mixer 的功能现已迁至 Envoy 代理,并获 Istio 1.7 版本支持。\n在去年的中国 KubeCon 中,吴晟和周礼赞基于 Apache SkyWalking 和 Envoy ALS,发布了新的方案:不再受制于 Mixer 带来的性能影响,也同时保持服务网格中同等的可观察性。这个方案最初是由吴晟、高洪涛、周礼赞和 Dhi Aurrahman 在 Tetrate.io 实现的。\n如果你正在寻找在 Mixer 方案之外,为你的服务网格进行观察的最优解,本文正是你当前所需的。在这个教程中,我们会解释此方案的运作逻辑,并将它实践到 bookinfo 应用上。\n运作逻辑 从可观察性的角度来说,Envoy 一般有两种部署模式︰Sidecar 和路由模式。 Envoy 代理可以代表多项服务(见下图之 1),或者当它作为 Sidecar 时,一般是代表接收和发送请求的单项服务(下图之 2 和 3)。\n在两种模式中,ALS 发放的日志都会带有一个节点标记符。该标记符在路由模式时,以 router~ (或 ingress~)开头,而在 Sidecar 代理模式时,则以 sidecar~ 开头。\n除了节点标记符之外,这个方案[1]所采用的访问日志也有几个值得一提的字段︰\ndownstream_direct_remote_address︰此字段是下游的直接远程地址,用作接收来自用户的请求。注意︰它永远是对端实体的地址,即使远程地址是从 x-forwarded-for header、代理协议等推断出来的。\ndownstream_remote_address︰远程或原始地址,用作接收来自用户的请求。\ndownstream_local_address︰本地或目标地址,用作接收来自用户的请求。\nupstream_remote_address︰上游的远程或目标地址,用作处理本次交换。\nupstream_local_address︰上游的本地或原始地址,用作处理本次交换。\nupstream_cluster︰upstream_remote_address 所属的上游集群。\n我们会在下面详细讲解各个字段。\nSidecar 当 Envoy 作为 Sidecar 的时候,会搭配服务一起部署,并代理来往服务的传入或传出请求。\n  代理传入请求︰在此情况下,Envoy 会作为服务器端的 Sidecar,以 inbound|portNumber|portName|Hostname[or]SidecarScopeID 格式设定 upstream_cluster。\nSkyWalking 分析器会检查 downstream_remote_address 是否能够找到对应的 Kubernetes 服务。\n如果在此 IP(和端口)中有一个服务(例如服务 B)正在运行,那我们就会建立起服务对服务的关系(即服务 B → 服务 A),帮助建立拓扑。再配合访问日志中的 start_time 和 duration 两个字段,我们就可以获得延迟的指标数据了。\n如果没有任何服务可以和 downstream_remote_address 相对应,那请求就有可能来自网格以外的服务。由于 SkyWalking 无法识别请求的服务来源,在没有源服务的情况下,它简单地根据拓扑分析方法生成数据。拓扑依然可以准确地建立,而从服务器端侦测出来的指标数据也依然是正确的。\n  代理传出请求︰在此情况下,Envoy 会作为客户端的 Sidecar,以 
outbound|\u0026lt;port\u0026gt;|\u0026lt;subset\u0026gt;|\u0026lt;serviceFQDN\u0026gt; 格式设定 upstream_cluster。\n客户端的侦测相对来说比代理传入请求容易。如果 upstream_remote_address 是另一个 Sidecar 或代理的话,我们只需要获得它相应的服务名称,便可生成拓扑和指标数据。否则,我们没有办法理解它,只能把它当作 UNKNOWN 服务。\n  代理角色 当 Envoy 被部署为前端代理时,它是独立的服务,并不会像 Sidecar 一样,代表任何其他的服务。所以,我们可以建立客户端以及服务器端的指标数据。\n演示范例 在本章,我们会使用典型的 bookinfo 应用,来演示 Apache SkyWalking 8.3.0+ (截至 2020 年 11 月 30 日的最新版本)如何与 Envoy ALS 合作,联手观察服务网格。\n安装 Kubernetes 在 Kubernetes 和虚拟机器(VM)的环境下,SkyWalking 8.3.0 均支持 Envoy ALS 的方案。在本教程中,我们只会演示在 Kubernetes 的情境,至于 VM 方案,请耐心期待我们下一篇文章。所以在进行下一步之前,我们需要先安装 Kubernetes。\n在本教程中,我们会使用 Minikube 工具来快速设立本地的 Kubernetes(v1.17 版本)集群用作测试。要运行所有必要组件,包括 bookinfo 应用、SkyWalking OAP 和 WebUI,集群需要动用至少 4GB 内存和 2 个 CPU 的核心。\nminikube start --memory=4096 --cpus=2 然后,运行 kubectl get pods --namespace=kube-system --watch,检查所有 Kubernetes 的组件是否已准备好。如果还没,在进行下一步前,请耐心等待准备就绪。\n安装 Istio Istio 为配置 Envoy 代理和实现访问日志服务提供了一个非常方便的方案。内建的配置设定档为我们省去了不少手动的操作。所以,考虑到演示的目的,我们会在本教程全程使用 Istio。\nexport ISTIO_VERSION=1.7.1 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ istioctl install --set profile=demo kubectl label namespace default istio-injection=enabled 然后,运行 kubectl get pods --namespace=istio-system --watch,检查 Istio 的所有组件是否已准备好。如果还没,在进行下一步前,请耐心等待准备就绪。\n启动访问日志服务 演示的设定档没有预设启动 ALS,我们需要重新配置才能够启动 ALS。\nistioctl manifest install \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 范例指令 --set meshConfig.enableEnvoyAccessLogService=true 会在网格中启动访问日志服务。正如之前提到,ALS 本质上是一个会发放请求日志的 gRPC 服务。配置 meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 会告诉这个gRPC 服务往哪里发送日志,这里是往 skywalking-oap.istio-system:11800 发送,稍后我们会部署 SkyWalking ALS 接收器到这个地址。\n注意︰\n你也可以在安装 Istio 时启动 ALS,那就不需要在安装后重新启动 Istio︰\nistioctl install --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 kubectl label namespace default istio-injection=enabled 部署 Apache SkyWalking SkyWalking 社区提供了 Helm Chart ,让你更轻易地在 Kubernetes 中部署 SkyWalking 以及其依赖服务。 Helm Chart 可以在 GitHub 仓库找到。\n# Install Helm curl -sSLO https://get.helm.sh/helm-v3.0.0-linux-amd64.tar.gz sudo tar xz -C /usr/local/bin --strip-components=1 linux-amd64/helm -f helm-v3.0.0-linux-amd64.tar.gz # Clone SkyWalking Helm Chart git clone https://github.com/apache/skywalking-kubernetes cd skywalking-kubernetes/chart git reset --hard dd749f25913830c47a97430618cefc4167612e75 # Update dependencies helm dep up skywalking # Deploy SkyWalking helm -n istio-system install skywalking skywalking \\  --set oap.storageType=\u0026#39;h2\u0026#39;\\  --set ui.image.tag=8.3.0 \\  --set oap.image.tag=8.3.0-es7 \\  --set oap.replicas=1 \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh \\  --set oap.env.JAVA_OPTS=\u0026#39;-Dmode=\u0026#39; \\  --set oap.envoy.als.enabled=true \\  --set elasticsearch.enabled=false 我们在 istio-system 的命名空间内部署 SkyWalking,使 SkyWalking OAP 服务可以使用地址 skywalking-oap.istio-system:11800 访问,在上一步中,我们曾告诉过 ALS 应往此处发放它们的日志。\n我们也在 SkyWalking OAP 中启动 ALS 分析器︰oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh。分析器会对访问日志进行分析,并解析日志中的 IP 地址和 Kubernetes 中的真实服务名称,以建立拓扑。\n为了从 Kubernetes 集群处获取元数据(例如 Pod IP 和服务名称),以识别相应的 IP 地址,我们还会设定 oap.envoy.als.enabled=true,用来申请一个对元数据有访问权的 ClusterRole。\nexport POD_NAME=$(kubectl get pods -A -l \u0026#34;app=skywalking,release=skywalking,component=ui\u0026#34; -o name) echo 
$POD_NAME kubectl -n istio-system port-forward $POD_NAME 8080:8080 现在到你的浏览器上访问 http://localhost:8080。你应该会看到 SkyWalking 的 Dashboard。 Dashboard 现在应该是空的,但稍后部署应用和生成流量后,它就会被填满。\n部署 Bookinfo 应用 运行︰\nexport ISTIO_VERSION=1.7.1 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s minikube tunnel 现在到你的浏览器上进入 http://localhost/productpage。你应该会看到典型的 bookinfo 应用画面。重新整理该页面几次,以生成足够的访问日志。\n完成了! 这样做,你就成功完成设置了!再查看 SkyWalking 的 WebUI,你应该会看到 bookinfo 应用的拓扑,以及它每一个单独服务的指标数据。\n疑难解答  检查所有 pod 的状态︰kubectl get pods -A。 SkyWalking OAP 的日志︰kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=oap\u0026quot; -o name)。 SkyWalking WebUI 的日志︰kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o name)。 确保 WebUI 右下方的时区设定在 UTC +0。  自定义服务器名称 SkyWalking 社区在 ALS 方案的 8.3.0 版本中,作出了许多改善。你现在可以在映射 IP 地址时,决定如何用 service 和 pod 变量去自定义服务器的名称。例如,将 K8S_SERVICE_NAME_RULE 设置为 ${service.metadata.name}-${pod.metadata.labels.version},就可以使服务名称带上版本的标签,类似 reviews-v1、reviews-v2 和 reviews- v3,而不再是单个服务 review[2]。\n在 VM 上使用 ALS Kubernetes 很受欢迎,可是 VM 呢?正如我们之前所说,为了替 IP 找到对应的服务,SkyWalking 需要对 Kubernetes 集群有访问权,以获得服务的元数据和 Pod 的 IP。可是在 VM 环境中,我们并没有来源去收集这些元数据。\n在下一篇文章,我们会介绍另外一个 ALS 分析器,它是建立于 Envoy 的元数据交换机制。有了这个分析器,你就可以在 VM 环境中观察服务网格了。万勿错过!\n如果你希望在 ALS 方案或是混合式网格可观察性上获得商业支持,TSB 会是一个好选项。\n额外资源\n KubeCon 2019 的录影视频。 在官方网站上获得更多有关 SkyWalking 的最新消息吧。  如有任何问题或反馈,发送邮件至 learn@tetrate.io。\nApache SkyWalking 创始人吴晟和 SkyWalking 的核心贡献者柯振旭都是 Tetrate 的工程师。 Tetrate 的内容创造者编辑与贡献于本文章。 Tetrate 帮助企业采用开源服务网格工具,包括 Istio、Envoy 和 Apache SkyWalking,让它们轻松管理微服务,在任何架构上运行服务网格,以至现代化他们的应用。\n[1]https://github.com/envoyproxy/envoy/blob/549164c42cae84b59154ca4c36009e408aa10b52/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto\n[2]https://github.com/apache/skywalking/pull/5722\n","excerpt":"如果你正在寻找在 Mixer 方案以外观察服务网格的更优解,本文正符合你的需要。\n Apache Skywalking︰特别为微服务、云原生和容器化(Docker、Kubernetes、Mesos)架 …","ref":"/zh/observe-service-mesh-with-skywalking-and-envoy-access-log-service/","title":"使用 SkyWalking 和 Envoy 访问日志服务对服务网格进行观察"},{"body":"SkyWalking 8.3.0 is released. Go to downloads page to find release tars.\nProject  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. 
Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.3.0 is released. Go to downloads page to find release tars.\nProject  Test: …","ref":"/events/release-apache-skwaylking-apm-8-3-0/","title":"Release Apache SkyWalking APM 8.3.0"},{"body":"Python 作为一门功能强大的编程语言,被广泛的应用于计算机行业之中; 在微服务系统架构盛行的今天,Python 以其丰富的软件生态和灵活的语言特性在服务端编程领域也占有重要的一席之地。 本次分享将阐述 Apache SkyWalking 在微服务架构中要解决的问题,展示如何使用 Apache SkyWalking 来近乎自动化地监控 Python 后端应用服务,并对 Apache SkyWalking 的 Python 语言探针的实现技术进行解读。\nB站视频地址\n","excerpt":"Python 作为一门功能强大的编程语言,被广泛的应用于计算机行业之中; 在微服务系统架构盛行的今天,Python 以其丰富的软件生态和灵活的语言特性在服务端编程领域也占有重要的一席之地。 本次分享将 …","ref":"/zh/2020-11-30-pycon/","title":"[视频] PyCon China 2020 - Python 微服务应用性能监控"},{"body":"SkyWalking CLI 0.5.0 is released. 
Go to downloads page to find release tars.\n  Features\n Use template files in yaml format instead Refactor metrics command to adopt metrics-v2 protocol Use goroutine to speed up dashboard global command Add metrics list command    Bug Fixes\n Add flags of instance, endpoint and normal for metrics command Fix the problem of unable to query database metrics    Chores\n Update release guide doc Add screenshots for use cases in README.md Introduce generated codes into codebase    ","excerpt":"SkyWalking CLI 0.5.0 is released. Go to downloads page to find release tars.\n  Features\n Use …","ref":"/events/release-apache-skywalking-cli-0-5-0/","title":"Release Apache SkyWalking CLI 0.5.0"},{"body":"","excerpt":"","ref":"/tags/satellite/","title":"Satellite"},{"body":" Author: Jiapeng Liu. Baidu. skywalking-satellite: The Sidecar Project of Apache SkyWalking Nov. 25th, 2020  A lightweight collector/sidecar which can be deployed close to the target monitored system, to collect metrics, traces, and logs. It also provides advanced features, such as local cache, format transformation, and sampling.\nDesign Thinking Satellite is a 2-level system that collects observability data from other core systems. So, the core element of the design is to guarantee data stability from Pod startup all the way to Pod shutdown, avoiding alarm loss. All modules are designed as plugins, and if you have other ideas, you can add them yourself.\nSLO  A single gatherer supports \u0026gt; 1000 ops (based on 0.5 CPU core, 50M memory) At least once delivery. (Optional) Data stability: 99.999%. (Optional)  Because they are influenced by the choice of plugins, some items in the SLO are optional.\nRole Satellite would be running as a Sidecar. Although Daemonset mode would take up fewer resources, it would cause more trouble for agent forwarding, so we prefer Sidecar mode while keeping its cost low. Daemonset mode is also planned to be supported in the future.\nCore Modules The Satellite has 3 core modules, which are Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing the data to the Queue. The Processor module is responsible for reading data from the queue and processing data with a series of filter chains. The Sender module is responsible for asynchronously processing and forwarding the data to the external services in batch mode. After a successful send, Sender also acknowledges the offset of the Queue in the Gatherer.  Detailed Structure The overall design is shown in detail in the figure below. We will explain the specific components one by one.\nGatherer Concepts The Gatherer has 4 components to support the data collection, which are Input, Collector, Worker, and Queue. There are 2 roles in the Worker, which are Fetcher and Receiver. A minimal sketch of these abstractions follows this list.\n The Input is an abstraction of the input source, which is usually mapped to a configuration file. The Collector is created by the Input, and many collectors could be created by the same Input. For example, when a log path has been configured as /var/*.log in an Input, the number of collectors is the same as the number of files in this path. The Fetcher and Receiver are the real workers that collect data. The receiver interface is an abstraction, which has multiple implementations, such as a gRPC receiver and an HTTP receiver. Here are some specific use cases:  Trace Receiver is a gRPC server for receiving trace data created by Skywalking agents. Log Receiver is also a gRPC server for receiving log data which is collected by Skywalking agents. (In the future we want Skywalking Agent to support log sending; RPC-based log sending is more efficient and needs fewer resources than file reading. For example, file reading will bring IO pressure and performance cost under multi-line splicing.) Log Fetcher is like Filebeat, which fits the common log collection scenario. This fetcher will have more responsibility than any other worker because it needs to record the offset and process the multi-line splicing. This feature will be implemented in the future. Prometheus Fetcher supports a new way to fetch Prometheus data and push the data to the upstream. \u0026hellip;\u0026hellip;   The Queue is a buffer module to decouple collection and transmission. In the 1st release version, we will use persistent storage to ensure data stability. But the implementation is a plug-in design that can support pure memory queues later.  
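To make the Gatherer concepts above more concrete, here is a minimal Go sketch of how the Queue, Receiver, and Fetcher plugin abstractions could look. All names in this sketch are hypothetical illustrations and are not the actual skywalking-satellite API.
package gatherer

// Event is a hypothetical unit of observability data flowing through Satellite
// (for example a trace segment, a log record, or a metric batch).
type Event struct {
	Name    string
	Payload []byte
}

// Queue buffers events between collection and transmission. Implementations are
// plug-ins: a persistent mmap-backed queue first, pure memory queues later.
type Queue interface {
	Enqueue(e *Event) error
	Dequeue() (e *Event, offset int64, err error)
	// Ack is called once the Sender has successfully delivered a batch,
	// so the Gatherer can advance the committed offset.
	Ack(offset int64)
}

// Receiver passively accepts data pushed by agents, e.g. over a gRPC server.
type Receiver interface {
	Start(output chan<- *Event) error
	Close() error
}

// Fetcher actively pulls data, e.g. tailing log files or scraping Prometheus.
type Fetcher interface {
	Fetch(output chan<- *Event) error
	Close() error
}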
The data flow We use the Trace Receiver as an example to introduce the data flow. Queue MmapQueue We have simplified the design of MmapQueue to reduce the resource cost on memory and disk.\nConcepts There are 2 core concepts in MmapQueue.\n Segment: Segment is the real data store; it provides large-space storage and, by using mmap, keeps the impact on read and write performance as small as possible. We also avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  Segment One MmapQueue has a directory to store the whole data. The Queue directory is made up of many segments and 1 meta file. The number of segments is computed from 2 params, which are the max cost of the Queue and the cost of each segment. For example, if the max cost is 512M and each segment costs 256K, the directory can hold up to 2000 files. Once capacity is exceeded, an overwrite policy is adopted, which means the next new segment would override the oldest file.\nEach segment in the Queue will be N times the size of the page cache and will be read and written in an append-only sequence rather than randomly. This improves the performance of the Queue. For example, each Segment is a 128k file, as shown in the figure below.\nMeta The Meta is an mmap file that contains only 56Bit. There are 5 concepts in the Meta.\n Version: A version flag. Watermark Offset: Points to the current writing space.  ID: SegmentID Offset: The offset in the Segment.   Writed Offset: Points to the latest refreshed data; it would be overridden by the writing offset after each periodic refresh.  ID: SegmentID Offset: The offset in the Segment.   Reading Offset: Points to the current reading space.  ID: SegmentID Offset: The offset in the Segment.   Committed Offset: Points to the latest committed offset, which is equal to the latest acked offset plus one.  ID: SegmentID Offset: The offset in the Segment.    The following diagram illustrates the transformation process, and a minimal code sketch of this offset bookkeeping follows the list.\n The publisher receives data and wants to write to the Queue.  The publisher would read Writing Offset to find a space and increment it by one. After this, the publisher will write the data to that space.   The consumer wants to read the data from the Queue.  The consumer would read Reading Offset to find the current read offset and increment it by one. After this, the consumer will read the data from that space.   On each periodic flush, the flusher would override Watermark Offset with Writing Offset. When the ack operation is triggered, Committed Offset would advance by the size of the acked batch. When facing a crash, Writing Offset and Reading Offset would be overridden by Watermark Offset and Committed Offset, because Reading Offset and Writing Offset alone cannot guarantee at-least-once delivery. 
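The following Go snippet is a rough, self-contained sketch of that offset bookkeeping. It only illustrates the rules described above; the field and function names are hypothetical and do not come from the skywalking-satellite code base.
package queue

import "sync"

// position addresses a slot as (segment ID, offset inside that segment).
type position struct {
	SegmentID int
	Offset    int
}

// meta mirrors the offsets kept in the mmap meta file.
type meta struct {
	mu        sync.Mutex
	writing   position // advanced by the publisher on every write
	watermark position // the periodic flush copies writing into watermark
	reading   position // advanced by the consumer on every read
	committed position // advanced when the Sender acknowledges a batch
}

// flush persists the in-flight writing offset as the watermark.
func (m *meta) flush() {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.watermark = m.writing
}

// ack advances the committed offset after n events were sent successfully,
// wrapping into the next segment when the current one is fully consumed.
func (m *meta) ack(n, slotsPerSegment int) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.committed.Offset += n
	for m.committed.Offset >= slotsPerSegment {
		m.committed.Offset -= slotsPerSegment
		m.committed.SegmentID++
	}
}

// recover restores the volatile offsets after a crash: reading restarts from the
// last committed position (at-least-once delivery), writing restarts from the watermark.
func (m *meta) recover() {
	m.reading = m.committed
	m.writing = m.watermark
}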
Mmap Performance Test The test verifies the efficiency of mmap at a low memory cost.\n The rate of data generation: 7.5K per item, 1043 items/s (based on an Aifanfan online pod). The test structure is based on Bigqueue because of its similar structure. Test tool: Go Benchmark Test Command: go test -bench BenchmarkEnqueue -run=none -cpu=1 Result on Mac (15-inch, 2018, 16 GB 2400 MHz DDR4, 2.2 GHz Intel Core i7, SSD):  BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-384KB 66501 21606 ns/op 68 B/op 1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-1.25MB 72348 16649 ns/op 67 B/op 1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-16KB/MaxMem-1.25MB 39996 33199 ns/op 103 B/op 1 allocs/op   Result on Linux (Intel Xeon E5-2450 V2 8C 2.5GHZ*2, INVENTEC PC3L-10600 16G*8, INVENTEC SATA 4T 7.2K*8):  BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-384KB 126662\t12070 ns/op\t62 B/op\t1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-1.25MB 127393\t12097 ns/op\t62 B/op\t1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-16KB/MaxMem-1.25MB 63292\t23806 ns/op\t92 B/op\t1 allocs/op   Conclusion: based on the above tests, mmap satisfies the write-speed requirement with very low memory consumption when running as a sidecar.  Processor The Processor has 3 core components: Consumer, Filter, and Context.\n The Consumer is created by the Queue it reads from. The consumer has its own read offset and committed offset, which is similar to the offset concept of Spark Streaming. Due to the particularity of APM data preprocessing, Context is a unique concept in the Satellite filter chain; it supports storing intermediate events, because an intermediate-state event sometimes also needs to be sent. The Filter is the core data processing part, which is similar to the processors in Beats. Due to the context, the upstream and downstream filters are logically coupled.  Sender  The BatchConverter decouples the Processor and Sender by staging the Buffer structure, providing parallelization. But if the BatchBuffer is full, the Processor would be blocked. The Follower is the real send worker; it has a client, such as a gRPC client or Kafka client, and a fallback strategy. The fallback strategy is an interface, and more strategies can be added to handle abnormal conditions, such as network instability or an OAP cluster upgrade. When a batch is sent successfully, the Committed Offset in the Queue is advanced by the size of that batch.  High Performance The scenario for Satellite is collecting a large amount of APM data. We guarantee high performance in the following ways.\n Shorten the transmission path: only 2 components, the Queue and the Processor, sit between receiving and forwarding. High-performance Queue: MmapQueue provides a big, fast, and persistent queue based on memory-mapped files and a ring structure. The Processor maintains a linear design, so the filters can be processed functionally in one goroutine to avoid too much goroutine switching.
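The Follower's fallback behaviour described above can be sketched as a small Go interface: a synchronous strategy that blocks until the batch is either delivered or given up. This is a hypothetical illustration of the idea, not the project's real API; FallbackStrategy and retryThenDrop are names we made up for this sketch.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// FallbackStrategy is a hypothetical version of the pluggable strategy the
// Follower runs when a batch fails to send.
type FallbackStrategy interface {
	// Do runs synchronously and blocks the Follower until it either
	// succeeds or decides to give up the batch.
	Do(send func() error) (delivered bool)
}

// retryThenDrop retries a few times with a fixed backoff, then gives up,
// so congestion blocks the pipeline only for a bounded time.
type retryThenDrop struct {
	attempts int
	backoff  time.Duration
}

func (s retryThenDrop) Do(send func() error) bool {
	for i := 0; i < s.attempts; i++ {
		if err := send(); err == nil {
			return true // only now would the Committed Offset be advanced
		}
		time.Sleep(s.backoff)
	}
	return false // give up this batch; the Follower moves on to the next one
}

func main() {
	strategy := retryThenDrop{attempts: 3, backoff: 10 * time.Millisecond}
	calls := 0
	delivered := strategy.Do(func() error {
		calls++
		if calls < 3 {
			return errors.New("oap cluster unavailable") // e.g. congestion or an upgrade
		}
		return nil
	})
	fmt.Println("delivered:", delivered, "after", calls, "attempts")
}
```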
Stability Stability is a core point of Satellite. It can be considered in many ways, such as stable resource cost, stable running, and crash recovery.\nStable resource cost In terms of resource cost, memory and CPU are the main concerns.\nOn the CPU side, we keep a sequential structure to avoid a large number of retries when facing network congestion, and Satellite avoids busy pulling when the Queue is empty, based on the offset design of the Queue.\nOn the memory side, we guarantee that there is only one data cache in Satellite, which is the Queue. For the queue structure, we also keep the size fixed based on the ring structure to maintain a stable memory cost. Also, MmapQueue is designed to minimize memory consumption and provide persistence while keeping speed as fast as possible. In the future, we may support strategies to dynamically control the size of MmapQueue to handle more extreme conditions.\nStable running There are many cases of network congestion, such as a network problem on the host node, an OAP cluster being upgraded, or an unstable Kafka cluster. When facing the above cases, the Follower runs the fallback strategy and blocks the downstream processes. Once the fallback strategy finishes, either by sending successfully or by giving up the batch, the Follower processes the next batch.\nCrash Recovery Crash recovery only works when the user selects MmapQueue in the Gatherer, because of its persistent file design. When facing a crash, the Reading Offset is overridden by the Committed Offset, which ensures at-least-once delivery, and the Watermark Offset overrides the Writing Offset, which ensures the consumer always works properly and avoids encountering unreadable, defective data blocks.\nBuffer pool The Queue stores fixed-structure objects, so an object buffer pool is an efficient way to reuse memory and avoid GC, for example for:\n the ackChan the batch converter  Some metrics Satellite should also collect its own monitoring metrics. The following metrics are necessary for Satellite.\n cpu memory goroutine number gatherer_writing_offset gatherer_watermark_offset processor_reading_count sender_committed_offset sender_abandoned_count sender_retry_count  Input and Output We will reuse this diagram to explain the input and output.\n Input  Because both push and pull modes are supported, the Queue is a core component. The Queue is designed as a ring with fixed capacity, which means the oldest data would be overridden by the latest data. If users find data loss, they should raise the capacity ceiling of the memory Queue. MmapQueue generally doesn\u0026rsquo;t face this problem unless the Sender transport is congested.   Output  If the BatchBuffer is full, the Processor would be blocked. If the Channel is full, the downstream components would be blocked, such as the BatchConverter and the Processor. When the SenderWorker fails to send, the batch data goes through a fallback strategy that blocks pulling data from the Channel. The strategy is a part of the Sender, and it runs synchronously. Once the fallback strategy finishes, either by sending successfully or by giving up the batch, the SenderWorker keeps pulling data from the Channel.    Questions How do we avoid busy pulling when the Queue is empty? If the Watermark Offset is less than or equal to the Reading Offset, a signal is sent to the consumer so that it stops pulling.\nWhy reuse files in the Queue?
The unified model is a ring in Queue, that limits fixed resources cost in memory or disk.In Mmap Queue, reusing files turns the delete operations into an overwrite operations, effectively reducing the creation and deletion behavior in files.\nWhat are the strategies for file creation and deletion in MmapQueue? As Satellite running, the number of the files in MmapQueue would keep growing until up to the maximum capacity. After this, the old files will be overridden by the new data to avoid file deletion. When the Pod died, all resources were recycled.\n","excerpt":"Author: Jiapeng Liu. Baidu. skywalking-satellite: The Sidecar Project of Apache SkyWalking Nov. …","ref":"/blog/2020-11-25-skywalking-satellite-0.1.0-design/","title":"The first design of Satellite 0.1.0"},{"body":"SkyWalking Python 0.4.0 is released. Go to downloads page to find release tars.\n Feature: Support Kafka reporter protocol (#74) BugFix: Move generated packages into skywalking namespace to avoid conflicts (#72) BugFix: Agent cannot reconnect after server is down (#79) Test: Mitigate unsafe yaml loading (#76)  ","excerpt":"SkyWalking Python 0.4.0 is released. Go to downloads page to find release tars.\n Feature: Support …","ref":"/events/release-apache-skywalking-python-0-4-0/","title":"Release Apache SkyWalking Python 0.4.0"},{"body":"活动介绍 Apache SkyWalking 2020 开发者线下活动,社区创始人,PMC成员和Committer会亲临现场,和大家交流和分享项目中的使用经验。 以及邀请Apache Local Community 北京的成员一起分享Apache文化和Apache之道。\n日程安排 开场演讲 09:30-09:50 SkyWalking\u0026rsquo;s 2019-2020 and beyond\n吴晟,Tetrate.io创始工程师,Apache SkyWalking创始人\nB站视频地址\n 上午 09:55-10:30 贝壳全链路跟踪实践\n赵禹光,赵禹光,贝壳找房监控技术负责人,Apache SkyWalking PMC成员\n10:35-11:15 SkyWalking在百度爱番番部门实践\n刘嘉鹏,百度,SkyWalking contributor\n11:15-11:55 非计算机背景的同学如何贡献开源\n缘于一位本科在读的社会学系的同学的问题,这让我反思我们开源community的定位和Open的程度,于是,适兕从生产、分发、消费的软件供应的角度,根据涉及到的角色,然后再反观现代大学教育体系的专业,进一步对一个开源项目和community需要的专业背景多样性进行一个阐述和探究。并以ALC Beijing为例进行一个事例性的说明。\n适兕,开源布道师,ALC Beijing member,开源之道主创,开源社教育组成员。\nB站视频地址\n 下午 13:30-14:10 如何从 Apache SkyWalking 社区学习 Apache Way\n温铭,支流科技联合创始人&CEO,Apache APISIX 项目 VP, Apache SkyWalking Committer\n14:10-14:50 Apache SkyWalking 在小米公司的应用\n宋振东,小米公司小米信息技术部 skywalking 研发负责人\n14:50-15:30 Istio全生命周期监控\n高洪涛,Tetrate.io创始工程师,Apache SkyWalking PMC成员\n15:30-15:45 茶歇\n15:45-16:25 针对HikariCP数据库连接池的监控\n张鑫 Apache SkyWalking PMC 成员\n16:25-17:00 SkyWalking 与 Nginx 的优化实践\n王院生 深圳支流科技创始人兼 CTO,Apache APISIX 创始人 \u0026amp; PMC成员\nB站视频地址\n","excerpt":"活动介绍 Apache SkyWalking 2020 开发者线下活动,社区创始人,PMC成员和Committer会亲临现场,和大家交流和分享项目中的使用经验。 以及邀请Apache Local …","ref":"/zh/2020-11-23-devcon/","title":"[视频] SkyWalking DevCon 2020"},{"body":"The APM system provides the tracing or metrics for distributed systems or microservice architectures. Back to APM themselves, they always need backend storage to store the necessary massive data. What are the features required for backend storage? Simple, fewer dependencies, widely used query language, and the efficiency could be into your consideration. Based on that, traditional SQL databases (like MySQL) or NoSQL databases would be better choices. However, this topic will present another backend storage solution for the APM system viewing from NewSQL. Taking Apache Skywalking for instance, this talking will share how to make use of Apache ShardingSphere, a distributed database middleware ecosystem to extend the APM system\u0026rsquo;s storage capability.\nAs a senior DBA worked at JD.com, the responsibility is to develop the distributed database and middleware, and the automated management platform for database clusters. 
As a PMC of Apache ShardingSphere, I am willing to contribute to the OS community and explore the area of distributed databases and NewSQL.\n  ","excerpt":"The APM system provides the tracing or metrics for distributed systems or microservice …","ref":"/blog/2020-11-21-apachecon-obs-shardingsphere/","title":"[Video] Another backend storage solution for the APM system"},{"body":"Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open-source and scalable platform for all your APIs and microservices. Apache SkyWalking: an APM(application performance monitor) system, especially designed for microservices, cloud-native and container-based (Docker, Kubernetes, Mesos) architectures. Through the powerful plug-in mechanism of Apache APISIX, Apache Skywalking is quickly supported, so that we can see the complete life cycle of requests from the edge to the internal service. Monitor and manage each request in a visual way, and improve the observability of the service.\n  ","excerpt":"Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, …","ref":"/blog/2020-11-21-apachecon-obs-apisix/","title":"[Video] Improve Apache APISIX observability with Apache SkyWalking"},{"body":"Today\u0026rsquo;s monitoring solutions are geared towards operational tasks, displaying behavior as time-series graphs inside dashboards and other abstractions. These abstractions are immensely useful but are largely designed for software operators, whose responsibilities require them to think in systems, rather than the underlying source code. This is problematic given that an ongoing trend of software development is the blurring boundaries between building and operating software. This trend makes it increasingly necessary for programming environments to not just support development-centric activities, but operation-centric activities as well. Such is the goal of the feedback-driven development approach. By combining IDE and APM technology, software developers can intuitively explore multiple dimensions of their software simultaneously with continuous feedback about their software from inception to production.\nBrandon Fergerson is an open-source software developer who does not regard himself as a specialist in the field of programming, but rather as someone who is a devoted admirer. He discovered the beauty of programming at a young age and views programming as an art and those who do it well to be artists. He has an affinity towards getting meta and combining that with admiration of programming, has found source code analysis to be exceptionally interesting. Lately, his primary focus involves researching and building AI-based pair programming technology.\n  ","excerpt":"Today\u0026rsquo;s monitoring solutions are geared towards operational tasks, displaying behavior as …","ref":"/blog/2020-11-21-apachecon-obs-sourcemarker/","title":"[Video] SourceMarker - Continuous Feedback for Developers"},{"body":"Over the past few years, and coupled with the growing adoption of microservices, distributed tracing has emerged as one of the most commonly used monitoring and troubleshooting methodologies. New tracing tools are increasingly being introduced, driving adoption even further. One of these tools is Apache SkyWalking, a popular open-source tracing, and APM platform. 
This talk explores the history of the SkyWalking storage module, shows the evolution of distributed tracing storage layers, from the traditional relational database to document-based search engine. I hope that this talk contributes to the understanding of history and also that it helps to clarify the different types of storage that are available to organizations today.\nHongtao Gao is the engineer of tetrate.io and the former Huawei Cloud expert. One of PMC members of Apache SkyWalking and participates in some popular open-source projects such as Apache ShardingSphere and Elastic-Job. He has an in-depth understanding of distributed databases, container scheduling, microservices, ServicMesh, and other technologies.\n  ","excerpt":"Over the past few years, and coupled with the growing adoption of microservices, distributed tracing …","ref":"/blog/2020-11-21-apachecon-obs-storage/","title":"[Video] The history of distributed tracing storage"},{"body":" 作者: 赵禹光 原文链接: 亲临百人盛况的Apache SkyWalking 2020 DevCon,看见了什么? 2020 年 10 月 29 日  活动现场 2020年11月14日Apache SkyWalking 2020 DevCon由贝壳找房和tetrate赞助,Apache SkyWalking、云原生、Apache APISIX、Apache Pulsar 和 ALC Beijing 五大社区合作,在贝壳找房一年级会议室盛大举行,本次活动主要面对Apache SkyWalking的使用者、开发者和潜在用户。线上线下共有230多人报名。经统计,实际参加活动人数超过130人,近60%的人愿意抽出自己的休息时间,来交流学习Apache SkyWalking和开源文化。不难看见,在可预见的未来,中国的开源项目很快将进入下一个维度,那必定是更广的社区人员参与,更高技术知识体现,更强的线上稳定性和及时修复能力。\n活动历程: 09:30-09:50 SkyWalking\u0026rsquo;s 2019-2020 and beyond 吴晟老师本次分享:回顾2020年度SkyWalking发布的重要的新特性,出版的《Apache SkyWalking实战》图书,社区的进展,开源爱好者如何参与SkyWalking建设,和已知社区在主导的SkyWalking2021年孵化中的新特性。\n09:55-10:30 贝壳全链路跟踪实践 赵禹光老师(作者)本次分享:回顾了贝壳找房2018年至今,贝壳找房的全链路跟踪项目与SkyWalking的渊源,分享了SkyWalking在实践中遇到的问题,和解决方案。以及SkyWalking近10%的Committer都曾经或正在贝壳人店平台签中研发部,工作过的趣事。\n10:35-11:15 刘嘉鹏老师分享 SkyWalking在百度爱番番部门实践 刘嘉鹏老师本次分享:回顾了百度爱番番部门在使用SkyWalking的发展历程\u0026amp;现状,CRM SAAS产品在近1年使用SkyWalking实践经验,以及如何参与SkyWalking的贡献,并成为的Apache Committer。\n11:15-11:55 适兕老师分享 非计算机背景的同学如何贡献开源 适兕是国内很有名的开源布道师,本次分享从生产、分发、消费的软件供应的角度,根据涉及到的角色,然后再反观现代大学教育体系的专业,进一步对一个开源项目和community需要的专业背景多样性进行一个阐述和探究。并以ALC Beijing为例进行一个事例性的说明,非计算机背景的同学如何贡献开源。\n13:30-14:10 如何从 Apache SkyWalking 社区学习 Apache Way 14:10-14:50 Apache SkyWalking 在小米公司的应用 宋振东老师是小米信息技术部分布式链路追踪系统研发负责人,分别以小米公司,业务开发、架构师、SRE、Leader和QA等多个视角,回顾了SkyWalking在小米公司的应用实践。从APM的产品选型到实际落地,对其他公司准备使用SkyWalking落地,非常有借鉴意义。\n14:50-15:30 Istio全生命周期监控 高洪涛老师本次分享了SkyWalking和可观测云原生等非常前沿的知识布道,其中有,云原生在Logging、Metrics和Tracing的相关知识,Istio,K8S等方面的实践。对一些公司在前沿技术的落地,非常有借鉴意义。\n15:45-16:25 针对HikariCP数据库连接池的监控 张鑫老师本次分享了,以一个SkyWalking无法Tracing的实际线上故障的故事出发,讲述如何定位,和补充SkyWalking插件的不足,并将最后的实践贡献到社区。对大家参与开源很有帮助。\n16:25-17:00 SkyWalking 与 Nginx 的优化实践 王院生老师本次分享SkyWalking社区和APISIX社区合作,在Nginx插件的实践过程,对社区之间的如何开展合作,非常有借鉴意义,院生老师的工作\u0026amp;开源态度,很好的诠释Geek精神,也是我们互联网从业者需要学习恪守的。\nApache SkyWalking 2020 DevCon 讲师PPT Apache SkyWalking 2020 DevCon 讲师 PPT\nSkyWalking 后续发展计划 正如吴晟老师所说:No plan, open to the community,Apache SkyWalking是没有RoadMap。社区的后续发展,依赖于每个人在社区的贡献。与其期待,不如大胆设想,将自己的设计按照Apache Way贡献到SkyWalking,你就是下一个Apache SkyWalking Commiter,加入Member of SkyWalking大家庭,让社区因为你,而更加有活力。\n","excerpt":"作者: 赵禹光 原文链接: 亲临百人盛况的Apache SkyWalking 2020 DevCon,看见了什么? 2020 年 10 月 29 日  活动现场 2020年11月14日Apache …","ref":"/zh/2020-11-21-what-do-we-see-at-the-apache-skywalking-2020-devcon-event/","title":"亲临百人盛况的Apache SkyWalking 2020 DevCon,看见了什么?"},{"body":"Sheng Wu is a founding engineer at tetrate.io, leads the observability for service mesh and hybrid cloud. A searcher, evangelist, and developer in the observability, distributed tracing, and APM. He is a member of the Apache Software Foundation. 
Love open source software and culture. Created the Apache SkyWalking project and being its VP and PMC member. Co-founder and PMC member of Apache ShardingSphere. Also as a PMC member of Apache Incubator and APISIX. He is awarded as Microsoft MVP, Alibaba Cloud MVP, Tencent Cloud TVP.\nIn the Apache FY2020 report, China is on the top of the download statistics. More China initiated projects joined the incubator, and some of them graduated as the Apache TLP. Sheng joined the Apache community since 2017, in the past 3 years, he witnessed the growth of the open-source culture and Apache way in China. Many developers have joined the ASF as new contributors, committers, foundation members. Chinese enterprises and companies paid more attention to open source contributions, rather than simply using the project like before. In the keynote, he would share the progress about China embracing the Apache culture, and willing of enhancing the whole Apache community.\n  ","excerpt":"Sheng Wu is a founding engineer at tetrate.io, leads the observability for service mesh and hybrid …","ref":"/blog/2020-11-21-apachecon-keynote/","title":"[Video] Apache grows in China"},{"body":"SkyWalking Client JS 0.2.0 is released. Go to downloads page to find release tars.\n Bug Fixes  Fixed a bug in sslTime calculate. Fixed a bug in server response status judgment.    ","excerpt":"SkyWalking Client JS 0.2.0 is released. Go to downloads page to find release tars.\n Bug Fixes  Fixed …","ref":"/events/release-apache-skywalking-client-js-0-2-0/","title":"Release Apache SkyWalking Client JS 0.2.0"},{"body":"SkyWalking Cloud on Kubernetes 0.1.0 is released. Go to downloads page to find release tars.\n Add OAPServer CRDs and controller.  ","excerpt":"SkyWalking Cloud on Kubernetes 0.1.0 is released. Go to downloads page to find release tars.\n Add …","ref":"/events/release-apache-skywalking-cloud-on-kubernetes-0.1.0/","title":"Release Apache SkyWalking Cloud on Kubernetes 0.1.0"},{"body":"Based on his continuous contributions, Jiapeng Liu (a.k.a evanljp) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Jiapeng Liu (a.k.a evanljp) has been voted as a new …","ref":"/events/welcome-jiapeng-liu-as-new-committer/","title":"Welcome Jiapeng Liu as new committer"},{"body":"SkyWalking Kubernetes Helm Chart 4.0.0 is released. Go to downloads page to find release tars.\n Allow overriding configurations files under /skywalking/config. Unify the usages of different SkyWalking versions. Add Values for init container in case of using private regestry. Add services, endpoints resources in ClusterRole.  ","excerpt":"SkyWalking Kubernetes Helm Chart 4.0.0 is released. Go to downloads page to find release tars. …","ref":"/events/release-apache-skywalking-kubernetes-helm-chart-4.0.0/","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.0.0"},{"body":"SkyWalking Client JS 0.1.0 is released. Go to downloads page to find release tars.\n Support Browser Side Monitoring. Require SkyWalking APM 8.2+.  ","excerpt":"SkyWalking Client JS 0.1.0 is released. Go to downloads page to find release tars.\n Support Browser …","ref":"/events/release-apache-skywalking-client-js-0-1-0/","title":"Release Apache SkyWalking Client JS 0.1.0"},{"body":"","excerpt":"","ref":"/tags/browser/","title":"Browser"},{"body":" Author: Zhenxu Ke, Sheng Wu, Hongtao Gao, and Tevah Platt. tetrate.io Original link, Tetrate.io blog Oct. 
29th, 2020  Apache SkyWalking, the observability platform, and open-source application performance monitor (APM) project, today announced the general availability of its 8.2 release. The release extends Apache SkyWalking’s functionalities and monitoring boundary to the browser side.\nBackground SkyWalking is an observability platform and APM tool that works with or without a service mesh, providing automatic instrumentation for microservices, cloud-native and container-based applications. The top-level Apache project is supported by a global community and is used by Alibaba, Huawei, Tencent, Baidu, ByteDance, and scores of others.\nBrowser side monitoring APM helps SRE and Engineering teams to diagnose system failures, or optimize the systems before they become intolerably slow. But is it enough to always make the users happy?\nIn 8.2.0, SkyWalking extends its monitoring boundary to the browser side, e.g., Chrome, or the network between Chrome and the backend service, or the codes running in the browser. With this, not only can we monitor the backend services and requests sent by the browser as usual, but also the front end rendering speed, error logs, etc., which are the most efficient metrics for capturing the experiences of our end users. (This does not currently extend to IoT devices, but this feature moves SkyWalking a step in that direction).\nWhat\u0026rsquo;s more, SkyWalking browser monitoring also provides data about how the users use products, such as PV(page views), UV(unique visitors), top N PV(page views), etc., which can give a product team clues for optimizing their products.\nQuery traces by tags In SkyWalking\u0026rsquo;s Span data model, there are many important fields that are already indexed and can be queried by the users, but for the sake of performance, querying by Span tags was not supported until now. In SkyWalking 8.2.0, we allow users to query traces by specified tags, which is extremely useful. For example, SRE engineers running tests on the product environment can tag the synthetic traffic and query by this tag later.\nMeter Analysis Language In 8.2.0, the meter system provides a functional analysis language called MAL(Meter Analysis Language) that allows users to analyze and aggregate meter data in the OAP streaming system. The result of an expression can be ingested by either the agent analyzer or OpenTelemetry/Prometheus analyzer.\nComposite Alert Rules Alerting is a good way to discover system failures in time. A common problem is that we configure too many triggers just to avoid missing any possible issue. Nobody likes to be woken up by alert messages at midnight, only to find out that the trigger is too sensitive. These kinds of alerts become noisy and don\u0026rsquo;t help at all.\nIn 8.2.0, users can now configure composite alert rules, where composite rules take multiple metrics dimensions into account. With composite alert rules, we can leverage as many metrics as needed to more accurately determine whether there’s a real problem or just an occasional glitch.\nCommon scenarios like successful rate \u0026lt; 90% but there are only 1~2 requests can now be resolved by a composite rule, such as traffic(calls per minute) \u0026gt; n \u0026amp;\u0026amp; successful rate \u0026lt; m%.\nOther Notable Enhancements  The agent toolkit exposes some APIs for users to send customizable metrics. The agent exclude_plugins allows you to exclude some plugins; mount enables you to load a new set of plugins. More than 10 new plugins have been contributed to the agent. 
The alert system natively supports sending alert messages to Slack, WeChat, DingTalk.  Additional Resources  Read more about the SkyWalking 8.2 release highlights. Get more SkyWalking updates on Twitter.  ","excerpt":"Author: Zhenxu Ke, Sheng Wu, Hongtao Gao, and Tevah Platt. tetrate.io Original link, Tetrate.io blog …","ref":"/blog/2020-10-29-skywalking8-2-release/","title":"Features in SkyWalking 8.2: Browser Side Monitoring; Query Traces by Tags; Meter Analysis Language"},{"body":"","excerpt":"","ref":"/zh_tags/release-blog/","title":"Release Blog"},{"body":" 作者: 柯振旭, 吴晟, 高洪涛, Tevah Platt. tetrate.io 原文链接: What\u0026rsquo;s new with Apache SkyWalking 8.2? Browser monitoring and more 2020 年 10 月 29 日  Apache SkyWalking,一个可观测性平台,也是一个开源的应用性能监视器(APM)项目,今日宣布 8.2 发行版全面可用。该发行版拓展了核心功能,并将其监控边界拓展到浏览器端。\n背景 SkyWalking 是一个观测平台和 APM 工具。它可以选择性的与 Service Mesh 协同工作,为微服务、云原生和基于容器的应用提供自动的指标。该项目是全球社区支持的 Apache 顶级项目,阿里巴巴、华为、腾讯、百度、字节跳动等许多公司都在使用。\n浏览器端监控 APM 可以帮助 SRE 和工程团队诊断系统故障,也能在系统异常缓慢之前优化它。但它是否足以让用户总是满意呢?\n在 8.2.0 版本中, SkyWalking 将它的监控边界拓展到了浏览器端,比如 Chrome ,或者 Chrome 和后端服务之间的网络。这样,我们不仅可以像以前一样监控浏览器发送给后端服务的与请求,还能看到前端的渲染速度、错误日志等信息——这些信息是获取最终用户体验的最有效指标。(目前此功能尚未拓展到物联网设备中,但这项功能使得 SkyWalking 向着这个方向前进了一步)\n此外,SkyWalking浏览器监视也提供以下数据: PV(page views,页面浏览量), UV(unique visitors,独立访客数),浏览量前 N 的页面(Top N Page Views)等。这些数据可以为产品队伍优化他们的产品提供线索。\n按标签 (tag) 查询链路数据 在 SkyWalking 的 Span 数据模型中,已经有了许多被索引并可供用户查询的重要字段。但出于性能考虑,使用 Span 标签查询链路数据的功能直到现在才正式提供。在 SkyWalking 8.2.0 中,我们允许用户查询被特定标签标记的链路,这非常有用。SRE 工程师可以在生产环境中运行测试,将其打上仿真流量的标签,并稍后通过该标签查找它。\n指标分析语言 在 8.2.0 中,仪表系统提供了一项名为MAL(Meter Analysis Language,指标分析语言)的强大分析语言。该语言允许用户在 OAP 流系统中分析并聚合(aggregate)指标数据。 表达式的结果可以被 Agent 分析器或 OpenTelemetry/Prometheus 分析器获取。\n复合警报规则 警报是及时发现系统失效的有效方式。一个常见的问题是,为了避免错过任何可能的问题,我们通常会配置过多的触发器(triggers)。没有人喜欢半夜被警报叫醒,结果只是因为触发系统太敏感。这种警报很嘈杂并毫无帮助。\n在 8.2.0 版本中,用户选择可以配置考虑了多个度量维度的复合警报规则。使用复合报警规则,我们可以根据需要添加尽可能多的指标来更精确地判断是否存在真正的问题,或者只是一个偶发的小问题。\n一些常见的情况,如 成功率 \u0026lt; 90% 但只有 1~2 个请求,现在可以通过复合规则解决,如流量(即每分钟调用数) \u0026gt; n \u0026amp;\u0026amp; 成功率 \u0026lt; m%。\n其它值得注意的功能增强  agent-toolkit SDK 公开了某些 API,供用户发送自定义指标。 Agent exclude_plgins 配置允许您排除某些插件(plugins); mount 配置使您能够加载一套新的插件。 社区贡献了超过 10 个新 Agent 插件。 报警系统原生支持发送消息到 Slack,企业微信,钉钉。  附加资源   阅读更多关于SkyWalkng 8.2 发行版重点.\n  在推特上获取更多关于 SkyWalking 的更新。\n  Apache SkyWalking DevCon 报名信息 Apache SkyWalking DevCon 2020 开始报名了。 2020 年 11 月 14 日,欢迎大家来线下参加活动和交流, 或者报名观看线上直播。\n","excerpt":"作者: 柯振旭, 吴晟, 高洪涛, Tevah Platt. tetrate.io 原文链接: What\u0026rsquo;s new with Apache SkyWalking 8.2? Browser …","ref":"/zh/2020-10-29-skywalking8-2-release/","title":"SkyWalking 8.2.0 中的新特性: 浏览器端监控; 使用标签查询; 指标分析语言"},{"body":"SkyWalking 8.2.0 is released. Go to downloads page to find release tars.\nProject  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. 
Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"SkyWalking 8.2.0 is released. 
Go to downloads page to find release tars.\nProject  Support Browser …","ref":"/events/release-apache-skywalking-apm-8-2-0/","title":"Release Apache SkyWalking APM 8.2.0"},{"body":"高洪涛 美国ServiceMesh服务商tetrate创始工程师。原华为软件开发云技术专家。目前为Apache SkyWalking核心贡献者,参与该开源项目在软件开发云的商业化进程。曾任职当当网系统架构师,开源达人,曾参与Apache ShardingSphere,Elastic-Job等知名开源项目。对分布式数据库,容器调度,微服务,ServicMesh等技术有深入的了解。\n议题简介 定制化Operator模式在面向Kubernetes的云化平台建构中变得越来越流行。Apache SkyWalking社区已经开始尝试使用Operator模式去构建基于Kubernetes平台的PaaS云组件。本次分享给将会给听众带来该项目的初衷,实现与未来演进等相关内容。分享的内容包含:\n 项目动机与设计理念 核心功能展示,包含SkyWalking核心组件的发布,更新与维护。 观测ServiceMesh,包含于Istio的自动集成。 目前的工作进展和对未来的规划。  B站视频地址\n","excerpt":"高洪涛 美国ServiceMesh服务商tetrate创始工程师。原华为软件开发云技术专家。目前为Apache SkyWalking核心贡献者,参与该开源项目在软件开发云的商业化进程。曾任职当当网系统 …","ref":"/zh/2020-10-25-coscon20-swck/","title":"[视频] Apache SkyWalking Cloud on Kubernetes"},{"body":"SkyWalking LUA Nginx 0.3.0 is released. Go to downloads page to find release tars.\n Load the base64 module in utils, different ENV use different library. Add prefix skywalking, avoid conflicts with other lua libraries. Chore: only expose the method of setting random seed, it is optional. Coc: use correct code block type. CI: add upstream_status to tag http.status Add http.status  ","excerpt":"SkyWalking LUA Nginx 0.3.0 is released. Go to downloads page to find release tars.\n Load the base64 …","ref":"/events/release-apache-skywalking-lua-nginx-0.3.0/","title":"Release Apache SkyWalking LUA Nginx 0.3.0"},{"body":"SkyWalking CLI 0.4.0 is released. Go to downloads page to find release tars.\n Features  Add dashboard global command with auto-refresh Add dashboard global-metrics command Add traces search Refactor metrics thermodynamic command to adopt the new query protocol   Bug Fixes  Fix wrong golang standard time    ","excerpt":"SkyWalking CLI 0.4.0 is released. Go to downloads page to find release tars.\n Features  Add …","ref":"/events/release-apache-skywalking-cli-0-4-0/","title":"Release Apache SkyWalking CLI 0.4.0"},{"body":"Huaxi Jiang (江华禧) (a.k.a. fgksgf) mainly focuses on the SkyWalking CLI project, he had participated in the \u0026ldquo;Open Source Promotion Plan - Summer 2020\u0026rdquo; and completed the project smoothly, and won the award \u0026ldquo;Most Potential Students\u0026rdquo; that shows his great willingness to continuously contribute to our community.\nUp to date, he has submitted 26 PRs in the CLI repository, 3 PRs in the main repo, all in total include ~4000 LOC.\nAt Sep. 28th, 2020, the project management committee (PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome to join the committer team, Huaxi!\n","excerpt":"Huaxi Jiang (江华禧) (a.k.a. fgksgf) mainly focuses on the SkyWalking CLI project, he had participated …","ref":"/events/welcome-huaxi-jiang-as-new-committer/","title":"Welcome Huaxi Jiang (江华禧) as new committer"},{"body":"SkyWalking Python 0.3.0 is released. 
Go to downloads page to find release tars.\n  New plugins\n Urllib3 Plugin (#69) Elasticsearch Plugin (#64) PyMongo Plugin (#60) Rabbitmq Plugin (#53) Make plugin compatible with Django (#52)    API\n Add process propagation (#67) Add tags to decorators (#65) Add Check version of packages when install plugins (#63) Add thread propagation (#62) Add trace ignore (#59) Support snapshot context (#56) Support correlation context (#55)    Chores and tests\n Test: run multiple versions of supported libraries (#66) Chore: add pull request template for plugin (#61) Chore: add dev doc and reorganize the structure (#58) Test: update test health check (#57) Chore: add make goal to package release tar ball (#54)    ","excerpt":"SkyWalking Python 0.3.0 is released. Go to downloads page to find release tars.\n  New plugins …","ref":"/events/release-apache-skywalking-python-0-3-0/","title":"Release Apache SkyWalking Python 0.3.0"},{"body":"吴晟 吴晟,Apache 基金会会员,Apache SkyWalking 创始人、项目 VP 和 PMC 成员,Apache 孵化器 PMC 成员,Apache ShardingSphere PMC成员,Apache APISIX PMC 成员,Apache ECharts (incubating) 和Apache DolphinScheduler (incubating) 孵化器导师,Zipkin 成员和贡献者。\n分享大纲  分布式追踪兴起的背景 SkyWalking和其他分布式追踪的异同 定位问题的流程和方法 性能剖析的由来、用途和优势  听众收获 听众能够全面的了解分布式追踪的技术背景,和技术原理。以及为什么这些年,分布式追踪和基于分布式追踪的APM系统,Apache SkyWalking,得到了广泛的使用、集成,甚至云厂商的支持。同时,除了针对追踪数据,我们应该关注更多的是,如何利用其产生的监控数据,定位系统的性能问题。以及它有哪些短板,应该如何弥补。\nB站视频地址\n","excerpt":"吴晟 吴晟,Apache 基金会会员,Apache SkyWalking 创始人、项目 VP 和 PMC 成员,Apache 孵化器 PMC 成员,Apache ShardingSphere PMC成 …","ref":"/zh/2020-08-13-cloud-native-academy/","title":"[视频] 云原生学院 - 后分布式追踪时代的性能问题定位——方法级性能剖析"},{"body":"SkyWalking Chart 3.1.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 8.1.0 Support enable oap dynamic configuration through k8s configmap  ","excerpt":"SkyWalking Chart 3.1.0 is released. Go to downloads page to find release tars.\n Support SkyWalking …","ref":"/events/release-apache-skywalking-chart-3-1-0-for-skywalking-8-1-0/","title":"Release Apache SkyWalking Chart 3.1.0 for SkyWalking 8.1.0"},{"body":" Author: Sheng Wu Original link, Tetrate.io blog  SkyWalking, a top-level Apache project, is the open source APM and observability analysis platform that is solving the problems of 21st-century systems that are increasingly large, distributed, and heterogenous. It\u0026rsquo;s built for the struggles system admins face today: To identify and locate needles in a haystack of interdependent services, to get apples-to-apples metrics across polyglot apps, and to get a complete and meaningful view of performance.\nSkyWalking is a holistic platform that can observe microservices on or off a mesh, and can provide consistent monitoring with a lightweight payload.\nLet\u0026rsquo;s take a look at how SkyWalking evolved to address the problem of observability at scale, and grew from a pure tracing system to a feature-rich observability platform that is now used to analyze deployments that collect tens of billions of traces per day.\nDesigning for scale When SkyWalking was first initialized back in 2015, its primary use case was monitoring the first-generation distributed core system of China Top Telecom companies, China Unicom and China Mobile. In 2013-2014, the telecom companies planned to replace their old traditional monolithic applications with a distributed system. Supporting a super-large distributed system and scaleablity were the high-priority design goals from Day one. So, what matters at scale?\nPull vs. push Pull and push modes relate to the direction of data flow. 
If the agent collects data and pushes them to the backend for further analysis, we call it \u0026ldquo;push\u0026rdquo; mode. Debate over pull vs. push has gone on for a long time. The key for an observability system is to minimize the cost of the agent, and to be generally suitable for different kinds of observability data.\nThe agent would send the data out a short period after it is collected. Then, we would have less concern about overloading the local cache. One typical case would be endpoint (URI of HTTP, service of gRPC) metrics. Any service could easily have hundreds, even thousands of endpoints. An APM system must have these metrics analysis capabilities.\nFurthermore, metrics aren\u0026rsquo;t the only thing in the observability landscape; traces and logs are important too. SkyWalking is designed to provide a 100% sampling rate tracing capability in the production environment. Clearly, push mode is the only solution.\nAt the same time, using push mode natively doesn\u0026rsquo;t mean SkyWalking can\u0026rsquo;t do data pulling. In recent 8.x releases, SkyWalking supports fetching data from Prometheus-instrumented services for reducing the Non-Recurring Engineering of the end users. Also, pull mode is popular in the MQ based transport, typically as a Kafka consumer. The SkyWalking agent side uses the push mode, and the OAP server uses the pull mode.\nThe conclusion: push mode is the native way, but pull mode works in some special cases too.\nMetrics analysis isn\u0026rsquo;t just mathematical calculation Metrics rely on mathematical theories and calculations. Percentile is a good measure for identifying the long tail issue, and reasonable average response time and successful rate are good SLO(s). But those are not all. Distributed tracing provides not just traces with detailed information, but high values metrics that can be analyzed.\nThe service topology map is required from Ops and SRE teams for the NOC dashboard and confirmation of system data flow. SkyWalking uses the STAM (Streaming Topology Analysis Method) to analyze topology from the traces, or based on ALS (Envoy Access Log Service) in the service mesh environment. This topology and metrics of nodes (services) and lines (service relationships) can\u0026rsquo;t be pulled from simple metrics SDKs.\nAs with fixing the limitation of endpoint metrics collection, SkyWalking needs to do endpoint dependency analysis from trace data too. Endpoint dependency analysis provides more important and specific information, including upstream and downstream. Those dependency relationships and metrics help the developer team to locate the boundaries of a performance issue, to specific code blocks.\nPre-calculation vs. query stage calculation? Query stage calculation provides flexibility. Pre-calculation, in the analysis stage, provides better and much more stable performance. Recall our design principle: SkyWalking targets a large-scale distributed system. Query stage calculation was very limited in scope, and most metrics calculations need to be pre-defined and pre-calculated. The key of supporting large datasets is reducing the size of datasets in the design level. Pre-calculation allows the original data to be merged into aggregated results downstream, to be used in a query or even for an alert check.\nTTL of metrics is another important business enabler. 
With the near linear performance offered by queries because of pre-calculation, with a similar query infrastructure, organizations can offer higher TTL, thereby providing extended visibility of performance.\nSpeaking of alerts, query-stage calculation also means the alerting query is required to be based on the query engine. But in this case, when the dataset increasing, the query performance could be inconsistent. The same thing happens in a different metrics query.\nCases today Today, SkyWalking is monitoring super large-scale distributed systems in many large enterprises, including Alibaba, Huawei, Tencent, Baidu, China Telecom, and various banks and insurance companies. The online service companies have more traffic than the traditional companies, like banks and telecom suppliers.\nSkyWalking is the observability platform used for a variety of use cases for distributed systems that are super-large by many measures:\n Lagou.com, an online job recruitment platform  SkyWalking is observing \u0026gt;100 services, 500+ JVM instances SkyWalking collects and analyzes 4+ billion traces per day to analyze performance data, including metrics of 300k+ endpoints and dependencies Monitoring \u0026gt;50k traffic per second in the whole cluster   Yonghui SuperMarket, online service  SkyWalking analyzes at least 10+ billion (3B) traces with metrics per day SkyWalking\u0026rsquo;s second, smaller deployment, analyzes 200+ million traces per day   Baidu, internet and AI company, Kubernetes deployment  SkyWalking collects 1T+ traces a day from 1,400+ pods of 120+ services Continues to scale out as more services are added   Beike Zhaofang(ke.com), a Chinese online property brokerage backed by Tencent Holdings and SoftBank Group  Has used SkyWalking from its very beginning, and has two members in the PMC team. Deployments collect 16+ billion traces per day   Ali Yunxiao, DevOps service on the Alibaba Cloud,  SkyWalking collects and analyzes billions of spans per day SkyWalking keeps AliCloud\u0026rsquo;s 45 services and ~300 instances stable   A department of Alibaba TMall, one of the largest business-to-consumer online retailers, spun off from Taobao  A customized version of SkyWalking monitors billions of traces per day At the same time, they are building a load testing platform based on SkyWalking\u0026rsquo;s agent tech stack, leveraging its tracing and context propagation cabilities    Conclusion SkyWalking\u0026rsquo;s approach to observability follows these principles:\n Understand the logic model: don\u0026rsquo;t treat observability as a mathematical tool. Identify dependencies first, then their metrics. Scaling should be accomplished easily and natively. Maintain consistency across different architectures, and in the performance of APM itself.  Resources  Read about the SkyWalking 8.1 release highlights. Get more SkyWalking updates on Twitter. Sign up to hear more about SkyWalking and observability from Tetrate.  
","excerpt":"Author: Sheng Wu Original link, Tetrate.io blog  SkyWalking, a top-level Apache project, is the open …","ref":"/blog/2020-08-11-observability-at-scale/","title":"Observability at Scale: SkyWalking it is"},{"body":" 作者:吴晟 翻译:董旭 金蝶医疗 原文链接:Tetrate.io blog  SkyWalking做为Apache的顶级项目,是一个开源的APM和可观测性分析平台,它解决了21世纪日益庞大、分布式和异构的系统的问题。它是为应对当前系统管理所面临的困难而构建的:就像大海捞针,SkyWalking可以在服务依赖复杂且多语言环境下,获取服务对应的指标,以及完整而有意义的性能视图。\nSkyWalking是一个非常全面的平台,无论你的微服务是否在服务网格(Service Mesh)架构下,它都可以提供高性能且一致性的监控。\n让我们来看看,SkyWalking是如何解决大规模集群的可观测性问题,并从一个纯粹的链路跟踪系统,发展成为一个每天分析百亿级跟踪数据,功能丰富的可观测性平台。\n为超大规模而生 SkyWalking的诞生,时间要追溯到2015年,当时它主要应用于监控顶级电信公司(例如:中国联通和中国移动)的第一代分布式核心系统。2013-2014年,这些电信公司计划用分布式系统取代传统的单体架构应用。从诞生那天开始,SkyWalking首要的设计目标,就是能够支持超大型分布式系统,并具有很好可扩展性。那么支撑超大规模系统要考虑什么呢?\n拉取vs推送 与数据流向息息相关的:拉取模式和推送模式。Agent(客户端)收集数据并将其推送到后端,再对数据进一步分析,我们称之为“推送”模式。究竟应该使用拉取还是推送?这个话题已经争论已久。关键因素取决于可观测性系统的目标,即:在Agent端花最小的成本,使其适配不同类型的可观测性数据。\nAgent收集数据后,可以在短时间内发送出去。这样,我们就不必担心本地缓存压力过大。举一个典型的例子,任意服务都可以轻松地拥有数百个甚至数千个端点指标(如:HTTP的URI,gRPC的服务)。那么APM系统就必须具有分析这些数量庞大指标的能力。\n此外,度量指标并不是可观测性领域中的唯一关注点,链路跟踪和日志也很重要。在生产环境下,SkyWalking为了能提供100%采样率的跟踪能力,数据推送模式是唯一可行的解决方案。\nSkyWalking即便使用了推送模式,同时也可进行数据拉取。在最近的8.x的发版本中,SkyWalking支持从已经集成Prometheus的服务中获取终端用户的数据,避免重复工程建设,减少资源浪费。另外,比较常见的是基于MQ的传输构建拉取模式,Kafka消费者就是一个比较典型的例子。SkyWalking的Agent端使用推送模式,OAP服务器端使用拉取模式。\n结论:SkyWalking的推送模式是原生方式,但拉取式模式也适用于某些特殊场景。\n度量指标分析并不仅仅是数学统计 度量指标依赖于数学理论和计算。Percentile(百分位数)是用于反映响应时间的长尾效应。服务具备合理的平均响应时间和成功率,说明服务的服务等级目标(SLO)很好。除此之外,分布式跟踪还为跟踪提供了详细的信息,以及可分析的高价值指标。\n运维团队(OPS)和系统稳定性(SRE)团队通过服务拓扑图,用来观察网络情况(当做NOC dashboard使用)、确认系统数据流。SkyWalking依靠trace(跟踪数据),使用STAM(Streaming Topology Analysis Method)方法进行分析拓扑结构。在服务网格环境下,使用ALS(Envoy Access Log Service)进行拓扑分析。节点(services)和线路(service relationships)的拓扑结构和度量指标数据,无法通过sdk轻而易举的拿到。\n为了解决端点度量指标收集的局限性,SkyWalking还要从跟踪数据中分析端点依赖关系,从而拿到链路上游、下游这些关键具体的信息。这些依赖关系和度量指标信息,有助于开发团队定位引起性能问题的边界,甚至代码块。\n预计算还是查询时计算? 
相比查询时计算的灵活性,预计算可以提供更好、更稳定的性能,这在分析场景下尤为重要。回想一下我们的设计原则:SkyWalking是为了一个大规模的分布式系统而设计。查询时计算的使用范围非常有限,大多数度量计算都需要预先定义和预先计算。支持大数据集的关键是:在设计阶段,要减小数据集。预计算允许将原始数据合并到下游的聚合结果中,用于查询,甚至用于警报检查。\n使用SkyWalking的另一个重要因素是:指标的有效期,TTL(Time To Live)。由于采用了预先计算,查询提供了近似线性的高性能。这也帮助“查询系统”这类基础设施系统,提供更好的性能扩展。\n关于警报,使用查询时计算方案,也意味着警报查询需要基于查询引擎。但在这种情况下,随着数据集增加,查询性能会随之下降,其他指标查询也是一样的结果。\n目前使用案例 如今,SkyWalking在许多大型企业的超大规模分布式系统中使用,包括阿里巴巴、华为、腾讯、百度、中国通讯企业以及多家银行和保险公司。上线SkyWalking公司的流量,比银行和电信运营商这种传统公司还要大。\n在很多行业中,SkyWalking是被应用于超大型分布式系统各种场景下的一个可观测性平台:\n  拉勾网\n  SkyWalking正在观测超过100个服务,500多个JVM实例\n  SkyWalking每天收集和分析40多亿个跟踪数据,用来分析性能,其中包括30万个端点和依赖关系的指标\n  在整个群集中监控\u0026gt;50k流量/秒\n    永辉超市\n  SkyWalking每天分析至少100多亿(3B)的跟踪数据\n  其次,SkyWalking用较小的部署,每天分析2亿多个跟踪数据\n    百度\n  SkyWalking每天从1400多个pod中,从120多个服务收集1T以上的跟踪数据\n  随着更多服务的增加,规模会持续增大\n    贝壳找房(ke.com)\n  很早就使用了SkyWalking,有两名成员已经成为PMC\n  Deployments每天收集160多亿个跟踪数据\n    阿里云效\n  SkyWalking每天收集和分析数十亿个span\n  SkyWalking使阿里云的45项服务和~300个实例保持稳定\n    阿里巴巴天猫\n  SkyWalking个性化定制版,每天监控数十亿跟踪数据\n  与此同时,他们基于SkyWalking的Agent技术栈,利用其跟踪和上下文传播能力,正在构建一个全链路压测平台\n    结论 SkyWalking针对可观测性遵循以下原则:\n 理解逻辑模型:不要把可观测性当作数学统计工具。 首先确定依赖关系,然后确定它们的度量指标。 原生和方便的支撑大规模增长。 在不同的架构情况下,APM各方面表现依然保持稳定和一致。  资源  阅读SkyWalking 8.1发布亮点。 在Twitter上获取更多SkyWalking更新。 注册Tetrate以了解更多有关SkyWalking可观测性的信息。  ","excerpt":"作者:吴晟 翻译:董旭 金蝶医疗 原文链接:Tetrate.io blog  SkyWalking做为Apache的顶级项目,是一个开源的APM和可观测性分析平台,它解决了21世纪日益庞大、分布式和异 …","ref":"/zh/2020-08-11-observability-at-scale-skywalking-it-is/","title":"SkyWalking 为超大规模而生"},{"body":"","excerpt":"","ref":"/zh_tags/use-case/","title":"Use Case"},{"body":" Author: Sheng Wu, Hongtao Gao, and Tevah Platt(Tetrate) Original link, Tetrate.io blog  Apache SkyWalking, the observability platform, and open-source application performance monitor (APM) project, today announced the general availability of its 8.1 release that extends its functionalities and provides a transport layer to maintain the lightweight of the platform that observes data continuously.\nBackground SkyWalking is an observability platform and APM tool that works with or without a service mesh, providing automatic instrumentation for microservices, cloud-native and container-based applications. The top-level Apache project is supported by a global community and is used by Alibaba, Huawei, Tencent, Baidu, and scores of others.\nTransport traces For a long time, SkyWalking has used gRPC and HTTP to transport traces, metrics, and logs. They provide good performance and are quite lightweight, but people kept asking about the MQ as a transport layer because they want to keep the observability data continuously as much as possible. From SkyWalking’s perspective, the MQ based transport layer consumes more resources required in the deployment and the complexity of deployment and maintenance but brings more powerful throughput capacity between the agent and backend.\nIn 8.1.0, SkyWalking officially provides the typical MQ implementation, Kafka, to transport all observability data, including traces, metrics, logs, and profiling data. At the same time, the backend can support traditional gRPC and HTTP receivers, with the new Kafka consumer at the same time. Different users could choose the transport layer(s) according to their own requirements. Also, by referring to this implementation, the community could contribute various transport plugins for Apache Pulsar, RabbitMQ.\nAutomatic endpoint dependencies detection The 8.1 SkyWalking release offers automatic detection of endpoint dependencies. 
SkyWalking has long offered automatic endpoint detection, but endpoint dependencies, including upstream and downstream endpoints, are critical for Ops and SRE teams’ performance analysis. The APM system is expected to detect the relationships powered by the distributed tracing. While SkyWalking has been designed to include this important information at the beginning the latest 8.1 release offers a cool visualization about the dependency and metrics between dependent endpoints. It provides a new drill-down angle from the topology. Once you have the performance issue from the service level, you could check on instance and endpoint perspectives:\nSpringSleuth metrics detection In the Java field, the Spring ecosystem is one of the most widely used. Micrometer, the metrics API lib included in the Spring Boot 2.0, is now adopted by SkyWalking’s native meter system APIs and agent. For applications using Micrometer with the SkyWalking agent installed, all Micrometer collected metrics could then be shipped into SkyWalking OAP. With some configurations in the OAP and UI, all metrics are analyzed and visualized in the SkyWalking UI, with all other metrics detected by SkyWalking agents automatically.\nNotable enhancements The Java agent core is enhanced in this release. It could work better in the concurrency class loader case and is more compatible with another agent solution, such as Alibaba’s Arthas.\n With the logic endpoint supported, the local span can be analyzed to get metrics. One span could carry the raw data of more than one endpoint’s performance. GraphQL, InfluxDB Java Client, and Quasar fiber libs are supported to be observed automatically. Kubernetes Configmap can now for the first time be used as the dynamic configuration center– a more cloud-native solution for k8s deployment environments. OAP supports health checks, especially including the storage health status. If the storage (e.g., ElasticSearch) is not available, you could get the unhealth status with explicit reasons through the health status query. Opencensus receiver supports ingesting OpenTelemetry/OpenCensus agent metrics by meter-system.  Additional resources  Read more about the SkyWalking 8.1 release highlights. Read more about SkyWalking from Tetrate on our blog. Get more SkyWalking updates on Twitter. Sign up to hear more about SkyWalking and observability from Tetrate.  ","excerpt":"Author: Sheng Wu, Hongtao Gao, and Tevah Platt(Tetrate) Original link, Tetrate.io blog  Apache …","ref":"/blog/2020-08-03-skywalking8-1-release/","title":"Features in SkyWalking 8.1: SpringSleuth metrics, endpoint dependency detection, Kafka transport traces and metrics"},{"body":"","excerpt":"","ref":"/tags/kafka/","title":"Kafka"},{"body":"SkyWalking APM 8.1.0 is release. Go to downloads page to find release tars.\nProject  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. 
Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","excerpt":"SkyWalking APM 8.1.0 is release. Go to downloads page to find release tars.\nProject  Support Kafka …","ref":"/events/release-apache-skywalking-apm-8-1-0/","title":"Release Apache SkyWalking APM 8.1.0"},{"body":"","excerpt":"","ref":"/tags/spring/","title":"Spring"},{"body":"Based on his continuous contributions, Wei Hua (a.k.a alonelaval) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Wei Hua (a.k.a alonelaval) has been voted as a new committer.","ref":"/events/welcome-wei-hua-as-new-committer/","title":"Welcome Wei Hua as new committer"},{"body":"SkyWalking Python 0.2.0 is released. Go to downloads page to find release tars.\n  Plugins:\n Kafka Plugin (#50) Tornado Plugin (#48) Redis Plugin (#44) Django Plugin (#37) PyMsql Plugin (#35) Flask plugin (#31)    API\n Add ignore_suffix Config (#40) Add missing log method and simplify test codes (#34) Add content equality of SegmentRef (#30) Validate carrier before using it (#29)    Chores and tests\n Test: print the diff list when validation failed (#46) Created venv builders for linux/windows and req flashers + use documentation (#38)    ","excerpt":"SkyWalking Python 0.2.0 is released. 
Go to downloads page to find release tars.\n  Plugins:\n Kafka …","ref":"/events/release-apache-skywalking-python-0-2-0/","title":"Release Apache SkyWalking Python 0.2.0"},{"body":"SkyWalking CLI 0.3.0 is released. Go to downloads page to find release tars.\n Command: health check command Command: Add trace command BugFix: Fix wrong metrics graphql path  ","excerpt":"SkyWalking CLI 0.3.0 is released. Go to downloads page to find release tars.\n Command: health check …","ref":"/events/release-apache-skywalking-cli-0-3-0/","title":"Release Apache SkyWalking CLI 0.3.0"},{"body":" Author: Srinivasan Ramaswamy, tetrate Original link, Tetrate.io blog  Asking How are you is more profound than What are your symptoms Background Recently I visited my preferred doctor. Whenever I visit, the doctor greets me with a series of light questions: How’s your day? How about the week before? Any recent trips? Did I break my cycling record? How’s your workout regimen? _Finally _he asks, “Do you have any problems?\u0026quot; On those visits when I didn\u0026rsquo;t feel ok, I would say something like, \u0026ldquo;I\u0026rsquo;m feeling dull this week, and I\u0026rsquo;m feeling more tired towards noon….\u0026quot; It\u0026rsquo;s at this point that he takes out his stethoscope, his pulse oximeter, and blood pressure apparatus. Then, if he feels he needs a more in-depth insight, he starts listing out specific tests to be made.\nWhen I asked him if the first part of the discussion was just an ice-breaker, he said, \u0026ldquo;That\u0026rsquo;s the essential part. It helps me find out how you feel, rather than what your symptoms are.\u0026quot; So, despite appearances, our opening chat about life helped him structure subsequent questions on symptoms, investigations and test results.\nOn the way back, I couldn\u0026rsquo;t stop asking myself, \u0026ldquo;Shouldn\u0026rsquo;t we be managing our mesh this way, too?\u0026quot;\nIf I strike parallels between my own health check and a health check, “tests” would be log analysis, “investigations” would be tracing, and “symptoms” would be the traditional RED (Rate, Errors and Duration) metrics. That leaves the “essential part,” which is what we are talking about here: the Wellness Factor, primarily the health of our mesh.\nHealth in the context of service mesh We can measure the performance of any observed service through RED metrics. RED metrics offer immense value in understanding the performance, reliability, and throughput of every service. Compelling visualizations of these metrics across the mesh make monitoring the entire mesh standardized and scalable. Also, setting alerts based on thresholds for each of these metrics helps to detect anomalies as and when they arise.\nTo establish the context of any service and observe them, it\u0026rsquo;s ideal to visualize the mesh as a topology.\nA topology visualization of the mesh not only allows for picking any service and watching its metrics, but also gives vital information about service dependencies and the potential impact of a given service on the mesh.\nWhile RED metrics of each service offer tremendous insights, the user is more concerned with the overall responsiveness of the mesh rather than each of these services in isolation.\nTo describe the performance of any service, right from submitting the request to receiving a completed http response, we’d be measuring the user\u0026rsquo;s perception of responsiveness. This measure of response time compared with a set threshold is called Apdex. 
This Apdex is an indicator of the health of a service in the mesh.\nApdex Apdex is a measure of response time considered against a set threshold. It relates satisfactory and unsatisfactory response times to the total number of responses.\nApdex is an industry standard to measure the satisfaction of users based on the response time of applications and services. It measures how satisfied your users are with your services, as traditional metrics such as average response time could get skewed quickly.\nSatisfactory response time indicates the number of times when the roundtrip response time of a particular service was less than this threshold. Unsatisfactory response time, while meaning the opposite, is further categorized as Tolerating and Frustrating. Tolerating accommodates any performance that is up to four times the threshold, and anything over that, or any errors encountered, is considered Frustrating. The threshold mentioned here is an ideal roundtrip performance that we expect from any service. We could even start with an organization-wide limit of, say, 500ms.\nThe Apdex score is a ratio of satisfied and tolerating requests to the total requests made.\nEach satisfied request counts as one request, while each tolerating request counts as half a satisfied request.\nAn Apdex score takes values from 0 to 1, with 0 being the worst possible score, indicating that users were always frustrated, and ‘1’ as the best possible score (100% of response times were Satisfactory).\nA percentage representation of this score also serves as the Health Indicator of the service.\nThe Math The actual computation of this Apdex score is achieved through the following formula.\nApdex Score = ( SatisfiedCount + ( ToleratingCount / 2 ) ) / TotalSamples\nA percentage representation of this score is known as the Health Indicator of a service.\nExample Computation During a 2-minute period, a host handles 200 requests.\nThe Apdex threshold T = 0.5 seconds (500ms).\n 170 of the requests were handled within 500ms, so they are classified as Satisfied. 20 of the requests were handled between 500ms and 2 seconds (2000 ms), so they are classified as Tolerating. The remaining 10 were not handled properly or took longer than 2 seconds, so they are classified as Frustrated.  The resulting Apdex score is 0.9: (170 + (20/2))/200 = 0.9.\nThe next level At the next level, we can attempt to improve our topology visualization by coloring nodes based on their health. Also, we can include health as a part of the information we show when the user taps on a service.\nApdex specifications recommend the following Apdex Quality Ratings by classifying the Apdex Score as Excellent (0.94 - 1.00), Good (0.85 - 0.93), Fair (0.70 - 0.84), Poor (0.50 - 0.69) and Unacceptable (0.00 - 0.49).\nTo visualize this, let’s look at our topology using traffic light colors, marking our nodes as Healthy, At-Risk and Unhealthy, where Unhealthy indicates health that falls below 80%. A rate between 80% and 95% indicates At-Risk, and health at 95% and above is termed Healthy.\nLet’s incorporate this coloring into our topology visualization and take its usability to the next level. If implemented, we will be looking at something like this.\nMoving further Apdex provides tremendous visibility into customer satisfaction on the responsiveness of our services. 
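To make the worked example above concrete, here is a minimal, self-contained sketch of the same Apdex computation in code. It is illustrative only, not SkyWalking's implementation; the class and method names (ApdexExample, apdex, apdexFromDurations) are invented for this example.

```java
import java.util.List;

public final class ApdexExample {

    // Apdex = (satisfied + tolerating / 2) / total samples.
    static double apdex(long satisfied, long tolerating, long total) {
        return (satisfied + tolerating / 2.0) / total;
    }

    // Classify raw round-trip times (in ms) against a threshold T:
    // <= T counts as Satisfied, <= 4T as Tolerating, everything else (or an error) as Frustrated.
    static double apdexFromDurations(List<Long> durationsMs, long thresholdMs) {
        long satisfied = durationsMs.stream().filter(d -> d <= thresholdMs).count();
        long tolerating = durationsMs.stream().filter(d -> d > thresholdMs && d <= 4 * thresholdMs).count();
        return apdex(satisfied, tolerating, durationsMs.size());
    }

    public static void main(String[] args) {
        // The worked example above: 170 Satisfied, 20 Tolerating, 10 Frustrated, T = 500ms.
        System.out.println(apdex(170, 20, 200)); // prints 0.9
    }
}
```

Running main reproduces the example above: (170 + 20/2) / 200 = 0.9.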
Even more, by extending the implementation to the edges calling this service we get further insight into the health of the mesh itself.\nTwo services with similar Apdex scores offer the same customer satisfaction to the customer. However, the size of traffic that flows into the service can be of immense help in prioritizing between services to address. A service with higher traffic flow is an indication that this experience is impacting a significant number of users on the mesh.\nWhile health relates to a service, we can also analyze the interactions between two services and calculate the health of the interaction. This health calculation of every interaction on the mesh helps us establish a critical path, based on the health of all interactions in the entire topology.\nIn a big mesh, showing traffic as yet another number will make it more challenging to visualize and monitor. We can, with a bit of creativity, improve the entire visualization by rendering the edges that connect services with different thickness depending on the throughput of the service.\nAn unhealthy service participating in a high throughput transaction could lead to excessive consumption of resources. On the other hand, this visualization also offers a great tip to maximize investment in tuning services.\nTuning service that is a part of a high throughput transaction offers exponential benefits when compared to tuning an occasionally used service.\nIf we look at implementing such a visualization, which includes the health of interactions and throughput of such interactions, we would be looking at something like below :\nThe day is not far These capabilities are already available to users today as one of the UI features of Tetrate’s service mesh platform, using the highly configurable and performant observability and performance management framework: Apache SkyWalking (https://skywalking.apache.org), which monitors traffic across the mesh, aggregates RED metrics for both services and their interactions, continuously computes and monitors health of the services, and enables users to configure alerts and notifications when services cross specific thresholds, thereby having a comprehensive health visibility of the mesh.\nWith such tremendous visibility into our mesh performance, the day is not far when we at our NOC (Network Operations Center) for the mesh have this topology as our HUD (Heads Up Display).\nThis HUD, with the insights and patterns gathered over time, would predict situations and proactively prompt us on potential focus areas to improve customer satisfaction.\nThe visualization with rich historical data can also empower the Network Engineers to go back in time and look at the performance of the mesh on a similar day in the past.\nAn earnest implementation of such a visualization would be something like below :\nTo conclude With all the discussion so far, the health of a mesh is more about how our users feel, and what we can proactively do as service providers to sustain, if not enhance, the experience of our users.\nAs the world advances toward personalized medicine, we\u0026rsquo;re not far from a day when my doctor will text me: \u0026ldquo;How about feasting yourself with ice cream today and take the Gray Butte Trail to Mount Shasta!\u0026rdquo; Likewise, we can do more for our customers by having better insight into their overall wellness.\nTetrate’s approach to “service mesh health” is not only to offer management, monitoring and support but to make infrastructure healthy from the start to reduce the 
probability of incidents. Powered by the Istio, Envoy, and SkyWalking, Tetrate\u0026rsquo;s solutions enable consistent end-to-end observability, runtime security, and traffic management for any workload in any environment.\nOur customers deserve healthy systems! Please do share your thoughts on making service mesh an exciting and robust experience for our customers.\nReferences  https://en.wikipedia.org/wiki/Apdex https://www.apdex.org/overview.html https://www.apdex.org/index.php/specifications/ https://skywalking.apache.org/  ","excerpt":"Author: Srinivasan Ramaswamy, tetrate Original link, Tetrate.io blog  Asking How are you is more …","ref":"/blog/2020-07-26-apdex-and-skywalking/","title":"The Apdex Score for Measuring Service Mesh Health"},{"body":" 作者: Srinivasan Ramaswamy, tetrate 翻译:唐昊杰,南京大学在读学生 校对:吴晟 Original link, Tetrate.io blog July. 26th, 2020  \u0026ldquo;你感觉怎么样\u0026rdquo; 比 \u0026ldquo;你的症状是什么\u0026rdquo; 更重要 背景 最近我拜访了我的医生。每次去看病,医生都会首先问我一连串轻快的问题,比如:你今天过得怎么样?上周过的怎么样?最近有什么出行吗?你打破了自己的骑车记录吗?你的锻炼计划实施如何?最后他会问:“你有什么麻烦吗?”如果这个时候我感觉自己不太好,我会说:“我这周感觉很沉闷,临近中午的时候感觉更累。”这时他就会拿出听诊器、脉搏血氧仪和血压仪。然后,如果他觉得自己需要更深入的了解情况,他就开始列出我需要做的具体检查。\n当我问他,最开始的讨论是否只是为了缓和氛围。他说:“这是必不可少的部分。它帮助我发现你感觉如何,而不是你的症状是什么。\u0026quot;。我们这样关于生活的开场聊天,帮助他组织了后续关于症状、调查和测试结果的问题。\n在回来的路上,我不停地问自己:“我们是不是也应该用这种方式管理我们的网格(service mesh)?”\n如果我把自己的健康检查和网格的健康检查进行类比,“医疗检查”就是日志分析,“调查”就是追踪,“症状”就是传统的RED指标(请求速率、请求错误和请求耗时)。那么根本的问题,就是我们在这里讨论的:健康因素(主要是网格的健康)。\n服务网格中的健康状况 我们可以通过RED指标来衡量任何被观察到的服务的性能。RED指标在了解每个服务的性能、可靠性和吞吐量方面提供了巨大的价值。这些指标在网格上的令人信服的可视化使得监控全部网格变得标准化和可扩展。此外,根据这些指标的阈值设置警报有助于在指标值异常的时候进行异常检测。\n为了建立任何服务的上下文环境并观察它们,理想的做法是将网格可视化为一个拓扑结构。\n网格的拓扑结构可视化不仅允许使用者挑选任意服务并观察其指标,还可以提供有关服务依赖和特定服务在网格上的潜在影响这些重要信息。\n虽然每个服务的RED指标为使用者提供了深刻的洞察能力,但使用者更关心网格的整体响应性,而非每个单独出来的服务的响应性。\n为了描述任意服务的性能(即从提交请求到收到完成了的http响应这段时间内的表现),我们会测量用户对响应性的感知。这种将响应时间与设定的阈值进行比较的衡量标准叫做Apdex。Apdex是衡量一个服务在网格中的健康程度的指标。\nApdex Apdex是根据设定的阈值和响应时间结合考虑的衡量标准。它是满意响应时间和不满意响应时间相对于总响应时间的比率。\nApdex是根据应用和服务的响应时间来衡量使用者满意程度的行业标准。它衡量的是用户对你的服务的满意程度,因为传统的指标(如平均响应时间)可能很快就会容易形成偏差。\n基于满意度的响应时间,表示特定服务的往返响应时间小于设定的阈值的次数。不满意响应时间虽然意思相反,但又进一步分为容忍型和失望型。容忍型包括了了任何响应时间不超过四倍阈值的表现,而任何超过四倍阈值或遇到了错误的表现都被认为是失望型。这里提到的阈值是我们对任意服务所期望的理想响应表现。我们可以设置一个全局范围的阈值,如,500ms。\nApdex得分是满意型请求和容忍型请求与做出的总请求的比率。\n每个_满意的请求_算作一个请求,而每个_容忍的请求_算作半个_满意_的请求。\n一个Apdex得分从0到1的范围内取值。0是最差的分数,表示用户总是感到失望;而'1\u0026rsquo;是最好的分数(100%的响应时间是令人满意的)。\n这个分数的百分比表示也可以用作服务的健康指标。\n数学表示 Apdex得分的实际计算是通过以下公式实现的:\n\t满意请求数 + ( 容忍请求数 / 2 ) Apdex 得分 = ------------------------------------------------------ 总请求数 此公示得到的百分率,即可视为服务的健康度。\n样例计算 在两分钟的采样时间内,主机处理200个请求。\nApdex阈值T设置为0.5秒(500ms)。\n*.\t170个请求在500ms内被处理完成,它们被分类为满意型。 *.\t20个请求在500ms和2秒间被处理,它们被分类为容忍型。 *.\t剩余的10个请求没有被正确处理或者处理时间超过了2秒,所以它们被分类为失望型。\n最终的Apdex得分是0.9,即(170 + (20 / 2))/ 200。\n深入使用 在接下来的层次,我们可以尝试通过根据节点的健康状况来着色节点以改进我们的拓扑可视化。此外,我们还可以在用户点击服务时将健康状况作为我们展示的信息的一部分。\nApdex规范推荐了以下Apdex质量评级,将Apdex得分分为优秀(0.94 - 1.00)、良好(0.85 - 0.93)、一般(0.70 - 0.84)、差(0.50 - 0.69)和不可接受(0.00 - 0.49)。\n为了可视化网格的健康状况,我们用交通灯的颜色将我们的节点标记为健康、有风险和不健康,其中不健康表示健康率低于80%。健康率在80%到95%之间的表示有风险,健康率在95%及以上的称为健康。\n让我们将这种着色融入到我们的拓扑可视化中,并将其可用性提升到一个新的水平。如果实施,我们将看到下图所示的情况。\n更进一步 
Apdex为客户对我们服务响应性的满意度提供了可见性。更有甚者,通过将实施范围扩展到调用该服务的调用关系,我们可以进一步了解网格本身的健康状况。\n两个有着相似Apdex分数的服务,为客户提供了相同的客户满意度。然而,流入服务的流量大小对于优先处理哪一服务有着巨大的帮助。流量较高的服务表明这种服务体验影响了网格上更大量的使用者。\n虽然健康程度与单个服务有关,但我们也可以分析两个服务之间的交互并计算交互过程的健康程度。这种对网格上每一个交互的健康程度的计算,可以帮助我们根据整个拓扑结构中所有交互的健康程度,建立一个关键路径。\n在一个大的网格中,将流量展示为另一个数字将使可视化和监控更具挑战性。我们可以根据服务的吞吐量,通过用不同的粗细程度渲染连接服务的边来改善整个可视化的效果。\n一个位于高吞吐量事务的不健康的服务可能会导致资源的过度消耗。另一方面,这种可视化也为调整服务时获取最大化投资效果提供了一个很好的提示。\n与调整一个偶尔使用的服务相比,调整作为高吞吐量事务的一部分的那些服务会带来指数级的收益。\n实施这种包括了交互的健康状况和吞吐量的可视化,我们会看到下图所示的情况:\n这一天即将到来 目前,这些功能已经作为Tetrate服务网格平台的UI功能之一来提供给用户。该平台使用了高速可配置化、高性能的可观测性和监控性能管理平台:Apache SkyWalking (https://skywalking.apache.org),SkyWalking可以监控整个网格的流量,为服务及它们的交互合计RED指标,持续计算和监控服务的健康状况,并使用户能够在服务超过特定阈值时配置报警和通知。这些功能使得SkyWalking对网格拥有全面的健康状况可见性。\n有了这样强大的网格性能可视性,我们将可以在为网格准备的网络运营中心使用这种拓扑结构作为我们的HUD(Heads Up Display)。\nHUD随着时间的推移收集了解到的信息和模式,并将预测各种情况和主动提示我们潜在的重点领域以提高客户满意度。\n丰富的历史数据的可视化也可以使网络工程师能够看看过去中类似的一天的网格表现。\n可视化效果如下图所示。\n总结 综合到目前为止的所有讨论,网格的健康状况更多地是关于用户的感受,以及我们作为服务提供商可以采取积极行动来维持(如果不能增强)用户的体验。\n着个人化医学的发展,现在距离我的医生给我发这样短信的日子并不遥远:“要不今天享用冰淇淋并且沿着灰色小山步道到达沙斯塔山!”相似的,我们可以通过更好地了解客户的整体健康状况为他们做更多的事情。\nTetrate的“服务网格健康程度”方法不仅提供了管理,监视和支持,而且从一开始就使基础架构保持健康以减少事故发生的可能性。在Istio,Envoy和SkyWalking的支持下,Tetrate的解决方案可为任何环境中的任何工作负载提供持续的端到端可观察性,运行时安全性和流量管理。\n我们的客户应该拥有健康的系统!请分享您对使用服务网格为我们的客户带来令人兴奋和强健的体验的想法。\n引用  https://en.wikipedia.org/wiki/Apdex https://www.apdex.org/overview.html https://www.apdex.org/index.php/specifications/ https://skywalking.apache.org/  ","excerpt":"作者: Srinivasan Ramaswamy, tetrate 翻译:唐昊杰,南京大学在读学生 校对:吴晟 Original link, Tetrate.io blog July. 26th, …","ref":"/zh/2020-07-26-apdex-and-skywalking/","title":"度量服务网格健康度——Apdex得分"},{"body":"SkyWalking Python 0.1.0 is released. Go to downloads page to find release tars.\n API: agent core APIs, check the APIs and the examples Plugin: built-in libraries http, urllib.request and third-party library requests are supported. Test: agent test framework is setup, and the corresponding tests of aforementioned plugins are also added.  ","excerpt":"SkyWalking Python 0.1.0 is released. Go to downloads page to find release tars.\n API: agent core …","ref":"/events/release-apache-skywalking-python-0-1-0/","title":"Release Apache SkyWalking Python 0.1.0"},{"body":"SkyWalking Chart 3.0.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 8.0.1  ","excerpt":"SkyWalking Chart 3.0.0 is released. Go to downloads page to find release tars.\n Support SkyWalking …","ref":"/events/release-apache-skywalking-chart-3-0-0-for-skywalking-8-0-1/","title":"Release Apache SkyWalking Chart 3.0.0 for SkyWalking 8.0.1"},{"body":"Apache SkyWalking 8.0.1 已发布。SkyWalking 是观察性分析平台和应用性能管理系统,提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案,支持 Java, .Net Core, PHP, NodeJS, Golang, LUA 语言探针,支持 Envoy + Istio 构建的 Service Mesh。\n与 8.0.0 相比,此版本包含一个热修复程序。\nOAP-Backend\n 修复 no-init 模式在 Elasticsearch 存储中无法运行的错误  8.0.0 值得关注的变化:\n 添加并实现了 v3 协议,旧版本与 8.x 不兼容 移除服务、实例、端点注册机制和 inventory 存储实体 (inventory storage entities) 提供新的 GraphQL 查询协议,同时支持旧协议(计划在今年年底移除) 支持 Prometheus 网络协议,可将 Prometheus 格式的指标传输到 SkyWalking 中 提供 Python agent 移除所有 inventory 缓存 提供 Apache ShardingSphere (4.0.0, 4.1.1) agent 插件 UI dashboard 100% 可配置,可采用后台定义的新指标 修复 H2/MySQL 实现中的 SQL 注入漏洞 Upgrade Nacos to avoid the FastJson CVE in high frequency. 
升级 Nacos 以避免 FastJson CVE 升级 jackson-databind 至 2.9.10  下载地址:http://skywalking.apache.org/downloads/\n","excerpt":"Apache SkyWalking 8.0.1 已发布。SkyWalking 是观察性分析平台和应用性能管理系统,提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案,支持 Java, …","ref":"/zh/2020-06-21-skywalking8-0-1-release/","title":"Apache SkyWalking 8.0.1 发布"},{"body":"SkyWalking Nginx LUA 0.2.0 is released. Go to downloads page to find release tars.\n Adapt the new v3 protocol. Implement correlation protocol. Support batch segment report.  ","excerpt":"SkyWalking Nginx LUA 0.2.0 is released. Go to downloads page to find release tars.\n Adapt the new v3 …","ref":"/events/release-apache-skywalking-nginx-lua-0-2-0/","title":"Release Apache SkyWalking Nginx LUA 0.2.0"},{"body":"SkyWalking APM 8.0.0 is released. Go to downloads page to find release tars.\nProject  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported (planned to be removed at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere (4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter (Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entities. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database (conjecture node in tracing scenario). Data Model could be added in the runtime; it doesn\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide a default way for metrics query, even if the metrics don\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics defined in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add missing doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jackson-databind to 2.9.10.  All issues and pull requests are here\n","excerpt":"SkyWalking APM 8.0.0 is released. 
Go to downloads page to find release tars.\nProject  v3 protocol is …","ref":"/events/release-apache-skywalking-apm-8-0-0/","title":"Release Apache SkyWalking APM 8.0.0"},{"body":"可观察性平台和开源应用程序性能监控(APM)项目 Apache SkyWalking,今天刚宣布 8.0 的发布版本。素以强劲指标、追踪与服务网格能力见称的 SkyWalking ,在最新版本中的功能性延展到用户渴求已久的功能 —— 将指标功能和包括 Prometheus 的其他指标收集系统进行了融合。\n什么是 Apache SkyWalking? SkyWalking 是可观察性平台和 APM 工具,可以选择是否搭载服务网格的使用,为微服务、云原生和容器化应用提供自动度量功能。顶尖的 Apache 项目由来自世界各地的社区人员支持,应用在阿里巴巴、华为、腾讯、百度和大量其他企业。SkyWalking 提供记录、监控和追踪功能,同时也得力于其架构而拥有数据收集终端、分析平台,还有用户界面。\n值得关注的优化包括:  用户界面 Dashboard 上提供百分百的自由度,用户可以任意进行配置,采用后台新定义的指标。 支持 Prometheus 导出格式。Prometheus 格式的指标可以转换至 SkyWalking。 SkyWalking 现已可以自主监控服务网格,为 Istio 和 Envoy 提供指标。 服务、实例、终端地址的注册机制,和库存存储实体已经被移除了。  无须修改原始码的前提下,为用户界面加入新的指标 对于 SkyWalking 的用户,8.0 版本的亮点将会是数据模型的更新,而且传播格式也针对更多语言进行优化。再加上引进了新的 MeterSystem ,除了可以同步运行传统追踪模式,用户还可自定义需要收集的指标。追踪和服务网格专注在拓扑和服务流量的指标上,而 MeterSystem 则汇报用户感兴趣的业务指标,例如是数据库存取性能、圣诞节期间的下单率,或者用户注册或下单的百分比。这些指标数据会在 SkyWalking 的用户界面 Dashboard 上以图像显示。指标的面板数据和拓扑图可以通过 Envoy 的指标绘制,而追踪分析也可以支持 Istio 的遥测。Dashboard 还支持以 JSON 格式导入、导出,而 Dashboard 上的自定义指标也支持设定指标名称、实体种类(服务、实例、终端地址或全部)、标记值等。用户界面模板上已详细描述了用户界面的逻辑和原型配置,以及它的 Dashboard、tab 和组件。\n观察任何配备了 Prometheus 的应用 在这次最新的社区发布中,SkyWalking 可以观察任何配备了 Prometheus 或者提供了 Prometheus 终端地址的应用。这项更新为很多想采用 SkyWalking 指标和追踪的用户节省了不少时间,现在你不再需要重新设置指标工具,就可以获得 Prometheus 数据。因为 Prometheus 更简单、更为人熟悉,是不少用户的不二选择。有了 8.0 版本,Prometheus 网络协议就能够读取所有已设定在 API 上的数据,另外 Prometheus 格式的指标也可转换至 SkyWalking 上。如此一来,通过图像方式展示,所有的指标和拓扑都能一目了然。同时,也支持 Prometheus 的 fetcher。\n监控你的网格 SkyWalking 现在不再只是监控服务或平台,而是监控整个网格。有了 8.0 版本,你除了能获取关于你的网格的指标(包括 Istio 和 Envoy 在内),同时也能通过 SkyWalking 监控自身的性能。因为当监控服务在观察业务集群的同时,它也能实现自我观察,确保运维团队拥有稳定可靠的平台。\n性能优化 最后,8.0 发布移除了注册机制,也不再需要使用独一无二的整数来代表实体。这项改变将大幅优化性能。想了解完整的更新功能列表,可以阅读在 SkyWalking 社区发布的公告页面。\n额外资源  追踪 Twitter 获取更多 SkyWalking 最新资讯 SkyWalking 未来的发布会加入原生指标 API 和融合 Micrometer (Sleuth) 指标集合。  ","excerpt":"可观察性平台和开源应用程序性能监控(APM)项目 Apache SkyWalking,今天刚宣布 8.0 的发布版本。素以强劲指标、追踪与服务网格能力见称的 SkyWalking ,在最新版本中的功能 …","ref":"/zh/whats-new-in-skywalking-metersystem-and-mesh-monitoring-in-8-0/","title":"SkyWalking 的最新动向?8.0 版本的 MeterSystem 和网格监控"},{"body":"作者:宋净超、张伟\n日前,云原生网络代理 MOSN v0.12.0 发布,观察性分析平台和应用性能管理系统 SkyWalking 完成了与 MOSN 的集成,作为 MOSN 中的支持的分布式追踪系统之一,旨在实现在微服务和 Service Mesh 中的更强大的可观察性。\n背景 相比传统的巨石(Monolith)应用,微服务的一个主要变化是将应用中的不同模块拆分为了独立的进程。在微服务架构下,原来进程内的方法调用成为了跨进程的远程方法调用。相对于单一进程内的方法调用而言,跨进程调用的调试和故障分析是非常困难的,难以使用传统的代码调试程序或者日志打印来对分布式的调用过程进行查看和分析。\n如上图右边所示,微服务架构中系统中各个微服务之间存在复杂的调用关系。\n一个来自客户端的请求在其业务处理过程中经过了多个微服务进程。我们如果想要对该请求的端到端调用过程进行完整的分析,则必须将该请求经过的所有进程的相关信息都收集起来并关联在一起,这就是“分布式追踪”。\n以上关于分布式追踪的介绍引用自 Istio Handbook。\nMOSN 中 tracing 的架构 MOSN 的 tracing 框架由 Driver、Tracer 和 Span 三个部分组成。\nDriver 是 Tracer 的容器,管理注册的 Tracer 实例,Tracer 是 tracing 的入口,根据请求信息创建一个 Span,Span 存储当前跨度的链路信息。\n目前 MOSN tracing 有 SOFATracer 和 SkyWalking 两种实现。SOFATracer 支持 http1 和 xprotocol 协议的链路追踪,将 trace 数据写入本地日志文件中。SkyWalking 支持 http1 协议的链路追踪,使用原生的 Go 语言探针 go2sky 将 trace 数据通过 gRPC 上报到 SkyWalking 后端服务。\n快速开始 下面将使用 Docker 和 docker-compose 来快速开始运行一个集成了 SkyWalking 的分布式追踪示例,该示例代码请见 MOSN GitHub。\n准备 安装 docker 和 docker-compose。\n  安装 docker\n  安装 docker-compose\n  需要一个编译好的 MOSN 程序,您可以下载 MOSN 源码自行编译,或者直接下载 MOSN v0.12.0 发行版以获取 MOSN 的运行时二进制文件。\n下面将以源码编译的方式演示 MOSN 如何与 SkyWalking 集成。\ncd ${projectpath}/cmd/mosn/main go build 获取示例代码目录。\n${targetpath} = ${projectpath}/examples/codes/trace/skywalking/http/ 将编译好的程序移动到示例代码目录。\nmv main ${targetpath}/ cd ${targetpath} 目录结构 下面是 SkyWalking 的目录结构。\n* skywalking └─── http │ main # 编译完成的 MOSN 程序 | server.go # 模拟的 Http Server | clint.go # 模拟的 Http Client | config.json # MOSN 
配置 | skywalking-docker-compose.yaml # skywalking docker-compose 运行说明 启动 SkyWalking oap \u0026amp; ui。\ndocker-compose -f skywalking-docker-compose.yaml up -d 启动一个 HTTP Server。\ngo run server.go 启动 MOSN。\n./main start -c config.json 启动一个 HTTP Client。\ngo run client.go 打开 http://127.0.0.1:8080 查看 SkyWalking-UI,SkyWalking Dashboard 界面如下图所示。\n在打开 Dashboard 后请点击右上角的 Auto 按钮以使页面自动刷新。\nDemo 视频 下面来看一下该 Demo 的操作视频。\n\n清理 要想销毁 SkyWalking 后台运行的 docker 容器只需要下面的命令。\ncd ${projectpath}/examples/codes/trace/skywalking/http/ docker-compose -f skywalking-docker-compose.yaml down 未来计划 在今年五月份,SkyWalking 8.0 版本会进行一次全面升级,采用新的探针协议和分析逻辑,探针将更具互感知能力,更好的在 Service Mesh 下使用探针进行监控。同时,SkyWalking 将开放之前仅存在于内核中的 metrics 指标分析体系。Prmoetheus、Spring Cloud Sleuth、Zabbix 等常用的 metrics 监控方式,都会被统一的接入进来,进行分析。此外, SkyWalking 与 MOSN 社区将继续合作:支持追踪 Dubbo 和 SOFARPC,同时适配 sidecar 模式下的链路追踪。\n关于 MOSN MOSN 是一款使用 Go 语言开发的网络代理软件,由蚂蚁金服开源并经过几十万容器的生产级验证。 MOSN 作为云原生的网络数据平面,旨在为服务提供多协议、模块化、智能化、安全的代理能力。 MOSN 是 Modular Open Smart Network 的简称。 MOSN 可以与任何支持 xDS API 的 Service Mesh 集成,亦可以作为独立的四、七层负载均衡,API Gateway、云原生 Ingress 等使用。\n GitHub:https://github.com/mosn/mosn 官网:https://mosn.io  关于 Skywalking SkyWalking 是观察性分析平台和应用性能管理系统。提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案。支持 Java、.Net Core、PHP、NodeJS、Golang、LUA 语言探针,支持 Envoy/MOSN + Istio 构建的 Service Mesh。\n GitHub:https://github.com/apache/skywalking 官网:https://skywalking.apache.org  关于本文中的示例请参考 MOSN GitHub 和 MOSN 官方文档。\n","excerpt":"作者:宋净超、张伟\n日前,云原生网络代理 MOSN v0.12.0 发布,观察性分析平台和应用性能管理系统 SkyWalking 完成了与 MOSN 的集成,作为 MOSN 中的支持的分布式追踪系统之 …","ref":"/zh/2020-04-28-skywalking-and-mosn/","title":"SkyWalking 支持云原生网络代理 MOSN 做分布式追踪"},{"body":"Based on his continuous contributions, Wei Zhang (a.k.a arugal) has been invited to join the PMC. Welcome aboard.\n","excerpt":"Based on his continuous contributions, Wei Zhang (a.k.a arugal) has been invited to join the PMC. …","ref":"/events/welcome-wei-zhang-to-join-the-pmc/","title":"Welcome Wei Zhang to join the PMC"},{"body":"目录:\n 1. 概述 2. 搭建 SkyWalking 单机环境 3. 搭建 SkyWalking 集群环境 4. 告警 5. 注意事项 6. Spring Boot 使用示例 6. Spring Cloud 使用示例    作者:芋道源码 原文地址   1. 概述 1.1 概念 SkyWalking 是什么?\n FROM http://skywalking.apache.org/\n分布式系统的应用程序性能监视工具,专为微服务、云原生架构和基于容器(Docker、K8s、Mesos)架构而设计。\n提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案。\n 1.2 功能列表 SkyWalking 有哪些功能?\n FROM http://skywalking.apache.org/\n 多种监控手段。可以通过语言探针和 service mesh 获得监控是数据。 多个语言自动探针。包括 Java,.NET Core 和 Node.JS。 轻量高效。无需大数据平台,和大量的服务器资源。 模块化。UI、存储、集群管理都有多种机制可选。 支持告警。 优秀的可视化解决方案。   1.3 整体架构 SkyWalking 整体架构如何?\n FROM http://skywalking.apache.org/\n 整个架构,分成上、下、左、右四部分:\n 考虑到让描述更简单,我们舍弃掉 Metric 指标相关,而着重在 Tracing 链路相关功能。\n  上部分 Agent :负责从应用中,收集链路信息,发送给 SkyWalking OAP 服务器。目前支持 SkyWalking、Zikpin、Jaeger 等提供的 Tracing 数据信息。而我们目前采用的是,SkyWalking Agent 收集 SkyWalking Tracing 数据,传递给服务器。 下部分 SkyWalking OAP :负责接收 Agent 发送的 Tracing 数据信息,然后进行分析(Analysis Core) ,存储到外部存储器( Storage ),最终提供查询( Query )功能。 右部分 Storage :Tracing 数据存储。目前支持 ES、MySQL、Sharding Sphere、TiDB、H2 多种存储器。而我们目前采用的是 ES ,主要考虑是 SkyWalking 开发团队自己的生产环境采用 ES 为主。 左部分 SkyWalking UI :负责提供控台,查看链路等等。  1.4 官方文档 在 https://github.com/apache/skywalking/tree/master/docs 地址下,提供了 SkyWalking 的英文文档。\n考虑到大多数胖友的英语水平和艿艿不相伯仲,再加上胖友一开始对 SkyWalking 比较陌生,所以比较推荐先阅读 https://github.com/SkyAPM/document-cn-translation-of-skywalking 地址,提供了 SkyWalking 的中文文档。\n考虑到胖友使用 SkyWalking 的目的,是实现分布式链路追踪的功能,所以最好去了解下相关的知识。这里推荐阅读两篇文章:\n 《OpenTracing 官方标准 —— 中文版》 Google 论文 《Dapper,大规模分布式系统的跟踪系统》  2. 
搭建 SkyWalking 单机环境 考虑到让胖友更快的入门,我们来搭建一个 SkyWalking 单机环境,步骤如下:\n 第一步,搭建一个 Elasticsearch 服务。 第二步,下载 SkyWalking 软件包。 第三步,搭建一个 SkyWalking OAP 服务。 第四步,启动一个 Spring Boot 应用,并配置 SkyWalking Agent。 第五步,搭建一个 SkyWalking UI 服务。  仅仅五步,按照艿艿标题党的性格,应该给本文取个《10 分钟快速搭建 SkyWalking 服务》标题才对,哈哈哈。\n2.1 Elasticsearch 搭建  FROM https://www.elastic.co/cn/products/elasticsearch\nElasticsearch 是一个分布式、RESTful 风格的搜索和数据分析引擎,能够解决不断涌现出的各种用例。 作为 Elastic Stack 的核心,它集中存储您的数据,帮助您发现意料之中以及意料之外的情况。\n 参考《Elasticsearch 极简入门》的「1. 单机部署」小节,搭建一个 Elasticsearch 单机服务。\n不过要注意,本文使用的是 Elasticsearch 7.5.1 版本。因为 SkyWalking 6.6.0 版本,增加了对 Elasticsearch 7.X 版本的支持。当然,如果胖友使用 Elasticsearch 6.X 版本也是可以的。\n2.2 下载 SkyWalking 软件包 对于 SkyWalking 的软件包,有两种方式获取:\n 手动编译 官方包  一般情况下,我们建议使用官方包。手动编译,更多是尝鲜或者等着急修复的 BUG 的版本。\n2.2.1 官方包 在 http://skywalking.apache.org/downloads/ 下,我们下载操作系统对应的发布版。\n这里,我们选择 Binary Distribution for ElasticSearch 7 (Linux) 版本,因为艿艿是 Mac 环境,再加上想使用 Elasticsearch 7.X 版本作为存储。如果胖友想用 Elasticsearch 6.X 版本作为存储,记得下载 Binary Distribution (Linux) 版本。\n① 下载:\n# 创建目录 $ mkdir -p /Users/yunai/skywalking $ cd /Users/yunai/skywalking # 下载 $ wget http://mirror.bit.edu.cn/apache/skywalking/6.6.0/apache-skywalking-apm-es7-6.6.0.tar.gz ② 解压:\n# 解压 $ tar -zxvf apache-skywalking-apm-es7-6.6.0.tar.gz $ cd apache-skywalking-apm-bin-es7 $ ls -ls 4 drwxr-xr-x 8 root root 4096 Sep 9 15:09 agent # SkyWalking Agent 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 bin # 执行脚本 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 config # SkyWalking OAP Server 配置文件 32 -rwxr-xr-x 1 root root 28903 Sep 9 14:32 LICENSE 4 drwxr-xr-x 3 root root 4096 Sep 9 15:44 licenses 32 -rwxr-xr-x 1 root root 31850 Sep 9 14:32 NOTICE 16 drwxr-xr-x 2 root root 16384 Sep 9 15:22 oap-libs # SkyWalking OAP Server 4 -rw-r--r-- 1 root root 1978 Sep 9 14:32 README.txt 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 webapp # SkyWalking UI 2.2.2 手动编译  友情提示:如果胖友没有编译 SkyWalking 源码的诉求,可以跳过本小节。\n 参考 How to build project 文章。\n需要前置安装如下:\n GIT JDK 8+ Maven  ① 克隆代码:\n$ git clone https://github.com/apache/skywalking.git  因为网络问题,可能克隆会有点久。  ② 初始化子模块:\n$ cd skywalking $ git submodule init $ git submodule update ③ 编译\n$ ./mvnw clean package -DskipTests  编译过程,如果机子比较差,花费时间会比较久。  ④ 查看编译结果\n$ cd apm-dist # 编译结果目录 $ cd target $ tar -zxvf apache-skywalking-apm-bin.tar.gz # 解压 Linux 包 $ cd apache-skywalking-apm-bin $ ls -ls 4 drwxr-xr-x 8 root root 4096 Sep 9 15:09 agent # SkyWalking Agent 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 bin # 执行脚本 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 config # SkyWalking OAP Server 配置文件 32 -rwxr-xr-x 1 root root 28903 Sep 9 14:32 LICENSE 4 drwxr-xr-x 3 root root 4096 Sep 9 15:44 licenses 32 -rwxr-xr-x 1 root root 31850 Sep 9 14:32 NOTICE 16 drwxr-xr-x 2 root root 16384 Sep 9 15:22 oap-libs # SkyWalking OAP Server 4 -rw-r--r-- 1 root root 1978 Sep 9 14:32 README.txt 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 webapp # SkyWalking UI 2.3 SkyWalking OAP 搭建 ① 修改 OAP 配置文件\n 友情提示:如果配置文件,适合 SkyWalking 6.X 版本。\n $ vi config/application.ymlstorage:elasticsearch7:nameSpace:${SW_NAMESPACE:\u0026#34;elasticsearch\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}# trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}# trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}# Those data TTL settings will 
override the same settings in core module.recordDataTTL:${SW_STORAGE_ES_RECORD_DATA_TTL:7}# Unit is dayotherMetricsDataTTL:${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45}# Unit is daymonthMetricsDataTTL:${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18}# Unit is month# Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.htmlbulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the bulk every 1000 requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}# h2:# driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}# url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}# user: ${SW_STORAGE_H2_USER:sa}# metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000} storage.elasticsearch7 配置项,设置使用 Elasticsearch 7.X 版本作为存储器。  这里,我们打开注释,并记得通过 nameSpace 设置 Elasticsearch 集群名。   storage.elasticsearch 配置项,设置使用 Elasticsearch 6.X 版本作为存储器。  这里,我们无需做任何改动。 如果胖友使用 Elasticsearch 6.X 版本作为存储器,记得设置这个配置项,而不是 storage.elasticsearch7 配置项。   storage.h2 配置项,设置使用 H2 作为存储器。  这里,我们需要手动注释掉,因为 H2 是默认配置的存储器。     友情提示:如果配置文件,适合 SkyWalking 7.X 版本。\n  重点修改 storage 配置项,通过 storage.selector 配置项来设置具体使用的存储器。 storage.elasticsearch 配置项,设置使用 Elasticsearch 6.X 版本作为存储器。胖友可以主要修改 nameSpace、clusterNodes 两个配置项即可,设置使用的 Elasticsearch 的集群和命名空间。 storage.elasticsearch7 配置项,设置使用 Elasticsearch 7.X 版本作为存储器。 还有 MySQL、H2、InfluxDB 等等存储器的配置可以选择,胖友自己根据需要去选择哈~  ② 启动 SkyWalking OAP 服务\n$ bin/oapService.sh SkyWalking OAP started successfully! 是否真正启动成功,胖友打开 logs/skywalking-oap-server.log 日志文件,查看是否有错误日志。首次启动时,因为 SkyWalking OAP 会创建 Elasticsearch 的索引,所以会“疯狂”的打印日志。最终,我们看到如下日志,基本可以代表 SkyWalking OAP 服务启动成功:\n 友情提示:因为首次启动会创建 Elasticsearch 索引,所以可能会比较慢。\n 2020-01-02 18:22:53,635 - org.eclipse.jetty.server.Server - 444 [main] INFO [] - Started @35249ms 2.4 SkyWalking UI 搭建 ① 启动 SkyWalking UI 服务\nbin/webappService.sh SkyWalking Web Application started successfully! 
是否真正启动成功,胖友打开 logs/logs/webapp.log 日志文件,查看是否有错误日志。最终,我们看到如下日志,基本可以代表 SkyWalking UI 服务启动成功:\n2020-01-02 18:27:02.824 INFO 48250 --- [main] o.a.s.apm.webapp.ApplicationStartUp : Started ApplicationStartUp in 7.774 seconds (JVM running for 8.316) 如果想要修改 SkyWalking UI 服务的参数,可以编辑 webapp/webapp.yml 配置文件。例如说:\n server.port :SkyWalking UI 服务端口。 collector.ribbon.listOfServers :SkyWalking OAP 服务地址数组。因为 SkyWalking UI 界面的数据,是通过请求 SkyWalking OAP 服务来获得的。  ② 访问 UI 界面:\n浏览器打开 http://127.0.0.1:8080 。界面如下图:2.5 SkyWalking Agent 大多数情况下,我们在启动项目的 Shell 脚本上,通过 -javaagent 参数进行配置 SkyWalking Agent 。我们在 「2.3.1 Shell」 小节来看。\n考虑到偶尔我们需要在 IDE 中,也希望使用 SkyWalking Agent ,所以我们在 「2.3.2 IDEA」 小节来看。\n2.3.1 Shell ① Agent 软件包\n我们需要将 apache-skywalking-apm-bin/agent 目录,拷贝到 Java 应用所在的服务器上。这样,Java 应用才可以配置使用该 SkyWalking Agent。我们来看看 Agent 目录下有哪些:\n$ ls -ls total 35176 0 drwxr-xr-x@ 7 yunai staff 224 Dec 24 14:20 activations 0 drwxr-xr-x@ 4 yunai staff 128 Dec 24 14:21 bootstrap-plugins 0 drwxr-xr-x@ 3 yunai staff 96 Dec 24 14:12 config # SkyWalking Agent 配置 0 drwxr-xr-x@ 3 yunai staff 96 Jan 2 19:29 logs # SkyWalking Agent 日志 0 drwxr-xr-x@ 13 yunai staff 416 Dec 24 14:22 optional-plugins # 可选插件 0 drwxr-xr-x@ 68 yunai staff 2176 Dec 24 14:20 plugins # 插件 35176 -rw-r--r--@ 1 yunai staff 18006420 Dec 24 14:12 skywalking-agent.jar # SkyWalking Agent  关于 SkyWalking Agent 提供的插件列表,可以看看《SkyWalking 文档 —— 插件支持列表》。  因为艿艿是在本机测试,所以无需拷贝,SkyWalking Agent 目录是 /Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/。\n考虑到方便胖友,艿艿这里提供了一个最简的 Spring Boot 应用 lab-39-demo-2.2.2.RELEASE.jar。对应 Github 仓库是 lab-39-demo。\n② 配置 Java 启动脚本\n# SkyWalking Agent 配置 export SW_AGENT_NAME=demo-application # 配置 Agent 名字。一般来说,我们直接使用 Spring Boot 项目的 `spring.application.name` 。 export SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800 # 配置 Collector 地址。 export SW_AGENT_SPAN_LIMIT=2000 # 配置链路的最大 Span 数量。一般情况下,不需要配置,默认为 300 。主要考虑,有些新上 SkyWalking Agent 的项目,代码可能比较糟糕。 export JAVA_AGENT=-javaagent:/Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/skywalking-agent.jar # SkyWalking Agent jar 地址。 # Jar 启动 java -jar $JAVA_AGENT -jar lab-39-demo-2.2.2.RELEASE.jar  通过环境变量,进行配置。 更多的变量,可以在 /work/programs/skywalking/apache-skywalking-apm-bin/agent/config/agent.config 查看。要注意,可能有些变量是被注释掉的,例如说 SW_AGENT_SPAN_LIMIT 对应的 agent.span_limit_per_segment 。  ③ 执行脚本:\n直接执行上述的 Shell 脚本,启动 Java 项目。在启动日志中,我们可以看到 SkyWalking Agent 被加载的日志。日志示例如下:\nDEBUG 2020-01-02 19:29:29:400 main AgentPackagePath : The beacon class location is jar:file:/Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/skywalking-agent.jar!/org/apache/skywalking/apm/agent/core/boot/AgentPackagePath.class. INFO 2020-01-02 19:29:29:402 main SnifferConfigInitializer : Config file found in /Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/config/agent.config. 同时,也可以在 /Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/agent/logs/skywalking-api.log 查看对应的 SkyWalking Agent 日志。日志示例如下:\nDEBUG 2020-01-02 19:37:22:539 SkywalkingAgent-5-ServiceAndEndpointRegisterClient-0 ServiceAndEndpointRegisterClient : ServiceAndEndpointRegisterClient running, status:CONNECTED.  
这里,我们看到 status:CONNECTED ,表示 SkyWalking Agent 连接 SkyWalking OAP 服务成功。  ④ 简单测试\n完事,可以去 SkyWalking UI 查看是否链路收集成功。\n1、首先,使用浏览器,访问下 http://127.0.0.1:8079/demo/echo 地址,请求下 Spring Boot 应用提供的 API。因为,我们要追踪下该链路。\n2、然后,继续使用浏览器,打开 http://127.0.0.1:8080/ 地址,进入 SkyWalking UI 界面。如下图所示:这里,我们会看到 SkyWalking 中非常重要的三个概念:\n  服务(Service) :表示对请求提供相同行为的一系列或一组工作负载。在使用 Agent 或 SDK 的时候,你可以定义服务的名字。如果不定义的话,SkyWalking 将会使用你在平台(例如说 Istio)上定义的名字。\n 这里,我们可以看到 Spring Boot 应用的服务为 \u0026quot;demo-application\u0026quot;,就是我们在环境变量 SW_AGENT_NAME 中所定义的。\n   服务实例(Service Instance) :上述的一组工作负载中的每一个工作负载称为一个实例。就像 Kubernetes 中的 pods 一样, 服务实例未必就是操作系统上的一个进程。但当你在使用 Agent 的时候, 一个服务实例实际就是操作系统上的一个真实进程。\n 这里,我们可以看到 Spring Boot 应用的服务为 {agent_name}-pid:{pid}@{hostname},由 Agent 自动生成。关于它,我们在「5.1 hostname」小节中,有进一步的讲解,胖友可以瞅瞅。\n   端点(Endpoint) :对于特定服务所接收的请求路径, 如 HTTP 的 URI 路径和 gRPC 服务的类名 + 方法签名。\n 这里,我们可以看到 Spring Boot 应用的一个端点,为 API 接口 /demo/echo。\n   3、之后,点击「拓扑图」菜单,进入查看拓扑图的界面。如下图所示:4、再之后,点击「追踪」菜单,进入查看链路数据的界面。如下图所示:2.3.2 IDEA 我们统一使用 IDEA 作为开发 IDE ,所以忽略 Eclipse 的配置方式。\n具体参考下图,比较简单:3. 搭建 SkyWalking 集群环境 在生产环境下,我们一般推荐搭建 SkyWalking 集群环境。😈 当然,如果公司比较抠门,也可以在生产环境下使用 SkyWalking 单机环境,毕竟 SkyWalking 挂了之后,不影响业务的正常运行。\n搭建一个 SkyWalking 集群环境,步骤如下:\n 第一步,搭建一个 Elasticsearch 服务的集群。 第二步,搭建一个注册中心的集群。目前 SkyWalking 支持 Zookeeper、Kubernetes、Consul、Nacos 作为注册中心。 第三步,搭建一个 SkyWalking OAP 服务的集群,同时参考《SkyWalking 文档 —— 集群管理》,将 SkyWalking OAP 服务注册到注册中心上。 第四步,启动一个 Spring Boot 应用,并配置 SkyWalking Agent。另外,在设置 SkyWaling Agent 的 SW_AGENT_COLLECTOR_BACKEND_SERVICES 地址时,需要设置多个 SkyWalking OAP 服务的地址数组。 第五步,搭建一个 SkyWalking UI 服务的集群,同时使用 Nginx 进行负载均衡。另外,在设置 SkyWalking UI 的 collector.ribbon.listOfServers 地址时,也需要设置多个 SkyWalking OAP 服务的地址数组。  😈 具体的搭建过程,并不复杂,胖友自己去尝试下。\n4. 告警 在 SkyWaling 中,已经提供了告警功能,具体可见《SkyWalking 文档 —— 告警》。\n默认情况下,SkyWalking 已经内置告警规则。同时,我们可以参考告警规则,进行自定义。\n在满足 SkyWalking 告警规则的触发规则时,我们在 SkyWaling UI 的告警界面,可以看到告警内容。如下图所示:同时,我们自定义 Webhook ,对接 SkyWalking 的告警请求。而具体的邮箱、钉钉等告警方式,需要自己进行开发。至于自定义 WebHook 如何实现,可以参考:\n Java 语言:  《基于 SkyWalking 的分布式跟踪系统 - 异常告警》   Go 语言:  dingding-notify-for-skywalking infra-skywalking-webhook    5. 注意事项 5.1 hostname 配置 在 SkyWalking 中,每个被监控的实例的名字,会包含 hostname 。格式为:{agent_name}-pid:{pid}@{hostname} ,例如说:\u0026quot;scrm-scheduler-pid:27629@iZbp1e2xlyvr7fh67qi59oZ\u0026quot; 。\n因为有些服务器未正确设置 hostname ,所以我们一定要去修改,不然都不知道是哪个服务器上的实例(😈 鬼知道 \u0026quot;iZbp1e2xlyvr7fh67qi59oZ\u0026quot; 一串是哪个服务器啊)。\n修改方式如下:\n1、修改 /etc/hosts 的 hostname :\n127.0.0.1 localhost ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 10.80.62.151 pre-app-01 # 就是这个,其中 10.80.62.151 是本机内网 IP ,pre-app-01 是 hostname 。 2、修改本机 hostname :\n参考 《CentOS7 修改主机名(hostname)》\n$ hostname pre-app-01 # 其中 pre-app-01 就是你希望的 hostname 。 $ hostnamectl set-hostname pre-app-01 # 其中 pre-app-01 就是你希望的 hostname 。 6. Spring Boot 使用示例 在 《芋道 Spring Boot 链路追踪 SkyWalking 入门》 中,我们来详细学习如何在 Spring Boot 中,整合并使用 SkyWalking 收集链路数据。😈 相比「2.5 SkyWaling Agent」来说,我们会提供更加丰富的示例哟。\n7. Spring Cloud 使用示例 在 《芋道 Spring Cloud 链路追踪 SkyWalking 入门》 中,我们来详细学习如何在 Spring Cloud 中,整合并使用 SkyWalking 收集链路数据。😈 相比「2.5 SkyWaling Agent」来说,我们会提供更加丰富的示例哟。\n666. 彩蛋 本文仅仅是简单的 SkyWalking 入门文章,如果胖友想要更好的使用 SkyWalking,推荐通读下《SkyWalking 文档》。\n想要进一步深入的胖友,也可以阅读如下资料:\n 《SkyWalking 源码解析》 《APM 巅峰对决:Apache Skywalking P.K. Pinpoint》 《SkyWalking 官方 —— 博客合集》  😈 最后弱弱的问一句,上完 SkyWaling 之后,有没发现自己系统各种地方慢慢慢!嘻嘻。\n","excerpt":"目录:\n 1. 概述 2. 搭建 SkyWalking 单机环境 3. 搭建 SkyWalking 集群环境 4. 告警 5. 注意事项 6. Spring Boot 使用示例 6. 
Spring …","ref":"/zh/2020-04-19-skywalking-quick-start/","title":"SkyWalking 极简入门"},{"body":"This post originally appears on The New Stack\nThis post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as the system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have a way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. 
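As a rough illustration of that merging step (this is not SkyWalking's actual profile analysis code; the ProfileEstimator class, the estimate method, the 10ms interval, and the stack-element strings are all invented for the example), consecutive snapshots can be folded into per-method time estimates like this:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public final class ProfileEstimator {

    // If the same stack element (method signature plus line number) appears in two consecutive
    // snapshots, assume it was executing for the whole interval between those snapshots and
    // credit it with that time. Summing over all consecutive pairs gives the estimate.
    static Map<String, Long> estimate(List<Set<String>> snapshots, long intervalMs) {
        Map<String, Long> estimatedMs = new HashMap<>();
        for (int i = 1; i < snapshots.size(); i++) {
            for (String element : snapshots.get(i)) {
                if (snapshots.get(i - 1).contains(element)) {
                    estimatedMs.merge(element, intervalMs, Long::sum);
                }
            }
        }
        return estimatedMs;
    }

    public static void main(String[] args) {
        // Three snapshots taken 10ms apart: "Service.slow():42" is present in all of them,
        // so it is credited with ~20ms; "Util.fast():7" is seen only once and gets no estimate.
        List<Set<String>> snapshots = List.of(
                Set.of("Service.slow():42", "Util.fast():7"),
                Set.of("Service.slow():42"),
                Set.of("Service.slow():42"));
        System.out.println(estimate(snapshots, 10)); // {Service.slow():42=20}
    }
}
```

With a 10ms dump interval, an element seen in N consecutive snapshots is credited with roughly (N - 1) * 10ms, which is exactly the dumps-4-to-8 style of estimate described below.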
The key for distributed tracing is the tracing context, the identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider method invocations with the same stack depth and signature (method, line number, etc.) to be the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nThe figure above represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped, but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling with Apache SkyWalking 7 Distributed profiling is built into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatch countDownLatch = new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it is only slow when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so it is easy to see that the p99 of this endpoint is far from the average response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result says.\nThis is the profile analysis stack view. We see the stack element names, the durations (including/excluding children), and the slowest methods highlighted. It clearly shows that “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. 
If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead: the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nHow to Try This Everything we discussed, including the Apache SkyWalking Java Agent, profile analysis code, and UI, can be found in our GitHub repository. We hope you enjoyed this new profile method, and love Apache SkyWalking. If so, give us a star on GitHub to encourage us.\nSkyWalking 7 has just been released. You can contact the project team through the following channels:\n Follow SkyWalking on Twitter. Subscribe to the mailing list: dev@skywalking.apache.org. Send an email to dev-subscribe@skywalking.apache.org to subscribe.  Co-author Sheng Wu is a Tetrate founding engineer and the founder and VP of Apache SkyWalking. He is solving the problem of observability for large-scale service meshes in hybrid and multi-cloud environments.\nAdrian Cole works in the Spring Cloud team at VMware, mostly on Zipkin.\nHan Liu is a tech expert at Lagou. He is an Apache SkyWalking committer.\n","excerpt":"This post originally appears on The New Stack\nThis post introduces a way to automatically profile …","ref":"/blog/2020-04-13-apache-skywalking-profiling/","title":"Apache SkyWalking: Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"SkyWalking Chart 2.0.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 7.0.0 Support set ES user/password Add CI for release  ","excerpt":"SkyWalking Chart 2.0.0 is released. Go to downloads page to find release tars.\n Support SkyWalking …","ref":"/events/release-apache-skywalking-chart-2-0-0-for-skywalking-7-0-0/","title":"Release Apache SkyWalking Chart 2.0.0 for SkyWalking 7.0.0"},{"body":"SkyWalking APM 7.0.0 is released. Go to downloads page to find release tars.\n Upgrade JDK minimal JDK requirement to JDK8 Support profiling code level performance Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. V6 is required.  ","excerpt":"SkyWalking APM 7.0.0 is released. 
Go to downloads page to find release tars.\n Upgrade JDK minimal JDK …","ref":"/events/release-apache-skywalking-apm-7-0-0/","title":"Release Apache SkyWalking APM 7.0.0"},{"body":"","excerpt":"","ref":"/zh_tags/agent/","title":"Agent"},{"body":"","excerpt":"","ref":"/zh_tags/java/","title":"Java"},{"body":"","excerpt":"","ref":"/zh_tags/profiling/","title":"Profiling"},{"body":" 作者:吴晟,刘晗 原文地址  在本文中,我们详细介绍了代码级的性能剖析方法,以及我们在 Apache SkyWalking 中的实践。希望能够帮助大家在线定位系统性能短板,缓解系统压力。\n分布式链路追踪的局限性 在传统的监控系统中,我们如果想要得知系统中的业务是否正常,会采用进程监控、日志收集分析等方式来对系统进行监控。当机器或者服务出现问题时,则会触发告警及时通知负责人。通过这种方式,我们可以得知具体哪些服务出现了问题。但是这时我们并不能得知具体的错误原因出在了哪里,开发人员或者运维人员需要到日志系统里面查看错误日志,甚至需要到真实的业务服务器上查看执行情况来解决问题。\n如此一来,仅仅是发现问题的阶段,可能就会耗费相当长的时间;另外,发现问题但是并不能追溯到问题产生具体原因的情况,也常有发生。这样反反复复极其耗费时间和精力,为此我们便有了基于分布式追踪的 APM 系统。\n通过将业务系统接入分布式追踪中,我们就像是给程序增加了一个放大镜功能,可以清晰看到真实业务请求的整体链路,包括请求时间、请求路径,甚至是操作数据库的语句都可以看得一清二楚。通过这种方式,我们结合告警便可以快速追踪到真实用户请求的完整链路信息,并且这些数据信息完全是持久化的,可以随时进行查询,复盘错误的原因。\n然而随着我们对服务监控理解的加深,我们发现事情并没有那么简单。在分布式链路追踪中我们有这样的两个流派:代码埋点和字节码增强。无论使用哪种方式,底层逻辑一定都逃不过面向切面这个基础逻辑。因为只有这样才可以做到大面积的使用。这也就决定了它只能做到框架级别和 RPC 粒度的监控。这时我们可能依旧会遇到程序执行缓慢或者响应时间不稳定等情况,但无法具体查询到原因。这时候,大家很自然的会考虑到增加埋点粒度,比如对所有的 Spring Bean 方法、甚至主要的业务层方法都加上埋点。但是这种思路会遇到不小的挑战:\n第一,增加埋点时系统开销大,埋点覆盖不够全面。通过这种方式我们确实可以做到具体业务场景具体分析。但随着业务不断迭代上线,弊端也很明显:大量的埋点无疑会加大系统资源的开销,造成 CPU、内存使用率增加,更有可能拖慢整个链路的执行效率。虽然每个埋点消耗的性能很小,在微秒级别,但是因为数量的增加,甚至因为业务代码重用造成重复埋点或者循环使用,此时的性能开销已经无法忽略。\n第二,动态埋点作为一项埋点技术,和手动埋点的性能消耗上十分类似,只是减少的代码修改量,但是因为通用技术的特别,上一个挑战中提到的循环埋点和重复使用的场景甚至更为严重。比如选择所有方法或者特定包下的所有方法埋点,很可能造成系统性能彻底崩溃。\n第三,即使我们通过合理设计和埋点,解决了上述问题,但是 JDK 函数是广泛使用的,我们很难限制对 JDK API 的使用场景。对 JDK 过多方法、特别是非 RPC 方法的监控会造成系统的巨大延迟风险。而且有一些基础类型和底层工具类,是很难通过字节码进行增强的。当我们的 SDK 使用不当或者出现 bug 时,我们无法具体得知真实的错误原因。\n代码级性能剖析方法 方法介绍 基于以上问题,在系统性能监控方法上,我们提出了代码级性能剖析这种在线诊断方法。这种方法基于一个高级语言编程模型共性,即使再复杂的系统,再复杂的业务逻辑,都是基于线程去进行执行的,而且多数逻辑是在单个线程状态下执行的。\n代码级性能剖析就是利用方法栈快照,并对方法执行情况进行分析和汇总。并结合有限的分布式追踪 span 上下文,对代码执行速度进行估算。\n性能剖析激活时,会对指定线程周期性的进行线程栈快照,并将所有的快照进行汇总分析,如果两个连续的快照含有同样的方法栈,则说明此栈中的方法大概率在这个时间间隔内都处于执行状态。从而,通过这种连续快照的时间间隔累加成为估算的方法执行时间。时间估算方法如下图所示:\n在上图中,d0-d10 代表 10 次连续的内存栈快照,实际方法执行时间在 d3-d4 区间,结束时间在 d8-d9 之间。性能剖析无法告诉你方法的准确执行时间,但是他会估算出方法执行时间为 d4-d8 的 4 个快照采集间隔时间之和,这已经是非常的精确的时间估算了。\n而这个过程因为不涉及代码埋点,所以自然性能消耗是稳定和可控的,也无需担心是否被埋点,是否是 JDK 方法等问题。同时,由于上层已经在分布式追踪之下,性能剖析方法可以明确地确定分析开始和结束时间,减少不必要的性能开销。\n性能剖析可以很好的对线程的堆栈信息进行监控,主要有以下几点优势:\n 精确的问题定位,直接到代码方法和代码行; 无需反复的增删埋点,大大减少了人力开发成本; 不用承担过多埋点对目标系统和监控系统的压力和性能风险; 按需使用,平时对系统无消耗,使用时的消耗稳定可能。  SkyWalking 实践实例 我们首先在 Apache SkyWalking APM 中实现此技术方法,下面我们就以一个真实的例子来说明此方法的执行效果。\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedExceptione) { } 这是我们故意加入的问题代码,我们使用 CountDownLanth 设置了两个任务完成后方法执行结束,Task1 和 Task2 是两个执行时间不稳定的任务,所以主任务也会执行速度不稳定。但对于运维和监控团队来说,很难定位到这个方法片段。\n针对于这种情况,我们看看性能剖析会怎样直接定位此问题。\n上图所示的就是我们在进行链路追踪时所看到的真实执行情况,其中我们可以看到在 service/processWithThreadPool 执行速度缓慢,这正是我们植入问题代码的方法。此时在这个调用中没有后续链路了,所以并没有更细致的原因,我们也不打算去 review 代码,从而增加新埋点。这时,我们可以对 HelloService 进行性能剖析,并执行只剖析响应速度大于 500 毫秒的请求。\n注意,指定特定响应时间的剖析是保证剖析有效性的重要特性,如果方法在平均响应时间上已经出现问题,往往通过分布式链路可以快速定位,因为此时链路总时间长,新埋点带来的性能影响相对可控。但是方法性能抖动是不容易用新增埋点来解决的,而且往往只发生在生产环境。\n上图就是我们进行性能剖析后的真实结果图。从左到右分别表示:栈帧名称、该栈帧总计耗时(包含其下面所有自栈帧)、当前栈帧自身耗时和监控次数。我们可以在最后一行看到,线程卡在了 sun.misc.Unsafe.park 中了。如果你熟悉 Java 就可以知道此时进行了锁等待,我们继续按照树的结构向上推,便可以看到线程真正是卡在了 CountDownLatch.await 方法中。\n方法局限性 当然任何的方法都不是万能的,性能剖析也有一些局限性。\n第一, 对于高频反复执行的方法,如循环调用,可能会误报为缓慢方法。但这并不是大问题,因为如果反复执行的耗时较长,必然是系统需要关注的性能瓶颈。\n第二, 由于性能栈快照有一定的性能消耗,所以采集周期不宜过密,如 SkyWalking 实践中,不支持小于 10ms 的采集间隔。所以如果问题方法执行时间过小(比如在 10 
毫秒内波动),此方法并不适用。我们也再此强调,方法论和工具的强大,始终不能代替程序员。\n","excerpt":"作者:吴晟,刘晗 原文地址  在本文中,我们详细介绍了代码级的性能剖析方法,以及我们在 Apache SkyWalking 中的实践。希望能够帮助大家在线定位系统性能短板,缓解系统压力。\n分布式链路追 …","ref":"/zh/2020-03-23-using-profiling-to-fix-the-blind-spot-of-distributed-tracing/","title":"在线代码级性能剖析,补全分布式追踪的最后一块“短板”"},{"body":"SkyWalking CLI 0.2.0 is released. Go to downloads page to find release tars.\n Support visualization of heat map Support top N entities, swctl metrics top 5 --name service_sla Support thermodynamic metrics, swctl metrics thermodynamic --name all_heatmap Support multiple linear metrics, swctl --display=graph --debug metrics multiple-linear --name all_percentile  ","excerpt":"SkyWalking CLI 0.2.0 is released. Go to downloads page to find release tars.\n Support visualization …","ref":"/events/release-apache-skywalking-cli-0-2-0/","title":"Release Apache SkyWalking CLI 0.2.0"},{"body":"SkyWalking Chart 1.1.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 6.6.0 Support deploy Elasticsearch 7 The official helm repo was changed to the official Elasticsearch repo (https://helm.elastic.co/)  ","excerpt":"SkyWalking Chart 1.1.0 is released. Go to downloads page to find release tars.\n Support SkyWalking …","ref":"/events/release-apache-skywalking-chart-1-1-0-for-skywalking-6-6-0/","title":"Release Apache SkyWalking Chart 1.1.0 for SkyWalking 6.6.0"},{"body":"Support tracing and collect metrics from Nginx server. Require SkyWalking APM 7.0+.\n","excerpt":"Support tracing and collect metrics from Nginx server. Require SkyWalking APM 7.0+.","ref":"/events/skywalking-nginx-lua-0-1-0-release/","title":"SkyWalking Nginx LUA 0.1.0 release"},{"body":"Based on his continuous contributions, Ming Wen (a.k.a moonming) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Ming Wen (a.k.a moonming) has been voted as a new committer.","ref":"/events/welcome-ming-wen-as-new-committer/","title":"Welcome Ming Wen as new committer"},{"body":"Based on his continuous contributions, Haochao Zhuang (a.k.a dmsolr) has been invited to join the PMC. Welcome aboard.\n","excerpt":"Based on his continuous contributions, Haochao Zhuang (a.k.a dmsolr) has been invited to join the …","ref":"/events/welcome-haochao-zhuang-to-join-the-pmc/","title":"Welcome Haochao Zhuang to join the PMC"},{"body":"Based on his continuous contributions, Zhusheng Xu (a.k.a aderm) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Zhusheng Xu (a.k.a aderm) has been voted as a new committer.","ref":"/events/welcome-zhusheng-xu-as-new-committer/","title":"Welcome Zhusheng Xu as new committer"},{"body":"Based on his continuous contributions, Han Liu (a.k.a mrproliu) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Han Liu (a.k.a mrproliu) has been voted as a new committer.","ref":"/events/welcome-han-liu-as-new-committer/","title":"Welcome Han Liu as new committer"},{"body":" Author: Wu Sheng, tetrate.io, SkyWalking original creator, SkyWalking V.P. GitHub, Twitter, Linkedin  The SkyWalking project provides distributed tracing, topology map analysis, service mesh telemetry analysis, metrics analysis and a super cool visualization targeting distributed systems in k8s or traditional VM deployments.\nThe project is widely used in Alibaba, Huawei, Tencent, DiDi, xiaomi, Pingan, China’s top 3 telecom companies (China Mobile, China telecom, China Unicom), airlines, banks and more. 
It has over 140 company users listed on our powered by page.\nToday, we welcome and celebrate reaching 200 code contributors on our main repo. We hereby mark this milestone as official today: Jan. 20th, 2020.\nAt this great moment, I would like to share SkyWalking’s 4-year open source journey.\nI wrote the first line on Nov. 1st, 2015, guiding people to understand a distributed system just as micro-services and distributed architecture were becoming popular. In the first 2 years, I never thought it would become such a big and active community. I didn’t even expect it would be an open source project. Initially, the goal was primarily to teach others about distributed tracing and analysis.\nIt was a typical open source project in obscurity in its first two years. But people still showed up, asked questions, and tried to improve the project. I got several invitations to share the project at local meetups. All these made me realize people really needed a good open source APM project.\nIn 2017, I decided to dedicate myself as much as possible to make the project successful, and it became my day job. To be honest, I had no clue about how to do that; at that time in China, it was rare to have this kind of job. So, I began to ask friends around me, “Do you want to collaborate on the open source APM with me?” Most people were busy and gave a clear NO, but two of them agreed to help: Xin Zhang and Yongsheng Peng. We built SkyWalking 3.x and shared the 3.2 release at GOPS Shanghai, China.\nIt became the first version adopted in production.\nCompared to today\u0026rsquo;s SkyWalking, it was a toy prototype, but it had the same tracing design, protocol and analysis method.\nThat year the contributor team was 15-20, and the project had obvious potential to expand. I began to consider bringing the project into a worldwide, top-level open source foundation. Thanks to our initial incubator mentors, Michael Semb Wever, William Jiang, and Luke Han, this really worked. At the end of 2017, SkyWalking joined the Apache Incubator, and kept following the Apache Way to build community. More contributors joined the community.\nWith more people spending time on the project collaborations, including code, tests, blogs, conference talks, books and uses of the project, a chemical reaction happens. New developers begin to provide bug fixes, new feature requirements and new proposals. At the moment of graduation in spring 2019, the project had 100 contributors. Now, only 9 months later, it’s surged to 200 super quickly. They enhance the project and extend it to frontiers we never imagined: 5 popular language agents, service mesh adoption, a CLI tool, super cool visualization. We are even moving on to thread profiling, browser performance and Nginx tracing NOW.\nOver the whole 4+ year open source journey, we have had support from leaders in the tracing open source community around the world, including Adrian Cole, William Jiang, Luke Han, Michael Semb Wever, Ben Sigelman, and Jonah Kowall. And we’ve had critical help from foundations, especially the Apache Software Foundation and the Cloud Native Computing Foundation.\nOur contributors also have support from their employers, including, to the best of my knowledge, Alibaba, Huawei, China Mobile, ke.com, DaoCloud, Lizhi.fm, Yonghui Supermarket, and dangdang.com. I also have support from my employers, tetrate.io, Huawei, and OneAPM.\nThanks to our 200+ contributors and the companies behind them. 
You make this magic happen.\n","excerpt":"Author: Wu Sheng, tetrate.io, SkyWalking original creator, SkyWalking V.P. GitHub, Twitter, Linkedin …","ref":"/blog/2020-01-20-celebrate-200th-contributor/","title":"SkyWalking hits 200 contributors mark"},{"body":"Based on his continuous contributions, Hongwei Zhai (a.k.a innerpeacez) has been invited to join the PMC. Welcome aboard.\n","excerpt":"Based on his continuous contributions, Hongwei Zhai (a.k.a innerpeacez) has been invited to join the …","ref":"/events/welcome-hongwei-zhai-to-join-the-pmc/","title":"Welcome Hongwei Zhai to join the PMC"},{"body":"Apache APM 6.6.0 release. Go to downloads page to find release tars.\n Service Instance dependency detection are available. Support ElasticSearch 7 as a storage option. Reduce the register load.  ","excerpt":"Apache APM 6.6.0 release. Go to downloads page to find release tars.\n Service Instance dependency …","ref":"/events/release-apache-skywalking-apm-6-6-0/","title":"Release Apache SkyWalking APM 6.6.0"},{"body":"SkyWalking Chart 1.0.0 is released. Go to downloads page to find release tars.\n Deploy SkyWalking 6.5.0 by Chart. Elasticsearch deploy optional.  ","excerpt":"SkyWalking Chart 1.0.0 is released. Go to downloads page to find release tars.\n Deploy SkyWalking …","ref":"/events/release-apache-skywalking-chart-1-0-0-for-skywalking-6-5-0/","title":"Release Apache SkyWalking Chart 1.0.0 for SkyWalking 6.5.0"},{"body":"SkyWalking CLI 0.1.0 is released. Go to downloads page to find release tars.\n Add command swctl service to list services Add command swctl instance and swctl search to list and search instances of service. Add command swctl endpoint to list endpoints of service. Add command swctl linear-metrics to query linear metrics and plot the metrics in Ascii Graph mode. Add command swctl single-metrics to query single-value metrics.  ","excerpt":"SkyWalking CLI 0.1.0 is released. Go to downloads page to find release tars.\n Add command swctl …","ref":"/events/release-apache-skywalking-cli-0-1-0/","title":"Release Apache SkyWalking CLI 0.1.0"},{"body":"Based on his continuous contributions, Weiyi Liu (a.k.a wayilau) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Weiyi Liu (a.k.a wayilau) has been voted as a new committer.","ref":"/events/welcome-weiyi-liu-as-new-committer/","title":"Welcome Weiyi Liu as new committer"},{"body":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome aboard.\n","excerpt":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome …","ref":"/events/welcome-lang-li-as-a-new-committer/","title":"Welcome Lang Li as a new committer"},{"body":"Based on her continuous contributions, Qiuxia Fan (a.k.a Fine0830) has been voted as a new committer.\n","excerpt":"Based on her continuous contributions, Qiuxia Fan (a.k.a Fine0830) has been voted as a new …","ref":"/events/welcome-qiuxia-fan-as-new-committer/","title":"Welcome Qiuxia Fan as new committer"},{"body":"6.5.0 release. Go to downloads page to find release tars.\n New metrics comparison view in UI. Dynamic Alert setting supported. JDK9-12 supported in backend.  ","excerpt":"6.5.0 release. Go to downloads page to find release tars.\n New metrics comparison view in UI. 
…","ref":"/events/release-apache-skywalking-apm-6-5-0/","title":"Release Apache SkyWalking APM 6.5.0"},{"body":"Based on his continuous contributions, Wei Zhang (a.k.a arugal) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Wei Zhang (a.k.a arugal) has been voted as a new committer.","ref":"/events/welcome-wei-zhang-as-new-committer/","title":"Welcome Wei Zhang as new committer"},{"body":"PS:本文仅仅是在我的测试环境实验过,如果有问题,请自行优化调整\n前记:记得skywlking还是6.0版本的时候我就在试用,当时是skywalking基本在两三天左右就会监控数据完全查不出来,elasticsearch日志报错,由于当时也算是初用es,主要用来日志收集,并且时间有限,没有继续深入研究,最近空闲,更新到最新的6.5.0(开发版本)还是会出现同样的问题,下定决心解决下,于是有了本文的浅知拙见\n本次调优环境 skywalking: 6.5.0 elasticsearch:6.3.2(下文用es代替)\n调优过程   当然是百度了,百度后其实翻来翻去就找到一个相关的文章https://my.oschina.net/keking/blog/3025303 ,参考之。\n  调整skywalking的这两个参数试试 bulkActions: 4000 # Execute the bulk every 2000 requests  bulkSize: 60 # flush the bulk every 20mb 然后es还是继续挂,继续频繁的重启\n  继续看这个文章,发现了另外一篇https://www.easyice.cn/archives/207 ,继续参考之\n  这篇文章发现每一个字我都认识,看起来也能懂,但是对于es小白的我来说,着实不知道怎么调整这些参数,姑且先加到es的配置文件里边试试看吧,于是就加了,然后重启es的时候说发现index参数配置,自从5.0之后就不支持这样配置了,还给调了个es的接口去设置,但是设置失败(真够不错的),朝着这个思路去百度,百度到快放弃,后来就寻思,再试试看吧,(百度的结果是知道了index有静态参数和动态参数,动态的参数是可以随时设置,静态的只能创建或者关闭状态的索引才可以设置) 然鹅并不知道怎么关闭索引,继续百度,(怎么全特么百度,好吧不百度了,直接来干货)\n 关闭索引(我的skywalking索引命名空间是dry_trace) curl -XPOST \u0026quot;http://localhost:9200/dry_trace*/_close\u0026quot; 设置参数 curl -XPUT 'http://localhost:9200/dry_trace*/_settings?preserve_existing=true' -H 'Content-type:application/json' -d '{ \u0026quot;index.refresh_interval\u0026quot; : \u0026quot;10s\u0026quot;, \u0026quot;index.translog.durability\u0026quot; : \u0026quot;async\u0026quot;, \u0026quot;index.translog.flush_threshold_size\u0026quot; : \u0026quot;1024mb\u0026quot;, \u0026quot;index.translog.sync_interval\u0026quot; : \u0026quot;120s\u0026quot; }'  打开索引 curl -XPOST \u0026quot;http://localhost:9200/dry_trace*/_open\u0026quot;    还有一点,第四步的方式只适用于现有的索引设置,那么新的索引设置呢,总不能每天重复下第四步吧。当然不需要,来干货 首先登陆kinaba控制台找到开发工具 贴入以下代码\n   PUT /_template/dry_trace_tmp { \u0026quot;index_patterns\u0026quot;: \u0026quot;dry_trace*\u0026quot;, \u0026quot;order\u0026quot;: 1, \u0026quot;settings\u0026quot;: { \u0026quot;index\u0026quot;: { \u0026quot;refresh_interval\u0026quot;: \u0026quot;30s\u0026quot;, \u0026quot;translog\u0026quot;: { \u0026quot;flush_threshold_size\u0026quot;: \u0026quot;1GB\u0026quot;, \u0026quot;sync_interval\u0026quot;: \u0026quot;60s\u0026quot;, \u0026quot;durability\u0026quot;: \u0026quot;async\u0026quot; } } } } 截止目前为止运行一周,还未发现挂掉,一切看起来正常   完结\u0026mdash; 于 2019年11月\n","excerpt":"PS:本文仅仅是在我的测试环境实验过,如果有问题,请自行优化调整\n前记:记得skywlking还是6.0版本的时候我就在试用,当时是skywalking基本在两三天左右就会监控数据完全查不出 …","ref":"/zh/2019-11-07-skywalking-elasticsearch-storage-optimization/","title":"SkyWalking 使用 ElasticSearch 存储的优化"},{"body":"Based on his continuous contributions, Haochao Zhuang (a.k.a dmsolr) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Haochao Zhuang (a.k.a dmsolr) has been voted as a new …","ref":"/events/welcome-haochao-zhuang-as-new-committer/","title":"Welcome Haochao Zhuang as new committer"},{"body":" 作者:innerpeacez 原文地址  本文主要讲述的是如何使用 Helm Charts 将 SkyWalking 部署到 Kubernetes 集群中,相关文档可以参考skywalking-kubernetes 和 backend-k8s 文档 。\n目前推荐的四种方式:\n 使用 helm 2 提供的 helm serve 启动本地 helm repo 使用本地 chart 文件部署 使用 harbor 提供的 repo 功能 直接从官方 repo 进行部署  注意:目前 skywalking 的 chart 还没有提交到官方仓库,请先参照前三种方式进行部署\nHelm 2 提供的 helm serve 打包对应版本的 skywalking chart 1.配置 helm 环境,参考 Helm 环境配置 ,如果你要部署 helm2 相关 chart 可以直接配置 helm2 的相关环境\n2.克隆/下载ZIP skywalking-kubernetes 
这个仓库,仓库关于chart的目录结构如下\n helm-chart\n helm2  6.0.0-GA 6.1.0   helm3  6.3.0 6.4.0     克隆/下载ZIP 完成后进入指定目录打包对应版本的chart\ncd skywalking-kubernetes/helm-chart/\u0026lt;helm-version\u0026gt;/\u0026lt;skywalking-version\u0026gt; 注意:helm-version 为对应的 helm 版本目录,skywalking-version 为对应的 skywalking 版本目录,下面以helm3 和 skywalking 6.3.0 为例\ncd skywalking-kubernetes/helm-chart/helm3/6.3.0 3.由于skywalking 依赖 elasticsearch 作为存储库,执行以下命令更新依赖,默认会从官方repo进行拉取\nhelm dep up skywalking  Hang tight while we grab the latest from your chart repositories\u0026hellip; \u0026hellip;Successfully got an update from the \u0026ldquo;stable\u0026rdquo; chart repository Update Complete. ⎈Happy Helming!⎈ Saving 1 charts Downloading elasticsearch from repo https://kubernetes-charts.storage.googleapis.com/ Deleting outdated charts\n 如果官方 repo 不存在,请先添加官方仓库\nhelm repo add stable https://kubernetes-charts.storage.googleapis.com  \u0026ldquo;stable\u0026rdquo; has been added to your repositories\n 4.打包 skywalking , 执行以下命令\nhelm package skywalking/  Successfully packaged chart and saved it to: C:\\code\\innerpeacez_github\\skywalking-kubernetes\\helm-chart\\helm3\\6.3.0\\skywalking-0.1.0.tgz\n 打包完成后会在当前目录的同级目录生成 .tgz 文件\n ls  skywalking/ skywalking-0.1.0.tgz\n 启动 helm serve 由于上文配置的 helm 为 helm3 ,但是 helm 3中移除了 helm serve 的相关命令,所以需要另外一个环境配置helm2 的相关环境,下载 helm 2.14.3 的二进制文件,配置基本上没有大的差别,不在赘述\n初始化 helm\nhelm init 将上文生成的 skywalking-0.1.0.tgz 文件复制到 helm 相关目录 /root/.helm/repository/local,启动 serve\nhelm serve --address \u0026lt;ip\u0026gt;:8879 --repo-path /root/.helm/repository/local 注意: ip 为要能够被上文配置 helm 3 环境的机器访问到\n可以访问一下看看服务 serve 是否启动成功\ncurl ip:8879 部署 skywalking 1.在helm3 环境中添加启动的本地 repo\nhelm repo add local http://\u0026lt;ip\u0026gt;:8879 2.查看 skywalking chart 是否存在于本地仓库中\nhelm search skywalking  NAME CHART VERSION\tAPP VERSION\tDESCRIPTION local/skywalking 0.1.0 6.3.0 Apache SkyWalking APM System\n 3.部署\nhelm -n test install skywalking local/skywalking 这样 skywalking 就部署到了 k8s 集群中的 test 命名空间了,至此本地安装skywalking 就完成了。\n本地文件部署 如果你不想存储到 chart 到仓库中也可以直接使用本地文件部署 skywalking,按照上面的步骤将skywalking chart 打包完成之后,直接使用以下命令进行部署\nhelm -n test install skywalking skywalking-0.1.0.tgz harbor 作为 repo 存储 charts harbor 目前已经提供了,charts repo 的能力,这样就可以将 docker 镜像和 chart 存储在一个仓库中了,方便维护,具体harbor 的部署方法参考 Harbor 作为存储仓库存储 chart\n官方 repo 部署 目前没有发布到官方 repo 中,后续发布完成后,只需要执行下面命令即可\nhelm install -n test stable/skywalking 总结 四种方式都可以进行部署,如果你想要自定义 chart ,需要使用上述两种本地方法及 harbor 存储的方式,以便你修改好 chart 之后进行部署.\n","excerpt":"作者:innerpeacez 原文地址  本文主要讲述的是如何使用 Helm Charts 将 SkyWalking 部署到 Kubernetes 集群中,相关文档可以参 …","ref":"/zh/2019-10-08-how-to-use-sw-chart/","title":"使用 chart 部署 SkyWalking"},{"body":" Author: Wei Qiang GitHub  Background SkyWalking backend provides the alarm function, we can define some Alarm rules, call webhook after the rule is triggered. I share my implementation\nDemonstration SkyWalking alarm UI\ndingtalk message body\nIntroduction  install  go get -u github.com/weiqiang333/infra-skywalking-webhook cd $GOPATH/src/github.com/weiqiang333/infra-skywalking-webhook/ bash build/build.sh ./bin/infra-skywalking-webhook help  Configuration  main configs file:configs/production.ymldingtalk:p3:token... 
Example  ./bin/infra-skywalking-webhook --config configs/production.yml --address 0.0.0.0:8000  SkyWalking backend alarm settings  webhooks:- http://127.0.0.1:8000/dingtalkCollaboration Hope that we can improve together webhook\nSkyWalking alarm rules may add more metric names (eg priority name), we can send different channels by locating different levels of alerts (dingtalk / SMS / phone)\nThanks.\n","excerpt":"Author: Wei Qiang GitHub  Background SkyWalking backend provides the alarm function, we can define …","ref":"/blog/2019-09-25-alarm-webhook-share/","title":"SkyWalking alarm webhook sharing"},{"body":"作者: SkyWalking committer,Kdump\n本文介绍申请Apache SkyWalking Committer流程, 流程包括以下步骤\n 与PMC成员表达想成为committer的意愿(主动/被动) PMC内部投票 PMC正式邮件邀请 填写Apache iCLA申请表 设置ApacheID和邮箱 设置GitHub加入Apache组织 GitHub其它一些不重要设置  前期过程  与PMC成员表达想成为committer的意愿(主动/被动) PMC内部投票  当你对项目的贡献活跃度足够高或足够多时, Skywalking项目的PMC(项目管理委员会)会找到你并询问你是否有意愿成为项目的Committer, 或者也可以主动联系项目的PMC表达自己的意向, 在此之后PMC们会进行内部讨论和投票并告知你是否可以进入下一个环节.这个过程可能需要一周. 如果PMC主动邀请你进行非正式的意愿咨询, 你可以选择接受或拒绝.\nPS:PMC会向你索要你的个人邮箱, 建议提供Gmail, 因为后期绑定Apache邮箱需要用到, 其它邮箱我不确定是否能绑定.\nPS:从Apache官方的流程来讲, 现有的PMC会在没有通知候选人的情况下先进行候选人投票, 但是Skywalking项目的PMC有可能更倾向于先得到候选人的意愿再进行投票.\n正式阶段   PMC正式邮件邀请\n 当你收到PMC正式的邀请邮件时, 恭喜你, 你已经通过了PMC的内部投票, 你需要用英文回答接受邀请或者拒绝邀请, 记住回复的时候一定要选择全部回复.    填写Apache iCLA申请表\n  在你收到的PMC邮件中, 有几个ASF官方链接需要你去浏览, 重点的内容是查看CLAs, 并填写Individual Contributor License Agreement, 你可以将icla.pdf文件下载到本地, 使用PDF工具填写里面所需的信息, 并打印出来签名(一定要手写签名, 否则会被要求重新签名), 再扫描(或手机拍照)成电子文档(需要回复PDF格式, 文件名建议重命名为你的名字-icla.pdf), 使用gpg对电子文档进行签名(参考[HOW-TO: SUBMITTING LICENSE AGREEMENTS AND GRANTS\n](http://www.apache.org/licenses/contributor-agreements.html#submitting)), Window可以使用GnuPG或者Gpg4win.\n  完成gpg签名后, 请将你签名用的公钥上送到pool.sks-keyservers.net服务器, 并在这个页面中验证你的公钥是否可以被搜索到, 搜索关键词可以是你秘钥中填写的名字或者邮箱地址.\n  gpg签名后, 会生成.pdf.asc的文件, 需要将你的你的名字-icla.pdf和你的名字-icla.pdf.asc以附件的方式一起发送到secretary@apache.org, 并抄送给private@skywalking.apache.org.\n    设置ApacheID和邮箱\n 大概5个工作日内, 你会收到一封来至于root@apache.org的邮件, 主题为Welcome to the Apache Software Foundation (ASF)!, 恭喜你, 你已经获得了ApacheID, 这时候你需要根据邮件内容的提示去设置你的ApacheID密码, 密码设置完成后, 需要在Apache Account Utility页面中重点设置Forwarding email address和Your GitHub Username两个信息.保存信息的时候需要你填写当前的ApacheID的密码. 现在进入Gmail, 选择右上角的齿轮-\u0026gt;设置-\u0026gt;账号和导入-\u0026gt;添加其他电子邮件地址-\u0026gt;参考Sending email from your apache.org email address给出的信息根据向导填写Apache邮箱.    设置GitHub加入Apache组织\n 进入Welcome to the GitBox Account Linking Utility!, 按照顺序将Apache Account和GitHub Account点绿, 想点绿MFA Status, 需要去GitHub开启2FA, 请参考配置双重身份验证完成2FA的功能. 等待1~2小时后登陆自己的GitHub的dashboard界面, 你应该会看到一条Apache组织邀请你加入的通知, 这个时候接受即可享有Skywalking相关GitHub项目权限了.    其它提示  GitHub其它一些不重要设置  在GitHub首页展示Apache组织的logo: 进入Apache GitHub组织-\u0026gt;People-\u0026gt;搜索自己的GitHubID-\u0026gt;将Private改成Public    ","excerpt":"作者: SkyWalking committer,Kdump\n本文介绍申请Apache SkyWalking Committer流程, 流程包括以下步骤\n 与PMC成员表达想成为committer的意 …","ref":"/zh/2019-09-12-apache-skywalking-committer-apply-process/","title":"Apache SkyWalking Committer申请流程"},{"body":"Based on his contributions to the skywalking ui project, Weijie Zou (a.k.a Kdump) has been accepted as a new committer.\n","excerpt":"Based on his contributions to the skywalking ui project, Weijie Zou (a.k.a Kdump) has been accepted …","ref":"/events/welcome-weijie-zou-as-a-new-committer/","title":"Welcome Weijie Zou as a new committer"},{"body":"6.4.0 release. Go to downloads page to find release tars.\n Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  
Read changelog for the details.\n","excerpt":"6.4.0 release. Go to downloads page to find release tars.\n Highly recommend to upgrade due to Pxx …","ref":"/events/release-apache-skywalking-apm-6-4-0/","title":"Release Apache SkyWalking APM 6.4.0"},{"body":"  作者:innerpeacez 原文地址   如果你还不知道 Skywalking agent 是什么,请点击这里查看 Probe 或者这里查看快速了解agent,由于我这边大部分都是 JAVA 服务,所以下文以 Java 中使用 agent 为例,提供了以下三种方式供你选择\n三种方式:  使用官方提供的基础镜像 将 agent 包构建到已经存在的基础镜像中 sidecar 模式挂载 agent  1.使用官方提供的基础镜像 查看官方 docker hub 提供的基础镜像,只需要在你构建服务镜像是 From 这个镜像即可,直接集成到 Jenkins 中可以更加方便\n2.将 agent 包构建到已经存在的基础镜像中 提供这种方式的原因是:官方的镜像属于精简镜像,并且是 openjdk ,可能很多命令没有,需要自己二次安装,以下是我构建的过程\n  下载 oracle jdk\n这个现在 oracle 有点恶心了,wget 各种不行,然后我放弃了,直接从官网下载了\n  下载 skywalking 官方发行包,并解压(以6.3.0为例)\nwget https://www.apache.org/dyn/closer.cgi/skywalking/6.3.0/apache-skywalking-apm-6.3.0.tar.gz \u0026amp;\u0026amp; tar -zxvf apache-skywalking-apm-6.3.0.tar.gz   通过以下 dockerfile 构建基础镜像\nFROMalpine:3.8  ENV LANG=C.UTF-8 RUN set -eux \u0026amp;\u0026amp; \\  apk update \u0026amp;\u0026amp; apk upgrade \u0026amp;\u0026amp; \\  wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub \u0026amp;\u0026amp;\\  wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.30-r0/glibc-2.30-r0.apk \u0026amp;\u0026amp;\\  apk --no-cache add unzip vim curl git bash ca-certificates glibc-2.30-r0.apk file \u0026amp;\u0026amp; \\  rm -rf /var/lib/apk/* \u0026amp;\u0026amp;\\  mkdir -p /usr/skywalking/agent/ # A streamlined jreADD jdk1.8.0_221/ /usr/java/jdk1.8.0_221/ADD apache-skywalking-apm-bin/agent/ /usr/skywalking/agent/ # set envENV JAVA_HOME /usr/java/jdk1.8.0_221ENV PATH ${PATH}:${JAVA_HOME}/bin # run container with base path:/WORKDIR/ CMD bash  这里由于 alpine 是基于mini lib 的,但是 java 需要 glibc ,所以加入了 glibc 相关的东西,最后构建出的镜像大小在 490M 左右,因为加了挺多命令还是有点大,仅供参考,同样构建出的镜像也可以直接配置到 jenkins 中。\n3.sidecar 模式挂载 agent 如果你们的服务是部署在 Kubernetes 中,你还可以使用这种方式来使用 Skywalking Agent ,这种方式的好处在与不需要修改原来的基础镜像,也不用重新构建新的服务镜像,而是以sidecar 模式,通过共享volume的方式将agent 所需的相关文件挂载到已经存在的服务镜像中\n构建 skywalking agent sidecar 镜像的方法\n  下载skywalking 官方发行包,并解压\nwget https://www.apache.org/dyn/closer.cgi/skywalking/6.3.0/apache-skywalking-apm-6.3.0.tar.gz \u0026amp;\u0026amp; tar -zxvf apache-skywalking-apm-6.3.0.tar.gz   通过以下 dockerfile 进行构建\nFROMbusybox:latest  ENV LANG=C.UTF-8 RUN set -eux \u0026amp;\u0026amp; mkdir -p /usr/skywalking/agent/ ADD apache-skywalking-apm-bin/agent/ /usr/skywalking/agent/ WORKDIR/  注意:这里我没有在dockerfile中下载skywalking 发行包是因为保证构建出的 sidecar 镜像保持最小,bosybox 只有700 k左右,加上 agent 最后大小小于20M\n如何使用 sidecar 呢?\napiVersion:apps/v1kind:Deploymentmetadata:labels:name:demo-swname:demo-swspec:replicas:1selector:matchLabels:name:demo-swtemplate:metadata:labels:name:demo-swspec:initContainers:- image:innerpeacez/sw-agent-sidecar:latestname:sw-agent-sidecarimagePullPolicy:IfNotPresentcommand:[\u0026#39;sh\u0026#39;]args:[\u0026#39;-c\u0026#39;,\u0026#39;mkdir -p /skywalking/agent \u0026amp;\u0026amp; cp -r /usr/skywalking/agent/* /skywalking/agent\u0026#39;]volumeMounts:- mountPath:/skywalking/agentname:sw-agentcontainers:- image:nginx:1.7.9name:nginxvolumeMounts:- mountPath:/usr/skywalking/agentname:sw-agentports:- containerPort:80volumes:- name:sw-agentemptyDir:{}以上是挂载 sidecar 的 deployment.yaml 文件,以nginx 作为服务为例,主要是通过共享 volume 的方式挂载 agent,首先 initContainers 通过 sw-agent 卷挂载了 sw-agent-sidecar 中的 /skywalking/agent ,并且将上面构建好的镜像中的 agent 目录 cp 到了 /skywalking/agent 目录,完成之后 nginx 启动时也挂载了 sw-agent 卷,并将其挂载到了容器的 /usr/skywalking/agent 目录,这样就完成了共享过程。\n总结 这样除去 ServiceMesh 以外,我能想到的方式就介绍完了,希望可以帮助到你。最后给 
Skywalking 一个 Star 吧,国人的骄傲。\n","excerpt":"作者:innerpeacez 原文地址   如果你还不知道 Skywalking agent 是什么,请点击这里查看 Probe 或者这里查看快速了解agent,由于我这边大部分都是 JAVA 服务, …","ref":"/zh/2019-08-30-how-to-use-skywalking-agent/","title":"如何使用 SkyWalking Agent ?"},{"body":"Based on his continuous contributions, Yuguang Zhao (a.k.a zhaoyuguang) has been invited to join the PMC. Welcome aboard.\n","excerpt":"Based on his continuous contributions, Yuguang Zhao (a.k.a zhaoyuguang) has been invited to join the …","ref":"/events/welcome-yuguang-zhao-to-join-the-pmc/","title":"Welcome Yuguang Zhao to join the PMC"},{"body":"Based on his continuous contributions, Zhenxu Ke (a.k.a kezhenxu94) has been invited to join the PMC. Welcome aboard.\n","excerpt":"Based on his continuous contributions, Zhenxu Ke (a.k.a kezhenxu94) has been invited to join the …","ref":"/events/welcome-zhenxu-ke-to-join-the-pmc/","title":"Welcome Zhenxu Ke to join the PMC"},{"body":"Based on his contributions to the skywalking PHP project, Yanlong He (a.k.a heyanlong has been accepted as a new committer.\n","excerpt":"Based on his contributions to the skywalking PHP project, Yanlong He (a.k.a heyanlong has been …","ref":"/events/welcome-yanlong-he-as-a-new-committer/","title":"Welcome Yanlong He as a new committer"},{"body":"6.3.0 release. Go to downloads page to find release tars.\n Improve ElasticSearch storage implementation performance again. OAP backend re-install w/o agent reboot required.  Read changelog for the details.\n","excerpt":"6.3.0 release. Go to downloads page to find release tars.\n Improve ElasticSearch storage …","ref":"/events/release-apache-skywalking-apm-6-3-0/","title":"Release Apache SkyWalking APM 6.3.0"},{"body":"6.2.0 release. Go to downloads page to find release tars. ElasticSearch storage implementation changed, high reduce payload to ElasticSearch cluster.\nRead changelog for the details.\n","excerpt":"6.2.0 release. Go to downloads page to find release tars. ElasticSearch storage implementation …","ref":"/events/release-apache-skywalking-apm-6-2-0/","title":"Release Apache SkyWalking APM 6.2.0"},{"body":"Based on his continuous contributions, Zhenxu Ke (a.k.a kezhenxu94) has been voted as a new committer.\n","excerpt":"Based on his continuous contributions, Zhenxu Ke (a.k.a kezhenxu94) has been voted as a new …","ref":"/events/welcome-zhenxu-ke-as-a-new-committer/","title":"Welcome Zhenxu Ke as a new committer"},{"body":"6.1.0 release. Go to downloads page to find release tars. This is the first top level project version.\nKey updates\n RocketBot UI OAP performance improvement  ","excerpt":"6.1.0 release. Go to downloads page to find release tars. This is the first top level project …","ref":"/events/release-apache-skywalking-apm-6-1-0/","title":"Release Apache SkyWalking APM 6.1.0"},{"body":"Apache SkyWalking PMC accept the RocketBot UI contributions. After IP clearance, it will be released in SkyWalking 6.1 soon.\n","excerpt":"Apache SkyWalking PMC accept the RocketBot UI contributions. 
After IP clearance, it will be released …","ref":"/events/rocketbot-ui-has-been-accepted-as-skywalking-primary-ui/","title":"RocketBot UI has been accepted as SkyWalking primary UI"},{"body":"Apache board approved SkyWalking graduated as TLP at April 17th 2019.\n","excerpt":"Apache board approved SkyWalking graduated as TLP at April 17th 2019.","ref":"/events/skywalking-graduated-as-apache-top-level-project/","title":"SkyWalking graduated as Apache Top Level Project"},{"body":"Based on his continuous contributions, he has been accepted as a new committer.\n","excerpt":"Based on his continuous contributions, he has been accepted as a new committer.","ref":"/events/welcome-yuguang-zhao-as-a-new-committer/","title":"Welcome Yuguang Zhao as a new committer"},{"body":"APM和调用链跟踪 随着企业经营规模的扩大,以及对内快速诊断效率和对外SLA(服务品质协议,service-level agreement)的追求,对于业务系统的掌控度的要求越来越高,主要体现在:\n 对于第三方依赖的监控,实时/准实时了解第三方的健康状况/服务品质,降低第三方依赖对于自身系统的扰动(服务降级、故障转移) 对于容器的监控,实时/准实时的了解应用部署环境(CPU、内存、进程、线程、网络、带宽)情况,以便快速扩容/缩容、流量控制、业务迁移 业务方对于自己的调用情况,方便作容量规划,同时对于突发的请求也能进行异常告警和应急准备 自己业务的健康、性能监控,实时/准实时的了解自身的业务运行情况,排查业务瓶颈,快速诊断和定位异常,增加对自己业务的掌控力  同时,对于企业来说,能够更精确的了解资源的使用情况,对于成本核算和控制也有非常大的裨益。\n在这种情况下,一般都会引入APM(Application Performance Management \u0026amp; Monitoring)系统,通过各种探针采集数据,收集关键指标,同时搭配数据呈现和监控告警,能够解决上述的大部分问题。\n然而随着RPC框架、微服务、云计算、大数据的发展,同时业务的规模和深度相比过往也都增加了很多,一次业务可能横跨多个模块/服务/容器,依赖的中间件也越来越多,其中任何一个节点出现异常,都可能导致业务出现波动或者异常,这就导致服务质量监控和异常诊断/定位变得异常复杂,于是催生了新的业务监控模式:调用链跟踪\n 能够分布式的抓取多个节点的业务记录,并且通过统一的业务id(traceId,messageId,requestId等)将一次业务在各个节点的记录串联起来,方便排查业务的瓶颈或者异常点  产品对比 APM和调用链跟踪均不是新诞生事务,很多公司已经有了大量的实践,不过开源的并且能够开箱即用的产品并不多,这里主要选取了Pinpoint,Skywalking,CAT来进行对比(当然也有其他的例如Zipkin,Jaeger等产品,不过总体来说不如前面选取的3个完成度高),了解一下APM和调用链跟踪在开源方面的发展状态。\nPinpoint Pinpoint是一个比较早并且成熟度也非常高的APM+调用链监控的项目,在全世界范围内均有用户使用,支持Java和PHP的探针,数据容器为HBase,其界面参考:\nSkywalking Skywalking是一个新晋的项目,最近一两年发展非常迅猛,本身支持OpenTracing规范,优秀的设计提供了良好的扩展性,支持Java、PHP、.Net、NodeJs探针,数据容器为ElasticSearch,其界面参考:\nCAT CAT是由美团开源的一个APM项目,也历经了多年的迭代升级,拥有大量的企业级用户,对于监控和报警整合比较紧密,支持Java、C/C++、.Net、Python、Go、NodeJs,不过CAT目前主要通过侵入性的方式接入,数据容器包括HDFS(存储原始数据)和mysql(二次统计),其界面参考:\n横向对比 上面只是做了一个简介,那这三个项目各自有什么特色或者优势/劣势呢(三者的主要产品均针对Java,这里也主要针对Java的特性)?\n Pinpoint  优势  大企业/长时间验证,稳定性和完成度高 探针收集的数据粒度比较细 HBase的数据密度较大,支持PB级别下的数据查询 代码设计考虑的扩展性较弱,二次开发难度较大(探针为插件式,开发比较简单) 拥有完整的APM和调用链跟踪功能   劣势  代码针对性强,扩展较难 容器为HBase,查询功能较弱(主要为时间维度) 探针的额外消耗较多(探针采集粒度细,大概10%~20%) 项目趋于成熟,而扩展难度较大,目前社区活跃度偏低,基本只进行探针的增加或者升级 缺少自定义指标的设计     Skywalking  优势  数据容器为ES,查询支持的维度较多并且扩展潜力大 项目设计采用微内核+插件,易读性和扩展性都比较强 主要的研发人员为华人并且均比较活跃,能够进行更加直接的沟通 拥有完整的APM和调用链跟踪功能   劣势  项目发展非常快,稳定性有待验证 ES数据密度较小,在PB级别可能会有性能压力 缺少自定义指标的设计     CAT  优势  大企业/长时间验证,稳定性和完成度高 采用手动数据埋点而不是探针,数据采集的灵活性更强 支持自定义指标 代码设计考虑的扩展性较弱,并且数据结构复杂,二次开发难度较大 拥有完善的监控告警机制   劣势  代码针对性强,扩展较难 需要手动接入埋点,代码侵入性强 APM功能完善,但是不支持调用链跟踪      基本组件 如果分别去看Pinpoint/Skywalking/CAT的整体设计,我们会发现三者更像是一个规范的三种实现,虽然各自有不同的机制和特性,但是从模块划分和功能基本是一致的:\n当然也有一些微小的区别:\n Pinpoint基本没有aggregator,同时query和alarm集成在了web中,只有agent,collector和web Skywalking则是把collector、aggregator、alarm集成为OAP(Observability Analysis Platform),并且可以通过集群部署,不同的实例可以分别承担collector或者aggregator+alarm的角色 CAT则和Skywalking类似,把collector、aggregator、alarm集成为cat-consumer,而由于CAT有比较复杂的配置管理,所以query和配置一起集成为cat-home 当然最大的区别是Pinpoint和Skywalking均是通过javaagent做字节码的扩展,通过切面编程采集数据,类似于探针,而CAT的agent则更像是一个工具集,用于手动埋点  Skywalking 前戏这么多,终于开始进入主题,介绍今天的主角:Skywalking,不过通过之前的铺垫,我们基本都知道了Skywalking期望解决的问题以及总体的结构,下面我们则从细节来看Skywalking是怎么一步一步实现的。\n模块构成 首先,Skywalking进行了精准的领域模型划分:\n整个系统分为三部分:\n agent:采集tracing(调用链数据)和metric(指标)信息并上报 OAP:收集tracing和metric信息通过analysis core模块将数据放入持久化容器中(ES,H2(内存数据库),mysql等等),并进行二次统计和监控告警 
webapp:前后端分离,前端负责呈现,并将查询请求封装为graphQL提交给后端,后端通过ribbon做负载均衡转发给OAP集群,再将查询结果渲染展示  而整个Skywalking(包括agent和OAP,而webapp后端业务非常简单主要就是认证和请求转发)均通过微内核+插件式的模式进行编码,代码结构和扩展性均非常强,具体设计可以参考: 从Skywalking看如何设计一个微核+插件式扩展的高扩展框架 ,Spring Cloud Gateway的GatewayFilterFactory的扩展也是通过这种plugin define的方式来实现的。\nSkywalking也提供了其他的一些特性:\n 配置重载:支持通过jvm参数覆写默认配置,支持动态配置管理 集群管理:这个主要体现在OAP,通过集群部署分担数据上报的流量压力和二次计算的计算压力,同时集群也可以通过配置切换角色,分别面向数据采集(collector)和计算(aggregator,alarm),需要注意的是agent目前不支持多collector负载均衡,而是随机从集群中选择一个实例进行数据上报 支持k8s和mesh 支持数据容器的扩展,例如官方主推是ES,通过扩展接口,也可以实现插件去支持其他的数据容器 支持数据上报receiver的扩展,例如目前主要是支持gRPC接受agent的上报,但是也可以实现插件支持其他类型的数据上报(官方默认实现了对Zipkin,telemetry和envoy的支持) 支持客户端采样和服务端采样,不过服务端采样最有意义 官方制定了一个数据查询脚本规范:OAL(Observability Analysis Language),语法类似Linq,以简化数据查询扩展的工作量 支持监控预警,通过OAL获取数据指标和阈值进行对比来触发告警,支持webhook扩展告警方式,支持统计周期的自定义,以及告警静默防止重复告警  数据容器 由于Skywalking并没有自己定制的数据容器或者使用多种数据容器增加复杂度,而是主要使用ElasticSearch(当然开源的基本上都是这样来保持简洁,例如Pinpoint也只使用了HBase),所以数据容器的特性以及自己数据结构基本上就限制了业务的上限,以ES为例:\n ES查询功能异常强大,在数据筛选方面碾压其他所有容器,在数据筛选潜力巨大(Skywalking默认的查询维度就比使用HBase的Pinpoint强很多) 支持sharding分片和replicas数据备份,在高可用/高性能/大数据支持都非常好 支持批量插入,高并发下的插入性能大大增强 数据密度低,源于ES会提前构建大量的索引来优化搜索查询,这是查询功能强大和性能好的代价,但是链路跟踪往往有非常多的上下文需要记录,所以Skywalking把这些上下文二进制化然后通过Base64编码放入data_binary字段并且将字段标记为not_analyzed来避免进行预处理建立查询索引  总体来说,Skywalking尽量使用ES在大数据和查询方面的优势,同时尽量减少ES数据密度低的劣势带来的影响,从目前来看,ES在调用链跟踪方面是不二的数据容器,而在数据指标方面,ES也能中规中矩的完成业务,虽然和时序数据库相比要弱一些,但在PB级以下的数据支持也不会有太大问题。\n数据结构 如果说数据容器决定了上限,那么数据结构则决定了实际到达的高度。Skywalking的数据结构主要为:\n 数据维度(ES索引为skywalking_*_inventory)  service:服务 instance:实例 endpoint:接口 network_adress:外部依赖   数据内容  原始数据  调用链跟踪数据(调用链的trace信息,ES索引为skywalking_segment,Skywalking主要的数据消耗都在这里) 指标(主要是jvm或者envoy的运行时指标,例如ES索引skywalking_instance_jvm_cpu)   二次统计指标  指标(按维度/时间二次统计出来的例如pxx、sla等指标,例如ES索引skywalking_database_access_p75_month) 数据库慢查询记录(数据库索引:skywalking_top_n_database_statement)   关联关系(维度/指标之间的关联关系,ES索引为skywalking_*_relation_*) 特别记录  告警信息(ES索引为skywalking_alarm_record) 并发控制(ES索引为skywalking_register_lock)      其中数量占比最大的就是调用链跟踪数据和各种指标,而这些数据均可以通过OAP设置过期时间,以降低历史数据的对磁盘占用和查询效率的影响。\n调用链跟踪数据 作为Skywalking的核心数据,调用链跟踪数据(skywalking_segment)基本上奠定了整个系统的基础,而如果要详细的了解调用链跟踪的话,就不得不提到openTracing。\nopenTracing基本上是目前开源调用链跟踪系统的一个事实标准,它制定了调用链跟踪的基本流程和基本的数据结构,同时也提供了各个语言的实现。如果用一张图来表现openTracing,则是如下:\n其中:\n SpanContext:一个类似于MDC(Slfj)或者ThreadLocal的组件,负责整个调用链数据采集过程中的上下文保持和传递 Trace:一次调用的完整记录  Span:一次调用中的某个节点/步骤,类似于一层堆栈信息,Trace是由多个Span组成,Span和Span之间也有父子或者并列的关系来标志这个节点/步骤在整个调用中的位置  Tag:节点/步骤中的关键信息 Log:节点/步骤中的详细记录,例如异常时的异常堆栈   Baggage:和SpanContext一样并不属于数据结构而是一种机制,主要用于跨Span或者跨实例的上下文传递,Baggage的数据更多是用于运行时,而不会进行持久化    以一个Trace为例:\n首先是外部请求调用A,然后A依次同步调用了B和C,而B被调用时会去同步调用D,C被调用的时候会依次同步调用E和F,F被调用的时候会通过异步调用G,G则会异步调用H,最终完成一次调用。\n上图是通过Span之间的依赖关系来表现一个Trace,而在时间线上,则可以有如下的表达:\n当然,如果是同步调用的话,父Span的时间占用是包括子Span的时间消耗的。\n而落地到Skywalking中,我们以一条skywalking_segment的记录为例:\n{ \u0026quot;trace_id\u0026quot;: \u0026quot;52.70.15530767312125341\u0026quot;, \u0026quot;endpoint_name\u0026quot;: \u0026quot;Mysql/JDBI/Connection/commit\u0026quot;, \u0026quot;latency\u0026quot;: 0, \u0026quot;end_time\u0026quot;: 1553076731212, \u0026quot;endpoint_id\u0026quot;: 96142, \u0026quot;service_instance_id\u0026quot;: 52, \u0026quot;version\u0026quot;: 2, \u0026quot;start_time\u0026quot;: 1553076731212, \u0026quot;data_binary\u0026quot;: \u0026quot;CgwKCjRGnPvp5eikyxsSXhD///////////8BGMz62NSZLSDM+tjUmS0wju8FQChQAVgBYCF6DgoHZGIudHlwZRIDc3FsehcKC2RiLmluc3RhbmNlEghyaXNrZGF0YXoOCgxkYi5zdGF0ZW1lbnQYAiA0\u0026quot;, \u0026quot;service_id\u0026quot;: 2, \u0026quot;time_bucket\u0026quot;: 20190320181211, \u0026quot;is_error\u0026quot;: 0, 
\u0026quot;segment_id\u0026quot;: \u0026quot;52.70.15530767312125340\u0026quot; } 其中:\n trace_id:本次调用的唯一id,通过snowflake模式生成 endpoint_name:被调用的接口 latency:耗时 end_time:结束时间戳 endpoint_id:被调用的接口的唯一id service_instance_id:被调用的实例的唯一id version:本数据结构的版本号 start_time:开始时间戳 data_binary:里面保存了本次调用的所有Span的数据,序列化并用Base64编码,不会进行分析和用于查询 service_id:服务的唯一id time_bucket:调用所处的时段 is_error:是否失败 segment_id:数据本身的唯一id,类似于主键,通过snowflake模式生成  这里可以看到,目前Skywalking虽然相较于Pinpoint来说查询的维度要多一些,但是也很有限,而且除了endPoint,并没有和业务有关联的字段,只能通过时间/服务/实例/接口/成功标志/耗时来进行非业务相关的查询,如果后续要增强业务相关的搜索查询的话,应该还需要增加一些用于保存动态内容(如messageId,orderId等业务关键字)的字段用于快速定位。\n指标 指标数据相对于Tracing则要简单得多了,一般来说就是指标标志、时间戳、指标值,而Skywalking中的指标有两种:一种是采集的原始指标值,例如jvm的各种运行时指标(例如cpu消耗、内存结构、GC信息等);一种是各种二次统计指标(例如tp性能指标、SLA等,当然也有为了便于查询的更高时间维度的指标,例如基于分钟、小时、天、周、月)\n例如以下是索引skywalking_endpoint_cpm_hour中的一条记录,用于标志一个小时内某个接口的cpm指标:\n{ \u0026quot;total\u0026quot;: 8900, \u0026quot;service_id\u0026quot;: 5, \u0026quot;time_bucket\u0026quot;: 2019031816, \u0026quot;service_instance_id\u0026quot;: 5, \u0026quot;entity_id\u0026quot;: \u0026quot;7\u0026quot;, \u0026quot;value\u0026quot;: 148 } 各个字段的释义如下:\n total:一分钟内的调用总量 service_id:所属服务的唯一id time_bucket:统计的时段 service_instance_id:所属实例的唯一id entity_id:接口(endpoint)的唯一id value:cpm的指标值(cpm=call per minute,即total/60)  工程实现 Skywalking的工程实现堪比Dubbo,框架设计和代码质量都达到非常高的水准,以dubbo为例,即使2012年发布的老版本放到当今,其设计和编码看起来也依然赏心悦目,设计简洁但是覆盖了所有的核心需求,同时又具备非常强的扩展性,二次开发非常简单,然而却又不会像Spring那样过度封装(当然Spring作为一个更加高度通用的框架,更高的封装也是有必要的)导致代码阅读异常困难。\nagent agent(apm-sniffer)是Skywalking的Java探针实现,主要负责:\n 采集应用实例的jvm指标 通过切向编程进行数据埋点,采集调用链数据 通过RPC将采集的数据上报  当然,agent还实现了客户端采样,不过在APM监控系统里进行客户端数据采样都是没有灵魂的,所以这里就不再赘述了。\n首先,agent通过 org.apache.skywalking.apm.agent.core.boot.BootService 实现了整体的插件化,agent启动会加载所有的BootService实现,并通过 ServiceManager 来管理这些插件的生命周期,采集jvm指标、gRPC连接管理、调用链数据维护、数据上报OAP这些服务均是通过这种方式扩展。\n然后,agent还通过bytebuddy以javaagent的模式,通过字节码增强的机制来构造AOP环境,再提供PluginDefine的规范方便探针的开发,最终实现非侵入性的数据埋点,采集调用链数据。\n最终落地到代码上则异常清晰:\n//通过bytebuddy的AgentBuilder构造javaagent增强classLoader new AgentBuilder.Default(byteBuddy) .ignore( //忽略这些包的内容,不进行增强 nameStartsWith(\u0026quot;net.bytebuddy.\u0026quot;) .or(nameStartsWith(\u0026quot;org.slf4j.\u0026quot;)) .or(nameStartsWith(\u0026quot;org.apache.logging.\u0026quot;)) .or(nameStartsWith(\u0026quot;org.groovy.\u0026quot;)) .or(nameContains(\u0026quot;javassist\u0026quot;)) .or(nameContains(\u0026quot;.asm.\u0026quot;)) .or(nameStartsWith(\u0026quot;sun.reflect\u0026quot;)) .or(allSkyWalkingAgentExcludeToolkit()) .or(ElementMatchers.\u0026lt;TypeDescription\u0026gt;isSynthetic())) //通过pluginFinder加载所有的探针扩展,并获取所有可以增强的class .type(pluginFinder.buildMatch()) //按照pluginFinder的实现,去改变字节码增强类 .transform(new Transformer(pluginFinder)) //通过listener订阅增强的操作记录,方便调试 .with(new Listener()) .installOn(instrumentation); try { //加载所有的service实现并启动 ServiceManager.INSTANCE.boot(); } catch (Exception e) { logger.error(e, \u0026quot;Skywalking agent boot failure.\u0026quot;); } agent也提供了非常简单的扩展实现机制,以增强一个普通类的方法为例,首先你需要定义一个切向点:\npublic interface InstanceMethodsInterceptPoint { //定义切向方法的适配器,符合适配器的class将被增强 ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); //增强的具体实现类,classReference String getMethodsInterceptor(); //是否重写参数 boolean isOverrideArgs(); } 然后你还需要一个增强的实现类:\npublic interface InstanceMethodsAroundInterceptor { //方法真正执行前执行 void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; //方法真正执行后执行 Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, 
Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; //当异常发生时执行 void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } 一般在执行前和执行后进行数据埋点,就可以采集到想要的数据,当然实际编程要稍微复杂一点,不过官方也实现了对应的abstract类和数据埋点工具类,所以探针的二次开发在Skywalking这个级别确实是非常简单,只需要处理好资源占用和并发问题即可。真正的难点是要对需要增强的对象非常了解,熟悉其运作机制,才能找准切向点,既要所有的流程都需要经过这个点,又可以抓取到期望抓取的上下文信息。同时,多版本的适配和测试也是非常大的工作量,官方虽然提供witness的机制(通过验证某个class是否存在来验证版本),但是作为影响全局的探针,开发和测试都是需要慎之又慎的。\nOAP 同agent类似,OAP作为Skywalking最核心的模块,也实现了自己的扩展机制,不过在这里叫做Module,具体可以参考library-module,在module的机制下,Skywalking实现了自己必须核心组件:\n core:整个OAP核心业务(remoting、cluster、storage、analysis、query、alarm)的规范和接口 cluster:集群管理的具体实现 storage:数据容器的具体实现 query:为前端提供的查询接口的具体实现 receiver:接收探针上报数据的接收器的具体实现 alarm:监控告警的具体实现  以及一个可选组件:\n telemetry:用于监控OAP自身的健康状况  而前面提到的OAP的高扩展性则体现在核心业务的规范均定义在了core中,如果有需要自己扩展的,只需要自己单独做自己的实现,而不需要做侵入式的改动,最典型的示例则是官方支持的storage,不仅支持单机demo的内存数据库H2和经典的ES,连目前开源的Tidb都可以接入。\n初步实践 对于Skywalking的实践我们经历了三个阶段\n 线下测试 第一次生产环境小规模测试 第二次生产环境小规模测试+全量接入  线下测试 环境 由于是线下测试,所以我们直接使用物理机(E5-2680v2 x2, 128G)虚拟了一个集群(实际性能相比云服务器应该偏好一些):\n ES:单机实例,v6.5,4C8G,jvm内存分配为4G OAP:单机实例,v6.1.0-SNAPSHOT,4C8G,jvm内存分配为4G 应用:基于SpringCloud的4个测试实例,调用关系为A-\u0026gt;B-\u0026gt;C-\u0026gt;D,QPS为200  测试结果 拓扑图:\nOAP机器监控:\nES机器监控:\n服务监控面板:\n其中一个调用链记录:\n可以看出,Skywalking非常依赖CPU(不论是OAP还是ES),同时对于网络IO也有一定的要求,至于ES的文件IO在可接受范围内,毕竟确实有大量内容需要持久化。测试结果也基本达到预期要求,调用链和各个指标的监控都工作良好。\n第一次生产环境测试 在线下测试之后,我们再进行了一次基于实际业务针对探针的测试,测试没有发现探针的异常问题,也没有影响业务的正常运作,同时对于jvm实例影响也不是很大,CPU大概提高了5%左右,并不很明显。在这个基础上我们选择了线上的一台服务器,进行了我们第一次生产环境的测试。\n环境  ES:基于现有的一个ES集群,node x 3,v6.0 OAP:2C4G x 2,v6.1.0-SNAPSHOT,jvm内存分配为2G 应用:两个jvm实例  测试时间:03.11-03.16\n测试结果 业务机器负载情况:\n从最敏感的CPU指标上来看,增加agent并没有导致可见的CPU使用率的变化,而其他的内存、网络IO、连接数也基本没有变化。\nOAP负载情况:\n可以看到机器的CPU和网络均有较大的波动,但是也都没有真正打爆服务器,但是我们的实例却经常出现两种日志:\n One trace segment has been abandoned, cause by buffer is full.\n  Collector traceSegment service doesn\u0026rsquo;t response in xxx seconds.\n 通过阅读源码发现:\n agent和OAP只会使用一个长连接阻塞式的交换数据,如果某次数据交换没有得到响应,则会阻塞后续的上报流程(一般长连接的RPC请求会在数据传输期间互相阻塞,但是不会在等待期间互相阻塞,当然这也是源于agent并没有并发上报的机制),所以一旦OAP在接收数据的过程中发生阻塞,就会导致agent本地的缓冲区满,最终只能将监控数据直接丢弃防止内存泄漏  而导致OAP没有及时响应的一方面是OAP本身性能不够(OAP需要承担大量的二次统计工作,通过Jstack统计,长期有超过几十个线程处于RUNNABLE状态,据吴晟描述目前OAP都是高性能模式,后续将会提供配置来支持低性能模式),另一方面可能是ES批量插入效率不够,因此我们修改了OAP的批量插入参数来增加插入频率,降低单次插入数量:\n bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:2000 -\u0026gt; 20} # Execute the bulk every 2000 requests bulkSize: ${SW_STORAGE_ES_BULK_SIZE:20 -\u0026gt; 2} # flush the bulk every 20mb flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10 -\u0026gt; 2} # flush the bulk every 10 seconds whatever the number of requests  虽然 service doesn\u0026rsquo;t response 出现的频率明显降低,但是依然还是会偶尔出现,而每一次出现都会伴随大量的 trace segment has been abandoned ,推测OAP和ES可能都存在性能瓶颈(应该进行更进一步的诊断确定问题,不过当时直接和吴晟沟通,确认确实OAP非常消耗CPU资源,考虑到当时部署只是2C,并且还部署有其他业务,就没有进一步的测试)。\n同时,在频繁的数据丢弃过程中,也偶发了一个bug:当agent上报数据超时并且大量丢弃数据之后,即使后续恢复正常也能通过日志看到数据正常上报,在查询界面查询的时候,会查不到这个实例上报的数据,不过在重启OAP和agent之后,之前上报的数据又能查询到,这个也和吴晟沟通过,没有其他的案例,后续想重现却也一直没有成功。\n而同时还发现两个更加严重的问题:\n 我们使用的是线上已经部署好的ES集群,其版本只有6.0,而新的Skywalking使用了6.3的查询特性,导致很多查询执行报错,只能使用最简单的查询 我们的kafka集群版本也非常古老,不支持v1或者更高版本的header,而kafka的探针强依赖header来传输上下文信息,导致kafka客户端直接报错影响业务,所以也立即移除了kafka的探针  在这一次测试中,我们基本确认了agent对于应用的影响,同时也发现了一些我们和Skywalking的一些问题,留待后续测试确认。\n第二次生产环境测试 为了排除性能和ES版本的影响,测试Skywalking本身的可用性,参考吴晟的建议(这也是在最初技术选型的时候没有选择Pinpoint和CAT的部分原因:一方面Skywalking的功能符合我们的要求,更重要的是有更加直接和效率的和项目维护者直接沟通的渠道),所以这一次我们新申请了ES集群和OAP机器。\n环境  ES:腾讯云托管ES集群,4C16G x 3 SSD,v6.4 OAP:16C32G,standalone,jvm分配24G 应用:2~8个jvm实例  测试时间:03.18-至今\n测试结果 
OAP负载情况:\nES集群负载:\n测试过程中,我们先接入了一台机器上的两个实例,完全没有遇到一测中的延迟或者数据丢弃的问题,三天后我们又接入了另外两台机器的4个实例,这之后两天我们又接入了另外两台机器的2个实例。依然没有遇到一测中的延迟或者数据丢弃的问题。\n而ES负载的监控也基本验证了一测延迟的问题,Skywalking由于较高的并发插入,对于ES的性能压力很大(批量插入时需要针对每条数据分析并且构建查询索引),大概率是ES批量插入性能不够导致延迟,考虑到我们仅仅接入了8个实例,日均segment插入量大概5000万条(即日均5000万次独立调用),如果想支持更大规模的监控,对于ES容量规划势必要留够足够的冗余。同时OAP和ES集群的网络开销也不容忽视,在支撑大规模的监控时,需要集群并且receiver和aggregattor分离部署来分担网络IO的压力。\n而在磁盘容量占用上,我们设置的原始数据7天过期,目前刚刚开始滚动过期,目前segment索引已经累计了314757240条记录总计158G数据,当然我们目前异常记录较少,如果异常记录较多的话,其磁盘开销将会急剧增加(span中会记录异常堆栈信息)。而由于选择的SSD,磁盘的写入和查询性能都很高,即使只有3个节点,也完全没有任何压力。\n而在新版本的ES集群下,Skywalking的所有查询功能都变得可用,和我们之前自己的单独编写的异常指标监控都能完美对照。当然我们也遇到一个问题:Skywalking仅采集了调用记录,但是对于调用过程中的过程数据,除了异常堆栈其他均没有采集,导致真的出现异常也缺少充足的上下文信息还原现场,于是我们扩展了Skywalking的两个探针(我们项目目前重度依赖的组件):OkHttp(增加对requestBody和responseBody的采集)和SpringMVC(增加了对requestBody的采集),目前工作正常,如果进一步的增加其他的探针,采集到足够的数据,那么我们基本可以脱离ELK了。\n而OAP方面,CPU和内存的消耗远远低于预期的估计,CPU占用率一直较低,而分配的24G内存也仅使用了10+G,完全可以支持更大规模的接入量,不过在网络IO方面可能存在一定的风险,推测应该8C16G的容器就足以支持十万CPM级别的数据接入。\n当然我们在查询也遇到了一些瓶颈,最大的问题就是无法精确的命中某一条调用记录,就如前面的分析,因为segment的数据结构问题,无法进行面向业务的查询(例如messageId、requestId、orderId等),所以如果想精确匹配某一次调用请求,需要通过各个维度的条件约束慢慢缩小范围最后定位。\nSkywalking展望 通过上述对Skywalking的剖析和实践,Skywalking确实是一个优秀的APM+调用链跟踪监控系统,能够覆盖大部分使用场景,让研发和运维能够更加实时/准实时的了解线上服务的运行情况。当然Skywailking也不是尽善尽美,例如下面就是个人觉得目前可见的不满足我们期望的:\n 数据准实时通过gRPC上报,本地缓存的瓶颈(当然官方主要是为了简化模型,减少依赖,否则Skywalking还依赖ELK就玩得有点大了)  缓存队列的长度,过长占据内存,过短容易buffer满丢弃数据 优雅停机同时又不丢失缓存   数据上报需要在起点上报,链路回传的时候需要携带SPAN及子SPAN的信息,当链路较长或者SPAN保存的信息较多时,会额外消耗一定的带宽 skywalking更多是一个APM系统而不是分布式调用链跟踪系统  在整个链路的探针上均缺少输入输出的抓取 在调用链的筛查上并没用进行增强,并且体现在数据结构的设计,例如TAG信息均保存在SPAN信息中,而SPAN信息均被BASE64编码作为数据保存,无法检索,最终trace的筛查只能通过时间/traceId/service/endPoint/state进行非业务相关的搜索   skywalking缺少对三方接口依赖的指标,这个对于系统稳定往往非常重要  而作为一个初级的使用者,个人觉得我们可以使用有限的人力在以下方向进行扩展:\n 增加receiver:整合ELK,通过日志采集采集数据,降低异构系统的采集开发成本 优化数据结构,提供基于业务关键数据的查询接口 优化探针,采集更多的业务数据,争取代替传统的ELK日志简单查询,绝大部分异常诊断和定位均可以通过Skywalking即可完成 增加业务指标监控的模式,能够自定义业务指标(目前官方已经在实现 Metric Exporter )  ","excerpt":"APM和调用链跟踪 随着企业经营规模的扩大,以及对内快速诊断效率和对外SLA(服务品质协议,service-level agreement)的追求,对于业务系统的掌控度的要求越来越高,主要体现在:\n  …","ref":"/zh/2019-03-29-introduction-of-skywalking-and-simple-practice/","title":"SkyWalking调研与初步实践"},{"body":"前言 首先描述下问题的背景,博主有个习惯,每天上下班的时候看下skywalking的trace页面的error情况。但是某天突然发现生产环境skywalking页面没有任何数据了,页面也没有显示任何的异常,有点慌,我们线上虽然没有全面铺开对接skywalking,但是也有十多个应用。看了应用agent端日志后,其实也不用太担心,对应用毫无影响。大概情况就是这样,但是问题还是要解决,下面就开始排查skywalking不可用的问题。\n使用到的工具arthas Arthas是阿里巴巴开源的一款在线诊断java应用程序的工具,是greys工具的升级版本,深受开发者喜爱。当你遇到以下类似问题而束手无策时,Arthas可以帮助你解决:\n 这个类从哪个 jar 包加载的?为什么会报各种类相关的 Exception? 我改的代码为什么没有执行到?难道是我没 commit?分支搞错了? 遇到问题无法在线上 debug,难道只能通过加日志再重新发布吗? 线上遇到某个用户的数据处理有问题,但线上同样无法 debug,线下无法重现! 是否有一个全局视角来查看系统的运行状况? 有什么办法可以监控到JVM的实时运行状态? 
Arthas采用命令行交互模式,同时提供丰富的 Tab 自动补全功能,进一步方便进行问题的定位和诊断。  项目地址:https://github.com/alibaba/arthas\n先定位问题一 查看skywalking-oap-server.log的日志,发现会有一条异常疯狂的在输出,异常详情如下:\n2019-03-01 09:12:11,578 - org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker -3264081149 [DataCarrier.IndicatorPersistentWorker.endpoint_inventory.Consumser.0.Thread] ERROR [] - Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; org.elasticsearch.action.ActionRequestValidationException: Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; at org.elasticsearch.action.ValidateActions.addValidationError(ValidateActions.java:26) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.action.index.IndexRequest.validate(IndexRequest.java:183) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:515) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequestAndParseEntity(RestHighLevelClient.java:508) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.index(RestHighLevelClient.java:348) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient.forceInsert(ElasticSearchClient.java:141) ~[library-client-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.storage.plugin.elasticsearch.base.RegisterEsDAO.forceInsert(RegisterEsDAO.java:66) ~[storage-elasticsearch-plugin-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.lambda$onWork$0(RegisterPersistentWorker.java:83) ~[server-core-6.0.0-alpha.jar:6.0.0-alpha] at java.util.HashMap$Values.forEach(HashMap.java:981) [?:1.8.0_201] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.onWork(RegisterPersistentWorker.java:74) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.access$100(RegisterPersistentWorker.java:35) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker$PersistentConsumer.consume(RegisterPersistentWorker.java:120) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.consume(ConsumerThread.java:101) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.run(ConsumerThread.java:68) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] 2019-03-01 09:12:11,627 - org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker -3264081198 [DataCarrier.IndicatorPersistentWorker.endpoint_inventory.Consumser.0.Thread] ERROR [] - Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; org.elasticsearch.action.ActionRequestValidationException: Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; at org.elasticsearch.action.ValidateActions.addValidationError(ValidateActions.java:26) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.action.index.IndexRequest.validate(IndexRequest.java:183) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:515) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at 
org.elasticsearch.client.RestHighLevelClient.performRequestAndParseEntity(RestHighLevelClient.java:508) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.index(RestHighLevelClient.java:348) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient.forceInsert(ElasticSearchClient.java:141) ~[library-client-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.storage.plugin.elasticsearch.base.RegisterEsDAO.forceInsert(RegisterEsDAO.java:66) ~[storage-elasticsearch-plugin-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.lambda$onWork$0(RegisterPersistentWorker.java:83) ~[server-core-6.0.0-alpha.jar:6.0.0-alpha] at java.util.HashMap$Values.forEach(HashMap.java:981) [?:1.8.0_201] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.onWork(RegisterPersistentWorker.java:74) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.access$100(RegisterPersistentWorker.java:35) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker$PersistentConsumer.consume(RegisterPersistentWorker.java:120) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.consume(ConsumerThread.java:101) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.run(ConsumerThread.java:68) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] 可以看到,上面的异常输出的时间节点,以这种频率在疯狂的刷新。通过异常message,得知到是因为skywalking在写elasticsearch时,索引的id太长了。下面是elasticsearch的源码:\nif (id != null \u0026amp;\u0026amp; id.getBytes(StandardCharsets.UTF_8).length \u0026gt; 512) { validationException = addValidationError(\u0026#34;id is too long, must be no longer than 512 bytes but was: \u0026#34; + id.getBytes(StandardCharsets.UTF_8).length, validationException); } 具体可见:elasticsearch/action/index/IndexRequest.java#L240\n问题一: 通过日志,初步定位是哪个系统的url太长,skywalking在注册url数据时触发elasticsearch针对索引id校验的异常,而skywalking注册失败后会不断的重试,所以才有了上面日志不断刷的现象。\n问题解决: elasticsearch client在写es前通过硬编码的方式写死了索引id的长度不能超过512字节大小。也就是我们不能通过从ES侧找解决方案了。回到异常的message,只能看到提示id太长,并没有写明id具体是什么,这个异常提示其实是不合格的,博主觉得应该把id的具体内容抛出来,问题就简单了。因为异常没有明确提示,系统又比较多,不能十多个系统依次关闭重启来验证到底是哪个系统的哪个url有问题。这个时候Arthas就派上用场了,在不重启应用不开启debug模式下,查看实例中的属性对象。下面通过Arthas找到具体的url。\n从异常中得知,org.elasticsearch.action.index.IndexRequest这个类的validate方法触发的,这个方法是没有入参的,校验的id属性其实是对象本身的属性,那么我们使用Arthas的watch指令来看下这个实例id属性。先介绍下watch的用法:\n功能说明 让你能方便的观察到指定方法的调用情况。能观察到的范围为:返回值、抛出异常、入参,通过编写 \u0008OGNL 表达式进行对应变量的查看。\n参数说明 watch 的参数比较多,主要是因为它能在 4 个不同的场景观察对象\n   参数名称 参数说明     class-pattern 类名表达式匹配   method-pattern 方法名表达式匹配   express 观察表达式   condition-express 条件表达式   [b] 在方法调用之前观察   [e] 在方法异常之后观察   [s] 在方法返回之后观察   [f] 在方法结束之后(正常返回和异常返回)观察   [E] 开启正则表达式匹配,默认为通配符匹配   [x:] 指定输出结果的属性遍历深度,默认为 1    从上面的用法说明结合异常信息,我们得到了如下的指令脚本:\nwatch org.elasticsearch.action.index.IndexRequest validate \u0026ldquo;target\u0026rdquo;\n执行后,就看到了我们希望了解到的内容,如:\n索引id的具体内容看到后,就好办了。我们暂时把定位到的这个应用启动脚本中的的skywalking agent移除后(计划后面重新设计下接口)重启了下系统验证下。果然疯狂输出的日志停住了,但是问题并没完全解决,skywalking页面上的数据还是没有恢复。\n定位问题二 skywalking数据存储使用了elasticsearch,页面没有数据,很有可能是elasticsearch出问题了。查看elasticsearch日志后,发现elasticsearch正在疯狂的GC,日志如:\n: 139939K-\u0026gt;3479K(153344K), 0.0285655 secs] 473293K-\u0026gt;336991K(5225856K), 0.0286918 secs] [Times: 
user=0.05 sys=0.00, real=0.03 secs] 2019-02-28T20:05:38.276+0800: 3216940.387: Total time for which application threads were stopped: 0.0301495 seconds, Stopping threads took: 0.0001549 seconds 2019-02-28T20:05:38.535+0800: 3216940.646: [GC (Allocation Failure) 2019-02-28T20:05:38.535+0800: 3216940.646: [ParNew Desired survivor size 8716288 bytes, new threshold 6 (max 6) - age 1: 1220136 bytes, 1220136 total - age 2: 158496 bytes, 1378632 total - age 3: 88200 bytes, 1466832 total - age 4: 46240 bytes, 1513072 total - age 5: 126584 bytes, 1639656 total - age 6: 159224 bytes, 1798880 total : 139799K-\u0026gt;3295K(153344K), 0.0261667 secs] 473311K-\u0026gt;336837K(5225856K), 0.0263158 secs] [Times: user=0.06 sys=0.00, real=0.03 secs] 2019-02-28T20:05:38.562+0800: 3216940.673: Total time for which application threads were stopped: 0.0276971 seconds, Stopping threads took: 0.0001030 seconds 2019-02-28T20:05:38.901+0800: 3216941.012: [GC (Allocation Failure) 2019-02-28T20:05:38.901+0800: 3216941.012: [ParNew Desired survivor size 8716288 bytes, new threshold 6 (max 6) 问题二: 查询后得知,elasticsearch的内存配置偏大了,GC时间太长,导致elasticsearch脱离服务了。elasticsearch所在主机的内存是8G的实际内存7.6G,刚开始配置了5G的堆内存大小,可能Full GC的时候耗时太久了。查询elasticsearch官方文档后,得到如下的jvm优化建议:\n 将最小堆大小(Xms)和最大堆大小(Xmx)设置为彼此相等。 Elasticsearch可用的堆越多,它可用于缓存的内存就越多。但请注意,过多的堆可能会使您陷入长时间的垃圾收集暂停。 设置Xmx为不超过物理RAM的50%,以确保有足够的物理RAM用于内核文件系统缓存。 不要设置Xmx为JVM用于压缩对象指针(压缩oops)的截止值之上; 确切的截止值变化但接近32 GB。  详情见:https://www.elastic.co/guide/en/elasticsearch/reference/6.5/heap-size.html\n问题解决: 根据Xmx不超过物理RAM的50%上面的jvm优化建议。后面将Xms和Xmx都设置成了3G。然后先停掉skywalking(由于skywalking中会缓存部分数据,如果直接先停ES,会报索引找不到的类似异常,这个大部分skywalking用户应该有遇到过),清空skywalking缓存目录下的内容,如:\n在重启elasticsearch,接着启动skywalking后页面终于恢复了\n结语 整个问题排查到解决大概花了半天时间,幸好一点也不影响线上应用的使用,这个要得益于skywalking的设计,不然就是大灾难了。然后要感谢下Arthas的技术团队,写了这么好用的一款产品并且开源了,如果没有Arthas,这个问题真的不好定位,甚至一度想到了换掉elasticsearch,采用mysql来解决索引id过长的问题。Arthas真的是线上找问题的利器,博主在Arthas刚面世的时候就关注了,并且一直在公司推广使用,在这里在硬推一波。\n作者简介: 陈凯玲,2016年5月加入凯京科技。曾任职高级研发和项目经理,现任凯京科技研发中心架构\u0026amp;运维部负责人。pmp项目管理认证,阿里云MVP。热爱开源,先后开源过多个热门项目。热爱分享技术点滴,独立博客KL博客(http://www.kailing.pub)博主。\n","excerpt":"前言 首先描述下问题的背景,博主有个习惯,每天上下班的时候看下skywalking的trace页面的error情况。但是某天突然发现生产环境skywalking页面没有任何数据了,页面也没有显示任何的 …","ref":"/zh/2019-03-01-skywalking-troubleshoot/","title":"SkyWalking线上问题排查定位"},{"body":" 作者:王振飞, 写于:2019-02-24 说明:此文是个人所写,版本归属作者,代表个人观点,仅供参考,不代表skywalking官方观点。 说明:本次对比基于skywalking-6.0.0-GA和Pinpoint-1.8.2(截止2019-02-19最新版本)。另外,我们这次技术选型直接否定了Zipkin,其最大原因是它对代码有侵入性,CAT也是一样。这是我们所完全无法接受的。\n 这应该是目前最优秀的两款开源APM产品了,而且两款产品都通过字节码注入的方式,实现了对代码完全无任何侵入,他们的对比信息如下:\nOAP说明: skywalking6.x才有OAP这个概念,skywalking5.x叫collector。\n接下来,对每个PK项进行深入分析和对比。更多精彩和首发内容请关注公众号:【阿飞的博客】。\n社区比较\n这一点上面skywalking肯定完胜。一方面,skywalking已经进入apache孵化,社区相当活跃。而且项目发起人是中国人,我们能够进入官方群(Apache SkyWalking交流群:392443393)和项目发起人吴晟零距离沟通,很多问题能第一时间得到大家的帮助(玩过开源的都知道,这个价值有多大)。 而Pinpoint是韩国人开发的,免不了有沟通障碍。至于github上最近一年的commit频率,skywalking和Pinpoint旗鼓相当,都是接近20的水平: 所以,社区方面,skywalking更胜一筹。\n支持语言比较 Pinpoint只支持Java和PHP,而skywalking支持5种语言:Java, C#, PHP, Node.js, Go。如果公司的服务涉及到多个开发语言,那么skywalking会是你更好的选择。并且,如果你要实现自己的探针(比如python语言),skywalking的二次开发成本也比Pinpoint更低。\n 说明:Github上有开发者为Pinpoint贡献了对Node.js的支持,请戳链接:https://github.com/peaksnail/pinpoint-node-agent。但是已经停止维护,几年没更新了!\n 所以,支持语言方面,skywalking更胜一筹。\n协议比较 SkyWalking支持gRPC和http,不过建议使用gRPC,skywalking6.x版本已经不提供http方式(但是还会保留接收5.x的数据),以后会考虑删除。 而Pinpoint使用的是thrift协议。 协议本身没有谁好谁坏。\n存储比较(重要) 
笔者认为,存储是skywalking和Pinpoint最大的差异所在,因为底层存储决定了上层功能。\nPinpoint只支持HBase,且扩展代价较大。这就意味着,如果选择Pinpoint,还要有能力hold住一套HBase集群(daocloud从Pinpoint切换到skywalking就是因为HBase的维护代价有点大)。在这方面,skywalking支持的存储就多很多,这样的话,技术选型时可以根据团队技术特点选择合适的存储,而且还可以自行扩展(不过生产环境上应该大部分是以es存储为主)。\nPinpoint只支持HBase的另一个缺陷就是,HBase本身查询能力有限(HBase只能支持三种方式查询:RowKey精确查找,SCAN范围查找,全表扫描)限制了Pinpoint的查询能力,所以其支持的查询一定是在时间的基础上(Pinpoint通过鼠标圈定一个时间范围后查看这个范围内的Trace信息)。而skywalking可以多个维度任意组合查询,例如:时间范围,服务名,Trace状态,请求路径,TraceId等。\n另外,Pinpoint和skywalking都支持TTL,即历史数据保留策略。skywalking是在OAP模块的application.yml中配置从而指定保留时间。而Pinpoint是通过HBase的ttl功能实现,通过Pinpoint提供的hbase脚本https://github.com/naver/pinpoint/blob/master/hbase/scripts/hbase-create.hbase可以看到:ApplicationTraceIndex配置了TTL =\u0026gt; 5184000,SqlMetaData_Ver2配合了TTL =\u0026gt; 15552000,单位是秒。\n 说明:es并不是完全碾压HBase,es和HBase没有绝对的好和坏。es强在检索能力,存储能力偏弱(千亿以下,es还是完全有能力hold的住的)。HBase强在存储能力,检索能力偏弱。如果搜集的日志量非常庞大,那么es存储就比较吃力。当然,没有蹩脚的中间件,只有蹩脚的程序员,无论是es还是HBase,调优才是最关键的。同样的,如果对检索能力有一定的要求,那么HBase肯定满足不了你。所以,又到了根据你的业务和需求决定的时刻了,trade-off真是无所不在。\n UI比较 Pinpoint的UI确实比skywalking稍微好些,尤其是服务的拓扑图展示。不过daocloud根据Pinpoint的风格为skywalking定制了一款UI。请戳链接:https://github.com/TinyAllen/rocketbot,项目介绍是:rocketbot: A UI for Skywalking。截图如下所示; 所以,只比较原生UI的话,Pinpoint更胜一筹。\n扩展性比较 Pinpoint好像设计之初就没有过多考虑扩展性,无论是底层的存储,还是自定义探针实现等。而skywalking核心设计目标之一就是Pluggable,即可插拔。\n以存储为例,pinpoint完全没有考虑扩展性,而skywalking如果要自定义实现一套存储,只需要定义一个类实现接口org.apache.skywalking.oap.server.library.module.ModuleProvider,然后实现一些DAO即可。至于Pinpoint则完全没有考虑过扩展底层存储。\n再以实现一个自己的探针为例(比如我要实现python语言的探针),Pinpoint选择thrift作为数据传输协议标准,而且为了节省数据传输大小,在传递常量的时候也尽量使用数据参考字典,传递一个数字而不是直接传递字符串等等。这些优化也增加了系统的复杂度:包括使用 Thrift 接口的难度、UDP 数据传输的问题、以及数据常量字典的注册问题等等。Pinpoint发展这么年才支持Java和PHP,可见一斑。而skywalking的数据接口就标准很多,并且支持OpenTracing协议,除了官方支持Java以外,C#、PHP和Node.js的支持都是由社区开发并维护。\n还有后面会提到的告警,skywalking的可扩展性也要远好于Pinpoint。\n最后,Pinpoint和skywalking都支持插件开发,Pinpoint插件开发参考:http://naver.github.io/pinpoint/1.8.2/plugindevguide.html。skywalking插件开发参考:https://github.com/apache/incubator-skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md。\n所以,扩展性方面skywalking更胜一筹。\n告警比较 Pinpoint和skywalking都支持自定义告警规则。\n但是恼人的是,Pinpoint如果要配置告警规则,还需要安装MySQL(配置告警时的用户,用户组信息以及告警规则都持久化保存在MySQL中),这就导致Pinpoint的维护成本又高了一些,既要维护HBase又要维护MySQL。\nPinpoint支持的告警规则有:SLOW COUNT|RATE, ERROR COUNT|RATE, TOTAL COUNT, SLOW COUNT|RATE TO CALLEE, ERROR COUNT|RATE TO CALLEE, ERROR RATE TO CALLEE, HEAP USAGE RATE, JVM CPU USAGE RATE, DATASOURCE CONNECTION USAGE RATE。\nPinpoint每3分钟周期性检查过去5分钟的数据,如果有符合规则的告警,就会发送sms/email给用户组下的所有用户。需要说明的是,实现发送sms/email的逻辑需要自己实现,Pinpoint只提供了接口com.navercorp.pinpoint.web.alarm.AlarmMessageSender。并且Pinpoint发现告警持续时,会递增发送sms/email的时间间隔 3min -\u0026gt; 6min -\u0026gt; 12min -\u0026gt; 24min,防止sms/email狂刷。\n Pinpoint告警参考:http://naver.github.io/pinpoint/1.8.2/alarm.html\n skywalking配置告警不需要引入任何其他存储。skywalking在config/alarm-settings.xml中可以配置告警规则,告警规则支持自定义。\nskywalking支持的告警规则(配置项中的名称是indicator-name)有:service_resp_time, service_sla, service_cpm, service_p99, service_p95, service_p90, service_p75, service_p50, service_instance_sla, service_instance_resp_time, service_instance_cpm, endpoint_cpm, endpoint_avg, endpoint_sla, endpoint_p99, endpoint_p95, endpoint_p90, endpoint_p75, endpoint_p50。\nSkywalking通过HttpClient的方式远程调用在配置项webhooks中定义的告警通知服务地址。skywalking也支持silence-period配置,假设在TN这个时间点触发了告警,那么TN -\u0026gt; TN+period 这段时间内不会再重复发送该告警。\n skywalking告警参考:https://github.com/apache/incubator-skywalking/blob/master/docs/en/setup/backend/backend-alarm.md。目前只支持official_analysis.oal脚本中Service, Service Instance, Endpoint scope的metric,其他scope的metric需要等待后续扩展。\n 
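To make the webhook mechanism more concrete, here is a minimal sketch (not part of the original comparison) of a Java endpoint that could be registered under `webhooks` in SkyWalking's alarm settings. It assumes a Spring Boot application, and the payload field names (`name`, `alarmMessage`) are assumptions that should be checked against the alarm message format of the SkyWalking version in use; the handler only logs each alarm, and a real receiver would fan out to SMS, email or WeChat from here.

```java
import java.util.List;
import java.util.Map;

import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;

// Hypothetical alarm webhook receiver: SkyWalking POSTs a JSON array of alarm
// messages to every URL configured under `webhooks`. Field names used below are
// assumptions and must be verified against the target SkyWalking version.
@RestController
public class AlarmWebhookController {

    @PostMapping("/alarm/receive")
    public ResponseEntity<Void> receive(@RequestBody List<Map<String, Object>> alarms) {
        for (Map<String, Object> alarm : alarms) {
            // Replace this logging with SMS / email / WeChat notification logic.
            System.out.printf("SkyWalking alarm [%s]: %s%n",
                    alarm.get("name"), alarm.get("alarmMessage"));
        }
        return ResponseEntity.ok().build();
    }
}
```

Pointing a rule's webhooks entry at a URL such as http://host:port/alarm/receive would then deliver triggered alarms to this handler, which is exactly the flexibility discussed next.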
Pinpoint和skywalking都支持常用的告警规则配置,但是skywalking采用webhooks的方式就灵活很多:短信通知,邮件通知,微信通知都是可以支持的。而Pinpoint只能sms/email通知,并且还需要引入MySQL存储,增加了整个系统复杂度。所以,告警方面,skywalking更胜一筹。\nJVM监控 skywalking支持监控:Heap, Non-Heap, GC(YGC和FGC)。 Pinpoint能够监控的指标主要有:Heap, Non-Heap, FGC, DirectBufferMemory, MappedBufferMemory,但是没有YGC。另外,Pinpoint还支持多个指标同一时间点查看的功能。如下图所示:\n所以,对JVM的监控方面,Pinpoint更胜一筹。\n服务监控 包括操作系统,和部署的服务实例的监控。 Pinpoint支持的维度有:CPU使用率,Open File Descriptor,数据源,活动线程数,RT,TPS。 skywalking支持的维度有:CPU使用率,SLA,RT,CPM(Call Per Minutes)。 所以,这方面两者旗鼓相当,没有明显的差距。\n跟踪粒度比较 Pinpoint在这方面做的非常好,跟踪粒度非常细。如下图所示,是Pinpoint对某个接口的trace信息: 而同一个接口skywalking的trace信息如下图所示:  备注: 此截图是skywalking加载了插件apm-spring-annotation-plugin-6.0.0-GA.jar(这个插件允许跟踪加了@Bean, @Service, @Component and @Repository注解的spring context中的bean的方法)。\n 通过对比发现,在跟踪粒度方面,Pinpoint更胜一筹。\n过滤追踪 Pinpoint和skywalking都可以实现,而且配置的表达式都是基于ant风格。 Pinpoint在Web UI上配置 filter wizard 即可自定义过滤追踪。 skywalking通过加载apm-trace-ignore-plugin插件就能自定义过滤跟踪,skywalking这种方式更灵活,比如一台高配服务器上有若干个服务,在共用的agent配置文件apm-trace-ignore-plugin.config中可以配置通用的过滤规则,然后通过-D的方式为每个服务配置个性化过滤。\n所以,在过滤追踪方面,skywalking更胜一筹。\n性能损耗 由于Pinpoint采集信息太过详细,所以,它对性能的损耗最大。而skywalking默认策略比较保守,对性能损耗很小。 有网友做过压力测试,对比如下:\n 图片来源于:https://juejin.im/post/5a7a9e0af265da4e914b46f1\n 所以,在性能损耗方面,skywalking更胜一筹。\n发布包比较 skywalking与时俱进,全系标配jar包,部署只需要执行start.sh脚本即可。而Pinpoint的collector和web还是war包,部署时依赖web容器(比如Tomcat)。拜托,都9012年了。\n所以,在发布包方面,skywalking更胜一筹。\n支持组件比较 skywalking和Pinpoint支持的中间件对比说明:\n WEB容器说明:Pinpoint支持几乎所有的WEB容器,包括开源和商业的。而wkywalking只支持开源的WEB容器,对2款大名鼎鼎的商业WEB容器Weblogic和Wevsphere都不支持。 RPC框架说明:对RPC框架的支持,skywalking简直秒杀Pinpoint。连小众的motan和sofarpc都支持。 MQ说明:skywalking比Pinpoint多支持一个国产的MQ中间件RocketMQ,毕竟RocketMQ在国内名气大,而在国外就一般了。加之skywalking也是国产的。 RDBMS/NoSQL说明:Pinpoint对RDBMS和NoSQL的支持都要略好于skywalking,RDBMS方面,skywalking不支持MSSQL和MariaDB。而NoSQL方面,skywalking不支持Cassandra和HBase。至于Pinpoint不支持的H2,完全不是问题,毕竟生产环境是肯定不会使用H2作为底层存储的。 Redis客户端说明:虽然skywalking和Pinpoint都支持Redis,但是skywalking支持三种流行的Redis客户端:Jedis,Redisson,Lettuce。而Pinpoint只支持Jedis和Lettuce,再一次,韩国人开发的Pinpoint无视了目前中国人开发的GitHub上star最多的Redis Client \u0026ndash; Redisson。 日志框架说明:Pinpoint居然不支持log4j2?但是已经有人开发了相关功能,详情请戳链接:log4j plugin support log4j2 or not? https://github.com/naver/pinpoint/issues/3055  通过对skywalking和Pinpoint支持中间件的对比我们发现,skywalking对国产软件的支持真的是全方位秒杀Pinpoint,比如小众化的RPC框架:motan(微博出品),sofarpc,阿里的RocketMQ,Redis客户端Redisson,以及分布式任务调度框架elastic-job等。当然也从另一方面反应国产开源软件在世界上的影响力还很小。\n这方面没有谁好谁坏,毕竟每个公司使用的技术栈不一样。如果你对RocketMQ有强需求,那么skywalking是你的最佳选择。如果你对es有强需求,那么skywalking也是你的最佳选择。如果HBase是你的强需求,那么Pinpoint就是你的最佳选择。如果MSSQL是你的强需求,那么Pinpoint也是你的最佳选择。总之,这里完全取决你的项目了。\n总结 经过前面对skywalking和Pinpoint全方位对比后我们发现,对于两款非常优秀的APM软件,有一种既生瑜何生亮的感觉。Pinpoint的优势在于:追踪数据粒度非常细、功能强大的用户界面,以及使用HBase作为存储带来的海量存储能力。而skywalking的优势在于:非常活跃的中文社区,支持多种语言的探针,对国产开源软件非常全面的支持,以及使用es作为底层存储带来的强大的检索能力,并且skywalking的扩展性以及定制化要更优于Pinpoint:\n 如果你有海量的日志存储需求,推荐Pinpoint。 如果你更看重二次开发的便捷性,推荐skywalking。  最后,参考上面的对比,结合你的需求,哪些不能妥协,哪些可以舍弃,从而更好的选择一款最适合你的APM软件。\n参考链接  参考[1]. https://github.com/apache/incubator-skywalking/blob/master/docs/en/setup/service-agent/java-agent/Supported-list.md 参考[2]. http://naver.github.io/pinpoint/1.8.2/main.html#supported-modules 参考[3]. https://juejin.im/post/5a7a9e0af265da4e914b46f1    如果觉得本文不错,请关注作者公众号:【阿飞的博客】,多谢!\n ","excerpt":"作者:王振飞, 写于:2019-02-24 说明:此文是个人所写,版本归属作者,代表个人观点,仅供参考,不代表skywalking官方观点。 说明:本次对比基于skywalking-6.0.0-GA …","ref":"/zh/2019-02-24-skywalking-pk-pinpoint/","title":"APM巅峰对决:SkyWalking P.K. 
Pinpoint"},{"body":"According to Apache Software Foundation branding policy all docker images of Apache Skywalking should be transferred from skywalking to apache with a prefix skywalking-. The transfer details are as follows\n skywalking/base -\u0026gt; apache/skywalking-base skywalking/oap -\u0026gt; apache/skywalking-oap-server skywalking/ui -\u0026gt; apache/skywalking-ui  All of repositories in skywalking will be removed after one week.\n","excerpt":"According to Apache Software Foundation branding policy all docker images of Apache Skywalking …","ref":"/events/transfer-docker-images-to-apache-official-repository/","title":"Transfer Docker Images to Apache Official Repository"},{"body":"6.0.0-GA release. Go to downloads page to find release tars. This is an important milestone version, we recommend all users upgrade to this version.\nKey updates\n Bug fixed Register bug fix, refactor and performance improvement New trace UI  ","excerpt":"6.0.0-GA release. Go to downloads page to find release tars. This is an important milestone version, …","ref":"/events/release-apache-skywalking-apm-6-0-0-ga/","title":"Release Apache SkyWalking APM 6.0.0-GA"},{"body":"Based on his contributions to the project, he has been accepted as SkyWalking PPMC. Welcome aboard.\n","excerpt":"Based on his contributions to the project, he has been accepted as SkyWalking PPMC. Welcome aboard.","ref":"/events/welcome-jian-tan-as-a-new-ppmc/","title":"Welcome Jian Tan as a new PPMC"},{"body":" Author: Hongtao Gao, Apache SkyWalking \u0026amp; ShardingShpere PMC GitHub, Twitter, Linkedin  Service mesh receiver was first introduced in Apache SkyWalking 6.0.0-beta. It is designed to provide a common entrance for receiving telemetry data from service mesh framework, for instance, Istio, Linkerd, Envoy etc. What’s the service mesh? According to Istio’s explain:\nThe term service mesh is used to describe the network of microservices that make up such applications and the interactions between them.\nAs a PMC member of Apache SkyWalking, I tested trace receiver and well understood the performance of collectors in trace scenario. I also would like to figure out the performance of service mesh receiver.\nDifferent between trace and service mesh Following chart presents a typical trace map:\nYou could find a variety of elements in it just like web service, local method, database, cache, MQ and so on. But service mesh only collect service network telemetry data that contains the entrance and exit data of a service for now(more elements will be imported soon, just like Database). A smaller quantity of data is sent to the service mesh receiver than the trace.\nBut using sidecar is a little different.The client requesting “A” that will send a segment to service mesh receiver from “A”’s sidecar. If “A” depends on “B”, another segment will be sent from “A”’s sidecar. But for a trace system, only one segment is received by the collector. The sidecar model splits one segment into small segments, that will increase service mesh receiver network overhead.\nDeployment Architecture In this test, I will pick two different backend deployment. One is called mini unit, consist of one collector and one elasticsearch instance. Another is a standard production cluster, contains three collectors and three elasticsearch instances.\nMini unit is a suitable architecture for dev or test environment. It saves your time and VM resources, speeds up depolyment process.\nThe standard cluster provides good performance and HA for a production scenario. 
Though you will pay more money and take care of the cluster carefully, the reliability of the cluster will be a good reward to you.\nI pick 8 CPU and 16GB VM to set up the test environment. This test targets the performance of normal usage scenarios, so that choice is reasonable. The cluster is built on Google Kubernetes Engine(GKE), and every node links each other with a VPC network. For running collector is a CPU intensive task, the resource request of collector deployment should be 8 CPU, which means every collector instance occupy a VM node.\nTesting Process Receiving mesh fragments per second(MPS) depends on the following variables.\n Ingress query per second(QPS) The topology of a microservice cluster Service mesh mode(proxy or sidecar)  In this test, I use Bookinfo app as a demo cluster.\nSo every request will touch max 4 nodes. Plus picking the sidecar mode(every request will send two telemetry data), the MPS will be QPS * 4 *2.\nThere are also some important metrics that should be explained\n Client Query Latency: GraphQL API query response time heatmap. Client Mesh Sender: Send mesh segments per second. The total line represents total send amount and the error line is the total number of failed send. Mesh telemetry latency: service mesh receiver handling data heatmap. Mesh telemetry received: received mesh telemetry data per second.  Mini Unit You could find collector can process up to 25k data per second. The CPU usage is about 4 cores. Most of the query latency is less than 50ms. After login the VM on which collector instance running, I know that system load is reaching the limit(max is 8).\nAccording to the previous formula, a single collector instance could process 3k QPS of Bookinfo traffic.\nStandard Cluster Compare to the mini-unit, cluster’s throughput increases linearly. Three instances provide total 80k per second processing power. Query latency increases slightly, but it’s also very small(less than 500ms). I also checked every collector instance system load that all reached the limit. 10k QPS of BookInfo telemetry data could be processed by the cluster.\nConclusion Let’s wrap them up. There are some important things you could get from this test.\n QPS varies by the there variables. The test results in this blog are not important. The user should pick property value according to his system. Collector cluster’s processing power could scale out. The collector is CPU intensive application. So you should provide sufficient CPU resource to it.  This blog gives people a common method to evaluate the throughput of Service Mesh Receiver. 
Users could use this to design their Apache Skywalking backend deployment architecture.\n","excerpt":"Author: Hongtao Gao, Apache SkyWalking \u0026amp; ShardingShpere PMC GitHub, Twitter, Linkedin  Service …","ref":"/blog/2019-01-25-mesh-loadtest/","title":"SkyWalking performance in Service Mesh scenario"},{"body":"","excerpt":"","ref":"/zh_tags/development/","title":"Development"},{"body":"ps:本文仅写给菜鸟,以及不知道如何远程调试的程序员,并且仅仅适用skywalking的远程调试\n概述 远程调试的目的是为了解决代码或者说程序包部署在服务器上运行,只能通过log来查看问题,以及不能跟在本地IDE运行debug那样查找问题,观看程序运行流程\u0026hellip; 想想当你的程序运行在服务器上,你在本地的IDE随时debug,是不是很爽的感觉。\n好了不废话,切入正题。\n环境篇 IDE:推荐 IntelliJ IDEA\n开发语言: 本文仅限于java,其他语言请自行询问google爸爸或者baidu娘娘\n源代码:自行从github下载,并且确保你运行的skywalking包也源代码的一致,(也就是说你自己从源代码编译打包运行,虽然不一样也可以调试,但是你想想你在本地开发,更改完代码,没有重新运行,debug出现的诡异情况)\n场景篇 假定有如下三台机器\n   IP 用途 备注     10.193.78.1 oap-server skywalking 的oap服务(或者说collector所在的服务器)   10.193.78.2 agent skywalking agent运行所在的服务器   10.193.78.0 IDE 你自己装IDE也就是IntelliJ IDEA的机器    以上环境,场景请自行安装好,并确认正常运行。本文不在赘述\n废话终于说完了\n操作篇 首要条件,下载源码后,先用maven 打包编译。然后使用Idea打开源码的父目录,整体结构大致如下图 1 :agent调试 1)Idea 配置部分 点击Edit Configurations 在弹出窗口中依次找到(红色线框的部分)并点击 打开的界面如下 修改Name值,自己随意,好记即可 然后Host输入10.193.78.2 Port默认或者其他的,重要的是这个端口在10.193.78.2上没有被占用\n然后找到Use module classpath 选择 apm-agent 最终的结果如下: 注意选择目标agent运行的jdk版本,很重要\n然后点击Apply,并找到如下内容,并且复制待用 2)agent配置部分 找到agent配置的脚本,并打开,找到配置agent的地方, 就这个地方,在这个后边加上刚才复制的内容 最终的结果如下 提供一个我配置的weblogic的配置(仅供参考) 然后重启应用(agent)\n3)调试 回到Idea中找到这个地方,并点击debug按钮,你没看错,就是红色圈住的地方 然后控制台如果出现以下字样: 那么恭喜你,可以愉快的加断点调试了。 ps:需要注意的是agent的、 service instance的注册可能不能那么愉快的调试。因为这个注册比较快,而且是在agent启动的时候就发生的, 而远程调试也需要agent打开后才可以调试,所以,如果你手快当我没说这句话。\n2 :oap-server的调试(也就是collector的调试) 具体过程不在赘述,和上一步的agent调试大同小异,不同的是 Use module classpath需要选择oap-server\n","excerpt":"ps:本文仅写给菜鸟,以及不知道如何远程调试的程序员,并且仅仅适用skywalking的远程调试\n概述 远程调试的目的是为了解决代码或者说程序包部署在服务器上运行,只能通过log来查看问题,以及不能跟 …","ref":"/zh/2019-01-24-skywalking-remote-debug/","title":"SkyWalking的远程调试"},{"body":"引言 《SkyWalking Java 插件贡献实践》:本文将基于SkyWalking 6.0.0-GA-SNAPSHOT版本,以编写Redis客户端Lettuce的SkyWalking Java Agent 插件为例,与大家分享我贡献PR的过程,希望对大家了解SkyWalking Java Agent插件有所帮助。\n基础概念 OpenTracing和SkyWalking链路模块几个很重要的语义概念。\n  Span:可理解为一次方法调用,一个程序块的调用,或一次RPC/数据库访问。只要是一个具有完整时间周期的程序访问,都可以被认为是一个span。SkyWalking Span对象中的重要属性\n   属性 名称 备注     component 组件 插件的组件名称,如:Lettuce,详见:ComponentsDefine.Class。   tag 标签 k-v结构,关键标签,key详见:Tags.Class。   peer 对端资源 用于拓扑图,若DB组件,需记录集群信息。   operationName 操作名称 若span=0,operationName将会搜索的下拉列表。   layer 显示 在链路页显示,详见SpanLayer.Class。      Trace:调用链,通过归属于其的Span来隐性的定义。一条Trace可被认为是一个由多个Span组成的有向无环图(DAG图),在SkyWalking链路模块你可以看到,Trace又由多个归属于其的trace segment组成。\n  Trace segment:Segment是SkyWalking中的一个概念,它应该包括单个OS进程中每个请求的所有范围,通常是基于语言的单线程。由多个归属于本线程操作的Span组成。\n  核心API 跨进程ContextCarrier核心API  为了实现分布式跟踪,需要绑定跨进程的跟踪,并且应该传播上下文 整个过程。 这就是ContextCarrier的职责。 以下是实现有关跨进程传播的步骤:  在客户端,创建一个新的空的ContextCarrier,将ContextCarrier所有信息放到HTTP heads、Dubbo attachments 或者Kafka messages。 通过服务调用,将ContextCarrier传递到服务端。 在服务端,在对应组件的heads、attachments或messages获取ContextCarrier所有消息。将服务端和客户端的链路信息绑定。    跨线程ContextSnapshot核心API  除了跨进程,跨线程也是需要支持的,例如异步线程(内存中的消息队列)和批处理在Java中很常见,跨进程和跨线程十分相似,因为都是需要传播 上下文。 唯一的区别是,不需要跨线程序列化。 以下是实现有关跨线程传播的步骤:  使用ContextManager#capture获取ContextSnapshot对象。 让子线程以任何方式,通过方法参数或由现有参数携带来访问ContextSnapshot。 在子线程中使用ContextManager#continued。    详尽的核心API相关知识,可点击阅读 《插件开发指南-中文版本》\n插件实践 Lettuce操作redis代码 @PostMapping(\u0026#34;/ping\u0026#34;) public String ping(HttpServletRequest request) throws ExecutionException, InterruptedException { RedisClient redisClient = RedisClient.create(\u0026#34;redis://\u0026#34; + \u0026#34;127.0.0.1\u0026#34; + 
\u0026#34;:6379\u0026#34;); StatefulRedisConnection\u0026lt;String, String\u0026gt; connection0 = redisClient.connect(); RedisAsyncCommands\u0026lt;String, String\u0026gt; asyncCommands0 = connection0.async(); AsyncCommand\u0026lt;String, String, String\u0026gt; future = (AsyncCommand\u0026lt;String, String, String\u0026gt;)asyncCommands0.set(\u0026#34;key_a\u0026#34;, \u0026#34;value_a\u0026#34;); future.onComplete(s -\u0026gt; OkHttpClient.call(\u0026#34;http://skywalking.apache.org\u0026#34;)); future.get(); connection0.close(); redisClient.shutdown(); return \u0026#34;pong\u0026#34;; } 插件源码架构 Lettuce对Redis封装与Redisson Redisson 类似,目的均是实现简单易用,且无学习曲线的Java的Redis客户端。所以要是先对Redis操作的拦截,需要学习对应客户端的源码。\n设计插件 理解插件实现过程,找到最佳InterceptPoint位置是实现插件融入SkyWalking的核心所在。\n代码实现 PR的url:Support lettuce plugin\n实践中遇到的问题  多线程编程使用debug断点会将链路变成同步,建议使用run模式增加log,或者远程debug来解决。 多线程编程,需要使用跨线程ContextSnapshot核心API,否则链路会断裂。 CompleteableCommand.onComplete方法有时会同步执行,这个和内部机制有关,有时候不分离线程。 插件编译版本若为1.7+,需要将插件放到可选插件中。因为sniffer支持的版本是1.6。  插件兼容 为了插件得到插件最终的兼容兼容版本,我们需要使用docker对所有插件版本的测试,具体步骤如下:\n 编写测试用例:关于如何编写测试用例,请按照如何编写文档来实现。 提供自动测试用例。 如:Redisson插件testcase 确保本地几个流行的插件版本,在本地运行起来是和自己的预期是一致的。 在提供自动测试用例并在CI中递交测试后,插件提交者会批准您的插件。 最终得到完整的插件测试报告。  Pull Request 提交PR 提交PR的时候,需要简述自己对插件的设计,这样有助于与社区的贡献者讨论完成codereview。\n申请自动化测试 测试用例编写完成后,可以申请自动化测试,在自己的PR中会生成插件兼容版本的报告。\n插件文档 插件文档需要更新:Supported-list.md相关插件信息的支持。\n插件如果为可选插件需要在agent-optional-plugins可选插件文档中增加对应的描述。\n注释 Lettuce是一个完全无阻塞的Redis客户端,使用netty构建,提供反应,异步和同步数据访问。了解细节可点击阅读 lettuce.io;\nOpenTracing是一个跨编程语言的标准,了解细节可点击阅读 《OpenTracing语义标准》;\nspan:org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan接口定义了所有Span实现需要完成的方法;\nRedisson是一个非常易用Java的Redis客户端, 它没有学习曲线,无需知道任何Redis命令即可开始使用它。了解细节可点击阅读 redisson.org;\n","excerpt":"引言 《SkyWalking Java 插件贡献实践》:本文将基于SkyWalking 6.0.0-GA-SNAPSHOT版本,以编写Redis客户端Lettuce的SkyWalking Java …","ref":"/zh/2019-01-21-agent-plugin-practice/","title":"SkyWalking Java 插件贡献实践"},{"body":"Jinlin Fu has contributed 4 new plugins, including gson, activemq, rabbitmq and canal, which made SkyWalking supporting all mainstream OSS MQ. Also provide several documents and bug fixes. The SkyWalking PPMC based on these, promote him as new committer. 
Welcome on board.\n","excerpt":"Jinlin Fu has contributed 4 new plugins, including gson, activemq, rabbitmq and canal, which made …","ref":"/events/welcome-jinlin-fu-as-new-committer/","title":"Welcome Jinlin Fu as new committer"},{"body":" 作者:赵瑞栋 原文地址  引言 微服务框架落地后,分布式部署架构带来的问题就会迅速凸显出来。服务之间的相互调用过程中,如果业务出现错误或者异常,如何快速定位问题?如何跟踪业务调用链路?如何分析解决业务瓶颈?\u0026hellip;本文我们来看看如何解决以上问题。\n一、SkyWalking初探 Skywalking 简介 Skywalking是一款国内开源的应用性能监控工具,支持对分布式系统的监控、跟踪和诊断。\n它提供了如下的主要功能特性: Skywalking 技术架构 SW总体可以分为四部分:\n1.Skywalking Agent:使用Javaagent做字节码植入,无侵入式的收集,并通过HTTP或者gRPC方式发送数据到Skywalking Collector。\nSkywalking Collector :链路数据收集器,对agent传过来的数据进行整合分析处理并落入相关的数据存储中。 Storage:Skywalking的存储,时间更迭,sw已经开发迭代到了6.x版本,在6.x版本中支持以ElasticSearch、Mysql、TiDB、H2、作为存储介质进行数据存储。 UI :Web可视化平台,用来展示落地的数据。  Skywalking Agent配置 通过了解配置,可以对一个组件功能有一个大致的了解。让我们一起看一下skywalking的相关配置。\n解压开skywalking的压缩包,在agent/config文件夹中可以看到agent的配置文件。\n从skywalking支持环境变量配置加载,在启动的时候优先读取环境变量中的相关配置。\n agent.namespace: 跨进程链路中的header,不同的namespace会导致跨进程的链路中断 agent.service_name:一个服务(项目)的唯一标识,这个字段决定了在sw的UI上的关于service的展示名称 agent.sample_n_per_3_secs: 客户端采样率,默认是-1代表全采样 agent.authentication: 与collector进行通信的安全认证,需要同collector中配置相同 agent.ignore_suffix: 忽略特定请求后缀的trace collecttor.backend_service: agent需要同collector进行数据传输的IP和端口 logging.level: agent记录日志级别  skywalking agent使用javaagent无侵入式的配合collector实现对分布式系统的追踪和相关数据的上下文传递。\nSkywalking Collector关键配置 Collector支持集群部署,zookeeper、kubernetes(如果你的应用是部署在容器中的)、consul(GO语言开发的服务发现工具)是sw可选的集群管理工具,结合大家具体的部署方式进行选择。详细配置大家可以去Skywalking官网下载介质包进行了解。\nCollector端口设置\n downsampling: 采样汇总统计维度,会分别按照分钟、【小时、天、月】(可选)来统计各项指标数据。 通过设置TTL相关配置项可以对数据进行自动清理。  Skywalking 在6.X中简化了配置。collector提供了gRPC和HTTP两种通信方式。\nUI使用rest http通信,agent在大多数场景下使用grpc方式通信,在语言不支持的情况下会使用http通信。\n关于绑定IP和端口需要注意的一点是,通过绑定IP,agent和collector必须配置对应ip才可以正常通信。\nCollector存储配置\n在application.yml中配置的storage模块配置中选择要使用的数据库类型,并填写相关的配置信息。\nCollector Receiver\nReceiver是Skywalking在6.x提出的新的概念,负责从被监控的系统中接受指标数据。用户完全可以参照OpenTracing规范来上传自定义的监控数据。Skywalking官方提供了service-mesh、istio、zipkin的相关能力。\n现在Skywalking支持服务端采样,配置项为sampleRate,比例采样,如果配置为5000则采样率就是50%。\n关于采样设置的一点注意事项\n关于服务采样配置的一点建议,如果Collector以集群方式部署,比如:Acollector和Bcollector,建议Acollector.sampleRate = Bcollector.sampleRate。如果采样率设置不相同可能会出现数据丢失问题。\n假设Agent端将所有数据发送到后端Collector处,A采样率设置为30%,B采样率为50%。\n假设有30%的数据,发送到A上,这些数据被全部正确接受并存储,极端情况(与期望的采样数据量相同)下,如果剩下20%待采样的数据发送到了B,这个时候一切都是正常的,如果这20%中有一部分数据被送到了A那么,这些数据将是被忽略的,由此就会造成数据丢失。\n二、业务调用链路监控 Service Topology监控 调用链路监控可以从两个角度去看待。我们先从整体上来认识一下我们所监控的系统。\n通过给服务添加探针并产生实际的调用之后,我们可以通过Skywalking的前端UI查看服务之间的调用关系。\n我们简单模拟一次服务之间的调用。新建两个服务,service-provider以及service-consumer,服务之间简单的通过Feign Client 来模拟远程调用。\n从图中可以看到:\n 有两个服务节点:provider \u0026amp; consumer 有一个数据库节点:localhost【mysql】 一个注册中心节点  consumer消费了provider提供出来的接口。\n一个系统的拓扑图让我们清晰的认识到系统之间的应用的依赖关系以及当前状态下的业务流转流程。细心的可能发现图示节点consumer上有一部分是红色的,红色是什么意思呢?\n红色代表当前流经consumer节点的请求有一断时间内是响应异常的。当节点全部变红的时候证明服务现阶段内就彻底不可用了。运维人员可以通过Topology迅速发现某一个服务潜在的问题,并进行下一步的排查并做到预防。\nSkywalking Trace监控 Skywalking通过业务调用监控进行依赖分析,提供给我们了服务之间的服务调用拓扑关系、以及针对每个endpoint的trace记录。\n我们在之前看到consumer节点服务中发生了错误,让我们一起来定位下错误是发生在了什么地方又是什么原因呢?\n在每一条trace的信息中都可以看到当前请求的时间、GloableId、以及请求被调用的时间。我们分别看一看正确的调用和异常的调用。\nTrace调用链路监控 图示展示的是一次正常的响应,这条响应总耗时19ms,它有4个span:\n span1 /getStore = 19ms 响应的总流转时间 span2 /demo2/stores = 14ms feign client 开始调用远程服务后的响应的总时间 span3 /stores = 14ms 接口服务响应总时间 span4 Mysql = 1ms 服务提供端查询数据库的时间  这里span2和span3的时间表现相同,其实是不同的,因为这里时间取了整。\n在每个Span中可以查看当前Span的相关属性。\n 组件类型: SpringMVC、Feign Span状态: false HttpMethod: GET Url: http://192.168.16.125:10002/demo2/stores  
这是一次正常的请求调用Trace日志,可能我们并不关心正常的时候,毕竟一切正常不就是我们期待的么!\n我们再来看下,异常状态下我们的Trace以及Span又是什么样的呢。\n发生错误的调用链中Span中的is error标识变为true,并且在名为Logs的TAB中可以看到错误发生的具体原因。根据异常情况我们就可以轻松定位到影响业务的具体原因,从而快速定位问题,解决问题。\n通过Log我们看到连接被拒,那么可能是我们的网络出现了问题(可能性小,因为实际情况如果网络出现问题我们连这个trace都看不到了),也有可能是服务端配置问题无法正确建立连接。通过异常日志,我们迅速就找到了问题的关键。\n实际情况是,我把服务方停掉了,做了一次简单的模拟。可见,通过拓扑图示我们可以清晰的看到众多服务中哪个服务是出现了问题的,通过trace日志我们可以很快就定位到问题所在,在最短的时间内解决问题。\n三、服务性能指标监控 Skywalking还可以查看具体Service的性能指标,根据相关的性能指标可以分析系统的瓶颈所在并提出优化方案。\nSkywalking 性能监控 在服务调用拓扑图上点击相应的节点我们可以看到该服务的\n SLA: 服务可用性(主要是通过请求成功与失败次数来计算) CPM: 每分钟调用次数 Avg Response Time: 平均响应时间  从应用整体外部来看我们可以监测到应用在一定时间段内的\n 服务可用性指标SLA 每分钟平均响应数 平均响应时间 服务进程PID 服务所在物理机的IP、HostName、Operation System  Service JVM信息监控 还可以监控到Service运行时的CPU、堆内存、非堆内存使用率、以及GC情况。这些信息来源于JVM。注意这里的数据可不是机器本身的数据。\n四、服务告警 前文我们提到了通过查看拓扑图以及调用链路可以定位问题,可是运维人员又不可能一直盯着这些数据,那么我们就需要告警能力,在异常达到一定阈值的时候主动的提示我们去查看系统状态。\n在Sywalking 6.x版本中新增了对服务状态的告警能力。它通过webhook的方式让我们可以自定义我们告警信息的通知方式。诸如:邮件通知、微信通知、短信通知等。\nSkywalking 服务告警 先来看一下告警的规则配置。在alarm-settings.xml中可以配置告警规则,告警规则支持自定义。\n一份告警配置由以下几部分组成:\n service_resp_time_rule:告警规则名称 ***_rule (规则名称可以自定义但是必须以’_rule’结尾 indicator-name:指标数据名称: 定义参见http://t.cn/EGhfbmd op: 操作符: \u0026gt; , \u0026lt; , = 【当然你可以自己扩展开发其他的操作符】 threshold:目标值:指标数据的目标数据 如sample中的1000就是服务响应时间,配合上操作符就是大于1000ms的服务响应 period: 告警检查周期:多久检查一次当前的指标数据是否符合告警规则 counts: 达到告警阈值的次数 silence-period:忽略相同告警信息的周期 message:告警信息 webhooks:服务告警通知服务地址  Skywalking通过HttpClient的方式远程调用在配置项webhooks中定义的告警通知服务地址。\n了解了SW所传送的数据格式我们就可以对告警信息进行接收处理,实现我们需要的告警通知服务啦!\n我们将一个服务停掉,并将另外一个服务的某个对外暴露的接口让他休眠一定的时间。然后调用一定的次数观察服务的状态信息以及告警情况。\n总结 本文简单的通过skwaylking的配置来对skywlaking的功能进行一次初步的了解,对skwaylking新提出的概念以及新功能进行简单的诠释,方便大家了解和使用。通过使用APM工具,可以让我们方便的查看微服务架构中系统瓶颈以及性能问题等。\n精选提问 问1:想问问选型的时候用pinpoint还是SK好?\n答:选型问题\n 要结合具体的业务场景, 比如你的代码运行环境 是java、php、net还是什么。 pinpoint在安装部署上要比skywalking略微复杂 pinpoint和sw支持的组件列表是不同的。 https://github.com/apache/incubator-skywalking/blob/master/docs/en/setup/service-agent/java-agent/Supported-list.md你可以参照这里的支持列表对比下pinpoint的支持对象做一个简单对比。 sw经过测试在并发量较高的情况下比pinpoint的吞吐量更好一些。  问2:有没有指标统计,比如某个url 的top10 请求、响应最慢的10个请求?某个服务在整个链条中的耗时占比?\n答:1.sw自带有响应最慢的请求top10统计针对所有的endpoint的统计。 2.针对每个url的top10统计,sw本身没有做统计,数据都是现成的通过简单的检索就可以搜到你想要的结果。 3.没有具体的耗时占比,但是有具体总链路时间统计以及某个服务的耗时统计,至于占比自己算吧,可以看ppt中的调用链路监控的span时间解释。\n问3:能不能具体说一下在你们系统中的应用?\n答:EOS8LA版本中,我们整合sw对应用提供拓扑、调用链路、性能指标的监控、并在sw数据的基础上增加系统的维度。 当服务数很庞大的时候,整体的拓扑其实就是一张密密麻麻的蜘蛛网。我们可以通过系统来选择具体某个系统下的应用。 8LA中SW是5.0.0alpha版本,受限于sw功能,我们并没有提供告警能力,这在之后会是我们的考虑目标。\n问4:业务访问日志大概每天100G,kubernetes 环境中部署,使用稳定吗?\n答:监控数据没有长时间的存储必要,除非你有特定的需求。它有一定的时效性,你可以设置ttl自动清除过时信息。100g,es集群还是能轻松支撑的。\n问5:和pinpoint相比有什么优势吗?\n答:\n 部署方式、使用方式简单 功能特性支持的更多 高并发性能会更好一些  问6:skywalking的侵入式追踪功能方便进行单服务链的服务追踪。但是跨多台服务器多项目的整体服务链追踪是否有整体设计考虑?\n答:sw本身特性就是对分布式系统的追踪,他是无侵入式的。无关你的应用部署在多少台服务器上。\n问7:应用在加上代理之后性能会下降。请问您有什么解决方法吗?\n答:性能下降是在所难免的,但是据我了解,以及官方的测试,他的性能影响是很低的。这是sw的测试数据供你参考。 https://skywalkingtest.github.io/Agent-Benchmarks/README_zh.html。\n问8:有异构系统需求的话可以用sw吗?\n答:只要skywalking的探针支持的应该都是可以的。\n问9:sw对于商用的web中间件,如bes、tongweb、websphere、weblogic的支持如何?\n答:商业组件支持的比较少,因为涉及到相关license的问题,sw项目组需要获得他们的支持来进行数据上报,据我了解,支持不是很好。\n","excerpt":"作者:赵瑞栋 原文地址  引言 微服务框架落地后,分布式部署架构带来的问题就会迅速凸显出来。服务之间的相互调用过程中,如果业务出现错误或者异常,如何快速定位问题?如何跟踪业务调用链路?如何分析解决业务 …","ref":"/zh/2019-01-03-monitor-microservice/","title":"SkyWalking 微服务监控分析"},{"body":"","excerpt":"","ref":"/zh_tags/elasticsearch/","title":"ElasticSearch"},{"body":"SkyWalking 依赖 elasticsearch 集群,如果 elasticsearch 安装有 x-pack 插件的话,那么就会存在一个 Basic 认证,导致 skywalking 无法调用 elasticsearch, 解决方法是使用 nginx 做代理,让 nginx 来做这个 Basic 认证,那么这个问题就自然解决了。\n方法如下:\n 安装 nginx   yum install -y nginx\n 配置 nginx  
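(Editor's aside, not part of the original post: the Basic value passed via proxy_set_header in the configuration below is simply the Base64 encoding of username:password. Assuming the example credentials admin / 123456 used in this post, a minimal Java sketch for generating it, written in the same flattened style as the other code in this document, is:)\n import java.nio.charset.StandardCharsets; import java.util.Base64; public class BasicAuthValue { public static void main(String[] args) { /* admin and 123456 are the example credentials from this post; replace with your own */ String credentials = \u0026#34;admin\u0026#34; + \u0026#34;:\u0026#34; + \u0026#34;123456\u0026#34;; /* the colon between username and password is required */ String value = Base64.getEncoder().encodeToString(credentials.getBytes(StandardCharsets.UTF_8)); /* prints: Basic YWRtaW46MTIzNDU2 */ System.out.println(\u0026#34;Basic \u0026#34; + value); } } \n Note that the value generated this way (YWRtaW46MTIzNDU2) differs from the sample shown in the configuration below, which encodes a space rather than the required colon.\n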
server { listen 9200 default_server; server_name _; location / { proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_pass http://localhost:9200; #Basic字符串就是使用你的用户名(admin),密码(12345)编码后的值 #注意:在进行Basic加密的时候要使用如下格式如:admin:123456 注意中间有个冒号 proxy_set_header Authorization \u0026#34;Basic YWRtaW4gMTIzNDU2\u0026#34;; } } 验证   curl localhost:9200\n { \u0026#34;name\u0026#34; : \u0026#34;Yd0rCp9\u0026#34;, \u0026#34;cluster_name\u0026#34; : \u0026#34;es-cn-4590xv9md0009doky\u0026#34;, \u0026#34;cluster_uuid\u0026#34; : \u0026#34;jAPLrqY5R6KWWgHnGCWOAA\u0026#34;, \u0026#34;version\u0026#34; : { \u0026#34;number\u0026#34; : \u0026#34;6.3.2\u0026#34;, \u0026#34;build_flavor\u0026#34; : \u0026#34;default\u0026#34;, \u0026#34;build_type\u0026#34; : \u0026#34;tar\u0026#34;, \u0026#34;build_hash\u0026#34; : \u0026#34;053779d\u0026#34;, \u0026#34;build_date\u0026#34; : \u0026#34;2018-07-20T05:20:23.451332Z\u0026#34;, \u0026#34;build_snapshot\u0026#34; : false, \u0026#34;lucene_version\u0026#34; : \u0026#34;7.3.1\u0026#34;, \u0026#34;minimum_wire_compatibility_version\u0026#34; : \u0026#34;5.6.0\u0026#34;, \u0026#34;minimum_index_compatibility_version\u0026#34; : \u0026#34;5.0.0\u0026#34; }, \u0026#34;tagline\u0026#34; : \u0026#34;You Know, for Search\u0026#34; } 看到如上结果那么恭喜你成功了。\n","excerpt":"SkyWalking 依赖 elasticsearch 集群,如果 elasticsearch 安装有 x-pack 插件的话,那么就会存在一个 Basic 认证,导致 skywalking …","ref":"/zh/2019-01-02-skywalking-elasticsearch-basic/","title":"关于 ElasticSearch 因 basic 认证导致 SkyWalking 无法正常调用接口问题"},{"body":" 作者: Wu Sheng, tetrate, SkyWalking original creator GitHub, Twitter, Linkedin 翻译: jjlu521016  背景 在当前的微服务架构中分布式链路追踪是很有必要的一部分,但是对于一些用户来说如何去理解和使用分布式链路追踪的相关数据是不清楚的。 这个博客概述了典型的分布式跟踪用例,以及Skywalking的V6版本中新的可视化功能。我们希望新的用户通过这些示例来更好的理解。\n指标和拓扑图 跟踪数据支持两个众所周知的分析特性:指标和拓扑图\n指标: 每个service, service instance, endpoint的指标都是从跟踪中的入口span派生的。指标代表响应时间的性能。所以可以有一个平均响应时间,99%的响应时间,成功率等。它们按service, service instance, endpoint进行分解。\n拓扑图: 拓扑表示服务之间的链接,是分布式跟踪最有吸引力的特性。拓扑结构允许所有用户理解分布式服务关系和依赖关系,即使它们是不同的或复杂的。这一点很重要,因为它为所有相关方提供了一个单一的视图,无论他们是开发人员、设计者还是操作者。\n这里有一个拓扑图的例子包含了4个项目,包括kafka和两个外部依赖。\n-在skywalking的可选择UI0RocketBot的拓扑图-\nTrace 在分布式链路追踪系统中,我们花费大量资源(CPU、内存、磁盘和网络)来生成、传输和持久跟踪数据。让我们试着回答为什么要这样做?我们可以用跟踪数据回答哪些典型的诊断和系统性能问题?\nSkywalking v6包含两种追踪视图:\n   TreeMode: 第一次提供,帮助您更容易识别问题。    ListMode: 常规的时间线视图,通常也出现在其他跟踪系统中,如Zipkin。    发生错误 在trace视图,最简单的部分是定位错误,可能是由代码异常或网络故障引起的。通过span详情提供的细节,ListMode和TreeMode都能够找到错误 -ListMode 错误span-\n-TreeMode 错误span-\n慢span 一个高优先级的特性是识别跟踪中最慢的span。这将使用应用程序代理捕获的执行持续时间。在旧的ListMode跟踪视图中,由于嵌套,父span几乎总是包括子span的持续时间。换句话说,一个缓慢的span通常会导致它的父节点也变慢,在Skywalking 6中,我们提供了 最慢的前5个span 过滤器来帮助你您直接定位span。\n-最慢的前5个span-\n太多子span 在某些情况下,个别持续时间很快,但跟踪速度仍然很慢,如: -没有慢span的追踪-\n如果要了解根问题是否与太多操作相关,请使用子范围号的Top 5 of children span number,筛选器显示每个span的子级数量,突出显示前5个。 -13个数据库访问相关的span-\n在这个截图中,有一个包含13个子项的span,这些子项都是数据库访问。另外,当您看到跟踪的概述时,这个2000ms跟踪的数据库花费了1380ms。 -1380ms花费在数据库访问-\n在本例中,根本原因是数据库访问太多。这在其他场景中也很常见,比如太多的RPC或缓存访问。\n链路深度 跟踪深度也与延迟有关。像太多子span的场景一样,每个span延迟看起来不错,但整个链路追踪的过程很慢。 -链路深度-\n上图所示,最慢的span小鱼500ms,对于2000毫秒的跟踪来说,速度并不太慢。当您看到第一行时,有四种不同的颜色表示这个分布式跟踪中涉及的四个services。每一个都需要100~400ms,这四个都需要近2000ms,从这里我们知道这个缓慢的跟踪是由一个序列中的3个RPC造成的。\n结束语 分布式链路追踪和APM 工具帮助我们确定造成问题的根源,允许开发和操作团队进行相应的优化。我们希望您喜欢这一点,并且喜欢Apache Skywalking和我们的新链路追踪可视化界面。如果你喜欢的话,在github上面给我们加start来鼓励我们\nSkywakling 6计划在2019年的1月底完成release。您可以通过以下渠道联系项目团队成员\n 关注 skywalking推特 订阅邮件:dev@skywalking.apache.org。发送邮件到 dev-subscribe@kywalking.apache.org 来订阅. 
加入Gitter聊天室  ","excerpt":"作者: Wu Sheng, tetrate, SkyWalking original creator GitHub, Twitter, Linkedin 翻译: jjlu521016  背景 在当前的 …","ref":"/zh/2019-01-02-understand-trace-trans2cn/","title":"更容易理解将要到来的分布式链路追踪 6.0GA (翻译)"},{"body":"Background Distributed tracing is a necessary part of modern microservices architecture, but how to understand or use distributed tracing data is unclear to some end users. This blog overviews typical distributed tracing use cases with new visualization features in SkyWalking v6. We hope new users will understand more through these examples.\nMetric and topology Trace data underpins in two well known analysis features: metric and topology\nMetric of each service, service instance, endpoint are derived from entry spans in trace. Metrics represent response time performance. So, you could have average response time, 99% response time, success rate, etc. These are broken down by service, service instance, endpoint.\nTopology represents links between services and is distributed tracing\u0026rsquo;s most attractive feature. Topologies allows all users to understand distributed service relationships and dependencies even when they are varied or complex. This is important as it brings a single view to all interested parties, regardless of if they are a developer, designer or operator.\nHere\u0026rsquo;s an example topology of 4 projects, including Kafka and two outside dependencies.\nTopology in SkyWalking optional UI, RocketBot\nTrace In a distributed tracing system, we spend a lot of resources(CPU, Memory, Disk and Network) to generate, transport and persistent trace data. Let\u0026rsquo;s try to answer why we do this? What are the typical diagnosis and system performance questions we can answer with trace data?\nSkyWalking v6 includes two trace views:\n TreeMode: The first time provided. Help you easier to identify issues. ListMode: Traditional view in time line, also usually seen in other tracing system, such as Zipkin.  Error occurred In the trace view, the easiest part is locating the error, possibly caused by a code exception or network fault. Both ListMode and TreeMode can identify errors, while the span detail screen provides details.\nListMode error span\nTreeMode error span\nSlow span A high priority feature is identifying the slowest spans in a trace. This uses execution duration captured by application agents. In the old ListMode trace view, parent span almost always includes the child span\u0026rsquo;s duration, due to nesting. In other words, a slow span usually causes its parent to also become slow. In SkyWalking 6, we provide Top 5 of slow span filter to help you locate the spans directly.\nTop 5 slow span\nThe above screenshot highlights the top 5 slow spans, excluding child span duration. Also, this shows all spans' execution time, which helps identify the slowest ones.\nToo many child spans In some cases, individual durations are quick, but the trace is still slow, like this one:\nTrace with no slow span\nTo understand if the root problem is related to too many operations, use Top 5 of children span number. This filter shows the amount of children each span has, highlighting the top 5.\n13 database accesses of a span\nIn this screenshot, there is a span with 13 children, which are all Database accesses. Also, when you see overview of trace, database cost 1380ms of this 2000ms trace.\n1380ms database accesses\nIn this example, the root cause is too many database accesses. 
This is also typical in other scenarios like too many RPCs or cache accesses.\nTrace depth Trace depth is also related to latency. Like the too many child spans scenario, each span’s latency looks good, but the whole trace is slow.\nTrace depth\nHere, the slowest spans are less than 500ms, which is not too slow for a 2000ms trace. When you see the first line, there are four different colors representing the four services involved in this distributed trace. Every one of them costs 100~400ms, and together the four take nearly 2000ms. From here, we know this slow trace is caused by 3 RPCs in a serial sequence.\nAt the end Distributed tracing and APM tools help users identify root causes, allowing development and operation teams to optimize accordingly. We hope you enjoyed this, and love Apache SkyWalking and our new trace visualization. If so, give us a star on GitHub to encourage us.\nSkyWalking 6 is scheduled to release at the end of January 2019. You can contact the project team through the following channels:\n Follow SkyWalking twitter Subscribe mailing list: dev@skywalking.apache.org . Send to dev-subscribe@skywalking.apache.org to subscribe to the mailing list. Join Gitter room.  ","excerpt":"Background Distributed tracing is a necessary part of modern microservices architecture, but how to …","ref":"/blog/2019-01-01-understand-trace/","title":"Understand distributed trace easier in the incoming 6-GA"},{"body":"6.0.0-beta release. Go to downloads page to find release tars.\nKey updates\n Bugs fixed, close to GA New protocols provided, old still compatible. Spring 5 supported MySQL and TiDB as optional storage  ","excerpt":"6.0.0-beta release. Go to downloads page to find release tars.\nKey updates\n Bugs fixed, close to GA …","ref":"/events/release-apache-skywalking-apm-6-0-0-beta/","title":"Release Apache SkyWalking APM 6.0.0-beta"},{"body":"Based on his contributions. 
Including created RocketBot as our secondary UI, new website and very …","ref":"/events/welcome-yao-wang-as-a-new-ppmc/","title":"Welcome Yao Wang as a new PPMC"},{"body":"导读  SkyWalking 中 Java 探针是使用 JavaAgent 的两大字节码操作工具之一的 Byte Buddy(另外是 Javassist)实现的。项目还包含.Net core 和 Nodejs 自动探针,以及 Service Mesh Istio 的监控。总体上,SkyWalking 是一个多语言,多场景的适配,特别为微服务、云原生和基于容器架构设计的可观测性分析平台(Observability Analysis Platform)。 本文基于 SkyWalking 5.0.0-RC2 和 Byte Buddy 1.7.9 版本,会从以下几个章节,让大家掌握 SkyWalking Java 探针的使用,进而让 SkyWalking 在自己公司中的二次开发变得触手可及。  Byte Buddy 实现 JavaAgent 项目 迭代 JavaAgent 项目的方法论 SkyWalking agent 项目如何 Debug SkyWalking 插件开发实践   文章底部有 SkyWalking 和 Byte Buddy 相应的学习资源。  Byte Buddy 实现  首先如果你对 JavaAgent 还不是很了解可以先百度一下,或在公众号内看下《JavaAgent 原理与实践》简单入门下。 SpringMVC 分发请求的关键方法相信已经不用我在赘述了,那我们来编写 Byte Buddy JavaAgent 代码吧。  public class AgentMain { public static void premain(String agentOps, Instrumentation instrumentation) { new AgentBuilder.Default() .type(ElementMatchers.named(\u0026#34;org.springframework.web.servlet.DispatcherServlet\u0026#34;)) .transform((builder, type, classLoader, module) -\u0026gt; builder.method(ElementMatchers.named(\u0026#34;doDispatch\u0026#34;)) .intercept(MethodDelegation.to(DoDispatchInterceptor.class))) .installOn(instrumentation); } }  编写 DispatcherServlet doDispatch 拦截器代码(是不是跟 AOP 如出一辙)  public class DoDispatchInterceptor { @RuntimeType public static Object intercept(@Argument(0) HttpServletRequest request, @SuperCall Callable\u0026lt;?\u0026gt; callable) { final StringBuilder in = new StringBuilder(); if (request.getParameterMap() != null \u0026amp;\u0026amp; request.getParameterMap().size() \u0026gt; 0) { request.getParameterMap().keySet().forEach(key -\u0026gt; in.append(\u0026#34;key=\u0026#34; + key + \u0026#34;_value=\u0026#34; + request.getParameter(key) + \u0026#34;,\u0026#34;)); } long agentStart = System.currentTimeMillis(); try { return callable.call(); } catch (Exception e) { System.out.println(\u0026#34;Exception :\u0026#34; + e.getMessage()); return null; } finally { System.out.println(\u0026#34;path:\u0026#34; + request.getRequestURI() + \u0026#34; 入参:\u0026#34; + in + \u0026#34; 耗时:\u0026#34; + (System.currentTimeMillis() - agentStart)); } } }  resources/META-INF/MANIFEST.MF  Manifest-Version: 1.0 Premain-Class: com.z.test.agent.AgentMain Can-Redefine-Classes: true  pom.xml 文件  dependencies +net.bytebuddy.byte-buddy +javax.servlet.javax.servlet-api *scope=provided plugins +maven-jar-plugin *manifestFile=src/main/resources/META-INF/MANIFEST.MF +maven-shade-plugin *include:net.bytebuddy:byte-buddy:jar: +maven-compiler-plugin  小结:没几十行代码就完成了,通过 Byte Buddy 实现应用组件 SpringMVC 记录请求路径、入参、执行时间 JavaAgent 项目,是不是觉得自己很优秀。  持续迭代 JavaAgent  本章节主要介绍 JavaAgent 如何 Debug,以及持续集成的方法论。 首先我的 JavaAgent 项目目录结构如图所示: 应用项目是用几行代码实现的 SpringBootWeb 项目:  @SpringBootApplication(scanBasePackages = {\u0026#34;com\u0026#34;}) public class TestBootWeb { public static void main(String[] args) { SpringApplication.run(TestBootWeb.class, args); } @RestController public class ApiController { @PostMapping(\u0026#34;/ping\u0026#34;) public String ping(HttpServletRequest request) { return \u0026#34;pong\u0026#34;; } } }  下面是关键 JavaAgent 项目如何持续迭代与集成:  VM options增加:-JavaAgent:{$HOME}/Code/github/z_my_test/test-agent/target/test-agent-1.0-SNAPSHOT.jar=args Before launch 在Build之前增加: Working directory:{$HOME}/Code/github/incubator-skywalking Command line:-T 1C -pl test-agent -am clean package -Denforcer.skip=true -Dmaven.test.skip=true -Dmaven.compile.fork=true  小结:看到这里的将 JavaAgent 持续迭代集成方法,是不是瞬间觉得自己手心已经发痒起来,很想编写一个自己的 agent 
项目了呢,等等还有一个好消息:test-demo 这 10 几行的代码实现的 Web 服务,居然有 5k 左右的类可以使用 agent 增强。 注意 mvn 编译加速的命令是 maven3 + 版本以上才支持的哈。  SkyWalking Debug  峰回路转,到了文章的主题《SkyWalking 之高级用法》的正文啦。首先,JavaAgent 项目想 Debug,还需要将 agent 代码与接入 agent 项目至少在同一个工作空间内,网上方法有很多,这里我推荐大家一个最简单的方法。File-\u0026gt;New-\u0026gt;Module from Exisiting Sources… 引入 skywalking-agent 源码即可 详细的 idea 编辑器配置: 优化 SkyWalking agent 编译时间,我的集成时间优化到 30 秒左右:  VM options增加:-JavaAgent:-JavaAgent:{$HOME}/Code/github/incubator-skywalking/skywalking-agent/skywalking-agent.jar:不要用dist里面的skywalking-agent.jar,具体原因大家可以看看源码:apm-sniffer/apm-agent/pom.xml中的maven插件的使用。 Before launch 在Build之前增加: Working directory:{$HOME}/Code/github/incubator-skywalking Command line:-T 1C -pl apm-sniffer/apm-sdk-plugin -amd clean package -Denforcer.skip=true -Dmaven.test.skip=true -Dmaven.compile.fork=true: 这里我针对插件包,因为紧接着下文要开发插件 另外根pom注释maven-checkstyle-plugin也可加速编译 kob 之 SkyWalking 插件编写  kob(贝壳分布式作业调度框架)是贝壳找房项目微服务集群中的基础组件,通过编写贝壳分布式作业调度框架的 SkyWalking 插件,可以实时收集作业调度任务的执行链路信息,从而及时得到基础组件的稳定性,了解细节可点击阅读《贝壳分布式调度框架简介》。想详细了解 SkyWalking 插件编写可在文章底部参考链接中,跳转至对应的官方资源,好话不多说,代码一把唆起来。 apm-sdk-plugin pom.xml 增加自己的插件 model  \u0026lt;artifactId\u0026gt;apm-sdk-plugin\u0026lt;/artifactId\u0026gt; \u0026lt;modules\u0026gt; \u0026lt;module\u0026gt;kob-plugin\u0026lt;/module\u0026gt; ... \u0026lt;modules\u0026gt;  resources.skywalking-plugin.def 增加自己的描述  kob=org.apache.skywalking.apm.plugin.kob.KobInstrumentation  在 SkyWalking 的项目中,通过继承 ClassInstanceMethodsEnhancePluginDefine 可以定义需要拦截的类和增强的方法,编写作业调度方法的 instrumentation  public class KobInstrumentation extends ClassInstanceMethodsEnhancePluginDefine { private static final String ENHANCE_CLASS = \u0026#34;com.ke.kob.client.spring.core.TaskDispatcher\u0026#34;; private static final String INTERCEPT_CLASS = \u0026#34;org.apache.skywalking.apm.plugin.kob.KobInterceptor\u0026#34;; @Override protected ClassMatch enhanceClass() { return NameMatch.byName(ENHANCE_CLASS); } @Override protected ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return null; } @Override protected InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[] { new InstanceMethodsInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher() { return named(\u0026#34;dispatcher1\u0026#34;); } @Override public String getMethodsInterceptor() { return INTERCEPT_CLASS; } @Override public boolean isOverrideArgs() { return false; } } }; } }  通过实现 InstanceMethodsAroundInterceptor 后,定义 beforeMethod、afterMethod 和 handleMethodException 的实现方法,可以环绕增强指定目标方法,下面自定义 interceptor 实现 span 的跟踪(这里需要注意 SkyWalking 中 span 的生命周期,在 afterMethod 方法中结束 span)  public class KobInterceptor implements InstanceMethodsAroundInterceptor { @Override public void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable { final ContextCarrier contextCarrier = new ContextCarrier(); com.ke.kob.client.spring.model.TaskContext context = (TaskContext) allArguments[0]; CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(JSON.toJSONString(context.getUserParam())); } AbstractSpan span = ContextManager.createEntrySpan(\u0026#34;client:\u0026#34;+allArguments[1]+\u0026#34;,task:\u0026#34;+context.getTaskKey(), contextCarrier); span.setComponent(ComponentsDefine.TRANSPORT_CLIENT); SpanLayer.asRPCFramework(span); } @Override public Object afterMethod(EnhancedInstance objInst, Method 
method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable { ContextManager.stopSpan(); return ret; } @Override public void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t) { } }  实现效果,将操作名改成任务执行节点 + 任务执行方法,实现 kob 的 SkyWalking 的插件编写,加上报警体系,可以进一步增加公司基础组件的稳定性。  参考链接  Apache SkyWalking Byte Buddy(runtime code generation for the Java virtual machine)  ","excerpt":"导读  SkyWalking 中 Java 探针是使用 JavaAgent 的两大字节码操作工具之一的 Byte Buddy(另外是 Javassist)实现的。项目还包含.Net core …","ref":"/zh/2018-12-21-skywalking-apm-sniffer-beginning/","title":"SkyWalking apm-sniffer 原理学习与插件编写"},{"body":"搭建调试环境 阅读 SkyWalking 源码,从配置调试环境开始。\n一定一定一定不要干读代码,而是通过调试的方式。\n 01 通过 Skywalking-5.x 版本的源码构建并运行 👉:哔哩哔哩 | 腾讯视频 02 通过 Skywalking-6.x 版本的源码构建并运行 👉:哔哩哔哩 | 腾讯视频 03 Java 应用(探针)接入 Skywalking[6.x] 👉:哔哩哔哩 | 腾讯视频  SkyWalking 3.X 源码解析合集 虽然是基于 3.X 版本的源码解析,但是对于阅读 SkyWalking Java Agent 和插件部分,同样适用。\n对于 SkyWalking Collector 部分,可以作为一定的参考。\n 《SkyWalking 源码分析 —— 调试环境搭建》 《SkyWalking 源码分析 —— Agent 初始化》 《SkyWalking 源码分析 —— Agent 插件体系》 《SkyWalking 源码分析 —— Collector 初始化》 《SkyWalking 源码分析 —— Collector Cluster 集群管理》 《SkyWalking 源码分析 —— Collector Client Component 客户端组件》 《SkyWalking 源码分析 —— Collector Server Component 服务器组件》 《SkyWalking 源码分析 —— Collector Jetty Server Manager》 《SkyWalking 源码分析 —— Collector gRPC Server Manager》 《SkyWalking 源码分析 —— Collector Naming Server 命名服务》 《SkyWalking 源码分析 —— Collector Queue 队列组件》 《SkyWalking 源码分析 —— Collector Storage 存储组件》 《SkyWalking 源码分析 —— Collector Streaming Computing 流式处理(一)》 《SkyWalking 源码分析 —— Collector Streaming Computing 流式处理(二)》 《SkyWalking 源码分析 —— Collector Cache 缓存组件》 《SkyWalking 源码分析 —— Collector Remote 远程通信服务》 《SkyWalking 源码分析 —— DataCarrier 异步处理库》 《SkyWalking 源码分析 —— Agent Remote 远程通信服务》 《SkyWalking 源码分析 —— 应用于应用实例的注册》 《SkyWalking 源码分析 —— Agent DictionaryManager 字典管理》 《SkyWalking 源码分析 —— Agent 收集 Trace 数据》 《SkyWalking 源码分析 —— Agent 发送 Trace 数据》 《SkyWalking 源码分析 —— Collector 接收 Trace 数据》 《SkyWalking 源码分析 —— Collector 存储 Trace 数据》 《SkyWalking 源码分析 —— JVM 指标的收集与存储》 《SkyWalking 源码分析 —— 运维界面(一)之应用视角》 《SkyWalking 源码分析 —— 运维界面(二)之应用实例视角》 《SkyWalking 源码分析 —— 运维界面(三)之链路追踪视角》 《SkyWalking 源码分析 —— 运维界面(四)之操作视角》 《SkyWalking 源码分析 —— @Trace 注解想要追踪的任何方法》 《SkyWalking 源码分析 —— traceId 集成到日志组件》 《SkyWalking 源码分析 —— Agent 插件(一)之 Tomcat》 《SkyWalking 源码分析 —— Agent 插件(二)之 Dubbo》 《SkyWalking 源码分析 —— Agent 插件(三)之 SpringMVC》 《SkyWalking 源码分析 —— Agent 插件(四)之 MongoDB》  SkyWalking 6.X 源码解析合集  《SkyWalking 6.x 源码分析 —— 调试环境搭建》  ","excerpt":"搭建调试环境 阅读 SkyWalking 源码,从配置调试环境开始。\n一定一定一定不要干读代码,而是通过调试的方式。\n 01 通过 Skywalking-5.x 版本的源码构建并运行 👉:哔哩哔哩 | …","ref":"/zh/2018-12-21-skywalking-source-code-read/","title":"SkyWalking 源码解析合集"},{"body":"","excerpt":"","ref":"/zh_tags/source-code/","title":"Source Code"},{"body":"版本选择 我们采用的是 5.0.0-RC2 的版本,SkyWalking 的版本信息可以参考 https://github.com/apache/incubator-skywalking/blob/5.x/CHANGES.md\n那么为什么我们没有采用 5.1.0 版本呢,这是因为我们公司内部需要支持 es x-pack,但是在官方发布里面,没有支持 xpack 的版本。\n在 Apache SkyWalking 官方文档 https://github.com/CharlesMaster/incubator-skywalking/tree/master/docs/others/cn 中有提到,SkyWalking 5.x 仍受社区支持。\n对于用户计划从 5.x 升级到 6.x,您应该知道关于有一些概念的定义的变更。最重要的两个改变了的概念是:\n Application(在 5.x 中)更改为 Service(在 6.x 中),Application Instance 也更改为 Service Instance。 Service(在 5.x 中)更改为 Endpoint(在 6.x 中)。  图文详解 Apache SkyWalking 的监控界面由 Monitor 和 Trace 两者构成,Monitor 菜单又包括 Dashbord、Topology、Application、Service、Alarm 五个子菜单构成。本文就是围绕这些菜单分别逐一进行介绍。\nMonitor 当用户通过 SkyWalking 登陆界面使用用户名、密码登陆以后,就会默认进入到 SkyWalking 的 
Monitor 下的 Dashboard 界面\nDashboard 下图就是用户登陆之后都会看到的关键 Dashboard 页面,在这个页面的下方的关键指标,图中都做了详细的解释。\n上图中 app 需要强调的是,52 个 app 并不代表 52 个应用,比如 paycenter 有两台 paycenter1 和 paycenter2 就算了 2 个 app,当然还有一些应用是 3 个以上的。在我们公司,paycenter1、paycenter2 这些运维都和我们跳板机管理平台上的名称设置的一样,约定大于配置,开发人员可以更加便捷的排查问题。\n 再次修正一下,关于 dashboard 页面的 app 数,语言类探针,是探针的 app_code 来决定的。比如我们公司的线上配置就是 agent.application_code=auth-center-1\n 上图中需要解释两个概念:\n cpm 代表每分钟请求次数 SLA=(TRANSACTION_CALLS- TRANSACTION_ERROR_CALLS ) * 10000 ) / TRANSACTION_CALLS  该页面主要支持四个跳转:\n一、在上图中,App 板块上的帮助选项是可以直接跳转到 Application 监控页面的。 二、 Service 板块上的帮助选项是可以直接跳转到 Service 监控页面的。\n三、 Slow Service 列表中的每一个慢服务点击以后都会进入到其专项的 Service 监控页面。\n四、 Application Throughput 列表中的每一个 Application 点击以后也都是可以进入到其专项的 Application 监控页面。\n 关于 Application 和 Service 的详细介绍我们后续会展开\n 在 Dashboard 的页面上部分,还有一个选择功能模块: 左侧部分可以定期 refresh Dashboard 的数据,右侧则可以调整整体的查询区间。\nTopology 点击 Monitor 菜单下的 Topology 你会看到下面这张拓扑图\n当然这张图太过于夸张了,如果接入 SkyWalking 的应用并不是很多,会如下图所示: 左侧的三个小按钮可以调整你的视图,支持拖拽。右侧可以输入你所关心的应用名。比如我们输入一个支付和订单两个应用,左侧的拓扑图会变得更加清晰:\n另外,上图中的绿色圆圈都是可以点击的,如果你点击以后,还会出现节点信息: Application 点击 Monitor 菜单下的 Application 你会看到下面这张图,这张图里你可以看到的东西都做了注解。\n这张图里有一个惊喜,就是如果你点开 More Server Details,你可以看到更多的信息\n是的,除了 Host、IPv4、Pid、OS 以外,你还可以看到 CPU、Heap、Non-Heap、GC(Young GC、Old GC)等详细监控信息。\nService 点击 Monitor 菜单下的 Service 你会看到下面这张图,这张图里你可以看到的同样都做了注解。 关于 Dependency Map 这张图我们再补充一下,鼠标悬停可以看到每个阶段的执行时间,这是 Service 下的功能 我们点开图中该图中 Top 20 Slow Traces 下面的被我马赛克掉的 trace 的按钮框,可以看到如下更加详细的信息:\n这些信息可以帮助我们知道每一个方法在哪个阶段那个具体实现耗时了多久。\n如上图所示,每一行基本都是可以打开的,每一行都包含了 Tags、Logs 等监控内容\nAlarm 点击 Monitor 菜单下的 Alarm 你会看到告警菜单。目前 5.X 版本的还没有接入邮件、短信等告警方式,后续 6 支持 webhook,用户可以自己去接短信和邮件。\n告警内容中你可以看到 Applicaion、Server 和 Service 三个层面的告警内容\nTrace Trace 是一个非常实用的功能,用户可以根据精确的 TraceId 去查找\n也可以设定时间段去查找\n我在写使用手册时候,非常巧的是,看到了上图三起异常,于是我们往下拉列表看到了具体的数据\n点击进去,我们可以看到具体的失败原因 当然用户也可以直接将 Trace State 调整为 Error 级别进行查询\n再回顾一遍 一、首先我们进入首页:\n二、点击一下首页的 Slow Service 的 projectC,可以看到如下信息:\n三、如果点击首页的 Appliation Throughput 中的 projectD,可以看到如下信息:\n四、继续点进去右下角的这个 slow service 里的 Consumer,我们可以看到下图:\n参考资料  https://twitter.com/AsfSkyWalking/status/1013616673218179072 https://twitter.com/AsfSkyWalking/status/1013617100143800320  ","excerpt":"版本选择 我们采用的是 5.0.0-RC2 的版本,SkyWalking …","ref":"/zh/2018-12-18-apache-skywalking-5-0-userguide/","title":"Apache SkyWalking 5.0 中文版图文详解使用手册"},{"body":"","excerpt":"","ref":"/zh_tags/web-ui/","title":"Web UI"},{"body":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome aboard.\n","excerpt":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome …","ref":"/events/welcome-yixiong-cao-as-a-new-committer/","title":"Welcome Yixiong Cao as a new committer"},{"body":"Original link, Tetrate.io blog\nContext The integration of SkyWalking and Istio Service Mesh yields an essential open-source tool for resolving the chaos created by the proliferation of siloed, cloud-based services.\nApache SkyWalking is an open, modern performance management tool for distributed services, designed especially for microservices, cloud native and container-based (Docker, K8s, Mesos) architectures. We at Tetrate believe it is going to be an important project for understanding the performance of microservices. The recently released v6 integrates with Istio Service Mesh and focuses on metrics and tracing. It natively understands the most common language runtimes (Java, .Net, and NodeJS). 
With its new core code, SkyWalking v6 also supports Istrio telemetry data formats, providing consistent analysis, persistence, and visualization.\nSkyWalking has evolved into an Observability Analysis Platform that enables observation and monitoring of hundreds of services all at once. It promises solutions for some of the trickiest problems faced by system administrators using complex arrays of abundant services: Identifying why and where a request is slow, distinguishing normal from deviant system performance, comparing apples-to-apples metrics across apps regardless of programming language, and attaining a complete and meaningful view of performance.\nSkyWalking History Launched in China by Wu Sheng in 2015, SkyWalking started as just a distributed tracing system, like Zipkin, but with auto instrumentation from a Java agent. This enabled JVM users to see distributed traces without any change to their source code. In the last two years, it has been used for research and production by more than 50 companies. With its expanded capabilities, we expect to see it adopted more globally.\nWhat\u0026rsquo;s new Service Mesh Integration Istio has picked up a lot of steam as the framework of choice for distributed services. Based on all the interest in the Istio project, and community feedback, some SkyWalking (P)PMC members decided to integrate with Istio Service Mesh to move SkyWalking to a higher level.\nSo now you can use Skywalking to get metrics and understand the topology of your applications. This works not just for Java, .NET and Node using our language agents, but also for microservices running under the Istio service mesh. You can get a full topology of both kinds of applications.\nObservability analysis platform With its roots in tracing, SkyWalking is now transitioning into an open-standards based Observability Analysis Platform, which means the following:\n It can accept different kinds and formats of telemetry data from mesh like Istio telemetry. Its agents support various popular software technologies and frameworks like Tomcat, Spring, Kafka. The whole supported framework list is here. It can accept data from other compliant sources like Zipkin-formatted traces reported from Zipkin, Jaeger, or OpenCensus clients.  SkyWalking is logically split into four parts: Probes, Platform Backend, Storage and UI:\nThere are two kinds of probes:\n Language agents or SDKs following SkyWalking across-thread propagation formats and trace formats, run in the user’s application process. The Istio mixer adaptor, which collects telemetry from the Service Mesh.  The platform backend provides gRPC and RESTful HTTP endpoints for all SkyWalking-supported trace and metric telemetry data. For example, you can stream these metrics into an analysis system.\nStorage supports multiple implementations such as ElasticSearch, H2 (alpha), MySQL, and Apache ShardingSphere for MySQL Cluster. TiDB will be supported in next release.\nSkyWalking’s built-in UI with a GraphQL endpoint for data allows intuitive, customizable integration.\nSome examples of SkyWalking’s UI:\n Observe a Spring app using the SkyWalking JVM-agent   Observe on Istio without any agent, no matter what langugage the service is written in   See fine-grained metrics like request/Call per Minute, P99/95/90/75/50 latency, avg response time, heatmap   Service dependencies and metrics  Service Focused At Tetrate, we are focused on discovery, reliability, and security of your running services. 
This is why we are embracing Skywalking, which makes service performance observable.\nBehind this admittedly cool UI, the aggregation logic is very easy to understand, making it easy to customize SkyWalking in its Observability Analysis Language (OAL) script.\nWe’ll post more about OAL for developers looking to customize SkyWalking, and you can read the official OAL introduction document.\nScripts are based on three core concepts:\n  Service represents a group of workloads that provide the same behaviours for incoming requests. You can define the service name whether you are using instrument agents or SDKs. Otherwise, SkyWalking uses the name you defined in the underlying platform, such as Istio.\n  Service Instance Each workload in the Service group is called an instance. Like Pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process. If you are using an instrument agent, an instance does map to one OS process.\n  Endpoint is a path in a certain service that handles incoming requests, such as HTTP paths or a gRPC service + method. Mesh telemetry and trace data are formatted as source objects (aka scope). These are the input for the aggregation, with the script describing how to aggregate, including input, conditions, and the resulting metric name.\n  Core Features The other core features in SkyWalking v6 are:\n Service, service instance, endpoint metrics analysis. Consistent visualization in Service Mesh and no mesh. Topology discovery, Service dependency analysis. Distributed tracing. Slow services and endpoints detected. Alarms.  Of course, SkyWalking has some more upgrades from v5, such as:\n ElasticSearch 6 as storage is supported. H2 storage implementor is back. Kubernetes cluster management is provided. You don’t need Zookeeper to keep the backend running in cluster mode. Totally new alarm core. Easier configuration. More cloud native style. MySQL will be supported in the next release.  Please: Test and Provide Feedback! We would love everyone to try to test our new version. You can find everything you need in our Apache repository,read the document for further details. You can contact the project team through the following channels:\n Submit an issue on GitHub repository Mailing list: dev@skywalking.apache.org . Send to dev-subscribe@kywalking.apache.org to subscribe the mail list. Gitter Project twitter  Oh, and one last thing! If you like our project, don\u0026rsquo;t forget to give us a star on GitHub.\n","excerpt":"Original link, Tetrate.io blog\nContext The integration of SkyWalking and Istio Service Mesh yields …","ref":"/blog/2018-12-12-skywalking-service-mesh-ready/","title":"SkyWalking v6 is Service Mesh ready"},{"body":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome aboard.\n","excerpt":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome …","ref":"/events/welcome-jian-tan-as-a-new-committer/","title":"Welcome Jian Tan as a new committer"},{"body":"APM consistently compatible in language agent(Java, .Net, NodeJS), 3rd party format(Zipkin) and service mesh telemetry(Istio). Go to downloads page to find release tars.\n","excerpt":"APM consistently compatible in language agent(Java, .Net, NodeJS), 3rd party format(Zipkin) and …","ref":"/events/release-apache-skywalking-6-0-0-alpha/","title":"Release Apache SkyWalking 6.0.0-alpha"},{"body":"A stable version of 5.x release. Go to downloads page to find release tars.\n","excerpt":"A stable version of 5.x release. 
Go to downloads page to find release tars.","ref":"/events/release-apache-skywalking-5-0-0-ga/","title":"Release Apache SkyWalking 5.0.0-GA"},{"body":"5.0.0-RC2 release. Go to downloads page to find release tars.\n","excerpt":"5.0.0-RC2 release. Go to downloads page to find release tars.","ref":"/events/release-apache-skywalking-5-0-0-rc2/","title":"Release Apache SkyWalking 5.0.0-RC2"},{"body":"5.0.0-beta2 release. Go to downloads page to find release tars.\n","excerpt":"5.0.0-beta2 release. Go to downloads page to find release tars.","ref":"/events/release-apache-skywalking-5-0-0-beta2/","title":"Release Apache SkyWalking 5.0.0-beta2"},{"body":"Translated by Sheng Wu.\nIn many big systems, distributed and especially microservice architectures have become more and more popular. With the increase of modules and services, one incoming request could cross dozens of services. How do we pinpoint issues in the online system and the bottlenecks of the whole distributed system? This has become a very important problem that must be resolved.\nTo resolve these problems in distributed systems, Google published the paper “Dapper, a Large-Scale Distributed Systems Tracing Infrastructure”, which described the designs and ideas behind building a distributed tracing system. Many projects created in the last 10 years were inspired by it. In 2015, Apache SkyWalking was created by Wu Sheng, at first as a simple open-source distributed tracing system. Through almost 3 years of development, by 2018, with its 5.0.0-alpha/beta releases, it had already become a cool open source APM system for cloud native, container based systems.\nEarly this year, I was trying to build the Butterfly open source APM in .NET Core, and that is when I met the Apache SkyWalking team and its creator. I decided to join them, and cooperate with them, to provide a .NET Core agent natively compatible with SkyWalking. In April, I released the first version of the .NET Core agent, 0.1.0. After several weeks of iteration, we released 0.2.0, increasing stability and adding HttpClient and database driver support.\nBefore using the .NET Core agent, we need to deploy the SkyWalking collector, UI and ElasticSearch 5.x. You can download the release versions here: http://skywalking.apache.org/downloads/ and follow the docs (Deploy-backend-in-standalone-mode, Deploy-backend-in-cluster-mode) to set up the backend.\nHere, I am giving a quick start to show how to monitor a demo distributed .NET Core application. I can say that it is easy.\n git clone https://github.com/OpenSkywalking/skywalking-netcore.git\n  cd skywalking-netcore\n  dotnet restore\n  dotnet run -p sample/SkyWalking.Sample.Backend dotnet run -p sample/SkyWalking.Sample.Frontend\n Now you can open http://localhost:5001/api/values to access the demo application. 
Then you can open SkyWalking WebUI http://localhost:8080\n  Overview of the whole distributed system   Topology of distributed system   Application view   Trace query   Span’s tags, logs and related traces   GitHub  Website: http://skywalking.apache.org/ SkyWalking Github Repo: https://github.com/apache/incubator-skywalking SkyWalking-NetCore Github Repo: https://github.com/OpenSkywalking/skywalking-netcore  ","excerpt":"Translated by Sheng Wu.\nIn many big systems, distributed and especially microservice architectures …","ref":"/blog/2018-05-24-skywalking-net/","title":"Apache SkyWalking provides open source APM and distributed tracing in .NET Core field"},{"body":"在大型网站系统设计中,随着分布式架构,特别是微服务架构的流行,我们将系统解耦成更小的单元,通过不断的添加新的、小的模块或者重用已经有的模块来构建复杂的系统。随着模块的不断增多,一次请求可能会涉及到十几个甚至几十个服务的协同处理,那么如何准确快速的定位到线上故障和性能瓶颈,便成为我们不得不面对的棘手问题。\n为解决分布式架构中复杂的服务定位和性能问题,Google 在论文《Dapper, a Large-Scale Distributed Systems Tracing Infrastructure》中提出了分布式跟踪系统的设计和构建思路。在这样的背景下,Apache SkyWalking 创建于 2015 年,参考 Dapper 论文实现分布式追踪功能,并逐渐进化为一个完整功能的 Application Performance Management 系统,用于追踪、监控和诊断大型分布式系统,尤其是容器和云原生下的微服务系统。\n今年初我在尝试使用.NET Core 构建分布式追踪系统 Butterfly 时接触到 SkyWalking 团队,开始和 SkyWalking 团队合作探索 SkyWalking 对.NET Core 的支持,并于 4 月发布 SkyWalking .NET Core 探针的 第一个版本,同时我也有幸加入 SkyWalking 团队共同进行 SkyWalking 在多语言生态的推动。在.NET Core 探针 v0.1 版本发布之后,得到了一些同学的尝鲜使用,也得到诸多改进的建议。经过几周的迭代,SkyWalking .NET Core 探针于今天发布 v0.2 release,在 v0.1 的基础上增加了\u0008稳定性和 HttpClient 及数据库驱动的追踪支持。\n在使用 SkyWalking 对.NET Core 应用追踪之前,我们需要先部署 SkyWalking Collector 收集分析 Trace 和 Elasticsearch 作为 Trace 数据存储。SkyWalking 支持 5.x 的 ES,所以我们需要下载安装对应版本的 ES,并配置 ES 的 cluster.name 为 CollectorDBCluster。然后部署 SkyWalking 5.0 beta 或更高版本 (下载地址:http://skywalking.apache.org/downloads/)。更详细的 Collector 部署文档,请参考 Deploy-backend-in-standalone-mode 和 Deploy-backend-in-cluster-mode。\n最后我们使用示例项目来演示在.NET Core 应用中使用 SkyWalking 进行追踪和监控,克隆 SkyWalking-NetCore 项目到本地:\ngit clone https://github.com/OpenSkywalking/skywalking-netcore.git 进入 skywalking-netcore 目录:\ncd skywalking-netcore 还原 nuget package:\ndotnet restore 启动示例项目:\ndotnet run -p sample/SkyWalking.Sample.Backend dotnet run -p sample/SkyWalking.Sample.Frontend 访问示例应用:\n打开 SkyWalking WebUI 即可看到我们的应用监控面板 http://localhost:8080\nDashboard 视图\nTopologyMap 视图\nApplication 视图\nTrace 视图\nTraceDetails 视图\nGitHub  SkyWalking Github Repo:https://github.com/apache/incubator-skywalking SkyWalking-NetCore Github Repo:https://github.com/OpenSkywalking/skywalking-netcore  ","excerpt":"在大型网站系统设计中,随着分布式架构,特别是微服务架构的流行,我们将系统解耦成更小的单元,通过不断的添加新的、小的模块或者重用已经有的模块来构建复杂的系统。随着模块的不断增多,一次请求可能会涉及到十几 …","ref":"/zh/2018-05-24-skywalking-net/","title":"Apache SkyWalking 为.NET Core带来开箱即用的分布式追踪和应用性能监控"},{"body":"","excerpt":"","ref":"/zh_tags/dotnetcore/","title":"DotNetCore"},{"body":"","excerpt":"","ref":"/tags/dotnetcore/","title":"DotNetCore"},{"body":"5.0.0-beta release. Go to downloads page to find release tars.\n","excerpt":"5.0.0-beta release. Go to downloads page to find release tars.","ref":"/events/release-apache-skywalking-5-0-0-beta/","title":"Release Apache SkyWalking 5.0.0-beta"},{"body":"5.0.0-alpha release. Go to downloads page to find release tars.\n","excerpt":"5.0.0-alpha release. Go to downloads page to find release tars.","ref":"/events/release-apache-skywalking-apm-5-0-0-alpha/","title":"Release Apache SkyWalking APM 5.0.0-alpha"},{"body":"","excerpt":"","ref":"/index.json","title":""},{"body":"10.0.0 Project  Support Java 21 runtime. Support oap-java21 image for Java 21 runtime. Upgrade OTEL collector version to 0.92.0 in all e2e tests. Switch CI macOS runner to m1. 
Upgrade PostgreSQL driver to 42.4.4 to fix CVE-2024-1597. Remove CLI(swctl) from the image. Remove CLI_VERSION variable from Makefile build. Add BanyanDB to docker-compose quickstart. Bump up Armeria, jackson, netty, jetcd and grpc to fix CVEs.  OAP Server  Add layer parameter to the global topology graphQL query. Add is_present function in MQE for check if the list metrics has a value or not. Remove unreasonable default configurations for gRPC thread executor. Remove gRPCThreadPoolQueueSize (SW_RECEIVER_GRPC_POOL_QUEUE_SIZE) configuration. Allow excluding ServiceEntries in some namespaces when looking up ServiceEntries as a final resolution method of service metadata. Set up the length of source and dest IDs in relation entities of service, instance, endpoint, and process to 250(was 200). Support build Service/Instance Hierarchy and query. Change the string field in Elasticsearch storage from keyword type to text type if it set more than 32766 length. [Break Change] Change the configuration field of ui_template and ui_menu in Elasticsearch storage from keyword type to text. Support Service Hierarchy auto matching, add auto matching layer relationships (upper -\u0026gt; lower) as following:  MESH -\u0026gt; MESH_DP MESH -\u0026gt; K8S_SERVICE MESH_DP -\u0026gt; K8S_SERVICE GENERAL -\u0026gt; K8S_SERVICE   Add namespace suffix for K8S_SERVICE_NAME_RULE/ISTIO_SERVICE_NAME_RULE and metadata-service-mapping.yaml as default. Allow using a dedicated port for ALS receiver. Fix log query by traceId in JDBCLogQueryDAO. Support handler eBPF access log protocol. Fix SumPerMinFunctionTest error function. Remove unnecessary annotations and functions from Meter Functions. Add max and min functions for MAL down sampling. Fix critical bug of uncontrolled memory cost of TopN statistics. Change topN group key from StorageId to entityId + timeBucket. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  MYSQL -\u0026gt; K8S_SERVICE POSTGRESQL -\u0026gt; K8S_SERVICE SO11Y_OAP -\u0026gt; K8S_SERVICE VIRTUAL_DATABASE -\u0026gt; MYSQL VIRTUAL_DATABASE -\u0026gt; POSTGRESQL   Add Golang as a supported language for AMQP. Support available layers of service in the topology. Add count aggregation function for MAL Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  NGINX -\u0026gt; K8S_SERVICE APISIX -\u0026gt; K8S_SERVICE GENERAL -\u0026gt; APISIX   Add Golang as a supported language for RocketMQ. Support Apache RocketMQ server monitoring. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  ROCKETMQ -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; ROCKETMQ   Fix ServiceInstance in query. Mock /api/v1/status/buildinfo for PromQL API. Fix table exists check in the JDBC Storage Plugin. Fix day-based table rolling time range strategy in JDBC storage. Add maxInboundMessageSize (SW_DCS_MAX_INBOUND_MESSAGE_SIZE) configuration to change the max inbound message size of DCS. Fix Service Layer when building Events in the EventHookCallback. Add Golang as a supported language for Pulsar. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  RABBITMQ -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; RABBITMQ   Remove Column#function mechanism in the kernel. Make query readMetricValue always return the average value of the duration. 
Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  KAFKA -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; KAFKA   Support ClickHouse server monitoring. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  CLICKHOUSE -\u0026gt; K8S_SERVICE VIRTUAL_DATABASE -\u0026gt; CLICKHOUSE   Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  PULSAR -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; PULSAR   Add Golang as a supported language for Kafka. Support displaying the port services listen to from OAP and UI during server start. Refactor data-generator to support generating metrics. Fix AvgHistogramPercentileFunction legacy name. [Break Change] Labeled Metrics support multiple labels.  Storage: store all label names and values instead of only the values. MQE:  Support querying by multiple labels(name and value) instead using _ as the anonymous label name. aggregate_labels function support aggregate by specific labels. relabels function require target label and rename label name and value.   PromQL:  Support querying by multiple labels(name and value) instead using lables as the anonymous label name. Remove general labels labels/relabels/label function. API /api/v1/labels and /api/v1/label/\u0026lt;label_name\u0026gt;/values support return matched metrics labels.   OAL:  Deprecate percentile function and introduce percentile2 function instead.     Bump up Kafka to fix CVE. Fix NullPointerException in Istio ServiceEntry registry. Remove unnecessary componentIds as series ID in the ServiceRelationClientSideMetrics and ServiceRelationServerSideMetrics entities. Fix not throw error when part of expression not matched any expression node in the MQE and `PromQL. Remove kafka-fetcher/default/createTopicIfNotExist as the creation is automatically since #7326 (v8.7.0). Fix inaccuracy nginx service metrics. Fix/Change Windows metrics name(Swap -\u0026gt; Virtual Memory)  memory_swap_free -\u0026gt; memory_virtual_memory_free memory_swap_total -\u0026gt; memory_virtual_memory_total memory_swap_percentage -\u0026gt; memory_virtual_memory_percentage   Fix/Change UI init setting for Windows Swap -\u0026gt; Virtual Memory Fix Memory Swap Usage/Virtual Memory Usage display with UI init.(Linux/Windows) Fix inaccurate APISIX metrics. Fix inaccurate MongoDB Metrics. Support Apache ActiveMQ server monitoring. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  ACTIVEMQ -\u0026gt; K8S_SERVICE   Calculate Nginx service HTTP Latency by MQE. MQE query: make metadata not return null. MQE labeled metrics Binary Operation: return empty value if the labels not match rather than report error. Fix inaccurate Hierarchy of RabbitMQ Server monitoring metrics. Fix inaccurate MySQL/MariaDB, Redis, PostgreSQL metrics. Support DoubleValue,IntValue,BoolValue in OTEL metrics attributes. [Break Change] gGRPC metrics exporter unified the metric value type and support labeled metrics. Add component definition(ID=152) for c3p0(JDBC3 Connection and Statement Pooling). Fix MQE top_n global query. Fix inaccurate Pulsar and Bookkeeper metrics. MQE support sort_values and sort_label_values functions.  UI  Fix the mismatch between the unit and calculation of the \u0026ldquo;Network Bandwidth Usage\u0026rdquo; widget in Linux-Service Dashboard. Add theme change animation. Implement the Service and Instance hierarchy topology. Support Tabs in the widget visible when MQE expressions. 
Support search on Marketplace. Fix default route. Fix layout on the Log widget. Fix Trace associates with Log widget. Add isDefault to the dashboard configuration. Add expressions to dashboard configurations on the dashboard list page. Update Kubernetes related UI templates for adapt data from eBPF access log. Fix dashboard K8S-Service-Root metrics expression. Add dashboards for Service/Instance Hierarchy. Fix MQE in dashboards when using Card widget. Optimize tooltips style. Fix resizing window causes the trace graph to display incorrectly. Add the not found page(404). Enhance VNode logic and support multiple Trace IDs in span\u0026rsquo;s ref. Add the layers filed and associate layers dashboards for the service topology nodes. Fix Nginx-Instance metrics to instance level. Update tabs of the Kubernetes service page. Add Airflow menu i18n. Add Support for dragging in the trace panel. Add workflow icon. Metrics support multiple labels. Support the SINGLE_VALUE for table widgets. Remove the General metric mode and related logical code. Remove metrics for unreal nodes in the topology. Enhance the Trace widget for batch consuming spans. Clean the unused elements in the UI-templates.  Documentation  Update the release doc to remove the announcement as the tests are through e2e rather than manually. Update the release notification mail a little. Polish docs structure. Move customization docs separately from the introduction docs. Add webhook/gRPC hooks settings example for backend-alarm.md. Begin the process of SWIP - SkyWalking Improvement Proposal. Add SWIP-1 Create and detect Service Hierarchy Relationship. Add SWIP-2 Collecting and Gathering Kubernetes Monitoring Data. Update the Overview docs to add the Service Hierarchy Relationship section. Fix incorrect words for backend-bookkeeper-monitoring.md and backend-pulsar-monitoring.md Document a new way to load balance OAP. Add SWIP-3 Support RocketMQ monitoring. Add OpenTelemetry SkyWalking Exporter deprecated warning doc. Update i18n for rocketmq monitoring. Fix: remove click event after unmounted. Fix: end loading without query results. Update nanoid version to 3.3.7. Update postcss version to 8.4.33. Fix kafka topic name in exporter doc. Fix query-protocol.md, make it consistent with the GraphQL query protocol. Add SWIP-5 Support ClickHouse Monitoring. Remove OpenTelemetry Exporter support from meter doc, as this has been flagged as unmaintained on OTEL upstream. Add doc of one-line quick start script for different storage types. Add FAQ for Why is Clickhouse or Loki or xxx not supported as a storage option?. Add SWIP-8 Support ActiveMQ Monitoring.  All issues and pull requests are here\n","excerpt":"10.0.0 Project  Support Java 21 runtime. Support oap-java21 image for Java 21 runtime. Upgrade OTEL …","ref":"/docs/main/next/en/changes/changes/","title":"10.0.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  
5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x. Support RuntimeContext used outside of TracingContext. Support Oracle ojdbc8 plugin. Support ElasticSearch client transport 5.2-5.6 plugin. Support using agent.config with a given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hystrix scenarios. Fix HTTPComponent client v4 operation name being empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for the Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver possibly missing data in a request. Optimize code in heatmap calculation. Reduce unnecessary division. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count being 0. Fix topology breaking when the RPC client uses an async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by the startup script on Windows. Fix Topology self-link error. Fix stack color mismatching label color in the GC time chart.  Documentation  Add users list. Fix several document typos. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext failing in Tomcat plugin. Fix incompatibility of getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when the MySQL JDBC URL has no databaseName. Fix duplicate slash bug in Spring MVC plugin. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. Fix MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid a memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Fix error rate alarm not being triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in the streaming data model. Support multiple entry spans and multiple service names in one segment duration record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine whether the DEBUG log level is enabled before printing messages. Add static modifier to Logger. Add AspNet component. Filter inactive services in query. Support querying services based on Application. Fix RemoteDataMappingIdNotFoundException. Exclude the component-libraries.xml file from collector-*.jar; make sure it is in /conf only. Separate the single TTL in minutes into minute, hour, day, month metric and trace TTLs. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analyzing Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from the Thermodynamic graph. Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. 
Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps with throughput/cpm (calls per min). Add getThermodynamic service. Update version to beta.  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend mechanism to override agent core implementations. Fix an agent startup sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, supporting Thermodynamic chart query. Support component library setting through yml file for better extensibility. Optimize performance. Support short column names in ES or other storage implementors. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide by zero in query. Fix data not being removed as expected in the ES implementor. Add some checks in the collector modularization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merging the server tab (removed) into application as a sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm (calls per minute). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons' internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about Agent or collector version upgrade. Sync all English documents to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords. Move Ref into Span from Segment. Add span type when registering an operation.  UI -\u0026gt; Collector GraphQL query protocol  First version protocol.  Agent Changes  Support gRPC 1.x plugin. Support kafka 0.11 and 1.x plugin. Support ServiceComb 0.x plugin. Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin. Support Apache httpcomponent AsyncClient 4.x plugin. Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugins for compatibility. Fix jetty client 9 plugin error. Fix async APIs of okhttp plugin error. Fix log config didn\u0026rsquo;t work. Fix a class loader error in okhttp plugin.  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol. Support alarm. Provide a prototype instrument for collector. Support node speculation in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/latest/en/changes/changes-5.x/","title":"5.1.0"},{"body":"5.1.0 Agent Changes  Fix the Spring inherit issue in another way. Fix classloader deadlock in jdk7+ - 5.x. Support Spring MVC 5.x. Support Spring WebFlux 5.x.  Collector Changes  Fix too many open files. Fix the buffer file cannot be deleted.  
5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. 
Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  
Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/next/en/changes/changes-5.x/","title":"5.1.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. 
Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. 
Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/v9.1.0/en/changes/changes-5.x/","title":"5.1.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. 
Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  
Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/v9.2.0/en/changes/changes-5.x/","title":"5.1.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. 
Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. 
Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/v9.3.0/en/changes/changes-5.x/","title":"5.1.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  
Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. 
Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/v9.4.0/en/changes/changes-5.x/","title":"5.1.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. 
Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. 
Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/v9.5.0/en/changes/changes-5.x/","title":"5.1.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. 
Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. 
Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/v9.6.0/en/changes/changes-5.x/","title":"5.1.0"},{"body":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  
Issues and Pull requests\n","excerpt":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - …","ref":"/docs/main/v9.7.0/en/changes/changes-5.x/","title":"5.1.0"},{"body":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. 
(#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. 
Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calendar js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but doesn\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid (a toolkit sketch follows this release's notes). Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector bug. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n
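The Logback tid item above is about printing the trace id through the log toolkit; the same id is also available to application code through the trace toolkit's TraceContext. A hedged sketch follows, assuming apm-toolkit-trace; the MDC key "traceId" is an arbitrary choice here, not something the toolkit defines.

```java
import org.apache.skywalking.apm.toolkit.trace.TraceContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

public class TraceIdLoggingSketch {
    private static final Logger LOG = LoggerFactory.getLogger(TraceIdLoggingSketch.class);

    public void handleRequest() {
        // TraceContext.traceId() returns the active trace id; when no agent
        // is attached or no trace is active it yields an empty/placeholder value.
        MDC.put("traceId", TraceContext.traceId());
        try {
            LOG.info("handling request");
        } finally {
            MDC.remove("traceId");
        }
    }
}
```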
6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional]. TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit (see the sketch after this release's notes). Fix OOM by empty stack of exception. Fix wrong cause exception of stack in span log. Fix the running context not cleared in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async command bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time-series ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provides multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ for WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n
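The span#error toolkit support added in 6.2.0 is the application-facing way to mark the span that the surrounding instrumentation already opened as failed, and to tag it. A minimal sketch, assuming apm-toolkit-trace; these static calls are no-ops when the agent is not attached.

```java
import org.apache.skywalking.apm.toolkit.trace.ActiveSpan;

public class ActiveSpanSketch {
    public void chargeOrder(String orderId) {
        // Tag the span created by the surrounding instrumentation (e.g. the MVC plugin).
        ActiveSpan.tag("order.id", orderId);
        try {
            doCharge(orderId);
        } catch (RuntimeException e) {
            // Mark the active span as errored and attach the exception.
            ActiveSpan.error(e);
            throw e;
        }
    }

    private void doCharge(String orderId) {
        // business logic placeholder
    }
}
```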
6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin (see the sketch after this release's notes). Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+. Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regex rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET. Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bundled in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only considered to be provided by volunteers outside of Apache.  All issues and pull requests are here\n
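The span async close APIs mentioned under 6.1.0 let an instrumentation stop a span in the calling thread while deferring its actual finish to a callback, which is how the Vert.x plugin avoids closing spans too early. The sketch below illustrates that pattern; prepareForAsync()/asyncFinish() and ContextManager.createLocalSpan() are the agent-core methods this changelog refers to, but their exact signatures are an assumption here, and MyAsyncClient is a hypothetical stand-in.

```java
import org.apache.skywalking.apm.agent.core.context.ContextManager;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;

public class AsyncSpanSketch {
    public void onRequestStart(MyAsyncClient client) {
        AbstractSpan span = ContextManager.createLocalSpan("AsyncClient/send");
        // Detach the span from this thread's context; it stays open
        // until asyncFinish() is called, possibly from another thread.
        span.prepareForAsync();
        ContextManager.stopSpan();

        client.send(response -> span.asyncFinish());
    }

    // Hypothetical client type, only here to make the sketch self-contained.
    interface MyAsyncClient {
        void send(java.util.function.Consumer<Object> onComplete);
    }
}
```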
6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019. User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2. Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5. Support webflux. Support a new way to override agent.config by system env. Span tag can be overridden explicitly. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in RestTemplate plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query doesn\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be activated in the right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero. Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is a totally new milestone for the project. At this point, we are not just a distributed tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform (OAP).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available in core level, although they haven\u0026rsquo;t been provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/latest/en/changes/changes-6.x/","title":"6.6.0"}
Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. 
(#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. 
Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). 
Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. 
Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. 
Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/next/en/changes/changes-6.x/","title":"6.6.0"},{"body":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. 
(#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. 
Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. 
Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. 
6.1.0

Project
SkyWalking graduated as an Apache Top Level Project.
- Support compiling the project agent, backend, and UI separately.

Java Agent
- Support Vert.x Core 3.x plugin.
- Support Apache Dubbo plugin.
- Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in the SpringMVC plugin.
- Support span async close APIs in the agent core, used in the Vert.x plugin (see the sketch after these 6.1.0 notes).
- Support MySQL 5 and 8 plugins.
- Support setting the instance id manually (optional).
- Support the customize-enhance trace plugin in the optional list.
- Support setting the peer in Entry Span.
- Support Zookeeper plugin.
- Fix Webflux plugin creating an unexpected Entry Span.
- Fix Kafka plugin NPE in Kafka 1.1+.
- Fix wrong operation name in the PostgreSQL 8.x plugin.
- Fix RabbitMQ plugin NPE.
- Fix agent can't run in JVM 6/7; remove module-info.class.
- Fix agent not working well if there is whitespace in the agent path.
- Fix Spring annotation bug and inheritance enhancement issue.
- Fix CPU accessor bug.

Backend
Performance improved, especially in CPU-limited environments: 3x improvement in the service mesh scenario (no trace) on an 8C16G VM, and significantly lower CPU cost under low payload.
- Support database metrics and slow SQL detection.
- Support setting the max size of metadata queries, and change the default from 100 to 5000.
- Support ElasticSearch template for new features in the future.
- Support shutting down Zipkin trace analysis, because it doesn't fit the production environment.
- Support log type, HTTP_ACCESS_LOG scope and query. No feature provided yet; preparation for future versions.
- Support .NET CLR receiver.
- Support Jaeger trace format, no analysis.
- Support grouping endpoint names by regex rules in the mesh receiver.
- Support the disable statement in OAL.
- Support basic auth in the ElasticSearch connection.
- Support the metrics exporter module and a gRPC implementation.
- Support >, <, >=, <= in OAL.
- Support role mode in the backend.
- Support Envoy metrics.
- Support querying segments by service instance.
- Support setting host/port manually at the cluster coordinator, rather than based on core settings.
- Make sure OAP shuts down when it faces a startup error.
- Support setting separate gRPC/Jetty ip:port for receivers; the default still uses core settings.
- Fix JVM receiver bug.
- Fix wrong destination service in mesh analysis.
- Fix search not working as expected.
- Refactor ScopeDeclaration annotation.
- Refactor the register lock mechanism.
- Add SmartSql component for .NET.
- Add integration tests for the ElasticSearch client.
- Add test cases for the exporter.
- Add test cases for queue consumption.

UI
- RocketBot UI has been accepted and bound in this release.
- Support CLR metrics.

Document
- Documents updated, matching Top Level Project requirements.
- UI licenses updated, according to RocketBot UI IP clearance.
- User wall and powered-by list updated.
- CN documents removed; they will only be provided by volunteers outside Apache.

All issues and pull requests are here.
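The span async close item above lets a span created on one thread be finished later on another thread, which is what the Vert.x plugin needs. The sketch below outlines how a plugin might drive that lifecycle, based on the prepareForAsync()/asyncFinish() calls described in the Java agent plugin development guide; it only does real work inside an application launched with the agent, and the AsyncHttpClient/HttpCallback types are stand-ins invented for the example, not a real library.

```java
import org.apache.skywalking.apm.agent.core.context.ContextManager;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;

// Plugin-style sketch around a fictional async client (requires the agent core on the classpath).
public class AsyncSpanSketch {

    interface HttpCallback { void onComplete(int statusCode); }

    static class AsyncHttpClient {                    // stand-in for a real non-blocking client
        void get(String url, HttpCallback callback) {
            new Thread(() -> callback.onComplete(200)).start();
        }
    }

    public void traceAsyncCall(AsyncHttpClient client) {
        // 1. Create the exit span on the calling thread, as a synchronous plugin would.
        AbstractSpan span = ContextManager.createExitSpan("GET /orders", "example.com:80");
        // 2. Declare that this span will be finished asynchronously, then release the
        //    calling thread's context so the instrumented method can return immediately.
        span.prepareForAsync();
        ContextManager.stopSpan(span);

        client.get("http://example.com/orders", statusCode -> {
            // 3. On the callback thread, close the span once the response has arrived.
            span.asyncFinish();
        });
    }
}
```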
6.0.0-GA

Java Agent
- Support gson plugin (optional).
- Support canal plugin.
- Fix missing ojdbc component id.
- Fix Dubbo plugin conflict.
- Fix OpenTracing tag match bug.
- Fix a missing check in the ignore plugin.

Backend
- Adjust the service inventory entity to add properties.
- Adjust the service instance inventory entity to add properties.
- Add nodeType to the service inventory entity.
- Fix segments being lost when the operation names of local and exit spans are in the ref.
- Fix index names not showing right in logs.
- Fix wrong alarm text.
- Add a test case for the span limit mechanism.
- Add telemetry module and Prometheus implementation, with a Grafana setting.
- Refactor the register API in the storage module.
- Fix H2 and MySQL endpoint dependency map missing the upstream side.
- Optimize the inventory register and refactor the implementation.
- Speed up the trace buffer read.
- Fix and remove unnecessary inventory register operations.

UI
- Add new trace view.
- Add word-break to tag values.

Document
- Add a document about the two startup modes.
- Add PHP agent links.
- Add some CN documents.
- Update year to 2019.
- User wall updated.
- Fix a wrong description in the how-to-build doc.

All issues and pull requests are here.

6.0.0-beta

Protocol
- Provide Trace Data Protocol v2.
- Provide SkyWalking Cross Process Propagation Headers Protocol v2.

Java Agent
- Support Trace Data Protocol v2.
- Support SkyWalking Cross Process Propagation Headers Protocol v2.
- Support SkyWalking Cross Process Propagation Headers Protocol v1 running in a compatible way; it needs to be opened explicitly.
- Support SpringMVC 5.
- Support webflux.
- Support a new way to override agent.config by system environment.
- Span tags can be overridden explicitly.
- Fix Spring Controller inheritance issue.
- Fix ElasticSearch plugin NPE.
- Fix agent classloader deadlock in certain situations.
- Fix agent log typo.
- Fix wrong component id in the RestTemplate plugin.
- Fix using transform ignore() in the wrong way.
- Fix H2 query bug.

Backend
- Support Trace Data Protocol v2; Trace Data Protocol v1 is still supported.
- Support MySQL as storage.
- Support TiDB as storage.
- Support a new way to override application.yml by system environment.
- Support service instance and endpoint alarms.
- Support namespace in the Istio receiver.
- Support service throughput (cpm), success rate (sla), avg response time and p99/p95/p90/p75/p50 response time.
- Support backend trace sampling.
- Support Zipkin format again.
- Support init mode.
- Support namespace in Zookeeper cluster management.
- Support consul plugin in the cluster module.
- The OAL generation tool has been integrated into the main repo, in the Maven compile stage.
- Optimize trace paging query.
- Fix trace query not using fuzzy query in ElasticSearch storage.
- Fix alarm not being activated in the right way.
- Fix unnecessary condition in database and cache number queries.
- Fix wrong namespace bug in ElasticSearch storage.
- Fix "Remote clients selector error: / by zero".
- Fix segment TTL not working.

UI
- Support service throughput (cpm), success rate (sla), avg response time and p99/p95/p90/p75/p50 response time.
- Fix TopN endpoint link not working right.
- Fix trace stack style.
- Fix CI.

Document
- Add more agent setting documents.
- Add more contribution documents.
- Update user wall and powered-by page.
- Add RocketBot UI project link in document.

All issues and pull requests are here.

6.0.0-alpha

SkyWalking 6 is a totally new milestone for the project. At this point, we are not just a distributed tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform (OAP).

The core and most important features in v6 are:
- Support collecting telemetry data from different sources, such as multiple language agents and service mesh.
- An extensible stream analysis core. SQL and cache analysis are available at the core level, although not provided in this release.
- The Observability Analysis Language (OAL), which makes analysis metrics customization available.
- A new GraphQL query protocol, no longer bound to the UI.
- Better UI topology.
- A new alarm core; in alpha, only on service-related metrics.

All issues and pull requests are here.
Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. 
BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. 
Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. 
Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. 
Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. 
Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/v9.2.0/en/changes/changes-6.x/","title":"6.6.0"},{"body":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  
Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. 
Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. 
Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. 
Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. 
Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  
All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/v9.3.0/en/changes/changes-6.x/","title":"6.6.0"},{"body":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. 
(#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. 
Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. 
Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. 
6.1.0
Project
SkyWalking graduated as an Apache Top Level Project.
Support compiling the project agent, backend and UI separately.
Java Agent
Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core, used in the Vert.x plugin (a plugin-side sketch follows this release's notes). Support MySQL 5 and 8 plugins. Support setting instance id manually (optional). Support customized enhance trace plugin in the optional list. Support setting peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin creating an unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+. Fix wrong operation name in PostgreSQL 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can't run in JVM 6/7; remove module-info.class. Fix agent not working well if there is whitespace in the agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.
Backend
Performance improved, especially in CPU-limited environments: 3x improvement in the service mesh scenario (no trace) on an 8C16G VM, and significantly less CPU cost under low payload.
Support database metrics and SLOW SQL detection. Support setting the max size of metadata query, and change the default to 5000 from 100. Support ElasticSearch template for new features in the future. Support shutting down Zipkin trace analysis, because it doesn't fit production environments. Support log type, scope HTTP_ACCESS_LOG and query; no feature provided yet, preparing for future versions. Support .NET CLR receiver. Support Jaeger trace format, no analysis. Support grouping endpoint names by regex rules in the mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support >, <, >=, <= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support setting host/port manually at the cluster coordinator, rather than based on core settings. Make sure OAP shuts down when it faces a startup error. Support setting separated gRPC/Jetty ip:port for the receiver; the default still uses core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search not working as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET. Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.
UI
RocketBot UI has been accepted and bound in this release. Support CLR metrics.
Document
Documents updated, matching Top Level Project requirements. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed; only consider providing them via volunteers outside of Apache.
All issues and pull requests are here
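The span async close APIs above are agent-core APIs aimed at plugin authors (the Vert.x plugin is their first user). The following is only a rough sketch of the prepare/finish pattern under the assumption that the apm-agent-core classes are available on the compile classpath; the CompletableFuture wiring and the operation name are hypothetical, and real plugins drive this from interceptors rather than application code.

```java
import org.apache.skywalking.apm.agent.core.context.ContextManager;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;

import java.util.concurrent.CompletableFuture;

public class AsyncSpanSketch {
    // Sketch: start a span on the calling thread, keep it alive across the async boundary,
    // and finish it from the callback thread once the asynchronous work completes.
    public void submit(CompletableFuture<String> future) {
        AbstractSpan span = ContextManager.createLocalSpan("async/submit");
        span.prepareForAsync();          // detach the span's end from the current thread
        ContextManager.stopSpan(span);   // the segment on this thread can now close normally

        future.whenComplete((result, error) -> {
            if (error != null) {
                span.errorOccurred().log(error);
            }
            span.asyncFinish();          // actually ends the span when the async work is done
        });
    }
}
```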
6.0.0-GA
Java Agent
Support gson plugin (optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.
Backend
Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix segment lost when operation names of local and exit spans are in the ref. Fix the index names not showing correctly in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and Prometheus implementation, with Grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map missing the upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and remove unnecessary inventory register operations.
UI
Add new trace view. Add word-break to tag value.
Document
Add two startup modes document. Add PHP agent links. Add some CN documents. Update year to 2019. User wall updated. Fix a wrong description in how-to-build doc.
All issues and pull requests are here
6.0.0-beta
Protocol
Provide Trace Data Protocol v2. Provide SkyWalking Cross Process Propagation Headers Protocol v2.
Java Agent
Support Trace Data Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in a compatible way; it needs to be enabled explicitly. Support SpringMVC 5. Support webflux. Support a new way to override agent.config by system env. Span tags can be overridden explicitly. Fix Spring Controller inheritance issue. Fix ElasticSearch plugin NPE. Fix agent classloader deadlock in certain situations. Fix agent log typo. Fix wrong component id in the RestTemplate plugin. Fix using transform ignore() in the wrong way. Fix H2 query bug.
Backend
Support Trace Data Protocol v2, while Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput (cpm), successful rate (sla), avg response time and p99/p95/p90/p75/p50 response time (a small percentile illustration follows at the end of these 6.x notes). Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. The OAL generate tool has been integrated into the main repo, in the maven compile stage. Optimize trace paging query. Fix trace query not using fuzzy query in ElasticSearch storage. Fix alarm not being activated in the right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: "/ by zero". Fix segment TTL not working.
UI
Support service throughput (cpm), successful rate (sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link not working right. Fix trace stack style. Fix CI.
Document
Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.
All issues and pull requests are here
6.0.0-alpha
SkyWalking 6 is a totally new milestone for the project. At this point, we are not just a distributed tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform (OAP).
The core and most important features in v6 are:
Support collecting telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available at the core level, although not provided in this release. Provide Observability Analysis Language (OAL) to make analysis metrics customization available. New GraphQL query protocol, not bound to the UI now. UI topology is better now. New alarm core provided; in alpha, only on service related metrics.
All issues and pull requests are here
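The cpm/sla/percentile items in the 6.0.0-beta notes introduce pXX response-time metrics. As a purely generic illustration of what a pXX value means (this is not the OAP's implementation), a nearest-rank percentile over a latency sample looks like this; the sample data is made up.

```java
import java.util.Arrays;

public class PercentileSketch {
    // Generic nearest-rank percentile, only to illustrate what "p95 response time" means
    // for a batch of observed latencies.
    static long percentile(long[] latenciesMs, double p) {
        long[] sorted = latenciesMs.clone();
        Arrays.sort(sorted);
        int rank = (int) Math.ceil(p / 100.0 * sorted.length);   // nearest-rank method
        return sorted[Math.max(0, rank - 1)];
    }

    public static void main(String[] args) {
        long[] sample = {12, 15, 18, 22, 25, 30, 41, 55, 90, 400};
        System.out.println("p50=" + percentile(sample, 50) + "ms, p95=" + percentile(sample, 95) + "ms");
    }
}
```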
Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. 
(#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. 
Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). 
Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. 
Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. 
Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/v9.5.0/en/changes/changes-6.x/","title":"6.6.0"},{"body":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. 
(#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. 
Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. 
Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. 
Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. 
Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/v9.6.0/en/changes/changes-6.x/","title":"6.6.0"},{"body":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. 
Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. 
BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. 
Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. 
Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. 
Support setting host/port manually at the cluster coordinator, rather than based on core settings. Make sure OAP shuts down when it faces a startup error. Support setting separate gRPC/Jetty ip:port for receivers; the default still uses core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn't work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET. Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and is bundled in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirements. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed; they are only considered to be provided by volunteers outside of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin (optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix segment lost when operation names of local and exit spans are in the ref. Fix index names not showing correctly in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. Refactor the register API in the storage module. Fix H2 and MySQL endpoint dependency map missing the upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and remove unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019. User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2. Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in a compatible way; it needs to be opened explicitly. Support SpringMVC 5. Support webflux. Support a new way to override agent.config by system env. Span tags can be overridden explicitly. Fix Spring Controller inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader deadlock in certain situations. Fix agent log typo. Fix wrong component id in RestTemplate plugin. Fix using transform ignore() in the wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2, and Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in Istio receiver. Support service throughput (cpm), success rate (sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generation tool has been integrated into the main repo, in the Maven compile stage. Optimize trace paging query. 
Fix trace query not using fuzzy query in ElasticSearch storage. Fix alarm can't be activated in the right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix 'Remote clients selector error: / by zero'. Fix segment TTL not working.  UI  Support service throughput (cpm), success rate (sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link not working right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is a totally new milestone for the project. At this point, we are not just a distributed tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform (OAP).\nThe core and most important features in v6 are\n Support collecting telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available at the core level, although not provided in this release. Provide Observability Analysis Language (OAL) to make analysis metrics customization available. New GraphQL query protocol. Not bound with the UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","excerpt":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client …","ref":"/docs/main/v9.7.0/en/changes/changes-6.x/","title":"6.6.0"},{"body":"7.0.0 Project  SkyWalking discards the support of JDK 1.6 and 1.7 on the Java agent side. The minimal JDK requirement is JDK 8. Support method performance profiling. Provide new E2E test framework. Remove AppVeyor from the CI; use GitHub Actions only. Provide new plugin test tool. Don't support the SkyWalking v5 agent in-wire and out-wire protocol; v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases for H2 and Struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8. Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugins. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agents. Support Nginx LUA agent. Support skipping the instance relationship analysis if some agents don't have an upstream address, currently for the LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for the ElasticSearch server. Support change detection mechanism in DCS. 
Support Daily step in the ElasticSearch storage implementation for low traffic systems. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn't show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about the python2 command required in compiling. Add doc about the new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","excerpt":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The …","ref":"/docs/main/latest/en/changes/changes-7.0.0/","title":"7.0.0"},
{"body":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided; the legacy protocol is still supported (planned to be removed at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. 
Apache ShardingSphere (4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug in processing Noop Span. Make CommandService a daemon to avoid blocking the target application from shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x.  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and the MicroMeter (Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entities, typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database (conjectural node in tracing scenario). Data models could be added at runtime; they don't depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even if the metrics don't exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics defined in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add missing doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jackson-databind to 2.9.10.  All issues and pull requests are here\n","excerpt":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x …","ref":"/docs/main/latest/en/changes/changes-8.0.0/","title":"8.0.0"},
{"body":"8.0.1 OAP-Backend  Fix no-init mode not working in ElasticSearch storage.  ","excerpt":"8.0.1 OAP-Backend  Fix no-init mode not working in ElasticSearch storage.  ","ref":"/docs/main/latest/en/changes/changes-8.0.1/","title":"8.0.1"},{"body":"8.1.0 Project  Support Kafka as an optional transport layer for trace, JVM metrics, profiling snapshots and meter system data. Support the Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader case. [Core] Separate the config of the plugins from the core level. 
[Core] Support instrumented classes cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept; it could analyze any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin. Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes not showing the span layer in traces. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0.  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page. Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat 9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","excerpt":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system …","ref":"/docs/main/latest/en/changes/changes-8.1.0/","title":"8.1.0"},
{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling (including testing) in JDK11. Support building a single module.
Java Agent  Support metrics plugin. Support writing slf4j logs of gRPC and Kafka (when the agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the instance properties being cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for the MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke. Enhance the configuration initialization core; a plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugins. Fix inconsistent logic bug in PrefixMatch. Fix duplicate exit spans in the Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix that configurations of the Kafka reporter don't work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between the Kafka reporter and the sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14.  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. The Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the Prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of the gRPC server. Support labeled metrics in the alarm rule setting. Support querying all labeled data if there is no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the Prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation to be at least 1. Optimize the length of the slow SQL column in the MySQL storage. Optimize the topology query: use the client-side component name when there is no server-side mapping. Add component IDs for Python components. Add component ID range for C++. Fix Slack notification setting NPE. Fix missing module check of the module manager core. Fix that authentication doesn't work in the sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix that Exporter INCREMENT mode is not working.
Fix an error when executing startup.bat when the log directory exists. Add syncBulkActions configuration to set up the batch size of the metrics persistence. Support the Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/latest/en/changes/changes-8.2.0/","title":"8.2.0"},
Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/next/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. 
Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.1.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. 
Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.2.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. 
Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  
Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.3.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. 
Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.4.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. 
Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.5.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. 
Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","excerpt":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh …","ref":"/docs/main/v9.6.0/en/changes/changes-8.2.0/","title":"8.2.0"},{"body":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. 
{"body":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. 
Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","excerpt":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. 
Test: Bump up …","ref":"/docs/main/v9.6.0/en/changes/changes-8.3.0/","title":"8.3.0"},
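The 8.3.0 entry above adds contain and not contain operators to OAL. A minimal sketch, assuming the source exposes a list-typed tags field; the metric names and the tag value are illustrative, not from the release notes:

```
// Hypothetical metrics using the 8.3.0 `contain` / `not contain` operators on span tags.
tagged_get_cpm = from(Endpoint.*).filter(tags contain "http.method:GET").cpm();
non_get_avg = from(Endpoint.latency).filter(tags not contain "http.method:GET").longAvg();
```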
Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","excerpt":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to …","ref":"/docs/main/v9.1.0/en/changes/changes-8.4.0/","title":"8.4.0"},{"body":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. 
Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. 
Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. 
Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","excerpt":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to …","ref":"/docs/main/v9.2.0/en/changes/changes-8.4.0/","title":"8.4.0"},{"body":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. 
Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. 
Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","excerpt":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to …","ref":"/docs/main/v9.3.0/en/changes/changes-8.4.0/","title":"8.4.0"},{"body":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. 
The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. 
Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","excerpt":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to …","ref":"/docs/main/v9.4.0/en/changes/changes-8.4.0/","title":"8.4.0"},{"body":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. 
Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. 
Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  
All issues and pull requests are here\n","excerpt":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to …","ref":"/docs/main/v9.5.0/en/changes/changes-8.4.0/","title":"8.4.0"},{"body":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. 
Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. 
Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","excerpt":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to …","ref":"/docs/main/v9.6.0/en/changes/changes-8.4.0/","title":"8.4.0"},{"body":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. 
Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. 
Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","excerpt":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to …","ref":"/docs/main/v9.7.0/en/changes/changes-8.4.0/","title":"8.4.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. 
Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. 
Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/latest/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. 
Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. 
Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/next/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. 
Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.1.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. 
Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. 
Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.2.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. 
Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. 
Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.3.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. 
Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.4.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. 
Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. 
Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.5.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. 
Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. 
Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.6.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. 
Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. 
Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","excerpt":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including …","ref":"/docs/main/v9.7.0/en/changes/changes-8.5.0/","title":"8.5.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. 
Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/latest/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. 
perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/next/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. 
Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. 
Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/v9.1.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. 
When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/v9.2.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. 
Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/v9.3.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. 
Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. 
…","ref":"/docs/main/v9.4.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. 
Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/v9.5.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. 
fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/v9.6.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. 
Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","excerpt":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. …","ref":"/docs/main/v9.7.0/en/changes/changes-8.6.0/","title":"8.6.0"},{"body":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. 
DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. 
Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. 
The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","excerpt":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier …","ref":"/docs/main/latest/en/changes/changes-8.7.0/","title":"8.7.0"},{"body":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. 
Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. 
Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. 
Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","excerpt":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier …","ref":"/docs/main/next/en/changes/changes-8.7.0/","title":"8.7.0"},{"body":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. 
Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). 
Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","excerpt":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier …","ref":"/docs/main/v9.1.0/en/changes/changes-8.7.0/","title":"8.7.0"},{"body":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. 
Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception errors. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin to be an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when a fallback url is configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version. Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrently create PrepareRequest when persisting Metrics. Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduces ~1 CPU at ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envoy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance the persistent session mechanism by differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduces the CPU load and helps young GC. 
Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement the storage session mechanism: cached metrics are removed only according to their last access timestamp, rather than their first access time. This makes sure hot data never gets removed unexpectedly. Make the session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x in the webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checking. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because of the current enhanced persistent session mechanism, the data doesn\u0026rsquo;t need to be queryable immediately after insert and update anymore. Performance: share flushInterval setting for both metrics and record data, since the synchronous persistence mechanism was removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s). Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances of including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush frequency of hour and day level metrics; they are flushed only once every 4 regular persistent periods. This means the default flush period of hour and day level metrics is 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3); it used to be the same as the bulk flush period. In the edge case of low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case where 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk flush is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed because metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed because metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue. Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, a new timeout mechanism is designed to avoid this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue. Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. 
Fix PrometheusMetricConverter may throw an IllegalArgumentException when converting metrics to SampleFamily. Filter NaN value samples when building SampleFamily. Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules. Simple optimization of trace sql query statement. Avoid the \u0026ldquo;select *\u0026rdquo; query method. Introduce dynamical logging to update log configuration at runtime. Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event. Breaking Change: remove qps and add rpm in LAL.  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configuration. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0. Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","excerpt":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier …","ref":"/docs/main/v9.2.0/en/changes/changes-8.7.0/","title":"8.7.0"},
{"body":"8.8.0 Project  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. 
Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  
All issues and pull requests are here\n","excerpt":"8.8.0 Project  Split javaagent into skywalking-java repository. …","ref":"/docs/main/latest/en/changes/changes-8.8.0/","title":"8.8.0"},
8.8.1

OAP Server
- Fix a wrong (de)serializer of the ElasticSearch client for OpenSearch storage.
- Fix traces query with tags reporting an error.
- Replace the simple e2e cases with e2e-v2.
- Fix endpoint dependency breaking.

UI
- Delete duplicate calls for endpoint dependency.

Documentation

All issues and pull requests are here.
8.9.0

Project
- E2E tests migrate to e2e-v2.
- Support JDK 16 and 17.
- Add Docker images for the arm64 architecture.

OAP Server
- Add component definition for Jackson.
- Fix the zipkin-receiver plugin not being packaged into dist.
- Upgrade Armeria to 1.12; upgrade the OpenSearch test version to 1.1.0.
- Add component definition for Apache-Kylin.
- Enhance the getter generation mechanism of the OAL engine to support map-typed source fields.
- Add tag(Map) into the All, Service, ServiceInstance and Endpoint sources.
- Fix funcParamExpression and literalExpression not being usable in the same aggregation function.
- Support cast statements in the OAL core engine.
- Support (str->long) and (long) for string-to-long cast statements.
- Support (str->int) and (int) for string-to-int cast statements.
- Support Long literal numbers in the OAL core engine.
- Support literal strings as parameters of aggregation functions.
- Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support the map type in attribute expressions.
- Refactor the OAL compiler context to improve readability.
- Fix wrongly generated hashCode and remoteHashCode methods for numeric fields.
- Support != null in the OAL engine.
- Add a Message Queue Consuming Count metric for MQ consuming services and endpoints.
- Add a Message Queue Avg Consuming Latency metric for MQ consuming services and endpoints.
- Support -Inf as a bucket in the meter system.
- Fix setting the wrong field when combining Events.
- Support searching browser services.
- Add getProfileTaskLogs to the profile query protocol.
- Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG and SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG to true by default (see the sketch after this changelog).
- Fix unexpected deletion due to a TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL.
- Add a GraphQL query to get the OAP version; display the OAP version in the startup message and error logs.
- Fix a missing TimeBucket in H2, MySQL, TiDB and PostgreSQL, which caused TTL not to work for service_traffic.
- Fix a missing TimeBucket in ElasticSearch and provide a compatible storage2Entity for previous versions.
- Fix the ElasticSearch implementations of queryMetricsValues and readLabeledMetricsValues not filling default values when no data is available in the ElasticSearch server.
- Fix a config yaml data type conversion bug when the value contains a special character such as !.
- Optimize minute-dimensionality metrics persistence: when a metric declares a default value and its current value logically equals that default, the whole row is not pushed into the database.
- Fix the max function in OAL not supporting negative longs.
- Add a MicroBench module to make it easier for developers to write JMH tests.
- Upgrade the Kubernetes Java client to 14.0.0, which supports GCP token refreshing and fixes some bugs.
- Change the SO11Y metric envoy_als_in_count to calculate the ALS message count.
- Support Istio 1.10.3, 1.11.4 and 1.12.0 releases (tested through e2e).
- Add a filter mechanism in the MAL core to filter metrics.
- Fix a concurrency bug in MAL increase-related calculation.
- Fix a null pointer bug when building SampleFamily.
- Fix the so11y persistence execution latency not being correct in ElasticSearch storage.
- Add the MeterReportService collectBatch method.
- Add OpenSearch 1.2.0 to tests and verify it works.
- Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts.
- Fix a TopologyQuery.loadEndpointRelation bug.
- Support using IoTDB as a new storage option.
- Add a customized Envoy ALS protocol receiver for Satellite to transmit batch data.
- Remove logback dependencies in the IoTDB plugin.
- Fix StorageModuleElasticsearchProvider not watching trustStorePath.
- Fix a wrong entity check in GraphQL at the endpoint relation level.

UI
- Optimize endpoint dependency.
- Show the service name when hovering nodes in the sankey chart.
- Add the Apache Kylin logo.
- Add the ClickHouse logo.
- Optimize the style and add tips for log conditions.
- Fix the condition for the trace table.
- Optimize profile functions.
- Implement a reminder to clear the cache for dashboard templates.
- Support +/- hh:mm in the TimeZone setting.
- Optimize global settings.
- Fix the current endpoint for endpoint dependency.
- Add the version in the global settings popup.
- Optimize the Log page style.
- Avoid some abnormal settings.
- Fix the query condition of events.

Documentation
- Enhance documents about the data report and query protocols.
- Restructure documents about receivers and fetchers: remove the general receiver and fetcher docs, and add a more specific menu with docs to help users find documents more easily.
- Add a guidance doc about the logic endpoint.
- Link the Satellite as Load Balancer documentation and compatibility with Satellite.
All issues and pull requests are here.
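For the kafka-fetcher default change noted above, a rough sketch of the relevant application.yml section is shown below. The property names (bootstrapServers, enableNativeProtoLog, enableNativeJsonLog) are inferred from the environment variables and are assumptions, not the authoritative configuration; consult the kafka-fetcher backend doc for the exact keys, and note the selector here is set only so the example reads as an enabled module.

```yaml
# Illustrative sketch of the kafka-fetcher module in application.yml (assumed keys).
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}   # shown enabled for illustration
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    # Since 8.9.0 both native log channels default to true, per the changelog item above.
    enableNativeProtoLog: ${SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG:true}
    enableNativeJsonLog: ${SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG:true}
```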
Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/v9.1.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. 
Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. 
Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/v9.2.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. 
Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/v9.3.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. 
Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. 
Add Docker images for arm64 …","ref":"/docs/main/v9.4.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. 
Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/v9.5.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. 
Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/v9.6.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. 
Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. 
Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","excerpt":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 …","ref":"/docs/main/v9.7.0/en/changes/changes-8.9.0/","title":"8.9.0"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/latest/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/next/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.1.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.2.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.3.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.4.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.5.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.6.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","excerpt":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","ref":"/docs/main/v9.7.0/en/changes/changes-8.9.1/","title":"8.9.1"},{"body":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. 
Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. 
Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. 
[Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","excerpt":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and …","ref":"/docs/main/latest/en/changes/changes-9.0.0/","title":"9.0.0"},{"body":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. 
Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. 
Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  
Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","excerpt":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and …","ref":"/docs/main/next/en/changes/changes-9.0.0/","title":"9.0.0"},{"body":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  
Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). 
Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. 
It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","excerpt":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and …","ref":"/docs/main/v9.1.0/en/changes/changes-9.0.0/","title":"9.0.0"},{"body":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. 
Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. 
[Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). 
For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","excerpt":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and …","ref":"/docs/main/v9.2.0/en/changes/changes-9.0.0/","title":"9.0.0"},{"body":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. 
Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. 
Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. 
Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  
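To make the Column.shardingKeyIdx note in the OAP Server list above more concrete, here is a minimal, purely illustrative Java sketch. The stand-in @Column annotation, the class name, and the column names are assumptions made for this example; only the shardingKeyIdx attribute and the idea of sharding a metric's time series by its entity ID come from the change notes.

```java
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Hypothetical stand-in for the OAP storage @Column annotation, reduced to the
// two attributes this example needs; the real annotation lives in the storage core.
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
@interface Column {
    String columnName();
    int shardingKeyIdx() default -1; // -1 = not part of the sharding key
}

// Sketch of a per-service metric: rows are sharded by service_id, so one
// service's continuous minute-by-minute series is kept together (and therefore
// compresses well) in a column-oriented store such as BanyanDB.
class ServiceCallsPerMinute {
    @Column(columnName = "service_id", shardingKeyIdx = 0) // sharding key
    String serviceId;

    @Column(columnName = "value") // the per-minute value, not a sharding key
    long value;

    @Column(columnName = "time_bucket") // minute-precision timestamp
    long timeBucket;
}
```

In this sketch the sharding key only decides grouping within the store, not how data is split across database instances or physical files, matching the NOTICE in the change notes.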
All issues and pull requests are here\n","excerpt":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and …","ref":"/docs/main/v9.3.0/en/changes/changes-9.0.0/","title":"9.0.0"},{"body":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. 
Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). 
Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). 
Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","excerpt":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and …","ref":"/docs/main/v9.4.0/en/changes/changes-9.0.0/","title":"9.0.0"},{"body":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. 
Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. 
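The indexOnly column attribute mentioned just above can be pictured with a similarly small, hypothetical sketch; the stand-in annotation and the SegmentLikeRecord class are invented for illustration, while the indexOnly flag and the tags use case come from the change notes.

```java
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Hypothetical stand-in for the OAP storage @Column annotation, keeping only
// the indexOnly attribute this example is about.
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
@interface Column {
    String columnName();
    boolean indexOnly() default false;
}

// Sketch of a segment-like record: tags stay searchable because they are
// indexed, but with indexOnly = true the field value is not also stored,
// which trims storage for large tag payloads.
class SegmentLikeRecord {
    @Column(columnName = "trace_id")
    String traceId;

    @Column(columnName = "tags", indexOnly = true) // indexed, not stored
    String tags;
}
```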
[Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. 
Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","excerpt":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and …","ref":"/docs/main/v9.5.0/en/changes/changes-9.0.0/","title":"9.0.0"},{"body":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. 
Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. 
Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . 
Add the “STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System” paper. Add an Academy menu for recommending articles. Remove the All source related documents and examples. Update Booster UI's dependency licenses. Add a profiling doc, and remove the service mesh intro doc (not necessary). Add a doc for the virtual database. Rewrite the UI introduction. Update the k8s-monitoring, backend-telemetry and v9-version-upgrade docs for v9.  All issues and pull requests are here\n","excerpt":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and …","ref":"/docs/main/v9.6.0/en/changes/changes-9.0.0/","title":"9.0.0"},{"body":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. These CVEs only affect JDKs where JNDI is open by default. Notice: using the JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids the CVEs (see the sketch below). Upgrade maven-wrapper to 3.1.0 and maven to 3.8.4 for performance improvements and more native ARM support. Exclude unnecessary libs when building under JDK 9+. Migrate the base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add an E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to the E2E test matrix for verification. Upgrade the Apache parent pom version to 25. Use the plugin versions defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix an incompatibility with IntelliJ IDEA 2021.3.2 on M1. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for the Booster UI build. Improve CI with the new GHA “run failed jobs” feature. Fix ./mvnw compile not working if ./mvnw install has not been executed at least once. Add JD_PRESERVE_LINE_FEEDS=true to the official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies (not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix a potential NPE in OAL string match and a bug when the right-hand-side variable includes double quotes. Bump up the Armeria version to 1.14.1 to fix a CVE. Polish the ETCD cluster config environment variables. Add the analysis of metrics in the Satellite MetricsService. Fix the “Can't split endpoint id into 2 parts” bug for endpoint IDs. For TCP in service mesh observability, the endpoint name doesn't exist in TCP traffic. Upgrade the H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend the column name override mechanism to work for ValueColumnMetadata. Introduce the new concept Layer and remove NodeType. For more details, refer to v9-version-upgrade. Fix query sort metrics failure in H2 storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relations. Follow the protocol grammar: fix GCPhrase -> GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust the E2E case for the V9 core. 
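On the log4j2 mitigation noted in the Project section above, the same lookup-disabling switch can be expressed three ways, depending on how the JVM is launched. The property and variable names come directly from the note above; the Java line only shows setting the system property before any logger is created:

```java
// Equivalent ways to disable log4j2 message lookups:
//   1. JVM option:            -Dlog4j2.formatMsgNoLookups=true
//   2. Environment variable:  LOG4J_FORMAT_MSG_NO_LOOKUPS=true
//   3. Programmatically, before the first logger is initialized:
public final class Log4jMitigation {
    public static void main(String[] args) {
        System.setProperty("log4j2.formatMsgNoLookups", "true");
        // ... initialize logging / start the application afterwards
    }
}
```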
Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. 
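The LAL “possibility” sampler mentioned above keeps a log line with a configured probability. LAL itself is a Groovy-based DSL, so the Java sketch below only illustrates the underlying idea (a stateless percentage-based decision) rather than the actual LAL implementation:

```java
import java.util.concurrent.ThreadLocalRandom;

// Illustration only: keep roughly `percentage` percent of items at random.
public final class PossibilitySampler {
    private final int percentage; // 0..100

    public PossibilitySampler(int percentage) {
        this.percentage = percentage;
    }

    public boolean sample() {
        return ThreadLocalRandom.current().nextInt(100) < percentage;
    }

    public static void main(String[] args) {
        PossibilitySampler sampler = new PossibilitySampler(15); // keep ~15% of logs
        long kept = java.util.stream.IntStream.range(0, 10_000)
                .filter(i -> sampler.sample())
                .count();
        System.out.println("kept " + kept + " of 10000");
    }
}
```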
[Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. 
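The GraphQL complexity limitation above rejects deeply nested, expensive queries before they execute. A self-contained sketch of the general idea, counting selected fields recursively against a cap; the field tree type and the check are simplified illustrations, not the OAP server's actual implementation:

```java
import java.util.List;

// Simplified idea: score a query by the number of selected fields and
// reject it when the score exceeds a configured maximum.
public final class QueryComplexityGuard {

    record Field(String name, List<Field> children) {}

    static int complexity(Field field) {
        int total = 1;
        for (Field child : field.children()) {
            total += complexity(child);
        }
        return total;
    }

    static void check(Field root, int maxComplexity) {
        int score = complexity(root);
        if (score > maxComplexity) {
            throw new IllegalArgumentException(
                "query complexity " + score + " exceeds limit " + maxComplexity);
        }
    }

    public static void main(String[] args) {
        Field query = new Field("services",
            List.of(new Field("endpoints", List.of(new Field("metrics", List.of())))));
        check(query, 1000); // passes; an excessively nested query would be rejected
        System.out.println("accepted, complexity = " + complexity(query));
    }
}
```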
Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","excerpt":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and …","ref":"/docs/main/v9.7.0/en/changes/changes-9.0.0/","title":"9.0.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. 
This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. 
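For the aggregation tuning listed above (“execution_hint”: “map”, “collect_mode”: “breadth_first”), both parameters are standard Elasticsearch terms-aggregation options. The snippet below only shows what such a request body looks like; the field and bucket names are illustrative, and how the OAP ES client assembles the request is not reproduced here:

```java
// Shape of a terms aggregation carrying the two hints named in the change log.
public final class TermsAggExample {
    public static void main(String[] args) {
        String aggregation = """
            {
              "aggs": {
                "service_names": {
                  "terms": {
                    "field": "service_id",
                    "size": 1000,
                    "execution_hint": "map",
                    "collect_mode": "breadth_first"
                  }
                }
              }
            }
            """;
        System.out.println(aggregation);
    }
}
```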
Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/latest/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. 
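The Kafka Fetcher fix above is about merge order: user-supplied kafkaConsumerConfig entries (such as group.id) must be applied after the module's defaults, otherwise the defaults silently win. A minimal sketch of that ordering with plain java.util.Properties; the default values shown are placeholders, and the actual Kafka Fetcher code is not reproduced here:

```java
import java.util.Properties;

// Defaults first, user overrides last: for duplicate keys the later put wins.
public final class KafkaConfigMerge {
    public static void main(String[] args) {
        Properties defaults = new Properties();
        defaults.setProperty("group.id", "skywalking-consumer");
        defaults.setProperty("enable.auto.commit", "true");

        Properties userProvided = new Properties();   // e.g. from kafkaConsumerConfig
        userProvided.setProperty("group.id", "my-custom-group");

        Properties effective = new Properties();
        effective.putAll(defaults);
        effective.putAll(userProvided);               // overrides must be applied last

        System.out.println(effective.getProperty("group.id")); // my-custom-group
    }
}
```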
Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. 
Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/next/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. 
Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. 
Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.1.0/en/changes/changes/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. 
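The flush-thread fix mentioned above matches a well-known java.util.concurrent behavior: if a task submitted with scheduleAtFixedRate throws, its subsequent executions are suppressed. Below is the usual defensive pattern of catching inside the task body; whether the OAP fix uses exactly this pattern is an assumption:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// scheduleAtFixedRate stops rescheduling a task after it throws, so the flush
// body is wrapped in try/catch to keep the schedule alive after a failed flush.
public final class ResilientFlusher {
    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleAtFixedRate(() -> {
            try {
                flush();
            } catch (Exception e) {
                // log and carry on; the next run still happens
                System.err.println("flush failed: " + e.getMessage());
            }
        }, 0, 10, TimeUnit.SECONDS);
    }

    static void flush() {
        // placeholder for the bulk flush to ElasticSearch
        throw new IllegalStateException("ElasticSearch flush failed");
    }
}
```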
Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. 
Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.2.0/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. 
[Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. 
[Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.3.0/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. 
Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. 
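On the gRPC item above (removing the unnecessary onCompleted in the onError path): in grpc-java a StreamObserver must be terminated exactly once, with either onCompleted() or onError(), never both. A small sketch of the corrected error path; the response type and handler are placeholders, not SkyWalking's actual protocol classes:

```java
import io.grpc.Status;
import io.grpc.stub.StreamObserver;

public final class ReportHandler {
    // Placeholder response type; real protocol messages are generated from proto files.
    record Reply(String payload) {}

    // Terminate the stream exactly once: onCompleted() on success, onError() on failure.
    void handle(StreamObserver<Reply> responseObserver, boolean ok) {
        if (ok) {
            responseObserver.onNext(new Reply("ack"));
            responseObserver.onCompleted();
        } else {
            responseObserver.onError(
                Status.INTERNAL.withDescription("processing failed").asException());
            // no onCompleted() here: onError() already terminated the stream
        }
    }
}
```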
Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.4.0/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. 
Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. 
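Regarding the “tolerate inexisting indices” item above: the standard Elasticsearch mechanism for this is the ignore_unavailable / allow_no_indices search options, so a query spanning a daily index that was never created returns empty instead of failing. Whether OAP sets exactly these flags is an assumption, and the index names below are purely illustrative:

```java
// Illustrative only: a raw search request that tolerates missing (e.g. not-yet-created
// daily) indices via standard Elasticsearch request options, instead of failing with 404.
public final class TolerantSearchUrl {
    public static void main(String[] args) {
        String indices = "sw_segment-20220101,sw_segment-20220102"; // one may not exist
        String url = "http://localhost:9200/" + indices
                + "/_search?ignore_unavailable=true&allow_no_indices=true";
        System.out.println(url);
    }
}
```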
Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.5.0/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. 
Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. 
Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.6.0/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). 
Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. 
[Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","excerpt":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check …","ref":"/docs/main/v9.7.0/en/changes/changes-9.1.0/","title":"9.1.0"},{"body":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. 
Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. [Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. 
SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","excerpt":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch …","ref":"/docs/main/latest/en/changes/changes-9.2.0/","title":"9.2.0"},{"body":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. 
Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. [Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. 
SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","excerpt":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch …","ref":"/docs/main/next/en/changes/changes-9.2.0/","title":"9.2.0"},{"body":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. 
Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. [Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. 
SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","excerpt":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch …","ref":"/docs/main/v9.2.0/en/changes/changes/","title":"9.2.0"},{"body":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. 
Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. [Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. 
SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","excerpt":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch …","ref":"/docs/main/v9.3.0/en/changes/changes-9.2.0/","title":"9.2.0"},{"body":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. 
Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. [Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. 
SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","excerpt":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch …","ref":"/docs/main/v9.4.0/en/changes/changes-9.2.0/","title":"9.2.0"},{"body":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. 
Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. [Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. 
SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","excerpt":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch …","ref":"/docs/main/v9.5.0/en/changes/changes-9.2.0/","title":"9.2.0"},{"body":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. 
{"body":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). 
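The H2SQLExecutor#getByIDs item above replaces string-built SQL with a prepared statement. Purely as an illustration of that general pattern (the table and column names below are hypothetical, not SkyWalking's actual schema), a batched ID lookup with bind parameters might look like this:

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class GetByIdsExample {
    // Hypothetical lookup of several rows by ID using bind parameters
    // instead of concatenating the IDs into the SQL string.
    public static List<String> getByIds(Connection conn, List<String> ids) throws SQLException {
        // Build one "?" placeholder per ID: "?, ?, ?"
        String placeholders = String.join(", ", Collections.nCopies(ids.size(), "?"));
        String sql = "SELECT id, name FROM example_entity WHERE id IN (" + placeholders + ")";
        List<String> names = new ArrayList<>();
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            for (int i = 0; i < ids.size(); i++) {
                ps.setString(i + 1, ids.get(i)); // JDBC parameters are 1-indexed
            }
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    names.add(rs.getString("name"));
                }
            }
        }
        return names;
    }
}
```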
Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache-related spans to provide metrics and slow commands for cache services from the client side. Optimize virtual database analysis, and fix the dynamic config watcher NPE when the default value is null. Remove the physical index existence check and keep only the template existence check to avoid meaningless retry waits in no-init mode. Make sure the instance list is ordered in the TTL processor to avoid the TTL timer never running. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bumping up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflicts when downsampling. Forcibly sort the readLabeledMetricsValues result in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to the API server in some Kubernetes clusters; use the Get State and Start at Most Recent semantic instead of Start at Exact because the change history events are not needed, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify the time range condition of query services and DAO code to Duration. [Breaking Change]: Remove the prometheus-fetcher plugin; please use OpenTelemetry to scrape Prometheus metrics and set up the SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0; (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds); (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds).  Support Golang runtime metrics analysis. Add APISIX metrics monitoring. Support skywalking-client-js reporting an empty service version and page path; set the default version as latest and the default page path as /(root). Fix the error fetching data (/browser_app_page_pv0): Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value; set the max length of column tags in tables log_tag/segment_tag/alarm_record_tag, column query in zipkin_query, and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bumping up from previous releases. Optimize the creation conditions of the profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes; this is perfect when there are deployment changes and SkyWalking can react to the changes in real time. However, when the cluster has many events (such as in a large cluster or some special Kubernetes engines like OpenShift), the requests sent from SkyWalking become unpredictable, i.e. SkyWalking might send massive requests to the Kubernetes API server, causing heavy load on the API server. This PR switches from the watcher mechanism to a polling mechanism: SkyWalking polls the metadata at a specified interval, so that the requests sent to the API server are predictable (~10 requests every interval, 3 minutes), and the request count is constant regardless of the cluster\u0026rsquo;s changes. 
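The OpenTelemetry-style bucket boundaries quoted in the histogram BugFix item above can be checked with a small, self-contained sketch. This only restates the interval rules from that item and is not the OAP's actual MAL code:

```java
public class OtelHistogramBuckets {
    // Bucket layout quoted in the changelog item:
    //   bucket 0                : (-infinity, explicit_bounds[0]]
    //   bucket i, 0 < i < size  : (explicit_bounds[i-1], explicit_bounds[i]]
    //   bucket size             : (explicit_bounds[size-1], +infinity)
    public static int bucketIndex(double[] explicitBounds, double value) {
        for (int i = 0; i < explicitBounds.length; i++) {
            if (value <= explicitBounds[i]) {
                return i; // upper bound is inclusive
            }
        }
        return explicitBounds.length; // overflow bucket: (last bound, +infinity)
    }

    public static void main(String[] args) {
        double[] bounds = {10, 50, 100};
        System.out.println(bucketIndex(bounds, 10));   // 0 -> (-infinity, 10]
        System.out.println(bucketIndex(bounds, 10.5)); // 1 -> (10, 50]
        System.out.println(bucketIndex(bounds, 500));  // 3 -> (100, +infinity)
    }
}
```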
However, with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics being put into the wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support exporting Trace and Log through Kafka. Add a new config initialization mechanism for module providers. This is a ModuleManager lib kernel-level change. [Breaking Change] Support the new records query protocol; rename the column named service_id to entity_id to support different entities. Please re-create the top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s-related MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics into individual definitions. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from the original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed and fields of TCP-related sources are changed; please refer to the latest oal/tcp.oal file. Do not log errors when failing to create an ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of the LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt the new OR logical operator for MeasureIDs query, BanyanDBProfileThreadSnapshotQueryDAO query, multiple Event conditions query, and Metrics query. Simplify Group check and creation. Partially apply UITemplate changes. Support index_only. Return CompletableFuture\u0026lt;Void\u0026gt; directly from the BanyanDB client. Optimize data binary parse methods in *LogQueryDAO. Support different indexType. Support configuration for TTL and (block|segment) intervals.  Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specifying the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support updating index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use the entry span\u0026rsquo;s peer from the consumer side as the source service when there is no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logic. Fix ClassCastException in LoggingConfigWatcher. Support the span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix the max length of entity, instance and endpoint IDs to 512 in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. 
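The watcher-to-polling change described earlier in this entry boils down to a fixed-interval refresh loop. Below is a minimal, generic sketch of that pattern; it deliberately avoids the real Kubernetes Java client API and SkyWalking's own classes, and the 3-minute interval figure comes from the changelog text:

```java
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class MetadataPoller {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private volatile List<String> snapshot = List.of(); // last successfully polled metadata

    // Poll the metadata source on a fixed interval. The request rate to the API server
    // is bounded by the interval regardless of how many change events the cluster
    // produces; the trade-off is up to one interval of staleness, as the entry notes.
    public void start(Supplier<List<String>> fetcher, long intervalMinutes) {
        scheduler.scheduleWithFixedDelay(() -> {
            try {
                snapshot = fetcher.get(); // e.g. list pods/services once per tick
            } catch (Exception e) {
                // keep the previous snapshot on failure and retry on the next tick
            }
        }, 0, intervalMinutes, TimeUnit.MINUTES);
    }

    public List<String> current() {
        return snapshot;
    }
}
```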
Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support persisting longer logs as a text type in ElasticSearch, instead of a keyword, to avoid the length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoking the backend with the missing parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add a default entity name(_blank) if absent to avoid an NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamically configuring the sampling strategy in network profiling. Zipkin module supports BanyanDB storage. Zipkin traces query API: sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove the core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add a new column timestamp for tables profile_task_log/top_n_database_statement; this requires altering this column or removing these tables before OAP starts, if bumping up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add the specific analyzer to the template before index creation to avoid the update index error. Internal API: remove undocumented ElasticSearch API usage and use the documented one. Fix BanyanDB.ShardingKey annotation missing in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transforming the real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics read from DB or cached. Dashboard: Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly is not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in the top_n_database_statement, top_n_cache_read_command index.  UI  Fix: tab activated incorrectly when clicking tab space. Add impala icon for impala JDBC Java agent plugin. (Webapp) Bump up snakeyaml to 1.31 for fixing CVE-2022-25857. [Breaking Change]: migrate from Spring Web to Armeria; now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. 
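The Can't split xxx id into 2 parts errors mentioned above (and in the earlier browser-log item) come from entity IDs built by joining two parts with a separator: an empty name yields only one part after splitting. The sketch below is only illustrative of that failure mode; the underscore separator and Base64 encoding are assumptions for the example, not a statement of SkyWalking's exact ID format:

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class EntityIdExample {
    // Hypothetical two-part entity ID: <serviceId>_<base64(entityName)>.
    static String buildId(String serviceId, String entityName) {
        String encoded = Base64.getEncoder()
                .encodeToString(entityName.getBytes(StandardCharsets.UTF_8));
        return serviceId + "_" + encoded;
    }

    static String[] decodeId(String id) {
        String[] parts = id.split("_");
        if (parts.length != 2) {
            // The failure mode reported as "Can't split xxx id into 2 parts".
            throw new IllegalArgumentException("Can't split " + id + " into 2 parts");
        }
        return parts;
    }

    public static void main(String[] args) {
        // A default name such as "_blank" keeps the ID well-formed.
        System.out.println(decodeId(buildId("svc-1", "_blank")).length); // 2
        // An empty name encodes to an empty string, the ID ends with a bare "_",
        // split() then returns a single element and decoding fails.
        try {
            decodeId(buildId("svc-1", ""));
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // Can't split svc-1_ into 2 parts
        }
    }
}
```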
UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboards. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples, to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make them easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway APISIX menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual MQ. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove an unused icon. Support labeled values on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and FaaS-Instance dashboards. Enhance the legend of the metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance the tags component to search tags with the input value. Fix topology loading style. Fix the update metric processor for readRecords and remove readSampledRecords from the metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement the task creation UI for the network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about the Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good-read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for the Report Span Attached Events data collecting protocol. Add new docs for the Record query protocol. Update Server Agents and Compatibility for the PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","excerpt":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component …","ref":"/docs/main/latest/en/changes/changes-9.3.0/","title":"9.3.0"},
{"body":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. 
{"body":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom version to 29. Bump up Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Add Micrometer as a new component (ID=141). Refactor session cache in MetricsPersistentWorker. Cache enhancement - don't read new metrics from the database in the minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. 
// 1.3 the OAP cluster is rebalanced due to scaling // (2) the metrics are from the time after the timeOfLatestStabilitySts // (3) the metrics don't exist in the cache // the kernel should NOT try to load it from the database. // // Notice, about condition (2), // for the specific minute of booted successfully, the metrics are expected to load from database when // it doesn't exist in the cache. (An illustrative sketch of this decision follows below.)  Remove the offset of metric session timeout according to worker creation sequence. Correct MetricsExtension annotations declarations in manual entities. Support component IDs' priority in process relation metrics. Remove abandoned logic in MergableBufferedData, which caused unexpected no-update. Fix missing LastUpdateTimestamp setting that caused the metrics session to expire. Rename MAL rule spring-sleuth.yaml to spring-micrometer.yaml. Fix memory leak in Zipkin API. Remove the dependency of refresh_interval of ElasticSearch indices from the elasticsearch/flushInterval config. Now, it uses core/persistentPeriod + 5s as refresh_interval for all indices instead. Change elasticsearch/flushInterval to 5s (was 15s). Optimize flushInterval of ElasticSearch BulkProcessor to avoid extra periodical flushes in continuous bulk streams. Fix an unexpected dot added when exp is a pure metric name and expPrefix != null. Support monitoring MariaDB. Remove measure/stream specific interval settings in BanyanDB. Add global-specific settings used to override global configurations (e.g. segmentIntervalDays, blockIntervalHours) in BanyanDB. Use TTL-driven interval settings for the measure-default group in BanyanDB. Fix wrong group of non-time-relative metadata in BanyanDB. Refactor StorageData#id to the new StorageID object from a String type. Support multiple component IDs in the service topology level. Add ElasticSearch.Keyword annotation to declare the target field type as keyword. [Breaking Change] Column component_id of service_relation_client_side and service_relation_server_side have been replaced by component_ids. Support priority definition in the component-libraries.yml. Enhance service topology query. When there are multiple components detected from the server side, the component type of the node is determined by the priority, which was random in the previous release. Remove component_id from service_instance_relation_client_side and service_instance_relation_server_side. Make the satellite E2E test more stable. Add Istio 1.16 to the test matrix. Register ValueColumn as Tag for Record in the BanyanDB storage plugin. Bump up Netty to 4.1.86. Remove unnecessary additional columns when the storage is in logical sharding mode. The cluster coordinator supports a watch mechanism for notifying RemoteClientManager and ServerStatusService. Fix ServiceMeshServiceDispatcher overwriting the ServiceDispatcher debug file when SW_OAL_ENGINE_DEBUG is enabled. Use groupBy and in operators to optimize topology query for the BanyanDB storage plugin. Support a server status watcher for MetricsPersistentWorker to check whether the metrics require initialization. Fix incorrect meter values when using the sumPerMinLabeled or sumHistogramPercentile MAL functions. Fix attached events not being displayed when querying traces with the Zipkin Lens UI. Remove time_bucket for both Stream and Measure kinds in the BanyanDB plugin. Merge TIME_BUCKET of Metrics and Record into StorageData. Support no layer in the listServices query. Fix time_bucket of ServiceTraffic not set correctly in slowSql of MAL. Correct the TopN record query DAO of BanyanDB. Tweak interval settings of BanyanDB. 
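The inline comment above describes when MetricsPersistentWorker may skip reading a metric back from the database. The following is a minimal sketch of that decision under stated assumptions: the method and parameter names (shouldReadFromDatabase, timeOfLatestStabilitySts as a plain long, existsInSessionCache) are illustrative and not the actual OAP API.

```java
/**
 * Minimal sketch of the session-cache read-back decision described in the change note above.
 * Names are illustrative assumptions, not the actual MetricsPersistentWorker implementation.
 */
final class SessionCacheReadBackSketch {
    /**
     * @param timeOfLatestStabilitySts minute time bucket of the latest stability status, 0 if unknown
     * @param metricTimeBucket         minute time bucket of the incoming metric
     * @param existsInSessionCache     whether the metric id is already in the session cache
     * @return true if the worker should read the metric back from the database before merging
     */
    static boolean shouldReadFromDatabase(long timeOfLatestStabilitySts,
                                          long metricTimeBucket,
                                          boolean existsInSessionCache) {
        boolean stabilityKnown = timeOfLatestStabilitySts > 0;                   // condition (1)
        boolean afterStableMinute = metricTimeBucket > timeOfLatestStabilitySts; // condition (2), strictly after
        boolean missingFromCache = !existsInSessionCache;                        // condition (3)
        if (stabilityKnown && afterStableMinute && missingFromCache) {
            // All three conditions hold: the metric can only be new data, so skip the database read.
            return false;
        }
        // For the stable minute itself (metricTimeBucket == timeOfLatestStabilitySts), a cache miss
        // still falls through to a database read, matching the "Notice" about condition (2).
        return missingFromCache;
    }

    public static void main(String[] args) {
        System.out.println(shouldReadFromDatabase(202301010000L, 202301010001L, false)); // false: skip read-back
        System.out.println(shouldReadFromDatabase(202301010000L, 202301010000L, false)); // true: boot minute, read back
    }
}
```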
Support monitoring AWS Cloud EKS. Bump BanyanDB Java client to 0.3.0-rc1. Remove id tag from measures. Add BanyanDB.MeasureField to mark a column as a BanyanDB Measure field. Add BanyanDB.StoreIDTag to store a process's id for searching. [Breaking Change] The supported version of ShardingSphere-Proxy is upgraded from 5.1.2 to 5.3.1. Due to the changes of ShardingSphere's API, versions before 5.3.1 are not compatible. Add the eBPF network profiling E2E test for each storage. Fix TCP service instances lacking instance properties like pod and namespace, which caused Pod logs not to work for TCP workloads. Add Python HBase happybase module component ID(94). Fix gRPC alarm not updating settings from the dynamic configuration source. Add batchOfBytes configuration to limit the size of bulk flush. Add Python Websocket module component ID(7018). [Optional] Optimize single trace query performance by customizing routing in ElasticSearch. SkyWalking trace segments and Zipkin spans use the trace ID for routing. This is OFF by default, controlled by storage/elasticsearch/enableCustomRouting. Enhance OAP HTTP server to support HTTPS. Remove handler scanning in the otel receiver; use manual initialization instead. Add aws-firehose-receiver to support collecting AWS CloudWatch metrics (OpenTelemetry format). Notice: no HTTPS/TLS setup is supported. Because AWS Firehose requires a proxied request (https://... instead of /aws/firehose/metrics), there must be a proxy (Nginx, Envoy, etc.). Avoid Antlr dependency versions differing between compile time and runtime. Now PrometheusMetricConverter#escapedName also supports converting / to _. (An illustrative sketch follows below.) Add missing TCP throughput metrics. Refactor @Column annotation, swap Column#name and ElasticSearch.Column#columnAlias, and rename ElasticSearch.Column#columnAlias to ElasticSearch.Column#legacyName. Add Python HTTPX module component ID(7019). Migrate tests from JUnit 4 to JUnit 5. Refactor HTTP-based alarm plugins and extract common logic to HttpAlarmCallback. Support Amazon Simple Storage Service (Amazon S3) metrics monitoring. Support processing Sum metrics with the AGGREGATION_TEMPORALITY_DELTA case. Support Amazon DynamoDB monitoring. Support the Prometheus HTTP API and PromQL. Scope in the Entity of the Metrics query v1 protocol is not required and is corrected automatically. The scope is determined based on the metric itself. Add explicit ReadTimeout for ConsulConfigurationWatcher to avoid IllegalArgumentException: Cache watchInterval=10sec >= networkClientReadTimeout=10000ms. Fix DurationUtils.getDurationPoints exceeding the range when startTimeBucket equals endTimeBucket. Support processing OpenTelemetry ExponentialHistogram metrics. Add FreeRedis component ID(3018).  UI  Add Zipkin Lens UI to webapp, and proxy it to context path /zipkin. Migrate the build tool from Vue CLI to Vite4. Fix Instance Relation and Endpoint Relation dashboards show up. Add Micrometer icon. Update MySQL UI to support MariaDB. Add AWS menu for supporting AWS monitoring. Add missing FastAPI logo. Update the log details page to support the formatted display of JSON content. Fix build config. Avoid being unable to drag process nodes for the first time. Add node folder into ignore list. Add ElPopconfirm to component types. Add an iframe widget for zipkin UI. Optimize graph tooltips to make them more friendly. Bump json5 from 1.0.1 to 1.0.2. Add websockets icon. Implement independent mode for widgets. Bump http-cache-semantics from 4.1.0 to 4.1.1. Update menus for OpenFunction. 
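The escapedName change above is easier to see with a tiny example. The helper below is only a hedged illustration of the described behavior; it is not the actual PrometheusMetricConverter code, and the exact character set it handles ('.', '-', and now '/') is an assumption beyond the '/' case named in the change note.

```java
/**
 * Illustrative example of the metric-name escaping mentioned above. NOT the actual
 * PrometheusMetricConverter#escapedName implementation; names and the character set are assumed.
 */
final class MetricNameEscapeSketch {
    static String escapeMetricName(String name) {
        // Replace characters that are not usable in OAL/MAL metric names with underscores,
        // including '/' per the 9.4.0 change note.
        return name.replaceAll("[./\\-]", "_");
    }

    public static void main(String[] args) {
        System.out.println(escapeMetricName("mount/point.usage-percent")); // mount_point_usage_percent
    }
}
```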
Add auto fresh to widgets independent mode. Fix: clear trace ID on the Log and Trace widgets after using association. Fix: reset duration for query conditions after time range changes. Add AWS S3 menu. Refactor: optimize side bar component to make it more friendly. Fix: remove duplicate popup message for query result. Add logo for HTTPX. Refactor: optimize the attached events visualization in the trace widget. Update BanyanDB client to 0.3.1. Add AWS DynamoDB menu. Fix: add auto period to the independent mode for widgets. Optimize menus and add Windows monitoring menu. Add a calculation for the cpm5dAvg. add a cpm5d calculation. Fix data processing error in the eBPF profiling widget. Support for double quotes in SlowSQL statements. Fix: the wrong position of the menu when clicking the topology node.  Documentation  Remove Spring Sleuth docs, and add Spring MicroMeter Observations Analysis with the latest Java agent side enhancement. Update monitoring MySQL document to add the MariaDB part. Reorganize the protocols docs to a more clear API docs. Add documentation about replacing Zipkin server with SkyWalking OAP. Add Lens UI relative docs in Zipkin trace section. Add Profiling APIs. Fix backend telemetry doc and so11y dashboard doc as the OAP Prometheus fetcher was removed since 9.3.0  All issues and pull requests are here\n","excerpt":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom …","ref":"/docs/main/latest/en/changes/changes-9.4.0/","title":"9.4.0"},{"body":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom version to 29. Bump up Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Adds Micrometer as a new component.(ID=141) Refactor session cache in MetricsPersistentWorker. Cache enhancement - don\u0026rsquo;t read new metrics from database in minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. // 1.3 the OAP cluster is rebalanced due to scaling // (2) the metrics are from the time after the timeOfLatestStabilitySts // (3) the metrics don't exist in the cache // the kernel should NOT try to load it from the database. // // Notice, about condition (2), // for the specific minute of booted successfully, the metrics are expected to load from database when // it doesn't exist in the cache.  Remove the offset of metric session timeout according to worker creation sequence. Correct MetricsExtension annotations declarations in manual entities. Support component IDs' priority in process relation metrics. Remove abandon logic in MergableBufferedData, which caused unexpected no-update. Fix miss set LastUpdateTimestamp that caused the metrics session to expire. Rename MAL rule spring-sleuth.yaml to spring-micrometer.yaml. Fix memory leak in Zipkin API. Remove the dependency of refresh_interval of ElasticSearch indices from elasticsearch/flushInterval config. Now, it uses core/persistentPeriod + 5s as refresh_interval for all indices instead. Change elasticsearch/flushInterval to 5s(was 15s). Optimize flushInterval of ElasticSearch BulkProcessor to avoid extra periodical flush in the continuous bulk streams. An unexpected dot is added when exp is a pure metric name and expPrefix != null. 
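The minute-dimensionality cache note above boils down to a single predicate. The following is a minimal sketch of that guard under assumed names (MinuteCacheGuard, a plain map as the session cache); it is illustrative only and not the actual MetricsPersistentWorker code.

```java
import java.util.HashMap;
import java.util.Map;

/**
 * Illustrative sketch of the minute-dimensionality cache guard described in the
 * notes above. All names here (MinuteCacheGuard, timeOfLatestStabilitySts, the
 * session cache map) are assumptions, not the real OAP implementation.
 */
public class MinuteCacheGuard {
    /** Minute time bucket when this OAP node last became stable; 0 means unknown. */
    private final long timeOfLatestStabilitySts;
    /** In-memory metrics session cache keyed by metric id. */
    private final Map<String, Object> sessionCache = new HashMap<>();

    public MinuteCacheGuard(long timeOfLatestStabilitySts) {
        this.timeOfLatestStabilitySts = timeOfLatestStabilitySts;
    }

    /**
     * Returns false (skip the database read) only when all three conditions from the
     * notes hold: the stability time is known (1), the metric is strictly newer than
     * it (2), and it is absent from the cache (3).
     */
    public boolean shouldLoadFromDatabase(String metricId, long minuteTimeBucket) {
        boolean stabilityKnown = timeOfLatestStabilitySts > 0;                        // (1)
        boolean strictlyAfterStability = minuteTimeBucket > timeOfLatestStabilitySts; // (2)
        boolean missingInCache = !sessionCache.containsKey(metricId);                 // (3)
        return !(stabilityKnown && strictlyAfterStability && missingInCache);
    }
}
```

The strict comparison in condition (2) is what keeps the boot minute itself falling back to the database, as the note requires.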
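The optional ElasticSearch custom routing is easiest to see as a matched write/read pair. The sketch below uses the stock Elasticsearch Java client purely for illustration; the OAP ships its own client, and the index name sw_segment and field trace_id are assumptions rather than the exact storage schema.

```java
import java.util.Map;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

/**
 * Sketch of trace-ID based routing. Index and field names are illustrative,
 * and the real switch is storage/elasticsearch/enableCustomRouting in the OAP.
 */
public class TraceIdRoutingSketch {
    public static void main(String[] args) {
        String traceId = "a1b2c3d4e5f6";

        // Write path: the segment document is routed by its trace ID, so every
        // segment of one trace lands on the same shard.
        IndexRequest write = new IndexRequest("sw_segment")
                .id(traceId + "-segment-1")
                .routing(traceId)
                .source(Map.of("trace_id", traceId));

        // Read path: the single-trace query carries the same routing value, so
        // only the shard that owns this trace has to be searched.
        SearchRequest read = new SearchRequest("sw_segment")
                .routing(traceId)
                .source(new SearchSourceBuilder()
                        .query(QueryBuilders.termQuery("trace_id", traceId)));

        System.out.println(write + "\n" + read);
    }
}
```

Because the write and the single-trace query share the same routing value, the query can be answered by one shard instead of fanning out to all of them, which is the performance gain the entry describes.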
If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates. Add Continuous Profiling tab on Mesh layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","excerpt":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of …","ref":"/docs/main/v9.5.0/en/changes/changes/","title":"9.5.0"},{"body":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP running in no-init mode. Support to bind TLS status as a part of component for service topology. Fix component ID priority bug. Fix component ID of topology overlap due to storage layer bugs. [Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. [Breaking Change] Sharding-MySQL implementations and tests get removed due to we have the day-based rolling mechanism by default Fix otel k8s-cluster rule add namespace dimension for MAL aggregation calculation(Deployment Status,Deployment Spec Replicas) Support continuous profiling feature. Support collect process level related metrics. Fix K8sRetag reads the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing service label. Fix possible NPE when initialize IntList. Support parse PromQL expression has empty labels in the braces for metadata query. Support alarm metric OP !=. Support metrics query indicates whether value == 0 represents actually zero or no data. Fix NPE when query the not exist series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. 
PromQL: Remove empty values from the query result, fix /api/v1/metadata param limit could cause out of bound. Support monitoring the total number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix cve. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for aws-firehose receiver Bump up armeria to 1.23.1 Support Elasticsearch Monitoring. Fix PromQL HTTP API /api/v1/series response missing service label when matching metric. Support ServerSide TopN for BanyanDB. Add component ID for Jersey. Remove OpenCensus support, the related codes and docs as it\u0026rsquo;s sunsetting. Support dynamic configuration of searchableTracesTags Support exportErrorStatusTraceOnly for export the error status trace segments through the Kafka channel Add component ID for Grizzly. Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. 
Apply MQE on General and Virtual-Database layer UI-templates. Add Continuous Profiling tab on Mesh layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","excerpt":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of …","ref":"/docs/main/v9.6.0/en/changes/changes-9.5.0/","title":"9.5.0"},{"body":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP running in no-init mode. Support to bind TLS status as a part of component for service topology. Fix component ID priority bug. Fix component ID of topology overlap due to storage layer bugs. [Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. [Breaking Change] Sharding-MySQL implementations and tests get removed due to we have the day-based rolling mechanism by default Fix otel k8s-cluster rule add namespace dimension for MAL aggregation calculation(Deployment Status,Deployment Spec Replicas) Support continuous profiling feature. Support collect process level related metrics. Fix K8sRetag reads the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing service label. Fix possible NPE when initialize IntList. Support parse PromQL expression has empty labels in the braces for metadata query. Support alarm metric OP !=. Support metrics query indicates whether value == 0 represents actually zero or no data. Fix NPE when query the not exist series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: Remove empty values from the query result, fix /api/v1/metadata param limit could cause out of bound. Support monitoring the total number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix cve. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for aws-firehose receiver Bump up armeria to 1.23.1 Support Elasticsearch Monitoring. Fix PromQL HTTP API /api/v1/series response missing service label when matching metric. Support ServerSide TopN for BanyanDB. Add component ID for Jersey. Remove OpenCensus support, the related codes and docs as it\u0026rsquo;s sunsetting. Support dynamic configuration of searchableTracesTags Support exportErrorStatusTraceOnly for export the error status trace segments through the Kafka channel Add component ID for Grizzly. Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. 
Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates. Add Continuous Profiling tab on Mesh layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","excerpt":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of …","ref":"/docs/main/v9.7.0/en/changes/changes-9.5.0/","title":"9.5.0"},{"body":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. 
Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. 
Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","excerpt":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. …","ref":"/docs/main/latest/en/changes/changes-9.6.0/","title":"9.6.0"},{"body":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. 
Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. 
Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","excerpt":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. …","ref":"/docs/main/next/en/changes/changes-9.6.0/","title":"9.6.0"},{"body":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. 
Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. 
Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","excerpt":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. …","ref":"/docs/main/v9.6.0/en/changes/changes/","title":"9.6.0"},{"body":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. 
Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. 
Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  
All issues and pull requests are here\n","excerpt":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. …","ref":"/docs/main/v9.7.0/en/changes/changes-9.6.0/","title":"9.6.0"},{"body":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. 
Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","excerpt":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy …","ref":"/docs/main/latest/en/changes/changes/","title":"9.7.0"},{"body":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. 
Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","excerpt":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy …","ref":"/docs/main/next/en/changes/changes-9.7.0/","title":"9.7.0"},{"body":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. 
Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","excerpt":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. 
Update Groovy …","ref":"/docs/main/v9.7.0/en/changes/changes/","title":"9.7.0"},{"body":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the fundamental theory of SkyWalking tracing models.\n  Blog about Scaling SkyWalking server automatically in kubernetes.\n  Blog about Use Profiling to Fix the Blind Spot of Distributed Tracing.\n  Blog about observing Istio + Envoy service mesh with ALS solution.\n  Blog about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).\n  ","excerpt":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the …","ref":"/docs/main/v9.0.0/en/academy/list/","title":"Academy"},{"body":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the fundamental theory of SkyWalking tracing models.\n  Blog about Scaling SkyWalking server automatically in kubernetes.\n  Blog about Use Profiling to Fix the Blind Spot of Distributed Tracing.\n  Blog about observing Istio + Envoy service mesh with ALS solution.\n  Blog about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).\n  ","excerpt":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the …","ref":"/docs/main/v9.1.0/en/academy/list/","title":"Academy"},{"body":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the fundamental theory of SkyWalking tracing models.\n  Blog about Scaling SkyWalking server automatically in kubernetes.\n  Blog about Use Profiling to Fix the Blind Spot of Distributed Tracing.\n  Blog about observing Istio + Envoy service mesh with ALS solution.\n  Blog about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).\n  Blog about using eBPF Profiling to pinpoint service mesh critical performance Impact.\n  ","excerpt":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the …","ref":"/docs/main/v9.2.0/en/academy/list/","title":"Academy"},{"body":"ActiveMQ classic monitoring SkyWalking leverages jmx prometheus exporter for collecting metrics data from ActiveMQ classic. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  ActiveMQ classic has extensive support for JMX to allow you to monitor and control the behavior of the broker via the JMX MBeans. The jmx prometheus exporter collects metrics data from ActiveMQ classic, this exporter is intended to be run as a Java Agent, exposing a HTTP server and serving metrics of the local JVM. OpenTelemetry Collector fetches metrics from jmx prometheus exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Enable JMX in activemq.xml, the JMX remote port defaults to 1616, you can change it through ACTIVEMQ_SUNJMX_START. The example for ActiveMQ configuration, refer to here. Set up jmx prometheus exporter which runs as a Java Agent(recommended) of ActiveMQ classic. If you work with docker, you also can set up a single server for exporter, refer to here(note the configuration of includeObjectNames). Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. 
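For orientation, a minimal OpenTelemetry Collector pipeline for this data flow might look like the sketch below. It is only an illustration: it assumes the JMX Prometheus exporter is reachable at activemq:9404 and that the OAP's OpenTelemetry (OTLP gRPC) receiver listens at oap:11800; the job name and scrape interval are placeholders, and the official example linked above may add further labels or relabeling expected by the activemq OTel rules.
```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: activemq-monitoring   # illustrative job name
          scrape_interval: 15s
          static_configs:
            - targets: ["activemq:9404"]  # assumed JMX Prometheus exporter address

processors:
  batch: {}

exporters:
  otlp:
    endpoint: oap:11800                   # assumed OAP OTLP gRPC receiver address
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
```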
Config SkyWalking OpenTelemetry receiver.  ActiveMQ classic Monitoring ActiveMQ classic monitoring provides multidimensional metrics monitoring of ActiveMQ Exporter as Layer: ActiveMQ Service in the OAP. In each cluster, the broker is represented as Instance and the destination is represented as Endpoint.\nActiveMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     System Load Average Count meter_activemq_cluster_system_load_average The average system load, range:[0, 10000]. JMX Prometheus Exporter   Thread Count Count meter_activemq_cluster_thread_count Threads currently used by the JVM. JMX Prometheus Exporter   Init Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_init The initial amount of heap memory available. JMX Prometheus Exporter   Committed Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_committed The memory is guaranteed to be available for the JVM to use. JMX Prometheus Exporter   Used Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_used The amount of JVM heap memory currently in use. JMX Prometheus Exporter   Max Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_max The maximum possible size of the heap memory. JMX Prometheus Exporter   GC G1 Old Collection Count Count meter_activemq_cluster_gc_g1_old_collection_count The gc count of G1 Old Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Count Count meter_activemq_cluster_gc_g1_young_collection_count The gc count of G1 Young Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Old Collection Time ms meter_activemq_cluster_gc_g1_old_collection_time The gc time spent in G1 Old Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Time ms meter_activemq_cluster_gc_g1_young_collection_time The gc time spent in G1 Young Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC Parallel Old Collection Count Count meter_activemq_cluster_gc_parallel_old_collection_count The gc count of Parallel Old Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Count Count meter_activemq_cluster_gc_parallel_young_collection_count The gc count of Parallel Young Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Old Collection Time ms meter_activemq_cluster_gc_parallel_old_collection_time The gc time spent in Parallel Old Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Time ms meter_activemq_cluster_gc_parallel_young_collection_time The gc time spent in Parallel Young Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   Enqueue Rate Count/s meter_activemq_cluster_enqueue_rate Number of messages that have been sent to the cluster per second(JDK[6,8]). JMX Prometheus Exporter   Dequeue Rate Count/s meter_activemq_cluster_dequeue_rate Number of messages that have been acknowledged or discarded on the cluster per second. JMX Prometheus Exporter   Dispatch Rate Count/s meter_activemq_cluster_dispatch_rate Number of messages that has been delivered to consumers per second. JMX Prometheus Exporter   Expired Rate Count/s meter_activemq_cluster_expired_rate Number of messages that have been expired per second. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_cluster_average_enqueue_time The average time a message was held on this cluster. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_cluster_max_enqueue_time The max time a message was held on this cluster. 
JMX Prometheus Exporter    ActiveMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime sec meter_activemq_broker_uptime Uptime of the broker in day. JMX Prometheus Exporter   State  meter_activemq_broker_state If slave broker 1 else 0. JMX Prometheus Exporter   Current Connections Count meter_activemq_broker_current_connections The number of clients connected to the broker currently. JMX Prometheus Exporter   Current Producer Count Count meter_activemq_broker_current_producer_count The number of producers currently attached to the broker. JMX Prometheus Exporter   Current Consumer Count Count meter_activemq_broker_current_consumer_count The number of consumers consuming messages from the broker. JMX Prometheus Exporter   Producer Count Count meter_activemq_broker_producer_count Number of message producers active on destinations. JMX Prometheus Exporter   Consumer Count Count meter_activemq_broker_consumer_count Number of message consumers subscribed to destinations. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_broker_enqueue_count The total number of messages sent to the broker. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_broker_dequeue_count The total number of messages the broker has delivered to consumers. JMX Prometheus Exporter   Enqueue Rate Count/sec meter_activemq_broker_enqueue_rate The total number of messages sent to the broker per second. JMX Prometheus Exporter   Dequeue Rate Count/sec meter_activemq_broker_dequeue_rate The total number of messages the broker has delivered to consumers per second. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_broker_memory_percent_usage Percentage of configured memory used by the broker. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_broker_memory_percent_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Limit Bytes meter_activemq_broker_memory_limit Memory limited used for holding undelivered messages before paging to temporary storage. JMX Prometheus Exporter   Store Percent Usage % meter_activemq_broker_store_percent_usage Percentage of available disk space used for persistent message storage. JMX Prometheus Exporter   Store Limit Bytes meter_activemq_broker_store_limit Disk limited used for persistent messages before producers are blocked. JMX Prometheus Exporter   Temp Percent Usage Bytes meter_activemq_broker_temp_percent_usage Percentage of available disk space used for non-persistent message storage. JMX Prometheus Exporter   Temp Limit Bytes meter_activemq_broker_temp_limit Disk limited used for non-persistent messages and temporary data before producers are blocked. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_broker_average_message_size Average message size on this broker. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_broker_max_message_size Max message size on this broker. JMX Prometheus Exporter   Queue Size Count meter_activemq_broker_queue_size Number of messages on this broker that have been dispatched but not acknowledged. JMX Prometheus Exporter    ActiveMQ Destination Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Producer Count Count meter_activemq_destination_producer_count Number of producers attached to this destination. JMX Prometheus Exporter   Consumer Count Count meter_activemq_destination_consumer_count Number of consumers subscribed to this destination. 
JMX Prometheus Exporter   Topic Consumer Count Count meter_activemq_destination_topic_consumer_count Number of consumers subscribed to the topics. JMX Prometheus Exporter   Queue Size Count meter_activemq_destination_queue_size The number of messages that have not been acknowledged by a consumer. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_destination_memory_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_destination_memory_percent_usage Percentage of configured memory used by the destination. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_destination_enqueue_count The number of messages sent to the destination. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_destination_dequeue_count The number of messages the destination has delivered to consumers. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_destination_average_enqueue_time The average time a message was held on this destination. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_destination_max_enqueue_time The max time a message was held on this destination. JMX Prometheus Exporter   Dispatch Count Count meter_activemq_destination_dispatch_count Number of messages that has been delivered to consumers. JMX Prometheus Exporter   Expired Count Count meter_activemq_destination_expired_count Number of messages that have been expired. JMX Prometheus Exporter   Inflight Count Count meter_activemq_destination_inflight_count Number of messages that have been dispatched to but not acknowledged by consumers. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_destination_average_message_size Average message size on this destination. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_destination_max_message_size Max message size on this destination. JMX Prometheus Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/activemq/activemq-cluster.yaml, otel-rules/activemq/activemq-broker.yaml, otel-rules/activemq/activemq-destination.yaml. The ActiveMQ dashboard panel configurations are found in ui-initialized-templates/activemq.\n","excerpt":"ActiveMQ classic monitoring SkyWalking leverages jmx prometheus exporter for collecting metrics data …","ref":"/docs/main/next/en/setup/backend/backend-activemq-monitoring/","title":"ActiveMQ classic monitoring"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. 
There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment to do …","ref":"/docs/main/latest/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment to do …","ref":"/docs/main/next/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster …","ref":"/docs/main/v9.0.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. 
They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster …","ref":"/docs/main/v9.1.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster …","ref":"/docs/main/v9.2.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster …","ref":"/docs/main/v9.3.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment. 
In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster …","ref":"/docs/main/v9.4.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment to do …","ref":"/docs/main/v9.5.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. 
L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment to do …","ref":"/docs/main/v9.6.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","excerpt":"Advanced deployment OAP servers communicate with each other in a cluster environment to do …","ref":"/docs/main/v9.7.0/en/setup/backend/advanced-deployment/","title":"Advanced deployment"},{"body":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. 
traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","excerpt":"Advanced Features  Set the settings through system properties for config file override. Read setting …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/advanced-features/","title":"Advanced Features"},{"body":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","excerpt":"Advanced Features  Set the settings through system properties for config file override. Read setting …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-features/","title":"Advanced Features"},{"body":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","excerpt":"Advanced Features  Set the settings through system properties for config file override. Read setting …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/advanced-features/","title":"Advanced Features"},{"body":"Advanced Features  Set the settings through system properties for config file override. Read setting override. 
Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","excerpt":"Advanced Features  Set the settings through system properties for config file override. Read setting …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/advanced-features/","title":"Advanced Features"},{"body":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","excerpt":"Advanced Features  Set the settings through system properties for config file override. Read setting …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/advanced-features/","title":"Advanced Features"},{"body":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. 
Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. 
*/ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","excerpt":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/advanced-reporters/","title":"Advanced Reporters"},{"body":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. 
Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. */ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","excerpt":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-reporters/","title":"Advanced Reporters"},{"body":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. 
Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. 
*/ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","excerpt":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/advanced-reporters/","title":"Advanced Reporters"},{"body":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. 
Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. */ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","excerpt":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/advanced-reporters/","title":"Advanced Reporters"},{"body":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. 
Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. 
*/ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","excerpt":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/advanced-reporters/","title":"Advanced Reporters"},{"body":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","excerpt":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern …","ref":"/docs/main/latest/en/setup/ai-pipeline/introduction/","title":"AI Pipeline"},{"body":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","excerpt":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern …","ref":"/docs/main/next/en/setup/ai-pipeline/introduction/","title":"AI Pipeline"},{"body":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. 
This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","excerpt":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern …","ref":"/docs/main/v9.5.0/en/setup/ai-pipeline/introduction/","title":"AI Pipeline"},{"body":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","excerpt":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern …","ref":"/docs/main/v9.6.0/en/setup/ai-pipeline/introduction/","title":"AI Pipeline"},{"body":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","excerpt":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern …","ref":"/docs/main/v9.7.0/en/setup/ai-pipeline/introduction/","title":"AI Pipeline"},{"body":"Alarm Alarm core is driven by a collection of rules, which are defined in config/alarm-settings.yml. There are three parts in alarm rule definition.\n Alarm rules. They define how metrics alarm should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after the alarm is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after the alarm is triggered.  
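As an illustration of the Webhooks part, the endpoints listed there only need to accept the alarm JSON (described in the Webhook section below) via HTTP POST. The following is a minimal, hypothetical JDK-only receiver sketch; it is not shipped with SkyWalking, and the port, path and class name are placeholders you would replace with your own:
import com.sun.net.httpserver.HttpServer;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;

/** Hypothetical receiver for SkyWalking alarm webhook callbacks (illustration only). */
public class AlarmWebhookReceiver {
    public static void main(String[] args) throws Exception {
        HttpServer server = HttpServer.create(new InetSocketAddress(8090), 0);
        // The OAP posts a JSON array of AlarmMessage objects with content type application/json.
        server.createContext("/alarm/receive", exchange -> {
            try (InputStream in = exchange.getRequestBody()) {
                String json = new String(in.readAllBytes(), StandardCharsets.UTF_8);
                // Parse the JSON with any library and read fields such as scope, name,
                // ruleName, alarmMessage, startTime and tags; here it is only logged.
                System.out.println("Received alarm payload: " + json);
            }
            exchange.sendResponseHeaders(200, -1); // an empty 2xx response acknowledges the alarm
            exchange.close();
        });
        server.start();
    }
}
A URL such as http://your-host:8090/alarm/receive would then be listed under the webhooks of alarm-settings.yml.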
Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alarm rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can be also configured as the source of alarm, please refer to the event doc for more details. Include names. Entity names which are included in this rule. Please follow the entity name definitions. Exclude names. Entity names which are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. If both exclude-name list and exclude-name regex are set, both rules will take effect. Include labels. Metric labels which are included in this rule. Exclude labels. Metric labels which are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. A regex that exclude labels. If both the exclude-label list and exclude-label regex are set, both rules will take effect. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you would like to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags, or through system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter-system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the alarm by one or more of the values.\nFor example in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. 
Indicates if the rule can send notifications, or if it simply serves as an condition of the composite rule. Silence period. After the alarm is triggered in Time-N, there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Composite rules NOTE: Composite rules are only applicable to alarm rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. 
Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  
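// For example, the tags configured for the rule in alarm-settings.yml, such as level: WARNING. 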
repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingtalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. For security purposes, you can config an optional secret for an individual webhook URL. The alarm message will be sent through HTTP post by application/json content type if you have configured Dingtalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. For security purposes, you can config an optional secret for an individual webhook URL. If you would like to direct a text to a user, you can config ats which is the feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. 
\u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotUpdate the settings dynamically Since 6.5.0, the alarm settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alarm rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alarm rule. If any attribute (metrics-name, op, threshold, period, count, etc.) of a rule is changed, the sliding window will be destroyed and re-created, causing the alarm of this specific rule to restart again.\n","excerpt":"Alarm Alarm core is driven by a collection of rules, which are defined in config/alarm-settings.yml. …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-alarm/","title":"Alarm"},{"body":"Alarm The alarm core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alarm rule definitions.\n Alarm rules. They define how metrics alarm should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alarm is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alarm is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alarm rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. 
If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Composite rules NOTE: Composite rules are only applicable to alarm rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  
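To make the Expression element concrete before the full YAML example below: each referenced individual rule contributes a true/false result for the current check, and the composite expression combines those results with \u0026amp;\u0026amp;, || and (). A tiny, hypothetical sketch of that evaluation (not SkyWalking's implementation; names are made up) might look like this:
import java.util.Map;

/** Hypothetical illustration of composite-rule evaluation (not OAP code):
 *  each individual rule yields true/false for the current period, and the
 *  composite expression combines those results. */
public class CompositeRuleSketch {
    static boolean compRule(Map<String, Boolean> ruleResults) {
        // Equivalent of: expression: service_percent_rule && service_resp_time_percentile_rule
        return ruleResults.getOrDefault("service_percent_rule", false)
                && ruleResults.getOrDefault("service_resp_time_percentile_rule", false);
    }

    public static void main(String[] args) {
        boolean triggered = compRule(Map.of(
                "service_percent_rule", true,
                "service_resp_time_percentile_rule", true));
        System.out.println(triggered); // true: both individual rules matched, so the composite alarm fires
    }
}
The complete alarm-settings.yml example below shows how these elements fit together: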
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. 
The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotUpdate the settings dynamically Since 6.5.0, the alarm settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alarm rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alarm rule. If any attribute (metrics-name, op, threshold, period, count, etc.) of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\n","excerpt":"Alarm The alarm core is driven by a collection of rules defined in config/alarm-settings.yml. There …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-alarm/","title":"Alarm"},{"body":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alerting is triggered.  
Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. A MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate the request latency is slower than 1s. The typical illegal expressions are  avg(service_resp_time \u0026gt; 1000) + 1 expression root doesn\u0026rsquo;t use Compare Operation service_resp_time \u0026gt; 1000 expression return a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.    The metrics names in the expression could be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Hooks. Binding the specific names of the hooks when the alarm is triggered. The name format is {hookType}.{hookName} (slack.custom1 e.g.) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Expression is calculated based on the metric values(Value1 ~ Value7). 
For example, expression avg(service_resp_time) \u0026gt; 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) \u0026gt; 1000 and the result would be 1(true). Then the alarm would be triggered. Every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window (a small illustrative sketch of this window behaviour follows the default rule list below).  NOTE:\n If the expression includes labeled metrics and the result has multiple labeled values(e.g. sum(service_percentile{_='0,1'} \u0026gt; 1000) \u0026gt;= 3), the alarm will be triggered if any of the labeled value results matches the condition 3 times (P50 \u0026gt; 1000 or P75 \u0026gt; 1000). One alarm rule targets a single entity level, such as a service-level expression (avg(service_resp_time) \u0026gt; 1000). Set entity names(Include/Exclude names\u0026hellip;) according to the metrics entity level; do not include metrics of different entity levels in the same expression, such as service metrics and endpoint metrics.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# A MQE expression and the root operation of the expression must be a Compare Operation.expression:sum((endpoint_sla / 100) \u0026lt; 75) \u0026gt;= 3# The length of time to evaluate the metricsperiod:10# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10message:Successful rate of endpoint {name} is lower than 75%tags:level:WARNINGservice_percent_rule:expression:sum((service_sla / 100) \u0026lt; 85) \u0026gt;= 4# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_cperiod:10message:Service {name} successful rate is less than 85%service_resp_time_percentile_rule:expression:sum(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} \u0026gt; 1000) \u0026gt;= 3period:10silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000meter_service_status_code_rule:expression:sum(aggregate_labels(meter_status_code{_=\u0026#39;4xx,5xx\u0026#39;},sum) \u0026gt; 10) \u0026gt; 3period:10count:3silence-period:5message:The request number of entity {name} 4xx and 5xx status is more than expected.hooks:- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;comp_rule:expression:(avg(service_sla / 100) \u0026gt; 80) * (avg(service_percentile{_=\u0026#39;0\u0026#39;}) \u0026gt; 1000) == 1period:10message:Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.tags:level:CRITICALhooks:- \u0026#34;slack.default\u0026#34;- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;Default alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  
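The shifting-window evaluation described above can be pictured with a small, hypothetical sketch (this is not the OAP implementation; the class and method names are made up): keep the last period values per entity, shift every minute, and re-check the condition.
import java.util.ArrayDeque;
import java.util.Deque;

/** Illustrative sketch (not OAP code) of the shifting-window evaluation described above. */
public class WindowSketch {
    private final int period;
    private final Deque<Long> window = new ArrayDeque<>();

    WindowSketch(int period) { this.period = period; }

    /** Add the newest minute's value; the oldest value falls out once the window is full. */
    boolean shiftAndCheck(long newestValue, long threshold) {
        if (window.size() == period) {
            window.removeFirst();          // e.g. T1/Value1 is dropped when T8 arrives
        }
        window.addLast(newestValue);
        if (window.size() < period) {
            return false;                  // not enough data points yet to evaluate the period
        }
        double avg = window.stream().mapToLong(Long::longValue).average().orElse(0);
        return avg > threshold;            // analogous to avg(service_resp_time) > 1000
    }

    public static void main(String[] args) {
        WindowSketch w = new WindowSketch(7);
        boolean fired = false;
        for (int i = 0; i < 7; i++) {
            fired = w.shiftAndCheck(1001, 1000);
        }
        System.out.println(fired);         // true: (1001 + 1001 + ... + 1001) / 7 > 1000
    }
}
In the real OAP, the MQE expression configured for the rule is what gets evaluated against the window, not a hard-coded average.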
List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nHooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type after you have set up Webhook hooks as follows:\nwebhook:default:is-default:trueurls:- http://ip:port/xxx- http://ip:port/yyyThe JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type after you have set up gRPC hooks as follows:\ngRPC:default:is-default:truetarget-host:iptarget-port:portThe message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalk:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishu:default:is-default:truetext-template:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelink:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client-id:\u0026#34;dummy_client_id\u0026#34;client-secret:dummy_secret_keyaccess-token-url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage-url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group-ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot-name:robotPagerDuty The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerduty:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integration-keys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscord:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Expression expression string MQE expression   Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Tags tags key-value pair    Period Period int    Silence period silence-period int    Message message string    Hooks hooks string array     ","excerpt":"Alerting Alerting mechanism measures system performance according to the metrics of …","ref":"/docs/main/latest/en/setup/backend/backend-alarm/","title":"Alerting"},{"body":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. A MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate the request latency is slower than 1s. The typical illegal expressions are  avg(service_resp_time \u0026gt; 1000) + 1 expression root doesn\u0026rsquo;t use Compare Operation service_resp_time \u0026gt; 1000 expression return a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.    The metrics names in the expression could be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. 
If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Hooks. Binding the specific names of the hooks when the alarm is triggered. The name format is {hookType}.{hookName} (slack.custom1 e.g.) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Expression is calculated based on the metric values(Value1 ~ Value7). For example, expression avg(service_resp_time) \u0026gt; 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) \u0026gt; 1000 and the result would be 1(true). Then the alarm would be triggered. Every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  NOTE:\n If the expression includes labeled metrics and the result has multiple labeled values(e.g. sum(service_percentile{p='50,75'} \u0026gt; 1000) \u0026gt;= 3), the alarm will be triggered if any of the labeled value results matches the condition 3 times (P50 \u0026gt; 1000 or P75 \u0026gt; 1000). One alarm rule targets a single entity level, such as a service-level expression (avg(service_resp_time) \u0026gt; 1000). Set entity names(Include/Exclude names\u0026hellip;) according to the metrics entity level; do not include metrics of different entity levels in the same expression, such as service metrics and endpoint metrics.  
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# A MQE expression and the root operation of the expression must be a Compare Operation.expression:sum((endpoint_sla / 100) \u0026lt; 75) \u0026gt;= 3# The length of time to evaluate the metricsperiod:10# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10message:Successful rate of endpoint {name} is lower than 75%tags:level:WARNINGservice_percent_rule:expression:sum((service_sla / 100) \u0026lt; 85) \u0026gt;= 4# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_cperiod:10message:Service {name} successful rate is less than 85%service_resp_time_percentile_rule:expression:sum(service_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;} \u0026gt; 1000) \u0026gt;= 3period:10silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000meter_service_status_code_rule:expression:sum(aggregate_labels(meter_status_code{_=\u0026#39;4xx,5xx\u0026#39;},sum) \u0026gt; 10) \u0026gt; 3period:10count:3silence-period:5message:The request number of entity {name} 4xx and 5xx status is more than expected.hooks:- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;comp_rule:expression:(avg(service_sla / 100) \u0026gt; 80) * (avg(service_percentile{_=\u0026#39;0\u0026#39;}) \u0026gt; 1000) == 1period:10message:Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.tags:level:CRITICALhooks:- \u0026#34;slack.default\u0026#34;- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;Default alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nHooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. 
Hooks
Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, and each hook can have its own configuration. For example, you can configure two Slack hooks: one named default with is-default: true, which applies to all Alarm Rules that do not configure hooks, and another named custom1, which only applies to Alarm Rules that configure hooks and include the name slack.custom1.

hooks:
  slack:
    # default here is just a name, set the field 'is-default: true' if this notification hook is expected to be default globally.
    default:
      # If true, this hook will apply on all rules, unless a rule has its own specific hook. There can be more than one default hook of the same hook type.
      is-default: true
      text-template: |-
        { "type": "section", "text": { "type": "mrkdwn", "text": ":alarm_clock: *Apache Skywalking Alarm* \n **%s**." } }
      webhooks:
        - https://hooks.slack.com/services/x/y/zssss
    custom1:
      text-template: |-
        { "type": "section", "text": { "type": "mrkdwn", "text": ":alarm_clock: *Apache Skywalking Alarm* \n **%s**." } }
      webhooks:
        - https://hooks.slack.com/services/x/y/custom1

Currently, SkyWalking supports the following hook types:

Webhook
The Webhook requires the peer to be a web container. The alarm message will be sent through an HTTP POST with the application/json content type after you have set up Webhook hooks as follows:

webhook:
  default:
    is-default: true
    urls:
      - http://ip:port/xxx
      - http://ip:port/yyy

The JSON format is based on List<org.apache.skywalking.oap.server.core.alarm.AlarmMessage> with the following key information:
scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine.
name. Target scope entity name. Please follow the entity name definitions.
id0. The ID of the scope entity that matches the name. When using the relation scope, it is the source entity ID.
id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty.
ruleName. The rule name configured in alarm-settings.yml.
alarmMessage. The alarm text message.
startTime. The alarm time in milliseconds, elapsed since midnight of January 1, 1970 UTC.
tags. The tags configured in alarm-settings.yml.

See the following example:

[{
  "scopeId": 1, "scope": "SERVICE", "name": "serviceA", "id0": "12", "id1": "",
  "ruleName": "service_resp_time_rule", "alarmMessage": "alarmMessage xxxx",
  "startTime": 1560524171000,
  "tags": [{ "key": "level", "value": "WARNING" }]
}, {
  "scopeId": 1, "scope": "SERVICE", "name": "serviceB", "id0": "23", "id1": "",
  "ruleName": "service_resp_time_rule", "alarmMessage": "alarmMessage yyy",
  "startTime": 1560524171000,
  "tags": [{ "key": "level", "value": "CRITICAL" }]
}]
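As a rough illustration of the receiving end of this webhook, here is a minimal sketch (Python standard library only, not part of SkyWalking; the port is an arbitrary choice) that accepts the POSTed JSON array shown above and prints each alarm:

```python
import json
from http.server import BaseHTTPRequestHandler, HTTPServer

# Minimal sketch of a webhook receiver for the JSON payload shown above.
# Point the URLs in the `webhook` hook settings at wherever this actually runs.
class AlarmHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        length = int(self.headers.get("Content-Length", 0))
        alarms = json.loads(self.rfile.read(length))  # a JSON array of alarm messages
        for alarm in alarms:
            tags = {t["key"]: t["value"] for t in alarm.get("tags", [])}
            print(f"[{alarm['scope']}] {alarm['name']}: {alarm['alarmMessage']} "
                  f"(rule={alarm['ruleName']}, level={tags.get('level', 'N/A')})")
        self.send_response(200)
        self.end_headers()

if __name__ == "__main__":
    # Port 18080 is arbitrary for this illustration.
    HTTPServer(("0.0.0.0", 18080), AlarmHandler).serve_forever()
```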
gRPC
The alarm message will be sent through a remote gRPC method with the Protobuf content type after you have set up gRPC hooks as follows:

gRPC:
  default:
    is-default: true
    target-host: ip
    target-port: port

The message contains key information which is defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.
Part of the protocol looks like this:

message AlarmMessage {
    int64 scopeId = 1;
    string scope = 2;
    string name = 3;
    string id0 = 4;
    string id1 = 5;
    string ruleName = 6;
    string alarmMessage = 7;
    int64 startTime = 8;
    AlarmTags tags = 9;
}
message AlarmTags {
    // String key, String value pair.
    repeated KeyStringValuePair data = 1;
}
message KeyStringValuePair {
    string key = 1;
    string value = 2;
}

Slack Chat
Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.
The alarm message will be sent through an HTTP POST with the application/json content type if you have configured Slack Incoming Webhooks as follows:

slack:
  default:
    is-default: true
    text-template: |-
      { "type": "section", "text": { "type": "mrkdwn", "text": ":alarm_clock: *Apache Skywalking Alarm* \n **%s**." } }
    webhooks:
      - https://hooks.slack.com/services/x/y/z

WeChat
Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through an HTTP POST with the application/json content type after you have set up Wechat Webhooks as follows:

wechat:
  default:
    is-default: true
    text-template: |-
      { "msgtype": "text", "text": { "content": "Apache SkyWalking Alarm: \n %s." } }
    webhooks:
      - https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_key

DingTalk
Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes.
The alarm message will be sent through an HTTP POST with the application/json content type if you have configured DingTalk Webhooks as follows:

dingtalk:
  default:
    is-default: true
    text-template: |-
      { "msgtype": "text", "text": { "content": "Apache SkyWalking Alarm: \n %s." } }
    webhooks:
      - url: https://oapi.dingtalk.com/robot/send?access_token=dummy_token
        secret: dummysecret

Feishu
Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu's user_id, separated by ",". The alarm message will be sent through an HTTP POST with the application/json content type if you have configured Feishu Webhooks as follows:

feishu:
  default:
    is-default: true
    text-template: |-
      { "msg_type": "text", "content": { "text": "Apache SkyWalking Alarm: \n %s." }, "ats": "feishu_user_id_1,feishu_user_id_2" }
    webhooks:
      - url: https://open.feishu.cn/open-apis/bot/v2/hook/dummy_token
        secret: dummysecret

WeLink
Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through an HTTP POST with the application/json content type if you have configured WeLink Webhooks as follows:

welink:
  default:
    is-default: true
    text-template: "Apache SkyWalking Alarm: \n %s."
    webhooks:
      # you may find your own client_id and client_secret in your app, below are dummy, need to change.
      - client-id: "dummy_client_id"
        client-secret: dummy_secret_key
        access-token-url: https://open.welink.huaweicloud.com/api/auth/v2/tickets
        message-url: https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat
        # if you send to multiple groups at a time, separate group_ids with commas, e.g. "123xx","456xx"
        group-ids: "dummy_group_id"
        # make a name you like for the robot, it will be displayed in the group
        robot-name: robot

PagerDuty
The PagerDuty hook is based on Events API v2.
Follow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.
Then configure as follows:

pagerduty:
  default:
    is-default: true
    text-template: "Apache SkyWalking Alarm: \n %s."
    integration-keys:
      - 5c6d805c9dcf4e03d09dfa81e8789ba1

You can also configure multiple integration keys.

Discord
Follow the Discord Webhooks guide and create a new webhook.
Then configure as follows:

discord:
  default:
    is-default: true
    text-template: "Apache SkyWalking Alarm: \n %s."
    webhooks:
      - url: https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBl
        username: robot

Update the settings dynamically
Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.
In order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.)
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.

Keys with data types of alerting rule configuration file

Alerting element    | Configuration property key | Type           | Description
Expression          | expression                 | string         | MQE expression
Include names       | include-names              | string array   |
Exclude names       | exclude-names              | string array   |
Include names regex | include-names-regex        | string         | Java regex Pattern
Exclude names regex | exclude-names-regex        | string         | Java regex Pattern
Tags                | tags                       | key-value pair |
Period              | period                     | int            |
Silence period      | silence-period             | int            |
Message             | message                    | string         |
Hooks               | hooks                      | string array   |
","excerpt":"Alerting Alerting mechanism measures system performance according to the metrics of …","ref":"/docs/main/next/en/setup/backend/backend-alarm/","title":"Alerting"},{"body":"Alerting
The alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. The alerting kernel is an in-memory, time-window-based queue.
The alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions:
alerting rules. They define how metrics alerting should be triggered and what conditions should be considered.
Webhooks. The list of web service endpoints, which should be called after an alerting is triggered.
gRPCHook. The host and port of the remote gRPC method, which should be called after an alerting is triggered.

Entity name
Defines the relation between scope and entity name.
Service: Service name
Instance: {Instance name} of {Service name}
Endpoint: {Endpoint name} in {Service name}
Database: Database service name
Service Relation: {Source service name} to {Dest service name}
Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name}
Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}

Rules
There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.

Individual rules
An alerting rule is made up of the following elements:
Rule name. A unique name shown in the alarm message. It must end with _rule.
Metrics name. This is also the metrics name in the OAL script. Only long, double, and int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details.
Include names. Entity names that are included in this rule. Please follow the entity name definitions.
Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions.
Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect.
Exclude names regex. A regex that excludes entity names. Both rules will take effect if both exclude-name list and exclude-name regex are set.
Include labels. Metric labels that are included in this rule.
Exclude labels. Metric labels that are excluded from this rule.
Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect.
Exclude labels regex. A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set.
Tags. Tags are key/value pairs that are attached to alarms.
Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.

Label settings are required by the meter system. They are used to store metrics from label-based platforms, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.

Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array, described as value1, value2, value3, value4, value5. Each value may serve as the threshold for the corresponding value of the metric. Set a value to - if you do not wish to trigger the Alarm by that value. For example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule.
OP. The operator. It supports >, >=, <, <=, ==. We welcome contributions of all OPs.
Period. The size of the metrics cache, in minutes, used for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time.
Count. Within a period window, if the number of times the value goes over the threshold (based on OP) reaches count, an alarm will be sent.
Only as condition. Indicates whether the rule can send notifications or simply serves as a condition of a composite rule.
Silence period. After the alarm is triggered at Time-N (TN), it stays silent during TN -> TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.

For example, for a given metric there is a shifting window like the following at T7:

T1     T2     T3     T4     T5     T6     T7
Value1 Value2 Value3 Value4 Value5 Value6 Value7

Period (time points T1 ~ T7) are continuous data points in minutes. Note that alerts are not supported above minute-by-minute periods, as they would not be efficient. Values (Value1 ~ Value7) are the values or labeled values for every time point. Count's value (N) means there are N values in the window that match the operator and threshold. Every minute the window shifts automatically: at T8, Value8 is cached and T1/Value1 is removed from the window.
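To make the threshold/op/count semantics concrete, here is a small sketch (plain Python, illustration only, not SkyWalking's implementation) using the endpoint_percent_rule values from the example below (op "<", threshold 75, period 10, count 3): within the last period minutes, count how many values breach the threshold and fire once that count reaches count.

```python
from collections import deque

# Illustration of the threshold/op/count semantics described above, using the
# endpoint_percent_rule values from the example below: op "<", threshold 75,
# period 10, count 3.
PERIOD, COUNT, THRESHOLD = 10, 3, 75

window = deque(maxlen=PERIOD)

def check(value: float) -> bool:
    """Cache the newest minute value and report whether the rule would fire."""
    window.append(value)
    breaches = sum(1 for v in window if v < THRESHOLD)  # op is "<" for this rule
    return breaches >= COUNT

values = [80, 74, 90, 70, 88, 60, 85]  # 74, 70 and 60 are below 75
print([check(v) for v in values])      # fires from the third breach onwards
```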
Composite rules
NOTE: Composite rules are only applicable to alerting rules targeting the same entity level, such as service-level alarm rules (service_percent_rule && service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of service metrics with another rule of endpoint metrics.

A composite rule is made up of the following elements:
Rule name. A unique name shown in the alarm message. Must end with _rule.
Expression. Specifies how to compose rules, and supports &&, ||, and ().
Message. The notification message to be sent out when the rule is triggered.
Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.

rules:
  # Rule unique name, must be ended with `_rule`.
  endpoint_percent_rule:
    # Metrics value need to be long, double or int
    metrics-name: endpoint_percent
    threshold: 75
    op: <
    # The length of time to evaluate the metrics
    period: 10
    # How many times after the metrics match the condition, will trigger alarm
    count: 3
    # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.
    silence-period: 10
    # Specify if the rule can send notification or just as a condition of composite rule
    only-as-condition: false
    tags:
      level: WARNING
  service_percent_rule:
    metrics-name: service_percent
    # [Optional] Default, match all services in this metrics
    include-names:
      - service_a
      - service_b
    exclude-names:
      - service_c
    # Single value metrics threshold.
    threshold: 85
    op: <
    period: 10
    count: 4
    only-as-condition: false
  service_resp_time_percentile_rule:
    # Metrics value need to be long, double or int
    metrics-name: service_percentile
    op: ">"
    # Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.
    threshold: 1000,1000,1000,1000,1000
    period: 10
    count: 3
    silence-period: 5
    message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 > 1000, p75 > 1000, p90 > 1000, p95 > 1000, p99 > 1000
    only-as-condition: false
  meter_service_status_code_rule:
    metrics-name: meter_status_code
    exclude-labels:
      - "200"
    op: ">"
    threshold: 10
    period: 10
    count: 3
    silence-period: 5
    message: The request number of entity {name} non-200 status is more than expected.
    only-as-condition: false
composite-rules:
  comp_rule:
    # Must satisfy both the percent rule and the response time rule
    expression: service_percent_rule && service_resp_time_percentile_rule
    message: Service {name} successful rate is less than 80% and P50 of response time is over 1000ms
    tags:
      level: CRITICAL

Default alarm rules
For convenience's sake, we have provided a default alarm-settings.yml in our release. It includes the following rules:
Service average response time over 1s in the last 3 minutes.
Service success rate lower than 80% in the last 2 minutes.
Percentile of service response time over 1s in the last 3 minutes.
Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex.
Endpoint average response time over 1s in the last 2 minutes.
Database access average response time over 1s in the last 2 minutes.
Endpoint relation average response time over 1s in the last 2 minutes.

List of all potential metrics name
The metrics names are defined in the official OAL scripts and MAL scripts. The Event names can also serve as metrics names; all possible event names can be found in the Event doc.
Currently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, and Endpoint Relation scopes can be used in Alarm; the Database access scope is the same as Service.
Submit an issue or a pull request if you want to support any other scopes in Alarm.

Webhook
The Webhook requires the peer to be a web container. The alarm message will be sent through an HTTP POST with the application/json content type. The JSON format is based on List<org.apache.skywalking.oap.server.core.alarm.AlarmMessage> with the following key information:
scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine.
name. Target scope entity name. Please follow the entity name definitions.
id0.
The ID of the scope entity that matches the name. When using the relation scope, it is the source entity ID.
id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty.
ruleName. The rule name configured in alarm-settings.yml.
alarmMessage. The alarm text message.
startTime. The alarm time in milliseconds, elapsed since midnight of January 1, 1970 UTC.
tags. The tags configured in alarm-settings.yml.

See the following example:

[{
  "scopeId": 1, "scope": "SERVICE", "name": "serviceA", "id0": "12", "id1": "",
  "ruleName": "service_resp_time_rule", "alarmMessage": "alarmMessage xxxx",
  "startTime": 1560524171000,
  "tags": [{ "key": "level", "value": "WARNING" }]
}, {
  "scopeId": 1, "scope": "SERVICE", "name": "serviceB", "id0": "23", "id1": "",
  "ruleName": "service_resp_time_rule", "alarmMessage": "alarmMessage yyy",
  "startTime": 1560524171000,
  "tags": [{ "key": "level", "value": "CRITICAL" }]
}]

gRPCHook
The alarm message will be sent through a remote gRPC method with the Protobuf content type. The message contains key information which is defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.
Part of the protocol looks like this:

message AlarmMessage {
    int64 scopeId = 1;
    string scope = 2;
    string name = 3;
    string id0 = 4;
    string id1 = 5;
    string ruleName = 6;
    string alarmMessage = 7;
    int64 startTime = 8;
    AlarmTags tags = 9;
}
message AlarmTags {
    // String key, String value pair.
    repeated KeyStringValuePair data = 1;
}
message KeyStringValuePair {
    string key = 1;
    string value = 2;
}

Slack Chat Hook
Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.
The alarm message will be sent through an HTTP POST with the application/json content type if you have configured Slack Incoming Webhooks as follows:

slackHooks:
  textTemplate: |-
    { "type": "section", "text": { "type": "mrkdwn", "text": ":alarm_clock: *Apache Skywalking Alarm* \n **%s**." } }
  webhooks:
    - https://hooks.slack.com/services/x/y/z

WeChat Hook
Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through an HTTP POST with the application/json content type after you have set up Wechat Webhooks as follows:

wechatHooks:
  textTemplate: |-
    { "msgtype": "text", "text": { "content": "Apache SkyWalking Alarm: \n %s." } }
  webhooks:
    - https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_key

DingTalk Hook
Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes.
The alarm message will be sent through an HTTP POST with the application/json content type if you have configured DingTalk Webhooks as follows:

dingtalkHooks:
  textTemplate: |-
    { "msgtype": "text", "text": { "content": "Apache SkyWalking Alarm: \n %s." } }
  webhooks:
    - url: https://oapi.dingtalk.com/robot/send?access_token=dummy_token
      secret: dummysecret

Feishu Hook
Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu's user_id, separated by ",". The alarm message will be sent through an HTTP POST with the application/json content type if you have configured Feishu Webhooks as follows:

feishuHooks:
  textTemplate: |-
    { "msg_type": "text", "content": { "text": "Apache SkyWalking Alarm: \n %s." }, "ats": "feishu_user_id_1,feishu_user_id_2" }
  webhooks:
    - url: https://open.feishu.cn/open-apis/bot/v2/hook/dummy_token
      secret: dummysecret

WeLink Hook
Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through an HTTP POST with the application/json content type if you have configured WeLink Webhooks as follows:

welinkHooks:
  textTemplate: "Apache SkyWalking Alarm: \n %s."
  webhooks:
    # you may find your own client_id and client_secret in your app, below are dummy, need to change.
    - client_id: "dummy_client_id"
      client_secret: dummy_secret_key
      access_token_url: https://open.welink.huaweicloud.com/api/auth/v2/tickets
      message_url: https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat
      # if you send to multiple groups at a time, separate group_ids with commas, e.g. "123xx","456xx"
      group_ids: "dummy_group_id"
      # make a name you like for the robot, it will be displayed in the group
      robot_name: robot

PagerDuty Hook
The PagerDuty hook is based on Events API v2.
Follow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.
Then configure as follows:

pagerDutyHooks:
  textTemplate: "Apache SkyWalking Alarm: \n %s."
  integrationKeys:
    - 5c6d805c9dcf4e03d09dfa81e8789ba1

You can also configure multiple integration keys.

Discord Hook
Follow the Discord Webhooks guide and create a new webhook.
Then configure as follows:

discordHooks:
  textTemplate: "Apache SkyWalking Alarm: \n %s."
  webhooks:
    - url: https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBl
      username: robot

Update the settings dynamically
Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.
In order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (metrics-name, op, threshold, period, count, etc.)
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.

Keys with data types of alerting rule configuration file

Alerting element     | Configuration property key | Type           | Description
Include names        | include-names              | string array   |
Exclude names        | exclude-names              | string array   |
Include names regex  | include-names-regex        | string         | Java regex Pattern
Exclude names regex  | exclude-names-regex        | string         | Java regex Pattern
Include labels       | include-labels             | string array   |
Exclude labels       | exclude-labels             | string array   |
Include labels regex | include-labels-regex       | string         | Java regex Pattern
Exclude labels regex | exclude-labels-regex       | string         | Java regex Pattern
Tags                 | tags                       | key-value pair |
Threshold            | threshold                  | number         |
OP                   | op                         | operator       | example: >, >=
Period               | period                     | int            |
Count                | count                      | int            |
Only as condition    | only-as-condition          | boolean        |
Silence period       | silence-period             | int            |
Message              | message                    | string         |
","excerpt":"Alerting Alerting mechanism measures system performance according to the metrics of …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-alarm/","title":"Alerting"}
If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Count\u0026rsquo;s value(N) represents there are N values in the window matched the operator and threshold. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  Composite rules NOTE: Composite rules are only applicable to alerting rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. 
Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. 
All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. 
The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. 
\u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotPagerDuty Hook The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerDutyHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integrationKeys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Hook Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscordHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (metrics-name, op, threshold, period, count, etc.) of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Include labels include-labels string array    Exclude labels exclude-labels string array    Include labels regex include-labels-regex string Java regex Pattern   Exclude labels regex exclude-labels-regex string Java regex Pattern   Tags tags key-value pair    Threshold threshold number    OP op operator example: \u0026gt;, \u0026gt;=   Period Period int    Count count int    Only as condition only-as-condition boolean    Silence period silence-period int    Message message string     ","excerpt":"Alerting Alerting mechanism measures system performance according to the metrics of …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-alarm/","title":"Alerting"},{"body":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alerting is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alerting is triggered.  
Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. 
Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Count\u0026rsquo;s value(N) represents there are N values in the window matched the operator and threshold. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  Composite rules NOTE: Composite rules are only applicable to alerting rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. 
Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. 
You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is the Feishu user_id, separated by commas. The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:

feishuHooks:
  textTemplate: |-
    {
      "msg_type": "text",
      "content": {
        "text": "Apache SkyWalking Alarm: \n %s."
      },
      "ats": "feishu_user_id_1,feishu_user_id_2"
    }
  webhooks:
    - url: https://open.feishu.cn/open-apis/bot/v2/hook/dummy_token
      secret: dummysecret

WeLink Hook
Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:

welinkHooks:
  textTemplate: "Apache SkyWalking Alarm: \n %s."
  webhooks:
    # you may find your own client_id and client_secret in your app, below are dummy, need to change.
    - client_id: "dummy_client_id"
      client_secret: dummy_secret_key
      access_token_url: https://open.welink.huaweicloud.com/api/auth/v2/tickets
      message_url: https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat
      # if you send to multiple groups at a time, separate group_ids with commas, e.g. "123xx","456xx"
      group_ids: "dummy_group_id"
      # make a name you like for the robot, it will be displayed in the group
      robot_name: robot

PagerDuty Hook
The PagerDuty hook is based on Events API v2.
Follow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.
Then configure as follows:

pagerDutyHooks:
  textTemplate: "Apache SkyWalking Alarm: \n %s."
  integrationKeys:
    - 5c6d805c9dcf4e03d09dfa81e8789ba1

You can also configure multiple integration keys.

Discord Hook
Follow the Discord Webhooks guide and create a new webhook.
Then configure as follows:

discordHooks:
  textTemplate: "Apache SkyWalking Alarm: \n %s."
  webhooks:
    - url: https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBl
      username: robot

Update the settings dynamically
Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.
In order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (metrics-name, op, threshold, period, count, etc.)
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.

Keys with data types of the alerting rule configuration file:

| Alerting element     | Configuration property key | Type           | Description        |
|----------------------|----------------------------|----------------|--------------------|
| Include names        | include-names              | string array   |                    |
| Exclude names        | exclude-names              | string array   |                    |
| Include names regex  | include-names-regex        | string         | Java regex Pattern |
| Exclude names regex  | exclude-names-regex        | string         | Java regex Pattern |
| Include labels       | include-labels             | string array   |                    |
| Exclude labels       | exclude-labels             | string array   |                    |
| Include labels regex | include-labels-regex       | string         | Java regex Pattern |
| Exclude labels regex | exclude-labels-regex       | string         | Java regex Pattern |
| Tags                 | tags                       | key-value pair |                    |
| Threshold            | threshold                  | number         |                    |
| OP                   | op                         | operator       | example: >, >=     |
| Period               | period                     | int            |                    |
| Count                | count                      | int            |                    |
| Only as condition    | only-as-condition          | boolean        |                    |
| Silence period       | silence-period             | int            |                    |
| Message              | message                    | string         |                    |
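To make the period/count/threshold mechanics of these rules easier to picture, the following is a small, purely illustrative Python sketch of a count-based check over the cached time window for a single-value metric. It is a conceptual model only, not SkyWalking's implementation; the class and function names are made up for this example.

# Conceptual sketch of a count-based alarm check over a sliding window.
# Not SkyWalking's code; the names and structure are illustrative only.
import operator
from collections import deque

OPS = {">": operator.gt, ">=": operator.ge, "<": operator.lt,
       "<=": operator.le, "==": operator.eq, "!=": operator.ne}

class Rule:
    def __init__(self, op, threshold, period, count):
        self.compare = OPS[op]
        self.threshold = threshold
        self.count = count
        self.window = deque(maxlen=period)  # keeps the last `period` minutes

    def add_minute(self, value):
        """Cache one data point and report whether the rule would fire."""
        self.window.append(value)  # the oldest point drops out automatically
        matched = sum(1 for v in self.window if self.compare(v, self.threshold))
        return matched >= self.count

# Example: service_resp_time > 1000 for at least 3 of the last 10 minutes.
rule = Rule(op=">", threshold=1000, period=10, count=3)
for value in [900, 1200, 800, 1500, 1100, 950]:
    fired = rule.add_minute(value)
print(fired)  # True: 1200, 1500 and 1100 exceed the threshold

The silence-period behavior (suppressing repeat notifications after a trigger) is not modeled here; the sketch only covers the window and count check.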
Alerting
Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.
The alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are two parts to alerting rule definitions:

- alerting rules. They define how metrics alerting should be triggered and what conditions should be considered.
- hooks. The list of hooks, which should be called after an alerting is triggered.

Entity name
Defines the relation between scope and entity name.

- Service: Service name
- Instance: {Instance name} of {Service name}
- Endpoint: {Endpoint name} in {Service name}
- Service Relation: {Source service name} to {Dest service name}
- Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name}
- Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}

Rules
An alerting rule is made up of the following elements:

- Rule name. A unique name shown in the alarm message. It must end with _rule.
- Expression. An MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides a 1 (true) or 0 (false) result. When the result is 1 (true), the alarm will be triggered. For example, avg(service_resp_time / 1000) > 1 is a valid expression to indicate the request latency is slower than 1s. Typical illegal expressions are:
  - avg(service_resp_time > 1000) + 1: the expression root doesn't use a Compare Operation.
  - service_resp_time > 1000: the expression returns a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.
  The metrics names in the expression could be found in the list of all potential metrics name doc.
- Include names. Entity names that are included in this rule. Please follow the entity name definitions.
- Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions.
- Include names regex. A regex that includes entity names. If both the include-name list and the include-name regex are set, both rules will take effect.
- Exclude names regex. A regex that excludes entity names. Both rules will take effect if both the exclude-name list and the exclude-name regex are set.
- Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.
  If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.
- Period. The size of the metrics cache, in minutes, for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time.
- Hooks. Binding of the specific hook names to be called when the alarm is triggered. The name format is {hookType}.{hookName} (e.g. slack.custom1) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used.
- Silence period. After the alarm is triggered at Time-N (TN), there will be silence during TN -> TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.

Such as for a metric, there is a shifting window as follows at T7:

| T1     | T2     | T3     | T4     | T5     | T6     | T7     |
|--------|--------|--------|--------|--------|--------|--------|
| Value1 | Value2 | Value3 | Value4 | Value5 | Value6 | Value7 |

- Period (time points T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient.
- Values (Value1 ~ Value7) are the values or labeled values for every time point.
- The expression is calculated based on the metric values (Value1 ~ Value7). For example, for the expression avg(service_resp_time) > 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) > 1000 and the result would be 1 (true). Then the alarm would be triggered.
- In every minute, the window shifts automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.

NOTE:

- If the expression includes labeled metrics and the result has multiple labeled values (e.g. sum(service_percentile{_='0,1'} > 1000) >= 3), the alarm will be triggered if any of the labeled values matches the condition 3 times (P50 > 1000 or P75 > 1000).
- One alarm rule targets a single entity level, such as a service-level expression (avg(service_resp_time) > 1000). Set entity names (Include/Exclude names, ...) according to the metrics entity level; do not mix metrics of different entity levels in the same expression, such as service metrics and endpoint metrics.
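To make the window arithmetic above concrete, here is a small, purely illustrative Python sketch of how expressions such as avg(service_resp_time) > 1000 or sum(service_percentile{...} > 1000) >= 3 could be evaluated over the cached window. It is a conceptual model of the description above, not OAP's implementation, and the function names are made up; the official example rules section follows right after it.

# Conceptual sketch of evaluating alarm expressions over a cached window.
# Illustrative only; this is NOT SkyWalking's actual evaluation code.

window = [1001, 1001, 1001, 1001, 1001, 1001, 1001]  # Value1..Value7 for one entity

def avg_greater_than(values, threshold):
    # Mirrors avg(service_resp_time) > 1000: returns 1 (true) or 0 (false).
    return 1 if sum(values) / len(values) > threshold else 0

def count_points_over(values, threshold, min_count):
    # Mirrors sum(service_percentile{...} > 1000) >= 3 for one labeled series:
    # count the points over the threshold, then compare against min_count.
    over = sum(1 for v in values if v > threshold)
    return 1 if over >= min_count else 0

print(avg_greater_than(window, 1000))      # 1 -> alarm triggered
print(count_points_over(window, 1000, 3))  # 1 -> alarm triggered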
An example rules section of alarm-settings.yml:

rules:
  # Rule unique name, must be ended with `_rule`.
  endpoint_percent_rule:
    # An MQE expression; the root operation of the expression must be a Compare Operation.
    expression: sum((endpoint_sla / 100) < 75) >= 3
    # The length of time to evaluate the metrics
    period: 10
    # How long the alarm keeps silent after it is triggered; defaults to the same value as period.
    silence-period: 10
    message: Successful rate of endpoint {name} is lower than 75%
    tags:
      level: WARNING
  service_percent_rule:
    expression: sum((service_sla / 100) < 85) >= 4
    # [Optional] Default, match all services in this metrics
    include-names:
      - service_a
      - service_b
    exclude-names:
      - service_c
    period: 10
    message: Service {name} successful rate is less than 85%
  service_resp_time_percentile_rule:
    expression: sum(service_percentile{_='0,1,2,3,4'} > 1000) >= 3
    period: 10
    silence-period: 5
    message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 > 1000, p75 > 1000, p90 > 1000, p95 > 1000, p99 > 1000
  meter_service_status_code_rule:
    expression: sum(aggregate_labels(meter_status_code{_='4xx,5xx'}, sum) > 10) > 3
    period: 10
    count: 3
    silence-period: 5
    message: The request number of entity {name} 4xx and 5xx status is more than expected.
    hooks:
      - "slack.custom1"
      - "pagerduty.custom1"
  comp_rule:
    expression: (avg(service_sla / 100) > 80) * (avg(service_percentile{_='0'}) > 1000) == 1
    period: 10
    message: Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.
    tags:
      level: CRITICAL
    hooks:
      - "slack.default"
      - "slack.custom1"
      - "pagerduty.custom1"

Default alarm rules
For convenience's sake, we have provided a default alarm-settings.yml in our release. It includes the following rules:

- Service average response time over 1s in the last 3 minutes.
- Service success rate lower than 80% in the last 2 minutes.
- Percentile of service response time over 1s in the last 3 minutes.
- Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex.
- Endpoint average response time over 1s in the last 2 minutes.
- Database access average response time over 1s in the last 2 minutes.
- Endpoint relation average response time over 1s in the last 2 minutes.

List of all potential metrics name
The metrics names are defined in the official OAL scripts and MAL scripts.
Currently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, and Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.
Submit an issue or a pull request if you want to support any other scopes in Alarm.

Hooks
Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, and each hook can have its own configuration. For example, you can configure two Slack hooks: one named default with is-default: true, which applies to all alarm rules that do not configure hooks, and another named custom1, which applies only to alarm rules that configure hooks and include the name slack.custom1. For example:
hooks:
  slack:
    # default here is just a name, set the field 'is-default: true' if this notification hook is expected to be default globally.
    default:
      # If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.
      is-default: true
      text-template: |-
        {
          "type": "section",
          "text": {
            "type": "mrkdwn",
            "text": ":alarm_clock: *Apache Skywalking Alarm* \n **%s**."
          }
        }
      webhooks:
        - https://hooks.slack.com/services/x/y/zssss
    custom1:
      text-template: |-
        {
          "type": "section",
          "text": {
            "type": "mrkdwn",
            "text": ":alarm_clock: *Apache Skywalking Alarm* \n **%s**."
          }
        }
      webhooks:
        - https://hooks.slack.com/services/x/y/custom1

Currently, SkyWalking supports the following hook types:

Webhook
The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type after you have set up Webhook hooks as follows:

webhook:
  default:
    is-default: true
    urls:
      - http://ip:port/xxx
      - http://ip:port/yyy

The JSON format is based on List<org.apache.skywalking.oap.server.core.alarm.AlarmMessage> with the following key information:

- scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine.
- name. Target scope entity name. Please follow the entity name definitions.
- id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID.
- id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty.
- ruleName. The rule name configured in alarm-settings.yml.
- alarmMessage. The alarm text message.
- startTime. The alarm time, measured in milliseconds elapsed since midnight of January 1, 1970 UTC.
- tags. The tags configured in alarm-settings.yml.

See the following example:

[{
  "scopeId": 1,
  "scope": "SERVICE",
  "name": "serviceA",
  "id0": "12",
  "id1": "",
  "ruleName": "service_resp_time_rule",
  "alarmMessage": "alarmMessage xxxx",
  "startTime": 1560524171000,
  "tags": [{"key": "level", "value": "WARNING"}]
}, {
  "scopeId": 1,
  "scope": "SERVICE",
  "name": "serviceB",
  "id0": "23",
  "id1": "",
  "ruleName": "service_resp_time_rule",
  "alarmMessage": "alarmMessage yyy",
  "startTime": 1560524171000,
  "tags": [{"key": "level", "value": "CRITICAL"}]
}]
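To try the Webhook hook end to end, the following is a minimal, illustrative receiver for the JSON payload shown above, written with Python's standard library. It is only a sketch: the port (8765), the example path, and the logging behavior are arbitrary choices for this illustration, not part of SkyWalking.

# Minimal sketch of a webhook peer that accepts SkyWalking alarm messages.
# Illustrative only: the port and the handling logic are arbitrary choices.
import json
from http.server import BaseHTTPRequestHandler, HTTPServer

class AlarmHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        length = int(self.headers.get("Content-Length", 0))
        body = self.rfile.read(length)
        # The payload is a JSON array of AlarmMessage objects.
        for msg in json.loads(body):
            tags = {t["key"]: t["value"] for t in msg.get("tags", [])}
            print(f'[{tags.get("level", "UNKNOWN")}] rule={msg["ruleName"]} '
                  f'entity={msg["name"]} message={msg["alarmMessage"]}')
        self.send_response(200)
        self.end_headers()

if __name__ == "__main__":
    # Point the hook's urls (e.g. http://<host>:8765/alarm) at this server.
    HTTPServer(("0.0.0.0", 8765), AlarmHandler).serve_forever()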
gRPC
The alarm message will be sent through remote gRPC method by Protobuf content type after you have set up gRPC hooks as follows:

gRPC:
  default:
    is-default: true
    target-host: ip
    target-port: port

The message contains key information which is defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.
Part of the protocol looks like this:

message AlarmMessage {
    int64 scopeId = 1;
    string scope = 2;
    string name = 3;
    string id0 = 4;
    string id1 = 5;
    string ruleName = 6;
    string alarmMessage = 7;
    int64 startTime = 8;
    AlarmTags tags = 9;
}

message AlarmTags {
    // String key, String value pair.
    repeated KeyStringValuePair data = 1;
}

message KeyStringValuePair {
    string key = 1;
    string value = 2;
}

Slack Chat
Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.
The alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:

slack:
  default:
    is-default: true
    text-template: |-
      {
        "type": "section",
        "text": {
          "type": "mrkdwn",
          "text": ":alarm_clock: *Apache Skywalking Alarm* \n **%s**."
        }
      }
    webhooks:
      - https://hooks.slack.com/services/x/y/z

WeChat
Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:

wechat:
  default:
    is-default: true
    text-template: |-
      {
        "msgtype": "text",
        "text": {
          "content": "Apache SkyWalking Alarm: \n %s."
        }
      }
    webhooks:
      - https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_key

DingTalk
Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes.
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:

dingtalk:
  default:
    is-default: true
    text-template: |-
      {
        "msgtype": "text",
        "text": {
          "content": "Apache SkyWalking Alarm: \n %s."
        }
      }
    webhooks:
      - url: https://oapi.dingtalk.com/robot/send?access_token=dummy_token
        secret: dummysecret

Feishu
Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is the Feishu user_id, separated by commas. The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:

feishu:
  default:
    is-default: true
    text-template: |-
      {
        "msg_type": "text",
        "content": {
          "text": "Apache SkyWalking Alarm: \n %s."
        },
        "ats": "feishu_user_id_1,feishu_user_id_2"
      }
    webhooks:
      - url: https://open.feishu.cn/open-apis/bot/v2/hook/dummy_token
        secret: dummysecret

WeLink
Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:

welink:
  default:
    is-default: true
    text-template: "Apache SkyWalking Alarm: \n %s."
    webhooks:
      # you may find your own client_id and client_secret in your app, below are dummy, need to change.
      - client-id: "dummy_client_id"
        client-secret: dummy_secret_key
        access-token-url: https://open.welink.huaweicloud.com/api/auth/v2/tickets
        message-url: https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat
        # if you send to multiple groups at a time, separate group_ids with commas, e.g. "123xx","456xx"
        group-ids: "dummy_group_id"
        # make a name you like for the robot, it will be displayed in the group
        robot-name: robot

PagerDuty
The PagerDuty hook is based on Events API v2.
Follow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.
Then configure as follows:

pagerduty:
  default:
    is-default: true
    text-template: "Apache SkyWalking Alarm: \n %s."
    integration-keys:
      - 5c6d805c9dcf4e03d09dfa81e8789ba1

You can also configure multiple integration keys.

Discord
Follow the Discord Webhooks guide and create a new webhook.
Then configure as follows:

discord:
  default:
    is-default: true
    text-template: "Apache SkyWalking Alarm: \n %s."
    webhooks:
      - url: https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBl
        username: robot

Update the settings dynamically
Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.
In order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.)
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.

Keys with data types of the alerting rule configuration file:

| Alerting element    | Configuration property key | Type           | Description        |
|---------------------|----------------------------|----------------|--------------------|
| Expression          | expression                 | string         | MQE expression     |
| Include names       | include-names              | string array   |                    |
| Exclude names       | exclude-names              | string array   |                    |
| Include names regex | include-names-regex        | string         | Java regex Pattern |
| Exclude names regex | exclude-names-regex        | string         | Java regex Pattern |
| Tags                | tags                       | key-value pair |                    |
| Period              | period                     | int            |                    |
| Silence period      | silence-period             | int            |                    |
| Message             | message                    | string         |                    |
| Hooks               | hooks                      | string array   |                    |

ALS Load Balance
Using Satellite as a load balancer between Envoy and OAP can effectively prevent the problem of unbalanced messages received by OAP.
In this case, we mainly use memory queues for intermediate data storage.
Depending on the Envoy instance count and OAP performance, the Satellite transmit performance varies as follows:

| Envoy Instance | Concurrent User | ALS OPS | Satellite CPU | Satellite Memory |
|----------------|-----------------|---------|---------------|------------------|
| 150            | 100             | ~50K    | 1.2C          | 0.5-1.0G         |
| 150            | 300             | ~80K    | 1.8C          | 1.0-1.5G         |
| 300            | 100             | ~50K    | 1.4C          | 0.8-1.2G         |
| 300            | 300             | ~100K   | 2.2C          | 1.3-2.0G         |
| 800            | 100             | ~50K    | 1.5C          | 0.9-1.5G         |
| 800            | 300             | ~100K   | 2.6C          | 1.7-2.7G         |
| 1500           | 100             | ~50K    | 1.7C          | 1.4-2.4G         |
| 1500           | 300             | ~100K   | 2.7C          | 2.3-3.0G         |
| 2300           | 150             | ~50K    | 1.8C          | 1.9-3.1G         |
| 2300           | 300             | ~90K    | 2.5C          | 2.3-4.0G         |
| 2300           | 500             | ~110K   | 3.2C          | 2.8-4.7G         |

Detail Environment
Using a GKE environment, with helm to build the cluster.

| Module        | Version | Replica Count | CPU Limit | Memory Limit | Description                    |
|---------------|---------|---------------|-----------|--------------|--------------------------------|
| OAP           | 8.9.0   | 6             | 12C       | 32Gi         | Using ElasticSearch as Storage |
| Satellite     | 0.4.0   | 1             | 8C        | 16Gi         |                                |
| ElasticSearch | 7.5.1   | 3             | 8         | 16Gi         |                                |

Setting
800 Envoy instances, 100K QPS of ALS. The environment configuration used (use value vs. default value, with the recommended value):
If a certain time delay is accepted, the value can be adjusted larger, which can effectively reduce CPU usage and make the Satellite more stable   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_SENDER_FLUSH_TIME 3000 1000 How long(millisecond) is the memory queue data for each Goroutine to be summarized and sent to OAP This depends on the amount of data in your queue, you can keep it consistent with SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME   OAP SW_CORE_GRPC_MAX_CONCURRENT_CALL 50 4 A link between Satellite and OAP, how many requests parallelism is supported Same with SATELLITE_QUEUE_PARTITION in Satellite    ","excerpt":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the …","ref":"/docs/skywalking-satellite/latest/en/setup/performance/als-load-balance/readme/","title":"ALS Load Balance"},{"body":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the problem of unbalanced messages received by OAP.\nIn this case, we mainly use memory queues for intermediate data storage.\nDeference Envoy Count, OAP performance could impact the Satellite transmit performance.\n   Envoy Instance Concurrent User ALS OPS Satellite CPU Satellite Memory     150 100 ~50K 1.2C 0.5-1.0G   150 300 ~80K 1.8C 1.0-1.5G   300 100 ~50K 1.4C 0.8-1.2G   300 300 ~100K 2.2C 1.3-2.0G   800 100 ~50K 1.5C 0.9-1.5G   800 300 ~100K 2.6C 1.7-2.7G   1500 100 ~50K 1.7C 1.4-2.4G   1500 300 ~100K 2.7C 2.3-3.0G   2300 150 ~50K 1.8C 1.9-3.1G   2300 300 ~90K 2.5C 2.3-4.0G   2300 500 ~110K 3.2C 2.8-4.7G    Detail Environment Using GKE Environment, helm to build cluster.\n   Module Version Replicate Count CPU Limit Memory Limit Description     OAP 8.9.0 6 12C 32Gi Using ElasticSearch as Storage   Satellite 0.4.0 1 8C 16Gi    ElasticSearch 7.5.1 3 8 16Gi     Setting 800 Envoy, 100K QPS ALS.\n   Module Environment Config Use Value Default Value Description Recommend Value     Satellite SATELLITE_QUEUE_PARTITION 50 4 Support several goroutines concurrently to consume the queue Satellite CPU number * 4-6, It could help improve throughput, but the default value also could handle 800 Envoy Instance and 100K QPS ALS message.   Satellite SATELLITE_QUEUE_EVENT_BUFFER_SIZE 3000 1000 The size of the queue in each concurrency This is related to the number of Envoys. If the number of Envoys is large, it is recommended to increase the value.   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME 3000 1000 When the Satellite receives the message, how long(millisecond) will the ALS message be merged into an Event. 
If a certain time delay is accepted, the value can be adjusted larger, which can effectively reduce CPU usage and make the Satellite more stable   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_SENDER_FLUSH_TIME 3000 1000 How long(millisecond) is the memory queue data for each Goroutine to be summarized and sent to OAP This depends on the amount of data in your queue, you can keep it consistent with SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME   OAP SW_CORE_GRPC_MAX_CONCURRENT_CALL 50 4 A link between Satellite and OAP, how many requests parallelism is supported Same with SATELLITE_QUEUE_PARTITION in Satellite    ","excerpt":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/performance/als-load-balance/readme/","title":"ALS Load Balance"},{"body":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and Service Mesh Traffic(Access Log in gRPC) are able to be analyzed by OAL, to build metrics of services, service instances and endpoints, and to build topology/dependency of services, service instances and endpoints(traces-oriented analysis only).\nThe spans of traces relative with RPC, such as HTTP, gRPC, Dubbo, RocketMQ, Kafka, would be converted to service input/output traffic, like access logs collected from service mesh. Both of those traffic would be cataloged as the defined sources in the Observability Analysis Language engine.\nThe metrics are customizable through Observability Analysis Language(OAL) scripts, and the topology/dependency is built by the SkyWalking OAP kernel automatically without explicit OAL scripts.\nObservability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nOAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically. Don\u0026rsquo;t expect to mount the changes of those scripts in the runtime. If your OAP servers are running in a cluster mode, these script defined metrics should be aligned.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. 
In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. 
The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. 
service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and …","ref":"/docs/main/latest/en/concepts-and-designs/oal/","title":"Analysis Native Streaming Traces and Service Mesh Traffic"},{"body":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and Service Mesh Traffic(Access Log in gRPC) are able to be analyzed by OAL, to build metrics of services, service instances and endpoints, and to build topology/dependency of services, service instances and endpoints(traces-oriented analysis only).\nThe spans of traces relative with RPC, such as HTTP, gRPC, Dubbo, RocketMQ, Kafka, would be converted to service input/output traffic, like access logs collected from service mesh. Both of those traffic would be cataloged as the defined sources in the Observability Analysis Language engine.\nThe metrics are customizable through Observability Analysis Language(OAL) scripts, and the topology/dependency is built by the SkyWalking OAP kernel automatically without explicit OAL scripts.\nObservability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nOAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically. Don\u0026rsquo;t expect to mount the changes of those scripts in the runtime. If your OAP servers are running in a cluster mode, these script defined metrics should be aligned.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   
instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile2(10);\n percentile (deprecated since 10.0.0) is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. percentile2 Since 10.0.0, the percentile function has been instead by percentile2. The percentile2 function is a labeled-value metric with default label name p and label values 50,75,90,95,99. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. 
In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile2(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. 
service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and …","ref":"/docs/main/next/en/concepts-and-designs/oal/","title":"Analysis Native Streaming Traces and Service Mesh Traffic"},{"body":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and Service Mesh Traffic(Access Log in gRPC) are able to be analyzed by OAL, to build metrics of services, service instances and endpoints, and to build topology/dependency of services, service instances and endpoints(traces-oriented analysis only).\nThe spans of traces relative with RPC, such as HTTP, gRPC, Dubbo, RocketMQ, Kafka, would be converted to service input/output traffic, like access logs collected from service mesh. Both of those traffic would be cataloged as the defined sources in the Observability Analysis Language engine.\nThe metrics are customizable through Observability Analysis Language(OAL) scripts, and the topology/dependency is built by the SkyWalking OAP kernel automatically without explicit OAL scripts.\nObservability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nOAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically. Don\u0026rsquo;t expect to mount the changes of those scripts in the runtime. If your OAP servers are running in a cluster mode, these script defined metrics should be aligned.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   
instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  
mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/oal/","title":"Analysis Native Streaming Traces and Service Mesh Traffic"},{"body":" COMMUNITY SkyWalking has received contributions from 882 individuals now.   Apache SkyWalking    Application performance monitor tool for distributed systems, especially designed for microservices, cloud native and container-based (Kubernetes) architectures.  
Quick start  Live demo  Username: skywalking Password: skywalking Go to native UI    Preview metrics on Grafana        Agent for Service       Topology       Trace       eBPF Profiling       Database       Kubernetes       Linux       Service Mesh          Agent for Service  Topology  Trace  eBPF Profiling  Database  Kubernetes  Linux  Service Mesh    All-in-one APM solution   Distributed Tracing  End-to-end distributed tracing. Service topology analysis, service-centric observability and APIs dashboards.   Agents for your stack  Java, .Net Core, PHP, NodeJS, Golang, LUA, Rust, C++, Client JavaScript and Python agents with active development and maintenance.   eBPF early adoption  Rover agent works as a monitor and profiler powered by eBPF to monitor Kubernetes deployments and diagnose CPU and network performance.    Scaling  100+ billion telemetry data could be collected and analyzed from one SkyWalking cluster.   Mature Telemetry Ecosystems Supported  Metrics, Traces, and Logs from mature ecosystems are supported, e.g. Zipkin, OpenTelemetry, Prometheus, Zabbix, Fluentd   Native APM Database  BanyanDB, an observability database, created in 2022, aims to ingest, analyze and store telemetry/observability data.    Consistent Metrics Aggregation  SkyWalking native meter format and widely known metrics format(OpenCensus, OTLP, Telegraf, Zabbix, e.g.) are processed through the same script pipeline.   Log Management Pipeline  Support log formatting, extract metrics, various sampling policies through script pipeline in high performance.   Alerting and Telemetry Pipelines  Support service-centric, deployment-centric, API-centric alarm rule setting. Support forwarding alarms and all telemetry data to 3rd party.      Events \u0026amp; Blogs  Welcome Zixin Zhou as new committer Mon, Apr 15, 2024 Zixin Zhou(GitHub ID, CodePrometheus[1]) began the code contributions since Oct 28, 2023. Up to …\n   Release Apache SkyWalking Eyes 0.6.0 Fri, Apr 12, 2024 SkyWalking Eyes 0.6.0 is released. Go to downloads page to find release tars. Add | as comment …\n   Release Apache SkyWalking Java Agent 9.2.0 Mon, Apr 1, 2024 SkyWalking Java Agent 9.2.0 is released. Go to downloads page to find release tars. Changes by …\n   Monitoring ActiveMQ through SkyWalking Fri, Apr 19, 2024 Introduction Apache ActiveMQ Classic is a popular and powerful open-source messaging and integration …\n   Monitoring Kubernetes network traffic by using eBPF Mon, Mar 18, 2024 Background Apache SkyWalking is an open-source Application Performance Management system that helps …\n   Monitoring Clickhouse Server through SkyWalking Tue, Mar 12, 2024 Background ClickHouse is an open-source column-oriented database management system that allows …\n    Ready to get started?  Run SkyWalking in a snap Try this demo music application to showcase features of Apache SkyWalking in action.\n Quick start     All releases    Stay tuned with SkyWalking   Questions/bugs? Features requests, questions or report bugs? Feel free to open a discussion or file an issue.\n  Join our slack workspace! Send \"Request to join SkyWalking slack\" mail to dev@skywalking.apache.org. We will invite you in.\n  Follow us on Twitter For announcement of latest features etc, stay tuned with @ASFSkyWalking.    ","excerpt":"COMMUNITY SkyWalking has received contributions from 882 individuals now.   
Apache SkyWalking …","ref":"/","title":"Apache SkyWalking"},{"body":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","excerpt":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/containerization/","title":"Apache SkyWalking Agent Containerized Scenarios"},{"body":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... 
build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","excerpt":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/containerization/","title":"Apache SkyWalking Agent Containerized Scenarios"},{"body":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release the SkyWalking BanyanDB in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-banyandb and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-banyandb \u0026amp;\u0026amp; cd skywalking-banyandb git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking BanyanDB $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release-assembly The skywalking-banyandb-${VERSION}-bin.tgz, skywalking-banyandb-${VERSION}-src.tgz, and their corresponding asc, sha512. 
In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.asc skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.sha512 skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cd skywalking/banyandb \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking BanyanDB release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking BanyanDB version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking BanyanDB version $VERSION. Release notes: * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-banyandb-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-banyandb-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-banyandb/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/docs/installation.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-banyandb-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.asc apache-skywalking-banyandb-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking BanyanDB version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   
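Before moving on to publishing, the vote checks listed above can be replayed in one pass. A minimal sketch, assuming the apache-skywalking-banyandb-{src,bin}-$VERSION.tgz packages together with their .asc and .sha512 files have already been downloaded into the current directory:

export VERSION=<the version to release>
# Import the SkyWalking release KEYS, as described in the vote check list.
curl https://www.apache.org/dist/skywalking/KEYS -o KEYS && gpg --import KEYS
for pkg in src bin; do
  # Verify the sha512 checksum of each package.
  shasum -c "apache-skywalking-banyandb-${pkg}-${VERSION}.tgz.sha512"
  # Verify the GPG signature of each package.
  gpg --batch --verify "apache-skywalking-banyandb-${pkg}-${VERSION}.tgz.asc" "apache-skywalking-banyandb-${pkg}-${VERSION}.tgz"
done

This only covers the checksum and signature checks; the license header check and building the distribution from the source package still need to be done by following the build guide.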
Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking BanyanDB $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking BanyanDB $VERSION is now released. SkyWalking BanyanDB: An observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking BanyanDB Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-banyandb/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release …","ref":"/docs/skywalking-banyandb/latest/release/","title":"Apache SkyWalking BanyanDB release guide"},{"body":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release the SkyWalking BanyanDB in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-banyandb and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-banyandb \u0026amp;\u0026amp; cd skywalking-banyandb git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking BanyanDB $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release-assembly The skywalking-banyandb-${VERSION}-bin.tgz, skywalking-banyandb-${VERSION}-src.tgz, and their corresponding asc, sha512. 
In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.asc skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.sha512 skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cd skywalking/banyandb \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking BanyanDB release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking BanyanDB version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking BanyanDB version $VERSION. Release notes: * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-banyandb-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-banyandb-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-banyandb/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/docs/installation/binaries.md#Build-From-Source Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-banyandb-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.asc apache-skywalking-banyandb-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking BanyanDB version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   
Publish release   Move the source code tarballs and distributions to https://dist.apache.org/repos/dist/release/skywalking/; you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb # .... # enter your apache password # ....   Remove the previously released tarballs from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR and update the news and links on the website. There are seven files that need to be modified.\n  Update the GitHub release page, following the previous convention.\n  Send the ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org; the sender should use his/her Apache email account. You can get the permalink of the vote thread here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking BanyanDB $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking BanyanDB $VERSION is now released. SkyWalking BanyanDB: An observability database that aims to ingest, analyze and store Metrics, Tracing and Logging data. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking BanyanDB Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-banyandb/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release …","ref":"/docs/skywalking-banyandb/next/release/","title":"Apache SkyWalking BanyanDB release guide"},{"body":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release the SkyWalking BanyanDB in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to the next milestone otherwise) all issues in the current milestone from skywalking-banyandb and skywalking, and create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Log in to id.apache.org and submit your key fingerprint.\n  Add your GPG public key into the SkyWalking GPG KEYS file; you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT overwrite the existing KEYS file content; only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-banyandb \u0026amp;\u0026amp; cd skywalking-banyandb git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking BanyanDB $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release-assembly The skywalking-banyandb-${VERSION}-bin.tgz, skywalking-banyandb-${VERSION}-src.tgz, and their corresponding .asc and .sha512 files. 
In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.asc skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.sha512 skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cd skywalking/banyandb \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking BanyanDB release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for a vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking BanyanDB version $VERSION Content: Hi the SkyWalking Community: This is a call for a vote to release Apache SkyWalking BanyanDB version $VERSION. Release notes: * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-banyandb-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-banyandb-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-banyandb/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/docs/installation.md Voting will start now and will remain open for at least 72 hours; all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in the staging repository are published with .asc, .md5, and .sha files. Source codes and distribution packages (apache-skywalking-banyandb-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION with .asc, .sha512. LICENSE and NOTICE are in the source code and distribution packages. Check shasum -c apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.sha512. Check the GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.asc apache-skywalking-banyandb-{src,bin}-$VERSION.tgz Build the distribution from the source code package by following the build guide. License header check.  The vote result should follow these rules:\n  A PMC vote is +1 binding; all other votes are +1 non-binding.\n  If, within 72 hours, you get at least 3 (+1 binding) votes and more +1 votes than -1 votes, the vote passes.\n  Send the closing vote mail to announce the result. When counting the binding and non-binding votes, please list the names of the voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking BanyanDB version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   
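Once the vote passes, the release manager hands the artifacts over to the release area, as described in the Publish release steps below. A rough sketch of that hand-off, including cleanup of the superseded tarballs; the $PREVIOUS_VERSION placeholder and the one-word commit message are assumptions for illustration, and svn delete on the release URL is only one possible way to do the removal:
export SVN_EDITOR=vim
svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb   # prompts for a commit message and your Apache password
svn delete -m remove-superseded-banyandb-release https://dist.apache.org/repos/dist/release/skywalking/banyandb/$PREVIOUS_VERSION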
Publish release   Move the source code tarballs and distributions to https://dist.apache.org/repos/dist/release/skywalking/; you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb # .... # enter your apache password # ....   Remove the previously released tarballs from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR and update the news and links on the website. There are seven files that need to be modified.\n  Update the GitHub release page, following the previous convention.\n  Send the ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org; the sender should use his/her Apache email account. You can get the permalink of the vote thread here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking BanyanDB $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking BanyanDB $VERSION is now released. SkyWalking BanyanDB: An observability database that aims to ingest, analyze and store Metrics, Tracing and Logging data. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking BanyanDB Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-banyandb/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release …","ref":"/docs/skywalking-banyandb/v0.5.0/release/","title":"Apache SkyWalking BanyanDB release guide"},{"body":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to release the SkyWalking Cloud on Kubernetes in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to the next milestone otherwise) all issues in the current milestone from skywalking-swck and skywalking, and create a new milestone if needed. Update CHANGES.md. Update image tags of adapter and operator.  Add your GPG public key to Apache svn   Log in to id.apache.org and submit your key fingerprint.\n  Add your GPG public key into the SkyWalking GPG KEYS file; you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT overwrite the existing KEYS file content; only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking Cloud on Kubernetes $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release The skywalking-swck-${VERSION}-bin.tgz, skywalking-swck-${VERSION}-src.tgz, and their corresponding .asc and .sha512 files. 
In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.asc skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.sha512 skywalking/swck/\u0026#34;$VERSION\u0026#34; cd skywalking/swck \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-SWCK release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list.\nSubject: [ANNOUNCEMENT] SkyWalking Cloud on Kubernetes $VERSION test build available Content: The test build of SkyWalking Cloud on Kubernetes $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/operator.md#build-from-sources * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/custom-metrics-adapter.md#use-kustomize-to-customise-your-deployment * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz Release Tag : * (Git Tag) $VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. 
[1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in the staging repository are published with .asc, .md5, and .sha files. Source codes and distribution packages (apache-skywalking-swck-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION with .asc, .sha512. LICENSE and NOTICE are in the source code and distribution packages. Check shasum -c apache-skywalking-swck-{src,bin}-$VERSION.tgz.sha512. Check the GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-swck-{src,bin}-$VERSION.tgz.asc apache-skywalking-swck-{src,bin}-$VERSION.tgz Build the distribution from the source code package by following the build guide. License header check.  The vote result should follow these rules:\n  A PMC vote is +1 binding; all other votes are +1 non-binding.\n  If, within 72 hours, you get at least 3 (+1 binding) votes and more +1 votes than -1 votes, the vote passes.\n  Send the closing vote mail to announce the result. When counting the binding and non-binding votes, please list the names of the voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move the source code tarballs and distributions to https://dist.apache.org/repos/dist/release/skywalking/; you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck # .... # enter your apache password # ....   Remove the previously released tarballs from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR and update the news and links on the website. There are seven files that need to be modified.\n  Update the GitHub release page, following the previous convention.\n  Send the ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org; the sender should use his/her Apache email account. You can get the permalink of the vote thread here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Cloud on Kubernetes $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Cloud on Kubernetes $VERSION is now released. SkyWalking Cloud on Kubernetes: A bridge platform between Apache SkyWalking and Kubernetes. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. 
Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking Cloud on Kubernetes Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-swck/blob/$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to …","ref":"/docs/skywalking-swck/latest/release/","title":"Apache SkyWalking Cloud on Kubernetes release guide"},{"body":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to release the SkyWalking Cloud on Kubernetes in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-swck and skywalking, create a new milestone if needed. Update CHANGES.md. Update image tags of adapter and operator.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking Cloud on Kubernetes $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release The skywalking-swck-${VERSION}-bin.tgz, skywalking-swck-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.asc skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.sha512 skywalking/swck/\u0026#34;$VERSION\u0026#34; cd skywalking/swck \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-SWCK release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list.\nSubject: [ANNOUNCEMENT] SkyWalking Cloud on Kubernetes $VERSION test build available Content: The test build of SkyWalking Cloud on Kubernetes $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/operator.md#build-from-sources * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/custom-metrics-adapter.md#use-kustomize-to-customise-your-deployment * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz Release Tag : * (Git Tag) $VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-swck-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-swck-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-swck-{src,bin}-$VERSION.tgz.asc apache-skywalking-swck-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. 
When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Cloud on Kubernetes $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Cloud on Kubernetes $VERSION is now released. SkyWalking Cloud on Kubernetes: A bridge platform between Apache SkyWalking and Kubernetes. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking Cloud on Kubernetes Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-swck/blob/$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to …","ref":"/docs/skywalking-swck/next/release/","title":"Apache SkyWalking Cloud on Kubernetes release guide"},{"body":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to release the SkyWalking Cloud on Kubernetes in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-swck and skywalking, create a new milestone if needed. Update CHANGES.md. Update image tags of adapter and operator.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking Cloud on Kubernetes $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release The skywalking-swck-${VERSION}-bin.tgz, skywalking-swck-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.asc skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.sha512 skywalking/swck/\u0026#34;$VERSION\u0026#34; cd skywalking/swck \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-SWCK release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list.\nSubject: [ANNOUNCEMENT] SkyWalking Cloud on Kubernetes $VERSION test build available Content: The test build of SkyWalking Cloud on Kubernetes $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/operator.md#build-from-sources * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/custom-metrics-adapter.md#use-kustomize-to-customise-your-deployment * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. 
Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz Release Tag : * (Git Tag) $VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-swck-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-swck-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-swck-{src,bin}-$VERSION.tgz.asc apache-skywalking-swck-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Cloud on Kubernetes $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Cloud on Kubernetes $VERSION is now released. SkyWalking Cloud on Kubernetes: A bridge platform between Apache SkyWalking and Kubernetes. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking Cloud on Kubernetes Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-swck/blob/$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to …","ref":"/docs/skywalking-swck/v0.9.0/release/","title":"Apache SkyWalking Cloud on Kubernetes release guide"},{"body":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. 
This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. 
(NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... 
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","excerpt":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for …","ref":"/docs/main/latest/en/guides/asf/committer/","title":"Apache SkyWalking committer"},{"body":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. 
When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  
If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... 
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","excerpt":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for …","ref":"/docs/main/next/en/guides/asf/committer/","title":"Apache SkyWalking committer"},{"body":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. 
When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  
If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","excerpt":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for …","ref":"/docs/main/v9.7.0/en/guides/asf/committer/","title":"Apache SkyWalking committer"},{"body":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the SkyWalking Go in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-go and skywalking, create a new milestone if needed. Update CHANGES.md. 
Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-go \u0026amp;\u0026amp; cd skywalking-go git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go v$VERSION\u0026#34; git tag -a \u0026#34;toolkit/v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go Toolkit v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-go-${VERSION}-bin.tgz, apache-skywalking-go-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.asc skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.sha512 skywalking/go/\u0026#34;$VERSION\u0026#34; cd skywalking/go \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Go release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Go version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Go version $VERSION. Release notes: * https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-go-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-go-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-go/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-go/blob/v$VERSION/docs/en/development-and-contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-go-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-go-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-go-$VERSION-{src,bin}.tgz.asc skywalking-go-$VERSION-{src,bin}.tgz. 
Build the distribution from the source code package by following this command: make build.  The vote result should follow these rules:\n  PMC votes are +1 binding; all other votes are +1 non-binding.\n  If, within 72 hours, you get at least 3 (+1 binding) votes and more +1 than -1 votes, the vote passes.\n  Send the closing vote mail to announce the result. When counting the binding and non-binding votes, please list the names of the voters. An example looks like this:\n[RESULT][VOTE] Release Apache SkyWalking Go version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move the source code tarballs and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION https://dist.apache.org/repos/dist/release/skywalking/go   Refer to the previous PR and update the event and download links on the website.\n  Update the GitHub release page, following the previous convention.\n  Send the ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org. The sender should use his/her Apache email account; please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Go $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Go $VERSION is now released. SkyWalking Go: The Golang auto-instrument Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Golang projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Go Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-go/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases from the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/) if you no longer recommend users to choose those versions. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-go/latest/en/development-and-contribution/how-to-release/","title":"Apache SkyWalking Go Release Guide"},{"body":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the SkyWalking Go in the Apache Way, and also helps people to check the release for voting.\nPrerequisites  Close (if finished, or move to the next milestone otherwise) all issues in the current milestone from skywalking-go and skywalking, and create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  
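If you want to script the dependency license check above, one possible approach — a sketch only, assuming Apache SkyWalking Eyes (license-eye) is installed and the repository carries a .licenserc.yaml, which may differ from the project's actual tooling — is:
# Sketch, assuming license-eye and a .licenserc.yaml in the repository root.
go install github.com/apache/skywalking-eyes/cmd/license-eye@latest
license-eye -c .licenserc.yaml header check       # license headers in source files
license-eye -c .licenserc.yaml dependency check   # licenses of third-party dependencies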
Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-go \u0026amp;\u0026amp; cd skywalking-go git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go v$VERSION\u0026#34; git tag -a \u0026#34;toolkit/v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go Toolkit v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-go-${VERSION}-bin.tgz, apache-skywalking-go-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.asc skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.sha512 skywalking/go/\u0026#34;$VERSION\u0026#34; cd skywalking/go \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Go release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Go version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Go version $VERSION. Release notes: * https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-go-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-go-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-go/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-go/blob/v$VERSION/docs/en/development-and-contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-go-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-go-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-go-$VERSION-{src,bin}.tgz.asc skywalking-go-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build.  
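A voter might run the checks above roughly as follows (a sketch only; it assumes $VERSION is exported, the candidate files were downloaded from the dev SVN area into the current directory, and the tarball names follow the apache-skywalking-go-$VERSION-{src,bin}.tgz pattern from the build step):
# Sketch: import the release KEYS and verify signatures/checksums before voting.
curl -sSL https://dist.apache.org/repos/dist/release/skywalking/KEYS | gpg --import
for pkg in src bin; do
  gpg --verify apache-skywalking-go-$VERSION-$pkg.tgz.asc apache-skywalking-go-$VERSION-$pkg.tgz
  shasum -c apache-skywalking-go-$VERSION-$pkg.tgz.sha512
done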
Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Go version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION https://dist.apache.org/repos/dist/release/skywalking/go   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Go $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Go $VERSION is now released. SkyWalking Go: The Golang auto-instrument Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Golang projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Go Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-go/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-go/next/en/development-and-contribution/how-to-release/","title":"Apache SkyWalking Go Release Guide"},{"body":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the SkyWalking Go in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-go and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-go \u0026amp;\u0026amp; cd skywalking-go git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go v$VERSION\u0026#34; git tag -a \u0026#34;toolkit/v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go Toolkit v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-go-${VERSION}-bin.tgz, apache-skywalking-go-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.asc skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.sha512 skywalking/go/\u0026#34;$VERSION\u0026#34; cd skywalking/go \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Go release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Go version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Go version $VERSION. Release notes: * https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-go-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-go-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-go/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-go/blob/v$VERSION/docs/en/development-and-contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-go-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-go-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-go-$VERSION-{src,bin}.tgz.asc skywalking-go-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Go version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION https://dist.apache.org/repos/dist/release/skywalking/go   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Go $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Go $VERSION is now released. SkyWalking Go: The Golang auto-instrument Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Golang projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Go Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-go/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/how-to-release/","title":"Apache SkyWalking Go Release Guide"},{"body":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release the SkyWalking Infra E2E in the Apache Way, and also helps people to check the release for voting.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-infra-e2e and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-infra-e2e.git \u0026amp;\u0026amp; cd skywalking-infra-e2e git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Infra-E2E $VERSION\u0026#34; git push --tags make clean make test # this is optional, it runs sanity checks to verify the features make release Upload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e release/skywalking/infra-e2e mkdir -p release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.asc release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.sha512 release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add infra-e2e/$VERSION \u0026amp;\u0026amp; svn commit infra-e2e -m \u0026#34;Draft Apache SkyWalking-Infra-E2E release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Infra E2E version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Infra E2E version $VERSION. Release notes: * https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-e2e-$VERSION-bin.tgz - sha512xxxxyyyzzz skywalking-e2e-$VERSION-src.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-infra-e2e/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-infra-e2e/blob/main/docs/en/contribution/Release-Guidance.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, and sha files. Source codes and distribution packages (skywalking-e2e-$VERSION-src.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-e2e-$VERSION-src.tgz.sha512. Check gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz. Build distribution from source code package by following this the build guide.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Infra E2E version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Infra E2E $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Infra E2E $VERSION is now released. SkyWalking Infra E2E: An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Infra E2E Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release …","ref":"/docs/skywalking-infra-e2e/latest/en/contribution/release-guidance/","title":"Apache SkyWalking Infra E2E Release Guide"},{"body":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release the SkyWalking Infra E2E in the Apache Way, and also helps people to check the release for voting.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-infra-e2e and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-infra-e2e.git \u0026amp;\u0026amp; cd skywalking-infra-e2e git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Infra-E2E $VERSION\u0026#34; git push --tags make clean make test # this is optional, it runs sanity checks to verify the features make release Upload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e release/skywalking/infra-e2e mkdir -p release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.asc release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.sha512 release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add infra-e2e/$VERSION \u0026amp;\u0026amp; svn commit infra-e2e -m \u0026#34;Draft Apache SkyWalking-Infra-E2E release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Infra E2E version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Infra E2E version $VERSION. Release notes: * https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-e2e-$VERSION-bin.tgz - sha512xxxxyyyzzz skywalking-e2e-$VERSION-src.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-infra-e2e/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-infra-e2e/blob/main/docs/en/contribution/Release-Guidance.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, and sha files. Source codes and distribution packages (skywalking-e2e-$VERSION-src.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-e2e-$VERSION-src.tgz.sha512. Check gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz. Build distribution from source code package by following this the build guide.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Infra E2E version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Infra E2E $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Infra E2E $VERSION is now released. SkyWalking Infra E2E: An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Infra E2E Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release …","ref":"/docs/skywalking-infra-e2e/next/en/contribution/release-guidance/","title":"Apache SkyWalking Infra E2E Release Guide"},{"body":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release the SkyWalking Infra E2E in the Apache Way, and also helps people to check the release for voting.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-infra-e2e and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-infra-e2e.git \u0026amp;\u0026amp; cd skywalking-infra-e2e git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Infra-E2E $VERSION\u0026#34; git push --tags make clean make test # this is optional, it runs sanity checks to verify the features make release Upload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e release/skywalking/infra-e2e mkdir -p release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.asc release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.sha512 release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add infra-e2e/$VERSION \u0026amp;\u0026amp; svn commit infra-e2e -m \u0026#34;Draft Apache SkyWalking-Infra-E2E release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Infra E2E version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Infra E2E version $VERSION. Release notes: * https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-e2e-$VERSION-bin.tgz - sha512xxxxyyyzzz skywalking-e2e-$VERSION-src.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-infra-e2e/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-infra-e2e/blob/main/docs/en/contribution/Release-Guidance.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, and sha files. Source codes and distribution packages (skywalking-e2e-$VERSION-src.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-e2e-$VERSION-src.tgz.sha512. Check gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz. Build distribution from source code package by following this the build guide.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Infra E2E version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Infra E2E $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Infra E2E $VERSION is now released. SkyWalking Infra E2E: An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Infra E2E Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/contribution/release-guidance/","title":"Apache SkyWalking Infra E2E Release Guide"},{"body":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking SDK in The Apache Way and start the voting process by reading this document.\nRequirements  Rust(rustc) Cargo PHP(php, php-config) Pecl GPG shasum  Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file.(Notice, only PMC member could update this file) Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Draft a new release Open Create a new release page, choose the tag, and click the Generate release notes button, then copy the generated text to local /tmp/notes.txt.\nTest your settings and package ## Make sure local compiling passed \u0026gt; cargo build ## Create package.xml from package.xml.tpl \u0026gt; cargo run -p scripts --release -- create-package-xml --version x.y.z --notes \u0026#34;`cat /tmp/notes.txt`\u0026#34; ## Create local package. The skywalking_agent-x.y.z.tgz should be found in project root \u0026gt; pecl package Sign the package Tag the commit ID of this release as vx.y.z.\nAfter set the version in Cargo.toml with the release number, package locally. 
Then run the following commands to sign your package.\n\u0026gt; export RELEASE_VERSION=x.y.z ## The package should be signed by your Apache committer mail. \u0026gt; gpg --armor --detach-sig skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; shasum -a 512 skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; skywalking_agent-$RELEASE_VERSION.tgz.sha512 After these, the source tar with its signed asc and sha512 are ready.\nUpload to Apache SVN and tag a release  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/php. Create a folder and name it by the release version and round, such as: x.y.z Upload tar ball, asc, sha512 files to the new folder.  Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking PHP version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking PHP version x.y.z. Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z/ * sha512 checksums - xxxxxxxx skywalking_agent-x.y.z.tgz Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking-php/tree/{commit-id} Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-php/blob/master/docs/en/contribution/compiling.md Voting will start now (Date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release   Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.\n\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z https://dist.apache.org/repos/dist/release/skywalking/php .... enter your apache password ....   Pecl publish package on skywalking_agent.\nMake sure you have a PECL account, and list in package.tpl.xml as \u0026lt;developer\u0026gt;, or reach private@skywalking.apache.org if you are a committer/PMC but not listed.\nYou can request a PECL account via https://pecl.php.net/account-request.php.\n  Add an release event, update download and doc releases on the SkyWalking website.\n  Add the new release on ASF addrelease site.\n  Remove the old releases on https://dist.apache.org/repos/dist/release/skywalking/php/{previous-version}.\n  Send a release announcement Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.\nMail title: [ANNOUNCE] Apache SkyWalking PHP x.y.z released Mail content: Hi all, SkyWalking PHP Agent provides the native tracing abilities for PHP project. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team ","excerpt":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-php/latest/en/contribution/release-agent/","title":"Apache SkyWalking PHP Agent release guide"},{"body":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking SDK in The Apache Way and start the voting process by reading this document.\nRequirements  Rust(rustc) Cargo PHP(php, php-config) Pecl GPG shasum  Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file.(Notice, only PMC member could update this file) Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Draft a new release Open Create a new release page, choose the tag, and click the Generate release notes button, then copy the generated text to local /tmp/notes.txt.\nTest your settings and package ## Make sure local compiling passed \u0026gt; cargo build ## Create package.xml from package.xml.tpl \u0026gt; cargo run -p scripts --release -- create-package-xml --version x.y.z --notes \u0026#34;`cat /tmp/notes.txt`\u0026#34; ## Create local package. The skywalking_agent-x.y.z.tgz should be found in project root \u0026gt; pecl package Sign the package Tag the commit ID of this release as vx.y.z.\nAfter set the version in Cargo.toml with the release number, package locally. Then run the following commands to sign your package.\n\u0026gt; export RELEASE_VERSION=x.y.z ## The package should be signed by your Apache committer mail. \u0026gt; gpg --armor --detach-sig skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; shasum -a 512 skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; skywalking_agent-$RELEASE_VERSION.tgz.sha512 After these, the source tar with its signed asc and sha512 are ready.\nUpload to Apache SVN and tag a release  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/php. Create a folder and name it by the release version and round, such as: x.y.z Upload tar ball, asc, sha512 files to the new folder.  Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking PHP version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking PHP version x.y.z. Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z/ * sha512 checksums - xxxxxxxx skywalking_agent-x.y.z.tgz Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking-php/tree/{commit-id} Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-php/blob/master/docs/en/contribution/compiling.md Voting will start now (Date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... 
Vote Check The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release   Move the source code tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.\n\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z https://dist.apache.org/repos/dist/release/skywalking/php .... enter your apache password ....   Pecl publish package on skywalking_agent.\nMake sure you have a PECL account and are listed in package.tpl.xml as \u0026lt;developer\u0026gt;, or reach out to private@skywalking.apache.org if you are a committer/PMC but not listed.\nYou can request a PECL account via https://pecl.php.net/account-request.php.\n  Add a release event, update download and doc releases on the SkyWalking website.\n  Add the new release on the ASF addrelease site.\n  Remove the old releases on https://dist.apache.org/repos/dist/release/skywalking/php/{previous-version}.\n  Send a release announcement Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.\nMail title: [ANNOUNCE] Apache SkyWalking PHP x.y.z released Mail content: Hi all, SkyWalking PHP Agent provides the native tracing abilities for PHP projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c (the last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Apache SkyWalking Team ","excerpt":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-php/next/en/contribution/release-agent/","title":"Apache SkyWalking PHP Agent release guide"},{"body":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking SDK in The Apache Way and start the voting process by reading this document.\nRequirements  Rust(rustc) Cargo PHP(php, php-config) Pecl GPG shasum  Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in to this svn, and update the file. Don\u0026rsquo;t override the existing file. (Notice: only PMC members could update this file) Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  
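For committers doing this for the first time, the key-publishing steps above can be sketched roughly as follows; the key id, keyserver, checkout path and commit message are placeholders, not values mandated by this guide.\n\u0026gt; gpg --list-keys ## find the long fingerprint of your signing key \u0026gt; gpg --keyserver pgp.mit.edu --send-keys \u0026lt;your-key-id\u0026gt; ## publish the public key to a public keyserver \u0026gt; svn co --depth files https://dist.apache.org/repos/dist/release/skywalking /tmp/skywalking-dist \u0026amp;\u0026amp; cd /tmp/skywalking-dist \u0026gt; (gpg --list-sigs \u0026lt;your-key-id\u0026gt; \u0026amp;\u0026amp; gpg --armor --export \u0026lt;your-key-id\u0026gt;) \u0026gt;\u0026gt; KEYS ## append only, never overwrite existing entries \u0026gt; svn commit KEYS -m \u0026#34;Add GPG key of \u0026lt;your-apache-id\u0026gt;\u0026#34; ## authenticate with your Apache ID and password  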
Draft a new release Open Create a new release page, choose the tag, and click the Generate release notes button, then copy the generated text to local /tmp/notes.txt.\nTest your settings and package ## Make sure local compiling passed \u0026gt; cargo build ## Create package.xml from package.xml.tpl \u0026gt; cargo run -p scripts --release -- create-package-xml --version x.y.z --notes \u0026#34;`cat /tmp/notes.txt`\u0026#34; ## Create local package. The skywalking_agent-x.y.z.tgz should be found in project root \u0026gt; pecl package Sign the package Tag the commit ID of this release as vx.y.z.\nAfter set the version in Cargo.toml with the release number, package locally. Then run the following commands to sign your package.\n\u0026gt; export RELEASE_VERSION=x.y.z ## The package should be signed by your Apache committer mail. \u0026gt; gpg --armor --detach-sig skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; shasum -a 512 skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; skywalking_agent-$RELEASE_VERSION.tgz.sha512 After these, the source tar with its signed asc and sha512 are ready.\nUpload to Apache SVN and tag a release  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/php. Create a folder and name it by the release version and round, such as: x.y.z Upload tar ball, asc, sha512 files to the new folder.  Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking PHP version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking PHP version x.y.z. Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z/ * sha512 checksums - xxxxxxxx skywalking_agent-x.y.z.tgz Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking-php/tree/{commit-id} Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-php/blob/master/docs/en/contribution/compiling.md Voting will start now (Date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release   Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.\n\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z https://dist.apache.org/repos/dist/release/skywalking/php .... enter your apache password ....   Pecl publish package on skywalking_agent.\nMake sure you have a PECL account, and list in package.tpl.xml as \u0026lt;developer\u0026gt;, or reach private@skywalking.apache.org if you are a committer/PMC but not listed.\nYou can request a PECL account via https://pecl.php.net/account-request.php.\n  Add an release event, update download and doc releases on the SkyWalking website.\n  Add the new release on ASF addrelease site.\n  Remove the old releases on https://dist.apache.org/repos/dist/release/skywalking/php/{previous-version}.\n  Send a release announcement Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. 
The sender should use the Apache email account.\nMail title: [ANNOUNCE] Apache SkyWalking PHP x.y.z released Mail content: Hi all, SkyWalking PHP Agent provides the native tracing abilities for PHP projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c (the last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Apache SkyWalking Team ","excerpt":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-php/v0.7.0/en/contribution/release-agent/","title":"Apache SkyWalking PHP Agent release guide"},{"body":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but provided for convenience. The recommended usage is always to build from source.\nThis image hosts the SkyWalking Python agent package on top of official Python base images (full \u0026amp; slim) providing support from Python 3.7 - 3.11.\nHow to use this image The images are hosted at Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\nskywalking.docker.scarf.sh/apache/skywalking-python\nBuild your Python application image on top of this image Start by pulling the skywalking-python image as the base of your application image. Refer to Docker Hub for the list of tags available.\nFROM apache/skywalking-python:0.7.0-grpc-py3.9\n# ... build your Python application\nYou could start your Python application with CMD. The Python image already sets an entry point ENTRYPOINT [\u0026quot;sw-python\u0026quot;].\nFor example - CMD ['run', '-p', 'gunicorn', 'app.wsgi'] -p is always needed when used with Gunicorn/uWSGI -\u0026gt; This will be translated to sw-python run -p gunicorn app.wsgi\nYou don\u0026rsquo;t need to care about enabling the SkyWalking Python agent manually; it should be adopted and bootstrapped automatically through the sw-python CLI.\nEnvironment variables should be provided to customize the agent behavior.\nBuild an image from the dockerfile Provide the following arguments to build your own image from the dockerfile.\nBASE_PYTHON_IMAGE # the Python base image to build upon SW_PYTHON_AGENT_VERSION # agent version to be pulled from PyPI SW_PYTHON_AGENT_PROTOCOL # agent protocol - grpc/ http/ kafka ","excerpt":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but …","ref":"/docs/skywalking-python/latest/en/setup/container/","title":"Apache SkyWalking Python Agent dockerfile and images"},{"body":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but provided for convenience. 
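As a rough illustration of the three dockerfile build arguments listed above, and assuming they are declared as ARGs in the project\u0026rsquo;s dockerfile (the Makefile targets described later normally pass them for you), a direct docker invocation could look like the following; the tag, version and dockerfile path are examples only.\n\u0026gt; docker build -f docker/Dockerfile -t my-skywalking-python:grpc-py3.10 --build-arg BASE_PYTHON_IMAGE=python:3.10-slim --build-arg SW_PYTHON_AGENT_VERSION=1.0.1 --build-arg SW_PYTHON_AGENT_PROTOCOL=grpc . ## builds a local agent image from the dockerfile  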
The recommended usage is always to build from source.\nThis image hosts the SkyWalking Python agent package on top of official Python base images (full \u0026amp; slim) providing support from Python 3.7 - 3.11.\nHow to use this image The images are hosted at Docker Hub.\nThe images come with protocol variants (gRPC, Kafka, HTTP) and base Python variants (Full, Slim).\nBuild your Python application image on top of this image Start by pulling the skywalking-python image as the base of your application image. Refer to Docker Hub for the list of tags available.\nFROM apache/skywalking-python:1.1.0-grpc-py3.10\n# ... build your Python application\nYou could start your Python application with CMD. The Python image already sets an entry point ENTRYPOINT [\u0026quot;sw-python\u0026quot;].\nFor example - CMD ['run', '-p', 'gunicorn', 'app.wsgi'] -p is always needed when used with Gunicorn/uWSGI -\u0026gt; This will be translated to sw-python run -p gunicorn app.wsgi\nYou don\u0026rsquo;t need to care about enabling the SkyWalking Python agent manually; it should be adopted and bootstrapped automatically through the sw-python CLI.\nEnvironment variables should be provided to customize the agent behavior.\nBuild an image from the dockerfile Provide the following arguments to build your own image from the dockerfile.\nBASE_PYTHON_IMAGE # the Python base image to build upon SW_PYTHON_AGENT_VERSION # agent version to be pulled from PyPI SW_PYTHON_AGENT_PROTOCOL # agent protocol - grpc/ http/ kafka ","excerpt":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but …","ref":"/docs/skywalking-python/next/en/setup/container/","title":"Apache SkyWalking Python Agent dockerfile and images"},{"body":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but provided for convenience. The recommended usage is always to build from source.\nThis image hosts the SkyWalking Python agent package on top of official Python base images (full \u0026amp; slim) providing support from Python 3.7 - 3.11.\nHow to use this image The images are hosted at Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\nskywalking.docker.scarf.sh/apache/skywalking-python\nBuild your Python application image on top of this image Start by pulling the skywalking-python image as the base of your application image. Refer to Docker Hub for the list of tags available.\nFROM apache/skywalking-python:0.7.0-grpc-py3.9\n# ... build your Python application\nYou could start your Python application with CMD. 
The Python image already sets an entry point ENTRYPOINT [\u0026quot;sw-python\u0026quot;].\nFor example - CMD ['run', '-p', 'gunicorn', 'app.wsgi'] -p is always needed when using with Gunicorn/uWSGI -\u0026gt; This will be translated to sw-python run -p gunicorn app.wsgi\nYou don\u0026rsquo;t need to care about enabling the SkyWalking Python agent manually, it should be adopted and bootstrapped automatically through the sw-python CLI.\nEnvironment variables should be provided to customize the agent behavior.\nBuild an image from the dockerfile Provide the following arguments to build your own image from the dockerfile.\nBASE_PYTHON_IMAGE # the Python base image to build upon SW_PYTHON_AGENT_VERSION # agent version to be pulled from PyPI SW_PYTHON_AGENT_PROTOCOL # agent protocol - grpc/ http/ kafka ","excerpt":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but …","ref":"/docs/skywalking-python/v1.0.1/en/setup/container/","title":"Apache SkyWalking Python Agent dockerfile and images"},{"body":"Apache SkyWalking Python Image Release Guide This documentation shows the way to build and push the SkyWalking Python images to DockerHub.\nPrerequisites Before building the latest release of images, make sure an official release is pushed to PyPI where the dockerfile will depend on.\nImages This process wil generate a list of images covering most used Python versions and variations(grpc/http/kafka) of the Python agent.\nThe convenience images are published to Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\n skywalking.docker.scarf.sh/apache/skywalking-python (Docker Hub)  How to build Issue the following commands to build relevant docker images for the Python agent. The make command will generate three images(grpc, http, kafka) for each Python version supported.\nAt the root folder -\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image Or at the docker folder -\ncd docker export AGENT_VERSION=\u0026lt;version\u0026gt; make How to publish images After a SkyWalking Apache release for the Python agent and wheels have been pushed to PyPI:\n  Build images from the project root, this step pulls agent wheel from PyPI and installs it:\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image   Verify the images built.\n  Push built images to docker hub repos:\nmake push-image   ","excerpt":"Apache SkyWalking Python Image Release Guide This documentation shows the way to build and push the …","ref":"/docs/skywalking-python/latest/en/contribution/how-to-release-docker/","title":"Apache SkyWalking Python Image Release Guide"},{"body":"Apache SkyWalking Python Image Release Guide The official process generating a list of images covering most used Python versions and variations(grpc/http/kafka) of the Python agent is deployed to our GitHub actions and therefore do not rely on this documentation.\nThis documentation shows the way to build and push the SkyWalking Python images manually.\nHow to build manually Before building the latest release of images, make sure an official release is pushed to PyPI where the dockerfile will depend on.\nImages The process generating a list of images covering most used Python versions and variations(grpc/http/kafka) of the Python agent is deployed to our GitHub actions.\nThe convenience images are published to DockerHub\nHow to build Issue the following commands to build relevant docker images for the Python agent. 
The make command will generate three images(grpc, http, kafka) for each Python version supported.\nAt the root folder -\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image Or at the docker folder -\ncd docker export AGENT_VERSION=\u0026lt;version\u0026gt; make How to publish images After a SkyWalking Apache release for the Python agent and wheels have been pushed to PyPI:\n  Build images from the project root, this step pulls agent wheel from PyPI and installs it:\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image   Verify the images built.\n  Push built images to docker hub repos:\nmake push-image   ","excerpt":"Apache SkyWalking Python Image Release Guide The official process generating a list of images …","ref":"/docs/skywalking-python/next/en/contribution/how-to-release-docker/","title":"Apache SkyWalking Python Image Release Guide"},{"body":"Apache SkyWalking Python Image Release Guide This documentation shows the way to build and push the SkyWalking Python images to DockerHub.\nPrerequisites Before building the latest release of images, make sure an official release is pushed to PyPI where the dockerfile will depend on.\nImages This process wil generate a list of images covering most used Python versions and variations(grpc/http/kafka) of the Python agent.\nThe convenience images are published to Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\n skywalking.docker.scarf.sh/apache/skywalking-python (Docker Hub)  How to build Issue the following commands to build relevant docker images for the Python agent. The make command will generate three images(grpc, http, kafka) for each Python version supported.\nAt the root folder -\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image Or at the docker folder -\ncd docker export AGENT_VERSION=\u0026lt;version\u0026gt; make How to publish images After a SkyWalking Apache release for the Python agent and wheels have been pushed to PyPI:\n  Build images from the project root, this step pulls agent wheel from PyPI and installs it:\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image   Verify the images built.\n  Push built images to docker hub repos:\nmake push-image   ","excerpt":"Apache SkyWalking Python Image Release Guide This documentation shows the way to build and push the …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-release-docker/","title":"Apache SkyWalking Python Image Release Guide"},{"body":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the SkyWalking Python in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-python and skywalking, create a new milestone if needed. Update CHANGELOG.md and version in pyproject.toml.  Add your GPG public key to Apache SVN   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-python \u0026amp;\u0026amp; cd skywalking-python git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Python $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release Upload to Apache SVN svn co https://dist.apache.org/repos/dist/dev/skywalking/python release/skywalking/python mkdir -p release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz.asc release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking-python*.tgz.sha512 release/skywalking/python/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add python/$VERSION \u0026amp;\u0026amp; svn commit python -m \u0026#34;Draft Apache SkyWalking-Python release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email, the same below.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION test build available Content: The test build of Apache SkyWalking Python $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Python version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Python version $VERSION. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. 
[ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-python-src-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-python-src-$VERSION.tgz.sha512. Check gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz. Build distribution from source code package by following this the build guide. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Python version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/python/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Python $VERSION is now released. SkyWalking Python: The Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. 
Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Website: http://skywalking.apache.org/ SkyWalking Python Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-python/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-python/latest/en/contribution/how-to-release/","title":"Apache SkyWalking Python Release Guide"},{"body":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the SkyWalking Python in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-python and skywalking, create a new milestone if needed. Update CHANGELOG.md and version in pyproject.toml.  Add your GPG public key to Apache SVN   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-python \u0026amp;\u0026amp; cd skywalking-python git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Python $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release Upload to Apache SVN svn co https://dist.apache.org/repos/dist/dev/skywalking/python release/skywalking/python mkdir -p release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking*.tgz release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking*.tgz.asc release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python*.tgz.sha512 release/skywalking/python/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add python/$VERSION \u0026amp;\u0026amp; svn commit python -m \u0026#34;Draft Apache SkyWalking-Python release $VERSION\u0026#34; Make the internal announcement First, generate a sha512sum for the source code package generated in last step:\nsha512sum release/skywalking/python/\u0026#34;$VERSION\u0026#34;/skywalking-python-src-\u0026#34;$VERSION\u0026#34;.tgz Send an announcement email to dev@ mailing list, please check all links before sending the email, the same as below.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION test build available Content: The test build of Apache SkyWalking Python $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Python version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Python version $VERSION. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-python-src-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-python-src-$VERSION.tgz.sha512. Check gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz. Build distribution from source code package by following this the build guide. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Python version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   
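Before casting a vote, a checker might run roughly the following against the staged candidate; the local checkout path is arbitrary, and the checks simply mirror the list above.\nsvn co https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; cd \u0026#34;$VERSION\u0026#34; ## fetch the release candidate artifacts curl -LO https://dist.apache.org/repos/dist/release/skywalking/KEYS \u0026amp;\u0026amp; gpg --import KEYS ## import the release manager public keys shasum -c skywalking-python-src-\u0026#34;$VERSION\u0026#34;.tgz.sha512 ## the checksum must report OK gpg --verify skywalking-python-src-\u0026#34;$VERSION\u0026#34;.tgz.asc skywalking-python-src-\u0026#34;$VERSION\u0026#34;.tgz ## the signature must be good and come from a key in KEYS  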
Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/python/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Publish PyPI package After the official ASF release, we publish the packaged wheel to the PyPI index.\n Make sure the final upload is correct by using the test PyPI index make upload-test. Upload the final artifacts by running make upload.  Publish Docker images After the release on GitHub, a GitHub Action will be triggered to build Docker images based on the latest code.\nImportant We announce the new release by drafting one on Github release page, following the previous convention.\nAn automation via GitHub Actions will automatically trigger upon the mentioned release event to build and upload Docker images to DockerHub.\nSee How-to-release-docker for a detailed description of manual release.\n Send ANNOUNCEMENT email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Python $VERSION is now released. SkyWalking Python: The Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Website: http://skywalking.apache.org/ SkyWalking Python Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-python/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-python/next/en/contribution/how-to-release/","title":"Apache SkyWalking Python Release Guide"},{"body":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the SkyWalking Python in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-python and skywalking, create a new milestone if needed. Update CHANGELOG.md and version in pyproject.toml.  Add your GPG public key to Apache SVN   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-python \u0026amp;\u0026amp; cd skywalking-python git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Python $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release Upload to Apache SVN svn co https://dist.apache.org/repos/dist/dev/skywalking/python release/skywalking/python mkdir -p release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz.asc release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking-python*.tgz.sha512 release/skywalking/python/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add python/$VERSION \u0026amp;\u0026amp; svn commit python -m \u0026#34;Draft Apache SkyWalking-Python release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email, the same below.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION test build available Content: The test build of Apache SkyWalking Python $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Python version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Python version $VERSION. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. 
[ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-python-src-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-python-src-$VERSION.tgz.sha512. Check gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz. Build distribution from source code package by following this the build guide. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Python version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/python/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Python $VERSION is now released. SkyWalking Python: The Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. 
Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Website: http://skywalking.apache.org/ SkyWalking Python Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-python/blob/v$VERSION/README.md The Apache SkyWalking Team   ","excerpt":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-release/","title":"Apache SkyWalking Python Release Guide"},{"body":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the SkyWalking Rover in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-rover and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-rover \u0026amp;\u0026amp; cd skywalking-rover git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Rover v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-rover-${VERSION}-bin.tgz, apache-skywalking-rover-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz.asc skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking-rover*.tgz.sha512 skywalking/rover/\u0026#34;$VERSION\u0026#34; cd skywalking/rover \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Rover release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Rover version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Rover version $VERSION. 
Release notes: * https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-rover-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-rover-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-rover/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-rover/blob/v$VERSION/docs/en/guides/contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-rover-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-rover-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-rover-$VERSION-{src,bin}.tgz.asc skywalking-rover-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make container-generate build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Rover version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION https://dist.apache.org/repos/dist/release/skywalking/rover   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Rover $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Rover $VERSION is now released. SkyWalking Rover: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Rover Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-rover/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-rover/latest/en/guides/contribution/how-to-release/","title":"Apache SkyWalking Rover Release Guide"},{"body":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the SkyWalking Rover in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-rover and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-rover \u0026amp;\u0026amp; cd skywalking-rover git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Rover v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-rover-${VERSION}-bin.tgz, apache-skywalking-rover-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz.asc skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking-rover*.tgz.sha512 skywalking/rover/\u0026#34;$VERSION\u0026#34; cd skywalking/rover \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Rover release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Rover version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Rover version $VERSION. 
Release notes: * https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-rover-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-rover-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-rover/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-rover/blob/v$VERSION/docs/en/guides/contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-rover-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-rover-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-rover-$VERSION-{src,bin}.tgz.asc skywalking-rover-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make container-generate build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Rover version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION https://dist.apache.org/repos/dist/release/skywalking/rover   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Rover $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Rover $VERSION is now released. SkyWalking Rover: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Rover Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-rover/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-rover/next/en/guides/contribution/how-to-release/","title":"Apache SkyWalking Rover Release Guide"},{"body":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the SkyWalking Rover in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-rover and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-rover \u0026amp;\u0026amp; cd skywalking-rover git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Rover v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-rover-${VERSION}-bin.tgz, apache-skywalking-rover-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz.asc skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking-rover*.tgz.sha512 skywalking/rover/\u0026#34;$VERSION\u0026#34; cd skywalking/rover \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Rover release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Rover version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Rover version $VERSION. 
Release notes: * https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-rover-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-rover-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-rover/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-rover/blob/v$VERSION/docs/en/guides/contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-rover-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-rover-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-rover-$VERSION-{src,bin}.tgz.asc skywalking-rover-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make container-generate build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Rover version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION https://dist.apache.org/repos/dist/release/skywalking/rover   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Rover $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Rover $VERSION is now released. SkyWalking Rover: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
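The publish step above (moving the voted release candidate from the dev area to the release area) and the later reminder to remove superseded releases are both plain Subversion operations against dist.apache.org. A hedged sketch, assuming PMC permissions; passing -m avoids the interactive editor the guide otherwise configures via SVN_EDITOR, and OLD_VERSION is an illustrative placeholder for a release no longer linked from the website:

```bash
# Promote the voted release candidate to the release area (PMC members only).
svn mv -m "Release Apache SkyWalking Rover ${VERSION}" \
  "https://dist.apache.org/repos/dist/dev/skywalking/rover/${VERSION}" \
  "https://dist.apache.org/repos/dist/release/skywalking/rover"

# Later, remove a superseded release from the mirror svn, as the guide recommends.
svn delete -m "Remove superseded SkyWalking Rover release ${OLD_VERSION}" \
  "https://dist.apache.org/repos/dist/release/skywalking/rover/${OLD_VERSION}"
```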
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Rover Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-rover/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the …","ref":"/docs/skywalking-rover/v0.6.0/en/guides/contribution/how-to-release/","title":"Apache SkyWalking Rover Release Guide"},{"body":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release the SkyWalking Satellite in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-satellite and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-satellite \u0026amp;\u0026amp; cd skywalking-satellite git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Satellite v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-satellite-${VERSION}-bin.tgz, apache-skywalking-satellite-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz.asc skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking-satellite*.tgz.sha512 skywalking/satellite/\u0026#34;$VERSION\u0026#34; cd skywalking/satellite \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Satellite release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] SkyWalking Satellite $VERSION test build available Content: The test build of SkyWalking Satellite $VERSION is now available. 
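The build step above states that make release should leave six files in the working directory: the two tarballs plus their .asc and .sha512 companions. A small, hedged sanity check before running svn add could look like the following; the file names follow the pattern quoted in the guide:

```bash
# Verify the six expected release artifacts exist before staging them in svn.
VERSION=${VERSION:?set the release version first}
for pkg in src bin; do
  for ext in "" .asc .sha512; do
    f="apache-skywalking-satellite-${VERSION}-${pkg}.tgz${ext}"
    [ -f "$f" ] || { echo "missing artifact: $f" >&2; exit 1; }
  done
done
echo "all six release artifacts are present"
```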
We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-satellite-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-satellite-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/v$VERSION/docs/en/guides/contribution/How-to-release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Satellite version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Satellite version $VERSION. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-satellite-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-satellite-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/$VERSION/docs/en/guides/contribuation/How-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-satellite-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-satellite-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-satellite-$VERSION-{src,bin}.tgz.asc skywalking-satellite-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Satellite version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION https://dist.apache.org/repos/dist/release/skywalking/satellite   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Satellite $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Satellite $VERSION is now released. SkyWalking Satellite: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Satellite Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-satellite/blob/$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release …","ref":"/docs/skywalking-satellite/latest/en/guides/contribution/how-to-release/","title":"Apache SkyWalking Satellite Release Guide"},{"body":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release the SkyWalking Satellite in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-satellite and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-satellite \u0026amp;\u0026amp; cd skywalking-satellite git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Satellite v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-satellite-${VERSION}-bin.tgz, apache-skywalking-satellite-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz.asc skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking-satellite*.tgz.sha512 skywalking/satellite/\u0026#34;$VERSION\u0026#34; cd skywalking/satellite \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Satellite release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] SkyWalking Satellite $VERSION test build available Content: The test build of SkyWalking Satellite $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-satellite-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-satellite-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/v$VERSION/docs/en/guides/contribution/How-to-release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Satellite version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Satellite version $VERSION. 
Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-satellite-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-satellite-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/$VERSION/docs/en/guides/contribuation/How-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-satellite-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-satellite-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-satellite-$VERSION-{src,bin}.tgz.asc skywalking-satellite-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Satellite version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION https://dist.apache.org/repos/dist/release/skywalking/satellite   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Satellite $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Satellite $VERSION is now released. SkyWalking Satellite: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Satellite Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-satellite/blob/$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release …","ref":"/docs/skywalking-satellite/next/en/guides/contribution/how-to-release/","title":"Apache SkyWalking Satellite Release Guide"},{"body":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release the SkyWalking Satellite in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-satellite and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-satellite \u0026amp;\u0026amp; cd skywalking-satellite git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Satellite v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-satellite-${VERSION}-bin.tgz, apache-skywalking-satellite-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz.asc skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking-satellite*.tgz.sha512 skywalking/satellite/\u0026#34;$VERSION\u0026#34; cd skywalking/satellite \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Satellite release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] SkyWalking Satellite $VERSION test build available Content: The test build of SkyWalking Satellite $VERSION is now available. 
We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-satellite-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-satellite-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/v$VERSION/docs/en/guides/contribution/How-to-release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Satellite version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Satellite version $VERSION. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-satellite-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-satellite-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/$VERSION/docs/en/guides/contribuation/How-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-satellite-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-satellite-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-satellite-$VERSION-{src,bin}.tgz.asc skywalking-satellite-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Satellite version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION https://dist.apache.org/repos/dist/release/skywalking/satellite   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Satellite $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Satellite $VERSION is now released. SkyWalking Satellite: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Satellite Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-satellite/blob/$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","excerpt":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release …","ref":"/docs/skywalking-satellite/v1.2.0/en/guides/contribution/how-to-release/","title":"Apache SkyWalking Satellite Release Guide"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. 
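The service-apdex-threshold.yml example quoted in these entries has had its line breaks stripped by this flattened format. A readable sketch of the same content, written out via a heredoc; the layout is assumed from the inline comments and the values are unchanged:

```bash
# Reconstruct the flattened service-apdex-threshold.yml example shown on this page.
cat > service-apdex-threshold.yml <<'EOF'
# default threshold is 500ms
default: 500
# example:
# the threshold of service "tomcat" is 1s
# tomcat: 1000
# the threshold of service "springboot1" is 50ms
# springboot1: 50
EOF
```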
The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/latest/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/next/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.0.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. 
The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.1.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.2.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. 
The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.3.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.4.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.5.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. 
The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.6.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","excerpt":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the …","ref":"/docs/main/v9.7.0/en/setup/backend/apdex-threshold/","title":"Apdex threshold"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. 
Meanwhile, the instances would be recognized as LAYER: APISIX instances. The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared 
dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/latest/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
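The OpenTelemetry Collector snippets in these APISIX entries (the Prometheus scrape_configs carrying the skywalking_service label, and the resource processor alternative) are likewise flattened. A readable sketch of the same configuration written via a heredoc; the file name otel-collector-config.yaml is illustrative, and the placeholder service name is copied as-is from the page:

```bash
# Reconstruct the flattened OTEL Collector snippets from this page; content is unchanged.
cat > otel-collector-config.yaml <<'EOF'
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'apisix-monitoring'
          static_configs:
            - targets: ['apisix:9091']
              labels:
                skywalking_service: exmple_service_name  # Specify SkyWalking Service name

processors:
  resource/skywalking-service:
    attributes:
      - key: skywalking_service
        value: exmple_service_name  # Specify SkyWalking Service name
        action: insert
EOF
```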
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/next/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
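As an alternative to the scrape label, the resource processor mentioned later in this section can insert the skywalking_service attribute inside the Collector. A sketch, with example_service_name as a placeholder and a pipeline that assumes the Prometheus receiver and OTLP exporter shown earlier:
processors:
  resource/skywalking-service:
    attributes:
      - key: skywalking_service
        value: example_service_name  # placeholder Service name
        action: insert
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [resource/skywalking-service]
      exporters: [otlp]
If neither the label nor the attribute is present, the OAP falls back to APISIX as the default service name, as noted later in this section.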
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
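Not quoted from this page, but step 1 of the Set up list above has a corresponding APISIX-side configuration; a hedged sketch based on APISIX's documented conf/config.yaml conventions (verify the keys against your APISIX version, and keep prometheus in the enabled plugin list):
plugin_attr:
  prometheus:
    export_addr:
      ip: 0.0.0.0   # listen address of the metrics endpoint scraped by the Collector
      port: 9091    # matches the apisix:9091 scrape target used in this section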
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
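Step 3 of the Set up list above ("Config SkyWalking OpenTelemetry receiver") happens on the OAP side. A rough sketch of the relevant application.yml block; the key and environment variable names vary across OAP releases, so treat every identifier below as an assumption and check the file shipped with your version:
receiver-otel:   # module name in recent OAP releases; verify in your application.yml
  selector: ${SW_OTEL_RECEIVER:default}
  default:
    enabledHandlers: ${SW_OTEL_RECEIVER_ENABLED_HANDLERS:"otlp-metrics"}
    enabledOtelMetricsRules: ${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:"apisix"}  # activates /config/otel-rules/apisix.yaml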
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
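The Customizations note at the end of this section points to /config/otel-rules/apisix.yaml for the metric definitions. Purely as an illustration of that rule file's general shape, here is a hypothetical fragment; the expression, metric name, and labels below are assumptions for illustration, not copied from the shipped file:
metricPrefix: meter_apisix
expSuffix: tag({tags -> tags.host_name = 'apisix::' + tags.host_name}).service(['host_name'], Layer.APISIX)  # illustrative MAL suffix
metricsRules:
  - name: sv_http_connections   # exposed as meter_apisix_sv_http_connections
    exp: apisix_nginx_http_current_connections.sum(['host_name', 'state'])  # assumed source metric/labels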
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","excerpt":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-apisix-monitoring/","title":"APISIX monitoring"},{"body":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","excerpt":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, …","ref":"/docs/main/latest/en/setup/backend/backend-aws-api-gateway-monitoring/","title":"AWS API Gateway monitoring"},{"body":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. 
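The Set up steps described below are console-oriented, but step 3 (the CloudWatch metric stream) can also be scripted. A hypothetical CloudFormation sketch, in which the stream name, delivery-stream ARN, and IAM role ARN are placeholders for resources created in steps 1-2 and are not defined on this page:
Resources:
  ApiGatewayMetricStream:
    Type: AWS::CloudWatch::MetricStream
    Properties:
      Name: apigateway-to-skywalking   # placeholder name
      FirehoseArn: arn:aws:firehose:us-east-1:123456789012:deliverystream/skywalking-firehose  # placeholder ARN
      RoleArn: arn:aws:iam::123456789012:role/metric-stream-to-firehose  # placeholder ARN
      OutputFormat: opentelemetry0.7   # the "OpenTelemetry 0.7" format selected in step 3
      IncludeFilters:
        - Namespace: AWS/ApiGateway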
SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway (HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collects metrics for API Gateway (REST and HTTP APIs); refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivers metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination; refer to Create Delivery Stream Create CloudWatch metric stream, select the Firehose Delivery Stream created above, set Select namespaces to AWS/ApiGateway and Select output format to OpenTelemetry 0.7; refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number of API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number of API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","excerpt":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, …","ref":"/docs/main/next/en/setup/backend/backend-aws-api-gateway-monitoring/","title":"AWS API Gateway monitoring"},{"body":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. 
The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","excerpt":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-aws-api-gateway-monitoring/","title":"AWS API Gateway monitoring"},{"body":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","excerpt":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-aws-api-gateway-monitoring/","title":"AWS API Gateway monitoring"},{"body":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. 
SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","excerpt":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-aws-api-gateway-monitoring/","title":"AWS API Gateway monitoring"},{"body":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","excerpt":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights …","ref":"/docs/main/latest/en/setup/backend/backend-aws-eks-monitoring/","title":"AWS Cloud EKS monitoring"},{"body":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
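For readability, the full collector configuration sample referenced by these pages reads as follows once whitespace is restored. This is a sketch based on the inline sample above; oap-service:11800 is the placeholder OAP gRPC endpoint used in the sample and should be replaced with your own address.

```yaml
# OTEL Collector configuration with the AWS Container Insights Receiver,
# sketched from the inline sample above (whitespace restored).
extensions:
  health_check:

receivers:
  awscontainerinsightreceiver:

processors:
  resource/job-name:
    attributes:
      - key: job_name
        value: aws-cloud-eks-monitoring   # required; OAP ignores metrics without this attribute
        action: insert

exporters:
  otlp:
    endpoint: oap-service:11800           # placeholder OAP gRPC address from the sample
    tls:
      insecure: true
  logging:
    loglevel: debug

service:
  pipelines:
    metrics:
      receivers: [awscontainerinsightreceiver]
      processors: [resource/job-name]
      exporters: [otlp, logging]
  extensions: [health_check]
```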
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","excerpt":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights …","ref":"/docs/main/next/en/setup/backend/backend-aws-eks-monitoring/","title":"AWS Cloud EKS monitoring"},{"body":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  EKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","excerpt":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-aws-eks-monitoring/","title":"AWS Cloud EKS monitoring"},{"body":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","excerpt":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-aws-eks-monitoring/","title":"AWS Cloud EKS monitoring"},{"body":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","excerpt":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-aws-eks-monitoring/","title":"AWS Cloud EKS monitoring"},{"body":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","excerpt":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-aws-eks-monitoring/","title":"AWS Cloud EKS monitoring"},{"body":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx server error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency ms aws_s3_request_latency Service The average elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency ms aws_s3_request_latency Service The average per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests count aws_s3_delete_requests Service The number of HTTP requests of all types made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests count aws_s3_delete_requests Service The number of HTTP GET requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests count aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests count aws_s3_delete_requests Service The number of HTTP DELETE requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud S3 dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","excerpt":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. …","ref":"/docs/main/latest/en/setup/backend/backend-aws-s3-monitoring/","title":"AWS Cloud S3 monitoring"},{"body":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages the AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of S3 to the OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collects metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivers metrics to the AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create a CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set the AWS Kinesis Data Firehose receiver\u0026rsquo;s address as the HTTP(s) Destination, refer to Create Delivery Stream Create a CloudWatch metric stream, select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, and set Select output format to OpenTelemetry 0.7.
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","excerpt":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. …","ref":"/docs/main/next/en/setup/backend/backend-aws-s3-monitoring/","title":"AWS Cloud S3 monitoring"},{"body":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  S3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","excerpt":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-aws-s3-monitoring/","title":"AWS Cloud S3 monitoring"},{"body":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","excerpt":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-aws-s3-monitoring/","title":"AWS Cloud S3 monitoring"},{"body":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","excerpt":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-aws-s3-monitoring/","title":"AWS Cloud S3 monitoring"},{"body":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx server error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency ms aws_s3_request_latency Service The average elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency ms aws_s3_request_latency Service The average per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests count aws_s3_delete_requests Service The number of HTTP requests of all types made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests count aws_s3_delete_requests Service The number of HTTP GET requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests count aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests count aws_s3_delete_requests Service The number of HTTP DELETE requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud S3 dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","excerpt":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-aws-s3-monitoring/","title":"AWS Cloud S3 monitoring"},{"body":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Firehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to the SkyWalking OAP Server via Amazon Kinesis Data Firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create a CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set the AWS Kinesis Data Firehose receiver\u0026rsquo;s address as the HTTP(s) Destination, refer to Create Delivery Stream Create a metric stream, set the namespace to AWS/DynamoDB, and set Kinesis Data Firehose to the Firehose delivery stream you just created. Configure aws-firehose-receiver to receive the data. Create a CloudWatch metric stream, select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, and set Select output format to OpenTelemetry 0.7.
refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","excerpt":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to …","ref":"/docs/main/latest/en/setup/backend/backend-aws-dynamodb-monitoring/","title":"AWS DynamoDb monitoring"},{"body":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  
Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream. Create a metric stream, set namespace to DynamoDB, and set Kinesis Data Firehose to the firehose you just created. Configure aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful requests Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Count  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. 
The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","excerpt":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to …","ref":"/docs/main/next/en/setup/backend/backend-aws-dynamodb-monitoring/","title":"AWS DynamoDb monitoring"},{"body":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  DynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. 
Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","excerpt":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-aws-dynamodb-monitoring/","title":"AWS DynamoDb monitoring"},{"body":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. 
Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","excerpt":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-aws-dynamodb-monitoring/","title":"AWS DynamoDb monitoring"},{"body":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. 
Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","excerpt":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-aws-dynamodb-monitoring/","title":"AWS DynamoDb monitoring"},{"body":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. 
Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. 
The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","excerpt":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-aws-dynamodb-monitoring/","title":"AWS DynamoDb monitoring"},{"body":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","excerpt":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an …","ref":"/docs/main/latest/en/setup/backend/aws-firehose-receiver/","title":"AWS Firehose receiver"},{"body":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","excerpt":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an …","ref":"/docs/main/next/en/setup/backend/aws-firehose-receiver/","title":"AWS Firehose receiver"},{"body":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443 (refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications. 
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","excerpt":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an …","ref":"/docs/main/v9.4.0/en/setup/backend/aws-firehose-receiver/","title":"AWS Firehose receiver"},{"body":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","excerpt":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an …","ref":"/docs/main/v9.5.0/en/setup/backend/aws-firehose-receiver/","title":"AWS Firehose receiver"},{"body":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","excerpt":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an …","ref":"/docs/main/v9.6.0/en/setup/backend/aws-firehose-receiver/","title":"AWS Firehose receiver"},{"body":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
AWS Firehose receiver supports setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","excerpt":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an …","ref":"/docs/main/v9.7.0/en/setup/backend/aws-firehose-receiver/","title":"AWS Firehose receiver"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/Envoy reaches the OAP. The major difference between Satellite and other widely used general-purpose proxies, like Envoy, is that it routes the data according to contents rather than connections, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/latest/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly by default, the OAP server cluster would face the problem of load imbalance. This issue becomes severe in high-traffic load scenarios. In this doc, we will introduce two approaches to solve the problem.\nSkyWalking Satellite Project SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\nEnvoy Filter to Limit Connections Per OAP Instance If you don\u0026rsquo;t want to deploy skywalking-satellite, you can enable Istio sidecar injection for SkyWalking OAP Pods,\nkubectl label namespace $SKYWALKING_NAMESPACE istio-injection=enabled kubectl -n $SKYWALKING_NAMESPACE rollout restart -l app=skywalking,component=oap and apply an EnvoyFilter to limit the connections per OAP instance, so that each of the OAP instance can have similar amount of gRPC connections.\nBefore that, you need to calculate the number of connections for each OAP instance as follows:\nNUMBER_OF_SERVICE_PODS=\u0026lt;the-number-of-service-pods-that-are-monitored-by-skywalking\u0026gt; # Each service Pod has 2 connections to OAP NUMBER_OF_TOTAL_CONNECTIONS=$((NUMBER_OF_SERVICE_PODS * 2)) # Divide the total connections by the replicas of OAP NUMBER_OF_CONNECTIONS_PER_OAP=$((NUMBER_OF_TOTAL_CONNECTIONS / $NUMBER_OF_OAP_REPLICAS)) And you can apply an EnvoyFilter to limit connections:\nkubectl -n $SKYWALKING_NAMESPACE apply -f - \u0026lt;\u0026lt;EOF apiVersion: networking.istio.io/v1alpha3 kind: EnvoyFilter metadata: name: oap-limit-connections namespace: istio-system spec: configPatches: - applyTo: NETWORK_FILTER match: context: ANY listener: filterChain: filter: name: envoy.filters.network.http_connection_manager portNumber: 11800 patch: operation: INSERT_BEFORE value: name: envoy.filters.network.ConnectionLimit typed_config: \u0026#39;@type\u0026#39;: type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit max_connections: $NUMBER_OF_CONNECTIONS_PER_OAP stat_prefix: envoy_filters_network_connection_limit workloadSelector: labels: app: oap EOF By this approach, we can limit the connections to port 11800 per OAP instance, but there is another corner case when the amount of service Pods are huge. 
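Before turning to that corner case, here is the connection calculation above made concrete with purely hypothetical numbers (300 monitored service Pods and 3 OAP replicas):

# 300 service Pods, each holding 2 gRPC connections to OAP port 11800
NUMBER_OF_SERVICE_PODS=300
NUMBER_OF_OAP_REPLICAS=3
NUMBER_OF_TOTAL_CONNECTIONS=$((NUMBER_OF_SERVICE_PODS * 2))                              # 600
NUMBER_OF_CONNECTIONS_PER_OAP=$((NUMBER_OF_TOTAL_CONNECTIONS / NUMBER_OF_OAP_REPLICAS))  # 200

With these numbers, max_connections in the EnvoyFilter above would be 200, so each of the three OAP instances accepts roughly one third of the total gRPC connections.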
Because the limiting is on connection level, and each service Pod has 2 connections to OAP port 11800, one for Envoy ALS to send access log, the other one for Envoy metrics, and because the traffic of the 2 connections can vary very much, if the number of service Pods is large enough, an extreme case might happen that one OAP instance is serving all Envoy metrics connections and the other OAP instance is serving all Envoy ALS connections, which in turn might be unbalanced again, to solve this, we can split the ALS connections to a dedicated port, and limit the connections to that port only.\nYou can set the environment variable SW_ALS_GRPC_PORT to a port number other than 0 when deploying skywalking, and limit connections to that port only in the EnvoyFilter:\nexport SW_ALS_GRPC_PORT=11802 kubectl -n $SKYWALKING_NAMESPACE apply -f - \u0026lt;\u0026lt;EOF apiVersion: networking.istio.io/v1alpha3 kind: EnvoyFilter metadata: name: oap-limit-connections namespace: istio-system spec: configPatches: - applyTo: NETWORK_FILTER match: context: ANY listener: filterChain: filter: name: envoy.filters.network.http_connection_manager portNumber: $SW_ALS_GRPC_PORT patch: operation: INSERT_BEFORE value: name: envoy.filters.network.ConnectionLimit typed_config: \u0026#39;@type\u0026#39;: type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit max_connections: $NUMBER_OF_CONNECTIONS_PER_OAP stat_prefix: envoy_filters_network_connection_limit workloadSelector: labels: app: oap EOF ","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly by …","ref":"/docs/main/next/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When set the Agent or Envoy connecting to OAP server directly as in default, OAP server cluster would face the problem of OAP load imbalance. This issue would be very serious in high traffic load scenarios. Satellite is recommended to be used as a native gateway proxy, to provide load balancing capabilities for data content before the data from Agent/Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that, Satellite would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details how to set up load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When set the Agent or Envoy connecting to OAP server directly as in default, …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","excerpt":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-load-balancer/","title":"Backend Load Balancer"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 or JDK17.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\nKey Parameters In The Booting Logs After the OAP booting process completed, you should be able to see all important parameters listed in the logs.\n2023-11-06 21:10:45,988 org.apache.skywalking.oap.server.starter.OAPServerBootstrap 67 [main] INFO [] - The key booting parameters of Apache SkyWalking OAP are listed as following. 
Running Mode | null TTL.metrics | 7 TTL.record | 3 Version | 9.7.0-SNAPSHOT-92af797 module.agent-analyzer.provider | default module.ai-pipeline.provider | default module.alarm.provider | default module.aws-firehose.provider | default module.cluster.provider | standalone module.configuration-discovery.provider | default module.configuration.provider | none module.core.provider | default module.envoy-metric.provider | default module.event-analyzer.provider | default module.log-analyzer.provider | default module.logql.provider | default module.promql.provider | default module.query.provider | graphql module.receiver-browser.provider | default module.receiver-clr.provider | default module.receiver-ebpf.provider | default module.receiver-event.provider | default module.receiver-jvm.provider | default module.receiver-log.provider | default module.receiver-meter.provider | default module.receiver-otel.provider | default module.receiver-profile.provider | default module.receiver-register.provider | default module.receiver-sharing-server.provider | default module.receiver-telegraf.provider | default module.receiver-trace.provider | default module.service-mesh.provider | default module.storage.provider | h2 module.telemetry.provider | none oap.external.grpc.host | 0.0.0.0 oap.external.grpc.port | 11800 oap.external.http.host | 0.0.0.0 oap.external.http.port | 12800 oap.internal.comm.host | 0.0.0.0 oap.internal.comm.port | 11800  oap.external.grpc.host:oap.external.grpc.port is for reporting telemetry data through gRPC channel, including native agents, OTEL. oap.external.http.host:oap.external.http.port is for reporting telemetry data through HTTP channel and query, including native GraphQL(UI), PromQL, LogQL. oap.internal.comm.host:oap.internal.comm.port is for OAP cluster internal communication via gRPC/HTTP2 protocol. The default host(0.0.0.0) is not suitable for the cluster mode, unless in k8s deployment. Please read Cluster Doc to understand how to set up the SkyWalking backend in the cluster mode.  application.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
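For readability, the flattened storage example that follows can be sketched with its YAML indentation restored. The keys, placeholders, and default values below are taken directly from that example; only the indentation is approximated:

```yaml
storage:
  selector: mysql # the mysql storage will actually be activated, while the h2 storage takes no effect
  h2:
    properties:
      jdbcUrl: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}
      dataSource.user: ${SW_STORAGE_H2_USER:sa}
    metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
  mysql:
    properties:
      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true"}
      dataSource.user: ${SW_DATA_SOURCE_USER:root}
      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
    # other configurations
```

Read against the three levels above: storage is the level-1 module name, selector together with the h2 and mysql provider entries forms level 2, and the settings under each provider are level 3.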
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/latest/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. 
All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: Java 11/17/21.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\nKey Parameters In The Booting Logs After the OAP booting process completed, you should be able to see all important parameters listed in the logs.\n2023-11-06 21:10:45,988 org.apache.skywalking.oap.server.starter.OAPServerBootstrap 67 [main] INFO [] - The key booting parameters of Apache SkyWalking OAP are listed as following. 
Running Mode | null TTL.metrics | 7 TTL.record | 3 Version | 9.7.0-SNAPSHOT-92af797 module.agent-analyzer.provider | default module.ai-pipeline.provider | default module.alarm.provider | default module.aws-firehose.provider | default module.cluster.provider | standalone module.configuration-discovery.provider | default module.configuration.provider | none module.core.provider | default module.envoy-metric.provider | default module.event-analyzer.provider | default module.log-analyzer.provider | default module.logql.provider | default module.promql.provider | default module.query.provider | graphql module.receiver-browser.provider | default module.receiver-clr.provider | default module.receiver-ebpf.provider | default module.receiver-event.provider | default module.receiver-jvm.provider | default module.receiver-log.provider | default module.receiver-meter.provider | default module.receiver-otel.provider | default module.receiver-profile.provider | default module.receiver-register.provider | default module.receiver-sharing-server.provider | default module.receiver-telegraf.provider | default module.receiver-trace.provider | default module.service-mesh.provider | default module.storage.provider | h2 module.telemetry.provider | none oap.external.grpc.host | 0.0.0.0 oap.external.grpc.port | 11800 oap.external.http.host | 0.0.0.0 oap.external.http.port | 12800 oap.internal.comm.host | 0.0.0.0 oap.internal.comm.port | 11800  oap.external.grpc.host:oap.external.grpc.port is for reporting telemetry data through gRPC channel, including native agents, OTEL. oap.external.http.host:oap.external.http.port is for reporting telemetry data through HTTP channel and query, including native GraphQL(UI), PromQL, LogQL. oap.internal.comm.host:oap.internal.comm.port is for OAP cluster internal communication via gRPC/HTTP2 protocol. The default host(0.0.0.0) is not suitable for the cluster mode, unless in k8s deployment. Please read Cluster Doc to understand how to set up the SkyWalking backend in the cluster mode.  application.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
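Most level-3 settings are written as ${ENV_NAME:defaultValue} placeholders: at boot time the value is read from the environment variable named before the colon, and the literal after the colon is used as the fallback. As a hedged sketch of how the default gRPC and REST listen addresses mentioned above are expressed (the core/default keys are the ones this document refers to; the exact SW_CORE_* variable names follow the convention of the shipped application.yml and should be checked against your release):

```yaml
core:
  selector: ${SW_CORE:default}
  default:
    # gRPC endpoint for agents/probes; falls back to 0.0.0.0:11800 when the
    # SW_CORE_GRPC_* environment variables are not set.
    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
    # HTTP REST / GraphQL endpoint used by the UI; falls back to 0.0.0.0:12800.
    restHost: ${SW_CORE_REST_HOST:0.0.0.0}
    restPort: ${SW_CORE_REST_PORT:12800}
```

Exporting such a variable before running bin/startup.sh (for example a different SW_CORE_GRPC_PORT) changes the setting without editing the file, assuming your distribution uses these variable names. The example that follows uses the same placeholder form for every storage setting.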
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/next/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. 
All dependencies of the backend can be found in it.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and ip/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n All native agents and probes, either language based or mesh probe, use the gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the Jetty service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend also in Jetty service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn about other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the setting file will help you read this document. The core concept behind this setting file is that the SkyWalking collector is based on pure modular design. End users can switch or assemble the collector features according to their own requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If there is only one provider listed, the selector is optional and can be omitted. Level 3. Settings of the provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:driver:${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}url:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. 
default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against trying to change the APIs of those modules, unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like YYYYMMDDHHmm in minute dimension metrics, which is timezone related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. If you want to override it, please follow the Java and OS documents.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, in order to reduce memory, network and storage space usages, SkyWalking saves based64-encoded ID(s) only in metrics entities. But these tools usually don\u0026rsquo;t support nested query, and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:driver:${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}url:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. 
driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:driver:${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}url:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. 
driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. 
driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. 
driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. 
driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. 
driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic; telemetry is a typical example. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its code very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high-throughput processing capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details. Query. Provides query interfaces to the UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Queries and storage at each time dimension (minute, hour, day, and month metric indexes) are timezone-related when time is formatted.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute-dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves base64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usage. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. For these reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at the core/default/activeExtraModelColumns config in application.yaml, and set it to true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder.
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 or JDK17.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\nKey Parameters In The Booting Logs After the OAP booting process completed, you should be able to see all important parameters listed in the logs.\n2023-11-06 21:10:45,988 org.apache.skywalking.oap.server.starter.OAPServerBootstrap 67 [main] INFO [] - The key booting parameters of Apache SkyWalking OAP are listed as following. Running Mode | null TTL.metrics | 7 TTL.record | 3 Version | 9.7.0-SNAPSHOT-92af797 module.agent-analyzer.provider | default module.ai-pipeline.provider | default module.alarm.provider | default module.aws-firehose.provider | default module.cluster.provider | standalone module.configuration-discovery.provider | default module.configuration.provider | none module.core.provider | default module.envoy-metric.provider | default module.event-analyzer.provider | default module.log-analyzer.provider | default module.logql.provider | default module.promql.provider | default module.query.provider | graphql module.receiver-browser.provider | default module.receiver-clr.provider | default module.receiver-ebpf.provider | default module.receiver-event.provider | default module.receiver-jvm.provider | default module.receiver-log.provider | default module.receiver-meter.provider | default module.receiver-otel.provider | default module.receiver-profile.provider | default module.receiver-register.provider | default module.receiver-sharing-server.provider | default module.receiver-telegraf.provider | default module.receiver-trace.provider | default module.service-mesh.provider | default module.storage.provider | h2 module.telemetry.provider | none oap.external.grpc.host | 0.0.0.0 oap.external.grpc.port | 11800 oap.external.http.host | 0.0.0.0 oap.external.http.port | 12800 oap.internal.comm.host | 0.0.0.0 oap.internal.comm.port | 11800  oap.external.grpc.host:oap.external.grpc.port is for reporting telemetry data through gRPC channel, including native agents, OTEL. 
oap.external.http.host:oap.external.http.port is for reporting telemetry data through HTTP channel and query, including native GraphQL(UI), PromQL, LogQL. oap.internal.comm.host:oap.internal.comm.port is for OAP cluster internal communication via gRPC/HTTP2 protocol. The default host(0.0.0.0) is not suitable for the cluster mode, unless in k8s deployment. Please read Cluster Doc to understand how to set up the SkyWalking backend in the cluster mode.  application.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  
FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Queries and storage at each time dimension (minute, hour, day, and month metric indexes) are timezone-related when time is formatted.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute-dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\n","excerpt":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts: …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-setup/","title":"Backend setup"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL and its compatible databases PostgreSQL and its compatible databases BanyanDB(alpha stage)  H2 is the default storage option in the distribution package. It is recommended to use H2 for testing and development ONLY. Elasticsearch and OpenSearch are recommended for production environments, especially for large-scale deployments. MySQL and PostgreSQL are recommended for production environments with medium-scale deployments, especially for low trace and log sampling rates. Some of their compatible databases may support a larger scale better, such as TiDB and AWS Aurora.\nBanyanDB is going to be our next-generation storage solution. It is still in the alpha stage, but it has shown high potential for performance improvement: less than 50% CPU usage and 50% memory usage, with 40% of the disk volume, compared to Elasticsearch at the same scale with 100% sampling. We are looking for early adopters, and it is expected to become our first-class recommended storage option from 2024.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/latest/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL and its compatible databases PostgreSQL and its compatible databases BanyanDB(alpha stage)  H2 is the default storage option in the distribution package. It is recommended to use H2 for testing and development ONLY. Elasticsearch and OpenSearch are recommended for production environments, especially for large-scale deployments. MySQL and PostgreSQL are recommended for production environments with medium-scale deployments, especially for low trace and log sampling rates. Some of their compatible databases may support a larger scale better, such as TiDB and AWS Aurora.\nBanyanDB is going to be our next-generation storage solution. It is still in the alpha stage, but it has shown high potential for performance improvement: less than 50% CPU usage and 50% memory usage, with 40% of the disk volume, compared to Elasticsearch at the same scale with 100% sampling. We are looking for early adopters, and it is expected to become our first-class recommended storage option from 2024.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. 
We have provided the following storage …","ref":"/docs/main/next/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allows you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL TiDB InfluxDB PostgreSQL IoTDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set storage provider to elasticsearch.\nElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage.\nSince 8.8.0, SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selector for different ElasticSearch server side version anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_SUPERDATASET_STORAGE_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin and Jaeger traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more), if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). 
This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are commended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overrided.\nAdvanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face query error at trace page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official document, since they have a direct impact on the performance of ElasticSearch.\nElasticSearch with Zipkin trace extension This implementation is very similar to elasticsearch, except that it extends to support Zipkin span storage. The configurations are largely the same.\nstorage:selector:${SW_STORAGE:zipkin-elasticsearch}zipkin-elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}# Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.htmlbulkActions:${SW_STORAGE_ES_BULK_ACTIONS:2000}# Execute the bulk every 2000 requestsbulkSize:${SW_STORAGE_ES_BULK_SIZE:20}# flush the bulk every 20mbflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsAbout Namespace When namespace is set, all index names in ElasticSearch will use it as prefix.\nMySQL Active MySQL as storage, set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. Only part of the settings are listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nTiDB Tested TiDB Server 4.0.8 version and MySQL Client driver 8.0.13 version are currently available. Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nInfluxDB InfluxDB storage provides a time-series database as a new storage option.\nstorage:selector:${SW_STORAGE:influxdb}influxdb:url:${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}user:${SW_STORAGE_INFLUXDB_USER:root}password:${SW_STORAGE_INFLUXDB_PASSWORD:}database:${SW_STORAGE_INFLUXDB_DATABASE:skywalking}actions:${SW_STORAGE_INFLUXDB_ACTIONS:1000}# the number of actions to collectduration:${SW_STORAGE_INFLUXDB_DURATION:1000}# the time to wait at most (milliseconds)fetchTaskLogMaxSize:${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000}# the max number of fetch task log in a requestAll connection related settings, including URL link, username, and password are found in application.yml. For metadata storage provider settings, refer to the configurations of H2/MySQL above.\nPostgreSQL PostgreSQL jdbc driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. 
Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. Only part of the settings are listed here. Please follow HikariCP connection pool document for full settings.\nIoTDB IoTDB is a time-series database from Apache, which is one of the storage plugin options.\nIoTDB storage plugin is still in progress. Its efficiency will improve in the future.\nstorage:selector:${SW_STORAGE:iotdb}iotdb:host:${SW_STORAGE_IOTDB_HOST:127.0.0.1}rpcPort:${SW_STORAGE_IOTDB_RPC_PORT:6667}username:${SW_STORAGE_IOTDB_USERNAME:root}password:${SW_STORAGE_IOTDB_PASSWORD:root}storageGroup:${SW_STORAGE_IOTDB_STORAGE_GROUP:root.skywalking}sessionPoolSize:${SW_STORAGE_IOTDB_SESSIONPOOL_SIZE:8}# If it\u0026#39;s zero, the SessionPool size will be 2*CPU_CoresfetchTaskLogMaxSize:${SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE:1000}# the max number of fetch task log in a requestAll connection related settings, including host, rpcPort, username, and password are found in application.yml. Please ensure the IoTDB version \u0026gt;= 0.12.3.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. 
This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage.\nSince 8.8.0, SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_SUPERDATASET_STORAGE_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin and Jaeger traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. 
In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nAdvanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. 
To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nTiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requestFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nSince 9.2.0, SkyWalking provides no-sharding/one-index mode to merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. After merge all indices, the following indices are available:\n sw_ui_template sw_metrics-all-${day-format} sw_log-${day-format} sw_segment-${day-format} sw_browser_error_log-${day-format} sw_zipkin_span-${day-format} sw_records-all-${day-format}   Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING). 
Set it to true could shard metrics indices into multi-physical indices as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.\n Since 8.8.0, SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_SUPERDATASET_STORAGE_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin and Jaeger traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. 
This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nAdvanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. 
See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nTiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requestFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL MySQL-Sharding(Shardingsphere-Proxy 5.1.2) TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.6 2.4.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. 
slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. 
Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the absolute path of the secrets management file. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference from using the user, password, and trustStorePass configs in the application.yaml file is that the Secrets Management File is watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update is applied only after the OAP server reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
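A brief aside on the recommended elasticsearch.yml settings above: written out as a regular YAML file rather than flattened on one line, they are simply the three entries below. Size the actual values for your own environment.
# elasticsearch.yml (server side)
# In a tracing scenario, consider setting at least these values.
thread_pool.index.queue_size: 1000    # only suitable for ElasticSearch 6
thread_pool.write.queue_size: 1000    # suitable for ElasticSearch 6 and 7
# When you face a query error on the traces page, remember to check this.
index.max_result_window: 1000000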
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nMySQL-Sharding MySQL-Sharding plugin provides the MySQL database sharding and table sharding, this feature leverage Shardingsphere-Proxy to manage the JDBC between OAP and multi-database instances, and according to the sharding rules do routing to the database and table sharding.\nTested Shardingsphere-Proxy 5.1.2 version, and MySQL Client driver 8.0.13 version is currently available. Activate MySQL and Shardingsphere-Proxy as storage, and set storage provider to mysql-sharding.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql-sharding}mysql-sharding:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:13307/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}# The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml# The dataSource name should include the prefix \u0026#34;ds_\u0026#34; and separated by \u0026#34;,\u0026#34;dataSources:${SW_JDBC_SHARDING_DATA_SOURCES:ds_0,ds_1}TiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. 
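Before moving on to TiDB, here is the mysql-sharding fragment above laid out as indented YAML. Again, the values are only the defaults already listed, shown as a readability sketch.
storage:
  selector: ${SW_STORAGE:mysql-sharding}
  mysql-sharding:
    properties:
      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:13307/swtest?rewriteBatchedStatements=true"}
      dataSource.user: ${SW_DATA_SOURCE_USER:root}
      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root}
    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
    maxSizeOfBatchSql: ${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}
    asyncBatchPersistentPoolSize: ${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}
    # The data sources are configured in ShardingSphere-Proxy config-sharding.yaml.
    # Each name must carry the "ds_" prefix; multiple names are comma separated.
    dataSources: ${SW_JDBC_SHARDING_DATA_SOURCES:ds_0,ds_1}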
Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL MySQL-Sharding(Shardingsphere-Proxy 5.3.1) TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.6 2.4.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. 
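Because OpenSearch reuses the elasticsearch provider, switching to it is mostly a matter of pointing clusterNodes at the OpenSearch endpoint. A minimal sketch, with a placeholder hostname and the TLS/authentication settings adjusted to your cluster:
storage:
  selector: ${SW_STORAGE:elasticsearch}   # the same provider handles OpenSearch
  elasticsearch:
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:opensearch.example.internal:9200}   # placeholder host
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"https"}   # use "http" if the cluster has no TLS
    user: ${SW_ES_USER:""}          # set when basic authentication is enabled
    password: ${SW_ES_PASSWORD:""}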
If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
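One more ElasticSearch aside, on namespaces: with the setting below, the OAP would create indices such as production_segment-20000101 instead of sw_segment-20000101, assuming the default naming scheme described above. The value production is purely illustrative.
storage:
  elasticsearch:
    namespace: ${SW_NAMESPACE:"production"}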
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nMySQL-Sharding MySQL-Sharding plugin provides the MySQL database sharding and table sharding, this feature leverage Shardingsphere-Proxy to manage the JDBC between OAP and multi-database instances, and according to the sharding rules do routing to the database and table sharding.\nTested Shardingsphere-Proxy 5.3.1 version, and MySQL Client driver 8.0.13 version is currently available. Activate MySQL and Shardingsphere-Proxy as storage, and set storage provider to mysql-sharding.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql-sharding}mysql-sharding:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:13307/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}# The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml# The dataSource name should include the prefix \u0026#34;ds_\u0026#34; and separated by \u0026#34;,\u0026#34;dataSources:${SW_JDBC_SHARDING_DATA_SOURCES:ds_0,ds_1}TiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. 
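A small aside on the dataSources line above: adding a further ShardingSphere-Proxy data source is just a matter of extending the comma-separated list while keeping the ds_ prefix. For example (ds_2 is hypothetical and must also be defined in config-sharding.yaml):
storage:
  mysql-sharding:
    # ......
    dataSources: ${SW_JDBC_SHARDING_DATA_SOURCES:ds_0,ds_1,ds_2}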
Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.6 2.4.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. 
slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
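A short aside on the advanced setting shown earlier: once the HTML escaping is removed, the translog example is just a JSON string of index settings passed through to ElasticSearch, i.e.:
storage:
  elasticsearch:
    # ......
    advanced: ${SW_STORAGE_ES_ADVANCED:"{\"index.translog.durability\":\"request\",\"index.translog.sync_interval\":\"5s\"}"}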
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
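As a closing note on the JDBC-based options above: since every connection setting is an ${ENV:default} placeholder, a deployment can point, for example, the PostgreSQL provider at an external server without touching application.yml. A hypothetical compose-style sketch, where the image name, hostname, and credentials are all assumptions to be replaced:
services:
  oap:
    image: apache/skywalking-oap-server:latest    # assumption: use your distribution's image and tag
    environment:
      SW_STORAGE: postgresql
      SW_JDBC_URL: "jdbc:postgresql://pg.example.internal:5432/skywalking"   # placeholder host
      SW_DATA_SOURCE_USER: skywalking       # placeholder credentials
      SW_DATA_SOURCE_PASSWORD: change-me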
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. 
If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL and its compatible databases PostgreSQL and its compatible databases BanyanDB(alpha stage)  H2 is the default storage option in the distribution package. It is recommended to use H2 for testing and development ONLY. Elasticsearch and OpenSearch are recommended for production environments, specially for large scale deployments. MySQL and PostgreSQL are recommended for production environments for medium scale deployments, especially for low trace and log sampling rate. Some of their compatible databases may support larger scale better, such as TiDB and AWS Aurora.\nBanyanDB is going to be our next generation storage solution. It is still in alpha stage. It has shown high potential performance improvement. Less than 50% CPU usage and 50% memory usage with 40% disk volume compared to Elasticsearch in the same scale with 100% sampling. We are looking for early adoption, and it would be our first-class recommended storage option since 2024.\n","excerpt":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-storage/","title":"Backend storage"},{"body":"Background Write Ahead Logging (WAL) is a technique used in databases to ensure that data is not lost due to system crashes or other failures. The basic idea of WAL is to log changes to a database in a separate file before applying them to the database itself. This way, if there is a system failure, the database can be recovered by replaying the log of changes from the WAL file. BanyanDB leverages the WAL to enhance the data buffer for schema resource writing. In such a system, write operations are first written to the WAL file before being applied to the interval buffer. This ensures that the log is written to disk before the actual data is written. 
Hence the term \u0026ldquo;write ahead\u0026rdquo;.\nFormat A segment refers to a block of data in the WAL file that contains a sequence of database changes. Once rotate is invoked, a new segment is created to continue logging subsequent changes. A \u0026ldquo;WALEntry\u0026rdquo; is a data unit representing a series of changes to a Series. Each WALEntry is written to a segment.\nWAlEntry contains as follows:\n Length:8 bytes, which means the length of a WalEntry. Series ID:8 bytes, the same as request Series ID. Count:4 bytes, how many binary/timestamps in one WalEntry. Timestamp:8 bytes. Binary Length:2 bytes. Binary: value in the write request.  Write process The writing process in WAL is as follows:\n Firstly, the changes are first written to the write buffer. Those with the same series ID will go to the identical WALEntry. When the buffer is full, the WALEntry is created, then flushed to the disk. WAL can optionally use the compression algorithm snappy to compress the data on disk. Each WALEntry is appended to the tail of the WAL file on the disk.  When entries in the buffer are flushed to the disk, the callback function returned by the write operation is invoked. You can ignore this function to improve the writing performance, but it risks losing data.\nRead WAL A client could read a single segment by a segment id. When opening the segment file, the reader will decompress the WAL file if the writing compresses the data.\nRotation WAL supports rotation operation to switch to a new segment. The operation closes the currently open segment and opens a new one, returning the closed segment details.\nDelete A client could delete a segment closed by the rotate operation.\nconfiguration BanyanDB WAL has the following configuration options:\n   Name Default Value Introduction     wal_compression true Compress the WAL entry or not   wal_file_size 64MB The size of the WAL file   wal_buffer_size 16kB The size of WAL buffer.    ","excerpt":"Background Write Ahead Logging (WAL) is a technique used in databases to ensure that data is not …","ref":"/docs/skywalking-banyandb/latest/concept/wal/","title":"Background"},{"body":"Background Write Ahead Logging (WAL) is a technique used in databases to ensure that data is not lost due to system crashes or other failures. The basic idea of WAL is to log changes to a database in a separate file before applying them to the database itself. This way, if there is a system failure, the database can be recovered by replaying the log of changes from the WAL file. BanyanDB leverages the WAL to enhance the data buffer for schema resource writing. In such a system, write operations are first written to the WAL file before being applied to the interval buffer. This ensures that the log is written to disk before the actual data is written. Hence the term \u0026ldquo;write ahead\u0026rdquo;.\nFormat A segment refers to a block of data in the WAL file that contains a sequence of database changes. Once rotate is invoked, a new segment is created to continue logging subsequent changes. A \u0026ldquo;WALEntry\u0026rdquo; is a data unit representing a series of changes to a Series. Each WALEntry is written to a segment.\nWAlEntry contains as follows:\n Length:8 bytes, which means the length of a WalEntry. Series ID:8 bytes, the same as request Series ID. Count:4 bytes, how many binary/timestamps in one WalEntry. Timestamp:8 bytes. Binary Length:2 bytes. Binary: value in the write request.  
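To make that layout concrete, the following Go sketch serializes one such entry in the listed field order. It is illustrative only: the struct and function names are hypothetical, and big-endian encoding with a Length field that counts the bytes after it are assumptions, not necessarily BanyanDB's actual on-disk format.

```go
package wal

import (
	"bytes"
	"encoding/binary"
)

// WALEntry mirrors the layout described above:
// Length (8B) | Series ID (8B) | Count (4B) | repeated { Timestamp (8B) | Binary Length (2B) | Binary }.
// Names and encoding details are illustrative assumptions, not the real implementation.
type WALEntry struct {
	SeriesID   uint64
	Timestamps []int64
	Values     [][]byte // one binary value per timestamp
}

// Encode serializes the entry; here Length is assumed to count the bytes that follow it.
func (e WALEntry) Encode() ([]byte, error) {
	var body bytes.Buffer
	if err := binary.Write(&body, binary.BigEndian, e.SeriesID); err != nil {
		return nil, err
	}
	if err := binary.Write(&body, binary.BigEndian, uint32(len(e.Timestamps))); err != nil {
		return nil, err
	}
	for i, ts := range e.Timestamps {
		if err := binary.Write(&body, binary.BigEndian, ts); err != nil {
			return nil, err
		}
		if err := binary.Write(&body, binary.BigEndian, uint16(len(e.Values[i]))); err != nil {
			return nil, err
		}
		body.Write(e.Values[i])
	}
	var out bytes.Buffer
	if err := binary.Write(&out, binary.BigEndian, uint64(body.Len())); err != nil {
		return nil, err
	}
	out.Write(body.Bytes())
	return out.Bytes(), nil
}
```

With a layout like this, a reader can recover an entry by reading the 8-byte Length first and then decoding exactly that many following bytes.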
Write process The writing process in WAL is as follows:\n Firstly, the changes are first written to the write buffer. Those with the same series ID will go to the identical WALEntry. When the buffer is full, the WALEntry is created, then flushed to the disk. WAL can optionally use the compression algorithm snappy to compress the data on disk. Each WALEntry is appended to the tail of the WAL file on the disk.  When entries in the buffer are flushed to the disk, the callback function returned by the write operation is invoked. You can ignore this function to improve the writing performance, but it risks losing data.\nRead WAL A client could read a single segment by a segment id. When opening the segment file, the reader will decompress the WAL file if the writing compresses the data.\nRotation WAL supports rotation operation to switch to a new segment. The operation closes the currently open segment and opens a new one, returning the closed segment details.\nDelete A client could delete a segment closed by the rotate operation.\nconfiguration BanyanDB WAL has the following configuration options:\n   Name Default Value Introduction     wal_compression true Compress the WAL entry or not   wal_file_size 64MB The size of the WAL file   wal_buffer_size 16kB The size of WAL buffer.    ","excerpt":"Background Write Ahead Logging (WAL) is a technique used in databases to ensure that data is not …","ref":"/docs/skywalking-banyandb/v0.5.0/concept/wal/","title":"Background"},{"body":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nThe OAP requires BanyanDB 0.5 server. As BanyanDB is still in the beta phase, we don\u0026rsquo;t provide any compatibility besides the required version.\nstorage:banyandb:targets:${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","excerpt":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the …","ref":"/docs/main/latest/en/setup/backend/storages/banyandb/","title":"BanyanDB"},{"body":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nThe OAP requires BanyanDB 0.5 server. 
As BanyanDB is still in the beta phase, we don\u0026rsquo;t provide any compatibility besides the required version.\nstorage:banyandb:targets:${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","excerpt":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the …","ref":"/docs/main/next/en/setup/backend/storages/banyandb/","title":"BanyanDB"},{"body":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nThe OAP requires BanyanDB 0.5 server. As BanyanDB is still in the beta phase, we don\u0026rsquo;t provide any compatibility besides the required version.\nstorage:banyandb:targets:${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","excerpt":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the …","ref":"/docs/main/v9.7.0/en/setup/backend/storages/banyandb/","title":"BanyanDB"},{"body":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises \u0026ldquo;Liaison Nodes\u0026rdquo;, \u0026ldquo;Data Nodes\u0026rdquo;, and \u0026ldquo;Meta Nodes\u0026rdquo;. This structure allows for effectively distributing and managing time-series data within the system.\n1. Architectural Overview A BanyanDB installation includes three distinct types of nodes: Data Nodes, Meta Nodes, and Liaison Nodes.\n1.1 Data Nodes Data Nodes hold all the raw time series data, metadata, and indexed data. 
They handle the storage and management of data, including streams and measures, tag keys and values, as well as field keys and values.\nData Nodes also handle the local query execution. When a query is made, it is directed to a Liaison, which then interacts with Data Nodes to execute the distributed query and return results.\nIn addition to persistent raw data, Data Nodes also handle TopN aggregation calculation or other computational tasks.\n1.2 Meta Nodes Meta Nodes are implemented by etcd. They are responsible for maintaining high-level metadata of the cluster, which includes:\n All nodes in the cluster All database schemas  1.3 Liaison Nodes Liaison Nodes serve as gateways, routing traffic to Data Nodes. In addition to routing, they also provide authentication, TTL, and other security services to ensure secure and effective communication within the cluster.\nLiaison Nodes are also responsible for handling computational tasks associated with distributed queries against the database. They build query tasks and search for data from Data Nodes.\n1.4 Standalone Mode BanyanDB integrates multiple roles into a single process in the standalone mode, making it simpler and faster to deploy. This mode is especially useful for scenarios with a limited number of data points or for testing and development purposes.\nIn this mode, the single process performs the roles of the Liaison Node, Data Node, and Meta Node. It receives requests, maintains metadata, processes queries, and handles data, all within a unified setup.\n2. Communication within a Cluster All nodes within a BanyanDB cluster communicate with other nodes according to their roles:\n Meta Nodes share high-level metadata about the cluster. Data Nodes store and manage the raw time series data and communicate with Meta Nodes. Liaison Nodes distribute incoming data to the appropriate Data Nodes. They also handle distributed query execution and communicate with Meta Nodes.  Nodes Discovery All nodes in the cluster are discovered by the Meta Nodes. When a node starts up, it registers itself with the Meta Nodes. The Meta Nodes then share this information with the Liaison Nodes, which use it to route requests to the appropriate nodes.\n3. Data Organization Different nodes in BanyanDB are responsible for different parts of the database, while Query and Liaison Nodes manage the routing and processing of queries.\n3.1 Meta Nodes Meta Nodes store all high-level metadata that describes the cluster. This data is kept in an etcd-backed database on disk, including information about the shard allocation of each Data Node. This information is used by the Liaison Nodes to route data to the appropriate Data Nodes, based on the sharding key of the data.\nBy storing shard allocation information, Meta Nodes help ensure that data is routed efficiently and accurately across the cluster. This information is constantly updated as the cluster changes, allowing for dynamic allocation of resources and efficient use of available capacity.\n3.2 Data Nodes Data Nodes store all raw time series data, metadata, and indexed data. On disk, the data is organized by \u0026lt;group\u0026gt;/shard-\u0026lt;shard_id\u0026gt;/\u0026lt;segment_id\u0026gt;/. The segment is designed to support the retention policy.\n3.3 Liaison Nodes Liaison Nodes do not store data but manage the routing of incoming requests to the appropriate Query or Data Nodes. 
They also provide authentication, TTL, and other security services.\nThey also handle the computational tasks associated with data queries, interacting directly with Data Nodes to execute queries and return results.\n4. Determining Optimal Node Counts When creating a BanyanDB cluster, choosing the appropriate number of each node type to configure and connect is crucial. The number of Meta Nodes should always be odd, for instance, “3”. The number of Data Nodes scales based on your storage and query needs. The number of Liaison Nodes depends on the expected query load and routing complexity.\nIf the write and read load is from different sources, it is recommended to separate the Liaison Nodes for write and read. For instance, if the write load is from metrics, trace or log collectors and the read load is from a web application, it is recommended to separate the Liaison Nodes for write and read.\nThis separation allows for more efficient routing of requests and better performance. It also allows for scaling out of the cluster based on the specific needs of each type of request. For instance, if the write load is high, you can scale out the write Liaison Nodes to handle the increased load.\nThe BanyanDB architecture allows for efficient clustering, scaling, and high availability, making it a robust choice for time series data management.\n5. Writes in a Cluster In BanyanDB, writing data in a cluster is designed to take advantage of the robust capabilities of underlying storage systems, such as Google Compute Persistent Disk or Amazon S3(TBD). These platforms ensure high levels of data durability, making them an optimal choice for storing raw time series data.\n5.1 Data Replication Unlike some other systems, BanyanDB does not support application-level replication, which can consume significant disk space. Instead, it delegates the task of replication to these underlying storage systems. This approach simplifies the BanyanDB architecture and reduces the complexity of managing replication at the application level. This approach also results in significant data savings.\nThe comparison between using a storage system and application-level replication boils down to several key factors: reliability, scalability, and complexity.\nReliability: A storage system provides built-in data durability by automatically storing data across multiple systems. It\u0026rsquo;s designed to deliver 99.999999999% durability, ensuring data is reliably stored and available when needed. While replication can increase data availability, it\u0026rsquo;s dependent on the application\u0026rsquo;s implementation. Any bugs or issues in the replication logic can lead to data loss or inconsistencies.\nScalability: A storage system is highly scalable by design and can store and retrieve any amount of data from anywhere. As your data grows, the system grows with you. You don\u0026rsquo;t need to worry about outgrowing your storage capacity. Scaling application-level replication can be challenging. As data grows, so does the need for more disk space and compute resources, potentially leading to increased costs and management complexity.\nComplexity: With the storage system handling replication, the complexity is abstracted away from the user. The user need not concern themselves with the details of how replication is handled. Managing replication at the application level can be complex. 
It requires careful configuration, monitoring, and potentially significant engineering effort to maintain.\nFuthermore, the storage system might be cheaper. For instance, S3 can be more cost-effective because it eliminates the need for additional resources required for application-level replication. Application-level replication also requires ongoing maintenance, potentially increasing operational costs.\n5.2 Data Sharding Data distribution across the cluster is determined based on the shard_num setting for a group and the specified entity in each resource, be it a stream or measure. The resource’s name with its entity is the sharding key, guiding data distribution to the appropriate Data Node during write operations.\nLiaison Nodes retrieve shard mapping information from Meta Nodes to achieve efficient data routing. This information is used to route data to the appropriate Data Nodes based on the sharding key of the data.\nThis sharding strategy ensures the write load is evenly distributed across the cluster, enhancing write performance and overall system efficiency. BanyanDB uses a hash algorithm for sharding. The hash function maps the sharding key (resource name and entity) to a node in the cluster. Each shard is assigned to the node returned by the hash function.\n5.3 Data Write Path Here\u0026rsquo;s a text-based diagram illustrating the data write path in BanyanDB:\nUser | | API Request (Write) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Routes Request | (Identifies relevant Data Nodes | | and dispatches write request) | ------------------------------------ | v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request is a write request, containing the data to be written to the database. The Liaison Node, which is stateless, identifies the relevant Data Nodes that will store the data based on the entity specified in the request. The write request is executed across the identified Data Nodes. Each Data Node writes the data to its shard.  This architecture allows BanyanDB to execute write requests efficiently across a distributed system, leveraging the stateless nature and routing/writing capabilities of the Liaison Node, and the distributed storage of Data Nodes.\n6. Queries in a Cluster BanyanDB utilizes a distributed architecture that allows for efficient query processing. When a query is made, it is directed to a Liaison Node.\n6.1 Query Routing Liaison Nodes do not use shard mapping information from Meta Nodes to execute distributed queries. Instead, they access all Data Nodes to retrieve the necessary data for queries. As the query load is lower, it is practical for liaison nodes to access all data nodes for this purpose. It may increase network traffic, but simplifies scaling out of the cluster.\nCompared to the write load, the query load is relatively low. For instance, in a time series database, the write load is typically 100x higher than the query load. This is because the write load is driven by the number of devices sending data to the database, while the query load is driven by the number of users accessing the data.\nThis strategy enables scaling out of the cluster. When the cluster scales out, the liaison node can access all data nodes without any mapping info changes. 
It eliminates the need to backup previous shard mapping information, reducing complexity of scaling out.\n6.2 Query Execution Parallel execution significantly enhances the efficiency of data retrieval and reduces the overall query processing time. It allows for faster response times as the workload of the query is shared across multiple shards, each working on their part of the problem simultaneously. This feature makes BanyanDB particularly effective for large-scale data analysis tasks.\nIn summary, BanyanDB\u0026rsquo;s approach to querying leverages its unique distributed architecture, enabling high-performance data retrieval across multiple shards in parallel.\n6.3 Query Path User | | API Request (Query) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Distributes Query | (Access all Data nodes to | | execute distributed queries) | ------------------------------------ | | | v v v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request may be a query for specific data. The Liaison Node builds a distributed query to select all data nodes. The query is executed in parallel across all Data Nodes. Each Data Node execute a local query plan to process the data stored in its shard concurrently with the others. The results from each shard are then returned to the Liaison Node, which consolidates them into a single response to the user.  This architecture allows BanyanDB to execute queries efficiently across a distributed system, leveraging the distributed query capabilities of the Liaison Node and the parallel processing of Data Nodes.\n","excerpt":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises …","ref":"/docs/skywalking-banyandb/latest/concept/clustering/","title":"BanyanDB Clustering"},{"body":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises \u0026ldquo;Liaison Nodes\u0026rdquo;, \u0026ldquo;Data Nodes\u0026rdquo;, and \u0026ldquo;Meta Nodes\u0026rdquo;. This structure allows for effectively distributing and managing time-series data within the system.\n1. Architectural Overview A BanyanDB installation includes three distinct types of nodes: Data Nodes, Meta Nodes, and Liaison Nodes.\n1.1 Data Nodes Data Nodes hold all the raw time series data, metadata, and indexed data. They handle the storage and management of data, including streams and measures, tag keys and values, as well as field keys and values.\nData Nodes also handle the local query execution. When a query is made, it is directed to a Liaison, which then interacts with Data Nodes to execute the distributed query and return results.\nIn addition to persistent raw data, Data Nodes also handle TopN aggregation calculation or other computational tasks.\n1.2 Meta Nodes Meta Nodes is implemented by etcd. They are responsible for maintaining high-level metadata of the cluster, which includes:\n All nodes in the cluster All database schemas  1.3 Liaison Nodes Liaison Nodes serve as gateways, routing traffic to Data Nodes. In addition to routing, they also provide authentication, TTL, and other security services to ensure secure and effective communication without the cluster.\nLiaison Nodes are also responsible for handling computational tasks associated with distributed querying the database. 
They build query tasks and search for data from Data Nodes.\n1.4 Standalone Mode BanyanDB integrates multiple roles into a single process in the standalone mode, making it simpler and faster to deploy. This mode is especially useful for scenarios with a limited number of data points or for testing and development purposes.\nIn this mode, the single process performs the roles of the Liaison Node, Data Node, and Meta Node. It receives requests, maintains metadata, processes queries, and handles data, all within a unified setup.\n2. Communication within a Cluster All nodes within a BanyanDB cluster communicate with other nodes according to their roles:\n Meta Nodes share high-level metadata about the cluster. Data Nodes store and manage the raw time series data and communicate with Meta Nodes. Liaison Nodes distribute incoming data to the appropriate Data Nodes. They also handle distributed query execution and communicate with Meta Nodes.  Nodes Discovery All nodes in the cluster are discovered by the Meta Nodes. When a node starts up, it registers itself with the Meta Nodes. The Meta Nodes then share this information with the Liaison Nodes which use it to route requests to the appropriate nodes.\nIf data nodes are unable to connect to the meta nodes due to network partition or other issues, they will be removed from the meta nodes. However, the liaison nodes will not remove the data nodes from their routing list until the data nodes are also unreachable from the liaison nodes' perspective. This approach ensures that the system can continue to function even if some data nodes are temporarily unavailable from the meta nodes.\n3. Data Organization Different nodes in BanyanDB are responsible for different parts of the database, while Query and Liaison Nodes manage the routing and processing of queries.\n3.1 Meta Nodes Meta Nodes store all high-level metadata that describes the cluster. This data is kept in an etcd-backed database on disk, including information about the shard allocation of each Data Node. This information is used by the Liaison Nodes to route data to the appropriate Data Nodes, based on the sharding key of the data.\nBy storing shard allocation information, Meta Nodes help ensure that data is routed efficiently and accurately across the cluster. This information is constantly updated as the cluster changes, allowing for dynamic allocation of resources and efficient use of available capacity.\n3.2 Data Nodes Data Nodes store all raw time series data, metadata, and indexed data. On disk, the data is organized by \u0026lt;group\u0026gt;/shard-\u0026lt;shard_id\u0026gt;/\u0026lt;segment_id\u0026gt;/. The segment is designed to support retention policy.\n3.3 Liaison Nodes Liaison Nodes do not store data but manage the routing of incoming requests to the appropriate Query or Data Nodes. They also provide authentication, TTL, and other security services.\nThey also handle the computational tasks associated with data queries, interacting directly with Data Nodes to execute queries and return results.\n4. Determining Optimal Node Counts When creating a BanyanDB cluster, choosing the appropriate number of each node type to configure and connect is crucial. The number of Meta Nodes should always be odd, for instance, “3”. The number of Data Nodes scales based on your storage and query needs. 
The number of Liaison Nodes depends on the expected query load and routing complexity.\nIf the write and read load is from different sources, it is recommended to separate the Liaison Nodes for write and read. For instance, if the write load is from metrics, trace or log collectors and the read load is from a web application, it is recommended to separate the Liaison Nodes for write and read.\nThis separation allows for more efficient routing of requests and better performance. It also allows for scaling out of the cluster based on the specific needs of each type of request. For instance, if the write load is high, you can scale out the write Liaison Nodes to handle the increased load.\nThe BanyanDB architecture allows for efficient clustering, scaling, and high availability, making it a robust choice for time series data management.\n5. Writes in a Cluster In BanyanDB, writing data in a cluster is designed to take advantage of the robust capabilities of underlying storage systems, such as Google Compute Persistent Disk or Amazon S3(TBD). These platforms ensure high levels of data durability, making them an optimal choice for storing raw time series data.\n5.1 Data Replication Unlike some other systems, BanyanDB does not support application-level replication, which can consume significant disk space. Instead, it delegates the task of replication to these underlying storage systems. This approach simplifies the BanyanDB architecture and reduces the complexity of managing replication at the application level. This approach also results in significant data savings.\nThe comparison between using a storage system and application-level replication boils down to several key factors: reliability, scalability, and complexity.\nReliability: A storage system provides built-in data durability by automatically storing data across multiple systems. It\u0026rsquo;s designed to deliver 99.999999999% durability, ensuring data is reliably stored and available when needed. While replication can increase data availability, it\u0026rsquo;s dependent on the application\u0026rsquo;s implementation. Any bugs or issues in the replication logic can lead to data loss or inconsistencies.\nScalability: A storage system is highly scalable by design and can store and retrieve any amount of data from anywhere. As your data grows, the system grows with you. You don\u0026rsquo;t need to worry about outgrowing your storage capacity. Scaling application-level replication can be challenging. As data grows, so does the need for more disk space and compute resources, potentially leading to increased costs and management complexity.\nComplexity: With the storage system handling replication, the complexity is abstracted away from the user. The user need not concern themselves with the details of how replication is handled. Managing replication at the application level can be complex. It requires careful configuration, monitoring, and potentially significant engineering effort to maintain.\nFuthermore, the storage system might be cheaper. For instance, S3 can be more cost-effective because it eliminates the need for additional resources required for application-level replication. Application-level replication also requires ongoing maintenance, potentially increasing operational costs.\n5.2 Data Sharding Data distribution across the cluster is determined based on the shard_num setting for a group and the specified entity in each resource, be it a stream or measure. 
The resource’s name with its entity is the sharding key, guiding data distribution to the appropriate Data Node during write operations.\nLiaison Nodes retrieve shard mapping information from Meta Nodes to achieve efficient data routing. This information is used to route data to the appropriate Data Nodes based on the sharding key of the data.\nThis sharding strategy ensures the write load is evenly distributed across the cluster, enhancing write performance and overall system efficiency. BanyanDB uses a hash algorithm for sharding. The hash function maps the sharding key (resource name and entity) to a node in the cluster. Each shard is assigned to the node returned by the hash function.\n5.3 Data Write Path Here\u0026rsquo;s a text-based diagram illustrating the data write path in BanyanDB:\nUser | | API Request (Write) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Routes Request | (Identifies relevant Data Nodes | | and dispatches write request) | ------------------------------------ | v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request is a write request, containing the data to be written to the database. The Liaison Node, which is stateless, identifies the relevant Data Nodes that will store the data based on the entity specified in the request. The write request is executed across the identified Data Nodes. Each Data Node writes the data to its shard.  This architecture allows BanyanDB to execute write requests efficiently across a distributed system, leveraging the stateless nature and routing/writing capabilities of the Liaison Node, and the distributed storage of Data Nodes.\n6. Queries in a Cluster BanyanDB utilizes a distributed architecture that allows for efficient query processing. When a query is made, it is directed to a Liaison Node.\n6.1 Query Routing Liaison Nodes do not use shard mapping information from Meta Nodes to execute distributed queries. Instead, they access all Data Nodes to retrieve the necessary data for queries. As the query load is lower, it is practical for liaison nodes to access all data nodes for this purpose. It may increase network traffic, but simplifies scaling out of the cluster.\nCompared to the write load, the query load is relatively low. For instance, in a time series database, the write load is typically 100x higher than the query load. This is because the write load is driven by the number of devices sending data to the database, while the query load is driven by the number of users accessing the data.\nThis strategy enables scaling out of the cluster. When the cluster scales out, the liaison node can access all data nodes without any mapping info changes. It eliminates the need to backup previous shard mapping information, reducing complexity of scaling out.\n6.2 Query Execution Parallel execution significantly enhances the efficiency of data retrieval and reduces the overall query processing time. It allows for faster response times as the workload of the query is shared across multiple shards, each working on their part of the problem simultaneously. 
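As a rough illustration of this fan-out-and-merge pattern, the sketch below shows how a liaison might issue the same local plan to every Data Node in parallel and combine the shard results. The DataNodeClient interface, the Result type, and distributedQuery are hypothetical names used only for illustration; they are not BanyanDB's actual API.

```go
package liaison

import (
	"context"
	"sync"
)

// DataNodeClient is a hypothetical handle to one Data Node's local query executor.
type DataNodeClient interface {
	QueryShard(ctx context.Context, plan []byte) ([]Result, error)
}

// Result is a simplified shard query result.
type Result struct {
	SeriesID  uint64
	Timestamp int64
	Value     []byte
}

// distributedQuery fans the same local plan out to every Data Node in parallel
// and merges whatever each shard returns; error handling is deliberately simple.
func distributedQuery(ctx context.Context, nodes []DataNodeClient, plan []byte) ([]Result, error) {
	var (
		mu       sync.Mutex
		wg       sync.WaitGroup
		merged   []Result
		firstErr error
	)
	for _, n := range nodes {
		wg.Add(1)
		go func(n DataNodeClient) {
			defer wg.Done()
			res, err := n.QueryShard(ctx, plan)
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				if firstErr == nil {
					firstErr = err
				}
				return
			}
			merged = append(merged, res...)
		}(n)
	}
	wg.Wait()
	return merged, firstErr
}
```

A real liaison would additionally apply ordering, aggregation, and limits while consolidating the shard results into a single response.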
This feature makes BanyanDB particularly effective for large-scale data analysis tasks.\nIn summary, BanyanDB\u0026rsquo;s approach to querying leverages its unique distributed architecture, enabling high-performance data retrieval across multiple shards in parallel.\n6.3 Query Path User | | API Request (Query) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Distributes Query | (Access all Data nodes to | | execute distributed queries) | ------------------------------------ | | | v v v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request may be a query for specific data. The Liaison Node builds a distributed query to select all data nodes. The query is executed in parallel across all Data Nodes. Each Data Node execute a local query plan to process the data stored in its shard concurrently with the others. The results from each shard are then returned to the Liaison Node, which consolidates them into a single response to the user.  This architecture allows BanyanDB to execute queries efficiently across a distributed system, leveraging the distributed query capabilities of the Liaison Node and the parallel processing of Data Nodes.\n7. Failover BanyanDB is designed to be highly available and fault-tolerant.\nIn case of a Data Node failure, the system can automatically recover and continue to operate.\nLiaison nodes have a built-in mechanism to detect the failure of a Data Node. When a Data Node fails, the Liaison Node will automatically route requests to other available Data Nodes with the same shard. This ensures that the system remains operational even in the face of node failures. Thanks to the query mode, which allows Liaison Nodes to access all Data Nodes, the system can continue to function even if some Data Nodes are unavailable. When the failed data nodes are restored, the system won\u0026rsquo;t reply data to them since the data is still retrieved from other nodes.\nIn the case of a Liaison Node failure, the system can be configured to have multiple Liaison Nodes for redundancy. If one Liaison Node fails, the other Liaison Nodes can take over its responsibilities, ensuring that the system remains available.\n Please note that any written request which triggers the failover process will be rejected, and the client should re-send the request.\n ","excerpt":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises …","ref":"/docs/skywalking-banyandb/next/concept/clustering/","title":"BanyanDB Clustering"},{"body":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises \u0026ldquo;Liaison Nodes\u0026rdquo;, \u0026ldquo;Data Nodes\u0026rdquo;, and \u0026ldquo;Meta Nodes\u0026rdquo;. This structure allows for effectively distributing and managing time-series data within the system.\n1. Architectural Overview A BanyanDB installation includes three distinct types of nodes: Data Nodes, Meta Nodes, and Liaison Nodes.\n1.1 Data Nodes Data Nodes hold all the raw time series data, metadata, and indexed data. They handle the storage and management of data, including streams and measures, tag keys and values, as well as field keys and values.\nData Nodes also handle the local query execution. 
When a query is made, it is directed to a Liaison, which then interacts with Data Nodes to execute the distributed query and return results.\nIn addition to persistent raw data, Data Nodes also handle TopN aggregation calculation or other computational tasks.\n1.2 Meta Nodes Meta Nodes are implemented by etcd. They are responsible for maintaining high-level metadata of the cluster, which includes:\n All nodes in the cluster All database schemas  1.3 Liaison Nodes Liaison Nodes serve as gateways, routing traffic to Data Nodes. In addition to routing, they also provide authentication, TTL, and other security services to ensure secure and effective communication within the cluster.\nLiaison Nodes are also responsible for handling computational tasks associated with distributed querying of the database. They build query tasks and search for data from Data Nodes.\n1.4 Standalone Mode BanyanDB integrates multiple roles into a single process in the standalone mode, making it simpler and faster to deploy. This mode is especially useful for scenarios with a limited number of data points or for testing and development purposes.\nIn this mode, the single process performs the roles of the Liaison Node, Data Node, and Meta Node. It receives requests, maintains metadata, processes queries, and handles data, all within a unified setup.\n2. Communication within a Cluster All nodes within a BanyanDB cluster communicate with other nodes according to their roles:\n Meta Nodes share high-level metadata about the cluster. Data Nodes store and manage the raw time series data and communicate with Meta Nodes. Liaison Nodes distribute incoming data to the appropriate Data Nodes. They also handle distributed query execution and communicate with Meta Nodes.  Nodes Discovery All nodes in the cluster are discovered by the Meta Nodes. When a node starts up, it registers itself with the Meta Nodes. The Meta Nodes then share this information with the Liaison Nodes which use it to route requests to the appropriate nodes.\n3. Data Organization Different nodes in BanyanDB are responsible for different parts of the database, while Query and Liaison Nodes manage the routing and processing of queries.\n3.1 Meta Nodes Meta Nodes store all high-level metadata that describes the cluster. This data is kept in an etcd-backed database on disk, including information about the shard allocation of each Data Node. This information is used by the Liaison Nodes to route data to the appropriate Data Nodes, based on the sharding key of the data.\nBy storing shard allocation information, Meta Nodes help ensure that data is routed efficiently and accurately across the cluster. This information is constantly updated as the cluster changes, allowing for dynamic allocation of resources and efficient use of available capacity.\n3.2 Data Nodes Data Nodes store all raw time series data, metadata, and indexed data. On disk, the data is organized by \u0026lt;group\u0026gt;/shard-\u0026lt;shard_id\u0026gt;/\u0026lt;segment_id\u0026gt;/. The segment is designed to support retention policy.\n3.3 Liaison Nodes Liaison Nodes do not store data but manage the routing of incoming requests to the appropriate Query or Data Nodes. They also provide authentication, TTL, and other security services.\nThey also handle the computational tasks associated with data queries, interacting directly with Data Nodes to execute queries and return results.\n4. 
Determining Optimal Node Counts When creating a BanyanDB cluster, choosing the appropriate number of each node type to configure and connect is crucial. The number of Meta Nodes should always be odd, for instance, “3”. The number of Data Nodes scales based on your storage and query needs. The number of Liaison Nodes depends on the expected query load and routing complexity.\nIf the write and read load is from different sources, it is recommended to separate the Liaison Nodes for write and read. For instance, if the write load is from metrics, trace or log collectors and the read load is from a web application, it is recommended to separate the Liaison Nodes for write and read.\nThis separation allows for more efficient routing of requests and better performance. It also allows for scaling out of the cluster based on the specific needs of each type of request. For instance, if the write load is high, you can scale out the write Liaison Nodes to handle the increased load.\nThe BanyanDB architecture allows for efficient clustering, scaling, and high availability, making it a robust choice for time series data management.\n5. Writes in a Cluster In BanyanDB, writing data in a cluster is designed to take advantage of the robust capabilities of underlying storage systems, such as Google Compute Persistent Disk or Amazon S3 (TBD). These platforms ensure high levels of data durability, making them an optimal choice for storing raw time series data.\n5.1 Data Replication Unlike some other systems, BanyanDB does not support application-level replication, which can consume significant disk space. Instead, it delegates the task of replication to these underlying storage systems. This approach simplifies the BanyanDB architecture and reduces the complexity of managing replication at the application level. This approach also results in significant data savings.\nThe comparison between using a storage system and application-level replication boils down to several key factors: reliability, scalability, and complexity.\nReliability: A storage system provides built-in data durability by automatically storing data across multiple systems. It\u0026rsquo;s designed to deliver 99.999999999% durability, ensuring data is reliably stored and available when needed. While replication can increase data availability, it\u0026rsquo;s dependent on the application\u0026rsquo;s implementation. Any bugs or issues in the replication logic can lead to data loss or inconsistencies.\nScalability: A storage system is highly scalable by design and can store and retrieve any amount of data from anywhere. As your data grows, the system grows with you. You don\u0026rsquo;t need to worry about outgrowing your storage capacity. Scaling application-level replication can be challenging. As data grows, so does the need for more disk space and compute resources, potentially leading to increased costs and management complexity.\nComplexity: With the storage system handling replication, the complexity is abstracted away from the user. The user need not concern themselves with the details of how replication is handled. Managing replication at the application level can be complex. It requires careful configuration, monitoring, and potentially significant engineering effort to maintain.\nFurthermore, the storage system might be cheaper. For instance, S3 can be more cost-effective because it eliminates the need for additional resources required for application-level replication. 
Application-level replication also requires ongoing maintenance, potentially increasing operational costs.\n5.2 Data Sharding Data distribution across the cluster is determined based on the shard_num setting for a group and the specified entity in each resource, be it a stream or measure. The resource’s name with its entity is the sharding key, guiding data distribution to the appropriate Data Node during write operations.\nLiaison Nodes retrieve shard mapping information from Meta Nodes to achieve efficient data routing. This information is used to route data to the appropriate Data Nodes based on the sharding key of the data.\nThis sharding strategy ensures the write load is evenly distributed across the cluster, enhancing write performance and overall system efficiency. BanyanDB uses a hash algorithm for sharding. The hash function maps the sharding key (resource name and entity) to a node in the cluster. Each shard is assigned to the node returned by the hash function.\n5.3 Data Write Path Here\u0026rsquo;s a text-based diagram illustrating the data write path in BanyanDB:\nUser | | API Request (Write) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Routes Request | (Identifies relevant Data Nodes | | and dispatches write request) | ------------------------------------ | v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request is a write request, containing the data to be written to the database. The Liaison Node, which is stateless, identifies the relevant Data Nodes that will store the data based on the entity specified in the request. The write request is executed across the identified Data Nodes. Each Data Node writes the data to its shard.  This architecture allows BanyanDB to execute write requests efficiently across a distributed system, leveraging the stateless nature and routing/writing capabilities of the Liaison Node, and the distributed storage of Data Nodes.\n6. Queries in a Cluster BanyanDB utilizes a distributed architecture that allows for efficient query processing. When a query is made, it is directed to a Liaison Node.\n6.1 Query Routing Liaison Nodes do not use shard mapping information from Meta Nodes to execute distributed queries. Instead, they access all Data Nodes to retrieve the necessary data for queries. As the query load is lower, it is practical for liaison nodes to access all data nodes for this purpose. It may increase network traffic, but simplifies scaling out of the cluster.\nCompared to the write load, the query load is relatively low. For instance, in a time series database, the write load is typically 100x higher than the query load. This is because the write load is driven by the number of devices sending data to the database, while the query load is driven by the number of users accessing the data.\nThis strategy enables scaling out of the cluster. When the cluster scales out, the liaison node can access all data nodes without any mapping info changes. It eliminates the need to backup previous shard mapping information, reducing complexity of scaling out.\n6.2 Query Execution Parallel execution significantly enhances the efficiency of data retrieval and reduces the overall query processing time. 
It allows for faster response times as the workload of the query is shared across multiple shards, each working on their part of the problem simultaneously. This feature makes BanyanDB particularly effective for large-scale data analysis tasks.\nIn summary, BanyanDB\u0026rsquo;s approach to querying leverages its unique distributed architecture, enabling high-performance data retrieval across multiple shards in parallel.\n6.3 Query Path User | | API Request (Query) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Distributes Query | (Access all Data nodes to | | execute distributed queries) | ------------------------------------ | | | v v v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request may be a query for specific data. The Liaison Node builds a distributed query to select all data nodes. The query is executed in parallel across all Data Nodes. Each Data Node executes a local query plan to process the data stored in its shard concurrently with the others. The results from each shard are then returned to the Liaison Node, which consolidates them into a single response to the user.  This architecture allows BanyanDB to execute queries efficiently across a distributed system, leveraging the distributed query capabilities of the Liaison Node and the parallel processing of Data Nodes.\n","excerpt":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises …","ref":"/docs/skywalking-banyandb/v0.5.0/concept/clustering/","title":"BanyanDB Clustering"},{"body":"This is the blog section. It has two categories: News and Releases.\nFiles in these directories will be listed in reverse chronological order.\n","excerpt":"This is the blog section. It has two categories: News and Releases.\nFiles in these directories will …","ref":"/blog/","title":"Blog"},{"body":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the BookKeeper and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. BookKeeper entity as a Service in OAP and on the Layer: BOOKKEEPER.\nData flow  BookKeeper exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from BookKeeper cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up BookKeeper Cluster. Set up OpenTelemetry Collector. For an example of the OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  BookKeeper Monitoring Bookkeeper monitoring provides multidimensional metrics monitoring of BookKeeper cluster as Layer: BOOKKEEPER Service in the OAP. In each cluster, the nodes are represented as Instance.\nBookKeeper Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Bookie Ledgers Count meter_bookkeeper_bookie_ledgers_count The number of the bookie ledgers. Bookkeeper Cluster   Bookie Ledger Writable Dirs meter_bookkeeper_bookie_ledger_writable_dirs The number of writable directories in the bookie. 
Bookkeeper Cluster   Bookie Ledger Dir Usage meter_bookkeeper_bookie_ledger_dir_data_bookkeeper_ledgers_usage The number of successfully created connections. Bookkeeper Cluster   Bookie Entries Count meter_bookkeeper_bookie_entries_count The number of the bookie write entries. Bookkeeper Cluster   Bookie Write Cache Size meter_bookkeeper_bookie_write_cache_size The size of the bookie write cache (MB). Bookkeeper Cluster   Bookie Write Cache Entry Count meter_bookkeeper_bookie_write_cache_count The entry count in the bookie write cache. Bookkeeper Cluster   Bookie Read Cache Size meter_bookkeeper_bookie_read_cache_size The size of the bookie read cache (MB). Bookkeeper Cluster   Bookie Read Cache Entry Count meter_bookkeeper_bookie_read_cache_count The entry count in the bookie read cache. Bookkeeper Cluster   Bookie Read Rate meter_bookkeeper_bookie_read_rate The bookie read rate (bytes/s). Bookkeeper Cluster   Bookie Write Rate meter_bookkeeper_bookie_write_rate The bookie write rate (bytes/s). Bookkeeper Cluster    BookKeeper Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     JVM Memory Pool Used meter_bookkeeper_node_jvm_memory_pool_used The usage of the broker jvm memory pool. Bookkeeper Bookie   JVM Memory meter_bookkeeper_node_jvm_memory_used meter_bookkeeper_node_jvm_memory_committed meter_bookkeeper_node_jvm_memory_init The usage of the broker jvm memory. Bookkeeper Bookie   JVM Threads meter_bookkeeper_node_jvm_threads_current meter_bookkeeper_node_jvm_threads_daemon meter_bookkeeper_node_jvm_threads_peak meter_bookkeeper_node_jvm_threads_deadlocked The count of the jvm threads. Bookkeeper Bookie   GC Time meter_bookkeeper_node_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Bookkeeper Bookie   GC Count meter_bookkeeper_node_jvm_gc_collection_seconds_count The count of a given JVM garbage. Bookkeeper Bookie   Thread Executor Completed meter_bookkeeper_node_thread_executor_completed The count of the executor thread. Bookkeeper Bookie   Thread Executor Tasks meter_bookkeeper_node_thread_executor_tasks_completed meter_bookkeeper_node_thread_executor_tasks_rejected meter_bookkeeper_node_thread_executor_tasks_failed The count of the executor tasks. Bookkeeper Bookie   Pooled Threads meter_bookkeeper_node_high_priority_threads meter_bookkeeper_node_read_thread_pool_threads The count of the pooled thread. Bookkeeper Bookie   Pooled Threads Max Queue Size meter_bookkeeper_node_high_priority_thread_max_queue_size meter_bookkeeper_node_read_thread_pool_max_queue_size The count of the pooled threads max queue size. Bookkeeper Bookie    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/bookkeeper/bookkeeper-cluster.yaml, otel-rules/bookkeeper/bookkeeper-node.yaml. The Bookkeeper dashboard panel configurations are found in /config/ui-initialized-templates/bookkeeper.\n","excerpt":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the …","ref":"/docs/main/latest/en/setup/backend/backend-bookkeeper-monitoring/","title":"BookKeeper monitoring"},{"body":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the BookKeeper and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. 
Kafka entity as a Service in OAP and on the `Layer: BOOKKEEPER.\nData flow  BookKeeper exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from BookKeeper cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up BookKeeper Cluster. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  BookKeeper Monitoring Bookkeeper monitoring provides multidimensional metrics monitoring of BookKeeper cluster as Layer: BOOKKEEPER Service in the OAP. In each cluster, the nodes are represented as Instance.\nBookKeeper Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Bookie Ledgers Count meter_bookkeeper_bookie_ledgers_count The number of the bookie ledgers. Bookkeeper Cluster   Bookie Ledger Writable Dirs meter_bookkeeper_bookie_ledger_writable_dirs The number of writable directories in the bookie. Bookkeeper Cluster   Bookie Ledger Dir Usage meter_bookkeeper_bookie_ledger_dir_data_bookkeeper_ledgers_usage The number of successfully created connections. Bookkeeper Cluster   Bookie Entries Count meter_bookkeeper_bookie_entries_count The number of the bookie write entries. Bookkeeper Cluster   Bookie Write Cache Size meter_bookkeeper_bookie_write_cache_size The size of the bookie write cache (MB). Bookkeeper Cluster   Bookie Write Cache Entry Count meter_bookkeeper_bookie_write_cache_count The entry count in the bookie write cache. Bookkeeper Cluster   Bookie Read Cache Size meter_bookkeeper_bookie_read_cache_size The size of the bookie read cache (MB). Bookkeeper Cluster   Bookie Read Cache Entry Count meter_bookkeeper_bookie_read_cache_count The entry count in the bookie read cache. Bookkeeper Cluster   Bookie Read Rate meter_bookkeeper_bookie_read_rate The bookie read rate (bytes/s). Bookkeeper Cluster   Bookie Write Rate meter_bookkeeper_bookie_write_rate The bookie write rate (bytes/s). Bookkeeper Cluster    BookKeeper Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     JVM Memory Pool Used meter_bookkeeper_node_jvm_memory_pool_used The usage of the broker jvm memory pool. Bookkeeper Bookie   JVM Memory meter_bookkeeper_node_jvm_memory_used meter_bookkeeper_node_jvm_memory_committed meter_bookkeeper_node_jvm_memory_init The usage of the broker jvm memory. Bookkeeper Bookie   JVM Threads meter_bookkeeper_node_jvm_threads_current meter_bookkeeper_node_jvm_threads_daemon meter_bookkeeper_node_jvm_threads_peak meter_bookkeeper_node_jvm_threads_deadlocked The count of the jvm threads. Bookkeeper Bookie   GC Time meter_bookkeeper_node_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Bookkeeper Bookie   GC Count meter_bookkeeper_node_jvm_gc_collection_seconds_count The count of a given JVM garbage. Bookkeeper Bookie   Thread Executor Completed meter_bookkeeper_node_thread_executor_completed The count of the executor thread. Bookkeeper Bookie   Thread Executor Tasks meter_bookkeeper_node_thread_executor_tasks_completed meter_bookkeeper_node_thread_executor_tasks_rejected meter_bookkeeper_node_thread_executor_tasks_failed The count of the executor tasks. Bookkeeper Bookie   Pooled Threads meter_bookkeeper_node_high_priority_threads meter_bookkeeper_node_read_thread_pool_threads The count of the pooled thread. 
Bookkeeper Bookie   Pooled Threads Max Queue Size meter_bookkeeper_node_high_priority_thread_max_queue_size meter_bookkeeper_node_read_thread_pool_max_queue_size The count of the pooled threads max queue size. Bookkeeper Bookie    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/bookkeeper/bookkeeper-cluster.yaml, otel-rules/bookkeeper/bookkeeper-node.yaml. The Bookkeeper dashboard panel configurations are found in /config/ui-initialized-templates/bookkeeper.\n","excerpt":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the …","ref":"/docs/main/next/en/setup/backend/backend-bookkeeper-monitoring/","title":"BookKeeper monitoring"},{"body":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the BookKeeper and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the `Layer: BOOKKEEPER.\nData flow  BookKeeper exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from BookKeeper cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up BookKeeper Cluster. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  BookKeeper Monitoring Bookkeeper monitoring provides multidimensional metrics monitoring of BookKeeper cluster as Layer: BOOKKEEPER Service in the OAP. In each cluster, the nodes are represented as Instance.\nBookKeeper Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Bookie Ledgers Count meter_bookkeeper_bookie_ledgers_count The number of the bookie ledgers. Bookkeeper Cluster   Bookie Ledger Writable Dirs meter_bookkeeper_bookie_ledger_writable_dirs The number of writable directories in the bookie. Bookkeeper Cluster   Bookie Ledger Dir Usage meter_bookkeeper_bookie_ledger_dir_data_bookkeeper_ledgers_usage The number of successfully created connections. Bookkeeper Cluster   Bookie Entries Count meter_bookkeeper_bookie_entries_count The number of the bookie write entries. Bookkeeper Cluster   Bookie Write Cache Size meter_bookkeeper_bookie_write_cache_size The size of the bookie write cache (MB). Bookkeeper Cluster   Bookie Write Cache Entry Count meter_bookkeeper_bookie_write_cache_count The entry count in the bookie write cache. Bookkeeper Cluster   Bookie Read Cache Size meter_bookkeeper_bookie_read_cache_size The size of the bookie read cache (MB). Bookkeeper Cluster   Bookie Read Cache Entry Count meter_bookkeeper_bookie_read_cache_count The entry count in the bookie read cache. Bookkeeper Cluster   Bookie Read Rate meter_bookkeeper_bookie_read_rate The bookie read rate (bytes/s). Bookkeeper Cluster   Bookie Write Rate meter_bookkeeper_bookie_write_rate The bookie write rate (bytes/s). Bookkeeper Cluster    BookKeeper Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     JVM Memory Pool Used meter_bookkeeper_node_jvm_memory_pool_used The usage of the broker jvm memory pool. 
Bookkeeper Bookie   JVM Memory meter_bookkeeper_node_jvm_memory_used meter_bookkeeper_node_jvm_memory_committed meter_bookkeeper_node_jvm_memory_init The usage of the broker jvm memory. Bookkeeper Bookie   JVM Threads meter_bookkeeper_node_jvm_threads_current meter_bookkeeper_node_jvm_threads_daemon meter_bookkeeper_node_jvm_threads_peak meter_bookkeeper_node_jvm_threads_deadlocked The count of the jvm threads. Bookkeeper Bookie   GC Time meter_bookkeeper_node_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Bookkeeper Bookie   GC Count meter_bookkeeper_node_jvm_gc_collection_seconds_count The count of a given JVM garbage. Bookkeeper Bookie   Thread Executor Completed meter_bookkeeper_node_thread_executor_completed The count of the executor thread. Bookkeeper Bookie   Thread Executor Tasks meter_bookkeeper_node_thread_executor_tasks_completed meter_bookkeeper_node_thread_executor_tasks_rejected meter_bookkeeper_node_thread_executor_tasks_failed The count of the executor tasks. Bookkeeper Bookie   Pooled Threads meter_bookkeeper_node_high_priority_threads meter_bookkeeper_node_read_thread_pool_threads The count of the pooled thread. Bookkeeper Bookie   Pooled Threads Max Queue Size meter_bookkeeper_node_high_priority_thread_max_queue_size meter_bookkeeper_node_read_thread_pool_max_queue_size The count of the pooled threads max queue size. Bookkeeper Bookie    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/bookkeeper/bookkeeper-cluster.yaml, otel-rules/bookkeeper/bookkeeper-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/bookkeeper.\n","excerpt":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-bookkeeper-monitoring/","title":"BookKeeper monitoring"},{"body":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","excerpt":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/bootstrap-plugins/","title":"Bootstrap class plugins"},{"body":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. 
For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","excerpt":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/bootstrap-plugins/","title":"Bootstrap class plugins"},{"body":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","excerpt":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/bootstrap-plugins/","title":"Bootstrap class plugins"},{"body":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   
16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","excerpt":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/bootstrap-plugins/","title":"Bootstrap class plugins"},{"body":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","excerpt":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/bootstrap-plugins/","title":"Bootstrap class plugins"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/latest/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 
10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/next/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. No browser plugin required. A simple JavaScript library. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.0.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.1.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.2.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. 
It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.3.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.4.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.5.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.6.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. 
Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","excerpt":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing …","ref":"/docs/main/v9.7.0/en/setup/service-agent/browser-agent/","title":"Browser Monitoring"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/latest/en/api/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/next/en/api/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.0.0/en/protocols/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  
","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.1.0/en/protocols/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.2.0/en/protocols/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.3.0/en/protocols/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.4.0/en/api/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  
","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.5.0/en/api/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.6.0/en/api/browser-protocol/","title":"Browser Protocol"},{"body":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","excerpt":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the …","ref":"/docs/main/v9.7.0/en/api/browser-protocol/","title":"Browser Protocol"},{"body":"Build and use the Agent from source codes When you want to build and use the Agent from source code, please follow these steps.\nInstall SkyWalking Go Use go get to import the skywalking-go program.\n// latest or any commit ID go get github.com/apache/skywalking-go@latest Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; Build the Agent When building the project, you need to clone the project and build it.\n// git clone the same version(tag or commit ID) as your dependency version. git clone https://github.com/apache/skywalking-go.git cd skywalking-go \u0026amp;\u0026amp; make build Next, you would find several versions of the Go Agent program for different systems in the bin directory of the current project. When you need to compile the program, please add the following statement with the agent program which matches your system:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  
If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a ","excerpt":"Build and use the Agent from source codes When you want to build and use the Agent from source code, …","ref":"/docs/skywalking-go/latest/en/development-and-contribution/build-and-use-agent/","title":"Build and use the Agent from source codes"},{"body":"Build and use the Agent from source codes When you want to build and use the Agent from source code, please follow these steps.\nInstall SkyWalking Go Use go get to import the skywalking-go program.\n// latest or any commit ID go get github.com/apache/skywalking-go@latest Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; Build the Agent When building the project, you need to clone the project and build it.\n// git clone the same version(tag or commit ID) as your dependency version. git clone https://github.com/apache/skywalking-go.git cd skywalking-go \u0026amp;\u0026amp; make build Next, you would find several versions of the Go Agent program for different systems in the bin directory of the current project. When you need to compile the program, please add the following statement with the agent program which matches your system:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a ","excerpt":"Build and use the Agent from source codes When you want to build and use the Agent from source code, …","ref":"/docs/skywalking-go/next/en/development-and-contribution/build-and-use-agent/","title":"Build and use the Agent from source codes"},{"body":"Build and use the Agent from source codes When you want to build and use the Agent from source code, please follow these steps.\nInstall SkyWalking Go Use go get to import the skywalking-go program.\n// latest or any commit ID go get github.com/apache/skywalking-go@latest Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; Build the Agent When building the project, you need to clone the project and build it.\n// git clone the same version(tag or commit ID) as your dependency version. git clone https://github.com/apache/skywalking-go.git cd skywalking-go \u0026amp;\u0026amp; make build Next, you would find several versions of the Go Agent program for different systems in the bin directory of the current project. When you need to compile the program, please add the following statement with the agent program which matches your system:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  
If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a ","excerpt":"Build and use the Agent from source codes When you want to build and use the Agent from source code, …","ref":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/build-and-use-agent/","title":"Build and use the Agent from source codes"},{"body":"Building This document will help you compile and build the project in golang environment.\nPlatform Linux, macOS, and Windows are supported in SkyWalking Infra E2E.\nCommand git clone https://github.com/apache/skywalking-infra-e2e.git cd skywalking-infra-e2e make build After these commands, the e2e execute file path is bin/$PLATFORM/e2e.\n","excerpt":"Building This document will help you compile and build the project in golang environment.\nPlatform …","ref":"/docs/skywalking-infra-e2e/latest/en/contribution/compiling-guidance/","title":"Building"},{"body":"Building This document will help you compile and build the project in golang environment.\nPlatform Linux, macOS, and Windows are supported in SkyWalking Infra E2E.\nCommand git clone https://github.com/apache/skywalking-infra-e2e.git cd skywalking-infra-e2e make build After these commands, the e2e execute file path is bin/$PLATFORM/e2e.\n","excerpt":"Building This document will help you compile and build the project in golang environment.\nPlatform …","ref":"/docs/skywalking-infra-e2e/next/en/contribution/compiling-guidance/","title":"Building"},{"body":"Building This document will help you compile and build the project in golang environment.\nPlatform Linux, macOS, and Windows are supported in SkyWalking Infra E2E.\nCommand git clone https://github.com/apache/skywalking-infra-e2e.git cd skywalking-infra-e2e make build After these commands, the e2e execute file path is bin/$PLATFORM/e2e.\n","excerpt":"Building This document will help you compile and build the project in golang environment.\nPlatform …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/contribution/compiling-guidance/","title":"Building"},{"body":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. 
false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","excerpt":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configuration-discovery/","title":"CDS - Configuration Discovery Service"},{"body":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","excerpt":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/configuration-discovery/","title":"CDS - Configuration Discovery Service"},{"body":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  
","excerpt":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/configuration-discovery/","title":"CDS - Configuration Discovery Service"},{"body":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","excerpt":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/configuration-discovery/","title":"CDS - Configuration Discovery Service"},{"body":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  
","excerpt":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/configuration-discovery/","title":"CDS - Configuration Discovery Service"},{"body":"ClickHouse monitoring ClickHouse server performance from built-in metrics data SkyWalking leverages ClickHouse built-in metrics data since v20.1.2.4. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  Configure ClickHouse to expose metrics data for scraping from Prometheus. OpenTelemetry Collector fetches metrics from ClickeHouse server through Prometheus endpoint, and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up built-in prometheus endpoint . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  ClickHouse Monitoring ClickHouse monitoring provides monitoring of the metrics 、events and asynchronous_metrics of the ClickHouse server. ClickHouse cluster is cataloged as a Layer: CLICKHOUSE Service in OAP. Each ClickHouse server is cataloged as an Instance in OAP.\nClickHouse Instance Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CpuUsage count meter_clickhouse_instance_cpu_usage CPU time spent seen by OS per second(according to ClickHouse.system.dashboard.CPU Usage (cores)). ClickHouse   MemoryUsage percentage meter_clickhouse_instance_memory_usage Total amount of memory (bytes) allocated by the server/ total amount of OS memory. ClickHouse   MemoryAvailable percentage meter_clickhouse_instance_memory_available Total amount of memory (bytes) available for program / total amount of OS memory. ClickHouse   Uptime sec meter_clickhouse_instance_uptime The server uptime in seconds. It includes the time spent for server initialization before accepting connections. ClickHouse   Version string meter_clickhouse_instance_version Version of the server in a single integer number in base-1000. ClickHouse   FileOpen count meter_clickhouse_instance_file_open Number of files opened. ClickHouse    ClickHouse Network Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     TcpConnections count meter_clickhouse_instance_tcp_connectionsmeter_clickhouse_tcp_connections Number of connections to TCP server. ClickHouse   MysqlConnections count meter_clickhouse_instance_mysql_connectionsmeter_clickhouse_mysql_connections Number of client connections using MySQL protocol. ClickHouse   HttpConnections count meter_clickhouse_instance_http_connectionsmeter_clickhouse_mysql_connections Number of connections to HTTP server. ClickHouse   InterserverConnections count meter_clickhouse_instance_interserver_connectionsmeter_clickhouse_interserver_connections Number of connections from other replicas to fetch parts. ClickHouse   PostgresqlConnections count meter_clickhouse_instance_postgresql_connectionsmeter_clickhouse_postgresql_connections Number of client connections using PostgreSQL protocol. ClickHouse   ReceiveBytes bytes meter_clickhouse_instance_network_receive_bytesmeter_clickhouse_network_receive_bytes Total number of bytes received from network. 
ClickHouse   SendBytes bytes meter_clickhouse_instance_network_send_bytesmeter_clickhouse_network_send_bytes Total number of bytes send to network. ClickHouse    ClickHouse Query Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     QueryCount count meter_clickhouse_instance_querymeter_clickhouse_query Number of executing queries. ClickHouse   SelectQueryCount count meter_clickhouse_instance_query_selectmeter_clickhouse_query_select Number of executing queries, but only for SELECT queries. ClickHouse   InsertQueryCount count meter_clickhouse_instance_query_insertmeter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   SelectQueryRate count/sec meter_clickhouse_instance_query_select_ratemeter_clickhouse_query_select_rate Number of SELECT queries per second. ClickHouse   InsertQueryRate count/sec meter_clickhouse_instance_query_insert_ratemeter_clickhouse_query_insert_rate Number of INSERT queries per second. ClickHouse   Querytime microsec meter_clickhouse_instance_querytime_microsecondsmeter_clickhouse_querytime_microseconds Total time of all queries. ClickHouse   SelectQuerytime microsec meter_clickhouse_instance_querytime_select_microsecondsmeter_clickhouse_querytime_select_microseconds Total time of SELECT queries. ClickHouse   InsertQuerytime microsec meter_clickhouse_instance_querytime_insert_microsecondsmeter_clickhouse_querytime_insert_microseconds Total time of INSERT queries. ClickHouse   OtherQuerytime microsec meter_clickhouse_instance_querytime_other_microsecondsmeter_clickhouse_querytime_other_microseconds Total time of queries that are not SELECT or INSERT. ClickHouse   QuerySlowCount count meter_clickhouse_instance_query_slowmeter_clickhouse_query_slow Number of reads from a file that were slow. ClickHouse    ClickHouse Insertion Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     InsertQueryCount count meter_clickhouse_instance_query_insertmeter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   InsertedRowCount count meter_clickhouse_instance_inserted_rowsmeter_clickhouse_inserted_rows Number of rows INSERTed to all tables. ClickHouse   InsertedBytes bytes meter_clickhouse_instance_inserted_bytesmeter_clickhouse_inserted_bytes Number of bytes INSERTed to all tables. ClickHouse   DelayedInsertCount count meter_clickhouse_instance_delayed_insertmeter_clickhouse_delayed_insert Number of times the INSERT of a block to a MergeTree table was throttled due to high number of active data parts for partition. ClickHouse    ClickHouse Replicas Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     ReplicatedChecks count meter_clickhouse_instance_replicated_checksmeter_clickhouse_replicated_checks Number of data parts checking for consistency. ClickHouse   ReplicatedFetch count meter_clickhouse_instance_replicated_fetchmeter_clickhouse_replicated_fetch Number of data parts being fetched from replica. ClickHouse   ReplicatedSend count meter_clickhouse_instance_replicated_sendmeter_clickhouse_replicated_send Number of data parts being sent to replicas. ClickHouse    ClickHouse MergeTree Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     BackgroundMergeCount count meter_clickhouse_instance_background_mergemeter_clickhouse_background_merge Number of executing background merges. 
ClickHouse   MergeRows count meter_clickhouse_instance_merge_rowsmeter_clickhouse_merge_rows Rows read for background merges. This is the number of rows before merge. ClickHouse   MergeUncompressedBytes bytes meter_clickhouse_instance_merge_uncompressed_bytesmeter_clickhouse_merge_uncompressed_bytes Uncompressed bytes (for columns as they stored in memory) that was read for background merges. This is the number before merge. ClickHouse   MoveCount count meter_clickhouse_instance_movemeter_clickhouse_move Number of currently executing moves. ClickHouse   PartsActive Count meter_clickhouse_instance_parts_activemeter_clickhouse_parts_active Active data part, used by current and upcoming SELECTs. ClickHouse   MutationsCount count meter_clickhouse_instance_mutationsmeter_clickhouse_mutations Number of mutations (ALTER DELETE/UPDATE). ClickHouse    ClickHouse Kafka Table Engine Supported Metrics When table engine works with Apache Kafka.\nKafka lets you:\n Publish or subscribe to data flows. Organize fault-tolerant storage. Process streams as they become available.     Monitoring Panel Unit Metric Name Description Data Source     KafkaMessagesRead count meter_clickhouse_instance_kafka_messages_readmeter_clickhouse_kafka_messages_read Number of Kafka messages already processed by ClickHouse. ClickHouse   KafkaWrites count meter_clickhouse_instance_kafka_writesmeter_clickhouse_kafka_writes Number of writes (inserts) to Kafka tables. ClickHouse   KafkaConsumers count meter_clickhouse_instance_kafka_consumersmeter_clickhouse_kafka_consumers Number of active Kafka consumers. ClickHouse   KafkProducers count meter_clickhouse_instance_kafka_producersmeter_clickhouse_kafka_producers Number of active Kafka producer created. ClickHouse    ClickHouse ZooKeeper Supported Metrics ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted.\n   Monitoring Panel Unit Metric Name Description Data Source     ZookeeperSession count meter_clickhouse_instance_zookeeper_sessionmeter_clickhouse_zookeeper_session Number of sessions (connections) to ZooKeeper. ClickHouse   ZookeeperWatch count meter_clickhouse_instance_zookeeper_watchmeter_clickhouse_zookeeper_watch Number of watches (event subscriptions) in ZooKeeper. ClickHouse   ZookeeperBytesSent bytes meter_clickhouse_instance_zookeeper_bytes_sentmeter_clickhouse_zookeeper_bytes_sent Number of bytes send over network while communicating with ZooKeeper. ClickHouse   ZookeeperBytesReceive bytes meter_clickhouse_instance_zookeeper_bytes_receivedmeter_clickhouse_zookeeper_bytes_received Number of bytes send over network while communicating with ZooKeeper. ClickHouse    ClickHouse Keeper Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     KeeperAliveConnections count meter_clickhouse_instance_keeper_connections_alivemeter_clickhouse_keeper_connections_alive Number of alive connections for embedded ClickHouse Keeper. ClickHouse   KeeperOutstandingRequets count meter_clickhouse_instance_keeper_outstanding_requestsmeter_clickhouse_keeper_outstanding_requests Number of outstanding requests for embedded ClickHouse Keeper. ClickHouse    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/clickhouse. 
The ClickHouse dashboard panel configurations are found in /config/ui-initialized-templates/clickhouse.\n","excerpt":"ClickHouse monitoring ClickHouse server performance from built-in metrics data SkyWalking leverages …","ref":"/docs/main/next/en/setup/backend/backend-clickhouse-monitoring/","title":"ClickHouse monitoring"},{"body":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC server and delivery the data to it.\nDefaultConfig # The gRPC client finder typefinder_type:\u0026#34;static\u0026#34;# The gRPC server address (default localhost:11800), multiple addresses are split by \u0026#34;,\u0026#34;.server_addr:localhost:11800# The gRPC kubernetes server address finderkubernetes_config:# The kind of resourcekind:pod# The resource namespacesnamespaces:- default# How to get the address exported portextra_port:# Resource target portport:11800# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:true# The auth value when send requestauthentication:\u0026#34;\u0026#34;# How frequently to check the connection(second)check_period:5# The gRPC send request timeouttimeout:# The timeout for unary single requestunary:5s# The timeout for unary stream requeststream:20sConfiguration    Name Type Description     finder_type string The gRPC server address finder type, support \u0026ldquo;static\u0026rdquo; and \u0026ldquo;kubernetes\u0026rdquo;   server_addr string The gRPC server address, only works for \u0026ldquo;static\u0026rdquo; address finder   kubernetes_config *resolvers.KubernetesConfig The kubernetes config to lookup addresses, only works for \u0026ldquo;kubernetes\u0026rdquo; address finder   kubernetes_config.api_server string The kubernetes API server address, If not define means using in kubernetes mode to connect   kubernetes_config.basic_auth *resolvers.BasicAuth The HTTP basic authentication credentials for the targets.   kubernetes_config.basic_auth.username string    kubernetes_config.basic_auth.password resolvers.Secret    kubernetes_config.basic_auth.password_file string    kubernetes_config.bearer_token resolvers.Secret The bearer token for the targets.   kubernetes_config.bearer_token_file string The bearer token file for the targets.   kubernetes_config.proxy_url string HTTP proxy server to use to connect to the targets.   kubernetes_config.tls_config resolvers.TLSConfig TLSConfig to use to connect to the targets.   kubernetes_config.namespaces []string Support to lookup namespaces   kubernetes_config.kind string The kind of api   kubernetes_config.selector resolvers.Selector The kind selector   kubernetes_config.extra_port resolvers.ExtraPort How to get the address exported port   enable_TLS bool Enable TLS connect to server   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   
insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   authentication string The auth value when send request   check_period int How frequently to check the connection(second)   timeout grpc.TimeoutConfig The gRPC send request timeout    ","excerpt":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/client_grpc-client/","title":"Client/grpc-client"},{"body":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC server and delivery the data to it.\nDefaultConfig # The gRPC client finder typefinder_type:\u0026#34;static\u0026#34;# The gRPC server address (default localhost:11800), multiple addresses are split by \u0026#34;,\u0026#34;.server_addr:localhost:11800# The gRPC kubernetes server address finderkubernetes_config:# The kind of resourcekind:pod# The resource namespacesnamespaces:- default# How to get the address exported portextra_port:# Resource target portport:11800# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:true# The auth value when send requestauthentication:\u0026#34;\u0026#34;# How frequently to check the connection(second)check_period:5# The gRPC send request timeouttimeout:# The timeout for unary single requestunary:5s# The timeout for unary stream requeststream:20sConfiguration    Name Type Description     finder_type string The gRPC server address finder type, support \u0026ldquo;static\u0026rdquo; and \u0026ldquo;kubernetes\u0026rdquo;   server_addr string The gRPC server address, only works for \u0026ldquo;static\u0026rdquo; address finder   kubernetes_config *resolvers.KubernetesConfig The kubernetes config to lookup addresses, only works for \u0026ldquo;kubernetes\u0026rdquo; address finder   kubernetes_config.api_server string The kubernetes API server address, If not define means using in kubernetes mode to connect   kubernetes_config.basic_auth *resolvers.BasicAuth The HTTP basic authentication credentials for the targets.   kubernetes_config.basic_auth.username string    kubernetes_config.basic_auth.password resolvers.Secret    kubernetes_config.basic_auth.password_file string    kubernetes_config.bearer_token resolvers.Secret The bearer token for the targets.   kubernetes_config.bearer_token_file string The bearer token file for the targets.   kubernetes_config.proxy_url string HTTP proxy server to use to connect to the targets.   kubernetes_config.tls_config resolvers.TLSConfig TLSConfig to use to connect to the targets.   kubernetes_config.namespaces []string Support to lookup namespaces   kubernetes_config.kind string The kind of api   kubernetes_config.selector resolvers.Selector The kind selector   kubernetes_config.extra_port resolvers.ExtraPort How to get the address exported port   enable_TLS bool Enable TLS connect to server   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   
client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   authentication string The auth value when send request   check_period int How frequently to check the connection(second)   timeout grpc.TimeoutConfig The gRPC send request timeout    ","excerpt":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/client_grpc-client/","title":"Client/grpc-client"},{"body":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC server and delivery the data to it.\nDefaultConfig # The gRPC client finder typefinder_type:\u0026#34;static\u0026#34;# The gRPC server address (default localhost:11800), multiple addresses are split by \u0026#34;,\u0026#34;.server_addr:localhost:11800# The gRPC kubernetes server address finderkubernetes_config:# The kind of resourcekind:pod# The resource namespacesnamespaces:- default# How to get the address exported portextra_port:# Resource target portport:11800# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:true# The auth value when send requestauthentication:\u0026#34;\u0026#34;# How frequently to check the connection(second)check_period:5# The gRPC send request timeouttimeout:# The timeout for unary single requestunary:5s# The timeout for unary stream requeststream:20sConfiguration    Name Type Description     finder_type string The gRPC server address finder type, support \u0026ldquo;static\u0026rdquo; and \u0026ldquo;kubernetes\u0026rdquo;   server_addr string The gRPC server address, only works for \u0026ldquo;static\u0026rdquo; address finder   kubernetes_config *resolvers.KubernetesConfig The kubernetes config to lookup addresses, only works for \u0026ldquo;kubernetes\u0026rdquo; address finder   kubernetes_config.api_server string The kubernetes API server address, If not define means using in kubernetes mode to connect   kubernetes_config.basic_auth *resolvers.BasicAuth The HTTP basic authentication credentials for the targets.   kubernetes_config.basic_auth.username string    kubernetes_config.basic_auth.password resolvers.Secret    kubernetes_config.basic_auth.password_file string    kubernetes_config.bearer_token resolvers.Secret The bearer token for the targets.   kubernetes_config.bearer_token_file string The bearer token file for the targets.   kubernetes_config.proxy_url string HTTP proxy server to use to connect to the targets.   kubernetes_config.tls_config resolvers.TLSConfig TLSConfig to use to connect to the targets.   
kubernetes_config.namespaces []string Support to lookup namespaces   kubernetes_config.kind string The kind of api   kubernetes_config.selector resolvers.Selector The kind selector   kubernetes_config.extra_port resolvers.ExtraPort How to get the address exported port   enable_TLS bool Enable TLS connect to server   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   authentication string The auth value when send request   check_period int How frequently to check the connection(second)   timeout grpc.TimeoutConfig The gRPC send request timeout    ","excerpt":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/client_grpc-client/","title":"Client/grpc-client"},{"body":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the Kafka brokers and delivery the data to it.\nDefaultConfig # The Kafka broker addresses (default localhost:9092). Multiple values are separated by commas.brokers:localhost:9092# The Kafka version should follow this pattern, which is major_minor_veryMinor_patch (default 1.0.0.0).version:1.0.0.0# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).required_acks:1# The producer max retry times (default 3).producer_max_retry:3# The meta max retry times (default 3).meta_max_retry:3# How long to wait for the cluster to settle between retries (default 100ms). Time unit is ms.retry_backoff:100# The max message bytes.max_message_bytes:1000000# If enabled, the producer will ensure that exactly one copy of each message is written (default false).idempotent_writes:false# A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes (default Satellite).client_id:Satellite# Compression codec represents the various compression codecs recognized by Kafka in messages. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTDcompression_codec:0# How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. The unit is minute.refresh_period:10# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:trueConfiguration    Name Type Description     brokers string The Kafka broker addresses (default localhost:9092).   version string The version should follow this pattern, which is major.minor.veryMinor.patch.   enable_TLS bool The TLS switch (default false).   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. 
The config only works when opening the TLS switch.   required_acks int16 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).   producer_max_retry int The producer max retry times (default 3).   meta_max_retry int The meta max retry times (default 3).   retry_backoff int How long to wait for the cluster to settle between retries (default 100ms).   max_message_bytes int The max message bytes.   idempotent_writes bool Ensure that exactly one copy of each message is written when is true.   client_id string A user-provided string sent with every request to the brokers.   compression_codec int Represents the various compression codecs recognized by Kafka in messages.   refresh_period int How frequently to refresh the cluster metadata.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.    ","excerpt":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/client_kafka-client/","title":"Client/kafka-client"},{"body":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the Kafka brokers and delivery the data to it.\nDefaultConfig # The Kafka broker addresses (default localhost:9092). Multiple values are separated by commas.brokers:localhost:9092# The Kafka version should follow this pattern, which is major_minor_veryMinor_patch (default 1.0.0.0).version:1.0.0.0# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).required_acks:1# The producer max retry times (default 3).producer_max_retry:3# The meta max retry times (default 3).meta_max_retry:3# How long to wait for the cluster to settle between retries (default 100ms). Time unit is ms.retry_backoff:100# The max message bytes.max_message_bytes:1000000# If enabled, the producer will ensure that exactly one copy of each message is written (default false).idempotent_writes:false# A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes (default Satellite).client_id:Satellite# Compression codec represents the various compression codecs recognized by Kafka in messages. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTDcompression_codec:0# How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. The unit is minute.refresh_period:10# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:trueConfiguration    Name Type Description     brokers string The Kafka broker addresses (default localhost:9092).   version string The version should follow this pattern, which is major.minor.veryMinor.patch.   enable_TLS bool The TLS switch (default false).   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   
required_acks int16 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).   producer_max_retry int The producer max retry times (default 3).   meta_max_retry int The meta max retry times (default 3).   retry_backoff int How long to wait for the cluster to settle between retries (default 100ms).   max_message_bytes int The max message bytes.   idempotent_writes bool Ensure that exactly one copy of each message is written when is true.   client_id string A user-provided string sent with every request to the brokers.   compression_codec int Represents the various compression codecs recognized by Kafka in messages.   refresh_period int How frequently to refresh the cluster metadata.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.    ","excerpt":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/client_kafka-client/","title":"Client/kafka-client"},{"body":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the Kafka brokers and delivery the data to it.\nDefaultConfig # The Kafka broker addresses (default localhost:9092). Multiple values are separated by commas.brokers:localhost:9092# The Kafka version should follow this pattern, which is major_minor_veryMinor_patch (default 1.0.0.0).version:1.0.0.0# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).required_acks:1# The producer max retry times (default 3).producer_max_retry:3# The meta max retry times (default 3).meta_max_retry:3# How long to wait for the cluster to settle between retries (default 100ms). Time unit is ms.retry_backoff:100# The max message bytes.max_message_bytes:1000000# If enabled, the producer will ensure that exactly one copy of each message is written (default false).idempotent_writes:false# A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes (default Satellite).client_id:Satellite# Compression codec represents the various compression codecs recognized by Kafka in messages. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTDcompression_codec:0# How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. The unit is minute.refresh_period:10# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:trueConfiguration    Name Type Description     brokers string The Kafka broker addresses (default localhost:9092).   version string The version should follow this pattern, which is major.minor.veryMinor.patch.   enable_TLS bool The TLS switch (default false).   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   
required_acks int16 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).   producer_max_retry int The producer max retry times (default 3).   meta_max_retry int The meta max retry times (default 3).   retry_backoff int How long to wait for the cluster to settle between retries (default 100ms).   max_message_bytes int The max message bytes.   idempotent_writes bool Ensure that exactly one copy of each message is written when is true.   client_id string A user-provided string sent with every request to the brokers.   compression_codec int Represents the various compression codecs recognized by Kafka in messages.   refresh_period int How frequently to refresh the cluster metadata.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.    ","excerpt":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/client_kafka-client/","title":"Client/kafka-client"},{"body":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The examples listed in this folder show how to use this command to create, update, read and delete schemas. Furthermore, bydbctl could help in querying data stored in streams, measures and properties.\nThese are several ways to install:\n Get binaries from download. Build from sources to get latest features.  The config file named .bydbctl.yaml will be created in $HOME folder after the first CRUD command is applied.\n\u0026gt; more ~/.bydbctl.yaml addr: http://127.0.0.1:64299 group: \u0026#34;\u0026#34; bydbctl leverages HTTP endpoints to retrieve data instead of gRPC.\nHTTP client Users could select any HTTP client to access the HTTP based endpoints. The default address is localhost:17913/api\nJava Client The java native client is hosted at skywalking-banyandb-java-client.\nWeb application The web application is hosted at skywalking-banyandb-webapp when you boot up the BanyanDB server.\ngRPC command-line tool Users have a chance to use any command-line tool to interact with the Banyand server\u0026rsquo;s gRPC endpoints. The only limitation is the CLI tool has to support file descriptor files since the database server does not support server reflection.\nBuf is a Protobuf building tooling the BanyanDB relies on. It can provide FileDescriptorSets usable by gRPC CLI tools like grpcurl\nBanyanDB recommends installing Buf by issuing\n$ make -C api generate Protobuf schema files are compiled Above command will compile *.proto after downloading buf into \u0026lt;project_root\u0026gt;/bin\nUsers could leverage buf\u0026rsquo;s internal compiler to generate the FileDescriptorSets\n$ cd api $ ../bin/buf build -o image.bin If grpcurl is the CLI tool to access the APIs of BanyanDb. To use image.bin with it on the fly:\n$ grpcurl -plaintext -protoset image.bin localhost:17912 ... ","excerpt":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The …","ref":"/docs/skywalking-banyandb/latest/clients/","title":"Clients"},{"body":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The examples listed in this folder show how to use this command to create, update, read and delete schemas. Furthermore, bydbctl could help in querying data stored in streams, measures and properties.\nThese are several ways to install:\n Get binaries from download. 
Build from sources to get latest features.  The config file named .bydbctl.yaml will be created in $HOME folder after the first CRUD command is applied.\n\u0026gt; more ~/.bydbctl.yaml addr: http://127.0.0.1:64299 group: \u0026#34;\u0026#34; bydbctl leverages HTTP endpoints to retrieve data instead of gRPC.\nHTTP client Users could select any HTTP client to access the HTTP based endpoints. The default address is localhost:17913/api\nJava Client The java native client is hosted at skywalking-banyandb-java-client.\nWeb application The web application is hosted at skywalking-banyandb-webapp when you boot up the BanyanDB server.\ngRPC command-line tool Users have a chance to use any command-line tool to interact with the Banyand server\u0026rsquo;s gRPC endpoints. The only limitation is the CLI tool has to support file descriptor files since the database server does not support server reflection.\nBuf is a Protobuf building tooling the BanyanDB relies on. It can provide FileDescriptorSets usable by gRPC CLI tools like grpcurl\nBanyanDB recommends installing Buf by issuing\n$ make -C api generate Protobuf schema files are compiled Above command will compile *.proto after downloading buf into \u0026lt;project_root\u0026gt;/bin\nUsers could leverage buf\u0026rsquo;s internal compiler to generate the FileDescriptorSets\n$ cd api $ ../bin/buf build -o image.bin If grpcurl is the CLI tool to access the APIs of BanyanDb. To use image.bin with it on the fly:\n$ grpcurl -plaintext -protoset image.bin localhost:17912 ... ","excerpt":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The …","ref":"/docs/skywalking-banyandb/next/clients/","title":"Clients"},{"body":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The examples listed in this folder show how to use this command to create, update, read and delete schemas. Furthermore, bydbctl could help in querying data stored in streams, measures and properties.\nThese are several ways to install:\n Get binaries from download. Build from sources to get latest features.  The config file named .bydbctl.yaml will be created in $HOME folder after the first CRUD command is applied.\n\u0026gt; more ~/.bydbctl.yaml addr: http://127.0.0.1:64299 group: \u0026#34;\u0026#34; bydbctl leverages HTTP endpoints to retrieve data instead of gRPC.\nHTTP client Users could select any HTTP client to access the HTTP based endpoints. The default address is localhost:17913/api\nJava Client The java native client is hosted at skywalking-banyandb-java-client.\nWeb application The web application is hosted at skywalking-banyandb-webapp when you boot up the BanyanDB server.\ngRPC command-line tool Users have a chance to use any command-line tool to interact with the Banyand server\u0026rsquo;s gRPC endpoints. The only limitation is the CLI tool has to support file descriptor files since the database server does not support server reflection.\nBuf is a Protobuf building tooling the BanyanDB relies on. It can provide FileDescriptorSets usable by gRPC CLI tools like grpcurl\nBanyanDB recommends installing Buf by issuing\n$ make -C api generate Protobuf schema files are compiled Above command will compile *.proto after downloading buf into \u0026lt;project_root\u0026gt;/bin\nUsers could leverage buf\u0026rsquo;s internal compiler to generate the FileDescriptorSets\n$ cd api $ ../bin/buf build -o image.bin If grpcurl is the CLI tool to access the APIs of BanyanDb. 
To use image.bin with it on the fly:\n$ grpcurl -plaintext -protoset image.bin localhost:17912 ... ","excerpt":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The …","ref":"/docs/skywalking-banyandb/v0.5.0/clients/","title":"Clients"},{"body":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe etcd cluster can be setup by the etcd installation guide\nRole-base Banyand Cluster There is an example: The etcd cluster is spread across three nodes with the addresses 10.0.0.1:2379, 10.0.0.2:2379, and 10.0.0.3:2379.\nData nodes and liaison nodes are running as independent processes by\n$ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Node Discovery The node discovery is based on the etcd cluster. The etcd cluster is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe host is registered to the etcd cluster by the banyand-server automatically based on node-host-provider :\n node-host-provider=hostname : Default. The OS\u0026rsquo;s hostname is registered as the host part in the address. node-host-provider=ip : The OS\u0026rsquo;s the first non-loopback active IP address(IPv4) is registered as the host part in the address. node-host-provider=flag : node-host is registered as the host part in the address.  ","excerpt":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the …","ref":"/docs/skywalking-banyandb/latest/installation/cluster/","title":"Cluster Installation"},{"body":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe etcd cluster can be setup by the etcd installation guide\nRole-base Banyand Cluster There is an example: The etcd cluster is spread across three nodes with the addresses 10.0.0.1:2379, 10.0.0.2:2379, and 10.0.0.3:2379.\nData nodes and liaison nodes are running as independent processes by\n$ ./banyand-server-static storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server-static storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server-static storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Node Discovery The node discovery is based on the etcd cluster. 
The etcd cluster is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe host is registered to the etcd cluster by the banyand-server-static automatically based on node-host-provider :\n node-host-provider=hostname : Default. The OS\u0026rsquo;s hostname is registered as the host part in the address. node-host-provider=ip : The OS\u0026rsquo;s the first non-loopback active IP address(IPv4) is registered as the host part in the address. node-host-provider=flag : node-host is registered as the host part in the address.  Etcd Authentication etcd supports through tls certificates and RBAC-based authentication for both clients to server communication. This section tends to help users set up authentication for BanyanDB.\nAuthentication with username/password The etcd user can be setup by the etcd authentication guide\nThe username/password is configured in the following command:\n etcd-username: The username for etcd client authentication. etcd-password: The password for etcd client authentication.  Note: recommended using environment variables to set username/password for higher security.\n$ ./banyand-server-static storage --etcd-endpoints=your-endpoints --etcd-username=your-username --etcd-password=your-password \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=your-endpoints --etcd-username=your-username --etcd-password=your-password \u0026lt;flags\u0026gt; Transport security with HTTPS The etcd trusted certificate file can be setup by the etcd transport security model\n etcd-tls-ca-file: The path of the trusted certificate file.  $ ./banyand-server-static storage --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=youf-file-path \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=youf-file-path \u0026lt;flags\u0026gt; Authentication with HTTPS client certificates The etcd client certificates can be setup by the etcd transport security model\n etcd-tls-ca-file: The path of the trusted certificate file. etcd-tls-cert-file: Certificate used for SSL/TLS connections to etcd. When this option is set, advertise-client-urls can use the HTTPS schema. etcd-tls-key-file: Key for the certificate. Must be unencrypted.  
$ ./banyand-server-static storage --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=youf-file-path --etcd-tls-cert-file=youf-file-path --etcd-tls-key-file=youf-file-path \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=youf-file-path --etcd-tls-cert-file=youf-file-path --etcd-tls-key-file=youf-file-path \u0026lt;flags\u0026gt; ","excerpt":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the …","ref":"/docs/skywalking-banyandb/next/installation/cluster/","title":"Cluster Installation"},{"body":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe etcd cluster can be setup by the etcd installation guide\nRole-base Banyand Cluster There is an example: The etcd cluster is spread across three nodes with the addresses 10.0.0.1:2379, 10.0.0.2:2379, and 10.0.0.3:2379.\nData nodes and liaison nodes are running as independent processes by\n$ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Node Discovery The node discovery is based on the etcd cluster. The etcd cluster is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe host is registered to the etcd cluster by the banyand-server automatically based on node-host-provider :\n node-host-provider=hostname : Default. The OS\u0026rsquo;s hostname is registered as the host part in the address. node-host-provider=ip : The OS\u0026rsquo;s the first non-loopback active IP address(IPv4) is registered as the host part in the address. node-host-provider=flag : node-host is registered as the host part in the address.  ","excerpt":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the …","ref":"/docs/skywalking-banyandb/v0.5.0/installation/cluster/","title":"Cluster Installation"},{"body":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. 
When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nCloud Native Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nTraditional Coordinator NOTICE In all the following coordinators, oap.internal.comm.host:oap.internal.comm.port is registered as the ID and address for the current OAP node. By default, because they are same in all OAP nodes, the registrations are conflicted, and (may) show as one registered node, which actually would be the node itself. In this case, the cluster mode is NOT working.\nPlease check the registered nodes on your coordinator servers, to make the registration information unique for every node. You could have two options\n Change core/gRPCHost(oap.internal.comm.host) and core/gRPCPort(oap.internal.comm.port) for internal, and setup external communication channels for data reporting and query. Use internalComHost and internalComPort in the config to provide a unique host and port for every OAP node. This host name port should be accessible for other OAP nodes.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. 
The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. 
The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  ","excerpt":"Cluster Management In many production environments, the backend needs to support distributed …","ref":"/docs/main/latest/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nCloud Native Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nTraditional Coordinator NOTICE In all the following coordinators, oap.internal.comm.host:oap.internal.comm.port is registered as the ID and address for the current OAP node. By default, because they are same in all OAP nodes, the registrations are conflicted, and (may) show as one registered node, which actually would be the node itself. In this case, the cluster mode is NOT working.\nPlease check the registered nodes on your coordinator servers, to make the registration information unique for every node. 
You could have two options\n Change core/gRPCHost(oap.internal.comm.host) and core/gRPCPort(oap.internal.comm.port) for internal, and setup external communication channels for data reporting and query. Use internalComHost and internalComPort in the config to provide a unique host and port for every OAP node. This host name port should be accessible for other OAP nodes.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  cluster:selector:${SW_CLUSTER:zookeeper}...zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}...consul:serviceName:${SW_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}# Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500hostPort:${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}aclToken:${SW_CLUSTER_CONSUL_ACLTOKEN:\u0026#34;\u0026#34;}internalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:\u0026#34;\u0026#34;}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:-1}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. 
internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  ","excerpt":"Cluster Management In many production environments, the backend needs to support distributed …","ref":"/docs/main/next/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many product environments, the backend needs to support high throughput and provide HA to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. 
You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is the Zookeeper ACL schema. expression is an expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of the Zookeeper Curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must make sure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking and all permissions to the specified user will be granted. Simultaneously, znode grants the READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. 
Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the oap nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many product environments, the backend needs to support high throughput and …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. 
Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. 
The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many production environments, the backend needs to support high throughput and …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. 
If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. 
Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many production environments, the backend needs to support high throughput and …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  
zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. 
Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many production environments, the backend needs to support high throughput and …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  
zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. 
Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many production environments, the backend needs to support high throughput and …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nKubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nZookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. 
expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  
Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many production environments, the backend needs to support distributed …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nKubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nZookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. 
Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. 
The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","excerpt":"Cluster Management In many production environments, the backend needs to support distributed …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nCloud Native Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. 
Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nTraditional Coordinator NOTICE In all the following coordinators, oap.internal.comm.host:oap.internal.comm.port is registered as the ID and address for the current OAP node. By default, because they are same in all OAP nodes, the registrations are conflicted, and (may) show as one registered node, which actually would be the node itself. In this case, the cluster mode is NOT working.\nPlease check the registered nodes on your coordinator servers, to make the registration information unique for every node. You could have two options\n Change core/gRPCHost(oap.internal.comm.host) and core/gRPCPort(oap.internal.comm.port) for internal, and setup external communication channels for data reporting and query. Use internalComHost and internalComPort in the config to provide a unique host and port for every OAP node. This host name port should be accessible for other OAP nodes.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  
zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  
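For reference, a minimal Nacos cluster sketch with the internal communication settings might look like the following. This is an illustrative example only: the property names (serviceName, hostPort, namespace) and the sample address 10.0.0.5 are assumptions here and should be verified against the application.yml bundled with your OAP release.
cluster:
  selector: ${SW_CLUSTER:nacos}
  nacos:
    serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
    hostPort: ${SW_CLUSTER_NACOS_HOST_PORT:localhost:8848}
    namespace: ${SW_CLUSTER_NACOS_NAMESPACE:"public"}
    username: ${SW_CLUSTER_NACOS_USERNAME:""}
    password: ${SW_CLUSTER_NACOS_PASSWORD:""}
    # Use a per-node address that the other OAP nodes can reach, instead of the default 0.0.0.0
    internalComHost: ${SW_CLUSTER_INTERNAL_COM_HOST:10.0.0.5}
    internalComPort: ${SW_CLUSTER_INTERNAL_COM_PORT:11800}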
","excerpt":"Cluster Management In many production environments, the backend needs to support distributed …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-cluster/","title":"Cluster Management"},{"body":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully utilize the clarity and performance boost brought by f-strings. Please do not use other styles - +, % or .format unless f-string is absolutely unfeasible in the context, or it is a logger message, which is optimized for the % style\nRun make dev-fix to invoke flynt to convert other formats to f-string, pay extra care to possible corner cases leading to a semantically different conversion.\nQuotes As we know both single quotes and double quotes are both acceptable in Python. For a better coding style, we enforce a check for using single quotes when possible.\nPlease only use double quotes on the outside when there are inevitable single quotes inside the string, or when there are nest quotes.\nFor example -\nfoo = f\u0026#34;I\u0026#39;m a string\u0026#34; bar = f\u0026#34;This repo is called \u0026#39;skywalking-python\u0026#39;\u0026#34; Run make dev-fix to invoke unify to deal with your quotes if flake8 complaints about it.\nDebug messages Please import the logger_debug_enabled variable and wrap your debug messages with a check.\nThis should be done for all performance critical components.\nif logger_debug_enabled: logger.debug(\u0026#39;Message - %s\u0026#39;, some_func()) Imports Please make sure the imports are placed in a good order, or flake8-isort will notify you of the violations.\nRun make dev-fix to automatically fix the sorting problem.\nNaming In PEP8 convention, we are required to use snake_case as the accepted style.\nHowever, there are special cases. For example, you are overriding/monkey-patching a method which happens to use the old style camelCase naming, then it is acceptable to have the original naming convention to preserve context.\nPlease mark the line with # noqa to avoid linting.\n","excerpt":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully …","ref":"/docs/skywalking-python/latest/en/contribution/codingstyle/","title":"Coding Style for SkyWalking Python"},{"body":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully utilize the clarity and performance boost brought by f-strings. Please do not use other styles - +, % or .format unless f-string is absolutely unfeasible in the context, or it is a logger message, which is optimized for the % style\nRun make dev-fix to invoke flynt to convert other formats to f-string, pay extra care to possible corner cases leading to a semantically different conversion.\nQuotes As we know both single quotes and double quotes are both acceptable in Python. 
For a better coding style, we enforce a check for using single quotes when possible.\nPlease only use double quotes on the outside when there are inevitable single quotes inside the string, or when there are nested quotes.\nFor example -\nfoo = f\u0026#34;I\u0026#39;m a string\u0026#34; bar = f\u0026#34;This repo is called \u0026#39;skywalking-python\u0026#39;\u0026#34; Run make dev-fix to invoke unify to deal with your quotes if flake8 complains about it.\nDebug messages Please import the logger_debug_enabled variable and wrap your debug messages with a check.\nThis should be done for all performance-critical components.\nif logger_debug_enabled: logger.debug(\u0026#39;Message - %s\u0026#39;, some_func()) Imports Please make sure the imports are placed in a good order, or flake8-isort will notify you of the violations.\nRun make dev-fix to automatically fix the sorting problem.\nNaming In PEP8 convention, we are required to use snake_case as the accepted style.\nHowever, there are special cases. For example, if you are overriding/monkey-patching a method that happens to use the old-style camelCase naming, it is acceptable to keep the original naming convention to preserve context.\nPlease mark the line with # noqa to avoid linting.\n","excerpt":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully …","ref":"/docs/skywalking-python/next/en/contribution/codingstyle/","title":"Coding Style for SkyWalking Python"},{"body":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully utilize the clarity and performance boost brought by f-strings. Please do not use other styles - +, % or .format unless f-string is absolutely unfeasible in the context, or it is a logger message, which is optimized for the % style.\nRun make dev-fix to invoke flynt to convert other formats to f-string, paying extra attention to possible corner cases that could lead to a semantically different conversion.\nQuotes As we know, both single quotes and double quotes are acceptable in Python. For a better coding style, we enforce a check for using single quotes when possible.\nPlease only use double quotes on the outside when there are inevitable single quotes inside the string, or when there are nested quotes.\nFor example -\nfoo = f\u0026#34;I\u0026#39;m a string\u0026#34; bar = f\u0026#34;This repo is called \u0026#39;skywalking-python\u0026#39;\u0026#34; Run make dev-fix to invoke unify to deal with your quotes if flake8 complains about it.\nDebug messages Please import the logger_debug_enabled variable and wrap your debug messages with a check.\nThis should be done for all performance-critical components.\nif logger_debug_enabled: logger.debug(\u0026#39;Message - %s\u0026#39;, some_func()) Imports Please make sure the imports are placed in a good order, or flake8-isort will notify you of the violations.\nRun make dev-fix to automatically fix the sorting problem.\nNaming In PEP8 convention, we are required to use snake_case as the accepted style.\nHowever, there are special cases. 
For example, if you are overriding/monkey-patching a method that happens to use the old-style camelCase naming, it is acceptable to keep the original naming convention to preserve context.\nPlease mark the line with # noqa to avoid linting.\n","excerpt":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/codingstyle/","title":"Coding Style for SkyWalking Python"},{"body":"Collecting and Gathering Kubernetes Monitoring Data Motivation SkyWalking has provided an access log collector based on the Agent layer and Service Mesh layer, and can generate corresponding topology maps and metrics based on the data. However, the Kubernetes Layer still lacks a corresponding access log collector and analysis work.\nThis proposal is dedicated to collecting and analyzing network access logs in Kubernetes.\nArchitecture Graph There is no significant architecture-level change. It still uses the Rover project to collect data and report it to SkyWalking OAP using the gRPC protocol.\nPropose Changes Based on the content in Motivation, if we want to ignore the application types (different programming languages) and only monitor network logs, using eBPF is a good choice. This is mainly reflected in the following aspects:\n Non-intrusive: When monitoring network access logs with eBPF, the application does not need to make any changes to be monitored. Language-unrestricted: Regardless of which programming language is used in the application, network data will ultimately be accessed through Linux Syscalls. Therefore, we can monitor network data by attaching eBPF to the syscalls layer, thus ignoring programming languages. Kernel interception: Since eBPF can attach to kernel methods, it can obtain the execution status of each packet at the L2-L4 layers and generate more detailed metrics.  For these reasons, the data can be collected and monitored in SkyWalking Rover through the following steps:\n Monitor the network execution status of all processes in Kubernetes when the Rover system starts. Periodically report the data via the gRPC protocol to SkyWalking OAP. SkyWalking OAP parses the network access logs and generates the corresponding network topology, metrics, etc.  Limitation For content that uses TLS for data transmission, Rover will detect whether the current language uses libraries such as OpenSSL. If it is used, Rover will asynchronously intercept the relevant OpenSSL methods when the process starts, to perceive the original data content.\nHowever, this approach is not feasible for Java because Java does not use the OpenSSL library but performs encryption/decryption through Java code. Currently, eBPF cannot intercept Java method calls. Therefore, it is unable to perceive the TLS data protocol in Java.\nService with Istio sidecar scenario If the Service is deployed with an Istio sidecar, Rover will still monitor each process. If the Service is a Java service and uses TLS, it can analyze the relevant traffic generated in the sidecar (envoy).\nImported Dependencies libs and their licenses. No new library is planned to be added to the codebase.\nCompatibility About the protocol, there should be no breaking changes, but enhancements only:\n Rover: adding a new gRPC data collection protocol for reporting the access logs. OAP: It should have no protocol updates. The existing query protocols are already sufficient for querying Kubernetes topology and metric data.  
Data Generation Entity  service_traffic     column data type value description     name string kubernetes service name   short_name string same with name   service_id string base64(name).1   group string empty string   layer string KUBERNETES     instance_traffic     column data type value description     service_id string base64(service_name).1   name string pod name   last_ping long last access log message timestamp(millisecond)   properties json empty string     endpoint_traffic     column data type value description     service_id string base64(service_name).1   name string access log endpoint name(for HTTP1, is URI)    Entity Relation All entity information is built on connections. If the target address is remote, the name will be resolved in the following order:\n If it is a pod IP, it will be resolved as pod information. If it is a service IP, it will be resolved as service information. If neither exists, only pod information will be displayed.  Different entities have different displays for remote addresses. Please refer to the following table.\n   table name remote info(display by following order)     service_relation service name, remote IP address   instance_relation pod name, remote IP address    NOTICE: If it is the internal data interaction within the pod, such as exchanging data between services and sidecar (envoy), no corresponding traffic will be generated. We only generate and interact with external pods.\nLimitation If the service IP is used to send requests to the upstream, we will use eBPF to perceive the real target PodIP by perceiving relevant conntrack records.\nHowever, if conntrack technology is not used, it is difficult to perceive the real target IP address. In this case, instance relation data of this kind will be dropped, but we will mark all discarded relationship generation counts through a metric for better understanding of the situation.\nMetrics Integrate the data into the OAL system and generate corresponding metrics through predefined data combined with OAL statements.\nGeneral usage docs This proposal will only add a module to Rover that explains the configuration of access logs, and changes in the Kubernetes module on the UI.\nIn the Kubernetes UI, users can see the following additions:\n Topology: A topology diagram showing the calling relationships between services, instances, and processes. Entity Metrics: Metric data for services, instances, and processes. Call Relationship Metrics: Metrics for call relationships between different entities.  ","excerpt":"Collecting and Gathering Kubernetes Monitoring Data Motivation SkyWalking has provided an access log …","ref":"/docs/main/next/en/swip/swip-2/","title":"Collecting and Gathering Kubernetes Monitoring Data"},{"body":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. 
Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","excerpt":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they …","ref":"/docs/main/latest/en/setup/backend/filelog-native/","title":"Collecting File Log"},{"body":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","excerpt":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they …","ref":"/docs/main/next/en/setup/backend/filelog-native/","title":"Collecting File Log"},{"body":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). 
Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","excerpt":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they …","ref":"/docs/main/v9.5.0/en/setup/backend/filelog-native/","title":"Collecting File Log"},{"body":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","excerpt":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they …","ref":"/docs/main/v9.6.0/en/setup/backend/filelog-native/","title":"Collecting File Log"},{"body":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). 
Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","excerpt":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they …","ref":"/docs/main/v9.7.0/en/setup/backend/filelog-native/","title":"Collecting File Log"},{"body":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","excerpt":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them …","ref":"/docs/main/latest/en/setup/backend/log-agent-native/","title":"Collecting Logs by Agents"},{"body":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","excerpt":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them …","ref":"/docs/main/next/en/setup/backend/log-agent-native/","title":"Collecting Logs by Agents"},{"body":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). 
When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","excerpt":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them …","ref":"/docs/main/v9.5.0/en/setup/backend/log-agent-native/","title":"Collecting Logs by Agents"},{"body":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","excerpt":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them …","ref":"/docs/main/v9.6.0/en/setup/backend/log-agent-native/","title":"Collecting Logs by Agents"},{"body":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). 
When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","excerpt":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them …","ref":"/docs/main/v9.7.0/en/setup/backend/log-agent-native/","title":"Collecting Logs by Agents"},{"body":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment Key Description     logger.level INFO ROVER_LOGGER_LEVEL The lowest level of printing allowed.    Core Core is used to communicate with the backend server. It provides APIs for other modules to establish connections with the backend.\n   Name Default Environment Key Description     core.cluster_name  ROVER_CORE_CLUSTER_NAME The name of the cluster.   core.backend.addr localhost:11800 ROVER_BACKEND_ADDR The backend server address.   core.backend.enable_TLS false ROVER_BACKEND_ENABLE_TLS The TLS switch.   core.backend.client_pem_path client.pem ROVER_BACKEND_PEM_PATH The file path of client.pem. The config only works when opening the TLS switch.   core.backend.client_key_path client.key ROVER_BACKEND_KEY_PATH The file path of client.key. The config only works when opening the TLS switch.   core.backend.insecure_skip_verify false ROVER_BACKEND_INSECURE_SKIP_VERIFY InsecureSkipVerify controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   core.backend.ca_pem_path ca.pem ROVER_BACKEND_CA_PEM_PATH The file path oca.pem. The config only works when opening the TLS switch.   core.backend.check_period 5 ROVER_BACKEND_CHECK_PERIOD How frequently to check the connection(second).   core.backend.authentication  ROVER_BACKEND_AUTHENTICATION The auth value when send request.    ","excerpt":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment …","ref":"/docs/skywalking-rover/latest/en/setup/configuration/common/","title":"Common configuration"},{"body":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment Key Description     logger.level INFO ROVER_LOGGER_LEVEL The lowest level of printing allowed.    Core Core is used to communicate with the backend server. It provides APIs for other modules to establish connections with the backend.\n   Name Default Environment Key Description     core.cluster_name  ROVER_CORE_CLUSTER_NAME The name of the cluster.   core.backend.addr localhost:11800 ROVER_BACKEND_ADDR The backend server address.   core.backend.enable_TLS false ROVER_BACKEND_ENABLE_TLS The TLS switch.   core.backend.client_pem_path client.pem ROVER_BACKEND_PEM_PATH The file path of client.pem. The config only works when opening the TLS switch.   core.backend.client_key_path client.key ROVER_BACKEND_KEY_PATH The file path of client.key. The config only works when opening the TLS switch.   core.backend.insecure_skip_verify false ROVER_BACKEND_INSECURE_SKIP_VERIFY InsecureSkipVerify controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   
core.backend.ca_pem_path ca.pem ROVER_BACKEND_CA_PEM_PATH The file path oca.pem. The config only works when opening the TLS switch.   core.backend.check_period 5 ROVER_BACKEND_CHECK_PERIOD How frequently to check the connection(second).   core.backend.authentication  ROVER_BACKEND_AUTHENTICATION The auth value when send request.    ","excerpt":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment …","ref":"/docs/skywalking-rover/next/en/setup/configuration/common/","title":"Common configuration"},{"body":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment Key Description     logger.level INFO ROVER_LOGGER_LEVEL The lowest level of printing allowed.    Core Core is used to communicate with the backend server. It provides APIs for other modules to establish connections with the backend.\n   Name Default Environment Key Description     core.cluster_name  ROVER_CORE_CLUSTER_NAME The name of the cluster.   core.backend.addr localhost:11800 ROVER_BACKEND_ADDR The backend server address.   core.backend.enable_TLS false ROVER_BACKEND_ENABLE_TLS The TLS switch.   core.backend.client_pem_path client.pem ROVER_BACKEND_PEM_PATH The file path of client.pem. The config only works when opening the TLS switch.   core.backend.client_key_path client.key ROVER_BACKEND_KEY_PATH The file path of client.key. The config only works when opening the TLS switch.   core.backend.insecure_skip_verify false ROVER_BACKEND_INSECURE_SKIP_VERIFY InsecureSkipVerify controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   core.backend.ca_pem_path ca.pem ROVER_BACKEND_CA_PEM_PATH The file path oca.pem. The config only works when opening the TLS switch.   core.backend.check_period 5 ROVER_BACKEND_CHECK_PERIOD How frequently to check the connection(second).   core.backend.authentication  ROVER_BACKEND_AUTHENTICATION The auth value when send request.    ","excerpt":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/common/","title":"Common configuration"},{"body":"Common configuration The common configuration has 2 parts, which are logger configuration and the telemetry configuration.\nLogger    Config Default Description     log_pattern %time [%level][%field] - %msg The log format pattern configuration.   time_pattern 2006-01-02 15:04:05.000 The time format pattern configuration.   level info The lowest level of printing allowed.    Self Telemetry    Config Default Description     cluster default-cluster The space concept for the deployment, such as the namespace concept in the Kubernetes.   service default-service The group concept for the deployment, such as the service resource concept in the Kubernetes.   instance default-instance The minimum running unit, such as the pod concept in the Kubernetes.    ","excerpt":"Common configuration The common configuration has 2 parts, which are logger configuration and the …","ref":"/docs/skywalking-satellite/latest/en/setup/configuration/common/","title":"Common configuration"},{"body":"Common configuration The common configuration has 2 parts, which are logger configuration and the telemetry configuration.\nLogger    Config Default Description     log_pattern %time [%level][%field] - %msg The log format pattern configuration.   time_pattern 2006-01-02 15:04:05.000 The time format pattern configuration.   
level info The lowest level of printing allowed.    Self Telemetry    Config Default Description     cluster default-cluster The space concept for the deployment, such as the namespace concept in the Kubernetes.   service default-service The group concept for the deployment, such as the service resource concept in the Kubernetes.   instance default-instance The minimum running unit, such as the pod concept in the Kubernetes.    ","excerpt":"Common configuration The common configuration has 2 parts, which are logger configuration and the …","ref":"/docs/skywalking-satellite/next/en/setup/configuration/common/","title":"Common configuration"},{"body":"Common configuration The common configuration has 2 parts, which are logger configuration and the telemetry configuration.\nLogger    Config Default Description     log_pattern %time [%level][%field] - %msg The log format pattern configuration.   time_pattern 2006-01-02 15:04:05.000 The time format pattern configuration.   level info The lowest level of printing allowed.    Self Telemetry    Config Default Description     cluster default-cluster The space concept for the deployment, such as the namespace concept in the Kubernetes.   service default-service The group concept for the deployment, such as the service resource concept in the Kubernetes.   instance default-instance The minimum running unit, such as the pod concept in the Kubernetes.    ","excerpt":"Common configuration The common configuration has 2 parts, which are logger configuration and the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/common/","title":"Common configuration"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/latest/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/next/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the same versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.4.0   9.0.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.4.0    Ecosystem Agents All following agent implementations are a part of SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky PHP agent     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0 \u0026gt; = 3.0.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0    All these projects are maintained by their own communities, please reach them if you face any compatibility issue.\n All above compatibility are only references, if you face unimplemented error, it means you need to upgrade OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the same versions …","ref":"/docs/main/v9.0.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agnet) Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky PHP agent     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0 \u0026gt; = 3.0.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.1.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agnet) Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky PHP agent     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0 \u0026gt; = 3.0.0 \u0026amp;\u0026amp; \u0026lt; 5.0.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 5.0.0   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 5.0.0    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.2.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agent) Satellite PHP     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No All   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0 All    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.3.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agent) Satellite PHP     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No All   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0 All    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.4.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agent) Satellite PHP     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No All   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0 All   9.5.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.5.0 \u0026gt; = 1.2.0 All    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.5.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.6.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","excerpt":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical …","ref":"/docs/main/v9.7.0/en/setup/service-agent/agent-compatibility/","title":"Compatibility"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, the agent saves the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cached classes to memory, although it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the following ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cached classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cached classes to the SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/latest/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/next/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.0.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.1.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.2.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.3.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.4.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.5.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.6.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","excerpt":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, …","ref":"/docs/main/v9.7.0/en/faq/compatible-with-other-javaagent-bytecode-processing/","title":"Compatibility with other Java agent bytecode processes"},{"body":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux version \u0026gt;= 4.4, and dependency these tools:\n llvm \u0026gt;= 13. libbpf-dev.  MacOS or Windows Make sure it already has a docker environment.\nCommand git clone https://github.com/apache/skywalking-rover cd skywalking-rover # Linux platform make generate build # MacOS or Windows make container-generate build ","excerpt":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux …","ref":"/docs/skywalking-rover/latest/en/guides/compile/how-to-compile/","title":"Compiling"},{"body":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux version \u0026gt;= 4.4, and dependency these tools:\n llvm \u0026gt;= 13. libbpf-dev.  MacOS or Windows Make sure it already has a docker environment.\nCommand git clone https://github.com/apache/skywalking-rover cd skywalking-rover # Linux platform make generate build # MacOS or Windows make container-generate build ","excerpt":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux …","ref":"/docs/skywalking-rover/next/en/guides/compile/how-to-compile/","title":"Compiling"},{"body":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux version \u0026gt;= 4.4, and dependency these tools:\n llvm \u0026gt;= 13. libbpf-dev.  
MacOS or Windows Make sure it already has a docker environment.\nCommand git clone https://github.com/apache/skywalking-rover cd skywalking-rover # Linux platform make generate build # MacOS or Windows make container-generate build ","excerpt":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux …","ref":"/docs/skywalking-rover/v0.6.0/en/guides/compile/how-to-compile/","title":"Compiling"},{"body":"Compiling Go version Go version 1.18 and 1.19 are supported for compilation.\nPlatform Linux, MacOS and Windows are supported in SkyWalking Satellite. However, some components don\u0026rsquo;t fit the Windows platform, including:\n mmap-queue  Command git clone https://github.com/apache/skywalking-satellite cd skywalking-satellite make build ","excerpt":"Compiling Go version Go version 1.18 and 1.19 are supported for compilation.\nPlatform Linux, MacOS …","ref":"/docs/skywalking-satellite/latest/en/guides/compile/how-to-compile/","title":"Compiling"},{"body":"Compiling Go version Go version 1.19 is required for compilation.\nPlatform Linux, MacOS and Windows are supported in SkyWalking Satellite. However, some components don\u0026rsquo;t fit the Windows platform, including:\n mmap-queue  Command git clone https://github.com/apache/skywalking-satellite cd skywalking-satellite make build ","excerpt":"Compiling Go version Go version 1.19 is required for compilation.\nPlatform Linux, MacOS and Windows …","ref":"/docs/skywalking-satellite/next/en/guides/compile/how-to-compile/","title":"Compiling"},{"body":"Compiling Go version Go version 1.18 and 1.19 are supported for compilation.\nPlatform Linux, MacOS and Windows are supported in SkyWalking Satellite. However, some components don\u0026rsquo;t fit the Windows platform, including:\n mmap-queue  Command git clone https://github.com/apache/skywalking-satellite cd skywalking-satellite make build ","excerpt":"Compiling Go version Go version 1.18 and 1.19 are supported for compilation.\nPlatform Linux, MacOS …","ref":"/docs/skywalking-satellite/v1.2.0/en/guides/compile/how-to-compile/","title":"Compiling"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. 
You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/latest/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/next/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. 
[ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.0.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. 
After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.1.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.2.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. 
[ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.3.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. 
After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.4.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.5.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. 
[ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.6.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. 
After specifying the version, compile and run normally.\n","excerpt":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The …","ref":"/docs/main/v9.7.0/en/faq/how-to-build-with-mac-m1/","title":"Compiling issues on Mac's M1 chip"},{"body":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. make docker.push.alpine docker.push.java8 docker.push.java11   ","excerpt":"Compiling project This document will help you compile and build a project in your maven and set your …","ref":"/docs/skywalking-java/latest/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. 
make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. make docker.push.alpine docker.push.java8 docker.push.java11   ","excerpt":"Compiling project This document will help you compile and build a project in your maven and set your …","ref":"/docs/skywalking-java/next/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 8+.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. make docker.push.alpine docker.push.java8 docker.push.java11   ","excerpt":"Compiling project This document will help you compile and build a project in your maven and set your …","ref":"/docs/skywalking-java/v9.0.0/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. 
make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. make docker.push.alpine docker.push.java8 docker.push.java11   ","excerpt":"Compiling project This document will help you compile and build a project in your maven and set your …","ref":"/docs/skywalking-java/v9.1.0/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. 
make docker.push.alpine docker.push.java8 docker.push.java11   ","excerpt":"Compiling project This document will help you compile and build a project in your maven and set your …","ref":"/docs/skywalking-java/v9.2.0/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and Rust environments.\nInstall PHP Environment For Debian user:\nsudo apt install php-cli php-dev For MacOS user:\nbrew install php Install Rust Environment Install Rust 1.65.0+.\nFor Linux user:\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh For MacOS user:\nbrew install rust Install requirement For Debian user:\nsudo apt install gcc make llvm-dev libclang-dev clang protobuf-compiler For MacOS user:\nbrew install protobuf Build and install Skywalking PHP Agent from archive file For Linux user:\nsudo pecl install skywalking_agent-x.y.z.tgz For MacOS user:\n Running the pecl install command with the php installed in brew may encounter the problem of mkdir, please refer to Installing PHP and PECL Extensions on MacOS.\n pecl install skywalking_agent-x.y.z.tgz The extension file skywalking_agent.so is generated in the php extension folder, get it by run php-config --extension-dir.\n","excerpt":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and …","ref":"/docs/skywalking-php/latest/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and Rust environments.\nInstall PHP Environment For Debian user:\nsudo apt install php-cli php-dev For MacOS user:\nbrew install php Install Rust Environment Install Rust 1.65.0+.\nFor Linux user:\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh For MacOS user:\nbrew install rust Install requirement For Debian user:\nsudo apt install gcc make llvm-dev libclang-dev clang protobuf-compiler For MacOS user:\nbrew install protobuf Build and install Skywalking PHP Agent from archive file For Linux user:\nsudo pecl install skywalking_agent-x.y.z.tgz For MacOS user:\n Running the pecl install command with the php installed in brew may encounter the problem of mkdir, please refer to Installing PHP and PECL Extensions on MacOS.\n pecl install skywalking_agent-x.y.z.tgz The extension file skywalking_agent.so is generated in the php extension folder, get it by run php-config --extension-dir.\n","excerpt":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and …","ref":"/docs/skywalking-php/next/en/contribution/compiling/","title":"Compiling project"},{"body":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and Rust environments.\nInstall PHP Environment For Debian user:\nsudo apt install php-cli php-dev For MacOS user:\nbrew install php Install Rust Environment Install Rust 1.65.0+.\nFor Linux user:\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh For MacOS user:\nbrew install rust Install requirement For Debian user:\nsudo apt install gcc make llvm-dev libclang-dev clang protobuf-compiler For MacOS user:\nbrew install protobuf Build and install Skywalking PHP Agent from archive file For Linux user:\nsudo pecl install skywalking_agent-x.y.z.tgz For MacOS user:\n Running the pecl install command with the php installed in brew may encounter the problem of mkdir, 
please refer to Installing PHP and PECL Extensions on MacOS.\n pecl install skywalking_agent-x.y.z.tgz The extension file skywalking_agent.so is generated in the php extension folder, get it by run php-config --extension-dir.\n","excerpt":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and …","ref":"/docs/skywalking-php/v0.7.0/en/contribution/compiling/","title":"Compiling project"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/latest/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  
All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/next/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. 
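The Example and Component-Server-Mappings entries above lose their original indentation in this flattened view; reconstructed as YAML, the same definitions read roughly as follows (a sketch of the settings file's structure, not its complete contents):

```yaml
# Component library IDs: name -> reserved ID plus the languages that may report it
Tomcat:
  id: 1
  languages: Java
HttpClient:
  id: 2
  languages: Java,C#,Node.js
Dubbo:
  id: 3
  languages: Java
H2:
  id: 4
  languages: Java

# Remote server mapping: client component library name -> conjectured server component
Component-Server-Mappings:
  Jedis: Redis
  StackExchange.Redis: Redis
  Redisson: Redis
  Lettuce: Redis
  Zookeeper: Zookeeper
  SqlClient: SqlServer
  Npgsql: PostgreSQL
  MySqlConnector: Mysql
  EntityFrameworkCore.InMemory: InMemoryDatabase
```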
The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.0.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.1.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  
All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.2.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. 
The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.3.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.4.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. 
Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.5.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. 
The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.6.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","excerpt":"Component library settings Component library settings are about your own or third-party libraries …","ref":"/docs/main/v9.7.0/en/guides/component-library-settings/","title":"Component library settings"},{"body":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E and the landscape.\n What is SkyWalking Infra E2E?  Project Goals. Provides the goals, which SkyWalking Infra E2E is trying to focus on and provides features about them.    After you read the above documents, you should understand the basic goals of the SkyWalking Infra E2E. 
Now, you can choose which following parts you are interested, then dive in.\n Module Design  ","excerpt":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E …","ref":"/docs/skywalking-infra-e2e/latest/en/concepts-and-designs/readme/","title":"Concepts and Designs"},{"body":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E and the landscape.\n What is SkyWalking Infra E2E?  Project Goals. Provides the goals, which SkyWalking Infra E2E is trying to focus on and provides features about them.    After you read the above documents, you should understand the basic goals of the SkyWalking Infra E2E. Now, you can choose which following parts you are interested, then dive in.\n Module Design  ","excerpt":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E …","ref":"/docs/skywalking-infra-e2e/next/en/concepts-and-designs/readme/","title":"Concepts and Designs"},{"body":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E and the landscape.\n What is SkyWalking Infra E2E?  Project Goals. Provides the goals, which SkyWalking Infra E2E is trying to focus on and provides features about them.    After you read the above documents, you should understand the basic goals of the SkyWalking Infra E2E. Now, you can choose which following parts you are interested, then dive in.\n Module Design  ","excerpt":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/concepts-and-designs/readme/","title":"Concepts and Designs"},{"body":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite and the landscape.\n What is SkyWalking Satellite?  Overview and Core concepts. Provides a high-level description and introduction, including the problems the project solves. Project Goals. Provides the goals, which SkyWalking Satellite is trying to focus and provide features about them.    After you read the above documents, you should understand basic goals of the SkyWalking Satellite. Now, you can choose which following parts you are interested, then dive in.\n Module Design Plugin Mechanism Project Structure Memory mapped Queue  ","excerpt":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/readme/","title":"Concepts and Designs"},{"body":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite and the landscape.\n What is SkyWalking Satellite?  Overview and Core concepts. Provides a high-level description and introduction, including the problems the project solves. Project Goals. Provides the goals, which SkyWalking Satellite is trying to focus and provide features about them.    After you read the above documents, you should understand basic goals of the SkyWalking Satellite. 
Now, you can choose which following parts you are interested, then dive in.\n Module Design Plugin Mechanism Project Structure Memory mapped Queue  ","excerpt":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/readme/","title":"Concepts and Designs"},{"body":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite and the landscape.\n What is SkyWalking Satellite?  Overview and Core concepts. Provides a high-level description and introduction, including the problems the project solves. Project Goals. Provides the goals, which SkyWalking Satellite is trying to focus and provide features about them.    After you read the above documents, you should understand basic goals of the SkyWalking Satellite. Now, you can choose which following parts you are interested, then dive in.\n Module Design Plugin Mechanism Project Structure Memory mapped Queue  ","excerpt":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite …","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/readme/","title":"Concepts and Designs"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. 
SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. 
- 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. 
SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). 
This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. 
SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - targets Hosts with ports of the BanyanDB. SW_STORAGE_BANYANDB_TARGETS 127.0.0.1:17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. 
SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. 
Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. 
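The Zipkin receiver rows above can be read together as one block; the sketch below shows them side by side with the Kafka collector switched on, so the Kafka-related settings are seen in context. The provider/selector layout is assumed; setting names, variable names, and defaults are taken from the rows above.

```yaml
# Illustrative sketch only; layout and selector are assumed, names/defaults come from the rows above.
receiver-zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:default}   # selector name assumed
  default:
    sampleRate: ${SW_ZIPKIN_SAMPLE_RATE:10000}        # precision 1/10000, 10000 = 100%
    enableHttpCollector: ${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}
    restPort: ${SW_RECEIVER_ZIPKIN_REST_PORT:9411}
    enableKafkaCollector: ${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}   # documented default is false
    kafkaBootstrapServers: ${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}
    kafkaGroupId: ${SW_ZIPKIN_KAFKA_GROUP_ID:zipkin}
    kafkaTopic: ${SW_ZIPKIN_KAFKA_TOPIC:zipkin}
    kafkaConsumers: ${SW_ZIPKIN_KAFKA_CONSUMERS:1}
```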
SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). 
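The telemetry rows above are short enough to show in one piece; the sketch below is how the Prometheus self-observability endpoint could be expressed, with the layout and selector assumed and the host/port values taken from the rows above.

```yaml
# Illustrative sketch only; layout and selector are assumed, names/defaults come from the rows above.
telemetry:
  selector: ${SW_TELEMETRY:prometheus}   # selector mechanism assumed
  prometheus:
    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
```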
SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   debugging-query default       - - keywords4MaskingSecretsOfConfig Include the list of keywords to filter configurations including secrets. Separate keywords by a comma. SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS user,password,token,accessKey,secretKey,authentication   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. 
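The exporter rows above cover both the gRPC metrics exporter and the Kafka trace/log exporters; the following sketch groups the Kafka-related ones, with the two enable flags turned on to show the full set together. Layout and selector are assumed; everything else is taken from the rows above.

```yaml
# Illustrative sketch only; layout and selector are assumed, names/defaults come from the rows above.
exporter:
  selector: ${SW_EXPORTER:default}   # selector name assumed
  default:
    enableKafkaTrace: ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}   # documented default is false
    enableKafkaLog: ${SW_EXPORTER_ENABLE_KAFKA_LOG:true}       # documented default is false
    kafkaBootstrapServers: ${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}
    kafkaTopicTrace: ${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}
    kafkaTopicLog: ${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}
    exportErrorStatusTraceOnly: ${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}
```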
By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 request SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/latest/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. 
SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE Default to gRPC\u0026rsquo;s implementation, which is a cached thread pool that can grow infinitely.   
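Because the TTL and data-keeper rows above are the ones operators tune most often, here they are gathered into one sketch. The module/provider/selector layout and the `${ENV_VAR:default}` placeholder style are assumed; the setting names, variable names, and defaults are exactly the ones listed above.

```yaml
# Illustrative sketch only; layout and selector are assumed, names/defaults come from the rows above.
core:
  selector: ${SW_CORE:default}   # selector name assumed
  default:
    enableDataKeeperExecutor: ${SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR:true}
    dataKeeperExecutePeriod: ${SW_CORE_DATA_KEEPER_EXECUTE_PERIOD:5}   # minutes
    recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3}     # days; traces, top N records, logs; minimum 2
    metricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7}   # days; keep >= recordDataTTL
```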
- - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   - - enableHierarchy If disable the hierarchy, the service and instance hierarchy relation will not be built. And the query of hierarchy will return empty result. All the hierarchy relations are defined in the hierarchy-definition.yml. Notice: some of the configurations only available for kubernetes environments. SW_CORE_ENABLE_HIERARCHY true   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. 
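For OAP cluster coordination, the Zookeeper rows above translate into a block along the following lines, shown here with ACL enabled so the schema/expression settings appear in context. The layout and selector are assumed; the setting names, variable names, and defaults come from the rows above.

```yaml
# Illustrative sketch only; layout and selector are assumed, names/defaults come from the rows above.
cluster:
  selector: ${SW_CLUSTER:zookeeper}   # selector mechanism assumed
  zookeeper:
    namespace: ${SW_NAMESPACE:/}      # root path by default
    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000}
    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3}
    enableACL: ${SW_ZK_ENABLE_ACL:true}   # documented default is false
    schema: ${SW_ZK_SCHEMA:digest}
    expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}
```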
SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. 
Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. 
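The ElasticSearch/OpenSearch rows above are the largest group in the table, so a condensed sketch of the most commonly touched settings may help orientation. Layout and selector are assumed; setting names, variable names, and defaults are taken from the rows above.

```yaml
# Illustrative sketch only; layout and selector are assumed, names/defaults come from the rows above.
storage:
  selector: ${SW_STORAGE:elasticsearch}   # selector mechanism assumed
  elasticsearch:
    namespace: ${SW_NAMESPACE:""}
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost}
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"HTTP"}
    user: ${SW_ES_USER:""}
    password: ${SW_ES_PASSWORD:""}
    trustStorePath: ${SW_STORAGE_ES_SSL_JKS_PATH:""}
    trustStorePass: ${SW_STORAGE_ES_SSL_JKS_PASS:""}
    dayStep: ${SW_STORAGE_DAY_STEP:1}
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
    bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:5000}
    flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:5}   # seconds
    concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}
```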
SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - targets Hosts with ports of the BanyanDB. SW_STORAGE_BANYANDB_TARGETS 127.0.0.1:17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. 
SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE Default to gRPC\u0026rsquo;s implementation, which is a cached thread pool that can grow infinitely.   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. 
- -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}.${pod.metadata.namespace}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}.${serviceEntry.metadata.namespace}   - - istioServiceEntryIgnoredNamespaces When looking up service informations from the Istio ServiceEntries, some of the ServiceEntries might be created in several namespaces automatically by some components, and OAP will randomly pick one of them to build the service name, users can use this config to exclude ServiceEntries that they don\u0026rsquo;t want to be used. Comma separated. SW_ISTIO_SERVICE_ENTRY_IGNORED_NAMESPACES -   - - gRPCHost Binding IP of gRPC service for Envoy access log service. SW_ALS_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC service for Envoy access log service. SW_ALS_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_ALS_GRPC_THREAD_POOL_SIZE Default to gRPC\u0026rsquo;s implementation, which is a cached thread pool that can grow infinitely.   - - gRPCSslEnabled Activates SSL for gRPC services. SW_ALS_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_ALS_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_ALS_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. 
SW_ALS_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_ALS_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. 
SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. 
SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   - - buildInfoVersion Mock version for API buildInfo SW_PROMQL_BUILD_INFO_VERSION 2.45.0   - - buildInfoRevision Mock revision for API buildInfo SW_PROMQL_BUILD_INFO_REVISION    - - buildInfoBranch Mock branch for API buildInfo SW_PROMQL_BUILD_INFO_BRANCH    - - buildInfoBuildUser Mock build user for API buildInfo SW_PROMQL_BUILD_INFO_BUILD_USER    - - buildInfoBuildDate Mock build date for API buildInfo SW_PROMQL_BUILD_INFO_BUILD_DATE    - - buildInfoGoVersion Mock go version for API buildInfo SW_PROMQL_BUILD_INFO_GO_VERSION    alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - - maxInboundMessageSize The max inbound message size of gRPC. SW_DCS_MAX_INBOUND_MESSAGE_SIZE 4194304   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. 
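The PromQL module rows above, including the mocked buildInfo fields, fit into a small block like the one below. Layout and selector are assumed; setting names, variable names, and defaults come from the rows above.

```yaml
# Illustrative sketch only; layout and selector are assumed, names/defaults come from the rows above.
promql:
  selector: ${SW_PROMQL:default}   # selector name assumed
  default:
    restHost: ${SW_PROMQL_REST_HOST:0.0.0.0}
    restPort: ${SW_PROMQL_REST_PORT:9090}
    restContextPath: ${SW_PROMQL_REST_CONTEXT_PATH:/}
    buildInfoVersion: ${SW_PROMQL_BUILD_INFO_VERSION:"2.45.0"}   # mocked version reported by the buildInfo API
```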
SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   debugging-query default       - - keywords4MaskingSecretsOfConfig Include the list of keywords to filter configurations including secrets. Separate keywords by a comma. SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS user,password,token,accessKey,secretKey,authentication   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. 
- -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max Thtread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 response SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS firhose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicate if enable HTTPS for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/next/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMinThreads Minimum thread number of RESTful services. SW_CORE_REST_JETTY_MIN_THREADS 1   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_JETTY_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_JETTY_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_JETTY_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. 
SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - enableDatabaseSession Cache metrics data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute. SW_CORE_ENABLE_DATABASE_SESSION true   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_TAG_KEYS http.method,status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. 
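To make the mapping between the core settings and application.yml concrete, here is a minimal sketch using only variables and defaults from the table above; the module/selector layout is illustrative and not a complete configuration.

```yaml
# Sketch: core module excerpt; values are the documented defaults.
core:
  selector: ${SW_CORE:default}
  default:
    restHost: ${SW_CORE_REST_HOST:0.0.0.0}
    restPort: ${SW_CORE_REST_PORT:12800}
    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
    recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3}       # days
    metricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7}     # days
    topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10} # minutes
```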
SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI true   - - maxDurationOfAnalyzeEBPFProfiling The maximum duration(in minute) of analyze the eBPF profiling data. - 10   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. 
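The cluster coordination settings above translate into application.yml in the same way; the following sketch selects the Zookeeper coordinator. The SW_CLUSTER selector variable is an assumption, while the provider settings come from the table.

```yaml
# Sketch: switching cluster coordination from standalone to Zookeeper.
cluster:
  selector: ${SW_CLUSTER:standalone}   # selector variable assumed for illustration
  zookeeper:
    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000}
    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3}
    enableACL: ${SW_ZK_ENABLE_ACL:false}
```

With such a selector in place, starting the OAP with SW_CLUSTER=zookeeper would activate this provider.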
SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_SUPERDATASET_STORAGE_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. 
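For the ElasticSearch/OpenSearch storage options above, a minimal application.yml sketch looks like the following; only settings shown in the table are used, and the selector value is illustrative.

```yaml
# Sketch: ElasticSearch/OpenSearch storage with the documented defaults.
storage:
  selector: ${SW_STORAGE:elasticsearch}
  elasticsearch:
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost}
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
    user: ${SW_ES_USER:""}
    password: ${SW_ES_PASSWORD:""}
    dayStep: ${SW_STORAGE_DAY_STEP:1}
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
```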
SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - driver H2 JDBC driver. SW_STORAGE_H2_DRIVER org.h2.jdbcx.JdbcDataSource   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In H2, we use multiple physical columns to host the values: e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have the same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. 
Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5 SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - influxdb - InfluxDB storage. - -   - - url InfluxDB connection URL. SW_STORAGE_INFLUXDB_URL http://localhost:8086   - - user User name of InfluxDB. SW_STORAGE_INFLUXDB_USER root   - - password Password of InfluxDB. SW_STORAGE_INFLUXDB_PASSWORD -   - - database Database of InfluxDB. SW_STORAGE_INFLUXDB_DATABASE skywalking   - - actions The number of actions to collect. SW_STORAGE_INFLUXDB_ACTIONS 1000   - - duration The maximum waiting time (in milliseconds). SW_STORAGE_INFLUXDB_DURATION 1000   - - batchEnabled If true, write points with batch API. SW_STORAGE_INFLUXDB_BATCH_ENABLED true   - - fetchTaskLogMaxSize The maximum number of fetch task log in a request. SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE 5000   - - connectionResponseFormat The response format of connection to influxDB. It can only be MSGPACK or JSON. SW_STORAGE_INFLUXDB_CONNECTION_RESPONSE_FORMAT MSGPACK   - iotdb - IoTDB storage. - -   - - host The host of IoTDB server. SW_STORAGE_IOTDB_HOST 127.0.0.1   - - rpcPort The port listened by IoTDB server. SW_STORAGE_IOTDB_RPC_PORT 6667   - - username The username of IoTDB SW_STORAGE_IOTDB_USERNAME root   - - password The password of IoTDB SW_STORAGE_IOTDB_PASSWORD root   - - storageGroup The path of Storage Group and it must start with root. SW_STORAGE_IOTDB_STORAGE_GROUP root.skywalking   - - sessionPoolSize The connection pool size for IoTDB. If the value is 0, the size of SessionPool will be 2 * CPU_Cores SW_STORAGE_IOTDB_SESSIONPOOL_SIZE 8   - - fetchTaskLogMaxSize the max number of fetch task log in a request SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE 1000   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. 
SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMinThreads Minimum thread number of RESTful services. SW_RECEIVER_SHARING_JETTY_MIN_THREADS 1   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_JETTY_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_JETTY_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_JETTY_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. 
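The agent-analyzer options above follow the same pattern; a hedged sketch of the corresponding application.yml block (layout illustrative, all settings and defaults from the table):

```yaml
# Sketch: agent-analyzer excerpt with the documented defaults.
agent-analyzer:
  selector: ${SW_AGENT_ANALYZER:default}
  default:
    traceSamplingPolicySettingsFile: ${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}
    slowDBAccessThreshold: ${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100}
    forceSampleErrorSegment: ${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}
    segmentStatusAnalysisStrategy: ${SW_SEGMENT_STATUS_ANALYSIS_STRATEGY:FROM_SPAN_STATUS}
```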
SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOcRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OC_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_CONTEXT_PATH /   prometheus-fetcher default Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine. - -    - - enabledRules Enabled rules. SW_PROMETHEUS_FETCHER_ENABLED_RULES self   - - maxConvertWorker The maximize meter convert worker. SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER -1(by default, half the number of CPU core(s))   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. 
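As an example of enabling an optional receiver, the Zipkin receiver settings above map onto application.yml as sketched below; the selector value is illustrative, since the receiver is typically disabled until a provider is selected.

```yaml
# Sketch: enabling the Zipkin trace receiver with the documented defaults.
receiver-zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:default}   # selector value assumed for illustration
  default:
    restHost: ${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
    restPort: ${SW_RECEIVER_ZIPKIN_PORT:9411}
    restContextPath: ${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}
```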
SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - consumePartitions Indicates which PartitionId(s) of the topics is/are assigned to the OAP server. Separated by commas if multiple. SW_KAFKA_FETCHER_CONSUME_PARTITIONS -   - - isSharding True when OAP Server is in cluster. SW_KAFKA_FETCHER_IS_SHARDING false   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 100   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. 
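The kafka-fetcher settings listed above can likewise be sketched as an application.yml excerpt; the selector variable is an assumption, everything else follows the table.

```yaml
# Sketch: consuming native traces/metrics/logs from Kafka.
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}   # selector variable assumed for illustration
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    partitions: ${SW_KAFKA_FETCHER_PARTITIONS:3}
    replicationFactor: ${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}
    enableNativeProtoLog: ${SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG:true}
    enableNativeJsonLog: ${SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG:true}
```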
SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter grpc targetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - targetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. 
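For the dynamic configuration providers above, the following sketch selects Nacos as the configuration center; the SW_CONFIGURATION selector variable is an assumption, while the provider settings and defaults come from the table.

```yaml
# Sketch: Nacos as the dynamic configuration center.
configuration:
  selector: ${SW_CONFIGURATION:nacos}   # selector variable assumed for illustration
  nacos:
    serverAddr: ${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}
    port: ${SW_CONFIG_NACOS_SERVER_PORT:8848}
    period: ${SW_CONFIG_CONFIG_NACOS_PERIOD:60}
    username: ${SW_CONFIG_NACOS_USERNAME:""}
    password: ${SW_CONFIG_NACOS_PASSWORD:""}
```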
By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.0.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). 
SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - enableDatabaseSession Cache metrics data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute. SW_CORE_ENABLE_DATABASE_SESSION true   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. 
- System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. 
SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_SUPERDATASET_STORAGE_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. 
SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - driver H2 JDBC driver. SW_STORAGE_H2_DRIVER org.h2.jdbcx.JdbcDataSource   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5 SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. 
SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. 
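The BanyanDB storage settings above can be sketched the same way; the selector value is illustrative, and the settings and defaults are those documented above.

```yaml
# Sketch: BanyanDB storage with the documented defaults.
storage:
  selector: ${SW_STORAGE:banyandb}
  banyandb:
    host: ${SW_STORAGE_BANYANDB_HOST:127.0.0.1}
    port: ${SW_STORAGE_BANYANDB_PORT:17912}
    maxBulkSize: ${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}
    flushInterval: ${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}
    concurrentWriteThreads: ${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}
```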
SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. 
you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOcRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OC_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   prometheus-fetcher default Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine. - -    - - enabledRules Enabled rules. SW_PROMETHEUS_FETCHER_ENABLED_RULES self   - - maxConvertWorker The maximize meter convert worker. SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER -1(by default, half the number of CPU core(s))   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. 
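For illustration only (this snippet is not part of the vocabulary, and the module/selector layout is an assumed convention that may differ between releases): the kafka-fetcher settings listed above are normally written in application.yml using the ${ENV_VAR:default} substitution form, for example:

kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:-}        # assumed selector key; "-" keeps the fetcher disabled
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}   # initial connection to the Kafka cluster
    partitions: ${SW_KAFKA_FETCHER_PARTITIONS:3}                   # partitions for topics being created
    consumers: ${SW_KAFKA_FETCHER_CONSUMERS:1}                     # number of consumers to create
    enableNativeProtoLog: ${SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG:true}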
- skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query graphql - GraphQL query implementation. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. 
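As a hedged sketch only (the selector key and its none default are assumptions inferred from the providers listed above), exposing OAP self-observability through the Prometheus telemetry provider could look like this in application.yml:

telemetry:
  selector: ${SW_TELEMETRY:none}                       # assumed selector; none = no-op provider
  prometheus:
    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}      # binding host for Prometheus to fetch data
    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}         # binding port for Prometheus to fetch data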
SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter grpc targetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - targetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. 
By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.1.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). 
SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - enableDatabaseSession Cache metrics data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute. SW_CORE_ENABLE_DATABASE_SESSION true   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. 
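To make the preceding core settings concrete, here is a minimal application.yml sketch using the ${ENV_VAR:default} substitution form; only a few representative keys are shown, and the selector line is an assumption:

core:
  selector: ${SW_CORE:default}                    # assumed selector key for the core module
  default:
    restHost: ${SW_CORE_REST_HOST:0.0.0.0}        # binding IP of RESTful services
    restPort: ${SW_CORE_REST_PORT:12800}          # binding port of RESTful services
    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}        # binding IP of gRPC services
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}          # binding port of gRPC services
    recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3}   # lifecycle of record data, in days
    metricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7} # lifecycle of metrics data, in days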
- System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. 
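A hedged example of the cluster coordination settings above, shown for the zookeeper provider (the selector key is an assumption):

cluster:
  selector: ${SW_CLUSTER:standalone}                       # assumed selector; standalone needs no configuration
  zookeeper:
    namespace: ${SW_NAMESPACE:/}                           # root path used as the namespace
    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}    # Zookeeper cluster hosts and ports
    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000}      # retry back-off in milliseconds
    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3}             # maximum retry times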
SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_SUPERDATASET_STORAGE_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. 
SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - driver H2 JDBC driver. SW_STORAGE_H2_DRIVER org.h2.jdbcx.JdbcDataSource   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5 SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. 
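Sketch of how the JDBC-storage tag settings just described might appear in application.yml (the provider block name comes from the vocabulary; the selector line is an assumption):

storage:
  selector: ${SW_STORAGE:h2}                                                      # assumed selector; h2 is the demo storage
  postgresql:
    maxSizeOfArrayColumn: ${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}               # physical columns hosting one logical array column
    numOfSearchableValuesPerTag: ${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2} # values kept per tag key in a segment
    maxSizeOfBatchSql: ${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}                   # batch size of SQL execution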
SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. 
SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. 
you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OTEL_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   prometheus-fetcher default Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine. - -    - - enabledRules Enabled rules. SW_PROMETHEUS_FETCHER_ENABLED_RULES self   - - maxConvertWorker The maximize meter convert worker. SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER -1(by default, half the number of CPU core(s))   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. 
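Illustrative only (the selector key is an assumption): enabling the Zipkin receiver's Kafka collector described above would look roughly like this in application.yml:

receiver-zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:-}                                  # assumed selector; "-" keeps the receiver off
  default:
    enableHttpCollector: ${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}    # HTTP collector on the REST port
    restPort: ${SW_RECEIVER_ZIPKIN_REST_PORT:9411}
    enableKafkaCollector: ${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:false} # set to true to consume spans from Kafka
    kafkaBootstrapServers: ${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}
    kafkaTopic: ${SW_ZIPKIN_KAFKA_TOPIC:zipkin}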
SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query graphql - GraphQL query implementation. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. 
SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). 
SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter grpc targetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - targetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.2.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. 
SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. 
SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. 
SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - mysql-sharding - Sharding-Proxy for MySQL properties. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. 
SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - - dataSources The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml.The dataSource name should include the prefix \u0026ldquo;ds_\u0026rdquo; and separated by \u0026ldquo;,\u0026rdquo; and start from ds_0 SW_JDBC_SHARDING_DATA_SOURCES ds_0,ds_1   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. 
SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. 
- -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OTEL_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. 
SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. 
SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand; the logs are fetched and displayed in real time and are not persisted in any way. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default; please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for the Zipkin query API and supports the zipkin-lens UI. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from the DCS server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from the DCS server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. 
SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. 
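For example, a minimal sketch of how one of these variables maps onto its setting in application.yml, using the ${ENV_VAR:default} placeholder form the file relies on (the module, provider, settings, and defaults below are taken from the table above):

core:
  default:
    # Overridden by exporting SW_CORE_REST_PORT; otherwise the default 12800 applies.
    restPort: ${SW_CORE_REST_PORT:12800}
    # Same pattern for the gRPC port.
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}

Exporting SW_CORE_REST_PORT=8080 before starting the OAP changes the REST port without editing the file.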
The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.3.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. 
Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. 
- -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. 
SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. 
SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - mysql-sharding - Sharding-Proxy for MySQL properties. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - - dataSources The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml.The dataSource name should include the prefix \u0026ldquo;ds_\u0026rdquo; and separated by \u0026ldquo;,\u0026rdquo; and start from ds_0 SW_JDBC_SHARDING_DATA_SOURCES ds_0,ds_1   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. 
In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. 
SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. 
-    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OTEL_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. 
SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. 
SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). 
SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 request SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY     Note ¹ System Environment Variable name could be declared and changed in application.yml. 
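To make that note concrete, each setting is normally written as a ${ENV_VAR:default} placeholder, so the variable name can be renamed or the default changed directly in application.yml. A sketch using the Kafka exporter settings from this table; the SW_EXPORTER selector variable name and the exact nesting are assumptions.

```yaml
exporter:
  selector: ${SW_EXPORTER:default}   # selector variable name assumed
  default:
    enableKafkaTrace: ${SW_EXPORTER_ENABLE_KAFKA_TRACE:false}
    enableKafkaLog: ${SW_EXPORTER_ENABLE_KAFKA_LOG:false}
    kafkaBootstrapServers: ${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}
    kafkaTopicTrace: ${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}
    kafkaTopicLog: ${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}
```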
The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.4.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. 
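A minimal sketch of the core module settings introduced above, using the variable names and defaults from this table; the SW_CORE selector variable name is assumed, and only a few representative settings are shown.

```yaml
core:
  selector: ${SW_CORE:default}   # selector variable name assumed
  default:
    role: ${SW_CORE_ROLE:Mixed}                     # Mixed / Receiver / Aggregator
    restHost: ${SW_CORE_REST_HOST:0.0.0.0}
    restPort: ${SW_CORE_REST_PORT:12800}
    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
    recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3}     # days; traces, top-N records, logs
    metricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7}   # days; keep >= recordDataTTL
```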
SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. 
SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. 
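For cluster coordination, a sketch of selecting the zookeeper provider with the settings listed above; the SW_CLUSTER selector variable name is assumed, and the other providers (kubernetes, consul, etcd, Nacos) follow the same pattern.

```yaml
cluster:
  selector: ${SW_CLUSTER:zookeeper}   # selector variable name assumed
  zookeeper:
    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000}   # retry backoff in ms
    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3}
    enableACL: ${SW_ZK_ENABLE_ACL:false}
    schema: ${SW_ZK_SCHEMA:digest}
    expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}
```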
SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. 
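A sketch of the ElasticSearch/OpenSearch storage settings above, again using the table's variable names and defaults; the SW_STORAGE selector variable name is assumed.

```yaml
storage:
  selector: ${SW_STORAGE:elasticsearch}   # selector variable name assumed
  elasticsearch:
    namespace: ${SW_NAMESPACE:""}                       # prefix for all SkyWalking indexes
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost}
    user: ${SW_ES_USER:""}
    password: ${SW_ES_PASSWORD:""}
    dayStep: ${SW_STORAGE_DAY_STEP:1}                   # days covered by one index
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
```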
SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. 
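The BanyanDB rows above map to an alternative storage selection; a sketch, with the same caveat that the selector variable name is assumed.

```yaml
storage:
  selector: ${SW_STORAGE:banyandb}   # selector variable name assumed
  banyandb:
    host: ${SW_STORAGE_BANYANDB_HOST:127.0.0.1}
    port: ${SW_STORAGE_BANYANDB_PORT:17912}
    maxBulkSize: ${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}
    flushInterval: ${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}   # seconds
    concurrentWriteThreads: ${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}
```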
SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. 
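A sketch of the agent-analyzer settings described above; SW_AGENT_ANALYZER appears in the table as the module's own variable, while the exact nesting is assumed.

```yaml
agent-analyzer:
  selector: ${SW_AGENT_ANALYZER:default}
  default:
    traceSamplingPolicySettingsFile: ${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}
    slowDBAccessThreshold: ${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100}   # milliseconds
    forceSampleErrorSegment: ${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}
    segmentStatusAnalysisStrategy: ${SW_SEGMENT_STATUS_ANALYSIS_STRATEGY:FROM_SPAN_STATUS}
```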
SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. 
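Similarly, a sketch of the log-analyzer settings above; which LAL/MAL rule files exist is deployment specific, so the values shown are only the table's defaults, and the nesting is assumed.

```yaml
log-analyzer:
  selector: ${SW_LOG_ANALYZER:default}
  default:
    lalFiles: ${SW_LOG_LAL_FILES:default}   # LAL rule file names, comma separated, no extension
    malFiles: ${SW_LOG_MAL_FILES:""}        # MAL rule file names for log-derived metrics
```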
SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. 
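A sketch of the kafka-fetcher settings above; the SW_KAFKA_FETCHER selector variable name is assumed. As the table notes, setting a namespace prefixes every topic name and requires the matching plugin.kafka.namespace on the agent side.

```yaml
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}   # selector variable name assumed
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    partitions: ${SW_KAFKA_FETCHER_PARTITIONS:3}
    replicationFactor: ${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}
    consumers: ${SW_KAFKA_FETCHER_CONSUMERS:1}
    enableNativeProtoLog: ${SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG:true}
    enableNativeJsonLog: ${SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG:true}
```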
- skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. 
SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. 
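For dynamic configuration, a sketch of selecting the Nacos provider with the settings above; the SW_CONFIGURATION selector variable name is assumed, and the other providers listed (grpc, apollo, zookeeper, etcd, consul, k8s-configmap) are configured the same way.

```yaml
configuration:
  selector: ${SW_CONFIGURATION:nacos}   # selector variable name assumed
  nacos:
    serverAddr: ${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}
    port: ${SW_CONFIG_NACOS_SERVER_PORT:8848}
    period: ${SW_CONFIG_CONFIG_NACOS_PERIOD:60}   # seconds between config syncs
    username: ${SW_CONFIG_NACOS_USERNAME:""}
    password: ${SW_CONFIG_NACOS_PASSWORD:""}
```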
SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 request SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. 
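A sketch of the aws-firehose-receiver settings above with TLS turned on; the selector variable name and nesting are assumptions, and the key and certificate paths are placeholders.

```yaml
aws-firehose-receiver:
  selector: ${SW_RECEIVER_AWS_FIREHOSE:default}   # selector variable name assumed
  default:
    host: ${SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST:0.0.0.0}
    port: ${SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT:12801}
    firehoseAccessKey: ${SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY:""}
    enableTLS: ${SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS:true}                              # default is false
    tlsKeyPath: ${SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH:/path/to/key.pem}               # placeholder path
    tlsCertChainPath: ${SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH:/path/to/cert.pem} # placeholder path
```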
The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.5.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. 
SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. 
- -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). 
SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. 
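The HTTP URI recognition pattern settings above drive automatic endpoint-name grouping and pair with the ai-pipeline module's uriRecognitionServerAddr/uriRecognitionServerPort settings listed elsewhere in this vocabulary. A sketch, with the placement under core inferred only from the SW_CORE_ prefix of these variables.

```yaml
core:
  default:
    # Endpoint grouping via the external URI recognition service.
    syncPeriodHttpUriRecognitionPattern: ${SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN:10}        # seconds
    trainingPeriodHttpUriRecognitionPattern: ${SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN:60} # seconds
    maxHttpUrisNumberPerService: ${SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE:3000}
```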
SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. 
SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. 
Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. 
The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). 
- true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). 
SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). 
SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max Thtread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. 
SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 response SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS firhose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicate if enable HTTPS for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.6.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. 
The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. 
- 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration (in seconds) of querying eBPF profiling data from the database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count for querying eBPF profiling data from the database. - System CPU core size   - - uiMenuRefreshInterval The period (in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period (in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   cluster standalone - Standalone is suitable for running on a single node only. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum number of retries. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means no ACL token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. 
SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. 
SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   
- - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - targets Hosts with ports of the BanyanDB. SW_STORAGE_BANYANDB_TARGETS 127.0.0.1:17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. 
- -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. 
SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. 
SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service. Note: if you print secrets in the logs, they are also visible to the UI. For the sake of security, this feature is disabled by default; set this configuration to enable it manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module provides the Zipkin query API and supports the zipkin-lens UI. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default lookback for traces and autocompleteTags (1 day, in milliseconds). SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (in seconds) for serviceNames, remoteServiceNames and spanNames. SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default maximum size of traces query. SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default lookback on the UI for searching traces (15 minutes, in milliseconds). SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for the PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No-op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from the DCS server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from the DCS server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). 
SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   debugging-query default       - - keywords4MaskingSecretsOfConfig Include the list of keywords to filter configurations including secrets. Separate keywords by a comma. SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS user,password,token,accessKey,secretKey,authentication   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. 
By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle event data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 request SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable names can be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","excerpt":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by …","ref":"/docs/main/v9.7.0/en/setup/backend/configuration-vocabulary/","title":"Configuration Vocabulary"},{"body":"Context injection If you want to fetch the SkyWalking Context in your PHP code, which is super helpful for debugging and observability, you can enable the configuration item skywalking_agent.inject_context.\nDescription skywalking_agent.inject_context\nWhether to enable automatic injection of SkyWalking context variables (such as SW_TRACE_ID). For php-fpm mode, it will be injected into the $_SERVER variable. 
For swoole mode, it will be injected into the $request-\u0026gt;server variable.\nConfiguration [skywalking_agent] extension = skywalking_agent.so skywalking_agent.inject_context = On Usage For php-fpm mode:\n\u0026lt;?php echo $_SERVER[\u0026#34;SW_SERVICE_NAME\u0026#34;]; // get service name echo $_SERVER[\u0026#34;SW_INSTANCE_NAME\u0026#34;]; // get instance name echo $_SERVER[\u0026#34;SW_TRACE_ID\u0026#34;]; // get trace id For swoole mode:\n\u0026lt;?php $http = new Swoole\\Http\\Server(\u0026#39;127.0.0.1\u0026#39;, 9501); $http-\u0026gt;on(\u0026#39;request\u0026#39;, function ($request, $response) { echo $request-\u0026gt;server[\u0026#34;SW_SERVICE_NAME\u0026#34;]; // get service name  echo $request-\u0026gt;server[\u0026#34;SW_INSTANCE_NAME\u0026#34;]; // get instance name  echo $request-\u0026gt;server[\u0026#34;SW_TRACE_ID\u0026#34;]; // get trace id }); ","excerpt":"Context injection If you want to fetch the SkyWalking Context in your PHP code, which is super …","ref":"/docs/skywalking-php/next/en/configuration/context-injection/","title":"Context injection"},{"body":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. 
The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","excerpt":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies …","ref":"/docs/main/latest/en/setup/backend/backend-continuous-profiling/","title":"Continuous Profiling"},{"body":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  
Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. 
This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","excerpt":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies …","ref":"/docs/main/next/en/setup/backend/backend-continuous-profiling/","title":"Continuous Profiling"},{"body":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period for monitoring data, which can also be understood as the most recent duration. Count: The number of times the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration. Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. 
If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","excerpt":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-continuous-profiling/","title":"Continuous Profiling"},{"body":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. 
URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","excerpt":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-continuous-profiling/","title":"Continuous Profiling"},{"body":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. 
This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. 
By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","excerpt":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-continuous-profiling/","title":"Continuous Profiling"},{"body":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations would guide you.\n  Compiling\n Compiling Guidance    Release\n Release Guidance    ","excerpt":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations …","ref":"/docs/skywalking-infra-e2e/latest/en/contribution/readme/","title":"Contribution"},{"body":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations would guide you.\n  Compiling\n Compiling Guidance    Release\n Release Guidance    ","excerpt":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations …","ref":"/docs/skywalking-infra-e2e/next/en/contribution/readme/","title":"Contribution"},{"body":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations would guide you.\n  Compiling\n Compiling Guidance    Release\n Release Guidance    ","excerpt":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/contribution/readme/","title":"Contribution"},{"body":" Contributor   Project Contributions Ranking  SkyWalking Showcase     kezhenxu94   109 1  wu-sheng   39 2  wankai123   34 3  mrproliu   33 4  Fine0830   6 5  JaredTan95   4 6  pg-yang   4 7  arugal   4 8  weixiang1862   3 9  dashanji   2 10  innerpeacez   2 11  yswdqz   2 12  peachisai   2 13  CodePrometheus   2 14  hanahmily   1 15  JohnDuncan5171   1 16  nisiyong   1 17  Superskyyy   1 18  azibhassan   1 19  chenxiaohu   1 20  jmjoy   1 21  sacloudy   1 22    SkyWalking Website     wu-sheng   405 1  Jtrust   133 2  kezhenxu94   83 3  mrproliu   50 4  hanahmily   33 5  rootsongjc   20 6  fgksgf   18 7  Superskyyy   18 8  jmjoy   16 9  JaredTan95   14 10  Fine0830   12 11  arugal   12 12  dmsolr   12 13  innerpeacez   11 14  BFergerson   11 15  zhaoyuguang   9 16  wankai123   9 17  dashanji   8 18  TinyAllen   7 19  weixiang1862   7 20  EvanLjp   5 21  peng-yongsheng   5 22  heyanlong   5 23  Humbertzhang   4 24  yswdqz   4 25  yanmaipian   4 26  lujiajing1126   4 27  FingerLeader   3 28  gxthrj   3 29  Ax1an   3 30  YunaiV   2 31  LIU-WEI-git   2 32  langyan1022   2 33  pg-yang   2 34  libinglong   2 35  alonelaval   2 36  nisiyong   2 37  x22x22   2 38  HHoflittlefish777   2 39  CzyerChen   2 40  
cheenursn   2 41  thebouv   2 42  Alipebt   2 43  PGDream   1 44  liuhaoyang   1 45  LiteSun   1 46  liqiangz   1 47  geomonlin   1 48  lijing-21   1 49  leimeng-ma   1 50  klboke   1 51  kehuili   1 52  JoeCqupt   1 53  jjlu521016   1 54  jacentsao   1 55  hutaishi   1 56  hailin0   1 57  fushiqinghuan111   1 58  chopin-d   1 59  apmplus   1 60  jxnu-liguobin   1 61  zhang98722   1 62  yimeng   1 63  xu1009   1 64  xiongshiyan   1 65  xdRight   1 66   bing**   1 67  weiqiang333   1 68  vcjmhg   1 69  tristan-tsl   1 70  tisonkun   1 71  tevahp   1 72  sebbASF   1 73  FeynmanZhou   1 74  peachisai   1 75  nic-chen   1 76  lucperkins   1 77  lilien1010   1 78  Dylan-beicheng   1 79  devkanro   1 80  Johor03   1 81  ButterBright   1 82  harshaskumar05   1 83  kylixs   1 84  crl228   1 85  Humbedooh   1 86  thisisgpy   1 87  CharlesMaster   1 88  andrewgkew   1 89  wayilau   1 90  feelwing1314   1 91  adriancole   1 92  agile6v   1 93   394102339**   1 94  YoungHu   1 95  wang-yeliang   1 96  withlin   1 97  moonming   1 98   983708408**   1 99    SkyWalking     wu-sheng   2967 1  peng-yongsheng   874 2  kezhenxu94   470 3   ascrutae**   381 4  ascrutae   352 5  acurtain   251 6  wankai123   212 7  mrproliu   176 8  hanahmily   176 9  Fine0830   133 10  JaredTan95   100 11  dmsolr   83 12  arugal   76 13  zhaoyuguang   65 14  lytscu   64 15  wingwong-knh   53 16   zhangxin**   47 17  BFergerson   45 18  pg-yang   28 19   ascrutae**   28 20  lujiajing1126   28 21  Ax1an   27 22  yswdqz   26 23  wayilau   26 24  EvanLjp   25 25  zifeihan   25 26  IanCao   23 27   295198088**   22 28  weixiang1862   22 29  x22x22   22 30  innerpeacez   21 31   394102339**   20 32  Superskyyy   19 33  clevertension   17 34  liuhaoyang   17 35  withlin   17 36  liqiangz   16 37  xbkaishui   16 38   renliangbu**   16 39  carlvine500   15 40  candyleer   15 41  peachisai   14 42  hailin0   12 43  zhangkewei   11 44  bai-yang   11 45  heyanlong   11 46  tom-pytel   10 47  TinyAllen   10 48  adermxzs   10 49  songzhendong   10 50   55846420**   10 51  wallezhang   10 52  Jtrust   9 53  IluckySi   9 54  qxo   9 55  smartboy37597   9 56  CzyerChen   9 57  alonelaval   8 58  heihaozi   8 59  wendal   8 60  LIU-WEI-git   8 61  CodePrometheus   8 62  Humbertzhang   8 63  toffentoffen   8 64  CalvinKirs   8 65  tristaZero   7 66   liufei**   6 67  zhyyu   6 68  stalary   6 69  honganan   6 70   lxin96**   6 71  jjtyro   6 72  xuanyu66   6 73  J-Cod3r   6 74  YunaiV   5 75  langyan1022   5 76  Liu-XinYuan   5 77  SataQiu   5 78  Cool-Coding   5 79  harvies   5 80  xu1009   5 81  wuwen5   5 82   55846420**   5 83  tuohai666   5 84  flycash   5 85  JohnNiang   5 86  yaojingguo   5 87  fgksgf   5 88  adriancole   5 89  codeglzhang   4 90  yu199195   4 91  yangyiweigege   4 92  VictorZeng   4 93  TeslaCN   4 94  LiWenGu   4 95  haoyann   4 96  chidaodezhongsheng   4 97  xinzhuxiansheng   4 98  aiyanbo   4 99  darcyda1   4 100  sN0wpeak   4 101  FatihErdem   4 102  chenhaipeng   4 103  nisiyong   4 104  Z-Beatles   4 105  YczYanchengzhe   4 106  cyberdak   4 107  dagmom   4 108  codelipenghui   4 109  dominicqi   4 110  dio   3 111  libinglong   3 112  liuzc9   3 113   lizl9**   3 114  neeuq   3 115  snakorse   3 116  xiaospider   3 117  xiaoy00   3 118  Indifer   3 119  huangyoje   3 120  s00373198   3 121  cyejing   3 122  Ahoo-Wang   3 123  yanfch   3 124  devkanro   3 125  oflebbe   3 126  rabajaj0509   3 127  Shikugawa   3 128  LinuxSuRen   3 129  ScienJus   3 130  liu-junchi   3 131  WillemJiang   3 132  chenpengfei   3 133 
 gnr163   3 134  jiang1997   3 135  jmjoy   2 136  viswaramamoorthy   2 137  vcjmhg   2 138  tzy1316106836   2 139  terranhu   2 140  scolia   2 141  osiriswd   2 142   2278966200**   2 143  novayoung   2 144  muyun12   2 145  mgsheng   2 146  makingtime   2 147  klboke   2 148  katelei6   2 149  karott   2 150  jinlongwang   2 151  hutaishi   2 152  Hen1ng   2 153  kuaikuai   2 154  lkxiaolou   2 155  purgeyao   2 156  michaelsembwever   2 157   bwh12398**   2 158  YunfengGao   2 159  WildWolfBang   2 160  juzhiyuan   2 161  SoberChina   2 162  KangZhiDong   2 163  mufiye   2 164   yushuqiang**   2 165  zxbu   2 166  yazong   2 167  xzyJavaX   2 168  xcaspar   2 169  wuguangkuo   2 170  webb2019   2 171  evanxuhe   2 172  yang-xiaodong   2 173  RaigorJiang   2 174  Qiliang   2 175  Oliverwqcwrw   2 176  buxingzhe   2 177  tsuilouis   2 178  leizhiyuan   2 179  Jargon9   2 180  potiuk   2 181   iluckysi   2 182  kim-up   2 183  HarryFQG   2 184  easonyipj   2 185  willseeyou   2 186  AlexanderWert   2 187  ajanthan   2 188  chen-ni   2 189  844067874   2 190  elk-g   2 191  dsc6636926   2 192  heihei180   2 193  amwyyyy   2 194  dengliming   2 195  cuiweiwei   2 196  coki230   2 197  coder-yqj   2 198  cngdkxw   2 199  chenmudu   2 200  beckhampu   2 201  cheetah012   2 202  ZhuWang1112   2 203  zaunist   2 204  shichaoyuan   2 205  XhangUeiJong   2 206  Switch-vov   2 207  SummerOfServenteen   2 208  maxiaoguang64   1 209  maclong1989   1 210  sourcelliu   1 211  margauxcabrera   1 212  Yebemeto   1 213  momo0313   1 214  Xlinlin   1 215   cheatbeater**   1 216  lxliuxuankb   1 217  lu-xiaoshuang   1 218  lpcy   1 219  louis-zhou   1 220  lngmountain   1 221   lixin40**   1 222  liuyanggithup   1 223  linliaoy   1 224   xlz35429674**   1 225   seiferhu**   1 226   seiferhu**   1 227   72372815\u0026#43;royal-dargon**   1 228   72775443\u0026#43;raybi-asus**   1 229  ralphgj   1 230  qiuyu-d   1 231  thanq   1 232  probeyang   1 233  carrypann   1 234  pkxiuluo   1 235  FeynmanZhou   1 236  ooi22   1 237  onecloud360   1 238  nileblack   1 239  chenyi19851209   1 240  neatlife   1 241  lijial   1 242  inversionhourglass   1 243  huliangdream   1 244  hsoftxl   1 245  hi-sb   1 246  Heguoya   1 247  hardzhang   1 248  haotian2015   1 249  gzlicanyi   1 250  guyukou   1 251  gy09535   1 252  guochen2   1 253  kylixs   1 254  gonedays   1 255  guodongq   1 256  ggndnn   1 257  GerryYuan   1 258  geekymv   1 259  geektcp   1 260  leemove   1 261  lazycathome   1 262  langke93   1 263  landonzeng   1 264  lagagain   1 265  ksewen   1 266  killGC   1 267  kikupotter   1 268  kevinyyyy   1 269  ken-duck   1 270  kayleyang   1 271  aeolusheath   1 272  justeene   1 273  jsbxyyx   1 274  zhangjianweibj   1 275  jianglin1008   1 276  jialong121   1 277  jjlu521016   1 278   zhousiliang163**   1 279   45602777\u0026#43;zhangzhanhong2**   1 280   zcai2**   1 281   zaygrzx**   1 282   yuyujulin**   1 283   yurunchuan**   1 284   182148432**   1 285   wu_yan_tao**   1 286   yanmingbi**   1 287   yangxb2010000**   1 288   yanbinwei2851**   1 289   978861768**   1 290   48479214\u0026#43;xuxiawei**   1 291   9313869\u0026#43;xuchangjunjx**   1 292   yexingren23**   1 293   1903636211**   1 294   xiaozheng**   1 295   281890899**   1 296   66098854\u0026#43;tangshan-brs**   1 297   88840672\u0026#43;wangwang89**   1 298   loushuiyifan**   1 299   305542043**   1 300   381321959**   1 301   zhangliang**   1 302   kzd666**   1 303   45203823\u0026#43;gzshilu**   1 304   28707699**   1 305   yqjdcyy**   1 306 
  tanjunchen20**   1 307   liuzhengyang**   1 308   hey.yanlong**   1 309   zygfengyuwuzu**   1 310   tmac.back**   1 311   xtha**   1 312   345434645**   1 313   zoidbergwill**   1 314   tbdp.hi**   1 315   tanzhen**   1 316   973117150**   1 317   89574863\u0026#43;4ydx3906**   1 318   sxzaihua**   1 319   hpy253215039**   1 320   814464284**   1 321   stone_wlg**   1 322   stenio**   1 323   hoolooday**   1 324   songzhe_fish**   1 325   wang-yaozheng**   1 326   sk163**   1 327   101088629\u0026#43;simonluo345**   1 328   simonlei**   1 329   41794887\u0026#43;sialais**   1 330   31874857\u0026#43;sikelangya**   1 331   mestarshine**   1 332   34833891\u0026#43;xdright**   1 333   bing**   1 334   23226334**   1 335   wujun8**   1 336   zzhxccw**   1 337   qrw_email**   1 338   wind2008hxy**   1 339   36367435\u0026#43;whl12345**   1 340   45580443\u0026#43;whfjam**   1 341   zwj777**   1 342   xiongchuang**   1 343   lyzhang1999**   1 344   52819067\u0026#43;weiqiang-w**   1 345   55177318\u0026#43;vcjmhg**   1 346   46754544\u0026#43;tristan-tsl**   1 347   wander4096**   1 348   136082619**   1 349   montecristosoul**   1 350  Lin1997   1 351  coolbeevip   1 352  LazyLei   1 353  leileiluoluo   1 354  lt5227   1 355  mostcool   1 356  Alipebt   1 357  zhentaoJin   1 358  kagaya85   1 359  augustowebd   1 360  j-s-3   1 361  JohnDuncan5171   1 362  jbampton   1 363  zouyx   1 364  JoeKerouac   1 365  Linda-pan   1 366  jim075960758   1 367  jiekun   1 368  c1ay   1 369   chenglei**   1 370   chenyao**   1 371  npmmirror   1 372  nikitap492   1 373  nickwongwong   1 374  ZhuoSiChen   1 375  mikechengwei   1 376  mikkeschiren   1 377  zeaposs   1 378  TheRealHaui   1 379  doddi   1 380  marcingrzejszczak   1 381  maolie   1 382  mahmoud-anwer   1 383  donotstopplz   1 384  liuhaoXD   1 385  linghengqian   1 386  darcydai   1 387  sdanzo   1 388  chanjarster   1 389  damonxue   1 390  cvimer   1 391  CommissarXia   1 392  ChengDaqi2023   1 393  CharlesMaster   1 394  shiluo34   1 395  brucewu-fly   1 396   qq327568824**   1 397  ArjenDavid-sjtu   1 398  AngryMills   1 399   andyzzlms**   1 400  AirTrioa   1 401  lunchboxav   1 402  50168383   1 403  1095071913   1 404  Jedore   1 405  mustangxu   1 406   zhongjianno1**   1 407  DeadLion   1 408  Lighfer   1 409  Henry75m39   1 410  onurccn   1 411  tankilo   1 412  Gallardot   1 413  AbelCha0   1 414  bootsrc   1 415  FingerLiu   1 416  Felixnoo   1 417  DuanYuePeng   1 418  efekaptan   1 419  qijianbo010   1 420  qqeasonchen   1 421  devon-ye   1 422   295198088**   1 423   c feng   1 424  buzuotaxuan   1 425  mmm9527   1 426  wolfboys   1 427  beiwangnull   1 428  amogege   1 429  alidisi   1 430  alexkarezin   1 431  aix3   1 432  adamni135   1 433  absorprofess   1 434  ZhengBing520   1 435  ZhHong   1 436  chenbeitang   1 437  ZS-Oliver   1 438  panniyuyu   1 439  fuhuo   1 440  ethan256   1 441  eoeac   1 442  echooymxq   1 443  dzx2018   1 444  IceSoda177   1 445  dvsv2   1 446  drgnchan   1 447  donbing007   1 448  dogblues   1 449  divyakumarjain   1 450  dd1k   1 451  dashanji   1 452  cutePanda123   1 453  cui-liqiang   1 454  cuishuang   1 455  crystaldust   1 456  wbpcode   1 457  TerrellChen   1 458  Technoboy-   1 459  StreamLang   1 460  stevehu   1 461  kun-song   1 462   826245622**   1 463  compilerduck   1 464  SheltonZSL   1 465  sergicastro   1 466  zhangsean   1 467  yymoth   1 468  ruibaby   1 469  rlenferink   1 470  remicollet   1 471  RandyAbernethy   1 472  QHWG67   1 473  pengyongqiang666   1 474  
Patrick0308   1 475  yuqichou   1 476  Miss-you   1 477  ycoe   1 478   me**   1 479  yanickxia   1 480  XinweiLyu   1 481  liangyepianzhou   1 482  Wooo0   1 483  ViberW   1 484  wilsonwu   1 485  moonming   1 486  wyt   1 487  victor-yi   1 488  Videl   1 489  trustin   1 490  TomMD   1 491  ThisSeanZhang   1 492  gitter-badger   1 493  Adrian Cole    494  github-actions[bot]    495  dependabot[bot]    496    Booster UI     Fine0830   425 1  wu-sheng   15 2  heyanlong   12 3  pg-yang   9 4  CzyerChen   3 5  yswdqz   3 6  techbirds   3 7  Superskyyy   2 8  peachisai   2 9  zhourunjie1988   2 10  xu1009   2 11  weixiang1862   2 12  lsq27   2 13  innerpeacez   2 14  horochx   2 15  drgnchan   2 16  smartboy37597   2 17  CodePrometheus   2 18  WitMiao   1 19  liuyib   1 20  arugal   1 21  wuwen5   1 22  songzhendong   1 23  pw151294   1 24  kezhenxu94   1 25  jiang1997   1 26  hutaishi   1 27  heihei180   1 28  hadesy   1 29  ZhuWang1112   1 30  XinweiLyu   1 31  liangyepianzhou   1 32  SimonHu1993   1 33  LinuxSuRen   1 34  binbin666   1 35  marcingrzejszczak   1 36  toffentoffen   1 37  mahmoud-anwer   1 38  donotstopplz   1 39  BFergerson   1 40    Plugin for Service Topology     Fine0830   63 1  wu-sheng   4 2  Superskyyy   1 3   fine**   1 4    Java Agent     wu-sheng   2747 1  peng-yongsheng   874 2   ascrutae**   381 3  ascrutae   352 4  kezhenxu94   275 5  acurtain   251 6  hanahmily   165 7  JaredTan95   96 8  dmsolr   87 9  mrproliu   68 10  arugal   66 11  zhaoyuguang   65 12  lytscu   64 13  Fine0830   53 14   zhangxin**   47 15  wingwong-knh   45 16  BFergerson   44 17  wankai123   31 18   ascrutae**   28 19  Ax1an   27 20  wayilau   26 21  zifeihan   26 22  EvanLjp   25 23  IanCao   23 24   295198088**   22 25  x22x22   22 26   394102339**   20 27  xu1009   19 28  pg-yang   19 29  clevertension   17 30  withlin   17 31  xbkaishui   16 32   renliangbu**   16 33  liuhaoyang   16 34  lujiajing1126   16 35  candyleer   15 36  carlvine500   15 37  nisiyong   13 38  liqiangz   13 39  hailin0   12 40  wallezhang   11 41  zhangkewei   11 42  bai-yang   11 43  Jtrust   10 44  heyanlong   10 45  xzyJavaX   10 46  songzhendong   10 47  adermxzs   10 48   55846420**   10 49  TinyAllen   10 50  heihaozi   9 51  CzyerChen   9 52  qxo   9 53  IluckySi   9 54  alonelaval   8 55  wendal   8 56  tristaZero   7 57  Humbertzhang   7 58  zhyyu   7 59  J-Cod3r   6 60  Cool-Coding   6 61  jjtyro   6 62  honganan   6 63  stalary   6 64  wuwen5   6 65   liufei**   6 66  gzlicanyi   6 67   lxin96**   6 68  tom-pytel   6 69  xuanyu66   6 70  devkanro   6 71  hutaishi   5 72  harvies   5 73  langyan1022   5 74  Liu-XinYuan   5 75  YunaiV   5 76  SataQiu   5 77  adriancole   5 78  darcyda1   5 79  yaojingguo   5 80  JohnNiang   5 81  flycash   5 82  tuohai666   5 83  cyberdak   5 84  codelipenghui   5 85  peachisai   5 86   55846420**   5 87  LiWenGu   4 88  kylixs   4 89  TeslaCN   4 90  haoyann   4 91  chidaodezhongsheng   4 92  xinzhuxiansheng   4 93  VictorZeng   4 94  xiaqi1210   4 95  yu199195   4 96  chanjarster   4 97  FatihErdem   4 98  aiyanbo   4 99  sN0wpeak   4 100  fgksgf   4 101  Oliverwqcwrw   4 102  Z-Beatles   4 103  alanlvle   4 104  dagmom   4 105  innerpeacez   4 106  dominicqi   4 107  weixiang1862   4 108  vcjmhg   3 109  cyejing   3 110  s00373198   3 111  huangyoje   3 112  Indifer   3 113  xiaoy00   3 114  snakorse   3 115  neeuq   3 116   lizl9**   3 117  libinglong   3 118  gnr163   3 119  chenpengfei   3 120  YczYanchengzhe   3 121  WillemJiang   3 122  liu-junchi   3 123  
ScienJus   3 124  oflebbe   3 125  yanfch   3 126  Ahoo-Wang   3 127  dio   3 128  codeglzhang   3 129  osiriswd   2 130  scolia   2 131  terranhu   2 132  tzy1316106836   2 133  viswaramamoorthy   2 134  webb2019   2 135  gglzf4   2 136  kuaikuai   2 137   2278966200**   2 138  novayoung   2 139  muyun12   2 140  mgsheng   2 141  makingtime   2 142  lpcy   2 143  klboke   2 144  karott   2 145  jinlongwang   2 146  Hen1ng   2 147  Superskyyy   2 148  seifeHu   2 149  lkxiaolou   2 150  purgeyao   2 151  PepoRobert   2 152  michaelsembwever   2 153  marcingrzejszczak   2 154   bwh12398**   2 155  YunfengGao   2 156  WildWolfBang   2 157  shichaoyuan   2 158  juzhiyuan   2 159  SoberChina   2 160  KangZhiDong   2 161   yushuqiang**   2 162  zxbu   2 163  yazong   2 164  xcaspar   2 165  wuguangkuo   2 166  geekymv   2 167  yang-xiaodong   2 168  Shikugawa   2 169  Qiliang   2 170  buxingzhe   2 171  tsuilouis   2 172  Leibnizhu   2 173  leizhiyuan   2 174  CalvinKirs   2 175  Jargon9   2 176  potiuk   2 177   iluckysi   2 178  2han9wen71an   2 179  844067874   2 180  HarryFQG   2 181  ForrestWang123   2 182  ajanthan   2 183  AlexanderWert   2 184  willseeyou   2 185  ArjenDavid-sjtu   2 186  evanxuhe   2 187  elk-g   2 188  dsc6636926   2 189  amwyyyy   2 190  dengliming   2 191  dashanji   2 192  cylx3126   2 193  cuiweiwei   2 194  coki230   2 195  SummerOfServenteen   2 196  Switch-vov   2 197  tjiuming   2 198  XhangUeiJong   2 199  zaunist   2 200  cheetah012   2 201  beckhampu   2 202  chenmudu   2 203  coder-yqj   2 204  cngdkxw   2 205  githubcheng2978   1 206  FeynmanZhou   1 207  onecloud360   1 208  nileblack   1 209  neatlife   1 210  Xlinlin   1 211  momo0313   1 212  Yebemeto   1 213  margauxcabrera   1 214  sourcelliu   1 215  maxiaoguang64   1 216  lxliuxuankb   1 217  lvxiao1   1 218  guodongq   1 219  louis-zhou   1 220   lixin40**   1 221  pkxiuluo   1 222  carrypann   1 223  probeyang   1 224  qiaoxingxing   1 225  thanq   1 226  qiuyu-d   1 227  ggndnn   1 228  ralphgj   1 229  raybi-asus   1 230  GerryYuan   1 231  geektcp   1 232  mestarshine   1 233   chenyao**   1 234  sikelangya   1 235  simonlei   1 236  sk163   1 237  zhangjianweibj   1 238  JoeCqupt   1 239  jialong121   1 240  jjlu521016   1 241  hyhyf   1 242  hxd123456   1 243  huliangdream   1 244  xiaomiusa87   1 245  hsoftxl   1 246  hi-sb   1 247  Heguoya   1 248  hardzhang   1 249  haotian2015   1 250  guyukou   1 251  gy09535   1 252  rechardguo   1 253  gonedays   1 254  liuyanggithup   1 255  linliaoy   1 256  lijial   1 257  leemove   1 258  lbc97   1 259  lazycathome   1 260  langke93   1 261  landonzeng   1 262  ksewen   1 263  killGC   1 264  kikupotter   1 265  kevinyyyy   1 266  kayleyang   1 267  aeolusheath   1 268  justeene   1 269  jsbxyyx   1 270  jmjoy   1 271   tmac.back**   1 272   345434645**   1 273   zoidbergwill**   1 274   zhousiliang163**   1 275   45602777\u0026#43;zhangzhanhong2**   1 276   zcai2**   1 277   zaygrzx**   1 278   yuyujulin**   1 279   yurunchuan**   1 280   74546965\u0026#43;yswdqz**   1 281   182148432**   1 282   wu_yan_tao**   1 283   yanmingbi**   1 284   yangxb2010000**   1 285   yanbinwei2851**   1 286   249021408**   1 287   9313869\u0026#43;xuchangjunjx**   1 288   xiongchuang**   1 289   cheatbeater**   1 290   66098854\u0026#43;tangshan-brs**   1 291   42414099\u0026#43;yanye666**   1 292   893979653**   1 293   88840672\u0026#43;wangwang89**   1 294   loushuiyifan**   1 295   lcbiao34**   1 296   305542043**   1 297   381321959**   1 298   orezsilence** 
  1 299   zhangliang**   1 300   kzd666**   1 301   45203823\u0026#43;gzshilu**   1 302   28707699**   1 303   tanjunchen20**   1 304   70845636\u0026#43;mufiye**   1 305   liuzhengyang**   1 306   zygfengyuwuzu**   1 307   lyzhang1999**   1 308   wqp1987**   1 309  w2dp   1 310  weiqiang-w   1 311  tristan-tsl   1 312  tincopper   1 313  angty   1 314  tedli   1 315  tbdpmi   1 316   tanzhen**   1 317  tangxqa   1 318  sxzaihua   1 319  hepyu   1 320  surechen   1 321  stone-wlg   1 322  stenio2011   1 323  zhe1926   1 324   xubinghaozs**   1 325   yexingren23**   1 326   1903636211**   1 327   1612202137**   1 328   281890899**   1 329   34833891\u0026#43;xdright**   1 330   bing**   1 331   23226334**   1 332   wujun8**   1 333   809697469**   1 334   zzhxccw**   1 335   qrw_email**   1 336   wind2008hxy**   1 337   63728367\u0026#43;will2020-power**   1 338   36367435\u0026#43;whl12345**   1 339   45580443\u0026#43;whfjam**   1 340   zwj777**   1 341   weihubeats**   1 342  augustowebd   1 343  jbampton   1 344  zouyx   1 345  JoeKerouac   1 346  Linda-pan   1 347  leihuazhe   1 348   zhongjianno1**   1 349  DeadLion   1 350  Lighfer   1 351  kim-up   1 352  hardy4yooz   1 353  onurccn   1 354  guillaume-alvarez   1 355  GuiSong01   1 356  tankilo   1 357  Gallardot   1 358  AbelCha0   1 359  nikitap492   1 360  nickwongwong   1 361  ZhuoSiChen   1 362  mikkeschiren   1 363  zeaposs   1 364  TheRealHaui   1 365  maolie   1 366  donotstopplz   1 367  liuhaoXD   1 368  lishuo5263   1 369  Lin1997   1 370  coolbeevip   1 371  LazyLei   1 372  leileiluoluo   1 373  lt5227   1 374  zhentaoJin   1 375  kagaya85   1 376  CharlesMaster   1 377  shiluo34   1 378  wapkch   1 379  thisisgpy   1 380  brucewu-fly   1 381  BigXin0109   1 382  bmk15897   1 383   qq327568824**   1 384  AngryMills   1 385   andyzzlms**   1 386  guoxiaod   1 387  adaivskenan   1 388  Alceatraz   1 389  AirTrioa   1 390  lunchboxav   1 391  50168383   1 392  1095071913   1 393  bootsrc   1 394  ForestWang123   1 395  FingerLiu   1 396  DuanYuePeng   1 397  efekaptan   1 398  qijianbo010   1 399  qqeasonchen   1 400  DominikHubacek   1 401  devon-ye   1 402  darknesstm   1 403  zhaoxiaojie0415   1 404  darcydai   1 405  sdanzo   1 406  dachuan9e   1 407  cvimer   1 408  CommissarXia   1 409  Chenfx-git   1 410  furaul   1 411  HScarb   1 412  c1ay   1 413   295198088**   1 414   c feng   1 415  buzuotaxuan   1 416  mmm9527   1 417  beiwangnull   1 418  andotorg   1 419  amogege   1 420  alexkarezin   1 421  aix3   1 422  adamni135   1 423  zimmem   1 424  ZhHong   1 425  chenbeitang   1 426  ZS-Oliver   1 427  panniyuyu   1 428  fuhuo   1 429  eoeac   1 430  life-   1 431  echooymxq   1 432  dzx2018   1 433  IceSoda177   1 434  dvsv2   1 435  drgnchan   1 436  donbing007   1 437  divyakumarjain   1 438  AlchemyDing   1 439  dd1k   1 440  cutePanda123   1 441  cui-liqiang   1 442  crystaldust   1 443  jinrongzhang   1 444  wbpcode   1 445  TerrellChen   1 446  Technoboy-   1 447  stevehu   1 448  kun-song   1 449   826245622**   1 450  compilerduck   1 451  sergicastro   1 452  zhangsean   1 453  yymoth   1 454  SWHHEART   1 455  ruibaby   1 456  rlenferink   1 457  RickyLau   1 458  RandyAbernethy   1 459  QHWG67   1 460  Patrick0308   1 461   chenglei**   1 462  yuqichou   1 463  yoyofx   1 464  Miss-you   1 465  ycoe   1 466   me**   1 467  yanickxia   1 468  yangyulely   1 469  Wooo0   1 470  ViberW   1 471  wilsonwu   1 472  moonming   1 473  victor-yi   1 474  Videl   1 475  trustin   1 476  TomMD   1 477  
ThisSeanZhang   1 478  gitter-badger   1 479    Python Agent     kezhenxu94   97 1  Superskyyy   63 2  tom-pytel   47 3  alonelaval   21 4  jiang1997   14 5  Humbertzhang   10 6  Jedore   5 7  ZEALi   4 8  katelei6   4 9  SheltonZSL   3 10  jaychoww   3 11  FAWC438   3 12  wu-sheng   2 13  probeyang   2 14  langyizhao   2 15  arcosx   2 16  zkscpqm   1 17  wuwen5   1 18  dafu-wu   1 19  VxCoder   1 20  taskmgr   1 21  Forstwith   1 22  fuhuo   1 23  dcryans   1 24   32413353\u0026#43;cooolr**   1 25  c1ay   1 26  chestarss   1 27  alidisi   1 28  XinweiLyu   1 29  TomMD   1 30  CodePrometheus   1 31  shenxiangzhuang   1 32  doddi   1 33  sungitly   1 34  wzy960520   1 35  JarvisG495   1 36  JaredTan95   1 37  fgksgf   1 38  zgfh   1 39    NodeJS Agent     kezhenxu94   61 1  tom-pytel   38 2  ruleeeer   4 3  BFergerson   3 4  wu-sheng   3 5  michaelzangl   2 6  alanlvle   2 7  tianyk   2 8  ErosZy   1 9  QuanjieDeng   1 10  TonyKingdom   1 11  liu-zhizhu   1 12   wxb17742006482**   1 13  nd-lqj   1 14  wuwen5   1 15    Go Agent     mrproliu   61 1  CodePrometheus   8 2  Alipebt   8 3  wu-sheng   6 4  LinuxSuRen   4 5  ShyunnY   2 6  IceSoda177   2 7  vearne   2 8  rfyiamcool   2 9  ethan256   2 10  jiekun   2 11  zheheBao   1 12  xuyue97   1 13  jarvis-u   1 14  icodeasy   1 15  YenchangChan   1 16  kikoroc   1 17  darknos   1 18  Ecostack   1 19  Ruff-nono   1 20  0o001   1 21  lujiajing1126   1 22  GlqEason   1 23    Rust Agent     jmjoy   39 1  wu-sheng   20 2  Shikugawa   5 3  tisonkun   4 4  CherishCai   2 5  dkkb   2 6  kezhenxu94   1 7    PHP Agent     jmjoy   74 1  heyanlong   12 2  phanalpha   4 3  wu-sheng   2 4  matikij   1 5    Client JavaScript     Fine0830   143 1  wu-sheng   33 2  arugal   19 3  Lighfer   2 4  kezhenxu94   2 5  tianyk   2 6  wuwen5   2 7  Leo555   1 8  qinhang3   1 9  min918   1 10  tthallos   1 11  i7guokui   1 12  aoxls   1 13  givingwu   1 14  Jtrust   1 15  JaredTan95   1 16  AliceTWu   1 17  airene   1 18    Nginx LUA Agent     wu-sheng   50 1  dmsolr   26 2  membphis   10 3  moonming   7 4  mrproliu   6 5  spacewander   3 6  kezhenxu94   2 7  WALL-E   2 8  arugal   2 9  wangrzneu   2 10  yxudong   2 11  JaredTan95   2 12  jeremie1112   1 13  dingdongnigetou   1 14  CalvinKirs   1 15  lilien1010   1 16  Jijun   1 17  Dofine-dufei   1 18  alonelaval   1 19  Frapschen   1 20  tzssangglass   1 21    Kong Agent     dmsolr   15 1  wu-sheng   4 2  kezhenxu94   2 3  CalvinKirs   1 4    SkyWalking Satellite     mrproliu   64 1  EvanLjp   32 2  kezhenxu94   20 3  gxthrj   7 4  wu-sheng   6 5  wangrzneu   2 6  BFergerson   1 7  fgksgf   1 8  CalvinKirs   1 9  guangdashao   1 10  inversionhourglass   1 11  nic-chen   1 12  arugal   1 13    Kubernetes Event Exporter     kezhenxu94   16 1  wu-sheng   6 2  fgksgf   4 3  dmsolr   2 4  CalvinKirs   1 5    SkyWalking Rover     mrproliu   92 1  wu-sheng   5 2  spacewander   3 3  jelipo   2 4  hkmdxlftjf   1 5  IluckySi   1 6  LinuxSuRen   1 7  caiwc   1 8  kezhenxu94   1 9    SkyWalking CLI     kezhenxu94   79 1  mrproliu   46 2  fgksgf   44 3  wu-sheng   11 4  hanahmily   6 5  try-agaaain   5 6  JarvisG495   4 7  arugal   4 8  alonelaval   3 9  BFergerson   2 10  heyanlong   2 11  Alexxxing   1 12  Superskyyy   1 13  clk1st   1 14  innerpeacez   1 15    Kubernetes Helm     innerpeacez   58 1  kezhenxu94   38 2  wu-sheng   32 3  hanahmily   19 4  mrproliu   6 5  JaredTan95   6 6  ButterBright   4 7  dashanji   3 8  rh-at   2 9  chengshiwen   2 10  eric-sailfish   1 11  geffzhang   1 12  glongzh   1 13  chenvista   1 14  
swartz-k   1 15  tristan-tsl   1 16  vision-ken   1 17   wang_weihan**   1 18  wayilau   1 19  williamyao1982   1 20  zshrine   1 21  aikin-vip   1 22  wankai123   1 23  SeanKilleen   1 24  ScribblerCoder   1 25  rabajaj0509   1 26  CalvinKirs   1 27  carllhw   1 28  zalintyre   1 29  Yangfisher1   1 30  aviaviavi   1 31    SkyWalking Cloud on Kubernetes     hanahmily   59 1  dashanji   26 2  kezhenxu94   8 3  mrproliu   5 4  weixiang1862   4 5  wu-sheng   3 6  ESonata   2 7  jichengzhi   2 8  heyanlong   1 9  hwzhuhao   1 10  SzyWilliam   1 11   rolandma**   1 12  robberphex   1 13  toffentoffen   1 14  CalvinKirs   1 15  fgksgf   1 16  Duncan-tree-zhou   1 17  ButterBright   1 18  BFergerson   1 19    Data Collect Protocol     wu-sheng   76 1  mrproliu   27 2  arugal   11 3  kezhenxu94   11 4  liuhaoyang   4 5  EvanLjp   3 6  Shikugawa   3 7  peng-yongsheng   2 8  zifeihan   2 9  Switch-vov   2 10  dmsolr   1 11  hanahmily   1 12  fgksgf   1 13  nacx   1 14  yaojingguo   1 15  SataQiu   1 16  stalary   1 17  Z-Beatles   1 18  liqiangz   1 19  snakorse   1 20  xu1009   1 21  heyanlong   1 22  Liu-XinYuan   1 23    Query Protocol     wu-sheng   99 1  mrproliu   39 2  wankai123   17 3  arugal   15 4  peng-yongsheng   11 5  kezhenxu94   10 6  hanahmily   9 7  x22x22   3 8  JaredTan95   3 9  BFergerson   1 10  MiracleDx   1 11  fgksgf   1 12  liuhaoyang   1 13  Fine0830   1 14  chenmudu   1 15  liqiangz   1 16  heyanlong   1 17    Go API     mrproliu   57 1  wu-sheng   17 2  kezhenxu94   6 3  arugal   3 4  fgksgf   2 5   dalekliuhan**   2 6  gxthrj   2 7  liqiangz   2 8  EvanLjp   2 9  JaredTan95   1 10  CalvinKirs   1 11   mrproliu**   1 12    BanyanDB     hanahmily   238 1  lujiajing1126   98 2  Fine0830   23 3  WuChuSheng1   21 4  ButterBright   16 5  wu-sheng   12 6  HHoflittlefish777   10 7  hailin0   9 8  zesiar0   6 9  sivasathyaseeelan   5 10  mikechengwei   5 11  Sylvie-Wxr   5 12  innerpeacez   4 13  sacloudy   4 14  caicancai   4 15  tisonkun   3 16  DevPJ9   2 17  LinuxSuRen   2 18  sksDonni   2 19  mrproliu   2 20  BFergerson   1 21  Muyu-art   1 22  CalvinKirs   1 23  qazxcdswe123   1 24  achintya-7   1 25  e1ijah1   1 26  kezhenxu94   1 27    BanyanDB Java Client     lujiajing1126   44 1  wu-sheng   22 2  hanahmily   15 3  kezhenxu94   2 4  hailin0   1 5    BanyanDB Helm     ButterBright   6 1  wu-sheng   5 2  hanahmily   3 3  wankai123   1 4  kezhenxu94   1 5    Agent Test Tool     dmsolr   13 1  kezhenxu94   6 2  mrproliu   5 3  wu-sheng   5 4  arugal   4 5  nisiyong   2 6  zhyyu   2 7  EvanLjp   1 8  yaojingguo   1 9  CalvinKirs   1 10  LeePui   1 11  marcingrzejszczak   1 12  Shikugawa   1 13  dagmom   1 14  harvies   1 15  alonelaval   1 16  jmjoy   1 17  pg-yang   1 18  OrezzerO   1 19    SkyWalking Eyes     kezhenxu94   108 1  fgksgf   19 2  wu-sheng   16 3  zooltd   7 4  emschu   6 5  tisonkun   5 6  jmjoy   5 7  keiranmraine   4 8  MoGuGuai-hzr   4 9  mrproliu   4 10  dongzl   3 11  spacewander   3 12  gdams   3 13  rovast   2 14  elijaholmos   2 15  ryanmrichard   2 16  freeqaz   2 17  heyanlong   1 18  zifeihan   1 19  mohammedtabish0   1 20  acelyc111   1 21  Xuanwo   1 22  xiaoyawei   1 23  stumins   1 24  steveklabnik   1 25  chengshiwen   1 26  crholm   1 27  fulmicoton   1 28  Two-Hearts   1 29  kevgo   1 30  halacs   1 31  FushuWang   1 32  Juneezee   1 33  ddlees   1 34  dave-tucker   1 35  antgamdia   1 36  guilload   1 37    SkyWalking Infra E2E     mrproliu   35 1  kezhenxu94   26 2  Humbertzhang   10 3  fgksgf   9 4  chunriyeqiongsaigao   8 5  ethan256   4 
6  Superskyyy   3 7  dashanji   3 8  lujiajing1126   2 9  JohnNiang   2 10  CalvinKirs   1 11  FeynmanZhou   1 12  arugal   1 13  heyanlong   1 14  wu-sheng   1 15    (Archived) Docker Files     hanahmily   34 1  wu-sheng   14 2  JaredTan95   8 3  kezhenxu94   4 4   lixin40**   2 5  aviaviavi   1 6  andrewgkew   1 7  carlvine500   1 8  kkl129   1 9  tristan-tsl   1 10  arugal   1 11  heyanlong   1 12    (Archived) Rocketbot UI     TinyAllen   179 1  Fine0830   126 2  x22x22   27 3  wu-sheng   20 4  JaredTan95   15 5  kezhenxu94   13 6  heihaozi   8 7  bigflybrother   8 8  Jtrust   7 9  dmsolr   5 10  zhaoyuguang   5 11  alonelaval   4 12  tom-pytel   4 13  hanahmily   3 14  aeolusheath   3 15  arugal   3 16  hailin0   2 17  Indifer   2 18   zhaoyuguang**   2 19  xuchangjunjx   2 20  wuguangkuo   2 21  whfjam   2 22  shiluo34   2 23  ruibaby   2 24  wilsonwu   2 25  constanine   2 26  horber   2 27  liqiangz   2 28  leemove   2 29  fuhuo   1 30   denghaobo**   1 31  jianglin1008   1 32  codelipenghui   1 33  lunamagic1978   1 34  novayoung   1 35  probeyang   1 36  dominicqi   1 37  stone-wlg   1 38  surechen   1 39  wallezhang   1 40  wuwen5   1 41   bing**   1 42  xu1009   1 43  huangyoje   1 44  heyanlong   1 45  llissery   1 46   437376068**   1 47  aiyanbo   1 48  BFergerson   1 49  efekaptan   1 50  yanfch   1 51  grissom-grissom   1 52  grissomsh   1 53  Humbertzhang   1 54  kagaya85   1 55  liuhaoyang   1 56  tsuilouis   1 57  masterxxo   1 58  zeaposs   1 59  QHWG67   1 60  Doublemine   1 61  zaunist   1 62  xiaoxiangmoe   1 63  c1ay   1 64  dagmom   1 65  fredster33   1 66    (Archived) Legacy UI     hanahmily   227 1  wu-sheng   123 2  peng-yongsheng   73 3  ascrutae   36 4  TinyAllen   18 5   zhangxin**   7 6   295198088**   5 7   qiu_jy**   5 8  zhaoyuguang   4 9  zuohl   4 10  wendal   3 11  jjlu521016   2 12  withlin   2 13  bai-yang   1 14  zhangkewei   1 15  wynn5a   1 16  clevertension   1 17  cloudgc   1 18   baiyang06**   1 19  WillemJiang   1 20  liuhaoyang   1 21  leizhiyuan   1 22  ajanthan   1 23    (Archived) OAL Generator     wu-sheng   64 1  peng-yongsheng   15 2    SkyAPM-dotnet     liuhaoyang   127 1  snakorse   28 2  wu-sheng   20 3  lu-xiaoshuang   8 4  ElderJames   7 5  yang-xiaodong   7 6  pengweiqhca   7 7  Ahoo-Wang   6 8  inversionhourglass   5 9  feiyun0112   4 10  sampsonye   4 11  KawhiWei   3 12  zeaposs   3 13  kaanid   3 14  qq362220083   3 15  withlin   2 16   xiaoweiyu**   2 17  witskeeper   2 18  beckjin   2 19  ShaoHans   2 20  misaya   1 21  itsvse   1 22  zhujinhu21   1 23  xclw2000   1 24  startewho   1 25  refactor2   1 26  rider11-dev   1 27  linkinshi   1 28  limfriend   1 29  guochen2   1 30  WeihanLi   1 31  SeanKilleen   1 32  cnlangzi   1 33  joesdu   1 34  SpringHgui   1 35  dimaaan   1 36  ChaunceyLin5152   1 37  catcherwong   1 38  BoydenYubin   1 39  andyliyuze   1 40  AlseinX   1 41    cpp2sky     Shikugawa   55 1   wbphub**   13 2  wuwen5   2 3  wu-sheng   2 4  makefriend8   2 5  wbpcode   2 6  JayInnn   1 7    SourceMarker     BFergerson   761 1  MrMineO5   9 2  voqaldev   3 3  chess-equality   3 4  javamak   2 5    Java Plugin Extensions     wu-sheng   17 1  ascrutae   8 2  JaredTan95   2 3  raybi-asus   2 4  zifeihan   2 5  nisiyong   1 6  bitray   1 7  li20020439   1 8  pg-yang   1 9    uranus     harvies   5 1  wu-sheng   1 2    (outdated) CN Documentations     kezhenxu94   23 1  SataQiu   8 2  wu-sheng   4 3  nikyotensai   2 4  ccccye123   2 5  Frapschen   2 6  shalk   2 7  wujun8   2 8  zhangnew   1 9  yazong   1 10  
xiaoping378   1 11  thelight1   1 12   lilulu**   1 13  Hen1ng   1 14  harvies   1 15  dagmom   1 16  alienwow   1 17  system-designer   1 18  Superskyyy   1 19  JaredTan95   1 20  fgksgf   1 21  xing-yin   1 22    (Retired) Transporter Plugins     codeglzhang   3 1  wu-sheng   3 2  dmsolr   2 3  Jargon9   2 4  kezhenxu94   1 5    (Retired) Go2Sky     arugal   27 1  wu-sheng   15 2  hanahmily   12 3  mrproliu   12 4  kagaya85   3 5  easonyipj   2 6  nacx   2 7  Luckyboys   2 8  fgksgf   1 9  Humbertzhang   1 10  JaredTan95   1 11  JJ-Jasmin   1 12  withlin   1 13  yaojingguo   1 14  Just-maple   1 15  kuaikuai   1 16  zhuCheer   1 17  chwjbn   1 18  kehuili   1 19  kezhenxu94   1 20  limfriend   1 21  matianjun1   1 22  lokichoggio   1 23   bing**   1 24  liweiv   1 25    (Retired) Go2Sky Plugins     arugal   15 1  kagaya85   7 2  mrproliu   6 3  wu-sheng   5 4  elza2   3 5  matianjun1   1 6  dgqypl   1 7  zaunist   1 8  kehuili   1 9  newyue588cc   1 10  royal-dargon   1 11    (Retired) SkyAPM PHP Agent     heyanlong   379 1   wangbo78978**   40 2  lpf32   30 3   songzhian**   17 4  songzhian   11 5  wu-sheng   9 6  jmjoy   8 7  remicollet   4 8  kilingzhang   3 9   songzhian**   3 10  xonze   3 11  iamif3000   2 12  mikkeschiren   2 13  anynone   2 14  lvxiao1   2 15  xinfeingxia85   2 16  cyhii   1 17  silverkorn   1 18  AlpherJang   1 19  LJX22222   1 20  MrYzys   1 21  rovast   1 22  SP-66666   1 23  tinyu0   1 24  xudianyang   1 25  huohuanhuan   1 26  kezhenxu94   1 27  limfriend   1 28  ljf-6666   1 29  qjgszzx   1 30  dickens7   1 31  xybingbing   1 32  yaowenqiang   1 33  az13js   1 34    (Retired) SkyAPM Node.js     ascrutae   74 1  kezhenxu94   13 2  wu-sheng   9 3  zouyx   4 4  Jozdortraz   1 5  a526672351   1 6  rovast   1 7  Runrioter   1 8  jasper-zsh   1 9  TJ666   1 10     Loading...  ","excerpt":"Contributor   Project Contributions Ranking  SkyWalking Showcase     kezhenxu94   109 1  wu-sheng …","ref":"/contributors/","title":"Contributors"},{"body":"Create and detect Service Hierarchy Relationship Motivation Service relationship is one of the most important parts of collaborating data in the APM. Service Map is supported for years from tracing to trace analysis. But still due to the means of the probs, a service could be detected from multiple methods, which is the same service in multiple layers. v9 proposal mentioned the concept of the layer. Through this proposal, we plan to establish a kernel-level concept to connect services detected in different layers.\nArchitecture Graph There is no significant architecture-level change.\nPropose Changes The data sources of SkyWalking APM have covered traditional agent installed service, VMs, cloud infra, k8s, etc.\nFor example, a Java service is built in a docker image and is going to be deployed in a k8s cluster, with a sidecar injected due to service mesh managed. The following services would be able to detect cross-layers\n Java service, detected as Java agent installed. A pod of k8s service is detected, due to k8s layer monitoring. Side car perspective service is detected. VM Linux monitoring for a general process, as the container of Java service is deployed on this specific k8s node. Virtual databases, caches, and queues conjectured by agents, and also monitored through k8s monitoring, even traffic monitored by service mesh.  
All these services have logical connections or are identical from a physical perspective, but currently they may only be similar in name(s), with no further metadata connection.
With those, we have a chance to move one step ahead and connect the dots of the whole infrastructure. This means, for the first time, we are going to establish the connections among services detected from various layers.
In v10, I am proposing a new concept, Service Hierarchy. Service Hierarchy defines the relationships of existing services in various layers. With more kinds of agent tech involved (such as eBPF) and deployment tools (such as the operator and agent injector), we could inject relative service/instance metadata and try to build the connections, including:
The agent injector injects the pod ID into the system env, then the Java agent could report the relationship through system properties.
Rover (the eBPF agent) moves its next iteration toward k8s monitoring rather than profiling, and adds the capability to establish connections among k8s pods and service mesh services.
Meanwhile, as usual with a new major version change, I would expect UI-side changes as well. The UI should have flexible capabilities to show hierarchy services from the service view and topology view. Also, we could consider a deeper view of the instance part as well.
Imported Dependencies libs and their licenses. No new library is planned to be added to the codebase.
Compatibility
About the protocol, there should be no breaking changes, but enhancements only. New query protocols (service-hierarchy and instance-hierarchy) are planned to be added, and some new fields should be added to queries such as the topology query and instance dependencies, to list relative services/instances from other layers directly rather than through an extra query.
About the data structure, because a new data concept is going to be created, service-hierarchy-related data models are going to be added. If the user is using Elasticsearch or BanyanDB, this should be compatible; they just need to re-run init-mode OAP to extend the existing models. But for SQL database users (MySQL, PostgreSQL), this could require new tables.
GraphQL query protocol
A new query protocol hierarchy.graphqls is going to be added.

type HierarchyRelatedService {
  # The related service ID.
  id: ID!
  # The literal name of the #id.
  name: String!
  # The related service's Layer name.
  layer: String!
  normal: Boolean!
}

type HierarchyRelatedInstance {
  # The related instance ID.
  id: ID!
  # The literal name of the #id. Instance Name.
  name: String!
  # Service id
  serviceId: ID!
  # The literal name of the #serviceId.
  serviceName: String!
  # The service's Layer name.
  # Service could have multiple layers, this is the layer of the service that the instance belongs to.
  layer: String!
  normal: Boolean!
}

type HierarchyServiceRelation {
  upperService: HierarchyRelatedService!
  lowerService: HierarchyRelatedService!
}

type HierarchyInstanceRelation {
  upperInstance: HierarchyRelatedInstance!
  lowerInstance: HierarchyRelatedInstance!
}

type ServiceHierarchy {
  relations: [HierarchyServiceRelation!]!
}

type InstanceHierarchy {
  relations: [HierarchyInstanceRelation!]!
}

type LayerLevel {
  # The layer name.
  layer: String!
  # The layer level.
  # The level of the upper service should be greater than the level of the lower service.
  level: Int!
}

extend type Query {
  # Query the service hierarchy, based on the given service. Will recursively return all related layers' services in the hierarchy.
  getServiceHierarchy(serviceId: ID!, layer: String!): ServiceHierarchy!
  # Query the instance hierarchy, based on the given instance. Will return all directly related layers' instances in the hierarchy, not recursive.
  getInstanceHierarchy(instanceId: ID!, layer: String!): InstanceHierarchy!
  # List layer hierarchy levels. The layer levels are defined in the `hierarchy-definition.yml`.
  listLayerLevels: [LayerLevel!]!
}

New data models

service_hierarchy_relation

| Column name | Data type | Description |
| ----------- | --------- | ----------- |
| id | String | serviceId.servicelayer-relatedServiceId.relatedServiceLayer |
| service_id | String | upper service id |
| service_layer | int | upper service layer value |
| related_service_id | String | lower service id |
| related_service_layer | int | lower service layer value |
| time_bucket | long | |

instance_hierarchy_relation

| Column name | Data type | Description |
| ----------- | --------- | ----------- |
| id | String | instanceId.servicelayer-relateInstanceId.relatedServiceLayer |
| instance_id | String | upper instance id |
| service_layer | int | upper service layer value |
| related_instance_id | String | lower instance id |
| related_service_layer | int | lower service layer value |
| time_bucket | long | |

Internal APIs
Internal APIs should be exposed in the Core module to support building the hierarchy relationship.

public void toServiceHierarchyRelation(String upperServiceName, Layer upperServiceLayer, String lowerServiceName, Layer lowerServiceLayer);
public void toInstanceHierarchyRelation(String upperInstanceName, String upperServiceName, Layer upperServiceLayer, String lowerInstanceName, String lowerServiceName, Layer lowerServiceLayer);

Hierarchy Definition
All layers' hierarchy relations are defined in the hierarchy-definition.yml file. OAP will check the hierarchy relations before building them and use the matching rules to auto-match the relations. Here is an example:

# Define the hierarchy of service layers; the layers under a specific layer are the related lower layers of that layer.
# The relation could have a matching rule for auto-matching, which is defined in the `auto-matching-rules` section.
# All the layers are defined in the file `org.apache.skywalking.oap.server.core.analysis.Layers.java`.
hierarchy:
  MESH:
    MESH_DP: name
    K8S_SERVICE: short-name
  MESH_DP:
    K8S_SERVICE: short-name
  GENERAL:
    K8S_SERVICE: lower-short-name-remove-ns
  MYSQL:
    K8S_SERVICE: ~
  VIRTUAL_DATABASE:
    MYSQL: ~

# Use Groovy scripts to define the matching rules; the input parameters are the upper service (u) and the lower service (l), and the return value is a boolean,
# which is used to match the relation between the upper service (u) and the lower service (l) on the different layers.
auto-matching-rules:
  # the name of the upper service is equal to the name of the lower service
  name: "{ (u, l) -> u.name == l.name }"
  # the short name of the upper service is equal to the short name of the lower service
  short-name: "{ (u, l) -> u.shortName == l.shortName }"
  # remove the namespace from the lower service short name
  lower-short-name-remove-ns: "{ (u, l) -> u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf('.')) }"

# The hierarchy level of the service layer; the level is used to define the order of the service layers for UI presentation.
# The level of the upper service should be greater than the level of the lower service in the `hierarchy` section.
layer-levels:
  MESH: 3
  GENERAL: 3
  VIRTUAL_DATABASE: 3
  MYSQL: 2
  MESH_DP: 1
  K8S_SERVICE: 0

General usage docs
This proposal doesn't impact the way end users use SkyWalking. The most notable change will be in the UI. 
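For example, the UI could fetch the full hierarchy of one service with a single call against the proposed getServiceHierarchy query. The request below is only an illustrative sketch written against the hierarchy.graphqls schema above; the serviceId value is a hypothetical placeholder, not a real ID.

query {
  # Recursively fetch every cross-layer relation of a GENERAL-layer service.
  # "example-service-id" is a placeholder; a real deployment would pass an encoded service ID.
  getServiceHierarchy(serviceId: "example-service-id", layer: "GENERAL") {
    relations {
      upperService { id name layer normal }
      lowerService { id name layer normal }
    }
  }
}

Because the query is recursive across layers, one response like this is enough to drive the cross-layer links mentioned below.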
On the service dashboard and topology map, the user should be able to see the hierarchy relationship, which means other services in other layers are logically the same as the current one. UI would provide the link to jump to the relative service\u0026rsquo;s dashboard.\nNo Goal This proposal doesn\u0026rsquo;t cover all the logic about how to detect the service hierarchy structure. All those should be in a separate SWIP.\n","excerpt":"Create and detect Service Hierarchy Relationship Motivation Service relationship is one of the most …","ref":"/docs/main/next/en/swip/swip-1/","title":"Create and detect Service Hierarchy Relationship"},{"body":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. 
*/ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... 
SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","excerpt":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-tracer/","title":"Create Span"},{"body":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. 
The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... 
SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","excerpt":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-tracer/","title":"Create Span"},{"body":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEnteySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... 
SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... // Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); for (Map.Entry\u0026lt;String, String\u0026gt; entry : map.entrySet()) { if (next.hasNext()) { next = next.next(); if (entry.getKey().equals(next.getHeadKey())) next.setHeadValue(entry.getValue()); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... 
ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); for (Map.Entry\u0026lt;String, String\u0026gt; entry : map.entrySet()) { if (next.hasNext()) { next = next.next(); if (entry.getKey().equals(next.getHeadKey())) next.setHeadValue(entry.getValue()); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... 
spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","excerpt":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-tracer/","title":"Create Span"},{"body":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. 
Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... 
SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","excerpt":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-tracer/","title":"Create Span"},{"body":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... 
SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... 
ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... 
spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","excerpt":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-tracer/","title":"Create Span"},{"body":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a collection of a class of resources. Each resource has a name unique to a group.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new group to the database\u0026rsquo;s metadata registry repository. If the group does not currently exist, create operation will create the schema.\nExamples of creating $ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store group data points. Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nGet operation Get operation gets a group\u0026rsquo;s schema.\nExamples of getting $ bydbctl group get -g sw_metric Update operation Update operation updates a group\u0026rsquo;s schema.\nExamples of updating If we want to change the ttl of the data in this group to be 1 day, use the command:\n$ bydbctl group update -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 1 EOF Delete operation Delete operation deletes a group\u0026rsquo;s schema.\nExamples of deleting $ bydbctl group delete -g sw_metric List operation The list operation shows all groups' schema.\nExamples $ bydbctl group list API Reference GroupService v1\n","excerpt":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a …","ref":"/docs/skywalking-banyandb/latest/crud/group/","title":"CRUD Groups"},{"body":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a collection of a class of resources. 
Each resource has a name unique to a group.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new group to the database\u0026rsquo;s metadata registry repository. If the group does not currently exist, create operation will create the schema.\nExamples of creating $ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store group data points. Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nGet operation Get operation gets a group\u0026rsquo;s schema.\nExamples of getting $ bydbctl group get -g sw_metric Update operation Update operation updates a group\u0026rsquo;s schema.\nExamples of updating If we want to change the ttl of the data in this group to be 1 day, use the command:\n$ bydbctl group update -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 1 EOF Delete operation Delete operation deletes a group\u0026rsquo;s schema.\nExamples of deleting $ bydbctl group delete -g sw_metric List operation The list operation shows all groups' schema.\nExamples $ bydbctl group list API Reference GroupService v1\n","excerpt":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a …","ref":"/docs/skywalking-banyandb/next/crud/group/","title":"CRUD Groups"},{"body":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a collection of a class of resources. Each resource has a name unique to a group.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new group to the database\u0026rsquo;s metadata registry repository. If the group does not currently exist, create operation will create the schema.\nExamples of creating $ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store group data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nGet operation Get operation gets a group\u0026rsquo;s schema.\nExamples of getting $ bydbctl group get -g sw_metric Update operation Update operation updates a group\u0026rsquo;s schema.\nExamples of updating If we want to change the ttl of the data in this group to be 1 day, use the command:\n$ bydbctl group update -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 1 EOF Delete operation Delete operation deletes a group\u0026rsquo;s schema.\nExamples of deleting $ bydbctl group delete -g sw_metric List operation The list operation shows all groups' schema.\nExamples $ bydbctl group list API Reference GroupService v1\n","excerpt":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/group/","title":"CRUD Groups"},{"body":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index rule binding is a bridge to connect several index rules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule binding to the database\u0026rsquo;s metadata registry repository. If the index rule binding does not currently exist, create operation will create the schema.\nExamples An index rule binding belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a index rule binding. The subject(stream/measure) and index rule MUST live in the same group with the binding.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRuleBinding data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new indexRuleBinding:\n$ bydbctl indexRuleBinding create -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The YAML contains:\n rules: references to the name of index rules. subject: stream or measure\u0026rsquo;s name and catalog. begin_at and expire_at: the TTL of this binding.  
Get operation Get(Read) operation gets an index rule binding\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRuleBinding get -g sw_stream -n stream_binding Update operation Update operation update an index rule binding\u0026rsquo;s schema.\nExamples updating $ bydbctl indexRuleBinding update -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic # Remove this rule # - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The new YAML removed the index rule extended_tags\u0026rsquo;s binding.\nDelete operation Delete operation delete an index rule binding\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRuleBinding delete -g sw_stream -n stream_binding List operation List operation list all index rule bindings in a group.\nExamples of listing $ bydbctl indexRuleBinding list -g sw_stream API Reference indexRuleBindingService v1\n","excerpt":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index …","ref":"/docs/skywalking-banyandb/latest/crud/index_rule_binding/","title":"CRUD indexRuleBindings"},{"body":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index rule binding is a bridge to connect several index rules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule binding to the database\u0026rsquo;s metadata registry repository. If the index rule binding does not currently exist, create operation will create the schema.\nExamples An index rule binding belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a index rule binding. The subject(stream/measure) and index rule MUST live in the same group with the binding.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRuleBinding data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new indexRuleBinding:\n$ bydbctl indexRuleBinding create -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The YAML contains:\n rules: references to the name of index rules. subject: stream or measure\u0026rsquo;s name and catalog. begin_at and expire_at: the TTL of this binding.  
Get operation Get(Read) operation gets an index rule binding\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRuleBinding get -g sw_stream -n stream_binding Update operation Update operation update an index rule binding\u0026rsquo;s schema.\nExamples updating $ bydbctl indexRuleBinding update -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic # Remove this rule # - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The new YAML removed the index rule extended_tags\u0026rsquo;s binding.\nDelete operation Delete operation delete an index rule binding\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRuleBinding delete -g sw_stream -n stream_binding List operation List operation list all index rule bindings in a group.\nExamples of listing $ bydbctl indexRuleBinding list -g sw_stream API Reference indexRuleBindingService v1\n","excerpt":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index …","ref":"/docs/skywalking-banyandb/next/crud/index_rule_binding/","title":"CRUD indexRuleBindings"},{"body":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index rule binding is a bridge to connect several index rules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule binding to the database\u0026rsquo;s metadata registry repository. If the index rule binding does not currently exist, create operation will create the schema.\nExamples An index rule binding belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a index rule binding. The subject(stream/measure) and index rule MUST live in the same group with the binding.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRuleBinding data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new indexRuleBinding:\n$ bydbctl indexRuleBinding create -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The YAML contains:\n rules: references to the name of index rules. subject: stream or measure\u0026rsquo;s name and catalog. begin_at and expire_at: the TTL of this binding.  
Get operation Get(Read) operation gets an index rule binding\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRuleBinding get -g sw_stream -n stream_binding Update operation Update operation update an index rule binding\u0026rsquo;s schema.\nExamples updating $ bydbctl indexRuleBinding update -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic # Remove this rule # - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The new YAML removed the index rule extended_tags\u0026rsquo;s binding.\nDelete operation Delete operation delete an index rule binding\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRuleBinding delete -g sw_stream -n stream_binding List operation List operation list all index rule bindings in a group.\nExamples of listing $ bydbctl indexRuleBinding list -g sw_stream API Reference indexRuleBindingService v1\n","excerpt":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/index_rule_binding/","title":"CRUD indexRuleBindings"},{"body":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how to generate indices based on tags and the index type. IndexRule should bind to a subject(stream or measure) through an IndexRuleBinding to generate proper indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule to the database\u0026rsquo;s metadata registry repository. If the index rule does not currently exist, create operation will create the schema.\nExamples of creating An index rule belongs to its subjects' group. We should create such a group if there is no such group.\nThe command supposes that the index rule will bind to streams. So it creates a CATALOG_STREAM group here.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_stream catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRule data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the next command will create a new index rule:\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_TREE location: LOCATION_GLOBAL EOF This YAML creates an index rule which uses the tag trace_id to generate a TREE_TYPE index which is located at GLOBAL.\nGet operation Get(Read) operation gets an index rule\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRule get -g sw_stream -n trace_id Update operation Update operation updates an index rule\u0026rsquo;s schema.\nExamples of updating This example changes the type from TREE to INVERTED.\n$ bydbctl indexRule update -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED location: LOCATION_GLOBAL EOF Delete operation Delete operation deletes an index rule\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRule delete -g sw_stream -n trace_id List operation List operation list all index rules' schema in a group.\nExamples of listing $ bydbctl indexRule list -g sw_stream API Reference indexRuleService v1\n","excerpt":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how …","ref":"/docs/skywalking-banyandb/latest/crud/index_rule/","title":"CRUD IndexRules"},{"body":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how to generate indices based on tags and the index type. IndexRule should bind to a subject(stream or measure) through an IndexRuleBinding to generate proper indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule to the database\u0026rsquo;s metadata registry repository. If the index rule does not currently exist, create operation will create the schema.\nExamples of creating An index rule belongs to its subjects' group. We should create such a group if there is no such group.\nThe command supposes that the index rule will bind to streams. So it creates a CATALOG_STREAM group here.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_stream catalog: CATALOG_STREAM resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRule data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the next command will create a new index rule:\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED EOF This YAML creates an index rule which uses the tag trace_id to generate a TYPE_INVERTED index.\nGet operation Get(Read) operation gets an index rule\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRule get -g sw_stream -n trace_id Update operation Update operation updates an index rule\u0026rsquo;s schema.\nExamples of updating This example changes the type from TREE to INVERTED.\n$ bydbctl indexRule update -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED EOF Delete operation Delete operation deletes an index rule\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRule delete -g sw_stream -n trace_id List operation List operation list all index rules' schema in a group.\nExamples of listing $ bydbctl indexRule list -g sw_stream API Reference indexRuleService v1\n","excerpt":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how …","ref":"/docs/skywalking-banyandb/next/crud/index_rule/","title":"CRUD IndexRules"},{"body":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how to generate indices based on tags and the index type. IndexRule should bind to a subject(stream or measure) through an IndexRuleBinding to generate proper indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule to the database\u0026rsquo;s metadata registry repository. If the index rule does not currently exist, create operation will create the schema.\nExamples of creating An index rule belongs to its subjects' group. We should create such a group if there is no such group.\nThe command supposes that the index rule will bind to streams. So it creates a CATALOG_STREAM group here.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_stream catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRule data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the next command will create a new index rule:\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_TREE location: LOCATION_GLOBAL EOF This YAML creates an index rule which uses the tag trace_id to generate a TREE_TYPE index which is located at GLOBAL.\nGet operation Get(Read) operation gets an index rule\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRule get -g sw_stream -n trace_id Update operation Update operation updates an index rule\u0026rsquo;s schema.\nExamples of updating This example changes the type from TREE to INVERTED.\n$ bydbctl indexRule update -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED location: LOCATION_GLOBAL EOF Delete operation Delete operation deletes an index rule\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRule delete -g sw_stream -n trace_id List operation List operation list all index rules' schema in a group.\nExamples of listing $ bydbctl indexRule list -g sw_stream API Reference indexRuleService v1\n","excerpt":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/index_rule/","title":"CRUD IndexRules"},{"body":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new measure to the database\u0026rsquo;s metadata registry repository. If the measure does not currently exist, create operation will create the schema.\nExamples of creating A measure belongs to a unique group. We should create such a group with a catalog CATALOG_MEASURE before creating a measure.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the below command will create a new measure:\n$ bydbctl measure create -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tag_families: - name: default tags: - name: id type: TAG_TYPE_STRING - name: entity_id type: TAG_TYPE_STRING fields: - name: total field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD - name: value field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD entity: tag_names: - entity_id interval: 1m EOF service_cpm_minute expects to ingest a series of data points with a minute interval.\nGet operation Get(Read) operation gets a measure\u0026rsquo;s schema.\nExamples of getting $ bydbctl measure get -g sw_metric -n service_cpm_minute Update operation Update operation changes a measure\u0026rsquo;s schema.\nExamples of updating $ bydbctl measure update -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tag_names: - entity_id EOF Delete operation Delete operation removes a measure\u0026rsquo;s schema.\nExamples of deleting $ bydbctl measure delete -g sw_metric -n service_cpm_minute List operation The list operation shows all measures' schema in a group.\nExamples of listing $ bydbctl measure list -g sw_metric API Reference MeasureService v1\n","excerpt":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line …","ref":"/docs/skywalking-banyandb/latest/crud/measure/schema/","title":"CRUD Measures"},{"body":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new measure to the database\u0026rsquo;s metadata registry repository. If the measure does not currently exist, create operation will create the schema.\nExamples of creating A measure belongs to a unique group. We should create such a group with a catalog CATALOG_MEASURE before creating a measure.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the below command will create a new measure:\n$ bydbctl measure create -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tag_families: - name: default tags: - name: id type: TAG_TYPE_STRING - name: entity_id type: TAG_TYPE_STRING fields: - name: total field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD - name: value field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD entity: tag_names: - entity_id interval: 1m EOF service_cpm_minute expects to ingest a series of data points with a minute interval.\nGet operation Get(Read) operation gets a measure\u0026rsquo;s schema.\nExamples of getting $ bydbctl measure get -g sw_metric -n service_cpm_minute Update operation Update operation changes a measure\u0026rsquo;s schema.\nExamples of updating $ bydbctl measure update -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tag_names: - entity_id EOF Delete operation Delete operation removes a measure\u0026rsquo;s schema.\nExamples of deleting $ bydbctl measure delete -g sw_metric -n service_cpm_minute List operation The list operation shows all measures' schema in a group.\nExamples of listing $ bydbctl measure list -g sw_metric API Reference MeasureService v1\n","excerpt":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line …","ref":"/docs/skywalking-banyandb/next/crud/measure/schema/","title":"CRUD Measures"},{"body":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new measure to the database\u0026rsquo;s metadata registry repository. If the measure does not currently exist, create operation will create the schema.\nExamples of creating A measure belongs to a unique group. We should create such a group with a catalog CATALOG_MEASURE before creating a measure.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the below command will create a new measure:\n$ bydbctl measure create -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tag_families: - name: default tags: - name: id type: TAG_TYPE_STRING - name: entity_id type: TAG_TYPE_STRING fields: - name: total field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD - name: value field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD entity: tag_names: - entity_id interval: 1m EOF service_cpm_minute expects to ingest a series of data points with a minute interval.\nGet operation Get(Read) operation gets a measure\u0026rsquo;s schema.\nExamples of getting $ bydbctl measure get -g sw_metric -n service_cpm_minute Update operation Update operation changes a measure\u0026rsquo;s schema.\nExamples of updating $ bydbctl measure update -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tag_names: - entity_id EOF Delete operation Delete operation removes a measure\u0026rsquo;s schema.\nExamples of deleting $ bydbctl measure delete -g sw_metric -n service_cpm_minute List operation The list operation shows all measures' schema in a group.\nExamples of listing $ bydbctl measure list -g sw_metric API Reference MeasureService v1\n","excerpt":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/measure/schema/","title":"CRUD Measures"},{"body":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user defined data.\nbydbctl is the command line tool in examples.\nApply (Create/Update) operation Apply creates a property if it\u0026rsquo;s absent, or updates an existed one based on a strategy. If the property does not currently exist, create operation will create the property.\nExamples of applying A property belongs to a unique group. 
We should create such a group before creating a property.\nThe group\u0026rsquo;s catalog should be empty.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw EOF Then, below command will create a new property:\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: name value: str: value: \u0026#34;hello\u0026#34; - key: state value: str: value: \u0026#34;succeed\u0026#34; EOF The operation supports updating partial tags.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; EOF TTL is supported in the operation.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; Get operation Get operation gets a property.\nExamples of getting $ bydbctl property get -g sw -n temp_data --id General-Service The operation could filter data by tags.\n$ bydbctl property get -g sw -n temp_data --id General-Service --tags state Delete operation Delete operation delete a property.\nExamples of deleting $ bydbctl property delete -g sw -n temp_data --id General-Service The delete operation could remove specific tags instead of the whole property.\n$ bydbctl property delete -g sw -n temp_data --id General-Service --tags state List operation List operation lists all properties in a group.\nExamples of listing in a group $ bydbctl property list -g sw List operation lists all properties in a group with a name.\nExamples of listing in a group with a name $ bydbctl property list -g sw -n temp_data TTL field in a property TTL field in a property is used to set the time to live of the property. The property will be deleted automatically after the TTL.\nThis functionality is supported by the lease mechanism. The readonly lease_id field is used to identify the lease of the property.\nExamples of setting TTL $ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; EOF The lease_id is returned in the response. You can use get operation to get the property with the lease_id as well.\n$ bydbctl property get -g sw -n temp_data --id General-Service The lease_id is used to keep the property alive. You can use keepalive operation to keep the property alive. When the keepalive operation is called, the property\u0026rsquo;s TTL will be reset to the original value.\n$ bydbctl property keepalive --lease_id 1 API Reference MeasureService v1\n","excerpt":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user …","ref":"/docs/skywalking-banyandb/latest/crud/property/","title":"CRUD Property"},{"body":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user defined data.\nbydbctl is the command line tool in examples.\nApply (Create/Update) operation Apply creates a property if it\u0026rsquo;s absent, or updates an existed one based on a strategy. If the property does not currently exist, create operation will create the property.\nExamples of applying A property belongs to a unique group. 
We should create such a group before creating a property.\nThe group\u0026rsquo;s catalog should be empty.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw EOF Then, below command will create a new property:\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: name value: str: value: \u0026#34;hello\u0026#34; - key: state value: str: value: \u0026#34;succeed\u0026#34; EOF The operation supports updating partial tags.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; EOF TTL is supported in the operation.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; Get operation Get operation gets a property.\nExamples of getting $ bydbctl property get -g sw -n temp_data --id General-Service The operation could filter data by tags.\n$ bydbctl property get -g sw -n temp_data --id General-Service --tags state Delete operation Delete operation delete a property.\nExamples of deleting $ bydbctl property delete -g sw -n temp_data --id General-Service The delete operation could remove specific tags instead of the whole property.\n$ bydbctl property delete -g sw -n temp_data --id General-Service --tags state List operation List operation lists all properties in a group.\nExamples of listing in a group $ bydbctl property list -g sw List operation lists all properties in a group with a name.\nExamples of listing in a group with a name $ bydbctl property list -g sw -n temp_data TTL field in a property TTL field in a property is used to set the time to live of the property. The property will be deleted automatically after the TTL.\nThis functionality is supported by the lease mechanism. The readonly lease_id field is used to identify the lease of the property.\nExamples of setting TTL $ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; EOF The lease_id is returned in the response. You can use get operation to get the property with the lease_id as well.\n$ bydbctl property get -g sw -n temp_data --id General-Service The lease_id is used to keep the property alive. You can use keepalive operation to keep the property alive. When the keepalive operation is called, the property\u0026rsquo;s TTL will be reset to the original value.\n$ bydbctl property keepalive --lease_id 1 API Reference MeasureService v1\n","excerpt":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user …","ref":"/docs/skywalking-banyandb/next/crud/property/","title":"CRUD Property"},{"body":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user defined data.\nbydbctl is the command line tool in examples.\nApply (Create/Update) operation Apply creates a property if it\u0026rsquo;s absent, or updates an existed one based on a strategy. If the property does not currently exist, create operation will create the property.\nExamples of applying A property belongs to a unique group. 
We should create such a group before creating a property.\nThe group\u0026rsquo;s catalog should be empty.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw EOF Then, below command will create a new property:\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: name value: str: value: \u0026#34;hello\u0026#34; - key: state value: str: value: \u0026#34;succeed\u0026#34; EOF The operation supports updating partial tags.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; EOF TTL is supported in the operation.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; Get operation Get operation gets a property.\nExamples of getting $ bydbctl property get -g sw -n temp_data --id General-Service The operation could filter data by tags.\n$ bydbctl property get -g sw -n temp_data --id General-Service --tags state Delete operation Delete operation delete a property.\nExamples of deleting $ bydbctl property delete -g sw -n temp_data --id General-Service The delete operation could remove specific tags instead of the whole property.\n$ bydbctl property delete -g sw -n temp_data --id General-Service --tags state List operation List operation lists all properties in a group.\nExamples of listing in a group $ bydbctl property list -g sw List operation lists all properties in a group with a name.\nExamples of listing in a group with a name $ bydbctl property list -g sw -n temp_data TTL field in a property TTL field in a property is used to set the time to live of the property. The property will be deleted automatically after the TTL.\nThis functionality is supported by the lease mechanism. The readonly lease_id field is used to identify the lease of the property.\nExamples of setting TTL $ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; EOF The lease_id is returned in the response. You can use get operation to get the property with the lease_id as well.\n$ bydbctl property get -g sw -n temp_data --id General-Service The lease_id is used to keep the property alive. You can use keepalive operation to keep the property alive. When the keepalive operation is called, the property\u0026rsquo;s TTL will be reset to the original value.\n$ bydbctl property keepalive --lease_id 1 API Reference MeasureService v1\n","excerpt":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/property/","title":"CRUD Property"},{"body":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line tool in examples.\nStream intends to store streaming data, for example, traces or logs.\nCreate operation Create operation adds a new stream to the database\u0026rsquo;s metadata registry repository. If the stream does not currently exist, create operation will create the schema.\nExamples of creating A stream belongs to a unique group. 
We should create such a group with a catalog CATALOG_STREAM before creating a stream.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store stream data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new stream:\n$ bydbctl stream create -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Get operation Get(Read) operation get a stream\u0026rsquo;s schema.\nExamples of getting $ bydbctl stream get -g default -n sw Update operation Update operation update a stream\u0026rsquo;s schema.\nExamples of updating bydbctl is the command line tool to update a stream in this example.\n$ bydbctl stream update -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Delete operation Delete operation delete a stream\u0026rsquo;s schema.\nExamples of deleting bydbctl is the command line tool to delete a stream in this example.\n$ bydbctl stream delete -g default -n sw List operation List operation list all streams' schema in a group.\nExamples of listing $ bydbctl stream list -g default API Reference StreamService v1\n","excerpt":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line …","ref":"/docs/skywalking-banyandb/latest/crud/stream/schema/","title":"CRUD Streams"},{"body":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line tool in examples.\nStream intends to store streaming data, for example, traces or logs.\nCreate operation Create operation adds a new stream to the database\u0026rsquo;s metadata registry repository. If the stream does not currently exist, create operation will create the schema.\nExamples of creating A stream belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a stream.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store stream data points. 
Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new stream:\n$ bydbctl stream create -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Get operation Get(Read) operation get a stream\u0026rsquo;s schema.\nExamples of getting $ bydbctl stream get -g default -n sw Update operation Update operation update a stream\u0026rsquo;s schema.\nExamples of updating bydbctl is the command line tool to update a stream in this example.\n$ bydbctl stream update -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Delete operation Delete operation delete a stream\u0026rsquo;s schema.\nExamples of deleting bydbctl is the command line tool to delete a stream in this example.\n$ bydbctl stream delete -g default -n sw List operation List operation list all streams' schema in a group.\nExamples of listing $ bydbctl stream list -g default API Reference StreamService v1\n","excerpt":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line …","ref":"/docs/skywalking-banyandb/next/crud/stream/schema/","title":"CRUD Streams"},{"body":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line tool in examples.\nStream intends to store streaming data, for example, traces or logs.\nCreate operation Create operation adds a new stream to the database\u0026rsquo;s metadata registry repository. If the stream does not currently exist, create operation will create the schema.\nExamples of creating A stream belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a stream.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store stream data points. 
Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new stream:\n$ bydbctl stream create -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Get operation Get(Read) operation get a stream\u0026rsquo;s schema.\nExamples of getting $ bydbctl stream get -g default -n sw Update operation Update operation update a stream\u0026rsquo;s schema.\nExamples of updating bydbctl is the command line tool to update a stream in this example.\n$ bydbctl stream update -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Delete operation Delete operation delete a stream\u0026rsquo;s schema.\nExamples of deleting bydbctl is the command line tool to delete a stream in this example.\n$ bydbctl stream delete -g default -n sw List operation List operation list all streams' schema in a group.\nExamples of listing $ bydbctl stream list -g default API Reference StreamService v1\n","excerpt":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/stream/schema/","title":"CRUD Streams"},{"body":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.9+.\nUse kustomize to customise your deployment  Clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Edit file adapter/config/adapter/kustomization.yaml file to change your preferences. If you prefer to your private docker image, a quick path to override ADAPTER_IMG environment variable : export ADAPTER_IMG=\u0026lt;private registry\u0026gt;/metrics-adapter:\u0026lt;tag\u0026gt;\n  Use make to generate the final manifests and deploy:\n  make -C adapter deploy Configuration The adapter takes the standard Kubernetes generic API server arguments (including those for authentication and authorization). By default, it will attempt to using Kubernetes in-cluster config to connect to the cluster.\nIt takes the following addition arguments specific to configuring how the adapter talks to SkyWalking OAP cluster:\n --oap-addr The address of OAP cluster. --metric-filter-regex A regular expression to filter metrics retrieved from OAP cluster. --refresh-interval This is the interval at which to update the cache of available metrics from OAP cluster. --namespace A prefix to which metrics are appended. The format is \u0026lsquo;namespace|metric_name\u0026rsquo;, defaults to skywalking.apache.org  HPA Configuration External metrics allow you to autoscale your cluster based on any metric available in OAP cluster. Just provide a metric block with a name and selector, and use the External metric type.\n- type:Externalexternal:metric:name:\u0026lt;metric_name\u0026gt;selector:matchLabels:\u0026lt;label_key\u0026gt;:\u0026lt;label_value\u0026gt;...target:.... metric_name: The name of metric generated by OAL or other subsystem. label: label_key is the entity name of skywalking metrics. 
if the label value contains special characters more than ., - and _, service.str.\u0026lt;number\u0026gt; represent the literal of label value, and service.byte.\u0026lt;number\u0026gt; could encode these special characters to hex bytes.  Supposing the service name is v1|productpage|bookinfo|demo, the matchLabels should be like the below piece:\nmatchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;// the hex byte of \u0026#34;|\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34; Caveats: byte label only accept a single character. That means || should be transformed to service.byte.0:\u0026quot;7c\u0026quot; and service.byte.1:\u0026quot;7c\u0026quot; instead of service.byte.0:\u0026quot;7c7c\u0026quot;\n The options of label keys are:\n service, service.str.\u0026lt;number\u0026gt; or service.byte.\u0026lt;number\u0026gt; The name of the service. instance, instance.str.\u0026lt;number\u0026gt; or instance.byte.\u0026lt;number\u0026gt; The name of the service instance. endpoint, endpoint.str.\u0026lt;number\u0026gt; or endpoint.byte.\u0026lt;number\u0026gt; The name of the endpoint. label, label.str.\u0026lt;number\u0026gt; or label.byte.\u0026lt;number\u0026gt; is optional, The labels you need to query, used for querying multi-labels metrics. Unlike swctl, this key only supports a single label due to the specification of the custom metrics API.  For example, if your application name is front_gateway, you could add the following section to your HorizontalPodAutoscaler manifest to specify that you need less than 80ms of 90th latency.\n- type:Externalexternal:metric:name:skywalking.apache.org|service_percentileselector:matchLabels:service:front_gateway# The index of [P50, P75, P90, P95, P99]. 2 is the index of P90(90%)label:\u0026#34;2\u0026#34;target:type:Valuevalue:80If the service is v1|productpage|bookinfo|demo|-:\n- type:Externalexternal:metric:name:skywalking.apache.org|service_cpmselector:matchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34;\u0026#34;service.byte.7\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.byte.8\u0026#34;: \u0026#34;2d\u0026#34;target:type:Valuevalue:80","excerpt":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is …","ref":"/docs/skywalking-swck/latest/custom-metrics-adapter/","title":"Custom metrics Adapter"},{"body":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.9+.\nUse kustomize to customise your deployment  Clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Edit file adapter/config/adapter/kustomization.yaml file to change your preferences. 
If you prefer to your private docker image, a quick path to override ADAPTER_IMG environment variable : export ADAPTER_IMG=\u0026lt;private registry\u0026gt;/metrics-adapter:\u0026lt;tag\u0026gt;\n  Use make to generate the final manifests and deploy:\n  make -C adapter deploy Configuration The adapter takes the standard Kubernetes generic API server arguments (including those for authentication and authorization). By default, it will attempt to using Kubernetes in-cluster config to connect to the cluster.\nIt takes the following addition arguments specific to configuring how the adapter talks to SkyWalking OAP cluster:\n --oap-addr The address of OAP cluster. --metric-filter-regex A regular expression to filter metrics retrieved from OAP cluster. --refresh-interval This is the interval at which to update the cache of available metrics from OAP cluster. --namespace A prefix to which metrics are appended. The format is \u0026lsquo;namespace|metric_name\u0026rsquo;, defaults to skywalking.apache.org  HPA Configuration External metrics allow you to autoscale your cluster based on any metric available in OAP cluster. Just provide a metric block with a name and selector, and use the External metric type.\n- type:Externalexternal:metric:name:\u0026lt;metric_name\u0026gt;selector:matchLabels:\u0026lt;label_key\u0026gt;:\u0026lt;label_value\u0026gt;...target:.... metric_name: The name of metric generated by OAL or other subsystem. label: label_key is the entity name of skywalking metrics. if the label value contains special characters more than ., - and _, service.str.\u0026lt;number\u0026gt; represent the literal of label value, and service.byte.\u0026lt;number\u0026gt; could encode these special characters to hex bytes.  Supposing the service name is v1|productpage|bookinfo|demo, the matchLabels should be like the below piece:\nmatchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;// the hex byte of \u0026#34;|\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34; Caveats: byte label only accept a single character. That means || should be transformed to service.byte.0:\u0026quot;7c\u0026quot; and service.byte.1:\u0026quot;7c\u0026quot; instead of service.byte.0:\u0026quot;7c7c\u0026quot;\n The options of label keys are:\n service, service.str.\u0026lt;number\u0026gt; or service.byte.\u0026lt;number\u0026gt; The name of the service. instance, instance.str.\u0026lt;number\u0026gt; or instance.byte.\u0026lt;number\u0026gt; The name of the service instance. endpoint, endpoint.str.\u0026lt;number\u0026gt; or endpoint.byte.\u0026lt;number\u0026gt; The name of the endpoint. label, label.str.\u0026lt;number\u0026gt; or label.byte.\u0026lt;number\u0026gt; is optional, The labels you need to query, used for querying multi-labels metrics. Unlike swctl, this key only supports a single label due to the specification of the custom metrics API.  
For example, if your application name is front_gateway, you could add the following section to your HorizontalPodAutoscaler manifest to specify that you need less than 80ms of 90th latency.\n- type:Externalexternal:metric:name:skywalking.apache.org|service_percentileselector:matchLabels:service:front_gateway# The index of [P50, P75, P90, P95, P99]. 2 is the index of P90(90%)label:\u0026#34;2\u0026#34;target:type:Valuevalue:80If the service is v1|productpage|bookinfo|demo|-:\n- type:Externalexternal:metric:name:skywalking.apache.org|service_cpmselector:matchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34;\u0026#34;service.byte.7\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.byte.8\u0026#34;: \u0026#34;2d\u0026#34;target:type:Valuevalue:80","excerpt":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is …","ref":"/docs/skywalking-swck/next/custom-metrics-adapter/","title":"Custom metrics Adapter"},{"body":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.9+.\nUse kustomize to customise your deployment  Clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Edit file adapter/config/adapter/kustomization.yaml file to change your preferences. If you prefer to your private docker image, a quick path to override ADAPTER_IMG environment variable : export ADAPTER_IMG=\u0026lt;private registry\u0026gt;/metrics-adapter:\u0026lt;tag\u0026gt;\n  Use make to generate the final manifests and deploy:\n  make -C adapter deploy Configuration The adapter takes the standard Kubernetes generic API server arguments (including those for authentication and authorization). By default, it will attempt to using Kubernetes in-cluster config to connect to the cluster.\nIt takes the following addition arguments specific to configuring how the adapter talks to SkyWalking OAP cluster:\n --oap-addr The address of OAP cluster. --metric-filter-regex A regular expression to filter metrics retrieved from OAP cluster. --refresh-interval This is the interval at which to update the cache of available metrics from OAP cluster. --namespace A prefix to which metrics are appended. The format is \u0026lsquo;namespace|metric_name\u0026rsquo;, defaults to skywalking.apache.org  HPA Configuration External metrics allow you to autoscale your cluster based on any metric available in OAP cluster. Just provide a metric block with a name and selector, and use the External metric type.\n- type:Externalexternal:metric:name:\u0026lt;metric_name\u0026gt;selector:matchLabels:\u0026lt;label_key\u0026gt;:\u0026lt;label_value\u0026gt;...target:.... metric_name: The name of metric generated by OAL or other subsystem. label: label_key is the entity name of skywalking metrics. if the label value contains special characters more than ., - and _, service.str.\u0026lt;number\u0026gt; represent the literal of label value, and service.byte.\u0026lt;number\u0026gt; could encode these special characters to hex bytes.  
Supposing the service name is v1|productpage|bookinfo|demo, the matchLabels should be like the below piece:\nmatchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;// the hex byte of \u0026#34;|\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34; Caveats: byte label only accept a single character. That means || should be transformed to service.byte.0:\u0026quot;7c\u0026quot; and service.byte.1:\u0026quot;7c\u0026quot; instead of service.byte.0:\u0026quot;7c7c\u0026quot;\n The options of label keys are:\n service, service.str.\u0026lt;number\u0026gt; or service.byte.\u0026lt;number\u0026gt; The name of the service. instance, instance.str.\u0026lt;number\u0026gt; or instance.byte.\u0026lt;number\u0026gt; The name of the service instance. endpoint, endpoint.str.\u0026lt;number\u0026gt; or endpoint.byte.\u0026lt;number\u0026gt; The name of the endpoint. label, label.str.\u0026lt;number\u0026gt; or label.byte.\u0026lt;number\u0026gt; is optional, The labels you need to query, used for querying multi-labels metrics. Unlike swctl, this key only supports a single label due to the specification of the custom metrics API.  For example, if your application name is front_gateway, you could add the following section to your HorizontalPodAutoscaler manifest to specify that you need less than 80ms of 90th latency.\n- type:Externalexternal:metric:name:skywalking.apache.org|service_percentileselector:matchLabels:service:front_gateway# The index of [P50, P75, P90, P95, P99]. 2 is the index of P90(90%)label:\u0026#34;2\u0026#34;target:type:Valuevalue:80If the service is v1|productpage|bookinfo|demo|-:\n- type:Externalexternal:metric:name:skywalking.apache.org|service_cpmselector:matchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34;\u0026#34;service.byte.7\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.byte.8\u0026#34;: \u0026#34;2d\u0026#34;target:type:Valuevalue:80","excerpt":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is …","ref":"/docs/skywalking-swck/v0.9.0/custom-metrics-adapter/","title":"Custom metrics Adapter"},{"body":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the high-level data organization data model data retrieval  You can also find examples of how to interact with BanyanDB using bydbctl, how to create and drop groups, or how to create, read, update and drop streams/measures.\nStructure of BanyanDB The hierarchy that data is organized into streams, measures and properties in groups.\nGroups Group does not provide a mechanism for isolating groups of resources within a single banyand-server but is the minimal unit to manage physical structures. Each group contains a set of options, like retention policy, shard number, etc. 
Several shards distribute in a group.\nmetadata:name:othersor\nmetadata:name:sw_metriccatalog:CATALOG_MEASUREresource_opts:shard_num:2block_interval:unit:UNIT_HOURnum:2segment_interval:unit:UNIT_DAYnum:1ttl:unit:UNIT_DAYnum:7The group creates two shards to store data points. Every day, it would create a segment that will generate a block every 2 hours. The available units are HOUR and DAY. The data in this group will keep 7 days.\nEvery other resource should belong to a group. The catalog indicates which kind of data model the group contains.\n UNSPECIFIED: Property or other data models. MEASURE: Measure. STREAM: Stream.  Group Registration Operations\nMeasures BanyanDB lets you define a measure as follows:\nmetadata:name:service_cpm_minutegroup:sw_metrictag_families:- name:defaulttags:- name:idtype:TAG_TYPE_STRING- name:entity_idtype:TAG_TYPE_STRINGfields:- name:totalfield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTD- name:valuefield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTDentity:tag_names:- entity_idinterval:1mMeasure consists of a sequence of data points. Each data point contains tags and fields.\nTags are key-value pairs. The database engine can index tag values by referring to the index rules and rule bindings, confining the query to filtering data points based on tags bound to an index rule.\nTags are grouped into unique tag_families which are the logical and physical grouping of tags.\nMeasure supports the following tag types:\n STRING : Text INT : 64 bits long integer STRING_ARRAY : A group of strings INT_ARRAY : A group of integers DATA_BINARY : Raw binary  A group of selected tags composite an entity that points out a specific time series the data point belongs to. The database engine has capacities to encode and compress values in the same time series. Users should select appropriate tag combinations to optimize the data size. Another role of entity is the sharding key of data points, determining how to fragment data between shards.\nFields are also key-value pairs like tags. But the value of each field is the actual value of a single data point. The database engine would encode and compress the field\u0026rsquo;s values in the same time series. The query operation is forbidden to filter data points based on a field\u0026rsquo;s value. You could apply aggregation functions to them.\nMeasure supports the following fields types:\n STRING : Text INT : 64 bits long integer DATA_BINARY : Raw binary FLOAT : 64 bits double-precision floating-point number  Measure supports the following encoding methods:\n GORILLA : GORILLA encoding is lossless. It is more suitable for a numerical sequence with similar values and is not recommended for sequence data with large fluctuations.  Measure supports the types of the following fields:\n ZSTD : Zstandard is a real-time compression algorithm, that provides high compression ratios. It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder. For BanyanDB focus on speed.  Another option named interval plays a critical role in encoding. It indicates the time range between two adjacent data points in a time series and implies that all data points belonging to the same time series are distributed based on a fixed interval. A better practice for the naming measure is to append the interval literal to the tail, for example, service_cpm_minute. 
It\u0026rsquo;s a parameter of GORILLA encoding method.\nMeasure Registration Operations\nTopNAggregation Find the Top-N entities from a dataset in a time range is a common scenario. We could see the diagrams like \u0026ldquo;Top 10 throughput endpoints\u0026rdquo;, and \u0026ldquo;Most slow 20 endpoints\u0026rdquo;, etc on SkyWalking\u0026rsquo;s UI. Exploring and analyzing the top entities can always reveal some high-value information.\nBanyanDB introduces the TopNAggregation, aiming to pre-calculate the top/bottom entities during the measure writing phase. In the query phase, BanyanDB can quickly retrieve the top/bottom records. The performance would be much better than top() function which is based on the query phase aggregation procedure.\n Caveat: TopNAggregation is an approximate realization, to use it well you need have a good understanding with the algorithm as well as the data distribution.\n ---metadata:name:endpoint_cpm_minute_top_bottomgroup:sw_metricsource_measure:name:endpoint_cpm_minutegroup:sw_metricfield_name:valuefield_value_sort:SORT_UNSPECIFIEDgroup_by_tag_names:- entity_idcounters_number:10000lru_size:10endpoint_cpm_minute_top_bottom is watching the data ingesting of the source measure endpoint_cpm_minute to generate both top 1000 and bottom 1000 entity cardinalities. If only Top 1000 or Bottom 1000 is needed, the field_value_sort could be DESC or ASC respectively.\n SORT_DESC: Top-N. In a series of 1,2,3...1000. Top10\u0026rsquo;s result is 1000,999...991. SORT_ASC: Bottom-N. In a series of 1,2,3...1000. Bottom10\u0026rsquo;s result is 1,2...10.  Tags in group_by_tag_names are used as dimensions. These tags can be searched (only equality is supported) in the query phase. Tags do not exist in group_by_tag_names will be dropped in the pre-calculating phase.\ncounters_number denotes the number of entity cardinality. As the above example shows, calculating the Top 100 among 10 thousands is easier than among 10 millions.\nlru_size is a late data optimizing flag. The higher the number, the more late data, but the more memory space is consumed.\nTopNAggregation Registration Operations\nStreams Stream shares many details with Measure except for abandoning field. Stream focuses on high throughput data collection, for example, tracing and logging. The database engine also supports compressing stream entries based on entity, but no encoding process is involved.\nStream Registration Operations\nProperties Property is a schema-less or schema-free data model. That means you DO NOT have to define a schema before writing a Property\nProperty is a standard key-value store. Users could store their metadata or items on a property and get a sequential consistency guarantee. BanyanDB\u0026rsquo;s motivation for introducing such a particular structure is to support most APM scenarios that need to store critical data, especially for a distributed database cluster.\nWe should create a group before creating a property.\nCreating group.\nmetadata:name:swCreating property.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;Property supports a three-level hierarchy, group/name/id, that is more flexible than schemaful data models.\nThe property supports the TTL mechanism. 
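For readability, the temp_data property created above, with whitespace restored (a sketch; the nesting, in particular where id sits relative to container, is inferred from the flattened example):
metadata:
  container:
    group: sw
    name: temp_data
  id: General-Service
tags:
  - key: name
    value:
      str:
        value: "hello"
  - key: state
    value:
      str:
        value: "succeed"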
You could set the ttl field to specify the time to live.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;ttl:\u0026#34;1h\u0026#34;\u0026ldquo;General-Service\u0026rdquo; will be dropped after 1 hour. If you want to extend the TTL, you could use the \u0026ldquo;keepalive\u0026rdquo; operation. The \u0026ldquo;lease_id\u0026rdquo; is returned in the apply response. You can use get operation to get the property with the lease_id as well.\nlease_id:1\u0026ldquo;General-Service\u0026rdquo; lives another 1 hour.\nYou could Create, Read, Update and Drop a property, and update or drop several tags instead of the entire property.\nProperty Operations\nData Models Data models in BanyanDB derive from some classic data models.\nTimeSeries Model A time series is a series of data points indexed in time order. Most commonly, a time series is a sequence taken at successive equally spaced points in time. Thus it is a sequence of discrete-time data.\nYou can store time series data points through Stream or Measure. Examples of Stream are logs, traces and events. Measure could ingest metrics, profiles, etc.\nKey-Value Model The key-value data model is a subset of the Property data model. Every property has a key \u0026lt;group\u0026gt;/\u0026lt;name\u0026gt;/\u0026lt;id\u0026gt; that identifies a property within a collection. This key acts as the primary key to retrieve the data. You can set it when creating a key. It cannot be changed later because the attribute is immutable.\nThere are several Key-Value pairs in a property, named Tags. You could add, update and drop them based on the tag\u0026rsquo;s key.\nData Retrieval Queries and Writes are used to filter schemaful data models, Stream, Measure or TopNAggregation based on certain criteria, as well as to compute or store new data.\n MeasureService provides Write, Query and TopN StreamService provides Write, Query  IndexRule \u0026amp; IndexRuleBinding An IndexRule indicates which tags are indexed. An IndexRuleBinding binds an index rule to the target resources or the subject. There might be several rule bindings to a single resource, but their effective time range could NOT overlap.\nmetadata:name:trace_idgroup:sw_streamtags:- trace_idtype:TYPE_TREElocation:LOCATION_GLOBALIndexRule supports selecting two distinct kinds of index structures. The INVERTED index is the primary option when users set up an index rule. It\u0026rsquo;s suitable for most tag indexing due to a better memory usage ratio and query performance. When there are many unique tag values here, such as the ID tag and numeric duration tag, the TREE index could be better. This index saves much memory space with high-cardinality data sets.\nMost IndexRule\u0026rsquo;s location is LOCAL which places indices with their indexed data together. IndexRule also provides a GLOBAL location to place some indices on a higher layer of hierarchical structure. 
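Re-indented for readability, the trace_id rule shown above (a TREE index placed at the GLOBAL location) reads roughly as; only the whitespace is restored, all names come from the flattened example:
metadata:
  name: trace_id
  group: sw_stream
tags:
  - trace_id
type: TYPE_TREE
location: LOCATION_GLOBAL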
This option intends to optimize the full-scan operation for some querying cases of no time range specification, such as finding spans from a trace by trace_id.\nmetadata:name:stream_bindinggroup:sw_streamrules:- trace_id- duration- endpoint_id- status_code- http.method- db.instance- db.type- mq.broker- mq.queue- mq.topic- extended_tagssubject:catalog:CATALOG_STREAMname:swbegin_at:\u0026#39;2021-04-15T01:30:15.01Z\u0026#39;expire_at:\u0026#39;2121-04-15T01:30:15.01Z\u0026#39;IndexRuleBinding binds IndexRules to a subject, Stream or Measure. The time range between begin_at and expire_at is the effective time.\nIndexRule Registration Operations\nIndexRuleBinding Registration Operations\nIndex Granularity In BanyanDB, Stream and Measure have different levels of index granularity.\nFor Measure, the indexed target is a data point with specific tag values. The query processor uses the tag values defined in the entity field of the Measure to compose a series ID, which is used to find the several series that match the query criteria. The entity field is a set of tags that defines the unique identity of a time series, and it restricts the tags that can be used as indexed target.\nEach series contains a sequence of data points that share the same tag values. Once the query processor has identified the relevant series, it scans the data points between the desired time range in those series to find the data that matches the query criteria.\nFor example, suppose we have a Measure with the following entity field: {service, operation, instance}. If we get a data point with the following tag values: service=shopping, operation=search, and instance=prod-1, then the query processor would use those tag values to construct a series ID that uniquely identifies the series containing that data point. The query processor would then scan the relevant data points in that series to find the data that matches the query criteria.\nThe side effect of the measure index is that each indexed value has to represent a unique seriesID. This is because the series ID is constructed by concatenating the indexed tag values in the entity field. If two series have the same entity field, they would have the same series ID and would be indistinguishable from one another. This means that if you want to index a tag that is not part of the entity field, you would need to ensure that it is unique across all series. One way to do this would be to include the tag in the entity field, but this may not always be feasible or desirable depending on your use case.\nFor Stream, the indexed target is an element that is a combination of the series ID and timestamp. The Stream query processor uses the time range to find target files. The indexed result points to the target element. The processor doesn\u0026rsquo;t have to scan a series of elements in this time range, which reduces the query time.\nFor example, suppose we have a Stream with the following tags: service, operation, instance, and status_code. If we get a data point with the following tag values: service=shopping, operation=search, instance=prod-1, and status_code=200, and the data point\u0026rsquo;s time is 1:00pm on January 1st, 2022, then the series ID for this data point would be shopping_search_prod-1_200_1641052800, where 1641052800 is the Unix timestamp representing 1:00pm on January 1st, 2022.\nThe indexed target would be the combination of the series ID and timestamp, which in this case would be shopping_search_prod-1_200_1641052800. 
The Stream query processor would use the time range specified in the query to find target files and then search within those files for the indexed target.\nThe following is a comparison of the indexing granularity, performance, and flexibility of Stream and Measure indices:\n   Indexing Granularity Performance Flexibility     Measure indices are constructed for each series and are based on the entity field of the Measure. Each indexed value has to represent a unique seriesID. Measure index is faster than Stream index. Measure index is less flexible and requires more care when indexing tags that are not part of the entity field.   Stream indices are constructed for each element and are based on the series ID and timestamp. Stream index is slower than Measure index. Stream index is more flexible than Measure index and can index any tag value.    In general, Measure indices are faster and more efficient, but they require more care when indexing tags that are not part of the entity field. Stream indices, on the other hand, are slower and take up more space, but they can index any tag value and do not have the same side effects as Measure indices.\n","excerpt":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the …","ref":"/docs/skywalking-banyandb/latest/concept/data-model/","title":"Data Model"},{"body":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the high-level data organization data model data retrieval  You can also find examples of how to interact with BanyanDB using bydbctl, how to create and drop groups, or how to create, read, update and drop streams/measures.\nStructure of BanyanDB The hierarchy that data is organized into streams, measures and properties in groups.\nGroups Group does not provide a mechanism for isolating groups of resources within a single banyand-server but is the minimal unit to manage physical structures. Each group contains a set of options, like retention policy, shard number, etc. Several shards distribute in a group.\nmetadata:name:othersor\nmetadata:name:sw_metriccatalog:CATALOG_MEASUREresource_opts:shard_num:2segment_interval:unit:UNIT_DAYnum:1ttl:unit:UNIT_DAYnum:7The group creates two shards to store data points. Every day, it would create a segment that will generate a block every 2 hours. The available units are HOUR and DAY. The data in this group will keep 7 days.\nEvery other resource should belong to a group. The catalog indicates which kind of data model the group contains.\n UNSPECIFIED: Property or other data models. MEASURE: Measure. STREAM: Stream.  Group Registration Operations\nMeasures BanyanDB lets you define a measure as follows:\nmetadata:name:service_cpm_minutegroup:sw_metrictag_families:- name:defaulttags:- name:idtype:TAG_TYPE_STRING- name:entity_idtype:TAG_TYPE_STRINGfields:- name:totalfield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTD- name:valuefield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTDentity:tag_names:- entity_idinterval:1mMeasure consists of a sequence of data points. Each data point contains tags and fields.\nTags are key-value pairs. 
The database engine can index tag values by referring to the index rules and rule bindings, confining the query to filtering data points based on tags bound to an index rule.\nTags are grouped into unique tag_families which are the logical and physical grouping of tags.\nMeasure supports the following tag types:\n STRING : Text INT : 64 bits long integer STRING_ARRAY : A group of strings INT_ARRAY : A group of integers DATA_BINARY : Raw binary  A group of selected tags composite an entity that points out a specific time series the data point belongs to. The database engine has capacities to encode and compress values in the same time series. Users should select appropriate tag combinations to optimize the data size. Another role of entity is the sharding key of data points, determining how to fragment data between shards.\nFields are also key-value pairs like tags. But the value of each field is the actual value of a single data point. The database engine would encode and compress the field\u0026rsquo;s values in the same time series. The query operation is forbidden to filter data points based on a field\u0026rsquo;s value. You could apply aggregation functions to them.\nMeasure supports the following fields types:\n STRING : Text INT : 64 bits long integer DATA_BINARY : Raw binary FLOAT : 64 bits double-precision floating-point number  Measure supports the following encoding methods:\n GORILLA : GORILLA encoding is lossless. It is more suitable for a numerical sequence with similar values and is not recommended for sequence data with large fluctuations.  Measure supports the types of the following fields:\n ZSTD : Zstandard is a real-time compression algorithm, that provides high compression ratios. It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder. For BanyanDB focus on speed.  Another option named interval plays a critical role in encoding. It indicates the time range between two adjacent data points in a time series and implies that all data points belonging to the same time series are distributed based on a fixed interval. A better practice for the naming measure is to append the interval literal to the tail, for example, service_cpm_minute. It\u0026rsquo;s a parameter of GORILLA encoding method.\nMeasure Registration Operations\nTopNAggregation Find the Top-N entities from a dataset in a time range is a common scenario. We could see the diagrams like \u0026ldquo;Top 10 throughput endpoints\u0026rdquo;, and \u0026ldquo;Most slow 20 endpoints\u0026rdquo;, etc on SkyWalking\u0026rsquo;s UI. Exploring and analyzing the top entities can always reveal some high-value information.\nBanyanDB introduces the TopNAggregation, aiming to pre-calculate the top/bottom entities during the measure writing phase. In the query phase, BanyanDB can quickly retrieve the top/bottom records. The performance would be much better than top() function which is based on the query phase aggregation procedure.\n Caveat: TopNAggregation is an approximate realization, to use it well you need have a good understanding with the algorithm as well as the data distribution.\n ---metadata:name:endpoint_cpm_minute_top_bottomgroup:sw_metricsource_measure:name:endpoint_cpm_minutegroup:sw_metricfield_name:valuefield_value_sort:SORT_UNSPECIFIEDgroup_by_tag_names:- entity_idcounters_number:10000lru_size:10endpoint_cpm_minute_top_bottom is watching the data ingesting of the source measure endpoint_cpm_minute to generate both top 1000 and bottom 1000 entity cardinalities. 
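Re-indented for readability, the endpoint_cpm_minute_top_bottom aggregation above reads roughly as (only whitespace is restored; all names and values come from the flattened example):
metadata:
  name: endpoint_cpm_minute_top_bottom
  group: sw_metric
source_measure:
  name: endpoint_cpm_minute
  group: sw_metric
field_name: value
field_value_sort: SORT_UNSPECIFIED
group_by_tag_names:
  - entity_id
counters_number: 10000
lru_size: 10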
If only Top 1000 or Bottom 1000 is needed, the field_value_sort could be DESC or ASC respectively.\n SORT_DESC: Top-N. In a series of 1,2,3...1000. Top10\u0026rsquo;s result is 1000,999...991. SORT_ASC: Bottom-N. In a series of 1,2,3...1000. Bottom10\u0026rsquo;s result is 1,2...10.  Tags in group_by_tag_names are used as dimensions. These tags can be searched (only equality is supported) in the query phase. Tags do not exist in group_by_tag_names will be dropped in the pre-calculating phase.\ncounters_number denotes the number of entity cardinality. As the above example shows, calculating the Top 100 among 10 thousands is easier than among 10 millions.\nlru_size is a late data optimizing flag. The higher the number, the more late data, but the more memory space is consumed.\nTopNAggregation Registration Operations\nStreams Stream shares many details with Measure except for abandoning field. Stream focuses on high throughput data collection, for example, tracing and logging. The database engine also supports compressing stream entries based on entity, but no encoding process is involved.\nStream Registration Operations\nProperties Property is a schema-less or schema-free data model. That means you DO NOT have to define a schema before writing a Property\nProperty is a standard key-value store. Users could store their metadata or items on a property and get a sequential consistency guarantee. BanyanDB\u0026rsquo;s motivation for introducing such a particular structure is to support most APM scenarios that need to store critical data, especially for a distributed database cluster.\nWe should create a group before creating a property.\nCreating group.\nmetadata:name:swCreating property.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;Property supports a three-level hierarchy, group/name/id, that is more flexible than schemaful data models.\nThe property supports the TTL mechanism. You could set the ttl field to specify the time to live.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;ttl:\u0026#34;1h\u0026#34;\u0026ldquo;General-Service\u0026rdquo; will be dropped after 1 hour. If you want to extend the TTL, you could use the \u0026ldquo;keepalive\u0026rdquo; operation. The \u0026ldquo;lease_id\u0026rdquo; is returned in the apply response. You can use get operation to get the property with the lease_id as well.\nlease_id:1\u0026ldquo;General-Service\u0026rdquo; lives another 1 hour.\nYou could Create, Read, Update and Drop a property, and update or drop several tags instead of the entire property.\nProperty Operations\nData Models Data models in BanyanDB derive from some classic data models.\nTimeSeries Model A time series is a series of data points indexed in time order. Most commonly, a time series is a sequence taken at successive equally spaced points in time. Thus it is a sequence of discrete-time data.\nYou can store time series data points through Stream or Measure. Examples of Stream are logs, traces and events. Measure could ingest metrics, profiles, etc.\nKey-Value Model The key-value data model is a subset of the Property data model. Every property has a key \u0026lt;group\u0026gt;/\u0026lt;name\u0026gt;/\u0026lt;id\u0026gt; that identifies a property within a collection. This key acts as the primary key to retrieve the data. 
You can set it when creating a key. It cannot be changed later because the attribute is immutable.\nThere are several Key-Value pairs in a property, named Tags. You could add, update and drop them based on the tag\u0026rsquo;s key.\nData Retrieval Queries and Writes are used to filter schemaful data models, Stream, Measure or TopNAggregation based on certain criteria, as well as to compute or store new data.\n MeasureService provides Write, Query and TopN StreamService provides Write, Query  IndexRule \u0026amp; IndexRuleBinding An IndexRule indicates which tags are indexed. An IndexRuleBinding binds an index rule to the target resources or the subject. There might be several rule bindings to a single resource, but their effective time range could NOT overlap.\nmetadata:name:trace_idgroup:sw_streamtags:- trace_idtype:TYPE_INVERTEDIndexRule supports selecting two distinct kinds of index structures. The INVERTED index is the primary option when users set up an index rule. It\u0026rsquo;s suitable for most tag indexing due to a better memory usage ratio and query performance.\nmetadata:name:stream_bindinggroup:sw_streamrules:- trace_id- duration- endpoint_id- status_code- http.method- db.instance- db.type- mq.broker- mq.queue- mq.topic- extended_tagssubject:catalog:CATALOG_STREAMname:swbegin_at:\u0026#39;2021-04-15T01:30:15.01Z\u0026#39;expire_at:\u0026#39;2121-04-15T01:30:15.01Z\u0026#39;IndexRuleBinding binds IndexRules to a subject, Stream or Measure. The time range between begin_at and expire_at is the effective time.\nIndexRule Registration Operations\nIndexRuleBinding Registration Operations\nIndex Granularity In BanyanDB, Stream and Measure have different levels of index granularity.\nFor Measure, the indexed target is a data point with specific tag values. The query processor uses the tag values defined in the entity field of the Measure to compose a series ID, which is used to find the several series that match the query criteria. The entity field is a set of tags that defines the unique identity of a time series, and it restricts the tags that can be used as indexed target.\nEach series contains a sequence of data points that share the same tag values. Once the query processor has identified the relevant series, it scans the data points between the desired time range in those series to find the data that matches the query criteria.\nFor example, suppose we have a Measure with the following entity field: {service, operation, instance}. If we get a data point with the following tag values: service=shopping, operation=search, and instance=prod-1, then the query processor would use those tag values to construct a series ID that uniquely identifies the series containing that data point. The query processor would then scan the relevant data points in that series to find the data that matches the query criteria.\nThe side effect of the measure index is that each indexed value has to represent a unique seriesID. This is because the series ID is constructed by concatenating the indexed tag values in the entity field. If two series have the same entity field, they would have the same series ID and would be indistinguishable from one another. This means that if you want to index a tag that is not part of the entity field, you would need to ensure that it is unique across all series. 
One way to do this would be to include the tag in the entity field, but this may not always be feasible or desirable depending on your use case.\nFor Stream, the indexed target is an element that is a combination of the series ID and timestamp. The Stream query processor uses the time range to find target files. The indexed result points to the target element. The processor doesn\u0026rsquo;t have to scan a series of elements in this time range, which reduces the query time.\nFor example, suppose we have a Stream with the following tags: service, operation, instance, and status_code. If we get a data point with the following tag values: service=shopping, operation=search, instance=prod-1, and status_code=200, and the data point\u0026rsquo;s time is 1:00pm on January 1st, 2022, then the series ID for this data point would be shopping_search_prod-1_200_1641052800, where 1641052800 is the Unix timestamp representing 1:00pm on January 1st, 2022.\nThe indexed target would be the combination of the series ID and timestamp, which in this case would be shopping_search_prod-1_200_1641052800. The Stream query processor would use the time range specified in the query to find target files and then search within those files for the indexed target.\nThe following is a comparison of the indexing granularity, performance, and flexibility of Stream and Measure indices:\n   Indexing Granularity Performance Flexibility     Measure indices are constructed for each series and are based on the entity field of the Measure. Each indexed value has to represent a unique seriesID. Measure index is faster than Stream index. Measure index is less flexible and requires more care when indexing tags that are not part of the entity field.   Stream indices are constructed for each element and are based on the series ID and timestamp. Stream index is slower than Measure index. Stream index is more flexible than Measure index and can index any tag value.    In general, Measure indices are faster and more efficient, but they require more care when indexing tags that are not part of the entity field. Stream indices, on the other hand, are slower and take up more space, but they can index any tag value and do not have the same side effects as Measure indices.\n","excerpt":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the …","ref":"/docs/skywalking-banyandb/next/concept/data-model/","title":"Data Model"},{"body":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the high-level data organization data model data retrieval  You can also find examples of how to interact with BanyanDB using bydbctl, how to create and drop groups, or how to create, read, update and drop streams/measures.\nStructure of BanyanDB The hierarchy that data is organized into streams, measures and properties in groups.\nGroups Group does not provide a mechanism for isolating groups of resources within a single banyand-server but is the minimal unit to manage physical structures. Each group contains a set of options, like retention policy, shard number, etc. Several shards distribute in a group.\nmetadata:name:othersor\nmetadata:name:sw_metriccatalog:CATALOG_MEASUREresource_opts:shard_num:2block_interval:unit:UNIT_HOURnum:2segment_interval:unit:UNIT_DAYnum:1ttl:unit:UNIT_DAYnum:7The group creates two shards to store data points. Every day, it would create a segment that will generate a block every 2 hours. The available units are HOUR and DAY. 
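With whitespace restored, the sw_metric group above reads roughly as (a readability sketch of the flattened example):
metadata:
  name: sw_metric
catalog: CATALOG_MEASURE
resource_opts:
  shard_num: 2
  block_interval:
    unit: UNIT_HOUR
    num: 2
  segment_interval:
    unit: UNIT_DAY
    num: 1
  ttl:
    unit: UNIT_DAY
    num: 7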
The data in this group will keep 7 days.\nEvery other resource should belong to a group. The catalog indicates which kind of data model the group contains.\n UNSPECIFIED: Property or other data models. MEASURE: Measure. STREAM: Stream.  Group Registration Operations\nMeasures BanyanDB lets you define a measure as follows:\nmetadata:name:service_cpm_minutegroup:sw_metrictag_families:- name:defaulttags:- name:idtype:TAG_TYPE_STRING- name:entity_idtype:TAG_TYPE_STRINGfields:- name:totalfield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTD- name:valuefield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTDentity:tag_names:- entity_idinterval:1mMeasure consists of a sequence of data points. Each data point contains tags and fields.\nTags are key-value pairs. The database engine can index tag values by referring to the index rules and rule bindings, confining the query to filtering data points based on tags bound to an index rule.\nTags are grouped into unique tag_families which are the logical and physical grouping of tags.\nMeasure supports the following tag types:\n STRING : Text INT : 64 bits long integer STRING_ARRAY : A group of strings INT_ARRAY : A group of integers DATA_BINARY : Raw binary  A group of selected tags composite an entity that points out a specific time series the data point belongs to. The database engine has capacities to encode and compress values in the same time series. Users should select appropriate tag combinations to optimize the data size. Another role of entity is the sharding key of data points, determining how to fragment data between shards.\nFields are also key-value pairs like tags. But the value of each field is the actual value of a single data point. The database engine would encode and compress the field\u0026rsquo;s values in the same time series. The query operation is forbidden to filter data points based on a field\u0026rsquo;s value. You could apply aggregation functions to them.\nMeasure supports the following fields types:\n STRING : Text INT : 64 bits long integer DATA_BINARY : Raw binary FLOAT : 64 bits double-precision floating-point number  Measure supports the following encoding methods:\n GORILLA : GORILLA encoding is lossless. It is more suitable for a numerical sequence with similar values and is not recommended for sequence data with large fluctuations.  Measure supports the types of the following fields:\n ZSTD : Zstandard is a real-time compression algorithm, that provides high compression ratios. It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder. For BanyanDB focus on speed.  Another option named interval plays a critical role in encoding. It indicates the time range between two adjacent data points in a time series and implies that all data points belonging to the same time series are distributed based on a fixed interval. A better practice for the naming measure is to append the interval literal to the tail, for example, service_cpm_minute. It\u0026rsquo;s a parameter of GORILLA encoding method.\nMeasure Registration Operations\nTopNAggregation Find the Top-N entities from a dataset in a time range is a common scenario. We could see the diagrams like \u0026ldquo;Top 10 throughput endpoints\u0026rdquo;, and \u0026ldquo;Most slow 20 endpoints\u0026rdquo;, etc on SkyWalking\u0026rsquo;s UI. 
Exploring and analyzing the top entities can always reveal some high-value information.\nBanyanDB introduces the TopNAggregation, aiming to pre-calculate the top/bottom entities during the measure writing phase. In the query phase, BanyanDB can quickly retrieve the top/bottom records. The performance would be much better than top() function which is based on the query phase aggregation procedure.\n Caveat: TopNAggregation is an approximate realization, to use it well you need have a good understanding with the algorithm as well as the data distribution.\n ---metadata:name:endpoint_cpm_minute_top_bottomgroup:sw_metricsource_measure:name:endpoint_cpm_minutegroup:sw_metricfield_name:valuefield_value_sort:SORT_UNSPECIFIEDgroup_by_tag_names:- entity_idcounters_number:10000lru_size:10endpoint_cpm_minute_top_bottom is watching the data ingesting of the source measure endpoint_cpm_minute to generate both top 1000 and bottom 1000 entity cardinalities. If only Top 1000 or Bottom 1000 is needed, the field_value_sort could be DESC or ASC respectively.\n SORT_DESC: Top-N. In a series of 1,2,3...1000. Top10\u0026rsquo;s result is 1000,999...991. SORT_ASC: Bottom-N. In a series of 1,2,3...1000. Bottom10\u0026rsquo;s result is 1,2...10.  Tags in group_by_tag_names are used as dimensions. These tags can be searched (only equality is supported) in the query phase. Tags do not exist in group_by_tag_names will be dropped in the pre-calculating phase.\ncounters_number denotes the number of entity cardinality. As the above example shows, calculating the Top 100 among 10 thousands is easier than among 10 millions.\nlru_size is a late data optimizing flag. The higher the number, the more late data, but the more memory space is consumed.\nTopNAggregation Registration Operations\nStreams Stream shares many details with Measure except for abandoning field. Stream focuses on high throughput data collection, for example, tracing and logging. The database engine also supports compressing stream entries based on entity, but no encoding process is involved.\nStream Registration Operations\nProperties Property is a schema-less or schema-free data model. That means you DO NOT have to define a schema before writing a Property\nProperty is a standard key-value store. Users could store their metadata or items on a property and get a sequential consistency guarantee. BanyanDB\u0026rsquo;s motivation for introducing such a particular structure is to support most APM scenarios that need to store critical data, especially for a distributed database cluster.\nWe should create a group before creating a property.\nCreating group.\nmetadata:name:swCreating property.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;Property supports a three-level hierarchy, group/name/id, that is more flexible than schemaful data models.\nThe property supports the TTL mechanism. You could set the ttl field to specify the time to live.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;ttl:\u0026#34;1h\u0026#34;\u0026ldquo;General-Service\u0026rdquo; will be dropped after 1 hour. If you want to extend the TTL, you could use the \u0026ldquo;keepalive\u0026rdquo; operation. The \u0026ldquo;lease_id\u0026rdquo; is returned in the apply response. 
You can use get operation to get the property with the lease_id as well.\nlease_id:1\u0026ldquo;General-Service\u0026rdquo; lives another 1 hour.\nYou could Create, Read, Update and Drop a property, and update or drop several tags instead of the entire property.\nProperty Operations\nData Models Data models in BanyanDB derive from some classic data models.\nTimeSeries Model A time series is a series of data points indexed in time order. Most commonly, a time series is a sequence taken at successive equally spaced points in time. Thus it is a sequence of discrete-time data.\nYou can store time series data points through Stream or Measure. Examples of Stream are logs, traces and events. Measure could ingest metrics, profiles, etc.\nKey-Value Model The key-value data model is a subset of the Property data model. Every property has a key \u0026lt;group\u0026gt;/\u0026lt;name\u0026gt;/\u0026lt;id\u0026gt; that identifies a property within a collection. This key acts as the primary key to retrieve the data. You can set it when creating a key. It cannot be changed later because the attribute is immutable.\nThere are several Key-Value pairs in a property, named Tags. You could add, update and drop them based on the tag\u0026rsquo;s key.\nData Retrieval Queries and Writes are used to filter schemaful data models, Stream, Measure or TopNAggregation based on certain criteria, as well as to compute or store new data.\n MeasureService provides Write, Query and TopN StreamService provides Write, Query  IndexRule \u0026amp; IndexRuleBinding An IndexRule indicates which tags are indexed. An IndexRuleBinding binds an index rule to the target resources or the subject. There might be several rule bindings to a single resource, but their effective time range could NOT overlap.\nmetadata:name:trace_idgroup:sw_streamtags:- trace_idtype:TYPE_TREElocation:LOCATION_GLOBALIndexRule supports selecting two distinct kinds of index structures. The INVERTED index is the primary option when users set up an index rule. It\u0026rsquo;s suitable for most tag indexing due to a better memory usage ratio and query performance. When there are many unique tag values here, such as the ID tag and numeric duration tag, the TREE index could be better. This index saves much memory space with high-cardinality data sets.\nMost IndexRule\u0026rsquo;s location is LOCAL which places indices with their indexed data together. IndexRule also provides a GLOBAL location to place some indices on a higher layer of hierarchical structure. This option intends to optimize the full-scan operation for some querying cases of no time range specification, such as finding spans from a trace by trace_id.\nmetadata:name:stream_bindinggroup:sw_streamrules:- trace_id- duration- endpoint_id- status_code- http.method- db.instance- db.type- mq.broker- mq.queue- mq.topic- extended_tagssubject:catalog:CATALOG_STREAMname:swbegin_at:\u0026#39;2021-04-15T01:30:15.01Z\u0026#39;expire_at:\u0026#39;2121-04-15T01:30:15.01Z\u0026#39;IndexRuleBinding binds IndexRules to a subject, Stream or Measure. The time range between begin_at and expire_at is the effective time.\nIndexRule Registration Operations\nIndexRuleBinding Registration Operations\nIndex Granularity In BanyanDB, Stream and Measure have different levels of index granularity.\nFor Measure, the indexed target is a data point with specific tag values. 
The query processor uses the tag values defined in the entity field of the Measure to compose a series ID, which is used to find the several series that match the query criteria. The entity field is a set of tags that defines the unique identity of a time series, and it restricts the tags that can be used as indexed target.\nEach series contains a sequence of data points that share the same tag values. Once the query processor has identified the relevant series, it scans the data points between the desired time range in those series to find the data that matches the query criteria.\nFor example, suppose we have a Measure with the following entity field: {service, operation, instance}. If we get a data point with the following tag values: service=shopping, operation=search, and instance=prod-1, then the query processor would use those tag values to construct a series ID that uniquely identifies the series containing that data point. The query processor would then scan the relevant data points in that series to find the data that matches the query criteria.\nThe side effect of the measure index is that each indexed value has to represent a unique seriesID. This is because the series ID is constructed by concatenating the indexed tag values in the entity field. If two series have the same entity field, they would have the same series ID and would be indistinguishable from one another. This means that if you want to index a tag that is not part of the entity field, you would need to ensure that it is unique across all series. One way to do this would be to include the tag in the entity field, but this may not always be feasible or desirable depending on your use case.\nFor Stream, the indexed target is an element that is a combination of the series ID and timestamp. The Stream query processor uses the time range to find target files. The indexed result points to the target element. The processor doesn\u0026rsquo;t have to scan a series of elements in this time range, which reduces the query time.\nFor example, suppose we have a Stream with the following tags: service, operation, instance, and status_code. If we get a data point with the following tag values: service=shopping, operation=search, instance=prod-1, and status_code=200, and the data point\u0026rsquo;s time is 1:00pm on January 1st, 2022, then the series ID for this data point would be shopping_search_prod-1_200_1641052800, where 1641052800 is the Unix timestamp representing 1:00pm on January 1st, 2022.\nThe indexed target would be the combination of the series ID and timestamp, which in this case would be shopping_search_prod-1_200_1641052800. The Stream query processor would use the time range specified in the query to find target files and then search within those files for the indexed target.\nThe following is a comparison of the indexing granularity, performance, and flexibility of Stream and Measure indices:\n   Indexing Granularity Performance Flexibility     Measure indices are constructed for each series and are based on the entity field of the Measure. Each indexed value has to represent a unique seriesID. Measure index is faster than Stream index. Measure index is less flexible and requires more care when indexing tags that are not part of the entity field.   Stream indices are constructed for each element and are based on the series ID and timestamp. Stream index is slower than Measure index. Stream index is more flexible than Measure index and can index any tag value.    
In general, Measure indices are faster and more efficient, but they require more care when indexing tags that are not part of the entity field. Stream indices, on the other hand, are slower and take up more space, but they can index any tag value and do not have the same side effects as Measure indices.\n","excerpt":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the …","ref":"/docs/skywalking-banyandb/v0.5.0/concept/data-model/","title":"Data Model"},{"body":"Define Service Hierarchy SkyWalking v10 introduces a new concept Service Hierarchy which defines the relationships of existing logically same services in various layers. The concept and design could be found here.\nService Hierarchy Configuration All the relationships defined in the config/hierarchy-definition.yml file. You can customize it according to your own needs. Here is an example:\nhierarchy:MESH:MESH_DP:nameK8S_SERVICE:short-nameMESH_DP:K8S_SERVICE:short-nameGENERAL:K8S_SERVICE:lower-short-name-remove-nsMYSQL:K8S_SERVICE:short-namePOSTGRESQL:K8S_SERVICE:short-nameSO11Y_OAP:K8S_SERVICE:short-nameVIRTUAL_DATABASE:MYSQL:lower-short-name-with-fqdnPOSTGRESQL:lower-short-name-with-fqdnauto-matching-rules:# the name of the upper service is equal to the name of the lower servicename:\u0026#34;{ (u, l) -\u0026gt; u.name == l.name }\u0026#34;# the short name of the upper service is equal to the short name of the lower serviceshort-name:\u0026#34;{ (u, l) -\u0026gt; u.shortName == l.shortName }\u0026#34;# remove the k8s namespace from the lower service short name# this rule is only works on k8s env.lower-short-name-remove-ns:\u0026#34;{ (u, l) -\u0026gt; { if(l.shortName.lastIndexOf(\u0026#39;.\u0026#39;) \u0026gt; 0) return u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf(\u0026#39;.\u0026#39;)); return false; } }\u0026#34;# the short name of the upper remove port is equal to the short name of the lower service with fqdn suffix# this rule is only works on k8s env.lower-short-name-with-fqdn:\u0026#34;{ (u, l) -\u0026gt; { if(u.shortName.lastIndexOf(\u0026#39;:\u0026#39;) \u0026gt; 0) return u.shortName.substring(0, u.shortName.lastIndexOf(\u0026#39;:\u0026#39;)) == l.shortName.concat(\u0026#39;.svc.cluster.local\u0026#39;); return false; } }\u0026#34;layer-levels:# The hierarchy level of the service layer, the level is used to define the order of the service layer for UI presentation.# The level of the upper service should greater than the level of the lower service in `hierarchy` section.MESH:3GENERAL:3SO11Y_OAP:3VIRTUAL_DATABASE:3MYSQL:2POSTGRESQL:2MESH_DP:1K8S_SERVICE:0Hierarchy  The hierarchy of service layers are defined in the hierarchy section. The layers under the specific layer are related lower of the layer. The relation could have a matching rule for auto matching, which are defined in the auto-matching-rules section. The relation without a matching rule should be built through the internal API. All the layers are defined in the file org.apache.skywalking.oap.server.core.analysis.Layers.java. If the hierarchy is not defined, the service hierarchy relationship will not be built. If you want to add a new relationship, you should certainly know they can be matched automatically by Auto Matching Rules. Notice: some hierarchy relations and auto matching rules are only works on k8s env.  Auto Matching Rules  The auto matching rules are defined in the auto-matching-rules section. 
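As a readable excerpt of the flattened config/hierarchy-definition.yml shown above (a sketch; only a few layers and one matching rule are reproduced, and the indentation is inferred from the flattened text):
hierarchy:
  MESH:
    MESH_DP: name
    K8S_SERVICE: short-name
  GENERAL:
    K8S_SERVICE: lower-short-name-remove-ns
auto-matching-rules:
  # the short name of the upper service equals the short name of the lower service
  short-name: "{ (u, l) -> u.shortName == l.shortName }"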
Use Groovy script to define the matching rules, the input parameters are the upper service(u) and the lower service(l) and the return value is a boolean, which are used to match the relation between the upper service(u) and the lower service(l) on the different layers. The default matching rules required the service name configured as SkyWalking default and follow the Showcase. If you customized the service name in any layer, you should customize the related matching rules according your service name rules.  Layer Levels  Define the hierarchy level of the service layer in the layer-levels section. The level is used to define the order of the service layer for UI presentation. The level of the upper service should greater than the level of the lower service in hierarchy section.  ","excerpt":"Define Service Hierarchy SkyWalking v10 introduces a new concept Service Hierarchy which defines the …","ref":"/docs/main/next/en/concepts-and-designs/service-hierarchy-configuration/","title":"Define Service Hierarchy"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. 
Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  
import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-meter/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","excerpt":"Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/opentracing/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. 
Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  
import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-meter/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","excerpt":"Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/opentracing/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. 
Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  
import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-meter/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","excerpt":"Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/opentracing/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. 
Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  
import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-meter/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","excerpt":"Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/opentracing/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to activate the SkyWalking tracer, log4j will output the traceId, if it exists. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   You only need to replace the pattern %T with %T{SW_CTX}.\n  When you use -javaagent to activate the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it exists. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC reporter can forward the collected logs to the SkyWalking OAP server, or the SkyWalking Satellite sidecar. Trace id, segment id, and span id will be attached to logs automatically. 
You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. 
Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  
import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-meter/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","excerpt":"Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","excerpt":"Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/opentracing/","title":"Dependency the toolkit, such as using maven or gradle"},{"body":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI to Kubernetes cluster with default settings specified by their Custom Resource Defines(CRD).\nInstall Operator Follow Operator installation instrument to install the operator.\nDeploy OAP server and UI with default setting Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f default.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] View the UI In order to view the UI from your browser, you should get the external address from the ingress generated by the UI custom resource firstly.\n$ kubectl get ingresses NAME HOSTS ADDRESS PORTS AGE default-ui demo.ui.skywalking \u0026lt;External_IP\u0026gt; 80 33h Edit your local /etc/hosts to append the following host-ip mapping.\ndemo.ui.skywalking \u0026lt;External_IP\u0026gt; Finally, navigate your browser to demo.ui.skywalking to access UI service.\nNotice, please install an ingress controller to your Kubernetes environment.\n","excerpt":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI …","ref":"/docs/skywalking-swck/latest/examples/default-backend/","title":"Deploy OAP server and UI with default settings"},{"body":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI to Kubernetes cluster with default settings specified by their Custom Resource Defines(CRD).\nInstall Operator Follow Operator installation instrument to install the operator.\nDeploy OAP server and UI with default setting Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f default.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] View the UI In order to view the UI from your browser, you should get the external address from the ingress generated by the UI custom resource firstly.\n$ kubectl get ingresses NAME HOSTS ADDRESS PORTS AGE default-ui demo.ui.skywalking \u0026lt;External_IP\u0026gt; 80 33h Edit your local /etc/hosts to append the following host-ip mapping.\ndemo.ui.skywalking 
\u0026lt;External_IP\u0026gt; Finally, navigate your browser to demo.ui.skywalking to access UI service.\nNotice, please install an ingress controller in your Kubernetes environment.\n","excerpt":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI …","ref":"/docs/skywalking-swck/next/examples/default-backend/","title":"Deploy OAP server and UI with default settings"},{"body":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI to a Kubernetes cluster with default settings specified by their Custom Resource Definitions (CRD).\nInstall Operator Follow the Operator installation instructions to install the operator.\nDeploy OAP server and UI with default setting Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f default.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] View the UI In order to view the UI from your browser, you should first get the external address from the ingress generated by the UI custom resource.\n$ kubectl get ingresses NAME HOSTS ADDRESS PORTS AGE default-ui demo.ui.skywalking \u0026lt;External_IP\u0026gt; 80 33h Edit your local /etc/hosts to append the following host-ip mapping.\ndemo.ui.skywalking \u0026lt;External_IP\u0026gt; Finally, navigate your browser to demo.ui.skywalking to access UI service.\nNotice, please install an ingress controller in your Kubernetes environment.\n","excerpt":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI …","ref":"/docs/skywalking-swck/v0.9.0/examples/default-backend/","title":"Deploy OAP server and UI with default settings"},{"body":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment.\nStartup Kubernetes Make sure that you already have a Kubernetes cluster.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nDeploy Rover Please follow the rover-daemonset.yml to deploy the rover in your Kubernetes cluster. Update the comment in the file, which includes two configs:\n Rover docker image: You could use make docker to build an image and upload it to your private registry, or update from the public image. OAP address: Update the OAP address.  Then, you could use kubectl apply -f rover-daemonset.yml to deploy the skywalking-rover into your cluster. It would deploy on each node as a DaemonSet.\n","excerpt":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment. …","ref":"/docs/skywalking-rover/latest/en/setup/deployment/kubernetes/readme/","title":"Deploy on Kubernetes"},{"body":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment.\nStartup Kubernetes Make sure that you already have a Kubernetes cluster.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nDeploy Rover Please follow the rover-daemonset.yml to deploy the rover in your Kubernetes cluster. 
Update the comment in the file, which includes two configs:\n Rover docker image: You could use make docker to build an image and upload it to your private registry, or update from the public image. OAP address: Update the OAP address.  Then, you could use kubectl apply -f rover-daemonset.yml to deploy the skywalking-rover into your cluster. It would deploy on each node as a DaemonSet.\n","excerpt":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment. …","ref":"/docs/skywalking-rover/next/en/setup/deployment/kubernetes/readme/","title":"Deploy on Kubernetes"},{"body":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment.\nStartup Kubernetes Make sure that you already have a Kubernetes cluster.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nDeploy Rover Please follow the rover-daemonset.yml to deploy the rover in your Kubernetes cluster. Update the comment in the file, which includes two configs:\n Rover docker image: You could use make docker to build an image and upload it to your private registry, or update from the public image. OAP address: Update the OAP address.  Then, you could use kubectl apply -f rover-daemonset.yml to deploy the skywalking-rover into your cluster. It would deploy on each node as a DaemonSet.\n","excerpt":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment. …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/deployment/kubernetes/readme/","title":"Deploy on Kubernetes"},{"body":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment.\nInstall We recommend installing the Satellite by Helm. Follow the command below; it starts the latest release version of the SkyWalking Backend, UI and Satellite.\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 \\  --set satellite.enabled=true \\  --set satellite.image.tag=v0.4.0 Change Address After the Satellite and Backend have started, you need to change the address used by the agent/node. Then the Satellite can load balance the requests from the agent/node to the OAP backend.\nFor example, in the Java Agent, you should change the value of the collector.backend_service property to: skywalking-satellite.${SKYWALKING_RELEASE_NAMESPACE}:11800.\n","excerpt":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment. 
…","ref":"/docs/skywalking-satellite/latest/en/setup/examples/deploy/kubernetes/readme/","title":"Deploy on Kubernetes"},{"body":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment.\nInstall We recommend install the Satellite by helm, follow command below, it could start the latest release version of SkyWalking Backend, UI and Satellite.\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 \\  --set satellite.enabled=true \\  --set satellite.image.tag=v0.4.0 Change Address After the Satellite and Backend started, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to this: skywalking-satellite.${SKYWALKING_RELEASE_NAMESPACE}:11800.\n","excerpt":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment. …","ref":"/docs/skywalking-satellite/next/en/setup/examples/deploy/kubernetes/readme/","title":"Deploy on Kubernetes"},{"body":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment.\nInstall We recommend install the Satellite by helm, follow command below, it could start the latest release version of SkyWalking Backend, UI and Satellite.\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 \\  --set satellite.enabled=true \\  --set satellite.image.tag=v0.4.0 Change Address After the Satellite and Backend started, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to this: skywalking-satellite.${SKYWALKING_RELEASE_NAMESPACE}:11800.\n","excerpt":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment. 
…","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/deploy/kubernetes/readme/","title":"Deploy on Kubernetes"},{"body":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows instance.\nInstall Download Download the latest release version from SkyWalking Release Page.\nChange OAP Server addresses Update the OAP Server address in the config file, then satellite could connect to them and use round-robin policy for load-balance server before send each request.\nSupport two ways to locate the server list, using finder_type to change the type to find:\n static: Define the server address list. kubernetes: Define kubernetes pod/service/endpoint, it could be found addresses and dynamic update automatically.  Static server list You could see there define two server address and split by \u0026ldquo;,\u0026rdquo;.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:static}# The gRPC server address (default localhost:11800).server_addr:${SATELLITE_GRPC_CLIENT:127.0.0.1:11800,127.0.0.2:11800}# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Kubernetes selector Using kubernetes_config to define the address\u0026rsquo;s finder.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:kubernetes}# The kubernetes config to lookup addresseskubernetes_config:# The kubernetes API server address, If not define means using in kubernetes mode to connectapi_server:http://localhost:8001/# The kind of apikind:endpoints# Support to lookup namespacesnamespaces:- default# The kind selectorselector:label:app=productpage# How to get the address exported portextra_port:port:9080# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Start Satellite Execute the script bin/startup.sh(linux) or bin/startup.cmd(windows) to start. Then It could start these port:\n gRPC port(11800): listen the gRPC request, It could handle request from SkyWalking Agent protocol and Envoy ALS/Metrics protocol. Prometheus(1234): listen the HTTP request, It could get all SO11Y metrics from /metrics endpoint using Prometheus format.  Change Address After the satellite start, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to the satellite gRPC port.\n","excerpt":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows …","ref":"/docs/skywalking-satellite/latest/en/setup/examples/deploy/linux-windows/readme/","title":"Deploy on Linux and Windows"},{"body":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows instance.\nInstall Download Download the latest release version from SkyWalking Release Page.\nChange OAP Server addresses Update the OAP Server address in the config file, then satellite could connect to them and use round-robin policy for load-balance server before send each request.\nSupport two ways to locate the server list, using finder_type to change the type to find:\n static: Define the server address list. kubernetes: Define kubernetes pod/service/endpoint, it could be found addresses and dynamic update automatically.  Static server list You could see there define two server address and split by \u0026ldquo;,\u0026rdquo;.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:static}# The gRPC server address (default localhost:11800).server_addr:${SATELLITE_GRPC_CLIENT:127.0.0.1:11800,127.0.0.2:11800}# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Kubernetes selector Using kubernetes_config to define the address\u0026rsquo;s finder.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:kubernetes}# The kubernetes config to lookup addresseskubernetes_config:# The kubernetes API server address, If not define means using in kubernetes mode to connectapi_server:http://localhost:8001/# The kind of apikind:endpoints# Support to lookup namespacesnamespaces:- default# The kind selectorselector:label:app=productpage# How to get the address exported portextra_port:port:9080# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Start Satellite Execute the script bin/startup.sh(linux) or bin/startup.cmd(windows) to start. Then It could start these port:\n gRPC port(11800): listen the gRPC request, It could handle request from SkyWalking Agent protocol and Envoy ALS/Metrics protocol. Prometheus(1234): listen the HTTP request, It could get all SO11Y metrics from /metrics endpoint using Prometheus format.  Change Address After the satellite start, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to the satellite gRPC port.\n","excerpt":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows …","ref":"/docs/skywalking-satellite/next/en/setup/examples/deploy/linux-windows/readme/","title":"Deploy on Linux and Windows"},{"body":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows instance.\nInstall Download Download the latest release version from SkyWalking Release Page.\nChange OAP Server addresses Update the OAP Server address in the config file, then satellite could connect to them and use round-robin policy for load-balance server before send each request.\nSupport two ways to locate the server list, using finder_type to change the type to find:\n static: Define the server address list. kubernetes: Define kubernetes pod/service/endpoint, it could be found addresses and dynamic update automatically.  
Static server list You could see there define two server address and split by \u0026ldquo;,\u0026rdquo;.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:static}# The gRPC server address (default localhost:11800).server_addr:${SATELLITE_GRPC_CLIENT:127.0.0.1:11800,127.0.0.2:11800}# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Kubernetes selector Using kubernetes_config to define the address\u0026rsquo;s finder.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:kubernetes}# The kubernetes config to lookup addresseskubernetes_config:# The kubernetes API server address, If not define means using in kubernetes mode to connectapi_server:http://localhost:8001/# The kind of apikind:endpoints# Support to lookup namespacesnamespaces:- default# The kind selectorselector:label:app=productpage# How to get the address exported portextra_port:port:9080# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Start Satellite Execute the script bin/startup.sh(linux) or bin/startup.cmd(windows) to start. Then It could start these port:\n gRPC port(11800): listen the gRPC request, It could handle request from SkyWalking Agent protocol and Envoy ALS/Metrics protocol. Prometheus(1234): listen the HTTP request, It could get all SO11Y metrics from /metrics endpoint using Prometheus format.  Change Address After the satellite start, need to change the address from agent/node. 
Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to the satellite gRPC port.\n","excerpt":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/deploy/linux-windows/readme/","title":"Deploy on Linux and Windows"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/latest/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/next/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. 
Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in Kubernetes"},{"body":"Deploy SkyWalking backend and UI in kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. 
Most SkyWalking OAP settings are controlled through System environment variables when apply helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy oap and ui to a kubernetes cluster.\nPlease read the Readme file.\n","excerpt":"Deploy SkyWalking backend and UI in kubernetes Before you read Kubernetes deployment guidance, …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-k8s/","title":"Deploy SkyWalking backend and UI in kubernetes"},{"body":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","excerpt":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries …","ref":"/docs/main/latest/en/api/query-protocol-deprecated/","title":"Deprecated Query Protocol"},{"body":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nV1 APIs V1 APIs were introduced since 6.x. 
Now they are a shell to V2 APIs since 9.0.0.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","excerpt":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries …","ref":"/docs/main/next/en/api/query-protocol-deprecated/","title":"Deprecated Query Protocol"},{"body":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. 
You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","excerpt":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries …","ref":"/docs/main/v9.5.0/en/api/query-protocol-deprecated/","title":"Deprecated Query Protocol"},{"body":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. 
Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","excerpt":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries …","ref":"/docs/main/v9.6.0/en/api/query-protocol-deprecated/","title":"Deprecated Query Protocol"},{"body":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. 
You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","excerpt":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries …","ref":"/docs/main/v9.7.0/en/api/query-protocol-deprecated/","title":"Deprecated Query Protocol"},{"body":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One mmap-queue has a directory to store the whole data. The queue directory is made up of many segments and 1 metafile. This is originally implemented by bigqueue project, we changed it a little for fitting the Satellite project requirements.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  Meta Metadata only needs 80B to store the Metadata for the pipe. 
But for memory alignment, it takes at least one memory page size, which is generally 4K.\n[ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ] [metaVersion][ ID ][ offset][ ID ][ offset][ ID ][ offset][ ID ][ offset][capacity] [metaVersion][writing offset][watermark offset][committed offset][reading offset][capacity] Transforming BenchmarkTest Test machine: macbook pro 2018\nModel Name:\tMacBook Pro Model Identifier:\tMacBookPro15,1 Processor Name:\t6-Core Intel Core i7 Processor Speed:\t2.2 GHz Number of Processors:\t1 Total Number of Cores:\t6 L2 Cache (per Core):\t256 KB L3 Cache:\t9 MB Hyper-Threading Technology:\tEnabled Memory:\t16 GB System Firmware Version:\t1554.60.15.0.0 (iBridge: 18.16.13030.0.0,0 push operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueue BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 27585\t43559 ns/op\t9889 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 39326\t31773 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 56770\t22990 ns/op\t9816 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 43803\t29778 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 16870\t80576 ns/op\t18944 B/op\t10 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 36922\t39085 ns/op\t9889 B/op\t9 allocs/op PASS push and pop operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueueAndDequeue BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 21030\t60728 ns/op\t28774 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 30327\t41274 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 32738\t37923 ns/op\t28700 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 28209\t41169 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 14677\t89637 ns/op\t54981 B/op\t43 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 22228\t54963 ns/op\t28774 B/op\t42 allocs/op PASS ","excerpt":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/mmap-queue/","title":"Design"},{"body":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One mmap-queue has a directory to store the whole data. The queue directory is made up of many segments and 1 metafile. This is originally implemented by bigqueue project, we changed it a little for fitting the Satellite project requirements.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  
Meta Metadata only needs 80B to store the Metadata for the pipe. But for memory alignment, it takes at least one memory page size, which is generally 4K.\n[ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ] [metaVersion][ ID ][ offset][ ID ][ offset][ ID ][ offset][ ID ][ offset][capacity] [metaVersion][writing offset][watermark offset][committed offset][reading offset][capacity] Transforming BenchmarkTest Test machine: macbook pro 2018\nModel Name:\tMacBook Pro Model Identifier:\tMacBookPro15,1 Processor Name:\t6-Core Intel Core i7 Processor Speed:\t2.2 GHz Number of Processors:\t1 Total Number of Cores:\t6 L2 Cache (per Core):\t256 KB L3 Cache:\t9 MB Hyper-Threading Technology:\tEnabled Memory:\t16 GB System Firmware Version:\t1554.60.15.0.0 (iBridge: 18.16.13030.0.0,0 push operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueue BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 27585\t43559 ns/op\t9889 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 39326\t31773 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 56770\t22990 ns/op\t9816 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 43803\t29778 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 16870\t80576 ns/op\t18944 B/op\t10 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 36922\t39085 ns/op\t9889 B/op\t9 allocs/op PASS push and pop operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueueAndDequeue BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 21030\t60728 ns/op\t28774 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 30327\t41274 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 32738\t37923 ns/op\t28700 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 28209\t41169 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 14677\t89637 ns/op\t54981 B/op\t43 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 22228\t54963 ns/op\t28774 B/op\t42 allocs/op PASS ","excerpt":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/mmap-queue/","title":"Design"},{"body":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One mmap-queue has a directory to store the whole data. The queue directory is made up of many segments and 1 metafile. This is originally implemented by bigqueue project, we changed it a little for fitting the Satellite project requirements.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. 
Meta: The purpose of meta is to find the data that the consumer needs.  Meta Metadata only needs 80B to store the Metadata for the pipe. But for memory alignment, it takes at least one memory page size, which is generally 4K.\n[ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ] [metaVersion][ ID ][ offset][ ID ][ offset][ ID ][ offset][ ID ][ offset][capacity] [metaVersion][writing offset][watermark offset][committed offset][reading offset][capacity] Transforming BenchmarkTest Test machine: macbook pro 2018\nModel Name:\tMacBook Pro Model Identifier:\tMacBookPro15,1 Processor Name:\t6-Core Intel Core i7 Processor Speed:\t2.2 GHz Number of Processors:\t1 Total Number of Cores:\t6 L2 Cache (per Core):\t256 KB L3 Cache:\t9 MB Hyper-Threading Technology:\tEnabled Memory:\t16 GB System Firmware Version:\t1554.60.15.0.0 (iBridge: 18.16.13030.0.0,0 push operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueue BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 27585\t43559 ns/op\t9889 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 39326\t31773 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 56770\t22990 ns/op\t9816 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 43803\t29778 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 16870\t80576 ns/op\t18944 B/op\t10 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 36922\t39085 ns/op\t9889 B/op\t9 allocs/op PASS push and pop operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueueAndDequeue BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 21030\t60728 ns/op\t28774 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 30327\t41274 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 32738\t37923 ns/op\t28700 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 28209\t41169 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 14677\t89637 ns/op\t54981 B/op\t43 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 22228\t54963 ns/op\t28774 B/op\t42 allocs/op PASS ","excerpt":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One …","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/mmap-queue/","title":"Design"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. 
Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/latest/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. 
(1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/next/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. 
SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. 
SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  
See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. 
Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. 
Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. 
By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. 
SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","excerpt":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project.\n Support various E2E testing requirements in SkyWalking main repository with other ecosystem repositories. Support both docker-compose and KinD to orchestrate the tested services under different environments. Be language-independent as much as possible, users only need to configure YAMLs and run commands, without writing code.  Non-Goal  This framework is not involved with the build process, i.e. it won’t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn’t take the plugin tests into account, at least for now;  ","excerpt":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project. …","ref":"/docs/skywalking-infra-e2e/latest/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project.\n Support various E2E testing requirements in SkyWalking main repository with other ecosystem repositories. Support both docker-compose and KinD to orchestrate the tested services under different environments. Be language-independent as much as possible, users only need to configure YAMLs and run commands, without writing code.  Non-Goal  This framework is not involved with the build process, i.e. it won’t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn’t take the plugin tests into account, at least for now;  ","excerpt":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project. …","ref":"/docs/skywalking-infra-e2e/next/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project.\n Support various E2E testing requirements in SkyWalking main repository with other ecosystem repositories. 
Support both docker-compose and KinD to orchestrate the tested services under different environments. Be language-independent as much as possible, users only need to configure YAMLs and run commands, without writing code.  Non-Goal  This framework is not involved with the build process, i.e. it won’t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn’t take the plugin tests into account, at least for now;  ","excerpt":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project. …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light Weight. SkyWalking Satellite has a limited cost for resources and high-performance because of the requirements of the sidecar deployment model.\n  Pluggability. SkyWalking Satellite core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking Satellite can run in multiple environments, including:\n Use traditional deployment as a daemon process to collect data. Use cloud services as a sidecar, such as in the Kubernetes platform.    Interoperability. Observability is a big landscape, SkyWalking is impossible to support all, even by its community. So SkyWalking Satellite is compatible with many protocols, including:\n SkyWalking protocol (WIP) Prometheus protocol.    ","excerpt":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light Weight. SkyWalking Satellite has a limited cost for resources and high-performance because of the requirements of the sidecar deployment model.\n  Pluggability. SkyWalking Satellite core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking Satellite can run in multiple environments, including:\n Use traditional deployment as a daemon process to collect data. Use cloud services as a sidecar, such as in the Kubernetes platform.    Interoperability. Observability is a big landscape, SkyWalking is impossible to support all, even by its community. So SkyWalking Satellite is compatible with many protocols, including:\n SkyWalking protocol (WIP) Prometheus protocol.    ","excerpt":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light Weight. SkyWalking Satellite has a limited cost for resources and high-performance because of the requirements of the sidecar deployment model.\n  Pluggability. SkyWalking Satellite core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. 
SkyWalking Satellite can run in multiple environments, including:\n Use traditional deployment as a daemon process to collect data. Use cloud services as a sidecar, such as in the Kubernetes platform.    Interoperability. Observability is a big landscape, SkyWalking is impossible to support all, even by its community. So SkyWalking Satellite is compatible with many protocols, including:\n SkyWalking protocol (WIP) Prometheus protocol.    ","excerpt":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light …","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/project-goals/","title":"Design Goals"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. 
When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. 
Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data, for example by following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the data is already encrypted in user space before the Linux kernel sees it. In the figure above, the application usually transmits SSL data through a third-party library (such as OpenSSL). In this case, the Linux API can only see the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through a uprobe: to stay compatible with multiple languages, use a uprobe to capture the data before it is encrypted for sending, or after it is decrypted on receiving. In this way, we can get the original data. Associate with the socket: we can then associate the unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods that carry the buffer data for a socket.\nFollowing the documentation, we can intercept these two methods, whose signatures are almost identical to the corresponding API in Linux. The source code of the SSL structure in OpenSSL shows that the socket FD exists in the BIO object of the SSL structure, and we can get it by its offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so it must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between a process and the external environment (other Pods or services). Additionally, it can identify the direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are internal processes of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). We can also see in this figure that the line between the Envoy and Python applications is bidirectional, because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the network monitoring metrics between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. 
If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total amount of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket close counter.   Retransmit Counter Millisecond The socket retransmit packet counter.   Drop Counter Millisecond The socket drop packet counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-helm.git cd skywalking-helm cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. 
The network profiling feature was released in version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, SkyWalking Rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open the SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: the lack of context between layers in the network stack. These are the cases where eBPF really starts to help with debugging and productivity, in ways that the existing service mesh and Envoy telemetry cannot. Then, we researched how eBPF can be applied to common communication patterns, such as TLS. Finally, we demonstrated an implementation of this approach with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/latest/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitoring tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  
Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  
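As a concrete illustration of the kprobe approach described above, the following is a minimal libbpf-style C sketch that attaches a kprobe to the kernel function tcp_sendmsg and counts calls and bytes per socket. It is not taken from SkyWalking Rover\u0026rsquo;s source; the map name, key choice, and struct layout are invented for this example, and a real collector would also need the receive path, connection lifecycle events, and user-space export of the map contents.

#include <vmlinux.h>                 /* kernel type definitions generated by bpftool */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct sock_stats {
    __u64 calls;                     /* number of send operations observed */
    __u64 bytes;                     /* total bytes submitted for sending */
};

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 10240);
    __type(key, __u64);              /* kernel socket pointer used as an opaque id */
    __type(value, struct sock_stats);
} send_stats SEC(".maps");

SEC("kprobe/tcp_sendmsg")
int BPF_KPROBE(trace_tcp_sendmsg, struct sock *sk, struct msghdr *msg, size_t size)
{
    __u64 key = (__u64)sk;
    struct sock_stats zero = {}, *stats;

    stats = bpf_map_lookup_elem(&send_stats, &key);
    if (!stats) {
        /* first time we see this socket: create an empty entry */
        bpf_map_update_elem(&send_stats, &key, &zero, BPF_NOEXIST);
        stats = bpf_map_lookup_elem(&send_stats, &key);
        if (!stats)
            return 0;
    }
    __sync_fetch_and_add(&stats->calls, 1);
    __sync_fetch_and_add(&stats->bytes, size);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";

A user-space agent (the role SkyWalking Rover plays) would then periodically read such maps, attribute each socket to a process and remote address, and turn the raw counts into higher-level metrics.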
Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-helm.git cd skywalking-helm cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/next/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. 
Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. 
However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  
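For illustration only (these type names and layouts are invented here, not taken from SkyWalking Rover\u0026rsquo;s actual data model), the two metric types just described can be thought of as C structures roughly like the following, with the histogram using power-of-two buckets:

#include <stdint.h>

struct counter {
    uint64_t count;         /* execution count in the period */
    uint64_t bytes;         /* total packet size in bytes */
    uint64_t exec_time_ns;  /* total execution duration in nanoseconds */
};

#define NUM_BUCKETS 20

struct histogram {
    /* buckets[i] counts samples whose value falls roughly in [2^i, 2^(i+1)) */
    uint64_t buckets[NUM_BUCKETS];
};

/* place one observed value (for example a latency) into its bucket */
static inline void histogram_observe(struct histogram *h, uint64_t value)
{
    int i = 0;
    while (value > 1 && i < NUM_BUCKETS - 1) {
        value >>= 1;
        i++;
    }
    h->buckets[i]++;
}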
Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. 
When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/v9.3.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  
Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  
Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
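Before running the installation commands that follow, it can be worth a quick sanity check that the tooling is in place. These checks are a suggestion and are not part of the original walkthrough; they simply confirm that istioctl is on the PATH and that kubectl can reach the target cluster:
istioctl version
kubectl cluster-info
kubectl get nodes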
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/v9.4.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. 
Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. 
However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  
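As a rough illustration of the histogram type (again using bpftrace purely as an assumed demo tool, not Rover's own code), a bucketed distribution can be built directly in the kernel. The one-liner below sorts the size of every TCP write into power-of-two buckets, which is the same "distribution of data in the buckets" idea:
# Illustrative sketch only; requires bpftrace and root privileges.
sudo bpftrace -e 'kprobe:tcp_sendmsg { @write_size_bytes = hist(arg2); }'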
Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. 
When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/v9.5.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  
Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  
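To make the uprobe side of the comparison above concrete, here is a minimal, illustrative bpftrace sketch (not SkyWalking Rover's implementation) that attaches a user-space probe to OpenSSL's SSL_write, the point where the payload is still unencrypted. The libssl path is an assumption and varies by distribution and OpenSSL version:
# Illustrative sketch only; adjust the library path for your system.
# SSL_write(SSL *ssl, const void *buf, int num) -- arg2 is the number of plaintext bytes.
sudo bpftrace -e 'uprobe:/usr/lib/x86_64-linux-gnu/libssl.so.3:SSL_write { @ssl_write_bytes[comm] = sum(arg2); }'
This is the kind of user-space hook the Protocol and TLS discussion below relies on to see plaintext before it is encrypted.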
Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
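Once the installation commands that follow have been applied, a quick way to confirm that sidecar injection worked is to check the namespace label and that each bookinfo pod reports two containers (the application plus the Envoy sidecar). These verification commands are a suggestion, not part of the original demo:
kubectl get namespace default --show-labels
kubectl get pods -n default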
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/v9.6.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. 
Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. 
However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  
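For the counter type, a comparable illustrative sketch (bpftrace again assumed purely for demonstration, not Rover's code) keeps both an execution count and a byte total per process, mirroring the Count and Bytes fields described above:
# Illustrative sketch only; requires bpftrace and root privileges.
sudo bpftrace -e 'kprobe:tcp_sendmsg { @write_count[comm] = count(); @write_bytes[comm] = sum(arg2); }'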
Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-helm.git cd skywalking-helm cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. 
When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","excerpt":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use …","ref":"/docs/main/v9.7.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/","title":"Diagnose Service Mesh Network Performance with eBPF"},{"body":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","excerpt":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/how-to-disable-plugin/","title":"Disable plugins"},{"body":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... 
skywalking-agent.jar ","excerpt":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-disable-plugin/","title":"Disable plugins"},{"body":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","excerpt":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/how-to-disable-plugin/","title":"Disable plugins"},{"body":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","excerpt":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/how-to-disable-plugin/","title":"Disable plugins"},{"body":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","excerpt":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/how-to-disable-plugin/","title":"Disable plugins"},{"body":"","excerpt":"","ref":"/docs/","title":"Documentation"},{"body":"Download Use the links below to download Apache SkyWalking releases from one of our mirrors. Don’t forget to verify the files downloaded. Please note that only source code releases are official Apache releases, binary distributions are just for end user convenience. Foundations  Agents  Operation  Database  Tools  Archived Releases  Docker images   Foundations SkyWalking APM    SkyWalking is an Observability Analysis Platform and Application Performance Management system.\n Source   v9.7.0 | Nov. 28th, 2023 [src] [asc] [sha512]  v9.6.0 | Sep. 4th, 2023 [src] [asc] [sha512]  v9.5.0 | Jun. 16th, 2023 [src] [asc] [sha512]  v9.4.0 | Mar. 12th, 2023 [src] [asc] [sha512]  v9.3.0 | Dec. 4th, 2022 [src] [asc] [sha512]  v9.2.0 | Sep. 2nd, 2022 [src] [asc] [sha512]  v9.1.0 | Jun. 11th, 2022 [src] [asc] [sha512]  v9.0.0 | Apr. 9th, 2022 [src] [asc] [sha512]      Distribution   v9.7.0 | Nov. 28th, 2023 [tar] [asc] [sha512]  v9.6.0 | Sep. 4th, 2023 [tar] [asc] [sha512]  v9.5.0 | Jun. 16th, 2023 [tar] [asc] [sha512]  v9.4.0 | Mar. 12th, 2023 [tar] [asc] [sha512]  v9.3.0 | Dec. 4th, 2022 [tar] [asc] [sha512]  v9.2.0 | Sep. 2nd, 2022 [tar] [asc] [sha512]  v9.1.0 | Jun. 10th, 2022 [tar] [asc] [sha512]  v9.0.0 | Apr. 
9th, 2022 [tar] [asc] [sha512]        Booster UI    SkyWalking\u0026#39;s primary UI. All source codes have been included in the main repo release.\n Included in the main repo release     Grafana Plugins    SkyWalking Grafana Plugins provide extensions to visualize topology on Grafana.\n Source   0.1.0 | Sep. 12th, 2023 [src] [asc] [sha512]      Distribution   0.1.0 | Sep. 12th, 2023 [Install via Grafana Plugins]        SkyWalking Website    All source codes of https://skywalking.apache.org\n Deployed       Agents Java Agent    The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event/profiling abilities for Java projects.\n Source   9.2.0 | Apr. 1st, 2024 [src] [asc] [sha512]  9.1.0 | Dec. 4th, 2023 [src] [asc] [sha512]  9.0.0 | Aug. 31st, 2023 [src] [asc] [sha512]      Distribution   v9.2.0 | Apr. 1st, 2024 [tar] [asc] [sha512]  v9.1.0 | Dec. 4th, 2023 [tar] [asc] [sha512]  v9.0.0 | Aug. 31st, 2023 [tar] [asc] [sha512]        Python Agent    The Python Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/profiling abilities for Python projects.\n Source   v1.0.1 | Apr. 29th, 2023 [src] [asc] [sha512]      Distribution   v1.0.1 | Apr. 29th, 2023 [Install via pip]        Go Agent    The Go Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Golang projects.\n Source   v0.4.0 | Feb. 27th, 2024 [src] [asc] [sha512]      Distribution   v0.4.0 | Feb. 27th, 2024 [tar] [asc] [sha512]        NodeJS Agent    The NodeJS Agent for Apache SkyWalking, which provides the native tracing abilities for NodeJS projects.\n Source   v0.7.0 | Nov. 8th, 2023 [src] [asc] [sha512]      Distribution   v0.7.0 | Nov. 8th, 2023 [Install via npm]        SkyWalking Rust    The Rust Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Rust projects.\n Source   v0.8.0 | Aug. 2nd, 2023 [src] [asc] [sha512]      Distribution   v0.8.0 | Aug. 2nd, 2023 [Install via crates.io]        SkyWalking PHP    The PHP Agent for Apache SkyWalking, which provides the native tracing abilities for PHP projects.\n Source   v0.7.0 | Sep. 28th, 2023 [tar] [asc] [sha512]      Distribution   v0.7.0 | Sep. 28th, 2023 [Install via PECL]        Client JavaScript    Apache SkyWalking Client-side JavaScript exception and tracing library.\n Source   v0.11.0 | Mar. 18th, 2024 [src] [asc] [sha512]      Distribution   v0.11.0 | Mar. 18th, 2024 [Install via npm]        Nginx LUA Agent    SkyWalking Nginx Agent provides the native tracing capability for Nginx powered by Nginx LUA module.\n Source   v0.6.0 | Dec. 25th, 2021 [src] [asc] [sha512]      Distribution   v0.6.0 | Dec. 25th, 2021 [Install via luarocks]        Kong Agent    SkyWalking Kong Agent provides the native tracing capability.\n Source   v0.2.0 | Jan. 9th, 2022 [src] [asc] [sha512]      Distribution   v0.2.0 | Jan. 9th, 2022 [Install via luarocks]        SkyWalking Satellite    A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs.\n Source   v1.2.0 | Jun. 25th, 2023 [src] [asc] [sha512]      Distribution   v1.2.0 | Jun. 25th, 2023 [tar] [asc] [sha512]        Kubernetes Event Exporter    Watch, filter, and send Kubernetes events into Apache SkyWalking.\n Source   v1.0.0 | Apr. 25th, 2022 [src] [asc] [sha512]      Distribution   v1.0.0 | Apr. 
25th, 2022 [tar] [asc] [sha512]        SkyWalking Rover    Metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\n Source   v0.6.0 | Mar. 31th, 2024 [src] [asc] [sha512]      Distribution   v0.6.0 | Mar. 31th, 2024 [tar] [asc] [sha512]          Operation SkyWalking CLI    SkyWalking CLI is a command interaction tool for the SkyWalking user or OPS team.\n Source   v0.13.0 | Dec. 4th, 2023 [src] [asc] [sha512]      Distribution   v0.13.0 | Dec. 4th, 2023 [tar] [asc] [sha512]        Kubernetes Helm    SkyWalking Kubernetes Helm repository provides ways to install and configure SkyWalking in a Kubernetes cluster. The scripts are written in Helm 3.\n Source   v4.5.0 | Jul. 16th, 2023 [src] [asc] [sha512]         SkyWalking Cloud on Kubernetes    A bridge project between Apache SkyWalking and Kubernetes.\n Source   v0.9.0 | Mar. 4th, 2024 [src] [asc] [sha512]      Distribution   v0.9.0 | Mar. 4th, 2024 [tar] [asc] [sha512]          Database BanyanDB Server(BanyanD)    The BanyanDB Server\n Source   v0.5.0 | Oct 23th, 2023 [src] [asc] [sha512]      Distribution   v0.5.0 | Oct 23th, 2023 [tar] [asc] [sha512]        BanyanDB Java Client    The client implementation for SkyWalking BanyanDB in Java\n Source   v0.5.0 | Sep. 28th, 2023 [src] [asc] [sha512]      Distribution   v0.5.0 | Sep. 18th, 2023 [Install via maven]        BanyanDB Helm    BanyanDB Helm repository provides ways to install and configure BanyanDB. The scripts are written in Helm 3.\n Source   v0.1.0 | Sep. 25th, 2023 [src] [asc] [sha512]           Support tools for development and testing SkyWalking Eyes    A full-featured license tool to check and fix license headers and resolve dependencies\u0026#39; licenses.\n Source   v0.6.0 | Apr. 12th, 2024 [src] [asc] [sha512]      Distribution   v0.6.0 | Apr. 12th, 2024 [tar] [asc] [sha512]        SkyWalking Infra E2E    An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease.\n Source   v1.3.0 | Nov. 13th, 2023 [src] [asc] [sha512]      Distribution   v1.3.0 | Nov. 13th, 2023 [tar] [asc] [sha512]          Archived Releases    Older releases are not recommended for new users, because they are not maintained, but you still can find them(source codes and binaries) if you have specific reasons. Find all SkyWalking releases in the Archive repository.  Archive incubating repository hosts older releases when SkyWalking was an incubator project.  Docker Images for convenience SkyWalking OAP Server    This image would start up SkyWalking OAP server only.\nDocker Image     SkyWalking UI Image    This image would start up SkyWalking UI only.\nDocker Image     SkyWalking Kubernetes Helm    SkyWalking Kubernetes Helm repository provides ways to install and configure SkyWalking in a Kubernetes cluster. 
The scripts are written in Helm 3.\nDocker Image     SkyWalking Cloud on Kubernetes    A platform for SkyWalking users that provisions, upgrades, and maintains SkyWalking components, and makes them work natively on Kubernetes.\nDocker Image     SkyWalking Java Agent    The Docker image for Java users to conveniently use the SkyWalking agent in containerized scenarios.\nDocker Image     SkyWalking Python Agent    The Docker image for Python users to conveniently use the SkyWalking agent in containerized scenarios.\nDocker Image     SkyWalking Satellite    A lightweight collector/sidecar that can be deployed close to the target monitored system to collect metrics, traces, and logs.\nDocker Image     SkyWalking CLI    SkyWalking CLI is a command interaction tool for the SkyWalking user or OPS team.\nDocker Image     SkyWalking Eyes Image    A full-featured license tool to check and fix license headers and resolve dependencies\u0026#39; licenses.\nDocker Image     SkyWalking Kubernetes Event Exporter    Watch, filter, and send Kubernetes events into the Apache SkyWalking backend.\nDocker Image     SkyWalking Rover    Metrics collector and eBPF-based profiler for C, C\u0026#43;\u0026#43;, Golang, and Rust.\nDocker Image         Verify the releases It is essential that you verify the integrity of the downloaded files using the PGP or SHA signatures. Please download the KEYS file as well as the .asc/.sha512 signature files for the relevant distribution. It is recommended to get these files from the main distribution directory and not from the mirrors.\n Verify using GPG/PGP Download the PGP KEYS file and the release with its .asc signature file. Then:\n# GPG verification gpg --import KEYS gpg --verify apache-skywalking-apm-***.asc apache-skywalking-apm-***   Verify using SHA512 Download the release with its .sha512 signature file. Then:\n# SHA-512 verification shasum -a 512 apache-skywalking-apm-***    ","excerpt":"Download Use the links below to download Apache SkyWalking releases from one of our mirrors. Don’t …","ref":"/downloads/","title":"Downloads"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. 
Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/latest/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   
alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/next/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables. At the same time, some of them support dynamic settings from upstream management system.\nCurrently, SkyWalking supports the 2 types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configVaule} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. 
Overrides receiver-trace/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key that corresponds to a group sub config items. A sub config item is a key value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kuberbetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configVaule} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. 
Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kuberbetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. 
Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. 
Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. 
Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. 
Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. 
The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. 
If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","excerpt":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config/","title":"Dynamic Configuration"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/next/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in apollo are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as Dynamic Configuration Center …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configVaule    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). 
To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-apollo/","title":"Dynamic Configuration Apollo Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/next/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 
1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /, see: https://www.consul.io/docs/dynamic-app-config/kv#using-consul-kv\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If use Consul UI we can see keys organized like folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as Dynamic Configuration Center …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /, see: https://www.consul.io/docs/dynamic-app-config/kv#using-consul-kv\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /, see: https://www.consul.io/docs/dynamic-app-config/kv#using-consul-kv\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 
1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-consul/","title":"Dynamic Configuration Consul Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/next/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as Dynamic Configuration Center (DCC). 
To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Only the v3 protocol is supported since 8.7.0.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as Dynamic Configuration Center …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). 
To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","excerpt":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-etcd/","title":"Dynamic Configuration Etcd Implementation"},{"body":"Dynamic Configuration Kuberbetes Configmap Implementation configmap is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. 
Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs is configmap data items as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kuberbetes Configmap Implementation configmap is also supported as Dynamic …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kuberbetes Configmap Implementation"},{"body":"Dynamic Configuration Kuberbetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. 
These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kuberbetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kuberbetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. 
we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/next/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","excerpt":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-configmap/","title":"Dynamic Configuration Kubernetes Configmap Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/next/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","excerpt":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-nacos/","title":"Dynamic Configuration Nacos Implementation"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/next/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. 
The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. 
The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. 
The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","excerpt":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-service/","title":"Dynamic Configuration Service, DCS"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/latest/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/next/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as Dynamic Configuration …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","excerpt":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-zookeeper/","title":"Dynamic Configuration Zookeeper Implementation"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. 
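Stepping back to the ZooKeeper implementation covered in the preceding entries: its flattened configuration corresponds to roughly this application.yml fragment, and the trailing comments sketch how a single config item maps onto a znode (values exactly as documented above):

```yaml
configuration:
  selector: ${SW_CONFIGURATION:zookeeper}
  zookeeper:
    period: ${SW_CONFIG_ZK_PERIOD:60} # Unit seconds, sync period. Default fetch every 60 seconds.
    namespace: ${SW_CONFIG_ZK_NAMESPACE:/default}
    hostPort: ${SW_CONFIG_ZK_HOST_PORT:localhost:2181}
    # Retry Policy
    baseSleepTimeMs: ${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000} # initial amount of time to wait between retries
    maxRetries: ${SW_CONFIG_ZK_MAX_RETRIES:3} # max number of times to retry

# With namespace = /default, a single config item is stored as one znode, e.g.:
#   znode path: /default/agent-analyzer.default.slowDBAccessThreshold
#   znode data: default:200,mongodb:50
# A group config item becomes a parent znode with one child znode per sub-item.
```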
log4j2 supports …","ref":"/docs/main/latest/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/next/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to update the XML configuration file manually, which could be time-consuming and prone to manmade mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. 
In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP has started, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.peroid in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example on how to config dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.0.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.peroid in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
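The flattened ConfigMap manifest embedded in these entries corresponds to roughly the following, assuming the usual Kubernetes ConfigMap layout (keys and values exactly as shown in the manifest text):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: skywalking-oap
  namespace: default
  labels:
    app: collector
    release: skywalking
data:
  core.default.log4j-xml: |-
    <Configuration status="WARN">
      <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
          <PatternLayout charset="UTF-8" pattern="%d - %c - %L [%t] %-5p %x - %m%n"/>
        </Console>
      </Appenders>
      <Loggers>
        <logger name="io.grpc.netty" level="INFO"/>
        <logger name="org.apache.skywalking.oap.server.configuration.api" level="TRACE"/>
        <logger name="org.apache.skywalking.oap.server.configuration.configmap" level="DEBUG"/>
        <Root level="WARN">
          <AppenderRef ref="Console"/>
        </Root>
      </Loggers>
    </Configuration>
```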
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.1.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.2.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.3.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.4.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.5.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.6.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","excerpt":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports …","ref":"/docs/main/v9.7.0/en/setup/backend/dynamical-logging/","title":"Dynamical Logging"},{"body":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. This corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLearn more about eBPF profiling in the following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The service entity whose processes need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. 
If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. 
Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. 
The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","excerpt":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring …","ref":"/docs/main/latest/en/setup/backend/backend-ebpf-profiling/","title":"eBPF Profiling"},{"body":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. 
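Task dispatch only works when the OAP side has the eBPF receiver enabled. The receiver-ebpf fragment that appears whitespace-stripped in these entries (under "Active in the OAP") reads as follows once line breaks and indentation are restored in application.yml:

    receiver-ebpf:
      selector: ${SW_RECEIVER_EBPF:default}
      default:

With that module active, the flow described next applies.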
When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. 
Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. 
Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. 
Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","excerpt":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring …","ref":"/docs/main/next/en/setup/backend/backend-ebpf-profiling/","title":"eBPF Profiling"},{"body":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. 
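To make the task fields listed above concrete, a hypothetical On CPU Profiling task could be described like this; the key names below are illustrative only (they are not the actual SkyWalking task API), and the values are invented for the example:

    # hypothetical task description, not an official format
    service: mesh-gateway        # the service whose processes should be profiled
    labels: [envoy]              # only processes carrying these labels; leave empty to profile all processes
    startTime: IMMEDIATELY       # run now, or give a future point in time
    duration: 10m                # how long the profiling task keeps sampling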
When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. 
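For the Network Profiling case, a task pairs the instance with a list of sampling rules. Purely as an illustration of the options spelled out below (URI regex, minimal duration, the 4XX/5XX conditions and the collection settings), such a rule might be written like this; every key name here is hypothetical and the values are made up:

    # hypothetical sampling rule for a Network Profiling task, not an official format
    instance: gateway-pod-1            # the instance (e.g. a Pod) whose processes are analysed together
    sampling:
      - uriRegex: /api/.*              # only requests whose URI matches; empty means all requests
        minDuration: 100               # only responses at least this slow (unit not specified here); empty means all requests
        when4xx: true                  # sample responses with status 400-499
        when5xx: true                  # sample responses with status 500-599
        settings:
          requireCompleteRequest: true
          maxRequestSize: 2048         # bytes of request payload to keep; empty means everything
          requireCompleteResponse: true
          maxResponseSize: 2048        # bytes of response payload to keep; empty means everything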
In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. 
The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","excerpt":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-ebpf-profiling/","title":"eBPF Profiling"},{"body":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. 
When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. 
Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. 
Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. 
Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","excerpt":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-ebpf-profiling/","title":"eBPF Profiling"},{"body":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. 
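The per-element result of the flame-graph query described under "Profiling analyze" just below can be pictured as a small tree keyed by id and parentId. A hypothetical pair of returned elements, with field names following the list in the text and values invented for the example:

    # hypothetical query-result elements, shown in YAML for readability
    - id: 1
      parentId: 0                # root frame, no parent
      symbol: main
      stackType: USER_SPACE
      dumpCount: 120             # sampled 120 times
    - id: 2
      parentId: 1                # rendered underneath element 1 in the flame graph
      symbol: handleRequest
      stackType: USER_SPACE
      dumpCount: 87              # fewer samples, so less CPU time than its parent

More samples for a symbol generally mean more time spent executing that method.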
When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. 
In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. 
The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","excerpt":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-ebpf-profiling/","title":"eBPF Profiling"},{"body":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","excerpt":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch …","ref":"/docs/main/latest/en/faq/es-server-faq/","title":"ElasticSearch"},{"body":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
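Written out one setting per line, the elasticsearch.yml changes recommended in these FAQ entries are as follows; the values are the FAQ's own examples and should be tuned for your environment.

# In the case of tracing, consider setting a value higher than this.
thread_pool.index.queue_size: 1000
thread_pool.write.queue_size: 1000
# When you face query errors on the trace page, remember to check this.
index.max_result_window: 1000000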
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","excerpt":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch …","ref":"/docs/main/next/en/faq/es-server-faq/","title":"ElasticSearch"},{"body":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","excerpt":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch …","ref":"/docs/main/v9.0.0/en/faq/es-server-faq/","title":"ElasticSearch"},{"body":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","excerpt":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch …","ref":"/docs/main/v9.1.0/en/faq/es-server-faq/","title":"ElasticSearch"},{"body":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","excerpt":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch …","ref":"/docs/main/v9.2.0/en/faq/es-server-faq/","title":"ElasticSearch"},{"body":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","excerpt":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch …","ref":"/docs/main/v9.3.0/en/faq/es-server-faq/","title":"ElasticSearch"},{"body":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","excerpt":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch …","ref":"/docs/main/v9.4.0/en/faq/es-server-faq/","title":"ElasticSearch"},{"body":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","excerpt":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch …","ref":"/docs/main/v9.5.0/en/faq/es-server-faq/","title":"ElasticSearch"},{"body":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","excerpt":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch …","ref":"/docs/main/v9.6.0/en/faq/es-server-faq/","title":"ElasticSearch"},{"body":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","excerpt":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch …","ref":"/docs/main/v9.7.0/en/faq/es-server-faq/","title":"ElasticSearch"},{"body":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. The storage provider is elasticsearch. This storage option is recommended for a large scale production environment, such as more than 1000 services, 10000 endpoints, and 100000 traces per minute, and plan to 100% sampling rate for the persistent in the storage.\nOpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  Elasticsearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_management (All SkyWalking management data, e.g. 
UI dashboard settings, UI Menu, Continuous profiling policy) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
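As a quick worked example of how these items combine (the values below are assumptions for illustration, not recommendations):

storage:
  elasticsearch:
    # ......
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}
    superDatasetIndexShardsFactor: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}
    superDatasetIndexReplicasNumber: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}
# With these values, super datasets such as sw_segment-${day-format} are created with
# 2 * 5 = 10 shards and 0 replicas, while regular indices such as sw_metrics-all-${day-format}
# keep 2 shards and 1 replica (see the table below).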
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\n","excerpt":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. 
The storage …","ref":"/docs/main/latest/en/setup/backend/storages/elasticsearch/","title":"Elasticsearch and OpenSearch"},{"body":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. The storage provider is elasticsearch. This storage option is recommended for a large scale production environment, such as more than 1000 services, 10000 endpoints, and 100000 traces per minute, and plan to 100% sampling rate for the persistent in the storage.\nOpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  Elasticsearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_management (All SkyWalking management data, e.g. UI dashboard settings, UI Menu, Continuous profiling policy) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. 
slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
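As a further illustrative sketch (assumed values; the segment override matches the example given later in this entry), the per-index overrides can sit alongside the generic settings they take precedence over:

storage:
  elasticsearch:
    # ......
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}
    # specificIndexSettings has the highest priority and overrides the generic values above,
    # so sw_segment-${day-format} would get 6 shards and 1 replica while other indices keep 1/1.
    specificIndexSettings: ${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:"{\"segment\":{\"number_of_shards\":\"6\",\"number_of_replicas\":\"1\"}}"}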
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\n","excerpt":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. 
The storage …","ref":"/docs/main/next/en/setup/backend/storages/elasticsearch/","title":"Elasticsearch and OpenSearch"},{"body":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. The storage provider is elasticsearch. This storage option is recommended for a large scale production environment, such as more than 1000 services, 10000 endpoints, and 100000 traces per minute, and plan to 100% sampling rate for the persistent in the storage.\nOpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  Elasticsearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_management (All SkyWalking management data, e.g. UI dashboard settings, UI Menu, Continuous profiling policy) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. 
slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
Daily Index Step Daily index step (storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (the dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days, while their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value can be increased to 5 (or more) if users can ensure that a single index supports the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This affects record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the records (traces), so this improves the shard balance in the ElasticSearch cluster.\nNOTE: TTL deletion is affected by these steps. You should add an extra dayStep to your TTL. For example, if you want TTL == 30 days and dayStep == 10, we recommend setting TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the absolute path of the secrets management file. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference from setting user, password, and trustStorePass in the application.yaml file is that the Secrets Management File is watched by the OAP server. Once it is changed manually or through a 3rd-party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The updates only take effect after the OAP server reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}
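To confirm which shard and replica settings were actually applied after an OAP reboot, you can query Elasticsearch directly. A minimal sketch, assuming a local cluster at localhost:9200 and the default sw index prefix; adjust the host and prefix to your environment.

```shell
# List the SkyWalking indices with their primary-shard (pri) and replica (rep) counts.
curl -s 'http://localhost:9200/_cat/indices/sw_*?v&h=index,pri,rep'

# For a super dataset index such as sw_segment-<day-format>, pri should equal
# indexShardsNumber * superDatasetIndexShardsFactor and rep should equal
# superDatasetIndexReplicasNumber, per the table that follows.
```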
The following table shows the relationship between those config items and the Elasticsearch index number_of_shards/number_of_replicas. You can also specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following the ElasticSearch documentation.\nFor example, to set the translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in the index settings doc.\nThe settings are in JSON format. The index name here is the logical entity name, which should exclude the ${SW_NAMESPACE} (sw by default), e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } In the YAML file, this configuration looks like this:\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You can add the following configuration to elasticsearch.yml and set the values based on your environment.\n# In tracing scenarios, consider setting at least this value.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations in ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\n","excerpt":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. 
The storage …","ref":"/docs/main/v9.7.0/en/setup/backend/storages/elasticsearch/","title":"Elasticsearch and OpenSearch"},{"body":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","excerpt":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the …","ref":"/docs/main/latest/en/faq/es-version-conflict/","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0"},{"body":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","excerpt":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the …","ref":"/docs/main/v9.7.0/en/faq/es-version-conflict/","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0"},{"body":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collect metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup elasticsearch-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. 
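As a reference for the setup steps above, the OpenTelemetry Collector side can look roughly like the following. This is a hedged sketch rather than the official example configuration: the elasticsearch-exporter address (elasticsearch-exporter:9114), the job name, and the OAP endpoint (oap:11800) are assumptions for a typical deployment; adjust them, and any labels the linked example requires, to your environment.

```yaml
# otel-collector-config.yaml (sketch): scrape elasticsearch-exporter with the
# Prometheus receiver and push the metrics to the SkyWalking OAP via OTLP gRPC.
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: elasticsearch-monitoring       # assumed job name
          scrape_interval: 30s
          static_configs:
            - targets: ['elasticsearch-exporter:9114']   # assumed exporter address
exporters:
  otlp:
    endpoint: 'oap:11800'                           # assumed SkyWalking OAP gRPC endpoint
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]
```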
In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB 
meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  
meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. 
time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shards on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with only all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","excerpt":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data …","ref":"/docs/main/latest/en/setup/backend/backend-elasticsearch-monitoring/","title":"Elasticsearch monitoring"},{"body":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collect metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup elasticsearch-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. 
In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB 
meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  
meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. 
time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shards on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with only all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","excerpt":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-elasticsearch-monitoring/","title":"Elasticsearch monitoring"},{"body":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collect metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup elasticsearch-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. 
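The Customizations section above points to the MAL rule files under /config/otel-rules/elasticsearch/. As a heavily hedged sketch of the general shape of such a rule file (the expression, tags and names here are illustrative assumptions, not copied from the shipped rules):

```yaml
# Illustrative shape of an otel-rules file; the expSuffix, prefix and expression
# below are assumptions for demonstration, not the shipped configuration.
expSuffix: tag({tags -> tags.cluster = 'my-cluster'}).service(['cluster'], Layer.ELASTICSEARCH)
metricPrefix: meter_elasticsearch_cluster
metrics:
  - name: nodes                                   # exposed as meter_elasticsearch_cluster_nodes
    exp: elasticsearch_cluster_health_number_of_nodes.sum(['cluster'])
```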
In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB 
meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  
meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. 
time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shards on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with only all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","excerpt":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-elasticsearch-monitoring/","title":"Elasticsearch monitoring"},{"body":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collect metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup elasticsearch-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. 
In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB 
meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  
meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. 
time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shard on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Document rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Document rate with all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","excerpt":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-elasticsearch-monitoring/","title":"Elasticsearch monitoring"},{"body":"Enable/Disable Channel Different channels mean that different protocols can be transparently transmitted to upstream services (OAP).\nConfig In the Satellite configuration, a channel is represented under the configured pipes. By default, we open all channels and process all known protocols.\nYou can delete a channel if you don\u0026rsquo;t want Satellite to receive and transmit that protocol.\nAfter restarting the Satellite service, the deleted channel is disabled.\n","excerpt":"Enable/Disable Channel Different channels mean that different protocols can be transparently …","ref":"/docs/skywalking-satellite/latest/en/setup/examples/feature/enable-disable-channel/readme/","title":"Enable/Disable Channel"},{"body":"Enable/Disable Channel Different channels mean that different protocols can be transparently transmitted to upstream services (OAP).\nConfig In the Satellite configuration, a channel is represented under the configured pipes. 
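As a rough sketch of that idea (the pipe and key names below are hypothetical placeholders, not the shipped Satellite configuration), each channel corresponds to one entry under pipes, and removing the entry disables that channel after a restart:

```yaml
# Hypothetical excerpt of a Satellite configuration: each pipe carries one channel.
# Delete a pipe entry and restart Satellite to disable that channel.
pipes:
  - common_config:
      pipe_name: trace-pipe        # kept: this protocol is still received and forwarded
    # gatherer / processor / sender plugin configuration goes here ...
  # - common_config:
  #     pipe_name: log-pipe        # removed: this channel is disabled after restart
```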
By default, we open all channels and process all known protocols.\nYou can delete a channel if you don\u0026rsquo;t want Satellite to receive and transmit that protocol.\nAfter restarting the Satellite service, the deleted channel is disabled.\n","excerpt":"Enable/Disable Channel Different channels mean that different protocols can be transparently …","ref":"/docs/skywalking-satellite/next/en/setup/examples/feature/enable-disable-channel/readme/","title":"Enable/Disable Channel"},{"body":"Enable/Disable Channel Different channels mean that different protocols can be transparently transmitted to upstream services (OAP).\nConfig In the Satellite configuration, a channel is represented under the configured pipes. By default, we open all channels and process all known protocols.\nYou can delete a channel if you don\u0026rsquo;t want Satellite to receive and transmit that protocol.\nAfter restarting the Satellite service, the deleted channel is disabled.\n","excerpt":"Enable/Disable Channel Different channels mean that different protocols can be transparently …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/feature/enable-disable-channel/readme/","title":"Enable/Disable Channel"},{"body":"End to End Tests (E2E) SkyWalking relies heavily on automated tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking always sets up the OAP, monitored services and relevant remote server dependencies in a real environment, and verifies the data flow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we have migrated to e2e-v2, which leverages skywalking-infra-e2e and skywalking-cli to drive the whole e2e process. skywalking-infra-e2e is used to control the e2e process, and skywalking-cli is used to interact with the OAP, such as requesting and getting response metrics from the OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and other tools if your cases need them. You can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), in single-node mode as well as cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using docker-compose, which provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g.
ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","excerpt":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality …","ref":"/docs/main/latest/en/guides/e2e/","title":"End to End Tests (E2E)"},{"body":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking is always setting the OAP, monitored services and relative remote server dependencies in a real environment, and verify the dataflow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. 
ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","excerpt":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality …","ref":"/docs/main/next/en/guides/e2e/","title":"End to End Tests (E2E)"},{"body":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking is always setting the OAP, monitored services and relative remote server dependencies in a real environment, and verify the dataflow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. 
ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","excerpt":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality …","ref":"/docs/main/v9.6.0/en/guides/e2e/","title":"End to End Tests (E2E)"},{"body":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking is always setting the OAP, monitored services and relative remote server dependencies in a real environment, and verify the dataflow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. 
ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","excerpt":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality …","ref":"/docs/main/v9.7.0/en/guides/e2e/","title":"End to End Tests (E2E)"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. 
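Returning to the E2E orchestration steps described above, a minimal docker-compose.yml for such a case might look roughly like the following sketch. Image tags, service names and environment variables are assumptions for illustration only, not an official case definition.

```yaml
# Illustrative docker-compose.yml for an E2E case: storage, one OAP node, and an
# instrumented demo service, linked via depends_on. Tags and names are placeholders.
version: "3.8"
services:
  elasticsearch:
    image: elasticsearch:7.17.0               # storage backend (placeholder tag)
    environment:
      - discovery.type=single-node
  oap:
    image: apache/skywalking-oap-server:latest  # placeholder tag
    environment:
      SW_STORAGE: elasticsearch                 # select the Elasticsearch storage
      SW_STORAGE_ES_CLUSTER_NODES: elasticsearch:9200
    depends_on:
      - elasticsearch
  provider:                                     # a hypothetical instrumented service
    image: my-demo-service:latest
    environment:
      SW_AGENT_COLLECTOR_BACKEND_SERVICES: oap:11800  # point the agent at the OAP
    depends_on:
      - oap
```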
It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/latest/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. 
SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    
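As a hedged illustration of the two-call reporting flow described in the NOTE above, the records below follow the JSON format documented in the Events Report Protocol entries later in this document; the UUID, service, instance, message, and timestamps are placeholders, not values SkyWalking produces itself. The first request carries only startTime, and the second reuses the same uuid and adds endTime once the event finishes.
[ { "uuid": "11111111-2222-3333-4444-555555555555", "source": { "service": "SERVICE-A", "instance": "INSTANCE-1" }, "name": "Upgrade", "type": "Normal", "message": "Upgrade from v1.0.0 to v1.1.0.", "parameters": {}, "startTime": 1628044330000 } ]
[ { "uuid": "11111111-2222-3333-4444-555555555555", "source": { "service": "SERVICE-A", "instance": "INSTANCE-1" }, "name": "Upgrade", "type": "Normal", "message": "Upgrade from v1.0.0 to v1.1.0.", "parameters": {}, "startTime": 1628044330000, "endTime": 1628044341000 } ]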
The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/next/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nHow to Configure Alarms for Events Events derive from metrics, and can be the source to trigger alarms. 
For example, if a specific event occurs a certain number of times within a period, alarms can be triggered and sent.\nEvery event has a default value = 1. When n events with the same name are reported, they are aggregated into value = n as follows.\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nwill be aggregated into\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} \u0026lt;value = 6\u0026gt;\nso you can configure the following alarm rule to trigger an alarm when the Unhealthy event occurs more than 5 times within 10 minutes.\nrules:\n  unhealthy_event_rule:\n    metrics-name: Unhealthy\n    # Healthiness check is usually a scheduled task,\n    # they may be unhealthy for the first few times,\n    # and can be unhealthy occasionally due to network jitter,\n    # please adjust the threshold as per your actual situation.\n    threshold: 5\n    op: \u0026#34;\u0026gt;\u0026#34;\n    period: 10\n    count: 1\n    message: Service instance has been unhealthy for 10 minutes\nFor more alarm configuration details, please refer to the alarm doc.\nNote that the Unhealthy event above is only for demonstration; such events are not detected by default in SkyWalking. However, you can use the methods in How to Report Events to report this kind of event (a hedged reporting sketch follows the tables below).\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nBy default, SkyWalking also generates some metrics for events by using OAL. The default metrics list of events may change over time; you can find the complete list in event.oal. If you want to generate your custom metrics from events, please refer to OAL about how to write OAL rules.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter. To see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killed. Reported by Kubernetes Event Exporter.   Pulling Normal When a Docker image is being pulled for deployment. Reported by Kubernetes Event Exporter.   Pulled Normal When a Docker image is pulled for deployment. Reported by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reported by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reported by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe fails. Reported by Kubernetes Event Exporter.    
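As referenced above, here is a minimal, hedged sketch of reporting such an Unhealthy event over the HTTP protocol listed in How to Report Events. It assumes the http://\u0026lt;oap-address\u0026gt;:12800/v3/events endpoint and the JSON fields documented in the Events Report Protocol entries later in this document; the OAP host, service, and instance names are placeholders, not SkyWalking defaults.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ReportUnhealthyEvent {
    public static void main(String[] args) throws Exception {
        // Placeholder OAP address; the /v3/events path comes from the Events Report Protocol doc.
        String endpoint = "http://oap.example.internal:12800/v3/events";
        long now = System.currentTimeMillis();
        // One record carrying both startTime and endTime, so a single call is enough.
        String payload = "[{"
                + "\"uuid\":\"11111111-2222-3333-4444-555555555555\","
                + "\"source\":{\"service\":\"SERVICE-A\",\"instance\":\"INSTANCE-1\"},"
                + "\"name\":\"Unhealthy\","
                + "\"type\":\"Error\","
                + "\"message\":\"Readiness probe failed.\","
                + "\"parameters\":{},"
                + "\"startTime\":" + (now - 1000) + ","
                + "\"endTime\":" + now
                + "}]";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(endpoint))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(payload))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println("OAP responded with HTTP " + response.statusCode());
    }
}
Once events like this are flowing in, the unhealthy_event_rule shown above can aggregate them and raise the alarm.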
The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nHow to Configure Alarms for Events Events derive from metrics, and can be the source to trigger alarms. 
For example, if a specific event occurs for a certain times in a period, alarms can be triggered and sent.\nEvery event has a default value = 1, when n events with the same name are reported, they are aggregated into value = n as follows.\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} will be aggregated into\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} \u0026lt;value = 6\u0026gt; so you can configure the following alarm rule to trigger alarm when Unhealthy event occurs more than 5 times within 10 minutes.\nrules:unhealthy_event_rule:metrics-name:Unhealthy# Healthiness check is usually a scheduled task,# they may be unhealthy for the first few times,# and can be unhealthy occasionally due to network jitter,# please adjust the threshold as per your actual situation.threshold:5op:\u0026#34;\u0026gt;\u0026#34;period:10count:1message:Service instance has been unhealthy for 10 minutesFor more alarm configuration details, please refer to the alarm doc.\nNote that the Unhealthy event above is only for demonstration, they are not detected by default in SkyWalking, however, you can use the methods in How to Report Events to report this kind of events.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. 
Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    
The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. 
This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. 
Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    
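The How to Report Events section above also lists gRPC as a supported protocol. Below is a rough, hedged sketch of a client-streaming call against the EventService defined in the Events Report Protocol entries later in this document; the generated class names follow the java_package option shown in that proto, while the OAP address, port, layer name, and event values are placeholders, and the import for Commands may need adjusting to the actual generated package.
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.stub.StreamObserver;
import java.util.concurrent.TimeUnit;
import org.apache.skywalking.apm.network.common.v3.Commands; // assumed package for the common/Command.proto codegen
import org.apache.skywalking.apm.network.event.v3.Event;
import org.apache.skywalking.apm.network.event.v3.EventServiceGrpc;
import org.apache.skywalking.apm.network.event.v3.Source;
import org.apache.skywalking.apm.network.event.v3.Type;

public class ReportEventOverGrpc {
    public static void main(String[] args) throws InterruptedException {
        // Placeholder OAP gRPC address; 11800 is the conventional OAP gRPC port.
        ManagedChannel channel = ManagedChannelBuilder.forAddress("oap.example.internal", 11800)
                .usePlaintext()
                .build();
        // collect() is a client-streaming RPC: it returns a request observer that Event messages are pushed into.
        StreamObserver<Event> requests = EventServiceGrpc.newStub(channel)
                .collect(new StreamObserver<Commands>() {
                    @Override public void onNext(Commands commands) { }
                    @Override public void onError(Throwable t) { t.printStackTrace(); }
                    @Override public void onCompleted() { }
                });
        requests.onNext(Event.newBuilder()
                .setUuid("11111111-2222-3333-4444-555555555555") // reuse the same UUID when reporting the end of this event
                .setSource(Source.newBuilder().setService("SERVICE-A").setServiceInstance("INSTANCE-1"))
                .setName("Reboot")
                .setType(Type.Normal)
                .setMessage("App reboot.")
                .setStartTime(System.currentTimeMillis())
                .setLayer("GENERAL") // required since 9.0.0 per the proto; the layer name here is an assumption
                .build());
        requests.onCompleted();
        channel.shutdown();
        channel.awaitTermination(5, TimeUnit.SECONDS);
    }
}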
The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. 
For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. 
This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/event/","title":"Events"},{"body":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. 
Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   
Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","excerpt":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/event/","title":"Events"},{"body":"SkyWalking events.\n","excerpt":"SkyWalking events.","ref":"/events/","title":"Events"},{"body":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  
// This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the …","ref":"/docs/main/latest/en/api/event/","title":"Events Report Protocol"},{"body":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  
// Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the …","ref":"/docs/main/next/en/api/event/","title":"Events Report Protocol"},{"body":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  
string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the …","ref":"/docs/main/v9.4.0/en/api/event/","title":"Events Report Protocol"},{"body":"Events Report Protocol The protocol is used to report events to the backend. 
The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the …","ref":"/docs/main/v9.5.0/en/api/event/","title":"Events Report Protocol"},{"body":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  
string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the …","ref":"/docs/main/v9.6.0/en/api/event/","title":"Events Report Protocol"},{"body":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  
string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the …","ref":"/docs/main/v9.7.0/en/api/event/","title":"Events Report Protocol"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). 
All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/latest/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. 
Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER:default} and ${SW_EXPORTER_ENABLE_GRPC_METRICS:true}, configure the target gRPC server address.\nexporter:selector:${SW_EXPORTER:default}default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER:default} and ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}, configure the Kafka server addresses.\nexporter:selector:${SW_EXPORTER:default}default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. 
Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER:default} and ${SW_EXPORTER_ENABLE_KAFKA_LOG:true}, configure the Kafka server addresses.\nexporter:selector:${SW_EXPORTER:default}default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/next/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics   Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. 
message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}...Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/v9.3.0/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics   Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. 
Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}...Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/v9.4.0/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. 
Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. 
Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/v9.5.0/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. 
message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/v9.6.0/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. 
Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","excerpt":"Exporter SkyWalking provides the essential functions of observability, including metrics …","ref":"/docs/main/v9.7.0/en/setup/backend/exporter/","title":"Exporter"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. 
Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/latest/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/next/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  
Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.0.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.1.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. 
Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.2.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.3.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  
","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.4.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.5.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  
","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.6.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","excerpt":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, …","ref":"/docs/main/v9.7.0/en/guides/backend-profile-export/","title":"Exporter tool for profile raw data"},{"body":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","excerpt":"Extend storage SkyWalking has already provided several storage solutions. In this document, you …","ref":"/docs/main/v9.0.0/en/guides/storage-extention/","title":"Extend storage"},{"body":"Extend storage SkyWalking has already provided several storage solutions. 
In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","excerpt":"Extend storage SkyWalking has already provided several storage solutions. In this document, you …","ref":"/docs/main/v9.1.0/en/guides/storage-extention/","title":"Extend storage"},{"body":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","excerpt":"Extend storage SkyWalking has already provided several storage solutions. In this document, you …","ref":"/docs/main/v9.2.0/en/guides/storage-extention/","title":"Extend storage"},{"body":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","excerpt":"Extend storage SkyWalking has already provided several storage solutions. In this document, you …","ref":"/docs/main/v9.3.0/en/guides/storage-extention/","title":"Extend storage"},{"body":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","excerpt":"Extend storage SkyWalking has already provided several storage solutions. In this document, you …","ref":"/docs/main/v9.4.0/en/guides/storage-extention/","title":"Extend storage"},{"body":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","excerpt":"Extend storage SkyWalking has already provided several storage solutions. In this document, you …","ref":"/docs/main/v9.5.0/en/guides/storage-extention/","title":"Extend storage"},{"body":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data. …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/fallbacker_none-fallbacker/","title":"Fallbacker/none-fallbacker"},{"body":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data. …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/fallbacker_none-fallbacker/","title":"Fallbacker/none-fallbacker"},{"body":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data. 
…","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/fallbacker_none-fallbacker/","title":"Fallbacker/none-fallbacker"},{"body":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward failure data.\nDefaultConfig # The forwarder max attempt times.max_attempts:3# The exponential_backoff is the standard retry duration, and the time for each retry is expanded# by 2 times until the number of retries reaches the maximum.(Time unit is millisecond.)exponential_backoff:2000# The max backoff time used in retrying, which would override the latency time when the latency time# with exponential increasing larger than it.(Time unit is millisecond.)max_backoff:5000Configuration    Name Type Description     max_attempts int    exponential_backoff int    max_backoff int     ","excerpt":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/fallbacker_timer-fallbacker/","title":"Fallbacker/timer-fallbacker"},{"body":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward failure data.\nDefaultConfig # The forwarder max attempt times.max_attempts:3# The exponential_backoff is the standard retry duration, and the time for each retry is expanded# by 2 times until the number of retries reaches the maximum.(Time unit is millisecond.)exponential_backoff:2000# The max backoff time used in retrying, which would override the latency time when the latency time# with exponential increasing larger than it.(Time unit is millisecond.)max_backoff:5000Configuration    Name Type Description     max_attempts int    exponential_backoff int    max_backoff int     ","excerpt":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/fallbacker_timer-fallbacker/","title":"Fallbacker/timer-fallbacker"},{"body":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward failure data.\nDefaultConfig # The forwarder max attempt times.max_attempts:3# The exponential_backoff is the standard retry duration, and the time for each retry is expanded# by 2 times until the number of retries reaches the maximum.(Time unit is millisecond.)exponential_backoff:2000# The max backoff time used in retrying, which would override the latency time when the latency time# with exponential increasing larger than it.(Time unit is millisecond.)max_backoff:5000Configuration    Name Type Description     max_attempts int    exponential_backoff int    max_backoff int     ","excerpt":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/fallbacker_timer-fallbacker/","title":"Fallbacker/timer-fallbacker"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  
Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/latest/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default? Why is Clickhouse or Loki or xxx not supported as a storage option?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/next/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. 
We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.0.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.1.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  
Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.2.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.3.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  
Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.4.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.5.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  
Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute …","ref":"/docs/main/v9.6.0/en/faq/readme/","title":"FAQs"},{"body":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","excerpt":"FAQs These are known and frequently asked questions about SkyWalking. 
We welcome you to contribute …","ref":"/docs/main/v9.7.0/en/faq/readme/","title":"FAQs"},{"body":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a Fetcher to fetch Istio control plane metrics, then push them to OAP server.\nInstall Operator Follow Operator installation instrument to install the operator.\nInstall Istio control plane Follow Install with istioctl to install a istiod.\nDeploy Fetcher, OAP server and UI with default settings Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f fetcher.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui,fetcher NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] NAME AGE fetcher.operator.skywalking.apache.org/istio-prod-cluster 36h View Istio Control Plane Dashboard from UI Follow View the UI to access the UI service.\nNavigate to Dashboard-\u0026gt;Istio Control Plane to view relevant metric diagrams.\n","excerpt":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a …","ref":"/docs/skywalking-swck/latest/examples/istio-controlplane/","title":"Fetch metrics from the Istio control plane(istiod)"},{"body":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a Fetcher to fetch Istio control plane metrics, then push them to OAP server.\nInstall Operator Follow Operator installation instrument to install the operator.\nInstall Istio control plane Follow Install with istioctl to install a istiod.\nDeploy Fetcher, OAP server and UI with default settings Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f fetcher.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui,fetcher NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] NAME AGE fetcher.operator.skywalking.apache.org/istio-prod-cluster 36h View Istio Control Plane Dashboard from UI Follow View the UI to access the UI service.\nNavigate to Dashboard-\u0026gt;Istio Control Plane to view relevant metric diagrams.\n","excerpt":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a …","ref":"/docs/skywalking-swck/next/examples/istio-controlplane/","title":"Fetch metrics from the Istio control plane(istiod)"},{"body":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a Fetcher to fetch Istio control plane metrics, then push them to OAP server.\nInstall Operator Follow Operator installation instrument to install the operator.\nInstall Istio control plane Follow Install with istioctl to install a istiod.\nDeploy Fetcher, OAP server and UI with default settings Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f fetcher.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui,fetcher NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 
default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] NAME AGE fetcher.operator.skywalking.apache.org/istio-prod-cluster 36h View Istio Control Plane Dashboard from UI Follow View the UI to access the UI service.\nNavigate to Dashboard-\u0026gt;Istio Control Plane to view relevant metric diagrams.\n","excerpt":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a …","ref":"/docs/skywalking-swck/v0.9.0/examples/istio-controlplane/","title":"Fetch metrics from the Istio control plane(istiod)"},{"body":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-als-v2-grpc-forwarder/","title":"Forwarder/envoy-als-v2-grpc-forwarder"},{"body":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-als-v2-grpc-forwarder/","title":"Forwarder/envoy-als-v2-grpc-forwarder"},{"body":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-als-v2-grpc-forwarder/","title":"Forwarder/envoy-als-v2-grpc-forwarder"},{"body":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-als-v3-grpc-forwarder/","title":"Forwarder/envoy-als-v3-grpc-forwarder"},{"body":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-als-v3-grpc-forwarder/","title":"Forwarder/envoy-als-v3-grpc-forwarder"},{"body":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-als-v3-grpc-forwarder/","title":"Forwarder/envoy-als-v3-grpc-forwarder"},{"body":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc forwarder with the Envoy 
metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-metrics-v2-grpc-forwarder/","title":"Forwarder/envoy-metrics-v2-grpc-forwarder"},{"body":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-metrics-v2-grpc-forwarder/","title":"Forwarder/envoy-metrics-v2-grpc-forwarder"},{"body":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-metrics-v2-grpc-forwarder/","title":"Forwarder/envoy-metrics-v2-grpc-forwarder"},{"body":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-metrics-v3-grpc-forwarder/","title":"Forwarder/envoy-metrics-v3-grpc-forwarder"},{"body":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-metrics-v3-grpc-forwarder/","title":"Forwarder/envoy-metrics-v3-grpc-forwarder"},{"body":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-metrics-v3-grpc-forwarder/","title":"Forwarder/envoy-metrics-v3-grpc-forwarder"},{"body":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-cds-grpc-forwarder/","title":"Forwarder/native-cds-grpc-forwarder"},{"body":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder 
with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-cds-grpc-forwarder/","title":"Forwarder/native-cds-grpc-forwarder"},{"body":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-cds-grpc-forwarder/","title":"Forwarder/native-cds-grpc-forwarder"},{"body":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-clr-grpc-forwarder/","title":"Forwarder/native-clr-grpc-forwarder"},{"body":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-clr-grpc-forwarder/","title":"Forwarder/native-clr-grpc-forwarder"},{"body":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-clr-grpc-forwarder/","title":"Forwarder/native-clr-grpc-forwarder"},{"body":"Forwarder/native-ebpf-accesslog-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native eBPF access log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-ebpf-accesslog-grpc-forwarder Description This is a synchronization grpc forwarder …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-ebpf-accesslog-grpc-forwarder/","title":"Forwarder/native-ebpf-accesslog-grpc-forwarder"},{"body":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-ebpf-profiling-grpc-forwarder/","title":"Forwarder/native-ebpf-profiling-grpc-forwarder"},{"body":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder 
…","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-ebpf-profiling-grpc-forwarder/","title":"Forwarder/native-ebpf-profiling-grpc-forwarder"},{"body":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-ebpf-profiling-grpc-forwarder/","title":"Forwarder/native-ebpf-profiling-grpc-forwarder"},{"body":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native event protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-event-grpc-forwarder/","title":"Forwarder/native-event-grpc-forwarder"},{"body":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native event protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-event-grpc-forwarder/","title":"Forwarder/native-event-grpc-forwarder"},{"body":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native event protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-event-grpc-forwarder/","title":"Forwarder/native-event-grpc-forwarder"},{"body":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-jvm-grpc-forwarder/","title":"Forwarder/native-jvm-grpc-forwarder"},{"body":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-jvm-grpc-forwarder/","title":"Forwarder/native-jvm-grpc-forwarder"},{"body":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the 
…","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-jvm-grpc-forwarder/","title":"Forwarder/native-jvm-grpc-forwarder"},{"body":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-log-grpc-forwarder/","title":"Forwarder/native-log-grpc-forwarder"},{"body":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-log-grpc-forwarder/","title":"Forwarder/native-log-grpc-forwarder"},{"body":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-log-grpc-forwarder/","title":"Forwarder/native-log-grpc-forwarder"},{"body":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the SkyWalking native log protocol.\nDefaultConfig # The remote topic. topic:\u0026#34;log-topic\u0026#34;Configuration    Name Type Description     topic string The forwarder topic.    ","excerpt":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-log-kafka-forwarder/","title":"Forwarder/native-log-kafka-forwarder"},{"body":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the SkyWalking native log protocol.\nDefaultConfig # The remote topic. topic:\u0026#34;log-topic\u0026#34;Configuration    Name Type Description     topic string The forwarder topic.    ","excerpt":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-log-kafka-forwarder/","title":"Forwarder/native-log-kafka-forwarder"},{"body":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the SkyWalking native log protocol.\nDefaultConfig # The remote topic. topic:\u0026#34;log-topic\u0026#34;Configuration    Name Type Description     topic string The forwarder topic.    
","excerpt":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-log-kafka-forwarder/","title":"Forwarder/native-log-kafka-forwarder"},{"body":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native management protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-management-grpc-forwarder/","title":"Forwarder/native-management-grpc-forwarder"},{"body":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native management protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-management-grpc-forwarder/","title":"Forwarder/native-management-grpc-forwarder"},{"body":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native management protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-management-grpc-forwarder/","title":"Forwarder/native-management-grpc-forwarder"},{"body":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder with the SkyWalking meter protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180Configuration    Name Type Description     routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","excerpt":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-meter-grpc-forwarder/","title":"Forwarder/native-meter-grpc-forwarder"},{"body":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder with the SkyWalking meter protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180Configuration    Name Type Description     routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    
","excerpt":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-meter-grpc-forwarder/","title":"Forwarder/native-meter-grpc-forwarder"},{"body":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder with the SkyWalking meter protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180Configuration    Name Type Description     routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","excerpt":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-meter-grpc-forwarder/","title":"Forwarder/native-meter-grpc-forwarder"},{"body":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-process-grpc-forwarder/","title":"Forwarder/native-process-grpc-forwarder"},{"body":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-process-grpc-forwarder/","title":"Forwarder/native-process-grpc-forwarder"},{"body":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-process-grpc-forwarder/","title":"Forwarder/native-process-grpc-forwarder"},{"body":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-profile-grpc-forwarder/","title":"Forwarder/native-profile-grpc-forwarder"},{"body":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with 
…","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-profile-grpc-forwarder/","title":"Forwarder/native-profile-grpc-forwarder"},{"body":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-profile-grpc-forwarder/","title":"Forwarder/native-profile-grpc-forwarder"},{"body":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native tracing protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-tracing-grpc-forwarder/","title":"Forwarder/native-tracing-grpc-forwarder"},{"body":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native tracing protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-tracing-grpc-forwarder/","title":"Forwarder/native-tracing-grpc-forwarder"},{"body":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native tracing protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-tracing-grpc-forwarder/","title":"Forwarder/native-tracing-grpc-forwarder"},{"body":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with the OpenTelemetry metrics v1 protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180# The label key of the routing data, multiple keys are split by \u0026#34;,\u0026#34;routing_label_keys:net.host.name,host.name,job,service.nameConfiguration    Name Type Description     routing_label_keys string The label key of the routing data, multiple keys are split by \u0026ldquo;,\u0026rdquo;   routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    
","excerpt":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_otlp-metrics-v1-grpc-forwarder/","title":"Forwarder/otlp-metrics-v1-grpc-forwarder"},{"body":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with the OpenTelemetry metrics v1 protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180# The label key of the routing data, multiple keys are split by \u0026#34;,\u0026#34;routing_label_keys:net.host.name,host.name,job,service.nameConfiguration    Name Type Description     routing_label_keys string The label key of the routing data, multiple keys are split by \u0026ldquo;,\u0026rdquo;   routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","excerpt":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_otlp-metrics-v1-grpc-forwarder/","title":"Forwarder/otlp-metrics-v1-grpc-forwarder"},{"body":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with the OpenTelemetry metrics v1 protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180# The label key of the routing data, multiple keys are split by \u0026#34;,\u0026#34;routing_label_keys:net.host.name,host.name,job,service.nameConfiguration    Name Type Description     routing_label_keys string The label key of the routing data, multiple keys are split by \u0026ldquo;,\u0026rdquo;   routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","excerpt":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_otlp-metrics-v1-grpc-forwarder/","title":"Forwarder/otlp-metrics-v1-grpc-forwarder"},{"body":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... 
--- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the compiler\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls bydbctl/build/bin bydbctl ","excerpt":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries …","ref":"/docs/skywalking-banyandb/latest/installation/binaries/","title":"Get Binaries"},{"body":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.22 Node 20.12 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS, which introduces several platform-specific characteristics into the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries Issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release or make -C banyand static builds out a static binary banyand-server-static for releasing. make -C banyand debug gives a binary for debugging without the compiler\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  
Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static $ ls bydbctl/build/bin bydbctl bydbctl--darwin-amd64 bydbctl--darwin-arm64 bydbctl--linux-386 bydbctl--linux-amd64 bydbctl--linux-arm64 bydbctl--windows-386 bydbctl--windows-amd64 ","excerpt":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries …","ref":"/docs/skywalking-banyandb/next/installation/binaries/","title":"Get Binaries"},{"body":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS, which introduces several platform-specific characteristics into the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries Issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the compiler\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls bydbctl/build/bin bydbctl ","excerpt":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries …","ref":"/docs/skywalking-banyandb/v0.5.0/installation/binaries/","title":"Get Binaries"},{"body":"Getting Started This document introduces how to create a kubernetes cluster locally using kind and how to deploy the basic skywalking components to the cluster.\nPrerequisites  docker \u0026gt;= v20.10.6 kubectl \u0026gt;= v1.21.0 kind \u0026gt;= v0.20.0 swctl \u0026gt;= v0.10.0  Step1: Create a kubernetes cluster locally using kind  Note: If you have a kubernetes cluster (\u0026gt; v1.21.10) already, you can skip this step.\n Here we create a kubernetes cluster with 1 control-plane node and 1 worker node.\n$ cat \u0026lt;\u0026lt;EOF | kind create cluster --config=- kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane image: kindest/node:v1.21.10 - role: worker image: kindest/node:v1.21.10 EOF  Expected output Creating cluster \u0026#34;kind\u0026#34; ... 
✓ Ensuring node image (kindest/node:v1.21.10) 🖼 ✓ Preparing nodes 📦 📦 ✓ Writing configuration 📜 ✓ Starting control-plane 🕹️ ✓ Installing CNI 🔌 ✓ Installing StorageClass 💾 ✓ Joining worker nodes 🚜 Set kubectl context to \u0026#34;kind-kind\u0026#34; You can now use your cluster with: kubectl cluster-info --context kind-kind Not sure what to do next? 😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/  Check all pods in the cluster.\n$ kubectl get pods -A  Expected output NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-558bd4d5db-h5gxt 1/1 Running 0 106s kube-system coredns-558bd4d5db-lhnvz 1/1 Running 0 106s kube-system etcd-kind-control-plane 1/1 Running 0 116s kube-system kindnet-fxlkm 1/1 Running 0 106s kube-system kindnet-vmcvl 1/1 Running 0 91s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 116s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 116s kube-system kube-proxy-nr4f4 1/1 Running 0 91s kube-system kube-proxy-zl4h2 1/1 Running 0 106s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 116s local-path-storage local-path-provisioner-74567d47b4-kmtjh 1/1 Running 0 106s  Step2: Build the operator image Check into the root directory of SWCK and build the operator image as follows.\n$ cd operator # Build the operator image $ make docker-build You will get the operator image controller:latest as follows.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE controller latest 84da7509092a 22 seconds ago 53.6MB Load the operator image into the kind cluster or push the image to a registry that your kubernetes cluster can access.\n$ kind load docker-image controller or\n$ docker push $(YOUR_REGISTRY)/controller Step3: Deploy operator on the kubernetes cluster Install the CRDs as follows.\n$ make install Check the CRDs are installed successfully.\n Expected output kubectl get crd | grep skywalking banyandbs.operator.skywalking.apache.org 2023-11-05T03:30:43Z fetchers.operator.skywalking.apache.org 2023-11-05T03:30:43Z javaagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverdynamicconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapservers.operator.skywalking.apache.org 2023-11-05T03:30:43Z satellites.operator.skywalking.apache.org 2023-11-05T03:30:43Z storages.operator.skywalking.apache.org 2023-11-05T03:30:43Z swagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z uis.operator.skywalking.apache.org 2023-11-05T03:30:43Z  Deploy the SWCK operator to the cluster.\n$ make deploy Or deploy the SWCK operator to the cluster with your own image.\n$ make deploy OPERATOR_IMG=$(YOUR_REGISTRY)/controller Get the status of the SWCK operator pod.\n$ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-5f5bbd4fd-9wdw6 2/2 Running 0 34s Step4: Deploy skywalking componentes on the kubernetes cluster Create the skywalking-system namespace.\n$ kubectl create namespace skywalking-system Deploy the skywalking components to the cluster.\n$ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: OAPServer metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-oap-server:9.5.0 service: template: type: ClusterIP --- apiVersion: operator.skywalking.apache.org/v1alpha1 kind: UI metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: 
apache/skywalking-ui:9.5.0 OAPServerAddress: http://skywalking-system-oap.skywalking-system:12800 service: template: type: ClusterIP ingress: host: demo.ui.skywalking EOF Check the status of the skywalking components.\n$ kubectl get pod -n skywalking-system NAME READY STATUS RESTARTS AGE skywalking-system-oap-68bd877f57-fhzdz 1/1 Running 0 6m23s skywalking-system-ui-6db8579b47-rphtl 1/1 Running 0 6m23s Step5: Use the java agent injector to inject the java agent into the application pod Label the namespace where the application pod is located with swck-injection=enabled.\n$ kubectl label namespace skywalking-system swck-injection=enabled Create the application pod.\n Note: The application pod must be labeled with swck-java-agent-injected=true and the agent.skywalking.apache.org/collector.backend_service annotation must be set to the address of the OAP server. For more configurations, please refer to the guide.\n $ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: apps/v1 kind: Deployment metadata: name: demo namespace: skywalking-system spec: selector: matchLabels: app: demo template: metadata: labels: # enable the java agent injector swck-java-agent-injected: \u0026#34;true\u0026#34; app: demo annotations: agent.skywalking.apache.org/collector.backend_service: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34; spec: containers: - name: demo1 imagePullPolicy: IfNotPresent image: ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1 command: [\u0026#34;java\u0026#34;] args: [\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;] ports: - containerPort: 8085 readinessProbe: httpGet: path: /hello port: 8085 initialDelaySeconds: 3 periodSeconds: 3 failureThreshold: 10 --- apiVersion: v1 kind: Service metadata: name: demo namespace: skywalking-system spec: type: ClusterIP ports: - name: 8085-tcp port: 8085 protocol: TCP targetPort: 8085 selector: app: demo EOF Check the status of the application pod and make sure the java agent is injected into the application pod.\n$ kubectl get pod -n skywalking-system -l app=demo -ojsonpath=\u0026#39;{.items[0].spec.initContainers[0]}\u0026#39;  Expected output {\u0026#34;args\u0026#34;:[\u0026#34;-c\u0026#34;,\u0026#34;mkdir -p /sky/agent \\u0026\\u0026 cp -r /skywalking/agent/* /sky/agent\u0026#34;],\u0026#34;command\u0026#34;:[\u0026#34;sh\u0026#34;],\u0026#34;image\u0026#34;:\u0026#34;apache/skywalking-java-agent:8.16.0-java8\u0026#34;,\u0026#34;imagePullPolicy\u0026#34;:\u0026#34;IfNotPresent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;inject-skywalking-agent\u0026#34;,\u0026#34;resources\u0026#34;:{},\u0026#34;terminationMessagePath\u0026#34;:\u0026#34;/dev/termination-log\u0026#34;,\u0026#34;terminationMessagePolicy\u0026#34;:\u0026#34;File\u0026#34;,\u0026#34;volumeMounts\u0026#34;:[{\u0026#34;mountPath\u0026#34;:\u0026#34;/sky/agent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;sky-agent\u0026#34;},{\u0026#34;mountPath\u0026#34;:\u0026#34;/var/run/secrets/kubernetes.io/serviceaccount\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;kube-api-access-4qk26\u0026#34;,\u0026#34;readOnly\u0026#34;:true}]}  Also, you could check the final java agent configurations with the following command.\n$ kubectl get javaagent -n skywalking-system -l app=demo -oyaml  Expected output apiVersion: v1 items: - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2023-11-19T05:34:03Z\u0026#34; generation: 1 labels: app: demo name: app-demo-javaagent namespace: skywalking-system ownerReferences: - 
apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo-75d8d995cc uid: 8cb64abc-9b50-4f67-9304-2e09de476168 resourceVersion: \u0026#34;21515\u0026#34; uid: 6cbafb3d-9f43-4448-95e8-bda1f7c72bc3 spec: agentConfiguration: collector.backend_service: skywalking-system-oap.skywalking-system:11800 optional-plugin: webflux|cloud-gateway-2.1.x backendService: skywalking-system-oap.skywalking-system:11800 podSelector: app=demo serviceName: Your_ApplicationName status: creationTime: \u0026#34;2023-11-19T05:34:03Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2023-11-19T05:34:46Z\u0026#34; realInjectedNum: 1 kind: List metadata: resourceVersion: \u0026#34;\u0026#34; selfLink: \u0026#34;\u0026#34;  If you want to check the logs of the java agent, you can run the following command.\n$ kubectl logs -f -n skywalking-system -l app=demo -c inject-skywalking-agent Step6: Check the application metrics in the skywalking UI First, port-forward the demo service to your local machine.\n$ kubectl port-forward svc/demo 8085:8085 -n skywalking-system Then, trigger the application to generate some metrics.\n$ for i in {1..10}; do curl http://127.0.0.1:8085/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done After that, you can port-forward the skywalking UI to your local machine.\n$ kubectl port-forward svc/skywalking-system-ui 8080:80 -n skywalking-system Open the skywalking UI in your browser and navigate to http://127.0.0.1:8080 to check the application metrics.\n Expected output  Also, if you want to expose the external metrics to the kubernetes HPA, you can follow the guide to deploy the custom metrics adapter and you may get some inspiration from the e2e test.\n","excerpt":"Getting Started This document introduces how to create a kubernetes cluster locally using kind and …","ref":"/docs/skywalking-swck/next/getting-started/","title":"Getting Started"},{"body":"Getting Started This document introduces how to create a kubernetes cluster locally using kind and how to deploy the basic skywalking components to the cluster.\nPrerequisites  docker \u0026gt;= v20.10.6 kubectl \u0026gt;= v1.21.0 kind \u0026gt;= v0.20.0 swctl \u0026gt;= v0.10.0  Step1: Create a kubernetes cluster locally using kind  Note: If you have a kubernetes cluster (\u0026gt; v1.21.10) already, you can skip this step.\n Here we create a kubernetes cluster with 1 control-plane node and 1 worker nodes.\n$ cat \u0026lt;\u0026lt;EOF | kind create cluster --config=- kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane image: kindest/node:v1.21.10 - role: worker image: kindest/node:v1.21.10 EOF  Expected output Creating cluster \u0026#34;kind\u0026#34; ... ✓ Ensuring node image (kindest/node:v1.21.10) 🖼 ✓ Preparing nodes 📦 📦 ✓ Writing configuration 📜 ✓ Starting control-plane 🕹️ ✓ Installing CNI 🔌 ✓ Installing StorageClass 💾 ✓ Joining worker nodes 🚜 Set kubectl context to \u0026#34;kind-kind\u0026#34; You can now use your cluster with: kubectl cluster-info --context kind-kind Not sure what to do next? 
😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/  Check all pods in the cluster.\n$ kubectl get pods -A  Expected output NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-558bd4d5db-h5gxt 1/1 Running 0 106s kube-system coredns-558bd4d5db-lhnvz 1/1 Running 0 106s kube-system etcd-kind-control-plane 1/1 Running 0 116s kube-system kindnet-fxlkm 1/1 Running 0 106s kube-system kindnet-vmcvl 1/1 Running 0 91s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 116s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 116s kube-system kube-proxy-nr4f4 1/1 Running 0 91s kube-system kube-proxy-zl4h2 1/1 Running 0 106s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 116s local-path-storage local-path-provisioner-74567d47b4-kmtjh 1/1 Running 0 106s  Step2: Build the operator image Check into the root directory of SWCK and build the operator image as follows.\n$ cd operator # Build the operator image $ make docker-build You will get the operator image controller:latest as follows.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE controller latest 84da7509092a 22 seconds ago 53.6MB Load the operator image into the kind cluster or push the image to a registry that your kubernetes cluster can access.\n$ kind load docker-image controller or\n$ docker push $(YOUR_REGISTRY)/controller Step3: Deploy operator on the kubernetes cluster Install the CRDs as follows.\n$ make install Check the CRDs are installed successfully.\n Expected output kubectl get crd | grep skywalking banyandbs.operator.skywalking.apache.org 2023-11-05T03:30:43Z fetchers.operator.skywalking.apache.org 2023-11-05T03:30:43Z javaagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverdynamicconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapservers.operator.skywalking.apache.org 2023-11-05T03:30:43Z satellites.operator.skywalking.apache.org 2023-11-05T03:30:43Z storages.operator.skywalking.apache.org 2023-11-05T03:30:43Z swagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z uis.operator.skywalking.apache.org 2023-11-05T03:30:43Z  Deploy the SWCK operator to the cluster.\n$ make deploy Or deploy the SWCK operator to the cluster with your own image.\n$ make deploy OPERATOR_IMG=$(YOUR_REGISTRY)/controller Get the status of the SWCK operator pod.\n$ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-5f5bbd4fd-9wdw6 2/2 Running 0 34s Step4: Deploy skywalking componentes on the kubernetes cluster Create the skywalking-system namespace.\n$ kubectl create namespace skywalking-system Deploy the skywalking components to the cluster.\n$ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: OAPServer metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-oap-server:9.5.0 service: template: type: ClusterIP --- apiVersion: operator.skywalking.apache.org/v1alpha1 kind: UI metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-ui:9.5.0 OAPServerAddress: http://skywalking-system-oap.skywalking-system:12800 service: template: type: ClusterIP ingress: host: demo.ui.skywalking EOF Check the status of the skywalking components.\n$ kubectl get pod -n skywalking-system NAME READY STATUS RESTARTS AGE skywalking-system-oap-68bd877f57-fhzdz 1/1 Running 0 6m23s 
skywalking-system-ui-6db8579b47-rphtl 1/1 Running 0 6m23s Step5: Use the java agent injector to inject the java agent into the application pod Label the namespace where the application pod is located with swck-injection=enabled.\n$ kubectl label namespace skywalking-system swck-injection=enabled Create the application pod.\n Note: The application pod must be labeled with swck-java-agent-injected=true and the agent.skywalking.apache.org/collector.backend_service annotation must be set to the address of the OAP server. For more configurations, please refer to the guide.\n $ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: apps/v1 kind: Deployment metadata: name: demo namespace: skywalking-system spec: selector: matchLabels: app: demo template: metadata: labels: # enable the java agent injector swck-java-agent-injected: \u0026#34;true\u0026#34; app: demo annotations: agent.skywalking.apache.org/collector.backend_service: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34; spec: containers: - name: demo1 imagePullPolicy: IfNotPresent image: ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1 command: [\u0026#34;java\u0026#34;] args: [\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;] ports: - containerPort: 8085 readinessProbe: httpGet: path: /hello port: 8085 initialDelaySeconds: 3 periodSeconds: 3 failureThreshold: 10 --- apiVersion: v1 kind: Service metadata: name: demo namespace: skywalking-system spec: type: ClusterIP ports: - name: 8085-tcp port: 8085 protocol: TCP targetPort: 8085 selector: app: demo EOF Check the status of the application pod and make sure the java agent is injected into the application pod.\n$ kubectl get pod -n skywalking-system -l app=demo -ojsonpath=\u0026#39;{.items[0].spec.initContainers[0]}\u0026#39;  Expected output {\u0026#34;args\u0026#34;:[\u0026#34;-c\u0026#34;,\u0026#34;mkdir -p /sky/agent \\u0026\\u0026 cp -r /skywalking/agent/* /sky/agent\u0026#34;],\u0026#34;command\u0026#34;:[\u0026#34;sh\u0026#34;],\u0026#34;image\u0026#34;:\u0026#34;apache/skywalking-java-agent:8.16.0-java8\u0026#34;,\u0026#34;imagePullPolicy\u0026#34;:\u0026#34;IfNotPresent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;inject-skywalking-agent\u0026#34;,\u0026#34;resources\u0026#34;:{},\u0026#34;terminationMessagePath\u0026#34;:\u0026#34;/dev/termination-log\u0026#34;,\u0026#34;terminationMessagePolicy\u0026#34;:\u0026#34;File\u0026#34;,\u0026#34;volumeMounts\u0026#34;:[{\u0026#34;mountPath\u0026#34;:\u0026#34;/sky/agent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;sky-agent\u0026#34;},{\u0026#34;mountPath\u0026#34;:\u0026#34;/var/run/secrets/kubernetes.io/serviceaccount\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;kube-api-access-4qk26\u0026#34;,\u0026#34;readOnly\u0026#34;:true}]}  Also, you could check the final java agent configurations with the following command.\n$ kubectl get javaagent -n skywalking-system -l app=demo -oyaml  Expected output apiVersion: v1 items: - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2023-11-19T05:34:03Z\u0026#34; generation: 1 labels: app: demo name: app-demo-javaagent namespace: skywalking-system ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo-75d8d995cc uid: 8cb64abc-9b50-4f67-9304-2e09de476168 resourceVersion: \u0026#34;21515\u0026#34; uid: 6cbafb3d-9f43-4448-95e8-bda1f7c72bc3 spec: agentConfiguration: collector.backend_service: skywalking-system-oap.skywalking-system:11800 optional-plugin: 
webflux|cloud-gateway-2.1.x backendService: skywalking-system-oap.skywalking-system:11800 podSelector: app=demo serviceName: Your_ApplicationName status: creationTime: \u0026#34;2023-11-19T05:34:03Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2023-11-19T05:34:46Z\u0026#34; realInjectedNum: 1 kind: List metadata: resourceVersion: \u0026#34;\u0026#34; selfLink: \u0026#34;\u0026#34;  If you want to check the logs of the java agent, you can run the following command.\n$ kubectl logs -f -n skywalking-system -l app=demo -c inject-skywalking-agent Step6: Check the application metrics in the skywalking UI First, port-forward the demo service to your local machine.\n$ kubectl port-forward svc/demo 8085:8085 -n skywalking-system Then, trigger the application to generate some metrics.\n$ for i in {1..10}; do curl http://127.0.0.1:8085/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done After that, you can port-forward the skywalking UI to your local machine.\n$ kubectl port-forward svc/skywalking-system-ui 8080:80 -n skywalking-system Open the skywalking UI in your browser and navigate to http://127.0.0.1:8080 to check the application metrics.\n Expected output  Also, if you want to expose the external metrics to the kubernetes HPA, you can follow the guide to deploy the custom metrics adapter and you may get some inspiration from the e2e test.\n","excerpt":"Getting Started This document introduces how to create a kubernetes cluster locally using kind and …","ref":"/docs/skywalking-swck/v0.9.0/getting-started/","title":"Getting Started"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. 
For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products/{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will be re-created by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/latest/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products/{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will be re-created by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/next/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path being matched. E.g. /products or /products/inventory The path which has less variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpiont Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.0.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
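To illustrate the defaults described above, here is a minimal sketch (not taken from the SkyWalking repository) of a definition file that sets none of the x-sw-* extensions. Assuming it is saved as openapi-definitions/serviceA/productAPI-v1.yaml, the service name would default to the directory name serviceA and the endpoint would be grouped as GET:/products/{id}:

```yaml
# Hypothetical openapi-definitions/serviceA/productAPI-v1.yaml (sketch).
# No x-sw-* extensions are set, so:
#   - the service name defaults to the directory name (serviceA)
#   - the match rule and name format default to ${METHOD}:${PATH}
openapi: 3.0.0
info:
  title: Product API
  version: v1
paths:
  /products/{id}:
    get:
      parameters:
        - name: id
          in: path
          required: true
          schema:
            type: integer
      responses:
        "200":
          description: successful operation
```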
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpiont Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.1.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.2.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
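For readability, the sketch below restores the flattened endpoint-name-grouping.yml example from the Configuration Format section of this page into ordinary YAML; the serviceA service name and the /prod/{id} rule come directly from that example.

```yaml
# Sketch of endpoint-name-grouping.yml (custom grouping rules).
grouping:
  # Endpoints of the service would follow the rules below.
  - service-name: serviceA
    rules:
      # Logic name used when the regex expression matches.
      - endpoint-name: /prod/{id}
        regex: \/prod\/.+
```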
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.3.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
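As a sketch of overriding only the reported name format (one of the use cases listed after the example document below), keeping the default match rule while setting x-sw-endpoint-name-format to ${PATH}:<${METHOD}> would group an incoming GET:/products/123 as /products/{id}:<GET>:

```yaml
# Sketch: keep the default match rule but change how the grouped name is formatted.
# An incoming GET:/products/123 would be reported as /products/{id}:<GET>.
openapi: 3.0.0
x-sw-endpoint-name-format: "${PATH}:<${METHOD}>"
info:
  title: Product API
  version: v2
```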
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.4.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
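The match rule can also be overridden on its own. This sketch mirrors the last use case in the table further down: assuming the agent reports endpoint names shaped like /products/123:<GET>, a custom x-sw-endpoint-name-match-rule of ${PATH}:<${METHOD}> lets SkyWalking still group them under the default GET:/products/{id} name:

```yaml
# Sketch: override only the match rule; the name format stays default.
# An incoming endpoint reported as /products/123:<GET> would be
# grouped as GET:/products/{id}.
openapi: 3.0.0
x-sw-endpoint-name-match-rule: "${PATH}:<${METHOD}>"
info:
  title: Product API
  version: v2
```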
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.5.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
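The Configuration Format section of this page uses a simplified rule format where {var} stands for any variable string in the URI; restored into ordinary YAML, a sketch of that endpoint-name-grouping.yml looks like this:

```yaml
# Sketch of the simplified custom grouping format,
# where {var} represents any variable string in the URI.
grouping:
  - service-name: serviceA
    rules:
      - /prod/{var}
```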
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.6.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. 
/products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
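For readability, the fully customized document header shown earlier can be written out as normal, indented YAML. This is only a sketch of that same example (serviceB and the two rules are the values from the snippet above), not an additional configuration:

```yaml
openapi: 3.0.0
# SkyWalking Specification Extensions; all three are optional and default
# to the values listed in the extensions table above.
x-sw-service-name: serviceB
x-sw-endpoint-name-match-rule: "${METHOD}:${PATH}"
x-sw-endpoint-name-format: "${METHOD}:${PATH}"
info:
  description: OpenAPI definition for SkyWalking test.
  version: v2
  title: Product API
```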
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products/{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions; the endpoint grouping rules from OpenAPI will be re-created from the new configuration.\nEndpoint name grouping by custom configuration Currently, a user can set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","excerpt":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language …","ref":"/docs/main/v9.7.0/en/setup/backend/endpoint-grouping-rules/","title":"Group Parameterized Endpoints"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  
TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). 
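For reference, the receiver-sharing-server block above reads as follows when written as normal, indented YAML; the values are the same environment-variable placeholders as in the text and should be adjusted to your deployment:

```yaml
receiver-sharing-server:
  selector: ${SW_RECEIVER_SHARING_SERVER:default}
  default:
    # For gRPC server
    gRPCHost: ${SW_RECEIVER_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_RECEIVER_GRPC_PORT:"changeMe"}
    maxConcurrentCallsPerConnection: ${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}
    maxMessageSize: ${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}
    gRPCThreadPoolQueueSize: ${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}
    gRPCThreadPoolSize: ${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}
    gRPCSslEnabled: ${SW_RECEIVER_GRPC_SSL_ENABLED:true}
    gRPCSslKeyPath: ${SW_RECEIVER_GRPC_SSL_KEY_PATH:"/path/to/server.pem"}
    gRPCSslCertChainPath: ${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:"/path/to/server.crt"}
    gRPCSslTrustedCAsPath: ${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:"/path/to/ca.crt"}
    authentication: ${SW_AUTHENTICATION:""}
```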
You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/latest/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. 
But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/next/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. 
That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. For more details, see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enabling mTLS mode for the gRPC channel requires the Sharing gRPC Server to be enabled, as in the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). 
You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.0.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. 
But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.1.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  
TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). 
You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.2.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. 
But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.3.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  
TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). 
You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.4.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. 
But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.5.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  
TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). 
You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.6.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. 
But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","excerpt":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a …","ref":"/docs/main/v9.7.0/en/setup/backend/grpc-security/","title":"gRPC SSL transportation support for OAP server"},{"body":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","excerpt":"Guide  This section explains how to manage translations for internationalization of menu items. …","ref":"/docs/main/latest/en/guides/i18n/","title":"Guide"},{"body":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. 
The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","excerpt":"Guide  This section explains how to manage translations for internationalization of menu items. …","ref":"/docs/main/next/en/guides/i18n/","title":"Guide"},{"body":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","excerpt":"Guide  This section explains how to manage translations for internationalization of menu items. …","ref":"/docs/main/v9.6.0/en/guides/i18n/","title":"Guide"},{"body":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","excerpt":"Guide  This section explains how to manage translations for internationalization of menu items. …","ref":"/docs/main/v9.7.0/en/guides/i18n/","title":"Guide"},{"body":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","excerpt":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an …","ref":"/docs/main/latest/en/guides/community/","title":"Guides"},{"body":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. 
Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","excerpt":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an …","ref":"/docs/main/next/en/guides/community/","title":"Guides"},{"body":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. 
if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metris from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. 
set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe have a simple script to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Build a distribution package and unzip/untar it to folder dist. Run the script in the root directory. It will print out all new dependencies. Check the LICENSE and NOTICE of those dependencies to make sure that they can be included in an ASF project. Add them to the apm-dist/release-docs/{LICENSE,NOTICE} file. Add the names of these dependencies to the tools/dependencies/known-oap-backend-dependencies.txt file (in alphabetical order). check-LICENSE.sh should pass in the next run.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. 
The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","excerpt":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our …","ref":"/docs/main/v9.0.0/en/guides/readme/","title":"Guides"},{"body":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. 
Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metris from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. 
For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe have a simple script to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Build a distribution package and unzip/untar it to folder dist. Run the script in the root directory. It will print out all new dependencies. Check the LICENSE and NOTICE of those dependencies to make sure that they can be included in an ASF project. Add them to the apm-dist/release-docs/{LICENSE,NOTICE} file. Add the names of these dependencies to the tools/dependencies/known-oap-backend-dependencies.txt file (in alphabetical order). check-LICENSE.sh should pass in the next run.  Profile The performance profile is an enhancement feature in the APM system. 
We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","excerpt":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our …","ref":"/docs/main/v9.1.0/en/guides/readme/","title":"Guides"},{"body":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. 
maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. 
Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. 
Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","excerpt":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our …","ref":"/docs/main/v9.2.0/en/guides/readme/","title":"Guides"},{"body":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. 
It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. 
Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document. If you would like to add a new root menu or sub-menu to booster UI, read the UI menu control document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. 
So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","excerpt":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our …","ref":"/docs/main/v9.3.0/en/guides/readme/","title":"Guides"},{"body":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  
Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. 
The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. 
OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document. If you would like to add a new root menu or sub-menu to booster UI, read the UI menu control document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","excerpt":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our …","ref":"/docs/main/v9.4.0/en/guides/readme/","title":"Guides"},{"body":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. 
Submit an issue or start a discussion on GitHub. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe by following the steps below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project locally and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class names match IT* or *IT) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name them with the pattern IT* or *IT so that they only run in the integration-test goal.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests can run as normal unit tests; a minimal sketch of such a benchmark follows. 
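For illustration only, a minimal sketch of a benchmark class under the assumptions above: it extends the microbench module's AbstractMicrobenchmark mentioned in the previous paragraph, while the class name and benchmark body are hypothetical.
package org.apache.skywalking.oap.server.microbench;

import org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark;
import org.openjdk.jmh.annotations.Benchmark;

// Hypothetical example benchmark; extending the project base class applies the
// shared runtime conditions (Measurement, Fork, Warmup, etc.).
public class StringConcatBenchmark extends AbstractMicrobenchmark {

    @Benchmark
    public String concat() {
        // Returning the value prevents the JVM from dead-code-eliminating the work.
        return "service-" + System.nanoTime();
    }
}
Such a class runs under ./mvnw clean test like any unit test, or from the uber jar as described next.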
They can also run as an independent uber jar, via java -jar benchmarks.jar for all benchmarks, or via java -jar benchmarks.jar exampleClassName for a specific test.\nTo output test results in JSON format, add -rf json, e.g. java -jar benchmarks.jar -rf json. If you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the save path of the JMH report; a report in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automated tests to perform software quality assurance. E2E testing is an integral part of this effort.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we migrated to e2e-v2, which leverages skywalking-infra-e2e and skywalking-cli to run the whole e2e process. skywalking-infra-e2e is used to control the e2e process, and skywalking-cli is used to interact with the OAP, such as requesting and retrieving metrics from the OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these two are enough) and other tools if your cases need them. You can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), in the single-node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using docker-compose, which provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data (yml) files for verification.   Run e2e test  All e2e cases should be under skywalking/test/e2e-v2/cases. You can execute the e2e run command in skywalking/, e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to stdout in the non-CI (local) mode; in CI, they are saved and uploaded to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. 
However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interested in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You can modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document. If you would like to add a new root menu or sub-menu to booster UI, read the UI menu control document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are under the Apache 2.0 license. Copy the new dependencies' license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 licenses.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn how to create an official Apache version release in accordance with Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","excerpt":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our …","ref":"/docs/main/v9.5.0/en/guides/readme/","title":"Guides"},{"body":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, propose features, or discuss uncertain bugs. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. 
Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","excerpt":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an …","ref":"/docs/main/v9.6.0/en/guides/community/","title":"Guides"},{"body":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","excerpt":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an …","ref":"/docs/main/v9.7.0/en/guides/community/","title":"Guides"},{"body":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide you.\n Contribution  How to contribute a module?   Compile  How to compile SkyWalking Rover?    ","excerpt":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide …","ref":"/docs/skywalking-rover/latest/en/guides/readme/","title":"Guides"},{"body":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide you.\n Contribution  How to contribute a module?   Compile  How to compile SkyWalking Rover?    ","excerpt":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide …","ref":"/docs/skywalking-rover/next/en/guides/readme/","title":"Guides"},{"body":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide you.\n Contribution  How to contribute a module?   Compile  How to compile SkyWalking Rover?    ","excerpt":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide …","ref":"/docs/skywalking-rover/v0.6.0/en/guides/readme/","title":"Guides"},{"body":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would guide you.\n Contribution  How to contribute a plugin? How to release SkyWalking Satellite?   Compile  How to compile SkyWalking Satellite?   Test  How to add unit test for a plugin?    ","excerpt":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would …","ref":"/docs/skywalking-satellite/latest/en/guides/readme/","title":"Guides"},{"body":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would guide you.\n Contribution  How to contribute a plugin? How to release SkyWalking Satellite?   Compile  How to compile SkyWalking Satellite?   Test  How to add unit test for a plugin?    
","excerpt":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would …","ref":"/docs/skywalking-satellite/next/en/guides/readme/","title":"Guides"},{"body":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would guide you.\n Contribution  How to contribute a plugin? How to release SkyWalking Satellite?   Compile  How to compile SkyWalking Satellite?   Test  How to add unit test for a plugin?    ","excerpt":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would …","ref":"/docs/skywalking-satellite/v1.2.0/en/guides/readme/","title":"Guides"},{"body":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}","excerpt":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the …","ref":"/docs/main/latest/en/setup/backend/storages/h2/","title":"H2"},{"body":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}","excerpt":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the …","ref":"/docs/main/next/en/setup/backend/storages/h2/","title":"H2"},{"body":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}","excerpt":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the …","ref":"/docs/main/v9.7.0/en/setup/backend/storages/h2/","title":"H2"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/latest/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/next/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to check the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. 
You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to check the health status of the OAP …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. 
It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. 
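For illustration only, a minimal sketch of querying that gRPC health endpoint from Java with the standard grpc-java health API (io.grpc:grpc-services); the host and the default OAP gRPC port 11800 are assumptions about a local deployment, not part of the document above.
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.health.v1.HealthCheckRequest;
import io.grpc.health.v1.HealthCheckResponse;
import io.grpc.health.v1.HealthGrpc;

public class OapHealthProbe {
    public static void main(String[] args) {
        // Assumed local OAP gRPC endpoint; adjust host/port to your deployment.
        ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 11800)
                .usePlaintext()
                .build();
        try {
            HealthCheckResponse response = HealthGrpc.newBlockingStub(channel)
                    // An empty service name asks for the overall server status.
                    .check(HealthCheckRequest.newBuilder().setService("").build());
            System.out.println("OAP gRPC status: " + response.getStatus());
        } finally {
            channel.shutdownNow();
        }
    }
}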
You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. 
storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","excerpt":"Health Check Health check intends to provide a unique approach to checking the health status of the …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-health-check/","title":"Health Check"},{"body":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and skywalking-python-profiling described how the threading-profiler works\nAnd this figure demonstrates how the profiler works as well:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread: get: /api/v1/user/ rect rgb(0,200,0) API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;-working thread: snapshot end working thread--\u0026gt;\u0026gt;-API: response It works well with threading mode because the whole process will be executed in the same thread, so the profiling thread can fetch the complete profiling info of the process of the API request.\nWhy doesn\u0026rsquo;t threading-profiler work in greenlet mode When the python program runs with gevent + greenlet, the process would be like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;-working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response In this circumstance, the snapshot of the working thread includes multi contexts of different greenlets, which will make skywalking confused to build the trace stack.\nFortunately, greenlet has an API for profiling, the doc is here. 
We can implement a greenlet profiler to solve this issue.\nHow the greenlet profiler works A greenlet profiler leverages the trace callback of greenlet, it works like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet and snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet and snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response We can set a callback function to the greenlet that we need to profiling, then when the greenlet.HUB switches the context in/out to the working thread, the callback will build a snapshot of the greenlet\u0026rsquo;s traceback and send it to skywalking.\nThe difference between these two profilers The greenlet profiler will significantly reduce the snapshot times of the profiling process, which means that it will cost less CPU time than the threading profiler.\n","excerpt":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and …","ref":"/docs/skywalking-python/latest/en/profiling/profiling/","title":"How does threading-profiler (the default mode) work"},{"body":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and skywalking-python-profiling described how the threading-profiler works\nAnd this figure demonstrates how the profiler works as well:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread: get: /api/v1/user/ rect rgb(0,200,0) API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;-working thread: snapshot end working thread--\u0026gt;\u0026gt;-API: response It works well with threading mode because the whole process will be executed in the same thread, so the profiling thread can fetch the complete profiling info of the process of the API request.\nWhy doesn\u0026rsquo;t threading-profiler work in greenlet mode When the python program runs with gevent + greenlet, the process would be like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;-working thread 1: snapshot working thread 
1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response In this circumstance, the snapshot of the working thread includes multi contexts of different greenlets, which will make skywalking confused to build the trace stack.\nFortunately, greenlet has an API for profiling, the doc is here. We can implement a greenlet profiler to solve this issue.\nHow the greenlet profiler works A greenlet profiler leverages the trace callback of greenlet, it works like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet and snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet and snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response We can set a callback function to the greenlet that we need to profiling, then when the greenlet.HUB switches the context in/out to the working thread, the callback will build a snapshot of the greenlet\u0026rsquo;s traceback and send it to skywalking.\nThe difference between these two profilers The greenlet profiler will significantly reduce the snapshot times of the profiling process, which means that it will cost less CPU time than the threading profiler.\n","excerpt":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and …","ref":"/docs/skywalking-python/next/en/profiling/profiling/","title":"How does threading-profiler (the default mode) work"},{"body":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and skywalking-python-profiling described how the threading-profiler works\nAnd this figure demonstrates how the profiler works as well:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread: get: /api/v1/user/ rect rgb(0,200,0) API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;-working thread: snapshot end working thread--\u0026gt;\u0026gt;-API: response It works well with threading mode because the whole process will be executed in the same thread, so the profiling thread can fetch the complete profiling info of the process of the API request.\nWhy doesn\u0026rsquo;t threading-profiler work in greenlet mode When the python program runs with gevent + greenlet, the process would be like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet profiling 
thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;-working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response In this circumstance, the snapshot of the working thread includes multi contexts of different greenlets, which will make skywalking confused to build the trace stack.\nFortunately, greenlet has an API for profiling, the doc is here. We can implement a greenlet profiler to solve this issue.\nHow the greenlet profiler works A greenlet profiler leverages the trace callback of greenlet, it works like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet and snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet and snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response We can set a callback function to the greenlet that we need to profiling, then when the greenlet.HUB switches the context in/out to the working thread, the callback will build a snapshot of the greenlet\u0026rsquo;s traceback and send it to skywalking.\nThe difference between these two profilers The greenlet profiler will significantly reduce the snapshot times of the profiling process, which means that it will cost less CPU time than the threading profiler.\n","excerpt":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and …","ref":"/docs/skywalking-python/v1.0.1/en/profiling/profiling/","title":"How does threading-profiler (the default mode) work"},{"body":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or sub-menu, you should add data to src/router/data/xx and add translation contents for the title to src/locales/lang/xx in booster UI.\n Create a new file called xxx.ts in src/router/data. Add configurations to the xxx.ts, configurations should be like this.  export default [ { // Add `Infrastructure` menu  path: \u0026#34;\u0026#34;, name: \u0026#34;Infrastructure\u0026#34;, meta: { title: \u0026#34;infrastructure\u0026#34;, icon: \u0026#34;scatter_plot\u0026#34;, hasGroup: true, }, redirect: \u0026#34;/linux\u0026#34;, children: [ // Add a sub menu of the `Infrastructure`  { path: \u0026#34;/linux\u0026#34;, name: \u0026#34;Linux\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, // If there are Tabs widgets in your dashboards, add following extra configuration to provide static links to the specific tab.  { path: \u0026#34;/linux/tab/:activeTabIndex\u0026#34;, name: \u0026#34;LinuxActiveTabIndex\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, notShow: true, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, ], }, ]; import configurations in src/router/data/index.ts.  
import name from \u0026#34;./xxx\u0026#34;; ","excerpt":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or …","ref":"/docs/main/v9.3.0/en/guides/how-to-add-menu/","title":"How to add a new root menu or sub-menu to booster UI"},{"body":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or sub-menu, you should add data to src/router/data/xx and add translation contents for the title to src/locales/lang/xx in booster UI.\n Create a new file called xxx.ts in src/router/data. Add configurations to the xxx.ts, configurations should be like this.  export default [ { // Add `Infrastructure` menu  path: \u0026#34;\u0026#34;, name: \u0026#34;Infrastructure\u0026#34;, meta: { title: \u0026#34;infrastructure\u0026#34;, icon: \u0026#34;scatter_plot\u0026#34;, hasGroup: true, }, redirect: \u0026#34;/linux\u0026#34;, children: [ // Add a sub menu of the `Infrastructure`  { path: \u0026#34;/linux\u0026#34;, name: \u0026#34;Linux\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, // If there are Tabs widgets in your dashboards, add following extra configuration to provide static links to the specific tab.  { path: \u0026#34;/linux/tab/:activeTabIndex\u0026#34;, name: \u0026#34;LinuxActiveTabIndex\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, notShow: true, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, ], }, ]; import configurations in src/router/data/index.ts.  import name from \u0026#34;./xxx\u0026#34;; ","excerpt":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or …","ref":"/docs/main/v9.4.0/en/guides/how-to-add-menu/","title":"How to add a new root menu or sub-menu to booster UI"},{"body":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or sub-menu, you should add data to src/router/data/xx and add translation contents for the title to src/locales/lang/xx in booster UI.\n Create a new file called xxx.ts in src/router/data. Add configurations to the xxx.ts, configurations should be like this.  export default [ { // Add `Infrastructure` menu  path: \u0026#34;\u0026#34;, name: \u0026#34;Infrastructure\u0026#34;, meta: { title: \u0026#34;infrastructure\u0026#34;, icon: \u0026#34;scatter_plot\u0026#34;, hasGroup: true, }, redirect: \u0026#34;/linux\u0026#34;, children: [ // Add a sub menu of the `Infrastructure`  { path: \u0026#34;/linux\u0026#34;, name: \u0026#34;Linux\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, // If there are Tabs widgets in your dashboards, add following extra configuration to provide static links to the specific tab.  { path: \u0026#34;/linux/tab/:activeTabIndex\u0026#34;, name: \u0026#34;LinuxActiveTabIndex\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, notShow: true, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, ], }, ]; import configurations in src/router/data/index.ts.  import name from \u0026#34;./xxx\u0026#34;; ","excerpt":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or …","ref":"/docs/main/v9.5.0/en/guides/how-to-add-menu/","title":"How to add a new root menu or sub-menu to booster UI"},{"body":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs and Controllers in SWCK.\n1. 
Install the kubebuilder  Notice, SWCK is built by kubebuilder v3.2.0, so you need to install it at first.\n SWCK is based on the kubebuilder, and you could download the kubebuilder by the script.\n2. Create CRD and Controller You can use kubebuilder create api to scaffold a new Kind and corresponding controller. Here we use the Demo as an example.\n$ cd operator \u0026amp;\u0026amp; kubebuilder create api --group operator --version v1alpha1 --kind Demo(Your CRD) Then you need to input twice y to create the Resource and Controller, and there will be some newly added files.\n$ git status On branch master Your branch is up to date with \u0026#39;origin/master\u0026#39;. Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: apis/operator/v1alpha1/zz_generated.deepcopy.go modified: config/crd/bases/operator.skywalking.apache.org_swagents.yaml modified: config/crd/kustomization.yaml modified: config/rbac/role.yaml modified: go.mod modified: go.sum modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_types.go config/crd/bases/operator.skywalking.apache.org_demoes.yaml config/crd/patches/cainjection_in_operator_demoes.yaml config/crd/patches/webhook_in_operator_demoes.yaml config/rbac/operator_demo_editor_role.yaml config/rbac/operator_demo_viewer_role.yaml config/samples/operator_v1alpha1_demo.yaml controllers/operator/demo_controller.go controllers/operator/suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) Next, we need to focus on the file apis/operator/v1alpha1/demo_types.go which defines your CRD, and the file controllers/operator/configuration_controller.go which defines the Controller. The others files are some configurations generated by the kubebuilder markers. Here are some references:\n  Kubebuilder project demo, in which you can understand the overall architecture.\n  How to add new-api, which you can find more details for oapserverconfig_types.go.\n  Controller-overview, where you can find more details about oapserverconfig_controller.go.\n  3. Create webhook If you want to fields or set defaults to CRs, creating webhooks is a good practice:\nkubebuilder create webhook --group operator --version v1alpha1 --kind Demo --defaulting --programmatic-validation The newly generated files are as follows.\n$ git status On branch master Your branch is ahead of \u0026#39;origin/master\u0026#39; by 1 commit. (use \u0026#34;git push\u0026#34; to publish your local commits) Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: config/webhook/manifests.yaml modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_webhook.go apis/operator/v1alpha1/webhook_suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) You can get more details through webhook-overview.\n4. 
Create the template Generally, a controller would generate a series of resources, such as workload, rbac, service, etc based on CRDs. SWCK is using the Go standard template engine to generate these resources. All template files are stored in the ./operator/pkg/operator/manifests. You could create a directory there such as demo to hold templates. The framework would transfer the CR as the arguments to these templates. More than CR, it supports passing custom rendering functions by setting up the TmplFunc. At last, you need to change the comment and add a field demo there to embed the template files into golang binaries.\n Notice, every file under the template directory can only contain one resource and we can\u0026rsquo;t use the --- to create multiple resources in a single file.\n 5. Build and Test SWCK needs to run in the k8s environment, so we highly recommend using the kind if you don\u0026rsquo;t have a cluster in hand. There are currently two ways to test your implementation.\n Before testing, please make sure you have the kind installed.\n  Test locally. After finishing your implementation, you could use the following steps to test locally:   Disable the webhook  export ENABLE_WEBHOOKS=false Run the main.go with the kubeconfig file.  go run main.go --kubeconfig=(use your kubeconfig file here, and the default is ~/.kube/config)  If you want to test the webhook, please refer the guide.\n  Test in-cluster.   Before testing the swck, please install cert-manager to provide the certificate for webhook in swck.  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml At first, you should build the swck image and load it into the kind cluster, and then you could install the crds and the operator as follows.  make docker-build \u0026amp;\u0026amp; kind load docker-image controller:latest \u0026amp;\u0026amp; make install \u0026amp;\u0026amp; make deploy After the swck is installed, and then you could use the following command to get the logs produced by the operator.  kubectl logs -f [skywalking-swck-controller-manager-*](../use the swck deployment name) -n skywalking-swck-system ","excerpt":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs …","ref":"/docs/skywalking-swck/latest/how-to-add-new-crd-and-controller/","title":"How to add CRD and Controller in SWCK?"},{"body":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs and Controllers in SWCK.\n1. Install the kubebuilder  Notice, SWCK is built by kubebuilder v3.2.0, so you need to install it at first.\n SWCK is based on the kubebuilder, and you could download the kubebuilder by the script.\n2. Create CRD and Controller You can use kubebuilder create api to scaffold a new Kind and corresponding controller. Here we use the Demo as an example.\n$ cd operator \u0026amp;\u0026amp; kubebuilder create api --group operator --version v1alpha1 --kind Demo(Your CRD) Then you need to input twice y to create the Resource and Controller, and there will be some newly added files.\n$ git status On branch master Your branch is up to date with \u0026#39;origin/master\u0026#39;. 
Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: apis/operator/v1alpha1/zz_generated.deepcopy.go modified: config/crd/bases/operator.skywalking.apache.org_swagents.yaml modified: config/crd/kustomization.yaml modified: config/rbac/role.yaml modified: go.mod modified: go.sum modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_types.go config/crd/bases/operator.skywalking.apache.org_demoes.yaml config/crd/patches/cainjection_in_operator_demoes.yaml config/crd/patches/webhook_in_operator_demoes.yaml config/rbac/operator_demo_editor_role.yaml config/rbac/operator_demo_viewer_role.yaml config/samples/operator_v1alpha1_demo.yaml controllers/operator/demo_controller.go controllers/operator/suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) Next, we need to focus on the file apis/operator/v1alpha1/demo_types.go which defines your CRD, and the file controllers/operator/configuration_controller.go which defines the Controller. The others files are some configurations generated by the kubebuilder markers. Here are some references:\n  Kubebuilder project demo, in which you can understand the overall architecture.\n  How to add new-api, which you can find more details for oapserverconfig_types.go.\n  Controller-overview, where you can find more details about oapserverconfig_controller.go.\n  3. Create webhook If you want to fields or set defaults to CRs, creating webhooks is a good practice:\nkubebuilder create webhook --group operator --version v1alpha1 --kind Demo --defaulting --programmatic-validation The newly generated files are as follows.\n$ git status On branch master Your branch is ahead of \u0026#39;origin/master\u0026#39; by 1 commit. (use \u0026#34;git push\u0026#34; to publish your local commits) Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: config/webhook/manifests.yaml modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_webhook.go apis/operator/v1alpha1/webhook_suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) You can get more details through webhook-overview.\n4. Create the template Generally, a controller would generate a series of resources, such as workload, rbac, service, etc based on CRDs. SWCK is using the Go standard template engine to generate these resources. All template files are stored in the ./operator/pkg/operator/manifests. You could create a directory there such as demo to hold templates. The framework would transfer the CR as the arguments to these templates. More than CR, it supports passing custom rendering functions by setting up the TmplFunc. 
At last, you need to change the comment and add a field demo there to embed the template files into golang binaries.\n Notice, every file under the template directory can only contain one resource and we can\u0026rsquo;t use the --- to create multiple resources in a single file.\n 5. Build and Test SWCK needs to run in the k8s environment, so we highly recommend using the kind if you don\u0026rsquo;t have a cluster in hand. There are currently two ways to test your implementation.\n Before testing, please make sure you have the kind installed.\n  Test locally. After finishing your implementation, you could use the following steps to test locally:   Disable the webhook  export ENABLE_WEBHOOKS=false Run the main.go with the kubeconfig file.  go run main.go --kubeconfig=(use your kubeconfig file here, and the default is ~/.kube/config)  If you want to test the webhook, please refer the guide.\n  Test in-cluster.   Before testing the swck, please install cert-manager to provide the certificate for webhook in swck.  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml At first, you should build the swck image and load it into the kind cluster, and then you could install the crds and the operator as follows.  make docker-build \u0026amp;\u0026amp; kind load docker-image controller:latest \u0026amp;\u0026amp; make install \u0026amp;\u0026amp; make deploy After the swck is installed, and then you could use the following command to get the logs produced by the operator.  kubectl logs -f [skywalking-swck-controller-manager-*](../use the swck deployment name) -n skywalking-swck-system ","excerpt":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs …","ref":"/docs/skywalking-swck/next/how-to-add-new-crd-and-controller/","title":"How to add CRD and Controller in SWCK?"},{"body":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs and Controllers in SWCK.\n1. Install the kubebuilder  Notice, SWCK is built by kubebuilder v3.2.0, so you need to install it at first.\n SWCK is based on the kubebuilder, and you could download the kubebuilder by the script.\n2. Create CRD and Controller You can use kubebuilder create api to scaffold a new Kind and corresponding controller. Here we use the Demo as an example.\n$ cd operator \u0026amp;\u0026amp; kubebuilder create api --group operator --version v1alpha1 --kind Demo(Your CRD) Then you need to input twice y to create the Resource and Controller, and there will be some newly added files.\n$ git status On branch master Your branch is up to date with \u0026#39;origin/master\u0026#39;. 
Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: apis/operator/v1alpha1/zz_generated.deepcopy.go modified: config/crd/bases/operator.skywalking.apache.org_swagents.yaml modified: config/crd/kustomization.yaml modified: config/rbac/role.yaml modified: go.mod modified: go.sum modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_types.go config/crd/bases/operator.skywalking.apache.org_demoes.yaml config/crd/patches/cainjection_in_operator_demoes.yaml config/crd/patches/webhook_in_operator_demoes.yaml config/rbac/operator_demo_editor_role.yaml config/rbac/operator_demo_viewer_role.yaml config/samples/operator_v1alpha1_demo.yaml controllers/operator/demo_controller.go controllers/operator/suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) Next, we need to focus on the file apis/operator/v1alpha1/demo_types.go which defines your CRD, and the file controllers/operator/configuration_controller.go which defines the Controller. The others files are some configurations generated by the kubebuilder markers. Here are some references:\n  Kubebuilder project demo, in which you can understand the overall architecture.\n  How to add new-api, which you can find more details for oapserverconfig_types.go.\n  Controller-overview, where you can find more details about oapserverconfig_controller.go.\n  3. Create webhook If you want to fields or set defaults to CRs, creating webhooks is a good practice:\nkubebuilder create webhook --group operator --version v1alpha1 --kind Demo --defaulting --programmatic-validation The newly generated files are as follows.\n$ git status On branch master Your branch is ahead of \u0026#39;origin/master\u0026#39; by 1 commit. (use \u0026#34;git push\u0026#34; to publish your local commits) Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: config/webhook/manifests.yaml modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_webhook.go apis/operator/v1alpha1/webhook_suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) You can get more details through webhook-overview.\n4. Create the template Generally, a controller would generate a series of resources, such as workload, rbac, service, etc based on CRDs. SWCK is using the Go standard template engine to generate these resources. All template files are stored in the ./operator/pkg/operator/manifests. You could create a directory there such as demo to hold templates. The framework would transfer the CR as the arguments to these templates. More than CR, it supports passing custom rendering functions by setting up the TmplFunc. 
Finally, you need to change the comment and add a field demo there to embed the template files into the Go binary.\n Notice, every file under the template directory can only contain one resource; we can\u0026rsquo;t use --- to create multiple resources in a single file.\n 5. Build and Test SWCK needs to run in a k8s environment, so we highly recommend using kind if you don\u0026rsquo;t have a cluster at hand. There are currently two ways to test your implementation.\n Before testing, please make sure you have kind installed.\n  Test locally. After finishing your implementation, you can use the following steps to test locally:   Disable the webhook  export ENABLE_WEBHOOKS=false Run main.go with the kubeconfig file.  go run main.go --kubeconfig=(use your kubeconfig file here; the default is ~/.kube/config)  If you want to test the webhook, please refer to the guide.\n  Test in-cluster.   Before testing the swck, please install cert-manager to provide the certificate for the webhook in swck.  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml First, build the swck image and load it into the kind cluster, then install the crds and the operator as follows.  make docker-build \u0026amp;\u0026amp; kind load docker-image controller:latest \u0026amp;\u0026amp; make install \u0026amp;\u0026amp; make deploy After the swck is installed, you can use the following command to get the logs produced by the operator.  kubectl logs -f skywalking-swck-controller-manager-* (use the swck deployment name) -n skywalking-swck-system ","excerpt":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs …","ref":"/docs/skywalking-swck/v0.9.0/how-to-add-new-crd-and-controller/","title":"How to add CRD and Controller in SWCK?"},{"body":"How to build a project This document will help you compile and build the project with Maven and set up your IDE.\nBuilding the Project Since we are using Git submodules, we do not recommend using the GitHub tag or release page to download source code for compiling.\nMaven behind the Proxy If you need to run the build behind a proxy, edit .mvn/jvm.config and set the following properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK 11 or 17 (LTS versions), and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source code, specify the tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source code. You can download it from the SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist (.tar.gz for Linux and .zip for Windows).  
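For instance, an end-to-end run of the source-release build could look like the sketch below. The version number and archive name are only placeholders for whichever release you actually downloaded, and the checksum step assumes you also fetched the matching .sha512 file from the download page:
shasum -a 512 -c apache-skywalking-apm-x.y.z-src.tgz.sha512   # verify the downloaded archive first
tar -zxf apache-skywalking-apm-x.y.z-src.tgz                  # unpack the source release
cd apache-skywalking-apm-x.y.z-src
./mvnw clean package -Dmaven.test.skip                        # build without running tests
ls dist/                                                      # the .tar.gz / .zip packages land here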
Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/latest/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK 11, 17, 21 (LTS versions), and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. 
There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/next/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... 
while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  
grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.0.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. 
If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.1.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. 
If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.2.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. 
All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.3.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK11+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. 
You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.4.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK11+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... 
while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  
grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.5.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK11+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. 
If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.6.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK 11 or 17 (LTS versions), and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. 
If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","excerpt":"How to build a project This document will help you compile and build a project in your maven and set …","ref":"/docs/main/v9.7.0/en/guides/how-to-build/","title":"How to build a project"},{"body":"How to build from sources? Download the source tar from the official website, and run the following commands to build from source\nMake sure you have Python 3.7+ and the python3 command available\n$ tar -zxf skywalking-python-src-\u0026lt;version\u0026gt;.tgz $ cd skywalking-python-src-\u0026lt;version\u0026gt; $ make install If you want to build from the latest source codes from GitHub for some reasons, for example, you want to try the latest features that are not released yet, please clone the source codes from GitHub and make install it:\n$ git clone https://github.com/apache/skywalking-python $ cd skywalking-python $ git submodule update --init $ make install NOTE that only releases from the website are official Apache releases.\n","excerpt":"How to build from sources? Download the source tar from the official website, and run the following …","ref":"/docs/skywalking-python/latest/en/setup/faq/how-to-build-from-sources/","title":"How to build from sources?"},{"body":"How to build from sources? 
Download the source tar from the official website, and run the following commands to build from source\nMake sure you have Python 3.7+ and the python3 command available\n$ tar -zxf skywalking-python-src-\u0026lt;version\u0026gt;.tgz $ cd skywalking-python-src-\u0026lt;version\u0026gt; $ make install If you want to build from the latest source codes from GitHub for some reasons, for example, you want to try the latest features that are not released yet, please clone the source codes from GitHub and make install it:\n$ git clone https://github.com/apache/skywalking-python $ cd skywalking-python $ git submodule update --init $ make install NOTE that only releases from the website are official Apache releases.\n","excerpt":"How to build from sources? Download the source tar from the official website, and run the following …","ref":"/docs/skywalking-python/next/en/setup/faq/how-to-build-from-sources/","title":"How to build from sources?"},{"body":"How to build from sources? Download the source tar from the official website, and run the following commands to build from source\nMake sure you have Python 3.7+ and the python3 command available\n$ tar -zxf skywalking-python-src-\u0026lt;version\u0026gt;.tgz $ cd skywalking-python-src-\u0026lt;version\u0026gt; $ make install If you want to build from the latest source codes from GitHub for some reasons, for example, you want to try the latest features that are not released yet, please clone the source codes from GitHub and make install it:\n$ git clone https://github.com/apache/skywalking-python $ cd skywalking-python $ git submodule update --init $ make install NOTE that only releases from the website are official Apache releases.\n","excerpt":"How to build from sources? Download the source tar from the official website, and run the following …","ref":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-build-from-sources/","title":"How to build from sources?"},{"body":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. 
{{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","excerpt":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the …","ref":"/docs/main/latest/en/guides/how-to-bump-up-zipkin/","title":"How to bump up Zipkin Lens dependency"},{"body":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  
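As a concrete sketch of the last two steps, assuming license-eye is not yet on your PATH (the go install path below follows the skywalking-eyes project layout and is one common way to install it, not the only one):
go install github.com/apache/skywalking-eyes/cmd/license-eye@latest   # install the license-eye CLI
cd zipkin-lens                                                        # run from the directory holding .licenserc.yaml and LICENSE.tpl
license-eye dependency resolve --summary LICENSE.tpl                  # writes the summarized LICENSE file
# then copy the generated LICENSE over the zipkin-LICENSE file in your SkyWalking checkout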
Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","excerpt":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the …","ref":"/docs/main/next/en/guides/how-to-bump-up-zipkin/","title":"How to bump up Zipkin Lens dependency"},{"body":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","excerpt":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the …","ref":"/docs/main/v9.4.0/en/guides/how-to-bump-up-zipkin/","title":"How to bump up Zipkin Lens dependency"},{"body":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  
ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","excerpt":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the …","ref":"/docs/main/v9.5.0/en/guides/how-to-bump-up-zipkin/","title":"How to bump up Zipkin Lens dependency"},{"body":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. 
{{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","excerpt":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the …","ref":"/docs/main/v9.6.0/en/guides/how-to-bump-up-zipkin/","title":"How to bump up Zipkin Lens dependency"},{"body":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","excerpt":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the …","ref":"/docs/main/v9.7.0/en/guides/how-to-bump-up-zipkin/","title":"How to bump up Zipkin Lens dependency"},{"body":"How to disable some plugins? 
You can find the plugin name in the list and disable one or more plugins by following methods.\nfrom skywalking import config config.agent_disable_plugins = [\u0026#39;sw_http_server\u0026#39;, \u0026#39;sw_urllib_request\u0026#39;] # can be also CSV format, i.e. \u0026#39;sw_http_server,sw_urllib_request\u0026#39; You can also disable the plugins via environment variables SW_AGENT_DISABLE_PLUGINS, please check the Environment Variables List for an explanation.\n","excerpt":"How to disable some plugins? You can find the plugin name in the list and disable one or more …","ref":"/docs/skywalking-python/latest/en/setup/faq/how-to-disable-plugin/","title":"How to disable some plugins?"},{"body":"How to disable some plugins? You can find the plugin name in the list and disable one or more plugins by following methods.\nfrom skywalking import config config.agent_disable_plugins = [\u0026#39;sw_http_server\u0026#39;, \u0026#39;sw_urllib_request\u0026#39;] # can be also CSV format, i.e. \u0026#39;sw_http_server,sw_urllib_request\u0026#39; You can also disable the plugins via environment variables SW_AGENT_DISABLE_PLUGINS, please check the Environment Variables List for an explanation.\n","excerpt":"How to disable some plugins? You can find the plugin name in the list and disable one or more …","ref":"/docs/skywalking-python/next/en/setup/faq/how-to-disable-plugin/","title":"How to disable some plugins?"},{"body":"How to disable some plugins? You can find the plugin name in the list and disable one or more plugins by following methods.\nfrom skywalking import config config.agent_disable_plugins = [\u0026#39;sw_http_server\u0026#39;, \u0026#39;sw_urllib_request\u0026#39;] # can be also CSV format, i.e. \u0026#39;sw_http_server,sw_urllib_request\u0026#39; You can also disable the plugins via environment variables SW_AGENT_DISABLE_PLUGINS, please check the Environment Variables List for an explanation.\n","excerpt":"How to disable some plugins? You can find the plugin name in the list and disable one or more …","ref":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-disable-plugin/","title":"How to disable some plugins?"},{"body":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
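Returning to the plugin FAQ above: the environment-variable form it mentions can be set before the service starts. A minimal sketch, reusing the same plugin names as the Python example and a placeholder entry point:

```bash
export SW_AGENT_DISABLE_PLUGINS='sw_http_server,sw_urllib_request'  # CSV form, as noted above
python your_app.py   # start the instrumented service as usual (placeholder)
```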
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","excerpt":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, …","ref":"/docs/skywalking-java/latest/en/faq/osgi/","title":"How to make SkyWalking agent works in `OSGI` environment?"},{"body":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
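In practice, the two framework properties named in the resolution above are supplied when the container is launched. A hedged sketch for a plain java launch follows; whether -D system properties are honored as framework properties depends on the OSGi implementation (Equinox typically accepts them, Felix usually expects them in conf/config.properties), and the agent path and launcher jar are placeholders.

```bash
java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar \
     -Dorg.osgi.framework.bundle.parent=app \
     '-Dorg.osgi.framework.bootdelegation=org.apache.skywalking.apm.*' \
     -jar your-osgi-launcher.jar   # placeholder for your container's launcher
```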
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","excerpt":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, …","ref":"/docs/skywalking-java/next/en/faq/osgi/","title":"How to make SkyWalking agent works in `OSGI` environment?"},{"body":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","excerpt":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, …","ref":"/docs/skywalking-java/v9.0.0/en/faq/osgi/","title":"How to make SkyWalking agent works in `OSGI` environment?"},{"body":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","excerpt":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, …","ref":"/docs/skywalking-java/v9.1.0/en/faq/osgi/","title":"How to make SkyWalking agent works in `OSGI` environment?"},{"body":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","excerpt":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, …","ref":"/docs/skywalking-java/v9.2.0/en/faq/osgi/","title":"How to make SkyWalking agent works in `OSGI` environment?"},{"body":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes.\nAfter cloning the repo, make sure you also have cloned the submodule for protocol. Otherwise, run the command below.\ngit submodule update --init Please first refer to the Developer Guide to set up a development environment.\nTL;DR: run make env. This will create virtual environments for python and generate the protocol folder needed for the agent.\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nBy now, you can do what you want. Let\u0026rsquo;s get to the topic of how to test.\nThe test process requires docker and docker-compose throughout. If you haven\u0026rsquo;t installed them, please install them first.\nThen run make test, which will generate a list of plugin versions based on the support_matrix variable in each Plugin and orchestrate the tests automatically. 
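Putting the setup above together, a hedged sketch of the whole local test flow, assuming python3, docker and docker-compose are installed and that you are working against your own fork (the GitHub user in the URL is a placeholder):

```bash
git clone https://github.com/<your-github-id>/skywalking-python && cd skywalking-python
git submodule update --init   # pull the protocol submodule
make env                      # create the virtual environments and generate the protocol folder
make test                     # build the plugin version matrix and run the tests
```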
Remember to inspect the outcomes carefully to debug your plugin.\nAlternatively, you can run full tests via our GitHub action workflow on your own GitHub fork, it is usually easier since local environment can be tricky to setup for new contributors.\nTo do so, you need to fork this repo on GitHub and enable GitHub actions on your forked repo. Then, you can simply push your changes and open a Pull Request to the fork\u0026rsquo;s master branch.\nNote: GitHub automatically targets Pull Requests to the upstream repo, be careful when you open them to avoid accidental PRs to upstream.\n","excerpt":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes. …","ref":"/docs/skywalking-python/latest/en/contribution/how-to-test-locally/","title":"How to test locally?"},{"body":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes.\nAfter cloning the repo, make sure you also have cloned the submodule for protocol. Otherwise, run the command below.\ngit submodule update --init Please first refer to the Developer Guide to set up a development environment.\nTL;DR: run make env. This will create virtual environments for python and generate the protocol folder needed for the agent.\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nBy now, you can do what you want. Let\u0026rsquo;s get to the topic of how to test.\nThe test process requires docker and docker-compose throughout. If you haven\u0026rsquo;t installed them, please install them first.\nThen run make test, which will generate a list of plugin versions based on the support_matrix variable in each Plugin and orchestrate the tests automatically. Remember to inspect the outcomes carefully to debug your plugin.\nAlternatively, you can run full tests via our GitHub action workflow on your own GitHub fork, it is usually easier since local environment can be tricky to setup for new contributors.\nTo do so, you need to fork this repo on GitHub and enable GitHub actions on your forked repo. Then, you can simply push your changes and open a Pull Request to the fork\u0026rsquo;s master branch.\nNote: GitHub automatically targets Pull Requests to the upstream repo, be careful when you open them to avoid accidental PRs to upstream.\n","excerpt":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes. …","ref":"/docs/skywalking-python/next/en/contribution/how-to-test-locally/","title":"How to test locally?"},{"body":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes.\nAfter cloning the repo, make sure you also have cloned the submodule for protocol. Otherwise, run the command below.\ngit submodule update --init Please first refer to the Developer Guide to set up a development environment.\nTL;DR: run make env. This will create virtual environments for python and generate the protocol folder needed for the agent.\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nBy now, you can do what you want. Let\u0026rsquo;s get to the topic of how to test.\nThe test process requires docker and docker-compose throughout. 
If you haven\u0026rsquo;t installed them, please install them first.\nThen run make test, which will generate a list of plugin versions based on the support_matrix variable in each Plugin and orchestrate the tests automatically. Remember to inspect the outcomes carefully to debug your plugin.\nAlternatively, you can run full tests via our GitHub action workflow on your own GitHub fork, it is usually easier since local environment can be tricky to setup for new contributors.\nTo do so, you need to fork this repo on GitHub and enable GitHub actions on your forked repo. Then, you can simply push your changes and open a Pull Request to the fork\u0026rsquo;s master branch.\nNote: GitHub automatically targets Pull Requests to the upstream repo, be careful when you open them to avoid accidental PRs to upstream.\n","excerpt":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes. …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-test-locally/","title":"How to test locally?"},{"body":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... }   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  
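Before the annotation-based approach and its toolkit dependency shown next, note that the statuscheck.* options in this guide can also be supplied at launch time instead of editing agent.config. A hedged sketch, relying on the agent's usual skywalking.-prefixed system-property override; the agent path, application jar and exception class are placeholders:

```bash
java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar \
     -Dskywalking.statuscheck.ignored_exceptions=com.example.BusinessFlowException \
     -Dskywalking.statuscheck.max_recursive_depth=1 \
     -jar your-app.jar
```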
\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","excerpt":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/","title":"How to tolerate custom exceptions"},{"body":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  
Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... }   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","excerpt":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/","title":"How to tolerate custom exceptions"},{"body":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... 
}   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","excerpt":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/","title":"How to tolerate custom exceptions"},{"body":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... 
}   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","excerpt":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/","title":"How to tolerate custom exceptions"},{"body":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... 
}   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","excerpt":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/","title":"How to tolerate custom exceptions"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
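For example, a hedged sketch of mounting both locations when starting the container; the host paths and the image tag are placeholders to adapt:

```bash
docker run --name oap --restart always -d \
  -v /path/to/extra-config:/skywalking/ext-config \
  -v /path/to/extra-libs:/skywalking/ext-libs \
  apache/skywalking-oap-server:9.0.0
```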
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/latest/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start the storage, OAP and Booster UI with docker-compose As a quick start, you can use our one-liner script to start ElasticSearch or BanyanDB as the storage, OAP server and Booster UI, please make sure you have installed Docker.\nLinux, macOS, Windows (WSL)\nbash \u0026lt;(curl -sSL https://skywalking.apache.org/quickstart-docker.sh) Windows (Powershell)\nInvoke-Expression ([System.Text.Encoding]::UTF8.GetString((Invoke-WebRequest -Uri https://skywalking.apache.org/quickstart-docker.ps1 -UseBasicParsing).Content)) You will be prompted to choose the storage type, and then the script will start the backend cluster with the selected storage.\nTo tear down the cluster, run the following command:\ndocker compose --project-name=skywalking-quickstart down Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.7.0 Start a standalone container with BanyanDB as storage, whose address is banyandb:17912 docker run --name oap --restart always -d -e SW_STORAGE=banyandb -e SW_STORAGE_BANYANDB_TARGETS=banyandb:17912 apache/skywalking-oap-server:9.7.0 Start a standalone container with ElasticSearch 7 as storage, whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.7.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start the storage, OAP and Booster UI with docker-compose As a quick …","ref":"/docs/main/next/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standlone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:8.8.0 Start a standlone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:8.8.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden, otherwise, they will be added in /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standlone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standlone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standlone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standlone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. 
The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","excerpt":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-docker/","title":"How to use the Docker images"},{"body":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used in production. The state-of-the-art practice is to use Gunicorn as the process manager for ASGI applications such as FastAPI to get resilient \u0026amp; blazing fast services.\nSince Gunicorn is a prefork server, it will fork a new process for each worker, and the forked process will be the one that actually serves requests.\n Tired of understanding these complicated multiprocessing behaviors? Try the new sw-python run --prefork/-p support for Gunicorn first! 
You can always fall back to the manual approach (although it\u0026rsquo;s also non-intrusive for application).\n Automatic Injection Approach (Non-intrusive)  Caveat: Although E2E test passes for Python3.7, there\u0026rsquo;s a small chance that this approach won\u0026rsquo;t work on Python 3.7 if your application uses gPRC protocol AND subprocess AND fork together (you will immediately see service is not starting normally, not randomly breaking after)\nThis is due to an unfixed bug in gRPC core that leads to deadlock if Python 3.7 application involves subprocess (like debug mode). You should upgrade to Python 3.8+ soon since the EOL is approaching on 2023 June 27th, or fallback to manual approach should this case happen, or simply use HTTP/Kafka protocol.\n TL;DR: specify -p or --prefork in sw-python run -p and all Gunicorn workers and master will get their own working agent.\nImportant: if the call to gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 Long version: (notice this is different from how uWSGI equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, the agent_experimental_fork_support agent option will be turned on automatically.\nStartup flow: sw-python -\u0026gt; gunicorn -\u0026gt; master process (agent starts) -\u0026gt; fork -\u0026gt; worker process (agent restarts due to os.register_at_fork)\nThe master process will get its own agent, although it won\u0026rsquo;t report any trace, since obviously it doesn\u0026rsquo;t take requests, it still reports metrics that is useful for debugging\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) Limitation: Using normal postfork hook will not add observability to the master process, you could also define a prefork hook to start an agent in the master process, with a instance name like instance-name-master(\u0026lt;pid\u0026gt;)\nThe following is just an example, since Gunicorn\u0026rsquo;s automatic injection approach is likely to work in many situations.\n The manual approach should not be used together with the agent\u0026rsquo;s fork support. Otherwise, agent will be dual booted and raise an error saying that you should not do so.\n # Usage explained here: https://docs.gunicorn.org/en/stable/settings.html#post-fork bind = '0.0.0.0:8088' workers = 3 def post_fork(server, worker): # Important: The import of skywalking should be inside the post_fork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() Run Gunicorn normally without sw-python CLI:\ngunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 ","excerpt":"How to use with Gunicorn? 
Gunicorn is another popular process manager and prefork server widely used …","ref":"/docs/skywalking-python/latest/en/setup/faq/how-to-use-with-gunicorn/","title":"How to use with Gunicorn?"},{"body":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used in production. The state-of-the-art practice is to use Gunicorn as the process manager for ASGI applications such as FastAPI to get resilient \u0026amp; blazing fast services.\nSince Gunicorn is a prefork server, it will fork a new process for each worker, and the forked process will be the one that actually serves requests.\n Tired of understanding these complicated multiprocessing behaviors? Try the new sw-python run --prefork/-p support for Gunicorn first! You can always fall back to the manual approach (although it\u0026rsquo;s also non-intrusive for application).\n Automatic Injection Approach (Non-intrusive)  Caveat: Although E2E test passes for Python3.7, there\u0026rsquo;s a small chance that this approach won\u0026rsquo;t work on Python 3.7 if your application uses gPRC protocol AND subprocess AND fork together (you will immediately see service is not starting normally, not randomly breaking after)\nThis is due to an unfixed bug in gRPC core that leads to deadlock if Python 3.7 application involves subprocess (like debug mode). You should upgrade to Python 3.8+ soon since the EOL is approaching on 2023 June 27th, or fallback to manual approach should this case happen, or simply use HTTP/Kafka protocol.\n TL;DR: specify -p or --prefork in sw-python run -p and all Gunicorn workers and master will get their own working agent.\nImportant: if the call to gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 Long version: (notice this is different from how uWSGI equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, the agent_experimental_fork_support agent option will be turned on automatically.\nStartup flow: sw-python -\u0026gt; gunicorn -\u0026gt; master process (agent starts) -\u0026gt; fork -\u0026gt; worker process (agent restarts due to os.register_at_fork)\nThe master process will get its own agent, although it won\u0026rsquo;t report any trace, since obviously it doesn\u0026rsquo;t take requests, it still reports metrics that is useful for debugging\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) Limitation: Using normal postfork hook will not add observability to the master process, you could also define a prefork hook to start an agent in the master process, with a instance name like instance-name-master(\u0026lt;pid\u0026gt;)\nThe following is just an example, since Gunicorn\u0026rsquo;s automatic injection approach is likely to work in many situations.\n The manual approach should not be used together with the agent\u0026rsquo;s fork support. 
Otherwise, agent will be dual booted and raise an error saying that you should not do so.\n # Usage explained here: https://docs.gunicorn.org/en/stable/settings.html#post-fork bind = '0.0.0.0:8088' workers = 3 def post_fork(server, worker): # Important: The import of skywalking should be inside the post_fork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() Run Gunicorn normally without sw-python CLI:\ngunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 ","excerpt":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used …","ref":"/docs/skywalking-python/next/en/setup/faq/how-to-use-with-gunicorn/","title":"How to use with Gunicorn?"},{"body":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used in production. The state-of-the-art practice is to use Gunicorn as the process manager for ASGI applications such as FastAPI to get resilient \u0026amp; blazing fast services.\nSince Gunicorn is a prefork server, it will fork a new process for each worker, and the forked process will be the one that actually serves requests.\n Tired of understanding these complicated multiprocessing behaviors? Try the new sw-python run --prefork/-p support for Gunicorn first! You can always fall back to the manual approach (although it\u0026rsquo;s also non-intrusive for application).\n Automatic Injection Approach (Non-intrusive)  Caveat: Although E2E test passes for Python3.7, there\u0026rsquo;s a small chance that this approach won\u0026rsquo;t work on Python 3.7 if your application uses gPRC protocol AND subprocess AND fork together (you will immediately see service is not starting normally, not randomly breaking after)\nThis is due to an unfixed bug in gRPC core that leads to deadlock if Python 3.7 application involves subprocess (like debug mode). 
You should upgrade to Python 3.8+ soon since the EOL is approaching on 2023 June 27th, or fallback to manual approach should this case happen, or simply use HTTP/Kafka protocol.\n TL;DR: specify -p or --prefork in sw-python run -p and all Gunicorn workers and master will get their own working agent.\nImportant: if the call to gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 Long version: (notice this is different from how uWSGI equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, the agent_experimental_fork_support agent option will be turned on automatically.\nStartup flow: sw-python -\u0026gt; gunicorn -\u0026gt; master process (agent starts) -\u0026gt; fork -\u0026gt; worker process (agent restarts due to os.register_at_fork)\nThe master process will get its own agent, although it won\u0026rsquo;t report any trace, since obviously it doesn\u0026rsquo;t take requests, it still reports metrics that is useful for debugging\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) Limitation: Using normal postfork hook will not add observability to the master process, you could also define a prefork hook to start an agent in the master process, with a instance name like instance-name-master(\u0026lt;pid\u0026gt;)\nThe following is just an example, since Gunicorn\u0026rsquo;s automatic injection approach is likely to work in many situations.\n The manual approach should not be used together with the agent\u0026rsquo;s fork support. Otherwise, agent will be dual booted and raise an error saying that you should not do so.\n # Usage explained here: https://docs.gunicorn.org/en/stable/settings.html#post-fork bind = '0.0.0.0:8088' workers = 3 def post_fork(server, worker): # Important: The import of skywalking should be inside the post_fork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() Run Gunicorn normally without sw-python CLI:\ngunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 ","excerpt":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used …","ref":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-use-with-gunicorn/","title":"How to use with Gunicorn?"},{"body":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. 
It is a lightweight, fast, and easy-to-use web server.\nSince uWSGI is relatively old and offers multi-language support, it can get quite troublesome due to the usage of a system-level fork.\nSome of the original discussion can be found here:\n [Python] Apache Skywalking, flask uwsgi, no metrics send to server · Issue #6324 · apache/skywalking [Bug] skywalking-python not work with uwsgi + flask in master workers mode and threads mode · Issue #8566 · apache/skywalking   Tired of understanding these complicated multiprocessing behaviours? Try the new sw-python run --prefork/-p support for uWSGI first! You can always fall back to the manual approach. (although it\u0026rsquo;s also possible to pass postfork hook without changing code, which is essentially how sw-python is implemented)\n  Limitation: regardless of the approach used, uWSGI master process cannot be safely monitored. Since it doesn\u0026rsquo;t take any requests, it is generally acceptable. Alternatively, you could switch to Gunicorn, where its master process can be monitored properly along with all child workers.\n Important: The --enable-threads and --master option must be given to allow the usage of post_fork hooks and threading in workers. In the sw-python CLI, these two options will be automatically injected for you in addition to the post_fork hook.\nAutomatic Injection Approach (Non-intrusive) TL;DR: specify -p or --prefork in sw-python run -p and all uWSGI workers will get their own working agent.\nImportant: if the call to uwsgi is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p uwsgi --die-on-term \\  --http 0.0.0.0:9090 \\  --http-manage-expect \\  --master --workers 2 \\  --enable-threads \\  --threads 2 \\  --manage-script-name \\  --mount /=flask_consumer_prefork:app Long version: (notice this is different from how Gunicorn equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, a uwsgi_hook will be registered by the CLI by adding the environment variable into one of [\u0026lsquo;UWSGI_SHARED_PYTHON_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PYIMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PY_IMPORT\u0026rsquo;]. 
uWSGI will then import the module and start the agent in forked workers.\nStartup flow: sw-python -\u0026gt; uwsgi -\u0026gt; master process (agent doesn\u0026rsquo;t start here) -\u0026gt; fork -\u0026gt; worker process (agent starts due to post_fork hook)\nThe master process (which doesn\u0026rsquo;t accept requests) currently does not get its own agent as it can not be safely started and handled by os.register_at_fork() handlers.\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) If you get some problems when using SkyWalking Python agent, you can try to use the following manual method to call @postfork, the low-level API of uWSGI to initialize the agent.\nThe following is an example of the use of uWSGI and flask.\nImportant: Never directly start the agent in the app, forked workers are unlikely to work properly (even if they do, it\u0026rsquo;s out of luck) you should either add the following postfork, or try our new experimental automatic startup through sw-python CLI (see above).\n# main.py # Note: The --master uwsgi flag must be on, otherwise the decorators will not be available to import from uwsgidecorators import postfork @postfork def init_tracing(): # Important: The import of skywalking must be inside the postfork function from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello World!' if __name__ == '__main__': app.run() Run uWSGI normally without sw-python CLI:\nuwsgi --die-on-term \\  --http 0.0.0.0:5000 \\  --http-manage-expect \\  --master --workers 3 \\  --enable-threads \\  --threads 3 \\  --manage-script-name \\  --mount /=main:app ","excerpt":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and …","ref":"/docs/skywalking-python/latest/en/setup/faq/how-to-use-with-uwsgi/","title":"How to use with uWSGI?"},{"body":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and easy-to-use web server.\nSince uWSGI is relatively old and offers multi-language support, it can get quite troublesome due to the usage of a system-level fork.\nSome of the original discussion can be found here:\n [Python] Apache Skywalking, flask uwsgi, no metrics send to server · Issue #6324 · apache/skywalking [Bug] skywalking-python not work with uwsgi + flask in master workers mode and threads mode · Issue #8566 · apache/skywalking   Tired of understanding these complicated multiprocessing behaviours? Try the new sw-python run --prefork/-p support for uWSGI first! You can always fall back to the manual approach. (although it\u0026rsquo;s also possible to pass postfork hook without changing code, which is essentially how sw-python is implemented)\n  Limitation: regardless of the approach used, uWSGI master process cannot be safely monitored. Since it doesn\u0026rsquo;t take any requests, it is generally acceptable. 
Alternatively, you could switch to Gunicorn, where its master process can be monitored properly along with all child workers.\n Important: The --enable-threads and --master option must be given to allow the usage of post_fork hooks and threading in workers. In the sw-python CLI, these two options will be automatically injected for you in addition to the post_fork hook.\nAutomatic Injection Approach (Non-intrusive) TL;DR: specify -p or --prefork in sw-python run -p and all uWSGI workers will get their own working agent.\nImportant: if the call to uwsgi is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p uwsgi --die-on-term \\  --http 0.0.0.0:9090 \\  --http-manage-expect \\  --master --workers 2 \\  --enable-threads \\  --threads 2 \\  --manage-script-name \\  --mount /=flask_consumer_prefork:app Long version: (notice this is different from how Gunicorn equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, a uwsgi_hook will be registered by the CLI by adding the environment variable into one of [\u0026lsquo;UWSGI_SHARED_PYTHON_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PYIMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PY_IMPORT\u0026rsquo;]. uWSGI will then import the module and start the agent in forked workers.\nStartup flow: sw-python -\u0026gt; uwsgi -\u0026gt; master process (agent doesn\u0026rsquo;t start here) -\u0026gt; fork -\u0026gt; worker process (agent starts due to post_fork hook)\nThe master process (which doesn\u0026rsquo;t accept requests) currently does not get its own agent as it can not be safely started and handled by os.register_at_fork() handlers.\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) If you get some problems when using SkyWalking Python agent, you can try to use the following manual method to call @postfork, the low-level API of uWSGI to initialize the agent.\nThe following is an example of the use of uWSGI and flask.\nImportant: Never directly start the agent in the app, forked workers are unlikely to work properly (even if they do, it\u0026rsquo;s out of luck) you should either add the following postfork, or try our new experimental automatic startup through sw-python CLI (see above).\n# main.py # Note: The --master uwsgi flag must be on, otherwise the decorators will not be available to import from uwsgidecorators import postfork @postfork def init_tracing(): # Important: The import of skywalking must be inside the postfork function from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello World!' 
if __name__ == '__main__': app.run() Run uWSGI normally without sw-python CLI:\nuwsgi --die-on-term \\  --http 0.0.0.0:5000 \\  --http-manage-expect \\  --master --workers 3 \\  --enable-threads \\  --threads 3 \\  --manage-script-name \\  --mount /=main:app ","excerpt":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and …","ref":"/docs/skywalking-python/next/en/setup/faq/how-to-use-with-uwsgi/","title":"How to use with uWSGI?"},{"body":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and easy-to-use web server.\nSince uWSGI is relatively old and offers multi-language support, it can get quite troublesome due to the usage of a system-level fork.\nSome of the original discussion can be found here:\n [Python] Apache Skywalking, flask uwsgi, no metrics send to server · Issue #6324 · apache/skywalking [Bug] skywalking-python not work with uwsgi + flask in master workers mode and threads mode · Issue #8566 · apache/skywalking   Tired of understanding these complicated multiprocessing behaviours? Try the new sw-python run --prefork/-p support for uWSGI first! You can always fall back to the manual approach. (although it\u0026rsquo;s also possible to pass postfork hook without changing code, which is essentially how sw-python is implemented)\n  Limitation: regardless of the approach used, uWSGI master process cannot be safely monitored. Since it doesn\u0026rsquo;t take any requests, it is generally acceptable. Alternatively, you could switch to Gunicorn, where its master process can be monitored properly along with all child workers.\n Important: The --enable-threads and --master option must be given to allow the usage of post_fork hooks and threading in workers. In the sw-python CLI, these two options will be automatically injected for you in addition to the post_fork hook.\nAutomatic Injection Approach (Non-intrusive) TL;DR: specify -p or --prefork in sw-python run -p and all uWSGI workers will get their own working agent.\nImportant: if the call to uwsgi is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p uwsgi --die-on-term \\  --http 0.0.0.0:9090 \\  --http-manage-expect \\  --master --workers 2 \\  --enable-threads \\  --threads 2 \\  --manage-script-name \\  --mount /=flask_consumer_prefork:app Long version: (notice this is different from how Gunicorn equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, a uwsgi_hook will be registered by the CLI by adding the environment variable into one of [\u0026lsquo;UWSGI_SHARED_PYTHON_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PYIMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PY_IMPORT\u0026rsquo;]. 
uWSGI will then import the module and start the agent in forked workers.\nStartup flow: sw-python -\u0026gt; uwsgi -\u0026gt; master process (agent doesn\u0026rsquo;t start here) -\u0026gt; fork -\u0026gt; worker process (agent starts due to post_fork hook)\nThe master process (which doesn\u0026rsquo;t accept requests) currently does not get its own agent as it can not be safely started and handled by os.register_at_fork() handlers.\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) If you get some problems when using SkyWalking Python agent, you can try to use the following manual method to call @postfork, the low-level API of uWSGI to initialize the agent.\nThe following is an example of the use of uWSGI and flask.\nImportant: Never directly start the agent in the app, forked workers are unlikely to work properly (even if they do, it\u0026rsquo;s out of luck) you should either add the following postfork, or try our new experimental automatic startup through sw-python CLI (see above).\n# main.py # Note: The --master uwsgi flag must be on, otherwise the decorators will not be available to import from uwsgidecorators import postfork @postfork def init_tracing(): # Important: The import of skywalking must be inside the postfork function from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello World!' if __name__ == '__main__': app.run() Run uWSGI normally without sw-python CLI:\nuwsgi --die-on-term \\  --http 0.0.0.0:5000 \\  --http-manage-expect \\  --master --workers 3 \\  --enable-threads \\  --threads 3 \\  --manage-script-name \\  --mount /=main:app ","excerpt":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and …","ref":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-use-with-uwsgi/","title":"How to use with uWSGI?"},{"body":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following contents would guide you. Let\u0026rsquo;s use the profiling module as an example of how to write a module.\n Please read the Module Design to understand what is module. The module should be written in the skywalking-rover/pkg directory. So we create a new directory called profiling as the module codes space. Implement the interface in the skywalking-rover/pkg/module. Each module has 6 methods, which are Name, RequiredModules, Config, Start, NotifyStartSuccess, and Shutdown.  Name returns the unique name of the module, also this name is used to define in the configuration file. RequiredModules returns this needs depended on module names. In the profiling module, it needs to query the existing process and send snapshots to the backend, so it needs the core and process module. Config returns the config content of this module, which relate to the configuration file, and you could declare the tag(mapstructure) with the field to define the name in the configuration file. 
Start is triggered when the module needs to start; if the module fails to start, please return the error. NotifyStartSuccess is triggered after the Start methods of all active modules have succeeded. Shutdown   Add the configuration into the skywalking-rover/configs/rover_configs.yaml. It should be the same as the config declaration. Register the module into skywalking-rover/pkg/boot/register.go. Add unit tests or E2E tests to verify that the module works well. Write the documentation under the skywalking-rover/docs/en directory and add it to the documentation index file skywalking-rover/docs/menu.yml.  ","excerpt":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following …","ref":"/docs/skywalking-rover/latest/en/guides/contribution/how-to-write-module/","title":"How to write a new module?"},{"body":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following contents would guide you. Let\u0026rsquo;s use the profiling module as an example of how to write a module.\n Please read the Module Design to understand what a module is. The module should be written in the skywalking-rover/pkg directory. So we create a new directory called profiling as the module codes space. Implement the interface in the skywalking-rover/pkg/module. Each module has 6 methods, which are Name, RequiredModules, Config, Start, NotifyStartSuccess, and Shutdown.  Name returns the unique name of the module; this name is also used in the configuration file. RequiredModules returns the names of the modules it depends on. In the profiling module, it needs to query the existing process and send snapshots to the backend, so it needs the core and process modules. Config returns the config content of this module, which relates to the configuration file, and you can declare the tag (mapstructure) on the field to define its name in the configuration file. Start is triggered when the module needs to start; if the module fails to start, please return the error. NotifyStartSuccess is triggered after the Start methods of all active modules have succeeded. Shutdown   Add the configuration into the skywalking-rover/configs/rover_configs.yaml. It should be the same as the config declaration. Register the module into skywalking-rover/pkg/boot/register.go. Add unit tests or E2E tests to verify that the module works well. Write the documentation under the skywalking-rover/docs/en directory and add it to the documentation index file skywalking-rover/docs/menu.yml.  ","excerpt":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following …","ref":"/docs/skywalking-rover/next/en/guides/contribution/how-to-write-module/","title":"How to write a new module?"},{"body":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following contents would guide you. Let\u0026rsquo;s use the profiling module as an example of how to write a module.\n Please read the Module Design to understand what a module is. The module should be written in the skywalking-rover/pkg directory. So we create a new directory called profiling as the module codes space. Implement the interface in the skywalking-rover/pkg/module. Each module has 6 methods, which are Name, RequiredModules, Config, Start, NotifyStartSuccess, and Shutdown.  Name returns the unique name of the module; this name is also used in the configuration file. RequiredModules returns the names of the modules it depends on. 
In the profiling module, it needs to query the existing process and send snapshots to the backend, so it needs the core and process modules. Config returns the config content of this module, which relates to the configuration file, and you can declare the tag (mapstructure) on the field to define its name in the configuration file. Start is triggered when the module needs to start; if the module fails to start, please return the error. NotifyStartSuccess is triggered after the Start methods of all active modules have succeeded. Shutdown   Add the configuration into the skywalking-rover/configs/rover_configs.yaml. It should be the same as the config declaration. Register the module into skywalking-rover/pkg/boot/register.go. Add unit tests or E2E tests to verify that the module works well. Write the documentation under the skywalking-rover/docs/en directory and add it to the documentation index file skywalking-rover/docs/menu.yml.  ","excerpt":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following …","ref":"/docs/skywalking-rover/v0.6.0/en/guides/contribution/how-to-write-module/","title":"How to write a new module?"},{"body":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following contents would guide you. Let\u0026rsquo;s use memory-queue as an example of how to write a plugin.\n  Choose the plugin category. As the memory-queue is a queue, the plugin should be written in the skywalking-satellite/plugins/queue directory. So we create a new directory called memory as the plugin codes space.\n  Implement the interface in the skywalking-satellite/plugins/queue/api. Each plugin has 3 common methods, which are Name(), Description(), DefaultConfig().\n Name() returns the unique name in the plugin category. 
Description() returns the description of the plugin, which would be used to generate the plugin documentation. DefaultConfig() returns the default plugin config with yaml pattern, which would be used as the default value in the plugin struct and to generate the plugin documentation.  type Queue struct { config.CommonFields // config  EventBufferSize int `mapstructure:\u0026#34;event_buffer_size\u0026#34;` // The maximum buffer event size.  // components  buffer *goconcurrentqueue.FixedFIFO } func (q *Queue) Name() string { return Name } func (q *Queue) Description() string { return \u0026#34;this is a memory queue to buffer the input event.\u0026#34; } func (q *Queue) DefaultConfig() string { return ` # The maximum buffer event size. event_buffer_size: 5000   Add unit test.\n  Generate the plugin docs.\n  make gen-docs ","excerpt":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following …","ref":"/docs/skywalking-satellite/next/en/guides/contribution/how-to-write-plugin/","title":"How to write a new plugin?"},{"body":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following contents would guide you. Let\u0026rsquo;s use memory-queue as an example of how to write a plugin.\n  Choose the plugin category. As the memory-queue is a queue, the plugin should be written in the skywalking-satellite/plugins/queue directory. So we create a new directory called memory as the plugin codes space.\n  Implement the interface in the skywalking-satellite/plugins/queue/api. Each plugin has 3 common methods, which are Name(), Description(), DefaultConfig().\n Name() returns the unique name in the plugin category. Description() returns the description of the plugin, which would be used to generate the plugin documentation. DefaultConfig() returns the default plugin config with yaml pattern, which would be used as the default value in the plugin struct and to generate the plugin documentation.  type Queue struct { config.CommonFields // config  EventBufferSize int `mapstructure:\u0026#34;event_buffer_size\u0026#34;` // The maximum buffer event size.  // components  buffer *goconcurrentqueue.FixedFIFO } func (q *Queue) Name() string { return Name } func (q *Queue) Description() string { return \u0026#34;this is a memory queue to buffer the input event.\u0026#34; } func (q *Queue) DefaultConfig() string { return ` # The maximum buffer event size. event_buffer_size: 5000   Add unit test.\n  Generate the plugin docs.\n  make gen-docs ","excerpt":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following …","ref":"/docs/skywalking-satellite/v1.2.0/en/guides/contribution/how-to-write-plugin/","title":"How to write a new plugin?"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/latest/en/api/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/next/en/api/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.0.0/en/protocols/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [{ \u0026#34;language\u0026#34;: \u0026#34;Lua\u0026#34; }] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: 
\u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.0.0/en/protocols/http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.1.0/en/protocols/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
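The following Go sketch reports a single trace segment to the /v3/segment endpoint shown above. Field names mirror the documented JSON; the OAP address and the one-span payload are simplifying assumptions.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// span and segment mirror a subset of the fields in the /v3/segment example.
type span struct {
	OperationName string `json:"operationName"`
	StartTime     int64  `json:"startTime"`
	EndTime       int64  `json:"endTime"`
	SpanType      string `json:"spanType"`
	SpanID        int    `json:"spanId"`
	ParentSpanID  int    `json:"parentSpanId"`
	IsError       bool   `json:"isError"`
	SpanLayer     string `json:"spanLayer"`
	ComponentID   int    `json:"componentId"`
}

type segment struct {
	TraceID         string `json:"traceId"`
	TraceSegmentID  string `json:"traceSegmentId"`
	Service         string `json:"service"`
	ServiceInstance string `json:"serviceInstance"`
	Spans           []span `json:"spans"`
}

func main() {
	now := time.Now().UnixMilli()
	seg := segment{
		TraceID:         "a12ff60b-5807-463b-a1f8-fb1c8608219e",
		TraceSegmentID:  "a12ff60b-5807-463b-a1f8-fb1c8608219e",
		Service:         "User_Service_Name",
		ServiceInstance: "User_Service_Instance_Name",
		Spans: []span{{
			OperationName: "/ingress",
			StartTime:     now,
			EndTime:       now + 15,
			SpanType:      "Entry",
			SpanID:        0,
			ParentSpanID:  -1,
			SpanLayer:     "Http",
			ComponentID:   6000,
		}},
	}
	body, err := json.Marshal(seg)
	if err != nil {
		panic(err)
	}
	// Assumed local OAP HTTP receiver on port 12800.
	resp, err := http.Post("http://localhost:12800/v3/segment", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}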
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: 
[{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.1.0/en/protocols/http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
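For the /browser/errorLogs endpoint shown above, here is a minimal Go sketch that submits a batch of error logs; the payload is a literal JSON array taken from the documented example and localhost:12800 is again assumed.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// A two-element error log batch, shortened from the documented input.
	body := []byte(`[
	  {"uniqueId": "55ec6178-3fb7-43ef-899c-a26944407b01", "service": "web", "serviceVersion": "v0.0.1",
	   "pagePath": "/index.html", "category": "ajax", "message": "error", "line": 1, "col": 1,
	   "stack": "error", "errorUrl": "/index.html"},
	  {"uniqueId": "55ec6178-3fb7-43ef-899c-a26944407b02", "service": "web", "serviceVersion": "v0.0.1",
	   "pagePath": "/index.html", "category": "ajax", "message": "error", "line": 1, "col": 1,
	   "stack": "error", "errorUrl": "/index.html"}
	]`)
	resp, err := http.Post("http://localhost:12800/browser/errorLogs", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // 204 is expected on success.
}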
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.2.0/en/protocols/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
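For the reportProperties call shown above, a short Go sketch that registers instance properties; the property list uses the key/value form from the documented example, and the OAP address is assumed.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Properties use the key/value form from the documented example.
	body := []byte(`{
	  "service": "User Service Name",
	  "serviceInstance": "User Service Instance Name",
	  "properties": [ { "key": "language", "value": "Lua" } ]
	}`)
	resp, err := http.Post("http://localhost:12800/v3/management/reportProperties", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // The documented output is an empty JSON object: {}
}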
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: 
[{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.2.0/en/protocols/http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.3.0/en/protocols/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: 
[{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.3.0/en/protocols/http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.4.0/en/api/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.5.0/en/api/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.6.0/en/api/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","excerpt":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response …","ref":"/docs/main/v9.7.0/en/api/browser-http-api-protocol/","title":"HTTP API Protocol"},{"body":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. 
With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","excerpt":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful …","ref":"/docs/main/latest/en/setup/ai-pipeline/http-restful-uri-pattern/","title":"HTTP Restful URI recognition"},{"body":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  
rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","excerpt":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful …","ref":"/docs/main/next/en/setup/ai-pipeline/http-restful-uri-pattern/","title":"HTTP Restful URI recognition"},{"body":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. No configuration to set periods of feedRawData and fetchAllPatterns services.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","excerpt":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful …","ref":"/docs/main/v9.5.0/en/setup/ai-pipeline/http-restful-uri-pattern/","title":"HTTP Restful URI recognition"},{"body":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. 
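A hedged Go sketch of a server implementing the HttpUriRecognitionService defined above. It assumes stubs generated from URIRecognition.proto with protoc-gen-go/protoc-gen-go-grpc under a hypothetical import path (example.com/urirecognition); the Register/Unimplemented identifiers follow the standard generator conventions, and the listen port is an arbitrary choice for the sketch.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"

	pb "example.com/urirecognition" // hypothetical package generated from URIRecognition.proto
)

// server is a skeleton HttpUriRecognitionService implementation.
type server struct {
	pb.UnimplementedHttpUriRecognitionServiceServer
}

// FetchAllPatterns is polled by OAP for the recognized URI patterns;
// this skeleton returns an empty response.
func (s *server) FetchAllPatterns(ctx context.Context, req *pb.HttpUriRecognitionSyncRequest) (*pb.HttpUriRecognitionResponse, error) {
	return &pb.HttpUriRecognitionResponse{}, nil
}

// FeedRawData receives the raw URI candidates pushed by OAP for training.
func (s *server) FeedRawData(ctx context.Context, req *pb.HttpUriRecognitionRequest) (*emptypb.Empty, error) {
	log.Println("received raw URI data")
	return &emptypb.Empty{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":17128") // arbitrary port for the sketch
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	pb.RegisterHttpUriRecognitionServiceServer(s, &server{})
	log.Fatal(s.Serve(lis))
}

OAP would then be pointed at this process via uriRecognitionServerAddr and uriRecognitionServerPort.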
With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","excerpt":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful …","ref":"/docs/main/v9.6.0/en/setup/ai-pipeline/http-restful-uri-pattern/","title":"HTTP Restful URI recognition"},{"body":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  
rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","excerpt":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful …","ref":"/docs/main/v9.7.0/en/setup/ai-pipeline/http-restful-uri-pattern/","title":"HTTP Restful URI recognition"},{"body":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s implementation.\nIt utilizes the -toolexec flag during Golang compilation to introduce custom programs that intercept all original files in the compilation stage. This allows for the modification or addition of files to be completed seamlessly.\nToolchain in Golang The -toolexec flag in Golang is a powerful feature that can be used during stages such as build, test, and others. When this flag is used, developers can provide a custom program or script to replace the default go tools functionality. This offers greater flexibility and control over the build, test, or analysis processes.\nWhen passing this flag during a go build, it can intercept the execution flow of commands such as compile, asm, and link, which are required during Golang\u0026rsquo;s compilation process. These commands are also referred to as the toolchain within Golang.\nInformation about the Toolchain The following command demonstrates the parameter information for the specified -toolexec program when it is invoked:\n/usr/bin/skywalking-go /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile -o /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/_pkg_.a -trimpath /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011=\u0026gt; -p runtime -std -+ -buildid zSeDyjJh0lgXlIqBZScI/zSeDyjJh0lgXlIqBZScI -goversion go1.19.2 -symabis /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/symabis -c=4 -nolocalimports -importcfg /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/importcfg -pack -asmhdr /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/go_asm.h /usr/local/opt/go/libexec/src/runtime/alg.go /usr/local/opt/go/libexec/src/runtime/asan0.go ... The code above demonstrates the parameters used when a custom program is executed, which mainly includes the following information:\n Current toolchain tool: In this example, it is a compilation tool with the path: /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile. Target file of the tool: The final target file that the current tool needs to generate. Package information: The module package path information being compiled, which is the parameter value of the -p flag. The current package path is runtime. 
Temporary directory address: For each compilation, the Go program would generate a corresponding temporary directory. This directory contains all the temporary files required for the compilation. Files to be compiled: Many .go file paths can be seen at the end of the command, which are the file path list of the module that needs to be compiled.  Toolchain with SkyWalking Go Agent SkyWalking Go Agent works by intercepting the compile program through the toolchain and making changes to the program based on the information above. The main parts include:\n AST: Using AST to parse and manipulate the codes. File copying/generation: Copy or generate files to the temporary directory required for the compilation, and add file path addresses when the compilation command is executed. Proxy command execution: After completing the modification of the specified package, the new codes are weaved into the target.  Hybrid Compilation After enhancing the program with SkyWalking Go Agent, the following parts of the program will be enhanced:\n SkyWalking Go: The agent core part of the code would be dynamically copied to the agent path for plugin use. Plugins: Enhance the specified framework code according to the enhancement rules of the plugins. Runtime: Enhance the runtime package in Go, including extensions for goroutines and other content. Main: Enhance the main package during system startup, for stating the system with Agent.  ","excerpt":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s …","ref":"/docs/skywalking-go/latest/en/concepts-and-designs/hybrid-compilation/","title":"Hybrid Compilation"},{"body":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s implementation.\nIt utilizes the -toolexec flag during Golang compilation to introduce custom programs that intercept all original files in the compilation stage. This allows for the modification or addition of files to be completed seamlessly.\nToolchain in Golang The -toolexec flag in Golang is a powerful feature that can be used during stages such as build, test, and others. When this flag is used, developers can provide a custom program or script to replace the default go tools functionality. This offers greater flexibility and control over the build, test, or analysis processes.\nWhen passing this flag during a go build, it can intercept the execution flow of commands such as compile, asm, and link, which are required during Golang\u0026rsquo;s compilation process. These commands are also referred to as the toolchain within Golang.\nInformation about the Toolchain The following command demonstrates the parameter information for the specified -toolexec program when it is invoked:\n/usr/bin/skywalking-go /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile -o /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/_pkg_.a -trimpath /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011=\u0026gt; -p runtime -std -+ -buildid zSeDyjJh0lgXlIqBZScI/zSeDyjJh0lgXlIqBZScI -goversion go1.19.2 -symabis /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/symabis -c=4 -nolocalimports -importcfg /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/importcfg -pack -asmhdr /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/go_asm.h /usr/local/opt/go/libexec/src/runtime/alg.go /usr/local/opt/go/libexec/src/runtime/asan0.go ... 
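To make the interception step concrete, here is a minimal, hypothetical -toolexec program in Go. It is not SkyWalking Go's actual implementation; it only shows how such a program receives the real tool path and arguments shown above, extracts the -p package and the .go file list, and then delegates to the original toolchain command.

```go
// Minimal, hypothetical -toolexec interceptor (not SkyWalking Go's real code).
// Built as a standalone binary, it could be invoked as:
//   go build -toolexec=/path/to/interceptor ./...
// It reports what the compile tool is about to do, then runs the original tool.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: interceptor <tool> [tool args...]")
		os.Exit(1)
	}
	tool := os.Args[1]  // e.g. .../pkg/tool/darwin_amd64/compile
	args := os.Args[2:] // the tool's own flags plus the .go source files

	// Extract the two pieces an instrumenting agent cares about most:
	// the package path (-p) and the list of files being compiled.
	var pkg string
	var goFiles []string
	for i, a := range args {
		if a == "-p" && i+1 < len(args) {
			pkg = args[i+1]
		}
		if strings.HasSuffix(a, ".go") {
			goFiles = append(goFiles, a)
		}
	}
	if filepath.Base(tool) == "compile" {
		fmt.Fprintf(os.Stderr, "compiling %s (%d files)\n", pkg, len(goFiles))
		// A real agent would rewrite sources via the AST here, write the new
		// files into the build's temporary directory, and adjust args before
		// invoking the compiler.
	}

	// Delegate to the original toolchain command unchanged.
	cmd := exec.Command(tool, args...)
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}
```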
The code above demonstrates the parameters used when a custom program is executed, which mainly includes the following information:\n Current toolchain tool: In this example, it is a compilation tool with the path: /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile. Target file of the tool: The final target file that the current tool needs to generate. Package information: The module package path information being compiled, which is the parameter value of the -p flag. The current package path is runtime. Temporary directory address: For each compilation, the Go program would generate a corresponding temporary directory. This directory contains all the temporary files required for the compilation. Files to be compiled: Many .go file paths can be seen at the end of the command, which are the file path list of the module that needs to be compiled.  Toolchain with SkyWalking Go Agent SkyWalking Go Agent works by intercepting the compile program through the toolchain and making changes to the program based on the information above. The main parts include:\n AST: Using AST to parse and manipulate the codes. File copying/generation: Copy or generate files to the temporary directory required for the compilation, and add file path addresses when the compilation command is executed. Proxy command execution: After completing the modification of the specified package, the new codes are weaved into the target.  Hybrid Compilation After enhancing the program with SkyWalking Go Agent, the following parts of the program will be enhanced:\n SkyWalking Go: The agent core part of the code would be dynamically copied to the agent path for plugin use. Plugins: Enhance the specified framework code according to the enhancement rules of the plugins. Runtime: Enhance the runtime package in Go, including extensions for goroutines and other content. Main: Enhance the main package during system startup, for stating the system with Agent.  ","excerpt":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s …","ref":"/docs/skywalking-go/next/en/concepts-and-designs/hybrid-compilation/","title":"Hybrid Compilation"},{"body":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s implementation.\nIt utilizes the -toolexec flag during Golang compilation to introduce custom programs that intercept all original files in the compilation stage. This allows for the modification or addition of files to be completed seamlessly.\nToolchain in Golang The -toolexec flag in Golang is a powerful feature that can be used during stages such as build, test, and others. When this flag is used, developers can provide a custom program or script to replace the default go tools functionality. This offers greater flexibility and control over the build, test, or analysis processes.\nWhen passing this flag during a go build, it can intercept the execution flow of commands such as compile, asm, and link, which are required during Golang\u0026rsquo;s compilation process. 
These commands are also referred to as the toolchain within Golang.\nInformation about the Toolchain The following command demonstrates the parameter information for the specified -toolexec program when it is invoked:\n/usr/bin/skywalking-go /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile -o /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/_pkg_.a -trimpath /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011=\u0026gt; -p runtime -std -+ -buildid zSeDyjJh0lgXlIqBZScI/zSeDyjJh0lgXlIqBZScI -goversion go1.19.2 -symabis /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/symabis -c=4 -nolocalimports -importcfg /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/importcfg -pack -asmhdr /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/go_asm.h /usr/local/opt/go/libexec/src/runtime/alg.go /usr/local/opt/go/libexec/src/runtime/asan0.go ... The code above demonstrates the parameters used when a custom program is executed, which mainly includes the following information:\n Current toolchain tool: In this example, it is a compilation tool with the path: /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile. Target file of the tool: The final target file that the current tool needs to generate. Package information: The module package path information being compiled, which is the parameter value of the -p flag. The current package path is runtime. Temporary directory address: For each compilation, the Go program would generate a corresponding temporary directory. This directory contains all the temporary files required for the compilation. Files to be compiled: Many .go file paths can be seen at the end of the command, which are the file path list of the module that needs to be compiled.  Toolchain with SkyWalking Go Agent SkyWalking Go Agent works by intercepting the compile program through the toolchain and making changes to the program based on the information above. The main parts include:\n AST: Using AST to parse and manipulate the codes. File copying/generation: Copy or generate files to the temporary directory required for the compilation, and add file path addresses when the compilation command is executed. Proxy command execution: After completing the modification of the specified package, the new codes are weaved into the target.  Hybrid Compilation After enhancing the program with SkyWalking Go Agent, the following parts of the program will be enhanced:\n SkyWalking Go: The agent core part of the code would be dynamically copied to the agent path for plugin use. Plugins: Enhance the specified framework code according to the enhancement rules of the plugins. Runtime: Enhance the runtime package in Go, including extensions for goroutines and other content. Main: Enhance the main package during system startup, for stating the system with Agent.  ","excerpt":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s …","ref":"/docs/skywalking-go/v0.4.0/en/concepts-and-designs/hybrid-compilation/","title":"Hybrid Compilation"},{"body":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","excerpt":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community …","ref":"/docs/main/latest/en/faq/install_agent_on_websphere/","title":"IllegalStateException when installing Java agent on WebSphere"},{"body":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","excerpt":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community …","ref":"/docs/main/v9.7.0/en/faq/install_agent_on_websphere/","title":"IllegalStateException when installing Java agent on WebSphere"},{"body":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description Default Value     skywalking_agent.enable Enable skywalking_agent extension or not. Off   skywalking_agent.log_file Log file path. /tmp/skywalking-agent.log   skywalking_agent.log_level Log level: one of OFF, TRACE, DEBUG, INFO, WARN, ERROR. INFO   skywalking_agent.runtime_dir Skywalking agent runtime directory. /tmp/skywalking-agent   skywalking_agent.server_addr Address of skywalking oap server. Only available when reporter_type is grpc. 127.0.0.1:11800   skywalking_agent.service_name Application service name. hello-skywalking   skywalking_agent.skywalking_version Skywalking version, 8 or 9. 8   skywalking_agent.authentication Skywalking authentication token, let it empty if the backend isn\u0026rsquo;t enabled. Only available when reporter_type is grpc.    
skywalking_agent.worker_threads Skywalking worker threads, 0 will auto set as the cpu core size. 0   skywalking_agent.enable_tls Wether to enable tls for gPRC, default is false. Only available when reporter_type is grpc. Off   skywalking_agent.ssl_trusted_ca_path The gRPC SSL trusted ca file. Only available when reporter_type is grpc.    skywalking_agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.heartbeat_period Agent heartbeat report period. Unit, second. 30   skywalking_agent.properties_report_period_factor The agent sends the instance properties to the backend every heartbeat_period * properties_report_period_factor seconds. 10   skywalking_agent.enable_zend_observer Whether to use zend observer instead of zend_execute_ex to hook the functions, this feature is only available for PHP8+. Off   skywalking_agent.reporter_type Reporter type, optional values are grpc and kafka. grpc   skywalking_agent.kafka_bootstrap_servers A list of host/port pairs to use for connect to the Kafka cluster. Only available when reporter_type is kafka.    skywalking_agent.kafka_producer_config Configure Kafka Producer configuration in JSON format {\u0026quot;key\u0026quot;: \u0026quot;value}. Only available when reporter_type is kafka. {}    ","excerpt":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description …","ref":"/docs/skywalking-php/latest/en/configuration/ini-settings/","title":"INI Settings"},{"body":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description Default Value     skywalking_agent.enable Enable skywalking_agent extension or not. Off   skywalking_agent.log_file Log file path. /tmp/skywalking-agent.log   skywalking_agent.log_level Log level: one of OFF, TRACE, DEBUG, INFO, WARN, ERROR. INFO   skywalking_agent.runtime_dir Skywalking agent runtime directory. /tmp/skywalking-agent   skywalking_agent.server_addr Address of skywalking oap server. Only available when reporter_type is grpc. 127.0.0.1:11800   skywalking_agent.service_name Application service name. hello-skywalking   skywalking_agent.skywalking_version Skywalking version, 8 or 9. 8   skywalking_agent.authentication Skywalking authentication token, let it empty if the backend isn\u0026rsquo;t enabled. Only available when reporter_type is grpc.    skywalking_agent.worker_threads Skywalking worker threads, 0 will auto set as the cpu core size. 0   skywalking_agent.enable_tls Wether to enable tls for gPRC, default is false. Only available when reporter_type is grpc. Off   skywalking_agent.ssl_trusted_ca_path The gRPC SSL trusted ca file. Only available when reporter_type is grpc.    skywalking_agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.heartbeat_period Agent heartbeat report period. Unit, second. 30   skywalking_agent.properties_report_period_factor The agent sends the instance properties to the backend every heartbeat_period * properties_report_period_factor seconds. 
10   skywalking_agent.enable_zend_observer Whether to use zend observer instead of zend_execute_ex to hook the functions, this feature is only available for PHP8+. Off   skywalking_agent.reporter_type Reporter type, optional values are grpc and kafka. grpc   skywalking_agent.kafka_bootstrap_servers A list of host/port pairs to use for connect to the Kafka cluster. Only available when reporter_type is kafka.    skywalking_agent.kafka_producer_config Configure Kafka Producer configuration in JSON format {\u0026quot;key\u0026quot;: \u0026quot;value}. Only available when reporter_type is kafka. {}   skywalking_agent.inject_context Whether to enable automatic injection of skywalking context variables (such as SW_TRACE_ID). For php-fpm mode, it will be injected into the $_SERVER variable. For swoole mode, it will be injected into the $request-\u0026gt;server variable. Off    ","excerpt":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description …","ref":"/docs/skywalking-php/next/en/configuration/ini-settings/","title":"INI Settings"},{"body":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description Default Value     skywalking_agent.enable Enable skywalking_agent extension or not. Off   skywalking_agent.log_file Log file path. /tmp/skywalking-agent.log   skywalking_agent.log_level Log level: one of OFF, TRACE, DEBUG, INFO, WARN, ERROR. INFO   skywalking_agent.runtime_dir Skywalking agent runtime directory. /tmp/skywalking-agent   skywalking_agent.server_addr Address of skywalking oap server. Only available when reporter_type is grpc. 127.0.0.1:11800   skywalking_agent.service_name Application service name. hello-skywalking   skywalking_agent.skywalking_version Skywalking version, 8 or 9. 8   skywalking_agent.authentication Skywalking authentication token, let it empty if the backend isn\u0026rsquo;t enabled. Only available when reporter_type is grpc.    skywalking_agent.worker_threads Skywalking worker threads, 0 will auto set as the cpu core size. 0   skywalking_agent.enable_tls Wether to enable tls for gPRC, default is false. Only available when reporter_type is grpc. Off   skywalking_agent.ssl_trusted_ca_path The gRPC SSL trusted ca file. Only available when reporter_type is grpc.    skywalking_agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.heartbeat_period Agent heartbeat report period. Unit, second. 30   skywalking_agent.properties_report_period_factor The agent sends the instance properties to the backend every heartbeat_period * properties_report_period_factor seconds. 10   skywalking_agent.enable_zend_observer Whether to use zend observer instead of zend_execute_ex to hook the functions, this feature is only available for PHP8+. Off   skywalking_agent.reporter_type Reporter type, optional values are grpc and kafka. grpc   skywalking_agent.kafka_bootstrap_servers A list of host/port pairs to use for connect to the Kafka cluster. Only available when reporter_type is kafka.    skywalking_agent.kafka_producer_config Configure Kafka Producer configuration in JSON format {\u0026quot;key\u0026quot;: \u0026quot;value}. Only available when reporter_type is kafka. 
{}    ","excerpt":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description …","ref":"/docs/skywalking-php/v0.7.0/en/configuration/ini-settings/","title":"INI Settings"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/latest/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/next/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up at first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would start up at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. 
This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only one single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. 
Most of them would …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. 
And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","excerpt":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-init-mode/","title":"Init mode"},{"body":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our website, currently we have pre-built binaries for macOS, Linux and Windows. 
Extract the tarball and add bin/\u0026lt;os\u0026gt;/e2e to you PATH environment variable.\nInstall from source codes If you want to try some features that are not released yet, you can compile from the source code.\nmkdir skywalking-infra-e2e \u0026amp;\u0026amp; cd skywalking-infra-e2e git clone https://github.com/apache/skywalking-infra-e2e.git . make build Then add the binary in bin/\u0026lt;os\u0026gt;/e2e to your PATH.\nInstall via go install If you already have Go SDK installed, you can also directly install e2e via go install.\ngo install github.com/apache/skywalking-infra-e2e/cmd/e2e@\u0026lt;revision\u0026gt; Note that installation via go install is only supported after Git commit 2a33478 so you can only go install a revision afterwards.\n","excerpt":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our …","ref":"/docs/skywalking-infra-e2e/latest/en/setup/install/","title":"Install SkyWalking Infra E2E"},{"body":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our website, currently we have pre-built binaries for macOS, Linux and Windows. Extract the tarball and add bin/\u0026lt;os\u0026gt;/e2e to you PATH environment variable.\nInstall from source codes If you want to try some features that are not released yet, you can compile from the source code.\nmkdir skywalking-infra-e2e \u0026amp;\u0026amp; cd skywalking-infra-e2e git clone https://github.com/apache/skywalking-infra-e2e.git . make build Then add the binary in bin/\u0026lt;os\u0026gt;/e2e to your PATH.\nInstall via go install If you already have Go SDK installed, you can also directly install e2e via go install.\ngo install github.com/apache/skywalking-infra-e2e/cmd/e2e@\u0026lt;revision\u0026gt; Note that installation via go install is only supported after Git commit 2a33478 so you can only go install a revision afterwards.\n","excerpt":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our …","ref":"/docs/skywalking-infra-e2e/next/en/setup/install/","title":"Install SkyWalking Infra E2E"},{"body":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our website, currently we have pre-built binaries for macOS, Linux and Windows. Extract the tarball and add bin/\u0026lt;os\u0026gt;/e2e to you PATH environment variable.\nInstall from source codes If you want to try some features that are not released yet, you can compile from the source code.\nmkdir skywalking-infra-e2e \u0026amp;\u0026amp; cd skywalking-infra-e2e git clone https://github.com/apache/skywalking-infra-e2e.git . make build Then add the binary in bin/\u0026lt;os\u0026gt;/e2e to your PATH.\nInstall via go install If you already have Go SDK installed, you can also directly install e2e via go install.\ngo install github.com/apache/skywalking-infra-e2e/cmd/e2e@\u0026lt;revision\u0026gt; Note that installation via go install is only supported after Git commit 2a33478 so you can only go install a revision afterwards.\n","excerpt":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/install/","title":"Install SkyWalking Infra E2E"},{"body":"Installation Banyand is the daemon server of the BanyanDB database. 
This section will show several paths installing it in your environment.\nGet Binaries Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the complier\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls banyand/build/bin bydbctl Setup Banyand Banyand shows its available commands and arguments by\n$ ./banyand-server ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ BanyanDB, as an observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data Usage: [command] Available Commands: completion generate the autocompletion script for the specified shell help Help about any command liaison Run as the liaison server meta Run as the meta server standalone Run as the standalone server storage Run as the storage server Flags: -h, --help help for this command -v, --version version for this command Use \u0026#34; [command] --help\u0026#34; for more information about a command. Banyand is running as a standalone server by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... 
***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 if no errors occurred.\nSetup Multiple Banyand as Cluster Firstly, you need to setup a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster. The etcd cluster can be setup by the etcd installation guide. The etcd version should be v3.1 or above.\nThen, you can start the metadata module by\nConsidering the etcd cluster is spread across three nodes with the addresses `10.0.0.1:2379`, `10.0.0.2:2379`, and `10.0.0.3:2379`, Data nodes and liaison nodes are running as independent processes by ```shell $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Docker \u0026amp; Kubernetes The docker image of banyandb is available on Docker Hub.\nIf you want to onboard banyandb to the Kubernetes, you can refer to the banyandb-helm.\n","excerpt":"Installation Banyand is the daemon server of the BanyanDB database. This section will show several …","ref":"/docs/skywalking-banyandb/latest/installation/","title":"Installation"},{"body":"Installation Banyand is the daemon server of the BanyanDB database. This section will show several paths installing it in your environment.\nGet Binaries Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the complier\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  
Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls banyand/build/bin bydbctl Setup Banyand Banyand shows its available commands and arguments by\n$ ./banyand-server ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ BanyanDB, as an observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data Usage: [command] Available Commands: completion generate the autocompletion script for the specified shell help Help about any command liaison Run as the liaison server meta Run as the meta server standalone Run as the standalone server storage Run as the storage server Flags: -h, --help help for this command -v, --version version for this command Use \u0026#34; [command] --help\u0026#34; for more information about a command. Banyand is running as a standalone server by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 if no errors occurred.\nSetup Multiple Banyand as Cluster Firstly, you need to setup a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster. The etcd cluster can be setup by the etcd installation guide. The etcd version should be v3.1 or above.\nThen, you can start the metadata module by\nConsidering the etcd cluster is spread across three nodes with the addresses `10.0.0.1:2379`, `10.0.0.2:2379`, and `10.0.0.3:2379`, Data nodes and liaison nodes are running as independent processes by ```shell $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Docker \u0026amp; Kubernetes The docker image of banyandb is available on Docker Hub.\nIf you want to onboard banyandb to the Kubernetes, you can refer to the banyandb-helm.\n","excerpt":"Installation Banyand is the daemon server of the BanyanDB database. This section will show several …","ref":"/docs/skywalking-banyandb/v0.5.0/installation/","title":"Installation"},{"body":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the SkyWalking Python agent via various ways described next.\n Already installed? 
Check out easy ways to start the agent in your application\n  Non-intrusive  | Intrusive  | Containerization\n  All available configurations are listed here\n Important Note on Different Reporter Protocols Currently only gRPC protocol fully supports all available telemetry capabilities in the Python agent.\nWhile gRPC is highly recommended, we provide alternative protocols to suit your production requirements.\nPlease refer to the table below before deciding which report protocol suits best for you.\n   Reporter Protocol Trace Reporter Log Reporter Meter Reporter Profiling     gRPC ✅ ✅ ✅ ✅   HTTP ✅ ✅ ❌ ❌   Kafka ✅ ✅ ✅ ❌    From PyPI  If you want to try out the latest features that are not released yet, please refer to this guide to build from sources.\n The Python agent module is published to PyPI, from where you can use pip to install:\n# Install the latest version, using the default gRPC protocol to report data to OAP pip install \u0026#34;apache-skywalking\u0026#34; # Install support for every protocol (gRPC, HTTP, Kafka) pip install \u0026#34;apache-skywalking[all]\u0026#34; # Install the latest version, using the http protocol to report data to OAP pip install \u0026#34;apache-skywalking[http]\u0026#34; # Install the latest version, using the kafka protocol to report data to OAP pip install \u0026#34;apache-skywalking[kafka]\u0026#34; # Install a specific version x.y.z # pip install apache-skywalking==x.y.z pip install apache-skywalking==0.1.0 # For example, install version 0.1.0 no matter what the latest version is From Docker Hub SkyWalking Python agent provides convenient dockerfile and images for easy integration utilizing its auto-bootstrap capability.\nSimply pull SkyWalking Python image from Docker Hub based on desired agent version, protocol and Python version.\nFROMapache/skywalking-python:0.8.0-grpc-py3.10# ... build your Python application# If you prefer compact images (built from official Python slim image)FROMapache/skywalking-python:0.8.0-grpc-py3.10-slim# ... build your Python applicationThen, You can build your Python application image based on our agent-enabled Python images and start your applications with SkyWalking agent enabled for you. Please refer to our Containerization Guide for further instructions on integration and configuring.\nFrom Source Code Please refer to the How-to-build-from-sources FAQ.\n","excerpt":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the …","ref":"/docs/skywalking-python/latest/en/setup/installation/","title":"Installation"},{"body":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the SkyWalking Python agent via various ways described next.\n Already installed? 
Check out easy ways to start the agent in your application\n  Non-intrusive  | Intrusive  | Containerization\n  All available configurations are listed here\n Important Note on Different Reporter Protocols Currently only gRPC protocol fully supports all available telemetry capabilities in the Python agent.\nWhile gRPC is highly recommended, we provide alternative protocols to suit your production requirements.\nPlease refer to the table below before deciding which report protocol suits best for you.\n   Reporter Protocol Trace Reporter Log Reporter Meter Reporter Profiling     gRPC ✅ ✅ ✅ ✅   HTTP ✅ ✅ ❌ ❌   Kafka ✅ ✅ ✅ ❌    From PyPI  If you want to try out the latest features that are not released yet, please refer to this guide to build from sources.\n The Python agent module is published to PyPI, from where you can use pip to install:\n# Install the latest version, using the default gRPC protocol to report data to OAP pip install \u0026#34;apache-skywalking\u0026#34; # Install support for every protocol (gRPC, HTTP, Kafka) pip install \u0026#34;apache-skywalking[all]\u0026#34; # Install the latest version, using the http protocol to report data to OAP pip install \u0026#34;apache-skywalking[http]\u0026#34; # Install the latest version, using the kafka protocol to report data to OAP pip install \u0026#34;apache-skywalking[kafka]\u0026#34; # Install a specific version x.y.z # pip install apache-skywalking==x.y.z pip install apache-skywalking==0.1.0 # For example, install version 0.1.0 no matter what the latest version is From Docker Hub SkyWalking Python agent provides convenient dockerfile and images for easy integration utilizing its auto-bootstrap capability.\nSimply pull SkyWalking Python image from Docker Hub based on desired agent version, protocol and Python version.\nFROMapache/skywalking-python:0.8.0-grpc-py3.10# ... build your Python application# If you prefer compact images (built from official Python slim image)FROMapache/skywalking-python:0.8.0-grpc-py3.10-slim# ... build your Python applicationThen, You can build your Python application image based on our agent-enabled Python images and start your applications with SkyWalking agent enabled for you. Please refer to our Containerization Guide for further instructions on integration and configuring.\nFrom Source Code Please refer to the How-to-build-from-sources FAQ.\n","excerpt":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the …","ref":"/docs/skywalking-python/next/en/setup/installation/","title":"Installation"},{"body":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the SkyWalking Python agent via various ways described next.\n Already installed? 
Check out easy ways to start the agent in your application\n  Non-intrusive  | Intrusive  | Containerization\n  All available configurations are listed here\n Important Note on Different Reporter Protocols Currently only gRPC protocol fully supports all available telemetry capabilities in the Python agent.\nWhile gRPC is highly recommended, we provide alternative protocols to suit your production requirements.\nPlease refer to the table below before deciding which report protocol suits best for you.\n   Reporter Protocol Trace Reporter Log Reporter Meter Reporter Profiling     gRPC ✅ ✅ ✅ ✅   HTTP ✅ ✅ ❌ ❌   Kafka ✅ ✅ ✅ ❌    From PyPI  If you want to try out the latest features that are not released yet, please refer to this guide to build from sources.\n The Python agent module is published to PyPI, from where you can use pip to install:\n# Install the latest version, using the default gRPC protocol to report data to OAP pip install \u0026#34;apache-skywalking\u0026#34; # Install support for every protocol (gRPC, HTTP, Kafka) pip install \u0026#34;apache-skywalking[all]\u0026#34; # Install the latest version, using the http protocol to report data to OAP pip install \u0026#34;apache-skywalking[http]\u0026#34; # Install the latest version, using the kafka protocol to report data to OAP pip install \u0026#34;apache-skywalking[kafka]\u0026#34; # Install a specific version x.y.z # pip install apache-skywalking==x.y.z pip install apache-skywalking==0.1.0 # For example, install version 0.1.0 no matter what the latest version is From Docker Hub SkyWalking Python agent provides convenient dockerfile and images for easy integration utilizing its auto-bootstrap capability.\nSimply pull SkyWalking Python image from Docker Hub based on desired agent version, protocol and Python version.\nFROMapache/skywalking-python:0.8.0-grpc-py3.10# ... build your Python application# If you prefer compact images (built from official Python slim image)FROMapache/skywalking-python:0.8.0-grpc-py3.10-slim# ... build your Python applicationThen, You can build your Python application image based on our agent-enabled Python images and start your applications with SkyWalking agent enabled for you. Please refer to our Containerization Guide for further instructions on integration and configuring.\nFrom Source Code Please refer to the How-to-build-from-sources FAQ.\n","excerpt":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the …","ref":"/docs/skywalking-python/v1.0.1/en/setup/installation/","title":"Installation"},{"body":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. 
Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","excerpt":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the …","ref":"/docs/main/latest/en/guides/it-guide/","title":"Integration Tests"},{"body":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","excerpt":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the …","ref":"/docs/main/next/en/guides/it-guide/","title":"Integration Tests"},{"body":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. 
maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","excerpt":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the …","ref":"/docs/main/v9.6.0/en/guides/it-guide/","title":"Integration Tests"},{"body":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","excerpt":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the …","ref":"/docs/main/v9.7.0/en/guides/it-guide/","title":"Integration Tests"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). 
They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. The menu definition supports one and two levels items. The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/latest/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. 
The menu definition supports one and two levels items. The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/next/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack application.\nThe left side menu lists all available supported stack, with default dashboards.\nFollow Official Dashboards menu explores all default dashboards about how to monitor different tech stacks.\nCustom Dashboard Besides, official dashboards, Dashboards provides customization to end users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, log analysis are relative to OAL, MAL, and LAL engines in SkyWalking kernel. 
You should learn them first too.\nService and All entity type dashboard could be set as root(set this to root), which mean this dashboard would be used as the entrance of its layer. If you have multiple root dashboards, UI could choose one randomly(Don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release, set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory, closing tab would LOSE the change permanently.\nSettings Settings provide language, server time zone, and auto-fresh option. These settings are stored in browser local storage. Unless you clear them manually, those would not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.0.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nSettings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. 
If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.1.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nWidget The widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  
Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nSettings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.2.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nSettings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.3.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. 
Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. 
If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.4.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.5.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. 
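For readability, the menu definition that the next paragraph quotes in flattened form corresponds roughly to the following structure. Entry names are copied as they appear in that flattened snippet (their exact spelling and spacing cannot be recovered from it), so treat this as an illustrative sketch and check ui-initialized-templates/menu.yaml for the authoritative list:

```yaml
menus:
  - name: GeneralService
    icon: general_service
    menus:
      - name: Services
        layer: GENERAL
      - name: VisualDatabase
        layer: VIRTUAL_DATABASE
      - name: VisualCache
        layer: VIRTUAL_CACHE
      - name: VisualMQ
        layer: VIRTUAL_MQ
      # ... more entries elided in the original snippet
  - name: SelfObservability
    icon: self_observability
    menus:
      - name: SkyWalkingServer
        layer: SO11Y_OAP
      - name: Satellite
        layer: SO11Y_SATELLITE
```

Each leaf item carries a layer, which is what the UI uses for navigation once that layer has observed services.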
The menu definition supports one and two levels items. The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.6.0/en/ui/readme/","title":"Introduction to UI"},{"body":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. 
The menu definition supports one and two levels items. The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","excerpt":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization …","ref":"/docs/main/v9.7.0/en/ui/readme/","title":"Introduction to UI"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. 
For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/latest/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/next/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service, because gRPC may be not supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users who are not familiar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. 
The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. 
But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  
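Reformatted for readability, the flattened core section quoted above corresponds roughly to the following fragment of the backend configuration (values are the defaults shown on this page):

```yaml
core:
  default:
    restHost: 0.0.0.0         # HTTP REST binding IP
    restPort: 12800           # HTTP REST binding port
    restContextPath: /
    gRPCHost: 0.0.0.0         # gRPC binding IP
    gRPCPort: 11800           # gRPC binding port
```

The first pair serves the HTTP REST (GraphQL) endpoint used by the UI; the second pair serves the gRPC endpoint used by most agents and probes.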
Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","excerpt":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-ip-port/","title":"IP and port setting"},{"body":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector to inject the agent sidecar into a pod.\nWhen enabled in a pod\u0026rsquo;s namespace, the injector injects the java agent container at pod creation time using a mutating webhook admission controller. By rendering the java agent to a shared volume, containers within the pod can use the java agent.\nThe following sections describe how to configure the agent, if you want to try it directly, please see Usage for more details.\nInstall Injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nActive the java agent injection We have two granularities here: namespace and pod.\n   Resource Label Enabled value Disabled value     Namespace swck-injection enabled disabled   Pod swck-java-agent-injected \u0026ldquo;true\u0026rdquo; \u0026ldquo;false\u0026rdquo;    The injector is configured with the following logic:\n If either label is disabled, the pod is not injected. If two labels are enabled, the pod is injected.  Follow the next steps to active java agent injection.\n Label the namespace with swck-injection=enabled  $ kubectl label namespace default(your namespace) swck-injection=enabled  Add label swck-java-agent-injected: \u0026quot;true\u0026quot; to the pod, and get the result as below.  
$ kubectl get pod -l swck-java-agent-injected=true NAME READY STATUS RESTARTS AGE inject-demo 1/1 Running 0 2d2h The ways to configure the agent The java agent injector supports a precedence order to configure the agent:\n Annotations \u0026gt; SwAgent \u0026gt; Configmap (Deprecated) \u0026gt; Default Configmap (Deprecated)\nAnnotations Annotations are described in kubernetes annotations doc.\nWe support annotations in agent annotations and sidecar annotations.\nSwAgent SwAgent is a Customer Resource defined by SWCK.\nWe support SwAgent in SwAgent usage guide\nConfigmap (Deprecated) Configmap is described in kubernetes configmap doc.\nWe need to use configmap to set agent.config so that we can modify the agent configuration without entering the container.\nIf there are different configmap in the namepsace, you can choose a configmap by setting sidecar annotations; If there is no configmap, the injector will create a default configmap.\nDefault configmap (Deprecated) The injector will create the default configmap to overlay the agent.config in the agent container.\nThe default configmap is shown as below, one is agent.service_name and the string can\u0026rsquo;t be empty; the other is collector.backend_service and it needs to be a legal IP address and port, the other fields need to be guaranteed by users themselves. Users can change it as their default configmap.\ndata: agent.config: | # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. To avoid the default configmap deleting by mistake, we use a configmap controller to watch the default configmap. In addition, if the user applies an invalid configuration, such as a malformed backend_service, the controller will use the default configmap.\nConfigure the agent The injector supports two methods to configure agent:\n Only use the default configuration. Use annotations to overlay the default configuration.  Use the default agent configuration After activating the java agent injection, if not set the annotations, the injector will use the default agent configuration directly as below.\ninitContainers: - args: - -c - mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent command: - sh image: apache/skywalking-java-agent:8.16.0-java8 name: inject-skywalking-agent volumeMounts: - mountPath: /sky/agent name: sky-agent volumes: - emptyDir: {} name: sky-agent - configMap: name: skywalking-swck-java-agent-configmap name: java-agent-configmap-volume Use SwAgent to overlay default agent configuration The injector will read the SwAgent CR when pods creating.\nSwAgent CRD basic structure is like:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;There are three kind of configs in SwAgent CR.\n1. 
label selector and container matcher label selector and container matcher decides which pod and container should be injected.\n   key path description default value     spec.selector label selector for pods which should be effected during injection. if no label selector was set, SwAgent CR config will affect every pod during injection. no default value   spec.containerMatcher container matcher is used to decide which container to be inject during injection. regular expression is supported. default value \u0026lsquo;.*\u0026rsquo; would match any container name. .*    2. injection configuration injection configuration will affect on agent injection behaviour\n   key path description default value     javaSidecar javaSidecar is the configs for init container, which holds agent sdk and take agent sdk to the target containers.    javaSidecar.name the name of the init container. inject-skywalking-agent   javaSidecar.image the image of the init container. apache/skywalking-java-agent:8.16.0-java8   SharedVolumeName SharedVolume is the name of an empty volume which shared by initContainer and target containers. sky-agent   OptionalPlugins Select the optional plugin which needs to be moved to the directory(/plugins). Such as trace,webflux,cloud-gateway-2.1.x. no default value   OptionalReporterPlugins Select the optional reporter plugin which needs to be moved to the directory(/plugins). such as kafka. no default value    3. skywalking agent configuration skywalking agent configuration is for agent SDK.\n   key path description default value     javaSidecar.env the env list to be appended to target containers. usually we can use it to setup agent configuration at container level. no default value.    Use annotations to overlay default agent configuration The injector can recognize five kinds of annotations to configure the agent as below.\n1. strategy configuration The strategy configuration is the annotation as below.\n   Annotation key Description Annotation Default value     strategy.skywalking.apache.org/inject.Container Select the injected container, if not set, inject all containers. not set    2. agent configuration The agent configuration is the annotation like agent.skywalking.apache.org/{option}: {value}, and the option support agent.xxx 、osinfo.xxx 、collector.xxx 、 logging.xxx 、statuscheck.xxx 、correlation.xxx 、jvm.xxx 、buffer.xxx 、 profile.xxx 、 meter.xxx 、 log.xxx in agent.config, such as agent.skywalking.apache.org/agent.namespace, agent.skywalking.apache.org/meter.max_meter_size, etc.\n3. plugins configuration The plugins configuration is the annotation like plugins.skywalking.apache.org/{option}: {value}, and the option only support plugin.xxx in the agent.config, such as plugins.skywalking.apache.org/plugin.mount, plugins.skywalking.apache.org/plugin.mongodb.trace_param, etc.\n4. optional plugin configuration The optional plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional.skywalking.apache.org Select the optional plugin which needs to be moved to the directory(/plugins). Users can select several optional plugins by separating from |, such as trace|webflux|cloud-gateway-2.1.x. not set    5. optional reporter plugin configuration The optional reporter plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional-reporter.skywalking.apache.org Select the optional reporter plugin which needs to be moved to the directory(/plugins). 
Users can select several optional reporter plugins by separating from |, such as kafka. not set    Configure sidecar The injector can recognize the following annotations to configure the sidecar:\n   Annotation key Description Annotation Default value     sidecar.skywalking.apache.org/initcontainer.Name The name of the injected java agent container. inject-skywalking-agent   sidecar.skywalking.apache.org/initcontainer.Image The container image of the injected java agent container. apache/skywalking-java-agent:8.16.0-java8   sidecar.skywalking.apache.org/initcontainer.Command The command of the injected java agent container. sh   sidecar.skywalking.apache.org/initcontainer.args.Option The args option of the injected java agent container. -c   sidecar.skywalking.apache.org/initcontainer.args.Command The args command of the injected java agent container. mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent   sidecar.skywalking.apache.org/initcontainer.resources.limits The resources limits of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/initcontainer.resources.requests The resources requests of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/sidecarVolume.Name The name of sidecar Volume. sky-agent   sidecar.skywalking.apache.org/sidecarVolumeMount.MountPath Mount path of the agent directory in the injected container. /sky/agent   sidecar.skywalking.apache.org/env.Name Environment Name used by the injected container (application container). JAVA_TOOL_OPTIONS   sidecar.skywalking.apache.org/env.Value Environment variables used by the injected container (application container). -javaagent:/sky/agent/skywalking-agent.jar    The ways to get the final injected agent\u0026rsquo;s configuration Please see javaagent introduction for details.\n","excerpt":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector …","ref":"/docs/skywalking-swck/latest/java-agent-injector/","title":"Java agent injector Manual"},{"body":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector to inject the agent sidecar into a pod.\nWhen enabled in a pod\u0026rsquo;s namespace, the injector injects the java agent container at pod creation time using a mutating webhook admission controller. By rendering the java agent to a shared volume, containers within the pod can use the java agent.\nThe following sections describe how to configure the agent, if you want to try it directly, please see Usage for more details.\nInstall Injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nActive the java agent injection We have two granularities here: namespace and pod.\n   Resource Label Enabled value Disabled value     Namespace swck-injection enabled disabled   Pod swck-java-agent-injected \u0026ldquo;true\u0026rdquo; \u0026ldquo;false\u0026rdquo;    The injector is configured with the following logic:\n If either label is disabled, the pod is not injected. If two labels are enabled, the pod is injected.  
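Expressed as manifests rather than the kubectl commands in the steps that follow, the two labels look roughly like this (namespace, pod name, and application image are placeholders):

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: default                      # your namespace
  labels:
    swck-injection: enabled          # enables injection for the namespace
---
apiVersion: v1
kind: Pod
metadata:
  name: inject-demo                  # your pod
  labels:
    swck-java-agent-injected: "true" # enables injection for this pod
spec:
  containers:
    - name: app
      image: your-app-image:latest   # placeholder application image
```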
Follow the next steps to active java agent injection.\n Label the namespace with swck-injection=enabled  $ kubectl label namespace default(your namespace) swck-injection=enabled  Add label swck-java-agent-injected: \u0026quot;true\u0026quot; to the pod, and get the result as below.  $ kubectl get pod -l swck-java-agent-injected=true NAME READY STATUS RESTARTS AGE inject-demo 1/1 Running 0 2d2h The ways to configure the agent The java agent injector supports a precedence order to configure the agent:\n Annotations \u0026gt; SwAgent \u0026gt; Configmap (Deprecated) \u0026gt; Default Configmap (Deprecated)\nAnnotations Annotations are described in kubernetes annotations doc.\nWe support annotations in agent annotations and sidecar annotations.\nSwAgent SwAgent is a Customer Resource defined by SWCK.\nWe support SwAgent in SwAgent usage guide\nConfigmap (Deprecated) Configmap is described in kubernetes configmap doc.\nWe need to use configmap to set agent.config so that we can modify the agent configuration without entering the container.\nIf there are different configmap in the namepsace, you can choose a configmap by setting sidecar annotations; If there is no configmap, the injector will create a default configmap.\nDefault configmap (Deprecated) The injector will create the default configmap to overlay the agent.config in the agent container.\nThe default configmap is shown as below, one is agent.service_name and the string can\u0026rsquo;t be empty; the other is collector.backend_service and it needs to be a legal IP address and port, the other fields need to be guaranteed by users themselves. Users can change it as their default configmap.\ndata: agent.config: | # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. To avoid the default configmap deleting by mistake, we use a configmap controller to watch the default configmap. In addition, if the user applies an invalid configuration, such as a malformed backend_service, the controller will use the default configmap.\nConfigure the agent The injector supports two methods to configure agent:\n Only use the default configuration. Use annotations to overlay the default configuration.  
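For reference, the default configmap data shown (flattened) above corresponds to a ConfigMap roughly like the following; the metadata is inferred from the default volume name used later on this page and should be treated as illustrative:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: skywalking-swck-java-agent-configmap   # name used by the default configmap volume
  namespace: default                           # placeholder namespace
data:
  agent.config: |
    # The service name in UI
    agent.service_name=${SW_AGENT_NAME:Your_ApplicationName}
    # Backend service addresses.
    collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800}
```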
Use the default agent configuration After activating the java agent injection, if not set the annotations, the injector will use the default agent configuration directly as below.\ninitContainers: - args: - -c - mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent command: - sh image: apache/skywalking-java-agent:8.16.0-java8 name: inject-skywalking-agent volumeMounts: - mountPath: /sky/agent name: sky-agent volumes: - emptyDir: {} name: sky-agent - configMap: name: skywalking-swck-java-agent-configmap name: java-agent-configmap-volume Use SwAgent to overlay default agent configuration The injector will read the SwAgent CR when pods creating.\nSwAgent CRD basic structure is like:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;bootstrapPlugins:- \u0026#34;jdk-threading\u0026#34;There are three kind of configs in SwAgent CR.\n1. label selector and container matcher label selector and container matcher decides which pod and container should be injected.\n   key path description default value     spec.selector label selector for pods which should be effected during injection. if no label selector was set, SwAgent CR config will affect every pod during injection. no default value   spec.containerMatcher container matcher is used to decide which container to be inject during injection. regular expression is supported. default value \u0026lsquo;.*\u0026rsquo; would match any container name. .*    2. injection configuration injection configuration will affect on agent injection behaviour\n   key path description default value     javaSidecar javaSidecar is the configs for init container, which holds agent sdk and take agent sdk to the target containers.    javaSidecar.name the name of the init container. inject-skywalking-agent   javaSidecar.image the image of the init container. apache/skywalking-java-agent:8.16.0-java8   SharedVolumeName SharedVolume is the name of an empty volume which shared by initContainer and target containers. sky-agent   OptionalPlugins Select the optional plugin which needs to be moved to the directory(/plugins). Such as trace,webflux,cloud-gateway-2.1.x. no default value   OptionalReporterPlugins Select the optional reporter plugin which needs to be moved to the directory(/plugins). such as kafka. no default value   BootstrapPlugins Select the bootstrap plugin which needs to be moved to the directory(/plugins). such as jdk-threading. no default value    3. skywalking agent configuration skywalking agent configuration is for agent SDK.\n   key path description default value     javaSidecar.env the env list to be appended to target containers. usually we can use it to setup agent configuration at container level. no default value.    Use annotations to overlay default agent configuration The injector can recognize five kinds of annotations to configure the agent as below.\n1. 
strategy configuration The strategy configuration is the annotation as below.\n   Annotation key Description Annotation Default value     strategy.skywalking.apache.org/inject.Container Select the injected container, if not set, inject all containers. not set    2. agent configuration The agent configuration is the annotation like agent.skywalking.apache.org/{option}: {value}, and the option support agent.xxx 、osinfo.xxx 、collector.xxx 、 logging.xxx 、statuscheck.xxx 、correlation.xxx 、jvm.xxx 、buffer.xxx 、 profile.xxx 、 meter.xxx 、 log.xxx in agent.config, such as agent.skywalking.apache.org/agent.namespace, agent.skywalking.apache.org/meter.max_meter_size, etc.\n3. plugins configuration The plugins configuration is the annotation like plugins.skywalking.apache.org/{option}: {value}, and the option only support plugin.xxx in the agent.config, such as plugins.skywalking.apache.org/plugin.mount, plugins.skywalking.apache.org/plugin.mongodb.trace_param, etc.\n4. optional plugin configuration The optional plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional.skywalking.apache.org Select the optional plugin which needs to be moved to the directory(/plugins). Users can select several optional plugins by separating from |, such as trace|webflux|cloud-gateway-2.1.x. not set    5. optional reporter plugin configuration The optional reporter plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional-reporter.skywalking.apache.org Select the optional reporter plugin which needs to be moved to the directory(/plugins). Users can select several optional reporter plugins by separating from |, such as kafka. not set    6. bootstrap plugin configuration The bootstrap plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     bootstrap.skywalking.apache.org Select the bootstrap plugin which needs to be moved to the directory(/plugins). Users can select several bootstrap plugins by separating from |, such as jdk-threading. not set    Configure sidecar The injector can recognize the following annotations to configure the sidecar:\n   Annotation key Description Annotation Default value     sidecar.skywalking.apache.org/initcontainer.Name The name of the injected java agent container. inject-skywalking-agent   sidecar.skywalking.apache.org/initcontainer.Image The container image of the injected java agent container. apache/skywalking-java-agent:8.16.0-java8   sidecar.skywalking.apache.org/initcontainer.Command The command of the injected java agent container. sh   sidecar.skywalking.apache.org/initcontainer.args.Option The args option of the injected java agent container. -c   sidecar.skywalking.apache.org/initcontainer.args.Command The args command of the injected java agent container. mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent   sidecar.skywalking.apache.org/initcontainer.resources.limits The resources limits of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/initcontainer.resources.requests The resources requests of the injected java agent container. 
You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/sidecarVolume.Name The name of sidecar Volume. sky-agent   sidecar.skywalking.apache.org/sidecarVolumeMount.MountPath Mount path of the agent directory in the injected container. /sky/agent   sidecar.skywalking.apache.org/env.Name Environment Name used by the injected container (application container). JAVA_TOOL_OPTIONS   sidecar.skywalking.apache.org/env.Value Environment variables used by the injected container (application container). -javaagent:/sky/agent/skywalking-agent.jar    The ways to get the final injected agent\u0026rsquo;s configuration Please see javaagent introduction for details.\n","excerpt":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector …","ref":"/docs/skywalking-swck/next/java-agent-injector/","title":"Java agent injector Manual"},{"body":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector to inject the agent sidecar into a pod.\nWhen enabled in a pod\u0026rsquo;s namespace, the injector injects the java agent container at pod creation time using a mutating webhook admission controller. By rendering the java agent to a shared volume, containers within the pod can use the java agent.\nThe following sections describe how to configure the agent, if you want to try it directly, please see Usage for more details.\nInstall Injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nActive the java agent injection We have two granularities here: namespace and pod.\n   Resource Label Enabled value Disabled value     Namespace swck-injection enabled disabled   Pod swck-java-agent-injected \u0026ldquo;true\u0026rdquo; \u0026ldquo;false\u0026rdquo;    The injector is configured with the following logic:\n If either label is disabled, the pod is not injected. If two labels are enabled, the pod is injected.  Follow the next steps to active java agent injection.\n Label the namespace with swck-injection=enabled  $ kubectl label namespace default(your namespace) swck-injection=enabled  Add label swck-java-agent-injected: \u0026quot;true\u0026quot; to the pod, and get the result as below.  
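For instance, the label can be declared directly in the pod manifest; a minimal sketch (the pod name mirrors the output below and the image is the demo image used elsewhere in these docs):

```yaml
# Illustrative only: a bare Pod carrying the pod-level injection label; any workload
# whose pod template carries the same label is handled identically by the injector.
apiVersion: v1
kind: Pod
metadata:
  name: inject-demo
  namespace: default
  labels:
    swck-java-agent-injected: "true"   # the pod-level injection label from the table above
spec:
  containers:
    - name: inject-demo
      image: ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1   # demo image used elsewhere in these docs
```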
$ kubectl get pod -l swck-java-agent-injected=true NAME READY STATUS RESTARTS AGE inject-demo 1/1 Running 0 2d2h The ways to configure the agent The java agent injector supports a precedence order to configure the agent:\n Annotations \u0026gt; SwAgent \u0026gt; Configmap (Deprecated) \u0026gt; Default Configmap (Deprecated)\nAnnotations Annotations are described in the kubernetes annotations doc.\nWe support annotations in agent annotations and sidecar annotations.\nSwAgent SwAgent is a Custom Resource defined by SWCK.\nIts usage is described in the SwAgent usage guide.\nConfigmap (Deprecated) Configmap is described in the kubernetes configmap doc.\nWe use a configmap to set agent.config so that the agent configuration can be modified without entering the container.\nIf there are several configmaps in the namespace, you can choose one by setting sidecar annotations; if there is no configmap, the injector will create a default configmap.\nDefault configmap (Deprecated) The injector creates the default configmap to overlay the agent.config in the agent container.\nThe default configmap is shown below: agent.service_name must not be an empty string, collector.backend_service must be a legal IP address and port, and the remaining fields are the users\u0026rsquo; own responsibility. Users can change it to serve as their default configmap.\ndata: agent.config: | # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. To prevent the default configmap from being deleted by mistake, we use a configmap controller to watch it. In addition, if the user applies an invalid configuration, such as a malformed backend_service, the controller will fall back to the default configmap.\nConfigure the agent The injector supports two methods to configure the agent:\n Only use the default configuration. Use annotations to overlay the default configuration.  
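For the deprecated configmap path described above, a user-supplied configmap could follow this sketch (the name matches the default volume shown in the next section; the values are the documented defaults, so adjust them to your environment):

```yaml
# Assumed manifest shape; the injector mounts a configmap of this name
# as java-agent-configmap-volume by default.
apiVersion: v1
kind: ConfigMap
metadata:
  name: skywalking-swck-java-agent-configmap
  namespace: default
data:
  agent.config: |
    # The service name in UI
    agent.service_name=${SW_AGENT_NAME:Your_ApplicationName}
    # Backend service addresses.
    collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800}
```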
Use the default agent configuration After activating the java agent injection, if not set the annotations, the injector will use the default agent configuration directly as below.\ninitContainers: - args: - -c - mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent command: - sh image: apache/skywalking-java-agent:8.16.0-java8 name: inject-skywalking-agent volumeMounts: - mountPath: /sky/agent name: sky-agent volumes: - emptyDir: {} name: sky-agent - configMap: name: skywalking-swck-java-agent-configmap name: java-agent-configmap-volume Use SwAgent to overlay default agent configuration The injector will read the SwAgent CR when pods creating.\nSwAgent CRD basic structure is like:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;bootstrapPlugins:- \u0026#34;jdk-threading\u0026#34;There are three kind of configs in SwAgent CR.\n1. label selector and container matcher label selector and container matcher decides which pod and container should be injected.\n   key path description default value     spec.selector label selector for pods which should be effected during injection. if no label selector was set, SwAgent CR config will affect every pod during injection. no default value   spec.containerMatcher container matcher is used to decide which container to be inject during injection. regular expression is supported. default value \u0026lsquo;.*\u0026rsquo; would match any container name. .*    2. injection configuration injection configuration will affect on agent injection behaviour\n   key path description default value     javaSidecar javaSidecar is the configs for init container, which holds agent sdk and take agent sdk to the target containers.    javaSidecar.name the name of the init container. inject-skywalking-agent   javaSidecar.image the image of the init container. apache/skywalking-java-agent:8.16.0-java8   SharedVolumeName SharedVolume is the name of an empty volume which shared by initContainer and target containers. sky-agent   OptionalPlugins Select the optional plugin which needs to be moved to the directory(/plugins). Such as trace,webflux,cloud-gateway-2.1.x. no default value   OptionalReporterPlugins Select the optional reporter plugin which needs to be moved to the directory(/plugins). such as kafka. no default value   BootstrapPlugins Select the bootstrap plugin which needs to be moved to the directory(/plugins). such as jdk-threading. no default value    3. skywalking agent configuration skywalking agent configuration is for agent SDK.\n   key path description default value     javaSidecar.env the env list to be appended to target containers. usually we can use it to setup agent configuration at container level. no default value.    Use annotations to overlay default agent configuration The injector can recognize five kinds of annotations to configure the agent as below.\n1. 
strategy configuration The strategy configuration is the annotation as below.\n   Annotation key Description Annotation Default value     strategy.skywalking.apache.org/inject.Container Select the injected container, if not set, inject all containers. not set    2. agent configuration The agent configuration is the annotation like agent.skywalking.apache.org/{option}: {value}, and the option support agent.xxx 、osinfo.xxx 、collector.xxx 、 logging.xxx 、statuscheck.xxx 、correlation.xxx 、jvm.xxx 、buffer.xxx 、 profile.xxx 、 meter.xxx 、 log.xxx in agent.config, such as agent.skywalking.apache.org/agent.namespace, agent.skywalking.apache.org/meter.max_meter_size, etc.\n3. plugins configuration The plugins configuration is the annotation like plugins.skywalking.apache.org/{option}: {value}, and the option only support plugin.xxx in the agent.config, such as plugins.skywalking.apache.org/plugin.mount, plugins.skywalking.apache.org/plugin.mongodb.trace_param, etc.\n4. optional plugin configuration The optional plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional.skywalking.apache.org Select the optional plugin which needs to be moved to the directory(/plugins). Users can select several optional plugins by separating from |, such as trace|webflux|cloud-gateway-2.1.x. not set    5. optional reporter plugin configuration The optional reporter plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional-reporter.skywalking.apache.org Select the optional reporter plugin which needs to be moved to the directory(/plugins). Users can select several optional reporter plugins by separating from |, such as kafka. not set    6. bootstrap plugin configuration The bootstrap plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     bootstrap.skywalking.apache.org Select the bootstrap plugin which needs to be moved to the directory(/plugins). Users can select several bootstrap plugins by separating from |, such as jdk-threading. not set    Configure sidecar The injector can recognize the following annotations to configure the sidecar:\n   Annotation key Description Annotation Default value     sidecar.skywalking.apache.org/initcontainer.Name The name of the injected java agent container. inject-skywalking-agent   sidecar.skywalking.apache.org/initcontainer.Image The container image of the injected java agent container. apache/skywalking-java-agent:8.16.0-java8   sidecar.skywalking.apache.org/initcontainer.Command The command of the injected java agent container. sh   sidecar.skywalking.apache.org/initcontainer.args.Option The args option of the injected java agent container. -c   sidecar.skywalking.apache.org/initcontainer.args.Command The args command of the injected java agent container. mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent   sidecar.skywalking.apache.org/initcontainer.resources.limits The resources limits of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/initcontainer.resources.requests The resources requests of the injected java agent container. 
You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/sidecarVolume.Name The name of sidecar Volume. sky-agent   sidecar.skywalking.apache.org/sidecarVolumeMount.MountPath Mount path of the agent directory in the injected container. /sky/agent   sidecar.skywalking.apache.org/env.Name Environment Name used by the injected container (application container). JAVA_TOOL_OPTIONS   sidecar.skywalking.apache.org/env.Value Environment variables used by the injected container (application container). -javaagent:/sky/agent/skywalking-agent.jar    The ways to get the final injected agent\u0026rsquo;s configuration Please see javaagent introduction for details.\n","excerpt":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector …","ref":"/docs/skywalking-swck/v0.9.0/java-agent-injector/","title":"Java agent injector Manual"},{"body":"Java agent injector Usage In this example, you will learn how to use the java agent injector.\nInstall injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nDeployment Example Let\u0026rsquo;s take a demo deployment for example.\n# demo1.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:labels:app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Enable Injection for Namespace and Deployments/StatefulSets. 
Firstly, set the injection label in your namespace as below.\nkubectl label namespace default(your namespace) swck-injection=enabled Secondly, set the injection label for your target Deployment/StatefulSet.\nkubectl -n default patch deployment demo1 --patch \u0026#39;{ \u0026#34;spec\u0026#34;: { \u0026#34;template\u0026#34;: { \u0026#34;metadata\u0026#34;: { \u0026#34;labels\u0026#34;: { \u0026#34;swck-java-agent-injected\u0026#34;: \u0026#34;true\u0026#34; } } } } }\u0026#39; Then the pods create by the Deployments/StatefulSets would be recreated with agent injected.\nThe injected pods would be like this:\nspec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jarimage:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agentcommand:- shimage:apache/skywalking-java-agent:8.10.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 demo1 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-5fbb6fcd98-cq5ws 1/1 Running 0 54s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:09:34Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-7fdffc7b95 uid: 417c413f-0cc0-41f9-b6eb-0192eb8c8622 resourceVersion: \u0026#34;25067\u0026#34; uid: 1cdab012-784c-4efb-b5d2-c032eb2fb22a spec: backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026#34;2022-08-16T12:09:34Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:10:04Z\u0026#34; realInjectedNum: 1 Use SwAgent CR to setup override default configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nApply SwAgent CR with correct label selector and container matcher:\n# SwAgent.yamlapiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;kubectl -n default apply swagent.yaml You can also get SwAgent CR by:\nkubectl -n default get SwAgent NAME AGE swagent-demo 38s Now the pod is still the old one, because pod could not load the SwAgent config automatically.\nSo you need to recreate pod to load SwAgent config. 
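One way to recreate the pods (a small sketch, assuming the demo1 Deployment from above) is a rolling restart of the workload:

```shell
# Trigger pod recreation so the SwAgent config is picked up at injection time.
kubectl -n default rollout restart deployment demo1
# Wait for the new pods to become ready.
kubectl -n default rollout status deployment demo1
```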
For the pods created by Deployment/StatefulSet, you can just simply delete the old pod.\n# verify pods to be delete  kubectl -n default get pods -l app=demo1 # delete pods kubectl -n default delete pods -l app=demo1 After the pods recreated, we can get injected pod as below.\nkubectl -n default get pods -l app=demo1 spec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.service_name=demo1,collector.backend_service=skywalking-system-oap.skywalking-system:11800- name:SW_LOGGING_LEVELvalue:DEBUG- name:SW_AGENT_COLLECTOR_BACKEND_SERVICESvalue:skywalking-system-oap.default.svc:11800image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agent-demoinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:swagent-demovolumeMounts:- mountPath:/sky/agentname:sky-agent-demovolumes:- emptyDir:{}name:sky-agent-demoUse annotation to override sidecar configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nThen add agent configuration and sidecar configuration to annotations as below.\n# demo1_anno.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:annotations:strategy.skywalking.apache.org/inject.Container:\u0026#34;demo1\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;app\u0026#34;agent.skywalking.apache.org/agent.sample_n_per_3_secs:\u0026#34;6\u0026#34;agent.skywalking.apache.org/agent.class_cache_mode:\u0026#34;MEMORY\u0026#34;agent.skywalking.apache.org/agent.ignore_suffix:\u0026#34;\u0026#39;jpg,.jpeg\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mount:\u0026#34;\u0026#39;plugins,activations\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mongodb.trace_param:\u0026#34;true\u0026#34;plugins.skywalking.apache.org/plugin.influxdb.trace_influxql:\u0026#34;false\u0026#34;optional.skywalking.apache.org:\u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34;optional-reporter.skywalking.apache.org:\u0026#34;kafka\u0026#34;labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Then we can get injected pod as below:\nkubectl -n default get pods -l app=demo1 spec:containers:- image:nginx:1.16.1imagePullPolicy:IfNotPresentname:nginx- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.ignore_suffix=\u0026#39;jpg,.jpeg\u0026#39;,agent.service_name=app,agent.class_cache_mode=MEMORY,agent.sample_n_per_3_secs=6,plugin.mongodb.trace_param=true,plugin.influxdb.trace_influxql=false,plugin.mount=\u0026#39;plugins,activations\u0026#39;image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1ports:- 
containerPort:8085protocol:TCPreadinessProbe:failureThreshold:10httpGet:path:/helloport:8085scheme:HTTPinitialDelaySeconds:3periodSeconds:3successThreshold:1timeoutSeconds:1volumeMounts:- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/\u0026amp;\u0026amp;cd /sky/agent/optional-reporter-plugins/ \u0026amp;\u0026amp; ls | grep -E \u0026#34;kafka\u0026#34; | xargs-i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 app 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-d48b96467-p7zrv 1/1 Running 0 5m25s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:18:53Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-d48b96467 uid: 2b7f1ac4-b459-41cd-8568-ecd4578ca457 resourceVersion: \u0026#34;26187\u0026#34; uid: c2b2f3e2-9442-4465-9423-d24249b2c53b spec: agentConfiguration: agent.class_cache_mode: MEMORY agent.ignore_suffix: \u0026#39;\u0026#39;\u0026#39;jpg,.jpeg\u0026#39;\u0026#39;\u0026#39; agent.sample_n_per_3_secs: \u0026#34;6\u0026#34; agent.service_name: app optional-plugin: trace|webflux|cloud-gateway-2.1.x optional-reporter-plugin: kafka plugin.influxdb.trace_influxql: \u0026#34;false\u0026#34; plugin.mongodb.trace_param: \u0026#34;true\u0026#34; plugin.mount: \u0026#39;\u0026#39;\u0026#39;plugins,activations\u0026#39;\u0026#39;\u0026#39; backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: app status: creationTime: \u0026#34;2022-08-16T12:18:53Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:19:18Z\u0026#34; realInjectedNum: 1 ","excerpt":"Java agent injector Usage In this example, you will learn how to use the java agent injector. …","ref":"/docs/skywalking-swck/latest/examples/java-agent-injector-usage/","title":"Java agent injector Usage"},{"body":"Java agent injector Usage In this example, you will learn how to use the java agent injector.\nInstall injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nDeployment Example Let\u0026rsquo;s take a demo deployment for example.\n# demo1.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:labels:app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Enable Injection for Namespace and Deployments/StatefulSets. 
Firstly, set the injection label in your namespace as below.\nkubectl label namespace default(your namespace) swck-injection=enabled Secondly, set the injection label for your target Deployment/StatefulSet.\nkubectl -n default patch deployment demo1 --patch \u0026#39;{ \u0026#34;spec\u0026#34;: { \u0026#34;template\u0026#34;: { \u0026#34;metadata\u0026#34;: { \u0026#34;labels\u0026#34;: { \u0026#34;swck-java-agent-injected\u0026#34;: \u0026#34;true\u0026#34; } } } } }\u0026#39; Then the pods create by the Deployments/StatefulSets would be recreated with agent injected.\nThe injected pods would be like this:\nspec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jarimage:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agentcommand:- shimage:apache/skywalking-java-agent:8.10.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 demo1 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-5fbb6fcd98-cq5ws 1/1 Running 0 54s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:09:34Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-7fdffc7b95 uid: 417c413f-0cc0-41f9-b6eb-0192eb8c8622 resourceVersion: \u0026#34;25067\u0026#34; uid: 1cdab012-784c-4efb-b5d2-c032eb2fb22a spec: backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026#34;2022-08-16T12:09:34Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:10:04Z\u0026#34; realInjectedNum: 1 Use SwAgent CR to setup override default configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nApply SwAgent CR with correct label selector and container matcher:\n# SwAgent.yamlapiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;kubectl -n default apply swagent.yaml You can also get SwAgent CR by:\nkubectl -n default get SwAgent NAME AGE swagent-demo 38s Now the pod is still the old one, because pod could not load the SwAgent config automatically.\nSo you need to recreate pod to load SwAgent config. 
For the pods created by Deployment/StatefulSet, you can just simply delete the old pod.\n# verify pods to be delete  kubectl -n default get pods -l app=demo1 # delete pods kubectl -n default delete pods -l app=demo1 After the pods recreated, we can get injected pod as below.\nkubectl -n default get pods -l app=demo1 spec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.service_name=demo1,collector.backend_service=skywalking-system-oap.skywalking-system:11800- name:SW_LOGGING_LEVELvalue:DEBUG- name:SW_AGENT_COLLECTOR_BACKEND_SERVICESvalue:skywalking-system-oap.default.svc:11800image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agent-demoinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:swagent-demovolumeMounts:- mountPath:/sky/agentname:sky-agent-demovolumes:- emptyDir:{}name:sky-agent-demoUse annotation to override sidecar configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nThen add agent configuration and sidecar configuration to annotations as below.\n# demo1_anno.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:annotations:strategy.skywalking.apache.org/inject.Container:\u0026#34;demo1\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;app\u0026#34;agent.skywalking.apache.org/agent.sample_n_per_3_secs:\u0026#34;6\u0026#34;agent.skywalking.apache.org/agent.class_cache_mode:\u0026#34;MEMORY\u0026#34;agent.skywalking.apache.org/agent.ignore_suffix:\u0026#34;\u0026#39;jpg,.jpeg\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mount:\u0026#34;\u0026#39;plugins,activations\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mongodb.trace_param:\u0026#34;true\u0026#34;plugins.skywalking.apache.org/plugin.influxdb.trace_influxql:\u0026#34;false\u0026#34;optional.skywalking.apache.org:\u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34;optional-reporter.skywalking.apache.org:\u0026#34;kafka\u0026#34;labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Then we can get injected pod as below:\nkubectl -n default get pods -l app=demo1 spec:containers:- image:nginx:1.16.1imagePullPolicy:IfNotPresentname:nginx- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.ignore_suffix=\u0026#39;jpg,.jpeg\u0026#39;,agent.service_name=app,agent.class_cache_mode=MEMORY,agent.sample_n_per_3_secs=6,plugin.mongodb.trace_param=true,plugin.influxdb.trace_influxql=false,plugin.mount=\u0026#39;plugins,activations\u0026#39;image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1ports:- 
containerPort:8085protocol:TCPreadinessProbe:failureThreshold:10httpGet:path:/helloport:8085scheme:HTTPinitialDelaySeconds:3periodSeconds:3successThreshold:1timeoutSeconds:1volumeMounts:- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/\u0026amp;\u0026amp;cd /sky/agent/optional-reporter-plugins/ \u0026amp;\u0026amp; ls | grep -E \u0026#34;kafka\u0026#34; | xargs-i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 app 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-d48b96467-p7zrv 1/1 Running 0 5m25s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:18:53Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-d48b96467 uid: 2b7f1ac4-b459-41cd-8568-ecd4578ca457 resourceVersion: \u0026#34;26187\u0026#34; uid: c2b2f3e2-9442-4465-9423-d24249b2c53b spec: agentConfiguration: agent.class_cache_mode: MEMORY agent.ignore_suffix: \u0026#39;\u0026#39;\u0026#39;jpg,.jpeg\u0026#39;\u0026#39;\u0026#39; agent.sample_n_per_3_secs: \u0026#34;6\u0026#34; agent.service_name: app optional-plugin: trace|webflux|cloud-gateway-2.1.x optional-reporter-plugin: kafka plugin.influxdb.trace_influxql: \u0026#34;false\u0026#34; plugin.mongodb.trace_param: \u0026#34;true\u0026#34; plugin.mount: \u0026#39;\u0026#39;\u0026#39;plugins,activations\u0026#39;\u0026#39;\u0026#39; backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: app status: creationTime: \u0026#34;2022-08-16T12:18:53Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:19:18Z\u0026#34; realInjectedNum: 1 ","excerpt":"Java agent injector Usage In this example, you will learn how to use the java agent injector. …","ref":"/docs/skywalking-swck/next/examples/java-agent-injector-usage/","title":"Java agent injector Usage"},{"body":"Java agent injector Usage In this example, you will learn how to use the java agent injector.\nInstall injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nDeployment Example Let\u0026rsquo;s take a demo deployment for example.\n# demo1.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:labels:app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Enable Injection for Namespace and Deployments/StatefulSets. 
Firstly, set the injection label in your namespace as below.\nkubectl label namespace default(your namespace) swck-injection=enabled Secondly, set the injection label for your target Deployment/StatefulSet.\nkubectl -n default patch deployment demo1 --patch \u0026#39;{ \u0026#34;spec\u0026#34;: { \u0026#34;template\u0026#34;: { \u0026#34;metadata\u0026#34;: { \u0026#34;labels\u0026#34;: { \u0026#34;swck-java-agent-injected\u0026#34;: \u0026#34;true\u0026#34; } } } } }\u0026#39; Then the pods create by the Deployments/StatefulSets would be recreated with agent injected.\nThe injected pods would be like this:\nspec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jarimage:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agentcommand:- shimage:apache/skywalking-java-agent:8.10.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 demo1 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-5fbb6fcd98-cq5ws 1/1 Running 0 54s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:09:34Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-7fdffc7b95 uid: 417c413f-0cc0-41f9-b6eb-0192eb8c8622 resourceVersion: \u0026#34;25067\u0026#34; uid: 1cdab012-784c-4efb-b5d2-c032eb2fb22a spec: backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026#34;2022-08-16T12:09:34Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:10:04Z\u0026#34; realInjectedNum: 1 Use SwAgent CR to setup override default configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nApply SwAgent CR with correct label selector and container matcher:\n# SwAgent.yamlapiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;kubectl -n default apply swagent.yaml You can also get SwAgent CR by:\nkubectl -n default get SwAgent NAME AGE swagent-demo 38s Now the pod is still the old one, because pod could not load the SwAgent config automatically.\nSo you need to recreate pod to load SwAgent config. 
For the pods created by Deployment/StatefulSet, you can just simply delete the old pod.\n# verify pods to be delete  kubectl -n default get pods -l app=demo1 # delete pods kubectl -n default delete pods -l app=demo1 After the pods recreated, we can get injected pod as below.\nkubectl -n default get pods -l app=demo1 spec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.service_name=demo1,collector.backend_service=skywalking-system-oap.skywalking-system:11800- name:SW_LOGGING_LEVELvalue:DEBUG- name:SW_AGENT_COLLECTOR_BACKEND_SERVICESvalue:skywalking-system-oap.default.svc:11800image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agent-demoinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:swagent-demovolumeMounts:- mountPath:/sky/agentname:sky-agent-demovolumes:- emptyDir:{}name:sky-agent-demoUse annotation to override sidecar configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nThen add agent configuration and sidecar configuration to annotations as below.\n# demo1_anno.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:annotations:strategy.skywalking.apache.org/inject.Container:\u0026#34;demo1\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;app\u0026#34;agent.skywalking.apache.org/agent.sample_n_per_3_secs:\u0026#34;6\u0026#34;agent.skywalking.apache.org/agent.class_cache_mode:\u0026#34;MEMORY\u0026#34;agent.skywalking.apache.org/agent.ignore_suffix:\u0026#34;\u0026#39;jpg,.jpeg\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mount:\u0026#34;\u0026#39;plugins,activations\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mongodb.trace_param:\u0026#34;true\u0026#34;plugins.skywalking.apache.org/plugin.influxdb.trace_influxql:\u0026#34;false\u0026#34;optional.skywalking.apache.org:\u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34;optional-reporter.skywalking.apache.org:\u0026#34;kafka\u0026#34;labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Then we can get injected pod as below:\nkubectl -n default get pods -l app=demo1 spec:containers:- image:nginx:1.16.1imagePullPolicy:IfNotPresentname:nginx- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.ignore_suffix=\u0026#39;jpg,.jpeg\u0026#39;,agent.service_name=app,agent.class_cache_mode=MEMORY,agent.sample_n_per_3_secs=6,plugin.mongodb.trace_param=true,plugin.influxdb.trace_influxql=false,plugin.mount=\u0026#39;plugins,activations\u0026#39;image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1ports:- 
containerPort:8085protocol:TCPreadinessProbe:failureThreshold:10httpGet:path:/helloport:8085scheme:HTTPinitialDelaySeconds:3periodSeconds:3successThreshold:1timeoutSeconds:1volumeMounts:- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/\u0026amp;\u0026amp;cd /sky/agent/optional-reporter-plugins/ \u0026amp;\u0026amp; ls | grep -E \u0026#34;kafka\u0026#34; | xargs-i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 app 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-d48b96467-p7zrv 1/1 Running 0 5m25s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:18:53Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-d48b96467 uid: 2b7f1ac4-b459-41cd-8568-ecd4578ca457 resourceVersion: \u0026#34;26187\u0026#34; uid: c2b2f3e2-9442-4465-9423-d24249b2c53b spec: agentConfiguration: agent.class_cache_mode: MEMORY agent.ignore_suffix: \u0026#39;\u0026#39;\u0026#39;jpg,.jpeg\u0026#39;\u0026#39;\u0026#39; agent.sample_n_per_3_secs: \u0026#34;6\u0026#34; agent.service_name: app optional-plugin: trace|webflux|cloud-gateway-2.1.x optional-reporter-plugin: kafka plugin.influxdb.trace_influxql: \u0026#34;false\u0026#34; plugin.mongodb.trace_param: \u0026#34;true\u0026#34; plugin.mount: \u0026#39;\u0026#39;\u0026#39;plugins,activations\u0026#39;\u0026#39;\u0026#39; backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: app status: creationTime: \u0026#34;2022-08-16T12:18:53Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:19:18Z\u0026#34; realInjectedNum: 1 ","excerpt":"Java agent injector Usage In this example, you will learn how to use the java agent injector. …","ref":"/docs/skywalking-swck/v0.9.0/examples/java-agent-injector-usage/","title":"Java agent injector Usage"},{"body":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. 
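A minimal benchmark sketch (the class and method names are illustrative; the base class package is the one named above, so the module's default runtime conditions apply):

```java
import java.util.concurrent.ThreadLocalRandom;

import org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark;
import org.openjdk.jmh.annotations.Benchmark;

// Illustrative only: extending the shared base class picks up the module's
// default Measurement/Fork/Warmup settings; the measured logic is a stand-in.
public class ToBinaryStringBenchmark extends AbstractMicrobenchmark {
    @Benchmark
    public String toBinaryString() {
        // Return the result so JMH's blackhole keeps the work from being eliminated.
        return Integer.toBinaryString(ThreadLocalRandom.current().nextInt());
    }
}
```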
And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","excerpt":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing …","ref":"/docs/main/latest/en/guides/benchmark/","title":"Java Microbenchmark Harness (JMH)"},{"body":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","excerpt":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing …","ref":"/docs/main/next/en/guides/benchmark/","title":"Java Microbenchmark Harness (JMH)"},{"body":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. 
And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","excerpt":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing …","ref":"/docs/main/v9.6.0/en/guides/benchmark/","title":"Java Microbenchmark Harness (JMH)"},{"body":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","excerpt":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing …","ref":"/docs/main/v9.7.0/en/guides/benchmark/","title":"Java Microbenchmark Harness (JMH)"},{"body":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a CustomDefinitionResource called JavaAgent.\nWhen the pod is injected, the pod will be labeled with sidecar.skywalking.apache.org/succeed, then the controller will watch the specific pod labeled with sidecar.skywalking.apache.org/succeed. After the pod is created, the controller will create JavaAgent(custom resource), which contains the final agent configuration as below.\nSpec    Field Name Description     podSelector We hope users can use workloads to create pods, the podSelector is the selector label of workload.   serviceName serviceName is an important attribute that needs to be printed.   backendService backendService is an important attribute that needs to be printed.   agentConfiguration agentConfiguration contains serviceName、backendService and covered agent configuration, other default configurations will not be displayed, please see agent.config for details.    Status    Field Name Description     creationTime The creation time of the JavaAgent   lastUpdateTime The last Update time of the JavaAgent   expectedInjectiedNum The number of the pod that need to be injected   realInjectedNum The real number of injected pods.    Demo This demo shows the usage of javaagent. 
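To see which pods the injector has already handled, one can select on the success label mentioned above (a sketch; the selector only checks that the label key is present):

```shell
# List pods that carry the injector's success label, regardless of its value.
kubectl get pods -l sidecar.skywalking.apache.org/succeed
```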
If you want to see the complete process, please see java-agent-injector-usagefor details.\nWhen we use java-agent-injector, we can get custom resources as below.\n$ kubectl get javaagent -A NAMESPACE NAME PODSELECTOR SERVICENAME BACKENDSERVICE default app-demo1-javaagent app=demo1 Your_ApplicationName 127.0.0.1:11800 default app-demo2-javaagent app=demo2 Your_ApplicationName 127.0.0.1:11800 $ kubectl get pod -l app=demo1 NAME READY STATUS RESTARTS AGE demo1-bb97b8b4d-bkwm4 1/1 Running 0 28s demo1-bb97b8b4d-wxgs2 1/1 Running 0 28s $ kubectl get pod -l app=demo2 NAME READY STATUS RESTARTS AGE app2-0 1/1 Running 0 27s app2-1 1/1 Running 0 25s app2-2 1/1 Running 0 23s If we want to see more information, we can get the specific javaagent\u0026rsquo;s yaml as below.\n$ kubectl get javaagent app-demo1-javaagent -oyaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026quot;2021-10-14T07:07:12Z\u0026quot; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-bb97b8b4d uid: c712924f-4652-4c07-8332-b3938ad72392 resourceVersion: \u0026quot;330808\u0026quot; selfLink: /apis/operator.skywalking.apache.org/v1alpha1/namespaces/default/javaagents/app-demo1-javaagent uid: 9350338f-15a5-4832-84d1-530f8d0e1c3b spec: agentConfiguration: agent.namespace: default-namespace agent.service_name: Your_ApplicationName collector.backend_service: 127.0.0.1:11800 backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026quot;2021-10-14T07:07:12Z\u0026quot; expectedInjectiedNum: 2 lastUpdateTime: \u0026quot;2021-10-14T07:07:14Z\u0026quot; realInjectedNum: 2 ","excerpt":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a …","ref":"/docs/skywalking-swck/latest/javaagent/","title":"JavaAgent Introduction"},{"body":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a CustomDefinitionResource called JavaAgent.\nWhen the pod is injected, the pod will be labeled with sidecar.skywalking.apache.org/succeed, then the controller will watch the specific pod labeled with sidecar.skywalking.apache.org/succeed. After the pod is created, the controller will create JavaAgent(custom resource), which contains the final agent configuration as below.\nSpec    Field Name Description     podSelector We hope users can use workloads to create pods, the podSelector is the selector label of workload.   serviceName serviceName is an important attribute that needs to be printed.   backendService backendService is an important attribute that needs to be printed.   agentConfiguration agentConfiguration contains serviceName、backendService and covered agent configuration, other default configurations will not be displayed, please see agent.config for details.    Status    Field Name Description     creationTime The creation time of the JavaAgent   lastUpdateTime The last Update time of the JavaAgent   expectedInjectiedNum The number of the pod that need to be injected   realInjectedNum The real number of injected pods.    Demo This demo shows the usage of javaagent. 
If you want to see the complete process, please see java-agent-injector-usagefor details.\nWhen we use java-agent-injector, we can get custom resources as below.\n$ kubectl get javaagent -A NAMESPACE NAME PODSELECTOR SERVICENAME BACKENDSERVICE default app-demo1-javaagent app=demo1 Your_ApplicationName 127.0.0.1:11800 default app-demo2-javaagent app=demo2 Your_ApplicationName 127.0.0.1:11800 $ kubectl get pod -l app=demo1 NAME READY STATUS RESTARTS AGE demo1-bb97b8b4d-bkwm4 1/1 Running 0 28s demo1-bb97b8b4d-wxgs2 1/1 Running 0 28s $ kubectl get pod -l app=demo2 NAME READY STATUS RESTARTS AGE app2-0 1/1 Running 0 27s app2-1 1/1 Running 0 25s app2-2 1/1 Running 0 23s If we want to see more information, we can get the specific javaagent\u0026rsquo;s yaml as below.\n$ kubectl get javaagent app-demo1-javaagent -oyaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026quot;2021-10-14T07:07:12Z\u0026quot; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-bb97b8b4d uid: c712924f-4652-4c07-8332-b3938ad72392 resourceVersion: \u0026quot;330808\u0026quot; selfLink: /apis/operator.skywalking.apache.org/v1alpha1/namespaces/default/javaagents/app-demo1-javaagent uid: 9350338f-15a5-4832-84d1-530f8d0e1c3b spec: agentConfiguration: agent.namespace: default-namespace agent.service_name: Your_ApplicationName collector.backend_service: 127.0.0.1:11800 backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026quot;2021-10-14T07:07:12Z\u0026quot; expectedInjectiedNum: 2 lastUpdateTime: \u0026quot;2021-10-14T07:07:14Z\u0026quot; realInjectedNum: 2 ","excerpt":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a …","ref":"/docs/skywalking-swck/next/javaagent/","title":"JavaAgent Introduction"},{"body":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a CustomDefinitionResource called JavaAgent.\nWhen the pod is injected, the pod will be labeled with sidecar.skywalking.apache.org/succeed, then the controller will watch the specific pod labeled with sidecar.skywalking.apache.org/succeed. After the pod is created, the controller will create JavaAgent(custom resource), which contains the final agent configuration as below.\nSpec    Field Name Description     podSelector We hope users can use workloads to create pods, the podSelector is the selector label of workload.   serviceName serviceName is an important attribute that needs to be printed.   backendService backendService is an important attribute that needs to be printed.   agentConfiguration agentConfiguration contains serviceName、backendService and covered agent configuration, other default configurations will not be displayed, please see agent.config for details.    Status    Field Name Description     creationTime The creation time of the JavaAgent   lastUpdateTime The last Update time of the JavaAgent   expectedInjectiedNum The number of the pod that need to be injected   realInjectedNum The real number of injected pods.    Demo This demo shows the usage of javaagent. 
If you want to see the complete process, please see java-agent-injector-usagefor details.\nWhen we use java-agent-injector, we can get custom resources as below.\n$ kubectl get javaagent -A NAMESPACE NAME PODSELECTOR SERVICENAME BACKENDSERVICE default app-demo1-javaagent app=demo1 Your_ApplicationName 127.0.0.1:11800 default app-demo2-javaagent app=demo2 Your_ApplicationName 127.0.0.1:11800 $ kubectl get pod -l app=demo1 NAME READY STATUS RESTARTS AGE demo1-bb97b8b4d-bkwm4 1/1 Running 0 28s demo1-bb97b8b4d-wxgs2 1/1 Running 0 28s $ kubectl get pod -l app=demo2 NAME READY STATUS RESTARTS AGE app2-0 1/1 Running 0 27s app2-1 1/1 Running 0 25s app2-2 1/1 Running 0 23s If we want to see more information, we can get the specific javaagent\u0026rsquo;s yaml as below.\n$ kubectl get javaagent app-demo1-javaagent -oyaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026quot;2021-10-14T07:07:12Z\u0026quot; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-bb97b8b4d uid: c712924f-4652-4c07-8332-b3938ad72392 resourceVersion: \u0026quot;330808\u0026quot; selfLink: /apis/operator.skywalking.apache.org/v1alpha1/namespaces/default/javaagents/app-demo1-javaagent uid: 9350338f-15a5-4832-84d1-530f8d0e1c3b spec: agentConfiguration: agent.namespace: default-namespace agent.service_name: Your_ApplicationName collector.backend_service: 127.0.0.1:11800 backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026quot;2021-10-14T07:07:12Z\u0026quot; expectedInjectiedNum: 2 lastUpdateTime: \u0026quot;2021-10-14T07:07:14Z\u0026quot; realInjectedNum: 2 ","excerpt":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a …","ref":"/docs/skywalking-swck/v0.9.0/javaagent/","title":"JavaAgent Introduction"},{"body":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5; ZHEAP_USAGE = 6; COMPRESSED_CLASS_SPACE_USAGE = 7; CODEHEAP_NON_NMETHODS_USAGE = 8; CODEHEAP_PROFILED_NMETHODS_USAGE = 9; CODEHEAP_NON_PROFILED_NMETHODS_USAGE = 10;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","excerpt":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of …","ref":"/docs/main/latest/en/api/jvm-protocol/","title":"JVM Metrics APIs"},{"body":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5; ZHEAP_USAGE = 6; COMPRESSED_CLASS_SPACE_USAGE = 7; CODEHEAP_NON_NMETHODS_USAGE = 8; CODEHEAP_PROFILED_NMETHODS_USAGE = 9; CODEHEAP_NON_PROFILED_NMETHODS_USAGE = 10;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","excerpt":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of …","ref":"/docs/main/next/en/api/jvm-protocol/","title":"JVM Metrics APIs"},{"body":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","excerpt":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of …","ref":"/docs/main/v9.4.0/en/api/jvm-protocol/","title":"JVM Metrics APIs"},{"body":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","excerpt":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of …","ref":"/docs/main/v9.5.0/en/api/jvm-protocol/","title":"JVM Metrics APIs"},{"body":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","excerpt":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of …","ref":"/docs/main/v9.6.0/en/api/jvm-protocol/","title":"JVM Metrics APIs"},{"body":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5; ZHEAP_USAGE = 6; COMPRESSED_CLASS_SPACE_USAGE = 7; CODEHEAP_NON_NMETHODS_USAGE = 8; CODEHEAP_PROFILED_NMETHODS_USAGE = 9; CODEHEAP_NON_PROFILED_NMETHODS_USAGE = 10;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","excerpt":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of …","ref":"/docs/main/v9.7.0/en/api/jvm-protocol/","title":"JVM Metrics APIs"},{"body":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","excerpt":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, …","ref":"/docs/main/v9.0.0/en/protocols/jvm-protocol/","title":"JVM Metrics Service"},{"body":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","excerpt":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, …","ref":"/docs/main/v9.1.0/en/protocols/jvm-protocol/","title":"JVM Metrics Service"},{"body":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","excerpt":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, …","ref":"/docs/main/v9.2.0/en/protocols/jvm-protocol/","title":"JVM Metrics Service"},{"body":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","excerpt":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, …","ref":"/docs/main/v9.3.0/en/protocols/jvm-protocol/","title":"JVM Metrics Service"},{"body":"K8s monitoring SkyWalking leverages K8s kube-state-metrics and cAdvisor for collecting metrics data from K8s, and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. 
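For the K8s data flow described next, a rough OpenTelemetry Collector sketch along the following lines could scrape kube-state-metrics and forward the metrics to the OAP. The scrape target, exporter choice, and endpoint are assumptions; the showcase configuration referenced in the Setup steps is the authoritative example.

```yaml
# Sketch only -- adapt from the showcase configuration referenced in the Setup steps.
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: kube-state-metrics
          static_configs:
            - targets: ["kube-state-metrics.kube-system:8080"]   # assumed service address
exporters:
  opencensus:                                                    # exporter named in this page's data flow
    endpoint: "skywalking-oap.skywalking.svc:11800"              # assumed OAP gRPC address
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [opencensus]
```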
This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus GRPC Exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a full example of configuration and recommended version , you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  K8s Cluster Monitoring K8s cluster monitoring provide monitoring of the status and resources of the K8S Cluster, including the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as a Instance in OAP, and land on the Layer: K8S.\nK8s Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    K8s Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this 
node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    K8s Service Monitoring K8s Service Monitoring provide observe service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nK8s Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizing You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/k8s-cluster.yaml,/config/otel-oc-rules/k8s-node.yaml, /config/otel-oc-rules/k8s-service.yaml.\nThe K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"K8s monitoring SkyWalking leverages K8s kube-state-metrics and cAdvisor for collecting metrics data …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-k8s-monitoring/","title":"K8s monitoring"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. 
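For readability, the minimal configuration that the following sentences quote inline corresponds to an application.yml fragment roughly like this; the values are the documented defaults.

```yaml
# Minimal fragment to enable the Kafka Fetcher, using the defaults quoted in the text.
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    namespace: ${SW_NAMESPACE:""}
```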
To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/latest/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
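The Kafka MirrorMaker 2.0 variant quoted inline a little earlier reads more clearly when formatted; the alias and separator values below are placeholders to be taken from your MirrorMaker configuration.

```yaml
# Formatted view of the MirrorMaker 2.0 example quoted inline above; alias/separator are placeholders.
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    partitions: ${SW_KAFKA_FETCHER_PARTITIONS:3}
    replicationFactor: ${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}
    mm2SourceAlias: ${SW_KAFKA_MM2_SOURCE_ALIAS:""}          # source cluster alias
    mm2SourceSeparator: ${SW_KAFKA_MM2_SOURCE_SEPARATOR:""}  # separator used in replicated topic names
```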
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/next/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agent is delivered. Check the agent documentation for details. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported. Kafka Fetcher can work with gRPC/HTTP Receivers at the same time for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure as follows.\nNamespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to topic name. You should also set namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. 
For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You could find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agent is …","ref":"/docs/main/v9.0.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. 
Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.1.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.2.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. 
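For example, with a namespace set as below, this OAP cluster's topics are kept separate from others sharing the same Kafka cluster; the resulting topic names carry the namespace as a prefix (e.g. something like production-skywalking-segments, the exact form being an assumption here).

```yaml
# Example: isolate this OAP cluster on a shared Kafka cluster via a namespace prefix.
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    namespace: ${SW_NAMESPACE:production}   # prefixes topic names, e.g. production-skywalking-segments (assumed form)
```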
You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.3.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). 
Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. 
You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.4.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. 
For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.5.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.6.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","excerpt":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have …","ref":"/docs/main/v9.7.0/en/setup/backend/kafka-fetcher/","title":"Kafka Fetcher"},{"body":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the Layer: KAFKA.\nData flow  The prometheus_JMX_Exporter collect metrics data from Kafka. Note: Running the exporter as a Java agent. OpenTelemetry Collector fetches metrics from prometheus_JMX_Exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup prometheus_JMX_Exporter. This is an example for JMX Exporter configuration kafka-2_0_0.yml. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Kafka Monitoring Kafka monitoring provides multidimensional metrics monitoring of Kafka cluster as Layer: KAFKA Service in the OAP. In each cluster, the kafka brokers are represented as Instance.\nKafka Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Under-Replicated Partitions meter_kafka_under_replicated_partitions Number of under-replicated partitions in the broker. A higher number is a sign of potential issues. Prometheus JMX Exporter   Offline Partitions Count meter_kafka_offline_partitions_count Number of partitions that are offline. Non-zero values indicate a problem. Prometheus JMX Exporter   Partition Count meter_kafka_partition_count Total number of partitions on the broker. Prometheus JMX Exporter   Leader Count meter_kafka_leader_count Number of leader partitions on this broker. Prometheus JMX Exporter   Active Controller Count meter_kafka_active_controller_count The number of active controllers in the cluster. Typically should be 1. Prometheus JMX Exporter   Leader Election Rate meter_kafka_leader_election_rate The rate of leader elections per minute. High rate could be a sign of instability. 
Prometheus JMX Exporter   Unclean Leader Elections Per Second meter_kafka_unclean_leader_elections_per_second The rate of unclean leader elections per second. Non-zero values indicate a serious problem. Prometheus JMX Exporter   Max Lag meter_kafka_max_lag The maximum lag between the leader and followers in terms of messages still needed to be sent. Higher lag indicates delays. Prometheus JMX Exporter    Kafka Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_kafka_broker_cpu_time_total CPU usage in percentage Prometheus JMX Exporter   Memory Usage % meter_kafka_broker_memory_usage_percentage JVM heap memory usage in percentage Prometheus JMX Exporter   Incoming Messages Msg/sec meter_kafka_broker_messages_per_second Rate of incoming messages Prometheus JMX Exporter   Bytes In Bytes/sec meter_kafka_broker_bytes_in_per_second Rate of incoming bytes Prometheus JMX Exporter   Bytes Out Bytes/sec meter_kafka_broker_bytes_out_per_second Rate of outgoing bytes Prometheus JMX Exporter   Replication Bytes In Bytes/sec meter_kafka_broker_replication_bytes_in_per_second Rate of incoming bytes for replication Prometheus JMX Exporter   Replication Bytes Out Bytes/sec meter_kafka_broker_replication_bytes_out_per_second Rate of outgoing bytes for replication Prometheus JMX Exporter   Under-Replicated Partitions Count meter_kafka_broker_under_replicated_partitions Number of under-replicated partitions Prometheus JMX Exporter   Under Min ISR Partition Count Count meter_kafka_broker_under_min_isr_partition_count Number of partitions below the minimum ISR (In-Sync Replicas) Prometheus JMX Exporter   Partition Count Count meter_kafka_broker_partition_count Total number of partitions Prometheus JMX Exporter   Leader Count Count meter_kafka_broker_leader_count Number of partitions for which this broker is the leader Prometheus JMX Exporter   ISR Shrinks Count/sec meter_kafka_broker_isr_shrinks_per_second Rate of ISR (In-Sync Replicas) shrinking Prometheus JMX Exporter   ISR Expands Count/sec meter_kafka_broker_isr_expands_per_second Rate of ISR (In-Sync Replicas) expanding Prometheus JMX Exporter   Max Lag Count meter_kafka_broker_max_lag Maximum lag between the leader and follower for a partition Prometheus JMX Exporter   Purgatory Size Count meter_kafka_broker_purgatory_size Size of purgatory for Produce and Fetch operations Prometheus JMX Exporter   Garbage Collector Count Count/sec meter_kafka_broker_garbage_collector_count Rate of garbage collection cycles Prometheus JMX Exporter   Requests Per Second Req/sec meter_kafka_broker_requests_per_second Rate of requests to the broker Prometheus JMX Exporter   Request Queue Time ms meter_kafka_broker_request_queue_time_ms Average time a request spends in the request queue Prometheus JMX Exporter   Remote Time ms meter_kafka_broker_remote_time_ms Average time taken for a remote operation Prometheus JMX Exporter   Response Queue Time ms meter_kafka_broker_response_queue_time_ms Average time a response spends in the response queue Prometheus JMX Exporter   Response Send Time ms meter_kafka_broker_response_send_time_ms Average time taken to send a response Prometheus JMX Exporter   Network Processor Avg Idle % meter_kafka_broker_network_processor_avg_idle_percent Percentage of idle time for the network processor Prometheus JMX Exporter   Topic Messages In Total Count meter_kafka_broker_topic_messages_in_total Total number of messages per topic Prometheus JMX Exporter   Topic Bytes Out Per Second Bytes/sec 
meter_kafka_broker_topic_bytesout_per_second Rate of outgoing bytes per topic Prometheus JMX Exporter   Topic Bytes In Per Second Bytes/sec meter_kafka_broker_topic_bytesin_per_second Rate of incoming bytes per topic Prometheus JMX Exporter   Topic Fetch Requests Per Second Req/sec meter_kafka_broker_topic_fetch_requests_per_second Rate of fetch requests per topic Prometheus JMX Exporter   Topic Produce Requests Per Second Req/sec meter_kafka_broker_topic_produce_requests_per_second Rate of produce requests per topic Prometheus JMX Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/kafka/kafka-cluster.yaml, /config/otel-rules/kafka/kafka-node.yaml. The Kafka dashboard panel configurations are found in /config/ui-initialized-templates/kafka.\nReference For more details on monitoring Kafka and the metrics to focus on, see the following articles:\n Monitoring Kafka Streams Applications Kafka Monitoring  ","excerpt":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka …","ref":"/docs/main/latest/en/setup/backend/backend-kafka-monitoring/","title":"Kafka monitoring"},{"body":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the Layer: KAFKA.\nData flow  The prometheus_JMX_Exporter collect metrics data from Kafka. Note: Running the exporter as a Java agent. OpenTelemetry Collector fetches metrics from prometheus_JMX_Exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup prometheus_JMX_Exporter. This is an example for JMX Exporter configuration kafka-2_0_0.yml. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Kafka Monitoring Kafka monitoring provides multidimensional metrics monitoring of Kafka cluster as Layer: KAFKA Service in the OAP. In each cluster, the kafka brokers are represented as Instance.\nKafka Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Under-Replicated Partitions meter_kafka_under_replicated_partitions Number of under-replicated partitions in the broker. A higher number is a sign of potential issues. Prometheus JMX Exporter   Offline Partitions Count meter_kafka_offline_partitions_count Number of partitions that are offline. Non-zero values indicate a problem. Prometheus JMX Exporter   Partition Count meter_kafka_partition_count Total number of partitions on the broker. Prometheus JMX Exporter   Leader Count meter_kafka_leader_count Number of leader partitions on this broker. Prometheus JMX Exporter   Active Controller Count meter_kafka_active_controller_count The number of active controllers in the cluster. Typically should be 1. Prometheus JMX Exporter   Leader Election Rate meter_kafka_leader_election_rate The rate of leader elections per minute. High rate could be a sign of instability. Prometheus JMX Exporter   Unclean Leader Elections Per Second meter_kafka_unclean_leader_elections_per_second The rate of unclean leader elections per second. Non-zero values indicate a serious problem. 
Prometheus JMX Exporter   Max Lag meter_kafka_max_lag The maximum lag between the leader and followers in terms of messages still needed to be sent. Higher lag indicates delays. Prometheus JMX Exporter    Kafka Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_kafka_broker_cpu_time_total CPU usage in percentage Prometheus JMX Exporter   Memory Usage % meter_kafka_broker_memory_usage_percentage JVM heap memory usage in percentage Prometheus JMX Exporter   Incoming Messages Msg/sec meter_kafka_broker_messages_per_second Rate of incoming messages Prometheus JMX Exporter   Bytes In Bytes/sec meter_kafka_broker_bytes_in_per_second Rate of incoming bytes Prometheus JMX Exporter   Bytes Out Bytes/sec meter_kafka_broker_bytes_out_per_second Rate of outgoing bytes Prometheus JMX Exporter   Replication Bytes In Bytes/sec meter_kafka_broker_replication_bytes_in_per_second Rate of incoming bytes for replication Prometheus JMX Exporter   Replication Bytes Out Bytes/sec meter_kafka_broker_replication_bytes_out_per_second Rate of outgoing bytes for replication Prometheus JMX Exporter   Under-Replicated Partitions Count meter_kafka_broker_under_replicated_partitions Number of under-replicated partitions Prometheus JMX Exporter   Under Min ISR Partition Count Count meter_kafka_broker_under_min_isr_partition_count Number of partitions below the minimum ISR (In-Sync Replicas) Prometheus JMX Exporter   Partition Count Count meter_kafka_broker_partition_count Total number of partitions Prometheus JMX Exporter   Leader Count Count meter_kafka_broker_leader_count Number of partitions for which this broker is the leader Prometheus JMX Exporter   ISR Shrinks Count/sec meter_kafka_broker_isr_shrinks_per_second Rate of ISR (In-Sync Replicas) shrinking Prometheus JMX Exporter   ISR Expands Count/sec meter_kafka_broker_isr_expands_per_second Rate of ISR (In-Sync Replicas) expanding Prometheus JMX Exporter   Max Lag Count meter_kafka_broker_max_lag Maximum lag between the leader and follower for a partition Prometheus JMX Exporter   Purgatory Size Count meter_kafka_broker_purgatory_size Size of purgatory for Produce and Fetch operations Prometheus JMX Exporter   Garbage Collector Count Count/sec meter_kafka_broker_garbage_collector_count Rate of garbage collection cycles Prometheus JMX Exporter   Requests Per Second Req/sec meter_kafka_broker_requests_per_second Rate of requests to the broker Prometheus JMX Exporter   Request Queue Time ms meter_kafka_broker_request_queue_time_ms Average time a request spends in the request queue Prometheus JMX Exporter   Remote Time ms meter_kafka_broker_remote_time_ms Average time taken for a remote operation Prometheus JMX Exporter   Response Queue Time ms meter_kafka_broker_response_queue_time_ms Average time a response spends in the response queue Prometheus JMX Exporter   Response Send Time ms meter_kafka_broker_response_send_time_ms Average time taken to send a response Prometheus JMX Exporter   Network Processor Avg Idle % meter_kafka_broker_network_processor_avg_idle_percent Percentage of idle time for the network processor Prometheus JMX Exporter   Topic Messages In Total Count meter_kafka_broker_topic_messages_in_total Total number of messages per topic Prometheus JMX Exporter   Topic Bytes Out Per Second Bytes/sec meter_kafka_broker_topic_bytesout_per_second Rate of outgoing bytes per topic Prometheus JMX Exporter   Topic Bytes In Per Second Bytes/sec meter_kafka_broker_topic_bytesin_per_second Rate of incoming bytes 
per topic Prometheus JMX Exporter   Topic Fetch Requests Per Second Req/sec meter_kafka_broker_topic_fetch_requests_per_second Rate of fetch requests per topic Prometheus JMX Exporter   Topic Produce Requests Per Second Req/sec meter_kafka_broker_topic_produce_requests_per_second Rate of produce requests per topic Prometheus JMX Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/kafka/kafka-cluster.yaml, /config/otel-rules/kafka/kafka-node.yaml. The Kafka dashboard panel configurations are found in /config/ui-initialized-templates/kafka.\nReference For more details on monitoring Kafka and the metrics to focus on, see the following articles:\n Monitoring Kafka Streams Applications Kafka Monitoring  ","excerpt":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka …","ref":"/docs/main/next/en/setup/backend/backend-kafka-monitoring/","title":"Kafka monitoring"},{"body":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the Layer: KAFKA.\nData flow  The prometheus_JMX_Exporter collect metrics data from Kafka. Note: Running the exporter as a Java agent. OpenTelemetry Collector fetches metrics from prometheus_JMX_Exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup prometheus_JMX_Exporter. This is an example for JMX Exporter configuration kafka-2_0_0.yml. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Kafka Monitoring Kafka monitoring provides multidimensional metrics monitoring of Kafka cluster as Layer: KAFKA Service in the OAP. In each cluster, the kafka brokers are represented as Instance.\nKafka Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Under-Replicated Partitions meter_kafka_under_replicated_partitions Number of under-replicated partitions in the broker. A higher number is a sign of potential issues. Prometheus JMX Exporter   Offline Partitions Count meter_kafka_offline_partitions_count Number of partitions that are offline. Non-zero values indicate a problem. Prometheus JMX Exporter   Partition Count meter_kafka_partition_count Total number of partitions on the broker. Prometheus JMX Exporter   Leader Count meter_kafka_leader_count Number of leader partitions on this broker. Prometheus JMX Exporter   Active Controller Count meter_kafka_active_controller_count The number of active controllers in the cluster. Typically should be 1. Prometheus JMX Exporter   Leader Election Rate meter_kafka_leader_election_rate The rate of leader elections per minute. High rate could be a sign of instability. Prometheus JMX Exporter   Unclean Leader Elections Per Second meter_kafka_unclean_leader_elections_per_second The rate of unclean leader elections per second. Non-zero values indicate a serious problem. Prometheus JMX Exporter   Max Lag meter_kafka_max_lag The maximum lag between the leader and followers in terms of messages still needed to be sent. Higher lag indicates delays. 
Prometheus JMX Exporter    Kafka Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_kafka_broker_cpu_time_total CPU usage in percentage Prometheus JMX Exporter   Memory Usage % meter_kafka_broker_memory_usage_percentage JVM heap memory usage in percentage Prometheus JMX Exporter   Incoming Messages Msg/sec meter_kafka_broker_messages_per_second Rate of incoming messages Prometheus JMX Exporter   Bytes In Bytes/sec meter_kafka_broker_bytes_in_per_second Rate of incoming bytes Prometheus JMX Exporter   Bytes Out Bytes/sec meter_kafka_broker_bytes_out_per_second Rate of outgoing bytes Prometheus JMX Exporter   Replication Bytes In Bytes/sec meter_kafka_broker_replication_bytes_in_per_second Rate of incoming bytes for replication Prometheus JMX Exporter   Replication Bytes Out Bytes/sec meter_kafka_broker_replication_bytes_out_per_second Rate of outgoing bytes for replication Prometheus JMX Exporter   Under-Replicated Partitions Count meter_kafka_broker_under_replicated_partitions Number of under-replicated partitions Prometheus JMX Exporter   Under Min ISR Partition Count Count meter_kafka_broker_under_min_isr_partition_count Number of partitions below the minimum ISR (In-Sync Replicas) Prometheus JMX Exporter   Partition Count Count meter_kafka_broker_partition_count Total number of partitions Prometheus JMX Exporter   Leader Count Count meter_kafka_broker_leader_count Number of partitions for which this broker is the leader Prometheus JMX Exporter   ISR Shrinks Count/sec meter_kafka_broker_isr_shrinks_per_second Rate of ISR (In-Sync Replicas) shrinking Prometheus JMX Exporter   ISR Expands Count/sec meter_kafka_broker_isr_expands_per_second Rate of ISR (In-Sync Replicas) expanding Prometheus JMX Exporter   Max Lag Count meter_kafka_broker_max_lag Maximum lag between the leader and follower for a partition Prometheus JMX Exporter   Purgatory Size Count meter_kafka_broker_purgatory_size Size of purgatory for Produce and Fetch operations Prometheus JMX Exporter   Garbage Collector Count Count/sec meter_kafka_broker_garbage_collector_count Rate of garbage collection cycles Prometheus JMX Exporter   Requests Per Second Req/sec meter_kafka_broker_requests_per_second Rate of requests to the broker Prometheus JMX Exporter   Request Queue Time ms meter_kafka_broker_request_queue_time_ms Average time a request spends in the request queue Prometheus JMX Exporter   Remote Time ms meter_kafka_broker_remote_time_ms Average time taken for a remote operation Prometheus JMX Exporter   Response Queue Time ms meter_kafka_broker_response_queue_time_ms Average time a response spends in the response queue Prometheus JMX Exporter   Response Send Time ms meter_kafka_broker_response_send_time_ms Average time taken to send a response Prometheus JMX Exporter   Network Processor Avg Idle % meter_kafka_broker_network_processor_avg_idle_percent Percentage of idle time for the network processor Prometheus JMX Exporter   Topic Messages In Total Count meter_kafka_broker_topic_messages_in_total Total number of messages per topic Prometheus JMX Exporter   Topic Bytes Out Per Second Bytes/sec meter_kafka_broker_topic_bytesout_per_second Rate of outgoing bytes per topic Prometheus JMX Exporter   Topic Bytes In Per Second Bytes/sec meter_kafka_broker_topic_bytesin_per_second Rate of incoming bytes per topic Prometheus JMX Exporter   Topic Fetch Requests Per Second Req/sec meter_kafka_broker_topic_fetch_requests_per_second Rate of fetch requests per topic Prometheus JMX 
Exporter   Topic Produce Requests Per Second Req/sec meter_kafka_broker_topic_produce_requests_per_second Rate of produce requests per topic Prometheus JMX Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/kafka/kafka-cluster.yaml, /config/otel-rules/kafka/kafka-node.yaml. The Kafka dashboard panel configurations are found in /config/ui-initialized-templates/kafka.\nReference For more details on monitoring Kafka and the metrics to focus on, see the following articles:\n Monitoring Kafka Streams Applications Kafka Monitoring  ","excerpt":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-kafka-monitoring/","title":"Kafka monitoring"},{"body":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","excerpt":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-kafka/","title":"Kafka Poll And Invoke"},{"body":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  
public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","excerpt":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-kafka/","title":"Kafka Poll And Invoke"},{"body":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","excerpt":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-kafka/","title":"Kafka Poll And Invoke"},{"body":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  
public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","excerpt":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-kafka/","title":"Kafka Poll And Invoke"},{"body":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","excerpt":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-kafka/","title":"Kafka Poll And Invoke"},{"body":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means that the skywalking agent will report the traces, metrics, logs etc. to SkyWalking OAP Server by gPRC protocol.\nAt the same time, SkyWalking also supports kafka-fetcher, so you can report traces, metrics, logs, etc. 
by kafka.\nBut the skywalking agent does not compile the kafka-reporter feature by default, you need to enable the it.\nSteps   Compile the skywalking agent with feature kafka-reporter.\nFor pecl:\npecl install skywalking_agent Enable the kafka reporter interactively:\n68 source files, building running: phpize Configuring for: PHP Api Version: 20220829 Zend Module Api No: 20220829 Zend Extension Api No: 420220829 enable cargo debug? [no] : enable kafka reporter? [no] : yes Or, build from sources:\nphpize ./configure --enable-kafka-reporter make make install   Config php.ini.\nSwitch to use kafka reporter.\n[skywalking_agent] extension = skywalking_agent.so skywalking_agent.reporter_type = kafka skywalking_agent.kafka_bootstrap_servers = 127.0.0.1:9092,127.0.0.2:9092,127.0.0.3:9092 If you want to custom the kafka reporter properties, you can specify it by JSON format:\nskywalking_agent.kafka_producer_config = {\u0026#34;delivery.timeout.ms\u0026#34;: \u0026#34;12000\u0026#34;}   ","excerpt":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means …","ref":"/docs/skywalking-php/latest/en/reporter/kafka-reporter/","title":"Kafka reporter"},{"body":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means that the skywalking agent will report the traces, metrics, logs etc. to SkyWalking OAP Server by gPRC protocol.\nAt the same time, SkyWalking also supports kafka-fetcher, so you can report traces, metrics, logs, etc. by kafka.\nBut the skywalking agent does not compile the kafka-reporter feature by default, you need to enable the it.\nSteps   Compile the skywalking agent with feature kafka-reporter.\nFor pecl:\npecl install skywalking_agent Enable the kafka reporter interactively:\n68 source files, building running: phpize Configuring for: PHP Api Version: 20220829 Zend Module Api No: 20220829 Zend Extension Api No: 420220829 enable cargo debug? [no] : enable kafka reporter? [no] : yes Or, build from sources:\nphpize ./configure --enable-kafka-reporter make make install   Config php.ini.\nSwitch to use kafka reporter.\n[skywalking_agent] extension = skywalking_agent.so skywalking_agent.reporter_type = kafka skywalking_agent.kafka_bootstrap_servers = 127.0.0.1:9092,127.0.0.2:9092,127.0.0.3:9092 If you want to custom the kafka reporter properties, you can specify it by JSON format:\nskywalking_agent.kafka_producer_config = {\u0026#34;delivery.timeout.ms\u0026#34;: \u0026#34;12000\u0026#34;}   ","excerpt":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means …","ref":"/docs/skywalking-php/next/en/reporter/kafka-reporter/","title":"Kafka reporter"},{"body":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means that the skywalking agent will report the traces, metrics, logs etc. to SkyWalking OAP Server by gPRC protocol.\nAt the same time, SkyWalking also supports kafka-fetcher, so you can report traces, metrics, logs, etc. by kafka.\nBut the skywalking agent does not compile the kafka-reporter feature by default, you need to enable the it.\nSteps   Compile the skywalking agent with feature kafka-reporter.\nFor pecl:\npecl install skywalking_agent Enable the kafka reporter interactively:\n68 source files, building running: phpize Configuring for: PHP Api Version: 20220829 Zend Module Api No: 20220829 Zend Extension Api No: 420220829 enable cargo debug? [no] : enable kafka reporter? 
[no] : yes Or, build from sources:\nphpize ./configure --enable-kafka-reporter make make install   Config php.ini.\nSwitch to use kafka reporter.\n[skywalking_agent] extension = skywalking_agent.so skywalking_agent.reporter_type = kafka skywalking_agent.kafka_bootstrap_servers = 127.0.0.1:9092,127.0.0.2:9092,127.0.0.3:9092 If you want to custom the kafka reporter properties, you can specify it by JSON format:\nskywalking_agent.kafka_producer_config = {\u0026#34;delivery.timeout.ms\u0026#34;: \u0026#34;12000\u0026#34;}   ","excerpt":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means …","ref":"/docs/skywalking-php/v0.7.0/en/reporter/kafka-reporter/","title":"Kafka reporter"},{"body":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the developers and end users understand how the agent works easier.\nMethod Interceptor Method interception is particularly important in SkyWalking Go, as it enables the creation of plugins. In SkyWalking Go, method interception mainly involves the following key points:\n Finding Method: Using AST to find method information in the target code to be enhanced. Modifying Methods: Enhancing the specified methods and embedding interceptor code. Saving and Compiling: Updating the modified files in the compilation arguments.  Finding Method When looking for methods, the SkyWalking Go Agent requires to search according to the provided compilation arguments, which mainly include the following two parts:\n Package information: Based on the package name provided by the arguments, the Agent can find the specific plugin. Go files: When a matching plugin is found, the Agent reads the .go files and uses AST to parse the method information from these source files. When the method information matches the method information required by the plugin for the interception, the agent would consider the method found.  Modifying Methods After finding the method, the SkyWalking Go Agent needs to modify the method implication and embed the interceptor code.\nChange Method Body When intercepting a method, the first thing to do is to modify the method and embed the template code. This code segment includes two method executions:\n Before method execution: Pass in the current method\u0026rsquo;s arguments, instances, and other information. After method execution: Using the defer method, intercept the result parameters after the code execution is completed.  Based on these two methods, the agent can intercept before and after method execution.\nIn order not to affect the line of code execution, this code segment will only be executed in the same line as the first statement in the method. This ensures that when an exception occurs in the framework code execution, the exact location can still be found without being affected by the enhanced code.\nWrite Delegator File After the agent enhances the method body, it needs to implement the above two methods and write them into a single file, called the delegator file. These two methods would do the following:\n Before method execution: Build by the template. Build the context for before and after interception, and pass the parameter information during execution to the interceptor in each plugin. After method execution: Build by the template. Pass the method return value to the interceptor and execute the method.  
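As a rough illustration of the before/defer-after template that the delegator file implements (this is not the agent's actual generated code; invocation, beforeInvoke, and afterInvoke are names invented for this sketch):

package main

import "fmt"

// invocation is a hypothetical stand-in for the context the delegator
// methods build and share between the before and after hooks.
type invocation struct {
	method string
	args   []interface{}
	result interface{}
}

// beforeInvoke mimics the "before method execution" hook: it receives the
// enhanced method's arguments and returns a context for the deferred after hook.
func beforeInvoke(method string, args ...interface{}) *invocation {
	fmt.Println("before:", method, args)
	return &invocation{method: method, args: args}
}

// afterInvoke mimics the "after method execution" hook; it runs via defer, so
// it also sees the return value once the original body has finished.
func afterInvoke(inv *invocation) {
	fmt.Println("after:", inv.method, "result =", inv.result)
}

// Query stands for a framework method after enhancement: the hook pair is the
// first statement, and the original body follows unchanged.
func Query(id int) (name string) {
	inv := beforeInvoke("Query", id)
	defer func() { inv.result = name; afterInvoke(inv) }()

	// original method body
	return fmt.Sprintf("user-%d", id)
}

func main() {
	fmt.Println(Query(42))
}

The defer placement mirrors the description above: because the hook pair is injected as the first statement of the enhanced method, the after hook can still observe the return value when the original body completes.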
Copy Files After completing the delegator file, the agent would perform the following copy operations:\n Plugin Code: Copy the Go files containing the interceptors in the plugin to the same level directory as the current framework. Plugin Development API Code: Copy the operation APIs required by the interceptors in the plugin to the same level directory as the current framework, such as tracing.  After copying the files, they cannot be immediately added to the compilation parameters, because they may have the same name as the existing framework code. Therefore, we need to perform some rewriting operations, which include the following parts:\n Types: Rename created structures, interfaces, methods, and other types by adding a unified prefix. Static Methods: Add a prefix to non-instance methods. Static methods do not need to be rewritten since they have already been processed in the types. Variables: Add a prefix to global variables. It\u0026rsquo;s not necessary to add a prefix to variables inside methods because they can ensure no conflicts would arise and are helpful for debugging.  In the Tracing API, we can see several methods, such as:\nvar ( errParameter = operator.NewError(\u0026#34;parameter are nil\u0026#34;) ) func CreateLocalSpan(operationName string, opts ...SpanOption) (s Span, err error) type SpanOption interface { Apply(interface{}) } After performed rewriting operations, they would become:\nvar ( skywalkingOperatorVarTracingerrParameter = skywalkingOperatorStaticMethodOperatorNewError(\u0026#34;parameter are nil\u0026#34;) ) func skywalkingOperatorStaticMethodTracingCreateLocalSpan(operationName string, opts ...skywalkingOperatorTypeTracingSpanOption) (s skywalkingOperatorTypeTracingSpan, err error) type skywalkingOperatorTypeTracingSpanOption interface { Apply(interface{}) } Saving and Compiling After the above steps are completed, the agent needs to save the modified files and add them to the compilation parameters.\nAt this point, when the framework executes the enhanced method, it can have the following capabilities:\n Execute Plugin Code: Custom code can be embedded before and after the method execution, and real-time parameter information can be obtained. Operate Agent: By calling the Agent API, interaction with the Agent Core can be achieved, enabling functions such as distributed tracing.  Propagation Context SkyWalking uses a new and internal mechanism to propagate context(e.g. tracing context) instead of relying on go native context.Context. This reduces the requirement for the target codes.\nContext Propagation between Methods In the agent, it would enhance the g structure in the runtime package. The g structure in Golang represents the internal data of the current goroutine. By enhancing this structure and using the runtime.getg() method, we can obtain the enhanced data in the current structure in real-time.\nEnhancement includes the following steps:\n Add Attributes to g: Add a new field to the g struct, and value as interface{}. Export Methods: Export methods for real-time setting and getting of custom field values in the current goroutine through go:linkname. Import methods: In the Agent Core, import the setting and getting methods for custom fields.  
Through these, the agent has a shared context in any place within the same goroutine, similar to Java\u0026rsquo;s Thread Local.\nContext Propagation between Goroutines Besides using g object as the in-goroutine context propagation, SkyWalking builds a mechanism to propagate context between Goroutines.\nWhen a new goroutine is started on an existing goroutine, the runtime.newproc1 method is called to create a new goroutine based on the existing one. The agent would do context-copy from the previous goroutine to the newly created goroutine. The new context in the goroutine only shares limited information to help continues tracing.\nThe specific operation process is as follows:\n Write the copy method: Create a method for copying data from the previous goroutine. Insert code into newproc1: Insert the defer code, intercept the g objects before and after the execution, and call the copy method to assign values to the custom fields' data.  Agent with Dependency Since SkyWalking Go Agent is based on compile-time enhancement, it cannot introduce third-party modules. For example, when SkyWalking Agent communicates with OAP, it needs to exchange data through the gRPC protocol. If the user does not introduce the gRPC module, it cannot be completed.\nDue to resolve this problem, users need to introduce relevant modules to complete the basic dependency functions. This is why import _ \u0026quot;github.com/apache/skywalking-go\u0026quot; is required. The main key modules that users currently need to introduce include:\n uuid: Used to generate UUIDs, mainly for TraceID generation. errors: To encapsulate error content. gRPC: The basic library used for communication between SkyWalking Go Agent and the Server. skywalking-goapi: The data protocol for communication between Agent and Server in SkyWalking.  Agent Core Copy To simplify the complexity of using Agent, the SkyWalking Go introduced by users only contains the user usage API and code import. The Agent Core code would be dynamically added during hybrid compilation, so when the Agent releases new features, users only need to upgrade the Agent enhancement program without modifying the references in the program.\nCode Import You can see a lot of imports.go files anywhere in the SkyWalking Go, such as imports.go in the root directory, but there is no actual code. This is because, during hybrid compilation, if the code to be compiled references other libraries, such as os, fmt, etc., they need to be referenced through the importcfg file during compilation.\nThe content of the importcfg file is shown below, which specifies the package dependency information required for all Go files to be compiled in the current package path.\npackagefile errors=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b006/_pkg_.a packagefile internal/itoa=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b027/_pkg_.a packagefile internal/oserror=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b035/_pkg_.a So when the file is copied and added to the compilation process, the relevant dependency libraries need to be declared in importcfg. Therefore, by predefining import in the project, the compiler can be forced to introduce the relevant libraries during compilation, thus completing the dynamic enhancement operation.\nPlugin with Agent Core As mentioned in the previous section, it is not possible to dynamically add dependencies between modules. 
Agent can only modify the importcfg file to reference dependencies if we are sure that the previous dependencies have already been loaded, but this is often impractical. For example, Agent cannot introduce dependencies from the plugin code into the Agent Core, because the plugin is unaware of the Agent\u0026rsquo;s existence. This raises a question: how can agent enable communication between plugins and Agent Core?\nCurrently, agent employ the following method: a global object is introduced in the runtime package, provided by Agent Core. When a plugin needs to interact with Agent Core, it simply searches for this global object from runtime package. The specific steps are as follows:\n Global object definition: Add a global variable when the runtime package is loaded and provide corresponding set and get methods. Set the variable when the Agent loads: When the Agent Core is copied and enhanced, import the method for setting the global variable and initialize the object in the global variable. Plugins: When the plugin is built, import the methods for reading the global variables and APIs. At this point, we can access the object set in Agent Core and use the defined interface for the plugin to access methods in Agent Core.  Limitation Since the communication between the plugin API and Agent Core is through an interface, and the plugin API is copied in each plugin, they can only transfer basic data types or any(interface{}) type. The reason is that when additional types are transferred, agent would be copied multiple times, so the types transferred in the plugin are not consistent with the types in Agent Core, as the types also need to be defined multiple times.\nTherefore, when communicating, they only pass structured data through any type, and when the Agent Core or plugin obtains the data, a type cast is simply required.\nDebugging Based on the introductions in the previous sections, both Agent Core and plugin code are dynamically copied/modified into the target package. So, how can we debug the program during development to identify issues?\nOur current approach consists of the following steps:\n Inform the source code location during flag: Enhance the debug parameters during compilation and inform the system path, for example: -toolexec \u0026quot;/path/to/agent -debug /path/to/code\u0026quot; Get the original file path: Find the absolute location of the source code of the file to be copied based on the rules. Introduce the //line directive: Add the //line directive to the copied target file to inform the compiler of the location of the original file after copying.  At this point, when the program is executed, developer can find the original file to be copied in the source code.\n","excerpt":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the …","ref":"/docs/skywalking-go/latest/en/concepts-and-designs/key-principles/","title":"Key Principle"},{"body":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the developers and end users understand how the agent works easier.\nMethod Interceptor Method interception is particularly important in SkyWalking Go, as it enables the creation of plugins. In SkyWalking Go, method interception mainly involves the following key points:\n Finding Method: Using AST to find method information in the target code to be enhanced. Modifying Methods: Enhancing the specified methods and embedding interceptor code. 
Saving and Compiling: Updating the modified files in the compilation arguments.  Finding Method When looking for methods, the SkyWalking Go Agent requires to search according to the provided compilation arguments, which mainly include the following two parts:\n Package information: Based on the package name provided by the arguments, the Agent can find the specific plugin. Go files: When a matching plugin is found, the Agent reads the .go files and uses AST to parse the method information from these source files. When the method information matches the method information required by the plugin for the interception, the agent would consider the method found.  Modifying Methods After finding the method, the SkyWalking Go Agent needs to modify the method implication and embed the interceptor code.\nChange Method Body When intercepting a method, the first thing to do is to modify the method and embed the template code. This code segment includes two method executions:\n Before method execution: Pass in the current method\u0026rsquo;s arguments, instances, and other information. After method execution: Using the defer method, intercept the result parameters after the code execution is completed.  Based on these two methods, the agent can intercept before and after method execution.\nIn order not to affect the line of code execution, this code segment will only be executed in the same line as the first statement in the method. This ensures that when an exception occurs in the framework code execution, the exact location can still be found without being affected by the enhanced code.\nWrite Delegator File After the agent enhances the method body, it needs to implement the above two methods and write them into a single file, called the delegator file. These two methods would do the following:\n Before method execution: Build by the template. Build the context for before and after interception, and pass the parameter information during execution to the interceptor in each plugin. After method execution: Build by the template. Pass the method return value to the interceptor and execute the method.  Copy Files After completing the delegator file, the agent would perform the following copy operations:\n Plugin Code: Copy the Go files containing the interceptors in the plugin to the same level directory as the current framework. Plugin Development API Code: Copy the operation APIs required by the interceptors in the plugin to the same level directory as the current framework, such as tracing.  After copying the files, they cannot be immediately added to the compilation parameters, because they may have the same name as the existing framework code. Therefore, we need to perform some rewriting operations, which include the following parts:\n Types: Rename created structures, interfaces, methods, and other types by adding a unified prefix. Static Methods: Add a prefix to non-instance methods. Static methods do not need to be rewritten since they have already been processed in the types. Variables: Add a prefix to global variables. It\u0026rsquo;s not necessary to add a prefix to variables inside methods because they can ensure no conflicts would arise and are helpful for debugging.  
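To make the prefix-renaming idea above concrete, here is a minimal, hypothetical sketch using the standard go/ast and go/printer packages. The prefixes echo the convention shown in the doc's own before/after example that follows; the real agent's rewriter handles many more cases (package-qualified names, methods, variables, and so on):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"os"
)

const src = `package tracing

type SpanOption interface{ Apply(interface{}) }

func CreateLocalSpan(name string, opts ...SpanOption) error { return nil }
`

const (
	typePrefix = "skywalkingOperatorType"         // echoes the renamed-type example below
	funcPrefix = "skywalkingOperatorStaticMethod" // echoes the renamed-function example below
)

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "tracing.go", src, 0)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// Rename top-level type and function declarations by adding a prefix,
	// remembering the old type names so their uses can be patched afterwards.
	renamed := map[string]string{}
	for _, decl := range file.Decls {
		switch d := decl.(type) {
		case *ast.GenDecl:
			for _, spec := range d.Specs {
				if ts, ok := spec.(*ast.TypeSpec); ok {
					renamed[ts.Name.Name] = typePrefix + ts.Name.Name
					ts.Name.Name = typePrefix + ts.Name.Name
				}
			}
		case *ast.FuncDecl:
			d.Name.Name = funcPrefix + d.Name.Name
		}
	}

	// Patch identifier uses (e.g. the SpanOption parameter type) to the new names.
	ast.Inspect(file, func(n ast.Node) bool {
		if id, ok := n.(*ast.Ident); ok {
			if newName, ok := renamed[id.Name]; ok {
				id.Name = newName
			}
		}
		return true
	})

	printer.Fprint(os.Stdout, fset, file)
}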
In the Tracing API, we can see several methods, such as:\nvar ( errParameter = operator.NewError(\u0026#34;parameter are nil\u0026#34;) ) func CreateLocalSpan(operationName string, opts ...SpanOption) (s Span, err error) type SpanOption interface { Apply(interface{}) } After performed rewriting operations, they would become:\nvar ( skywalkingOperatorVarTracingerrParameter = skywalkingOperatorStaticMethodOperatorNewError(\u0026#34;parameter are nil\u0026#34;) ) func skywalkingOperatorStaticMethodTracingCreateLocalSpan(operationName string, opts ...skywalkingOperatorTypeTracingSpanOption) (s skywalkingOperatorTypeTracingSpan, err error) type skywalkingOperatorTypeTracingSpanOption interface { Apply(interface{}) } Saving and Compiling After the above steps are completed, the agent needs to save the modified files and add them to the compilation parameters.\nAt this point, when the framework executes the enhanced method, it can have the following capabilities:\n Execute Plugin Code: Custom code can be embedded before and after the method execution, and real-time parameter information can be obtained. Operate Agent: By calling the Agent API, interaction with the Agent Core can be achieved, enabling functions such as distributed tracing.  Propagation Context SkyWalking uses a new and internal mechanism to propagate context(e.g. tracing context) instead of relying on go native context.Context. This reduces the requirement for the target codes.\nContext Propagation between Methods In the agent, it would enhance the g structure in the runtime package. The g structure in Golang represents the internal data of the current goroutine. By enhancing this structure and using the runtime.getg() method, we can obtain the enhanced data in the current structure in real-time.\nEnhancement includes the following steps:\n Add Attributes to g: Add a new field to the g struct, and value as interface{}. Export Methods: Export methods for real-time setting and getting of custom field values in the current goroutine through go:linkname. Import methods: In the Agent Core, import the setting and getting methods for custom fields.  Through these, the agent has a shared context in any place within the same goroutine, similar to Java\u0026rsquo;s Thread Local.\nContext Propagation between Goroutines Besides using g object as the in-goroutine context propagation, SkyWalking builds a mechanism to propagate context between Goroutines.\nWhen a new goroutine is started on an existing goroutine, the runtime.newproc1 method is called to create a new goroutine based on the existing one. The agent would do context-copy from the previous goroutine to the newly created goroutine. The new context in the goroutine only shares limited information to help continues tracing.\nThe specific operation process is as follows:\n Write the copy method: Create a method for copying data from the previous goroutine. Insert code into newproc1: Insert the defer code, intercept the g objects before and after the execution, and call the copy method to assign values to the custom fields' data.  Agent with Dependency Since SkyWalking Go Agent is based on compile-time enhancement, it cannot introduce third-party modules. For example, when SkyWalking Agent communicates with OAP, it needs to exchange data through the gRPC protocol. If the user does not introduce the gRPC module, it cannot be completed.\nDue to resolve this problem, users need to introduce relevant modules to complete the basic dependency functions. 
This is why import _ \u0026quot;github.com/apache/skywalking-go\u0026quot; is required. The main key modules that users currently need to introduce include:\n uuid: Used to generate UUIDs, mainly for TraceID generation. errors: To encapsulate error content. gRPC: The basic library used for communication between SkyWalking Go Agent and the Server. skywalking-goapi: The data protocol for communication between Agent and Server in SkyWalking.  Agent Core Copy To simplify the complexity of using Agent, the SkyWalking Go introduced by users only contains the user usage API and code import. The Agent Core code would be dynamically added during hybrid compilation, so when the Agent releases new features, users only need to upgrade the Agent enhancement program without modifying the references in the program.\nCode Import You can see a lot of imports.go files anywhere in the SkyWalking Go, such as imports.go in the root directory, but there is no actual code. This is because, during hybrid compilation, if the code to be compiled references other libraries, such as os, fmt, etc., they need to be referenced through the importcfg file during compilation.\nThe content of the importcfg file is shown below, which specifies the package dependency information required for all Go files to be compiled in the current package path.\npackagefile errors=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b006/_pkg_.a packagefile internal/itoa=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b027/_pkg_.a packagefile internal/oserror=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b035/_pkg_.a So when the file is copied and added to the compilation process, the relevant dependency libraries need to be declared in importcfg. Therefore, by predefining import in the project, the compiler can be forced to introduce the relevant libraries during compilation, thus completing the dynamic enhancement operation.\nPlugin with Agent Core As mentioned in the previous section, it is not possible to dynamically add dependencies between modules. Agent can only modify the importcfg file to reference dependencies if we are sure that the previous dependencies have already been loaded, but this is often impractical. For example, Agent cannot introduce dependencies from the plugin code into the Agent Core, because the plugin is unaware of the Agent\u0026rsquo;s existence. This raises a question: how can agent enable communication between plugins and Agent Core?\nCurrently, agent employ the following method: a global object is introduced in the runtime package, provided by Agent Core. When a plugin needs to interact with Agent Core, it simply searches for this global object from runtime package. The specific steps are as follows:\n Global object definition: Add a global variable when the runtime package is loaded and provide corresponding set and get methods. Set the variable when the Agent loads: When the Agent Core is copied and enhanced, import the method for setting the global variable and initialize the object in the global variable. Plugins: When the plugin is built, import the methods for reading the global variables and APIs. At this point, we can access the object set in Agent Core and use the defined interface for the plugin to access methods in Agent Core.  Limitation Since the communication between the plugin API and Agent Core is through an interface, and the plugin API is copied in each plugin, they can only transfer basic data types or any(interface{}) type. 
The reason is that when additional types are transferred, agent would be copied multiple times, so the types transferred in the plugin are not consistent with the types in Agent Core, as the types also need to be defined multiple times.\nTherefore, when communicating, they only pass structured data through any type, and when the Agent Core or plugin obtains the data, a type cast is simply required.\nDebugging Based on the introductions in the previous sections, both Agent Core and plugin code are dynamically copied/modified into the target package. So, how can we debug the program during development to identify issues?\nOur current approach consists of the following steps:\n Inform the source code location during flag: Enhance the debug parameters during compilation and inform the system path, for example: -toolexec \u0026quot;/path/to/agent -debug /path/to/code\u0026quot; Get the original file path: Find the absolute location of the source code of the file to be copied based on the rules. Introduce the //line directive: Add the //line directive to the copied target file to inform the compiler of the location of the original file after copying.  At this point, when the program is executed, developer can find the original file to be copied in the source code.\n","excerpt":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the …","ref":"/docs/skywalking-go/next/en/concepts-and-designs/key-principles/","title":"Key Principle"},{"body":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the developers and end users understand how the agent works easier.\nMethod Interceptor Method interception is particularly important in SkyWalking Go, as it enables the creation of plugins. In SkyWalking Go, method interception mainly involves the following key points:\n Finding Method: Using AST to find method information in the target code to be enhanced. Modifying Methods: Enhancing the specified methods and embedding interceptor code. Saving and Compiling: Updating the modified files in the compilation arguments.  Finding Method When looking for methods, the SkyWalking Go Agent requires to search according to the provided compilation arguments, which mainly include the following two parts:\n Package information: Based on the package name provided by the arguments, the Agent can find the specific plugin. Go files: When a matching plugin is found, the Agent reads the .go files and uses AST to parse the method information from these source files. When the method information matches the method information required by the plugin for the interception, the agent would consider the method found.  Modifying Methods After finding the method, the SkyWalking Go Agent needs to modify the method implication and embed the interceptor code.\nChange Method Body When intercepting a method, the first thing to do is to modify the method and embed the template code. This code segment includes two method executions:\n Before method execution: Pass in the current method\u0026rsquo;s arguments, instances, and other information. After method execution: Using the defer method, intercept the result parameters after the code execution is completed.  Based on these two methods, the agent can intercept before and after method execution.\nIn order not to affect the line of code execution, this code segment will only be executed in the same line as the first statement in the method. 
This ensures that when an exception occurs in the framework code execution, the exact location can still be found without being affected by the enhanced code.\nWrite Delegator File After the agent enhances the method body, it needs to implement the above two methods and write them into a single file, called the delegator file. These two methods do the following:\n Before method execution: Built from the template. Builds the context for before and after interception, and passes the parameter information at execution time to the interceptor in each plugin. After method execution: Built from the template. Passes the method\u0026rsquo;s return values to the interceptor and executes it.  Copy Files After completing the delegator file, the agent performs the following copy operations:\n Plugin Code: Copy the Go files containing the interceptors in the plugin to the same directory level as the current framework. Plugin Development API Code: Copy the operation APIs required by the interceptors in the plugin, such as tracing, to the same directory level as the current framework.  After copying the files, they cannot be immediately added to the compilation parameters, because they may have the same names as the existing framework code. Therefore, we need to perform some rewriting operations, which include the following parts:\n Types: Rename created structures, interfaces, methods, and other types by adding a unified prefix. Static Methods: Add a prefix to non-instance methods. Instance methods do not need to be rewritten since they have already been handled as part of their types. Variables: Add a prefix to global variables. It\u0026rsquo;s not necessary to add a prefix to variables inside methods because they cannot cause conflicts, and keeping their names is helpful for debugging.  In the Tracing API, we can see several methods, such as:\nvar ( errParameter = operator.NewError(\u0026#34;parameter are nil\u0026#34;) ) func CreateLocalSpan(operationName string, opts ...SpanOption) (s Span, err error) type SpanOption interface { Apply(interface{}) } After the rewriting operations are performed, they become:\nvar ( skywalkingOperatorVarTracingerrParameter = skywalkingOperatorStaticMethodOperatorNewError(\u0026#34;parameter are nil\u0026#34;) ) func skywalkingOperatorStaticMethodTracingCreateLocalSpan(operationName string, opts ...skywalkingOperatorTypeTracingSpanOption) (s skywalkingOperatorTypeTracingSpan, err error) type skywalkingOperatorTypeTracingSpanOption interface { Apply(interface{}) } Saving and Compiling After the above steps are completed, the agent needs to save the modified files and add them to the compilation parameters.\nAt this point, when the framework executes the enhanced method, it has the following capabilities:\n Execute Plugin Code: Custom code can be embedded before and after the method execution, and real-time parameter information can be obtained. Operate Agent: By calling the Agent API, interaction with the Agent Core can be achieved, enabling functions such as distributed tracing.  Propagation Context SkyWalking uses a new, internal mechanism to propagate context (e.g. the tracing context) instead of relying on Go\u0026rsquo;s native context.Context. This reduces the requirements on the target code.\nContext Propagation between Methods The agent enhances the g structure in the runtime package. The g structure in Golang represents the internal data of the current goroutine. 
By enhancing this structure and using the runtime.getg() method, we can obtain the enhanced data of the current structure in real time.\nEnhancement includes the following steps:\n Add Attributes to g: Add a new field to the g struct, with its value typed as interface{}. Export Methods: Export methods for real-time setting and getting of the custom field value in the current goroutine through go:linkname. Import methods: In the Agent Core, import the setting and getting methods for the custom field.  Through these steps, the agent has a shared context anywhere within the same goroutine, similar to Java\u0026rsquo;s Thread Local.\nContext Propagation between Goroutines Besides using the g object for in-goroutine context propagation, SkyWalking builds a mechanism to propagate context between goroutines.\nWhen a new goroutine is started from an existing goroutine, the runtime.newproc1 method is called to create the new goroutine based on the existing one. The agent copies the context from the previous goroutine to the newly created goroutine. The new context in the goroutine only shares limited information to help continue tracing.\nThe specific operation process is as follows:\n Write the copy method: Create a method for copying data from the previous goroutine. Insert code into newproc1: Insert the defer code, intercept the g objects before and after the execution, and call the copy method to assign values to the custom fields' data.  Agent with Dependency Since the SkyWalking Go Agent is based on compile-time enhancement, it cannot introduce third-party modules by itself. For example, when the SkyWalking Agent communicates with OAP, it needs to exchange data through the gRPC protocol. If the user does not introduce the gRPC module, this communication cannot be completed.\nTo resolve this problem, users need to introduce the relevant modules to complete the basic dependency functions. This is why import _ \u0026quot;github.com/apache/skywalking-go\u0026quot; is required. The key modules that users currently need to introduce include:\n uuid: Used to generate UUIDs, mainly for TraceID generation. errors: To encapsulate error content. gRPC: The basic library used for communication between the SkyWalking Go Agent and the Server. skywalking-goapi: The data protocol for communication between the Agent and the Server in SkyWalking.  Agent Core Copy To reduce the complexity of using the Agent, the SkyWalking Go module imported by users only contains the user-facing API and the code imports. The Agent Core code is dynamically added during hybrid compilation, so when the Agent releases new features, users only need to upgrade the Agent enhancement program without modifying the references in their program.\nCode Import You can see many imports.go files throughout SkyWalking Go, such as imports.go in the root directory, but they contain no actual code. 
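Such a file typically holds nothing but blank imports. The following is a purely illustrative sketch; the package name and the specific module paths are assumptions, not the actual contents of the project file:

// imports.go (illustrative sketch): blank imports force these packages into
// the compiled package's importcfg, so code injected later at compile time
// can reference them without adding new module requirements.
package main

import (
	_ "errors"
	_ "github.com/google/uuid"
	_ "google.golang.org/grpc"
)

The reason these blank declarations matter is explained next.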
This is because, during hybrid compilation, if the code to be compiled references other libraries, such as os, fmt, etc., they need to be referenced through the importcfg file during compilation.\nThe content of the importcfg file is shown below; it specifies the package dependency information required for all Go files to be compiled in the current package path.\npackagefile errors=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b006/_pkg_.a packagefile internal/itoa=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b027/_pkg_.a packagefile internal/oserror=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b035/_pkg_.a So when a file is copied and added to the compilation process, its dependency libraries need to be declared in importcfg. Therefore, by predefining imports in the project, the compiler can be forced to include the relevant libraries during compilation, thus completing the dynamic enhancement operation.\nPlugin with Agent Core As mentioned in the previous section, it is not possible to dynamically add dependencies between modules. The Agent can only modify the importcfg file to reference dependencies when it is certain that those dependencies have already been loaded, but this is often impractical. For example, the Agent cannot introduce dependencies from the plugin code into the Agent Core, because the plugin is unaware of the Agent\u0026rsquo;s existence. This raises a question: how can the agent enable communication between plugins and Agent Core?\nCurrently, the agent employs the following method: a global object, provided by Agent Core, is introduced in the runtime package. When a plugin needs to interact with Agent Core, it simply looks up this global object in the runtime package. The specific steps are as follows:\n Global object definition: Add a global variable when the runtime package is loaded and provide corresponding set and get methods. Set the variable when the Agent loads: When the Agent Core is copied and enhanced, import the method for setting the global variable and initialize the object stored in it. Plugins: When the plugin is built, import the methods for reading the global variable and the APIs. At this point, the plugin can access the object set by Agent Core and use the defined interface to call methods in Agent Core.  Limitation Since the communication between the plugin API and Agent Core goes through an interface, and the plugin API is copied into each plugin, they can only transfer basic data types or the any (interface{}) type. The reason is that the plugin API is copied into each plugin, so any additional types would also be defined multiple times, and the types passed by a plugin would not be consistent with the types in Agent Core.\nTherefore, when communicating, they only pass structured data through the any type, and when the Agent Core or plugin obtains the data, a simple type cast is required.\nDebugging Based on the introductions in the previous sections, both Agent Core and plugin code are dynamically copied/modified into the target package. 
So, how can we debug the program during development to identify issues?\nOur current approach consists of the following steps:\n Inform the source code location via a flag: Add the debug parameter to the compilation command and pass the source path, for example: -toolexec \u0026quot;/path/to/agent -debug /path/to/code\u0026quot; Get the original file path: Find the absolute location of the source code of the file to be copied based on the rules. Introduce the //line directive: Add the //line directive to the copied target file to inform the compiler of the location of the original file after copying.  At this point, when the program is executed, developers can find the original file that was copied in the source code.\n","excerpt":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the …","ref":"/docs/skywalking-go/v0.4.0/en/concepts-and-designs/key-principles/","title":"Key Principle"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to get meta info, and parses the expressions with MAL to filter/calculate/aggregate and store the results.  Setup  Set up kube-state-metrics. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector. For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Configure the SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. 
K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s 
k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/latest/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring Kubernetes is an open-source container-orchestration system for automating computer application deployment, scaling, and management. It was originally designed by Google and is now maintained by the Cloud Native Computing Foundation. It aims to provide a \u0026ldquo;platform for automating deployment, scaling, and operations of application containers across clusters of hosts\u0026rdquo;. It works with a range of container tools, including Docker.\nNowadays, Kubernetes is the fundamental infrastructure for cloud native applications. SkyWalking provides the following ways to monitor deployments on Kubernetes.\n Use kube-state-metrics (KSM) and cAdvisor to collect metrics of Kubernetes resources, such as CPU, service, pod, and node. Read kube-state-metrics and cAdvisor setup guide for more details. Rover is a SkyWalking native eBPF agent to collect network Access Logs to support topology-aware and metrics analysis. Meanwhile, due to the power of eBPF, it could profile running services written by C++, Rust, Golang, etc. Read Rover setup guide for more details.  SkyWalking deeply integrates with Kubernetes to help users understand the status of their applications on Kubernetes. 
Cillium with Hubble is in our v10 plan.\n","excerpt":"Kubernetes (K8s) monitoring Kubernetes is an open-source container-orchestration system for …","ref":"/docs/main/next/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus GRPC Exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are 
currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/k8s-cluster.yaml,/config/otel-oc-rules/k8s-node.yaml, /config/otel-oc-rules/k8s-service.yaml.\nThe K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s 
kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s-cluster.yaml,/config/otel-rules/k8s-node.yaml, /config/otel-rules/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s 
kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s-cluster.yaml,/config/otel-rules/k8s-node.yaml, /config/otel-rules/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s 
kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related 
pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related 
pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related 
pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
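As a companion to the Setup steps above, the OpenTelemetry Collector side of this pipeline is typically just a Prometheus receiver plus an OTLP gRPC exporter pointed at the OAP. The sketch below uses placeholder scrape targets and a placeholder OAP address; the showcase configuration remains the authoritative example.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        # Placeholder target; real clusters usually rely on kubernetes_sd_configs to
        # discover kube-state-metrics and the kubelet/cAdvisor endpoints.
        - job_name: kube-state-metrics
          static_configs:
            - targets: ['kube-state-metrics.kube-system:8080']

processors:
  batch: {}

exporters:
  otlp:
    endpoint: skywalking-oap.skywalking.svc:11800   # OAP gRPC endpoint (placeholder host)
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
```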
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-k8s-monitoring/","title":"Kubernetes (K8s) monitoring"},{"body":"Kubernetes (K8s) monitoring from kube-state-metrics and cAdvisor SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services 
current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. 
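For the last Setup step, enabling the OpenTelemetry receiver on the OAP side is a small change in application.yml. The snippet below is a sketch assuming a recent OAP release; the handler and rule option names have changed between versions, so verify them against the application.yml shipped with your distribution.

```yaml
# Sketch of the OAP-side receiver configuration (option names may differ per release).
receiver-otel:
  selector: default
  default:
    enabledHandlers: otlp-metrics        # accept OTLP metrics over gRPC
    enabledOtelMetricsRules: k8s/*       # activate the K8s MAL rule files
```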
The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring from kube-state-metrics and cAdvisor SkyWalking leverages K8s …","ref":"/docs/main/next/en/setup/backend/backend-k8s-monitoring-metrics-cadvisor/","title":"Kubernetes (K8s) monitoring from kube-state-metrics and cAdvisor"},{"body":"Kubernetes (K8s) monitoring from Rover SkyWalking uses the SkyWalking Rover system to collect access logs from Kubernetes clusters and hands them over to the OAL system for metrics and entity analysis.\nData flow  SkyWalking Rover monitoring access log data from K8s and send to the OAP. The SkyWalking OAP Server receive access log from Rover through gRPC, analysis the generate entity, and using OAL to generating metrics.  Setup  Setup Rover in the Kubernetes and enable access log service. Setup eBPF receiver module by the following configuration.  receiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Generated Entities SkyWalking receive the access logs from Rover, analyzes the kubernetes connection information to parse out the following corresponding entities:\n Service Service Instance Service Endpoint Service Relation Service Instance Relation Service Endpoint Relation  Generate Metrics For each of the above-mentioned entities, metrics such as connection, transmission, and protocol can be analyzed.\nConnection Metrics Record the relevant metrics for every service establishing/closing connections with other services.\n   Name Unit Description     Connect CPM Count Total Connect to other Service counts per minutes.   Connect Duration Nanoseconds Total Connect to other Service use duration.   Connect Success CPM Count Success to connect to other Service counts per minutes.   Accept CPM Count Accept new connection from other Service counts per minutes.   Accept Duration Nanoseconds Total accept new connection from other Service use duration.   Close CPM Count Close one connection counts per minutes.   Close Duration Nanoseconds Total Close connections use duration.    Transfer Metrics Record the basic information and L2-L4 layer details for each syscall made during network requests by every service to other services.\nRead Data from Connection    Name Unit Description     Read CPM Count Read from connection counts per minutes.   Read Duration Nanoseconds Total read data use duration.   Read Package CPM Count Total read TCP Package count per minutes.   Read Package Size Bytes Total read TCP package size per minutes.   Read Layer 4 Duration Nanoseconds Total read data on the Layer 4 use duration.   Read Layer 3 Duration Nanoseconds Total read data on the Layer 3 use duration.   Read Layer 3 Recv Duration Nanoseconds Total read data on the Layer 3 receive use duration.   Read Layer 3 Local Duration Nanoseconds Total read data on the Layer 3 local use duration.   Read Package To Queue Duration Nanoseconds Total duration between TCP package received and send to Queue.   Read Package From Queue Duration Nanoseconds Total duration between send to Queue and receive from Queue.   Read Net Filter CPM Count Total Net Filtered count when read data.   Read Net Filter Duration Nanoseconds Total Net Filtered use duration.    Write Data to Connection    Name Unit Description     Write CPM Count Write to connection counts per minutes.   Write Duration Nanoseconds Total write data to connection use duration.   Write Package CPM Count Total write TCP Package count per minutes.   
Write Package Size Bytes Total write TCP Package size per minutes.   Write L4 Duration Nanoseconds Total write data to connection Layer 4 use duration.   Write L3 Duration Nanoseconds Total write data to connection Layer 3 use duration.   Write L3 Local Duration Nanoseconds Total write data to the connection Layer 3 Local use duration.   Write L3 Output Duration Nanoseconds Total write data to the connection Layer 3 Output use duration.   Write L2 Duration Nanoseconds Total write data to connection Layer 2 use duration.   Write L2 Ready Send Duration Nanoseconds Total write data to the connection Layer 2 ready send data queue use duration.   Write L2 Send NetDevice Duration Nanoseconds Total write data to the connection Layer 2 send data to net device use duration.    Protocol Based on each transfer data analysis, extract the information of the 7-layer network protocol.\nHTTP/1.x or HTTP/2.x    Name Init Description     Call CPM Count HTTP Request calls per minutes.   Duration Nanoseconds Total HTTP Response use duration.   Success CPM Count Total HTTP Response success(status \u0026lt; 500) count.   Request Header Size Bytes Total Request Header size.   Request Body Size Bytes Total Request Body size.   Response Header Size Bytes Total Response Header size.   Response Body Size Bytes Total Response Body size.    Customizations You can customize your own metrics/dashboard panel. The metrics definition and expression rules are found in /config/oal/ebpf.oal, please refer the Scope Declaration Documentation. The K8s dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","excerpt":"Kubernetes (K8s) monitoring from Rover SkyWalking uses the SkyWalking Rover system to collect access …","ref":"/docs/main/next/en/setup/backend/backend-k8s-monitoring-rover/","title":"Kubernetes (K8s) monitoring from Rover"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. 
Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/latest/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. 
When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/next/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature for collecting metrics data from the network. SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  
agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature for …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  
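To make those four settings easier to scan, here is a purely illustrative rendering of one sampling rule; the field names are hypothetical stand-ins for the documented options (the real rules are created through the UI or GraphQL when starting a network profiling task), so treat this as a reading aid rather than a configuration file.

```yaml
# Hypothetical field names; each line mirrors one documented option.
samplingRules:
  - uriRegex: /api/orders/.*   # match pattern for HTTP requests; empty means match all
    minDuration: 500           # ms; sample requests slower than this threshold
    when4xx: false             # sample responses with status 400-499 (OFF by default)
    when5xx: true              # sample responses with status 500-599 (ON by default)
```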
Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. 
When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. 
Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. 
Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    
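The agent-analyzer snippet above appears flattened onto a single line; expanded into ordinary YAML, the same configuration reads as follows (the keys and values are taken directly from the text above).

```yaml
agent-analyzer:
  selector: ${SW_AGENT_ANALYZER:default}
  default:
    # Activate the network-profiling MAL file so Rover metrics are analyzed.
    meterAnalyzerActiveFiles: ${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}
```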
","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. 
Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","excerpt":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-k8s-network-monitoring/","title":"Kubernetes Network monitoring"},{"body":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, which is by importing SkyWalking into your project and starting the agent.\nDefaults By default, SkyWalking Python agent uses gRPC protocol to report data to SkyWalking backend, in SkyWalking backend, the port of gRPC protocol is 11800, and the port of HTTP protocol is 12800,\nSee all default configuration values in the Configuration Vocabulary\nYou could configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) and set agent_protocol (or environment variable SW_AGENT_PROTOCOL to one of gprc, http or kafka according to the protocol you would like to use.\nReport data via gRPC protocol (Default) For example, if you want to use gRPC protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:11800, such as 127.0.0.1:11800:\nfrom skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:11800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via HTTP protocol However, if you want to use HTTP protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:12800, such as 127.0.0.1:12800, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to http):\n Remember you should install skywalking-python with extra requires http, pip install \u0026quot;apache-skywalking[http].\n from skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:12800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;http\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via Kafka protocol Please make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nFinally, if you want to use Kafka protocol to report data, configure kafka_bootstrap_servers (or environment variable SW_KAFKA_BOOTSTRAP_SERVERS) to kafka-brokers, such as 127.0.0.1:9200, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to kafka):\n Remember you should install skywalking-python with extra requires kafka, pip install \u0026quot;apache-skywalking[kafka]\u0026quot;.\n from skywalking import agent, config config.init(kafka_bootstrap_servers=\u0026#39;127.0.0.1:9200\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;kafka\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated 
uuid\u0026gt;\u0026#39;) agent.start() Alternatively, you can also pass the configurations via environment variables (such as SW_AGENT_NAME, SW_AGENT_COLLECTOR_BACKEND_SERVICES, etc.) so that you don\u0026rsquo;t need to call config.init.\nAll supported environment variables can be found in the Environment Variables List.\n","excerpt":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, …","ref":"/docs/skywalking-python/latest/en/setup/intrusive/","title":"Legacy Setup"},{"body":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, which is by importing SkyWalking into your project and starting the agent.\nDefaults By default, SkyWalking Python agent uses gRPC protocol to report data to SkyWalking backend, in SkyWalking backend, the port of gRPC protocol is 11800, and the port of HTTP protocol is 12800,\nSee all default configuration values in the Configuration Vocabulary\nYou could configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) and set agent_protocol (or environment variable SW_AGENT_PROTOCOL to one of gprc, http or kafka according to the protocol you would like to use.\nReport data via gRPC protocol (Default) For example, if you want to use gRPC protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:11800, such as 127.0.0.1:11800:\nfrom skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:11800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via HTTP protocol However, if you want to use HTTP protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:12800, such as 127.0.0.1:12800, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to http):\n Remember you should install skywalking-python with extra requires http, pip install \u0026quot;apache-skywalking[http].\n from skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:12800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;http\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via Kafka protocol Please make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nFinally, if you want to use Kafka protocol to report data, configure kafka_bootstrap_servers (or environment variable SW_KAFKA_BOOTSTRAP_SERVERS) to kafka-brokers, such as 127.0.0.1:9200, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to kafka):\n Remember you should install skywalking-python with extra requires kafka, pip install \u0026quot;apache-skywalking[kafka]\u0026quot;.\n from skywalking import agent, config config.init(kafka_bootstrap_servers=\u0026#39;127.0.0.1:9200\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;kafka\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Alternatively, you can also pass the configurations via 
environment variables (such as SW_AGENT_NAME, SW_AGENT_COLLECTOR_BACKEND_SERVICES, etc.) so that you don\u0026rsquo;t need to call config.init.\nAll supported environment variables can be found in the Environment Variables List.\n","excerpt":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, …","ref":"/docs/skywalking-python/next/en/setup/intrusive/","title":"Legacy Setup"},{"body":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, which is by importing SkyWalking into your project and starting the agent.\nDefaults By default, SkyWalking Python agent uses gRPC protocol to report data to SkyWalking backend, in SkyWalking backend, the port of gRPC protocol is 11800, and the port of HTTP protocol is 12800,\nSee all default configuration values in the Configuration Vocabulary\nYou could configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) and set agent_protocol (or environment variable SW_AGENT_PROTOCOL to one of gprc, http or kafka according to the protocol you would like to use.\nReport data via gRPC protocol (Default) For example, if you want to use gRPC protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:11800, such as 127.0.0.1:11800:\nfrom skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:11800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via HTTP protocol However, if you want to use HTTP protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:12800, such as 127.0.0.1:12800, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to http):\n Remember you should install skywalking-python with extra requires http, pip install \u0026quot;apache-skywalking[http].\n from skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:12800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;http\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via Kafka protocol Please make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nFinally, if you want to use Kafka protocol to report data, configure kafka_bootstrap_servers (or environment variable SW_KAFKA_BOOTSTRAP_SERVERS) to kafka-brokers, such as 127.0.0.1:9200, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to kafka):\n Remember you should install skywalking-python with extra requires kafka, pip install \u0026quot;apache-skywalking[kafka]\u0026quot;.\n from skywalking import agent, config config.init(kafka_bootstrap_servers=\u0026#39;127.0.0.1:9200\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;kafka\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Alternatively, you can also pass the configurations via environment variables (such as SW_AGENT_NAME, SW_AGENT_COLLECTOR_BACKEND_SERVICES, etc.) 
so that you don\u0026rsquo;t need to call config.init.\nAll supported environment variables can be found in the Environment Variables List.\n","excerpt":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, …","ref":"/docs/skywalking-python/v1.0.1/en/setup/intrusive/","title":"Legacy Setup"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/latest/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. 
The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. 
The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/next/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs, and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nVM entity as a Service in OAP, and on the Layer: OS_LINUX.\nData flow  The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/vm.yaml.\nThe dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs, …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-vm-monitoring/","title":"Linux 
Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nVM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow  The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/vm.yaml.\nThe dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow  The Prometheus node-exporter collects metrics data from the VMs. 
The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. 
The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. 
The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. 
The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. 
The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. 
The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. 
The dashboard panel configurations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","excerpt":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-vm-monitoring/","title":"Linux Monitoring"},{"body":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property? By default, the agent will try to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through system properties, the agent will try to load the file from there. This function has no conflict with Setting Override.\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as the default config.\nUse System.Properties (-D) to set the specified config path:\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file.\n","excerpt":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/specified-agent-config/","title":"Locate agent config file by system property"},{"body":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property? By default, the agent will try to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through system properties, the agent will try to load the file from there. This function has no conflict with Setting Override.\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as the default config.\nUse System.Properties (-D) to set the specified config path:\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file.\n","excerpt":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/specified-agent-config/","title":"Locate agent config file by system property"},{"body":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property? By default, the agent will try to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through system properties, the agent will try to load the file from there. 
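For illustration only, this property is typically combined with the usual Java agent attachment on the launch command, along these lines (a sketch: the agent jar path and yourApp.jar are placeholders, not taken from this section):
java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -Dskywalking_config=/path/to/agent.config -jar yourApp.jar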
By the way, This function has no conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content formats of the specified config must be same as the default config.\nUsing System.Properties(-D) to set the specified config path\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","excerpt":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/specified-agent-config/","title":"Locate agent config file by system property"},{"body":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property ? In Default. The agent will try to locate agent.config, which should be in the /config dictionary of agent package. If User sets the specified agent config file through system properties, The agent will try to load file from there. By the way, This function has no conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content formats of the specified config must be same as the default config.\nUsing System.Properties(-D) to set the specified config path\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","excerpt":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/specified-agent-config/","title":"Locate agent config file by system property"},{"body":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property ? In Default. The agent will try to locate agent.config, which should be in the /config dictionary of agent package. If User sets the specified agent config file through system properties, The agent will try to load file from there. By the way, This function has no conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content formats of the specified config must be same as the default config.\nUsing System.Properties(-D) to set the specified config path\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","excerpt":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/specified-agent-config/","title":"Locate agent config file by system property"},{"body":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","excerpt":"Log Analysis Log analyzer of OAP server supports native log data. 
OAP could use Log Analysis …","ref":"/docs/main/latest/en/setup/backend/log-analyzer/","title":"Log Analysis"},{"body":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","excerpt":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis …","ref":"/docs/main/next/en/setup/backend/log-analyzer/","title":"Log Analysis"},{"body":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","excerpt":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis …","ref":"/docs/main/v9.5.0/en/setup/backend/log-analyzer/","title":"Log Analysis"},{"body":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","excerpt":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis …","ref":"/docs/main/v9.6.0/en/setup/backend/log-analyzer/","title":"Log Analysis"},{"body":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","excerpt":"Log Analysis Log analyzer of OAP server supports native log data. 
OAP could use Log Analysis …","ref":"/docs/main/v9.7.0/en/setup/backend/log-analyzer/","title":"Log Analysis"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as correlate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under the directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the order they are declared.\nGlobal Functions Globally available functions may be used in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. The abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. The abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  The tag function provides a convenient way to get the value of a tag key.\nWe can add tags like the following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } 
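As a minimal illustration (not part of the LAL documentation itself), the Python sketch below builds a log record in the tagged JSON shape shown in the tag example, so a filter using tag("TEST_KEY") == "TEST_VALUE" would match it. The report endpoint, port, body text and service name are assumptions to verify against your own OAP deployment.

import json
import urllib.request

# Log record mirroring the documented tag example; body text and service are placeholders.
record = [{
    "tags": {"data": [{"key": "TEST_KEY", "value": "TEST_VALUE"}]},
    "body": {"text": {"text": "a plain text log line"}},
    "service": "your awesome service",
}]

# Assumption: OAP HTTP restful port 12800 with the native log report path /v3/logs.
req = urllib.request.Request(
    "http://127.0.0.1:12800/v3/logs",
    data=json.dumps(record).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
urllib.request.urlopen(req)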
Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text, parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  The regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp; all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the endpoint name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe parameter of timestamp can be a millisecond timestamp:\nfilter { // ... parser  extractor { timestamp parsed.time as String } } or a datetime string with a specified pattern:\nfilter { // ... 
parser  extractor { timestamp parsed.time as String, \u0026#34;yyyy-MM-dd HH:mm:ss\u0026#34; } }  layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/latest/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. 
Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe parameter of timestamp can be a millisecond:\nfilter { // ... parser  extractor { timestamp parsed.time as String } } or a datetime string with a specified pattern:\nfilter { // ... parser  extractor { timestamp parsed.time as String, \u0026#34;yyyy-MM-dd HH:mm:ss\u0026#34; } }  layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/next/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. 
Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\nParser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certains performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. 
Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service / instance.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
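# A hedged reading of the two rules below: tagEqual('level', ...) keeps only the
# log_count samples whose 'level' tag matches, sum(['service', 'instance']) aggregates
# them per service/instance, and increase('PT1M') turns the total into a per-minute delta.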
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99])Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. 
A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parserd.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\nParser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. 
For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certains performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99])Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. 
A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parserd.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\nParser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. 
For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the endpoint name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is milliseconds.\n layer  layer extracts the layer from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and sets them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nfilter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metric named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouped by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99])Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. 
A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. 
There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. 
SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. 
Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. 
You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. 
Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. 
Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as the property log in the LAL, so you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the order they are declared.\nGlobal Functions Globally available functions may be used in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. The abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. The abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  The tag function provides a convenient way to get the value of a tag key.\nWe can add tags like the following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text, parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser fails to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  The regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp; all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the endpoint name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is milliseconds.
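\nFor instance, assuming the parsed log carries an epoch-millisecond field named time (a hypothetical field name used only for illustration), a minimal sketch of the extraction would be:\nfilter {\n  // ... parser\n  extractor {\n    timestamp parsed.time as String // hypothetical field holding epoch milliseconds\n  }\n}\n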
 layer  layer extracts the layer from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with the service.\n tag  tag extracts the tags from the parsed result, and sets them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nfilter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. 
Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe parameter of timestamp can be a millisecond:\nfilter { // ... parser  extractor { timestamp parsed.time as String } } or a datetime string with a specified pattern:\nfilter { // ... parser  extractor { timestamp parsed.time as String, \u0026#34;yyyy-MM-dd HH:mm:ss\u0026#34; } }  layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","excerpt":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/lal/","title":"Log Analysis Language"},{"body":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following filebeat config yaml as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. 
Open kafka-fetcher and enable the config enableNativeJsonLog.\nTake the following Fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP (REST port). Point the output address to restHost:restPort of receiver-sharing-server or core (if receiver-sharing-server is inactivated).\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on SkyWalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/sidecar that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable the config enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer The log analyzer of the OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting, and saving logs. The analyzer also uses the Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","excerpt":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog …","ref":"/docs/main/v9.0.0/en/setup/backend/log-analyzer/","title":"Log Collection and Analysis"},{"body":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable the config enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable the config enableNativeJsonLog.\nTake the following Fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP (REST port). Point the output address to restHost:restPort of receiver-sharing-server or core (if receiver-sharing-server is inactivated).\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. A rough sketch of such a collector pipeline follows. 
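\nThe concrete collector configuration depends on your OpenTelemetry Collector distribution. As a hedged sketch only (it assumes a build that ships the filelog receiver and a SkyWalking log exporter; the exporter name and its options should be verified against the linked guide before use), the pipeline could look roughly like this:\n# otel-collector-config.yaml (illustrative only)\nreceivers:\n  filelog:\n    include: [ /var/log/app/*.log ]\nexporters:\n  skywalking:                  # assumed exporter name, verify against the SkyWalking Exporter doc\n    endpoint: oap:11800        # assumed OAP gRPC address\n    tls:\n      insecure: true\nservice:\n  pipelines:\n    logs:\n      receivers: [ filelog ]\n      exporters: [ skywalking ]\n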
Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","excerpt":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog …","ref":"/docs/main/v9.1.0/en/setup/backend/log-analyzer/","title":"Log Collection and Analysis"},{"body":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). 
When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","excerpt":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog …","ref":"/docs/main/v9.2.0/en/setup/backend/log-analyzer/","title":"Log Collection and Analysis"},{"body":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. 
The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","excerpt":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog …","ref":"/docs/main/v9.3.0/en/setup/backend/log-analyzer/","title":"Log Collection and Analysis"},{"body":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. 
The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","excerpt":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog …","ref":"/docs/main/v9.4.0/en/setup/backend/log-analyzer/","title":"Log Collection and Analysis"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  
// The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/latest/en/api/log-data-protocol/","title":"Log Data 
Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. 
The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/next/en/api/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, 
\u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.0.0/en/protocols/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: 
\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.1.0/en/protocols/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative 
Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.2.0/en/protocols/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.3.0/en/protocols/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  
// Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.4.0/en/api/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. 
message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.5.0/en/api/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. 
message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.6.0/en/api/log-data-protocol/","title":"Log Data Protocol"},{"body":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. 
message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","excerpt":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log …","ref":"/docs/main/v9.7.0/en/api/log-data-protocol/","title":"Log Data Protocol"},{"body":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender 
name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Support logback AsyncAppender(MDC also support), No additional configuration is required. Refer to the demo of logback.xml below. For details: Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, logback will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %tid or %X{tid]} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to active the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. 
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  set LogstashEncoder of logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  set LoggingEventCompositeJsonEncoder of logstash in logback-spring.xml for custom json format  1.add converter for %tid or %sw_ctx as child of  node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2.add json encoder for custom json format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/","title":"logback plugin"},{"body":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; 
\u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Support logback AsyncAppender(MDC also support), No additional configuration is required. Refer to the demo of logback.xml below. For details: Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, logback will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %tid or %X{tid]} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to active the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. 
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  set LogstashEncoder of logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  set LoggingEventCompositeJsonEncoder of logstash in logback-spring.xml for custom json format  1.add converter for %tid or %sw_ctx as child of  node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2.add json encoder for custom json format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
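As a reference for the step below, once the grpc-log appender is declared it is attached like any other logback appender; a minimal, illustrative wiring, assuming the appender keeps the grpc-log name used in the following snippet and that a STDOUT console appender is also defined:

```xml
<!-- Sketch only: reference the gRPC appender (declared below) from the root logger. -->
<root level="INFO">
    <!-- console output, assuming a STDOUT appender is defined elsewhere -->
    <appender-ref ref="STDOUT"/>
    <!-- ships each log entry to the OAP server or Satellite via the toolkit's gRPC reporter -->
    <appender-ref ref="grpc-log"/>
</root>
```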
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/","title":"logback plugin"},{"body":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; 
\u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Support logback AsyncAppender(MDC also support), No additional configuration is required. Refer to the demo of logback.xml below. For details: Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, logback will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %tid or %X{tid]} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to active the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. 
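For instance, printing the full SkyWalking context instead of only the trace ID is just a change of conversion word in the Pattern; an illustrative layout, assuming (as the "only replace %tid with %sw_ctx" instruction suggests) that the same layout classes shown above handle both conversion words:

```xml
<layout class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout">
    <!-- %sw_ctx expands to SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId] -->
    <Pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%sw_ctx] [%thread] %-5level %logger{36} -%msg%n</Pattern>
</layout>
```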
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  set LogstashEncoder of logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  set LoggingEventCompositeJsonEncoder of logstash in logback-spring.xml for custom json format  1.add converter for %tid or %sw_ctx as child of  node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2.add json encoder for custom json format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
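The un-formatted transmission mode described further below also records a thrown exception as an extra exception tag; a hypothetical call that would produce both argument tags and that exception tag (the log and endpoint variables and the message are assumed to be in scope and purely illustrative):

```java
// With plugin.toolkit.log.transmit_formatted=false, the reported content stays
// "call to {} failed", an argument.0 tag carries the endpoint value, and an
// exception tag is added because a throwable is passed as the last argument.
log.warn("call to {} failed", endpoint, new IllegalStateException("connection timed out"));
```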
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/","title":"logback plugin"},{"body":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; 
\u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Support logback AsyncAppender(MDC also support), No additional configuration is required. Refer to the demo of logback.xml below. For details: Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, logback will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %tid or %X{tid]} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to active the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. 
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  set LogstashEncoder of logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  set LoggingEventCompositeJsonEncoder of logstash in logback-spring.xml for custom json format  1.add converter for %tid or %sw_ctx as child of  node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2.add json encoder for custom json format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
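Since the plugin setting shown below uses the ${ENV:default} placeholder form, the gRPC reporter's message-size cap can also be tuned per environment without editing the agent configuration; the value here is purely illustrative:

```
# illustrative: lower the cap to 2 MB for this deployment
SW_GRPC_LOG_MAX_MESSAGE_SIZE=2097152
```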
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/","title":"logback plugin"},{"body":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; 
\u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Support logback AsyncAppender(MDC also support), No additional configuration is required. Refer to the demo of logback.xml below. For details: Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, logback will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %tid or %X{tid]} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to active the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. 
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  set LogstashEncoder of logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  set LoggingEventCompositeJsonEncoder of logstash in logback-spring.xml for custom json format  1.add converter for %tid or %sw_ctx as child of  node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2.add json encoder for custom json format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","excerpt":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/","title":"logback plugin"},{"body":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current service. It currently supports the recognition of Logrus and Zap frameworks. If neither of these frameworks is present, it would output logs using Std Error.\nYou can learn about the configuration details through the \u0026ldquo;log\u0026rdquo; configuration item in the default settings.\nLogging Detection Log detection means that the logging plugin would automatically detect the usage of logs in your application. When the log type is set to auto, it would choose the appropriate log based on the creation rules of different frameworks. The selection rules vary depending on the framework:\n Logrus: It automatically selects the current logger when executing functions such as logrus.New, logger.SetOutput, or logger.SetFormatter. Zap: It automatically selects the current logger when executing functions such as zap.New, zap.NewNop, zap.NewProduction, zap.NewDevelopment, or zap.NewExample.  If there are multiple different logging systems in your current application, the last-called logging system would be chosen.\nThe configuration information is as follows:\n   Name Environment Key Default Value Description     log.type SW_LOG_TYPE auto The type of logging system. It currently supports auto, logrus, zap, and std.    
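For example, to pin the Go agent to one logging framework instead of relying on auto-detection, the type can be set through the environment key from the table above; any of the supported values works, and zap is only an illustration:

```
SW_LOG_TYPE=zap
```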
Agent with Logging system The integration of the Agent with logs includes the two parts as following.\n Integrating Agent logs into the Service: Integrating the logs from the Agent into the framework used by the service. Integrating Tracing information into the Service: Integrating the information from Tracing into the service logs.  Agent logs into the Service Agent logs output the current running status of the Agent system, most of which are execution exceptions. For example, communication anomalies between the Agent and the backend service, plugin execution exceptions, etc.\nIntegrating Agent logs into the service\u0026rsquo;s logging system can effectively help users quickly troubleshoot whether there are issues with the current Agent execution.\nTracing information into the Service The Agent would also enhance the existing logging system. When the service outputs log, if the current goroutine contains Tracing data, it would be outputted together with the current logs. This helps users to quickly locate the link based on the Tracing data.\nTracing data The Tracing includes the following information:\n ServiceName: Current service name. ServiceInstanceName: Current service instance name. TraceID: The current Trace ID. If there is no link, it outputs N/A. SegmentID: The Segment ID in the current Trace. If there is no link, it outputs N/A. SpanID: The Span ID currently being operated on. If there is no link, it outputs -1.  The output format is as follows: [${ServiceName},${ServiceInstanceName},${TraceID},${SegmentID},${SpanID}].\nThe following is an example of a log output when using Zap.NewProduction:\n{\u0026quot;level\u0026quot;:\u0026quot;info\u0026quot;,\u0026quot;ts\u0026quot;:1683641507.052247,\u0026quot;caller\u0026quot;:\u0026quot;gin/main.go:45\u0026quot;,\u0026quot;msg\u0026quot;:\u0026quot;test log\u0026quot;,\u0026quot;SW_CTX\u0026quot;:\u0026quot;[Your_ApplicationName,681e4178ee7311ed864facde48001122@192.168.50.193,6f13069eee7311ed864facde48001122,6f13070cee7311ed864facde48001122,0]\u0026quot;} The configuration information is as follows:\n   Name Environment Key Default Value Description     log.tracing.enable SW_AGENT_LOG_TRACING_ENABLE true Whether to automatically integrate Tracing information into the logs.   log.tracing.key SW_AGENT_LOG_TRACING_KEY SW_CTX The key of the Tracing information in the log.    Log Upload The Agent would report the following two types of logs to the SkyWalking backend for storage and querying:\n Application Logs: It provides support for various logging frameworks and reports logs along with the corresponding distributed tracing information related to the current request. Only the relevant logs matching the current system log level would be output. Agent Logs: These are the logs generated by the Agent itself.  The current configuration options available are as follows:\n   Name Environment Key Default Value Description     log.reporter.enable SW_LOG_REPORTER_ENABLE true Whether to enable log reporting.   log.reporter.label_keys SW_LOG_REPORTER_LABEL_KEYS  By default, all fields are not reported. To specify the fields that need to be reported, please provide a comma-separated list of configuration item keys.    ","excerpt":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current …","ref":"/docs/skywalking-go/latest/en/advanced-features/logging-setup/","title":"Logging Setup"},{"body":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current service. 
It currently supports the recognition of Logrus and Zap frameworks. If neither of these frameworks is present, it would output logs using Std Error.\nYou can learn about the configuration details through the \u0026ldquo;log\u0026rdquo; configuration item in the default settings.\nLogging Detection Log detection means that the logging plugin would automatically detect the usage of logs in your application. When the log type is set to auto, it would choose the appropriate log based on the creation rules of different frameworks. The selection rules vary depending on the framework:\n Logrus: It automatically selects the current logger when executing functions such as logrus.New, logger.SetOutput, or logger.SetFormatter. Zap: It automatically selects the current logger when executing functions such as zap.New, zap.NewNop, zap.NewProduction, zap.NewDevelopment, or zap.NewExample.  If there are multiple different logging systems in your current application, the last-called logging system would be chosen.\nThe configuration information is as follows:\n   Name Environment Key Default Value Description     log.type SW_LOG_TYPE auto The type of logging system. It currently supports auto, logrus, zap, and std.    Agent with Logging system The integration of the Agent with logs includes the two parts as following.\n Integrating Agent logs into the Service: Integrating the logs from the Agent into the framework used by the service. Integrating Tracing information into the Service: Integrating the information from Tracing into the service logs.  Agent logs into the Service Agent logs output the current running status of the Agent system, most of which are execution exceptions. For example, communication anomalies between the Agent and the backend service, plugin execution exceptions, etc.\nIntegrating Agent logs into the service\u0026rsquo;s logging system can effectively help users quickly troubleshoot whether there are issues with the current Agent execution.\nTracing information into the Service The Agent would also enhance the existing logging system. When the service outputs log, if the current goroutine contains Tracing data, it would be outputted together with the current logs. This helps users to quickly locate the link based on the Tracing data.\nTracing data The Tracing includes the following information:\n ServiceName: Current service name. ServiceInstanceName: Current service instance name. TraceID: The current Trace ID. If there is no link, it outputs N/A. SegmentID: The Segment ID in the current Trace. If there is no link, it outputs N/A. SpanID: The Span ID currently being operated on. If there is no link, it outputs -1.  The output format is as follows: [${ServiceName},${ServiceInstanceName},${TraceID},${SegmentID},${SpanID}].\nThe following is an example of a log output when using Zap.NewProduction:\n{\u0026quot;level\u0026quot;:\u0026quot;info\u0026quot;,\u0026quot;ts\u0026quot;:1683641507.052247,\u0026quot;caller\u0026quot;:\u0026quot;gin/main.go:45\u0026quot;,\u0026quot;msg\u0026quot;:\u0026quot;test log\u0026quot;,\u0026quot;SW_CTX\u0026quot;:\u0026quot;[Your_ApplicationName,681e4178ee7311ed864facde48001122@192.168.50.193,6f13069eee7311ed864facde48001122,6f13070cee7311ed864facde48001122,0]\u0026quot;} The configuration information is as follows:\n   Name Environment Key Default Value Description     log.tracing.enable SW_AGENT_LOG_TRACING_ENABLE true Whether to automatically integrate Tracing information into the logs.   
log.tracing.key SW_AGENT_LOG_TRACING_KEY SW_CTX The key of the Tracing information in the log.    Log Upload The Agent would report the following two types of logs to the SkyWalking backend for storage and querying:\n Application Logs: It provides support for various logging frameworks and reports logs along with the corresponding distributed tracing information related to the current request. Only the relevant logs matching the current system log level would be output. Agent Logs: These are the logs generated by the Agent itself.  The current configuration options available are as follows:\n   Name Environment Key Default Value Description     log.reporter.enable SW_LOG_REPORTER_ENABLE true Whether to enable log reporting.   log.reporter.label_keys SW_LOG_REPORTER_LABEL_KEYS  By default, all fields are not reported. To specify the fields that need to be reported, please provide a comma-separated list of configuration item keys.    ","excerpt":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current …","ref":"/docs/skywalking-go/next/en/advanced-features/logging-setup/","title":"Logging Setup"},{"body":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current service. It currently supports the recognition of Logrus and Zap frameworks. If neither of these frameworks is present, it would output logs using Std Error.\nYou can learn about the configuration details through the \u0026ldquo;log\u0026rdquo; configuration item in the default settings.\nLogging Detection Log detection means that the logging plugin would automatically detect the usage of logs in your application. When the log type is set to auto, it would choose the appropriate log based on the creation rules of different frameworks. The selection rules vary depending on the framework:\n Logrus: It automatically selects the current logger when executing functions such as logrus.New, logger.SetOutput, or logger.SetFormatter. Zap: It automatically selects the current logger when executing functions such as zap.New, zap.NewNop, zap.NewProduction, zap.NewDevelopment, or zap.NewExample.  If there are multiple different logging systems in your current application, the last-called logging system would be chosen.\nThe configuration information is as follows:\n   Name Environment Key Default Value Description     log.type SW_LOG_TYPE auto The type of logging system. It currently supports auto, logrus, zap, and std.    Agent with Logging system The integration of the Agent with logs includes the two parts as following.\n Integrating Agent logs into the Service: Integrating the logs from the Agent into the framework used by the service. Integrating Tracing information into the Service: Integrating the information from Tracing into the service logs.  Agent logs into the Service Agent logs output the current running status of the Agent system, most of which are execution exceptions. For example, communication anomalies between the Agent and the backend service, plugin execution exceptions, etc.\nIntegrating Agent logs into the service\u0026rsquo;s logging system can effectively help users quickly troubleshoot whether there are issues with the current Agent execution.\nTracing information into the Service The Agent would also enhance the existing logging system. When the service outputs log, if the current goroutine contains Tracing data, it would be outputted together with the current logs. 
This helps users to quickly locate the link based on the Tracing data.\nTracing data The Tracing includes the following information:\n ServiceName: Current service name. ServiceInstanceName: Current service instance name. TraceID: The current Trace ID. If there is no link, it outputs N/A. SegmentID: The Segment ID in the current Trace. If there is no link, it outputs N/A. SpanID: The Span ID currently being operated on. If there is no link, it outputs -1.  The output format is as follows: [${ServiceName},${ServiceInstanceName},${TraceID},${SegmentID},${SpanID}].\nThe following is an example of a log output when using Zap.NewProduction:\n{\u0026quot;level\u0026quot;:\u0026quot;info\u0026quot;,\u0026quot;ts\u0026quot;:1683641507.052247,\u0026quot;caller\u0026quot;:\u0026quot;gin/main.go:45\u0026quot;,\u0026quot;msg\u0026quot;:\u0026quot;test log\u0026quot;,\u0026quot;SW_CTX\u0026quot;:\u0026quot;[Your_ApplicationName,681e4178ee7311ed864facde48001122@192.168.50.193,6f13069eee7311ed864facde48001122,6f13070cee7311ed864facde48001122,0]\u0026quot;} The configuration information is as follows:\n   Name Environment Key Default Value Description     log.tracing.enable SW_AGENT_LOG_TRACING_ENABLE true Whether to automatically integrate Tracing information into the logs.   log.tracing.key SW_AGENT_LOG_TRACING_KEY SW_CTX The key of the Tracing information in the log.    Log Upload The Agent would report the following two types of logs to the SkyWalking backend for storage and querying:\n Application Logs: It provides support for various logging frameworks and reports logs along with the corresponding distributed tracing information related to the current request. Only the relevant logs matching the current system log level would be output. Agent Logs: These are the logs generated by the Agent itself.  The current configuration options available are as follows:\n   Name Environment Key Default Value Description     log.reporter.enable SW_LOG_REPORTER_ENABLE true Whether to enable log reporting.   log.reporter.label_keys SW_LOG_REPORTER_LABEL_KEYS  By default, all fields are not reported. To specify the fields that need to be reported, please provide a comma-separated list of configuration item keys.    ","excerpt":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current …","ref":"/docs/skywalking-go/v0.4.0/en/advanced-features/logging-setup/","title":"Logging Setup"},{"body":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. 
label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","excerpt":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL …","ref":"/docs/main/latest/en/api/logql-service/","title":"LogQL Service"},{"body":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. 
If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","excerpt":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL …","ref":"/docs/main/next/en/api/logql-service/","title":"LogQL Service"},{"body":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. 
If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","excerpt":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL …","ref":"/docs/main/v9.6.0/en/api/logql-service/","title":"LogQL Service"},{"body":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. 
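A range query can be issued the same way. The sketch below is illustrative only: "agent::songs" is the example service name used on this page, the "error" line filter is an arbitrary keyword, and the host/port are assumptions for a local deployment.

```python
# Sketch: a range query against the Loki-compatible endpoint described above.
import time
import requests

BASE = "http://localhost:3100"          # assumed LogQL service address
end = int(time.time() * 1_000_000_000)  # nanosecond timestamps
start = end - 30 * 60 * 1_000_000_000   # last 30 minutes

params = {
    # stream selector plus a line filter, as in the expressions shown above
    "query": '{service="agent::songs"} |= "error"',
    "start": start,
    "end": end,
    "limit": 100,
    "direction": "BACKWARD",            # newest log lines first
}
resp = requests.get(f"{BASE}/loki/api/v1/query_range", params=params).json()
for stream in resp["data"]["result"]:
    labels = stream["stream"]                  # e.g. service, service_instance
    for ts, line in stream["values"]:          # [timestamp_ns, log content]
        print(labels.get("service_instance"), ts, line)
```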
If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","excerpt":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL …","ref":"/docs/main/v9.7.0/en/api/logql-service/","title":"LogQL Service"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  Below is the archived list.\n Go2Sky. Since Jun 14, 2023.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. 
…","ref":"/docs/main/latest/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  Below is the archived list.\n Go2Sky. Since Jun 14, 2023.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. …","ref":"/docs/main/next/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK. …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK. …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK. …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. 
…","ref":"/docs/main/v9.3.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  Below is the archived list.\n Go2Sky. Since Jun 14, 2023.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","excerpt":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/manual-sdk/","title":"Manual instrument SDK"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. 
But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/latest/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/next/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an important role in today\u0026rsquo;s distributed system, in order to reduce the length and latency of blocking RPC, and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 , to provide performance monitoring for Message Queue system.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an important …","ref":"/docs/main/v9.0.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. 
But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.1.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.2.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.3.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. 
But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.4.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.5.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.6.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. 
But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","excerpt":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential …","ref":"/docs/main/v9.7.0/en/setup/backend/mq/","title":"Message Queue performance and consuming latency monitoring"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. 
Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/latest/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
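The tag and value filters above are MAL expressions evaluated inside the OAP. The standalone Python sketch below only models their semantics on the instance_trace_count samples shown earlier; it is illustrative, not SkyWalking code, and the exact regex anchoring of tagMatch is an assumption here.

```python
import re

# Samples from the instance_trace_count example above: (tags, value) pairs.
samples = [
    ({"region": "us-west",    "az": "az-1"}, 100),
    ({"region": "us-east",    "az": "az-3"}, 20),
    ({"region": "asia-north", "az": "az-1"}, 33),
]

def tag_equal(samples, key, value):
    return [s for s in samples if s[0].get(key) == value]

def tag_match(samples, key, pattern):
    # fullmatch is assumed here; the real implementation may anchor differently
    return [s for s in samples if re.fullmatch(pattern, s[0].get(key, ""))]

def value_greater_equal(samples, threshold):
    return [s for s in samples if s[1] >= threshold]

# In the spirit of:
# instance_trace_count.tagMatch("region", "us-west|asia-north").tagEqual("az", "az-1")
print(tag_equal(tag_match(samples, "region", "us-west|asia-north"), "az", "az-1"))

# In the spirit of: instance_trace_count.valueGreaterEqual(33)
print(value_greater_equal(samples, 33))
```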
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
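The matching rule just described, pairing samples by their shared tags and dropping samples without a counterpart, can be modelled in a few lines of Python. This is purely illustrative, under the assumption that a sample is a tag combination plus a value.

```python
# Illustrative model of "sampleFamily / sampleFamily": samples are paired by
# identical tag sets; samples on the left with no match on the right are dropped.
trace_count = {
    ("us-west", "az-1"):    100,
    ("us-east", "az-3"):    20,
    ("asia-north", "az-1"): 33,
}
error_count = {
    ("us-west", "az-1"):    20,
    ("asia-north", "az-1"): 11,
}

error_rate = {
    tags: error_count[tags] / trace_count[tags]
    for tags in error_count          # left operand; only matched tag sets survive
    if tags in trace_count
}
print(error_rate)
# {('us-west', 'az-1'): 0.2, ('asia-north', 'az-1'): 0.3333...}
```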
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions) count (calculate the count over dimensions, the last tag will be counted)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20  Note, aggregation operations affect the samples from one bulk only. If the metrics are reported parallel from multiple instances/nodes through different SampleFamily, this aggregation would NOT work.\nIn the best practice for this scenario, build the metric with labels that represent each instance/node. Then use the AggregateLabels Operation in MQE to aggregate the metrics.\n Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. 
le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN MAX MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. 
The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/next/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  
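For completeness, the sum(by=['az']) aggregation shown earlier on this page can be modelled the same way. The sketch below only mirrors the described semantics (keep the listed tags, sum values that collapse onto the same tag combination); it is not the MAL implementation.

```python
from collections import defaultdict

# instance_trace_count samples: ({tags}, value), as in the examples above.
samples = [
    ({"region": "us-west",    "az": "az-1"}, 100),
    ({"region": "us-east",    "az": "az-3"}, 20),
    ({"region": "asia-north", "az": "az-1"}, 33),
]

def sum_by(samples, keep_tags):
    """Model of <sample family>.sum(by=[...]): drop all other tags and
    sum the values of samples sharing the remaining tag combination."""
    out = defaultdict(int)
    for tags, value in samples:
        key = tuple((t, tags[t]) for t in keep_tags)
        out[key] += value
    return dict(out)

print(sum_by(samples, ["az"]))
# {(('az', 'az-1'),): 133, (('az', 'az-3'),): 20}
```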
For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. 
Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. The sample with region us-east and az az-3 has no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate over all label dimensions or to preserve distinct dimensions by passing the by parameter.\n\u0026lt;aggr-op\u0026gt;(by: \u0026lt;tag1, tag2, ...\u0026gt;) Example expression:\ninstance_trace_count.sum(by: ['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates the tags of samples. Users can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. The le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). 
Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nDown Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument.  More Examples Please refer to OAP Self-Observability\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  
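A brief illustrative sketch of exact-match chaining (tag values taken from the samples above): instance_trace_count.tagEqual('region', 'us-east').tagEqual('az', 'az-3') keeps only the single us-east sample.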
For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. 
Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter.\n\u0026lt;aggr-op\u0026gt;(by: \u0026lt;tag1, tag2, ...\u0026gt;) Example expression:\ninstance_trace_count.sum(by: ['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). 
Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nDown Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument.  More Examples Please refer to OAP Self-Observability\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. 
For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. 
For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter.\n\u0026lt;aggr-op\u0026gt;(by: \u0026lt;tag1, tag2, ...\u0026gt;) Example expression:\ninstance_trace_count.sum(by: ['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. 
The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. 
endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  More Examples Please refer to OAP Self-Observability\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
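Value filters can also be chained to bound samples on both sides. As a sketch using the sample values above, instance_trace_count.valueGreaterEqual(20).valueLess(100) keeps only the samples with values 20 and 33.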
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
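As another small sketch (reading valueNotEqual as the complement of valueEqual), instance_trace_count.valueNotEqual(20) drops the sample whose value is exactly 20 and keeps the 100 and 33 samples.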
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It refers not only to aggregating raw samples to the minute level, but also to building data at higher levels, such as hour and day, from the minute level.\nThe down sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function These functions extract level-relevant labels from metric labels, then inform the meter-system of the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_label) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts the layer label from the fourth argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint can be DetectPoint.CLIENT or DetectPoint.SERVER; extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts the DetectPoint label from the first argument; the label value should be client or server. Extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments for the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here.\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
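The schema below is rendered inline and is hard to scan, so here is a minimal, hedged sketch of a rule file that follows it. The metric name, the service label, and the GENERAL layer are assumptions; the filter closure reuses the example from the schema itself:

```yaml
# Hypothetical sketch of a rule file following the schema below.
filter: '{ tags -> tags.job_name == "vm-monitoring" }'   # the example filter from the schema
expSuffix: "service(['service'], Layer.GENERAL)"          # appended to every exp in this file
metricsRules:
  - name: vm_cpu_average_used                              # combined with the 'meter_' prefix in storage
    exp: node_cpu_seconds_total.sum(['service']).rate('PT1M')
```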
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
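As a further hedged sketch, these filters can be combined with the aggregation and down sampling operations described later in this entry. The rule name is an assumption; the metric and the az tag come from the samples above:

```yaml
# Hypothetical sketch: aggregate per availability zone and keep the latest
# value when the meter system downsamples to hour/day granularity.
metricsRules:
  - name: instance_trace_count_per_az
    exp: instance_trace_count.sum(['az']).downsampling(LATEST)
```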
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
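The metric level functions listed above are usually attached through expSuffix in the schema that follows. In this hedged sketch, the service/instance label names, the K8S layer choice, and the metric name are assumptions, while the propertiesExtractor closure mirrors the documented example:

```yaml
# Hypothetical sketch: bind every rule in this file to a service instance,
# carrying pod and namespace along as instance properties.
expSuffix: "instance(['service'], ['instance'], Layer.K8S, { tags -> ['pod': tags.pod, 'namespace': tags.namespace] })"
metricsRules:
  - name: container_cpu_used
    exp: container_cpu_usage_seconds_total.sum(['service', 'instance']).rate('PT1M')
```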
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
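As another hedged sketch, the histogram and histogram_percentile functions described later in this entry typically pair with the filters above when analyzing Prometheus-style bucket metrics. The metric name, the service label, and the percentile list are assumptions:

```yaml
# Hypothetical sketch: convert upper-bound ('le') buckets into meter-system
# histogram buckets, then compute a few common percentiles.
metricsRules:
  - name: http_request_duration_percentile
    exp: "http_request_duration_seconds_bucket.sum(['le', 'service']).histogram(le: 'le').histogram_percentile([50, 75, 90, 99])"
```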
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
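One more hedged sketch before the schema below: the ISO-8601 Duration strings listed above drive increase and rate. Here the one-minute window and the rule name are assumptions, and the metric reuses a sample family from this entry:

```yaml
# Hypothetical sketch: per-minute increase of an error counter, aggregated per az.
metricsRules:
  - name: instance_trace_analysis_errors_per_minute
    exp: instance_trace_analysis_error_count.sum(['az']).increase('PT1M')
```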
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","excerpt":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/mal/","title":"Meter Analysis Language"},{"body":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  
string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. Zabbix receiver  ","excerpt":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as …","ref":"/docs/main/latest/en/api/meter/","title":"Meter APIs"},{"body":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. 
OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. Zabbix receiver  ","excerpt":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as …","ref":"/docs/main/next/en/api/meter/","title":"Meter APIs"},{"body":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","excerpt":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as …","ref":"/docs/main/v9.4.0/en/api/meter/","title":"Meter APIs"},{"body":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","excerpt":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as …","ref":"/docs/main/v9.5.0/en/api/meter/","title":"Meter APIs"},{"body":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","excerpt":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as …","ref":"/docs/main/v9.6.0/en/api/meter/","title":"Meter APIs"},{"body":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","excerpt":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as …","ref":"/docs/main/v9.7.0/en/api/meter/","title":"Meter APIs"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/latest/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from the service and service instance names defined in the SkyWalking Agent, for identification of the metric data.\nThe following known API libs report meter telemetry data:\n SkyWalking Java Meter toolkit APIs Spring MicroMeter Observations APIs, which works with the OAP MicroMeter Observations setup  Agents Bundled Meters All of the following agents and components have built-in meters reporting to the OAP through the Meter APIs.\n Go agent for Go VM metrics Python agent for PVM metrics Java agent with Spring micrometer toolkit Java agent for datasource metrics Java agent for thread-pool metrics Rover (eBPF) agent for metrics used in continuous profiling Satellite proxy self-observability metrics  Configuration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files are NOT enabled by default; you should activate the meter configuration through the agent-analyzer section in application.yml of the SkyWalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# Multiple files should be separated by \u0026#34;,\u0026#34;The meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics execute other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expressions in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rules allow you to recompute queries.metricsRules:# The name of the rule, which combines with the prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; to form the index/table name in storage.# The name with prefix can also be quoted in the UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw names of the collected custom metrics can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, and increase functions in the backend, we still recommend that users consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  
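To make the scheme above concrete, a minimal rule file might look like the sketch below. Only the keys (initExp, filter, expPrefix, expSuffix, metricPrefix, metricsRules, name, exp) and the filter example come from this page; the file name, metric names, scope suffix, and MAL expressions are hypothetical and would need to be adapted to the metrics you actually collect:

# your-custom-meter-conf.yaml (hypothetical name; activate it via meterAnalyzerActiveFiles, without the extension)
expSuffix: instance(['service'], ['instance'], Layer.GENERAL)  # assumed scope suffix; pick the one matching your entities
metricPrefix: meter_example
filter: '{ tags -> tags.job_name == "vm-monitoring" }'
metricsRules:
  # stored as meter_example_jvm_memory_used; the raw metric name below is made up
  - name: jvm_memory_used
    exp: jvm_memory_used_bytes.sum(['service', 'instance'])
  # a backend-side rate over a one-minute window, as discussed above (client-side calculation is still preferred)
  - name: http_requests_rate
    exp: http_requests_total.rate('PT1M').sum(['service', 'instance'])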
","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/next/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by Manual Meter API. Custom metrics collected cannot be used directly, they should be configured in meter-analyzer-config configuration files, which is described in next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. 
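To make the OpenTelemetry type mapping above concrete, consider a hypothetical Histogram metric named http_server_duration (the name is invented for illustration). According to the table, it would arrive in the meter system as:

http_server_duration        # MeterHistogram (bucket counts)
http_server_duration_sum    # MeterSingleValue
http_server_duration_count  # MeterSingleValue

A Summary of the same name would instead arrive as one MeterSingleValue per quantile (each tagged with quantile) plus the same _sum and _count single values.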
If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. 
The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. 
…","ref":"/docs/main/v9.2.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. 
If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   
MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. 
The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. 
If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","excerpt":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system. …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-meter/","title":"Meter receiver"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. 
Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/latest/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/next/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. 
The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. 
Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. 
In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. 
percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. 
The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","excerpt":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/meter/","title":"Meter System"},{"body":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","excerpt":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-micrometer/","title":"Metrics"},{"body":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
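(To make the conversion rules above concrete, the following sketch, with illustrative meter and tag names, registers a Micrometer Timer against the SkywalkingMeterRegistry. Per the snake-case rule and the mapping table, a timer named test.timer would be reported as test_timer_count, test_timer_sum, test_timer_max and, when a histogram is configured for it, test_timer_histogram, with durations in milliseconds.)

import java.time.Duration;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Timer;
import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry;

// Register a timer through the SkyWalking registry; derived meter names follow the table above.
MeterRegistry registry = new SkywalkingMeterRegistry();
Timer timer = Timer.builder("test.timer").tag("path", "/orders").register(registry);
timer.record(Duration.ofMillis(42));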
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","excerpt":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer/","title":"Metrics"},{"body":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","excerpt":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-micrometer/","title":"Metrics"},{"body":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","excerpt":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-micrometer/","title":"Metrics"},{"body":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","excerpt":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-micrometer/","title":"Metrics"},{"body":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and analysis. In the real world, many may want to forward their data to a 3rd party system for an in-depth analysis or otherwise. Metrics Exporter has made that possible.\nMetrics exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporters:\n gRPC exporter  gRPC exporter gRPC exporter uses SkyWalking\u0026rsquo;s native exporter service definition. Here is the proto definition.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}message ExportMetricValue { string metricName = 1; string entityName = 2; string entityId = 3; ValueType type = 4; int64 timeBucket = 5; int64 longValue = 6; double doubleValue = 7; repeated int64 longValues = 8;}message SubscriptionsResp { repeated SubscriptionMetric metrics = 1;}message SubscriptionMetric { string metricName = 1; EventType eventType = 2;}enum ValueType { LONG = 0; DOUBLE = 1; MULTI_LONG = 2;}enum EventType { // The metrics aggregated in this bulk, not include the existing persistent data.  INCREMENT = 0; // Final result of the metrics at this moment.  TOTAL = 1;}message SubscriptionReq {}message ExportResponse {}To activate the exporter, you should add this into your application.yml\nexporter:grpc:targetHost:127.0.0.1targetPort:9870 targetHost:targetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  
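As a sketch of the receiving side, the class below assumes stubs generated from the proto above with the standard protobuf/gRPC Java code generators; the generated class and package names depend on the proto's Java options, so treat them as placeholders. It subscribes to one metric in incremental mode and prints every exported value. Binding it to a gRPC server on the configured targetHost:targetPort (9870 in the example) before the OAP starts satisfies the standby requirement.

import io.grpc.stub.StreamObserver;

public class SampleMetricExporter extends MetricExportServiceGrpc.MetricExportServiceImplBase {
    @Override
    public void subscription(SubscriptionReq request, StreamObserver<SubscriptionsResp> responseObserver) {
        // Subscribe to service_cpm as INCREMENT; returning an empty list would export all metrics incrementally.
        responseObserver.onNext(SubscriptionsResp.newBuilder()
            .addMetrics(SubscriptionMetric.newBuilder()
                .setMetricName("service_cpm")
                .setEventType(EventType.INCREMENT))
            .build());
        responseObserver.onCompleted();
    }

    @Override
    public StreamObserver<ExportMetricValue> export(StreamObserver<ExportResponse> responseObserver) {
        return new StreamObserver<ExportMetricValue>() {
            @Override
            public void onNext(ExportMetricValue value) {
                // Follow #type to decide which value field carries the data.
                if (value.getType() == ValueType.LONG) {
                    System.out.println(value.getMetricName() + "/" + value.getEntityName() + " = " + value.getLongValue());
                }
            }

            @Override
            public void onError(Throwable t) {
                // Ignored in this sketch.
            }

            @Override
            public void onCompleted() {
                responseObserver.onNext(ExportResponse.newBuilder().build());
                responseObserver.onCompleted();
            }
        };
    }
}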
Target exporter service Subscription implementation Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\nExport implementation Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n","excerpt":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and …","ref":"/docs/main/v9.0.0/en/setup/backend/metrics-exporter/","title":"Metrics Exporter"},{"body":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and analysis. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Metrics Exporter has made that possible.\nThe metrics exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporters:\n gRPC exporter  gRPC exporter gRPC exporter uses SkyWalking\u0026rsquo;s native exporter service definition. Here is the proto definition.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}message ExportMetricValue { string metricName = 1; string entityName = 2; string entityId = 3; ValueType type = 4; int64 timeBucket = 5; int64 longValue = 6; double doubleValue = 7; repeated int64 longValues = 8;}message SubscriptionsResp { repeated SubscriptionMetric metrics = 1;}message SubscriptionMetric { string metricName = 1; EventType eventType = 2;}enum ValueType { LONG = 0; DOUBLE = 1; MULTI_LONG = 2;}enum EventType { // The metrics aggregated in this bulk, not include the existing persistent data.  INCREMENT = 0; // Final result of the metrics at this moment.  TOTAL = 1;}message SubscriptionReq {}message ExportResponse {}To activate the exporter, you should add this into your application.yml\nexporter:grpc:targetHost:127.0.0.1targetPort:9870 targetHost:targetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service Subscription implementation Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\nExport implementation Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n","excerpt":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and …","ref":"/docs/main/v9.1.0/en/setup/backend/metrics-exporter/","title":"Metrics Exporter"},{"body":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and analysis. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. 
Metrics Exporter has made that possible.\nThe metrics exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporters:\n gRPC exporter  gRPC exporter gRPC exporter uses SkyWalking\u0026rsquo;s native exporter service definition. Here is the proto definition.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}message ExportMetricValue { string metricName = 1; string entityName = 2; string entityId = 3; ValueType type = 4; int64 timeBucket = 5; int64 longValue = 6; double doubleValue = 7; repeated int64 longValues = 8;}message SubscriptionsResp { repeated SubscriptionMetric metrics = 1;}message SubscriptionMetric { string metricName = 1; EventType eventType = 2;}enum ValueType { LONG = 0; DOUBLE = 1; MULTI_LONG = 2;}enum EventType { // The metrics aggregated in this bulk, not include the existing persistent data.  INCREMENT = 0; // Final result of the metrics at this moment.  TOTAL = 1;}message SubscriptionReq {}message ExportResponse {}To activate the exporter, you should add this into your application.yml\nexporter:grpc:targetHost:127.0.0.1targetPort:9870 targetHost:targetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service Subscription implementation Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\nExport implementation Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n","excerpt":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and …","ref":"/docs/main/v9.2.0/en/setup/backend/metrics-exporter/","title":"Metrics Exporter"},{"body":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics For now, we only have a single anonymous label with multi label values in a labeled metric. To be able to use it in expressions, define _ as the anonymous label name (key).\nExpression:\n\u0026lt;metric_name\u0026gt;{_=\u0026#39;\u0026lt;label_value_1\u0026gt;,...\u0026#39;} {_='\u0026lt;label_value_1\u0026gt;,...'} is the selected label value of the metric. 
If is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, we can use the following expression:\nservice_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. 
The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the 
ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results.\nExpression:\nrelabel(Expression, _=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) _ is the new label of the metric after the label is relabeled, the order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, and rename the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;}, _=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, parameter)    parameter Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value):\naggregate_labels(total_commands_rate, SUM) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nTrend Operation Trend Operation takes an expression and performs a trend calculation on its results.\nExpression:\n\u0026lt;Trend-Operator\u0026gt;(Metrics Expression, time_range) time_range is the positive int of the calculated range. 
The unit will automatically align with to the query Step, for example, if the query Step is MINUTE, the unit of time_range is minute.\n   Operator Definition ExpressionResultType     increase returns the increase in the time range in the time series TIME_SERIES_VALUES   rate returns the per-second average rate of increase in the time range in the time series TIME_SERIES_VALUES    For example: If we want to query the increase value of the service_cpm metric in 2 minute(assume the query Step is MINUTE), we can use the following expression:\nincrease(service_cpm, 2) If the query duration is 3 minutes, from (T1 to T3) and the metric has values in time series:\nV(T1-2), V(T1-1), V(T1), V(T2), V(T3) then the expression result is:\nV(T1)-V(T1-2), V(T2)-V(T1-1), V(T3)-V(T1) Note:\n If the calculated metric value is empty, the result will be empty. Assume in the T3 point, the increase value = V(T3)-V(T1), If the metric V(T3) or V(T1) is empty, the result value in T3 will be empty.  Result Type TIME_SERIES_VALUES.\nExpression Query Example Labeled Value Metrics service_percentile{_=\u0026#39;0,1\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: 
null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the percentile and the average value, the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} - avg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - service_percentile{_=\u0026#39;0,1\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, 
\u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","excerpt":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each …","ref":"/docs/main/latest/en/api/metrics-query-expression/","title":"Metrics Query Expression(MQE) Syntax"},{"body":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics Since v10.0.0, SkyWalking supports multiple labels metrics. We could query the specific labels of the metric by the following expression.\nExpression:\n\u0026lt;metric_name\u0026gt;{\u0026lt;label1_name\u0026gt;=\u0026#39;\u0026lt;label1_value_1\u0026gt;,...\u0026#39;, \u0026lt;label2_name\u0026gt;=\u0026#39;\u0026lt;label2_value_1\u0026gt;,...\u0026#39;,\u0026lt;label2...} {\u0026lt;label1_name\u0026gt;='\u0026lt;label_value_1\u0026gt;,...'} is the selected label name/value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: The k8s_cluster_deployment_status metric has labels namespace, deployment and status. If we want to query all deployment metric value with namespace=skywalking-showcase and status=true, we can use the following expression:\nk8s_cluster_deployment_status{namespace=\u0026#39;skywalking-showcase\u0026#39;, status=\u0026#39;true\u0026#39;} We also could query the label with multiple values by separating the values with ,: If we want to query the service_percentile metric with the label name p and values 50,75,90,95,99, we can use the following expression:\nservice_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. 
The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions result on both sides of the operator are with labels, they should have the same labels for calculation. If the labels match, will reserve left expression result labels and the calculated value. Otherwise, will return empty value.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result 
SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs calculation to get the TopN of Services/Instances/Endpoints. The result depends on the entity condition in the query.\n Global TopN:  The entity is empty. The result is the topN Services/Instances/Endpoints in the whole traffics. Notice: If query the Endpoints metric, the global candidate set could be huge, please use it carefully.   Service\u0026rsquo;s Instances/Endpoints TopN:  The serviceName in the entity is not empty. The result is the topN Instances/Endpoints of the service.    Expression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;)  top_number is the number of the top results, should be a positive integer. order is the order of the top results. The value of order can be asc or des.  For example: If we want to query the current service\u0026rsquo;s top 10 instances with the highest service_instance_cpm metric value, we can use the following expression under specific service:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results. 
Since v10.0.0, SkyWalking supports relabel multiple labels.\nExpression:\nrelabel(Expression, \u0026lt;target_label_name\u0026gt;=\u0026#39;\u0026lt;origin_label_value_1\u0026gt;,...\u0026#39;, \u0026lt;new_label_name\u0026gt;=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) The order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 50,75,90,95,99, and rename the label name to percentile and the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;}, p=\u0026#39;50,75,90,95,99\u0026#39;, percentile=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, \u0026lt;AggregateType\u0026gt;(\u0026lt;label1_name\u0026gt;,\u0026lt;label2_name\u0026gt;...))  AggregateType is the type of the aggregation operation. \u0026lt;label1_name\u0026gt;,\u0026lt;label2_name\u0026gt;... is the label names that need to be aggregated. If not specified, all labels will be aggregated. Optional.     AggregateType Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value): Aggregating all the labels:\naggregate_labels(total_commands_rate, sum) Also, we can aggregate by the cmd label:\naggregate_labels(total_commands_rate, sum(cmd)) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nIsPresent Operation IsPresent operation represents that in a list of metrics, if any expression has a value, it would return 1 in the result; otherwise, it would return 0.\nExpression:\nis_present([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: When the meter does not exist or the metrics has no value, it would return 0. 
However, if the metrics list contains a meter with values, it would return 1.\nis_present(not_existing, existing_without_value, existing_with_value) Result Type The result type is SINGLE_VALUE, and the result (1 or 0) is in the first value.\nTrend Operation Trend Operation takes an expression and performs a trend calculation on its results.\nExpression:\n\u0026lt;Trend-Operator\u0026gt;(Metrics Expression, time_range) time_range is a positive integer specifying the calculated range. The unit automatically aligns with the query Step; for example, if the query Step is MINUTE, the unit of time_range is minute.\n   Operator Definition ExpressionResultType     increase returns the increase in the time range in the time series TIME_SERIES_VALUES   rate returns the per-second average rate of increase in the time range in the time series TIME_SERIES_VALUES    For example: If we want to query the increase value of the service_cpm metric in 2 minutes (assuming the query Step is MINUTE), we can use the following expression:\nincrease(service_cpm, 2) If the query duration is 3 minutes, from T1 to T3, and the metric has values in the time series:\nV(T1-2), V(T1-1), V(T1), V(T2), V(T3) then the expression result is:\nV(T1)-V(T1-2), V(T2)-V(T1-1), V(T3)-V(T1) Note:\n If the calculated metric value is empty, the result will be empty. Assume at the T3 point the increase value = V(T3)-V(T1); if either V(T3) or V(T1) is empty, the result value at T3 will be empty.  Result Type TIME_SERIES_VALUES.\nSort Operation SortValues Operation SortValues Operation takes an expression and sorts the values of the input expression result.\nExpression:\nsort_values(Expression, \u0026lt;limit\u0026gt;, \u0026lt;order\u0026gt;)  limit is the number of sorted results to return and should be a positive integer; if not specified, all results are returned. Optional. order is the order of the sort results. The value of order can be asc or des.  For example: If we want to sort the service_resp_time metric values in descending order and get the top 10 values, we can use the following expression:\nsort_values(service_resp_time, 10, des) Result Type The result type follows the input expression.\nSortLabelValues Operation SortLabelValues Operation takes an expression and sorts the label values of the input expression result. This function uses natural sort order.\nExpression:\nsort_label_values(Expression, \u0026lt;order\u0026gt;, \u0026lt;label1_name\u0026gt;, \u0026lt;label2_name\u0026gt; ...)  order is the order of the sort results. The value of order can be asc or des. \u0026lt;label1_name\u0026gt;, \u0026lt;label2_name\u0026gt; ... are the label names whose values need to be sorted. At least one label name should be specified. The labels at the head of the list are sorted first, and any label not included in the expression result is ignored.  
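Note on natural sort order: numeric label values are compared by magnitude rather than character by character. As an illustrative sketch (the metric name and the port label here are hypothetical, not metrics listed above), sort_label_values(metric, asc, port) over the label values 5, 9, 50 would yield 5, 9, 50, whereas a plain lexicographic sort would yield 5, 50, 9.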
For example: If we want to sort the service_percentile metric label values in descending order by the p label, we can use the following expression:\nsort_label_values(service_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;}, des, p) For multiple labels, assume the metric has 2 labels:\nmetric{label1=\u0026#39;a\u0026#39;, label2=\u0026#39;2a\u0026#39;} metric{label1=\u0026#39;a\u0026#39;, label2=\u0026#39;2c\u0026#39;} metric{label1=\u0026#39;b\u0026#39;, label2=\u0026#39;2a\u0026#39;} metric{label1=\u0026#39;b\u0026#39;, label2=\u0026#39;2c\u0026#39;} If we want to sort the metric metric label values in descending order by the label1 and label2 labels, we can use the following expression:\nsort_label_values(metric, des, label1, label2) And the result will be:\nmetric{label1=\u0026#39;b\u0026#39;, label2=\u0026#39;2c\u0026#39;} metric{label1=\u0026#39;b\u0026#39;, label2=\u0026#39;2a\u0026#39;} metric{label1=\u0026#39;a\u0026#39;, label2=\u0026#39;2c\u0026#39;} metric{label1=\u0026#39;a\u0026#39;, label2=\u0026#39;2a\u0026#39;} Expression Query Example Labeled Value Metrics service_percentile{p=\u0026#39;50,95\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{p=\u0026#39;50,75\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, 
\u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{p=\u0026#39;50,75\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the percentile and the average value, the expression is:\nservice_percentile{p=\u0026#39;50,75\u0026#39;} - avg(service_percentile{p=\u0026#39;50,75\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - service_percentile{p=\u0026#39;50,75\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: 
\u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","excerpt":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each …","ref":"/docs/main/next/en/api/metrics-query-expression/","title":"Metrics Query Expression(MQE) Syntax"},{"body":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics Expression:\n\u0026lt;metric_name\u0026gt;{label=\u0026#39;\u0026lt;label_1\u0026gt;,...\u0026#39;} label is the selected label of the metric. If label is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the labels 0,1,2,3,4, we can use the following expression:\nservice_percentile{label=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the labels to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation Binary Operation is an operation that takes two expressions and performs a calculation on their results. The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type The result type of the expression please refer to the following table.\nBinary Operation Rules The following table listed if the difference result types of the input expressions could do this operation and the result type after the operation. The expression could on the left or right side of the operator. 
Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Aggregation Operation Aggregation Operation takes an expression and performs aggregate calculation on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operator could impact the ExpressionResultType, please refer to the above table.\nFunction Operation Function Operation takes an expression and performs function calculation on its results.\nExpression:\n\u0026lt;Function-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operator could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. 
The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replace the labels to new labels on its results.\nExpression:\nrelabel(Expression, label=\u0026#39;\u0026lt;new_label_1\u0026gt;,...\u0026#39;) label is the new labels of the metric after the label is relabeled, the order of the new labels should be the same as the order of the labels in the input expression result.\nFor example: If we want to query the service_percentile metric with the labels 0,1,2,3,4, and rename the labels to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{label=\u0026#39;0,1,2,3,4\u0026#39;}, label=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\n","excerpt":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each …","ref":"/docs/main/v9.5.0/en/api/metrics-query-expression/","title":"Metrics Query Expression(MQE) Syntax"},{"body":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics For now, we only have a single anonymous label with multi label values in a labeled metric. To be able to use it in expressions, define _ as the anonymous label name (key).\nExpression:\n\u0026lt;metric_name\u0026gt;{_=\u0026#39;\u0026lt;label_value_1\u0026gt;,...\u0026#39;} {_='\u0026lt;label_value_1\u0026gt;,...'} is the selected label value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, we can use the following expression:\nservice_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. 
The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result 
SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results.\nExpression:\nrelabel(Expression, _=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) _ is the new label of the metric after the label is relabeled, the order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, and rename the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;}, _=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. 
It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, parameter)    parameter Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value):\naggregate_labels(total_commands_rate, SUM) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nExpression Query Example Labeled Value Metrics service_percentile{_=\u0026#39;0,1\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, 
\u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the percentile and the average value, the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} - avg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression 
is:\nservice_resp_time - service_percentile{_=\u0026#39;0,1\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","excerpt":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each …","ref":"/docs/main/v9.6.0/en/api/metrics-query-expression/","title":"Metrics Query Expression(MQE) Syntax"},{"body":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics For now, we only have a single anonymous label with multi label values in a labeled metric. To be able to use it in expressions, define _ as the anonymous label name (key).\nExpression:\n\u0026lt;metric_name\u0026gt;{_=\u0026#39;\u0026lt;label_value_1\u0026gt;,...\u0026#39;} {_='\u0026lt;label_value_1\u0026gt;,...'} is the selected label value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, we can use the following expression:\nservice_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. 
The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result 
SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results.\nExpression:\nrelabel(Expression, _=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) _ is the new label of the metric after the label is relabeled, the order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, and rename the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;}, _=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. 
It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, parameter)    parameter Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value):\naggregate_labels(total_commands_rate, SUM) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nTrend Operation Trend Operation takes an expression and performs a trend calculation on its results.\nExpression:\n\u0026lt;Trend-Operator\u0026gt;(Metrics Expression, time_range) time_range is the positive int of the calculated range. The unit will automatically align with to the query Step, for example, if the query Step is MINUTE, the unit of time_range is minute.\n   Operator Definition ExpressionResultType     increase returns the increase in the time range in the time series TIME_SERIES_VALUES   rate returns the per-second average rate of increase in the time range in the time series TIME_SERIES_VALUES    For example: If we want to query the increase value of the service_cpm metric in 2 minute(assume the query Step is MINUTE), we can use the following expression:\nincrease(service_cpm, 2) If the query duration is 3 minutes, from (T1 to T3) and the metric has values in time series:\nV(T1-2), V(T1-1), V(T1), V(T2), V(T3) then the expression result is:\nV(T1)-V(T1-2), V(T2)-V(T1-1), V(T3)-V(T1) Note:\n If the calculated metric value is empty, the result will be empty. Assume in the T3 point, the increase value = V(T3)-V(T1), If the metric V(T3) or V(T1) is empty, the result value in T3 will be empty.  
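As a sketch of the rate operator under the same assumptions (query Step is MINUTE and time_range is 2, i.e. a 120-second range):\nrate(service_cpm, 2) each point would be the corresponding increase divided by the range in seconds; for example, the value at T3 would be (V(T3)-V(T1))/120.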
Result Type TIME_SERIES_VALUES.\nExpression Query Example Labeled Value Metrics service_percentile{_=\u0026#39;0,1\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference 
between the percentile and the average value, the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} - avg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - service_percentile{_=\u0026#39;0,1\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","excerpt":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. 
Each …","ref":"/docs/main/v9.7.0/en/api/metrics-query-expression/","title":"Metrics Query Expression(MQE) Syntax"},{"body":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains …","ref":"/docs/main/latest/en/setup/backend/micrometer-observations/","title":"MicroMeter Observations setup"},{"body":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the SkyWalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. 
JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains …","ref":"/docs/main/next/en/setup/backend/micrometer-observations/","title":"MicroMeter Observations setup"},{"body":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains …","ref":"/docs/main/v9.4.0/en/setup/backend/micrometer-observations/","title":"MicroMeter Observations setup"},{"body":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. 
Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains …","ref":"/docs/main/v9.5.0/en/setup/backend/micrometer-observations/","title":"MicroMeter Observations setup"},{"body":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains …","ref":"/docs/main/v9.6.0/en/setup/backend/micrometer-observations/","title":"MicroMeter Observations setup"},{"body":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. 
If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains …","ref":"/docs/main/v9.7.0/en/setup/backend/micrometer-observations/","title":"MicroMeter Observations setup"},{"body":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. 
/mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. 
generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","excerpt":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for …","ref":"/docs/main/latest/en/setup/backend/backend-data-generator/","title":"Mock data generator for testing"},{"body":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. 
For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 
0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. 
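The sequence generator's JSON form is not spelled out here, so the following is a small sketch in the same fragment style as the traceId and tags examples, using only the properties described in this section (min, max, step, fluctuation); the field name latency and the concrete values are illustrative only, and the fluctuation behaviour it relies on is explained in the sentences that continue just after this sketch.

"latency": {
  "type": "sequence",
  "min": 10,
  "max": 15,
  "step": 1,
  "fluctuation": 2
}

With these values the generated numbers would follow the min = 10, max = 15, step = 1 example given below, wobbling by at most 2 around the strictly increasing sequence.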
Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","excerpt":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for …","ref":"/docs/main/next/en/setup/backend/backend-data-generator/","title":"Mock data generator for testing"},{"body":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. 
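A rough end-to-end sketch tying the task endpoints above together: start a continuous qps task, then cancel it with the returned id. It assumes the acknowledgement body is the bare request id (the exact response format is not shown above); note that the prose above uses the plural .../tasks path for cancellation while the curl examples use the singular .../task, so adjust to whatever your version accepts.

# Sketch only: start a continuous log-generation task and cancel it later.
# The response is assumed to be the bare request id; the real format is not shown above.
REQUEST_ID=$(curl -s -XPOST 'http://localhost:12800/mock-data/logs/tasks?qps=20' \
  -H 'Content-Type: application/json' -d '@logs-template.json')
# ... let it generate logs for a while, then cancel that task:
curl -XDELETE "http://localhost:12800/mock-data/logs/tasks?requestId=${REQUEST_ID}"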
By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. 
the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","excerpt":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-data-generator/","title":"Mock data generator for testing"},{"body":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. 
You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. 
generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","excerpt":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-data-generator/","title":"Mock data generator for testing"},{"body":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. 
For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 
0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. 
Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","excerpt":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-data-generator/","title":"Mock data generator for testing"},{"body":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. 
By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. 
the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","excerpt":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-data-generator/","title":"Mock data generator for testing"},{"body":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. 
You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. 
generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","excerpt":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-data-generator/","title":"Mock data generator for testing"},{"body":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. 
For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 
0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. 
Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","excerpt":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-data-generator/","title":"Mock data generator for testing"},{"body":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. 
By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. 
the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","excerpt":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-data-generator/","title":"Mock data generator for testing"},{"body":"Module Design Controller The controller means composing all the steps declared in the configuration file, it progressive and display which step is currently running. If it failed in a step, the error message could be shown, as much comprehensive as possible. An example of the output might be.\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? Generating Traffic - HTTP localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up the environment (by executing the cleanup command) no matter what status other commands are, even if they are failed, the controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Steps According to the content in the Controller, E2E Testing can be divided into the following steps.\nSetup Start the environment required for this E2E Testing, such as database, back-end process, API, etc.\nSupport two ways to set up the environment:\n compose:  Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify, such as yq help to eval the YAML format.   kind:  Start the KinD cluster according to the config files or Start on an existing kubernetes cluster. Apply the resources files (--manifests) or/and run the custom init command (--commands). Check the pods' readiness. Wait until all pods are ready according to the interval, etc.    Trigger Generate traffic by trigger the action, It could access HTTP API or execute commands with interval.\nIt could have these settings:\n interval: How frequency to trigger the action. times: How many times the operation is triggered before aborting on the condition that the trigger had failed always. 0=infinite. action: The action of the trigger.  Verify Verify that the data content is matching with the expected results. such as unit test assert, etc.\nIt could have these settings:\n actual: The actual data file. query: The query to get the actual data, could run shell commands to generate the data. expected: The expected data file, could specify some matching rules to verify the actual content.  Cleanup This step requires the same options in the setup step so that it can clean up all things necessarily. 
Such as destroy the environment, etc.\n","excerpt":"Module Design Controller The controller means composing all the steps declared in the configuration …","ref":"/docs/skywalking-infra-e2e/latest/en/concepts-and-designs/module-design/","title":"Module Design"},{"body":"Module Design Controller The controller means composing all the steps declared in the configuration file, it progressive and display which step is currently running. If it failed in a step, the error message could be shown, as much comprehensive as possible. An example of the output might be.\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? Generating Traffic - HTTP localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up the environment (by executing the cleanup command) no matter what status other commands are, even if they are failed, the controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Steps According to the content in the Controller, E2E Testing can be divided into the following steps.\nSetup Start the environment required for this E2E Testing, such as database, back-end process, API, etc.\nSupport two ways to set up the environment:\n compose:  Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify, such as yq help to eval the YAML format.   kind:  Start the KinD cluster according to the config files or Start on an existing kubernetes cluster. Apply the resources files (--manifests) or/and run the custom init command (--commands). Check the pods' readiness. Wait until all pods are ready according to the interval, etc.    Trigger Generate traffic by trigger the action, It could access HTTP API or execute commands with interval.\nIt could have these settings:\n interval: How frequency to trigger the action. times: How many times the operation is triggered before aborting on the condition that the trigger had failed always. 0=infinite. action: The action of the trigger.  Verify Verify that the data content is matching with the expected results. such as unit test assert, etc.\nIt could have these settings:\n actual: The actual data file. query: The query to get the actual data, could run shell commands to generate the data. expected: The expected data file, could specify some matching rules to verify the actual content.  Cleanup This step requires the same options in the setup step so that it can clean up all things necessarily. Such as destroy the environment, etc.\n","excerpt":"Module Design Controller The controller means composing all the steps declared in the configuration …","ref":"/docs/skywalking-infra-e2e/next/en/concepts-and-designs/module-design/","title":"Module Design"},{"body":"Module Design Controller The controller means composing all the steps declared in the configuration file, it progressive and display which step is currently running. If it failed in a step, the error message could be shown, as much comprehensive as possible. 
An example of the output might be.\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? Generating Traffic - HTTP localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up the environment (by executing the cleanup command) no matter what status other commands are, even if they are failed, the controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Steps According to the content in the Controller, E2E Testing can be divided into the following steps.\nSetup Start the environment required for this E2E Testing, such as database, back-end process, API, etc.\nSupport two ways to set up the environment:\n compose:  Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify, such as yq help to eval the YAML format.   kind:  Start the KinD cluster according to the config files or Start on an existing kubernetes cluster. Apply the resources files (--manifests) or/and run the custom init command (--commands). Check the pods' readiness. Wait until all pods are ready according to the interval, etc.    Trigger Generate traffic by trigger the action, It could access HTTP API or execute commands with interval.\nIt could have these settings:\n interval: How frequency to trigger the action. times: How many times the operation is triggered before aborting on the condition that the trigger had failed always. 0=infinite. action: The action of the trigger.  Verify Verify that the data content is matching with the expected results. such as unit test assert, etc.\nIt could have these settings:\n actual: The actual data file. query: The query to get the actual data, could run shell commands to generate the data. expected: The expected data file, could specify some matching rules to verify the actual content.  Cleanup This step requires the same options in the setup step so that it can clean up all things necessarily. Such as destroy the environment, etc.\n","excerpt":"Module Design Controller The controller means composing all the steps declared in the configuration …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/concepts-and-designs/module-design/","title":"Module Design"},{"body":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to process the telemetry data(metrics/traces/logs). Two pipes are not sharing data.\n Satellite --------------------------------------------------------------------- | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | --------------------------------------------------------------------- Modules Module is the core workers in Satellite. Module is constituted by the specific extension plugins. 
There are 3 modules in one namespace, which are Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing the data to Queue. So there are 2 kinds of Gatherer, which are ReceiverGatherer and FetcherGatherer. The Processor module is responsible for reading data from the queue and processing data by a series of filter chains. The Sender module is responsible for async processing and forwarding the data to the external services in the batch mode. After sending success, Sender would also acknowledge the offset of Queue in Gatherer.   Pipe -------------------------------------------------------------------- | ---------- ----------- -------- | | | Gatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ---------- ----------- -------- | -------------------------------------------------------------------- LifeCycle\n Prepare: Prepare phase is to do some preparation works, such as register the client status listener to the client in ReceiverGatherer. Boot: Boot phase is to start the current module until receives a close signal. ShutDown: ShutDown phase is to close the used resources.  Plugins Plugin is the minimal components in the module. Satellite has 2 plugin catalogs, which are sharing plugins and normal plugins.\n a sharing plugin instance could be sharing with multiple modules in the different pipes. a normal plugin instance is only be used in a fixed module of the fixed pipes.  Sharing plugin Nowadays, there are 2 kinds of sharing plugins in Satellite, which are server plugins and client plugins. The reason why they are sharing plugins is to reduce the resource cost in connection. Server plugins are sharing with the ReceiverGatherer modules in the different pipes to receive the external requests. And the client plugins is sharing with the Sender modules in the different pipes to connect with external services, such as Kafka and OAP.\n Sharing Server Sharing Client -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- Normal plugin There are 7 kinds of normal plugins in Satellite, which are Receiver, Fetcher, Queue, Parser, Filter, Forwarder, and Fallbacker.\n Receiver: receives the input APM data from the request. Fetcher: fetch the APM data by fetching. Queue: store the APM data to ensure the data stability. Parser: supports some ways to parse data, such parse a csv file. Filter: processes the APM data. Forwarder: forwards the APM data to the external receiver, such as Kafka and OAP. Fallbacker: supports some fallback strategies, such as timer retry strategy.   
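Before the full data-flow diagram below, a minimal Go sketch may help to show how these plugin roles chain together inside one pipe. The types here (Event, Filter) are illustrative only, not the real skywalking-satellite API; the sketch only mirrors the Gatherer-to-Queue-to-Processor-to-Sender flow described above.

package main

import "fmt"

// Event stands in for one piece of telemetry data flowing through a pipe.
type Event struct {
	Type    string
	Payload string
}

// Filter mirrors the Filter plugin role: it takes an event and returns a (possibly new) event.
type Filter func(Event) Event

func main() {
	// Gatherer role: a Receiver/Fetcher pushes events into the Queue.
	queue := make(chan Event, 16)
	queue <- Event{Type: "segment", Payload: "span-1"}
	queue <- Event{Type: "log", Payload: "log-1"}
	close(queue)

	// Processor role: consume from the queue and run the filter chain.
	filters := []Filter{
		func(e Event) Event {
			// Parser-like step: turn the raw payload into a parsed representation.
			e.Payload = "parsed:" + e.Payload
			return e
		},
		func(e Event) Event {
			// Label segment data as a RemoteEvent so the Sender forwards it.
			if e.Type == "segment" {
				e.Type = "RemoteEvent"
			}
			return e
		},
	}
	for e := range queue {
		for _, f := range filters {
			e = f(e)
		}
		// Sender role: only RemoteEvent-labelled events are forwarded downstream.
		if e.Type == "RemoteEvent" {
			fmt.Println("forwarding to Kafka/OAP:", e.Payload)
		}
	}
}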
Gatherer Processor ------------------------------- ------------------------------------------- | ----------- --------- | | ----------- ----------- | | | Receiver | ==\u0026gt; | Queue | |==\u0026gt;| | Filter | ==\u0026gt; ... ==\u0026gt; | Filter | | | | /Fetcher | | Mem/File | | | ----------- ----------- | | ----------- ---------- | | || || | -------------------------------- | \\/\t\\/ | | --------------------------------------- | | | OutputEventContext | | | --------------------------------------- | ------------------------------------------- || \\/ Sender ------------------------------------------ | --- --- | | | B | | D | ----------------- | | | A | | I | |Segment Forwarder| | | | T | | S | | (Fallbacker) | | | | C | | P | ----------------- | | | H | =\u0026gt; | A | | ===\u0026gt; Kafka/OAP | | B | | T | =\u0026gt; ...... | | | U | | C | | | | F | | H | ----------------- | | | F | | E | | Meter Forwarder| | | | E | | R | | (Fallbacker | | | | R | | | ----------------- | | --- --- | ------------------------------------------ 1. The Fetcher/Receiver plugin would fetch or receive the input data. 2. The Parser plugin would parse the input data to SerializableEvent that is supported to be stored in Queue. 3. The Queue plugin stores the SerializableEvent. However, whether serializing depends on the Queue implements. For example, the serialization is unnecessary when using a Memory Queue. Once an event is pulled by the consumer of Queue, the event will be processed by the filters in Processor. 4. The Filter plugin would process the event to create a new event. Next, the event is passed to the next filter to do the same things until the whole filters are performed. All created events would be stored in the OutputEventContext. However, only the events labeled with RemoteEvent type would be forwarded by Forwarder. 5. After processing, the events in OutputEventContext would be stored in the BatchBuffer. When the timer is triggered or the capacity limit is reached, the events in BatchBuffer would be partitioned by EventType and sent to the different Forwarders, such as Segment Forwarder and Meter Forwarder. 6. The Follower in different Senders would share with the remote client to avoid make duplicate connections and have the same Fallbacker(FallBack strategy) to process data. When all forwarders send success or process success in Fallbacker, the dispatcher would also ack the batch is a success. ============================================================================================ ","excerpt":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/module_design/","title":"Module Design"},{"body":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to process the telemetry data(metrics/traces/logs). Two pipes are not sharing data.\n Satellite --------------------------------------------------------------------- | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | --------------------------------------------------------------------- Modules Module is the core workers in Satellite. Module is constituted by the specific extension plugins. 
There are 3 modules in one namespace, which are Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing the data to Queue. So there are 2 kinds of Gatherer, which are ReceiverGatherer and FetcherGatherer. The Processor module is responsible for reading data from the queue and processing data by a series of filter chains. The Sender module is responsible for async processing and forwarding the data to the external services in the batch mode. After sending success, Sender would also acknowledge the offset of Queue in Gatherer.   Pipe -------------------------------------------------------------------- | ---------- ----------- -------- | | | Gatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ---------- ----------- -------- | -------------------------------------------------------------------- LifeCycle\n Prepare: Prepare phase is to do some preparation works, such as register the client status listener to the client in ReceiverGatherer. Boot: Boot phase is to start the current module until receives a close signal. ShutDown: ShutDown phase is to close the used resources.  Plugins Plugin is the minimal components in the module. Satellite has 2 plugin catalogs, which are sharing plugins and normal plugins.\n a sharing plugin instance could be sharing with multiple modules in the different pipes. a normal plugin instance is only be used in a fixed module of the fixed pipes.  Sharing plugin Nowadays, there are 2 kinds of sharing plugins in Satellite, which are server plugins and client plugins. The reason why they are sharing plugins is to reduce the resource cost in connection. Server plugins are sharing with the ReceiverGatherer modules in the different pipes to receive the external requests. And the client plugins is sharing with the Sender modules in the different pipes to connect with external services, such as Kafka and OAP.\n Sharing Server Sharing Client -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- Normal plugin There are 7 kinds of normal plugins in Satellite, which are Receiver, Fetcher, Queue, Parser, Filter, Forwarder, and Fallbacker.\n Receiver: receives the input APM data from the request. Fetcher: fetch the APM data by fetching. Queue: store the APM data to ensure the data stability. Parser: supports some ways to parse data, such parse a csv file. Filter: processes the APM data. Forwarder: forwards the APM data to the external receiver, such as Kafka and OAP. Fallbacker: supports some fallback strategies, such as timer retry strategy.   
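The numbered steps under the diagram below describe how the Sender batches events, partitions them by type, and acknowledges the queue offset. The following Go sketch is a simplified illustration of that batch-and-ack loop; the Event shape and the forwarder names are hypothetical stand-ins, not Satellite's actual types.

package main

import (
	"fmt"
	"time"
)

type Event struct {
	Type   string // e.g. "segment" or "meter", used to pick a Forwarder
	Offset int64  // queue offset, acknowledged after a successful send
}

func main() {
	const batchSize = 3
	flush := time.NewTicker(100 * time.Millisecond)
	defer flush.Stop()

	events := make(chan Event, 16)
	go func() {
		for i := int64(1); i <= 5; i++ {
			events <- Event{Type: "segment", Offset: i}
		}
		close(events)
	}()

	var buffer []Event
	send := func() {
		if len(buffer) == 0 {
			return
		}
		// Partition the batch by event type so each Forwarder only sees its own kind.
		byType := map[string][]Event{}
		for _, e := range buffer {
			byType[e.Type] = append(byType[e.Type], e)
		}
		for t, batch := range byType {
			fmt.Printf("forwarder %q sends %d events\n", t, len(batch))
		}
		// On success, acknowledge the highest offset back to the Gatherer's queue.
		fmt.Println("ack offset", buffer[len(buffer)-1].Offset)
		buffer = buffer[:0]
	}

	for {
		select {
		case e, ok := <-events:
			if !ok {
				send() // flush whatever is left before exiting
				return
			}
			buffer = append(buffer, e)
			if len(buffer) >= batchSize {
				send() // capacity limit reached
			}
		case <-flush.C:
			send() // timer triggered
		}
	}
}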
Gatherer Processor ------------------------------- ------------------------------------------- | ----------- --------- | | ----------- ----------- | | | Receiver | ==\u0026gt; | Queue | |==\u0026gt;| | Filter | ==\u0026gt; ... ==\u0026gt; | Filter | | | | /Fetcher | | Mem/File | | | ----------- ----------- | | ----------- ---------- | | || || | -------------------------------- | \\/\t\\/ | | --------------------------------------- | | | OutputEventContext | | | --------------------------------------- | ------------------------------------------- || \\/ Sender ------------------------------------------ | --- --- | | | B | | D | ----------------- | | | A | | I | |Segment Forwarder| | | | T | | S | | (Fallbacker) | | | | C | | P | ----------------- | | | H | =\u0026gt; | A | | ===\u0026gt; Kafka/OAP | | B | | T | =\u0026gt; ...... | | | U | | C | | | | F | | H | ----------------- | | | F | | E | | Meter Forwarder| | | | E | | R | | (Fallbacker | | | | R | | | ----------------- | | --- --- | ------------------------------------------ 1. The Fetcher/Receiver plugin would fetch or receive the input data. 2. The Parser plugin would parse the input data to SerializableEvent that is supported to be stored in Queue. 3. The Queue plugin stores the SerializableEvent. However, whether serializing depends on the Queue implements. For example, the serialization is unnecessary when using a Memory Queue. Once an event is pulled by the consumer of Queue, the event will be processed by the filters in Processor. 4. The Filter plugin would process the event to create a new event. Next, the event is passed to the next filter to do the same things until the whole filters are performed. All created events would be stored in the OutputEventContext. However, only the events labeled with RemoteEvent type would be forwarded by Forwarder. 5. After processing, the events in OutputEventContext would be stored in the BatchBuffer. When the timer is triggered or the capacity limit is reached, the events in BatchBuffer would be partitioned by EventType and sent to the different Forwarders, such as Segment Forwarder and Meter Forwarder. 6. The Follower in different Senders would share with the remote client to avoid make duplicate connections and have the same Fallbacker(FallBack strategy) to process data. When all forwarders send success or process success in Fallbacker, the dispatcher would also ack the batch is a success. ============================================================================================ ","excerpt":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/module_design/","title":"Module Design"},{"body":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to process the telemetry data(metrics/traces/logs). Two pipes are not sharing data.\n Satellite --------------------------------------------------------------------- | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | --------------------------------------------------------------------- Modules Module is the core workers in Satellite. Module is constituted by the specific extension plugins. 
There are 3 modules in one namespace, which are Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing the data to Queue. So there are 2 kinds of Gatherer, which are ReceiverGatherer and FetcherGatherer. The Processor module is responsible for reading data from the queue and processing data by a series of filter chains. The Sender module is responsible for async processing and forwarding the data to the external services in the batch mode. After sending success, Sender would also acknowledge the offset of Queue in Gatherer.   Pipe -------------------------------------------------------------------- | ---------- ----------- -------- | | | Gatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ---------- ----------- -------- | -------------------------------------------------------------------- LifeCycle\n Prepare: Prepare phase is to do some preparation works, such as register the client status listener to the client in ReceiverGatherer. Boot: Boot phase is to start the current module until receives a close signal. ShutDown: ShutDown phase is to close the used resources.  Plugins Plugin is the minimal components in the module. Satellite has 2 plugin catalogs, which are sharing plugins and normal plugins.\n a sharing plugin instance could be sharing with multiple modules in the different pipes. a normal plugin instance is only be used in a fixed module of the fixed pipes.  Sharing plugin Nowadays, there are 2 kinds of sharing plugins in Satellite, which are server plugins and client plugins. The reason why they are sharing plugins is to reduce the resource cost in connection. Server plugins are sharing with the ReceiverGatherer modules in the different pipes to receive the external requests. And the client plugins is sharing with the Sender modules in the different pipes to connect with external services, such as Kafka and OAP.\n Sharing Server Sharing Client -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- Normal plugin There are 7 kinds of normal plugins in Satellite, which are Receiver, Fetcher, Queue, Parser, Filter, Forwarder, and Fallbacker.\n Receiver: receives the input APM data from the request. Fetcher: fetch the APM data by fetching. Queue: store the APM data to ensure the data stability. Parser: supports some ways to parse data, such parse a csv file. Filter: processes the APM data. Forwarder: forwards the APM data to the external receiver, such as Kafka and OAP. Fallbacker: supports some fallback strategies, such as timer retry strategy.   
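To make the sharing-plugin idea above concrete before the data-flow diagram below, here is a small Go sketch of one client instance being reused by the Sender module of several pipes, so only a single connection is opened. The names and struct shapes are illustrative only, not Satellite's real plugin interfaces.

package main

import "fmt"

// SharedClient stands in for a client plugin (e.g. a gRPC connection to the OAP)
// that is created once and then handed to the Sender module of every pipe.
type SharedClient struct {
	target      string
	connections int
}

func newSharedClient(target string) *SharedClient {
	// Only one connection is opened, no matter how many pipes end up using the client.
	return &SharedClient{target: target, connections: 1}
}

func (c *SharedClient) Send(pipe, data string) {
	fmt.Printf("pipe %q -> %s over %d shared connection(s): %s\n", pipe, c.target, c.connections, data)
}

// Sender is the per-pipe module; it keeps a reference to the shared client instead of opening its own.
type Sender struct {
	pipe   string
	client *SharedClient
}

func main() {
	client := newSharedClient("oap:11800")
	pipes := []Sender{{"traces", client}, {"metrics", client}, {"logs", client}}
	for _, s := range pipes {
		s.client.Send(s.pipe, "batch-1")
	}
}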
Gatherer Processor ------------------------------- ------------------------------------------- | ----------- --------- | | ----------- ----------- | | | Receiver | ==\u0026gt; | Queue | |==\u0026gt;| | Filter | ==\u0026gt; ... ==\u0026gt; | Filter | | | | /Fetcher | | Mem/File | | | ----------- ----------- | | ----------- ---------- | | || || | -------------------------------- | \\/\t\\/ | | --------------------------------------- | | | OutputEventContext | | | --------------------------------------- | ------------------------------------------- || \\/ Sender ------------------------------------------ | --- --- | | | B | | D | ----------------- | | | A | | I | |Segment Forwarder| | | | T | | S | | (Fallbacker) | | | | C | | P | ----------------- | | | H | =\u0026gt; | A | | ===\u0026gt; Kafka/OAP | | B | | T | =\u0026gt; ...... | | | U | | C | | | | F | | H | ----------------- | | | F | | E | | Meter Forwarder| | | | E | | R | | (Fallbacker | | | | R | | | ----------------- | | --- --- | ------------------------------------------ 1. The Fetcher/Receiver plugin would fetch or receive the input data. 2. The Parser plugin would parse the input data to SerializableEvent that is supported to be stored in Queue. 3. The Queue plugin stores the SerializableEvent. However, whether serializing depends on the Queue implements. For example, the serialization is unnecessary when using a Memory Queue. Once an event is pulled by the consumer of Queue, the event will be processed by the filters in Processor. 4. The Filter plugin would process the event to create a new event. Next, the event is passed to the next filter to do the same things until the whole filters are performed. All created events would be stored in the OutputEventContext. However, only the events labeled with RemoteEvent type would be forwarded by Forwarder. 5. After processing, the events in OutputEventContext would be stored in the BatchBuffer. When the timer is triggered or the capacity limit is reached, the events in BatchBuffer would be partitioned by EventType and sent to the different Forwarders, such as Segment Forwarder and Meter Forwarder. 6. The Follower in different Senders would share with the remote client to avoid make duplicate connections and have the same Fallbacker(FallBack strategy) to process data. When all forwarders send success or process success in Fallbacker, the dispatcher would also ack the batch is a success. ============================================================================================ ","excerpt":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to …","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/module_design/","title":"Module Design"},{"body":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  
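Before wiring up the Collector, it can help to confirm that mongodb-exporter is actually serving Prometheus metrics. The Go sketch below is an optional sanity check of that first hop in the data flow; it assumes the exporter's default listen port 9216, which may differ in your deployment.

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Assumed default mongodb-exporter address; adjust to your deployment.
	resp, err := http.Get("http://localhost:9216/metrics")
	if err != nil {
		fmt.Println("exporter not reachable:", err)
		return
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("failed to read metrics:", err)
		return
	}
	// Count exposed metric lines (non-comment lines) as a quick health signal
	// before pointing the OpenTelemetry Collector's Prometheus receiver at this endpoint.
	count := 0
	for _, line := range strings.Split(string(body), "\n") {
		if line != "" && !strings.HasPrefix(line, "#") {
			count++
		}
	}
	fmt.Printf("mongodb-exporter exposes %d metric samples\n", count)
}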
MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Avg QPS  meter_mongodb_cluster_document_avg_qps Avg document operations rate of nodes mongodb-exporter   Operation Avg QPS  meter_mongodb_cluster_operation_avg_qps Avg operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Avg  meter_mongodb_cluster_cursor_avg Avg Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Avg Data Size Per Shard (GB) GB meter_mongodb_cluster_db_data_size Avg data size per shard (replSet) of every database mongodb-exporter   DB Avg Index Size Per Shard (GB) GB meter_mongodb_cluster_db_index_size Avg index size per shard (replSet) of every database mongodb-exporter   DB Avg Collection Count Per Shard  meter_mongodb_cluster_db_collection_count Avg collection count per shard (replSet) of every database mongodb-exporter   DB Avg Index Count Per Shard  meter_mongodb_cluster_db_index_count Avg index count per shard (replSet) of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_latency Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  
meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_operation_latency Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","excerpt":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. …","ref":"/docs/main/latest/en/setup/backend/backend-mongodb-monitoring/","title":"MongoDB monitoring"},{"body":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. 
In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Total QPS  meter_mongodb_cluster_document_avg_qps Total document operations rate of nodes mongodb-exporter   Operation Total QPS  meter_mongodb_cluster_operation_avg_qps Total operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Total  meter_mongodb_cluster_cursor_avg Total Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Total Data Size (GB) GB meter_mongodb_cluster_db_data_size Total data size of every database mongodb-exporter   DB Total Index Size (GB) GB meter_mongodb_cluster_db_index_size Total index size per of every database mongodb-exporter   DB Total Collection Count  meter_mongodb_cluster_db_collection_count Total collection count of every database mongodb-exporter   DB Total Index Count  meter_mongodb_cluster_db_index_count Total index count of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_op_ratemeter_mongodb_node_latency_rate Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per second mongodb-exporter   Operation Latency (µs) µs 
meter_mongodb_node_op_ratemeter_mongodb_node_latency_rate Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","excerpt":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. …","ref":"/docs/main/next/en/setup/backend/backend-mongodb-monitoring/","title":"MongoDB monitoring"},{"body":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. 
In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Avg QPS  meter_mongodb_cluster_document_avg_qps Avg document operations rate of nodes mongodb-exporter   Operation Avg QPS  meter_mongodb_cluster_operation_avg_qps Avg operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Avg  meter_mongodb_cluster_cursor_avg Avg Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Avg Data Size Per Shard (GB) GB meter_mongodb_cluster_db_data_size Avg data size per shard (replSet) of every database mongodb-exporter   DB Avg Index Size Per Shard (GB) GB meter_mongodb_cluster_db_index_size Avg index size per shard (replSet) of every database mongodb-exporter   DB Avg Collection Count Per Shard  meter_mongodb_cluster_db_collection_count Avg collection count per shard (replSet) of every database mongodb-exporter   DB Avg Index Count Per Shard  meter_mongodb_cluster_db_index_count Avg index count per shard (replSet) of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_latency Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per 
second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_operation_latency Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","excerpt":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-mongodb-monitoring/","title":"MongoDB monitoring"},{"body":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. 
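The last setup step, "Config SkyWalking OpenTelemetry receiver", maps to the receiver-otel module in the OAP's application.yml. Key and handler names have shifted between releases (for example enabledOtelRules vs enabledOtelMetricsRules, "otlp" vs "otlp-metrics"), so treat this as an illustrative sketch and check the file that ships with your version:

```yaml
receiver-otel:
  selector: default
  default:
    enabledHandlers: otlp-metrics        # "oc"/"otlp" in some older releases
    enabledOtelMetricsRules: mongodb/*   # activates the rules under /config/otel-rules/mongodb
```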
In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Avg QPS  meter_mongodb_cluster_document_avg_qps Avg document operations rate of nodes mongodb-exporter   Operation Avg QPS  meter_mongodb_cluster_operation_avg_qps Avg operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Avg  meter_mongodb_cluster_cursor_avg Avg Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Avg Data Size Per Shard (GB) GB meter_mongodb_cluster_db_data_size Avg data size per shard (replSet) of every database mongodb-exporter   DB Avg Index Size Per Shard (GB) GB meter_mongodb_cluster_db_index_size Avg index size per shard (replSet) of every database mongodb-exporter   DB Avg Collection Count Per Shard  meter_mongodb_cluster_db_collection_count Avg collection count per shard (replSet) of every database mongodb-exporter   DB Avg Index Count Per Shard  meter_mongodb_cluster_db_index_count Avg index count per shard (replSet) of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_latency Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per 
second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_operation_latency Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","excerpt":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-mongodb-monitoring/","title":"MongoDB monitoring"},{"body":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document for more details.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. 
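The storage block quoted above loses its line breaks in this flattened rendering. Restored to readable YAML, with indentation as in the stock application.yml and values exactly as quoted:

```yaml
storage:
  selector: ${SW_STORAGE:mysql}
  mysql:
    properties:
      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true&allowMultiQueries=true"}
      dataSource.user: ${SW_DATA_SOURCE_USER:root}
      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
    maxSizeOfBatchSql: ${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}
    asyncBatchPersistentPoolSize: ${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}
```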
Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\n","excerpt":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT …","ref":"/docs/main/latest/en/setup/backend/storages/mysql/","title":"MySQL"},{"body":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document for more details.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\n","excerpt":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT …","ref":"/docs/main/next/en/setup/backend/storages/mysql/","title":"MySQL"},{"body":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document for more details.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. 
Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\n","excerpt":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT …","ref":"/docs/main/v9.7.0/en/setup/backend/storages/mysql/","title":"MySQL"},{"body":"MySQL monitoring SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data from MySQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL Monitoring MySQL monitoring provides monitoring of the status and resources of the MySQL server. MySQL server as a Service in OAP, and land on the Layer: MYSQL.\nMySQL Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql.yaml. 
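For the "Set up mysqld_exporter" step above, a minimal way to run the exporter beside the database is a Compose service like the following sketch. Service names and credentials are placeholders, and exporter releases from roughly 0.15 onward prefer a --config.my-cnf file over the DATA_SOURCE_NAME variable:

```yaml
services:
  mysqld-exporter:
    image: prom/mysqld-exporter
    environment:
      # DSN of a monitoring user with PROCESS, REPLICATION CLIENT and SELECT grants (placeholder credentials)
      DATA_SOURCE_NAME: "exporter:exporter-password@(mysql:3306)/"
    ports:
      - "9104:9104"   # default mysqld_exporter listen port
```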
The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL monitoring SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data from …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-mysql-monitoring/","title":"MySQL monitoring"},{"body":"MySQL monitoring MySQL server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL Monitoring MySQL monitoring provides monitoring of the status and resources of the MySQL server. MySQL cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql.yaml. 
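On the Collector side, the pipeline is the same as the MongoDB sketch earlier in this index; only the scrape target changes. Assuming the exporter service above, the Prometheus Receiver job could look like this (hostname is an assumption):

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: mysql-monitoring
          scrape_interval: 30s
          static_configs:
            - targets: ['mysqld-exporter:9104']
```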
The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL.\nData flow  fluentbit agent collects slow sql logs from MySQL. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config MySQL to enable slow log.example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL server. MySQL server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL monitoring MySQL server performance from prometheus/mysqld_exporter SkyWalking leverages …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-mysql-monitoring/","title":"MySQL monitoring"},{"body":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. 
mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking …","ref":"/docs/main/latest/en/setup/backend/backend-mysql-monitoring/","title":"MySQL/MariaDB monitoring"},{"body":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_max_connections\nmeter_mysql_status_thread_connected\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  
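For the fluentbit step in the list above, a rough transport-only sketch in Fluent Bit's YAML configuration mode. The slow-log path, OAP hostname, port, and output URI are all assumptions to be checked against the linked configuration, which also adds filters that reshape each record into the OAP-native JSON body before sending:

```yaml
pipeline:
  inputs:
    - name: tail
      path: /var/log/mysql/mysql-slow.log   # assumed slow-log location
      tag: mysql.slowsql
  outputs:
    - name: http
      match: mysql.slowsql
      host: skywalking-oap                   # assumed OAP hostname
      port: 12800                            # OAP HTTP/REST port (default)
      uri: /v3/logs                          # native log ingestion endpoint; verify for your version
      format: json
```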
Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking …","ref":"/docs/main/next/en/setup/backend/backend-mysql-monitoring/","title":"MySQL/MariaDB monitoring"},{"body":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. 
mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-mysql-monitoring/","title":"MySQL/MariaDB monitoring"},{"body":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  
Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-mysql-monitoring/","title":"MySQL/MariaDB monitoring"},{"body":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. 
mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-mysql-monitoring/","title":"MySQL/MariaDB monitoring"},{"body":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  
Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","excerpt":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-mysql-monitoring/","title":"MySQL/MariaDB monitoring"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/latest/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. 
It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/next/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   
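Both knobs mentioned here live in the elasticsearch section of the storage configuration in application.yml. A trimmed sketch follows; only the two environment variables are quoted in the text above, so the property names (as they appear in recent 9.x bundles) should be verified against your own application.yml:

```yaml
storage:
  selector: ${SW_STORAGE:elasticsearch}
  elasticsearch:
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
    # false = no-sharding mode (default); true = one index template per aggregation function
    logicSharding: ${SW_STORAGE_ES_LOGIC_SHARDING:false}
    # physical ElasticSearch shards per index; independent of the logic-sharding mode
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}
```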
Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/v9.2.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/v9.3.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  
The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/v9.4.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/v9.5.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. 
OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/v9.6.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   
Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","excerpt":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 …","ref":"/docs/main/v9.7.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/","title":"New ElasticSearch storage option explanation in 9.2.0"},{"body":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua library that can be used with Nginx to collect metrics and expose them on a separate web page. To use this library, you will need Nginx with lua-nginx-module or directly OpenResty.\nSkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  nginx-lua-prometheus collects metrics from Nginx and expose them to an endpoint. OpenTelemetry Collector fetches metrics from the endpoint expose above via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Collect Nginx metrics and expose the following four metrics by nginx-lua-prometheus. For details on metrics definition, refer to here.   histogram: nginx_http_latency gauge: nginx_http_connections counter: nginx_http_size_bytes counter: nginx_http_requests_total  Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Nginx Monitoring SkyWalking observes the status, payload, and latency of the Nginx server, which is cataloged as a LAYER: Nginx Service in the OAP and instances would be recognized as LAYER: Nginx instance.\nAbout LAYER: Nginx endpoint, it depends on how precision you want to monitor the nginx. 
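Whichever level of precision you choose, the metric objects referenced in the log_by_lua_block snippets below must be created once per worker before they can be updated. The following is only a minimal init sketch with nginx-lua-prometheus, reusing the four metric names from the set-up list above; the label names, the shared-dictionary size, and the 9145 exporter port are assumptions rather than part of this guide.

```nginx
http {
  lua_shared_dict prometheus_metrics 10M;

  init_worker_by_lua_block {
    prometheus = require("prometheus").init("prometheus_metrics")
    -- metric names follow the set-up list above; label sets are assumptions
    metric_requests    = prometheus:counter("nginx_http_requests_total", "Number of HTTP requests", {"status", "host"})
    metric_bytes       = prometheus:counter("nginx_http_size_bytes", "Request/response size in bytes", {"type", "host"})
    metric_latency     = prometheus:histogram("nginx_http_latency", "HTTP request latency", {"host"})
    metric_connections = prometheus:gauge("nginx_http_connections", "Number of HTTP connections", {"state"})
  }

  server {
    listen 9145;
    location /metrics {
      content_by_lua_block {
        metric_connections:set(ngx.var.connections_reading, {"reading"})
        metric_connections:set(ngx.var.connections_waiting, {"waiting"})
        metric_connections:set(ngx.var.connections_writing, {"writing"})
        prometheus:collect()
      }
    }
  }
}
```

The OpenTelemetry Collector's Prometheus Receiver then scrapes the /metrics endpoint exposed here and forwards the result to the OAP, as described in the data flow above.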
We do not recommend expose every request path metrics, because it will cause explosion of metrics endpoint data.\nYou can collect host metrics:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or grouped urls and upstream metrics:\nupstream backend { server ip:port; } server { location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } Nginx Service Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_service_http_requests Service The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_service_http_latency Service The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_service_bandwidth Service The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_service_http_connections Service The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_service_http_status Service The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_service_http_4xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_service_http_5xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Instance Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_instance_http_requests Instance The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_instance_http_latency Instance The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_instance_http_connections Instance The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_instance_http_status Instance The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 
4xx Percent % meter_nginx_instance_http_4xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_instance_http_5xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Endpoint Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_endpoint_http_requests Endpoint The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_endpoint_http_latency Endpoint The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_endpoint_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Status Trend  meter_nginx_endpoint_http_status Endpoint The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_endpoint_http_4xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_endpoint_http_5xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-rules/nginx-service.yaml, /config/otel-rules/nginx-instance.yaml, /config/otel-rules/nginx-endpoint.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\nCollect nginx access and error log SkyWalking leverages fluentbit or other log agents for collecting access log and error log of Nginx.\nData flow  fluentbit agent collects access log and error log from Nginx. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Install fluentbit. Config fluent bit with fluent-bit.conf, refer to here.  Error Log Monitoring Error Log monitoring provides monitoring of the error.log of the Nginx server.\nSupported Metrics    Monitoring Panel Metric Name Catalog Description Data Source     Service Error Log Count meter_nginx_service_error_log_count Service The count of log level of nginx error.log fluent bit   Instance Error Log Count meter_nginx_instance_error_log_count Instance The count of log level of nginx error.log fluent bit    Customizations You can customize your own metrics/expression/dashboard panel.\nThe log collect and analyse rules are found in /config/lal/nginx.yaml, /config/log-mal-rules/nginx.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\n","excerpt":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua …","ref":"/docs/main/latest/en/setup/backend/backend-nginx-monitoring/","title":"Nginx monitoring"},{"body":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua library that can be used with Nginx to collect metrics and expose them on a separate web page. 
To use this library, you will need Nginx with lua-nginx-module or directly OpenResty.\nSkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  nginx-lua-prometheus collects metrics from Nginx and expose them to an endpoint. OpenTelemetry Collector fetches metrics from the endpoint expose above via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Collect Nginx metrics and expose the following four metrics by nginx-lua-prometheus. For details on metrics definition, refer to here.   histogram: nginx_http_latency gauge: nginx_http_connections counter: nginx_http_size_bytes counter: nginx_http_requests_total  Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Nginx Monitoring SkyWalking observes the status, payload, and latency of the Nginx server, which is cataloged as a LAYER: Nginx Service in the OAP and instances would be recognized as LAYER: Nginx instance.\nAbout LAYER: Nginx endpoint, it depends on how precision you want to monitor the nginx. We do not recommend expose every request path metrics, because it will cause explosion of metrics endpoint data.\nYou can collect host metrics:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or grouped urls and upstream metrics:\nupstream backend { server ip:port; } server { location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } Nginx Service Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_service_http_requests Service The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_service_http_latency Service The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_service_bandwidth Service The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_service_http_connections Service The avg number of the connections 
nginx-lua-prometheus   HTTP Status Trend  meter_nginx_service_http_status Service The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_service_http_4xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_service_http_5xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Instance Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_instance_http_requests Instance The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_instance_http_latency Instance The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_instance_http_connections Instance The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_instance_http_status Instance The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_instance_http_4xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_instance_http_5xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Endpoint Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_endpoint_http_requests Endpoint The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_endpoint_http_latency Endpoint The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_endpoint_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Status Trend  meter_nginx_endpoint_http_status Endpoint The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_endpoint_http_4xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_endpoint_http_5xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-rules/nginx-service.yaml, /config/otel-rules/nginx-instance.yaml, /config/otel-rules/nginx-endpoint.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\nCollect nginx access and error log SkyWalking leverages fluentbit or other log agents for collecting access log and error log of Nginx.\nData flow  fluentbit agent collects access log and error log from Nginx. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Install fluentbit. 
Config fluent bit with fluent-bit.conf, refer to here.  Error Log Monitoring Error Log monitoring provides monitoring of the error.log of the Nginx server.\nSupported Metrics    Monitoring Panel Metric Name Catalog Description Data Source     Service Error Log Count meter_nginx_service_error_log_count Service The count of log level of nginx error.log fluent bit   Instance Error Log Count meter_nginx_instance_error_log_count Instance The count of log level of nginx error.log fluent bit    Customizations You can customize your own metrics/expression/dashboard panel.\nThe log collect and analyse rules are found in /config/lal/nginx.yaml, /config/log-mal-rules/nginx.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\n","excerpt":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua …","ref":"/docs/main/next/en/setup/backend/backend-nginx-monitoring/","title":"Nginx monitoring"},{"body":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua library that can be used with Nginx to collect metrics and expose them on a separate web page. To use this library, you will need Nginx with lua-nginx-module or directly OpenResty.\nSkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  nginx-lua-prometheus collects metrics from Nginx and expose them to an endpoint. OpenTelemetry Collector fetches metrics from the endpoint expose above via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Collect Nginx metrics and expose the following four metrics by nginx-lua-prometheus. For details on metrics definition, refer to here.   histogram: nginx_http_latency gauge: nginx_http_connections counter: nginx_http_size_bytes counter: nginx_http_requests_total  Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Nginx Monitoring SkyWalking observes the status, payload, and latency of the Nginx server, which is cataloged as a LAYER: Nginx Service in the OAP and instances would be recognized as LAYER: Nginx instance.\nAbout LAYER: Nginx endpoint, it depends on how precision you want to monitor the nginx. 
We do not recommend expose every request path metrics, because it will cause explosion of metrics endpoint data.\nYou can collect host metrics:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or grouped urls and upstream metrics:\nupstream backend { server ip:port; } server { location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } Nginx Service Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_service_http_requests Service The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_service_http_latency Service The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_service_bandwidth Service The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_service_http_connections Service The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_service_http_status Service The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_service_http_4xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_service_http_5xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Instance Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_instance_http_requests Instance The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_instance_http_latency Instance The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_instance_http_connections Instance The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_instance_http_status Instance The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 
4xx Percent % meter_nginx_instance_http_4xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_instance_http_5xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Endpoint Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_endpoint_http_requests Endpoint The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_endpoint_http_latency Endpoint The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_endpoint_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Status Trend  meter_nginx_endpoint_http_status Endpoint The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_endpoint_http_4xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_endpoint_http_5xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-rules/nginx-service.yaml, /config/otel-rules/nginx-instance.yaml, /config/otel-rules/nginx-endpoint.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\nCollect nginx access and error log SkyWalking leverages fluentbit or other log agents for collecting access log and error log of Nginx.\nData flow  fluentbit agent collects access log and error log from Nginx. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Install fluentbit. Config fluent bit with fluent-bit.conf, refer to here.  Error Log Monitoring Error Log monitoring provides monitoring of the error.log of the Nginx server.\nSupported Metrics    Monitoring Panel Metric Name Catalog Description Data Source     Service Error Log Count meter_nginx_service_error_log_count Service The count of log level of nginx error.log fluent bit   Instance Error Log Count meter_nginx_instance_error_log_count Instance The count of log level of nginx error.log fluent bit    Customizations You can customize your own metrics/expression/dashboard panel.\nThe log collect and analyse rules are found in /config/lal/nginx.yaml, /config/log-mal-rules/nginx.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\n","excerpt":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-nginx-monitoring/","title":"Nginx monitoring"},{"body":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. 
So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","excerpt":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server …","ref":"/docs/main/latest/en/guides/dependencies/","title":"OAP backend dependency management"},{"body":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","excerpt":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server …","ref":"/docs/main/next/en/guides/dependencies/","title":"OAP backend dependency management"},{"body":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. 
Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","excerpt":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server …","ref":"/docs/main/v9.6.0/en/guides/dependencies/","title":"OAP backend dependency management"},{"body":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","excerpt":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server …","ref":"/docs/main/v9.7.0/en/guides/dependencies/","title":"OAP backend dependency management"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
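For the OpenTelemetry Collector mentioned in the set-up step above, a minimal scrape-and-forward sketch is shown below. The oap host name, the 1234 self-telemetry port, and the scrape interval are assumptions about a typical deployment; the job name and extra labels that the bundled config/otel-rules/oap.yaml rules expect are described in the OAP Self Observability Telemetry doc referenced above.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: skywalking-so11y      # scrape the OAP Prometheus telemetry endpoint
          scrape_interval: 10s
          static_configs:
            - targets: ["oap:1234"]       # host and port are assumptions about your deployment

exporters:
  otlp:
    endpoint: oap:11800                   # OAP gRPC receiver, OTLP metrics
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]
```

In Kubernetes, the preferred layout described above runs this as a dedicated collector (Deployment or sidecar) pointing at the OAP service.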
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. 
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/latest/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   
Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/next/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Self Observability. (Optional) Set up OpenTelemetry Collector .. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. 
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/v9.3.0/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency 
(ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/v9.4.0/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. 
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/v9.5.0/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   
Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/v9.6.0/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","excerpt":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format …","ref":"/docs/main/v9.7.0/en/setup/backend/dashboards-so11y/","title":"OAP self observability dashboard"},{"body":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs:\n OAPServerConfig: The CRD holds all static configuration, including environment variable and file configuration. OAPServerDynamicConfig: The CRD holds all dynamic configuration.  
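Before the field-by-field tables, it may help to see the one thing that ties the two CRDs together in the demos below: OAPServerConfig switches the OAP server to configmap-based dynamic configuration and sets a label selector, and every OAPServerDynamicConfig should declare the same selector. A minimal sketch of just that linkage, with values copied from the demos and everything else omitted:
# In OAPServerConfig (static side): enable configmap-based dynamic configuration
env:
  - name: SW_CONFIGURATION
    value: k8s-configmap
  - name: SW_CLUSTER_K8S_LABEL
    value: app=collector,release=skywalking
---
# In OAPServerDynamicConfig (dynamic side): labelSelector should match the selector above
labelSelector: app=collector,release=skywalking
data:
  - name: agent-analyzer.default.slowDBAccessThreshold
    value: "default:200,mongodb:50"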
Spec of OAPServerConfig    Field Name Description     Version The version of OAP server, the default value is 9.5.0   Env The environment variable of OAP server   File The static file in OAP Server, which contains three fieldsfile.path、file.name and file.data. The file.path plus the file.name is the real file that needs to be replaced in the container image, and the file.data is the final data in the specific file.    Status of OAPServerConfig    Field Name Description     Desired The number of oapserver that need to be configured   Ready The number of oapserver that configured successfully   CreationTime The time the OAPServerConfig was created.   LastUpdateTime The last time this condition was updated.    Demo of OAPServerConfig  When using the file, please don\u0026rsquo;t set the same name\n # static configuration of OAPServerapiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerConfigmetadata:name:oapserverconfig-samplenamespace:skywalking-systemspec:# The version of OAPServerversion:9.5.0# The env configuration of OAPServerenv:- name:JAVA_OPTSvalue:-Xmx2048M- name:SW_CLUSTERvalue:kubernetes- name:SW_CLUSTER_K8S_NAMESPACEvalue:skywalking-system# enable the dynamic configuration- name:SW_CONFIGURATIONvalue:k8s-configmap# set the labelselector of the dynamic configuration- name:SW_CLUSTER_K8S_LABELvalue:app=collector,release=skywalking- name:SW_TELEMETRYvalue:prometheus- name:SW_HEALTH_CHECKERvalue:default- name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uid- name:SW_LOG_LAL_FILESvalue:test1- name:SW_LOG_MAL_FILESvalue:test2# The file configuration of OAPServer# we should avoid setting the same file name in the filefile:- name:test1.yamlpath:/skywalking/config/laldata:|rules: - name: example dsl: | filter { text { abortOnFailure false // for test purpose, we want to persist all logs regexp $/(?s)(?\u0026lt;timestamp\u0026gt;\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3}) \\[TID:(?\u0026lt;tid\u0026gt;.+?)] \\[(?\u0026lt;thread\u0026gt;.+?)] (?\u0026lt;level\u0026gt;\\w{4,}) (?\u0026lt;logger\u0026gt;.{1,36}) (?\u0026lt;msg\u0026gt;.+)/$ } extractor { metrics { timestamp log.timestamp as Long labels level: parsed.level, service: log.service, instance: log.serviceInstance name \u0026#34;log_count\u0026#34; value 1 } } sink { } }- name:test2.yamlpath:/skywalking/config/log-mal-rulesdata:|expSuffix: instance([\u0026#39;service\u0026#39;], [\u0026#39;instance\u0026#39;], Layer.GENERAL) metricPrefix: log metricsRules: - name: count_info exp: log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;INFO\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).downsampling(SUM)Spec of OAPServerDynamicConfig    Field Name Description     Version The version of the OAP server, the default value is 9.5.0   LabelSelector The label selector of the specific configmap, the default value is \u0026ldquo;app=collector,release=skywalking\u0026rdquo;   Data All configurations' key and value    Status of OAPServerDynamicConfig    Field Name Description     State The state of dynamic configuration, running or stopped   CreationTime All configurations in one CR, the default value is false   LastUpdateTime The last time this condition was updated    Usage of OAPServerDynamicConfig  Notice, the CR\u0026rsquo;s name cannot contain capital letters.\n Users can split all configurations into several CRs. 
when using the OAPServerDynamicConfig, users can not only put some configurations in a CR, but also put a configuration in a CR, and the spec.data.name in CR represents one dynamic configuration.\nDemo of Global configuration apiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:oapserverdynamicconfig-samplespec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:agent-analyzer.default.slowDBAccessThresholdvalue:default:200,mongodb:50- name:alarm.default.alarm-settingsvalue:|-rules: # Rule unique name, must be ended with `_rule`. service_resp_time_rule: metrics-name: service_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 3 silence-period: 5 message: Response time of service {name} is more than 1000ms in 3 minutes of last 10 minutes. service_sla_rule: # Metrics value need to be long, double or int metrics-name: service_sla op: \u0026#34;\u0026lt;\u0026#34; threshold: 8000 # The length of time to evaluate the metrics period: 10 # How many times after the metrics match the condition, will trigger alarm count: 2 # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period. silence-period: 3 message: Successful rate of service {name} is lower than 80% in 2 minutes of last 10 minutes service_resp_time_percentile_rule: # Metrics value need to be long, double or int metrics-name: service_percentile op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000,1000,1000,1000,1000 period: 10 count: 3 silence-period: 5 message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000 service_instance_resp_time_rule: metrics-name: service_instance_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 2 silence-period: 5 message: Response time of service instance {name} is more than 1000ms in 2 minutes of last 10 minutes database_access_resp_time_rule: metrics-name: database_access_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of database access {name} is more than 1000ms in 2 minutes of last 10 minutes endpoint_relation_resp_time_rule: metrics-name: endpoint_relation_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of endpoint relation {name} is more than 1000ms in 2 minutes of last 10 minutes # Active endpoint related metrics alarm will cost more memory than service and service instance metrics alarm. # Because the number of endpoint is much more than service and instance. 
# # endpoint_resp_time_rule: # metrics-name: endpoint_resp_time # op: \u0026#34;\u0026gt;\u0026#34; # threshold: 1000 # period: 10 # count: 2 # silence-period: 5 # message: Response time of endpoint {name} is more than 1000ms in 2 minutes of last 10 minutes webhooks: # - http://127.0.0.1/notify/ # - http://127.0.0.1/go-wechat/- name:core.default.apdexThresholdvalue:|-default: 500 # example: # the threshold of service \u0026#34;tomcat\u0026#34; is 1s # tomcat: 1000 # the threshold of service \u0026#34;springboot1\u0026#34; is 50ms # springboot1: 50- name:agent-analyzer.default.uninstrumentedGatewaysvalue:|-#gateways: # - name: proxy0 # instances: # - host: 127.0.0.1 # the host/ip of this gateway instance # port: 9099 # the port of this gateway instance, defaults to 80Demo of Single configuration Set the dynamic configuration agent-analyzer.default.slowDBAccessThreshold as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:agent-analyzer.defaultspec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:slowDBAccessThresholdvalue:default:200,mongodb:50Set the dynamic configuration core.default.endpoint-name-grouping-openapi.customerAPI-v1 and core.default.endpoint-name-grouping-openapi.productAPI-v1 as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:core.default.endpoint-name-grouping-openapispec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:customerAPI-v1value:value of customerAPI-v1- name:productAPI-v1value:value of productAPI-v1","excerpt":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs: …","ref":"/docs/skywalking-swck/latest/oapserver-configuration/","title":"OAPSever Configuration Introduction"},{"body":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs:\n OAPServerConfig: The CRD holds all static configuration, including environment variable and file configuration. OAPServerDynamicConfig: The CRD holds all dynamic configuration.  Spec of OAPServerConfig    Field Name Description     Version The version of OAP server, the default value is 9.5.0   Env The environment variable of OAP server   File The static file in OAP Server, which contains three fieldsfile.path、file.name and file.data. The file.path plus the file.name is the real file that needs to be replaced in the container image, and the file.data is the final data in the specific file.    Status of OAPServerConfig    Field Name Description     Desired The number of oapserver that need to be configured   Ready The number of oapserver that configured successfully   CreationTime The time the OAPServerConfig was created.   LastUpdateTime The last time this condition was updated.    
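The File field in the Spec table above is easiest to read as: path plus name is the file that gets replaced inside the container image, and data is written as its contents. A tiny illustrative sketch (the file name is made up; the path follows the demo below):
file:
  - name: lal-rules.yaml            # illustrative file name
    path: /skywalking/config/lal    # target directory inside the OAP image
    # data is written verbatim as the file contents
    data: |
      rules: []
# Result: the container ends up with /skywalking/config/lal/lal-rules.yaml
As the note attached to the demo says, do not give two file entries the same name.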
Demo of OAPServerConfig  When using the file, please don\u0026rsquo;t set the same name\n # static configuration of OAPServerapiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerConfigmetadata:name:oapserverconfig-samplenamespace:skywalking-systemspec:# The version of OAPServerversion:9.5.0# The env configuration of OAPServerenv:- name:JAVA_OPTSvalue:-Xmx2048M- name:SW_CLUSTERvalue:kubernetes- name:SW_CLUSTER_K8S_NAMESPACEvalue:skywalking-system# enable the dynamic configuration- name:SW_CONFIGURATIONvalue:k8s-configmap# set the labelselector of the dynamic configuration- name:SW_CLUSTER_K8S_LABELvalue:app=collector,release=skywalking- name:SW_TELEMETRYvalue:prometheus- name:SW_HEALTH_CHECKERvalue:default- name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uid- name:SW_LOG_LAL_FILESvalue:test1- name:SW_LOG_MAL_FILESvalue:test2# The file configuration of OAPServer# we should avoid setting the same file name in the filefile:- name:test1.yamlpath:/skywalking/config/laldata:|rules: - name: example dsl: | filter { text { abortOnFailure false // for test purpose, we want to persist all logs regexp $/(?s)(?\u0026lt;timestamp\u0026gt;\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3}) \\[TID:(?\u0026lt;tid\u0026gt;.+?)] \\[(?\u0026lt;thread\u0026gt;.+?)] (?\u0026lt;level\u0026gt;\\w{4,}) (?\u0026lt;logger\u0026gt;.{1,36}) (?\u0026lt;msg\u0026gt;.+)/$ } extractor { metrics { timestamp log.timestamp as Long labels level: parsed.level, service: log.service, instance: log.serviceInstance name \u0026#34;log_count\u0026#34; value 1 } } sink { } }- name:test2.yamlpath:/skywalking/config/log-mal-rulesdata:|expSuffix: instance([\u0026#39;service\u0026#39;], [\u0026#39;instance\u0026#39;], Layer.GENERAL) metricPrefix: log metricsRules: - name: count_info exp: log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;INFO\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).downsampling(SUM)Spec of OAPServerDynamicConfig    Field Name Description     Version The version of the OAP server, the default value is 9.5.0   LabelSelector The label selector of the specific configmap, the default value is \u0026ldquo;app=collector,release=skywalking\u0026rdquo;   Data All configurations' key and value    Status of OAPServerDynamicConfig    Field Name Description     State The state of dynamic configuration, running or stopped   CreationTime All configurations in one CR, the default value is false   LastUpdateTime The last time this condition was updated    Usage of OAPServerDynamicConfig  Notice, the CR\u0026rsquo;s name cannot contain capital letters.\n Users can split all configurations into several CRs. when using the OAPServerDynamicConfig, users can not only put some configurations in a CR, but also put a configuration in a CR, and the spec.data.name in CR represents one dynamic configuration.\nDemo of Global configuration apiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:oapserverdynamicconfig-samplespec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:agent-analyzer.default.slowDBAccessThresholdvalue:default:200,mongodb:50- name:alarm.default.alarm-settingsvalue:|-rules: # Rule unique name, must be ended with `_rule`. 
service_resp_time_rule: metrics-name: service_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 3 silence-period: 5 message: Response time of service {name} is more than 1000ms in 3 minutes of last 10 minutes. service_sla_rule: # Metrics value need to be long, double or int metrics-name: service_sla op: \u0026#34;\u0026lt;\u0026#34; threshold: 8000 # The length of time to evaluate the metrics period: 10 # How many times after the metrics match the condition, will trigger alarm count: 2 # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period. silence-period: 3 message: Successful rate of service {name} is lower than 80% in 2 minutes of last 10 minutes service_resp_time_percentile_rule: # Metrics value need to be long, double or int metrics-name: service_percentile op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000,1000,1000,1000,1000 period: 10 count: 3 silence-period: 5 message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000 service_instance_resp_time_rule: metrics-name: service_instance_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 2 silence-period: 5 message: Response time of service instance {name} is more than 1000ms in 2 minutes of last 10 minutes database_access_resp_time_rule: metrics-name: database_access_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of database access {name} is more than 1000ms in 2 minutes of last 10 minutes endpoint_relation_resp_time_rule: metrics-name: endpoint_relation_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of endpoint relation {name} is more than 1000ms in 2 minutes of last 10 minutes # Active endpoint related metrics alarm will cost more memory than service and service instance metrics alarm. # Because the number of endpoint is much more than service and instance. 
# # endpoint_resp_time_rule: # metrics-name: endpoint_resp_time # op: \u0026#34;\u0026gt;\u0026#34; # threshold: 1000 # period: 10 # count: 2 # silence-period: 5 # message: Response time of endpoint {name} is more than 1000ms in 2 minutes of last 10 minutes webhooks: # - http://127.0.0.1/notify/ # - http://127.0.0.1/go-wechat/- name:core.default.apdexThresholdvalue:|-default: 500 # example: # the threshold of service \u0026#34;tomcat\u0026#34; is 1s # tomcat: 1000 # the threshold of service \u0026#34;springboot1\u0026#34; is 50ms # springboot1: 50- name:agent-analyzer.default.uninstrumentedGatewaysvalue:|-#gateways: # - name: proxy0 # instances: # - host: 127.0.0.1 # the host/ip of this gateway instance # port: 9099 # the port of this gateway instance, defaults to 80Demo of Single configuration Set the dynamic configuration agent-analyzer.default.slowDBAccessThreshold as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:agent-analyzer.defaultspec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:slowDBAccessThresholdvalue:default:200,mongodb:50Set the dynamic configuration core.default.endpoint-name-grouping-openapi.customerAPI-v1 and core.default.endpoint-name-grouping-openapi.productAPI-v1 as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:core.default.endpoint-name-grouping-openapispec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:customerAPI-v1value:value of customerAPI-v1- name:productAPI-v1value:value of productAPI-v1","excerpt":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs: …","ref":"/docs/skywalking-swck/next/oapserver-configuration/","title":"OAPSever Configuration Introduction"},{"body":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs:\n OAPServerConfig: The CRD holds all static configuration, including environment variable and file configuration. OAPServerDynamicConfig: The CRD holds all dynamic configuration.  Spec of OAPServerConfig    Field Name Description     Version The version of OAP server, the default value is 9.5.0   Env The environment variable of OAP server   File The static file in OAP Server, which contains three fieldsfile.path、file.name and file.data. The file.path plus the file.name is the real file that needs to be replaced in the container image, and the file.data is the final data in the specific file.    Status of OAPServerConfig    Field Name Description     Desired The number of oapserver that need to be configured   Ready The number of oapserver that configured successfully   CreationTime The time the OAPServerConfig was created.   LastUpdateTime The last time this condition was updated.    
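Read together, Desired and Ready in the Status table above behave like a rollout counter: the configuration has converged once the two values match. Purely as an illustration (field casing and timestamp format are assumptions, not taken from the CRD definition), a fetched resource might report:
status:
  desired: 1                              # OAP servers that need this configuration
  ready: 1                                # OAP servers configured successfully
  creationTime: "2024-01-01T00:00:00Z"    # illustrative value
  lastUpdateTime: "2024-01-01T00:05:00Z"  # illustrative value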
Demo of OAPServerConfig  When using the file, please don\u0026rsquo;t set the same name\n # static configuration of OAPServerapiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerConfigmetadata:name:oapserverconfig-samplenamespace:skywalking-systemspec:# The version of OAPServerversion:9.5.0# The env configuration of OAPServerenv:- name:JAVA_OPTSvalue:-Xmx2048M- name:SW_CLUSTERvalue:kubernetes- name:SW_CLUSTER_K8S_NAMESPACEvalue:skywalking-system# enable the dynamic configuration- name:SW_CONFIGURATIONvalue:k8s-configmap# set the labelselector of the dynamic configuration- name:SW_CLUSTER_K8S_LABELvalue:app=collector,release=skywalking- name:SW_TELEMETRYvalue:prometheus- name:SW_HEALTH_CHECKERvalue:default- name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uid- name:SW_LOG_LAL_FILESvalue:test1- name:SW_LOG_MAL_FILESvalue:test2# The file configuration of OAPServer# we should avoid setting the same file name in the filefile:- name:test1.yamlpath:/skywalking/config/laldata:|rules: - name: example dsl: | filter { text { abortOnFailure false // for test purpose, we want to persist all logs regexp $/(?s)(?\u0026lt;timestamp\u0026gt;\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3}) \\[TID:(?\u0026lt;tid\u0026gt;.+?)] \\[(?\u0026lt;thread\u0026gt;.+?)] (?\u0026lt;level\u0026gt;\\w{4,}) (?\u0026lt;logger\u0026gt;.{1,36}) (?\u0026lt;msg\u0026gt;.+)/$ } extractor { metrics { timestamp log.timestamp as Long labels level: parsed.level, service: log.service, instance: log.serviceInstance name \u0026#34;log_count\u0026#34; value 1 } } sink { } }- name:test2.yamlpath:/skywalking/config/log-mal-rulesdata:|expSuffix: instance([\u0026#39;service\u0026#39;], [\u0026#39;instance\u0026#39;], Layer.GENERAL) metricPrefix: log metricsRules: - name: count_info exp: log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;INFO\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).downsampling(SUM)Spec of OAPServerDynamicConfig    Field Name Description     Version The version of the OAP server, the default value is 9.5.0   LabelSelector The label selector of the specific configmap, the default value is \u0026ldquo;app=collector,release=skywalking\u0026rdquo;   Data All configurations' key and value    Status of OAPServerDynamicConfig    Field Name Description     State The state of dynamic configuration, running or stopped   CreationTime All configurations in one CR, the default value is false   LastUpdateTime The last time this condition was updated    Usage of OAPServerDynamicConfig  Notice, the CR\u0026rsquo;s name cannot contain capital letters.\n Users can split all configurations into several CRs. when using the OAPServerDynamicConfig, users can not only put some configurations in a CR, but also put a configuration in a CR, and the spec.data.name in CR represents one dynamic configuration.\nDemo of Global configuration apiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:oapserverdynamicconfig-samplespec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:agent-analyzer.default.slowDBAccessThresholdvalue:default:200,mongodb:50- name:alarm.default.alarm-settingsvalue:|-rules: # Rule unique name, must be ended with `_rule`. 
service_resp_time_rule: metrics-name: service_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 3 silence-period: 5 message: Response time of service {name} is more than 1000ms in 3 minutes of last 10 minutes. service_sla_rule: # Metrics value need to be long, double or int metrics-name: service_sla op: \u0026#34;\u0026lt;\u0026#34; threshold: 8000 # The length of time to evaluate the metrics period: 10 # How many times after the metrics match the condition, will trigger alarm count: 2 # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period. silence-period: 3 message: Successful rate of service {name} is lower than 80% in 2 minutes of last 10 minutes service_resp_time_percentile_rule: # Metrics value need to be long, double or int metrics-name: service_percentile op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000,1000,1000,1000,1000 period: 10 count: 3 silence-period: 5 message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000 service_instance_resp_time_rule: metrics-name: service_instance_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 2 silence-period: 5 message: Response time of service instance {name} is more than 1000ms in 2 minutes of last 10 minutes database_access_resp_time_rule: metrics-name: database_access_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of database access {name} is more than 1000ms in 2 minutes of last 10 minutes endpoint_relation_resp_time_rule: metrics-name: endpoint_relation_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of endpoint relation {name} is more than 1000ms in 2 minutes of last 10 minutes # Active endpoint related metrics alarm will cost more memory than service and service instance metrics alarm. # Because the number of endpoint is much more than service and instance. 
# # endpoint_resp_time_rule: # metrics-name: endpoint_resp_time # op: \u0026#34;\u0026gt;\u0026#34; # threshold: 1000 # period: 10 # count: 2 # silence-period: 5 # message: Response time of endpoint {name} is more than 1000ms in 2 minutes of last 10 minutes webhooks: # - http://127.0.0.1/notify/ # - http://127.0.0.1/go-wechat/- name:core.default.apdexThresholdvalue:|-default: 500 # example: # the threshold of service \u0026#34;tomcat\u0026#34; is 1s # tomcat: 1000 # the threshold of service \u0026#34;springboot1\u0026#34; is 50ms # springboot1: 50- name:agent-analyzer.default.uninstrumentedGatewaysvalue:|-#gateways: # - name: proxy0 # instances: # - host: 127.0.0.1 # the host/ip of this gateway instance # port: 9099 # the port of this gateway instance, defaults to 80Demo of Single configuration Set the dynamic configuration agent-analyzer.default.slowDBAccessThreshold as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:agent-analyzer.defaultspec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:slowDBAccessThresholdvalue:default:200,mongodb:50Set the dynamic configuration core.default.endpoint-name-grouping-openapi.customerAPI-v1 and core.default.endpoint-name-grouping-openapi.productAPI-v1 as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:core.default.endpoint-name-grouping-openapispec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:customerAPI-v1value:value of customerAPI-v1- name:productAPI-v1value:value of productAPI-v1","excerpt":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs: …","ref":"/docs/skywalking-swck/v0.9.0/oapserver-configuration/","title":"OAPSever Configuration Introduction"},{"body":"Observability This document outlines the observability features of BanyanDB, which include metrics, profiling, and tracing. These features help monitor and understand the performance, behavior, and overall health of BanyanDB.\nMetrics BanyanDB has built-in support for metrics collection through the use of build tags. The metrics provider can be enabled by specifying the build tag during the compilation process.\nCurrently, there is only one supported metrics provider: Prometheus. To use Prometheus as the metrics client, include the prometheus build tag when building BanyanDB:\nBUILD_TAGS=prometheus make -C banyand banyand-server\nIf no build tag is specified, the metrics server will not be started, and no metrics will be collected:\nmake -C banyand banyand-server\nWhen the Prometheus metrics provider is enabled, the metrics server listens on port 2121. This allows Prometheus to scrape metrics data from BanyanDB for monitoring and analysis.\nThe Docker image is tagged as \u0026ldquo;prometheus\u0026rdquo; to facilitate cloud-native operations and simplify deployment on Kubernetes. This allows users to directly deploy the Docker image onto their Kubernetes cluster without having to rebuild it with the \u0026ldquo;prometheus\u0026rdquo; tag.\nProfiling Banyand, the server of BanyanDB, supports profiling automatically. 
The profiling data is collected by the pprof package and can be accessed through the /debug/pprof endpoint. The port of the profiling server is 2122 by default.\nTracing TODO: Add details about the tracing support in BanyanDB, such as how to enable tracing, available tracing tools, and how to analyze tracing data.\n","excerpt":"Observability This document outlines the observability features of BanyanDB, which include metrics, …","ref":"/docs/skywalking-banyandb/latest/observability/","title":"Observability"},{"body":"Observability This document outlines the observability features of BanyanDB, which include metrics, profiling, and tracing. These features help monitor and understand the performance, behavior, and overall health of BanyanDB.\nMetrics BanyanDB has built-in support for metrics collection through the use of build tags. The metrics provider can be enabled by specifying the build tag during the compilation process.\nCurrently, there is only one supported metrics provider: Prometheus. To use Prometheus as the metrics client, include the prometheus build tag when building BanyanDB:\nBUILD_TAGS=prometheus make -C banyand banyand-server\nIf no build tag is specified, the metrics server will not be started, and no metrics will be collected:\nmake -C banyand banyand-server\nWhen the Prometheus metrics provider is enabled, the metrics server listens on port 2121. This allows Prometheus to scrape metrics data from BanyanDB for monitoring and analysis.\nThe Docker image is tagged as \u0026ldquo;prometheus\u0026rdquo; to facilitate cloud-native operations and simplify deployment on Kubernetes. This allows users to directly deploy the Docker image onto their Kubernetes cluster without having to rebuild it with the \u0026ldquo;prometheus\u0026rdquo; tag.\nProfiling Banyand, the server of BanyanDB, supports profiling automatically. The profiling data is collected by the pprof package and can be accessed through the /debug/pprof endpoint. The port of the profiling server is 2122 by default.\nTracing TODO: Add details about the tracing support in BanyanDB, such as how to enable tracing, available tracing tools, and how to analyze tracing data.\n","excerpt":"Observability This document outlines the observability features of BanyanDB, which include metrics, …","ref":"/docs/skywalking-banyandb/next/observability/","title":"Observability"},{"body":"Observability This document outlines the observability features of BanyanDB, which include metrics, profiling, and tracing. These features help monitor and understand the performance, behavior, and overall health of BanyanDB.\nMetrics BanyanDB has built-in support for metrics collection through the use of build tags. The metrics provider can be enabled by specifying the build tag during the compilation process.\nCurrently, there is only one supported metrics provider: Prometheus. To use Prometheus as the metrics client, include the prometheus build tag when building BanyanDB:\nBUILD_TAGS=prometheus make -C banyand banyand-server\nIf no build tag is specified, the metrics server will not be started, and no metrics will be collected:\nmake -C banyand banyand-server\nWhen the Prometheus metrics provider is enabled, the metrics server listens on port 2121. This allows Prometheus to scrape metrics data from BanyanDB for monitoring and analysis.\nThe Docker image is tagged as \u0026ldquo;prometheus\u0026rdquo; to facilitate cloud-native operations and simplify deployment on Kubernetes. 
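If Prometheus scrapes BanyanDB directly rather than going through a collector, a minimal scrape job against the metrics port mentioned above could look like the following sketch (the job name and target host are assumptions; only port 2121 comes from this document):
scrape_configs:
  - job_name: banyandb                 # assumed job name
    static_configs:
      - targets: ['banyandb:2121']     # metrics server exposed when built with the prometheus tag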
This allows users to directly deploy the Docker image onto their Kubernetes cluster without having to rebuild it with the \u0026ldquo;prometheus\u0026rdquo; tag.\nProfiling Banyand, the server of BanyanDB, supports profiling automatically. The profiling data is collected by the pprof package and can be accessed through the /debug/pprof endpoint. The port of the profiling server is 2122 by default.\nTracing TODO: Add details about the tracing support in BanyanDB, such as how to enable tracing, available tracing tools, and how to analyze tracing data.\n","excerpt":"Observability This document outlines the observability features of BanyanDB, which include metrics, …","ref":"/docs/skywalking-banyandb/v0.5.0/observability/","title":"Observability"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   
browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time. Before 7.0.0, p99, p95, p90, p75, p50 func(s) are used to calculate metrics separately. They are still supported in 7.x, but they are no longer recommended and are not included in the current official OAL script.\n service_p99 = from(Service.latency).p99(10);\n In this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. 
.longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. 
METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. 
See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time. Before 7.0.0, p99, p95, p90, p75, p50 func(s) are used to calculate metrics separately. They are still supported in 7.x, but they are no longer recommended and are not included in the current official OAL script.\n service_p99 = from(Service.latency).p99(10);\n In this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. 
endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   
instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  
mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). 
OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   
service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. 
endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   
instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. 
In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. 
service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   
browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. 
This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. 
METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. 
See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. 
endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","excerpt":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/oal/","title":"Observability Analysis Language"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, and Zipkin traces of v1 and v2 formats are supported. Metrics. SkyWalking supports mature metrics formats, including native meter format, OTEL metrics format, and Telegraf format. SkyWalking integrates with Service Mesh platforms, typically Istio and Envoy, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/latest/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking OAP and UI provides dozens of features to support observability analysis for your services, cloud infrastructure, open-source components, and more.\nBesides those out-of-box features for monitoring, users could leverage the powerful and flexible analysis language to build their own analysis and visualization.\nThere are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data to build metrics of entity and topology map. 
Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on analyzing log contents to format and label them, and extract metrics from them to feed Meter Analysis Language for further analysis.  SkyWalking community is willing to accept your monitoring extension powered by these languages, if the monitoring targets are public and general usable.\n","excerpt":"Observability Analysis Platform SkyWalking OAP and UI provides dozens of features to support …","ref":"/docs/main/next/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. 
Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  
","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  
","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, and Zipkin traces of v1 and v2 formats are supported. Metrics. SkyWalking supports mature metrics formats, including native meter format, OTEL metrics format, and Telegraf format. SkyWalking integrates with Service Mesh platforms, typically Istio and Envoy, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  
","excerpt":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/backend-overview/","title":"Observability Analysis Platform"},{"body":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  // Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","excerpt":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/","title":"Observations"},{"body":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  
// Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","excerpt":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/","title":"Observations"},{"body":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  // Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","excerpt":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/","title":"Observations"},{"body":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  
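As a usage sketch of the statement that metrics and traces will then be created for any Micrometer Observation based instrumentation, an observation recorded against a registry configured as above could look like the following; the observation name, tag, and observed work are illustrative only and not taken from the SkyWalking docs.

import io.micrometer.observation.Observation;
import io.micrometer.observation.ObservationRegistry;

class OrderService {
    // A registry wired with the SkyWalking meter and tracing handlers shown earlier.
    private final ObservationRegistry registry;

    OrderService(ObservationRegistry registry) {
        this.registry = registry;
    }

    void processOrder() {
        Observation.createNotStarted("order.process", registry)  // hypothetical observation name
                .lowCardinalityKeyValue("order.type", "online")   // becomes a tag on the resulting meter/span
                .observe(() -> {
                    // business logic to be measured and traced
                });
    }
}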
// Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","excerpt":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/","title":"Observations"},{"body":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  // Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","excerpt":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/","title":"Observations"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. 
In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-helm.git cd skywalking-helm/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. 
This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/latest/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. 
For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-helm.git cd skywalking-helm/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/next/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example on installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. 
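When more than one of these analyzers is configured, the text describes a fast-success loop: SkyWalking tries the analyzers in the configured order and stops at the first one that produces a result. A purely illustrative sketch of that behaviour follows; the AccessLogAnalyzer interface and AnalysisResult record are made-up stand-ins, not SkyWalking's actual internal classes.

import java.util.List;
import java.util.Optional;

interface AccessLogAnalyzer {
    // Returns a result when this analyzer can map the access log entry to a service.
    Optional<AnalysisResult> analyze(String accessLogEntry);
}

record AnalysisResult(String serviceName) {}

final class FastSuccessAnalysis {
    // Try each configured analyzer in order and stop as soon as one yields a result.
    static Optional<AnalysisResult> analyze(List<AccessLogAnalyzer> analyzers, String entry) {
        for (AccessLogAnalyzer analyzer : analyzers) {
            Optional<AnalysisResult> result = analyzer.analyze(entry);
            if (result.isPresent()) {
                return result; // first success wins; later analyzers are skipped
            }
        }
        return Optional.empty();
    }
}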
You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes cluster, hence in this analyzer OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works, and a step-by-step tutorial to apply it into the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works, and a step-by-step tutorial to apply it into the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.0.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. 
(activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. 
SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.1.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. 
alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.2.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. 
You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.3.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. 
(activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. 
SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.4.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. 
For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.5.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. 
You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.6.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. 
(activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-helm.git cd skywalking-helm/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. 
SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","excerpt":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, …","ref":"/docs/main/v9.7.0/en/setup/envoy/als_setting/","title":"Observe Service Mesh through ALS"},{"body":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","excerpt":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from …","ref":"/docs/main/latest/en/setup/zipkin/tracing/","title":"Observe Service Mesh through Zipkin traces"},{"body":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
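Putting the two snippets above together, a minimal application.yml sketch that enables both the Zipkin receiver (listening on 9411) and the Zipkin query module (listening on 9412) could look like this, with the module-specific settings omitted:

receiver-zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:default}
  default:
    # Other configurations...
query-zipkin:
  selector: ${SW_QUERY_ZIPKIN:default}
  default:
    # Other configurations...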
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","excerpt":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from …","ref":"/docs/main/next/en/setup/zipkin/tracing/","title":"Observe Service Mesh through Zipkin traces"},{"body":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","excerpt":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from …","ref":"/docs/main/v9.4.0/en/setup/zipkin/tracing/","title":"Observe Service Mesh through Zipkin traces"},{"body":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","excerpt":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from …","ref":"/docs/main/v9.5.0/en/setup/zipkin/tracing/","title":"Observe Service Mesh through Zipkin traces"},{"body":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
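If the OAP server runs from the official container image, the same two modules can be switched on through the environment variables mentioned above instead of editing application.yml. A minimal docker-compose sketch, where the image tag and port mappings are purely illustrative:

services:
  oap:
    image: apache/skywalking-oap-server:9.7.0   # illustrative tag
    environment:
      SW_RECEIVER_ZIPKIN: default   # enable the Zipkin trace receiver (port 9411)
      SW_QUERY_ZIPKIN: default      # enable the Zipkin query module (port 9412)
    ports:
      - "9411:9411"
      - "9412:9412"
      - "11800:11800"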
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","excerpt":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from …","ref":"/docs/main/v9.6.0/en/setup/zipkin/tracing/","title":"Observe Service Mesh through Zipkin traces"},{"body":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","excerpt":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from …","ref":"/docs/main/v9.7.0/en/setup/zipkin/tracing/","title":"Observe Service Mesh through Zipkin traces"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  
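As an illustration of the kind of change described above, here is a hypothetical metric with a filter condition added to one of the *.oal files; the metric name and the 500 ms threshold are made up for the example, while the second line is the service_sla metric as shipped in the default script:

// Average latency of calls slower than 500 ms, on the Service scope (illustrative).
service_slow_resp_time = from(Service.latency).filter(latency > 500).longAvg();
// Percentage of successful calls, as defined in the official OAL script.
service_sla = from(Service.*).percent(status == true);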
","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/latest/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/next/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  
","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.0.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.1.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  
","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.2.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.3.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  
","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.4.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.5.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  
","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.6.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","excerpt":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source …","ref":"/docs/main/v9.7.0/en/guides/backend-oal-scripts/","title":"Official OAL script"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/latest/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/next/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
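The cluster role mentioned above is a standard Kubernetes RBAC object. A minimal sketch granting OAP the read access listed in the doc (the role name is illustrative, and your Helm chart or operator may already ship an equivalent role):

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: skywalking-oap-on-demand-logs   # illustrative name
rules:
  - apiGroups: [""]
    resources: ["namespaces", "services", "pods", "pods/log"]
    verbs: ["get", "list"]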
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and uses the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.1.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and uses the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.2.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.3.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.4.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.5.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.6.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","excerpt":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and …","ref":"/docs/main/v9.7.0/en/setup/backend/on-demand-pod-log/","title":"On Demand Pod Logs"},{"body":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  
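A minimal OpenTelemetry Collector pipeline sketch that forwards logs to the OAP OTLP gRPC endpoint and injects the required service.name attribute; the filelog receiver, the service name, and the OAP address are assumptions made for the example:

receivers:
  filelog:                                  # any log source the Collector supports
    include: [ /var/log/app/*.log ]
processors:
  attributes:
    actions:
      - key: service.name
        value: my-service                   # hypothetical service name
        action: insert
exporters:
  otlp:
    endpoint: skywalking-oap.skywalking.svc:11800   # assumption: OAP gRPC address
    tls:
      insecure: true
service:
  pipelines:
    logs:
      receivers: [ filelog ]
      processors: [ attributes ]
      exporters: [ otlp ]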
","excerpt":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the …","ref":"/docs/main/latest/en/setup/backend/log-otlp/","title":"OpenTelemetry Logging Format"},{"body":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E Recommend to use OpenTelemetry OTLP Exporter to forward collected logs to OAP server in OTLP format, and SkyWalking OAP is responsible for transforming the data format into native log format with analysis support powered by LAL script.\n Deprecated: unmaintained and not recommended to use, will be removed.\nOpenTelemetry SkyWalking Exporter was first added into open-telemetry/opentelemetry-collector-contrib before OAP OTLP support. It transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Currently, from OTLP community, it is not well maintained, and already being marked as unmaintained, and may be removed in 2024.\n OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data.  And several attributes are optional as add-on information for the logs before analyzing.\n service.layer: the layer of the service that generates the logs. The default value is GENERAL layer, which is 100% sampled defined by LAL general rule service.instance: the instance name that generates the logs. The default value is empty.  Note, that these attributes should be set manually through OpenTelemetry SDK or through attribute#insert in OpenTelemetry Collector.\n","excerpt":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the …","ref":"/docs/main/next/en/setup/backend/log-otlp/","title":"OpenTelemetry Logging Format"},{"body":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  
OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","excerpt":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the …","ref":"/docs/main/v9.5.0/en/setup/backend/log-otlp/","title":"OpenTelemetry Logging Format"},{"body":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","excerpt":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the …","ref":"/docs/main/v9.6.0/en/setup/backend/log-otlp/","title":"OpenTelemetry Logging Format"},{"body":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] 
D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","excerpt":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the …","ref":"/docs/main/v9.7.0/en/setup/backend/log-otlp/","title":"OpenTelemetry Logging Format"},{"body":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. 
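For orientation, a rule file is a plain YAML document whose metricsRules entries map incoming metric names to MAL expressions. The sketch below is illustrative only: the metric name, label, and expression are assumptions rather than the contents of a shipped rule file, and the note that follows describes which top-level nodes the receiver actually honours.

```yaml
# otel-rules/<example>.yaml -- illustrative structure only
metricsRules:
  - name: cpu_usage_rate                                          # name of the stored metric
    exp: node_cpu_seconds_total.sum(['host_name']).rate('PT1M')   # assumed MAL expression
```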
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Linux OS otel-rules/vm.yaml prometheus/node_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Windows OS otel-rules/windows.yaml prometheus-community/windows_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml 
AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-cluster.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-index.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-node.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-service.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-instance.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-cluster.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-node.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-cluster.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-node.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-clusteryaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-broker.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.      ","excerpt":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by …","ref":"/docs/main/latest/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry Metrics Format"},{"body":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. 
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\nNotice: In the resource scope, dots (.) in the attributes' key names are converted to underscores (_), whereas in the metrics scope, they are not converted.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Linux OS otel-rules/vm.yaml prometheus/node_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Windows OS otel-rules/windows.yaml prometheus-community/windows_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver 
-\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-cluster.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-index.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-node.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-service.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-instance.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-cluster.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-node.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-cluster.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-node.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-cluster.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-broker.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of ClickHouse otel-rules/clickhouse/clickhouse-instance.yaml ClickHouse(embedded prometheus endpoint) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of ClickHouse otel-rules/clickhouse/clickhouse-service.yaml ClickHouse(embedded prometheus endpoint) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RocketMQ otel-rules/rocketmq/rocketmq-cluster.yaml rocketmq-exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RocketMQ otel-rules/rocketmq/rocketmq-broker.yaml rocketmq-exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RocketMQ otel-rules/rocketmq/rocketmq-topic.yaml rocketmq-exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    
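Every row in the table above follows the same data flow: an exporter exposes Prometheus-format metrics, the OpenTelemetry Collector scrapes them, and an OTLP exporter forwards them to the OAP. Below is a minimal Collector sketch for the node_exporter case; the scrape target node-exporter:9100 and the endpoint oap:11800 are placeholder assumptions for your environment.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: vm-monitoring
          scrape_interval: 10s
          static_configs:
            - targets: [ 'node-exporter:9100' ]   # placeholder node_exporter address

processors:
  batch: {}

exporters:
  otlp:
    endpoint: oap:11800                           # SkyWalking OAP gRPC endpoint (assumed)
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [ prometheus ]
      processors: [ batch ]
      exporters: [ otlp ]
```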
","excerpt":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by …","ref":"/docs/main/next/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry Metrics Format"},{"body":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix 
prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by …","ref":"/docs/main/v9.6.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry Metrics Format"},{"body":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Linux OS otel-rules/vm.yaml prometheus/node_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Windows OS otel-rules/windows.yaml prometheus-community/windows_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking 
OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-cluster.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-index.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-node.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-service.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-instance.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-cluster.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-node.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-cluster.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-node.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector 
\u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-clusteryaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-broker.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.      ","excerpt":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by …","ref":"/docs/main/v9.7.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry Metrics Format"},{"body":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-\u0026lt;handler\u0026gt;-rules. E.g. The oc handler loads rules from $CLASSPATH/otel-oc-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in prometheus-fetcher. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc\u0026#34;}enabledOcRules:${SW_OTEL_RECEIVER_ENABLED_OC_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds labels with key = node_identifier_host_name and key = node_identifier_pid to the collected data samples, and values from Node.identifier.host_name and Node.identifier.pid defined in OpenCensus Agent Proto, for identification of the metric data.\n   Rule Name Description Configuration File Data Source     istio-controlplane Metrics of Istio Control Plane otel-oc-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   oap Metrics of SkyWalking OAP server itself otel-oc-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   vm Metrics of VMs otel-oc-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-cluster Metrics of K8s cluster otel-oc-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-node Metrics of K8s cluster otel-oc-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-service Metrics of K8s cluster otel-oc-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use 
OpenTelemetry exporter to directly transport the metrics to SkyWalking OAP. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. …","ref":"/docs/main/v9.0.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry receiver"},{"body":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-\u0026lt;handler\u0026gt;-rules. E.g. The oc handler loads rules from $CLASSPATH/otel-oc-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in prometheus-fetcher. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc\u0026#34;}enabledOcRules:${SW_OTEL_RECEIVER_ENABLED_OC_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds labels with key = node_identifier_host_name and key = node_identifier_pid to the collected data samples, and values from Node.identifier.host_name and Node.identifier.pid defined in OpenCensus Agent Proto, for identification of the metric data.\n   Rule Name Description Configuration File Data Source     istio-controlplane Metrics of Istio Control Plane otel-oc-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   oap Metrics of SkyWalking OAP server itself otel-oc-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   vm Metrics of VMs otel-oc-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-cluster Metrics of K8s cluster otel-oc-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-node Metrics of K8s cluster otel-oc-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-service Metrics of K8s cluster otel-oc-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. …","ref":"/docs/main/v9.1.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry receiver"},{"body":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. 
If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler. otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in prometheus-fetcher. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc,otlp\u0026#34;}enabledOtelRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from Node.identifier.host_name defined in OpenCensus Agent Proto, or net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Rule Name Description Configuration File Data Source     istio-controlplane Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   oap Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   vm Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-cluster Metrics of K8s cluster otel-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-node Metrics of K8s cluster otel-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-service Metrics of K8s cluster otel-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   mysql Metrics of MYSQL otel-rules/mysql.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   postgresql Metrics of PostgreSQL otel-rules/postgresql.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. …","ref":"/docs/main/v9.2.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry receiver"},{"body":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. 
The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler. otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc,otlp\u0026#34;}enabledOtelRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from Node.identifier.host_name defined in OpenCensus Agent Proto, or net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. …","ref":"/docs/main/v9.3.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry receiver"},{"body":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. 
The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler. otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc,otlp\u0026#34;}enabledOtelRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from Node.identifier.host_name defined in OpenCensus Agent Proto, or net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP 
exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. …","ref":"/docs/main/v9.4.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry receiver"},{"body":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml 
prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","excerpt":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. …","ref":"/docs/main/v9.5.0/en/setup/backend/opentelemetry-receiver/","title":"OpenTelemetry receiver"},{"body":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml for config the zipkin.  Setup Query and Lens UI Please read deploy Lens UI documentation for query OTLP traces.\n","excerpt":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them …","ref":"/docs/main/latest/en/setup/backend/otlp-trace/","title":"OpenTelemetry Trace Format"},{"body":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml for config the zipkin.  
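For reference, enabling the Zipkin receiver and the Zipkin query module usually amounts to switching their selectors in application.yml. This is only a sketch: the module names match recent SkyWalking releases, but the option bodies are deliberately elided and should be taken from the application.yml shipped with your version.

```yaml
receiver-zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:default}
  default:
    # keep the defaults shipped with your release

query-zipkin:
  selector: ${SW_QUERY_ZIPKIN:default}
  default:
    # keep the defaults shipped with your release
```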
Setup Query and Lens UI Please read deploy Lens UI documentation for query OTLP traces.\n","excerpt":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them …","ref":"/docs/main/next/en/setup/backend/otlp-trace/","title":"OpenTelemetry Trace Format"},{"body":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml for config the zipkin.  Setup Query and Lens UI Please read deploy Lens UI documentation for query OTLP traces.\n","excerpt":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them …","ref":"/docs/main/v9.6.0/en/setup/backend/otlp-trace/","title":"OpenTelemetry Trace Format"},{"body":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml for config the zipkin.  Setup Query and Lens UI Please read deploy Lens UI documentation for query OTLP traces.\n","excerpt":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them …","ref":"/docs/main/v9.7.0/en/setup/backend/otlp-trace/","title":"OpenTelemetry Trace Format"},{"body":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released package or scratch The core CRDs the operator supports  Operator Deployment You could provision the operator from a binary package or build from sources.\nBinary Package  Go to the download page to download the latest release binary, skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin.tgz. Unarchive the package to a folder named skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin To install the operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin/config/operator-bundle.yaml Build from sources  Download released source package or clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Build docker image from scratch. If you prefer to your private docker image, a quick path to override OPERATOR_IMG environment variable : export OPERATOR_IMG=\u0026lt;private registry\u0026gt;/controller:\u0026lt;tag\u0026gt;  export OPERATOR_IMG=controller make -C operator docker-build Then, push this image controller:latest to a repository where the operator\u0026rsquo;s pod could pull from. If you use a local KinD cluster:\nkind load docker-image controller   Customize resource configurations based the templates laid in operator/config. 
We use kustomize to build them; please refer to kustomize in case you are not familiar with its syntax.\n  Install the CRDs to Kubernetes:\n  make -C operator install  Use make to generate the final manifests and deploy:  make -C operator deploy Test your deployment  Deploy a sample OAP server; this will create an OAP server in the default namespace:  curl https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml | kubectl apply -f -  Check the OAP server in Kubernetes:  kubectl get oapserver  Check the UI server in Kubernetes:  kubectl get ui Troubleshooting If you encounter any issue, you can check the log of the controller by pulling it from Kubernetes:\n# get the pod name of your controller kubectl --namespace skywalking-swck-system get pods # pull the logs kubectl --namespace skywalking-swck-system logs -f [name_of_the_controller_pod] Custom Resource Definition (CRD) The custom resources that the operator introduces are:\nJavaAgent The JavaAgent custom resource definition (CRD) declaratively defines a view for tracing the injection result.\nThe java-agent-injector creates JavaAgents once it injects agents into some workloads. Refer to Java Agent for more details.\nOAP The OAP custom resource definition (CRD) declaratively defines a desired OAP setup to run in a Kubernetes cluster. It provides options to configure environment variables and how to connect a Storage.\nUI The UI custom resource definition (CRD) declaratively defines a desired UI setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nStorage The Storage custom resource definition (CRD) declaratively defines a desired storage setup to run in a Kubernetes cluster. The Storage could be managed instances onboarded by the operator or an external service. The OAP has options to select which Storage it connects to.\n Caveat: Storage only supports Elasticsearch.\n Satellite The Satellite custom resource definition (CRD) declaratively defines a desired Satellite setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nFetcher The Fetcher custom resource definition (CRD) declaratively defines a desired Fetcher setup to run in a Kubernetes cluster. It provides options to configure the OpenTelemetry collector, which fetches metrics and forwards them to the deployed OAP.\nExamples of the Operator There are some ready-to-use examples that demonstrate the functions and features of the Operator.\n Deploy OAP server and UI with default settings Fetch metrics from the Istio control plane(istiod) Inject the java agent into pods Deploy a storage Deploy a Satellite  ","excerpt":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released …","ref":"/docs/skywalking-swck/latest/operator/","title":"Operator Usage Guide"},{"body":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released package or from scratch The core CRDs the operator supports  Operator Deployment You could provision the operator from a binary package or build from sources.\nBinary Package  Go to the download page to download the latest release binary, skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin.tgz. Unarchive the package to a folder named skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin. To install the operator in an existing cluster, make sure you have cert-manager installed (see the example command right after this sentence). 
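cert-manager is an external prerequisite of the operator; the command below is only an illustrative sketch of its standard manifest-based install. The version placeholder is not a real tag; pick a concrete release from the cert-manager documentation.
# illustrative only; replace \u0026lt;CERT_MANAGER_VERSION\u0026gt; with a real release tag
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/\u0026lt;CERT_MANAGER_VERSION\u0026gt;/cert-manager.yaml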
Apply the manifests for the Controller and CRDs in config:  kubectl apply -f skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin/config/operator-bundle.yaml Build from sources  Download released source package or clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Build docker image from scratch. If you prefer to your private docker image, a quick path to override OPERATOR_IMG environment variable : export OPERATOR_IMG=\u0026lt;private registry\u0026gt;/controller:\u0026lt;tag\u0026gt;  export OPERATOR_IMG=controller make -C operator docker-build Then, push this image controller:latest to a repository where the operator\u0026rsquo;s pod could pull from. If you use a local KinD cluster:\nkind load docker-image controller   Customize resource configurations based the templates laid in operator/config. We use kustomize to build them, please refer to kustomize in case you don\u0026rsquo;t familiar with its syntax.\n  Install the CRDs to Kubernetes:\n  make -C operator install  Use make to generate the final manifests and deploy:  make -C operator deploy Test your deployment  Deploy a sample OAP server, this will create an OAP server in the default namespace:  curl https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml | kubectl apply -f -  Check the OAP server in Kubernetes:  kubectl get oapserver  Check the UI server in Kubernetes:  kubectl get ui Troubleshooting If you encounter any issue, you can check the log of the controller by pulling it from Kubernetes:\n# get the pod name of your controller kubectl --namespace skywalking-swck-system get pods # pull the logs kubectl --namespace skywalking-swck-system logs -f [name_of_the_controller_pod] Custom Resource Define(CRD) The custom resources that the operator introduced are:\nJavaAgent The JavaAgent custom resource definition (CRD) declaratively defines a view to tracing the injection result.\nThe java-agent-injector creat JavaAgents once it injects agents into some workloads. Refer to Java Agent for more details.\nOAP The OAP custom resource definition (CRD) declaratively defines a desired OAP setup to run in a Kubernetes cluster. It provides options to configure environment variables and how to connect a Storage.\nUI The UI custom resource definition (CRD) declaratively defines a desired UI setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nStorage The Storage custom resource definition (CRD) declaratively defines a desired storage setup to run in a Kubernetes cluster. The Storage could be managed instances onboarded by the operator or an external service. The OAP has options to select which Storage it would connect.\n Caveat: Stroage only supports the Elasticsearch.\n Satellite The Satellite custom resource definition (CRD) declaratively defines a desired Satellite setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nFetcher The Fetcher custom resource definition (CRD) declaratively defines a desired Fetcher setup to run in a Kubernetes cluster. 
It provides options to configure OpenTelemetry collector, which fetches metrics to the deployed OAP.\nExamples of the Operator There are some instant examples to represent the functions or features of the Operator.\n Deploy OAP server and UI with default settings Fetch metrics from the Istio control plane(istiod) Inject the java agent to pods Deploy a storage Deploy a Satellite  ","excerpt":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released …","ref":"/docs/skywalking-swck/next/operator/","title":"Operator Usage Guide"},{"body":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released package or scratch The core CRDs the operator supports  Operator Deployment You could provision the operator from a binary package or build from sources.\nBinary Package  Go to the download page to download the latest release binary, skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin.tgz. Unarchive the package to a folder named skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin To install the operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin/config/operator-bundle.yaml Build from sources  Download released source package or clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Build docker image from scratch. If you prefer to your private docker image, a quick path to override OPERATOR_IMG environment variable : export OPERATOR_IMG=\u0026lt;private registry\u0026gt;/controller:\u0026lt;tag\u0026gt;  export OPERATOR_IMG=controller make -C operator docker-build Then, push this image controller:latest to a repository where the operator\u0026rsquo;s pod could pull from. If you use a local KinD cluster:\nkind load docker-image controller   Customize resource configurations based the templates laid in operator/config. We use kustomize to build them, please refer to kustomize in case you don\u0026rsquo;t familiar with its syntax.\n  Install the CRDs to Kubernetes:\n  make -C operator install  Use make to generate the final manifests and deploy:  make -C operator deploy Test your deployment  Deploy a sample OAP server, this will create an OAP server in the default namespace:  curl https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml | kubectl apply -f -  Check the OAP server in Kubernetes:  kubectl get oapserver  Check the UI server in Kubernetes:  kubectl get ui Troubleshooting If you encounter any issue, you can check the log of the controller by pulling it from Kubernetes:\n# get the pod name of your controller kubectl --namespace skywalking-swck-system get pods # pull the logs kubectl --namespace skywalking-swck-system logs -f [name_of_the_controller_pod] Custom Resource Define(CRD) The custom resources that the operator introduced are:\nJavaAgent The JavaAgent custom resource definition (CRD) declaratively defines a view to tracing the injection result.\nThe java-agent-injector creat JavaAgents once it injects agents into some workloads. Refer to Java Agent for more details.\nOAP The OAP custom resource definition (CRD) declaratively defines a desired OAP setup to run in a Kubernetes cluster. It provides options to configure environment variables and how to connect a Storage.\nUI The UI custom resource definition (CRD) declaratively defines a desired UI setup to run in a Kubernetes cluster. 
It provides options for how to connect an OAP.\nStorage The Storage custom resource definition (CRD) declaratively defines a desired storage setup to run in a Kubernetes cluster. The Storage could be managed instances onboarded by the operator or an external service. The OAP has options to select which Storage it would connect.\n Caveat: Stroage only supports the Elasticsearch.\n Satellite The Satellite custom resource definition (CRD) declaratively defines a desired Satellite setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nFetcher The Fetcher custom resource definition (CRD) declaratively defines a desired Fetcher setup to run in a Kubernetes cluster. It provides options to configure OpenTelemetry collector, which fetches metrics to the deployed OAP.\nExamples of the Operator There are some instant examples to represent the functions or features of the Operator.\n Deploy OAP server and UI with default settings Fetch metrics from the Istio control plane(istiod) Inject the java agent to pods Deploy a storage Deploy a Satellite  ","excerpt":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released …","ref":"/docs/skywalking-swck/v0.9.0/operator/","title":"Operator Usage Guide"},{"body":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins and expired-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known 2 kinds of optional plugins.\nOptional Level 2 Plugins These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x and 4.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. 
The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  Optional Level 3 Plugins. Expired Plugins These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\nWarning, there is no guarantee of working and maintenance. The committer team may remove them from the agent package in the future without further notice.\n Plugin of Spring Impala 2.6.x was tested through parrot-stream released images. The images are not available since Mar. 2024. This plugin is expired due to lack of testing.  ","excerpt":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/optional-plugins/","title":"Optional Plugins"},{"body":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins and expired-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known 2 kinds of optional plugins.\nOptional Level 2 Plugins These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. 
The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x and 4.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  Optional Level 3 Plugins. Expired Plugins These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. 
removed from DockerHub).\nWarning, there is no guarantee of working and maintenance. The committer team may remove them from the agent package in the future without further notice.\n Plugin of Spring Impala 2.6.x was tested through parrot-stream released images. The images are not available since Mar. 2024. This plugin is expired due to lack of testing.  ","excerpt":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/optional-plugins/","title":"Optional Plugins"},{"body":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known optional plugins.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. 
Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder.The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network.  ","excerpt":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/optional-plugins/","title":"Optional Plugins"},{"body":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known optional plugins.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. 
Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  ","excerpt":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/optional-plugins/","title":"Optional Plugins"},{"body":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins and expired-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known 2 kinds of optional plugins.\nOptional Level 2 Plugins These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x and 4.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. 
The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  Optional Level 3 Plugins. Expired Plugins These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\nWarning, there is no guarantee of working and maintenance. The committer team may remove them from the agent package in the future without further notice.\n Plugin of Spring Impala 2.6.x was tested through parrot-stream released images. The images are not available since Mar. 2024. This plugin is expired due to lack of testing.  ","excerpt":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/optional-plugins/","title":"Optional Plugins"},{"body":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","excerpt":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/","title":"Oracle and Resin plugins"},{"body":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. 
If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","excerpt":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/","title":"Oracle and Resin plugins"},{"body":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","excerpt":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/","title":"Oracle and Resin plugins"},{"body":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","excerpt":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/","title":"Oracle and Resin plugins"},{"body":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","excerpt":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/","title":"Oracle and Resin plugins"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. 
SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using Zipkin. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry, Telegraf.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/latest/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nSkyWalking covers all the observability needs in Cloud Native world, including:\n Tracing. 
SkyWalking native data formats, and Zipkin traces of v1 and v2 formats are supported. Metrics. SkyWalking supports mature metrics formats, including native meter format, OTEL metrics format, and Telegraf format. SkyWalking integrates with Service Mesh platforms, typically Istio and Envoy, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content. Profiling. Profiling is a powerful tool to help developers understand the performance of their applications from lines of codes perspective. SkyWalking provides profiling feature bundled in native language agents and independent ebpf agents. Event. Event is a special kind of data, which is used to record the important moments in the system, such as version upgrade, configuration change, etc. Linking the events with metrics could help on explain the peaks or valleys in the metrics, and linking the events with traces and logs could help on troubleshooting root cause.  Why use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Python, and manually SDKs for C++, Rust, and Nginx LUA. In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy, allowing users to understand the entire distributed system. Powered by eBPF stack, SkyWalking provides k8s monitoring. Also, by adopting OpenTelemetry, Telegraf, Zabbix, Zipkin, Prometheus, SkyWalking can integrate with other distributed tracing, metrics and logging systems and build a unified APM system to host all data.\nBesides the support of various kinds of telemetry formats, the hierarchy structure of objects in SkyWalking is defined as service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), and Kubernetes(k8s layer). A layer is an abstract collection of services. A service typically only belongs to one layer, but in some scenarios, a service could belong to multiple layers. For example, a service could be deployed in an Istio service mesh, it could belong to mesh and mesh-dp(mesh data plane) layer. Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. 
An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, also detect API dependencies in the distributed environment if you use our native agents.,\nBesides topology map, SkyWalking provides Service Hierarchy Relationship , which defines the relationships of existing logically same services in various layers. For example, a service could be deployed in a Kubernetes cluster with Istio mesh, services are detected by k8s monitoring and Istio mesh, this hierarchy relationship could connect the services in k8s layer and mesh layer.\nArchitecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/next/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. 
Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect data and reformat them for SkyWalking requirements (different probes support different sources). Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, and logs. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, InfluxDB, or implement your own. Patches for new storage implementors welcome! UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. 
Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect data and reformat them for SkyWalking requirements (different probes support different sources). Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, and logs. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, InfluxDB, or implement your own. Patches for new storage implementors welcome! UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. 
SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? 
SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  
","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. 
You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin and Jaeger. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  
Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). 
All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin and Jaeger. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  
SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using Zipkin. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry, Telegraf.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","excerpt":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking Rover is an open-source collector, which provides a eBPF-based monitor and profiler in the Kubernetes.\nWhy use SkyWalking Rover? On the Kubernetes platform, we could collect a lot of telemetry data. Rover could collect them based on the eBPF technology, and upload them to the SkyWalking backend for analysis, aggregate, and visualize them.\n EBPF-based profiling for C, C++, Golang, and Rust. Network profiling for L4(TCP) and L7(HTTP) traffic, including with TLS. Tracing enhancement. Collect extra information from OS level as attached events for the existing tracing system, such as attach raw data of HTTP request and response. Network monitoring for generating network access logs.  Architecture  Process represents the data monitored by Rover. Rover is deployed in the VM instance, collects data in VM and Process, and reports it to the OAP cluster. OAP collect data from the rover side, analysis, and stores them.  ","excerpt":"Overview SkyWalking Rover is an open-source collector, which provides a eBPF-based monitor and …","ref":"/docs/skywalking-rover/latest/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking Rover is an open-source collector, which provides a eBPF-based monitor and profiler in the Kubernetes.\nWhy use SkyWalking Rover? On the Kubernetes platform, we could collect a lot of telemetry data. Rover could collect them based on the eBPF technology, and upload them to the SkyWalking backend for analysis, aggregate, and visualize them.\n EBPF-based profiling for C, C++, Golang, and Rust. Network profiling for L4(TCP) and L7(HTTP) traffic, including with TLS. Tracing enhancement. 
Collect extra information from OS level as attached events for the existing tracing system, such as attach raw data of HTTP request and response. Network monitoring for generating network access logs.  Architecture  Process represents the data monitored by Rover. Rover is deployed in the VM instance, collects data in VM and Process, and reports it to the OAP cluster. OAP collect data from the rover side, analysis, and stores them.  ","excerpt":"Overview SkyWalking Rover is an open-source collector, which provides a eBPF-based monitor and …","ref":"/docs/skywalking-rover/next/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking Rover is an open-source collector, which provides a eBPF-based monitor and profiler in the Kubernetes.\nWhy use SkyWalking Rover? On the Kubernetes platform, we could collect a lot of telemetry data. Rover could collect them based on the eBPF technology, and upload them to the SkyWalking backend for analysis, aggregate, and visualize them.\n EBPF-based profiling for C, C++, Golang, and Rust. Network profiling for L4(TCP) and L7(HTTP) traffic, including with TLS. Tracing enhancement. Collect extra information from OS level as attached events for the existing tracing system, such as attach raw data of HTTP request and response. Network monitoring for generating network access logs.  Architecture  Process represents the data monitored by Rover. Rover is deployed in the VM instance, collects data in VM and Process, and reports it to the OAP cluster. OAP collect data from the rover side, analysis, and stores them.  ","excerpt":"Overview SkyWalking Rover is an open-source collector, which provides a eBPF-based monitor and …","ref":"/docs/skywalking-rover/v0.6.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, which provides a low-cost, high-efficient, and more secure way to collect telemetry data, such that Trace Segments, Logs, or Metrics.\nWhy use SkyWalking Satellite? Observability is the solution to the complex scenario of cloud-native services. However, we may encounter different telemetry data scenarios, different language services, big data analysis, etc. Satellite provides a unified data collection layer for cloud-native services. You can easily use it to connect to the SkyWalking ecosystem and enhance the capacity of SkyWalking. There are some enhance features on the following when using Satellite.\n Provide a unified data collection layer to collect logs, traces, and metrics. Provide a safer local cache to reduce the memory cost of the service. Provide the unified transfer way shields the functional differences in the different language libs, such as MQ. Provides the preprocessing functions to ensure accuracy of the metrics, such as sampling.  Architecture SkyWalking Satellite is logically split into three parts: Gatherer, Processor, and Sender.\n Gatherer collect data and reformat them for SkyWalking requirements. Processor processes the input data to generate the new data for Observability. Sender would transfer the downstream data to the SkyWalking OAP with different protocols.  
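To make the three-part split above more concrete, the following is a minimal, illustrative Go sketch of a Gatherer -> Processor -> Sender pipeline. The interface names, the Event type, and the stand-in implementations are hypothetical and only mirror the roles described in the text; they are not SkyWalking Satellite's actual Go API.

```go
package main

import "fmt"

// Event is a hypothetical, minimal stand-in for one piece of telemetry data.
type Event struct {
	Kind    string // "log", "trace", or "metric"
	Payload string
}

// The three roles described above; names and signatures are assumptions.
type Gatherer interface{ Gather() []Event }
type Processor interface{ Process([]Event) []Event }
type Sender interface{ Send([]Event) error }

// run wires the stages together, mirroring the logical split in the text.
func run(g Gatherer, p Processor, s Sender) error {
	return s.Send(p.Process(g.Gather()))
}

// Trivial stand-ins so the sketch compiles and runs end to end.
type staticGatherer struct{}

func (staticGatherer) Gather() []Event { return []Event{{Kind: "log", Payload: "hello"}} }

type passThrough struct{}

func (passThrough) Process(in []Event) []Event { return in }

type stdoutSender struct{}

func (stdoutSender) Send(in []Event) error { fmt.Println(in); return nil }

func main() {
	if err := run(staticGatherer{}, passThrough{}, stdoutSender{}); err != nil {
		fmt.Println("send failed:", err)
	}
}
```

In a real deployment the Sender stage is where transport differences are hidden from the other two stages, which is the unified transfer idea described above.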
","excerpt":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, which provides a low-cost, high-efficient, and more secure way to collect telemetry data, such that Trace Segments, Logs, or Metrics.\nWhy use SkyWalking Satellite? Observability is the solution to the complex scenario of cloud-native services. However, we may encounter different telemetry data scenarios, different language services, big data analysis, etc. Satellite provides a unified data collection layer for cloud-native services. You can easily use it to connect to the SkyWalking ecosystem and enhance the capacity of SkyWalking. There are some enhance features on the following when using Satellite.\n Provide a unified data collection layer to collect logs, traces, and metrics. Provide a safer local cache to reduce the memory cost of the service. Provide the unified transfer way shields the functional differences in the different language libs, such as MQ. Provides the preprocessing functions to ensure accuracy of the metrics, such as sampling.  Architecture SkyWalking Satellite is logically split into three parts: Gatherer, Processor, and Sender.\n Gatherer collect data and reformat them for SkyWalking requirements. Processor processes the input data to generate the new data for Observability. Sender would transfer the downstream data to the SkyWalking OAP with different protocols.  ","excerpt":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, which provides a low-cost, high-efficient, and more secure way to collect telemetry data, such that Trace Segments, Logs, or Metrics.\nWhy use SkyWalking Satellite? Observability is the solution to the complex scenario of cloud-native services. However, we may encounter different telemetry data scenarios, different language services, big data analysis, etc. Satellite provides a unified data collection layer for cloud-native services. You can easily use it to connect to the SkyWalking ecosystem and enhance the capacity of SkyWalking. There are some enhance features on the following when using Satellite.\n Provide a unified data collection layer to collect logs, traces, and metrics. Provide a safer local cache to reduce the memory cost of the service. Provide the unified transfer way shields the functional differences in the different language libs, such as MQ. Provides the preprocessing functions to ensure accuracy of the metrics, such as sampling.  Architecture SkyWalking Satellite is logically split into three parts: Gatherer, Processor, and Sender.\n Gatherer collect data and reformat them for SkyWalking requirements. Processor processes the input data to generate the new data for Observability. Sender would transfer the downstream data to the SkyWalking OAP with different protocols.  
","excerpt":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, …","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/overview/","title":"Overview"},{"body":"Performance best practices  Following changes are expected in the next official release (v1.1.0).\n The Python agent currently uses a number of threads to communicate with SkyWalking OAP, it is planned to be refactored using AsyncIO (Uvloop) along with an async version of gRPC(aio-client)/HTTP(aiohttp/httpx)/Kafka(aio-kafka) to further minimize the cost of thread switching and IO time.\nFor now, we still have a few points to mention to keep the overhead to your application minimal.\n When using the gRPC protocol to report data, a higher version of gRPC is always recommended. Please also make sure that:  By running python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._implementation_type)\u0026quot;, or python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._default_implementation_type)\u0026quot; you should either see upb or cpp as the returned value. It means the Protobuf library is using a much faster implementation than Python native. If not, try setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION='cpp' or 'upb' or upgrade the gRPC dependency (SkyWalking Python will use whatever version your application uses).   Though HTTP is provided as an alternative, it could be slower compared to other protocols, Kafka is often a good choice when gRPC is not suitable. When some features are not needed in your use case, you could turn them off either via config.init(agent_some_reporter_active=False) or environment variables. Use ignore_path, ignore_method, and log filters to avoid reporting less valuable data that is of large amount. Log reporter safe mode is designed for situations where HTTP basic auth info could be visible in traceback and logs but shouldn\u0026rsquo;t be reported to OAP. You should keep the option as OFF if it\u0026rsquo;s not your case because frequent regular expression searches will inevitably introduce overhead to the CPU. Do not turn on sw-python CLI or agent debug logging in production, otherwise large amount of log will be produced.  sw-python CLI debug mode will automatically turn on agent debug log (override from sitecustomize.py).    ","excerpt":"Performance best practices  Following changes are expected in the next official release (v1.1.0). …","ref":"/docs/skywalking-python/latest/en/setup/faq/performance/","title":"Performance best practices"},{"body":"Performance best practices  Following changes are expected in the next official release (v1.1.0).\n The Python agent currently uses a number of threads to communicate with SkyWalking OAP, it is planned to be refactored using AsyncIO (Uvloop) along with an async version of gRPC(aio-client)/HTTP(aiohttp/httpx)/Kafka(aio-kafka) to further minimize the cost of thread switching and IO time.\nFor now, we still have a few points to mention to keep the overhead to your application minimal.\n When using the gRPC protocol to report data, a higher version of gRPC is always recommended. 
Please also make sure that:  By running python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._implementation_type)\u0026quot;, or python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._default_implementation_type)\u0026quot; you should either see upb or cpp as the returned value. It means the Protobuf library is using a much faster implementation than Python native. If not, try setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION='cpp' or 'upb' or upgrade the gRPC dependency (SkyWalking Python will use whatever version your application uses).   Though HTTP is provided as an alternative, it could be slower compared to other protocols, Kafka is often a good choice when gRPC is not suitable. When some features are not needed in your use case, you could turn them off either via config.init(agent_some_reporter_active=False) or environment variables. Use ignore_path, ignore_method, and log filters to avoid reporting less valuable data that is of large amount. Log reporter safe mode is designed for situations where HTTP basic auth info could be visible in traceback and logs but shouldn\u0026rsquo;t be reported to OAP. You should keep the option as OFF if it\u0026rsquo;s not your case because frequent regular expression searches will inevitably introduce overhead to the CPU. Do not turn on sw-python CLI or agent debug logging in production, otherwise large amount of log will be produced.  sw-python CLI debug mode will automatically turn on agent debug log (override from sitecustomize.py).    ","excerpt":"Performance best practices  Following changes are expected in the next official release (v1.1.0). …","ref":"/docs/skywalking-python/next/en/setup/faq/performance/","title":"Performance best practices"},{"body":"Performance best practices  Following changes are expected in the next official release (v1.1.0).\n The Python agent currently uses a number of threads to communicate with SkyWalking OAP, it is planned to be refactored using AsyncIO (Uvloop) along with an async version of gRPC(aio-client)/HTTP(aiohttp/httpx)/Kafka(aio-kafka) to further minimize the cost of thread switching and IO time.\nFor now, we still have a few points to mention to keep the overhead to your application minimal.\n When using the gRPC protocol to report data, a higher version of gRPC is always recommended. Please also make sure that:  By running python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._implementation_type)\u0026quot;, or python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._default_implementation_type)\u0026quot; you should either see upb or cpp as the returned value. It means the Protobuf library is using a much faster implementation than Python native. If not, try setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION='cpp' or 'upb' or upgrade the gRPC dependency (SkyWalking Python will use whatever version your application uses).   Though HTTP is provided as an alternative, it could be slower compared to other protocols, Kafka is often a good choice when gRPC is not suitable. When some features are not needed in your use case, you could turn them off either via config.init(agent_some_reporter_active=False) or environment variables. Use ignore_path, ignore_method, and log filters to avoid reporting less valuable data that is of large amount. 
Log reporter safe mode is designed for situations where HTTP basic auth info could be visible in traceback and logs but shouldn\u0026rsquo;t be reported to OAP. You should keep the option as OFF if it\u0026rsquo;s not your case because frequent regular expression searches will inevitably introduce overhead to the CPU. Do not turn on sw-python CLI or agent debug logging in production, otherwise large amount of log will be produced.  sw-python CLI debug mode will automatically turn on agent debug log (override from sitecustomize.py).    ","excerpt":"Performance best practices  Following changes are expected in the next official release (v1.1.0). …","ref":"/docs/skywalking-python/v1.0.1/en/setup/faq/performance/","title":"Performance best practices"},{"body":"Performance Tests Performance testing is used to verify the impact on application performance when using SkyWalking Go.\nTest Objective By launching both the agent and non-agent compiled applications, we subject them to the same QPS under stress testing, evaluating the CPU, memory, and network latency of the machine during the testing period.\nThe application has been saved and submitted to the test/benchmark-codebase directory, with the following topology:\ntraffic generator -\u0026gt; consumer -\u0026gt; provider The payload(traffic) generator uses multithreading to send HTTP requests to the consumer service. When the consumer receives a request, it sends three requests to the provider service to obtain return data results. Based on these network requests, when using SkyWalking Go, the consumer service generates four Spans (1 Entry Span, 3 Exit Spans).\nApplication The application\u0026rsquo;s integration with SkyWalking Go follows the same process as other applications. For more information, please refer to the documentation.\nIn the application, we use loops and mathematical calculations (math.Log) to simulate the execution of the business program. This consumes a certain amount of CPU usage, preventing idle processing during service stress testing and amplifying the impact of the Agent program on the business application.\nStress Testing Service We use the Vegeta service for stress testing, which launches traffic at a specified QPS to the application. It is based on the Go language and uses goroutines to provide a more efficient stress testing solution.\nTest Environment A total of 4 GCP machines are launched, all instances are running on tbe 4C8G VM.\n traffic generator: Used for deploying traffic to the consumer machine. consumer: Used for deploying the consumer service. provider: Used for deploying the provider service. skywalking: Used for deploying the SkyWalking backend cluster, providing a standalone OAP node (in-memory H2 storage) and a UI interface.  
Each service is deployed on a separate machine to ensure there is no interference with one another.\nTest Process Preparation Phase The preparation phase is used to ensure that all machines and test case preparations are completed.\nTraffic Generator Install the Vegeta service on the stress testing instance and create the following file(request.txt) to simulate traffic usage.\nGET http://${CONSUMER_IP}:8080/consumer Sw8: 1-MWYyZDRiZjQ3YmY3MTFlYWI3OTRhY2RlNDgwMDExMjI=-MWU3YzIwNGE3YmY3MTFlYWI4NThhY2RlNDgwMDExMjI=-0-c2VydmljZQ==-aW5zdGFuY2U=-cHJvcGFnYXRpb24=-cHJvcGFnYXRpb246NTU2Ng== Please replace the above CONSUMER_IP with the real IP address of the consumer instance.\nConsumer and Provider Install the skywalking-go service on the machines to be tested, and compile with and without the Agent.\nModify the machine\u0026rsquo;s file limit to prevent the inability to create new connections due to excessive handles: ulimit -n 65536.\nStart the provider service(without Agent) and obtain the provider machine\u0026rsquo;s IP address. Please provide this address when starting the consumer machine later.\nSkyWalking Download the SkyWalking service, modify the SkyWalking OAP startup script to increase the memory size, preventing OAP crashes due to insufficient memory.\nTesting without Agent  Start the Consumer service without the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Testing with Agent The only difference in the test without the Agent is the version of the consumer that is compiled and launched.\n Add the SW_AGENT_REPORTER_GRPC_BACKEND_SERVICE environment variables to the consumer service, for setting the IP address of the SkyWalking OAP service. Start the Consumer service with the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Test Results In the tests, we used 1000 QPS as a benchmark to stress test both the Consumer services with and without the Agent.\n In the non-Agent version, the CPU usage was around 74%, memory usage was 2.53%, and the average response time for a single request was 4.18ms. In the Agent-compiled version, the CPU usage was around 81%, memory usage was 2.61%, and the average response time for a single request was 4.32ms.  From these results, we can conclude that after adding the Agent, the CPU usage increased by about 9%, memory usage experienced almost no growth, and the average response time for requests increased by approximately 0.15ms.\nExplanation, approximately 0.15ms is the in-band cost. 
The most of CPU(extra 9%) cost are due to the amount of out of band data being sent to the collectors from the application(consumer), which is 4000 spans/s in our test case.\n","excerpt":"Performance Tests Performance testing is used to verify the impact on application performance when …","ref":"/docs/skywalking-go/latest/en/agent/performance-tests/","title":"Performance Tests"},{"body":"Performance Tests Performance testing is used to verify the impact on application performance when using SkyWalking Go.\nTest Objective By launching both the agent and non-agent compiled applications, we subject them to the same QPS under stress testing, evaluating the CPU, memory, and network latency of the machine during the testing period.\nThe application has been saved and submitted to the test/benchmark-codebase directory, with the following topology:\ntraffic generator -\u0026gt; consumer -\u0026gt; provider The payload(traffic) generator uses multithreading to send HTTP requests to the consumer service. When the consumer receives a request, it sends three requests to the provider service to obtain return data results. Based on these network requests, when using SkyWalking Go, the consumer service generates four Spans (1 Entry Span, 3 Exit Spans).\nApplication The application\u0026rsquo;s integration with SkyWalking Go follows the same process as other applications. For more information, please refer to the documentation.\nIn the application, we use loops and mathematical calculations (math.Log) to simulate the execution of the business program. This consumes a certain amount of CPU usage, preventing idle processing during service stress testing and amplifying the impact of the Agent program on the business application.\nStress Testing Service We use the Vegeta service for stress testing, which launches traffic at a specified QPS to the application. It is based on the Go language and uses goroutines to provide a more efficient stress testing solution.\nTest Environment A total of 4 GCP machines are launched, all instances are running on tbe 4C8G VM.\n traffic generator: Used for deploying traffic to the consumer machine. consumer: Used for deploying the consumer service. provider: Used for deploying the provider service. skywalking: Used for deploying the SkyWalking backend cluster, providing a standalone OAP node (in-memory H2 storage) and a UI interface.  Each service is deployed on a separate machine to ensure there is no interference with one another.\nTest Process Preparation Phase The preparation phase is used to ensure that all machines and test case preparations are completed.\nTraffic Generator Install the Vegeta service on the stress testing instance and create the following file(request.txt) to simulate traffic usage.\nGET http://${CONSUMER_IP}:8080/consumer Sw8: 1-MWYyZDRiZjQ3YmY3MTFlYWI3OTRhY2RlNDgwMDExMjI=-MWU3YzIwNGE3YmY3MTFlYWI4NThhY2RlNDgwMDExMjI=-0-c2VydmljZQ==-aW5zdGFuY2U=-cHJvcGFnYXRpb24=-cHJvcGFnYXRpb246NTU2Ng== Please replace the above CONSUMER_IP with the real IP address of the consumer instance.\nConsumer and Provider Install the skywalking-go service on the machines to be tested, and compile with and without the Agent.\nModify the machine\u0026rsquo;s file limit to prevent the inability to create new connections due to excessive handles: ulimit -n 65536.\nStart the provider service(without Agent) and obtain the provider machine\u0026rsquo;s IP address. 
Please provide this address when starting the consumer machine later.\nSkyWalking Download the SkyWalking service, modify the SkyWalking OAP startup script to increase the memory size, preventing OAP crashes due to insufficient memory.\nTesting without Agent  Start the Consumer service without the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Testing with Agent The only difference in the test without the Agent is the version of the consumer that is compiled and launched.\n Add the SW_AGENT_REPORTER_GRPC_BACKEND_SERVICE environment variables to the consumer service, for setting the IP address of the SkyWalking OAP service. Start the Consumer service with the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Test Results In the tests, we used 1000 QPS as a benchmark to stress test both the Consumer services with and without the Agent.\n In the non-Agent version, the CPU usage was around 74%, memory usage was 2.53%, and the average response time for a single request was 4.18ms. In the Agent-compiled version, the CPU usage was around 81%, memory usage was 2.61%, and the average response time for a single request was 4.32ms.  From these results, we can conclude that after adding the Agent, the CPU usage increased by about 9%, memory usage experienced almost no growth, and the average response time for requests increased by approximately 0.15ms.\nExplanation, approximately 0.15ms is the in-band cost. The most of CPU(extra 9%) cost are due to the amount of out of band data being sent to the collectors from the application(consumer), which is 4000 spans/s in our test case.\n","excerpt":"Performance Tests Performance testing is used to verify the impact on application performance when …","ref":"/docs/skywalking-go/next/en/agent/performance-tests/","title":"Performance Tests"},{"body":"Performance Tests Performance testing is used to verify the impact on application performance when using SkyWalking Go.\nTest Objective By launching both the agent and non-agent compiled applications, we subject them to the same QPS under stress testing, evaluating the CPU, memory, and network latency of the machine during the testing period.\nThe application has been saved and submitted to the test/benchmark-codebase directory, with the following topology:\ntraffic generator -\u0026gt; consumer -\u0026gt; provider The payload(traffic) generator uses multithreading to send HTTP requests to the consumer service. When the consumer receives a request, it sends three requests to the provider service to obtain return data results. 
Based on these network requests, when using SkyWalking Go, the consumer service generates four Spans (1 Entry Span, 3 Exit Spans).\nApplication The application\u0026rsquo;s integration with SkyWalking Go follows the same process as other applications. For more information, please refer to the documentation.\nIn the application, we use loops and mathematical calculations (math.Log) to simulate the execution of the business program. This consumes a certain amount of CPU usage, preventing idle processing during service stress testing and amplifying the impact of the Agent program on the business application.\nStress Testing Service We use the Vegeta service for stress testing, which launches traffic at a specified QPS to the application. It is based on the Go language and uses goroutines to provide a more efficient stress testing solution.\nTest Environment A total of 4 GCP machines are launched, all instances are running on tbe 4C8G VM.\n traffic generator: Used for deploying traffic to the consumer machine. consumer: Used for deploying the consumer service. provider: Used for deploying the provider service. skywalking: Used for deploying the SkyWalking backend cluster, providing a standalone OAP node (in-memory H2 storage) and a UI interface.  Each service is deployed on a separate machine to ensure there is no interference with one another.\nTest Process Preparation Phase The preparation phase is used to ensure that all machines and test case preparations are completed.\nTraffic Generator Install the Vegeta service on the stress testing instance and create the following file(request.txt) to simulate traffic usage.\nGET http://${CONSUMER_IP}:8080/consumer Sw8: 1-MWYyZDRiZjQ3YmY3MTFlYWI3OTRhY2RlNDgwMDExMjI=-MWU3YzIwNGE3YmY3MTFlYWI4NThhY2RlNDgwMDExMjI=-0-c2VydmljZQ==-aW5zdGFuY2U=-cHJvcGFnYXRpb24=-cHJvcGFnYXRpb246NTU2Ng== Please replace the above CONSUMER_IP with the real IP address of the consumer instance.\nConsumer and Provider Install the skywalking-go service on the machines to be tested, and compile with and without the Agent.\nModify the machine\u0026rsquo;s file limit to prevent the inability to create new connections due to excessive handles: ulimit -n 65536.\nStart the provider service(without Agent) and obtain the provider machine\u0026rsquo;s IP address. Please provide this address when starting the consumer machine later.\nSkyWalking Download the SkyWalking service, modify the SkyWalking OAP startup script to increase the memory size, preventing OAP crashes due to insufficient memory.\nTesting without Agent  Start the Consumer service without the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Testing with Agent The only difference in the test without the Agent is the version of the consumer that is compiled and launched.\n Add the SW_AGENT_REPORTER_GRPC_BACKEND_SERVICE environment variables to the consumer service, for setting the IP address of the SkyWalking OAP service. Start the Consumer service with the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. 
Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Test Results In the tests, we used 1000 QPS as a benchmark to stress test both the Consumer services with and without the Agent.\n In the non-Agent version, the CPU usage was around 74%, memory usage was 2.53%, and the average response time for a single request was 4.18ms. In the Agent-compiled version, the CPU usage was around 81%, memory usage was 2.61%, and the average response time for a single request was 4.32ms.  From these results, we can conclude that after adding the Agent, the CPU usage increased by about 9%, memory usage experienced almost no growth, and the average response time for requests increased by approximately 0.15ms.\nExplanation, approximately 0.15ms is the in-band cost. The most of CPU(extra 9%) cost are due to the amount of out of band data being sent to the collectors from the application(consumer), which is 4000 spans/s in our test case.\n","excerpt":"Performance Tests Performance testing is used to verify the impact on application performance when …","ref":"/docs/skywalking-go/v0.4.0/en/agent/performance-tests/","title":"Performance Tests"},{"body":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including write-ahead logging(WAL), index, and data collected from skywalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the need of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. By exposing necessary interfaces, upper components do not need to care how persistence is implemented and avoid dealing with differences between different operating systems.\nArchitecture BanyanDB uses third-party storage for actual storage, and the file system shields the differences between different platforms and storage systems, allowing developers to operate files as easily as the local file system without worrying about specific details.\nFor different data models, stored in different locations, such as for meta and wal data, BanyanDB uses a local file system for storage. For index and data, the architecture of the file system is divided into three layers.\n The first layer is the API interface, which developers only need to care about how to operate the remote file system. The second layer is the storage system adapter, which is used to mask the differences between different storage systems. The last layer is the actual storage system. With the use of remote storage architecture, the local system can still play its role and can borrow the local system to speed up reading and writing.  IO Mode Persistence storage offers a range of IO modes to cater to various throughput requirements. The interface can be accessed by developers and can be configured through settings, which can be set in the configuration file.\nIo_uring Io_uring is a new feature in Linux 5.1, which is fully asynchronous and offers high throughput. In the scene of massive storage, io_uring can bring significant benefits. The following is the diagram about how io_uring works. 
If the user enables io_uring, the read and write requests will first be placed in the submission queue buffer when calling the operation API. When the threshold is reached, batch submissions will be made to SQ. After the kernel threads complete execution, the requests will be placed in the CQ, and the user can obtain the request results.\nSynchronous IO The most common IO mode is Synchronous IO, but it has a relatively low throughput. BanyanDB provides a nonblocking mode that is compatible with lower Linux versions.\nOperation Directory Create Create the specified directory and return the file descriptor; an error is returned if the directory already exists. The following is the pseudocode that calls the API in the Go style.\nparam:\nname: The name of the directory.\npermission: Permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nCreateDirectory(name String, permission Mode) (error)\nOpen Open the directory and return an error if the file descriptor does not exist. The following is the pseudocode that calls the API in the Go style.\nparam:\nname: The name of the directory.\nreturn: Directory pointer, you can use it for various operations.\nOpenDirectory(name String) (*Dir, error)\nDelete Delete the directory and all its files, and return an error if the directory does not exist or cannot be read or written. The following is the pseudocode that calls the API in the Go style.\nDir.DeleteDirectory() (error)\nRename Rename the directory and return an error if a directory with the new name already exists. The following is the pseudocode that calls the API in the Go style.\nparam:\nnewName: The new name of the directory.\nDir.RenameDirectory(newName String) (error)\nRead Get the list of files and child directories in the directory, and return an error if the directory does not exist. The following is the pseudocode that calls the API in the Go style.\nreturn: List of files belonging to the directory.\nDir.ReadDirectory() (FileList, error)\nPermission When creating a directory, the default owner is the user who created the directory. The owner can specify read and write permissions of the directory. If not specified, the default is read and write permissions, which include permissions for all files in the directory. The following is the pseudocode that calls the API in the Go style.\nparam:\npermission: Permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nDir.SetDirectoryPermission(permission Mode) (error)\nFile Create Create the specified file and return the file descriptor; an error is returned if the file already exists. The following is the pseudocode that calls the API in the Go style.\nparam:\nname: The name of the file.\npermission: Permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nCreateFile(name String, permission Mode) (error)\nOpen Open the file and return an error if the file descriptor does not exist. The following is the pseudocode that calls the API in the Go style.\nparam:\nname: The name of the file.\nreturn: File pointer, you can use it for various operations.\nOpenFile(name String) (*File, error)\nWrite BanyanDB provides two methods for writing files. Append mode, which adds new data to the end of a file. This mode is typically used for WAL. BanyanDB also supports vector Append mode, which supports appending consecutive buffers to the end of the file. Flush mode, which flushes all data to one file. 
File Create Create the specified file; an error is returned if the file already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite; you can use it as Mode.Read.\nCreateFile(name String, permission Mode) (error)\nOpen Open the file and return an error if the file does not exist. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\nreturn: File pointer, which you can use for various operations.\nOpenFile(name String) (*File, error)\nWrite BanyanDB provides two methods for writing files. Append mode adds new data to the end of a file; this mode is typically used for WAL. BanyanDB also supports a vector append mode, which appends consecutive buffers to the end of the file. Flush mode flushes all data to one file. An error is returned when writing to a directory, when the file does not exist, or when there is not enough space, and the incomplete file will be discarded. The flush operation is atomic, which means the file won't be created if an error happens during the flush process. The following is the pseudocode that calls the API in the go style.\nFor append mode:\nparam:\nbuffer: The data to append to the file.\nFile.AppendWriteFile(buffer []byte) (error)\nFor vector append mode:\nparam:\niov: The data in consecutive buffers.\nFile.AppendWritevFile(iov *[][]byte) (error)\nFor flush mode:\nparam:\nbuffer: The data to append to the file.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite; you can use it as Mode.Read.\nreturn: File pointer, which you can use for various operations.\nFlushWriteFile(buffer []byte, permission Mode) (*File, error)\nDelete BanyanDB provides a delete operation, which deletes a file at once. It returns an error if the directory does not exist or the file is currently being read or written.\nThe following is the pseudocode that calls the API in the go style.\nFile.DeleteFile() (error)\nRead For the reading operation, two read methods are provided. The first reads a specified location of data, relying on a specified offset and a buffer; BanyanDB also supports reading contiguous regions of a file and scattering them into discontinuous buffers. The second reads the entire file: BanyanDB provides stream reading, which can be used when the file is too large; the size fetched on each read can be set when using stream reading. If incorrect parameters are given, such as an invalid offset or a non-existent file, an error is returned. The following is the pseudocode that calls the API in the go style.\nFor reading a specified location of data:\nparam:\noffset: The location in the file where reading begins.\nbuffer: The read length is the same as the buffer length.\nFile.ReadFile(offset int, buffer []byte) (error)\nFor vector reading:\nparam:\niov: Discontinuous buffers in memory.\nFile.ReadvFile(iov *[][]byte) (error)\nFor stream reading:\nparam:\noffset: The location in the file where reading begins.\nbuffer: Every read length in the stream is the same as the buffer length.\nreturn: An iterator; the size of each iteration is the length of the buffer.\nFile.StreamReadFile(offset int, buffer []byte) (*iter, error)\nRename Rename the file, returning an error if a file with the new name already exists in the directory. The following is the pseudocode that calls the API in the go style.\nparam:\nnewName: The new name of the file.\nFile.RenameFile(newName String) (error)\nGet size Get the size of the data written to the file, returning an error if the file does not exist. The unit of file size is bytes. The following is the pseudocode that calls the API in the go style.\nreturn: The size of the data written to the file.\nFile.GetFileSize() (int, error)\nPermission When creating a file, the default owner is the user who created the file. The owner can specify the read and write permissions of the file. If not specified, the default is read and write permission. The following is the pseudocode that calls the API in the go style.\nparam:\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite; you can use it as Mode.Read.\nFile.SetFilePermission(permission Mode) (error)\n
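As a summary, the file operations above might be chained as in the following go-style pseudocode sketch. Again, it only reuses the signatures documented here (CreateFile, OpenFile, File.AppendWriteFile, File.GetFileSize, File.ReadFile); the file name and payload are hypothetical, and this is not the exact BanyanDB package API.

// Go-style pseudocode sketch reusing the documented signatures; not the real BanyanDB package API.
if err := CreateFile("wal-000001", Mode.ReadAndWrite); err != nil {
    return err // e.g. the file already exists
}
f, err := OpenFile("wal-000001")
if err != nil {
    return err
}
// Append mode is the typical path for WAL entries.
if err := f.AppendWriteFile([]byte("entry-1")); err != nil {
    return err
}
size, err := f.GetFileSize() // size of the written data, in bytes
if err != nil {
    return err
}
buf := make([]byte, size)
if err := f.ReadFile(0, buf); err != nil { // read the data back from offset 0
    return err
}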
","excerpt":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including …","ref":"/docs/skywalking-banyandb/latest/concept/persistence-storage/","title":"Persistence Storage"},{"body":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including index and data collected from SkyWalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the needs of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. By exposing necessary interfaces, upper components do not need to care how persistence is implemented and can avoid dealing with the differences between operating systems.\nArchitecture BanyanDB uses third-party storage for actual storage, and the file system shields the differences between platforms and storage systems, allowing developers to operate files as easily as on the local file system without worrying about specific details.\nDifferent data models are stored in different locations: for meta data, BanyanDB uses the local file system; for index and data, the architecture of the file system is divided into three layers.\n The first layer is the API interface; developers only need to care about how to operate the remote file system. The second layer is the storage system adapter, which masks the differences between storage systems. The last layer is the actual storage system. Even with a remote storage architecture, the local file system still plays a role and can be used to speed up reading and writing.  IO Mode Persistence storage offers a range of IO modes to cater to various throughput requirements. The interface can be accessed by developers and can be configured through settings in the configuration file.\nIo_uring Io_uring is a feature introduced in Linux 5.1; it is fully asynchronous and offers high throughput. In scenarios with massive storage, io_uring can bring significant benefits. The following diagram shows how io_uring works. If io_uring is enabled, read and write requests are first placed in the submission queue buffer when the operation API is called. When the threshold is reached, they are batch-submitted to the submission queue (SQ). After the kernel threads complete execution, the finished requests are placed in the completion queue (CQ), and the user can obtain the results.\nSynchronous IO The most common IO mode is synchronous IO, but it has relatively low throughput. BanyanDB provides a nonblocking mode that is compatible with lower Linux versions.\nOperation File Create Create the specified file and return the file instance; an error is returned if the file already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite; you can use it as Mode.Read.\nreturn: The file instance, which can be used for various file operations.\nCreateFile(name String, permission Mode) (File, error)\nWrite BanyanDB provides two methods for writing files. Append mode adds new data to the end of a file. BanyanDB also supports a vector append mode, which appends consecutive buffers to the end of the file. Flush mode flushes all data to one file.
An error is returned when writing to a directory, when the file does not exist, or when there is not enough space, and the incomplete file will be discarded. The flush operation is atomic, which means the file won't be created if an error happens during the flush process. The following is the pseudocode that calls the API in the go style.\nFor append mode:\nparam:\nbuffer: The data to append to the file.\nreturn: Actual length of the written data.\nFile.Write(buffer []byte) (int, error)\nFor vector append mode:\nparam:\niov: The data in consecutive buffers.\nreturn: Actual length of the written data.\nFile.Writev(iov *[][]byte) (int, error)\nFor flush mode:\nparam:\nbuffer: The data to append to the file.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite; you can use it as Mode.Read.\nreturn: Actual length of the flushed data.\nWrite(buffer []byte, permission Mode) (int, error)\nDelete BanyanDB provides a delete operation, which deletes a file at once. It returns an error if the directory does not exist or the file is currently being read or written.\nThe following is the pseudocode that calls the API in the go style.\nDeleteFile(name string) (error)\nRead For the reading operation, two read methods are provided. The first reads a specified location of data, relying on a specified offset and a buffer; BanyanDB also supports reading contiguous regions of a file and scattering them into discontinuous buffers. The second reads the entire file: BanyanDB provides stream reading, which can be used when the file is too large; the size fetched on each read can be set when using stream reading. If incorrect parameters are given, such as an invalid offset or a non-existent file, an error is returned. The following is the pseudocode that calls the API in the go style.\nFor reading a specified location of data:\nparam:\noffset: The location in the file where reading begins.\nbuffer: The read length is the same as the buffer length.\nreturn: Actual length of the read data.\nFile.Read(offset int64, buffer []byte) (int, error)\nFor vector reading:\nparam:\niov: Discontinuous buffers in memory.\nreturn: Actual length of the read data.\nFile.Readv(iov *[][]byte) (int, error)\nFor stream reading:\nparam:\nbuffer: Every read length in the stream is the same as the buffer length.\nreturn: An iterator; the size of each iteration is the length of the buffer.\nFile.StreamRead(buffer []byte) (*iter, error)\nGet size Get the size of the data written to the file, returning an error if the file does not exist. The unit of file size is bytes. The following is the pseudocode that calls the API in the go style.\nreturn: The size of the data written to the file.\nFile.Size() (int, error)\nClose Close the file. The following is the pseudocode that calls the API in the go style.\nFile.Close() error\n","excerpt":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including …","ref":"/docs/skywalking-banyandb/next/concept/persistence-storage/","title":"Persistence Storage"},{"body":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including write-ahead logging (WAL), index, and data collected from SkyWalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the needs of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer.
By exposing necessary interfaces, upper components do not need to care how persistence is implemented and avoid dealing with differences between different operating systems.\nArchitecture BanyanDB uses third-party storage for actual storage, and the file system shields the differences between different platforms and storage systems, allowing developers to operate files as easily as the local file system without worrying about specific details.\nFor different data models, stored in different locations, such as for meta and wal data, BanyanDB uses a local file system for storage. For index and data, the architecture of the file system is divided into three layers.\n The first layer is the API interface, which developers only need to care about how to operate the remote file system. The second layer is the storage system adapter, which is used to mask the differences between different storage systems. The last layer is the actual storage system. With the use of remote storage architecture, the local system can still play its role and can borrow the local system to speed up reading and writing.  IO Mode Persistence storage offers a range of IO modes to cater to various throughput requirements. The interface can be accessed by developers and can be configured through settings, which can be set in the configuration file.\nIo_uring Io_uring is a new feature in Linux 5.1, which is fully asynchronous and offers high throughput. In the scene of massive storage, io_uring can bring significant benefits. The following is the diagram about how io_uring works. If the user sets io_uring for use, the read and write requests will first be placed in the submission queue buffer when calling the operation API. When the threshold is reached, batch submissions will be made to SQ. After the kernel threads complete execution, the requests will be placed in the CQ, and the user can obtain the request results.\nSynchronous IO The most common IO mode is Synchronous IO, but it has a relatively low throughput. BanyanDB provides a nonblocking mode that is compatible with lower Linux versions.\nOperation Directory Create Create the specified directory and return the file descriptor, the error will happen if the directory already exists. The following is the pseudocode that calls the API in the go style.、\nparam:\nname: The name of the directory.\npermisson: Permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nCreateDirectory(name String, permission Mode) (error)\nOpen Open the directory and return an error if the file descriptor does not exist. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\nreturn: Directory pointer, you can use it for various operations.\nOpenDirectory(name String) (*Dir, error)\nDelete Delete the directory and all files and return an error if the directory does not exist or the directory not reading or writing. The following is the pseudocode that calls the API in the go style.\nDir.DeleteDirectory() (error)\nRename Rename the directory and return an error if the directory already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\nDir.RenameDirectory(newName String) (error)\nRead Get all lists of files or children\u0026rsquo;s directories in the directory and an error if the directory does not exist. 
The following is the pseudocode that calls the API in the go style.\nreturn: List of files belonging to the directory.\nDir.ReadDirectory() (FileList, error)\nPermission When creating a file, the default owner is the user who created the directory. The owner can specify read and write permissions of the directory. If not specified, the default is read and write permissions, which include permissions for all files in the directory. The following is the pseudocode that calls the API in the go style.\nparam:\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nDir.SetDirectoryPermission(permission Mode) (error)\nFile Create Create the specified file and return the file descriptor, the error will happen if the file already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nCreateFile(name String, permission Mode) (error)\nOpen Open the file and return an error if the file descriptor does not exist. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\nreturn: File pointer, you can use it for various operations.\nOpenFile(name String) (*File, error)\nWrite BanyanDB provides two methods for writing files. Append mode, which adds new data to the end of a file. This mode is typically used for WAL. And BanyanDB supports vector Append mode, which supports appending consecutive buffers to the end of the file. Flush mode, which flushes all data to one file. It will return an error when writing a directory, the file does not exist or there is not enough space, and the incomplete file will be discarded. The flush operation is atomic, which means the file won\u0026rsquo;t be created if an error happens during the flush process. The following is the pseudocode that calls the API in the go style.\nFor append mode:\nparam:\nbuffer: The data append to the file.\nFile.AppendWriteFile(buffer []byte) (error)\nFor vector append mode:\nparam:\niov: The data in consecutive buffers.\nFile.AppendWritevFile(iov *[][]byte) (error)\nFor flush mode:\nparam:\nbuffer: The data append to the file.\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nreturn: File pointer, you can use it for various operations.\nFlushWriteFile(buffer []byte, permission Mode) (*File, error)\nDelete BanyanDB provides the deleting operation, which can delete a file at once. it will return an error if the directory does not exist or the file not reading or writing.\nThe following is the pseudocode that calls the API in the go style.\nFile.DeleteFile() (error)\nRead For reading operation, two read methods are provided: Reading a specified location of data, which relies on a specified offset and a buffer. And BanyanDB supports reading contiguous regions of a file and dispersing them into discontinuous buffers. Read the entire file, BanyanDB provides stream reading, which can use when the file is too large, the size gets each time can be set when using stream reading. If entering incorrect parameters such as incorrect offset or non-existent file, it will return an error. 
The following is the pseudocode that calls the API in the go style.\nFor reading specified location of data:\nparam:\noffset: Read begin location of the file.\nbuffer: The read length is the same as the buffer length.\nFile.ReadFile(offset int, buffer []byte) (error)\nFor vector reading:\nparam:\niov: Discontinuous buffers in memory.\nFile.ReadvFile(iov *[][]byte) (error)\nFor stream reading:\nparam:\noffset: Read begin location of the file.\nbuffer: Every read length in the stream is the same as the buffer length.\nreturn: A Iterator, the size of each iteration is the length of the buffer.\nFile.StreamReadFile(offset int, buffer []byte) (*iter, error)\nRename Rename the file and return an error if the directory exists in this directory. The following is the pseudocode that calls the API in the go style.\nparam:\nnewName: The new name of the file.\nFile.RenameFile(newName String) (error)\nGet size Get the file written data\u0026rsquo;s size and return an error if the file does not exist. The unit of file size is Byte. The following is the pseudocode that calls the API in the go style.\nreturn: the file written data\u0026rsquo;s size.\nFile.GetFileSize() (int, error)\nPermission When creating a file, the default owner is the user who created the file. The owner can specify the read and write permissions of the file. If not specified, the default is read and write permissions. The following is the pseudocode that calls the API in the go style.\nparam:\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nFile.SetFilePermission(permission Mode) (error)\n","excerpt":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including …","ref":"/docs/skywalking-banyandb/v0.5.0/concept/persistence-storage/","title":"Persistence Storage"},{"body":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). 
Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. 
For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. 
Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. 
We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking …","ref":"/docs/main/latest/en/concepts-and-designs/ebpf-cpu-profiling/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    
Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. 
The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking …","ref":"/docs/main/next/en/concepts-and-designs/ebpf-cpu-profiling/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","excerpt":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/ebpf-cpu-profiling/","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF"},{"body":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe configuration has 5 parts, which are common_config, gatherer, processor and the sender.\ncommon_config    Config Description     pipe_name The unique collect space name.    Gatherer The gatherer has 2 roles, which are the receiver and fetcher.\nReceiver Role    Config Description     server_name The server name in the sharing pipe, which would be used in the receiver plugin.   receiver The receiver configuration. Please read the doc to find all receiver plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    Fetcher Role    Config Description     fetch_interval The time interval between two fetch operations. The time unit is millisecond.   fetcher The fetcher configuration. Please read the doc to find all fetcher plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    processor The filter configuration. Please read the doc to find all filter plugins.\nsender    Config Description     flush_time The time interval between two flush operations. And the time unit is millisecond.   max_buffer_size The maximum buffer elements.   min_flush_events The minimum flush elements.   client_name The client name used in the forwarders of the sharing pipe.   forwarders The forwarder plugin list. Please read the doc to find all forwarders plugins.   fallbacker The fallbacker plugin. Please read the doc to find all fallbacker plugins.    Example pipes:- common_config:pipe_name:pipe1gatherer:server_name:\u0026#34;grpc-server\u0026#34;receiver:plugin_name:\u0026#34;grpc-native-log-receiver\u0026#34;queue:plugin_name:\u0026#34;mmap-queue\u0026#34;segment_size:${SATELLITE_MMAP_QUEUE_SIZE:524288}max_in_mem_segments:${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}queue_dir:\u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;processor:filters:sender:fallbacker:plugin_name:none-fallbackerflush_time:${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}max_buffer_size:${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}min_flush_events:${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}client_name:kafka-clientforwarders:- plugin_name:native-log-kafka-forwardertopic:${SATELLITE_NATIVELOG-TOPIC:log-topic}","excerpt":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe …","ref":"/docs/skywalking-satellite/latest/en/setup/configuration/pipe-plugins/","title":"Pipe Plugins"},{"body":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe configuration has 5 parts, which are common_config, gatherer, processor and the sender.\ncommon_config    Config Description     pipe_name The unique collect space name.    Gatherer The gatherer has 2 roles, which are the receiver and fetcher.\nReceiver Role    Config Description     server_name The server name in the sharing pipe, which would be used in the receiver plugin.   receiver The receiver configuration. Please read the doc to find all receiver plugins.   
queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    Fetcher Role    Config Description     fetch_interval The time interval between two fetch operations. The time unit is millisecond.   fetcher The fetcher configuration. Please read the doc to find all fetcher plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    processor The filter configuration. Please read the doc to find all filter plugins.\nsender    Config Description     flush_time The time interval between two flush operations. And the time unit is millisecond.   max_buffer_size The maximum buffer elements.   min_flush_events The minimum flush elements.   client_name The client name used in the forwarders of the sharing pipe.   forwarders The forwarder plugin list. Please read the doc to find all forwarders plugins.   fallbacker The fallbacker plugin. Please read the doc to find all fallbacker plugins.    Example pipes:- common_config:pipe_name:pipe1gatherer:server_name:\u0026#34;grpc-server\u0026#34;receiver:plugin_name:\u0026#34;grpc-native-log-receiver\u0026#34;queue:plugin_name:\u0026#34;mmap-queue\u0026#34;segment_size:${SATELLITE_MMAP_QUEUE_SIZE:524288}max_in_mem_segments:${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}queue_dir:\u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;processor:filters:sender:fallbacker:plugin_name:none-fallbackerflush_time:${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}max_buffer_size:${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}min_flush_events:${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}client_name:kafka-clientforwarders:- plugin_name:native-log-kafka-forwardertopic:${SATELLITE_NATIVELOG-TOPIC:log-topic}","excerpt":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe …","ref":"/docs/skywalking-satellite/next/en/setup/configuration/pipe-plugins/","title":"Pipe Plugins"},{"body":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe configuration has 5 parts, which are common_config, gatherer, processor and the sender.\ncommon_config    Config Description     pipe_name The unique collect space name.    Gatherer The gatherer has 2 roles, which are the receiver and fetcher.\nReceiver Role    Config Description     server_name The server name in the sharing pipe, which would be used in the receiver plugin.   receiver The receiver configuration. Please read the doc to find all receiver plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    Fetcher Role    Config Description     fetch_interval The time interval between two fetch operations. The time unit is millisecond.   fetcher The fetcher configuration. Please read the doc to find all fetcher plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    processor The filter configuration. Please read the doc to find all filter plugins.\nsender    Config Description     flush_time The time interval between two flush operations. And the time unit is millisecond.   max_buffer_size The maximum buffer elements.   min_flush_events The minimum flush elements.   client_name The client name used in the forwarders of the sharing pipe.   forwarders The forwarder plugin list. Please read the doc to find all forwarders plugins.   fallbacker The fallbacker plugin. Please read the doc to find all fallbacker plugins.    
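The example pipe definitions on this page are stored with their line breaks and indentation stripped, which makes them hard to read. Re-indented (the nesting is inferred from the parts described above and from the key order of the flattened example that follows, so treat it as a best-effort reconstruction, not a normative layout), the same pipe definition reads roughly as:
pipes:
  - common_config:
      pipe_name: pipe1
    gatherer:
      server_name: "grpc-server"
      receiver:
        plugin_name: "grpc-native-log-receiver"
      queue:
        plugin_name: "mmap-queue"
        segment_size: ${SATELLITE_MMAP_QUEUE_SIZE:524288}
        max_in_mem_segments: ${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}
        queue_dir: "pipe1-log-grpc-receiver-queue"
    processor:
      filters:
    sender:
      fallbacker:
        plugin_name: none-fallbacker
      flush_time: ${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}
      max_buffer_size: ${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}
      min_flush_events: ${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}
      client_name: kafka-client
      forwarders:
        - plugin_name: native-log-kafka-forwarder
          topic: ${SATELLITE_NATIVELOG-TOPIC:log-topic}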
Example pipes:- common_config:pipe_name:pipe1gatherer:server_name:\u0026#34;grpc-server\u0026#34;receiver:plugin_name:\u0026#34;grpc-native-log-receiver\u0026#34;queue:plugin_name:\u0026#34;mmap-queue\u0026#34;segment_size:${SATELLITE_MMAP_QUEUE_SIZE:524288}max_in_mem_segments:${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}queue_dir:\u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;processor:filters:sender:fallbacker:plugin_name:none-fallbackerflush_time:${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}max_buffer_size:${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}min_flush_events:${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}client_name:kafka-clientforwarders:- plugin_name:native-log-kafka-forwardertopic:${SATELLITE_NATIVELOG-TOPIC:log-topic}","excerpt":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/pipe-plugins/","title":"Pipe Plugins"},{"body":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... 
|- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. (HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. 
You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   
parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. 
All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. --\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... \u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  
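Before looking at the expected data, it helps to see what a minimal configuration.yml for such a JVM-container case could look like. This is only a sketch assembled from the field table above; the health-check path, the fixed port, and the start-script path are illustrative guesses based on the directory structure shown earlier, not values copied from the real httpclient-case scenario:
# Sketch only: field names follow the configuration.yml table above,
# but the healthCheck path and the fixed port are illustrative.
type: jvm
entryService: http://localhost:8080/httpclient-case/case/httpclient
healthCheck: http://localhost:8080/httpclient-case/case/healthCheck
startScript: ./bin/startup.sh
runningMode: default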
+-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  
Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml. 
If a test case required to run in JDK 21 environment, please add you test case into file plugins-jdk21-test.\u0026lt;n\u0026gt;.yaml.\njobs:PluginsTest:name:Pluginruns-on:ubuntu-latesttimeout-minutes:90strategy:fail-fast:truematrix:case:# ...- \u0026lt;your scenario test directory name\u0026gt;# ...","excerpt":"Plugin automatic test framework The plugin test framework is designed to verify the function and …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/plugin-test/","title":"Plugin automatic test framework"},{"body":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... 
|- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. (HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. 
The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    
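Putting the span format and the SegmentRef fields together, a single entry span that carries one CrossProcess ref would be written roughly as follows. This is an illustrative sketch with placeholder endpoint names and values; the HttpClient walk-through later on this page shows a complete, real case:
# Illustrative sketch: placeholder endpoints, operators as described above.
- segmentId: not null
  spans:
    - operationName: /demo-case/case/downstream   # hypothetical endpoint
      parentSpanId: -1
      spanId: 0
      startTime: nq 0
      endTime: nq 0
      isError: false
      spanLayer: Http
      spanType: Entry
      componentId: 1
      tags:
        - {key: http.method, value: GET}
      logs: []
      peer: null
      refs:
        - {parentEndpoint: /demo-case/case/entry, networkAddress: 'localhost:8080', refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId: not null}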
Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. 
All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. --\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... \u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  
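The expected-data snippets in this walk-through are also stored with their indentation stripped. Re-indented as a sketch (the span bodies are elided here; they appear in full, flattened, below), the top-level shape of expectedData.yaml for this case is:
segmentItems:
  - serviceName: httpclient-case
    segmentSize: ge 2          # health-check traffic may add extra segments
    segments:
      - segmentId: not null
        spans:
          # SegmentA: Tomcat entry span + HttpClient exit span (see below)
      - segmentId: not null
        spans:
          # SegmentB: Tomcat entry span with a ref pointing back to SegmentA (see below)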
+-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  
Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml. 
If a test case is required to run in a JDK 21 environment, please add your test case into a plugins-jdk21-test.<n>.yaml file.

jobs:
  PluginsTest:
    name: Plugin
    runs-on: ubuntu-latest
    timeout-minutes: 90
    strategy:
      fail-fast: true
      matrix:
        case:
          # ...
          - <your scenario test directory name>
          # ...
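For example, if your scenario directory were named httpclient-4.5.x-scenario (an illustrative name, not taken from this document), the addition to the chosen workflow file would be a single matrix entry, kept in alphabetical order:

      matrix:
        case:
          # ... existing cases before it, in alphabetical order ...
          - httpclient-4.5.x-scenario   # illustrative new scenario directory name
          # ... existing cases after it ...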
Plugin automatic test framework

The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses a container-based tech stack and requires a set of real services with the agents installed. Then the test mock OAP backend runs to check the segment data sent from the agents.

Every plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.

Environment Requirements

- MacOS/Linux
- JDK 8+
- Docker
- Docker Compose

Case Base Image Introduction

The test framework provides JVM-container and Tomcat-container base images, including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.

JVM-container Image Introduction

JVM-container uses eclipse-temurin:8-jdk as the base image. In CI, JVM-container supports both JDK8 and JDK17, inheriting from eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. Customizing the base Java docker image is supported by specifying base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and an uber jar, by using mvn clean package.

Take the following test projects as examples:

- sofarpc-scenario is a single-project case.
- webflux-scenario is a case including multiple projects.
- jdk17-with-gson-scenario is a single-project case with JDK17.

Tomcat-container Image Introduction

Tomcat-container uses tomcat:8.5-jdk8-openjdk and tomcat:8.5-jdk17-openjdk as the base images. Customizing the base Tomcat docker image is supported by specifying base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.

Take the following test project as an example:

- spring-4.3.x-scenario

Test project hierarchical structure

The test case is an independent Maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two externally accessible endpoints (usually two URLs) are required.

All test case code should be in the org.apache.skywalking.apm.testcase.* package. If some code is expected to be instrumented, those classes can be in the test.org.apache.skywalking.apm.testcase.* package.

JVM-container test project hierarchical structure

[plugin-scenario]
  |- [bin]
  |    |- startup.sh
  |- [config]
  |    |- expectedData.yaml
  |- [src]
  |    |- [main]
  |    |    |- ...
  |    |- [resource]
  |         |- log4j2.xml
  |- pom.xml
  |- configuration.yml
  |- support-version.list

[] = directory

Tomcat-container test project hierarchical structure

[plugin-scenario]
  |- [config]
  |    |- expectedData.yaml
  |- [src]
  |    |- [main]
  |    |    |- ...
  |    |- [resource]
  |    |    |- log4j2.xml
  |    |- [webapp]
  |         |- [WEB-INF]
  |              |- web.xml
  |- pom.xml
  |- configuration.yml
  |- support-version.list

[] = directory

Test case configuration files

The following files are required in every test case.

- configuration.yml — Declares the basic case information, including case name, entrance endpoints, mode, and dependencies.
- expectedData.yaml — Describes the expected segmentItems, meterItems, or logItems.
- support-version.list — Lists the target versions for this case.
- startup.sh — JVM-container only. This is not required when using Tomcat-container.

* The support-version.list format requires one version per line (containing only the last version number of each minor version). You may use # to comment out a version.

configuration.yml

- type — Image type; options: jvm or tomcat. Required.
- entryService — The entrance endpoint (URL) for test case access. Required. (HTTP method: GET)
- healthCheck — The health check endpoint (URL) for test case access. Required. (HTTP method: HEAD)
- startScript — Path of the start-up script. Required for type: jvm only.
- runningMode — Running mode with the optional plugin; options: default (default), with_optional, or with_bootstrap.
- withPlugins — Plugin selector rule, e.g. apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.
- environment — Same as docker-compose#environment.
- depends_on — Same as docker-compose#depends_on.
- dependencies — Same as docker-compose#services; image, links, hostname, command, environment, and depends_on are supported.
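A minimal sketch of a configuration.yml built from these fields might look like the following; the scenario name, URLs, image, and environment variable are illustrative assumptions, not values taken from this document:

type: jvm
entryService: http://localhost:8080/demo-scenario/case/entry        # illustrative entrance URL
healthCheck: http://localhost:8080/demo-scenario/case/healthCheck   # illustrative health-check URL
startScript: ./bin/startup.sh
runningMode: default
environment:
  - DEMO_FLAG=true            # illustrative environment variable
dependencies:
  demo-server:                # illustrative dependency service
    image: nginx:1.25
    hostname: demo-server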
Note: docker-compose activates only when dependencies is blank.

runningMode option description:

- default — Activates all plugins in the plugin folder, like the official distribution agent.
- with_optional — Activates the default plugins and the plugins in optional-plugin selected by the given selector.
- with_bootstrap — Activates the default plugins and the plugins in bootstrap-plugin selected by the given selector.

with_optional/with_bootstrap support multiple selectors, separated by ;.

File Format

type:
entryService:
healthCheck:
startScript:
runningMode:
withPlugins:
environment:
  ...
depends_on:
  ...
dependencies:
  service1:
    image:
    hostname:
    expose:
      ...
    environment:
      ...
    depends_on:
      ...
    links:
      ...
    entrypoint:
      ...
    healthcheck:
      ...

- dependencies supports the docker compose healthcheck, but the format is a little different: every config item needs to start with -, and be described as a string line.

For example, in the official docker-compose document, the health check is:

healthcheck:
  test: ["CMD", "curl", "-f", "http://localhost"]
  interval: 1m30s
  timeout: 10s
  retries: 3
  start_period: 40s

Here you should write:

healthcheck:
  - 'test: ["CMD", "curl", "-f", "http://localhost"]'
  - "interval: 1m30s"
  - "timeout: 10s"
  - "retries: 3"
  - "start_period: 40s"

In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in the pom. You may use ${CASE_SERVER_IMAGE_VERSION} as the version number; it is changed for each version during the test.

- Resource-related configurations, such as volumes, ports, and ulimits, are not supported, because in test scenarios there is no need to map any port to the host VM or to mount any folder.

Take the following test cases as examples:

- dubbo-2.7.x with JVM-container
- jetty with JVM-container
- gateway with runningMode
- canal with docker-compose

expectedData.yaml

Operators for numbers:

- nq — Not equal
- eq — Equal (default)
- ge — Greater than or equal
- gt — Greater than

Operators for Strings:

- not null — Not null
- not blank — Not blank; recommended for String-type fields, as the default value may be a blank string, such as span tags
- null — Null or empty String
- eq — Equal (default)
- start with — Tests whether the string starts with the specified prefix. DO NOT use it with meterItem tag values.
- end with — Tests whether the string ends with the specified suffix. DO NOT use it with meterItem tag values.

Expected Data Format Of The Segment

segmentItems:
  - serviceName: SERVICE_NAME(string)
    segmentSize: SEGMENT_SIZE(int)
    segments:
      - segmentId: SEGMENT_ID(string)
        spans:
          ...

- serviceName — Service name.
- segmentSize — The expected number of segments.
- segmentId — Trace ID.
- spans — Segment span list. In the next section, you will learn how to describe each span.
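As a quick, hedged illustration of how these operators appear at the segment level of an expectedData.yaml (the service name is an assumption for illustration; span descriptions follow in the next section):

segmentItems:
  - serviceName: demo-scenario     # illustrative service name
    segmentSize: ge 1              # 'ge' number operator: at least one segment
    segments:
      - segmentId: not null        # 'not null' String operator
        # spans: ... (described in the next section)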
Expected Data Format Of The Span

Note: The order of the span list should follow the order of the span finish time.

operationName: OPERATION_NAME(string)
parentSpanId: PARENT_SPAN_ID(int)
spanId: SPAN_ID(int)
startTime: START_TIME(int)
endTime: END_TIME(int)
isError: IS_ERROR(string: true, false)
spanLayer: SPAN_LAYER(string: DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)
spanType: SPAN_TYPE(string: Exit, Entry, Local)
componentId: COMPONENT_ID(int)
tags:
  - {key: TAG_KEY(string), value: TAG_VALUE(string)}
  ...
logs:
  - {key: LOG_KEY(string), value: LOG_VALUE(string)}
  ...
peer: PEER(string)
refs:
  - {traceId: TRACE_ID(string), parentTraceSegmentId: PARENT_TRACE_SEGMENT_ID(string), parentSpanId: PARENT_SPAN_ID(int), parentService: PARENT_SERVICE(string), parentServiceInstance: PARENT_SERVICE_INSTANCE(string), parentEndpoint: PARENT_ENDPOINT_NAME(string), networkAddress: NETWORK_ADDRESS(string), refType: REF_TYPE(string: CrossProcess, CrossThread)}
  ...

- operationName — Span operation name.
- parentSpanId — Parent span ID. Note: the parent span ID of the first span should be -1.
- spanId — Span ID. Note: starts from 0.
- startTime — Span start time. It is impossible to get the accurate time; not 0 should be enough.
- endTime — Span finish time. It is impossible to get the accurate time; not 0 should be enough.
- isError — Span status, true or false.
- componentId — Component ID for your plugin.
- tags — Span tag list. Notice: keep the same order as the plugin coded.
- logs — Span log list. Notice: keep the same order as the plugin coded.
- spanLayer — Options: DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.
- spanType — Span type; options: Exit, Entry, or Local.
- peer — Remote network address, mostly IP + port. For an exit span, this should be required.

The verify description for SegmentRef:

- traceId
- parentTraceSegmentId — Parent segment ID, pointing to the segment ID in the parent segment.
- parentSpanId — Parent span ID, pointing to the span ID in the parent segment.
- parentService — The service name of the parent/downstream service.
- parentServiceInstance — The instance name of the parent/downstream service instance.
- parentEndpoint — The endpoint of the parent/downstream service.
- networkAddress — The peer value of the parent exit span.
- refType — Ref type; options: CrossProcess or CrossThread.

Expected Data Format Of The Meter Items

meterItems:
  - serviceName: SERVICE_NAME(string)
    meterSize: METER_SIZE(int)
    meters:
      - ...

- serviceName — Service name.
- meterSize — The expected number of meters.
- meters — Meter list. Follow the next section to see how to describe every meter.

Expected Data Format Of The Meter

meterId:
  name: NAME(string)
  tags:
    - {name: TAG_NAME(string), value: TAG_VALUE(string)}
singleValue: SINGLE_VALUE(double)
histogramBuckets:
  - HISTOGRAM_BUCKET(double)
  ...

The verify description for MeterId:

- name — Meter name.
- tags — Meter tags.
- tags.name — Tag name.
- tags.value — Tag value.
- singleValue — Counter or gauge value. Use a number condition operator to validate, such as gt or ge. If the current meter is a histogram, this field is not needed.
- histogramBuckets — Histogram buckets. The bucket list must be ordered. The tool asserts that at least one bucket of the histogram has a nonzero count. If the current meter is a counter or gauge, this field is not needed.

Expected Data Format Of The Log Items

logItems:
  - serviceName: SERVICE_NAME(string)
    logSize: LOG_SIZE(int)
    logs:
      - ...

- serviceName — Service name.
- logSize — The expected number of logs.
- logs — Log list. Follow the next section to see how to describe every log.

Expected Data Format Of The Log

timestamp: TIMESTAMP_VALUE(int)
endpoint: ENDPOINT_VALUE(int)
traceContext:
  traceId: TRACE_ID_VALUE(string)
  traceSegmentId: TRACE_SEGMENT_ID_VALUE(string)
  spanId: SPAN_ID_VALUE(int)
body:
  type: TYPE_VALUE(string)
  content:
    # Choose one of three (text, json or yaml)
    text: TEXT_VALUE(string)
    # json: JSON_VALUE(string)
    # yaml: YAML_VALUE(string)
tags:
  data:
    - key: TAG_KEY(string)
      value: TAG_VALUE(string)
    ...
layer: LAYER_VALUE(string)
...

The verify description for Log:

- timestamp — Log timestamp.
- endpoint — Log endpoint.
- traceContext.traceId — The trace ID associated with the log.
- traceContext.traceSegmentId — The trace segment ID associated with the log.
- traceContext.spanId — The span ID associated with the log.
- body.type — Log body type.
- body.content — Log content; the sub-field is one of three (text, json, or yaml).
- tags.data — Log tags, key-value pairs.
- layer — Log layer.

startup.sh

This script provides the start point for a JVM-based service; most start with a java -jar command plus some variables. The following system environment variables are available in the shell.

- agent_opts — Agent plugin opts; check the details in the plugin doc or the same opt added in this PR.
- SCENARIO_NAME — Service name. Defaults to the case folder name.
- SCENARIO_VERSION — Version.
- SCENARIO_ENTRY_SERVICE — Entrance URL to access this service.
- SCENARIO_HEALTH_CHECK_URL — Health check URL.

${agent_opts} is required to be added to your java -jar command; it includes the parameters injected by the test framework and gets the agent installed. All other parameters should be added after ${agent_opts}.

The test framework sets the service name to the test case folder name by default, but in some cases more than one test project is required to run with different service codes; you can set it explicitly, as in the following example.

Example

home="$(cd "$(dirname $0)"; pwd)"

java -jar ${agent_opts} "-Dskywalking.agent.service_name=jettyserver-scenario" ${home}/../libs/jettyserver-scenario.jar &
sleep 1
java -jar ${agent_opts} "-Dskywalking.agent.service_name=jettyclient-scenario" ${home}/../libs/jettyclient-scenario.jar &

Only set this, or use other SkyWalking options, when it is really necessary.

Take the following test cases as examples:

- undertow
- webflux

Best Practices

How To Use The Archetype To Create A Test Case Project

We provide archetypes and a script to make creating a project easier. They create a complete test case project, so that we only need to focus on the case itself. First, use the following command to see the script's usage:

bash ${SKYWALKING_HOME}/test/plugin/generator.sh

Then run it to generate a project, named by scenario_name, under ./scenarios.

Recommendations for the pom

<properties>
    <!-- Provide and use this property in the pom. -->
    <!-- This version should match the library version, -->
    <!-- in this case, http components lib version 4.3. -->
    <test.framework.version>4.3</test.framework.version>
</properties>
<dependencies>
    <dependency>
        <groupId>org.apache.httpcomponents</groupId>
        <artifactId>httpclient</artifactId>
        <version>${test.framework.version}</version>
    </dependency>
    ...
</dependencies>
<build>
    <!-- Set the package final name to the same as the test case folder name. -->
    <finalName>httpclient-4.3.x-scenario</finalName>
    ....
</build>

How To Implement Heartbeat Service

The heartbeat service is designed for checking the service's availability. It is a simple HTTP service; returning 200 means the target service is ready. Then the traffic generator accesses the entry service and verifies the expected data. Users should consider using this service to detect, for example, whether the dependent services are ready, especially when the dependent services are databases or clusters.

Notice: because the heartbeat service could be traced fully or partially, segmentSize in expectedData.yaml should use ge as the operator, and the segments of the heartbeat service should not be included in the expected segment data.

The example Process of Writing Tracing Expected Data

The expected data file, expectedData.yaml, includes a segmentItems part.

We are using the HttpClient plugin to show how to write the expected data.

There are two key points of testing:

- Whether the HttpClient span is created.
- Whether the ContextCarrier is created correctly and propagates across processes.

(Sequence diagram: the Browser sends a WebHttp request to the Case Servlet; the Case Servlet calls the ContextPropagateServlet through HttpClient, and both calls return.)

segmentItems

By following the flow of the HttpClient case, there should be two segments created.

- One segment represents the CaseServlet access. Let's name it SegmentA.
- One segment represents the ContextPropagateServlet access. Let's name it SegmentB.
segmentItems:
  - serviceName: httpclient-case
    segmentSize: ge 2 # Could have more than one health check segment, because the dependency is not standby.

Because the Tomcat plugin is a default plugin of SkyWalking, SegmentA contains two spans:
- Tomcat entry span
- HttpClient exit span

SegmentA's span list should look like the following:

- segmentId: not null
  spans:
    - operationName: /httpclient-case/case/context-propagate
      parentSpanId: 0
      spanId: 1
      startTime: nq 0
      endTime: nq 0
      isError: false
      spanLayer: Http
      spanType: Exit
      componentId: eq 2
      tags:
        - {key: url, value: 'http://127.0.0.1:8080/httpclient-case/case/context-propagate'}
        - {key: http.method, value: GET}
        - {key: http.status_code, value: '200'}
      logs: []
      peer: 127.0.0.1:8080
    - operationName: /httpclient-case/case/httpclient
      parentSpanId: -1
      spanId: 0
      startTime: nq 0
      endTime: nq 0
      spanLayer: Http
      isError: false
      spanType: Entry
      componentId: 1
      tags:
        - {key: url, value: 'http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient'}
        - {key: http.method, value: GET}
        - {key: http.status_code, value: '200'}
      logs: []
      peer: null

SegmentB should have only one Tomcat entry span, but it includes the ref pointing to SegmentA.

SegmentB's span list should look like the following:

- segmentId: not null
  spans:
    - operationName: /httpclient-case/case/context-propagate
      parentSpanId: -1
      spanId: 0
      tags:
        - {key: url, value: 'http://127.0.0.1:8080/httpclient-case/case/context-propagate'}
        - {key: http.method, value: GET}
        - {key: http.status_code, value: '200'}
      logs: []
      startTime: nq 0
      endTime: nq 0
      spanLayer: Http
      isError: false
      spanType: Entry
      componentId: 1
      peer: null
      refs:
        - {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: 'localhost:8080', refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId: not null}

The example

Process of Writing Meter Expected Data
The expected data file, expectedData.yaml, includes the MeterItems part.

We are using the toolkit plugin to demonstrate how to write the expected data. When writing a meter plugin, the expected data file stays the same.

There is one key point of testing:
- Build a meter and operate it.

For example, a Counter and a Histogram:

MeterFactory.counter("test_counter").tag("ck1", "cv1").build().increment(1d);
MeterFactory.histogram("test_histogram").tag("hk1", "hv1").steps(1d, 5d, 10d).build().addValue(2d);

[Sequence diagram: the Plugin builds or operates a meter, and the Agent core records it.]

meterItems
Following the flow of the toolkit case, two meters should be created.
- Meter test_counter created from MeterFactory#counter. Let's name it MeterA.
- Meter test_histogram created from MeterFactory#histogram. Let's name it MeterB.
meterItems:
  - serviceName: toolkit-case
    meterSize: 2

These show two kinds of meter: MeterA has a single value, and MeterB has a histogram value.

MeterA should look like the following; counter and gauge use the same data format.

- meterId:
    name: test_counter
    tags:
      - {name: ck1, value: cv1}
  singleValue: gt 0

MeterB should look like the following.

- meterId:
    name: test_histogram
    tags:
      - {name: hk1, value: hv1}
  histogramBuckets:
    - 0.0
    - 1.0
    - 5.0
    - 10.0

Local Test and Pull Request To The Upstream
First of all, the test case project should compile successfully, have the right project structure, and be deployable. The developer should verify that the start script runs on Linux/MacOS and that the entryService/health services provide the expected responses.

You can run the test with the following commands:

cd ${SKYWALKING_HOME}
bash ./test/plugin/run.sh -f ${scenario_name}

Notice: if code in ./apm-sniffer has changed, whether because of your change or a git update, please recompile the skywalking-agent, because the test framework uses the existing skywalking-agent folder rather than recompiling it every time.

Use ${SKYWALKING_HOME}/test/plugin/run.sh -h to learn more command options.

If the local test passes, you can add the case to a .github/workflows/plugins-test.<n>.yaml file, which drives the tests on the GitHub Actions of the official SkyWalking repository. Based on your plugin's name, please add the test case into the .github/workflows/plugins-test.<n>.yaml file in alphabetical order.

Every test case is a GitHub Actions job. Please use the scenario directory name as the case name; mostly you'll just need to decide which file (plugins-test.<n>.yaml) to add your test case to, and simply put one line (as follows) in it, taking the existing cases as examples. You can run python3 tools/select-group.py to see which file contains the fewest cases and add your case to it, in order to balance the running time of each group.

If a test case has to run in a JDK 17 environment, please add your test case into a plugins-jdk17-test.<n>.yaml file. If a test case has to run in a JDK 21 environment, please add your test case into a plugins-jdk21-test.<n>.yaml file.

jobs:
  PluginsTest:
    name: Plugin
    runs-on: ubuntu-latest
    timeout-minutes: 90
    strategy:
      fail-fast: true
      matrix:
        case:
          # ...
          - <your scenario test directory name>
          # ...
","excerpt":"Plugin automatic test framework The plugin test framework is designed to verify the function and …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/plugin-test/","title":"Plugin automatic test framework"},{"body":"Plugin Configurations    key environment key default value description     http.server_collect_parameters SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS false Collect the parameters of the HTTP request on the server side.   mongo.collect_statement SW_AGENT_PLUGIN_CONFIG_MONGO_COLLECT_STATEMENT false Collect the statement of the MongoDB request.   sql.collect_parameter SW_AGENT_PLUGIN_CONFIG_SQL_COLLECT_PARAMETER false Collect the parameter of the SQL request.   redis.max_args_bytes SW_AGENT_PLUGIN_CONFIG_REDIS_MAX_ARGS_BYTES 1024 Limit the bytes size of redis args request.   reporter.discard SW_AGENT_REPORTER_DISCARD false Discard the reporter.    
","excerpt":"Plugin Configurations    key environment key default value description …","ref":"/docs/skywalking-go/latest/en/agent/plugin-configurations/","title":"Plugin Configurations"},{"body":"Plugin Configurations    key environment key default value description     http.server_collect_parameters SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS false Collect the parameters of the HTTP request on the server side.   mongo.collect_statement SW_AGENT_PLUGIN_CONFIG_MONGO_COLLECT_STATEMENT false Collect the statement of the MongoDB request.   sql.collect_parameter SW_AGENT_PLUGIN_CONFIG_SQL_COLLECT_PARAMETER false Collect the parameter of the SQL request.   redis.max_args_bytes SW_AGENT_PLUGIN_CONFIG_REDIS_MAX_ARGS_BYTES 1024 Limit the bytes size of redis args request.   reporter.discard SW_AGENT_REPORTER_DISCARD false Discard the reporter.   gin.collect_request_headers SW_AGENT_PLUGIN_CONFIG_GIN_COLLECT_REQUEST_HEADERS  Collect the http header of gin request.   gin.header_length_threshold SW_AGENT_PLUGIN_CONFIG_GIN_HEADER_LENGTH_THRESHOLD 2048 Controlling the length limitation of all header values.    ","excerpt":"Plugin Configurations    key environment key default value description …","ref":"/docs/skywalking-go/next/en/agent/plugin-configurations/","title":"Plugin Configurations"},{"body":"Plugin Configurations    key environment key default value description     http.server_collect_parameters SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS false Collect the parameters of the HTTP request on the server side.   mongo.collect_statement SW_AGENT_PLUGIN_CONFIG_MONGO_COLLECT_STATEMENT false Collect the statement of the MongoDB request.   sql.collect_parameter SW_AGENT_PLUGIN_CONFIG_SQL_COLLECT_PARAMETER false Collect the parameter of the SQL request.   redis.max_args_bytes SW_AGENT_PLUGIN_CONFIG_REDIS_MAX_ARGS_BYTES 1024 Limit the bytes size of redis args request.   reporter.discard SW_AGENT_REPORTER_DISCARD false Discard the reporter.    ","excerpt":"Plugin Configurations    key environment key default value description …","ref":"/docs/skywalking-go/v0.4.0/en/agent/plugin-configurations/","title":"Plugin Configurations"},{"body":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll plugins must follow these steps:\n Create a new plugin module: Create a new project in the specified directory and import the plugin API module. Define the enhancement object: Define the description for the plugin. Invoke the plugin API: Call the API provided by the core to complete the core invocation. Import the plugin module: Import the plugin into the management module for users to use.  Create a new plugin module The plugin must create a new module, which is currently stored in the project\u0026rsquo;s plugins directory.\nPlugins can import the following two modules:\n Agent core: This module provides all the dependencies needed for the plugin, including the plugin API, enhancement declaration objects, etc. Agent core plugin should be github.com/apache/skywalking-go/plugins/core and replaced by the relative location. Framework to be enhanced: Import the framework you wish to enhance.  Note: Plugins should NOT import and use any other modules, as this may cause compilation issues for users. If certain tools are needed, they should be provided by the agent core.\nDefine the enhancement object In the root directory of the project, create a new go file to define the basic information of the plugin. 
The basic information includes the following methods, corresponding to the Instrument interface:\n Name: The name of the plugin. Please keep this name consistent with the newly created project name. The reason will be explained later. Base Package: Declare which package this plugin intercepts. For example, if you want to intercept gin, you can write: \u0026ldquo;github.com/gin-gonic/gin\u0026rdquo;. Version Checker: This method passes the version number to the enhancement object to verify whether the specified version of the framework is supported. If not, the enhancement program will not be executed. Points: A plugin can define one or more enhancement points. This will be explained in more detail in the following sections. File System: Use //go:embed * in the current file to import all files in this module, which will be used for file copying during the mixed compilation process.  Note: Please declare //skywalking:nocopy at any position in this file to indicate that the file would not be copied. This file is only used for guidance during hybrid compilation. Also, this file involves the use of the embed package, and if the target framework does not import the package embed, a compilation error may occur.\nManage Instrument and Interceptor codes in hierarchy structure Instrument and interceptor codes are placed in root by default. In complex instrumentation scenarios, there could be dozens of interceptors, we provide PluginSourceCodePath to build a hierarchy folder structure to manage those codes.\nNotice: The instrumentation still works without proper setting of this, but the debug tool would lose the location of the source codes.\nExample For example, the framework needs to enhance two packages, as shown in the following directory structure:\n- plugins - test - go.mod - package1 - instrument.go - interceptor.go - package2 - instrument.go - interceptor.go ... In the above directory structure, the test framework needs to provide multiple different enhancement objects. In this case, a PluginSourceCodePath Source Code Path** method needs to be added for each enhancement object, the values of this method should be package1 and package2.\nInstrument Point Instrument points are used to declare that which methods and structs in the current package should be instrumented. They mainly include the following information:\n Package path: If the interception point that needs to be intercepted is not in the root directory of the current package, you need to fill in the relative path to the package. For example, if this interception point wants to instrument content in the github.com/gin-gonic/gin/render directory, you need to fill in render here. Package Name(optional): Define the package name of the current package. If the package name is not defined, the package name of the current package would be used by default. It\u0026rsquo;s used when the package path and package name are not same, such as the name of github.com/emicklei/go-restful/v3 is restful. Matcher(At): Specify which eligible content in the current package path needs to be enhanced. Interceptor: If the current method is being intercepted (whether it\u0026rsquo;s a static method or an instance method), the name of the interceptor must be specified.  Method Matcher Method matchers are used to intercept both static and non-static methods. The specific definitions are as follows:\n// NewStaticMethodEnhance creates a new EnhanceMatcher for static method. 
// name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewStaticMethodEnhance(name string, filters ...MethodFilterOption) // NewMethodEnhance creates a new EnhanceMatcher for method. // receiver: receiver type name of method needs to be enhanced. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewMethodEnhance(receiver, name string, filters ...MethodFilterOption) Filter Option Filter Options are used to validate the parameters or return values in the method. If the method name matches but the Options validation fails, the enhancement would not be performed.\n// WithArgsCount filter methods with specific count of arguments. func WithArgsCount(argsCount int) // WithResultCount filter methods with specific count of results. func WithResultCount(resultCount int) // WithArgType filter methods with specific type of the index of the argument. func WithArgType(argIndex int, dataType string) // WithResultType filter methods with specific type of the index of the result. func WithResultType(argIndex int, dataType string) Demo For example, if you have the following method that needs to be intercepted:\nfunc (c *Context) HandleMethod(name string) bool you can describe it using this condition:\ninstrument.NewMethodEnhance(\u0026#34;*Context\u0026#34;, \u0026#34;HandleMethod\u0026#34;, instrument.WithArgsCount(1), instrument.WithArgType(0, \u0026#34;string\u0026#34;), instrument.WithResultCount(1), instrument.WithResultType(0, \u0026#34;bool\u0026#34;)) Struct Matcher Enhancement structures can embed enhanced fields within specified structs. After the struct is instantiated, custom data content can be added to the specified struct in the method interceptor.\nStruct matchers are used to intercept struct methods. The specific definitions are as follows:\n// NewStructEnhance creates a new EnhanceMatcher for struct. // name: struct name needs to be enhanced.(Public and private structs are supported) // filters: filters for struct. func NewStructEnhance(name string, filters ...StructFilterOption) Filter Option Filter Options are used to validate the fields in the structure.\n// WithFieldExists filter the struct has the field with specific name. func WithFieldExists(fieldName string) // WithFiledType filter the struct has the field with specific name and type. func WithFiledType(filedName, filedType string) Enhanced Instance After completing the definition of the struct enhancement, you can convert the specified instance into the following interface when intercepting methods, and get or set custom field information. 
The interface definition is as follows:\ntype EnhancedInstance interface { // GetSkyWalkingDynamicField get the customized data from instance \tGetSkyWalkingDynamicField() interface{} // SetSkyWalkingDynamicField set the customized data into the instance \tSetSkyWalkingDynamicField(interface{}) } Demo For example, if you have the following struct that needs to be enhanced:\ntype Test struct { value *Context } you can describe it using this condition:\ninstrument.NewStructEnhance(\u0026#34;Test\u0026#34;, instrument.WithFieldExists(\u0026#34;value\u0026#34;), instrument.WithFiledType(\u0026#34;value\u0026#34;, \u0026#34;*Context\u0026#34;)) Next, you can set custom content for the specified enhanced instance when intercepting methods.\nins := testInstance.(instrument.EnhancedInstance) // setting custom content ins.SetSkyWalkingDynamicField(\u0026#34;custom content\u0026#34;) // getting custom content res := ins.GetSkyWalkingDynamicField() Interceptor Interceptors are used to define custom business logic before and after method execution, allowing you to access data from before and after method execution and interact with the Agent Core by using the Agent API.\nThe interceptor definition is as follows, you need to create a new structure and implement it:\ntype Interceptor interface { // BeforeInvoke would be called before the target method invocation.  BeforeInvoke(invocation Invocation) error // AfterInvoke would be called after the target method invocation.  AfterInvoke(invocation Invocation, result ...interface{}) error } Within the interface, you can see the Invocation interface, which defines the context of an interception. The specific definition is as follows:\ntype Invocation interface { // CallerInstance is the instance of the caller, nil if the method is static method.  CallerInstance() interface{} // Args is get the arguments of the method, please cast to the specific type to get more information.  Args() []interface{} // ChangeArg is change the argument value of the method  ChangeArg(int, interface{}) // IsContinue is the flag to control the method invocation, if it is true, the target method would not be invoked.  IsContinue() bool // DefineReturnValues are defined the return value of the method, and continue the method invoked  DefineReturnValues(...interface{}) // SetContext is the customized context of the method invocation, it should be propagated the tracing span.  SetContext(interface{}) // GetContext is get the customized context of the method invocation  GetContext() interface{} } Thread safe The Interceptor instance would define new instance at the current package level, rather than creating a new instance each time a method is intercepted.\nTherefore, do not declare objects in the interceptor, and instead use Invocation.Context to pass data.\nPackage Path If the method you want to intercept is not located in the root directory of the framework, place your interceptor code in the relative location within the plugin. The Agent would only copy files from the same package directory.\nFor example, if you want to intercept a method in github.com/gin-gonic/gin/render, create a render directory in the root of your plugin, and put the interceptor inside it. This ensures that the interceptor is properly included during the copy operation and can be correctly applied to the target package.\nPlugin Configuration Plugin configuration is used to add custom configuration parameters to a specified plugin. 
When users specify configuration items, the plugin can dynamically adapt the content needed in the plugin according to the user\u0026rsquo;s configuration items.\nDeclaration Please declare the configuration file you need in the package you want to use. Declare it using var, and add the //skywalking:config directive to specify that this variable requires dynamic updating.\nBy default, the configuration item belongs to the configuration of the current plugin. For example, if the name of my current plugin is gin, then this configuration item is under the gin plugin. Of course, you can also change it to the http plugin to reference the configuration information of the relevant plugin, in which case you need to specify it as //skywalking:config http.\nItem Each configuration item needs to add a config tag. This is used to specify the name of the current configuration content. By default, it would lowercase all letters and add an _ identifier before each uppercase letter.\nCurrently, it supports basic data types and struct types, and it also supports obtaining data values through environment variables.\nDemo For example, I have declared the following configuration item:\n//skywalking:config http var config struct { ServerCollectParameters bool `config:\u0026#34;server_collect_parameters\u0026#34;` Client struct{ CollectParameters bool `config:\u0026#34;collect_parameters\u0026#34;` } `config:\u0026#34;client\u0026#34;` } In the above example, I created a plugin configuration for http, which includes two configuration items.\n config.ServerCollectParameters: Its configuration is located at http.server_collect_parameters. config.Client.CollectParameter: Its configuration is located at http.client.collect_parameter.  When the plugin needs to be used, it can be accessed directly by reading the config configuration.\nAgent API The Agent API is used when a method is intercepted and interacts with the Agent Core.\nTracing API The Tracing API is used for building distributed tracing, and currently supports the following methods:\n// CreateEntrySpan creates a new entry span. // operationName is the name of the span. // extractor is the extractor to extract the context from the carrier. // opts is the options to create the span. func CreateEntrySpan(operationName string, extractor Extractor, opts ...SpanOption) // CreateLocalSpan creates a new local span. // operationName is the name of the span. // opts is the options to create the span. func CreateLocalSpan(operationName string, opts ...SpanOption) // CreateExitSpan creates a new exit span. // operationName is the name of the span. // peer is the peer address of the span. // injector is the injector to inject the context into the carrier. // opts is the options to create the span. func CreateExitSpan(operationName, peer string, injector Injector, opts ...SpanOption) // ActiveSpan returns the current active span, it can be got the current span in the current goroutine. // If the current goroutine is not in the context of the span, it will return nil. // If get the span from other goroutine, it can only get information but cannot be operated. func ActiveSpan() // GetRuntimeContextValue returns the value of the key in the runtime context, which is current goroutine. // The value can also read from the goroutine which is created by the current goroutine func GetRuntimeContextValue(key string) // SetRuntimeContextValue sets the value of the key in the runtime context. 
func SetRuntimeContextValue(key string, val interface{}) Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\n// Extractor is a tool specification which define how to // extract trace parent context from propagation context type Extractor func(headerKey string) (string, error) // Injector is a tool specification which define how to // inject trace context into propagation context type Injector func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request tracing.CreateEntrySpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request tracing.CreateExitSpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil } Span Option Span Options can be passed when creating a Span to configure the information in the Span.\nThe following options are currently supported:\n// WithLayer set the SpanLayer of the Span func WithLayer(layer SpanLayer) // WithComponent set the component id of the Span func WithComponent(componentID int32) // WithTag set the Tag of the Span func WithTag(key Tag, value string) Span Component The Component ID in Span is used to identify the current component, with its data defined in SkyWalking OAP. If the framework you are writing does not exist in this file, please submit a PR in the SkyWalking project to add the definition of this plugin.\nSpan Operation After creating a Span, you can perform additional operations on it.\n// Span for plugin API type Span interface { // AsyncSpan for the async API \tAsyncSpan // Tag set the Tag of the Span \tTag(Tag, string) // SetSpanLayer set the SpanLayer of the Span \tSetSpanLayer(SpanLayer) // SetOperationName re-set the operation name of the Span \tSetOperationName(string) // SetPeer re-set the peer address of the Span \tSetPeer(string) // Log add log to the Span \tLog(...string) // Error add error log to the Span \tError(...string) // End end the Span \tEnd() } Async Span There is a set of advanced APIs in Span which is specifically designed for async use cases. When setting name, tags, logs, and other operations (including end span) of the span in another goroutine, you should use these APIs.\ntype AsyncSpan interface { // PrepareAsync the span finished at current tracing context, but current span is still alive until AsyncFinish called  PrepareAsync() // AsyncFinish to finished current async span  AsyncFinish() } Following the previous API define, you should following these steps to use the async API:\n Call span.PrepareAsync() to prepare the span to do any operation in another goroutine. Use Span.End() in the original goroutine when your job in the current goroutine is complete. Propagate the span to any other goroutine in your plugin. Once the above steps are all set, call span.AsyncFinish() in any goroutine. When the span.AsyncFinish() is complete for all spans, the all spans would be finished and report to the backend.  
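As an illustration of these steps, here is a minimal sketch (not taken from the SkyWalking codebase): it assumes the plugin-side tracing package can be imported as github.com/apache/skywalking-go/plugins/core/tracing and that CreateLocalSpan returns (Span, error); adjust both to the actual plugin core API you build against.

package example

import (
	"github.com/apache/skywalking-go/plugins/core/tracing" // assumed import path, not verified against the source tree
)

// doAsyncJob shows the PrepareAsync / End / AsyncFinish ordering described above.
func doAsyncJob() error {
	// Assumption: CreateLocalSpan returns (tracing.Span, error); check the real signature.
	span, err := tracing.CreateLocalSpan("async-job")
	if err != nil {
		return err
	}
	// Step 1: prepare the span so it can be operated on in another goroutine.
	span.PrepareAsync()
	// Step 2: end the span in the original goroutine; it stays alive until AsyncFinish is called.
	span.End()

	// Step 3: propagate the span to another goroutine.
	go func() {
		// Operate on the span in the other goroutine, e.g. attach a log entry...
		span.Log("async job finished")
		// Step 4: ...and finish the async part when the work is done.
		span.AsyncFinish()
	}()
	return nil
}

The key point is that End() in the original goroutine does not report the span; reporting happens only after AsyncFinish() has been called in whichever goroutine completes the work.
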
Tracing Context Operation In the Go Agent, Trace Context would continue cross goroutines automatically by default. However, in some cases, goroutine would be context sharing due to be scheduled by the pool mechanism. Consider these advanced APIs to manipulate context and switch the current context.\n// CaptureContext capture current tracing context in the current goroutine. func CaptureContext() ContextSnapshot // ContinueContext continue the tracing context in the current goroutine. func ContinueContext(ctx ContextSnapshot) // CleanContext clean the tracing context in the current goroutine. func CleanContext() Typically, use APIs as following to control or switch the context:\n Use tracing.CaptureContext() to get the ContextSnapshot object. Propagate the snapshot context to any other goroutine in your plugin. Use tracing.ContinueContext(snapshot) to continue the snapshot context in the target goroutine.  Meter API The Meter API is used to record the metrics of the target program, and currently supports the following methods:\n// NewCounter creates a new counter metrics. // name is the name of the metrics // opts is the options for the metrics func NewCounter(name string, opts ...Opt) Counter // NewGauge creates a new gauge metrics. // name is the name of the metrics // getter is the function to get the value of the gauge meter // opts is the options for the metrics func NewGauge(name string, getter func() float64, opts ...Opt) Gauge // NewHistogram creates a new histogram metrics. // name is the name of the metrics // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogram(name string, steps []float64, opts ...Opt) Histogram // NewHistogramWithMinValue creates a new histogram metrics. // name is the name of the metrics // minVal is the min value of the histogram bucket // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogramWithMinValue(name string, minVal float64, steps []float64, opts ...Opt) Histogram // RegisterBeforeCollectHook registers a hook function which will be called before metrics collect. func RegisterBeforeCollectHook(f func()) Meter Option The Meter Options can be passed when creating a Meter to configure the information in the Meter.\n// WithLabel adds a label to the metrics. func WithLabel(key, value string) Opt Meter Type Counter Counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase.\ntype Counter interface { // Get returns the current value of the counter. \tGet() float64 // Inc increments the counter with value. \tInc(val float64) } Gauge Gauge is a metric that represents a single numerical value that can arbitrarily go up and down.\ntype Gauge interface { // Get returns the current value of the gauge.  Get() float64 } Histogram Histogram is a metric that represents the distribution of a set of values.\ntype Histogram interface { // Observe find the value associate bucket and add 1. \tObserve(val float64) // ObserveWithCount find the value associate bucket and add specific count. \tObserveWithCount(val float64, count int64) } Import Plugin Once you have finished developing the plugin, you need to import the completed module into the Agent program and define it in the corresponding file.\nAt this point, your plugin development process is complete. 
When the Agent performs hybrid compilation on the target program, your plugin will be executed as expected.\n","excerpt":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll …","ref":"/docs/skywalking-go/latest/en/development-and-contribution/development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll plugins must follow these steps:\n Create a new plugin module: Create a new project in the specified directory and import the plugin API module. Define the enhancement object: Define the description for the plugin. Invoke the plugin API: Call the API provided by the core to complete the core invocation. Import the plugin module: Import the plugin into the management module for users to use.  Create a new plugin module The plugin must create a new module, which is currently stored in the project\u0026rsquo;s plugins directory.\nPlugins can import the following two modules:\n Agent core: This module provides all the dependencies needed for the plugin, including the plugin API, enhancement declaration objects, etc. Agent core plugin should be github.com/apache/skywalking-go/plugins/core and replaced by the relative location. Framework to be enhanced: Import the framework you wish to enhance.  Note: Plugins should NOT import and use any other modules, as this may cause compilation issues for users. If certain tools are needed, they should be provided by the agent core.\nDefine the enhancement object In the root directory of the project, create a new go file to define the basic information of the plugin. The basic information includes the following methods, corresponding to the Instrument interface:\n Name: The name of the plugin. Please keep this name consistent with the newly created project name. The reason will be explained later. Base Package: Declare which package this plugin intercepts. For example, if you want to intercept gin, you can write: \u0026ldquo;github.com/gin-gonic/gin\u0026rdquo;. Version Checker: This method passes the version number to the enhancement object to verify whether the specified version of the framework is supported. If not, the enhancement program will not be executed. Points: A plugin can define one or more enhancement points. This will be explained in more detail in the following sections. File System: Use //go:embed * in the current file to import all files in this module, which will be used for file copying during the mixed compilation process.  Note: Please declare //skywalking:nocopy at any position in this file to indicate that the file would not be copied. This file is only used for guidance during hybrid compilation. Also, this file involves the use of the embed package, and if the target framework does not import the package embed, a compilation error may occur.\nManage Instrument and Interceptor codes in hierarchy structure Instrument and interceptor codes are placed in root by default. 
In complex instrumentation scenarios, there could be dozens of interceptors, we provide PluginSourceCodePath to build a hierarchy folder structure to manage those codes.\nNotice: The instrumentation still works without proper setting of this, but the debug tool would lose the location of the source codes.\nExample For example, the framework needs to enhance two packages, as shown in the following directory structure:\n- plugins - test - go.mod - package1 - instrument.go - interceptor.go - package2 - instrument.go - interceptor.go ... In the above directory structure, the test framework needs to provide multiple different enhancement objects. In this case, a PluginSourceCodePath Source Code Path** method needs to be added for each enhancement object, the values of this method should be package1 and package2.\nInstrument Point Instrument points are used to declare that which methods and structs in the current package should be instrumented. They mainly include the following information:\n Package path: If the interception point that needs to be intercepted is not in the root directory of the current package, you need to fill in the relative path to the package. For example, if this interception point wants to instrument content in the github.com/gin-gonic/gin/render directory, you need to fill in render here. Package Name(optional): Define the package name of the current package. If the package name is not defined, the package name of the current package would be used by default. It\u0026rsquo;s used when the package path and package name are not same, such as the name of github.com/emicklei/go-restful/v3 is restful. Matcher(At): Specify which eligible content in the current package path needs to be enhanced. Interceptor: If the current method is being intercepted (whether it\u0026rsquo;s a static method or an instance method), the name of the interceptor must be specified.  Method Matcher Method matchers are used to intercept both static and non-static methods. The specific definitions are as follows:\n// NewStaticMethodEnhance creates a new EnhanceMatcher for static method. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewStaticMethodEnhance(name string, filters ...MethodFilterOption) // NewMethodEnhance creates a new EnhanceMatcher for method. // receiver: receiver type name of method needs to be enhanced. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewMethodEnhance(receiver, name string, filters ...MethodFilterOption) Filter Option Filter Options are used to validate the parameters or return values in the method. If the method name matches but the Options validation fails, the enhancement would not be performed.\n// WithArgsCount filter methods with specific count of arguments. func WithArgsCount(argsCount int) // WithResultCount filter methods with specific count of results. func WithResultCount(resultCount int) // WithArgType filter methods with specific type of the index of the argument. func WithArgType(argIndex int, dataType string) // WithResultType filter methods with specific type of the index of the result. 
func WithResultType(argIndex int, dataType string) Demo For example, if you have the following method that needs to be intercepted:\nfunc (c *Context) HandleMethod(name string) bool you can describe it using this condition:\ninstrument.NewMethodEnhance(\u0026#34;*Context\u0026#34;, \u0026#34;HandleMethod\u0026#34;, instrument.WithArgsCount(1), instrument.WithArgType(0, \u0026#34;string\u0026#34;), instrument.WithResultCount(1), instrument.WithResultType(0, \u0026#34;bool\u0026#34;)) Struct Matcher Enhancement structures can embed enhanced fields within specified structs. After the struct is instantiated, custom data content can be added to the specified struct in the method interceptor.\nStruct matchers are used to intercept struct methods. The specific definitions are as follows:\n// NewStructEnhance creates a new EnhanceMatcher for struct. // name: struct name needs to be enhanced.(Public and private structs are supported) // filters: filters for struct. func NewStructEnhance(name string, filters ...StructFilterOption) Filter Option Filter Options are used to validate the fields in the structure.\n// WithFieldExists filter the struct has the field with specific name. func WithFieldExists(fieldName string) // WithFiledType filter the struct has the field with specific name and type. func WithFiledType(filedName, filedType string) Enhanced Instance After completing the definition of the struct enhancement, you can convert the specified instance into the following interface when intercepting methods, and get or set custom field information. The interface definition is as follows:\ntype EnhancedInstance interface { // GetSkyWalkingDynamicField get the customized data from instance \tGetSkyWalkingDynamicField() interface{} // SetSkyWalkingDynamicField set the customized data into the instance \tSetSkyWalkingDynamicField(interface{}) } Demo For example, if you have the following struct that needs to be enhanced:\ntype Test struct { value *Context } you can describe it using this condition:\ninstrument.NewStructEnhance(\u0026#34;Test\u0026#34;, instrument.WithFieldExists(\u0026#34;value\u0026#34;), instrument.WithFiledType(\u0026#34;value\u0026#34;, \u0026#34;*Context\u0026#34;)) Next, you can set custom content for the specified enhanced instance when intercepting methods.\nins := testInstance.(instrument.EnhancedInstance) // setting custom content ins.SetSkyWalkingDynamicField(\u0026#34;custom content\u0026#34;) // getting custom content res := ins.GetSkyWalkingDynamicField() Interceptor Interceptors are used to define custom business logic before and after method execution, allowing you to access data from before and after method execution and interact with the Agent Core by using the Agent API.\nThe interceptor definition is as follows, you need to create a new structure and implement it:\ntype Interceptor interface { // BeforeInvoke would be called before the target method invocation.  BeforeInvoke(invocation Invocation) error // AfterInvoke would be called after the target method invocation.  AfterInvoke(invocation Invocation, result ...interface{}) error } Within the interface, you can see the Invocation interface, which defines the context of an interception. The specific definition is as follows:\ntype Invocation interface { // CallerInstance is the instance of the caller, nil if the method is static method.  CallerInstance() interface{} // Args is get the arguments of the method, please cast to the specific type to get more information.  
Args() []interface{} // ChangeArg is change the argument value of the method  ChangeArg(int, interface{}) // IsContinue is the flag to control the method invocation, if it is true, the target method would not be invoked.  IsContinue() bool // DefineReturnValues are defined the return value of the method, and continue the method invoked  DefineReturnValues(...interface{}) // SetContext is the customized context of the method invocation, it should be propagated the tracing span.  SetContext(interface{}) // GetContext is get the customized context of the method invocation  GetContext() interface{} } Thread safe The Interceptor instance would define new instance at the current package level, rather than creating a new instance each time a method is intercepted.\nTherefore, do not declare objects in the interceptor, and instead use Invocation.Context to pass data.\nPackage Path If the method you want to intercept is not located in the root directory of the framework, place your interceptor code in the relative location within the plugin. The Agent would only copy files from the same package directory.\nFor example, if you want to intercept a method in github.com/gin-gonic/gin/render, create a render directory in the root of your plugin, and put the interceptor inside it. This ensures that the interceptor is properly included during the copy operation and can be correctly applied to the target package.\nPlugin Configuration Plugin configuration is used to add custom configuration parameters to a specified plugin. When users specify configuration items, the plugin can dynamically adapt the content needed in the plugin according to the user\u0026rsquo;s configuration items.\nDeclaration Please declare the configuration file you need in the package you want to use. Declare it using var, and add the //skywalking:config directive to specify that this variable requires dynamic updating.\nBy default, the configuration item belongs to the configuration of the current plugin. For example, if the name of my current plugin is gin, then this configuration item is under the gin plugin. Of course, you can also change it to the http plugin to reference the configuration information of the relevant plugin, in which case you need to specify it as //skywalking:config http.\nItem Each configuration item needs to add a config tag. This is used to specify the name of the current configuration content. By default, it would lowercase all letters and add an _ identifier before each uppercase letter.\nCurrently, it supports basic data types and struct types, and it also supports obtaining data values through environment variables.\nDemo For example, I have declared the following configuration item:\n//skywalking:config http var config struct { ServerCollectParameters bool `config:\u0026#34;server_collect_parameters\u0026#34;` Client struct{ CollectParameters bool `config:\u0026#34;collect_parameters\u0026#34;` } `config:\u0026#34;client\u0026#34;` } In the above example, I created a plugin configuration for http, which includes two configuration items.\n config.ServerCollectParameters: Its configuration is located at http.server_collect_parameters. config.Client.CollectParameter: Its configuration is located at http.client.collect_parameter.  
When the plugin needs to be used, it can be accessed directly by reading the config configuration.\nAgent API The Agent API is used when a method is intercepted and interacts with the Agent Core.\nTracing API The Tracing API is used for building distributed tracing, and currently supports the following methods:\n// CreateEntrySpan creates a new entry span. // operationName is the name of the span. // extractor is the extractor to extract the context from the carrier. // opts is the options to create the span. func CreateEntrySpan(operationName string, extractor Extractor, opts ...SpanOption) // CreateLocalSpan creates a new local span. // operationName is the name of the span. // opts is the options to create the span. func CreateLocalSpan(operationName string, opts ...SpanOption) // CreateExitSpan creates a new exit span. // operationName is the name of the span. // peer is the peer address of the span. // injector is the injector to inject the context into the carrier. // opts is the options to create the span. func CreateExitSpan(operationName, peer string, injector Injector, opts ...SpanOption) // ActiveSpan returns the current active span, it can be got the current span in the current goroutine. // If the current goroutine is not in the context of the span, it will return nil. // If get the span from other goroutine, it can only get information but cannot be operated. func ActiveSpan() // GetRuntimeContextValue returns the value of the key in the runtime context, which is current goroutine. // The value can also read from the goroutine which is created by the current goroutine func GetRuntimeContextValue(key string) // SetRuntimeContextValue sets the value of the key in the runtime context. func SetRuntimeContextValue(key string, val interface{}) Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\n// Extractor is a tool specification which define how to // extract trace parent context from propagation context type Extractor func(headerKey string) (string, error) // Injector is a tool specification which define how to // inject trace context into propagation context type Injector func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request tracing.CreateEntrySpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request tracing.CreateExitSpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil } Span Option Span Options can be passed when creating a Span to configure the information in the Span.\nThe following options are currently supported:\n// WithLayer set the SpanLayer of the Span func WithLayer(layer SpanLayer) // WithComponent set the component id of the Span func WithComponent(componentID int32) // WithTag set the Tag of the Span func WithTag(key Tag, value string) Span Component The Component ID in Span is used to identify the current component, with its data defined in SkyWalking OAP. 
If the framework you are writing does not exist in this file, please submit a PR in the SkyWalking project to add the definition of this plugin.\nSpan Operation After creating a Span, you can perform additional operations on it.\n// Span for plugin API type Span interface { // AsyncSpan for the async API \tAsyncSpan // Tag set the Tag of the Span \tTag(Tag, string) // SetSpanLayer set the SpanLayer of the Span \tSetSpanLayer(SpanLayer) // SetOperationName re-set the operation name of the Span \tSetOperationName(string) // SetPeer re-set the peer address of the Span \tSetPeer(string) // Log add log to the Span \tLog(...string) // Error add error log to the Span \tError(...string) // End end the Span \tEnd() } Async Span There is a set of advanced APIs in Span which is specifically designed for async use cases. When setting name, tags, logs, and other operations (including end span) of the span in another goroutine, you should use these APIs.\ntype AsyncSpan interface { // PrepareAsync the span finished at current tracing context, but current span is still alive until AsyncFinish called  PrepareAsync() // AsyncFinish to finished current async span  AsyncFinish() } Following the previous API define, you should following these steps to use the async API:\n Call span.PrepareAsync() to prepare the span to do any operation in another goroutine. Use Span.End() in the original goroutine when your job in the current goroutine is complete. Propagate the span to any other goroutine in your plugin. Once the above steps are all set, call span.AsyncFinish() in any goroutine. When the span.AsyncFinish() is complete for all spans, the all spans would be finished and report to the backend.  Tracing Context Operation In the Go Agent, Trace Context would continue cross goroutines automatically by default. However, in some cases, goroutine would be context sharing due to be scheduled by the pool mechanism. Consider these advanced APIs to manipulate context and switch the current context.\n// CaptureContext capture current tracing context in the current goroutine. func CaptureContext() ContextSnapshot // ContinueContext continue the tracing context in the current goroutine. func ContinueContext(ctx ContextSnapshot) // CleanContext clean the tracing context in the current goroutine. func CleanContext() Typically, use APIs as following to control or switch the context:\n Use tracing.CaptureContext() to get the ContextSnapshot object. Propagate the snapshot context to any other goroutine in your plugin. Use tracing.ContinueContext(snapshot) to continue the snapshot context in the target goroutine.  Meter API The Meter API is used to record the metrics of the target program, and currently supports the following methods:\n// NewCounter creates a new counter metrics. // name is the name of the metrics // opts is the options for the metrics func NewCounter(name string, opts ...Opt) Counter // NewGauge creates a new gauge metrics. // name is the name of the metrics // getter is the function to get the value of the gauge meter // opts is the options for the metrics func NewGauge(name string, getter func() float64, opts ...Opt) Gauge // NewHistogram creates a new histogram metrics. // name is the name of the metrics // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogram(name string, steps []float64, opts ...Opt) Histogram // NewHistogramWithMinValue creates a new histogram metrics. 
// name is the name of the metrics // minVal is the min value of the histogram bucket // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogramWithMinValue(name string, minVal float64, steps []float64, opts ...Opt) Histogram // RegisterBeforeCollectHook registers a hook function which will be called before metrics collect. func RegisterBeforeCollectHook(f func()) Meter Option The Meter Options can be passed when creating a Meter to configure the information in the Meter.\n// WithLabel adds a label to the metrics. func WithLabel(key, value string) Opt Meter Type Counter Counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase.\ntype Counter interface { // Get returns the current value of the counter. \tGet() float64 // Inc increments the counter with value. \tInc(val float64) } Gauge Gauge is a metric that represents a single numerical value that can arbitrarily go up and down.\ntype Gauge interface { // Get returns the current value of the gauge.  Get() float64 } Histogram Histogram is a metric that represents the distribution of a set of values.\ntype Histogram interface { // Observe find the value associate bucket and add 1. \tObserve(val float64) // ObserveWithCount find the value associate bucket and add specific count. \tObserveWithCount(val float64, count int64) } Import Plugin Once you have finished developing the plugin, you need to import the completed module into the Agent program and define it in the corresponding file.\nAt this point, your plugin development process is complete. When the Agent performs hybrid compilation on the target program, your plugin will be executed as expected.\n","excerpt":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll …","ref":"/docs/skywalking-go/next/en/development-and-contribution/development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll plugins must follow these steps:\n Create a new plugin module: Create a new project in the specified directory and import the plugin API module. Define the enhancement object: Define the description for the plugin. Invoke the plugin API: Call the API provided by the core to complete the core invocation. Import the plugin module: Import the plugin into the management module for users to use.  Create a new plugin module The plugin must create a new module, which is currently stored in the project\u0026rsquo;s plugins directory.\nPlugins can import the following two modules:\n Agent core: This module provides all the dependencies needed for the plugin, including the plugin API, enhancement declaration objects, etc. Agent core plugin should be github.com/apache/skywalking-go/plugins/core and replaced by the relative location. Framework to be enhanced: Import the framework you wish to enhance.  Note: Plugins should NOT import and use any other modules, as this may cause compilation issues for users. If certain tools are needed, they should be provided by the agent core.\nDefine the enhancement object In the root directory of the project, create a new go file to define the basic information of the plugin. The basic information includes the following methods, corresponding to the Instrument interface:\n Name: The name of the plugin. Please keep this name consistent with the newly created project name. The reason will be explained later. 
Base Package: Declare which package this plugin intercepts. For example, if you want to intercept gin, you can write: \u0026ldquo;github.com/gin-gonic/gin\u0026rdquo;. Version Checker: This method passes the version number to the enhancement object to verify whether the specified version of the framework is supported. If not, the enhancement program will not be executed. Points: A plugin can define one or more enhancement points. This will be explained in more detail in the following sections. File System: Use //go:embed * in the current file to import all files in this module, which will be used for file copying during the mixed compilation process.  Note: Please declare //skywalking:nocopy at any position in this file to indicate that the file would not be copied. This file is only used for guidance during hybrid compilation. Also, this file involves the use of the embed package, and if the target framework does not import the package embed, a compilation error may occur.\nManage Instrument and Interceptor codes in hierarchy structure Instrument and interceptor codes are placed in root by default. In complex instrumentation scenarios, there could be dozens of interceptors, we provide PluginSourceCodePath to build a hierarchy folder structure to manage those codes.\nNotice: The instrumentation still works without proper setting of this, but the debug tool would lose the location of the source codes.\nExample For example, the framework needs to enhance two packages, as shown in the following directory structure:\n- plugins - test - go.mod - package1 - instrument.go - interceptor.go - package2 - instrument.go - interceptor.go ... In the above directory structure, the test framework needs to provide multiple different enhancement objects. In this case, a PluginSourceCodePath Source Code Path** method needs to be added for each enhancement object, the values of this method should be package1 and package2.\nInstrument Point Instrument points are used to declare that which methods and structs in the current package should be instrumented. They mainly include the following information:\n Package path: If the interception point that needs to be intercepted is not in the root directory of the current package, you need to fill in the relative path to the package. For example, if this interception point wants to instrument content in the github.com/gin-gonic/gin/render directory, you need to fill in render here. Package Name(optional): Define the package name of the current package. If the package name is not defined, the package name of the current package would be used by default. It\u0026rsquo;s used when the package path and package name are not same, such as the name of github.com/emicklei/go-restful/v3 is restful. Matcher(At): Specify which eligible content in the current package path needs to be enhanced. Interceptor: If the current method is being intercepted (whether it\u0026rsquo;s a static method or an instance method), the name of the interceptor must be specified.  Method Matcher Method matchers are used to intercept both static and non-static methods. The specific definitions are as follows:\n// NewStaticMethodEnhance creates a new EnhanceMatcher for static method. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewStaticMethodEnhance(name string, filters ...MethodFilterOption) // NewMethodEnhance creates a new EnhanceMatcher for method. // receiver: receiver type name of method needs to be enhanced. 
// name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewMethodEnhance(receiver, name string, filters ...MethodFilterOption) Filter Option Filter Options are used to validate the parameters or return values in the method. If the method name matches but the Options validation fails, the enhancement would not be performed.\n// WithArgsCount filter methods with specific count of arguments. func WithArgsCount(argsCount int) // WithResultCount filter methods with specific count of results. func WithResultCount(resultCount int) // WithArgType filter methods with specific type of the index of the argument. func WithArgType(argIndex int, dataType string) // WithResultType filter methods with specific type of the index of the result. func WithResultType(argIndex int, dataType string) Demo For example, if you have the following method that needs to be intercepted:\nfunc (c *Context) HandleMethod(name string) bool you can describe it using this condition:\ninstrument.NewMethodEnhance(\u0026#34;*Context\u0026#34;, \u0026#34;HandleMethod\u0026#34;, instrument.WithArgsCount(1), instrument.WithArgType(0, \u0026#34;string\u0026#34;), instrument.WithResultCount(1), instrument.WithResultType(0, \u0026#34;bool\u0026#34;)) Struct Matcher Enhancement structures can embed enhanced fields within specified structs. After the struct is instantiated, custom data content can be added to the specified struct in the method interceptor.\nStruct matchers are used to intercept struct methods. The specific definitions are as follows:\n// NewStructEnhance creates a new EnhanceMatcher for struct. // name: struct name needs to be enhanced.(Public and private structs are supported) // filters: filters for struct. func NewStructEnhance(name string, filters ...StructFilterOption) Filter Option Filter Options are used to validate the fields in the structure.\n// WithFieldExists filter the struct has the field with specific name. func WithFieldExists(fieldName string) // WithFiledType filter the struct has the field with specific name and type. func WithFiledType(filedName, filedType string) Enhanced Instance After completing the definition of the struct enhancement, you can convert the specified instance into the following interface when intercepting methods, and get or set custom field information. 
The interface definition is as follows:\ntype EnhancedInstance interface { // GetSkyWalkingDynamicField get the customized data from instance \tGetSkyWalkingDynamicField() interface{} // SetSkyWalkingDynamicField set the customized data into the instance \tSetSkyWalkingDynamicField(interface{}) } Demo For example, if you have the following struct that needs to be enhanced:\ntype Test struct { value *Context } you can describe it using this condition:\ninstrument.NewStructEnhance(\u0026#34;Test\u0026#34;, instrument.WithFieldExists(\u0026#34;value\u0026#34;), instrument.WithFiledType(\u0026#34;value\u0026#34;, \u0026#34;*Context\u0026#34;)) Next, you can set custom content for the specified enhanced instance when intercepting methods.\nins := testInstance.(instrument.EnhancedInstance) // setting custom content ins.SetSkyWalkingDynamicField(\u0026#34;custom content\u0026#34;) // getting custom content res := ins.GetSkyWalkingDynamicField() Interceptor Interceptors are used to define custom business logic before and after method execution, allowing you to access data from before and after method execution and interact with the Agent Core by using the Agent API.\nThe interceptor definition is as follows, you need to create a new structure and implement it:\ntype Interceptor interface { // BeforeInvoke would be called before the target method invocation.  BeforeInvoke(invocation Invocation) error // AfterInvoke would be called after the target method invocation.  AfterInvoke(invocation Invocation, result ...interface{}) error } Within the interface, you can see the Invocation interface, which defines the context of an interception. The specific definition is as follows:\ntype Invocation interface { // CallerInstance is the instance of the caller, nil if the method is static method.  CallerInstance() interface{} // Args is get the arguments of the method, please cast to the specific type to get more information.  Args() []interface{} // ChangeArg is change the argument value of the method  ChangeArg(int, interface{}) // IsContinue is the flag to control the method invocation, if it is true, the target method would not be invoked.  IsContinue() bool // DefineReturnValues are defined the return value of the method, and continue the method invoked  DefineReturnValues(...interface{}) // SetContext is the customized context of the method invocation, it should be propagated the tracing span.  SetContext(interface{}) // GetContext is get the customized context of the method invocation  GetContext() interface{} } Thread safe The Interceptor instance would define new instance at the current package level, rather than creating a new instance each time a method is intercepted.\nTherefore, do not declare objects in the interceptor, and instead use Invocation.Context to pass data.\nPackage Path If the method you want to intercept is not located in the root directory of the framework, place your interceptor code in the relative location within the plugin. The Agent would only copy files from the same package directory.\nFor example, if you want to intercept a method in github.com/gin-gonic/gin/render, create a render directory in the root of your plugin, and put the interceptor inside it. This ensures that the interceptor is properly included during the copy operation and can be correctly applied to the target package.\nPlugin Configuration Plugin configuration is used to add custom configuration parameters to a specified plugin. 
When users specify configuration items, the plugin can dynamically adapt the content needed in the plugin according to the user\u0026rsquo;s configuration items.\nDeclaration Please declare the configuration file you need in the package you want to use. Declare it using var, and add the //skywalking:config directive to specify that this variable requires dynamic updating.\nBy default, the configuration item belongs to the configuration of the current plugin. For example, if the name of my current plugin is gin, then this configuration item is under the gin plugin. Of course, you can also change it to the http plugin to reference the configuration information of the relevant plugin, in which case you need to specify it as //skywalking:config http.\nItem Each configuration item needs to add a config tag. This is used to specify the name of the current configuration content. By default, it would lowercase all letters and add an _ identifier before each uppercase letter.\nCurrently, it supports basic data types and struct types, and it also supports obtaining data values through environment variables.\nDemo For example, I have declared the following configuration item:\n//skywalking:config http var config struct { ServerCollectParameters bool `config:\u0026#34;server_collect_parameters\u0026#34;` Client struct{ CollectParameters bool `config:\u0026#34;collect_parameters\u0026#34;` } `config:\u0026#34;client\u0026#34;` } In the above example, I created a plugin configuration for http, which includes two configuration items.\n config.ServerCollectParameters: Its configuration is located at http.server_collect_parameters. config.Client.CollectParameter: Its configuration is located at http.client.collect_parameter.  When the plugin needs to be used, it can be accessed directly by reading the config configuration.\nAgent API The Agent API is used when a method is intercepted and interacts with the Agent Core.\nTracing API The Tracing API is used for building distributed tracing, and currently supports the following methods:\n// CreateEntrySpan creates a new entry span. // operationName is the name of the span. // extractor is the extractor to extract the context from the carrier. // opts is the options to create the span. func CreateEntrySpan(operationName string, extractor Extractor, opts ...SpanOption) // CreateLocalSpan creates a new local span. // operationName is the name of the span. // opts is the options to create the span. func CreateLocalSpan(operationName string, opts ...SpanOption) // CreateExitSpan creates a new exit span. // operationName is the name of the span. // peer is the peer address of the span. // injector is the injector to inject the context into the carrier. // opts is the options to create the span. func CreateExitSpan(operationName, peer string, injector Injector, opts ...SpanOption) // ActiveSpan returns the current active span, it can be got the current span in the current goroutine. // If the current goroutine is not in the context of the span, it will return nil. // If get the span from other goroutine, it can only get information but cannot be operated. func ActiveSpan() // GetRuntimeContextValue returns the value of the key in the runtime context, which is current goroutine. // The value can also read from the goroutine which is created by the current goroutine func GetRuntimeContextValue(key string) // SetRuntimeContextValue sets the value of the key in the runtime context. 
func SetRuntimeContextValue(key string, val interface{}) Context Carrier The context carrier is used to pass the context between different applications.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\n// Extractor is a tool specification which defines how to // extract trace parent context from propagation context type Extractor func(headerKey string) (string, error) // Injector is a tool specification which defines how to // inject trace context into propagation context type Injector func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request tracing.CreateEntrySpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request tracing.CreateExitSpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Span Option Span Options can be passed when creating a Span to configure the information in the Span.\nThe following options are currently supported:\n// WithLayer set the SpanLayer of the Span func WithLayer(layer SpanLayer) // WithComponent set the component id of the Span func WithComponent(componentID int32) // WithTag set the Tag of the Span func WithTag(key Tag, value string) Span Component The Component ID in Span is used to identify the current component, with its data defined in SkyWalking OAP. If the framework you are writing does not exist in this file, please submit a PR in the SkyWalking project to add the definition of this plugin.\nSpan Operation After creating a Span, you can perform additional operations on it.\n// Span for plugin API type Span interface { // AsyncSpan for the async API \tAsyncSpan // Tag set the Tag of the Span \tTag(Tag, string) // SetSpanLayer set the SpanLayer of the Span \tSetSpanLayer(SpanLayer) // SetOperationName re-set the operation name of the Span \tSetOperationName(string) // SetPeer re-set the peer address of the Span \tSetPeer(string) // Log add log to the Span \tLog(...string) // Error add error log to the Span \tError(...string) // End end the Span \tEnd() } Async Span There is a set of advanced APIs in Span which is specifically designed for async use cases. When setting the name, tags, logs, and other operations (including ending the span) of the span in another goroutine, you should use these APIs.\ntype AsyncSpan interface { // PrepareAsync the span finished at current tracing context, but current span is still alive until AsyncFinish called  PrepareAsync() // AsyncFinish to finished current async span  AsyncFinish() } Following the API definitions above, you should follow these steps to use the async API:\n Call span.PrepareAsync() to prepare the span to do any operation in another goroutine. Use Span.End() in the original goroutine when your job in the current goroutine is complete. Propagate the span to any other goroutine in your plugin. Once the above steps are all set, call span.AsyncFinish() in any goroutine. When span.AsyncFinish() is complete for all spans, all spans would be finished and reported to the backend.  
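The following is a minimal sketch (not taken from the plugin codebase) of how these steps fit together inside an interceptor; it assumes span is a Span obtained from the Tracing API above, and the log content is only illustrative:
// illustration only: async span handling, assuming span came from the Tracing API above
span.PrepareAsync()                                  // step 1: prepare the span for use in another goroutine
go func() {                                          // step 3: propagate the span into the other goroutine
    span.Log(\u0026#34;event\u0026#34;, \u0026#34;async job finished\u0026#34;)   // operations on the span are allowed here after PrepareAsync
    span.AsyncFinish()                               // step 4: the span is reported only after this call
}()
span.End()                                           // step 2: end the span in the original goroutine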
Tracing Context Operation In the Go Agent, Trace Context would continue cross goroutines automatically by default. However, in some cases, goroutine would be context sharing due to be scheduled by the pool mechanism. Consider these advanced APIs to manipulate context and switch the current context.\n// CaptureContext capture current tracing context in the current goroutine. func CaptureContext() ContextSnapshot // ContinueContext continue the tracing context in the current goroutine. func ContinueContext(ctx ContextSnapshot) // CleanContext clean the tracing context in the current goroutine. func CleanContext() Typically, use APIs as following to control or switch the context:\n Use tracing.CaptureContext() to get the ContextSnapshot object. Propagate the snapshot context to any other goroutine in your plugin. Use tracing.ContinueContext(snapshot) to continue the snapshot context in the target goroutine.  Meter API The Meter API is used to record the metrics of the target program, and currently supports the following methods:\n// NewCounter creates a new counter metrics. // name is the name of the metrics // opts is the options for the metrics func NewCounter(name string, opts ...Opt) Counter // NewGauge creates a new gauge metrics. // name is the name of the metrics // getter is the function to get the value of the gauge meter // opts is the options for the metrics func NewGauge(name string, getter func() float64, opts ...Opt) Gauge // NewHistogram creates a new histogram metrics. // name is the name of the metrics // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogram(name string, steps []float64, opts ...Opt) Histogram // NewHistogramWithMinValue creates a new histogram metrics. // name is the name of the metrics // minVal is the min value of the histogram bucket // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogramWithMinValue(name string, minVal float64, steps []float64, opts ...Opt) Histogram // RegisterBeforeCollectHook registers a hook function which will be called before metrics collect. func RegisterBeforeCollectHook(f func()) Meter Option The Meter Options can be passed when creating a Meter to configure the information in the Meter.\n// WithLabel adds a label to the metrics. func WithLabel(key, value string) Opt Meter Type Counter Counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase.\ntype Counter interface { // Get returns the current value of the counter. \tGet() float64 // Inc increments the counter with value. \tInc(val float64) } Gauge Gauge is a metric that represents a single numerical value that can arbitrarily go up and down.\ntype Gauge interface { // Get returns the current value of the gauge.  Get() float64 } Histogram Histogram is a metric that represents the distribution of a set of values.\ntype Histogram interface { // Observe find the value associate bucket and add 1. \tObserve(val float64) // ObserveWithCount find the value associate bucket and add specific count. \tObserveWithCount(val float64, count int64) } Import Plugin Once you have finished developing the plugin, you need to import the completed module into the Agent program and define it in the corresponding file.\nAt this point, your plugin development process is complete. 
When the Agent performs hybrid compilation on the target program, your plugin will be executed as expected.\n","excerpt":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll …","ref":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  
See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. * @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. 
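As a brief, hedged illustration (not an excerpt from a real plugin), the two attributes are usually set right after the span is created; operationName and contextCarrier below are assumed to come from the intercepted method:
// illustration only: set component and layer immediately after creating the span
AbstractSpan span = ContextManager.createEntrySpan(operationName, contextCarrier);
span.setComponent(ComponentsDefine.TOMCAT); // one of the component ids reserved by SkyWalking
span.setLayer(SpanLayer.HTTP);              // mark the span layer for backend analysis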
For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statements is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check the Slow Database Statement document of the OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires statistics. The value of x-le should be in JSON format. There are two options:\n Define a separate logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analyzes database (SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records the database type, such as sql, cassandra, Elasticsearch. db.statement records the sql statement of the database access.  Read the backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analyzes cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type, usually the official name of the cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to the cache server (e.g. setnx) cache.op indicates whether the command is used for a write or read operation; usually the value is converted from the command cache.key indicates the cache key that would be sent to the cache server; this tag may be null, as usually only string-type keys are collected.  In order to decide flexibly which op a command should be converted to, it is better to provide a config property. 
Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analysis MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates MQ queue name mq.topic indicates MQ topic name , It\u0026rsquo;s optional as some MQ don\u0026rsquo;t hava concept of topic transmission.latency The transmission latency from consumer to producer. Usually you needn\u0026rsquo;t to record this tag manually, instead to call contextCarrier.extensionInjector().injectSendingTimestamp(); to record tag sendingTimestamp on producer side , and SkyWalking would record this tag on consumer side if sw8-x context carrier(from producer side) contains sendingTimestamp  Notice , you should set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation tech and AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC(MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. 
Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because of the fact that ThirdPartyClass dose not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but it doesn\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, Use Full Qualified Class Name String Literature Instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexcepted methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClassName() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs, if you want to change the argument ref in interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn Skywalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. 
For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. 
* * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","excerpt":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin. …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/java-plugin-development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. 
This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. 
* @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. 
setnx) cache.op indicates the command is used for write or read operation , usually the value is converting from command cache.key indicates the cache key that would be sent to cache server , this tag maybe null , as string type key would be collected usually.  In order to decide which op should be converted to flexibly , It\u0026rsquo;s better that providing config property . Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analysis MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates MQ queue name mq.topic indicates MQ topic name , It\u0026rsquo;s optional as some MQ don\u0026rsquo;t hava concept of topic transmission.latency The transmission latency from consumer to producer. Usually you needn\u0026rsquo;t to record this tag manually, instead to call contextCarrier.extensionInjector().injectSendingTimestamp(); to record tag sendingTimestamp on producer side , and SkyWalking would record this tag on consumer side if sw8-x context carrier(from producer side) contains sendingTimestamp  Notice , you should set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation tech and AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  
Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC(MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because of the fact that ThirdPartyClass dose not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but it doesn\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, Use Full Qualified Class Name String Literature Instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexcepted methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClassName() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs, if you want to change the argument ref in interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn Skywalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . 
In SkyWalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Checks whether there is a certain annotation on the target method. ReturnTypeNameMatch: Checks the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Checks the argument index and the type name (package name + . + class name) of the target method.  Attention:\n When using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (the array type descriptor defined in the JVM Specification) to define an array type. For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd the plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions only.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The V2 API interceptor replaces the MethodInterceptResult result parameter of beforeMethod with a MethodInvocationContext context, which is also added as a new parameter to afterMethod and handleMethodException.\nThe MethodInvocationContext context is only shared within a single invocation, so it is safe to use under concurrent execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. 
*/ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","excerpt":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin. …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/java-plugin-development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. 
This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. 
* @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statements is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check the Slow Database Statement document of the OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that does not represent a real RPC call, but still requires statistics. The value of x-le should be in JSON format. There are two options:\n Define a separate logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local spans.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare that the current local span represents a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analyzes database (SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records the database type, such as sql, cassandra, Elasticsearch. db.statement records the SQL statement of the database access.  Read the backend\u0026rsquo;s virtual database doc for more details. A tagging sketch follows.
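As a hedged illustration of how these tags are typically attached, the following sketch tags an exit span for a SQL query; the endpoint name, peer address, SQL text and component constant are placeholders, not taken from a specific plugin, and it assumes the two-argument createExitSpan overload (without a ContextCarrier) together with ContextManager, Tags, SpanLayer and ComponentsDefine from the agent core packages.

// Create an exit span for a database call; the backend derives virtual
// database metrics from db.type and db.statement on exit spans.
AbstractSpan span = ContextManager.createExitSpan("Mysql/JDBC/PreparedStatement/execute", "127.0.0.1:3306");
span.setComponent(ComponentsDefine.MYSQL_JDBC_DRIVER);  // illustrative component constant
span.setLayer(SpanLayer.DB);
Tags.DB_TYPE.set(span, "sql");
Tags.DB_STATEMENT.set(span, "SELECT name FROM user WHERE id = ?");
// ... perform the actual database call here ...
ContextManager.stopSpan();

The OAP side can then sample slow statements for this exit span based on the thresholds mentioned above.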
Virtual Cache Relative Tags SkyWalking analyzes cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type, usually the official name of the cache (e.g. Redis) cache.cmd indicates the cache command that is sent to the cache server (e.g. setnx) cache.op indicates whether the command is used for a write or a read operation; the value is usually derived from the command cache.key indicates the cache key that is sent to the cache server; this tag may be null, as usually only string-typed keys are collected.  To decide flexibly which commands map to which op, it is better to provide a config property. See the Jedis-4.x-plugin for reference.\nVirtual Message Queue (MQ) Relative Tags SkyWalking analyzes MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates the MQ queue name mq.topic indicates the MQ topic name; it is optional, as some MQ systems do not have the concept of a topic transmission.latency is the transmission latency from the producer to the consumer. Usually you do not need to record this tag manually; instead, call contextCarrier.extensionInjector().injectSendingTimestamp(); to record the tag sendingTimestamp on the producer side, and SkyWalking records this tag on the consumer side if the sw8-x context carrier (from the producer side) contains sendingTimestamp  Notice, you should set the peer on both sides (producer and consumer), and the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution). A sketch of this flow is shown after the v1 APIs list below.  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation techniques and the AOP concept. SkyWalking has packaged the byte code manipulation and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructors, instance methods and class methods.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  
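Tying the async span steps above together, here is a minimal sketch of the lifecycle, assuming a hypothetical local operation name, an ExecutorService named executor, and an illustrative tag key; none of these names come from a real plugin.

// In the original thread, with an active tracing context:
AbstractSpan span = ContextManager.createLocalSpan("Demo/asyncWork");
span.prepareForAsync();      // keep the span alive after it is stopped in this context
ContextManager.stopSpan();   // finish the span in the current tracing context

// In any other thread, once the asynchronous work is done:
executor.submit(() -> {
    try {
        span.tag("demo.result", "ok");   // tags/logs may still be recorded while the span is alive
    } finally {
        span.asyncFinish();              // must be called exactly once per prepareForAsync()
    }
});

The tracing context reports to the backend only after every prepared span has called asyncFinish, which matches the execution-count rule stated in the API comments above.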
Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC(MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because of the fact that ThirdPartyClass dose not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but it doesn\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, Use Full Qualified Class Name String Literature Instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexcepted methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClassName() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs, if you want to change the argument ref in interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn Skywalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . 
+ class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. 
*/ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","excerpt":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin. …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/java-plugin-development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. 
This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. 
* @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. 
setnx) cache.op indicates the command is used for write or read operation , usually the value is converting from command cache.key indicates the cache key that would be sent to cache server , this tag maybe null , as string type key would be collected usually.  In order to decide which op should be converted to flexibly , It\u0026rsquo;s better that providing config property . Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analysis MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates MQ queue name mq.topic indicates MQ topic name , It\u0026rsquo;s optional as some MQ don\u0026rsquo;t hava concept of topic transmission.latency The transmission latency from consumer to producer. Usually you needn\u0026rsquo;t to record this tag manually, instead to call contextCarrier.extensionInjector().injectSendingTimestamp(); to record tag sendingTimestamp on producer side , and SkyWalking would record this tag on consumer side if sw8-x context carrier(from producer side) contains sendingTimestamp  Notice , you should set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation tech and AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  
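Returning to the virtual MQ tags described earlier in this section, the following is a hedged sketch of the producer side; the operation name, peer address, topic, queue and component constant are placeholders, not taken from a specific plugin.

// Producer side: create an exit span, tag it for virtual MQ metrics and
// inject the sending timestamp so the consumer side can compute transmission.latency.
ContextCarrier contextCarrier = new ContextCarrier();
AbstractSpan span = ContextManager.createExitSpan("DemoMQ/Topic/order/Producer", contextCarrier, "mq-cluster.example.com:9876");
span.setComponent(ComponentsDefine.ROCKET_MQ_PRODUCER);   // pick the component matching the real MQ client
span.setLayer(SpanLayer.MQ);
Tags.MQ_TOPIC.set(span, "order");
Tags.MQ_QUEUE.set(span, "order-queue-0");
contextCarrier.extensionInjector().injectSendingTimestamp();
// Copy all CarrierItems into the message headers here, then send the message.
ContextManager.stopSpan();

On the consumer side, the peer must be set as well, and SkyWalking records transmission.latency automatically when the incoming sw8-x context carries sendingTimestamp.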
Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC(MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because of the fact that ThirdPartyClass dose not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but it doesn\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, Use Full Qualified Class Name String Literature Instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexcepted methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClassName() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs, if you want to change the argument ref in interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn Skywalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . 
+ class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. 
*/ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","excerpt":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin. …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/java-plugin-development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. 
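Before getting into cross-process propagation, here is a minimal sketch of how the three span types above map onto ContextManager calls. Only the API names (createEntrySpan, createLocalSpan, createExitSpan, stopSpan, setComponent, setLayer) come from this guide; the class name, operation names, peer address, and the ComponentsDefine constants (TOMCAT, HTTPCLIENT) are illustrative assumptions, and the import package names may vary slightly between agent versions.

import org.apache.skywalking.apm.agent.core.context.ContextCarrier;
import org.apache.skywalking.apm.agent.core.context.ContextManager;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;
import org.apache.skywalking.apm.agent.core.context.trace.SpanLayer;
import org.apache.skywalking.apm.network.trace.component.ComponentsDefine;

public class SpanTypeSketch {
    // EntrySpan: the server/provider side, created with the carrier extracted from the incoming request.
    void onServerRequest(ContextCarrier carrierFromHeaders) {
        AbstractSpan entry = ContextManager.createEntrySpan("/pets/{id}", carrierFromHeaders);
        entry.setComponent(ComponentsDefine.TOMCAT); // assumed constant; use the one matching your server
        entry.setLayer(SpanLayer.HTTP);
        // ... delegate to the business code ...
        ContextManager.stopSpan();
    }

    // LocalSpan: a plain in-process method, neither serving nor calling a remote peer.
    void localWork() {
        ContextManager.createLocalSpan("PetService.calculatePrice");
        // ... business code ...
        ContextManager.stopSpan();
    }

    // ExitSpan: the client/producer side, created together with a fresh ContextCarrier for propagation.
    void onClientCall() {
        ContextCarrier carrier = new ContextCarrier();
        AbstractSpan exit = ContextManager.createExitSpan("/pets/{id}", carrier, "127.0.0.1:8080");
        exit.setComponent(ComponentsDefine.HTTPCLIENT); // assumed constant; use the one matching your client
        exit.setLayer(SpanLayer.HTTP);
        // the carrier items would then be written into the outgoing request headers
        ContextManager.stopSpan();
    }
}

Every create*Span call is balanced by ContextManager.stopSpan() in the thread that created it; this pairing is what the ContextCarrier steps below build on. This is a sketch under the stated assumptions, not a complete plugin.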
This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. 
* @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. 
setnx) cache.op indicates whether the command is used for a write or read operation; usually the value is converted from the command cache.key indicates the cache key that would be sent to the cache server; this tag may be null, as usually only string-type keys are collected.  To decide flexibly which op a command should be converted to, it\u0026rsquo;s better to provide a config property. See the Jedis-4.x-plugin for reference\nVirtual Message Queue (MQ) Relative Tags SkyWalking analyzes MQ performance-related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates the MQ queue name mq.topic indicates the MQ topic name; it\u0026rsquo;s optional, as some MQs don\u0026rsquo;t have the concept of a topic transmission.latency indicates the transmission latency from producer to consumer. Usually you don\u0026rsquo;t need to record this tag manually; instead, call contextCarrier.extensionInjector().injectSendingTimestamp(); to record the sendingTimestamp tag on the producer side, and SkyWalking will record this tag on the consumer side if the sw8-x context carrier (from the producer side) contains sendingTimestamp  Notice: you should set the peer on both sides (producer and consumer), and the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic way to trace is to intercept a Java method, using byte code manipulation tech and the AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructors, instance methods and class methods.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points. A hedged definition sketch putting these pieces together is shown below.  
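The following is a hedged skeleton of such a v1 definition, extending ClassInstanceMethodsEnhancePluginDefine. Only the base class, the byName matcher, and the InstanceMethodsInterceptPoint contract come from this guide; DemoInstrumentation, com.example.demo.DemoService, com.example.demo.DemoServiceInterceptor, and the method name execute are hypothetical placeholders.

import net.bytebuddy.description.method.MethodDescription;
import net.bytebuddy.matcher.ElementMatcher;
import org.apache.skywalking.apm.agent.core.plugin.interceptor.ConstructorInterceptPoint;
import org.apache.skywalking.apm.agent.core.plugin.interceptor.InstanceMethodsInterceptPoint;
import org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.ClassInstanceMethodsEnhancePluginDefine;
import org.apache.skywalking.apm.agent.core.plugin.match.ClassMatch;

import static net.bytebuddy.matcher.ElementMatchers.named;
import static org.apache.skywalking.apm.agent.core.plugin.match.NameMatch.byName;

public class DemoInstrumentation extends ClassInstanceMethodsEnhancePluginDefine {

    // Hypothetical names, for illustration only.
    private static final String ENHANCE_CLASS = "com.example.demo.DemoService";
    private static final String INTERCEPTOR_CLASS = "com.example.demo.DemoServiceInterceptor";

    @Override
    protected ClassMatch enhanceClass() {
        // Match the target class by its fully qualified name as a string literal.
        return byName(ENHANCE_CLASS);
    }

    @Override
    public ConstructorInterceptPoint[] getConstructorsInterceptPoints() {
        // No constructor interception in this sketch.
        return new ConstructorInterceptPoint[0];
    }

    @Override
    public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() {
        return new InstanceMethodsInterceptPoint[] {
            new InstanceMethodsInterceptPoint() {
                @Override
                public ElementMatcher<MethodDescription> getMethodsMatcher() {
                    return named("execute"); // Byte Buddy matcher for the target method
                }

                @Override
                public String getMethodsInterceptor() {
                    // The interceptor is referenced by name, never by a Class literal.
                    return INTERCEPTOR_CLASS;
                }

                @Override
                public boolean isOverrideArgs() {
                    return false; // the interceptor does not replace the arguments
                }
            }
        };
    }
}

Such a definition would still need to be registered in the skywalking-plugin.def file and paired with an interceptor class, both of which are covered below.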
Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which can propagate context through the MIC (MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class) or byName(ThirdPartyClass.class.getName()), because ThirdPartyClass does not necessarily exist in the target application, and this will break the agent; we have import checks in CI to assist in catching this, but they don\u0026rsquo;t cover all scenarios of this limitation, so never try to work around it with the fully qualified class name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, use the fully qualified class name as a string literal instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still do not use *.class.getName() to get the class name as a String. We recommend using a literal string instead, to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexpected methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClass() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs if you want to change the argument references in the interceptor. Please refer to bytebuddy for details on defining an ElementMatcher.\nIn SkyWalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . 
+ class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. 
*/ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
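Independent of the SpringMVC example that follows, here is a small, hedged sketch of what such a config class and its interceptor-side read typically look like. Every name in it (DemoPluginConfig, Plugin.Demo, COLLECT_PARAMS, plugin.demo.collect_params) is invented for illustration; only the @PluginConfig annotation and the nested-class convention come from this guide, and the annotation package shown is an assumption that may differ by agent version.

import org.apache.skywalking.apm.agent.core.boot.PluginConfig;

public class DemoPluginConfig {
    public static class Plugin {
        // The annotation sits on the innermost class that the interceptor actually references.
        @PluginConfig(root = DemoPluginConfig.class)
        public static class Demo {
            // Following the Plugin/plugin-name/config-key convention, this field is expected to be
            // overridable via the key plugin.demo.collect_params in agent.config, system properties,
            // or environment variables.
            public static boolean COLLECT_PARAMS = false;
        }
    }
}

// Inside an interceptor's beforeMethod, the value is then read as a plain static field:
//
//     if (DemoPluginConfig.Plugin.Demo.COLLECT_PARAMS) {
//         // collect and tag the intercepted method's arguments
//     }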
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","excerpt":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin. …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/java-plugin-development-guide/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide You can always take the existing plugins as examples, while there are some general ideas for all plugins.\n  A plugin is a module under the directory skywalking/plugins with an install method;\n  Inside the install method, you find out the relevant method(s) of the libraries that you plan to instrument, and create/close spans before/after those method(s).\n  You should also provide version rules in the plugin module, which means the version of package your plugin aim to test.\nAll below variables will be used by the tools/plugin_doc_gen.py to produce a latest Plugin Doc.\nlink_vector = [\u0026#39;https://www.python-httpx.org/\u0026#39;] # This should link to the official website/doc of this lib # The support matrix is for scenarios where some libraries don\u0026#39;t work for certain Python versions # Therefore, we use the matrix to instruct the CI testing pipeline to skip over plugin test for such Python version # The right side versions, should almost always use A.B.* to test the latest minor version of two recent major versions.  support_matrix = { \u0026#39;httpx\u0026#39;: { \u0026#39;\u0026gt;=3.7\u0026#39;: [\u0026#39;0.23.*\u0026#39;, \u0026#39;0.22.*\u0026#39;] } } # The note will be used when generating the plugin documentation for users. 
note = \u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;   Every plugin requires a corresponding test under tests/plugin before it can be merged, refer to the Plugin Test Guide when writing a plugin test.\n  Add the corresponding configuration options added/modified by the new plugin to the config.py and add new comments for each, then regenerate the configuration.md by make doc-gen.\n  Steps after coding If your PR introduces the need for a new non-standard library which needs to be pulled via pip or if it removes the need for a previously-used library:\n Run poetry add library --group plugins to pin the dependency to the plugins group, Do not add it to the main dependency! Run make doc-gen to generate a test matrix documentation for the plugin.  ","excerpt":"Plugin Development Guide You can always take the existing plugins as examples, while there are some …","ref":"/docs/skywalking-python/latest/en/contribution/how-to-develop-plugin/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide You can always take the existing plugins as examples, while there are some general ideas for all plugins.\n  A plugin is a module under the directory skywalking/plugins with an install method;\n  Inside the install method, you find out the relevant method(s) of the libraries that you plan to instrument, and create/close spans before/after those method(s).\n  You should also provide version rules in the plugin module, which means the version of package your plugin aim to test.\nAll below variables will be used by the tools/plugin_doc_gen.py to produce a latest Plugin Doc.\nlink_vector = [\u0026#39;https://www.python-httpx.org/\u0026#39;] # This should link to the official website/doc of this lib # The support matrix is for scenarios where some libraries don\u0026#39;t work for certain Python versions # Therefore, we use the matrix to instruct the CI testing pipeline to skip over plugin test for such Python version # The right side versions, should almost always use A.B.* to test the latest minor version of two recent major versions.  support_matrix = { \u0026#39;httpx\u0026#39;: { \u0026#39;\u0026gt;=3.7\u0026#39;: [\u0026#39;0.23.*\u0026#39;, \u0026#39;0.22.*\u0026#39;] } } # The note will be used when generating the plugin documentation for users. note = \u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;   Every plugin requires a corresponding test under tests/plugin before it can be merged, refer to the Plugin Test Guide when writing a plugin test.\n  Add the corresponding configuration options added/modified by the new plugin to the config.py and add new comments for each, then regenerate the configuration.md by make doc-gen.\n  Steps after coding If your PR introduces the need for a new non-standard library which needs to be pulled via pip or if it removes the need for a previously-used library:\n Run poetry add library --group plugins to pin the dependency to the plugins group, Do not add it to the main dependency! Run make doc-gen to generate a test matrix documentation for the plugin.  
","excerpt":"Plugin Development Guide You can always take the existing plugins as examples, while there are some …","ref":"/docs/skywalking-python/next/en/contribution/how-to-develop-plugin/","title":"Plugin Development Guide"},{"body":"Plugin Development Guide You can always take the existing plugins as examples, while there are some general ideas for all plugins.\n  A plugin is a module under the directory skywalking/plugins with an install method;\n  Inside the install method, you find out the relevant method(s) of the libraries that you plan to instrument, and create/close spans before/after those method(s).\n  You should also provide version rules in the plugin module, which means the version of package your plugin aim to test.\nAll below variables will be used by the tools/plugin_doc_gen.py to produce a latest Plugin Doc.\nlink_vector = [\u0026#39;https://www.python-httpx.org/\u0026#39;] # This should link to the official website/doc of this lib # The support matrix is for scenarios where some libraries don\u0026#39;t work for certain Python versions # Therefore, we use the matrix to instruct the CI testing pipeline to skip over plugin test for such Python version # The right side versions, should almost always use A.B.* to test the latest minor version of two recent major versions.  support_matrix = { \u0026#39;httpx\u0026#39;: { \u0026#39;\u0026gt;=3.7\u0026#39;: [\u0026#39;0.23.*\u0026#39;, \u0026#39;0.22.*\u0026#39;] } } # The note will be used when generating the plugin documentation for users. note = \u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;   Every plugin requires a corresponding test under tests/plugin before it can be merged, refer to the Plugin Test Guide when writing a plugin test.\n  Add the corresponding configuration options added/modified by the new plugin to the config.py and add new comments for each, then regenerate the configuration.md by make doc-gen.\n  Steps after coding If your PR introduces the need for a new non-standard library which needs to be pulled via pip or if it removes the need for a previously-used library:\n Run poetry add library --group plugins to pin the dependency to the plugins group, Do not add it to the main dependency! Run make doc-gen to generate a test matrix documentation for the plugin.  ","excerpt":"Plugin Development Guide You can always take the existing plugins as examples, while there are some …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-develop-plugin/","title":"Plugin Development Guide"},{"body":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific plugins, through their names. Consequently, the codes of these excluded plugins will not be weaved in, then, no relative tracing and metrics.\nConfiguration plugin:# List the names of excluded plugins, multiple plugin names should be splitted by \u0026#34;,\u0026#34;# NOTE: This parameter only takes effect during the compilation phase.excluded:${SW_AGENT_PLUGIN_EXCLUDES:}This configuration option is also located in the existing configuration files and supports configuration based on environment variables. 
However, this environment variable only takes effect during the compilation phase.\nThe plugins name please refer to the Support Plugins Documentation.\n","excerpt":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific …","ref":"/docs/skywalking-go/latest/en/advanced-features/plugin-exclusion/","title":"Plugin Exclusion"},{"body":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific plugins, through their names. Consequently, the codes of these excluded plugins will not be weaved in, then, no relative tracing and metrics.\nConfiguration plugin:# List the names of excluded plugins, multiple plugin names should be splitted by \u0026#34;,\u0026#34;# NOTE: This parameter only takes effect during the compilation phase.excluded:${SW_AGENT_PLUGIN_EXCLUDES:}This configuration option is also located in the existing configuration files and supports configuration based on environment variables. However, this environment variable only takes effect during the compilation phase.\nThe plugins name please refer to the Support Plugins Documentation.\n","excerpt":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific …","ref":"/docs/skywalking-go/next/en/advanced-features/plugin-exclusion/","title":"Plugin Exclusion"},{"body":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific plugins, through their names. Consequently, the codes of these excluded plugins will not be weaved in, then, no relative tracing and metrics.\nConfiguration plugin:# List the names of excluded plugins, multiple plugin names should be splitted by \u0026#34;,\u0026#34;# NOTE: This parameter only takes effect during the compilation phase.excluded:${SW_AGENT_PLUGIN_EXCLUDES:}This configuration option is also located in the existing configuration files and supports configuration based on environment variables. 
However, this environment variable only takes effect during the compilation phase.\nThe plugins name please refer to the Support Plugins Documentation.\n","excerpt":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific …","ref":"/docs/skywalking-go/v0.4.0/en/advanced-features/plugin-exclusion/","title":"Plugin Exclusion"},{"body":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker   Fetcher Filter Forwarder  Envoy ALS v2 GRPC Forwarder Envoy ALS v3 GRPC Forwarder Envoy Metrics v2 GRPC Forwarder Envoy Metrics v3 GRPC Forwarder Native CDS GRPC Forwarder Native EBPF Profiling GRPC Forwarder Native Event GRPC Forwarder Native JVM GRPC Forwarder Native CLR GRPC Forwarder Native Log GRPC Forwarder Native Log Kafka Forwarder Native Management GRPC Forwarder Native Meter GRPC Forwarder Native Process GRPC Forwarder Native Profile GRPC Forwarder Native Tracing GRPC Forwarder OpenTelemetry Metrics v1 GRPC Forwarder   Parser Queue  Memory Queue Mmap Queue None Queue   Receiver  GRPC Envoy ALS v2 Receiver GRPC Envoy ALS v3 Receiver GRPC Envoy Metrics v2 Receiver GRPC Envoy Metrics v3 Receiver GRPC Native CDS Receiver GRPC Native EBFP Profiling Receiver GRPC Native Event Receiver GRPC Native JVM Receiver GRPC Native CLR Receiver GRPC Native Log Receiver GRPC Native Management Receiver GRPC Native Meter Receiver GRPC Native Process Receiver GRPC Native Profile Receiver GRPC Native Tracing Receiver GRPC OpenTelemetry Metrics v1 Receiver HTTP Native Log Receiver   Server  GRPC Server HTTP Server    ","excerpt":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/plugin-list/","title":"Plugin List"},{"body":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker   Fetcher Filter Forwarder  Envoy ALS v2 GRPC Forwarder Envoy ALS v3 GRPC Forwarder Envoy Metrics v2 GRPC Forwarder Envoy Metrics v3 GRPC Forwarder Native CDS GRPC Forwarder Native CLR GRPC Forwarder GRPC Native EBFP Access Log Forwarder Native EBPF Profiling GRPC Forwarder Native Event GRPC Forwarder Native JVM GRPC Forwarder Native Log GRPC Forwarder Native Log Kafka Forwarder Native Management GRPC Forwarder Native Meter GRPC Forwarder Native Process GRPC Forwarder Native Profile GRPC Forwarder Native Tracing GRPC Forwarder OpenTelemetry Metrics v1 GRPC Forwarder   Parser Queue  Memory Queue Mmap Queue None Queue   Receiver  GRPC Envoy ALS v2 Receiver GRPC Envoy ALS v3 Receiver GRPC Envoy Metrics v2 Receiver GRPC Envoy Metrics v3 Receiver GRPC Native CDS Receiver GRPC Native CLR Receiver GRPC Native EBFP Accesslog Receiver GRPC Native EBFP Profiling Receiver GRPC Native Event Receiver GRPC Native JVM Receiver GRPC Native Log Receiver GRPC Native Management Receiver GRPC Native Meter Receiver GRPC Native Process Receiver GRPC Native Profile Receiver GRPC Native Tracing Receiver GRPC OpenTelemetry Metrics v1 Receiver HTTP Native Log Receiver   Server  GRPC Server HTTP Server    ","excerpt":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/plugin-list/","title":"Plugin List"},{"body":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker   Fetcher Filter Forwarder  Envoy ALS v2 GRPC Forwarder Envoy ALS v3 GRPC Forwarder Envoy Metrics v2 GRPC Forwarder Envoy Metrics v3 GRPC Forwarder Native 
CDS GRPC Forwarder Native EBPF Profiling GRPC Forwarder Native Event GRPC Forwarder Native JVM GRPC Forwarder Native CLR GRPC Forwarder Native Log GRPC Forwarder Native Log Kafka Forwarder Native Management GRPC Forwarder Native Meter GRPC Forwarder Native Process GRPC Forwarder Native Profile GRPC Forwarder Native Tracing GRPC Forwarder OpenTelemetry Metrics v1 GRPC Forwarder   Parser Queue  Memory Queue Mmap Queue None Queue   Receiver  GRPC Envoy ALS v2 Receiver GRPC Envoy ALS v3 Receiver GRPC Envoy Metrics v2 Receiver GRPC Envoy Metrics v3 Receiver GRPC Native CDS Receiver GRPC Native EBFP Profiling Receiver GRPC Native Event Receiver GRPC Native JVM Receiver GRPC Native CLR Receiver GRPC Native Log Receiver GRPC Native Management Receiver GRPC Native Meter Receiver GRPC Native Process Receiver GRPC Native Profile Receiver GRPC Native Tracing Receiver GRPC OpenTelemetry Metrics v1 Receiver HTTP Native Log Receiver   Server  GRPC Server HTTP Server    ","excerpt":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/plugin-list/","title":"Plugin List"},{"body":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins.\nRegistration mechanism The Plugin registration mechanism in Satellite is similar to the SPI registration mechanism of Java. Plugin registration mechanism supports to register an interface and its implementation, that means different interfaces have different registration spaces. We can easily find the type of a specific plugin according to the interface and the plugin name and initialize it according to the type.\nstructure:\n code: map[reflect.Type]map[string]reflect.Value meaning: map[interface type]map[plugin name] plugin type  Initialization mechanism Users can easily find a plugin type and initialize an empty plugin instance according to the previous registration mechanism. For setting up the configuration of the extension convenience, we define the initialization mechanism in Plugin structure.\nIn the initialization mechanism, the plugin category(interface) and the init config is required.\nInitialize processing is like the following.\n Find the plugin name in the input config according to the fixed key plugin_name. Find plugin type according to the plugin category(interface) and the plugin name. Create an empty plugin. Initialize the plugin according to the merged config, which is created by the input config and the default config.  Plugin usage in Satellite Nowadays, the numbers of the Plugin categories is 2. One is the sharing Plugin, and another is the other normal Plugin.\n Extension Plugins:  sharing plugins  Server Plugin Client Plugin   normal plugins  Receiver Plugin Fetcher Plugin Parser Plugin Queue Plugin Filter Plugin Fallbacker Plugin Forwarder Plugin      ","excerpt":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins. …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/plugin_mechanism/","title":"plugin structure"},{"body":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins.\nRegistration mechanism The Plugin registration mechanism in Satellite is similar to the SPI registration mechanism of Java. Plugin registration mechanism supports to register an interface and its implementation, that means different interfaces have different registration spaces. 
We can easily find the type of a specific plugin according to the interface and the plugin name and initialize it according to the type.\nstructure:\n code: map[reflect.Type]map[string]reflect.Value meaning: map[interface type]map[plugin name] plugin type  Initialization mechanism Users can easily find a plugin type and initialize an empty plugin instance according to the previous registration mechanism. For setting up the configuration of the extension convenience, we define the initialization mechanism in Plugin structure.\nIn the initialization mechanism, the plugin category(interface) and the init config is required.\nInitialize processing is like the following.\n Find the plugin name in the input config according to the fixed key plugin_name. Find plugin type according to the plugin category(interface) and the plugin name. Create an empty plugin. Initialize the plugin according to the merged config, which is created by the input config and the default config.  Plugin usage in Satellite Nowadays, the numbers of the Plugin categories is 2. One is the sharing Plugin, and another is the other normal Plugin.\n Extension Plugins:  sharing plugins  Server Plugin Client Plugin   normal plugins  Receiver Plugin Fetcher Plugin Parser Plugin Queue Plugin Filter Plugin Fallbacker Plugin Forwarder Plugin      ","excerpt":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins. …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/plugin_mechanism/","title":"plugin structure"},{"body":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins.\nRegistration mechanism The Plugin registration mechanism in Satellite is similar to the SPI registration mechanism of Java. Plugin registration mechanism supports to register an interface and its implementation, that means different interfaces have different registration spaces. We can easily find the type of a specific plugin according to the interface and the plugin name and initialize it according to the type.\nstructure:\n code: map[reflect.Type]map[string]reflect.Value meaning: map[interface type]map[plugin name] plugin type  Initialization mechanism Users can easily find a plugin type and initialize an empty plugin instance according to the previous registration mechanism. For setting up the configuration of the extension convenience, we define the initialization mechanism in Plugin structure.\nIn the initialization mechanism, the plugin category(interface) and the init config is required.\nInitialize processing is like the following.\n Find the plugin name in the input config according to the fixed key plugin_name. Find plugin type according to the plugin category(interface) and the plugin name. Create an empty plugin. Initialize the plugin according to the merged config, which is created by the input config and the default config.  Plugin usage in Satellite Nowadays, the numbers of the Plugin categories is 2. One is the sharing Plugin, and another is the other normal Plugin.\n Extension Plugins:  sharing plugins  Server Plugin Client Plugin   normal plugins  Receiver Plugin Fetcher Plugin Parser Plugin Queue Plugin Filter Plugin Fallbacker Plugin Forwarder Plugin      ","excerpt":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins. 
…","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/plugin_mechanism/","title":"plugin structure"},{"body":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the master branch. Specify a support matrix in each plugin in the skywalking/plugins folder, along with their website links, the matrix and links will be used for plugin support table documentation generation for this doc Plugins.md.\nUse make doc-gen to generate a table and paste into Plugins.md after all test passes.\nSkyWalking Agent Test Tool (Mock Collector) SkyWalking Agent Test Tool respects the same protocol as the SkyWalking backend, and thus receives the report data from the agent side, besides, it also exposes some HTTP endpoints for verification.\nTested Service A tested service is a service involving the plugin that is to be tested, and exposes some endpoints to trigger the instrumented code and report log/trace/meter data to the mock collector.\nDocker Compose docker-compose is used to orchestrate the mock collector and the tested service(s), the docker-compose.yml should be able to run with docker-compose -f docker-compose.yml up in standalone mode, which can be used in debugging too.\nExpected Data The expected.data.yml file contains the expected segment/log/meter data after we have triggered the instrumentation and report to mock collector.\nOnce the mock collector receives data, we post the expected data to the mock collector and verify whether they match.\nThis can be done through the /dataValidate of the mock collector, say http://collector:12800/dataValidate, for example.\nExample If we want to test the plugin for the built-in library http, we will:\n Build a tested service, which sets up an HTTP server by http library, and exposes an HTTP endpoint to be triggered in the test codes, say /trigger, take this provider service as example. Compose a docker-compose.yml file, orchestrating the service built in step 1 and the mock collector, take this docker-compose.yml as an example. Write test codes to trigger the endpoint in step 1, and send the expected data file to the mock collector to verify, take this test as example.  ","excerpt":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the …","ref":"/docs/skywalking-python/latest/en/contribution/how-to-test-plugin/","title":"Plugin Test"},{"body":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the master branch. 
Specify a support matrix in each plugin in the skywalking/plugins folder, along with the relevant website links; the matrix and links are used to generate the plugin support table in Plugins.md.\nUse make doc-gen to generate the table and paste it into Plugins.md after all tests pass.\nSkyWalking Agent Test Tool (Mock Collector) The SkyWalking Agent Test Tool implements the same protocol as the SkyWalking backend, and thus receives the reported data from the agent side; it also exposes some HTTP endpoints for verification.\nTested Service A tested service is a service that uses the plugin to be tested and exposes endpoints that trigger the instrumented code and report log/trace/meter data to the mock collector.\nDocker Compose docker-compose is used to orchestrate the mock collector and the tested service(s); the docker-compose.yml should be able to run with docker-compose -f docker-compose.yml up in standalone mode, which can also be used for debugging.\nExpected Data The expected.data.yml file contains the expected segment/log/meter data after the instrumentation has been triggered and reported to the mock collector.\nOnce the mock collector receives data, we post the expected data to it and verify whether they match.\nThis can be done through the /dataValidate endpoint of the mock collector, for example http://collector:12800/dataValidate.\nExample If we want to test the plugin for the built-in library http, we will:\n Build a tested service that sets up an HTTP server with the http library and exposes an HTTP endpoint, say /trigger, to be triggered in the test codes; take this provider service as an example. Compose a docker-compose.yml file orchestrating the service built in step 1 and the mock collector; take this docker-compose.yml as an example. Write test codes that trigger the endpoint in step 1 and send the expected data file to the mock collector to verify; take this test as an example.  ","excerpt":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the …","ref":"/docs/skywalking-python/next/en/contribution/how-to-test-plugin/","title":"Plugin Test"},{"body":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the master branch. 
Specify a support matrix in each plugin in the skywalking/plugins folder, along with their website links, the matrix and links will be used for plugin support table documentation generation for this doc Plugins.md.\nUse make doc-gen to generate a table and paste into Plugins.md after all test passes.\nSkyWalking Agent Test Tool (Mock Collector) SkyWalking Agent Test Tool respects the same protocol as the SkyWalking backend, and thus receives the report data from the agent side, besides, it also exposes some HTTP endpoints for verification.\nTested Service A tested service is a service involving the plugin that is to be tested, and exposes some endpoints to trigger the instrumented code and report log/trace/meter data to the mock collector.\nDocker Compose docker-compose is used to orchestrate the mock collector and the tested service(s), the docker-compose.yml should be able to run with docker-compose -f docker-compose.yml up in standalone mode, which can be used in debugging too.\nExpected Data The expected.data.yml file contains the expected segment/log/meter data after we have triggered the instrumentation and report to mock collector.\nOnce the mock collector receives data, we post the expected data to the mock collector and verify whether they match.\nThis can be done through the /dataValidate of the mock collector, say http://collector:12800/dataValidate, for example.\nExample If we want to test the plugin for the built-in library http, we will:\n Build a tested service, which sets up an HTTP server by http library, and exposes an HTTP endpoint to be triggered in the test codes, say /trigger, take this provider service as example. Compose a docker-compose.yml file, orchestrating the service built in step 1 and the mock collector, take this docker-compose.yml as an example. Write test codes to trigger the endpoint in step 1, and send the expected data file to the mock collector to verify, take this test as example.  ","excerpt":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-test-plugin/","title":"Plugin Test"},{"body":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\n","excerpt":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. 
Activate …","ref":"/docs/main/latest/en/setup/backend/storages/postgresql/","title":"PostgreSQL"},{"body":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\n","excerpt":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate …","ref":"/docs/main/next/en/setup/backend/storages/postgresql/","title":"PostgreSQL"},{"body":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\n","excerpt":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate …","ref":"/docs/main/v9.7.0/en/setup/backend/storages/postgresql/","title":"PostgreSQL"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/latest/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. 
Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/next/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  
PostgreSQL Monitoring PostgreSQL monitoring provides monitoring of the status and resources of the PostgreSQL server.PostgreSQL server as a Service in OAP, and land on the Layer: POSTGRESQL.\nPostgreSQL Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql.yaml. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring SkyWalking leverages postgres-exporter for collecting metrics data from …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL monitoring provides monitoring of the status and resources of the PostgreSQL server.PostgreSQL server as a Service in OAP, and land on the Layer: POSTGRESQL.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. 
postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql.yaml. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. 
Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL monitoring provides monitoring of the status and resources of the PostgreSQL server.PostgreSQL server as a Service in OAP, and land on the Layer: POSTGRESQL.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. 
postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. 
The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. 
The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. 
The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. 
The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","excerpt":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-postgresql-monitoring/","title":"PostgreSQL monitoring"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another example is SkyWalking agent, which leverage Golang compiling mechanism to weaves codes in the compiling time. For some static compilation languages, such as C++, manual library is the only choice. As you can see, these agents are based on languages and libraries, no matter we provide auto instrument or manual agents.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. SkyWalking community is connected closely with Zipkin community, it could work as an alternative server for both v1 and v2 Zipkin traces. Also, OTEL trace format in gRPC is supported, and converted to Zipkin format inside SkyWalking. As an alternative Zipkin server, Zipkin lens UI could be used to visualize accepted traces when they are in Zipkin format. See Receiver for Zipkin traces and Receiver for OTEL traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t have to install all probes to make SkyWalking up and running. There are several recommended ways on how to use these probes:\n Use Language based native agent only to build topology and metrics for your business application. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe if you prefer Service Mesh stack and don\u0026rsquo;t want to use native agents. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in pure tracing status. (Advanced usage) Use eBPF agent only if you only want to profile on demand and/or activating automatic performance analysis. Use eBPF agent with Language based native agent collaboratively. Enhance the traces with the eBPF agent to collect extra information.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. 
In pure tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, but doesn\u0026rsquo;t run the metrics analysis from traces. As a result, there would not have data of service/instance/endpoint metrics and relationships.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/latest/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another example is SkyWalking agent, which leverage Golang compiling mechanism to weaves codes in the compiling time. For some static compilation languages, such as C++, manual library is the only choice. As you can see, these agents are based on languages and libraries, no matter we provide auto instrument or manual agents.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. SkyWalking community is connected closely with Zipkin community, it could work as an alternative server for both v1 and v2 Zipkin traces. Also, OTEL trace format in gRPC is supported, and converted to Zipkin format inside SkyWalking. As an alternative Zipkin server, Zipkin lens UI could be used to visualize accepted traces when they are in Zipkin format. See Receiver for Zipkin traces and Receiver for OTEL traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t have to install all probes to make SkyWalking up and running. There are several recommended ways on how to use these probes:\n Use Language based native agent only to build topology and metrics for your business application. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe if you prefer Service Mesh stack and don\u0026rsquo;t want to use native agents. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in pure tracing status. (Advanced usage) Use eBPF agent only if you only want to profile on demand and/or activating automatic performance analysis. Use eBPF agent with Language based native agent collaboratively. 
Enhance the traces with the eBPF agent to collect extra information.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In pure tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, but doesn\u0026rsquo;t run the metrics analysis from traces. As a result, there would not have data of service/instance/endpoint metrics and relationships.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/next/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are three typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage)  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. 
In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and proifiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. 
After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  
","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  
","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  
","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  
","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  
","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another example is SkyWalking agent, which leverage Golang compiling mechanism to weaves codes in the compiling time. For some static compilation languages, such as C++, manual library is the only choice. As you can see, these agents are based on languages and libraries, no matter we provide auto instrument or manual agents.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. SkyWalking community is connected closely with Zipkin community, it could work as an alternative server for both v1 and v2 Zipkin traces. Also, OTEL trace format in gRPC is supported, and converted to Zipkin format inside SkyWalking. As an alternative Zipkin server, Zipkin lens UI could be used to visualize accepted traces when they are in Zipkin format. See Receiver for Zipkin traces and Receiver for OTEL traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t have to install all probes to make SkyWalking up and running. There are several recommended ways on how to use these probes:\n Use Language based native agent only to build topology and metrics for your business application. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe if you prefer Service Mesh stack and don\u0026rsquo;t want to use native agents. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in pure tracing status. (Advanced usage) Use eBPF agent only if you only want to profile on demand and/or activating automatic performance analysis. Use eBPF agent with Language based native agent collaboratively. Enhance the traces with the eBPF agent to collect extra information.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In pure tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, but doesn\u0026rsquo;t run the metrics analysis from traces. 
As a result, there would not have data of service/instance/endpoint metrics and relationships.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","excerpt":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/probe-introduction/","title":"Probe Introduction"},{"body":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, and Zabbix.\nThe native metrics format definition could be found here. Typically, the agent meter plugin (e.g. Java Meter Plugin) and Satellite Prometheus fetcher would convert metrics into native format and forward them to SkyWalking OAP server.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, …","ref":"/docs/main/v9.0.0/en/protocols/readme/","title":"Probe Protocols"},{"body":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, and Zabbix.\nThe native metrics format definition could be found here. Typically, the agent meter plugin (e.g. Java Meter Plugin) and Satellite Prometheus fetcher would convert metrics into native format and forward them to SkyWalking OAP server.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, …","ref":"/docs/main/v9.1.0/en/protocols/readme/","title":"Probe Protocols"},{"body":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, OpenTelemetry, and Zabbix.\nThe native metrics format definition could be found here. Typically, the agent meter plugin (e.g. Java Meter Plugin) and Satellite Prometheus fetcher would convert metrics into native format and forward them to SkyWalking OAP server.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, …","ref":"/docs/main/v9.2.0/en/protocols/readme/","title":"Probe Protocols"},{"body":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3.1 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, OpenTelemetry, and Zabbix.\nThe native metrics format definition could be found here. The agent meter plugin (e.g. Java Meter Plugin) uses the native metric format to report metrics.\nOpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to MAL engine.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","excerpt":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, …","ref":"/docs/main/v9.3.0/en/protocols/readme/","title":"Probe Protocols"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/latest/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. 
Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/latest/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/latest/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/latest/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. 
Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/latest/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/latest/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/latest/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/next/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/next/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/next/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/next/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/next/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/next/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/next/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.0.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.0.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. 
the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.0.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.0.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.0.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. 
That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.0.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.0.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.1.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.1.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.1.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.1.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.1.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.1.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.1.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.2.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.2.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. 
the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.2.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.2.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.2.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. 
That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.2.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.2.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.3.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.3.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.3.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.3.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.3.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.3.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.3.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.4.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.4.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. 
the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.4.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.4.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.4.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. 
That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.4.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.4.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.5.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.5.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.5.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.5.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.5.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.5.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.5.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.6.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.6.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. 
the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.6.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.6.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.6.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. 
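For the native Kafka client case described above, a minimal sketch of the manual instrumentation might look like the following. It assumes the @KafkaPollAndInvoke annotation is provided by the apm-toolkit-kafka artifact under the org.apache.skywalking.apm.toolkit.kafka package; verify the exact coordinates and package for the toolkit version matching your agent.

```java
import java.time.Duration;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
// Assumed import path; the annotation ships with the SkyWalking application toolkit (apm-toolkit-kafka).
import org.apache.skywalking.apm.toolkit.kafka.KafkaPollAndInvoke;

public class TracedConsumer {

    // Annotating the method that both polls and processes lets the agent keep the
    // user-defined processing inside the same trace context started by the poll action.
    @KafkaPollAndInvoke
    public void pollAndHandle(KafkaConsumer<String, String> consumer) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
        for (ConsumerRecord<String, String> record : records) {
            handle(record); // business logic, now covered by the same trace
        }
    }

    private void handle(ConsumerRecord<String, String> record) {
        // process the message
    }
}
```

With spring-kafka 1.3.x, 2.2.x or above, this wrapping is unnecessary, as noted in the resolution.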
That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.6.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no …","ref":"/docs/main/v9.6.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","excerpt":"Problem When you start your application with the skywalking agent, you may find this exception in …","ref":"/docs/main/v9.7.0/en/faq/enhancerequireobjectcache-cast-exception/","title":"Problem"},{"body":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","excerpt":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur: …","ref":"/docs/main/v9.7.0/en/faq/import-project-eclipse-requireitems-exception/","title":"Problem"},{"body":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","excerpt":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible …","ref":"/docs/main/v9.7.0/en/faq/kafka-plugin/","title":"Problem"},{"body":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","excerpt":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are …","ref":"/docs/main/v9.7.0/en/faq/memory-leak-enhance-worker-thread/","title":"Problem"},{"body":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","excerpt":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to …","ref":"/docs/main/v9.7.0/en/faq/protoc-plugin-fails-when-build/","title":"Problem"},{"body":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","excerpt":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry …","ref":"/docs/main/v9.7.0/en/faq/thrift-plugin/","title":"Problem"},{"body":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","excerpt":"Problem  There is no abnormal log in Agent log and Collector log. 
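The "Custom Thread Pool" resolution above, reformatted as a self-contained sketch: the task (not the worker thread) is wrapped so the trace context crosses the thread boundary and the TraceSegment can be reported. The import path for RunnableWrapper is assumed to be the apm-toolkit-trace artifact; check the toolkit version that matches your agent.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
// Assumed import path; RunnableWrapper comes from the SkyWalking application toolkit (apm-toolkit-trace).
import org.apache.skywalking.apm.toolkit.trace.RunnableWrapper;

public class CrossThreadTracingExample {
    public static void main(String[] args) {
        ExecutorService executorService = Executors.newFixedThreadPool(1);
        // Wrapping the submitted task propagates the current trace context into the
        // worker thread, so the segment is finished and reported instead of leaking.
        executorService.execute(RunnableWrapper.of(() -> {
            // your code
        }));
        executorService.shutdown();
    }
}
```

Wrapping the ThreadFactory (as in the problem example) enhances the long-lived worker thread instead of each task, which is what leads to the unreported segments and memory growth.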
The traces can be seen, but no …","ref":"/docs/main/v9.7.0/en/faq/why-have-traces-no-others/","title":"Problem"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. 
Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/latest/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... 
SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/next/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! 
stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.0.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! 
stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. 
","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.1.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. 
For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.2.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. 
SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.3.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! 
stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.4.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! 
stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. 
","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.5.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. 
For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.6.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. 
SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","excerpt":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the …","ref":"/docs/main/v9.7.0/en/faq/maven-compile-npm-failure/","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. 
Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/latest/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. 
Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  
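As an illustration only (not SkyWalking's actual implementation), the short Python sketch below shows how captured socket read/write events could be rolled up into per-connection counters similar to the traffic metrics listed above; the event fields and sample values are hypothetical.
from collections import defaultdict

# Hypothetical captured socket events: (process, peer, protocol, is_ssl, direction, bytes, nanos)
events = [
    ("checkout", "10.0.0.7:9090", "http", False, "write", 512, 180_000),
    ("checkout", "10.0.0.7:9090", "http", False, "read", 2048, 950_000),
    ("checkout", "10.0.0.8:3306", "mysql", True, "write", 128, 90_000),
]

# Roll the events up into per-connection counters: count, bytes, and execute time.
counters = defaultdict(lambda: {"count": 0, "bytes": 0, "exe_time_ns": 0})
for process, peer, protocol, is_ssl, direction, size, nanos in events:
    key = (process, peer, protocol, is_ssl, direction)
    counters[key]["count"] += 1
    counters[key]["bytes"] += size
    counters[key]["exe_time_ns"] += nanos

for key, value in counters.items():
    print(key, value)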
Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/next/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided two ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. 
Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\n","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided two ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. 
It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\n","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. 
The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. 
HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  
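As a simplified illustration of the context-switching angle (a Python sketch, not the eBPF implementation), on Linux the per-thread context-switch counters can be read from /proc; threads with the highest counts are the ones spending the most time switching:
import os

def thread_context_switches(pid):
    """Read per-thread context-switch counters from /proc (Linux only)."""
    result = {}
    task_dir = f"/proc/{pid}/task"
    for tid in os.listdir(task_dir):
        counts = {}
        with open(f"{task_dir}/{tid}/status") as f:
            for line in f:
                if line.startswith(("voluntary_ctxt_switches", "nonvoluntary_ctxt_switches")):
                    name, value = line.split(":")
                    counts[name] = int(value)
        result[int(tid)] = counts
    return result

# Threads with the highest counts spend the most time switching context.
for tid, counts in sorted(thread_context_switches(os.getpid()).items()):
    print(tid, counts)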
Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. 
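As a rough Python sketch of that periodic snapshotting (illustrative only, not the agent's implementation), a sampler can dump every thread's stack at a fixed interval:
import sys
import threading
import time
import traceback

def sample_stacks(interval_seconds=0.05, samples=5):
    """Dump every thread's stack at a fixed interval, like a profiling snapshot."""
    snapshots = []
    for _ in range(samples):
        frames = sys._current_frames()
        snapshot = {tid: traceback.format_stack(frame) for tid, frame in frames.items()}
        snapshots.append(snapshot)
        time.sleep(interval_seconds)
    return snapshots

if __name__ == "__main__":
    threading.Thread(target=time.sleep, args=(1,), daemon=True).start()
    for snapshot in sample_stacks():
        print({tid: len(stack) for tid, stack in snapshot.items()})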
The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. 
HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","excerpt":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/profiling/","title":"Profiling"},{"body":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the snapshot to the backend server.\nConfiguration    Name Default Environment Key Description     profiling.active true ROVER_PROFILING_ACTIVE Is active the process profiling.   profiling.check_interval 10s ROVER_PROFILING_CHECK_INTERVAL Check the profiling task interval.   profiling.flush_interval 5s ROVER_PROFILING_FLUSH_INTERVAL Combine existing profiling data and report to the backend interval.   profiling.task.on_cpu.dump_period 9ms ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD The profiling stack dump period.   profiling.task.network.report_interval 2s ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_REPORT_INTERVAL The interval of send metrics to the backend.   profiling.task.network.meter_prefix rover_net_p ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_METER_PREFIX The prefix of network profiling metrics name.   profiling.task.network.protocol_analyze.per_cpu_buffer 400KB ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   profiling.task.network.protocol_analyze.parallels 2 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   profiling.task.network.protocol_analyze.queue_size 5000 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyzer queue.   profiling.task.network.protocol_analyze.sampling.http.default_request_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_REQUEST_ENCODING The default body encoding when sampling the request.   profiling.task.network.protocol_analyze.sampling.http.default_response_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_RESPONSE_ENCODING The default body encoding when sampling the response.   profiling.continuous.meter_prefix rover_con_p ROVER_PROFILING_CONTINUOUS_METER_PREFIX The continuous related meters prefix name.   profiling.continuous.fetch_interval 1s ROVER_PROFILING_CONTINUOUS_FETCH_INTERVAL The interval of fetch metrics from the system, such as Process CPU, System Load, etc.   profiling.continuous.check_interval 5s ROVER_PROFILING_CONTINUOUS_CHECK_INTERVAL The interval of check metrics is reach the thresholds.   profiling.continuous.trigger.execute_duration 10m ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION The duration of the profiling task.   profiling.continuous.trigger.silence_duration 20m ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION The minimal duration between the execution of the same profiling task.    
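As a sketch only (Rover's actual configuration loader is written in Go and is not shown here), the environment keys above override the documented defaults; resolving a setting could look like this Python example, which uses a small subset of the keys from the table:
import os

# Defaults taken from the table above; the environment keys override them.
DEFAULTS = {
    "ROVER_PROFILING_ACTIVE": "true",
    "ROVER_PROFILING_CHECK_INTERVAL": "10s",
    "ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD": "9ms",
    "ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION": "10m",
    "ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION": "20m",
}

def setting(key):
    """Return the environment override if present, otherwise the documented default."""
    return os.environ.get(key, DEFAULTS[key])

for key in DEFAULTS:
    print(key, "=", setting(key))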
Prepare service Before profiling your service, please make sure the service binary already contains symbol data, so that the stack symbols can be resolved. You can check this in either of the following ways:\n objdump: Using objdump --syms path/to/service. readelf: Using readelf --syms path/to/service.  Profiling Type All profiling tasks use official Linux functions and kprobe or uprobe to open a perf event and attach the eBPF program that dumps stacks.\nOn CPU The On CPU profiling task uses PERF_COUNT_SW_CPU_CLOCK to profile the process with the CPU clock.\nOff CPU The Off CPU profiling task attaches to finish_task_switch via kprobe to profile the process.\nNetwork The Network profiling task intercepts I/O-related syscalls and uses uprobes in the process to identify the network traffic and generate the metrics. The following protocols are supported for analysis over the OpenSSL library, BoringSSL library, GoTLS, NodeTLS, or plaintext:\n HTTP/1.x HTTP/2 MySQL CQL(The Cassandra Query Language) MongoDB Kafka DNS  Collecting data Network profiling sends metrics and logs to the backend service.\nData Type Network profiling customizes the following types of metrics to represent the network data:\n Counter: Records the total amount of data in a certain period of time. Each counter contains the following data:  Count: The count of the executions. Bytes: The package size of the executions. Exe Time: The time consumed (nanoseconds) by the executions.   Histogram: Records the distribution of the data in buckets. TopN: Records the highest-latency data in a certain period of time.  Labels Each metric contains the following labels to identify the process relationship:\n   Name Type Description     client_process_id or server_process_id string The ID of the current process, determined by whether the current process acts as the server or the client in the connection.   client_local or server_local boolean Whether the remote process is a local process.   client_address or server_address string The remote process address, e.g. IP:port.   side enum Whether the current process is the \u0026ldquo;client\u0026rdquo; or the \u0026ldquo;server\u0026rdquo; in this connection.   protocol string The protocol identified from the package data content.   is_ssl bool Whether the current connection uses SSL.    
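For illustration, the protocol label above is inferred from the package content; a toy Python version of that idea (far simpler than the agent's real protocol analyzers) could look like:
HTTP_METHODS = (b"GET ", b"POST ", b"PUT ", b"DELETE ", b"HEAD ", b"OPTIONS ", b"PATCH ")

def guess_protocol(payload: bytes) -> str:
    """Toy protocol guess based on the first bytes of the payload."""
    if payload.startswith(HTTP_METHODS) or payload.startswith(b"HTTP/1."):
        return "http"
    if payload.startswith(b"PRI * HTTP/2.0"):  # HTTP/2 client connection preface
        return "http2"
    return "unknown"

print(guess_protocol(b"GET /healthz HTTP/1.1\r\nHost: example.local\r\n\r\n"))  # http
print(guess_protocol(b"\x00\x01\x02"))                                          # unknown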
Layer-4 Data Based on the above two data types, the following metrics are provided.\n   Name Type Unit Description     write Counter nanosecond The socket write counter   read Counter nanosecond The socket read counter   write RTT Counter microsecond The socket write RTT counter   connect Counter nanosecond The socket connect/accept with other server/client counter   close Counter nanosecond The socket close counter   retransmit Counter nanosecond The socket retransmit package counter   drop Counter nanosecond The socket drop package counter   write RTT Histogram microsecond The socket write RTT execute time histogram   write execute time Histogram nanosecond The socket write data execute time histogram   read execute time Histogram nanosecond The socket read data execute time histogram   connect execute time Histogram nanosecond The socket connect/accept with other server/client execute time histogram   close execute time Histogram nanosecond The socket close execute time histogram    HTTP/1.x Data Metrics    Name Type Unit Description     http1_request_cpm Counter count The HTTP request counter   http1_response_status_cpm Counter count The count of per HTTP response code   http1_request_package_size Histogram Byte size The request package size   http1_response_package_size Histogram Byte size The response package size   http1_client_duration Histogram millisecond The duration of single HTTP response on the client side   http1_server_duration Histogram millisecond The duration of single HTTP response on the server side    Logs    Name Type Unit Description     slow_traces TopN millisecond The Top N slow trace(id)s   status_4xx TopN millisecond The Top N trace(id)s with response status in 400-499   status_5xx TopN millisecond The Top N trace(id)s with response status in 500-599    Span Attached Event    Name Description     HTTP Request Sampling Complete information about the HTTP request, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   HTTP Response Sampling Complete information about the HTTP response, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   Syscall xxx The methods to use when the process invoke with the network-related syscall method. It\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.    Continuous Profiling The continuous profiling feature monitors low-power target process information, including process CPU usage and network requests, based on configuration passed from the backend. When a threshold is met, it automatically initiates a profiling task(on/off CPU, Network) to provide more detailed analysis.\nMonitor Type System Load Monitor the average system load for the last minute, which is equivalent to using the first value of the load average in the uptime command.\nProcess CPU The target process utilizes a certain percentage of the CPU on the current host.\nProcess Thread Count The real-time number of threads in the target process.\nNetwork Network monitoring uses eBPF technology to collect real-time performance data of the current process responding to requests. Requests sent upstream are not monitored by the system.\nCurrently, network monitoring supports parsing of the HTTP/1.x protocol and supports the following types of monitoring:\n Error Rate: The percentage of network request errors, such as HTTP status codes within the range of [500-600), is considered as erroneous. Avg Response Time: Average response time(ms) for specified URI.  
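As a sketch of the threshold idea (the sample data and threshold values in this Python snippet are made up; the real thresholds come from the backend configuration), the error rate, average response time, and system load could be evaluated like this:
import os

# Hypothetical samples of (HTTP status, response time in ms) collected in one check window.
samples = [(200, 40), (200, 55), (503, 900), (200, 38), (500, 750)]

error_rate = 100.0 * sum(1 for status, _ in samples if status >= 500) / len(samples)
avg_response_time = sum(duration for _, duration in samples) / len(samples)
system_load = os.getloadavg()[0]  # first value of the load average, as in uptime

# Made-up thresholds for illustration only.
if error_rate > 10 or avg_response_time > 500 or system_load > 8:
    print("thresholds reached, a profiling task would be triggered")
print(f"error_rate={error_rate:.1f}% avg_response_time={avg_response_time:.1f}ms load={system_load:.2f}")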
Metrics Rover would periodically send collected monitoring data to the backend using the Native Meter Protocol.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    ","excerpt":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the …","ref":"/docs/skywalking-rover/latest/en/setup/configuration/profiling/","title":"Profiling"},{"body":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the snapshot to the backend server.\nConfiguration    Name Default Environment Key Description     profiling.active true ROVER_PROFILING_ACTIVE Is active the process profiling.   profiling.check_interval 10s ROVER_PROFILING_CHECK_INTERVAL Check the profiling task interval.   profiling.flush_interval 5s ROVER_PROFILING_FLUSH_INTERVAL Combine existing profiling data and report to the backend interval.   profiling.task.on_cpu.dump_period 9ms ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD The profiling stack dump period.   profiling.task.network.report_interval 2s ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_REPORT_INTERVAL The interval of send metrics to the backend.   profiling.task.network.meter_prefix rover_net_p ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_METER_PREFIX The prefix of network profiling metrics name.   profiling.task.network.protocol_analyze.per_cpu_buffer 400KB ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   profiling.task.network.protocol_analyze.parallels 2 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   profiling.task.network.protocol_analyze.queue_size 5000 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyzer queue.   profiling.task.network.protocol_analyze.sampling.http.default_request_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_REQUEST_ENCODING The default body encoding when sampling the request.   profiling.task.network.protocol_analyze.sampling.http.default_response_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_RESPONSE_ENCODING The default body encoding when sampling the response.   profiling.continuous.meter_prefix rover_con_p ROVER_PROFILING_CONTINUOUS_METER_PREFIX The continuous related meters prefix name.   profiling.continuous.fetch_interval 1s ROVER_PROFILING_CONTINUOUS_FETCH_INTERVAL The interval of fetch metrics from the system, such as Process CPU, System Load, etc.   profiling.continuous.check_interval 5s ROVER_PROFILING_CONTINUOUS_CHECK_INTERVAL The interval of check metrics is reach the thresholds.   profiling.continuous.trigger.execute_duration 10m ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION The duration of the profiling task.   profiling.continuous.trigger.silence_duration 20m ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION The minimal duration between the execution of the same profiling task.    Prepare service Before profiling your service, please make sure your service already has the symbol data inside the binary file. So we could locate the stack symbol, It could be checked following these ways:\n objdump: Using objdump --syms path/to/service. 
readelf: Using readelf --syms path/to/service.  Profiling Type All the profiling tasks are using the Linux Official Function and kprobe or uprobe to open perf event, and attach the eBPF Program to dump stacks.\nOn CPU On CPU Profiling task is using PERF_COUNT_SW_CPU_CLOCK to profiling the process with the CPU clock.\nOff CPU Off CPU Profiling task is attach the finish_task_switch in krobe to profiling the process.\nNetwork Network Profiling task is intercept IO-related syscall and urprobe in process to identify the network traffic and generate the metrics. Also, the following protocol are supported for analyzing using OpenSSL library, BoringSSL library, GoTLS, NodeTLS or plaintext:\n HTTP/1.x HTTP/2 MySQL CQL(The Cassandra Query Language) MongoDB Kafka DNS  Collecting data Network profiling uses metrics, logs send to the backend service.\nData Type The network profiling has customized the following two types of metrics to represent the network data:\n Counter: Records the total number of data in a certain period of time. Each counter containers the following data:  Count: The count of the execution. Bytes: The package size of the execution. Exe Time: The consumed time(nanosecond) of the execution.   Histogram: Records the distribution of the data in the bucket. TopN: Record the highest latency data in a certain period of time.  Labels Each metric contains the following labels to identify the process relationship:\n   Name Type Description     client_process_id or server_process_id string The ID of the current process, which is determined by the role of the current process in the connection as server or client.   client_local or server_local boolean The remote process is a local process.   client_address or server_address string The remote process address. ex: IP:port.   side enum The current process is either \u0026ldquo;client\u0026rdquo; or \u0026ldquo;server\u0026rdquo; in this connection.   protocol string Identification the protocol based on the package data content.   is_ssl bool Is the current connection using SSL.    
Layer-4 Data Based on the above two data types, the following metrics are provided.\n   Name Type Unit Description     write Counter nanosecond The socket write counter   read Counter nanosecond The socket read counter   write RTT Counter microsecond The socket write RTT counter   connect Counter nanosecond The socket connect/accept with other server/client counter   close Counter nanosecond The socket close counter   retransmit Counter nanosecond The socket retransmit package counter   drop Counter nanosecond The socket drop package counter   write RTT Histogram microsecond The socket write RTT execute time histogram   write execute time Histogram nanosecond The socket write data execute time histogram   read execute time Histogram nanosecond The socket read data execute time histogram   connect execute time Histogram nanosecond The socket connect/accept with other server/client execute time histogram   close execute time Histogram nanosecond The socket close execute time histogram    HTTP/1.x Data Metrics    Name Type Unit Description     http1_request_cpm Counter count The HTTP request counter   http1_response_status_cpm Counter count The count of per HTTP response code   http1_request_package_size Histogram Byte size The request package size   http1_response_package_size Histogram Byte size The response package size   http1_client_duration Histogram millisecond The duration of single HTTP response on the client side   http1_server_duration Histogram millisecond The duration of single HTTP response on the server side    Logs    Name Type Unit Description     slow_traces TopN millisecond The Top N slow trace(id)s   status_4xx TopN millisecond The Top N trace(id)s with response status in 400-499   status_5xx TopN millisecond The Top N trace(id)s with response status in 500-599    Span Attached Event    Name Description     HTTP Request Sampling Complete information about the HTTP request, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   HTTP Response Sampling Complete information about the HTTP response, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   Syscall xxx The methods to use when the process invoke with the network-related syscall method. It\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.    Continuous Profiling The continuous profiling feature monitors low-power target process information, including process CPU usage and network requests, based on configuration passed from the backend. When a threshold is met, it automatically initiates a profiling task(on/off CPU, Network) to provide more detailed analysis.\nMonitor Type System Load Monitor the average system load for the last minute, which is equivalent to using the first value of the load average in the uptime command.\nProcess CPU The target process utilizes a certain percentage of the CPU on the current host.\nProcess Thread Count The real-time number of threads in the target process.\nNetwork Network monitoring uses eBPF technology to collect real-time performance data of the current process responding to requests. Requests sent upstream are not monitored by the system.\nCurrently, network monitoring supports parsing of the HTTP/1.x protocol and supports the following types of monitoring:\n Error Rate: The percentage of network request errors, such as HTTP status codes within the range of [500-600), is considered as erroneous. Avg Response Time: Average response time(ms) for specified URI.  
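Besides the aggregated metrics, the slow_traces, status_4xx, and status_5xx logs listed above are TopN records; a minimal Python sketch of keeping the N highest-latency trace IDs (with hypothetical data) might look like:
import heapq

# Hypothetical (latency_ms, trace_id) pairs observed during one reporting period.
observed = [(120, "t-1"), (980, "t-2"), (45, "t-3"), (1500, "t-4"), (610, "t-5")]

TOP_N = 3
slow_traces = heapq.nlargest(TOP_N, observed)  # keep only the N slowest traces

for latency_ms, trace_id in slow_traces:
    print(trace_id, latency_ms, "ms")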
Metrics Rover would periodically send collected monitoring data to the backend using the Native Meter Protocol.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    ","excerpt":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the …","ref":"/docs/skywalking-rover/next/en/setup/configuration/profiling/","title":"Profiling"},{"body":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the snapshot to the backend server.\nConfiguration    Name Default Environment Key Description     profiling.active true ROVER_PROFILING_ACTIVE Is active the process profiling.   profiling.check_interval 10s ROVER_PROFILING_CHECK_INTERVAL Check the profiling task interval.   profiling.flush_interval 5s ROVER_PROFILING_FLUSH_INTERVAL Combine existing profiling data and report to the backend interval.   profiling.task.on_cpu.dump_period 9ms ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD The profiling stack dump period.   profiling.task.network.report_interval 2s ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_REPORT_INTERVAL The interval of send metrics to the backend.   profiling.task.network.meter_prefix rover_net_p ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_METER_PREFIX The prefix of network profiling metrics name.   profiling.task.network.protocol_analyze.per_cpu_buffer 400KB ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   profiling.task.network.protocol_analyze.parallels 2 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   profiling.task.network.protocol_analyze.queue_size 5000 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyzer queue.   profiling.task.network.protocol_analyze.sampling.http.default_request_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_REQUEST_ENCODING The default body encoding when sampling the request.   profiling.task.network.protocol_analyze.sampling.http.default_response_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_RESPONSE_ENCODING The default body encoding when sampling the response.   profiling.continuous.meter_prefix rover_con_p ROVER_PROFILING_CONTINUOUS_METER_PREFIX The continuous related meters prefix name.   profiling.continuous.fetch_interval 1s ROVER_PROFILING_CONTINUOUS_FETCH_INTERVAL The interval of fetch metrics from the system, such as Process CPU, System Load, etc.   profiling.continuous.check_interval 5s ROVER_PROFILING_CONTINUOUS_CHECK_INTERVAL The interval of check metrics is reach the thresholds.   profiling.continuous.trigger.execute_duration 10m ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION The duration of the profiling task.   profiling.continuous.trigger.silence_duration 20m ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION The minimal duration between the execution of the same profiling task.    Prepare service Before profiling your service, please make sure your service already has the symbol data inside the binary file. So we could locate the stack symbol, It could be checked following these ways:\n objdump: Using objdump --syms path/to/service. 
readelf: Using readelf --syms path/to/service.  Profiling Type All the profiling tasks are using the Linux Official Function and kprobe or uprobe to open perf event, and attach the eBPF Program to dump stacks.\nOn CPU On CPU Profiling task is using PERF_COUNT_SW_CPU_CLOCK to profiling the process with the CPU clock.\nOff CPU Off CPU Profiling task is attach the finish_task_switch in krobe to profiling the process.\nNetwork Network Profiling task is intercept IO-related syscall and urprobe in process to identify the network traffic and generate the metrics. Also, the following protocol are supported for analyzing using OpenSSL library, BoringSSL library, GoTLS, NodeTLS or plaintext:\n HTTP/1.x HTTP/2 MySQL CQL(The Cassandra Query Language) MongoDB Kafka DNS  Collecting data Network profiling uses metrics, logs send to the backend service.\nData Type The network profiling has customized the following two types of metrics to represent the network data:\n Counter: Records the total number of data in a certain period of time. Each counter containers the following data:  Count: The count of the execution. Bytes: The package size of the execution. Exe Time: The consumed time(nanosecond) of the execution.   Histogram: Records the distribution of the data in the bucket. TopN: Record the highest latency data in a certain period of time.  Labels Each metric contains the following labels to identify the process relationship:\n   Name Type Description     client_process_id or server_process_id string The ID of the current process, which is determined by the role of the current process in the connection as server or client.   client_local or server_local boolean The remote process is a local process.   client_address or server_address string The remote process address. ex: IP:port.   side enum The current process is either \u0026ldquo;client\u0026rdquo; or \u0026ldquo;server\u0026rdquo; in this connection.   protocol string Identification the protocol based on the package data content.   is_ssl bool Is the current connection using SSL.    
Layer-4 Data Based on the above two data types, the following metrics are provided.\n   Name Type Unit Description     write Counter nanosecond The socket write counter   read Counter nanosecond The socket read counter   write RTT Counter microsecond The socket write RTT counter   connect Counter nanosecond The socket connect/accept with other server/client counter   close Counter nanosecond The socket close counter   retransmit Counter nanosecond The socket retransmit package counter   drop Counter nanosecond The socket drop package counter   write RTT Histogram microsecond The socket write RTT execute time histogram   write execute time Histogram nanosecond The socket write data execute time histogram   read execute time Histogram nanosecond The socket read data execute time histogram   connect execute time Histogram nanosecond The socket connect/accept with other server/client execute time histogram   close execute time Histogram nanosecond The socket close execute time histogram    HTTP/1.x Data Metrics    Name Type Unit Description     http1_request_cpm Counter count The HTTP request counter   http1_response_status_cpm Counter count The count of per HTTP response code   http1_request_package_size Histogram Byte size The request package size   http1_response_package_size Histogram Byte size The response package size   http1_client_duration Histogram millisecond The duration of single HTTP response on the client side   http1_server_duration Histogram millisecond The duration of single HTTP response on the server side    Logs    Name Type Unit Description     slow_traces TopN millisecond The Top N slow trace(id)s   status_4xx TopN millisecond The Top N trace(id)s with response status in 400-499   status_5xx TopN millisecond The Top N trace(id)s with response status in 500-599    Span Attached Event    Name Description     HTTP Request Sampling Complete information about the HTTP request, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   HTTP Response Sampling Complete information about the HTTP response, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   Syscall xxx The methods to use when the process invoke with the network-related syscall method. It\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.    Continuous Profiling The continuous profiling feature monitors low-power target process information, including process CPU usage and network requests, based on configuration passed from the backend. When a threshold is met, it automatically initiates a profiling task(on/off CPU, Network) to provide more detailed analysis.\nMonitor Type System Load Monitor the average system load for the last minute, which is equivalent to using the first value of the load average in the uptime command.\nProcess CPU The target process utilizes a certain percentage of the CPU on the current host.\nProcess Thread Count The real-time number of threads in the target process.\nNetwork Network monitoring uses eBPF technology to collect real-time performance data of the current process responding to requests. Requests sent upstream are not monitored by the system.\nCurrently, network monitoring supports parsing of the HTTP/1.x protocol and supports the following types of monitoring:\n Error Rate: The percentage of network request errors, such as HTTP status codes within the range of [500-600), is considered as erroneous. Avg Response Time: Average response time(ms) for specified URI.  
Metrics Rover would periodically send collected monitoring data to the backend using the Native Meter Protocol.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    ","excerpt":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/profiling/","title":"Profiling"},{"body":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  
rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","excerpt":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its …","ref":"/docs/main/latest/en/api/profiling-protocol/","title":"Profiling APIs"},{"body":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","excerpt":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its …","ref":"/docs/main/next/en/api/profiling-protocol/","title":"Profiling APIs"},{"body":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","excerpt":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its …","ref":"/docs/main/v9.4.0/en/api/profiling-protocol/","title":"Profiling APIs"},{"body":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","excerpt":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its …","ref":"/docs/main/v9.5.0/en/api/profiling-protocol/","title":"Profiling APIs"},{"body":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","excerpt":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its …","ref":"/docs/main/v9.6.0/en/api/profiling-protocol/","title":"Profiling APIs"},{"body":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","excerpt":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its …","ref":"/docs/main/v9.7.0/en/api/profiling-protocol/","title":"Profiling APIs"},{"body":"Project Structure  agent: The agent core files copied when hybrid compilation. bin: The binary files of Go agent program. docs: The documentation of Go agent. log: The log configuration for adapt the Golang agent. plugins: The plugins for adapt the frameworks.  core: Agent core and API for the SkyWalking Agent, the plugins should import this module. xxx: The plugins for adapt the framework.   reporter: The reporter for adapt the SkyWalking backend. tools/go-agent: The Golang Agent enhancement program.  cmd: The agent starter. config: The application register configuration for agent. instrument: Perform enhancement on different packages during hybrid compilation.  agentcore: When compiling SkyWalking Go, enhance its code, mainly for Agent Core file copying. api: The API of the instrument. entry: When compiling the main package, enhance its code, mainly focusing on starting the Agent system. plugins: When detecting a framework that requires enhancement, enhance its. For specific operation details, please refer to the Key Principle document. reporter: When compiling the reporter package under agent, enhance its code, mainly focusing on starting the reporter. runtime: When compiling the runtime package, enhance its code. For specific operation details, please refer to the Key Principle document.   tools: helps to build the agent.    ","excerpt":"Project Structure  agent: The agent core files copied when hybrid compilation. bin: The binary files …","ref":"/docs/skywalking-go/latest/en/concepts-and-designs/project-structure/","title":"Project Structure"},{"body":"Project Structure  agent: The agent core files copied when hybrid compilation. bin: The binary files of Go agent program. docs: The documentation of Go agent. log: The log configuration for adapt the Golang agent. 
plugins: The plugins for adapt the frameworks.  core: Agent core and API for the SkyWalking Agent, the plugins should import this module. xxx: The plugins for adapt the framework.   reporter: The reporter for adapt the SkyWalking backend. tools/go-agent: The Golang Agent enhancement program.  cmd: The agent starter. config: The application register configuration for agent. instrument: Perform enhancement on different packages during hybrid compilation.  agentcore: When compiling SkyWalking Go, enhance its code, mainly for Agent Core file copying. api: The API of the instrument. entry: When compiling the main package, enhance its code, mainly focusing on starting the Agent system. plugins: When detecting a framework that requires enhancement, enhance its. For specific operation details, please refer to the Key Principle document. reporter: When compiling the reporter package under agent, enhance its code, mainly focusing on starting the reporter. runtime: When compiling the runtime package, enhance its code. For specific operation details, please refer to the Key Principle document.   tools: helps to build the agent.    ","excerpt":"Project Structure  agent: The agent core files copied when hybrid compilation. bin: The binary files …","ref":"/docs/skywalking-go/next/en/concepts-and-designs/project-structure/","title":"Project Structure"},{"body":"Project Structure  agent: The agent core files copied when hybrid compilation. bin: The binary files of Go agent program. docs: The documentation of Go agent. log: The log configuration for adapt the Golang agent. plugins: The plugins for adapt the frameworks.  core: Agent core and API for the SkyWalking Agent, the plugins should import this module. xxx: The plugins for adapt the framework.   reporter: The reporter for adapt the SkyWalking backend. tools/go-agent: The Golang Agent enhancement program.  cmd: The agent starter. config: The application register configuration for agent. instrument: Perform enhancement on different packages during hybrid compilation.  agentcore: When compiling SkyWalking Go, enhance its code, mainly for Agent Core file copying. api: The API of the instrument. entry: When compiling the main package, enhance its code, mainly focusing on starting the Agent system. plugins: When detecting a framework that requires enhancement, enhance its. For specific operation details, please refer to the Key Principle document. reporter: When compiling the reporter package under agent, enhance its code, mainly focusing on starting the reporter. runtime: When compiling the runtime package, enhance its code. For specific operation details, please refer to the Key Principle document.   tools: helps to build the agent.    ","excerpt":"Project Structure  agent: The agent core files copied when hybrid compilation. bin: The binary files …","ref":"/docs/skywalking-go/v0.4.0/en/concepts-and-designs/project-structure/","title":"Project Structure"},{"body":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, and common utils.  internal/pkg: Sharing with Core and Plugins, such as api and utils. internal/satellite: The core of Satellite.   plugins: Contains all plugins.  plugins/{type}: Contains the plugins of this {type}. Satellite has 9 plugin types. plugins/{type}/api: Contains the plugin definition and initializer. plugins/{type}/{plugin-name}: Contains the specific plugin. init.go: Register the plugins to the plugin registry.    . 
├── CHANGES.md ├── cmd ├── configs ├── docs ├── go.sum ├── internal │ ├── pkg │ └── satellite ├── plugins │ ├── client │ ├── fallbacker │ ├── fetcher │ ├── filter │ ├── forwarder │ ├── init.go │ ├── parser │ ├── queue │ ├── receiver │ └── server ","excerpt":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, …","ref":"/docs/skywalking-satellite/latest/en/concepts-and-designs/project_structue/","title":"Project Structure"},{"body":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, and common utils.  internal/pkg: Sharing with Core and Plugins, such as api and utils. internal/satellite: The core of Satellite.   plugins: Contains all plugins.  plugins/{type}: Contains the plugins of this {type}. Satellite has 9 plugin types. plugins/{type}/api: Contains the plugin definition and initializer. plugins/{type}/{plugin-name}: Contains the specific plugin. init.go: Register the plugins to the plugin registry.    . ├── CHANGES.md ├── cmd ├── configs ├── docs ├── go.sum ├── internal │ ├── pkg │ └── satellite ├── plugins │ ├── client │ ├── fallbacker │ ├── fetcher │ ├── filter │ ├── forwarder │ ├── init.go │ ├── parser │ ├── queue │ ├── receiver │ └── server ","excerpt":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, …","ref":"/docs/skywalking-satellite/next/en/concepts-and-designs/project_structue/","title":"Project Structure"},{"body":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, and common utils.  internal/pkg: Sharing with Core and Plugins, such as api and utils. internal/satellite: The core of Satellite.   plugins: Contains all plugins.  plugins/{type}: Contains the plugins of this {type}. Satellite has 9 plugin types. plugins/{type}/api: Contains the plugin definition and initializer. plugins/{type}/{plugin-name}: Contains the specific plugin. init.go: Register the plugins to the plugin registry.    . ├── CHANGES.md ├── cmd ├── configs ├── docs ├── go.sum ├── internal │ ├── pkg │ └── satellite ├── plugins │ ├── client │ ├── fallbacker │ ├── fetcher │ ├── filter │ ├── forwarder │ ├── init.go │ ├── parser │ ├── queue │ ├── receiver │ └── server ","excerpt":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, …","ref":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/project_structue/","title":"Project Structure"},{"body":"Prometheus Fetcher Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine.\nConfiguration file Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching services and their instances, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/fetcher-prom-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;duration\u0026gt;: This is parsed into a textual representation of a duration. The formats accepted are based on the ISO-8601 duration format PnDTnHnMn.nS with days considered to be exactly 24 hours. 
\u0026lt;labelname\u0026gt;: A string matching the regular expression [a-zA-Z_][a-zA-Z0-9_]*. \u0026lt;labelvalue\u0026gt;: A string of unicode characters. \u0026lt;host\u0026gt;: A valid string consisting of a hostname or IP followed by an optional port number. \u0026lt;path\u0026gt;: A valid URL path. \u0026lt;string\u0026gt;: A regular string.  # How frequently to fetch targets.fetcherInterval:\u0026lt;duration\u0026gt;# Per-fetch timeout when fetching this target.fetcherTimeout:\u0026lt;duration\u0026gt;# The HTTP resource path on which to fetch metrics from targets.metricsPath:\u0026lt;path\u0026gt;#Statically configured targets.staticConfig:# The targets specified by the static config.targets:[- \u0026lt;target\u0026gt; ]# Labels assigned to all metrics fetched from the targets.labels:[ \u0026lt;labelname\u0026gt;:\u0026lt;labelvalue\u0026gt; ... ]# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ] # The url of target exporter. the format should be complied with \u0026#34;java.net.URI\u0026#34;url:\u0026lt;string\u0026gt;# The path of root CA file.sslCaFilePath:\u0026lt;string\u0026gt;\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;To know more about MAL, please refer to mal.md\nActive Fetcher Rules Suppose you want to enable some metric-custom.yaml files stored at fetcher-prom-rules, append its name to enabledRules of prometheus-fetcher as follows:\nprometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self,metric-custom\u0026#34;}","excerpt":"Prometheus Fetcher Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the …","ref":"/docs/main/v9.0.0/en/setup/backend/prometheus-metrics/","title":"Prometheus Fetcher"},{"body":"Prometheus Fetcher Prometheus fetcher reads metrics from the Prometheus endpoint and transfers the metrics into SkyWalking native format for the MAL engine.\nConfiguration file Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching services and their instances, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/fetcher-prom-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;duration\u0026gt;: This is parsed into a textual representation of a duration. The accepted formats are based on the ISO-8601 duration format PnDTnHnMn.nS with days of exactly 24 hours. \u0026lt;labelname\u0026gt;: A string matching the regular expression [a-zA-Z_][a-zA-Z0-9_]*. \u0026lt;labelvalue\u0026gt;: A string of Unicode characters. 
\u0026lt;host\u0026gt;: A valid string consisting of a hostname or IP followed by an optional port number. \u0026lt;path\u0026gt;: A valid URL path. \u0026lt;string\u0026gt;: A regular string.  # How frequently to fetch targets.fetcherInterval:\u0026lt;duration\u0026gt;# Per-fetch timeout when fetching this target.fetcherTimeout:\u0026lt;duration\u0026gt;# The HTTP resource path on which to fetch metrics from targets.metricsPath:\u0026lt;path\u0026gt;#Statically configured targets.staticConfig:# The targets specified by the static config.targets:[- \u0026lt;target\u0026gt; ]# Labels assigned to all metrics fetched from the targets.labels:[ \u0026lt;labelname\u0026gt;:\u0026lt;labelvalue\u0026gt; ... ]# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ] # The url of target exporter. the format should be complied with \u0026#34;java.net.URI\u0026#34;url:\u0026lt;string\u0026gt;# The path of root CA file.sslCaFilePath:\u0026lt;string\u0026gt;\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;To know more about MAL, please refer to mal.md\nActive Fetcher Rules Suppose you want to enable some metric-custom.yaml files stored at fetcher-prom-rules, append its name to enabledRules of prometheus-fetcher as follows:\nprometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self,metric-custom\u0026#34;}","excerpt":"Prometheus Fetcher Prometheus fetcher reads metrics from the Prometheus endpoint and transfers the …","ref":"/docs/main/v9.1.0/en/setup/backend/prometheus-metrics/","title":"Prometheus Fetcher"},{"body":"Prometheus Fetcher Prometheus fetcher reads metrics from the Prometheus endpoint and transfers the metrics into SkyWalking native format for the MAL engine.\nConfiguration file Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching services and their instances, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/fetcher-prom-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;duration\u0026gt;: This is parsed into a textual representation of a duration. The accepted formats are based on the ISO-8601 duration format PnDTnHnMn.nS with days of exactly 24 hours. \u0026lt;labelname\u0026gt;: A string matching the regular expression [a-zA-Z_][a-zA-Z0-9_]*. \u0026lt;labelvalue\u0026gt;: A string of Unicode characters. \u0026lt;host\u0026gt;: A valid string consisting of a hostname or IP followed by an optional port number. \u0026lt;path\u0026gt;: A valid URL path. 
\u0026lt;string\u0026gt;: A regular string.  # How frequently to fetch targets.fetcherInterval:\u0026lt;duration\u0026gt;# Per-fetch timeout when fetching this target.fetcherTimeout:\u0026lt;duration\u0026gt;# The HTTP resource path on which to fetch metrics from targets.metricsPath:\u0026lt;path\u0026gt;#Statically configured targets.staticConfig:# The targets specified by the static config.targets:[- \u0026lt;target\u0026gt; ]# Labels assigned to all metrics fetched from the targets.labels:[ \u0026lt;labelname\u0026gt;:\u0026lt;labelvalue\u0026gt; ... ]# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ] # The url of target exporter. the format should be complied with \u0026#34;java.net.URI\u0026#34;url:\u0026lt;string\u0026gt;# The path of root CA file.sslCaFilePath:\u0026lt;string\u0026gt;\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;To know more about MAL, please refer to mal.md\nActive Fetcher Rules Suppose you want to enable some metric-custom.yaml files stored at fetcher-prom-rules, append its name to enabledRules of prometheus-fetcher as follows:\nprometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self,metric-custom\u0026#34;}","excerpt":"Prometheus Fetcher Prometheus fetcher reads metrics from the Prometheus endpoint and transfers the …","ref":"/docs/main/v9.2.0/en/setup/backend/prometheus-metrics/","title":"Prometheus Fetcher"},{"body":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. 
If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. 
no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    
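As a minimal, hypothetical sketch of driving the instant and range query endpoints described above from Python (standard library only): the OAP address and the service name are placeholders, and the response layout matches the sample results shown.

```python
# Minimal sketch: call the PromQL Service's /api/v1/query and
# /api/v1/query_range endpoints. BASE and the selector values are
# placeholders; adjust them to your deployment.
import json
import time
import urllib.parse
import urllib.request

BASE = "http://localhost:9090"  # hypothetical PromQL Service address


def promql(endpoint: str, params: dict) -> dict:
    """Issue a GET request against the PromQL Service and decode the JSON body."""
    url = f"{BASE}{endpoint}?{urllib.parse.urlencode(params)}"
    with urllib.request.urlopen(url) as resp:
        return json.loads(resp.read())


# Instant query: latest value of service_cpm for one service/layer.
instant = promql("/api/v1/query", {
    "query": "service_cpm{service='agent::songs', layer='GENERAL'}",
})

# Range query over the last 10 minutes. No `step` is sent because
# SkyWalking fits the step (DAY/HOUR/MINUTE) from start/end automatically.
now = int(time.time())
rng = promql("/api/v1/query_range", {
    "query": "service_cpm{service='agent::songs', layer='GENERAL'}",
    "start": now - 600,
    "end": now,
})

print(instant["data"]["resultType"], rng["data"]["resultType"])  # vector matrix
```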
For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  
Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. --top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  
top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","excerpt":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs …","ref":"/docs/main/latest/en/api/promql-service/","title":"PromQL Service"},{"body":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. 
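A minimal, hypothetical sketch of consuming a Labeled Value Metric such as `service_percentile` from Python: the `labels`/`relabels` query labels filter and rename the value labels, and each returned series carries the renamed value under the `label` key (as in the sample result above), so the vector can be folded into a plain dict. The base address is a placeholder.

```python
# Minimal sketch: query a labeled-value metric and index the result by the
# renamed value label. BASE is a hypothetical placeholder.
import json
import urllib.parse
import urllib.request

BASE = "http://localhost:9090"  # hypothetical PromQL Service address

query = ("service_percentile{service='agent::songs', layer='GENERAL', "
         "labels='0,1,2', relabels='P50,P75,P90'}")
url = f"{BASE}/api/v1/query?{urllib.parse.urlencode({'query': query})}"
with urllib.request.urlopen(url) as resp:
    body = json.loads(resp.read())

# Each series exposes the renamed value label under "label".
percentiles = {
    series["metric"]["label"]: series["value"][1]
    for series in body["data"]["result"]
}
print(percentiles)  # e.g. {'P50': '0', 'P75': '0', 'P90': '0'}
```

The same request pattern covers Sort Metrics and Sampled Records; only the query labels change (`parent_service`, `top_n`, `order`), as in the expression examples above.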
If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. 
no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp, if end time is not present, use current time as default end time yes yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector 
yes yes   start start timestamp no yes   end end timestamp, if end time is not present, use current time as default end time yes yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
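A minimal, hypothetical sketch of the metadata endpoints described above (`/api/v1/series`, `/api/v1/labels`, `/api/v1/label/<label_name>/values`) from Python; the base address and the time window are placeholders, and the selectors are taken from the documented examples.

```python
# Minimal sketch: discover series, label names, and metric names through the
# PromQL Service metadata endpoints. BASE is a hypothetical placeholder.
import json
import urllib.parse
import urllib.request

BASE = "http://localhost:9090"  # hypothetical PromQL Service address


def get(endpoint: str, params: dict) -> dict:
    """GET a metadata endpoint and decode the JSON body."""
    url = f"{BASE}{endpoint}?{urllib.parse.urlencode(params)}"
    with urllib.request.urlopen(url) as resp:
        return json.loads(resp.read())


# Series matching a selector within a time window (timestamps in seconds).
series = get("/api/v1/series", {
    "match[]": "service_traffic{layer='GENERAL'}",
    "start": 1677479336,
    "end": 1677479636,
})

# Label names available for a metric, and all known metric names.
labels = get("/api/v1/labels", {"match[]": "instance_jvm_cpu"})
metric_names = get("/api/v1/label/__name__/values", {})

print(len(series["data"]), labels["data"], metric_names["data"][:5])
```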
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --metric labels: Used to filter the value labels to be returned  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, p=\u0026#39;50,75,90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;p\u0026#34;: \u0026#34;50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;p\u0026#34;: \u0026#34;75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;p\u0026#34;: \u0026#34;90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","excerpt":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs …","ref":"/docs/main/next/en/api/promql-service/","title":"PromQL Service"},{"body":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromeQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;scope\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) All scopes are not supported completely, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
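A minimal, hypothetical sketch for `/api/v1/metadata`: both `limit` and the `metric` regular-expression filter are documented parameters; the base address and the regex value are placeholders.

```python
# Minimal sketch: fetch metric metadata, limiting the result set and filtering
# metric names with a regular expression. BASE and the regex are placeholders.
import json
import urllib.parse
import urllib.request

BASE = "http://localhost:9090"  # hypothetical PromQL Service address

params = {"limit": 10, "metric": "service_.*"}
url = f"{BASE}/api/v1/metadata?{urllib.parse.urlencode(params)}"
with urllib.request.urlopen(url) as resp:
    metadata = json.loads(resp.read())

for name, entries in metadata["data"].items():
    # Each entry reports type/help/unit; in the sample results above SkyWalking
    # returns gauges with empty help/unit strings.
    print(name, entries[0]["type"])
```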
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","excerpt":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs …","ref":"/docs/main/v9.4.0/en/api/promql-service/","title":"PromQL Service"},{"body":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
General labels
Each metric contains general labels: layer. Different metrics will have different labels depending on their Scope and metric value type.

Query Labels | Scope | Expression Example
layer, service | Service | service_cpm{service='$service', layer='$layer'}
layer, service, service_instance | ServiceInstance | service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}
layer, service, endpoint | Endpoint | endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}

Common Value Metrics
Query Labels:
- {General labels}
Expression Example:
service_cpm{service='agent::songs', layer='GENERAL'}
Result (Instant Query):
{ "status": "success", "data": { "resultType": "vector", "result": [ { "metric": { "__name__": "service_cpm", "layer": "GENERAL", "scope": "Service", "service": "agent::songs" }, "value": [ 1677490740, "3" ] } ] } }

Labeled Value Metrics
Query Labels:
- {General labels}
- labels: used to filter the value labels to be returned
- relabels: used to rename the returned value labels
Note: the number and order of labels must match the number and order of relabels.
Expression Example:
service_percentile{service='agent::songs', layer='GENERAL', labels='0,1,2', relabels='P50,P75,P90'}
Result (Instant Query):
{ "status": "success", "data": { "resultType": "vector", "result": [ { "metric": { "__name__": "service_percentile", "label": "P50", "layer": "GENERAL", "scope": "Service", "service": "agent::songs" }, "value": [ 1677493380, "0" ] }, { "metric": { "__name__": "service_percentile", "label": "P75", "layer": "GENERAL", "scope": "Service", "service": "agent::songs" }, "value": [ 1677493380, "0" ] }, { "metric": { "__name__": "service_percentile", "label": "P90", "layer": "GENERAL", "scope": "Service", "service": "agent::songs" }, "value": [ 1677493380, "0" ] } ] } }
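Because labels and relabels are matched positionally, it helps to build them from one list pair and check that the lengths agree before sending the query. A small illustrative sketch (not an official client):

```python
# Build a labeled-value query and verify that labels and relabels line up,
# as the API requires equal count and order.
labels = ["0", "1", "2"]          # percentile buckets to return
relabels = ["P50", "P75", "P90"]  # display names, positionally matched to labels
assert len(labels) == len(relabels), "labels and relabels must have the same length"

expr = (
    "service_percentile{service='agent::songs', layer='GENERAL', "
    f"labels='{','.join(labels)}', relabels='{','.join(relabels)}'}}"
)
print(expr)
```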
Sort Metrics
Query Labels:
- parent_service: <optional> name of the parent service
- top_n: the maximum number of selected metric values
- order: ASC/DES
Expression Example:
service_instance_cpm{parent_service='agent::songs', layer='GENERAL', top_n='10', order='DES'}
Result (Instant Query):
{ "status": "success", "data": { "resultType": "vector", "result": [ { "metric": { "__name__": "service_instance_cpm", "layer": "GENERAL", "scope": "ServiceInstance", "service_instance": "651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28" }, "value": [ 1677494280, "14" ] }, { "metric": { "__name__": "service_instance_cpm", "layer": "GENERAL", "scope": "ServiceInstance", "service_instance": "4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232" }, "value": [ 1677494280, "6" ] }, { "metric": { "__name__": "service_instance_cpm", "layer": "GENERAL", "scope": "ServiceInstance", "service_instance": "f5ac8ead31af4e6795cae761729a2742@10.4.0.236" }, "value": [ 1677494280, "5" ] } ] } }

Sampled Records
Query Labels:
- parent_service: name of the parent service
- top_n: the maximum number of selected record values
- order: ASC/DES
Expression Example:
top_n_database_statement{parent_service='localhost:-1', layer='VIRTUAL_DATABASE', top_n='10', order='DES'}
Result (Instant Query):
{ "status": "success", "data": { "resultType": "vector", "result": [ { "metric": { "__name__": "top_n_database_statement", "layer": "VIRTUAL_DATABASE", "scope": "Service", "record": "select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked>?" }, "value": [ 1677501360, "1" ] }, { "metric": { "__name__": "top_n_database_statement", "layer": "VIRTUAL_DATABASE", "scope": "Service", "record": "select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked>?" }, "value": [ 1677501360, "1" ] }, { "metric": { "__name__": "top_n_database_statement", "layer": "VIRTUAL_DATABASE", "scope": "Service", "record": "select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked>?" }, "value": [ 1677501360, "1" ] } ] } }
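For both sort metrics and sampled records, the top_n and order labels are applied server-side, so the client only needs to flatten the returned vector. A sketch, again assuming the service at http://localhost:9090:

```python
import requests

BASE = "http://localhost:9090"  # assumed PromQL service address; adjust to your OAP deployment
EXPR = ("service_instance_cpm{parent_service='agent::songs', layer='GENERAL', "
        "top_n='10', order='DES'}")

resp = requests.get(f"{BASE}/api/v1/query", params={"query": EXPR}).json()

# Each result element carries one instance and its latest value; the OAP already applied
# the top_n/order labels, so this only flattens the payload for display.
ranking = [
    (r["metric"]["service_instance"], float(r["value"][1]))
    for r in resp["data"]["result"]
]
for instance, cpm in ranking:
    print(f"{instance}: {cpm}")
```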
excerpt: PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs …
ref: /docs/main/v9.5.0/en/api/promql-service/
title: PromQL Service
excerpt: PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs …
ref: /docs/main/v9.6.0/en/api/promql-service/
title: PromQL Service
excerpt: PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs …
ref: /docs/main/v9.7.0/en/api/promql-service/
title: PromQL Service

Protocol Documentation

Table of Contents
- banyandb/cluster/v1/rpc.proto: SendRequest, SendResponse, Service
- banyandb/common/v1/common.proto: Group, IntervalRule, Metadata, ResourceOpts, Catalog, IntervalRule.Unit
- banyandb/database/v1/database.proto: Node, Shard, Role
- banyandb/model/v1/common.proto: FieldValue, Float, Int, IntArray, Str, StrArray, TagFamilyForWrite, TagValue, AggregationFunction
- banyandb/model/v1/query.proto: Condition, Criteria, LogicalExpression, QueryOrder, Tag, TagFamily, TagProjection, TagProjection.TagFamily, TimeRange, Condition.BinaryOp, LogicalExpression.LogicalOp, Sort
- banyandb/database/v1/schema.proto: Entity, FieldSpec, IndexRule, IndexRuleBinding, Measure, Stream, Subject, TagFamilySpec, TagSpec, TopNAggregation, CompressionMethod, EncodingMethod, FieldType, IndexRule.Analyzer, IndexRule.Location, IndexRule.Type, TagType
- banyandb/database/v1/rpc.proto: Create/Delete/Exist/Get/List/Update Request and Response messages for GroupRegistryService, IndexRuleBindingRegistryService, IndexRuleRegistryService, MeasureRegistryService, StreamRegistryService, TopNAggregationRegistryService, plus the six registry services themselves
- banyandb/measure/v1/query.proto: DataPoint, DataPoint.Field, QueryRequest, QueryRequest.Aggregation, QueryRequest.FieldProjection, QueryRequest.GroupBy, QueryRequest.Top, QueryResponse
- banyandb/measure/v1/topn.proto: TopNList, TopNList.Item, TopNRequest, TopNResponse
- banyandb/model/v1/write.proto: Status
- banyandb/measure/v1/write.proto: DataPointValue, InternalWriteRequest, WriteRequest, WriteResponse
- banyandb/measure/v1/rpc.proto: MeasureService
- banyandb/property/v1/property.proto: Metadata, Property
- banyandb/property/v1/rpc.proto: ApplyRequest, ApplyResponse, DeleteRequest, DeleteResponse, GetRequest, GetResponse, KeepAliveRequest, KeepAliveResponse, ListRequest, ListResponse, ApplyRequest.Strategy, PropertyService
- banyandb/stream/v1/query.proto: Element, QueryRequest, QueryResponse
- banyandb/stream/v1/write.proto: ElementValue, InternalWriteRequest, WriteRequest, WriteResponse
- banyandb/stream/v1/rpc.proto: StreamService
- Scalar Value Types
banyandb/cluster/v1/rpc.proto

SendRequest
Field | Type | Label | Description
topic | string | |
message_id | uint64 | |
body | google.protobuf.Any | |

SendResponse
Field | Type | Label | Description
message_id | uint64 | |
error | string | |
body | google.protobuf.Any | |

Service
Method Name | Request Type | Response Type | Description
Send | SendRequest stream | SendResponse stream |

banyandb/common/v1/common.proto

Group
Group is an internal object for Group management.
Field | Type | Label | Description
metadata | Metadata | | metadata defines the group's identity
catalog | Catalog | | catalog denotes which type of data the group contains
resource_opts | ResourceOpts | | resourceOpts indicates the structure of the underlying kv storage
updated_at | google.protobuf.Timestamp | | updated_at indicates when resources of the group are updated

IntervalRule
IntervalRule is a structured duration.
Field | Type | Label | Description
unit | IntervalRule.Unit | | unit can only be UNIT_HOUR or UNIT_DAY
num | uint32 | |

Metadata
Metadata is for multi-tenant, multi-model use.
Field | Type | Label | Description
group | string | | group contains a set of options, like retention policy, max
name | string | | name of the entity
id | uint32 | |
create_revision | int64 | | readonly. create_revision is the revision of the last creation on this key.
mod_revision | int64 | | readonly. mod_revision is the revision of the last modification on this key.
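To make the relationship between Group, Metadata, IntervalRule and ResourceOpts concrete, here is a hypothetical dict rendering of a group definition. The field names come from the tables above (the ResourceOpts fields are detailed in the next section), while the group name and interval values are made-up examples and the dict form itself is only a reading aid, not an official BanyanDB client call:

```python
# Hypothetical dict rendering of the Group message documented above.
# "sw_metric" and the interval numbers are example values, not shipped defaults.
group = {
    "metadata": {"group": "", "name": "sw_metric"},  # Metadata: identity of the group
    "catalog": "CATALOG_MEASURE",                    # which type of data the group contains
    "resource_opts": {                               # structure of the underlying kv storage
        "shard_num": 2,
        "segment_interval": {"unit": "UNIT_DAY", "num": 1},   # IntervalRule: structured duration
        "block_interval": {"unit": "UNIT_HOUR", "num": 4},
        "ttl": {"unit": "UNIT_DAY", "num": 7},
    },
}
print(group)
```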
ResourceOpts
Field | Type | Label | Description
shard_num | uint32 | | shard_num is the number of shards
block_interval | IntervalRule | | block_interval indicates the length of a block; block_interval should be less than or equal to segment_interval
segment_interval | IntervalRule | | segment_interval indicates the length of a segment
ttl | IntervalRule | | ttl indicates time to live, i.e. how long the data will be cached

Catalog
Name | Number | Description
CATALOG_UNSPECIFIED | 0 |
CATALOG_STREAM | 1 |
CATALOG_MEASURE | 2 |

IntervalRule.Unit
Name | Number | Description
UNIT_UNSPECIFIED | 0 |
UNIT_HOUR | 1 |
UNIT_DAY | 2 |

banyandb/database/v1/database.proto

Node
Field | Type | Label | Description
metadata | banyandb.common.v1.Metadata | |
roles | Role | repeated |
grpc_address | string | |
http_address | string | |
created_at | google.protobuf.Timestamp | |

Shard
Field | Type | Label | Description
id | uint64 | |
metadata | banyandb.common.v1.Metadata | |
catalog | banyandb.common.v1.Catalog | |
node | string | |
total | uint32 | |
updated_at | google.protobuf.Timestamp | |
created_at | google.protobuf.Timestamp | |

Role
Name | Number | Description
ROLE_UNSPECIFIED | 0 |
ROLE_META | 1 |
ROLE_DATA | 2 |
ROLE_LIAISON | 3 |

banyandb/model/v1/common.proto

FieldValue
Field | Type | Label | Description
null | google.protobuf.NullValue | |
str | Str | |
int | Int | |
binary_data | bytes | |
float | Float | |

Float
Field | Type | Label | Description
value | double | |

Int
Field | Type | Label | Description
value | int64 | |

IntArray
Field | Type | Label | Description
value | int64 | repeated |

Str
Field | Type | Label | Description
value | string | |

StrArray
Field | Type | Label | Description
value | string | repeated |

TagFamilyForWrite
Field | Type | Label | Description
tags | TagValue | repeated |

TagValue
Field | Type | Label | Description
null | google.protobuf.NullValue | |
str | Str | |
str_array | StrArray | |
int | Int | |
int_array | IntArray | |
binary_data | bytes | |

AggregationFunction
Name | Number | Description
AGGREGATION_FUNCTION_UNSPECIFIED | 0 |
AGGREGATION_FUNCTION_MEAN | 1 |
AGGREGATION_FUNCTION_MAX | 2 |
AGGREGATION_FUNCTION_MIN | 3 |
AGGREGATION_FUNCTION_COUNT | 4 |
AGGREGATION_FUNCTION_SUM | 5 |

banyandb/model/v1/query.proto

Condition
Condition consists of the query condition with a single binary operator to be imposed. For a 1:1 BinaryOp, values in the condition must be an array with length = 1, while for a 1:N BinaryOp, values can be an array with length >= 1.
Field | Type | Label | Description
name | string | |
op | Condition.BinaryOp | |
value | TagValue | |

Criteria
tag_families are indexed.
Field | Type | Label | Description
le | LogicalExpression | |
condition | Condition | |

LogicalExpression
LogicalExpression supports logical operations.
Field | Type | Label | Description
op | LogicalExpression.LogicalOp | | op is a logical operation
left | Criteria | |
right | Criteria | |

QueryOrder
QueryOrder means a Sort operation to be done for a given index rule. The index_rule_name refers to the name of an index rule bound to the subject.
Field | Type | Label | Description
index_rule_name | string | |
sort | Sort | |
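Condition, Criteria and LogicalExpression nest to form a query filter tree. The sketch below shows an AND of an equality condition and an IN condition as a plain dict; the tag names (service_id, state) are illustrative assumptions, and the dict form is only a rendering of the documented fields, not a client API:

```python
# Hypothetical dict rendering of a Criteria tree using the message fields documented above.
criteria = {
    "le": {                               # LogicalExpression: AND of two sub-criteria
        "op": "LOGICAL_OP_AND",
        "left": {"condition": {           # Condition: 1:1 binary op, single-value operand
            "name": "service_id",         # example tag name (assumption)
            "op": "BINARY_OP_EQ",
            "value": {"str": {"value": "agent::songs"}},
        }},
        "right": {"condition": {          # Condition: 1:N binary op, multi-value operand
            "name": "state",              # example tag name (assumption)
            "op": "BINARY_OP_IN",
            "value": {"str_array": {"value": ["success", "error"]}},
        }},
    },
}
print(criteria)
```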
Tag
Tag (a pair) is the building block of a record and is equivalent to a key-value pair. In the context of Trace, it could be metadata of a trace such as service_name, service_instance, etc. Other tags are likewise organized as key-value pairs in the underlying storage layer. Note that the value can be a multi-value.
Field | Type | Label | Description
key | string | |
value | TagValue | |

TagFamily
Field | Type | Label | Description
name | string | |
tags | Tag | repeated |

TagProjection
TagProjection is used to select the names of keys to be returned.
Field | Type | Label | Description
tag_families | TagProjection.TagFamily | repeated |

TagProjection.TagFamily
Field | Type | Label | Description
name | string | |
tags | string | repeated |

TimeRange
TimeRange is a range query for uint64. The range follows the left-inclusive and right-exclusive rule, i.e. [begin, end), if both edges exist.
Field | Type | Label | Description
begin | google.protobuf.Timestamp | |
end | google.protobuf.Timestamp | |

Condition.BinaryOp
BinaryOp specifies the operation imposed on the given query condition. For EQ, NE, LT, GT, LE and GE, only one operand should be given, i.e. a one-to-one relationship. HAVING and NOT_HAVING allow a multi-value operand such as an array/vector, i.e. a one-to-many relationship; for example, "keyA" contains "valueA" and "valueB". MATCH performs a full-text search if the tag is analyzed. A string value goes through the same analyzer as the tag, but a string array value does not: each item in a string array is treated as a token instead of a query expression.
Name | Number | Description
BINARY_OP_UNSPECIFIED | 0 |
BINARY_OP_EQ | 1 |
BINARY_OP_NE | 2 |
BINARY_OP_LT | 3 |
BINARY_OP_GT | 4 |
BINARY_OP_LE | 5 |
BINARY_OP_GE | 6 |
BINARY_OP_HAVING | 7 |
BINARY_OP_NOT_HAVING | 8 |
BINARY_OP_IN | 9 |
BINARY_OP_NOT_IN | 10 |
BINARY_OP_MATCH | 11 |

LogicalExpression.LogicalOp
Name | Number | Description
LOGICAL_OP_UNSPECIFIED | 0 |
LOGICAL_OP_AND | 1 |
LOGICAL_OP_OR | 2 |

Sort
Name | Number | Description
SORT_UNSPECIFIED | 0 |
SORT_DESC | 1 |
SORT_ASC | 2 |

banyandb/database/v1/schema.proto

Entity
Field | Type | Label | Description
tag_names | string | repeated |

FieldSpec
FieldSpec is the specification of a field.
Field | Type | Label | Description
name | string | | name is the identity of a field
field_type | FieldType | | field_type denotes the type of the field value
encoding_method | EncodingMethod | | encoding_method indicates how to encode data during writing
compression_method | CompressionMethod | | compression_method indicates how to compress data during writing

IndexRule
IndexRule defines how to generate indices based on tags and the index type. An IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.
Field | Type | Label | Description
metadata | banyandb.common.v1.Metadata | | metadata defines the rule's identity
tags | string | repeated | tags are the combination that refers to an indexed object; if there is more than one element in tags, the object generates a multi-tag index. Caveat: all tags in a multi-tag index MUST have an identical IndexType
type | IndexRule.Type | | type is the IndexType of this IndexObject
location | IndexRule.Location | | location indicates where to store the index
updated_at | google.protobuf.Timestamp | | updated_at indicates when the IndexRule is updated
analyzer | IndexRule.Analyzer | | analyzer analyzes the tag value to support full-text searching for TYPE_INVERTED indices
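A hypothetical IndexRule rendered as a dict, using the fields above and the IndexRule.Type/Location/Analyzer enum values documented further down; the group and rule names are made-up examples and the dict form is only a reading aid, not an official BanyanDB API:

```python
# Hypothetical dict rendering of an IndexRule (field names from the table above).
index_rule = {
    "metadata": {"group": "sw_record", "name": "trace_id"},  # rule identity (example names)
    "tags": ["trace_id"],                 # single-tag index; multi-tag indices need one IndexType
    "type": "TYPE_INVERTED",              # index structure under the hood
    "location": "LOCATION_SERIES",        # where to store the index
    "analyzer": "ANALYZER_UNSPECIFIED",   # only meaningful for full-text TYPE_INVERTED indices
}
print(index_rule)
```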
IndexRuleBinding
IndexRuleBinding is a bridge that connects several IndexRules to a subject. The binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, which provides flexible strategies to control how time series indices are generated.
Field | Type | Label | Description
metadata | banyandb.common.v1.Metadata | | metadata is the identity of this binding
rules | string | repeated | rules refers to the IndexRules
subject | Subject | | subject indicates the subject of the binding action
begin_at | google.protobuf.Timestamp | | begin_at_nanoseconds is the timestamp after which the binding will be active
expire_at | google.protobuf.Timestamp | | expire_at_nanoseconds is the timestamp after which the binding will be inactive; expire_at_nanoseconds must be larger than begin_at_nanoseconds
updated_at | google.protobuf.Timestamp | | updated_at indicates when the IndexRuleBinding is updated

Measure
Measure intends to store data points.
Field | Type | Label | Description
metadata | banyandb.common.v1.Metadata | | metadata is the identity of a measure
tag_families | TagFamilySpec | repeated | tag_families are for filtering measures
fields | FieldSpec | repeated | fields denote measure values
entity | Entity | | entity indicates which tags will be used to generate a series and shard a measure
interval | string | | interval indicates how frequently to send a data point; valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h", "d"
updated_at | google.protobuf.Timestamp | | updated_at indicates when the measure is updated

Stream
Stream intends to store streaming data, for example, traces or logs.
Field | Type | Label | Description
metadata | banyandb.common.v1.Metadata | | metadata is the identity of a trace series
tag_families | TagFamilySpec | repeated | tag_families
entity | Entity | | entity indicates how to generate a series and shard a stream
updated_at | google.protobuf.Timestamp | | updated_at indicates when the stream is updated

Subject
Subject defines which stream or measure would generate indices.
Field | Type | Label | Description
catalog | banyandb.common.v1.Catalog | | catalog is where the subject belongs (todo: validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672)
name | string | | name refers to a stream or measure in a particular catalog

TagFamilySpec
Field | Type | Label | Description
name | string | |
tags | TagSpec | repeated | tags defines the accepted tags

TagSpec
Field | Type | Label | Description
name | string | |
type | TagType | |
indexed_only | bool | | indexed_only indicates whether the tag is stored. True: it is indexed only, but not stored. False: it is stored and indexed
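Putting TagFamilySpec, TagSpec, FieldSpec and Entity together, a Measure schema could be sketched as the dict below; all names and values are illustrative assumptions rather than a shipped BanyanDB schema, and the dict form is only a reading aid, not an official API:

```python
# Hypothetical dict rendering of a Measure schema (field names from the tables above).
measure = {
    "metadata": {"group": "sw_metric", "name": "service_cpm"},  # example identity
    "tag_families": [{
        "name": "default",
        "tags": [
            {"name": "entity_id", "type": "TAG_TYPE_STRING"},
            {"name": "layer", "type": "TAG_TYPE_STRING"},
        ],
    }],
    "fields": [{
        "name": "value",
        "field_type": "FIELD_TYPE_INT",
        "encoding_method": "ENCODING_METHOD_GORILLA",
        "compression_method": "COMPRESSION_METHOD_ZSTD",
    }],
    "entity": {"tag_names": ["entity_id"]},  # tags used to generate a series and shard the measure
    "interval": "1m",                        # how frequently a data point is sent
}
print(measure)
```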
select partial data points from measure   counters_number int32  counters_number sets the number of counters to be tracked. The default value is 1000   lru_size int32  lru_size defines how much entry is allowed to be maintained in the memory   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nCompressionMethod    Name Number Description     COMPRESSION_METHOD_UNSPECIFIED 0    COMPRESSION_METHOD_ZSTD 1     \nEncodingMethod    Name Number Description     ENCODING_METHOD_UNSPECIFIED 0    ENCODING_METHOD_GORILLA 1     \nFieldType    Name Number Description     FIELD_TYPE_UNSPECIFIED 0    FIELD_TYPE_STRING 1    FIELD_TYPE_INT 2    FIELD_TYPE_DATA_BINARY 3    FIELD_TYPE_FLOAT 4     \nIndexRule.Analyzer    Name Number Description     ANALYZER_UNSPECIFIED 0    ANALYZER_KEYWORD 1 Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.   ANALYZER_STANDARD 2 Standard analyzer provides grammar based tokenization   ANALYZER_SIMPLE 3 Simple analyzer breaks text into tokens at any non-letter character, such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, and changes uppercase to lowercase.    \nIndexRule.Location    Name Number Description     LOCATION_UNSPECIFIED 0    LOCATION_SERIES 1    LOCATION_GLOBAL 2     \nIndexRule.Type Type determine the index structure under the hood\n   Name Number Description     TYPE_UNSPECIFIED 0    TYPE_TREE 1    TYPE_INVERTED 2     \nTagType    Name Number Description     TAG_TYPE_UNSPECIFIED 0    TAG_TYPE_STRING 1    TAG_TYPE_INT 2    TAG_TYPE_STRING_ARRAY 3    TAG_TYPE_INT_ARRAY 4    TAG_TYPE_DATA_BINARY 5     \nTop\nbanyandb/database/v1/rpc.proto \nGroupRegistryServiceCreateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceCreateResponse \nGroupRegistryServiceDeleteRequest    Field Type Label Description     group string      \nGroupRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nGroupRegistryServiceExistRequest    Field Type Label Description     group string      \nGroupRegistryServiceExistResponse    Field Type Label Description     has_group bool      \nGroupRegistryServiceGetRequest    Field Type Label Description     group string      \nGroupRegistryServiceGetResponse    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceListRequest \nGroupRegistryServiceListResponse    Field Type Label Description     group banyandb.common.v1.Group repeated     \nGroupRegistryServiceUpdateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceUpdateResponse \nIndexRuleBindingRegistryServiceCreateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceCreateResponse \nIndexRuleBindingRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleBindingRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule_binding bool      \nIndexRuleBindingRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceGetResponse    Field Type Label Description 
    index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleBindingRegistryServiceListResponse    Field Type Label Description     index_rule_binding IndexRuleBinding repeated     \nIndexRuleBindingRegistryServiceUpdateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceUpdateResponse \nIndexRuleRegistryServiceCreateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceCreateResponse \nIndexRuleRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule bool      \nIndexRuleRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceGetResponse    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleRegistryServiceListResponse    Field Type Label Description     index_rule IndexRule repeated     \nIndexRuleRegistryServiceUpdateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceUpdateResponse \nMeasureRegistryServiceCreateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nMeasureRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nMeasureRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_measure bool      \nMeasureRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceGetResponse    Field Type Label Description     measure Measure      \nMeasureRegistryServiceListRequest    Field Type Label Description     group string      \nMeasureRegistryServiceListResponse    Field Type Label Description     measure Measure repeated     \nMeasureRegistryServiceUpdateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceCreateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nStreamRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_stream bool      \nStreamRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata 
     \nStreamRegistryServiceGetResponse    Field Type Label Description     stream Stream      \nStreamRegistryServiceListRequest    Field Type Label Description     group string      \nStreamRegistryServiceListResponse    Field Type Label Description     stream Stream repeated     \nStreamRegistryServiceUpdateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nTopNAggregationRegistryServiceCreateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceCreateResponse \nTopNAggregationRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nTopNAggregationRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_top_n_aggregation bool      \nTopNAggregationRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceGetResponse    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceListRequest    Field Type Label Description     group string      \nTopNAggregationRegistryServiceListResponse    Field Type Label Description     top_n_aggregation TopNAggregation repeated     \nTopNAggregationRegistryServiceUpdateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceUpdateResponse \nGroupRegistryService    Method Name Request Type Response Type Description     Create GroupRegistryServiceCreateRequest GroupRegistryServiceCreateResponse    Update GroupRegistryServiceUpdateRequest GroupRegistryServiceUpdateResponse    Delete GroupRegistryServiceDeleteRequest GroupRegistryServiceDeleteResponse    Get GroupRegistryServiceGetRequest GroupRegistryServiceGetResponse    List GroupRegistryServiceListRequest GroupRegistryServiceListResponse    Exist GroupRegistryServiceExistRequest GroupRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nIndexRuleBindingRegistryService    Method Name Request Type Response Type Description     Create IndexRuleBindingRegistryServiceCreateRequest IndexRuleBindingRegistryServiceCreateResponse    Update IndexRuleBindingRegistryServiceUpdateRequest IndexRuleBindingRegistryServiceUpdateResponse    Delete IndexRuleBindingRegistryServiceDeleteRequest IndexRuleBindingRegistryServiceDeleteResponse    Get IndexRuleBindingRegistryServiceGetRequest IndexRuleBindingRegistryServiceGetResponse    List IndexRuleBindingRegistryServiceListRequest IndexRuleBindingRegistryServiceListResponse    Exist IndexRuleBindingRegistryServiceExistRequest IndexRuleBindingRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. 
Please use HEAD method to touch Get instead    \nIndexRuleRegistryService    Method Name Request Type Response Type Description     Create IndexRuleRegistryServiceCreateRequest IndexRuleRegistryServiceCreateResponse    Update IndexRuleRegistryServiceUpdateRequest IndexRuleRegistryServiceUpdateResponse    Delete IndexRuleRegistryServiceDeleteRequest IndexRuleRegistryServiceDeleteResponse    Get IndexRuleRegistryServiceGetRequest IndexRuleRegistryServiceGetResponse    List IndexRuleRegistryServiceListRequest IndexRuleRegistryServiceListResponse    Exist IndexRuleRegistryServiceExistRequest IndexRuleRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nMeasureRegistryService    Method Name Request Type Response Type Description     Create MeasureRegistryServiceCreateRequest MeasureRegistryServiceCreateResponse    Update MeasureRegistryServiceUpdateRequest MeasureRegistryServiceUpdateResponse    Delete MeasureRegistryServiceDeleteRequest MeasureRegistryServiceDeleteResponse    Get MeasureRegistryServiceGetRequest MeasureRegistryServiceGetResponse    List MeasureRegistryServiceListRequest MeasureRegistryServiceListResponse    Exist MeasureRegistryServiceExistRequest MeasureRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nStreamRegistryService    Method Name Request Type Response Type Description     Create StreamRegistryServiceCreateRequest StreamRegistryServiceCreateResponse    Update StreamRegistryServiceUpdateRequest StreamRegistryServiceUpdateResponse    Delete StreamRegistryServiceDeleteRequest StreamRegistryServiceDeleteResponse    Get StreamRegistryServiceGetRequest StreamRegistryServiceGetResponse    List StreamRegistryServiceListRequest StreamRegistryServiceListResponse    Exist StreamRegistryServiceExistRequest StreamRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTopNAggregationRegistryService    Method Name Request Type Response Type Description     Create TopNAggregationRegistryServiceCreateRequest TopNAggregationRegistryServiceCreateResponse    Update TopNAggregationRegistryServiceUpdateRequest TopNAggregationRegistryServiceUpdateResponse    Delete TopNAggregationRegistryServiceDeleteRequest TopNAggregationRegistryServiceDeleteResponse    Get TopNAggregationRegistryServiceGetRequest TopNAggregationRegistryServiceGetResponse    List TopNAggregationRegistryServiceListRequest TopNAggregationRegistryServiceListResponse    Exist TopNAggregationRegistryServiceExistRequest TopNAggregationRegistryServiceExistResponse     \nTop\nbanyandb/measure/v1/query.proto \nDataPoint DataPoint is stored in Measures\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamily repeated tag_families contains tags selected in the projection   fields DataPoint.Field repeated fields contains fields selected in the projection    \nDataPoint.Field    Field Type Label Description     name string     value banyandb.model.v1.FieldValue      \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   criteria banyandb.model.v1.Criteria  tag_families are indexed.   
tag_projection banyandb.model.v1.TagProjection  tag_projection can be used to select tags of the data points in the response   field_projection QueryRequest.FieldProjection  field_projection can be used to select fields of the data points in the response   group_by QueryRequest.GroupBy  group_by groups data points based on their field value for a specific tag and uses field_name as the projection name   agg QueryRequest.Aggregation  agg aggregates data points based on a field   top QueryRequest.Top  top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by's output   offset uint32  offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top's output   limit uint32  limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top's output   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a tag.    \nQueryRequest.Aggregation    Field Type Label Description     function banyandb.model.v1.AggregationFunction     field_name string  field_name must be one of the fields indicated by the field_projection    \nQueryRequest.FieldProjection    Field Type Label Description     names string repeated     \nQueryRequest.GroupBy    Field Type Label Description     tag_projection banyandb.model.v1.TagProjection  tag_projection must be a subset of the tag_projection of QueryRequest   field_name string  field_name must be one of the fields indicated by field_projection    \nQueryRequest.Top    Field Type Label Description     number int32  number sets how many items should be returned   field_name string  field_name must be one of the fields indicated by the field_projection   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     data_points DataPoint repeated data_points are the actual data returned    \nTop\nbanyandb/measure/v1/topn.proto \nTopNList TopNList contains a series of topN items\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   items TopNList.Item repeated items contain the top-n items in a list    \nTopNList.Item    Field Type Label Description     entity banyandb.model.v1.Tag repeated    value banyandb.model.v1.FieldValue      \nTopNRequest TopNRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   top_n int32  top_n sets how many items should be returned in each list.   agg banyandb.model.v1.AggregationFunction  agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only   conditions banyandb.model.v1.Condition repeated conditions select counters. Only equals are acceptable.   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields    \nTopNResponse TopNResponse is the response for a query to the Query module.\n   Field Type Label Description     lists TopNList repeated lists contain a series of topN lists ranked by timestamp. If agg_func in the query request is specified, the lists' size should be one.    
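To tie the measure query messages above together, here is a hedged sketch of a MeasureService.Query call. The gRPC address, group/measure names, tag family, tag and field names are placeholder example values, and the Go import paths and generated identifiers are assumptions following standard protoc-gen-go conventions; check them against the BanyanDB release you run.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/timestamppb"

	// Assumed locations of the generated stubs.
	commonv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
	measurev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/measure/v1"
	modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
)

func main() {
	// Placeholder endpoint; point it at your BanyanDB node.
	conn, err := grpc.Dial("127.0.0.1:17912", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	now := time.Now()
	req := &measurev1.QueryRequest{
		// metadata is required: group and name identify the measure.
		Metadata: &commonv1.Metadata{Group: "sw_metric", Name: "service_cpm_minute"},
		// time_range follows the [begin, end) rule described earlier.
		TimeRange: &modelv1.TimeRange{
			Begin: timestamppb.New(now.Add(-15 * time.Minute)),
			End:   timestamppb.New(now),
		},
		// tag_projection / field_projection select what comes back.
		TagProjection: &modelv1.TagProjection{
			TagFamilies: []*modelv1.TagProjection_TagFamily{
				{Name: "default", Tags: []string{"entity_id"}},
			},
		},
		FieldProjection: &measurev1.QueryRequest_FieldProjection{Names: []string{"total"}},
		// offset/limit paginate the result set.
		Offset: 0,
		Limit:  20,
	}

	resp, err := measurev1.NewMeasureServiceClient(conn).Query(context.Background(), req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("data points:", len(resp.GetDataPoints()))
}
```

The same QueryRequest skeleton extends naturally with criteria, group_by, agg and top, following the field descriptions above.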
\nTop\nbanyandb/model/v1/write.proto \nStatus Status is the response status for write\n   Name Number Description     STATUS_UNSPECIFIED 0    STATUS_SUCCEED 1    STATUS_INVALID_TIMESTAMP 2    STATUS_NOT_FOUND 3    STATUS_EXPIRED_SCHEMA 4    STATUS_INTERNAL_ERROR 5     \nTop\nbanyandb/measure/v1/write.proto \nDataPointValue DataPointValue is the data point for writing. It only contains values.\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the measure schema   fields banyandb.model.v1.FieldValue repeated the order of fields match the measure schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest WriteRequest is the request contract for write\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   data_point DataPointValue  the data_point is required.   message_id uint64  the message_id is required.    \nWriteResponse WriteResponse is the response contract for write\n   Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/measure/v1/rpc.proto \nMeasureService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream    TopN TopNRequest TopNResponse     \nTop\nbanyandb/property/v1/property.proto \nMetadata Metadata is for multi-tenant use\n   Field Type Label Description     container banyandb.common.v1.Metadata  container is created when it receives the first property   id string  id identifies a property    \nProperty Property stores the user defined data\n   Field Type Label Description     metadata Metadata  metadata is the identity of a property   tags banyandb.model.v1.Tag repeated tag stores the content of a property   updated_at google.protobuf.Timestamp  updated_at indicates when the property is updated   lease_id int64  readonly. lease_id is the ID of the lease that attached to key.   ttl string  ttl indicates the time to live of the property. It's a string in the format of \u0026quot;1h\u0026quot;, \u0026quot;2m\u0026quot;, \u0026quot;3s\u0026quot;, \u0026quot;1500ms\u0026quot;. It defaults to 0s, which means the property never expires. The minimum allowed ttl is 1s.    \nTop\nbanyandb/property/v1/rpc.proto \nApplyRequest    Field Type Label Description     property Property     strategy ApplyRequest.Strategy  strategy indicates how to update a property. It defaults to STRATEGY_MERGE    \nApplyResponse    Field Type Label Description     created bool  created indicates whether the property existed. True: the property is absent. False: the property existed.   
tags_num uint32     lease_id int64      \nDeleteRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nDeleteResponse    Field Type Label Description     deleted bool     tags_num uint32      \nGetRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nGetResponse    Field Type Label Description     property Property      \nKeepAliveRequest    Field Type Label Description     lease_id int64      \nKeepAliveResponse \nListRequest    Field Type Label Description     container banyandb.common.v1.Metadata     ids string repeated    tags string repeated     \nListResponse    Field Type Label Description     property Property repeated     \nApplyRequest.Strategy    Name Number Description     STRATEGY_UNSPECIFIED 0    STRATEGY_MERGE 1    STRATEGY_REPLACE 2     \nPropertyService    Method Name Request Type Response Type Description     Apply ApplyRequest ApplyResponse Apply creates a property if it's absent, or updates an existing one based on a strategy.   Delete DeleteRequest DeleteResponse    Get GetRequest GetResponse    List ListRequest ListResponse    KeepAlive KeepAliveRequest KeepAliveResponse     \nTop\nbanyandb/stream/v1/query.proto \nElement Element represents (in the stream context) a Span defined in the Google Dapper paper, or equivalently a Segment in SkyWalking; (in the log context) a log\n   Field Type Label Description     element_id string  element_id could be the span_id of a Span or the segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp represents a millisecond: 1) either the start time of a Span/Segment, or 2) the timestamp of a log   tag_families banyandb.model.v1.TagFamily repeated tag_families contains all indexed tags. Some typical names: - stream_id - duration - service_name - service_instance_id - end_time_milliseconds    \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds. In the context of stream, it represents the range of the startTime for spans/segments, while in the context of Log, it means the range of the timestamp(s) for logs. It is always recommended to specify a time range for performance reasons   offset uint32  offset is used to support pagination, together with the following limit   limit uint32  limit is used to impose a boundary on the number of records being returned   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a field. So far, only fields of the Integer type are supported   criteria banyandb.model.v1.Criteria  tag_families are indexed.   projection banyandb.model.v1.TagProjection  projection can be used to select the key names of the element in the response    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     elements Element repeated elements are the actual data returned    \nTop\nbanyandb/stream/v1/write.proto \nElementValue    Field Type Label Description     element_id string  element_id could be the span_id of a Span or the segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds. 
It represents 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the stream schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   element ElementValue  the element is required.   message_id uint64  the message_id is required.    \nWriteResponse    Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/stream/v1/rpc.proto \nStreamService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream     Scalar Value Types    .proto Type Notes C++ Java Python Go C# PHP Ruby     double  double double float float64 double float Float   float  float float float float32 float float Float   int32 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. int32 int int int32 int integer Bignum or Fixnum (as required)   int64 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. int64 long int/long int64 long integer/string Bignum   uint32 Uses variable-length encoding. uint32 int int/long uint32 uint integer Bignum or Fixnum (as required)   uint64 Uses variable-length encoding. uint64 long int/long uint64 ulong integer/string Bignum or Fixnum (as required)   sint32 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. int32 int int int32 int integer Bignum or Fixnum (as required)   sint64 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. int64 long int/long int64 long integer/string Bignum   fixed32 Always four bytes. More efficient than uint32 if values are often greater than 2^28. uint32 int int uint32 uint integer Bignum or Fixnum (as required)   fixed64 Always eight bytes. More efficient than uint64 if values are often greater than 2^56. uint64 long int/long uint64 ulong integer/string Bignum   sfixed32 Always four bytes. int32 int int int32 int integer Bignum or Fixnum (as required)   sfixed64 Always eight bytes. int64 long int/long int64 long integer/string Bignum   bool  bool boolean boolean bool bool boolean TrueClass/FalseClass   string A string must always contain UTF-8 encoded or 7-bit ASCII text. string String str/unicode string string string String (UTF-8)   bytes May contain any arbitrary sequence of bytes. 
string ByteString str []byte ByteString string String (ASCII-8BIT)    ","excerpt":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest …","ref":"/docs/skywalking-banyandb/latest/api-reference/","title":"Protocol Documentation"},{"body":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest\n  SendResponse\n  Service\n    banyandb/common/v1/common.proto\n  Group\n  IntervalRule\n  Metadata\n  ResourceOpts\n  Catalog\n  IntervalRule.Unit\n    banyandb/database/v1/database.proto\n  Node\n  Shard\n  Role\n    banyandb/model/v1/common.proto\n  FieldValue\n  Float\n  Int\n  IntArray\n  Str\n  StrArray\n  TagFamilyForWrite\n  TagValue\n  AggregationFunction\n    banyandb/model/v1/query.proto\n  Condition\n  Criteria\n  LogicalExpression\n  QueryOrder\n  Tag\n  TagFamily\n  TagProjection\n  TagProjection.TagFamily\n  TimeRange\n  Condition.BinaryOp\n  LogicalExpression.LogicalOp\n  Sort\n    banyandb/database/v1/schema.proto\n  Entity\n  FieldSpec\n  IndexRule\n  IndexRuleBinding\n  Measure\n  Stream\n  Subject\n  TagFamilySpec\n  TagSpec\n  TopNAggregation\n  CompressionMethod\n  EncodingMethod\n  FieldType\n  IndexRule.Analyzer\n  IndexRule.Type\n  TagType\n    banyandb/database/v1/rpc.proto\n  GroupRegistryServiceCreateRequest\n  GroupRegistryServiceCreateResponse\n  GroupRegistryServiceDeleteRequest\n  GroupRegistryServiceDeleteResponse\n  GroupRegistryServiceExistRequest\n  GroupRegistryServiceExistResponse\n  GroupRegistryServiceGetRequest\n  GroupRegistryServiceGetResponse\n  GroupRegistryServiceListRequest\n  GroupRegistryServiceListResponse\n  GroupRegistryServiceUpdateRequest\n  GroupRegistryServiceUpdateResponse\n  IndexRuleBindingRegistryServiceCreateRequest\n  IndexRuleBindingRegistryServiceCreateResponse\n  IndexRuleBindingRegistryServiceDeleteRequest\n  IndexRuleBindingRegistryServiceDeleteResponse\n  IndexRuleBindingRegistryServiceExistRequest\n  IndexRuleBindingRegistryServiceExistResponse\n  IndexRuleBindingRegistryServiceGetRequest\n  IndexRuleBindingRegistryServiceGetResponse\n  IndexRuleBindingRegistryServiceListRequest\n  IndexRuleBindingRegistryServiceListResponse\n  IndexRuleBindingRegistryServiceUpdateRequest\n  IndexRuleBindingRegistryServiceUpdateResponse\n  IndexRuleRegistryServiceCreateRequest\n  IndexRuleRegistryServiceCreateResponse\n  IndexRuleRegistryServiceDeleteRequest\n  IndexRuleRegistryServiceDeleteResponse\n  IndexRuleRegistryServiceExistRequest\n  IndexRuleRegistryServiceExistResponse\n  IndexRuleRegistryServiceGetRequest\n  IndexRuleRegistryServiceGetResponse\n  IndexRuleRegistryServiceListRequest\n  IndexRuleRegistryServiceListResponse\n  IndexRuleRegistryServiceUpdateRequest\n  IndexRuleRegistryServiceUpdateResponse\n  MeasureRegistryServiceCreateRequest\n  MeasureRegistryServiceCreateResponse\n  MeasureRegistryServiceDeleteRequest\n  MeasureRegistryServiceDeleteResponse\n  MeasureRegistryServiceExistRequest\n  MeasureRegistryServiceExistResponse\n  MeasureRegistryServiceGetRequest\n  MeasureRegistryServiceGetResponse\n  MeasureRegistryServiceListRequest\n  MeasureRegistryServiceListResponse\n  MeasureRegistryServiceUpdateRequest\n  MeasureRegistryServiceUpdateResponse\n  StreamRegistryServiceCreateRequest\n  StreamRegistryServiceCreateResponse\n  StreamRegistryServiceDeleteRequest\n  StreamRegistryServiceDeleteResponse\n  StreamRegistryServiceExistRequest\n  StreamRegistryServiceExistResponse\n  StreamRegistryServiceGetRequest\n  StreamRegistryServiceGetResponse\n  
StreamRegistryServiceListRequest\n  StreamRegistryServiceListResponse\n  StreamRegistryServiceUpdateRequest\n  StreamRegistryServiceUpdateResponse\n  TopNAggregationRegistryServiceCreateRequest\n  TopNAggregationRegistryServiceCreateResponse\n  TopNAggregationRegistryServiceDeleteRequest\n  TopNAggregationRegistryServiceDeleteResponse\n  TopNAggregationRegistryServiceExistRequest\n  TopNAggregationRegistryServiceExistResponse\n  TopNAggregationRegistryServiceGetRequest\n  TopNAggregationRegistryServiceGetResponse\n  TopNAggregationRegistryServiceListRequest\n  TopNAggregationRegistryServiceListResponse\n  TopNAggregationRegistryServiceUpdateRequest\n  TopNAggregationRegistryServiceUpdateResponse\n  GroupRegistryService\n  IndexRuleBindingRegistryService\n  IndexRuleRegistryService\n  MeasureRegistryService\n  StreamRegistryService\n  TopNAggregationRegistryService\n    banyandb/measure/v1/query.proto\n DataPoint DataPoint.Field QueryRequest QueryRequest.Aggregation QueryRequest.FieldProjection QueryRequest.GroupBy QueryRequest.Top QueryResponse    banyandb/measure/v1/topn.proto\n TopNList TopNList.Item TopNRequest TopNResponse    banyandb/model/v1/write.proto\n Status    banyandb/measure/v1/write.proto\n DataPointValue InternalWriteRequest WriteRequest WriteResponse    banyandb/measure/v1/rpc.proto\n MeasureService    banyandb/property/v1/property.proto\n Metadata Property    banyandb/property/v1/rpc.proto\n  ApplyRequest\n  ApplyResponse\n  DeleteRequest\n  DeleteResponse\n  GetRequest\n  GetResponse\n  KeepAliveRequest\n  KeepAliveResponse\n  ListRequest\n  ListResponse\n  ApplyRequest.Strategy\n  PropertyService\n    banyandb/stream/v1/query.proto\n Element QueryRequest QueryResponse    banyandb/stream/v1/write.proto\n ElementValue InternalWriteRequest WriteRequest WriteResponse    banyandb/stream/v1/rpc.proto\n StreamService    Scalar Value Types\n  \nTop\nbanyandb/cluster/v1/rpc.proto \nSendRequest    Field Type Label Description     topic string     message_id uint64     body google.protobuf.Any     batch_mod bool      \nSendResponse    Field Type Label Description     message_id uint64     error string     body google.protobuf.Any      \nService    Method Name Request Type Response Type Description     Send SendRequest stream SendResponse stream     \nTop\nbanyandb/common/v1/common.proto \nGroup Group is an internal object for Group management\n   Field Type Label Description     metadata Metadata  metadata define the group's identity   catalog Catalog  catalog denotes which type of data the group contains   resource_opts ResourceOpts  resourceOpts indicates the structure of the underlying kv storage   updated_at google.protobuf.Timestamp  updated_at indicates when resources of the group are updated    \nIntervalRule IntervalRule is a structured duration\n   Field Type Label Description     unit IntervalRule.Unit  unit can only be UNIT_HOUR or UNIT_DAY   num uint32      \nMetadata Metadata is for multi-tenant, multi-model use\n   Field Type Label Description     group string  group contains a set of options, like retention policy, max   name string  name of the entity   id uint32  id is the unique identifier of the entity if id is not set, the system will generate a unique id   create_revision int64  readonly. create_revision is the revision of last creation on this key.   mod_revision int64  readonly. mod_revision is the revision of last modification on this key.    
\nResourceOpts    Field Type Label Description     shard_num uint32  shard_num is the number of shards   segment_interval IntervalRule  segment_interval indicates the length of a segment   ttl IntervalRule  ttl indicates time to live, how long the data will be cached    \nCatalog    Name Number Description     CATALOG_UNSPECIFIED 0    CATALOG_STREAM 1    CATALOG_MEASURE 2     \nIntervalRule.Unit    Name Number Description     UNIT_UNSPECIFIED 0    UNIT_HOUR 1    UNIT_DAY 2     \nTop\nbanyandb/database/v1/database.proto \nNode    Field Type Label Description     metadata banyandb.common.v1.Metadata     roles Role repeated    grpc_address string     http_address string     created_at google.protobuf.Timestamp      \nShard    Field Type Label Description     id uint64     metadata banyandb.common.v1.Metadata     catalog banyandb.common.v1.Catalog     node string     total uint32     updated_at google.protobuf.Timestamp     created_at google.protobuf.Timestamp      \nRole    Name Number Description     ROLE_UNSPECIFIED 0    ROLE_META 1    ROLE_DATA 2    ROLE_LIAISON 3     \nTop\nbanyandb/model/v1/common.proto \nFieldValue    Field Type Label Description     null google.protobuf.NullValue     str Str     int Int     binary_data bytes     float Float      \nFloat    Field Type Label Description     value double      \nInt    Field Type Label Description     value int64      \nIntArray    Field Type Label Description     value int64 repeated     \nStr    Field Type Label Description     value string      \nStrArray    Field Type Label Description     value string repeated     \nTagFamilyForWrite    Field Type Label Description     tags TagValue repeated     \nTagValue    Field Type Label Description     null google.protobuf.NullValue     str Str     str_array StrArray     int Int     int_array IntArray     binary_data bytes      \nAggregationFunction    Name Number Description     AGGREGATION_FUNCTION_UNSPECIFIED 0    AGGREGATION_FUNCTION_MEAN 1    AGGREGATION_FUNCTION_MAX 2    AGGREGATION_FUNCTION_MIN 3    AGGREGATION_FUNCTION_COUNT 4    AGGREGATION_FUNCTION_SUM 5     \nTop\nbanyandb/model/v1/query.proto \nCondition Condition consists of the query condition with a single binary operator to be imposed For 1:1 BinaryOp, values in condition must be an array with length = 1, while for 1:N BinaryOp, values can be an array with length \u0026gt;= 1.\n   Field Type Label Description     name string     op Condition.BinaryOp     value TagValue      \nCriteria tag_families are indexed.\n   Field Type Label Description     le LogicalExpression     condition Condition      \nLogicalExpression LogicalExpression supports logical operation\n   Field Type Label Description     op LogicalExpression.LogicalOp  op is a logical operation   left Criteria     right Criteria      \nQueryOrder QueryOrder means a Sort operation to be done for a given index rule. The index_rule_name refers to the name of a index rule bound to the subject.\n   Field Type Label Description     index_rule_name string     sort Sort      \nTag Pair is the building block of a record which is equivalent to a key-value pair. In the context of Trace, it could be metadata of a trace such as service_name, service_instance, etc. Besides, other tags are organized in key-value pair in the underlying storage layer. 
One should notice that the values can be a multi-value.\n   Field Type Label Description     key string     value TagValue      \nTagFamily    Field Type Label Description     name string     tags Tag repeated     \nTagProjection TagProjection is used to select the names of keys to be returned.\n   Field Type Label Description     tag_families TagProjection.TagFamily repeated     \nTagProjection.TagFamily    Field Type Label Description     name string     tags string repeated     \nTimeRange TimeRange is a range query for uint64, the range here follows left-inclusive and right-exclusive rule, i.e. [begin, end) if both edges exist\n   Field Type Label Description     begin google.protobuf.Timestamp     end google.protobuf.Timestamp      \nCondition.BinaryOp BinaryOp specifies the operation imposed to the given query condition For EQ, NE, LT, GT, LE and GE, only one operand should be given, i.e. one-to-one relationship. HAVING and NOT_HAVING allow multi-value to be the operand such as array/vector, i.e. one-to-many relationship. For example, \u0026quot;keyA\u0026quot; contains \u0026quot;valueA\u0026quot; and \u0026quot;valueB\u0026quot; MATCH performances a full-text search if the tag is analyzed. The string value applies to the same analyzer as the tag, but string array value does not. Each item in a string array is seen as a token instead of a query expression.\n   Name Number Description     BINARY_OP_UNSPECIFIED 0    BINARY_OP_EQ 1    BINARY_OP_NE 2    BINARY_OP_LT 3    BINARY_OP_GT 4    BINARY_OP_LE 5    BINARY_OP_GE 6    BINARY_OP_HAVING 7    BINARY_OP_NOT_HAVING 8    BINARY_OP_IN 9    BINARY_OP_NOT_IN 10    BINARY_OP_MATCH 11     \nLogicalExpression.LogicalOp    Name Number Description     LOGICAL_OP_UNSPECIFIED 0    LOGICAL_OP_AND 1    LOGICAL_OP_OR 2     \nSort    Name Number Description     SORT_UNSPECIFIED 0    SORT_DESC 1    SORT_ASC 2     \nTop\nbanyandb/database/v1/schema.proto \nEntity    Field Type Label Description     tag_names string repeated     \nFieldSpec FieldSpec is the specification of field\n   Field Type Label Description     name string  name is the identity of a field   field_type FieldType  field_type denotes the type of field value   encoding_method EncodingMethod  encoding_method indicates how to encode data during writing   compression_method CompressionMethod  compression_method indicates how to compress data during writing    \nIndexRule IndexRule defines how to generate indices based on tags and the index type IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata define the rule's identity   tags string repeated tags are the combination that refers to an indexed object If the elements in tags are more than 1, the object will generate a multi-tag index Caveat: All tags in a multi-tag MUST have an identical IndexType   type IndexRule.Type  type is the IndexType of this IndexObject.   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRule is updated   analyzer IndexRule.Analyzer  analyzer analyzes tag value to support the full-text searching for TYPE_INVERTED indices.    
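As a concrete illustration of the IndexRule fields listed above, the following hedged Go sketch registers a single-tag inverted index through the IndexRuleRegistryService described later on this page. The endpoint, group and tag names are placeholders, and the import paths and generated identifiers are assumptions based on standard protoc-gen-go output for the banyandb packages.

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed locations of the generated stubs.
	commonv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/common/v1"
	databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
)

func main() {
	// Placeholder endpoint; point it at your BanyanDB node.
	conn, err := grpc.Dial("127.0.0.1:17912", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// A single-tag inverted index over "trace_id". The ANALYZER_* options only
	// matter for full-text searches on TYPE_INVERTED indices, so the analyzer
	// is left unset (ANALYZER_UNSPECIFIED) here.
	rule := &databasev1.IndexRule{
		Metadata: &commonv1.Metadata{Group: "default", Name: "trace_id"},
		Tags:     []string{"trace_id"},
		Type:     databasev1.IndexRule_TYPE_INVERTED,
	}

	_, err = databasev1.NewIndexRuleRegistryServiceClient(conn).Create(context.Background(),
		&databasev1.IndexRuleRegistryServiceCreateRequest{IndexRule: rule})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("index rule created")
}
```

An IndexRuleBinding (next section) is still required to attach the rule to a stream or measure before any index is generated.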
\nIndexRuleBinding IndexRuleBinding is a bridge to connect several IndexRules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, which provides flexible strategies to control how to generate time series indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of this binding   rules string repeated rules refer to the IndexRules   subject Subject  subject indicates the subject of the binding action   begin_at google.protobuf.Timestamp  begin_at_nanoseconds is the timestamp, after which the binding will be active   expire_at google.protobuf.Timestamp  expire_at_nanoseconds is the timestamp, after which the binding will be inactive. expire_at_nanoseconds must be larger than begin_at_nanoseconds   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRuleBinding is updated    \nMeasure Measure intends to store data points\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a measure   tag_families TagFamilySpec repeated tag_families are for filtering measures   fields FieldSpec repeated fields denote measure values   entity Entity  entity indicates which tags will be used to generate a series and shard a measure   interval string  interval indicates how frequently to send a data point. Valid time units are \u0026quot;ns\u0026quot;, \u0026quot;us\u0026quot; (or \u0026quot;µs\u0026quot;), \u0026quot;ms\u0026quot;, \u0026quot;s\u0026quot;, \u0026quot;m\u0026quot;, \u0026quot;h\u0026quot;, \u0026quot;d\u0026quot;.   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nStream Stream intends to store streaming data, for example, traces or logs\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a trace series   tag_families TagFamilySpec repeated tag_families   entity Entity  entity indicates how to generate a series and shard a stream   updated_at google.protobuf.Timestamp  updated_at indicates when the stream is updated    \nSubject Subject defines which stream or measure would generate indices\n   Field Type Label Description     catalog banyandb.common.v1.Catalog  catalog is where the subject belongs to todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   name string  name refers to a stream or measure in a particular catalog    \nTagFamilySpec    Field Type Label Description     name string     tags TagSpec repeated tags define accepted tags    \nTagSpec    Field Type Label Description     name string     type TagType     indexed_only bool  indexed_only indicates whether the tag is stored. True: it's indexed only, but not stored. False: it's stored and indexed    \nTopNAggregation TopNAggregation generates offline TopN statistics for a measure's TopN approximation\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of an aggregation   source_measure banyandb.common.v1.Metadata  source_measure denotes the data source of this aggregation   field_name string  field_name is the name of the field used for ranking   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN + bottomN todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   group_by_tag_names string repeated group_by_tag_names groups data points into statistical counters   criteria banyandb.model.v1.Criteria  criteria 
select partial data points from measure   counters_number int32  counters_number sets the number of counters to be tracked. The default value is 1000   lru_size int32  lru_size defines how much entry is allowed to be maintained in the memory   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nCompressionMethod    Name Number Description     COMPRESSION_METHOD_UNSPECIFIED 0    COMPRESSION_METHOD_ZSTD 1     \nEncodingMethod    Name Number Description     ENCODING_METHOD_UNSPECIFIED 0    ENCODING_METHOD_GORILLA 1     \nFieldType    Name Number Description     FIELD_TYPE_UNSPECIFIED 0    FIELD_TYPE_STRING 1    FIELD_TYPE_INT 2    FIELD_TYPE_DATA_BINARY 3    FIELD_TYPE_FLOAT 4     \nIndexRule.Analyzer    Name Number Description     ANALYZER_UNSPECIFIED 0    ANALYZER_KEYWORD 1 Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.   ANALYZER_STANDARD 2 Standard analyzer provides grammar based tokenization   ANALYZER_SIMPLE 3 Simple analyzer breaks text into tokens at any non-letter character, such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, and changes uppercase to lowercase.    \nIndexRule.Type Type determine the index structure under the hood\n   Name Number Description     TYPE_UNSPECIFIED 0    TYPE_INVERTED 1     \nTagType    Name Number Description     TAG_TYPE_UNSPECIFIED 0    TAG_TYPE_STRING 1    TAG_TYPE_INT 2    TAG_TYPE_STRING_ARRAY 3    TAG_TYPE_INT_ARRAY 4    TAG_TYPE_DATA_BINARY 5     \nTop\nbanyandb/database/v1/rpc.proto \nGroupRegistryServiceCreateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceCreateResponse \nGroupRegistryServiceDeleteRequest    Field Type Label Description     group string      \nGroupRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nGroupRegistryServiceExistRequest    Field Type Label Description     group string      \nGroupRegistryServiceExistResponse    Field Type Label Description     has_group bool      \nGroupRegistryServiceGetRequest    Field Type Label Description     group string      \nGroupRegistryServiceGetResponse    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceListRequest \nGroupRegistryServiceListResponse    Field Type Label Description     group banyandb.common.v1.Group repeated     \nGroupRegistryServiceUpdateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceUpdateResponse \nIndexRuleBindingRegistryServiceCreateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceCreateResponse \nIndexRuleBindingRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleBindingRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule_binding bool      \nIndexRuleBindingRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceGetResponse    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceListRequest    Field Type Label Description     group 
string      \nIndexRuleBindingRegistryServiceListResponse    Field Type Label Description     index_rule_binding IndexRuleBinding repeated     \nIndexRuleBindingRegistryServiceUpdateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceUpdateResponse \nIndexRuleRegistryServiceCreateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceCreateResponse \nIndexRuleRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule bool      \nIndexRuleRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceGetResponse    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleRegistryServiceListResponse    Field Type Label Description     index_rule IndexRule repeated     \nIndexRuleRegistryServiceUpdateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceUpdateResponse \nMeasureRegistryServiceCreateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nMeasureRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nMeasureRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_measure bool      \nMeasureRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceGetResponse    Field Type Label Description     measure Measure      \nMeasureRegistryServiceListRequest    Field Type Label Description     group string      \nMeasureRegistryServiceListResponse    Field Type Label Description     measure Measure repeated     \nMeasureRegistryServiceUpdateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceCreateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nStreamRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_stream bool      \nStreamRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceGetResponse    Field Type Label Description     stream Stream      \nStreamRegistryServiceListRequest   
 Field Type Label Description     group string      \nStreamRegistryServiceListResponse    Field Type Label Description     stream Stream repeated     \nStreamRegistryServiceUpdateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nTopNAggregationRegistryServiceCreateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceCreateResponse \nTopNAggregationRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nTopNAggregationRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_top_n_aggregation bool      \nTopNAggregationRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceGetResponse    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceListRequest    Field Type Label Description     group string      \nTopNAggregationRegistryServiceListResponse    Field Type Label Description     top_n_aggregation TopNAggregation repeated     \nTopNAggregationRegistryServiceUpdateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceUpdateResponse \nGroupRegistryService    Method Name Request Type Response Type Description     Create GroupRegistryServiceCreateRequest GroupRegistryServiceCreateResponse    Update GroupRegistryServiceUpdateRequest GroupRegistryServiceUpdateResponse    Delete GroupRegistryServiceDeleteRequest GroupRegistryServiceDeleteResponse    Get GroupRegistryServiceGetRequest GroupRegistryServiceGetResponse    List GroupRegistryServiceListRequest GroupRegistryServiceListResponse    Exist GroupRegistryServiceExistRequest GroupRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nIndexRuleBindingRegistryService    Method Name Request Type Response Type Description     Create IndexRuleBindingRegistryServiceCreateRequest IndexRuleBindingRegistryServiceCreateResponse    Update IndexRuleBindingRegistryServiceUpdateRequest IndexRuleBindingRegistryServiceUpdateResponse    Delete IndexRuleBindingRegistryServiceDeleteRequest IndexRuleBindingRegistryServiceDeleteResponse    Get IndexRuleBindingRegistryServiceGetRequest IndexRuleBindingRegistryServiceGetResponse    List IndexRuleBindingRegistryServiceListRequest IndexRuleBindingRegistryServiceListResponse    Exist IndexRuleBindingRegistryServiceExistRequest IndexRuleBindingRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. 
Please use HEAD method to touch Get instead    \nIndexRuleRegistryService    Method Name Request Type Response Type Description     Create IndexRuleRegistryServiceCreateRequest IndexRuleRegistryServiceCreateResponse    Update IndexRuleRegistryServiceUpdateRequest IndexRuleRegistryServiceUpdateResponse    Delete IndexRuleRegistryServiceDeleteRequest IndexRuleRegistryServiceDeleteResponse    Get IndexRuleRegistryServiceGetRequest IndexRuleRegistryServiceGetResponse    List IndexRuleRegistryServiceListRequest IndexRuleRegistryServiceListResponse    Exist IndexRuleRegistryServiceExistRequest IndexRuleRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nMeasureRegistryService    Method Name Request Type Response Type Description     Create MeasureRegistryServiceCreateRequest MeasureRegistryServiceCreateResponse    Update MeasureRegistryServiceUpdateRequest MeasureRegistryServiceUpdateResponse    Delete MeasureRegistryServiceDeleteRequest MeasureRegistryServiceDeleteResponse    Get MeasureRegistryServiceGetRequest MeasureRegistryServiceGetResponse    List MeasureRegistryServiceListRequest MeasureRegistryServiceListResponse    Exist MeasureRegistryServiceExistRequest MeasureRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nStreamRegistryService    Method Name Request Type Response Type Description     Create StreamRegistryServiceCreateRequest StreamRegistryServiceCreateResponse    Update StreamRegistryServiceUpdateRequest StreamRegistryServiceUpdateResponse    Delete StreamRegistryServiceDeleteRequest StreamRegistryServiceDeleteResponse    Get StreamRegistryServiceGetRequest StreamRegistryServiceGetResponse    List StreamRegistryServiceListRequest StreamRegistryServiceListResponse    Exist StreamRegistryServiceExistRequest StreamRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTopNAggregationRegistryService    Method Name Request Type Response Type Description     Create TopNAggregationRegistryServiceCreateRequest TopNAggregationRegistryServiceCreateResponse    Update TopNAggregationRegistryServiceUpdateRequest TopNAggregationRegistryServiceUpdateResponse    Delete TopNAggregationRegistryServiceDeleteRequest TopNAggregationRegistryServiceDeleteResponse    Get TopNAggregationRegistryServiceGetRequest TopNAggregationRegistryServiceGetResponse    List TopNAggregationRegistryServiceListRequest TopNAggregationRegistryServiceListResponse    Exist TopNAggregationRegistryServiceExistRequest TopNAggregationRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTop\nbanyandb/measure/v1/query.proto \nDataPoint DataPoint is stored in Measures\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamily repeated tag_families contains tags selected in the projection   fields DataPoint.Field repeated fields contains fields selected in the projection    \nDataPoint.Field    Field Type Label Description     name string     value banyandb.model.v1.FieldValue      \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     groups string repeated groups indicate where the data points are stored.   name string  name is the identity of a measure.   
time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   criteria banyandb.model.v1.Criteria  tag_families are indexed.   tag_projection banyandb.model.v1.TagProjection  tag_projection can be used to select tags of the data points in the response   field_projection QueryRequest.FieldProjection  field_projection can be used to select fields of the data points in the response   group_by QueryRequest.GroupBy  group_by groups data points based on their field value for a specific tag and use field_name as the projection name   agg QueryRequest.Aggregation  agg aggregates data points based on a field   top QueryRequest.Top  top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by's output   offset uint32  offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top's output   limit uint32  limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top's output   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a tag.    \nQueryRequest.Aggregation    Field Type Label Description     function banyandb.model.v1.AggregationFunction     field_name string  field_name must be one of files indicated by the field_projection    \nQueryRequest.FieldProjection    Field Type Label Description     names string repeated     \nQueryRequest.GroupBy    Field Type Label Description     tag_projection banyandb.model.v1.TagProjection  tag_projection must be a subset of the tag_projection of QueryRequest   field_name string  field_name must be one of fields indicated by field_projection    \nQueryRequest.Top    Field Type Label Description     number int32  number set the how many items should be returned   field_name string  field_name must be one of files indicated by the field_projection   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     data_points DataPoint repeated data_points are the actual data returned    \nTop\nbanyandb/measure/v1/topn.proto \nTopNList TopNList contains a series of topN items\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   items TopNList.Item repeated items contains top-n items in a list    \nTopNList.Item    Field Type Label Description     entity banyandb.model.v1.Tag repeated    value banyandb.model.v1.FieldValue      \nTopNRequest TopNRequest is the request contract for query.\n   Field Type Label Description     groups string repeated groups indicate where the data points are stored.   name string  name is the identity of a measure.   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   top_n int32  top_n set the how many items should be returned in each list.   agg banyandb.model.v1.AggregationFunction  agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only   conditions banyandb.model.v1.Condition repeated criteria select counters. Only equals are acceptable.   
field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields    \nTopNResponse TopNResponse is the response for a query to the Query module.\n   Field Type Label Description     lists TopNList repeated lists contain a series topN lists ranked by timestamp if agg_func in query request is specified, lists' size should be one.    \nTop\nbanyandb/model/v1/write.proto \nStatus Status is the response status for write\n   Name Number Description     STATUS_UNSPECIFIED 0    STATUS_SUCCEED 1    STATUS_INVALID_TIMESTAMP 2    STATUS_NOT_FOUND 3    STATUS_EXPIRED_SCHEMA 4    STATUS_INTERNAL_ERROR 5     \nTop\nbanyandb/measure/v1/write.proto \nDataPointValue DataPointValue is the data point for writing. It only contains values.\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the measure schema   fields banyandb.model.v1.FieldValue repeated the order of fields match the measure schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest WriteRequest is the request contract for write\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   data_point DataPointValue  the data_point is required.   message_id uint64  the message_id is required.    \nWriteResponse WriteResponse is the response contract for write\n   Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/measure/v1/rpc.proto \nMeasureService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream    TopN TopNRequest TopNResponse     \nTop\nbanyandb/property/v1/property.proto \nMetadata Metadata is for multi-tenant use\n   Field Type Label Description     container banyandb.common.v1.Metadata  container is created when it receives the first property   id string  id identifies a property    \nProperty Property stores the user defined data\n   Field Type Label Description     metadata Metadata  metadata is the identity of a property   tags banyandb.model.v1.Tag repeated tag stores the content of a property   updated_at google.protobuf.Timestamp  updated_at indicates when the property is updated   lease_id int64  readonly. lease_id is the ID of the lease that attached to key.   ttl string  ttl indicates the time to live of the property. It's a string in the format of \u0026quot;1h\u0026quot;, \u0026quot;2m\u0026quot;, \u0026quot;3s\u0026quot;, \u0026quot;1500ms\u0026quot;. It defaults to 0s, which means the property never expires. The minimum allowed ttl is 1s.    \nTop\nbanyandb/property/v1/rpc.proto \nApplyRequest    Field Type Label Description     property Property     strategy ApplyRequest.Strategy  strategy indicates how to update a property. It defaults to STRATEGY_MERGE    \nApplyResponse    Field Type Label Description     created bool  created indicates whether the property existed. True: the property is absent. False: the property existed.   
tags_num uint32     lease_id int64      \nDeleteRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nDeleteResponse    Field Type Label Description     deleted bool     tags_num uint32      \nGetRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nGetResponse    Field Type Label Description     property Property      \nKeepAliveRequest    Field Type Label Description     lease_id int64      \nKeepAliveResponse \nListRequest    Field Type Label Description     container banyandb.common.v1.Metadata     ids string repeated    tags string repeated     \nListResponse    Field Type Label Description     property Property repeated     \nApplyRequest.Strategy    Name Number Description     STRATEGY_UNSPECIFIED 0    STRATEGY_MERGE 1    STRATEGY_REPLACE 2     \nPropertyService    Method Name Request Type Response Type Description     Apply ApplyRequest ApplyResponse Apply creates a property if it's absent, or update a existed one based on a strategy.   Delete DeleteRequest DeleteResponse    Get GetRequest GetResponse    List ListRequest ListResponse    KeepAlive KeepAliveRequest KeepAliveResponse     \nTop\nbanyandb/stream/v1/query.proto \nElement Element represents (stream context) a Span defined in Google Dapper paper or equivalently a Segment in Skywalking. (Log context) a log\n   Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp represents a millisecond 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamily repeated fields contains all indexed Field. Some typical names, - stream_id - duration - service_name - service_instance_id - end_time_milliseconds    \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     groups string repeated groups indicate where the elements are stored.   name string  name is the identity of a stream.   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds. In the context of stream, it represents the range of the startTime for spans/segments, while in the context of Log, it means the range of the timestamp(s) for logs. it is always recommended to specify time range for performance reason   offset uint32  offset is used to support pagination, together with the following limit   limit uint32  limit is used to impose a boundary on the number of records being returned   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a field. So far, only fields in the type of Integer are supported   criteria banyandb.model.v1.Criteria  tag_families are indexed.   projection banyandb.model.v1.TagProjection  projection can be used to select the key names of the element in the response    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     elements Element repeated elements are the actual data returned    \nTop\nbanyandb/stream/v1/write.proto \nElementValue    Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds. 
It represents 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the stream schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   element ElementValue  the element is required.   message_id uint64  the message_id is required.    \nWriteResponse    Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/stream/v1/rpc.proto \nStreamService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream     Scalar Value Types    .proto Type Notes C++ Java Python Go C# PHP Ruby     double  double double float float64 double float Float   float  float float float float32 float float Float   int32 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. int32 int int int32 int integer Bignum or Fixnum (as required)   int64 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. int64 long int/long int64 long integer/string Bignum   uint32 Uses variable-length encoding. uint32 int int/long uint32 uint integer Bignum or Fixnum (as required)   uint64 Uses variable-length encoding. uint64 long int/long uint64 ulong integer/string Bignum or Fixnum (as required)   sint32 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. int32 int int int32 int integer Bignum or Fixnum (as required)   sint64 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. int64 long int/long int64 long integer/string Bignum   fixed32 Always four bytes. More efficient than uint32 if values are often greater than 2^28. uint32 int int uint32 uint integer Bignum or Fixnum (as required)   fixed64 Always eight bytes. More efficient than uint64 if values are often greater than 2^56. uint64 long int/long uint64 ulong integer/string Bignum   sfixed32 Always four bytes. int32 int int int32 int integer Bignum or Fixnum (as required)   sfixed64 Always eight bytes. int64 long int/long int64 long integer/string Bignum   bool  bool boolean boolean bool bool boolean TrueClass/FalseClass   string A string must always contain UTF-8 encoded or 7-bit ASCII text. string String str/unicode string string string String (UTF-8)   bytes May contain any arbitrary sequence of bytes. 
string ByteString str []byte ByteString string String (ASCII-8BIT)    ","excerpt":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest …","ref":"/docs/skywalking-banyandb/next/api-reference/","title":"Protocol Documentation"},{"body":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest\n  SendResponse\n  Service\n    banyandb/common/v1/common.proto\n  Group\n  IntervalRule\n  Metadata\n  ResourceOpts\n  Catalog\n  IntervalRule.Unit\n    banyandb/database/v1/database.proto\n  Node\n  Shard\n  Role\n    banyandb/model/v1/common.proto\n  FieldValue\n  Float\n  Int\n  IntArray\n  Str\n  StrArray\n  TagFamilyForWrite\n  TagValue\n  AggregationFunction\n    banyandb/model/v1/query.proto\n  Condition\n  Criteria\n  LogicalExpression\n  QueryOrder\n  Tag\n  TagFamily\n  TagProjection\n  TagProjection.TagFamily\n  TimeRange\n  Condition.BinaryOp\n  LogicalExpression.LogicalOp\n  Sort\n    banyandb/database/v1/schema.proto\n  Entity\n  FieldSpec\n  IndexRule\n  IndexRuleBinding\n  Measure\n  Stream\n  Subject\n  TagFamilySpec\n  TagSpec\n  TopNAggregation\n  CompressionMethod\n  EncodingMethod\n  FieldType\n  IndexRule.Analyzer\n  IndexRule.Location\n  IndexRule.Type\n  TagType\n    banyandb/database/v1/rpc.proto\n  GroupRegistryServiceCreateRequest\n  GroupRegistryServiceCreateResponse\n  GroupRegistryServiceDeleteRequest\n  GroupRegistryServiceDeleteResponse\n  GroupRegistryServiceExistRequest\n  GroupRegistryServiceExistResponse\n  GroupRegistryServiceGetRequest\n  GroupRegistryServiceGetResponse\n  GroupRegistryServiceListRequest\n  GroupRegistryServiceListResponse\n  GroupRegistryServiceUpdateRequest\n  GroupRegistryServiceUpdateResponse\n  IndexRuleBindingRegistryServiceCreateRequest\n  IndexRuleBindingRegistryServiceCreateResponse\n  IndexRuleBindingRegistryServiceDeleteRequest\n  IndexRuleBindingRegistryServiceDeleteResponse\n  IndexRuleBindingRegistryServiceExistRequest\n  IndexRuleBindingRegistryServiceExistResponse\n  IndexRuleBindingRegistryServiceGetRequest\n  IndexRuleBindingRegistryServiceGetResponse\n  IndexRuleBindingRegistryServiceListRequest\n  IndexRuleBindingRegistryServiceListResponse\n  IndexRuleBindingRegistryServiceUpdateRequest\n  IndexRuleBindingRegistryServiceUpdateResponse\n  IndexRuleRegistryServiceCreateRequest\n  IndexRuleRegistryServiceCreateResponse\n  IndexRuleRegistryServiceDeleteRequest\n  IndexRuleRegistryServiceDeleteResponse\n  IndexRuleRegistryServiceExistRequest\n  IndexRuleRegistryServiceExistResponse\n  IndexRuleRegistryServiceGetRequest\n  IndexRuleRegistryServiceGetResponse\n  IndexRuleRegistryServiceListRequest\n  IndexRuleRegistryServiceListResponse\n  IndexRuleRegistryServiceUpdateRequest\n  IndexRuleRegistryServiceUpdateResponse\n  MeasureRegistryServiceCreateRequest\n  MeasureRegistryServiceCreateResponse\n  MeasureRegistryServiceDeleteRequest\n  MeasureRegistryServiceDeleteResponse\n  MeasureRegistryServiceExistRequest\n  MeasureRegistryServiceExistResponse\n  MeasureRegistryServiceGetRequest\n  MeasureRegistryServiceGetResponse\n  MeasureRegistryServiceListRequest\n  MeasureRegistryServiceListResponse\n  MeasureRegistryServiceUpdateRequest\n  MeasureRegistryServiceUpdateResponse\n  StreamRegistryServiceCreateRequest\n  StreamRegistryServiceCreateResponse\n  StreamRegistryServiceDeleteRequest\n  StreamRegistryServiceDeleteResponse\n  StreamRegistryServiceExistRequest\n  StreamRegistryServiceExistResponse\n  StreamRegistryServiceGetRequest\n  
StreamRegistryServiceGetResponse\n  StreamRegistryServiceListRequest\n  StreamRegistryServiceListResponse\n  StreamRegistryServiceUpdateRequest\n  StreamRegistryServiceUpdateResponse\n  TopNAggregationRegistryServiceCreateRequest\n  TopNAggregationRegistryServiceCreateResponse\n  TopNAggregationRegistryServiceDeleteRequest\n  TopNAggregationRegistryServiceDeleteResponse\n  TopNAggregationRegistryServiceExistRequest\n  TopNAggregationRegistryServiceExistResponse\n  TopNAggregationRegistryServiceGetRequest\n  TopNAggregationRegistryServiceGetResponse\n  TopNAggregationRegistryServiceListRequest\n  TopNAggregationRegistryServiceListResponse\n  TopNAggregationRegistryServiceUpdateRequest\n  TopNAggregationRegistryServiceUpdateResponse\n  GroupRegistryService\n  IndexRuleBindingRegistryService\n  IndexRuleRegistryService\n  MeasureRegistryService\n  StreamRegistryService\n  TopNAggregationRegistryService\n    banyandb/measure/v1/query.proto\n DataPoint DataPoint.Field QueryRequest QueryRequest.Aggregation QueryRequest.FieldProjection QueryRequest.GroupBy QueryRequest.Top QueryResponse    banyandb/measure/v1/topn.proto\n TopNList TopNList.Item TopNRequest TopNResponse    banyandb/model/v1/write.proto\n Status    banyandb/measure/v1/write.proto\n DataPointValue InternalWriteRequest WriteRequest WriteResponse    banyandb/measure/v1/rpc.proto\n MeasureService    banyandb/property/v1/property.proto\n Metadata Property    banyandb/property/v1/rpc.proto\n  ApplyRequest\n  ApplyResponse\n  DeleteRequest\n  DeleteResponse\n  GetRequest\n  GetResponse\n  KeepAliveRequest\n  KeepAliveResponse\n  ListRequest\n  ListResponse\n  ApplyRequest.Strategy\n  PropertyService\n    banyandb/stream/v1/query.proto\n Element QueryRequest QueryResponse    banyandb/stream/v1/write.proto\n ElementValue InternalWriteRequest WriteRequest WriteResponse    banyandb/stream/v1/rpc.proto\n StreamService    Scalar Value Types\n  \nTop\nbanyandb/cluster/v1/rpc.proto \nSendRequest    Field Type Label Description     topic string     message_id uint64     body google.protobuf.Any      \nSendResponse    Field Type Label Description     message_id uint64     error string     body google.protobuf.Any      \nService    Method Name Request Type Response Type Description     Send SendRequest stream SendResponse stream     \nTop\nbanyandb/common/v1/common.proto \nGroup Group is an internal object for Group management\n   Field Type Label Description     metadata Metadata  metadata define the group's identity   catalog Catalog  catalog denotes which type of data the group contains   resource_opts ResourceOpts  resourceOpts indicates the structure of the underlying kv storage   updated_at google.protobuf.Timestamp  updated_at indicates when resources of the group are updated    \nIntervalRule IntervalRule is a structured duration\n   Field Type Label Description     unit IntervalRule.Unit  unit can only be UNIT_HOUR or UNIT_DAY   num uint32      \nMetadata Metadata is for multi-tenant, multi-model use\n   Field Type Label Description     group string  group contains a set of options, like retention policy, max   name string  name of the entity   id uint32     create_revision int64  readonly. create_revision is the revision of last creation on this key.   mod_revision int64  readonly. mod_revision is the revision of last modification on this key.    
\nResourceOpts    Field Type Label Description     shard_num uint32  shard_num is the number of shards   block_interval IntervalRule  block_interval indicates the length of a block block_interval should be less than or equal to segment_interval   segment_interval IntervalRule  segment_interval indicates the length of a segment   ttl IntervalRule  ttl indicates time to live, how long the data will be cached    \nCatalog    Name Number Description     CATALOG_UNSPECIFIED 0    CATALOG_STREAM 1    CATALOG_MEASURE 2     \nIntervalRule.Unit    Name Number Description     UNIT_UNSPECIFIED 0    UNIT_HOUR 1    UNIT_DAY 2     \nTop\nbanyandb/database/v1/database.proto \nNode    Field Type Label Description     metadata banyandb.common.v1.Metadata     roles Role repeated    grpc_address string     http_address string     created_at google.protobuf.Timestamp      \nShard    Field Type Label Description     id uint64     metadata banyandb.common.v1.Metadata     catalog banyandb.common.v1.Catalog     node string     total uint32     updated_at google.protobuf.Timestamp     created_at google.protobuf.Timestamp      \nRole    Name Number Description     ROLE_UNSPECIFIED 0    ROLE_META 1    ROLE_DATA 2    ROLE_LIAISON 3     \nTop\nbanyandb/model/v1/common.proto \nFieldValue    Field Type Label Description     null google.protobuf.NullValue     str Str     int Int     binary_data bytes     float Float      \nFloat    Field Type Label Description     value double      \nInt    Field Type Label Description     value int64      \nIntArray    Field Type Label Description     value int64 repeated     \nStr    Field Type Label Description     value string      \nStrArray    Field Type Label Description     value string repeated     \nTagFamilyForWrite    Field Type Label Description     tags TagValue repeated     \nTagValue    Field Type Label Description     null google.protobuf.NullValue     str Str     str_array StrArray     int Int     int_array IntArray     binary_data bytes      \nAggregationFunction    Name Number Description     AGGREGATION_FUNCTION_UNSPECIFIED 0    AGGREGATION_FUNCTION_MEAN 1    AGGREGATION_FUNCTION_MAX 2    AGGREGATION_FUNCTION_MIN 3    AGGREGATION_FUNCTION_COUNT 4    AGGREGATION_FUNCTION_SUM 5     \nTop\nbanyandb/model/v1/query.proto \nCondition Condition consists of the query condition with a single binary operator to be imposed For 1:1 BinaryOp, values in condition must be an array with length = 1, while for 1:N BinaryOp, values can be an array with length \u0026gt;= 1.\n   Field Type Label Description     name string     op Condition.BinaryOp     value TagValue      \nCriteria tag_families are indexed.\n   Field Type Label Description     le LogicalExpression     condition Condition      \nLogicalExpression LogicalExpression supports logical operation\n   Field Type Label Description     op LogicalExpression.LogicalOp  op is a logical operation   left Criteria     right Criteria      \nQueryOrder QueryOrder means a Sort operation to be done for a given index rule. The index_rule_name refers to the name of a index rule bound to the subject.\n   Field Type Label Description     index_rule_name string     sort Sort      \nTag Pair is the building block of a record which is equivalent to a key-value pair. In the context of Trace, it could be metadata of a trace such as service_name, service_instance, etc. Besides, other tags are organized in key-value pair in the underlying storage layer. 
One should notice that the values can be a multi-value.\n   Field Type Label Description     key string     value TagValue      \nTagFamily    Field Type Label Description     name string     tags Tag repeated     \nTagProjection TagProjection is used to select the names of keys to be returned.\n   Field Type Label Description     tag_families TagProjection.TagFamily repeated     \nTagProjection.TagFamily    Field Type Label Description     name string     tags string repeated     \nTimeRange TimeRange is a range query for uint64, the range here follows left-inclusive and right-exclusive rule, i.e. [begin, end) if both edges exist\n   Field Type Label Description     begin google.protobuf.Timestamp     end google.protobuf.Timestamp      \nCondition.BinaryOp BinaryOp specifies the operation imposed to the given query condition For EQ, NE, LT, GT, LE and GE, only one operand should be given, i.e. one-to-one relationship. HAVING and NOT_HAVING allow multi-value to be the operand such as array/vector, i.e. one-to-many relationship. For example, \u0026quot;keyA\u0026quot; contains \u0026quot;valueA\u0026quot; and \u0026quot;valueB\u0026quot; MATCH performances a full-text search if the tag is analyzed. The string value applies to the same analyzer as the tag, but string array value does not. Each item in a string array is seen as a token instead of a query expression.\n   Name Number Description     BINARY_OP_UNSPECIFIED 0    BINARY_OP_EQ 1    BINARY_OP_NE 2    BINARY_OP_LT 3    BINARY_OP_GT 4    BINARY_OP_LE 5    BINARY_OP_GE 6    BINARY_OP_HAVING 7    BINARY_OP_NOT_HAVING 8    BINARY_OP_IN 9    BINARY_OP_NOT_IN 10    BINARY_OP_MATCH 11     \nLogicalExpression.LogicalOp    Name Number Description     LOGICAL_OP_UNSPECIFIED 0    LOGICAL_OP_AND 1    LOGICAL_OP_OR 2     \nSort    Name Number Description     SORT_UNSPECIFIED 0    SORT_DESC 1    SORT_ASC 2     \nTop\nbanyandb/database/v1/schema.proto \nEntity    Field Type Label Description     tag_names string repeated     \nFieldSpec FieldSpec is the specification of field\n   Field Type Label Description     name string  name is the identity of a field   field_type FieldType  field_type denotes the type of field value   encoding_method EncodingMethod  encoding_method indicates how to encode data during writing   compression_method CompressionMethod  compression_method indicates how to compress data during writing    \nIndexRule IndexRule defines how to generate indices based on tags and the index type IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata define the rule's identity   tags string repeated tags are the combination that refers to an indexed object If the elements in tags are more than 1, the object will generate a multi-tag index Caveat: All tags in a multi-tag MUST have an identical IndexType   type IndexRule.Type  type is the IndexType of this IndexObject.   location IndexRule.Location  location indicates where to store index.   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRule is updated   analyzer IndexRule.Analyzer  analyzer analyzes tag value to support the full-text searching for TYPE_INVERTED indices.    
\nIndexRuleBinding IndexRuleBinding is a bridge to connect several IndexRules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, which provides flexible strategies to control how to generate time series indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of this binding   rules string repeated rules refers to the IndexRule   subject Subject  subject indicates the subject of binding action   begin_at google.protobuf.Timestamp  begin_at_nanoseconds is the timestamp, after which the binding will be active   expire_at google.protobuf.Timestamp  expire_at_nanoseconds is the timestamp, after which the binding will be inactive expire_at_nanoseconds must be larger than begin_at_nanoseconds   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRuleBinding is updated    \nMeasure Measure intends to store data point\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a measure   tag_families TagFamilySpec repeated tag_families are for filtering measures   fields FieldSpec repeated fields denote measure values   entity Entity  entity indicates which tags will be used to generate a series and shard a measure   interval string  interval indicates how frequently to send a data point valid time units are \u0026quot;ns\u0026quot;, \u0026quot;us\u0026quot; (or \u0026quot;µs\u0026quot;), \u0026quot;ms\u0026quot;, \u0026quot;s\u0026quot;, \u0026quot;m\u0026quot;, \u0026quot;h\u0026quot;, \u0026quot;d\u0026quot;.   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nStream Stream intends to store streaming data, for example, traces or logs\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a trace series   tag_families TagFamilySpec repeated tag_families   entity Entity  entity indicates how to generate a series and shard a stream   updated_at google.protobuf.Timestamp  updated_at indicates when the stream is updated    \nSubject Subject defines which stream or measure would generate indices\n   Field Type Label Description     catalog banyandb.common.v1.Catalog  catalog is where the subject belongs to todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   name string  name refers to a stream or measure in a particular catalog    \nTagFamilySpec    Field Type Label Description     name string     tags TagSpec repeated tags defines accepted tags    \nTagSpec    Field Type Label Description     name string     type TagType     indexed_only bool  indexed_only indicates whether the tag is stored True: It's indexed only, but not stored False: it's stored and indexed    \nTopNAggregation TopNAggregation generates offline TopN statistics for a measure's TopN approximation\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of an aggregation   source_measure banyandb.common.v1.Metadata  source_measure denotes the data source of this aggregation   field_name string  field_name is the name of field used for ranking   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN + bottomN todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   group_by_tag_names string repeated group_by_tag_names groups data points into statistical counters   criteria banyandb.model.v1.Criteria  criteria 
select partial data points from measure   counters_number int32  counters_number sets the number of counters to be tracked. The default value is 1000   lru_size int32  lru_size defines how much entry is allowed to be maintained in the memory   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nCompressionMethod    Name Number Description     COMPRESSION_METHOD_UNSPECIFIED 0    COMPRESSION_METHOD_ZSTD 1     \nEncodingMethod    Name Number Description     ENCODING_METHOD_UNSPECIFIED 0    ENCODING_METHOD_GORILLA 1     \nFieldType    Name Number Description     FIELD_TYPE_UNSPECIFIED 0    FIELD_TYPE_STRING 1    FIELD_TYPE_INT 2    FIELD_TYPE_DATA_BINARY 3    FIELD_TYPE_FLOAT 4     \nIndexRule.Analyzer    Name Number Description     ANALYZER_UNSPECIFIED 0    ANALYZER_KEYWORD 1 Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.   ANALYZER_STANDARD 2 Standard analyzer provides grammar based tokenization   ANALYZER_SIMPLE 3 Simple analyzer breaks text into tokens at any non-letter character, such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, and changes uppercase to lowercase.    \nIndexRule.Location    Name Number Description     LOCATION_UNSPECIFIED 0    LOCATION_SERIES 1    LOCATION_GLOBAL 2     \nIndexRule.Type Type determine the index structure under the hood\n   Name Number Description     TYPE_UNSPECIFIED 0    TYPE_TREE 1    TYPE_INVERTED 2     \nTagType    Name Number Description     TAG_TYPE_UNSPECIFIED 0    TAG_TYPE_STRING 1    TAG_TYPE_INT 2    TAG_TYPE_STRING_ARRAY 3    TAG_TYPE_INT_ARRAY 4    TAG_TYPE_DATA_BINARY 5     \nTop\nbanyandb/database/v1/rpc.proto \nGroupRegistryServiceCreateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceCreateResponse \nGroupRegistryServiceDeleteRequest    Field Type Label Description     group string      \nGroupRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nGroupRegistryServiceExistRequest    Field Type Label Description     group string      \nGroupRegistryServiceExistResponse    Field Type Label Description     has_group bool      \nGroupRegistryServiceGetRequest    Field Type Label Description     group string      \nGroupRegistryServiceGetResponse    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceListRequest \nGroupRegistryServiceListResponse    Field Type Label Description     group banyandb.common.v1.Group repeated     \nGroupRegistryServiceUpdateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceUpdateResponse \nIndexRuleBindingRegistryServiceCreateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceCreateResponse \nIndexRuleBindingRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleBindingRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule_binding bool      \nIndexRuleBindingRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceGetResponse    Field Type Label Description 
    index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleBindingRegistryServiceListResponse    Field Type Label Description     index_rule_binding IndexRuleBinding repeated     \nIndexRuleBindingRegistryServiceUpdateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceUpdateResponse \nIndexRuleRegistryServiceCreateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceCreateResponse \nIndexRuleRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule bool      \nIndexRuleRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceGetResponse    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleRegistryServiceListResponse    Field Type Label Description     index_rule IndexRule repeated     \nIndexRuleRegistryServiceUpdateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceUpdateResponse \nMeasureRegistryServiceCreateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nMeasureRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nMeasureRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_measure bool      \nMeasureRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceGetResponse    Field Type Label Description     measure Measure      \nMeasureRegistryServiceListRequest    Field Type Label Description     group string      \nMeasureRegistryServiceListResponse    Field Type Label Description     measure Measure repeated     \nMeasureRegistryServiceUpdateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceCreateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nStreamRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_stream bool      \nStreamRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata 
     \nStreamRegistryServiceGetResponse    Field Type Label Description     stream Stream      \nStreamRegistryServiceListRequest    Field Type Label Description     group string      \nStreamRegistryServiceListResponse    Field Type Label Description     stream Stream repeated     \nStreamRegistryServiceUpdateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nTopNAggregationRegistryServiceCreateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceCreateResponse \nTopNAggregationRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nTopNAggregationRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_top_n_aggregation bool      \nTopNAggregationRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceGetResponse    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceListRequest    Field Type Label Description     group string      \nTopNAggregationRegistryServiceListResponse    Field Type Label Description     top_n_aggregation TopNAggregation repeated     \nTopNAggregationRegistryServiceUpdateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceUpdateResponse \nGroupRegistryService    Method Name Request Type Response Type Description     Create GroupRegistryServiceCreateRequest GroupRegistryServiceCreateResponse    Update GroupRegistryServiceUpdateRequest GroupRegistryServiceUpdateResponse    Delete GroupRegistryServiceDeleteRequest GroupRegistryServiceDeleteResponse    Get GroupRegistryServiceGetRequest GroupRegistryServiceGetResponse    List GroupRegistryServiceListRequest GroupRegistryServiceListResponse    Exist GroupRegistryServiceExistRequest GroupRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nIndexRuleBindingRegistryService    Method Name Request Type Response Type Description     Create IndexRuleBindingRegistryServiceCreateRequest IndexRuleBindingRegistryServiceCreateResponse    Update IndexRuleBindingRegistryServiceUpdateRequest IndexRuleBindingRegistryServiceUpdateResponse    Delete IndexRuleBindingRegistryServiceDeleteRequest IndexRuleBindingRegistryServiceDeleteResponse    Get IndexRuleBindingRegistryServiceGetRequest IndexRuleBindingRegistryServiceGetResponse    List IndexRuleBindingRegistryServiceListRequest IndexRuleBindingRegistryServiceListResponse    Exist IndexRuleBindingRegistryServiceExistRequest IndexRuleBindingRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. 
Please use HEAD method to touch Get instead    \nIndexRuleRegistryService    Method Name Request Type Response Type Description     Create IndexRuleRegistryServiceCreateRequest IndexRuleRegistryServiceCreateResponse    Update IndexRuleRegistryServiceUpdateRequest IndexRuleRegistryServiceUpdateResponse    Delete IndexRuleRegistryServiceDeleteRequest IndexRuleRegistryServiceDeleteResponse    Get IndexRuleRegistryServiceGetRequest IndexRuleRegistryServiceGetResponse    List IndexRuleRegistryServiceListRequest IndexRuleRegistryServiceListResponse    Exist IndexRuleRegistryServiceExistRequest IndexRuleRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nMeasureRegistryService    Method Name Request Type Response Type Description     Create MeasureRegistryServiceCreateRequest MeasureRegistryServiceCreateResponse    Update MeasureRegistryServiceUpdateRequest MeasureRegistryServiceUpdateResponse    Delete MeasureRegistryServiceDeleteRequest MeasureRegistryServiceDeleteResponse    Get MeasureRegistryServiceGetRequest MeasureRegistryServiceGetResponse    List MeasureRegistryServiceListRequest MeasureRegistryServiceListResponse    Exist MeasureRegistryServiceExistRequest MeasureRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nStreamRegistryService    Method Name Request Type Response Type Description     Create StreamRegistryServiceCreateRequest StreamRegistryServiceCreateResponse    Update StreamRegistryServiceUpdateRequest StreamRegistryServiceUpdateResponse    Delete StreamRegistryServiceDeleteRequest StreamRegistryServiceDeleteResponse    Get StreamRegistryServiceGetRequest StreamRegistryServiceGetResponse    List StreamRegistryServiceListRequest StreamRegistryServiceListResponse    Exist StreamRegistryServiceExistRequest StreamRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTopNAggregationRegistryService    Method Name Request Type Response Type Description     Create TopNAggregationRegistryServiceCreateRequest TopNAggregationRegistryServiceCreateResponse    Update TopNAggregationRegistryServiceUpdateRequest TopNAggregationRegistryServiceUpdateResponse    Delete TopNAggregationRegistryServiceDeleteRequest TopNAggregationRegistryServiceDeleteResponse    Get TopNAggregationRegistryServiceGetRequest TopNAggregationRegistryServiceGetResponse    List TopNAggregationRegistryServiceListRequest TopNAggregationRegistryServiceListResponse    Exist TopNAggregationRegistryServiceExistRequest TopNAggregationRegistryServiceExistResponse     \nTop\nbanyandb/measure/v1/query.proto \nDataPoint DataPoint is stored in Measures\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamily repeated tag_families contains tags selected in the projection   fields DataPoint.Field repeated fields contains fields selected in the projection    \nDataPoint.Field    Field Type Label Description     name string     value banyandb.model.v1.FieldValue      \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   criteria banyandb.model.v1.Criteria  tag_families are indexed.   
tag_projection banyandb.model.v1.TagProjection  tag_projection can be used to select tags of the data points in the response   field_projection QueryRequest.FieldProjection  field_projection can be used to select fields of the data points in the response   group_by QueryRequest.GroupBy  group_by groups data points based on their field value for a specific tag and use field_name as the projection name   agg QueryRequest.Aggregation  agg aggregates data points based on a field   top QueryRequest.Top  top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by's output   offset uint32  offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top's output   limit uint32  limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top's output   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a tag.    \nQueryRequest.Aggregation    Field Type Label Description     function banyandb.model.v1.AggregationFunction     field_name string  field_name must be one of files indicated by the field_projection    \nQueryRequest.FieldProjection    Field Type Label Description     names string repeated     \nQueryRequest.GroupBy    Field Type Label Description     tag_projection banyandb.model.v1.TagProjection  tag_projection must be a subset of the tag_projection of QueryRequest   field_name string  field_name must be one of fields indicated by field_projection    \nQueryRequest.Top    Field Type Label Description     number int32  number set the how many items should be returned   field_name string  field_name must be one of files indicated by the field_projection   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     data_points DataPoint repeated data_points are the actual data returned    \nTop\nbanyandb/measure/v1/topn.proto \nTopNList TopNList contains a series of topN items\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   items TopNList.Item repeated items contains top-n items in a list    \nTopNList.Item    Field Type Label Description     entity banyandb.model.v1.Tag repeated    value banyandb.model.v1.FieldValue      \nTopNRequest TopNRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   top_n int32  top_n set the how many items should be returned in each list.   agg banyandb.model.v1.AggregationFunction  agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only   conditions banyandb.model.v1.Condition repeated criteria select counters. Only equals are acceptable.   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields    \nTopNResponse TopNResponse is the response for a query to the Query module.\n   Field Type Label Description     lists TopNList repeated lists contain a series topN lists ranked by timestamp if agg_func in query request is specified, lists' size should be one.    
\nTop\nbanyandb/model/v1/write.proto \nStatus Status is the response status for write\n   Name Number Description     STATUS_UNSPECIFIED 0    STATUS_SUCCEED 1    STATUS_INVALID_TIMESTAMP 2    STATUS_NOT_FOUND 3    STATUS_EXPIRED_SCHEMA 4    STATUS_INTERNAL_ERROR 5     \nTop\nbanyandb/measure/v1/write.proto \nDataPointValue DataPointValue is the data point for writing. It only contains values.\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the measure schema   fields banyandb.model.v1.FieldValue repeated the order of fields match the measure schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest WriteRequest is the request contract for write\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   data_point DataPointValue  the data_point is required.   message_id uint64  the message_id is required.    \nWriteResponse WriteResponse is the response contract for write\n   Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/measure/v1/rpc.proto \nMeasureService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream    TopN TopNRequest TopNResponse     \nTop\nbanyandb/property/v1/property.proto \nMetadata Metadata is for multi-tenant use\n   Field Type Label Description     container banyandb.common.v1.Metadata  container is created when it receives the first property   id string  id identifies a property    \nProperty Property stores the user defined data\n   Field Type Label Description     metadata Metadata  metadata is the identity of a property   tags banyandb.model.v1.Tag repeated tag stores the content of a property   updated_at google.protobuf.Timestamp  updated_at indicates when the property is updated   lease_id int64  readonly. lease_id is the ID of the lease that attached to key.   ttl string  ttl indicates the time to live of the property. It's a string in the format of \u0026quot;1h\u0026quot;, \u0026quot;2m\u0026quot;, \u0026quot;3s\u0026quot;, \u0026quot;1500ms\u0026quot;. It defaults to 0s, which means the property never expires. The minimum allowed ttl is 1s.    \nTop\nbanyandb/property/v1/rpc.proto \nApplyRequest    Field Type Label Description     property Property     strategy ApplyRequest.Strategy  strategy indicates how to update a property. It defaults to STRATEGY_MERGE    \nApplyResponse    Field Type Label Description     created bool  created indicates whether the property existed. True: the property is absent. False: the property existed.   
tags_num uint32     lease_id int64      \nDeleteRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nDeleteResponse    Field Type Label Description     deleted bool     tags_num uint32      \nGetRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nGetResponse    Field Type Label Description     property Property      \nKeepAliveRequest    Field Type Label Description     lease_id int64      \nKeepAliveResponse \nListRequest    Field Type Label Description     container banyandb.common.v1.Metadata     ids string repeated    tags string repeated     \nListResponse    Field Type Label Description     property Property repeated     \nApplyRequest.Strategy    Name Number Description     STRATEGY_UNSPECIFIED 0    STRATEGY_MERGE 1    STRATEGY_REPLACE 2     \nPropertyService    Method Name Request Type Response Type Description     Apply ApplyRequest ApplyResponse Apply creates a property if it's absent, or update a existed one based on a strategy.   Delete DeleteRequest DeleteResponse    Get GetRequest GetResponse    List ListRequest ListResponse    KeepAlive KeepAliveRequest KeepAliveResponse     \nTop\nbanyandb/stream/v1/query.proto \nElement Element represents (stream context) a Span defined in Google Dapper paper or equivalently a Segment in Skywalking. (Log context) a log\n   Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp represents a millisecond 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamily repeated fields contains all indexed Field. Some typical names, - stream_id - duration - service_name - service_instance_id - end_time_milliseconds    \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds. In the context of stream, it represents the range of the startTime for spans/segments, while in the context of Log, it means the range of the timestamp(s) for logs. it is always recommended to specify time range for performance reason   offset uint32  offset is used to support pagination, together with the following limit   limit uint32  limit is used to impose a boundary on the number of records being returned   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a field. So far, only fields in the type of Integer are supported   criteria banyandb.model.v1.Criteria  tag_families are indexed.   projection banyandb.model.v1.TagProjection  projection can be used to select the key names of the element in the response    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     elements Element repeated elements are the actual data returned    \nTop\nbanyandb/stream/v1/write.proto \nElementValue    Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds. 
It represents 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the stream schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   element ElementValue  the element is required.   message_id uint64  the message_id is required.    \nWriteResponse    Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/stream/v1/rpc.proto \nStreamService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream     Scalar Value Types    .proto Type Notes C++ Java Python Go C# PHP Ruby     double  double double float float64 double float Float   float  float float float float32 float float Float   int32 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. int32 int int int32 int integer Bignum or Fixnum (as required)   int64 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. int64 long int/long int64 long integer/string Bignum   uint32 Uses variable-length encoding. uint32 int int/long uint32 uint integer Bignum or Fixnum (as required)   uint64 Uses variable-length encoding. uint64 long int/long uint64 ulong integer/string Bignum or Fixnum (as required)   sint32 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. int32 int int int32 int integer Bignum or Fixnum (as required)   sint64 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. int64 long int/long int64 long integer/string Bignum   fixed32 Always four bytes. More efficient than uint32 if values are often greater than 2^28. uint32 int int uint32 uint integer Bignum or Fixnum (as required)   fixed64 Always eight bytes. More efficient than uint64 if values are often greater than 2^56. uint64 long int/long uint64 ulong integer/string Bignum   sfixed32 Always four bytes. int32 int int int32 int integer Bignum or Fixnum (as required)   sfixed64 Always eight bytes. int64 long int/long int64 long integer/string Bignum   bool  bool boolean boolean bool bool boolean TrueClass/FalseClass   string A string must always contain UTF-8 encoded or 7-bit ASCII text. string String str/unicode string string string String (UTF-8)   bytes May contain any arbitrary sequence of bytes. string ByteString str []byte ByteString string String (ASCII-8BIT)    ","excerpt":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest …","ref":"/docs/skywalking-banyandb/v0.5.0/api-reference/","title":"Protocol Documentation"},{"body":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus format from the Pulsar and transfer the metrics to OpenTelemetry receiver and into the Meter System. 
Kafka entity as a Service in OAP and on the `Layer: PULSAR.\nData flow  Pulsar exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from Pulsar cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up Pulsar Cluster. (Pulsar cluster includes pulsar broker cluster and Bookkeeper bookie cluster.) Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Pulsar Monitoring Pulsar monitoring provides multidimensional metrics monitoring of Pulsar cluster as Layer: PULSAR Service in the OAP. In each cluster, the nodes are represented as Instance.\nPulsar Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Total Topics meter_pulsar_total_topics The number of Pulsar topics in this cluster. Pulsar Cluster   Total Subscriptions meter_pulsar_total_subscriptions The number of Pulsar subscriptions in this cluster. Pulsar Cluster   Total Producers meter_pulsar_total_producers The number of active producers connected to this cluster. Pulsar Cluster   Total Consumers meter_pulsar_total_consumers The number of active consumers connected to this cluster. Pulsar Cluster   Message Rate In meter_pulsar_message_rate_in The total message rate coming into this cluster (message per second). Pulsar Cluster   Message Rate Out meter_pulsar_message_rate_out The total message rate going out from this cluster (message per second). Pulsar Cluster   Throughput In meter_pulsar_throughput_in The total throughput coming into this cluster (byte per second). Pulsar Cluster   Throughput Out meter_pulsar_throughput_out The total throughput going out from this cluster (byte per second). Pulsar Cluster   Storage Size meter_pulsar_storage_size The total storage size of all topics in this broker (in bytes). Pulsar Cluster   Storage Logical Size meter_pulsar_storage_logical_size The storage size of all topics in this broker without replicas (in bytes). Pulsar Cluster   Storage Write Rate meter_pulsar_storage_write_rate The total message batches (entries) written to the storage for this broker (message batch per second). Pulsar Cluster   Storage Read Rate meter_pulsar_storage_read_rate The total message batches (entries) read from the storage for this broker (message batch per second). Pulsar Cluster    Pulsar Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     Active Connections meter_pulsar_broker_active_connections The number of active connections. Pulsar Broker   Total Connections meter_pulsar_broker_total_connections The total number of connections. Pulsar Broker   Connection Create Success Count meter_pulsar_broker_connection_create_success_count The number of successfully created connections. Pulsar Broker   Connection Create Fail Count meter_pulsar_broker_connection_create_fail_count The number of failed connections. Pulsar Broker   Connection Closed Total Count meter_pulsar_broker_connection_closed_total_count The total number of closed connections. Pulsar Broker   JVM Buffer Pool Used meter_pulsar_broker_jvm_buffer_pool_used_bytes The usage of jvm buffer pool. Pulsar Broker   JVM Memory Pool Used meter_pulsar_broker_jvm_memory_pool_used The usage of jvm memory pool. 
Pulsar Broker   JVM Memory meter_pulsar_broker_jvm_memory_init meter_pulsar_broker_jvm_memory_used meter_pulsar_broker_jvm_memory_committed The usage of jvm memory. Pulsar Broker   JVM Threads meter_pulsar_broker_jvm_threads_current meter_pulsar_broker_jvm_threads_daemon meter_pulsar_broker_jvm_threads_peak meter_pulsar_broker_jvm_threads_deadlocked The usage of jvm threads. Pulsar Broker   GC Time meter_pulsar_broker_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Pulsar Broker   GC Count meter_pulsar_broker_jvm_gc_collection_seconds_count The count of a given JVM garbage collector. Pulsar Broker    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/pulsar/pulsar-cluster.yaml, otel-rules/pulsar/pulsar-broker.yaml. The RabbitMQ dashboard panel configurations are found in ui-initialized-templates/pulsar.\n","excerpt":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus …","ref":"/docs/main/latest/en/setup/backend/backend-pulsar-monitoring/","title":"Pulsar monitoring"},{"body":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus format from the Pulsar and transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the `Layer: PULSAR.\nData flow  Pulsar exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from Pulsar cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up Pulsar Cluster. (Pulsar cluster includes pulsar broker cluster and Bookkeeper bookie cluster.) Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Pulsar Monitoring Pulsar monitoring provides multidimensional metrics monitoring of Pulsar cluster as Layer: PULSAR Service in the OAP. In each cluster, the nodes are represented as Instance.\nPulsar Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Total Topics meter_pulsar_total_topics The number of Pulsar topics in this cluster. Pulsar Cluster   Total Subscriptions meter_pulsar_total_subscriptions The number of Pulsar subscriptions in this cluster. Pulsar Cluster   Total Producers meter_pulsar_total_producers The number of active producers connected to this cluster. Pulsar Cluster   Total Consumers meter_pulsar_total_consumers The number of active consumers connected to this cluster. Pulsar Cluster   Message Rate In meter_pulsar_message_rate_in The total message rate coming into this cluster (message per second). Pulsar Cluster   Message Rate Out meter_pulsar_message_rate_out The total message rate going out from this cluster (message per second). Pulsar Cluster   Throughput In meter_pulsar_throughput_in The total throughput coming into this cluster (byte per second). Pulsar Cluster   Throughput Out meter_pulsar_throughput_out The total throughput going out from this cluster (byte per second). Pulsar Cluster   Storage Size meter_pulsar_storage_size The total storage size of all topics in this broker (in bytes). Pulsar Cluster   Storage Logical Size meter_pulsar_storage_logical_size The storage size of all topics in this broker without replicas (in bytes). 
Pulsar Cluster   Storage Write Rate meter_pulsar_storage_write_rate The total message batches (entries) written to the storage for this broker (message batch per second). Pulsar Cluster   Storage Read Rate meter_pulsar_storage_read_rate The total message batches (entries) read from the storage for this broker (message batch per second). Pulsar Cluster    Pulsar Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     Active Connections meter_pulsar_broker_active_connections The number of active connections. Pulsar Broker   Total Connections meter_pulsar_broker_total_connections The total number of connections. Pulsar Broker   Connection Create Success Count meter_pulsar_broker_connection_create_success_count The number of successfully created connections. Pulsar Broker   Connection Create Fail Count meter_pulsar_broker_connection_create_fail_count The number of failed connections. Pulsar Broker   Connection Closed Total Count meter_pulsar_broker_connection_closed_total_count The total number of closed connections. Pulsar Broker   JVM Buffer Pool Used meter_pulsar_broker_jvm_buffer_pool_used_bytes The usage of jvm buffer pool. Pulsar Broker   JVM Memory Pool Used meter_pulsar_broker_jvm_memory_pool_used The usage of jvm memory pool. Pulsar Broker   JVM Memory meter_pulsar_broker_jvm_memory_init meter_pulsar_broker_jvm_memory_used meter_pulsar_broker_jvm_memory_committed The usage of jvm memory. Pulsar Broker   JVM Threads meter_pulsar_broker_jvm_threads_current meter_pulsar_broker_jvm_threads_daemon meter_pulsar_broker_jvm_threads_peak meter_pulsar_broker_jvm_threads_deadlocked The usage of jvm threads. Pulsar Broker   GC Time meter_pulsar_broker_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Pulsar Broker   GC Count meter_pulsar_broker_jvm_gc_collection_seconds_count The count of a given JVM garbage collector. Pulsar Broker    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/pulsar/pulsar-cluster.yaml, otel-rules/pulsar/pulsar-broker.yaml. The Pulsar dashboard panel configurations are found in ui-initialized-templates/pulsar.\n","excerpt":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus …","ref":"/docs/main/next/en/setup/backend/backend-pulsar-monitoring/","title":"Pulsar monitoring"},{"body":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus format from the Pulsar and transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the `Layer: PULSAR.\nData flow  Pulsar exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from Pulsar cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up Pulsar Cluster. (Pulsar cluster includes pulsar broker cluster and Bookkeeper bookie cluster.) Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Pulsar Monitoring Pulsar monitoring provides multidimensional metrics monitoring of Pulsar cluster as Layer: PULSAR Service in the OAP. 
In each cluster, the nodes are represented as Instance.\nPulsar Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Total Topics meter_pulsar_total_topics The number of Pulsar topics in this cluster. Pulsar Cluster   Total Subscriptions meter_pulsar_total_subscriptions The number of Pulsar subscriptions in this cluster. Pulsar Cluster   Total Producers meter_pulsar_total_producers The number of active producers connected to this cluster. Pulsar Cluster   Total Consumers meter_pulsar_total_consumers The number of active consumers connected to this cluster. Pulsar Cluster   Message Rate In meter_pulsar_message_rate_in The total message rate coming into this cluster (message per second). Pulsar Cluster   Message Rate Out meter_pulsar_message_rate_out The total message rate going out from this cluster (message per second). Pulsar Cluster   Throughput In meter_pulsar_throughput_in The total throughput coming into this cluster (byte per second). Pulsar Cluster   Throughput Out meter_pulsar_throughput_out The total throughput going out from this cluster (byte per second). Pulsar Cluster   Storage Size meter_pulsar_storage_size The total storage size of all topics in this broker (in bytes). Pulsar Cluster   Storage Logical Size meter_pulsar_storage_logical_size The storage size of all topics in this broker without replicas (in bytes). Pulsar Cluster   Storage Write Rate meter_pulsar_storage_write_rate The total message batches (entries) written to the storage for this broker (message batch per second). Pulsar Cluster   Storage Read Rate meter_pulsar_storage_read_rate The total message batches (entries) read from the storage for this broker (message batch per second). Pulsar Cluster    Pulsar Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     Active Connections meter_pulsar_broker_active_connections The number of active connections. Pulsar Broker   Total Connections meter_pulsar_broker_total_connections The total number of connections. Pulsar Broker   Connection Create Success Count meter_pulsar_broker_connection_create_success_count The number of successfully created connections. Pulsar Broker   Connection Create Fail Count meter_pulsar_broker_connection_create_fail_count The number of failed connections. Pulsar Broker   Connection Closed Total Count meter_pulsar_broker_connection_closed_total_count The total number of closed connections. Pulsar Broker   JVM Buffer Pool Used meter_pulsar_broker_jvm_buffer_pool_used_bytes The usage of jvm buffer pool. Pulsar Broker   JVM Memory Pool Used meter_pulsar_broker_jvm_memory_pool_used The usage of jvm memory pool. Pulsar Broker   JVM Memory meter_pulsar_broker_jvm_memory_init meter_pulsar_broker_jvm_memory_used meter_pulsar_broker_jvm_memory_committed The usage of jvm memory. Pulsar Broker   JVM Threads meter_pulsar_broker_jvm_threads_current meter_pulsar_broker_jvm_threads_daemon meter_pulsar_broker_jvm_threads_peak meter_pulsar_broker_jvm_threads_deadlocked The usage of jvm threads. Pulsar Broker   GC Time meter_pulsar_broker_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Pulsar Broker   GC Count meter_pulsar_broker_jvm_gc_collection_seconds_count The count of a given JVM garbage collector. Pulsar Broker    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/pulsar/pulsar-cluster.yaml, otel-rules/pulsar/pulsar-broker.yaml. 
The RabbitMQ dashboard panel configurations are found in ui-initialized-templates/pulsar.\n","excerpt":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-pulsar-monitoring/","title":"Pulsar monitoring"},{"body":"Python Agent Asynchronous Enhancement Since 1.1.0, the Python agent supports asynchronous reporting of ALL telemetry data, including traces, metrics, logs and profile. This feature is disabled by default, since it is still in the experimental stage. You can enable it by setting the SW_AGENT_ASYNCIO_ENHANCEMENT environment variable to true. See the configuration document for more information.\nexport SW_AGENT_ASYNCIO_ENHANCEMENT=true Why we need this feature Before version 1.1.0, SkyWalking Python agent had only an implementation with the Threading module to provide data reporters. Yet with the growth of the Python agent, it is now fully capable and requires more resources than when only tracing was supported (we start many threads and gRPC itself creates even more threads when streaming).\nAs well known, the Global Interpreter Lock (GIL) in Python can limit the true parallel execution of threads. This issue also effects the Python agent, especially on network communication with the SkyWalking OAP (gRPC, HTTP and Kafka).\nTherefore, we have decided to implement the reporter code for the SkyWalking Python agent based on the asyncio library. asyncio is an officially supported asynchronous programming library in Python that operates on a single-threaded, coroutine-driven model. Currently, it enjoys widespread adoption and boasts a rich ecosystem, making it the preferred choice for enhancing asynchronous capabilities in many Python projects.\nHow it works To keep the API unchanged, we have completely rewritten a new class called SkyWalkingAgentAsync (identical to the SkyWalkingAgent class). We use the environment variable mentioned above, SW_AGENT_ASYNCIO_ENHANCEMENT, to control which class implements the agent\u0026rsquo;s interface.\nIn the SkyWalkingAgentAsync class, we have employed asyncio coroutines and their related functions to replace the Python threading implementation in nearly all instances. And we have applied asyncio enhancements to all three primary reporting protocols of the current SkyWalking Python agent:\n  gRPC: We use the grpc.aio module to replace the grpc module. Since the grpc.aio module is also officially supported and included in the grpc package, we can use it directly without any additional installation.\n  HTTP: We use the aiohttp module to replace the requests module.\n  Kafka: We use the aiokafka module to replace the kafka-python module.\n  Performance improvement We use wrk to pressure test the network throughput of the Python agents in a FastAPI application.\n gRPC  The performance has been improved by about 32.8%\n   gRPC QPS TPS Avg Latency     sync (original) 899.26 146.66KB 545.97ms   async (new) 1194.55 194.81KB 410.97ms     HTTP  The performance has been improved by about 9.8%\n   HTTP QPS TPS Avg Latency     sync (original) 530.95 86.59KB 1.53s   async (new) 583.37 95.14KB 1.44s     Kafka  The performance has been improved by about 89.6%\n   Kafka QPS TPS Avg Latency     sync (original) 345.89 56.41KB 1.09s   async (new) 655.67 106.93KB 1.24s     In fact, only the performance improvement of gRPC is of more reference value. 
Because the other two protocols use third-party libraries with completely different implementations, the performance improvement depends to a certain extent on the performance of these third-party libraries.\n More details see this PR .\nPotential problems We have shown that the asynchronous enhancement function improves the transmission efficiency of metrics, traces and logs. But it improves the proformance of profile data very little, and even causes performance degradation.\nThis is mainly because a large part of the data in the profile part comes from the monitoring and measurement of Python threads, which is exactly what we need to avoid in asynchronous enhancement. Since operations on threads cannot be bypassed, we may need additional overhead to support cross-thread coroutine communication, which may lead to performance degradation instead of increase.\nAsynchronous enhancements involve many code changes and introduced some new dependencies. Since this feature is relatively new, it may cause some unexpected errors and problems. If you encounter them, please feel free to contact us or submit issues and PRs!\n","excerpt":"Python Agent Asynchronous Enhancement Since 1.1.0, the Python agent supports asynchronous reporting …","ref":"/docs/skywalking-python/next/en/setup/advanced/asyncenhancement/","title":"Python Agent Asynchronous Enhancement"},{"body":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module (in theory, also logging libraries depending on the core logging module) and loguru module.\nFrom Python agent 1.0.0, the log reporter is automatically enabled and can be disabled through agent_log_reporter_active=False or SW_AGENT_LOG_REPORTER_ACTIVE=False.\nLog reporter supports all three protocols including grpc, http and kafka, which shares the same config agent_protocol with trace reporter.\nIf chosen http protocol, the logs will be batch-reported to the collector REST endpoint oap/v3/logs.\nIf chosen kafka protocol, please make sure to config kafka-fetcher on the OAP side, and make sure Python agent config kafka_bootstrap_servers points to your Kafka brokers.\nPlease make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nagent_log_reporter_active=True - Enables the log reporter.\nagent_log_reporter_max_buffer_size - The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.\nAlternatively, you can pass configurations through environment variables. Please refer to the Configuration Vocabulary for the list of environment variables associated with the log reporter.\nSpecify a logging level  [Important] Agent will only report logs that passes the default level threshold logging.getLogger().setLevel(logging.WARNING) For example, if your logger level is logging.INFO, agent will not report info logs even if you set agent_log_reporter_level to INFO\n Additional to the code level configuration, only the logs with a level equal to or higher than the specified configuration will be collected and reported.\nIn other words, the agent skips reporting some unwanted logs based on your level threshold even though they are still logged.\nlog_reporter_level - The string name of a logger level.\nNote that it also works with your custom logger levels, simply specify its string name in the config.\nIgnore log filters The following config is disabled by default. 
When enabled, the log reporter will collect logs disregarding your custom log filters.\nFor example, if you attach the filter below to the logger - the default behavior of log reporting aligns with the filter (not reporting any logs with a message starting with SW test)\nclass AppFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(\u0026#39;SW test\u0026#39;) logger.addFilter(AppFilter()) However, if you do would like to report those filtered logs, set the log_reporter_ignore_filter to True.\nFormatting Note that regardless of the formatting, Python agent will always report the following three tags -\nlevel - the logger level name\nlogger - the logger name\nthread - the thread name\nLimit stacktrace depth You can set the cause_exception_depth config entry to a desired level(defaults to 10), which limits the output depth of exception stacktrace in reporting.\nThis config limits agent to report up to limit stacktrace, please refer to Python traceback for more explanations.\nCustomize the reported log format You can choose to report collected logs in a custom layout.\nIf not set, the agent uses the layout below by default, else the agent uses your custom layout set in log_reporter_layout.\n'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s'\nIf the layout is set to None, the reported log content will only contain the pre-formatted LogRecord.message(msg % args) without any additional styles or extra fields, stacktrace will be attached if an exception was raised.\nTransmit un-formatted logs You can also choose to report the log messages without any formatting. It separates the raw log msg logRecord.msg and logRecord.args, then puts them into message content and tags starting from argument.0, respectively, along with an exception tag if an exception was raised.\nNote when you set log_reporter_formatted to False, it ignores your custom layout introduced above.\nAs an example, the following code:\nlogger.info(\u0026#34;SW test log %s%s%s\u0026#34;, \u0026#39;arg0\u0026#39;, \u0026#39;arg1\u0026#39;, \u0026#39;arg2\u0026#39;) Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;SW test log %s %s %s\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg0\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg2\u0026#34; } ] } ","excerpt":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module …","ref":"/docs/skywalking-python/latest/en/setup/advanced/logreporter/","title":"Python Agent Log Reporter"},{"body":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module (in theory, also logging libraries depending on the core logging module) and loguru module.\nFrom Python agent 1.0.0, the log reporter is automatically enabled and can be disabled through agent_log_reporter_active=False or SW_AGENT_LOG_REPORTER_ACTIVE=False.\nLog reporter supports all three protocols including grpc, http and kafka, which shares the same config agent_protocol with trace reporter.\nIf chosen http protocol, the logs will be batch-reported to the collector REST endpoint oap/v3/logs.\nIf chosen kafka protocol, please make sure to config kafka-fetcher on the OAP side, and make sure Python agent config kafka_bootstrap_servers points 
to your Kafka brokers.\nPlease make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nagent_log_reporter_active=True - Enables the log reporter.\nagent_log_reporter_max_buffer_size - The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.\nAlternatively, you can pass configurations through environment variables. Please refer to the Configuration Vocabulary for the list of environment variables associated with the log reporter.\nSpecify a logging level  [Important] Agent will only report logs that passes the default level threshold logging.getLogger().setLevel(logging.WARNING) For example, if your logger level is logging.INFO, agent will not report info logs even if you set agent_log_reporter_level to INFO\n Additional to the code level configuration, only the logs with a level equal to or higher than the specified configuration will be collected and reported.\nIn other words, the agent skips reporting some unwanted logs based on your level threshold even though they are still logged.\nlog_reporter_level - The string name of a logger level.\nNote that it also works with your custom logger levels, simply specify its string name in the config.\nIgnore log filters The following config is disabled by default. When enabled, the log reporter will collect logs disregarding your custom log filters.\nFor example, if you attach the filter below to the logger - the default behavior of log reporting aligns with the filter (not reporting any logs with a message starting with SW test)\nclass AppFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(\u0026#39;SW test\u0026#39;) logger.addFilter(AppFilter()) However, if you do would like to report those filtered logs, set the log_reporter_ignore_filter to True.\nFormatting Note that regardless of the formatting, Python agent will always report the following three tags -\nlevel - the logger level name\nlogger - the logger name\nthread - the thread name\nLimit stacktrace depth You can set the cause_exception_depth config entry to a desired level(defaults to 10), which limits the output depth of exception stacktrace in reporting.\nThis config limits agent to report up to limit stacktrace, please refer to Python traceback for more explanations.\nCustomize the reported log format You can choose to report collected logs in a custom layout.\nIf not set, the agent uses the layout below by default, else the agent uses your custom layout set in log_reporter_layout.\n'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s'\nIf the layout is set to None, the reported log content will only contain the pre-formatted LogRecord.message(msg % args) without any additional styles or extra fields, stacktrace will be attached if an exception was raised.\nTransmit un-formatted logs You can also choose to report the log messages without any formatting. 
It separates the raw log msg logRecord.msg and logRecord.args, then puts them into message content and tags starting from argument.0, respectively, along with an exception tag if an exception was raised.\nNote when you set log_reporter_formatted to False, it ignores your custom layout introduced above.\nAs an example, the following code:\nlogger.info(\u0026#34;SW test log %s%s%s\u0026#34;, \u0026#39;arg0\u0026#39;, \u0026#39;arg1\u0026#39;, \u0026#39;arg2\u0026#39;) Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;SW test log %s %s %s\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg0\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg2\u0026#34; } ] } Print trace ID in your logs To print out the trace IDs in the logs, simply add %(tid)s to the agent_log_reporter_layout.\nYou can take advantage of this feature to print out the trace IDs on any channel you desire, not limited to reporting logs to OAP, this can be achieved by using any formatter you prefer in your own application logic.\n","excerpt":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module …","ref":"/docs/skywalking-python/next/en/setup/advanced/logreporter/","title":"Python Agent Log Reporter"},{"body":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module (in theory, also logging libraries depending on the core logging module) and loguru module.\nFrom Python agent 1.0.0, the log reporter is automatically enabled and can be disabled through agent_log_reporter_active=False or SW_AGENT_LOG_REPORTER_ACTIVE=False.\nLog reporter supports all three protocols including grpc, http and kafka, which shares the same config agent_protocol with trace reporter.\nIf chosen http protocol, the logs will be batch-reported to the collector REST endpoint oap/v3/logs.\nIf chosen kafka protocol, please make sure to config kafka-fetcher on the OAP side, and make sure Python agent config kafka_bootstrap_servers points to your Kafka brokers.\nPlease make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nagent_log_reporter_active=True - Enables the log reporter.\nagent_log_reporter_max_buffer_size - The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.\nAlternatively, you can pass configurations through environment variables. 
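For instance, a minimal sketch (not taken from the Configuration Vocabulary itself) of driving the log reporter through environment variables before the agent starts; SW_AGENT_LOG_REPORTER_ACTIVE is documented above, while the variable names for the level and buffer size are assumptions and should be checked against the Configuration Vocabulary:

import os

# Environment variables are typically read when the agent's config module is loaded,
# so set them before importing skywalking.
os.environ.setdefault('SW_AGENT_LOG_REPORTER_ACTIVE', 'True')            # documented above
os.environ.setdefault('SW_AGENT_LOG_REPORTER_LEVEL', 'WARNING')          # assumed variable name
os.environ.setdefault('SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE', '10000')  # assumed variable name

from skywalking import agent, config

config.init()   # service name, backend address, etc. may also be passed here
agent.start()
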
Please refer to the Configuration Vocabulary for the list of environment variables associated with the log reporter.\nSpecify a logging level  [Important] Agent will only report logs that passes the default level threshold logging.getLogger().setLevel(logging.WARNING) For example, if your logger level is logging.INFO, agent will not report info logs even if you set agent_log_reporter_level to INFO\n Additional to the code level configuration, only the logs with a level equal to or higher than the specified configuration will be collected and reported.\nIn other words, the agent skips reporting some unwanted logs based on your level threshold even though they are still logged.\nlog_reporter_level - The string name of a logger level.\nNote that it also works with your custom logger levels, simply specify its string name in the config.\nIgnore log filters The following config is disabled by default. When enabled, the log reporter will collect logs disregarding your custom log filters.\nFor example, if you attach the filter below to the logger - the default behavior of log reporting aligns with the filter (not reporting any logs with a message starting with SW test)\nclass AppFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(\u0026#39;SW test\u0026#39;) logger.addFilter(AppFilter()) However, if you do would like to report those filtered logs, set the log_reporter_ignore_filter to True.\nFormatting Note that regardless of the formatting, Python agent will always report the following three tags -\nlevel - the logger level name\nlogger - the logger name\nthread - the thread name\nLimit stacktrace depth You can set the cause_exception_depth config entry to a desired level(defaults to 10), which limits the output depth of exception stacktrace in reporting.\nThis config limits agent to report up to limit stacktrace, please refer to Python traceback for more explanations.\nCustomize the reported log format You can choose to report collected logs in a custom layout.\nIf not set, the agent uses the layout below by default, else the agent uses your custom layout set in log_reporter_layout.\n'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s'\nIf the layout is set to None, the reported log content will only contain the pre-formatted LogRecord.message(msg % args) without any additional styles or extra fields, stacktrace will be attached if an exception was raised.\nTransmit un-formatted logs You can also choose to report the log messages without any formatting. 
It separates the raw log msg logRecord.msg and logRecord.args, then puts them into message content and tags starting from argument.0, respectively, along with an exception tag if an exception was raised.\nNote when you set log_reporter_formatted to False, it ignores your custom layout introduced above.\nAs an example, the following code:\nlogger.info(\u0026#34;SW test log %s%s%s\u0026#34;, \u0026#39;arg0\u0026#39;, \u0026#39;arg1\u0026#39;, \u0026#39;arg2\u0026#39;) Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;SW test log %s %s %s\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg0\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg2\u0026#34; } ] } ","excerpt":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module …","ref":"/docs/skywalking-python/v1.0.1/en/setup/advanced/logreporter/","title":"Python Agent Log Reporter"},{"body":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC and Kafka protocol, HTTP protocol is not implemented yet (requires additional handler on SkyWalking OAP side).\nEnabling the feature (default is enabled) PVM Reporter is also by default enabled, meaning useful Python metrics such as thread count/GC info will be shown in OAP General Services - Instance - PVM Tab) If you really don\u0026rsquo;t need such a feature, disable them through config.agent_pvm_meter_reporter_active or SW_AGENT_PVM_METER_REPORTER_ACTIVE\nconfig.agent_meter_reporter_active = True # Or os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;True\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=True Disable the feature os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;False\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=False Counter  Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT, ((\u0026#34;k1\u0026#34;, \u0026#34;v1\u0026#34;), (\u0026#34;k2\u0026#34;, \u0026#34;v2\u0026#34;))) # or this way # builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT).tag(\u0026#39;key1\u0026#39;, \u0026#39;value1\u0026#39;).tag(\u0026#39;key2\u0026#39;, \u0026#39;value2\u0026#39;) c = builder.build() c.increment(2) Syntactic sugars builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time the with-wrapped codes consumed with c.create_timer(): # some codes may consume a certain time builder = Counter.Builder(\u0026#39;c3\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by num once counter_decorator_test gets called @Counter.increase(name=\u0026#39;c3\u0026#39;, num=2) def counter_decorator_test(): # some codes builder = Counter.Builder(\u0026#39;c4\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time counter_decorator_test consumed @Counter.timer(name=\u0026#39;c4\u0026#39;) def counter_decorator_test(s): # some codes may consume a certain time  Counter.Builder(name, tags) Create a new counter builder with the meter name and optional tags. Counter.tag(key: str, value) Mark a tag key/value pair. 
Counter.mode(mode: CounterMode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.increment(count) Increment count to the Counter, It could be a positive value.  Gauge  Gauge API represents a single numerical value.  # producer: iterable object builder = Gauge.Builder(\u0026#39;g1\u0026#39;, producer, ((\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;))) g = Builder.build()  Gauge.Builder(name, tags) Create a new gauge builder with the meter name and iterable object, this iterable object need to produce numeric value. Gauge.tag(key: str, value) Mark a tag key/value pair. Gauge.build() Build a new Gauge which is collected and reported to the backend.  Histogram  Histogram API represents a summary sample observations with customize buckets.  builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)], (\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;)) h = builder.build() Syntactic sugars builder = Histogram.Builder(\u0026#39;h3\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time the with-wprapped codes consumed with h.create_timer(): # some codes may consume a certain time builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time histogram_decorator_test consumed @Histogram.timer(name=\u0026#39;h2\u0026#39;) def histogram_decorator_test(s): time.sleep(s)  Histogram.Builder(name, tags) Create a new histogram builder with the meter name and optional tags. Histogram.tag(key: str, value) Mark a tag key/value pair. Histogram.minValue(value) Set up the minimal value of this histogram, default is 0. Histogram.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","excerpt":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC …","ref":"/docs/skywalking-python/latest/en/setup/advanced/meterreporter/","title":"Python Agent Meter Reporter"},{"body":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC and Kafka protocol, HTTP protocol is not implemented yet (requires additional handler on SkyWalking OAP side).\nEnabling the feature (default is enabled) PVM Reporter is also by default enabled, meaning useful Python metrics such as thread count/GC info will be shown in OAP General Services - Instance - PVM Tab) If you really don\u0026rsquo;t need such a feature, disable them through config.agent_pvm_meter_reporter_active or SW_AGENT_PVM_METER_REPORTER_ACTIVE\nconfig.agent_meter_reporter_active = True # Or os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;True\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=True Disable the feature os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;False\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=False Counter  Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  
builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT, ((\u0026#34;k1\u0026#34;, \u0026#34;v1\u0026#34;), (\u0026#34;k2\u0026#34;, \u0026#34;v2\u0026#34;))) # or this way # builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT).tag(\u0026#39;key1\u0026#39;, \u0026#39;value1\u0026#39;).tag(\u0026#39;key2\u0026#39;, \u0026#39;value2\u0026#39;) c = builder.build() c.increment(2) Syntactic sugars builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time the with-wrapped codes consumed with c.create_timer(): # some codes may consume a certain time builder = Counter.Builder(\u0026#39;c3\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by num once counter_decorator_test gets called @Counter.increase(name=\u0026#39;c3\u0026#39;, num=2) def counter_decorator_test(): # some codes builder = Counter.Builder(\u0026#39;c4\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time counter_decorator_test consumed @Counter.timer(name=\u0026#39;c4\u0026#39;) def counter_decorator_test(s): # some codes may consume a certain time  Counter.Builder(name, tags) Create a new counter builder with the meter name and optional tags. Counter.tag(key: str, value) Mark a tag key/value pair. Counter.mode(mode: CounterMode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.increment(count) Increment count to the Counter, It could be a positive value.  Gauge  Gauge API represents a single numerical value.  # producer: iterable object builder = Gauge.Builder(\u0026#39;g1\u0026#39;, producer, ((\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;))) g = Builder.build()  Gauge.Builder(name, tags) Create a new gauge builder with the meter name and iterable object, this iterable object need to produce numeric value. Gauge.tag(key: str, value) Mark a tag key/value pair. Gauge.build() Build a new Gauge which is collected and reported to the backend.  Histogram  Histogram API represents a summary sample observations with customize buckets.  builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)], (\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;)) h = builder.build() Syntactic sugars builder = Histogram.Builder(\u0026#39;h3\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time the with-wprapped codes consumed with h.create_timer(): # some codes may consume a certain time builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time histogram_decorator_test consumed @Histogram.timer(name=\u0026#39;h2\u0026#39;) def histogram_decorator_test(s): time.sleep(s)  Histogram.Builder(name, tags) Create a new histogram builder with the meter name and optional tags. Histogram.tag(key: str, value) Mark a tag key/value pair. Histogram.minValue(value) Set up the minimal value of this histogram, default is 0. Histogram.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  
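Putting the three meter types together, a self-contained sketch is shown below. The import paths are assumptions (they are not listed in this document); the Builder, tag, increment and create_timer calls follow the API described above, and the agent is assumed to be configured and started elsewhere.

import time

# Assumed module paths; adjust to match your installed skywalking-python version.
from skywalking.meter.counter import Counter, CounterMode
from skywalking.meter.gauge import Gauge
from skywalking.meter.histogram import Histogram

pending = []  # stands in for a real work queue

# Counter: number of processed items, tagged with the queue name.
processed = Counter.Builder('queue_processed', CounterMode.INCREMENT).tag('queue', 'orders').build()

# Gauge: samples the current backlog size from an iterable producer.
def backlog_producer():
    while True:
        yield len(pending)

backlog = Gauge.Builder('queue_backlog', backlog_producer()).build()

# Histogram: distribution of per-item handling time, recorded via create_timer().
handle_time = Histogram.Builder('queue_handle_seconds', [0.01, 0.05, 0.1, 0.5, 1.0]).build()

def handle(item):
    with handle_time.create_timer():
        time.sleep(0.01)       # placeholder for real work
        processed.increment(1)
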
","excerpt":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC …","ref":"/docs/skywalking-python/next/en/setup/advanced/meterreporter/","title":"Python Agent Meter Reporter"},{"body":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC and Kafka protocol, HTTP protocol is not implemented yet (requires additional handler on SkyWalking OAP side).\nEnabling the feature (default is enabled) PVM Reporter is also by default enabled, meaning useful Python metrics such as thread count/GC info will be shown in OAP General Services - Instance - PVM Tab) If you really don\u0026rsquo;t need such a feature, disable them through config.agent_pvm_meter_reporter_active or SW_AGENT_PVM_METER_REPORTER_ACTIVE\nconfig.agent_meter_reporter_active = True # Or os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;True\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=True Disable the feature os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;False\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=False Counter  Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT, ((\u0026#34;k1\u0026#34;, \u0026#34;v1\u0026#34;), (\u0026#34;k2\u0026#34;, \u0026#34;v2\u0026#34;))) # or this way # builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT).tag(\u0026#39;key1\u0026#39;, \u0026#39;value1\u0026#39;).tag(\u0026#39;key2\u0026#39;, \u0026#39;value2\u0026#39;) c = builder.build() c.increment(2) Syntactic sugars builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time the with-wrapped codes consumed with c.create_timer(): # some codes may consume a certain time builder = Counter.Builder(\u0026#39;c3\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by num once counter_decorator_test gets called @Counter.increase(name=\u0026#39;c3\u0026#39;, num=2) def counter_decorator_test(): # some codes builder = Counter.Builder(\u0026#39;c4\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time counter_decorator_test consumed @Counter.timer(name=\u0026#39;c4\u0026#39;) def counter_decorator_test(s): # some codes may consume a certain time  Counter.Builder(name, tags) Create a new counter builder with the meter name and optional tags. Counter.tag(key: str, value) Mark a tag key/value pair. Counter.mode(mode: CounterMode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.increment(count) Increment count to the Counter, It could be a positive value.  Gauge  Gauge API represents a single numerical value.  # producer: iterable object builder = Gauge.Builder(\u0026#39;g1\u0026#39;, producer, ((\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;))) g = Builder.build()  Gauge.Builder(name, tags) Create a new gauge builder with the meter name and iterable object, this iterable object need to produce numeric value. Gauge.tag(key: str, value) Mark a tag key/value pair. Gauge.build() Build a new Gauge which is collected and reported to the backend.  Histogram  Histogram API represents a summary sample observations with customize buckets.  
builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)], (\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;)) h = builder.build() Syntactic sugars builder = Histogram.Builder(\u0026#39;h3\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time the with-wprapped codes consumed with h.create_timer(): # some codes may consume a certain time builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time histogram_decorator_test consumed @Histogram.timer(name=\u0026#39;h2\u0026#39;) def histogram_decorator_test(s): time.sleep(s)  Histogram.Builder(name, tags) Create a new histogram builder with the meter name and optional tags. Histogram.tag(key: str, value) Mark a tag key/value pair. Histogram.minValue(value) Set up the minimal value of this histogram, default is 0. Histogram.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","excerpt":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC …","ref":"/docs/skywalking-python/v1.0.1/en/setup/advanced/meterreporter/","title":"Python Agent Meter Reporter"},{"body":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve a series of data points between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These data points contain tags: id and entity_id that belong to a family default. 
They also choose fields: total and value.\n$ bydbctl measure query -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] timeRange: begin: 2022-10-15T22:32:48Z end: 2022-10-15T23:32:48Z EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl measure query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] EOF API Reference MeasureService v1\n","excerpt":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in …","ref":"/docs/skywalking-banyandb/latest/crud/measure/query/","title":"Query Measures"},{"body":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve a series of data points between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These data points contain tags: id and entity_id that belong to a family default. 
They also choose fields: total and value.\n$ bydbctl measure query -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] timeRange: begin: 2022-10-15T22:32:48Z end: 2022-10-15T23:32:48Z EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl measure query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] EOF API Reference MeasureService v1\n","excerpt":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in …","ref":"/docs/skywalking-banyandb/next/crud/measure/query/","title":"Query Measures"},{"body":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve a series of data points between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These data points contain tags: id and entity_id that belong to a family default. 
They also choose fields: total and value.\n$ bydbctl measure query -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] timeRange: begin: 2022-10-15T22:32:48Z end: 2022-10-15T23:32:48Z EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl measure query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] EOF API Reference MeasureService v1\n","excerpt":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/measure/query/","title":"Query Measures"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
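Stepping back to the V3 metrics block above, here is a minimal MQE sketch that uses only the typeOfMetrics and execExpression signatures shown there; the metric name, the expression, and the variable values are illustrative, and the results sub-selection is omitted because MQEValues is defined elsewhere in the protocol.

```graphql
# Minimal MQE sketch (illustrative metric name and expression).
query MqeSketch($entity: Entity!, $duration: Duration!) {
  # Check how the metric is typed before querying it.
  typeOfMetrics(name: "service_cpm")
  # Evaluate a simple query-stage calculation for the given entity and duration.
  execExpression(expression: "avg(service_cpm)", entity: $entity, duration: $duration) {
    type
    error
  }
}
```

The duration variable follows the same start/end/step shape documented in the Duration section of this page, and the entity variable identifies the service, instance, endpoint, or process being queried.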
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/latest/en/api/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nV2 APIs Provide Metadata V2 query APIs since 9.0.0, including Layer concept.\nextendtypeQuery{# Read all available layers# UI could use this list to determine available dashboards/panels# The available layers would change with time in the runtime, because new service could be detected in any time.# This list should be loaded periodically.listLayers:[String!]!# Read the service list according to layer.listServices(layer:String):[Service!]!# Find service according to given ID. Return null if not existing.getService(serviceId:String!):Service# Search and find service according to given name. Return null if not existing.findService(serviceName:String!):Service# Read service instance list.listInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Search and find service instance according to given ID. Return null if not existing.getInstance(instanceId:String!):ServiceInstance# Search and find matched endpoints according to given service and keyword(optional)# If no keyword, randomly choose endpoint based on `limit` value.findEndpoint(keyword:String,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
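Before the topology APIs that follow, a small sketch of the layer-aware metadata queries defined above; the layer name and the Service sub-fields used here are assumptions drawn from common SkyWalking usage rather than definitions on this page.

```graphql
# Sketch: list available layers, then the services of one layer.
# "GENERAL" and the Service sub-fields (id, name) are assumptions.
query MetadataSketch {
  listLayers
  listServices(layer: "GENERAL") {
    id
    name
  }
}
```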
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topology# When layer is specified, the topology of this layer would be queriedgetGlobalTopology(duration:Duration!,layer:String):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology# Query the topology, based on the given instancegetProcessTopology(serviceInstanceId:ID!,duration:Duration!):ProcessTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!# Read the list of searchable keysqueryLogTagAutocompleteKeys(duration:Duration!):[String!]# Search the available value options of the given key.queryLogTagAutocompleteValues(tagKey:String!,duration:Duration!):[String!]}Log implementations vary between different database options. 
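As a quick sketch of the log-related queries defined above, the capability probe and the tag-key autocomplete can be combined in one request; only signatures shown in that block are used, and the duration value is supplied by the caller.

```graphql
# Sketch: probe fuzzy-query support and list searchable log tag keys.
query LogsSketch($duration: Duration!) {
  supportQueryLogsByKeywords
  queryLogTagAutocompleteKeys(duration: $duration)
}
```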
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{# Search segment list with given conditionsqueryBasicTraces(condition:TraceQueryCondition):TraceBrief# Read the specific trace ID with given trace IDqueryTrace(traceId:ID!):Trace# Read the list of searchable keysqueryTraceTagAutocompleteKeys(duration:Duration!):[String!]# Search the available value options of the given key.queryTraceTagAutocompleteValues(tagKey:String!,duration:Duration!):[String!]}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegments(taskID:ID!):[ProfiledTraceSegments!]!# analyze multiple profiled segments, start and end time use timestamp(millisecond)getSegmentsProfileAnalyze(queries:[SegmentProfileAnalyzeQuery!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task list# query `triggerType == FIXED_TIME` when triggerType is absentqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!],triggerType:EBPFProfilingTriggerType,duration:Duration):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. 
analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}On-Demand Pod Logs Provide APIs to query on-demand pod logs since 9.1.0.\nextendtypeQuery{listContainers(condition:OndemandContainergQueryCondition):PodContainersondemandPodLogs(condition:OndemandLogQueryCondition):Logs}Hierarchy Provide Hierarchy query APIs since 10.0.0, including service and instance hierarchy.\nextendtypeQuery{# Query the service hierarchy, based on the given service. Will recursively return all related layers services in the hierarchy.getServiceHierarchy(serviceId:ID!,layer:String!):ServiceHierarchy!# Query the instance hierarchy, based on the given instance. Will return all direct related layers instances in the hierarchy, no recursive.getInstanceHierarchy(instanceId:ID!,layer:String!):InstanceHierarchy!# List layer hierarchy levels. The layer levels are defined in the `hierarchy-definition.yml`.listLayerLevels:[LayerLevel!]!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/next/en/api/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple linears.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responsed, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.0.0/en/protocols/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. 
Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple linears.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responsed, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
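For instance, a sketch of such a TopN query against getServiceTopN, whose signature appears in the aggregation block below; the metric name is illustrative, the order is supplied by the caller, and the TopNEntity sub-fields are assumptions about the result type.

```graphql
# Sketch: top 10 services by an illustrative metric.
# The TopNEntity sub-fields (name, value) are assumptions.
query TopNSketch($duration: Duration!, $order: Order!) {
  getServiceTopN(name: "service_cpm", topN: 10, duration: $duration, order: $order) {
    name
    value
  }
}
```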
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.1.0/en/protocols/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. 
You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. 
getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple linears.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responsed, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. 
Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.2.0/en/protocols/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. 
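A minimal sketch of the single-value read offered by the Metrics V2 APIs described below; the condition and duration are passed as variables, so no input fields beyond the documented signatures are assumed.

```graphql
# Sketch: read one aggregated value for a metric/entity pair over a duration.
# The MetricsCondition and Duration contents are supplied by the caller.
query SingleValueSketch($condition: MetricsCondition!, $duration: Duration!) {
  readMetricsValue(condition: $condition, duration: $duration)
}
```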
You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.3.0/en/protocols/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
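The topology schema that follows can be exercised with a global query like the sketch below; the getGlobalTopology signature comes from this page, while the Topology sub-fields (nodes and calls) are assumptions about the result type.

```graphql
# Sketch: read the global topology for a duration.
# nodes/calls and their fields are assumptions about the Topology type.
query TopologySketch($duration: Duration!) {
  getGlobalTopology(duration: $duration) {
    nodes {
      id
      name
    }
    calls {
      source
      target
    }
  }
}
```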
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
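Before the V1 schema block below, a heavily hedged sketch of the legacy single-value read; getLinearIntValues is defined in that block, while the IntValues sub-selection used here is an assumption about the legacy result shape.

```graphql
# Sketch: legacy V1 time-series read.
# values { id value } is an assumed shape of IntValues.
query V1LinearSketch($metric: MetricCondition!, $duration: Duration!) {
  getLinearIntValues(metric: $metric, duration: $duration) {
    values {
      id
      value
    }
  }
}
```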
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
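Returning to the Record APIs above, a sketch of reading sampled records via readRecords; the condition is supplied by the caller, and the Record sub-fields (name, value) are assumptions about the result type.

```graphql
# Sketch: read collected raw records (e.g. sampled slow statements).
# name/value are assumed Record fields.
query RecordsSketch($condition: RecordCondition!, $duration: Duration!) {
  readRecords(condition: $condition, duration: $duration) {
    name
    value
  }
}
```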
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.4.0/en/api/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.5.0/en/api/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.6.0/en/api/query-protocol/","title":"Query Protocol"},{"body":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","excerpt":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and …","ref":"/docs/main/v9.7.0/en/api/query-protocol/","title":"Query Protocol"},{"body":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve elements in a stream named sw between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. 
These elements also choose a tag trace_id which lives in a family named searchable.\n$ bydbctl stream query -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] timeRange: begin: 2022-10-15T22:32:48+08:00 end: 2022-10-15T23:32:48+08:00 EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl stream query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] EOF API Reference StreamService v1\n","excerpt":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in …","ref":"/docs/skywalking-banyandb/latest/crud/stream/query/","title":"Query Streams"},{"body":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve elements in a stream named sw between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. 
These elements also choose a tag trace_id which lives in a family named searchable.\n$ bydbctl stream query -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] timeRange: begin: 2022-10-15T22:32:48+08:00 end: 2022-10-15T23:32:48+08:00 EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl stream query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] EOF API Reference StreamService v1\n","excerpt":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in …","ref":"/docs/skywalking-banyandb/next/crud/stream/query/","title":"Query Streams"},{"body":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve elements in a stream named sw between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. 
These elements also choose a tag trace_id which lives in a family named searchable.\n$ bydbctl stream query -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] timeRange: begin: 2022-10-15T22:32:48+08:00 end: 2022-10-15T23:32:48+08:00 EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl stream query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] EOF API Reference StreamService v1\n","excerpt":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in …","ref":"/docs/skywalking-banyandb/v0.5.0/crud/stream/query/","title":"Query Streams"},{"body":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The maximum buffer event size.event_buffer_size:5000# The partition count of queue.partition:1Configuration    Name Type Description     event_buffer_size int configThe maximum buffer event size.   partition int The total partition count.    ","excerpt":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/queue_memory-queue/","title":"Queue/memory-queue"},{"body":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The maximum buffer event size.event_buffer_size:5000# The partition count of queue.partition:1Configuration    Name Type Description     event_buffer_size int configThe maximum buffer event size.   partition int The total partition count.    ","excerpt":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/queue_memory-queue/","title":"Queue/memory-queue"},{"body":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The maximum buffer event size.event_buffer_size:5000# The partition count of queue.partition:1Configuration    Name Type Description     event_buffer_size int configThe maximum buffer event size.   partition int The total partition count.    ","excerpt":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/queue_memory-queue/","title":"Queue/memory-queue"},{"body":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the input event. Please note that this plugin does not support Windows platform.\nDefaultConfig # The size of each segment. Default value is 256K. The unit is Byte.segment_size:262114# The max num of segments in memory. Default value is 10.max_in_mem_segments:10# The capacity of Queue = segment_size * queue_capacity_segments.queue_capacity_segments:2000# The period flush time. The unit is ms. Default value is 1 second.flush_period:1000# The max number in one flush time. Default value is 10000.flush_ceiling_num:10000# The max size of the input event. Default value is 20k.max_event_size:20480# The partition count of queue.partition:1Configuration    Name Type Description     segment_size int The size of each segment. The unit is byte.   
max_in_mem_segments int32 The max num of segments in memory.   queue_capacity_segments int The capacity of Queue = segment_size * queue_capacity_segments.   flush_period int The period flush time. The unit is ms.   flush_ceiling_num int The max number in one flush time.   max_event_size int The max size of the input event.   partition int The total partition count.    ","excerpt":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/queue_mmap-queue/","title":"Queue/mmap-queue"},{"body":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the input event. Please note that this plugin does not support Windows platform.\nDefaultConfig # The size of each segment. Default value is 256K. The unit is Byte.segment_size:262114# The max num of segments in memory. Default value is 10.max_in_mem_segments:10# The capacity of Queue = segment_size * queue_capacity_segments.queue_capacity_segments:2000# The period flush time. The unit is ms. Default value is 1 second.flush_period:1000# The max number in one flush time. Default value is 10000.flush_ceiling_num:10000# The max size of the input event. Default value is 20k.max_event_size:20480# The partition count of queue.partition:1Configuration    Name Type Description     segment_size int The size of each segment. The unit is byte.   max_in_mem_segments int32 The max num of segments in memory.   queue_capacity_segments int The capacity of Queue = segment_size * queue_capacity_segments.   flush_period int The period flush time. The unit is ms.   flush_ceiling_num int The max number in one flush time.   max_event_size int The max size of the input event.   partition int The total partition count.    ","excerpt":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/queue_mmap-queue/","title":"Queue/mmap-queue"},{"body":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the input event. Please note that this plugin does not support Windows platform.\nDefaultConfig # The size of each segment. Default value is 256K. The unit is Byte.segment_size:262114# The max num of segments in memory. Default value is 10.max_in_mem_segments:10# The capacity of Queue = segment_size * queue_capacity_segments.queue_capacity_segments:2000# The period flush time. The unit is ms. Default value is 1 second.flush_period:1000# The max number in one flush time. Default value is 10000.flush_ceiling_num:10000# The max size of the input event. Default value is 20k.max_event_size:20480# The partition count of queue.partition:1Configuration    Name Type Description     segment_size int The size of each segment. The unit is byte.   max_in_mem_segments int32 The max num of segments in memory.   queue_capacity_segments int The capacity of Queue = segment_size * queue_capacity_segments.   flush_period int The period flush time. The unit is ms.   flush_ceiling_num int The max number in one flush time.   max_event_size int The max size of the input event.   partition int The total partition count.    
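Because the queue capacity is defined as segment_size * queue_capacity_segments, the documented defaults work out to roughly half a gibibyte of on-disk capacity. A quick check in Python, using only the numbers from the DefaultConfig above:

# Rough capacity of the mmap queue with the documented defaults.
segment_size = 262114            # bytes per segment (default from the config above)
queue_capacity_segments = 2000   # number of segments

capacity_bytes = segment_size * queue_capacity_segments
print(f"{capacity_bytes / 1024 ** 2:.0f} MiB")  # ~500 MiB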
","excerpt":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/queue_mmap-queue/","title":"Queue/mmap-queue"},{"body":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as SkyWalking native configuration discovery service protocol.\nDefaultConfig # The partition count of queue.partition:1Configuration    Name Type Description     partition int The total partition count.    ","excerpt":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/queue_none-queue/","title":"Queue/none-queue"},{"body":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as SkyWalking native configuration discovery service protocol.\nDefaultConfig # The partition count of queue.partition:1Configuration    Name Type Description     partition int The total partition count.    ","excerpt":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/queue_none-queue/","title":"Queue/none-queue"},{"body":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as SkyWalking native configuration discovery service protocol.\nDefaultConfig # The partition count of queue.partition:1Configuration    Name Type Description     partition int The total partition count.    ","excerpt":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/queue_none-queue/","title":"Queue/none-queue"},{"body":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including setting up environments, testing and releasing.\nFirst you need to have the make command available:\n# ubuntu/wsl sudo apt-get update sudo apt-get -y install make or\n# windows powershell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time irm get.scoop.sh | iex scoop install make Poetry We have migrated from basic pip to Poetry to manage dependencies and package our project.\nOnce you have make ready, run make env, this will automatically install the right Poetry release, and create (plus manage) a .venv virtual environment for us based on the currently activated Python 3 version. 
Enjoy coding!\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nSwitching between Multiple Python Versions Do not develop/test on Python \u0026lt; 3.7, since Poetry and some other functionalities we implement rely on Python 3.7+\nIf you would like to test on multiple Python versions, run the following to switch and recreate virtual environment:\nWithout Python Version Tools poetry env use python3.x poetry install With Python Version Tools pyenv shell 3.9.11 poetry env use $(pyenv which python) poetry install Or try: virtualenvs.prefer-active-python, which is an experimental poetry feature that can be set to true so that it will automatically follow environment.\nNext Refer to the Plugin Development Guide to learn how to build a new plugin for a library.\n","excerpt":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including …","ref":"/docs/skywalking-python/latest/en/contribution/developer/","title":"Quick Start for Contributors"},{"body":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including setting up environments, testing and releasing.\nFirst you need to have the make command available:\n# ubuntu/wsl sudo apt-get update sudo apt-get -y install make or\n# windows powershell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time irm get.scoop.sh | iex scoop install make Poetry We have migrated from basic pip to Poetry to manage dependencies and package our project.\nOnce you have make ready, run make env, this will automatically install the right Poetry release, and create (plus manage) a .venv virtual environment for us based on the currently activated Python 3 version. 
Enjoy coding!\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nSwitching between Multiple Python Versions Do not develop/test on Python \u0026lt; 3.7, since Poetry and some other functionalities we implement rely on Python 3.7+\nIf you would like to test on multiple Python versions, run the following to switch and recreate virtual environment:\nWithout Python Version Tools poetry env use python3.x poetry install With Python Version Tools pyenv shell 3.9.11 poetry env use $(pyenv which python) poetry install Or try: virtualenvs.prefer-active-python, which is an experimental poetry feature that can be set to true so that it will automatically follow environment.\nNext Refer to the Plugin Development Guide to learn how to build a new plugin for a library.\n","excerpt":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including …","ref":"/docs/skywalking-python/next/en/contribution/developer/","title":"Quick Start for Contributors"},{"body":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including setting up environments, testing and releasing.\nFirst you need to have the make command available:\n# ubuntu/wsl sudo apt-get update sudo apt-get -y install make or\n# windows powershell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time irm get.scoop.sh | iex scoop install make Poetry We have migrated from basic pip to Poetry to manage dependencies and package our project.\nOnce you have make ready, run make env, this will automatically install the right Poetry release, and create (plus manage) a .venv virtual environment for us based on the currently activated Python 3 version. Enjoy coding!\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nSwitching between Multiple Python Versions Do not develop/test on Python \u0026lt; 3.7, since Poetry and some other functionalities we implement rely on Python 3.7+\nIf you would like to test on multiple Python versions, run the following to switch and recreate virtual environment:\nWithout Python Version Tools poetry env use python3.x poetry install With Python Version Tools pyenv shell 3.9.11 poetry env use $(pyenv which python) poetry install Or try: virtualenvs.prefer-active-python, which is an experimental poetry feature that can be set to true so that it will automatically follow environment.\nNext Refer to the Plugin Development Guide to learn how to build a new plugin for a library.\n","excerpt":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including …","ref":"/docs/skywalking-python/v1.0.1/en/contribution/developer/","title":"Quick Start for Contributors"},{"body":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). 
rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","excerpt":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from …","ref":"/docs/main/latest/en/setup/backend/backend-rabbitmq-monitoring/","title":"RabbitMQ monitoring"},{"body":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. 
OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. 
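For the Setup step that points to an example OpenTelemetry Collector configuration, the following is a minimal sketch only: it assumes the Collector's Prometheus receiver scrapes the plugin's default metrics endpoint (port 15692, exposed after running `rabbitmq-plugins enable rabbitmq_prometheus`) and that the SkyWalking OAP's OTLP gRPC port is reachable as `oap:11800`. The job name, host names, scrape interval, and cluster label below are illustrative assumptions, not values taken from this page.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: rabbitmq-monitoring          # assumed job name
          scrape_interval: 30s
          static_configs:
            - targets: ["rabbitmq-node-1:15692"] # assumed host; 15692 is the plugin's default port
              labels:
                cluster: rabbitmq-cluster-1      # assumed cluster-identifying label

processors:
  batch: {}

exporters:
  otlp:
    endpoint: oap:11800   # assumed SkyWalking OAP OTLP gRPC address
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
```

A cluster-identifying label is typically attached at scrape time so the OAP side can group the nodes of one cluster into a single RABBITMQ-layer service, with each node shown as an Instance.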
rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","excerpt":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from …","ref":"/docs/main/next/en/setup/backend/backend-rabbitmq-monitoring/","title":"RabbitMQ monitoring"},{"body":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. 
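The Customizations note above points at /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml and rabbitmq-node.yaml for the metric definitions and expressions. As a hedged illustration of the general otel-rules/MAL shape only (the filter value, tag handling, and expression below are assumptions for illustration, not lines copied from those shipped files), a cluster-level rule might look roughly like this:

```yaml
# Illustrative sketch of a MAL rule file, not the shipped rabbitmq-cluster.yaml.
filter: "{ tags -> tags.job_name == 'rabbitmq-monitoring' }"   # assumed scrape job name
expSuffix: tag({tags -> tags.cluster = 'rabbitmq::' + tags.cluster}).service(['cluster'], Layer.RABBITMQ)
metricPrefix: meter_rabbitmq
metricsRules:
  # Total number of queue masters, summed per cluster.
  - name: queues
    exp: rabbitmq_queues.sum(['cluster'])
```

The exported metric name is the metricPrefix joined with the rule name (here meter_rabbitmq_queues), which is how the names listed in the tables above are formed.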
OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. 
rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","excerpt":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-rabbitmq-monitoring/","title":"RabbitMQ monitoring"},{"body":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. 
OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. 
rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","excerpt":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-rabbitmq-monitoring/","title":"RabbitMQ monitoring"},{"body":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. 
OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. 
rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","excerpt":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-rabbitmq-monitoring/","title":"RabbitMQ monitoring"},{"body":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... 
modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","excerpt":"Reading Context All following APIs provide readonly features for the tracing context from tracing …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/","title":"Reading Context"},{"body":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","excerpt":"Reading Context All following APIs provide readonly features for the tracing context from tracing …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/","title":"Reading Context"},{"body":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","excerpt":"Reading Context All following APIs provide readonly features for the tracing context from tracing …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/","title":"Reading Context"},{"body":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... 
modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","excerpt":"Reading Context All following APIs provide readonly features for the tracing context from tracing …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/","title":"Reading Context"},{"body":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","excerpt":"Reading Context All following APIs provide readonly features for the tracing context from tracing …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/","title":"Reading Context"},{"body":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/accesslog/v2/als.proto.\nSupport Forwarders  envoy-als-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-als-v2-receiver/","title":"Receiver/grpc-envoy-als-v2-receiver"},{"body":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/accesslog/v2/als.proto.\nSupport Forwarders  envoy-als-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-als-v2-receiver/","title":"Receiver/grpc-envoy-als-v2-receiver"},{"body":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/accesslog/v2/als.proto.\nSupport Forwarders  envoy-als-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. 
And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-als-v2-receiver/","title":"Receiver/grpc-envoy-als-v2-receiver"},{"body":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/3791753e94edbac8a90c5485c68136886c40e719/api/envoy/config/accesslog/v3/accesslog.proto.\nSupport Forwarders  envoy-als-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-als-v3-receiver/","title":"Receiver/grpc-envoy-als-v3-receiver"},{"body":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/3791753e94edbac8a90c5485c68136886c40e719/api/envoy/config/accesslog/v3/accesslog.proto.\nSupport Forwarders  envoy-als-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-als-v3-receiver/","title":"Receiver/grpc-envoy-als-v3-receiver"},{"body":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/3791753e94edbac8a90c5485c68136886c40e719/api/envoy/config/accesslog/v3/accesslog.proto.\nSupport Forwarders  envoy-als-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   
limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-als-v3-receiver/","title":"Receiver/grpc-envoy-als-v3-receiver"},{"body":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/metrics/v2/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-metrics-v2-receiver/","title":"Receiver/grpc-envoy-metrics-v2-receiver"},{"body":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/metrics/v2/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-metrics-v2-receiver/","title":"Receiver/grpc-envoy-metrics-v2-receiver"},{"body":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/metrics/v2/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-metrics-v2-receiver/","title":"Receiver/grpc-envoy-metrics-v2-receiver"},{"body":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/5f7d6efb5786ee3de31b1fb37c78fa281718b704/api/envoy/service/metrics/v3/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. 
And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-metrics-v3-receiver/","title":"Receiver/grpc-envoy-metrics-v3-receiver"},{"body":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/5f7d6efb5786ee3de31b1fb37c78fa281718b704/api/envoy/service/metrics/v3/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-metrics-v3-receiver/","title":"Receiver/grpc-envoy-metrics-v3-receiver"},{"body":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/5f7d6efb5786ee3de31b1fb37c78fa281718b704/api/envoy/service/metrics/v3/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   
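The DefaultConfig shown inline in these receiver pages is easier to read when laid out as YAML; the two options and their documented defaults are:

```yaml
# The time interval between two flush operations, in milliseconds.
flush_time: 1000
# The max cache count when receiving messages.
limit_count: 500
```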
limit_count int The max cache count when receive the message    ","excerpt":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-metrics-v3-receiver/","title":"Receiver/grpc-envoy-metrics-v3-receiver"},{"body":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration Discovery Service format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/ConfigurationDiscoveryService.proto.\nSupport Forwarders  native-cds-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-cds-receiver/","title":"Receiver/grpc-native-cds-receiver"},{"body":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration Discovery Service format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/ConfigurationDiscoveryService.proto.\nSupport Forwarders  native-cds-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-cds-receiver/","title":"Receiver/grpc-native-cds-receiver"},{"body":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration Discovery Service format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/ConfigurationDiscoveryService.proto.\nSupport Forwarders  native-cds-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-cds-receiver/","title":"Receiver/grpc-native-cds-receiver"},{"body":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/CLRMetric.proto.\nSupport Forwarders  native-clr-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-clr-receiver/","title":"Receiver/grpc-native-clr-receiver"},{"body":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/CLRMetric.proto.\nSupport Forwarders  native-clr-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-clr-receiver/","title":"Receiver/grpc-native-clr-receiver"},{"body":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr 
format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/CLRMetric.proto.\nSupport Forwarders  native-clr-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-clr-receiver/","title":"Receiver/grpc-native-clr-receiver"},{"body":"Receiver/grpc-native-ebpf-accesslog-receiver Description This is a receiver for SkyWalking native accesslog format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/accesslog.proto.\nSupport Forwarders  native-ebpf-accesslog-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-ebpf-accesslog-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-ebpf-accesslog-receiver/","title":"Receiver/grpc-native-ebpf-accesslog-receiver"},{"body":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-ebpf-profiling-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-ebpf-profiling-receiver/","title":"Receiver/grpc-native-ebpf-profiling-receiver"},{"body":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-ebpf-profiling-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-ebpf-profiling-receiver/","title":"Receiver/grpc-native-ebpf-profiling-receiver"},{"body":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-ebpf-profiling-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-ebpf-profiling-receiver/","title":"Receiver/grpc-native-ebpf-profiling-receiver"},{"body":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-event-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter 
…","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-event-receiver/","title":"Receiver/grpc-native-event-receiver"},{"body":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-event-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-event-receiver/","title":"Receiver/grpc-native-event-receiver"},{"body":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-event-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-event-receiver/","title":"Receiver/grpc-native-event-receiver"},{"body":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto.\nSupport Forwarders  native-jvm-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-jvm-receiver/","title":"Receiver/grpc-native-jvm-receiver"},{"body":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto.\nSupport Forwarders  native-jvm-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-jvm-receiver/","title":"Receiver/grpc-native-jvm-receiver"},{"body":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto.\nSupport Forwarders  native-jvm-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-jvm-receiver/","title":"Receiver/grpc-native-jvm-receiver"},{"body":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging 
…","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-log-receiver/","title":"Receiver/grpc-native-log-receiver"},{"body":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-log-receiver/","title":"Receiver/grpc-native-log-receiver"},{"body":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-log-receiver/","title":"Receiver/grpc-native-log-receiver"},{"body":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native management format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto.\nSupport Forwarders  native-management-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-management-receiver/","title":"Receiver/grpc-native-management-receiver"},{"body":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native management format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto.\nSupport Forwarders  native-management-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-management-receiver/","title":"Receiver/grpc-native-management-receiver"},{"body":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native management format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto.\nSupport Forwarders  native-management-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-management-receiver/","title":"Receiver/grpc-native-management-receiver"},{"body":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-meter-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    
","excerpt":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-meter-receiver/","title":"Receiver/grpc-native-meter-receiver"},{"body":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-meter-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-meter-receiver/","title":"Receiver/grpc-native-meter-receiver"},{"body":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-meter-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-meter-receiver/","title":"Receiver/grpc-native-meter-receiver"},{"body":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-process-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-process-receiver/","title":"Receiver/grpc-native-process-receiver"},{"body":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-process-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-process-receiver/","title":"Receiver/grpc-native-process-receiver"},{"body":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-process-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-process-receiver/","title":"Receiver/grpc-native-process-receiver"},{"body":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/profile/Profile.proto.\nSupport Forwarders  native-profile-grpc-forwarder  
DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-profile-receiver/","title":"Receiver/grpc-native-profile-receiver"},{"body":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/profile/Profile.proto.\nSupport Forwarders  native-profile-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-profile-receiver/","title":"Receiver/grpc-native-profile-receiver"},{"body":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/profile/Profile.proto.\nSupport Forwarders  native-profile-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-profile-receiver/","title":"Receiver/grpc-native-profile-receiver"},{"body":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing and span attached event format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto.\nSupport Forwarders  native-tracing-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-tracing-receiver/","title":"Receiver/grpc-native-tracing-receiver"},{"body":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing and span attached event format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto.\nSupport Forwarders  native-tracing-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-tracing-receiver/","title":"Receiver/grpc-native-tracing-receiver"},{"body":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing and span attached event format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto.\nSupport Forwarders  native-tracing-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-tracing-receiver/","title":"Receiver/grpc-native-tracing-receiver"},{"body":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 
format, which is defined at https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/collector/metrics/v1/metrics_service.proto.\nSupport Forwarders  otlp-metrics-v1-grpc-forwarder  DefaultConfig yaml \nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-otlp-metrics-v1-receiver/","title":"Receiver/grpc-otlp-metrics-v1-receiver"},{"body":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 format, which is defined at https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/collector/metrics/v1/metrics_service.proto.\nSupport Forwarders  otlp-metrics-v1-grpc-forwarder  DefaultConfig yaml \nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-otlp-metrics-v1-receiver/","title":"Receiver/grpc-otlp-metrics-v1-receiver"},{"body":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 format, which is defined at https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/collector/metrics/v1/metrics_service.proto.\nSupport Forwarders  otlp-metrics-v1-grpc-forwarder  DefaultConfig yaml \nConfiguration    Name Type Description    ","excerpt":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-otlp-metrics-v1-receiver/","title":"Receiver/grpc-otlp-metrics-v1-receiver"},{"body":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig # The native log request URI.uri:\u0026#34;/logging\u0026#34;# The request timeout seconds.timeout:5Configuration    Name Type Description     uri string config   timeout int     ","excerpt":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_http-native-log-receiver/","title":"Receiver/http-native-log-receiver"},{"body":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig # The native log request URI.uri:\u0026#34;/logging\u0026#34;# The request timeout seconds.timeout:5Configuration    Name Type Description     uri string config   timeout int     ","excerpt":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_http-native-log-receiver/","title":"Receiver/http-native-log-receiver"},{"body":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, which is defined at 
https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig # The native log request URI.uri:\u0026#34;/logging\u0026#34;# The request timeout seconds.timeout:5Configuration    Name Type Description     uri string config   timeout int     ","excerpt":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_http-native-log-receiver/","title":"Receiver/http-native-log-receiver"},{"body":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. 
The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","excerpt":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter …","ref":"/docs/main/latest/en/setup/backend/backend-redis-monitoring/","title":"Redis monitoring"},{"body":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. 
redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_used_bytes  meter_redis_memory_max_bytes Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration  meter_redis_commands_total The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","excerpt":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter …","ref":"/docs/main/next/en/setup/backend/backend-redis-monitoring/","title":"Redis monitoring"},{"body":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. 
Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","excerpt":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-redis-monitoring/","title":"Redis monitoring"},{"body":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. 
fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","excerpt":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-redis-monitoring/","title":"Redis monitoring"},{"body":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. 
redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","excerpt":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-redis-monitoring/","title":"Redis monitoring"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. 
This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/latest/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/next/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.0.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.1.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. 
This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.2.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.3.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.4.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.5.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. 
This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.6.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","excerpt":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has …","ref":"/docs/main/v9.7.0/en/faq/unexpected-endpoint-register/","title":"Register mechanism is no longer required for local / exit span"},{"body":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","excerpt":"Report service instance status   Service Instance Properties Service instance contains more …","ref":"/docs/main/latest/en/api/instance-properties/","title":"Report service instance status"},{"body":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","excerpt":"Report service instance status   Service Instance Properties Service instance contains more …","ref":"/docs/main/v9.7.0/en/api/instance-properties/","title":"Report service instance status"},{"body":"RocketMQ monitoring SkyWalking leverages rocketmq-exporter for collecting metrics data from RocketMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rocketmq-exporter (https://github.com/apache/rocketmq-exporter?tab=readme-ov-file#readme) collects metrics data from RocketMQ, The RocketMQ version is required to be 4.3.2+. OpenTelemetry Collector fetches metrics from rocketmq-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rocketmq-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RocketMQ Monitoring RocketMQ monitoring provides multidimensional metrics monitoring of RocketMQ Exporter as Layer: RocketMQ Service in the OAP. In each cluster, the broker is represented as Instance and the topic is represented as Endpoint.\nRocketMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Messages Produced Today Count meter_rocketmq_cluster_messages_produced_today The number of cluster messages produced today. RocketMQ Exporter   Messages Consumed Today Count meter_rocketmq_cluster_messages_consumed_today The number of cluster messages consumed today. RocketMQ Exporter   Total Producer Tps Msg/sec meter_rocketmq_cluster_total_producer_tps The number of messages produced per second. RocketMQ Exporter   Total Consume Tps Msg/sec meter_rocketmq_cluster_total_consumer_tps The number of messages consumed per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_cluster_producer_message_size The max size of a message produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_cluster_consumer_message_size The max size of the consumed message per second. RocketMQ Exporter   Messages Produced Until Yesterday Count meter_rocketmq_cluster_messages_produced_until_yesterday The total number of messages put until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Messages Consumed Until Yesterday Count meter_rocketmq_cluster_messages_consumed_until_yesterday The total number of messages read until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Max Consumer Latency ms meter_rocketmq_cluster_max_consumer_latency The max number of consumer latency. 
RocketMQ Exporter   Max CommitLog Disk Ratio % meter_rocketmq_cluster_max_commitLog_disk_ratio The max utilization ratio of the commit log disk. RocketMQ Exporter   CommitLog Disk Ratio % meter_rocketmq_cluster_commitLog_disk_ratio The utilization ratio of the commit log disk per broker IP. RocketMQ Exporter   Pull ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_pull_threadPool_queue_head_wait_time The wait time in milliseconds for pulling threadPool queue per broker IP. RocketMQ Exporter   Send ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_send_threadPool_queue_head_wait_time The wait time in milliseconds for sending threadPool queue per broker IP. RocketMQ Exporter    RocketMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Produce TPS Msg/sec meter_rocketmq_broker_produce_tps The number of broker produces messages per second. RocketMQ Exporter   Consume QPS Msg/sec meter_rocketmq_broker_consume_qps The number of broker consumes messages per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_broker_producer_message_size The max size of the messages produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_broker_consumer_message_size The max size of the messages consumed per second. RocketMQ Exporter    RocketMQ Topic Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Max Producer Message Size Byte meter_rocketmq_topic_max_producer_message_size The maximum number of messages produced. RocketMQ Exporter   Max Consumer Message Size Byte meter_rocketmq_topic_max_consumer_message_size The maximum number of messages consumed. RocketMQ Exporter   Consumer Latency ms meter_rocketmq_topic_consumer_latency Consumption delay time of a consumer group. RocketMQ Exporter   Producer Tps Msg/sec meter_rocketmq_topic_producer_tps The number of messages produced per second. RocketMQ Exporter   Consumer Group Tps Msg/sec meter_rocketmq_topic_consumer_group_tps The number of messages consumed per second per consumer group. RocketMQ Exporter   Producer Offset Count meter_rocketmq_topic_producer_offset The max progress of a topic\u0026rsquo;s production message. RocketMQ Exporter   Consumer Group Offset Count meter_rocketmq_topic_consumer_group_offset The max progress of a topic\u0026rsquo;s consumption message per consumer group. RocketMQ Exporter   Producer Message Size Byte/sec meter_rocketmq_topic_producer_message_size The max size of messages produced per second. RocketMQ Exporter   Consumer Message Size Byte/sec meter_rocketmq_topic_consumer_message_size The max size of messages consumed per second. RocketMQ Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/rocketmq/rocketmq-cluster.yaml, otel-rules/rocketmq/rocketmq-broker.yaml, otel-rules/rocketmq/rocketmq-topic.yaml. The RocketMQ dashboard panel configurations are found in ui-initialized-templates/rocketmq.\n","excerpt":"RocketMQ monitoring SkyWalking leverages rocketmq-exporter for collecting metrics data from …","ref":"/docs/main/next/en/setup/backend/backend-rocketmq-monitoring/","title":"RocketMQ monitoring"},{"body":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your plugin logic. If you want to perform debugging, follow these steps:\n Write test code: Write a sample application that includes the framework content you need to test. 
Build the Agent: In the project root directory, run the make build command to compile the Agent program into a binary file. Adjust the test program\u0026rsquo;s Debug configuration: Modify the test program\u0026rsquo;s Debug configuration, which will be explained in more detail later. Launch the program and add breakpoints: Start your sample application and add breakpoints in your plugin code where you want to pause the execution and inspect the program state.  Write test code Please make sure that you have imported github.com/apache/skywalking-go in your test code. You can refer to the documentation on how to compile using go build for specific steps.\nAdjust the test program\u0026rsquo;s Debug configuration Please locate the following two paths:\n Go Agent: Locate the binary file generated through make build in the previous step. Current project path: Find the root directory of the current project, which will be used to search for source files in subsequent steps.  Then, please enter the following command in the tool arguments section of the debug configuration:\n-toolexec '/path/to/skywalking-go-agent -debug /path/to/current-project-path' -a\u0026quot;. ","excerpt":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your …","ref":"/docs/skywalking-go/latest/en/development-and-contribution/running-and-debugging/","title":"Running and Debugging"},{"body":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your plugin logic. If you want to perform debugging, follow these steps:\n Write test code: Write a sample application that includes the framework content you need to test. Build the Agent: In the project root directory, run the make build command to compile the Agent program into a binary file. Adjust the test program\u0026rsquo;s Debug configuration: Modify the test program\u0026rsquo;s Debug configuration, which will be explained in more detail later. Launch the program and add breakpoints: Start your sample application and add breakpoints in your plugin code where you want to pause the execution and inspect the program state.  Write test code Please make sure that you have imported github.com/apache/skywalking-go in your test code. You can refer to the documentation on how to compile using go build for specific steps.\nAdjust the test program\u0026rsquo;s Debug configuration Please locate the following two paths:\n Go Agent: Locate the binary file generated through make build in the previous step. Current project path: Find the root directory of the current project, which will be used to search for source files in subsequent steps.  Then, please enter the following command in the tool arguments section of the debug configuration:\n-toolexec '/path/to/skywalking-go-agent -debug /path/to/current-project-path' -a\u0026quot;. ","excerpt":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your …","ref":"/docs/skywalking-go/next/en/development-and-contribution/running-and-debugging/","title":"Running and Debugging"},{"body":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your plugin logic. If you want to perform debugging, follow these steps:\n Write test code: Write a sample application that includes the framework content you need to test. Build the Agent: In the project root directory, run the make build command to compile the Agent program into a binary file. 
Adjust the test program\u0026rsquo;s Debug configuration: Modify the test program\u0026rsquo;s Debug configuration, which will be explained in more detail later. Launch the program and add breakpoints: Start your sample application and add breakpoints in your plugin code where you want to pause the execution and inspect the program state.  Write test code Please make sure that you have imported github.com/apache/skywalking-go in your test code. You can refer to the documentation on how to compile using go build for specific steps.\nAdjust the test program\u0026rsquo;s Debug configuration Please locate the following two paths:\n Go Agent: Locate the binary file generated through make build in the previous step. Current project path: Find the root directory of the current project, which will be used to search for source files in subsequent steps.  Then, please enter the following command in the tool arguments section of the debug configuration:\n-toolexec '/path/to/skywalking-go-agent -debug /path/to/current-project-path' -a\u0026quot;. ","excerpt":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your …","ref":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/running-and-debugging/","title":"Running and Debugging"},{"body":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. 
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","excerpt":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in …","ref":"/docs/main/latest/en/setup/backend/dashboards-so11y-satellite/","title":"Satellite self observability dashboard"},{"body":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","excerpt":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in …","ref":"/docs/main/next/en/setup/backend/dashboards-so11y-satellite/","title":"Satellite self observability dashboard"},{"body":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","excerpt":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in …","ref":"/docs/main/v9.6.0/en/setup/backend/dashboards-so11y-satellite/","title":"Satellite self observability dashboard"},{"body":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","excerpt":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in …","ref":"/docs/main/v9.7.0/en/setup/backend/dashboards-so11y-satellite/","title":"Satellite self observability dashboard"},{"body":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install the Satellite component.\nInstall Operator And Backend  Follow Operator installation instrument to install the operator. 
Follow Deploy OAP server and UI to install backend.  Deploy Satellite with default setting  Deploy the Storage use the below command:  Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f satellite.yaml Check the Satellite in Kubernetes:  $ kubectl get satellite NAME INSTANCES RUNNING ADDRESS default 1 1 default-satellite.default Satellite With HPA  Follow Custom Metrics Adapter to install the metrics adapter. Update the config in the Satellite CRD and re-apply it to activate the metrics service in satellite.  config: - name: SATELLITE_TELEMETRY_EXPORT_TYPE value: metrics_service Update the config in the OAP CRD and re-apply it to activate the satellite MAL.  config: - name: SW_METER_ANALYZER_ACTIVE_FILES value: satellite Add the HorizontalPodAutoScaler CRD, and update the config file the service and target to your excepted config. It\u0026rsquo;s recommend to set the stabilizationWindowSeconds and selectPolicy of scaling up in HPA, which would help prevent continuous scaling up of pods due to metric delay fluctuations. Check the HorizontalPodAutoScaler in the Kubernetes:  $ kubectl get HorizontalPodAutoscaler NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 2/1900, 5/75 1 3 1 92m ","excerpt":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install …","ref":"/docs/skywalking-swck/v0.9.0/examples/satellite/","title":"Satellite Usage"},{"body":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  
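To make the client-side option above concrete, here is a minimal, hypothetical Go sketch (not taken from any SkyWalking agent) of client-side load balancing with gRPC: the client dials a DNS name, resolves every backend address behind it, and round-robins RPCs across them. The OAP address is an assumption for illustration only.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Hypothetical OAP address; the "dns:///" scheme tells gRPC to resolve all
	// A records behind the name and treat each resolved address as a backend.
	conn, err := grpc.Dial(
		"dns:///skywalking-oap.skywalking-system:11800",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Spread RPCs across the resolved backends instead of pinning one.
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatalf("dial OAP: %v", err)
	}
	defer conn.Close()
	// Report/management service stubs would be created on conn here.
}
```

This is the complexity the article assigns to the client side: every data source would have to carry resolver, balancing, and health-check logic like this, which is one reason the proxy approach is preferred.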
From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. 
This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. 
There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. 
For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. 
One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","excerpt":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains …","ref":"/docs/main/latest/en/academy/scaling-with-apache-skywalking/","title":"Scaling with Apache SkyWalking"},{"body":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. 
In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. 
The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      
Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  
# install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. 
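After the adapter is installed in the next step, one optional sanity check (not in the original text) is to confirm that the external metrics APIService is registered and available; this assumes the adapter serves the standard external.metrics.k8s.io/v1beta1 group version:
# The AVAILABLE column should report True once the adapter is serving requests
kubectl get apiservice v1beta1.external.metrics.k8s.io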
We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","excerpt":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains …","ref":"/docs/main/next/en/academy/scaling-with-apache-skywalking/","title":"Scaling with Apache SkyWalking"},{"body":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. 
In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. 
The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      
Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  
# install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. 
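Once the adapter from the next step is running, you can also list which external metrics it currently serves; this generic query (an addition, not one of the original steps) does not assume any particular metric name and simply prints the raw JSON resource list from the Kubernetes aggregation layer:
# List the external metrics exposed through the external metrics API
kubectl get --raw "/apis/external.metrics.k8s.io/v1beta1"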
We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","excerpt":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains …","ref":"/docs/main/v9.3.0/en/academy/scaling-with-apache-skywalking/","title":"Scaling with Apache SkyWalking"},{"body":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. 
In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. 
The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      
Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  
# install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. 
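For reference, the scale-out seen in this demo follows the standard Kubernetes HPA formula; assuming one Satellite instance reporting 11 connections against a target of 10, the desired replica count works out as:
desiredReplicas = ceil( currentReplicas * currentMetricValue / targetValue )
                = ceil( 1 * 11 / 10 )
                = 2
which matches the jump from 1 to 2 replicas shown later in the watch output of this walkthrough.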
We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","excerpt":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains …","ref":"/docs/main/v9.4.0/en/academy/scaling-with-apache-skywalking/","title":"Scaling with Apache SkyWalking"},{"body":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. 
In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. 
The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      
Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensures that the number of connections in each proxy is relatively balanced Simple to use   Cons Each client needs to ensure that data is not lost; the client is required to accept GOAWAY responses May cause a sudden increase in traffic on some nodes; each client needs to ensure that data is not lost Traffic will not be particularly balanced across instances    We choose Resource Limit+HPA for these reasons:\n The proxy is easy to configure and use, and easy to reason about based on basic data metrics. No data loss due to broken connections. There is no need for the client to implement any other protocols to prevent data loss, which matters especially when the client is a commercial product. The connections of each node in the proxy cluster do not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It sits between the Client and the SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite accepts the traffic from the Client, perceives all the OAP nodes through a Kubernetes Label Selector or manual configuration, and load balances the traffic to the upstream OAP nodes.\nAs shown below, a single client still maintains a connection with a single Satellite; the Satellite establishes a connection with each OAP and load balances messages to the OAP nodes.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for SkyWalking users that provisions, upgrades, and maintains the relevant SkyWalking components and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps are performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scale the Satellite: Kubernetes HPA detects that the metric values meet the scaling conditions, so the Satellite is scaled automatically.  As shown below, a dotted line divides the two parts. HPA uses the SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA scales the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After the SkyWalking OAP scales, the traffic is automatically load balanced through Satellite. Satellite Scaling: Load balancing of Satellite’s own traffic.  NOTE: All commands can be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The steps are as follows:\n Install istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with the demo configuration profile, and enable the Envoy ALS. Transmit the ALS messages to the satellite, which we will deploy later. Add the label to the default namespace so Istio can automatically inject Envoy sidecar proxies when you deploy your application later.  
# install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enable envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking-related components based on Kubernetes. The automatic scaling function of Satellite also relies mainly on SWCK. For more information, you can refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the SkyWalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on the SkyWalking WebUI. At this point, you can see that the Satellite is working!\nDeploy Monitor We need to install the OpenTelemetry Collector to collect the metrics from the OAPs so they can be analyzed.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser, visit http://localhost:8080/, and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data is presented.\nScaling OAP Scale the number of OAPs via the deployment:\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced across the OAPs.\nSatellite Scaling After we have completed the SkyWalking Scaling, we will carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter that implements the Kubernetes external metrics API for the HPA by reading the metrics in the SkyWalking OAP. 
We expose the metrics service in Satellite to the OAP and configure an HPA resource to auto-scale the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you can see that we have 9 connections on one satellite. One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application Scaling the application establishes more connections to the satellite, which lets us verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite deploys a single instance, and a single instance will only accept 11 connections. The HPA resource limits one Satellite to 10 connections and uses a stabilization window so the Satellite scales up steadily. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo once the HPA resource is running, the Satellite is automatically scaled up to 2 instances. You can learn about the replica calculation algorithm in the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we can see that when the number of gRPC connections on a satellite exceeds 10, the satellite automatically scales out through the HPA rule. As a result, the connection count drops back to normal (in this example, fewer than 10).\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","excerpt":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains …","ref":"/docs/main/v9.5.0/en/academy/scaling-with-apache-skywalking/","title":"Scaling with Apache SkyWalking"},
{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  
long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  
long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  
bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  
long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The service used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childServiceInstanceName The service instance used by the parent service in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.  int   componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of requests, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of requests, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of requests, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  
string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs.  string   serviceInstance The service instance to which the event belongs, if any.  string   endpoint The service endpoint to which the event belongs, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of the database.\n   Name Remarks Group Key Type     name The service name of the virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from each slow request of the database.\n   Name Remarks Group Key Type     databaseServiceId The service id of the virtual database service.  string   statement The SQL statement.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement.  string    SCOPE CacheAccess This calculates the metrics data from each request of the cache system.\n   Name Remarks Group Key Type     name The service name of the virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is used for write or read.  string    SCOPE CacheSlowAccess This calculates the metrics data from each slow request of the cache system, whether for a write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of the virtual cache service.  string   command The cache command.  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access.  string   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is used for write or read.  string    SCOPE MQAccess This calculates the service-dimensional metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     name The service name, usually the MQ address(es).  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  
boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/latest/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  
int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  
string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  
int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  
int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  
int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPES with K8S Prefix All metrics starting with K8S are derived from Kubernetes monitoring by Rover(eBPF agent).\nService, Service Instance and relations For all K8SService, K8SServiceInstance, K8SServiceRelation and K8SServiceInstanceRelation, they all have the following package/protocol level metric contents.\n   Name Remarks Group Key Type     type The metrics from log type, the following names should have the type prefix. The value may be connect, accept, close, write, read, protocol.  string   connect.duration Connect to other service use duration.  long(in nanoseconds)   connect.success The connect is success or not.  boolean   accept.duration Accept connection from client use duration.  long(in nanoseconds)   close.duration Close one connection use duration.  long(in nanoseconds)   close.success Close one connection is success or not.  boolean   write.duration Write data to the connection use duration.  long(in nanoseconds)   write.syscall Write data to the connection syscall name. The value should be Write, Writev, Send, SendTo, SendMsg, SendMmsg, SendFile, SendFile64.  string   write.l4.duration Write data to the connection use duration on Linux Layer 4.  long(in nanoseconds)   write.l4.transmitPackageCount Total package count on write data to the connection.  long   write.l4.retransmitPackageCount Total retransmit package count on write data to the connection.  long   write.l4.totalPackageSize Total transmit package size on write data to the connection.  
long(bytes)   write.l3.duration Write data to the connection use duration on Linux Layer 3.  long(in nanoseconds)   write.l3.localDuration Write data to the connection use local duration on Linux Layer 3.  long(in nanoseconds)   write.l3.outputDuration Write data to the connection use output duration on Linux Layer 3.  long(in nanoseconds)   write.l3.resolveMACCount Total resolve remote MAC address count on write data to the connection.  long   write.l3.resolveMACDuration Total resolve remote MAC address use duration on write data to the connection.  long(in nanoseconds)   write.l3.netFilterCount Total do net filtering count on write data to the connection.  long   write.l3.netFilterDuration Total do net filtering use duration on write data to the connection.  long(in nanoseconds)   write.l2.duration Write data to the connection use duration on Linux L2.  long(nanoseconds)   write.l2.networkDeviceName The network device name on write data to the connection.  string   write.l2.enterQueueBufferCount The write package count to the network device queue on write data to the connection.  long   write.l2.readySendDuration Total ready send buffer duration on write data to the connection.  long(in nanoseconds)   write.l2.networkDeviceSendDuration Total network send buffer use duration on write data to the connection.  long(in nanoseconds)   read.duration Read data from the connection use duration.  long(in nanoseconds)   read.syscall Read data from the connection syscall name. The value should Read, Readv, Recv, RecvFrom, RecvMsg, RecvMmsg.  string   read.l4.duration Read data to the connection use duration on Linux Layer 4.  long(in nanoseconds)   read.l3.duration Read data to the connection use duration on Linux Layer 3.  long(in nanoseconds)   read.l3.rcvDuration Read data to the connection use receive duration on Linux Layer 3.  long(in nanoseconds)   read.l3.localDuration Read data to the connection use local duration on Linux Layer 3.  long(in nanoseconds)   read.l3.netFilterCount Total do net filtering count on read data from the connection.  long   read.l3.netFilterDuration Total do net filtering use duration on read data from the connection.  long(in nanoseconds)   read.l2.netDeviceName The network device name on read data from the connection.  string   read.l2.packageCount Total read package count on the connection.  long   read.l2.totalPackageSize Total read package size on the connection.  long(bytes)   read.l2.packageToQueueDuration Total read package to the queue duration on the connection.  long(in nanoseconds)   read.l2.rcvPackageFromQueueDuration Total read package from the queue duration on the connection.  long(in nanoseconds)   protocol.type The protocol type name, the following names should have the type prefix. The value should be HTTP.  string   protocol.success This protocol request and response is success or not.  boolean   protocol.http.latency The latency of HTTP response.  long(in nanoseconds)   protocol.http.url The url path of HTTP request.  string   protocol.http.method The method name of HTTP request.  string   protocol.http.statusCode The response code of HTTP response.  int   protocol.http.sizeOfRequestHeader The header size of HTTP request.  long(bytes)   protocol.http.sizeOfRequestBody The body size of HTTP request.  long(bytes)   protocol.http.sizeOfResponseHeader The header size of HTTP response.  long(bytes)   protocol.http.sizeOfResponseBody The body size of HTTP response.  
long(bytes)    SCOPE K8SService    Name Remarks Group Key Type     name The service name in kubernetes.  string   layer The layer in kubernetes service.  string   detectPoint Where the relation is detected. The value may be client or server.  enum    SCOPE K8SServiceInstance    Name Remarks Group Key Type     serviceName The service name in kubernetes.  string   serviceInstanceName The pod name in kubernetes.  string   layer The layer of kubernetes service.  string   detectPoint Where the relation is detected. The value may be client or server.  enum    SCOPE K8SServiceRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceLayer The source layer service in kubernetes.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   tlsMode The TLS mode of relation. The value may be Plain or TLS.  enum   destServiceName The dest service name in kubernetes.  string   destLayer The dest layer service in kubernetes.  string    SCOPE K8SServiceInstanceRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceServiceInstanceName The source pod name in kubernetes.  string   sourceLayer The source layer service in kubernetes.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   tlsMode The TLS mode of relation. The value may be Plain or TLS.  enum   destServiceName The dest service name in kubernetes.  string   destServiceInstanceName The dest pod name in kubernetes.  string   destLayer The dest layer service in kubernetes.  string    Endpoint and Endpoint Relation For K8SEndpoint and K8SEndpointRelation, they only have the following protocol level metric contents.\n   Name Remarks Group Key Type     protocol.type The protocol type name, the following names should have the type prefix. The value should be HTTP.  string   protocol.success This protocol request and response is success or not.  boolean   protocol.http.latency The latency of HTTP response.  long(in nanoseconds)   protocol.http.url The url path of HTTP request.  string   protocol.http.method The method name of HTTP request.  string   protocol.http.statusCode The response code of HTTP response.  int   protocol.http.sizeOfRequestHeader The header size of HTTP request.  long(bytes)   protocol.http.sizeOfRequestBody The body size of HTTP request.  long(bytes)   protocol.http.sizeOfResponseHeader The header size of HTTP response.  long(bytes)   protocol.http.sizeOfResponseBody The body size of HTTP response.  long(bytes)    SCOPE K8SEndpoint    Name Remarks Group Key Type     serviceName The service name in kubernetes.  string   layer The layer in kubernetes service.  string   endpointName The endpoint name detect in kubernetes service.  
string   duration The duration of the service endpoint response latency.  long    SCOPE K8SEndpointRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceLayer The layer in kubernetes source service.  string   sourceEndpointName The endpoint name detect in kubernetes source service.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   destServiceName The dest service name in kubernetes.  string   destLayer The layer in kubernetes dest service.  string   destEndpointName The endpoint name detect in kubernetes dest service.  string    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/next/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  
long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. 
The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  
string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  
string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  
string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  
string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  
long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  
string    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  
long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  
long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  
long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  
string    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. 
The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  
long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  
string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  
enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of requests, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of requests, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs.  string   serviceInstance The service instance to which the event belongs, if any.  string   endpoint The service endpoint to which the event belongs, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of the database.\n   Name Remarks Group Key Type     name The service name of the virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from each slow request of the database.\n   Name Remarks Group Key Type     databaseServiceId The service ID of the virtual database service.  string   statement The SQL statement.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement.  string    SCOPE CacheAccess This calculates the metrics data from each request of the cache system.\n   Name Remarks Group Key Type     name The service name of the virtual cache service.  string   cacheTypeId The ID of the component used in this call.  
int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is a write or a read.  string    SCOPE CacheSlowAccess This calculates the metrics data from each slow request of the cache system, for either write or read operations.\n   Name Remarks Group Key Type     cacheServiceId The service ID of the virtual cache service.  string   command The cache command.  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access.  string   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is a write or a read.  string    SCOPE MQAccess This calculates the service-dimension metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     name The service name, usually the MQ address(es).  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    SCOPE MQEndpointAccess This calculates the endpoint-dimension metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     serviceName The name of the service that this endpoint belongs to.  string   endpoint The endpoint name, usually a combination of queue and topic.  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  
string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  
string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  
long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  
string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  
int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  
boolean   operation Indicates this access is on Produce or Consume side  enum    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  
Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  
string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  
string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  
enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  
int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  
string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  
string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  
long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  
string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  
string   service The service name to which the event belongs.  string   serviceInstance The service instance to which the event belongs, if any.  string   endpoint The service endpoint to which the event belongs, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of the database.\n   Name Remarks Group Key Type     name The service name of the virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from each slow request of the database.\n   Name Remarks Group Key Type     databaseServiceId The service ID of the virtual database service.  string   statement The SQL statement.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement.  string    SCOPE CacheAccess This calculates the metrics data from each request of the cache system.\n   Name Remarks Group Key Type     name The service name of the virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is used for write or read.  string    SCOPE CacheSlowAccess This calculates the metrics data from each slow request of the cache system, for either a write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service ID of the virtual cache service.  string   command The cache command.  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access.  string   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is used for write or read.  string    SCOPE MQAccess This calculates the service-dimensional metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     name The service name, usually the MQ address(es).  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    SCOPE MQEndpointAccess This calculates the endpoint-dimensional metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name, usually a combination of queue and topic.  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  
boolean   operation Indicates this access is on Produce or Consume side  enum    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  
Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  
string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  
string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  
enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of requests, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of requests, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime DOM parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime DOM ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs.  string   serviceInstance The service instance to which the event belongs, if any.  string   endpoint The service endpoint to which the event belongs, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of the database.\n   Name Remarks Group Key Type     name The service name of the virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from each slow request of the database.\n   Name Remarks Group Key Type     databaseServiceId The service ID of the virtual database service.  string   statement The SQL statement.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement.  string    SCOPE CacheAccess This calculates the metrics data from each request of the cache system.\n   Name Remarks Group Key Type     name The service name of the virtual cache service.  string   cacheTypeId The ID of the component used in this call.  
int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is used for write or read.  string    SCOPE CacheSlowAccess This calculates the metrics data from each slow request of the cache system, for either a write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service ID of the virtual cache service.  string   command The cache command.  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access.  string   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is used for write or read.  string    SCOPE MQAccess This calculates the service-dimensional metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     name The service name, usually the MQ address(es).  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    SCOPE MQEndpointAccess This calculates the endpoint-dimensional metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name, usually a combination of queue and topic.  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    ","excerpt":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/scope-definitions/","title":"Scopes and Fields"},{"body":"Scratch The OAP Config Dump SkyWalking OAP behaviors could be controlled through hundreds of configurations. It is hard to know the final configuration, as all the configurations could be overridden by system environment variables.\nThe core config file application.yml lists all the configurations and their default values. However, it is still hard to know the runtime value.\nScratch is a tool to dump the final configuration. It is provided within the OAP REST server and can be accessed through HTTP GET http://{core restHost}:{core restPort}/debugging/config/dump.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump cluster.provider=standalone core.provider=default core.default.prepareThreads=2 core.default.restHost=0.0.0.0 core.default.searchableLogsTags=level,http.status_code core.default.role=Mixed core.default.persistentPeriod=25 core.default.syncPeriodHttpUriRecognitionPattern=10 core.default.restIdleTimeOut=30000 core.default.dataKeeperExecutePeriod=5 core.default.topNReportPeriod=10 core.default.gRPCSslTrustedCAPath= core.default.downsampling=[Hour, Day] core.default.serviceNameMaxLength=70 core.default.gRPCSslEnabled=false core.default.restPort=12800 core.default.serviceCacheRefreshInterval=10 ... All booting configurations with their runtime values are listed, including the selected provider for each module.\nProtect The Secrets Some of the configurations contain sensitive values, such as username, password, token, etc. These values would be masked in the dump result. 
For example, the storage.elasticsearch.password in the following configurations,\nstorage:selector:${SW_STORAGE:h2}elasticsearch:password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}It would be masked and shown as ******** in the dump result.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump ... storage.elasticsearch.password=******** ... By default, we mask the config keys through the following configurations.\n# Include the list of keywords to filter configurations including secrets. Separate keywords by a comma.keywords4MaskingSecretsOfConfig:${SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS:user,password,token,accessKey,secretKey,authentication}Disable The Config Dump Service By default, this service is open to help users debug and diagnose. If you want to disable it, disable the whole debugging-query module by setting selector=-.\ndebugging-query:selector:${SW_DEBUGGING_QUERY:-}","excerpt":"Scratch The OAP Config Dump SkyWalking OAP behaviors could be controlled through hundreds of …","ref":"/docs/main/latest/en/debugging/config_dump/","title":"Scratch The OAP Config Dump"},{"body":"Scratch The OAP Config Dump SkyWalking OAP behaviors could be controlled through hundreds of configurations. It is hard to know the final configuration, as all the configurations could be overridden by system environment variables.\nThe core config file application.yml lists all the configurations and their default values. However, it is still hard to know the runtime value.\nScratch is a tool to dump the final configuration. It is provided within the OAP REST server and can be accessed through HTTP GET http://{core restHost}:{core restPort}/debugging/config/dump.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump cluster.provider=standalone core.provider=default core.default.prepareThreads=2 core.default.restHost=0.0.0.0 core.default.searchableLogsTags=level,http.status_code core.default.role=Mixed core.default.persistentPeriod=25 core.default.syncPeriodHttpUriRecognitionPattern=10 core.default.restIdleTimeOut=30000 core.default.dataKeeperExecutePeriod=5 core.default.topNReportPeriod=10 core.default.gRPCSslTrustedCAPath= core.default.downsampling=[Hour, Day] core.default.serviceNameMaxLength=70 core.default.gRPCSslEnabled=false core.default.restPort=12800 core.default.serviceCacheRefreshInterval=10 ... All booting configurations with their runtime values are listed, including the selected provider for each module.\nProtect The Secrets Some of the configurations contain sensitive values, such as username, password, token, etc. These values would be masked in the dump result. For example, the storage.elasticsearch.password in the following configurations,\nstorage:selector:${SW_STORAGE:h2}elasticsearch:password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}It would be masked and shown as ******** in the dump result.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump ... storage.elasticsearch.password=******** ... By default, we mask the config keys through the following configurations.\n# Include the list of keywords to filter configurations including secrets. Separate keywords by a comma.keywords4MaskingSecretsOfConfig:${SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS:user,password,token,accessKey,secretKey,authentication}Disable The Config Dump Service By default, this service is open to help users debug and diagnose. 
If you want to disable it, disable the whole debugging-query module by setting selector=-.\ndebugging-query:selector:${SW_DEBUGGING_QUERY:-}","excerpt":"Scratch The OAP Config Dump SkyWalking OAP behaviors could be controlled through hundreds of …","ref":"/docs/main/next/en/debugging/config_dump/","title":"Scratch The OAP Config Dump"},{"body":"Scratch The OAP Config Dump SkyWalking OAP behaviors could be controlled through hundreds of configurations. It is hard to know the final configuration, as all the configurations could be overridden by system environment variables.\nThe core config file application.yml lists all the configurations and their default values. However, it is still hard to know the runtime value.\nScratch is a tool to dump the final configuration. It is provided within the OAP REST server and can be accessed through HTTP GET http://{core restHost}:{core restPort}/debugging/config/dump.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump cluster.provider=standalone core.provider=default core.default.prepareThreads=2 core.default.restHost=0.0.0.0 core.default.searchableLogsTags=level,http.status_code core.default.role=Mixed core.default.persistentPeriod=25 core.default.syncPeriodHttpUriRecognitionPattern=10 core.default.restIdleTimeOut=30000 core.default.dataKeeperExecutePeriod=5 core.default.topNReportPeriod=10 core.default.gRPCSslTrustedCAPath= core.default.downsampling=[Hour, Day] core.default.serviceNameMaxLength=70 core.default.gRPCSslEnabled=false core.default.restPort=12800 core.default.serviceCacheRefreshInterval=10 ... All booting configurations with their runtime values are listed, including the selected provider for each module.\nProtect The Secrets Some of the configurations contain sensitive values, such as username, password, token, etc. These values would be masked in the dump result. For example, the storage.elasticsearch.password in the following configurations,\nstorage:selector:${SW_STORAGE:h2}elasticsearch:password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}It would be masked and shown as ******** in the dump result.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump ... storage.elasticsearch.password=******** ... By default, we mask the config keys through the following configurations.\n# Include the list of keywords to filter configurations including secrets. Separate keywords by a comma.keywords4MaskingSecretsOfConfig:${SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS:user,password,token,accessKey,secretKey,authentication}Disable The Config Dump Service By default, this service is open to help users debug and diagnose. If you want to disable it, disable the whole debugging-query module by setting selector=-.\ndebugging-query:selector:${SW_DEBUGGING_QUERY:-}","excerpt":"Scratch The OAP Config Dump SkyWalking OAP behaviors could be controlled through hundreds of …","ref":"/docs/main/v9.7.0/en/debugging/config_dump/","title":"Scratch The OAP Config Dump"},{"body":"","excerpt":"","ref":"/search/","title":"Search Results"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. 
The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/latest/en/security/readme/","title":"Security Notice"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/next/en/security/readme/","title":"Security Notice"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. 
OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/v9.3.0/en/security/readme/","title":"Security Notice"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. 
Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/v9.4.0/en/security/readme/","title":"Security Notice"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/v9.5.0/en/security/readme/","title":"Security Notice"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  
Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/v9.6.0/en/security/readme/","title":"Security Notice"},{"body":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","excerpt":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure …","ref":"/docs/main/v9.7.0/en/security/readme/","title":"Security Notice"},{"body":"Send Envoy metrics to SkyWalking with / without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking does not only receive and store the metrics emitted by Envoy, but it also analyzes the topology of services and service instances.\nAttention: There are two versions of Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with / without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nIn order to let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics, since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics which need to be analyzed, in order to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\n","excerpt":"Send Envoy metrics to SkyWalking with / without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.0.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with / without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation to learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/latest/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/next/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.1.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.2.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.3.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.4.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.5.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.6.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","excerpt":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, …","ref":"/docs/main/v9.7.0/en/setup/envoy/metrics_service_setting/","title":"Send Envoy metrics to SkyWalking with/without Istio"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
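The log above is produced by the OAP container that the example's compose file starts alongside an Envoy container. For orientation only, a rough, hypothetical sketch of that wiring is shown below; the image names, environment, and file paths are assumptions, so consult the example's actual docker-compose.yaml (or docker-compose-envoy-v3-api.yaml) for the authoritative version.

services:
  skywalking:                               # matches the "skywalking" address in the Envoy bootstrap and the skywalking_1 log prefix
    image: apache/skywalking-oap-server     # image name assumed; the example pins its own version
    environment:
      SW_STORAGE: h2                        # assumption: in-memory storage is enough for a demo
    ports:
      - "11800:11800"                       # gRPC port receiving the Envoy Metrics Service stream
  envoy:
    image: envoyproxy/envoy                 # tag assumed; the example pins its own version
    volumes:
      - ./envoy.yaml:/etc/envoy/envoy.yaml  # bootstrap containing the stats_sinks shown earlier
    depends_on:
      - skywalking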
$ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/latest/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value 
{ skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/next/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: 
\u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.0.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
$ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.1.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value 
{ skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.2.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: 
\u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.3.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
$ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.4.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value 
{ skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.5.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: 
\u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.6.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
$ # To tear down: $ make down ","excerpt":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to …","ref":"/docs/main/v9.7.0/en/setup/envoy/examples/metrics/readme/","title":"Sending Envoy Metrics to SkyWalking OAP Server Example"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/latest/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. 
See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/next/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanism to integrate with target services. They support collecting traces, logs, metrics and events by using SkyWalking\u0026rsquo;s native format, and maximum the analysis capabilities of SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the rust agent in a rust service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols, but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project document for more details.\n  SkyAPM PHP agent. See PHP agent project document for more details.\n  SkyAPM Go SDK. See go2sky project document for more details.\n  SkyAPM C++ SDK. See cpp2sky project document for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.0.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM PHP agent. See PHP agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.1.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. 
They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM PHP agent. See PHP agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.2.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.3.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. 
Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.4.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.5.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. 
Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.6.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","excerpt":"Server Agents Server agents in various languages provide auto-instrumentation or/and …","ref":"/docs/main/v9.7.0/en/setup/service-agent/server-agents/","title":"Server Agents"},{"body":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server.\nDefaultConfig # The address of grpc server. Default value is :11800address::11800# The network of grpc. Default value is :tcpnetwork:tcp# The max size of receiving log. Default value is 2M. The unit is Byte.max_recv_msg_size:2097152# The max concurrent stream channels.max_concurrent_streams:32# The TLS cert file path.tls_cert_file:\u0026#34;\u0026#34;# The TLS key file path.tls_key_file:\u0026#34;\u0026#34;# To Accept Connection Limiter when reach the resourceaccept_limit:# The max CPU utilization limitcpu_utilization:75# The max connection countconnection_count:4000Configuration    Name Type Description     address string The address of grpc server.   network string The network of grpc.   max_recv_msg_size int The max size of the received log.   max_concurrent_streams uint32 The max concurrent stream channels.   tls_cert_file string The TLS cert file path.   tls_key_file string The TLS key file path.   
accept_limit grpc.AcceptConnectionConfig To Accept Connection Limiter when reach the resource    ","excerpt":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server. …","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/server_grpc-server/","title":"Server/grpc-server"},{"body":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server.\nDefaultConfig # The address of grpc server. Default value is :11800address::11800# The network of grpc. Default value is :tcpnetwork:tcp# The max size of receiving log. Default value is 2M. The unit is Byte.max_recv_msg_size:2097152# The max concurrent stream channels.max_concurrent_streams:32# The TLS cert file path.tls_cert_file:\u0026#34;\u0026#34;# The TLS key file path.tls_key_file:\u0026#34;\u0026#34;# To Accept Connection Limiter when reach the resourceaccept_limit:# The max CPU utilization limitcpu_utilization:75# The max connection countconnection_count:4000Configuration    Name Type Description     address string The address of grpc server.   network string The network of grpc.   max_recv_msg_size int The max size of the received log.   max_concurrent_streams uint32 The max concurrent stream channels.   tls_cert_file string The TLS cert file path.   tls_key_file string The TLS key file path.   accept_limit grpc.AcceptConnectionConfig To Accept Connection Limiter when reach the resource    ","excerpt":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server. …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/server_grpc-server/","title":"Server/grpc-server"},{"body":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server.\nDefaultConfig # The address of grpc server. Default value is :11800address::11800# The network of grpc. Default value is :tcpnetwork:tcp# The max size of receiving log. Default value is 2M. The unit is Byte.max_recv_msg_size:2097152# The max concurrent stream channels.max_concurrent_streams:32# The TLS cert file path.tls_cert_file:\u0026#34;\u0026#34;# The TLS key file path.tls_key_file:\u0026#34;\u0026#34;# To Accept Connection Limiter when reach the resourceaccept_limit:# The max CPU utilization limitcpu_utilization:75# The max connection countconnection_count:4000Configuration    Name Type Description     address string The address of grpc server.   network string The network of grpc.   max_recv_msg_size int The max size of the received log.   max_concurrent_streams uint32 The max concurrent stream channels.   tls_cert_file string The TLS cert file path.   tls_key_file string The TLS key file path.   accept_limit grpc.AcceptConnectionConfig To Accept Connection Limiter when reach the resource    ","excerpt":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server. …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/server_grpc-server/","title":"Server/grpc-server"},{"body":"Server/http-server Description This is a sharing plugin, which would start a http server.\nDefaultConfig # The http server address.address:\u0026#34;:12800\u0026#34;Configuration    Name Type Description     address string     ","excerpt":"Server/http-server Description This is a sharing plugin, which would start a http server. 
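The Satellite grpc-server plugin options documented above (listen address, max receive size, max concurrent streams, TLS cert/key) correspond to standard gRPC server settings. As a rough illustration only, and under the assumption that a plain grpc-java server is an acceptable stand-in (Satellite itself is written in Go, so this is not its implementation), the sketch below shows what the equivalent knobs look like; the TLS file paths and any service registration are hypothetical and left commented out so the snippet runs as-is.

```java
import io.grpc.Server;
import io.grpc.netty.NettyServerBuilder;

import java.io.IOException;

// Illustrative only: a plain grpc-java server configured with limits analogous to
// the documented grpc-server plugin defaults (address ":11800", 2 MB max message,
// 32 concurrent streams, optional TLS). Not the Satellite implementation.
public class GrpcServerSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        Server server = NettyServerBuilder.forPort(11800)      // address: ":11800"
                .maxInboundMessageSize(2 * 1024 * 1024)         // max_recv_msg_size: 2097152
                .maxConcurrentCallsPerConnection(32)            // roughly max_concurrent_streams: 32
                // .useTransportSecurity(new java.io.File("/path/to/tls.crt"),
                //                       new java.io.File("/path/to/tls.key"))  // tls_cert_file / tls_key_file (hypothetical paths)
                // .addService(new SomeGrpcService())           // hypothetical service registration
                .build()
                .start();
        server.awaitTermination();
    }
}
```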
…","ref":"/docs/skywalking-satellite/latest/en/setup/plugins/server_http-server/","title":"Server/http-server"},{"body":"Server/http-server Description This is a sharing plugin, which would start a http server.\nDefaultConfig # The http server address.address:\u0026#34;:12800\u0026#34;Configuration    Name Type Description     address string     ","excerpt":"Server/http-server Description This is a sharing plugin, which would start a http server. …","ref":"/docs/skywalking-satellite/next/en/setup/plugins/server_http-server/","title":"Server/http-server"},{"body":"Server/http-server Description This is a sharing plugin, which would start a http server.\nDefaultConfig # The http server address.address:\u0026#34;:12800\u0026#34;Configuration    Name Type Description     address string     ","excerpt":"Server/http-server Description This is a sharing plugin, which would start a http server. …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/server_http-server/","title":"Server/http-server"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/latest/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/next/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. 
For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an option parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.0.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.1.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.2.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. 
For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.3.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.4.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.5.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. 
For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.6.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","excerpt":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each …","ref":"/docs/main/v9.7.0/en/setup/backend/service-auto-grouping/","title":"Service Auto Grouping"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. 
But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/latest/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. 
Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/next/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. 
But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. 
That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. 
This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. 
But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. 
The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? 
Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. 
Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","excerpt":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/service-agent/","title":"Service Auto Instrument Agent"},{"body":"Service Discovery Service discovery is used to discover all Kubernetes services process in the current node and report them to backend services. After the process upload is completed, the other modules could perform more operations with the process, such as process profiling and collecting process metrics.\nConfiguration    Name Default Environment Key Description     process_discovery.heartbeat_period 20s ROVER_PROCESS_DISCOVERY_HEARTBEAT_PERIOD The period of report or keep-alive process to the backend.   process_discovery.properties_report_period 10 ROVER_PROCESS_DISCOVERY_PROPERTIES_REPORT_PERIOD The agent sends the process properties to the backend every: heartbeart period * properties report period.   process_discovery.kubernetes.active false ROVER_PROCESS_DISCOVERY_KUBERNETES_ACTIVE Is active the kubernetes process discovery.   process_discovery.kubernetes.node_name  ROVER_PROCESS_DISCOVERY_KUBERNETES_NODE_NAME Current deployed node name, it could be inject by spec.nodeName.   process_discovery.kubernetes.namespaces  ROVER_PROCESS_DISCOVERY_KUBERNETES_NAMESPACES Including pod by namespaces, if empty means including all namespaces. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;.   process_discovery.kubernetes.analyzers   Declare how to build the process. The istio and k8s resources are active by default.   process_discovery.kubernetes.analyzers.active   Set is active analyzer.   process_discovery.kubernetes.analyzers.filters   Define which process is match to current process builder.   process_discovery.kubernetes.analyzers.service_name   The Service Name of the process entity.   process_discovery.kubernetes.analyzers.instance_name   The Service Instance Name of the process entity, by default, the instance name is the host IP v4 address from \u0026ldquo;en0\u0026rdquo; net interface.   
process_discovery.kubernetes.analyzers.process_name   The Process Name of the process entity, by default, the process name is the executable name of the process.   process_discovery.kubernetes.analyzers.labels   The Process Labels, used to aggregate similar process from service entity. Multiple labels split by \u0026ldquo;,\u0026rdquo;.    Kubernetes Process Detector The Kubernetes process detector could detect any process under the Kubernetes container. If active the Kubernetes process detector, the rover must be deployed in the Kubernetes cluster. After finding the process, it would collect the metadata of the process when the report to the backend.\nProcess Analyze The process analysis declares which process could be profiled and how to build the process entity. The Istio and Kubernetes resources are active on default.\nFilter The filter provides an expression(go template) mechanism to match the process that can build the entity. Multiple expressions work together to determine whether the process can create the entity. Each expression must return the boolean value. Otherwise, the decision throws an error.\nThe context is similar to the entity builder. Using context could help the rover understand which process could build the entity.\nProcess Context Is the same with the process context in scanner, but doesn\u0026rsquo;t need to add the {{ and }} in prefix and suffix.\nPod Context Provide current pod information and judgments.\n   Name Argument Example Description     Name None eq .Pod.Name \u0026quot;test-pod-name\u0026quot; The name of the current pod. The example shows the pod name is equal to test-pod-name.   Namespace None eq .Pod.Namespace \u0026quot;test-namesapce\u0026quot; The name of the current pod namespace. The example shows the pod namespace name is equal to test-namespace.   Node None eq .Pod.Node \u0026quot;test-node\u0026quot; The name of the node deployed. The example shows the pod node name is equal to test-node.   LabelValue KeyNames eq .Pod.LavelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot; The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. The example shows the pod has anyone a or b label key, and the value matches to v.   ServiceName None eq .Pod.ServiceName \u0026quot;test-service\u0026quot; The service name of the pod. The example shows current pods matched service name is test-service.   HasContainer Container name .Pod.HasContainer \u0026quot;istio-proxy\u0026quot; The pod has the appointed container name.   LabelSelector selector .Pod.LabelSelector The pod is matches the label selector. For more details, please read the official documentation.   HasServiceName None .Pod.HasServiceName The pod has the matched service.   HasOwnerName kindNames .Pod.HasOwnerName \u0026quot;Service,Deployment\u0026quot; The pod has the matched owner name.    Container Context Provide current container(under the pod) information.\n   Name Argument Example Description     Name None eq .Container.Name \u0026quot;istio-proxy\u0026quot; The name of the current container under the pod. The examples show the container name is equal to istio-proxy.    
Entity The entity including layer, serviceName, instanceName, processName and labels properties.\nThe entity also could use expression to build(serviceName, instanceName and processName).\nRover Rover context provides the context of the rover process instance and VM data.\n   Name Argument Example Description     InstanceID None {{.Rover.InstanceID}} Get the Instance ID of the rover.   HostIPV4 The Interface name {{.Rover.HostIPV4 \u0026quot;en0\u0026quot;}} Get the ipv4 address from the appointed network interface name.   HostIPV6 The Interface name {{.Rover.HostIPV6 \u0026quot;en0\u0026quot;}} Get the ipv6 address from the appointed network interface name.   HostName None {{.Rover.HostName}} Get the host name of current machine.    Process Process context provides the context relate to which process is matched.\n   Name Argument Example Description     ExeFilePath None {{.Process.ExeFilePath}} The execute file path of process.   ExeName None {{.Process.ExeName}} The execute file name.   CommandLine None {{.Process.CommandLine}} The command line of process.   Pid None {{.Process.Pid}} The id of the process.   WorkDir None {{.Process.WorkDir}} The work directory path of the process.    Pod The information on the current pod.\n   Name Argument Example Description     Name None {{.Pod.Name}} The name of current pod.   Namespace None {{.Pod.Namespace}} The name of current pod namespace.   Node None {{.Pod.Node}} The name of the node deployed.   LabelValue KeyNames, Default {{.Pod.LabelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot;}} The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. If all keys don\u0026rsquo;t have value, then return the default value.   ServiceName None {{.Pod.ServiceName}} The service name of the pod. If the pod hasn\u0026rsquo;t matched service, then return an empty string.   FindContainer ContainerName {{.Pod.FindContainer \u0026quot;test\u0026quot;}} Find the Container context by container name.   OwnerName KindNames {{.Pod.OwnerName \u0026quot;Service,Deployment\u0026quot;}} Find the Owner name by owner kind name.    Container The information of the current container under the pod.\n   Name Argument Example Description     Name None {{.Container.Name}} The name of the current container under the pod.    ID None {{.Container.ID}} The id of the current container under the pod.   EnvValue KeyNames {{.Container.EnvValue \u0026quot;a,b\u0026quot;}} The environment value of the first non-value key in the provided candidates(Iterate from left to right).    ","excerpt":"Service Discovery Service discovery is used to discover all Kubernetes services process in the …","ref":"/docs/skywalking-rover/latest/en/setup/configuration/service-discovery/","title":"Service Discovery"},{"body":"Service Discovery Service discovery is used to discover all Kubernetes services process in the current node and report them to backend services. After the process upload is completed, the other modules could perform more operations with the process, such as process profiling and collecting process metrics.\nConfiguration    Name Default Environment Key Description     process_discovery.heartbeat_period 20s ROVER_PROCESS_DISCOVERY_HEARTBEAT_PERIOD The period of report or keep-alive process to the backend.   process_discovery.properties_report_period 10 ROVER_PROCESS_DISCOVERY_PROPERTIES_REPORT_PERIOD The agent sends the process properties to the backend every: heartbeart period * properties report period.   
process_discovery.kubernetes.active false ROVER_PROCESS_DISCOVERY_KUBERNETES_ACTIVE Is active the kubernetes process discovery.   process_discovery.kubernetes.node_name  ROVER_PROCESS_DISCOVERY_KUBERNETES_NODE_NAME Current deployed node name, it could be inject by spec.nodeName.   process_discovery.kubernetes.namespaces  ROVER_PROCESS_DISCOVERY_KUBERNETES_NAMESPACES Including pod by namespaces, if empty means including all namespaces. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;.   process_discovery.kubernetes.analyzers   Declare how to build the process. The istio and k8s resources are active by default.   process_discovery.kubernetes.analyzers.active   Set is active analyzer.   process_discovery.kubernetes.analyzers.filters   Define which process is match to current process builder.   process_discovery.kubernetes.analyzers.service_name   The Service Name of the process entity.   process_discovery.kubernetes.analyzers.instance_name   The Service Instance Name of the process entity, by default, the instance name is the host IP v4 address from \u0026ldquo;en0\u0026rdquo; net interface.   process_discovery.kubernetes.analyzers.process_name   The Process Name of the process entity, by default, the process name is the executable name of the process.   process_discovery.kubernetes.analyzers.labels   The Process Labels, used to aggregate similar process from service entity. Multiple labels split by \u0026ldquo;,\u0026rdquo;.    Kubernetes Process Detector The Kubernetes process detector could detect any process under the Kubernetes container. If active the Kubernetes process detector, the rover must be deployed in the Kubernetes cluster. After finding the process, it would collect the metadata of the process when the report to the backend.\nProcess Analyze The process analysis declares which process could be profiled and how to build the process entity. The Istio and Kubernetes resources are active on default.\nFilter The filter provides an expression(go template) mechanism to match the process that can build the entity. Multiple expressions work together to determine whether the process can create the entity. Each expression must return the boolean value. Otherwise, the decision throws an error.\nThe context is similar to the entity builder. Using context could help the rover understand which process could build the entity.\nProcess Context Is the same with the process context in scanner, but doesn\u0026rsquo;t need to add the {{ and }} in prefix and suffix.\nPod Context Provide current pod information and judgments.\n   Name Argument Example Description     Name None eq .Pod.Name \u0026quot;test-pod-name\u0026quot; The name of the current pod. The example shows the pod name is equal to test-pod-name.   Namespace None eq .Pod.Namespace \u0026quot;test-namesapce\u0026quot; The name of the current pod namespace. The example shows the pod namespace name is equal to test-namespace.   Node None eq .Pod.Node \u0026quot;test-node\u0026quot; The name of the node deployed. The example shows the pod node name is equal to test-node.   LabelValue KeyNames eq .Pod.LavelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot; The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. The example shows the pod has anyone a or b label key, and the value matches to v.   ServiceName None eq .Pod.ServiceName \u0026quot;test-service\u0026quot; The service name of the pod. 
The example shows current pods matched service name is test-service.   HasContainer Container name .Pod.HasContainer \u0026quot;istio-proxy\u0026quot; The pod has the appointed container name.   LabelSelector selector .Pod.LabelSelector The pod is matches the label selector. For more details, please read the official documentation.   HasServiceName None .Pod.HasServiceName The pod has the matched service.   HasOwnerName kindNames .Pod.HasOwnerName \u0026quot;Service,Deployment\u0026quot; The pod has the matched owner name.    Container Context Provide current container(under the pod) information.\n   Name Argument Example Description     Name None eq .Container.Name \u0026quot;istio-proxy\u0026quot; The name of the current container under the pod. The examples show the container name is equal to istio-proxy.    Entity The entity including layer, serviceName, instanceName, processName and labels properties.\nThe entity also could use expression to build(serviceName, instanceName and processName).\nRover Rover context provides the context of the rover process instance and VM data.\n   Name Argument Example Description     InstanceID None {{.Rover.InstanceID}} Get the Instance ID of the rover.   HostIPV4 The Interface name {{.Rover.HostIPV4 \u0026quot;en0\u0026quot;}} Get the ipv4 address from the appointed network interface name.   HostIPV6 The Interface name {{.Rover.HostIPV6 \u0026quot;en0\u0026quot;}} Get the ipv6 address from the appointed network interface name.   HostName None {{.Rover.HostName}} Get the host name of current machine.    Process Process context provides the context relate to which process is matched.\n   Name Argument Example Description     ExeFilePath None {{.Process.ExeFilePath}} The execute file path of process.   ExeName None {{.Process.ExeName}} The execute file name.   CommandLine None {{.Process.CommandLine}} The command line of process.   Pid None {{.Process.Pid}} The id of the process.   WorkDir None {{.Process.WorkDir}} The work directory path of the process.    Pod The information on the current pod.\n   Name Argument Example Description     Name None {{.Pod.Name}} The name of current pod.   Namespace None {{.Pod.Namespace}} The name of current pod namespace.   Node None {{.Pod.Node}} The name of the node deployed.   LabelValue KeyNames, Default {{.Pod.LabelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot;}} The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. If all keys don\u0026rsquo;t have value, then return the default value.   ServiceName None {{.Pod.ServiceName}} The service name of the pod. If the pod hasn\u0026rsquo;t matched service, then return an empty string.   FindContainer ContainerName {{.Pod.FindContainer \u0026quot;test\u0026quot;}} Find the Container context by container name.   OwnerName KindNames {{.Pod.OwnerName \u0026quot;Service,Deployment\u0026quot;}} Find the Owner name by owner kind name.    Container The information of the current container under the pod.\n   Name Argument Example Description     Name None {{.Container.Name}} The name of the current container under the pod.    ID None {{.Container.ID}} The id of the current container under the pod.   EnvValue KeyNames {{.Container.EnvValue \u0026quot;a,b\u0026quot;}} The environment value of the first non-value key in the provided candidates(Iterate from left to right).    
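To make the analyzer options above more concrete, the following is a minimal sketch of a rover process-discovery configuration. It only illustrates how the filters and entity templates described above fit together; the namespace, container, label, and interface names are placeholders, and the exact nesting of the analyzers section should be checked against the configuration file shipped with your rover release.

process_discovery:
  heartbeat_period: 20s
  kubernetes:
    active: true
    namespaces: "sample-services"              # placeholder namespace
    analyzers:
      - active: true
        filters:                               # go-template expressions, without {{ }} wrappers
          - .Pod.HasContainer "istio-proxy"
          - eq .Pod.Namespace "sample-services"
        service_name: '{{.Pod.ServiceName}}'
        instance_name: '{{.Rover.HostIPV4 "en0"}}'
        process_name: '{{.Process.ExeName}}'
        labels: "mesh-envoy,sidecar"           # placeholder labels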
","excerpt":"Service Discovery Service discovery is used to discover all Kubernetes services process in the …","ref":"/docs/skywalking-rover/next/en/setup/configuration/service-discovery/","title":"Service Discovery"},{"body":"Service Discovery Service discovery is used to discover all Kubernetes services process in the current node and report them to backend services. After the process upload is completed, the other modules could perform more operations with the process, such as process profiling and collecting process metrics.\nConfiguration    Name Default Environment Key Description     process_discovery.heartbeat_period 20s ROVER_PROCESS_DISCOVERY_HEARTBEAT_PERIOD The period of report or keep-alive process to the backend.   process_discovery.properties_report_period 10 ROVER_PROCESS_DISCOVERY_PROPERTIES_REPORT_PERIOD The agent sends the process properties to the backend every: heartbeart period * properties report period.   process_discovery.kubernetes.active false ROVER_PROCESS_DISCOVERY_KUBERNETES_ACTIVE Is active the kubernetes process discovery.   process_discovery.kubernetes.node_name  ROVER_PROCESS_DISCOVERY_KUBERNETES_NODE_NAME Current deployed node name, it could be inject by spec.nodeName.   process_discovery.kubernetes.namespaces  ROVER_PROCESS_DISCOVERY_KUBERNETES_NAMESPACES Including pod by namespaces, if empty means including all namespaces. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;.   process_discovery.kubernetes.analyzers   Declare how to build the process. The istio and k8s resources are active by default.   process_discovery.kubernetes.analyzers.active   Set is active analyzer.   process_discovery.kubernetes.analyzers.filters   Define which process is match to current process builder.   process_discovery.kubernetes.analyzers.service_name   The Service Name of the process entity.   process_discovery.kubernetes.analyzers.instance_name   The Service Instance Name of the process entity, by default, the instance name is the host IP v4 address from \u0026ldquo;en0\u0026rdquo; net interface.   process_discovery.kubernetes.analyzers.process_name   The Process Name of the process entity, by default, the process name is the executable name of the process.   process_discovery.kubernetes.analyzers.labels   The Process Labels, used to aggregate similar process from service entity. Multiple labels split by \u0026ldquo;,\u0026rdquo;.    Kubernetes Process Detector The Kubernetes process detector could detect any process under the Kubernetes container. If active the Kubernetes process detector, the rover must be deployed in the Kubernetes cluster. After finding the process, it would collect the metadata of the process when the report to the backend.\nProcess Analyze The process analysis declares which process could be profiled and how to build the process entity. The Istio and Kubernetes resources are active on default.\nFilter The filter provides an expression(go template) mechanism to match the process that can build the entity. Multiple expressions work together to determine whether the process can create the entity. Each expression must return the boolean value. Otherwise, the decision throws an error.\nThe context is similar to the entity builder. 
Using context could help the rover understand which process could build the entity.\nProcess Context Is the same with the process context in scanner, but doesn\u0026rsquo;t need to add the {{ and }} in prefix and suffix.\nPod Context Provide current pod information and judgments.\n   Name Argument Example Description     Name None eq .Pod.Name \u0026quot;test-pod-name\u0026quot; The name of the current pod. The example shows the pod name is equal to test-pod-name.   Namespace None eq .Pod.Namespace \u0026quot;test-namesapce\u0026quot; The name of the current pod namespace. The example shows the pod namespace name is equal to test-namespace.   Node None eq .Pod.Node \u0026quot;test-node\u0026quot; The name of the node deployed. The example shows the pod node name is equal to test-node.   LabelValue KeyNames eq .Pod.LavelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot; The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. The example shows the pod has anyone a or b label key, and the value matches to v.   ServiceName None eq .Pod.ServiceName \u0026quot;test-service\u0026quot; The service name of the pod. The example shows current pods matched service name is test-service.   HasContainer Container name .Pod.HasContainer \u0026quot;istio-proxy\u0026quot; The pod has the appointed container name.   LabelSelector selector .Pod.LabelSelector The pod is matches the label selector. For more details, please read the official documentation.   HasServiceName None .Pod.HasServiceName The pod has the matched service.   HasOwnerName kindNames .Pod.HasOwnerName \u0026quot;Service,Deployment\u0026quot; The pod has the matched owner name.    Container Context Provide current container(under the pod) information.\n   Name Argument Example Description     Name None eq .Container.Name \u0026quot;istio-proxy\u0026quot; The name of the current container under the pod. The examples show the container name is equal to istio-proxy.    Entity The entity including layer, serviceName, instanceName, processName and labels properties.\nThe entity also could use expression to build(serviceName, instanceName and processName).\nRover Rover context provides the context of the rover process instance and VM data.\n   Name Argument Example Description     InstanceID None {{.Rover.InstanceID}} Get the Instance ID of the rover.   HostIPV4 The Interface name {{.Rover.HostIPV4 \u0026quot;en0\u0026quot;}} Get the ipv4 address from the appointed network interface name.   HostIPV6 The Interface name {{.Rover.HostIPV6 \u0026quot;en0\u0026quot;}} Get the ipv6 address from the appointed network interface name.   HostName None {{.Rover.HostName}} Get the host name of current machine.    Process Process context provides the context relate to which process is matched.\n   Name Argument Example Description     ExeFilePath None {{.Process.ExeFilePath}} The execute file path of process.   ExeName None {{.Process.ExeName}} The execute file name.   CommandLine None {{.Process.CommandLine}} The command line of process.   Pid None {{.Process.Pid}} The id of the process.   WorkDir None {{.Process.WorkDir}} The work directory path of the process.    Pod The information on the current pod.\n   Name Argument Example Description     Name None {{.Pod.Name}} The name of current pod.   Namespace None {{.Pod.Namespace}} The name of current pod namespace.   Node None {{.Pod.Node}} The name of the node deployed.   
LabelValue KeyNames, Default {{.Pod.LabelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot;}} The label value for the given label keys. If multiple keys are provided and any key has a value, the remaining keys are not checked. If none of the keys has a value, the default value is returned.   ServiceName None {{.Pod.ServiceName}} The service name of the pod. If the pod has no matched service, an empty string is returned.   FindContainer ContainerName {{.Pod.FindContainer \u0026quot;test\u0026quot;}} Find the Container context by container name.   OwnerName KindNames {{.Pod.OwnerName \u0026quot;Service,Deployment\u0026quot;}} Find the owner name by the owner kind names.    Container The information of the current container under the pod.\n   Name Argument Example Description     Name None {{.Container.Name}} The name of the current container under the pod.    ID None {{.Container.ID}} The ID of the current container under the pod.   EnvValue KeyNames {{.Container.EnvValue \u0026quot;a,b\u0026quot;}} The environment variable value of the first candidate key that has a value (iterating from left to right).    ","excerpt":"Service Discovery Service discovery is used to discover all Kubernetes services process in the …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/service-discovery/","title":"Service Discovery"},{"body":"Service Hierarchy SkyWalking v10 introduces a new concept, Service Hierarchy, which defines the relationships of logically identical services across different layers. OAP detects the services from the different layers and tries to build the connections.\nDetect Service Hierarchy Connections There are two ways to detect the connections:\n Automatically matching through the OAP internal mechanism; no extra work is required. Build the connections through specific agents.  Note: All the relationships and auto-matching rules should be defined in the config/hierarchy-definition.yml file. If you want to customize it according to your own needs, please refer to Service Hierarchy Configuration.\nAutomatically Matching    Upper layer Lower layer Matching rule     GENERAL K8S_SERVICE GENERAL On K8S_SERVICE   GENERAL APISIX GENERAL On APISIX   VIRTUAL_DATABASE MYSQL VIRTUAL_DATABASE On MYSQL   VIRTUAL_DATABASE POSTGRESQL VIRTUAL_DATABASE On POSTGRESQL   VIRTUAL_DATABASE CLICKHOUSE VIRTUAL_DATABASE On CLICKHOUSE   VIRTUAL_MQ RABBITMQ VIRTUAL_MQ On RABBITMQ   VIRTUAL_MQ ROCKETMQ VIRTUAL_MQ On ROCKETMQ   VIRTUAL_MQ KAFKA VIRTUAL_MQ On KAFKA   VIRTUAL_MQ PULSAR VIRTUAL_MQ On PULSAR   MESH MESH_DP MESH On MESH_DP   MESH K8S_SERVICE MESH On K8S_SERVICE   MESH_DP K8S_SERVICE MESH_DP On K8S_SERVICE   MYSQL K8S_SERVICE MYSQL On K8S_SERVICE   POSTGRESQL K8S_SERVICE POSTGRESQL On K8S_SERVICE   CLICKHOUSE K8S_SERVICE CLICKHOUSE On K8S_SERVICE   NGINX K8S_SERVICE NGINX On K8S_SERVICE   APISIX K8S_SERVICE APISIX On K8S_SERVICE   ROCKETMQ K8S_SERVICE ROCKETMQ On K8S_SERVICE   RABBITMQ K8S_SERVICE RABBITMQ On K8S_SERVICE   KAFKA K8S_SERVICE KAFKA On K8S_SERVICE   PULSAR K8S_SERVICE PULSAR On K8S_SERVICE   SO11Y_OAP K8S_SERVICE SO11Y_OAP On K8S_SERVICE     The following sections describe the default matching rules in detail, using the upper-layer On lower-layer format. The example service names are based on the SkyWalking Showcase default deployment. In SkyWalking, a service name can be composed of a group and a short name, separated by ::.  
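Before going through each rule, here is a rough sketch of how a hierarchy relation and its auto-matching rule could be expressed in config/hierarchy-definition.yml. The field names below are assumptions for illustration only; the authoritative schema is the hierarchy-definition.yml file shipped with the OAP and the Service Hierarchy Configuration document.

# Sketch only; verify against the shipped config/hierarchy-definition.yml.
hierarchy:
  MESH:
    MESH_DP: name                         # MESH.service.name == MESH_DP.service.name
    K8S_SERVICE: short-name               # MESH.service.shortName == K8S_SERVICE.service.shortName
  GENERAL:
    K8S_SERVICE: lower-short-name-remove-ns

auto-matching-rules:
  # Groovy closures receiving the upper (u) and lower (l) service.
  name: "{ (u, l) -> u.name == l.name }"
  short-name: "{ (u, l) -> u.shortName == l.shortName }"
  lower-short-name-remove-ns: "{ (u, l) -> u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf('.')) }"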
GENERAL On K8S_SERVICE  Rule name: lower-short-name-remove-ns Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf('.')) } Description: GENERAL.service.shortName == K8S_SERVICE.service.shortName without namespace Matched Example:  GENERAL.service.name: agent::songs K8S_SERVICE.service.name: skywalking-showcase::songs.sample-services    GENERAL On APISIX  Rule name: lower-short-name-remove-ns Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf('.')) } Description: GENERAL.service.shortName == APISIX.service.shortName without namespace Matched Example:  GENERAL.service.name: agent::frontend APISIX.service.name: APISIX::frontend.sample-services    VIRTUAL_DATABASE On MYSQL  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_DATABASE.service.shortName remove port == MYSQL.service.shortName with fqdn suffix Matched Example:  VIRTUAL_DATABASE.service.name: mysql.skywalking-showcase.svc.cluster.local:3306 MYSQL.service.name: mysql::mysql.skywalking-showcase    VIRTUAL_DATABASE On POSTGRESQL  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_DATABASE.service.shortName remove port == POSTGRESQL.service.shortName with fqdn suffix Matched Example:  VIRTUAL_DATABASE.service.name: psql.skywalking-showcase.svc.cluster.local:5432 POSTGRESQL.service.name: postgresql::psql.skywalking-showcase    VIRTUAL_DATABASE On CLICKHOUSE  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_DATABASE.service.shortName remove port == CLICKHOUSE.service.shortName with fqdn suffix Matched Example:  VIRTUAL_DATABASE.service.name: clickhouse.skywalking-showcase.svc.cluster.local:8123 CLICKHOUSE.service.name: clickhouse::clickhouse.skywalking-showcase    VIRTUAL_MQ On ROCKETMQ  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == ROCKETMQ.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: rocketmq.skywalking-showcase.svc.cluster.local:9876 ROCKETMQ.service.name: rocketmq::rocketmq.skywalking-showcase    VIRTUAL_MQ On RABBITMQ  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == RABBITMQ.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: rabbitmq.skywalking-showcase.svc.cluster.local:5672 RABBITMQ.service.name: rabbitmq::rabbitmq.skywalking-showcase     VIRTUAL_MQ On KAFKA  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == KAFKA.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: kafka.skywalking-showcase.svc.cluster.local:9092 KAFKA.service.name: kafka::rocketmq.skywalking-showcase    VIRTUAL_MQ On PULSAR 
 Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == PULSAR.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: pulsar.skywalking-showcase.svc.cluster.local:6650 PULSAR.service.name: pulsar::pulsar.skywalking-showcase    MESH On MESH_DP  Rule name: name Groovy script: { (u, l) -\u0026gt; u.name == l.name } Description: MESH.service.name == MESH_DP.service.name Matched Example:  MESH.service.name: mesh-svr::songs.sample-services MESH_DP.service.name: mesh-svr::songs.sample-services    MESH On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: MESH.service.shortName == K8S_SERVICE.service.shortName Matched Example:  MESH.service.name: mesh-svr::songs.sample-services K8S_SERVICE.service.name: skywalking-showcase::songs.sample-services    MESH_DP On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: MESH_DP.service.shortName == K8S_SERVICE.service.shortName Matched Example:  MESH_DP.service.name: mesh-svr::songs.sample-services K8S_SERVICE.service.name: skywalking-showcase::songs.sample-services    MYSQL On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: MYSQL.service.shortName == K8S_SERVICE.service.shortName Matched Example:  MYSQL.service.name: mysql::mysql.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::mysql.skywalking-showcase    POSTGRESQL On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: POSTGRESQL.service.shortName == K8S_SERVICE.service.shortName Matched Example:  POSTGRESQL.service.name: postgresql::psql.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::psql.skywalking-showcase    CLICKHOUSE On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: CLICKHOUSE.service.shortName == K8S_SERVICE.service.shortName Matched Example:  CLICKHOUSE.service.name: clickhouse::clickhouse.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::clickhouse.skywalking-showcase    NGINX On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: NGINX.service.shortName == K8S_SERVICE.service.shortName Matched Example:  NGINX.service.name: nginx::nginx.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::nginx.skywalking-showcase    APISIX On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: APISIX.service.shortName == K8S_SERVICE.service.shortName Matched Example:  APISIX.service.name: APISIX::frontend.sample-services K8S_SERVICE.service.name: skywalking-showcase::frontend.sample-services    ROCKETMQ On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: ROCKETMQ.service.shortName == K8S_SERVICE.service.shortName Matched Example:  ROCKETMQ.service.name: rocketmq::rocketmq.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::rocketmq.skywalking-showcase    RABBITMQ On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: RABBITMQ.service.shortName == K8S_SERVICE.service.shortName Matched 
Example:  RABBITMQ.service.name: rabbitmq::rabbitmq.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::rabbitmq.skywalking-showcase    KAFKA On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: KAFKA.service.shortName == K8S_SERVICE.service.shortName Matched Example:  KAFKA.service.name: kafka::kafka.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::kafka.skywalking-showcase    PULSAR On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: PULSAR.service.shortName == K8S_SERVICE.service.shortName Matched Example:  PULSAR.service.name: pulsar::pulsar.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::pulsar.skywalking-showcase    SO11Y_OAP On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: SO11Y_OAP.service.shortName == K8S_SERVICE.service.shortName Matched Example:  SO11Y_OAP.service.name: demo-oap.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::demo-oap.skywalking-showcase    Build Through Specific Agents Use agent tech involved(such as eBPF) and deployment tools(such as operator and agent injector) to detect the service hierarchy relations.\n   Upper layer Lower layer Agent    Instance Hierarchy Instance Hierarchy relationship follows the same definition as Service Hierarchy.\nAutomatically Matching If the service hierarchy is built, the instance hierarchy relationship could be detected automatically through the following rules:\n The upper instance name equals the lower instance name. The upper instance attribute pod/hostname equals the lower instance attribute pod/hostname. The upper instance attribute pod/hostname equals the lower instance name. The upper instance name equals the lower instance attribute pod/hostname.  Build Through Specific Agents ","excerpt":"Service Hierarchy SkyWalking v10 introduces a new concept Service Hierarchy which defines the …","ref":"/docs/main/next/en/concepts-and-designs/service-hierarchy/","title":"Service Hierarchy"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. 
In this case, if the REST_HOST  environment variable does not exist, but the ANOTHER_REST_HOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/latest/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the ANOTHER_REST_HOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/next/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. 
In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. 
In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. 
In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. 
In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","excerpt":"Setting Override SkyWalking backend supports setting overrides by system properties and system …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-setting-override/","title":"Setting Override"},{"body":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the default configuration options.\nThis configuration file is used during hybrid compilation to write the configuration information of the Agent into the program. When the program boots, the agent would read the pre-configured content.\nConfiguration Changes The values in the config file should be updated by following the user requirements. They are applied during the hybrid compilation process.\nFor missing configuration items in the custom file, the Agent would use the values from the default configuration.\nEnvironment Variables In the default configuration, you can see that most of the configurations are in the format ${xxx:config_value}. It means that when the program starts, the agent would first read the xxx from the system environment variables in the runtime. 
If it cannot be found, the value would be used as the config_value as value.\nNote: that the search for environment variables is at runtime, not compile time.\n","excerpt":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the …","ref":"/docs/skywalking-go/latest/en/advanced-features/settings-override/","title":"Setting Override"},{"body":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the default configuration options.\nThis configuration file is used during hybrid compilation to write the configuration information of the Agent into the program. When the program boots, the agent would read the pre-configured content.\nConfiguration Changes The values in the config file should be updated by following the user requirements. They are applied during the hybrid compilation process.\nFor missing configuration items in the custom file, the Agent would use the values from the default configuration.\nEnvironment Variables In the default configuration, you can see that most of the configurations are in the format ${xxx:config_value}. It means that when the program starts, the agent would first read the xxx from the system environment variables in the runtime. If it cannot be found, the value would be used as the config_value as value.\nNote: that the search for environment variables is at runtime, not compile time.\n","excerpt":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the …","ref":"/docs/skywalking-go/next/en/advanced-features/settings-override/","title":"Setting Override"},{"body":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the default configuration options.\nThis configuration file is used during hybrid compilation to write the configuration information of the Agent into the program. When the program boots, the agent would read the pre-configured content.\nConfiguration Changes The values in the config file should be updated by following the user requirements. They are applied during the hybrid compilation process.\nFor missing configuration items in the custom file, the Agent would use the values from the default configuration.\nEnvironment Variables In the default configuration, you can see that most of the configurations are in the format ${xxx:config_value}. It means that when the program starts, the agent would first read the xxx from the system environment variables in the runtime. If it cannot be found, the value would be used as the config_value as value.\nNote: that the search for environment variables is at runtime, not compile time.\n","excerpt":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the …","ref":"/docs/skywalking-go/v0.4.0/en/advanced-features/settings-override/","title":"Setting Override"},{"body":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","excerpt":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/setting-override/","title":"Setting Override"},{"body":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","excerpt":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/setting-override/","title":"Setting Override"},{"body":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","excerpt":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/setting-override/","title":"Setting Override"},{"body":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","excerpt":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/setting-override/","title":"Setting Override"},{"body":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","excerpt":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/setting-override/","title":"Setting Override"},{"body":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You could override the settings in rover_configs.yaml\nSystem environment variables   Example\nOverride core.backend.addr in this setting segment through environment variables\n  core:backend:addr:${ROVER_BACKEND_ADDR:localhost:11800}If the ROVER_BACKEND_ADDR  environment variable exists in your operating system and its value is oap:11800, then the value of core.backend.addr here will be overwritten to oap:11800, otherwise, it will be set to localhost:11800.\n","excerpt":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You …","ref":"/docs/skywalking-rover/latest/en/setup/configuration/override-settings/","title":"Setting Override"},{"body":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You could override the settings in rover_configs.yaml\nSystem environment variables   Example\nOverride core.backend.addr in this setting segment through environment variables\n  core:backend:addr:${ROVER_BACKEND_ADDR:localhost:11800}If the ROVER_BACKEND_ADDR  environment variable exists in your operating system and its value is oap:11800, then the value of core.backend.addr here will be overwritten to oap:11800, otherwise, it will be set to localhost:11800.\n","excerpt":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. 
You …","ref":"/docs/skywalking-rover/next/en/setup/configuration/override-settings/","title":"Setting Override"},{"body":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You could override the settings in rover_configs.yaml\nSystem environment variables   Example\nOverride core.backend.addr in this setting segment through environment variables\n  core:backend:addr:${ROVER_BACKEND_ADDR:localhost:11800}If the ROVER_BACKEND_ADDR  environment variable exists in your operating system and its value is oap:11800, then the value of core.backend.addr here will be overwritten to oap:11800, otherwise, it will be set to localhost:11800.\n","excerpt":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/override-settings/","title":"Setting Override"},{"body":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. You could override the settings in satellite_config.yaml\nSystem environment variables   Example\nOverride log_pattern in this setting segment through environment variables\n  logger:log_pattern:${SATELLITE_LOGGER_LOG_PATTERN:%time [%level][%field] - %msg}time_pattern:${SATELLITE_LOGGER_TIME_PATTERN:2006-01-02 15:04:05.000}level:${SATELLITE_LOGGER_LEVEL:info}If the SATELLITE_LOGGER_LOG_PATTERN  environment variable exists in your operating system and its value is %msg, then the value of log_pattern here will be overwritten to %msg, otherwise, it will be set to %time [%level][%field] - %msg.\n","excerpt":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. …","ref":"/docs/skywalking-satellite/latest/en/setup/configuration/override-settings/","title":"Setting Override"},{"body":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. You could override the settings in satellite_config.yaml\nSystem environment variables   Example\nOverride log_pattern in this setting segment through environment variables\n  logger:log_pattern:${SATELLITE_LOGGER_LOG_PATTERN:%time [%level][%field] - %msg}time_pattern:${SATELLITE_LOGGER_TIME_PATTERN:2006-01-02 15:04:05.000}level:${SATELLITE_LOGGER_LEVEL:info}If the SATELLITE_LOGGER_LOG_PATTERN  environment variable exists in your operating system and its value is %msg, then the value of log_pattern here will be overwritten to %msg, otherwise, it will be set to %time [%level][%field] - %msg.\n","excerpt":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. …","ref":"/docs/skywalking-satellite/next/en/setup/configuration/override-settings/","title":"Setting Override"},{"body":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. 
You could override the settings in satellite_config.yaml\nSystem environment variables   Example\nOverride log_pattern in this setting segment through environment variables\n  logger:log_pattern:${SATELLITE_LOGGER_LOG_PATTERN:%time [%level][%field] - %msg}time_pattern:${SATELLITE_LOGGER_TIME_PATTERN:2006-01-02 15:04:05.000}level:${SATELLITE_LOGGER_LEVEL:info}If the SATELLITE_LOGGER_LOG_PATTERN  environment variable exists in your operating system and its value is %msg, then the value of log_pattern here will be overwritten to %msg, otherwise, it will be set to %time [%level][%field] - %msg.\n","excerpt":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/override-settings/","title":"Setting Override"},{"body":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and command to execute. If you haven\u0026rsquo;t read the Module Design, recommend read this document first.\n Installation Configuration file Run E2E Tests  ","excerpt":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and …","ref":"/docs/skywalking-infra-e2e/latest/en/setup/readme/","title":"Setup"},{"body":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and command to execute. If you haven\u0026rsquo;t read the Module Design, recommend read this document first.\n Installation Configuration file Run E2E Tests  ","excerpt":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and …","ref":"/docs/skywalking-infra-e2e/next/en/setup/readme/","title":"Setup"},{"body":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and command to execute. If you haven\u0026rsquo;t read the Module Design, recommend read this document first.\n Installation Configuration file Run E2E Tests  ","excerpt":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/readme/","title":"Setup"},{"body":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by configs/rover_configs.yaml. Understanding the setting file will help you to read this document.\nFollow Deploy on Kubernetes document to run rover in your cluster.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demos. Usually, the process to be monitored is first declared.\nThen, you can use bin/startup.sh to start up the rover with their config.\nSkyWalking OAP Compatibility The SkyWalking Rover requires specialized protocols to communicate with SkyWalking OAP.\n   SkyWalking Rover Version SkyWalking OAP Notice     0.6.0+ \u0026gt; = 10.0.0 Only support Kubernetes.   0.1.0+ \u0026gt; = 9.1.0     Configuration  Common configurations about logs, backend address, cert files, etc. Service Discovery includes advanced setups about the ways of discovering services on your Kubernetes cluster. Access logs reports L2 to L4 network traffic relative information through access logs, to help OAP backend to do topology and metrics analysis. Profiling is an on-demand feature to enhance general observability besides access logs. 
It provides eBPF powered process ON_CPU, OFF_CPU profiling and network advanced profiling to link HTTP traffic with SkyWalking and Zipkin traces.  To adjust the configurations, refer to Overriding Setting document for more details.\nPrerequisites Currently, Linux operating systems are supported from version 4.9 and above, except for network profiling which requires version 4.16 or higher.\nThe following table lists currently supported/tested operating systems.\n   System Kernel Version On CPU Profiling Off CPU Profiling Network Profiling     CentOS 7 3.10.0 No No No   CentOS Stream 8 4.18.0 Yes Yes Yes   CentOS Stream 9 5.47.0 Yes Yes Yes   Debian 10 4.19.0 Yes Yes Yes   Debian 11 5.10.0 Yes Yes Yes(TCP Drop Monitor Excluded)   Fedora 35 5.14.10 Yes Yes Yes(TCP Drop Monitor Excluded)   RHEL 7 3.10.0 No No No   RHEL 8 4.18.0 Yes Yes Yes   RHEL 9 5.14.0 Yes Yes Yes   Rocky Linux 8 4.18.0 Yes Yes Yes   Rocky Linux 9 5.14.0 Yes Yes Yes   Ubuntu 1804 5.4.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.10 5.19.0 Yes Yes Yes   Ubuntu Pro 16.04 4.15.0 Yes Yes No   Ubuntu Pro 18.04 5.4.0 Yes Yes Yes   Ubuntu Pro 20.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes    ","excerpt":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by …","ref":"/docs/skywalking-rover/latest/en/setup/overview/","title":"Setup"},{"body":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by configs/rover_configs.yaml. Understanding the setting file will help you to read this document.\nFollow Deploy on Kubernetes document to run rover in your cluster.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demos. Usually, the process to be monitored is first declared.\nThen, you can use bin/startup.sh to start up the rover with their config.\nSkyWalking OAP Compatibility The SkyWalking Rover requires specialized protocols to communicate with SkyWalking OAP.\n   SkyWalking Rover Version SkyWalking OAP Notice     0.6.0+ \u0026gt; = 10.0.0 Only support Kubernetes.   0.1.0+ \u0026gt; = 9.1.0     Configuration  Common configurations about logs, backend address, cert files, etc. Service Discovery includes advanced setups about the ways of discovering services on your Kubernetes cluster. Access logs reports L2 to L4 network traffic relative information through access logs, to help OAP backend to do topology and metrics analysis. Profiling is an on-demand feature to enhance general observability besides access logs. It provides eBPF powered process ON_CPU, OFF_CPU profiling and network advanced profiling to link HTTP traffic with SkyWalking and Zipkin traces.  
To adjust the configurations, refer to Overriding Setting document for more details.\nPrerequisites Currently, Linux operating systems are supported from version 4.9 and above, except for network profiling which requires version 4.16 or higher.\nThe following table lists currently supported/tested operating systems.\n   System Kernel Version On CPU Profiling Off CPU Profiling Network Profiling     CentOS 7 3.10.0 No No No   CentOS Stream 8 4.18.0 Yes Yes Yes   CentOS Stream 9 5.47.0 Yes Yes Yes   Debian 10 4.19.0 Yes Yes Yes   Debian 11 5.10.0 Yes Yes Yes(TCP Drop Monitor Excluded)   Fedora 35 5.14.10 Yes Yes Yes(TCP Drop Monitor Excluded)   RHEL 7 3.10.0 No No No   RHEL 8 4.18.0 Yes Yes Yes   RHEL 9 5.14.0 Yes Yes Yes   Rocky Linux 8 4.18.0 Yes Yes Yes   Rocky Linux 9 5.14.0 Yes Yes Yes   Ubuntu 1804 5.4.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.10 5.19.0 Yes Yes Yes   Ubuntu Pro 16.04 4.15.0 Yes Yes No   Ubuntu Pro 18.04 5.4.0 Yes Yes Yes   Ubuntu Pro 20.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes    ","excerpt":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by …","ref":"/docs/skywalking-rover/next/en/setup/overview/","title":"Setup"},{"body":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by configs/rover_configs.yaml. Understanding the setting file will help you to read this document.\nFollow Deploy on Kubernetes document to run rover in your cluster.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demos. Usually, the process to be monitored is first declared.\nThen, you can use bin/startup.sh to start up the rover with their config.\nSkyWalking OAP Compatibility The SkyWalking Rover requires specialized protocols to communicate with SkyWalking OAP.\n   SkyWalking Rover Version SkyWalking OAP Notice     0.6.0+ \u0026gt; = 10.0.0 Only support Kubernetes.   0.1.0+ \u0026gt; = 9.1.0     Configuration  Common configurations about logs, backend address, cert files, etc. Service Discovery includes advanced setups about the ways of discovering services on your Kubernetes cluster. Access logs reports L2 to L4 network traffic relative information through access logs, to help OAP backend to do topology and metrics analysis. Profiling is an on-demand feature to enhance general observability besides access logs. It provides eBPF powered process ON_CPU, OFF_CPU profiling and network advanced profiling to link HTTP traffic with SkyWalking and Zipkin traces.  
To adjust the configurations, refer to Overriding Setting document for more details.\nPrerequisites Currently, Linux operating systems are supported from version 4.9 and above, except for network profiling which requires version 4.16 or higher.\nThe following table lists currently supported/tested operating systems.\n   System Kernel Version On CPU Profiling Off CPU Profiling Network Profiling     CentOS 7 3.10.0 No No No   CentOS Stream 8 4.18.0 Yes Yes Yes   CentOS Stream 9 5.47.0 Yes Yes Yes   Debian 10 4.19.0 Yes Yes Yes   Debian 11 5.10.0 Yes Yes Yes(TCP Drop Monitor Excluded)   Fedora 35 5.14.10 Yes Yes Yes(TCP Drop Monitor Excluded)   RHEL 7 3.10.0 No No No   RHEL 8 4.18.0 Yes Yes Yes   RHEL 9 5.14.0 Yes Yes Yes   Rocky Linux 8 4.18.0 Yes Yes Yes   Rocky Linux 9 5.14.0 Yes Yes Yes   Ubuntu 1804 5.4.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.10 5.19.0 Yes Yes Yes   Ubuntu Pro 16.04 4.15.0 Yes Yes No   Ubuntu Pro 18.04 5.4.0 Yes Yes Yes   Ubuntu Pro 20.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes    ","excerpt":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/overview/","title":"Setup"},{"body":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by configs/satellite_config.yaml. Understanding the setting file will help you to read this document.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nYou can use bin/startup.sh (or cmd) to start up the satellite with their default settings, set out as follows:\n Receive SkyWalking related protocols through grpc(listens on 0.0.0.0/11800) and transmit them to SkyWalking backend(to 0.0.0.0/11800). Expose Self-Observability telemetry data to Prometheus(listens on 0.0.0.0/1234)  Startup script Startup Script\nbin/startup.sh Examples You can quickly build your satellite according to the following examples:\nDeploy  Deploy on Linux and Windows Deploy on Kubernetes  More Use Cases  Transmit Log to Kafka Enable/Disable Channel Telemetry Exporter  satellite_config.yaml The core concept behind this setting file is, SkyWalking Satellite is based on pure modularization design. End user can switch or assemble the collector features by their own requirements.\nSo, in satellite_config.yaml, there are three parts.\n The common configurations. The sharing plugin configurations. The pipe plugin configurations.  Advanced feature document link list  Overriding settings in satellite_config.yaml is supported  Performance  ALS Load Balance.  ","excerpt":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by …","ref":"/docs/skywalking-satellite/latest/en/setup/readme/","title":"Setup"},{"body":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by configs/satellite_config.yaml. Understanding the setting file will help you to read this document.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. 
Performance and long-term running are not our goals.\nYou can use bin/startup.sh (or cmd) to start up the satellite with their default settings, set out as follows:\n Receive SkyWalking related protocols through grpc(listens on 0.0.0.0/11800) and transmit them to SkyWalking backend(to 0.0.0.0/11800). Expose Self-Observability telemetry data to Prometheus(listens on 0.0.0.0/1234)  Startup script Startup Script\nbin/startup.sh Examples You can quickly build your satellite according to the following examples:\nDeploy  Deploy on Linux and Windows Deploy on Kubernetes  More Use Cases  Transmit Log to Kafka Enable/Disable Channel Telemetry Exporter  satellite_config.yaml The core concept behind this setting file is, SkyWalking Satellite is based on pure modularization design. End user can switch or assemble the collector features by their own requirements.\nSo, in satellite_config.yaml, there are three parts.\n The common configurations. The sharing plugin configurations. The pipe plugin configurations.  Advanced feature document link list  Overriding settings in satellite_config.yaml is supported  Performance  ALS Load Balance.  ","excerpt":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by …","ref":"/docs/skywalking-satellite/next/en/setup/readme/","title":"Setup"},{"body":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by configs/satellite_config.yaml. Understanding the setting file will help you to read this document.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nYou can use bin/startup.sh (or cmd) to start up the satellite with their default settings, set out as follows:\n Receive SkyWalking related protocols through grpc(listens on 0.0.0.0/11800) and transmit them to SkyWalking backend(to 0.0.0.0/11800). Expose Self-Observability telemetry data to Prometheus(listens on 0.0.0.0/1234)  Startup script Startup Script\nbin/startup.sh Examples You can quickly build your satellite according to the following examples:\nDeploy  Deploy on Linux and Windows Deploy on Kubernetes  More Use Cases  Transmit Log to Kafka Enable/Disable Channel Telemetry Exporter  satellite_config.yaml The core concept behind this setting file is, SkyWalking Satellite is based on pure modularization design. End user can switch or assemble the collector features by their own requirements.\nSo, in satellite_config.yaml, there are three parts.\n The common configurations. The sharing plugin configurations. The pipe plugin configurations.  Advanced feature document link list  Overriding settings in satellite_config.yaml is supported  Performance  ALS Load Balance.  
","excerpt":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/readme/","title":"Setup"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/latest/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/next/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For Jetty serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMinThreads:${SW_RECEIVER_SHARING_JETTY_MIN_THREADS:1}restMaxThreads:${SW_RECEIVER_SHARING_JETTY_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_JETTY_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_JETTY_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","excerpt":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-expose/","title":"Setup External Communication Channels"},{"body":"Setup in build When you want to integrate the Agent using the original go build command, you need to follow these steps.\n1. Download Agent Download the Agent from the official website.\n2. Install SkyWalking Go SkyWalking Go offers two ways for integration into your project.\n2.1 Agent Injector Agent injector is recommended when you only want to include SkyWalking Go agent in the compiling pipeline or shell.\nPlease execute the following command, which would automatically import SkyWalking Go into your project.\n/path/to/agent -inject /path/to/your/project [-all]  /path/to/agent is the path to the agent which your downloaded. /path/to/your/project is the home path to your project, support absolute and related with current directory path. -all is the parameter for injecting all submodules in your project.  
2.2 Code Dependency Use go get to import the skywalking-go program.\ngo get github.com/apache/skywalking-go Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; NOTICE: Please ensure that the version of the Agent you downloaded is consistent with the version installed via go get in the previous section, to prevent errors such as missing package references during compilation.\n3. Build with SkyWalking Go Agent Add the following parameters in go build:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a Binary Output The binary would be weaved and instrumented by SkyWalking Go.\n","excerpt":"Setup in build When you want to integrate the Agent using the original go build command, you need to …","ref":"/docs/skywalking-go/latest/en/setup/gobuild/","title":"Setup in build"},{"body":"Setup in build When you want to integrate the Agent using the original go build command, you need to follow these steps.\n1. Download Agent Download the Agent from the official website.\n2. Install SkyWalking Go SkyWalking Go offers two ways for integration into your project.\n2.1 Agent Injector Agent injector is recommended when you only want to include SkyWalking Go agent in the compiling pipeline or shell.\nPlease execute the following command, which would automatically import SkyWalking Go into your project.\n/path/to/agent -inject /path/to/your/project [-all]  /path/to/agent is the path to the agent which your downloaded. /path/to/your/project is the home path to your project, support absolute and related with current directory path. -all is the parameter for injecting all submodules in your project.  2.2 Code Dependency Use go get to import the skywalking-go program.\ngo get github.com/apache/skywalking-go Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; NOTICE: Please ensure that the version of the Agent you downloaded is consistent with the version installed via go get in the previous section, to prevent errors such as missing package references during compilation.\n3. Build with SkyWalking Go Agent Add the following parameters in go build:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a Binary Output The binary would be weaved and instrumented by SkyWalking Go.\n","excerpt":"Setup in build When you want to integrate the Agent using the original go build command, you need to …","ref":"/docs/skywalking-go/next/en/setup/gobuild/","title":"Setup in build"},{"body":"Setup in build When you want to integrate the Agent using the original go build command, you need to follow these steps.\n1. Download Agent Download the Agent from the official website.\n2. 
Install SkyWalking Go SkyWalking Go offers two ways for integration into your project.\n2.1 Agent Injector Agent injector is recommended when you only want to include SkyWalking Go agent in the compiling pipeline or shell.\nPlease execute the following command, which would automatically import SkyWalking Go into your project.\n/path/to/agent -inject /path/to/your/project [-all]  /path/to/agent is the path to the agent which your downloaded. /path/to/your/project is the home path to your project, support absolute and related with current directory path. -all is the parameter for injecting all submodules in your project.  2.2 Code Dependency Use go get to import the skywalking-go program.\ngo get github.com/apache/skywalking-go Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; NOTICE: Please ensure that the version of the Agent you downloaded is consistent with the version installed via go get in the previous section, to prevent errors such as missing package references during compilation.\n3. Build with SkyWalking Go Agent Add the following parameters in go build:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a Binary Output The binary would be weaved and instrumented by SkyWalking Go.\n","excerpt":"Setup in build When you want to integrate the Agent using the original go build command, you need to …","ref":"/docs/skywalking-go/v0.4.0/en/setup/gobuild/","title":"Setup in build"},{"body":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container image.\nCustomized Dockerfile Using the SkyWalking Go provided image as the base image, perform file copying and other operations in the Dockerfile.\n# import the skywalking go base imageFROMapache/skywalking-go:\u0026lt;version\u0026gt;-go\u0026lt;go version\u0026gt;# Copy application codeCOPY /path/to/project /path/to/project# Inject the agent into the project or get dependencies by application selfRUN skywalking-go-agent -inject /path/to/project# Building the project including the agentRUN go build -toolexec=\u0026#34;skywalking-go-agent\u0026#34; -a /path/to/project# More operations...In the above code, we have performed the following actions:\n Used the SkyWalking Go provided image as the base image, which currently supports the following Go versions: 1.16, 1.17, 1.18, 1.19, 1.20. Copied the project into the Docker image. Installed SkyWalking Go and compiled the project, read this documentation for more detail. The SkyWalking Go agent is already installed in the /usr/local/bin directory with the name skywalking-go-agent.  
","excerpt":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container …","ref":"/docs/skywalking-go/latest/en/setup/docker/","title":"Setup in docker"},{"body":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container image.\nCustomized Dockerfile Using the SkyWalking Go provided image as the base image, perform file copying and other operations in the Dockerfile.\n# import the skywalking go base imageFROMapache/skywalking-go:\u0026lt;version\u0026gt;-go\u0026lt;go version\u0026gt;# Copy application codeCOPY /path/to/project /path/to/project# Inject the agent into the project or get dependencies by application selfRUN skywalking-go-agent -inject /path/to/project# Building the project including the agentRUN go build -toolexec=\u0026#34;skywalking-go-agent\u0026#34; -a /path/to/project# More operations...In the above code, we have performed the following actions:\n Used the SkyWalking Go provided image as the base image, which currently supports the following Go versions: 1.16, 1.17, 1.18, 1.19, 1.20. Copied the project into the Docker image. Installed SkyWalking Go and compiled the project, read this documentation for more detail. The SkyWalking Go agent is already installed in the /usr/local/bin directory with the name skywalking-go-agent.  ","excerpt":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container …","ref":"/docs/skywalking-go/next/en/setup/docker/","title":"Setup in docker"},{"body":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container image.\nCustomized Dockerfile Using the SkyWalking Go provided image as the base image, perform file copying and other operations in the Dockerfile.\n# import the skywalking go base imageFROMapache/skywalking-go:\u0026lt;version\u0026gt;-go\u0026lt;go version\u0026gt;# Copy application codeCOPY /path/to/project /path/to/project# Inject the agent into the project or get dependencies by application selfRUN skywalking-go-agent -inject /path/to/project# Building the project including the agentRUN go build -toolexec=\u0026#34;skywalking-go-agent\u0026#34; -a /path/to/project# More operations...In the above code, we have performed the following actions:\n Used the SkyWalking Go provided image as the base image, which currently supports the following Go versions: 1.16, 1.17, 1.18, 1.19, 1.20. Copied the project into the Docker image. Installed SkyWalking Go and compiled the project, read this documentation for more detail. The SkyWalking Go agent is already installed in the /usr/local/bin directory with the name skywalking-go-agent.  ","excerpt":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container …","ref":"/docs/skywalking-go/v0.4.0/en/setup/docker/","title":"Setup in docker"},{"body":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. 
New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- expired-plugins # Expired plugins are moved to this folder. No guarantee of working and maintenance. apm-impala-2.6.x-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","excerpt":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/readme/","title":"Setup java agent"},{"body":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- expired-plugins # Expired plugins are moved to this folder. No guarantee of working and maintenance. apm-impala-2.6.x-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  
set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","excerpt":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/readme/","title":"Setup java agent"},{"body":"Setup java agent  Agent is available for JDK 8 - 17. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  The agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","excerpt":"Setup java agent  Agent is available for JDK 8 - 17. Find agent folder in SkyWalking release package …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/readme/","title":"Setup java agent"},{"body":"Setup java agent  Agent is available for JDK 8 - 21. 
Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","excerpt":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/readme/","title":"Setup java agent"},{"body":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- expired-plugins # Expired plugins are moved to this folder. No guarantee of working and maintenance. apm-impala-2.6.x-plugin.jar ..... 
+-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","excerpt":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/readme/","title":"Setup java agent"},{"body":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini.  Requirements  GCC Rustc 1.65+ Cargo Libclang 9.0+ Make Protoc  Install dependencies For Debian-base OS sudo apt install gcc make llvm-13-dev libclang-13-dev protobuf-c-compiler protobuf-compiler For Alpine Linux apk add gcc make musl-dev llvm15-dev clang15-dev protobuf-c-compiler Install Rust globally The officially recommended way to install Rust is via rustup.\nBut because the source code toolchain is override by rust-toolchain.toml, so if you don\u0026rsquo;t need multi version Rust, we recommend to install Rust by these way:\n  Install through OS package manager (The Rust version in the source must be \u0026gt;= 1.65).\n  Through standalone installers.\nFor linux x86_64 user:\nwget https://static.rust-lang.org/dist/rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz tar zxvf rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz cd rust-1.65.0-x86_64-unknown-linux-gnu ./install.sh   Through rustup but set default-toolchain to none.\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none   Install  Notice: If you compile skywalking_agent in Alpine Linux, you have to disable crt-static, otherwise the problem will be throw: \u0026ldquo;the libclang shared library at /usr/lib/libclang.so.15.0.7 could not be opened: Dynamic loading not supported\u0026rdquo;.\nYou can disable crt-static by environment variable:\nexport RUSTFLAGS=\u0026#34;-C target-feature=-crt-static\u0026#34;  Install from pecl.net pecl install skywalking_agent Install from the source codes git clone --recursive https://github.com/apache/skywalking-php.git cd skywalking-php phpize ./configure make make install Configure Configure skywalking agent in your php.ini.\n[skywalking_agent] extension = skywalking_agent.so ; Enable skywalking_agent extension or not. skywalking_agent.enable = Off ; Log file path. skywalking_agent.log_file = /tmp/skywalking-agent.log ; Log level: one of `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. 
skywalking_agent.log_level = INFO ; Address of skywalking oap server. skywalking_agent.server_addr = 127.0.0.1:11800 ; Application service name. skywalking_agent.service_name = hello-skywalking Refer to the Configuration section for more configuration items.\n Notice: It is not recommended to enable skywalking_agent.enable by default globally, because skywalking agent will modify the hook function and fork a new process to be a worker. Enabling it by default will cause extra meaningless consumption when skywalking agent is not needed (such as simply executing a php script).\n Run Start php-fpm server:\nphp-fpm -F -d \u0026#34;skywalking_agent.enable=On\u0026#34;  Notice: It is necessary to keep the php-fpm process running in the foreground (by specifying the \u0026gt; -F parameter, etc.), running php-fpm as a daemon will cause the skywalking-agent reporter process immediately exit.\n ","excerpt":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini. …","ref":"/docs/skywalking-php/latest/en/setup/service-agent/php-agent/readme/","title":"Setup PHP Agent"},{"body":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini.  Requirements  GCC Rustc 1.65+ Cargo Libclang 9.0+ Make Protoc  Install dependencies For Debian-base OS sudo apt install gcc make llvm-13-dev libclang-13-dev protobuf-c-compiler protobuf-compiler For Alpine Linux apk add gcc make musl-dev llvm15-dev clang15-dev protobuf-c-compiler Install Rust globally The officially recommended way to install Rust is via rustup.\nBut because the source code toolchain is override by rust-toolchain.toml, so if you don\u0026rsquo;t need multi version Rust, we recommend to install Rust by these way:\n  Install through OS package manager (The Rust version in the source must be \u0026gt;= 1.65).\n  Through standalone installers.\nFor linux x86_64 user:\nwget https://static.rust-lang.org/dist/rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz tar zxvf rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz cd rust-1.65.0-x86_64-unknown-linux-gnu ./install.sh   Through rustup but set default-toolchain to none.\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none   Install  Notice: If you compile skywalking_agent in Alpine Linux, you have to disable crt-static, otherwise the problem will be throw: \u0026ldquo;the libclang shared library at /usr/lib/libclang.so.15.0.7 could not be opened: Dynamic loading not supported\u0026rdquo;.\nYou can disable crt-static by environment variable:\nexport RUSTFLAGS=\u0026#34;-C target-feature=-crt-static\u0026#34;  Install from pecl.net pecl install skywalking_agent Install from the source codes git clone --recursive https://github.com/apache/skywalking-php.git cd skywalking-php phpize ./configure make make install Configure Configure skywalking agent in your php.ini.\n[skywalking_agent] extension = skywalking_agent.so ; Enable skywalking_agent extension or not. skywalking_agent.enable = Off ; Log file path. skywalking_agent.log_file = /tmp/skywalking-agent.log ; Log level: one of `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. skywalking_agent.log_level = INFO ; Address of skywalking oap server. skywalking_agent.server_addr = 127.0.0.1:11800 ; Application service name. 
skywalking_agent.service_name = hello-skywalking Refer to the Configuration section for more configuration items.\n Notice: It is not recommended to enable skywalking_agent.enable by default globally, because skywalking agent will modify the hook function and fork a new process to be a worker. Enabling it by default will cause extra meaningless consumption when skywalking agent is not needed (such as simply executing a php script).\n Run Start php-fpm server:\nphp-fpm -F -d \u0026#34;skywalking_agent.enable=On\u0026#34;  Notice: It is necessary to keep the php-fpm process running in the foreground (by specifying the \u0026gt; -F parameter, etc.), running php-fpm as a daemon will cause the skywalking-agent reporter process immediately exit.\n ","excerpt":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini. …","ref":"/docs/skywalking-php/next/en/setup/service-agent/php-agent/readme/","title":"Setup PHP Agent"},{"body":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini.  Requirements  GCC Rustc 1.65+ Cargo Libclang 9.0+ Make Protoc  Install dependencies For Debian-base OS sudo apt install gcc make llvm-13-dev libclang-13-dev protobuf-c-compiler protobuf-compiler For Alpine Linux apk add gcc make musl-dev llvm15-dev clang15-dev protobuf-c-compiler Install Rust globally The officially recommended way to install Rust is via rustup.\nBut because the source code toolchain is override by rust-toolchain.toml, so if you don\u0026rsquo;t need multi version Rust, we recommend to install Rust by these way:\n  Install through OS package manager (The Rust version in the source must be \u0026gt;= 1.65).\n  Through standalone installers.\nFor linux x86_64 user:\nwget https://static.rust-lang.org/dist/rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz tar zxvf rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz cd rust-1.65.0-x86_64-unknown-linux-gnu ./install.sh   Through rustup but set default-toolchain to none.\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none   Install  Notice: If you compile skywalking_agent in Alpine Linux, you have to disable crt-static, otherwise the problem will be throw: \u0026ldquo;the libclang shared library at /usr/lib/libclang.so.15.0.7 could not be opened: Dynamic loading not supported\u0026rdquo;.\nYou can disable crt-static by environment variable:\nexport RUSTFLAGS=\u0026#34;-C target-feature=-crt-static\u0026#34;  Install from pecl.net pecl install skywalking_agent Install from the source codes git clone --recursive https://github.com/apache/skywalking-php.git cd skywalking-php phpize ./configure make make install Configure Configure skywalking agent in your php.ini.\n[skywalking_agent] extension = skywalking_agent.so ; Enable skywalking_agent extension or not. skywalking_agent.enable = Off ; Log file path. skywalking_agent.log_file = /tmp/skywalking-agent.log ; Log level: one of `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. skywalking_agent.log_level = INFO ; Address of skywalking oap server. skywalking_agent.server_addr = 127.0.0.1:11800 ; Application service name. skywalking_agent.service_name = hello-skywalking Refer to the Configuration section for more configuration items.\n Notice: It is not recommended to enable skywalking_agent.enable by default globally, because skywalking agent will modify the hook function and fork a new process to be a worker. 
Enabling it by default will cause extra meaningless consumption when skywalking agent is not needed (such as simply executing a php script).\n Run Start php-fpm server:\nphp-fpm -F -d \u0026#34;skywalking_agent.enable=On\u0026#34;  Notice: It is necessary to keep the php-fpm process running in the foreground (by specifying the \u0026gt; -F parameter, etc.), running php-fpm as a daemon will cause the skywalking-agent reporter process immediately exit.\n ","excerpt":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini. …","ref":"/docs/skywalking-php/v0.7.0/en/setup/service-agent/php-agent/readme/","title":"Setup PHP Agent"},{"body":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients and servers.\nCommon Configuration    Config Default Description     pipe_name sharing The group name of sharing plugins    Clients Clients have a series of client plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all client plugin configurations.\nServers Servers have a series of server plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all server plugin configurations.\nExample # The sharing plugins referenced by the specific plugins in the different pipes.sharing:common_config:pipe_name:sharingclients:- plugin_name:\u0026#34;kafka-client\u0026#34;brokers:${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}version:${SATELLITE_KAFKA_VERSION:\u0026#34;2.1.1\u0026#34;}servers:- plugin_name:\u0026#34;grpc-server\u0026#34;- plugin_name:\u0026#34;prometheus-server\u0026#34;address:${SATELLITE_PROMETHEUS_ADDRESS:\u0026#34;:8090\u0026#34;}","excerpt":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients …","ref":"/docs/skywalking-satellite/latest/en/setup/configuration/sharing-plugins/","title":"Sharing Plugins"},{"body":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients and servers.\nCommon Configuration    Config Default Description     pipe_name sharing The group name of sharing plugins    Clients Clients have a series of client plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all client plugin configurations.\nServers Servers have a series of server plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all server plugin configurations.\nExample # The sharing plugins referenced by the specific plugins in the different pipes.sharing:common_config:pipe_name:sharingclients:- plugin_name:\u0026#34;kafka-client\u0026#34;brokers:${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}version:${SATELLITE_KAFKA_VERSION:\u0026#34;2.1.1\u0026#34;}servers:- plugin_name:\u0026#34;grpc-server\u0026#34;- plugin_name:\u0026#34;prometheus-server\u0026#34;address:${SATELLITE_PROMETHEUS_ADDRESS:\u0026#34;:8090\u0026#34;}","excerpt":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients …","ref":"/docs/skywalking-satellite/next/en/setup/configuration/sharing-plugins/","title":"Sharing Plugins"},{"body":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients and servers.\nCommon Configuration    Config Default Description     pipe_name sharing The group name of sharing plugins    Clients Clients have a series of client plugins, which would be sharing with the plugins of the other pipes. 
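For readability, the Example in this section lays out as follows once line breaks are restored (same plugin names and values as in the flattened text above):
sharing:
  common_config:
    pipe_name: sharing
  clients:
    - plugin_name: "kafka-client"
      brokers: ${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}
      version: ${SATELLITE_KAFKA_VERSION:"2.1.1"}
  servers:
    - plugin_name: "grpc-server"
    - plugin_name: "prometheus-server"
      address: ${SATELLITE_PROMETHEUS_ADDRESS:":8090"}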
Please read the doc to find all client plugin configurations.\nServers Servers have a series of server plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all server plugin configurations.\nExample # The sharing plugins referenced by the specific plugins in the different pipes.sharing:common_config:pipe_name:sharingclients:- plugin_name:\u0026#34;kafka-client\u0026#34;brokers:${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}version:${SATELLITE_KAFKA_VERSION:\u0026#34;2.1.1\u0026#34;}servers:- plugin_name:\u0026#34;grpc-server\u0026#34;- plugin_name:\u0026#34;prometheus-server\u0026#34;address:${SATELLITE_PROMETHEUS_ADDRESS:\u0026#34;:8090\u0026#34;}","excerpt":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/sharing-plugins/","title":"Sharing Plugins"},{"body":"SkyWalking 9.x showcase This showcase would follow the latest changes of SkyWalking 9.x, even before the official release.\nThis showcase repository includes an example music application and other manifests to demonstrate the main features of SkyWalking. The music application is composed of several microservices that are written in different programming languages. Here is the architecture:\n%% please read this doc in our official website, otherwise the graph is not correctly rendered. graph LR; loadgen[load generator] --\u0026gt; ui(\u0026quot;UI (React)\u0026quot;) --\u0026gt; Traffic1(\u0026quot;HTTP Request for backend serv\u0026quot;) --\u0026gt; apisix(\u0026quot;APISIX as UI container\u0026quot;) --\u0026gt; app(\u0026quot;app server (NodeJS)\u0026quot;) --\u0026gt; gateway(\u0026quot;gateway (Spring)\u0026quot;); ui(\u0026quot;UI (React)\u0026quot;) --\u0026gt; Traffic2(\u0026quot;HTTP Request for UI codes\u0026quot;) --\u0026gt; apisix(\u0026quot;APISIX with UI container\u0026quot;) gateway --\u0026gt; songs(\u0026quot;songs (Spring)\u0026quot;) \u0026amp; rcmd(\u0026quot;recommendations (Python)\u0026quot;); rcmd --\u0026gt; rating(\u0026quot;rating (Go)\u0026quot;); songs --\u0026gt; activeMQ activeMQ --\u0026gt; songs rcmd --\u0026gt; songs; songs --\u0026gt; db(\u0026quot;database (H2)\u0026quot;); Usage Please run the showcase in a brand new test cluster, otherwise the undeploy process may delete some resources that you have installed before running this showcase (for example cert-manager). If you don\u0026rsquo;t do this in a new test cluster, it\u0026rsquo;s all on your own risks!\nThe showcase uses GNU Make and Docker containers to run commands, so please make sure you have make installed and Docker daemon running.\nPrerequisites To deploy the full features of this showcase application, you may need up to 8 CPU cores and 32 GB memory, please increase the Docker daemon resources or Kubernetes cluster resources if you find containers / Pods failed to start up. 
Alternatively, you can also only deploy part of the features that interest you if you don\u0026rsquo;t want to increase the resources, via the guide in Customization.\nQuick Start Make sure you have a running Kubernetes cluster and kubectl can access to that cluster.\ngit clone https://github.com/apache/skywalking-showcase.git cd skywalking-showcase make deploy.kubernetes This will install SkyWalking components, including OAP in cluster mode with 2 nodes, SkyWalking UI, microservices with SkyWalking agent, microservices without SkyWalking agent but managed by Istio, 2 Pods to mimic virtual machines and export metrics to SkyWalking, and enable kubernetes cluster monitoring as well as SkyWalking self observability.\nFor more advanced deployments, check Customization documentation below.\nNotice, when run this showcase locally such as KinD, the images are downloaded inside the KinD, which could take over 10 mins(depend on local network). Rerun make deploy.kubernetes if some timeout errors break the process.\nCustomization The variables defined in Makefile.in can be overridden to customize the showcase, by specifying an environment variable with the same name, e.g.:\nexport ES_VERSION=7.14.0 make \u0026lt;target\u0026gt; or directly specifying in the make command, e.g.: make \u0026lt;target\u0026gt; ES_VERSION=7.14.0.\nRun make help to get more information.\nFeatures The showcase is composed of a set of scenarios with feature flags, you can deploy some of them that interest you by overriding the FEATURE_FLAGS variable defined in Makefile.in, as documented in Customization, e.g.:\nmake deploy.kubernetes FEATURE_FLAGS=single-node,agent Feature flags for different platforms (Kubernetes and Docker Compose) are not necessarily the same so make sure to specify the right feature flags.\nCurrently, the features supported are:\n   Name Description Note     java-agent-injector Use the java agent injector to inject the Skywalking Java agent and deploy microservices with other SkyWalking agent enabled. The microservices include agents for Java, NodeJS server, browser, Python.   agent Deploy microservices with SkyWalking agent pre-installed. In Kubernetes scenarios, please use java-agent-injector instead of this, if possible.   cluster Deploy SkyWalking OAP in cluster mode, with 2 nodes, and SkyWalking UI. Only one of cluster or single-node can be enabled.   single-node Deploy only one single node of SkyWalking OAP, and SkyWalking UI, ElasticSearch as storage. Only one of cluster or single-node can be enabled.   elasticsearch Deploy ElasticSearch as storage, you may want to disable this if you want to use your own ElasticSearch deployments.    postgresql Deploy PostgreSQL as storage, you may want to disable this if you want to use your own PostgreSQL deployments.    so11y Enable SkyWalking self observability. This is enabled by default for platform Docker Compose.   vm-monitor Start 2 virtual machines and export their metrics to SkyWalking. The \u0026ldquo;virtual machines\u0026rdquo; are mimicked by Docker containers or Pods.   als Start microservices WITHOUT SkyWalking agent enabled, and configure SkyWalking to analyze the topology and metrics from their access logs. Command istioctl is required to run this feature. The agentless microservices will be running at namespace ${NAMESPACE}-agentless   kubernetes-monitor Deploy OpenTelemetry and export Kubernetes monitoring metrics to SkyWalking for analysis and display on UI.    
istiod-monitor Deploy OpenTelemetry and export Istio control plane metrics to SkyWalking for analysis and display on UI.    event Deploy tools to trigger events, and SkyWalking Kubernetes event exporter to export events into SkyWalking.    satellite Deploy SkyWalking Satellite to load balance the monitoring data.    trace-profiling Deploy tools to submit trace profiling tasks. Only support deployment with SkyWalking agents installed, currently Java agent and Python agent support trace profiling.   rover Deploy SkyWalking Rover and detect the processes in the Kubernetes environment. Only support deployment in the Kubernetes environment, docker is not supported.   mysql-monitor Start a MySQL server and load generator to execute the sample SQLs periodically, set up fluent bit to fetch slow logs and export to OAP, and export their metrics to SkyWalking.    postgresql-monitor Start a PostgreSQL server, and load generator to execute the sample SQLs periodically, set up fluent bit to fetch slow logs and export to OAP, and export their metrics to SkyWalking.    elasticsearch-monitor Deploy OpenTelemetry and export Elasticsearch monitoring metrics to SkyWalking for analysis and display on UI.    mongodb-monitor Deploy OpenTelemetry and export MongoDB monitoring metrics to SkyWalking for analysis and display on UI.    nginx-monitor Deploy OpenTelemetry and export Nginx metrics and logs to SkyWalking for analysis and display on UI    apisix-monitor Deploy OpenTelemetry and export APISIX metrics to SkyWalking for analysis and display on UI    mesh-with-agent Deploy services with java agent in the service mesh environment. Only support deployment in the Kubernetes environment, docker is not supported.   grafana Deploy a Grafana to show SkyWalking metrics and logs on the Grafana UI. Feel free to modify the Grafana config when deploy your own environment.   r3 Deploy R3 as RESTful URL recognition service.    rocketmq-monitor Deploy OpenTelemetry and export RocketMQ monitoring metrics to SkyWalking for analysis and display on UI.    pulsar-monitor Deploy OpenTelemetry and export Pulsar monitoring metrics to SkyWalking for analysis and display on UI.    rabbitmq-monitor Deploy OpenTelemetry and export RabbitMQ monitoring metrics to SkyWalking for analysis and display on UI.     Kubernetes To deploy the example application in Kubernetes, please make sure that you have kubectl command available, and it can connect to the Kubernetes cluster successfully.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nRun kubectl get nodes to check the connectivity before going to next step. The typical error message that indicates your kubectl cannot connect to a cluster is:\nThe connection to the server localhost:8080 was refused - did you specify the right host or port? Deploy # Deploy make deploy.kubernetes # Undeploy make undeploy.kubernetes # Redeploy make redeploy.kubernetes # equivalent to make undeploy.kubernetes deploy.kubernetes Docker Compose Deploy # Deploy make deploy.docker # Undeploy make undeploy.docker # Redeploy make redeploy.docker # equivalent to make undeploy.docker deploy.docker Traffic Flow After deploy the showcase, the business system would send monitoring traffic to the OAP node, and one agent/sidecar connect to one OAP node directly.\nSatellite If the business traffic is unbalanced, it would cause the OAP node receive unbalanced monitoring data. So, you could add the Satellite component. 
After deploy the showcase with the satellite component, the monitoring traffic would send to the Satellite service, and satellite load balances the traffic to the OAP nodes.\n%% please read this doc in our official website, otherwise the graph is not correctly rendered. graph LR; agent[\u0026quot;business app(agent)\u0026quot;] --\u0026gt; satellite(\u0026quot;satellite\u0026quot;) --\u0026gt; oap(\u0026quot;oap\u0026quot;); envoy[\u0026quot;sidecar(envoy)\u0026quot;] --\u0026gt; satellite; Troubleshooting If you encounter any problems, please add DEBUG=true to the command line to get the output of the resources that will be applied.\nmake deploy.kubernetes DEBUG=true # this will print the resources that will be applied to Kubernetes make deploy.docker DEBUG=true # this will print the merged docker-compose.yaml content that will be used to run in Docker Compose ","excerpt":"SkyWalking 9.x showcase This showcase would follow the latest changes of SkyWalking 9.x, even before …","ref":"/docs/skywalking-showcase/next/readme/","title":"SkyWalking 9.x showcase"},{"body":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x 
undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x spring-cloud-gateway-4.x spring-webflux-6.x spring-webflux-6.x-webclient activemq-artemis-jakarta-client-2.x  ","excerpt":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/plugin-list/","title":"Skywalking Agent List"},{"body":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool 
jetty-server-11.x jetty-client-11.x websphere-liberty-23.x spring-cloud-gateway-4.x spring-webflux-6.x spring-webflux-6.x-webclient activemq-artemis-jakarta-client-2.x c3p0-0.9.x  ","excerpt":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-list/","title":"Skywalking Agent List"},{"body":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x  ","excerpt":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/plugin-list/","title":"Skywalking Agent List"},{"body":"Skywalking Agent List  aerospike activemq-5.x 
armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x  ","excerpt":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/plugin-list/","title":"Skywalking Agent List"},{"body":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus 
grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x spring-cloud-gateway-4.x spring-webflux-6.x spring-webflux-6.x-webclient activemq-artemis-jakarta-client-2.x  ","excerpt":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/plugin-list/","title":"Skywalking Agent List"},{"body":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes.\nSWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nFeatures  Java Agent Injector: Inject the java agent into the application pod natively. Operator: Provision and maintain SkyWalking backend components. Custom Metrics Adapter: Provides custom metrics come from SkyWalking OAP cluster for autoscaling by Kubernetes HPA  Build images Issue below instrument to get the docker image:\nmake or\nmake build To onboard operator or adapter, you should push the image to a registry where the kubernetes cluster can pull it.\nOnboard Java Agent Injector and Operator The java agent injector and operator share a same binary. 
To onboard them, you should follow:\n To install the java agent injector and operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f config/operator-bundle.yaml Onboard Custom Metrics Adapter  Deploy OAP server by referring to Operator Quick Start. Apply the manifests for an adapter in config:  kubectl apply -f config/adapter-bundle.yaml License Apache 2.0 License.\n","excerpt":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes. …","ref":"/docs/skywalking-swck/latest/binary-readme/","title":"SkyWalking Cloud on Kubernetes"},{"body":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes.\nSWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nFeatures  Java Agent Injector: Inject the java agent into the application pod natively. Operator: Provision and maintain SkyWalking backend components. Custom Metrics Adapter: Provides custom metrics come from SkyWalking OAP cluster for autoscaling by Kubernetes HPA  Build images Issue below instrument to get the docker image:\nmake or\nmake build To onboard operator or adapter, you should push the image to a registry where the kubernetes cluster can pull it.\nOnboard Java Agent Injector and Operator The java agent injector and operator share a same binary. To onboard them, you should follow:\n To install the java agent injector and operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f config/operator-bundle.yaml Onboard Custom Metrics Adapter  Deploy OAP server by referring to Operator Quick Start. Apply the manifests for an adapter in config:  kubectl apply -f config/adapter-bundle.yaml License Apache 2.0 License.\n","excerpt":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes. …","ref":"/docs/skywalking-swck/next/binary-readme/","title":"SkyWalking Cloud on Kubernetes"},{"body":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes.\nSWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nFeatures  Java Agent Injector: Inject the java agent into the application pod natively. Operator: Provision and maintain SkyWalking backend components. Custom Metrics Adapter: Provides custom metrics come from SkyWalking OAP cluster for autoscaling by Kubernetes HPA  Build images Issue below instrument to get the docker image:\nmake or\nmake build To onboard operator or adapter, you should push the image to a registry where the kubernetes cluster can pull it.\nOnboard Java Agent Injector and Operator The java agent injector and operator share a same binary. To onboard them, you should follow:\n To install the java agent injector and operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f config/operator-bundle.yaml Onboard Custom Metrics Adapter  Deploy OAP server by referring to Operator Quick Start. 
Apply the manifests for an adapter in config:  kubectl apply -f config/adapter-bundle.yaml License Apache 2.0 License.\n","excerpt":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes. …","ref":"/docs/skywalking-swck/v0.9.0/binary-readme/","title":"SkyWalking Cloud on Kubernetes"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process …","ref":"/docs/main/latest/en/api/x-process-correlation-headers-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  
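As an illustration of the header value format described above, here is a minimal sketch (the helper functions are hypothetical, not an official SkyWalking agent API) that encodes and decodes an sw8-correlation value as base64(key):base64(value) pairs joined by commas:

```python
import base64

def encode_correlation(context: dict) -> str:
    """Encode key/value pairs into an sw8-correlation header value.

    The protocol recommends keeping the number of keys small and each value
    under 128 bytes; oversized values are skipped here rather than truncated.
    """
    items = []
    for key, value in context.items():
        if len(value.encode("utf-8")) >= 128:
            continue
        encoded_key = base64.b64encode(key.encode("utf-8")).decode("ascii")
        encoded_value = base64.b64encode(value.encode("utf-8")).decode("ascii")
        items.append(f"{encoded_key}:{encoded_value}")
    return ",".join(items)

def decode_correlation(header_value: str) -> dict:
    """Decode an sw8-correlation header value back into key/value pairs."""
    context = {}
    for element in filter(None, header_value.split(",")):
        encoded_key, encoded_value = element.split(":", 1)
        key = base64.b64decode(encoded_key).decode("utf-8")
        context[key] = base64.b64decode(encoded_value).decode("utf-8")
    return context

# Example: two custom keys propagated alongside the trace context.
value = encode_correlation({"tenant": "acme", "region": "eu-west-1"})
print(value)
print(decode_correlation(value))
```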
","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process …","ref":"/docs/main/next/en/api/x-process-correlation-headers-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation …","ref":"/docs/main/v9.0.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation …","ref":"/docs/main/v9.1.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. 
Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation …","ref":"/docs/main/v9.2.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation …","ref":"/docs/main/v9.3.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. 
The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process …","ref":"/docs/main/v9.4.0/en/api/x-process-correlation-headers-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process …","ref":"/docs/main/v9.5.0/en/api/x-process-correlation-headers-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  
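To make the API recommendations above concrete, here is a rough sketch (a hypothetical CorrelationContext class, not the TraceContext implementation of any particular agent) showing the intended put/get semantics: a key is added when absent, later writes override earlier values, and the key count and value size stay within the recommended limits:

```python
from typing import Dict, Optional

class CorrelationContext:
    """Hypothetical in-process context mirroring the recommended API semantics."""

    MAX_KEYS = 3           # recommended upper bound on the number of correlation keys
    MAX_VALUE_BYTES = 128  # each value should stay under 128 bytes

    def __init__(self) -> None:
        self._items: Dict[str, str] = {}

    def put_correlation(self, key: str, value: str) -> bool:
        """Add the key if absent; later writes override the previous value."""
        if key not in self._items and len(self._items) >= self.MAX_KEYS:
            return False  # refuse brand-new keys once the limit is reached
        if len(value.encode("utf-8")) >= self.MAX_VALUE_BYTES:
            return False
        self._items[key] = value
        return True

    def get_correlation(self, key: str) -> Optional[str]:
        return self._items.get(key)

ctx = CorrelationContext()
ctx.put_correlation("tenant", "acme")
ctx.put_correlation("tenant", "globex")  # overrides the previous value
print(ctx.get_correlation("tenant"))     # -> globex
```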
","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process …","ref":"/docs/main/v9.6.0/en/api/x-process-correlation-headers-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","excerpt":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process …","ref":"/docs/main/v9.7.0/en/api/x-process-correlation-headers-v1/","title":"SkyWalking Cross Process Correlation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. 
Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/latest/en/api/x-process-propagation-headers-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. 
It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/next/en/api/x-process-propagation-headers-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. 
This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.0.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.1.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.2.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.3.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.4.0/en/api/x-process-propagation-headers-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.5.0/en/api/x-process-propagation-headers-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. 
SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.6.0/en/api/x-process-propagation-headers-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. 
You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
","excerpt":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an …","ref":"/docs/main/v9.7.0/en/api/x-process-propagation-headers-v3/","title":"SkyWalking Cross Process Propagation Headers Protocol"},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/latest/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/next/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/v9.3.0/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/v9.4.0/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/v9.5.0/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/v9.6.0/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","excerpt":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.","ref":"/docs/main/v9.7.0/en/setup/backend/metrics-exporter/","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter)."},{"body":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the SkyWalking community!\nSkyWalking Go is an open-source Golang auto-instrument agent that provides support for distributed tracing across different frameworks within the Golang language.\nTo use SkyWalking Go, simply import the base dependencies into your code and take advantage of the -toolexec parameter in Golang to enable hybrid compilation capabilities for various frameworks in your application.\n","excerpt":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the …","ref":"/docs/skywalking-go/latest/readme/","title":"SkyWalking Go Agent"},{"body":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. 
Welcome to the SkyWalking community!\nSkyWalking Go is an open-source Golang auto-instrument agent that provides support for distributed tracing across different frameworks within the Golang language.\nTo use SkyWalking Go, simply import the base dependencies into your code and take advantage of the -toolexec parameter in Golang to enable hybrid compilation capabilities for various frameworks in your application.\n","excerpt":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the …","ref":"/docs/skywalking-go/next/readme/","title":"SkyWalking Go Agent"},{"body":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the SkyWalking community!\nSkyWalking Go is an open-source Golang auto-instrument agent that provides support for distributed tracing across different frameworks within the Golang language.\nTo use SkyWalking Go, simply import the base dependencies into your code and take advantage of the -toolexec parameter in Golang to enable hybrid compilation capabilities for various frameworks in your application.\n","excerpt":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the …","ref":"/docs/skywalking-go/v0.4.0/readme/","title":"SkyWalking Go Agent"},{"body":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step configuration content. You can see the sample configuration files for different environments in the examples directory.\nThere is a quick view about the configuration file, and using the yaml format.\nsetup:# set up the environmentcleanup:# clean up the environmenttrigger:# generate trafficverify:# test casesSetup Support two kinds of the environment to set up the system.\nKinD setup:env:kindfile:path/to/kind.yaml # Specified kinD manifest file pathkubeconfig:path/.kube/config # The path of kubeconfigtimeout:20m # timeout durationinit-system-environment:path/to/env # Import environment filesteps:# customize steps for prepare the environment- name:customize setups # step name# one of command line or kinD manifest filecommand:command lines # use command line to setup path:/path/to/manifest.yaml # the manifest file pathwait:# how to verify the manifest is set up finish- namespace:# The pod namespaceresource:# The pod resource namelabel-selector:# The resource label selectorfor:# The wait conditionkind:import-images:# import docker images to KinD- image:version # support using env to expand image, such as `${env_key}` or `$env_key`expose-ports:# Expose resource for host access- namespace:# The resource namespaceresource:# The resource name, such as `pod/foo` or `service/foo`port:# Want to expose port from resource NOTE: The fields file and kubeconfig are mutually exclusive.\n The KinD environment follow these steps:\n [optional]Start the KinD cluster according to the config file, expose KUBECONFIG to environment for help execute kubectl in the next steps. [optional]Setup the kubeconfig field for help execute kubectl in the next steps. Load docker images from kind.import-images if needed. Apply the resources files (--manifests) or/and run the custom init command (--commands) by steps. Wait until all steps are finished and all services are ready with the timeout(second). Expose all resource ports for host access.  Import docker image If you want to import docker image from private registries, there are several ways to do this:\n Using imagePullSecrets to pull images, please take reference from document. 
Using kind.import-images to load images from host. kind:import-images:- skywalking/oap:${OAP_HASH}# support using environment to expand the image name  Resource Export If you want to access the resource from host, should follow these steps:\n Declare which resource and ports need to be accessible from host. setup:kind:expose-ports:- namespace:default # Need to expose resource namespaceresource:pod/foo # Resource description, such as `pod/foo` or `service/foo`port:8080# Resource port want to expose, support `\u0026lt;resource_port\u0026gt;`, `\u0026lt;bind_to_host_port\u0026gt;:\u0026lt;resource_port\u0026gt;` Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mapped port, the resource name replace all `/` or `-` as `_`# host format: \u0026lt;resource_name\u0026gt;_host# port format: \u0026lt;resource_name\u0026gt;_\u0026lt;container_port\u0026gt;url:http://${pod_foo_host}:${pod_foo_8080}/  Log The console output of each pod could be found in ${workDir}/logs/${namespace}/${podName}.log.\nCompose setup:env:composefile:path/to/compose.yaml # Specified docker-compose file pathtimeout:20m # Timeout durationinit-system-environment:path/to/env # Import environment filesteps:# Customize steps for prepare the environment- name:customize setups # Step namecommand:command lines # Use command line to setup The docker-compose environment follow these steps:\n Import init-system-environment file for help build service and execute steps. Each line of the file content is an environment variable, and the key value is separate by \u0026ldquo;=\u0026rdquo;. Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify.  Service Export If you want to get the service host and port mapping, should follow these steps:\n declare the port in the docker-compose service ports config. oap:image:xx.xx:1.0.0ports:# define the port- 8080 Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mappinged porturl:http://${oap_host}:${oap_8080}/  Log The console output of each service could be found in ${workDir}/logs/{serviceName}/std.log.\nTrigger After the Setup step is finished, use the Trigger step to generate traffic.\ntrigger:action:http # The action of the trigger. support HTTP invoke.interval:3s # Trigger the action every 3 seconds.times:5# The retry count before the request success.url:http://apache.skywalking.com/# Http trigger url link.method:GET # Http trigger method.headers:\u0026#34;Content-Type\u0026#34;: \u0026#34;application/json\u0026#34;\u0026#34;Authorization\u0026#34;: \u0026#34;Basic whatever\u0026#34;body:\u0026#39;{\u0026#34;k1\u0026#34;:\u0026#34;v1\u0026#34;, \u0026#34;k2\u0026#34;:\u0026#34;v2\u0026#34;}\u0026#39;The Trigger executed successfully at least once, after success, the next stage could be continued. Otherwise, there is an error and exit.\nVerify After the Trigger step is finished, running test cases.\nverify:retry:# verify with retry strategycount:10# max retry countinterval:10s # the interval between two attempts, e.g. 10s, 1m.fail-fast:true# when a case fails, whether to stop verifying other cases. This property defaults to true.concurrency:false# whether to verify cases concurrently. 
This property defaults to false.cases:# verify test cases- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file path- includes:# including cases- path/to/cases.yaml # cases file pathThe test cases are executed in the order of declaration from top to bottom. When the execution of a case fails and the retry strategy is exceeded, it will stop verifying other cases if fail-fast is true. Otherwise, the process will continue to verify other cases.\nRetry strategy The retry strategy could retry automatically on the test case failure, and restart by the failed test case.\nCase source Support two kind source to verify, one case only supports one kind source type:\n source file: verify by generated yaml format file. command: use command line output as they need to verify content, also only support yaml format.  Excepted verify template After clarifying the content that needs to be verified, you need to write content to verify the real content and ensure that the data is correct.\nYou need to use the form of Go Template to write the verification file, and the data content to be rendered comes from the real data. By verifying whether the rendered data is consistent with the real data, it is verified whether the content is consistent. You could see many test cases in this directory.\nWe use go-cmp to show the parts where excepted do not match the actual data. - prefix represents the expected data content, + prefix represents the actual data content.\nWe have done a lot of extension functions for verification functions on the original Go Template.\nExtension functions Extension functions are used to help users quickly locate the problem content and write test cases that are easier to use.\nBasic Matches Verify that the number fits the range.\n   Function Description Grammar Verify success Verify failure     gt Verify the first param is greater than second param {{gt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   ge Verify the first param is greater than or equals second param {{ge param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   lt Verify the first param is less than second param {{lt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   le Verify the first param is less than or equals second param {{le param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   regexp Verify the first param matches the second regular expression {{regexp param1 param2}} param1 \u0026lt;\u0026quot;$param1\u0026quot; does not match the pattern $param2\u0026quot;\u0026gt;   notEmpty Verify The param is not empty {{notEmpty param}} param \u0026lt;\u0026quot;\u0026quot; is empty, wanted is not empty\u0026gt;   hasPrefix Verify The string param has the same prefix. {{hasPrefix param1 param2}} true false   hasSuffix Verify The string param has the same suffix. 
{{hasSuffix param1 param2}} true false    List Matches Verify the data in the condition list, Currently, it is only supported when all the conditions in the list are executed, it is considered as successful.\nHere is an example, It\u0026rsquo;s means the list values must have value is greater than 0, also have value greater than 1, Otherwise verify is failure.\n{{- contains .list }}- key:{{gt .value 0 }}- key:{{gt .value 1 }}{{- end }}Encoding In order to make the program easier for users to read and use, some code conversions are provided.\n   Function Description Grammar Result     b64enc Base64 encode {{ b64enc \u0026ldquo;Foo\u0026rdquo; }} Zm9v   sha256enc Sha256 encode {{ sha256enc \u0026ldquo;Foo\u0026rdquo; }} 1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa   sha512enc Sha512 encode {{ sha512enc \u0026ldquo;Foo\u0026rdquo; }} 4abcd2639957cb23e33f63d70659b602a5923fafcfd2768ef79b0badea637e5c837161aa101a557a1d4deacbd912189e2bb11bf3c0c0c70ef7797217da7e8207    Reuse cases You could include multiple cases into one single E2E verify, It\u0026rsquo;s helpful for reusing the same verify cases.\nHere is the reused verify cases, and using includes configuration item to include this into E2E config.\ncases:- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file pathCleanup After the E2E finished, how to clean up the environment.\ncleanup:on:always # Clean up strategyIf the on option under cleanup is not set, it will be automatically set to always if there is environment variable CI=true, which is present on many popular CI services, such as GitHub Actions, CircleCI, etc., otherwise it will be set to success, so the testing environment can be preserved when tests failed in your local machine.\nAll available strategies:\n always: No matter the execution result is success or failure, cleanup will be performed. success: Only when the execution succeeds. failure: Only when the execution failed. never: Never clean up the environment.  ","excerpt":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step …","ref":"/docs/skywalking-infra-e2e/latest/en/setup/configuration-file/","title":"SkyWalking Infra E2E Configuration Guide"},{"body":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step configuration content. 
You can see the sample configuration files for different environments in the examples directory.\nThere is a quick view about the configuration file, and using the yaml format.\nsetup:# set up the environmentcleanup:# clean up the environmenttrigger:# generate trafficverify:# test casesSetup Support two kinds of the environment to set up the system.\nKinD setup:env:kindfile:path/to/kind.yaml # Specified kinD manifest file pathkubeconfig:path/.kube/config # The path of kubeconfigtimeout:20m # timeout durationinit-system-environment:path/to/env # Import environment filesteps:# customize steps for prepare the environment- name:customize setups # step name# one of command line or kinD manifest filecommand:command lines # use command line to setup path:/path/to/manifest.yaml # the manifest file pathwait:# how to verify the manifest is set up finish- namespace:# The pod namespaceresource:# The pod resource namelabel-selector:# The resource label selectorfor:# The wait conditionkind:import-images:# import docker images to KinD- image:version # support using env to expand image, such as `${env_key}` or `$env_key`expose-ports:# Expose resource for host access- namespace:# The resource namespaceresource:# The resource name, such as `pod/foo` or `service/foo`port:# Want to expose port from resource NOTE: The fields file and kubeconfig are mutually exclusive.\n The KinD environment follow these steps:\n [optional]Start the KinD cluster according to the config file, expose KUBECONFIG to environment for help execute kubectl in the next steps. [optional]Setup the kubeconfig field for help execute kubectl in the next steps. Load docker images from kind.import-images if needed. Apply the resources files (--manifests) or/and run the custom init command (--commands) by steps. Wait until all steps are finished and all services are ready with the timeout(second). Expose all resource ports for host access.  Import docker image If you want to import docker image from private registries, there are several ways to do this:\n Using imagePullSecrets to pull images, please take reference from document. Using kind.import-images to load images from host. kind:import-images:- skywalking/oap:${OAP_HASH}# support using environment to expand the image name  Resource Export If you want to access the resource from host, should follow these steps:\n Declare which resource and ports need to be accessible from host. setup:kind:expose-ports:- namespace:default # Need to expose resource namespaceresource:pod/foo # Resource description, such as `pod/foo` or `service/foo`port:8080# Resource port want to expose, support `\u0026lt;resource_port\u0026gt;`, `\u0026lt;bind_to_host_port\u0026gt;:\u0026lt;resource_port\u0026gt;` Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). 
trigger:# trigger with specified mapped port, the resource name replace all `/` or `-` as `_`# host format: \u0026lt;resource_name\u0026gt;_host# port format: \u0026lt;resource_name\u0026gt;_\u0026lt;container_port\u0026gt;url:http://${pod_foo_host}:${pod_foo_8080}/  Log The console output of each pod could be found in ${workDir}/logs/${namespace}/${podName}.log.\nCompose setup:env:composefile:path/to/compose.yaml # Specified docker-compose file pathtimeout:20m # Timeout durationinit-system-environment:path/to/env # Import environment filesteps:# Customize steps for prepare the environment- name:customize setups # Step namecommand:command lines # Use command line to setup The docker-compose environment follow these steps:\n Import init-system-environment file for help build service and execute steps. Each line of the file content is an environment variable, and the key value is separate by \u0026ldquo;=\u0026rdquo;. Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify.  Service Export If you want to get the service host and port mapping, should follow these steps:\n declare the port in the docker-compose service ports config. oap:image:xx.xx:1.0.0ports:# define the port- 8080 Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mappinged porturl:http://${oap_host}:${oap_8080}/  Log The console output of each service could be found in ${workDir}/logs/{serviceName}/std.log.\nTrigger After the Setup step is finished, use the Trigger step to generate traffic.\ntrigger:action:http # The action of the trigger. support HTTP invoke.interval:3s # Trigger the action every 3 seconds.times:5# The retry count before the request success.url:http://apache.skywalking.com/# Http trigger url link.method:GET # Http trigger method.headers:\u0026#34;Content-Type\u0026#34;: \u0026#34;application/json\u0026#34;\u0026#34;Authorization\u0026#34;: \u0026#34;Basic whatever\u0026#34;body:\u0026#39;{\u0026#34;k1\u0026#34;:\u0026#34;v1\u0026#34;, \u0026#34;k2\u0026#34;:\u0026#34;v2\u0026#34;}\u0026#39;The Trigger executed successfully at least once, after success, the next stage could be continued. Otherwise, there is an error and exit.\nVerify After the Trigger step is finished, running test cases.\nverify:retry:# verify with retry strategycount:10# max retry countinterval:10s # the interval between two attempts, e.g. 10s, 1m.fail-fast:true# when a case fails, whether to stop verifying other cases. This property defaults to true.concurrency:false# whether to verify cases concurrently. This property defaults to false.cases:# verify test cases- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file path- includes:# including cases- path/to/cases.yaml # cases file pathThe test cases are executed in the order of declaration from top to bottom. When the execution of a case fails and the retry strategy is exceeded, it will stop verifying other cases if fail-fast is true. 
Otherwise, the process will continue to verify other cases.\nRetry strategy The retry strategy could retry automatically on the test case failure, and restart by the failed test case.\nCase source Support two kind source to verify, one case only supports one kind source type:\n source file: verify by generated yaml format file. command: use command line output as they need to verify content, also only support yaml format.  Excepted verify template After clarifying the content that needs to be verified, you need to write content to verify the real content and ensure that the data is correct.\nYou need to use the form of Go Template to write the verification file, and the data content to be rendered comes from the real data. By verifying whether the rendered data is consistent with the real data, it is verified whether the content is consistent. You could see many test cases in this directory.\nWe use go-cmp to show the parts where excepted do not match the actual data. - prefix represents the expected data content, + prefix represents the actual data content.\nWe have done a lot of extension functions for verification functions on the original Go Template.\nExtension functions Extension functions are used to help users quickly locate the problem content and write test cases that are easier to use.\nBasic Matches Verify that the number fits the range.\n   Function Description Grammar Verify success Verify failure     gt Verify the first param is greater than second param {{gt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   ge Verify the first param is greater than or equals second param {{ge param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   lt Verify the first param is less than second param {{lt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   le Verify the first param is less than or equals second param {{le param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   regexp Verify the first param matches the second regular expression {{regexp param1 param2}} param1 \u0026lt;\u0026quot;$param1\u0026quot; does not match the pattern $param2\u0026quot;\u0026gt;   notEmpty Verify The param is not empty {{notEmpty param}} param \u0026lt;\u0026quot;\u0026quot; is empty, wanted is not empty\u0026gt;   hasPrefix Verify The string param has the same prefix. {{hasPrefix param1 param2}} true false   hasSuffix Verify The string param has the same suffix. 
{{hasSuffix param1 param2}} true false    List Matches Verify the data in the condition list, Currently, it is only supported when all the conditions in the list are executed, it is considered as successful.\nHere is an example, It\u0026rsquo;s means the list values must have value is greater than 0, also have value greater than 1, Otherwise verify is failure.\n{{- contains .list }}- key:{{gt .value 0 }}- key:{{gt .value 1 }}{{- end }}Encoding In order to make the program easier for users to read and use, some code conversions are provided.\n   Function Description Grammar Result     b64enc Base64 encode {{ b64enc \u0026ldquo;Foo\u0026rdquo; }} Zm9v   sha256enc Sha256 encode {{ sha256enc \u0026ldquo;Foo\u0026rdquo; }} 1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa   sha512enc Sha512 encode {{ sha512enc \u0026ldquo;Foo\u0026rdquo; }} 4abcd2639957cb23e33f63d70659b602a5923fafcfd2768ef79b0badea637e5c837161aa101a557a1d4deacbd912189e2bb11bf3c0c0c70ef7797217da7e8207    Reuse cases You could include multiple cases into one single E2E verify, It\u0026rsquo;s helpful for reusing the same verify cases.\nHere is the reused verify cases, and using includes configuration item to include this into E2E config.\ncases:- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file pathCleanup After the E2E finished, how to clean up the environment.\ncleanup:on:always # Clean up strategyIf the on option under cleanup is not set, it will be automatically set to always if there is environment variable CI=true, which is present on many popular CI services, such as GitHub Actions, CircleCI, etc., otherwise it will be set to success, so the testing environment can be preserved when tests failed in your local machine.\nAll available strategies:\n always: No matter the execution result is success or failure, cleanup will be performed. success: Only when the execution succeeds. failure: Only when the execution failed. never: Never clean up the environment.  ","excerpt":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step …","ref":"/docs/skywalking-infra-e2e/next/en/setup/configuration-file/","title":"SkyWalking Infra E2E Configuration Guide"},{"body":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step configuration content. 
You can see the sample configuration files for different environments in the examples directory.\nThere is a quick view about the configuration file, and using the yaml format.\nsetup:# set up the environmentcleanup:# clean up the environmenttrigger:# generate trafficverify:# test casesSetup Support two kinds of the environment to set up the system.\nKinD setup:env:kindfile:path/to/kind.yaml # Specified kinD manifest file pathkubeconfig:path/.kube/config # The path of kubeconfigtimeout:20m # timeout durationinit-system-environment:path/to/env # Import environment filesteps:# customize steps for prepare the environment- name:customize setups # step name# one of command line or kinD manifest filecommand:command lines # use command line to setup path:/path/to/manifest.yaml # the manifest file pathwait:# how to verify the manifest is set up finish- namespace:# The pod namespaceresource:# The pod resource namelabel-selector:# The resource label selectorfor:# The wait conditionkind:import-images:# import docker images to KinD- image:version # support using env to expand image, such as `${env_key}` or `$env_key`expose-ports:# Expose resource for host access- namespace:# The resource namespaceresource:# The resource name, such as `pod/foo` or `service/foo`port:# Want to expose port from resource NOTE: The fields file and kubeconfig are mutually exclusive.\n The KinD environment follow these steps:\n [optional]Start the KinD cluster according to the config file, expose KUBECONFIG to environment for help execute kubectl in the next steps. [optional]Setup the kubeconfig field for help execute kubectl in the next steps. Load docker images from kind.import-images if needed. Apply the resources files (--manifests) or/and run the custom init command (--commands) by steps. Wait until all steps are finished and all services are ready with the timeout(second). Expose all resource ports for host access.  Import docker image If you want to import docker image from private registries, there are several ways to do this:\n Using imagePullSecrets to pull images, please take reference from document. Using kind.import-images to load images from host. kind:import-images:- skywalking/oap:${OAP_HASH}# support using environment to expand the image name  Resource Export If you want to access the resource from host, should follow these steps:\n Declare which resource and ports need to be accessible from host. setup:kind:expose-ports:- namespace:default # Need to expose resource namespaceresource:pod/foo # Resource description, such as `pod/foo` or `service/foo`port:8080# Resource port want to expose, support `\u0026lt;resource_port\u0026gt;`, `\u0026lt;bind_to_host_port\u0026gt;:\u0026lt;resource_port\u0026gt;` Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). 
trigger:# trigger with specified mapped port, the resource name replace all `/` or `-` as `_`# host format: \u0026lt;resource_name\u0026gt;_host# port format: \u0026lt;resource_name\u0026gt;_\u0026lt;container_port\u0026gt;url:http://${pod_foo_host}:${pod_foo_8080}/  Log The console output of each pod could be found in ${workDir}/logs/${namespace}/${podName}.log.\nCompose setup:env:composefile:path/to/compose.yaml # Specified docker-compose file pathtimeout:20m # Timeout durationinit-system-environment:path/to/env # Import environment filesteps:# Customize steps for prepare the environment- name:customize setups # Step namecommand:command lines # Use command line to setup The docker-compose environment follow these steps:\n Import init-system-environment file for help build service and execute steps. Each line of the file content is an environment variable, and the key value is separate by \u0026ldquo;=\u0026rdquo;. Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify.  Service Export If you want to get the service host and port mapping, should follow these steps:\n declare the port in the docker-compose service ports config. oap:image:xx.xx:1.0.0ports:# define the port- 8080 Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mappinged porturl:http://${oap_host}:${oap_8080}/  Log The console output of each service could be found in ${workDir}/logs/{serviceName}/std.log.\nTrigger After the Setup step is finished, use the Trigger step to generate traffic.\ntrigger:action:http # The action of the trigger. support HTTP invoke.interval:3s # Trigger the action every 3 seconds.times:5# The retry count before the request success.url:http://apache.skywalking.com/# Http trigger url link.method:GET # Http trigger method.headers:\u0026#34;Content-Type\u0026#34;: \u0026#34;application/json\u0026#34;\u0026#34;Authorization\u0026#34;: \u0026#34;Basic whatever\u0026#34;body:\u0026#39;{\u0026#34;k1\u0026#34;:\u0026#34;v1\u0026#34;, \u0026#34;k2\u0026#34;:\u0026#34;v2\u0026#34;}\u0026#39;The Trigger executed successfully at least once, after success, the next stage could be continued. Otherwise, there is an error and exit.\nVerify After the Trigger step is finished, running test cases.\nverify:retry:# verify with retry strategycount:10# max retry countinterval:10s # the interval between two attempts, e.g. 10s, 1m.fail-fast:true# when a case fails, whether to stop verifying other cases. This property defaults to true.concurrency:false# whether to verify cases concurrently. This property defaults to false.cases:# verify test cases- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file path- includes:# including cases- path/to/cases.yaml # cases file pathThe test cases are executed in the order of declaration from top to bottom. When the execution of a case fails and the retry strategy is exceeded, it will stop verifying other cases if fail-fast is true. 
Otherwise, the process will continue to verify other cases.\nRetry strategy The retry strategy could retry automatically on the test case failure, and restart by the failed test case.\nCase source Support two kind source to verify, one case only supports one kind source type:\n source file: verify by generated yaml format file. command: use command line output as they need to verify content, also only support yaml format.  Excepted verify template After clarifying the content that needs to be verified, you need to write content to verify the real content and ensure that the data is correct.\nYou need to use the form of Go Template to write the verification file, and the data content to be rendered comes from the real data. By verifying whether the rendered data is consistent with the real data, it is verified whether the content is consistent. You could see many test cases in this directory.\nWe use go-cmp to show the parts where excepted do not match the actual data. - prefix represents the expected data content, + prefix represents the actual data content.\nWe have done a lot of extension functions for verification functions on the original Go Template.\nExtension functions Extension functions are used to help users quickly locate the problem content and write test cases that are easier to use.\nBasic Matches Verify that the number fits the range.\n   Function Description Grammar Verify success Verify failure     gt Verify the first param is greater than second param {{gt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   ge Verify the first param is greater than or equals second param {{ge param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   lt Verify the first param is less than second param {{lt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   le Verify the first param is less than or equals second param {{le param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   regexp Verify the first param matches the second regular expression {{regexp param1 param2}} param1 \u0026lt;\u0026quot;$param1\u0026quot; does not match the pattern $param2\u0026quot;\u0026gt;   notEmpty Verify The param is not empty {{notEmpty param}} param \u0026lt;\u0026quot;\u0026quot; is empty, wanted is not empty\u0026gt;   hasPrefix Verify The string param has the same prefix. {{hasPrefix param1 param2}} true false   hasSuffix Verify The string param has the same suffix. 
{{hasSuffix param1 param2}} true false    List Matches Verify the data in the condition list, Currently, it is only supported when all the conditions in the list are executed, it is considered as successful.\nHere is an example, It\u0026rsquo;s means the list values must have value is greater than 0, also have value greater than 1, Otherwise verify is failure.\n{{- contains .list }}- key:{{gt .value 0 }}- key:{{gt .value 1 }}{{- end }}Encoding In order to make the program easier for users to read and use, some code conversions are provided.\n   Function Description Grammar Result     b64enc Base64 encode {{ b64enc \u0026ldquo;Foo\u0026rdquo; }} Zm9v   sha256enc Sha256 encode {{ sha256enc \u0026ldquo;Foo\u0026rdquo; }} 1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa   sha512enc Sha512 encode {{ sha512enc \u0026ldquo;Foo\u0026rdquo; }} 4abcd2639957cb23e33f63d70659b602a5923fafcfd2768ef79b0badea637e5c837161aa101a557a1d4deacbd912189e2bb11bf3c0c0c70ef7797217da7e8207    Reuse cases You could include multiple cases into one single E2E verify, It\u0026rsquo;s helpful for reusing the same verify cases.\nHere is the reused verify cases, and using includes configuration item to include this into E2E config.\ncases:- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file pathCleanup After the E2E finished, how to clean up the environment.\ncleanup:on:always # Clean up strategyIf the on option under cleanup is not set, it will be automatically set to always if there is environment variable CI=true, which is present on many popular CI services, such as GitHub Actions, CircleCI, etc., otherwise it will be set to success, so the testing environment can be preserved when tests failed in your local machine.\nAll available strategies:\n always: No matter the execution result is success or failure, cleanup will be performed. success: Only when the execution succeeds. failure: Only when the execution failed. never: Never clean up the environment.  ","excerpt":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/configuration-file/","title":"SkyWalking Infra E2E Configuration Guide"},{"body":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for local debugging and operation. GitHub Action: Suitable for automated execution in GitHub projects.  
Command Through commands, you can execute a complete Controller.\n# e2e.yaml configuration file in current directory e2e run # or  # Specified the e2e.yaml file path e2e run -c /path/to/the/test/e2e.yaml Also, could run the separate step in the command line, these commands are all done by reading the configuration.\ne2e setup e2e trigger e2e verify e2e cleanup GitHub Action To use skywalking-infra-e2e in GitHub Actions, add a step in your GitHub workflow.\nThe working directory could be uploaded to GitHub Action Artifact after the task is completed, which contains environment variables and container logs in the environment.\n- name:Run E2E Testuses:apache/skywalking-infra-e2e@main # always prefer to use a revision instead of `main`.with:e2e-file:e2e.yaml # (required)need to run E2E file pathlog-dir:/path/to/log/dir # (Optional)Use `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;_\u0026lt;matrix_value\u0026gt;`(if have GHA matrix) or `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;` in GHA, and output logs into `\u0026lt;work_dir\u0026gt;/logs` out of GHA env, such as running locally.If you want to upload the log directory to the GitHub Action Artifact when this E2E test failure, you could define the below content in your GitHub Action Job.\n- name:Upload E2E Loguses:actions/upload-artifact@v2if:${{ failure() }} # Only upload the artifact when E2E testing failurewith:name:e2e-logpath:\u0026#34;${{ env.SW_INFRA_E2E_LOG_DIR }}\u0026#34;# The SkyWalking Infra E2E action sets SW_INFRA_E2E_LOG_DIR automatically. ","excerpt":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for …","ref":"/docs/skywalking-infra-e2e/latest/en/setup/run-e2e-tests/","title":"SkyWalking Infra E2E Execute Guide"},{"body":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for local debugging and operation. GitHub Action: Suitable for automated execution in GitHub projects.  Command Through commands, you can execute a complete Controller.\n# e2e.yaml configuration file in current directory e2e run # or  # Specified the e2e.yaml file path e2e run -c /path/to/the/test/e2e.yaml Also, could run the separate step in the command line, these commands are all done by reading the configuration.\ne2e setup e2e trigger e2e verify e2e cleanup GitHub Action To use skywalking-infra-e2e in GitHub Actions, add a step in your GitHub workflow.\nThe working directory could be uploaded to GitHub Action Artifact after the task is completed, which contains environment variables and container logs in the environment.\n- name:Run E2E Testuses:apache/skywalking-infra-e2e@main # always prefer to use a revision instead of `main`.with:e2e-file:e2e.yaml # (required)need to run E2E file pathlog-dir:/path/to/log/dir # (Optional)Use `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;_\u0026lt;matrix_value\u0026gt;`(if have GHA matrix) or `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;` in GHA, and output logs into `\u0026lt;work_dir\u0026gt;/logs` out of GHA env, such as running locally.If you want to upload the log directory to the GitHub Action Artifact when this E2E test failure, you could define the below content in your GitHub Action Job.\n- name:Upload E2E Loguses:actions/upload-artifact@v2if:${{ failure() }} # Only upload the artifact when E2E testing failurewith:name:e2e-logpath:\u0026#34;${{ env.SW_INFRA_E2E_LOG_DIR }}\u0026#34;# The SkyWalking Infra E2E action sets SW_INFRA_E2E_LOG_DIR automatically. 
","excerpt":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for …","ref":"/docs/skywalking-infra-e2e/next/en/setup/run-e2e-tests/","title":"SkyWalking Infra E2E Execute Guide"},{"body":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for local debugging and operation. GitHub Action: Suitable for automated execution in GitHub projects.  Command Through commands, you can execute a complete Controller.\n# e2e.yaml configuration file in current directory e2e run # or  # Specified the e2e.yaml file path e2e run -c /path/to/the/test/e2e.yaml Also, could run the separate step in the command line, these commands are all done by reading the configuration.\ne2e setup e2e trigger e2e verify e2e cleanup GitHub Action To use skywalking-infra-e2e in GitHub Actions, add a step in your GitHub workflow.\nThe working directory could be uploaded to GitHub Action Artifact after the task is completed, which contains environment variables and container logs in the environment.\n- name:Run E2E Testuses:apache/skywalking-infra-e2e@main # always prefer to use a revision instead of `main`.with:e2e-file:e2e.yaml # (required)need to run E2E file pathlog-dir:/path/to/log/dir # (Optional)Use `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;_\u0026lt;matrix_value\u0026gt;`(if have GHA matrix) or `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;` in GHA, and output logs into `\u0026lt;work_dir\u0026gt;/logs` out of GHA env, such as running locally.If you want to upload the log directory to the GitHub Action Artifact when this E2E test failure, you could define the below content in your GitHub Action Job.\n- name:Upload E2E Loguses:actions/upload-artifact@v2if:${{ failure() }} # Only upload the artifact when E2E testing failurewith:name:e2e-logpath:\u0026#34;${{ env.SW_INFRA_E2E_LOG_DIR }}\u0026#34;# The SkyWalking Infra E2E action sets SW_INFRA_E2E_LOG_DIR automatically. ","excerpt":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for …","ref":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/run-e2e-tests/","title":"SkyWalking Infra E2E Execute Guide"},{"body":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","excerpt":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the …","ref":"/docs/skywalking-java/latest/readme/","title":"SkyWalking Java Agent"},{"body":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","excerpt":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the …","ref":"/docs/skywalking-java/next/readme/","title":"SkyWalking Java Agent"},{"body":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. 
Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","excerpt":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the …","ref":"/docs/skywalking-java/v9.0.0/readme/","title":"SkyWalking Java Agent"},{"body":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","excerpt":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the …","ref":"/docs/skywalking-java/v9.1.0/readme/","title":"SkyWalking Java Agent"},{"body":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","excerpt":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the …","ref":"/docs/skywalking-java/v9.2.0/readme/","title":"SkyWalking Java Agent"},{"body":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. 
If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. 
Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 docker.push.java21 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  ","excerpt":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-java/latest/en/contribution/release-java-agent/","title":"SkyWalking Java Agent Release Guide"},{"body":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  
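One way to carry out the svn upload described above is with the svn command-line client. The checkout directory name, the source paths, and the commit message below are placeholders, and x.y.z stands for the release version as elsewhere in this guide.

  # illustrative sketch; adjust paths to wherever create_release.sh left the artifacts
  svn checkout https://dist.apache.org/repos/dist/dev/skywalking/java-agent/ dev-java-agent
  cd dev-java-agent
  mkdir x.y.z
  cp /path/to/tools/releasing/apache-skywalking-java-agent-x.y.z-src.tgz{,.asc,.sha512} x.y.z/
  cp /path/to/tools/releasing/apache-skywalking-java-agent-x.y.z/apache-skywalking-java-agent-x.y.z.tgz{,.asc,.sha512} x.y.z/
  svn add x.y.z
  svn commit -m 'Upload SkyWalking Java Agent x.y.z release candidate'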
Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. 
(No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 docker.push.java21 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  
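For anyone casting a vote, the checklist above boils down to a handful of commands; the sketch below shows one possible sequence. The extracted directory name is a placeholder, and x.y.z is the version under vote.

  # illustrative verification sketch for a release candidate
  curl -O https://dist.apache.org/repos/dist/release/skywalking/KEYS
  gpg --import KEYS
  curl -O https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z/apache-skywalking-java-agent-x.y.z-src.tgz
  curl -O https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z/apache-skywalking-java-agent-x.y.z-src.tgz.sha512
  curl -O https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z/apache-skywalking-java-agent-x.y.z-src.tgz.asc
  shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512
  gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-java-agent-x.y.z-src.tgz
  tar -xzf apache-skywalking-java-agent-x.y.z-src.tgz
  cd apache-skywalking-java-agent-x.y.z-src   # placeholder for the extracted directory name
  ./mvnw clean package
  docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check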
","excerpt":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-java/next/en/contribution/release-java-agent/","title":"SkyWalking Java Agent Release Guide"},{"body":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  
apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. 
Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  ","excerpt":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-java/v9.0.0/en/contribution/release-java-agent/","title":"SkyWalking Java Agent Release Guide"},{"body":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. 
If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. 
Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  ","excerpt":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-java/v9.1.0/en/contribution/release-java-agent/","title":"SkyWalking Java Agent Release Guide"},{"body":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  
Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. 
(No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 docker.push.java21 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  
","excerpt":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release …","ref":"/docs/skywalking-java/v9.2.0/en/contribution/release-java-agent/","title":"SkyWalking Java Agent Release Guide"},{"body":"SkyWalking Kubernetes Event Exporter User Guide SkyWalking Kubernetes Event Exporter is able to watch, filter, and send Kubernetes events into the Apache SkyWalking backend.\nDemo Step 1: Create a Local Kubernetes Cluster Please follow step 1 to 3 in getting started to create a cluster.\nStep 2: Deploy OAP server and Event Exporter Create the skywalking-system namespace.\n$ kubectl create namespace skywalking-system Deploy an OAP server and an event exporter.\ncat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: OAPServer metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-oap-server:9.5.0 service: template: type: ClusterIP --- apiVersion: operator.skywalking.apache.org/v1alpha1 kind: EventExporter metadata: name: skywalking-system namespace: skywalking-system spec: replicas: 1 config: | filters: - reason: \u0026#34;\u0026#34; message: \u0026#34;\u0026#34; minCount: 1 type: \u0026#34;\u0026#34; action: \u0026#34;\u0026#34; kind: \u0026#34;Pod|Service\u0026#34; namespace: \u0026#34;^skywalking-system$\u0026#34; name: \u0026#34;\u0026#34; service: \u0026#34;[^\\\\s]{1,}\u0026#34; exporters: - skywalking exporters: skywalking: template: source: service: \u0026#34;{{ .Service.Name }}\u0026#34; serviceInstance: \u0026#34;{{ .Pod.Name }}\u0026#34; endpoint: \u0026#34;\u0026#34; message: \u0026#34;{{ .Event.Message }}\u0026#34; address: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34; EOF Wait until both components are ready\u0026hellip;\n$ kubectl get pod -n skywalking-system NAME READY STATUS RESTARTS AGE skywalking-system-eventexporter-566db46fb6-npx8v 1/1 Running 0 50s skywalking-system-oap-68bd877f57-zs8hw 1/1 Running 0 50s Step 3: Check Reported Events We can verify k8s events is reported to the OAP server by using skywalking-cli.\nFirst, port-forward the OAP http service to your local machine.\n$ kubectl port-forward svc/skywalking-system-oap 12800:12800 -n skywalking-system Next, use swctl to list reported events in YAML format.\n$ swctl --display yaml event ls The output should contain k8s events of the OAP server.\nevents:- uuid:1d5bfe48-bc8d-4f5a-9680-188f59793459source:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Pulledtype:Normalmessage:Successfully pulled image \u0026#34;apache/skywalking-oap-server:9.5.0\u0026#34; in 6m4.108914335sparameters:[]starttime:1713793327000endtime:1713793327000layer:K8S- uuid:f576f6ad-748d-4cec -9260-6587c145550esource:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Createdtype:Normalmessage:Created container oapparameters:[]starttime:1713793327000endtime:1713793327000layer:K8S- uuid:0cec5b55-4cb0-4ff7-a670-a097609c531fsource:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Startedtype:Normalmessage:Started container oapparameters:[]starttime:1713793327000endtime:1713793327000layer:K8S- 
uuid:28f0d004-befe-4c27-a7b7-dfdc4dd755fasource:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Pullingtype:Normalmessage:Pulling image \u0026#34;apache/skywalking-oap-server:9.5.0\u0026#34;parameters:[]starttime:1713792963000endtime:1713792963000layer:K8S- uuid:6d766801-5057-42c0-aa63-93ce1e201418source:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Scheduledtype:Normalmessage:Successfully assigned skywalking-system/skywalking-system-oap-68bd877f57-cvkjbto kind-workerparameters:[]starttime:1713792963000endtime:1713792963000layer:K8SWe can also verify by checking logs of the event exporter.\nkubectl logs -f skywalking-system-eventexporter-566db46fb6-npx8v -n skywalking-system ... DEBUG done: rendered event is: uuid:\u0026#34;8d8c2bd1-1812-4b0c-8237-560688366280\u0026#34; source:{service:\u0026#34;skywalking-system-oap\u0026#34; serviceInstance:\u0026#34;skywalking-system-oap-68bd877f57-zs8hw\u0026#34;} name:\u0026#34;Started\u0026#34; message:\u0026#34;Started container oap\u0026#34; startTime:1713795214000 endTime:1713795214000 layer:\u0026#34;K8S\u0026#34; Spec    name description default value     image Docker image of the event exporter. apache/skywalking-kubernetes-event-exporter:latest   replicas Number of event exporter pods. 1   config Configuration of filters and exporters in YAML format. \u0026quot;\u0026quot;    Please note: if you ignore the config field, no filters or exporter will be created.\nThis is because the EventExporter controller creates a configMap for all config values and attach the configMap to the event exporter container as configuration file. Ignoring the config field means an empty configuration file (with content \u0026quot;\u0026quot;) is provided to the event exporter.\nStatus    name description     availableReplicas Total number of available event exporter pods.   conditions Latest available observations of the underlying deployment\u0026rsquo;s current state   configMapName Name of the underlying configMap.    Configuration The event exporter supports reporting specific events by different exporters. We can add filter configs to choose which events we are interested in, and include exporter names in each filter config to tell event exporter how to export filtered events.\nAn example configuration is listed below:\nfilters:- reason:\u0026#34;\u0026#34;message:\u0026#34;\u0026#34;minCount:1type:\u0026#34;\u0026#34;action:\u0026#34;\u0026#34;kind:\u0026#34;Pod|Service\u0026#34;namespace:\u0026#34;^default$\u0026#34;name:\u0026#34;\u0026#34;service:\u0026#34;[^\\\\s]{1,}\u0026#34;exporters:- skywalkingexporters:skywalking:template:source:service:\u0026#34;{{ .Service.Name }}\u0026#34;serviceInstance:\u0026#34;{{ .Pod.Name }}\u0026#34;endpoint:\u0026#34;\u0026#34;message:\u0026#34;{{ .Event.Message }}\u0026#34;address:\u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34;Filter Config    name description example     reason Filter events of the specified reason, regular expression like \u0026quot;Killing\\|Killed\u0026quot; is supported. \u0026quot;\u0026quot;   message Filter events of the specified message, regular expression like \u0026quot;Pulling container.*\u0026quot; is supported. \u0026quot;\u0026quot;   minCount Filter events whose count is \u0026gt;= the specified value. 1   type Filter events of the specified type, regular expression like \u0026quot;Normal\\|Error\u0026quot; is supported. 
\u0026quot;\u0026quot;   action Filter events of the specified action, regular expression is supported. \u0026quot;\u0026quot;   kind Filter events of the specified kind, regular expression like \u0026quot;Pod\\|Service\u0026quot; is supported. \u0026quot;Pod\\|Service\u0026quot;   namespace Filter events from the specified namespace, regular expression like \u0026quot;default\\|bookinfo\u0026quot; is supported, empty means all namespaces. \u0026quot;^default$\u0026quot;   name Filter events of the specified involved object name, regular expression like \u0026quot;.*bookinfo.*\u0026quot; is supported. \u0026quot;\u0026quot;   service Filter events belonging to services whose name is not empty. \u0026quot;[^\\\\s]{1,}\u0026quot;   exporters Events satisfy this filter can be exported into several exporters that are defined below. [\u0026quot;skywalking\u0026quot;]    Skywalking Exporter Config SkyWalking exporter exports the events into Apache SkyWalking OAP server using grpc.\n   name description example     address The SkyWalking backend address where this exporter will export to. \u0026quot;skywalking-system-oap.skywalking-system:11800\u0026quot;   enableTLS Whether to use TLS for grpc server connection validation.  If TLS is enabled, the trustedCertPath is required, but clientCertPath and clientKeyPath are optional. false   clientCertPath Path of the X.509 certificate file. \u0026quot;\u0026quot;   clientKeyPath Path of the X.509 private key file. \u0026quot;\u0026quot;   trustedCertPath Path of the root certificate file. \u0026quot;\u0026quot;   insecureSkipVerify Whether a client verifies the server\u0026rsquo;s certificate chain and host name. Check tls.Config for more details. false   template The event template of SkyWalking exporter, it can be composed of metadata like Event, Pod, and Service.    template.source Event source information.    template.source.service Service name, can be a template string. \u0026quot;{{ .Service.Name }}\u0026quot;   template.source.serviceInstance Service instance name, can be a template string. \u0026quot;{{ .Pod.Name }}\u0026quot;   template.source.endpoint Endpoint, can be a template string. \u0026quot;\u0026quot;   template.message Message format, can be a template string. \u0026quot;{{ .Event.Message }}\u0026quot;    Console Exporter Config Console exporter exports the events into console logs, this exporter is typically used for debugging.\n   name description example     template The event template of SkyWalking exporter, it can be composed of metadata like Event, Pod, and Service.    template.source Event source information.    template.source.service Service name, can be a template string. \u0026quot;{{ .Service.Name }}\u0026quot;   template.source.serviceInstance Service instance name, can be a template string. \u0026quot;{{ .Pod.Name }}\u0026quot;   template.source.endpoint Endpoint, can be a template string. \u0026quot;\u0026quot;   template.message Message format, can be a template string. \u0026quot;{{ .Event.Message }}\u0026quot;    ","excerpt":"SkyWalking Kubernetes Event Exporter User Guide SkyWalking Kubernetes Event Exporter is able to …","ref":"/docs/skywalking-swck/next/examples/event-exporter/","title":"SkyWalking Kubernetes Event Exporter User Guide"},{"body":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. 
Welcome to the SkyWalking community!\nIn here, you could learn how to set up PHP agent for the PHP services.\n","excerpt":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the …","ref":"/docs/skywalking-php/latest/readme/","title":"SkyWalking PHP Agent"},{"body":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the SkyWalking community!\nIn here, you could learn how to set up PHP agent for the PHP services.\n","excerpt":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the …","ref":"/docs/skywalking-php/next/readme/","title":"SkyWalking PHP Agent"},{"body":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the SkyWalking community!\nIn here, you could learn how to set up PHP agent for the PHP services.\n","excerpt":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the …","ref":"/docs/skywalking-php/v0.7.0/readme/","title":"SkyWalking PHP Agent"},{"body":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to the SkyWalking community!\nThe Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects.\nThis documentation covers a number of ways to set up the Python agent for various use cases.\n \nCapabilities The following table demonstrates the currently supported telemetry collection capabilities in SkyWalking Python agent:\n   Reporter Supported? Details     Trace ✅ (default: ON) Automatic instrumentation + Manual SDK   Log ✅ (default: ON) Direct reporter only. (Tracing context in log planned)   Meter ✅ (default: ON) Meter API + Automatic PVM metrics   Event ❌ (Planned) Report lifecycle events of your awesome Python application   Profiling ✅ (default: ON) Threading and Greenlet Profiler    Live Demo  Find the live demo with Python agent on our website. Follow the showcase to set up preview deployment quickly.  ","excerpt":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to …","ref":"/docs/skywalking-python/latest/readme/","title":"SkyWalking Python Agent"},{"body":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to the SkyWalking community!\nThe Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects.\nThis documentation covers a number of ways to set up the Python agent for various use cases.\n \nCapabilities The following table demonstrates the currently supported telemetry collection capabilities in SkyWalking Python agent:\n   Reporter Supported? Details     Trace ✅ (default: ON) Automatic instrumentation + Manual SDK   Log ✅ (default: ON) Direct reporter only. (Tracing context in log planned)   Meter ✅ (default: ON) Meter API + Automatic PVM metrics   Event ❌ (Planned) Report lifecycle events of your awesome Python application   Profiling ✅ (default: ON) Threading and Greenlet Profiler    Live Demo  Find the live demo with Python agent on our website. Follow the showcase to set up preview deployment quickly.  ","excerpt":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to …","ref":"/docs/skywalking-python/next/readme/","title":"SkyWalking Python Agent"},{"body":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. 
Welcome to the SkyWalking community!\nThe Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects.\nThis documentation covers a number of ways to set up the Python agent for various use cases.\n \nCapabilities The following table demonstrates the currently supported telemetry collection capabilities in SkyWalking Python agent:\n   Reporter Supported? Details     Trace ✅ (default: ON) Automatic instrumentation + Manual SDK   Log ✅ (default: ON) Direct reporter only. (Tracing context in log planned)   Meter ✅ (default: ON) Meter API + Automatic PVM metrics   Event ❌ (Planned) Report lifecycle events of your awesome Python application   Profiling ✅ (default: ON) Threading and Greenlet Profiler    Live Demo  Find the live demo with Python agent on our website. Follow the showcase to set up preview deployment quickly.  ","excerpt":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to …","ref":"/docs/skywalking-python/v1.0.1/readme/","title":"SkyWalking Python Agent"},{"body":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is the recommended way of running your application with Python agent, the CLI is well-tested and used by all agent E2E \u0026amp; Plugin tests.\nIn releases before 0.7.0, you would at least need to add the following lines to your applications to get the agent attached and running, this can be tedious in many cases due to large number of services, DevOps practices and can cause problem when used with prefork servers.\nfrom skywalking import agent, config config.init(SomeConfig) agent.start() The SkyWalking Python agent implements a command-line interface that can be utilized to attach the agent to your awesome applications during deployment without changing any application code, just like the SkyWalking Java Agent.\n The following feature is added in v1.0.0 as experimental flag, so you need to specify the -p flag to sw-python run -p. 
In the future, this flag will be removed and agent will automatically enable prefork/fork support in a more comprehensive manner.\n Especially with the new automatic postfork injection feature, you no longer have to worry about threading and forking incompatibility.\nCheck How to use with uWSGI and How to use with Gunicorn to understand the detailed background on what is post_fork, why you need them and how to easily overcome the trouble with sw-python CLI.\nYou should still read the legacy way to integrate agent in case the sw-python CLI is not working for you.\nUsage Upon successful installation of the SkyWalking Python agent via pip, a command-line script sw-python is installed in your environment (virtual env preferred).\n run sw-python to see if it is available, you will need to pass configuration by environment variables.\n For example: export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800\nThe run option The sw-python CLI provides a run option, which you can use to execute your applications (either begins with the python command or Python-based programs like gunicorn on your path) just like you invoke them normally, plus a prefix, the following example demonstrates the usage.\nIf your previous command to run your gunicorn/uwsgi application is:\ngunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nuwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nPlease change it to (the -p option starts one agent in each process, which is the correct behavior):\nImportant: if the call to uwsgi/gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nsw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nThe SkyWalking Python agent will start up along with all your application workers shortly.\nNote that sw-python also work with spawned subprocess (os.exec*/subprocess) as long as the PYTHONPATH is inherited.\nAdditionally, sw-python started agent works well with os.fork when your application forks workers, as long as the SW_AGENT_EXPERIMENTAL_FORK_SUPPORT is turned on. 
(It will be automatically turned on when gunicorn is detected.)\nConfiguring the agent You would normally want to provide additional configurations other than the default ones.\nThrough environment variables The currently supported method is to provide the environment variables listed and explained in the Environment Variables List.\nThrough a sw-config.toml (TBD) Currently, only environment variable configuration is supported; an optional toml configuration is to be implemented.\nEnabling CLI DEBUG mode Note that the CLI manipulates the Python interpreter\u0026rsquo;s bootstrap behaviour, so there could be unsupported cases.\nIf you encounter unexpected problems, please turn on the DEBUG mode by adding the -d or --debug flag to your sw-python command, as shown below.\nFrom: sw-python run command\nTo: sw-python -d run command\nPlease attach the debug logs to the SkyWalking Issues section if you believe it is a bug; idea discussions and pull requests are always welcome.\nAdditional Remarks When executing commands with the sw-python run command, your command\u0026rsquo;s Python interpreter will pick up the SkyWalking loader module.\nIt is not safe to attach the SkyWalking agent to commands that reside in another Python installation, because incompatible Python versions and mismatched SkyWalking versions can cause problems. Therefore, any attempt to pass a command that uses a different Python interpreter/environment will not bring up the SkyWalking Python agent, even if another SkyWalking Python agent is installed there (no matter the version), and will force exit with an error message indicating the reason.\nDisabling spawned processes from starting new agents Sometimes you don\u0026rsquo;t actually need the agent to monitor anything in a new process (when it\u0026rsquo;s not a web service worker). Here we mean processes spawned via subprocess and os.exec*(); os.fork() is not controlled by this flag but by experimental_fork_support.\nIf you do not need the agent to get loaded for application child processes, you can turn off the behavior by setting an environment variable:\nSW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE to False\nNote that the auto bootstrap depends on the environment inherited by child processes; thus, prepending a new sitecustomize path to, or removing the loader path from, the PYTHONPATH could also prevent the agent from loading in a child process.\nKnown limitations  The CLI may not work properly with arguments that involve double quotation marks in some shells. The CLI and bootstrapper stdout logs could get messy in Windows shells.  
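As a concrete illustration of the section above on disabling spawned processes from starting new agents, the snippet below is a minimal sketch (not from the agent codebase) of launching one helper child process with SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE set to False only for that child; the helper script name is purely illustrative.

import os
import subprocess

def run_helper_without_agent(cmd):
    # Copy the parent's environment (which keeps PYTHONPATH, so the child could
    # otherwise bootstrap the agent) and switch the propagation flag off.
    env = os.environ.copy()
    env["SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE"] = "False"
    return subprocess.run(cmd, env=env, check=False).returncode

if __name__ == "__main__":
    # The parent itself may have been started via `sw-python run -p ...`;
    # only this child skips the automatic agent bootstrap.
    code = run_helper_without_agent(["python", "housekeeping_job.py"])  # hypothetical script
    print("helper exited with", code)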
","excerpt":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is …","ref":"/docs/skywalking-python/latest/en/setup/cli/","title":"SkyWalking Python Agent Command Line Interface (sw-python CLI)"},{"body":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is the recommended way of running your application with Python agent, the CLI is well-tested and used by all agent E2E \u0026amp; Plugin tests.\nIn releases before 0.7.0, you would at least need to add the following lines to your applications to get the agent attached and running, this can be tedious in many cases due to large number of services, DevOps practices and can cause problem when used with prefork servers.\nfrom skywalking import agent, config config.init(SomeConfig) agent.start() The SkyWalking Python agent implements a command-line interface that can be utilized to attach the agent to your awesome applications during deployment without changing any application code, just like the SkyWalking Java Agent.\n The following feature is added in v1.0.0 as experimental flag, so you need to specify the -p flag to sw-python run -p. In the future, this flag will be removed and agent will automatically enable prefork/fork support in a more comprehensive manner.\n Especially with the new automatic postfork injection feature, you no longer have to worry about threading and forking incompatibility.\nCheck How to use with uWSGI and How to use with Gunicorn to understand the detailed background on what is post_fork, why you need them and how to easily overcome the trouble with sw-python CLI.\nYou should still read the legacy way to integrate agent in case the sw-python CLI is not working for you.\nUsage Upon successful installation of the SkyWalking Python agent via pip, a command-line script sw-python is installed in your environment (virtual env preferred).\n run sw-python to see if it is available, you will need to pass configuration by environment variables.\n For example: export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800\nThe run option The sw-python CLI provides a run option, which you can use to execute your applications (either begins with the python command or Python-based programs like gunicorn on your path) just like you invoke them normally, plus a prefix, the following example demonstrates the usage.\nIf your previous command to run your gunicorn/uwsgi application is:\ngunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nuwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nPlease change it to (the -p option starts one agent in each process, which is the correct behavior):\nImportant: if the call to uwsgi/gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nsw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nThe SkyWalking Python agent will start up along with all your application workers shortly.\nNote that sw-python also work with spawned subprocess (os.exec*/subprocess) as long as the PYTHONPATH is inherited.\nAdditionally, sw-python 
started agent works well with os.fork when your application forks workers, as long as the SW_AGENT_EXPERIMENTAL_FORK_SUPPORT is turned on. (It will be automatically turned on when gunicorn is detected)\nConfiguring the agent You would normally want to provide additional configurations other than the default ones.\nThrough environment variables The currently supported method is to provide the environment variables listed and explained in the Environment Variables List.\nThrough a sw-config.toml (TBD) Currently, only environment variable configuration is supported; an optional toml configuration is to be implemented.\nEnabling CLI DEBUG mode Note the CLI is a feature that manipulates the Python interpreter bootstrap behaviour, there could be unsupported cases.\nIf you encounter unexpected problems, please turn on the DEBUG mode by adding the -d or --debug flag to your sw-python command, as shown below.\nFrom: sw-python run command\nTo: sw-python -d run command\nPlease attach the debug logs to the SkyWalking Issues section if you believe it is a bug, idea discussions and pull requests are always welcomed.\nAdditional Remarks When executing commands with sw-python run command, your command\u0026rsquo;s Python interpreter will pick up the SkyWalking loader module.\nIt is not safe to attach SkyWalking Agent to those commands that resides in another Python installation because incompatible Python versions and mismatched SkyWalking versions can cause problems. Therefore, any attempt to pass a command that uses a different Python interpreter/ environment will not bring up SkyWalking Python Agent even if another SkyWalking Python agent is installed there(no matter the version), and will force exit with an error message indicating the reasoning.\nDisabling spawned processes from starting new agents Sometimes you don\u0026rsquo;t actually need the agent to monitor anything in a new process (when it\u0026rsquo;s not a web service worker). (here we mean process spawned by subprocess and os.exec*(), os.fork() is not controlled by this flag but experimental_fork_support)\nIf you do not need the agent to get loaded for application child processes, you can turn off the behavior by setting an environment variable.\nSW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE to False\nNote the auto bootstrap depends on the environment inherited by child processes, thus prepending a new sitecustomize path to or removing the loader path from the PYTHONPATH could also prevent the agent from loading in a child process.\nKnown limitations  The CLI may not work properly with arguments that involve double quotation marks in some shells. The CLI and bootstrapper stdout logs could get messy in Windows shells.  
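If you prefer to keep the agent configuration next to your launch logic rather than in shell profiles, a small launcher like the sketch below can export the environment variables described above and then hand control over to sw-python run -p. This is purely illustrative and not part of the agent; the service name variable and the gunicorn arguments are assumptions based on the examples in this page.

import os

def launch_with_agent():
    # Provide the collector address (and any other SW_AGENT_* settings) before
    # exec'ing, then replace this process with the sw-python CLI so that the
    # gunicorn master and its workers are instrumented.
    os.environ.setdefault("SW_AGENT_COLLECTOR_BACKEND_SERVICES", "localhost:11800")
    os.environ.setdefault("SW_AGENT_NAME", "your-awesome-service")  # assumed variable name
    os.execvp("sw-python", [
        "sw-python", "run", "-p",
        "gunicorn", "your_app:app",
        "--workers", "2",
        "--worker-class", "uvicorn.workers.UvicornWorker",
        "--bind", "0.0.0.0:8088",
    ])

if __name__ == "__main__":
    launch_with_agent()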
","excerpt":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is …","ref":"/docs/skywalking-python/next/en/setup/cli/","title":"SkyWalking Python Agent Command Line Interface (sw-python CLI)"},{"body":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is the recommended way of running your application with Python agent, the CLI is well-tested and used by all agent E2E \u0026amp; Plugin tests.\nIn releases before 0.7.0, you would at least need to add the following lines to your applications to get the agent attached and running, this can be tedious in many cases due to large number of services, DevOps practices and can cause problem when used with prefork servers.\nfrom skywalking import agent, config config.init(SomeConfig) agent.start() The SkyWalking Python agent implements a command-line interface that can be utilized to attach the agent to your awesome applications during deployment without changing any application code, just like the SkyWalking Java Agent.\n The following feature is added in v1.0.0 as experimental flag, so you need to specify the -p flag to sw-python run -p. In the future, this flag will be removed and agent will automatically enable prefork/fork support in a more comprehensive manner.\n Especially with the new automatic postfork injection feature, you no longer have to worry about threading and forking incompatibility.\nCheck How to use with uWSGI and How to use with Gunicorn to understand the detailed background on what is post_fork, why you need them and how to easily overcome the trouble with sw-python CLI.\nYou should still read the legacy way to integrate agent in case the sw-python CLI is not working for you.\nUsage Upon successful installation of the SkyWalking Python agent via pip, a command-line script sw-python is installed in your environment (virtual env preferred).\n run sw-python to see if it is available, you will need to pass configuration by environment variables.\n For example: export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800\nThe run option The sw-python CLI provides a run option, which you can use to execute your applications (either begins with the python command or Python-based programs like gunicorn on your path) just like you invoke them normally, plus a prefix, the following example demonstrates the usage.\nIf your previous command to run your gunicorn/uwsgi application is:\ngunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nuwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nPlease change it to (the -p option starts one agent in each process, which is the correct behavior):\nImportant: if the call to uwsgi/gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nsw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nThe SkyWalking Python agent will start up along with all your application workers shortly.\nNote that sw-python also work with spawned subprocess (os.exec*/subprocess) as long as the PYTHONPATH is inherited.\nAdditionally, sw-python 
started agent works well with os.fork when your application forks workers, as long as the SW_AGENT_EXPERIMENTAL_FORK_SUPPORT is turned on. (It will be automatically turned on when gunicorn is detected)\nConfiguring the agent You would normally want to provide additional configurations other than the default ones.\nThrough environment variables The currently supported method is to provide the environment variables listed and explained in the Environment Variables List.\nThrough a sw-config.toml (TBD) Currently, only environment variable configuration is supported; an optional toml configuration is to be implemented.\nEnabling CLI DEBUG mode Note the CLI is a feature that manipulates the Python interpreter bootstrap behaviour, there could be unsupported cases.\nIf you encounter unexpected problems, please turn on the DEBUG mode by adding the -d or --debug flag to your sw-python command, as shown below.\nFrom: sw-python run command\nTo: sw-python -d run command\nPlease attach the debug logs to the SkyWalking Issues section if you believe it is a bug, idea discussions and pull requests are always welcomed.\nAdditional Remarks When executing commands with sw-python run command, your command\u0026rsquo;s Python interpreter will pick up the SkyWalking loader module.\nIt is not safe to attach SkyWalking Agent to those commands that resides in another Python installation because incompatible Python versions and mismatched SkyWalking versions can cause problems. Therefore, any attempt to pass a command that uses a different Python interpreter/ environment will not bring up SkyWalking Python Agent even if another SkyWalking Python agent is installed there(no matter the version), and will force exit with an error message indicating the reasoning.\nDisabling spawned processes from starting new agents Sometimes you don\u0026rsquo;t actually need the agent to monitor anything in a new process (when it\u0026rsquo;s not a web service worker). (here we mean process spawned by subprocess and os.exec*(), os.fork() is not controlled by this flag but experimental_fork_support)\nIf you do not need the agent to get loaded for application child processes, you can turn off the behavior by setting an environment variable.\nSW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE to False\nNote the auto bootstrap depends on the environment inherited by child processes, thus prepending a new sitecustomize path to or removing the loader path from the PYTHONPATH could also prevent the agent from loading in a child process.\nKnown limitations  The CLI may not work properly with arguments that involve double quotation marks in some shells. The CLI and bootstrapper stdout logs could get messy in Windows shells.  
","excerpt":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is …","ref":"/docs/skywalking-python/v1.0.1/en/setup/cli/","title":"SkyWalking Python Agent Command Line Interface (sw-python CLI)"},{"body":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented automatically, SkyWalking also provides some APIs to enable manual instrumentation.\nCreate Spans The code snippet below shows how to create entry span, exit span and local span.\nfrom skywalking import Component from skywalking.trace.context import SpanContext, get_context from skywalking.trace.tags import Tag context: SpanContext = get_context() # get a tracing context # create an entry span, by using `with` statement, # the span automatically starts/stops when entering/exiting the context with context.new_entry_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.component = Component.Flask # the span automatically stops when exiting the `with` context class TagSinger(Tag): key = \u0026#39;Singer\u0026#39; with context.new_exit_span(op=\u0026#39;https://github.com/apache\u0026#39;, peer=\u0026#39;localhost:8080\u0026#39;, component=Component.Flask) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) with context.new_local_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) Decorators from time import sleep from skywalking import Component from skywalking.decorators import trace, runnable from skywalking.trace.context import SpanContext, get_context @trace() # the operation name is the method name(\u0026#39;some_other_method\u0026#39;) by default def some_other_method(): sleep(1) @trace(op=\u0026#39;awesome\u0026#39;) # customize the operation name to \u0026#39;awesome\u0026#39; def some_method(): some_other_method() @trace(op=\u0026#39;async_functions_are_also_supported\u0026#39;) async def async_func(): return \u0026#39;asynchronous\u0026#39; @trace() async def async_func2(): return await async_func() @runnable() # cross thread propagation def some_method(): some_other_method() from threading import Thread t = Thread(target=some_method) t.start() context: SpanContext = get_context() with context.new_entry_span(op=str(\u0026#39;https://github.com/apache/skywalking\u0026#39;)) as span: span.component = Component.Flask some_method() ","excerpt":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented …","ref":"/docs/skywalking-python/latest/en/setup/advanced/api/","title":"SkyWalking Python Instrumentation API"},{"body":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented automatically, SkyWalking also provides some APIs to enable manual instrumentation.\nCreate Spans The code snippet below shows how to create entry span, exit span and local span.\nfrom skywalking import Component from skywalking.trace.context import SpanContext, get_context from skywalking.trace.tags import Tag context: SpanContext = get_context() # get a tracing context # create an entry span, by using `with` statement, # the span automatically starts/stops when entering/exiting the context with context.new_entry_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.component = Component.Flask # the span automatically stops when exiting the `with` context class TagSinger(Tag): key = \u0026#39;Singer\u0026#39; with context.new_exit_span(op=\u0026#39;https://github.com/apache\u0026#39;, 
peer=\u0026#39;localhost:8080\u0026#39;, component=Component.Flask) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) with context.new_local_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) Decorators from time import sleep from skywalking import Component from skywalking.decorators import trace, runnable from skywalking.trace.context import SpanContext, get_context @trace() # the operation name is the method name(\u0026#39;some_other_method\u0026#39;) by default def some_other_method(): sleep(1) @trace(op=\u0026#39;awesome\u0026#39;) # customize the operation name to \u0026#39;awesome\u0026#39; def some_method(): some_other_method() @trace(op=\u0026#39;async_functions_are_also_supported\u0026#39;) async def async_func(): return \u0026#39;asynchronous\u0026#39; @trace() async def async_func2(): return await async_func() @runnable() # cross thread propagation def some_method(): some_other_method() from threading import Thread t = Thread(target=some_method) t.start() context: SpanContext = get_context() with context.new_entry_span(op=str(\u0026#39;https://github.com/apache/skywalking\u0026#39;)) as span: span.component = Component.Flask some_method() ","excerpt":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented …","ref":"/docs/skywalking-python/next/en/setup/advanced/api/","title":"SkyWalking Python Instrumentation API"},{"body":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented automatically, SkyWalking also provides some APIs to enable manual instrumentation.\nCreate Spans The code snippet below shows how to create entry span, exit span and local span.\nfrom skywalking import Component from skywalking.trace.context import SpanContext, get_context from skywalking.trace.tags import Tag context: SpanContext = get_context() # get a tracing context # create an entry span, by using `with` statement, # the span automatically starts/stops when entering/exiting the context with context.new_entry_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.component = Component.Flask # the span automatically stops when exiting the `with` context class TagSinger(Tag): key = \u0026#39;Singer\u0026#39; with context.new_exit_span(op=\u0026#39;https://github.com/apache\u0026#39;, peer=\u0026#39;localhost:8080\u0026#39;, component=Component.Flask) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) with context.new_local_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) Decorators from time import sleep from skywalking import Component from skywalking.decorators import trace, runnable from skywalking.trace.context import SpanContext, get_context @trace() # the operation name is the method name(\u0026#39;some_other_method\u0026#39;) by default def some_other_method(): sleep(1) @trace(op=\u0026#39;awesome\u0026#39;) # customize the operation name to \u0026#39;awesome\u0026#39; def some_method(): some_other_method() @trace(op=\u0026#39;async_functions_are_also_supported\u0026#39;) async def async_func(): return \u0026#39;asynchronous\u0026#39; @trace() async def async_func2(): return await async_func() @runnable() # cross thread propagation def some_method(): some_other_method() from threading import Thread t = Thread(target=some_method) t.start() context: SpanContext = get_context() with 
context.new_entry_span(op=str(\u0026#39;https://github.com/apache/skywalking\u0026#39;)) as span: span.component = Component.Flask some_method() ","excerpt":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented …","ref":"/docs/skywalking-python/v1.0.1/en/setup/advanced/api/","title":"SkyWalking Python Instrumentation API"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  
apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/latest/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  
\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://skywalking.apache.org/docs/main/vx.y.z/en/changes/changes/ Apache SkyWalking website: http://skywalking.apache.org/ Downloads: https://skywalking.apache.org/downloads/#SkyWalkingAPM Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  
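For the clean-up step above, a minimal sketch of the svn commands could look like the following (a.b.c stands for the previous release version and is only a placeholder; the commit message is an example, not part of the official guide):
# List what is currently on the release mirror
svn ls https://dist.apache.org/repos/dist/release/skywalking/
# Remove the previous release; it remains available under https://archive.apache.org/dist/skywalking/
svn rm -m "Remove a.b.c from dist, release is archived" https://dist.apache.org/repos/dist/release/skywalking/a.b.c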
","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/next/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  
apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.zip Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-rocketbot-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.zip Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-rocketbot-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. 
Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.0.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.1.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... 
\u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. 
We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. 
LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. 
apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.2.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  
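Putting the preparation and staging steps together, and warming up the gpg agent first as suggested above, the sequence is roughly as follows (sample.txt is just a throwaway file, not part of the official procedure):
# Sign any file once so gpg-agent caches the passphrase before the release run
gpg --sign sample.txt
# Prepare and stage the release
./mvnw release:clean
./mvnw release:prepare -DautoVersionSubmodules=true -Pall
./mvnw release:perform -Dmaven.test.skip -Pall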
Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. 
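When filling in the sha512 checksum lines of the mail above, the values can be copied from the checksum files generated earlier, for example (file names are placeholders following the x.y.z pattern used in this guide):
# Print the checksums to paste into the announcement mail
cat apache-skywalking-apm-x.y.z-src.tgz.sha512
cat apache-skywalking-apm-bin-x.y.z.tar.gz.sha512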
Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. 
Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.3.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
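Stepping back to the vote-check list above: the verification commands can be run as a single pass against the downloaded candidate. This is a sketch only, using the x.y.z placeholders and the package names quoted in this guide.
# Import the published KEYS file so gpg can verify the release signatures.
curl -sSL https://dist.apache.org/repos/dist/release/skywalking/KEYS | gpg --import
# Verify the checksum and the signature of the source package (binary packages are checked the same way).
shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512
gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz
# From the root of the extracted source tree, check the Apache license headers.
docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check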
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.4.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
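An aside on the release:prepare tip earlier in this guide: if Maven never gives you a chance to type the GPG passphrase, signing any throwaway file first lets gpg-agent cache it for the release run. A minimal sketch:
# Sign a scratch file so gpg-agent caches the passphrase before ./mvnw release:prepare runs.
echo test > /tmp/sw-release-check
gpg --sign /tmp/sw-release-check
rm -f /tmp/sw-release-check /tmp/sw-release-check.gpg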
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
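Spelled out with the placeholders used throughout this guide, the publish step above (moving the voted candidate from the dev area to the release area) looks roughly like this; the commit message is only an example, and svn will prompt for your Apache credentials.
export SVN_EDITOR=vim
# Move the approved candidate into the release area; this commits directly in the repository.
svn mv -m "Release Apache SkyWalking x.y.z" https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking/x.y.z
# Then release the closed staging repository at https://repository.apache.org/#stagingRepositories.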
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","excerpt":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking …","ref":"/docs/main/v9.7.0/en/guides/how-to-release/","title":"SkyWalking release guide"},{"body":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  
Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. We can get all info of one client call.\n","excerpt":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/","title":"Skywalking with Kotlin coroutine"},{"body":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. We can get all info of one client call.\n","excerpt":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/","title":"Skywalking with Kotlin coroutine"},{"body":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. 
We can get all info of one client call.\n","excerpt":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/","title":"Skywalking with Kotlin coroutine"},{"body":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. We can get all info of one client call.\n","excerpt":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/","title":"Skywalking with Kotlin coroutine"},{"body":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. 
We can get all info of one client call.\n","excerpt":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/","title":"Skywalking with Kotlin coroutine"},{"body":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/latest/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/next/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/v9.3.0/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/v9.4.0/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/v9.5.0/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/v9.6.0/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system …","ref":"/docs/main/v9.7.0/en/setup/backend/slow-cache-command/","title":"Slow Cache Command"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/latest/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/next/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial in order for you to identify bottlenecks of a system which relies on the database.\nSlow DB statements are based on sampling. Right now, the core samples the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote: The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms.\n","excerpt":"Slow Database Statement Slow Database statements are crucial in order for you to identify …","ref":"/docs/main/v9.0.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote: The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms.\n","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.1.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote: The threshold should not be set too small, like 1ms. 
Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms.\n","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.2.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.3.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.4.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.5.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.6.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","excerpt":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a …","ref":"/docs/main/v9.7.0/en/setup/backend/slow-db-statement/","title":"Slow Database Statement"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/latest/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/next/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.0.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.1.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.2.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.3.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.4.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.5.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.6.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. 
Then, it can be supported by the OAL script and OAP core.  Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","excerpt":"Source and scope extension for new metrics From the OAL scope introduction, you should already have …","ref":"/docs/main/v9.7.0/en/guides/source-extension/","title":"Source and scope extension for new metrics"},{"body":"Spring annotation plugin This plugin allows to trace all methods of beans in Spring context, which are annotated with @Bean, @Service, @Component and @Repository.\n Why does this plugin optional?  Tracing all methods in Spring context all creates a lot of spans, which also spend more CPU, memory and network. 
Of course, you may want as many spans as possible, but please make sure your system payload can support them.\n","excerpt":"Spring annotation plugin This plugin allows tracing of all methods of beans in the Spring context that …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/","title":"Spring annotation plugin"},{"body":"Spring annotation plugin This plugin allows tracing of all methods of beans in the Spring context that are annotated with @Bean, @Service, @Component and @Repository.\n Why is this plugin optional?  Tracing all methods of beans in the Spring context creates a lot of spans, which also consumes more CPU, memory, and network. Of course, you may want as many spans as possible, but please make sure your system payload can support them.\n","excerpt":"Spring annotation plugin This plugin allows tracing of all methods of beans in the Spring context that …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/","title":"Spring annotation plugin"},{"body":"Spring annotation plugin This plugin allows tracing of all methods of beans in the Spring context that are annotated with @Bean, @Service, @Component and @Repository.\n Why is this plugin optional?  Tracing all methods of beans in the Spring context creates a lot of spans, which also consumes more CPU, memory, and network. Of course, you may want as many spans as possible, but please make sure your system payload can support them.\n","excerpt":"Spring annotation plugin This plugin allows tracing of all methods of beans in the Spring context that …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/","title":"Spring annotation plugin"},{"body":"Spring annotation plugin This plugin allows tracing of all methods of beans in the Spring context that are annotated with @Bean, @Service, @Component and @Repository.\n Why is this plugin optional?  Tracing all methods of beans in the Spring context creates a lot of spans, which also consumes more CPU, memory, and network. Of course, you may want as many spans as possible, but please make sure your system payload can support them.\n","excerpt":"Spring annotation plugin This plugin allows tracing of all methods of beans in the Spring context that …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/","title":"Spring annotation plugin"},{"body":"Spring annotation plugin This plugin allows tracing of all methods of beans in the Spring context that are annotated with @Bean, @Service, @Component and @Repository.\n Why is this plugin optional?  Tracing all methods of beans in the Spring context creates a lot of spans, which also consumes more CPU, memory, and network. Of course, you may want as many spans as possible, but please make sure your system payload can support them.\n","excerpt":"Spring annotation plugin This plugin allows tracing of all methods of beans in the Spring context that …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/","title":"Spring annotation plugin"},{"body":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. 
You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Add UI dashboard   Open the dashboard view. Click edit button to edit the templates.\n  Create a new template. Template type: Standard -\u0026gt; Template Configuration: Spring -\u0026gt; Input the Template Name.\n  Click view button. You\u0026rsquo;ll see the spring sleuth dashboard.\n  Supported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. …","ref":"/docs/main/v9.0.0/en/setup/backend/spring-sleuth-setup/","title":"Spring sleuth setup"},{"body":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  
@Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Add UI dashboard   Open the dashboard view. Click edit button to edit the templates.\n  Create a new template. Template type: Standard -\u0026gt; Template Configuration: Spring -\u0026gt; Input the Template Name.\n  Click view button. You\u0026rsquo;ll see the spring sleuth dashboard.\n  Supported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. …","ref":"/docs/main/v9.1.0/en/setup/backend/spring-sleuth-setup/","title":"Spring sleuth setup"},{"body":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. 
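Re-indented, the `receiver-meter` switch quoted above, together with the `agent-analyzer` setting enabled in the next step, would look roughly like this in the OAP backend's application.yml (environment-variable defaults exactly as quoted in the text):

```yaml
# OAP backend application.yml (fragment), re-indented from the settings quoted above
receiver-meter:
  selector: ${SW_RECEIVER_METER:default}
  default:

agent-analyzer:
  selector: ${SW_AGENT_ANALYZER:default}
  default:
    meterAnalyzerActiveFiles: ${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}
```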
If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. …","ref":"/docs/main/v9.2.0/en/setup/backend/spring-sleuth-setup/","title":"Spring sleuth setup"},{"body":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. 
Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","excerpt":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. …","ref":"/docs/main/v9.3.0/en/setup/backend/spring-sleuth-setup/","title":"Spring sleuth setup"},{"body":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. 
Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. 
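To make the propagation concrete, here is a minimal sketch of the kind of context an exit span could inject into the RPC header under this model. The field set (trace id, parent span id, parent service name, parent service instance name, peer network address) follows the description above; the class name, header encoding and method names are hypothetical, not SkyWalking's actual implementation.

```java
/**
 * Minimal sketch of a STAM-style propagated context; the field set follows the
 * paper's description, while names and encoding are hypothetical.
 */
public final class RpcContextCarrier {
    private final String traceId;
    private final String parentSpanId;
    private final String parentServiceName;          // client-side service name
    private final String parentServiceInstanceName;  // client-side instance name
    private final String peerAddress;                // address the client used to reach the server

    public RpcContextCarrier(String traceId, String parentSpanId, String parentServiceName,
                             String parentServiceInstanceName, String peerAddress) {
        this.traceId = traceId;
        this.parentSpanId = parentSpanId;
        this.parentServiceName = parentServiceName;
        this.parentServiceInstanceName = parentServiceInstanceName;
        this.peerAddress = peerAddress;
    }

    /** Serialize into a single header value carried by the RPC call (e.g. an HTTP or MQ header). */
    public String toHeaderValue() {
        return String.join("|", traceId, parentSpanId, parentServiceName,
                parentServiceInstanceName, peerAddress);
    }

    /** Server side: rebuild the context from the header; topology analysis needs no client span. */
    public static RpcContextCarrier fromHeaderValue(String value) {
        String[] parts = value.split("\\|", -1);
        return new RpcContextCarrier(parts[0], parts[1], parts[2], parts[3], parts[4]);
    }
}
```

In SkyWalking's own implementation the name fields are exchanged for registered integer IDs, which is how the header overhead stays small, as the paper notes.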
By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. 
In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. 
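A minimal sketch of how the entry- and exit-span rules described above could be applied in stream mode, reduced to the service level. The instance-level edges and the multiple-alias case from the text are omitted for brevity, and all types and method names here are illustrative rather than SkyWalking's internal API.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Streaming topology analysis sketch: entry spans register peer-address aliases
 * and emit parent->current edges; exit spans resolve the peer through the alias
 * table when one exists, otherwise the raw peer address becomes a conjectural node.
 */
public final class StamTopologyAnalyzer {

    enum SpanType { ENTRY, EXIT, LOCAL }

    record Span(SpanType type, String serviceName, String serviceInstanceName,
                String parentServiceName, String parentServiceInstanceName, String peerAddress) {}

    /** peer network address -> server service name alias, synced across analysis nodes. */
    private final Map<String, String> serviceAlias = new ConcurrentHashMap<>();

    void analyze(Span span) {
        switch (span.type()) {
            case ENTRY -> {
                // (1) Register the address the client used as an alias of this service.
                serviceAlias.put(span.peerAddress(), span.serviceName());
                // (2) Emit the dependency directly; no waiting for the client span.
                if (span.parentServiceName() != null) {
                    emitEdge(span.parentServiceName(), span.serviceName());
                }
            }
            case EXIT -> {
                // Resolve the peer through the alias table if the server side is instrumented.
                String target = serviceAlias.getOrDefault(span.peerAddress(), span.peerAddress());
                emitEdge(span.serviceName(), target);
            }
            case LOCAL -> { /* local spans carry no topology information */ }
        }
    }

    private void emitEdge(String source, String target) {
        System.out.println(source + " -> " + target);
    }
}
```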
And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. 
So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","excerpt":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System …","ref":"/docs/main/latest/en/papers/stam/","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System"},{"body":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. 
More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. 
So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. 
Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. 
After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. 
As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. 
https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","excerpt":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System …","ref":"/docs/main/next/en/papers/stam/","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System"},{"body":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. 
Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. 
By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. 
In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. 
And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration [14]. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. 
Conclusion

This paper described STAM, which is, to the best of our knowledge, the most effective topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method used by tracing-based monitoring systems, permanently removing the disk and memory cost of time-window based analysis as well as the barriers to horizontal scaling. One STAM implementation, Apache SkyWalking, is widely used to monitor hundreds of applications in production; some of these deployments generate over 100 TB of tracing data per day and build the topology for over 200 services in real time.

Acknowledgments

We thank all contributors of the Apache SkyWalking project for their suggestions, their code contributions implementing STAM, and their feedback from using STAM and SkyWalking in production environments.

License

This paper and STAM are licensed under the Apache License 2.0.

References

1. Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z
2. Apache SkyWalking, http://skywalking.apache.org/
3. Apache SkyWalking Open Users, https://skywalking.apache.org/users/
4. Zipkin, https://zipkin.io/
5. Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/
6. OpenTracing Specification, https://github.com/opentracing/specification/blob/master/specification.md
7. Apache Tomcat, http://tomcat.apache.org/
8. Apache HttpComponents, https://hc.apache.org/
9. Zipkin documentation, 'Instrumenting a library' section, 'Communicating trace information' paragraph, https://zipkin.io/pages/instrumenting
10. Jaeger Tracing, https://jaegertracing.io/
11. Envoy Proxy, http://envoyproxy.io/
12. Nginx, http://nginx.org/
13. Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway
14. Envoy Route Configuration, https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/rds.proto.html?highlight=request_headers_to_
Conclusion

This paper described the STAM which, to the best of our knowledge, is the most effective topology detection method for distributed tracing systems. It replaces the time-window based topology analysis used by tracing-based monitoring systems, permanently removing the disk and memory cost of time-window based analysis as well as the barriers to horizontal scaling. One STAM implementation, Apache SkyWalking, is widely used to monitor hundreds of applications in production; some of them generate over 100 TB of tracing data per day and a topology of over 200 services in real time.

Acknowledgments

We thank all contributors of the Apache SkyWalking project for their suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environments.

License

This paper and the STAM are licensed under Apache 2.0.

References

1. Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z
2. Apache SkyWalking, http://skywalking.apache.org/
3. Apache SkyWalking Open Users, https://skywalking.apache.org/users/
4. Zipkin, https://zipkin.io/
5. Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/
6. OpenTracing Specification, https://github.com/opentracing/specification/blob/master/specification.md
7. Apache Tomcat, http://tomcat.apache.org/
8. Apache HttpComponents, https://hc.apache.org/
9. Zipkin doc, 'Instrumenting a library' section, 'Communicating trace information' paragraph, https://zipkin.io/pages/instrumenting
10. Jaeger Tracing, https://jaegertracing.io/
11. Envoy Proxy, http://envoyproxy.io/
12. Nginx, http://nginx.org/
13. Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway
STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System

Sheng Wu 吴 晟 wusheng@apache.org

Editor's note

This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory behind all current agent core concepts. Readers can learn why SkyWalking agents differ significantly from other tracing systems and from the description in the Dapper[1] paper.

Abstract

Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common approach today is a distributed tracing system (e.g., Google Dapper)[1], with topology and metrics detected from the tracing data. One big limitation of today's topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans within a given time window to generate the dependencies between services. This causes extra latency and memory use, because the client and server spans of every RPC must be matched among millions of randomly occurring requests in a highly distributed system. More importantly, the match can fail if the duration of the RPC between client and server is longer than the configured time window, or if it crosses two windows.
In this paper, we present the STAM, the Streaming Topology Analysis Method. In STAM, we can use an auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPCs at both the client side and the server side. In the case of auto instrumentation, STAM manipulates application code at runtime, for example through a Java agent, so the monitoring system doesn't require any source code changes from the application development team or the RPC framework development team. The STAM injects the RPC network address used at the client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as aliases for the network address used at the client side. Freed from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate an accurate topology.

The STAM has been implemented in Apache SkyWalking[2], an open source APM (application performance monitoring) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and others (airlines, financial institutions and more) to support their large-scale distributed systems in production. It reduces load and memory cost significantly, with better horizontal scaling capability.

Introduction

Monitoring a highly distributed system, especially one with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, cache and database accesses, sit behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and the first step toward observability of the whole distributed system. A distributed tracing system collects traces covering all distributed request paths, and dependency relationships are logically included in the trace data. A distributed tracing system such as Zipkin[4] or Jaeger Tracing[10] provides built-in dependency analysis features, and many analysis features build on top of them. But there are at least two fundamental limitations: timeliness and consistent accuracy.

Strong timeliness is required to match the mutability of the dependency relationships in a distributed application system, at both the service level and the service instance level.

A Service is a logical group of instances which run the same functions or code.

A Service Instance is usually an OS-level process, such as a JVM process. The relationships between services and instances are mutable, depending on configuration, code and network status. The dependencies can change over time.

Figure 1, Generated spans in a traditional Dapper-based tracing system

The span model in the Dapper paper and in existing tracing systems, such as Zipkin's instrumenting mode[9], only propagates the span id to the server side. With this model, dependency analysis requires a certain time window. Tracing spans are collected at both the client and server sides, because the relationship is recorded across both, so the analysis process has to wait for the client and server spans to match within the same time window before it can output the result that Service A depends on Service B. This time window must be longer than the duration of the RPC request; otherwise, the conclusion is lost. As a consequence, the analysis cannot react to dependency changes at second-level granularity; in production, the window duration sometimes has to be set to 3-5 minutes. Also, because of the window-based design, if one side involves a long-running task, consistent accuracy is hard to achieve: to keep the analysis as fast as possible the analysis period is kept under 5 minutes, but then some spans can't be matched with their parents or children when the analysis is incomplete or a request crosses two time windows. Even with an extra mechanism to process the spans left over from previous stages, some would still have to be abandoned to keep the dataset size and memory usage reasonable.

In the STAM, we introduce new span and context propagation models together with a new analysis method. The new models add the peer network address (IP or hostname) used at the client side, the client service instance name and the client service name to the context propagation model. This information is passed with the RPC call from client to server, just like the original trace id and span id in existing tracing systems, and is collected in the server-side span. The new analysis method can then generate the client-server relationship directly without waiting for the client span. It also registers the peer network address as an alias of the server service. After the data is synced across the cluster nodes, the client-side span analysis can use this alias metadata to generate the client-server relationship directly as well. By using these new models and this method in Apache SkyWalking, we remove the time-window based analysis permanently and fully adopt the streaming analysis mode, with less than 5 seconds of latency and consistent accuracy.
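To make the propagation concrete, the sketch below shows how an exit-span interceptor might inject the extra STAM fields into the RPC context and how an entry-span interceptor reads them back. The header key, field layout and encoding here are assumptions for illustration, not SkyWalking's actual wire format.

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical illustration of the STAM context propagation described above.
public final class StamContextCarrier {

    // Assumed header key; the real header name and encoding are implementation details.
    public static final String HEADER_KEY = "x-stam-context";

    /** Client side (exit span): inject tracing ids plus the extra STAM fields. */
    public static void inject(Map<String, String> rpcHeaders, String traceId, String parentSpanId,
                              String parentService, String parentServiceInstance, String peerAddress) {
        // peerAddress is the address the client library used to reach the server,
        // e.g. "10.0.0.7:8080"; the server side will register it as its own alias.
        rpcHeaders.put(HEADER_KEY, String.join("|",
            traceId, parentSpanId, parentService, parentServiceInstance, peerAddress));
    }

    /** Server side (entry span): extract the propagated fields for the analysis core. */
    public static Map<String, String> extract(Map<String, String> rpcHeaders) {
        String[] parts = rpcHeaders.getOrDefault(HEADER_KEY, "").split("\\|", -1);
        Map<String, String> context = new HashMap<>();
        if (parts.length == 5) {
            context.put("traceId", parts[0]);
            context.put("parentSpanId", parts[1]);
            context.put("parentService", parts[2]);
            context.put("parentServiceInstance", parts[3]);
            context.put("peerAddress", parts[4]);
        }
        return context;
    }

    public static void main(String[] args) {
        Map<String, String> headers = new HashMap<>();
        inject(headers, "trace-1", "span-2", "service-A", "service-A-instance-1", "10.0.0.7:8080");
        System.out.println(extract(headers));
    }
}
```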
So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition means the analysis cannot react to dependency mutations at the second level; in production, the window duration sometimes has to be set to 3-5 minutes. Also, because of the window-based design, if one side involves a long-duration task, it can’t easily achieve consistent accuracy. To keep the analysis as fast as possible, the analysis period is kept under 5 minutes, but some spans can’t match their parents or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left over from previous stages, some would still have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce new span and context propagation models, together with a new analysis method. These new models add the peer network address (IP or hostname) used at the client side, the client service instance name and the client service name into the context propagation model. This information is passed with the RPC call from client to server, just like the original trace id and span id in the existing tracing system, and is collected in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the data syncs across cluster nodes, the client-side span analysis can use this alias metadata to generate the client-server relationship directly too. By using these new models and this method in Apache SkyWalking, we remove the time window-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds of latency and consistent accuracy.\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp. Service and Service Instance names of the current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. A Reference includes the parent span id and trace id.  In the new span model of STAM, we add the following fields to the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking-related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address used by the client library to access the server.\nThese fields are usually optional in many tracing systems. But in STAM, we require them in all RPC cases.\nThe Context Model is used to propagate client-side information to the server side, carried by the original RPC call, usually in the header, such as an HTTP header or MQ header. In the old design, it carries the trace id and span id of the client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of the exit span. The names could be literal strings. All these extra fields help remove the blocking in the streaming analysis.
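As an illustration of this enhanced context model, the following is a minimal sketch, assuming a hypothetical header name ("x-stam-context") and a plain string layout. The real Apache SkyWalking propagation header and its ID-exchange optimization (described next) are more compact than this.

```java
import java.util.Map;

// Illustrative carrier for the STAM-extended context fields; field and header names
// here are assumptions for the sketch, not SkyWalking's actual wire format.
class StamContextCarrier {
    String traceId;
    String parentSpanId;
    String parentService;          // client service name
    String parentServiceInstance;  // client service instance name
    String peerAddress;            // network address used by the client to reach the server

    // Inject the context into an outgoing RPC header (HTTP header, MQ header, ...).
    void inject(Map<String, String> rpcHeaders) {
        rpcHeaders.put("x-stam-context", String.join("|",
            traceId, parentSpanId, parentService, parentServiceInstance, peerAddress));
    }

    // Extract the context on the server side; the entry span records these fields,
    // so the analysis core does not have to wait for the client span.
    static StamContextCarrier extract(Map<String, String> rpcHeaders) {
        String[] parts = rpcHeaders.get("x-stam-context").split("\\|");
        StamContextCarrier c = new StamContextCarrier();
        c.traceId = parts[0];
        c.parentSpanId = parts[1];
        c.parentService = parts[2];
        c.parentServiceInstance = parts[3];
        c.peerAddress = parts[4];
        return c;
    }
}
```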
Compared to the existing context model, this uses a little more bandwidth, but it can be optimized. In Apache SkyWalking, we designed a register mechanism to exchange unique IDs representing these names. As a result, only 3 integers are added to the RPC context, so the increase in bandwidth is less than 1% in the production environment.\nThe changes to these two models eliminate the time windows in the analysis process, and the server-side span analysis gains context-aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM processes the span in stream mode. The analysis of the server-side span, also named the entry span, includes the parent service name, parent service instance name and peer of the exit span. So the analysis process can establish the following results.\n Set the peer of the exit span, i.e. the address used by the client, as an alias name of the current service and instance. The peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; service instance name aliases are created. These two will sync with all analysis nodes and persist in the storage, allowing more analysis processors to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless another, different peer network address \u0026lt;-\u0026gt; service instance name mapping is found. In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; service instance name.  For the analysis of the client-side span (exit span), there could be three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use the alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; service instance name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team at any time, or by the container operation platform automatically through some scale-out policy, like Kubernetes [5]. The monitoring system may not be notified manually in either case. By using STAM, we can detect the new node automatically and also keep the analysis process unblocked and consistent with the detected nodes. In this case, a new service and network address (which could be an IP, a port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist yet, so the traffic of client service -\u0026gt; peer network address will be generated and persisted in the storage first.
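Before continuing with this evaluation case, the entry/exit span rules above can be summarized in a short, illustrative sketch. Class and method names are hypothetical, the multiple-alias proxy case from possibility (3) is omitted, and the real Apache SkyWalking OAP implementation is considerably more involved.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Entry spans register the peer address as an alias of the server service and emit the
// client -> server relationship immediately; exit spans resolve the peer through the
// alias table when it is available, and fall back to the raw peer address otherwise.
class StamTopologyAnalyzer {
    // peer network address -> server service name, synced across analysis nodes and storage
    private final Map<String, String> peerAlias = new ConcurrentHashMap<>();

    // Server-side (entry) span: the propagated context already carries the parent names.
    void onEntrySpan(String parentService, String peerAddress, String currentService) {
        peerAlias.put(peerAddress, currentService);   // create the alias mapping
        emitRelation(parentService, currentService);  // no waiting on the client span
    }

    // Client-side (exit) span: resolve the peer address through the alias table if possible.
    void onExitSpan(String currentService, String peerAddress) {
        String alias = peerAlias.get(peerAddress);
        if (alias != null) {
            emitRelation(currentService, alias);        // possibility (1): alias found
        } else {
            emitRelation(currentService, peerAddress);  // possibility (2): fall back to the peer
        }
    }

    private void emitRelation(String source, String target) {
        System.out.println(source + " -> " + target);   // aggregated and persisted in practice
    }
}
```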
After the mapping is generated, further traffic of client service -\u0026gt; server service can be identified, generated and aggregated in the analysis platform. To fill the gap for the small amount of traffic generated before the mapping exists, we do the peer network address \u0026lt;-\u0026gt; service mapping translation again at the query stage, merging client service -\u0026gt; peer network address with client service -\u0026gt; server service. In production, the number of VMs for the whole SkyWalking analysis platform deployment is less than 100, and syncing among them finishes in less than 10 seconds, in most cases only 3-5 seconds. At the query stage, the data has already been aggregated at minute or second granularity at least. The query merge performance is not related to how much traffic happens before the mapping is generated; it is only affected by the sync duration, here only about 3 seconds. Due to that, in a minute-level aggregated topology, it only adds 1 or 2 relationship records to the whole topology relationship dataset. Considering that a topology of over 100 services has over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high-load distributed system, as we don’t need to be concerned about its scaling capability. In some fork versions, the implementers choose to update the existing client service -\u0026gt; peer network address records to client service -\u0026gt; server service after detecting the new mapping for the peer, in order to remove the extra load at the query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include: (1) Restriction of the technology. In some golang or C++ written applications, there is no easy way to do auto instrumentation by an agent, as there is in Java or .Net. So, the codes may not be instrumented automatically. (2) The middleware, such as an MQ or database server, has not adopted the tracing system. This would make it difficult or time-consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support working with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t been instrumented, the server-side span wouldn’t get any reference through the RPC context, so it would simply use the peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need special handling. The STAM analysis core simply keeps generating client service -\u0026gt; peer network address traffic. As no mapping for the peer network address is generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Existing Uninstrumented Nodes, there is one complex and special case: the uninstrumented node has the capability to propagate the header from downstream to upstream, typically in a proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13].
As a proxy, it has the capability to forward all headers from downstream to upstream in order to keep certain information in the header, including the tracing context, authentication, browser information, and routing information, so that it is accessible to the business services behind the proxy, like an Envoy route configuration. When such a proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagated through the RPC context as the peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias name for this network address would be generated. After those aliases are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between the client and the servers. So, it will generate the relationships of client service -\u0026gt; peer network address, peer network address -\u0026gt; server service B and peer network address -\u0026gt; server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation with an uninstrumented proxy Conclusion This paper described the STAM, which is, to the best of our knowledge, the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window based analysis permanently and totally, as well as the barriers to horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generate over 100 TB of tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of the Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environments.\nLicense This paper and the STAM are licensed under the Apache 2.0 License.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache SkyWalking Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification, https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","excerpt":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System …","ref":"/docs/main/v9.7.0/en/papers/stam/","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System"},{"body":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the development and testing environment.
The standalone mode is running as a standalone process by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 to access gRPC requests. if no errors occurred.\nAt the same time, the banyand-server would be listening on the 0.0.0.0:17913 to access HTTP requests. if no errors occurred. The HTTP server is used for CLI and Web UI.\n","excerpt":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the …","ref":"/docs/skywalking-banyandb/latest/installation/standalone/","title":"Standalone Mode"},{"body":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the development and testing environment. Once you unpack and extract the skywalking-banyandb-x.x.x-bin.tgz, you could startup BanyanDB server, the standalone mode is running as a standalone process.\n$ cd skywalking-banyandb-x.x.x-bin/bin $ ./banyand-server-static standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand server would be listening on the 0.0.0.0:17912 to access gRPC requests. if no errors occurred.\nAt the same time, the banyand server would be listening on the 0.0.0.0:17913 to access HTTP requests. if no errors occurred. The HTTP server is used for CLI and Web UI.\n","excerpt":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the …","ref":"/docs/skywalking-banyandb/next/installation/standalone/","title":"Standalone Mode"},{"body":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the development and testing environment. The standalone mode is running as a standalone process by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 to access gRPC requests. if no errors occurred.\nAt the same time, the banyand-server would be listening on the 0.0.0.0:17913 to access HTTP requests. if no errors occurred. The HTTP server is used for CLI and Web UI.\n","excerpt":"Standalone Mode The standalone mode is the simplest way to run Banyand. 
It is suitable for the …","ref":"/docs/skywalking-banyandb/v0.5.0/installation/standalone/","title":"Standalone Mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/latest/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/next/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode In different deployment tools, such as k8s, you may need different startup modes. We provide two other optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provide services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization, and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables), as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. 
Rather, it watches out for the ElasticSearch indexes, MySQL, and TiDB tables, starts to listen, and provide services. In other words, the OAP server would anticipate having another OAP server to carry out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode In different deployment tools, such as k8s, you may need different startup modes. We …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. 
You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. 
We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","excerpt":"Start up mode You may need different startup modes in different deployment tools, such as k8s. 
We …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-start-up-mode/","title":"Start up mode"},{"body":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow Operator installation instrument to install the operator.\nDefine Storage with default setting  sample.yaml(use the internal type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:internalversion:7.5.1instances:3image:docker.elastic.co/elasticsearch/elasticsearch:7.5.1security:user:secretName:defaulttls:truesample.yaml(use the external type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:externaladdress:\u0026#34;https://elasticsearch\u0026#34;security:user:secretName:defaultDeploy Storage  Deploy the Storage use the below command:  $ kubectl apply -f sample.yaml Check the Storage in Kubernetes:   If you deploy the storage with the internal type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample 3 elasticsearch 7.5.1 internal  If you deploy the storage with the external type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample elasticsearch 7.5.1 external Check the Statefulset in Kubernetes:  $ kubectl get statefulset NAME READY AGE sample-elasticsearch 3/3 7s Specify Storage Name in OAP server Here we modify the default OAP server configuration file,the new yaml file as follows:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServermetadata:name:defaultspec:version:9.5.0instances:1image:apache/skywalking-oap-server:9.5.0service:template:type:ClusterIPstorage:name:sample Deploy the OAP server use the new yaml file:  $ kubectl apply -f oap.yaml Check the OAP server in Kubernetes:  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS sample 1 1 sample-oap.default Check whether the pod generated by OAP server is running correctly.  
$ kubectl get pod -l app=oap NAME READY STATUS RESTARTS AGE sample-oap-5bc79567b7-tkw6q 1/1 Running 0 6m31s ","excerpt":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow …","ref":"/docs/skywalking-swck/latest/examples/storage/","title":"Storage Usage"},{"body":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow Operator installation instrument to install the operator.\nDefine Storage with default setting  sample.yaml(use the internal type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:internalversion:7.5.1instances:3image:docker.elastic.co/elasticsearch/elasticsearch:7.5.1security:user:secretName:defaulttls:truesample.yaml(use the external type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:externaladdress:\u0026#34;https://elasticsearch\u0026#34;security:user:secretName:defaultDeploy Storage  Deploy the Storage use the below command:  $ kubectl apply -f sample.yaml Check the Storage in Kubernetes:   If you deploy the storage with the internal type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample 3 elasticsearch 7.5.1 internal  If you deploy the storage with the external type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample elasticsearch 7.5.1 external Check the Statefulset in Kubernetes:  $ kubectl get statefulset NAME READY AGE sample-elasticsearch 3/3 7s Specify Storage Name in OAP server Here we modify the default OAP server configuration file,the new yaml file as follows:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServermetadata:name:defaultspec:version:9.5.0instances:1image:apache/skywalking-oap-server:9.5.0service:template:type:ClusterIPstorage:name:sample Deploy the OAP server use the new yaml file:  $ kubectl apply -f oap.yaml Check the OAP server in Kubernetes:  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS sample 1 1 sample-oap.default Check whether the pod generated by OAP server is running correctly.  
$ kubectl get pod -l app=oap NAME READY STATUS RESTARTS AGE sample-oap-5bc79567b7-tkw6q 1/1 Running 0 6m31s ","excerpt":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow …","ref":"/docs/skywalking-swck/next/examples/storage/","title":"Storage Usage"},{"body":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow Operator installation instrument to install the operator.\nDefine Storage with default setting  sample.yaml(use the internal type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:internalversion:7.5.1instances:3image:docker.elastic.co/elasticsearch/elasticsearch:7.5.1security:user:secretName:defaulttls:truesample.yaml(use the external type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:externaladdress:\u0026#34;https://elasticsearch\u0026#34;security:user:secretName:defaultDeploy Storage  Deploy the Storage use the below command:  $ kubectl apply -f sample.yaml Check the Storage in Kubernetes:   If you deploy the storage with the internal type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample 3 elasticsearch 7.5.1 internal  If you deploy the storage with the external type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample elasticsearch 7.5.1 external Check the Statefulset in Kubernetes:  $ kubectl get statefulset NAME READY AGE sample-elasticsearch 3/3 7s Specify Storage Name in OAP server Here we modify the default OAP server configuration file,the new yaml file as follows:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServermetadata:name:defaultspec:version:9.5.0instances:1image:apache/skywalking-oap-server:9.5.0service:template:type:ClusterIPstorage:name:sample Deploy the OAP server use the new yaml file:  $ kubectl apply -f oap.yaml Check the OAP server in Kubernetes:  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS sample 1 1 sample-oap.default Check whether the pod generated by OAP server is running correctly.  $ kubectl get pod -l app=oap NAME READY STATUS RESTARTS AGE sample-oap-5bc79567b7-tkw6q 1/1 Running 0 6m31s ","excerpt":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow …","ref":"/docs/skywalking-swck/v0.9.0/examples/storage/","title":"Storage Usage"},{"body":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling backend cluster capacity up and down Changing backend cluster configuration Injecting configuration into the target cluster. Securing traffic between target clusters and backend cluster, or between backend cluster with TLS certificate  Motivation If the user of SkyWalking decided to deploy it into Kubernetes, there’re some critical challenges for them.\nFirst of them is the complex of deployment, it doesn’t only mean the OAP server and storage cluster, but also include configuring target cluster to send data to backend. Then they might struggle to keep all of them reliable. The size of the data transferred is very big and the cost of data stored is very high. 
The user usually faces some problems, for instance, OAP server stuck, Elasticsearch cluster GC rate sharply increases, the system load of some OAP instances is much more than others, and etc.\nWith the help of CRDs and the Controller, we can figure out the above problems and give users a more pleasing experience when using SWCK.\nProposal Production Design I proposed two crucial components for SWCK, backend operator and target injector. The first one intends to solve the problems of the backend operation, and another focus on simplifying the configuration of the target cluster.\nThey should be built as two separate binary/image, then are installed according to user’s requirements.\nBackend Operator The operator might be a GO application that manages and monitors other components, for example, OAP pods, storage pods(ES, MySQL, and etc.), ingress/entry and configuration.\nIt should be capable of HA, performance, and scalability.\nIt should also have the following capabilities:\n Defining CRDs for provisioning and configuring Provisioning backend automatically Splitting OAP instances according to their type(L1/L2), improving the ratio of them. Performance tuning of OAP and storage. Updating configuration dynamically, irrespectively it’s dynamic or not. Upgrading mirror version seamlessly. Health checking and failure recovery Collecting and analyzing metrics and logs, abnormal detection Horizontal scaling and scheduling tuning. Loadbalancing input gPRC stream and GraphQL querying. Supporting externally hosted storage service. Securing traffic  The above items should be accomplished in several versions/releases. The developer should sort the priority of them and grind the design.\nTarget injector The injector can inject agent lib and configuration into the target cluster automatically, enable/disable distributed tracing according to labels marked on resources or namespace.\nIt also integrates backend with service mesh platform, for example, Istio.\nIt should be a GO application and a GO lib to be invoked by swctl to generate pod YAMLs manually.\nTechnology Selection  Development Language: GO Operator dev tool: TBD Building tool: Make(Docker for windows) Installation: Helm3 chart Repository: github.com/apache/skywalking-swck CI: Github action  ","excerpt":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling …","ref":"/docs/skywalking-swck/latest/design/proposal/","title":"Summary"},{"body":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling backend cluster capacity up and down Changing backend cluster configuration Injecting configuration into the target cluster. Securing traffic between target clusters and backend cluster, or between backend cluster with TLS certificate  Motivation If the user of SkyWalking decided to deploy it into Kubernetes, there’re some critical challenges for them.\nFirst of them is the complex of deployment, it doesn’t only mean the OAP server and storage cluster, but also include configuring target cluster to send data to backend. Then they might struggle to keep all of them reliable. The size of the data transferred is very big and the cost of data stored is very high. 
The user usually faces some problems, for instance, OAP server stuck, Elasticsearch cluster GC rate sharply increases, the system load of some OAP instances is much more than others, and etc.\nWith the help of CRDs and the Controller, we can figure out the above problems and give users a more pleasing experience when using SWCK.\nProposal Production Design I proposed two crucial components for SWCK, backend operator and target injector. The first one intends to solve the problems of the backend operation, and another focus on simplifying the configuration of the target cluster.\nThey should be built as two separate binary/image, then are installed according to user’s requirements.\nBackend Operator The operator might be a GO application that manages and monitors other components, for example, OAP pods, storage pods(ES, MySQL, and etc.), ingress/entry and configuration.\nIt should be capable of HA, performance, and scalability.\nIt should also have the following capabilities:\n Defining CRDs for provisioning and configuring Provisioning backend automatically Splitting OAP instances according to their type(L1/L2), improving the ratio of them. Performance tuning of OAP and storage. Updating configuration dynamically, irrespectively it’s dynamic or not. Upgrading mirror version seamlessly. Health checking and failure recovery Collecting and analyzing metrics and logs, abnormal detection Horizontal scaling and scheduling tuning. Loadbalancing input gPRC stream and GraphQL querying. Supporting externally hosted storage service. Securing traffic  The above items should be accomplished in several versions/releases. The developer should sort the priority of them and grind the design.\nTarget injector The injector can inject agent lib and configuration into the target cluster automatically, enable/disable distributed tracing according to labels marked on resources or namespace.\nIt also integrates backend with service mesh platform, for example, Istio.\nIt should be a GO application and a GO lib to be invoked by swctl to generate pod YAMLs manually.\nTechnology Selection  Development Language: GO Operator dev tool: TBD Building tool: Make(Docker for windows) Installation: Helm3 chart Repository: github.com/apache/skywalking-swck CI: Github action  ","excerpt":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling …","ref":"/docs/skywalking-swck/next/design/proposal/","title":"Summary"},{"body":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling backend cluster capacity up and down Changing backend cluster configuration Injecting configuration into the target cluster. Securing traffic between target clusters and backend cluster, or between backend cluster with TLS certificate  Motivation If the user of SkyWalking decided to deploy it into Kubernetes, there’re some critical challenges for them.\nFirst of them is the complex of deployment, it doesn’t only mean the OAP server and storage cluster, but also include configuring target cluster to send data to backend. Then they might struggle to keep all of them reliable. The size of the data transferred is very big and the cost of data stored is very high. 
The user usually faces some problems, for instance, OAP server stuck, Elasticsearch cluster GC rate sharply increases, the system load of some OAP instances is much more than others, and etc.\nWith the help of CRDs and the Controller, we can figure out the above problems and give users a more pleasing experience when using SWCK.\nProposal Production Design I proposed two crucial components for SWCK, backend operator and target injector. The first one intends to solve the problems of the backend operation, and another focus on simplifying the configuration of the target cluster.\nThey should be built as two separate binary/image, then are installed according to user’s requirements.\nBackend Operator The operator might be a GO application that manages and monitors other components, for example, OAP pods, storage pods(ES, MySQL, and etc.), ingress/entry and configuration.\nIt should be capable of HA, performance, and scalability.\nIt should also have the following capabilities:\n Defining CRDs for provisioning and configuring Provisioning backend automatically Splitting OAP instances according to their type(L1/L2), improving the ratio of them. Performance tuning of OAP and storage. Updating configuration dynamically, irrespectively it’s dynamic or not. Upgrading mirror version seamlessly. Health checking and failure recovery Collecting and analyzing metrics and logs, abnormal detection Horizontal scaling and scheduling tuning. Loadbalancing input gPRC stream and GraphQL querying. Supporting externally hosted storage service. Securing traffic  The above items should be accomplished in several versions/releases. The developer should sort the priority of them and grind the design.\nTarget injector The injector can inject agent lib and configuration into the target cluster automatically, enable/disable distributed tracing according to labels marked on resources or namespace.\nIt also integrates backend with service mesh platform, for example, Istio.\nIt should be a GO application and a GO lib to be invoked by swctl to generate pod YAMLs manually.\nTechnology Selection  Development Language: GO Operator dev tool: TBD Building tool: Make(Docker for windows) Installation: Helm3 chart Repository: github.com/apache/skywalking-swck CI: Github action  ","excerpt":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling …","ref":"/docs/skywalking-swck/v0.9.0/design/proposal/","title":"Summary"},{"body":"Support ActiveMQ classic Monitoring Motivation Apache ActiveMQ Classic is a popular and powerful open source messaging and Integration Patterns server. 
It supports many Cross Language Clients and Protocols, comes with easy to use Enterprise Integration Patterns and many advanced features.\nNow I want to add ActiveMQ Classic monitoring via the OpenTelemetry Collector which fetches metrics from jmx prometheus exporter run as a Java Agent.\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes Apache ActiveMQ Classic has extensive support for JMX to allow you to monitor and control the behavior of the broker via the JMX MBeans.\nJmx prometheus exporter collects metrics data from ActiveMQ classic, this exporter is intended to be run as a Java Agent, exposing a HTTP server and serving metrics of the local JVM.\nUsing openTelemetry receiver to fetch these metrics to SkyWalking OAP server.\nActiveMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     System Load Average Count meter_activemq_cluster_system_load_average The average system load, range:[0, 10000]. JMX Prometheus Exporter   Thread Count Count meter_activemq_cluster_thread_count Threads currently used by the JVM. JMX Prometheus Exporter   Init Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_init The initial amount of heap memory available. JMX Prometheus Exporter   Committed Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_committed The memory is guaranteed to be available for the JVM to use. JMX Prometheus Exporter   Used Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_used The amount of JVM heap memory currently in use. JMX Prometheus Exporter   Max Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_max The maximum possible size of the heap memory. JMX Prometheus Exporter   GC G1 Old Collection Count Count meter_activemq_cluster_gc_g1_old_collection_count The gc count of G1 Old Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Count Count meter_activemq_cluster_gc_g1_young_collection_count The gc count of G1 Young Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Old Collection Time ms meter_activemq_cluster_gc_g1_old_collection_time The gc time spent in G1 Old Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Time ms meter_activemq_cluster_gc_g1_young_collection_time The gc time spent in G1 Young Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC Parallel Old Collection Count Count meter_activemq_cluster_gc_parallel_old_collection_count The gc count of Parallel Old Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Count Count meter_activemq_cluster_gc_parallel_young_collection_count The gc count of Parallel Young Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Old Collection Time ms meter_activemq_cluster_gc_parallel_old_collection_time The gc time spent in Parallel Old Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Time ms meter_activemq_cluster_gc_parallel_young_collection_time The gc time spent in Parallel Young Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   Enqueue Rate Count/s meter_activemq_cluster_enqueue_rate Number of messages that have been sent to the cluster per second(JDK[6,8]). JMX Prometheus Exporter   Dequeue Rate Count/s meter_activemq_cluster_dequeue_rate Number of messages that have been acknowledged or discarded on the cluster per second. 
JMX Prometheus Exporter   Dispatch Rate Count/s meter_activemq_cluster_dispatch_rate Number of messages that has been delivered to consumers per second. JMX Prometheus Exporter   Expired Rate Count/s meter_activemq_cluster_expired_rate Number of messages that have been expired per second. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_cluster_average_enqueue_time The average time a message was held on this cluster. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_cluster_max_enqueue_time The max time a message was held on this cluster. JMX Prometheus Exporter    ActiveMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime sec meter_activemq_broker_uptime Uptime of the broker in day. JMX Prometheus Exporter   State  meter_activemq_broker_state If slave broker 1 else 0. JMX Prometheus Exporter   Current Connections Count meter_activemq_broker_current_connections The number of clients connected to the broker currently. JMX Prometheus Exporter   Current Producer Count Count meter_activemq_broker_current_producer_count The number of producers currently attached to the broker. JMX Prometheus Exporter   Current Consumer Count Count meter_activemq_broker_current_consumer_count The number of consumers consuming messages from the broker. JMX Prometheus Exporter   Producer Count Count meter_activemq_broker_producer_count Number of message producers active on destinations. JMX Prometheus Exporter   Consumer Count Count meter_activemq_broker_consumer_count Number of message consumers subscribed to destinations. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_broker_enqueue_count The total number of messages sent to the broker. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_broker_dequeue_count The total number of messages the broker has delivered to consumers. JMX Prometheus Exporter   Enqueue Rate Count/sec meter_activemq_broker_enqueue_rate The total number of messages sent to the broker per second. JMX Prometheus Exporter   Dequeue Rate Count/sec meter_activemq_broker_dequeue_rate The total number of messages the broker has delivered to consumers per second. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_broker_memory_percent_usage Percentage of configured memory used by the broker. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_broker_memory_percent_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Limit Bytes meter_activemq_broker_memory_limit Memory limited used for holding undelivered messages before paging to temporary storage. JMX Prometheus Exporter   Store Percent Usage % meter_activemq_broker_store_percent_usage Percentage of available disk space used for persistent message storage. JMX Prometheus Exporter   Store Limit Bytes meter_activemq_broker_store_limit Disk limited used for persistent messages before producers are blocked. JMX Prometheus Exporter   Temp Percent Usage Bytes meter_activemq_broker_temp_percent_usage Percentage of available disk space used for non-persistent message storage. JMX Prometheus Exporter   Temp Limit Bytes meter_activemq_broker_temp_limit Disk limited used for non-persistent messages and temporary data before producers are blocked. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_broker_average_message_size Average message size on this broker. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_broker_max_message_size Max message size on this broker. 
JMX Prometheus Exporter   Queue Size Count meter_activemq_broker_queue_size Number of messages on this broker that have been dispatched but not acknowledged. JMX Prometheus Exporter    ActiveMQ Destination Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Producer Count Count meter_activemq_destination_producer_count Number of producers attached to this destination. JMX Prometheus Exporter   Consumer Count Count meter_activemq_destination_consumer_count Number of consumers subscribed to this destination. JMX Prometheus Exporter   Topic Consumer Count Count meter_activemq_destination_topic_consumer_count Number of consumers subscribed to the topics. JMX Prometheus Exporter   Queue Size Count meter_activemq_destination_queue_size The number of messages that have not been acknowledged by a consumer. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_destination_memory_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_destination_memory_percent_usage Percentage of configured memory used by the destination. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_destination_enqueue_count The number of messages sent to the destination. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_destination_dequeue_count The number of messages the destination has delivered to consumers. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_destination_average_enqueue_time The average time a message was held on this destination. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_destination_max_enqueue_time The max time a message was held on this destination. JMX Prometheus Exporter   Dispatch Count Count meter_activemq_destination_dispatch_count Number of messages that has been delivered to consumers. JMX Prometheus Exporter   Expired Count Count meter_activemq_destination_expired_count Number of messages that have been expired. JMX Prometheus Exporter   Inflight Count Count meter_activemq_destination_inflight_count Number of messages that have been dispatched to but not acknowledged by consumers. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_destination_average_message_size Average message size on this destination. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_destination_max_message_size Max message size on this destination. JMX Prometheus Exporter    Imported Dependencies libs and their licenses. No new dependency.\nCompatibility no breaking changes.\nGeneral usage docs ","excerpt":"Support ActiveMQ classic Monitoring Motivation Apache ActiveMQ Classic is a popular and powerful …","ref":"/docs/main/next/en/swip/swip-8/","title":"Support ActiveMQ classic Monitoring"},{"body":"Support available layers of service in the topology. Motivation UI could jump to the service dashboard and query service hierarchy from the topology node. For now topology node includes name and ID but without layer, as the service could have multiple layers, the limitation is that it is only works on the current layer which the topology represents:\n UI could not jump into another layer\u0026rsquo;s dashboard of the service. UI could not query the service hierarchy from the topology node if the node is not in current layer.  
Here are typical use cases where the UI should have a chance to jump into another layer\u0026rsquo;s dashboard of the service:\n In the mesh topology, mesh(layer MESH) and mesh-dp(layer MESH_DP) share a similar topology, one node will have two layers. In the mesh topology, agent(layer GENERAL) + virtual database(layer VIRTUAL_DATABASE), the node is in different layers.  Both of these two cases have a hybrid layer topology. If we could support that, we could have a better x-layer interaction.\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes Add the layers info into the topology node:\n When building the topology node, fetch the layers info from the service according to the service ID. Return the layers info in the Node when querying the topology.  Imported Dependencies libs and their licenses. No new library is planned to be added to the codebase.\nCompatibility About the protocol, there should be no breaking changes, but enhancements only. A new field layers is going to be added to the Node in the query protocol topology.graphqls.\ntype Node {\n # The service ID of the node.\n id: ID!\n # The literal name of the #id.\n name: String!\n # The type name may be\n # 1. The service provider/middleware tech, such as: Tomcat, SpringMVC\n # 2. Conjectural Service, e.g. MySQL, Redis, Kafka\n type: String\n # It is a conjecture node or real node, to represent a service or endpoint.\n isReal: Boolean!\n # The layers of the service.\n layers: [String!]!\n}\nGeneral usage docs This proposal doesn\u0026rsquo;t impact the end user in any way of using SkyWalking. The remarkable change will be in the UI topology map: users could jump into the proper layer\u0026rsquo;s service dashboard and query the service hierarchy from the topology node.\n","excerpt":"Support available layers of service in the topology. Motivation UI could jump to the service …","ref":"/docs/main/next/en/swip/swip-4/","title":"Support available layers of service in the topology."},{"body":"Support ClickHouse Monitoring Motivation ClickHouse is a high-performance, column-oriented SQL database management system (DBMS) for online analytical processing (OLAP). It is available as both open-source software and a cloud offering.\nNow I want to add ClickHouse monitoring via the OpenTelemetry Collector, which fetches metrics from its own HTTP endpoint that exposes metrics data for Prometheus (since ClickHouse v20.1.2.4). The ClickHouse Exporter is only needed for old ClickHouse versions; modern versions have an embedded Prometheus endpoint.\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes ClickHouse exposes its own metrics via an HTTP endpoint to the OpenTelemetry Collector; the SkyWalking OpenTelemetry receiver fetches these metrics.\nThe exposed metrics are from the system.metrics table / the system.events table / the system.asynchronous_metrics table.\nClickHouse Instance Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CpuUsage count meter_clickhouse_instance_cpu_usage CPU time spent seen by the OS per second (according to ClickHouse.system.dashboard.CPU Usage (cores)). ClickHouse   MemoryUsage percentage meter_clickhouse_instance_memory_usage Total amount of memory (bytes) allocated by the server / total amount of OS memory. ClickHouse   MemoryAvailable percentage meter_clickhouse_instance_memory_available Total amount of memory (bytes) available for program / total amount of OS memory. ClickHouse   Uptime sec meter_clickhouse_instance_uptime The server uptime in seconds. 
It includes the time spent for server initialization before accepting connections. ClickHouse   Version string meter_clickhouse_instance_version Version of the server in a single integer number in base-1000. ClickHouse   FileOpen count meter_clickhouse_instance_file_open Number of files opened. ClickHouse    ClickHouse Network Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     TcpConnections count meter_clickhouse_instance_tcp_connections / meter_clickhouse_tcp_connections Number of connections to the TCP server. ClickHouse   MysqlConnections count meter_clickhouse_instance_mysql_connections / meter_clickhouse_mysql_connections Number of client connections using the MySQL protocol. ClickHouse   HttpConnections count meter_clickhouse_instance_http_connections / meter_clickhouse_http_connections Number of connections to the HTTP server. ClickHouse   InterserverConnections count meter_clickhouse_instance_interserver_connections / meter_clickhouse_interserver_connections Number of connections from other replicas to fetch parts. ClickHouse   PostgresqlConnections count meter_clickhouse_instance_postgresql_connections / meter_clickhouse_postgresql_connections Number of client connections using the PostgreSQL protocol. ClickHouse   ReceiveBytes bytes meter_clickhouse_instance_network_receive_bytes / meter_clickhouse_network_receive_bytes Total number of bytes received from the network. ClickHouse   SendBytes bytes meter_clickhouse_instance_network_send_bytes / meter_clickhouse_network_send_bytes Total number of bytes sent to the network. ClickHouse    ClickHouse Query Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     QueryCount count meter_clickhouse_instance_query / meter_clickhouse_query Number of executing queries. ClickHouse   SelectQueryCount count meter_clickhouse_instance_query_select / meter_clickhouse_query_select Number of executing queries, but only for SELECT queries. ClickHouse   InsertQueryCount count meter_clickhouse_instance_query_insert / meter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   SelectQueryRate count/sec meter_clickhouse_instance_query_select_rate / meter_clickhouse_query_select_rate Number of SELECT queries per second. ClickHouse   InsertQueryRate count/sec meter_clickhouse_instance_query_insert_rate / meter_clickhouse_query_insert_rate Number of INSERT queries per second. ClickHouse   Querytime microsec meter_clickhouse_instance_querytime_microseconds / meter_clickhouse_querytime_microseconds Total time of all queries. ClickHouse   SelectQuerytime microsec meter_clickhouse_instance_querytime_select_microseconds / meter_clickhouse_querytime_select_microseconds Total time of SELECT queries. ClickHouse   InsertQuerytime microsec meter_clickhouse_instance_querytime_insert_microseconds / meter_clickhouse_querytime_insert_microseconds Total time of INSERT queries. ClickHouse   OtherQuerytime microsec meter_clickhouse_instance_querytime_other_microseconds / meter_clickhouse_querytime_other_microseconds Total time of queries that are not SELECT or INSERT. ClickHouse   QuerySlowCount count meter_clickhouse_instance_query_slow / meter_clickhouse_query_slow Number of reads from a file that were slow. ClickHouse    ClickHouse Insertion Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     InsertQueryCount count meter_clickhouse_instance_query_insert / meter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. 
ClickHouse   InsertedRowCount count meter_clickhouse_instance_inserted_rows / meter_clickhouse_inserted_rows Number of rows INSERTed to all tables. ClickHouse   InsertedBytes bytes meter_clickhouse_instance_inserted_bytes / meter_clickhouse_inserted_bytes Number of bytes INSERTed to all tables. ClickHouse   DelayedInsertCount count meter_clickhouse_instance_delayed_insert / meter_clickhouse_delayed_insert Number of times the INSERT of a block to a MergeTree table was throttled due to a high number of active data parts for a partition. ClickHouse    ClickHouse Replicas Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     ReplicatedChecks count meter_clickhouse_instance_replicated_checks / meter_clickhouse_replicated_checks Number of data parts checking for consistency. ClickHouse   ReplicatedFetch count meter_clickhouse_instance_replicated_fetch / meter_clickhouse_replicated_fetch Number of data parts being fetched from replicas. ClickHouse   ReplicatedSend count meter_clickhouse_instance_replicated_send / meter_clickhouse_replicated_send Number of data parts being sent to replicas. ClickHouse    ClickHouse MergeTree Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     BackgroundMergeCount count meter_clickhouse_instance_background_merge / meter_clickhouse_background_merge Number of executing background merges. ClickHouse   MergeRows count meter_clickhouse_instance_merge_rows / meter_clickhouse_merge_rows Rows read for background merges. This is the number of rows before merge. ClickHouse   MergeUncompressedBytes bytes meter_clickhouse_instance_merge_uncompressed_bytes / meter_clickhouse_merge_uncompressed_bytes Uncompressed bytes (for columns as they are stored in memory) that were read for background merges. This is the number before merge. ClickHouse   MoveCount count meter_clickhouse_instance_move / meter_clickhouse_move Number of currently executing moves. ClickHouse   PartsActive count meter_clickhouse_instance_parts_active / meter_clickhouse_parts_active Active data parts, used by current and upcoming SELECTs. ClickHouse   MutationsCount count meter_clickhouse_instance_mutations / meter_clickhouse_mutations Number of mutations (ALTER DELETE/UPDATE). ClickHouse    ClickHouse Kafka Table Engine Supported Metrics When the table engine works with Apache Kafka.\nKafka lets you:\n Publish or subscribe to data flows. Organize fault-tolerant storage. Process streams as they become available.     Monitoring Panel Unit Metric Name Description Data Source     KafkaMessagesRead count meter_clickhouse_instance_kafka_messages_read / meter_clickhouse_kafka_messages_read Number of Kafka messages already processed by ClickHouse. ClickHouse   KafkaWrites count meter_clickhouse_instance_kafka_writes / meter_clickhouse_kafka_writes Number of writes (inserts) to Kafka tables. ClickHouse   KafkaConsumers count meter_clickhouse_instance_kafka_consumers / meter_clickhouse_kafka_consumers Number of active Kafka consumers. ClickHouse   KafkaProducers count meter_clickhouse_instance_kafka_producers / meter_clickhouse_kafka_producers Number of active Kafka producers created. ClickHouse    ClickHouse ZooKeeper Supported Metrics ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted.\n   Monitoring Panel Unit Metric Name Description Data Source     ZookeeperSession count meter_clickhouse_instance_zookeeper_session / meter_clickhouse_zookeeper_session Number of sessions (connections) to ZooKeeper. 
ClickHouse   ZookeeperWatch count meter_clickhouse_instance_zookeeper_watch / meter_clickhouse_zookeeper_watch Number of watches (event subscriptions) in ZooKeeper. ClickHouse   ZookeeperBytesSent bytes meter_clickhouse_instance_zookeeper_bytes_sent / meter_clickhouse_zookeeper_bytes_sent Number of bytes sent over the network while communicating with ZooKeeper. ClickHouse   ZookeeperBytesReceive bytes meter_clickhouse_instance_zookeeper_bytes_received / meter_clickhouse_zookeeper_bytes_received Number of bytes received over the network while communicating with ZooKeeper. ClickHouse    ClickHouse Keeper Supported Metrics ClickHouse Keeper provides the coordination system for data replication and distributed DDL query execution. ClickHouse Keeper is compatible with ZooKeeper.\nClickHouse Keeper can work in embedded mode or standalone cluster mode; the metrics below are for embedded mode.\n   Monitoring Panel Unit Metric Name Description Data Source     KeeperAliveConnections count meter_clickhouse_instance_keeper_connections_alive / meter_clickhouse_keeper_connections_alive Number of alive connections for embedded ClickHouse Keeper. ClickHouse   KeeperOutstandingRequests count meter_clickhouse_instance_keeper_outstanding_requests / meter_clickhouse_keeper_outstanding_requests Number of outstanding requests for embedded ClickHouse Keeper. ClickHouse    Imported Dependencies libs and their licenses. No new dependency.\nCompatibility no breaking changes.\nGeneral usage docs ","excerpt":"Support ClickHouse Monitoring Motivation ClickHouse is a high-performance, column-oriented SQL …","ref":"/docs/main/next/en/swip/swip-5/","title":"Support ClickHouse Monitoring"},{"body":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking provides a Java agent plugin development guide to help developers build new plugins.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar to the @Trace toolkit, but it requires no code change and is more powerful, e.g. it can provide tags and logs.\nHow to configure Implementing enhancements to custom classes requires the following steps.\n Activate the plugin: move optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which points to the rule file, e.g. /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. 
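For the agent.config setting above, a minimal fragment might look like the sketch below; the path is just an example placeholder, and the full rule-file example follows.

```properties
# Minimal sketch: point the customize-enhance plugin at the rule file (example path).
plugin.customize.enhance_file=/absolute/path/to/customize_enhance.xml
```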
\u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; 
static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","excerpt":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/customize-enhance-trace/","title":"Support custom enhance"},{"body":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. 
The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag 
key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      
","excerpt":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/customize-enhance-trace/","title":"Support custom enhance"},{"body":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; 
operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. 
The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","excerpt":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/customize-enhance-trace/","title":"Support custom enhance"},{"body":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; 
\u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of 
the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","excerpt":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/customize-enhance-trace/","title":"Support custom enhance"},{"body":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. 
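As a point of comparison with the @Trace toolkit mentioned in the introduction, a hand-instrumented equivalent of one rule might look roughly like the sketch below. It assumes the apm-toolkit-trace annotations (@Trace, @Tag) and a hypothetical business class; the XML-driven rule file that avoids such code changes follows.

```java
// Hypothetical hand-written equivalent of one <method> rule using the toolkit annotations.
// The XML-based plugin achieves the same effect without touching the source code.
import org.apache.skywalking.apm.toolkit.trace.Tag;
import org.apache.skywalking.apm.toolkit.trace.Trace;

public class OrderService {                        // example class, not part of the plugin

    @Trace(operationName = "/method_2")            // same operation name as the XML rule
    @Tag(key = "tag_1", value = "arg[0]")          // tags the first argument, like <tag key="tag_1">arg[0]</tag>
    public String method(String name, int count) {
        return name + ":" + count;                 // business logic placeholder
    }
}
```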
\u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; 
static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","excerpt":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/customize-enhance-trace/","title":"Support custom enhance"},{"body":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. 
You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","excerpt":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/","title":"Support custom trace ignore"},{"body":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","excerpt":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/","title":"Support custom trace ignore"},{"body":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. 
Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","excerpt":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/","title":"Support custom trace ignore"},{"body":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","excerpt":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/","title":"Support custom trace ignore"},{"body":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","excerpt":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/","title":"Support custom trace ignore"},{"body":"Support RocketMQ Monitoring Motivation RocketMQ is a cloud native messaging and streaming platform, making it simple to build event-driven applications. 
Now that Skywalking can monitor OpenTelemetry metrics, I want to add RocketMQ monitoring via the OpenTelemetry Collector, which fetches metrics from the RocketMQ Exporter\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes rocketmq-exporter collects metrics from RocketMQ and transport the data to OpenTelemetry collector, using SkyWalking openTelemetry receiver to receive these metrics。 Provide cluster, broker, and topic dimensions monitoring.\nRocketMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Messages Produced Today Count meter_rocketmq_cluster_messages_produced_today The number of cluster messages produced today. RocketMQ Exporter   Messages Consumed Today Count meter_rocketmq_cluster_messages_consumed_today The number of cluster messages consumed today. RocketMQ Exporter   Total Producer Tps Msg/sec meter_rocketmq_cluster_total_producer_tps The number of messages produced per second. RocketMQ Exporter   Total Consume Tps Msg/sec meter_rocketmq_cluster_total_consumer_tps The number of messages consumed per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_cluster_producer_message_size The max size of a message produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_cluster_consumer_message_size The max size of the consumed message per second. RocketMQ Exporter   Messages Produced Until Yesterday Count meter_rocketmq_cluster_messages_produced_until_yesterday The total number of messages put until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Messages Consumed Until Yesterday Count meter_rocketmq_cluster_messages_consumed_until_yesterday The total number of messages read until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Max Consumer Latency ms meter_rocketmq_cluster_max_consumer_latency The max number of consumer latency. RocketMQ Exporter   Max CommitLog Disk Ratio % meter_rocketmq_cluster_max_commitLog_disk_ratio The max utilization ratio of the commit log disk. RocketMQ Exporter   CommitLog Disk Ratio % meter_rocketmq_cluster_commitLog_disk_ratio The utilization ratio of the commit log disk per broker IP. RocketMQ Exporter   Pull ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_pull_threadPool_queue_head_wait_time The wait time in milliseconds for pulling threadPool queue per broker IP. RocketMQ Exporter   Send ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_send_threadPool_queue_head_wait_time The wait time in milliseconds for sending threadPool queue per broker IP. RocketMQ Exporter   Topic Count Count meter_rocketmq_cluster_topic_count The number of topics that received messages from the producer. RocketMQ Exporter   Broker Count Count meter_rocketmq_cluster_broker_count The number of brokers that received messages from the producer. RocketMQ Exporter    RocketMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Produce TPS Msg/sec meter_rocketmq_broker_produce_tps The number of broker produces messages per second. RocketMQ Exporter   Consume QPS Msg/sec meter_rocketmq_broker_consume_qps The number of broker consumes messages per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_broker_producer_message_size The max size of the messages produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_broker_consumer_message_size The max size of the messages consumed per second. 
RocketMQ Exporter    RocketMQ Topic Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Max Producer Message Size Byte meter_rocketmq_topic_max_producer_message_size The maximum number of messages produced. RocketMQ Exporter   Max Consumer Message Size Byte meter_rocketmq_topic_max_consumer_message_size The maximum number of messages consumed. RocketMQ Exporter   Consumer Latency ms meter_rocketmq_topic_consumer_latency Consumption delay time of a consumer group. RocketMQ Exporter   Producer Tps Msg/sec meter_rocketmq_topic_producer_tps The number of messages produced per second. RocketMQ Exporter   Consumer Group Tps Msg/sec meter_rocketmq_topic_consumer_group_tps The number of messages consumed per second per consumer group. RocketMQ Exporter   Producer Offset Count meter_rocketmq_topic_producer_offset The max progress of a topic\u0026rsquo;s production message. RocketMQ Exporter   Consumer Group Offset Count meter_rocketmq_topic_consumer_group_offset The max progress of a topic\u0026rsquo;s consumption message per consumer group. RocketMQ Exporter   Producer Message Size Byte/sec meter_rocketmq_topic_producer_message_size The max size of messages produced per second. RocketMQ Exporter   Consumer Message Size Byte/sec meter_rocketmq_topic_consumer_message_size The max size of messages consumed per second. RocketMQ Exporter   Consumer Group_Count Count meter_rocketmq_topic_consumer_group_count The number of consumer groups. RocketMQ Exporter   Broker Count Count meter_rocketmq_topic_broker_count The number of topics that received messages from the producer. RocketMQ Exporter    Imported Dependencies libs and their licenses. No new dependency.\nCompatibility no breaking changes.\nGeneral usage docs This feature is out of the box.\n","excerpt":"Support RocketMQ Monitoring Motivation RocketMQ is a cloud native messaging and streaming platform, …","ref":"/docs/main/next/en/swip/swip-3/","title":"Support RocketMQ Monitoring"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Enable TLS  Enable (m)TLS on the OAP server side, read more on this documentation. Following the configuration to enable (m)TLS on the agent side.     Name Environment Variable Required Type Description     reporter.grpc.tls.enable SW_AGENT_REPORTER_GRPC_TLS_ENABLE TLS/mTLS Enable (m)TLS on the gRPC reporter.   reporter.grpc.tls.ca_path SW_AGENT_REPORTER_GRPC_TLS_CA_PATH TLS The path of the CA certificate file. eg: /path/to/ca.cert.   
reporter.grpc.tls.client.key_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_KEY_PATH mTLS The path of the client private key file, eg: /path/to/client.pem.   reporter.grpc.tls.client.client_cert_chain_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_CERT_CHAIN_PATH mTLS The path of the client certificate file, eg: /path/to/client.crt.   reporter.grpc.tls.insecure_skip_verify SW_AGENT_REPORTER_GRPC_TLS_INSECURE_SKIP_VERIFY TLS/mTLS Skip the server certificate and domain name verification.    ","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-go/latest/en/advanced-features/grpc-tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Enable TLS  Enable (m)TLS on the OAP server side, read more on this documentation. Following the configuration to enable (m)TLS on the agent side.     Name Environment Variable Required Type Description     reporter.grpc.tls.enable SW_AGENT_REPORTER_GRPC_TLS_ENABLE TLS/mTLS Enable (m)TLS on the gRPC reporter.   reporter.grpc.tls.ca_path SW_AGENT_REPORTER_GRPC_TLS_CA_PATH TLS The path of the CA certificate file. eg: /path/to/ca.cert.   reporter.grpc.tls.client.key_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_KEY_PATH mTLS The path of the client private key file, eg: /path/to/client.pem.   reporter.grpc.tls.client.client_cert_chain_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_CERT_CHAIN_PATH mTLS The path of the client certificate file, eg: /path/to/client.crt.   reporter.grpc.tls.insecure_skip_verify SW_AGENT_REPORTER_GRPC_TLS_INSECURE_SKIP_VERIFY TLS/mTLS Skip the server certificate and domain name verification.    ","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-go/next/en/advanced-features/grpc-tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). 
client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Enable TLS  Enable (m)TLS on the OAP server side, read more on this documentation. Following the configuration to enable (m)TLS on the agent side.     Name Environment Variable Required Type Description     reporter.grpc.tls.enable SW_AGENT_REPORTER_GRPC_TLS_ENABLE TLS/mTLS Enable (m)TLS on the gRPC reporter.   reporter.grpc.tls.ca_path SW_AGENT_REPORTER_GRPC_TLS_CA_PATH TLS The path of the CA certificate file. eg: /path/to/ca.cert.   reporter.grpc.tls.client.key_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_KEY_PATH mTLS The path of the client private key file, eg: /path/to/client.pem.   reporter.grpc.tls.client.client_cert_chain_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_CERT_CHAIN_PATH mTLS The path of the client certificate file, eg: /path/to/client.crt.   reporter.grpc.tls.insecure_skip_verify SW_AGENT_REPORTER_GRPC_TLS_INSECURE_SKIP_VERIFY TLS/mTLS Skip the server certificate and domain name verification.    ","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-go/v0.4.0/en/advanced-features/grpc-tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  
For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. 
In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. 
In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  
For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","excerpt":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/tls/","title":"Support Transport Layer Security (TLS)"},{"body":"Supported Agent Configuration Options Below is the full list of supported configurations you can set to customize the agent behavior, please take some time to read the descriptions for what they can achieve.\n Usage: (Pass in intrusive setup)\n from skywalking import config, agent config.init(YourConfiguration=YourValue)) agent.start()  Usage: (Pass by environment variables)\n export SW_AGENT_YourConfiguration=YourValue Agent Core Configuration Options    Configuration Environment Variable Type Default Value Description     agent_collector_backend_services SW_AGENT_COLLECTOR_BACKEND_SERVICES \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; oap_host:oap_port The backend OAP server address, 11800 is default OAP gRPC port, 12800 is HTTP, Kafka ignores this option and uses kafka_bootstrap_servers option. This option should be changed accordingly with selected protocol   agent_protocol SW_AGENT_PROTOCOL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; grpc The protocol to communicate with the backend OAP, http, grpc or kafka, we highly suggest using grpc in production as it\u0026rsquo;s well optimized than http. The kafka protocol provides an alternative way to submit data to the backend.   agent_name SW_AGENT_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; Python Service Name The name of your awesome Python service   agent_instance_name SW_AGENT_INSTANCE_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; str(uuid.uuid1()).replace('-', \u0026lsquo;') The name of this particular awesome Python service instance   agent_namespace SW_AGENT_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The agent namespace of the Python service (available as tag and the suffix of service name)   kafka_bootstrap_servers SW_KAFKA_BOOTSTRAP_SERVERS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; localhost:9092 A list of host/port pairs to use for establishing the initial connection to your Kafka cluster. It is in the form of host1:port1,host2:port2,\u0026hellip; (used for Kafka reporter protocol)   kafka_namespace SW_KAFKA_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The kafka namespace specified by OAP side SW_NAMESPACE, prepends the following kafka topic names with a -.   
kafka_topic_management SW_KAFKA_TOPIC_MANAGEMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-managements Specifying Kafka topic name for service instance reporting and registering, this should be in sync with OAP   kafka_topic_segment SW_KAFKA_TOPIC_SEGMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-segments Specifying Kafka topic name for Tracing data, this should be in sync with OAP   kafka_topic_log SW_KAFKA_TOPIC_LOG \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-logs Specifying Kafka topic name for Log data, this should be in sync with OAP   kafka_topic_meter SW_KAFKA_TOPIC_METER \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-meters Specifying Kafka topic name for Meter data, this should be in sync with OAP   kafka_reporter_custom_configurations SW_KAFKA_REPORTER_CUSTOM_CONFIGURATIONS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The configs to init KafkaProducer, supports the basic arguments (whose type is either str, bool, or int) listed here This config only works from env variables, each one should be passed in SW_KAFKA_REPORTER_CONFIG_\u0026lt;KEY_NAME\u0026gt;   agent_force_tls SW_AGENT_FORCE_TLS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Use TLS for communication with SkyWalking OAP (no cert required)   agent_authentication SW_AGENT_AUTHENTICATION \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The authentication token to verify that the agent is trusted by the backend OAP, as for how to configure the backend, refer to the yaml.   agent_logging_level SW_AGENT_LOGGING_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; INFO The level of agent self-logs, could be one of CRITICAL, FATAL, ERROR, WARN(WARNING), INFO, DEBUG. Please turn on debug if an issue is encountered to find out what\u0026rsquo;s going on    Agent Core Danger Zone    Configuration Environment Variable Type Default Value Description     agent_collector_heartbeat_period SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 30 The agent will exchange heartbeat message with SkyWalking OAP backend every period seconds   agent_collector_properties_report_period_factor SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The agent will report service instance properties every factor * heartbeat period seconds default: 10*30 = 300 seconds   agent_instance_properties_json SW_AGENT_INSTANCE_PROPERTIES_JSON \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  A custom JSON string to be reported as service instance properties, e.g. {\u0026quot;key\u0026quot;: \u0026quot;value\u0026quot;}   agent_experimental_fork_support SW_AGENT_EXPERIMENTAL_FORK_SUPPORT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False The agent will restart itself in any os.fork()-ed child process. Important Note: it\u0026rsquo;s not suitable for short-lived processes as each one will create a new instance in SkyWalking dashboard in format of service_instance-child(pid). This feature may not work when a precise combination of gRPC + Python 3.7 + subprocess (not fork) is used together. The agent will output a warning log when using on Python 3.7 for such a reason.   agent_queue_timeout SW_AGENT_QUEUE_TIMEOUT \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1 DANGEROUS - This option controls the interval of each bulk report from telemetry data queues Do not modify unless you have evaluated its impact given your service load.    
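For instance, building on the intrusive setup shown above and the option names listed in the Agent Core Configuration Options table, a minimal sketch of a programmatic setup could look like the following; the OAP address, service name, and instance name are placeholders rather than values taken from this document, and it assumes config.init(**kwargs) accepts the documented configuration names as keyword arguments:
# a minimal sketch, assuming config.init(**kwargs) takes the option names documented above
from skywalking import agent, config
config.init(
    agent_collector_backend_services='oap-host:11800',  # placeholder OAP address, 11800 is the default gRPC port
    agent_name='my-python-service',                      # placeholder service name shown in the dashboard
    agent_instance_name='instance-0',                    # optional; defaults to a UUID-based name
    agent_logging_level='INFO',                          # agent self-log level
)
agent.start()
The same option names can equivalently be supplied through the SW_-prefixed environment variables described in the usage note above, in which case no config.init call is needed.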
SW_PYTHON Auto Instrumentation CLI    Configuration Environment Variable Type Default Value Description     agent_sw_python_bootstrap_propagate SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the child process agent bootstrap behavior in sw-python CLI, if set to False, a valid child process will not boot up a SkyWalking Agent. Please refer to the CLI Guide for details.   agent_sw_python_cli_debug_enabled SW_AGENT_SW_PYTHON_CLI_DEBUG_ENABLED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the CLI and agent logging debug mode, if set to True, the CLI and agent will print out debug logs. Please refer to the CLI Guide for details. Important: this config will set agent logging level to DEBUG as well, do not use it in production otherwise it will flood your logs. This normally shouldn\u0026rsquo;t be pass as a simple flag -d will be the same.    Trace Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_trace_reporter_max_buffer_size SW_AGENT_TRACE_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending the segment data to backend, segments beyond this are silently dropped   agent_trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. the current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?.   agent_ignore_suffix SW_AGENT_IGNORE_SUFFIX \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.   correlation_element_max_number SW_CORRELATION_ELEMENT_MAX_NUMBER \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 3 Max element count of the correlation context.   correlation_value_max_length SW_CORRELATION_VALUE_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 128 Max value length of correlation context element.    Profiling Configurations    Configuration Environment Variable Type Default Value Description     agent_profile_active SW_AGENT_PROFILE_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will enable profiler when user create a new profiling task.   agent_collector_get_profile_task_interval SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The number of seconds between two profile task query.   agent_profile_max_parallel SW_AGENT_PROFILE_MAX_PARALLEL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 5 The number of parallel monitor segment count.   agent_profile_duration SW_AGENT_PROFILE_DURATION \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The maximum monitor segment time(minutes), if current segment monitor time out of limit, then stop it.   
agent_profile_dump_max_stack_depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 500 The number of max dump thread stack depth   agent_profile_snapshot_transport_buffer_size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 50 The number of snapshot transport to backend buffer size    Log Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_log_reporter_active SW_AGENT_LOG_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected logs to the OAP or Satellite. Otherwise, it disables the feature.   agent_log_reporter_safe_mode SW_AGENT_LOG_REPORTER_SAFE_MODE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If True, Python agent will filter out HTTP basic auth information from log records. By default, it disables the feature due to potential performance impact brought by regular expression   agent_log_reporter_max_buffer_size SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.   agent_log_reporter_level SW_AGENT_LOG_REPORTER_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; WARNING This config specifies the logger levels of concern, any logs with a level below the config will be ignored.   agent_log_reporter_ignore_filter SW_AGENT_LOG_REPORTER_IGNORE_FILTER \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config customizes whether to ignore the application-defined logger filters, if True, all logs are reported disregarding any filter rules.   agent_log_reporter_formatted SW_AGENT_LOG_REPORTER_FORMATTED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, the log reporter will transmit the logs as formatted. Otherwise, puts logRecord.msg and logRecord.args into message content and tags(argument.n), respectively. Along with an exception tag if an exception was raised. Only applies to logging module.   agent_log_reporter_layout SW_AGENT_LOG_REPORTER_LAYOUT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; %(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s The log reporter formats the logRecord message based on the layout given. Only applies to logging module.   agent_cause_exception_depth SW_AGENT_CAUSE_EXCEPTION_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 This configuration is shared by log reporter and tracer. This config limits agent to report up to limit stacktrace, please refer to [Python traceback](../ https://docs.python.org/3/library/traceback.html#traceback.print_tb) for more explanations.    Meter Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_meter_reporter_active SW_AGENT_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected meters to the OAP or Satellite. Otherwise, it disables the feature.   agent_meter_reporter_max_buffer_size SW_AGENT_METER_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending meter data to backend, meters beyond this are silently dropped.   
agent_meter_reporter_period SW_AGENT_METER_REPORTER_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The interval in seconds between each meter data report   agent_pvm_meter_reporter_active SW_AGENT_PVM_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected Python Virtual Machine (PVM) meters to the OAP or Satellite. Otherwise, it disables the feature.    Plugin Related configurations    Configuration Environment Variable Type Default Value Description     agent_disable_plugins SW_AGENT_DISABLE_PLUGINS \u0026lt;class \u0026lsquo;list\u0026rsquo;\u0026gt; [''] The name patterns in comma-separated pattern, plugins whose name matches one of the pattern won\u0026rsquo;t be installed   plugin_http_http_params_length_threshold SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1024 When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.   plugin_http_ignore_method SW_PLUGIN_HTTP_IGNORE_METHOD \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  Comma-delimited list of http methods to ignore (GET, POST, HEAD, OPTIONS, etc\u0026hellip;)   plugin_sql_parameters_max_length SW_PLUGIN_SQL_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 0 The maximum length of the collected parameter, parameters longer than the specified length will be truncated, length 0 turns off parameter tracing   plugin_pymongo_trace_parameters SW_PLUGIN_PYMONGO_TRACE_PARAMETERS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Indicates whether to collect the filters of pymongo   plugin_pymongo_parameters_max_length SW_PLUGIN_PYMONGO_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of the collected filters, filters longer than the specified length will be truncated   plugin_elasticsearch_trace_dsl SW_PLUGIN_ELASTICSEARCH_TRACE_DSL \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false   plugin_flask_collect_http_params SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Flask plugin should collect the parameters of the request.   plugin_sanic_collect_http_params SW_PLUGIN_SANIC_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Sanic plugin should collect the parameters of the request.   plugin_django_collect_http_params SW_PLUGIN_DJANGO_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Django plugin should collect the parameters of the request.   plugin_fastapi_collect_http_params SW_PLUGIN_FASTAPI_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the FastAPI plugin should collect the parameters of the request.   plugin_bottle_collect_http_params SW_PLUGIN_BOTTLE_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Bottle plugin should collect the parameters of the request.   
plugin_celery_parameters_length SW_PLUGIN_CELERY_PARAMETERS_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of celery functions parameters, longer than this will be truncated, 0 turns off    ","excerpt":"Supported Agent Configuration Options Below is the full list of supported configurations you can set …","ref":"/docs/skywalking-python/latest/en/setup/configuration/","title":"Supported Agent Configuration Options"},{"body":"Supported Agent Configuration Options Below is the full list of supported configurations you can set to customize the agent behavior, please take some time to read the descriptions for what they can achieve.\n Usage: (Pass in intrusive setup)\n from skywalking import config, agent config.init(YourConfiguration=YourValue)) agent.start()  Usage: (Pass by environment variables)\n export SW_AGENT_YourConfiguration=YourValue Agent Core Configuration Options    Configuration Environment Variable Type Default Value Description     agent_collector_backend_services SW_AGENT_COLLECTOR_BACKEND_SERVICES \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; oap_host:oap_port The backend OAP server address, 11800 is default OAP gRPC port, 12800 is HTTP, Kafka ignores this option and uses kafka_bootstrap_servers option. This option should be changed accordingly with selected protocol   agent_protocol SW_AGENT_PROTOCOL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; grpc The protocol to communicate with the backend OAP, http, grpc or kafka, we highly suggest using grpc in production as it\u0026rsquo;s well optimized than http. The kafka protocol provides an alternative way to submit data to the backend.   agent_name SW_AGENT_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; Python Service Name The name of your awesome Python service   agent_instance_name SW_AGENT_INSTANCE_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; str(uuid.uuid1()).replace('-', \u0026lsquo;') The name of this particular awesome Python service instance   agent_namespace SW_AGENT_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The agent namespace of the Python service (available as tag and the suffix of service name)   kafka_bootstrap_servers SW_KAFKA_BOOTSTRAP_SERVERS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; localhost:9092 A list of host/port pairs to use for establishing the initial connection to your Kafka cluster. It is in the form of host1:port1,host2:port2,\u0026hellip; (used for Kafka reporter protocol)   kafka_namespace SW_KAFKA_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The kafka namespace specified by OAP side SW_NAMESPACE, prepends the following kafka topic names with a -.   
kafka_topic_management SW_KAFKA_TOPIC_MANAGEMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-managements Specifying Kafka topic name for service instance reporting and registering, this should be in sync with OAP   kafka_topic_segment SW_KAFKA_TOPIC_SEGMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-segments Specifying Kafka topic name for Tracing data, this should be in sync with OAP   kafka_topic_log SW_KAFKA_TOPIC_LOG \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-logs Specifying Kafka topic name for Log data, this should be in sync with OAP   kafka_topic_meter SW_KAFKA_TOPIC_METER \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-meters Specifying Kafka topic name for Meter data, this should be in sync with OAP   kafka_reporter_custom_configurations SW_KAFKA_REPORTER_CUSTOM_CONFIGURATIONS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The configs to init KafkaProducer, supports the basic arguments (whose type is either str, bool, or int) listed here This config only works from env variables, each one should be passed in SW_KAFKA_REPORTER_CONFIG_\u0026lt;KEY_NAME\u0026gt;   agent_force_tls SW_AGENT_FORCE_TLS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Use TLS for communication with SkyWalking OAP (no cert required)   agent_authentication SW_AGENT_AUTHENTICATION \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The authentication token to verify that the agent is trusted by the backend OAP, as for how to configure the backend, refer to the yaml.   agent_logging_level SW_AGENT_LOGGING_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; INFO The level of agent self-logs, could be one of CRITICAL, FATAL, ERROR, WARN(WARNING), INFO, DEBUG. Please turn on debug if an issue is encountered to find out what\u0026rsquo;s going on    Agent Core Danger Zone    Configuration Environment Variable Type Default Value Description     agent_collector_heartbeat_period SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 30 The agent will exchange heartbeat message with SkyWalking OAP backend every period seconds   agent_collector_properties_report_period_factor SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The agent will report service instance properties every factor * heartbeat period seconds default: 10*30 = 300 seconds   agent_instance_properties_json SW_AGENT_INSTANCE_PROPERTIES_JSON \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  A custom JSON string to be reported as service instance properties, e.g. {\u0026quot;key\u0026quot;: \u0026quot;value\u0026quot;}   agent_experimental_fork_support SW_AGENT_EXPERIMENTAL_FORK_SUPPORT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False The agent will restart itself in any os.fork()-ed child process. Important Note: it\u0026rsquo;s not suitable for short-lived processes as each one will create a new instance in SkyWalking dashboard in format of service_instance-child(pid). This feature may not work when a precise combination of gRPC + Python 3.7 + subprocess (not fork) is used together. The agent will output a warning log when using on Python 3.7 for such a reason.   agent_queue_timeout SW_AGENT_QUEUE_TIMEOUT \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1 DANGEROUS - This option controls the interval of each bulk report from telemetry data queues Do not modify unless you have evaluated its impact given your service load.   
agent_asyncio_enhancement SW_AGENT_ASYNCIO_ENHANCEMENT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Replace the threads to asyncio coroutines to report telemetry data to the OAP. This option is experimental and may not work as expected.    SW_PYTHON Auto Instrumentation CLI    Configuration Environment Variable Type Default Value Description     agent_sw_python_bootstrap_propagate SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the child process agent bootstrap behavior in sw-python CLI, if set to False, a valid child process will not boot up a SkyWalking Agent. Please refer to the CLI Guide for details.   agent_sw_python_cli_debug_enabled SW_AGENT_SW_PYTHON_CLI_DEBUG_ENABLED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the CLI and agent logging debug mode, if set to True, the CLI and agent will print out debug logs. Please refer to the CLI Guide for details. Important: this config will set agent logging level to DEBUG as well, do not use it in production otherwise it will flood your logs. This normally shouldn\u0026rsquo;t be pass as a simple flag -d will be the same.    Trace Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_trace_reporter_max_buffer_size SW_AGENT_TRACE_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending the segment data to backend, segments beyond this are silently dropped   agent_trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. the current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?.   agent_ignore_suffix SW_AGENT_IGNORE_SUFFIX \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.   correlation_element_max_number SW_CORRELATION_ELEMENT_MAX_NUMBER \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 3 Max element count of the correlation context.   correlation_value_max_length SW_CORRELATION_VALUE_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 128 Max value length of correlation context element.    Profiling Configurations    Configuration Environment Variable Type Default Value Description     agent_profile_active SW_AGENT_PROFILE_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will enable profiler when user create a new profiling task.   agent_collector_get_profile_task_interval SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The number of seconds between two profile task query.   agent_profile_max_parallel SW_AGENT_PROFILE_MAX_PARALLEL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 5 The number of parallel monitor segment count.   agent_profile_duration SW_AGENT_PROFILE_DURATION \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The maximum monitor segment time(minutes), if current segment monitor time out of limit, then stop it.   
agent_profile_dump_max_stack_depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 500 The number of max dump thread stack depth   agent_profile_snapshot_transport_buffer_size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 50 The number of snapshot transport to backend buffer size    Log Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_log_reporter_active SW_AGENT_LOG_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected logs to the OAP or Satellite. Otherwise, it disables the feature.   agent_log_reporter_safe_mode SW_AGENT_LOG_REPORTER_SAFE_MODE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If True, Python agent will filter out HTTP basic auth information from log records. By default, it disables the feature due to potential performance impact brought by regular expression   agent_log_reporter_max_buffer_size SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.   agent_log_reporter_level SW_AGENT_LOG_REPORTER_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; WARNING This config specifies the logger levels of concern, any logs with a level below the config will be ignored.   agent_log_reporter_ignore_filter SW_AGENT_LOG_REPORTER_IGNORE_FILTER \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config customizes whether to ignore the application-defined logger filters, if True, all logs are reported disregarding any filter rules.   agent_log_reporter_formatted SW_AGENT_LOG_REPORTER_FORMATTED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, the log reporter will transmit the logs as formatted. Otherwise, puts logRecord.msg and logRecord.args into message content and tags(argument.n), respectively. Along with an exception tag if an exception was raised. Only applies to logging module.   agent_log_reporter_layout SW_AGENT_LOG_REPORTER_LAYOUT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; %(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s The log reporter formats the logRecord message based on the layout given. Only applies to logging module.   agent_cause_exception_depth SW_AGENT_CAUSE_EXCEPTION_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 This configuration is shared by log reporter and tracer. This config limits agent to report up to limit stacktrace, please refer to [Python traceback](../ https://docs.python.org/3/library/traceback.html#traceback.print_tb) for more explanations.    Meter Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_meter_reporter_active SW_AGENT_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected meters to the OAP or Satellite. Otherwise, it disables the feature.   agent_meter_reporter_max_buffer_size SW_AGENT_METER_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending meter data to backend, meters beyond this are silently dropped.   
agent_meter_reporter_period SW_AGENT_METER_REPORTER_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The interval in seconds between each meter data report   agent_pvm_meter_reporter_active SW_AGENT_PVM_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected Python Virtual Machine (PVM) meters to the OAP or Satellite. Otherwise, it disables the feature.    Plugin Related configurations    Configuration Environment Variable Type Default Value Description     agent_disable_plugins SW_AGENT_DISABLE_PLUGINS \u0026lt;class \u0026lsquo;list\u0026rsquo;\u0026gt; [''] The name patterns in comma-separated pattern, plugins whose name matches one of the pattern won\u0026rsquo;t be installed   plugin_http_http_params_length_threshold SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1024 When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.   plugin_http_ignore_method SW_PLUGIN_HTTP_IGNORE_METHOD \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  Comma-delimited list of http methods to ignore (GET, POST, HEAD, OPTIONS, etc\u0026hellip;)   plugin_sql_parameters_max_length SW_PLUGIN_SQL_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 0 The maximum length of the collected parameter, parameters longer than the specified length will be truncated, length 0 turns off parameter tracing   plugin_pymongo_trace_parameters SW_PLUGIN_PYMONGO_TRACE_PARAMETERS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Indicates whether to collect the filters of pymongo   plugin_pymongo_parameters_max_length SW_PLUGIN_PYMONGO_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of the collected filters, filters longer than the specified length will be truncated   plugin_elasticsearch_trace_dsl SW_PLUGIN_ELASTICSEARCH_TRACE_DSL \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false   plugin_flask_collect_http_params SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Flask plugin should collect the parameters of the request.   plugin_sanic_collect_http_params SW_PLUGIN_SANIC_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Sanic plugin should collect the parameters of the request.   plugin_django_collect_http_params SW_PLUGIN_DJANGO_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Django plugin should collect the parameters of the request.   plugin_fastapi_collect_http_params SW_PLUGIN_FASTAPI_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the FastAPI plugin should collect the parameters of the request.   plugin_bottle_collect_http_params SW_PLUGIN_BOTTLE_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Bottle plugin should collect the parameters of the request.   
plugin_celery_parameters_length SW_PLUGIN_CELERY_PARAMETERS_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of celery functions parameters, longer than this will be truncated, 0 turns off    ","excerpt":"Supported Agent Configuration Options Below is the full list of supported configurations you can set …","ref":"/docs/skywalking-python/next/en/setup/configuration/","title":"Supported Agent Configuration Options"},{"body":"Supported Agent Configuration Options Below is the full list of supported configurations you can set to customize the agent behavior, please take some time to read the descriptions for what they can achieve.\n Usage: (Pass in intrusive setup)\n from skywalking import config, agent config.init(YourConfiguration=YourValue)) agent.start()  Usage: (Pass by environment variables)\n export SW_AGENT_YourConfiguration=YourValue Agent Core Configuration Options    Configuration Environment Variable Type Default Value Description     agent_collector_backend_services SW_AGENT_COLLECTOR_BACKEND_SERVICES \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; oap_host:oap_port The backend OAP server address, 11800 is default OAP gRPC port, 12800 is HTTP, Kafka ignores this option and uses kafka_bootstrap_servers option. This option should be changed accordingly with selected protocol   agent_protocol SW_AGENT_PROTOCOL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; grpc The protocol to communicate with the backend OAP, http, grpc or kafka, we highly suggest using grpc in production as it\u0026rsquo;s well optimized than http. The kafka protocol provides an alternative way to submit data to the backend.   agent_name SW_AGENT_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; Python Service Name The name of your awesome Python service   agent_instance_name SW_AGENT_INSTANCE_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; str(uuid.uuid1()).replace('-', \u0026lsquo;') The name of this particular awesome Python service instance   agent_namespace SW_AGENT_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The agent namespace of the Python service (available as tag and the suffix of service name)   kafka_bootstrap_servers SW_KAFKA_BOOTSTRAP_SERVERS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; localhost:9092 A list of host/port pairs to use for establishing the initial connection to your Kafka cluster. It is in the form of host1:port1,host2:port2,\u0026hellip; (used for Kafka reporter protocol)   kafka_namespace SW_KAFKA_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The kafka namespace specified by OAP side SW_NAMESPACE, prepends the following kafka topic names with a -.   
kafka_topic_management SW_KAFKA_TOPIC_MANAGEMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-managements Specifying Kafka topic name for service instance reporting and registering, this should be in sync with OAP   kafka_topic_segment SW_KAFKA_TOPIC_SEGMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-segments Specifying Kafka topic name for Tracing data, this should be in sync with OAP   kafka_topic_log SW_KAFKA_TOPIC_LOG \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-logs Specifying Kafka topic name for Log data, this should be in sync with OAP   kafka_topic_meter SW_KAFKA_TOPIC_METER \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-meters Specifying Kafka topic name for Meter data, this should be in sync with OAP   kafka_reporter_custom_configurations SW_KAFKA_REPORTER_CUSTOM_CONFIGURATIONS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The configs to init KafkaProducer, supports the basic arguments (whose type is either str, bool, or int) listed here This config only works from env variables, each one should be passed in SW_KAFKA_REPORTER_CONFIG_\u0026lt;KEY_NAME\u0026gt;   agent_force_tls SW_AGENT_FORCE_TLS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Use TLS for communication with SkyWalking OAP (no cert required)   agent_authentication SW_AGENT_AUTHENTICATION \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The authentication token to verify that the agent is trusted by the backend OAP, as for how to configure the backend, refer to the yaml.   agent_logging_level SW_AGENT_LOGGING_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; INFO The level of agent self-logs, could be one of CRITICAL, FATAL, ERROR, WARN(WARNING), INFO, DEBUG. Please turn on debug if an issue is encountered to find out what\u0026rsquo;s going on    Agent Core Danger Zone    Configuration Environment Variable Type Default Value Description     agent_collector_heartbeat_period SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 30 The agent will exchange heartbeat message with SkyWalking OAP backend every period seconds   agent_collector_properties_report_period_factor SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The agent will report service instance properties every factor * heartbeat period seconds default: 10*30 = 300 seconds   agent_instance_properties_json SW_AGENT_INSTANCE_PROPERTIES_JSON \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  A custom JSON string to be reported as service instance properties, e.g. {\u0026quot;key\u0026quot;: \u0026quot;value\u0026quot;}   agent_experimental_fork_support SW_AGENT_EXPERIMENTAL_FORK_SUPPORT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False The agent will restart itself in any os.fork()-ed child process. Important Note: it\u0026rsquo;s not suitable for short-lived processes as each one will create a new instance in SkyWalking dashboard in format of service_instance-child(pid). This feature may not work when a precise combination of gRPC + Python 3.7 + subprocess (not fork) is used together. The agent will output a warning log when using on Python 3.7 for such a reason.   agent_queue_timeout SW_AGENT_QUEUE_TIMEOUT \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1 DANGEROUS - This option controls the interval of each bulk report from telemetry data queues Do not modify unless you have evaluated its impact given your service load.    
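As a further illustration of the same pattern, the Kafka-related options in the Agent Core Configuration Options table above can be combined to switch the reporter protocol. This is only a hedged sketch: the broker addresses and service name are placeholders, the topic names simply restate the documented defaults, and it again assumes config.init(**kwargs) accepts these option names:
# a minimal sketch of selecting the Kafka reporter, assuming config.init(**kwargs) accepts the documented option names
from skywalking import agent, config
config.init(
    agent_name='my-python-service',                       # placeholder service name
    agent_protocol='kafka',                               # use the Kafka reporter instead of grpc or http
    kafka_bootstrap_servers='kafka-1:9092,kafka-2:9092',  # placeholder host:port pairs for the initial connection
    kafka_topic_segment='skywalking-segments',            # documented default; keep in sync with the OAP
    kafka_topic_log='skywalking-logs',                     # documented default; keep in sync with the OAP
)
agent.start()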
SW_PYTHON Auto Instrumentation CLI    Configuration Environment Variable Type Default Value Description     agent_sw_python_bootstrap_propagate SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the child process agent bootstrap behavior in sw-python CLI, if set to False, a valid child process will not boot up a SkyWalking Agent. Please refer to the CLI Guide for details.   agent_sw_python_cli_debug_enabled SW_AGENT_SW_PYTHON_CLI_DEBUG_ENABLED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the CLI and agent logging debug mode, if set to True, the CLI and agent will print out debug logs. Please refer to the CLI Guide for details. Important: this config will set agent logging level to DEBUG as well, do not use it in production otherwise it will flood your logs. This normally shouldn\u0026rsquo;t be pass as a simple flag -d will be the same.    Trace Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_trace_reporter_max_buffer_size SW_AGENT_TRACE_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending the segment data to backend, segments beyond this are silently dropped   agent_trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. the current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?.   agent_ignore_suffix SW_AGENT_IGNORE_SUFFIX \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.   correlation_element_max_number SW_CORRELATION_ELEMENT_MAX_NUMBER \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 3 Max element count of the correlation context.   correlation_value_max_length SW_CORRELATION_VALUE_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 128 Max value length of correlation context element.    Profiling Configurations    Configuration Environment Variable Type Default Value Description     agent_profile_active SW_AGENT_PROFILE_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will enable profiler when user create a new profiling task.   agent_collector_get_profile_task_interval SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The number of seconds between two profile task query.   agent_profile_max_parallel SW_AGENT_PROFILE_MAX_PARALLEL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 5 The number of parallel monitor segment count.   agent_profile_duration SW_AGENT_PROFILE_DURATION \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The maximum monitor segment time(minutes), if current segment monitor time out of limit, then stop it.   
agent_profile_dump_max_stack_depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 500 The number of max dump thread stack depth   agent_profile_snapshot_transport_buffer_size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 50 The number of snapshot transport to backend buffer size    Log Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_log_reporter_active SW_AGENT_LOG_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected logs to the OAP or Satellite. Otherwise, it disables the feature.   agent_log_reporter_safe_mode SW_AGENT_LOG_REPORTER_SAFE_MODE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If True, Python agent will filter out HTTP basic auth information from log records. By default, it disables the feature due to potential performance impact brought by regular expression   agent_log_reporter_max_buffer_size SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.   agent_log_reporter_level SW_AGENT_LOG_REPORTER_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; WARNING This config specifies the logger levels of concern, any logs with a level below the config will be ignored.   agent_log_reporter_ignore_filter SW_AGENT_LOG_REPORTER_IGNORE_FILTER \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config customizes whether to ignore the application-defined logger filters, if True, all logs are reported disregarding any filter rules.   agent_log_reporter_formatted SW_AGENT_LOG_REPORTER_FORMATTED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, the log reporter will transmit the logs as formatted. Otherwise, puts logRecord.msg and logRecord.args into message content and tags(argument.n), respectively. Along with an exception tag if an exception was raised. Only applies to logging module.   agent_log_reporter_layout SW_AGENT_LOG_REPORTER_LAYOUT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; %(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s The log reporter formats the logRecord message based on the layout given. Only applies to logging module.   agent_cause_exception_depth SW_AGENT_CAUSE_EXCEPTION_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 This configuration is shared by log reporter and tracer. This config limits agent to report up to limit stacktrace, please refer to [Python traceback](../ https://docs.python.org/3/library/traceback.html#traceback.print_tb) for more explanations.    Meter Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_meter_reporter_active SW_AGENT_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected meters to the OAP or Satellite. Otherwise, it disables the feature.   agent_meter_reporter_max_buffer_size SW_AGENT_METER_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending meter data to backend, meters beyond this are silently dropped.   
agent_meter_reporter_period SW_AGENT_METER_REPORTER_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The interval in seconds between each meter data report   agent_pvm_meter_reporter_active SW_AGENT_PVM_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected Python Virtual Machine (PVM) meters to the OAP or Satellite. Otherwise, it disables the feature.    Plugin Related configurations    Configuration Environment Variable Type Default Value Description     agent_disable_plugins SW_AGENT_DISABLE_PLUGINS \u0026lt;class \u0026lsquo;list\u0026rsquo;\u0026gt; [''] The name patterns in comma-separated pattern, plugins whose name matches one of the pattern won\u0026rsquo;t be installed   plugin_http_http_params_length_threshold SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1024 When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.   plugin_http_ignore_method SW_PLUGIN_HTTP_IGNORE_METHOD \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  Comma-delimited list of http methods to ignore (GET, POST, HEAD, OPTIONS, etc\u0026hellip;)   plugin_sql_parameters_max_length SW_PLUGIN_SQL_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 0 The maximum length of the collected parameter, parameters longer than the specified length will be truncated, length 0 turns off parameter tracing   plugin_pymongo_trace_parameters SW_PLUGIN_PYMONGO_TRACE_PARAMETERS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Indicates whether to collect the filters of pymongo   plugin_pymongo_parameters_max_length SW_PLUGIN_PYMONGO_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of the collected filters, filters longer than the specified length will be truncated   plugin_elasticsearch_trace_dsl SW_PLUGIN_ELASTICSEARCH_TRACE_DSL \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false   plugin_flask_collect_http_params SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Flask plugin should collect the parameters of the request.   plugin_sanic_collect_http_params SW_PLUGIN_SANIC_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Sanic plugin should collect the parameters of the request.   plugin_django_collect_http_params SW_PLUGIN_DJANGO_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Django plugin should collect the parameters of the request.   plugin_fastapi_collect_http_params SW_PLUGIN_FASTAPI_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the FastAPI plugin should collect the parameters of the request.   plugin_bottle_collect_http_params SW_PLUGIN_BOTTLE_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Bottle plugin should collect the parameters of the request.   
plugin_celery_parameters_length SW_PLUGIN_CELERY_PARAMETERS_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of celery functions parameters, longer than this will be truncated, 0 turns off    ","excerpt":"Supported Agent Configuration Options Below is the full list of supported configurations you can set …","ref":"/docs/skywalking-python/v1.0.1/en/setup/configuration/","title":"Supported Agent Configuration Options"},{"body":"Supported Libraries This document is automatically generated from the SkyWalking Python testing matrix.\nThe column of versions only indicates the set of library versions tested in a best-effort manner.\nIf you find newer major versions that are missing from the following table, and it\u0026rsquo;s not documented as a limitation, please PR to update the test matrix in the plugin.\nVersions marked as NOT SUPPORTED may be due to an incompatible version with Python in the original library or a limitation of SkyWalking auto-instrumentation (welcome to contribute!)\nPlugin Support Table    Library Python Version - Lib Version Plugin Name     aiohttp Python \u0026gt;=3.7 - [\u0026lsquo;3.7.*']; sw_aiohttp   aioredis Python \u0026gt;=3.7 - [\u0026lsquo;2.0.*']; sw_aioredis   aiormq Python \u0026gt;=3.7 - [\u0026lsquo;6.3\u0026rsquo;, \u0026lsquo;6.4\u0026rsquo;]; sw_aiormq   amqp Python \u0026gt;=3.7 - [\u0026lsquo;2.6.1\u0026rsquo;]; sw_amqp   asyncpg Python \u0026gt;=3.7 - [\u0026lsquo;0.25.0\u0026rsquo;]; sw_asyncpg   bottle Python \u0026gt;=3.7 - [\u0026lsquo;0.12.23\u0026rsquo;]; sw_bottle   celery Python \u0026gt;=3.7 - [\u0026lsquo;5.1\u0026rsquo;]; sw_celery   confluent_kafka Python \u0026gt;=3.7 - [\u0026lsquo;1.5.0\u0026rsquo;, \u0026lsquo;1.7.0\u0026rsquo;, \u0026lsquo;1.8.2\u0026rsquo;]; sw_confluent_kafka   django Python \u0026gt;=3.7 - [\u0026lsquo;3.2\u0026rsquo;]; sw_django   elasticsearch Python \u0026gt;=3.7 - [\u0026lsquo;7.13\u0026rsquo;, \u0026lsquo;7.14\u0026rsquo;, \u0026lsquo;7.15\u0026rsquo;]; sw_elasticsearch   hug Python \u0026gt;=3.11 - NOT SUPPORTED YET; Python \u0026gt;=3.10 - [\u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; Python \u0026gt;=3.7 - [\u0026lsquo;2.4.1\u0026rsquo;, \u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; sw_falcon   fastapi Python \u0026gt;=3.7 - [\u0026lsquo;0.89.\u0026rsquo;, \u0026lsquo;0.88.']; sw_fastapi   flask Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_flask   happybase Python \u0026gt;=3.7 - [\u0026lsquo;1.2.0\u0026rsquo;]; sw_happybase   http_server Python \u0026gt;=3.7 - ['*']; sw_http_server   werkzeug Python \u0026gt;=3.7 - [\u0026lsquo;1.0.1\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_http_server   httpx Python \u0026gt;=3.7 - [\u0026lsquo;0.23.\u0026rsquo;, \u0026lsquo;0.22.']; sw_httpx   kafka-python Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_kafka   loguru Python \u0026gt;=3.7 - [\u0026lsquo;0.6.0\u0026rsquo;, \u0026lsquo;0.7.0\u0026rsquo;]; sw_loguru   mysqlclient Python \u0026gt;=3.7 - [\u0026lsquo;2.1.*']; sw_mysqlclient   psycopg[binary] Python \u0026gt;=3.11 - [\u0026lsquo;3.1.']; Python \u0026gt;=3.7 - [\u0026lsquo;3.0.18\u0026rsquo;, \u0026lsquo;3.1.']; sw_psycopg   psycopg2-binary Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;2.9\u0026rsquo;]; sw_psycopg2   pymongo Python \u0026gt;=3.7 - [\u0026lsquo;3.11.*']; sw_pymongo   pymysql Python \u0026gt;=3.7 - [\u0026lsquo;1.0\u0026rsquo;]; sw_pymysql   pyramid Python \u0026gt;=3.7 - [\u0026lsquo;1.10\u0026rsquo;, 
\u0026lsquo;2.0\u0026rsquo;]; sw_pyramid   pika Python \u0026gt;=3.7 - [\u0026lsquo;1.2\u0026rsquo;]; sw_rabbitmq   redis Python \u0026gt;=3.7 - [\u0026lsquo;3.5.*\u0026rsquo;, \u0026lsquo;4.5.1\u0026rsquo;]; sw_redis   requests Python \u0026gt;=3.7 - [\u0026lsquo;2.26\u0026rsquo;, \u0026lsquo;2.25\u0026rsquo;]; sw_requests   sanic Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;20.12\u0026rsquo;]; sw_sanic   tornado Python \u0026gt;=3.7 - [\u0026lsquo;6.0\u0026rsquo;, \u0026lsquo;6.1\u0026rsquo;]; sw_tornado   urllib3 Python \u0026gt;=3.7 - [\u0026lsquo;1.26\u0026rsquo;, \u0026lsquo;1.25\u0026rsquo;]; sw_urllib3   urllib_request Python \u0026gt;=3.7 - ['*']; sw_urllib_request   websockets Python \u0026gt;=3.7 - [\u0026lsquo;10.3\u0026rsquo;, \u0026lsquo;10.4\u0026rsquo;]; sw_websockets    Notes  The celery server running with \u0026ldquo;celery -A \u0026hellip;\u0026rdquo; should be run with the HTTP protocol as it uses multiprocessing by default which is not compatible with the gRPC protocol implementation in SkyWalking currently. Celery clients can use whatever protocol they want. While Falcon is instrumented, only Hug is tested. Hug is believed to be abandoned project, use this plugin with a bit more caution. Instead of Hug, plugin test should move to test actual Falcon. The websocket instrumentation only traces client side connection handshake, the actual message exchange (send/recv) is not traced since injecting headers to socket message body is the only way to propagate the trace context, which requires customization of message structure and extreme care. (Feel free to add this feature by instrumenting the send/recv methods commented out in the code by either injecting sw8 headers or propagate the trace context in a separate message)  ","excerpt":"Supported Libraries This document is automatically generated from the SkyWalking Python testing …","ref":"/docs/skywalking-python/latest/en/setup/plugins/","title":"Supported Libraries"},{"body":"Supported Libraries This document is automatically generated from the SkyWalking Python testing matrix.\nThe column of versions only indicates the set of library versions tested in a best-effort manner.\nIf you find newer major versions that are missing from the following table, and it\u0026rsquo;s not documented as a limitation, please PR to update the test matrix in the plugin.\nVersions marked as NOT SUPPORTED may be due to an incompatible version with Python in the original library or a limitation of SkyWalking auto-instrumentation (welcome to contribute!)\nPlugin Support Table    Library Python Version - Lib Version Plugin Name     aiohttp Python \u0026gt;=3.7 - [\u0026lsquo;3.7.*']; sw_aiohttp   aioredis Python \u0026gt;=3.7 - [\u0026lsquo;2.0.*']; sw_aioredis   aiormq Python \u0026gt;=3.7 - [\u0026lsquo;6.3\u0026rsquo;, \u0026lsquo;6.4\u0026rsquo;]; sw_aiormq   amqp Python \u0026gt;=3.7 - [\u0026lsquo;2.6.1\u0026rsquo;]; sw_amqp   asyncpg Python \u0026gt;=3.7 - [\u0026lsquo;0.25.0\u0026rsquo;]; sw_asyncpg   bottle Python \u0026gt;=3.7 - [\u0026lsquo;0.12.23\u0026rsquo;]; sw_bottle   celery Python \u0026gt;=3.7 - [\u0026lsquo;5.1\u0026rsquo;]; sw_celery   confluent_kafka Python \u0026gt;=3.7 - [\u0026lsquo;1.5.0\u0026rsquo;, \u0026lsquo;1.7.0\u0026rsquo;, \u0026lsquo;1.8.2\u0026rsquo;]; sw_confluent_kafka   django Python \u0026gt;=3.7 - [\u0026lsquo;3.2\u0026rsquo;]; sw_django   elasticsearch Python \u0026gt;=3.7 - [\u0026lsquo;7.13\u0026rsquo;, \u0026lsquo;7.14\u0026rsquo;, \u0026lsquo;7.15\u0026rsquo;]; 
sw_elasticsearch   hug Python \u0026gt;=3.11 - NOT SUPPORTED YET; Python \u0026gt;=3.10 - [\u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; Python \u0026gt;=3.7 - [\u0026lsquo;2.4.1\u0026rsquo;, \u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; sw_falcon   fastapi Python \u0026gt;=3.7 - [\u0026lsquo;0.89.\u0026rsquo;, \u0026lsquo;0.88.']; sw_fastapi   flask Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_flask   happybase Python \u0026gt;=3.7 - [\u0026lsquo;1.2.0\u0026rsquo;]; sw_happybase   http_server Python \u0026gt;=3.7 - ['*']; sw_http_server   werkzeug Python \u0026gt;=3.7 - [\u0026lsquo;1.0.1\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_http_server   httpx Python \u0026gt;=3.7 - [\u0026lsquo;0.23.\u0026rsquo;, \u0026lsquo;0.22.']; sw_httpx   kafka-python Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_kafka   loguru Python \u0026gt;=3.7 - [\u0026lsquo;0.6.0\u0026rsquo;, \u0026lsquo;0.7.0\u0026rsquo;]; sw_loguru   mysqlclient Python \u0026gt;=3.7 - [\u0026lsquo;2.1.*']; sw_mysqlclient   neo4j Python \u0026gt;=3.7 - [\u0026lsquo;5.*']; sw_neo4j   psycopg[binary] Python \u0026gt;=3.11 - [\u0026lsquo;3.1.']; Python \u0026gt;=3.7 - [\u0026lsquo;3.0.18\u0026rsquo;, \u0026lsquo;3.1.']; sw_psycopg   psycopg2-binary Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;2.9\u0026rsquo;]; sw_psycopg2   pymongo Python \u0026gt;=3.7 - [\u0026lsquo;3.11.*']; sw_pymongo   pymysql Python \u0026gt;=3.7 - [\u0026lsquo;1.0\u0026rsquo;]; sw_pymysql   pyramid Python \u0026gt;=3.7 - [\u0026lsquo;1.10\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_pyramid   pika Python \u0026gt;=3.7 - [\u0026lsquo;1.2\u0026rsquo;]; sw_rabbitmq   redis Python \u0026gt;=3.7 - [\u0026lsquo;3.5.*\u0026rsquo;, \u0026lsquo;4.5.1\u0026rsquo;]; sw_redis   requests Python \u0026gt;=3.7 - [\u0026lsquo;2.26\u0026rsquo;, \u0026lsquo;2.25\u0026rsquo;]; sw_requests   sanic Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;20.12\u0026rsquo;]; sw_sanic   tornado Python \u0026gt;=3.7 - [\u0026lsquo;6.0\u0026rsquo;, \u0026lsquo;6.1\u0026rsquo;]; sw_tornado   urllib3 Python \u0026gt;=3.7 - [\u0026lsquo;1.26\u0026rsquo;, \u0026lsquo;1.25\u0026rsquo;]; sw_urllib3   urllib_request Python \u0026gt;=3.7 - ['*']; sw_urllib_request   websockets Python \u0026gt;=3.7 - [\u0026lsquo;10.3\u0026rsquo;, \u0026lsquo;10.4\u0026rsquo;]; sw_websockets    Notes  The celery server running with \u0026ldquo;celery -A \u0026hellip;\u0026rdquo; should be run with the HTTP protocol as it uses multiprocessing by default which is not compatible with the gRPC protocol implementation in SkyWalking currently. Celery clients can use whatever protocol they want. While Falcon is instrumented, only Hug is tested. Hug is believed to be abandoned project, use this plugin with a bit more caution. Instead of Hug, plugin test should move to test actual Falcon. The Neo4j plugin integrates neo4j python driver 5.x.x versions which support both Neo4j 5 and 4.4 DBMS. The websocket instrumentation only traces client side connection handshake, the actual message exchange (send/recv) is not traced since injecting headers to socket message body is the only way to propagate the trace context, which requires customization of message structure and extreme care. 
(Feel free to add this feature by instrumenting the send/recv methods commented out in the code by either injecting sw8 headers or propagate the trace context in a separate message)  ","excerpt":"Supported Libraries This document is automatically generated from the SkyWalking Python testing …","ref":"/docs/skywalking-python/next/en/setup/plugins/","title":"Supported Libraries"},{"body":"Supported Libraries This document is automatically generated from the SkyWalking Python testing matrix.\nThe column of versions only indicates the set of library versions tested in a best-effort manner.\nIf you find newer major versions that are missing from the following table, and it\u0026rsquo;s not documented as a limitation, please PR to update the test matrix in the plugin.\nVersions marked as NOT SUPPORTED may be due to an incompatible version with Python in the original library or a limitation of SkyWalking auto-instrumentation (welcome to contribute!)\nPlugin Support Table    Library Python Version - Lib Version Plugin Name     aiohttp Python \u0026gt;=3.7 - [\u0026lsquo;3.7.*']; sw_aiohttp   aioredis Python \u0026gt;=3.7 - [\u0026lsquo;2.0.*']; sw_aioredis   aiormq Python \u0026gt;=3.7 - [\u0026lsquo;6.3\u0026rsquo;, \u0026lsquo;6.4\u0026rsquo;]; sw_aiormq   amqp Python \u0026gt;=3.7 - [\u0026lsquo;2.6.1\u0026rsquo;]; sw_amqp   asyncpg Python \u0026gt;=3.7 - [\u0026lsquo;0.25.0\u0026rsquo;]; sw_asyncpg   bottle Python \u0026gt;=3.7 - [\u0026lsquo;0.12.23\u0026rsquo;]; sw_bottle   celery Python \u0026gt;=3.7 - [\u0026lsquo;5.1\u0026rsquo;]; sw_celery   confluent_kafka Python \u0026gt;=3.7 - [\u0026lsquo;1.5.0\u0026rsquo;, \u0026lsquo;1.7.0\u0026rsquo;, \u0026lsquo;1.8.2\u0026rsquo;]; sw_confluent_kafka   django Python \u0026gt;=3.7 - [\u0026lsquo;3.2\u0026rsquo;]; sw_django   elasticsearch Python \u0026gt;=3.7 - [\u0026lsquo;7.13\u0026rsquo;, \u0026lsquo;7.14\u0026rsquo;, \u0026lsquo;7.15\u0026rsquo;]; sw_elasticsearch   hug Python \u0026gt;=3.11 - NOT SUPPORTED YET; Python \u0026gt;=3.10 - [\u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; Python \u0026gt;=3.7 - [\u0026lsquo;2.4.1\u0026rsquo;, \u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; sw_falcon   fastapi Python \u0026gt;=3.7 - [\u0026lsquo;0.89.\u0026rsquo;, \u0026lsquo;0.88.']; sw_fastapi   flask Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_flask   happybase Python \u0026gt;=3.7 - [\u0026lsquo;1.2.0\u0026rsquo;]; sw_happybase   http_server Python \u0026gt;=3.7 - ['*']; sw_http_server   werkzeug Python \u0026gt;=3.7 - [\u0026lsquo;1.0.1\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_http_server   httpx Python \u0026gt;=3.7 - [\u0026lsquo;0.23.\u0026rsquo;, \u0026lsquo;0.22.']; sw_httpx   kafka-python Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_kafka   loguru Python \u0026gt;=3.7 - [\u0026lsquo;0.6.0\u0026rsquo;, \u0026lsquo;0.7.0\u0026rsquo;]; sw_loguru   mysqlclient Python \u0026gt;=3.7 - [\u0026lsquo;2.1.*']; sw_mysqlclient   psycopg[binary] Python \u0026gt;=3.11 - [\u0026lsquo;3.1.']; Python \u0026gt;=3.7 - [\u0026lsquo;3.0.18\u0026rsquo;, \u0026lsquo;3.1.']; sw_psycopg   psycopg2-binary Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;2.9\u0026rsquo;]; sw_psycopg2   pymongo Python \u0026gt;=3.7 - [\u0026lsquo;3.11.*']; sw_pymongo   pymysql Python \u0026gt;=3.7 - [\u0026lsquo;1.0\u0026rsquo;]; sw_pymysql   pyramid Python \u0026gt;=3.7 - [\u0026lsquo;1.10\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_pyramid   pika Python \u0026gt;=3.7 - 
[\u0026lsquo;1.2\u0026rsquo;]; sw_rabbitmq   redis Python \u0026gt;=3.7 - [\u0026lsquo;3.5.*\u0026rsquo;, \u0026lsquo;4.5.1\u0026rsquo;]; sw_redis   requests Python \u0026gt;=3.7 - [\u0026lsquo;2.26\u0026rsquo;, \u0026lsquo;2.25\u0026rsquo;]; sw_requests   sanic Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;20.12\u0026rsquo;]; sw_sanic   tornado Python \u0026gt;=3.7 - [\u0026lsquo;6.0\u0026rsquo;, \u0026lsquo;6.1\u0026rsquo;]; sw_tornado   urllib3 Python \u0026gt;=3.7 - [\u0026lsquo;1.26\u0026rsquo;, \u0026lsquo;1.25\u0026rsquo;]; sw_urllib3   urllib_request Python \u0026gt;=3.7 - ['*']; sw_urllib_request   websockets Python \u0026gt;=3.7 - [\u0026lsquo;10.3\u0026rsquo;, \u0026lsquo;10.4\u0026rsquo;]; sw_websockets    Notes  The celery server running with \u0026ldquo;celery -A \u0026hellip;\u0026rdquo; should be run with the HTTP protocol as it uses multiprocessing by default which is not compatible with the gRPC protocol implementation in SkyWalking currently. Celery clients can use whatever protocol they want. While Falcon is instrumented, only Hug is tested. Hug is believed to be abandoned project, use this plugin with a bit more caution. Instead of Hug, plugin test should move to test actual Falcon. The websocket instrumentation only traces client side connection handshake, the actual message exchange (send/recv) is not traced since injecting headers to socket message body is the only way to propagate the trace context, which requires customization of message structure and extreme care. (Feel free to add this feature by instrumenting the send/recv methods commented out in the code by either injecting sw8 headers or propagate the trace context in a separate message)  ","excerpt":"Supported Libraries This document is automatically generated from the SkyWalking Python testing …","ref":"/docs/skywalking-python/v1.0.1/en/setup/plugins/","title":"Supported Libraries"},{"body":"Supported SAPI, extension and library The following plugins provide the distributed tracing capability.\nSupported SAPI  PHP-FPM CLI under Swoole  Supported PHP extension  cURL PDO MySQL Improved Memcached phpredis MongoDB Memcache  Supported PHP library  predis php-amqplib for Message Queuing Producer  ","excerpt":"Supported SAPI, extension and library The following plugins provide the distributed tracing …","ref":"/docs/skywalking-php/latest/en/setup/service-agent/php-agent/supported-list/","title":"Supported SAPI, extension and library"},{"body":"Supported SAPI, extension and library The following plugins provide the distributed tracing capability.\nSupported SAPI  PHP-FPM CLI under Swoole  Supported PHP extension  cURL PDO MySQL Improved Memcached phpredis MongoDB Memcache  Supported PHP library  predis php-amqplib for Message Queuing Producer  ","excerpt":"Supported SAPI, extension and library The following plugins provide the distributed tracing …","ref":"/docs/skywalking-php/next/en/setup/service-agent/php-agent/supported-list/","title":"Supported SAPI, extension and library"},{"body":"Supported SAPI, extension and library The following plugins provide the distributed tracing capability.\nSupported SAPI  PHP-FPM CLI under Swoole  Supported PHP extension  cURL PDO MySQL Improved Memcached phpredis MongoDB Memcache  Supported PHP library  predis php-amqplib for Message Queuing Producer  ","excerpt":"Supported SAPI, extension and library The following plugins provide the distributed tracing 
…","ref":"/docs/skywalking-php/v0.7.0/en/setup/service-agent/php-agent/supported-list/","title":"Supported SAPI, extension and library"},{"body":"SWIP - SkyWalking Improvement Proposal SWIP - SkyWalking Improvement Proposal, is an official document to propose a new feature and/or feature improvement, which are relative to end users and developers.\nSkyWalking has been very stable since v9.x. We are getting over the rapid changing stage. The core concepts, protocols for reporting telemetry and query, 3rd party integration, and the streaming process kernel are very stable. From now(2024) on, SkyWalking community would focus more on improvement and controllable improvement. All major changes should be evaluated more seriously, and try as good as possible to avoid incompatible breaking changes.\nWhat is considered a major change? The catalogs of a major change are listed as follows\n New Feature. A feature doesn\u0026rsquo;t exist for the latest version. Any change of the network Interfaces, especially for Query Protocol, Data Collect Protocols, Dynamic Configuration APIs, Exporting APIs, AI pipeline APIs. Any change of storage structure.  Q: Is Agent side feature or change considered a SWIP?\nA: Right now, SWIP targets OAP and UI side changes. All agent side changes are pending on the reviews from the committers of those agents.\nSWIP Template The purpose of this template should not be considered a hard requirement. The major purpose of SWIP is helping the PMC and community member to understand the proposal better.\n# Title: SWIP-1234 xxxx  ## Motivation The description of new feature or improvement. ## Architecture Graph Describe the relationship between your new proposal part and existing components. ## Proposed Changes State your proposal in detail. ## Imported Dependencies libs and their licenses.  ## Compatibility Whether breaking configuration, storage structure, or protocols. ## General usage docs This doesn\u0026#39;t have to be a final version, but helps the reviewers to understand how to use this new feature. SWIP Process Here is the process for starting a SWIP.\n Start a SWIP discussion at GitHub Discussion Page with title [DISCUSS] xxxx. Fill in the sections as described above in SWIP Template. At least one SkyWalking committer commented on the discussion to show interest in adopting it. This committer could update this page to grant a SWIP ID, and update the title to [SWIP-ID NO.] [DISCUSS] xxxx. All further discussion could happen on the discussion page. Once the consensus is made by enough committer supporters, and/or through a mail list vote, this SWIP should be added here as SWIP-ID NO.md and listed in the below as Known SWIPs.  
All accepted and proposed SWIPs can be found in here.\nKnown SWIPs Next SWIP Number: 8\nAccepted SWIPs  SWIP-8 Support ActiveMQ Monitoring SWIP-5 Support ClickHouse Monitoring SWIP-4 Support available layers of service in the topology SWIP-3 Support RocketMQ Monitoring SWIP-2 Collecting and Gathering Kubernetes Monitoring Data SWIP-1 Create and detect Service Hierarchy Relationship  ","excerpt":"SWIP - SkyWalking Improvement Proposal SWIP - SkyWalking Improvement Proposal, is an official …","ref":"/docs/main/next/en/swip/readme/","title":"SWIP - SkyWalking Improvement Proposal"},{"body":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. 
SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. 
Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. 
SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. 
SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. 
SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Referenc Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Referenc Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. 
SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. 
SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.rocketmqclient.collect_message_keys If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.     plugin.rocketmqclient.collect_message_tags If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.            Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","excerpt":"Table of Agent Configuration Properties This is the properties list supported in …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/","title":"Table of Agent Configuration Properties"},{"body":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. 
SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. 
{theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. 
The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. 
will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. 
SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. 
SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Reference Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Reference Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Reference Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. 
SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.rocketmqclient.collect_message_keys If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.     plugin.rocketmqclient.collect_message_tags If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.            Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","excerpt":"Table of Agent Configuration Properties This is the properties list supported in …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/configurations/","title":"Table of Agent Configuration Properties"},{"body":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. 
Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. 
SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. 
SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. 
SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. 
SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. sw_plugin_kafka_producer_config    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). 
SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Reference Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Reference Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Reference Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. 
SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp    Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. 
using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","excerpt":"Table of Agent Configuration Properties This is the properties list supported in …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/configurations/","title":"Table of Agent Configuration Properties"},{"body":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. 
SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. 
Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. 
SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. 
SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. sw_plugin_kafka_producer_config    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. 
SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace Isolate multiple OAP servers when using the same Kafka cluster (the namespace is prepended to the Kafka topic names with - ). SW_KAFKA_NAMESPACE ``   plugin.kafka.decode_class Specify which class to decode the encoded configuration of Kafka. You can set the encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS ``   plugin.springannotation.classname_match_regex Match Spring beans with a regular expression for the class name. Multiple expressions could be separated by a comma. This only works when the Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the Spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by the Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to a positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which commands should be converted to write operations. SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read Specify which commands should be converted to read operations. SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Reference Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by the Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to a positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which commands should be converted to write operations. SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read Specify which commands should be converted to read operations. SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Reference Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by the Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to a positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length.
SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. 
SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/          Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","excerpt":"Table of Agent Configuration Properties This is the properties list supported in …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/configurations/","title":"Table of Agent Configuration Properties"},{"body":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. 
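To make the property table above concrete, here is a minimal sketch of how these settings are usually supplied, assuming an illustrative service called order-service and an OAP address of oap.example.com:11800 (both are placeholders, not values from the table). A property can be set in agent/config/agent.config, overridden by the environment variable listed in the table, or overridden again by a JVM system property carrying the skywalking. prefix:

    # agent/config/agent.config -- key=value with env-var placeholder syntax
    agent.service_name=${SW_AGENT_NAME:order-service}
    collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:oap.example.com:11800}

    # the same settings supplied at launch time instead
    export SW_AGENT_NAME=order-service
    export SW_AGENT_COLLECTOR_BACKEND_SERVICES=oap.example.com:11800
    java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar \
         -Dskywalking.logging.level=DEBUG \
         -jar order-service.jar

The documented override priority runs, roughly, from JVM system properties (highest) through environment variables down to agent.config, and only the items exposed through CDS (see Dynamic Configurations above) can change after startup.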
Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. 
There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. 
SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. 
SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. 
SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Referenc Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. 
SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Referenc Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. 
SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.rocketmqclient.collect_message_keys If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.     plugin.rocketmqclient.collect_message_tags If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.            Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","excerpt":"Table of Agent Configuration Properties This is the properties list supported in …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/configurations/","title":"Table of Agent Configuration Properties"},{"body":"SkyWalking Team The SkyWalking team is comprised of Members and Contributors, and the growth has never stopped. Members have direct access to the source of SkyWalking project and actively evolve the code-base. 
Contributors improve the project through submission of patches and suggestions to the Members. All contributions to SkyWalking are appreciated The number of contributors to the project is unbounded. All contributions to SkyWalking are greatly appreciated, whether for trivial cleanups, big new features or other material rewards.\nGet started   Members Project Management Committee    Name Apache ID      Can Li lican  Candy198088   DongXue Si ilucky    Han Liu liuhan  dalek_zero   Haochao Zhuang daming    Haoyang Liu liuhaoyangzz    Hongtao Gao hanahmily    Hongwei Zhai innerpeacez    Ignasi Barrera nacx    Jiajing Lu lujiajing    Jian Tan tanjian    Jiaqi Lin linjiaqi    Jiemin Xia jmjoy    Jinlin Fu withlin    Juntao Zhang zhangjuntao    Kai Wan wankai  wankai123   Kai Wang wangkai    Lang Li lilang         Name Apache ID      Michael Semb Wever mck    Qiuxia Fan qiuxiafan    Sheng Wu (Project V.P.) wusheng  wusheng1108   Shinn Zhang zhangxin  ascrutae   Wei Zhang zhangwei24    Wenbing Wang wangwenbin    Willem Ning Jiang ningjiang    Yang Bai baiyang    Yanlong He heyanlong  YanlongHe   Yao Wang ywang    Ye Cao dashanji    Yihao Chen yihaochen  Superskyyy   Yixiong Cao caoyixiong    Yongsheng Peng pengys    Yuguang Zhao zhaoyuguang    Zhang Kewei zhangkewei    Zhenxu Ke kezhenxu94  kezhenxu94      Committer    Name Apache ID      Brandon Fergerson bfergerson    Gong Dewei kylixs    Gui Cao zifeihan  zifeihan007   Hailin Wang wanghailin    Huaxi Jiang hoshea  Zerone___01   Jiapeng Liu liujiapeng    JunXu Chen chenjunxu    Ke Zhang zhangke  Humbertttttt   Ming Wen wenming    Puguang Yang ypg    Qiang Li liqiang    Qiang Xu xuqiang         Name Apache ID      Ruixian Wang ax1an  Ax1anRISE   Sheng Wang wangsheng    Tomasz Pytel tompytel    Wei Hua alonelaval    Wei Jin kvn    Weijie Zou kdump  RootShellExp   Weiyi Liu wayilau    Xiang Wei weixiang1862    Yueqin Zhang yswdqz    Yuntao Li liyuntao    Zhusheng Xu aderm    Zixin Zhou zhouzixin  starry_loveee      Contributors 882  Search   SkyWalking Showcase    22     kezhenxu94    wu-sheng    wankai123    mrproliu    Fine0830    JaredTan95    pg-yang    arugal    weixiang1862    dashanji    innerpeacez    yswdqz    peachisai    CodePrometheus    hanahmily    JohnDuncan5171    nisiyong    Superskyyy    azibhassan    chenxiaohu    jmjoy    sacloudy     SkyWalking Website    99     wu-sheng    Jtrust    kezhenxu94    mrproliu    hanahmily    rootsongjc    fgksgf    Superskyyy    jmjoy    JaredTan95    Fine0830    arugal    dmsolr    innerpeacez    BFergerson    zhaoyuguang    wankai123    dashanji    TinyAllen    weixiang1862    EvanLjp    peng-yongsheng    heyanlong    Humbertzhang    yswdqz    yanmaipian    lujiajing1126    FingerLeader    gxthrj    Ax1an    YunaiV    LIU-WEI-git    langyan1022    pg-yang    libinglong    alonelaval    nisiyong    x22x22    HHoflittlefish777    CzyerChen    cheenursn    thebouv    Alipebt    PGDream    liuhaoyang    LiteSun    liqiangz    geomonlin    lijing-21    leimeng-ma    klboke    kehuili    JoeCqupt    jjlu521016    jacentsao    hutaishi    hailin0    fushiqinghuan111    chopin-d    apmplus    jxnu-liguobin    zhang98722    yimeng    xu1009    xiongshiyan    xdRight     bing**   weiqiang333    vcjmhg    tristan-tsl    tisonkun    tevahp    sebbASF    FeynmanZhou    peachisai    nic-chen    lucperkins    lilien1010    Dylan-beicheng    devkanro    Johor03    ButterBright    harshaskumar05    kylixs    crl228    Humbedooh    thisisgpy    CharlesMaster    andrewgkew    wayilau    feelwing1314    adriancole   
 agile6v     394102339**   YoungHu    wang-yeliang    withlin    moonming     983708408**     SkyWalking    492     wu-sheng    peng-yongsheng    kezhenxu94     ascrutae**   ascrutae    acurtain    wankai123    mrproliu    hanahmily    Fine0830    JaredTan95    dmsolr    arugal    zhaoyuguang    lytscu    wingwong-knh     zhangxin**   BFergerson    pg-yang     ascrutae**   lujiajing1126    Ax1an    yswdqz    wayilau    EvanLjp    zifeihan    IanCao     295198088**   weixiang1862    x22x22    innerpeacez     394102339**   Superskyyy    clevertension    liuhaoyang    withlin    liqiangz    xbkaishui     renliangbu**   carlvine500    candyleer    peachisai    hailin0    zhangkewei    bai-yang    heyanlong    tom-pytel    TinyAllen    adermxzs    songzhendong     55846420**   wallezhang    Jtrust    IluckySi    qxo    smartboy37597    CzyerChen    alonelaval    heihaozi    wendal    LIU-WEI-git    CodePrometheus    Humbertzhang    toffentoffen    CalvinKirs    tristaZero     liufei**   zhyyu    stalary    honganan     lxin96**   jjtyro    xuanyu66    J-Cod3r    YunaiV    langyan1022    Liu-XinYuan    SataQiu    Cool-Coding    harvies    xu1009    wuwen5     55846420**   tuohai666    flycash    JohnNiang    yaojingguo    fgksgf    adriancole    codeglzhang    yu199195    yangyiweigege    VictorZeng    TeslaCN    LiWenGu    haoyann    chidaodezhongsheng    xinzhuxiansheng    aiyanbo    darcyda1    sN0wpeak    FatihErdem    chenhaipeng    nisiyong    Z-Beatles    YczYanchengzhe    cyberdak    dagmom    codelipenghui    dominicqi    dio    libinglong    liuzc9     lizl9**   neeuq    snakorse    xiaospider    xiaoy00    Indifer    huangyoje    s00373198    cyejing    Ahoo-Wang    yanfch    devkanro    oflebbe    rabajaj0509    Shikugawa    LinuxSuRen    ScienJus    liu-junchi    WillemJiang    chenpengfei    gnr163    jiang1997    jmjoy    viswaramamoorthy    vcjmhg    tzy1316106836    terranhu    scolia    osiriswd     2278966200**   novayoung    muyun12    mgsheng    makingtime    klboke    katelei6    karott    jinlongwang    hutaishi    Hen1ng    kuaikuai    lkxiaolou    purgeyao    michaelsembwever     bwh12398**   YunfengGao    WildWolfBang    juzhiyuan    SoberChina    KangZhiDong    mufiye     yushuqiang**   zxbu    yazong    xzyJavaX    xcaspar    wuguangkuo    webb2019    evanxuhe    yang-xiaodong    RaigorJiang    Qiliang    Oliverwqcwrw    buxingzhe    tsuilouis    leizhiyuan    Jargon9    potiuk     iluckysi   kim-up    HarryFQG    easonyipj    willseeyou    AlexanderWert    ajanthan    chen-ni    844067874    elk-g    dsc6636926    heihei180    amwyyyy    dengliming    cuiweiwei    coki230    coder-yqj    cngdkxw    chenmudu    beckhampu    cheetah012    ZhuWang1112    zaunist    shichaoyuan    XhangUeiJong    Switch-vov    SummerOfServenteen    maxiaoguang64    maclong1989    sourcelliu    margauxcabrera    Yebemeto    momo0313    Xlinlin     cheatbeater**   lxliuxuankb    lu-xiaoshuang    lpcy    louis-zhou    lngmountain     lixin40**   liuyanggithup    linliaoy     xlz35429674**    seiferhu**    seiferhu**    72372815\u0026#43;royal-dargon**    72775443\u0026#43;raybi-asus**   ralphgj    qiuyu-d    thanq    probeyang    carrypann    pkxiuluo    FeynmanZhou    ooi22    onecloud360    nileblack    chenyi19851209    neatlife    lijial    inversionhourglass    huliangdream    hsoftxl    hi-sb    Heguoya    hardzhang    haotian2015    gzlicanyi    guyukou    gy09535    guochen2    kylixs    gonedays    guodongq    ggndnn    GerryYuan    geekymv    geektcp    leemove    lazycathome    
langke93    landonzeng    lagagain    ksewen    killGC    kikupotter    kevinyyyy    ken-duck    kayleyang    aeolusheath    justeene    jsbxyyx    zhangjianweibj    jianglin1008    jialong121    jjlu521016     zhousiliang163**    45602777\u0026#43;zhangzhanhong2**    zcai2**    zaygrzx**    yuyujulin**    yurunchuan**    182148432**    wu_yan_tao**    yanmingbi**    yangxb2010000**    yanbinwei2851**    978861768**    48479214\u0026#43;xuxiawei**    9313869\u0026#43;xuchangjunjx**    yexingren23**    1903636211**    xiaozheng**    281890899**    66098854\u0026#43;tangshan-brs**    88840672\u0026#43;wangwang89**    loushuiyifan**    305542043**    381321959**    zhangliang**    kzd666**    45203823\u0026#43;gzshilu**    28707699**    yqjdcyy**    tanjunchen20**    liuzhengyang**    hey.yanlong**    zygfengyuwuzu**    tmac.back**    xtha**    345434645**    zoidbergwill**    tbdp.hi**    tanzhen**    973117150**    89574863\u0026#43;4ydx3906**    sxzaihua**    hpy253215039**    814464284**    stone_wlg**    stenio**    hoolooday**    songzhe_fish**    wang-yaozheng**    sk163**    101088629\u0026#43;simonluo345**    simonlei**    41794887\u0026#43;sialais**    31874857\u0026#43;sikelangya**    mestarshine**    34833891\u0026#43;xdright**    bing**    23226334**    wujun8**    zzhxccw**    qrw_email**    wind2008hxy**    36367435\u0026#43;whl12345**    45580443\u0026#43;whfjam**    zwj777**    xiongchuang**    lyzhang1999**    52819067\u0026#43;weiqiang-w**    55177318\u0026#43;vcjmhg**    46754544\u0026#43;tristan-tsl**    wander4096**    136082619**    montecristosoul**   Lin1997    coolbeevip    LazyLei    leileiluoluo    lt5227    mostcool    Alipebt    zhentaoJin    kagaya85    augustowebd    j-s-3    JohnDuncan5171    jbampton    zouyx    JoeKerouac    Linda-pan    jim075960758    jiekun    c1ay     chenglei**    chenyao**   npmmirror    nikitap492    nickwongwong    ZhuoSiChen    mikechengwei    mikkeschiren    zeaposs    TheRealHaui    doddi    marcingrzejszczak    maolie    mahmoud-anwer    donotstopplz    liuhaoXD    linghengqian    darcydai    sdanzo    chanjarster    damonxue    cvimer    CommissarXia    ChengDaqi2023    CharlesMaster    shiluo34    brucewu-fly     qq327568824**   ArjenDavid-sjtu    AngryMills     andyzzlms**   AirTrioa    lunchboxav    50168383    1095071913    Jedore    mustangxu     zhongjianno1**   DeadLion    Lighfer    Henry75m39    onurccn    tankilo    Gallardot    AbelCha0    bootsrc    FingerLiu    Felixnoo    DuanYuePeng    efekaptan    qijianbo010    qqeasonchen    devon-ye     295198088**    c feng   buzuotaxuan    mmm9527    wolfboys    beiwangnull    amogege    alidisi    alexkarezin    aix3    adamni135    absorprofess    ZhengBing520    ZhHong    chenbeitang    ZS-Oliver    panniyuyu    fuhuo    ethan256    eoeac    echooymxq    dzx2018    IceSoda177    dvsv2    drgnchan    donbing007    dogblues    divyakumarjain    dd1k    dashanji    cutePanda123    cui-liqiang    cuishuang    crystaldust    wbpcode    TerrellChen    Technoboy-    StreamLang    stevehu    kun-song     826245622**   compilerduck    SheltonZSL    sergicastro    zhangsean    yymoth    ruibaby    rlenferink    remicollet    RandyAbernethy    QHWG67    pengyongqiang666    Patrick0308    yuqichou    Miss-you    ycoe     me**   yanickxia    XinweiLyu    liangyepianzhou    Wooo0    ViberW    wilsonwu    moonming    wyt    victor-yi    Videl    trustin    TomMD    ThisSeanZhang    gitter-badger    Adrian Cole    github-actions[bot]    dependabot[bot]     Booster UI    40     Fine0830   
 wu-sheng    heyanlong    pg-yang    CzyerChen    yswdqz    techbirds    Superskyyy    peachisai    zhourunjie1988    xu1009    weixiang1862    lsq27    innerpeacez    horochx    drgnchan    smartboy37597    CodePrometheus    WitMiao    liuyib    arugal    wuwen5    songzhendong    pw151294    kezhenxu94    jiang1997    hutaishi    heihei180    hadesy    ZhuWang1112    XinweiLyu    liangyepianzhou    SimonHu1993    LinuxSuRen    binbin666    marcingrzejszczak    toffentoffen    mahmoud-anwer    donotstopplz    BFergerson      Plugin for Service Topology    4     Fine0830    wu-sheng    Superskyyy     fine**     Java Agent    476     wu-sheng    peng-yongsheng     ascrutae**   ascrutae    kezhenxu94    acurtain    hanahmily    JaredTan95    dmsolr    mrproliu    arugal    zhaoyuguang    lytscu    Fine0830     zhangxin**   wingwong-knh    BFergerson    wankai123     ascrutae**   Ax1an    wayilau    zifeihan    EvanLjp    IanCao     295198088**   x22x22     394102339**   xu1009    pg-yang    clevertension    withlin    xbkaishui     renliangbu**   liuhaoyang    lujiajing1126    candyleer    carlvine500    nisiyong    liqiangz    hailin0    wallezhang    zhangkewei    bai-yang    Jtrust    heyanlong    xzyJavaX    songzhendong    adermxzs     55846420**   TinyAllen    heihaozi    CzyerChen    qxo    IluckySi    alonelaval    wendal    tristaZero    Humbertzhang    zhyyu    J-Cod3r    Cool-Coding    jjtyro    honganan    stalary    wuwen5     liufei**   gzlicanyi     lxin96**   tom-pytel    xuanyu66    devkanro    hutaishi    harvies    langyan1022    Liu-XinYuan    YunaiV    SataQiu    adriancole    darcyda1    yaojingguo    JohnNiang    flycash    tuohai666    cyberdak    codelipenghui    peachisai     55846420**   LiWenGu    kylixs    TeslaCN    haoyann    chidaodezhongsheng    xinzhuxiansheng    VictorZeng    xiaqi1210    yu199195    chanjarster    FatihErdem    aiyanbo    sN0wpeak    fgksgf    Oliverwqcwrw    Z-Beatles    alanlvle    dagmom    innerpeacez    dominicqi    weixiang1862    vcjmhg    cyejing    s00373198    huangyoje    Indifer    xiaoy00    snakorse    neeuq     lizl9**   libinglong    gnr163    chenpengfei    YczYanchengzhe    WillemJiang    liu-junchi    ScienJus    oflebbe    yanfch    Ahoo-Wang    dio    codeglzhang    osiriswd    scolia    terranhu    tzy1316106836    viswaramamoorthy    webb2019    gglzf4    kuaikuai     2278966200**   novayoung    muyun12    mgsheng    makingtime    lpcy    klboke    karott    jinlongwang    Hen1ng    Superskyyy    seifeHu    lkxiaolou    purgeyao    PepoRobert    michaelsembwever    marcingrzejszczak     bwh12398**   YunfengGao    WildWolfBang    shichaoyuan    juzhiyuan    SoberChina    KangZhiDong     yushuqiang**   zxbu    yazong    xcaspar    wuguangkuo    geekymv    yang-xiaodong    Shikugawa    Qiliang    buxingzhe    tsuilouis    Leibnizhu    leizhiyuan    CalvinKirs    Jargon9    potiuk     iluckysi   2han9wen71an    844067874    HarryFQG    ForrestWang123    ajanthan    AlexanderWert    willseeyou    ArjenDavid-sjtu    evanxuhe    elk-g    dsc6636926    amwyyyy    dengliming    dashanji    cylx3126    cuiweiwei    coki230    SummerOfServenteen    Switch-vov    tjiuming    XhangUeiJong    zaunist    cheetah012    beckhampu    chenmudu    coder-yqj    cngdkxw    githubcheng2978    FeynmanZhou    onecloud360    nileblack    neatlife    Xlinlin    momo0313    Yebemeto    margauxcabrera    sourcelliu    maxiaoguang64    lxliuxuankb    lvxiao1    guodongq    louis-zhou     lixin40**   pkxiuluo    carrypann    probeyang    
qiaoxingxing    thanq    qiuyu-d    ggndnn    ralphgj    raybi-asus    GerryYuan    geektcp    mestarshine     chenyao**   sikelangya    simonlei    sk163    zhangjianweibj    JoeCqupt    jialong121    jjlu521016    hyhyf    hxd123456    huliangdream    xiaomiusa87    hsoftxl    hi-sb    Heguoya    hardzhang    haotian2015    guyukou    gy09535    rechardguo    gonedays    liuyanggithup    linliaoy    lijial    leemove    lbc97    lazycathome    langke93    landonzeng    ksewen    killGC    kikupotter    kevinyyyy    kayleyang    aeolusheath    justeene    jsbxyyx    jmjoy     tmac.back**    345434645**    zoidbergwill**    zhousiliang163**    45602777\u0026#43;zhangzhanhong2**    zcai2**    zaygrzx**    yuyujulin**    yurunchuan**    74546965\u0026#43;yswdqz**    182148432**    wu_yan_tao**    yanmingbi**    yangxb2010000**    yanbinwei2851**    249021408**    9313869\u0026#43;xuchangjunjx**    xiongchuang**    cheatbeater**    66098854\u0026#43;tangshan-brs**    42414099\u0026#43;yanye666**    893979653**    88840672\u0026#43;wangwang89**    loushuiyifan**    lcbiao34**    305542043**    381321959**    orezsilence**    zhangliang**    kzd666**    45203823\u0026#43;gzshilu**    28707699**    tanjunchen20**    70845636\u0026#43;mufiye**    liuzhengyang**    zygfengyuwuzu**    lyzhang1999**    wqp1987**   w2dp    weiqiang-w    tristan-tsl    tincopper    angty    tedli    tbdpmi     tanzhen**   tangxqa    sxzaihua    hepyu    surechen    stone-wlg    stenio2011    zhe1926     xubinghaozs**    yexingren23**    1903636211**    1612202137**    281890899**    34833891\u0026#43;xdright**    bing**    23226334**    wujun8**    809697469**    zzhxccw**    qrw_email**    wind2008hxy**    63728367\u0026#43;will2020-power**    36367435\u0026#43;whl12345**    45580443\u0026#43;whfjam**    zwj777**    weihubeats**   augustowebd    jbampton    zouyx    JoeKerouac    Linda-pan    leihuazhe     zhongjianno1**   DeadLion    Lighfer    kim-up    hardy4yooz    onurccn    guillaume-alvarez    GuiSong01    tankilo    Gallardot    AbelCha0    nikitap492    nickwongwong    ZhuoSiChen    mikkeschiren    zeaposs    TheRealHaui    maolie    donotstopplz    liuhaoXD    lishuo5263    Lin1997    coolbeevip    LazyLei    leileiluoluo    lt5227    zhentaoJin    kagaya85    CharlesMaster    shiluo34    wapkch    thisisgpy    brucewu-fly    BigXin0109    bmk15897     qq327568824**   AngryMills     andyzzlms**   guoxiaod    adaivskenan    Alceatraz    AirTrioa    lunchboxav    50168383    1095071913    bootsrc    ForestWang123    FingerLiu    DuanYuePeng    efekaptan    qijianbo010    qqeasonchen    DominikHubacek    devon-ye    darknesstm    zhaoxiaojie0415    darcydai    sdanzo    dachuan9e    cvimer    CommissarXia    Chenfx-git    furaul    HScarb    c1ay     295198088**    c feng   buzuotaxuan    mmm9527    beiwangnull    andotorg    amogege    alexkarezin    aix3    adamni135    zimmem    ZhHong    chenbeitang    ZS-Oliver    panniyuyu    fuhuo    eoeac    life-    echooymxq    dzx2018    IceSoda177    dvsv2    drgnchan    donbing007    divyakumarjain    AlchemyDing    dd1k    cutePanda123    cui-liqiang    crystaldust    jinrongzhang    wbpcode    TerrellChen    Technoboy-    stevehu    kun-song     826245622**   compilerduck    sergicastro    zhangsean    yymoth    SWHHEART    ruibaby    rlenferink    RickyLau    RandyAbernethy    QHWG67    Patrick0308     chenglei**   yuqichou    yoyofx    Miss-you    ycoe     me**   yanickxia    yangyulely    Wooo0    ViberW    wilsonwu    moonming    victor-yi    Videl    
trustin    TomMD    ThisSeanZhang    gitter-badger     Python Agent    39     kezhenxu94    Superskyyy    tom-pytel    alonelaval    jiang1997    Humbertzhang    Jedore    ZEALi    katelei6    SheltonZSL    jaychoww    FAWC438    wu-sheng    probeyang    langyizhao    arcosx    zkscpqm    wuwen5    dafu-wu    VxCoder    taskmgr    Forstwith    fuhuo    dcryans     32413353\u0026#43;cooolr**   c1ay    chestarss    alidisi    XinweiLyu    TomMD    CodePrometheus    shenxiangzhuang    doddi    sungitly    wzy960520    JarvisG495    JaredTan95    fgksgf    zgfh     NodeJS Agent    15     kezhenxu94    tom-pytel    ruleeeer    BFergerson    wu-sheng    michaelzangl    alanlvle    tianyk    ErosZy    QuanjieDeng    TonyKingdom    liu-zhizhu     wxb17742006482**   nd-lqj    wuwen5     Go Agent    23     mrproliu    CodePrometheus    Alipebt    wu-sheng    LinuxSuRen    ShyunnY    IceSoda177    vearne    rfyiamcool    ethan256    jiekun    zheheBao    xuyue97    jarvis-u    icodeasy    YenchangChan    kikoroc    darknos    Ecostack    Ruff-nono    0o001    lujiajing1126    GlqEason     Rust Agent    7     jmjoy    wu-sheng    Shikugawa    tisonkun    CherishCai    dkkb    kezhenxu94     PHP Agent    5     jmjoy    heyanlong    phanalpha    wu-sheng    matikij     Client JavaScript    18     Fine0830    wu-sheng    arugal    Lighfer    kezhenxu94    tianyk    wuwen5    Leo555    qinhang3    min918    tthallos    i7guokui    aoxls    givingwu    Jtrust    JaredTan95    AliceTWu    airene     Nginx LUA Agent    21     wu-sheng    dmsolr    membphis    moonming    mrproliu    spacewander    kezhenxu94    WALL-E    arugal    wangrzneu    yxudong    JaredTan95    jeremie1112    dingdongnigetou    CalvinKirs    lilien1010    Jijun    Dofine-dufei    alonelaval    Frapschen    tzssangglass     Kong Agent    4     dmsolr    wu-sheng    kezhenxu94    CalvinKirs     SkyWalking Satellite    13     mrproliu    EvanLjp    kezhenxu94    gxthrj    wu-sheng    wangrzneu    BFergerson    fgksgf    CalvinKirs    guangdashao    inversionhourglass    nic-chen    arugal     Kubernetes Event Exporter    5     kezhenxu94    wu-sheng    fgksgf    dmsolr    CalvinKirs     SkyWalking Rover    9     mrproliu    wu-sheng    spacewander    jelipo    hkmdxlftjf    IluckySi    LinuxSuRen    caiwc    kezhenxu94      SkyWalking CLI    15     kezhenxu94    mrproliu    fgksgf    wu-sheng    hanahmily    try-agaaain    JarvisG495    arugal    alonelaval    BFergerson    heyanlong    Alexxxing    Superskyyy    clk1st    innerpeacez     Kubernetes Helm    31     innerpeacez    kezhenxu94    wu-sheng    hanahmily    mrproliu    JaredTan95    ButterBright    dashanji    rh-at    chengshiwen    eric-sailfish    geffzhang    glongzh    chenvista    swartz-k    tristan-tsl    vision-ken     wang_weihan**   wayilau    williamyao1982    zshrine    aikin-vip    wankai123    SeanKilleen    ScribblerCoder    rabajaj0509    CalvinKirs    carllhw    zalintyre    Yangfisher1    aviaviavi     SkyWalking Cloud on Kubernetes    19     hanahmily    dashanji    kezhenxu94    mrproliu    weixiang1862    wu-sheng    ESonata    jichengzhi    heyanlong    hwzhuhao    SzyWilliam     rolandma**   robberphex    toffentoffen    CalvinKirs    fgksgf    Duncan-tree-zhou    ButterBright    BFergerson      Data Collect Protocol    23     wu-sheng    mrproliu    arugal    kezhenxu94    liuhaoyang    EvanLjp    Shikugawa    peng-yongsheng    zifeihan    Switch-vov    dmsolr    hanahmily    fgksgf    nacx    yaojingguo    SataQiu    stalary    Z-Beatles    liqiangz    
snakorse    xu1009    heyanlong    Liu-XinYuan     Query Protocol    17     wu-sheng    mrproliu    wankai123    arugal    peng-yongsheng    kezhenxu94    hanahmily    x22x22    JaredTan95    BFergerson    MiracleDx    fgksgf    liuhaoyang    Fine0830    chenmudu    liqiangz    heyanlong     Go API    12     mrproliu    wu-sheng    kezhenxu94    arugal    fgksgf     dalekliuhan**   gxthrj    liqiangz    EvanLjp    JaredTan95    CalvinKirs     mrproliu**     BanyanDB    27     hanahmily    lujiajing1126    Fine0830    WuChuSheng1    ButterBright    wu-sheng    HHoflittlefish777    hailin0    zesiar0    sivasathyaseeelan    mikechengwei    Sylvie-Wxr    innerpeacez    sacloudy    caicancai    tisonkun    DevPJ9    LinuxSuRen    sksDonni    mrproliu    BFergerson    Muyu-art    CalvinKirs    qazxcdswe123    achintya-7    e1ijah1    kezhenxu94     BanyanDB Java Client    5     lujiajing1126    wu-sheng    hanahmily    kezhenxu94    hailin0     BanyanDB Helm    5     ButterBright    wu-sheng    hanahmily    wankai123    kezhenxu94      Agent Test Tool    19     dmsolr    kezhenxu94    mrproliu    wu-sheng    arugal    nisiyong    zhyyu    EvanLjp    yaojingguo    CalvinKirs    LeePui    marcingrzejszczak    Shikugawa    dagmom    harvies    alonelaval    jmjoy    pg-yang    OrezzerO     SkyWalking Eyes    37     kezhenxu94    fgksgf    wu-sheng    zooltd    emschu    tisonkun    jmjoy    keiranmraine    MoGuGuai-hzr    mrproliu    dongzl    spacewander    gdams    rovast    elijaholmos    ryanmrichard    freeqaz    heyanlong    zifeihan    mohammedtabish0    acelyc111    Xuanwo    xiaoyawei    stumins    steveklabnik    chengshiwen    crholm    fulmicoton    Two-Hearts    kevgo    halacs    FushuWang    Juneezee    ddlees    dave-tucker    antgamdia    guilload     SkyWalking Infra E2E    15     mrproliu    kezhenxu94    Humbertzhang    fgksgf    chunriyeqiongsaigao    ethan256    Superskyyy    dashanji    lujiajing1126    JohnNiang    CalvinKirs    FeynmanZhou    arugal    heyanlong    wu-sheng      (Archived) Docker Files    12     hanahmily    wu-sheng    JaredTan95    kezhenxu94     lixin40**   aviaviavi    andrewgkew    carlvine500    kkl129    tristan-tsl    arugal    heyanlong     (Archived) Rocketbot UI    66     TinyAllen    Fine0830    x22x22    wu-sheng    JaredTan95    kezhenxu94    heihaozi    bigflybrother    Jtrust    dmsolr    zhaoyuguang    alonelaval    tom-pytel    hanahmily    aeolusheath    arugal    hailin0    Indifer     zhaoyuguang**   xuchangjunjx    wuguangkuo    whfjam    shiluo34    ruibaby    wilsonwu    constanine    horber    liqiangz    leemove    fuhuo     denghaobo**   jianglin1008    codelipenghui    lunamagic1978    novayoung    probeyang    dominicqi    stone-wlg    surechen    wallezhang    wuwen5     bing**   xu1009    huangyoje    heyanlong    llissery     437376068**   aiyanbo    BFergerson    efekaptan    yanfch    grissom-grissom    grissomsh    Humbertzhang    kagaya85    liuhaoyang    tsuilouis    masterxxo    zeaposs    QHWG67    Doublemine    zaunist    xiaoxiangmoe    c1ay    dagmom    fredster33     (Archived) Legacy UI    23     hanahmily    wu-sheng    peng-yongsheng    ascrutae    TinyAllen     zhangxin**    295198088**    qiu_jy**   zhaoyuguang    zuohl    wendal    jjlu521016    withlin    bai-yang    zhangkewei    wynn5a    clevertension    cloudgc     baiyang06**   WillemJiang    liuhaoyang    leizhiyuan    ajanthan     (Archived) OAL Generator    2     wu-sheng    peng-yongsheng      SkyAPM-dotnet    41     liuhaoyang    snakorse    
wu-sheng    lu-xiaoshuang    ElderJames    yang-xiaodong    pengweiqhca    Ahoo-Wang    inversionhourglass    feiyun0112    sampsonye    KawhiWei    zeaposs    kaanid    qq362220083    withlin     xiaoweiyu**   witskeeper    beckjin    ShaoHans    misaya    itsvse    zhujinhu21    xclw2000    startewho    refactor2    rider11-dev    linkinshi    limfriend    guochen2    WeihanLi    SeanKilleen    cnlangzi    joesdu    SpringHgui    dimaaan    ChaunceyLin5152    catcherwong    BoydenYubin    andyliyuze    AlseinX     cpp2sky    7     Shikugawa     wbphub**   wuwen5    wu-sheng    makefriend8    wbpcode    JayInnn     SourceMarker    5     BFergerson    MrMineO5    voqaldev    chess-equality    javamak     Java Plugin Extensions    9     wu-sheng    ascrutae    JaredTan95    raybi-asus    zifeihan    nisiyong    bitray    li20020439    pg-yang     uranus    2     harvies    wu-sheng     (outdated) CN Documentations    22     kezhenxu94    SataQiu    wu-sheng    nikyotensai    ccccye123    Frapschen    shalk    wujun8    zhangnew    yazong    xiaoping378    thelight1     lilulu**   Hen1ng    harvies    dagmom    alienwow    system-designer    Superskyyy    JaredTan95    fgksgf    xing-yin      (Retired) Transporter Plugins    5     codeglzhang    wu-sheng    dmsolr    Jargon9    kezhenxu94     (Retired) Go2Sky    25     arugal    wu-sheng    hanahmily    mrproliu    kagaya85    easonyipj    nacx    Luckyboys    fgksgf    Humbertzhang    JaredTan95    JJ-Jasmin    withlin    yaojingguo    Just-maple    kuaikuai    zhuCheer    chwjbn    kehuili    kezhenxu94    limfriend    matianjun1    lokichoggio     bing**   liweiv     (Retired) Go2Sky Plugins    11     arugal    kagaya85    mrproliu    wu-sheng    elza2    matianjun1    dgqypl    zaunist    kehuili    newyue588cc    royal-dargon     (Retired) SkyAPM PHP Agent    33     heyanlong     wangbo78978**   lpf32     songzhian**   songzhian    wu-sheng    jmjoy    remicollet    kilingzhang     songzhian**   xonze    iamif3000    mikkeschiren    anynone    lvxiao1    xinfeingxia85    cyhii    silverkorn    AlpherJang    LJX22222    MrYzys    rovast    SP-66666    tinyu0    xudianyang    huohuanhuan    kezhenxu94    limfriend    ljf-6666    qjgszzx    dickens7    xybingbing    yaowenqiang    az13js     (Retired) SkyAPM Node.js    10     ascrutae    kezhenxu94    wu-sheng    zouyx    Jozdortraz    a526672351    rovast    Runrioter    jasper-zsh    TJ666         ← Team       ","excerpt":"SkyWalking Team The SkyWalking team is comprised of Members and Contributors, and the growth has …","ref":"/team/","title":"Team"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. 
Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/latest/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. 
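The receiver-telegraf fragment quoted in the Telegraf receiver entries above is flattened by the site build; re-indented, the application.yml module configuration it refers to reads roughly as follows (the nesting is restored from the selector/default layout described in the text, so treat this as a sketch rather than the verbatim file):

```yaml
receiver-telegraf:
  selector: ${SW_RECEIVER_TELEGRAF:default}
  default:
    # Rule files under telegraf-rules/ to activate, e.g. the vm rule.
    activeFiles: ${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}
```

With this default active, the vm rule maps Telegraf's CPU and memory input metrics into the meter system, as the rule table in the entry notes.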
Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/next/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/v9.3.0/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. 
If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/v9.4.0/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. 
Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/v9.5.0/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/v9.6.0/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. 
If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","excerpt":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by …","ref":"/docs/main/v9.7.0/en/setup/backend/telegraf-receiver/","title":"Telegraf receiver"},{"body":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, metrics-service or pprof.\nMultiple export methods are supported simultaneously, separated by commas.\nPrometheus Start HTTP port to export the satellite telemetry metrics.\nWhen the following configuration is completed, then the satellite telemetry metrics export to: http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, and all the metrics contain the cluster, service and instance tag.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus} # Export telemetry data through Prometheus server, only works on \u0026#34;export_type=prometheus\u0026#34;. prometheus: # The prometheus server address. address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234} # The prometheus server metrics endpoint. 
endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics} Metrics Service Send the message to the gRPC service that supports SkyWalking\u0026rsquo;s native Meter protocol with interval.\nWhen the following configuration is completed, send the message to the specified grpc-client component at the specified time interval. Among them, service and instance will correspond to the services and service instances in SkyWalking.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:metrics_service} # Export telemetry data through native meter format to OAP backend, only works on \u0026#34;export_type=metrics_service\u0026#34;. metrics_service: # The grpc-client plugin name, using the SkyWalking native batch meter protocol client_name: ${SATELLITE_TELEMETRY_METRICS_SERVICE_CLIENT_NAME:grpc-client} # The interval second for sending metrics interval: ${SATELLITE_TELEMETRY_METRICS_SERVICE_INTERVAL:10} # The prefix of telemetry metric name metric_prefix: ${SATELLITE_TELEMETRY_METRICS_SERVICE_METRIC_PREFIX:sw_stl_} pprof pprof can provide HTTP services to allow remote viewing of service execution status, helping you discover performance issues.\n# The Satellite self telemetry configuration. telemetry: # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:pprof} # Export pprof service for detect performance issue pprof: # The pprof server address. address: ${SATELLITE_TELEMETRY_PPROF_ADDRESS::6060} ","excerpt":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, …","ref":"/docs/skywalking-satellite/latest/en/setup/examples/feature/telemetry-exporter/readme/","title":"Telemetry Exporter"},{"body":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, metrics-service or pprof.\nMultiple export methods are supported simultaneously, separated by commas.\nPrometheus Start HTTP port to export the satellite telemetry metrics.\nWhen the following configuration is completed, then the satellite telemetry metrics export to: http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, and all the metrics contain the cluster, service and instance tag.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. 
instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus} # Export telemetry data through Prometheus server, only works on \u0026#34;export_type=prometheus\u0026#34;. prometheus: # The prometheus server address. address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234} # The prometheus server metrics endpoint. endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics} Metrics Service Send the message to the gRPC service that supports SkyWalking\u0026rsquo;s native Meter protocol with interval.\nWhen the following configuration is completed, send the message to the specified grpc-client component at the specified time interval. Among them, service and instance will correspond to the services and service instances in SkyWalking.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:metrics_service} # Export telemetry data through native meter format to OAP backend, only works on \u0026#34;export_type=metrics_service\u0026#34;. metrics_service: # The grpc-client plugin name, using the SkyWalking native batch meter protocol client_name: ${SATELLITE_TELEMETRY_METRICS_SERVICE_CLIENT_NAME:grpc-client} # The interval second for sending metrics interval: ${SATELLITE_TELEMETRY_METRICS_SERVICE_INTERVAL:10} # The prefix of telemetry metric name metric_prefix: ${SATELLITE_TELEMETRY_METRICS_SERVICE_METRIC_PREFIX:sw_stl_} pprof pprof can provide HTTP services to allow remote viewing of service execution status, helping you discover performance issues.\n# The Satellite self telemetry configuration. telemetry: # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:pprof} # Export pprof service for detect performance issue pprof: # The pprof server address. address: ${SATELLITE_TELEMETRY_PPROF_ADDRESS::6060} ","excerpt":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, …","ref":"/docs/skywalking-satellite/next/en/setup/examples/feature/telemetry-exporter/readme/","title":"Telemetry Exporter"},{"body":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, metrics-service or pprof.\nMultiple export methods are supported simultaneously, separated by commas.\nPrometheus Start HTTP port to export the satellite telemetry metrics.\nWhen the following configuration is completed, then the satellite telemetry metrics export to: http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, and all the metrics contain the cluster, service and instance tag.\n# The Satellite self telemetry configuration. 
telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus} # Export telemetry data through Prometheus server, only works on \u0026#34;export_type=prometheus\u0026#34;. prometheus: # The prometheus server address. address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234} # The prometheus server metrics endpoint. endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics} Metrics Service Send the message to the gRPC service that supports SkyWalking\u0026rsquo;s native Meter protocol with interval.\nWhen the following configuration is completed, send the message to the specified grpc-client component at the specified time interval. Among them, service and instance will correspond to the services and service instances in SkyWalking.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:metrics_service} # Export telemetry data through native meter format to OAP backend, only works on \u0026#34;export_type=metrics_service\u0026#34;. metrics_service: # The grpc-client plugin name, using the SkyWalking native batch meter protocol client_name: ${SATELLITE_TELEMETRY_METRICS_SERVICE_CLIENT_NAME:grpc-client} # The interval second for sending metrics interval: ${SATELLITE_TELEMETRY_METRICS_SERVICE_INTERVAL:10} # The prefix of telemetry metric name metric_prefix: ${SATELLITE_TELEMETRY_METRICS_SERVICE_METRIC_PREFIX:sw_stl_} pprof pprof can provide HTTP services to allow remote viewing of service execution status, helping you discover performance issues.\n# The Satellite self telemetry configuration. telemetry: # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:pprof} # Export pprof service for detect performance issue pprof: # The pprof server address. address: ${SATELLITE_TELEMETRY_PPROF_ADDRESS::6060} ","excerpt":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/feature/telemetry-exporter/readme/","title":"Telemetry Exporter"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
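The Satellite telemetry block in the Telemetry Exporter entries above is likewise flattened; restored to YAML indentation, the Prometheus exporter variant is roughly the following (nesting inferred from the inline comments, so this is a sketch, with the defaults copied as shown above):

```yaml
# The Satellite self telemetry configuration.
telemetry:
  # The space concept for the deployment, such as the namespace in Kubernetes.
  cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster}
  # The group concept for the deployment, such as the service resource in Kubernetes.
  service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service}
  # The minimum running unit, such as the pod in Kubernetes.
  instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance}
  # Telemetry export type: "prometheus", "metrics_service", "pprof" or "none".
  export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus}
  # Only used when export_type=prometheus.
  prometheus:
    # The prometheus server address.
    address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234}
    # The prometheus server metrics endpoint.
    endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics}
```

With these defaults the metrics are served at http://localhost:1234/metrics, tagged with the cluster, service and instance values.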
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/latest/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
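The telemetry snippet that the backend-telemetry entries above repeat is also flattened; with indentation restored it is the following application.yml fragment (a sketch of the nesting, not the verbatim file):

```yaml
telemetry:
  selector: ${SW_TELEMETRY:prometheus}
  prometheus:
    host: 127.0.0.1
    port: 1543
```

This switches the module from the default none provider to prometheus and exposes the self-observability metrics on the given host and port.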
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/next/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
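The OpenTelemetry Collector scrape job shown inline in the entries above loses its indentation in this index; reconstructed as YAML it reads roughly as follows (the list nesting is inferred from the flattened keys, so treat it as a sketch):

```yaml
- job_name: 'skywalking-so11y'   # keep this name in sync with so11y.yaml to filter only so11y metrics
  metrics_path: '/metrics'
  kubernetes_sd_configs:
    - role: pod
  relabel_configs:
    # Keep only the oap container's prometheus-port.
    - source_labels: [__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]
      action: keep
      regex: oap;prometheus-port
    # Label every sample with a fixed service name.
    - source_labels: []
      target_label: service
      replacement: oap-server
    # Use the pod name as the host_name label.
    - source_labels: [__meta_kubernetes_pod_name]
      target_label: host_name
      regex: (.+)
      replacement: $$1
```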
To assist the Ops team, we provide the telemetry for the OAP backend itself.\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up prometheus fetcher.  prometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self\u0026#34;}Make sure config/fetcher-prom-rules/self.yaml exists.  Once you deploy an oap-server cluster, the target host should be replaced with a dedicated IP or hostname. For instances, there are three OAP servers in your cluster. Their host is service1, service2, and service3 respectively. You should update each self.yaml to switch the target host.\nservice1:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service1:1234labels:service:oap-server...service2:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service2:1234labels:service:oap-server...service3:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service3:1234labels:service:oap-server...Service discovery (k8s) If you deploy an oap-server cluster on k8s, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port - source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1 For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional, rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
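For the v9.0.0-era setup described above, the flattened prometheus-fetcher and self.yaml snippets are easier to follow once re-indented; a sketch, with the nesting inferred from the inline comments:

```yaml
# application.yml: enable the Prometheus fetcher with the "self" rule.
prometheus-fetcher:
  selector: ${SW_PROMETHEUS_FETCHER:default}
  default:
    enabledRules: ${SW_PROMETHEUS_FETCHER_ENABLED_RULES:"self"}
```

```yaml
# config/fetcher-prom-rules/self.yaml on the service1 node; the other nodes
# use service2:1234 and service3:1234 as their targets instead.
fetcherInterval: PT15S
fetcherTimeout: PT10S
metricsPath: /metrics
staticConfig:
  # targets will be labeled as "instance"
  targets:
    - service1:1234
  labels:
    service: oap-server
```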
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into the OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up Prometheus fetcher.  prometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self\u0026#34;}Make sure config/fetcher-prom-rules/self.yaml exists.  Once you deploy an OAP server cluster, the target host should be replaced with a dedicated IP or hostname. For instance, if there are three OAP servers in your cluster, their hosts are service1, service2, and service3, respectively. You should update each self.yaml to switch the target host.\nservice1:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service1:1234labels:service:oap-server...service2:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service2:1234labels:service:oap-server...service3:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service3:1234labels:service:oap-server...Service discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port - source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1 For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
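The long helm install command in the entries above can be hard to scan; expressed as a values file it corresponds roughly to the YAML below (a sketch that assumes the Apache SkyWalking Kubernetes chart forwards oap.env entries as OAP environment variables, as the --set flags imply; $HUB and $TAG are placeholders from the command):

```yaml
elasticsearch:
  replicas: 1
  minimumMasterNodes: 1
  imageTag: 7.5.1
oap:
  replicas: 2
  image:
    repository: $HUB/skywalking-oap
    tag: $TAG
  storageType: elasticsearch
  ports:
    prometheus-port: 1234                    # expose the self observability metrics port
  env:
    SW_TELEMETRY: prometheus
    SW_OTEL_RECEIVER: default                # enable the Otel receiver
    SW_OTEL_RECEIVER_ENABLED_OC_RULES: oap   # add the oap analyzer for Otel metrics
ui:
  image:
    repository: $HUB/skywalking-ui
    tag: $TAG
```

Passing this file with helm's -f flag would be roughly equivalent to the chain of --set flags shown above.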
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into the OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up Prometheus fetcher.  prometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self\u0026#34;}Make sure config/fetcher-prom-rules/self.yaml exists.  Once you deploy an OAP server cluster, the target host should be replaced with a dedicated IP or hostname. For instance, if there are three OAP servers in your cluster, their hosts are service1, service2, and service3, respectively. You should update each self.yaml to switch the target host.\nservice1:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service1:1234labels:service:oap-server...service2:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service2:1234labels:service:oap-server...service3:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service3:1234labels:service:oap-server...Service discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
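For clarity, the flattened config/fetcher-prom-rules/self.yaml fragment shown earlier (the service1 variant) reads like this when re-indented; the trailing "..." in the original stands for the rest of the rule file and is likewise left out here. Repeat the same file per OAP host, changing only the target.

fetcherInterval: PT15S
fetcherTimeout: PT10S
metricsPath: /metrics
staticConfig:
  # targets will be labeled as "instance"
  targets:
    - service1:1234
  labels:
    service: oap-server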
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into the OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. 
The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
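If you take the optional Prometheus + Grafana route mentioned above instead of the OpenTelemetry Collector, a minimal prometheus.yml scrape job against the self-observability endpoint might look like the sketch below. The job name and target host are placeholders; use the host and port you configured in the telemetry block (1234 by default, 1543 in the examples above).

scrape_configs:
  - job_name: skywalking-oap-so11y      # arbitrary job name
    metrics_path: /metrics
    static_configs:
      - targets: ['oap-server:1234']    # placeholder host; point it at the OAP telemetry endpoint
        labels:
          service: oap-server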
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
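The long helm --set command above can also be kept in a values file. This is a sketch that assumes the --set keys map one-to-one onto values (chart-version dependent); the image repository/tag overrides ($HUB/$TAG) are omitted here.

elasticsearch:
  replicas: 1
  minimumMasterNodes: 1
  imageTag: 7.5.1
oap:
  replicas: 2
  storageType: elasticsearch
  ports:
    prometheus-port: 1234                              # expose the self-observability metrics port
  env:
    SW_TELEMETRY: prometheus
    SW_OTEL_RECEIVER: default                          # enable the OTel receiver
    SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES: oap   # add the oap analyzer for OTel metrics

It would then be installed with, for example: helm -n istio-system install skywalking skywalking -f values.yaml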
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
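For context on the keep rule in the scrape job above: regex oap;prometheus-port means only pods whose container is named oap and whose container port is named prometheus-port are scraped. With the Helm chart this naming comes from oap.ports.prometheus-port; the fragment below only illustrates the Pod spec shape the rule expects, not actual chart output.

containers:
  - name: oap
    ports:
      - name: prometheus-port     # matched together with the container name by regex oap;prometheus-port
        containerPort: 1234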
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","excerpt":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-telemetry/","title":"Telemetry for backend"},{"body":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. 
Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","excerpt":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/logic-endpoint/","title":"The Logic Endpoint"},{"body":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","excerpt":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/logic-endpoint/","title":"The Logic Endpoint"},{"body":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  
User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","excerpt":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/logic-endpoint/","title":"The Logic Endpoint"},{"body":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","excerpt":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/logic-endpoint/","title":"The Logic Endpoint"},{"body":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. 
The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","excerpt":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/logic-endpoint/","title":"The Logic Endpoint"},{"body":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","excerpt":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-dependency/","title":"the toolkit, such as using maven or gradle"},{"body":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","excerpt":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-dependency/","title":"the toolkit, such as using maven or gradle"},{"body":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","excerpt":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-dependency/","title":"the toolkit, such as using maven or gradle"},{"body":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. 
Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","excerpt":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-dependency/","title":"the toolkit, such as using maven or gradle"},{"body":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","excerpt":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-dependency/","title":"the toolkit, such as using maven or gradle"},{"body":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. 
Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","excerpt":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. …","ref":"/docs/main/latest/en/setup/backend/backend-profile-thread-merging/","title":"Thread dump merging mechanism"},{"body":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. 
Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","excerpt":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. …","ref":"/docs/main/next/en/setup/backend/backend-profile-thread-merging/","title":"Thread dump merging mechanism"},{"body":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. 
This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","excerpt":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. 
…","ref":"/docs/main/v9.0.0/en/guides/backend-profile/","title":"Thread dump merging mechanism"},{"body":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. 
Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","excerpt":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. …","ref":"/docs/main/v9.1.0/en/guides/backend-profile/","title":"Thread dump merging mechanism"},{"body":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  
Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","excerpt":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. …","ref":"/docs/main/v9.2.0/en/guides/backend-profile/","title":"Thread dump merging mechanism"},{"body":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. 
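The stack-based, non-recursive merge just mentioned can be sketched as below, reusing the hypothetical StackNode from the accumulator sketch earlier on this page. Each entry on the explicit stack is a pair of nodes whose child lists still have to be merged; matching children (same code signature under the same parent) keep their sequences and timestamps together, and non-matching children are attached as new subtrees.

```java
import java.util.ArrayDeque;
import java.util.Deque;

class StackTreeCombiner {
    /** A pair of nodes whose children still have to be merged. */
    private record MergeTask(StackNode target, StackNode source) { }

    /** Merge `source` into `target` without recursion, using an explicit stack of tasks. */
    static void combine(StackNode target, StackNode source) {
        Deque<MergeTask> tasks = new ArrayDeque<>();
        tasks.push(new MergeTask(target, source));
        while (!tasks.isEmpty()) {
            MergeTask task = tasks.pop();
            for (StackNode sourceChild : task.source().children) {
                StackNode match = null;
                for (StackNode targetChild : task.target().children) {
                    if (targetChild.codeSignature.equals(sourceChild.codeSignature)) {
                        match = targetChild;              // same signature under the same parent
                        break;
                    }
                }
                if (match != null) {
                    // Keep the dump sequences and timestamps together, then merge grandchildren.
                    match.sequences.addAll(sourceChild.sequences);
                    match.timestamps.addAll(sourceChild.timestamps);
                    tasks.push(new MergeTask(match, sourceChild));
                } else {
                    // No match: attach the whole source subtree as a new child.
                    task.target().children.add(sourceChild);
                }
            }
        }
    }
}
```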
Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","excerpt":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. …","ref":"/docs/main/v9.3.0/en/guides/backend-profile/","title":"Thread dump merging mechanism"},{"body":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  
Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","excerpt":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. …","ref":"/docs/main/v9.4.0/en/guides/backend-profile/","title":"Thread dump merging mechanism"},{"body":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. 
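In Java stream terms, the pipeline above is a parallel stream collected with Collectors.groupingBy plus a custom Collector whose supplier, accumulator, and combiner correspond to the steps listed below. The sketch reuses the hypothetical StackNode, StackTreeAccumulator, and StackTreeCombiner types from the earlier sketches on this page; ThreadSnapshot is an assumed record standing in for a database row, and the finisher step (duration calculation and GraphQL conversion) is omitted.

```java
import java.util.List;
import java.util.Map;
import java.util.stream.Collector;
import java.util.stream.Collectors;

class ThreadDumpAnalysis {
    /** Hypothetical database record: one thread dump with its stack, sequence and timestamp. */
    record ThreadSnapshot(List<String> stack, int sequence, long timestamp) { }

    static Map<String, StackNode> analyze(List<ThreadSnapshot> snapshots) {
        return snapshots.parallelStream()
            .collect(Collectors.groupingBy(
                s -> s.stack().get(0),                    // group by the first stack element
                Collector.of(
                    () -> new StackNode("", 0),           // supplier: an empty top-level tree
                    (tree, s) -> StackTreeAccumulator     // accumulator: add each dump into a tree
                        .accumulate(tree, s.stack(), s.sequence(), s.timestamp()),
                    (left, right) -> {                    // combiner: merge trees built in parallel
                        StackTreeCombiner.combine(left, right);
                        return left;
                    }
                )));
    }
}
```

Because each group may be accumulated by several threads, several top-level trees can exist for a group before the combiner runs, which is why the empty trees can be filled in parallel without locks.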
Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","excerpt":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-profile-thread-merging/","title":"Thread dump merging mechanism"},{"body":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. 
This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","excerpt":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. 
…","ref":"/docs/main/v9.6.0/en/setup/backend/backend-profile-thread-merging/","title":"Thread dump merging mechanism"},{"body":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. 
Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","excerpt":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-profile-thread-merging/","title":"Thread dump merging mechanism"},{"body":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large volumes of time-series data. One of the key features of TSDB is its ability to automatically manage data storage over time, optimize performance and ensure that the system can scale to handle large workloads. TSDB empowers Measure and Stream relevant data.\nShard In TSDB, the data in a group is partitioned into shards based on a configurable sharding scheme. Each shard is assigned to a specific set of storage nodes, and those nodes store and process the data within that shard. This allows BanyanDB to scale horizontally by adding more storage nodes to the cluster as needed.\nshard\n Buffer: It is typically implemented as an in-memory queue managed by a shard. When new time-series data is ingested into the system, it is added to the end of the queue, and when the buffer reaches a specific size, the data is flushed to disk in batches. SST: When a bucket of buffer becomes full or reaches a certain size threshold, it is flushed to disk as a new Sorted String Table (SST) file. This process is known as compaction. Segments and Blocks: Time-series data is stored in data segments/blocks within each shard. Blocks contain a fixed number of data points and are organized into time windows. Each data segment includes an index that efficiently retrieves data within the block. Block Cache: It manages the in-memory cache of data blocks, improving query performance by caching frequently accessed data blocks in memory.  Write Path The write path of TSDB begins when time-series data is ingested into the system. TSDB will consult the schema repository to check if the group exists, and if it does, then it will hash the SeriesID to determine which shard it belongs to.\nEach shard in TSDB is responsible for storing a subset of the time-series data, and it uses a write-ahead log to record incoming writes in a durable and fault-tolerant manner. The shard also holds an in-memory index allowing fast lookups of time-series data.\nWhen a shard receives a write request, the data is written to the buffer as a series of buckets. Each bucket is a fixed-size chunk of time-series data typically configured to be several minutes or hours long. As new data is written to the buffer, it is appended to the current bucket until it is full. Once the bucket is full, it is closed, and a new bucket is created to continue buffering writes.\nOnce a bucket is closed, it is stored as a single SST in a shard. The file is indexed and added to the index for the corresponding time range and resolution.\nRead Path The read path in TSDB retrieves time-series data from disk or memory and returns it to the query engine. The read path comprises several components: the buffer, cache, and SST file. The following is a high-level overview of how these components work together to retrieve time-series data in TSDB.\nThe first step in the read path is to perform an index lookup to determine which blocks contain the desired time range. 
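As an illustration of the write path just described, the sketch below shows shard selection by hashing the SeriesID and a buffer bucket that is flushed once it is full. It is written in Java only to stay consistent with the other sketches on this page; BanyanDB itself is implemented in Go, and every type and constant here (SHARD_COUNT, DataPoint, Bucket) is made up for illustration.

```java
import java.util.ArrayList;
import java.util.List;

// Illustrative only; not BanyanDB code.
class WritePathSketch {
    static final int SHARD_COUNT = 4;                     // assumed sharding scheme

    /** Pick the shard for a series: hash the SeriesID, then take it modulo the shard count. */
    static int shardFor(String seriesId) {
        return Math.floorMod(seriesId.hashCode(), SHARD_COUNT);
    }

    record DataPoint(String seriesId, long timestamp, byte[] payload) { }

    /** A stand-in for one buffer bucket: appends data and reports when it should be flushed. */
    static class Bucket {
        private final List<DataPoint> points = new ArrayList<>();
        private final int capacity;

        Bucket(int capacity) { this.capacity = capacity; }

        /** Returns true once the bucket is full, i.e. it should be closed and written as one SST. */
        boolean append(DataPoint point) {
            points.add(point);
            return points.size() >= capacity;
        }
    }
}
```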
The index contains metadata about each data block, including its start and end time and its location on disk.\nIf the requested data is present in the buffer (i.e., it has been recently written but not yet persisted to disk), the buffer is checked to see if the data can be returned directly from memory. The read path determines which bucket(s) contain the requested time range. If the data is not present in the buffer, the read path proceeds to the next step.\nIf the requested data is present in the cache (i.e., it has been recently read from disk and is still in memory), it is checked to see if the data can be returned directly from memory. The read path proceeds to the next step if the data is not in the cache.\nThe final step in the read path is to look up the appropriate SST file on disk. Files are the on-disk representation of data blocks and are organized by shard and time range. The read path determines which SST files contain the requested time range and reads the appropriate data blocks from the disk.\n","excerpt":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large …","ref":"/docs/skywalking-banyandb/latest/concept/tsdb/","title":"TimeSeries Database(TSDB)"},{"body":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large volumes of time-series data. One of the key features of TSDB is its ability to automatically manage data storage over time, optimize performance and ensure that the system can scale to handle large workloads. TSDB empowers Measure and Stream relevant data.\nShard In TSDB, the data in a group is partitioned into shards based on a configurable sharding scheme. Each shard is assigned to a specific set of storage nodes, and those nodes store and process the data within that shard. This allows BanyanDB to scale horizontally by adding more storage nodes to the cluster as needed.\nWithin each shard, data is stored in different segments based on time ranges. The series indexes are generated based on entities, and the indexes generated based on indexing rules of the Measure types are also stored under the shard.\nSegment Each segment is composed of multiple parts. Whenever SkyWalking sends a batch of data, BanyanDB writes this batch of data into a new part. For data of the Stream type, the inverted indexes generated based on the indexing rules are also stored in the segment. Since BanyanDB adopts a snapshot approach for data read and write operations, the segment also needs to maintain additional snapshot information to record the validity of the parts.\nPart Within a part, data is split into multiple files in a columnar manner. The timestamps are stored in the timestamps.bin file, tags are organized in persistent tag families as various files with the .tf suffix, and fields are stored separately in the fields.bin file.\nIn addition, each part maintains several metadata files. Among them, metadata.json is the metadata file for the part, storing descriptive information, such as start and end times, part size, etc.\nThe meta.bin is a skipping index file that serves as the entry file for the entire part, helping to index the primary.bin file.\nThe primary.bin file contains the index of each block. 
Through it, the actual data files or the tagFamily metadata files ending with .tfm can be indexed, which in turn helps to locate the data in blocks.\nNotably, for data of the Stream type, since there are no field columns, the fields.bin file does not exist, while the rest of the structure is entirely consistent with the Measure type.\nBlock Each block holds data with the same series ID. The max size of the measure block is controlled by data volume and the number of rows. Meanwhile, the max size of the stream block is controlled by data volume. The diagram below shows the detailed fields within each block. The block is the minimal unit of TSDB, which contains several rows of data. Due to the column-based design, each block is spread over several files.\nWrite Path The write path of TSDB begins when time-series data is ingested into the system. TSDB will consult the schema repository to check if the group exists, and if it does, then it will hash the SeriesID to determine which shard it belongs to.\nEach shard in TSDB is responsible for storing a subset of the time-series data. The shard also holds an in-memory index allowing fast lookups of time-series data.\nWhen a shard receives a write request, the data is written to the buffer as a memory part. Meanwhile, the series index and inverted index will also be updated. The worker in the background periodically flushes data, writing the memory part to the disk. After the flush operation is completed, it triggers a merge operation to combine the parts and remove invalid data.\nWhenever a new memory part is generated, or when a flush or merge operation is triggered, they initiate an update of the snapshot and delete outdated snapshots. The parts in a persistent snapshot could be accessible to the reader.\nRead Path The read path in TSDB retrieves time-series data from disk or memory, and returns it to the query engine. The read path comprises several components: the buffer and parts. The following is a high-level overview of how these components work together to retrieve time-series data in TSDB.\nThe first step in the read path is to perform an index lookup to determine which parts contain the desired time range. The index contains metadata about each data part, including its start and end time.\nIf the requested data is present in the buffer (i.e., it has been recently written but not yet persisted to disk), the buffer is checked to see if the data can be returned directly from memory. The read path determines which memory part(s) contain the requested time range. If the data is not present in the buffer, the read path proceeds to the next step.\nThe next step in the read path is to look up the appropriate parts on disk. Files are the on-disk representation of blocks and are organized by shard and time range. The read path determines which parts contain the requested time range and reads the appropriate blocks from the disk. Due to the column-based storage design, it may be necessary to read multiple data files.\n","excerpt":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large …","ref":"/docs/skywalking-banyandb/next/concept/tsdb/","title":"TimeSeries Database(TSDB)"},{"body":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large volumes of time-series data. One of the key features of TSDB is its ability to automatically manage data storage over time, optimize performance and ensure that the system can scale to handle large workloads. 
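The per-part file layout described above can be summarized in one place. The enum below only restates the file names and roles from the text; it is documentation in code form, not part of BanyanDB.

```java
/** The on-disk files of one part, as described above. */
enum PartFile {
    TIMESTAMPS("timestamps.bin", "timestamps of the rows in the part"),
    TAG_FAMILY(".tf", "one file per persistent tag family (suffix, not a full name)"),
    TAG_FAMILY_METADATA(".tfm", "tag-family metadata, reached through primary.bin"),
    FIELDS("fields.bin", "field columns; absent for Stream data, which has no fields"),
    METADATA("metadata.json", "descriptive information such as start/end times and part size"),
    META("meta.bin", "skipping index; the entry file that indexes primary.bin"),
    PRIMARY("primary.bin", "index of each block, pointing at the data and .tfm files");

    final String fileName;
    final String role;

    PartFile(String fileName, String role) {
        this.fileName = fileName;
        this.role = role;
    }
}
```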
TSDB empowers Measure and Stream relevant data.\nShard In TSDB, the data in a group is partitioned into shards based on a configurable sharding scheme. Each shard is assigned to a specific set of storage nodes, and those nodes store and process the data within that shard. This allows BanyanDB to scale horizontally by adding more storage nodes to the cluster as needed.\nshard\n Buffer: It is typically implemented as an in-memory queue managed by a shard. When new time-series data is ingested into the system, it is added to the end of the queue, and when the buffer reaches a specific size, the data is flushed to disk in batches. SST: When a bucket of buffer becomes full or reaches a certain size threshold, it is flushed to disk as a new Sorted String Table (SST) file. This process is known as compaction. Segments and Blocks: Time-series data is stored in data segments/blocks within each shard. Blocks contain a fixed number of data points and are organized into time windows. Each data segment includes an index that efficiently retrieves data within the block. Block Cache: It manages the in-memory cache of data blocks, improving query performance by caching frequently accessed data blocks in memory.  Write Path The write path of TSDB begins when time-series data is ingested into the system. TSDB will consult the schema repository to check if the group exists, and if it does, then it will hash the SeriesID to determine which shard it belongs to.\nEach shard in TSDB is responsible for storing a subset of the time-series data, and it uses a write-ahead log to record incoming writes in a durable and fault-tolerant manner. The shard also holds an in-memory index allowing fast lookups of time-series data.\nWhen a shard receives a write request, the data is written to the buffer as a series of buckets. Each bucket is a fixed-size chunk of time-series data typically configured to be several minutes or hours long. As new data is written to the buffer, it is appended to the current bucket until it is full. Once the bucket is full, it is closed, and a new bucket is created to continue buffering writes.\nOnce a bucket is closed, it is stored as a single SST in a shard. The file is indexed and added to the index for the corresponding time range and resolution.\nRead Path The read path in TSDB retrieves time-series data from disk or memory and returns it to the query engine. The read path comprises several components: the buffer, cache, and SST file. The following is a high-level overview of how these components work together to retrieve time-series data in TSDB.\nThe first step in the read path is to perform an index lookup to determine which blocks contain the desired time range. The index contains metadata about each data block, including its start and end time and its location on disk.\nIf the requested data is present in the buffer (i.e., it has been recently written but not yet persisted to disk), the buffer is checked to see if the data can be returned directly from memory. The read path determines which bucket(s) contain the requested time range. If the data is not present in the buffer, the read path proceeds to the next step.\nIf the requested data is present in the cache (i.e., it has been recently read from disk and is still in memory), it is checked to see if the data can be returned directly from memory. The read path proceeds to the next step if the data is not in the cache.\nThe final step in the read path is to look up the appropriate SST file on disk. 
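The lookup order of the read path just described (buffer first, then the block cache, then the SST files on disk) can be condensed into a short sketch. The Buffer, Cache, and SstFiles interfaces are assumptions for illustration; the real read path lives inside BanyanDB's Go codebase.

```java
import java.util.Optional;

// Hypothetical interfaces; illustration of the lookup order only.
interface Buffer   { Optional<byte[]> read(String seriesId, long from, long to); }
interface Cache    { Optional<byte[]> read(String seriesId, long from, long to); }
interface SstFiles { byte[] read(String seriesId, long from, long to); }

class ReadPathSketch {
    /** Check memory first (buffer, then block cache), fall back to the SST files on disk. */
    static byte[] read(Buffer buffer, Cache cache, SstFiles sst,
                       String seriesId, long from, long to) {
        return buffer.read(seriesId, from, to)
            .or(() -> cache.read(seriesId, from, to))
            .orElseGet(() -> sst.read(seriesId, from, to));
    }
}
```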
Files are the on-disk representation of data blocks and are organized by shard and time range. The read path determines which SST files contain the requested time range and reads the appropriate data blocks from the disk.\n","excerpt":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large …","ref":"/docs/skywalking-banyandb/v0.5.0/concept/tsdb/","title":"TimeSeries Database(TSDB)"},{"body":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need to know about Apache SkyWalking Cloud on Kubernetes(SWCK). This repository provides how to onboard and customize the agent injector, operator and adapter.\n Design. Some materials regarding the design decision under the hood. Setup. Several instruments to onboard the agent injector, operator and adapter. Examples. A number of examples of how to use SWCK.  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need …","ref":"/docs/skywalking-swck/latest/readme/","title":"to Apache SkyWalking Cloud on Kubernetes Document Repository"},{"body":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need to know about Apache SkyWalking Cloud on Kubernetes(SWCK). This repository provides how to onboard and customize the agent injector, operator and adapter.\n Design. Some materials regarding the design decision under the hood. Setup. Several instruments to onboard the agent injector, operator and adapter. Examples. A number of examples of how to use SWCK.  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need …","ref":"/docs/skywalking-swck/next/readme/","title":"to Apache SkyWalking Cloud on Kubernetes Document Repository"},{"body":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need to know about Apache SkyWalking Cloud on Kubernetes(SWCK). This repository provides how to onboard and customize the agent injector, operator and adapter.\n Design. Some materials regarding the design decision under the hood. Setup. Several instruments to onboard the agent injector, operator and adapter. Examples. A number of examples of how to use SWCK.  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need …","ref":"/docs/skywalking-swck/v0.9.0/readme/","title":"to Apache SkyWalking Cloud on Kubernetes Document Repository"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. 
On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/latest/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/next/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, which makes sure that a network can be trusted. 
On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent, and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions on this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. 
On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. 
On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. 
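To make the PERMISSION_DENIED behaviour above concrete, the sketch below shows how a token check can be done in a gRPC server interceptor. This is an illustration only, not the OAP's actual implementation, and the "Authentication" header name is an assumption of the sketch; the agent side simply sends the token configured by agent.authentication.

```java
import io.grpc.Metadata;
import io.grpc.ServerCall;
import io.grpc.ServerCallHandler;
import io.grpc.ServerInterceptor;
import io.grpc.Status;

// Illustrative token check only; not the OAP's actual interceptor.
public class TokenCheckInterceptor implements ServerInterceptor {
    // Assumed header name carrying the token.
    private static final Metadata.Key<String> AUTH_KEY =
        Metadata.Key.of("Authentication", Metadata.ASCII_STRING_MARSHALLER);

    private final String expectedToken;

    public TokenCheckInterceptor(String expectedToken) {
        this.expectedToken = expectedToken;
    }

    @Override
    public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
            ServerCall<ReqT, RespT> call, Metadata headers, ServerCallHandler<ReqT, RespT> next) {
        String token = headers.get(AUTH_KEY);
        if (expectedToken != null && !expectedToken.isEmpty() && !expectedToken.equals(token)) {
            // The agent observes this as StatusRuntimeException: PERMISSION_DENIED.
            call.close(Status.PERMISSION_DENIED.withDescription("token mismatch"), new Metadata());
            return new ServerCall.Listener<ReqT>() { };
        }
        return next.startCall(call, headers);
    }
}
```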
On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","excerpt":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-token-auth/","title":"Token Authentication"},{"body":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. 
agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","excerpt":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/token-auth/","title":"Token Authentication"},{"body":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","excerpt":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/token-auth/","title":"Token Authentication"},{"body":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. 
But we appreciate someone contributes this feature.\n","excerpt":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/token-auth/","title":"Token Authentication"},{"body":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","excerpt":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/token-auth/","title":"Token Authentication"},{"body":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","excerpt":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/token-auth/","title":"Token Authentication"},{"body":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  
Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","excerpt":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/","title":"Trace Correlation Context"},{"body":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","excerpt":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/","title":"Trace Correlation Context"},{"body":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","excerpt":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/","title":"Trace Correlation Context"},{"body":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. 
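The two correlation APIs quoted above can be combined into one self-contained sketch. This assumes the apm-toolkit-trace dependency is on the classpath and uses what is assumed to be its usual package name; the method signatures match the samples above.

```java
import java.util.Optional;

// Assumed toolkit package; putCorrelation/getCorrelation are the APIs shown above.
import org.apache.skywalking.apm.toolkit.trace.TraceContext;

public class CorrelationExample {
    public static void main(String[] args) {
        // Put custom data into the tracing context; any previous value for the key is returned.
        Optional<String> previous = TraceContext.putCorrelation("customKey", "customValue");

        // Read it back later; with context propagation this also works in downstream segments.
        Optional<String> value = TraceContext.getCorrelation("customKey");

        System.out.println("previous=" + previous.orElse("<none>") + ", value=" + value.orElse("<none>"));
    }
}
```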
Sample codes only\n","excerpt":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/","title":"Trace Correlation Context"},{"body":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","excerpt":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/","title":"Trace Correlation Context"},{"body":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  
CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","excerpt":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/","title":"Trace Cross Thread"},{"body":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","excerpt":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/","title":"Trace Cross Thread"},{"body":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  
ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","excerpt":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/","title":"Trace Cross Thread"},{"body":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  
CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","excerpt":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/","title":"Trace Cross Thread"},{"body":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","excerpt":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/","title":"Trace Cross Thread"},{"body":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. 
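Referring back to the Trace Cross Thread cases above: the wrapper fragments can be assembled into one self-contained sketch. It assumes the apm-toolkit-trace dependency and what are assumed to be its usual package names; the wrappers keep the submitted work inside the caller's trace segment.

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Assumed toolkit packages; the wrapper factories match Case 2 and Case 3 above.
import org.apache.skywalking.apm.toolkit.trace.CallableWrapper;
import org.apache.skywalking.apm.toolkit.trace.RunnableWrapper;
import org.apache.skywalking.apm.toolkit.trace.SupplierWrapper;

public class CrossThreadExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executorService = Executors.newFixedThreadPool(1);

        // Case 2: wrap Runnable/Callable tasks handed to an executor.
        executorService.execute(RunnableWrapper.of(() -> System.out.println("runnable work")));
        Future<String> result = executorService.submit(CallableWrapper.of(() -> "callable result"));
        System.out.println(result.get());

        // Case 3: wrap the Supplier used by CompletableFuture.supplyAsync.
        CompletableFuture.supplyAsync(SupplierWrapper.of(() -> "SupplierWrapper"))
                .thenAccept(System.out::println)
                .join();

        executorService.shutdown();
    }
}
```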
Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  
int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  
// All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. 
// Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. 
There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, 
\u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","excerpt":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/latest/en/api/trace-data-protocol-v3/","title":"Trace Data Protocol"},{"body":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. 
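The single-segment endpoint shown above (POST http://localhost:12800/v3/segment) can be exercised with nothing more than the JDK's built-in HttpClient. A minimal sketch, assuming an OAP with the HTTP receiver enabled on localhost:12800; the JSON body is deliberately abbreviated (empty spans array), whereas a real report carries the spans exactly as in the input above.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SegmentReportExample {
    public static void main(String[] args) throws Exception {
        // Abbreviated body for illustration; a real report includes the "spans" array shown above.
        String segmentJson = """
            {
              "traceId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
              "serviceInstance": "User_Service_Instance_Name",
              "service": "User_Service_Name",
              "traceSegmentId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
              "spans": []
            }""";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12800/v3/segment"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(segmentJson))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println("HTTP status: " + response.statusCode());
    }
}
```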
// At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. 
// We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  
repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. // Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  
Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: 
POST http://localhost:12800/v3/segments

Send a segment object list in JSON format.

Input:

[{
  "traceId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
  "serviceInstance": "User_Service_Instance_Name",
  "spans": [{
    "operationName": "/ingress",
    "startTime": 1588664577013,
    "endTime": 1588664577028,
    "spanType": "Exit",
    "spanId": 1,
    "isError": false,
    "parentSpanId": 0,
    "componentId": 6000,
    "peer": "upstream service",
    "spanLayer": "Http"
  }, {
    "operationName": "/ingress",
    "startTime": 1588664577013,
    "tags": [{
      "key": "http.method",
      "value": "GET"
    }, {
      "key": "http.params",
      "value": "http://localhost/ingress"
    }],
    "endTime": 1588664577028,
    "spanType": "Entry",
    "spanId": 0,
    "parentSpanId": -1,
    "isError": false,
    "spanLayer": "Http",
    "componentId": 6000
  }],
  "service": "User_Service_Name",
  "traceSegmentId": "a12ff60b-5807-463b-a1f8-fb1c8608219e"
}, {
  "traceId": "f956699e-5106-4ea3-95e5-da748c55bac1",
  "serviceInstance": "User_Service_Instance_Name",
  "spans": [{
    "operationName": "/ingress",
    "startTime": 1588664577250,
    "endTime": 1588664577250,
    "spanType": "Exit",
    "spanId": 1,
    "isError": false,
    "parentSpanId": 0,
    "componentId": 6000,
    "peer": "upstream service",
    "spanLayer": "Http"
  }, {
    "operationName": "/ingress",
    "startTime": 1588664577250,
    "tags": [{
      "key": "http.method",
      "value": "GET"
    }, {
      "key": "http.params",
      "value": "http://localhost/ingress"
    }],
    "endTime": 1588664577250,
    "spanType": "Entry",
    "spanId": 0,
    "parentSpanId": -1,
    "isError": false,
    "spanLayer": "Http",
    "componentId": 6000
  }],
  "service": "User_Service_Name",
  "traceSegmentId": "f956699e-5106-4ea3-95e5-da748c55bac1"
}]

Output:
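The bulk endpoint differs only in its path and in accepting a JSON array, so a batching helper is a small variation of the previous sketch (again illustrative, standard-library HTTP only):

    import json
    import urllib.request


    def post_segments(segments, base_url="http://localhost:12800"):
        # POST a list of segment objects to /v3/segments in one request.
        req = urllib.request.Request(
            base_url + "/v3/segments",
            data=json.dumps(list(segments)).encode("utf-8"),
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        with urllib.request.urlopen(req) as resp:
            return resp.status

Calling it with a list of segment dicts shaped like the bulk example above reports them all in a single request.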
","excerpt":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/next/en/api/trace-data-protocol-v3/","title":"Trace Data Protocol"},{"body":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. 
// At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. 
// We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  
repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. // Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  
Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: 
\u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","excerpt":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/v9.7.0/en/api/trace-data-protocol-v3/","title":"Trace Data Protocol"},{"body":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nOverview Trace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. 
The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Protocol Definition // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  
string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. 
Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  
Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}","excerpt":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/v9.0.0/en/protocols/trace-data-protocol-v3/","title":"Trace Data Protocol v3"},{"body":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nOverview Trace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Protocol Definition // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. 
// At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. 
// We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  
repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}","excerpt":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/v9.1.0/en/protocols/trace-data-protocol-v3/","title":"Trace Data Protocol v3"},{"body":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nOverview Trace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. 
It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Protocol Definition // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  
string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  
// All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. 
message SegmentCollection { repeated SegmentObject segments = 1;}","excerpt":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/v9.2.0/en/protocols/trace-data-protocol-v3/","title":"Trace Data Protocol v3"},{"body":"Trace Data Protocol v3.1 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and JVM metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. 
Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  
int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  
int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. // Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  
repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}","excerpt":"Trace Data Protocol v3.1 Trace Data Protocol describes the data format between SkyWalking …","ref":"/docs/main/v9.3.0/en/protocols/trace-data-protocol-v3/","title":"Trace Data Protocol v3.1"},{"body":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. 
This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. 
Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","excerpt":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to …","ref":"/docs/main/latest/en/setup/backend/backend-trace-profiling/","title":"Trace Profiling"},{"body":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. 
Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  
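To make these fields concrete, the following is a minimal, illustrative sketch (not the OAP or Java Agent implementation; all class, method, and field names are hypothetical) of how evenly spaced thread-stack samples from one segment and time range could be merged into a frame tree. In this sketch, Count is the number of samples that contain a frame, Duration approximates Count multiplied by the dump period, and Duration Child Excluded is a frame's duration minus that of its children.
// Hypothetical sketch only: merge evenly spaced thread-stack samples into a frame tree.
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

class FrameNode {
    final String codeSignature;          // method signature of this stack frame
    long duration;                       // total time attributed to this frame, in ms
    long durationChildExcluded;          // self time: duration minus children durations
    int count;                           // how many samples contained this frame
    final Map<String, FrameNode> children = new LinkedHashMap<>();

    FrameNode(String codeSignature) { this.codeSignature = codeSignature; }
}

class StackMergeSketch {
    // Each sample is a stack from root (e.g. the entry method) to leaf,
    // assumed to be taken every dumpPeriodMillis within the analyzed segment/time range.
    static FrameNode merge(List<List<String>> samples, long dumpPeriodMillis) {
        FrameNode root = new FrameNode("<root>");
        for (List<String> stack : samples) {
            FrameNode current = root;
            for (String signature : stack) {
                current = current.children.computeIfAbsent(signature, FrameNode::new);
                current.count++;
                current.duration += dumpPeriodMillis; // each sample stands for one dump period
            }
        }
        computeSelfTime(root);
        return root;
    }

    // Duration Child Excluded = duration of a frame minus the duration of its children.
    static void computeSelfTime(FrameNode node) {
        long childrenDuration = 0;
        for (FrameNode child : node.children.values()) {
            computeSelfTime(child);
            childrenDuration += child.duration;
        }
        node.durationChildExcluded = node.duration - childrenDuration;
    }
}
A frame that appears in many consecutive samples therefore accumulates a large duration, which is what makes the slow line of business code stand out after merging.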
If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","excerpt":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to …","ref":"/docs/main/next/en/setup/backend/backend-trace-profiling/","title":"Trace Profiling"},{"body":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  
Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  
If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","excerpt":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-trace-profiling/","title":"Trace Profiling"},{"body":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  
Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  
If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","excerpt":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-trace-profiling/","title":"Trace Profiling"},{"body":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  
Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  
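Before moving on, here is a minimal, hypothetical sketch of the sampling side of a task, tying together the Min Duration Threshold, Dump Period, and Max Sampling Count fields described earlier on this page. It is not the Java Agent code; all names are illustrative.
// Hypothetical sketch only: periodic thread-stack sampling under the task limits above.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

class ProfilingTaskSketch {
    final long minDurationThresholdMillis;   // only profile requests slower than this
    final long dumpPeriodMillis;             // sample the thread stack at this period
    final int maxSamplingCount;              // stop after this many profiled traces
    final AtomicInteger profiledTraces = new AtomicInteger();
    final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    ProfilingTaskSketch(long minDurationThresholdMillis, long dumpPeriodMillis, int maxSamplingCount) {
        this.minDurationThresholdMillis = minDurationThresholdMillis;
        this.dumpPeriodMillis = dumpPeriodMillis;
        this.maxSamplingCount = maxSamplingCount;
    }

    // Called for a request matching the task's endpoint; sampling begins only once the
    // request has already run longer than the configured threshold.
    ScheduledFuture<?> maybeStartSampling(Thread requestThread, long elapsedMillis) {
        if (elapsedMillis < minDurationThresholdMillis) {
            return null; // too fast to be worth profiling
        }
        if (profiledTraces.incrementAndGet() > maxSamplingCount) {
            return null; // task quota exhausted, stop profiling further traces
        }
        return scheduler.scheduleAtFixedRate(
            () -> report(requestThread.getStackTrace()),   // periodic stack dump
            0, dumpPeriodMillis, TimeUnit.MILLISECONDS);
    }

    void report(StackTraceElement[] stack) {
        // A real agent would buffer these snapshots and report them to the OAP backend.
    }
}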
If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","excerpt":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-trace-profiling/","title":"Trace Profiling"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration have a higher priority than default.rate and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/latest/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 
10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/next/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage. If you enable the trace sampling mechanism at the server side, you will find that the metrics of the service, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces into storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would do their best not to split the traces. 
See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism activated. The default value is `-1`, which means would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when sampling mechanism is activated. When sampling mechanism is activated, this config would cause the error status segment to be sampled, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments which are reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote also if most of the access have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.0.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.1.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 
10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.2.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. 
See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.3.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.4.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 
10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.5.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. 
See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.6.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","excerpt":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed …","ref":"/docs/main/v9.7.0/en/setup/backend/trace-sampling/","title":"Trace Sampling at server side"},{"body":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) -\u0026gt; 6.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ Spring Webflux WebClient 5.x -\u0026gt; 6.x JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 4.1.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x (Optional³)   RPC Frameworks  Dubbo 2.5.4 
-\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x ActiveMQ-Artemis 2.30.0 -\u0026gt; 2.31.2 Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. 
So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n³These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\n","excerpt":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/supported-list/","title":"Tracing and Tracing based Metrics Analyze Plugins"},{"body":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) -\u0026gt; 6.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ Spring Webflux WebClient 5.x -\u0026gt; 6.x JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 4.1.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x (Optional³)   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x ActiveMQ-Artemis 2.30.0 -\u0026gt; 2.31.2 Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   
MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x C3P0 0.9.0 -\u0026gt; 0.10.0   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x   Connection Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x C3P0 0.9.0 -\u0026gt; 0.10.0     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n³These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. 
removed from DockerHub).\n","excerpt":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/supported-list/","title":"Tracing and Tracing based Metrics Analyze Plugins"},{"body":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 3.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 
3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. 
So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n","excerpt":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/supported-list/","title":"Tracing and Tracing based Metrics Analyze Plugins"},{"body":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 3.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 
1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. 
So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n","excerpt":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/supported-list/","title":"Tracing and Tracing based Metrics Analyze Plugins"},{"body":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) -\u0026gt; 6.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ Spring Webflux WebClient 5.x -\u0026gt; 6.x JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 4.1.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x (Optional³)   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x ActiveMQ-Artemis 2.30.0 -\u0026gt; 2.31.2 Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 
rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n³These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\n","excerpt":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/supported-list/","title":"Tracing and Tracing based Metrics Analyze Plugins"},{"body":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\nimport \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Use Native Tracing Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. 
When creating an Exit Span, you need to write the context carrier into the target RPC request.\ntype ExtractorRef func(headerKey string) (string, error) type InjectorRef func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request trace.CreateEntrySpan(\u0026#34;EntrySpan\u0026#34;, func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request trace.CreateExitSpan(\u0026#34;ExitSpan\u0026#34;, request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Create Span Use trace.CreateEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel.\n The first parameter is operation name of span the second parameter is InjectorRef.  spanRef, err := trace.CreateEntrySpan(\u0026#34;operationName\u0026#34;, InjectorRef) Use trace.CreateLocalSpan() API to create local span\n the only parameter is the operation name of span.  spanRef, err := trace.CreateLocalSpan(\u0026#34;operationName\u0026#34;) Use trace.CreateExitSpan() API to create exit span.\n the first parameter is the operation name of span the second parameter is the remote peer which means the peer address of exit operation. the third parameter is the ExtractorRef  spanRef, err := trace.CreateExitSpan(\u0026#34;operationName\u0026#34;, \u0026#34;peer\u0026#34;, ExtractorRef) Use trace.StopSpan() API to stop current span\ntrace.StopSpan() Add Span’s Tag and Log Use trace.AddLog() to record log in span.\nUse trace.SetTag() to add tag to span, the parameters of tag are two String which are key and value respectively.\ntrace.AddLog(...string) trace.SetTag(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;) Set ComponentID Use trace.SetComponent() to set the component id of the Span\n the type of parameter is int32.  trace.SetComponent(ComponentID) The Component ID in Span is used to identify the current component, which is declared in the component libraries YAML from the OAP server side.\nAsync Prepare/Finish SpanRef is the return value of CreateSpan.Use SpanRef.PrepareAsync() to make current span still alive until SpanRef.AsyncFinish() called.\n Call PrepareAsync(). Use trace.StopSpan() to stop span in the original goroutine. Propagate the SpanRef to any other goroutine. Call SpanRef.AsyncFinish() in any goroutine.  Capture/Continue Context Snapshot  Use trace.CaptureContext() to get the segment info and store it in ContextSnapshotRef. Propagate the snapshot context to any other goroutine. Use trace.ContinueContext(snapshotRef) to load the snapshotRef in the target goroutine.  Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n  Use trace.GetTraceID() API to get traceID.\ntraceID := trace.GetTraceID()   Use trace.GetSegmentID() API to get segmentID.\nsegmentID := trace.GetSegmentID()   Use trace.GetSpanID() API to get spanID.\nspanID := trace.GetSpanID()   Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. 
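Before the correlation-context details that follow, here is a consolidated sketch tying together the span APIs described above (create entry/exit span, tag, log, stop, read the trace ID). It is illustrative rather than taken from the docs: the operation names, the downstream URL, and the surrounding HTTP handler are assumptions; only the trace.* calls are the toolkit APIs shown above.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/apache/skywalking-go/toolkit/trace"
)

// handler is example scaffolding; only the trace.* calls are the documented toolkit APIs.
func handler(w http.ResponseWriter, r *http.Request) {
	// Entry span: extract the context carrier from the incoming request headers.
	_, err := trace.CreateEntrySpan("GET:/orders", func(headerKey string) (string, error) {
		return r.Header.Get(headerKey), nil
	})
	if err == nil {
		defer trace.StopSpan() // stop the entry span when the handler returns
	}

	trace.SetTag("http.method", r.Method)   // example tag
	trace.AddLog("handling order request")  // example log line

	// Exit span: inject the context carrier into the outbound request headers.
	req, _ := http.NewRequest(http.MethodGet, "http://downstream:8080/stock", nil)
	_, err = trace.CreateExitSpan("GET:/stock", req.Host, func(headerKey, headerValue string) error {
		req.Header.Add(headerKey, headerValue)
		return nil
	})
	if err == nil {
		defer trace.StopSpan() // stopped before the entry span (defers run last-in, first-out)
	}
	http.DefaultClient.Do(req)

	fmt.Fprintf(w, "trace id: %v\n", trace.GetTraceID())
}

func main() {
	http.HandleFunc("/orders", handler)
	http.ListenAndServe(":8080", nil)
}
```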
All the data in the context will be propagated with the in-wire process automatically.\nUse trace.SetCorrelation() API to set custom data in tracing context.\ntrace.SetCorrelation(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;)  Max element count in the correlation context is 3 Max value length of each element is 128  CorrelationContext will remove the key when the value is empty.\nUse trace.GetCorrelation() API to get custom data.\nvalue := trace.GetCorrelation(\u0026#34;key\u0026#34;) ","excerpt":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as …","ref":"/docs/skywalking-go/latest/en/advanced-features/manual-apis/toolkit-trace/","title":"Tracing APIs"},{"body":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\nimport \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Use Native Tracing Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\ntype ExtractorRef func(headerKey string) (string, error) type InjectorRef func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request trace.CreateEntrySpan(\u0026#34;EntrySpan\u0026#34;, func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request trace.CreateExitSpan(\u0026#34;ExitSpan\u0026#34;, request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Create Span Use trace.CreateEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel.\n The first parameter is operation name of span the second parameter is InjectorRef.  spanRef, err := trace.CreateEntrySpan(\u0026#34;operationName\u0026#34;, InjectorRef) Use trace.CreateLocalSpan() API to create local span\n the only parameter is the operation name of span.  spanRef, err := trace.CreateLocalSpan(\u0026#34;operationName\u0026#34;) Use trace.CreateExitSpan() API to create exit span.\n the first parameter is the operation name of span the second parameter is the remote peer which means the peer address of exit operation. the third parameter is the ExtractorRef  spanRef, err := trace.CreateExitSpan(\u0026#34;operationName\u0026#34;, \u0026#34;peer\u0026#34;, ExtractorRef) Use trace.StopSpan() API to stop current span\ntrace.StopSpan() Add Span’s Tag and Log Use trace.AddLog() to record log in span.\nUse trace.SetTag() to add tag to span, the parameters of tag are two String which are key and value respectively.\ntrace.AddLog(...string) trace.SetTag(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;) Set ComponentID Use trace.SetComponent() to set the component id of the Span\n the type of parameter is int32.  
trace.SetComponent(ComponentID) The Component ID in Span is used to identify the current component, which is declared in the component libraries YAML from the OAP server side.\nAsync Prepare/Finish SpanRef is the return value of CreateSpan.Use SpanRef.PrepareAsync() to make current span still alive until SpanRef.AsyncFinish() called.\n Call PrepareAsync(). Use trace.StopSpan() to stop span in the original goroutine. Propagate the SpanRef to any other goroutine. Call SpanRef.AsyncFinish() in any goroutine.  Capture/Continue Context Snapshot  Use trace.CaptureContext() to get the segment info and store it in ContextSnapshotRef. Propagate the snapshot context to any other goroutine. Use trace.ContinueContext(snapshotRef) to load the snapshotRef in the target goroutine.  Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n  Use trace.GetTraceID() API to get traceID.\ntraceID := trace.GetTraceID()   Use trace.GetSegmentID() API to get segmentID.\nsegmentID := trace.GetSegmentID()   Use trace.GetSpanID() API to get spanID.\nspanID := trace.GetSpanID()   Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\nUse trace.SetCorrelation() API to set custom data in tracing context.\ntrace.SetCorrelation(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;)  Max element count in the correlation context is 3 Max value length of each element is 128  CorrelationContext will remove the key when the value is empty.\nUse trace.GetCorrelation() API to get custom data.\nvalue := trace.GetCorrelation(\u0026#34;key\u0026#34;) ","excerpt":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as …","ref":"/docs/skywalking-go/next/en/advanced-features/manual-apis/toolkit-trace/","title":"Tracing APIs"},{"body":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\nimport \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Use Native Tracing Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\ntype ExtractorRef func(headerKey string) (string, error) type InjectorRef func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request trace.CreateEntrySpan(\u0026#34;EntrySpan\u0026#34;, func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request trace.CreateExitSpan(\u0026#34;ExitSpan\u0026#34;, request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Create Span Use trace.CreateEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel.\n The first parameter is operation name of span the second parameter is InjectorRef.  
spanRef, err := trace.CreateEntrySpan(\u0026#34;operationName\u0026#34;, InjectorRef) Use trace.CreateLocalSpan() API to create local span\n the only parameter is the operation name of span.  spanRef, err := trace.CreateLocalSpan(\u0026#34;operationName\u0026#34;) Use trace.CreateExitSpan() API to create exit span.\n the first parameter is the operation name of span the second parameter is the remote peer which means the peer address of exit operation. the third parameter is the ExtractorRef  spanRef, err := trace.CreateExitSpan(\u0026#34;operationName\u0026#34;, \u0026#34;peer\u0026#34;, ExtractorRef) Use trace.StopSpan() API to stop current span\ntrace.StopSpan() Add Span’s Tag and Log Use trace.AddLog() to record log in span.\nUse trace.SetTag() to add tag to span, the parameters of tag are two String which are key and value respectively.\ntrace.AddLog(...string) trace.SetTag(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;) Set ComponentID Use trace.SetComponent() to set the component id of the Span\n the type of parameter is int32.  trace.SetComponent(ComponentID) The Component ID in Span is used to identify the current component, which is declared in the component libraries YAML from the OAP server side.\nAsync Prepare/Finish SpanRef is the return value of CreateSpan.Use SpanRef.PrepareAsync() to make current span still alive until SpanRef.AsyncFinish() called.\n Call PrepareAsync(). Use trace.StopSpan() to stop span in the original goroutine. Propagate the SpanRef to any other goroutine. Call SpanRef.AsyncFinish() in any goroutine.  Capture/Continue Context Snapshot  Use trace.CaptureContext() to get the segment info and store it in ContextSnapshotRef. Propagate the snapshot context to any other goroutine. Use trace.ContinueContext(snapshotRef) to load the snapshotRef in the target goroutine.  Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n  Use trace.GetTraceID() API to get traceID.\ntraceID := trace.GetTraceID()   Use trace.GetSegmentID() API to get segmentID.\nsegmentID := trace.GetSegmentID()   Use trace.GetSpanID() API to get spanID.\nspanID := trace.GetSpanID()   Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\nUse trace.SetCorrelation() API to set custom data in tracing context.\ntrace.SetCorrelation(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;)  Max element count in the correlation context is 3 Max value length of each element is 128  CorrelationContext will remove the key when the value is empty.\nUse trace.GetCorrelation() API to get custom data.\nvalue := trace.GetCorrelation(\u0026#34;key\u0026#34;) ","excerpt":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as …","ref":"/docs/skywalking-go/v0.4.0/en/advanced-features/manual-apis/toolkit-trace/","title":"Tracing APIs"},{"body":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  gin: Gin tested v1.7.0 to v1.9.0. http: Native HTTP tested go v1.17 to go v1.20. go-restfulv3: Go-Restful tested v3.7.1 to 3.10.2. mux: Mux tested v1.7.0 to v1.8.0. iris: Iris tested v12.1.0 to 12.2.5. fasthttp: FastHttp tested v1.10.0 to v1.50.0. fiber: Fiber tested v2.49.0 to v2.50.0. 
echov4: Echov4 tested v4.0.0 to v4.11.4   HTTP Client  http: Native HTTP tested go v1.17 to go v1.20. fasthttp: FastHttp tested v1.10.0 to v1.50.0.   RPC Frameworks  dubbo: Dubbo tested v3.0.1 to v3.0.5. kratosv2: Kratos tested v2.3.1 to v2.6.2. microv4: Go-Micro tested v4.6.0 to v4.10.2. grpc : gRPC tested v1.55.0 to v1.57.0.   Database Client  gorm: GORM tested v1.22.0 to v1.25.1.  MySQL Driver   mongo: Mongo tested v1.11.1 to v1.11.7. sql: Native SQL tested go v1.17 to go v1.20.  MySQL Driver tested v1.4.0 to v1.7.1.     Cache Client  go-redisv9: go-redis tested v9.0.3 to v9.0.5.   MQ Client  rocketMQ: rocketmq-client-go tested v2.1.2. amqp: AMQP tested v1.9.0.    Metrics Plugins The meter plugin provides the advanced metrics collections.\n runtimemetrics: Native Runtime Metrics tested go v1.17 to go v1.20.  Logging Plugins The logging plugin provides the advanced logging collections.\n logrus: Logrus tested v1.8.2 to v1.9.3. zap: Zap tested v1.17.0 to v1.24.0.  ","excerpt":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP …","ref":"/docs/skywalking-go/latest/en/agent/support-plugins/","title":"Tracing Plugins"},{"body":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  gin: Gin tested v1.7.0 to v1.9.0. http: Native HTTP tested go v1.17 to go v1.20. go-restfulv3: Go-Restful tested v3.7.1 to 3.10.2. mux: Mux tested v1.7.0 to v1.8.0. iris: Iris tested v12.1.0 to 12.2.5. fasthttp: FastHttp tested v1.10.0 to v1.50.0. fiber: Fiber tested v2.49.0 to v2.50.0. echov4: Echov4 tested v4.0.0 to v4.11.4   HTTP Client  http: Native HTTP tested go v1.17 to go v1.20. fasthttp: FastHttp tested v1.10.0 to v1.50.0.   RPC Frameworks  dubbo: Dubbo tested v3.0.1 to v3.0.5. kratosv2: Kratos tested v2.3.1 to v2.6.2. microv4: Go-Micro tested v4.6.0 to v4.10.2. grpc : gRPC tested v1.55.0 to v1.57.0.   Database Client  gorm: GORM tested v1.22.0 to v1.25.1.  MySQL Driver   mongo: Mongo tested v1.11.1 to v1.11.7. sql: Native SQL tested go v1.17 to go v1.20.  MySQL Driver tested v1.4.0 to v1.7.1.     Cache Client  go-redisv9: go-redis tested v9.0.3 to v9.0.5.   MQ Client  rocketMQ: rocketmq-client-go tested v2.1.2. amqp: AMQP tested v1.9.0. pulsar: pulsar-client-go tested v0.12.0. segmentio-kafka: segmentio-kafka tested v0.4.47.    Metrics Plugins The meter plugin provides the advanced metrics collections.\n runtimemetrics: Native Runtime Metrics tested go v1.17 to go v1.20.  Logging Plugins The logging plugin provides the advanced logging collections.\n logrus: Logrus tested v1.8.2 to v1.9.3. zap: Zap tested v1.17.0 to v1.24.0.  ","excerpt":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP …","ref":"/docs/skywalking-go/next/en/agent/support-plugins/","title":"Tracing Plugins"},{"body":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  gin: Gin tested v1.7.0 to v1.9.0. http: Native HTTP tested go v1.17 to go v1.20. go-restfulv3: Go-Restful tested v3.7.1 to 3.10.2. mux: Mux tested v1.7.0 to v1.8.0. iris: Iris tested v12.1.0 to 12.2.5. fasthttp: FastHttp tested v1.10.0 to v1.50.0. fiber: Fiber tested v2.49.0 to v2.50.0. echov4: Echov4 tested v4.0.0 to v4.11.4   HTTP Client  http: Native HTTP tested go v1.17 to go v1.20. fasthttp: FastHttp tested v1.10.0 to v1.50.0.   
RPC Frameworks  dubbo: Dubbo tested v3.0.1 to v3.0.5. kratosv2: Kratos tested v2.3.1 to v2.6.2. microv4: Go-Micro tested v4.6.0 to v4.10.2. grpc : gRPC tested v1.55.0 to v1.57.0.   Database Client  gorm: GORM tested v1.22.0 to v1.25.1.  MySQL Driver   mongo: Mongo tested v1.11.1 to v1.11.7. sql: Native SQL tested go v1.17 to go v1.20.  MySQL Driver tested v1.4.0 to v1.7.1.     Cache Client  go-redisv9: go-redis tested v9.0.3 to v9.0.5.   MQ Client  rocketMQ: rocketmq-client-go tested v2.1.2. amqp: AMQP tested v1.9.0.    Metrics Plugins The meter plugin provides the advanced metrics collections.\n runtimemetrics: Native Runtime Metrics tested go v1.17 to go v1.20.  Logging Plugins The logging plugin provides the advanced logging collections.\n logrus: Logrus tested v1.8.2 to v1.9.3. zap: Zap tested v1.17.0 to v1.24.0.  ","excerpt":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP …","ref":"/docs/skywalking-go/v0.4.0/en/agent/support-plugins/","title":"Tracing Plugins"},{"body":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to provide functionality for distributed tracing, metrics, and logging data. For a detailed list of supported plugins, please refer to the documentation. This document aims to provide you with some configuration information for your usage. Please ensure that you have followed the documentation to successfully install the SkyWalking Go Agent into your application.\nMetadata Mechanism The Go Agent would be identified by the SkyWalking backend after startup and maintain a heartbeat to keep alive.\n   Name Environment Key Default Value Description     agent.service_name SW_AGENT_NAME Your_Application_Name The name of the service which showed in UI.   agent.instance_env_name  SW_AGENT_INSTANCE_NAME To obtain the environment variable key for the instance name, if it cannot be obtained, an instance name will be automatically generated.    Tracing Distributed tracing is the most common form of plugin in the Go Agent, and it becomes active with each new incoming request. By default, all plugins are enabled. For a specific list of plugins, please refer to the documentation.\nIf you wish to disable a particular plugin to prevent enhancements related to that plugin, please consult the documentation on how to disable plugins.\nThe basic configuration is as follows:\n   Name Environment Key Default Value Description     agent.sampler SW_AGENT_SAMPLER 1 Sampling rate of tracing data, which is a floating-point value that must be between 0 and 1.   agent.ignore_suffix SW_AGENT_IGNORE_SUFFIX .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).    Metrics The metrics plugin can dynamically monitor the execution status of the current program and aggregate the data into corresponding metrics. Eventually, the data is reported to the SkyWalking backend at a specified interval. For a specific list of plugins, please refer to the documentation.\nThe current configuration information is as follows:\n   Name Environment Key Default Value Description     agent.meter.collect_interval SW_AGENT_METER_COLLECT_INTERVAL 20 The interval of collecting metrics, in seconds.    Logging The logging plugin in SkyWalking Go Agent are used to handle agent and application logs, as well as application log querying. 
They primarily consist of the following three functionalities:\n Agent Log Adaptation: The plugin detects the logging framework used in the current system and integrates the agent\u0026rsquo;s logs with the system\u0026rsquo;s logging framework. Distributed Tracing Enhancement: It combines the distributed tracing information from the current request with the application logs, allowing you to have real-time visibility into all log contents related to specific requests. Log Reporting: The plugin reports both application and agent logs to the SkyWalking backend for data retrieval and display purposes.  For more details, please refer to the documentation to learn more detail.\n","excerpt":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to …","ref":"/docs/skywalking-go/latest/en/agent/tracing-metrics-logging/","title":"Tracing, Metrics and Logging with Go Agent"},{"body":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to provide functionality for distributed tracing, metrics, and logging data. For a detailed list of supported plugins, please refer to the documentation. This document aims to provide you with some configuration information for your usage. Please ensure that you have followed the documentation to successfully install the SkyWalking Go Agent into your application.\nMetadata Mechanism The Go Agent would be identified by the SkyWalking backend after startup and maintain a heartbeat to keep alive.\n   Name Environment Key Default Value Description     agent.service_name SW_AGENT_NAME Your_Application_Name The name of the service which showed in UI.   agent.instance_env_name  SW_AGENT_INSTANCE_NAME To obtain the environment variable key for the instance name, if it cannot be obtained, an instance name will be automatically generated.    Tracing Distributed tracing is the most common form of plugin in the Go Agent, and it becomes active with each new incoming request. By default, all plugins are enabled. For a specific list of plugins, please refer to the documentation.\nIf you wish to disable a particular plugin to prevent enhancements related to that plugin, please consult the documentation on how to disable plugins.\nThe basic configuration is as follows:\n   Name Environment Key Default Value Description     agent.sampler SW_AGENT_SAMPLER 1 Sampling rate of tracing data, which is a floating-point value that must be between 0 and 1.   agent.ignore_suffix SW_AGENT_IGNORE_SUFFIX .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the suffix obtained by splitting the operation name by the last index of \u0026ldquo;.\u0026rdquo; in this set, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).   agent.trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH  If the operation name of the first span is matching, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).    Metrics The metrics plugin can dynamically monitor the execution status of the current program and aggregate the data into corresponding metrics. Eventually, the data is reported to the SkyWalking backend at a specified interval. For a specific list of plugins, please refer to the documentation.\nThe current configuration information is as follows:\n   Name Environment Key Default Value Description     agent.meter.collect_interval SW_AGENT_METER_COLLECT_INTERVAL 20 The interval of collecting metrics, in seconds.    
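Because all of the settings above resolve from environment variables, they can simply be set when launching the instrumented binary. The sketch below is not from the documentation: the binary path and the chosen values are assumptions; only the SW_* keys come from the tables above.

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// "./my-service" is an illustrative path to a binary already built with the Go agent.
	cmd := exec.Command("./my-service")
	cmd.Env = append(os.Environ(),
		"SW_AGENT_NAME=order-service",        // agent.service_name shown in the UI
		"SW_AGENT_SAMPLER=0.5",               // sample half of the traces (value between 0 and 1)
		"SW_AGENT_METER_COLLECT_INTERVAL=20", // report metrics every 20 seconds
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}
```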
Logging The logging plugin in SkyWalking Go Agent are used to handle agent and application logs, as well as application log querying. They primarily consist of the following three functionalities:\n Agent Log Adaptation: The plugin detects the logging framework used in the current system and integrates the agent\u0026rsquo;s logs with the system\u0026rsquo;s logging framework. Distributed Tracing Enhancement: It combines the distributed tracing information from the current request with the application logs, allowing you to have real-time visibility into all log contents related to specific requests. Log Reporting: The plugin reports both application and agent logs to the SkyWalking backend for data retrieval and display purposes.  For more details, please refer to the documentation to learn more detail.\n","excerpt":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to …","ref":"/docs/skywalking-go/next/en/agent/tracing-metrics-logging/","title":"Tracing, Metrics and Logging with Go Agent"},{"body":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to provide functionality for distributed tracing, metrics, and logging data. For a detailed list of supported plugins, please refer to the documentation. This document aims to provide you with some configuration information for your usage. Please ensure that you have followed the documentation to successfully install the SkyWalking Go Agent into your application.\nMetadata Mechanism The Go Agent would be identified by the SkyWalking backend after startup and maintain a heartbeat to keep alive.\n   Name Environment Key Default Value Description     agent.service_name SW_AGENT_NAME Your_Application_Name The name of the service which showed in UI.   agent.instance_env_name  SW_AGENT_INSTANCE_NAME To obtain the environment variable key for the instance name, if it cannot be obtained, an instance name will be automatically generated.    Tracing Distributed tracing is the most common form of plugin in the Go Agent, and it becomes active with each new incoming request. By default, all plugins are enabled. For a specific list of plugins, please refer to the documentation.\nIf you wish to disable a particular plugin to prevent enhancements related to that plugin, please consult the documentation on how to disable plugins.\nThe basic configuration is as follows:\n   Name Environment Key Default Value Description     agent.sampler SW_AGENT_SAMPLER 1 Sampling rate of tracing data, which is a floating-point value that must be between 0 and 1.   agent.ignore_suffix SW_AGENT_IGNORE_SUFFIX .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).    Metrics The metrics plugin can dynamically monitor the execution status of the current program and aggregate the data into corresponding metrics. Eventually, the data is reported to the SkyWalking backend at a specified interval. For a specific list of plugins, please refer to the documentation.\nThe current configuration information is as follows:\n   Name Environment Key Default Value Description     agent.meter.collect_interval SW_AGENT_METER_COLLECT_INTERVAL 20 The interval of collecting metrics, in seconds.    Logging The logging plugin in SkyWalking Go Agent are used to handle agent and application logs, as well as application log querying. 
They primarily consist of the following three functionalities:\n Agent Log Adaptation: The plugin detects the logging framework used in the current system and integrates the agent\u0026rsquo;s logs with the system\u0026rsquo;s logging framework. Distributed Tracing Enhancement: It combines the distributed tracing information from the current request with the application logs, allowing you to have real-time visibility into all log contents related to specific requests. Log Reporting: The plugin reports both application and agent logs to the SkyWalking backend for data retrieval and display purposes.  For more details, please refer to the documentation to learn more detail.\n","excerpt":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to …","ref":"/docs/skywalking-go/v0.4.0/en/agent/tracing-metrics-logging/","title":"Tracing, Metrics and Logging with Go Agent"},{"body":"Traffic The traffic is used to collecting the network access logs from services through the Service Discovery, and send access logs to the backend server for analyze.\nConfiguration    Name Default Environment Key Description     access_log.active false ROVER_ACCESS_LOG_ACTIVE Is active the access log monitoring.   access_log.exclude_namespaces istio-system,cert-manager,kube-system ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES Exclude processes in the specified Kubernetes namespace. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;   access_log.exclude_cluster  ROVER_ACCESS_LOG_EXCLUDE_CLUSTER Exclude processes in the specified cluster which defined in the process module. Multiple clusters split by \u0026ldquo;,\u0026rdquo;   access_log.flush.max_count 2000 ROVER_ACCESS_LOG_FLUSH_MAX_COUNT The max count of the access log when flush to the backend.   access_log.flush.period 5s ROVER_ACCESS_LOG_FLUSH_PERIOD The period of flush access log to the backend.   access_log_protocol_analyze.per_cpu_buffer 400KB ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   access_log.protocol_analyze.parallels 2 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   access_log.protocol_analyze.queue_size 5000 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyze queue.    Collectors Socket Connect/Accept/Close Monitor all socket connect, accept, and close events from monitored processes by attaching eBPF program to the respective trace points.\nSocket traffic Capture all socket traffic from monitored processes by attaching eBPF program to network syscalls.\nProtocol Data collection is followed by protocol analysis. Currently, the supported protocols include:\n HTTP/1.x HTTP/2  Note: As HTTP2 is a stateful protocol, it only supports monitoring processes that start after monitor. Processes already running at the time of monitoring may fail to provide complete data, leading to unsuccessful analysis.\nTLS When a process uses the TLS protocol for data transfer, Rover monitors libraries such as OpenSSL, BoringSSL, GoTLS, and NodeTLS to access the raw content. This feature is also applicable for protocol analysis.\nNote: the parsing of TLS protocols in Java is currently not supported.\nL2-L4 During data transmission, Rover records each packet\u0026rsquo;s through the network layers L2 to L4 using kprobes. 
This approach enhances the understanding of each packet\u0026rsquo;s transmission process, facilitating easier localization and troubleshooting of network issues.\n","excerpt":"Traffic The traffic is used to collecting the network access logs from services through the Service …","ref":"/docs/skywalking-rover/latest/en/setup/configuration/traffic/","title":"Traffic"},{"body":"Traffic The traffic is used to collecting the network access logs from services through the Service Discovery, and send access logs to the backend server for analyze.\nConfiguration    Name Default Environment Key Description     access_log.active false ROVER_ACCESS_LOG_ACTIVE Is active the access log monitoring.   access_log.exclude_namespaces istio-system,cert-manager,kube-system ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES Exclude processes in the specified Kubernetes namespace. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;   access_log.exclude_cluster  ROVER_ACCESS_LOG_EXCLUDE_CLUSTER Exclude processes in the specified cluster which defined in the process module. Multiple clusters split by \u0026ldquo;,\u0026rdquo;   access_log.flush.max_count 2000 ROVER_ACCESS_LOG_FLUSH_MAX_COUNT The max count of the access log when flush to the backend.   access_log.flush.period 5s ROVER_ACCESS_LOG_FLUSH_PERIOD The period of flush access log to the backend.   access_log_protocol_analyze.per_cpu_buffer 400KB ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   access_log.protocol_analyze.parallels 2 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   access_log.protocol_analyze.queue_size 5000 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyze queue.    Collectors Socket Connect/Accept/Close Monitor all socket connect, accept, and close events from monitored processes by attaching eBPF program to the respective trace points.\nSocket traffic Capture all socket traffic from monitored processes by attaching eBPF program to network syscalls.\nProtocol Data collection is followed by protocol analysis. Currently, the supported protocols include:\n HTTP/1.x HTTP/2  Note: As HTTP2 is a stateful protocol, it only supports monitoring processes that start after monitor. Processes already running at the time of monitoring may fail to provide complete data, leading to unsuccessful analysis.\nTLS When a process uses the TLS protocol for data transfer, Rover monitors libraries such as OpenSSL, BoringSSL, GoTLS, and NodeTLS to access the raw content. This feature is also applicable for protocol analysis.\nNote: the parsing of TLS protocols in Java is currently not supported.\nL2-L4 During data transmission, Rover records each packet\u0026rsquo;s through the network layers L2 to L4 using kprobes. This approach enhances the understanding of each packet\u0026rsquo;s transmission process, facilitating easier localization and troubleshooting of network issues.\n","excerpt":"Traffic The traffic is used to collecting the network access logs from services through the Service …","ref":"/docs/skywalking-rover/next/en/setup/configuration/traffic/","title":"Traffic"},{"body":"Traffic The traffic is used to collecting the network access logs from services through the Service Discovery, and send access logs to the backend server for analyze.\nConfiguration    Name Default Environment Key Description     access_log.active false ROVER_ACCESS_LOG_ACTIVE Is active the access log monitoring.   
access_log.exclude_namespaces istio-system,cert-manager,kube-system ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES Exclude processes in the specified Kubernetes namespace. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;   access_log.exclude_cluster  ROVER_ACCESS_LOG_EXCLUDE_CLUSTER Exclude processes in the specified cluster which defined in the process module. Multiple clusters split by \u0026ldquo;,\u0026rdquo;   access_log.flush.max_count 2000 ROVER_ACCESS_LOG_FLUSH_MAX_COUNT The max count of the access log when flush to the backend.   access_log.flush.period 5s ROVER_ACCESS_LOG_FLUSH_PERIOD The period of flush access log to the backend.   access_log_protocol_analyze.per_cpu_buffer 400KB ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   access_log.protocol_analyze.parallels 2 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   access_log.protocol_analyze.queue_size 5000 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyze queue.    Collectors Socket Connect/Accept/Close Monitor all socket connect, accept, and close events from monitored processes by attaching eBPF program to the respective trace points.\nSocket traffic Capture all socket traffic from monitored processes by attaching eBPF program to network syscalls.\nProtocol Data collection is followed by protocol analysis. Currently, the supported protocols include:\n HTTP/1.x HTTP/2  Note: As HTTP2 is a stateful protocol, it only supports monitoring processes that start after monitor. Processes already running at the time of monitoring may fail to provide complete data, leading to unsuccessful analysis.\nTLS When a process uses the TLS protocol for data transfer, Rover monitors libraries such as OpenSSL, BoringSSL, GoTLS, and NodeTLS to access the raw content. This feature is also applicable for protocol analysis.\nNote: the parsing of TLS protocols in Java is currently not supported.\nL2-L4 During data transmission, Rover records each packet\u0026rsquo;s through the network layers L2 to L4 using kprobes. This approach enhances the understanding of each packet\u0026rsquo;s transmission process, facilitating easier localization and troubleshooting of network issues.\n","excerpt":"Traffic The traffic is used to collecting the network access logs from services through the Service …","ref":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/traffic/","title":"Traffic"},{"body":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and transport data to the Kafka Topic.\nConfig Here is config file, set out as follows:\n Declare gRPC server and kafka client to receive and transmit data. Declare the SkyWalking Log protocol gatherer and sender to transmit protocol via pipeline. Expose Self-Observability telemetry data to Prometheus.  ","excerpt":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and …","ref":"/docs/skywalking-satellite/latest/en/setup/examples/feature/transmit-log-to-kafka/readme/","title":"Transmit Log to Kafka"},{"body":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and transport data to the Kafka Topic.\nConfig Here is config file, set out as follows:\n Declare gRPC server and kafka client to receive and transmit data. Declare the SkyWalking Log protocol gatherer and sender to transmit protocol via pipeline. Expose Self-Observability telemetry data to Prometheus.  
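To spot-check that log records are actually reaching the Kafka topic, a throwaway consumer can be pointed at it. The sketch below is not part of Satellite and makes several assumptions: the segmentio/kafka-go client, a broker at localhost:9092, and a topic named skywalking-logs; the message payload is the serialized SkyWalking log protocol, so only sizes are printed here.

```go
package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	// Broker address and topic name are assumptions; match them to your Satellite config.
	r := kafka.NewReader(kafka.ReaderConfig{
		Brokers: []string{"localhost:9092"},
		Topic:   "skywalking-logs",
		GroupID: "log-spot-check",
	})
	defer r.Close()

	for {
		m, err := r.ReadMessage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		// The value is the SkyWalking log protocol payload; just confirm it arrives.
		log.Printf("offset=%d key=%q value=%d bytes", m.Offset, string(m.Key), len(m.Value))
	}
}
```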
","excerpt":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and …","ref":"/docs/skywalking-satellite/next/en/setup/examples/feature/transmit-log-to-kafka/readme/","title":"Transmit Log to Kafka"},{"body":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and transport data to the Kafka Topic.\nConfig Here is config file, set out as follows:\n Declare gRPC server and kafka client to receive and transmit data. Declare the SkyWalking Log protocol gatherer and sender to transmit protocol via pipeline. Expose Self-Observability telemetry data to Prometheus.  ","excerpt":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and …","ref":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/feature/transmit-log-to-kafka/readme/","title":"Transmit Log to Kafka"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/latest/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/next/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.0.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. 
Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.1.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.2.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.3.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.4.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  
These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.5.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.6.0/en/setup/backend/ttl/","title":"TTL"},{"body":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","excerpt":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN …","ref":"/docs/main/v9.7.0/en/setup/backend/ttl/","title":"TTL"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/latest/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). 
UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/next/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings Settings file of UI is webapp/webapp.yml in distribution package. It has three parts.\n Listening port. Backend connect info.  server:port:8080spring:cloud:gateway:routes:- id:oap-routeuri:lb://oap-servicepredicates:- Path=/graphql/**discovery:client:simple:instances:oap-service:# Point to all backend\u0026#39;s restHost:restPort, split by URI arrays.- uri:http://127.0.0.1:12800- uri:http://instance-2:12800Start with Docker Image Start a container to connect oap server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of OAP server. Default value is http://127.0.0.1:12800.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.0.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  server:port:8080spring:cloud:gateway:routes:- id:oap-routeuri:lb://oap-servicepredicates:- Path=/graphql/**discovery:client:simple:instances:oap-service:# Point to all backend\u0026#39;s restHost:restPort, split by URI arrays.- uri:http://127.0.0.1:12800- uri:http://instance-2:12800Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. 
The default value is http://127.0.0.1:12800.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.1.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  server:port:8080spring:cloud:gateway:routes:- id:oap-routeuri:lb://oap-servicepredicates:- Path=/graphql/**discovery:client:simple:instances:oap-service:# Point to all backend\u0026#39;s restHost:restPort, split by URI arrays.- uri:http://127.0.0.1:12800- uri:http://instance-2:12800Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.2.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, without http:// prefix.oapServices:${SW_OAP_ADDRESS:-localhost:12800}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.3.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, without http:// prefix.oapServices:${SW_OAP_ADDRESS:-localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. 
The default value is http://127.0.0.1:9412.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.4.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.5.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.6.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  
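Laid out as YAML, the same settings read roughly as follows (a sketch only, assuming the ${VAR:-default} default-value syntax used by the other entries also applies to zipkinServices; the webapp/webapp.yml shipped in your distribution is authoritative):

serverPort: ${SW_SERVER_PORT:-8080}
# Comma-separated list of OAP addresses, with `http://` or `https://` prefix.
oapServices: ${SW_OAP_ADDRESS:-http://localhost:12800}
zipkinServices: ${SW_ZIPKIN_ADDRESS:-http://localhost:9412}
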
serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","excerpt":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup …","ref":"/docs/main/v9.7.0/en/setup/backend/ui-setup/","title":"UI"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/latest/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. 
For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/next/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by SkyWalking agent plugin when they are started, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there\u0026rsquo;re no suitable agent plugins. For example, there are no agent plugins for Nginx, haproxy, etc. So in order to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/ip of this gateway instanceport:9099# the port of this gateway instance, defaults to 80Note: The host of the instance must be the one that is actually used at client side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by SkyWalking agent …","ref":"/docs/main/v9.0.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.1.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. 
So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.2.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.3.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. 
For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.4.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.5.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.6.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. 
So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","excerpt":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking …","ref":"/docs/main/v9.7.0/en/setup/backend/uninstrumented-gateways/","title":"Uninstrumented Gateways/Proxies"},{"body":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a global test initializer to init the dependencies.\nimport ( _ \u0026quot;github.com/apache/skywalking-satellite/internal/satellite/test\u0026quot; ) ","excerpt":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a …","ref":"/docs/skywalking-satellite/latest/en/guides/test/how-to-unit-test/","title":"Unit Test"},{"body":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a global test initializer to init the dependencies.\nimport ( _ \u0026quot;github.com/apache/skywalking-satellite/internal/satellite/test\u0026quot; ) ","excerpt":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a …","ref":"/docs/skywalking-satellite/next/en/guides/test/how-to-unit-test/","title":"Unit Test"},{"body":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a global test initializer to init the dependencies.\nimport ( _ \u0026quot;github.com/apache/skywalking-satellite/internal/satellite/test\u0026quot; ) ","excerpt":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a …","ref":"/docs/skywalking-satellite/v1.2.0/en/guides/test/how-to-unit-test/","title":"Unit Test"},{"body":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... 
} Sample codes only\n","excerpt":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/","title":"Use annotation to mark the method you want to trace."},{"body":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","excerpt":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/","title":"Use annotation to mark the method you want to trace."},{"body":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","excerpt":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/","title":"Use annotation to mark the method you want to trace."},{"body":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. 
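For instance, a minimal sketch of plain @Trace usage (assuming the toolkit annotation org.apache.skywalking.apm.toolkit.trace.Trace is on the classpath; the class and method names here are hypothetical):

import org.apache.skywalking.apm.toolkit.trace.Trace;

public class OrderService {
    // A local span named after this method will appear in the trace's span stack.
    @Trace
    public void buildOrder(String orderId) {
        // business logic ...
    }
}
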
Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","excerpt":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/","title":"Use annotation to mark the method you want to trace."},{"body":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","excerpt":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/","title":"Use annotation to mark the method you want to trace."},{"body":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. 
We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. SkyWalking Data Source Before you start, please install the SkyWalking data source plugin. In the data source config panel, chose the SkyWalking and set the url to the OAP server graphql service address, the default port is 12800. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Topology Dashboards Dashboards Settings For now, SkyWalking support General Service and Service Mesh topology dashboards, the layer is GENERAL and MESH respectively. The following configuration can reuse the above General Service dashboard and add a new variable Plugin_SkyWalking for the dashboard: Add Topology Panel  Chose the Node Graph chart. Set Layer and Service by the variables. If you want to show all services in this layer, set Service empty. Set Node Metrics and Edge Metrics which you want to show on the topology.   Configure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. 
Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","excerpt":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. …","ref":"/docs/main/latest/en/setup/backend/ui-grafana/","title":"Use Grafana As The UI"},{"body":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. SkyWalking Data Source Before you start, please install the SkyWalking data source plugin. In the data source config panel, chose the SkyWalking and set the url to the OAP server graphql service address, the default port is 12800. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. 
Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Topology Dashboards Dashboards Settings For now, SkyWalking support General Service and Service Mesh topology dashboards, the layer is GENERAL and MESH respectively. The following configuration can reuse the above General Service dashboard and add a new variable Plugin_SkyWalking for the dashboard: Add Topology Panel  Chose the Node Graph chart. Set Layer and Service by the variables. If you want to show all services in this layer, set Service empty. Set Node Metrics and Edge Metrics which you want to show on the topology.   Configure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","excerpt":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. …","ref":"/docs/main/next/en/setup/backend/ui-grafana/","title":"Use Grafana As The UI"},{"body":"Use Grafana As The UI Since 9.4.0, SkyWalking provide PromQL Service. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. 
Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. Configure Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nPreview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","excerpt":"Use Grafana As The UI Since 9.4.0, SkyWalking provide PromQL Service. You can choose Grafana as the …","ref":"/docs/main/v9.4.0/en/setup/backend/ui-grafana/","title":"Use Grafana As The UI"},{"body":"Use Grafana As The UI Since 9.4.0, SkyWalking provide PromQL Service. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. 
Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. Configure Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nPreview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","excerpt":"Use Grafana As The UI Since 9.4.0, SkyWalking provide PromQL Service. You can choose Grafana as the …","ref":"/docs/main/v9.5.0/en/setup/backend/ui-grafana/","title":"Use Grafana As The UI"},{"body":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. 
We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. 
You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","excerpt":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. …","ref":"/docs/main/v9.6.0/en/setup/backend/ui-grafana/","title":"Use Grafana As The UI"},{"body":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. SkyWalking Data Source Before you start, please install the SkyWalking data source plugin. In the data source config panel, chose the SkyWalking and set the url to the OAP server graphql service address, the default port is 12800. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. 
Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Topology Dashboards Dashboards Settings For now, SkyWalking support General Service and Service Mesh topology dashboards, the layer is GENERAL and MESH respectively. The following configuration can reuse the above General Service dashboard and add a new variable Plugin_SkyWalking for the dashboard: Add Topology Panel  Chose the Node Graph chart. Set Layer and Service by the variables. If you want to show all services in this layer, set Service empty. Set Node Metrics and Edge Metrics which you want to show on the topology.   Configure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","excerpt":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. …","ref":"/docs/main/v9.7.0/en/setup/backend/ui-grafana/","title":"Use Grafana As The UI"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? 
Some may think that the next step is to add more instrumentation, more spans in the trace, thinking that with more data points you would eventually find the root cause. We’ll argue this is not a good option in a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten rather than deleted. This creates overhead with no value. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. Instrumenting a method’s performance typically means changing code, even if only adding an annotation, which implies a re-deploy. Even if you have a way to do auto instrumentation, as Apache SkyWalking does, you still need to change the configuration and reboot the app; otherwise, you risk GC pressure caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third-party library is hard and complex. It takes more time, and many won’t know how to do it. Usually, we don’t have code line numbers in distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of which of the above you choose, diving deeper requires collaboration with your Ops or SRE team and a shared, deep knowledge of distributed tracing.\nProfiling in Production Introduction Reusing distributed tracing to achieve method-scope precision requires an understanding of the above limitations and a different approach. We call it PROFILE.\nMost high-level languages are built on and run with a thread model. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method that appears in them. The key link to distributed tracing is the tracing context: the identifiers that are active (current) while the profiled method runs. Using this trace context, we can weave the data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider method invocations with the same stack depth and signature (method, line number, etc.) to be the same operation. We derive span timestamps from the thread dumps that operation appears in. Let’s put this visually:\nThe diagram above represents 10 successive thread dumps. If a method is present in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped, but the timestamps of the thread dumps are close enough.\nTo reduce the overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile’s thread dump analysis gives us:\n The root cause, precise to the line number in the code. 
Reduced maintenance, as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of a performance issue.\nfinal CountDownLatch countDownLatch = new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. (A compilable version of this snippet is sketched at the end of this post.) While this code looks suspicious, it is representative of real life. People on the OPS/SRE team are not usually aware of all code changes and who made them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: the delay only appears when the latch has not been counted down in time. In SkyWalking APM, we have endpoint p99/p95 latency metrics, so it is easy to see that the p99 of this endpoint is far above the average response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We notice the “service/processWithThreadPool” span is slow, as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result says.\nThis is the profile analysis stack view. We see the stack element names and durations (including/excluding children), and the slowest methods are highlighted. It clearly shows that “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool fits all cases, not even the profile method.\nThe first consideration is mistaking a frequently called method for a slow one. Thread dumps are periodic: if a method is called in a loop, or simply called many times, it can be captured in every dump, and the analysis will report it as slow. Even so, the profile did what it was designed for: it still helps the OPS/SRE team locate the code with the issue.\nThe second consideration is overhead: the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues that complete in less than 10ms. SkyWalking also has a threshold to control the maximum degree of parallel profiling.\nThe third consideration is that profiling doesn’t work for a low-latency trace, because the trace may complete before profiling starts. In practice this is not an issue: profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in the Java agent in version 7.0. The Python agent has supported it since 0.7.0. 
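As an aside, here is a compilable version of the demo snippet shown earlier. It is only a minimal sketch under our own assumptions: Task1, Task2, the thread pool, and the random sleep are hypothetical stand-ins we invented to reproduce the unstable timing, not code from the SkyWalking repository.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class RaceDemo {
    // Hypothetical task: simulates work of unpredictable length before counting down.
    static class Task1 implements Runnable {
        private final CountDownLatch latch;
        Task1(CountDownLatch latch) { this.latch = latch; }
        public void run() {
            try {
                Thread.sleep((long) (Math.random() * 600)); // unstable execution time
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                latch.countDown();
            }
        }
    }
    static class Task2 extends Task1 { Task2(CountDownLatch latch) { super(latch); } }

    public static void main(String[] args) {
        ExecutorService threadPool = Executors.newFixedThreadPool(2);
        final CountDownLatch countDownLatch = new CountDownLatch(2);
        threadPool.submit(new Task1(countDownLatch));
        threadPool.submit(new Task2(countDownLatch));
        try {
            // When the tasks are slow, the caller blocks here for up to 500 ms;
            // this waiting is what later surfaces as sun.misc.Unsafe.park.
            countDownLatch.await(500, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        threadPool.shutdown();
    }
}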
Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/latest/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
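Returning to the 500ms profile condition mentioned above: as a rough illustration of why such a condition keeps fast requests cheap, the sketch below (our own simplified code, not SkyWalking’s implementation; all names are hypothetical) only begins sampling a request’s thread after the request has already been running longer than the threshold, at the configured dump period.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

public class ThresholdSampler {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final long thresholdMs;   // profile condition, e.g. 500 ms
    private final long dumpPeriodMs;  // dump period, e.g. 10 ms

    public ThresholdSampler(long thresholdMs, long dumpPeriodMs) {
        this.thresholdMs = thresholdMs;
        this.dumpPeriodMs = dumpPeriodMs;
    }

    // Called when a traced request starts on `target`. Sampling begins only
    // after `thresholdMs`; if the caller cancels the returned future when the
    // request finishes earlier, the request is never dumped at all.
    public ScheduledFuture<?> watch(Thread target, Consumer<StackTraceElement[]> sink) {
        return scheduler.scheduleAtFixedRate(
                () -> sink.accept(target.getStackTrace()),  // one "thread dump" of the request thread
                thresholdMs, dumpPeriodMs, TimeUnit.MILLISECONDS);
    }
}

In this sketch a request handler would call watch(Thread.currentThread(), buffer::add) on entry and cancel the returned future on exit, so only requests still alive at the threshold ever pay the dump cost.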
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/next/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. 
Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  
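To make the dump-merging idea above concrete, here is a minimal, illustrative sketch (the class, record, and frame names are our own, not SkyWalking’s internals): it walks successive dumps, finds the longest run of consecutive dumps in which the same stack frame appears, and estimates the operation’s duration from the first and last timestamps of that run.

import java.util.ArrayList;
import java.util.List;

public class DumpMerger {
    // One periodic thread dump: when it was taken plus the frames it contains.
    record Dump(long timestampMs, List<String> frames) {}

    // Estimate how long `frame` (same method signature and line number) stayed
    // on the stack, from the dumps it appears in consecutively.
    static long estimateDurationMs(List<Dump> dumps, String frame) {
        long best = 0, runStart = -1;
        for (Dump d : dumps) {
            if (d.frames().contains(frame)) {
                if (runStart < 0) runStart = d.timestampMs();
                best = Math.max(best, d.timestampMs() - runStart);
            } else {
                runStart = -1; // run broken: the method left the stack
            }
        }
        return best; // lower bound: the true duration may exceed it by up to one period per side
    }

    public static void main(String[] args) {
        List<Dump> dumps = new ArrayList<>();
        // 10 dumps, 10 ms apart; the frame only shows up in dumps 4-8,
        // mirroring the example in the text.
        for (int i = 1; i <= 10; i++) {
            List<String> frames = (i >= 4 && i <= 8)
                    ? List.of("Demo.processWithThreadPool:42")
                    : List.of("Demo.otherWork:17");
            dumps.add(new Dump(i * 10L, frames));
        }
        System.out.println(estimateDurationMs(dumps, "Demo.processWithThreadPool:42")); // prints 40
    }
}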
Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedExceptione) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. 
Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.0.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedExceptione) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.1.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. 
Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  
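To illustrate how profiling data can be woven back into existing traces, the fragment below (purely illustrative; the Snapshot record and capture method are our own invention, not SkyWalking’s API) tags each stack snapshot with the trace context that was active on the thread when it was taken.

import java.util.List;

public class TaggedDump {
    // Ties one periodic stack snapshot to the trace active on the thread at
    // capture time, so the analysis can be merged into the distributed trace.
    public record Snapshot(String traceId, String spanId, long timestampMs, List<String> frames) {}

    public static Snapshot capture(String traceId, String spanId, Thread target) {
        List<String> frames = java.util.Arrays.stream(target.getStackTrace())
                .map(f -> f.getClassName() + "." + f.getMethodName() + ":" + f.getLineNumber())
                .toList();
        return new Snapshot(traceId, spanId, System.currentTimeMillis(), frames);
    }
}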
Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. 
Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.2.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.3.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. 
Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  
Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. 
Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.4.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method-scope precision requires an understanding of the above limitations and a different approach. We call it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in them. The key for distributed tracing is the tracing context: the identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider method invocations with the same stack depth and signature (method, line number, etc.) to be the same operation. We derive span timestamps from the thread dumps that the same operation appears in. Let’s put this visually:\nThe diagram above represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped, but the timestamps of the thread dumps are close enough.\nTo reduce the overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance, as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of a performance issue.\nfinal CountDownLatch countDownLatch = new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and of anything calling them. While this code looks suspicious, it is representative of real life. People on the OPS/SRE team are not usually aware of all code changes and who made them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it is only slow when the latch is not counted down in time and the await blocks. In SkyWalking APM, we have endpoint p99/p95 latency metrics, so it is easy to see that the p99 of this endpoint is far above the average response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We notice the “service/processWithThreadPool” span is slow, as expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.5.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. 
Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  
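To make the estimate described above concrete, here is a minimal sketch (not the agent’s actual implementation; the ProfileEstimator class, the ThreadDumpSample record and its timestampMillis field are hypothetical names used only for illustration):\nimport java.util.List; class ProfileEstimator { record ThreadDumpSample(long timestampMillis) {} /* An operation started before the first dump that captured it and finished after the last one, so the difference of those dump timestamps is a close, slightly low estimate of its duration. */ static long estimateDurationMillis(List<ThreadDumpSample> dumpsContainingOperation) { long first = dumpsContainingOperation.get(0).timestampMillis(); long last = dumpsContainingOperation.get(dumpsContainingOperation.size() - 1).timestampMillis(); return last - first; } }\nFor instance, an operation seen in dumps 4 through 8 taken 10ms apart would be estimated at roughly 40ms, even though its real duration is somewhat longer. 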
Implementing Precise Profiling Distributed profiling is built into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of a performance issue.\nfinal CountDownLatch countDownLatch = new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and of anything calling them. While this code looks suspicious, it is representative of real life. People on the OPS/SRE team are not usually aware of all code changes and who made them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it is only slow when the latch is not counted down in time and the await blocks. In SkyWalking APM, we have endpoint p99/p95 latency metrics, so it is easy to see that the p99 of this endpoint is far above the average response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We notice the “service/processWithThreadPool” span is slow, as expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result says.\nThis is the profile analysis stack view. We see the stack element names and durations (including/excluding children), and the slowest methods are highlighted. It clearly shows that “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool fits all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If one method is called in a loop, the profile analysis result will say the target method is slow, because it is captured in every dump. There could be another reason: a method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team locate the code causing the issue.\nThe second consideration is overhead: the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking also has a threshold to control the maximum degree of parallel profiling.\nThe third consideration is that profiling won’t work for a low-latency trace, because the trace could complete before profiling starts. In reality this is not an issue, since profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in the Java agent in 7.0. The Python agent has supported it since 0.7.0. 
Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.6.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method-scope precision requires an understanding of the above limitations and a different approach. We call it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in them. The key for distributed tracing is the tracing context: the identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider method invocations with the same stack depth and signature (method, line number, etc.) to be the same operation. We derive span timestamps from the thread dumps that the same operation appears in. Let’s put this visually:\nThe diagram above represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped, but the timestamps of the thread dumps are close enough.\nTo reduce the overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance, as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of a performance issue.\nfinal CountDownLatch countDownLatch = new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and of anything calling them. While this code looks suspicious, it is representative of real life. People on the OPS/SRE team are not usually aware of all code changes and who made them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it is only slow when the latch is not counted down in time and the await blocks. In SkyWalking APM, we have endpoint p99/p95 latency metrics, so it is easy to see that the p99 of this endpoint is far above the average response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We notice the “service/processWithThreadPool” span is slow, as expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","excerpt":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to …","ref":"/docs/main/v9.7.0/en/concepts-and-designs/sdk-profiling/","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing"},{"body":"Our Users  Various companies and organizations use SkyWalking for research, production and commercial products.                                                                                                                                                         Users are encouraged to add themselves to this page. Send a pull request to add your company or organization information [here].   ","excerpt":"Our Users  Various companies and organizations use SkyWalking for research, production and …","ref":"/users/","title":"Users"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  
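As a concrete sketch of the agent-side change described in the steps above (for the Java agent; the backend address and namespace values below are placeholders, not recommendations):\ncollector.backend_service=new-oap.example.com:11800 agent.namespace=canary-v2\nBoth properties are typically set in the agent’s agent.config file before the target service is rebooted. 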
The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have an agent hot reboot trigger mechanism. This streamlines the upgrade process: we deploy a new cluster by using the latest version of the SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on this mechanism, all agents will enter the cool_down mode and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this hot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team’s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/latest/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace (agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have an agent hot reboot trigger mechanism. This streamlines the upgrade process: we deploy a new cluster by using the latest version of the SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on this mechanism, all agents will enter the cool_down mode and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this hot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. 
As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/next/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.0.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. 
When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.1.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. 
As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.2.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.3.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. 
When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.4.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. 
As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.5.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.6.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. 
When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","excerpt":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the …","ref":"/docs/main/v9.7.0/en/faq/v6-version-upgrade/","title":"V6 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/latest/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/next/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. 
Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.0.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.1.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.2.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. 
This makes it incompatible …","ref":"/docs/main/v9.3.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.4.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.5.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.6.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. 
If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","excerpt":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible …","ref":"/docs/main/v9.7.0/en/faq/v8-version-upgrade/","title":"V8 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/latest/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. 
Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/next/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic, add layer column to table InstanceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic and InstanceTraffic tables before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services and instances when traffic comes. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/v9.0.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. 
All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/v9.1.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. 
A layer represents an …","ref":"/docs/main/v9.2.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/v9.3.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. 
These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/v9.4.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/v9.5.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  
Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an …","ref":"/docs/main/v9.6.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","excerpt":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. 
A layer represents an …","ref":"/docs/main/v9.7.0/en/faq/v9-version-upgrade/","title":"V9 upgrade"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/latest/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/next/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.0.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.1.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. 
When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.2.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.3.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.4.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.5.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. 
When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.6.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","excerpt":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the …","ref":"/docs/main/v9.7.0/en/faq/v3-version-upgrade/","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/latest/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. 
Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/next/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.3.0/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.4.0/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. 
The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.5.0/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.6.0/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. 
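For reference, the Virtual Cache span requirements listed above map onto the Java agent's plugin-core API roughly as in the sketch below. This is not taken from the official docs: the ContextManager/AbstractSpan/SpanLayer/StringTag class names and import paths are recalled from the agent-core sources and may differ between agent versions, and the operation name, peer address, and key are purely illustrative. In practice the bundled Redis/Jedis/Lettuce plugins set these tags automatically; the sketch only shows where each required tag and the CACHE layer would go.

import org.apache.skywalking.apm.agent.core.context.ContextManager;
import org.apache.skywalking.apm.agent.core.context.tag.StringTag;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;
import org.apache.skywalking.apm.agent.core.context.trace.SpanLayer;

public class CacheSpanSketch {
    /** Marks one Redis read the way a cache plugin would, then closes the span. */
    public static void tracedGet(String key) {
        // Exit span: operation name plus the cache server address as the peer.
        AbstractSpan span = ContextManager.createExitSpan("Redis/GET", "redis.example.internal:6379");
        span.setLayer(SpanLayer.CACHE);                  // Span's layer == CACHE
        span.tag(new StringTag("cache.type"), "redis");  // the type of cache system
        span.tag(new StringTag("cache.op"), "read");     // read or write
        span.tag(new StringTag("cache.cmd"), "get");     // the cache command
        span.tag(new StringTag("cache.key"), key);       // the cache key
        try {
            // ... call the real cache client here ...
        } finally {
            ContextManager.stopSpan();                   // finish the span
        }
    }
}

For an in-memory cache such as Guava, the same tags would be attached to a Local span created with ContextManager.createLocalSpan, and no peer would be set.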
Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","excerpt":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.7.0/en/setup/service-agent/virtual-cache/","title":"Virtual Cache"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/latest/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/next/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represents the database nodes detected by server agents' plugins. The performance metrics of the databases are also from Database client side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, Mariadb, MSSQL) in the Java agent could detect the latency of SQL performance, as well as SQL statements. As a result, in this dashboard, SkyWalking would show database traffic, latency, success rate and sampled slow SQLs powered by backend analysis capabilities.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  ","excerpt":"Virtual Database Virtual databases represents the database nodes detected by server agents' plugins. 
…","ref":"/docs/main/v9.0.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  ","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.1.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  ","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.2.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.3.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. 
As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.4.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.5.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.6.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  
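A short sketch against the Java agent's plugin-core API can make the Virtual Database span requirements above concrete. It is not part of the official docs: the class names and import paths are recalled from the agent-core sources (Tags.DB_TYPE and Tags.DB_STATEMENT carry the db.type/db.statement keys), the operation name and peer address are illustrative, and the bundled JDBC plugins (MySQL, PostgreSQL, MariaDB, MSSQL) emit such spans automatically.

import org.apache.skywalking.apm.agent.core.context.ContextManager;
import org.apache.skywalking.apm.agent.core.context.tag.Tags;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;
import org.apache.skywalking.apm.agent.core.context.trace.SpanLayer;

public class DatabaseSpanSketch {
    /** Marks one SQL execution the way a JDBC plugin would, then closes the span. */
    public static void tracedQuery(String sql) {
        // Exit span: the database server address is the peer.
        AbstractSpan span = ContextManager.createExitSpan("Mysql/JDBC/PreparedStatement/execute",
                                                          "mysql.example.internal:3306");
        span.setLayer(SpanLayer.DB);       // Span's layer == DATABASE
        span.tag(Tags.DB_TYPE, "Mysql");   // db.type: the type of Database
        span.tag(Tags.DB_STATEMENT, sql);  // db.statement: SQL statement
        try {
            // ... run the real query here ...
        } finally {
            ContextManager.stopSpan();     // finish the span
        }
    }
}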
Ref slow cache doc to know more slow SQL settings.\n","excerpt":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. …","ref":"/docs/main/v9.7.0/en/setup/service-agent/virtual-database/","title":"Virtual Database"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The …","ref":"/docs/main/latest/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The …","ref":"/docs/main/next/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. 
The …","ref":"/docs/main/v9.3.0/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.4.0/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.5.0/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. 
The …","ref":"/docs/main/v9.6.0/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","excerpt":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The …","ref":"/docs/main/v9.7.0/en/setup/service-agent/virtual-mq/","title":"Virtual Message Queue (MQ)"},{"body":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","excerpt":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction …","ref":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-webflux/","title":"Webflux Tracing Assistant APIs"},{"body":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","excerpt":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction …","ref":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-webflux/","title":"Webflux Tracing Assistant APIs"},{"body":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","excerpt":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction …","ref":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-webflux/","title":"Webflux Tracing Assistant APIs"},{"body":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","excerpt":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction …","ref":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-webflux/","title":"Webflux Tracing Assistant APIs"},{"body":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","excerpt":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction …","ref":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-webflux/","title":"Webflux Tracing Assistant APIs"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/latest/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 10. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking\u0026rsquo;s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. 
You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to install SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplinking telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 10. Welcome to the SkyWalking community! …","ref":"/docs/main/next/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.0.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. 
Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.1.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. 
Welcome to the SkyWalking community! …","ref":"/docs/main/v9.2.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.3.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. 
Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.4.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.5.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. 
The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.6.0/readme/","title":"Welcome"},{"body":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community! …","ref":"/docs/main/v9.7.0/readme/","title":"Welcome"},{"body":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB.\n Installation. Instruments about how to download and onboard BanyanDB server, Banyand. Clients. Some native clients to access Banyand. Observability. Learn how to effectively monitor, diagnose and optimize Banyand. Concept. Learn the concepts of Banyand. Includes the architecture, data model, and so on. CRUD Operations. To create, read, update, and delete data points or entities on resources in the schema.  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. 
Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB. …","ref":"/docs/skywalking-banyandb/latest/readme/","title":"Welcome"},{"body":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB.\n Installation. Instruments about how to download and onboard BanyanDB server, Banyand. Clients. Some native clients to access Banyand. Observability. Learn how to effectively monitor, diagnose and optimize Banyand. Concept. Learn the concepts of Banyand. Includes the architecture, data model, and so on. CRUD Operations. To create, read, update, and delete data points or entities on resources in the schema.  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB. …","ref":"/docs/skywalking-banyandb/next/readme/","title":"Welcome"},{"body":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB.\n Installation. Instruments about how to download and onboard BanyanDB server, Banyand. Clients. Some native clients to access Banyand. Observability. Learn how to effectively monitor, diagnose and optimize Banyand. Concept. Learn the concepts of Banyand. Includes the architecture, data model, and so on. CRUD Operations. To create, read, update, and delete data points or entities on resources in the schema.  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","excerpt":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB. …","ref":"/docs/skywalking-banyandb/v0.5.0/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra E2E is an End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It’s built based on the lessons learned from tens of hundreds of test cases in the SkyWalking main repo.\nFrom here you can learn all about SkyWalking Infra E2E\u0026rsquo;s architecture, how to set up E2E testing.\n  Concepts and Designs. The most important core ideas about SkyWalking Infra E2E. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up and running E2E testing.\n  Contribution. 
Introduce how to contribute SkyWalking Infra E2E.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through the pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra …","ref":"/docs/skywalking-infra-e2e/latest/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra E2E is an End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It’s built based on the lessons learned from tens of hundreds of test cases in the SkyWalking main repo.\nFrom here you can learn all about SkyWalking Infra E2E\u0026rsquo;s architecture, how to set up E2E testing.\n  Concepts and Designs. The most important core ideas about SkyWalking Infra E2E. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up and running E2E testing.\n  Contribution. Introduce how to contribute SkyWalking Infra E2E.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through the pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra …","ref":"/docs/skywalking-infra-e2e/next/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra E2E is an End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It’s built based on the lessons learned from tens of hundreds of test cases in the SkyWalking main repo.\nFrom here you can learn all about SkyWalking Infra E2E\u0026rsquo;s architecture, how to set up E2E testing.\n  Concepts and Designs. The most important core ideas about SkyWalking Infra E2E. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up and running E2E testing.\n  Contribution. Introduce how to contribute SkyWalking Infra E2E.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through the pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra …","ref":"/docs/skywalking-infra-e2e/v1.3.0/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Rover\u0026rsquo;s architecture, and how to deploy and use SkyWalking Rover.\n  Concepts and Designs. The most important core ideas about SkyWalking Rover. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Rover.\n  Guides. Guide users to develop or debug SkyWalking Rover.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. 
Or better yet, submit your contributions through a pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here …","ref":"/docs/skywalking-rover/latest/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Rover\u0026rsquo;s architecture, and how to deploy and use SkyWalking Rover.\n  Concepts and Designs. The most important core ideas about SkyWalking Rover. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Rover.\n  Guides. Guide users to develop or debug SkyWalking Rover.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through a pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here …","ref":"/docs/skywalking-rover/next/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Rover\u0026rsquo;s architecture, and how to deploy and use SkyWalking Rover.\n  Concepts and Designs. The most important core ideas about SkyWalking Rover. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Rover.\n  Guides. Guide users to develop or debug SkyWalking Rover.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through a pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here …","ref":"/docs/skywalking-rover/v0.6.0/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Satellite\u0026rsquo;s architecture, how to deploy and use SkyWalking Satellite.\n  Concepts and Designs. The most important core ideas about SkyWalking Satellite. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Satellite.\n  Guides. Guide users to develop or debug SkyWalking Satellite.\n  Protocols. Protocols show the communication ways between agents/probes, Satellite and SkyWalking. Anyone interested in uplink telemetry data should definitely read this.\n  Change logs. The feature records of the different versions.\n  FAQs. A manifest of already known setup problems, secondary developments experiments. When you are facing a problem, check here first.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your own contributions through pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Satellite official documentations. 
You\u0026rsquo;re welcome to join us.\nFrom …","ref":"/docs/skywalking-satellite/latest/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Satellite\u0026rsquo;s architecture, how to deploy and use SkyWalking Satellite.\n  Concepts and Designs. The most important core ideas about SkyWalking Satellite. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Satellite.\n  Guides. Guide users to develop or debug SkyWalking Satellite.\n  Protocols. Protocols show the communication ways between agents/probes, Satellite and SkyWalking. Anyone interested in uplink telemetry data should definitely read this.\n  Change logs. The feature records of the different versions.\n  FAQs. A manifest of already known setup problems, secondary developments experiments. When you are facing a problem, check here first.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your own contributions through pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom …","ref":"/docs/skywalking-satellite/next/readme/","title":"Welcome"},{"body":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Satellite\u0026rsquo;s architecture, how to deploy and use SkyWalking Satellite.\n  Concepts and Designs. The most important core ideas about SkyWalking Satellite. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Satellite.\n  Guides. Guide users to develop or debug SkyWalking Satellite.\n  Protocols. Protocols show the communication ways between agents/probes, Satellite and SkyWalking. Anyone interested in uplink telemetry data should definitely read this.\n  Change logs. The feature records of the different versions.\n  FAQs. A manifest of already known setup problems, secondary developments experiments. When you are facing a problem, check here first.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your own contributions through pull request to help make them better.\n","excerpt":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom …","ref":"/docs/skywalking-satellite/v1.2.0/readme/","title":"Welcome"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. 
The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/latest/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/next/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.0.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? 
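A purely illustrative sketch of this detection, using made-up Kotlin types (the actual SkyWalking UI implements the check differently and in its own codebase):

```kotlin
// Illustrative only -- not the SkyWalking UI implementation.
// Idea: collect every parent reference (segment id + span id) used in the trace
// and keep those that no received span matches; each unresolved reference is
// the point where the UI would render a synthetic VNode.
data class SpanRef(val segmentId: String, val spanId: Int)
data class Span(val ref: SpanRef, val parentRef: SpanRef?)

fun unresolvedParents(spans: List<Span>): Set<SpanRef> {
    val received = spans.map { it.ref }.toSet()
    return spans.asSequence()
        .mapNotNull { it.parentRef }   // every parent reference in the trace
        .filterNot { it in received }  // no matching span was received
        .toSet()
}
```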
The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.1.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.2.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.3.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? 
On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.4.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.5.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  
In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.6.0/en/faq/vnode/","title":"What is VNode?"},{"body":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","excerpt":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and …","ref":"/docs/main/v9.7.0/en/faq/vnode/","title":"What is VNode?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/latest/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/next/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  
No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.0.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.1.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.2.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.3.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  
No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.4.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.5.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.6.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","excerpt":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the …","ref":"/docs/main/v9.7.0/en/faq/time-and-timezone/","title":"Why can't I see any data in the UI?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. 
Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/latest/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/next/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.0.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.1.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.2.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. 
Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.3.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.4.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.5.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.6.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","excerpt":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue …","ref":"/docs/main/v9.7.0/en/faq/hour-day-metrics-stopping/","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?"},{"body":"Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? 
This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? Log and trace exporters are using MQ as transport channel. And metrics exporter uses gRPC, as considering the scale.\n","excerpt":"Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default? This is …","ref":"/docs/main/next/en/faq/why_mq_not_involved/","title":"Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. 
Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/latest/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? 
Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.0.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.1.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. 
Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.2.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? 
Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.3.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. 
When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.4.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? 
This is often asked by those who are …","ref":"/docs/main/v9.5.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.6.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). 
Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","excerpt":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are …","ref":"/docs/main/v9.7.0/en/faq/why_mq_not_involved/","title":"Why doesn't SkyWalking involve MQ in its architecture?"},{"body":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","excerpt":"Why is -Djava.ext.dirs not supported? 
-Djava.ext.dirs provides the extension class loader mechanism …","ref":"/docs/skywalking-java/latest/en/faq/ext-dirs/","title":"Why is `-Djava.ext.dirs` not supported?"},{"body":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","excerpt":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism …","ref":"/docs/skywalking-java/next/en/faq/ext-dirs/","title":"Why is `-Djava.ext.dirs` not supported?"},{"body":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","excerpt":"Why is -Djava.ext.dirs not supported? 
-Djava.ext.dirs provides the extension class loader mechanism …","ref":"/docs/skywalking-java/v9.0.0/en/faq/ext-dirs/","title":"Why is `-Djava.ext.dirs` not supported?"},{"body":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","excerpt":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism …","ref":"/docs/skywalking-java/v9.1.0/en/faq/ext-dirs/","title":"Why is `-Djava.ext.dirs` not supported?"},{"body":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","excerpt":"Why is -Djava.ext.dirs not supported? 
-Djava.ext.dirs provides the extension class loader mechanism …","ref":"/docs/skywalking-java/v9.2.0/en/faq/ext-dirs/","title":"Why is `-Djava.ext.dirs` not supported?"},{"body":"Why is Clickhouse or Loki or xxx not supported as a storage option? Background In the past several years, community users have asked why Clickhouse, Loki, or some other storage is not supported in the upstream. We have repeated the answer many times, but it is still happening, at here, I would like to write down the summary to help people understand more\nPrevious Discussions All the following issues were about discussing new storage extension topics.\n Loki as storage  https://github.com/apache/skywalking/discussions/9836   ClickHouse  https://github.com/apache/skywalking/issues/11924 https://github.com/apache/skywalking/discussions/9011   Vertica  https://github.com/apache/skywalking/discussions/8817    Generally, all those asking are about adding a new kind of storage.\nWhy they don\u0026rsquo;t exist ? First of all, WHY is not a suitable question. SkyWalking is a volunteer-driven community, the volunteers build this project including bug fixes, maintenance work, and new features from their personal and employer interests. What you saw about the current status is the combination of all those interests rather than responsibilities. So, in SkyWalking, anything you saw existing is/was someone\u0026rsquo;s interest and contributed to upstream.\nThis logic is the same as this question, SkyWalking active maintainers are focusing on JDBC(MySQL and PostgreSQL ecosystem) Database and Elasticsearch for existing users, and moving forward on BanyanDB as the native one. We for now don\u0026rsquo;t have people interested in ClickHouse or any other database. That is why they are not there.\nHow could add one? To add a new feature, including a new storage plugin, you should go through SWIP - SkyWalking Improvement Proposal workflow, and have a full discussion with the maintenance team. SkyWalking has a pluggable storage system, so, ideally new storage option is possible to implement a new provider for the storage module. Meanwhile, in practice, as storage implementation should be in high performance and well optimized, considering our experiences with JDBC and Elasticsearch implementations, some flags and annotations may need to be added in the kernel level and data model declarations.\nFurthermore, as current maintainers are not a fun of Clickhouse or others(otherwise, you should have seen those implementations), they are not going to be involved in the code implementations and they don\u0026rsquo;t know much more from a general perspective about which kind of implementation in that specific database will have a better behavior and performance. So, if you want to propose this to upstream, you should be very experienced in that database, and have enough scale and environments to provide solid benchmark.\nWhat happens next if the new implementation gets accepted/merged/released? Who proposed this new implementation(such as clickhouse storage), has to take the responsibilities of the maintenance. The maintenance means they need to\n Join storage relative discussion to make sure SkyWalking can move forward on a kernel-level optimization without being blocked by these specific storage options. Respond to this storage relative questions, bugs, CVEs, and performance issues. Make the implementation performance match the expectation of the original proposal. 
Such as, about clickhouse, people are talking about how they are faster and have higher efficiency than Elasticsearch for large-scale deployments. Then we should always be able to see it has better benchmark and product side practice.  Even if the storage gets accepted/merged/released, but no one can\u0026rsquo;t take the above responsibilities or the community doesn\u0026rsquo;t receive the feedback and questions about those storages, SkyWalking PMC(Project Management Committee) will start the process to remove the implementations. This happened before for Apache IoTDB and InfluxDB storage options. Here is the last vote about this,\n https://github.com/apache/skywalking/discussions/9059  ","excerpt":"Why is Clickhouse or Loki or xxx not supported as a storage option? Background In the past several …","ref":"/docs/main/next/en/faq/why-clickhouse-not-supported/","title":"Why is Clickhouse or Loki or xxx not supported as a storage option?"},{"body":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. 
The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","excerpt":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the …","ref":"/docs/main/latest/en/setup/backend/backend-win-monitoring/","title":"Windows Monitoring"},{"body":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","excerpt":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the …","ref":"/docs/main/next/en/setup/backend/backend-win-monitoring/","title":"Windows Monitoring"},{"body":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","excerpt":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-win-monitoring/","title":"Windows Monitoring"},{"body":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","excerpt":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-win-monitoring/","title":"Windows Monitoring"},{"body":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","excerpt":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-win-monitoring/","title":"Windows Monitoring"},{"body":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","excerpt":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-win-monitoring/","title":"Windows Monitoring"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. 
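Because the inline job configuration above has its original line breaks stripped, here is the same receivers/exporters layout restated as a readable sketch; the cluster name and the oap.skywalking:11800 endpoint remain placeholders to replace:

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'istiod-monitor'
          kubernetes_sd_configs:
            - role: endpoints
          relabel_configs:
            - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
              action: keep
              regex: istiod;http-monitoring
            - action: labelmap
              regex: __meta_kubernetes_service_label_(.+)
            - source_labels: []
              target_label: cluster
              replacement: your-cluster          # replace this with your cluster name
exporters:
  otlp:
    endpoint: oap.skywalking:11800               # replace this with the OAP gRPC service address
    tls:
      insecure: true
  logging: {}                                    # declared here only so the pipeline's logging exporter resolves
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp, logging]
```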
You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/latest/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/next/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in the Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which is then processed and sent to SkyWalking backend.\nFollow the steps in Getting Started in OpenTelemetry Collector to deploy this collector. There are several components available in the collector, and they could be combined for different use cases. 
For the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNOTE: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.0.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. 
You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.1.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. 
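For the v9.0/v9.1-era instructions above, which use the OpenCensus exporter instead of OTLP, the separate snippets (scrape job, cluster label, exporter) could be combined roughly as follows. The job definition, cluster name, and endpoint are illustrative, and only one of the two cluster-label mechanisms is needed:

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'istio'
          # kubernetes_sd_configs / relabel rules from "Prometheus Integration of Istio" go here
          relabel_configs:
            - source_labels: []
              target_label: cluster
              replacement: my-cluster            # <cluster name>; required even for a single cluster
processors:
  resource:
    attributes:
      - key: cluster
        value: "my-cluster"                      # alternative to the relabel rule above
        action: upsert
exporters:
  opencensus:
    endpoint: oap.skywalking:11800               # address of the OAP gRPC service (assumption)
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [resource]
      exporters: [opencensus]
```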
You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.2.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.3.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. 
Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.4.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. 
Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.5.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. 
You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.6.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","excerpt":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the …","ref":"/docs/main/v9.7.0/en/setup/istio/readme/","title":"Working with Istio"},{"body":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running well across multiple versions. If you haven\u0026rsquo;t started developing your plugin yet, please read this Plugin Development Guide first.\nDeveloping a plugin involves the following steps:\n Create a new module: Please create a new module in the specified directory, and it is recommended to name the module the same as the plugin for easy reference. Write the configuration file: This file serves as the declaration file for the plugin, and test cases would be run based on this file. Write the test code: Simulate the actual service operation, including the plugin you want to test. Test execution: Check if the plugin is running properly.  Write Configuration File The configuration file is used to define the basic information of the test plugin. You can use the gin plugin configuration file as an example to write your own. 
It includes the following information:\n entry-service: The test HTTP service entry URL. When this address is accessed, the plugin code should be triggered. health-checker: Executed before the entry-service is accessed to ensure that the service starts without any issues. Status code of 200 is considered a successful service start. start-script: The script execution file path. Please compile and start the service in this file. framework: The access address of the current framework to be tested. During testing, this address would be used to switch between different framework versions. export-port: The port number for the external service entry. support-version: The version information supported by the current plugin.  go: The supported Golang language version for the current plugin. framework: A list of plugin version information. It would be used to switch between multiple framework versions.   dependencies: If your program relies on certain containers, please declare them here. The syntax is largely similar to the services in docker-compose.  image: The image name of service. hostname: The hostname of the container which deployed. port: The port list of the container which deployed. expose: The export port list of the container which deployed. environment: The environment variables of the container which deployed. command: The start command of the container. healthcheck: The health check command of the container. If the service defines a healthcheck, then the service being tested would depend on the current service\u0026rsquo;s service_healthy status. Otherwise, it depends on the service_started status.    URL Access When the service address is accessed, please use ${HTTP_HOST} and ${HTTP_PORT} to represent the domain name and port number to be accessed. The port number corresponds to the export-port field.\nStart Script The startup script is used to compile and execute the program.\nWhen starting, please add the ${GO_BUILD_OPTS} parameter, which specifies the Go Agent program information for hybrid compilation.\nWhen starting, just let the program keep running.\nVersion Matrix Multi-version support is a crucial step in plugin testing. It can test whether the plugin runs stably across multiple framework versions and go versions.\nPlugin testing would use the go get command to modify the plugin version. Please make sure you have filled in the correct framework and support-version.framework. The format is: ${framework}@${support-version.framework}\nDuring plugin execution, the specified official Golang image would be used, allowing the plugin to run in the designated Golang version.\nExcepted File For each plugin, you need to define the config/expected.yml file, which is used to define the observable data generated after the plugin runs. After the plugin runs, this file would be used to validate the data.\nPlease refer to the documentation to write this file.\nWrite Test Code In the test code, please start an HTTP service and expose the following two interfaces:\n Check service: Used to ensure that the service is running properly. This corresponds to the health-checker address in configuration. Entry service: Write the complete framework business logic at this address. Validate all the features provided by the plugin as much as possible. This corresponds to the entry-service address in configuration.  
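Putting the configuration fields listed earlier together, a hypothetical plugin.yml for a plugin named myframework might look roughly like the sketch below. The file name, URLs, script path, module path, and versions are all made up for illustration; check the gin plugin configuration referenced above for the authoritative shape:

```yaml
entry-service: http://${HTTP_HOST}:${HTTP_PORT}/execute     # hitting this should trigger the plugin code
health-checker: http://${HTTP_HOST}:${HTTP_PORT}/health     # must return 200 before entry-service is called
start-script: ./bin/startup.sh                              # compiles and starts the test service
framework: github.com/myorg/myframework                     # module whose version is switched per test run
export-port: 8080
support-version:
  - go: 1.19
    framework:
      - v1.0.0
      - v1.1.0
```

During a run, the version matrix described above is expanded into go get github.com/myorg/myframework@v1.0.0 style commands, one per listed framework version.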
The test code, like a regular program, needs to import the github.com/apache/skywalking-go package.\nTest Execution Once you have completed the plugin configuration and test code writing, you can proceed to test the framework. Please follow these steps:\n Build tools: Execute the make build command in the test/plugins directory. It would generate some tools needed for testing in the dist folder of this directory. Run the plugin locally: Start the plugin test program and iterate through all framework versions for testing on your local environment. Add to GitHub Action: Fill in the name of the test plugin in this file, and the plugin test would be executed and validated each time a pull request is submitted.  Run the Plugin Test Locally Please execute the run.sh script in the test/plugins directory and pass in the name of the plugin you wrote (the folder name). At this point, the script would read the configuration file of the plugin test and create a workspace directory in this location for temporarily storing files generated by each plugin. Finally, it would start the test code and validate the data sequentially according to the supported version information.\nThe script supports the following two parameters:\n \u0026ndash;clean: Clean up the files and containers generated by the current running environment. \u0026ndash;debug: Enable debug mode for plugin testing. In this mode, the content generated by each framework in the workspace would not be cleared, and the temporary files generated during hybrid compilation would be saved.  ","excerpt":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running …","ref":"/docs/skywalking-go/latest/en/development-and-contribution/write-plugin-testing/","title":"Write Plugin Test"},{"body":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running well across multiple versions. If you haven\u0026rsquo;t started developing your plugin yet, please read this Plugin Development Guide first.\nDeveloping a plugin involves the following steps:\n Create a new module: Please create a new module in the specified directory, and it is recommended to name the module the same as the plugin for easy reference. Write the configuration file: This file serves as the declaration file for the plugin, and test cases would be run based on this file. Write the test code: Simulate the actual service operation, including the plugin you want to test. Test execution: Check if the plugin is running properly.  Write Configuration File The configuration file is used to define the basic information of the test plugin. You can use the gin plugin configuration file as an example to write your own. It includes the following information:\n entry-service: The test HTTP service entry URL. When this address is accessed, the plugin code should be triggered. health-checker: Executed before the entry-service is accessed to ensure that the service starts without any issues. Status code of 200 is considered a successful service start. start-script: The script execution file path. Please compile and start the service in this file. framework: The access address of the current framework to be tested. During testing, this address would be used to switch between different framework versions. export-port: The port number for the external service entry. support-version: The version information supported by the current plugin.  go: The supported Golang language version for the current plugin. 
framework: A list of plugin version information. It would be used to switch between multiple framework versions.   dependencies: If your program relies on certain containers, please declare them here. The syntax is largely similar to the services in docker-compose.  image: The image name of service. hostname: The hostname of the container which deployed. port: The port list of the container which deployed. expose: The export port list of the container which deployed. environment: The environment variables of the container which deployed. command: The start command of the container. healthcheck: The health check command of the container. If the service defines a healthcheck, then the service being tested would depend on the current service\u0026rsquo;s service_healthy status. Otherwise, it depends on the service_started status.    URL Access When the service address is accessed, please use ${HTTP_HOST} and ${HTTP_PORT} to represent the domain name and port number to be accessed. The port number corresponds to the export-port field.\nStart Script The startup script is used to compile and execute the program.\nWhen starting, please add the ${GO_BUILD_OPTS} parameter, which specifies the Go Agent program information for hybrid compilation.\nWhen starting, just let the program keep running.\nVersion Matrix Multi-version support is a crucial step in plugin testing. It can test whether the plugin runs stably across multiple framework versions and go versions.\nPlugin testing would use the go get command to modify the plugin version. Please make sure you have filled in the correct framework and support-version.framework. The format is: ${framework}@${support-version.framework}\nDuring plugin execution, the specified official Golang image would be used, allowing the plugin to run in the designated Golang version.\nExcepted File For each plugin, you need to define the config/expected.yml file, which is used to define the observable data generated after the plugin runs. After the plugin runs, this file would be used to validate the data.\nPlease refer to the documentation to write this file.\nWrite Test Code In the test code, please start an HTTP service and expose the following two interfaces:\n Check service: Used to ensure that the service is running properly. This corresponds to the health-checker address in configuration. Entry service: Write the complete framework business logic at this address. Validate all the features provided by the plugin as much as possible. This corresponds to the entry-service address in configuration.  The test code, like a regular program, needs to import the github.com/apache/skywalking-go package.\nTest Execution Once you have completed the plugin configuration and test code writing, you can proceed to test the framework. Please follow these steps:\n Build tools: Execute the make build command in the test/plugins directory. It would generate some tools needed for testing in the dist folder of this directory. Run the plugin locally: Start the plugin test program and iterate through all framework versions for testing on your local environment. Add to GitHub Action: Fill in the name of the test plugin in this file, and the plugin test would be executed and validated each time a pull request is submitted.  Run the Plugin Test Locally Please execute the run.sh script in the test/plugins directory and pass in the name of the plugin you wrote (the folder name). 
At this point, the script would read the configuration file of the plugin test and create a workspace directory in this location for temporarily storing files generated by each plugin. Finally, it would start the test code and validate the data sequentially according to the supported version information.\nThe script supports the following two parameters:\n \u0026ndash;clean: Clean up the files and containers generated by the current running environment. \u0026ndash;debug: Enable debug mode for plugin testing. In this mode, the content generated by each framework in the workspace would not be cleared, and the temporary files generated during hybrid compilation would be saved.  ","excerpt":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running …","ref":"/docs/skywalking-go/next/en/development-and-contribution/write-plugin-testing/","title":"Write Plugin Test"},{"body":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running well across multiple versions. If you haven\u0026rsquo;t started developing your plugin yet, please read this Plugin Development Guide first.\nDeveloping a plugin involves the following steps:\n Create a new module: Please create a new module in the specified directory, and it is recommended to name the module the same as the plugin for easy reference. Write the configuration file: This file serves as the declaration file for the plugin, and test cases would be run based on this file. Write the test code: Simulate the actual service operation, including the plugin you want to test. Test execution: Check if the plugin is running properly.  Write Configuration File The configuration file is used to define the basic information of the test plugin. You can use the gin plugin configuration file as an example to write your own. It includes the following information:\n entry-service: The test HTTP service entry URL. When this address is accessed, the plugin code should be triggered. health-checker: Executed before the entry-service is accessed to ensure that the service starts without any issues. Status code of 200 is considered a successful service start. start-script: The script execution file path. Please compile and start the service in this file. framework: The access address of the current framework to be tested. During testing, this address would be used to switch between different framework versions. export-port: The port number for the external service entry. support-version: The version information supported by the current plugin.  go: The supported Golang language version for the current plugin. framework: A list of plugin version information. It would be used to switch between multiple framework versions.   dependencies: If your program relies on certain containers, please declare them here. The syntax is largely similar to the services in docker-compose.  image: The image name of service. hostname: The hostname of the container which deployed. port: The port list of the container which deployed. expose: The export port list of the container which deployed. environment: The environment variables of the container which deployed. command: The start command of the container. healthcheck: The health check command of the container. If the service defines a healthcheck, then the service being tested would depend on the current service\u0026rsquo;s service_healthy status. Otherwise, it depends on the service_started status.    
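The dependencies block described above uses docker-compose-like syntax; a hypothetical declaration for a plugin test that needs a Redis container might look like the following (the image, ports, environment, and healthcheck command are illustrative only, and the exact key shapes should be checked against an existing plugin):

```yaml
dependencies:
  redis:
    image: redis:7.0
    hostname: redis
    port:
      - 6379:6379
    expose:
      - 6379
    environment:
      - TZ=UTC
    command: redis-server --appendonly no
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5
```

Because a healthcheck is defined here, the service under test would wait for the service_healthy status rather than service_started, as noted above.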
URL Access When the service address is accessed, please use ${HTTP_HOST} and ${HTTP_PORT} to represent the domain name and port number to be accessed. The port number corresponds to the export-port field.\nStart Script The startup script is used to compile and execute the program.\nWhen starting, please add the ${GO_BUILD_OPTS} parameter, which specifies the Go Agent program information for hybrid compilation.\nWhen starting, just let the program keep running.\nVersion Matrix Multi-version support is a crucial step in plugin testing. It can test whether the plugin runs stably across multiple framework versions and go versions.\nPlugin testing would use the go get command to modify the plugin version. Please make sure you have filled in the correct framework and support-version.framework. The format is: ${framework}@${support-version.framework}\nDuring plugin execution, the specified official Golang image would be used, allowing the plugin to run in the designated Golang version.\nExcepted File For each plugin, you need to define the config/expected.yml file, which is used to define the observable data generated after the plugin runs. After the plugin runs, this file would be used to validate the data.\nPlease refer to the documentation to write this file.\nWrite Test Code In the test code, please start an HTTP service and expose the following two interfaces:\n Check service: Used to ensure that the service is running properly. This corresponds to the health-checker address in configuration. Entry service: Write the complete framework business logic at this address. Validate all the features provided by the plugin as much as possible. This corresponds to the entry-service address in configuration.  The test code, like a regular program, needs to import the github.com/apache/skywalking-go package.\nTest Execution Once you have completed the plugin configuration and test code writing, you can proceed to test the framework. Please follow these steps:\n Build tools: Execute the make build command in the test/plugins directory. It would generate some tools needed for testing in the dist folder of this directory. Run the plugin locally: Start the plugin test program and iterate through all framework versions for testing on your local environment. Add to GitHub Action: Fill in the name of the test plugin in this file, and the plugin test would be executed and validated each time a pull request is submitted.  Run the Plugin Test Locally Please execute the run.sh script in the test/plugins directory and pass in the name of the plugin you wrote (the folder name). At this point, the script would read the configuration file of the plugin test and create a workspace directory in this location for temporarily storing files generated by each plugin. Finally, it would start the test code and validate the data sequentially according to the supported version information.\nThe script supports the following two parameters:\n \u0026ndash;clean: Clean up the files and containers generated by the current running environment. \u0026ndash;debug: Enable debug mode for plugin testing. In this mode, the content generated by each framework in the workspace would not be cleared, and the temporary files generated during hybrid compilation would be saved.  
","excerpt":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running …","ref":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/write-plugin-testing/","title":"Write Plugin Test"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/latest/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. 
If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/next/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
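The rule-file schema embedded above is hard to scan once its line breaks are stripped; restated readably (following the newer schema that includes initExp and expPrefix), the fields are:

```yaml
# initExp is the expression that initializes the current configuration file
initExp: <string>
# metricPrefix is inserted into the metric name: <metricPrefix>_<raw_metric_name>
metricPrefix: <string>
# expPrefix is executed before the metrics execute other functions
expPrefix: <string>
# expSuffix is appended to all expressions in this file
expSuffix: <string>
# Data source from Zabbix item keys
requiredZabbixItemKeys:
  - <zabbix item keys>
# Supported agent entity information
entities:
  # Hostname patterns allowed to build metrics
  hostPatterns:
    - <regex string>
  # Customized metric labels applied before parsing into the meter system
  labels:
    - name: <string>        # label name; its value must come from `value` or `fromItem`
      value: <string>       # optional: assign a fixed value to the label
      fromItem: <string>    # optional: query the label value from a Zabbix Agent item key
# Metrics rules let you recompute queries
metrics:
  - name: <string>          # rule name; combined with the prefix 'meter_' as the index/table name in storage
    exp: <string>           # MAL expression
```

Each rule's exp is a MAL expression; see mal.md, as noted above.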
You could find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.0.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. 
The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.1.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.2.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. 
Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.3.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.4.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.5.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.6.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","excerpt":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format …","ref":"/docs/main/v9.7.0/en/setup/backend/backend-zabbix/","title":"Zabbix Receiver"},{"body":"Zend observer  Refer to: https://www.datadoghq.com/blog/engineering/php-8-observability-baked-right-in/#the-observability-landscape-before-php-8\n By default, skywalking-php hooks the zend_execute_internal and zend_execute_ex functions to implement auto instrumentation.\nBut there are some drawbacks:\n All PHP function calls are placed on the native C stack, which is limited by the value set in ulimit -s. Not compatible with the new JIT added in PHP 8.  The observer API in PHP 8+ Now, zend observer api is a new generation method, and it is also a method currently recommended by PHP8.\nThis method has no stack problem and will not affect JIT.\nConfiguration The following configuration example enables JIT in PHP8 and zend observer support in skywalking-php at the same time.\n[opcache] zend_extension = opcache ; Enable JIT opcache.jit = tracing [skywalking_agent] extension = skywalking_agent.so ; Switch to use zend observer api to implement auto instrumentation. skywalking_agent.enable_zend_observer = On ","excerpt":"Zend observer  Refer to: …","ref":"/docs/skywalking-php/latest/en/configuration/zend-observer/","title":"Zend observer"},{"body":"Zend observer  Refer to: https://www.datadoghq.com/blog/engineering/php-8-observability-baked-right-in/#the-observability-landscape-before-php-8\n By default, skywalking-php hooks the zend_execute_internal and zend_execute_ex functions to implement auto instrumentation.\nBut there are some drawbacks:\n All PHP function calls are placed on the native C stack, which is limited by the value set in ulimit -s. Not compatible with the new JIT added in PHP 8.  
The observer API in PHP 8+ Now, zend observer api is a new generation method, and it is also a method currently recommended by PHP8.\nThis method has no stack problem and will not affect JIT.\nConfiguration The following configuration example enables JIT in PHP8 and zend observer support in skywalking-php at the same time.\n[opcache] zend_extension = opcache ; Enable JIT opcache.jit = tracing [skywalking_agent] extension = skywalking_agent.so ; Switch to use zend observer api to implement auto instrumentation. skywalking_agent.enable_zend_observer = On ","excerpt":"Zend observer  Refer to: …","ref":"/docs/skywalking-php/next/en/configuration/zend-observer/","title":"Zend observer"},{"body":"Zend observer  Refer to: https://www.datadoghq.com/blog/engineering/php-8-observability-baked-right-in/#the-observability-landscape-before-php-8\n By default, skywalking-php hooks the zend_execute_internal and zend_execute_ex functions to implement auto instrumentation.\nBut there are some drawbacks:\n All PHP function calls are placed on the native C stack, which is limited by the value set in ulimit -s. Not compatible with the new JIT added in PHP 8.  The observer API in PHP 8+ Now, zend observer api is a new generation method, and it is also a method currently recommended by PHP8.\nThis method has no stack problem and will not affect JIT.\nConfiguration The following configuration example enables JIT in PHP8 and zend observer support in skywalking-php at the same time.\n[opcache] zend_extension = opcache ; Enable JIT opcache.jit = tracing [skywalking_agent] extension = skywalking_agent.so ; Switch to use zend observer api to implement auto instrumentation. skywalking_agent.enable_zend_observer = On ","excerpt":"Zend observer  Refer to: …","ref":"/docs/skywalking-php/v0.7.0/en/configuration/zend-observer/","title":"Zend observer"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. 
If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/latest/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. 
Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. 
Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/next/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation. It supports Zipkin v1/v2 formats through HTTP service. Make sure you use this with SW_STORAGE=zipkin-elasticsearch option to activate Zipkin storage implementation. Once this receiver and storage are activated, SkyWalking\u0026rsquo;s native traces would be ignored, and SkyWalking wouldn\u0026rsquo;t analyze topology, metrics, and endpoint dependency from Zipkin\u0026rsquo;s trace.\nUse the following config to activate it.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:-}default:host:${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}port:${SW_RECEIVER_ZIPKIN_PORT:9411}contextPath:${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}jettyMinThreads:${SW_RECEIVER_ZIPKIN_JETTY_MIN_THREADS:1}jettyMaxThreads:${SW_RECEIVER_ZIPKIN_JETTY_MAX_THREADS:200}jettyIdleTimeOut:${SW_RECEIVER_ZIPKIN_JETTY_IDLE_TIMEOUT:30000}jettyAcceptorPriorityDelta:${SW_RECEIVER_ZIPKIN_JETTY_DELTA:0}jettyAcceptQueueSize:${SW_RECEIVER_ZIPKIN_QUEUE_SIZE:0}NOTE: Zipkin receiver requires zipkin-elasticsearch storage implementation to be activated. Read this doc to learn about Zipkin as a storage option.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.0.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP service.\nUse the following config to activate it.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI. 
Notice: Zipkin query API implementation does not support BanyanDB yet.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.1.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI. 
Notice: Zipkin query API implementation does not support BanyanDB yet.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.2.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. 
It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.3.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. 
It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.4.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. 
If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.5.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. 
Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. 
Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.6.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. 
It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","excerpt":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server …","ref":"/docs/main/v9.7.0/en/setup/backend/zipkin-trace/","title":"Zipkin receiver"},{"body":"","excerpt":"","ref":"/zh/","title":"博客"}]
\ No newline at end of file
diff --git a/searchindex.json b/searchindex.json
index 1787276..acf2bba 100644
--- a/searchindex.json
+++ b/searchindex.json
@@ -1 +1 @@
-[{"content":"Apache SkyWalking从2015年开源到2024年,已经走过了9个年头,项目的规模和功能也得到了极大的丰富。 2024年4月至6月,SkyWalking社区联合纵目,举办线上的联合直播,分多个主题介绍SkyWalking的核心特性,也提供更多的答疑时间。\n2024年4月25日,SkyWalking创始人带来了第一次分享和Q\u0026amp;A\n 熟悉SkyWalking项目结构 介绍项目工程划分,边界,定位 SkyWalking文档使用,以及如何使用AI助手 Q\u0026amp;A  B站视频地址\n想参与直播的小伙伴,可以关注后续的直播安排和我们的B站直播预约\n","title":"SkyWalking从入门到精通 - 2024系列线上分享活动(第一讲)","url":"/zh/2024-04-26-skywalking-in-practice-s01e01/"},{"content":"Introduction Apache ActiveMQ Classic is a popular and powerful open-source messaging and integration pattern server. Founded in 2004, it has evolved into a mature and widely used open-source messaging middleware that complies with the Java Message Service (JMS). Today, with its stability and wide range of feature support, it still has a certain number of users of small and medium-sized enterprises. It‘s high-performance version Apache Artemis is developing rapidly and is also attracting attention from users of ActiveMQ.\nActiveMQ has broad support for JMX (Java Management Extensions), allowing to be monitored through JMX MBean. After enabling JMX, you can use JAVA\u0026rsquo;s built-in jconsole or VisualVM to view the metrics. In addition, some Collector components can also be used to convert JMX-style data into Prometheus-style data, which is suitable for more tools.\nOpenTelemetry as an industry-recognized, standardized solution that provides consistent and interoperable telemetry data collection, transmission, and analysis capabilities for distributed systems, and is also used here for data collection and transmission. Although it can directly accept JMX type data, the JMX indicators for collecting ActiveMQ are not in the standard library, and some versions are incompatible, so this article adopts two steps: convert JMX data into Prometheus-style indicator data, and then use OpenTelemetry to scrape HTTP endpoint data.\nSkyWalking as a one-stop distributed system monitoring solution, it accepts metrics from ActiveMQ and provides a basic monitoring dashboard.\nDeployment Please set up the following services:\n SkyWalking OAP, v10.0+. ActiveMQ v6.0.X+. JMX Exporter v0.20.0. If using docker, refer bitnami/jmx-exporter. OpenTelmetry-Collector v0.92.0.  Preparation The following describes how to deploy ActiveMQ with 2 single-node brokers and SkyWalking OAP with one single node. JMX Exporter runs in agent mode (recommended).\nConfiguration  Enable JMX in ActiveMQ, the JMX remote port defaults to 1616, you can change it through ACTIVEMQ_SUNJMX_START. Set up the exporter:  [Recommended] If run exporter in agent mode, need to append the startup parameter -DACTIVEMQ_OPTS=-javaagent:{activemqPath}/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:{activemqPath}/conf/config.yaml in ActiveMQ env, then exporter server starts at the same time. If run exporter in single server, refer here to deploy the server alone. 2345 is open HTTP port that can be customized. JMX\u0026rsquo;s metrics can be queried through http://localhost:2345/metrics.    
example of docker-compose.yml with agent exporter for ActiveMQ:\nversion:\u0026#39;3.8\u0026#39;services:amq1:image:apache/activemq-classic:latestcontainer_name:amq1hostname:amq1volumes:- ~/activemq1/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq1/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq1/conf/config.yaml:/opt/apache-activemq/conf/config.yamlports:- \u0026#34;61616:61616\u0026#34;- \u0026#34;8161:8161\u0026#34;- \u0026#34;2345:2345\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-1networks:- amqtest amq2:image:apache/activemq-classic:latestcontainer_name:amq2hostname:amq2volumes:- ~/activemq2/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq2/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq2/conf/config.yaml:/opt/apache-activemq/conf/config.yaml ports:- \u0026#34;61617:61616\u0026#34;- \u0026#34;8162:8161\u0026#34;- \u0026#34;2346:2346\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2346:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-2 networks:- amqtestotel-collector1:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector1command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config1.yaml:/etc/otel-collector-config.yamldepends_on:- amq1networks:- amqtest otel-collector2:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector2command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config2.yaml:/etc/otel-collector-config.yamldepends_on:- amq2networks:- amqtest networks:amqtest:example of otel-collector-config.yaml for OpenTelemetry:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;activemq-monitoring\u0026#39;scrape_interval:30sstatic_configs:- targets:[\u0026#39;amq1:2345\u0026#39;]labels:cluster:activemq-broker1processors:batch:exporters:otlp:endpoint:oap:11800tls:insecure:trueservice:pipelines:metrics:receivers:- prometheusprocessors:- batchexporters:- otlpexample of config.yaml for ActiveMQ Exporter:\n---startDelaySeconds:10username:adminpassword:activemqssl:falselowercaseOutputName:falselowercaseOutputLabelNames:falseincludeObjectNames:[\u0026#34;org.apache.activemq:*\u0026#34;,\u0026#34;java.lang:type=OperatingSystem\u0026#34;,\u0026#34;java.lang:type=GarbageCollector,*\u0026#34;,\u0026#34;java.lang:type=Threading\u0026#34;,\u0026#34;java.lang:type=Runtime\u0026#34;,\u0026#34;java.lang:type=Memory\u0026#34;,\u0026#34;java.lang:name=*\u0026#34;]excludeObjectNames:[\u0026#34;org.apache.activemq:type=ColumnFamily,*\u0026#34;]autoExcludeObjectNameAttributes:trueexcludeObjectNameAttributes:\u0026#34;java.lang:type=OperatingSystem\u0026#34;:- \u0026#34;ObjectName\u0026#34;\u0026#34;java.lang:type=Runtime\u0026#34;:- \u0026#34;ClassPath\u0026#34;- \u0026#34;SystemProperties\u0026#34;rules:- pattern:\u0026#34;.*\u0026#34;Steps  Start ActiveMQ, and the Exporter(agent) and the service start at the same time. Start SkyWalking OAP and SkyWalking UI. Start OpenTelmetry-Collector.  
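Note that the docker-compose file above mounts a separate otel-collector-config2.yaml for the second broker, but only the first collector configuration is shown. A minimal sketch of what that second file could contain, assuming it simply mirrors the first one with the amq2:2346 target; the activemq-broker2 label is an illustrative name, not taken from the original post:
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'activemq-monitoring'   # same job_name as the first collector config
          scrape_interval: 30s
          static_configs:
            - targets: ['amq2:2346']        # second broker's exporter port from the compose file
              labels:
                cluster: activemq-broker2   # assumed label; pick any cluster name you like
processors:
  batch:
exporters:
  otlp:
    endpoint: oap:11800                     # SkyWalking OAP gRPC endpoint
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers:
        - prometheus
      processors:
        - batch
      exporters:
        - otlp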
Once the steps above are completed, node metrics will be captured and pushed to SkyWalking.\nMetrics Monitoring metrics fall into Cluster Metrics, Broker Metrics, and Destination Metrics.\n Cluster Metrics: including memory usage, rates of write/read, and average/max duration of write. Broker Metrics: including node state, number of connections, number of producers/consumers, and rate of write/read under the broker. Depending on the cluster mode, one cluster may include one or more brokers. Destination Metrics: including number of producers/consumers, messages in different states, queues, and enqueue duration in a queue/topic.  Cluster Metrics  System Load: range in [0, 100]. Thread Count: the number of threads currently used by the JVM. Heap Memory: capacity of heap memory. GC: memory of ActiveMQ is managed by Java\u0026rsquo;s garbage collection (GC) process. Enqueue/Dequeue/Dispatch/Expired Rate: growth rate of messages in different states. Average/Max Enqueue Time: time taken to join the queue.  Broker Metrics  Uptime: how long the node has been running. State: 1 = slave node, 0 = master node. Current Connections: number of connections. Current Producer/Consumer Count: number of current producers/consumers. Increased Producer/Consumer Count: number of newly added producers/consumers. Enqueue/Dequeue Count: number of enqueued and dequeued messages. Enqueue/Dequeue Rate: rate of enqueue and dequeue. Memory Percent Usage: amount of memory space used by undelivered messages. Store Percent Usage: space used by pending persistent messages. Temp Percent Usage: space used by non-persistent messages. Average/Max Message Size: average/maximum size of messages. Queue Size: number of messages in the queue.  Destination Metrics  Producer/Consumer Count: number of producers/consumers. Queue Size: unacknowledged messages of the queue. Memory Usage: memory usage of the destination. Enqueue/Dequeue/Dispatch/Expired/Inflight Count: number of messages in different states. Average/Max Message Size: average/maximum size of messages. Average/Max Enqueue Time: time taken to join the queue.  
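The broker and destination metrics listed above are read from the org.apache.activemq:* MBeans that the exporter's includeObjectNames whitelist exposes, while the cluster-level JVM metrics (system load, thread count, heap memory, GC) come from the java.lang:* entries. A trimmed-down sketch of the exporter config.yaml that keeps only those groups, assuming the same exporter version and defaults as the full example earlier:
startDelaySeconds: 10
lowercaseOutputName: false
lowercaseOutputLabelNames: false
includeObjectNames:
  - "org.apache.activemq:*"               # broker and destination MBeans
  - "java.lang:type=OperatingSystem"      # system load
  - "java.lang:type=Threading"            # thread count
  - "java.lang:type=Memory"               # heap memory
  - "java.lang:type=GarbageCollector,*"   # GC
autoExcludeObjectNameAttributes: true
rules:
  - pattern: ".*"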
Reference  ActiveMQ Classic clustering JMX Exporter Configuration JMX Exporter-Running the Standalone HTTP Server OpenTelemetry Collector Contrib Jmxreceiver  ","title":"Monitoring ActiveMQ through SkyWalking","url":"/blog/2024-04-19-monitoring-activemq-through-skywalking/"},{"content":"引言 Apache ActiveMQ Classic 是一个流行且功能强大的开源消息传递和集成模式服务器。始于2004年,逐渐发展成为了一个成熟且广泛使用的开源消息中间件,符合Java消息服务(JMS)规范。 发展至今,凭借其稳定性和广泛的特性支持,仍然拥有一定数量的中小型企业的使用者。其高性能版本 Apache Artemis 目前处于快速发展阶段,也受到了 ActiveMQ 现有使用者的关注。\nActiveMQ 对 JMX(Java Management Extensions) 有广泛的支持,允许通过 JMX MBean 监视和控制代理的行为。 开启JMX之后,就可以使用 JAVA 自带的 jconsole 工具或者 VisualVM 等工具直观查看指标。此外也可以通过一些 Collector 组件,将 JMX 风格的数据转换为 prometheus 风格的数据,适配更多查询与展示工具。\nOpenTelemetry 作为业界公认的标准化解决方案,可为分布式系统提供一致且可互操作的遥测数据收集、传输和分析能力,这里也主要借助它实现数据的采集和传输。 它虽然可以直接接受 JMX 类型的数据,但是关于采集 ActiveMQ 的 JMX 指标并不在标准库,存在部分版本不兼容,因此本文采用两步:将 JMX 数据转换为 Prometheus 风格的指标数据,再使用 OpenTelemetry 传递。\nSkyWalking 作为一站式的分布式系统监控解决方案,接纳来自 ActiveMQ 的指标数据,并提供基础的指标监控面板。\n服务部署 请准备以下服务\n SkyWalking OAP, v10.0+。 ActiveMQ v6.0.X+。 JMX Exporter v0.20.0。如果你使用docker,参考使用 bitnami/jmx-exporter。 OpenTelmetry-Collector v0.92.0。  服务准备 以下通过 SkyWalking OAP 单节点、ActiveMQ 2个单节点服务的部署方式介绍。JMX Exporter 采用推荐的 agent 方式启动。\n配置流程  在 ActiveMQ 中开启JMX,其中 JMX 远程端口默认1616,如需修改可通过 ACTIVEMQ_SUNJMX_START 参数调整。 设置 Exporter:  如果采用推荐的 Agent 方式启动,需要追加启动参数 -DACTIVEMQ_OPTS=-javaagent:{activemqPath}/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:{activemqPath}/conf/config.yaml 如果采用单独服务的方式启动,可以参考这里独立部署 Exporter 服务。 其中 2345 为开放的 HTTP 端口可自定义。最终可通过访问 http://localhost:2345/metrics 查询到 JMX 的指标数据。    采用 Agent Exporter 方式的 docker-compose.yml 配置样例:\nversion:\u0026#39;3.8\u0026#39;services:amq1:image:apache/activemq-classic:latestcontainer_name:amq1hostname:amq1volumes:- ~/activemq1/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq1/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq1/conf/config.yaml:/opt/apache-activemq/conf/config.yamlports:- \u0026#34;61616:61616\u0026#34;- \u0026#34;8161:8161\u0026#34;- \u0026#34;2345:2345\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-1networks:- amqtest amq2:image:apache/activemq-classic:latestcontainer_name:amq2hostname:amq2volumes:- ~/activemq2/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq2/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq2/conf/config.yaml:/opt/apache-activemq/conf/config.yaml ports:- \u0026#34;61617:61616\u0026#34;- \u0026#34;8162:8161\u0026#34;- \u0026#34;2346:2346\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2346:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-2 networks:- amqtestotel-collector1:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector1command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config1.yaml:/etc/otel-collector-config.yamldepends_on:- amq1networks:- amqtest otel-collector2:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector2command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config2.yaml:/etc/otel-collector-config.yamldepends_on:- amq2networks:- amqtest networks:amqtest:OpenTelemetry otel-collector-config.yaml 
配置样例:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;activemq-monitoring\u0026#39;scrape_interval:30sstatic_configs:- targets:[\u0026#39;amq1:2345\u0026#39;]labels:cluster:activemq-broker1processors:batch:exporters:otlp:endpoint:oap:11800tls:insecure:trueservice:pipelines:metrics:receivers:- prometheusprocessors:- batchexporters:- otlpActiveMQ Exporter config.yaml 配置样例:\n---startDelaySeconds:10username:adminpassword:activemqssl:falselowercaseOutputName:falselowercaseOutputLabelNames:falseincludeObjectNames:[\u0026#34;org.apache.activemq:*\u0026#34;,\u0026#34;java.lang:type=OperatingSystem\u0026#34;,\u0026#34;java.lang:type=GarbageCollector,*\u0026#34;,\u0026#34;java.lang:type=Threading\u0026#34;,\u0026#34;java.lang:type=Runtime\u0026#34;,\u0026#34;java.lang:type=Memory\u0026#34;,\u0026#34;java.lang:name=*\u0026#34;]excludeObjectNames:[\u0026#34;org.apache.activemq:type=ColumnFamily,*\u0026#34;]autoExcludeObjectNameAttributes:trueexcludeObjectNameAttributes:\u0026#34;java.lang:type=OperatingSystem\u0026#34;:- \u0026#34;ObjectName\u0026#34;\u0026#34;java.lang:type=Runtime\u0026#34;:- \u0026#34;ClassPath\u0026#34;- \u0026#34;SystemProperties\u0026#34;rules:- pattern:\u0026#34;.*\u0026#34;启动步骤  启动 ActiveMQ,Exporter 和服务同时启动。 启动 SkyWalking OAP 和 SkyWalking UI。 启动 OpenTelmetry-Collector。  以上步骤执行完成后,节点指标就会定时抓取后推送到 SkyWalking,经过分组聚合后前端页面可查看到 ActiveMQ 的面板数据。\n监控指标 监控指标主要分为3类:Cluster 指标、Broker 指标、Destination 指标\n Cluster 指标:主要关注集群的内存使用情况、数据写入与读取速率平均情况、平均与最大的写入时长等。 Broker 指标:主要关注 Broker 下节点状态、连接数、生产者消费者数量、写入读取速率等。根据集群形式不同,一个Cluster可能包括一个或多个Broker。 Destination 指标:主要关注 Queue/Topic 下的生产者消费者数量、不同状态消息数量、队列数量、入队时长等。  Cluster 指标  System Load:[0, 100]的值来反馈系统负载。 Thread Count:JVM 当前使用的线程数。 Heap Memory:堆内存的容量一定程度反映服务的处理性能。 GC:ActiveMQ 在 JVM 中运行,其内存由 Java 的垃圾回收 (GC) 进程管理,GC能直接反映服务的状态。 Enqueue/Dequeue/Dispatch/Expired Rate:不同状态信息的增长速率能直接反映生产活动。 Average/Max Enqueue Time:入队的耗时能一定程度影响生产者。  Broker 指标  Uptime:节点存活时长。 State:是否为从节点,1=从节点,0=主节点。 Current Connentions:目前的连接数。 Current Producer/Consumer Count:目前生产者消费者数量。 Increased Producer/Consumer Count:增长的生产者消费者数量。 Enqueue/Dequeue Count: 入队出队数量。 Enqueue/Dequeue Rate: 入队出队速率。 Memory Percent Usage:未送达消息使用的内存空间。 Store Percent Usage: 挂起的持久性消息占用的空间。 Temp Percent Usage:非持久化消息占用的空间。 Average/Max Message Size:消息量。 Queue Size:队列中消息量。  Destination 指标  Producer/Consumer Count:生产者/消费者数量。 Queue Size:队列的未消费数量。 Memory Usage:内存的使用。 Enqueue/Dequeue/Dispatch/Expired/Inflight Count:不同状态消息数。 Average/Max Enqueue Time:入队的耗时。 Average/Max Message Size:消息量。  参考文档  ActiveMQ Classic clustering JMX Exporter Configuration JMX Exporter-Running the Standalone HTTP Server OpenTelemetry Collector Contrib Jmxreceiver  ","title":"使用 SkyWalking 监控 ActiveMQ","url":"/zh/2024-04-19-monitoring-activemq-through-skywalking/"},{"content":"Zixin Zhou(GitHub ID, CodePrometheus[1]) began the code contributions since Oct 28, 2023.\nUp to date, he has submitted 8 PRs in the Go agent repository, 7 PRs in the main repo, 1 PR in the UI repository and 2 PRs in the showcase repository.\nAt Apr 15th, 2024, the project management committee(PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome Zixin Zhou join the committer team.\n[1] https://github.com/CodePrometheus\n","title":"Welcome Zixin Zhou as new committer","url":"/events/welcome-zixin-zhou-as-new-committer/"},{"content":"SkyWalking Eyes 0.6.0 is released. 
Go to downloads page to find release tars.\n Add | as comment indicator by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/168 Correct the way of joining slack channels by @wu-sheng in https://github.com/apache/skywalking-eyes/pull/169 update: add weak-compatible to dependency check by @Two-Hearts in https://github.com/apache/skywalking-eyes/pull/171 feature: add support for Protocol Buffer by @spacewander in https://github.com/apache/skywalking-eyes/pull/172 feature: add support for OPA policy files by @spacewander in https://github.com/apache/skywalking-eyes/pull/174 add Eclipse Foundation specific Apache 2.0 license header by @gdams in https://github.com/apache/skywalking-eyes/pull/178 add instructions to fix header issues in markdown comment by @gdams in https://github.com/apache/skywalking-eyes/pull/179 bump action/setup-go to v5 by @gdams in https://github.com/apache/skywalking-eyes/pull/180 Draft release notes for 0.6.0 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/181  Full Changelog: https://github.com/apache/skywalking-eyes/compare/v0.5.0...v0.6.0\n","title":"Release Apache SkyWalking Eyes 0.6.0","url":"/events/release-apache-skywalking-eyes-0-6-0/"},{"content":"SkyWalking Java Agent 9.2.0 is released. Go to downloads page to find release tars. Changes by Version\n9.2.0  Fix NoSuchMethodError in mvc-annotation-commons and change deprecated method. Fix forkjoinpool plugin in JDK11. Support for tracing spring-cloud-gateway 4.x in gateway-4.x-plugin. Fix re-transform bug when plugin enhanced class proxy parent method. Fix error HTTP status codes not recording as SLA failures in Vert.x plugins. Support for HttpExchange request tracing. Support tracing for async producing, batch sync consuming, and batch async consuming in rocketMQ-client-java-5.x-plugin. Convert the Redisson span into an async span. Rename system env name from sw_plugin_kafka_producer_config to SW_PLUGIN_KAFKA_PRODUCER_CONFIG. Support for ActiveMQ-Artemis messaging tracing. Archive the expired plugins impala-jdbc-2.6.x-plugin. Fix a bug in Spring Cloud Gateway if HttpClientFinalizer#send does not invoke, the span created at NettyRoutingFilterInterceptor can not stop. Fix not tracing in HttpClient v5 when HttpHost(arg[0]) is null but RoutingSupport#determineHost works. Support across thread tracing for SOFA-RPC. Update Jedis 4.x plugin to support Sharding and Cluster models.  Documentation  Update docs to describe expired-plugins.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 9.2.0","url":"/events/release-apache-skywalking-java-agent-9-2-0/"},{"content":"SkyWalking Rover 0.6.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance compatibility when profiling with SSL. Update LabelValue obtain pod information function to add default value parameter. Add HasOwnerName to judgement pod has owner name. Publish the latest Docker image tag. Improve the stability of Off CPU Profiling. Support collecting the access log from Kubernetes. Remove the scanner mode in the process discovery module. Upgrade Go library to 1.21, eBPF library to 0.13.2. Support using make docker.debug to building the debug docker image.  Bug Fixes Documentation  Update architecture diagram. Delete module design and project structure document. Adjust configuration modules during setup.  
Issues and PR  All issues are here All pull requests are here  ","title":"Release Apache SkyWalking Rover 0.6.0","url":"/events/release-apache-skwaylking-rover-0-6-0/"},{"content":"SkyWalking Cloud on Kubernetes 0.9.0 is released. Go to downloads page to find release tars.\n0.9.0 Features  Add a getting started document about how to deploy swck on the kubernetes cluster.  Bugs  Fix the bug that the java agent is injected twice when updating the pod.  Chores  Bump up custom-metrics-apiserver Bump up golang to v1.22 Bump up controller-gen to v0.14.0  ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.9.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-9-0/"},{"content":"Background Apache SkyWalking is an open-source Application Performance Management system that helps users gather logs, traces, metrics, and events from various platforms and display them on the UI. With version 9.7.0, SkyWalking can collect access logs from probes in multiple languages and from Service Mesh, generating corresponding topologies, tracing, and other data. However, it could not initially collect and map access logs from applications in Kubernetes environments. This article explores how the 10.0.0 version of Apache SkyWalking employs eBPF technology to collect and store application access logs, addressing this limitation.\nWhy eBPF? To monitor the network traffic in Kubernetes, the following features must be supported:\n Cross Language: Applications deployed in Kubernetes may be written in any programming language, making support for diverse languages important. Non-Intrusiveness: It\u0026rsquo;s imperative to monitor network traffic without making any modifications to the applications, as direct intervention with applications in Kubernetes is not feasible. Kernel Metrics Monitoring: Often, diagnosing network issues by analyzing traffic performance at the user-space level is insufficient. A deeper analysis incorporating kernel-space network traffic metrics is frequently necessary. Support for Various Network Protocols: Applications may communicate using different transport protocols, necessitating support for a range of protocols.  Given these requirements, eBPF emerges as a capable solution. In the next section, we will delve into detailed explanations of how Apache SkyWalking Rover resolves these aspects.\nKernel Monitoring and Protocol Analysis In previous articles, we\u0026rsquo;ve discussed how to monitor network traffic from programs written in various languages. This technique remains essential for network traffic monitoring, allowing for the collection of traffic data without language limitations. However, due to the unique aspects of our monitoring trigger mechanism and the specific features of kernel monitoring, these two areas warrant separate explanations.\nKernel Monitoring Kernel monitoring allows users to gain insights into network traffic performance based on the execution at the kernel level, specifically from Layer 2 (Data Link) to Layer 4 (Transport) of the OSI model.\nNetwork monitoring at the kernel layer differs from the syscall (user-space) layer in terms of the metrics and identifiers used. While the syscall layer can utilize file descriptors to correlate various operations, kernel layer network operations primarily use packets as unique identifiers. 
This discrepancy necessitates a mapping relationship that SkyWalking Rover can use to bind these two layers together for comprehensive monitoring.\nLet\u0026rsquo;s dive into the details of how data is monitored in both sending and receiving modes.\nObserve Sending When sending data, tracking the status and timing of each packet is crucial for understanding the state of each transmission. Within the kernel, operations progress from Layer 4 (L4) down to Layer 2 (L2), maintaining the same thread ID as during the syscalls layer, which simplifies data correlation.\nSkyWalking Rover monitors several key kernel functions to observe packet transmission dynamics, listed from L4 to L2:\n kprobe/tcp_sendmsg: Captures the time when a packet enters the L4 protocol stack for sending and the time it finishes processing. This function is essential for tracking the initial handling of packets at the transport layer. kprobe/tcp_transmit_skb: Records the total number of packet transmissions and the size of each packet sent. This function helps identify how many times a packet or a batch of packets is attempted to be sent, which is critical for understanding network throughput and congestion. tracepoint/tcp/tcp_retransmit_skb: Notes whether packet retransmission occurs, providing insights into network reliability and connection quality. Retransmissions can significantly impact application performance and user experience. tracepoint/skb/kfree_skb: Records packet loss during transmission and logs the reason for such occurrences. Understanding packet loss is crucial for diagnosing network issues and ensuring data integrity. kprobe/__ip_queue_xmit: Records the start and end times of processing by the L3 protocol. This function is vital for understanding the time taken for IP-level operations, including routing decisions. kprobe/nf_hook_slow: Records the total time and number of occurrences spent in Netfilter hooks, such as iptables rule evaluations. This monitoring point is important for assessing the impact of firewall rules and other filtering mechanisms on packet flow. kprobe/neigh_resolve_output: If resolving an unknown MAC address is necessary before sending a network request, this function records the occurrences and total time spent on this resolution. MAC address resolution times can affect the initial packet transmission delay. kprobe/__dev_queue_xmit: Records the start and end times of entering the L2 protocol stack, providing insights into the data link layer\u0026rsquo;s processing times. tracepoint/net/net_dev_start_xmit and tracepoint/net/net_dev_xmit: Records the actual time taken to transmit each packet at the network interface card (NIC). These functions are crucial for understanding the hardware-level performance and potential bottlenecks at the point of sending data to the physical network.  According to the interception of the above method, Apache SkyWalking Rover can provide key execution time and metrics for each level when sending network data, from the application layer (Layer 7) to the transport layer (Layer 4), and finally to the data link layer (Layer 2).\nObserve Receiving When receiving data, the focus is often on the time it takes for packets to travel from the network interface card (NIC) to the user space. Unlike the process of sending data, data receiving in the kernel proceeds from the data link layer (Layer 2) up to the transport layer (Layer 4), until the application layer (Layer 7) retrieves the packet\u0026rsquo;s content. 
In SkyWalking Rover, monitors the following key system functions to observe this process, listed from L2 to L4:\n tracepoint/net/netif_receive_skb: Records the time when a packet is received by the network interface card. This tracepoint is crucial for understanding the initial point of entry for incoming data into the system. kprobe/ip_rcv: Records the start and end times of packet processing at the network layer (Layer 3). This probe provides insights into how long it takes for the IP layer to handle routing, forwarding, and delivering packets to the correct application. kprobe/nf_hook_slow: Records the total time and occurrences spent in Netfilter hooks, same with the sending traffic flow. kprobe/tcp_v4_rcv: Records the start and end times of packet processing at the transport layer (Layer 4). This probe is key to understanding the efficiency of TCP operations, including connection management, congestion control, and data flow. tracepoint/skb/skb_copy_datagram_iovec: When application layer protocols use the data, this tracepoint binds the packet to the syscall layer data at Layer 7. This connection is essential for correlating the kernel\u0026rsquo;s handling of packets with their consumption by user-space applications.  Based on the above methods, network monitoring can help you understand the complete execution process and execution time from when data is received by the network card to when it is used by the program.\nMetrics By intercepting the methods mentioned above, we can gather key metrics that provide insights into network performance and behavior. These metrics include:\n Packets: The size of the packets and the frequency of their transmission or reception. These metric offers a fundamental understanding of the network load and the efficiency of data movement between the sender and receiver. Connections: The number of connections established or accepted between services and the time taken for these connections to be set up. This metric is crucial for analyzing the efficiency of communication and connection management between different services within the network. L2-L4 Events: The time spent on key events within the Layer 2 to Layer 4 protocols. This metric sheds light on the processing efficiency and potential bottlenecks within the lower layers of the network stack, which are essential for data transmission and reception.  Protocol Analyzing In previous articles, we have discussed parsing HTTP/1.x protocols. However, with HTTP/2.x, the protocol\u0026rsquo;s stateful nature and the pre-established connections between services complicate network profiling. This complexity makes it challenging for Apache SkyWalking Rover to fully perceive the connection context, hindering protocol parsing operations.\nTransitioning network monitoring to Daemon mode offers a solution to this challenge. By continuously observing service operations around the clock, SkyWalking Rover can begin monitoring as soon as a service starts. This immediate initiation allows for the tracking of the complete execution context, making the observation of stateful protocols like HTTP/2.x feasible.\nProbes To detect when a process is started, monitoring a specific trace point (tracepoint/sched/sched_process_fork) is essential. This approach enables the system to be aware of process initiation events. Given the necessity to filter process traffic based on certain criteria such as the process\u0026rsquo;s namespace, Apache SkyWalking Rover follows a series of steps to ensure accurate and efficient monitoring. 
These steps include:\n Monitoring Activation: The process is immediately added to a monitoring whitelist upon detection. This step ensures that the process is considered for monitoring from the moment it starts, without delay. Push to Queue: The process\u0026rsquo;s PID (Process ID) is pushed into a monitoring confirmation queue. This queue holds the PIDs of newly detected processes that are pending further confirmation from a user-space program. This asynchronous approach allows for the separation of immediate detection and subsequent processing, optimizing the monitoring workflow. User-Space Program Confirmation: The user-space program retrieves process PIDs from the queue and assesses whether each process should continue to be monitored. If a process is deemed unnecessary for monitoring, it is removed from the whitelist.  This process ensures that SkyWalking Rover can dynamically adapt its monitoring scope based on real-time conditions and configurations, allowing for both comprehensive coverage and efficient resource use.\nLimitations The monitoring of stateful protocols like HTTP/2.x currently faces certain limitations:\n Inability to Observe Pre-existing Connections: Monitoring the complete request and response cycle requires that monitoring be initiated before any connections are established. This requirement means that connections set up before the start of monitoring cannot be observed. Challenges with TLS Requests: Observing TLS encrypted traffic is complex because it relies on asynchronously attaching uprobes (user-space attaching) for observation. If new requests are made before these uprobes are successfully attached, it becomes impossible to access the data before encryption or after decryption.  Demo Next, let’s quickly demonstrate the Kubernetes monitoring feature, so you can understand more specifically what it accomplishes.\nDeploy SkyWalking Showcase SkyWalking Showcase contains a complete set of example services and can be monitored using SkyWalking. For more information, please check the official documentation.\nIn this demo, we only deploy service, the latest released SkyWalking OAP, and UI.\nexport FEATURE_FLAGS=java-agent-injector,single-node,elasticsearch,rover make deploy.kubernetes After deployment is complete, please run the following script to open SkyWalking UI: http://localhost:8080/.\nkubectl port-forward svc/ui 8080:8080 --namespace default Done Once deployed, Apache SkyWalking Rover automatically begins monitoring traffic within the system upon startup. Then, reports this traffic data to SkyWalking OAP, where it is ultimately stored in a database.\nIn the Service Dashboard within Kubernetes, you can view a list of monitored Kubernetes services. If any of these services have HTTP traffic, this information would be displayed alongside them in the dashboard.\nFigure 1: Kubernetes Service List\nAdditionally, within the Topology Tab, you can observe the topology among related services. 
In each service or call relationship, there would display relevant TCP and HTTP metrics.\nFigure 2: Kubernetes Service Topology\nWhen you select a specific service from the Service list, you can view service metrics at both the TCP and HTTP levels for the chosen service.\nFigure 3: Kubernetes Service TCP Metrics\nFigure 4: Kubernetes Service HTTP Metrics\nFurthermore, by using the Endpoint Tab, you can see which URIs have been accessed for the current service.\nFigure 5: Kubernetes Service Endpoint List\nConclusion In this article, I\u0026rsquo;ve detailed how to utilize eBPF technology for network monitoring of services within a Kubernetes cluster, a capability that has been implemented in Apache SkyWalking Rover. This approach leverages the power of eBPF to provide deep insights into network traffic and service interactions, enhancing visibility and observability across the cluster.\n","title":"Monitoring Kubernetes network traffic by using eBPF","url":"/blog/2024-03-18-monitor-kubernetes-network-by-ebpf/"},{"content":"SkyWalking Client JS 0.11.0 is released. Go to downloads page to find release tars.\n Fixed the bug that navigator.sendBeacon sent json to backend report \u0026ldquo;No suitable request converter found for a @RequestObject List\u0026rdquo;. Fix reading property from null. Pin selenium version and update license CI. Bump dependencies. Update README.  ","title":"Release Apache SkyWalking Client JS 0.11.0","url":"/events/release-apache-skywalking-client-js-0-11-0/"},{"content":"背景 Apache SkyWalking 是一个开源的应用性能管理系统,帮助用户从各种平台收集日志、跟踪、指标和事件,并在用户界面上展示它们。\n在9.7.0版本中,Apache SkyWalking 可以从多语言的探针和 Service Mesh 中收集访问日志,并生成相应的拓扑图、链路和其他数据。 但是对于Kubernetes环境,暂时无法提供对应用程序的访问日志进行采集并生成拓扑图。本文探讨了Apache SkyWalking 10.0.0版本如何采用eBPF技术来收集和存储应用访问日志,解决了这一限制。\n为什么使用 eBPF? 
为了在Kubernetes中监控网络流量,以下特性需得到支持:\n 跨语言: 在Kubernetes部署的应用可能使用任何编程语言编写,因此对多种语言的支持十分重要。 非侵入性: 监控网络流量时不对应用程序进行任何修改是必要的,因为直接干预Kubernetes中的应用程序是不可行的。 内核指标监控: 通常,仅通过分析用户空间级别的流量来诊断网络问题是不够的。经常需要深入分析,结合内核空间的网络流量指标。 支持多种网络协议: 应用程序可能使用不同的传输协议进行通信,这就需要支持一系列的协议。  鉴于这些要求,eBPF显现出作为一个有能力的解决方案。在下一节中,我们将深入讨论Apache SkyWalking Rover是如何解决这些方面作出更详细解释。\n内核监控与协议分析 在之前的文章中,我们讨论了如何对不同编程语言的程序进行网络流量获取。在网络流量监控中,我们仍然会使用该技术进行流量采集。 但是由于这次监控触发方式和内核监控方面的不同特性,所以这两部分会单独进行说明。\n内核监控 内核监控允许用户根据在内核层面的执行,洞察网络流量性能,特别是从OSI模型的第2层(数据链路层)到第4层(传输层)。\n内核层的网络监控与syscall(用户空间系统调用)层在关联指标不同。虽然syscall层可以利用文件描述符来关联各种操作,但内核层的网络操作主要使用数据包作为唯一标识符。 这种差异需要映射关系,Apache SkyWalking Rover可以使用它将这两层绑定在一起,进行全面监控。\n让我们深入了解数据在发送和接收模式下是如何被监控的。\n监控数据发送 在发送数据时,跟踪每个数据包的状态和时间对于理解每次传输的状态至关重要。在内核中,操作从第4层(L4)一直调用到第2层(L2),并且会保持与在syscall层相同的线程ID,这简化了数据的相关性分析。\nSkyWalking Rover监控了几个关键的内核函数,以观察数据包传输动态,顺序从L4到L2:\n kprobe/tcp_sendmsg: 记录数据包进入L4协议栈进行发送以及完成处理的时间。这个函数对于跟踪传输层对数据包的初始处理至关重要。 kprobe/tcp_transmit_skb: 记录数据包传输的总次数和每个发送的数据包的大小。这个函数有助于识别尝试发送一个数据包或一段时间内发送一批数据包的次数,这对于理解网络吞吐量和拥塞至关重要。 tracepoint/tcp/tcp_retransmit_skb: 记录是否发生数据包重传,提供网络可靠性和连接质量的见解。重传可以显著影响应用性能和用户体验。 tracepoint/skb/kfree_skb: 记录传输过程中的数据包丢失,并记录发生这种情况的原因。理解数据包丢失对于诊断网络问题和确保数据完整性至关重要。 kprobe/__ip_queue_xmit: 记录L3协议处理的开始和结束时间。这个功能对于理解IP级操作所需的时间至关重要,包括路由决策。 kprobe/nf_hook_slow: 记录在Netfilter钩子中花费的总时间和发生次数,例如 iptables 规则评估。这个函数对于评估防火墙规则和其他过滤机制对数据流的影响非常重要。 kprobe/neigh_resolve_output: 如果在发送网络请求之前需要解析未知的MAC地址,这个函数会记录发生的次数和在这个解析上花费的总时间。MAC地址解析时间可以影响初始数据包传输的延迟。 kprobe/__dev_queue_xmit: 记录进入L2协议栈的开始和结束时间,提供对数据链路层处理时间的见解。 tracepoint/net/net_dev_start_xmit and tracepoint/net/net_dev_xmit: 记录在网卡(NIC)上传输每个数据包所需的实际时间。这些功能对于理解硬件级性能和在将数据发送到物理网络时可能出现的瓶颈至关重要。  根据上述方法的拦截,Apache SkyWalking Rover可以在发送网络数据时为每个层级提供关键的执行时间和指标,从应用层(第7层)到传输层(第4层),最终到数据链路层(第2层)。\n监控数据接收 在接收数据时,通常关注的是数据包从网卡(NIC)到用户空间的传输时间。与发送数据的过程不同,在内核中接收数据是从数据链路层(第2层)开始,一直上升到传输层(第4层),直到应用层(第7层)检索到数据包的内容。\n在SkyWalking Rover中,监控以下关键系统功能以观察这一过程,顺序从L2到L4:\n tracepoint/net/netif_receive_skb: 记录网卡接收到数据包的时间。这个追踪点对于理解进入系统的传入数据的初始入口点至关重要。 kprobe/ip_rcv: 记录网络层(第3层)数据包处理的开始和结束时间。这个探针提供了IP层处理路由、转发和将数据包正确传递给应用程序所需时间的见解。 kprobe/nf_hook_slow: 记录在Netfilter钩子中花费的总时间和发生次数,与发送流量的情况相同。 kprobe/tcp_v4_rcv: 记录传输层(第4层)数据包处理的开始和结束时间。这个探针对于理解TCP操作的效率至关重要,包括连接管理、拥塞控制和数据流。 tracepoint/skb/skb_copy_datagram_iovec: 当应用层协议使用数据时,这个追踪点在第7层将数据包与syscall层的数据绑定。这种连接对于将内核对数据包的处理与用户空间应用程序的消费相关联是至关重要的。  基于上述方法,网络监控可以帮助您理解从网卡接收数据到程序使用数据的完整执行过程和执行时间。\n指标 通过拦截上述提到的方法,我们可以收集提供网络性能的关键指标。这些指标包括:\n 数据包: 数据包的大小及其传输或接收的频率。这些指标提供了对网络负载和数据在发送者与接收者之间传输效率的基本理解。 连接: 服务之间建立或接收的连接数量,以及设置这些连接所需的时间。这个指标对于分析网络内不同服务之间的通信效率和连接管理至关重要。 L2-L4 事件: 在第2层到第4层协议中关键事件上所花费的时间。这个指标揭示了网络堆栈较低层的处理效率和潜在瓶颈,这对于数据传输至关重要。  协议分析 在之前的文章中,我们已经讨论了解析 HTTP/1.x 协议。然而,对于 HTTP/2.x,协议的有状态性质和服务之间预先建立的连接使得网络分析变得复杂。 这种复杂性使得Apache SkyWalking Rover很难完全感知连接上下文,阻碍了协议解析操作。\n将网络监控转移到守护进程模式提供了一种解决这一挑战的方法。通过全天候不断观察服务,Apache SkyWalking Rover可以在服务启动时立即开始监控。 这种立即启动允许跟踪完整的执行上下文,使得观察像 HTTP/2.x 这样的有状态协议变得可行。\n追踪 为了检测到一个进程何时启动,监控一个特定的追踪点 (tracepoint/sched/sched_process_fork) 是必不可少的。这追踪点使系统能够意识到进程启动事件。\n鉴于需要根据某些标准(如进程的命名空间)过滤进程流量,Apache SkyWalking Rover遵循一系列步骤来确保准确和高效的监控。这些步骤包括:\n 启动监控: 一旦检测到进程,立即将其添加到监控白名单中。这一步确保从进程启动的那一刻起就考虑对其进行监控,不会有延迟。 推送队列: 进程的PID(进程ID)被推送到一个监控确认队列中。这个队列保存了新检测到的进程的PID,这些进程等待来自用户空间程序的进一步确认。这种异步方法对立即检测和后续处理进行分离,优化了监控工作流程。 用户态程序确认: 用户空间程序从队列中检索进程PID,并评估每个进程是否应该继续被监控。如果一个进程被认为不必要进行监控,它将被从白名单中移除。  这个过程确保了Apache SkyWalking Rover可以根据实时条件和配置动态调整其监控范围,允许既全面覆盖又有效的资源监控。\n限制 像 HTTP/2.x 这样的有状态协议的监控目前仍然面临一些限制:\n 无法观察现有连接: 要监控完整的请求和响应周期,需要在建立任何连接之前启动监控。这个要求意味着在监控开始之前建立的连接无法被观察到。 TLS请求的挑战: 
观察TLS加密流量是复杂的,因为它依赖于异步加载uprobes(用户空间加载)进行观察。如果在成功加载这些uprobes之前发出新的请求,那么在加密之前或解密之后访问数据就变得不可能。  演示 接下来,让我们快速演示Kubernetes监控功能,以便更具体地了解它的功能。\n部署 SkyWalking Showcase SkyWalking Showcase 包含完整的示例服务,并可以使用 SkyWalking 进行监视。有关详细信息,请查看官方文档。\n在此演示中,我们只部署服务、最新发布的 SkyWalking OAP,UI和Rover。\nexport FEATURE_FLAGS=java-agent-injector,single-node,elasticsearch,rover make deploy.kubernetes 部署完成后,请运行以下脚本以打开 SkyWalking UI:http://localhost:8080/ 。\nkubectl port-forward svc/ui 8080:8080 --namespace default 完成 一旦部署,Apache SkyWalking Rover在启动时会自动开始监控系统中的流量。然后,它将这些流量数据报告给SkyWalking OAP,并最终存储在数据库中。\n在Kubernetes中的服务仪表板中,您可以查看被监控的Kubernetes服务列表。如果其中任何服务具有HTTP流量,这些指标信息将在列表中显示。\n图 1: Kubernetes 服务列表\n此外,在拓扑图选项卡中,您可以观察相关服务之间的拓扑关系。在每个服务节点或服务之间调用关系中,将显示相关的TCP和HTTP指标。\n图 2: Kubernetes 服务拓扑图\n当您从服务列表中选择特定服务时,您可以查看所选服务在TCP和HTTP级别的服务指标。\n图 3: Kubernetes 服务 TCP 指标\n图 4: Kubernetes 服务 HTTP 指标\n此外,通过使用端点选项卡,您可以查看当前服务所访问的URI。\n图 5: Kubernetes 服务端点列表\n结论 在本文中,我详细介绍了如何利用eBPF技术对Kubernetes集群中的服务进行网络流量监控,这是Apache SkyWalking Rover中实现的一项功能。\n这项功能利用了eBPF的强大功能,提供了对网络流量和服务交互的深入洞察,增强了对整个集群的可观测性。\n","title":"使用 eBPF 监控 Kubernetes 网络流量","url":"/zh/2024-03-18-monitor-kubernetes-network-by-ebpf/"},{"content":"Background ClickHouse is an open-source column-oriented database management system that allows generating analytical data reports in real-time, so it is widely used for online analytical processing (OLAP).\nApache SkyWalking is an open-source APM system that provides monitoring, tracing and diagnosing capabilities for distributed systems in Cloud Native architectures. Increasingly, App Service architectures incorporate Skywalking as an essential monitoring component of a service or instance.\nBoth ClickHouse and Skywalking are popular frameworks, and it would be great to monitor your ClickHouse database through Skywalking. Next, let\u0026rsquo;s share how to monitor ClickHouse database with Skywalking.\nPrerequisites and configurations Make sure you\u0026rsquo;ve met the following prerequisites before you start onboarding your monitor.\nConfig steps:\n Exposing prometheus endpoint. Fetching ClickHouse metrics by OpenTelemetry. Exporting metrics to Skywalking OAP server.  Prerequisites for setup The monitoring for ClickHouse relies on the embedded prometheus endpoint of ClickHouse and will not be supported in previous versions starting from v20.1.2.4.\nYou can check the version of your server:\n:) select version(); SELECT version() Query id: 2d3773ca-c320-41f6-b2ac-7ebe37eddc58 ┌─version()───┐ │ 24.2.1.2248 │ └─────────────┘ If your ClickHouse version is earlier than v20.1.2.4, you need to set up ClickHouse-exporter to access data.\nExpose prometheus Endpoint The embedded prometheus endpoint will make it easy for data collection, you just need to open the required configuration in the core configuration file config.xml of ClickHouse. In addition to your original configuration, you only need to modify the configuration of Prometheus.\n/etc/clickhouse-server/config.xml:\n\u0026lt;clickhouse\u0026gt; ...... \u0026lt;prometheus\u0026gt; \u0026lt;endpoint\u0026gt;/metrics\u0026lt;/endpoint\u0026gt; \u0026lt;port\u0026gt;9363\u0026lt;/port\u0026gt; \u0026lt;metrics\u0026gt;true\u0026lt;/metrics\u0026gt; \u0026lt;events\u0026gt;true\u0026lt;/events\u0026gt; \u0026lt;asynchronous_metrics\u0026gt;true\u0026lt;/asynchronous_metrics\u0026gt; \u0026lt;errors\u0026gt;true\u0026lt;/errors\u0026gt; \u0026lt;/prometheus\u0026gt; \u0026lt;/clickhouse\u0026gt; Settings:\n endpoint – HTTP endpoint for scraping metrics by prometheus server. Start from ‘/’. 
port – Port for endpoint. metrics – Expose metrics from the system.metrics table. events – Expose metrics from the system.events table. asynchronous_metrics – Expose current metrics values from the system.asynchronous_metrics table. errors - Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the system.errors as well.  Save the config and restart the ClickHouse server.\nIt contains more than 1,000 metrics, covering services、networks、disk、MergeTree、errors and so on. For more details, after restarting the server, you can call curl 127.0.0.1:9363/metrics to know about the metrics.\nYou also can check the metrics by tables to make a contrast.\n:) select * from system.metrics limit 10 SELECT * FROM system.metrics LIMIT 10 Query id: af677622-960e-4589-b2ca-0b6a40c443aa ┌─metric───────────────────────────────┬─value─┬─description─────────────────────────────────────────────────────────────────────┐ │ Query │ 1 │ Number of executing queries │ │ Merge │ 0 │ Number of executing background merges │ │ Move │ 0 │ Number of currently executing moves │ │ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │ │ ReplicatedFetch │ 0 │ Number of data parts being fetched from replica │ │ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │ │ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │ │ BackgroundMergesAndMutationsPoolTask │ 0 │ Number of active merges and mutations in an associated background pool │ │ BackgroundMergesAndMutationsPoolSize │ 64 │ Limit on number of active merges and mutations in an associated background pool │ │ BackgroundFetchesPoolTask │ 0 │ Number of active fetches in an associated background pool │ └──────────────────────────────────────┴───────┴─────────────────────────────────────────────────────────────────────────────────┘ :) select * from system.events limit 10; SELECT * FROM system.events LIMIT 10 Query id: 32c618d0-037a-400a-92a4-59fde832e4e2 ┌─event────────────────────────────┬──value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ │ Query │ 7 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │ │ SelectQuery │ 7 │ Same as Query, but only for SELECT queries. │ │ InitialQuery │ 7 │ Same as Query, but only counts initial queries (see is_initial_query). │ │ QueriesWithSubqueries │ 40 │ Count queries with all subqueries │ │ SelectQueriesWithSubqueries │ 40 │ Count SELECT queries with all subqueries │ │ QueryTimeMicroseconds │ 202862 │ Total time of all queries. │ │ SelectQueryTimeMicroseconds │ 202862 │ Total time of SELECT queries. │ │ FileOpen │ 40473 │ Number of files opened. │ │ Seek │ 100 │ Number of times the \u0026#39;lseek\u0026#39; function was called. │ │ ReadBufferFromFileDescriptorRead │ 67995 │ Number of reads (read/pread) from a file descriptor. Does not include sockets. 
│ └──────────────────────────────────┴────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ Start up Opentelemetry-Collector Configure OpenTelemetry based on your own requirements. Following the example below:\notel-collector-config.yaml:\nreceivers: prometheus: config: scrape_configs: - job_name: \u0026#39;clickhouse-monitoring\u0026#39; scrape_interval: 15s static_configs: - targets: [\u0026#39;127.0.0.1:9363\u0026#39;,\u0026#39;127.0.0.1:9364\u0026#39;,\u0026#39;127.0.0.1:9365\u0026#39;] labels: host_name: prometheus-clickhouse processors: batch: exporters: otlp: endpoint: 127.0.0.1:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp Please ensure:\n job_name: 'clickhouse-monitoring' that marked the data from ClickHouse, If modified, it will be ignored. host_name defines the service name, you have to make one. endpoint point to the oap server address. the network between ClickHouse, OpenTelemetry Collector, and Skywalking OAP Server must be accessible.  If goes well, refresh the Skywalking-ui home page in a few seconds and you can see ClickHouse under the database menu.\nsuccess log:\n2024-03-12T03:57:39.407Z\tinfo\tservice@v0.93.0/telemetry.go:76\tSetting up own telemetry... 2024-03-12T03:57:39.412Z\tinfo\tservice@v0.93.0/telemetry.go:146\tServing metrics\t{\u0026quot;address\u0026quot;: \u0026quot;:8888\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;Basic\u0026quot;} 2024-03-12T03:57:39.416Z\tinfo\tservice@v0.93.0/service.go:139\tStarting otelcol...\t{\u0026quot;Version\u0026quot;: \u0026quot;0.93.0\u0026quot;, \u0026quot;NumCPU\u0026quot;: 4} 2024-03-12T03:57:39.416Z\tinfo\textensions/extensions.go:34\tStarting extensions... 2024-03-12T03:57:39.423Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:240\tStarting discovery manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:231\tScrape job added\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;, \u0026quot;jobName\u0026quot;: \u0026quot;clickhouse-monitoring\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tservice@v0.93.0/service.go:165\tEverything is ready. Begin running and processing data. 
2024-03-12T03:57:59.432Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:282\tStarting scrape manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} ClickHouse monitoring dashboard About the dashboard The dashboard includes the service dashboard and the instance dashboard.\nMetrics include servers, queries, networks, insertions, replicas, MergeTree, ZooKeeper and embedded ClickHouse Keeper.\nThe service dashboard displays the metrics of the entire cluster.\nThe instance dashboard displays the metrics of an instance.\nAbout the metrics Here are some meanings of ClickHouse Instance metrics, more here.\n   Monitoring Panel Unit Description Data Source     CpuUsage count CPU time spent seen by OS per second(according to ClickHouse.system.dashboard.CPU Usage (cores)). ClickHouse   MemoryUsage percentage Total amount of memory (bytes) allocated by the server/ total amount of OS memory. ClickHouse   MemoryAvailable percentage Total amount of memory (bytes) available for program / total amount of OS memory. ClickHouse   Uptime sec The server uptime in seconds. It includes the time spent for server initialization before accepting connections. ClickHouse   Version string Version of the server in a single integer number in base-1000. ClickHouse   FileOpen count Number of files opened. ClickHouse     metrics about ZooKeeper are valid when managing cluster by ZooKeeper metrics about embedded ClickHouse Keeper are valid when ClickHouse Keeper is enabled  References  ClickHouse prometheus endpoint ClickHouse built-in observability dashboard ClickHouse Keeper  ","title":"Monitoring Clickhouse Server through SkyWalking","url":"/blog/2024-03-12-monitoring-clickhouse-through-skywalking/"},{"content":"背景介绍 ClickHouse 是一个开源的面向列的数据库管理系统,可以实时生成分析数据报告,因此被广泛用于在线分析处理(OLAP)。\nApache SkyWalking 是一个开源的 APM 系统,为云原生架构中的分布式系统提供监控、跟踪和诊断能力。应用服务体系越来越多地将 Skywalking 作为服务或实例的基本监视组件。\nClickHouse 和 Skywalking 框架都是当下流行的服务组件,通过 Skywalking 监控您的 ClickHouse 数据库将是一个不错的选择。接下来,就来分享一下如何使用 Skywalking 监控 ClickHouse 数据库。\n前提与配置 在开始接入监控之前,请先确认以下前提条件。\n配置步骤:\n 暴露 Prometheus 端点。 通过 OpenTelemetry 拉取 ClickHouse 的指标数据。 将指标数据发送到 Skywalking OAP server.  使用的前提 ClickHouse 的监控依赖于 ClickHouse 的内嵌 Prometheus 端点配置,配置从 v20.1.2.4 开始支持,因此之前的老版本将无法支持。\n您可以检查 ClickHouse 服务的版本:\n:) select version(); SELECT version() Query id: 2d3773ca-c320-41f6-b2ac-7ebe37eddc58 ┌─version()───┐ │ 24.2.1.2248 │ └─────────────┘ 如果您的 ClickHouse 版本低于 v20.1.2.4,则需要依靠 ClickHouse-exporter 获取数据。\n暴露 Prometheus 端点 内嵌的 Prometheus 端点简化了数据采集流程,您只需要在 ClickHouse 的核心配置文件 config.xml 打开所需的配置即可。除了您原来的配置,您只需要参考如下修改 Prometheus 的配置。\n/etc/clickhouse-server/config.xml:\n\u0026lt;clickhouse\u0026gt; ...... 
\u0026lt;prometheus\u0026gt; \u0026lt;endpoint\u0026gt;/metrics\u0026lt;/endpoint\u0026gt; \u0026lt;port\u0026gt;9363\u0026lt;/port\u0026gt; \u0026lt;metrics\u0026gt;true\u0026lt;/metrics\u0026gt; \u0026lt;events\u0026gt;true\u0026lt;/events\u0026gt; \u0026lt;asynchronous_metrics\u0026gt;true\u0026lt;/asynchronous_metrics\u0026gt; \u0026lt;errors\u0026gt;true\u0026lt;/errors\u0026gt; \u0026lt;/prometheus\u0026gt; \u0026lt;/clickhouse\u0026gt; 配置说明:\n endpoint – 通过 prometheus 服务器抓取指标的 HTTP 端点。从/开始。 port – 端点的端口。 metrics – 暴露 system.metrics 表中的指标。 events – 暴露 system.events 表中的指标。 asynchronous_metrics – 暴露 system.asynchronous_metrics 表中的当前指标值。 errors - 按错误代码暴露自上次服务器重新启动以来发生的错误数。此信息也可以从 system.errors 中获得。  保存配置并重启 ClickHouse 服务。\n端点数据包含1000多个指标,涵盖服务、网络、磁盘、MergeTree、错误等。想了解更多指标细节,在重启服务后,可以调用 curl 127.0.0.1:9363/metrics 看到具体指标的内容。\n您还可以通过数据库表的数据与端点数据进行检查对比。\n:) select * from system.metrics limit 10 SELECT * FROM system.metrics LIMIT 10 Query id: af677622-960e-4589-b2ca-0b6a40c443aa ┌─metric───────────────────────────────┬─value─┬─description─────────────────────────────────────────────────────────────────────┐ │ Query │ 1 │ Number of executing queries │ │ Merge │ 0 │ Number of executing background merges │ │ Move │ 0 │ Number of currently executing moves │ │ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │ │ ReplicatedFetch │ 0 │ Number of data parts being fetched from replica │ │ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │ │ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │ │ BackgroundMergesAndMutationsPoolTask │ 0 │ Number of active merges and mutations in an associated background pool │ │ BackgroundMergesAndMutationsPoolSize │ 64 │ Limit on number of active merges and mutations in an associated background pool │ │ BackgroundFetchesPoolTask │ 0 │ Number of active fetches in an associated background pool │ └──────────────────────────────────────┴───────┴─────────────────────────────────────────────────────────────────────────────────┘ :) select * from system.events limit 10; SELECT * FROM system.events LIMIT 10 Query id: 32c618d0-037a-400a-92a4-59fde832e4e2 ┌─event────────────────────────────┬──value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ │ Query │ 7 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │ │ SelectQuery │ 7 │ Same as Query, but only for SELECT queries. │ │ InitialQuery │ 7 │ Same as Query, but only counts initial queries (see is_initial_query). │ │ QueriesWithSubqueries │ 40 │ Count queries with all subqueries │ │ SelectQueriesWithSubqueries │ 40 │ Count SELECT queries with all subqueries │ │ QueryTimeMicroseconds │ 202862 │ Total time of all queries. │ │ SelectQueryTimeMicroseconds │ 202862 │ Total time of SELECT queries. │ │ FileOpen │ 40473 │ Number of files opened. │ │ Seek │ 100 │ Number of times the \u0026#39;lseek\u0026#39; function was called. │ │ ReadBufferFromFileDescriptorRead │ 67995 │ Number of reads (read/pread) from a file descriptor. Does not include sockets. 
│ └──────────────────────────────────┴────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ 启动 Opentelemetry-Collector 根据自身环境 配置 OpenTelemetry。 您可参照下面的例子:\notel-collector-config.yaml:\nreceivers: prometheus: config: scrape_configs: - job_name: \u0026#39;clickhouse-monitoring\u0026#39; scrape_interval: 15s static_configs: - targets: [\u0026#39;127.0.0.1:9363\u0026#39;,\u0026#39;127.0.0.1:9364\u0026#39;,\u0026#39;127.0.0.1:9365\u0026#39;] labels: host_name: prometheus-clickhouse processors: batch: exporters: otlp: endpoint: 127.0.0.1:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp 请着重关注:\n job_name: 'clickhouse-monitoring' 标记着来自 ClickHouse 的数据,如果自行修改,数据会被服务忽略。 host_name 定义服务的名称。 endpoint 指向您的 OAP 服务地址. ClickHouse、OpenTelemetry Collector 和 Skywalking OAP Server 之间的网络必须可访问。  如果进展顺利,几秒钟后刷新 Skywalking-ui 网页,您可以在数据库的菜单下看到 ClickHouse。\n启动成功日志样例:\n2024-03-12T03:57:39.407Z\tinfo\tservice@v0.93.0/telemetry.go:76\tSetting up own telemetry... 2024-03-12T03:57:39.412Z\tinfo\tservice@v0.93.0/telemetry.go:146\tServing metrics\t{\u0026quot;address\u0026quot;: \u0026quot;:8888\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;Basic\u0026quot;} 2024-03-12T03:57:39.416Z\tinfo\tservice@v0.93.0/service.go:139\tStarting otelcol...\t{\u0026quot;Version\u0026quot;: \u0026quot;0.93.0\u0026quot;, \u0026quot;NumCPU\u0026quot;: 4} 2024-03-12T03:57:39.416Z\tinfo\textensions/extensions.go:34\tStarting extensions... 2024-03-12T03:57:39.423Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:240\tStarting discovery manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:231\tScrape job added\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;, \u0026quot;jobName\u0026quot;: \u0026quot;clickhouse-monitoring\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tservice@v0.93.0/service.go:165\tEverything is ready. Begin running and processing data. 
2024-03-12T03:57:59.432Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:282\tStarting scrape manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} ClickHouse 监控面板 关于面板 这个仪表盘包含服务仪表盘和实例仪表盘。\n指标涵盖服务器、查询、网络、插入、副本、MergeTree、ZooKeeper 和内嵌 ClickHouse Keeper。\n服务仪表盘主要展示整个集群相关的指标。\n实例仪表盘主要展示单个实例相关的指标。\n关于指标 以下是ClickHouse实例指标的一些含义,前往了解完整的指标列表。\n   面板名称 单位 指标含义 数据源     CpuUsage count 操作系统每秒花费的 CPU 时间(根据 ClickHouse.system.dashboard.CPU 使用率(核心数))。 ClickHouse   MemoryUsage percentage 服务器分配的内存总量(字节)/操作系统内存总量。 ClickHouse   MemoryAvailable percentage 可用于程序的内存总量(字节)/操作系统内存总量。 ClickHouse   Uptime sec 服务器正常运行时间(以秒为单位)。它包括在接受连接之前进行服务器初始化所花费的时间。 ClickHouse   Version string 以 base-1000 样式展示的服务器版本。 ClickHouse   FileOpen count 打开的文件数。 ClickHouse     ZooKeeper 的指标在 ZooKeeper 管理集群时有效。 内嵌ClickHouse Keeper的指标在开启内嵌 ClickHouse Keeper 配置时有效。  参考文档  ClickHouse prometheus endpoint ClickHouse built-in observability dashboard ClickHouse Keeper  ","title":"使用 SkyWalking 监控 ClickHouse Server","url":"/zh/2024-03-12-monitoring-clickhouse-through-skywalking/"},{"content":"背景介绍 Apache RocketMQ 是一个开源的低延迟、高并发、高可用、高可靠的分布式消息中间件, 从SkyWalking OAP 10.0 版本开始, 新增了 对 RocketMQ Server的监控面板。本文将展示并介绍如何使用 Skywalking来监控RocketMQ\n部署 流程 通过RocketMQ官方提供的RocketMQ exporter来采集RocketMQ Server数据,再通过opentelmetry-collector来拉取RocketMQ exporter并传输到skywalking oap服务来处理\nDataFlow: 准备  Skywalking oap服务,v10.0 + RocketMQ v4.3.2 + RocketMQ exporter v0.0.2+ Opentelmetry-collector v0.87+  启动顺序  启动 RocketMQ namesrv 和 broker 启动 skywalking oap 和 ui 启动 RocketMQ exporter 启动 opentelmetry-collector  具体如何启动和配置请参考以上链接中官方教程.\n需要注意下的是 opentelmetry-collector 的配置文件.\njob_name: \u0026quot;rocketmq-monitoring\u0026quot; 请不要修改,否则 skywalking 不会处理这部分数据.\nrocketmq-exporter 替换成RocketMQ exporter 的地址.\nreplacement: rocketmq-cluster 中的rocketmq-cluster如果想要使用下文介绍的服务分层功能,请自行定义为其他服务层级相匹配的名称.\noap 为 skywalking oap 地址,请自行替换.\nreceivers: prometheus: config: scrape_configs: - job_name: \u0026quot;rocketmq-monitoring\u0026quot; scrape_interval: 30s static_configs: - targets: ['rocketmq-exporter:5557'] relabel_configs: - source_labels: [ ] target_label: cluster replacement: rocketmq-cluster exporters: otlp: endpoint: oap:11800 tls: insecure: true processors: batch: service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp 监控指标 指标分为 三个维度, cluster,broker,topic\ncluster监控 cluster 主要是站在集群的角度来统计展示,比如\nMessages Produced Today 今日集群产生的消息数\nMax CommitLog Disk Ratio 展示集群中磁盘使用率最高的broker\nTotal Producer Tps 集群生产者tps\nbroker 监控 broker 主要是站在节点的角度来统计展示,比如\nProduce Tps 节点生产者tps\nProducer Message Size(MB)节点生产消息大小\ntopic 监控 topic 主要是站在主题的角度来统计展示,比如\nConsumer Group Count 消费该主题的消费者组个数\nConsumer Latency(s) 消费者组的消费延时时间\nBacklogged Messages 消费者组消费消息堆积\n注意:topic 维度是整个 topic 来聚合,并不是在一个 broker 上的 topic 聚合,在 dashboard 上你也可以看到 broker 跟 topic 是平级的。\n各个指标的含义可以在图标的 tip 上找到解释\n更多指标可以参考文档\ndemo 已经在 skywalking showcase 上线,可以在上面看到展示效果\n服务分层 skywalking 10 新增了重要功能Service Hierarchy,接收来自不同层级的服务数据,比如 java agent 上报,k8s 监控数据或者 otel 的监控数据. 根据设置规则如果发现这些服务名称符合匹配规则,则可以将这些不同层级的服务联系起来。\n如下图所示:\nskywalking 采集部署在 k8s 的 RocketMQ 服务端的k8s 数据,并接收来自 otel 的 RocketMQ 服务端监控数据,根据匹配规则这些服务具有相同的服务名称,则可以在 ui 上观察到它们的联系\n","title":"使用 SkyWalking 监控 RocketMQ Server","url":"/zh/2024-02-29-rocketmq-monitoring-by-skywalking/"},{"content":"SkyWalking Go 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Add support ignore suffix for span name. 
Adding go 1.21 and 1.22 in docker image.  Plugins  Support setting a discard type of reporter. Add redis.max_args_bytes parameter for redis plugin. Changing intercept point for gin, make sure interfaces could be grouped when params defined in relativePath. Support RocketMQ MQ. Support AMQP MQ. support Echov4 framework.  Documentation Bug Fixes  Fix users can not use async api in toolkit-trace. Fix cannot enhance the vendor management project. Fix SW_AGENT_REPORTER_GRPC_MAX_SEND_QUEUE not working on metricsSendCh \u0026amp; logSendCh chans of gRPC reporter. Fix ParseVendorModule error for special case in vendor/modules.txt. Fix enhance method error when unknown parameter type. Fix wrong tracing context when trace have been sampled. Fix enhance param error when there are multiple params. Fix lost trace when multi middleware handlerFunc in gin plugin. Fix DBQueryContext execute error in sql plugin. Fix stack overflow as endless logs triggered.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Go 0.4.0","url":"/events/release-apache-skwaylking-go-0.4.0/"},{"content":"背景介绍 在 Scala 中,纯函数式中主要使用 Fiber,而不是线程,诸如 Cats-Effect、ZIO 等 Effect 框架。 您可以将 Fiber 视为轻量级线程,它是一种并发模型,由框架本身掌控控制权,从而消除了上下文切换的开销。 基于这些 Effect 框架开发的 HTTP、gRCP、GraphQL 库而开发的应用,我们一般称为 纯函数式应用程序。\n我们以 ZIO 为切入点, 演示 SkyWalking Scala 如何支持 Effect 生态。\nZIO Trace 首先,我们想要实现 Fiber 上下文传递,而不是监控 Fiber 本身。对于一个大型应用来说,可能存在成千上万个 Fiber,监控 Fiber 本身的意义不大。\n虽然 Fiber 的 Span 是在活跃时才会创建,但难免会有目前遗漏的场景,所以提供了一个配置 plugin.ziov2.ignore_fiber_regexes。 它将使用正则去匹配 Fiber location,匹配上的 Fiber 将不会创建 Span。\nFiber Span的信息如下:\n下面是我们使用本 ZIO 插件,和一些官方插件(hikaricp、jdbc、pulsar)完成的 Trace:\n分析 在 ZIO 中,Fiber可以有两种方式被调度,它们都是 zio.Executor 的子类。当然您也可以使用自己的线程池,这样也需被 ZIO 包装,其实就类似下面的 blockingExecutor。\nabstract class Executor extends ExecutorPlatformSpecific { self =\u0026gt; def submit(runnable: Runnable)(implicit unsafe: Unsafe): Boolean } 一种是系统默认线程池 defaultExecutor:\nprivate[zio] trait RuntimePlatformSpecific { final val defaultExecutor: Executor = Executor.makeDefault() } 另一种是专用于阻塞 IO 的线程池 blockingExecutor:\nprivate[zio] trait RuntimePlatformSpecific { final val defaultBlockingExecutor: Executor = Blocking.blockingExecutor } 默认线程池 defaultExecutor 对于 defaultExecutor,其本身是很复杂的,但它就是一个 ZIO 的 Fiber 调度(执行)器:\n/** * A `ZScheduler` is an `Executor` that is optimized for running ZIO * applications. Inspired by \u0026#34;Making the Tokio Scheduler 10X Faster\u0026#34; by Carl * Lerche. 
[[https://tokio.rs/blog/2019-10-scheduler]] */ private final class ZScheduler extends Executor 由于它们都是 zio.Executor 的子类,我们只需要对其及其子类进行增强:\nfinal val ENHANCE_CLASS = LogicalMatchOperation.or( HierarchyMatch.byHierarchyMatch(\u0026#34;zio.Executor\u0026#34;), MultiClassNameMatch.byMultiClassMatch(\u0026#34;zio.Executor\u0026#34;) ) 它们都是线程池,我们只需要在 zio.Executor 的 submit 方法上进行类似 ThreadPoolExecutor 上下文捕获的操作,可以参考 jdk-threadpool-plugin\n这里需要注意,因为 Fiber 也是一种 Runnable:\nprivate[zio] trait FiberRunnable extends Runnable { def location: Trace def run(depth: Int): Unit } zio-v2x-plugin\n阻塞线程池 blockingExecutor 对于 blockingExecutor,其实它只是对 Java 线程池进行了一个包装:\nobject Blocking { val blockingExecutor: zio.Executor = zio.Executor.fromThreadPoolExecutor { val corePoolSize = 0 val maxPoolSize = Int.MaxValue val keepAliveTime = 60000L val timeUnit = TimeUnit.MILLISECONDS val workQueue = new SynchronousQueue[Runnable]() val threadFactory = new NamedThreadFactory(\u0026#34;zio-default-blocking\u0026#34;, true) val threadPool = new ThreadPoolExecutor( corePoolSize, maxPoolSize, keepAliveTime, timeUnit, workQueue, threadFactory ) threadPool } } 由于其本身是对 ThreadPoolExecutor 的封装,所以,当我们已经实现了 zio.Executor 的增强后,只需要使用官方 jdk-threadpool-plugin 插件即可。 这里我们还想要对代码进行定制修改和复用,所以重新使用 Scala 实现了一个 executors-plugin 插件。\n串连 Fiber 上下文 最后,上面谈到过,Fiber 也是一种 Runnable,因此还需要对 zio.internal.FiberRunnable 进行增强。大致分为两点,其实与 jdk-threading-plugin 是一样的。\n 每次创建 zio.internal.FiberRunnable 实例时,都需要保存 现场,即构造函数增强。 每次运行时创建一个过渡的 Span,将当前线程上下文与之前保存在构造函数中的上下文进行关联。Fiber 可能被不同线程执行,所以这是必须的。  zio-v2x-plugin\n说明 当我们完成了对 ZIO Fiber 的上下文传播处理后,任意基于 ZIO 的应用层框架都可以按照普通的 Java 插件思路去开发。 我们只需要找到一个全局切入点,这个切入点应该是每个请求都会调用的方法,然后对这个方法进行增强。\n要想激活插件,只需要在 Release Notes 下载插件,放到您的 skywalking-agent/plugins 目录,重新启动服务即可。\n如果您的项目使用 sbt assembly 打包,您可以参考这个 示例。该项目使用了下列技术栈:\nlibraryDependencies ++= Seq( \u0026#34;io.d11\u0026#34; %% \u0026#34;zhttp\u0026#34; % zioHttp2Version, \u0026#34;dev.zio\u0026#34; %% \u0026#34;zio\u0026#34; % zioVersion, \u0026#34;io.grpc\u0026#34; % \u0026#34;grpc-netty\u0026#34; % \u0026#34;1.50.1\u0026#34;, \u0026#34;com.thesamet.scalapb\u0026#34; %% \u0026#34;scalapb-runtime-grpc\u0026#34; % scalapb.compiler.Version.scalapbVersion ) ++ Seq( \u0026#34;dev.profunktor\u0026#34; %% \u0026#34;redis4cats-effects\u0026#34; % \u0026#34;1.3.0\u0026#34;, \u0026#34;dev.profunktor\u0026#34; %% \u0026#34;redis4cats-log4cats\u0026#34; % \u0026#34;1.3.0\u0026#34;, \u0026#34;dev.profunktor\u0026#34; %% \u0026#34;redis4cats-streams\u0026#34; % \u0026#34;1.3.0\u0026#34;, \u0026#34;org.typelevel\u0026#34; %% \u0026#34;log4cats-slf4j\u0026#34; % \u0026#34;2.5.0\u0026#34;, \u0026#34;dev.zio\u0026#34; %% \u0026#34;zio-interop-cats\u0026#34; % \u0026#34;23.0.03\u0026#34;, \u0026#34;ch.qos.logback\u0026#34; % \u0026#34;logback-classic\u0026#34; % \u0026#34;1.2.11\u0026#34;, \u0026#34;dev.zio\u0026#34; %% \u0026#34;zio-cache\u0026#34; % zioCacheVersion ) ","title":"SkyWalking 如何支持 ZIO 等 Scala Effect Runtime","url":"/zh/2024-01-04-skywalking-for-scala-effect-runtime/"},{"content":"Xiang Wei(GitHub ID, weixiang1862) made a lot of significant contributions to SkyWalking since 2023. He made dozens of pull requests to multiple SkyWalking repositories, including very important features, such as Loki LogQL support, Nginx monitoring, MongoDB monitoring, as well as bug fixes, blog posts, and showcase updates.\nHere are the complete pull request list grouped by repositories.\nskywalking  Support Nginx monitoring. (https://github.com/apache/skywalking/pull/11558) Fix JDBC Log query order. 
(https://github.com/apache/skywalking/pull/11544) Isolate MAL CounterWindow cache by metric name.(https://github.com/apache/skywalking/pull/11526) Support extract timestamp from patterned datetime string in LAL.(https://github.com/apache/skywalking/pull/11489) Adjust AlarmRecord alarmMessage column length to 512. (https://github.com/apache/skywalking/pull/11404) Use listening mode for Apollo configuration.(https://github.com/apache/skywalking/pull/11186) Support LogQL HTTP query APIs. (https://github.com/apache/skywalking/pull/11168) Support MongoDB monitoring (https://github.com/apache/skywalking/pull/11111) Support reduce aggregate function in MQE.(https://github.com/apache/skywalking/pull/11036) Fix instance query in JDBC implementation.(https://github.com/apache/skywalking/pull/11024) Fix metric session cache saving after batch insert when using mysql-connector-java.(https://github.com/apache/skywalking/pull/11012) Add component ID for WebSphere.(https://github.com/apache/skywalking/pull/10974) Support sumLabeled in MAL (https://github.com/apache/skywalking/pull/10916)  skywalking-java  Optimize plugin selector logic.(https://github.com/apache/skywalking-java/pull/651) Fix config length limitation.(https://github.com/apache/skywalking-java/pull/623) Optimize spring-cloud-gateway 2.1.x, 3.x witness class.(https://github.com/apache/skywalking-java/pull/610) Add WebSphere Liberty 23.x plugin.(https://github.com/apache/skywalking-java/pull/560)  skywalking-swck  Remove SwAgent default env JAVA_TOOL_OPTIONS.(https://github.com/apache/skywalking-swck/pull/106) Fix panic in storage reconciler.(https://github.com/apache/skywalking-swck/pull/94) Support inject java agent bootstrap-plugins.(https://github.com/apache/skywalking-swck/pull/91) Fix number env value format error in template yaml.(https://github.com/apache/skywalking-swck/pull/90)  skywalking-showcase  Nginx monitoring showcase.(https://github.com/apache/skywalking-showcase/pull/153) LogQL showcase. (https://github.com/apache/skywalking-showcase/pull/146) MongoDB monitoring showcase. (https://github.com/apache/skywalking-showcase/pull/144)  skywalking-website  Add blog: monitoring-nginx-by-skywalking.(https://github.com/apache/skywalking-website/pull/666) Add blog: collect and analyse nginx access log by LAL.(https://github.com/apache/skywalking-website/pull/652) Add blog: integrating-skywalking-with-arthas.(https://github.com/apache/skywalking-website/pull/641)   On Dec. 28th, 2023, the project management committee (PMC) passed the proposal of promoting him as a new committer. He accepted the invitation on the same day.\nWelcome to the committer team, Xiang Wei! 
We are honored to have you in the team.\n","title":"Welcome Xiang Wei as new committer","url":"/events/welcome-xiang-wei-as-new-committer/"},{"content":"Background Apache SkyWalking is an open-source application performance management system that helps users collect and aggregate logs, traces, metrics, and events, and display them on the UI.\nIn order to achieve monitoring capabilities for Nginx, we have introduced the Nginx monitoring dashboard in SkyWalking 9.7, and this article will demonstrate the use of this monitoring dashboard and introduce the meaning of related metrics.\nSetup Monitoring Dashboard Metric Define and Collection Since nginx-lua-prometheus is used to define and expose metrics, we need to install lua_nginx_module for Nginx, or use OpenResty directly.\nIn the following example, we define four metrics via nginx-lua-prometheus and expose the metrics interface via nginx ip:9145/metrics:\n histogram: nginx_http_latency,monitoring http latency gauge: nginx_http_connections,monitoring nginx http connections counter: nginx_http_size_bytes,monitoring http size of request and response counter: nginx_http_requests_total,monitoring total http request numbers  http { log_format main '$remote_addr - $remote_user [$time_local] \u0026quot;$request\u0026quot; ' '$status $body_bytes_sent \u0026quot;$http_referer\u0026quot; ' '\u0026quot;$http_user_agent\u0026quot; \u0026quot;$http_x_forwarded_for\u0026quot;'; access_log /var/log/nginx/access.log main; lua_shared_dict prometheus_metrics 10M; # lua_package_path \u0026quot;/path/to/nginx-lua-prometheus/?.lua;;\u0026quot;; init_worker_by_lua_block { prometheus = require(\u0026quot;prometheus\u0026quot;).init(\u0026quot;prometheus_metrics\u0026quot;) metric_bytes = prometheus:counter( \u0026quot;nginx_http_size_bytes\u0026quot;, \u0026quot;Total size of HTTP\u0026quot;, {\u0026quot;type\u0026quot;, \u0026quot;route\u0026quot;}) metric_requests = prometheus:counter( \u0026quot;nginx_http_requests_total\u0026quot;, \u0026quot;Number of HTTP requests\u0026quot;, {\u0026quot;status\u0026quot;, \u0026quot;route\u0026quot;}) metric_latency = prometheus:histogram( \u0026quot;nginx_http_latency\u0026quot;, \u0026quot;HTTP request latency\u0026quot;, {\u0026quot;route\u0026quot;}) metric_connections = prometheus:gauge( \u0026quot;nginx_http_connections\u0026quot;, \u0026quot;Number of HTTP connections\u0026quot;, {\u0026quot;state\u0026quot;}) } server { listen 8080; location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } } server { listen 9145; location /metrics { content_by_lua_block { metric_connections:set(ngx.var.connections_reading, {\u0026quot;reading\u0026quot;}) metric_connections:set(ngx.var.connections_waiting, {\u0026quot;waiting\u0026quot;}) metric_connections:set(ngx.var.connections_writing, {\u0026quot;writing\u0026quot;}) prometheus:collect() } } } } In the above example, we exposed the route-level metrics, and you can also choose to expose the host-level metrics according to the monitoring granularity:\nhttp { log_by_lua_block 
{ metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or upstream-level metrics:\nupstream backend { server ip:port; } server { location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } After defining the metrics, we start nginx and opentelemetry-collector to collect the metrics and send them to the SkyWalking backend for analysis and storage.\nPlease ensure that job_name: 'nginx-monitoring', otherwise the reported data will be ignored by SkyWalking. If you have multiple Nginx instances, you can distinguish them using the service and service_instance_id labels:\nreceivers: prometheus: config: scrape_configs: - job_name: 'nginx-monitoring' scrape_interval: 5s metrics_path: \u0026quot;/metrics\u0026quot; static_configs: - targets: ['nginx:9145'] labels: service: nginx service_instance_id: nginx-instance processors: batch: exporters: otlp: endpoint: oap:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp If everything goes well, you will see the metric data reported by Nginx under the gateway menu of the skywalking-ui:\nAccess \u0026amp; Error Log Collection SkyWalking Nginx monitoring provides log collection and error log analysis. We can use fluent-bit to collect and report access logs and error logs to SkyWalking for analysis and storage.\nFluent-bit configuration below defines the log collection directory as /var/log/nginx/. 
The access and error logs will be reported through rest port 12800 of oap after being processed by rewrite_access_log and rewrite_error_log functions:\n[SERVICE] Flush 5 Daemon Off Log_Level warn [INPUT] Name tail Tag access Path /var/log/nginx/access.log [INPUT] Name tail Tag error Path /var/log/nginx/error.log [FILTER] Name lua Match access Script fluent-bit-script.lua Call rewrite_access_log [FILTER] Name lua Match error Script fluent-bit-script.lua Call rewrite_error_log [OUTPUT] Name stdout Match * Format json [OUTPUT] Name http Match * Host oap Port 12800 URI /v3/logs Format json In the fluent-bit-script.lua, we use LOG_KIND tag to distinguish between access logs and error logs.\nTo associate with the metrics, please ensure that the values of service and serviceInstance are consistent with the metric collection definition in the previous section.\nfunction rewrite_access_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ACCESS_LOG\u0026quot;}}} return 1, timestamp, newRecord end function rewrite_error_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ERROR_LOG\u0026quot; }}} return 1, timestamp, newRecord end After starting fluent-it, we can see the collected log information in the Log tab of the monitoring panel:\nMeaning of Metrics    Metric Name Unit Description Data Source     HTTP Request Trend  The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  The avg number of the connections nginx-lua-prometheus   HTTP Status Trend % The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % The percentage of 4xx status of HTTP requests nginx-lua-prometheus   Error Log Count  The count of log level of nginx error.log fluent-bit    References  nginx-lua-prometheus fluent-bit-lua-filter skywalking-apisix-monitoring  ","title":"Monitoring Nginx with SkyWalking","url":"/blog/2023-12-23-monitoring-nginx-by-skywalking/"},{"content":"背景介绍 在前面的 Blog 使用 LAL 收集并分析 Nginx access log 中,我们以 Nginx access log 为切入点, 演示了 SkyWalking LAL 的日志分析能力。\n为了实现对 Nginx 更全面的监控能力,我们在 SkyWalking 9.7 中引入了 Nginx 监控面板,本文将演示该监控面板的使用,并介绍相关指标的含义。\n监控面板接入 Metric 定义与采集 由于使用了 nginx-lua-prometheus 来定义及暴露指标, 我们需要为 Nginx 安装 lua_nginx_module, 或者直接使用OpenResty。\n下面的例子中,我们通过 nginx-lua-prometheus 定义了四个指标,并通过 ip:9145/metrics 暴露指标接口:\n histogram: nginx_http_latency,监控 http 延时 gauge: nginx_http_connections,监控 http 连接数 counter: nginx_http_size_bytes,监控 http 请求和响应大小 counter: 
nginx_http_requests_total,监控 http 请求次数  http { log_format main '$remote_addr - $remote_user [$time_local] \u0026quot;$request\u0026quot; ' '$status $body_bytes_sent \u0026quot;$http_referer\u0026quot; ' '\u0026quot;$http_user_agent\u0026quot; \u0026quot;$http_x_forwarded_for\u0026quot;'; access_log /var/log/nginx/access.log main; lua_shared_dict prometheus_metrics 10M; # lua_package_path \u0026quot;/path/to/nginx-lua-prometheus/?.lua;;\u0026quot;; init_worker_by_lua_block { prometheus = require(\u0026quot;prometheus\u0026quot;).init(\u0026quot;prometheus_metrics\u0026quot;) metric_bytes = prometheus:counter( \u0026quot;nginx_http_size_bytes\u0026quot;, \u0026quot;Total size of HTTP\u0026quot;, {\u0026quot;type\u0026quot;, \u0026quot;route\u0026quot;}) metric_requests = prometheus:counter( \u0026quot;nginx_http_requests_total\u0026quot;, \u0026quot;Number of HTTP requests\u0026quot;, {\u0026quot;status\u0026quot;, \u0026quot;route\u0026quot;}) metric_latency = prometheus:histogram( \u0026quot;nginx_http_latency\u0026quot;, \u0026quot;HTTP request latency\u0026quot;, {\u0026quot;route\u0026quot;}) metric_connections = prometheus:gauge( \u0026quot;nginx_http_connections\u0026quot;, \u0026quot;Number of HTTP connections\u0026quot;, {\u0026quot;state\u0026quot;}) } server { listen 8080; location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } } server { listen 9145; location /metrics { content_by_lua_block { metric_connections:set(ngx.var.connections_reading, {\u0026quot;reading\u0026quot;}) metric_connections:set(ngx.var.connections_waiting, {\u0026quot;waiting\u0026quot;}) metric_connections:set(ngx.var.connections_writing, {\u0026quot;writing\u0026quot;}) prometheus:collect() } } } } 上面的例子中,我们暴露了 route 级别的指标,你也可以根据监控粒度的需要,选择暴露 host 指标:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } 或者 upstream 指标:\nupstream backend { server ip:port; } server { location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } 完成指标定义后,我们启动 nginx 和 opentelemetry-collector,将指标采集到 SkyWalking 后端进行分析和存储。\n请确保job_name: 'nginx-monitoring',否则上报的数据将被 SkyWalking 忽略。如果你有多个 Nginx 实例,你可以通过service及service_instance_id这两个 label 进行区分:\nreceivers: prometheus: config: scrape_configs: - job_name: 'nginx-monitoring' scrape_interval: 5s metrics_path: \u0026quot;/metrics\u0026quot; static_configs: - 
targets: ['nginx:9145'] labels: service: nginx service_instance_id: nginx-instance processors: batch: exporters: otlp: endpoint: oap:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp 如果一切顺利,你将在 skywalking-ui 的网关菜单下看到 nginx 上报的指标数据:\nAccess \u0026amp; Error Log 采集 SkyWalking Nginx 监控提供了日志采集及错误日志统计功能,我们可以借助 fluent-bit 采集并上报 access log、error log 给 SkyWalking 分析存储。\n下面 fluent-bit 配置定义了日志采集目录为/var/log/nginx/,access 和 error log 经过 rewrite_access_log 和 rewrite_error_log 处理后会通过 oap 12800 端口进行上报:\n[SERVICE] Flush 5 Daemon Off Log_Level warn [INPUT] Name tail Tag access Path /var/log/nginx/access.log [INPUT] Name tail Tag error Path /var/log/nginx/error.log [FILTER] Name lua Match access Script fluent-bit-script.lua Call rewrite_access_log [FILTER] Name lua Match error Script fluent-bit-script.lua Call rewrite_error_log [OUTPUT] Name stdout Match * Format json [OUTPUT] Name http Match * Host oap Port 12800 URI /v3/logs Format json 在 fluent-bit-script.lua 中,我们通过 LOG_KIND 来区分 access log 和 error log。\n为了能够关联上文采集的 metric,请确保 service 和 serviceInstance 值与上文中指标采集定义一致。\nfunction rewrite_access_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ACCESS_LOG\u0026quot;}}} return 1, timestamp, newRecord end function rewrite_error_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ERROR_LOG\u0026quot; }}} return 1, timestamp, newRecord end 启动 fluent-it 后,我们便可以在监控面板的 Log tab 看到采集到的日志信息:\n面板指标含义    面板名称 单位 指标含义 数据源     HTTP Request Trend  每秒钟平均请求数 nginx-lua-prometheus   HTTP Latency ms 平均响应延时 nginx-lua-prometheus   HTTP Bandwidth KB 请求响应流量 nginx-lua-prometheus   HTTP Connections  nginx http 连接数 nginx-lua-prometheus   HTTP Status Trend % 每分钟 http 状态码统计 nginx-lua-prometheus   HTTP Status 4xx Percent % 4xx状态码比例 nginx-lua-prometheus   HTTP Status 5xx Percent % 5xx状态码比例 nginx-lua-prometheus   Error Log Count  每分钟错误日志数统计 fluent-bit    参考文档  nginx-lua-prometheus fluent-bit-lua-filter skywalking-apisix-monitoring  ","title":"使用 SkyWalking 监控 Nginx","url":"/zh/2023-12-23-monitoring-nginx-by-skywalking/"},{"content":"🚀 Dive into the World of Cutting-Edge Technology with Apache\u0026rsquo;s Finest! 🌐 Join me today as we embark on an exhilarating journey with two of Apache\u0026rsquo;s most brilliant minds - Sheng Wu and Trista Pan. We\u0026rsquo;re exploring the realms of Apache SkyWalking and Apache ShardingSphere, two groundbreaking initiatives that are reshaping the landscape of open-source technology. 🌟\nIn this exclusive session, we delve deep into Apache SkyWalking - an innovative observability platform that\u0026rsquo;s revolutionizing how we monitor and manage distributed systems in the cloud. 
Witness firsthand how SkyWalking is empowering developers and organizations to gain unparalleled insights into their applications, ensuring performance, reliability, and efficient troubleshooting. 🛰️🔍\nBut there\u0026rsquo;s more! We\u0026rsquo;re also unveiling the secrets of Apache ShardingSphere, a dynamic distributed database ecosystem. Learn how ShardingSphere is making waves in the world of big data, offering scalable, high-performance solutions for data sharding, encryption, and more. This is your gateway to understanding how these technologies are pivotal in handling massive data sets across various industries. 🌐💾\nWhether you\u0026rsquo;re a developer, tech enthusiast, or just curious about the future of open-source technology, this is a conversation you don\u0026rsquo;t want to miss! Get ready to be inspired and informed as we unlock new possibilities and applications of Apache SkyWalking and ShardingSphere. 🚀🌟\nJoin us, and let\u0026rsquo;s decode the future together!\n  Please join and follow Josh\u0026rsquo;s 龙之春 Youtube Coffee + Software with Josh Long Channel to learn more about technology and open source from telanted engineers and industry leads.\n","title":"[Video] Coffee + Software with Josh Long - Apache SkyWalking with Sheng Wu and Apache ShardingSphere with Trista Pan","url":"/blog/2023-12-04-coffee+software-with-josh-long/"},{"content":"SkyWalking CLI 0.13.0 is released. Go to downloads page to find release tars.\nFeatures  Add the sub-command menu get for get the ui menu items by @mrproliu in https://github.com/apache/skywalking-cli/pull/187  Bug Fixes  Fix the record list query does not support new OAP versions (with major version number \u0026gt; 9).  ","title":"Release Apache SkyWalking CLI 0.13.0","url":"/events/release-apache-skywalking-cli-0-13-0/"},{"content":"SkyWalking Java Agent 9.1.0 is released. Go to downloads page to find release tars. Changes by Version\n9.1.0  Fix hbase onConstruct NPE in the file configuration scenario Fix the issue of createSpan failure caused by invalid request URL in HttpClient 4.x/5.x plugin Optimize ElasticSearch 6.x 7.x plugin compatibility Fix an issue with the httpasyncclient component where the isError state is incorrect. Support customization for the length limitation of string configurations Add max length configurations in agent.config file for service_name and instance_name Optimize spring-cloud-gateway 2.1.x, 3.x witness class. Support report MongoDB instance info in Mongodb 4.x plugin. To compatible upper and lower case Oracle TNS url parse. Support collecting ZGC memory pool metrics. Require OAP 9.7.0 to support these new metrics. Upgrade netty-codec-http2 to 4.1.100.Final Add a netty-http 4.1.x plugin to trace HTTP requests. Fix Impala Jdbc URL (including schema without properties) parsing exception. Optimize byte-buddy type description performance. Add eclipse-temurin:21-jre as another base image. Bump byte-buddy to 1.14.9 for JDK21 support. Add JDK21 plugin tests for Spring 6. Bump Lombok to 1.18.30 to adopt JDK21 compiling. Fix PostgreSQL Jdbc URL parsing exception. Bump up grpc version. Optimize plugin selector logic.  Documentation  Fix JDK requirement in the compiling docs. Add JDK21 support in the compiling docs.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 9.1.0","url":"/events/release-apache-skywalking-java-agent-9-1-0/"},{"content":"SkyWalking 9.7.0 is released. 
Go to downloads page to find release tars.\nDark Mode The dafult style mode is changed to the dark mode, and light mode is still available.\nNew Design Log View A new design for the log view is currently available. Easier to locate the logs, and more space for the raw text.\nProject  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. 
Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.7.0","url":"/events/release-apache-skywalking-apm-9.7.0/"},{"content":"SkyWalking Summit 2023 @ Shanghai 会议时间:2023年11月4日 全天 地点:上海大华虹桥假日酒店 赞助商:纵目科技,Tetrate\n会议议程 与 PDF SkyWalking V9 In 2023 - 5 featured releases  吴晟 PDF  B站视频地址\n使用 Terraform 与 Ansible 快速部署 SkyWalking 集群  柯振旭 PDF  B站视频地址\n基于SkyWalking构建全域一体化观测平台  陈修能 PDF  B站视频地址\n云原生可观测性数据库BanyanDB  高洪涛 PDF  B站视频地址\n基于 SkyWalking Agent 的性能剖析和实时诊断  陆家靖 PDF  B站视频地址\n太保科技-多云环境下Zabbix的运用实践  田川 PDF  B站视频地址\nKubeSphere 在可观测性领域的探索与实践  霍秉杰 PDF  B站视频地址\n大型跨国企业的微服务治理  张文杰 PDF  B站视频地址\n","title":"SkyWalking Summit 2023 @ Shanghai 会议回顾","url":"/zh/2023-11-04-skywalking-summit-shanghai/"},{"content":"SkyWalking Infra E2E 1.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support sha256enc and sha512enc encoding in verify case. Support hasPrefix and hasSuffix string verifier in verify case. Bump up kind to v0.14.0. Add a field kubeconfig to support running e2e test on an existing kubernetes cluster. 
Support non-fail-fast execution of test cases support verify cases concurrently Add .exe suffix to windows build artifact Export the kubeconfig path during executing the following steps Automatically pull images before loading into KinD Support outputting the result of \u0026lsquo;verify\u0026rsquo; in YAML format and only outputting the summary of the result of \u0026lsquo;verify\u0026rsquo; Make e2e test itself in github action Support outputting the summary of \u0026lsquo;verify\u0026rsquo; in YAML format Make e2e output summary with numeric information Add \u0026lsquo;subtractor\u0026rsquo; function  Improvements  Bump up GHA to avoid too many warnings Leverage the built-in cache in setup-go@v4 Add batchOutput config to reduce outputs Disable batch mode by default, add it to GHA and enable by default Improve GitHub Actions usability and speed by using composite actions' new feature Migrate deprecated GitHub Actions command to recommended ones Bump up kind to v0.14.0 Optimization of the output information of verification verifier: notEmpty should be able to handle nil Remove invalid configuration in GitHub Actions  Bug Fixes  Fix deprecation warnings Ignore cancel error when copying container logs  Documentation  Add a doc to introduce how to use e2e to test itself  Issues and PR  All issues are here All pull requests are here  ","title":"Release Apache SkyWalking Infra E2E 1.3.0","url":"/events/release-apache-skywalking-infra-e2e-1-3-0/"},{"content":"Aapche SkyWalking PMC 和 committer团队参加了\u0026quot;开源之夏 2023\u0026quot;活动,作为导师,共获得了9个官方赞助名额。最终对学生开放如下任务\n SkyWalking 支持 GraalVM Skywalking Infra E2E 自测试 监控Apache Pulsar 统一BanyanDB的查询计划和查询执行器 使用Helm部署BanyanDB 编写go agent的gRPC插件 监控Kafka 集成SkyWalking PHP到SkyWalking E2E 测试 在线黄金指标异常检测  经过3个月的开发,上游评审,PMC成员评议,PMC Chair复议,OSPP官方委员会评审多个步骤,现公布项目参与人员与最终结果\n通过评审项目(共6个) SkyWalking 支持 GraalVM  学生:张跃骎 学校:辽宁大学 本科 合并PR:11354 后续情况说明:GraalVM因为复杂的生态,替代的代码将被分离到SkyWalking GraalVM Distro, 相关讨论,请参见Issue 11518  Skywalking Infra E2E 自测试  学生:王子忱 学校:华中师范大学 本科 合并PR:115, 116, 117, 118, 119 后续情况说明:此特性已经包含在发行版skywalking-infra-e2e v1.3.0中  统一BanyanDB的查询计划和查询执行器  学生:曾家华 学校:电子科技大学 本科 合并PR:343  使用Helm部署BanyanDB  学生:黄友亮 学校:北京邮电大学 硕士研究生 合并PR:1 情况说明:因为BanyanDB Helm为新项目,学生承接了项目初始化、功能提交、自动化测试,发布准备等多项任务。所参与功能包含在skywalking-banyandb-helm v0.1.0中  编写go agent的gRPC插件  学生:胡宇腾 学校:西安邮电大学 合并PR:88, 94 后续情况说明:该学生在开源之夏相关项目外,完成了feature: add support for iris #99和Go agent APIs功能开发。并发表文章SkyWalking Go Toolkit Trace 详解以及英文译本Detailed explanation of SkyWalking Go Toolkit Trace  监控Kafka  学生:王竹 学校:美国东北大学 ( Northeastern University) 合并PR:11282, UI 318  未通过评审项目(3个) 下列项目因为质量无法达到社区要求,违规等原因,将被标定为失败。 注:在开源之夏中失败的项目,其Pull Reqeust可能因为符合社区功能要求,也被接受合并。\n监控Apache Pulsar  学生:孟祥迎 学校:重庆邮电大学 本科 合并PR:11339 失败原因:项目申请成员,作为ASF Pulsar项目的Committer,在担任Pulsar开源之夏项目导师期间,但依然申请了学生参与项目。属于违规行为。SkyWalking PMC审查了此行为并通报开源之夏组委会。开源之夏组委会依据活动规则取消其结项奖金。  集成SkyWalking PHP到SkyWalking E2E 测试  学生:罗文 学校:San Jose State University B.S. 
合并PR:11330 失败原因:根据pull reqeust中的提交记录,SkyWalking PMC Chair审查了提交明细,学生参与代码数量大幅度小于导师的提交代码。并在考虑到这个项目难度以及明显低于SkyWalking 开源之夏项目的平均水平的情况下,通报给开源之夏组委会。经过组委会综合评定,项目不合格。  在线黄金指标异常检测  学生:黄颖 学校:同济大学 研究生 合并PR:无 失败原因:项目在进度延迟后实现较为简单且粗糙,并且没有提供算法评估结果和文档等。在 PR 开启后的为期一个月审核合并期间,学生并未能成功按预定计划改善实现的质量和文档。和导师以及 SkyWalking 社区缺少沟通。  结语 SkyWalking社区每年都有近10位PMC成员或Committer参与开源之夏中,帮助在校学生了解顶级开源项目、开源社区的运作方式。我们希望大家在每年经过3个月的时间,能够真正的帮助在校学生了解开源和参与开源。 因为,社区即使在考虑到学生能力的情况下,不会明显的降低pull request的接受标准。希望今后的学生,能够在早期,积极、主动和导师,社区其他成员保持高频率的沟通,对参与的项目有更深入、准确的了解。\n","title":"开源之夏 2023 SkyWalking 社区项目情况公示","url":"/zh/2023-11-09-ospp-summary/"},{"content":"SkyWalking NodeJS 0.7.0 is released. Go to downloads page to find release tars.\n Add deadline config for trace request (#118)  ","title":"Release Apache SkyWalking for NodeJS 0.7.0","url":"/events/release-apache-skywalking-nodejs-0-7-0/"},{"content":"背景介绍 Nginx access log 中包含了丰富的信息,例如:日志时间、状态码、响应时间、body 大小等。通过收集并分析 access log,我们可以实现对 Nginx 中接口状态的监控。\n在本案例中,将由 fluent-bit 收集 access log,并通过 HTTP 将日志信息发送给 SkyWalking OAP Server 进行进一步的分析。\n环境准备 实验需要的 Nginx 及 Fluent-bit 相关配置文件都被上传到了Github,有需要的读者可以自行 git clone 并通过 docker compose 启动,本文中将介绍配置文件中几个关键点。\nNginx日志格式配置 LAL 目前支持 JSON、YAML 及 REGEX 日志解析,为了方便获取到日志中的指标字段,我们将 Nginx 的日志格式定义为 JSON.\nhttp { ... ... log_format main '{\u0026quot;remote_addr\u0026quot;: \u0026quot;$remote_addr\u0026quot;,' '\u0026quot;remote_user\u0026quot;: \u0026quot;$remote_user\u0026quot;,' '\u0026quot;request\u0026quot;: \u0026quot;$request\u0026quot;,' '\u0026quot;time\u0026quot;: \u0026quot;$time_iso8601\u0026quot;,' '\u0026quot;status\u0026quot;: \u0026quot;$status\u0026quot;,' '\u0026quot;request_time\u0026quot;:\u0026quot;$request_time\u0026quot;,' '\u0026quot;body_bytes_sent\u0026quot;: \u0026quot;$body_bytes_sent\u0026quot;,' '\u0026quot;http_referer\u0026quot;: \u0026quot;$http_referer\u0026quot;,' '\u0026quot;http_user_agent\u0026quot;: \u0026quot;$http_user_agent\u0026quot;,' '\u0026quot;http_x_forwarded_for\u0026quot;: \u0026quot;$http_x_forwarded_for\u0026quot;}'; access_log /var/log/nginx/access.log main; ... ... 
} Fluent bit Filter 我们通过 Fluent bit 的 lua filter 进行日志格式的改写,将其调整为 SkyWalking 所需要的格式,record的各个字段含义如下:\n body:日志内容体 service:服务名称 serviceInstance:实例名称  function rewrite_body(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;body\u0026quot;] = { json = { json = record.log } } newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;localhost\u0026quot; return 1, timestamp, newRecord end OAP 日志分析 LAL定义 在 filter 中,我们通过条件判断,只处理 service=nginx::nginx 的服务,其他服务依旧走默认逻辑:\n第一步,使用 json 指令对日志进行解析,解析的结果会被存放到 parsed 字段中,通过 parsed 字段我们可以获取 json 日志中的字段信息。\n第二步,使用 timestamp 指令解析 parsed.time 并将其赋值给日志的 timestamp 字段,这里的 time 就是access log json 中的 time。\n第三步,使用 tag 指令给日志打上对应的标签,标签的值依然可以通过 parsed 字段获取。\n第四步,使用 metrics 指令从日志中提取出指标信息,我们共提取了四个指标:\n nginx_log_count:Nginx 每次请求都会生成一条 access log,该指标可以帮助我们统计 Nginx 当前的请求数。 nginx_request_time:access log 中会记录请求时间,该指标可以帮助我们统计上游接口的响应时长。 nginx_body_bytes_sent:body 大小指标可以帮助我们了解网关上的流量情况。 nginx_status_code:状态码指标可以实现对状态码的监控,如果出现异常上涨可以结合 alarm 进行告警。  rules:- name:defaultlayer:GENERALdsl:|filter { if (log.service == \u0026#34;nginx::nginx\u0026#34;) { json { abortOnFailure true }extractor {timestamp parsed.time as String, \u0026#34;yyyy-MM-dd\u0026#39;T\u0026#39;HH:mm:ssXXX\u0026#34;tag status:parsed.statustag remote_addr:parsed.remote_addrmetrics {timestamp log.timestamp as Longlabels service: log.service, instance:log.serviceInstancename \u0026#34;nginx_log_count\u0026#34;value 1}metrics {timestamp log.timestamp as Longlabels service: log.service, instance:log.serviceInstancename \u0026#34;nginx_request_time\u0026#34;value parsed.request_time as Double}metrics {timestamp log.timestamp as Longlabels service: log.service, instance:log.serviceInstancename \u0026#34;nginx_body_bytes_sent\u0026#34;value parsed.body_bytes_sent as Long}metrics {timestamp log.timestamp as Longlabels service: log.service, instance: log.serviceInstance, status:parsed.statusname \u0026#34;nginx_status_code\u0026#34;value 1}}}sink {}}经过 LAL 处理后,我们已经可以在日志面板看到日志信息了,接下来我们将对 LAL 中提取的指标进行进一步分析:\nMAL定义 在 MAL 中,我们可以对上一步 LAL 中提取的指标进行进一步的分析聚合,下面的例子里:\nnginx_log_count、nginx_request_time、nginx_status_code 使用 sum 聚合函数处理,并使用 SUM 方式 downsampling,\nnginx_request_time 使用 avg 聚合函数求平均值,默认使用 AVG 方式 downsampling。\n完成聚合分析后,SkyWalking Meter System 会完成对上述指标的持久化。\nexpSuffix:service([\u0026#39;service\u0026#39;], Layer.GENERAL)metricPrefix:nginxmetricsRules:- name:cpmexp:nginx_log_count.sum([\u0026#39;service\u0026#39;]).downsampling(SUM)- name:avg_request_timeexp:nginx_request_time.avg([\u0026#39;service\u0026#39;])- name:body_bytes_sent_countexp:nginx_body_bytes_sent.sum([\u0026#39;service\u0026#39;]).downsampling(SUM)- name:status_code_countexp:nginx_status_code.sum([\u0026#39;service\u0026#39;,\u0026#39;status\u0026#39;]).downsampling(SUM)最后,我们便可以来到 SkyWalking UI 页面新建 Nginx 仪表板,使用刚刚 MAL 中定义的指标信息创建 Nginx Dashboard(也可以通过上文提到仓库中的 dashboard.json 直接导入测试):\n参考文档  Fluent Bit lua Filter Log Analysis Language Meter Analysis Language  ","title":"使用 LAL 收集并分析 Nginx access log","url":"/zh/2023-10-29-collect-and-analyse-nginx-accesslog-by-lal/"},{"content":"SkyWalking BanyanDB 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  List all properties in a group. Implement Write-ahead Logging Document the clustering. Support multiple roles for banyand server. Support for recovery buffer using wal. Register the node role to the metadata registry. Implement the remote queue to spreading data to data nodes. 
Fix parse environment variables error Implement the distributed query engine. Add mod revision check to write requests. Add TTL to the property. Implement node selector (e.g. PickFirst Selector, Maglev Selector). Unified the buffers separated in blocks to a single buffer in the shard.  Bugs  BanyanDB ui unable to load icon. BanyanDB ui type error Fix timer not released BanyanDB ui misses fields when creating a group Fix data duplicate writing Syncing metadata change events from etcd instead of a local channel.  Chores  Bump several dependencies and tools. Drop redundant \u0026ldquo;discovery\u0026rdquo; module from banyand. \u0026ldquo;metadata\u0026rdquo; module is enough to play the node and shard discovery role.  ","title":"Release Apache SkyWalking BanyanDB 0.5.0","url":"/events/release-apache-skywalking-banyandb-0-5-0/"},{"content":"SkyWalking Go 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support manual tracing APIs for users.  Plugins  Support mux HTTP server framework. Support grpc server and client framework. Support iris framework.  Documentation  Add Tracing APIs document into Manual APIs.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Go 0.3.0","url":"/events/release-apache-skwaylking-go-0.3.0/"},{"content":"Background SkyWalking Go is an open-source, non-intrusive Golang agent used for monitoring, tracing, and data collection within distributed systems. It enables users to observe the flow and latency of requests within the system, collect performance data from various system components for performance monitoring, and troubleshoot issues by tracing the complete path of requests.\nIn version v0.3.0, Skywalking Go introduced the toolkit trace tool. Trace APIs allow users to include critical operations, functions, or services in the tracing scope in situations where plugins do not support them. This inclusion enables tracking and monitoring of these operations and can be used for fault analysis, diagnosis, and performance monitoring.\nBefore diving into this, you can learn how to use the Skywalking Go agent by referring to the SkyWalking Go Agent Quick Start Guide.\nThe following sections will explain how to use these interfaces in specific scenarios.\nIntroducing the Trace Toolkit Execute the following command in the project\u0026rsquo;s root directory:\ngo get github.com/apache/skywalking-go/toolkit To use the toolkit trace interface, you need to import the package into your project:\n\u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Manual Tracing A Span is the fundamental unit of an operation in Tracing. It represents an operation within a specific timeframe, such as a request, a function call, or a specific action. It records essential information about a particular operation, including start and end times, the operation\u0026rsquo;s name, tags (key-value pairs), and relationships between operations. Multiple Spans can form a hierarchical structure.\nIn situations where Skywalking-go doesn\u0026rsquo;t support a particular framework, users can manually create Spans to obtain tracing information.\n(Here, I have removed the supported frameworks for the sake of the example. These are only examples. 
You should reference this when using the APIs in private and/or unsupported frameworks)\nFor example, when you need to trace an HTTP response, you can create a span using trace.CreateEntrySpan() within the method handling the request, and end the span using trace.StopSpan() after processing. When sending an HTTP request, use trace.CreateExitSpan() to create a span, and end the span after the request returns.\nHere are two HTTP services named consumer and provider. When a user accesses the consumer service, it receives the user\u0026rsquo;s request internally and then accesses the provider to obtain resources.\n// consumer.go package main import ( \u0026#34;io\u0026#34; \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func getProvider() (*http.Response, error) { // Create an HTTP request \treq, err := http.NewRequest(\u0026#34;GET\u0026#34;, \u0026#34;http://localhost:9998/provider\u0026#34;, http.NoBody) // Create an ExitSpan before sending the HTTP request. \ttrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { // Injector adds specific header information to the request. \treq.Header.Add(headerKey, headerValue) return nil }) // Finish the ExitSpan and ensure it executes when the function returns using defer. \tdefer trace.StopSpan() // Send the request. \tclient := \u0026amp;http.Client{} resp, err := client.Do(req) if err != nil { return nil, err } return resp, nil } func consumerHandler(w http.ResponseWriter, r *http.Request) { // Create an EntrySpan to trace the execution of the consumerHandler method. \ttrace.CreateEntrySpan(r.Method+\u0026#34;/consumer\u0026#34;, func(headerKey string) (string, error) { // Extractor retrieves the header information added to the request. \treturn r.Header.Get(headerKey), nil }) // Finish the EntrySpan. \tdefer trace.StopSpan() // Prepare to send an HTTP request. \tresp, err := getProvider() body, err := io.ReadAll(resp.Body) if err != nil { return } _, _ = w.Write(body) } func main() { http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } // provider.go package main import ( \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { //Create an EntrySpan to trace the execution of the providerHandler method. \ttrace.CreateEntrySpan(\u0026#34;GET:/provider\u0026#34;, func(headerKey string) (string, error) { return r.Header.Get(headerKey), nil }) // Finish the EntrySpan. \tdefer trace.StopSpan() _, _ = w.Write([]byte(\u0026#34;success from provider\u0026#34;)) } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) _ = http.ListenAndServe(\u0026#34;:9998\u0026#34;, nil) } Then, in the terminal, execute:\ngo build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o consumer ./consumer.go ./consumer go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o provider ./provider.go ./provider curl 127.0.0.1:9999/consumer At this point, the UI will display the span information you created.\nIf you need to trace methods that are executed only locally, you can use trace.CreateLocalSpan(). 
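For example, a minimal sketch of tracing a purely local operation with a LocalSpan might look like the following. This is only an illustration: the function name parseOrder, the span name, and the tag are made up, and the program still has to be built with the -toolexec go-agent step described above for the toolkit calls to take effect.

```go
package main

import (
	"fmt"

	_ "github.com/apache/skywalking-go"
	"github.com/apache/skywalking-go/toolkit/trace"
)

// parseOrder is a hypothetical local function; it never talks to a remote peer,
// so a LocalSpan (no peer address, no header propagation) is enough.
func parseOrder(raw string) error {
	_, err := trace.CreateLocalSpan("parseOrder")
	if err != nil {
		return err
	}
	// Finish the span once the local work is done.
	defer trace.StopSpan()

	// ... the actual local business logic would go here ...
	trace.SetTag("order.size", fmt.Sprintf("%d", len(raw)))
	return nil
}

func main() {
	_ = parseOrder("example payload")
}
```

As noted earlier, a program that exits immediately may not get its tracing data sent asynchronously to the backend, so in practice such a span would normally live inside a longer-running service.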
If you don\u0026rsquo;t need to monitor information or states from the other end, you can change ExitSpan and EntrySpan to LocalSpan.\nThe usage examples provided are for illustration purposes, and users can decide the tracing granularity and where in the program they need tracing.\nPlease note that if a program ends too quickly, it may cause tracing data to be unable to be asynchronously sent to the SkyWalking backend.\nPopulate The Span When there\u0026rsquo;s a necessity to record additional information, including creating/updating tags, appending logs, and setting a new operation name of the current traced Span, these APIs should be considered. These actions are used to enhance trace information, providing a more detailed and precise contextual description, which aids in better understanding the events or operations being traced.\nToolkit trace APIs provide a convenient way to access and manipulate trace data, including:\n Setting Tags: SetTag() Adding Logs: AddLog() Setting Span Names: SetOperationName() Getting various IDs: GetTraceID(), GetSegmentID(), GetSpanID()  For example, if you need to record the HTTP status code in a span, you can use the following interfaces while the span is not yet finished:\ntrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { r.Header.Add(headerKey, headerValue) return nil }) resp, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) trace.SetTag(\u0026#34;status_code\u0026#34;, fmt.Sprintf(\u0026#34;%d\u0026#34;, resp.StatusCode)) spanID := trace.GetSpanID() trace.StopSpan() It\u0026rsquo;s important to note that when making these method calls, the current thread should have an active span.\nAsync APIs Async APIs work for manipulating spans across Goroutines. These scenarios might include:\n Applications involving concurrency or multiple goroutines where operating on Spans across different execution contexts is necessary. Updating or logging information for a Span during asynchronous operations. Requiring a delayed completion of a Span.  To use it, follow these steps:\n Obtain the return value of CreateSpan, which is SpanRef. Call spanRef.PrepareAsync() to prepare for operations in another goroutine. When the current goroutine\u0026rsquo;s work is done, call trace.StopSpan() to end the span (affecting only in the current goroutine). Pass the spanRef to another goroutine. After the work is done in any goroutine, call spanRef.AsyncFinish().  Here\u0026rsquo;s an example:\nspanRef, err := trace.CreateLocalSpan(\u0026#34;LocalSpan\u0026#34;) if err != nil { return } spanRef.PrepareAsync() go func(){ // some work  spanRef.AsyncFinish() }() // some work trace.StopSpan() Correlation Context Correlation Context is used to pass parameters within a Span, and the parent Span will pass the Correlation Context to all its child Spans. It allows the transmission of information between spans across different applications. The default number of elements in the Correlation Context is 3, and the content\u0026rsquo;s length cannot exceed 128 bytes.\nCorrelation Context is commonly applied in the following scenarios:\n Passing Information Between Spans: It facilitates the transfer of critical information between different Spans, enabling upstream and downstream Spans to understand the correlation and context between each other. 
Passing Business Parameters: In business scenarios, it involves transmitting specific parameters or information between different Spans, such as authentication tokens, business transaction IDs, and more.  Users can set the Correlation Context using trace.SetCorrelation(key, value) and then retrieve the corresponding value in downstream spans using value := trace.GetCorrelation(key).\nFor example, in the code below, we store the value in the tag of the span, making it easier to observe the result:\npackage main import ( _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; \u0026#34;net/http\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { ctxValue := trace.GetCorrelation(\u0026#34;key\u0026#34;) trace.SetTag(\u0026#34;result\u0026#34;, ctxValue) } func consumerHandler(w http.ResponseWriter, r *http.Request) { trace.SetCorrelation(\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;) _, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) if err != nil { return } } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } Then, in the terminal, execute:\nexport SW_AGENT_NAME=server go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o server ./server.go ./server curl 127.0.0.1:9999/consumer Finally, in the providerHandler() span, you will find the information from the Correlation Context:\nConclusion This article provides an overview of Skywalking Go\u0026rsquo;s Trace APIs and their practical application. These APIs empower users with the ability to customize tracing functionality according to their specific needs.\nFor detailed information about the interfaces, please refer to the documentation: Tracing APIs.\nWelcome everyone to try out the new version.\n","title":"Detailed explanation of SkyWalking Go Toolkit Trace","url":"/blog/2023-10-18-skywalking-toolkit-trace/"},{"content":"背景介绍 SkyWalking Go是一个开源的非侵入式Golang代理程序,用于监控、追踪和在分布式系统中进行数据收集。它使用户能够观察系统内请求的流程和延迟,从各个系统组件收集性能数据以进行性能监控,并通过追踪请求的完整路径来解决问题。\n在版本v0.3.0中,Skywalking Go引入了 toolkit-trace 工具。Trace APIs 允许用户在插件不支持的情况下将关键操作、函数或服务添加到追踪范围。从而实现追踪和监控这些操作,并可用于故障分析、诊断和性能监控。\n在深入了解之前,您可以参考SkyWalking Go Agent快速开始指南来学习如何使用SkyWalking Go Agent。\n下面将会介绍如何在特定场景中使用这些接口。\n导入 Trace Toolkit 在项目的根目录中执行以下命令:\ngo get github.com/apache/skywalking-go/toolkit 使用 toolkit trace 接口前,需要将该包导入到您的项目中:\n\u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; 手动追踪 Span 是 Tracing 中单个操作的基本单元。它代表在特定时间范围内的操作,比如一个请求、一个函数调用或特定动作。Span记录了特定操作的关键信息,包括开始和结束时间、操作名称、标签(键-值对)以及操作之间的关系。多个 Span 可以形成层次结构。\n在遇到 Skywalking Go 不支持的框架的情况下,用户可以手动创建 Span 以获取追踪信息。\n(为了作为示例,我删除了已支持的框架。以下仅为示例。请在使用私有或不支持的框架的 API 时参考)\n例如,当需要追踪HTTP响应时,可以在处理请求的方法内部使用 trace.CreateEntrySpan() 来创建一个 span,在处理完成后使用 trace.StopSpan() 来结束这个 span。在发送HTTP请求时,使用 trace.CreateExitSpan() 来创建一个 span,在请求返回后结束这个 span。\n这里有两个名为 consumer 和 provider 的HTTP服务。当用户访问 consumer 服务时,它在内部接收用户的请求,然后访问 provider 以获取资源。\n// consumer.go package main import ( \u0026#34;io\u0026#34; \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func getProvider() (*http.Response, error) { // 新建 HTTP 请求 \treq, err := http.NewRequest(\u0026#34;GET\u0026#34;, \u0026#34;http://localhost:9998/provider\u0026#34;, http.NoBody) // 在发送 HTTP 请求之前创建 ExitSpan 
\ttrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { // Injector 向请求中添加特定的 header 信息 \treq.Header.Add(headerKey, headerValue) return nil }) // 结束 ExitSpan,使用 defer 确保在函数返回时执行 \tdefer trace.StopSpan() // 发送请求 \tclient := \u0026amp;http.Client{} resp, err := client.Do(req) if err != nil { return nil, err } return resp, nil } func consumerHandler(w http.ResponseWriter, r *http.Request) { // 创建 EntrySpan 来追踪 consumerHandler 方法的执行 \ttrace.CreateEntrySpan(r.Method+\u0026#34;/consumer\u0026#34;, func(headerKey string) (string, error) { // Extractor 获取请求中添加的 header 信息 \treturn r.Header.Get(headerKey), nil }) // 结束 EntrySpan \tdefer trace.StopSpan() // 准备发送 HTTP 请求 \tresp, err := getProvider() body, err := io.ReadAll(resp.Body) if err != nil { return } _, _ = w.Write(body) } func main() { http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } // provider.go package main import ( \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { // 创建 EntrySpan 来追踪 providerHandler 方法的执行 \ttrace.CreateEntrySpan(\u0026#34;GET:/provider\u0026#34;, func(headerKey string) (string, error) { return r.Header.Get(headerKey), nil }) // 结束 EntrySpan \tdefer trace.StopSpan() _, _ = w.Write([]byte(\u0026#34;success from provider\u0026#34;)) } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) _ = http.ListenAndServe(\u0026#34;:9998\u0026#34;, nil) } 然后中终端中执行:\ngo build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o consumer ./consumer.go ./consumer go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o provider ./provider.go ./provider curl 127.0.0.1:9999/consumer 此时 UI 中将会显示你所创建的span信息\n如果需要追踪仅在本地执行的方法,可以使用 trace.CreateLocalSpan()。如果不需要监控来自另一端的信息或状态,可以将 ExitSpan 和 EntrySpan 更改为 LocalSpan。\n以上方法仅作为示例,用户可以决定追踪的粒度以及程序中需要进行追踪的位置。\n注意,如果程序结束得太快,可能会导致 Tracing 数据无法异步发送到 SkyWalking 后端。\n填充 Span 当需要记录额外信息时,包括创建/更新标签、追加日志和设置当前被追踪 Span 的新操作名称时,可以使用这些API。这些操作用于增强追踪信息,提供更详细的上下文描述,有助于更好地理解被追踪的事件或操作。\nToolkit trace APIs 提供了一种简便的方式来访问和操作 Trace 数据:\n 设置标签:SetTag() 添加日志:AddLog() 设置 Span 名称:SetOperationName() 获取各种ID:GetTraceID(), GetSegmentID(), GetSpanID()  例如,如果需要在一个 Span 中记录HTTP状态码,就可以在 Span 未结束时调用以下接口:\ntrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { r.Header.Add(headerKey, headerValue) return nil }) resp, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) trace.SetTag(\u0026#34;status_code\u0026#34;, fmt.Sprintf(\u0026#34;%d\u0026#34;, resp.StatusCode)) spanID := trace.GetSpanID() trace.StopSpan() 在调用这些方法时,当前线程需要有正在活跃的 span。\n异步 APIs 异步API 用于跨 goroutines 操作 spans。包括以下情况:\n 包含多个 goroutines 的程序,需要在不同上下文中中操作 Span。 在异步操作时更新或记录 Span 的信息。 延迟结束 Span。  按照以下步骤使用:\n 获取 CreateSpan 的返回值 SpanRef。 调用 spanRef.PrepareAsync() ,准备在另一个 goroutine 中执行操作。 当前 goroutine 工作结束后,调用 trace.StopSpan() 结束该 span(仅影响当前 goroutine)。 将 spanRef 传递给另一个 goroutine。 完成工作后在任意 goroutine 中调用 spanRef.AsyncFinish()。  以下为示例:\nspanRef, err := trace.CreateLocalSpan(\u0026#34;LocalSpan\u0026#34;) if err != nil { return } spanRef.PrepareAsync() go func(){ // some work \tspanRef.AsyncFinish() }() // some work trace.StopSpan() Correlation Context Correlation Context 用于在 Span 间传递参数,父 Span 会把 Correlation Context 递给其所有子 Spans。它允许在不同应用程序的 
spans 之间传输信息。Correlation Context 的默认元素个数为3,其内容长度不能超过128字节。\nCorrelation Context 通常用于以下等情况:\n 在 Spans 之间传递信息:它允许关键信息在不同 Span 之间传输,使上游和下游 Spans 能够获取彼此之间的关联和上下文。 传递业务参数:在业务场景中,涉及在不同 Span 之间传输特定参数或信息,如认证令牌、交易ID等。  用户可以使用 trace.SetCorrelation(key, value) 设置 Correlation Context ,并可以使用 value := trace.GetCorrelation(key) 在下游 spans 中获取相应的值。\n例如在下面的代码中,我们将值存储在 span 的标签中,以便观察结果:\npackage main import ( _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; \u0026#34;net/http\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { ctxValue := trace.GetCorrelation(\u0026#34;key\u0026#34;) trace.SetTag(\u0026#34;result\u0026#34;, ctxValue) } func consumerHandler(w http.ResponseWriter, r *http.Request) { trace.SetCorrelation(\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;) _, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) if err != nil { return } } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } 然后在终端执行:\nexport SW_AGENT_NAME=server go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o server ./server.go ./server curl 127.0.0.1:9999/consumer 最后在 providerHandler() 的 Span 中找到了 Correlation Context 的信息:\n总结 本文讲述了Skywalking Go的 Trace APIs 及其应用。它为用户提供了自定义追踪的功能。\n更多关于该接口的介绍见文档:Tracing APIs。\n欢迎大家来使用新版本。\n","title":"SkyWalking Go Toolkit Trace 详解","url":"/zh/2023-10-18-skywalking-toolkit-trace/"},{"content":"CommunityOverCode (原 ApacheCon) 是 Apache 软件基金会(ASF)的官方全球系列大会。自 1998 年以来\u0026ndash;在 ASF 成立之前 \u0026ndash; ApacheCon 已经吸引了各个层次的参与者,在 300 多个 Apache 项目及其不同的社区中探索 \u0026ldquo;明天的技术\u0026rdquo;。CommunityOverCode 通过动手实作、主题演讲、实际案例研究、培训、黑客松活动等方式,展示 Apache 项目的最新发展和新兴创新。\nCommunityOverCode 展示了无处不在的 Apache 项目的最新突破和 Apache 孵化器中即将到来的创新,以及开源开发和以 Apache 之道领导社区驱动的项目。与会者可以了解到独立于商业利益、企业偏见或推销话术之外的核心开源技术。\nSkyWalking的Golang自动探针实践 刘晗 分布式追踪技术在可观测领域尤为重要,促使各个语言的追踪探针的易用性获得了更多的关注。目前在golang语言探针方面大多为手动埋点探针,接入流程过于复杂,而且局限性很强。本次讨论的重点着重于简化golang语言探针的接入方式,创新性的使用了自动埋点技术,并且突破了很多框架中对于上下文信息的依赖限制。\nB站视频地址\nBanyanDB一个高扩展性的分布式追踪数据库 高洪涛 追踪数据是一种用于分析微服务系统性能和故障的重要数据源,它记录了系统中每个请求的调用链路和相关指标。随着微服务系统的规模和复杂度的增长,追踪数据的量级也呈指数级增长,给追踪数据的存储和查询带来了巨大的挑战。传统的关系型数据库或者时序数据库往往难以满足追踪数据的高效存储和灵活查询的需求。 BanyanDB是一个专为追踪数据而设计的分布式数据库,它具有高扩展性、高性能、高可用性和高灵活性的特点。BanyanDB采用了基于时间序列的分片策略,将追踪数据按照时间范围划分为多个分片,每个分片可以独立地进行存储、复制和负载均衡。BanyanDB还支持多维索引,可以根据不同的维度对追踪数据进行快速过滤和聚合。 在本次演讲中,我们将介绍BanyanDB的设计思想、架构和实现细节,以及它在实际场景中的应用和效果。我们也将展示BanyanDB与其他数据库的对比和优势,以及它未来的发展方向和计划。\nB站视频地址\n","title":"CommunityOverCode Conference 2023 Asia","url":"/zh/2023-08-20-coc-asia-2023/"},{"content":"SkyWalking PHP 0.7.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Start 0.7.0 development. by @jmjoy in https://github.com/apache/skywalking-php/pull/90 Add more info for error log. by @jmjoy in https://github.com/apache/skywalking-php/pull/91 Fix amqplib and predis argument problems. by @jmjoy in https://github.com/apache/skywalking-php/pull/92 Add Memcache plugin. by @jmjoy in https://github.com/apache/skywalking-php/pull/93 Refactor mysqli plugin, support procedural api. by @jmjoy in https://github.com/apache/skywalking-php/pull/94 Fix target address in cross process header. 
by @jmjoy in https://github.com/apache/skywalking-php/pull/95 Release SkyWalking PHP 0.7.0 by @jmjoy in https://github.com/apache/skywalking-php/pull/96  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.7.0...v0.7.0\nPECL https://pecl.php.net/package/skywalking_agent/0.7.0\n","title":"Release Apache SkyWalking PHP 0.7.0","url":"/events/release-apache-skywalking-php-0-7-0/"},{"content":"SkyWalking BanyanDB Helm 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Deploy banyandb with standalone mode by Chart  ","title":"Release Apache SkyWalking BanyanDB Helm 0.1.0","url":"/events/release-apache-skywalking-banyandb-helm-0-1-0/"},{"content":"背景介绍 Arthas 是一款常用的 Java 诊断工具,我们可以在 SkyWalking 监控到服务异常后,通过 Arthas 进一步分析和诊断以快速定位问题。\n在 Arthas 实际使用中,通常由开发人员拷贝或者下载安装包到服务对应的VM或者容器中,attach 到对应的 Java 进程进行问题排查。这一过程不可避免的会造成服务器敏感运维信息的扩散, 而且在分秒必争的问题排查过程中,这些繁琐的操作无疑会浪费大量时间。\nSkyWalking Java Agent 伴随 Java 服务一起启动,并定期上报服务、实例信息给OAP Server。我们可以借助 SkyWalking Java Agent 的插件化能力,开发一个 Arthas 控制插件, 由该插件管理 Arthas 运行生命周期,通过页面化的方式,完成Arthas的启动与停止。最终实现效果可以参考下图:\n要完成上述功能,我们需要实现以下几个关键点:\n 开发 agent arthas-control-plugin,执行 arthas 的启动与停止命令 开发 oap arthas-controller-module ,下发控制命令给 arthas agent plugin 定制 skywalking-ui, 连接 arthas-tunnel-server,发送 arthas 命令并获取执行结果  以上各个模块之间的交互流程如下图所示:\nconnect disconnect 本文涉及的所有代码均已发布在 github skywalking-x-arthas 上,如有需要,大家可以自行下载代码测试。 文章后半部分将主要介绍代码逻辑及其中包含的SkyWalking扩展点。\nagent arthas-control-plugin 首先在 skywalking-java/apm-sniffer/apm-sdk-plugin 下创建一个 arthas-control-plugin, 该模块在打包后会成为 skywalking-agent/plugins 下的一个插件, 其目录结构如下:\narthas-control-plugin/ ├── pom.xml └── src └── main ├── java │ └── org │ └── apache │ └── skywalking │ └── apm │ └── plugin │ └── arthas │ ├── config │ │ └── ArthasConfig.java # 模块配置 │ ├── service │ │ └── CommandListener.java # boot service,监听 oap command │ └── util │ ├── ArthasCtl.java # 控制 arthas 的启动与停止 │ └── ProcessUtils.java ├── proto │ └── ArthasCommandService.proto # 与oap server通信的 grpc 协议定义 └── resources └── META-INF └── services # boot service spi service └── org.apache.skywalking.apm.agent.core.boot.BootService 16 directories, 7 files 在 ArthasConfig.java 中,我们定义了以下配置,这些参数将在 arthas 启动时传递。\n以下的配置可以通过 agent.config 文件、system prop、env variable指定。 关于 skywalking-agent 配置的初始化的具体流程,大家可以参考 SnifferConfigInitializer 。\npublic class ArthasConfig { public static class Plugin { @PluginConfig(root = ArthasConfig.class) public static class Arthas { // arthas 目录  public static String ARTHAS_HOME; // arthas 启动时连接的tunnel server  public static String TUNNEL_SERVER; // arthas 会话超时时间  public static Long SESSION_TIMEOUT; // 禁用的 arthas command  public static String DISABLED_COMMANDS; } } } 接着,我们看下 CommandListener.java 的实现,CommandListener 实现了 BootService 接口, 并通过 resources/META-INF/services 下的文件暴露给 ServiceLoader。\nBootService 的定义如下,共有prepare()、boot()、onComplete()、shutdown()几个方法,这几个方法分别对应插件生命周期的不同阶段。\npublic interface BootService { void prepare() throws Throwable; void boot() throws Throwable; void onComplete() throws Throwable; void shutdown() throws Throwable; default int priority() { return 0; } } 在 ServiceManager 类的 boot() 方法中, 定义了BootService 的 load 与启动流程,该方法 由SkyWalkingAgent 的 premain 调用,在主程序运行前完成初始化与启动:\npublic enum ServiceManager { INSTANCE; ... ... public void boot() { bootedServices = loadAllServices(); prepare(); startup(); onComplete(); } ... ... } 回到我们 CommandListener 的 boot 方法,该方法在 agent 启动之初定义了一个定时任务,这个定时任务会轮询 oap ,查询是否需要启动或者停止arthas:\npublic class CommandListener implements BootService, GRPCChannelListener { ... ... 
@Override public void boot() throws Throwable { getCommandFuture = Executors.newSingleThreadScheduledExecutor( new DefaultNamedThreadFactory(\u0026#34;CommandListener\u0026#34;) ).scheduleWithFixedDelay( new RunnableWithExceptionProtection( this::getCommand, t -\u0026gt; LOGGER.error(\u0026#34;get arthas command error.\u0026#34;, t) ), 0, 2, TimeUnit.SECONDS ); } ... ... } getCommand方法中定义了start、stop的处理逻辑,分别对应页面上的 connect 和 disconnect 操作。 这两个 command 有分别转给 ArthasCtl 的 startArthas 和 stopArthas 两个方法处理,用来控制 arthas 的启停。\n在 startArthas 方法中,启动arthas-core.jar 并使用 skywalking-agent 的 serviceName 和 instanceName 注册连接至配置文件中指定的arthas-tunnel-server。\nArthasCtl 逻辑参考自 Arthas 的 BootStrap.java ,由于不是本篇文章的重点,这里不再赘述,感兴趣的小伙伴可以自行查看。\nswitch (commandResponse.getCommand()) { case START: if (alreadyAttached()) { LOGGER.warn(\u0026#34;arthas already attached, no need start again\u0026#34;); return; } try { arthasTelnetPort = SocketUtils.findAvailableTcpPort(); ArthasCtl.startArthas(PidUtils.currentLongPid(), arthasTelnetPort); } catch (Exception e) { LOGGER.info(\u0026#34;error when start arthas\u0026#34;, e); } break; case STOP: if (!alreadyAttached()) { LOGGER.warn(\u0026#34;no arthas attached, no need to stop\u0026#34;); return; } try { ArthasCtl.stopArthas(arthasTelnetPort); arthasTelnetPort = null; } catch (Exception e) { LOGGER.info(\u0026#34;error when stop arthas\u0026#34;, e); } break; } 看完 arthas 的启动与停止控制逻辑,我们回到 CommandListener 的 statusChanged 方法, 由于要和 oap 通信,这里我们按照惯例监听 grpc channel 的状态,只有状态正常时才会执行上面的getCommand轮询。\npublic class CommandListener implements BootService, GRPCChannelListener { ... ... @Override public void statusChanged(final GRPCChannelStatus status) { if (GRPCChannelStatus.CONNECTED.equals(status)) { Object channel = ServiceManager.INSTANCE.findService(GRPCChannelManager.class).getChannel(); // DO NOT REMOVE Channel CAST, or it will throw `incompatible types: org.apache.skywalking.apm.dependencies.io.grpc.Channel  // cannot be converted to io.grpc.Channel` exception when compile due to agent core\u0026#39;s shade of grpc dependencies.  commandServiceBlockingStub = ArthasCommandServiceGrpc.newBlockingStub((Channel) channel); } else { commandServiceBlockingStub = null; } this.status = status; } ... ... 
} 上面的代码,细心的小伙伴可能会发现,getChannel() 的返回值被向上转型成了 Object, 而在下面的 newBlockingStub 方法中,又强制转成了 Channel。\n看似有点多此一举,其实不然,我们将这里的转型去掉,尝试编译就会收到下面的错误:\n[ERROR] Failed to execute goal org.apache.maven.plugins:maven-compiler-plugin:3.10.1:compile (default-compile) on project arthas-control-plugin: Compilation failure [ERROR] .../CommandListener.java:[59,103] 不兼容的类型: org.apache.skywalking.apm.dependencies.io.grpc.Channel无法转换为io.grpc.Channel 上面的错误提示 ServiceManager.INSTANCE.findService(GRPCChannelManager.class).getChannel() 的返回值类型是 org.apache.skywalking.apm.dependencies.io.grpc.Channel,无法被赋值给 io.grpc.Channel 引用。\n我们查看GRPCChannelManager的getChannel()方法代码会发现,方法定义的返回值明明是 io.grpc.Channel,为什么编译时会报上面的错误?\n其实这是skywalking-agent的一个小魔法,由于 agent-core 最终会被打包进 skywalking-agent.jar,启动时由系统类装载器(或者其他父级类装载器)直接装载, 为了防止所依赖的类库和被监控服务的类发生版本冲突,agent 核心代码在打包时使用了maven-shade-plugin, 该插件会在 maven package 阶段改变 grpc 依赖的包名, 我们在源代码里看到的是 io.grpc.Channel,其实在真正运行时已经被改成了 org.apache.skywalking.apm.dependencies.io.grpc.Channel,这便可解释上面编译报错的原因。\n除了grpc以外,其他一些 well-known 的 dependency 也会进行 shade 操作,详情大家可以参考 apm-agent-core pom.xml :\n\u0026lt;plugin\u0026gt; \u0026lt;artifactId\u0026gt;maven-shade-plugin\u0026lt;/artifactId\u0026gt; \u0026lt;executions\u0026gt; \u0026lt;execution\u0026gt; \u0026lt;phase\u0026gt;package\u0026lt;/phase\u0026gt; \u0026lt;goals\u0026gt; \u0026lt;goal\u0026gt;shade\u0026lt;/goal\u0026gt; \u0026lt;/goals\u0026gt; \u0026lt;configuration\u0026gt; ... ... \u0026lt;relocations\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.com.google.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.com.google.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.grpc.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.grpc.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.netty.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.netty.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.opencensus.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.opencensus.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.perfmark.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.perfmark.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.org.slf4j.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.org.slf4j.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;/relocations\u0026gt; ... ... \u0026lt;/configuration\u0026gt; \u0026lt;/execution\u0026gt; \u0026lt;/executions\u0026gt; \u0026lt;/plugin\u0026gt; 除了上面的注意点以外,我们来看一下另一个场景,假设我们需要在 agent plugin 的 interceptor 中使用 plugin 中定义的 BootService 会发生什么?\n我们回到 BootService 的加载逻辑,为了加载到 plugin 中定义的BootService,ServiceLoader 指定了类装载器为AgentClassLoader.getDefault(), (这行代码历史非常悠久,可以追溯到2018年:Allow use SkyWalking plugin to override service in Agent core. 
#1111 ), 由此可见,plugin 中定义的 BootService 的 classloader 是 AgentClassLoader.getDefault():\nvoid load(List\u0026lt;BootService\u0026gt; allServices) { for (final BootService bootService : ServiceLoader.load(BootService.class, AgentClassLoader.getDefault())) { allServices.add(bootService); } } 再来看下 interceptor 的加载逻辑,InterceptorInstanceLoader.java 的 load 方法规定了如果父加载器相同,plugin 中的 interceptor 将使用一个新创建的 AgentClassLoader (在绝大部分简单场景中,plugin 的 interceptor 都由同一个 AgentClassLoader 加载):\npublic static \u0026lt;T\u0026gt; T load(String className, ClassLoader targetClassLoader) throws IllegalAccessException, InstantiationException, ClassNotFoundException, AgentPackageNotFoundException { ... ... pluginLoader = EXTEND_PLUGIN_CLASSLOADERS.get(targetClassLoader); if (pluginLoader == null) { pluginLoader = new AgentClassLoader(targetClassLoader); EXTEND_PLUGIN_CLASSLOADERS.put(targetClassLoader, pluginLoader); } ... ... } 按照类装载器的委派机制,interceptor 中如果用到了 BootService,也会由当前的类的装载器去装载。 所以 ServiceManager 中装载的 BootService 和 interceptor 装载的 BootService 并不是同一个 (一个 class 文件被不同的 classloader 装载了两次),如果在 interceptor 中 调用 BootService 方法,同样会发生 cast 异常。 由此可见,目前的实现并不支持我们在interceptor中直接调用 plugin 中 BootService 的方法,如果需要调用,只能将 BootService 放到 agent-core 中,由更高级别的类装载器优先装载。\n这其实并不是 skywalking-agent 的问题,skywalking agent plugin 专注于自己的应用场景,只需要关注 trace、meter 以及默认 BootService 的覆盖就可以了。 只是我们如果有扩展 skywalking-agent 的需求,要对其类装载机制做到心中有数,否则可能会出现一些意想不到的问题。\noap arthas-controller-module 看完 agent-plugin 的实现,我们再来看看 oap 部分的修改,oap 同样是模块化的设计,我们可以很轻松的增加一个新的模块,在 /oap-server/ 目录下新建 arthas-controller 子模块:\narthas-controller/ ├── pom.xml └── src └── main ├── java │ └── org │ └── apache │ └── skywalking │ └── oap │ └── arthas │ ├── ArthasControllerModule.java # 模块定义 │ ├── ArthasControllerProvider.java # 模块逻辑实现者 │ ├── CommandQueue.java │ └── handler │ ├── CommandGrpcHandler.java # grpc handler,供 plugin 通信使用 │ └── CommandRestHandler.java # http handler,供 skywalking-ui 通信使用 ├── proto │ └── ArthasCommandService.proto └── resources └── META-INF └── services # 模块及模块实现的 spi service ├── org.apache.skywalking.oap.server.library.module.ModuleDefine └── org.apache.skywalking.oap.server.library.module.ModuleProvider 模块的定义非常简单,只包含一个模块名,由于我们新增的模块并不需要暴露service给其他模块调用,services 我们返回一个空数组\npublic class ArthasControllerModule extends ModuleDefine { public static final String NAME = \u0026#34;arthas-controller\u0026#34;; public ArthasControllerModule() { super(NAME); } @Override public Class\u0026lt;?\u0026gt;[] services() { return new Class[0]; } } 接着是模块实现者,实现者取名为 default,module 指定该 provider 所属模块,由于没有模块的自定义配置,newConfigCreator 我们返回null即可。 start 方法分别向 CoreModule 的 grpc 服务和 http 服务注册了两个 handler,grpc 服务和 http 服务就是我们熟知的 11800 和 12800 端口:\npublic class ArthasControllerProvider extends ModuleProvider { @Override public String name() { return \u0026#34;default\u0026#34;; } @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return ArthasControllerModule.class; } @Override public ConfigCreator\u0026lt;?\u0026gt; newConfigCreator() { return null; } @Override public void prepare() throws ServiceNotProvidedException { } @Override public void start() throws ServiceNotProvidedException, ModuleStartException { // grpc service for agent  GRPCHandlerRegister grpcService = getManager().find(CoreModule.NAME) .provider() .getService(GRPCHandlerRegister.class); grpcService.addHandler( new CommandGrpcHandler() ); // rest service for ui  HTTPHandlerRegister restService = getManager().find(CoreModule.NAME) .provider() .getService(HTTPHandlerRegister.class); restService.addHandler( new CommandRestHandler(), Collections.singletonList(HttpMethod.POST) ); } @Override public void notifyAfterCompleted() throws ServiceNotProvidedException { } @Override public String[] requiredModules() { return new String[0]; } } 最后在配置文件中注册本模块及模块实现者,下面的配置表示 arthas-controller 这个 module 由 default provider 提供实现:\narthas-controller:selector:defaultdefault:CommandGrpcHandler 和 CommandHttpHandler 的逻辑非常简单,CommandHttpHandler 定义了 connect 和 disconnect 接口, 收到请求后会放到一个 Queue 中供 CommandGrpcHandler 消费,Queue 的实现如下,这里不再赘述:\npublic class CommandQueue { private static final Map\u0026lt;String, Command\u0026gt; COMMANDS = new ConcurrentHashMap\u0026lt;\u0026gt;(); // produce by connect、disconnect public static void produceCommand(String serviceName, String instanceName, Command command) { COMMANDS.put(serviceName + instanceName, command); } // consume by agent getCommand task public static Optional\u0026lt;Command\u0026gt; consumeCommand(String serviceName, String instanceName) { return Optional.ofNullable(COMMANDS.remove(serviceName + instanceName)); } } skywalking-ui arthas console 完成了 agent 和 oap 的开发,我们再看下 ui 部分:\n connect:调用oap server connect 接口,并连接 arthas-tunnel-server disconnect:调用oap server disconnect 接口,并与 arthas-tunnel-server 断开连接 arthas 命令交互,这部分代码主要参考 arthas,大家可以查看 web-ui console 的实现  修改完skywalking-ui的代码后,我们可以直接通过 npm run dev 测试了。\n如果需要通过主项目打包,别忘了在apm-webapp 的 ApplicationStartUp.java 类中添加一条 arthas 的路由:\nServer .builder() .port(port, SessionProtocol.HTTP) .service(\u0026#34;/arthas\u0026#34;, oap) .service(\u0026#34;/graphql\u0026#34;, oap) .service(\u0026#34;/internal/l7check\u0026#34;, HealthCheckService.of()) .service(\u0026#34;/zipkin/config.json\u0026#34;, zipkin) .serviceUnder(\u0026#34;/zipkin/api\u0026#34;, zipkin) .serviceUnder(\u0026#34;/zipkin\u0026#34;, FileService.of( ApplicationStartUp.class.getClassLoader(), \u0026#34;/zipkin-lens\u0026#34;) .orElse(zipkinIndexPage)) .serviceUnder(\u0026#34;/\u0026#34;, FileService.of( ApplicationStartUp.class.getClassLoader(), \u0026#34;/public\u0026#34;) .orElse(indexPage)) .build() .start() .join(); 总结  BootService 启动及停止流程 如何利用 BootService 实现自定义逻辑 Agent Plugin 的类装载机制 maven-shade-plugin 的使用与注意点 如何利用 ModuleDefine 与 ModuleProvider 定义新的模块 如何向 GRPC、HTTP Service 添加新的 handler  如果你还有任何的疑问,欢迎大家与我交流 。\n","title":"将 Apache SkyWalking 与 Arthas 集成","url":"/zh/2023-09-17-integrating-skywalking-with-arthas/"},{"content":"SkyWalking Eyes 0.5.0 is released. Go to downloads page to find release tars.\n feat(header templates): add support for AGPL-3.0 by @elijaholmos in https://github.com/apache/skywalking-eyes/pull/125 Upgrade go version to 1.18 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/126 Add MulanPSL-2.0 support. 
by @jmjoy in https://github.com/apache/skywalking-eyes/pull/127 New Header Template: GPL-3.0-or-later by @ddlees in https://github.com/apache/skywalking-eyes/pull/128 Update README.md by @rovast in https://github.com/apache/skywalking-eyes/pull/129 Add more .env.[mode] support for VueJS project by @rovast in https://github.com/apache/skywalking-eyes/pull/130 Docker Multiple Architecture Support :fixes#9089 by @mohammedtabish0 in https://github.com/apache/skywalking-eyes/pull/132 Polish maven test for convenient debug by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/134 feat: list files by git when possible by @tisonkun in https://github.com/apache/skywalking-eyes/pull/133 Switch to npm ci for reliable builds by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/135 Fix optional dependencies are not excluded by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/136 Fix exclude not work for transitive dependencies and add recursive config by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/137 Add some tests for maven resovler by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/138 feat(header-fix): add Svelte support by @elijaholmos in https://github.com/apache/skywalking-eyes/pull/139 dep: do not write license files if they already exist by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/140 fix: not ignore *.txt to make sure files like CMakeLists.txt can be checked by @acelyc111 in https://github.com/apache/skywalking-eyes/pull/141 fix license header normalizer by @xiaoyawei in https://github.com/apache/skywalking-eyes/pull/142 Substitute variables in license content for header command by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/143 Correct indent in Apache-2.0 template by @tisonkun in https://github.com/apache/skywalking-eyes/pull/144 Add copyright-year configuration by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/145 dep/maven: use output file to store the dep tree for cleaner result by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/146 dep/maven: resolve dependencies before analysis by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/147 gha: switch to composite running mode and set up cache by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/149 gha: switch to composite running mode and set up cache by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/150 Fix GitHub Actions wrong path by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/151 Normalize license for cargo. by @jmjoy in https://github.com/apache/skywalking-eyes/pull/153 Remove space characters in license for cargo. 
by @jmjoy in https://github.com/apache/skywalking-eyes/pull/154 Bump up dependencies to fix CVE by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/155 Bump up GHA to depress warnings by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/156 Leverage the built-in cache in setup-go@v4 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/157 Dependencies check should report unknown licneses by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/158 Fix wrong indentation in doc by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/159 Add EPL-2.0 header template by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/160 Fix wrong indentation in doc about multi license config by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/161 dependency resolve with default template and specified output of license by @crholm in https://github.com/apache/skywalking-eyes/pull/163 Bump up go git to support .gitconfig user path by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/164 Draft release notes for 0.5.0 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/165 Remove \u0026ldquo;portions copyright\u0026rdquo; header normalizer by @antgamdia in https://github.com/apache/skywalking-eyes/pull/166  Full Changelog: https://github.com/apache/skywalking-eyes/compare/v0.4.0...v0.5.0\n","title":"Release Apache SkyWalking Eyes 0.5.0","url":"/events/release-apache-skywalking-eyes-0-5-0/"},{"content":"Abstract Apache SkyWalking hosts SkyWalking Summit 2023 on Nov. 4th, 2023, UTC+8, sponsored by ZMOps and Tetrate.\nWe are going to share SkyWalking\u0026rsquo;s roadmap, features, product experiences, and open-source culture.\nWelcome to join us.\nVenue Addr./地址 上海大华虹桥假日酒店\nDate 8:00 - 17:00, Nov 4th.\nRegister Register for IN-PERSON ticket\nCall For Proposals (CFP) The Call For Proposals open from now to 18:00 on Oct. 27th 2023, UTC+8. Submit your proposal at here\nWe have 1 open session and 8 sessions for the whole event.\n Open session is reserved for SkyWalking PMC members. 6 sessions are opened for CFP process. 2 sessions are reserved for sponsors.  Sponsors  ZMOps Inc. Tetrate Inc.  Anti-harassment policy SkyWalkingDay is dedicated to providing a harassment-free experience for everyone. We do not tolerate harassment of participants in any form. Sexual language and imagery will also not be tolerated in any event venue. Participants violating these rules may be sanctioned or expelled without a refund, at the discretion of the event organizers. Our anti-harassment policy can be found at Apache website.\nContact Us Send mail to dev@skywalking.apache.org.\n","title":"SkyWalking Summit 2023 @ Shanghai China","url":"/events/summit-23-cn/"},{"content":"SkyWalking 9.6.0 is released. Go to downloads page to find release tars.\nNew Alerting Kernel  MQE(Metrics Query Expression) and a new notification mechanism are supported.  Support Loki LogQL  Newly added support for Loki LogQL and Grafana Loki Dashboard for SkyWalking collected logs  WARNING  ElasticSearch 6 storage relative tests are removed. It worked and is not promised due to end of life officially.  Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  
OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. 
Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.6.0","url":"/events/release-apache-skywalking-apm-9.6.0/"},{"content":"SkyWalking Java Agent 9.0.0 is released. Go to downloads page to find release tars. 
Changes by Version\n9.0.0 Kernel Updates  Support re-transform/hot-swap classes with other java agents, and remove the obsolete cache enhanced class feature. Implement new naming policies for names of auxiliary type, interceptor delegate field, renamed origin method, method access name, method cache value field. All names are under sw$ name trait. They are predictable and unchanged after re-transform.  * SWAuxiliaryTypeNamingStrategy Auxiliary type name pattern: \u0026lt;origin_class_name\u0026gt;$\u0026lt;name_trait\u0026gt;$auxiliary$\u0026lt;auxiliary_type_instance_hash\u0026gt; * DelegateNamingResolver Interceptor delegate field name pattern: \u0026lt;name_trait\u0026gt;$delegate$\u0026lt;class_name_hash\u0026gt;$\u0026lt;plugin_define_hash\u0026gt;$\u0026lt;intercept_point_hash\u0026gt; * SWMethodNameTransformer Renamed origin method pattern: \u0026lt;name_trait\u0026gt;$original$\u0026lt;method_name\u0026gt;$\u0026lt;method_description_hash\u0026gt; * SWImplementationContextFactory Method cache value field pattern: cachedValue$\u0026lt;name_trait\u0026gt;$\u0026lt;origin_class_name_hash\u0026gt;$\u0026lt;field_value_hash\u0026gt; Accessor method name pattern: \u0026lt;renamed_origin_method\u0026gt;$accessor$\u0026lt;name_trait\u0026gt;$\u0026lt;origin_class_name_hash\u0026gt; Here is an example of manipulated enhanced class with new naming policies of auxiliary classes, fields, and methods\nimport sample.mybatis.controller.HotelController$sw$auxiliary$19cja42; import sample.mybatis.controller.HotelController$sw$auxiliary$p257su0; import sample.mybatis.domain.Hotel; import sample.mybatis.service.HotelService; @RequestMapping(value={\u0026#34;/hotel\u0026#34;}) @RestController public class HotelController implements EnhancedInstance { @Autowired @lazy private HotelService hotelService; private volatile Object _$EnhancedClassField_ws; // Interceptor delegate fields  public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$ain2do0$8im5jm1; public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$ain2do0$edkmf61; public static volatile /* synthetic */ ConstructorInter sw$delegate$td03673$ain2do0$qs9unv1; public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$fl4lnk1$m3ia3a2; public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$fl4lnk1$sufrvp1; public static volatile /* synthetic */ ConstructorInter sw$delegate$td03673$fl4lnk1$cteu7s1; // Origin method cache value field  private static final /* synthetic */ Method cachedValue$sw$td03673$g5sobj1; public HotelController() { this(null); sw$delegate$td03673$ain2do0$qs9unv1.intercept(this, new Object[0]); } private /* synthetic */ HotelController(sw.auxiliary.p257su0 p257su02) { } @GetMapping(value={\u0026#34;city/{cityId}\u0026#34;}) public Hotel selectByCityId(@PathVariable(value=\u0026#34;cityId\u0026#34;) int n) { // call interceptor with auxiliary type and parameters and origin method object  return (Hotel)sw$delegate$td03673$ain2do0$8im5jm1.intercept(this, new Object[]{n}, new HotelController$sw$auxiliary$19cja42(this, n), cachedValue$sw$td03673$g5sobj1); } // Renamed origin method  private /* synthetic */ Hotel sw$origin$selectByCityId$a8458p3(int cityId) { /*22*/ return this.hotelService.selectByCityId(cityId); } // Accessor of renamed origin method, calling from auxiliary type  final /* synthetic */ Hotel sw$origin$selectByCityId$a8458p3$accessor$sw$td03673(int n) { // Calling renamed origin method  return this.sw$origin$selectByCityId$a8458p3(n); } 
@OverRide public Object getSkyWalkingDynamicField() { return this._$EnhancedClassField_ws; } @OverRide public void setSkyWalkingDynamicField(Object object) { this._$EnhancedClassField_ws = object; } static { ClassLoader.getSystemClassLoader().loadClass(\u0026#34;org.apache.skywalking.apm.dependencies.net.bytebuddy.dynamic.Nexus\u0026#34;).getMethod(\u0026#34;initialize\u0026#34;, Class.class, Integer.TYPE).invoke(null, HotelController.class, -1072476370); // Method object  cachedValue$sw$td03673$g5sobj1 = HotelController.class.getMethod(\u0026#34;selectByCityId\u0026#34;, Integer.TYPE); } } Auxiliary type of Constructor :\nclass HotelController$sw$auxiliary$p257su0 { } Auxiliary type of selectByCityId method:\nclass HotelController$sw$auxiliary$19cja42 implements Runnable, Callable { private HotelController argument0; private int argument1; public Object call() throws Exception { return this.argument0.sw$origin$selectByCityId$a8458p3$accessor$sw$td03673(this.argument1); } @OverRide public void run() { this.argument0.sw$origin$selectByCityId$a8458p3$accessor$sw$td03673(this.argument1); } HotelController$sw$auxiliary$19cja42(HotelController hotelController, int n) { this.argument0 = hotelController; this.argument1 = n; } } Features and Bug Fixes  Support Jdk17 ZGC metric collect Support Jetty 11.x plugin Support access to the sky-walking tracer context in spring gateway filter Fix the scenario of using the HBase plugin with spring-data-hadoop. Add RocketMQ 5.x plugin Fix the conflict between the logging kernel and the JDK threadpool plugin. Fix the thread safety bug of finishing operation for the span named \u0026ldquo;SpringCloudGateway/sendRequest\u0026rdquo; Fix NPE in guava-eventbus-plugin. Add WebSphere Liberty 23.x plugin Add Plugin to support aerospike Java client Add ClickHouse parsing to the jdbc-common plugin. Support to trace redisson lock Upgrade netty-codec-http2 to 4.1.94.Final Upgrade guava to 32.0.1 Fix issue with duplicate enhancement by ThreadPoolExecutor Add plugin to support for RESTeasy 6.x. Fix the conditions for resetting UUID, avoid the same uuid causing the configuration not to be updated. Fix witness class in springmvc-annotation-5.x-plugin to avoid falling into v3 use cases. Fix Jedis-2.x plugin bug and add test for Redis cluster scene Merge two instrumentation classes to avoid duplicate enhancements in MySQL plugins. Support asynchronous invocation in jetty client 9.0 and 9.x plugin Add nacos-client 2.x plugin Staticize the tags for preventing synchronization in JDK 8 Add RocketMQ-Client-Java 5.x plugin Fix NullPointerException in lettuce-5.x-plugin.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 9.0.0","url":"/events/release-apache-skywalking-java-agent-9-0-0/"},{"content":"SkyWalking PHP 0.6.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Polish doc about Swoole by @wu-sheng in https://github.com/apache/skywalking-php/pull/73 Start 0.6.0 development. by @jmjoy in https://github.com/apache/skywalking-php/pull/74 Fix hook for Doctrine PDO class by @matikij in https://github.com/apache/skywalking-php/pull/76 Log Exception in tracing span when throw. by @jmjoy in https://github.com/apache/skywalking-php/pull/75 Upgrade dependencies and adapt. by @jmjoy in https://github.com/apache/skywalking-php/pull/77 Fix required rust version and add runing php-fpm notice in docs. 
by @jmjoy in https://github.com/apache/skywalking-php/pull/78 Bump openssl from 0.10.48 to 0.10.55 by @dependabot in https://github.com/apache/skywalking-php/pull/79 Fix the situation where the redis port is string. by @jmjoy in https://github.com/apache/skywalking-php/pull/80 Optionally enable zend observer api for auto instrumentation. by @jmjoy in https://github.com/apache/skywalking-php/pull/81 Fix the empty span situation in redis after hook. by @jmjoy in https://github.com/apache/skywalking-php/pull/82 Add mongodb pluhgin. by @jmjoy in https://github.com/apache/skywalking-php/pull/83 Update rust nightly toolchain in CI and format. by @jmjoy in https://github.com/apache/skywalking-php/pull/84 Add notice document for skywalking_agent.enable. by @jmjoy in https://github.com/apache/skywalking-php/pull/85 Upgrade dependencies. by @jmjoy in https://github.com/apache/skywalking-php/pull/86 Fix docs by @heyanlong in https://github.com/apache/skywalking-php/pull/87 Add kafka reporter. by @jmjoy in https://github.com/apache/skywalking-php/pull/88 Release SkyWalking PHP Agent 0.6.0 by @jmjoy in https://github.com/apache/skywalking-php/pull/89  New Contributors  @matikij made their first contribution in https://github.com/apache/skywalking-php/pull/76  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.5.0...v0.6.0\nPECL https://pecl.php.net/package/skywalking_agent/0.6.0\n","title":"Release Apache SkyWalking PHP 0.6.0","url":"/events/release-apache-skwaylking-php-0-6-0/"},{"content":"On Aug. 10th, 2023, HashiCorp announced to adopt the Business Source License (BSL) from Mozilla Public License v2.0 (MPL 2.0), here is their post. They officially annouced they have changed the license for the ALL of their open-source products from the previous MPL 2.0 to a source-available license, BSL 1.1. Meanwhile, HashiCorp APIs, SDKs, and almost all other libraries will remain MPL 2.0.\nHashiCorp Inc. is one of the most important vendors in the cloud-native landscape, as well as Golang ecosystem. This kind of changes would have potential implications for SkyWalking, which is closely integrated with cloud-native technology stacks.\nConclusion First  What does that mean for SkyWalking users?  SkyWalking community has evaluated our dependencies from HashiCorp products and libraries, the current conclusion is\nSkyWalking users would NOT suffer any implication. 
All components of SkyWalking don\u0026rsquo;t have hard-dependency on BSL license affected codes.\nSkyWalking community have found out all following dependencies of all relative repositories, all licenses are TRUELY stayed unchanged, and compatible with Apache 2.0 License.\n OAP Server @kezhenxu94 @wu-sheng  consul-client Apache 2.0 Repo archived on Jul 27, 2023   BanyanDB @hanahmily @lujiajing1126  Server @hanahmily  hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0   CLI @hanahmily No HashiCorp Dependency   SkyWalking OAP CLI @kezhenxu94  github.com/hashicorp/hcl v1.0.0 MPL-2.0 All under swck as transitive dependencies   SWCK @hanahmily  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/go.net BSD-3 hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0   Go agent @mrproliu  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-hclog MIT hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-kms-wrapping/entropy MPL-2.0 hashicorp/go-kms-wrapping/entropy/v2 MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-plugin MPL-2.0 hashicorp/go-retryablehttp MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-secure-stdlib/base62 MPL-2.0 hashicorp/go-secure-stdlib/mlock MPL-2.0 hashicorp/go-secure-stdlib/parseutil MPL-2.0 hashicorp/go-secure-stdlib/password MPL-2.0 hashicorp/go-secure-stdlib/tlsutil MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/go-version MPL-2.0 hashicorp/go.net BSD-3-Clause hashicorp/golang-lru MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0 hashicorp/vault/api MPL-2.0 hashicorp/vault/sdk MPL-2.0 hashicorp/yamux MPL-2.0   SkyWalking eyes @kezhenxu94  none   SkyWalking Infra e2e @kezhenxu94  all under swck as transitive dependencies   SkyWalking rover(ebpf agent) @mrproliu  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-hclog MIT hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-retryablehttp MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0   SkyWalking satellite @mrproliu  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/go.net BSD-3-Clause hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0   SkyWalking Terraform (scripts) @kezhenxu94  No HashiCorp Dependency The scripts for Terraform users only. No hard requirement.    
The GitHub ID is listed about the PMC members did the evaluations.\nFAQ If I am using Consul to manage SkyWalking Cluster or configurations, does this license change bring an implication? YES, anyone using their server sides would be affected once you upgrade to later released versions after Aug. 10th, 2023.\nThis is HashiCorp\u0026rsquo;s statement\n End users can continue to copy, modify, and redistribute the code for all non-commercial and commercial use, except where providing a competitive offering to HashiCorp. Partners can continue to build integrations for our joint customers. We will continue to work closely with the cloud service providers to ensure deep support for our mutual technologies. Customers of enterprise and cloud-managed HashiCorp products will see no change as well. Vendors who provide competitive services built on our community products will no longer be able to incorporate future releases, bug fixes, or security patches contributed to our products.\n So, notice that, the implication about whether voilating BSL 1.1 is determined by the HashiCorp Inc about the status of the identified competitive relationship. We can\u0026rsquo;t provide any suggestions. Please refer to FAQs and contacts for the official explanations.\nWill SkyWalking continoue to use HashiCorp Consul as an optional cluster coordinator and/or an optional dynamic configuration server? For short term, YES, we will keep that part of codes, as the licenses of the SDK and the APIs are still in the MPL 2.0.\nBut, during the evaluation, we noticed the consul client we are using is rickfast/consul-client which had been archived by the owner on Jul 27, 2023. So, we are facing the issues that no maintaining and no version to upgrade. If there is not a new consul Java client lib available, we may have to remove this to avoid CVEs or version incompatible with new released servers.\n","title":"The Statement for SkyWalking users on HashiCorp license changes","url":"/blog/2023-08-13-hashicorp-bsl/"},{"content":"SkyWalking Rust 0.8.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Add kafka reporter. by @jmjoy in https://github.com/apache/skywalking-rust/pull/61 Rename AbstractSpan to HandleSpanObject. by @jmjoy in https://github.com/apache/skywalking-rust/pull/62 Bump to 0.8.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/63  ","title":"Release Apache SkyWalking Rust 0.8.0","url":"/events/release-apache-skywalking-rust-0-8-0/"},{"content":"SkyWalking Cloud on Kubernetes 0.8.0 is released. Go to downloads page to find release tars.\nFeatures  [Breaking Change] Remove the way to configure the agent through Configmap.  Bugs  Fix errors in banyandb e2e test.  Chores  Bump up golang to v1.20. Bump up golangci-lint to v1.53.3. Bump up skywalking-java-agent to v8.16.0. Bump up kustomize to v4.5.6. Bump up SkyWalking OAP to 9.5.0.  ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.8.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-8-0/"},{"content":"Announcing Apache SkyWalking Go 0.2.0 I\u0026rsquo;m excited to announce the release of Apache SkyWalking Go 0.2.0! This version packs several awesome new features that I\u0026rsquo;ll overview below.\nLog Reporting The log reporting feature allows the Go agent to automatically collect log content from supported logging frameworks like logrus and zap. The logs are organized and sent to the SkyWalking backend for visualization. 
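For example, with zap (another supported framework; the logrus case is shown below), no extra wiring is needed beyond importing the agent and building the application with the agent, as described in the quick-start docs. The snippet is only a sketch under that assumption:
package main

import (
	_ "github.com/apache/skywalking-go" // agent takes effect when the app is rebuilt with the agent
	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	// Once instrumented, this entry is also forwarded to the SkyWalking backend as a log record.
	logger.Info("order created", zap.String("module", "test-service"))
}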
You can see how the logs appear for each service in the SkyWalking UI:\nMaking Logs Searchable You can configure certain log fields to make them searchable in SkyWalking. Set the SW_AGENT_LOG_REPORTER_LABEL_KEYS environment variable to include additional fields beyond the default log level.\nFor example, with logrus:\n# define log with fields logrus.WithField(\u0026#34;module\u0026#34;, \u0026#34;test-service\u0026#34;).Info(\u0026#34;test log\u0026#34;) Metrics Reporting The agent can now collect and report custom metrics data from runtime/metrics to the backend. Supported metrics are documented here.\nAutomatic Instrumentation In 0.1.0, you had to manually integrate the agent into your apps. Now, the new commands can automatically analyze and instrument projects at a specified path, no code changes needed! Try using the following command to import skywalking-go into your project:\n# inject to project at current path skywalking-go-agent -inject=./ -all Or you can still use the original manual approach if preferred.\nGet It Now! Check out the CHANGELOG for the full list of additions and fixes. I encourage you to try out SkyWalking Go 0.2.0 today! Let me know if you have any feedback.\n","title":"New Features of SkyWalking Go 0.2.0","url":"/blog/2023-07-31-skywalking-go-0.2.0-release/"},{"content":"SkyWalking Go 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance the plugin rewrite ability to support switch and if/else in the plugin codes. Support inject the skywalking-go into project through agent. Support add configuration for plugin. Support metrics report API for plugin. Support report Golang runtime metrics. Support log reporter. Enhance the logrus logger plugin to support adapt without any settings method invoke. Disable sending observing data if the gRPC connection is not established for reducing the connection error log. Support enhance vendor management project. Support using base docker image to building the application.  Plugins  Support go-redis v9 redis client framework. Support collecting Native HTTP URI parameter on server side. Support Mongo database client framework. Support Native SQL database client framework with MySQL Driver. Support Logrus log report to the backend. Support Zap log report to the backend.  Documentation  Combine Supported Libraries and Performance Test into Plugins section. Add Tracing, Metrics and Logging document into Plugins section.  Bug Fixes  Fix throw panic when log the tracing context before agent core initialized. Fix plugin version matcher tryToFindThePluginVersion to support capital letters in module paths and versions.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Go 0.2.0","url":"/events/release-apache-skwaylking-go-0.2.0/"},{"content":"今年 COSCUP 2023 在国立台湾科技大学举办。 COSCUP 是由台湾开放原始码社群联合推动的年度研讨会,起源于2006年,是台湾自由软体运动 (FOSSM) 重要的推动者之一。活动包括有讲座、摊位、社团同乐会等,除了邀请国际的重量级演讲者之外,台湾本土的自由软体推动者也经常在此发表演说,会议的发起人、工作人员与演讲者都是志愿参与的志工。COSCUP 的宗旨在于提供一个连接开放原始码开发者、使用者与推广者的平台。希望借由每年一度的研讨会来推动自由及开放原始码软体 (FLOSS)。由于有许多赞助商及热心捐助者,所有议程都是免费参加。\n在Go语言中使用自动增强探针完成链路追踪以及监控 B站视频地址\n刘晗,Tetrate\n  讲师介绍 刘晗,Tetrate 工程师,Apache SkyWalking PMC 成员,专注于应用性能可观测性领域。\n  议题概要\n   为什么需要自动增强探针 Go Agent演示 实现原理 未来展望  ","title":"[视频] 在Go语言中使用自动增强探针完成链路追踪以及监控 - COSCUP Taiwan 2023","url":"/zh/2023-07-30-complete-auto-instrumentation-go-agent-for-distributed-tracing-and-monitoring/"},{"content":"SkyWalking Kubernetes Helm Chart 4.5.0 is released. 
Go to downloads page to find release tars.\n Add helm chart for swck v0.7.0. Add pprof port export in satellite. Trunc the resource name in swck\u0026rsquo;s helm chart to no more than 63 characters. Adding the configmap into cluster role for oap init mode. Add config to set Pod securityContext. Keep the job name prefix the same as OAP Deployment name. Use startup probe option for first initialization of application Allow setting env for UI deployment. Add Istio ServiceEntry permissions.  ","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.5.0","url":"/events/release-apache-skywalking-kubernetes-helm-chart-4.5.0/"},{"content":"SkyWalking BanyanDB 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Add TSDB concept document. [UI] Add YAML editor for inputting query criteria. Refactor TopN to support NULL group while keeping seriesID from the source measure. Add a sharded buffer to TSDB to replace Badger\u0026rsquo;s memtable. Badger KV only provides SST. Add a meter system to control the internal metrics. Add multiple metrics for measuring the storage subsystem. Refactor callback of TopNAggregation schema event to avoid deadlock and reload issue. Fix max ModRevision computation with inclusion of TopNAggregation Enhance meter performance Reduce logger creation frequency Add units to memory flags Introduce TSTable to customize the block\u0026rsquo;s structure Add /system endpoint to the monitoring server that displays a list of nodes' system information. Enhance the liaison module by implementing access logging. Add the Istio scenario stress test based on the data generated by the integration access log. Generalize the index\u0026rsquo;s docID to uint64. Remove redundant ID tag type. Improve granularity of index in measure by leveling up from data point to series. [UI] Add measure CRUD operations. [UI] Add indexRule CRUD operations. [UI] Add indexRuleBinding CRUD operations.  Bugs  Fix iterator leaks and ensure proper closure and introduce a closer to guarantee all iterators are closed Fix resource corrupts caused by update indexRule operation Set the maximum integer as the limit for aggregation or grouping operations when performing aggregation or grouping operations in a query plan.  Chores  Bump go to 1.20. Set KV\u0026rsquo;s minimum memtable size to 8MB [docs] Fix docs crud examples error Modified TestGoVersion to check for CPU architecture and Go Version Bump node to 18.16  ","title":"Release Apache SkyWalking BanyanDB 0.4.0","url":"/events/release-apache-skywalking-banyandb-0-4-0/"},{"content":"Background In previous articles, We have discussed how to use SkyWalking and eBPF for performance problem detection within processes and networks. They are good methods to locate issues, but still there are some challenges:\n The timing of the task initiation: It\u0026rsquo;s always challenging to address the processes that require performance monitoring when problems occur. Typically, manual engagement is required to identify processes and the types of performance analysis necessary, which cause extra time during the crash recovery. The root cause locating and the time of crash recovery conflict with each other from time to time. In the real case, rebooting would be the first choice of recovery, meanwhile, it destroys the site of crashing. Resource consumption of tasks: The difficulties to determine the profiling scope. Wider profiling causes more resources than it should. 
We need a method to manage resource consumption and understand which processes actually require performance analysis. Engineer capabilities: On-call is usually covered by the whole team, which includes both junior and senior engineers. Even senior engineers have limits to their understanding of a complex distributed system; it is nearly impossible for a single person to understand the whole system.  Continuous Profiling is a newly created mechanism to resolve the above issues.\nAutomate Profiling Since profiling is resource-intensive and requires a lot of experience, why not introduce a method that narrows the scope and automates profiling, driven by policies created by senior SRE engineers? So, in 9.5.0, SkyWalking first introduced preset policy rules that let the eBPF Agent monitor specific services in a low-overhead manner and run profiling automatically when necessary.\nPolicy Policy rules specify how to monitor target processes and determine the type of profiling task to initiate when certain threshold conditions are met.\nThese policy rules primarily consist of the following configuration information:\n Monitoring type: This specifies what kind of monitoring should be implemented on the target process. Threshold determination: This defines how to determine whether the target process requires the initiation of a profiling task. Trigger task: This specifies what kind of performance analysis task should be initiated.  Monitoring type The type of monitoring is determined by observing the data values of a specified process to generate corresponding metrics. These metric values then facilitate the subsequent threshold judgments. In eBPF observation, we believe the following metrics most directly reflect the current performance of the program:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Network related monitoring Monitoring network-type metrics is not as simple as obtaining basic process information. It requires initiating eBPF programs and attaching them to the target process for observation. This is similar to the principles of the network profiling task we introduced in the previous article, except that we no longer collect the full content of the data packets. Instead, we only collect the content of messages that match specified HTTP prefixes.\nBy using this method, we can significantly reduce the number of times the kernel sends data to user space, and the user-space program can parse the data content with less system resource usage.
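The prefix check itself is simple; the following self-contained Go sketch illustrates the idea (the prefix list and function name are assumptions for illustration, not the actual code of the eBPF Agent):
package main

import (
	"bytes"
	"fmt"
)

// Assumed prefixes: common HTTP request methods plus the response status line.
var httpPrefixes = [][]byte{
	[]byte("GET "), []byte("POST "), []byte("PUT "), []byte("DELETE "),
	[]byte("HEAD "), []byte("PATCH "), []byte("OPTIONS "), []byte("HTTP/1."),
}

// looksLikeHTTP reports whether a captured buffer starts with a known HTTP prefix,
// so that only HTTP payloads are handed to the more expensive protocol parsing step.
func looksLikeHTTP(buf []byte) bool {
	for _, p := range httpPrefixes {
		if bytes.HasPrefix(buf, p) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(looksLikeHTTP([]byte("GET /provider HTTP/1.1\r\n"))) // true
	fmt.Println(looksLikeHTTP([]byte{0x16, 0x03, 0x01}))             // false: not plain-text HTTP
}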
This ultimately helps in conserving system resources.\nMetrics collector The eBPF agent would report metrics of processes periodically as follows to indicate the process performance in time.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    Threshold determination For the threshold determination, the judgement is made by the eBPF Agent based on the target monitoring process in its own memory, rather than relying on calculations performed by the SkyWalking backend. The advantage of this approach is that it doesn\u0026rsquo;t have to wait for the results of complex backend computations, and it reduces potential issues brought about by complicated interactions.\nBy using this method, the eBPF Agent can swiftly initiate tasks immediately after conditions are met, without any delay.\nIt includes the following configuration items:\n Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started.  Trigger task When the eBPF Agent detects that the threshold determination in the specified policy meets the rules, it can initiate the corresponding task according to pre-configured rules. For each different target performance task, their task initiation parameters are different:\n On/Off CPU Profiling: It automatically performs performance analysis on processes that meet the conditions, defaulting to 10 minutes of monitoring. Network Profiling: It performs network performance analysis on all processes in the same Service Instance on the current machine, to prevent the cause of the issue from being unrealizable due to too few process being collected, defaulting to 10 minutes of monitoring.  Once the task is initiated, no new profiling tasks would be started for the current process for a certain period. The main reason for this is to prevent frequent task creation due to low threshold settings, which could affect program execution. The default time period is 20 minutes.\nData Flow The figure 1 illustrates the data flow of the continuous profiling feature:\nFigure 1: Data Flow of Continuous Profiling\neBPF Agent with Process Firstly, we need to ensure that the eBPF Agent and the process to be monitored are deployed on the same host machine, so that we can collect relevant data from the process. When the eBPF Agent detects a threshold validation rule that conforms to the policy, it immediately triggers the profiling task for the target process, thereby reducing any intermediate steps and accelerating the ability to pinpoint performance issues.\nSliding window The sliding window plays a crucial role in the eBPF Agent\u0026rsquo;s threshold determination process, as illustrated in the figure 2:\nFigure 2: Sliding Window in eBPF Agent\nEach element in the array represents the data value for a specified second in time. 
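To make the threshold/period/count mechanics described above concrete, here is a minimal Go sketch of how such a per-second sliding-window check could work. This is an illustration only: the type and field names (slidingWindow, period, threshold, count) are assumptions for the example, not the eBPF agent's actual implementation.

```go
package main

import "fmt"

// slidingWindow is an illustrative model of the per-second ring of samples
// described above; names and layout are assumptions, not the agent's real code.
type slidingWindow struct {
	samples   []float64 // one data value per second, newest last
	period    int       // how many recent seconds to inspect
	threshold float64   // value that marks a second as exceeding the rule
	count     int       // how many exceeding seconds trigger a profiling task
}

// add appends the latest one-second sample and drops anything older than period.
func (w *slidingWindow) add(v float64) {
	w.samples = append(w.samples, v)
	if len(w.samples) > w.period {
		w.samples = w.samples[len(w.samples)-w.period:]
	}
}

// shouldTrigger counts the seconds in the recent period that exceed the
// threshold; reaching count means a profiling task should be started.
func (w *slidingWindow) shouldTrigger() bool {
	over := 0
	for _, v := range w.samples {
		if v > w.threshold {
			over++
		}
	}
	return over >= w.count
}

func main() {
	// e.g. "process CPU > 80% for at least 3 of the last 10 seconds"
	w := &slidingWindow{period: 10, threshold: 80, count: 3}
	for _, cpu := range []float64{40, 95, 50, 90, 60, 85, 70, 55, 65, 75} {
		w.add(cpu)
	}
	fmt.Println("trigger profiling task:", w.shouldTrigger()) // true: 3 seconds above 80
}
```

Note how a single spike would not satisfy the count check in this sketch, which is exactly the "data spikes" benefit the sliding window is described as providing.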
When the sliding window needs to verify whether it is responsible for a rule, it fetches the content of each element from a certain number of recent elements (period parameter). If an element exceeds the threshold, it is marked in red and counted. If the number of red elements exceeds a certain number, it is deemed to trigger a task.\nUsing a sliding window offers the following two advantages:\n Fast retrieval of recent content: With a sliding window, complex calculations are unnecessary. You can know the data by simply reading a certain number of recent array elements. Solving data spikes issues: Validation through count prevents situations where a data point suddenly spikes and then quickly returns to normal. Verification with multiple values can reveal whether exceeding the threshold is frequent or occasional.  eBPF Agent with SkyWalking Backend The eBPF Agent communicates periodically with the SkyWalking backend, involving three most crucial operations:\n Policy synchronization: Through periodic policy synchronization, the eBPF Agent can keep processes on the local machine updated with the latest policy rules as much as possible. Metrics sending: For processes that are already being monitored, the eBPF Agent periodically sends the collected data to the backend program. This facilitates real-time query of current data values by users, who can also compare this data with historical values or thresholds when problems arise. Profiling task reporting: When the eBPF detects that a certain process has triggered a policy rule, it automatically initiates a performance task, collects relevant information from the current process, and reports it to the SkyWalking backend. This allows users to know when, why, and what type of profiling task was triggered from the interface.  Demo Next, let\u0026rsquo;s quickly demonstrate the continuous profiling feature, so you can understand more specifically what it accomplishes.\nDeploy SkyWalking Showcase SkyWalking Showcase contains a complete set of example services and can be monitored using SkyWalking. For more information, please check the official documentation.\nIn this demo, we only deploy service, the latest released SkyWalking OAP, and UI.\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.5.0 export SW_UI_IMAGE=apache/skywalking-ui:9.5.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.5.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes After deployment is complete, please run the following script to open SkyWalking UI: http://localhost:8080/.\nkubectl port-forward svc/ui 8080:8080 --namespace default Create Continuous Profiling Policy Currently, continues profiling feature is set by default in the Service Mesh panel at the Service level.\nFigure 3: Continuous Policy Tab\nBy clicking on the edit button aside from the Policy List, the polices of current service could be created or updated.\nFigure 4: Edit Continuous Profiling Policy\nMultiple polices are supported. Every policy has the following configurations.\n Target Type: Specifies the type of profiling task to be triggered when the threshold determination is met. Items: For profiling task of the same target, one or more validation items can be specified. As long as one validation item meets the threshold determination, the corresponding performance analysis task will be launched.  Monitor Type: Specifies the type of monitoring to be carried out for the target process. 
Threshold: Depending on the type of monitoring, you need to fill in the corresponding threshold to complete the verification work. Period: Specifies the number of recent seconds of data you want to monitor. Count: Determines the total number of seconds triggered within the recent period. URI Regex/List: This is applicable to HTTP monitoring types, allowing URL filtering.    Done After clicking the save button, you can see the currently created monitoring rules, as shown in the figure 5:\nFigure 5: Continuous Profiling Monitoring Processes\nThe data can be divided into the following parts:\n Policy list: On the left, you can see the rule list you have created. Monitoring Summary List: Once a rule is selected, you can see which pods and processes would be monitored by this rule. It also summarizes how many profiling tasks have been triggered in the last 48 hours by the current pod or process, as well as the last trigger time. This list is also sorted in descending order by the number of triggers to facilitate your quick review.  When you click on a specific process, a new dashboard would show to list metrics and triggered profiling results.\nFigure 6: Continuous Profiling Triggered Tasks\nThe current figure contains the following data contents:\n Task Timeline: It lists all profiling tasks in the past 48 hours. And when the mouse hovers over a task, it would also display detailed information:  Task start and end time: It indicates when the current performance analysis task was triggered. Trigger reason: It would display the reason why the current process was profiled and list out the value of the metric exceeding the threshold when the profiling was triggered. so you can quickly understand the reason.   Task Detail: Similar to the CPU Profiling and Network Profiling introduced in previous articles, this would display the flame graph or process topology map of the current task, depending on the profiling type.  Meanwhile, on the Metrics tab, metrics relative to profiling policies are collected to retrieve the historical trend, in order to provide a comprehensive explanation of the trigger point about the profiling.\nFigure 7: Continuous Profiling Metrics\nConclusion In this article, I have detailed how the continuous profiling feature in SkyWalking and eBPF works. In general, it involves deploying the eBPF Agent service on the same machine where the process to be monitored resides, and monitoring the target process with low resource consumption. When it meets the threshold conditions, it would initiate more complex CPU Profiling and Network Profiling tasks.\nIn the future, we will offer even more features. Stay tuned!\n Twitter, ASFSkyWalking Slack. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. Subscribe to our medium list.  ","title":"Activating Automatical Performance Analysis -- Continuous Profiling","url":"/blog/2023-06-25-intruducing-continuous-profiling-skywalking-with-ebpf/"},{"content":"SkyWalking CLI 0.12.0 is released. 
Go to downloads page to find release tars.\n Add the sub-command records list for adapt the new record query API by @mrproliu in https://github.com/apache/skywalking-cli/pull/167 Add the attached events fields into the trace sub-command by @mrproliu in https://github.com/apache/skywalking-cli/pull/169 Add the sampling config file into the profiling ebpf create network sub-command by @mrproliu in https://github.com/apache/skywalking-cli/pull/171 Add the sub-command profiling continuous for adapt the new continuous profiling API by @mrproliu in https://github.com/apache/skywalking-cli/pull/173 Adapt the sub-command metrics for deprecate scope fron entity by @mrproliu in https://github.com/apache/skywalking-cli/pull/173 Add components in topology related sub-commands. @mrproliu in https://github.com/apache/skywalking-cli/pull/175 Add the sub-command metrics nullable for query the nullable metrics value. @mrproliu in https://github.com/apache/skywalking-cli/pull/176 Adapt the sub-command profiling trace for adapt the new trace profiling protocol. @mrproliu in https://github.com/apache/skywalking-cli/pull/177 Add isEmptyValue field in metrics related sub-commands. @mrproliu in https://github.com/apache/skywalking-cli/pull/180 Add the sub-command metrics execute for execute the metrics query. @mrproliu in https://github.com/apache/skywalking-cli/pull/182 Add the sub-command profiling continuous monitoring for query all continuous profiling monitoring instances. @mrproliu in https://github.com/apache/skywalking-cli/pull/182 Add continuousProfilingCauses.message field in the profiling ebpf list comamnds by @mrproliu in https://github.com/apache/skywalking-cli/pull/184  ","title":"Release Apache SkyWalking CLI 0.12.0","url":"/events/release-apache-skywalking-cli-0-12-0/"},{"content":"SkyWalking Rover 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance the protocol reader for support long socket data. Add the syscall level event to the trace. Support OpenSSL 3.0.x. Optimized the data structure in BPF. Support continuous profiling. Improve the performance when getting goid in eBPF. Support build multiple architecture docker image: x86_64, arm64.  Bug Fixes  Fix HTTP method name in protocol analyzer. Fixed submitting multiple network profiling tasks with the same uri causing the rover to restart.  Documentation Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Rover 0.5.0","url":"/events/release-apache-skwaylking-rover-0-5-0/"},{"content":"SkyWalking Satellite 1.2.0 is released. Go to downloads page to find release tars.\nFeatures  Introduce pprof module. Support export multiple telemetry service. Update the base docker image. Add timeout configuration for gRPC client. Reduce log print when the enqueue data to the pipeline error. Support transmit the Continuous Profiling protocol.  Bug Fixes  Fix CVE-2022-41721. Use Go 19 to build the Docker image to fix CVEs.  
Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 1.2.0","url":"/events/release-apache-skwaylking-satellite-1-2-0/"},{"content":"背景 在之前的文章中,我们讨论了如何使用 SkyWalking 和 eBPF 来检测性能问题,包括进程和网络。这些方法可以很好地定位问题,但仍然存在一些挑战:\n 任务启动的时间: 当需要进行性能监控时,解决需要性能监控的进程始终是一个挑战。通常需要手动参与,以标识进程和所需的性能分析类型,这会在崩溃恢复期间耗费额外的时间。根本原因定位和崩溃恢复时间有时会发生冲突。在实际情况中,重新启动可能是恢复的第一选择,同时也会破坏崩溃的现场。 任务的资源消耗: 确定分析范围的困难。过宽的分析范围会导致需要更多的资源。我们需要一种方法来管理资源消耗并了解哪些进程需要性能分析。 工程师能力: 通常由整个团队负责呼叫,其中有初级和高级工程师,即使是高级工程师也对复杂的分布式系统有其理解限制,单个人几乎无法理解整个系统。  持续剖析(Continuous Profiling) 是解决上述问题的新机制。\n自动剖析 由于性能分析的资源消耗和高经验要求,因此引入一种方法以缩小范围并由高级 SRE 工程师创建策略自动剖析。因此,在 9.5.0 中,SkyWalking 首先引入了预设策略规则,以低功耗方式监视特定服务的 eBPF 代理,并在必要时自动运行剖析。\n策略 策略规则指定了如何监视目标进程并确定在满足某些阈值条件时应启动何种类型的分析任务。\n这些策略规则主要包括以下配置信息:\n 监测类型: 这指定了应在目标进程上实施什么样的监测。 阈值确定: 这定义了如何确定目标进程是否需要启动分析任务。 触发任务: 这指定了应启动什么类型的性能分析任务。  监测类型 监测类型是通过观察指定进程的数据值来生成相应的指标来确定的。这些指标值可以促进后续的阈值判断操作。在 eBPF 观测中,我们认为以下指标最能直接反映程序的当前性能:\n   监测类型 单位 描述     系统负载 负载 在指定时间段内的系统负载平均值。   进程 CPU 百分比 进程的 CPU 使用率百分比。   进程线程计数 计数 进程中的线程数。   HTTP 错误率 百分比 导致错误响应(例如,4xx 或 5xx 状态代码)的 HTTP 请求的百分比。   HTTP 平均响应时间 毫秒 HTTP 请求的平均响应时间。    相关网络监测 监测网络类型的指标不像获取基本进程信息那么简单。它需要启动 eBPF 程序并将其附加到目标进程以进行观测。这类似于我们在先前文章中介绍的网络分析任务,不同的是我们不再收集数据包的完整内容。相反,我们仅收集与指定 HTTP 前缀匹配的消息的内容。\n通过使用此方法,我们可以大大减少内核向用户空间发送数据的次数,用户空间程序可以使用更少的系统资源来解析数据内容。这最终有助于节省系统资源。\n指标收集器 eBPF 代理会定期报告以下进程度量,以指示进程性能:\n   名称 单位 描述     process_cpu (0-100)% CPU 使用率百分比   process_thread_count 计数 进程中的线程数   system_load 计数 最近一分钟的平均系统负载,每个进程的值相同   http_error_rate (0-100)% 网络请求错误率百分比   http_avg_response_time 毫秒 网络平均响应持续时间    阈值确定 对于阈值的确定,eBPF 代理是基于其自身内存中的目标监测进程进行判断,而不是依赖于 SkyWalking 后端执行的计算。这种方法的优点在于,它不必等待复杂后端计算的结果,减少了复杂交互所带来的潜在问题。\n通过使用此方法,eBPF 代理可以在条件满足后立即启动任务,而无需任何延迟。\n它包括以下配置项:\n 阈值: 检查监测值是否符合指定的期望值。 周期: 监控数据的时间周期(秒),也可以理解为最近的持续时间。 计数: 检测期间触发阈值的次数(秒),也可以理解为最近持续时间内指定阈值规则触发的总次数(秒)。一旦满足计数检查,指定的分析任务将被开始。  触发任务 当 eBPF Agent 检测到指定策略中的阈值决策符合规则时,根据预配置的规则可以启动相应的任务。对于每个不同的目标性能任务,它们的任务启动参数都不同:\n On/Off CPU Profiling: 它会自动对符合条件的进程进行性能分析,缺省情况下监控时间为 10 分钟。 Network Profiling: 它会对当前机器上同一 Service Instance 中的所有进程进行网络性能分析,以防问题的原因因被收集进程太少而无法实现,缺省情况下监控时间为 10 分钟。  一旦任务启动,当前进程将在一定时间内不会启动新的剖析任务。主要原因是为了防止因低阈值设置而频繁创建任务,从而影响程序执行。缺省时间为 20 分钟。\n数据流 图 1 展示了持续剖析功能的数据流:\n图 1: 持续剖析的数据流\neBPF Agent进行进程跟踪 首先,我们需要确保 eBPF Agent 和要监测的进程部署在同一台主机上,以便我们可以从进程中收集相关数据。当 eBPF Agent 检测到符合策略的阈值验证规则时,它会立即为目标进程触发剖析任务,从而减少任何中间步骤并加速定位性能问题的能力。\n滑动窗口 滑动窗口在 eBPF Agent 的阈值决策过程中发挥着至关重要的作用,如图 2 所示:\n图 2: eBPF Agent 中的滑动窗口\n数组中的每个元素表示指定时间内的数据值。当滑动窗口需要验证是否负责某个规则时,它从最近的一定数量的元素 (period 参数) 中获取每个元素的内容。如果一个元素超过了阈值,则标记为红色并计数。如果红色元素的数量超过一定数量,则被认为触发了任务。\n使用滑动窗口具有以下两个优点:\n 快速检索最近的内容:使用滑动窗口,无需进行复杂的计算。你可以通过简单地读取一定数量的最近数组元素来了解数据。 解决数据峰值问题:通过计数进行验证,可以避免数据点突然增加然后快速返回正常的情况。使用多个值进行验证可以揭示超过阈值是频繁还是偶然发生的。  eBPF Agent与OAP后端通讯 eBPF Agent 定期与 SkyWalking 后端通信,涉及三个最关键的操作:\n 策略同步:通过定期的策略同步,eBPF Agent 可以尽可能地让本地机器上的进程与最新的策略规则保持同步。 指标发送:对于已经被监视的进程,eBPF Agent 定期将收集到的数据发送到后端程序。这就使用户能够实时查询当前数据值,用户也可以在出现问题时将此数据与历史值或阈值进行比较。 剖析任务报告:当 eBPF 检测到某个进程触发了策略规则时,它会自动启动性能任务,从当前进程收集相关信息,并将其报告给 SkyWalking 后端。这使用户可以从界面了解何时、为什么和触发了什么类型的剖析任务。  演示 接下来,让我们快速演示持续剖析功能,以便你更具体地了解它的功能。\n部署 SkyWalking Showcase SkyWalking Showcase 包含完整的示例服务,并可以使用 SkyWalking 进行监视。有关详细信息,请查看官方文档。\n在此演示中,我们只部署服务、最新发布的 SkyWalking OAP 和 UI。\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.5.0 export SW_UI_IMAGE=apache/skywalking-ui:9.5.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.5.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes 部署完成后,请运行以下脚本以打开 SkyWalking UI:http://localhost:8080/。\nkubectl port-forward svc/ui 8080:8080 --namespace 
default 创建持续剖析策略 目前,持续剖析功能在 Service Mesh 面板的 Service 级别中默认设置。\n图 3: 持续策略选项卡\n通过点击 Policy List 旁边的编辑按钮,可以创建或更新当前服务的策略。\n图 4: 编辑持续剖析策略\n支持多个策略。每个策略都有以下配置。\n Target Type:指定符合阈值决策时要触发的剖析任务的类型。 Items:对于相同目标的剖析任务,可以指定一个或多个验证项目。只要一个验证项目符合阈值决策,就会启动相应的性能分析任务。  Monitor Type:指定要为目标进程执行的监视类型。 Threshold:根据监视类型的不同,需要填写相应的阈值才能完成验证工作。 Period:指定你要监测的最近几秒钟的数据数量。 Count:确定最近时间段内触发的总秒数。 URI 正则表达式/列表:这适用于 HTTP 监控类型,允许 URL 过滤。    完成 单击保存按钮后,你可以看到当前已创建的监控规则,如图 5 所示:\n图 5: 持续剖析监控进程\n数据可以分为以下几个部分:\n 策略列表:在左侧,你可以看到已创建的规则列表。 监测摘要列表:选择规则后,你可以看到哪些 pod 和进程将受到该规则的监视。它还总结了当前 pod 或进程在过去 48 小时内触发的性能分析任务数量,以及最后一个触发时间。该列表还按触发次数降序排列,以便你快速查看。  当你单击特定进程时,将显示一个新的仪表板以列出指标和触发的剖析结果。\n图 6: 持续剖析触发的任务\n当前图包含以下数据内容:\n 任务时间轴:它列出了过去 48 小时的所有剖析任务。当鼠标悬停在任务上时,它还会显示详细信息:  任务的开始和结束时间:它指示当前性能分析任务何时被触发。 触发原因:它会显示为什么会对当前进程进行剖析,并列出当剖析被触发时超过阈值的度量值,以便你快速了解原因。   任务详情:与前几篇文章介绍的 CPU 剖析和网络剖析类似,它会显示当前任务的火焰图或进程拓扑图,具体取决于剖析类型。  同时,在 Metrics 选项卡中,收集与剖析策略相关的指标以检索历史趋势,以便在剖析的触发点提供全面的解释。\n图 7: 持续剖析指标\n结论 在本文中,我详细介绍了 SkyWalking 和 eBPF 中持续剖析功能的工作原理。通常情况下,它涉及将 eBPF Agent 服务部署在要监视的进程所在的同一台计算机上,并以低资源消耗监测目标进程。当它符合阈值条件时,它会启动更复杂的 CPU 剖析和网络剖析任务。\n在未来,我们将提供更多功能。敬请期待!\n Twitter:ASFSkyWalking Slack:向邮件列表 (dev@skywalking.apache.org) 发送“Request to join SkyWalking Slack”,我们会邀请你加入。 订阅我们的 Medium 列表。  ","title":"自动化性能分析——持续剖析","url":"/zh/2023-06-25-intruducing-continuous-profiling-skywalking-with-ebpf/"},{"content":"SkyWalking 9.5.0 is released. Go to downloads page to find release tars.\nNew Topology Layout Elasticsearch Server Monitoring Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP running in no-init mode. Support to bind TLS status as a part of component for service topology. Fix component ID priority bug. Fix component ID of topology overlap due to storage layer bugs. [Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. [Breaking Change] Sharding-MySQL implementations and tests get removed due to we have the day-based rolling mechanism by default Fix otel k8s-cluster rule add namespace dimension for MAL aggregation calculation(Deployment Status,Deployment Spec Replicas) Support continuous profiling feature. Support collect process level related metrics. Fix K8sRetag reads the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing service label. Fix possible NPE when initialize IntList. Support parse PromQL expression has empty labels in the braces for metadata query. Support alarm metric OP !=. Support metrics query indicates whether value == 0 represents actually zero or no data. Fix NPE when query the not exist series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: Remove empty values from the query result, fix /api/v1/metadata param limit could cause out of bound. Support monitoring the total number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix cve. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for aws-firehose receiver Bump up armeria to 1.23.1 Support Elasticsearch Monitoring. Fix PromQL HTTP API /api/v1/series response missing service label when matching metric. Support ServerSide TopN for BanyanDB. 
Add component ID for Jersey. Remove OpenCensus support, the related codes and docs as it\u0026rsquo;s sunsetting. Support dynamic configuration of searchableTracesTags Support exportErrorStatusTraceOnly for export the error status trace segments through the Kafka channel Add component ID for Grizzly. Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.5.0","url":"/events/release-apache-skywalking-apm-9.5.0/"},{"content":"Celebrating 22k Stars! The Apache SkyWalking community is thrilled to reach the milestone of 22k stars on GitHub! 
This showcases its popularity and impact as an APM and observability tool.\nSince launching in 2016 to provide an open source APM solution, SkyWalking has evolved into a full stack observability platform with distributed tracing, metrics monitoring and alerting. It\u0026rsquo;s seeing widespread adoption globally, especially in Asia where APM needs are expanding rapidly.\nThe growing user base has enabled SkyWalking to achieve massive deployments demonstrating its ability to scale to extreme levels. There have been reported deployments collecting over 100TB of data from companies' complex distributed applications, monitoring over 8000 microservices and analyzing 100 billion distributed traces - providing end-to-end visibility, performance monitoring and issue troubleshooting for some of the largest distributed systems in the world.\nThis success and widespread adoption has attracted an active community of nearly 800 contributors, thanks in part to programs like GSoC and OSPP(Open Source Promotion Plan) that bring in university contributors. The SkyWalking team remains focused on building a reliable, performant platform to observe complex distributed systems. We\u0026rsquo;ll continue innovating with features like service mesh monitoring and metric analytics.Your ongoing support, feedback and contributions inspire us!\nThank you for helping SkyWalking reach 22k stars on GitHub! This is just the beginning - we have ambitious plans and can\u0026rsquo;t wait to have you along our journey!\n","title":"Celebrate 22k stars","url":"/blog/2023-06-13-celebrate-22k-stars/"},{"content":"本文演示如何将 Dubbo-Go 应用程序与 SkyWalking Go 集成,并在 SkyWalking UI 中查看结果。\n以前,如果你想要在 SkyWalking 中监控 Golang 应用程序,需要将项目与 go2sky 项目集成,并手动编写各种带有 go2sky 插件的框架。现在,我们有一个全新的项目( Skywalking Go ),允许你将 Golang 项目集成到 SkyWalking 中,几乎不需要编码,同时提供更大的灵活性和可扩展性。\n在本文中,我们将指导你快速将 skywalking-go 项目集成到 dubbo-go 项目中。\n演示包括以下步骤:\n 部署 SkyWalking:这涉及设置 SkyWalking 后端和 UI 程序,使你能够看到最终效果。 使用 SkyWalking Go 编译程序:在这里,你将把 SkyWalking Go Agent 编译到要监控的 Golang 程序中。 应用部署:你将导出环境变量并部署应用程序,以促进你的服务与 SkyWalking 后端之间的通信。 在 SkyWalking UI 上可视化:最后,你将发送请求并在 SkyWalking UI 中观察效果。  部署 SkyWalking 请从官方 SkyWalking 网站下载 SkyWalking APM 程序 。然后执行以下两个命令来启动服务:\n# 启动 OAP 后端 \u0026gt; bin/oapService.sh # 启动 UI \u0026gt; bin/webappService.sh 接下来,你可以访问地址 http://localhost:8080/ 。此时,由于尚未部署任何应用程序,因此你将看不到任何数据。\n使用 SkyWalking GO 编译 Dubbo Go 程序 这里将演示如何将 Dubbo-go 程序与SkyWalking Go Agent集成。请依次执行如下命令来创建一个新的项目:\n# 安装dubbo-go基础环境 \u0026gt; export GOPROXY=\u0026#34;https://goproxy.cn\u0026#34; \u0026gt; go install github.com/dubbogo/dubbogo-cli@latest \u0026gt; dubbogo-cli install all # 创建demo项目 \u0026gt; mkdir demo \u0026amp;\u0026amp; cd demo \u0026gt; dubbogo-cli newDemo . 
# 升级dubbo-go依赖到最新版本 \u0026gt; go get -u dubbo.apache.org/dubbo-go/v3 在项目的根目录中执行以下命令。此命令将下载 skywalking-go 所需的依赖项:\ngo get github.com/apache/skywalking-go 接下来,请分别在服务端和客户端的main包中引入。包含之后,代码将会更新为:\n// go-server/cmd/server.go package main import ( \u0026#34;context\u0026#34; ) import ( \u0026#34;dubbo.apache.org/dubbo-go/v3/common/logger\u0026#34; \u0026#34;dubbo.apache.org/dubbo-go/v3/config\u0026#34; _ \u0026#34;dubbo.apache.org/dubbo-go/v3/imports\u0026#34; \u0026#34;helloworld/api\u0026#34; // 引入skywalking-go \t_ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) type GreeterProvider struct { api.UnimplementedGreeterServer } func (s *GreeterProvider) SayHello(ctx context.Context, in *api.HelloRequest) (*api.User, error) { logger.Infof(\u0026#34;Dubbo3 GreeterProvider get user name = %s\\n\u0026#34;, in.Name) return \u0026amp;api.User{Name: \u0026#34;Hello \u0026#34; + in.Name, Id: \u0026#34;12345\u0026#34;, Age: 21}, nil } // export DUBBO_GO_CONFIG_PATH= PATH_TO_SAMPLES/helloworld/go-server/conf/dubbogo.yaml func main() { config.SetProviderService(\u0026amp;GreeterProvider{}) if err := config.Load(); err != nil { panic(err) } select {} } 在客户端代码中除了需要引入skywalking-go之外,还需要在main方法中的最后一行增加主携程等待语句,以防止因为客户端快速关闭而无法将Tracing数据异步发送到SkyWalking后端:\npackage main import ( \u0026#34;context\u0026#34; ) import ( \u0026#34;dubbo.apache.org/dubbo-go/v3/common/logger\u0026#34; \u0026#34;dubbo.apache.org/dubbo-go/v3/config\u0026#34; _ \u0026#34;dubbo.apache.org/dubbo-go/v3/imports\u0026#34; \u0026#34;helloworld/api\u0026#34; // 引入skywalking-go \t_ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) var grpcGreeterImpl = new(api.GreeterClientImpl) // export DUBBO_GO_CONFIG_PATH= PATH_TO_SAMPLES/helloworld/go-client/conf/dubbogo.yaml func main() { config.SetConsumerService(grpcGreeterImpl) if err := config.Load(); err != nil { panic(err) } logger.Info(\u0026#34;start to test dubbo\u0026#34;) req := \u0026amp;api.HelloRequest{ Name: \u0026#34;laurence\u0026#34;, } reply, err := grpcGreeterImpl.SayHello(context.Background(), req) if err != nil { logger.Error(err) } logger.Infof(\u0026#34;client response result: %v\\n\u0026#34;, reply) // 增加主携程等待语句 \tselect {} } 接下来,请从官方 SkyWalking 网站下载 Go Agent 程序 。当你使用 go build 命令进行编译时,请在 bin 目录中找到与当前操作系统匹配的代理程序,并添加 -toolexec=\u0026quot;/path/to/go-agent -a 参数。例如,请使用以下命令:\n# 进入项目主目录 \u0026gt; cd demo # 分别编译服务端和客户端 # -toolexec 参数定义为go-agent的路径 # -a 参数用于强制重新编译所有依赖项 \u0026gt; cd go-server \u0026amp;\u0026amp; go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o go-server cmd/server.go \u0026amp;\u0026amp; cd .. \u0026gt; cd go-client \u0026amp;\u0026amp; go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o go-client cmd/client.go \u0026amp;\u0026amp; cd .. 
应用部署 在开始部署应用程序之前,你可以通过环境变量更改 SkyWalking 中当前应用程序的服务名称。你还可以更改其配置,例如服务器端的地址。有关详细信息,请参阅文档 。\n在这里,我们分别启动两个终端窗口来分别启动服务端和客户端。\n在服务端,将服务的名称更改为dubbo-server:\n# 导出dubbo-go服务端配置文件路径 export DUBBO_GO_CONFIG_PATH=/path/to/demo/go-server/conf/dubbogo.yaml # 导出skywalking-go的服务名称 export SW_AGENT_NAME=dubbo-server ./go-server/go-server 在客户端,将服务的名称更改为dubbo-client:\n# 导出dubbo-go客户端配置文件路径 export DUBBO_GO_CONFIG_PATH=/path/to/demo/go-client/conf/dubbogo.yaml # 导出skywalking-go的服务名称 export SW_AGENT_NAME=dubbo-client ./go-client/go-client 在 SkyWalking UI 上可视化 现在,由于客户端会自动像服务器端发送请求,现在就可以在 SkyWalking UI 中观察结果。\n几秒钟后,重新访问 http://localhost:8080 的 SkyWalking UI。能够在主页上看到部署的 dubbo-server 和 dubbo-client 服务。\n此外,在追踪页面上,可以看到刚刚发送的请求。\n并可以在拓扑图页面中看到服务之间的关系。\n总结 在本文中,我们指导你快速开发dubbo-go服务,并将其与 SkyWalking Go Agent 集成。这个过程也适用于你自己的任意 Golang 服务。最终,可以在 SkyWalking 服务中查看显示效果。如果你有兴趣了解 SkyWalking Go 代理当前支持的框架,请参阅此文档 。\n将来,我们将继续扩展 SkyWalking Go 的功能,添加更多插件支持。所以,请继续关注!\n","title":"使用SkyWalking go agent快速实现Dubbo Go监控","url":"/zh/2023-06-05-quick-start-using-skywalking-go-monitoring-dubbo-go/"},{"content":"SkyWalking Go 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Initialize the agent core and user import library. Support gRPC reporter for management, tracing protocols. Automatic detect the log frameworks and inject the log context.  Plugins  Support Gin framework. Support Native HTTP server and client framework. Support Go Restful v3 framework. Support Dubbo server and client framework. Support Kratos v2 server and client framework. Support Go-Micro v4 server and client framework. Support GORM v2 database client framework.  Support MySQL Driver detection.    Documentation  Initialize the documentation.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Go 0.1.0","url":"/events/release-apache-skwaylking-go-0.1.0/"},{"content":"SkyWalking Java Agent 8.16.0 is released. Go to downloads page to find release tars. Changes by Version\n8.16.0  Exclude synthetic methods for the WitnessMethod mechanism Support ForkJoinPool trace Support clickhouse-jdbc-plugin trace sql parameters Support monitor jetty server work thread pool metric Support Jersey REST framework Fix ClassCastException when SQLServer inserts data [Chore] Exclude org.checkerframework:checker-qual and com.google.j2objc:j2objc-annotations [Chore] Exclude proto files in the generated jar Fix Jedis-2.x plugin can not get host info in jedis 3.3.x+ Change the classloader to locate the agent path in AgentPackagePath, from SystemClassLoader to AgentPackagePath\u0026rsquo;s loader. Support Grizzly Trace Fix possible IllegalStateException when using Micrometer. Support Grizzly Work ThreadPool Metric Monitor Fix the gson dependency in the kafka-reporter-plugin. Fix deserialization of kafka producer json config in the kafka-reporter-plugin. Support to config custom decode methods for kafka configurations  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.16.0","url":"/events/release-apache-skywalking-java-agent-8-16-0/"},{"content":"Background Previously, if you wanted to monitor a Golang application in SkyWalking, you would integrate your project with the go2sky project and manually write various frameworks with go2sky plugins. 
Now, we have a brand-new project (Skywalking Go) that allows you to integrate your Golang projects into SkyWalking with almost zero coding, while offering greater flexibility and scalability.\nIn this article, we will guide you quickly integrating the skywalking-go project into your Golang project.\nQuick start This demonstration will consist of the following steps:\n Deploy SkyWalking: This involves setting up the SkyWalking backend and UI programs, enabling you to see the final effect. Compile Golang with SkyWalking Go: Here, you\u0026rsquo;ll compile the SkyWalking Go Agent into the Golang program you wish to monitor. Application Deployment: You\u0026rsquo;ll export environment variables and deploy the application to facilitate communication between your service and the SkyWalking backend. Visualization on SkyWalking UI: Finally, you\u0026rsquo;ll send requests and observe the effects within the SkyWalking UI.  Deploy SkyWalking Please download the SkyWalking APM program from the official SkyWalking website. Then execute the following two commands to start the service:\n# startup the OAP backend \u0026gt; bin/oapService.sh # startup the UI \u0026gt; bin/webappService.sh Next, you can access the address at http://localhost:8080/. At this point, as no applications have been deployed yet, you will not see any data.\nCompile Golang with SkyWalking GO Here is a simple business application here that starts an HTTP service.\npackage main import \u0026#34;net/http\u0026#34; func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } Execute the following command in the project\u0026rsquo;s root directory. This command will download the dependencies required for skywalking-go:\ngo get github.com/apache/skywalking-go Also, include it in the main package of the project. After the inclusion, the code will update to:\npackage main import ( \u0026#34;net/http\u0026#34; // This is an important step. DON\u0026#39;T MISS IT. \t_ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } Next, please download the Go Agent program from the official SkyWalking website. When you compile with the go build command, find the agent program that matches your current operating system in the bin directory, and add the -toolexec=\u0026quot;/path/to/go-agent -a parameter. For example, use the following command:\n# Build application with SkyWalking go agent # -toolexec parameter define the path of go-agent # -a parameter is used to force rebuild all packages \u0026gt; go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o test . Application Deployment Before you start to deploy the application, you can change the service name of the current application in SkyWalking through environment variables. You can also change its configuration such as the address with the server-side. 
For specific details, please refer to the documentation.\nHere, we\u0026rsquo;re just changing the name of the current service to demo.\n# Change the service name \u0026gt; export SW_AGENT_NAME=demo Next, you can start the application:\n# Start the application \u0026gt; ./test Visualization on SkyWalking UI Now, you can send a request to the application and observe the results in the SkyWalking UI.\n# Send a request \u0026gt; curl http://localhost:8000/hello After a few seconds, you can revisit the SkyWalking UI at http://localhost:8080. You will be able to see the demo service you deployed on the homepage.\nMoreover, on the Trace page, you can see the request you just sent.\nConclusion In this article, we\u0026rsquo;ve guided you to quickly develop a demo service and integrate it with SkyWalking Go Agent. This process is also applicable to your own Golang services. Ultimately, you can view the display effect in the SkyWalking service. If you\u0026rsquo;re interested in learning which frameworks the SkyWalking Go agent currently supports, please refer to this documentation.\nIn the future, we will continue to expand the functionality of SkyWalking Go, adding more plugin support. So, stay tuned!\n","title":"Quick start with SkyWalking Go Agent","url":"/blog/2023-06-01-quick-start-with-skywalking-go-agent/"},{"content":"本文演示如何将应用程序与 SkyWalking Go 集成,并在 SkyWalking UI 中查看结果。\n以前,如果你想要在 SkyWalking 中监控 Golang 应用程序,需要将项目与 go2sky 项目集成,并手动编写各种带有 go2sky 插件的框架。现在,我们有一个全新的项目(Skywalking Go ),允许你将 Golang 项目集成到 SkyWalking 中,几乎不需要编码,同时提供更大的灵活性和可扩展性。\n在本文中,我们将指导你快速将 skywalking-go 项目集成到 Golang 项目中。\n演示包括以下步骤:\n 部署 SkyWalking:这涉及设置 SkyWalking 后端和 UI 程序,使你能够看到最终效果。 使用 SkyWalking Go 编译 Golang:在这里,你将把 SkyWalking Go Agent 编译到要监控的 Golang 程序中。 应用部署:你将导出环境变量并部署应用程序,以促进你的服务与 SkyWalking 后端之间的通信。 在 SkyWalking UI 上可视化:最后,你将发送请求并在 SkyWalking UI 中观察效果。  部署 SkyWalking 请从官方 SkyWalking 网站下载 SkyWalking APM 程序 。然后执行以下两个命令来启动服务:\n# 启动 OAP 后端 \u0026gt; bin/oapService.sh # 启动 UI \u0026gt; bin/webappService.sh 接下来,你可以访问地址 http://localhost:8080/ 。此时,由于尚未部署任何应用程序,因此你将看不到任何数据。\n使用 SkyWalking GO 编译 Golang 这里有一个简单的业务应用程序,启动了一个 HTTP 服务。\npackage main import \u0026#34;net/http\u0026#34; func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } 在项目的根目录中执行以下命令。此命令将下载 skywalking-go 所需的依赖项:\ngo get github.com/apache/skywalking-go 接下来,请将其包含在项目的 main 包中。包含之后,代码将会更新为:\npackage main import ( \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } 接下来,请从官方 SkyWalking 网站下载 Go Agent 程序 。当你使用 go build 命令进行编译时,请在 bin 目录中找到与当前操作系统匹配的代理程序,并添加 -toolexec=\u0026quot;/path/to/go-agent\u0026quot; -a 参数。例如,请使用以下命令:\ngo build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o test . 
应用部署 在开始部署应用程序之前,你可以通过环境变量更改 SkyWalking 中当前应用程序的服务名称。你还可以更改其配置,例如服务器端的地址。有关详细信息,请参阅文档 。\n在这里,我们只是将当前服务的名称更改为 demo。\n接下来,你可以启动应用程序:\nexport SW_AGENT_NAME=demo ./test 在 SkyWalking UI 上可视化 现在,向应用程序发送请求并在 SkyWalking UI 中观察结果。\n几秒钟后,重新访问 http://localhost:8080 的 SkyWalking UI。能够在主页上看到部署的 demo 服务。\n此外,在追踪页面上,可以看到刚刚发送的请求。\n总结 在本文中,我们指导你快速开发 demo 服务,并将其与 SkyWalking Go Agent 集成。这个过程也适用于你自己的 Golang 服务。最终,可以在 SkyWalking 服务中查看显示效果。如果你有兴趣了解 SkyWalking Go 代理当前支持的框架,请参阅此文档 。\n将来,我们将继续扩展 SkyWalking Go 的功能,添加更多插件支持。所以,请继续关注!\n","title":"SkyWalking Go Agent 快速开始指南","url":"/zh/2023-06-01-quick-start-with-skywalking-go-agent/"},{"content":"SkyWalking Rust 0.7.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Obtain Span object without intermediary. by @jmjoy in https://github.com/apache/skywalking-rust/pull/57 Rename module skywalking_proto to proto. by @jmjoy in https://github.com/apache/skywalking-rust/pull/59 Add Span::prepare_for_async method and AbstractSpan trait. by @jmjoy in https://github.com/apache/skywalking-rust/pull/58 Bump to 0.7.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/60  ","title":"Release Apache SkyWalking Rust 0.7.0","url":"/events/release-apache-skywalking-rust-0-7-0/"},{"content":"SkyWalking PHP 0.5.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Bump openssl from 0.10.45 to 0.10.48 by @dependabot in https://github.com/apache/skywalking-php/pull/60 Make the SKYWALKING_AGENT_ENABLE work in the request hook as well. by @jmjoy in https://github.com/apache/skywalking-php/pull/61 Support tracing curl_multi_* api. by @jmjoy in https://github.com/apache/skywalking-php/pull/62 Fix parent endpoint and peer in segment ref and tag url in entry span. by @jmjoy in https://github.com/apache/skywalking-php/pull/63 Bump h2 from 0.3.15 to 0.3.17 by @dependabot in https://github.com/apache/skywalking-php/pull/65 Add amqplib plugin for producer. by @jmjoy in https://github.com/apache/skywalking-php/pull/64 Upgrade and adapt phper. by @jmjoy in https://github.com/apache/skywalking-php/pull/66 Refactor script create_package_xml. by @jmjoy in https://github.com/apache/skywalking-php/pull/67 Refactor predis plugin to hook Client. by @jmjoy in https://github.com/apache/skywalking-php/pull/68 Canonicalize unknown. by @jmjoy in https://github.com/apache/skywalking-php/pull/69 Bump guzzlehttp/psr7 from 2.4.0 to 2.5.0 in /tests/php by @dependabot in https://github.com/apache/skywalking-php/pull/70 Enhance support for Swoole. by @jmjoy in https://github.com/apache/skywalking-php/pull/71 Bump to 0.5.0. by @jmjoy in https://github.com/apache/skywalking-php/pull/72  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.4.0...v0.5.0\nPECL https://pecl.php.net/package/skywalking_agent/0.5.0\n","title":"Release Apache SkyWalking PHP 0.5.0","url":"/events/release-apache-skwaylking-php-0-5-0/"},{"content":"SkyWalking Python 1.0.1 is released! 
Go to downloads page to find release tars.\nPyPI Wheel: https://pypi.org/project/apache-skywalking/1.0.1/\nDockerHub Image: https://hub.docker.com/r/apache/skywalking-python\n  Upgrading from v1.0.0 to v1.0.1 is strongly encouraged\n This is a critical performance-oriented patch to address a CPU surge reported in https://github.com/apache/skywalking/issues/10672    Feature:\n Add a new workflow to push docker images for arm64 and amd64 (#297)    Plugins:\n Optimize loguru reporter plugin.(#302)    Fixes:\n Fix sw8 loss when use aiohttp (#299, issue#10669) Critical: Fix a bug that leads to high cpu usage (#300, issue#10672)    Others:\n Use Kraft mode in E2E Kafka reporter tests (#303)    New Contributors  @Forstwith made their first contribution in https://github.com/apache/skywalking-python/pull/299 @FAWC438 made their first contribution in https://github.com/apache/skywalking-python/pull/300  Full Changelog: https://github.com/apache/skywalking-python/compare/v1.0.0...v1.0.1\n","title":"Release Apache SkyWalking Python 1.0.1","url":"/events/release-apache-skywalking-python-1-0-1/"},{"content":"本次活动于 2023 年 4 月 22 日在北京奥加美术馆酒店举行。该会议旨在探讨和分享有关可观测性的最佳实践, 包括在云原生应用程序和基础架构中实现可观测性的最新技术和工具。与会者将有机会了解行业领袖的最新见解,并与同行们分享经验和知识。 我们期待这次会议能够给云原生社区带来更多的启发和动力,推动我们在可观测性方面的进一步发展。\n圆桌讨论:云原生应用可观测性现状及趋势 B站视频地址\n嘉宾\n 罗广明,主持人 吴晟,Tetrate 创始工程师 向阳,云杉科技研发 VP 乔新亮,原苏宁科技副总裁,现彩食鲜 CTO 董江,中国移动云能力中心高级系统架构专家  为 Apache SkyWalking 构建 Grafana dashboards \u0026ndash; 基于对原生 PromQL 的支持 B站视频地址\n万凯,Tetrate\n  讲师介绍 万凯,Tetrate 工程师,Apache SkyWalking PMC 成员,专注于应用性能可观测性领域。\n  议题概要 本次分享将介绍 Apache SkyWalking 的新特性 PromQL Service,它将为 SkyWalking 带来更广泛的生态集成能力: 什么是 PromQL SkyWalking 的 PromQL Service 是什么,能够做什么 SkyWalking 中的基本概念和 metrics 的特性 如何使用 PromQL Service 使用 PromQL Service 构建 Grafana dashboards 的实践\n  ","title":"[视频] 可观测性峰会2023 - Observability Summit","url":"/zh/2023-04-23-obs-summit-china/"},{"content":"SkyWalking Client JS 0.10.0 is released. Go to downloads page to find release tars.\n Fix the ability of Fetch constructure. Update README. Bump up dependencies.  ","title":"Release Apache SkyWalking Client JS 0.10.0","url":"/events/release-apache-skywalking-client-js-0-10-0/"},{"content":"SkyWalking Java Agent 8.15.0 is released. Go to downloads page to find release tars. Changes by Version\n8.15.0  Enhance lettuce plugin to adopt uniform tags. Expose complete Tracing APIs in the tracing toolkit. Add plugin to trace Spring 6 and Resttemplate 6. Move the baseline to JDK 17 for development, the runtime baseline is still Java 8 compatible. Remove Powermock entirely from the test cases. Fix H2 instrumentation point Refactor pipeline in jedis-plugin. Add plugin to support ClickHouse JDBC driver (0.3.2.*). Refactor kotlin coroutine plugin with CoroutineContext. Fix OracleURLParser ignoring actual port when :SID is absent. Change gRPC instrumentation point to fix plugin not working for server side. Fix servicecomb plugin trace break. Adapt Armeria\u0026rsquo;s plugins to the latest version 1.22.x Fix tomcat-10x-plugin and add test case to support tomcat7.x-8.x-9.x. Fix thrift plugin generate duplicate traceid when sendBase error occurs Support keep trace profiling when cross-thread. Fix unexpected whitespace of the command catalogs in several Redis plugins. Fix a thread leak in SamplingService when updated sampling policy in the runtime. Support MySQL plugin tracing SQL parameters when useServerPrepStmts Update the endpoint name of Undertow plugin to Method:Path. Build a dummy(empty) javadoc of finagle and jdk-http plugins due to incompatibility.  
Documentation  Update docs of Tracing APIs, reorganize the API docs into six parts. Correct missing package name in native manual API docs. Add a FAQ doc about \u0026ldquo;How to make SkyWalking agent works in OSGI environment?\u0026rdquo;  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.15.0","url":"/events/release-apache-skywalking-java-agent-8-15-0/"},{"content":"SkyWalking PHP 0.4.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Bump tokio from 1.24.1 to 1.24.2 by @dependabot in https://github.com/apache/skywalking-php/pull/52 Bump to 0.4.0-dev by @heyanlong in https://github.com/apache/skywalking-php/pull/53 Avoid potential panic for logger. by @jmjoy in https://github.com/apache/skywalking-php/pull/54 Fix the curl plugin hook curl_setopt by mistake. by @jmjoy in https://github.com/apache/skywalking-php/pull/55 Update documents. by @jmjoy in https://github.com/apache/skywalking-php/pull/56 Upgrade dependencies and adapt the codes. by @jmjoy in https://github.com/apache/skywalking-php/pull/57 Add sub components licenses in dist material. by @jmjoy in https://github.com/apache/skywalking-php/pull/58 Bump to 0.4.0. by @jmjoy in https://github.com/apache/skywalking-php/pull/59  New Contributors  @dependabot made their first contribution in https://github.com/apache/skywalking-php/pull/52  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.3.0...v0.4.0\nPECL https://pecl.php.net/package/skywalking_agent/0.4.0\n","title":"Release Apache SkyWalking PHP 0.4.0","url":"/events/release-apache-skwaylking-php-0-4-0/"},{"content":"Background As an application performance monitoring tool for distributed systems, Apache SkyWalking provides monitoring, tracing, diagnosing capabilities for distributed system in Cloud Native architecture. Prometheus is an open-source systems monitoring and alerting toolkit with an active ecosystem. Especially Prometheus metrics receive widespread support through exporters and integrations. PromQL as Prometheus Querying Language containing a set of expressions and expose HTTP APIs to read metrics.\nSkyWalking supports to ingest Prometheus metrics through OpenTelemetry collector and through the aggregate calculation of these metrics to provide a variety of systems monitoring, such as Linux Monitoring and Kubernetes monitoring. SkyWalking already provides native UI and GraphQL API for users. But as designed to provide wider ecological integration capabilities, since 9.4.0, it provides PromQL Service, the third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through it. SkyWalking users will benefit from it when they integrate with different systems.\nWhat is PromQL Service in SkyWalking? PromQL Service is a query engine on the top of SkyWalking native GraphQL query, with additional query stage calculation capabilities powered by Prometheus expressions. It can accept PromQL HTTP API requests, parse Prometheus expressions, and transform between Prometheus metrics and SkyWalking metrics.\nThe PromQL Service follows all PromQL\u0026rsquo;s protocols and grammar and users can use it as they would with PromQL. As SkyWalking is fundamentally different from Prometheus in terms of metric classification, format, storage, etc. PromQL Service doesn\u0026rsquo;t have to implement the full PromQL feature. 
Refer to the documentation for the detail.\nSkyWalking Basic Concepts Here are some basic concepts and differences from Prometheus that users need to understand in order to use the PromQL service: Prometheus metrics specify the naming format and structure, the actual metric names and labels are determined by the client provider, and the details are stored. The user aggregates and calculates the metrics using the expression in PromQL. Unlike Prometheus, SkyWalking\u0026rsquo;s metric mechanism is built around the following core concepts with a hierarchical structure:\n Layer: represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This layer would be the owner of different services detected from different technologies. All Layers definitions can be found here. Service: Represents a set/group of workloads which provides the same behaviors for incoming requests. Service Instance: An individual workload in the Service group. Endpoint: A path in a service for incoming requests. Process: An operating system process. In some scenarios, a service instance is not a process, such as a pod Kubernetes could contain multiple processes.  The metric name and properties (labels) are configured by the SkyWalking OAP server based on the data source as well as OAL and MAL. SkyWalking provides the ability to down-sampling time series metrics, and generate different time bucket data (minute, hour, day).\nThe SkyWalking metric stream is as follows:\nTraffic  The metadata of the Service/ServiceRelation/Instance/ServiceInstanceRelation/Endpoint/EndpointRelation/Process/ProcessRelation. Include names, layers, properties, relations between them, etc.  Metric  Name: metric name, configuration from OAL and MAL. Entity: represents the metrics' belonging and used for the query. An Entity will contain the following information depending on the Scope: Scope represents the metrics level and in query stage represents the Scope catalog, Scope catalog provides high-dimension classifications for all scopes as a hierarchy structure.     Scope Entity Info     Service Service(include layer info)   ServiceInstance Service, ServiceInstance   Endpoint Service, Endpoint   ServiceRelation Service, DestService   ServiceInstanceRelation ServiceInstance, DestServiceInstance   EndpointRelation Endpoint, DestEndpoint   Process Service, ServiceInstance, Process   ProcessRelation Process, ServiceInstance, DestProcess     Value:   single value: long. labeled value: text, label1,value1|label2,value2|..., such as L2 aggregation,5000 | L1 aggregation,8000.   TimeBucket: the time is accurate to minute, hour, day.  How to use PromQL Service Setup PromQL Service is enabled by default after v9.4.0, so no additional configuration is required. The default ports, for example, can be configured by using OAP environment variables:\nrestHost: ${SW_PROMQL_REST_HOST:0.0.0.0} restPort: ${SW_PROMQL_REST_PORT:9090} restContextPath: ${SW_PROMQL_REST_CONTEXT_PATH:/} restMaxThreads: ${SW_PROMQL_REST_MAX_THREADS:200} restIdleTimeOut: ${SW_PROMQL_REST_IDLE_TIMEOUT:30000} restAcceptQueueSize: ${SW_PROMQL_REST_QUEUE_SIZE:0} Use Prometheus expression PromQL matches metric through the Prometheus expression. Here is a typical Prometheus metric.\nTo match the metric, the Prometheus expression is as follows:\nIn the PromQL Service, these reserved labels would be parsed as the metric name and entity info fields with other labels for the query. 
The mappings are as follows.\n   SkyWalking Concepts Prometheus expression     Metric name Metric name   Layer Label   Service Label   ServiceInstance Label\u0026lt;service_instance\u0026gt;   Endpoint Label   \u0026hellip; \u0026hellip;    For example, the following expressions are used to match query metrics: service_cpm, service_instance_cpm, endpoint_cpm\nservice_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} service_instance_cpm{service=\u0026#39;agent::songs\u0026#39;, service_instance=\u0026#39;agent::songs_instance_1\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} endpoint_cpm{service=\u0026#39;agent::songs\u0026#39;, endpoint=\u0026#39;GET:/songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Typical Query Example At here, we take the SkyWalking Showcase deployment as the playground to demonstrate how to use PromQL for SkyWalking metrics.\nThe following examples can be used to query the metadata and metrics of services through PromQL Service.\nGet metrics names Query:\nhttp://localhost:9099/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, ... ] } Select a metric and get the labels Query:\nhttp://localhost:9099/api/v1/labels?match[]=service_cpm Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34; ] } Get services from a specific layer Query:\nhttp://127.0.0.1:9099/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Query specific metric for a service Query:\nhttp://127.0.0.1:9099/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, 
layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1679559960, \u0026#34;6\u0026#34; ] } ] } } About the range query and different metrics type for query can refer to the document here.\nBuild Grafana Dashboard From the above, we know the mechanism and how to query from PromQL Service, now we can build the Grafana Dashboard for the above service example. Note: All the following configurations are based on Grafana version 9.1.0.\nSkyWalking Showcase provides dashboards files such as services of General and Service Mesh layers, we can quickly create a dashboard for the General layer service by importing the dashboard JSON file.\nAfter the Grafana application is deployed, follow the steps below:\nConfigure Data Source First, we need to create a data source: In the data source config panel, chose Prometheus and set the URL to the OAP server address, the default port is 9090. Here set the data source name SkyWalking in case there are multiple Prometheus data sources.\nImport Dashboard File   Create a dashboard folder named SkyWalking.\n  Import the dashboard file into Grafana, there are two ways to get the file:\n From SkyWalking Showcase. Go to SkyWaking Demo: Preview metrics on Grafana, and export it from the General Service dashboard.    Done! Now we can see the dashboard is working, the services are in the drop-down list and the metrics are displayed on the panels.\n  This is an easy way to build, but we need to know how it works if we want to customize it.\nHow the dashboard works Dashboard Settings Open the Settings-Variables we can see the following variables:\nLet\u0026rsquo;s look at what each variable does:\n  $DS_SkyWalking\nThis is a data source ty variable that specifies the Prometheus data source which was defined earlier as SkyWalking.\n  $layer\nThis is a constant type because in the \u0026lsquo;General Service\u0026rsquo; dashboard, all services belong to the \u0026lsquo;GENERAL\u0026rsquo; layer, so they can be used directly in each query Note When you customize other layers, this value must be defined in the Layer mentioned above.\n  $service\nQuery type variable, to get all service names under this layer for the drop-down list.\nQuery expression:\nlabel_values(service_traffic{layer=\u0026#39;$layer\u0026#39;}, service) The query expression will query HTTP API /api/v1/series for service metadata in $layer and fetch the service name according to the label(service).\n  $service_instance\nSame as the $service is a query variable that is used to select all instances of the service in the drop-down list.\nQuery expression:\nlabel_values(instance_traffic{layer=\u0026#39;$layer\u0026#39;, service=\u0026#39;$service\u0026#39;}, service_instance) The query expression here not only specifies the $layer but also contains the variable $service, which is used to correlate with the services for the drop-down list.\n  $endpoint\nSame as the $service is a query variable that is used to select all endpoints of the service in the drop-down list.\nQuery expression:\nlabel_values(endpoint_traffic{layer=\u0026#39;$layer\u0026#39;, 
Build Grafana Dashboard From the above, we know the mechanism and how to query the PromQL Service, so now we can build a Grafana dashboard for the service example above. Note: All the following configurations are based on Grafana version 9.1.0.\nSkyWalking Showcase provides dashboard files for services of the General and Service Mesh layers, so we can quickly create a dashboard for a General layer service by importing the dashboard JSON file.\nAfter the Grafana application is deployed, follow the steps below:\nConfigure Data Source First, we need to create a data source: in the data source config panel, choose Prometheus and set the URL to the OAP server address; the default port is 9090. Set the data source name to SkyWalking here, in case there are multiple Prometheus data sources.\nImport Dashboard File   Create a dashboard folder named SkyWalking.\n  Import the dashboard file into Grafana; there are two ways to get the file:\n From SkyWalking Showcase. Go to the SkyWalking Demo: preview metrics on Grafana and export the file from the General Service dashboard.    Done! Now we can see the dashboard is working: the services are in the drop-down list and the metrics are displayed on the panels.\n  This is an easy way to build the dashboard, but we need to know how it works if we want to customize it.\nHow the dashboard works Dashboard Settings Open Settings-Variables and we can see the following variables:\nLet\u0026rsquo;s look at what each variable does:\n  $DS_SkyWalking\nThis is a data source type variable that specifies the Prometheus data source, which was defined earlier as SkyWalking.\n  $layer\nThis is a constant type variable, because in the \u0026lsquo;General Service\u0026rsquo; dashboard all services belong to the \u0026lsquo;GENERAL\u0026rsquo; layer, so it can be used directly in each query. Note: when you customize other layers, this value must be one of the layers defined in the Layer list mentioned above.\n  $service\nA query type variable that gets all service names under this layer for the drop-down list.\nQuery expression:\nlabel_values(service_traffic{layer=\u0026#39;$layer\u0026#39;}, service) The query expression queries the HTTP API /api/v1/series for service metadata in $layer and fetches the service names according to the label (service).\n  $service_instance\nSame as $service, this is a query variable that is used to select all instances of the service in the drop-down list.\nQuery expression:\nlabel_values(instance_traffic{layer=\u0026#39;$layer\u0026#39;, service=\u0026#39;$service\u0026#39;}, service_instance) The query expression here not only specifies $layer but also contains the variable $service, which correlates the instances with the selected service in the drop-down list.\n  $endpoint\nSame as $service, this is a query variable that is used to select all endpoints of the service in the drop-down list.\nQuery expression:\nlabel_values(endpoint_traffic{layer=\u0026#39;$layer\u0026#39;, service=\u0026#39;$service\u0026#39;, keyword=\u0026#39;$endpoint_keyword\u0026#39;, limit=\u0026#39;$endpoint_limit\u0026#39;}, endpoint) The query expression here specifies $layer and $service, which correlate the endpoints with the selected service in the drop-down list. It also accepts the variables $endpoint_keyword and $endpoint_limit as filtering conditions.\n  $endpoint_keyword\nA text type variable that the user can fill in to filter the values returned for $endpoint.\n  $endpoint_limit\nA custom type variable, which the user can select to limit the maximum number of returned endpoints.\n  Panel Configurations There are several typical metrics panels on this dashboard; let\u0026rsquo;s see how they are configured.\nCommon Value Metrics Select the Time series chart panel Service Apdex and click edit.  Query expression service_apdex{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 10000 The metric scope is Service, so add the labels service and layer for the match; the label values use the variables configured above. The calculation Divided by 10000 is used to match the result units. For details of the query, refer to the document here.\n Set Query options --\u0026gt; Min interval = 1m, because the minimum metrics time bucket in SkyWalking is 1m. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always, because when the query interval is longer than 1 hour or 1 day SkyWalking returns hour/day step metrics values.  Labeled Value Metrics Select the Time series chart panel Service Response Time Percentile and click edit.  Query expression service_percentile{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;, labels=\u0026#39;0,1,2,3,4\u0026#39;, relabels=\u0026#39;P50,P75,P90,P95,P99\u0026#39;} The metric scope is Service, so add the labels service and layer for the match; the label values use the variables configured above. Add labels='0,1,2,3,4' to filter the result labels, and add relabels='P50,P75,P90,P95,P99' to rename the result labels. For details of the query, refer to the document here.\n Set Query options --\u0026gt; Min interval = 1m, because the minimum metrics time bucket in SkyWalking is 1m. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always, because when the query interval is longer than 1 hour or 1 day SkyWalking returns hour/day step metrics values. Set Legend to {{label}} so the label names show up.  Sort Metrics Select the Time series chart panel Service Response Time Percentile and click edit.  Query expression service_instance_cpm{parent_service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;} The expression is used to query the sorted metrics under the service, so add the labels parent_service and layer for the match. Add top_n='10' and order='DES' to filter the result. For details of the query, refer to the document here.\n Set Query options --\u0026gt; Min interval = 1m, because the minimum metrics time bucket in SkyWalking is 1m. Set the Calculation --\u0026gt; Latest*. Set Legend to {{service_instance}} so the instance names show up.  Conclusion In this article, we introduced what the PromQL Service in SkyWalking is and its background. 
Detailed how to use PromQL Service and the basic concepts related to SkyWalking, and show how to use PromQL Service to build Grafana dashboards for SkyWalking.\nIn the future, there will be more integrations by leveraging this protocol, such as CI/CD, HPA (scaling), etc.\n","title":"Build Grafana dashboards for Apache SkyWalking -- Native PromQL Support","url":"/blog/2023-03-17-build-grafana-dashboards-for-apache-skywalking-native-promql-support/"},{"content":"背景 Apache SkyWalking 作为分布式系统的应用性能监控工具,提供了对云原生架构下的分布式系统的监控、跟踪、诊断能力。Prometheus 是一个开源系统监控和警报工具包,具有活跃的生态系统。特别是 Prometheus 指标通过 导出器和集成 得到广泛支持。 PromQL 作为 Prometheus 查询语言,包含一组表达式并公开 HTTP API 以读取指标。\nSkyWalking 支持通过 OpenTelemetry 收集器 摄取 Prometheus 指标,并通过这些指标的聚合计算提供多种系统监控,例如 Linux 监控和 Kubernetes 监控。SkyWalking 已经为用户提供了 原生 UI 和 GraphQL API。但为了提供更广泛的生态整合能力,从 9.4.0 开始,它提供了 PromQL 服务,已经支持 PromQL 的第三方系统或可视化平台(如 Grafana),可以通过它获取指标。SkyWalking 用户在与不同系统集成时将从中受益。\nSkyWalking 中的 PromQL 服务是什么? PromQL 服务是 SkyWalking 原生 GraphQL 查询之上的查询引擎,具有由 Prometheus 表达式提供支持的附加查询阶段计算能力。它可以接受 PromQL HTTP API 请求,解析 Prometheus 表达式,并在 Prometheus 指标和 SkyWalking 指标之间进行转换。\nPromQL 服务遵循 PromQL 的所有协议和语法,用户可以像使用 PromQL 一样使用它。由于 SkyWalking 在度量分类、格式、存储等方面与 Prometheus 有根本不同,因此 PromQL 服务不必实现完整的 PromQL 功能。有关详细信息,请参阅文档。\nSkyWalking 基本概念 以下是用户使用 PromQL 服务需要了解的一些基本概念和与 Prometheus 的区别: Prometheus 指标指定命名格式和结构,实际指标名称和标签由客户端提供商确定,并存储详细信息。用户使用 PromQL 中的表达式聚合和计算指标。与 Prometheus 不同,SkyWalking 的度量机制是围绕以下具有层次结构的核心概念构建的:\n  层(Layer):表示计算机科学中的一个抽象框架,如 Operating System(OS_LINUX 层)、Kubernetes(k8s 层)。该层将是从不同技术检测到的不同服务的所有者。可以在此处\n找到所有层定义。\n  服务:表示一组 / 一组工作负载,它为传入请求提供相同的行为。\n  服务实例:服务组中的单个工作负载。\n  端点:传入请求的服务路径。\n  进程:操作系统进程。在某些场景下,service instance 不是一个进程,比如一个 Kubernetes Pod 可能包含多个进程。\n  Metric 名称和属性(标签)由 SkyWalking OAP 服务器根据数据源以及 OAL 和 MAL 配置。SkyWalking 提供了对时间序列指标进行下采样(down-sampling),并生成不同时间段数据(分钟、小时、天)的能力。\nSkyWalking 指标流如下:\n流量  Service/ServiceRelation/Instance/ServiceInstanceRelation/Endpoint/EndpointRelation/Process/ProcessRelation 的元数据。包括名称、层、属性、它们之间的关系等。  指标  名称(Name):指标名称,来自 OAL 和 MAL 的配置。 实体(Entity):表示指标的归属,用于查询。一个 Entity 根据 Scope 不同会包含如下信息: Scope 代表指标级别,在查询阶段代表 Scope catalog,Scope catalog 为所有的 scope 提供了高维的分类,层次结构。     Scope 实体信息     Service 服务(包括图层信息)   ServiceInstance 服务、服务实例   Endpoint 服务、端点   ServiceRelation 服务,目标服务   ServiceInstanceRelation 服务实例、目标服务实例   EndpointRelation 端点、目标端点   Process 服务、服务实例、流程   ProcessRelation 进程、服务实例、DestProcess     值:   单值:long 标签值:文本,label1,value1|label2,value2|... 
,例如 L2 aggregation,5000 | L1 aggregation,8000   TimeBucket:时间精确到分钟、小时、天  如何使用 PromQL 服务 设置 PromQL 服务在 v9.4.0 之后默认开启,不需要额外配置。例如,可以使用 OAP 环境变量配置默认端口:\nrestHost: ${SW_PROMQL_REST_HOST:0.0.0.0} restPort: ${SW_PROMQL_REST_PORT:9090} restContextPath: ${SW_PROMQL_REST_CONTEXT_PATH:/} restMaxThreads: ${SW_PROMQL_REST_MAX_THREADS:200} restIdleTimeOut: ${SW_PROMQL_REST_IDLE_TIMEOUT:30000} restAcceptQueueSize: ${SW_PROMQL_REST_QUEUE_SIZE:0} 使用 Prometheus 表达式 PromQL 通过 Prometheus 表达式匹配指标。这是一个典型的 Prometheus 指标。\n为了匹配指标,Prometheus 表达式如下:\n在 PromQL 服务中,这些保留的标签将被解析为度量名称和实体信息字段以及用于查询的其他标签。映射如下。\n   SkyWalking 概念 Prometheus 表达     指标名称 指标名称   层 标签   服务 标签   服务实例 标签 \u0026lt;服务实例\u0026gt;   端点 标签   …… ……    例如,以下表达式用于匹配查询指标:service_cpm、service_instance_cpm、endpoint_cpm\nservice_cpm {service='agent::songs', layer='GENERAL'} service_instance_cpm {service='agent::songs', service_instance='agent::songs_instance_1', layer='GENERAL'} endpoint_cpm {service='agent::songs', endpoint='GET:/songs', layer='GENERAL'} 典型查询示例 在这里,我们将 SkyWalking Showcase 部署作为 Playground 来演示如何使用 PromQL 获取 SkyWalking 指标。\n以下示例可用于通过 PromQL 服务查询服务的元数据和指标。\n获取指标名称 查询:\nhttp://localhost:9099/api/v1/label/__name__/values 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, ... ] } 选择一个指标并获取标签 查询:\nhttp://localhost:9099/api/v1/labels?match []=service_cpm 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34; ] } 从特定层获取服务 查询:\nhttp://127.0.0.1:9099/api/v1/series?match []=service_traffic {layer='GENERAL'}\u0026amp;start=1677479336\u0026amp;end=1677479636 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } 查询服务的特定指标 查询:\nhttp://127.0.0.1:9099/api/v1/query?query=service_cpm {service='agent::songs', layer='GENERAL'} 结果:\n{ 
\u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ {\u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; },\u0026#34;value\u0026#34;: [ 1679559960, \u0026#34;6\u0026#34; ] } ] } } 关于range query和不同的metrics type for query 可以参考 这里的 文档。\n构建 Grafana Dashboard 从上面我们知道了 PromQL 服务的机制和查询方式,现在我们可以为上面的服务示例构建 Grafana Dashboard。注:以下所有配置均基于 Grafana 9.1.0 版本。\nSkyWalking Showcase 提供了 General Service 和 Service Mesh 层等 Dashboard 文件,我们可以通过导入 Dashboard JSON 文件快速为层服务创建 Dashboard。\n部署 Grafana 应用程序后,请按照以下步骤操作:\n配置数据源 首先,我们需要创建一个数据源: 在数据源配置面板中,选择 Prometheus 并设置 URL 为 OAP 服务器地址,默认端口为 9090。 SkyWalking 如果有多个 Prometheus 数据源,请在此处设置数据源名称。\n导入 Dashboard 文件   创建一个名为 SkyWalking 的 Dashboard 文件夹。\n  将 Dashboard 文件导入到 Grafana 中,有两种获取文件的方式:\n 来自 SkyWalking Showcase 转到 SkyWaking Demo:在 Grafana 上预览指标,并将其从 General Service Dashboard 导出。    完毕!现在我们可以看到 Dashboard 正在运行,服务位于下拉列表中,指标显示在面板上。\n  这是一种简单的构建方式,但是如果我们想要自定义它,我们需要知道它是如何工作的。\nDashboard 的工作原理 Dashboard 设置 打开 Settings-Variables 我们可以看到如下变量:\n让我们看看每个变量的作用:\n  $DS_SkyWalking\n这是一个数据源 ty 变量,它指定了之前定义为 SkyWalking 的 Prometheus 数据源。\n  $layer\n这是一个常量类型,因为在 \u0026lsquo;General Service\u0026rsquo; Dashboard 中,所有服务都属于 \u0026lsquo;GENERAL\u0026rsquo; 层,因此可以在每个查询中直接使用它们。注意,当您自定义其他层时,必须在 Layer 上面定义该值。\n  $service\n查询类型变量,为下拉列表获取该层下的所有服务名称。\n查询表达式:\nlabel_values (service_traffic {layer='$layer'}, service) 查询表达式将查询 HTTP API /api/v1/series,以获取 $layer 中服务元数据,并根据标签(服务)提取服务名称。\n  $service_instance\n与 $service 一样,是一个查询变量,用于在下拉列表中选择服务的所有实例。\n查询表达式:\nlabel_values (instance_traffic {layer='$layer', service='$service'}, service_instance) 这里的查询表达式不仅指定了 $layer 还包含 $service 变量,用于关联下拉列表的服务。\n  $endpoint\n与 $service 一样,是一个查询变量,用于在下拉列表中选择服务的所有端点。\n查询表达式:\nlabel_values (endpoint_traffic {layer='$layer', service='$service', keyword='$endpoint_keyword', limit='$endpoint_limit'}, endpoint) 此处的查询表达式指定 $layer 和 $service 用于与下拉列表的服务相关联的。并且还接受 $endpoint_keyword 和 $endpoint_limit 变量作为过滤条件。\n  $endpoint_keyword\n一个文本类型的变量,用户可以输入它来过滤 $endpoint 的返回值。\n  $endpoint_limit\n自定义类型,用户可以选择它以限制返回端点的最大数量。\n  Dashboard 配置 这个 Dashboard 上有几个典型的指标面板,让我们看看它是如何配置的。\n普通值指标 选择 Time series chart 面板 Service Apdex 并单击 edit。\n  查询表达式\nservice_apdex {service='$service', layer='$layer'} / 10000 指标范围为 Service,添加 service 和 layer 标签用于匹配,label 值使用上面配置的变量。该计算 Divided by 10000 用于匹配结果单位。查询文档可以参考 这里。\n  设置 Query options --\u0026gt; Min interval = 1m,因为 SkyWalking 中的指标最小时间段是 1m。\n  设置 Connect null values --\u0026gt; AlwaysShow points --\u0026gt; Always,因为当查询间隔大于 1 小时或 1 天时,SkyWalking 返回小时 / 天步长指标值。\n  标签值指标 选择 Time series chart 面板 Service Response Time Percentile 并单击 edit。\n  查询表达式\nservice_percentile {service='$service', layer='$layer', labels='0,1,2,3,4', relabels='P50,P75,P90,P95,P99'} 指标范围为 Service,添加 service 和 layer 标签用于匹配,label 值使用上面配置的变量。添加 labels='0,1,2,3,4' 过滤结果标签,并添加 relabels='P50,P75,P90,P95,P99' 重命名结果标签。查询文档可以参考 这里。\n  设置 Query options --\u0026gt; Min interval = 1m,因为 SkyWalking 中的指标最小时间段是 1m。\n  设置 Connect null values --\u0026gt; AlwaysShow points --\u0026gt; Always,因为当查询间隔 \u0026gt; 1 小时或 1 天时,SkyWalking 返回小时 / 天步长指标值。\n  设置 Legend 为 {{label}} 来展示。\n  排序指标 选择 Time series chart 面板 Service Response Time Percentile 并单击 edit。\n  查询表达式\nservice_instance_cpm {parent_service='$service', layer='$layer', top_n='10', 
order='DES'} 该表达式用于查询服务下的排序指标,因此添加标签 parent_service 和 layer 进行匹配。添加 top_n='10' 和 order='DES' 过滤结果。查询文档可以参考 这里。\n  设置 Query options --\u0026gt; Min interval = 1m,因为 SkyWalking 中的指标最小时间段是 1m。\n  设置 Calculation --\u0026gt; Latest*。\n  设置 Legend 为 {{service_instance}} 来展示。\n  结论 在这篇文章中,我们介绍了 SkyWalking 中的 PromQL 服务是什么以及它的背景。详细介绍了 PromQL 服务的使用方法和 SkyWalking 相关的基本概念,展示了如何使用 PromQL 服务为 SkyWalking 构建 Grafana Dashboard。\n未来,将会有更多的集成利用这个协议,比如 CI/CD、HPA(缩放)等。\n","title":"为 Apache SkyWalking 构建 Grafana Dashboard —— 原生 PromQL 支持","url":"/zh/2023-03-17-build-grafana-dashboards-for-apache-skywalking-native-promql-support/"},{"content":"Background Apache SkyWalking is an open-source application performance management system that helps users collect and aggregate logs, traces, metrics, and events, and display them on the UI. Starting from OAP 9.4.0, SkyWalking has added AWS Firehose receiver, which is used to receive and calculate the data of CloudWatch metrics. In this article, we will take DynamoDB as an example to show how to use SkyWalking to receive and calculate CloudWatch metrics data for monitoring Amazon Web Services.\nWhat are Amazon CloudWatch and Amazon Kinesis Data Firehose? Amazon CloudWatch is a metrics repository, this tool can collect raw data from AWS (e.g. DynamoDB) and process it into readable metrics in near real-time. Also, we can use Metric Stream to continuously stream CloudWatch metrics to a selected target location for near real-time delivery and low latency. SkyWalking takes advantage of this feature to create metric streams and direct them to Amazon Kinesis Data Firehose transport streams for further transport processing.\nAmazon Kinesis Data Firehoseis an extract, transform, and load (ETL) service that reliably captures, transforms, and delivers streaming data to data lakes, data stores, and analytics services. SkyWalking takes advantage of this feature to eventually direct the metrics stream to the aws-firehose-receiver for OAP to calculate and ultimately display the metrics.\nThe flow chart is as follows.\nNotice  Due to Kinesis Data Firehose specifications, the URL of the HTTP endpoint must use the HTTPS protocol and must use port 443. Also, this URL must be proxied by Gateway and forwarded to the real aws-firehose-receiver. The TLS certificate must be signed by a CA and the self-signed certificate will not be trusted by Kinesis Data Firehose.  Setting up DynamoDB monitoring Next, let\u0026rsquo;s take DynamoDB as an example to illustrate the necessary settings in aws before using OAP to collect CloudWatch metrics:\n Go to Kinesis Console, create a data stream, and select Direct PUT for Source and HTTP Endpoint for Destination. And set HTTP Endpoint URL to Gateway URL. The rest of the configuration options can be configured as needed.  Go to the CloudWatch Console, select Metrics-Stream in the left control panel, and click Create metric stream. Select AWS/DynamoDB for namespace. Also, you can add other namespaces as needed. Kinesis Data Firehose selects the data stream created in the first step. Finally, set the output format to opentelemetry0.7. The rest of the configuration options can be configured as needed.  At this point, the AWS side of DynamoDB monitoring configuration is set up.\nSkyWalking OAP metrics processing analysis SkyWalking uses aws-firehose-receiver to receive and decode AWS metrics streams forwarded by Gateway, and send it to Opentelemetry-receiver for processing and transforming into SkyWalking metrics. 
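To make the receiving step a little more concrete, below is a rough sketch of the kind of request the OAP ultimately sees: Kinesis Data Firehose POSTs a JSON envelope (requestId, timestamp, and base64-encoded records, as described in the Firehose HTTP endpoint specifications) to the gateway, which forwards it to /aws/firehose/metrics. The domain, the access-key header name, and the record payload below are illustrative placeholders, not values taken from this setup; in a real deployment this request is issued by Kinesis Data Firehose itself.
# A hypothetical Firehose-style delivery request as it reaches the receiver through the gateway.
# Header name assumed from the Firehose HTTP endpoint convention; requestId/timestamp reuse the sample values from the specification example.
curl -X POST 'https://your_domain/aws/firehose/metrics' \
  -H 'Content-Type: application/json' \
  -H 'X-Amz-Firehose-Access-Key: <access key configured in the delivery stream>' \
  -d '{"requestId": "ed4acda5-034f-9f42-bba1-f29aea6d7d8f", "timestamp": 1578090901599, "records": [{"data": "<base64-encoded OpenTelemetry 0.7 payload>"}]}'
The sketch is only meant to show what the gateway must accept over HTTPS and forward unchanged to the receiver.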
Then, the metrics are analyzed and aggregated by Meter Analysis Language (MAL) and finally presented on the UI.\nThe MAL part and the UI part of SkyWalking support users' customization, to display the metrics data in a more diversified way. For details, please refer to MAL doc and UI doc.\nTypical metrics analysis Scope In SkyWalking, there is the concept of scope. By using scopes, we can classify and aggregate metrics more rationally. In the monitoring of DynamoDB, two of these scopes are used - Service and Endpoint.\nService represents a set of workloads that provide the same behavior for incoming requests. Commonly used as cluster-level scopes for services, user accounts are closer to the concept of clusters in AWS. So SkyWalking uses AWS account id as a key to map AWS accounts to Service types.\nSimilarly, Endpoint represents a logical concept, often used in services for the path of incoming requests, such as HTTP URI path or gRPC service class + method signature, and can also represent the table structure in the database. So SkyWalking maps DynamoDB tables to Endpoint type.\nMetrics    Metric Name Meaning     AccountMaxReads / AccountMaxWrites The maximum number of read/write capacity units that can be used by an account.   AccountMaxTableLevelReads / AccountMaxTableLevelWrites The maximum number of read/write capacity units that can be used by a table or global secondary index of an account.   AccountProvisionedReadCapacityUtilization / AccountProvisionedWriteCapacityUtilization The percentage of provisioned read/write capacity units utilized by an account.   MaxProvisionedTableReadCapacityUtilization / MaxProvisionedTableWriteCapacityUtilization The percentage of provisioned read/write capacity utilized by the highest provisioned read table or global secondary index of an account.    Above are some common account metrics (Serivce scope). They are various configuration information in DynamoDB, and SkyWalking can show a complete picture of the database configuration changes by monitoring these metrics.\n   Metric Name Meaning     ConsumedReadCapacityUnits / ConsumedWriteCapacityUnits The number of read/write capacity units consumed over the specified time period.   ReturnedItemCount The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period.   SuccessfulRequestLatency The latency of successful requests to DynamoDB or Amazon DynamoDB Streams during the specified time period.   TimeToLiveDeletedItemCount The number of items deleted by Time to Live (TTL) during the specified time period.    The above are some common table metrics (Endpoint scope), which will also be aggregated into account metrics. These metrics are generally used to analyze the performance of the database, and users can use them to determine the reasonable level of database configuration. For example, users can track how much of their provisioned throughput is used through ConsumedReadCapicityUnits / ConsumedReadCapicityUnits to determine the reasonableness of the preconfigured throughput of a table or account. For more information about provisioned throughput, see Provisioned Throughput Intro.\n   Metric Name Meaning     UserErrors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period.   SystemErrors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period.   
ThrottledRequests Requests to DynamoDB that exceed the provisioned throughput limits on a resource.   TransactionConflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items.    The above are some common error metrics, among which UserErrors are account-level metrics and the rest are table-level metrics. Users can set alarms on these metrics, and if warnings appear, then it may indicate that there are some problems with the use of the database, and users need to check and verify by themselves.\nNotice SkyWalking\u0026rsquo;s metrics selection for DynamoDB comes directly from CloudWatch metrics, which can also be found at CloudWatch metrics doc to get metrics details.\nDemo In this section, we will demonstrate how to use terraform to create a DynamoDB table and other AWS services that can generate metrics streams, and deploy Skywalking to complete the metrics collection.\nFirst, you need a running gateway instance, such as NGINX, which is responsible for receiving metrics streams from AWS and forwarding them to the aws-firehose-receiver. Note that the gateway needs to be configured with certificates to accept HTTPS protocol requests.\nBelow is an example configuration for NGINX. The configuration does not need to be identical, as long as it can send incoming HTTPS requests to oap host:12801/aws/firehose/metrics.\nserver { listen 443 ssl; ssl_certificate /crt/test.pem; ssl_certificate_key /crt/test.key; ssl_session_timeout 5m; ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4; ssl_protocols TLSv1 TLSv1.1 TLSv1.2; ssl_prefer_server_ciphers on; location /aws/firehose/metrics { proxy_pass http://test.xyz:12801/aws/firehose/metrics; } } Deploying SkyWalking There are various ways to deploy SkyWalking, and you can get them directly from the release page.\nOf course, if you are more comfortable with Kubernetes, you can also find the appropriate deployment method from SkyWalking-kubernetes.\nPlease note that no matter which deployment method you use, please make sure that the OAP and UI version is 9.4.0 or higher and that port 12801 needs to be open.\nThe following is an example of a deployment using the helm command.\nexport SKYWALKING_RELEASE_VERSION=4.3.0 export SKYWALKING_RELEASE_NAME=skywalking export SKYWALKING_RELEASE_NAMESPACE=default helm install \u0026quot;${SKYWALKING_RELEASE_NAME}\u0026quot; \\ oci://registry-1.docker.io/apache/skywalking-helm \\ --version \u0026quot;${SKYWALKING_RELEASE_VERSION}\u0026quot; \\ -n \u0026quot;${SKYWALKING_RELEASE_NAMESPACE}\u0026quot; \\ --set oap.image.tag=9.4.0 \\ --set oap.storageType=elasticsearch \\ --set ui.image.tag=9.4.0 \\ --set oap.ports.firehose=12801 Start the corresponding AWS service The terraform configuration file is as follows (example modified inTerraform Registry - kinesis_firehose_delivery_stream):\n terraform configuration file  provider \u0026quot;aws\u0026quot; { region = \u0026quot;ap-northeast-1\u0026quot; access_key = \u0026quot;[need change]your access_key\u0026quot; secret_key = \u0026quot;[need change]your secret_key\u0026quot; } resource \u0026quot;aws_dynamodb_table\u0026quot; \u0026quot;basic-dynamodb-table\u0026quot; { name = \u0026quot;GameScores\u0026quot; billing_mode = \u0026quot;PROVISIONED\u0026quot; read_capacity = 20 write_capacity = 20 hash_key = \u0026quot;UserId\u0026quot; range_key = \u0026quot;GameTitle\u0026quot; attribute { name = \u0026quot;UserId\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = 
\u0026quot;GameTitle\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;TopScore\u0026quot; type = \u0026quot;N\u0026quot; } ttl { attribute_name = \u0026quot;TimeToExist\u0026quot; enabled = true } global_secondary_index { name = \u0026quot;GameTitleIndex\u0026quot; hash_key = \u0026quot;GameTitle\u0026quot; range_key = \u0026quot;TopScore\u0026quot; write_capacity = 10 read_capacity = 10 projection_type = \u0026quot;INCLUDE\u0026quot; non_key_attributes = [\u0026quot;UserId\u0026quot;] } tags = { Name = \u0026quot;dynamodb-table-1\u0026quot; Environment = \u0026quot;production\u0026quot; } } resource \u0026quot;aws_cloudwatch_metric_stream\u0026quot; \u0026quot;main\u0026quot; { name = \u0026quot;my-metric-stream\u0026quot; role_arn = aws_iam_role.metric_stream_to_firehose.arn firehose_arn = aws_kinesis_firehose_delivery_stream.http_stream.arn output_format = \u0026quot;opentelemetry0.7\u0026quot; include_filter { namespace = \u0026quot;AWS/DynamoDB\u0026quot; } } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;streams_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;streams.metrics.cloudwatch.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;metric_stream_to_firehose_role\u0026quot; assume_role_policy = data.aws_iam_policy_document.streams_assume_role.json } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;firehose:PutRecord\u0026quot;, \u0026quot;firehose:PutRecordBatch\u0026quot;, ] resources = [aws_kinesis_firehose_delivery_stream.http_stream.arn] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.metric_stream_to_firehose.id policy = data.aws_iam_policy_document.metric_stream_to_firehose.json } resource \u0026quot;aws_s3_bucket\u0026quot; \u0026quot;bucket\u0026quot; { bucket = \u0026quot;metric-stream-test-bucket\u0026quot; } resource \u0026quot;aws_s3_bucket_acl\u0026quot; \u0026quot;bucket_acl\u0026quot; { bucket = aws_s3_bucket.bucket.id acl = \u0026quot;private\u0026quot; } data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;firehose.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { assume_role_policy = data.aws_iam_policy_document.firehose_assume_role.json } data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;s3:AbortMultipartUpload\u0026quot;, \u0026quot;s3:GetBucketLocation\u0026quot;, \u0026quot;s3:GetObject\u0026quot;, \u0026quot;s3:ListBucket\u0026quot;, \u0026quot;s3:ListBucketMultipartUploads\u0026quot;, \u0026quot;s3:PutObject\u0026quot;, ] resources = [ 
aws_s3_bucket.bucket.arn, \u0026quot;${aws_s3_bucket.bucket.arn}/*\u0026quot;, ] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.firehose_to_s3.id policy = data.aws_iam_policy_document.firehose_to_s3.json } resource \u0026quot;aws_kinesis_firehose_delivery_stream\u0026quot; \u0026quot;http_stream\u0026quot; { name = \u0026quot;metric-stream-test-stream\u0026quot; destination = \u0026quot;http_endpoint\u0026quot; http_endpoint_configuration { name = \u0026quot;test_http_endpoint\u0026quot; url = \u0026quot;[need change]Gateway url\u0026quot; role_arn = aws_iam_role.firehose_to_s3.arn } s3_configuration { role_arn = aws_iam_role.firehose_to_s3.arn bucket_arn = aws_s3_bucket.bucket.arn } }  Steps to use.\n  Get the access_key and secret_key of the AWS account.( For how to get them, please refer to create-access-key )\n  Fill in the access_key and secret_key you got in the previous step, and fill in the corresponding URL of your gateway in the corresponding location of aws_kinesis_firehose_delivery_stream configuration.\n  Copy the above content and save it to the main.tf file.\n  Execute the following code in the corresponding path.\n  terraform init terraform apply At this point, all the required AWS services have been successfully created, and you can check your console to see if the services were successfully created.\nDone! If all the above steps were successful, please wait for about five minutes. After that, you can visit the SkyWalking UI to see the metrics.\nCurrently, the metrics collected by SkyWalking by default are displayed as follows.\naccount metrics:\ntable metrics:\nOther services Currently, SkyWalking officially supports EKS, S3, DynamoDB monitoring. Users also refer to the OpenTelemetry receiver to configure OTel rules to collect and analyze CloudWatch metrics of other AWS services and display them through a custom dashboard.\nMaterial  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","title":"Monitoring DynamoDB with SkyWalking","url":"/blog/2023-03-13-skywalking-aws-dynamodb/"},{"content":"背景 Apache SkyWalking 是一个开源应用性能管理系统,帮助用户收集和聚合日志、追踪、指标和事件,并在 UI 上显示。从 OAP 9.4.0 开始,SkyWalking 新增了 AWS Firehose receiver,用来接收,计算CloudWatch metrics的数据。本文将以DynamoDB为例,展示如何使用 SkyWalking接收并计算 CloudWatch metrics 数据,以监控Amazon Web Services。\n什么是 Amazon CloudWatch 与 Amazon Kinesis Data Firehose ? Amazon CloudWatch 是一个指标存储库, 此工具可从 AWS中 ( 如 DynamoDB ) 收集原始数据,近实时处理为可读取的指标。同时,我们也可以使用指标流持续地将 CloudWatch 指标流式传输到所选的目标位置,实现近实时传送和低延迟。SkyWalking 利用此特性,创建指标流并将其导向 Amazon Kinesis Data Firehose 传输流,并由后者进一步传输处理。\nAmazon Kinesis Data Firehose是一项提取、转换、加载服务,可以将流式处理数据以可靠方式捕获、转换和提供到数据湖、数据存储和分析服务中。SkyWalking利用此特性,将指标流最终导向 aws-firehose-receiver,交由OAP计算并最终展示指标。\n整体过程流程图如下:\n注意  由于 Kinesis Data Firehose 规定,HTTP端点的URL必须使用HTTPS协议,且必须使用443端口。同时,此URL必须由Gateway代理并转发到真正的aws-firehose-receiver。 TLS 证书必须由CA签发的,自签证书不会被 Kinesis Data Firehose 信任。  设置DynamoDB监控 接下来以DynamoDB为例说明使用OAP 收集CloudWatch metrics 前,aws中必要的设置:\n 进入 Kinesis 控制台,创建数据流, Source选择 Direct PUT, Destination 选择 HTTP Endpoint. 
并且设置HTTP Endpoint URL 为 Gateway对应URL。 其余配置选项可由需要自行配置。  进入 CloudWatch 控制台,在左侧控制面板中选择Metrics-Stream,点击Create metric stream。其中,namespace 选择 AWS/DynamoDB。同时,根据需要,也可以增加其他命名空间。 Kinesis Data Firehose选择在第一步中创建好的数据流。最后,设置输出格式为opentelemetry0.7。其余配置选项可由需要自行配置。  至此,DynamoDB监控配置的AWS方面设置完成。\nSkyWalking OAP 指标处理分析 SkyWalking 利用 aws-firehose-receiver 接收并解码由Gateway转发来的 AWS 指标流,交由Opentelemetry-receiver进行处理,转化为SkyWalking metrics。并由Meter Analysis Language (MAL)进行指标的分析与聚合,最终呈现在UI上。\n其中 MAL 部分以及 UI 部分,SkyWalking支持用户自由定制,从而更多样性的展示指标数据。详情请参考MAL doc 以及 UI doc。\n典型指标分析 作用域 SkyWalking中,有作用域 ( scope ) 的概念。通过作用域, 我们可以对指标进行更合理的分类与聚合。在对DynamoDB的监控中,使用到了其中两种作用域———Service和Endpoint。\nService表示一组工作负荷,这些工作负荷为传入请求提供相同的行为。常用作服务的集群级别作用域,在AWS中,用户的账户更接近集群的概念。 所以SkyWalking将AWS account id作为key,将AWS账户映射为Service类型。\n同理,Endpoint表示一种逻辑概念,常用于服务中用于传入请求的路径,例如 HTTP URI 路径或 gRPC 服务类 + 方法签名,也可以表示数据库中的表结构。所以SkyWalking将DynamoDB表映射为Endpoint类型。\n指标    指标名称 含义     AccountMaxReads / AccountMaxWrites 账户可以使用的最大 读取/写入 容量单位数。   AccountMaxTableLevelReads / AccountMaxTableLevelWrites 账户的表或全局二级索引可以使用的最大 读取/写入 容量单位数。   AccountProvisionedReadCapacityUtilization / AccountProvisionedWriteCapacityUtilization 账户使用的预置 读取/写入 容量单位百分比。   MaxProvisionedTableReadCapacityUtilization / MaxProvisionedTableWriteCapacityUtilization 账户的最高预调配 读取/写入 表或全局二级索引使用的预调配读取容量单位百分比。    以上为一些常用的账户指标(Serivce 作用域)。它们是DynamoDB中的各种配置信息,SkyWalking通过对这些指标的监控,可以完整的展示出数据库配置的变动情况。\n   指标名称 含义     ConsumedReadCapacityUnits / ConsumedWriteCapacityUnits 指定时间段内占用的 读取/写入 容量单位数   ReturnedItemCount Query、Scan 或 ExecuteStatement(可选择)操作在指定时段内返回的项目数。   SuccessfulRequestLatency 指定时间段内对于 DynamoDB 或 Amazon DynamoDB Streams 的成功请求的延迟。   TimeToLiveDeletedItemCount 指定时间段内按存活时间 (TTL) 删除的项目数。    以上为一些常用的表指标(Endpoint作用域),它们也会被聚合到账户指标中。这些指标一般用于分析数据库的性能,用户可以通过它们判断出数据库配置的合理程度。例如,用户可以通过ConsumedReadCapicityUnits / ConsumedReadCapicityUnits,跟踪预置吞吐量的使用,从而判断表或账户的预制吞吐量的合理性。关于预置吞吐量,请参见读/写容量模式。\n   指标名称 含义     UserErrors 在指定时间段内生成 HTTP 400 状态代码的对 DynamoDB 或 Amazon DynamoDB Streams 的请求。HTTP 400 通常表示客户端错误,如参数组合无效,尝试更新不存在的表或请求签名错误。   SystemErrors 在指定的时间段内生成 HTTP 500 状态代码的对 DynamoDB 或 Amazon DynamoDB Streams 的请求。HTTP 500 通常指示内部服务错误。   ThrottledRequests 超出资源(如表或索引)预置吞吐量限制的 DynamoDB 请求。   TransactionConflict 由于同一项目的并发请求之间的事务性冲突而被拒绝的项目级请求。    以上为一些常用的错误指标,其中UserErrors为用户级别指标,其余为表级别指标。用户可以在这些指标上设置告警,如果警告出现,那么可能说明数据库的使用出现了一些问题,需要用户自行查看验证。\n注意 SkyWalking对于DynamoDB的指标选取直接来源于CloudWatch metrics, 您也可以通过CloudWatch metrics doc来获取指标详细信息。\nDemo 在本节中,我们将演示如何利用terraform创建一个DynamoDB表,以及可以产生指标流的其他AWS服务,并部署Skywalking完成指标收集。\n首先,您需要一个正在运行的网关实例,例如 NGINX,它负责接收AWS传来的指标流并且转发到aws-firehose-receiver。注意, 网关需要配置证书以便接受HTTPS协议的请求。\n下面是一个NGINX的示例配置。配置不要求完全一致,只要能将收到的HTTPS请求发送到oap所在host:12801/aws/firehose/metrics即可。\nserver { listen 443 ssl; ssl_certificate /crt/test.pem; ssl_certificate_key /crt/test.key; ssl_session_timeout 5m; ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4; ssl_protocols TLSv1 TLSv1.1 TLSv1.2; ssl_prefer_server_ciphers on; location /aws/firehose/metrics { proxy_pass http://test.xyz:12801/aws/firehose/metrics; } } 部署SkyWalking SkyWalking的部署方式有很多种,您可以直接从release页面中直接获取。\n当然,如果您更习惯于 Kubernetes,您也可以从SkyWalking-kubernetes找到相应部署方式。\n请注意,无论使用哪种部署方式,请确保OAP和UI的版本为9.4.0以上,并且需要开放12801端口。\n下面是一个使用helm指令部署的示例:\nexport SKYWALKING_RELEASE_VERSION=4.3.0 export SKYWALKING_RELEASE_NAME=skywalking export SKYWALKING_RELEASE_NAMESPACE=default helm install \u0026quot;${SKYWALKING_RELEASE_NAME}\u0026quot; \\ oci://registry-1.docker.io/apache/skywalking-helm \\ --version \u0026quot;${SKYWALKING_RELEASE_VERSION}\u0026quot; \\ 
-n \u0026quot;${SKYWALKING_RELEASE_NAMESPACE}\u0026quot; \\ --set oap.image.tag=9.4.0 \\ --set oap.storageType=elasticsearch \\ --set ui.image.tag=9.4.0 \\ --set oap.ports.firehose=12801 开启对应AWS服务 terraform 配置文件如下(实例修改于Terraform Registry - kinesis_firehose_delivery_stream):\n terraform 配置文件  provider \u0026quot;aws\u0026quot; { region = \u0026quot;ap-northeast-1\u0026quot; access_key = \u0026quot;在这里填入您的access_key\u0026quot; secret_key = \u0026quot;在这里填入您的secret_key\u0026quot; } resource \u0026quot;aws_dynamodb_table\u0026quot; \u0026quot;basic-dynamodb-table\u0026quot; { name = \u0026quot;GameScores\u0026quot; billing_mode = \u0026quot;PROVISIONED\u0026quot; read_capacity = 20 write_capacity = 20 hash_key = \u0026quot;UserId\u0026quot; range_key = \u0026quot;GameTitle\u0026quot; attribute { name = \u0026quot;UserId\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;GameTitle\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;TopScore\u0026quot; type = \u0026quot;N\u0026quot; } ttl { attribute_name = \u0026quot;TimeToExist\u0026quot; enabled = true } global_secondary_index { name = \u0026quot;GameTitleIndex\u0026quot; hash_key = \u0026quot;GameTitle\u0026quot; range_key = \u0026quot;TopScore\u0026quot; write_capacity = 10 read_capacity = 10 projection_type = \u0026quot;INCLUDE\u0026quot; non_key_attributes = [\u0026quot;UserId\u0026quot;] } tags = { Name = \u0026quot;dynamodb-table-1\u0026quot; Environment = \u0026quot;production\u0026quot; } } resource \u0026quot;aws_cloudwatch_metric_stream\u0026quot; \u0026quot;main\u0026quot; { name = \u0026quot;my-metric-stream\u0026quot; role_arn = aws_iam_role.metric_stream_to_firehose.arn firehose_arn = aws_kinesis_firehose_delivery_stream.http_stream.arn output_format = \u0026quot;opentelemetry0.7\u0026quot; include_filter { namespace = \u0026quot;AWS/DynamoDB\u0026quot; } } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;streams_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;streams.metrics.cloudwatch.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;metric_stream_to_firehose_role\u0026quot; assume_role_policy = data.aws_iam_policy_document.streams_assume_role.json } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;firehose:PutRecord\u0026quot;, \u0026quot;firehose:PutRecordBatch\u0026quot;, ] resources = [aws_kinesis_firehose_delivery_stream.http_stream.arn] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.metric_stream_to_firehose.id policy = data.aws_iam_policy_document.metric_stream_to_firehose.json } resource \u0026quot;aws_s3_bucket\u0026quot; \u0026quot;bucket\u0026quot; { bucket = \u0026quot;metric-stream-test-bucket\u0026quot; } resource \u0026quot;aws_s3_bucket_acl\u0026quot; \u0026quot;bucket_acl\u0026quot; { bucket = aws_s3_bucket.bucket.id acl = \u0026quot;private\u0026quot; } data 
\u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;firehose.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { assume_role_policy = data.aws_iam_policy_document.firehose_assume_role.json } data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;s3:AbortMultipartUpload\u0026quot;, \u0026quot;s3:GetBucketLocation\u0026quot;, \u0026quot;s3:GetObject\u0026quot;, \u0026quot;s3:ListBucket\u0026quot;, \u0026quot;s3:ListBucketMultipartUploads\u0026quot;, \u0026quot;s3:PutObject\u0026quot;, ] resources = [ aws_s3_bucket.bucket.arn, \u0026quot;${aws_s3_bucket.bucket.arn}/*\u0026quot;, ] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.firehose_to_s3.id policy = data.aws_iam_policy_document.firehose_to_s3.json } resource \u0026quot;aws_kinesis_firehose_delivery_stream\u0026quot; \u0026quot;http_stream\u0026quot; { name = \u0026quot;metric-stream-test-stream\u0026quot; destination = \u0026quot;http_endpoint\u0026quot; http_endpoint_configuration { name = \u0026quot;test_http_endpoint\u0026quot; url = \u0026quot;这里填入Gateway的url\u0026quot; role_arn = aws_iam_role.firehose_to_s3.arn } s3_configuration { role_arn = aws_iam_role.firehose_to_s3.arn bucket_arn = aws_s3_bucket.bucket.arn } }  使用步骤:\n1.获取AWS账户的access_key以及secret_key。( 关于如何获取,请参考:create-access-key )\n2.将上一步中获取的access_key与secret_key填入对应位置,并将您的网关对应 url 填入 aws_kinesis_firehose_delivery_stream 配置的对应位置中。\n3.复制以上内容并保存到main.tf文件中。\n4.在对应路径下执行以下代码。\nterraform init terraform apply 至此,需要的AWS服务已全部建立成功,您可以检查您的控制台,查看服务是否成功创建。\n完成! 如果以上步骤全部成功,请耐心等待约五分钟。之后您可以访问SkyWalking UI,查看指标变动情况\n目前,SkyWalking 默认收集的指标展示如下:\n账户指标:\n表指标:\n现已支持的服务 目前SkyWalking官方支持EKS,S3,DynamoDB监控。 用户也参考 OpenTelemetry receiver 配置OTEL rules来收集,计算AWS其他服务的CloudWatch metrics,并且通过自定义dashboard展示。\n相关的资料  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","title":"使用SkyWalking监控DynamoDB","url":"/zh/2023-03-13-skywalking-aws-dynamodb/"},{"content":"SKyWalking OAP\u0026rsquo;s existing OpenTelemetry receiver can receive metrics through the OTLP protocol, and use MAL to analyze related metrics in real time. Starting from OAP 9.4.0, SkyWalking has added an AWS Firehose receiver to receive and analyze CloudWatch metrics data. This article will take EKS and S3 as examples to introduce the process of SkyWalking OAP receiving and analyzing the indicator data of AWS services.\nEKS OpenTelemetry Collector OpenTelemetry (OTel) is a series of tools, APIs, and SDKs that can generate, collect, and export telemetry data, such as metrics, logs, and traces. OTel Collector is mainly responsible for collecting, processing, and exporting. For telemetry data, Collector consists of the following main components:\n Receiver: Responsible for obtaining telemetry data, different receivers support different data sources, such as prometheus, kafka, otlp. Processor: Process data between receiver and exporter, such as adding or deleting attributes. 
Exporter: Responsible for sending data to different backends, such as kafka, SkyWalking OAP (via OTLP). Service: Components enabled as a unit configuration, only configured components will be enabled.  OpenTelemetry Protocol Specification(OTLP) OTLP mainly describes how to receive (pull) indicator data through gRPC and HTTP protocols. The OpenTelemetry receiver of SKyWalking OAP implements the OTLP/gRPC protocol, and the indicator data can be exported to OAP through the OTLP/gRPC exporter. Usually the data flow of a Collector is as follows:\nMonitor EKS with OTel EKS monitoring is realized through OTel. You only need to deploy OpenTelemetry Collector in the EKS cluster in the way of DaemonSet  \u0026ndash; use AWS Container Insights Receiver as the receiver, and set the address of otlp exporter to the address of OAP. In addition, it should be noted that OAP is used job_name : aws-cloud-eks-monitoring as the identifier of EKS metrics according to the attribute, so it is necessary to configure a processor in the collector to add this attribute.\nOTel Collector configuration demo extensions:health_check:receivers:awscontainerinsightreceiver:processors:# To enable OAP to correctly identify EKS metrics, add the job_name attributeresource/job-name:attributes:- key:job_name value:aws-cloud-eks-monitoringaction:insert # Specify OAP as exportersexporters:otlp:endpoint:oap-service:11800 tls:insecure:truelogging:loglevel:debug service:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]By default, SkyWalking OAP counts the network, disk, CPU and other related indicator data in the three dimensions of Node, Pod, and Service. Only part of the content is shown here.\nPod dimensions Service dimensions EKS monitoring complete configuration  Click here to view complete k8s resource configuration  apiVersion:v1kind:ServiceAccountmetadata:name:aws-otel-sanamespace:aws-otel-eks---kind:ClusterRoleapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-rolerules:- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;pods\u0026#34;,\u0026#34;nodes\u0026#34;,\u0026#34;endpoints\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;apps\u0026#34;]resources:[\u0026#34;replicasets\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;batch\u0026#34;]resources:[\u0026#34;jobs\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/proxy\u0026#34;]verbs:[\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/stats\u0026#34;,\u0026#34;configmaps\u0026#34;,\u0026#34;events\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;configmaps\u0026#34;]resourceNames:[\u0026#34;otel-container-insight-clusterleader\u0026#34;]verbs:[\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]- apiGroups:[\u0026#34;coordination.k8s.io\u0026#34;]resources:[\u0026#34;leases\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]---kind:ClusterRoleBindingapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-role-bindingsubjects:- 
kind:ServiceAccountname:aws-otel-sanamespace:aws-otel-eksroleRef:kind:ClusterRolename:aoc-agent-roleapiGroup:rbac.authorization.k8s.io---apiVersion:v1kind:ConfigMapmetadata:name:otel-agent-confnamespace:aws-otel-ekslabels:app:opentelemetrycomponent:otel-agent-confdata:otel-agent-config:|extensions: health_check: receivers: awscontainerinsightreceiver: processors: resource/job-name: attributes: - key: job_name value: aws-cloud-eks-monitoring action: insert exporters: otlp: endpoint: oap-service:11800 tls: insecure: true logging: loglevel: debug service: pipelines: metrics: receivers: [awscontainerinsightreceiver] processors: [resource/job-name] exporters: [otlp,logging] extensions: [health_check]---apiVersion:apps/v1kind:DaemonSetmetadata:name:aws-otel-eks-cinamespace:aws-otel-eksspec:selector:matchLabels:name:aws-otel-eks-citemplate:metadata:labels:name:aws-otel-eks-cispec:containers:- name:aws-otel-collectorimage:amazon/aws-otel-collector:v0.23.0env:# Specify region- name:AWS_REGIONvalue:\u0026#34;ap-northeast-1\u0026#34;- name:K8S_NODE_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:HOST_IPvalueFrom:fieldRef:fieldPath:status.hostIP- name:HOST_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:K8S_NAMESPACEvalueFrom:fieldRef:fieldPath:metadata.namespaceimagePullPolicy:Alwayscommand:- \u0026#34;/awscollector\u0026#34;- \u0026#34;--config=/conf/otel-agent-config.yaml\u0026#34;volumeMounts:- name:rootfsmountPath:/rootfsreadOnly:true- name:dockersockmountPath:/var/run/docker.sockreadOnly:true- name:varlibdockermountPath:/var/lib/dockerreadOnly:true- name:containerdsockmountPath:/run/containerd/containerd.sockreadOnly:true- name:sysmountPath:/sysreadOnly:true- name:devdiskmountPath:/dev/diskreadOnly:true- name:otel-agent-config-volmountPath:/conf- name:otel-output-vol mountPath:/otel-outputresources:limits:cpu:200mmemory:200Mirequests:cpu:200mmemory:200Mivolumes:- configMap:name:otel-agent-confitems:- key:otel-agent-configpath:otel-agent-config.yamlname:otel-agent-config-vol- name:rootfshostPath:path:/- name:dockersockhostPath:path:/var/run/docker.sock- name:varlibdockerhostPath:path:/var/lib/docker- name:containerdsockhostPath:path:/run/containerd/containerd.sock- name:syshostPath:path:/sys- name:devdiskhostPath:path:/dev/disk/- name:otel-output-vol hostPath:path:/otel-outputserviceAccountName:aws-otel-sa S3 Amazon CloudWatch Amazon CloudWatch is a monitoring service provided by AWS. It is responsible for collecting indicator data of AWS services and resources. CloudWatch metrics stream is responsible for converting indicator data into stream processing data, and supports output in two formats: json and OTel v0.7.0.\nAmazon Kinesis Data Firehose (Firehose) Firehose is an extract, transform, load (ETL) service that reliably captures, transforms, and serves streaming data into data lakes, data stores (such as S3), and analytics services.\nTo ensure that external services can correctly receive indicator data, AWS provides Kinesis Data Firehose HTTP Endpoint Delivery Request and Response Specifications (Firehose Specifications) . Firhose pushes Json data by POST\nJson data example { \u0026#34;requestId\u0026#34;: \u0026#34;ed4acda5-034f-9f42-bba1-f29aea6d7d8f\u0026#34;, \u0026#34;timestamp\u0026#34;: 1578090901599 \u0026#34;records\u0026#34;: [ { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8=\u0026#34; }, { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8gd29ybGQ=\u0026#34; } ] }  requestId: Request id, which can achieve deduplication and debugging purposes. 
timestamp: Firehose generated the timestamp of the request (in milliseconds). records: Actual delivery records  data: The delivered data, encoded in base64, can be in json or OTel v0.7.0 format, depending on the format of CloudWatch data (described later). Skywalking currently supports OTel v0.7.0 format.    aws-firehose-receiver aws-firehose-receiver provides an HTTP Endpoint that implements Firehose Specifications: /aws/firehose/metrics. The figure below shows the data flow of monitoring DynamoDB, S3 and other services through CloudWatch, and using Firehose to send indicator data to SKywalking OAP.\nStep-by-step setup of S3 monitoring  Enter the S3 console and create a filter forRequest metrics: Amazon S3 \u0026gt;\u0026gt; Buckets \u0026gt;\u0026gt; (Your Bucket) \u0026gt;\u0026gt; Metrics \u0026gt;\u0026gt; metrics \u0026gt;\u0026gt; View additional charts \u0026gt;\u0026gt; Request metrics  Enter the Amazon Kinesis console, create a delivery stream, Source select Direct PUT, Destination select HTTP Endpoint. And set HTTP endpoint URL to https://your_domain/aws/firehose/metrics. Other configuration items:   Buffer hints: Set the size and period of the cache Access key just matches the AccessKey in aws-firehose-receiver Retry duration: Retry period Backup settings: Backup settings, optionally backup the posted data to S3 at the same time.  Enter the CloudWatch console Streams and click Create CloudWatch Stream. And Select your Kinesis Data Firehose stream configure the delivery stream created in the second step in the item. Note that it needs to be set Change output format to OpenTelemetry v0.7.0.  At this point, the S3 monitoring configuration settings are complete. The S3 metrics currently collected by SkyWalking by default are shown below:\nOther service Currently SkyWalking officially supports EKS, S3, DynamoDB monitoring. 
Users also refer to the OpenTelemetry receiver to configure OTel rules to collect and analyze CloudWatch metrics of other AWS services, and display them through a custom dashboard.\nMaterial  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","title":"Monitoring AWS EKS and S3 with SkyWalking","url":"/blog/2023-03-12-skywalking-aws-s3-eks/"},{"content":"SKyWalking OAP 现有的 OpenTelemetry receiver 可以通过OTLP协议接收指标(metrics),并且使用MAL实时分析相关指标。从OAP 9.4.0开始,SkyWalking 新增了AWS Firehose receiver,用来接收,分析CloudWatch metrics数据。本文将以EKS和S3为例介绍SkyWalking OAP 接收,分析 AWS 服务的指标数据的过程\nEKS OpenTelemetry Collector OpenTelemetry (OTel) 是一系列tools,API,SDK,可以生成,收集,导出遥测数据,比如 指标(metrics),日志(logs)和链路信息(traces),而OTel Collector主要负责收集、处理和导出遥测数据,Collector由以下主要组件组成:\n receiver: 负责获取遥测数据,不同的receiver支持不同的数据源,比如prometheus ,kafka,otlp, processor:在receiver和exporter之间处理数据,比如增加或者删除attributes, exporter:负责发送数据到不同的后端,比如kafka,SkyWalking OAP(通过OTLP) service: 作为一个单元配置启用的组件,只有配置的组件才会被启用  OpenTelemetry Protocol Specification(OTLP) OTLP 主要描述了如何通过gRPC,HTTP协议接收(拉取)指标数据。SKyWalking OAP的 OpenTelemetry receiver 实现了OTLP/gRPC协议,通过OTLP/gRPC exporter可以将指标数据导出到OAP。通常一个Collector的数据流向如下:\n使用OTel监控EKS EKS的监控就是通过OTel实现的,只需在EKS集群中以DaemonSet  的方式部署 OpenTelemetry Collector,使用 AWS Container Insights Receiver 作为receiver,并且设置otlp exporter的地址为OAP的的地址即可。另外需要注意的是OAP根据attribute job_name : aws-cloud-eks-monitoring 作为EKS metrics的标识,所以还需要再collector中配置一个processor来增加这个属性\nOTel Collector配置demo extensions:health_check:receivers:awscontainerinsightreceiver:processors:# 为了OAP能够正确识别EKS metrics,增加job_name attributeresource/job-name:attributes:- key:job_name value:aws-cloud-eks-monitoringaction:insert # 指定OAP作为 exportersexporters:otlp:endpoint:oap-service:11800 tls:insecure:truelogging:loglevel:debug service:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]SkyWalking OAP 默认统计 Node,Pod,Service 三个维度的网络、磁盘、CPU等相关的指标数据,这里仅展示了部分内容\nPod 维度 Service 维度 EKS监控完整配置  Click here to view complete k8s resource configuration  apiVersion:v1kind:ServiceAccountmetadata:name:aws-otel-sanamespace:aws-otel-eks---kind:ClusterRoleapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-rolerules:- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;pods\u0026#34;,\u0026#34;nodes\u0026#34;,\u0026#34;endpoints\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;apps\u0026#34;]resources:[\u0026#34;replicasets\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;batch\u0026#34;]resources:[\u0026#34;jobs\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/proxy\u0026#34;]verbs:[\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/stats\u0026#34;,\u0026#34;configmaps\u0026#34;,\u0026#34;events\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;configmaps\u0026#34;]resourceNames:[\u0026#34;otel-container-insight-clusterleader\u0026#34;]verbs:[\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]- 
apiGroups:[\u0026#34;coordination.k8s.io\u0026#34;]resources:[\u0026#34;leases\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]---kind:ClusterRoleBindingapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-role-bindingsubjects:- kind:ServiceAccountname:aws-otel-sanamespace:aws-otel-eksroleRef:kind:ClusterRolename:aoc-agent-roleapiGroup:rbac.authorization.k8s.io---apiVersion:v1kind:ConfigMapmetadata:name:otel-agent-confnamespace:aws-otel-ekslabels:app:opentelemetrycomponent:otel-agent-confdata:otel-agent-config:|extensions: health_check: receivers: awscontainerinsightreceiver: processors: resource/job-name: attributes: - key: job_name value: aws-cloud-eks-monitoring action: insert exporters: otlp: endpoint: oap-service:11800 tls: insecure: true logging: loglevel: debug service: pipelines: metrics: receivers: [awscontainerinsightreceiver] processors: [resource/job-name] exporters: [otlp,logging] extensions: [health_check]---apiVersion:apps/v1kind:DaemonSetmetadata:name:aws-otel-eks-cinamespace:aws-otel-eksspec:selector:matchLabels:name:aws-otel-eks-citemplate:metadata:labels:name:aws-otel-eks-cispec:containers:- name:aws-otel-collectorimage:amazon/aws-otel-collector:v0.23.0env:# Specify region- name:AWS_REGIONvalue:\u0026#34;ap-northeast-1\u0026#34;- name:K8S_NODE_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:HOST_IPvalueFrom:fieldRef:fieldPath:status.hostIP- name:HOST_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:K8S_NAMESPACEvalueFrom:fieldRef:fieldPath:metadata.namespaceimagePullPolicy:Alwayscommand:- \u0026#34;/awscollector\u0026#34;- \u0026#34;--config=/conf/otel-agent-config.yaml\u0026#34;volumeMounts:- name:rootfsmountPath:/rootfsreadOnly:true- name:dockersockmountPath:/var/run/docker.sockreadOnly:true- name:varlibdockermountPath:/var/lib/dockerreadOnly:true- name:containerdsockmountPath:/run/containerd/containerd.sockreadOnly:true- name:sysmountPath:/sysreadOnly:true- name:devdiskmountPath:/dev/diskreadOnly:true- name:otel-agent-config-volmountPath:/conf- name:otel-output-vol mountPath:/otel-outputresources:limits:cpu:200mmemory:200Mirequests:cpu:200mmemory:200Mivolumes:- configMap:name:otel-agent-confitems:- key:otel-agent-configpath:otel-agent-config.yamlname:otel-agent-config-vol- name:rootfshostPath:path:/- name:dockersockhostPath:path:/var/run/docker.sock- name:varlibdockerhostPath:path:/var/lib/docker- name:containerdsockhostPath:path:/run/containerd/containerd.sock- name:syshostPath:path:/sys- name:devdiskhostPath:path:/dev/disk/- name:otel-output-vol hostPath:path:/otel-outputserviceAccountName:aws-otel-sa S3 Amazon CloudWatch Amazon CloudWatch 是AWS提供的监控服务,负责收集AWS 服务,资源的指标数据,CloudWatch metrics stream 负责将指标数据转换为流式处理数据,支持输出json,OTel v0.7.0 两种格式。\nAmazon Kinesis Data Firehose (Firehose) Firehose 是一项提取、转换、加载(ETL)服务,可以将流式处理数据以可靠方式捕获、转换和提供到数据湖、数据存储(比如S3)和分析服务中。\n为了确保外部服务能够正确地接收指标数据, AWS提供了 Kinesis Data Firehose HTTP Endpoint Delivery Request and Response Specifications (Firehose Specifications)。Firhose以POST的方式推送Json数据\nJson数据示例 { \u0026#34;requestId\u0026#34;: \u0026#34;ed4acda5-034f-9f42-bba1-f29aea6d7d8f\u0026#34;, \u0026#34;timestamp\u0026#34;: 1578090901599 \u0026#34;records\u0026#34;: [ { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8=\u0026#34; }, { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8gd29ybGQ=\u0026#34; } ] }  requestId: 请求id,可以实现去重,debug目的 timestamp: Firehose 产生该请求的时间戳(毫秒) records: 实际投递的记录  data: 投递的数据,以base64编码数据,可以是json或者OTel v0.7.0格式,取决于CloudWatch数据数据的格式(稍后会有描述)。Skywalking目前支持OTel v0.7.0格式    
aws-firehose-receiver aws-firehose-receiver 就是提供了一个实现了Firehose Specifications的HTTP Endpoint:/aws/firehose/metrics。下图展示了通过CloudWatch监控DynamoDB,S3等服务,并利用Firehose将指标数据发送到SKywalking OAP的数据流向\n从上图可以看到 aws-firehose-receiver 将数据转换后交由 OpenTelemetry-receiver处理 ,所以 OpenTelemetry receiver 中配置的 otel-rules 同样可以适用CloudWatch metrics\n注意  因为 Kinesis Data Firehose 要求,必须在AWS Firehose receiver 前放置一个Gateway用来建立HTTPS链接。aws-firehose-receiver 将从v9.5.0开始支持HTTPS协议 TLS 证书必须是CA签发的  逐步设置S3监控  进入 S3控制台,通过 Amazon S3 \u0026gt;\u0026gt; Buckets \u0026gt;\u0026gt; (Your Bucket) \u0026gt;\u0026gt; Metrics \u0026gt;\u0026gt; metrics \u0026gt;\u0026gt; View additional charts \u0026gt;\u0026gt; Request metrics 为 Request metrics 创建filter  进入Amazon Kinesis 控制台,创建一个delivery stream, Source选择 Direct PUT, Destination 选择 HTTP Endpoint. 并且设置HTTP endpoint URL 为 https://your_domain/aws/firehose/metrics。其他配置项:  Buffer hints: 设置缓存的大小和周期 Access key 与aws-firehose-receiver中的AccessKey一致即可 Retry duration: 重试周期 Backup settings: 备份设置,可选地将投递的数据同时备份到S3。    进入 CloudWatch控制台,Streams 标签创建CloudWatch Stream。并且在Select your Kinesis Data Firehose stream项中配置第二步创建的delivery stream。注意需要设置Change output format 为 OpenTelemetry v0.7.0。  至此,S3监控配置设置完成。目前SkyWalking默认收集的S3 metrics 展示如下\n其他服务 目前SkyWalking官方支持EKS,S3,DynamoDB监控。 用户也参考 OpenTelemetry receiver 配置OTel rules来收集,分析AWS其他服务的CloudWatch metrics,并且通过自定义dashboard展示\n资料  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","title":"使用SkyWalking监控AWS EKS和S3","url":"/zh/2023-03-12-skywalking-aws-s3-eks/"},{"content":"SkyWalking Rust 0.6.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Refactor span object api to make it more friendly. by @jmjoy in https://github.com/apache/skywalking-rust/pull/52 Refactor management report and keep alive api. by @jmjoy in https://github.com/apache/skywalking-rust/pull/53 Use stream and completed for a bulk to collect for grpc reporter. by @jmjoy in https://github.com/apache/skywalking-rust/pull/54 Add sub components licenses in dist material. by @jmjoy in https://github.com/apache/skywalking-rust/pull/55 Bump to 0.6.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/56  ","title":"Release Apache SkyWalking Rust 0.6.0","url":"/events/release-apache-skywalking-rust-0-6-0/"},{"content":"SkyWalking 9.4.0 is released. Go to downloads page to find release tars.\nPromQL and Grafana Support Zipkin Lens UI Bundled AWS S3 and DynamoDB monitoring Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom version to 29. Bump up Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Adds Micrometer as a new component.(ID=141) Refactor session cache in MetricsPersistentWorker. Cache enhancement - don\u0026rsquo;t read new metrics from database in minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. // 1.3 the OAP cluster is rebalanced due to scaling // (2) the metrics are from the time after the timeOfLatestStabilitySts // (3) the metrics don't exist in the cache // the kernel should NOT try to load it from the database. 
// // Notice, about condition (2), // for the specific minute of booted successfully, the metrics are expected to load from database when // it doesn't exist in the cache.  Remove the offset of metric session timeout according to worker creation sequence. Correct MetricsExtension annotations declarations in manual entities. Support component IDs' priority in process relation metrics. Remove abandon logic in MergableBufferedData, which caused unexpected no-update. Fix miss set LastUpdateTimestamp that caused the metrics session to expire. Rename MAL rule spring-sleuth.yaml to spring-micrometer.yaml. Fix memory leak in Zipkin API. Remove the dependency of refresh_interval of ElasticSearch indices from elasticsearch/flushInterval config. Now, it uses core/persistentPeriod + 5s as refresh_interval for all indices instead. Change elasticsearch/flushInterval to 5s(was 15s). Optimize flushInterval of ElasticSearch BulkProcessor to avoid extra periodical flush in the continuous bulk streams. An unexpected dot is added when exp is a pure metric name and expPrefix != null. Support monitoring MariaDB. Remove measure/stream specific interval settings in BanyanDB. Add global-specific settings used to override global configurations (e.g segmentIntervalDays, blockIntervalHours) in BanyanDB. Use TTL-driven interval settings for the measure-default group in BanyanDB. Fix wrong group of non time-relative metadata in BanyanDB. Refactor StorageData#id to the new StorageID object from a String type. Support multiple component IDs in the service topology level. Add ElasticSearch.Keyword annotation to declare the target field type as keyword. [Breaking Change] Column component_id of service_relation_client_side and service_relation_server_side have been replaced by component_ids. Support priority definition in the component-libraries.yml. Enhance service topology query. When there are multiple components detected from the server side, the component type of the node would be determined by the priority, which was random in the previous release. Remove component_id from service_instance_relation_client_side and service_instance_relation_server_side. Make the satellite E2E test more stable. Add Istio 1.16 to test matrix. Register ValueColumn as Tag for Record in BanyanDB storage plugin. Bump up Netty to 4.1.86. Remove unnecessary additional columns when storage is in logical sharding mode. The cluster coordinator support watch mechanism for notifying RemoteClientManager and ServerStatusService. Fix ServiceMeshServiceDispatcher overwrite ServiceDispatcher debug file when open SW_OAL_ENGINE_DEBUG. Use groupBy and in operators to optimize topology query for BanyanDB storage plugin. Support server status watcher for MetricsPersistentWorker to check the metrics whether required initialization. Fix the meter value are not correct when using sumPerMinLabeld or sumHistogramPercentile MAL function. Fix cannot display attached events when using Zipkin Lens UI query traces. Remove time_bucket for both Stream and Measure kinds in BanyanDB plugin. Merge TIME_BUCKET of Metrics and Record into StorageData. Support no layer in the listServices query. Fix time_bucket of ServiceTraffic not set correctly in slowSql of MAL. Correct the TopN record query DAO of BanyanDB. Tweak interval settings of BanyanDB. Support monitoring AWS Cloud EKS. Bump BanyanDB Java client to 0.3.0-rc1. Remove id tag from measures. Add Banyandb.MeasureField to mark a column as a BanyanDB Measure field. 
Add BanyanDB.StoreIDTag to store a process\u0026rsquo;s id for searching. [Breaking Change] The supported version of ShardingSphere-Proxy is upgraded from 5.1.2 to 5.3.1. Due to the changes of ShardingSphere\u0026rsquo;s API, versions before 5.3.1 are not compatible. Add the eBPF network profiling E2E Test in the per storage. Fix TCP service instances are lack of instance properties like pod and namespace, which causes Pod log not to work for TCP workloads. Add Python HBase happybase module component ID(94). Fix gRPC alarm cannot update settings from dynamic configuration source. Add batchOfBytes configuration to limit the size of bulk flush. Add Python Websocket module component ID(7018). [Optional] Optimize single trace query performance by customizing routing in ElasticSearch. SkyWalking trace segments and Zipkin spans are using trace ID for routing. This is OFF by default, controlled by storage/elasticsearch/enableCustomRouting. Enhance OAP HTTP server to support HTTPS Remove handler scan in otel receiver, manual initialization instead Add aws-firehose-receiver to support collecting AWS CloudWatch metric(OpenTelemetry format). Notice, no HTTPS/TLS setup support. By following AWS Firehose request, it uses proxy request (https://... instead of /aws/firehose/metrics), there must be a proxy(Nginx, Envoy, etc.). Avoid Antlr dependencies' versions might be different in compile time and runtime. Now PrometheusMetricConverter#escapedName also support converting / to _. Add missing TCP throughput metrics. Refactor @Column annotation, swap Column#name and ElasticSearch.Column#columnAlias and rename ElasticSearch.Column#columnAlias to ElasticSearch.Column#legacyName. Add Python HTTPX module component ID(7019). Migrate tests from junit 4 to junit 5. Refactor http-based alarm plugins and extract common logic to HttpAlarmCallback. Support Amazon Simple Storage Service (Amazon S3) metrics monitoring Support process Sum metrics with AGGREGATION_TEMPORALITY_DELTA case Support Amazon DynamoDB monitoring. Support prometheus HTTP API and promQL. Scope in the Entity of Metrics query v1 protocol is not required and automatical correction. The scope is determined based on the metric itself. Add explicit ReadTimeout for ConsulConfigurationWatcher to avoid IllegalArgumentException: Cache watchInterval=10sec \u0026gt;= networkClientReadTimeout=10000ms. Fix DurationUtils.getDurationPoints exceed, when startTimeBucket equals endTimeBucket. Support process OpenTelemetry ExponentialHistogram metrics Add FreeRedis component ID(3018).  UI  Add Zipkin Lens UI to webapp, and proxy it to context path /zipkin. Migrate the build tool from vue cli to Vite4. Fix Instance Relation and Endpoint Relation dashboards show up. Add Micrometer icon. Update MySQL UI to support MariaDB. Add AWS menu for supporting AWS monitoring. Add missing FastAPI logo. Update the log details page to support the formatted display of JSON content. Fix build config. Avoid being unable to drag process nodes for the first time. Add node folder into ignore list. Add ElPopconfirm to component types. Add an iframe widget for zipkin UI. Optimize graph tooltips to make them more friendly. Bump json5 from 1.0.1 to 1.0.2. Add websockets icon. Implement independent mode for widgets. Bump http-cache-semantics from 4.1.0 to 4.1.1. Update menus for OpenFunction. Add auto fresh to widgets independent mode. Fix: clear trace ID on the Log and Trace widgets after using association. Fix: reset duration for query conditions after time range changes. Add AWS S3 menu. 
Refactor: optimize side bar component to make it more friendly. Fix: remove duplicate popup message for query result. Add logo for HTTPX. Refactor: optimize the attached events visualization in the trace widget. Update BanyanDB client to 0.3.1. Add AWS DynamoDB menu. Fix: add auto period to the independent mode for widgets. Optimize menus and add Windows monitoring menu. Add a calculation for the cpm5dAvg. add a cpm5d calculation. Fix data processing error in the eBPF profiling widget. Support for double quotes in SlowSQL statements. Fix: the wrong position of the menu when clicking the topology node.  Documentation  Remove Spring Sleuth docs, and add Spring MicroMeter Observations Analysis with the latest Java agent side enhancement. Update monitoring MySQL document to add the MariaDB part. Reorganize the protocols docs to a more clear API docs. Add documentation about replacing Zipkin server with SkyWalking OAP. Add Lens UI relative docs in Zipkin trace section. Add Profiling APIs. Fix backend telemetry doc and so11y dashboard doc as the OAP Prometheus fetcher was removed since 9.3.0  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.4.0","url":"/events/release-apache-skywalking-apm-9.4.0/"},{"content":"SkyWalking BanyanDB 0.3.1 is released. Go to downloads page to find release tars.\nBugs  Fix the broken of schema chain. Add a timeout to all go leaking checkers.  Chores  Bump golang.org/x/net from 0.2.0 to 0.7.0.  ","title":"Release Apache SkyWalking BanyanDB 0.3.1","url":"/events/release-apache-skywalking-banyandb-0-3-1/"},{"content":"SkyWalking Python 1.0.0 is released! Go to downloads page to find release tars.\nPyPI Wheel: https://pypi.org/project/apache-skywalking/1.0.0/\nDockerHub Image: https://hub.docker.com/r/apache/skywalking-python\n  Important Notes and Breaking Changes:\n The new PVM metrics reported from Python agent requires SkyWalking OAP v9.3.0 to show out-of-the-box. BREAKING: Python 3.6 is no longer supported and may not function properly, Python 3.11 support is added and tested. BREAKING: A number of common configuration options and environment variables are renamed to follow the convention of Java agent, please check with the latest official documentation before upgrading. (#273, #282) https://skywalking.apache.org/docs/skywalking-python/v1.0.0/en/setup/configuration/ BREAKING: All agent core capabilities are now covered by test cases and enabled by default (Trace, Log, PVM runtime metrics, Profiler) BREAKING: DockerHub Python agent images since v1.0.0 will no longer include the run part in ENTRYPOINT [\u0026quot;sw-python\u0026quot;, \u0026quot;run\u0026quot;], user should prefix their command with [-d/--debug] run [-p/--prefork] \u0026lt;Command\u0026gt; for extra flexibility. 
Packaged wheel now provides a extra [all] option to support all three report protocols    Feature:\n Add support for Python 3.11 (#285) Add MeterReportService (gRPC, Kafka reporter) (default:enabled) (#231, #236, #241, #243) Add reporter for PVM runtime metrics (default:enabled) (#238, #247) Add Greenlet profiler (#246) Add test and support for Python Slim base images (#249) Add support for the tags of Virtual Cache for Redis (#263) Add a new configuration kafka_namespace to prefix the kafka topic names (#277) Add log reporter support for loguru (#276) Add experimental support for explicit os.fork(), restarts agent in forked process (#286) Add experimental sw-python CLI sw-python run [-p] flag (-p/\u0026ndash;prefork) to enable non-intrusive uWSGI and Gunicorn postfork support (#288)    Plugins:\n Add aioredis, aiormq, amqp, asyncpg, aio-pika, kombu RMQ plugins (#230 Missing test coverage) Add Confluent Kafka plugin (#233 Missing test coverage) Add HBase plugin Python HappyBase model (#266) Add FastAPI plugin websocket protocol support (#269) Add Websockets (client) plugin (#269) Add HTTPX plugin (#283)    Fixes:\n Allow RabbitMQ BlockingChannel.basic_consume() to link with outgoing spans (#224) Fix RabbitMQ basic_get bug (#225, #226) Fix case when tornado socket name is None (#227) Fix misspelled text \u0026ldquo;PostgreSLQ\u0026rdquo; -\u0026gt; \u0026ldquo;PostgreSQL\u0026rdquo; in Postgres-related plugins (#234) Make sure span.component initialized as Unknown rather than 0 (#242) Ignore websocket connections inside fastapi temporarily (#244, issue#9724) Fix Kafka-python plugin SkyWalking self reporter ignore condition (#249) Add primary endpoint in tracing context and endpoint info to log reporter (#261) Enforce tag class type conversion (#262) Fix sw_logging (log reporter) potentially throw exception leading to traceback confusion (#267) Avoid reporting meaningless tracecontext with logs when there\u0026rsquo;s no active span, UI will now show empty traceID (#272) Fix exception handler in profile_context (#273) Add namespace suffix to service name (#275) Add periodical instance property report to prevent data loss (#279) Fix sw_logging when Logger.disabled is true (#281)    Docs:\n New documentation on how to test locally (#222) New documentation on the newly added meter reporter feature (#240) New documentation on the newly added greenlet profiler and the original threading profiler (#250) Overhaul documentation on development setup and testing (#249) Add tables to state currently supported features of Python agent. 
(#271) New configuration documentation generator (#273)    Others:\n Pin CI SkyWalking License Eye (#221) Fix dead link due to the \u0026lsquo;next\u0026rsquo; url change (#235) Pin CI SkyWalking Infra-E2E (#251) Sync OAP, SWCTL versions in E2E and fix test cases (#249) Overhaul development flow with Poetry (#249) Fix grpcio-tools generated message type (#253) Switch plugin tests to use slim Python images (#268) Add unit tests to sw_filters (#269)    New Contributors  @ZEALi made their first contribution in https://github.com/apache/skywalking-python/pull/242 @westarest made their first contribution in https://github.com/apache/skywalking-python/pull/246 @Jedore made their first contribution in https://github.com/apache/skywalking-python/pull/263 @alidisi made their first contribution in https://github.com/apache/skywalking-python/pull/266 @SheltonZSL made their first contribution in https://github.com/apache/skywalking-python/pull/275 @XinweiLyu made their first contribution in https://github.com/apache/skywalking-python/pull/283  Full Changelog: https://github.com/apache/skywalking-python/compare/v0.8.0...v1.0.0\n","title":"Release Apache SkyWalking Python 1.0.0","url":"/events/release-apache-skywalking-python-1-0-0/"},{"content":"SkyWalking BanyanDB 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support 64-bit float type. Web Application. Close components in tsdb gracefully. Add TLS for the HTTP server. Use the table builder to compress data.  Bugs  Open blocks concurrently. Sync index writing and shard closing. TimestampRange query throws an exception if no data in this time range.  Chores  Fixes issues related to leaked goroutines. Add validations to APIs.  ","title":"Release Apache SkyWalking BanyanDB 0.3.0","url":"/events/release-apache-skywalking-banyandb-0-3-0/"},{"content":"SkyWalking PHP 0.3.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Make explicit rust version requirement by @wu-sheng in https://github.com/apache/skywalking-php/pull/35 Update dependencies version limitation. by @jmjoy in https://github.com/apache/skywalking-php/pull/36 Startup 0.3.0 by @heyanlong in https://github.com/apache/skywalking-php/pull/37 Support PHP 8.2 by @heyanlong in https://github.com/apache/skywalking-php/pull/38 Fix php-fpm freeze after large amount of request. by @jmjoy in https://github.com/apache/skywalking-php/pull/39 Lock develop rust version to 1.65, upgrade deps. by @jmjoy in https://github.com/apache/skywalking-php/pull/41 Fix worker unexpected shutdown. by @jmjoy in https://github.com/apache/skywalking-php/pull/42 Update docs about installing rust. by @jmjoy in https://github.com/apache/skywalking-php/pull/43 Retry cargo test when failed in CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/44 Hack dtor for mysqli to cleanup resources. by @jmjoy in https://github.com/apache/skywalking-php/pull/45 Report instance properties and keep alive. by @jmjoy in https://github.com/apache/skywalking-php/pull/46 Add configuration option skywalking_agent.runtime_dir. by @jmjoy in https://github.com/apache/skywalking-php/pull/47 Add authentication support. by @jmjoy in https://github.com/apache/skywalking-php/pull/48 Support TLS. by @jmjoy in https://github.com/apache/skywalking-php/pull/49 Periodic reporting instance properties. by @jmjoy in https://github.com/apache/skywalking-php/pull/50 Bump to 0.3.0. 
by @jmjoy in https://github.com/apache/skywalking-php/pull/51  Breaking  Remove http:// scheme in skywalking_agent.server_addr.  New Contributors  @wu-sheng made their first contribution in https://github.com/apache/skywalking-php/pull/35  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.2.0...v0.3.0\nPECL https://pecl.php.net/package/skywalking_agent/0.3.0\n","title":"Release Apache SkyWalking PHP 0.3.0","url":"/events/release-apache-skwaylking-php-0-3-0/"},{"content":"SkyWalking Java Agent 8.14.0 is released. Go to downloads page to find release tars. Changes by Version\n8.14.0  Polish test framework to support arm64/v8 platforms Fix wrong config name plugin.toolkit.use_qualified_name_as_operation_name, and system variable name SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME:false. They were toolit. Rename JDBI to JDBC Support collecting dubbo thread pool metrics Bump up byte-buddy to 1.12.19 Upgrade agent test tools [Breaking Change] Compatible with 3.x and 4.x RabbitMQ Client, rename rabbitmq-5.x-plugin to rabbitmq-plugin Polish JDBC plugins to make DBType accurate Report the agent version to OAP as an instance attribute Polish jedis-4.x-plugin to change command to lowercase, which is consistent with jedis-2.x-3.x-plugin Add micronauthttpclient,micronauthttpserver,memcached,ehcache,guavacache,jedis,redisson plugin config properties to agent.config Add Micrometer Observation support Add tags mq.message.keys and mq.message.tags for RocketMQ producer span Clean the trace context which injected into Pulsar MessageImpl after the instance recycled Fix In the higher version of mysql-connector-java 8x, there is an error in the value of db.instance. Add support for KafkaClients 3.x. Support to customize the collect period of JVM relative metrics. Upgrade netty-codec-http2 to 4.1.86.Final. Put Agent-Version property reading in the premain stage to avoid deadlock when using jarsigner. Add a config agent.enable(default: true) to support disabling the agent through system property -Dskywalking.agent.disable=false or system environment variable setting SW_AGENT_ENABLE=false. Enhance redisson plugin to adopt uniform tags.  Documentation  Update Plugin-test.md, support string operators start with and end with Polish agent configurations doc to fix type error  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.14.0","url":"/events/release-apache-skywalking-java-agent-8-14-0/"},{"content":"Background Apache SkyWalking is an open-source Application Performance Management system that helps users collect and aggregate logs, traces, metrics, and events for display on a UI. In the previous article, we introduced how to use Apache SkyWalking Rover to analyze the network performance issue in the service mesh environment. However, in business scenarios, users often rely on mature layer 7 protocols, such as HTTP, for interactions between systems. In this article, we will discuss how to use eBPF techniques to analyze performance bottlenecks of layer 7 protocols and how to enhance the tracing system using network sampling.\nThis article will show how to use Apache SkyWalking with eBPF to enhance metrics and traces in HTTP observability.\nHTTP Protocol Analysis HTTP is one of the most common Layer 7 protocols and is usually used to provide services to external parties and for inter-system communication. 
In the following sections, we will show how to identify and analyze HTTP/1.x protocols.\nProtocol Identification In HTTP/1.x, the client and server communicate through a single file descriptor (FD) on each side. Figure 1 shows the process of communication involving the following steps:\n Connect/accept: The client establishes a connection with the HTTP server, or the server accepts a connection from the client. Read/write (multiple times): The client or server reads and writes HTTPS requests and responses. A single request-response pair occurs within the same connection on each side. Close: The client and server close the connection.  To obtain HTTP content, it’s necessary to read it from the second step of this process. As defined in the RFC, the content is contained within the data of the Layer 4 protocol and can be obtained by parsing the data. The request and response pair can be correlated because they both occur within the same connection on each side.\nFigure 1: HTTP communication timeline.\nHTTP Pipeline HTTP pipelining is a feature of HTTP/1.1 that enables multiple HTTP requests to be sent over a single TCP connection without waiting for the corresponding responses. This feature is important because it ensures that the order of the responses on the server side matches the order of the requests.\nFigure 2 illustrates how this works. Consider the following scenario: an HTTP client sends multiple requests to a server, and the server responds by sending the HTTP responses in the same order as the requests. This means that the first request sent by the client will receive the first response from the server, the second request will receive the second response, and so on.\nWhen designing HTTP parsing, we should follow this principle by adding request data to a list and removing the first item when parsing a response. This ensures that the responses are processed in the correct order.\nFigure 2: HTTP/1.1 pipeline.\nMetrics Based on the identification of the HTTP content and process topology diagram mentioned in the previous article, we can combine these two to generate process-to-process metrics data.\nFigure 3 shows the metrics that currently support the analysis between the two processes. Based on the HTTP request and response data, we can analyze the following data:\n   Metrics Name Type Unit Description     Request CPM(Call Per Minute) Counter count The HTTP request count   Response Status CPM(Call Per Minute) Counter count The count of per HTTP response status code   Request Package Size Counter/Histogram Byte The request package size   Response Package Size Counter/Histogram Byte The response package size   Client Duration Counter/Histogram Millisecond The duration of single HTTP response on the client side   Server Duration Counter/Histogram Millisecond The duration of single HTTP response on the server side    Figure 3: Process-to-process metrics.\nHTTP and Trace During the HTTP process, if we unpack the HTTP requests and responses from raw data, we can use this data to correlate with the existing tracing system.\nTrace Context Identification In order to track the flow of requests between multiple services, the trace system usually creates a trace context when a request enters a service and passes it along to other services during the request-response process. For example, when an HTTP request is sent to another server, the trace context is included in the request header.\nFigure 4 displays the raw content of an HTTP request intercepted by Wireshark. 
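Since the raw capture in Figure 4 cannot be reproduced in text, the following minimal sketch (the header values are made up and the helper is illustrative, not SkyWalking Rover's actual implementation) shows how B3-style trace context could be located in a parsed header map:

```python
# Illustrative only: locate Zipkin B3 trace context in parsed HTTP headers.
B3_HEADERS = ("x-b3-traceid", "x-b3-spanid", "x-b3-parentspanid", "x-b3-sampled")

def extract_trace_context(headers: dict) -> dict:
    """Return any B3 trace-context headers present in the request."""
    normalized = {k.lower(): v for k, v in headers.items()}
    return {name: normalized[name] for name in B3_HEADERS if name in normalized}

# Hypothetical headers captured from an intercepted request.
headers = {
    "Host": "reviews:9080",
    "X-B3-TraceId": "80f198ee56343ba864fe8b2a57d3eff7",
    "X-B3-SpanId": "e457b5a2e4d86bd1",
    "X-B3-Sampled": "1",
}
print(extract_trace_context(headers))
```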
The trace context information generated by the Zipkin Tracing system can be identified by the “X-B3” prefix in the header. By using eBPF to intercept the trace context in the HTTP header, we can connect the current request with the trace system.\nFigure 4: View of HTTP headers in Wireshark.\nTrace Event We have added the concept of an event to traces. An event can be attached to a span and consists of start and end times, tags, and summaries, allowing us to attach any desired information to the Trace.\nWhen performing eBPF network profiling, two events can be generated based on the request-response data. Figure 5 illustrates what happens when a service performs an HTTP request with profiling. The trace system generates trace context information and sends it in the request. When the service executes in the kernel, we can generate an event for the corresponding trace span by interacting with the request-response data and execution time in the kernel space.\nPreviously, we could only observe the execution status in the user space. However, by combining traces and eBPF technologies, we can now also get more information about the current trace in the kernel space, which would impact less performance for the target service if we do similar things in the tracing SDK and agent.\nFigure 5: Logical view of profiling an HTTP request and response.\nSampling To ensure efficient data storage and minimize unnecessary data sampling, we use a sampling mechanism for traces in our system. This mechanism triggers sampling only when certain conditions are met. We also provide a list of the top N traces, which allows users to quickly access the relevant request information for a specific trace.\nTo help users easily identify and analyze relevant events, we offer three different sampling rules:\n Slow Traces: Sampling is triggered when the response time for a request exceeds a specified threshold. Response Status [400, 500): Sampling is triggered when the response status code is greater than or equal to 400 and less than 500. Response Status [500, 600): Sampling is triggered when the response status code is greater than or equal to 500 and less than 600.  In addition, we recognize that not all request or response raw data may be necessary for analysis. For example, users may be more interested in requesting data when trying to identify performance issues, while they may be more interested in response data when troubleshooting errors. As such, we also provide configuration options for request or response events to allow users to specify which type of data they would like to sample.\nProfiling in a Service Mesh The SkyWalking and SkyWalking Rover projects have already implemented the HTTP protocol analyze and trace associations. How do they perform when running in a service mesh environment?\nDeployment Figure 6 demonstrates the deployment of SkyWalking and SkyWalking Rover in a service mesh environment. SkyWalking Rover is deployed as a DaemonSet on each machine where a service is located and communicates with the SkyWalking backend cluster. It automatically recognizes the services on the machine and reports metadata information to the SkyWalking backend cluster. 
When a new network profiling task arises, SkyWalking Rover senses the task and analyzes the designated processes, collecting and aggregating network data before ultimately reporting it back to the SkyWalking backend service.\nFigure 6: SkyWalking rover deployment topology in a service mesh.\nTracing Systems Starting from version 9.3.0, the SkyWalking backend fully supports all functions in the Zipkin server. Therefore, the SkyWalking backend can collect traces from both the SkyWalking and Zipkin protocols. Similarly, SkyWalking Rover can identify and analyze trace context in both the SkyWalking and Zipkin trace systems. In the following two sections, network analysis results will be displayed in the SkyWalking and Zipkin UI respectively.\nSkyWalking When SkyWalking performs network profiling, similar to the TCP metrics in the previous article, the SkyWalking UI will first display the topology between processes. When you open the dashboard of the line representing the traffic metrics between processes, you can see the metrics of HTTP traffic from the “HTTP/1.x” tab and the sampled HTTP requests with tracing in the “HTTP Requests” tab.\nAs shown in Figure 7, there are three lists in the tab, each corresponding to a condition in the event sampling rules. Each list displays the traces that meet the pre-specified conditions. When you click on an item in the trace list, you can view the complete trace.\nFigure 7: Sampled HTTP requests within tracing context.\nWhen you click on an item in the trace list, you can quickly view the specified trace. In Figure 8, we can see that in the current service-related span, there is a tag with a number indicating how many HTTP events are related to that trace span.\nSince we are in a service mesh environment, each service involves interacting with Envoy. Therefore, the current span includes Envoy’s request and response information. Additionally, since the current service has both incoming and outgoing requests, there are events in the corresponding span.\nFigure 8: Events in the trace detail.\nWhen the span is clicked, the details of the span will be displayed. If there are events in the current span, the relevant event information will be displayed on a time axis. As shown in Figure 9, there are a total of 6 related events in the current Span. Each event represents a data sample of an HTTP request/response. One of the events spans multiple time ranges, indicating a longer system call time. It may be due to a blocked system call, depending on the implementation details of the HTTP request in different languages. This can also help us query the possible causes of errors.\nFigure 9: Events in one trace span.\nFinally, we can click on a specific event to see its complete information. As shown in Figure 10, it displays the sampling information of a request, including the SkyWalking trace context protocol contained in the request header from the HTTP raw data. The raw request data allows you to quickly re-request the request to solve any issues.\nFigure 10: The detail of the event.\nZipkin Zipkin is one of the most widely used distributed tracing systems in the world. SkyWalking can function as an alternative server to provide advanced features for Zipkin users. Here, we use this way to bring the feature into the Zipkin ecosystem out-of-box. 
The new events would also be treated as a kind of Zipkin’s tags and annotations.\nTo add events to a Zipkin span, we need to do the following:\n Split the start and end times of each event into two annotations with a canonical name. Add the sampled HTTP raw data from the event to the Zipkin span tags, using the same event name for corresponding purposes.  Figures 11 and 12 show annotations and tags in the same span. In these figures, we can see that the span includes at least two events with the same event name and sequence suffix (e.g., “Start/Finished HTTP Request/Response Sampling-x” in the figure). Both events have separate timestamps to represent their relative times within the span. In the tags, the data content of the corresponding event is represented by the event name and sequence number, respectively.\nFigure 11: Event timestamp in the Zipkin span annotation.\nFigure 12: Event raw data in the Zipkin span tag.\nDemo In this section, we demonstrate how to perform network profiling in a service mesh and complete metrics collection and HTTP raw data sampling. To follow along, you will need a running Kubernetes environment.\nDeploy SkyWalking Showcase SkyWalking Showcase contains a complete set of example services and can be monitored using SkyWalking. For more information, please check the official documentation.\nIn this demo, we only deploy service, the latest released SkyWalking OAP, and UI.\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.3.0 export SW_UI_IMAGE=apache/skywalking-ui:9.3.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.4.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes After deployment is complete, please run the following script to open SkyWalking UI: http://localhost:8080/.\nkubectl port-forward svc/ui 8080:8080 --namespace default Start Network Profiling Task Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn figure 13, we have selected an instance with a list of tasks in the network profiling tab.\nFigure 13: Network Profiling tab in the Data Plane.\nWhen we click the Start button, as shown in Figure 14, we need to specify the sampling rules for the profiling task. The sampling rules consist of one or more rules, each of which is distinguished by a different URI regular expression. When the HTTP request URI matches the regular expression, the rule is used. If the URI regular expression is empty, the default rule is used. Using multiple rules can help us make different sampling configurations for different requests.\nEach rule has three parameters to determine if sampling is needed:\n Minimal Request Duration (ms): requests with a response time exceeding the specified time will be sampled. Sampling response status code between 400 and 499: all status codes in the range [400-499) will be sampled. Sampling response status code between 500 and 599: all status codes in the range [500-599) will be sampled.  Once the sampling configuration is complete, we can create the task.\nFigure 14: Create network profiling task page.\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nWhen you click on the line between processes, you can view the data between the two processes, which is divided into three tabs:\n TCP: displays TCP-related metrics. HTTP/1.x: displays metrics in the HTTP 1 protocol. 
HTTP Requests: displays the analyzed request and saves it to a list according to the sampling rule.  Figure 16: TCP metrics in a network profiling task.\nFigure 17: HTTP/1.x metrics in a network profiling task.\nFigure 18: HTTP sampled requests in a network profiling task.\nConclusion In this article, we detailed the overview of how to analyze the Layer 7 HTTP/1.x protocol in network analysis, and how to associate it with existing trace systems. This allows us to extend the scope of data we can observe from just user space to also include kernel-space data.\nIn the future, we will delve further into the analysis of kernel data, such as collecting information on TCP packet size, transmission frequency, network card, and help on enhancing distributed tracing from another perspective.\nAdditional Resources  SkyWalking Github Repo › SkyWalking Rover Github Repo › SkyWalking Rover Documentation › Diagnose Service Mesh Network Performance with eBPF blog post \u0026gt; SkyWalking Profiling Documentation \u0026gt; SkyWalking Trace Context Propagation \u0026gt; Zipkin Trace Context Propagation \u0026gt; RFC - Hypertext Transfer Protocol – HTTP/1.1 \u0026gt;  ","title":"eBPF enhanced HTTP observability - L7 metrics and tracing","url":"/blog/ebpf-enhanced-http-observability-l7-metrics-and-tracing/"},{"content":"背景 Apache SkyWalking 是一个开源应用性能管理系统,帮助用户收集和聚合日志、追踪、指标和事件,并在 UI 上显示。在上一篇文章中,我们介绍了如何使用 Apache SkyWalking Rover 分析服务网格环境中的网络性能问题。但是,在商业场景中,用户通常依靠成熟的第 7 层协议(如 HTTP)来进行系统之间的交互。在本文中,我们将讨论如何使用 eBPF 技术来分析第 7 层协议的性能瓶颈,以及如何使用网络采样来增强追踪系统。\n本文将演示如何使用 Apache SkyWalking 与 eBPF 来增强 HTTP 可观察性中的指标和追踪。\nHTTP 协议分析 HTTP 是最常用的 7 层协议之一,通常用于为外部方提供服务和进行系统间通信。在下面的章节中,我们将展示如何识别和分析 HTTP/1.x 协议。\n协议识别 在 HTTP/1.x 中,客户端和服务器通过两端的单个文件描述符(File Descriptor)进行通信。图 1 显示了涉及以下步骤的通信过程:\n Connect/Accept:客户端与 HTTP 服务器建立连接,或者服务器接受客户端的连接。 Read/Write(多次):客户端或服务器读取和写入 HTTPS 请求和响应。单个请求 - 响应对在每边的同一连接内发生。 Close:客户端和服务器关闭连接。  为了获取 HTTP 内容,必须从此过程的第二步读取它。根据 RFC 定义,内容包含在 4 层协议的数据中,可以通过解析数据来获取。请求和响应对可以相关联,因为它们都在两端的同一连接内发生。\n图 1:HTTP 通信时间线。\nHTTP 管线化 HTTP 管线化(Pipelining)是 HTTP/1.1 的一个特性,允许在等待对应的响应的情况下在单个 TCP 连接上发送多个 HTTP 请求。这个特性很重要,因为它确保了服务器端的响应顺序必须与请求的顺序匹配。\n图 2 说明了这是如何工作的,考虑以下情况:HTTP 客户端向服务器发送多个请求,服务器通过按照请求的顺序发送 HTTP 响应来响应。这意味着客户端发送的第一个请求将收到服务器的第一个响应,第二个请求将收到第二个响应,以此类推。\n在设计 HTTP 解析时,我们应该遵循这个原则,将请求数据添加到列表中,并在解析响应时删除第一个项目。这可以确保响应按正确的顺序处理。\n图 2: HTTP/1.1 管道。\n指标 根据前文提到的 HTTP 内容和流程拓扑图的识别,我们可以将这两者结合起来生成进程间的指标数据。\n图 3 显示了目前支持两个进程间分析的指标。基于 HTTP 请求和响应数据,可以分析以下数据:\n   指标名称 类型 单位 描述     请求 CPM(Call Per Minute) 计数器 计数 HTTP 请求计数   响应状态 CPM (Call Per Minute) 计数器 计数 每个 HTTP 响应状态码的计数   请求包大小 计数器 / 直方图 字节 请求包大小   响应包大小 计数器 / 直方图 字节 响应包大小   客户端持续时间 计数器 / 直方图 毫秒 客户端单个 HTTP 响应的持续时间   服务器持续时间 计数器 / 直方图 毫秒 服务器端单个 HTTP 响应的持续时间    图 3:进程到进程指标。\nHTTP 和追踪 在 HTTP 过程中,如果我们能够从原始数据中解包 HTTP 请求和响应,就可以使用这些数据与现有的追踪系统进行关联。\n追踪上下文标识 为了追踪多个服务之间的请求流,追踪系统通常在请求进入服务时创建追踪上下文,并在请求 - 响应过程中将其传递给其他服务。例如,当 HTTP 请求发送到另一个服务器时,追踪上下文包含在请求头中。\n图 4 显示了 Wireshark 拦截的 HTTP 请求的原始内容。由 Zipkin Tracing 系统生成的追踪上下文信息可以通过头中的 “X-B3” 前缀进行标识。通过使用 eBPF 拦截 HTTP 头中的追踪上下文,可以将当前请求与追踪系统连接起来。\n图 4:Wireshark 中的 HTTP Header 视图。\nTrace 事件 我们已经将事件这个概念加入了追踪中。事件可以附加到跨度上,并包含起始和结束时间、标签和摘要,允许我们将任何所需的信息附加到追踪中。\n在执行 eBPF 网络分析时,可以根据请求 - 响应数据生成两个事件。图 5 说明了在带分析的情况下执行 HTTP 请求时发生的情况。追踪系统生成追踪上下文信息并将其发送到请求中。当服务在内核中执行时,我们可以通过与内核空间中的请求 - 响应数据和执行时间交互,为相应的追踪跨度生成事件。\n以前,我们只能观察用户空间的执行状态。现在,通过结合追踪和 eBPF 技术,我们还可以在内核空间获取更多关于当前追踪的信息,如果我们在追踪 SDK 和代理中执行类似的操作,将对目标服务的性能产生较小的影响。\n图 5:分析 HTTP 请求和响应的逻辑视图。\n抽样 该机制仅在满足特定条件时触发抽样。我们还提供了前 N 条追踪的列表,允许用户快速访问特定追踪的相关请求信息。为了帮助用户轻松识别和分析相关事件,我们提供了三种不同的抽样规则:\n 慢速追踪:当请求的响应时间超过指定阈值时触发抽样。 响应状态 [400,500):当响应状态代码大于或等于 
400 且小于 500 时触发抽样。 响应状态 [500,600):当响应状态代码大于或等于 500 且小于 600 时触发抽样。  此外,我们认识到分析时可能并不需要所有请求或响应的原始数据。例如,当试图识别性能问题时,用户可能更感兴趣于请求数据,而在解决错误时,他们可能更感兴趣于响应数据。因此,我们还提供了请求或响应事件的配置选项,允许用户指定要抽样的数据类型。\n服务网格中的分析 SkyWalking Rover 项目已经实现了 HTTP 协议的分析和追踪关联。当在服务网格环境中运行时它们的表现如何?\n部署 图 6 演示了 SkyWalking 和 SkyWalking Rover 在服务网格环境中的部署方式。SkyWalking Rover 作为一个 DaemonSet 部署在每台服务所在的机器上,并与 SkyWalking 后端集群通信。它会自动识别机器上的服务并向 SkyWalking 后端集群报告元数据信息。当出现新的网络分析任务时,SkyWalking Rover 会感知该任务并对指定的进程进行分析,在最终将数据报告回 SkyWalking 后端服务之前,收集和聚合网络数据。\n图 6:服务网格中的 SkyWalking rover 部署拓扑。\n追踪系统 从版本 9.3.0 开始,SkyWalking 后端完全支持 Zipkin 服务器中的所有功能。因此,SkyWalking 后端可以收集来自 SkyWalking 和 Zipkin 协议的追踪。同样,SkyWalking Rover 可以在 SkyWalking 和 Zipkin 追踪系统中识别和分析追踪上下文。在接下来的两节中,网络分析结果将分别在 SkyWalking 和 Zipkin UI 中显示。\nSkyWalking 当 SkyWalking 执行网络分析时,与前文中的 TCP 指标类似,SkyWalking UI 会首先显示进程间的拓扑图。当打开代表进程间流量指标的线的仪表板时,您可以在 “HTTP/1.x” 选项卡中看到 HTTP 流量的指标,并在 “HTTP Requests” 选项卡中看到带追踪的抽样的 HTTP 请求。\n如图 7 所示,选项卡中有三个列表,每个列表对应事件抽样规则中的一个条件。每个列表显示符合预先规定条件的追踪。当您单击追踪列表中的一个项目时,就可以查看完整的追踪。\n图 7:Tracing 上下文中的采样 HTTP 请求。\n当您单击追踪列表中的一个项目时,就可以快速查看指定的追踪。在图 8 中,我们可以看到在当前的服务相关的跨度中,有一个带有数字的标签,表示与该追踪跨度相关的 HTTP 事件数。\n由于我们在服务网格环境中,每个服务都涉及与 Envoy 交互。因此,当前的跨度包括 Envoy 的请求和响应信息。此外,由于当前的服务有传入和传出的请求,因此相应的跨度中有事件。\n图 8:Tracing 详细信息中的事件。\n当单击跨度时,将显示跨度的详细信息。如果当前跨度中有事件,则相关事件信息将在时间轴上显示。如图 9 所示,当前跨度中一共有 6 个相关事件。每个事件代表一个 HTTP 请求 / 响应的数据样本。其中一个事件跨越多个时间范围,表示较长的系统调用时间。这可能是由于系统调用被阻塞,具体取决于不同语言中的 HTTP 请求的实现细节。这也可以帮助我们查询错误的可能原因。\n图 9:一个 Tracing 范围内的事件。\n最后,我们可以单击特定的事件查看它的完整信息。如图 10 所示,它显示了一个请求的抽样信息,包括从 HTTP 原始数据中的请求头中包含的 SkyWalking 追踪上下文协议。原始请求数据允许您快速重新请求以解决任何问题。\n图 10:事件的详细信息。\nZipkin Zipkin 是世界上广泛使用的分布式追踪系统。SkyWalking 可以作为替代服务器,提供高级功能。在这里,我们使用这种方式将功能无缝集成到 Zipkin 生态系统中。新事件也将被视为 Zipkin 的标签和注释的一种。\n为 Zipkin 跨度添加事件,需要执行以下操作:\n 将每个事件的开始时间和结束时间分别拆分为两个具有规范名称的注释。 将抽样的 HTTP 原始数据从事件添加到 Zipkin 跨度标签中,使用相同的事件名称用于相应的目的。  图 11 和图 12 显示了同一跨度中的注释和标签。在这些图中,我们可以看到跨度包含至少两个具有相同事件名称和序列后缀的事件(例如,图中的 “Start/Finished HTTP Request/Response Sampling-x”)。这两个事件均具有单独的时间戳,用于表示其在跨度内的相对时间。在标签中,对应事件的数据内容分别由事件名称和序列号表示。\n图 11:Zipkin span 注释中的事件时间戳。\n图 12:Zipkin span 标签中的事件原始数据。\n演示 在本节中,我们将演示如何在服务网格中执行网络分析,并完成指标收集和 HTTP 原始数据抽样。要进行操作,您需要一个运行中的 Kubernetes 环境。\n部署 SkyWalking Showcase SkyWalking Showcase 包含一套完整的示例服务,可以使用 SkyWalking 进行监控。有关详细信息,请参阅官方文档。\n在本演示中,我们只部署了服务、最新发布的 SkyWalking OAP 和 UI。\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.3.0 export SW_UI_IMAGE=apache/skywalking-ui:9.3.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.4.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes 部署完成后,运行下面的脚本启动 SkyWalking UI:http://localhost:8080/。\nkubectl port-forward svc/ui 8080:8080 --namespace default 启动网络分析任务 目前,我们可以通过单击服务网格面板中的 Data Plane 项和 Kubernetes 面板中的 Service 项来选择要监视的特定实例。\n在图 13 中,我们已在网络分析选项卡中选择了一个具有任务列表的实例。\n图 13:数据平面中的网络分析选项卡。\n当我们单击 “开始” 按钮时,如图 14 所示,我们需要为分析任务指定抽样规则。抽样规则由一个或多个规则组成,每个规则都由不同的 URI 正则表达式区分。当 HTTP 请求的 URI 与正则表达式匹配时,将使用该规则。如果 URI 正则表达式为空,则使用默认规则。使用多个规则可以帮助我们为不同的请求配置不同的抽样配置。\n每个规则都有三个参数来确定是否需要抽样:\n 最小请求持续时间(毫秒):响应时间超过指定时间的请求将被抽样。 在 400 和 499 之间的抽样响应状态代码:范围 [400-499) 中的所有状态代码将被抽样。 在 500 和 599 之间的抽样响应状态代码:范围 [500-599) 中的所有状态码将被抽样。  抽样配置完成后,我们就可以创建任务了。\n图 14:创建网络分析任务页面。\n完成 几秒钟后,你会看到页面的右侧出现进程拓扑结构。\n图 15:网络分析任务中的流程拓扑。\n当您单击进程之间的线时,您可以查看两个过程之间的数据,它被分为三个选项卡:\n TCP:显示与 TCP 相关的指标。 HTTP/1.x:显示 HTTP 1 协议中的指标。 HTTP 请求:显示已分析的请求,并根据抽样规则保存到列表中。  图 16:网络分析任务中的 TCP 指标。\n图 17:网络分析任务中的 HTTP/1.x 指标。\n图 18:网络分析任务中的 HTTP 采样请求。\n总结 在本文中,我们详细介绍了如何在网络分析中分析 7 层 HTTP/1.x 协议,以及如何将其与现有追踪系统相关联。这使我们能够将我们能够观察到的数据从用户空间扩展到内核空间数据。\n在未来,我们将进一步探究内核数据的分析,例如收集 TCP 包大小、传输频率、网卡等信息,并从另一个角度提升分布式追踪。\n其他资源  SkyWalking Github Repo › 
SkyWalking Rover Github Repo › SkyWalking Rover Documentation › Diagnose Service Mesh Network Performance with eBPF blog post \u0026gt; SkyWalking Profiling Documentation \u0026gt; SkyWalking Trace Context Propagation \u0026gt; Zipkin Trace Context Propagation \u0026gt; RFC - Hypertext Transfer Protocol – HTTP/1.1 \u0026gt;  ","title":"使用 eBPF 提升 HTTP 可观测性 - L7 指标和追踪","url":"/zh/ebpf-enhanced-http-observability-l7-metrics-and-tracing/"},{"content":"SkyWalking Rust 0.5.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Add management support. by @jmjoy in https://github.com/apache/skywalking-rust/pull/48 Add missing_docs lint and supply documents. by @jmjoy in https://github.com/apache/skywalking-rust/pull/49 Add authentication and custom intercept support. by @jmjoy in https://github.com/apache/skywalking-rust/pull/50 Bump to 0.5.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/51  ","title":"Release Apache SkyWalking Rust 0.5.0","url":"/events/release-apache-skywalking-rust-0-5-0/"},{"content":"SkyWalking Satellite 1.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support transmit the OpenTelemetry Metrics protocol. Upgrade to GO 1.18. Add Docker images for arm64 architecture. Support transmit Span Attached Event protocol data. Support dotnet CLRMetric forward.  Bug Fixes  Fix the missing return data when receive metrics in batch mode. Fix CVE-2022-21698, CVE-2022-27664.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 1.1.0","url":"/events/release-apache-skwaylking-satellite-1-1-0/"},{"content":"Apache SkyWalking is an open-source APM for a distributed system, Apache Software Foundation top-level project.\nOn Jan. 3rd, 2023, we received reports about Aliyun Trace Analysis Service. It provides a cloud service compatible with SkyWalking trace APIs and agents.\nOn their product page, there is a best-practice document describing about their service is not SkyWalking OAP, but can work with SkyWalking agents to support SkyWalking\u0026rsquo;s In-Process(Trace) Profiling.\nBUT, they copied the whole page of SkyWalking\u0026rsquo;s profiling UI, including page layout, words, and profiling task setup. The only difference is the color schemes.\nSkyWalking UI Aliyun Trace Analysis UI on their document page  The UI visualization is a part of the copyright. Aliyun declared their backend is NOT a re-distribution of SkyWalking repeatedly on their website, and they never mentioned this page is actually copied from upstream.\nThis is a LICENSE issue, violating SkyWalking\u0026rsquo;s copyright and Apache 2.0 License. They don\u0026rsquo;t respect Apache Software Foundation and Apache SkyWalking\u0026rsquo;s IP and Branding.\n","title":"[License Issue] Aliyun(阿里云)'s trace analysis service copied SkyWalking's trace profiling page.","url":"/blog/2023-01-03-aliyun-copy-page/"},{"content":"SkyWalking Rover 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Enhancing the render context for the Kubernetes process. Simplify the logic of network protocol analysis. Upgrade Go library to 1.18, eBPF library to 0.9.3. Make the Profiling module compatible with more Linux systems. Support monitor HTTP/1.x in the NETWORK profiling.  Bug Fixes Documentation  Adding support version of Linux documentation.  
Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Rover 0.4.0","url":"/events/release-apache-skwaylking-rover-0-4-0/"},{"content":"Observability for modern distributed applications work is critical for understanding how they behave under a variety of conditions and for troubleshooting and resolving issues when they arise. Traces, metrics, and logs are regarded as fundamental parts of the observability stack. Traces are the footprints of distributed system executions, meanwhile, metrics measure system performance with numbers in the timeline. Essentially, they measure the performance from two dimensions. Being able to quickly visualize the connection between traces and corresponding metrics makes it possible to quickly diagnose which process flows are correlated to potentially pathological behavior. This powerful new capability is now available in SkyWalking 9.3.0.\nThe SkyWalking project started only with tracing, with a focus on 100% sampling-based metrics and topology analysis since 2018. When users face anomaly trends of time-series metrics, like a peak on the line chart, or histogram shows a larger gap between p95 and p95, the immediate question is, why is this happening? One of SkyWalking\u0026rsquo;s latest features, the trace-metric association, makes it much easier to answer that question and to address the root cause.\nHow Are Metrics Generated? SkyWalking provides three ways to calculate metrics:\n Metrics built from trace spans, depending on the span’s layer, kind, and tags. Metrics extracted from logs—a kind of keyword and tags-based metrics extraction. Metrics reported from mature and mainstream metrics/meter systems, such as OpenTelemetry, Prometheus, and Zabbix.  Tracing tracks the processes of requests between an application\u0026rsquo;s services. Most systems that generate traffic and performance-related metrics also generate tracing data, either from server-side trace-based aggregations or through client SDKs.\nUse SkyWalking to Reduce the Traditional Cost of Trace Indexing Tracing data and visualization are critical troubleshooting tools for both developers and operators alike because of how helpful they are in locating issue boundaries. But, because it has traditionally been difficult to find associations between metrics and traces, teams have added increasingly more tags into the spans, and search through various combinations. This trend of increased instrumentation and searching has required increased infrastructure investment to support this kind of search. SkyWalking\u0026rsquo;s metrics and tracing association capabilities can help reduce the cost of indexing and searching that data.\nFind the Associated Trace When looking for association between metrics and traces, the kind of metrics we\u0026rsquo;re dealing with determines their relationships to traces. Let’s review the standard request rate, error, and duration (RED) metrics to see how it works.\nSuccess Rate Metrics The success rate is determined by the return code, RPC response code, or exceptions of the process. 
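As a rough illustration only (SkyWalking itself derives these metrics from spans through its OAL/MAL analysis rules on the OAP side), a per-minute success rate could be computed from response codes like this:

```python
# Illustrative sketch: derive a success rate from collected response codes.
def success_rate(status_codes, error_threshold=400):
    """Percentage of responses whose status code is below the error threshold."""
    if not status_codes:
        return 100.0
    ok = sum(1 for code in status_codes if code < error_threshold)
    return 100.0 * ok / len(status_codes)

print(success_rate([200, 200, 404, 200, 500]))  # 60.0
```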
When the success rate decreases, looking for errors in the traces of this service or pod are the first place to look to find clues.\nFigure 1: The success rate graph from SkyWalking\u0026rsquo;s 9.3.0 dashboard with the option to view related traces at a particular time.\nDrilling down from the peak of the success rate, SkyWalking lists all traces and their error status that were collected in this particular minute (Figure 2):\nFigure 2: SkyWalking shows related traces with an error status.\nRequests to /test can be located from the trace, and the span’s tag indicates a 404 response code of the HTTP request.\nFigure 3: A detail view of a request to http://frontend/test showing that the URI doesn\u0026rsquo;t exist.\nBy looking at the trace data, it becomes immediately clear that the drop in success rate is caused by requests to a nonexistent URI.\nAverage Response Time The average response time metric provides a general overview of service performance. When average response time is unstable, this usually means that the system is facing serious performance impacts.\nFigure 4: SkyWalking\u0026rsquo;s query UI for searching for related traces showing traces for requests that exceed a particular duration threshold.\nWhen you drill down from this metric, this query condition (Figure 4) will reveal the slowest traces of the service in this specific minute. Notice, at least 168ms is added as a condition automatically, to avoid scanning a large number of rows in the Database.\nApdex Apdex—the Application Performance Index—is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times (Figure 5). The response time is measured from an asset request to completed delivery back to the requestor.\nFigure 5: The Apdex formula\nA user defines a response time tolerating threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nWhen the Apdex score decreases, we need to find related traces from two perspectives: slow traces and error status traces. SkyWalking\u0026rsquo;s new related tracing features offers a quick way to view both (Figure 6) directly from the Apdex graph.\nFigure 6: Show slow trace and error status traces from the Apdex graph\nService Response Time Percentile MetricThe percentile graph (Figure 7) provides p50, p75, p90, p95, and p99 latency ranks to measure the long-tail issues of service performance.\nFigure 7: The service response time percentile graph helps to highlight long-tail issues of service performance.\nThis percentile graph shows a typical long-tail issue. P99 latency is four times slower than the P95. When we use the association, we see the traces with latency between P95 - P99 and P99 - Infinity.\nThe traces of requests causing this kind of long-tail phenomena are automatically listing from there.\nFigure 8: Query parameters to search for traces based on latency.\nAre More Associations Available? 
SkyWalking provides more than just associations between between traces and metrics to help you find possible causal relationships and to avoid looking for the proverbial needle in a haystack.\nCurrently, SkyWalking 9.3.0 offers two more associations: metric-to-metric associations and event-to-metric associations.\nMetric-to-metric Associations There are dozens of metrics on the dashboard—which is great for getting a complete picture of application behavior. During a typical performance issue, the peaks of multiple metrics are affected simultaneously. But, trying to correlate peaks across all of these graphs can be difficult\u0026hellip;\nNow in SkyWalking 9.3.0, when you click the peak of one graph, the pop-out box lets you see associated metrics.\nFigure 9: SkyWalking\u0026rsquo;s option to view associated metrics.\nWhen you choose that option, all associated metrics graphs will show axis pointers (the dotted vertical lines) in all associated graphs like in Figure 10. This makes it easier to correlate the peaks in different graphs with each other. Often, these correlated peaks with have the same root cause.\nFigure 10: Axis pointers (vertical dotted lines) show associations between peaks across multiple metrics graphs.\nEvent-to-Metric Associations SkyWalking provides the event concept to associate possible service performance impacted by the infrastructure, such as new deployment even from k8s. Or, the anomaly had been detected by alerting or integrated AIOps engine.\nThe event to metrics association is also automatically, it could cover the time range of the event on the metric graphs(blue areas). If the area of event and peaks are matched, most likely this event covered this anomaly.\nFigure 11: SkyWalking\u0026rsquo;s event to metric association view.\nSkyWalking Makes it Easier and Faster to Find Root Causes SkyWalking now makes it easy to find associations between metrics, events, and traces, ultimately making it possible to identify root causes and fix problems fast. The associations we\u0026rsquo;ve discussed in this article are available out-of-box in the SkyWalking 9.3.0 release.\nFigure 12: Just click on the dots to see related traces and metrics associations.\nClick the dots on any metric graph, and you will see a View Related Traces item pop-out if this metric has logical mapping traces.\nConclusion In this blog, we took a look at the newly-added association feature between metrics and traces. With this new visualization, it\u0026rsquo;s now much easier to find key traces to identify root cause of issues.Associations in SkyWalking can go even deeper. Associations from metrics to traces is not the end of diagnosing system bottleneck. In the next post, we will introduce an eBPF powered trace enhancement where you’ll be able to see HTTP request and response details associated with tracing spans from network profiling. Stay tuned.\n","title":"Boost Root Cause Analysis Quickly With SkyWalking’s New Trace-Metrics Association Feature","url":"/blog/boost-root-cause-analysis-quickly-with-skywalking-new-trace-metrics-association-feature/"},{"content":"现代分布式应用程序工作的可观测性对于了解它们在各种条件下的行为方式以及在出现问题时进行故障排除和解决至关重要。追踪、指标和日志被视为可观测性堆栈的基本部分。Trace 是分布式系统执行的足迹,而 metric 则是用时间轴上的数字衡量系统性能。本质上,它们从两个维度衡量性能。能够快速可视化追踪和相应指标之间的联系,可以快速诊断哪些流程与潜在的异常相关。SkyWalking 9.3.0 现在提供了这一强大的新功能。\nSkyWalking 项目从 tracing 开始,从 2018 年开始专注于 100% 基于采样的指标和拓扑分析。当用户面对时间序列指标的异常趋势时,比如折线图上的峰值,或者直方图显示 p95 和 p95 之间的差距较大,直接的问题是,为什么会出现这种情况?SkyWalking 的最新功能之一,trace 与 metric 关联,使得回答这个问题和解决根本原因更加容易。\n指标是如何生成的? 
SkyWalking 提供了三种计算指标的方式:\n 根据追踪跨度构建的指标,具体取决于跨度的层、种类和标签。 从日志中提取指标—— 一种基于关键词和标签的指标提取。 从成熟和主流的指标 / 仪表系统报告的指标,例如 OpenTelemetry、Prometheus 和 Zabbix。  Tracing 追踪应用程序服务之间的请求过程。大多数生成流量和性能相关指标的系统也会生成追踪数据,这些数据来自服务器端基于追踪的聚合或通过客户端 SDK。\n使用 SkyWalking 降低追踪索引的传统成本 Trace 数据和可视化对于开发人员和运维人员来说都是至关重要的故障排除工具,因为它们在定位问题边界方面非常有帮助。但是,由于传统上很难找到指标和痕迹之间的关联,团队已经将越来越多的标签添加到跨度中,并搜索各种组合。这种增加仪器和搜索的趋势需要增加基础设施投资来支持这种搜索。SkyWalking 的指标和追踪关联功能有助于降低索引和搜索该数据的成本。\n查找关联的 trace 在寻找 metric 和 trace 之间的关联时,我们处理的指标类型决定了它们与 trace 的关系。让我们回顾一下标准请求*率、错误和持续时间(RED)*指标,看看它是如何工作的。\n成功率指标 成功率由返回码、RPC 响应码或进程异常决定。当成功率下降时,在这个服务或 Pod 的 trace 中寻找错误是第一个寻找线索的地方。\n图 1:SkyWalking 9.3.0 仪表板的成功率图表,带有在特定时间查看相关 trace 的选项。\n从成功率的峰值向下探索,SkyWalking 列出了在这一特定分钟内收集的所有 trace 及其错误状态(图 2):\n图 2:SkyWalking 显示具有错误状态的相关追踪。\n可以从 trace 中找到对 /test 的请求,并且 span 的标记指示 HTTP 请求的 404 响应代码。\n图 3:显示 URI 不存在的 http://frontend/test 请求的详细视图。\n通过查看 trace 数据,很明显成功率的下降是由对不存在的 URI 的请求引起的。\n平均响应时间 平均响应时间指标提供了服务性能的一般概览。当平均响应时间不稳定时,这通常意味着系统面临严重的性能影响。\n图 4:SkyWalking 用于搜索相关 trace 的查询 UI,显示超过特定持续时间阈值的请求的 trace。\n当您从该指标向下探索时,该查询条件(图 4)将揭示该特定分钟内服务的最慢 trace。请注意,至少 168ms 作为条件自动添加,以避免扫描数据库中的大量行。\nApdex Apdex(应用程序性能指数)是根据设定的阈值衡量响应时间的指标。它测量令人满意的响应时间与不令人满意的响应时间的比率(图 5)。响应时间是从资产请求到完成交付回请求者的时间。\n图 5:Apdex 公式\n用户定义响应时间容忍阈值 T。在 T 或更短时间内处理的所有响应都使用户满意。\n例如,如果 T 为 1.2 秒,响应在 0.5 秒内完成,则用户会感到满意。所有大于 1.2 秒的响应都会让用户不满意。超过 4.8 秒的响应会让用户感到沮丧。\n当 Apdex 分数下降时,我们需要从两个角度寻找相关的 trace:慢速和错误状态的 trace。SkyWalking 的新相关追踪功能提供了一种直接从 Apdex 图表查看两者(图 6)的快速方法。\n图 6:显示 Apdex 图中的慢速 trace 和错误状态 trace\n服务响应时间 百分位指标百分位图(图 7)提供 p50、p75、p90、p95 和 p99 延迟排名,以衡量服务性能的长尾问题。\n图 7:服务响应时间百分位图有助于突出服务性能的长尾问题。\n这个百分位数图显示了一个典型的长尾问题。P99 延迟比 P95 慢四倍。当我们使用关联时,我们会看到 P95 - P99 和 P99 - Infinity 之间具有延迟的 trace。\n造成这种长尾现象的请求 trace,就是从那里自动列出来的。\n图 8:用于根据延迟搜索 trace 的查询参数。\n是否有更多关联可用? SkyWalking 提供的不仅仅是 trace 和 metric 之间的关联,还可以帮助您找到可能的因果关系,避免大海捞针。\n目前,SkyWalking 9.3.0 提供了两种关联:metric-to-metric 关联和 event-to-metric 关联。\nMetric-to-metric 关联 仪表板上有许多指标 —— 这对于全面了解应用程序行为非常有用。在典型的性能问题中,多个指标的峰值会同时受到影响。但是,尝试关联所有这些图表中的峰值可能很困难……\n现在在 SkyWalking 9.3.0 中,当你点击一个图表的峰值时,弹出框可以让你看到相关的指标。\n图 9:SkyWalking 用于查看相关指标的选项。\n当您选择该选项时,所有关联的指标图表将在所有关联的图表中显示轴指针(垂直虚线),如图 10 所示。这使得将不同图表中的峰值相互关联起来变得更加容易。通常,这些相关的峰值具有相同的根本原因。\n图 10:轴指针(垂直虚线)显示多个指标图中峰值之间的关联。\nEvent-to-metric 关联 SkyWalking 提供了事件概念来关联可能受基础设施影响的服务性能,例如来自 Kubernetes 的新部署。或者,已通过警报或集成 AIOps 引擎检测到异常。\n事件到指标的关联也是自动的,它可以覆盖指标图上事件的时间范围(蓝色区域)。如果事件区域和峰值匹配,则很可能该事件覆盖了该异常。\n图 11:SkyWalking 的事件与指标关联视图。\nSkyWalking 使查找根本原因变得更加容易和快速 SkyWalking 现在可以轻松找到指标、事件和追踪之间的关联,最终可以确定根本原因并快速解决问题。我们在本文中讨论的关联在 SkyWalking 9.3.0 版本中开箱即用。\n图 12:只需单击圆点即可查看相关 trace 和 metric 关联。\n单击任何指标图上的点,如果该指标具有逻辑映射,您将看到一个查看相关 trace 弹出窗口。\n结论 在这篇博客中,我们了解了 metric 和 trace 之间新增的关联功能。有了这个新的可视化,现在可以更容易地找到关键 trace 来识别问题的根本原因。SkyWalking 中的关联可以更深入。从 metric 到 trace 的关联并不是诊断系统瓶颈的终点。在下一篇文章中,我们将介绍 eBPF 支持的追踪增强功能,您将看到与网络分析中的追踪跨度相关的 HTTP 请求和响应详细信息。敬请关注。\n","title":"SkyWalking 推出 trace-metric 关联功能助力快速根源问题排查","url":"/zh/boost-root-cause-analysis-quickly-with-skywalking-new-trace-metrics-association-feature/"},{"content":"In cloud native applications, a request often needs to be processed through a series of APIs or backend services, some of which are parallel and some serial and located on different platforms or nodes. How do we determine the service paths and nodes a call goes through to help us troubleshoot the problem? 
This is where distributed tracing comes into play.\nThis article covers:\n How distributed tracing works How to choose distributed tracing software How to use distributed tracing in Istio How to view distributed tracing data using Bookinfo and SkyWalking as examples  Distributed Tracing Basics Distributed tracing is a method for tracing requests in a distributed system to help users better understand, control, and optimize distributed systems. There are two concepts used in distributed tracing: TraceID and SpanID. You can see them in Figure 1 below.\n TraceID is a globally unique ID that identifies the trace information of a request. All traces of a request belong to the same TraceID, and the TraceID remains constant throughout the trace of the request. SpanID is a locally unique ID that identifies a request’s trace information at a certain time. A request generates different SpanIDs at different periods, and SpanIDs are used to distinguish trace information for a request at different periods.  TraceID and SpanID are the basis of distributed tracing. They provide a uniform identifier for request tracing in distributed systems and facilitate users’ ability to query, manage, and analyze the trace information of requests.\nFigure 1: Trace and span\nThe following is the process of distributed tracing:\n When a system receives a request, the distributed tracing system assigns a TraceID to the request, which is used to chain together the entire chain of invocations. The distributed trace system generates a SpanID and ParentID for each service call within the system for the request, which is used to record the parent-child relationship of the call; a Span without a ParentID is used as the entry point of the call chain. TraceID and SpanID are to be passed during each service call. When viewing a distributed trace, query the full process of a particular request by TraceID.  How Istio Implements Distributed Tracing Istio’s distributed tracing is based on information collected by the Envoy proxy in the data plane. After a service request is intercepted by Envoy, Envoy adds tracing information as headers to the request forwarded to the destination workload. The following headers are relevant for distributed tracing:\n As TraceID: x-request-id Used to establish parent-child relationships for Span in the LightStep trace: x-ot-span-context\u0026lt;/li Used for Zipkin, also for Jaeger, SkyWalking, see b3-propagation:  x-b3-traceid x-b3-traceid x-b3-spanid x-b3-parentspanid x-b3-sampled x-b3-flags b3   For Datadog:  x-datadog-trace-id x-datadog-parent-id x-datadog-sampling-priority   For SkyWalking: sw8 For AWS X-Ray: x-amzn-trace-id  For more information on how to use these headers, please see the Envoy documentation.\nRegardless of the language of your application, Envoy will generate the appropriate tracing headers for you at the Ingress Gateway and forward these headers to the upstream cluster. However, in order to utilize the distributed tracing feature, you must modify your application code to attach the tracing headers to upstream requests. Since neither the service mesh nor the application can automatically propagate these headers, you can integrate the agent for distributed tracing into the application or manually propagate these headers in the application code itself. 
Once the tracing headers are propagated to all upstream requests, Envoy will send the tracing data to the tracer’s back-end processing, and then you can view the tracing data in the UI.\nFor example, look at the code of the Productpage service in the Bookinfo application. You can see that it integrates the Jaeger client library and synchronizes the header generated by Envoy with the HTTP requests to the Details and Reviews services in the getForwardHeaders (request) function.\ndef getForwardHeaders(request): headers = {} # Using Jaeger agent to get the x-b3-* headers span = get_current_span() carrier = {} tracer.inject( span_context=span.context, format=Format.HTTP_HEADERS, carrier=carrier) headers.update(carrier) # Dealing with the non x-b3-* header manually if \u0026#39;user\u0026#39; in session: headers[\u0026#39;end-user\u0026#39;] = session[\u0026#39;user\u0026#39;] incoming_headers = [ \u0026#39;x-request-id\u0026#39;, \u0026#39;x-ot-span-context\u0026#39;, \u0026#39;x-datadog-trace-id\u0026#39;, \u0026#39;x-datadog-parent-id\u0026#39;, \u0026#39;x-datadog-sampling-priority\u0026#39;, \u0026#39;traceparent\u0026#39;, \u0026#39;tracestate\u0026#39;, \u0026#39;x-cloud-trace-context\u0026#39;, \u0026#39;grpc-trace-bin\u0026#39;, \u0026#39;sw8\u0026#39;, \u0026#39;user-agent\u0026#39;, \u0026#39;cookie\u0026#39;, \u0026#39;authorization\u0026#39;, \u0026#39;jwt\u0026#39;, ] for ihdr in incoming_headers: val = request.headers.get(ihdr) if val is not None: headers[ihdr] = val return headers For more information, the Istio documentation provides answers to frequently asked questions about distributed tracing in Istio.\nHow to Choose A Distributed Tracing System Distributed tracing systems are similar in principle. There are many such systems on the market, such as Apache SkyWalking, Jaeger, Zipkin, Lightstep, Pinpoint, and so on. For our purposes here, we will choose three of them and compare them in several dimensions. Here are our inclusion criteria:\n They are currently the most popular open-source distributed tracing systems. All are based on the OpenTracing specification. They support integration with Istio and Envoy.     Items Apache SkyWalking Jaeger Zipkin     Implementations Language-based probes, service mesh probes, eBPF agent, third-party instrumental libraries (Zipkin currently supported) Language-based probes Language-based probes   Database ES, H2, MySQL, TiDB, Sharding-sphere, BanyanDB ES, MySQL, Cassandra, Memory ES, MySQL, Cassandra, Memory   Supported Languages Java, Rust, PHP, NodeJS, Go, Python, C++, .Net, Lua Java, Go, Python, NodeJS, C#, PHP, Ruby, C++ Java, Go, Python, NodeJS, C#, PHP, Ruby, C++   Initiator Personal Uber Twitter   Governance Apache Foundation CNCF CNCF   Version 9.3.0 1.39.0 2.23.19   Stars 20.9k 16.8k 15.8k    Although Apache SkyWalking’s agent does not support as many languages as Jaeger and Zipkin, SkyWalking’s implementation is richer and compatible with Jaeger and Zipkin trace data, and development is more active, so it is one of the best choices for building a telemetry platform.\nDemo Refer to the Istio documentation to install and configure Apache SkyWalking.\nEnvironment Description The following is the environment for our demo:\n Kubernetes 1.24.5 Istio 1.16 SkyWalking 9.1.0  Install Istio Before installing Istio, you can check the environment for any problems:\n$ istioctl experimental precheck ✔ No issues found when checking the cluster. Istio is safe to install or upgrade! 
To get started, check out https://istio.io/latest/docs/setup/getting-started/ Then install Istio and configure the destination for sending tracing messages as SkyWalking:\n# Initial Istio Operator istioctl operator init # Configure tracing destination kubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: install.istio.io/v1alpha1 kind: IstioOperator metadata: namespace: istio-system name: istio-with-skywalking spec: meshConfig: defaultProviders: tracing: - \u0026#34;skywalking\u0026#34; enableTracing: true extensionProviders: - name: \u0026#34;skywalking\u0026#34; skywalking: service: tracing.istio-system.svc.cluster.local port: 11800 EOF Deploy Apache SkyWalking Istio 1.16 supports distributed tracing using Apache SkyWalking. Install SkyWalking by executing the following code:\nkubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.16/samples/addons/extras/skywalking.yaml It will install the following components under the istio-system namespace:\n SkyWalking Observability Analysis Platform (OAP): Used to receive trace data, supports SkyWalking native data formats, Zipkin v1 and v2 and Jaeger format. UI: Used to query distributed trace data.  For more information about SkyWalking, please refer to the SkyWalking documentation.\nDeploy the Bookinfo Application Execute the following command to install the bookinfo application:\nkubectl label namespace default istio-injection=enabled kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml Launch the SkyWalking UI:\nistioctl dashboard skywalking Figure 2 shows all the services available in the bookinfo application:\nFigure 2: SkyWalking General Service page\nYou can also see information about instances, endpoints, topology, tracing, etc. For example, Figure 3 shows the service topology of the bookinfo application:\nFigure 3: Topology diagram of the Bookinfo application\nTracing views in SkyWalking can be displayed in a variety of formats, including list, tree, table, and statistics. See Figure 4:\nFigure 4: SkyWalking General Service trace supports multiple display formats\nTo facilitate our examination, set the sampling rate of the trace to 100%:\nkubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: name: mesh-default namespace: istio-system spec: tracing: - randomSamplingPercentage: 100.00 EOF  Important: It’s generally not good practice to set the sampling rate to 100% in a production environment. To avoid the overhead of generating too many trace logs in production, please adjust the sampling strategy (sampling percentage).\n Uninstall After experimenting, uninstall Istio and SkyWalking by executing the following command.\nsamples/bookinfo/platform/kube/cleanup.sh istioctl unintall --purge kubectl delete namespace istio-system Understanding the Bookinfo Tracing Information Navigate to the General Service tab in the Apache SkyWalking UI, and you can see the trace information for the most recent istio-ingressgateway service, as shown in Figure 5. Click on each span to see the details.\nFigure 5: The table view shows the basic information about each span.\nSwitching to the list view, you can see the execution order and duration of each span, as shown in Figure 6:\nFigure 6: List display\nYou might want to know why such a straightforward application generates so much span data. 
Because after we inject the Envoy proxy into the pod, every request between services will be intercepted and processed by Envoy, as shown in Figure 7:\nFigure 7: Envoy intercepts requests to generate a span\nThe tracing process is shown in Figure 8:\nFigure 8: Trace of the Bookinfo application\nWe give each span a label with a serial number, and the time taken is indicated in parentheses. For illustration purposes, we have summarized all spans in the table below.\n   No. Endpoint Total Duration (ms) Component Duration (ms) Current Service Description     1 /productpage 190 0 istio-ingressgateway Envoy Outbound   2 /productpage 190 1 istio-ingressgateway Ingress -\u0026gt; Productpage network transmission   3 /productpage 189 1 productpage Envoy Inbound   4 /productpage 188 21 productpage Application internal processing   5 /details/0 8 1 productpage Envoy Outbound   6 /details/0 7 3 productpage Productpage -\u0026gt; Details network transmission   7 /details/0 4 0 details Envoy Inbound   8 /details/0 4 4 details Application internal processing   9 /reviews/0 159 0 productpage Envoy Outbound   10 /reviews/0 159 14 productpage Productpage -\u0026gt; Reviews network transmission   11 /reviews/0 145 1 reviews Envoy Inbound   12 /reviews/0 144 109 reviews Application internal processing   13 /ratings/0 35 2 reviews Envoy Outbound   14 /ratings/0 33 16 reviews Reviews -\u0026gt; Ratings network transmission   15 /ratings/0 17 1 ratings Envoy Inbound   16 /ratings/0 16 16 ratings Application internal processing    From the above information, it can be seen that:\n The total time consumed for this request is 190 ms. In Istio sidecar mode, each traffic flow in and out of the application container must pass through the Envoy proxy once, each time taking 0 to 2 ms. Network requests between Pods take between 1 and 16ms. This is because the data itself has errors and the start time of the Span is not necessarily equal to the end time of the parent Span. We can see that the most time-consuming part is the Reviews application, which takes 109 ms so that we can optimize it for that application.  Summary Distributed tracing is an indispensable tool for analyzing performance and troubleshooting modern distributed applications. In this tutorial, we’ve seen how, with just a few minor changes to your application code to propagate tracing headers, Istio makes distributed tracing simple to use. We’ve also reviewed Apache SkyWalking as one of the best distributed tracing systems that Istio supports. It is a fully functional platform for cloud native application analytics, with features such as metrics and log collection, alerting, Kubernetes monitoring, service mesh performance diagnosis using eBPF, and more.\n If you’re new to service mesh and Kubernetes security, we have a bunch of free online courses available at Tetrate Academy that will quickly get you up to speed with Istio and Envoy.\nIf you’re looking for a fast way to get to production with Istio, check out Tetrate Istio Distribution (TID). TID is Tetrate’s hardened, fully upstream Istio distribution, with FIPS-verified builds and support available. It’s a great way to get started with Istio knowing you have a trusted distribution to begin with, have an expert team supporting you, and also have the option to get to FIPS compliance quickly if you need to.\nOnce you have Istio up and running, you will probably need simpler ways to manage and secure your services beyond what’s available in Istio, that’s where Tetrate Service Bridge comes in. 
You can learn more about how Tetrate Service Bridge makes service mesh more secure, manageable, and resilient here, or contact us for a quick demo.\n","title":"How to Use SkyWalking for Distributed Tracing in Istio?","url":"/blog/how-to-use-skywalking-for-distributed-tracing-in-istio/"},{"content":"在云原生应用中,一次请求往往需要经过一系列的 API 或后台服务处理才能完成,这些服务有些是并行的,有些是串行的,而且位于不同的平台或节点。那么如何确定一次调用的经过的服务路径和节点以帮助我们进行问题排查?这时候就需要使用到分布式追踪。\n本文将向你介绍:\n 分布式追踪的原理 如何选择分布式追踪软件 在 Istio 中如何使用分布式追踪 以 Bookinfo 和 SkyWalking 为例说明如何查看分布式追踪数据  分布式追踪基础 分布式追踪是一种用来跟踪分布式系统中请求的方法,它可以帮助用户更好地理解、控制和优化分布式系统。分布式追踪中用到了两个概念:TraceID 和 SpanID。\n TraceID 是一个全局唯一的 ID,用来标识一个请求的追踪信息。一个请求的所有追踪信息都属于同一个 TraceID,TraceID 在整个请求的追踪过程中都是不变的; SpanID 是一个局部唯一的 ID,用来标识一个请求在某一时刻的追踪信息。一个请求在不同的时间段会产生不同的 SpanID,SpanID 用来区分一个请求在不同时间段的追踪信息;  TraceID 和 SpanID 是分布式追踪的基础,它们为分布式系统中请求的追踪提供了一个统一的标识,方便用户查询、管理和分析请求的追踪信息。\n下面是分布式追踪的过程:\n 当一个系统收到请求后,分布式追踪系统会为该请求分配一个 TraceID,用于串联起整个调用链; 分布式追踪系统会为该请求在系统内的每一次服务调用生成一个 SpanID 和 ParentID,用于记录调用的父子关系,没有 ParentID 的 Span 将作为调用链的入口; 每个服务调用过程中都要传递 TraceID 和 SpanID; 在查看分布式追踪时,通过 TraceID 查询某次请求的全过程;  Istio 如何实现分布式追踪 Istio 中的分布式追踪是基于数据平面中的 Envoy 代理实现的。服务请求在被劫持到 Envoy 中后,Envoy 在转发请求时会附加大量 Header,其中与分布式追踪相关的有:\n 作为 TraceID:x-request-id 用于在 LightStep 追踪系统中建立 Span 的父子关系:x-ot-span-context 用于 Zipkin,同时适用于 Jaeger、SkyWalking,详见 b3-propagation:  x-b3-traceid x-b3-spanid x-b3-parentspanid x-b3-sampled x-b3-flags b3   用于 Datadog:  x-datadog-trace-id x-datadog-parent-id x-datadog-sampling-priority   用于 SkyWalking:sw8 用于 AWS X-Ray:x-amzn-trace-id  关于这些 Header 的详细用法请参考 Envoy 文档 。\nEnvoy 会在 Ingress Gateway 中为你产生用于追踪的 Header,不论你的应用程序使用何种语言开发,Envoy 都会将这些 Header 转发到上游集群。但是,你还要对应用程序代码做一些小的修改,才能为使用分布式追踪功能。这是因为应用程序无法自动传播这些 Header,可以在程序中集成分布式追踪的 Agent,或者在代码中手动传播这些 Header。Envoy 会将追踪数据发送到 tracer 后端处理,然后就可以在 UI 中查看追踪数据了。\n例如在 Bookinfo 应用中的 Productpage 服务,如果你查看它的代码可以发现,其中集成了 Jaeger 客户端库,并在 getForwardHeaders (request) 方法中将 Envoy 生成的 Header 同步给对 Details 和 Reviews 服务的 HTTP 请求:\ndef getForwardHeaders(request): headers = {} # 使用 Jaeger agent 获取 x-b3-* header span = get_current_span() carrier = {} tracer.inject( span_context=span.context, format=Format.HTTP_HEADERS, carrier=carrier) headers.update(carrier) # 手动处理非 x-b3-* header if \u0026#39;user\u0026#39; in session: headers[\u0026#39;end-user\u0026#39;] = session[\u0026#39;user\u0026#39;] incoming_headers = [ \u0026#39;x-request-id\u0026#39;, \u0026#39;x-ot-span-context\u0026#39;, \u0026#39;x-datadog-trace-id\u0026#39;, \u0026#39;x-datadog-parent-id\u0026#39;, \u0026#39;x-datadog-sampling-priority\u0026#39;, \u0026#39;traceparent\u0026#39;, \u0026#39;tracestate\u0026#39;, \u0026#39;x-cloud-trace-context\u0026#39;, \u0026#39;grpc-trace-bin\u0026#39;, \u0026#39;sw8\u0026#39;, \u0026#39;user-agent\u0026#39;, \u0026#39;cookie\u0026#39;, \u0026#39;authorization\u0026#39;, \u0026#39;jwt\u0026#39;, ] for ihdr in incoming_headers: val = request.headers.get(ihdr) if val is not None: headers[ihdr] = val return headers 关于 Istio 中分布式追踪的常见问题请见 Istio 文档 。\n分布式追踪系统如何选择 分布式追踪系统的原理类似,市面上也有很多这样的系统,例如 Apache SkyWalking 、Jaeger 、Zipkin 、LightStep 、Pinpoint 等。我们将选择其中三个,从多个维度进行对比。之所以选择它们是因为:\n 它们是当前最流行的开源分布式追踪系统; 都是基于 OpenTracing 规范; 都支持与 Istio 及 Envoy 集成;     类别 Apache SkyWalking Jaeger Zipkin     实现方式 基于语言的探针、服务网格探针、eBPF agent、第三方指标库(当前支持 Zipkin) 基于语言的探针 基于语言的探针   数据存储 ES、H2、MySQL、TiDB、Sharding-sphere、BanyanDB ES、MySQL、Cassandra、内存 ES、MySQL、Cassandra、内存   支持语言 Java、Rust、PHP、NodeJS、Go、Python、C++、.NET、Lua Java、Go、Python、NodeJS、C#、PHP、Ruby、C++ Java、Go、Python、NodeJS、C#、PHP、Ruby、C++   发起者 个人 Uber Twitter   治理方式 Apache Foundation CNCF CNCF   
版本 9.3.0 1.39.0 2.23.19   Star 数量 20.9k 16.8k 15.8k    分布式追踪系统对比表(数据截止时间 2022-12-07)\n虽然 Apache SkyWalking 的 Agent 支持的语言没有 Jaeger 和 Zipkin 多,但是 SkyWalking 的实现方式更丰富,并且与 Jaeger、Zipkin 的追踪数据兼容,开发更为活跃,且为国人开发,中文资料丰富,是构建遥测平台的最佳选择之一。\n实验 参考 Istio 文档 来安装和配置 Apache SkyWalking。\n环境说明 以下是我们实验的环境:\n Kubernetes 1.24.5 Istio 1.16 SkyWalking 9.1.0  安装 Istio 安装之前可以先检查下环境是否有问题:\n$ istioctl experimental precheck ✔ No issues found when checking the cluster. Istio is safe to install or upgrade! To get started, check out https://istio.io/latest/docs/setup/getting-started/ 然后安装 Istio 同时配置发送追踪信息的目的地为 SkyWalking:\n# 初始化 Istio Operator istioctl operator init # 安装 Istio 并配置使用 SkyWalking kubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: install.istio.io/v1alpha1 kind: IstioOperator metadata: namespace: istio-system name: istio-with-skywalking spec: meshConfig: defaultProviders: tracing: - \u0026#34;skywalking\u0026#34; enableTracing: true extensionProviders: - name: \u0026#34;skywalking\u0026#34; skywalking: service: tracing.istio-system.svc.cluster.local port: 11800 EOF 部署 Apache SkyWalking Istio 1.16 支持使用 Apache SkyWalking 进行分布式追踪,执行下面的代码安装 SkyWalking:\nkubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.16/samples/addons/extras/skywalking.yaml 它将在 istio-system 命名空间下安装:\n SkyWalking OAP (Observability Analysis Platform) :用于接收追踪数据,支持 SkyWalking 原生数据格式,Zipkin v1 和 v2 以及 Jaeger 格式。 UI :用于查询分布式追踪数据。  关于 SkyWalking 的详细信息请参考 SkyWalking 文档 。\n部署 Bookinfo 应用 执行下面的命令安装 bookinfo 示例:\nkubectl label namespace default istio-injection=enabled kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml 打开 SkyWalking UI:\nistioctl dashboard skywalking SkyWalking 的 General Service 页面展示了 bookinfo 应用中的所有服务。\n你还可以看到实例、端点、拓扑、追踪等信息。例如下图展示了 bookinfo 应用的服务拓扑。\nSkyWalking 的追踪视图有多种显示形式,如列表、树形、表格和统计。\nSkyWalking 通用服务追踪支持多种显示样式\n为了方便我们检查,将追踪的采样率设置为 100%:\nkubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: name: mesh-default namespace: istio-system spec: tracing: - randomSamplingPercentage: 100.00 EOF 卸载 在实验完后,执行下面的命令卸载 Istio 和 SkyWalking:\nsamples/bookinfo/platform/kube/cleanup.sh istioctl unintall --purge kubectl delete namespace istio-system Bookinfo demo 追踪信息说明 在 Apache SkyWalking UI 中导航到 General Service 分页,查看最近的 istio-ingressgateway 服务的追踪信息,表视图如下所示。图中展示了此次请求所有 Span 的基本信息,点击每个 Span 可以查看详细信息。\n切换为列表视图,可以看到每个 Span 的执行顺序及持续时间,如下图所示。\n你可能会感到困惑,为什么这么简单的一个应用会产生如此多的 Span 信息?因为我们为 Pod 注入了 Envoy 代理之后,每个服务间的请求都会被 Envoy 拦截和处理,如下图所示。\n整个追踪流程如下图所示。\n图中给每一个 Span 标记了序号,并在括号里注明了耗时。为了便于说明我们将所有 Span 汇总在下面的表格中。\n   序号 方法 总耗时(ms) 组件耗时(ms) 当前服务 说明     1 /productpage 190 0 istio-ingressgateway Envoy Outbound   2 /productpage 190 1 istio-ingressgateway Ingress -\u0026gt; Productpage 网络传输   3 /productpage 189 1 productpage Envoy Inbound   4 /productpage 188 21 productpage 应用内部处理   5 /details/0 8 1 productpage Envoy Outbound   6 /details/0 7 3 productpage Productpage -\u0026gt; Details 网络传输   7 /details/0 4 0 details Envoy Inbound   8 /details/0 4 4 details 应用内部   9 /reviews/0 159 0 productpage Envoy Outbound   10 /reviews/0 159 14 productpage Productpage -\u0026gt; Reviews 网络传输   11 /reviews/0 145 1 reviews Envoy Inbound   12 /reviews/0 144 109 reviews 应用内部处理   13 /ratings/0 35 2 reviews Envoy Outbound   14 /ratings/0 33 16 reviews Reviews -\u0026gt; Ratings 网络传输   15 /ratings/0 17 1 ratings Envoy Inbound   16 /ratings/0 16 16 ratings 应用内部处理    从以上信息可以发现:\n 本次请求总耗时 190ms; 在 Istio sidecar 模式下,每次流量在进出应用容器时都需要经过一次 
Envoy 代理,每次耗时在 0 到 2 ms; 在 Pod 间的网络请求耗时在 1 到 16ms 之间; 将耗时做多的调用链 Ingress Gateway -\u0026gt; Productpage -\u0026gt; Reviews -\u0026gt; Ratings 上的所有耗时累计 182 ms,小于请求总耗时 190ms,这是因为数据本身有误差,以及 Span 的开始时间并不一定等于父 Span 的结束时间,如果你在 SkyWalking 的追踪页面,选择「列表」样式查看追踪数据(见图 2)可以更直观的发现这个问题; 我们可以查看到最耗时的部分是 Reviews 应用,耗时 109ms,因此我们可以针对该应用进行优化;  总结 只要对应用代码稍作修改就可以在 Istio 很方便的使用分布式追踪功能。在 Istio 支持的众多分布式追踪系统中,Apache SkyWalking 是其中的佼佼者。它不仅支持分布式追踪,还支持指标和日志收集、报警、Kubernetes 和服务网格监控,使用 eBPF 诊断服务网格性能 等功能,是一个功能完备的云原生应用分析平台。本文中为了方便演示,将追踪采样率设置为了 100%,在生产使用时请根据需要调整采样策略(采样百分比),防止产生过多的追踪日志。\n 如果您不熟悉服务网格和 Kubernetes 安全性,我们在 Tetrate Academy 提供了一系列免费在线课程,可以让您快速了解 Istio 和 Envoy。\n如果您正在寻找一种快速将 Istio 投入生产的方法,请查看 Tetrate Istio Distribution (TID)。TID 是 Tetrate 的强化、完全上游的 Istio 发行版,具有经过 FIPS 验证的构建和支持。这是开始使用 Istio 的好方法,因为您知道您有一个值得信赖的发行版,有一个支持您的专家团队,并且如果需要,还可以选择快速获得 FIPS 合规性。\n一旦启动并运行 Istio,您可能需要更简单的方法来管理和保护您的服务,而不仅仅是 Istio 中可用的方法,这就是 Tetrate Service Bridge 的用武之地。您可以在这里详细了解 Tetrate Service Bridge 如何使服务网格更安全、更易于管理和弹性,或联系我们进行快速演示。\n","title":"如何在 Istio 中使用 SkyWalking 进行分布式追踪?","url":"/zh/how-to-use-skywalking-for-distributed-tracing-in-istio/"},{"content":"Introduction Apache SkyWalking is an open source APM tool for monitoring and troubleshooting distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. It provides distributed tracing, service mesh observability, metric aggregation and visualization, and alarm.\nIn this article, I will introduce how to quickly set up Apache SkyWalking on AWS EKS and RDS/Aurora, as well as a couple of sample services, monitoring services to observe SkyWalking itself.\nPrerequisites  AWS account AWS CLI Terraform kubectl  We can use the AWS web console or CLI to create all resources needed in this tutorial, but it can be too tedious and hard to debug when something goes wrong. So in this artical I will use Terraform to create all AWS resources, deploy SkyWalking, sample services, and load generator services (Locust).\nArchitecture The demo architecture is as follows:\ngraph LR subgraph AWS subgraph EKS subgraph istio-system namespace direction TB OAP[[SkyWalking OAP]] UI[[SkyWalking UI]] Istio[[istiod]] end subgraph sample namespace Service0[[Service0]] Service1[[Service1]] ServiceN[[Service ...]] end subgraph locust namespace LocustMaster[[Locust Master]] LocustWorkers0[[Locust Worker 0]] LocustWorkers1[[Locust Worker 1]] LocustWorkersN[[Locust Worker ...]] end end RDS[[RDS/Aurora]] end OAP --\u0026gt; RDS Service0 -. telemetry data -.-\u0026gt; OAP Service1 -. telemetry data -.-\u0026gt; OAP ServiceN -. telemetry data -.-\u0026gt; OAP UI --query--\u0026gt; OAP LocustWorkers0 -- traffic --\u0026gt; Service0 LocustWorkers1 -- traffic --\u0026gt; Service0 LocustWorkersN -- traffic --\u0026gt; Service0 Service0 --\u0026gt; Service1 --\u0026gt; ServiceN LocustMaster --\u0026gt; LocustWorkers0 LocustMaster --\u0026gt; LocustWorkers1 LocustMaster --\u0026gt; LocustWorkersN User --\u0026gt; LocustMaster As shown in the architecture diagram, we need to create the following AWS resources:\n EKS cluster RDS instance or Aurora cluster  Sounds simple, but there are a lot of things behind the scenes, such as VPC, subnets, security groups, etc. You have to configure them correctly to make sure the EKS cluster can connect to RDS instance/Aurora cluster otherwise the SkyWalking won\u0026rsquo;t work. 
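To give a concrete sense of what "configure them correctly" means: the cluster can only reach the database if, among other things, a security-group rule allows the EKS worker nodes to connect to the database port. Done by hand with the AWS CLI, that single rule might look roughly like the sketch below; both security-group IDs are placeholders, and port 5432 assumes the PostgreSQL engine used in this tutorial.

# Allow the EKS worker-node security group (placeholder sg-0aaa...) to reach
# the PostgreSQL port on the RDS/Aurora security group (placeholder sg-0bbb...).
aws ec2 authorize-security-group-ingress \
  --group-id sg-0bbbbbbbbbbbbbbbb \
  --protocol tcp \
  --port 5432 \
  --source-group sg-0aaaaaaaaaaaaaaaa

And that is just one of the many pieces (VPC, subnets, routing, security groups) that have to line up.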
Luckily, Terraform can help us to create and destroy all these resources automatically.\nI have created a Terraform module to create all AWS resources needed in this tutorial, you can find it in the GitHub repository.\nCreate AWS resources First, we need to clone the GitHub repository and cd into the folder:\ngit clone https://github.com/kezhenxu94/oap-load-test.git Then, we need to create a file named terraform.tfvars to specify the AWS region and other variables:\ncat \u0026gt; terraform.tfvars \u0026lt;\u0026lt;EOF aws_access_key = \u0026#34;\u0026#34; aws_secret_key = \u0026#34;\u0026#34; cluster_name = \u0026#34;skywalking-on-aws\u0026#34; region = \u0026#34;ap-east-1\u0026#34; db_type = \u0026#34;rds-postgresql\u0026#34; EOF If you have already configured the AWS CLI, you can skip the aws_access_key and aws_secret_key variables. To install SkyWalking with RDS postgresql, set the db_type to rds-postgresql, to install SkyWalking with Aurora postgresql, set the db_type to aurora-postgresql.\nThere are a lot of other variables you can configure, such as tags, sample services count, replicas, etc., you can find them in the variables.tf.\nThen, we can run the following commands to initialize the Terraform module and download the required providers, then create all AWS resources:\nterraform init terraform apply -var-file=terraform.tfvars Type yes to confirm the creation of all AWS resources, or add the -auto-approve flag to the terraform apply to skip the confirmation:\nterraform apply -var-file=terraform.tfvars -auto-approve Now what you need to do is to wait for the creation of all AWS resources to complete, it may take a few minutes. You can check the progress of the creation in the AWS web console, and check the deployment progress of the services inside the EKS cluster.\nGenerate traffic Besides creating necessary AWS resources, the Terraform module also deploys SkyWalking, sample services, and Locust load generator services to the EKS cluster.\nYou can access the Locust web UI to generate traffic to the sample services:\nopen http://$(kubectl get svc -n locust -l app=locust-master -o jsonpath=\u0026#39;{.items[0].status.loadBalancer.ingress[0].hostname}\u0026#39;):8089 The command opens the browser to the Locust web UI, you can configure the number of users and hatch rate to generate traffic.\nObserve SkyWalking You can access the SkyWalking web UI to observe the sample services.\nFirst you need to forward the SkyWalking UI port to local\nkubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=skywalking -l component=ui -o name) 8080:8080 And then open the browser to http://localhost:8080 to access the SkyWalking web UI.\nObserve RDS/Aurora You can also access the RDS/Aurora web console to observe the performance of RDS/Aurora instance/Aurora cluste.\nTest Results Test 1: SkyWalking with EKS and RDS PostgreSQL Service Traffic RDS Performance SkyWalking Performance Test 2: SkyWalking with EKS and Aurora PostgreSQL Service Traffic RDS Performance SkyWalking Performance Clean up When you are done with the demo, you can run the following command to destroy all AWS resources:\nterraform destroy -var-file=terraform.tfvars -auto-approve ","title":"How to run Apache SkyWalking on AWS EKS and RDS/Aurora","url":"/blog/2022-12-13-how-to-run-apache-skywalking-on-aws-eks-rds/"},{"content":"介绍 Apache SkyWalking 是一个开源的 APM 工具,用于监控分布式系统和排除故障,特别是为微服务、云原生和基于容器(Docker、Kubernetes、Mesos)的架构而设计。它提供分布式跟踪、服务网格可观测性、指标聚合和可视化以及警报。\n在本文中,我将介绍如何在 AWS EKS 和 RDS/Aurora 上快速设置 Apache 
SkyWalking,以及几个示例服务,监控服务以观察 SkyWalking 本身。\n先决条件  AWS 账号 AWS CLI Terraform kubectl  我们可以使用 AWS Web 控制台或 CLI 来创建本教程所需的所有资源,但是当出现问题时,它可能过于繁琐且难以调试。因此,在本文中,我将使用 Terraform 创建所有 AWS 资源、部署 SkyWalking、示例服务和负载生成器服务 (Locust)。\n架构 演示架构如下:\ngraph LR subgraph AWS subgraph EKS subgraph istio-system namespace direction TB OAP[[SkyWalking OAP]] UI[[SkyWalking UI]] Istio[[istiod]] end subgraph sample namespace Service0[[Service0]] Service1[[Service1]] ServiceN[[Service ...]] end subgraph locust namespace LocustMaster[[Locust Master]] LocustWorkers0[[Locust Worker 0]] LocustWorkers1[[Locust Worker 1]] LocustWorkersN[[Locust Worker ...]] end end RDS[[RDS/Aurora]] end OAP --\u0026gt; RDS Service0 -. telemetry data -.-\u0026gt; OAP Service1 -. telemetry data -.-\u0026gt; OAP ServiceN -. telemetry data -.-\u0026gt; OAP UI --query--\u0026gt; OAP LocustWorkers0 -- traffic --\u0026gt; Service0 LocustWorkers1 -- traffic --\u0026gt; Service0 LocustWorkersN -- traffic --\u0026gt; Service0 Service0 --\u0026gt; Service1 --\u0026gt; ServiceN LocustMaster --\u0026gt; LocustWorkers0 LocustMaster --\u0026gt; LocustWorkers1 LocustMaster --\u0026gt; LocustWorkersN User --\u0026gt; LocustMaster 如架构图所示,我们需要创建以下 AWS 资源:\n EKS 集群 RDS 实例或 Aurora 集群  听起来很简单,但背后有很多东西,比如 VPC、子网、安全组等。你必须正确配置它们以确保 EKS 集群可以连接到 RDS 实例 / Aurora 集群,否则 SkyWalking 不会不工作。幸运的是,Terraform 可以帮助我们自动创建和销毁所有这些资源。\n我创建了一个 Terraform 模块来创建本教程所需的所有 AWS 资源,您可以在 GitHub 存储库中找到它。\n创建 AWS 资源 首先,我们需要将 GitHub 存储库克隆 cd 到文件夹中:\ngit clone https://github.com/kezhenxu94/oap-load-test.git 然后,我们需要创建一个文件 terraform.tfvars 来指定 AWS 区域和其他变量:\ncat \u0026gt; terraform.tfvars \u0026lt;\u0026lt;EOF aws_access_key = \u0026#34;\u0026#34; aws_secret_key = \u0026#34;\u0026#34; cluster_name = \u0026#34;skywalking-on-aws\u0026#34; region = \u0026#34;ap-east-1\u0026#34; db_type = \u0026#34;rds-postgresql\u0026#34; EOF 如果您已经配置了 AWS CLI,则可以跳过 aws_access_key 和 aws_secret_key 变量。要使用 RDS postgresql 安装 SkyWalking,请将 db_type 设置为 rds-postgresql,要使用 Aurora postgresql 安装 SkyWalking,请将 db_type 设置为 aurora-postgresql。\n您可以配置许多其他变量,例如标签、示例服务计数、副本等,您可以在 variables.tf 中找到它们。\n然后,我们可以运行以下命令来初始化 Terraform 模块并下载所需的提供程序,然后创建所有 AWS 资源:\nterraform init terraform apply -var-file=terraform.tfvars 键入 yes 以确认所有 AWS 资源的创建,或将标志 -auto-approve 添加到 terraform apply 以跳过确认:\nterraform apply -var-file=terraform.tfvars -auto-approve 现在你需要做的就是等待所有 AWS 资源的创建完成,这可能需要几分钟的时间。您可以在 AWS Web 控制台查看创建进度,也可以查看 EKS 集群内部服务的部署进度。\n产生流量 除了创建必要的 AWS 资源外,Terraform 模块还将 SkyWalking、示例服务和 Locust 负载生成器服务部署到 EKS 集群。\n您可以访问 Locust Web UI 以生成到示例服务的流量:\nopen http://$(kubectl get svc -n locust -l app=locust-master -o jsonpath=\u0026#39;{.items[0].status.loadBalancer.ingress[0].hostname}\u0026#39;):8089 该命令将浏览器打开到 Locust web UI,您可以配置用户数量和孵化率以生成流量。\n观察 SkyWalking 您可以访问 SkyWalking Web UI 来观察示例服务。\n首先需要将 SkyWalking UI 端口转发到本地:\nkubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=skywalking -l component=ui -o name) 8080:8080 然后在浏览器中打开 http://localhost:8080 访问 SkyWalking web UI。\n观察 RDS/Aurora 您也可以访问 RDS/Aurora web 控制台,观察 RDS/Aurora 实例 / Aurora 集群的性能。\n试验结果 测试 1:使用 EKS 和 RDS PostgreSQL 的 SkyWalking 服务流量 RDS 性能 SkyWalking 性能 测试 2:使用 EKS 和 Aurora PostgreSQL 的 SkyWalking 服务流量 RDS 性能 SkyWalking 性能 清理 完成演示后,您可以运行以下命令销毁所有 AWS 资源:\nterraform destroy -var-file=terraform.tfvars -auto-approve ","title":"如何在 AWS EKS 和 RDS/Aurora 上运行 Apache SkyWalking","url":"/zh/2022-12-13-how-to-run-apache-skywalking-on-aws-eks-rds/"},{"content":"As an application performance monitoring tool for distributed systems, Apache SkyWalking observes metrics, logs, traces, 
and events in the service mesh.\nSkyWalking OAP’s dataflow processing architecture boasts high performance and is capable of dealing with massive data traffic in real-time. However, storing, updating, and querying massive amounts of data poses a great challenge to its backend storage system.\nBy default, SkyWalking provides storage methods including H2, OpenSearch, ElasticSearch, MySQL, TiDB, PostgreSQL, and BanyanDB. Among them, MySQL storage is suited to a single machine and table (MySQL cluster capability depends on your technology selection). Nevertheless, in the context of high-traffic business systems, the storage of monitoring data is put under great pressure and query performance is lowered.\nBased on MySQL storage, SkyWalking v9.3.0 provides a new storage method: MySQL-Sharding. It supports database and table sharding features thanks to ShardingSphere-Proxy, which is a mature solution for dealing with relational databases’ massive amounts of data.\n1. Architecture Deployment  SkyWalking will only interact with ShardingSphere-Proxy instead of directly connecting to the database. The connection exposed by each MySQL node is a data source managed by ShardingSphere-Proxy. ShardingSphere-Proxy will establish a virtual logical database based on the configuration and then carry out database and table sharding and routing according to the OAP provided data sharding rules. SkyWalking OAP creates data sharding rules and performs DDL and DML on a virtual logical database just like it does with MySQL.  2. Application Scenario Applicable to scenarios where MySQL is used for storage, but the single-table mode cannot meet the performance requirements created by business growth.\n3. How Does Data Sharding Work with SkyWalking? Data sharding defines the data Model in SkyWalking with the annotation @SQLDatabase.Sharding.\n@interface Sharding { ShardingAlgorithm shardingAlgorithm(); String dataSourceShardingColumn() default \u0026#34;\u0026#34;; String tableShardingColumn() default \u0026#34;\u0026#34;; } Note:\n shardingAlgorithm: Table sharding algorithm dataSourceShardingColumn: Database sharding key tableShardingColumn: Table sharding key\n SkyWalking selects database sharding key, table sharding key and table sharding algorithm based on @SQLDatabase.Sharding, in order to dynamically generate sharding rules for each table. Next, it performs rule definition by operating ShardingSphere-Proxy via DistSQL. ShardingSphere-Proxy carries out data sharding based on the rule definition.\n3.1 Database Sharding Method SkyWalking adopts a unified method to carry out database sharding. The number of databases that need to be sharded requires modulo by the hash value of the database sharding key, which should be the numeric suffix of the routing target database. Therefore, the routing target database is:\nds_{dataSourceShardingColumn.hashcode() % dataSourceList.size()} For example, we now have dataSourceList = ds_0…ds_n. If {dataSourceShardingColumn.hashcode() % dataSourceList.size() = 2}, all the data will be routed to the data source node ds_2.\n3.2 Table Sharding Method The table sharding algorithm mainly shards according to the data owing to the TTL mechanism. 
According to TTL, there will be one sharding table per day:\n{tableName = logicTableName_timeSeries (data)} To ensure that data within the TTL can be written and queried, the time series will generate the current date:\n{timeSeries = currentDate - TTL +1...currentDate + 1} For example, if TTL=3 and currentDate=20220907, sharding tables will be: logicTableName_20220905 logicTableName_20220906 logicTableName_20220907 logicTableName_20220908\nSkyWalking provides table sharding algorithms for different data models:\n   Algorithm Name Sharding Description Time Precision Requirements for Sharding Key Typical Application Data Model     NO_SHARDING No table sharding and single-table mode is maintained. N/A Data model with a small amount of data and no need for sharding.   TIME_RELATIVE_ID_SHARDING_ALGORITHM Shard by day using time_bucket in the ID column. time_bucket can be accurate to seconds, minutes, hours, or days in the same table. Various metrics.   TIME_SEC_RANGE_SHARDING_ALGORITHM Shard by day using time_bucket column. time_bucket must be accurate to seconds. SegmentRecordLogRecord, etc.   TIME_MIN_RANGE_SHARDING_ALGORITHM Shard by day using time_bucket column. time_bucket must be accurate to minutes. EndpointTraffic   TIME_BUCKET_SHARDING_ALGORITHM Shard by day using time_bucket column. time_bucket can be accurate to seconds, minutes, hours, and days in the same table. Service, Instance, Endpoint and other call relations such as ServiceRelationServerSideMetrics    4. TTL Mechanism   For sharding tables, delete the physical table deadline \u0026gt;= timeSeries according to TTL.\n{deadline = new DateTime().plusDays(-ttl)}   TTL timer will delete the expired tables according to the current date while updating sharding rules according to the new date and informing ShardingSphere-Proxy to create new sharding tables.\n  For a single table, use the previous method and delete the row record of deadline \u0026gt;=time_bucket.\n  5. Examples of Sharding Data Storage Next, we’ll take segment (Record type) and service_resp_time (Metrics type) as examples to illustrate the data storage logic and physical distribution. Here, imagine MySQL has two nodes ds_0 and ds_1.\nNote:\n The following storage table structure is just a simplified version as an example, and does not represent the real SkyWalking table structure.\n 5.1 segment The sharding configuration is as follows:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_SEC_RANGE_SHARDING_ALGORITHM, dataSourceShardingColumn = service_id, tableShardingColumn = time_bucket) The logical database, table structures and actual ones are as follows:\n5.2 service_resp_time The sharding configuration is as follows:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_RELATIVE_ID_SHARDING_ALGORITHM, tableShardingColumn = id, dataSourceShardingColumn = entity_id) The logical database and table structures and actual ones are as follows:\n6. How to Use ShardingSphere-Proxy? 6.1 Manual Deployment Here we take the deployment of a single-node SkyWalking OAP and ShardingSphere-Proxy 5.1.2 as an example. Please refer to the relevant documentation for the cluster deployment.\n Prepare the MySQL cluster. Deploy, install and configure ShardingSphere-Proxy:    conf/server.yaml and props.proxy-hint-enabled must be true. Refer to the link for the complete configuration.\n  conf/config-sharding.yaml configures logical database and dataSources list. The dataSource name must be prefixed with ds_ and start with ds_0. 
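As a rough illustration only (not the complete file), a two-node setup backing the swtest logical database used later in the demo could declare its data sources along the following lines. The host names and credentials are placeholders, and the exact top-level keys depend on your ShardingSphere-Proxy version, so treat this as a sketch and follow the reference configuration instead.

databaseName: swtest              # logical database exposed to SkyWalking OAP (key name may vary by proxy version)
dataSources:
  ds_0:                           # data source names must be ds_0, ds_1, ...
    url: jdbc:mysql://mysql-0:3306/swtest?useSSL=false    # placeholder host
    username: root                # placeholder credentials
    password: root
  ds_1:
    url: jdbc:mysql://mysql-1:3306/swtest?useSSL=false    # placeholder host
    username: root
    password: root
# No sharding rules are defined here: SkyWalking OAP creates them at runtime via DistSQL.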
For details about the configuration, please refer to this page.\n   Deploy, install and configure SkyWalking OAP:    Set up OAP environment variables: ${SW_STORAGE:mysql-sharding},\n  Configure the connection information based on the actual deployment: ${SW_JDBC_URL} ${SW_DATA_SOURCE_USER} ${SW_DATA_SOURCE_PASSWORD}\n  Note:\n Connection information must correspond to ShardingSphere-Proxy virtual database.\n Configure the data source name configured by conf/config-sharding.yaml in ShardingSphere-Proxy to ${SW_JDBC_SHARDING_DATA_SOURCES} and separate names with commas.   Start the MySQL cluster. Start ShardingSphere-Proxy. Start SkyWalking OAP.  6.2 Running Demo with Docker Our GitHub repository provides a complete and operational demo based on Docker, allowing you to quickly grasp the operation’s effectiveness. The deployment includes the following:\n One OAP service. The TTL of Metrics and Record data set to 2 days. One sharding-proxy service with version 5.1.2. Its external port is 13307 and the logical database name is swtest. Two MySQL services. Their external ports are 3306 and 3307 respectively and they are configured as ds_0 and ds_1 in sharding-proxy’s conf/config-sharding.yaml. One provider service (simulated business programs used to verify trace and metrics and other data). Its external port is 9090. One consumer service (simulated business programs used to verify trace and metrics and other data). Its external port is 9092.  Download the demo program locally and run it directly in the directory skywalking-mysql-sharding-demo.\ndocker-compose up -d Note:\n The first startup may take some time to pull images and create all the tables.\n Once all the services are started, database tools can be used to check the creation of sharding-proxy logical tables and the actual physical sharding table in the two MySQL databases. Additionally, you can also connect the sharding-proxy logical database to view the data query routing. For example:\nPREVIEW SELECT * FROM SEGMENT The result is as follows:\nThe simulated business program provided by the demo can simulate business requests by requesting the consumer service to verify various types of data distribution:\ncurl http://127.0.0.1:9092/info 7. Conclusion In this blog, we introduced SkyWalking’s new storage feature, MySQL sharding, which leverage ShardingSphere-Proxy and covered details of its deployment architecture, application scenarios, sharding logic, and TTL mechanism. We’ve also provided sample data and deployment steps to help get started.\nSkyWalking offers a variety of storage options to fit many use cases. If you need a solution to store large volumes of telemetry data in a relational database, the new MySQL sharding feature is worth a look. For more information on the SkyWalking 9.3.0 release and where to get it, check out the release notes.\n","title":"SkyWalking's New Storage Feature Based on ShardingSphere-Proxy: MySQL-Sharding","url":"/blog/skywalkings-new-storage-feature-based-on-shardingsphere-proxy-mysql-sharding/"},{"content":"SkyWalking NodeJS 0.6.0 is released. 
Go to downloads page to find release tars.\n Add missing build doc by @kezhenxu94 in https://github.com/apache/skywalking-nodejs/pull/92 Fix invalid url error in axios plugin by @kezhenxu94 in https://github.com/apache/skywalking-nodejs/pull/93 Ignore no requests if ignoreSuffix is empty by @michaelzangl in https://github.com/apache/skywalking-nodejs/pull/94 Escape HTTP method in regexp by @michaelzangl in https://github.com/apache/skywalking-nodejs/pull/95 docs: grammar improvements by @BFergerson in https://github.com/apache/skywalking-nodejs/pull/97 fix: entry span url in endponts using Express middleware/router objects by @BFergerson in https://github.com/apache/skywalking-nodejs/pull/96 chore: use openapi format for endpoint uris by @BFergerson in https://github.com/apache/skywalking-nodejs/pull/98 AWS DynamoDB, Lambda, SQS and SNS plugins, webpack by @tom-pytel in https://github.com/apache/skywalking-nodejs/pull/100 Fix nits by @wu-sheng in https://github.com/apache/skywalking-nodejs/pull/101 Update AxiosPlugin for v1.0+ by @tom-pytel in https://github.com/apache/skywalking-nodejs/pull/102  ","title":"Release Apache SkyWalking for NodeJS 0.6.0","url":"/events/release-apache-skywalking-nodejs-0-6-0/"},{"content":"SkyWalking 9.3.0 is released. Go to downloads page to find release tars.\nMetrics Association    Dashboard Pop-up Trace Query          APISIX Dashboard Use Sharding MySQL as the Database Virtual Cache Performance Virtual MQ Performance Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. 
[Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. 
Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. 
[Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. 
Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.3.0","url":"/events/release-apache-skywalking-apm-9.3.0/"},{"content":"Apache SkyWalking 作为一个分布式系统的应用性能监控工具,它观察服务网格中的指标、日志、痕迹和事件。其中 SkyWalking OAP 高性能的数据流处理架构能够实时处理庞大的数据流量,但是这些海量数据的存储更新和后续查询对后端存储系统带来了挑战。\nSkyWalking 默认已经提供了多种存储支持包括 H2、OpenSearch、ElasticSearch、MySQL、TiDB、PostgreSQL、BanyanDB。其中 MySQL 存储提供的是针对单机和单表的存储方式(MySQL 的集群能力需要自己选型提供),在面对高流量的业务系统时,监控数据的存储存在较大压力,同时影响查询性能。\n在 MySQL 存储基础上 SkyWalking v9.3.0 提供了一种新的存储方式 MySQL-Sharding,它提供了基于 ShardingSphere-Proxy 的分库分表特性,而分库分表是关系型数据库面对大数据量处理的成熟解决方案。\n部署架构 SkyWalking 使用 ShardingSphere-Proxy 的部署方式如下图所示。\n SkyWalking OAP 由直连数据库的方式变成只与 ShardingSphere-Proxy 进行交互; 每一个 MySQL 节点暴露的连接都是一个数据源,由 ShardingSphere-Proxy 进行统一管理; ShardingSphere-Proxy 会根据配置建立一个虚拟逻辑数据库,根据 OAP 提供的分库分表规则进行库表分片和路由; SkyWalking OAP 负责生成分库分表规则并且像操作 MySQL 一样对虚拟逻辑库执行 DDL 和 DML;  适用场景 希望使用 MySQL 作为存储,随着业务规模的增长,单表模式已经无法满足性能需要。\nSkyWalking 分库分表逻辑 分库分表逻辑通过注解 @SQLDatabase.Sharding 对 SkyWalking 中的数据模型 Model 进行定义:\n@interface Sharding { ShardingAlgorithm shardingAlgorithm(); String dataSourceShardingColumn() default \u0026#34;\u0026#34;; String tableShardingColumn() default \u0026#34;\u0026#34;; } 其中:\n  shardingAlgorithm:表分片算法\n  dataSourceShardingColumn:分库键\n  tableShardingColumn:分表键\n  SkyWalking 根据注解 @SQLDatabase.Sharding 选择分库键、分表键以及表分片算法对每个表动态生成分片规则通过 DistSQL 操作 Shardingsphere-Proxy 执行规则定义 Shardingsphere-Proxy 根据规则定义进行数据分片。\n分库方式 SkyWalking 对于分库采用统一的方式,路由目标库的数字后缀使用分库键的哈希值取模需要分库的数据库数量,所以路由目标库为:\nds_{dataSourceShardingColumn.hashcode() % dataSourceList.size()} 例如我们有 dataSourceList = ds_0...ds_n,如果\n{dataSourceShardingColumn.hashcode() % dataSourceList.size() = 2} 那么所有数据将会路由到 ds_2 这个数据源节点上。\n分表方式 由于 TTL 机制的存在,分表算法主要根据时间的日期进行分片,分片表的数量是根据 TTL 每天一个表:\n分片表名 = 逻辑表名_时间序列(日期):{tableName =logicTableName_timeSeries}\n为保证在 TTL 有效期内的数据能够被写入和查询,时间序列将生成当前日期\n{timeSeries = currentDate - TTL +1...currentDate + 1} 例如:如果 TTL=3, currentDate = 20220907,则分片表为:\nlogicTableName_20220905 logicTableName_20220906 logicTableName_20220907 logicTableName_20220908 SkyWalking 提供了多种不同的分表算法用于不同的数据模型:\n   算法名称 分片说明 分片键时间精度要求 典型应用数据模型     NO_SHARDING 不做任何表分片,保持单表模式 / 数据量小无需分片的数据模型   TIME_RELATIVE_ID_SHARDING_ALGORITHM 使用 ID 列中的 time_bucket 按天分片 time_bucket 的精度可以是同一表中的秒、分、小时和天 各类 Metrics 指标   TIME_SEC_RANGE_SHARDING_ALGORITHM 使用 time_bucket 列按天分片 time_bucket 的精度必须是秒 SegmentRecordLogRecord 等   TIME_MIN_RANGE_SHARDING_ALGORITHM 使用 time_bucket 列按天分片 time_bucket 的精度必须是分钟 EndpointTraffic   
TIME_BUCKET_SHARDING_ALGORITHM 使用 time_bucket 列按天分片 time_bucket 的精度可以是同一个表中的秒、分、小时和天 Service、Instance、Endpoint 调用关系等如 ServiceRelationServerSideMetrics    TTL 机制  对于进行分片的表根据 TTL 直接删除 deadline \u0026gt;= timeSeries 的物理表 {deadline = new DateTime().plusDays(-ttl)} TTL 定时器在根据当前日期删除过期表的同时也会根据新日期更新分片规则,通知 ShardingSphere-Proxy 创建新的分片表 对于单表的延续之前的方式,删除 deadline \u0026gt;= time_bucket 的行记录  分片数据存储示例 下面以 segment(Record 类型)和 service_resp_time(Metrics 类型)两个为例说明数据存储的逻辑和物理分布。这里假设 MySQL 为 ds_0 和 ds_1 两个节点。\n注意:以下的存储表结构仅为简化后的存储示例,不表示 SkyWalking 真实的表结构。\nsegment 分片配置为:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_SEC_RANGE_SHARDING_ALGORITHM, dataSourceShardingColumn = service_id, tableShardingColumn = time_bucket) 逻辑库表结构和实际库表如下图:\nservice_resp_time 分片配置为:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_RELATIVE_ID_SHARDING_ALGORITHM, tableShardingColumn = id, dataSourceShardingColumn = entity_id) 逻辑库表结构和实际库表如下图:\n如何使用 你可以选择手动或使用 Docker 来运行 Demo。\n手动部署 这里以单节点 SkyWalking OAP 和 Shardingsphere-Proxy 5.1.2 部署为例,集群部署请参考其他相关文档。\n  准备好 MySQL 集群\n  部署安装并配置 Shardingsphere-Proxy:\n conf/server.yaml,props.proxy-hint-enabled 必须为 true,完整配置可参考这里。 conf/config-sharding.yaml,配置逻辑数据库和 dataSources 列表,dataSource 的名称必须以 ds_为前缀,并且从 ds_0 开始,完整配置可参考这里。    部署安装并配置 SkyWalking OAP:\n 设置 OAP 环境变量 ${SW_STORAGE:mysql-sharding} 根据实际部署情况配置连接信息: ${SW_JDBC_URL} ${SW_DATA_SOURCE_USER} ${SW_DATA_SOURCE_PASSWORD}  注意:连接信息需对应 Shardingsphere-Proxy 虚拟数据库。\n  将 Shardingsphere-Proxy 中 conf/config-sharding.yaml 配置的数据源名称配置在 ${SW_JDBC_SHARDING_DATA_SOURCES} 中,用 , 分割\n  启动 MySQL 集群\n  启动 Shardingsphere-Proxy\n  启动 SkyWalking OAP\n  使用 Docker 运行 Demo GitHub 资源库提供了一个基于 Docker 完整可运行的 demo:skywalking-mysql-sharding-demo,可以快速尝试实际运行效果。\n其中部署包含:\n oap 服务 1 个,Metrics 和 Record 数据的 TTL 均设为 2 天 sharding-proxy 服务 1 个版本为 5.1.2,对外端口为 13307,创建的逻辑库名称为 swtest mysql 服务 2 个,对外端口分别为 3306,3307,在 sharding-proxy 的 conf/config-sharding.yaml 中配置为 ds_0 和 ds_1 provider 服务 1 个(模拟业务程序用于验证 trace 和 metrics 等数据),对外端口为 9090 consumer 服务 1 个(模拟业务程序用于验证 trace 和 metrics 等数据),对外端口为 9092  将 Demo 程序获取到本地后,在 skywalking-mysql-sharding-demo 目录下直接运行:\ndocker-compose up -d 注意:初次启动由于拉取镜像和新建所有表可能需要一定的时间。\n所有服务启动完成之后可以通过数据库工具查看 sharding-proxy 逻辑表创建情况,以及两个 MySQL 库中实际的物理分片表创建情况。也可以连接 sharding-proxy 逻辑库 swtest 查看数据查询路由情况,如:\nPREVIEW SELECT * FROM SEGMENT 显示结果如下:\nDemo 提供的模拟业务程序可以通过请求 consumer 服务模拟业务请求,用于验证各类型数据分布:\ncurl http://127.0.0.1:9092/info 总结 在这篇文章中我们详细介绍了 SkyWalking 基于 ShardingSphere-Proxy 的 MySQL-Sharding 存储特性的部署架构、适应场景、核心分库分表逻辑以及 TTL 机制,并提供了运行后的数据存储示例和详细部署配置步骤以便大家快速理解上手。SkyWalking 提供了多种存储方式以供选择,如果你目前的需求如本文所述,欢迎使用该新特性。\n","title":"SkyWalking 基于 ShardingSphere-Proxy 的 MySQL-Sharding 分库分表的存储特性介绍","url":"/zh/skywalking-shardingsphere-proxy/"},{"content":"SkyWalking Kubernetes Helm Chart 4.4.0 is released. Go to downloads page to find release tars.\n [Breaking Change]: remove .Values.oap.initEs, there is no need to use this to control whether to run init job anymore, SkyWalking Helm Chart automatically delete the init job when installing/upgrading. [Breaking Change]: remove files/config.d mechanism and use values.yaml files to put the configurations to override default config files in the /skywalking/config folder, using files/config.d is very limited and you have to clone the source codes if you want to use this mechanism, now you can simply use our Docker Helm Chart to install. Refactor oap init job, and support postgresql storage. Upgrade ElasticSearch Helm Chart dependency version.  
","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.4.0","url":"/events/release-apache-skywalking-kubernetes-helm-chart-4.4.0/"},{"content":"SkyWalking PHP 0.2.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Update PECL user by @heyanlong in https://github.com/apache/skywalking-php/pull/12 Start up 0.2.0 by @heyanlong in https://github.com/apache/skywalking-php/pull/13 Update compiling project document. by @jmjoy in https://github.com/apache/skywalking-php/pull/14 Add PDO plugin, and switch unix datagram to stream. by @jmjoy in https://github.com/apache/skywalking-php/pull/15 Update readme about creating issue. by @jmjoy in https://github.com/apache/skywalking-php/pull/17 Fix package.xml role error by @heyanlong in https://github.com/apache/skywalking-php/pull/16 Add swoole support. by @jmjoy in https://github.com/apache/skywalking-php/pull/19 Add .fleet to .gitignore by @heyanlong in https://github.com/apache/skywalking-php/pull/20 [Feature] Add Mysql Improved Extension by @heyanlong in https://github.com/apache/skywalking-php/pull/18 Add predis plugin. by @jmjoy in https://github.com/apache/skywalking-php/pull/21 Take care of PDO false and DSN tailing semicolons. by @phanalpha in https://github.com/apache/skywalking-php/pull/22 Add container by @heyanlong in https://github.com/apache/skywalking-php/pull/23 Save PDO exceptions. by @phanalpha in https://github.com/apache/skywalking-php/pull/24 Update minimal supported PHP version to 7.2. by @jmjoy in https://github.com/apache/skywalking-php/pull/25 Utilize UnixListener for the worker process to accept reports. by @phanalpha in https://github.com/apache/skywalking-php/pull/26 Kill the worker on module shutdown. by @phanalpha in https://github.com/apache/skywalking-php/pull/28 Add plugin for memcached. by @jmjoy in https://github.com/apache/skywalking-php/pull/27 Upgrade rust mini version to 1.65. by @jmjoy in https://github.com/apache/skywalking-php/pull/30 Add plugin for phpredis. by @jmjoy in https://github.com/apache/skywalking-php/pull/29 Add missing request_id. by @jmjoy in https://github.com/apache/skywalking-php/pull/31 Adapt virtual cache. by @jmjoy in https://github.com/apache/skywalking-php/pull/32 Fix permission denied of unix socket. by @jmjoy in https://github.com/apache/skywalking-php/pull/33 Bump to 0.2.0. by @jmjoy in https://github.com/apache/skywalking-php/pull/34  New Contributors  @phanalpha made their first contribution in https://github.com/apache/skywalking-php/pull/22  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.1.0...v0.2.0\nPECL https://pecl.php.net/package/skywalking_agent/0.2.0\n","title":"Release Apache SkyWalking PHP 0.2.0","url":"/events/release-apache-skwaylking-php-0-2-0/"},{"content":"This is an official annoucement from SkyWalking team.\nDue to the Plan to End-of-life(EOL) all v8 releases in Nov. 2022 had been posted in 3 months, SkyWalking community doesn\u0026rsquo;t received any objection or a proposal about releasing a new patch version.\nNow, it is time to end the v8 series. All documents of v8 are not going to be hosted on the website. You only could find the artifacts and source codes from the Apache\u0026rsquo;s archive repository. The documents of each version are included in /docs/ folder in the source tars.\nThe SkyWalking community would reject the bug reports and release proposal due to its End-of-life(EOL) status. v9 provides more powerful features and covers all capabilities of the latest v8. 
We recommend upgrading to the latest version.\nV8 was a memorable and significant release series, which made the project globally adopted. It brought the developer community's scale up to over 500 contributors.\nWe want to highlight and thank all those contributors and end users again. You made today's SkyWalking.\nWe welcome more contributors and users to join the community and contribute your ideas, experiences, and feedback. We need you to improve and enhance the project to a higher level.\n","title":"SkyWalking v8 OAP server End-of-life(EOL)","url":"/events/v8-eol/"},{"content":"SkyWalking BanyanDB 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Command line tool: bydbctl. Retention controller. Full-text searching. TopN aggregation. Add RESTful style APIs based on the gRPC gateway. Add "exists" endpoints to the schema registry. Support tag-based CRUD of the property. Support index-only tags. Support logical operators (and & or) for the query.  Bugs  "metadata" syncing pipeline complains about an "unknown group". "having" semantic inconsistency. "tsdb" leaked goroutines.  Chores  "tsdb" structure optimization.  Merge the primary index into the LSM-based index. Remove term metadata.   Memory parameters optimization. Bump Go to 1.19.  ","title":"Release Apache SkyWalking BanyanDB 0.2.0","url":"/events/release-apache-skywalking-banyandb-0-2-0/"},{"content":"SkyWalking Java Agent 8.13.0 is released. Go to downloads page to find release tars. Changes by Version\n8.13.0 This release begins to adopt SkyWalking 9.3.0+ Virtual Cache Analysis, Virtual MQ Analysis\n Support set-type in the agent or plugin configurations. Optimize ConfigInitializer to output warning messages when a config value is truncated. Fix that the default value of a Map field would be merged rather than overridden by new values in the config. Support setting the value of a Map/List field to an empty map/list. Add plugin to support Impala JDBC 2.6.x. Update guava-cache, jedis, memcached, ehcache plugins to adopt uniform tags. Fix Apache ShenYu plugin traceId empty string value. Add plugin to support brpc-java-3.x. Update compose-start-script.template to make it compatible with the new version of docker compose. Bump up grpc to 1.50.0 to fix CVE-2022-3171. Polish up nats plugin to unify MQ-related tags. Correct the duration of the transaction span for Neo4J 4.x. Plugin-test configuration.yml dependencies support the docker service command field. Polish up rabbitmq-5.x plugin to fix a missing broker tag on the consumer side. Polish up activemq plugin to fix a missing broker tag on the consumer side. Enhance MQ plugin related tests to check that key tags are not blank. Add RocketMQ test scenarios for versions 4.3 - 4.9. No 4.0 - 4.2 release images for testing. Support manual propagation of the tracing context to next operators for webflux. Add MQ_TOPIC and MQ_BROKER tags for the RocketMQ consumer's span. Polish up Pulsar plugins to remove an unnecessary dynamic value, set peer at the consumer side. Polish Kafka plugin to set peer at the consumer side. Polish NATS plugin to set peer at the consumer side. Polish ActiveMQ plugin to set peer at the consumer side. Polish RabbitMQ plugin to set peer at the consumer side.  Documentation  Update configuration doc about overriding default values as an empty map/list accordingly. Update plugin dev tags for cache-related tags. Add plugin dev docs for virtual database tags. 
Add plugin dev docs for virtual MQ tags. Add doc about kafka plugin Manual APIs.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.13.0","url":"/events/release-apache-skywalking-java-agent-8-13-0/"},{"content":"SkyWalking Client JS 0.9.0 is released. Go to downloads page to find release tars.\n Fix custom configurations when the page router changed for SPA. Fix reporting data by navigator.sendbeacon when pages is closed. Bump dependencies. Add Security Notice. Support adding custom tags to spans. Validate custom parameters for register.  ","title":"Release Apache SkyWalking Client JS 0.9.0","url":"/events/release-apache-skywalking-client-js-0-9-0/"},{"content":"I am excited to announce a new SkyWalking committer, Yueqin Zhang(GitHub ID, yswdqz). Yueqin entered the SkyWalking community on Jul. 3rd[1], 2022, for the first time. Later, I knew he was invited by Yihao Chen, our committer, who is running an open-source program for students who can\u0026rsquo;t join Summer 2022 due to SkyWalking having limited slots.\nHis first PR[2] for Issue #7420 took 20 days to propose. I believe he took incredibly hard work in his own time. For every PMC member, we all were there. Purely following documents and existing codes to build a new feature is always not easy to start.\nAfter that, we had several private talks, he asked for more possible directions to join the community deeper. Then, I am honored to witness a great landscape extension in SkyWalking feature territory, SkyWalking adopts OpenTelemetry features quickly, and is powered by our powerful MAL and v9 kernel/UI, He built MySQL and PostgreSQL server monitoring, metrics, and slow SQLs collecting(through enhancing LAL with a new layer concept), under a new menu, .\nIt is unbelievable to see his contributions in the main repo, 8 PRs[3], LOC 4,857++, 1,627\u0026ndash;\nMeanwhile, this story continues, he is trying to build A lightweight and APM-oriented SQL parser module[4] under my mentoring. This would be another challenging idea, but also very useful to enhance existing virtual database perf. analyzing.\nI believe this would not be the end for the moment between SkyWalking and him.\nWelcome to join the team.\nReferrer \u0026amp; PMC member, Sheng Wu.\n [1] https://github.com/apache/skywalking/issues/7420#issuecomment-1173061870 [2] https://github.com/apache/skywalking-java/pull/286 [3] https://github.com/apache/skywalking/commits?author=yswdqz [4] https://github.com/apache/skywalking/issues/9661  ","title":"Welcome Yueqin Zhang as a new committer","url":"/events/welcome-yueqin-zhang-as-new-committer/"},{"content":"SkyWalking PHP 0.1.0 is released. Go to downloads page to find release tars.\nWhat's Changed  [docs] Update README by @heyanlong in https://github.com/apache/skywalking-php/pull/1 Remove the CI limit first, in order to run CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/3 Setup CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/5 Implementation, with curl support. By @jmjoy in https://github.com/apache/skywalking-php/pull/4 Turn off Swoole support, and fix Makefile. By @jmjoy in https://github.com/apache/skywalking-php/pull/6 Update docs by @heyanlong in https://github.com/apache/skywalking-php/pull/7 Add PECL support. By @jmjoy in https://github.com/apache/skywalking-php/pull/8 Support macOS by replace ipc-channel with socket pair, upgrade dependencies and improve CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/9 Add compile and release docs. 
By @jmjoy in https://github.com/apache/skywalking-php/pull/10 Update official documentation link. By @jmjoy in https://github.com/apache/skywalking-php/pull/11  New Contributors  @heyanlong made their first contribution in https://github.com/apache/skywalking-php/pull/1 @jmjoy made their first contribution in https://github.com/apache/skywalking-php/pull/3  Full Changelog: https://github.com/apache/skywalking-php/commits/v0.1.0\nPECL https://pecl.php.net/package/skywalking_agent/0.1.0\n","title":"Release Apache SkyWalking PHP 0.1.0","url":"/events/release-apache-skwaylking-php-0-1-0/"},{"content":"Yanlong He (GitHub: heyanlong) has been a SkyWalking committer for years. He worked on skyapm-php for years to support the SkyWalking ecosystem. That PHP agent contributed significantly to SkyWalking's user adoption in the PHP landscape. Yanlong stays active in supporting and maintaining the project to help the community.\nJiemin Xia (GitHub: jmjoy) is a new committer voted in July 2022. He has been super active this year. He took over maintenance responsibility from Rei Shimizu, who is too busy with his daily work. He leads the Rust SDK, and is also a release manager for the Rust SDK.\nRecently, they have been working together to build a new SkyWalking PHP agent.\nWe now have our PHP agent v0.1.0 for the community.\nSkyWalking PHP Agent\nNotice, SkyAPM PHP is going to be archived and replaced by the SkyWalking PHP agent, according to its project maintainer, Yanlong He. Our community will work more closely together on the new PHP agent.\nLet's welcome and congratulate our 31st and 32nd PMC members, Yanlong He and Jiemin Xia. We are honored to have you.\n","title":"Welcome Yanlong He and Jiemin Xia to join the PMC","url":"/events/welcome-heyanlong-xiajiemin-join-the-pmc/"},{"content":"Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitoring tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there's an issue.\nHowever, when troubleshooting network issues in SkyWalking's service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy's Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. 
We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  
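Before looking at the concrete hook points, it can help to see the kind of data a kprobe exposes with a quick experiment. The sketch below is only an illustration and is not how SkyWalking Rover is implemented: it uses bpftrace to attach kprobes to the kernel's tcp_sendmsg and tcp_recvmsg functions (assuming the common tcp_sendmsg(sk, msg, size) signature, where the third argument is the payload size) instead of the exact syscalls Rover hooks, and it aggregates bytes sent and receive calls per process name.

# Requires bpftrace and root privileges; press Ctrl-C to print the aggregated maps.
sudo bpftrace -e '
kprobe:tcp_sendmsg { @sent_bytes[comm] = sum(arg2); }
kprobe:tcp_recvmsg { @recv_calls[comm] = count(); }
'

A real collector such as Rover has to do much more on top of this - map sockets to processes and pods, correlate the split send/receive events, and recognize protocols - which is what the following sections describe.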
Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\nGet Started with Istio To get started with service mesh today, Tetrate Istio Distro is the easiest way to install, manage, and upgrade Istio. It provides a vetted upstream distribution of Istio that\u0026rsquo;s tested and optimized for specific platforms by Tetrate plus a CLI that facilitates acquiring, installing, and configuring multiple Istio versions. Tetrate Istio Distro also offers FIPS certified Istio builds for FedRAMP environments.\nFor enterprises that need a unified and consistent way to secure and manage services and traditional workloads across complex, heterogeneous deployment environments, we offer Tetrate Service Bridge, our flagship edge-to-workload application connectivity platform built on Istio and Envoy.\nContact us to learn more.\nAdditional Resources  SkyWalking Github Repo SkyWalking Rover Github Repo SkyWalking Rover Documentation Pinpoint Service Mesh Critical Performance impact by using eBPF blog post Apache SkyWalking with Native eBPF Agent presentation eBPF hook overview  ","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/blog/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"本文将展示如何利用 Apache SkyWalking 与 eBPF,使服务网格下的网络故障排除更加容易。\nApache SkyWalking 是一个分布式系统的应用性能监控工具。它观察服务网格中的指标、日志、痕迹和事件,并使用这些数据来生成 pod 和服务的依赖图。这个依赖关系图可以帮助你快速系统,尤其是在出现问题的时候。\n然而,在排除 SkyWalking 服务拓扑中的网络问题时,确定错误的实际位置有时候并不容易。造成这种困难的原因有两个:\n 通过 Envoy sidecar 的流量并不容易观察:来自 Envoy 的访问日志服务(ALS)的数据显示了服务之间的流量(sidecar-to-sidecar),但没有关于 Envoy sidecar 和它代理的服务之间的通信指标。如果没有这些信息,就很难理解 sidecar 的影响。 缺乏来自传输层(OSI 第 4 层)通信的数据:由于服务通常使用应用层(OSI 第 7 层)协议,如 HTTP,可观测性数据通常被限制在应用层通信中。然而,根本原因可能实际上是在传输层,而传输层对可观测性工具来说通常是不透明的。  获取 Envoy-to-service 和传输层通信的指标,可以更容易诊断服务问题。为此,SkyWalking 需要收集和分析 Kubernetes pod 内进程之间的传输层指标 —— 这项任务很适合 eBPF。我们调查了为此目的使用 eBPF 的情况,并在下面介绍了我们的结果和演示。\n用 eBPF 监控 Kubernetes 网络 eBPF 起源于 Extended Berkeley Packet Filter,是一种通用的机制,可以在 Linux 内核中注入和运行自己的代码,是监测 Kubernetes Pod 中网络流量的优秀工具。在接下来的几节中,我们将概述如何使用 eBPF 进行网络监控,作为介绍 Skywalking Rover 的背景,这是一个由 eBPF 驱动的指标收集器和分析器,用于诊断 CPU 和网络性能。\n应用程序和网络如何相互作用 应用程序和网络之间的互动一般可分为以下步骤,从较高的抽象层次到较低的抽象层次:\n 用户代码:应用程序代码使用应用程序堆栈中的高级网络库,在网络上交换数据,如发送和接收 HTTP 请求。 网络库:当网络库收到网络请求时,它与语言 API 进行交互以发送网络数据。 语言 API:每种语言都提供了一个操作网络、系统等的 API。当收到一个请求时,它与系统的 API 进行交互。在 Linux 中,这个 API 被称为系统调用(syscalls)。 Linux API:当 Linux 内核通过 API 收到请求时,它与套接字进行通信以发送数据,这通常更接近于 OSI 第四层协议,如 TCP、UDP 等。 Socket Ops:向 / 从网卡发送或接收数据。  我们的假设是,eBPF 可以监控网络。有两种方法可以实现拦截:用户空间(uprobe)或内核空间(kprobe)。下表总结了两者的区别。\n   方式 优点 缺点     uprobe • 获取更多与应用相关的上下文,例如当前请求是 HTTP 还是 HTTPS。 • 请求和响应可以通过一个方法来截获。 • 数据结构可能是不稳定的,所以更难获得所需的数据。 • 不同语言/库版本的实现可能不同。 • 在没有符号表的应用程序中不起作用。   kprobe • 可用于所有语言。 • 数据结构和方法很稳定,不需要太多调整。 • 更容易与底层数据相关联,如获得 TCP 的目标地址、OSI 第四层协议指标等。 • 一个单一的请求和响应可能被分割成多个 probe。 • 对于有状态的请求,上下文信息不容易得到。例如 HTTP/2 中的头压缩。    对于一般的网络性能监控,我们选择使用 kprobe(拦截系统调用),原因如下:\n 它可用于用任何编程语言编写的应用程序,而且很稳定,所以可以节省大量的开发 / 适应成本。 它可以与系统层面的指标相关联,这使得故障排除更加容易。 由于一个请求和响应被分割成多个 probe,我们可以利用技术将它们关联起来。 对于背景信息,它通常用于 OSI 第七层协议网络分析。因此,如果我们只是监测网络性能,那么它们可以被忽略。  Kprobes 和网络监控 按照 Linux 文档中的网络系统调用,我们可以通过两类拦截方法实现网络监控:套接字操作和发送 / 接收方法。\n套接字操作 当接受或与另一个套接字连接时,我们可以得到以下信息:\n 连接信息:包括来自连接的远程地址,这有助于我们了解哪个 pod 被连接。 连接统计 :包括来自套接字的基本指标,如往返时间(RTT)、TCP 的丢包数等。 套接字和文件描述符(FD)的映射:包括 Linux 文件描述符和套接字对象之间的关系。在通过 Linux 文件描述符发送和接收数据时,它很有用。  发送 / 接收 与发送或接收数据有关的接口是性能分析的重点。它主要包含以下参数:\n Socket 文件描述符:当前操作对应的套接字的文件描述符。 缓冲区:发送或接收的数据,以字节数组形式传递。  基于上述参数,我们可以分析以下数据:\n 字节:数据包的大小,以字节为单位。 协议:根据缓冲区的数据进行协议分析,如 HTTP、MySQL 等。 执行时间:发送 / 接收数据所需的时间。  在这一点上(图 1),我们可以分析出连接的整个生命周期的以下步骤:\n 连接 / 接受:当连接被创建时。 
转化:在连接上发送和接收数据。 关闭:当连接被关闭时。  图 1\n协议和 TLS 上一节描述了如何使用发送或接收缓冲区数据来分析连接。例如,遵循 HTTP/1.1 消息规范来分析连接。然而,这对 TLS 请求 / 响应不起作用。\n图 2\n当使用 TLS 时,Linux 内核在用户空间中传输加密的数据。在上图中,应用程序通常通过第三方库(如 OpenSSL)传输 SSL 数据。对于这种情况,Linux API 只能得到加密的数据,所以它不能识别任何高层协议。为了在 eBPF 内部解密,我们需要遵循以下步骤:\n 通过 uprobe 读取未加密的数据:兼容多种语言,使用 uprobe 来捕获发送前或接收后没有加密的数据。通过这种方式,我们可以获得原始数据并将其与套接字联系起来。 与套接字关联:我们可以将未加密的数据与套接字关联。  OpenSSL 用例 例如,发送 / 接收 SSL 数据最常见的方法是使用 OpenSSL 作为共享库,特别是 SSL_read 和 SSL_write 方法,以提交缓冲区数据与套接字。\n按照文档,我们可以截获这两种方法,这与 Linux 中的 API 几乎相同。OpenSSL 中 SSL 结构的源代码显示, Socket FD 存在于 SSL 结构的 BIO 对象中,我们可以通过 offset 得到它。\n综上所述,通过对 OpenSSL 工作原理的了解,我们可以在一个 eBPF 函数中读取未加密的数据。\nSkyWalking Rover—— 基于 eBPF 的指标收集器和分析器 SkyWalking Rover 在 SkyWalking 生态系统中引入了 eBPF 网络分析功能。目前已在 Kubernetes 环境中得到支持,所以必须在 Kubernetes 集群内部署。部署完成后,SkyWalking Rover 可以监控特定 Pod 内所有进程的网络。基于监测数据,SkyWalking 可以生成进程之间的拓扑关系图和指标。\n拓扑结构图 拓扑图可以帮助我们了解同一 Pod 内的进程之间以及进程与外部环境(其他 Pod 或服务)之间的网络访问情况。此外,它还可以根据线路的流动方向来确定流量的数据方向。\n在下面的图 3 中,六边形内的所有节点都是一个 Pod 的内部进程,六边形外的节点是外部关联的服务或 Pod。节点由线连接,表示节点之间的请求或响应方向(客户端或服务器)。线条上标明了协议,它是 HTTP (S)、TCP 或 TCP (TLS)。另外,我们可以在这个图中看到,Envoy 和 Python 应用程序之间的线是双向的,因为 Envoy 拦截了所有的应用程序流量。\n图 3\n度量 一旦我们通过拓扑结构认识到进程之间的网络调用关系,我们就可以选择一个特定的线路,查看两个进程之间的 TCP 指标。\n下图(图4)显示了两个进程之间网络监控的指标。每行有四个指标。左边的两个是在客户端,右边的两个是在服务器端。如果远程进程不在同一个 Pod 中,则只显示一边的指标。\n图 4\n有以下两种度量类型。\n 计数器(Counter):记录一定时期内的数据总数。每个计数器包含以下数据。  计数:执行次数。 字节:数据包大小,以字节为单位。 执行时间:执行时间。   柱状图(Histogram):记录数据在桶中的分布。  基于上述数据类型,暴露了以下指标:\n   名称 类型 单位 描述     Write 计数器和柱状图 毫秒 套接字写计数器。   Read 计数器和柱状图 毫秒 套接字读计数器。   Write RTT 计数器和柱状图 微秒 套接字写入往返时间(RTT)计数器。   Connect 计数器和柱状图 毫秒 套接字连接/接受另一个服务器/客户端的计数器。   Close 计数器和柱状图 毫秒 有其他套接字的计数器。   Retransmit 计数器 毫秒 套接字重发包计数器   Drop 计数器 毫秒 套接字掉包计数器。    演示 在本节中,我们将演示如何在服务网格中执行网络分析。要跟上进度,你需要一个正在运行的 Kubernetes 环境。\n注意:所有的命令和脚本都可以在这个 GitHub 资源库中找到。\n安装 Istio Istio是最广泛部署的服务网格,并附带一个完整的演示应用程序,我们可以用来测试。要安装 Istio 和演示应用程序,请遵循以下步骤:\n 使用演示配置文件安装 Istio。 标记 default 命名空间,所以当我们要部署应用程序时,Istio 会自动注入 Envoy 的 sidecar 代理。 将 bookinfo 应用程序部署到集群上。 部署流量生成器,为应用程序生成一些流量。  export ISTIO_VERSION=1.13.1 # 安装 istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # 部署 bookinfo 应用程序 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # 产生流量 kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml 安装 SkyWalking 下面将安装 SkyWalking 所需的存储、后台和用户界面。\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; 安装 SkyWalking Rover SkyWalking Rover 部署在 Kubernetes 的每个节点上,它自动检测 Kubernetes 
集群中的服务。网络剖析功能已经在 SkyWalking Rover 的 0.3.0 版本中发布。当网络监控任务被创建时,SkyWalking Rover 会将数据发送到 SkyWalking 后台。\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml 启动网络分析任务 一旦所有部署完成,我们必须在 SkyWalking UI 中为服务的特定实例创建一个网络分析任务。\n要打开 SkyWalking UI,请运行:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system 目前,我们可以通过点击服务网格面板中的数据平面项目和 Kubernetes 面板中的服务项目来选择我们想要监控的特定实例。\n在下图中,我们选择了一个实例,在网络剖析标签里有一个任务列表。当我们点击启动按钮时,SkyWalking Rover 开始监测这个实例的网络。\n图 5\n完成 几秒钟后,你会看到页面的右侧出现进程拓扑结构。\n图 6\n当你点击进程之间的线时,你可以看到两个进程之间的 TCP 指标。\n图 7\n总结 在这篇文章中,我们详细介绍了一个使服务网格故障排除困难的问题:网络堆栈中各层之间缺乏上下文。这些情况下,当现有的服务网格 /envoy 不能时,eBPF 开始真正帮助调试 / 生产。然后,我们研究了如何将 eBPF 应用于普通的通信,如 TLS。最后,我们用 SkyWalking Rover 演示了这个过程的实现。\n目前,我们已经完成了对 OSI 第四层(主要是 TCP)的性能分析。在未来,我们还将介绍对 OSI 第 7 层协议的分析,如 HTTP。\n开始使用 Istio 开始使用服务网格,Tetrate Istio Distro 是安装、管理和升级 Istio 的最简单方法。它提供了一个经过审查的 Istio 上游发布,由 Tetrate 为特定平台进行测试和优化,加上一个 CLI,方便获取、安装和配置多个 Istio 版本。Tetrate Istio Distro 还为 FedRAMP 环境提供 FIPS 认证的 Istio 构建。\n对于需要以统一和一致的方式在复杂的异构部署环境中保护和管理服务和传统工作负载的企业,我们提供 Tetrate Service Bridge,这是我们建立在 Istio 和 Envoy 上的旗舰工作负载应用连接平台。\n联系我们以了解更多。\n其他资源  SkyWalking Github Repo SkyWalking Rover Github Repo SkyWalking Rover 文件 通过使用 eBPF 博文准确定位服务网格关键性能影响 Apache SkyWalking 与本地 eBPF 代理的介绍 eBPF hook概述  ","title":"使用 eBPF 诊断服务网格网络性能","url":"/zh/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"SkyWalking CLI 0.11.0 is released. Go to downloads page to find release tars.\n Add .github/scripts to release source tarball by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/140 Let the eBPF profiling could performs by service level by @mrproliu in https://github.com/apache/skywalking-cli/pull/141 Add the sub-command for estimate the process scale by @mrproliu in https://github.com/apache/skywalking-cli/pull/142 feature: update install.sh version regex by @Alexxxing in https://github.com/apache/skywalking-cli/pull/143 Update the commands relate to the process by @mrproliu in https://github.com/apache/skywalking-cli/pull/144 Add layer to event related commands by @fgksgf in https://github.com/apache/skywalking-cli/pull/145 Add layer to events.graphql by @fgksgf in https://github.com/apache/skywalking-cli/pull/146 Add layer field to alarms.graphql by @fgksgf in https://github.com/apache/skywalking-cli/pull/147 Upgrade crypto lib to fix cve by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/148 Remove layer field in the instance and process commands by @mrproliu in https://github.com/apache/skywalking-cli/pull/149 Remove duration flag in profiling ebpf schedules by @mrproliu in https://github.com/apache/skywalking-cli/pull/150 Remove total field in trace list and logs list commands by @mrproliu in https://github.com/apache/skywalking-cli/pull/152 Remove total field in event list, browser logs, alarm list commands. 
by @mrproliu in https://github.com/apache/skywalking-cli/pull/153 Add aggregate flag in profiling ebpf analysis commands by @mrproliu in https://github.com/apache/skywalking-cli/pull/154 event: fix event query should query all types by default by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/155 Fix a possible lint error and update CI lint version by @JarvisG495 in https://github.com/apache/skywalking-cli/pull/156 Add commands for support network profiling by @mrproliu in https://github.com/apache/skywalking-cli/pull/158 Add the components field in the process relation by @mrproliu in https://github.com/apache/skywalking-cli/pull/159 Trim license headers in query string by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/160 Bump up dependency swck version to fix CVE by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/161 Bump up swck dependency for transitive dep upgrade by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/162 Add the sub-commands for query sorted metrics/records by @mrproliu in https://github.com/apache/skywalking-cli/pull/163 Add compatibility documentation by @mrproliu in https://github.com/apache/skywalking-cli/pull/164 Overhaul licenses, prepare for 0.11.0 by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/165  ","title":"Release Apache SkyWalking CLI 0.11.0","url":"/events/release-apache-skywalking-cli-0-11-0/"},{"content":"SkyWalking Kubernetes Helm Chart 4.3.0 is released. Go to downloads page to find release tars.\n Fix hasSuffix replace hasPrefix by @geffzhang in https://github.com/apache/skywalking-kubernetes/pull/86 Add \u0026ldquo;pods/log\u0026rdquo; permission to OAP so on-demand Pod log can work by @kezhenxu94 in https://github.com/apache/skywalking-kubernetes/pull/87 add .Values.oap.initEs to work with ES initial by @williamyao1982 in https://github.com/apache/skywalking-kubernetes/pull/88 Remove Istio adapter, add changelog for 4.3.0 by @kezhenxu94 in https://github.com/apache/skywalking-kubernetes/pull/89 Bump up helm chart version by @kezhenxu94 in https://github.com/apache/skywalking-kubernetes/pull/90  ","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.3.0","url":"/events/release-apache-skywalking-kubernetes-helm-chart-4.3.0/"},{"content":"SkyWalking Cloud on Kubernetes 0.7.0 is released. Go to downloads page to find release tars.\nFeatures  Replace go-bindata with embed lib. Add the OAPServerConfig CRD, webhooks and controller. Add the OAPServerDynamicConfig CRD, webhooks and controller. Add the SwAgent CRD, webhooks and controller. [Breaking Change] Remove the way to configure the agent through Configmap.  Bugs  Fix the error in e2e testing. Fix status inconsistent with CI. Bump up prometheus client version to fix cve.  Chores  Bump several dependencies of adapter. Update license eye version. Bump up SkyWalking OAP to 9.0.0. Bump up the k8s api of the e2e environment to v1.21.10.  ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.7.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-7-0/"},{"content":"SkyWalking Rover 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support NETWORK Profiling. Let the logger as a configurable module. Support analyze the data of OpenSSL, BoringSSL library, GoTLS, NodeTLS in NETWORK Profiling. Enhancing the kubernetes process finder.  Bug Fixes  Fixed reading process paths incorrect when running as a container. Fix the crash caused by multiple profiling tasks.  
Issues and PR  All issues are here. All pull requests are here.  ","title":"Release Apache SkyWalking Rover 0.3.0","url":"/events/release-apache-skwaylking-rover-0-3-0/"},{"content":"SkyWalking Java Agent 8.12.0 is released. Go to downloads page to find release tars. Changes by Version\n8.12.0  Fix Shenyu plugin's NPE in reading trace ID when IgnoredTracerContext is used in the context. Update witness class in elasticsearch-6.x-plugin to avoid throwing NPE. Fix onHalfClose using span operation name /Request/onComplete instead of the wrong name /Request/onHalfClose. Add plugin to support RESTeasy 4.x. Add plugin to support hutool-http 5.x. Add plugin to support Tomcat 10.x. Save HTTP status code regardless of its status. Upgrade byte-buddy to 1.12.13, and adopt byte-buddy API changes. Upgrade gson to 2.8.9. Upgrade netty-codec-http2 to 4.1.79.Final. Fix race condition causing the agent to not reconnect after a network error. Force the injected high-priority classes in order to avoid NoClassDefFoundError. Add plugin to support xxl-job 2.3.x. Add plugin to support Micronaut (HTTP Client/Server) 3.2.x-3.6.x. Add plugin to support NATS Java client 2.14.x-2.15.x. Remove inappropriate dependency from elasticsearch-7.x-plugin. Upgrade jedis plugin to support 3.x (stream), 4.x.  Documentation  Add a section in the Bootstrap-plugins doc, introducing HttpURLConnection plugin compatibility. Update Plugin automatic test framework, fix inconsistent description about configuration.yml. Update Plugin automatic test framework, add expected data format of the log items.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.12.0","url":"/events/release-apache-skywalking-java-agent-8-12-0/"},{"content":"This is an official announcement from the SkyWalking team.\nThe SkyWalking backend server and UI released the significant 9.2.0 version on Sep. 2nd, 2022. With the newly added Layer concept, the eBPF agent, and wider middleware server monitoring (such as MySQL and PostgreSQL servers) powered by the OpenTelemetry ecosystem, SkyWalking v9 has become much more powerful than the last v8 version (8.9.1).\nBy now, we have resolved all critical bugs found since the 9.0.0 release that could block v8 users from upgrading. v9 releases also provide the same compatibility as the 8.9.1 release, so end users should not be blocked when they upgrade. (As usual, we don't provide storage structure compatibility; users should initialize a new version with an empty database.)\nMore importantly, we are confident that v9 can provide a stable, higher-performance APM in production environments.\nThe 8.9.1 release was published in Dec. 2021. Since then, no one has contributed any code, and no committer has requested to begin a new iteration or planned to run a patch release. From the project management committee's perspective, 8.x has become inactive.\nWe are going to wait for another 3 months to officially end the 8.x series' life.\nNotice, this could change if at least 3 committers commit to working on further 8.x releases officially and provide a release plan.\n","title":"Plan to End-of-life(EOL) all v8 releases in Nov. 2022","url":"/events/deprecate-v8/"},{"content":"SkyWalking 9.2.0 is released. Go to downloads page to find release tars.\neBPF Network Profiling for K8s Pod Event and Metrics Association MySQL Server Monitoring PostgreSQL Server Monitoring Project  [Critical] Fix a low performance issue of metrics persistence in the ElasticSearch storage implementation. 
One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. 
[Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. [Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a URL parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MySQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the UI doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.2.0","url":"/events/release-apache-skywalking-apm-9.2.0/"},{"content":"SkyWalking Rust 0.4.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Publish release doc. 
by @wu-sheng in https://github.com/apache/skywalking-rust/pull/31 Set up CI and approval requirements by @wu-sheng in https://github.com/apache/skywalking-rust/pull/32 Move skywalking_proto mod to single files. by @jmjoy in https://github.com/apache/skywalking-rust/pull/33 Polish the release doc. by @wu-sheng in https://github.com/apache/skywalking-rust/pull/34 Add serde support for protobuf generated struct. by @jmjoy in https://github.com/apache/skywalking-rust/pull/35 Improve LogReporter and fix tests. by @jmjoy in https://github.com/apache/skywalking-rust/pull/36 Split tracer inner segment sender and receiver into traits. by @jmjoy in https://github.com/apache/skywalking-rust/pull/37 Switch to use nightly rustfmt. by @jmjoy in https://github.com/apache/skywalking-rust/pull/38 Change Span to refer to SpanStack, rather than TracingContext. by @jmjoy in https://github.com/apache/skywalking-rust/pull/39 Adjust the trace structure. by @jmjoy in https://github.com/apache/skywalking-rust/pull/40 Add logging. by @jmjoy in https://github.com/apache/skywalking-rust/pull/41 Upgrade dependencies. by @jmjoy in https://github.com/apache/skywalking-rust/pull/42 Add feature vendored, to auto build protoc. by @jmjoy in https://github.com/apache/skywalking-rust/pull/43 Add metrics. by @jmjoy in https://github.com/apache/skywalking-rust/pull/44 Add more GH labels as new supports by @wu-sheng in https://github.com/apache/skywalking-rust/pull/45 Bump to 0.4.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/46 Fix trace id is not transmitted. by @jmjoy in https://github.com/apache/skywalking-rust/pull/47  ","title":"Release Apache SkyWalking Rust 0.4.0","url":"/events/release-apache-skywalking-rust-0-4-0/"},{"content":"目录  开篇 为什么需要全链路监控 为什么选择SkyWalking 预研 POC 优化 未来  1、开篇 自从SkyWalking开始在公司推广,时不时会在排查问题的人群中听到这样的话:“你咋还没接SkyWalking?接入后,一眼就看出是哪儿的问题了\u0026hellip;\u0026quot;,正如同事所说的,在许多情况下,SkyWalking就是这么秀。作为实践者,我非常感谢SkyWalking,因为这款国产全链路监控产品给公司的的伙伴们带来了实实在在的帮助;也特别感谢公司的领导和同事们,正因为他们的支持和帮助,才让这套SkyWalking(V8.5.0)系统从起初的有用进化到现在的好用;从几十亿的Segment储能上限、几十秒的查询耗时,优化到千亿级的Segment储能、毫秒级的查询耗时。\n小提示:\n SkyWalking迭代速度很快,公司使用的是8.5.0版本,其新版本的性能肯定有改善。 Segment是SkyWalking中提出的概念,表示一次请求在某个服务内的执行链路片段的合集,一个请求在多个服务中先后产生的Segment串起来构成一个完整的Trace,如下图所示:  SkyWalking的这次实践,截止到现在有一年多的时间,回顾总结一下这段历程中的些许积累和收获,愿能反哺社区,给有需求的道友提供个案例借鉴;也希望能收获到专家们的指导建议,把项目做得更好。因为安全约束,要把有些内容和谐掉,但也努力把这段历程中那些**靓丽的风景,**尽可能完整的呈现给大家。\n2、为什么需要全链路监控 随着微服务架构的演进,单体应用按照服务维度进行拆分,组织架构也随之演进以横向、纵向维度拆分;一个业务请求的执行轨迹,也从单体应用时期一个应用实例内一个接口,变成多个服务实例的多个接口;对应到组织架构,可能跨越多个BU、多个Owner。虽然微服务架构高内聚低耦合的优势是不言而喻的,但是低耦合也有明显的副作用,它在现实中给跨部门沟通、协作带来额外的不可控的开销;因此开发者尤其是终端业务侧的架构师、管理者,特别需要一些可以帮助理解系统拓扑和用于分析性能问题的工具,便于在架构调整、性能检测和发生故障时,缩减沟通协作方面的精力和时间耗费,快速定位并解决问题。\n我所在的平安健康互联网股份有限公司(文中简称公司),是微服务架构的深度实践者。公司用互联网技术搭建医疗服务平台,致力于构筑专业的医患桥梁,提供专业、全面、高品质、一站式企业健康管理服务。为了进一步提高系统服务质量、提升问题响应效率,部门在21年结合自身的一些情况,决定对现行的全链路监控系统进行升级,目的与以下网络中常见的描述基本一致:\n 快速发现问题 判断故障影响范围 梳理服务依赖并判断依赖的合理性 分析链路性能并实施容量规划  3、为什么选择SkyWalking 在做技术选型时,网络中搜集的资料显示,谷歌的 Dapper系统,算是链路追踪领域的始祖。受其公开论文中提出的概念和理念的影响,一些优秀的企业、个人先后做出不少非常nice的产品,有些还在社区开源共建,如:韩国的Pinpoint,Twitter的Zipkin,Uber的Jaeger及中国的SkyWalking 等,我司选型立项的过程中综合考虑的因素较多,这里只归纳一下SkyWalking吸引我们的2个优势:\n  产品的完善度高:\n java生态,功能丰富 社区活跃,迭代迅速    链路追踪、拓扑分析的能力强:\n 插件丰富,探针无侵入。 采用先进的流式拓扑分析设计    “好东西不需要多说,实际行动告诉你“,这句话我个人非常喜欢,关于SkyWalking的众多的优点,网络上可以找到很多,此处先不逐一比较、赘述了。\n4、预研 当时最新版本8.5.0,梳理分析8.x的发布记录后,评估此版本的核心功能是蛮稳定的,于是基于此版本开始了SkyWalking的探索之旅。当时的认知是有限的,串行思维模型驱使我将关注的问题聚焦在架构原理是怎样、有什么副作用这2个方面:\n  架构和原理:\n agent端 主要关注 Java Agent的机制、SkyWalking Agent端的配置、插件的工作机制、数据采集及上报的机制。 服务端 主要关注 角色和职责、模块和配置、数据接收的机制、指标构建的机制、指标聚合的机制及指标存储的机制。 存储端 主要关注 
数据量,存储架构要求以及资源评估。    副作用:\n 功能干扰 性能损耗    4.1 架构和原理 SkyWalking社区很棒,官网文档和官方出版的书籍有较系统化的讲解,因为自己在APM系统以及Java Agent方面有一些相关的经验沉淀,通过在这两个渠道的学习,对Agent端和OAP(服务端)很快便有了较系统化的认知。在做系统架构选型时,评估数据量会比较大(成千上万的JVM实例数,每天采集的Segment数量可能是50-100亿的级别),所以传输通道选择Kafka、存储选择Elasticsearch,如此简易版的架构以及数据流转如下图所示:\n这里有几处要解释一下:\n Agent上报数据给OAP端,有grpc通道和kafka通道,当时就盲猜grpc通道可能撑不住,所以选择kafka通道来削峰;kafka通道是在8.x里加入的。 千亿级的数据用ES来做存储肯定是可以的。 图中L1聚合的意思是:SkyWalking OAP服务端 接收数据后,构建metric并完成metric 的Level-1聚合,这里简称L1聚合。 图中L2聚合的意思是:服务端 基于metric的Level-1聚合结果,再做一次聚合,即Level-2聚合,这里简称L2聚合。后续把纯Mixed角色的集群拆成了两个集群。  4.2 副作用 对于质量团队和接入方来说,他们最关注的问题是,接入SkyWalking后:\n 是否对应用有功能性干扰 在运行期能带来哪些性能损耗  这两个问题从3个维度来得到答案:\n  网络资料显示:\n Agent带来的性能损耗在5%以内 未搜到功能性干扰相关的资料(盲猜没有这方面问题)    实现机制评估:\n 字节码增强机制是JVM提供的机制,SkyWalking使用的字节码操控框架ByteBuddy也是成熟稳定的;通过自定义ClassLoader来加载管理插件类,不会产生冲突和污染。 Agent内插件开发所使用的AOP机制是基于模板方法模式实现的,风控很到位,即使插件的实现逻辑有异常也不影响用户逻辑的执行; 插件采集数据跟上报逻辑之间用了一个轻量级的无锁环形队列进行解耦,算是一种保护机制;这个队列在MPSC场景下性能还不错;队列采用满时丢弃的策略,不会有积压阻塞和OOM。    性能测试验证\n 测试的老师针对dubbo、http 这两种常规RPC通信场景,进行压力测试和稳定性测试,结果与网络资料描述一致,符合预期。    5、POC 在POC阶段,接入几十个种子应用,在非生产环境试点观察,同时完善插件补全链路,对接公司的配置中心,对接发布系统,完善自监控.全面准备达到推广就绪状态。\n5.1 对接发布系统 为了对接公司的发布系统,方便系统的发布,将SkyWalking应用拆分为4个子应用:\n   应用 介绍     Webapp Skywalking的web端   Agent Skywalking的Agent端   OAP-Receiver skywakling的服务端,角色是Mixed或Receiver   OAP-Aggregator skywalking的服务端,角色是Aggregator    这里有个考虑,暂定先使用纯Mixed角色的单集群,有性能问题时就试试 Receiver+Aggregator双角色集群模式,最终选哪种视效果而定。\nSkyWalking Agent端是基于Java Agent机制实现的,采用的是启动挂载模式;启动挂载需在启动脚本里加入挂载Java Agent的逻辑,发布系统实现这个功能需要注意2点:\n 启动脚本挂载SkyWalking Agent的环节,尽量让用户无感知。 发布系统在挂载Agent的时候,给Agent指定应用名称和所属分组信息。  SkyWalking Agent的发布和升级也由发布系统来负责;Agent的升级采用了灰度管控的方案,控制的粒度是应用级和实例级两种:\n 按照应用灰度,可给应用指定使用什么版本的Agent 按照应用的实例灰度,可给应用指定其若干实例使用什么版本的Agent  5.2 完善插件补全链路 针对公司OLTP技术栈,量身定制了插件套,其中大部分在开源社区的插件库中有,缺失的部分通过自研快速补齐。\n这些插件给各组件的核心环节埋点,采集数据上报给SkyWalking后,Web端的【追踪】页面就能勾勒出丰满完美的请求执行链路;这对架构师理解真实架构,测试同学验证逻辑变更和分析性能损耗,开发同学精准定位问题都非常的有帮助。这里借官方在线Demo的截图一用(抱歉后端程序员,五毛特效都没做出来,丰满画面还请自行脑补)\n友情小提示:移除不用的插件对程序编译打包和减少应用启动耗时很有帮助。\n5.3压测稳测 测试的老师,针对SkyWalking Agent端的插件套,设计了丰富的用例,压力测试和稳定性测试的结果都符合预期;每家公司的标准不尽一致,此处不再赘述。\n5.4 对接自研的配置中心 把应用中繁杂的配置交给配置中心来管理是非常必要的,配置中心既能提供启动时的静态配置,又能管理运行期的动态配置,而且外部化配置的机制特别容易满足容器场景下应用的无状态化要求。啰嗦一下,举2个例子:\n 调优时,修改参数的值不用来一遍开发到测试再到生产的发布。 观测系统状态,修改日志配置后不需要来一遍开发到测试再到生产的发布。  Skywaling在外接配置中心这块儿,适配了市面中主流的配置中心产品。而公司的配置中心是自研的,需要对接一下,得益于SkyWalking提供的模块化管理机制,只用扩展一个模块即可。\n在POC阶段,梳理服务端各模块的功能,能感受到其配置化做的不错,配置项很丰富,管控的粒度也很细;在POC阶段几乎没有变动,除了对Webapp模块的外部化配置稍作改造,与配置中心打通以便在配置中心管理 Webapp模块中Ribbon和Hystrix的相关配置。\n5.5完善自监控 自监控是说监控SkyWalking系统内各模块的运转情况:\n   组件 监控方案 说明     kafka kafka-manager 它俩是老搭档了   Agent端 Skywalking Agent端会发心跳信息给服务端,可在Web端看到Agent的信息   OAP集群 prometheus 指标还算丰富,感觉缺的可以自己补充   ES集群 prometheus 指标还算丰富    完善自监控后的架构如下图所示:\n5.6 自研Native端SDK 公司移动端的应用很核心,也要使用链路追踪的功能,社区缺了这块,于是基于SkyWalking的协议,移动端的伙伴们自研了一套SDK,弥补了Native端链路数据的缺失,也在后来的秒开页面指标统计中发挥了作用。随着口口相传,不断有团队提出需求、加入建设,所以也在持续迭代中;内容很多,这里先不展开。\n5.7 小结 POC阶段数据量不大,主要是发现系统的各种功能性问题,查缺补漏。\n6、优化 SkyWalking的正式推广采用的是城市包围农村的策略;公司的核心应用作为第一批次接入,这个策略有几个好处:\n 核心应用的监管是重中之重,优先级默认最高。 核心应用的上下游应用,会随着大家对SkyWalking依赖的加深,而逐步自主接入。  当然安全是第一位的,无论新系统多好、多厉害,其引入都需遵守安全稳定的前提要求。既要安全又要快速还要方便,于是基于之前Agent灰度接入的能力,在发布系统中增加应用Owner自助式灰度接入和快速卸载SkyWalking Agent的能力,即应用负责人可自主选择哪个应用接入,接入几个实例,倘若遇到问题仅通过重启即可完成快速卸载;这个能力在推广的前期发挥了巨大的作用;毕竟安全第一,信任也需逐步建立。\n随着应用的接入、使用,我们也逐渐遇到了一些问题,这里按照时间递增的顺序将问题和优化效果快速的介绍给大家,更多技术原理的内容计划在【SkyWalking(v8.5.0)调优系列】补充。开始之前有几个事项要说明:\n 下文中提到的数字仅代表我司的情况,标注的Segment数量是处理这个问题的那段时间的情况,并不是说达到这个数量才开始出现这个现象。 这些数值以及当时的现象,受到宿主机配置、Segment数据的大小、存储处理能力等多种因素的影响;请关注调整的过程和效果,不必把数字和现象对号入座哈。  6.1 启动耗时: 问题: 
有同事反馈应用启动变慢,排查发现容器中多数应用启动的总耗时,在接入SkyWalking前是2秒,接入后变成了16秒以上,公司很多核心应用的实例数很多,这样的启动损耗对它们的发布影响太大。\n优化:  记录启动耗时并随着其他启动数据上报到服务端,方便查看对比。 优化Kafka Reporter的启动过程,将启动耗时减少了3-4秒。 优化类匹配和增强环节(重点)后,容器中的应用启动总耗时从之前16秒以上降低到了3秒内。 梳理Kafka 启动和上报的过程中,顺带调整了Agent端的数据上报到kafka的分区选择策略,将一个JVM实例中的数据全部发送到同一个的分区中,如此在L1层的聚合就完成了JVM实例级的Metric聚合,需注意调整Kafka分片数来保证负载均衡。  6.2 kafka积压-6亿segment/天 问题: SkyWalking OAP端消费慢,导致Kafka中Segment积压。未能达到能用的目标。\n优化: 从SkyWalking OAP端的监控指标中没有定位出哪个环节的问题,把服务端单集群拆为双集群,即把 Mixed角色的集群 ,修改为 Receiver 角色(接收和L1聚合)的集群 ,并加入 Aggregation角色(L2聚合)的集群,调整成了双集群模式,数据流传如下图所示:\n6.3 kafka积压-8亿segment/天 问题: SkyWalking OAP端消费慢,导致Kafka中Segment积压,监控指标能看出是在ES存储环节慢,未能达到能用的目标。\n优化:  优化segment保存到ES的批处理过程,调整BulkProcessor的线程数和批处理大小。 优化metrics保存到ES的批处理过程,调整批处理的时间间隔、线程数、批处理大小以及刷盘时间。  6.4 kafka积压-20亿segment/天 问题: Aggregation集群的实例持续Full GC,Receiver集群通过grpc 给Aggregation集群发送metric失败。未能达到能用的目标。\n优化:  增加ES节点、分片,效果不明显。 ES集群有压力,但无法精准定位出是什么数据的什么操作引发的。采用分治策略,尝试将数据拆分,从OAP服务端读写逻辑调整,将ES单集群拆分为 trace集群 和 metric集群;之后对比ES的监控指标明确看出是metric集群读写压力太大。  优化Receiver集群metric的L1聚合,完成1分钟的数据聚合后,再提交给Aggregation集群做L2聚合。 Aggregation集群metric的L2 聚合是基于db实现的,会有 空读-写-再读-累加-更新写 这样的逻辑,每次写都会有读,调整逻辑是:提升读的性能,优化缓存机制减少读的触发;调整间隔,避免触发累加和更新。 将metric批量写ES操作调整成BulkProcessor。 ES的metric集群 使用SSD存储,增加节点数和分片数。  这一次的持续优化具有里程碑式的意义,Kafka消费很快,OAP各机器的Full GC没了,ES的各方面指标也很稳定;接下来开始优化查询,提升易用性。\n6.5 trace查询慢-25亿segment/天 问题: Web端【追踪】页中的查询都很慢,仅保存了15天的数据,按照traceId查询耗时要20多秒,按照条件查询trace列表的耗时更糟糕;这给人的感受就是“一肚子墨水倒不出来”,未能达到好用的目标。\n优化: ES查询优化方面的信息挺多,但通过百度筛选出解决此问题的有效方案,就要看咱家爱犬的品类了;当时搜集整理了并尝试了N多优化条款,可惜没有跟好运偶遇,结论是颜值不可靠。言归正传,影响读写性能的基本要素有3个:读写频率,数据规模,硬件性能;trace的情况从这三个维度来套一套模板:\n   要素 trace的情况 备注     读写频率 宏观来看是写多读少的状况    数据规模 按照每天50亿个segment来算,半个月是750亿,1个月是1500亿。    硬件性能 普通硬盘速度一般     这个分析没有得出具有指导意义的结论,读写频率这里粒度太粗,用户的使用情况跟时间也有紧密的关系,情况大概是:\n 当天的数据是读多写多(当天不断有新数据写入,基于紧急响应的需求,问题出现时可能是近实时的排查处理)。 前一天的数据是读多写少(一般也会有问题隔天密集上报的情况,0点后会有前一天数据延迟到达的情况)。 再早的话无新数据写入,数据越早被读的概率也越小。  基于以上分析,增加时间维度并细化更多的参考因素后,分析模型变成了这样:\n   要素 当天 当天-1 当天-2 ~ 当天-N     写频率 多 少 无   读(查询)频率 多 多 少   读响应速度要求 快 快 慢点也行   数据规模 50亿 50亿 50亿* (N-2)   宿主机性能要求 高 高 次高   硬盘速度要求 高(SSD) 高(SSD) 次高(机械)   硬件成本 高 高 次高   期望成本 低 低 低    从上表可以看出,整体呈现出hot-warm数据架构的需求之势,近1-2天为hot数据,之前的为warm数据;恰好ES7提供了hot-warm架构支持,按照hot-warm改造后架构如下图所示:\n 恰逢公司ES中台调优版的ES发布,其内置的ZSTD压缩算法 空间压缩效果非常显著。 对 trace集群进行hot-warm架构调整,查询耗时从20多秒变成了2-3秒,效果是非常明显的。 从查询逻辑进一步调整,充分利用ES的数据分片、路由机制,把全量检索调整为精准检索,即降低检索时需要扫描的数据量,把2-3秒优化到毫秒。  这里要炫一个5毛特效,这套机制下,Segment数据即使是保留半年的,按照TraceId查询的耗时也是毫秒。\n至此完成了查询千亿级Trace数据只要毫秒级耗时的阶段性优化。\n6.6 仪表盘和拓扑查询慢 问题: Web端的【拓扑】页,在开始只有几十个应用的时候,虽然很慢,但还是能看到数据,随着应用增多后,【拓扑】页面数据请求一直是超时(配置的60s超时)的,精力有限,先通过功能降级把这个页面隐藏了;【仪表盘】的指标查询也非常的慢,未能达到好用的目标。\n优化: Web端的【仪表盘】页和【拓扑】页是对SkyWalking里metric数据的展现,metric数据同trace数据一样满足hot-warm的特征。\n metric集群采用hot-warm架构调整,之后仪表盘中的查询耗时也都减小为毫秒级。 【拓扑】页接口依然是超时(60s),对拓扑这里做了几个针对性的调整:  把内部的循环调用合并,压缩调用次数。 去除非必要的查询。 拆分隔离通用索引中的数据,避免互相干扰。 全量检索调整为精准检索,即降低检索时需要扫描的数据量。    至此完成了拓扑页数据查询毫秒级耗时的阶段性优化。\n6.7 小结 SkyWalking调优这个阶段,恰逢上海疫情封城,既要为生存抢菜,又要翻阅学习着各种ES原理、调优的文档资料,一行一行反复的品味思考SkyWalking相关的源码,尝试各种方案去优化它,梦中都在努力提升它的性能。疫情让很多人变得焦虑烦躁,但以我的感受来看在系统的性能压力下疫情不值一提。凡事贵在坚持,时间搞定了诸多困难,调优的效果是很显著的。\n可能在业务价值驱动的价值观中这些技术优化不产生直接业务价值,顶多是五毛特效,但从其他维度来看它价值显著:\n 对个人来说,技术有提升。 对团队来说,实战练兵提升战力,团队协作加深友情;特别感谢ES中台这段时间的鼎力支持! 
对公司来说,易用性的提升将充分发挥SkyWalking的价值,在问题发生时,给到同事们切实、高效的帮助,使得问题可以被快速响应;须知战争拼的是保障。  这期间其实也是有考虑过其他的2个方案的:\n 使用降低采样率的兜底方案;但为了得到更准确的指标数据,以及后续其他的规划而坚持了全采样。 采用ClickHouse优化存储;因为公司有定制优化的ES版本,所以就继续在ES上做存储优化,刚好借此机会验证一下。后续【全链路结构化日志】的存储会使用ClickHouse。  这个章节将内容聚焦在落地推广时期技术层面的准备和调优,未描述团队协调、推广等方面的情况;因每个公司情况不同,所以并未提及;但其实对多数公司来说,有些项目的推广比技术本身可能难度更大,这个项目也遇到过一些困难,PM去推广是既靠能力又靠颜值, 以后有机会再与大家探讨。\n7、未来 H5、Native以及后端应用都在持续接入中,相应的SDK也在不断的迭代;目前正在基于已建立的链路通道,完善【全链路业务状态追踪】和【全链路结构化日志追踪】,旨在给运营、客服、运维、开发等服务在一线的同事们提供多视角一站式的观测平台,全方位提升系统服务质量、提高问题响应速度。\n","title":"SkyWalking on the way - 平安健康千亿级的全链路追踪系统的建设与实践","url":"/zh/2022-08-30-pingan-jiankang/"},{"content":"Observability essential when working with distributed systems. Built on 3 pillars of metrics, logging and tracing, having the right tools in place to quickly identify and determine the root cause of an issue in production is imperative. In this Kongcast interview, we explore the benefits of having observability and demo the use of Apache SkyWalking. We walk through the capabilities that SkyWalking offers out of the box and debug a common HTTP 500 error using the tool.\nAndrew Kew is interviewed by Viktor Gamov, a developer advocate at Kong Inc\nAndrew is a highly passionate technologist with over 16 valuable years experience in building server side and cloud applications. Having spent the majority of his time in the Financial Services domain, his meritocratic rise to CTO of an Algorithmic Trading firm allowed him to not only steer the business from a technology standpoint, but build robust and scalable trading algorithms. His mantra is \u0026ldquo;right first time\u0026rdquo;, thus ensuring the projects or clients he is involved in are left in a better place than they were before he arrived.\nHe is the founder of a boutique software consultancy in the United Kingdom, QuadCorps Ltd, working in the API and Integration Ecosystem space and is currently on a residency programme at Kong Inc as a senior field engineer and technical account manager working across many of their enterprise strategic accounts.\n  ","title":"[Video] Distributed tracing demo using Apache SkyWalking and Kong API Gateway","url":"/blog/2022-08-11-kongcast-20-distributed-tracing-using-skywalking-kong/"},{"content":"SkyWalking Rust 0.3.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Update README.md by @wu-sheng in https://github.com/apache/skywalking-rust/pull/24 Improve errors. by @jmjoy in https://github.com/apache/skywalking-rust/pull/25 Add tracer. by @jmjoy in https://github.com/apache/skywalking-rust/pull/26 Move e2e to workspace. by @jmjoy in https://github.com/apache/skywalking-rust/pull/27 Auto finalize context and span when dropped. by @jmjoy in https://github.com/apache/skywalking-rust/pull/28 Add context capture and continued methods. by @jmjoy in https://github.com/apache/skywalking-rust/pull/29 Bump to 0.3.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/30  ","title":"Release Apache SkyWalking Rust 0.3.0","url":"/events/release-apache-skywalking-rust-0-3-0/"},{"content":"SkyWalking NodeJS 0.5.1 is released. 
Go to downloads page to find release tars.\nSkyWalking NodeJS 0.5.1 is a patch release that fixed a vulnerability(CVE-2022-36127) in all previous versions \u0026lt;=0.5.0, we recommend all users who are using versions \u0026lt;=0.5.0 should upgrade to this version.\nThe vulnerability could cause NodeJS services that has this agent installed to be unavailable if the header includes an illegal SkyWalking header, such as\n OAP is unhealthy and the downstream service\u0026rsquo;s agent can\u0026rsquo;t establish the connection. Some sampling mechanism is activated in downstream agents.  ","title":"[CVE-2022-36127] Release Apache SkyWalking for NodeJS 0.5.1","url":"/events/release-apache-skywalking-nodejs-0-5-1/"},{"content":"SkyWalking Eyes 0.4.0 is released. Go to downloads page to find release tars.\n Reorganize GHA by header and dependency. (#123) Add rust cargo support for dep command. (#121) Support license expression in dep check. (#120) Prune npm packages before listing all dependencies (#119) Add support for multiple licenses in the header config section (#118) Add excludes to license resolve config (#117) maven: set group:artifact as dependency name and extend functions in summary template (#116) Stablize summary context to perform consistant output (#115) Add custom license urls for identification (#114) Lazy initialize GitHub client for comment (#111) Make license identifying threshold configurable (#110) Use Google\u0026rsquo;s licensecheck to identify licenses (#107) dep: short circuit if user declare dep license (#108)  ","title":"Release Apache SkyWalking Eyes 0.4.0","url":"/events/release-apache-skywalking-eyes-0-4-0/"},{"content":"SkyWalking NodeJS 0.5.0 is released. Go to downloads page to find release tars.\n Bump up grpc-node to 1.6.7 to fix CVE-2022-25878 (#85) Fix issue #9165 express router entry duplicated (#84) Fix skywalking s3 upload error #8824 (#82) Improved ignore path regex (#81) Upgrade data collect protocol (#78) Fix wrong instance properties (#77) Fix wrong command in release doc (#76)  ","title":"Release Apache SkyWalking for NodeJS 0.5.0","url":"/events/release-apache-skywalking-nodejs-0-5-0/"},{"content":"SkyWalking Infra E2E 1.2.0 is released. Go to downloads page to find release tars.\nFeatures  Expand kind file path with system environment. Support shutdown service during setup phase in compose mode. Expand kind file path with system environment. Support arbitrary os and arch. Support docker-compose v2 container naming. Support installing via go install and add install doc. Add retry when delete kind cluster. Upgrade to go1.18.  Bug Fixes  Fix the problem of parsing verify.retry.interval without setting value.  Documentation  Make trigger.times parameter doc more clear.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Infra E2E 1.2.0","url":"/events/release-apache-skywalking-infra-e2e-1-2-0/"},{"content":"SkyWalking Python 0.8.0 is released. Go to downloads page to find release tars.\n  Feature:\n Update mySQL plugin to support two different parameter keys. 
(#186) Add a SW_AGENT_LOG_REPORTER_SAFE_MODE option to control the HTTP basic auth credential filter (#200)    Plugins:\n Add Psycopg(3.x) support (#168) Add MySQL support (#178) Add FastAPI support (#181) Drop support for flask 1.x due to dependency issue in Jinja2 and EOL (#195) Add Bottle support (#214)    Fixes:\n Spans now correctly reference finished parents (#161) Remove potential password leak from Aiohttp outgoing url (#175) Handle error when REMOTE_PORT is missing in Flask (#176) Fix sw-rabbitmq TypeError when there are no headers (#182) Fix agent bootstrap traceback not shown in sw-python CLI (#183) Fix local log stack depth overridden by agent log formatter (#192) Fix typo that cause user sitecustomize.py not loaded (#193) Fix instance property wrongly shown as UNKNOWN in OAP (#194) Fix multiple components inconsistently named on SkyWalking UI (#199) Fix SW_AGENT_LOGGING_LEVEL not properly set during startup (#196) Unify the http tag name with other agents (#208) Remove namespace to instance properties and add pid property (#205) Fix the properties are not set correctly (#198) Improved ignore path regex (#210) Fix sw_psycopg2 register_type() (#211) Fix psycopg2 register_type() second arg default (#212) Enhance Traceback depth (#206) Set spans whose http code \u0026gt; 400 to error (#187)    Docs:\n Add a FAQ doc on how to use with uwsgi (#188)    Others:\n Refactor current Python agent docs to serve on SkyWalking official website (#162) Refactor SkyWalking Python to use the CLI for CI instead of legacy setup (#165) Add support for Python 3.10 (#167) Move flake configs all together (#169) Introduce another set of flake8 extensions (#174) Add E2E test coverage for trace and logging (#199) Now Log reporter cause_exception_depth traceback limit defaults to 10 Enable faster CI by categorical parallelism (#170)    ","title":"Release Apache SkyWalking Python 0.8.0","url":"/events/release-apache-skywalking-python-0-8-0/"},{"content":"SkyWalking Satellite 1.0.1 is released. Go to downloads page to find release tars.\nFeatures Bug Fixes  Fix metadata messed up when transferring Log data.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 1.0.1","url":"/events/release-apache-skwaylking-satellite-1-0-1/"},{"content":"Content Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. 
Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. 
Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. 
At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. 
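As a minimal sketch of how that hookup is typically configured when installing the mesh (assuming the OAP is reachable at an address like skywalking-oap.istio-system:11800; adjust this to your own deployment), Envoy can be pointed at SkyWalking like this:
istioctl install -y --set profile=demo --set meshConfig.enableEnvoyAccessLogService=true --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800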
Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF","url":"/blog/2022-07-05-pinpoint-service-mesh-critical-performance-impact-by-using-ebpf/"},{"content":"SkyWalking Rust 0.2.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  add a description to compile in README.md by @Shikugawa in https://github.com/apache/skywalking-rust/pull/16 Update NOTICE to 2022 by @wu-sheng in https://github.com/apache/skywalking-rust/pull/17 fix ignore /e2e/target folder by @tisonkun in https://github.com/apache/skywalking-rust/pull/18 Remove Cargo.lock, update dependencies, update submodule, disable build grpc server api. by @jmjoy in https://github.com/apache/skywalking-rust/pull/19 Enhance Trace Context machenism. by @jmjoy in https://github.com/apache/skywalking-rust/pull/20 chore(typo): fix typo in context/propagation/context.rs by @CherishCai in https://github.com/apache/skywalking-rust/pull/21 Feature(tonic-build): set tonic-build.build_server(false), do not build Server code. by @CherishCai in https://github.com/apache/skywalking-rust/pull/22 Rename crate name skywalking_rust to skywalking? by @jmjoy in https://github.com/apache/skywalking-rust/pull/23  ","title":"Release Apache SkyWalking Rust 0.2.0","url":"/events/release-apache-skywalking-rust-0-2-0/"},{"content":"B站视频地址\n","title":"阿里云 - 可观测技术峰会 2022 - More than Tracing Logging Metrics","url":"/zh/2022-06-23-more-than-tracing-logging-metrics/"},{"content":"SkyWalking Java Agent 8.11.0 is released. Go to downloads page to find release tars. Changes by Version\n8.11.0  Fix cluster and namespace value duplicated(namespace value) in properties report. Add layer field to event when reporting. Remove redundant shade.package property. Add servicecomb-2.x plugin and Testcase. Fix NPE in gateway plugin when the timer triggers webflux webclient call. Add an optional plugin, trace-sampler-cpu-policy-plugin, which could disable trace collecting in high CPU load. Change the dateformat of logs to yyyy-MM-dd HH:mm:ss.SSS(was yyyy-MM-dd HH:mm:ss:SSS). Fix NPE in elasticsearch plugin. Grpc plugin support trace client async generic call(without grpc stubs), support Method type: UNARY、SERVER_STREAMING. Enhance Apache ShenYu (incubating) plugin: support trace grpc,sofarpc,motan,tars rpc proxy. Add primary endpoint name to log events. Fix Span not finished in gateway plugin when the gateway request timeout. 
Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report. Fix tcnative libraries relocation for aarch64. Add plugin.jdbc.trace_sql_parameters into Configuration Discovery Service. Fix argument type name of Array in postgresql-8.x-plugin from java.lang.String[] to [Ljava.lang.String; Add type name checking in ArgumentTypeNameMatch and ReturnTypeNameMatch Highlight ArgumentTypeNameMatch and ReturnTypeNameMatch type naming rule in docs/en/setup/service-agent/java-agent/Java-Plugin-Development-Guide.md Fix FileWriter scheduled task NPE Optimize gRPC Log reporter to set service name for the first element in the streaming.(No change for Kafka reporter)  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.11.0","url":"/events/release-apache-skywalking-java-agent-8-11-0/"},{"content":"SkyWalking Rover 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Support OFF_CPU Profiling. Introduce the BTFHub module. Update to using frequency mode to ON_CPU Profiling. Add logs in the profiling module logical.  Bug Fixes  Fix docker based process could not be detected.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Rover 0.2.0","url":"/events/release-apache-skwaylking-rover-0-2-0/"},{"content":"SkyWalking 9.1.0 is released. Go to downloads page to find release tars.\n eBPF agent(skywalking rover) is integrated in the first time  BanyanDB(skywalking native database) is integrated and passed MVP phase. On-demand logs are provided first time in skywalking for all mesh services and k8s deployment as a zero cost log solution  Zipkin alternative is being official, and Zipkin\u0026rsquo;s HTTP APIs are supported as well as lens UI.  Changes by Version Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. 
Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  
UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.1.0","url":"/events/release-apache-skywalking-apm-9.1.0/"},{"content":"SkyWalking BanyanDB 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  BanyanD is the server of BanyanDB  TSDB module. It provides the primary time series database with a key-value data module. Stream module. It implements the stream data model\u0026rsquo;s writing. Measure module. It implements the measure data model\u0026rsquo;s writing. Metadata module. It implements resource registering and property CRUD. Query module. It handles the querying requests of stream and measure. Liaison module. It\u0026rsquo;s the gateway to other modules and provides access endpoints to clients.   gRPC based APIs Document  API reference Installation instrument Basic concepts   Testing  UT E2E with Java Client and OAP    ","title":"Release Apache SkyWalking BanyanDB 0.1.0","url":"/events/release-apache-skywalking-banyandb-0-1-0/"},{"content":"SkyWalking BanyanDB 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support Measure, Stream and Property Query and Write APIs Support Metadata Management APIs for Measure, Stream, IndexRule and IndexRuleBinding  Chores  Set up GitHub actions to check code styles, licenses, and tests.  ","title":"Release Apache SkyWalking BanyanDB Java Client 0.1.0","url":"/events/release-apache-skywalking-banyandb-java-client-0-1-0/"},{"content":"SkyWalking Rover 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support detect processes in scanner or kubernetes mode. Support profiling C, C++, Golang, and Rust service.  
Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Rover 0.1.0","url":"/events/release-apache-skwaylking-rover-0-1-0/"},{"content":"SkyWalking Satellite 1.0.0 is released. Go to downloads page to find release tars.\nFeatures  Add the compat protocol receiver for the old version of agents. Support transmit the native eBPF Process and Profiling protocol. Change the name of plugin that is not well-named.  Bug Fixes  Fix Metadata lost in the Native Meter protocol.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 1.0.0","url":"/events/release-apache-skwaylking-satellite-1-0-0/"},{"content":"SkyWalking Eyes 0.3.0 is released. Go to downloads page to find release tars.\n  Dependency License\n Fix license check in go library testify (#93)    License Header\n fix command supports more languages:  Add comment style for cmake language (#86) Add comment style for hcl (#89) Add mpl-2.0 header template (#87) Support fix license header for tcl files (#102) Add python docstring comment style (#100) Add comment style for makefile \u0026amp; editorconfig (#90)   Support config license header comment style (#97) Trim leading and trailing newlines before rewrite license header cotent (#94) Replace already existing license header based on pattern (#98) [docs] add the usage for config the license header comment style (#99)    Project\n Obtain default github token in github actions (#82) Add tests for bare spdx license header content (#92) Add github action step summary for better experience (#104) Adds an option to the action to run in fix mode (#84) Provide --summary flag to generate the license summary file (#103) Add .exe suffix to windows binary (#101) Fix wrong file path and exclude binary files in src release (#81) Use t.tempdir to create temporary test directory (#95) Config: fix incorrect log message (#91) [docs] correct spelling mistakes (#96)    ","title":"Release Apache SkyWalking Eyes 0.3.0","url":"/events/release-apache-skywalking-eyes-0-3-0/"},{"content":"目录  SkyWalking和ShenYu介绍 ApacheShenYu插件实现原理 给gRPC插件增加泛化调用追踪并保持兼容 ShenYu网关可观测性实践 总结  1.SkyWalking和ShenYu介绍 1.1 SkyWalking SkyWalking是一个针对微服务、分布式系统、云原生的应用性能监控(APM)和可观测性分析平台(OAP), 拥有强大的功能,提供了多维度应用性能分析手段,包含分布式拓扑图、应用性能指标、分布式链路追踪、日志关联分析和告警。同时还拥有非常丰富的生态。广泛应用于各个公司和开源项目。\n1.2 Apache ShenYu (incubating) Apache ShenYu (incubating)是一个高性能,多协议,易扩展,响应式的API网关。 兼容各种主流框架体系,支持热插拔,用户可以定制化开发,满足用户各种场景的现状和未来需求,经历过大规模场景的锤炼。 支持丰富的协议:Http、Spring Cloud、gRPC、Dubbo、SOFARPC、Motan、Tars等等。\n2.ApacheShenYu插件实现原理 ShenYu的异步和以往接触的异步有一点不一样,是一种全链路异步,每一个插件的执行都是异步的,并且线程切换并不是单一固定的情况(和各个插件实现有关)。 网关会发起各种协议类型的服务调用,现有的SkyWalking插件发起服务调用的时候会创建ExitSpan(同步或异步). 网关接收到请求会创建异步的EntrySpan。 异步的EntrySpan需要和同步或异步的ExitSpan串联起来,否则链路会断。 串联方案有2种:\n 快照传递: 将创建EntrySpan之后的快照通过某种方式传递到创建ExitSpan的线程中。\n目前这种方式应用在异步的WebClient插件中,该插件能接收异步快照。ShenYu代理Http服务或SpringCloud服务便是通过快照传递实现span串联。 LocalSpan中转: 其它RPC类插件不像异步WebClient那样可以接收快照实现串联。尽管你可以改动其它RPC插件让其接收快照实现串联,但不推荐也没必要, 因为可以通过在创建ExitSpan的线程中,创建一个LocalSpan就可以实现和ExitSpan串联,然后将异步的EntrySpan和LocalSpan通过快照传递的方式串联。这样实现完全可以不改动原先插件的代码。  span连接如下图所示:\n也许你会问是否可以在一个通用的插件里面创建LocalSpan,而不是ShenYu RPC插件分别创建一个? 答案是不行,因为需要保证LocalSpan和ExitSpan在同一个线程,而ShenYu是全链路异步. 在实现上创建LocalSpan的代码是复用的。\n3. 
给gRPC插件增加泛化调用追踪并保持兼容 现有的SkyWalking gRPC插件只支持通过存根的方式发起的调用。而对于网关而言并没有proto文件,网关采取的是泛化调用(不通过存根),所以追踪rpc请求,你会发现链路会在网关节点断掉。 在这种情况下,需要让gRPC插件支持泛化调用,而同时需要保持兼容,不影响原先的追踪方式。实现上通过判断请求参数是否是动态消息(DynamicMessage),如果不是则走原先通过存根的追踪逻辑, 如果是则走泛化调用追踪逻辑。另外的兼容则是在gRPC新旧版本的差异,以及获取服务端IP各种情况的兼容,感兴趣的可以看看源码。\n4. ShenYu网关可观测性实践 上面讲解了SkyWalking ShenYu插件的实现原理,下面部署应用看下效果。SkyWalking功能强大,除了了链路追踪需要开发插件外,其它功能强大功能开箱即用。 这里只描述链路追踪和应用性能剖析部分,如果想体验SkyWalking功能的强大,请参考SkyWalking官方文档。\n版本说明:\n skywalking-java: 8.11.0-SNAPSHOT源码构建。说明:shenyu插件会在8.11.0版本发布,可能会在5月或6月初步发布它。Java代理正处于常规发布阶段。 skywalking: 9.0.0 V9 版本  用法说明:\nSkyWalking的设计非常易用,配置和激活插件请参考官方文档。\n SkyWalking Documentation SkyWalking Java Agent Documentation  4.1 向网关发起请求 通过postman客户端或者其它方式向网关发起各种服务请求\n4.2 请求拓扑图  4.3 请求链路(以gRPC为例) 正常链路: 异常链路: 点击链路节点变可以看到对应的节点信息和异常信息\n服务提供者span 网关请求span 4.4 服务指标监控 服务指标监控 4.5 网关后台指标监控 数据库监控: 线程池和连接池监控 4.6 JVM监控 4.7 接口分析 4.8 异常日志和异常链路分析 日志配置见官方文档\n日志监控 异常日志对应的分布式链路追踪详情 5. 总结 SkyWalking在可观测性方面对指标、链路追踪、日志有着非常全面的支持,功能强大,简单易用,专为大型分布式系统、微服务、云原生、容器架构而设计,拥有丰富的生态。 使用SkyWalking为Apache ShenYu (incubating)提供强大的可观测性支持,让ShenYu如虎添翼。最后,如果你对高性能响应式网关感兴趣,可以关注 Apache ShenYu (incubating) 。 同时感谢SkyWalking这么优秀的开源软件对行业所作的贡献。\n","title":"Apache ShenYu (incubating)插件实现原理和可观测性实践","url":"/zh/2022-05-08-apache-shenyuincubating-integrated-skywalking-practice-observability/"},{"content":"Content  Introduction of SkyWalking and ShenYu Apache ShenYu plugin implementation principle Adding generalized call tracking to the gRPC plugin and keeping it compatible ShenYu Gateway Observability Practice Summary  1. Introduction of SkyWalking and ShenYu 1.1 SkyWalking SkyWalking is an Application Performance Monitoring (APM) and Observability Analysis Platform (OAP) for microservices, distributed systems, and cloud natives, Has powerful features that provide a multi-dimensional means of application performance analysis, including distributed topology diagrams, application performance metrics, distributed link tracing, log correlation analysis and alerts. Also has a very rich ecology. Widely used in various companies and open source projects.\n1.2 Apache ShenYu (incubating) Apache ShenYu (incubating) High-performance,multi-protocol,extensible,responsive API Gateway. Compatible with a variety of mainstream framework systems, support hot plug, users can customize the development, meet the current situation and future needs of users in a variety of scenarios, experienced the temper of large-scale scenes. Rich protocol support: Http, Spring Cloud, gRPC, Dubbo, SOFARPC, Motan, Tars, etc.\n2. Apache ShenYu plugin implementation principle ShenYu\u0026rsquo;s asynchrony is a little different from previous exposure to asynchrony, it is a full-link asynchrony, the execution of each plug-in is asynchronous, and thread switching is not a single fixed situation (and the individual plug-in implementation is related). The gateway initiates service calls of various protocol types, and the existing SkyWalking plugins create ExitSpan (synchronous or asynchronous) when they initiate service calls. The gateway receives the request and creates an asynchronous EntrySpan. The asynchronous EntrySpan needs to be concatenated with the synchronous or asynchronous ExitSpan, otherwise the link will be broken.\nThere are 2 types of tandem solutions:\n Snapshot Delivery:\nPass the snapshot after creating the EntrySpan to the thread that created the ExitSpan in some way.\nCurrently this approach is used in the asynchronous WebClient plugin, which can receive asynchronous snapshots. 
shenYu proxy Http service or SpringCloud service is to achieve span concatenation through snapshot passing. LocalSpan transit:\nOther RPC class plugins do not receive snapshots for concatenation like Asynchronous WebClient. Although you can modify other RPC plugins to receive snapshots for concatenation, it is not recommended or necessary to do so. This can be achieved by creating a LocalSpan in the thread where the ExitSpan is created, and then connecting the asynchronous EntrySpan and LocalSpan by snapshot passing. This can be done without changing the original plugin code.  The span connection is shown below:\nYou may ask if it is possible to create LocalSpan inside a generic plugin, instead of creating one separately for ShenYu RPC plugin? The answer is no, because you need to ensure that LocalSpan and ExitSpan are in the same thread, and ShenYu is fully linked asynchronously. The code to create LocalSpan is reused in the implementation.\n3. Adding generalized call tracking to the gRPC plugin and keeping it compatible The existing SkyWalking gRPC plugin only supports calls initiated by way of stubs. For the gateway there is no proto file, the gateway takes generalized calls (not through stubs), so tracing RPC requests, you will find that the link will break at the gateway node. In this case, it is necessary to make the gRPC plugin support generalized calls, while at the same time needing to remain compatible and not affect the original tracing method. This is achieved by determining whether the request parameter is a DynamicMessage, and if it is not, then the original tracing logic through the stub is used. If not, then the original tracing logic via stubs is used, and if not, then the generalized call tracing logic is used. The other compatibility is the difference between the old and new versions of gRPC, as well as the compatibility of various cases of obtaining server-side IP, for those interested in the source code.\n4. ShenYu Gateway Observability Practice The above explains the principle of SkyWalking ShenYu plug-in implementation, the following deployment application to see the effect. SkyWalking powerful, in addition to the link tracking requires the development of plug-ins, other powerful features out of the box. Here only describe the link tracking and application performance analysis part, if you want to experience the power of SkyWalking features, please refer to the SkyWalking official documentation.\nVersion description:\n skywalking-java: 8.11.0-SNAPSHOT source code build. Note: The shenyu plugin will be released in version 8.11.0, and will probably release it initially in May or June. the Java agent is in the regular release phase. skywalking: 9.0.0 V9 version  Usage instructions:\nSkyWalking is designed to be very easy to use. 
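If the ShenYu plugin ships as an optional plugin in the agent distribution you build (whether it is optional or enabled by default depends on the release), activating it follows the usual SkyWalking Java agent convention of moving the jar from optional-plugins into plugins, for example (the jar name here is illustrative):
mv skywalking-agent/optional-plugins/apm-shenyu-*-plugin-*.jar skywalking-agent/plugins/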
Please refer to the official documentation for configuring and activating the shenyu plugin.\n SkyWalking Documentation SkyWalking Java Agent Documentation  4.1 Sending requests to the gateway Initiate various service requests to the gateway via the postman client or other means.\n4.2 Request Topology Diagram   4.3 Request Trace (in the case of gRPC) Normal Trace: Abnormal Trace: Click on the link node to see the corresponding node information and exception information\nService Provider Span Gateway request span 4.4 Service Metrics Monitoring 4.5 Gateway background metrics monitoring Database Monitoring: Thread pool and connection pool monitoring: 4.6 JVM Monitoring 4.7 Endpoint Analysis 4.8 Exception log and exception link analysis See official documentation for log configuration\nLog monitoring Distributed link trace details corresponding to exception logs 5. Summary SkyWalking has very comprehensive support for metrics, link tracing, and logging in observability, and is powerful, easy to use, and designed for large distributed systems, microservices, cloud-native, container architectures, and has a rich ecosystem. Using SkyWalking to provide powerful observability support for Apache ShenYu (incubating) gives ShenYu a boost. Finally, if you are interested in high-performance responsive gateways, you can follow Apache ShenYu (incubating). Also, thanks to SkyWalking such an excellent open source software to the industry contributions.\n","title":"Apache ShenYu(incubating) plugin implementation principles and observability practices","url":"/blog/2022-05-08-apache-shenyuincubating-integrated-skywalking-practice-observability/"},{"content":"SkyWalking Kubernetes Event Exporter 1.0.0 is released. Go to downloads page to find release tars.\n Add Apache SkyWalking exporter to export events into SkyWalking OAP. Add console exporter for debugging purpose.  ","title":"Release Apache SkyWalking Kubernetes Event Exporter 1.0.0","url":"/events/release-apache-skywalking-kubernetes-event-exporter-1.0.0/"},{"content":"content:  Introduction Features Install SWCK Deploy a demo application Verify the injector Concluding remarks  1. Introduction 1.1 What\u0026rsquo;s SWCK? SWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nIn fact, SWCK is an operator developed based on kubebuilder, providing users with Custom Resources ( CR ) and controllers for managing resources ( Controller ), all CustomResourceDefinitions(CRDs)are as follows:\n JavaAgent OAP UI Storage Satellite Fetcher  1.2 What\u0026rsquo;s the java agent injector? For a java application, users need to inject the java agent into the application to get metadata and send it to the SkyWalking backend. To make users use the java agent more natively, we propose the java agent injector to inject the java agent sidecar into a pod. The java agent injector is actually a Kubernetes Mutation Webhook Controller. The controller intercepts pod events and applies mutations to the pod if annotations exist within the request.\n2. Features   Transparent. User’s applications generally run in normal containers while the java agent runs in the init container, and both belong to the same pod. Each container in the pod mounts a shared memory volume that provides a storage path for the java agent. When the pod starts, the java agent in the init container will run before the application container, and the injector will store the java agent file in the shared memory volume. 
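A minimal sketch of that flow, assuming illustrative paths rather than the exact values the injector generates: the init container copies the agent distribution into the volume that both containers mount, and the application is then effectively launched with the standard -javaagent flag pointing at that path.
cp -r /skywalking/agent /shared-volume/skywalking-agent # runs in the init container
java -javaagent:/shared-volume/skywalking-agent/skywalking-agent.jar -jar app.jar # effective start command of the application container
In practice the injector performs both steps automatically.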
When the application container starts, the injector injects the agent file into the application by setting the JVM parameter. Users can inject the java agent in this way without rebuilding the container image containing the java agent.\n  Configurability. The injector provides two ways to configure the java agent: global configuration and custom configuration. The default global configuration is stored in the configmap, you can update it as your own global configuration, such as backend_service. In addition, you can also set custom configuration for some applications via annotation, such as “service_name”. For more information, please see java-agent-injector.\n  Observability. For each injected java agent, we provide CustomDefinitionResources called JavaAgent to observe the final agent configuration. Please refer to javaagent to get more details.\n  3. Install SWCK In the next steps, we will show how to build a stand-alone Kubernetes cluster and deploy the 0.6.1 version of SWCK on the platform.\n3.1 Tool Preparation Firstly, you need to install some tools as follows:\n kind, which is used to create a stand-alone Kubernetes cluster. kubectl, which is used to communicate with the Kubernetes cluster.  3.2 Install stand-alone Kubernetes cluster After installing kind , you could use the following command to create a stand-alone Kubernetes cluster.\n Notice! If your terminal is configured with a proxy, you need to close it before the cluster is created to avoid some errors.\n $ kind create cluster --image=kindest/node:v1.19.1 After creating a cluster, you can get the pods as below.\n$ kubectl get pod -A NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-f9fd979d6-57xpc 1/1 Running 0 7m16s kube-system coredns-f9fd979d6-8zj8h 1/1 Running 0 7m16s kube-system etcd-kind-control-plane 1/1 Running 0 7m23s kube-system kindnet-gc9gt 1/1 Running 0 7m16s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 7m23s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 7m23s kube-system kube-proxy-6zbtb 1/1 Running 0 7m16s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 7m23s local-path-storage local-path-provisioner-78776bfc44-jwwcs 1/1 Running 0 7m16s 3.3 Install certificates manger(cert-manger) The certificates of SWCK are distributed and verified by the certificate manager. You need to install the cert-manager through the following command.\n$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml Verify whether cert-manager is installed successfully.\n$ kubectl get pod -n cert-manager NAME READY STATUS RESTARTS AGE cert-manager-7dd5854bb4-slcmd 1/1 Running 0 73s cert-manager-cainjector-64c949654c-tfmt2 1/1 Running 0 73s cert-manager-webhook-6bdffc7c9d-h8cfv 1/1 Running 0 73s 3.4 Install SWCK The java agent injector is a component of the operator, so please follow the next steps to install the operator first.\n Get the deployment yaml file of SWCK and deploy it.  $ curl -Ls https://archive.apache.org/dist/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz | tar -zxf - -O ./config/operator-bundle.yaml | kubectl apply -f - Check SWCK as below.  $ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-7f64f996fc-qh8s9 2/2 Running 0 94s 3.5 Install Skywalking components — OAPServer and UI  Deploy the OAPServer and UI in the default namespace.  
$ kubectl apply -f https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml Check the OAPServer.  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS default 1 1 default-oap.default Check the UI.  $ kubectl get ui NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS default 1 1 default-ui.default [80] 4. Deploy a demo application In the third step, we have installed SWCK and related Skywalking components. Next, we will show how to use the java agent injector in SWCK through two java application examples in two ways: global configuration and custom configuration.\n4.1 Set the global configuration When we have installed SWCK, the default configuration is the configmap in the system namespace, we can get it as follows.\n$ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. In the cluster created by kind, the backend_service may not be correct, we need to use the real OAPServer\u0026rsquo;s address default-oap.default to replace the default 127.0.0.1, so we can edit the configmap as follow.\n$ kubectl edit configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system configmap/skywalking-swck-java-agent-configmap edited $ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:default-oap.default:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. 4.2 Set the custom configuration In some cases, we need to use the Skywalking component to monitor different java applications, so the agent configuration of different applications may be different, such as the name of the application, and the plugins that the application needs to use, etc. Next, we will take two simple java applications developed based on spring boot and spring cloud gateway as examples for a detailed description. You can use the source code to build the image.\n# build the springboot and springcloudgateway image  $ git clone https://github.com/dashanji/swck-spring-cloud-k8s-demo $ cd swck-spring-cloud-k8s-demo \u0026amp;\u0026amp; make # check the image $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE gateway v0.0.1 51d16251c1d5 48 minutes ago 723MB app v0.0.1 62f4dbcde2ed 48 minutes ago 561MB # load the image into the cluster $ kind load docker-image app:v0.0.1 \u0026amp;\u0026amp; kind load docker-image gateway:v0.0.1 4.3 deploy spring boot application  Create the springboot-system namespace.  $ kubectl create namespace springboot-system Label the springboot-systemnamespace to enable the java agent injector.  
$ kubectl label namespace springboot-system swck-injection=enabled
Deploy the corresponding deployment file springboot.yaml for the Spring Boot application, which uses annotations to override the default agent configuration, such as service_name.
Notice! Before using annotations to override the agent configuration, you need to add strategy.skywalking.apache.org/agent.Overlay: "true" to make the override take effect.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-springboot
  namespace: springboot-system
spec:
  selector:
    matchLabels:
      app: demo-springboot
  template:
    metadata:
      labels:
        swck-java-agent-injected: "true"  # enable the java agent injector
        app: demo-springboot
      annotations:
        strategy.skywalking.apache.org/agent.Overlay: "true"  # enable the agent overlay
        agent.skywalking.apache.org/agent.service_name: "backend-service"
    spec:
      containers:
        - name: springboot
          imagePullPolicy: IfNotPresent
          image: app:v0.0.1
          command: ["java"]
          args: ["-jar", "/app.jar"]
---
apiVersion: v1
kind: Service
metadata:
  name: demo
  namespace: springboot-system
spec:
  type: ClusterIP
  ports:
    - name: 8085-tcp
      port: 8085
      protocol: TCP
      targetPort: 8085
  selector:
    app: demo-springboot
Deploy the Spring Boot application in the springboot-system namespace.
$ kubectl apply -f springboot.yaml
Check the deployment.
$ kubectl get pod -n springboot-system
NAME READY STATUS RESTARTS AGE
demo-springboot-7c89f79885-dvk8m 1/1 Running 0 11s
Get the final injected Java agent configuration through the JavaAgent resource.
$ kubectl get javaagent -n springboot-system
NAME PODSELECTOR SERVICENAME BACKENDSERVICE
app-demo-springboot-javaagent app=demo-springboot backend-service default-oap.default:11800
4.4 Deploy the Spring Cloud Gateway application
Create the gateway-system namespace.
$ kubectl create namespace gateway-system
Label the gateway-system namespace to enable the Java agent injector.
$ kubectl label namespace gateway-system swck-injection=enabled
Deploy the corresponding deployment file springgateway.yaml for the Spring Cloud Gateway application, which also uses annotations to override the default agent configuration, such as service_name. In addition, when using Spring Cloud Gateway we need to add the Spring Cloud Gateway plugin to the agent configuration.
Notice! Before using annotations to override the agent configuration, you need to add strategy.skywalking.apache.org/agent.Overlay: "true" to make the override take effect.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: demo-gateway
  name: demo-gateway
  namespace: gateway-system
spec:
  selector:
    matchLabels:
      app: demo-gateway
  template:
    metadata:
      labels:
        swck-java-agent-injected: "true"
        app: demo-gateway
      annotations:
        strategy.skywalking.apache.org/agent.Overlay: "true"
        agent.skywalking.apache.org/agent.service_name: "gateway-service"
        optional.skywalking.apache.org: "cloud-gateway-3.x"  # add spring cloud gateway plugin
    spec:
      containers:
        - image: gateway:v0.0.1
          name: gateway
          command: ["java"]
          args: ["-jar", "/gateway.jar"]
---
apiVersion: v1
kind: Service
metadata:
  name: service-gateway
  namespace: gateway-system
spec:
  type: ClusterIP
  ports:
    - name: 9999-tcp
      port: 9999
      protocol: TCP
      targetPort: 9999
  selector:
    app: demo-gateway
Deploy the Spring Cloud Gateway application in the gateway-system namespace.
$ kubectl apply -f springgateway.yaml
Check the deployment (the sketch below shows roughly what the injector has added to the pod).
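As background, the webhook rewrites the pod spec rather than the image. A minimal sketch of the mutated part of such a pod follows, assuming a shared emptyDir volume and a JVM option injected through an environment variable; the image tag, copy command, volume name, mount path and variable name are assumptions for illustration, and only the init container name inject-skywalking-agent comes from the pod events shown later.

spec:
  initContainers:
    - name: inject-skywalking-agent          # copies the agent files into the shared volume
      image: apache/skywalking-java-agent:8.8.0-java8   # hypothetical image tag
      command: ["sh", "-c", "cp -r /skywalking/agent /sky/"]   # illustrative copy step
      volumeMounts:
        - name: sky-agent
          mountPath: /sky
  containers:
    - name: gateway
      env:
        - name: JAVA_TOOL_OPTIONS            # assumed mechanism for "setting the JVM parameter"
          value: -javaagent:/sky/agent/skywalking-agent.jar
      volumeMounts:
        - name: sky-agent
          mountPath: /sky
  volumes:
    - name: sky-agent
      emptyDir: {}

In this way the agent files reach the application container through the shared volume, without the image ever being rebuilt.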
$ kubectl get pod -n gateway-system
NAME READY STATUS RESTARTS AGE
demo-gateway-5bb77f6d85-9j7c6 1/1 Running 0 15s
Get the final injected Java agent configuration through the JavaAgent resource.
$ kubectl get javaagent -n gateway-system
NAME PODSELECTOR SERVICENAME BACKENDSERVICE
app-demo-gateway-javaagent app=demo-gateway gateway-service default-oap.default:11800
5. Verify the injector
After completing the above steps, we can view the detailed state of an injected pod, such as the injected agent container.
# get all injected pods
$ kubectl get pod -A -lswck-java-agent-injected=true
NAMESPACE NAME READY STATUS RESTARTS AGE
gateway-system demo-gateway-5bb77f6d85-lt4z7 1/1 Running 0 69s
springboot-system demo-springboot-7c89f79885-lkb5j 1/1 Running 0 75s
# view detailed state of the injected pod [demo-springboot]
$ kubectl describe pod -l app=demo-springboot -n springboot-system
...
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
...
Normal Created 91s kubelet,kind-control-plane Created container inject-skywalking-agent
Normal Started 91s kubelet,kind-control-plane Started container inject-skywalking-agent
...
Normal Created 90s kubelet,kind-control-plane Created container springboot
Normal Started 90s kubelet,kind-control-plane Started container springboot
# view detailed state of the injected pod [demo-gateway]
$ kubectl describe pod -l app=demo-gateway -n gateway-system
...
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
...
Normal Created 2m20s kubelet,kind-control-plane Created container inject-skywalking-agent
Normal Started 2m20s kubelet,kind-control-plane Started container inject-skywalking-agent
...
Normal Created 2m20s kubelet,kind-control-plane Created container gateway
Normal Started 2m20s kubelet,kind-control-plane Started container gateway
Now we can expose the services and watch the data in the web UI. First, get the gateway service and the UI service as follows.
$ kubectl get service service-gateway -n gateway-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service-gateway ClusterIP 10.99.181.145 <none> 9999/TCP 9m19s
$ kubectl get service default-ui
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default-ui ClusterIP 10.111.39.250 <none> 80/TCP 82m
Then open two terminals to expose the services service-gateway and default-ui.
$ kubectl port-forward service/service-gateway -n gateway-system 9999:9999
Forwarding from 127.0.0.1:9999 -> 9999
Forwarding from [::1]:9999 -> 9999
$ kubectl port-forward service/default-ui 8090:80
Forwarding from 127.0.0.1:8090 -> 8080
Forwarding from [::1]:8090 -> 8080
Use the following command to access the Spring Boot demo 10 times through the Spring Cloud Gateway service.
$ for i in {1..10}; do curl http://127.0.0.1:9999/gateway/hello && echo ""; done
Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World!
We can open the dashboard at http://127.0.0.1:8090. The topology of all services is shown below, as well as the trace information of gateway-service and of backend-service.
6. Concluding remarks
If your application is deployed on Kubernetes and requires SkyWalking for monitoring, SWCK can help you deploy, upgrade and maintain the SkyWalking components in the Kubernetes cluster.
In addition to this blog, you can also view swck document and Java agent injector documentation for more information. If you find this project useful, please give SWCK a star! If you have any questions, welcome to ask in Issues or Discussions.\n","title":"How to use the java agent injector?","url":"/blog/2022-04-19-how-to-use-the-java-agent-injector/"},{"content":"目录  介绍 主要特点 安装SWCK 部署demo应用 验证注入器 结束语  1. 介绍 1.1 SWCK 是什么? SWCK是部署在 Kubernetes 环境中,为 Skywalking 用户提供服务的平台,用户可以基于该平台使用、升级和维护 SkyWalking 相关组件。\n实际上,SWCK 是基于 kubebuilder 开发的Operator,为用户提供自定义资源( CR )以及管理资源的控制器( Controller ),所有的自定义资源定义(CRD)如下所示:\n JavaAgent OAP UI Storage Satellite Fetcher  1.2 java 探针注入器是什么? 对于 java 应用来说,用户需要将 java 探针注入到应用程序中获取元数据并发送到 Skywalking 后端。为了让用户在 Kubernetes 平台上更原生地使用 java 探针,我们提供了 java 探针注入器,该注入器能够将 java 探针通过 sidecar 方式注入到应用程序所在的 pod 中。 java 探针注入器实际上是一个Kubernetes Mutation Webhook控制器,如果请求中存在 annotations ,控制器会拦截 pod 事件并将其应用于 pod 上。\n2. 主要特点  透明性。用户应用一般运行在普通容器中而 java 探针则运行在初始化容器中,且两者都属于同一个 pod 。该 pod 中的每个容器都会挂载一个共享内存卷,为 java 探针提供存储路径。在 pod 启动时,初始化容器中的 java 探针会先于应用容器运行,由注入器将其中的探针文件存放在共享内存卷中。在应用容器启动时,注入器通过设置 JVM 参数将探针文件注入到应用程序中。用户可以通过这种方式实现 java 探针的注入,而无需重新构建包含 java 探针的容器镜像。 可配置性。注入器提供两种方式配置 java 探针:全局配置和自定义配置。默认的全局配置存放在 configmap 中,用户可以根据需求修改全局配置,比如修改 backend_service 的地址。此外,用户也能通过 annotation 为特定应用设置自定义的一些配置,比如不同服务的 service_name 名称。详情可见 java探针说明书。 可观察性。每个 java 探针在被注入时,用户可以查看名为 JavaAgent 的 CRD 资源,用于观测注入后的 java 探针配置。详情可见 JavaAgent说明。  3. 安装SWCK 在接下来的几个步骤中,我们将演示如何从0开始搭建单机版的 Kubernetes 集群,并在该平台部署0.6.1版本的 SWCK。\n3.1 工具准备 首先,你需要安装一些必要的工具,如下所示:\n kind,用于创建单机版 Kubernetes集群。 kubectl,用于和Kubernetes 集群交互。  3.2 搭建单机版 Kubernetes集群 在安装完 kind 工具后,可通过如下命令创建一个单机集群。\n 注意!如果你的终端配置了代理,在运行以下命令之前最好先关闭代理,防止一些意外错误的发生。\n $ kind create cluster --image=kindest/node:v1.19.1 在集群创建完毕后,可获得如下的pod信息。\n$ kubectl get pod -A NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-f9fd979d6-57xpc 1/1 Running 0 7m16s kube-system coredns-f9fd979d6-8zj8h 1/1 Running 0 7m16s kube-system etcd-kind-control-plane 1/1 Running 0 7m23s kube-system kindnet-gc9gt 1/1 Running 0 7m16s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 7m23s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 7m23s kube-system kube-proxy-6zbtb 1/1 Running 0 7m16s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 7m23s local-path-storage local-path-provisioner-78776bfc44-jwwcs 1/1 Running 0 7m16s 3.3 安装证书管理器(cert-manger) SWCK 的证书都是由证书管理器分发和验证,需要先通过如下命令安装证书管理器cert-manger。\n$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml 验证 cert-manger 是否安装成功。\n$ kubectl get pod -n cert-manager NAME READY STATUS RESTARTS AGE cert-manager-7dd5854bb4-slcmd 1/1 Running 0 73s cert-manager-cainjector-64c949654c-tfmt2 1/1 Running 0 73s cert-manager-webhook-6bdffc7c9d-h8cfv 1/1 Running 0 73s 3.4 安装SWCK java 探针注入器是 SWCK 中的一个组件,首先需要按照如下步骤安装 SWCK:\n 输入如下命令获取 SWCK 的 yaml 文件并部署在 Kubernetes 集群中。  $ curl -Ls https://archive.apache.org/dist/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz | tar -zxf - -O ./config/operator-bundle.yaml | kubectl apply -f - 检查 SWCK 是否正常运行。  $ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-7f64f996fc-qh8s9 2/2 Running 0 94s 3.5 安装 Skywalking 组件 — OAPServer 和 UI  在 default 命名空间中部署 OAPServer 组件和 UI 组件。  $ kubectl apply -f https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml 查看 OAPServer 组件部署情况。  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS default 1 1 default-oap.default 查看 
UI 组件部署情况。  $ kubectl get ui NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS default 1 1 default-ui.default [80] 4. 部署demo应用 在第3个步骤中,我们已经安装好 SWCK 以及相关的 Skywalking 组件,接下来按照全局配置以及自定义配置两种方式,通过两个 java 应用实例,分别演示如何使用 SWCK 中的 java 探针注入器。\n4.1 设置全局配置 当 SWCK 安装完成后,默认的全局配置就会以 configmap 的形式存储在系统命令空间中,可通过如下命令查看。\n$ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. 在 kind 创建的 Kubernetes 集群中, SkyWalking 后端地址和 configmap 中指定的地址可能不同,我们需要使用真正的 OAPServer 组件的地址 default-oap.default 来代替默认的 127.0.0.1 ,可通过修改 configmap 实现。\n$ kubectl edit configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system configmap/skywalking-swck-java-agent-configmap edited $ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:default-oap.default:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. 4.2 设置自定义配置 在实际使用场景中,我们需要使用 Skywalking 组件监控不同的 java 应用,因此不同应用的探针配置可能有所不同,比如应用的名称、应用需要使用的插件等。为了支持自定义配置,注入器提供 annotation 来覆盖默认的全局配置。接下来我们将分别以基于 spring boot 以及 spring cloud gateway 开发的两个简单java应用为例进行详细说明,你可以使用这两个应用的源代码构建镜像。\n# build the springboot and springcloudgateway image  $ git clone https://github.com/dashanji/swck-spring-cloud-k8s-demo $ cd swck-spring-cloud-k8s-demo \u0026amp;\u0026amp; make # check the image $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE gateway v0.0.1 51d16251c1d5 48 minutes ago 723MB app v0.0.1 62f4dbcde2ed 48 minutes ago 561MB # load the image into the cluster $ kind load docker-image app:v0.0.1 \u0026amp;\u0026amp; kind load docker-image gateway:v0.0.1 4.3 部署 spring boot 应用  创建 springboot-system 命名空间。  $ kubectl create namespace springboot-system 给 springboot-system 命名空间打上标签使能 java 探针注入器。  $ kubectl label namespace springboot-system swck-injection=enabled 接下来为 spring boot 应用对应的部署文件 springboot.yaml ,其中使用了 annotation 覆盖默认的探针配置,比如 service_name ,将其覆盖为 backend-service 。   需要注意的是,在使用 annotation 覆盖探针配置之前,需要增加 strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; 来使覆盖生效。\n apiVersion:apps/v1kind:Deploymentmetadata:name:demo-springbootnamespace:springboot-systemspec:selector:matchLabels:app:demo-springboottemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;# enable the java agent injectorapp:demo-springbootannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;# enable the agent overlayagent.skywalking.apache.org/agent.service_name:\u0026#34;backend-service\u0026#34;spec:containers:- name:springbootimagePullPolicy:IfNotPresentimage:app:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:demonamespace:springboot-systemspec:type:ClusterIPports:- 
name:8085-tcpport:8085protocol:TCPtargetPort:8085selector:app:demo-springboot在 springboot-system 命名空间中部署 spring boot 应用。  $ kubectl apply -f springboot.yaml 查看部署情况。  $ kubectl get pod -n springboot-system NAME READY STATUS RESTARTS AGE demo-springboot-7c89f79885-dvk8m 1/1 Running 0 11s 通过 JavaAgent 查看最终注入的 java 探针配置。  $ kubectl get javaagent -n springboot-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-springboot-javaagent app=demo-springboot backend-service default-oap.default:11800 4.4 部署 spring cloud gateway 应用  创建 gateway-system 命名空间。  $ kubectl create namespace gateway-system 给 gateway-system 命名空间打上标签使能 java 探针注入器。  $ kubectl label namespace gateway-system swck-injection=enabled 接下来为 spring cloud gateway 应用对应的部署文件 springgateway.yaml ,其中使用了 annotation 覆盖默认的探针配置,比如 service_name ,将其覆盖为 gateway-service 。此外,在使用 spring cloud gateway 时,我们需要在探针配置中添加 spring cloud gateway 插件。   需要注意的是,在使用 annotation 覆盖探针配置之前,需要增加 strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; 来使覆盖生效。\n apiVersion:apps/v1kind:Deploymentmetadata:labels:app:demo-gatewayname:demo-gatewaynamespace:gateway-systemspec:selector:matchLabels:app:demo-gatewaytemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo-gatewayannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;gateway-service\u0026#34;optional.skywalking.apache.org:\u0026#34;cloud-gateway-3.x\u0026#34;# add spring cloud gateway pluginspec:containers:- image:gateway:v0.0.1name:gatewaycommand:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/gateway.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:service-gatewaynamespace:gateway-systemspec:type:ClusterIPports:- name:9999-tcpport:9999protocol:TCPtargetPort:9999selector:app:demo-gateway在 gateway-system 命名空间中部署 spring cloud gateway 应用。  $ kubectl apply -f springgateway.yaml 查看部署情况。  $ kubectl get pod -n gateway-system NAME READY STATUS RESTARTS AGE demo-gateway-758899c99-6872s 1/1 Running 0 15s 通过 JavaAgent 获取最终注入的java探针配置。  $ kubectl get javaagent -n gateway-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-gateway-javaagent app=demo-gateway gateway-service default-oap.default:11800 5. 验证注入器  当完成上述步骤后,我们可以查看被注入pod的详细状态,比如被注入的agent容器。  # get all injected pod $ kubectl get pod -A -lswck-java-agent-injected=true NAMESPACE NAME READY STATUS RESTARTS AGE gateway-system demo-gateway-5bb77f6d85-lt4z7 1/1 Running 0 69s springboot-system demo-springboot-7c89f79885-lkb5j 1/1 Running 0 75s # view detailed state of the injected pod [demo-springboot] $ kubectl describe pod -l app=demo-springboot -n springboot-system ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- ... Normal Created 91s kubelet,kind-control-plane Created container inject-skywalking-agent Normal Started 91s kubelet,kind-control-plane Started container inject-skywalking-agent ... Normal Created 90s kubelet,kind-control-plane Created container springboot Normal Started 90s kubelet,kind-control-plane Started container springboot # view detailed state of the injected pod [demo-gateway]  $ kubectl describe pod -l app=demo-gateway -n gateway-system ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- ... Normal Created 2m20s kubelet,kind-control-plane Created container inject-skywalking-agent Normal Started 2m20s kubelet,kind-control-plane Started container inject-skywalking-agent ... 
Normal Created 2m20s kubelet,kind-control-plane Created container gateway Normal Started 2m20s kubelet,kind-control-plane Started container gateway 现在我们可以将服务绑定在某个端口上并通过 web 浏览器查看采样数据。首先,我们需要通过以下命令获取gateway服务和ui服务的信息。  $ kubectl get service service-gateway -n gateway-system NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service-gateway ClusterIP 10.99.181.145 \u0026lt;none\u0026gt; 9999/TCP 9m19s $ kubectl get service default-ui NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default-ui ClusterIP 10.111.39.250 \u0026lt;none\u0026gt; 80/TCP 82m 接下来分别启动2个终端将service-gateway 以及 default-ui 绑定到本地端口上,如下所示:  $ kubectl port-forward service/service-gateway -n gateway-system 9999:9999 Forwarding from 127.0.0.1:9999 -\u0026gt; 9999 Forwarding from [::1]:9999 -\u0026gt; 9999 $ kubectl port-forward service/default-ui 8090:80 Forwarding from 127.0.0.1:8090 -\u0026gt; 8080 Forwarding from [::1]:8090 -\u0026gt; 8080 使用以下命令通过spring cloud gateway 网关服务暴露的端口来访问 spring boot 应用服务。  $ for i in {1..10}; do curl http://127.0.0.1:9999/gateway/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! 我们可以在 web 浏览器中输入 http://127.0.0.1:8090 来访问探针采集到的数据。  所有服务的拓扑图如下所示。  查看 gateway-service 网关服务的 trace 信息。  查看 backend-service 应用服务的 trace 信息。  6. 结束语 如果你的应用部署在 Kubernetes 平台中,且需要 Skywalking 提供监控服务, SWCK 能够帮助你部署、升级和维护 Kubernetes 集群中的 Skywalking 组件。除了本篇博客外,你还可以查看 SWCK文档 以及 java探针注入器文档 获取更多的信息。如果你觉得这个项目好用,请给 SWCK 一个star! 如果你有任何疑问,欢迎在Issues或者Discussions中提出。\n","title":"如何使用java探针注入器?","url":"/zh/2022-04-19-how-to-use-the-java-agent-injector/"},{"content":"Apache SkyWalking 是中国首个,也是目前唯一的个人开源的 Apache 顶级项目。\n作为一个针对分布式系统的应用性能监控 APM 和可观测性分析平台, SkyWalking 提供了媲美商业APM/监控的功能。\nCSDN云原生系列在线峰会第4期,特邀SkyWalking创始人、Apache基金会首位中国董事、Tetrate创始工程师吴晟担任出品人,推出SkyWalking峰会。\nSkyWalking峰会在解读SkyWalking v9新特性的同时,还将首发解密APM的专用数据库BanyanDB,以及分享SkyWalking在原生eBPF探针、监控虚拟机和Kubernetes、云原生函数计算可观测性等方面的应用实践。\n峰会议程:\n14:00-14:30 开场演讲:SkyWalking v9解析 吴晟 Tetrate 创始工程师、Apache 基金会首位中国董事\n14:30-15:00 首发解密:APM的专用数据库BanyanDB\n高洪涛 Tetrate 创始工程师\n15:00-15:30 SkyWalking 原生eBPF探针展示\n刘晗 Tetrate 工程师\n15:30-16:00 Apache SkyWalking MAL实践-监控虚拟机和Kubernetes\n万凯 Tetrate 工程师\n16:00-16:30 SkyWalking助力云原生函数计算可观测\n霍秉杰 青云科技 资深架构师\n峰会视频 B站视频地址\n","title":"Apache SkyWalking 2022 峰会","url":"/zh/2022-04-18-meeting/"},{"content":"SkyWalking Java Agent 8.10.0 is released. Go to downloads page to find release tars. Changes by Version\n8.10.0  [Important] Namespace represents a subnet, such as kubernetes namespace, or 172.10... Make namespace concept as a part of service naming format. [Important] Add cluster concept, also as a part of service naming format. The cluster name would be  Add as {@link #SERVICE_NAME} suffix. Add as exit span\u0026rsquo;s peer, ${CLUSTER} / original peer Cross Process Propagation Header\u0026rsquo;s value addressUsedAtClient[index=8] (Target address of this request used on the client end).   Support Undertow thread pool metrics collecting. Support Tomcat thread pool metric collect. Remove plugin for ServiceComb Java Chassis 0.x Add Guava EventBus plugin. Fix Dubbo 3.x plugin\u0026rsquo;s tracing problem. Fix the bug that maybe generate multiple trace when invoke http request by spring webflux webclient. Support Druid Connection pool metrics collecting. Support HikariCP Connection pool metrics collecting. Support Dbcp2 Connection pool metrics collecting. Ignore the synthetic constructor created by the agent in the Spring patch plugin. 
Add witness class for vertx-core-3.x plugin. Add witness class for graphql plugin. Add vertx-core-4.x plugin. Renamed graphql-12.x-plugin to graphql-12.x-15.x-plugin and graphql-12.x-scenario to graphql-12.x-15.x-scenario. Add graphql-16plus plugin. [Test] Support to configure plugin test base images. [Breaking Change] Remove deprecated agent.instance_properties configuration. Recommend agent.instance_properties_json. The namespace and cluster would be reported as instance properties, keys are namespace and cluster. Notice, if instance_properties_json includes these two keys, they would be overrided by the agent core. [Breaking Change] Remove the namespace from cross process propagation key. Make sure the parent endpoint in tracing context from existing first ENTRY span, rather than first span only. Fix the bug that maybe causing memory leak and repeated traceId when use gateway-2.1.x-plugin or gateway-3.x-plugin. Fix Grpc 1.x plugin could leak context due to gRPC cancelled. Add JDK ThreadPoolExecutor Plugin. Support default database(not set through JDBC URL) in mysql-5.x plugin.  Documentation  Add link about java agent injector. Update configurations doc, remove agent.instance_properties[key]=value. Update configurations doc, add agent.cluster and update agent.namespace.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.10.0","url":"/events/release-apache-skywalking-java-agent-8-10-0/"},{"content":"Introduction  The most profound technologies are those that disappear. They weave themselves into the fabric of everyday life until they are indistinguishable from it. - Mark Weiser\n Mark Weiser prophetically argued in the late 1980s, that the most far-reaching technologies are those which vanish into thin air. According to Weiser, \u0026ldquo;Whenever people learn something sufficiently well, they cease to be aware of it.\u0026rdquo; This disappearing act, as Weiser claimed, is not limited to technology but rather human psychology. It is this very experience that allows us to escape lower-level thinking into higher-level thinking. For once we are no longer impeded by mundane details, we are then free to focus on new goals.\nThis realization becomes more relevant as APMs become increasingly popular. As more applications are deployed with APMs, the number of abstract representations of the underlying source code also increases. While this provides great value to many non-development roles within an organization, it does pose additional challenges to those in development roles who must translate these representations into concepts they can work with (i.e. source code). Weiser sums this difficultly up rather succinctly when he states that \u0026ldquo;Programmers should no more be asked to work without access to source code than auto-mechanics should be asked to work without looking at the engine.\u0026rdquo;\nStill, APMs collect more information only to produce a plethora of new abstract representations. In this article, we will introduce a new concept in Source++, the open-source live-coding platform, specifically designed to allow developers to monitor production applications more intuitively.\nLive Views  And we really don\u0026rsquo;t understand even yet, hundreds of metrics later, what make a program easier to understand or modify or reuse or borrow. I don\u0026rsquo;t think we\u0026rsquo;ll find out by looking away from programs to their abstract interfaces. The answers are in the source code. 
- Mark Weiser\n As APMs move from the \u0026ldquo;nice to have\u0026rdquo; category to the \u0026ldquo;must-have\u0026rdquo; category, there is a fundamental feature holding them back from ubiquity. They must disappear from consciousness. As developers, we should feel no impulse to open our browsers to better understand the underlying source code. The answers are literally in the source code. Instead, we should improve our tools so the source code conveniently tells us what we need to know. Think of how simple life could be if failing code always indicated how and why it failed. This is the idea behind Source++.\nIn our last blog post, we discussed Extending Apache SkyWalking with non-breaking breakpoints. In that post, we introduced a concept called Live Instruments, which developers can use to easily debug live production applications without leaving their IDE. Today, we will discuss how existing SkyWalking installations can be integrated into your IDE via a new concept called Live Views. Unlike Live Instruments, which are designed for debugging live applications, Live Views are designed for increasing application comprehension and awareness. This is accomplished through a variety of commands which are input into the Live Command Palette.\nLive Command Palette The Live Command Palette (LCP) is a contextual command prompt, included in the Source++ JetBrains Plugin, that allows developers to control and query live applications from their IDE. Opened via keyboard shortcut (Ctrl+Shift+S), the LCP allows developers to easily view metrics relevant to the source code they\u0026rsquo;re currently viewing. The following Live View commands are currently supported:\nCommand: view (overview/activity/traces/logs) The view commands display contextual popups with live operational data of the current source code. These commands allow developers to view traditional SkyWalking operational data filtered down to the relevant metrics.\nCommand: watch log The watch log command allows developers to follow individual log statements of a running application in real-time. This command allows developers to negate the need for manually scrolling through the logs to find instances of a specific log statement.\nCommand: (show/hide) quick stats The show quick stats command displays live endpoint metrics for a quick idea of an endpoint\u0026rsquo;s activity. Using this command, developers can quickly assess the status of an endpoint and determine if the endpoint is performing as expected.\nFuture Work  A good tool is an invisible tool. By invisible, I mean that the tool does not intrude on your consciousness; you focus on the task, not the tool. Eyeglasses are a good tool \u0026ndash; you look at the world, not the eyeglasses. - Mark Weiser\n Source++ aims to extend SkyWalking in such a way that SkyWalking itself becomes invisible. To accomplish this, we plan to support custom developer commands. Developers will be able to build customized commands for themselves, as well as commands to share with their team. These commands will recognize context, types, and conditions allowing for a wide possibility of operations. As more commands are added, developers will be able to expose everything SkyWalking has to offer while focusing on what matters most, the source code.\nIf you find these features useful, please consider giving Source++ a try. You can install the plugin directly from your JetBrains IDE, or through the JetBrains Marketplace. If you have any issues or questions, please open an issue. 
Feedback is always welcome!\n","title":"Integrating Apache SkyWalking with source code","url":"/blog/2022-04-14-integrating-skywalking-with-source-code/"},{"content":"Read this post in original language: English\n介绍  最具影响力的技术是那些消失的技术。他们交织在日常生活中,直到二者完全相融。 - 马克韦瑟\n 马克韦瑟在 1980 年代后期预言,影响最深远的技术是那些消失在空气中的技术。\n“当人们足够熟知它,就不会再意识到它。”\n正如韦瑟所说,这种消失的现象不只源于技术,更是人类的心理。 正是这种经验使我们能够摆脱对底层的考量,进入更高层次的思考。 一旦我们不再被平凡的细枝末节所阻碍,我们就可以自如地专注于新的目标。\n随着 APM(应用性能管理系统) 变得越来越普遍,这种认识变得更加重要。随着更多的应用程序开始使用 APM 部署,底层源代码抽象表示的数量也在同步增加。 虽然这为组织内的许多非开发角色提供了巨大的价值,但它确实也对开发人员提出了额外的挑战 - 他们必须将这些表示转化为可操作的概念(即源代码)。 对此,韦瑟相当简洁的总结道,“就像不应要求汽车机械师在不查看引擎的情况下工作一样,我们不应要求程序员在不访问源代码的情况下工作”。\n尽管如此,APM 收集更多信息只是为了产生充足的新抽象表示。 在本文中,我们将介绍开源实时编码平台 Source++ 中的一个新概念,旨在让开发人员更直观地监控生产应用程序。\n实时查看  我们尚且不理解在收集了数百个指标之后,是什么让程序更容易理解、修改、重复使用或借用。 我不认为我们能够通过原理程序本身而到它们的抽象接口中找到答案。答案就在源代码之中。 - 马克韦瑟\n 随着 APM 从“有了更好”转变为“必须拥有”,有一个基本特性阻碍了它们的普及。 它们必须从意识中消失。作为开发人员,我们不应急于打开浏览器以更好地理解底层源代码,答案就在源代码中。 相反,我们应该改进我们的工具,以便源代码直观地告诉我们需要了解的内容。 想想如果失败的代码总是表明它是如何以及为什么失败的,生活会多么简单。这就是 Source++ 背后的理念。\n在我们的上一篇博客中,我们讨论了不间断断点 Extending Apache SkyWalking。 我们介绍了一个名为 Live Instruments(实时埋点) 的概念,开发人员可以使用它轻松调试实时生产应用程序,而无需离开他们的开发环境。 而今天,我们将讨论如何通过一个名为 Live Views(实时查看)的新概念将现有部署的 SkyWalking 集成到您的 IDE 中。 与专为调试实时应用程序而设计的 Live Instruments (实时埋点) 不同,Live Views(实时查看)旨在提高对应用程序的理解和领悟。 这将通过输入到 Live Command Palette (实时命令面板) 中的各种命令来完成。\n实时命令面板 Live Command Palette (LCP) 是一个当前上下文场景下的命令行面板,这个组件包含在 Source++ JetBrains 插件中,它允许开发人员从 IDE 中直接控制和对实时应用程序发起查询。\nLCP 通过键盘快捷键 (Ctrl+Shift+S) 打开,允许开发人员轻松了解与他们当前正在查看的源代码相关的运行指标。\n目前 LCP 支持以下实时查看命令:\n命令:view(overview/activity/traces/Logs)- 查看 总览/活动/追踪/日志 view 查看命令会展示一个与当前源码的实时运维数据关联的弹窗。 这些命令允许开发人员查看根据相关指标过滤的传统 SkyWalking 的运维数据。\n命令:watch log - 实时监听日志 本日志命令允许开发人员实时跟踪正在运行的应用程序的每一条日志。 通过此命令开发人员无需手动查阅大量日志就可以查找特定日志语句的实例。\n命令:(show/hide) quick stats (显示/隐藏)快速统计 show quick stats 显示快速统计命令显示实时端点指标,以便快速了解端点的活动。 使用此命令,开发人员可以快速评估端点的状态并确定端点是否按预期正常运行。\n未来的工作  好工具是无形的。我所指的无形,是指这个工具不会侵入你的意识; 你专注于任务,而不是工具。 眼镜就是很好的工具——你看的是世界,而不是眼镜。 - 马克韦瑟\n Source++ 旨在扩展 SkyWalking,使 SkyWalking 本身变得无需感知。 为此,我们计划支持自定义的开发人员命令。 开发人员将能够构建自定义命令,以及与团队共享的命令。 这些命令将识别上下文、类型和条件,从而允许广泛的操作。 随着更多命令的添加,开发人员将能够洞悉 SkyWalking 所提供的所有功能,同时专注于最重要的源码。\n如果您觉得这些功能有用,请考虑尝试使用 Source++。 您可以通过 JetBrains Marketplace 或直接从您的 JetBrains IDE 安装插件。 如果您有任何疑问,请到这提 issue。\n欢迎随时反馈!\n","title":"将 Apache SkyWalking 与源代码集成","url":"/zh/2022-04-14-integrating-skywalking-with-source-code/"},{"content":"随着无人驾驶在行业的不断发展和技术的持续革新,规范化、常态化的真无人运营逐渐成为事实标准,而要保障各个场景下的真无人业务运作,一个迫切需要解决的现状就是业务链路长,出现问题难以定位。本文由此前于 KubeSphere 直播上的分享整理而成,主要介绍 SkyWalking 的基本概念和使用方法,以及在无人驾驶领域的一系列实践。\nB站视频地址\n行业背景 驭势科技(UISEE)是国内领先的无人驾驶公司。致力于为全行业、全场景提供 AI 驾驶服务,做赋能出行和物流新生态的 AI 驾驶员。早在三年前, 驭势科技已在机场和厂区领域实现了“去安全员” 无人驾驶常态化运营的重大突破,落地“全场景、真无人、全天候”的自动驾驶技术,并由此迈向大规模商用。要保证各个场景下没有安全员参与的业务运作,我们在链路追踪上做了一系列实践。\n对于无人驾驶来说,从云端到车端的链路长且复杂,任何一层出问题都会导致严重的后果;然而在如下图所示的链路中,准确迅速地定位故障服务并不容易,经常遇到多个服务层层排查的情况。我们希望做到的事情,就是在出现问题以后,能够尽快定位到源头,从而快速解决问题,以绝后患。\n前提条件 SkyWalking 简介 Apache SkyWalking 是一个开源的可观察性平台,用于收集、分析、聚集和可视化来自服务和云原生基础设施的数据。SkyWalking 通过简单的方法,提拱了分布式系统的清晰视图,甚至跨云。它是一个现代的 APM(Application Performence Management),专门为云原生、基于容器的分布式系统设计。它在逻辑上被分成四个部分。探针、平台后端、存储和用户界面。\n 探针收集数据并根据 SkyWalking 的要求重新格式化(不同的探针支持不同的来源)。 平台后端支持数据聚合、分析以及从探针接收数据流的过程,包括 Tracing、Logging、Metrics。 存储系统通过一个开放/可插拔接口容纳 SkyWalking 数据。用户可以选择一个现有的实现,如 ElasticSearch、H2、MySQL、TiDB、InfluxDB,或实现自定义的存储。 UI是一个高度可定制的基于网络的界面,允许 SkyWalking 终端用户可视化和管理 SkyWalking 数据。  综合考虑了对各语言、各框架的支持性、可观测性的全面性以及社区环境等因素,我们选择了 SkyWalking 进行链路追踪。\n链路追踪简介 关于链路追踪的基本概念,可以参看吴晟老师翻译的 OpenTracing 概念和术语 以及 OpenTelemetry。在这里,择取几个重要的概念供大家参考:\n Trace:代表一个潜在的分布式的存在并行数据或者并行执行轨迹的系统。一个 Trace 可以认为是多个 Span 
的有向无环图(DAG)。简单来说,在微服务体系下,一个 Trace 代表从第一个服务到最后一个服务经历的一系列的服务的调用链。   Span:在服务中埋点时,最需要关注的内容。一个 Span 代表系统中具有开始时间和执行时长的逻辑运行单元。举例来说,在一个服务发出请求时,可以认为是一个 Span 的开始;在这个服务接收到上游服务的返回值时,可以认为是这个 Span 的结束。Span 之间通过嵌套或者顺序排列建立逻辑因果关系。在 SkyWalking 中,Span 被区分为:  LocalSpan:服务内部调用方法时创建的 Span 类型 EntrySpan:请求进入服务时会创建的 Span 类型(例如处理其他服务对于本服务接口的调用) ExitSpan:请求离开服务时会创建的 Span 类型(例如调用其他服务的接口)   TraceSegment:SkyWalking 中的概念,介于 Trace 和 Span 之间,是一条 Trace 的一段,可以包含多个 Span。一个 TraceSegment 记录了一个线程中的执行过程,一个 Trace 由一个或多个 TraceSegment 组成,一个 TraceSegment 又由一个或多个 Span 组成。 SpanContext:代表跨越进程上下文,传递到下级 Span 的状态。一般包含 Trace ID、Span ID 等信息。 Baggage:存储在 SpanContext 中的一个键值对集合。它会在一条追踪链路上的所有 Span 内全局传输,包含这些 Span 对应的 SpanContext。Baggage 会随着 Trace 一同传播。  SkyWalking 中,上下文数据通过名为 sw8 的头部项进行传递,值中包含 8 个字段,由 - 进行分割(包括 Trace ID,Parent Span ID 等等) 另外 SkyWalking 中还提供名为 sw8-correlation 的扩展头部项,可以传递一些自定义的信息    快速上手 以 Go 为例,介绍如何使用 SkyWalking 在服务中埋点。\n部署 我们选择使用 Helm Chart 在 Kubernetes 中进行部署。\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 埋点 部署完以后,需要在服务中进行埋点,以生成 Span 数据:主要的方式即在服务的入口和出口创建 Span。在代码中,首先我们会创建一个 Reporter,用于向 SkyWalking 后端发送数据。接下来,我们需要创建一个名为 \u0026quot;example\u0026quot; 的 Tracer 实例。此时,我们就可以使用 Tracer 实例来创建 Span。 在 Go 中,主要利用 context.Context 来创建以及传递 Span。\nimport \u0026#34;github.com/SkyAPM/go2sky\u0026#34; // configure to export to OAP server r, err := reporter.NewGRPCReporter(\u0026#34;oap-skywalking:11800\u0026#34;) if err != nil { log.Fatalf(\u0026#34;new reporter error %v \\n\u0026#34;, err) } defer r.Close() tracer, err := go2sky.NewTracer(\u0026#34;example\u0026#34;, go2sky.WithReporter(r)) 服务内部 在下面的代码片段中,通过 context.background() 生成的 Context 创建了一个 Root Span,同时在创建该 Span 的时候,也会产生一个跟这 个 Span 相关联的 Context。利用这个新的 Context,就可以创建一个与 Root Span 相关联的 Child Span。\n// create root span span, ctx, err := tracer.CreateLocalSpan(context.Background()) // create sub span w/ context above subSpan, newCtx, err := tracer.CreateLocalSpan(ctx) 服务间通信 在服务内部,我们会利用 Context 传的递来进行 Span 的创建。但是如果是服务间通信的话,这也是链路追踪最为广泛的应用场景,肯定是没有办法直接传递 Context 参数的。这种情况下,应该怎么做呢?一般来说,SkyWalking 会把 Context 中与当前 Span 相关的键值对进行编码,后续在服务通信时进行传递。例如,在 HTTP 协议中,一般利用请求头进行链路传递。再例如 gRPC 协议,一般想到的就是利用 Metadata 进行传递。\n在服务间通信的时候,我们会利用 EntrySpan 和 ExitSpan 进行链路的串联。以 HTTP 请求为例,在创建 EntrySpan 时,会从请求头中获取到 Span 上下文信息。而在 ExitSpan 中,则在请求中注入了上下文。这里的上下文是经过了 SkyWalking 编码后的字符串,以便在服务间进行传递。除了传递 Span 信息,也可以给 Span 打上 Tag 进行标记。例如,记录 HTTP 请求的方法,URL 等等,以便于后续数据的可视化。\n//Extract context from HTTP request header `sw8` span, ctx, err := tracer.CreateEntrySpan(r.Context(), \u0026#34;/api/login\u0026#34;, func(key string) (string, error) { return r.Header.Get(key), nil }) // Some operation ... 
// Inject context into HTTP request header `sw8` span, err := tracer.CreateExitSpan(req.Context(), \u0026#34;/service/validate\u0026#34;, \u0026#34;tomcat-service:8080\u0026#34;, func(key, value string) error { req.Header.Set(key, value) return nil }) // tags span.Tag(go2sky.TagHTTPMethod, req.Method) span.Tag(go2sky.TagURL, req.URL.String()) 但是,我们可能也会用到一些不那么常用的协议,比如说 MQTT 协议。在这些情况下,应该如何传递上下文呢?关于这个问题,我们在自定义插件的部分做了实践。\nUI 经过刚才的埋点以后,就可以在 SkyWalking 的 UI 界面看到调用链。SkyWalking 官方提供了一个 Demo 页面,有兴趣可以一探究竟:\n UI http://demo.skywalking.apache.org\nUsername skywalking Password skywalking\n 插件体系 如上述埋点的方式,其实是比较麻烦的。好在 SkyWalking 官方提供了很多插件,一般情况下,直接接入插件便能达到埋点效果。SkyWalking 官方为多种语言都是提供了丰富的插件,对一些主流框架都有插件支持。由于我们部门使用的主要是 Go 和 Python 插件,下文中便主要介绍这两种语言的插件。同时,由于我们的链路复杂,用到的协议较多,不可避免的是也需要开发一些自定义插件。下图中整理了 Go 与 Python 插件的主要思想,以及我们开发的各框架协议自定义插件的研发思路。\n官方插件 Go · Gin 插件 Gin 是 Go 的 Web 框架,利用其中间件,可以进行链路追踪。由于是接收请求,所以需要在中间件中,创建一个 EntrySpan,同时从请求头中获取 Span 的上下文的信息。获取到上下文信息以后,还需要再进行一步操作:把当前请求请求的上下文 c.Request.Context(), 设置成为刚才创建完 EntrySpan 时生成的 Context。这样一来,这个请求的 Context 就会携带有 Span 上下文信息,可以用于在后续的请求处理中进行后续传递。\nfunc Middleware(engine *gin.Engine, tracer *go2sky.Tracer) gin.HandlerFunc { return func(c *gin.Context) { span, ctx, err := tracer.CreateEntrySpan(c.Request.Context(), getOperationName(c), func(key string) (string, error) { return c.Request.Header.Get(key), nil }) // some operation \tc.Request = c.Request.WithContext(ctx) c.Next() span.End() } } Python · requests Requests 插件会直接修改 Requests 库中的request函数,把它替换成 SkyWalking 自定义的_sw_request函数。在这个函数中,创建了 ExitSpan,并将 ExitSpan 上下文注入到请求头中。在服务安装该插件后,实际调用 Requests 库进行请求的时候,就会携带带有上下文的请求体进行请求。\ndef install(): from requests import Session _request = Session.request def _sw_request(this: Session, method, url, other params...): span = get_context().new_exit_span(op=url_param.path or \u0026#39;/\u0026#39;, peer=url_param.netloc, component=Component.Requests) with span: carrier = span.inject() span.layer = Layer.Http if headers is None: headers = {} for item in carrier: headers[item.key] = item.val span.tag(TagHttpMethod(method.upper())) span.tag(TagHttpURL(url_param.geturl())) res = _request(this, method, url, , other params...n) # some operation return res Session.request = _sw_request 自定义插件 Go · Gorm Gorm 框架是 Go 的 ORM 框架。我们自己在开发的时候经常用到这个框架,因此希望能对通过 Gorm 调用数据库的链路进行追踪。\nGorm 有自己的插件体系,会在数据库的操作前调用BeforeCallback函数,数据库的操作后调用AfterCallback函数。于是在BeforeCallback中,我们创建 ExitSpan,并在AfterCallback里结束先前在BeforeCallback中创建的 ExitSpan。\nfunc (s *SkyWalking) BeforeCallback(operation string) func(db *gorm.DB) { // some operation  return func(db *gorm.DB) { tableName := db.Statement.Table operation := fmt.Sprintf(\u0026#34;%s/%s\u0026#34;, tableName, operation) span, err := tracer.CreateExitSpan(db.Statement.Context, operation, peer, func(key, value string) error { return nil }) // set span from db instance\u0026#39;s context to pass span  db.Set(spanKey, span) } } 需要注意的是,因为 Gorm 的插件分为 Before 与 After 两个 Callback,所以需要在两个回调函数间传递 Span,这样我们才可以在AfterCallback中结束当前的 Span。\nfunc (s *SkyWalking) AfterCallback() func(db *gorm.DB) { // some operation  return func(db *gorm.DB) { // get span from db instance\u0026#39;s context  spanInterface, _ := db.Get(spanKey) span, ok := spanInterface.(go2sky.Span) if !ok { return } defer span.End() // some operation  } } Python · MQTT 在 IoT 领域,MQTT 是非常常用的协议,无人驾驶领域自然也相当依赖这个协议。\n以 Publish 为例,根据官方插件的示例,我们直接修改 paho.mqtt 库中的publish函数,改为自己定义的_sw_publish函数。在自定义函数中,创建 ExitSpan,并将上下文注入到 MQTT 的 Payload 中。\ndef install(): from paho.mqtt.client import Client _publish = Client.publish Client.publish = 
_sw_publish_func(_publish) def _sw_publish_func(_publish): def _sw_publish(this, topic, payload=None, qos=0, retain=False, properties=None): # some operation with get_context().new_exit_span(op=\u0026#34;EMQX/Topic/\u0026#34; + topic + \u0026#34;/Producer\u0026#34; or \u0026#34;/\u0026#34;, peer=peer) as span: carrier = span.inject() span.layer = Layer.MQ span.component = Component.RabbitmqProducer payload = {} if payload is None else json.loads(payload) payload[\u0026#39;headers\u0026#39;] = {} for item in carrier: payload[\u0026#39;headers\u0026#39;][item.key] = item.val # ... return _sw_publish 可能这个方式不是特别优雅:因为我们目前使用 MQTT 3.1 版本,此时尚未引入 Properties 属性(类似于请求头)。直到 MQTT 5.0,才对此有相关支持。我们希望在升级到 MQTT 5.0 以后,能够将上下文注入到 Properties 中进行传递。\n无人驾驶领域的实践 虽然这些插件基本上涵盖了所有的场景,但是链路追踪并不是只要接入插件就万事大吉。在一些复杂场景下,尤其无人驾驶领域的链路追踪,由于微服务架构中涉及的语言环境、中间件种类以及业务诉求通常都比较丰富,导致在接入全链路追踪的过程中,难免遇到各种主观和客观的坑。下面选取了几个典型例子和大家分享。\n【问题一】Kong 网关的插件链路接入 我们的请求在进入服务之前,都会通过 API 网关 Kong,同时我们在 Kong 中定义了一个自定义权限插件,这个插件会调用权限服务接口进行授权。如果只是单独单纯地接入 SkyWalking Kong 插件,对于权限服务的调用无法在调用链中体现。所以我们的解决思路是,直接地在权限插件里进行埋点,而不是使用官方的插件,这样就可以把对于权限服务的调用也纳入到调用链中。\n【问题二】 Context 传递 我们有这样一个场景:一个服务,使用 Gin Web 框架,同时在处理 HTTP 请求时调用上游服务的 gRPC 接口。起初以为只要接入 Gin 的插件以及 gRPC 的插件,这个场景的链路就会轻松地接上。但是结果并不如预期。\n最后发现,Gin 提供一个 Contextc;同时对于某一个请求,可以通过c.Request.Context()获取到请求的 ContextreqCtx,二者不一致;接入 SkyWalking 提供的 Gin 插件后,修改的是reqCtx,使其包含 Span 上下文信息;而现有服务,在 gRPC 调用时传入的 Context 是c,所以一开始 HTTP -\u0026gt; gRPC 无法连接。最后通过一个工具函数,复制了reqCtx的键值对到c后,解决了这个问题。\n【问题三】官方 Python·Redis 插件 Pub/Sub 断路 由于官方提供了 Python ·Redis 插件,所以一开始认为,安装了 Redis 插件,对于一切 Redis 操作,都能互相连接。但是实际上,对于 Pub/Sub 操作,链路会断开。\n查看代码后发现,对于所有的 Redis 操作,插件都创建一个 ExitSpan;也就是说该插件其实仅适用于 Redis 作缓存等情况;但是在我们的场景中,需要进行 Pub/Sub 操作。这导致两个操作都会创建 ExitSpan,而使链路无法相连。通过改造插件,在 Pub 时创建 ExitSpan,在 Sub 时创建 EntrySpan 后,解决该问题。\n【问题四】MQTT Broker 的多种 DataBridge 接入 一般来说,对 MQTT 的追踪链路是 Publisher -\u0026gt; Subscriber,但是在我们的使用场景中,存在 MQTT broker 接收到消息后,通过规则引擎调用其他服务接口这种特殊场景。这便不是 Publisher -\u0026gt; Subscriber,而是 Publisher -\u0026gt; HTTP。\n我们希望能够从 MQTT Payload 中取出 Span 上下文,再注入到 HTTP 的请求头中。然而规则引擎调用接口时,没有办法自定义请求头,所以我们最后的做法是,约定好参数名称,将上下文放到请求体中,在服务收到请求后,从请求体中提取 Context。\n【问题五】Tracing 与 Logging 如何结合 很多时候,只有 Tracing 信息,对于问题排查来说可能还是不充分的,我们非常的期望也能够把 Tracing 和 Logging 进行结合。\n如上图所示,我们会把所有服务的 Tracing 的信息发送到 SkyWalking,同时也会把这个服务产生的日志通过 Fluent Bit 以及 Fluentd 发送到 ElasticSearch。对于这种情况,我们只需要在日志中去记录 Span 的上下文,比如记录 Trace ID 或者 Span ID 等,就可以在 Kibana 里面去进行对于 Trace ID 的搜索,来快速的查看同一次调用链中的日志。\n当然,SkyWalking 它本身也提供了自己的日志收集和分析机制,可以利用 Fluentd 或者 Fluent Bit 等向 SkyWalking 后端发送日志(我们选用了 Fluentd)。当然,像 SkyWalking 后端发送日志的时候,也要符合其日志协议,即可在 UI 上查看相应日志。\n本文介绍了 SkyWalking 的使用方法、插件体系以及实践踩坑等,希望对大家有所帮助。总结一下,SkyWalking 的使用的确是有迹可循的,一般来说我们只要接入插件,基本上可以涵盖大部分的场景,达到链路追踪的目的。但是也要注意,很多时候需要具体问题具体分析,尤其是在链路复杂的情况下,很多地方还是需要根据不同场景来进行一些特殊处理。\n最后,我们正在使用的 FaaS 平台 OpenFunction 近期也接入了 SkyWalking 作为其 链路追踪的解决方案:\nOpenFunction 提供了插件体系,并预先定义了 SkyWalking pre/post 插件;编写函数时,用户无需手动埋点,只需在 OpenFunction 配置文件中简单配置,即可开启 SkyWalking 插件,达到链路追踪的目的。\n 在感叹 OpenFunction 动作迅速的同时,也能够看到 SkyWalking 已成为链路追踪领域的首要选择之一。\n参考资料  OpenTracing 文档:https://wu-sheng.gitbooks.io/opentracing-io/content/pages/spec.html SkyWalking 文档:https://skywalking.apache.org/docs/main/latest/readme/ SkyWalking GitHub:https://github.com/apache/skywalking SkyWalking go2sky GitHub:https://github.com/SkyAPM/go2sky SkyWalking Python GitHub:https://github.com/apache/skywalking-python SkyWalking Helm Chart:https://github.com/apache/skywalking-kubernetes SkyWalking Solution for OpenFunction https://openfunction.dev/docs/best-practices/skywalking-solution-for-openfunction/  ","title":"SkyWalking 
在无人驾驶领域的实践","url":"/zh/2022-04-13-skywalking-in-autonomous-driving/"},{"content":"SkyWalking Client JS 0.8.0 is released. Go to downloads page to find release tars.\n Fix fmp metric. Add e2e tese based on skywaling-infra-e2e. Update metric and events. Remove ServiceTag by following SkyWalking v9 new layer model.  ","title":"Release Apache SkyWalking Client JS 0.8.0","url":"/events/release-apache-skywalking-client-js-0-8-0/"},{"content":"SkyWalking 9.0.0 is released. Go to downloads page to find release tars.\nSkyWalking v9 is the next main stream of the OAP and UI.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nRocketBot UI has officially been replaced by the Booster UI.\nChanges by Version Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. 
Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. 
Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. 
Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.0.0","url":"/events/release-apache-skywalking-apm-9.0.0/"},{"content":"SkyWalking CLI 0.10.0 is released. Go to downloads page to find release tars.\nFeatures  Allow setting start and end with relative time (#128) Add some commands for the browser (#126) Add the sub-command service layer to query services according to layer (#133) Add the sub-command layer list to query layer list (#133) Add the sub-command instance get to query single instance (#134) Add the sub-command endpoint get to query single endpoint info (#134) Change the GraphQL method to the v9 version according to the server version (#134) Add normal field to Service entity (#136) Add the command process for query Process metadata (#137) Add the command profiling ebpf for process ebpf profiling (#138) Support getprofiletasklogs query (#125) Support query list alarms (#127) [Breaking Change] Update the command profile as a sub-command profiling trace, and update profiled-analyze command to analysis (#138) profiling ebpf/trace analysis generates the profiling graph HTML on default and saves it to the current work directory (#138)  Bug Fixes  Fix quick install (#131) Set correct go version in publishing snapshot docker image (#124) Stop build kit container after finishing (#130)  Chores  Add cross platform build targets (#129) Update download host (#132)  ","title":"Release Apache SkyWalking CLI 0.10.0","url":"/events/release-apache-skywalking-cli-0-10-0/"},{"content":"SkyWalking is an open-source APM system, including monitoring, tracing, and diagnosing capabilities for distributed systems in Cloud Native architecture. It covers monitoring for Linux, Kubernetes, Service Mesh, Serverless/Function-as-a-Service, agent-attached services, and browsers. With data covering traces, metrics, logs, and events, SkyWalking is a full-stack observability APM system.\nOpen Source Promotion Plan is a summer program organized and long-term supported by Open Source Software Supply Chain Promotion Plan. It aims to encourage college students to actively participate in developing and maintaining open-source software and promote the vigorous development of an excellent open-source software community.\nApache SkyWalking has been accepted in OSPP 2022\n   Project Description Difficulty Mentor / E-mail Expectation Tech. 
Requirements Repository     SkyAPM-PHP Add switches for monitoring items Advanced Level Yanlong He / heyanlong@apache.org Complete project development work C++, GO, PHP https://github.com/SkyAPM/SkyAPM-php-sdk   SkyWalking-Infra-E2E Optimize verifier Normal Level Huaxi Jiang / hoshea@apache.org 1. Continue to verify cases when other cases fail  2. Merge retry outputs  3. Prettify verify results' output Go https://github.com/apache/skywalking-infra-e2e   SkyWalking Metrics anomaly detection with machine learning Advanced Level Yihao Chen / yihaochen@apache.org An MVP version of ML-powered metrics anomaly detection using dynamic baselines and thresholds Python, Java https://github.com/apache/skywalking   SkyWalking Python Collect PVM metrics and send the metrics to OAP backend, configure dashboard in UI Normal Level Zhenxu Ke / kezhenxu94@apache.org Core Python VM metrics should be collected and displayed in SkyWalking. Python https://github.com/apache/skywalking-python issue   SkyWalking BanyanDB Command line tools for BanyanDB Normal Level Hongtao Gao / hanahmily@apache.org Command line tools should access relevant APIs to manage resources and online data. Go https://github.com/apache/skywalking-banyandb   SkyWalking SWCK CRD and controller for BanyanDB Advance Level Ye Cao / dashanji@apache.org CRD and controller provision BanyanDB as the native Storage resource. Go https://github.com/apache/skywalking-swck   SkyAPM-Go2sky Collect golang metrics such as gc, goroutines and threads, and send the the metrics to OAP backend, configure dashboard in UI Normal Level Wei Zhang / zhangwei24@apache.org Core golang metrics should be collected and displayed in SkyWalking. Go https://github.com/SkyAPM/go2sky   SkyWalking Collect system metrics such as system_load, cpu_usage, mem_usage from telegraf and send the metrics to OAP backend, configure dashboard in UI Normal Level Haoyang Liu / liuhaoyangzz@apache.org System metrics should be collected and displayed in SkyWalking. Java https://github.com/apache/skywalking    Mentors could submit pull requests to update the above list.\nContact the community You could send emails to mentor\u0026rsquo;s personal email to talk about the project and details. The official mail list of the community is dev@skywalking.apache.org. You need to subscribe to the mail list to get all replies. 
Send mail to dev-suscribe@skywalking.apache.org and follow the replies.\n","title":"Open Source Promotion Plan 2022 -- Project List","url":"/events/summer-ospp-2022/readme/"},{"content":"如果要讨论提高自己系统设计能力的方式,我想大多数人都会选择去阅读优秀开源项目的源代码。近年来我参与了多个监控服务的开发工作,并在工作中大量地使用了 SkyWalking 并对其进行二次开发。在这个过程中,我发现 SkyWalking 天然的因其国产的身份,整套源代码地组织和设计非常符合国人的编程思维。由此我录制了本套课程,旨在和大家分享我的一些浅薄的心得和体会。\n本套课程分为两个阶段,分别讲解 Agent 端和 OAP 端地设计和实现。每个阶段的内容都是以启动流程作为讲解主线,逐步展开相关的功能模块。除了对 SKyWalking 本身内容进行讲解,课程还针对 SKyWalking 使用到的一些较为生僻的知识点进行了补充讲解(如 synthetic、NBAC 机制、自定义类加载器等),以便于大家更清晰地掌握课程内容。\nSkyWalking8.7.0 源码分析 - 视频课程直达链接\n目前课程已更新完 Agent 端的讲解,目录如下:\n 01-开篇和源码环境准备 02-Agent 启动流程 03-Agent 配置加载流程 04-自定义类加载器 AgentClassLoader 05-插件定义体系 07-插件加载 06-定制 Agent 08-什么是 synthetic 09-NBAC 机制 10-服务加载 11-witness 组件版本识别 12-Transform 工作流程 13-静态方法插桩 14-构造器和实例方法插桩 15-插件拦截器加载流程(非常重要) 16-运行时插件效果的字节码讲解 17-JDK 类库插件工作原理 18-服务-GRPCChanelService 19-服务-ServiceManagementClient 20-服务-CommandService 21-服务-SamplingService 22-服务-JVMService 23-服务-KafkaXxxService 24-服务-StatusCheckService 25-链路基础知识 26-链路 ID 生成 27-TraceSegment 28-Span 基本概念 29-Span 完整模型 30-StackBasedTracingSpan 31-ExitSpan 和 LocalSpan 32-链路追踪上下文 TracerContext 33-上下文适配器 ContextManager 34-DataCarrier-Buffer 35-DataCarrier-全解 36-链路数据发送到 OAP  B站视频地址\n","title":"[视频] SkyWalking 8.7.0 源码分析","url":"/zh/2022-03-25-skywalking-source-code-analyzation/"},{"content":"SkyWalking NodeJS 0.4.0 is released. Go to downloads page to find release tars.\n Fix mysql2 plugin install error. (#74) Update IORedis Plugin, fill dbinstance tag as host if condition.select doesn\u0026rsquo;t exist. (#73) Experimental AWS Lambda Function support. (#70) Upgrade dependencies to fix vulnerabilities. (#68) Add lint pre-commit hook and migrate to eslint. (#66, #67) Bump up gRPC version, and use its new release repository. (#65) Regard baseURL when in Axios Plugin. (#63) Add an API to access the trace id. (#60) Use agent test tool snapshot Docker image instead of building in CI. (#59) Wrapped IORedisPlugin call in try/catch. 
(#58)  ","title":"Release Apache SkyWalking for NodeJS 0.4.0","url":"/events/release-apache-skywalking-nodejs-0-4-0/"},{"content":"大约二十年前我刚开始进入互联网的世界的时候,支撑起整个网络的基础设施,就包括了 Apache 软件基金会(ASF)治下的软件。\nApache Httpd 是开启这个故事的软件,巅峰时期有超过七成的市场占有率,即使是在今天 NGINX 等新技术蓬勃发展的时代,也有三成左右的市场占有率。由 Linux、Apache Httpd、MySQL 和 PHP 组成的 LAMP 技术栈,是开源吞噬软件应用的第一场大型胜利。\n我从 2018 年参与 Apache Flink 开始正式直接接触到成立于 1999 年,如今已经有二十年以上历史的 Apache 软件基金会,并在一年后的 2019 年成为 Apache Flink 项目 Committer 队伍的一员,2020 年成为 Apache Curator 项目 PMC(项目管理委员会)的一员。今年,经由姜宁老师推荐,成为了 Apache Members 之一,也就是 Apache 软件基金会层面的正式成员。\n我想系统性地做一个开源案例库已经很久了。无论怎么分类筛选优秀的开源共同体,The Apache Community 都是无法绕开的。然而,拥有三百余个开源软件项目的 Apache 软件基金会,并不是一篇文章就能讲清楚的案例。本文也没有打算写成一篇长文顾及方方面面,而是启发于自己的新角色,回顾过去近五年在 Apache Community 当中的经历和体验,简单讨论 Apache 的理念,以及这些理念是如何落实到基金会组织、项目组织以及每一个参与者的日常生活事务当中的。\n不过,尽管对讨论的对象做了如此大幅度的缩减,由我自己来定义什么是 Apache 的理念未免也太容易有失偏颇。幸运的是,Apache Community 作为优秀的开源共同体,当然做到了我在《共同创造价值》一文中提到的回答好“我能为你做什么”以及“我应该怎么做到”的问题。Apache Community 的理念之一就是 Open Communications 即开放式讨论,由此产生的公开材料以及基于公开材料整理的文档汗牛充栋。这既是研究 Apache Community 的珍贵材料,也为还原和讨论一个真实的 Apache Community 提出了不小的挑战。\n无论如何,本文将以 Apache 软件基金会在 2020 年发布的纪录片 Trillions and Trillions Served 为主线,结合其他文档和文字材料来介绍 Apache 的理念。\n以人为本 纪录片一开始就讲起了 Apache Httpd 项目的历史,当初的 Apache Group 是基于一个源代码共享的 Web Server 建立起来的邮件列表上的一群人。软件开发当初的印象如同科学研究,因此交流源码在近似科学共同体的开源共同体当中是非常自然的。\n如同 ASF 的联合创始人 Brian Behlendorf 所说,每当有人解决了一个问题或者实现了一个新功能,他出于一种朴素的分享精神,也就是“为什么不把补丁提交回共享的源代码当中呢”的念头,基于开源软件的协作就这样自然发生了。纪录片中有一位提到,她很喜欢 Apache 这个词和 a patchy software 的谐音,共享同一个软件的补丁(patches)就是开源精神最早诞生的形式。\n这是 Apache Community 的根基,我们将会看到这种朴素精神经过发展形成了一个怎样的共同体,在共同体的发展过程当中,这样的根基又是如何深刻地影响了 Apache 理念的方方面面。\nApache Group 的工作模式还有一个重要的特征,那就是每个人都是基于自己的需求修复缺陷或是新增功能,在邮件列表上交流和提交补丁的个人,仅仅只是代表他个人,而没有一个“背后的组织”或者“背后的公司”。因此,ASF 的 How it Works 文档中一直强调,在基金会当中的个体,都只是个体(individuals),或者称之为志愿者(volunteers)。\n我在某公司的分享当中提到过,商业产品可以基于开源软件打造,但是当公司的雇员出现在社群当中的时候,他应该保持自己志愿者的身份。这就像是开源软件可以被用于生产环境或者严肃场景,例如航空器的发射和运行离不开 Linux 操作系统,但是开源软件本身是具有免责条款的。商业公司或专业团队提供服务保障,而开源软件本身是 AS IS 的。同样,社群成员本人可以有商业公司雇员的身份,但是他在社群当中,就是一个志愿者。\n毫无疑问,这种论调当即受到了质疑,因为通常的认知里,我就是拿了公司的钱,就是因为在给这家公司打工,才会去关注这个项目,你非要说我是一个志愿者,我还就真不是一个志愿者,你怎么说?\n其实这个问题,同样在 How it Works 文档中已经有了解答。\n All participants in ASF projects are volunteers and nobody (not even members or officers) is paid directly by the foundation to do their job. There are many examples of committers who are paid to work on projects, but never by the foundation itself. 
Rather, companies or institutions that use the software and want to enhance it or maintain it provide the salary.\n 我当时基于这样的认识,给到质疑的回答是,如果你不想背负起因为你是员工,因此必须响应社群成员的 issue 或 PR 等信息,那么你可以试着把自己摆在一个 volunteer 的角度来观察和参与社群。实际上,你并没有这样的义务,即使公司要求你必须回答,那也是公司的规定,而不是社群的要求。如果你保持着这样的认识和心态,那么社群于你而言,才有可能是一个跨越职业生涯不同阶段的归属地,而不是工作的附庸。\n社群从来不会从你这里索取什么,因为你的参与本身也是自愿的。其他社群成员会感谢你的参与,并且如果相处得好,这会是一个可爱的去处。社群不是你的敌人,不要因为公司下达了离谱的社群指标而把怒火发泄在社群和社群成员身上。压力来源于公司,作为社群成员的你本来可以不用承受这些。\nApache Community 对个体贡献者组成社群这点有多么重视呢?只看打印出来不过 10 页 A4 纸的 How it Works 文档,volunteer 和 individuals 两个词加起来出现了 19 次。The Apache Way 文档中强调的社群特征就包括了 Independence 一条,唯一并列的另一个是经常被引用的 Community over code 原则。甚至,有一个专门的 Project independence 文档讨论了 ASF 治下的项目如何由个体志愿者开发和维护,又为何因此是中立和非商业性的。\nINDIVIDUALS COMPOSE THE ASF 集中体现了 ASF 以人为本的理念。实际上,不止上面提到的 Independence 强调了社群成员个体志愿者的属性,Community over code 这一原则也在强调 ASF 关注围绕开源软件聚集起来的人,包括开发者、用户和其他各种形式的参与者。人是维持社群常青的根本,在后面具体讨论 The Apache Way 的内容的时候还会展开。\n上善若水 众所周知,Apache License 2.0 (APL-2.0) 是所谓的宽容式软件协议。也就是说,不同于 GPL 3.0 这样的 Copyleft 软件协议要求衍生作品需要以相同的条款发布,其中包括开放源代码和自由修改从而使得软件源代码总是可以获取和修改的,Apache License 在协议内容当中仅保留了著作权和商标,并要求保留软件作者的任何声明(NOTICE)。\nASF 在软件协议上的理念是赋予最大程度的使用自由,鼓励用户和开发者参与到共同体当中来,鼓励与上游共同创造价值,共享补丁。“鼓励”而不是“要求”,是 ASF 和自由软件基金会(Free Software Foundation, FSF)最主要的区别。\n这一倾向可以追溯到 Apache Group 建立的基础。Apache Httpd 派生自伊利诺伊大学的 NCSA Httpd 项目,由于使用并开发这个 web server 的人以邮件列表为纽带聚集在一起,通过交换补丁来开发同一个项目。在项目的发起人 Robert McCool 等大学生毕业以后,Apache Group 的发起人们接过这个软件的维护和开发工作。当时他们看到的软件协议,就是一个 MIT License 精神下的宽容式软件协议。自然而然地,Apache Group 维护 Apache Httpd 的时候,也就继承了这个协议。\n后来,Apache Httpd 打下了 web server 的半壁江山,也验证了这一模式的可靠性。虽然有些路径依赖的嫌疑,但是 ASF 凭借近似“上善若水”的宽容理念,在二十年间成功创造了数以百亿计美元价值的三百多个软件项目。\n纪录片中 ASF 的元老 Ted Dunning 提到,在他早期创造的软件当中,他会在宽容式软件协议之上,添加一个商用的例外条款。这就像是著名开源领域律师 Heather Meeker 起草的 The Commons Clause 附加条款。\n Without limiting other conditions in the License, the grant of rights under the License will not include, and the License does not grant to you, the right to Sell the Software.\n 附加 The Commons Clause 条款的软件都不是符合 OSD 定义的开源软件,也不再是原来的协议了。NebulaGraph 曾经在附加 The Commons Clause 条款的情况下声称自己是 APL-2.0 协议许可的软件,当时的 ASF 董事吴晟就提 issue (vesoft-inc/nebula#3247) 指出这一问题。NebulaGraph 于是删除了所有 The Commons Clause 的字样,保证无误地以 APL-2.0 协议许可该软件。\nTed Dunning 随后提到,这样的附加条款实际上严重影响了软件的采用。他意识到自己实际上并不想为此打官司,因此加上这样的条款对他而言是毫无意义的。Ted Dunning 于是去掉了附加条款,而这使得使用他的软件的条件能够简单的被理解,从而需要这些软件的用户能够大规模的采用。“水利万物而不争”,反而是不去强迫和约束用户行为的做法,为软件赢得了更多贡献。\n我仍然很敬佩采用 GPL 系列协议发布高质量软件的开发者,Linux 和 GCC 这样的软件的成功改变了世人对软件领域的自由的认识。然而,FSF 自己也认识到需要提出修正的 LGPL 来改进应用程序以外的软件的发布和采用,例如基础库。\nAPL-2.0 的思路与之不同,它允许任何人以任何形式使用、修改和分发软件,因此 ASF 治下的项目,以及 Linux Foundation 治下采用 APL-2.0 的项目,以及更多个人或组织采用 APL-2.0 的项目,共同构成了强大的开源软件生态,涵盖了应用软件,基础库,开发工具和框架等等各个方面。事实证明,“鼓励”而不是“要求”用户秉持 upstream first 的理念,尽可能参与到开源共同体并交换知识和补丁,共同创造价值,是能够制造出高质量的软件,构建出繁荣的社群和生态的。\n匠人精神 Apache Community 关注开发者的需要。\nApache Group 成立 ASF 的原因,是在 Apache Httpd 流行起来以后,商业公司和社会团体开始寻求和这个围绕项目形成的群体交流。然而,缺少一个正式的法律实体让组织之间的往来缺乏保障和流程。因此,如同纪录片当中提到的,ASF 成立的主要原因,是为了支撑 Apache Httpd 项目。只不过当初的创始成员们很难想到的是,ASF 最终支撑了数百个开源项目。\n不同于 Linux Foundation 是行业联盟,主要目的是为了促进其成员的共同商业利益,ASF 主要服务于开发者,由此支撑开源项目的开发以及开源共同体的发展。\n举例来说,进入 ASF 孵化器的项目都能够在 ASF Infra 的支持下运行自己的 apache.org 域名的网站,将代码托管在 ASF 仓库中上,例如 Apache GitBox Repositories 和 Apache GitHub Organization 等。这些仓库上运行着自由取用的开发基础设施,例如持续集成和持续发布的工具和资源等等。ASF 还维护了自己的邮件列表和文件服务器等一系列资源,以帮助开源项目建立起自己的共同体和发布自己的构件。\n反观 Linux Foundation 的主要思路,则是关注围绕项目聚集起来的供应商,以行业联盟的形式举办联合市场活动扩大影响,协调谈判推出行业标准等等。典型地,例如 CNCF 一直致力于定义云上应用开发的标准,容器虚拟化技术的标准。上述 ASF Infra 关注的内容和资源,则大多需要项目开发者自己解决,这些开发者往往主要为一个或若干个供应商工作,他们解决的方式通常也是依赖供应商出力。\n当然,上面的对比只是为了说明区别,并无优劣之分,也不相互对立。ASF 的创始成员 Brian Behlendorf 同时是 Linux Foundation 下 Open 
Source Security Foundation 的经理,以及 Hyperledger 的执行董事。\nASF 关注开发者的需要,体现出 Apache Community 及其成员对开发者的人文关怀。纪录片中谈到 ASF 治下项目的开发体验时,几乎每个人的眼里都有光。他们谈论着匠人精神,称赞知识分享,与人合作,以及打磨技艺的愉快经历。实际上,要想从 Apache 孵化器中成功毕业,相当部分的 mentor 关注的是围绕开源软件形成的共同体,能否支撑开源软件长久的发展和采用,这其中就包括共同体成员是否能够沉下心来做技术,而不是追求花哨的数字指标和人头凑数。\n讲几个具体的开发者福利。\n每个拥有 @apache.org 邮箱的人,即成为 ASF 治下项目 Committer 或 ASF Member 的成员,JetBrains 会提供免费的全家桶订阅授权码。我从 2019 年成为 Apache Flink 项目的 Committer 以后,已经三年沉浸在 IDEA 和 CLion 的包容下,成为彻底使用 IDE 主力开发的程序员了。\nApache GitHub Organization 下的 GitHub Actions 资源是企业级支持,这部分开销也是由 ASF 作为非营利组织募资和运营得到的资金支付的。基本上,如果你的项目成为 Apache 孵化器项目或顶级项目,那么和 GitHub Actions 集成的 CI 体验是非常顺畅的。Apache SkyWalking 只算主仓库就基于 GitHub Actions 运行了十多个端到端测试作业,Apache Pulsar 也全面基于 GitHub Actions 集成了自己的 CI 作业。\n提到匠人精神,一个隐形的开发者福利,其实是 ASF 的成员尤其是孵化器的 mentor 大多是经验非常丰富的开发者。软件开发不只是写代码,Apache Community 成员之间相互帮助,能够帮你跟上全世界最前沿的开发实践。如何提问题,如何做项目管理,如何发布软件,这些平日里在学校在公司很难有机会接触的知识和实践机会,在 Apache Community 当中只要你积极承担责任,都是触手可得的。\n当然,如何写代码也是开发当中最常交流的话题。我深入接触 Maven 开始于跟 Flink Community 的 Chesnay Schepler 的交流。我对 Java 开发的理解,分布式系统开发的知识,很大程度上也得到了 Apache Flink 和 Apache ZooKeeper 等项目的成员的帮助,尤其是 Till Rohrmann 和 Enrico Olivelli 几位。上面提到的 Ted Dunning 开始攻读博士的时候,我还没出生。但是我在项目当中用到 ZooKeeper 的 multi 功能并提出疑问和改进想法的时候,也跟他有过一系列的讨论。\n谈到技艺就会想起人,这也是 ASF 一直坚持以人为本带来的社群风气。\n我跟姜宁老师在一年前认识,交流 The Apache Way 期间萌生出相互认同。姜宁老师在 Apache 孵化器当中帮助众多项目理解 The Apache Way 并予以实践,德高望重。在今年的 ASF Members 年会当中,姜宁老师也被推举为 ASF Board 的一员。\n我跟吴晟老师在去年认识。他经常会强调开发者尤其是没有强烈公司背景的开发者的视角,多次提到这些开发者是整个开源生态的重要组成部分。他作为 PMC Chair 的 Apache SkyWalking 项目相信“没有下一个版本的计划,只知道会有下一个版本”,这是最佳实践的传播,也是伴随技术的文化理念的传播。SkyWalking 项目出于自己需要,也出于为开源世界添砖加瓦的动机创建的 SkyWalking Eyes 项目,被广泛用在不止于 ASF 治下项目,而是整个开源世界的轻量级的软件协议审计和 License Header 检查上。\n主要贡献在 Apache APISIX 的琚致远同学今年也被推选成为 Apache Members 的一员。他最让我印象深刻的是在 APISIX 社群当中积极讨论社群建设的议题,以及作为 APISIX 发布的 GSoC 项目的 mentor 帮助在校学生接触开源,实践开源,锻炼技艺。巧合的是,他跟我年龄相同,于是我痛失 Youngest Apache Member 的噱头,哈哈。\n或许,参与 Apache Community 就是这样的一种体验。并不是什么复杂的叙事,只是找到志同道合的人做出好的软件。我希望能够为提升整个软件行业付出自己的努力,希望我(参与)制造的软件创造出更大的价值,这里的人看起来大都也有相似的想法,这很好。仅此而已。\n原本还想聊聊 The Apache Way 的具体内容,还有介绍 Apache Incubator 这个保持 Apache Community 理念常青,完成代际传承的重要机制,但是到此为止似乎也很好。Apache Community 的故事和经验很难用一篇文章讲完,这两个话题就留待以后再写吧。\n","title":"我眼中的 The Apache Way","url":"/zh/2022-03-14-the-apache-community/"},{"content":"SkyWalking Client Rust 0.1.0 is released. Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking Client Rust 0.1.0","url":"/events/release-apache-skywalking-client-rust-0-1-0/"},{"content":"SkyWalking Java Agent 8.9.0 is released. Go to downloads page to find release tars. Changes by Version\n8.9.0  Support Transaction and fix duplicated methods enhancements for jedis-2.x plugin. Add ConsumerWrapper/FunctionWrapper to support CompletableFuture.x.thenAcceptAsync/thenApplyAsync. Build CLI from Docker instead of source codes, add alpine based Docker image. Support set instance properties in json format. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Add doc about system environment variables to configurations.md Avoid ProfileTaskChannelService.addProfilingSnapshot throw IllegalStateException(Queue full) Increase ProfileTaskChannelService.snapshotQueue default size from 50 to 4500 Support 2.8 and 2.9 of pulsar client. Add dubbo 3.x plugin. 
Fix TracePathMatcher should match pattern \u0026ldquo;**\u0026rdquo; with paths end by \u0026ldquo;/\u0026rdquo; Add support returnedObj expression for apm-customize-enhance-plugin Fix the bug that httpasyncclient-4.x-plugin puts the dirty tracing context in the connection context Compatible with the versions after dubbo-2.7.14 Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Support ZGC GC time and count metric collect. (Require 9.0.0 OAP) Support configuration for collecting redis parameters for jedis-2.x and redisson-3.x plugin. Migrate base images to Temurin and add images for ARM. (Plugin Test) Fix compiling issues in many plugin tests due to they didn\u0026rsquo;t lock the Spring version, and Spring 3 is incompatible with 2.x APIs and JDK8 compiling. Support ShardingSphere 5.0.0 Bump up gRPC to 1.44.0, fix relative CVEs.  Documentation  Add a FAQ, Why is -Djava.ext.dirs not supported?.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.9.0","url":"/events/release-apache-skywalking-java-agent-8-9-0/"},{"content":"Apache SkyWalking is an open-source APM for a distributed system, Apache Software Foundation top-level project.\nOn Jan. 28th, we received a License violation report from one of the committers (anonymously). They have a cloud service called Application Performance Monitoring - Distributed Tracing (应用性能监控全链路版). At the Java service monitoring section, it provides this agent download link\n wget https://datarangers.com.cn/apminsight/repo/v2/download/java-agent/apminsight-java-agent_latest.tar.gz\n We downloaded it at 23:15 Jan. 28th UTC+8(Beijing), and archived it at here\nWe have confirmed this is a distribution of SkyWalking Java agent.\nWe listed several pieces of evidence to prove this here, every reader could compare with the official SkyWalking source codes\n The first and the easiest one is agent.config file, which is using the same config keys, and the same config format.  This is the Volcengine\u0026rsquo;s version, and check SkyWalking agent.config In the apmplus-agent.jar, Volcengine\u0026rsquo;s agent core jar, you could easily find several core classes exactly as same as SkyWalking\u0026rsquo;s.  The ComponentsDefine class is unchanged, even with component ID and name. This is Volcengine\u0026rsquo;s version, and check SkyWalking\u0026rsquo;s version\nThe whole code names, package names, and hierarchy structure are all as same as SkyWalking 6.x version.  This is the Volcengine package hierarchy structure, and check the SkyWalking\u0026rsquo;s version\n Volcengine Inc.\u0026rsquo;s team changed all package names, removed the Apache Software Foundation\u0026rsquo;s header, and don\u0026rsquo;t keep Apache Software Foundation and Apache SkyWalking\u0026rsquo;s LICENSE and NOTICE file in their redistribution.\nAlso, we can\u0026rsquo;t find anything on their website to declare they are distributing SkyWalking.\nAll above have proved they are violating the Apache 2.0 License, and don\u0026rsquo;t respect Apache Software Foundation and Apache SkyWalking\u0026rsquo;s IP and Branding.\nWe have contacted their legal team, and wait for their official response.\nResolution On Jan. 30th night, UTC+8, 2022. We received a response from Volcengine\u0026rsquo;s APMPlus team. They admitted their violation behaviors, and made the following changes.\n Volcengine\u0026rsquo;s APMPlus service page was updated on January 30th and stated that the agent is a fork version(re-distribution) of Apache SkyWalking agent. 
Below is the screenshot of Volcengine\u0026rsquo;s APMPlus product page.  Volcengine\u0026rsquo;s APMPlus agent distributions were also updated and include SkyWalking\u0026rsquo;s License and NOTICE now. Below is the screenshot of Volcengine\u0026rsquo;s APMPlus latest agent, you could download from the product page. We keep a copy of their Jan. 30th 2022 at here.  Volcengine\u0026rsquo;s APMPlus team had restored all license headers of SkyWalking in the agent, and the modifications of the project files are also listed in \u0026ldquo;SkyWalking-NOTICE\u0026rdquo;, which you could download from the product page.  We have updated the status to the PMC mail list. This license violation issue has been resolved for now.\n Appendix Inquiries of committers Q: I hope Volcengine Inc. can give a reason for this license issue, not just an afterthought PR. This will not only let us know where the issue is but also avoid similar problems in the future.\nA(apmplus apmplus@volcengine.com):\nThe developers neglected this repository during submitting compliance assessment. Currently, APMPlus team had introduced advanced tools provided by the company for compliance assessment, and we also strengthened training for our developers. In the future, the compliance assessment process will be further improved from tool assessment and manual assessment. ","title":"[Resolved][License Issue] Volcengine Inc.(火山引擎) violates the Apache 2.0 License when using SkyWalking.","url":"/blog/2022-01-28-volcengine-violates-aplv2/"},{"content":"Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. 
A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. 
The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. 
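Since Satellite implements the routing policies described earlier, a minimal Go sketch of the Fixed policy (hash an identification string onto the upstream list and remember the binding) may help make the routing steps concrete. This is only an illustration with assumed names and a simplified cache, not Satellite's actual source code:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// fixedRouter pins an identification string (for example a service
// instance name) to one upstream OAP node, so data from the same source
// always reaches the same node. It is a simplified illustration of the
// Fixed policy described above: the real Satellite keeps the binding in
// an LRU cache and only re-routes when the cached node has disappeared.
type fixedRouter struct {
	nodes []string          // current upstream OAP addresses
	cache map[string]string // identification -> chosen node
	limit int               // naive stand-in for the LRU capacity
}

func newFixedRouter(nodes []string, limit int) *fixedRouter {
	return &fixedRouter{nodes: nodes, cache: make(map[string]string, limit), limit: limit}
}

// Pick returns the upstream node for the given identification string.
func (r *fixedRouter) Pick(id string) string {
	// 1. Reuse the remembered node if it is still in the upstream list.
	if node, ok := r.cache[id]; ok && r.contains(node) {
		return node
	}
	// 2. Otherwise hash the identification onto the node list.
	h := fnv.New32a()
	h.Write([]byte(id))
	node := r.nodes[int(h.Sum32()%uint32(len(r.nodes)))]
	// 3. Remember the mapping (a real implementation evicts via LRU).
	if len(r.cache) < r.limit {
		r.cache[id] = node
	}
	return node
}

func (r *fixedRouter) contains(node string) bool {
	for _, n := range r.nodes {
		if n == node {
			return true
		}
	}
	return false
}

func main() {
	router := newFixedRouter([]string{"oap-0:11800", "oap-1:11800", "oap-2:11800"}, 1024)
	fmt.Println(router.Pick("service-a/instance-1")) // same node every time for this id
	fmt.Println(router.Pick("service-a/instance-1"))
}
```

Satellite applies this Fixed policy to the SkyWalking Meter protocol and falls back to Round-Robin for the other protocols, as noted above.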
It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. 
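As a side note, the replica count that HPA eventually requests follows the standard Kubernetes calculation, roughly desiredReplicas = ceil(currentReplicas * currentMetricValue / targetValue). The tiny Go sketch below uses the connection numbers from this walkthrough (11 observed gRPC connections against a target of 10 per Satellite) to show why one instance becomes two; the numbers are only the ones from this example:

```go
package main

import (
	"fmt"
	"math"
)

// desiredReplicas mirrors the standard Kubernetes HPA formula:
// ceil(currentReplicas * currentMetricValue / targetValue).
func desiredReplicas(current int, metric, target float64) int {
	return int(math.Ceil(float64(current) * metric / target))
}

func main() {
	// 11 gRPC connections observed, target of 10 per Satellite, 1 replica today.
	fmt.Println(desiredReplicas(1, 11, 10)) // -> 2, so HPA scales Satellite up to 2
}
```

Reading that metric from OAP and feeding it to HPA is exactly what the SWCK adapter takes care of.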
For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. 
One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","title":"Scaling with Apache SkyWalking","url":"/blog/2022-01-24-scaling-with-apache-skywalking/"},{"content":"SkyWalking Cloud on Kubernetes 0.6.1 is released. 
Go to downloads page to find release tars.\n Bugs  Fix could not deploy metrics adapter to GKE    ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.6.1","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-6-1/"},{"content":"随着业务与用户量的持续发展,系统的瓶颈也逐渐出现。尤其在一些节假日、突发的营销活动中,访问量激增可能会导致系统性能下降,甚至造成系统瘫痪。 全链路压测可以很好的帮助我们预先演练高峰流量,从而提前模拟出系统的执行情况,帮助我们预估系统容量。当流量真正来临时,也可以更从容面对。 Apache SkyWalking 联合 Apache APISIX 及 Apache ShardingSphere,三大顶级开源社区通力合作,共同打造生产级可用的全链路压测解决方案,CyborgFlow。\n介绍 CyborgFlow 是一款面向生产级可用的全链路压测解决方案。总共由三个组件组成,如下图所示。\n Flow Gateway: 压测流量网关。当流量到达该组件时,则会将请求认定为压测流量,并将压测流量标识传递至上游服务。 Database Shadow: 数据库中间件。当数据库中间件感知到当前流量为压测流量时,则会将数据库操作路由至影子表中进行操作。 Agent/Dashboard: 分布式监控系统。与业务系统紧密结合,当感知到压测请求后,自动将其标识传递至上游,无需业务代码改造。并且利用分析能力,构建Dashboard来便于查看流量情况。  以此,便覆盖了单个请求的完整生命周期,在网关层构建压测标识,到业务系统透传标识,最终将请求与影子表交互。同时整个流程拥有完整的监控分析。\n原理 依托于三大社区合作,让这一切变得简单易用。下图为全链路压测系统的运行原理,橙色和蓝色分别代表正常流量和压测流量。\nFlow Gateway Flow Gateway 作为压测流量网关,主要负责接收流量,并传递压测流量表示至上游。\n 添加 skywalking插件 构建链路入口。 依据 proxy-rewrite插件 将压测流量标识注入到上游的请求头中。  Agent/Dashboard 该组件中则分为两部分内容说明。\nAgent Agent与业务程序拥有相同生命周期,负责压测流量标识在各个业务系统之间传递,并与 Database Shadow 交互。\n SkyWalking Agent通过读取从Flow Gateway传递的压测流量标识,利用 透传协议 将该标识在应用之间传递。 当准备进行数据库调用时,则通过判断是否包含压测流量标识来决定是否SQL调用时追加压测流量标识(/* cyborg-flow: true */)。 当检测到当前请求包含压测流量标识后,将该数据与Trace绑定,用于Dashboard数据分析。  Dashboard Dashboard 用于压测过程进行中的监控数据分析,并最终以图表的方式进行展示。\n 接收来自Agent中上报的Trace数据,并依据OAL中的Tag过滤器(.filter(tags contain \u0026quot;cyborg-flow:true\u0026quot;))来生成压测与非压测的指标数据。 利用指标数据便可以在Dashboard中创建图表进行观察。  Database Shadow Database Shadow 作为 Proxy 在业务程序与数据库中间完成数据交互,当检测到压测流量时则会将SQL传递至影子表中处理。\n 检测下游传递的数据库语句中是否包含压测流量标识(/* cyborg-flow: true */),存在时则将SQL交给由用户配置的影子表中处理。  快速上手 下面将带你快速将Cyborg Flow集成至你的项目中。相关组件的下载请至 Github Release 中下载,目前已发布 0.1.0 版本。\n部署 Database Shadow  解压缩cyborg-database-shadow.tar.gz。 将 conf/config-shadow.yaml 文件中的业务数据库与影子数据库配置为自身业务中的配置。 启动 Database Shadow服务,启动脚本位于bin/start.sh中。  如需了解更详细的部署参数配置,请参考 官方文档 。\n部署 Cyborg Dashboard  解压缩cyborg-dashboard.tar.gz。 启动后端与UI界面服务,用于链路数据解析与界面展示,启动脚本位于bin/startup.sh中。 接下来就可以通过打开浏览器并访问http://localhost:8080/,此页面为Cyborg Dashboard界面,由于目前尚未部署任何业务程序,所以暂无任何数据。  如需了解更详细的部署参数配置,请参考 后端服务 与 UI界面服务 的安装文档。\n部署 Cyborg Agent 到业务程序中  解压缩cyborg-agent.tar.gz. 修改config/agent.config中的collector.backend_service为 Cyborg Dashboard 中后端地址(默认为11800端口),用于将监控数据上报至 Cyborg Dashboard 。 修改业务程序中与数据库的链接,将其更改为 Database Shadow 中的配置。默认访问端口为3307,用户名密码均为root。 当程序启动时,增加该参数到启动命令中:-jar path/to/cyborg-agent/skywalking-agent.jar。  如需了解更详细的部署参数配置,请参考 Agent安装文档 。\n部署 Flow Gateway  参考 Flow Gateway 快速开始 进行下载 Apache APISIX 并配置相关插件。 基于 APISIX 创建路由文档 进行路由创建。  完成! 最后,通过Flow Gateway访问业务系统资源,便完成了一次压测流量请求。\n 压测流量最终访问至影子表进行数据操作。 如下图所示,通过观察 Cyborg Dashboard 便可以得知压测与非压测请求的执行情况。  总结 在本文中,我们详细介绍了Cyborg Flow中的各个组件的功能、原理,最终搭配快速上手来快速将该系统与自己的业务系统结合。 如果在使用中有任何问题,欢迎来共同讨论。\n","title":"Cyborg Flow X SkyWalking: 生产环境全链路压测","url":"/zh/2022-01-18-cyborg-flow/"},{"content":"SkyWalking Cloud on Kubernetes 0.6.0 is released. Go to downloads page to find release tars.\n Features  Add the Satellite CRD, webhooks and controller   Bugs  Update release images to set numeric user id Fix the satellite config not support number error Use env JAVA_TOOL_OPTIONS to replace AGENT_OPTS   Chores  Add stabilization windows feature in satellite HPA documentation    ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.6.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-6-0/"},{"content":"SkyWalking Kong Agent 0.2.0 is released. Go to downloads page to find release tars.\n Establish the SkyWalking Kong Agent.  
","title":"Release Apache SkyWalking Kong 0.2.0","url":"/events/release-apache-skywalking-kong-0-2-0/"},{"content":"SkyWalking Satellite 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  Make the gRPC client client_pem_path and client_key_path as an optional config. Remove prometheus-server sharing server plugin. Support let the telemetry metrics export to prometheus or metricsService. Add the resource limit when gRPC server accept connection.  Bug Fixes  Fix the gRPC server enable TLS failure. Fix the native meter protocol message load balance bug.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 0.5.0","url":"/events/release-apache-skwaylking-satellite-0-5-0/"},{"content":"SkyWalking LUA Nginx 0.6.0 is released. Go to downloads page to find release tars.\n fix: skywalking_tracer:finish() will not be called in some case such as upstream timeout.  ","title":"Release Apache SkyWalking LUA Nginx 0.6.0","url":"/events/release-apache-skywalking-lua-nginx-0.6.0/"},{"content":"Chaos Mesh is an open-source cloud-native chaos engineering platform. You can use Chaos Mesh to conveniently inject failures and simulate abnormalities that might occur in reality, so you can identify potential problems in your system. Chaos Mesh also offers a Chaos Dashboard which allows you to monitor the status of a chaos experiment. However, this dashboard cannot let you observe how the failures in the experiment impact the service performance of applications. This hinders us from further testing our systems and finding potential problems.\n Apache SkyWalking is an open-source application performance monitor (APM), specially designed to monitor, track, and diagnose cloud native, container-based distributed systems. It collects events that occur and then displays them on its dashboard, allowing you to observe directly the type and number of events that have occurred in your system and how different events impact the service performance.\nWhen you use SkyWalking and Chaos Mesh together during chaos experiments, you can observe how different failures impact the service performance.\nThis tutorial will show you how to configure SkyWalking and Chaos Mesh. You’ll also learn how to leverage the two systems to monitor events and observe in real time how chaos experiments impact applications’ service performance.\nPreparation Before you start to use SkyWalking and Chaos Mesh, you have to:\n Set up a SkyWalking cluster according to the SkyWalking configuration guide. Deploy Chao Mesh using Helm. Install JMeter or other Java testing tools (to increase service loads). Configure SkyWalking and Chaos Mesh according to this guide if you just want to run a demo.  Now, you are fully prepared, and we can cut to the chase.\nStep 1: Access the SkyWalking cluster After you install the SkyWalking cluster, you can access its user interface (UI). However, no service is running at this point, so before you start monitoring, you have to add one and set the agents.\nIn this tutorial, we take Spring Boot, a lightweight microservice framework, as an example to build a simplified demo environment.\n Create a SkyWalking demo in Spring Boot by referring to this document. Execute the command kubectl apply -f demo-deployment.yaml -n skywalking to deploy the demo.  After you finish deployment, you can observe the real-time monitoring results at the SkyWalking UI.\nNote: Spring Boot and SkyWalking have the same default port number: 8080. 
Be careful when you configure the port forwarding; otherwise, you may have port conflicts. For example, you can set Spring Boot’s port to 8079 by using a command like kubectl port-forward svc/spring-boot-skywalking-demo 8079:8080 -n skywalking to avoid conflicts.\nStep 2: Deploy SkyWalking Kubernetes Event Exporter SkyWalking Kubernetes Event Exporter is able to watch, filter, and send Kubernetes events into the SkyWalking backend. SkyWalking then associates the events with the system metrics and displays an overview about when and how the metrics are affected by the events.\nIf you want to deploy SkyWalking Kubernetes Event Exporter with one line of commands, refer to this document to create configuration files in YAML format and then customize the parameters in the filters and exporters. Now, you can use the command kubectl apply to deploy SkyWalking Kubernetes Event Exporter.\nStep 3: Use JMeter to increase service loads To better observe the change in service performance, you need to increase the service loads on Spring Boot. In this tutorial, we use JMeter, a widely adopted Java testing tool, to increase the service loads.\nPerform a stress test on localhost:8079 using JMeter and add five threads to continuously increase the service loads.\nOpen the SkyWalking Dashboard. You can see that the access rate is 100%, and that the service loads reach about 5,300 calls per minute (CPM).\nStep 4: Inject failures via Chaos Mesh and observe results After you finish the three steps above, you can use the Chaos Dashboard to simulate stress scenarios and observe the change in service performance during chaos experiments.\nThe following sections describe how service performance varies under the stress of three chaos conditions:\n  CPU load: 10%; memory load: 128 MB\nThe first chaos experiment simulates low CPU usage. To display when a chaos experiment starts and ends, click the switching button on the right side of the dashboard. To learn whether the experiment is Applied to the system or Recovered from the system, move your cursor onto the short, green line.\nDuring the time period between the two short, green lines, the service load decreases to 4,929 CPM, but returns to normal after the chaos experiment ends.\n  CPU load: 50%; memory load: 128 MB\nWhen the application’s CPU load increases to 50%, the service load decreases to 4,307 CPM.\n  CPU load: 100%; memory load: 128 MB\nWhen the CPU usage is at 100%, the service load decreases to only 40% of what it would be if no chaos experiments were taking place.\nBecause the process scheduling under the Linux system does not allow a process to occupy the CPU all the time, the deployed Spring Boot Demo can still handle 40% of the access requests even in the extreme case of a full CPU load.\n  Summary By combining SkyWalking and Chaos Mesh, you can clearly observe when and to what extent chaos experiments affect application service performance. This combination of tools lets you observe the service performance in various extreme conditions, thus boosting your confidence in your services.\nChaos Mesh has grown a lot in 2021 thanks to the unremitting efforts of all PingCAP engineers and community contributors.
In order to continue to upgrade our support for our wide variety of users and learn more about users’ experience in Chaos Engineering, we’d like to invite you to take this survey and give us your valuable feedback.\nIf you want to know more about Chaos Mesh, you’re welcome to join the Chaos Mesh community on GitHub or our Slack discussions (#project-chaos-mesh). If you find any bugs or missing features when using Chaos Mesh, you can submit your pull requests or issues to our GitHub repository.\n","title":"Chaos Mesh + SkyWalking: Better Observability for Chaos Engineering","url":"/blog/2021-12-21-better-observability-for-chaos-engineering/"},{"content":"SkyWalking Cloud on Kubernetes 0.5.0 is released. Go to downloads page to find release tars.\n Features  Add E2E test cases to verify OAPServer, UI, Java agent and Storage components.   Bugs  Fix operator role patch issues Fix invalid CSR signername Fix bug in the configmap controller   Chores  Bump up KubeBuilder to V3 Bump up metric adapter server to v1.21.0 Split mono-project to two independent projects    ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.5.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-5-0/"},{"content":"We can integrate SkyWalking into a Java application with the SkyWalking Java agent. In a typical deployment, the Java web applications run behind a load balancer, and the most commonly used load balancer is nginx. What should we do if we want to bring the load balancer itself under observation? Fortunately, SkyWalking provides an Nginx agent. During integration, however, it turns out that the examples on the official website only cover OpenResty. OpenResty already bundles the common modules such as LuaJIT and lua-nginx-module, so adding the SkyWalking-related configuration from the official examples just works there. When the same configuration is used to start plain nginx, many errors are reported. We may not want to replace the load balancer (nginx with OpenResty) just to use SkyWalking, so we must solve the integration problem between SkyWalking and plain nginx.\nNote: OpenResty is a high-performance web development platform based on nginx + Lua, which makes up for the limited programmability of plain nginx.\nBased on Skywalking-8.7.0 and Nginx-1.20.1\nUpgrade of nginx: The nginx agent plug-in is written in Lua, so nginx needs Lua support, which is exactly what lua-nginx-module provides. The lua-nginx-module in turn depends on LuaJIT, so we need to install LuaJIT first; version 2.1 is the best choice.\nFor nginx, you need to compile the necessary modules yourself.
It depends on the following two modules:\nlua-nginx-module The version is lua-nginx-module-0.10.21rc1\nngx_devel_kit The version using ngx_devel_kit-0.3.1\nCompile nginx parameters\nconfigure arguments: --add-module=/path/to/ngx_devel_kit-0.3.1 --add-module=/path/to/lua-nginx-module-0.10.21rc1 --with-ld-opt=-Wl,-rpath,/usr/local/LuaJIT/lib The following is for skywalking-nginx-lua-0.3.0 and 0.3.0+ are described separately.\nskywalking-nginx-lua-0.3.0 After testing, skywalking-nginx-lua-0.3.0 requires the following Lua related modules\nlua-resty-core https://github.com/openresty/lua-resty-core lua-resty-lrucache https://github.com/openresty/lua-resty-lrucache lua-cjson https://github.com/openresty/lua-cjson The dependent Lua modules are as follows:\nlua_package_path \u0026#34;/path/to/lua-resty-core/lua-resty-core-master/lib/?.lua;/path/to/lua-resty-lrucache-0.11/lib/?.lua;/path/to/skywalking-nginx-lua-0.3.0/lib/?.lua;;\u0026#34;; In the process of make \u0026amp; \u0026amp; make install, Lua cjson needs to pay attention to:\nModify a path in makefile\nLUA_INCLUDE_DIR ?= /usr/local/LuaJIT/include/luajit-2.0\nReference:https://blog.csdn.net/ymeputer/article/details/50146143 \nskywalking-nginx-lua-0.3.0+ For skywalking-nginx-lua-0.3.0+, tablepool support needs to be added, but it seems that cjson is not required\nlua-resty-core https://github.com/openresty/lua-resty-core lua-resty-lrucache https://github.com/openresty/lua-resty-lrucache lua-tablepool https://github.com/openresty/lua-tablepool lua_ package_ path \u0026#34;/path/to/lua-resty-core/lua-resty-core-master/lib/?.lua;/path/to/lua-resty-lrucache-0.11/lib/?.lua;/path/to/lua-tablepool-master/lib/?.lua;/path/to/skywalking-nginx-lua-master/lib/?.lua;;\u0026#34;; tablepool introduces two APIs according to its official documents table new and table. Clear requires luajit2.1, there is a paragraph in the skywalking-nginx-lua document that says you can use \u0026lsquo;require (\u0026ldquo;skywalking. Util\u0026rdquo;) disable_ Tablepool() ` disable tablepool\nWhen you start nginx, you will be prompted to install openresty\u0026rsquo;s own [luajit version]( https://github.com/openresty/luajit2 )\ndetected a LuaJIT version which is not OpenResty\u0026#39;s; many optimizations will be disabled and performance will be compromised (see https://github.com/openresty/luajit2 for OpenResty\u0026#39;s LuaJIT or, even better, consider using the OpenResty releases from https://openresty.org/en/download.html ) here is successful configuration:\nhttp { lua_package_path \u0026#34;/path/to/lua-resty-core/lua-resty-core-master/lib/?.lua;/path/to/lua-resty-lrucache-0.11/lib/?.lua;/path/to/lua-tablepool-master/lib/?.lua;/path/to/skywalking-nginx-lua-master/lib/?.lua;;\u0026#34;; # Buffer represents the register inform and the queue of the finished segment lua_shared_dict tracing_buffer 100m; # Init is the timer setter and keeper # Setup an infinite loop timer to do register and trace report. 
init_worker_by_lua_block { local metadata_buffer = ngx.shared.tracing_buffer -- Set service name metadata_buffer:set(\u0026#39;serviceName\u0026#39;, \u0026#39;User Service Name\u0026#39;) -- Instance means the number of Nginx deployment, does not mean the worker instances metadata_buffer:set(\u0026#39;serviceInstanceName\u0026#39;, \u0026#39;User Service Instance Name\u0026#39;) -- type \u0026#39;boolean\u0026#39;, mark the entrySpan include host/domain metadata_buffer:set(\u0026#39;includeHostInEntrySpan\u0026#39;, false) -- set random seed require(\u0026#34;skywalking.util\u0026#34;).set_randomseed() require(\u0026#34;skywalking.client\u0026#34;):startBackendTimer(\u0026#34;http://127.0.0.1:12800\u0026#34;) -- If there is a bug of this `tablepool` implementation, we can -- disable it in this way -- require(\u0026#34;skywalking.util\u0026#34;).disable_tablepool() skywalking_tracer = require(\u0026#34;skywalking.tracer\u0026#34;) } server { listen 8090; location /ingress { default_type text/html; rewrite_by_lua_block { ------------------------------------------------------ -- NOTICE, this should be changed manually -- This variable represents the upstream logic address -- Please set them as service logic name or DNS name -- -- Currently, we can not have the upstream real network address ------------------------------------------------------ skywalking_tracer:start(\u0026#34;upstream service\u0026#34;) -- If you want correlation custom data to the downstream service -- skywalking_tracer:start(\u0026#34;upstream service\u0026#34;, {custom = \u0026#34;custom_value\u0026#34;}) } -- Target upstream service proxy_pass http://127.0.0.1:8080/backend; body_filter_by_lua_block { if ngx.arg[2] then skywalking_tracer:finish() end } log_by_lua_block { skywalking_tracer:prepareForReport() } } } } Original post:https://www.cnblogs.com/kebibuluan/p/14440228.html\n","title":"How to integrate skywalking-nginx-lua to Nginx?","url":"/blog/2021-12-13-skywalking-nginx-agent-integration/"},{"content":"SkyWalking 8.9.1 is released. Go to downloads page to find release tars.\nChanges by Version\nProject  Upgrade log4j2 to 2.15.0 for CVE-2021-44228. This CVE only effects on JDK versions below 6u211, 7u201, 8u191 and 11.0.1 according to the post. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true also avoids CVE if your JRE opened JNDI in default.  ","title":"Release Apache SkyWalking APM 8.9.1","url":"/events/release-apache-skywalking-apm-8-9-1/"},{"content":"In the field of observability, the three main directions of data collection and analysis, Metrics, Logger and Tracing, are usually used to achieve insight into the operational status of applications.\nApache APISIX has integrated Apache SkyWaling Tracing capabilities as early as version 1.4, with features such as error logging and access log collection added in subsequent versions. Now with Apache SkyWalking\u0026rsquo;s support for Metrics, it enables Apache APISIX to implement a one-stop observable solution in integrated mode, covering both logging, metrics and call tracing.\nFeature Development Background Those of you who are familiar with Apache APISIX should know that Apache APISIX produces two types of logs during operation, namely the access log and the error log.\nAccess logs record detailed information about each request and are logs generated within the scope of the request, so they can be directly associated with Tracing. 
Error logs, on the other hand, are Apache APISIX runtime output log messages, which are application-wide logs, but cannot be 100% associated with requests.\nAt present, Apache APISIX provides very rich log processing plug-ins, including TCP/HTTP/Kafka and other collection and reporting plug-ins, but they are weakly associated with Tracing. Take Apache SkyWalking as an example. We extract the SkyWalking Tracing Context Header from the log records of Apache APISIX and export it to the file system, and then use a log processing framework (fluentbit) to convert the logs into a log format acceptable to SkyWalking. The Tracing Context is then parsed and extracted to obtain the Tracing ID to establish a connection with the Trace.\nObviously, the above way of handling the process is tedious and complicated, and requires additional conversion of log formats. For this reason, in PR#5500 we have brought the Apache SkyWalking access log into the Apache APISIX plug-in ecosystem to make it easier for users to collect and process logs using Apache SkyWalking in Apache APISIX.\nIntroduction of the New Plugins SkyWalking Logger Plugin The SkyWalking Logger plugin parses the SkyWalking Tracing Context Header and prints the relevant Tracing Context information to the log, thus enabling the log to be associated with the call chain.\nBy using this plug-in, Apache APISIX can get the SkyWalking Tracing Context and associate it with Tracing even if the SkyWalking Tracing plug-in is not turned on, as long as Apache SkyWalking is already integrated downstream.\nThe above Content is the log content, where the Apache APISIX metadata configuration is used to collect request-related information. You can later modify the Log Format to customize the log content by Plugin Metadata; please refer to the official documentation.\nHow to Use When using this plugin, since the SkyWalking plugin is \u0026ldquo;not enabled\u0026rdquo; by default, you need to manually modify the plugins section in the conf/default-apisix.yaml file to enable the plugin.\nplugins:...- error-log-logger...Then you can use the SkyWalking Tracing plug-in to get the tracing data directly, so you can verify that the Logging plug-in-related features are enabled and working properly.\nStep 1: Create a route Next, create a route and bind the SkyWalking Tracing plugin and the SkyWalking Logging plugin. More details of the plugin configuration can be found in the official Apache APISIX documentation.\ncurl -X PUT \u0026#39;http://192.168.0.108:9080/apisix/admin/routes/1001\u0026#39; \\ -H \u0026#39;X-API-KEY: edd1c9f034335f136f87ad84b625c8f1\u0026#39; \\ -H \u0026#39;Content-Type: application/json\u0026#39; \\ -d \u0026#39;{ \u0026#34;uri\u0026#34;: \u0026#34;/get\u0026#34;, \u0026#34;plugins\u0026#34;: { \u0026#34;skywalking\u0026#34;: { \u0026#34;sample_ratio\u0026#34;: 1 }, \u0026#34;skywalking-logger\u0026#34;: { \u0026#34;endpoint_addr\u0026#34;: \u0026#34;http://127.0.0.1:12800\u0026#34; } }, \u0026#34;upstream\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;roundrobin\u0026#34;, \u0026#34;nodes\u0026#34;: { \u0026#34;httpbin.org:80\u0026#34;: 1 } } }\u0026#39; Step 2: Log Processing On the Apache SkyWalking side, you can use LAL (Logger Analysis Language) scripts for log processing, such as Tag extraction, SkyWalking metadata correction, and so on.\nThe main purpose of Tag extraction here is to facilitate subsequent retrieval and to add dependencies to the Metrics statistics.
The following code can be used to configure the SkyWalking LAL script to complete the Tag extraction. For more information on how to use the SkyWalking LAL script, please refer to the official Apache SkyWalking documentation.\n# The default LAL script to save all logs, behaving like the versions before 8.5.0.rules:- name:defaultdsl:|filter { json { abortOnFailure false } extractor { tag routeId: parsed.route_id tag upstream: parsed.upstream tag clientIp: parsed.client_ip tag latency: parsed.latency } sink { } }After configuring the above LAL script in SkyWalking OAP Server the following log will be displayed.\nDetails of the expanded log are as follows.\nAs you can see from the above, displaying routeId, upstream and clientIp as key-value pairs is much easier than searching directly in the log body. This is because the Tag format not only supports log display format and search, but also generates information such as Metrics using MAL statistics.\nSkyWalking Error Logger Plugin The error-log-logger plug-in now supports the SkyWalking log format, and you can now use the http-error-log plug-in to quickly connect Apache APISIX error logs to Apache SkyWalking. Currently, error logs do not have access to SkyWalking Tracing Context information, and therefore cannot be directly associated with SkyWalking Tracing.\nThe main reason for the error log to be integrated into SkyWalking is to centralize the Apache APISIX log data and to make it easier to view all observable data within SkyWalking.\nHow to Use Since the error-log-logger plugin is \u0026ldquo;not enabled\u0026rdquo; by default, you still need to enable the plugin in the way mentioned above.\nplugins:...- error-log-logger...Step 1: Bind the route After enabling, you need to bind the plugin to routes or global rules. Here we take \u0026ldquo;bind routes\u0026rdquo; as an example.\ncurl -X PUT \u0026#39;http://192.168.0.108:9080/apisix/admin/plugin_metadata/error-log-logger\u0026#39; \\ -H \u0026#39;X-API-KEY: edd1c9f034335f136f87ad84b625c8f1\u0026#39; \\ -H \u0026#39;Content-Type: application/json\u0026#39; \\ -d \u0026#39;{ \u0026#34;inactive_timeout\u0026#34;: 10, \u0026#34;level\u0026#34;: \u0026#34;ERROR\u0026#34;, \u0026#34;skywalking\u0026#34;: { \u0026#34;endpoint_addr\u0026#34;: \u0026#34;http://127.0.0.1:12800/v3/logs\u0026#34; } }\u0026#39;  Note that the endpoint_addr is the SkyWalking OAP Server address and needs to have the URI (i.e. /v3/logs).\n Step 2: LAL Processing In much the same way as the Access Log processing, the logs are also processed by LAL when they reach SkyWalking OAP Server. Therefore, we can still use the SkyWalking LAL script to analyze and process the log messages.\nIt is important to note that the Error Log message body is in text format. If you are extracting tags, you will need to use regular expressions to do this. 
The message body is handled in a slightly different way from the Access Log: the Access Log uses JSON format, so its fields can be referenced directly through JSON parsing, but the rest of the process is largely the same.\nTags can also be used to optimize display and retrieval, and for subsequent metrics calculations using SkyWalking MAL.\nrules: - name: apisix-errlog dsl: | filter { text { regexp \u0026#34;(?\u0026lt;datetime\u0026gt;\\\\d{4}/\\\\d{2}/\\\\d{2} \\\\d{2}:\\\\d{2}:\\\\d{2}) \\\\[(?\u0026lt;level\u0026gt;\\\\w+)\\\\] \\\\d+\\\\#\\\\d+:( \\\\*\\\\d+ \\\\[(?\u0026lt;module\u0026gt;\\\\w+)\\\\] (?\u0026lt;position\u0026gt;.*\\\\.lua:\\\\d+): (?\u0026lt;function\u0026gt;\\\\w+\\\\(\\\\)):)* (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level if (parsed?.module) { tag module: parsed.module tag position: parsed.position tag function: parsed.function } } sink { } } After SkyWalking OAP Server applies the LAL script, some of the Tags are extracted from the logs, as shown below.\nSummary This article introduces two logging plug-ins for Apache APISIX that integrate with SkyWalking, making log collection and processing in Apache APISIX more convenient.\nWe hope that through this article, you will have a fuller understanding of the new features and be able to use Apache APISIX for centralized management of observable data more conveniently in the future.\n","title":"Apache APISIX Integrates with SkyWalking to Create a Full Range of Log Processing","url":"/blog/2021-12-08-apisix-integrate-skywalking-plugin/apisix-integrate-skywalking-plugin/"},{"content":"This document is one of the outcomes of Apache IoTDB - Apache SkyWalking Adapter in Summer 2021 of Open Source Promotion Plan. The design and development work is under the guidance of @jixuan1989 from IoTDB and @wu-sheng from SkyWalking. Thanks for their guidance and the help from the community.\nStart with SkyWalking Showcase Before using SkyWalking Showcase to quickly start with IoTDB, please ensure you have make installed and the Docker daemon running.\nPlease run the command below.\ngit clone https://github.com/LIU-WEI-git/skywalking-showcase.git cd skywalking-showcase make deploy.docker FEATURE_FLAGS=single-node.iotdb,agent The former flag single-node.iotdb deploys a single node of SkyWalking OAP v8.9.0 and SkyWalking RocketBot UI v8.9.0, with IoTDB v0.12.3 as storage. The latter flag agent deploys micro-services with the SkyWalking agent enabled, including agents for Java, a NodeJS server, the browser, and Python.\nThese shell commands may take a while. After the Docker images are pulled and running, please visit http://localhost:9999/. Then you will see the SkyWalking UI and data from the OAP backend.\nIf you want to use more functions of SkyWalking Showcase, please visit its official document and clone the official repository.\nStart Manually If you want to download and run IoTDB and SkyWalking manually, here is the guidance.\nInstall and Run IoTDB Apache IoTDB (Database for Internet of Things) is an IoT native database with high performance for data management and analysis, deployable on the edge and the cloud. It is a time-series database storage option for SkyWalking now. Please ensure your IoTDB server version is \u0026gt;= 0.12.3; a single-node deployment is sufficient. For more installation details, please see the official documents: IoTDB Quick Start and IoTDB Download Page.
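If you prefer the binary distribution over Docker, starting a single-node IoTDB server and its CLI usually looks like the sketch below. This assumes the default scripts shipped with IoTDB 0.12.x and the default root/root credentials; the directory names are illustrative and depend on the package you download:

```bash
# Start a single-node IoTDB server from the binary distribution
cd apache-iotdb-0.12.3-server-bin
sbin/start-server.sh

# In another terminal, connect with the CLI (default user/password: root/root)
cd apache-iotdb-0.12.3-cli-bin
sbin/start-cli.sh -h 127.0.0.1 -p 6667 -u root -pw root
```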
You could download it from Docker Hub as well.\nThere are several connection tools for IoTDB:\n Command Line Interface(CLI)\nIf iotdb-cli connects successfully, you will see   _____ _________ ______ ______ |_ _| | _ _ ||_ _ `.|_ _ \\ | | .--.|_/ | | \\_| | | `. \\ | |_) | | | / .'`\\ \\ | | | | | | | __'. _| |_| \\__. | _| |_ _| |_.' /_| |__) | |_____|'.__.' |_____| |______.'|_______/ version x.x.x IoTDB\u0026gt; login successfully IoTDB\u0026gt;  IoTDB-Grafana\nIoTDB-Grafana is a connector which we developed to show time series data in IoTDB, by reading data from IoTDB and sending it to Grafana.  Zeppelin-IoTDB\nYou could enable Zeppelin to operate IoTDB via SQL.   For more ecosystem integrations, please visit the official documents.\nWe will use iotdb-cli in the next examples.\nRun SkyWalking OAP Server There are some SkyWalking official documents which will help you get started. Please ensure your SkyWalking version is \u0026gt;= 8.9.0. We recommend you download SkyWalking OAP distributions from the official download page or pull the Docker images.\n SkyWalking Download Page SkyWalking Backend Setup SkyWalking UI Setup  Before starting the SkyWalking backend, please edit /config/application.yml and set storage.selector: ${SW_STORAGE:iotdb}, or set the environment variable SW_STORAGE=iotdb. All config options for IoTDB are listed below; edit them as needed for your local environment:\nstorage:selector:${SW_STORAGE:iotdb}iotdb:host:${SW_STORAGE_IOTDB_HOST:127.0.0.1}rpcPort:${SW_STORAGE_IOTDB_RPC_PORT:6667}username:${SW_STORAGE_IOTDB_USERNAME:root}password:${SW_STORAGE_IOTDB_PASSWORD:root}storageGroup:${SW_STORAGE_IOTDB_STORAGE_GROUP:root.skywalking}sessionPoolSize:${SW_STORAGE_IOTDB_SESSIONPOOL_SIZE:16}fetchTaskLogMaxSize:${SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE:1000}# the max number of fetch task log in a requestVisit IoTDB Server and Query SkyWalking Data There are some official documents about the data model and the IoTDB-SQL language:\n Data Model and Terminology DDL (Data Definition Language) DML (Data Manipulation Language) Maintenance Command  Example Model and Insert SQL Before giving any example, we set the time display type to long (CLI: set time_display_type=long).\nIn our design, we choose id, entity_id, node_type, service_id, service_group, trace_id as indexes and fix their appearance order. The values of these indexed fields are stored in the path, wrapped in double quotation marks, just like \u0026quot;value\u0026quot;.\nThere is a model named service_traffic with fields id, time_bucket, name, node_type, service_group. In order to see its data, we could use a query SQL: select * from root.skywalking.service_traffic align by device. root.skywalking is the default storage group, and align by device returns a more friendly result. The query result is as follows:\n   Time Device name     1637919540000 root.skywalking.service_traffic.\u0026ldquo;YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo;.\u0026ldquo;0\u0026rdquo;.\u0026quot;\u0026quot; application-demo   1637919600000 root.skywalking.service_traffic.\u0026ldquo;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026rdquo;.\u0026ldquo;0\u0026rdquo;.\u0026quot;\u0026quot; application-demo-mysql    Another example model is service_cpm which has fields id, service_id, total, value. Query its data with select * from root.skywalking.service_cpm align by device.
The result is following:\n   Time Device total value     1637919540000 root.skywalking.service_cpm.\u0026ldquo;202111261739_YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo;.\u0026ldquo;YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo; 2 2   1637919600000 root.skywalking.service_cpm.\u0026ldquo;202111261740_YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026rdquo;.\u0026ldquo;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026rdquo; 1 1   1637917200000 root.skywalking.service_cpm.\u0026ldquo;2021112617_YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo;.\u0026ldquo;YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo; 2 0    For the first data of service_traffic, the mapping between fields and values is following. Notice, all time_bucket are converted to timestamp(also named time in IoTDB) and the value of all indexed fields are stored in the Device path.\n   Field Value     id(indexed) YXBwbGljYXRpb24tZGVtbw==.1   time(converted from time_bucket) 1637919540000   name application-demo   node_type(indexed) 0   service_group(indexed) (empty string)    You could use the SQL below to insert example data.\ncreate storage group root.skywalking insert into root.skywalking.service_traffic.\u0026#34;YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;.\u0026#34;0\u0026#34;.\u0026#34;\u0026#34;(timestamp, name) values(1637919540000, \u0026#34;application-demo\u0026#34;) insert into root.skywalking.service_traffic.\u0026#34;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026#34;.\u0026#34;0\u0026#34;.\u0026#34;\u0026#34;(timestamp, name) values(1637919600000, \u0026#34;application-demo-mysql\u0026#34;) insert into root.skywalking.service_cpm.\u0026#34;202111261739_YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;.\u0026#34;YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;(timestamp, total, value) values(1637919540000, 2, 2) insert into root.skywalking.service_cpm.\u0026#34;202111261740_YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026#34;.\u0026#34;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026#34;(timestamp, total, value) values(1637919600000, 1, 1) insert into root.skywalking.service_cpm.\u0026#34;2021112617_YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;.\u0026#34;YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;(timestamp, total, value) values(1637917200000, 2, 0) Query SQL Now, let\u0026rsquo;s show some query examples.\n  Filter Query\n If you want to query name field of service_traffic, the query SQL is select name from root.skywalking.service_traffic align by device. If you want to query service_traffic with id = YXBwbGljYXRpb24tZGVtbw==.1, the query SQL is select * from root.skywalking.service_traffic.\u0026quot;YXBwbGljYXRpb24tZGVtbw==.1\u0026quot; align by device. If you want to query service_traffic with name = application-demo, the query SQL is select * from root.skywalking.service_traffic where name = \u0026quot;application-demo\u0026quot; align by device. Combining the above three, the query SQL is select name from root.skywalking.service_traffic.\u0026quot;YXBwbGljYXRpb24tZGVtbw==.1\u0026quot; where name = \u0026quot;application-demo\u0026quot; align by device.    Fuzzy Query\n If you want to query service_traffic with name contains application, the query SQL is select * from root.skywalking.service_traffic.*.*.* where name like '%application%' align by device.    Aggregate Query\nIoTDB only supports group by time and group by level. The former please refer to Down-Frequency Aggregate Query and the latter please refer to Aggregation By Level. Here is an example about group by level: select sum(total) from root.skywalking.service_cpm.*.* group by level = 3. 
We couldn\u0026rsquo;t get an expected result since our design makes the data of one model spread across multiple devices. So we don\u0026rsquo;t recommend using group by level to query SkyWalking backend data. You could refer to Discussion #3907 in the IoTDB community for more details.\n  Sort Query\nIoTDB only supports order by time, but we could use its select functions, top_k and bottom_k, to get the top/bottom k data. For example, select top_k(total, \u0026quot;k\u0026quot;=\u0026quot;3\u0026quot;) from root.skywalking.service_cpm.*.*. We don\u0026rsquo;t recommend using this to query SkyWalking backend data since its result format is not friendly. You could refer to Discussion #3888 in the IoTDB community for more details.\n  Pagination Query\nWe could use limit and offset to paginate the query result. Please refer to Row and Column Control over Query Results.\n  Delete\n Delete storage group:  delete storage group root.skywalking   Delete timeseries:  delete timeseries root.skywalking.service_cpm.*.*.total delete timeseries root.skywalking.service_cpm.\u0026quot;202111261739_YXBwbGljYXRpb24tZGVtbw==.1\u0026quot;.\u0026quot;YXBwbGljYXRpb24tZGVtbw==.1\u0026quot;.total   Delete data:  delete from root.skywalking.service_traffic delete from root.skywalking.service_traffic where time \u0026lt; 1637919540000      ","title":"The Application Guide of Apache IoTDB Storage Option","url":"/blog/2021-12-08-application-guide-of-iotdb-storage-option/"},{"content":"Non-breaking breakpoints are breakpoints specifically designed for live production environments. With non-breaking breakpoints, reproducing production bugs locally or in staging is conveniently replaced with capturing them directly in production.\nLike regular breakpoints, non-breaking breakpoints can be:\n placed almost anywhere added and removed at will set to fire on specific conditions expose internal application state persist as long as desired (even between application reboots)  The last feature is especially useful given non-breaking breakpoints can be left in production for days, weeks, and even months at a time while waiting to capture behavior that happens rarely and unpredictably.\nHow do non-breaking breakpoints work? If you\u0026rsquo;re familiar with general distributed tracing concepts, such as \u0026ldquo;traces\u0026rdquo; and \u0026ldquo;spans\u0026rdquo;, then you\u0026rsquo;re already broadly familiar with how non-breaking breakpoints work. Put simply, non-breaking breakpoints are small fragments of code added during runtime that, upon the proper conditions, save a portion of the application\u0026rsquo;s current state, and resume normal execution. In SkyWalking, this can be implemented by simply opening a new local span, adding some tags, and closing the local span (a sketch of this idea is shown below).\nWhile this process is relatively simple, the range of functionality that can be achieved through this technique is quite impressive. Save the current and global variables to create a non-breaking breakpoint; add the ability to format log messages to create just-in-time logging; add the ability to trigger metric telemetry to create real-time KPI monitoring. If you keep moving in this direction, you eventually enter the realm of live debugging/coding, and this is where Source++ comes in.\nLive Coding Platform Source++ is an open-source live coding platform designed for production environments, powered by Apache SkyWalking.
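As a rough illustration of the local-span idea mentioned above, the Java sketch below shows how a hand-written code fragment could capture state into the active span using SkyWalking's application toolkit (apm-toolkit-trace). This is only a simplified sketch for clarity, not how Source++ actually generates its probes, and the class name and the spp.-prefixed tag names are illustrative:

```java
import org.apache.skywalking.apm.toolkit.trace.ActiveSpan;
import org.apache.skywalking.apm.toolkit.trace.Trace;

public class OrderService {

    // @Trace opens a local span around this method; the agent closes it when the method returns.
    @Trace(operationName = "OrderService.calculateTotal")
    public long calculateTotal(long price, int quantity) {
        long total = price * quantity;

        // The "non-breaking breakpoint" boils down to tagging the active span with the
        // state we want to capture, then letting execution continue as normal.
        ActiveSpan.tag("spp.breakpoint-id", "demo-breakpoint");
        ActiveSpan.tag("spp.local.price", String.valueOf(price));
        ActiveSpan.tag("spp.local.quantity", String.valueOf(quantity));
        ActiveSpan.tag("spp.local.total", String.valueOf(total));
        return total;
    }
}
```

Source++ automates exactly this kind of runtime instrumentation, so no code change or redeploy is needed.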
Using Source++, developers can add breakpoints, logs, metrics, and distributed tracing to live production software in real-time on-demand, right from their IDE or CLI. While capable of stand-alone deployment, the latest version of Source++ makes it easier than ever to integrate into existing Apache SkyWalking installations. This process can be completed in a few minutes and is easy to customize for your specific needs.\nFor a better idea of how Source++ works, take a look at the following diagram:\nIn this diagram, blue components represent existing SkyWalking architecture, black components represent new Source++ architecture, and the red arrows show how non-breaking breakpoints make their way from production to IDEs. A process that is facilitated by Source++ components: Live Probe, Live Processors, Live Platform, and Live Interface.\nLive Probe The Live Probe is currently available for JVM and Python applications. It runs alongside the SkyWalking agent and is responsible for dynamically adding and removing code fragments based on valid instrumentation requests from developers. These code fragments in turn make use of the SkyWalking agent\u0026rsquo;s internal APIs to facilitate production instrumentation.\nLive Processors Live Processors are responsible for finding, extracting, and transforming data found in distributed traces produced via live probes. They run alongside SkyWalking collectors and implement additional post-processing logic, such as PII redaction. Live processors work via uniquely identifiable tags (prefix spp.) added previously by live probes.\nOne could easily view a non-breaking breakpoint ready for processing using Rocketbot, however, it will look like this:\nEven though the above does not resemble what\u0026rsquo;s normally thought of as a breakpoint, the necessary information is there. With live processors added to your SkyWalking installation, this data is refined and may be viewed more traditionally via live interfaces.\nLive Platform The Live Platform is the core part of the Source++ architecture. Unlike the live probe and processors, the live platform does not have a direct correlation with SkyWalking components. It is a standalone server responsible for validating and distributing production breakpoints, logs, metrics, and traces. Each component of the Source++ architecture (probes, processors, interfaces) communicates with each other through the live platform. It is important to ensure the live platform is accessible to all of these components.\nLive Interface Finally, with all the previous parts installed, we\u0026rsquo;re now at the component software developers will find the most useful. A Live Interface is what developers use to create, manage, and view non-breaking breakpoints, and so on. There are a few live interfaces available:\n JetBrains Plugin CLI  With the Live Instrument Processor enabled, and the JetBrains Plugin installed, non-breaking breakpoints appear as such:\nThe above should be a sight far more familiar to software developers. Beyond the fact that you can\u0026rsquo;t step through execution, non-breaking breakpoints look and feel just like regular breakpoints.\n For more details and complete setup instructions, please visit:\n https://github.com/sourceplusplus/deploy-skywalking  ","title":"Extending Apache SkyWalking with non-breaking breakpoints","url":"/blog/2021-12-06-extend-skywalking-with-nbb/"},{"content":"SkyWalking Kubernetes Helm Chart 4.2.0 is released. 
Go to downloads page to find release tars.\n Fix Can\u0026rsquo;t evaluate field Capabilities in type interface{}. Update the document let that all docker images use the latest version. Fix missing nodes resource permission when the OAP using k8s-mesh analyzer. Fix bug that customized config files are not loaded into es-init job. Add skywalking satellite support.  ","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.2.0","url":"/events/release-apache-skywalking-kubernetes-helm-chart-4.2.0/"},{"content":"SkyWalking Satellite 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Support partition queue. Using byte array to transmit the ALS streaming, Native tracing segment and log, reducing en/decoding cpu usage. Support using the new ALS protocol to transmit the Envoy accesslog. Support transmit the Native Meter Batch protocol.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 0.4.0","url":"/events/release-apache-skwaylking-satellite-0-4-0/"},{"content":"SkyWalking 8.9.0 is released. Go to downloads page to find release tars.\nChanges by Version\nProject  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. 
The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  
All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.9.0","url":"/events/release-apache-skywalking-apm-8-9-0/"},{"content":"Chaos Mesh 是一个开源的云原生混沌工程平台,借助 Chaos Mesh,用户可以很方便地对服务注入异常故障,并配合 Chaos Dashboard 实现对整个混沌实验运行状况的监测 。然而,对混沌实验运行情况的监控并不能告诉我们应用服务性能的变化。从系统可观测性的角度来说,我们可能无法单纯通过混沌实验的动态了解故障的全貌,这也阻碍了我们对系统和故障的进一步了解,调试。\nApache SkyWalking 是一个开源的 APM (Application Performance Monitor) 系统,可以对云原生服务提供监控、跟踪、诊断等功能。SkyWalking 支持收集 Event(事件),可在 Dashboard 中查看分布式系统中发生了哪些事件,并可以直观地观测到不同 Event 对服务性能造成的影响,和 Chaos Mesh 结合使用,便可为混沌实验造成的服务影响提供监控。\n本教程将分享如何通过将 SkyWalking 和 Chaos Mesh 结合,运用 Event 信息监控,实时了解混沌实验对应用服务性能造成的影响。\n准备工作  创建 Skywalking 集群,具体可以参考 SkyWalking Readme。 部署 Chaos Mesh,推荐使用 helm 安装。 安装 Java 测试工具 JMeter (其他工具亦可,仅用于增加服务负载) 如果仅作为 Demo 使用,可以参考 chaos-mesh-on-skywalking 这个仓库进行配置  Step 1 - 访问 SkyWalking 集群 安装 SkyWalking 后,就可以访问它的UI了,但因为还没有服务进行监控,这里还需要添加服务并进行 Agent 埋点设置。本文选用轻量级微服务框架 Spring Boot 作为埋点对象搭建一个简易 Demo 环境。\n可以参考 chaos-mesh-on-skywalking 仓库中的 demo-deployment.yaml 文件创建。之后使用 kubectl apply -f demo-deployment.yaml -n skywalking 进行部署。部署成功后即可在SkyWalking-UI 中看到实时监控的服务信息。\n注意:因为 Spring Boot 的端口也是8080,在端口转发时要避免和 **SkyWalking **的端口冲突,比如使用 kubectl port-forward svc/spring-boot-skywalking-demo 8079:8080 -n skywalking 。\nStep 2 - 部署 SkyWalking Kubernetes Event Exporter SkyWalking Kubernetes Event Exporter 可以用来监控和过滤 Kubernetes 集群中的 Event ,通过设置过滤条件筛选出需要的 Event,并将这些 Event 发送到 SkyWalking 后台, 这样就可以通过 SkyWalking 观察到你的 Kubernetes 集群中的Event 何时影响到服务的各项指标了。如果想要一条命令部署,可以参考此配置创建 yaml 文件 ,设置 filters 和 exporters 的参数后,使用 kubectl apply 进行部署。\nStep 3 - 使用 JMeter 对服务加压 为了达到更好的观察效果,需要先对 Spring Boot 增加服务负载,本文选择使用 JMeter 这一使用广泛的 Java 压力测试工具来对服务加压。\n通过 JMeter 对 localhost:8079 进行压测,添加5个线程持续进行加压。 通过 SkyWalking Dashboard 可以看到,目前访问成功率为100%,服务负载大约在5300 CPM (Calls Per Minute)。\nStep 4 - Chaos Mesh 注入故障,观察效果 做好了这些准备工便可以使用 Chaos Dashboard 进行压力场景模拟,并在实验进程中观察服务性能的变化。\n以下使用不同 Stress Chaos 配置,观测对应服务性能变化:\n  CPU 负载10%,内存负载128 MB 。\n混沌实验开始和结束的时间点标记可以通过右侧开关显示在在图表中,将鼠标移至短线出可以看到是实验的 Applied 或 Recovered。可以看到两个绿色短线之间的时间段里,服务处理调用的的性能降低,为4929 CPM,在实验结束后,性能恢复正常。\n  CPU load 增加到50%,发现服务负载进一步降低至4307 CPM。\n  极端情况下 CPU 负载达到100%,服务负载降至无混沌实验时的40% 。\n  因为 Linux 系统下的进程调度并不会让某个进程一直占据 CPU,所以即使实在 CPU 满载的极端情况下,该部署的 Spring Boot Demo 仍可以处理40%的访问请求。\n小结 通过 SkyWalking 与 Chaos Mesh 的结合,我们可以清晰的观察到服务在何时受到混沌实验的影响,在注入混沌后服务的表现性能又将如何。SkyWalking 与 Chaos Mesh 的结合使得我们轻松地观察到了服务在各种极端情况下的表现,增强了我们对服务的信心。\nChaos Mesh 在 2021 年成长了许多。为了更多地了解用户在实践混沌工程方面的经验,以便持续完善和提升对用户的支持,社区发起了 Chaos Mesh 用户问卷调查,点击【阅读原文】参与调查,谢谢!\nhttps://www.surveymonkey.com/r/X78WQPC\n欢迎大家加入 Chaos Mesh 社区,加入 CNCF Slack (slack.cncf.io) 底下的 Chaos Mesh 频道: project-chaos-mesh,一起参与到项目的讨论与开发中来!大家在使用过程发现 Bug 或缺失什么功能,也可以直接在 GitHub (https://github.com/chaos-mesh) 上提 Issue 或 PR。\n","title":"Chaos Mesh X SkyWalking: 可观测的混沌工程","url":"/zh/2021-11-29-better-observability-for-chaos-engineering/"},{"content":"This plugin is one of the outcomes of Apache IoTDB - Apache SkyWalking Adapter in Summer 2021 of Open Source Promotion Plan. The design and development work is under the guidance of @jixuan1989 from IoTDB and @wu-sheng from SkyWalking. Thanks for their guidance and the help from community.\nIoTDB Storage Plugin Setup IoTDB is a time-series database from Apache, which is one of the storage plugin options. If you want to use iotdb as SkyWalking backend storage, please refer to the following configuration.\nIoTDB storage plugin is still in progress. 
Its efficiency will improve in the future.\nstorage:selector:${SW_STORAGE:iotdb}iotdb:host:${SW_STORAGE_IOTDB_HOST:127.0.0.1}rpcPort:${SW_STORAGE_IOTDB_RPC_PORT:6667}username:${SW_STORAGE_IOTDB_USERNAME:root}password:${SW_STORAGE_IOTDB_PASSWORD:root}storageGroup:${SW_STORAGE_IOTDB_STORAGE_GROUP:root.skywalking}sessionPoolSize:${SW_STORAGE_IOTDB_SESSIONPOOL_SIZE:16}fetchTaskLogMaxSize:${SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE:1000}# the max number of fetch task log in a requestAll connection related settings, including host, rpcPort, username, and password are found in application.yml. Please ensure the IoTDB version \u0026gt;= 0.12.3.\nIoTDB Introduction Apache IoTDB (Database for Internet of Things) is an IoT native database with high performance for data management and analysis, deployable on the edge and the cloud. It is a time-series database donated by Tsinghua University to Apache Foundation.\nThe Data Model of IoTDB We can use the tree structure to understand the data model of iotdb. If divided according to layers, from high to low is: Storage Group \u0026ndash; (LayerName) \u0026ndash; Device \u0026ndash; Measurement. From the top layer to a certain layer below it is called a Path. The top layer is Storage Group (must start with root), the penultimate layer is Device, and the bottom layer is Measurement. There can be many layers in the middle, and each layer is called a LayerName. For more information, please refer to the Data Model and Terminology in the official document of the version 0.12.x.\nThe Design of IoTDB Storage Plugin The Data Model of SkyWalking Each storage model of SkyWalking can be considered as a Model, which contains multiple Columns. Each Column has ColumnName and ColumnType attributes, representing the name and type of Column respectively. Each Column named ColumnName stores multiple Value of the ColumnType. From a relational database perspective, Model is a relational table and Column is the field in a relational table.\nSchema Design Since each LayerName of IoTDB is stored in memory, it can be considered as an index, and this feature can be fully utilized to improve IoTDB query performance. The default storage group is root.skywalking, it will occupy the first and the second layer of the path. The model name is stored at the next layer of the storage group (the third layer of the path), such as root.skywalking.model_name.\nSkyWalking has its own index requirement, but it isn\u0026rsquo;t applicable to IoTDB. Considering query frequency and referring to the implementation of the other storage options, we choose id, entity_id, node_type, service_id, service_group, trace_id as indexes and fix their appearance order in the path. The value of these indexed columns will occupy the last few layers of the path. If we don\u0026rsquo;t fix their order, we cannot map their value to column, since we only store their value in the path but don\u0026rsquo;t store their column name. The other columns are treated as Measurements.\nThe mapping from SkyWalking data model to IoTDB data model is below.\n   SkyWalking IoTDB     Database Storage Group (1st and 2nd layer of the path)   Model LayerName (3rd layer of the path)   Indexed Column stored in memory through hard-code   Indexed Column Value LayerName (after 3rd layer of the path)   Non-indexed Column Measurement   Non-indexed Value the value of Measurement    For general example There are model1(column11, column12), model2(column21, column22, column23), model3(column31). 
Underline indicates that the column requires to be indexed. In this example, modelx_name refers to the name of modelx, columnx_name refers to the name of columnx and columnx_value refers to the value of columnx.\nBefore these 3 model storage schema, here are some points we need to know.\n In order to avoid the value of indexed column contains dot(.), all of them should be wrapped in double quotation mark since IoTDB use dot(.) as the separator in the path. We use align by device in query SQL to get a more friendly result. For more information about align by device, please see DML (Data Manipulation Language) and Query by device alignment.  The path of them is following:\n The Model with index:  root.skywalking.model1_name.column11_value.column12_name root.skywalking.model2_name.column21_value.column22_value.column23_name   The Model without index:  root.skywalking.model3_name.column31_Name    Use select * from root.skywalking.modelx_name align by device respectively to get their schema and data. The SQL result is following:\n   Time Device column12_name     1637494020000 root.skywalking.model1_name.\u0026ldquo;column11_value\u0026rdquo; column12_value       Time Device column23_name     1637494020000 root.skywalking.model2_name.\u0026ldquo;column21_value\u0026rdquo;.\u0026ldquo;column22_value\u0026rdquo; column23_value       Time Device column31_name     1637494020000 root.skywalking.model3_name column31_value    For specific example Before 5 typical examples, here are some points we need to know.\n The indexed columns and their order: id, entity_id, node_type, service_id, service_group, trace_id. Other columns are treated as non indexed and stored as Measurement. The storage entity extends Metrics or Record contains a column time_bucket. The time_bucket column in SkyWalking Model can be converted to the timestamp of IoTDB when inserting data. We don\u0026rsquo;t need to store time_bucket separately. In the next examples, we won\u0026rsquo;t list time_bucket anymore. The Time in query result corresponds to the timestamp in insert SQL and API.   Metadata: service_traffic\nservice_traffic entity has 4 columns: id, name, node_type, service_group. When service_traffic entity includes a row with timestamp 1637494020000, the row should be as following: (Notice: the value of service_group is null.)     id name node_type service_group     ZTJlLXNlcnZpY2UtcHJvdmlkZXI=.1 e2e-service-provider 0     And the row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.service_traffic align by device)\n   Time Device name     1637494020000 root.skywalking.service_traffic.\u0026ldquo;ZTJlLXNlcnZpY2UtcHJvdmlkZXI=.1\u0026rdquo;.\u0026ldquo;0\u0026rdquo;.\u0026ldquo;null\u0026rdquo; e2e-service-provider    The value of id, node_type and service_group are stored in the path in the specified order. 
Notice: If those index value is null, it will be transformed to a string \u0026ldquo;null\u0026rdquo;.\nMetrics: service_cpm\nservice_cpm entity has 4 columns: id, service_id, total, value.\nWhen service_cpm entity includes a row with timestamp 1637494020000, the row should be as following:     id service_id total value     202111211127_ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1 ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1 4 4    And the row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.service_cpm align by device)\n   Time Device total value     1637494020000 root.skywalking.service_cpm.\u0026ldquo;202111211127_ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1\u0026rdquo;.\u0026ldquo;ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1\u0026rdquo; 4 4    The value of id and service_id are stored in the path in the specified order.\nTrace segment: segment\nsegment entity has 10 columns at least: id, segment_id, trace_id, service_id, service_instance_id, endpoint_id, start_time, latency, is_error, data_binary. In addition, it could have variable number of tags.\nWhen segment entity includes 2 rows with timestamp 1637494106000 and 1637494134000, these rows should be as following. The db.type and db.instance are two tags. The first data has two tags, and the second data doesn\u0026rsquo;t have tag.     id segment_id trace_id service_id service_instance_id endpoint_id start_time latency is_error data_binary db.type db.instance     id_1 segment_id_1 trace_id_1 service_id_1 service_instance_id_1 endpoint_id_1 1637494106515 1425 0 data_binary_1 sql testdb   id_2 segment_id_2 trace_id_2 service_id_2 service_instance_id_2 endpoint_id_2 2637494106765 1254 0 data_binary_2      And these row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.segment align by device)\n   Time Device start_time data_binary latency endpoint_id is_error service_instance_id segment_id \u0026ldquo;db.type\u0026rdquo; \u0026ldquo;db.instance\u0026rdquo;     1637494106000 root.skywalking.segment.\u0026ldquo;id_1\u0026rdquo;.\u0026ldquo;service_id_1\u0026rdquo;.\u0026ldquo;trace_id_1\u0026rdquo; 1637494106515 data_binary_1 1425 endpoint_id_1 0 service_instance_id_1 segment_id_1 sql testdb   1637494106000 root.skywalking.segment.\u0026ldquo;id_2\u0026rdquo;.\u0026ldquo;service_id_2\u0026rdquo;.\u0026ldquo;trace_id_2\u0026rdquo; 1637494106765 data_binary_2 1254 endpoint_id_2 0 service_instance_id_2 segment_id_2 null null    The value of id, service_id and trace_id are stored in the path in the specified order. Notice: If the measurement contains dot(.), it will be wrapped in double quotation mark since IoTDB doesn\u0026rsquo;t allow it. In order to align, IoTDB will append null value for those data without tag in some models.\nLog\nlog entity has 12 columns at least: id, unique_id, service_id, service_instance_id, endpoint_id, trace_id, trace_segment_id, span_id, content_type, content, tags_raw_data, timestamp. In addition, it could have variable number of tags. When log entity includes a row with timestamp 1637494052000, the row should be as following and the level is a tag.     
id unique_id service_id service_instance_id endpoint_id trace_id trace_segment_id span_id content_type content tags_raw_data timestamp level     id_1 unique_id_1 service_id_1 service_instance_id_1 endpoint_id_1 trace_id_1 trace_segment_id_1 0 1 content_1 tags_raw_data_1 1637494052118 INFO    And the row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.log align by device)\n   Time Device unique_id content_type span_id tags_raw_data \u0026ldquo;timestamp\u0026rdquo; level service_instance_id content trace_segment_id     1637494052000 root.skywalking.\u0026ldquo;id_1\u0026rdquo;.\u0026ldquo;service_id_1\u0026rdquo;.\u0026ldquo;trace_id_1\u0026rdquo; unique_id_1 1 0 tags_raw_data_1 1637494052118 INFO service_instance_id_1 content_1 trace_segment_id_1    The value of id, service_id and trace_id are stored in the path in the specified order. Notice: If the measurement named timestamp, it will be wrapped in double quotation mark since IoTDB doesn\u0026rsquo;t allow it.\nProfiling snapshots: profile_task_segment_snapshot\nprofile_task_segment_snapshot entity has 6 columns: id, task_id, segment_id, dump_time, sequence, stack_binary. When profile_task_segment_snapshot includes a row with timestamp 1637494131000, the row should be as following.     id task_id segment_id dump_time sequence stack_binary     id_1 task_id_1 segment_id_1 1637494131153 0 stack_binary_1    And the row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.profile_task_segment_snapshot align by device)\n   Time Device sequence dump_time stack_binary task_id segment_id     1637494131000 root.skywalking.profile_task_segment_snapshot.\u0026ldquo;id_1\u0026rdquo; 0 1637494131153 stack_binary_1 task_id_1 segment_id_1    The value of id is stored in the path in the specified order.\nQuery In this design, part of the data is stored in memory through LayerName, so data from the same Model is spread across multiple devices. Queries often need to cross multiple devices. But in this aspect, IoTDB\u0026rsquo;s support is not perfect in cross-device aggregation query, sort query and pagination query. In some cases, we have to use a violence method that query all data meets the condition and then aggregate, sort or paginate them. So it might not be efficient. For detailed descriptions, please refer to the Discussion submitted in IoTDB community below.\n Discussion:  一个有关排序查询的问题(A problem about sort query)#3888 一个有关聚合查询的问题(A problem about aggregation query)#3907    Query SQL for the general example above:\n-- query all data in model1 select * from root.skywalking.model1_name align by device; -- query the data in model2 with column22_value=\u0026#34;test\u0026#34; select * from root.skywalking.model2_name.*.\u0026#34;test\u0026#34; align by device; -- query the sum of column23 in model2 and group by column21 select sum(column23) from root.skywalking.model2_name.*.* group by level = 3; iotdb-cli is a useful tools to connect and visit IoTDB server. More information please refer Command Line Interface(CLI)\n","title":"The Design of Apache IoTDB Storage Option","url":"/blog/2021-11-23-design-of-iotdb-storage-option/"},{"content":"SkyWalking Infra E2E 1.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support using setup.init-system-environment to import environment. Support body and headers in http trigger. Add install target in makefile. Stop trigger when cleaning up. Change interval setting to Duration style. Add reasonable default cleanup.on. 
Support float value compare when type not match Support reuse verify.cases. Ignore trigger when not set. Support export KUBECONFIG to the environment. Support using setup.kind.import-images to load local docker images. Support using setup.kind.expose-ports to declare the resource port for host access. Support save pod/container std log on the Environment.  Bug Fixes  Fix that trigger is not continuously triggered when running e2e trigger. Migrate timeout config to Duration style and wait for node ready in KinD setup. Remove manifest only could apply the default namespace resource.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Infra E2E 1.1.0","url":"/events/release-apache-skywalking-infra-e2e-1-1-0/"},{"content":"SkyWalking Cloud on Kubernetes 0.4.0 is released. Go to downloads page to find release tars.\n  Support special characters in the metric selector of HPA metric adapter.\n  Add the namespace to HPA metric name.\n  Features\n Add Java agent injector. Add JavaAgent and Storage CRDs of the operator.    Vulnerabilities\n CVE-2021-3121: An issue was discovered in GoGo Protobuf before 1.3.2. plugin/unmarshal/unmarshal.go lacks certain index validation CVE-2020-29652: A nil pointer dereference in the golang.org/x/crypto/ssh component through v0.0.0-20201203163018-be400aefbc4c for Go allows remote attackers to cause a denial of service against SSH servers.    Chores\n Bump up GO to 1.17. Bump up k8s api to 0.20.11. Polish documents. Bump up SkyWalking OAP to 8.8.1.    ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.4.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-4-0/"},{"content":"SkyWalking Satellite 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support load-balance GRPC client with the static server list. Support load-balance GRPC client with the Kubernetes selector. Support transmit Envoy ALS v2/v3 protocol. Support transmit Envoy Metrics v2/v3 protocol.  Bug Fixes  Fix errors when converting meter data from histogram and summary.#75  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 0.3.0","url":"/events/release-apache-skwaylking-satellite-0-3-0/"},{"content":"SkyWalking Java Agent 8.8.0 is released. Go to downloads page to find release tars. Changes by Version\n8.8.0  Split Java agent from the main monorepo. It is a separate repository and going to release separately. Support JDK 8-17 through upgrading byte-buddy to 1.11.18. Upgrade JDK 11 in dockerfile and remove unused java_opts. DataCarrier changes a #consume API to add properties as a parameter to initialize consumer when use Class\u0026lt;? extends IConsumer\u0026lt;T\u0026gt;\u0026gt; consumerClass. Support Multiple DNS period resolving mechanism Modify Tags.STATUS_CODE field name to Tags.HTTP_RESPONSE_STATUS_CODE and type from StringTag to IntegerTag, add Tags.RPC_RESPONSE_STATUS_CODE field to hold rpc response code value. Fix kafka-reporter-plugin shade package conflict Add all config items to agent.conf file for convenient containerization use cases. Advanced Kafka Producer configuration enhancement. Support mTLS for gRPC channel. fix the bug that plugin record wrong time elapse for lettuce plugin fix the bug that the wrong db.instance value displayed on Skywalking-UI when existing multi-database-instance on same host port pair. Add thrift plugin support thrift TMultiplexedProcessor. 
Add benchmark result for exception-ignore plugin and polish plugin guide. Provide Alibaba Druid database connection pool plugin. Provide HikariCP database connection pool plugin. Fix NumberFormat exception in jdbc-commons plugin when MysqlURLParser parser jdbcurl Provide Alibaba Fastjson parser/generator plugin. Provide Jackson serialization and deserialization plugin. Fix a tracing context leak of SpringMVC plugin, when an internal exception throws due to response can\u0026rsquo;t be found. Make GRPC log reporter sharing GRPC channel with other reporters of agent. Remove config items of agent.conf, plugin.toolkit.log.grpc.reporter.server_host, plugin.toolkit.log.grpc.reporter.server_port, and plugin.toolkit.log.grpc.reporter.upstream_timeout. rename plugin.toolkit.log.grpc.reporter.max_message_size to log.max_message_size. Implement Kafka Log Reporter. Add config item of agnt.conf, plugin.kafka.topic_logging. Add plugin to support Apache HttpClient 5. Format SpringMVC \u0026amp; Tomcat EntrySpan operation name to METHOD:URI. Make HTTP method in the operation name according to runtime, rather than previous code-level definition, which used to have possibilities including multiple HTTP methods. Fix the bug that httpasyncclient-4.x-plugin does not take effect every time. Add plugin to support ClickHouse JDBC driver. Fix version compatibility for JsonRPC4J plugin. Add plugin to support Apache Kylin-jdbc 2.6.x 3.x 4.x Fix instrumentation v2 API doesn\u0026rsquo;t work for constructor instrumentation. Add plugin to support okhttp 2.x Optimize okhttp 3.x 4.x plugin to get span time cost precisely Adapt message header properties of RocketMQ 4.9.x  Documentation All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.8.0","url":"/events/release-apache-skywalking-java-agent-8-8-0/"},{"content":"SkyWalking CLI 0.9.0 is released. Go to downloads page to find release tars.\nFeatures  Add the sub-command dependency instance to query instance relationships (#117)  Bug Fixes  fix: multiple-linear command\u0026rsquo;s labels type can be string type (#122) Add missing dest-service-id dest-service-name to metrics linear command (#121) Fix the wrong name when getting destInstance flag (#118)  Chores  Upgrade Go version to 1.16 (#120) Migrate tests to infra-e2e, overhaul the flags names (#119) Publish Docker snapshot images to ghcr (#116) Remove dist directory when build release source tar (#115)  ","title":"Release Apache SkyWalking CLI 0.9.0","url":"/events/release-apache-skywalking-cli-0-9-0/"},{"content":"SkyWalking Eyes 0.2.0 is released. 
Go to downloads page to find release tars.\n  Dependency License\n Support resolving go.mod for Go Support resolving pom.xml for maven (#50) Support resolving jars' licenses (#53) Support resolving npm dependencies' licenses (#48) Support saving dependencies' licenses (#69) Add dependency check to check dependencies license compatibilities (#58)    License Header\n fix command supports more languages:  Add support for plantuml (#42) Add support for PHP (#40) Add support for Twig template language (#39) Add support for Smarty template language (#38) Add support for MatLab files (#37) Add support for TypeScript language files (#73) Add support for nextflow files (#65) Add support for perl files (#63) Add support for ini extension (#24) Add support for R files (#64) Add support for .rst files and allow fixing header of a single file (#25) Add support for Rust files (#29) Add support for bat files (#32)   Remove .tsx from XML language extensions Honor Python\u0026rsquo;s coding directive (#68) Fix file extension conflict between RenderScript and Rust (#66) Add comment type to cython declaration (#62) header fix: respect user configured license content (#60) Expose license-location-threshold as config item (#34) Fix infinite recursive calls when containing symbolic files (#33) defect: avoid crash when no comment style is found (#23)    Project\n Enhance license identification (#79) Support installing via go install (#76) Speed up the initialization phase (#75) Resolve absolute path in .gitignore to relative path (#67) Reduce img size and add npm env (#59) Make the config file and log level in GitHub Action configurable (#56, #57) doc: add a PlantUML activity diagram of header fixing mechanism (#41) Fix bug: license file is not found but reported message is nil (#49) Add all well-known licenses and polish normalizers (#47) Fix compatibility issues in Windows (#44) feature: add reasonable default config to allow running in a new repo without copying config file (#28) chore: only build linux binary when building inside docker (#26) chore: upgrade to go 1.16 and remove go-bindata (#22) Add documentation about how to use via docker image (#20)    ","title":"Release Apache SkyWalking Eyes 0.2.0","url":"/events/release-apache-skywalking-eyes-0-2-0/"},{"content":"SkyWalking Client JS 0.7.0 is released. Go to downloads page to find release tars.\n Support setting time interval to report segments. Fix segments report only send once. Fix apache/skywalking#7335. Fix apache/skywalking#7793. Fix firstReportedError for SPA.  ","title":"Release Apache SkyWalking Client JS 0.7.0","url":"/events/release-apache-skywalking-client-js-0-7-0/"},{"content":"SkyWalking 8.8.1 is released. Go to downloads page to find release tars.\nThis is a bugfix version that fixes several important bugs in previous version 8.8.0.\nChanges OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.8.1","url":"/events/release-apache-skywalking-apm-8-8-1/"},{"content":"Kai Wan has been involved in SkyWalking for over half a year since the first PR(Dec 21, 2020). He majorly focuses on the Service Mesh and metrics analysis engine(MAL). 
And recently add the support of OpenAPI specification into SkyWalking.\nHe learnd fast, and dedicates hours every day on the project, and has finished 37 PRs 11,168 LOC++ 1,586 LOC\u0026ndash;. In these days, he is working with PMC and infra-e2e team to upgrade our main repository\u0026rsquo;s test framework to the NGET(Next Generation E2E Test framework).\nIt is our honor to have him join the team.\n","title":"Welcome Kai Wan (万凯) to join the PMC","url":"/events/welcome-kai-wan-to-join-the-pmc/"},{"content":"SkyWalking 8.8.0 is released. Go to downloads page to find release tars.\nThis is a first OAP server + UI release, Java agent will be release independently. Check the latest compatibility document to find suitable agent releases.\nChanges by Version\nProject  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. 
Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.8.0","url":"/events/release-apache-skywalking-apm-8-8-0/"},{"content":"SkyWalking CLI 0.8.0 is released. 
Go to downloads page to find release tars.\n  Features\n Add profile command Add logs command Add dependency command Support query events protocol Support auto-completion for bash and powershell    Bug Fixes\n Fix missing service instance name in trace command    Chores\n Optimize output by adding color to help information Set display style explicitly for commands in the test script Set different default display style for different commands Add scripts for quick install Update release doc and add scripts for release split into multiple workflows to speed up CI    ","title":"Release Apache SkyWalking CLI 0.8.0","url":"/events/release-apache-skywalking-cli-0-8-0/"},{"content":"SkyWalking Satellite 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Set MAXPROCS according to real cpu quota. Update golangci-lint version to 1.39.0. Update protoc-gen-go version to 1.26.0. Add prometheus-metrics-fetcher plugin. Add grpc client plugin. Add nativelog-grpc-forwarder plugin. Add meter-grpc-forwarder plugin. Support native management protocol. Support native tracing protocol. Support native profile protocol. Support native CDS protocol. Support native JVM protocol. Support native Meter protocol. Support native Event protocol. Support native protocols E2E testing. Add Prometheus service discovery in Kubernetes.  Bug Fixes  Fix the data race in mmap queue. Fix channel blocking in sender module. Fix pipes.sender.min_flush_events config could not support min number. Remove service name and instance name labels from Prometheus fetcher.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 0.2.0","url":"/events/release-apache-skwaylking-satellite-0-2-0/"},{"content":"SkyWalking Python 0.7.0 is released. Go to downloads page to find release tars.\n  Feature:\n Support collecting and reporting logs to backend (#147) Support profiling Python method level performance (#127 Add a new sw-python CLI that enables agent non-intrusive integration (#156) Add exponential reconnection backoff strategy when OAP is down (#157) Support ignoring traces by http method (#143) NoopSpan on queue full, propagation downstream (#141) Support agent namespace. (#126) Support secure connection option for GRPC and HTTP (#134)    Plugins:\n Add Falcon Plugin (#146) Update sw_pymongo.py to be compatible with cluster mode (#150) Add Python celery plugin (#125) Support tornado5+ and tornado6+ (#119)    Fixes:\n Remove HTTP basic auth credentials from log, stacktrace, segment (#152) Fix @trace decorator not work (#136) Fix grpc disconnect, add SW_AGENT_MAX_BUFFER_SIZE to control buffer queue size (#138)    Others:\n Chore: bump up requests version to avoid license issue (#142) Fix module wrapt as normal install dependency (#123) Explicit component inheritance (#132) Provide dockerfile \u0026amp; images for easy integration in containerized scenarios (#159)    ","title":"Release Apache SkyWalking Python 0.7.0","url":"/events/release-apache-skywalking-python-0-7-0/"},{"content":"SkyWalking Infra E2E 1.0.0 is released. Go to downloads page to find release tars.\nFeatures  Support using docker-compose to setup the environment. Support using the HTTP request as trigger. Support verify test case by command-line or file with retry strategy. Support GitHub Action.  
Bug Fixes  Issues and PR  All issues are here. All pull requests are here.  ","title":"Release Apache SkyWalking Infra E2E 1.0.0","url":"/events/release-apache-skywalking-infra-e2e-1-0-0/"},{"content":"The Java Agent of Apache SkyWalking has supported profiling since v7.0.0, which enables users to troubleshoot the root cause of performance issues, and now we bring it into the Python Agent. In this blog, we will show you how to use it, and we will introduce the mechanism of profiling.
How to use profiling in the Python Agent This feature is released in the Python Agent at v0.7.0. It is turned on by default, so you don't need any extra configuration to use it. You can find the environment variables about it here.
Here is the demo code of an intentionally slow application.

import time

def method1():
    time.sleep(0.02)
    return '1'

def method2():
    time.sleep(0.02)
    return method1()

def method3():
    time.sleep(0.02)
    return method2()

if __name__ == '__main__':
    import socketserver
    from http.server import BaseHTTPRequestHandler

    class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
        def do_POST(self):
            method3()
            time.sleep(0.5)
            self.send_response(200)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
            self.wfile.write('{"song": "Despacito", "artist": "Luis Fonsi"}'.encode('ascii'))

    PORT = 19090
    Handler = SimpleHTTPRequestHandler
    with socketserver.TCPServer(("", PORT), Handler) as httpd:
        httpd.serve_forever()

We can start it with the SkyWalking Python Agent CLI without changing any application code, which is also a new feature of v0.7.0. We just need to add sw-python run before our start command (i.e. sw-python run python3 main.py) to start the application with the Python agent attached. More information about sw-python can be found here.
Then, we should add a new profile task for the / endpoint from the SkyWalking UI, as shown below.
We can trigger it with curl -X POST http://localhost:19090/; after that, we can view the result of this profile task on the SkyWalking UI.
The mechanism of profiling When a request lands on an application with the profile function enabled, the agent begins profiling automatically if the request's URI matches the one required by the profiling task. A new thread is spawned to fetch a thread dump periodically until the end of the request.
The agent sends these thread dumps, called ThreadSnapshot(s), to the SkyWalking OAP server, and the OAP server analyzes those ThreadSnapshot(s) to get the final result. It treats method invocations with the same stack depth and code signature as the same operation, and estimates the execution time of each method from this.
Let's demonstrate how this analysis works through the following example. Suppose we have the program below and we profile it at 10ms intervals.

def main():
    methodA()

def methodA():
    methodB()

def methodB():
    methodC()
    methodD()

def methodC():
    time.sleep(0.04)

def methodD():
    time.sleep(0.06)

The agent collects a total of 10 ThreadSnapshot(s) over the entire time period (Diagram A). The first 4 snapshots represent the thread dumps taken during the execution of methodC, and the last 6 snapshots represent the thread dumps taken during the execution of methodD. Since a snapshot is taken every 10ms, methodC is estimated at roughly 4 × 10ms = 40ms and methodD at roughly 6 × 10ms = 60ms, which matches the sleep durations in the code. After the analysis by the OAP server, we can see the result of this profile task on the SkyWalking Rocketbot UI, as shown on the right of the diagram. 
With this result, we can clearly see the function call relationship and the time consumption situation of this program.\nDiagram A You can read more details of profiling theory from this blog.\nWe hope you enjoy the profile in the Python Agent, and if so, you can give us a star on Python Agent and SkyWalking on GitHub.\n","title":"SkyWalking Python Agent Supports Profiling Now","url":"/blog/2021-09-12-skywalking-python-profiling/"},{"content":"SkyWalking Kubernetes Helm Chart 4.1.0 is released. Go to downloads page to find release tars.\n Add missing service account to init job. Improve notes.txt and nodePort configuration. Improve ingress compatibility. Fix bug that customized config files are not loaded into es-init job. Add imagePullSecrets and node selector. Fix istio adapter description. Enhancement: allow mounting binary data files.  ","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.1.0","url":"/events/release-apache-skywalking-kubernetes-helm-chart-4.1.0/"},{"content":"GOUP hosted a webinar, and invited Sheng Wu to introduce Apache SkyWalking. This is a 1.5 hours presentation including the full landscape of Apache SkyWalking 8.x.\nChapter04 Session10 - Apache Skywalking by Sheng Wu   ","title":"[Webinar] SkyWalking 8.x Introduction","url":"/blog/2021-08-01-skywalking-8-intro/"},{"content":"SkyWalking 8.7.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. 
Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. 
Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.7.0","url":"/events/release-apache-skywalking-apm-8-7-0/"},{"content":"SkyWalking Client JS 0.6.0 is released. 
Go to downloads page to find release tars.
 Separate production and development environments when building. Upgrade packages to fix vulnerabilities. Fix headers could be null. Fix catching errors for http requests. Fix the firstReportedError so that it is calculated with more types of errors.  ","title":"Release Apache SkyWalking Client JS 0.6.0","url":"/events/release-apache-skywalking-client-js-0-6-0/"},{"content":"SkyWalking is an open source APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based architectures.
Since 2020, it has dominated the open source APM market in China, and has expanded rapidly into North America, Europe, and other Asian countries.
With over 6 years (2015-2021) of development, driven by the global open source community, SkyWalking now provides full stack observability covering metrics, tracing, and logging, plus an event detector, built on top of various native and ecosystem solutions.
 Language agent-based (Java, .NET, Golang, PHP, NodeJS, Python, C++, LUA) in-process monitoring, as powerful as commercial APM vendors' agents, mostly auto-instrumented and with good interactivity. Service Mesh Observability, working closely with the Envoy and Istio teams. Transparent integration with the popular metrics ecosystem: accepts metrics from Prometheus SDKs, OpenTelemetry collectors, Zabbix agents, etc. Log collection with analysis capability from agents such as Fluentd, Fluent Bit, and Filebeat. Infrastructure monitoring, such as Linux and Kubernetes, out of the box.  The SkyWalking ecosystem was started by very few people. The community drives the project to cover real scenarios, from tracing to the whole APM field. Even today, more professional open source developers, backed by the vendors behind them, are bringing the project to a different level.
Most notably, SkyWalking is going to build the first known open source APM-specific database in the world, providing at least
 A time series-based database engine. Support for traces, logs, and metrics at the database core level. High performance with cluster mode and HPA. Reasonable resource cost.  We nearly doubled the number of contributors in the last year, from ~300 to over 500. The whole community is very energetic. Here, we want to thank our 47 committers (28 PMC members included), listed here, and over 400 other contributors.
Together we built this humongous Apache Top-Level project, and proved the strong competitiveness of an open-source project.
This is a hard-won and impressive achievement. We won't stop here. The trend is there, the ground is solid. 
We are going to build the top-level APM system relying on our open-source community.\n500 Contributors List    GitHub         1095071913 182148432** 295198088** 394102339** 437376068**   50168383 55846420** 826245622** 844067874 Ahoo-Wang   AirTrioa AlexanderWert AlseinX AngryMills Ax1an   BFergerson BZFYS CalvinKirs CharlesMaster ChaunceyLin5152   CommissarXia Cvimer DeadLion Doublemine Du-fei   ElderJames EvanLjp FatihErdem FeynmanZhou Fine0830   FingerLiu FrankyXu Gallardot GerryYuan HackerRookie   HarryFQ Heguoya Hen1ng HendSame Humbertzhang   IanCao IluckySi Indifer J-Cod3r JaredTan95   Jargon96 Jijun JoeKerouac JohnNiang Johor03   Jozdortraz Jtrust Just-maple KangZhiDong LazyLei   LiWenGu Lin1997 Linda-pan LiteSun Liu-XinYuan   MiracleDx Miss-you MoGuGuai-hzr MrYzys O-ll-O   Patrick0308 QHWG67 Qiliang QuanjieDeng RandyAbernethy   RedzRedz Runrioter SataQiu ScienJus SevenBlue2018   ShaoHans Shikugawa SoberChina SummerOfServenteen Switch-vov   TJ666 Technoboy- TerrellChen TeslaCN TheRealHaui   TinyAllen TomMD ViberW Videl WALL-E   WeihanLi WildWolfBang WillemJiang Wooo0 XhangUeiJong   Xlinlin YczYanchengzhe Yebemeto YoungHu YunaiV   YunfengGao Z-Beatles ZS-Oliver ZhHong ZhuoSiChen   a198720 a1vin-tian a526672351 acurtain adamni135   adermxzs adriancole** aeolusheath agile6v aix3   aiyanbo ajanthan alexkarezin alonelaval amogege   amwyyyy andyliyuze andyzzl aoxls arugal   ascrutae ascrutae** augustowebd aviaviavi bai-yang   beckhampu beckjin beiwangnull bigflybrother bootsrc   bostin brucewu-fly buxingzhe buzuotaxuan bwh12398**   c feng c1ay candyleer carllhw carlvine500   carrypann cheenursn cheetah012 chenbeitang chenglei**   chengshiwen chenmudu chenpengfei chenvista chess-equality   chestarss chidaodezhongsheng chopin-d clevertension clk1st   cngdkxw cnlangzi codeglzhang codelipenghui coder-yqj   coki230 compilerduck constanine coolbeevip crystaldust   cui-liqiang cuiweiwei cutePanda123 cyberdak cyejing   cyhii dafu-wu dagmom dalekliuhan** darcydai   dengliming devkanro devon-ye dickens7 dimaaan   dingdongnigetou dio divyakumarjain dmsolr dominicqi   donbing007 dsc6636926 dvsv2 dzx2018 echooymxq   efekaptan elk-g emschu eoeac evanljp**   evanxuhe feelwing1314 fgksgf fredster33 fuhuo   fulmicoton fushiqinghuan111 geektcp geomonlin ggndnn   gitter-badger givingwu glongzh gnr163 gonedays   grissom-grissom grissomsh guodongq guyukou gxthrj   gy09535 gzshilu hailin0 hanahmily haotian2015   haoyann hardzhang harvies heihaozi hepyu   heyanlong hi-sb honganan horber hsoftxl   huangyoje huliangdream huohuanhuan iluckysi innerpeacez   itsvse jasper-zsh jbampton jialong121 jinlongwang   jjlu521016 jjtyro jmjoy jsbxyyx justeene   juzhiyuan jy00464346 kaanid kagaya85 karott   kayleyang kevinyyyy kezhenxu94 kikupotter kilingzhang   killGC kkl129 klboke ksewen kuaikuai   kun-song kylixs landonzeng langke93 langyan1022   langyizhao lazycathome leemove leizhiyuan libinglong   lijial lilien1010 limfriend linkinshi linliaoy   liqiangz liu-junchi liufei** liuhaoXD liuhaoyang   liuweiyi** liuyanggithup liuzhengyang liweiv lixin40**   lizl9** lkxiaolou llissery louis-zhou lpcy   lpf32 lsyf lucperkins lujiajing1126 lunamagic1978   lunchboxav lxin96** lxliuxuankb lytscu lyzhang1999   mage3k makefriend8 makingtime mantuliu maolie   margauxcabrera masterxxo maxiaoguang64 me** membphis   mestarshine mgsheng michaelsembwever mikkeschiren ming_flycash**   minquan.chen** misaya momo0313 moonming mrproliu   mrproliu** muyun12 nacx neatlife neeuq   nic-chen nickwongwong nikitap492 nileblack nisiyong   novayoung 
oatiz oflebbe olzhy onecloud360   osiriswd panniyuyu peng-yongsheng pengweiqhca potiuk   probeyang purgeyao qijianbo010 qinhang3 qiuyu-d   qjgszzx qq362220083 qqeasonchen qxo ralphgj   raybi-asus refactor2 remicollet rlenferink rootsongjc   rovast ruibaby s00373198 scolia sdanzo   seifeHu sergicastro shiluo34 sikelangya simonlei   sk163 snakorse songzhendong songzhian songzhian**   sonxy spacewander stalary stenio2011 stevehu   stone-wlg sungitly surechen swartz-k sxzaihua   tangxqa tanjunchen tankilo tanzhen** taskmgr   tbdpmi terranhu terrymanu tevahp thanq   thebouv tianyk tianyuak tincopper tinyu0   tom-pytel tristaZero tristan-tsl trustin tsuilouis   tuohai666 tzsword-2020 tzy1316106836 vcjmhg viktoryi   vision-ken viswaramamoorthy wallezhang wang-yeliang wang_weihan**   wangrzneu wankai123 wbpcode web-xiaxia webb2019   weiqiang-w weiqiang333 wendal wengangJi wenjianzhang   whfjam whl12345 willseeyou wilsonwu wind2008hxy   wingwong-knh withlin wl4g wqr2016 wu-sheng   wuguangkuo wujun8 wuwen5 wuxingye x22x22   xbkaishui xcaspar xdRight xiaoweiyu** xiaoxiangmoe   xiaoy00 xinfeingxia85 xingren23 xinzhuxiansheng xonze   xuanyu66 xuchangjunjx xudianyang yanbw yanfch   yang-xiaodong yangxb2010000 yanickxia yanmaipian yanmingbi   yantaowu yaojingguo yaowenqiang yazong ychandu   ycoe yimeng yu199195 yuqichou yushuqiang**   yuyujulin yxudong yymoth zaunist zaygrzx   zcai2 zeaposs zhang98722 zhanghao001 zhangjianweibj   zhangkewei zhangsean zhangxin** zhaoyuguang zhe1926   zhentaoJin zhongjianno1** zhousiliang163 zhuCheer zhyyu   zifeihan zijin-m zkscpqm zoidbergwill zoumingzm   zouyx zpf1989 zshit zxbu zygfengyuwuzu    ","title":"[Community win] SkyWalking achieved 500 contributors milestone.","url":"/blog/2021-07-12-500-contributors-mark/"},{"content":"时间:2021 年 6 月 26 日\n地点:北京市海淀区西格玛大厦 B1 多功能厅\n视频回放:见 Bilibili\nApache SkyWalking Landscape  吴晟 Sheng Wu. Tetrate Founding Engineer, Apache Software Foundation board director. SkyWalking founder.  SkyWalking 2020-2021 年发展和后续计划\n微服务可观测性分析平台的探索与实践  凌若川 腾讯高级工程师  可观测性分析平台作为云原生时代微服务系统基础组件,开放性与性能是决定平台价值的核心要素。 复杂微服务应用场景与海量多维链路数据,对可观测性分析平台在开放性设计和各环节高性能实现带来诸多挑战。 本次分享中将重点梳理腾讯云微服务团队在构建云原生可观测性分析平台过程中遇到的挑战,介绍我们在架构设计与实现方面的探索与实践。\n 云原生时代微服务可观测性平台面临的性能与可用性挑战 腾讯云在构建高性能微服务可观测性分析平台的探索与实践 微服务可观测性分析平台架构的下一阶段演进方向展望  BanyanDB 数据模型背后的逻辑  高洪涛 Hongtao Gao. Tetrate SRE, SkyWalking PMC, Apache ShardingSphere PMC.  BanyanDB 作为为处理 Apache SkyWalking 产生的 trace,log 和 metric 的数据而特别设计的数据库,其背后数据模型的抉择是非常与众不同的。 在本次分享中,我将根据 RUM 猜想来讨论为什么 BanyanDB 使用的数据模型对于 APM 数据而言是更加高效和可靠的。\n通过本次分享,观众可以:\n 理解数据库设计的取舍 了解 BanyanDB 的数据模型 认识到该模型对于 APM 类数据有特定的优势  Apache SkyWalking 如何做前端监控  范秋霞 Qiuxia Fan,Tetrate FE SRE,SkyWalking PMC.  Apache SkyWalking 对前端进行了监控与跟踪,分别有 Metric, Log, Trace 三部分。本次分享我会介绍页面性能指标的收集与计算,同时用案列进行分析,也会讲解 Log 的采集方法以及 Source Map 错误定位的实施。最后介绍浏览器端 Requets 的跟踪方法。\n通过本次分享,观众可以:\n 了解页面的性能指标以及收集计算方法 了解前端如何做错误日志收集 如何对页面请求进行跟踪以及跟踪的好处  一名普通工程师,该如何正确的理解开源精神?  王晔倞 Yeliang Wang. API7 Partner / Product VP.  开源精神,那也许是一种给于和获取的平衡,有给于才能有获取,有获取才会有给于的动力。无需指责别人只会获取,我们应该懂得开源是一种创造方式,一个没有创造欲和创造力的人加入开源也是无用的。\n通过本次分享,观众可以:\n 为什么国内一些程序员会对开源产生误解? 了解 “开源≠自由≠非商业” 的来龙去脉。 一名普通工程师,如何高效地向开源社区做贡献?  可观测性技术生态和 OpenTelemetry 原理及实践  陈一枭 腾讯. OpenTelemetry docs-cn maintainer、Tencent OpenTelemetry OTeam 创始人  综述云原生可观测性技术生态,介绍 OpenTracing,OpenMetrics,OpenTelemetry 等标准演进。介绍 OpenTelemetry 存在价值意义,介绍 OpenTelemetry 原理及其整体生态规划。介绍腾讯在 OpenTelemetry 方面的实践。\n本次分享内容如下:\n 云原生可观测性技术简介 OpenTelemetry 及其它规范简介 OpenTelemetry 原理 OpenTelemetry 在腾讯的应用及实践  Apache SkyWalking 事件采集系统更快定位故障  柯振旭 Zhenxu Ke,Tetrate SRE, Apache SkyWalking PMC. 
Apache Incubator PMC. Apache Dubbo committer.  通过本次分享,听众可以:\n 了解 SkyWalking 的事件采集系统; 了解上报事件至 SkyWalking 的多种方式; 学习如何利用 SkyWalking 采集的事件结合 metrics,分析目标系统的性能问题;  可观测性自动注入技术原理探索与实践  詹启新 Tencnet OpenTelemetry Oteam PMC  在可观测领域中自动注入已经成为重要的组成部分之一,其优异简便的使用方式并且可同时覆盖到链路、指标、日志,大大降低了接入成本及运维成本,属于友好的一种接入方式; 本次分享将介绍 Java 中的字节码注入技术原理,及在可观测领域的应用实践\n 常用的自动注入技术原理简介 介绍可观测性在 Java 落地的要点 opentelemetry-java-instrumentation 的核心原理及实现 opentelemetry 自动注入的应用实践  如何利用 Apache APISIX 提升 Nginx 的可观测性  金卫 Wei Jin, API7 Engineer Apache SkyWalking committer. Apache apisix-ingress-controller Founder. Apache APISIX PMC.  在云原生时代,动态和可观测性是 API 网关的标准特性。Apache APISIX 不仅覆盖了 Nginx 的传统功能,在可观测性上也和 SkyWalking 深度合作,大大提升了服务治理能力。本次分享会介绍如何无痛的提升 Nginx 的可观测性和 APISIX 在未来可观测性方面的规划。\n通过本次分享,观众可以:\n 通过 Apache APISIX 实现观测性的几种手段. 了解 Apache APISIX 高效且易用的秘诀. 结合 Apache skywalking 进一步提升可观测性.  ","title":"[视频] SkyWalking Day 2021 演讲视频","url":"/zh/skywalking-day-2021/"},{"content":"SkyWalking CLI 0.7.0 is released. Go to downloads page to find release tars.\n  Features\n Add GitHub Action for integration of event reporter    Bug Fixes\n Fix metrics top can\u0026rsquo;t infer the scope automatically    Chores\n Upgrade dependency crypto Refactor project to use goapi Move parseScope to pkg Update release doc    ","title":"Release Apache SkyWalking CLI 0.7.0","url":"/events/release-apache-skywalking-cli-0-7-0/"},{"content":"SkyWalking 8.6.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. 
perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics infomation during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.6.0","url":"/events/release-apache-skywalking-apm-8-6-0/"},{"content":"Abstract Apache SkyWalking hosts SkyWalkingDay Conference 2021 in June 26th, jointly with Tencent and Tetrate.\nWe are going to share SkyWalking\u0026rsquo;s roadmap, features, product experiences and open source culture.\nWelcome to join us.\nVenue Addr./地址 北京市海淀区西格玛大厦B1多功能厅\nDate June 26th.\nRegistration For Free Register for onsite or online\nSessions 10:00 - 10:20 Apache SkyWalking Landscape  吴晟 Sheng Wu. Tetrate Founding Engineer, Apache Software Foundation board director. SkyWalking founder.  SkyWalking 2020-2021年发展和后续计划\n10:20 - 10:50 微服务可观测性分析平台的探索与实践  凌若川 腾讯高级工程师  可观测性分析平台作为云原生时代微服务系统基础组件,开放性与性能是决定平台价值的核心要素。 复杂微服务应用场景与海量多维链路数据,对可观测性分析平台在开放性设计和各环节高性能实现带来诸多挑战。 本次分享中将重点梳理腾讯云微服务团队在构建云原生可观测性分析平台过程中遇到的挑战,介绍我们在架构设计与实现方面的探索与实践。\n 云原生时代微服务可观测性平台面临的性能与可用性挑战 腾讯云在构建高性能微服务可观测性分析平台的探索与实践 微服务可观测性分析平台架构的下一阶段演进方向展望  10:50 - 11:20 BanyanDB数据模型背后的逻辑  高洪涛 Hongtao Gao. Tetrate SRE, SkyWalking PMC, Apache ShardingSphere PMC.  
BanyanDB作为为处理Apache SkyWalking产生的trace,log和metric的数据而特别设计的数据库,其背后数据模型的抉择是非常与众不同的。 在本次分享中,我将根据RUM猜想来讨论为什么BanyanDB使用的数据模型对于APM数据而言是更加高效和可靠的。\n通过本次分享,观众可以:\n 理解数据库设计的取舍 了解BanyanDB的数据模型 认识到该模型对于APM类数据有特定的优势  11:20 - 11:50 Apache SkyWalking 如何做前端监控  范秋霞 Qiuxia Fan,Tetrate FE SRE,SkyWalking PMC.  Apache SkyWalking对前端进行了监控与跟踪,分别有Metric, Log, Trace三部分。本次分享我会介绍页面性能指标的收集与计算,同时用案列进行分析,也会讲解Log的采集方法以及Source Map错误定位的实施。最后介绍浏览器端Requets的跟踪方法。\n通过本次分享,观众可以:\n 了解页面的性能指标以及收集计算方法 了解前端如何做错误日志收集 如何对页面请求进行跟踪以及跟踪的好处  午休 13:30 - 14:00 一名普通工程师,该如何正确的理解开源精神?  王晔倞 Yeliang Wang. API7 Partner / Product VP.  开源精神,那也许是一种给于和获取的平衡,有给于才能有获取,有获取才会有给于的动力。无需指责别人只会获取,我们应该懂得开源是一种创造方式,一个没有创造欲和创造力的人加入开源也是无用的。\n通过本次分享,观众可以:\n 为什么国内一些程序员会对开源产生误解? 了解 “开源≠自由≠非商业” 的来龙去脉。 一名普通工程师,如何高效地向开源社区做贡献?  14:00 - 14:30 可观测性技术生态和OpenTelemetry原理及实践  陈一枭 腾讯. OpenTelemetry docs-cn maintainer、Tencent OpenTelemetry OTeam创始人  综述云原生可观测性技术生态,介绍OpenTracing,OpenMetrics,OpenTelemetry等标准演进。介绍OpenTelemetry存在价值意义,介绍OpenTelemetry原理及其整体生态规划。介绍腾讯在OpenTelemetry方面的实践。\n本次分享内容如下:\n 云原生可观测性技术简介 OpenTelemetry及其它规范简介 OpenTelemetry原理 OpenTelemetry在腾讯的应用及实践  14:30 - 15:10 利用 Apache SkyWalking 事件采集系统更快定位故障  柯振旭 Zhenxu Ke,Tetrate SRE, Apache SkyWalking PMC. Apache Incubator PMC. Apache Dubbo committer.  通过本次分享,听众可以:\n 了解 SkyWalking 的事件采集系统; 了解上报事件至 SkyWalking 的多种方式; 学习如何利用 SkyWalking 采集的事件结合 metrics,分析目标系统的性能问题;  15:10 - 15:30 茶歇 15:30 - 16:00 可观测性自动注入技术原理探索与实践  詹启新 Tencnet OpenTelemetry Oteam PMC  在可观测领域中自动注入已经成为重要的组成部分之一,其优异简便的使用方式并且可同时覆盖到链路、指标、日志,大大降低了接入成本及运维成本,属于友好的一种接入方式; 本次分享将介绍Java中的字节码注入技术原理,及在可观测领域的应用实践\n 常用的自动注入技术原理简介 介绍可观测性在Java落地的要点 opentelemetry-java-instrumentation的核心原理及实现 opentelemetry自动注入的应用实践  16:00 - 16:30 如何利用 Apache APISIX 提升 Nginx 的可观测性  金卫 Wei Jin, API7 Engineer Apache SkyWalking committer. Apache apisix-ingress-controller Founder. Apache APISIX PMC.  在云原生时代,动态和可观测性是 API 网关的标准特性。Apache APISIX 不仅覆盖了 Nginx 的传统功能,在可观测性上也和 SkyWalking 深度合作,大大提升了服务治理能力。本次分享会介绍如何无痛的提升 Nginx 的可观测性和 APISIX 在未来可观测性方面的规划。\n通过本次分享,观众可以:\n 通过 Apache APISIX 实现观测性的几种手段. 了解 Apache APISIX 高效且易用的秘诀. 结合 Apache skywalking 进一步提升可观测性.  16:35 抽奖,结束 Sponsors  Tencent Tetrate SegmentFault 思否  Anti-harassment policy SkyWalkingDay is dedicated to providing a harassment-free experience for everyone. We do not tolerate harassment of participants in any form. Sexual language and imagery will also not be tolerated in any event venue. Participants violating these rules may be sanctioned or expelled without a refund, at the discretion of the event organizers. Our anti-harassment policy can be found at Apache website.\nContact Us Send mail to dev@skywalking.apache.org.\n","title":"SkyWalkingDay Conference 2021, relocating at Beijing","url":"/events/skywalkingday-2021/"},{"content":"SkyWalking NodeJS 0.3.0 is released. Go to downloads page to find release tars.\n Add ioredis plugin. (#53) Endpoint cold start detection and marking. (#52) Add mysql2 plugin. (#54) Add AzureHttpTriggerPlugin. (#51) Add Node 15 into test matrix. (#45) Segment reference and reporting overhaul. (#50) Add http ignore by method. (#49) Add secure connection option. (#48) BugFix: wrong context during many async spans. (#46) Add Node Mongoose Plugin. (#44)  ","title":"Release Apache SkyWalking for NodeJS 0.3.0","url":"/events/release-apache-skywalking-nodejs-0-3-0/"},{"content":"SkyWalking Client JS 0.5.1 is released. Go to downloads page to find release tars.\n Add noTraceOrigins option. Fix wrong URL when using relative path. Catch frames errors. Get response.body as a stream with the fetch API. Support reporting multiple logs. 
Support TypeScript projects.  ","title":"Release Apache SkyWalking Client JS 0.5.1","url":"/events/release-apache-skywalking-client-js-0-5-1/"},{"content":"SkyWalking Kong Agent 0.1.1 is released. Go to downloads page to find release tars.
 Establish the SkyWalking Kong Agent.  ","title":"Release Apache SkyWalking Kong 0.1.1","url":"/events/release-apache-skywalking-kong-0-1-1/"},{"content":"Bilibili video link
","title":"[视频] 大咖说开源 第二季 第4期 | Apache软件基金会20年","url":"/zh/2021-05-09-summer-2021-asf20/"},{"content":"We posted our Response to Elastic 2021 License Change blog 4 months ago. It doesn't have a big impact in the short term, but because of the incompatibility between SSPL and Apache 2.0, we lost the ability to upgrade the storage server, which concerns the community and our users. So, we have to keep looking for a new option as a replacement.
There was an open source project, Open Distro for Elasticsearch, maintained by the AWS team. It is an Apache 2.0-licensed distribution of Elasticsearch enhanced with enterprise security, alerting, SQL, and more. After Elastic relicensed its projects, we talked with their team, and they have a plan to take over community leadership and keep maintaining Elasticsearch as it was licensed under Apache 2.0. So, they are in a good position to fork and continue.
On April 12th, 2021, AWS announced the new project, OpenSearch, driven by the community and initiated by people from AWS, Red Hat, SAP, Capital One, and Logz.io. Read the Introducing OpenSearch blog for more detail.
Once this news was public, we began to plan the process of evaluating and testing OpenSearch as SkyWalking's storage option. Read our issue.
Today, we are glad to ANNOUNCE that OpenSearch can replace Elasticsearch as the storage, and it is still licensed under Apache 2.0.
This has been merged into the main branch, and you can already find it in the dev doc.
OpenSearch OpenSearch storage shares the same configurations as Elasticsearch 7. In order to activate Elasticsearch 7 as storage, set the storage provider to elasticsearch7. Please download the apache-skywalking-bin-es7.tar.gz if you want to use OpenSearch as storage.
The SkyWalking community will keep an eye on the OpenSearch project, and we look forward to its first GA release.
 NOTE: we have to add a warning NOTICE to the Elasticsearch storage doc:
NOTICE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable Elasticsearch version according to your usage.
","title":"OpenSearch, a new storage option to avoid ElasticSearch's SSPL","url":"/blog/2021-05-09-opensearch-supported/"},{"content":"Hailin Wang (GitHub ID: hailin0) began his SkyWalking journey on Aug 23rd, 2020.
He is very active in code contributions and has brought several important features into the SkyWalking ecosystem.
He ranks 33rd among contributors to the main repository[1], focusing on plugin contributions and log ecosystem integration; see his code contributions[2]. He also started a new and better way for other open-source projects to integrate with SkyWalking.
He spent over 2 months making the SkyWalking agent and its plugins part of Apache DolphinScheduler's default binary distribution[3]; see this PR[4]. This example has influenced further community development. 
Our PMC member, Yuguang Zhao, is using the same approach to ship our agent and plugins into the Seata project[5]. As SkyWalking keeps growing, I have no doubt that this kind of integration will become more common.
The SkyWalking project accepts him as a new committer.
Welcome, Hailin Wang, to the committer team.
[1] https://github.com/apache/skywalking/graphs/contributors [2] https://github.com/apache/skywalking/commits?author=hailin0 [3] https://github.com/apache/dolphinscheduler/tree/1.3.6-prepare/ext/skywalking [4] https://github.com/apache/incubator-dolphinscheduler/pull/4852 [5] https://github.com/seata/seata/pull/3652
","title":"Welcome Hailin Wang as new committer","url":"/events/welcome-hailin-wang-as-new-committer/"},{"content":"SkyWalking LUA Nginx 0.5.0 is released. Go to downloads page to find release tars.
 Adapt to the Kong agent. Correct the version format for luarock.  ","title":"Release Apache SkyWalking LUA Nginx 0.5.0","url":"/events/release-apache-skywalking-lua-nginx-0.5.0/"},{"content":"SkyWalking 8.5.0 is released. Go to downloads page to find release tars. Changes by Version
Project  Incompatible Change. Indices and templates of the ElasticSearch (6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0 to download the node x64 binary on Apple Silicon. Add E2E test for VM monitoring with metrics from the Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build a Docker image for the Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show a detailed message when redefine errors occur. Fix ClassCastException of the log4j gRPC reporter. Fix NPE when the Kafka reporter is activated. Enhance gRPC log appender to allow a layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin getting a null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor implement the EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support changing the config agent.span_limit_per_segment at runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation missing trace ID in async logs. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt to hbase client 2.x. Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug in springmvc-annotation-4.x-plugin where the witness class does not exist in some versions. Add Redis command parameters to the 'db.statement' field on the Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can't collect HTTP statusCode. Fix bug that the asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugins (gateway-2.0.x-plugin, gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept "Event" and its implementations to collect events. Add some defensive code for NPE and bump up the Kubernetes client version to expose exception stack traces. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. 
Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  
All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.5.0","url":"/events/release-apache-skywalking-apm-8-5-0/"},{"content":"SkyWalking Cloud on Kubernetes 0.3.0 is released. Go to downloads page to find release tars.\n Support special characters in the metric selector of HPA metric adapter. Add the namespace to HPA metric name.  ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.3.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-3-0/"},{"content":"SkyWalking NodeJS 0.2.0 is released. Go to downloads page to find release tars.\n Add AMQPLib plugin (RabbitMQ). (#34) Add MongoDB plugin. (#33) Add PgPlugin - PosgreSQL. (#31) Add MySQLPlugin to plugins. (#30) Add http protocol of host to http plugins. (#28) Add tag http.method to plugins. (#26) Bugfix: child spans created on immediate cb from op. (#41) Bugfix: async and preparing child entry/exit. (#36) Bugfix: tsc error of dist lib. (#24) Bugfix: AxiosPlugin async() / resync(). (#21) Bugfix: some requests of express / axios are not close correctly. (#20) Express plugin uses http wrap explicitly if http plugin disabled. (#42)  ","title":"Release Apache SkyWalking for NodeJS 0.2.0","url":"/events/release-apache-skywalking-nodejs-0-2-0/"},{"content":"SkyWalking Python 0.6.0 is released. Go to downloads page to find release tars.\n Fixes:  Segment data loss when gRPC timing out. (#116) sw_tornado plugin async handler status set correctly. (#115) sw_pymysql error when connection haven\u0026rsquo;t db. (#113)    ","title":"Release Apache SkyWalking Python 0.6.0","url":"/events/release-apache-skywalking-python-0-6-0/"},{"content":" Origin: End-User Tracing in a SkyWalking-Observed Browser - The New Stack\n Apache SkyWalking: an APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based (Docker, Kubernetes, Mesos) architectures.\nskywalking-client-js: a lightweight client-side JavaScript exception, performance, and tracing library. It provides metrics and error collection to the SkyWalking backend. It also makes the browser the starting point for distributed tracing.\nBackground Web application performance affects the retention rate of users. If a page load time is too long, the user will give up. So we need to monitor the web application to understand performance and ensure that servers are stable, available and healthy. SkyWalking is an APM tool and the skywalking-client-js extends its monitoring to include the browser, providing performance metrics and error collection to the SkyWalking backend.\nPerformance Metrics The skywalking-client-js uses [window.performance] (https://developer.mozilla.org/en-US/docs/Web/API/Window/performance) for performance data collection. From the MDN doc, the performance interface provides access to performance-related information for the current page. It\u0026rsquo;s part of the High Resolution Time API, but is enhanced by the Performance Timeline API, the Navigation Timing API, the User Timing API, and the Resource Timing API. In skywalking-client-js, all performance metrics are calculated according to the Navigation Timing API defined in the W3C specification. We can get a PerformanceTiming object describing our page using the window.performance.timing property. 
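To make the calculation concrete, here is a minimal sketch (not the actual skywalking-client-js implementation) of how a few of the metrics in the table below could be derived from window.performance.timing; reportToCollector is a hypothetical placeholder for whatever transport ships the data to the OAP backend.

```javascript
// Minimal sketch: derive a few timing metrics from window.performance.timing.
// The formulas mirror the table below; reportToCollector() is a hypothetical helper.
function reportToCollector(metrics) {
  console.log('would send to OAP backend:', metrics);
}

window.addEventListener('load', function () {
  // Defer one tick so loadEventStart is populated.
  setTimeout(function () {
    var t = window.performance.timing;
    reportToCollector({
      dnsTime: t.domainLookupEnd - t.domainLookupStart,        // DNS query
      tcpTime: t.connectEnd - t.connectStart,                   // TCP link
      ttfbTime: t.responseStart - t.requestStart,               // time to first byte
      transTime: t.responseEnd - t.responseStart,               // content transfer
      domAnalysisTime: t.domInteractive - t.responseEnd,        // DOM analysis
      fptTime: t.responseEnd - t.fetchStart,                    // first paint time
      domReadyTime: t.domContentLoadedEventEnd - t.fetchStart,  // DOM ready
      loadPageTime: t.loadEventStart - t.fetchStart,            // full page load
      ttlTime: t.domInteractive - t.fetchStart                  // time to interact
    });
  }, 0);
});
```

In practice, skywalking-client-js collects these values automatically when autoTracePerf is enabled; the sketch only illustrates where the numbers come from.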
The PerformanceTiming interface contains properties that offer performance timing information for various events that occur during the loading and use of the current page.
We can better understand these attributes when we see them together in the figure below from the W3C:
The following table contains the performance metrics in skywalking-client-js.

| Metric Name | Description | Formula | Note |
| --- | --- | --- | --- |
| redirectTime | Page redirection time | redirectEnd - redirectStart | If the current document and the document it is redirected to are not from the same origin, redirectStart and redirectEnd are set to 0 |
| ttfbTime | Time to First Byte | responseStart - requestStart | According to Google Developers |
| dnsTime | Time to DNS query | domainLookupEnd - domainLookupStart | |
| tcpTime | Time to TCP link | connectEnd - connectStart | |
| transTime | Time to content transfer | responseEnd - responseStart | |
| sslTime | Time to SSL secure connection | connectEnd - secureConnectionStart | Only supports HTTPS |
| resTime | Time to resource loading | loadEventStart - domContentLoadedEventEnd | Represents synchronously loaded resources in the page |
| fmpTime | Time to First Meaningful Paint | - | Listen for changes in page elements, traverse each new element, and calculate the total score of these elements; if an element is visible, the score is 1 * weight, and if it is not visible, the score is 0 |
| domAnalysisTime | Time to DOM analysis | domInteractive - responseEnd | |
| fptTime | First Paint Time | responseEnd - fetchStart | |
| domReadyTime | Time to DOM ready | domContentLoadedEventEnd - fetchStart | |
| loadPageTime | Page full load time | loadEventStart - fetchStart | |
| ttlTime | Time to interact | domInteractive - fetchStart | |
| firstPackTime | Time to first package | responseStart - domainLookupStart | |

skywalking-client-js collects these performance metrics and sends them to the OAP (Observability Analysis Platform) server, which aggregates the data on the back-end side; the results are then shown in visualizations on the UI side. Users can optimize the page according to this data.
Exception Metrics There are five kinds of errors that can be caught in skywalking-client-js (a sketch of how these hooks can be registered appears after the App error metrics below):
 Resource loading errors are captured by window.addEventListener('error', callback, true). window.onerror catches JS execution errors. window.addEventListener('unhandledrejection', callback) is used to catch promise errors. Vue errors are captured by Vue.config.errorHandler. Ajax errors are captured by addEventListener('error', callback); addEventListener('abort', callback); addEventListener('timeout', callback); registered in the send callback.  skywalking-client-js reports error data to the OAP server, which finally visualizes the data on the UI side. For an error overview of the App, there are several metrics for basic statistics and trends of errors, including the following:
 App Error Count, the total number of errors in the selected time period. App JS Error Rate, the proportion of PV with JS errors in a selected time period to total PV. All of Apps Error Count, Top N Apps error count ranking. All of Apps JS Error Rate, Top N Apps JS error rate ranking. Error Count of Versions in the Selected App, Top N Error Count of Versions in the Selected App ranking. Error Rate of Versions in the Selected App, Top N JS Error Rate of Versions in the Selected App ranking. Error Count of the Selected App, Top N Error Count of the Selected App ranking. Error Rate of the Selected App, Top N JS Error Rate of the Selected App ranking.  
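Before moving on to the page-level metrics, here is a minimal sketch of how the five capture points above might be registered. This is not the library's actual code: reportError is a hypothetical stand-in for the batching and reporting that skywalking-client-js performs toward the OAP collector, and the Vue and XMLHttpRequest pieces assume such objects exist in your application.

```javascript
// Minimal sketch of the five error capture points described above.
// reportError() is a hypothetical stand-in for the real reporting pipeline.
function reportError(payload) {
  console.log('would report to OAP collector:', payload);
}

// 1. Resource loading errors (capture phase sees failing <img>/<script>/<link> elements).
window.addEventListener('error', function (event) {
  var target = event.target || {};
  if (target.src || target.href) {
    reportError({ category: 'resource', url: target.src || target.href });
  }
}, true);

// 2. JS execution errors.
window.onerror = function (message, source, line, column) {
  reportError({ category: 'js', message: message, source: source, line: line, column: column });
};

// 3. Unhandled promise rejections.
window.addEventListener('unhandledrejection', function (event) {
  reportError({ category: 'promise', reason: String(event.reason) });
});

// 4. Vue errors, assuming a global Vue build is present.
if (window.Vue) {
  window.Vue.config.errorHandler = function (error, vm, info) {
    reportError({ category: 'vue', message: error.message, info: info });
  };
}

// 5. Ajax errors: listeners attached to an XMLHttpRequest before send() is called.
function instrumentXhr(xhr, url) {
  ['error', 'abort', 'timeout'].forEach(function (type) {
    xhr.addEventListener(type, function () {
      reportError({ category: 'ajax', type: type, url: url });
    });
  });
}
```

In the library itself the Ajax listeners are added as part of the send callback, as described above; instrumentXhr here only shows where they attach.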
For pages, we use several metrics for basic statistics and trends of errors, including the following:
 Top Unstable Pages / Error Rate, Top N Error Count pages of the Selected version ranking. Top Unstable Pages / Error Count, Top N Error Count pages of the Selected version ranking. Page Error Count Layout, data display of different errors over a period of time.  User Metrics SkyWalking browser monitoring also provides metrics about how visitors use the monitored websites, such as PV (page views), UV (unique visitors), top N PV (page views), etc.
In SPAs (single page applications), the page is refreshed only once. The traditional method only reports PV once after the page loads, but cannot count the PV of each sub-page, and cannot aggregate other types of logs by sub-page.
SkyWalking browser monitoring provides two processing methods for SPA pages:
  Enable SPA automatic parsing. This method is suitable for most single page application scenarios where the URL hash is used as the route. In the initial configuration, set enableSPA to true, which turns on the page's hashchange event listener (triggering PV re-reporting) and uses the URL hash as the page field in other reported data.
  Manual reporting. This method can be used in all single page application scenarios, including when the first method is not usable. The following example uses the setPerformance method to manually update the page path when data is reported. When this method is called, the page PV is re-reported by default:

app.on('routeChange', function (to) {
  ClientMonitor.setPerformance({
    collector: 'http://127.0.0.1:8080',
    service: 'browser-app',
    serviceVersion: '1.0.0',
    pagePath: to.path,
    autoTracePerf: true,
    enableSPA: true,
  });
});

Let's take a look at the result in the following image. It shows the most popular applications and versions, and the changes of PV over a period of time.
Make the browser the starting point for distributed tracing SkyWalking browser monitoring intercepts HTTP requests to trace segments and spans. It supports tracking the following modes of HTTP requests: XMLHttpRequest and fetch. It also supports tracking libraries and tools based on XMLHttpRequest and fetch, such as Axios, SuperAgent, OpenApi, and so on.
Let's see how SkyWalking browser monitoring intercepts HTTP requests:
After this, it uses window.addEventListener('xhrReadyStateChange', callback) and, according to the readyState value, sets sw8 = xxxx in the request header. At the same time, request information is reported to the back-end side. Finally, we can view trace data on the trace page. The following graphic is from the trace page:
To see how we listen for fetch requests, let's look at the source code of fetch.
As you can see, it creates a promise and a new XMLHttpRequest object. Because the native fetch is built into the browser, adding event listeners alone cannot monitor the code inside fetch; we need to be able to monitor the code execution first. So, to monitor the code execution, let's rewrite fetch:

import { fetch } from 'whatwg-fetch';
window.fetch = fetch;

In this way, we can intercept fetch requests through the method described above.
Additional Resources  End-User Tracing in a SkyWalking-Observed Browser.  
","title":"End-User Tracing in a SkyWalking-Observed Browser","url":"/blog/end-user-tracing-in-a-skywalking-observed-browser/"},{"content":"SourceMarker is an open-source continuous feedback IDE plugin built on top of Apache SkyWalking, a popular open-source APM system with monitoring, tracing, and diagnosing capabilities for distributed software systems. SkyWalking, a truly holistic system, provides the means for automatically producing, storing, and querying software operation metrics. It requires little to no code changes to implement and is lightweight enough to be used in production. By itself, SkyWalking is a formidable force in the realm of continuous monitoring technology.\nSourceMarker, leveraging the continuous monitoring functionality provided by SkyWalking, creates continuous feedback technology by automatically linking software operation metrics to source code and displaying feedback directly inside of the IDE. While currently only supporting JetBrains-based IDEs and JVM-based programming languages, SourceMarker may be extended to support any number of programming languages and IDEs. Using SourceMarker, software developers can understand and validate software operation inside of their IDE. Instead of charts that indicate the health of the application, software developers can view the health of individual source code components and interpret software operation metrics from a much more familiar perspective. Such capabilities improve productivity as time spent continuously context switching from development to monitoring would be eliminated.\nLogging The benefits of continuous feedback technology are immediately apparent with the ability to view and search logs directly from source code. Instead of tailing log files or viewing logs through the browser, SourceMarker allows software developers to navigate production logs just as easily as they navigate source code. By using the source code as the primary perspective for navigating logs, SourceMarker allows software developers to view logs specific to any package, class, method, or line directly from the context of the source code which resulted in those logs.\nTracing Furthermore, continuous feedback technology offers software developers a deeper understanding of software by explicitly tying the implicit software operation to source code. Instead of visualizing software traces as Gantt charts, SourceMarker allows software developers to step through trace stacks while automatically resolving trace tags and logs. With SourceMarker, software developers can navigate production software traces in much the same way one debugs local applications.\nAlerting Most importantly, continuous feedback technology keeps software developers aware of production software operation. Armed with an APM-powered IDE, every software developer can keep track of the behavior of any method, class, package, and even the entire application itself. Moreover, this allows for source code to be the medium through which production bugs are made evident, thereby creating the feasibility of source code with the ability to self-diagnose and convey its own health.\n Download SourceMarker SourceMarker aims to bridge the theoretical and empirical practices of software development through continuous feedback. 
The goal is to make developing software with empirical data feel natural and intuitive, creating more complete software developers who understand the entire software development cycle.\n https://github.com/sourceplusplus/sourcemarker  This project is still early in its development, so if you think of any ways to improve SourceMarker, please let us know.\n","title":"SourceMarker: Continuous Feedback for Developers","url":"/blog/2021-03-16-continuous-feedback/"},{"content":"SkyWalking LUA Nginx 0.4.1 is released. Go to downloads page to find release tars.\n fix: missing constants in the rockspec.  ","title":"Release Apache SkyWalking LUA Nginx 0.4.1","url":"/events/release-apache-skywalking-lua-nginx-0.4.1/"},{"content":"SkyWalking LUA Nginx 0.4.0 is released. Go to downloads page to find release tars.\n Add a global field \u0026lsquo;includeHostInEntrySpan\u0026rsquo;, type \u0026lsquo;boolean\u0026rsquo;, to mark whether the entrySpan includes the host/domain. Add destroyBackendTimer to stop reporting metrics. Doc: set random seed in init_worker phase. Locally cache some variables and reuse them in the Lua module. Enable local cache and use tablepool to reuse the temporary table.  ","title":"Release Apache SkyWalking LUA Nginx 0.4.0","url":"/events/release-apache-skywalking-lua-nginx-0.4.0/"},{"content":"SkyWalking Client JS 0.4.0 is released. Go to downloads page to find release tars.\n Update stack and message in logs. Fix wrong URL when using relative path in xhr.  ","title":"Release Apache SkyWalking Client JS 0.4.0","url":"/events/release-apache-skywalking-client-js-0-4-0/"},{"content":"SkyWalking Satellite 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Build the Satellite core structure. Add prometheus self telemetry. Add kafka client plugin. Add none-fallbacker plugin. Add timer-fallbacker plugin. Add nativelog-kafka-forwarder plugin. Add memory-queue plugin. Add mmap-queue plugin. Add grpc-nativelog-receiver plugin. Add http-nativelog-receiver plugin. Add grpc-server plugin. Add http-server plugin. Add prometheus-server plugin.  Bug Fixes Issues and PR  All issues are here All pull requests are here  ","title":"Release Apache SkyWalking Satellite 0.1.0","url":"/events/release-apache-skwaylking-satellite-0-1-0/"},{"content":"Juntao Zhang led and finished the rebuild of the whole SkyWalking website, migrating it to a fully automated website update process that is very friendly to users. During the rebuild, he spent several months of contributions bringing the documentation of our main repository to be hosted on the SkyWalking website, which is also able to host the documentation of other repositories. We had been waiting for this for years.\nIn the website repository alone, he has contributed 3800 LOC through 26 commits.\nWe are honored to have him on the PMC team.\n","title":"Welcome Juntao Zhang (张峻滔) to join the PMC","url":"/events/welcome-juntao-zhang-to-join-the-pmc/"},{"content":" Origin: Observe VM Service Meshes with Apache SkyWalking and the Envoy Access Log Service - The New Stack\n Apache SkyWalking: an APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based (Docker, Kubernetes, Mesos) architectures.\nEnvoy Access Log Service: Access Log Service (ALS) is an Envoy extension that emits detailed access logs of all requests going through Envoy.\nBackground In the previous post, we talked about the observability of a service mesh under a Kubernetes environment, and applied it to the bookinfo application in practice. 
We also mentioned that, in order to map the IP addresses into services, SkyWalking needs access to the service metadata from a Kubernetes cluster, which is not available for services deployed in virtual machines (VMs). In this post, we will introduce a new analyzer in SkyWalking that leverages Envoy’s metadata exchange mechanism to decouple with Kubernetes. The analyzer is designed to work in Kubernetes environments, VM environments, and hybrid environments. If there are virtual machines in your service mesh, you might want to try out this new analyzer for better observability, which we will demonstrate in this tutorial.\nHow it works The mechanism of how the analyzer works is the same as what we discussed in the previous post. What makes VMs different from Kubernetes is that, for VM services, there are no places where we can fetch the metadata to map the IP addresses into services.\nThe basic idea we present in this article is to carry the metadata along with Envoy’s access logs, which is called metadata-exchange mechanism in Envoy. When Istio pilot-agent starts an Envoy proxy as a sidecar of a service, it collects the metadata of that service from the Kubernetes platform, or a file on the VM where that service is deployed, and injects the metadata into the bootstrap configuration of Envoy. Envoy will carry the metadata transparently when emitting access logs to the SkyWalking receiver.\nBut how does Envoy compose a piece of a complete access log that involves the client side and server side? When a request goes out from Envoy, a plugin of istio-proxy named \u0026ldquo;metadata-exchange\u0026rdquo; injects the metadata into the http headers (with a prefix like x-envoy-downstream-), and the metadata is propagated to the server side. The Envoy sidecar of the server side receives the request and parses the headers into metadata, and puts the metadata into the access log, keyed by wasm.downstream_peer. The server side Envoy also puts its own metadata into the access log keyed by wasm.upstream_peer. Hence the two sides of a single request are completed.\nWith the metadata-exchange mechanism, we can use the metadata directly without any extra query.\nExample In this tutorial, we will use another demo application Online Boutique that consists of 10+ services so that we can deploy some of them in VMs and make them communicate with other services deployed in Kubernetes.\nTopology of Online Boutique In order to cover as many cases as possible, we will deploy CheckoutService and PaymentService on VM and all the other services on Kubernetes, so that we can cover the cases like Kubernetes → VM (e.g. Frontend → CheckoutService), VM → Kubernetes (e.g. CheckoutService → ShippingService), and VM → VM ( e.g. CheckoutService → PaymentService).\nNOTE: All the commands used in this tutorial are accessible on GitHub.\ngit clone https://github.com/SkyAPMTest/sw-als-vm-demo-scripts cd sw-als-vm-demo-scripts Make sure to init the gcloud SDK properly before moving on. Modify the GCP_PROJECT in file env.sh to your own project name. Most of the other variables should be OK to work if you keep them intact. 
If you would like to use ISTIO_VERSION \u0026gt;/= 1.8.0, please make sure this patch is included.\n  Prepare Kubernetes cluster and VM instances 00-create-cluster-and-vms.sh creates a new GKE cluster and 2 VM instances that will be used through the entire tutorial, and sets up some necessary firewall rules for them to communicate with each other.\n  Install Istio and SkyWalking 01a-install-istio.sh installs Istio Operator with spec resources/vmintegration.yaml. In the YAML file, we enable the meshExpansion that supports VM in mesh. We also enable the Envoy access log service and specify the address skywalking-oap.istio-system.svc.cluster.local:11800 to which Envoy emits the access logs. 01b-install-skywalking.sh installs Apache SkyWalking and sets the analyzer to mx-mesh.\n  Create files to initialize the VM 02-create-files-to-transfer-to-vm.sh creates necessary files that will be used to initialize the VMs. 03-copy-work-files-to-vm.sh securely transfers the generated files to the VMs with gcloud scp command. Now use ./ssh.sh checkoutservice and ./ssh.sh paymentservice to log into the two VMs respectively, and cd to the ~/work directory, execute ./prep-checkoutservice.sh on checkoutservice VM instance and ./prep-paymentservice.sh on paymentservice VM instance. The Istio sidecar should be installed and started properly. To verify that, use tail -f /var/logs/istio/istio.log to check the Istio logs. The output should be something like:\n2020-12-12T08:07:07.348329Z\tinfo\tsds\tresource:default new connection 2020-12-12T08:07:07.348401Z\tinfo\tsds\tSkipping waiting for gateway secret 2020-12-12T08:07:07.348401Z\tinfo\tsds\tSkipping waiting for gateway secret 2020-12-12T08:07:07.568676Z\tinfo\tcache\tRoot cert has changed, start rotating root cert for SDS clients 2020-12-12T08:07:07.568718Z\tinfo\tcache\tGenerateSecret default 2020-12-12T08:07:07.569398Z\tinfo\tsds\tresource:default pushed key/cert pair to proxy 2020-12-12T08:07:07.949156Z\tinfo\tcache\tLoaded root cert from certificate ROOTCA 2020-12-12T08:07:07.949348Z\tinfo\tsds\tresource:ROOTCA pushed root cert to proxy 2020-12-12T20:12:07.384782Z\tinfo\tsds\tresource:default pushed key/cert pair to proxy 2020-12-12T20:12:07.384832Z\tinfo\tsds\tDynamic push for secret default The dnsmasq configuration address=/.svc.cluster.local/{ISTIO_SERVICE_IP_STUB} also resolves the domain names ended with .svc.cluster.local to Istio service IP, so that you are able to access the Kubernetes services in the VM by fully qualified domain name (FQDN) such as httpbin.default.svc.cluster.local.\n  Deploy demo application Because we want to deploy CheckoutService and PaymentService manually on VM, resources/google-demo.yaml removes the two services from the original YAML . 04a-deploy-demo-app.sh deploys the other services on Kubernetes. Then log into the 2 VMs, run ~/work/deploy-checkoutservice.sh and ~/work/deploy-paymentservice.sh respectively to deploy CheckoutService and PaymentService.\n  Register VMs to Istio Services on VMs can access the services on Kubernetes by FQDN, but that’s not the case when the Kubernetes services want to talk to the VM services. The mesh has no idea where to forward the requests such as checkoutservice.default.svc.cluster.local because checkoutservice is isolated in the VM. Therefore, we need to register the services to the mesh. 
04b-register-vm-with-istio.sh registers the VM services to the mesh by creating a \u0026ldquo;dummy\u0026rdquo; service without running Pods, and a WorkloadEntry to bridge the \u0026ldquo;dummy\u0026rdquo; service with the VM service.\n  Done! The demo application contains a load generator service that performs requests repeatedly. We only need to wait a few seconds, and then open the SkyWalking web UI to check the results.\nexport POD_NAME=$(kubectl get pods --namespace istio-system -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o jsonpath=\u0026quot;{.items[0].metadata.name}\u0026quot;) echo \u0026quot;Visit http://127.0.0.1:8080 to use your application\u0026quot; kubectl port-forward $POD_NAME 8080:8080 --namespace istio-system Navigate the browser to http://localhost:8080. The metrics and topology should be there.\nTroubleshooting If you face any trouble when walking through the steps, here are some common problems and possible solutions:\n  VM service cannot access Kubernetes services? It’s likely the DNS on the VM doesn’t correctly resolve the fully qualified domain names. Try to verify that with nslookup istiod.istio-system.svc.cluster.local. If it doesn’t resolve to the Kubernetes CIDR address, recheck the step in prep-checkoutservice.sh and prep-paymentservice.sh. If the DNS works correctly, try to verify that Envoy has fetched the upstream clusters from the control plane with curl http://localhost:15000/clusters. If it doesn’t contain the target service, recheck prep-checkoutservice.sh.\n  Services are normal but nothing on SkyWalking WebUI? Check the SkyWalking OAP logs via kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=oap\u0026quot; -o name) and WebUI logs via kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o name) to see whether there are any error logs. Also, make sure the time zone at the bottom-right of the browser is set to UTC +0.\n  Additional Resources  Observe a Service Mesh with Envoy ALS.  ","title":"Observe VM Service Meshes with Apache SkyWalking and the Envoy Access Log Service","url":"/blog/obs-service-mesh-vm-with-sw-and-als/"},{"content":"When using the SkyWalking Java agent, people can usually propagate context easily, without even changing the business code. However, it becomes harder when you want to propagate context between threads that use a ThreadPoolExecutor. You can use the RunnableWrapper in the Maven artifact org.apache.skywalking:apm-toolkit-trace, but this way you must change your code. Development managers usually do not like this, because there may be lots of projects or lots of Runnable code, and if they stop using SkyWalking some day, the added code becomes superfluous and inelegant.\nIs there a way to propagate context without changing the business code? Yes.\nThe SkyWalking Java agent enhances a class by adding a field and implementing an interface. ThreadPoolExecutor is a special class that is used widely; we often do not even know when and where it is loaded. Most JVMs do not allow changes to the class file format for classes that have already been loaded, so SkyWalking cannot successfully enhance ThreadPoolExecutor by retransforming it once it has been loaded. However, we can apply an advice to the ThreadPoolExecutor#execute method and wrap the Runnable param using our own agent, then let the SkyWalking Java agent enhance the wrapper class. 
An advice does not change the layout of a class.\nNow we should decide how to do this. You can use the RunnableWrapper in the Maven artifact org.apache.skywalking:apm-toolkit-trace to wrap the param, but then you face another problem. This RunnableWrapper has a plugin whose activation condition is checking whether the @TraceCrossThread annotation is present. The agent core uses net.bytebuddy.pool.TypePool.Default.WithLazyResolution.LazyTypeDescription to find the annotations of a class. LazyTypeDescription finds annotations by using a URLClassLoader with no URLs when the classloader is null (the bootstrap classloader), so it cannot find the @TraceCrossThread class unless you change the LocationStrategy of the SkyWalking Java agent builder.\nIn this project, I write my own wrapper class and simply add a plugin with a name-match condition. Next, let me show you how these two agents work together.\n  Move the plugin to the SkyWalking \u0026ldquo;plugins\u0026rdquo; directory.\n  Add this agent after the SkyWalking agent, since the wrapper class should not be loaded before the SkyWalking agent instrumentation has finished. For example,\n java -javaagent:/path/to/skywalking-agent.jar -javaagent:/path/to/skywalking-tool-agent-v1.0.0.jar \u0026hellip;\n   When our application runs:\n The SkyWalking Java agent adds a transformer, by parsing the plugin, for enhancing the wrapper class in the tool agent. The tool agent loads the wrapper class into the bootstrap classloader, which triggers the previous transformer. The tool agent applies an advice to the ThreadPoolExecutor class, wrapping the java.lang.Runnable param of the \u0026ldquo;execute\u0026rdquo; method with the wrapper class. Now SkyWalking propagates the context with the wrapper class.    Enjoy tracing with ThreadPoolExecutor in SkyWalking!\n","title":"Apache SkyWalking: How to propagate context between threads when using ThreadPoolExecutor","url":"/blog/2021-02-09-skywalking-trace-threadpool/"},{"content":"SkyWalking CLI 0.6.0 is released. Go to downloads page to find release tars.\n  Features\n Support authorization when connecting to the OAP Add install command and manifest sub-command Add event command and report sub-command    Bug Fixes\n Fix the bug that JVM instance metrics cannot be queried    Chores\n Set up a simple test with GitHub Actions Reorganize the project layout Update year in NOTICE Add missing license of swck Use license-eye to check license header    ","title":"Release Apache SkyWalking CLI 0.6.0","url":"/events/release-apache-skywalking-cli-0-6-0/"},{"content":" Origin: Tetrate.io blog\n Background Apache SkyWalking\u0026ndash; the APM tool for distributed systems\u0026ndash; has historically focused on providing observability around tracing and metrics, but service performance is often affected by the host. The newest release, SkyWalking 8.4.0, introduces a new feature for monitoring virtual machines. Users can easily detect possible problems from the dashboard\u0026ndash; for example, when CPU usage is overloaded, when there’s not enough memory or disk space, or when the network status is unhealthy, etc.\nHow it works SkyWalking leverages Prometheus and OpenTelemetry for collecting metrics data, as we did for Istio control plane metrics; Prometheus is mature and widely used, and we expect to see increased adoption of the new CNCF project, OpenTelemetry. The SkyWalking OAP Server receives the metrics data in OpenCensus format from OpenTelemetry. The process is as follows:\n Prometheus Node Exporter collects metrics data from the VMs. 
OpenTelemetry Collector fetches metrics from Node Exporters via Prometheus Receiver, and pushes metrics to SkyWalking OAP Server via the OpenCensus GRPC Exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results. The expression rules are in /config/otel-oc-rules/vm.yaml. We can now see the data on the SkyWalking WebUI dashboard.  What to monitor SkyWalking provides default monitoring metrics including:\n CPU Usage (%) Memory RAM Usage (MB) Memory Swap Usage (MB) CPU Average Used CPU Load Memory RAM (total/available/used MB) Memory Swap (total/free MB) File System Mount point Usage (%) Disk R/W (KB/s) Network Bandwidth Usage (receive/transmit KB/s) Network Status (tcp_curr_estab/tcp_tw/tcp_alloc/sockets_used/udp_inuse) File fd Allocated  The following is how it looks when we monitor Linux:\nHow to use To enable this feature, we need to install Prometheus Node Exporter and OpenTelemetry Collector and activate the VM monitoring rules in SkyWalking OAP Server.\nInstall Prometheus Node Exporter wget https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-amd64.tar.gz tar xvfz node_exporter-1.0.1.linux-amd64.tar.gz cd node_exporter-1.0.1.linux-amd64 ./node_exporter In linux Node Exporter exposes metrics on port 9100 by default. When it is running, we can get the metrics from the /metrics endpoint. Use a web browser or command curl to verify.\ncurl http://localhost:9100/metrics We should see all the metrics from the output like:\n# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile=\u0026#34;0\u0026#34;} 7.7777e-05 go_gc_duration_seconds{quantile=\u0026#34;0.25\u0026#34;} 0.000113756 go_gc_duration_seconds{quantile=\u0026#34;0.5\u0026#34;} 0.000127199 go_gc_duration_seconds{quantile=\u0026#34;0.75\u0026#34;} 0.000147778 go_gc_duration_seconds{quantile=\u0026#34;1\u0026#34;} 0.000371894 go_gc_duration_seconds_sum 0.292994058 go_gc_duration_seconds_count 2029 ... Note: We only need to install Node Exporter, rather than Prometheus server. If you want to get more information about Prometheus Node Exporter see: https://prometheus.io/docs/guides/node-exporter/\nInstall OpenTelemetry Collector We can quickly install a OpenTelemetry Collector instance by using docker-compose with the following steps:\n Create a directory to store the configuration files, like /usr/local/otel. 
Create docker-compose.yaml and otel-collector-config.yaml in this directory represented below:  docker-compose.yaml\nversion:\u0026#34;2\u0026#34;services:# Collectorotel-collector:# Specify the image to start the container fromimage:otel/opentelemetry-collector:0.19.0# Set the otel-collector configfile command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]# Mapping the configfile to host directoryvolumes:- ./otel-collector-config.yaml:/etc/otel-collector-config.yamlports:- \u0026#34;13133:13133\u0026#34;# health_check extension- \u0026#34;55678\u0026#34;# OpenCensus receiverotel-collector-config.yaml\nextensions:health_check:# A receiver is how data gets into the OpenTelemetry Collectorreceivers:# Set Prometheus Receiver to collects metrics from targets# It’s supports the full set of Prometheus configurationprometheus:config:scrape_configs:- job_name:\u0026#39;otel-collector\u0026#39;scrape_interval:10sstatic_configs:# Replace the IP to your VMs‘s IP which has installed Node Exporter- targets:[\u0026#39;vm1:9100\u0026#39;]- targets:[\u0026#39;vm2:9100\u0026#39;]- targets:[‘vm3:9100\u0026#39; ]processors:batch:# An exporter is how data gets sent to different systems/back-endsexporters:# Exports metrics via gRPC using OpenCensus formatopencensus:endpoint:\u0026#34;docker.for.mac.host.internal:11800\u0026#34;# The OAP Server addressinsecure:truelogging:logLevel:debugservice:pipelines:metrics:receivers:[prometheus]processors:[batch]exporters:[logging, opencensus]extensions:[health_check]In this directory use command docker-compose to start up the container:  docker-compose up -d After the container is up and running, you should see metrics already exported in the logs:\n... Metric #165 Descriptor: -\u0026gt; Name: node_network_receive_compressed_total -\u0026gt; Description: Network device statistic receive_compressed. -\u0026gt; Unit: -\u0026gt; DataType: DoubleSum -\u0026gt; IsMonotonic: true -\u0026gt; AggregationTemporality: AGGREGATION_TEMPORALITY_CUMULATIVE DoubleDataPoints #0 Data point labels: -\u0026gt; device: ens4 StartTime: 1612234754364000000 Timestamp: 1612235563448000000 Value: 0.000000 DoubleDataPoints #1 Data point labels: -\u0026gt; device: lo StartTime: 1612234754364000000 Timestamp: 1612235563448000000 Value: 0.000000 ... If you want to get more information about OpenTelemetry Collector see: https://opentelemetry.io/docs/collector/\nSet up SkyWalking OAP Server To activate the oc handler and vm relevant rules, set your environment variables:\nSW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OC_RULES=vm Note: If there are other rules already activated , you can add vm with use , as a separator.\nSW_OTEL_RECEIVER_ENABLED_OC_RULES=vm,oap Start the SkyWalking OAP Server.\nDone! After all of the above steps are completed, check out the SkyWalking WebUI. Dashboard VM provides the default metrics of all observed virtual machines. Note: Clear the browser local cache if you used it to access deployments of previous SkyWalking versions.\nAdditional Resources  Read more about the SkyWalking 8.4 release highlights. Get more SkyWalking updates on Twitter.  ","title":"SkyWalking 8.4 provides infrastructure monitoring","url":"/blog/2021-02-07-infrastructure-monitoring/"},{"content":" Origin: Tetrate.io blog\n The Apache SkyWalking team today announced the 8.4 release is generally available. This release fills the gap between all previous versions of SkyWalking and the logging domain area. 
The release also advances SkyWalking’s capabilities for infrastructure observability, starting with virtual machine monitoring.\nBackground SkyWalking has historically focused on the tracing and metrics fields of observability. As its features for tracing, metrics and service level monitoring have become more and more powerful and stable, the SkyWalking team has started to explore new scenarios covered by observability. Because service performance is reflected in the logs, and is highly impacted by the infrastructure on which it runs, SkyWalking brings these two fields into the 8.4 release. This release blog briefly introduces the two new features as well as some other notable changes.\nLogs Metrics, tracing, and logging are considered the three pillars of observability [1]. SkyWalking had the full features of metrics and tracing prior to 8.4; today, as 8.4 is released, the last piece of the jigsaw is now in place.\nFigure 1: Logs Collected By SkyWalking\nFigure 2: Logs Collected By SkyWalking\nThe Java agent firstly provides SDKs to enhance the widely-used logging frameworks, log4j (1.x and 2.x) [2] and logback [3], and send the logs to the SkyWalking backend (OAP). The latter is able to collect logs from wherever the protocol is implemented. This is not a big deal, but when it comes to the correlation between logs and traces, the traditional solution is to print the trace IDs in the logs, and pick the IDs in the error logs to query the related traces. SkyWalking just simplifies the workflow by correlating the logs and traces natively. Navigating between traces and their related logs is as simple as clicking a button.\nFigure 3: Correlation Between Logs and Traces\nInfrastructure Monitoring SkyWalking is known as an application performance monitoring tool. One of the most important factors that impacts the application’s performance is the infrastructure on which the application runs. In the 8.4 release, we added the monitoring metrics of virtual machines into the dashboard.\nFigure 4: VM Metrics\nFundamental metrics such as CPU Used, Memory Used, Disk Read / Write and Network Usage are available on the dashboard. And as usual, those metrics are also available to be configured as alarm triggers when needed.\nDynamic Configurations at Agent Side Dynamic configuration at the backend side has long existed in SkyWalking for several versions. Now, it finally comes to the agent side! Prior to 8.4, you’d have to restart the target services when you modify some configuration items of the agent \u0026ndash; for instance, sampling rate (agent side), ignorable endpoint paths, etc. Now, say goodbye to rebooting. Modifying configurations is not the only usage of the dynamic configuration mechanism. The latter gives countless possibilities to the agent side in terms of dynamic behaviours, e.g. enabling / disabling plugins, enabling / disabling the whole agent, etc. Just imagine!\nGrouped Service Topology This enhancement is from the UI. SkyWalking backend supports grouping the services by user-defined dimensions. In a real world use case, the services are usually grouped by business group or department. When a developer opens the topology map, out of hundreds of services, he or she may just want to focus on the services in charge. The grouped service topology comes to the rescue: one can now choose to display only services belonging to a specified group.\nFigure 5: Grouped Service Topology\nOther Notable Enhancements  Agent: resolves domain names to look up backend service IP addresses. 
Backend: meter receiver supports meter analysis language (MAL). Backend: several CVE fixes. Backend: supports Envoy {AccessLog,Metrics}Service API V3 and adopts MAL.  Links  [1] https://peter.bourgon.org/blog/2017/02/21/metrics-tracing-and-logging.html [2] https://logging.apache.org/log4j/2.x/ [3] http://logback.qos.ch  Additional Resources  Read more about the SkyWalking 8.4 release highlights. Get more SkyWalking updates on Twitter.  ","title":"Apache SkyWalking 8.4: Logs, VM Monitoring, and Dynamic Configurations at Agent Side","url":"/blog/skywalking8-4-release/"},{"content":"SkyWalking 8.4.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. 
Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. 
Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.4.0","url":"/events/release-apache-skywalking-apm-8-4-0/"},{"content":"Background The verifier is an important part of the next generation End-to-End Testing framework (NGE2E), which is responsible for verifying whether the actual output satisfies the expected template.\nDesign Thinking We will implement the verifier with Go template, plus some enhancements. Firstly, users need to write a Go template file with provided functions and actions to describe how the expected data looks like. Then the verifer renders the template with the actual data object. Finally, the verifier compares the rendered output with the actual data. If the rendered output is not the same with the actual output, it means the actual data is inconsist with the expected data. Otherwise, it means the actual data match the expected data. On failure, the verifier will also print out what are different between expected and actual data.\nBranches / Actions The verifier inherits all the actions from the standard Go template, such as if, with, range, etc. In addition, we also provide some custom actions to satisfy our own needs.\nList Elements Match contains checks if the actual list contains elements that match the given template.\nExamples:\nmetrics:{{- contains .metrics }}- name:{{notEmpty .name }}id:{{notEmpty .id }}value:{{gt .value 0 }}{{- end }}It means that the list metrics must contain an element whose name and id are not empty, and value is greater than 0.\nmetrics:{{- contains .metrics }}- name:p95value:{{gt .value 0 }}- name:p99value:{{gt .value 0 }}{{- end }}This means that the list metrics must contain an element named p95 with a value greater than 0, and an element named p95 with a value greater than 0. 
Besides the two element, the list metrics may or may not have other random elements.\nFunctions Users can use these provided functions in the template to describe the expected data.\nNot Empty notEmpty checks if the string s is empty.\nExample:\nid:{{notEmpty .id }}Regexp match regexp checks if string s matches the regular expression pattern.\nExamples:\nlabel:{{regexp .label \u0026#34;ratings.*\u0026#34; }}Base64 b64enc s returns the Base64 encoded string of s.\nExamples:\nid:{{b64enc \u0026#34;User\u0026#34; }}.static-suffix# this evalutes the base64 encoded string of \u0026#34;User\u0026#34;, concatenated with a static suffix \u0026#34;.static-suffix\u0026#34;Result:\nid:VXNlcg==.static-suffixFull Example Here is an example of expected data:\n# expected.data.yamlnodes:- id:{{b64enc \u0026#34;User\u0026#34; }}.0name:Usertype:USERisReal:false- id:{{b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1name:Your_ApplicationNametype:TomcatisReal:true- id:{{$h2ID := (index .nodes 2).id }}{{ notEmpty $h2ID }}# We assert that nodes[2].id is not empty and save it to variable `h2ID` for later usename:localhost:-1type:H2isReal:falsecalls:- id:{{notEmpty (index .calls 0).id }}source:{{b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1target:{{$h2ID }}# We use the previously assigned variable `h2Id` to asert that the `target` is equal to the `id` of the nodes[2]detectPoints:- CLIENT- id:{{b64enc \u0026#34;User\u0026#34; }}.0-{{ b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1source:{{b64enc \u0026#34;User\u0026#34; }}.0target:{{b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1detectPoints:- SERVERwill validate this data:\n# actual.data.yamlnodes:- id:VXNlcg==.0name:Usertype:USERisReal:false- id:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1name:Your_ApplicationNametype:TomcatisReal:true- id:bG9jYWxob3N0Oi0x.0name:localhost:-1type:H2isReal:falsecalls:- id:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1-bG9jYWxob3N0Oi0x.0source:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1detectPoints:- CLIENTtarget:bG9jYWxob3N0Oi0x.0- id:VXNlcg==.0-WW91cl9BcHBsaWNhdGlvbk5hbWU=.1source:VXNlcg==.0detectPoints:- SERVERtarget:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1# expected.data.yamlmetrics:{{- contains .metrics }}- name:{{notEmpty .name }}id:{{notEmpty .id }}value:{{gt .value 0 }}{{- end }}will validate this data:\n# actual.data.yamlmetrics:- name:business-zone::projectAid:YnVzaW5lc3Mtem9uZTo6cHJvamVjdEE=.1value:1- name:system::load balancer1id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMQ==.1value:0- name:system::load balancer2id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMg==.1value:0and will report an error when validating this data, because there is no element with a value greater than 0:\n# actual.data.yamlmetrics:- name:business-zone::projectAid:YnVzaW5lc3Mtem9uZTo6cHJvamVjdEE=.1value:0- name:system::load balancer1id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMQ==.1value:0- name:system::load balancer2id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMg==.1value:0The contains does an unordered list verification, in order to do list verifications including orders, you can simply use the basic ruls like this:\n# expected.data.yamlmetrics:- name:p99value:{{gt (index .metrics 0).value 0 }}- name:p95value:{{gt (index .metrics 1).value 0 }}which expects the actual metrics list to be exactly ordered, with first element named p99 and value greater 0, second element named p95 and value greater 0.\n","title":"[Design] The Verifier of NGE2E","url":"/blog/2021-02-01-e2e-verifier-design/"},{"content":"SkyWalking Cloud on Kubernetes 0.2.0 is released. 
Go to downloads page to find release tars.\n Introduce custom metrics adapter to SkyWalking OAP cluster for Kubernetes HPA autoscaling. Add RBAC files and service account to support Kubernetes coordination. Add default and validation webhooks to operator controllers. Add UI CRD to deploy skywalking UI server. Add Fetcher CRD to fetch metrics from other telemetry systems, for example, Prometheus.  ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.2.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-2-0/"},{"content":"Apache SkyWalking is an open source APM for distributed systems and an Apache Software Foundation top-level project.\nOn Jan. 11th, 2021, we noticed the Tencent Cloud service, Tencent Service Watcher (TSW), for the first time. Due to the similar short name (SkyWalking is also called SW in the community), we connected with the service team of Tencent Cloud and kindly asked about it.\nThey replied that TSW is purely developed by the Tencent team itself and does not have any code dependency on SkyWalking. We did not push harder.\nBut one week later, on Jan. 18th, 2021, our V.P., Sheng, got a report from Haoyang, a SkyWalking PMC member, through WeChat DM (direct message). He provided complete evidence to prove that TSW actually re-distributed SkyWalking\u0026rsquo;s Java agent. We keep one copy of their agent\u0026rsquo;s distribution (from Jan. 18th), which can be downloaded here.\nSome typical pieces of evidence are listed here:\n  ServiceManager is copied and package-name changed in the TSW\u0026rsquo;s agent.   ContextManager is copied and package-name changed in the TSW\u0026rsquo;s agent.   At the same time, we checked their tsw-client-package.zip; it did not include SkyWalking\u0026rsquo;s LICENSE and NOTICE. Also, they did not mention on their website that the TSW agent is a re-distribution of SkyWalking.\nWith all of the above information, we had enough reason to believe, from the technical perspective, that they were violating the Apache 2.0 License.\nOn Jan. 18th, 2021, we sent the mail [Apache 2.0 License Violation] Tencent Cloud TSW service doesn't follow the Apache 2.0 License to brief the SkyWalking PMC, and took the following actions to connect with Tencent:\n Made a direct call to the Tencent Open Source Office. Connected with the Tencent Cloud TVP program committee, as Sheng Wu (our V.P.) is a Tencent Cloud TVP. Talked with the Tencent Cloud team lead.  In all of the above channels, we provided the evidence of the copy-and-redistribution behaviors, and requested them to re-evaluate the statements on their website and follow the License\u0026rsquo;s requirements.\nResolution On the night of Jan. 19th, 2021 (UTC+8), we received a response from the Tencent Cloud team. They admitted the violation and made the following changes:\n  The Tencent Cloud TSW service page now states that the agent is a fork (re-distribution) of the Apache SkyWalking agent.   The TSW agent distributions now include SkyWalking\u0026rsquo;s LICENSE and NOTICE. Below is the screenshot; you can download the distribution from their product page. We keep a copy of their Jan. 19th, 2021 distribution here.   We have updated the status on the PMC mailing list. This license violation issue has been resolved for now.\nThe SkyWalking community and project management committee will keep our eyes on Tencent TSW. 
","title":"[Resolved][License Issue] Tencent Cloud TSW service violates the Apache 2.0 License when using SkyWalking.","url":"/blog/2021-01-23-tencent-cloud-violates-aplv2/"},{"content":" 第一节:开篇介绍 第二节:数字游戏(Number Game) 第三节:社区原则(Community “Principles”) 第四节:基金会原则(For public good) 第五节:一些不太好的事情  B站视频地址\n","title":"[视频] 开放原子开源基金会2020年度峰会 - Educate community Over Support community","url":"/zh/2021-01-21-educate-community/"},{"content":"Elastic announced their license change, Upcoming licensing changes to Elasticsearch and Kibana.\n We are moving our Apache 2.0-licensed source code in Elasticsearch and Kibana to be dual licensed under Server Side Public License (SSPL) and the Elastic License, giving users the choice of which license to apply. This license change ensures our community and customers have free and open access to use, modify, redistribute, and collaborate on the code. It also protects our continued investment in developing products that we distribute for free and in the open by restricting cloud service providers from offering Elasticsearch and Kibana as a service without contributing back. This will apply to all maintained branches of these two products and will take place before our upcoming 7.11 release. Our releases will continue to be under the Elastic License as they have been for the last three years.\n Also, they provide the FAQ page for more information about the impact for the users, developers, and vendors.\nIn the perspective of Apache Software Foundation, SSPL has been confirmed as a Catalog X LICENSE(https://www.apache.org/legal/resolved.html#category-x), which means hard-dependency as a part of the core is not allowed. With that, we can\u0026rsquo;t only focus on it anymore. We need to consider other storage options. Right now, we still have InfluxDB, TiDB, H2 server still in Apache 2.0 licensed. Right now, we still have InfluxDB, TiDB, H2 server as storage options still in Apache 2.0 licensed.\nAs one optional plugin, we need to focus on the client driver license. Right now, we are only using ElasticSearch 7.5.0 and 6.3.2 drivers, which are both Apache 2.0 licensed. So, we are safe. For further upgrade, here is their announcement. They answer these typical cases in the FAQ page.\n  I build a SaaS application using Elasticsearch as the backend, how does this affect me?\n This source code license change should not affect you - you can use our default distribution or develop applications on top of it for free, under the Elastic License. This source-available license does not contain any copyleft provisions and the default functionality is free of charge. For a specific example, you can see our response to a question around this at Magento.\nOur users still could use, redistribute, sale the products/services, based on SkyWalking, even they are using self hosting Elastic Search unmodified server.\n  I\u0026rsquo;m using Elasticsearch via APIs, how does this change affect me?\n This change does not affect how you use client libraries to access Elasticsearch. Our client libraries remain licensed under Apache 2.0, with the exception of our Java High Level Rest Client (Java HLRC). The Java HLRC has dependencies on the core of Elasticsearch, and as a result this client library will be licensed under the Elastic License. Over time, we will eliminate this dependency and move the Java HLRC to be licensed under Apache 2.0. 
Until that time, for the avoidance of doubt, we do not consider using the Java HLRC as a client library in development of an application or library used to access Elasticsearch to constitute a derivative work under the Elastic License, and this will not have any impact on how you license the source code of your application using this client library or how you distribute it.\nThe client driver license incompatibility issue will remain; we cannot upgrade the driver(s) until they release Apache 2.0 licensed driver jars. But users are still safe to upgrade the drivers by themselves.\n Apache SkyWalking will discuss further actions here. If you have any questions, you are welcome to ask. Later in 2021, we will begin to investigate the possibility of creating SkyWalking\u0026rsquo;s own observability database implementation.\n","title":"Response to Elastic 2021 License Change","url":"/blog/2021-01-17-elastic-change-license/"},{"content":"SkyWalking Client JS 0.3.0 is released. Go to downloads page to find release tars.\n Support tracing starting at the browser. Add traceSDKInternal SDK for tracing SDK internal RPC. Add detailMode SDK for tracing http method and url as tags in spans. Fix conditions of http status.  ","title":"Release Apache SkyWalking Client JS 0.3.0","url":"/events/release-apache-skywalking-client-js-0-3-0/"},{"content":"SkyWalking Eyes 0.1.0 is released. Go to downloads page to find release tars.\n License Header  Add check and fix command. check results can be reported to pull request as comments. fix suggestions can be filed on pull request as edit suggestions.    ","title":"Release Apache SkyWalking Eyes 0.1.0","url":"/events/release-apache-skywalking-eyes-0-1-0/"},{"content":"SkyWalking NodeJS 0.1.0 is released. Go to downloads page to find release tars.\n Initialize project core codes. Built-in http/https plugin. Express plugin. Axios plugin.  ","title":"Release Apache SkyWalking for NodeJS 0.1.0","url":"/events/release-apache-skywalking-nodejs-0-1-0/"},{"content":"SkyWalking Python 0.5.0 is released. Go to downloads page to find release tars.\n  New plugins\n Pyramid Plugin (#102) AioHttp Plugin (#101) Sanic Plugin (#91)    API and enhancements\n @trace decorator supports async functions Supports async task context Optimized path trace ignore Moved exception check to Span.__exit__ Moved Method \u0026amp; Url tags before requests    Fixes:\n BaseExceptions not recorded as errors Allow pending data to send before exit sw_flask general exceptions handled Make skywalking logging Non-global    Chores and tests\n Make tests really run on specified Python version Deprecate 3.5 as it\u0026rsquo;s EOL    ","title":"Release Apache SkyWalking Python 0.5.0","url":"/events/release-apache-skywalking-python-0-5-0/"},{"content":"Apache SkyWalking is an open source APM for distributed systems, providing tracing, service mesh observability, metrics analysis, alarms, and visualization.\nJust 11 months ago, on Jan. 20th, 2020, SkyWalking hit the 200 contributors mark. With the growth of the project and the community, SkyWalking now includes over 20 sub (ecosystem) projects covering multiple language agents and service mesh, integration with mature open source projects like Prometheus and Spring (Sleuth), and hundreds of libraries to support all tracing/metrics/logs fields. In the past year, the number of contributors has grown astoundingly, and all its metrics point to the community\u0026rsquo;s vibrancy. 
Many corporate titans are already using SkyWalking in a large-scale production environment, including Alibaba, Huawei, Baidu, Tencent, etc.\nRecently, our SkyWalking main repository passed 300 contributors.\nOur website has thousands of views from most countries in the world every week.\nAlthough we know that metrics like GitHub stars and the numbers of open users and contributors are not a determinant of vibrancy, they do show the trend, and we are very proud to share the increased numbers here, too.\nWe have doubled those numbers and are honored by the development of our community.\nThank you to all of our contributors. Not just the 300 contributors of the main repository, or the nearly 400 contributors in all repositories counted by GitHub. There are countless people contributing code to SkyWalking\u0026rsquo;s subprojects, ecosystem projects, and private fork versions; writing blogs and guides; translating documents, books, and presentations; setting up learning sessions for new users; and convincing friends to join the community as end users, contributors, and even committers. Companies behind those contributors support their employees in working with the community to provide feedback and contribute improvements and features upstream. Conference organizers share the stages with speakers from the SkyWalking community.\nSkyWalking could not make this happen without your help. You made this community extraordinary.\nIn this crazy distributed computing and cloud native age, we as a community can make DEV, OPS, and SRE teams' work easier by locating the issue(s) in the haystack quicker than before. That is why we named the project SkyWalking: you have a clear sight line when you stand on the glass bridge Skywalk at Grand Canyon West.\n The 376 contributors counted by GitHub account are listed below (Dec. 22nd, 2020). 
Generated by a tool deveoped by Yousa\n 1095071913 50168383 Ahoo-Wang AirTrioa AlexanderWert AlseinX Ax1an BFergerson BZFYS CharlesMaster ChaunceyLin5152 CommissarXia Cvimer Doublemine ElderJames EvanLjp FatihErdem FeynmanZhou Fine0830 FingerLiu Gallardot GerryYuan HackerRookie Heguoya Hen1ng Humbertzhang IanCao IluckySi Indifer J-Cod3r JaredTan95 Jargon96 Jijun JohnNiang Jozdortraz Jtrust Just-maple KangZhiDong LazyLei LiWenGu Liu-XinYuan Miss-you O-ll-O Patrick0308 QHWG67 Qiliang RandyAbernethy RedzRedz Runrioter SataQiu ScienJus SevenPointOld ShaoHans Shikugawa SoberChina SummerOfServenteen TJ666 TerrellChen TheRealHaui TinyAllen TomMD ViberW Videl WALL-E WeihanLi WildWolfBang WillemJiang Wooo0 XhangUeiJong Xlinlin YczYanchengzhe YoungHu YunaiV ZhHong ZhuoSiChen ZS-Oliver a198720 a526672351 acurtain adamni135 adermxzs adriancole aeolusheath agile6v aix3 aiyanbo ajanthan alexkarezin alonelaval amogege amwyyyy arugal ascrutae augustowebd bai-yang beckhampu beckjin beiwangnull bigflybrother bostin brucewu-fly c1ay candyleer carlvine500 carrypann cheenursn cheetah012 chenpengfei chenvista chess-equality chestarss chidaodezhongsheng chopin-d clevertension clk1st cngdkxw codeglzhang codelipenghui coder-yqj coki230 coolbeevip crystaldust cui-liqiang cuiweiwei cyberdak cyejing dagmom dengliming devkanro devon-ye dimaaan dingdongnigetou dio dmsolr dominicqi donbing007 dsc6636926 duotai dvsv2 dzx2018 echooymxq efekaptan eoeac evanxuhe feelwing1314 fgksgf fuhuo geektcp geomonlin ggndnn gitter-badger glongzh gnr163 gonedays grissom-grissom grissomsh guodongq guyukou gxthrj gzshilu hailin0 hanahmily haotian2015 haoyann hardzhang harvies hepyu heyanlong hi-sb honganan hsoftxl huangyoje huliangdream huohuanhuan innerpeacez itsvse jasonz93 jialong121 jinlongwang jjlu521016 jjtyro jmjoy jsbxyyx justeene juzhiyuan jy00464346 kaanid karott kayleyang kevinyyyy kezhenxu94 kikupotter kilingzhang killGC klboke ksewen kuaikuai kun-song kylixs landonzeng langke93 langyan1022 langyizhao lazycathome leemove leizhiyuan libinglong lilien1010 limfriend linkinshi linliaoy liuhaoXD liuhaoyang liuyanggithup liuzhengyang liweiv lkxiaolou llissery louis-zhou lpf32 lsyf lucperkins lujiajing1126 lunamagic1978 lunchboxav lxliuxuankb lytscu lyzhang1999 magic-akari makingtime maolie masterxxo maxiaoguang64 membphis mestarshine mgsheng michaelsembwever mikkeschiren mm23504570 momo0313 moonming mrproliu muyun12 nacx neatlife neeuq nic-chen nikitap492 nileblack nisiyong novayoung oatiz oflebbe olzhy onecloud360 osiriswd peng-yongsheng pengweiqhca potiuk purgeyao qijianbo010 qinhang3 qiuyu-d qqeasonchen qxo raybi-asus refactor2 remicollet rlenferink rootsongjc rovast scolia sdanzo seifeHu shiluo34 sikelangya simonlei sk163 snakorse songzhendong songzhian sonxy spacewander stalary stenio2011 stevehu stone-wlg sungitly surechen swartz-k sxzaihua tanjunchen tankilo taskmgr tbdpmi terranhu terrymanu tevahp thanq thebouv tianyuak tincopper tinyu0 tom-pytel tristaZero tristan-tsl trustin tsuilouis tuohai666 tzsword-2020 tzy1316106836 vcjmhg vision-ken viswaramamoorthy wankai123 wbpcode web-xiaxia webb2019 weiqiang333 wendal wengangJi wenjianzhang whfjam wind2008hxy withlin wqr2016 wu-sheng wuguangkuo wujun8 wuxingye x22x22 xbkaishui xcaspar xiaoxiangmoe xiaoy00 xinfeingxia85 xinzhuxiansheng xudianyang yanbw yanfch yang-xiaodong yangxb2010000 yanickxia yanmaipian yanmingbi yantaowu yaowenqiang yazong ychandu ycoe yimeng yu199195 yuqichou yuyujulin yymoth zaunist zaygrzx zcai2 zeaposs zhang98722 zhanghao001 zhangjianweibj zhangkewei 
zhangsean zhaoyuguang zhentaoJin zhousiliang163 zhuCheer zifeihan zkscpqm zoidbergwill zoumingzm zouyx zshit zxbu zygfengyuwuzu  ","title":"Celebrate SkyWalking single repository hits the 300 contributors mark","url":"/blog/2021-01-01-300-contributors-mark/"},{"content":"Ke Zhang (a.k.a. HumbertZhang) mainly focuses on the SkyWalking Python agent. He participated in the \u0026ldquo;Open Source Promotion Plan - Summer 2020\u0026rdquo;, completed the project smoothly, and won the \u0026ldquo;Most Potential Students\u0026rdquo; award, which shows his great willingness to continuously contribute to our community.\nTo date, he has submitted 8 PRs in the Python agent repository and 7 PRs in the main repo, including ~2000 LOC in total.\nOn Dec. 13th, 2020, the project management committee (PMC) passed the proposal of promoting him as a new committer. He accepted the invitation on the same day.\nWelcome to join the committer team, Ke Zhang!\n","title":"Welcome Ke Zhang (张可) as new committer","url":"/events/welcome-ke-zhang-as-new-committer/"},{"content":"During this summer vacation I took part in the Open Source Promotion Plan - Summer 2020 program. In this program, I mainly participated in the development of the Apache SkyWalking Python agent. The project was completed smoothly and won the \u0026ldquo;Most Potential Students\u0026rdquo; award. Today I would like to share my feelings about and gains from participating in this program and in the open source community.\nHow it started Actually, I had heard about SkyWalking before taking part in the Summer 2020 program. My main research direction as a graduate student is microservices and cloud native, and senior students in my group had already been using SkyWalking in some research work. Through them, I learned about OpenTracing, SkyWalking, and other microservice-related tracing tools and APMs, and at that time I was already thinking how nice it would be if I had the chance to participate deeply in these open source projects. Coincidentally, at around the same time, a senior schoolmate from my undergraduate years sent me the link to the Summer 2020 program, and I was pleasantly surprised to find the SkyWalking project in it.\nAlthough I wanted to participate in SkyWalking development, I felt a little unconfident when the opportunity was really there: this is an Apache top-level project with tens of thousands of stars. Fortunately, in the Summer 2020 program, every community provided many topics to choose from, so students who wanted to participate could learn in advance what needed to be done and make some preparations. I carefully browsed the project list and finally decided to apply for the task of supporting Flask or Django instrumentation in the Python agent. My main considerations were that I am quite familiar with the Python language and have experience developing with web frameworks such as Flask, so I believed I could meet the project requirements. To feel more confident, I read the source code of the Python agent, wrote down my understanding of the work the project required, and sent a self-recommendation email to the project mentor, Zhenxu Ke, and was finally selected to complete this project.\nThe process I was very excited after being selected, and I turned this excitement into motivation for participating in open source. After further reading the source code and setting up a local environment, I spent about three weeks completing the development of the instrumentation plugin for Django; after all, the project I chose was a low-difficulty one, and I had some experience in Python web development. After that, my mentor talked with me, and after I expressed my willingness to keep contributing, he suggested some directions in which I could contribute further, so I continued to participate in the development of the Python agent. Subsequently, I completed the PyMongo instrumentation plugin, the plugin version checking mechanism, support for reporting data via the Kafka protocol, and other features. After submitting the completion report for the Summer 2020 program, I went on to work on features such as adding verification of percentiles to the end-to-end tests.\nThroughout the whole process, I ran into many problems, including not understanding an issue clearly enough and not designing a feature well enough, but through discussions with my mentor and code review, these problems were all solved in the end. In addition, he often discussed the further development direction of the project with me and gave me encouragement and affirmation. Here I would like to especially thank my mentor for all the help he gave me during the whole project.\nGains Participating in the Summer 2020 program brought me a lot, mainly in the following aspects:\nFirst, it let me truly participate in an open source project. Before this, I had only filed some pull requests for typos I found in project code or documentation, but the Summer 2020 program, by listing projects and providing mentor guidance, clarified what needed to be done, provided corresponding guidance, and lowered the threshold of participating in open source, so that students like us could take part in project development.\nSecond, it inspired my research a lot. My research direction is related to microservices and cloud native; participating in SkyWalking development helped me better understand some concepts in my research problems and made me more comfortable using SkyWalking to solve practical problems.\nThird, through participating in the development of the SkyWalking Python agent and other parts, my contributions were recognized by the community, and I was recently invited to join the community as a committer. This is a very high recognition for me and also boosted my confidence.\nFourth, through this program I met quite a few new friends and broadened my horizons, which gave me many new understandings of open source projects and open source communities.\nSuggestions Finally, here are some suggestions for students who want to participate in open source communities and in activities like this:\n Although the bonus is attractive, I still hope everyone can participate in open source projects with the mindset of contributing to the project in the long term. Participating in open source with this mindset lets you better understand how an open source community works and gives you more chances to work on exciting features; the more time and energy you put into something, the more you usually get out of it. When applying for a project, you can read the source code of the relevant features in advance and write a clear proposal based on your own thinking, which helps you stand out among the applicants. Before starting to implement a feature, first sort out your ideas and communicate and confirm them with your mentor or someone who knows that part well, so as to avoid wasting too much time in the wrong direction.  ","title":"暑期 2020 活动学生(张可)心得分享","url":"/zh/2020-12-20-summer2020-activity-sharing2/"},{"content":"Background I am someone who loves programming and technology, and I have always looked forward to participating in open source projects to sharpen my skills, but when facing a large and complex project codebase, I felt at a loss and did not know where to start. The \u0026ldquo;Open Source Promotion Plan - Summer 2020\u0026rdquo; program provided exactly such an opportunity: clear task requirements, open source community members acting as mentors, and a generous bonus, which let me smoothly set out on the open source road.\nReview During the two-plus months of the Summer 2020 program, I implemented a dashboard for the SkyWalking command line tool, and while reading the project source code I also found and fixed several bugs. By the end of the program, I had submitted 11 PRs in total, contributed more than two thousand lines of changes, ranked second in the number of contributions to the SkyWalking CLI project, and won the \u0026ldquo;Most Potential Students\u0026rdquo; award.\nI think there are two main reasons why I could complete this project so smoothly. On the one hand, the SkyWalking CLI project I chose was at version 0.3.0 at the time and still in its early stage, with a relatively small amount of code, a very clear project structure, and fairly detailed documentation, which was very helpful for me to understand the whole project and get started faster. On the other hand, my project mentor was very conscientious and responsible: every time I ran into a problem, the mentor answered it promptly, and the PRs I submitted were reviewed very quickly. The mentor also gave me affirmative comments and encouragement from time to time, which greatly increased my sense of achievement and made me devote myself more actively to the next stage of work, forming a positive cycle.\nGains Looking back on the whole participation process, I feel I gained a lot:\nFirst, I learned many new technologies that I might not have been exposed to at school, understood how open source projects collaborate and how open source communities operate and are governed, and learned about open source culture, the Apache Way, and so on, as if entering a brand new and wonderful world.\nSecond, my programming ability was exercised. Because open source projects have high requirements for code quality, I consciously followed the relevant conventions while programming and cultivated good coding habits. I also learned some programming techniques from the mentor\u0026rsquo;s code reviews.\nIn addition, participating in open source brought a lot of inspiration to my research. My research direction is intelligent software engineering, which aims to apply artificial intelligence techniques to all stages of software engineering, and this requires me to find practical problems in practice. Open source provides such a window, allowing me to participate in the design, development, testing, and release of software projects without leaving home.\nFinally, the biggest gain from this program: my contributions were recognized by the community, and I was nominated to become the first student committer of the SkyWalking community.\nSuggestions Finally, for students who want to participate in such activities in the future, here are some of my suggestions:\nFirst, choose an active and well-known community. The community will have an extremely far-reaching influence on you. A good community means a mature collaboration process, a good atmosphere, rigorous coding standards, and a greater chance of meeting excellent mentors, all of which are very helpful for your future development in open source.\nSecond, choose a project driven by your interest, and dare to step out of your comfort zone. When I first chose projects, I preliminarily picked two: a low-difficulty Python project and a medium-difficulty Go project. I was torn at the time: since I am familiar with the Python language, choosing a low-difficulty project would be safer, but I did not really understand that project\u0026rsquo;s code and had no idea what to do concretely; the Go project was a command line tool, which I was more interested in and had a rough idea for, but I was not familiar with the Go language and had zero practical experience. In the end, relying on a clear and specific proposal, I successfully applied for the Go project and completed it smoothly, and also quickly mastered a new programming language in practice.\nThe Summer 2020 program has come to a successful end, but my open source journey has just begun.\n","title":"暑期2020活动心得分享","url":"/zh/2020-12-19-summer2020-activity-sharing/"},{"content":"NGE2E is the next generation End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It\u0026rsquo;s built based on the lessons learnt from hundreds of test cases in the SkyWalking main repo.\nGoal  Keep feature parity with the existing E2E framework in the SkyWalking main repo; Support both docker-compose and KinD to orchestrate the tested services under different environments; Get rid of the heavy Java/Maven stack, which exists in the current E2E; be language independent as much as possible, users only need to configure YAMLs and run commands, without writing code;  Non-Goal  This framework is not involved with the build process, i.e. 
it won\u0026rsquo;t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn\u0026rsquo;t take the plugin tests into account, at least for now; This project doesn\u0026rsquo;t mean to add/remove any new/existing test case to/from the main repo; This documentation won\u0026rsquo;t cover too much technical details of how to implement the framework, that should go into an individual documentation;  Design Before diving into the design details, let\u0026rsquo;s take a quick look at how the end user might use NGE2E.\n All the following commands are mock, and are open to debate.\n To run a test case in a directory /path/to/the/case/directory\ne2e run /path/to/the/case/directory # or cd /path/to/the/case/directory \u0026amp;\u0026amp; e2e run This will run the test case in the specified directory, this command is a wrapper that glues all the following commands, which can be executed separately, for example, to debug the case:\nNOTE: because all the options can be loaded from a configuration file, so as long as a configuration file (say e2e.yaml) is given in the directory, every command should be able to run in bare mode (without any option explicitly specified in the command line);\nSet Up e2e setup --env=compose --file=docker-compose.yaml --wait-for=service/health e2e setup --env=kind --file=kind.yaml --manifests=bookinfo.yaml,gateway.yaml --wait-for=pod/ready e2e setup # If configuration file e2e.yaml is present  --env: the environment, may be compose or kind, represents docker-compose and KinD respectively; --file: the docker-compose.yaml or kind.yaml file that declares how to set up the environment; --manifests: for KinD, the resources files/directories to apply (using kubectl apply -f); --command: a command to run after the environment is started, this may be useful when users need to install some extra tools or apply resources from command line, like istioctl install --profile=demo; --wait-for: can be specified multiple times to give a list of conditions to be met; wait until the given conditions are met; the most frequently-used strategy should be --wait-for=service/health, --wait-for=deployments/available, etc. that make the e2e setup command to wait for all conditions to be met; other possible strategies may be something like --wait-for=\u0026quot;log:Started Successfully\u0026quot;, --wait-for=\u0026quot;http:localhost:8080/healthcheck\u0026quot;, etc. if really needed;  Trigger Inputs e2e trigger --interval=3s --times=0 --action=http --url=\u0026#34;localhost:8080/users\u0026#34; e2e trigger --interval=3s --times=0 --action=cmd --cmd=\u0026#34;curl localhost:8080/users\u0026#34; e2e trigger # If configuration file e2e.yaml is present  --interval=3s: trigger the action every 3 seconds; --times=0: how many times to trigger the action, 0=infinite; --action=http: the action of the trigger, i.e. \u0026ldquo;perform an http request as an input\u0026rdquo;; --action=cmd: the action of the trigger, i.e. 
\u0026ldquo;execute the cmd as an input\u0026rdquo;;  Query Output swctl service ls this is a project-specific step, different project may use different tools to query the actual output, for SkyWalking, it uses swctl to query the actual output.\nVerify e2e verify --actual=actual.data.yaml --expected=expected.data.yaml e2e verify --query=\u0026#34;swctl service ls\u0026#34; --expected=expected.data.yaml e2e verify # If configuration file e2e.yaml is present   --actual: the actual data file, only YAML file format is supported;\n  --expected: the expected data file, only YAML file format is supported;\n  --query: the query to get the actual data, the query result must have the same format as --actual and --expected;\n The --query option will get the output into a temporary file and use the --actual under the hood;\n   Cleanup e2e cleanup --env=compose --file=docker-compose.yaml e2e cleanup --env=kind --file=kind.yaml --resources=bookinfo.yaml,gateway.yaml e2e cleanup # If configuration file e2e.yaml is present This step requires the same options in the setup step so that it can clean up all things necessarily.\nSummarize To summarize, the directory structure of a test case might be\ncase-name ├── agent-service # optional, an arbitrary project that is used in the docker-compose.yaml if needed │ ├── Dockerfile │ ├── pom.xml │ └── src ├── docker-compose.yaml ├── e2e.yaml # see a sample below └── testdata ├── expected.endpoints.service1.yaml ├── expected.endpoints.service2.yaml └── expected.services.yaml or\ncase-name ├── kind.yaml ├── bookinfo │ ├── bookinfo.yaml │ └── bookinfo-gateway.yaml ├── e2e.yaml # see a sample below └── testdata ├── expected.endpoints.service1.yaml ├── expected.endpoints.service2.yaml └── expected.services.yaml a sample of e2e.yaml may be\nsetup:env:kindfile:kind.yamlmanifests:- path:bookinfo.yamlwait:# you can have multiple conditions to wait- namespace:bookinfolabel-selector:app=productfor:deployment/available- namespace:reviewslabel-selector:app=productfor:deployment/available- namespace:ratingslabel-selector:app=productfor:deployment/availablerun:- command:|# it can be a shell script or anything executableistioctl install --profile=demo -ykubectl label namespace default istio-injection=enabledwait:- namespace:istio-systemlabel-selector:app=istiodfor:deployment/available# OR# env: compose# file: docker-compose.yamltrigger:action:httpinterval:3stimes:0url:localhost:9090/usersverify:- query:swctl service lsexpected:expected.services.yaml- query:swctl endpoint ls --service=\u0026#34;YnVzaW5lc3Mtem9uZTo6cHJvamVjdEM=.1\u0026#34;expected:expected.projectC.endpoints.yamlthen a single command should do the trick.\ne2e run Modules This project is divided into the following modules.\nController A controller command (e2e run) composes all the steps declared in the e2e.yaml, it should be progressive and clearly display which step is currently running. If it failed in a step, the error message should be as much comprehensive as possible. An example of the output might be\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? 
Generating Traffic - http localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up env (by executing cleanup command) no mater what status other commands are, even if they are failed, the controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Initializer The initializer is responsible for\n  When env==compose\n Start the docker-compose services; Check the services' healthiness; Wait until all services are ready according to the interval, etc.;    When env==kind\n Start the KinD cluster according to the config files; Apply the resources files (--manifests) or/and run the custom init command (--commands); Check the pods' readiness; Wait until all pods are ready according to the interval, etc.;    Verifier According to scenarios we have at the moment, the must-have features are:\n  Matchers\n Exact match Not null Not empty Greater than 0 Regexp match At least one of list element match    Functions\n Base64 encode/decode    in order to help to identify simple bugs from the GitHub Actions workflow, there are some \u0026ldquo;nice to have\u0026rdquo; features:\n Printing the diff content when verification failed is a super helpful bonus proved in the Python agent repo;  Logging When a test case failed, all the necessary logs should be collected into a dedicated directory, which could be uploaded to the GitHub Artifacts for downloading and analysis;\nLogs through the entire process of a test case are:\n KinD clusters logs; Containers/pods logs; The logs from the NGE2E itself;  More Planned Debugging Debugging the E2E locally has been a strong requirement and time killer that we haven\u0026rsquo;t solve up to date, though we have enhancements like https://github.com/apache/skywalking/pull/5198 , but in this framework, we will adopt a new method to \u0026ldquo;really\u0026rdquo; support debugging locally.\nThe most common case when debugging is to run the E2E tests, with one or more services forwarded into the host machine, where the services are run in the IDE or in debug mode.\nFor example, you may run the SkyWalking OAP server in an IDE and run e2e run, expecting the other services (e.g. agent services, SkyWalking WebUI, etc.) 
inside the containers to connect to your local OAP, instead of the one declared in docker-compose.yaml.\nFor Docker Desktop Mac/Windows, we can access the services running on the host machine inside containers via host.docker.internal, for Linux, it\u0026rsquo;s 172.17.0.1.\nOne possible solution is to add an option --debug-services=oap,other-service-name that rewrites all the router rules inside the containers from oap to host.docker.internal/172.17.0.1.\nCodeGen When adding new test case, a code generator would be of great value to eliminate the repeated labor and copy-pasting issues.\ne2e new \u0026lt;case-name\u0026gt; ","title":"[Design] NGE2E - Next Generation End-to-End Testing Framework","url":"/blog/e2e-design/"},{"content":"这篇文章暂时不讲告警策略, 直接看默认情况下激活的告警目标以及钉钉上的告警效果\nSkyWalking内置了很多默认的告警策略, 然后根据告警策略生成告警目标, 我们可以很容易的在界面上看到\n当我们想去让这些告警目标通知到我们时, 由于SkyWalking目前版本(8.3)已经自带了, 只需要简单配置一下即可\n我们先来钉钉群中创建机器人并勾选加签\n然后再修改告警部分的配置文件, 如果你是默认的配置文件(就像我一样), 你可以直接执行以下命令, 反之你也可以手动修改configs/alarm-settings.yml文件\ntee \u0026lt;your_skywalking_path\u0026gt;/configs/alarm-settings.yml \u0026lt;\u0026lt;-'EOF' dingtalkHooks: textTemplate: |- { \u0026quot;msgtype\u0026quot;: \u0026quot;text\u0026quot;, \u0026quot;text\u0026quot;: { \u0026quot;content\u0026quot;: \u0026quot;Apache SkyWalking Alarm: \\n %s.\u0026quot; } } webhooks: - url: https://oapi.dingtalk.com/robot/send?access_token=\u0026lt;access_token\u0026gt; secret: \u0026lt;加签值\u0026gt; EOF 最终效果如下\n参考文档:\nhttps://github.com/apache/skywalking/blob/master/docs/en/setup/backend/backend-alarm.md\nhttps://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq/uKPlK\n谢谢观看, 后续我会在SkyWalking告警这块写更多实战文章\n","title":"SkyWalking报警发送到钉钉群","url":"/zh/2020-12-13-skywalking-alarm/"},{"content":"Gui Cao began the code contributions since May 3, 2020. In the past 6 months, his 23 pull requests(GitHub, zifeihan[1]) have been accepted, which includes 5k+ lines of codes.\nMeanwhile, he took part in the tech discussion, and show the interests to contribute more to the project.\nAt Dec. 4th, 2020, the project management committee(PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome Gui Cao join the committer team.\n[1] https://github.com/apache/skywalking/commits?author=zifeihan\n","title":"Welcome Gui Cao as new committer","url":"/events/welcome-gui-cao-as-new-committer/"},{"content":" Author: Zhenxu Ke, Sheng Wu, and Tevah Platt. tetrate.io Original link, Tetrate.io blog Dec. 03th, 2020  Apache SkyWalking: an APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based (Docker, Kubernetes, Mesos) architectures.\nEnvoy Access Log Service: Access Log Service (ALS) is an Envoy extension that emits detailed access logs of all requests going through Envoy.\nBackground Apache SkyWalking has long supported observability in service mesh with Istio Mixer adapter. But since v1.5, Istio began to deprecate Mixer due to its poor performance in large scale clusters. Mixer’s functionalities have been moved into the Envoy proxies, and is supported only through the 1.7 Istio release. On the other hand, Sheng Wu and Lizan Zhou presented a better solution based on the Apache SkyWalking and Envoy ALS on KubeCon China 2019, to reduce the performance impact brought by Mixer, while retaining the same observability in service mesh. This solution was initially implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman at Tetrate.io. 
If you are looking for a more efficient solution to observe your service mesh instead of using a Mixer-based solution, this is exactly what you need. In this tutorial, we will explain a little bit how the new solution works, and apply it to the bookinfo application in practice.\nHow it works From a perspective of observability, Envoy can be typically deployed in 2 modes, sidecar, and router. As a sidecar, Envoy mostly represents a single service to receive and send requests (2 and 3 in the picture below). While as a proxy, Envoy may represent many services (1 in the picture below).\nIn both modes, the logs emitted by ALS include a node identifier. The identifier starts with router~ (or ingress~) in router mode and sidecar~ in sidecar proxy mode.\nApart from the node identifier, there are several noteworthy properties in the access logs that will be used in this solution:\n  downstream_direct_remote_address: This field is the downstream direct remote address on which the request from the user was received. Note: This is always the physical peer, even if the remote address is inferred from for example the x-forwarded-for header, proxy protocol, etc.\n  downstream_remote_address: The remote/origin address on which the request from the user was received.\n  downstream_local_address: The local/destination address on which the request from the user was received.\n  upstream_remote_address: The upstream remote/destination address that handles this exchange.\n  upstream_local_address: The upstream local/origin address that handles this exchange.\n  upstream_cluster: The upstream cluster that upstream_remote_address belongs to.\n  We will discuss more about the properties in the following sections.\nSidecar When serving as a sidecar, Envoy is deployed alongside a service, and delegates all the incoming/outgoing requests to/from the service.\n  Delegating incoming requests: in this case, Envoy acts as a server side sidecar, and sets the upstream_cluster in form of inbound|portNumber|portName|Hostname[or]SidecarScopeID.\nThe SkyWalking analyzer checks whether either downstream_remote_address can be mapped to a Kubernetes service:\na. If there is a service (say Service B) whose implementation is running in this IP(and port), then we have a service-to-service relation, Service B -\u0026gt; Service A, which can be used to build the topology. Together with the start_time and duration fields in the access log, we have the latency metrics now.\nb. If there is no service that can be mapped to downstream_remote_address, then the request may come from a service out of the mesh. Since SkyWalking cannot identify the source service where the requests come from, it simply generates the metrics without source service, according to the topology analysis method. The topology can be built as accurately as possible, and the metrics detected from server side are still correct.\n  Delegating outgoing requests: in this case, Envoy acts as a client-side sidecar, and sets the upstream_cluster in form of outbound|\u0026lt;port\u0026gt;|\u0026lt;subset\u0026gt;|\u0026lt;serviceFQDN\u0026gt;.\nClient side detection is relatively simpler than (1. Delegating incoming requests). If upstream_remote_address is another sidecar or proxy, we simply get the mapped service name and generate the topology and metrics. 
Otherwise, we have no idea what it is and consider it an UNKNOWN service.\n  Proxy role When Envoy is deployed as a proxy, it is an independent service itself and doesn\u0026rsquo;t represent any other service like a sidecar does. Therefore, we can build client-side metrics as well as server-side metrics.\nExample In this section, we will use the typical bookinfo application to demonstrate how Apache SkyWalking 8.3.0+ (the latest version up to Nov. 30th, 2020) works together with Envoy ALS to observe a service mesh.\nInstalling Kubernetes SkyWalking 8.3.0 supports the Envoy ALS solution under both Kubernetes environment and virtual machines (VM) environment, in this tutorial, we’ll only focus on the Kubernetes scenario, for VM solution, please stay tuned for our next blog, so we need to install Kubernetes before taking further steps.\nIn this tutorial, we will use the Minikube tool to quickly set up a local Kubernetes(v1.17) cluster for testing. In order to run all the needed components, including the bookinfo application, the SkyWalking OAP and WebUI, the cluster may need up to 4GB RAM and 2 CPU cores.\nminikube start --memory=4096 --cpus=2 Next, run kubectl get pods --namespace=kube-system --watch to check whether all the Kubernetes components are ready. If not, wait for the readiness before going on.\nInstalling Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The built-in configuration profiles free us from lots of manual operations. So, for demonstration purposes, we will use Istio through this tutorial.\nexport ISTIO_VERSION=1.7.1 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ istioctl install --set profile=demo kubectl label namespace default istio-injection=enabled Run kubectl get pods --namespace=istio-system --watch to check whether all the Istio components are ready. If not, wait for the readiness before going on.\nEnabling ALS The demo profile doesn’t enable ALS by default. We need to reconfigure it to enable ALS via some configuration.\nistioctl manifest install \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 The example command --set meshConfig.enableEnvoyAccessLogService=true enables the Envoy access log service in the mesh. And as we said earlier, ALS is essentially a gRPC service that emits requests logs. The config meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 tells this gRPC service where to emit the logs, say skywalking-oap.istio-system:11800, where we will deploy the SkyWalking ALS receiver later.\nNOTE: You can also enable the ALS when installing Istio so that you don’t need to restart Istio after installation:\nistioctl install --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 kubectl label namespace default istio-injection=enabled Deploying Apache SkyWalking The SkyWalking community provides a Helm Chart to make it easier to deploy SkyWalking and its dependent services in Kubernetes. 
The Helm Chart can be found at the GitHub repository.\n# Install Helm curl -sSLO https://get.helm.sh/helm-v3.0.0-linux-amd64.tar.gz sudo tar xz -C /usr/local/bin --strip-components=1 linux-amd64/helm -f helm-v3.0.0-linux-amd64.tar.gz # Clone SkyWalking Helm Chart git clone https://github.com/apache/skywalking-kubernetes cd skywalking-kubernetes/chart git reset --hard dd749f25913830c47a97430618cefc4167612e75 # Update dependencies helm dep up skywalking # Deploy SkyWalking helm -n istio-system install skywalking skywalking \\  --set oap.storageType=\u0026#39;h2\u0026#39;\\  --set ui.image.tag=8.3.0 \\  --set oap.image.tag=8.3.0-es7 \\  --set oap.replicas=1 \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh \\  --set oap.env.JAVA_OPTS=\u0026#39;-Dmode=\u0026#39; \\  --set oap.envoy.als.enabled=true \\  --set elasticsearch.enabled=false We deploy SkyWalking to the namespace istio-system, so that SkyWalking OAP service can be accessed by skywalking-oap.istio-system:11800, to which we told ALS to emit their logs, in the previous step.\nWe also enable the ALS analyzer in the SkyWalking OAP: oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh. The analyzer parses the access logs and maps the IP addresses in the logs to the real service names in the Kubernetes, to build a topology.\nIn order to retrieve the metadata (such as Pod IP and service names) from a Kubernetes cluster for IP mappings, we also set oap.envoy.als.enabled=true, to apply for a ClusterRole that has access to the metadata.\nexport POD_NAME=$(kubectl get pods -A -l \u0026#34;app=skywalking,release=skywalking,component=ui\u0026#34; -o name) echo $POD_NAME kubectl -n istio-system port-forward $POD_NAME 8080:8080 Now navigate your browser to http://localhost:8080 . You should be able to see the SkyWalking dashboard. The dashboard is empty for now, but after we deploy the demo application and generate traffic, it should be filled up later.\nDeploying Bookinfo application Run:\nexport ISTIO_VERSION=1.7.1 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s minikube tunnel Then navigate your browser to http://localhost/productpage. You should be able to see the typical bookinfo application. Refresh the webpage several times to generate enough access logs.\nDone! And you’re all done! Check out the SkyWalking WebUI again. You should see the topology of the bookinfo application, as well the metrics of each individual service of the bookinfo application.\nTroubleshooting  Check all pods status: kubectl get pods -A. SkyWalking OAP logs: kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=oap\u0026quot; -o name). SkyWalking WebUI logs: kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o name). Make sure the time zone at the bottom-right of the WebUI is set to UTC +0.  Customizing Service Names The SkyWalking community brought more improvements to the ALS solution in the 8.3.0 version. You can decide how to compose the service names when mapping from the IP addresses, with variables service and pod. 
For instance, configuring K8S_SERVICE_NAME_RULE to the expression ${service.metadata.name}-${pod.metadata.labels.version} gets service names with version label such as reviews-v1, reviews-v2, and reviews-v3, instead of a single service reviews, see the PR.\nWorking ALS with VM Kubernetes is popular, but what about VMs? From what we discussed above, in order to map the IPs to services, SkyWalking needs access to the Kubernetes cluster, fetching service metadata and Pod IPs. But in a VM environment, there is no source from which we can fetch those metadata. In the next post, we will introduce another ALS analyzer based on the Envoy metadata exchange mechanism. With this analyzer, you are able to observe a service mesh in the VM environment. Stay tuned! If you want to have commercial support for the ALS solution or hybrid mesh observability, Tetrate Service Bridge, TSB is another good option out there.\nAdditional Resources  KubeCon 2019 Recorded Video. Get more SkyWalking updates on the official website.  Apache SkyWalking founder Sheng Wu, SkyWalking core maintainer Zhenxu Ke are Tetrate engineers, and Tevah Platt is a content writer for Tetrate. Tetrate helps organizations adopt open source service mesh tools, including Istio, Envoy, and Apache SkyWalking, so they can manage microservices, run service mesh on any infrastructure, and modernize their applications.\n","title":"Observe Service Mesh with SkyWalking and Envoy Access Log Service","url":"/blog/2020-12-03-obs-service-mesh-with-sw-and-als/"},{"content":" 如果你正在寻找在 Mixer 方案以外观察服务网格的更优解,本文正符合你的需要。\n Apache Skywalking︰特别为微服务、云原生和容器化(Docker、Kubernetes、Mesos)架构而设计的 APM(应用性能监控)系统。\nEnvoy 访问日志服务︰访问日志服务(ALS)是 Envoy 的扩展组件,会将所有通过 Envoy 的请求的详细访问日志发送出来。\n背景 Apache SkyWalking 一直通过 Istio Mixer 的适配器,支持服务网格的可观察性。不过自从 v1.5 版本,由于 Mixer 在大型集群中差强人意的表现,Istio 开始弃用 Mixer。Mixer 的功能现已迁至 Envoy 代理,并获 Istio 1.7 版本支持。\n在去年的中国 KubeCon 中,吴晟和周礼赞基于 Apache SkyWalking 和 Envoy ALS,发布了新的方案:不再受制于 Mixer 带来的性能影响,也同时保持服务网格中同等的可观察性。这个方案最初是由吴晟、高洪涛、周礼赞和 Dhi Aurrahman 在 Tetrate.io 实现的。\n如果你正在寻找在 Mixer 方案之外,为你的服务网格进行观察的最优解,本文正是你当前所需的。在这个教程中,我们会解释此方案的运作逻辑,并将它实践到 bookinfo 应用上。\n运作逻辑 从可观察性的角度来说,Envoy 一般有两种部署模式︰Sidecar 和路由模式。 Envoy 代理可以代表多项服务(见下图之 1),或者当它作为 Sidecar 时,一般是代表接收和发送请求的单项服务(下图之 2 和 3)。\n在两种模式中,ALS 发放的日志都会带有一个节点标记符。该标记符在路由模式时,以 router~ (或 ingress~)开头,而在 Sidecar 代理模式时,则以 sidecar~ 开头。\n除了节点标记符之外,这个方案[1]所采用的访问日志也有几个值得一提的字段︰\ndownstream_direct_remote_address︰此字段是下游的直接远程地址,用作接收来自用户的请求。注意︰它永远是对端实体的地址,即使远程地址是从 x-forwarded-for header、代理协议等推断出来的。\ndownstream_remote_address︰远程或原始地址,用作接收来自用户的请求。\ndownstream_local_address︰本地或目标地址,用作接收来自用户的请求。\nupstream_remote_address︰上游的远程或目标地址,用作处理本次交换。\nupstream_local_address︰上游的本地或原始地址,用作处理本次交换。\nupstream_cluster︰upstream_remote_address 所属的上游集群。\n我们会在下面详细讲解各个字段。\nSidecar 当 Envoy 作为 Sidecar 的时候,会搭配服务一起部署,并代理来往服务的传入或传出请求。\n  代理传入请求︰在此情况下,Envoy 会作为服务器端的 Sidecar,以 inbound|portNumber|portName|Hostname[or]SidecarScopeID 格式设定 upstream_cluster。\nSkyWalking 分析器会检查 downstream_remote_address 是否能够找到对应的 Kubernetes 服务。\n如果在此 IP(和端口)中有一个服务(例如服务 B)正在运行,那我们就会建立起服务对服务的关系(即服务 B → 服务 A),帮助建立拓扑。再配合访问日志中的 start_time 和 duration 两个字段,我们就可以获得延迟的指标数据了。\n如果没有任何服务可以和 downstream_remote_address 相对应,那请求就有可能来自网格以外的服务。由于 SkyWalking 无法识别请求的服务来源,在没有源服务的情况下,它简单地根据拓扑分析方法生成数据。拓扑依然可以准确地建立,而从服务器端侦测出来的指标数据也依然是正确的。\n  代理传出请求︰在此情况下,Envoy 会作为客户端的 Sidecar,以 outbound|\u0026lt;port\u0026gt;|\u0026lt;subset\u0026gt;|\u0026lt;serviceFQDN\u0026gt; 格式设定 upstream_cluster。\n客户端的侦测相对来说比代理传入请求容易。如果 upstream_remote_address 是另一个 Sidecar 或代理的话,我们只需要获得它相应的服务名称,便可生成拓扑和指标数据。否则,我们没有办法理解它,只能把它当作 UNKNOWN 服务。\n  
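To make the upstream_cluster naming convention above easier to follow, here is a small, hypothetical Go sketch that splits such a value into its parts. It only illustrates the inbound|portNumber|portName|Hostname and outbound|<port>|<subset>|<serviceFQDN> formats described in this post; the function, type names, and example values are invented for this illustration and are not the actual SkyWalking analyzer code.

package main

import (
	"fmt"
	"strings"
)

// clusterInfo captures the fields encoded in an Envoy upstream_cluster value,
// e.g. "outbound|9080||reviews.default.svc.cluster.local".
type clusterInfo struct {
	Direction string // "inbound" or "outbound"
	Port      string
	Subset    string // port name (inbound) or subset (outbound); may be empty
	Target    string // hostname / service FQDN
}

// parseUpstreamCluster is a hypothetical helper, not SkyWalking code:
// it only demonstrates how the pipe-separated convention can be decoded.
func parseUpstreamCluster(v string) (clusterInfo, error) {
	parts := strings.Split(v, "|")
	if len(parts) != 4 {
		return clusterInfo{}, fmt.Errorf("unexpected upstream_cluster format: %q", v)
	}
	return clusterInfo{Direction: parts[0], Port: parts[1], Subset: parts[2], Target: parts[3]}, nil
}

func main() {
	for _, v := range []string{
		"outbound|9080||reviews.default.svc.cluster.local",
		"inbound|9080|http|productpage.default.svc.cluster.local",
	} {
		info, err := parseUpstreamCluster(v)
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Printf("%s request on port %s -> %s\n", info.Direction, info.Port, info.Target)
	}
}

Once the direction, port, and service name are separated like this, the analyzer described above can look up the peer service for the remote address and build the topology edge and latency metrics.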
代理角色 当 Envoy 被部署为前端代理时,它是独立的服务,并不会像 Sidecar 一样,代表任何其他的服务。所以,我们可以建立客户端以及服务器端的指标数据。\n演示范例 在本章,我们会使用典型的 bookinfo 应用,来演示 Apache SkyWalking 8.3.0+ (截至 2020 年 11 月 30 日的最新版本)如何与 Envoy ALS 合作,联手观察服务网格。\n安装 Kubernetes 在 Kubernetes 和虚拟机器(VM)的环境下,SkyWalking 8.3.0 均支持 Envoy ALS 的方案。在本教程中,我们只会演示在 Kubernetes 的情境,至于 VM 方案,请耐心期待我们下一篇文章。所以在进行下一步之前,我们需要先安装 Kubernetes。\n在本教程中,我们会使用 Minikube 工具来快速设立本地的 Kubernetes(v1.17 版本)集群用作测试。要运行所有必要组件,包括 bookinfo 应用、SkyWalking OAP 和 WebUI,集群需要动用至少 4GB 内存和 2 个 CPU 的核心。\nminikube start --memory=4096 --cpus=2 然后,运行 kubectl get pods --namespace=kube-system --watch,检查所有 Kubernetes 的组件是否已准备好。如果还没,在进行下一步前,请耐心等待准备就绪。\n安装 Istio Istio 为配置 Envoy 代理和实现访问日志服务提供了一个非常方便的方案。内建的配置设定档为我们省去了不少手动的操作。所以,考虑到演示的目的,我们会在本教程全程使用 Istio。\nexport ISTIO_VERSION=1.7.1 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ istioctl install --set profile=demo kubectl label namespace default istio-injection=enabled 然后,运行 kubectl get pods --namespace=istio-system --watch,检查 Istio 的所有组件是否已准备好。如果还没,在进行下一步前,请耐心等待准备就绪。\n启动访问日志服务 演示的设定档没有预设启动 ALS,我们需要重新配置才能够启动 ALS。\nistioctl manifest install \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 范例指令 --set meshConfig.enableEnvoyAccessLogService=true 会在网格中启动访问日志服务。正如之前提到,ALS 本质上是一个会发放请求日志的 gRPC 服务。配置 meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 会告诉这个gRPC 服务往哪里发送日志,这里是往 skywalking-oap.istio-system:11800 发送,稍后我们会部署 SkyWalking ALS 接收器到这个地址。\n注意︰\n你也可以在安装 Istio 时启动 ALS,那就不需要在安装后重新启动 Istio︰\nistioctl install --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 kubectl label namespace default istio-injection=enabled 部署 Apache SkyWalking SkyWalking 社区提供了 Helm Chart ,让你更轻易地在 Kubernetes 中部署 SkyWalking 以及其依赖服务。 Helm Chart 可以在 GitHub 仓库找到。\n# Install Helm curl -sSLO https://get.helm.sh/helm-v3.0.0-linux-amd64.tar.gz sudo tar xz -C /usr/local/bin --strip-components=1 linux-amd64/helm -f helm-v3.0.0-linux-amd64.tar.gz # Clone SkyWalking Helm Chart git clone https://github.com/apache/skywalking-kubernetes cd skywalking-kubernetes/chart git reset --hard dd749f25913830c47a97430618cefc4167612e75 # Update dependencies helm dep up skywalking # Deploy SkyWalking helm -n istio-system install skywalking skywalking \\  --set oap.storageType=\u0026#39;h2\u0026#39;\\  --set ui.image.tag=8.3.0 \\  --set oap.image.tag=8.3.0-es7 \\  --set oap.replicas=1 \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh \\  --set oap.env.JAVA_OPTS=\u0026#39;-Dmode=\u0026#39; \\  --set oap.envoy.als.enabled=true \\  --set elasticsearch.enabled=false 我们在 istio-system 的命名空间内部署 SkyWalking,使 SkyWalking OAP 服务可以使用地址 skywalking-oap.istio-system:11800 访问,在上一步中,我们曾告诉过 ALS 应往此处发放它们的日志。\n我们也在 SkyWalking OAP 中启动 ALS 分析器︰oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh。分析器会对访问日志进行分析,并解析日志中的 IP 地址和 Kubernetes 中的真实服务名称,以建立拓扑。\n为了从 Kubernetes 集群处获取元数据(例如 Pod IP 和服务名称),以识别相应的 IP 地址,我们还会设定 oap.envoy.als.enabled=true,用来申请一个对元数据有访问权的 ClusterRole。\nexport POD_NAME=$(kubectl get pods -A -l \u0026#34;app=skywalking,release=skywalking,component=ui\u0026#34; -o name) echo $POD_NAME kubectl -n istio-system port-forward $POD_NAME 8080:8080 现在到你的浏览器上访问 http://localhost:8080。你应该会看到 SkyWalking 的 Dashboard。 Dashboard 现在应该是空的,但稍后部署应用和生成流量后,它就会被填满。\n部署 Bookinfo 应用 运行︰\nexport ISTIO_VERSION=1.7.1 kubectl apply -f 
https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s minikube tunnel 现在到你的浏览器上进入 http://localhost/productpage。你应该会看到典型的 bookinfo 应用画面。重新整理该页面几次,以生成足够的访问日志。\n完成了! 这样做,你就成功完成设置了!再查看 SkyWalking 的 WebUI,你应该会看到 bookinfo 应用的拓扑,以及它每一个单独服务的指标数据。\n疑难解答  检查所有 pod 的状态︰kubectl get pods -A。 SkyWalking OAP 的日志︰kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=oap\u0026quot; -o name)。 SkyWalking WebUI 的日志︰kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o name)。 确保 WebUI 右下方的时区设定在 UTC +0。  自定义服务器名称 SkyWalking 社区在 ALS 方案的 8.3.0 版本中,作出了许多改善。你现在可以在映射 IP 地址时,决定如何用 service 和 pod 变量去自定义服务器的名称。例如,将 K8S_SERVICE_NAME_RULE 设置为 ${service.metadata.name}-${pod.metadata.labels.version},就可以使服务名称带上版本的标签,类似 reviews-v1、reviews-v2 和 reviews- v3,而不再是单个服务 review[2]。\n在 VM 上使用 ALS Kubernetes 很受欢迎,可是 VM 呢?正如我们之前所说,为了替 IP 找到对应的服务,SkyWalking 需要对 Kubernetes 集群有访问权,以获得服务的元数据和 Pod 的 IP。可是在 VM 环境中,我们并没有来源去收集这些元数据。\n在下一篇文章,我们会介绍另外一个 ALS 分析器,它是建立于 Envoy 的元数据交换机制。有了这个分析器,你就可以在 VM 环境中观察服务网格了。万勿错过!\n如果你希望在 ALS 方案或是混合式网格可观察性上获得商业支持,TSB 会是一个好选项。\n额外资源\n KubeCon 2019 的录影视频。 在官方网站上获得更多有关 SkyWalking 的最新消息吧。  如有任何问题或反馈,发送邮件至 learn@tetrate.io。\nApache SkyWalking 创始人吴晟和 SkyWalking 的核心贡献者柯振旭都是 Tetrate 的工程师。 Tetrate 的内容创造者编辑与贡献于本文章。 Tetrate 帮助企业采用开源服务网格工具,包括 Istio、Envoy 和 Apache SkyWalking,让它们轻松管理微服务,在任何架构上运行服务网格,以至现代化他们的应用。\n[1]https://github.com/envoyproxy/envoy/blob/549164c42cae84b59154ca4c36009e408aa10b52/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto\n[2]https://github.com/apache/skywalking/pull/5722\n","title":"使用 SkyWalking 和 Envoy 访问日志服务对服务网格进行观察","url":"/zh/observe-service-mesh-with-skywalking-and-envoy-access-log-service/"},{"content":"SkyWalking 8.3.0 is released. Go to downloads page to find release tars.\nProject  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. 
Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.3.0","url":"/events/release-apache-skwaylking-apm-8-3-0/"},{"content":"Python 作为一门功能强大的编程语言,被广泛的应用于计算机行业之中; 在微服务系统架构盛行的今天,Python 以其丰富的软件生态和灵活的语言特性在服务端编程领域也占有重要的一席之地。 本次分享将阐述 Apache SkyWalking 在微服务架构中要解决的问题,展示如何使用 Apache SkyWalking 来近乎自动化地监控 Python 后端应用服务,并对 Apache SkyWalking 的 Python 语言探针的实现技术进行解读。\nB站视频地址\n","title":"[视频] PyCon China 2020 - Python 微服务应用性能监控","url":"/zh/2020-11-30-pycon/"},{"content":"SkyWalking CLI 0.5.0 is released. Go to downloads page to find release tars.\n  Features\n Use template files in yaml format instead Refactor metrics command to adopt metrics-v2 protocol Use goroutine to speed up dashboard global command Add metrics list command    Bug Fixes\n Add flags of instance, endpoint and normal for metrics command Fix the problem of unable to query database metrics    Chores\n Update release guide doc Add screenshots for use cases in README.md Introduce generated codes into codebase    ","title":"Release Apache SkyWalking CLI 0.5.0","url":"/events/release-apache-skywalking-cli-0-5-0/"},{"content":" Author: Jiapeng Liu. Baidu. skywalking-satellite: The Sidecar Project of Apache SkyWalking Nov. 
25th, 2020  A lightweight collector/sidecar which can be deployed close to the target monitored system, to collect metrics, traces, and logs. It also provides advanced features, such as local cache, format transformation, and sampling.\nDesign Thinking Satellite is a 2 level system to collect observability data from other core systems. So, the core element of the design is to guarantee data stability during Pod startup all the way to Pod shutdown avoiding alarm loss. All modules are designed as plugins, and if you have other ideas, you can add them yourself.\nSLO  Single gatherer supports \u0026gt; 1000 ops (Based 0.5 Core,50M) At least once delivery.(Optional) Data stability: 99.999%.(Optional)  Because they are influenced by the choice of plugins, some items in SLO are optional.\nRole Satellite would be running as a Sidecar. Although Daemonset mode would take up fewer resources, it will cause more troubles to the forwarding of agents. So we also want to use Sidecar mode by reducing the costs. But Daemonset mode would be also supported in the future plan.\nCore Modules The Satellite has 3 core modules which are Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing the data to Queue. The Processor module is responsible for reading data from the queue and processing data by a series of filter chains. The Sender module is responsible for async processing and forwarding the data to the external services in the batch mode. After sending success, Sender would also acknowledge the offset of Queue in Gatherer.  Detailed Structure The overall design is shown in detail in the figure below. We will explain the specific components one by one.\nGatherer Concepts The Gatherer has 4 components to support the data collection, which are Input, Collector, Worker, and Queue. There are 2 roles in the Worker, which are Fetcher and Receiver.\n The Input is an abstraction of the input source, which is usually mapped to a configuration file. The Collector is created by the Source, but many collectors could be created by the same Source. For example, when a log path has been configured as the /var/*.log in an Input, the number of collectors is the same as the file number in this path. The Fetcher and Receiver is the real worker to collect data. The receiver interface is an abstraction, which has multiple implementations, such as gRPC receiver and HTTP receiver.Here are some specific use cases:  Trace Receiver is a gRPC server for receiving trace data created by Skywalking agents. Log Receiver is also a gRPC server for receiving log data which is collected by Skywalking agents. (In the future we want Skywalking Agent to support log sending, and RPC-based log sending is more efficient and needs fewer resources than file reading. For example, the way of file reading will bring IO pressure and performance cost under multi-line splicing.) Log Fetcher is like Filebeat, which fits the common log collection scenario. This fetcher will have more responsibility than any other workers because it needs to record the offset and process the multi-line splicing. This feature will be implemented in the future. Prometheus Fetcher supports a new way to fetch Prometheus data and push the data to the upstream. \u0026hellip;\u0026hellip;   The Queue is a buffer module to decouple collection and transmission. In the 1st release version, we will use persistent storage to ensure data stability. But the implementation is a plug-in design that can support pure memory queues later. 
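As a rough illustration of the plug-in design mentioned above, the sketch below shows how a queue abstraction could be expressed in Go so that a memory-mapped implementation and a pure in-memory implementation stay interchangeable. All interface and type names here are hypothetical, chosen only for this example; they are not the actual Satellite APIs.

package main

import "fmt"

// Event stands in for one collected item (a trace segment, a log entry, a metric sample).
type Event struct {
	Payload []byte
}

// Queue is a hypothetical plug-in contract: the Gatherer only pushes, the Processor only
// pops, and the concrete storage (mmap file, pure memory) is hidden behind this interface.
type Queue interface {
	Push(e Event) error
	Pop() (Event, bool)
}

// memoryQueue is the simplest possible implementation: a bounded ring buffer that
// overwrites the oldest element when capacity is exceeded, mirroring the ring idea above.
type memoryQueue struct {
	buf  []Event
	head int
	size int
}

func newMemoryQueue(capacity int) *memoryQueue {
	return &memoryQueue{buf: make([]Event, capacity)}
}

func (q *memoryQueue) Push(e Event) error {
	if q.size == len(q.buf) {
		// Ring semantics: drop the oldest element instead of growing.
		q.head = (q.head + 1) % len(q.buf)
		q.size--
	}
	q.buf[(q.head+q.size)%len(q.buf)] = e
	q.size++
	return nil
}

func (q *memoryQueue) Pop() (Event, bool) {
	if q.size == 0 {
		return Event{}, false
	}
	e := q.buf[q.head]
	q.head = (q.head + 1) % len(q.buf)
	q.size--
	return e, true
}

func main() {
	var q Queue = newMemoryQueue(2)
	q.Push(Event{Payload: []byte("segment-1")})
	q.Push(Event{Payload: []byte("segment-2")})
	q.Push(Event{Payload: []byte("segment-3")}) // overrides segment-1, like the coverage policy described below
	for e, ok := q.Pop(); ok; e, ok = q.Pop() {
		fmt.Println(string(e.Payload))
	}
}

Because the Gatherer and Processor only depend on such an interface, swapping the persistent mmap-backed queue for a pure memory queue (or any future implementation) would not change the surrounding pipeline.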
  The data flow We use the Trace Receiver as an example to introduce the data flow. Queue MmapQueue We have simplified the design of MmapQueue to reduce the resources cost on the memory and disk.\nConcepts There are 2 core concepts in MmapQueue.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  Segment One MmapQueue has a directory to store the whole data. The Queue directory is made up with many segments and 1 meta file. The number of the segments would be computed by 2 params, which are the max cost of the Queue and the cost of each segment. For example, If the max cost is 512M and each segment cost is 256K, the directory can hold up to 2000 files. Once capacity is exceeded, an coverage policy is adopted that means the 2000th would override the first file.\nEach segment in Queue will be N times the size of the page cache and will be read and written in an appended sequence rather than randomly. These would improve the performance of Queue. For example, each Segment is a 128k file, as shown in the figure below.\nMeta The Meta is a mmap file that only contains 56Bit. There are 5 concepts in the Meta.\n Version: A version flag. Watermark Offset: Point to the current writing space.  ID: SegmentID Offset: The offset in Segment.   Writed Offset: Point to the latest refreshed data, that would be overridden by the write offset after period refresh.  ID: SegmentID Offset: The offset in Segment.   Reading Offset: Point to the current reading space.  ID: SegmentID Offset: The offset in Segment.   Committed Offset: Point to the latest committed offset , that is equal to the latest acked offset plus one.  ID: SegmentID Offset: The offset in Segment.    The following diagram illustrates the transformation process.\n The publisher receives data and wants to write to Queue.  The publisher would read Writing Offset to find a space and do plus one. After this, the publisher will write the data to the space.   The consumer wants to read the data from Queue.  The consumer would read Reading Offset to find the current read offset and do plus one. After this, the consumer will read the data from the space.   On period flush, the flusher would override Watermark Offset by using Writing Offset. When the ack operation is triggered, Committed Offset would plus the batch size in the ack batch. When facing crash, Writing Offset and Reading Offset would be overridden by Watermark Offset and Committed Offset. That is because the Reading Offset and Writing Offset cannot guarantee at least once delivery.  Mmap Performance Test The test is to verify the efficiency of mmap in low memory cost.\n The rate of data generation: 7.5K/item 1043 item/s (Based on Aifanfan online pod.) The test structure is based on Bigqueue because of similar structure. 
Test tool: Go Benchmark Test Command: go test -bench BenchmarkEnqueue -run=none -cpu=1 Result On Mac(15-inch, 2018,16 GB 2400 MHz DDR4, 2.2 GHz Intel Core i7 SSD):  BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-384KB 66501 21606 ns/op 68 B/op 1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-1.25MB 72348 16649 ns/op 67 B/op 1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-16KB/MaxMem-1.25MB 39996 33199 ns/op 103 B/op 1 allocs/op   Result On Linux(INTEL Xeon E5-2450 V2 8C 2.5GHZ2,INVENTEC PC3L-10600 16G8,INVENTEC SATA 4T 7.2K*8):  BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-384KB 126662\t12070 ns/op\t62 B/op\t1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-1.25MB 127393\t12097 ns/op\t62 B/op\t1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-16KB/MaxMem-1.25MB 63292\t23806 ns/op\t92 B/op\t1 allocs/op   Conclusion: Based on the above tests, mmap is both satisfied at the write speed and at little memory with very low consumption when running as a sidecar.  Processor The Processor has 3 core components, which are Consumer, Filter, and Context.\n The Consumer is created by the downstream Queue. The consumer has its own read offset and committed offset, which is similar to the offset concept of Spark Streaming. Due to the particularity of APM data preprocessing, Context is a unique concept in the Satellite filter chain, which supports storing the intermediate event because the intermediate state event also needs to be sent in sometimes. The Filter is the core data processing part, which is similar to the processor of beats. Due to the context, the upstream/downstream filters would be logically coupling.  Sender  BatchConverter decouples the Processor and Sender by staging the Buffer structure, providing parallelization. But if BatchBuffer is full, the downstream processors would be blocked. Follower is a real send worker that has a client, such as a gRPC client or Kafka client, and a fallback strategy. Fallback strategy is an interface, we can add more strategies to resolve the abnormal conditions, such as Instability in the network, upgrade the oap cluster. When sent success, Committed Offset in Queue would plus the number of this batch.  High Performance The scenario using Satellite is to collect a lot of APM data collection. We guarantee high performance by the following ways.\n Shorten transmission path, that means only join 2 components,which are Queue and Processor, between receiving and forwarding. High Performance Queue. MmapQueue provides a big, fast and persistent queue based on memory mapped file and ring structure. Processor maintains a linear design, that could be functional processed in one go-routine to avoid too much goroutines switching.  Stability Stability is a core point in Satellite. Stability can be considered in many ways, such as stable resources cost, stable running and crash recovery.\nStable resource cost In terms of resource cost, Memory and CPU should be a concern.\nIn the aspect of the CPU, we keep a sequence structure to avoid a large number of retries occurring when facing network congestion. And Satellite avoids keep pulling when the Queue is empty based on the offset design of Queue.\nIn the aspect of the Memory, we have guaranteed only one data caching in Satellite, that is Queue. For the queue structure, we also keep the size fixed based on the ring structure to maintain stable Memory cost. 
Also, MmapQueue is designed for minimizing memory consumption and providing persistence while keeping speed as fast as possible. Maybe supports some strategy to dynamically control the size of MmapQueue to process more extreme conditions in the future.\nStable running There are many cases of network congestion, such as the network problem on the host node, OAP cluster is under upgrating, and Kafka cluster is unstable. When facing the above cases, Follower would process fallback strategy and block the downstream processes. Once the failure strategy is finished, such that send success or give up this batch, the Follower would process the next batch.\nCrash Recovery The crash recovery only works when the user selects MmapQueue in Gatherer because of persistent file system design. When facing a crash, Reading Offset would be overridden by Committed Offset that ensure the at least once delivery. And Writed Offset would override Writing Offset that ensures the consumer always works properly and avoid encountering uncrossable defective data blocks.\nBuffer pool The Queue is to store fixed structure objects, object buffer pool would be efficient to reuse memory to avoid GC.\n ackChan batch convertor  Some metrics In Satellite, we should also collect its own monitoring metrics. The following metrics are necessary for Satellite.\n cpu memory go routine number gatherer_writing_offset gatherer_watermark_offset processor_reading_count sender_committed_offset sender_abandoned_count sender_retry_count  Input and Output We will reuse this diagram to explain the input and output.\n Input  Because the push-pull mode is both supported, Queue is a core component. Queue is designed to be a ring-shaped fixed capacity, that means the oldest data would be overridden by the latest data. If users find data loss, users should raise the ceiling of memory Queue. MmapQueue generally doesn\u0026rsquo;t face this problem unless the Sender transport is congested.   Ouput  If the BatchBuffer is full, the processor would be blocked. If the Channel is full, the downstream components would be blocked, such as BatchConvertor and Processor. When SenderWorker sends failure, the batch data would do a failure strategy that would block pulling data from the Channel. The strategy is a part of Sender,the operation mode is synchronous. Once the failure strategy is finished, such that send success or give up this batch, the Sendworker would keep pulling data from the Channel.    Questions How to avoid keep pulling when the Queue is empty? If Watermark Offset is less than or equal to Reading Offset, a signal would be sent to the consumer to avoid keep pulling.\nWhy reusing files in Queue? The unified model is a ring in Queue, that limits fixed resources cost in memory or disk.In Mmap Queue, reusing files turns the delete operations into an overwrite operations, effectively reducing the creation and deletion behavior in files.\nWhat are the strategies for file creation and deletion in MmapQueue? As Satellite running, the number of the files in MmapQueue would keep growing until up to the maximum capacity. After this, the old files will be overridden by the new data to avoid file deletion. When the Pod died, all resources were recycled.\n","title":"The first design of Satellite 0.1.0","url":"/blog/2020-11-25-skywalking-satellite-0.1.0-design/"},{"content":"SkyWalking Python 0.4.0 is released. 
Go to downloads page to find release tars.\n Feature: Support Kafka reporter protocol (#74) BugFix: Move generated packages into skywalking namespace to avoid conflicts (#72) BugFix: Agent cannot reconnect after server is down (#79) Test: Mitigate unsafe yaml loading (#76)  ","title":"Release Apache SkyWalking Python 0.4.0","url":"/events/release-apache-skywalking-python-0-4-0/"},{"content":"活动介绍 Apache SkyWalking 2020 开发者线下活动,社区创始人,PMC成员和Committer会亲临现场,和大家交流和分享项目中的使用经验。 以及邀请Apache Local Community 北京的成员一起分享Apache文化和Apache之道。\n日程安排 开场演讲 09:30-09:50 SkyWalking\u0026rsquo;s 2019-2020 and beyond\n吴晟,Tetrate.io创始工程师,Apache SkyWalking创始人\nB站视频地址\n 上午 09:55-10:30 贝壳全链路跟踪实践\n赵禹光,赵禹光,贝壳找房监控技术负责人,Apache SkyWalking PMC成员\n10:35-11:15 SkyWalking在百度爱番番部门实践\n刘嘉鹏,百度,SkyWalking contributor\n11:15-11:55 非计算机背景的同学如何贡献开源\n缘于一位本科在读的社会学系的同学的问题,这让我反思我们开源community的定位和Open的程度,于是,适兕从生产、分发、消费的软件供应的角度,根据涉及到的角色,然后再反观现代大学教育体系的专业,进一步对一个开源项目和community需要的专业背景多样性进行一个阐述和探究。并以ALC Beijing为例进行一个事例性的说明。\n适兕,开源布道师,ALC Beijing member,开源之道主创,开源社教育组成员。\nB站视频地址\n 下午 13:30-14:10 如何从 Apache SkyWalking 社区学习 Apache Way\n温铭,支流科技联合创始人&CEO,Apache APISIX 项目 VP, Apache SkyWalking Committer\n14:10-14:50 Apache SkyWalking 在小米公司的应用\n宋振东,小米公司小米信息技术部 skywalking 研发负责人\n14:50-15:30 Istio全生命周期监控\n高洪涛,Tetrate.io创始工程师,Apache SkyWalking PMC成员\n15:30-15:45 茶歇\n15:45-16:25 针对HikariCP数据库连接池的监控\n张鑫 Apache SkyWalking PMC 成员\n16:25-17:00 SkyWalking 与 Nginx 的优化实践\n王院生 深圳支流科技创始人兼 CTO,Apache APISIX 创始人 \u0026amp; PMC成员\nB站视频地址\n","title":"[视频] SkyWalking DevCon 2020","url":"/zh/2020-11-23-devcon/"},{"content":"The APM system provides the tracing or metrics for distributed systems or microservice architectures. Back to APM themselves, they always need backend storage to store the necessary massive data. What are the features required for backend storage? Simple, fewer dependencies, widely used query language, and the efficiency could be into your consideration. Based on that, traditional SQL databases (like MySQL) or NoSQL databases would be better choices. However, this topic will present another backend storage solution for the APM system viewing from NewSQL. Taking Apache Skywalking for instance, this talking will share how to make use of Apache ShardingSphere, a distributed database middleware ecosystem to extend the APM system\u0026rsquo;s storage capability.\nAs a senior DBA worked at JD.com, the responsibility is to develop the distributed database and middleware, and the automated management platform for database clusters. As a PMC of Apache ShardingSphere, I am willing to contribute to the OS community and explore the area of distributed databases and NewSQL.\n  ","title":"[Video] Another backend storage solution for the APM system","url":"/blog/2020-11-21-apachecon-obs-shardingsphere/"},{"content":"Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open-source and scalable platform for all your APIs and microservices. Apache SkyWalking: an APM(application performance monitor) system, especially designed for microservices, cloud-native and container-based (Docker, Kubernetes, Mesos) architectures. Through the powerful plug-in mechanism of Apache APISIX, Apache Skywalking is quickly supported, so that we can see the complete life cycle of requests from the edge to the internal service. 
Monitor and manage each request in a visual way, and improve the observability of the service.\n  ","title":"[Video] Improve Apache APISIX observability with Apache SkyWalking","url":"/blog/2020-11-21-apachecon-obs-apisix/"},{"content":"Today\u0026rsquo;s monitoring solutions are geared towards operational tasks, displaying behavior as time-series graphs inside dashboards and other abstractions. These abstractions are immensely useful but are largely designed for software operators, whose responsibilities require them to think in systems, rather than the underlying source code. This is problematic given that an ongoing trend of software development is the blurring boundaries between building and operating software. This trend makes it increasingly necessary for programming environments to not just support development-centric activities, but operation-centric activities as well. Such is the goal of the feedback-driven development approach. By combining IDE and APM technology, software developers can intuitively explore multiple dimensions of their software simultaneously with continuous feedback about their software from inception to production.\nBrandon Fergerson is an open-source software developer who does not regard himself as a specialist in the field of programming, but rather as someone who is a devoted admirer. He discovered the beauty of programming at a young age and views programming as an art and those who do it well to be artists. He has an affinity towards getting meta and combining that with admiration of programming, has found source code analysis to be exceptionally interesting. Lately, his primary focus involves researching and building AI-based pair programming technology.\n  ","title":"[Video] SourceMarker - Continuous Feedback for Developers","url":"/blog/2020-11-21-apachecon-obs-sourcemarker/"},{"content":"Over the past few years, and coupled with the growing adoption of microservices, distributed tracing has emerged as one of the most commonly used monitoring and troubleshooting methodologies. New tracing tools are increasingly being introduced, driving adoption even further. One of these tools is Apache SkyWalking, a popular open-source tracing, and APM platform. This talk explores the history of the SkyWalking storage module, shows the evolution of distributed tracing storage layers, from the traditional relational database to document-based search engine. I hope that this talk contributes to the understanding of history and also that it helps to clarify the different types of storage that are available to organizations today.\nHongtao Gao is the engineer of tetrate.io and the former Huawei Cloud expert. One of PMC members of Apache SkyWalking and participates in some popular open-source projects such as Apache ShardingSphere and Elastic-Job. He has an in-depth understanding of distributed databases, container scheduling, microservices, ServicMesh, and other technologies.\n  ","title":"[Video] The history of distributed tracing storage","url":"/blog/2020-11-21-apachecon-obs-storage/"},{"content":" 作者: 赵禹光 原文链接: 亲临百人盛况的Apache SkyWalking 2020 DevCon,看见了什么? 
2020 年 10 月 29 日  活动现场 2020年11月14日Apache SkyWalking 2020 DevCon由贝壳找房和tetrate赞助,Apache SkyWalking、云原生、Apache APISIX、Apache Pulsar 和 ALC Beijing 五大社区合作,在贝壳找房一年级会议室盛大举行,本次活动主要面对Apache SkyWalking的使用者、开发者和潜在用户。线上线下共有230多人报名。经统计,实际参加活动人数超过130人,近60%的人愿意抽出自己的休息时间,来交流学习Apache SkyWalking和开源文化。不难看见,在可预见的未来,中国的开源项目很快将进入下一个维度,那必定是更广的社区人员参与,更高技术知识体现,更强的线上稳定性和及时修复能力。\n活动历程: 09:30-09:50 SkyWalking\u0026rsquo;s 2019-2020 and beyond 吴晟老师本次分享:回顾2020年度SkyWalking发布的重要的新特性,出版的《Apache SkyWalking实战》图书,社区的进展,开源爱好者如何参与SkyWalking建设,和已知社区在主导的SkyWalking2021年孵化中的新特性。\n09:55-10:30 贝壳全链路跟踪实践 赵禹光老师(作者)本次分享:回顾了贝壳找房2018年至今,贝壳找房的全链路跟踪项目与SkyWalking的渊源,分享了SkyWalking在实践中遇到的问题,和解决方案。以及SkyWalking近10%的Committer都曾经或正在贝壳人店平台签中研发部,工作过的趣事。\n10:35-11:15 刘嘉鹏老师分享 SkyWalking在百度爱番番部门实践 刘嘉鹏老师本次分享:回顾了百度爱番番部门在使用SkyWalking的发展历程\u0026amp;现状,CRM SAAS产品在近1年使用SkyWalking实践经验,以及如何参与SkyWalking的贡献,并成为的Apache Committer。\n11:15-11:55 适兕老师分享 非计算机背景的同学如何贡献开源 适兕是国内很有名的开源布道师,本次分享从生产、分发、消费的软件供应的角度,根据涉及到的角色,然后再反观现代大学教育体系的专业,进一步对一个开源项目和community需要的专业背景多样性进行一个阐述和探究。并以ALC Beijing为例进行一个事例性的说明,非计算机背景的同学如何贡献开源。\n13:30-14:10 如何从 Apache SkyWalking 社区学习 Apache Way 14:10-14:50 Apache SkyWalking 在小米公司的应用 宋振东老师是小米信息技术部分布式链路追踪系统研发负责人,分别以小米公司,业务开发、架构师、SRE、Leader和QA等多个视角,回顾了SkyWalking在小米公司的应用实践。从APM的产品选型到实际落地,对其他公司准备使用SkyWalking落地,非常有借鉴意义。\n14:50-15:30 Istio全生命周期监控 高洪涛老师本次分享了SkyWalking和可观测云原生等非常前沿的知识布道,其中有,云原生在Logging、Metrics和Tracing的相关知识,Istio,K8S等方面的实践。对一些公司在前沿技术的落地,非常有借鉴意义。\n15:45-16:25 针对HikariCP数据库连接池的监控 张鑫老师本次分享了,以一个SkyWalking无法Tracing的实际线上故障的故事出发,讲述如何定位,和补充SkyWalking插件的不足,并将最后的实践贡献到社区。对大家参与开源很有帮助。\n16:25-17:00 SkyWalking 与 Nginx 的优化实践 王院生老师本次分享SkyWalking社区和APISIX社区合作,在Nginx插件的实践过程,对社区之间的如何开展合作,非常有借鉴意义,院生老师的工作\u0026amp;开源态度,很好的诠释Geek精神,也是我们互联网从业者需要学习恪守的。\nApache SkyWalking 2020 DevCon 讲师PPT Apache SkyWalking 2020 DevCon 讲师 PPT\nSkyWalking 后续发展计划 正如吴晟老师所说:No plan, open to the community,Apache SkyWalking是没有RoadMap。社区的后续发展,依赖于每个人在社区的贡献。与其期待,不如大胆设想,将自己的设计按照Apache Way贡献到SkyWalking,你就是下一个Apache SkyWalking Commiter,加入Member of SkyWalking大家庭,让社区因为你,而更加有活力。\n","title":"亲临百人盛况的Apache SkyWalking 2020 DevCon,看见了什么?","url":"/zh/2020-11-21-what-do-we-see-at-the-apache-skywalking-2020-devcon-event/"},{"content":"Sheng Wu is a founding engineer at tetrate.io, leads the observability for service mesh and hybrid cloud. A searcher, evangelist, and developer in the observability, distributed tracing, and APM. He is a member of the Apache Software Foundation. Love open source software and culture. Created the Apache SkyWalking project and being its VP and PMC member. Co-founder and PMC member of Apache ShardingSphere. Also as a PMC member of Apache Incubator and APISIX. He is awarded as Microsoft MVP, Alibaba Cloud MVP, Tencent Cloud TVP.\nIn the Apache FY2020 report, China is on the top of the download statistics. More China initiated projects joined the incubator, and some of them graduated as the Apache TLP. Sheng joined the Apache community since 2017, in the past 3 years, he witnessed the growth of the open-source culture and Apache way in China. Many developers have joined the ASF as new contributors, committers, foundation members. Chinese enterprises and companies paid more attention to open source contributions, rather than simply using the project like before. In the keynote, he would share the progress about China embracing the Apache culture, and willing of enhancing the whole Apache community.\n  ","title":"[Video] Apache grows in China","url":"/blog/2020-11-21-apachecon-keynote/"},{"content":"SkyWalking Client JS 0.2.0 is released. 
Go to downloads page to find release tars.\n Bug Fixes  Fixed a bug in sslTime calculate. Fixed a bug in server response status judgment.    ","title":"Release Apache SkyWalking Client JS 0.2.0","url":"/events/release-apache-skywalking-client-js-0-2-0/"},{"content":"SkyWalking Cloud on Kubernetes 0.1.0 is released. Go to downloads page to find release tars.\n Add OAPServer CRDs and controller.  ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.1.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0.1.0/"},{"content":"Based on his continuous contributions, Jiapeng Liu (a.k.a evanljp) has been voted as a new committer.\n","title":"Welcome Jiapeng Liu as new committer","url":"/events/welcome-jiapeng-liu-as-new-committer/"},{"content":"SkyWalking Kubernetes Helm Chart 4.0.0 is released. Go to downloads page to find release tars.\n Allow overriding configurations files under /skywalking/config. Unify the usages of different SkyWalking versions. Add Values for init container in case of using private regestry. Add services, endpoints resources in ClusterRole.  ","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.0.0","url":"/events/release-apache-skywalking-kubernetes-helm-chart-4.0.0/"},{"content":"SkyWalking Client JS 0.1.0 is released. Go to downloads page to find release tars.\n Support Browser Side Monitoring. Require SkyWalking APM 8.2+.  ","title":"Release Apache SkyWalking Client JS 0.1.0","url":"/events/release-apache-skywalking-client-js-0-1-0/"},{"content":" Author: Zhenxu Ke, Sheng Wu, Hongtao Gao, and Tevah Platt. tetrate.io Original link, Tetrate.io blog Oct. 29th, 2020  Apache SkyWalking, the observability platform, and open-source application performance monitor (APM) project, today announced the general availability of its 8.2 release. The release extends Apache SkyWalking’s functionalities and monitoring boundary to the browser side.\nBackground SkyWalking is an observability platform and APM tool that works with or without a service mesh, providing automatic instrumentation for microservices, cloud-native and container-based applications. The top-level Apache project is supported by a global community and is used by Alibaba, Huawei, Tencent, Baidu, ByteDance, and scores of others.\nBrowser side monitoring APM helps SRE and Engineering teams to diagnose system failures, or optimize the systems before they become intolerably slow. But is it enough to always make the users happy?\nIn 8.2.0, SkyWalking extends its monitoring boundary to the browser side, e.g., Chrome, or the network between Chrome and the backend service, or the codes running in the browser. With this, not only can we monitor the backend services and requests sent by the browser as usual, but also the front end rendering speed, error logs, etc., which are the most efficient metrics for capturing the experiences of our end users. (This does not currently extend to IoT devices, but this feature moves SkyWalking a step in that direction).\nWhat\u0026rsquo;s more, SkyWalking browser monitoring also provides data about how the users use products, such as PV(page views), UV(unique visitors), top N PV(page views), etc., which can give a product team clues for optimizing their products.\nQuery traces by tags In SkyWalking\u0026rsquo;s Span data model, there are many important fields that are already indexed and can be queried by the users, but for the sake of performance, querying by Span tags was not supported until now. 
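As background for the tag-based query introduced next, here is a minimal, hedged sketch of how an application might attach custom tags to the active span through the SkyWalking Java agent's tracing toolkit (it assumes the apm-toolkit-trace dependency is on the classpath; the class, method, and tag names are illustrative only and are not taken from the release notes):

import org.apache.skywalking.apm.toolkit.trace.ActiveSpan;
import org.apache.skywalking.apm.toolkit.trace.Trace;

public class OrderHandler {

    // @Trace creates a local span for this method when the SkyWalking Java agent
    // is attached; without the agent the annotation is simply ignored.
    @Trace
    public void handle(String orderId, boolean syntheticTraffic) {
        // Attach custom tags to the currently active span. A tag such as
        // "synthetic=true" marks load-test traffic so it can be filtered out
        // (or selected) later when querying traces by tags.
        ActiveSpan.tag("order.id", orderId);
        if (syntheticTraffic) {
            ActiveSpan.tag("synthetic", "true");
        }
        // ... business logic ...
    }
}

Tags written this way sit on the span alongside the automatically collected ones, which is what makes the tag-based trace query described next possible.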
In SkyWalking 8.2.0, we allow users to query traces by specified tags, which is extremely useful. For example, SRE engineers running tests on the product environment can tag the synthetic traffic and query by this tag later.\nMeter Analysis Language In 8.2.0, the meter system provides a functional analysis language called MAL(Meter Analysis Language) that allows users to analyze and aggregate meter data in the OAP streaming system. The result of an expression can be ingested by either the agent analyzer or OpenTelemetry/Prometheus analyzer.\nComposite Alert Rules Alerting is a good way to discover system failures in time. A common problem is that we configure too many triggers just to avoid missing any possible issue. Nobody likes to be woken up by alert messages at midnight, only to find out that the trigger is too sensitive. These kinds of alerts become noisy and don\u0026rsquo;t help at all.\nIn 8.2.0, users can now configure composite alert rules, where composite rules take multiple metrics dimensions into account. With composite alert rules, we can leverage as many metrics as needed to more accurately determine whether there’s a real problem or just an occasional glitch.\nCommon scenarios like successful rate \u0026lt; 90% but there are only 1~2 requests can now be resolved by a composite rule, such as traffic(calls per minute) \u0026gt; n \u0026amp;\u0026amp; successful rate \u0026lt; m%.\nOther Notable Enhancements  The agent toolkit exposes some APIs for users to send customizable metrics. The agent exclude_plugins allows you to exclude some plugins; mount enables you to load a new set of plugins. More than 10 new plugins have been contributed to the agent. The alert system natively supports sending alert messages to Slack, WeChat, DingTalk.  Additional Resources  Read more about the SkyWalking 8.2 release highlights. Get more SkyWalking updates on Twitter.  ","title":"Features in SkyWalking 8.2: Browser Side Monitoring; Query Traces by Tags; Meter Analysis Language","url":"/blog/2020-10-29-skywalking8-2-release/"},{"content":" 作者: 柯振旭, 吴晟, 高洪涛, Tevah Platt. tetrate.io 原文链接: What\u0026rsquo;s new with Apache SkyWalking 8.2? 
Browser monitoring and more 2020 年 10 月 29 日  Apache SkyWalking,一个可观测性平台,也是一个开源的应用性能监视器(APM)项目,今日宣布 8.2 发行版全面可用。该发行版拓展了核心功能,并将其监控边界拓展到浏览器端。\n背景 SkyWalking 是一个观测平台和 APM 工具。它可以选择性的与 Service Mesh 协同工作,为微服务、云原生和基于容器的应用提供自动的指标。该项目是全球社区支持的 Apache 顶级项目,阿里巴巴、华为、腾讯、百度、字节跳动等许多公司都在使用。\n浏览器端监控 APM 可以帮助 SRE 和工程团队诊断系统故障,也能在系统异常缓慢之前优化它。但它是否足以让用户总是满意呢?\n在 8.2.0 版本中, SkyWalking 将它的监控边界拓展到了浏览器端,比如 Chrome ,或者 Chrome 和后端服务之间的网络。这样,我们不仅可以像以前一样监控浏览器发送给后端服务的与请求,还能看到前端的渲染速度、错误日志等信息——这些信息是获取最终用户体验的最有效指标。(目前此功能尚未拓展到物联网设备中,但这项功能使得 SkyWalking 向着这个方向前进了一步)\n此外,SkyWalking浏览器监视也提供以下数据: PV(page views,页面浏览量), UV(unique visitors,独立访客数),浏览量前 N 的页面(Top N Page Views)等。这些数据可以为产品队伍优化他们的产品提供线索。\n按标签 (tag) 查询链路数据 在 SkyWalking 的 Span 数据模型中,已经有了许多被索引并可供用户查询的重要字段。但出于性能考虑,使用 Span 标签查询链路数据的功能直到现在才正式提供。在 SkyWalking 8.2.0 中,我们允许用户查询被特定标签标记的链路,这非常有用。SRE 工程师可以在生产环境中运行测试,将其打上仿真流量的标签,并稍后通过该标签查找它。\n指标分析语言 在 8.2.0 中,仪表系统提供了一项名为MAL(Meter Analysis Language,指标分析语言)的强大分析语言。该语言允许用户在 OAP 流系统中分析并聚合(aggregate)指标数据。 表达式的结果可以被 Agent 分析器或 OpenTelemetry/Prometheus 分析器获取。\n复合警报规则 警报是及时发现系统失效的有效方式。一个常见的问题是,为了避免错过任何可能的问题,我们通常会配置过多的触发器(triggers)。没有人喜欢半夜被警报叫醒,结果只是因为触发系统太敏感。这种警报很嘈杂并毫无帮助。\n在 8.2.0 版本中,用户选择可以配置考虑了多个度量维度的复合警报规则。使用复合报警规则,我们可以根据需要添加尽可能多的指标来更精确地判断是否存在真正的问题,或者只是一个偶发的小问题。\n一些常见的情况,如 成功率 \u0026lt; 90% 但只有 1~2 个请求,现在可以通过复合规则解决,如流量(即每分钟调用数) \u0026gt; n \u0026amp;\u0026amp; 成功率 \u0026lt; m%。\n其它值得注意的功能增强  agent-toolkit SDK 公开了某些 API,供用户发送自定义指标。 Agent exclude_plgins 配置允许您排除某些插件(plugins); mount 配置使您能够加载一套新的插件。 社区贡献了超过 10 个新 Agent 插件。 报警系统原生支持发送消息到 Slack,企业微信,钉钉。  附加资源   阅读更多关于SkyWalkng 8.2 发行版重点.\n  在推特上获取更多关于 SkyWalking 的更新。\n  Apache SkyWalking DevCon 报名信息 Apache SkyWalking DevCon 2020 开始报名了。 2020 年 11 月 14 日,欢迎大家来线下参加活动和交流, 或者报名观看线上直播。\n","title":"SkyWalking 8.2.0 中的新特性: 浏览器端监控; 使用标签查询; 指标分析语言","url":"/zh/2020-10-29-skywalking8-2-release/"},{"content":"SkyWalking 8.2.0 is released. Go to downloads page to find release tars.\nProject  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. 
Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.2.0","url":"/events/release-apache-skywalking-apm-8-2-0/"},{"content":"高洪涛 美国ServiceMesh服务商tetrate创始工程师。原华为软件开发云技术专家。目前为Apache SkyWalking核心贡献者,参与该开源项目在软件开发云的商业化进程。曾任职当当网系统架构师,开源达人,曾参与Apache ShardingSphere,Elastic-Job等知名开源项目。对分布式数据库,容器调度,微服务,ServicMesh等技术有深入的了解。\n议题简介 定制化Operator模式在面向Kubernetes的云化平台建构中变得越来越流行。Apache SkyWalking社区已经开始尝试使用Operator模式去构建基于Kubernetes平台的PaaS云组件。本次分享给将会给听众带来该项目的初衷,实现与未来演进等相关内容。分享的内容包含:\n 项目动机与设计理念 核心功能展示,包含SkyWalking核心组件的发布,更新与维护。 观测ServiceMesh,包含于Istio的自动集成。 目前的工作进展和对未来的规划。  B站视频地址\n","title":"[视频] Apache SkyWalking Cloud on Kubernetes","url":"/zh/2020-10-25-coscon20-swck/"},{"content":"SkyWalking LUA Nginx 0.3.0 is released. Go to downloads page to find release tars.\n Load the base64 module in utils, different ENV use different library. Add prefix skywalking, avoid conflicts with other lua libraries. Chore: only expose the method of setting random seed, it is optional. Coc: use correct code block type. 
CI: add upstream_status to tag http.status Add http.status  ","title":"Release Apache SkyWalking LUA Nginx 0.3.0","url":"/events/release-apache-skywalking-lua-nginx-0.3.0/"},{"content":"SkyWalking CLI 0.4.0 is released. Go to downloads page to find release tars.\n Features  Add dashboard global command with auto-refresh Add dashboard global-metrics command Add traces search Refactor metrics thermodynamic command to adopt the new query protocol   Bug Fixes  Fix wrong golang standard time    ","title":"Release Apache SkyWalking CLI 0.4.0","url":"/events/release-apache-skywalking-cli-0-4-0/"},{"content":"Huaxi Jiang (江华禧) (a.k.a. fgksgf) mainly focuses on the SkyWalking CLI project. He participated in the \u0026ldquo;Open Source Promotion Plan - Summer 2020\u0026rdquo;, completed the project smoothly, and won the \u0026ldquo;Most Potential Students\u0026rdquo; award, which shows his great willingness to continuously contribute to our community.\nTo date, he has submitted 26 PRs in the CLI repository and 3 PRs in the main repo, ~4000 LOC in total.\nOn Sep. 28th, 2020, the project management committee (PMC) passed the proposal to promote him as a new committer. He accepted the invitation the same day.\nWelcome to join the committer team, Huaxi!\n","title":"Welcome Huaxi Jiang (江华禧) as new committer","url":"/events/welcome-huaxi-jiang-as-new-committer/"},{"content":"SkyWalking Python 0.3.0 is released. Go to downloads page to find release tars.\n  New plugins\n Urllib3 Plugin (#69) Elasticsearch Plugin (#64) PyMongo Plugin (#60) Rabbitmq Plugin (#53) Make plugin compatible with Django (#52)    API\n Add process propagation (#67) Add tags to decorators (#65) Add check of package versions when installing plugins (#63) Add thread propagation (#62) Add trace ignore (#59) Support snapshot context (#56) Support correlation context (#55)    Chores and tests\n Test: run multiple versions of supported libraries (#66) Chore: add pull request template for plugin (#61) Chore: add dev doc and reorganize the structure (#58) Test: update test health check (#57) Chore: add make goal to package release tar ball (#54)    ","title":"Release Apache SkyWalking Python 0.3.0","url":"/events/release-apache-skywalking-python-0-3-0/"},{"content":"吴晟 吴晟,Apache 基金会会员,Apache SkyWalking 创始人、项目 VP 和 PMC 成员,Apache 孵化器 PMC 成员,Apache ShardingSphere PMC成员,Apache APISIX PMC 成员,Apache ECharts (incubating) 和Apache DolphinScheduler (incubating) 孵化器导师,Zipkin 成员和贡献者。\n分享大纲  分布式追踪兴起的背景 SkyWalking和其他分布式追踪的异同 定位问题的流程和方法 性能剖析的由来、用途和优势  听众收获 听众能够全面的了解分布式追踪的技术背景,和技术原理。以及为什么这些年,分布式追踪和基于分布式追踪的APM系统,Apache SkyWalking,得到了广泛的使用、集成,甚至云厂商的支持。同时,除了针对追踪数据,我们应该关注更多的是,如何利用其产生的监控数据,定位系统的性能问题。以及它有哪些短板,应该如何弥补。\nB站视频地址\n","title":"[视频] 云原生学院 - 后分布式追踪时代的性能问题定位——方法级性能剖析","url":"/zh/2020-08-13-cloud-native-academy/"},{"content":"SkyWalking Chart 3.1.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 8.1.0 Support enabling OAP dynamic configuration through k8s configmap  ","title":"Release Apache SkyWalking Chart 3.1.0 for SkyWalking 8.1.0","url":"/events/release-apache-skywalking-chart-3-1-0-for-skywalking-8-1-0/"},{"content":" Author: Sheng Wu Original link, Tetrate.io blog  SkyWalking, a top-level Apache project, is the open source APM and observability analysis platform that is solving the problems of 21st-century systems that are increasingly large, distributed, and heterogeneous.
It\u0026rsquo;s built for the struggles system admins face today: To identify and locate needles in a haystack of interdependent services, to get apples-to-apples metrics across polyglot apps, and to get a complete and meaningful view of performance.\nSkyWalking is a holistic platform that can observe microservices on or off a mesh, and can provide consistent monitoring with a lightweight payload.\nLet\u0026rsquo;s take a look at how SkyWalking evolved to address the problem of observability at scale, and grew from a pure tracing system to a feature-rich observability platform that is now used to analyze deployments that collect tens of billions of traces per day.\nDesigning for scale When SkyWalking was first initialized back in 2015, its primary use case was monitoring the first-generation distributed core system of China Top Telecom companies, China Unicom and China Mobile. In 2013-2014, the telecom companies planned to replace their old traditional monolithic applications with a distributed system. Supporting a super-large distributed system and scaleablity were the high-priority design goals from Day one. So, what matters at scale?\nPull vs. push Pull and push modes relate to the direction of data flow. If the agent collects data and pushes them to the backend for further analysis, we call it \u0026ldquo;push\u0026rdquo; mode. Debate over pull vs. push has gone on for a long time. The key for an observability system is to minimize the cost of the agent, and to be generally suitable for different kinds of observability data.\nThe agent would send the data out a short period after it is collected. Then, we would have less concern about overloading the local cache. One typical case would be endpoint (URI of HTTP, service of gRPC) metrics. Any service could easily have hundreds, even thousands of endpoints. An APM system must have these metrics analysis capabilities.\nFurthermore, metrics aren\u0026rsquo;t the only thing in the observability landscape; traces and logs are important too. SkyWalking is designed to provide a 100% sampling rate tracing capability in the production environment. Clearly, push mode is the only solution.\nAt the same time, using push mode natively doesn\u0026rsquo;t mean SkyWalking can\u0026rsquo;t do data pulling. In recent 8.x releases, SkyWalking supports fetching data from Prometheus-instrumented services for reducing the Non-Recurring Engineering of the end users. Also, pull mode is popular in the MQ based transport, typically as a Kafka consumer. The SkyWalking agent side uses the push mode, and the OAP server uses the pull mode.\nThe conclusion: push mode is the native way, but pull mode works in some special cases too.\nMetrics analysis isn\u0026rsquo;t just mathematical calculation Metrics rely on mathematical theories and calculations. Percentile is a good measure for identifying the long tail issue, and reasonable average response time and successful rate are good SLO(s). But those are not all. Distributed tracing provides not just traces with detailed information, but high values metrics that can be analyzed.\nThe service topology map is required from Ops and SRE teams for the NOC dashboard and confirmation of system data flow. SkyWalking uses the STAM (Streaming Topology Analysis Method) to analyze topology from the traces, or based on ALS (Envoy Access Log Service) in the service mesh environment. 
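To make that idea concrete, here is a deliberately simplified, illustrative sketch of how topology edges (service-to-service lines) and their call counts could be aggregated from cross-process span references in a trace stream. This is not SkyWalking's actual STAM code; every name in it is invented for illustration:

import java.util.HashMap;
import java.util.Map;

class ToyTopologyAggregator {

    // A cross-process reference carried by a trace: which service called which.
    record SpanRef(String parentService, String childService) {}

    // Edge -> number of calls observed; a real system would aggregate this
    // per time bucket and flush it to storage.
    private final Map<String, Long> edgeCallCounts = new HashMap<>();

    // Invoked for every span reference seen in the analysis stream.
    void onSpanRef(SpanRef ref) {
        String edge = ref.parentService() + " -> " + ref.childService();
        edgeCallCounts.merge(edge, 1L, Long::sum);
    }

    Map<String, Long> snapshot() {
        return Map.copyOf(edgeCallCounts);
    }
}

The real analysis is streaming and far more involved, but the direction of the data flow is the same: the map is built from trace (or access log) data.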
This topology and metrics of nodes (services) and lines (service relationships) can\u0026rsquo;t be pulled from simple metrics SDKs.\nAs with fixing the limitation of endpoint metrics collection, SkyWalking needs to do endpoint dependency analysis from trace data too. Endpoint dependency analysis provides more important and specific information, including upstream and downstream. Those dependency relationships and metrics help the developer team to locate the boundaries of a performance issue, to specific code blocks.\nPre-calculation vs. query stage calculation? Query stage calculation provides flexibility. Pre-calculation, in the analysis stage, provides better and much more stable performance. Recall our design principle: SkyWalking targets a large-scale distributed system. Query stage calculation was very limited in scope, and most metrics calculations need to be pre-defined and pre-calculated. The key of supporting large datasets is reducing the size of datasets in the design level. Pre-calculation allows the original data to be merged into aggregated results downstream, to be used in a query or even for an alert check.\nTTL of metrics is another important business enabler. With the near linear performance offered by queries because of pre-calculation, with a similar query infrastructure, organizations can offer higher TTL, thereby providing extended visibility of performance.\nSpeaking of alerts, query-stage calculation also means the alerting query is required to be based on the query engine. But in this case, when the dataset increasing, the query performance could be inconsistent. The same thing happens in a different metrics query.\nCases today Today, SkyWalking is monitoring super large-scale distributed systems in many large enterprises, including Alibaba, Huawei, Tencent, Baidu, China Telecom, and various banks and insurance companies. The online service companies have more traffic than the traditional companies, like banks and telecom suppliers.\nSkyWalking is the observability platform used for a variety of use cases for distributed systems that are super-large by many measures:\n Lagou.com, an online job recruitment platform  SkyWalking is observing \u0026gt;100 services, 500+ JVM instances SkyWalking collects and analyzes 4+ billion traces per day to analyze performance data, including metrics of 300k+ endpoints and dependencies Monitoring \u0026gt;50k traffic per second in the whole cluster   Yonghui SuperMarket, online service  SkyWalking analyzes at least 10+ billion (3B) traces with metrics per day SkyWalking\u0026rsquo;s second, smaller deployment, analyzes 200+ million traces per day   Baidu, internet and AI company, Kubernetes deployment  SkyWalking collects 1T+ traces a day from 1,400+ pods of 120+ services Continues to scale out as more services are added   Beike Zhaofang(ke.com), a Chinese online property brokerage backed by Tencent Holdings and SoftBank Group  Has used SkyWalking from its very beginning, and has two members in the PMC team. 
Deployments collect 16+ billion traces per day   Ali Yunxiao, DevOps service on the Alibaba Cloud,  SkyWalking collects and analyzes billions of spans per day SkyWalking keeps AliCloud\u0026rsquo;s 45 services and ~300 instances stable   A department of Alibaba TMall, one of the largest business-to-consumer online retailers, spun off from Taobao  A customized version of SkyWalking monitors billions of traces per day At the same time, they are building a load testing platform based on SkyWalking\u0026rsquo;s agent tech stack, leveraging its tracing and context propagation cabilities    Conclusion SkyWalking\u0026rsquo;s approach to observability follows these principles:\n Understand the logic model: don\u0026rsquo;t treat observability as a mathematical tool. Identify dependencies first, then their metrics. Scaling should be accomplished easily and natively. Maintain consistency across different architectures, and in the performance of APM itself.  Resources  Read about the SkyWalking 8.1 release highlights. Get more SkyWalking updates on Twitter. Sign up to hear more about SkyWalking and observability from Tetrate.  ","title":"Observability at Scale: SkyWalking it is","url":"/blog/2020-08-11-observability-at-scale/"},{"content":" 作者:吴晟 翻译:董旭 金蝶医疗 原文链接:Tetrate.io blog  SkyWalking做为Apache的顶级项目,是一个开源的APM和可观测性分析平台,它解决了21世纪日益庞大、分布式和异构的系统的问题。它是为应对当前系统管理所面临的困难而构建的:就像大海捞针,SkyWalking可以在服务依赖复杂且多语言环境下,获取服务对应的指标,以及完整而有意义的性能视图。\nSkyWalking是一个非常全面的平台,无论你的微服务是否在服务网格(Service Mesh)架构下,它都可以提供高性能且一致性的监控。\n让我们来看看,SkyWalking是如何解决大规模集群的可观测性问题,并从一个纯粹的链路跟踪系统,发展成为一个每天分析百亿级跟踪数据,功能丰富的可观测性平台。\n为超大规模而生 SkyWalking的诞生,时间要追溯到2015年,当时它主要应用于监控顶级电信公司(例如:中国联通和中国移动)的第一代分布式核心系统。2013-2014年,这些电信公司计划用分布式系统取代传统的单体架构应用。从诞生那天开始,SkyWalking首要的设计目标,就是能够支持超大型分布式系统,并具有很好可扩展性。那么支撑超大规模系统要考虑什么呢?\n拉取vs推送 与数据流向息息相关的:拉取模式和推送模式。Agent(客户端)收集数据并将其推送到后端,再对数据进一步分析,我们称之为“推送”模式。究竟应该使用拉取还是推送?这个话题已经争论已久。关键因素取决于可观测性系统的目标,即:在Agent端花最小的成本,使其适配不同类型的可观测性数据。\nAgent收集数据后,可以在短时间内发送出去。这样,我们就不必担心本地缓存压力过大。举一个典型的例子,任意服务都可以轻松地拥有数百个甚至数千个端点指标(如:HTTP的URI,gRPC的服务)。那么APM系统就必须具有分析这些数量庞大指标的能力。\n此外,度量指标并不是可观测性领域中的唯一关注点,链路跟踪和日志也很重要。在生产环境下,SkyWalking为了能提供100%采样率的跟踪能力,数据推送模式是唯一可行的解决方案。\nSkyWalking即便使用了推送模式,同时也可进行数据拉取。在最近的8.x的发版本中,SkyWalking支持从已经集成Prometheus的服务中获取终端用户的数据,避免重复工程建设,减少资源浪费。另外,比较常见的是基于MQ的传输构建拉取模式,Kafka消费者就是一个比较典型的例子。SkyWalking的Agent端使用推送模式,OAP服务器端使用拉取模式。\n结论:SkyWalking的推送模式是原生方式,但拉取式模式也适用于某些特殊场景。\n度量指标分析并不仅仅是数学统计 度量指标依赖于数学理论和计算。Percentile(百分位数)是用于反映响应时间的长尾效应。服务具备合理的平均响应时间和成功率,说明服务的服务等级目标(SLO)很好。除此之外,分布式跟踪还为跟踪提供了详细的信息,以及可分析的高价值指标。\n运维团队(OPS)和系统稳定性(SRE)团队通过服务拓扑图,用来观察网络情况(当做NOC dashboard使用)、确认系统数据流。SkyWalking依靠trace(跟踪数据),使用STAM(Streaming Topology Analysis Method)方法进行分析拓扑结构。在服务网格环境下,使用ALS(Envoy Access Log Service)进行拓扑分析。节点(services)和线路(service relationships)的拓扑结构和度量指标数据,无法通过sdk轻而易举的拿到。\n为了解决端点度量指标收集的局限性,SkyWalking还要从跟踪数据中分析端点依赖关系,从而拿到链路上游、下游这些关键具体的信息。这些依赖关系和度量指标信息,有助于开发团队定位引起性能问题的边界,甚至代码块。\n预计算还是查询时计算? 
相比查询时计算的灵活性,预计算可以提供更好、更稳定的性能,这在分析场景下尤为重要。回想一下我们的设计原则:SkyWalking是为了一个大规模的分布式系统而设计。查询时计算的使用范围非常有限,大多数度量计算都需要预先定义和预先计算。支持大数据集的关键是:在设计阶段,要减小数据集。预计算允许将原始数据合并到下游的聚合结果中,用于查询,甚至用于警报检查。\n使用SkyWalking的另一个重要因素是:指标的有效期,TTL(Time To Live)。由于采用了预先计算,查询提供了近似线性的高性能。这也帮助“查询系统”这类基础设施系统,提供更好的性能扩展。\n关于警报,使用查询时计算方案,也意味着警报查询需要基于查询引擎。但在这种情况下,随着数据集增加,查询性能会随之下降,其他指标查询也是一样的结果。\n目前使用案例 如今,SkyWalking在许多大型企业的超大规模分布式系统中使用,包括阿里巴巴、华为、腾讯、百度、中国通讯企业以及多家银行和保险公司。上线SkyWalking公司的流量,比银行和电信运营商这种传统公司还要大。\n在很多行业中,SkyWalking是被应用于超大型分布式系统各种场景下的一个可观测性平台:\n  拉勾网\n  SkyWalking正在观测超过100个服务,500多个JVM实例\n  SkyWalking每天收集和分析40多亿个跟踪数据,用来分析性能,其中包括30万个端点和依赖关系的指标\n  在整个群集中监控\u0026gt;50k流量/秒\n    永辉超市\n  SkyWalking每天分析至少100多亿(3B)的跟踪数据\n  其次,SkyWalking用较小的部署,每天分析2亿多个跟踪数据\n    百度\n  SkyWalking每天从1400多个pod中,从120多个服务收集1T以上的跟踪数据\n  随着更多服务的增加,规模会持续增大\n    贝壳找房(ke.com)\n  很早就使用了SkyWalking,有两名成员已经成为PMC\n  Deployments每天收集160多亿个跟踪数据\n    阿里云效\n  SkyWalking每天收集和分析数十亿个span\n  SkyWalking使阿里云的45项服务和~300个实例保持稳定\n    阿里巴巴天猫\n  SkyWalking个性化定制版,每天监控数十亿跟踪数据\n  与此同时,他们基于SkyWalking的Agent技术栈,利用其跟踪和上下文传播能力,正在构建一个全链路压测平台\n    结论 SkyWalking针对可观测性遵循以下原则:\n 理解逻辑模型:不要把可观测性当作数学统计工具。 首先确定依赖关系,然后确定它们的度量指标。 原生和方便的支撑大规模增长。 在不同的架构情况下,APM各方面表现依然保持稳定和一致。  资源  阅读SkyWalking 8.1发布亮点。 在Twitter上获取更多SkyWalking更新。 注册Tetrate以了解更多有关SkyWalking可观测性的信息。  ","title":"SkyWalking 为超大规模而生","url":"/zh/2020-08-11-observability-at-scale-skywalking-it-is/"},{"content":" Author: Sheng Wu, Hongtao Gao, and Tevah Platt(Tetrate) Original link, Tetrate.io blog  Apache SkyWalking, the observability platform, and open-source application performance monitor (APM) project, today announced the general availability of its 8.1 release that extends its functionalities and provides a transport layer to maintain the lightweight of the platform that observes data continuously.\nBackground SkyWalking is an observability platform and APM tool that works with or without a service mesh, providing automatic instrumentation for microservices, cloud-native and container-based applications. The top-level Apache project is supported by a global community and is used by Alibaba, Huawei, Tencent, Baidu, and scores of others.\nTransport traces For a long time, SkyWalking has used gRPC and HTTP to transport traces, metrics, and logs. They provide good performance and are quite lightweight, but people kept asking about the MQ as a transport layer because they want to keep the observability data continuously as much as possible. From SkyWalking’s perspective, the MQ based transport layer consumes more resources required in the deployment and the complexity of deployment and maintenance but brings more powerful throughput capacity between the agent and backend.\nIn 8.1.0, SkyWalking officially provides the typical MQ implementation, Kafka, to transport all observability data, including traces, metrics, logs, and profiling data. At the same time, the backend can support traditional gRPC and HTTP receivers, with the new Kafka consumer at the same time. Different users could choose the transport layer(s) according to their own requirements. Also, by referring to this implementation, the community could contribute various transport plugins for Apache Pulsar, RabbitMQ.\nAutomatic endpoint dependencies detection The 8.1 SkyWalking release offers automatic detection of endpoint dependencies. SkyWalking has long offered automatic endpoint detection, but endpoint dependencies, including upstream and downstream endpoints, are critical for Ops and SRE teams’ performance analysis. 
The APM system is expected to detect the relationships powered by the distributed tracing. While SkyWalking has been designed to include this important information at the beginning the latest 8.1 release offers a cool visualization about the dependency and metrics between dependent endpoints. It provides a new drill-down angle from the topology. Once you have the performance issue from the service level, you could check on instance and endpoint perspectives:\nSpringSleuth metrics detection In the Java field, the Spring ecosystem is one of the most widely used. Micrometer, the metrics API lib included in the Spring Boot 2.0, is now adopted by SkyWalking’s native meter system APIs and agent. For applications using Micrometer with the SkyWalking agent installed, all Micrometer collected metrics could then be shipped into SkyWalking OAP. With some configurations in the OAP and UI, all metrics are analyzed and visualized in the SkyWalking UI, with all other metrics detected by SkyWalking agents automatically.\nNotable enhancements The Java agent core is enhanced in this release. It could work better in the concurrency class loader case and is more compatible with another agent solution, such as Alibaba’s Arthas.\n With the logic endpoint supported, the local span can be analyzed to get metrics. One span could carry the raw data of more than one endpoint’s performance. GraphQL, InfluxDB Java Client, and Quasar fiber libs are supported to be observed automatically. Kubernetes Configmap can now for the first time be used as the dynamic configuration center– a more cloud-native solution for k8s deployment environments. OAP supports health checks, especially including the storage health status. If the storage (e.g., ElasticSearch) is not available, you could get the unhealth status with explicit reasons through the health status query. Opencensus receiver supports ingesting OpenTelemetry/OpenCensus agent metrics by meter-system.  Additional resources  Read more about the SkyWalking 8.1 release highlights. Read more about SkyWalking from Tetrate on our blog. Get more SkyWalking updates on Twitter. Sign up to hear more about SkyWalking and observability from Tetrate.  ","title":"Features in SkyWalking 8.1: SpringSleuth metrics, endpoint dependency detection, Kafka transport traces and metrics","url":"/blog/2020-08-03-skywalking8-1-release/"},{"content":"SkyWalking APM 8.1.0 is release. Go to downloads page to find release tars.\nProject  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. 
Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.1.0","url":"/events/release-apache-skywalking-apm-8-1-0/"},{"content":"Based on his continuous contributions, Wei Hua (a.k.a alonelaval) has been voted as a new committer.\n","title":"Welcome Wei Hua as new committer","url":"/events/welcome-wei-hua-as-new-committer/"},{"content":"SkyWalking Python 0.2.0 is released. Go to downloads page to find release tars.\n  Plugins:\n Kafka Plugin (#50) Tornado Plugin (#48) Redis Plugin (#44) Django Plugin (#37) PyMsql Plugin (#35) Flask plugin (#31)    API\n Add ignore_suffix Config (#40) Add missing log method and simplify test codes (#34) Add content equality of SegmentRef (#30) Validate carrier before using it (#29)    Chores and tests\n Test: print the diff list when validation failed (#46) Created venv builders for linux/windows and req flashers + use documentation (#38)    ","title":"Release Apache SkyWalking Python 0.2.0","url":"/events/release-apache-skywalking-python-0-2-0/"},{"content":"SkyWalking CLI 0.3.0 is released. Go to downloads page to find release tars.\n Command: health check command Command: Add trace command BugFix: Fix wrong metrics graphql path  ","title":"Release Apache SkyWalking CLI 0.3.0","url":"/events/release-apache-skywalking-cli-0-3-0/"},{"content":" Author: Srinivasan Ramaswamy, tetrate Original link, Tetrate.io blog  Asking How are you is more profound than What are your symptoms Background Recently I visited my preferred doctor. Whenever I visit, the doctor greets me with a series of light questions: How’s your day? How about the week before? Any recent trips? Did I break my cycling record? How’s your workout regimen? 
Finally, he asks, “Do you have any problems?\u0026quot; On those visits when I didn\u0026rsquo;t feel ok, I would say something like, \u0026ldquo;I\u0026rsquo;m feeling dull this week, and I\u0026rsquo;m feeling more tired towards noon….\u0026quot; It\u0026rsquo;s at this point that he takes out his stethoscope, his pulse oximeter, and blood pressure apparatus. Then, if he feels he needs a more in-depth insight, he starts listing out specific tests to be made.\nWhen I asked him if the first part of the discussion was just an ice-breaker, he said, \u0026ldquo;That\u0026rsquo;s the essential part. It helps me find out how you feel, rather than what your symptoms are.\u0026quot; So, despite appearances, our opening chat about life helped him structure subsequent questions on symptoms, investigations and test results.\nOn the way back, I couldn\u0026rsquo;t stop asking myself, \u0026ldquo;Shouldn\u0026rsquo;t we be managing our mesh this way, too?\u0026quot;\nIf I draw parallels between my own health check and a mesh health check, “tests” would be log analysis, “investigations” would be tracing, and “symptoms” would be the traditional RED (Rate, Errors and Duration) metrics. That leaves the “essential part,” which is what we are talking about here: the Wellness Factor, primarily the health of our mesh.\nHealth in the context of service mesh We can measure the performance of any observed service through RED metrics. RED metrics offer immense value in understanding the performance, reliability, and throughput of every service. Compelling visualizations of these metrics across the mesh make monitoring the entire mesh standardized and scalable. Also, setting alerts based on thresholds for each of these metrics helps to detect anomalies as and when they arise.\nTo establish the context of any service and observe it, it\u0026rsquo;s ideal to visualize the mesh as a topology.\nA topology visualization of the mesh not only allows for picking any service and watching its metrics, but also gives vital information about service dependencies and the potential impact of a given service on the mesh.\nWhile RED metrics of each service offer tremendous insights, the user is more concerned with the overall responsiveness of the mesh rather than each of these services in isolation.\nTo describe the performance of any service, right from submitting the request to receiving a completed HTTP response, we’d be measuring the user\u0026rsquo;s perception of responsiveness. This measure of response time compared with a set threshold is called Apdex. This Apdex is an indicator of the health of a service in the mesh.\nApdex Apdex is a measure of response time considered against a set threshold. It is the ratio of satisfactory response times (with tolerating responses counted at half weight) to total response times.\nApdex is an industry standard to measure the satisfaction of users based on the response time of applications and services. It measures how satisfied your users are with your services, as traditional metrics such as average response time could get skewed quickly.\nSatisfactory response time indicates the number of times when the roundtrip response time of a particular service was less than this threshold. Unsatisfactory response time, while meaning the opposite, is further categorized as Tolerating and Frustrating. Tolerating accommodates any performance that is up to four times the threshold, and anything over that or any errors encountered is considered Frustrating.
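To make the classification and the score concrete, here is a minimal, self-contained sketch (the class and method names, and the handling of errored samples as Frustrated, are assumptions drawn only from the description above; it is not code from the article or from SkyWalking):

final class ApdexCalculator {

    enum Zone { SATISFIED, TOLERATING, FRUSTRATED }

    // Classify one sample against the Apdex threshold T (in milliseconds).
    static Zone classify(long responseMillis, boolean failed, long thresholdMillis) {
        if (failed || responseMillis > 4 * thresholdMillis) {
            return Zone.FRUSTRATED;              // errors, or slower than 4T
        }
        if (responseMillis > thresholdMillis) {
            return Zone.TOLERATING;              // between T and 4T
        }
        return Zone.SATISFIED;                   // within T
    }

    // Apdex score = (satisfied + tolerating / 2) / total samples.
    static double score(long satisfied, long tolerating, long frustrated) {
        long total = satisfied + tolerating + frustrated;
        return total == 0 ? 1.0 : (satisfied + tolerating / 2.0) / total;
    }
}

Running score(170, 20, 10) reproduces the 0.9 result of the worked example further down.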
The threshold mentioned here is an ideal roundtrip performance that we expect from any service. We could even start with an organization-wide limit of say, 500ms.\nThe Apdex score is a ratio of satisfied and tolerating requests to the total requests made.\nEach satisfied request counts as one request, while each tolerating request counts as half a satisfied request.\nAn Apdex score takes values from 0 to 1, with 0 being the worst possible score indicating that users were always frustrated, and ‘1’ as the best possible score (100% of response times were Satisfactory).\nA percentage representation of this score also serves as the Health Indicator of the service.\nThe Math The actual computation of this Apdex score is achieved through the following formula.\n\tSatisfiedCount + ( ToleratingCount / 2 ) Apdex Score = ------------------------------------------------------ TotalSamples A percentage representation of this score is known as the Health Indicator of a service.\nExample Computation During a 2-minute period, a host handles 200 requests.\nThe Apdex threshold T = 0.5 seconds (500ms).\n 170 of the requests were handled within 500ms, so they are classified as Satisfied. 20 of the requests were handled between 500ms and 2 seconds (2000 ms), so they are classified as Tolerating. The remaining 10 were not handled properly or took longer than 2 seconds, so they are classified as Frustrated.  The resulting Apdex score is 0.9: (170 + (20/2))/200 = 0.9.\nThe next level At the next level, we can attempt to improve our topology visualization by coloring nodes based on their health. Also, we can include health as a part of the information we show when the user taps on a service.\nApdex specifications recommend the following Apdex Quality Ratings by classifying Apdex Score as Excellent (0.94 - 1.00), Good (0.85 - 0.93), Fair (0.70 - 0.84), Poor (0.50 - 0.69) and Unacceptable (0.00 - 0.49).\nTo visualize this, let’s look at our topology using traffic light colors, marking our nodes as Healthy, At-Risk and Unhealthy, where Unhealthy indicates health that falls below 80%. A rate between 80% and 95% indicates At-Risk, and health at 95% and above is termed Healthy.\nLet’s incorporate this coloring into our topology visualization and take its usability to the next level. If implemented, we will be looking at something like this.\nMoving further Apdex provides tremendous visibility into customer satisfaction on the responsiveness of our services. Even more, by extending the implementation to the edges calling this service we get further insight into the health of the mesh itself.\nTwo services with similar Apdex scores offer the same customer satisfaction to the customer. However, the size of traffic that flows into the service can be of immense help in prioritizing between services to address. A service with higher traffic flow is an indication that this experience is impacting a significant number of users on the mesh.\nWhile health relates to a service, we can also analyze the interactions between two services and calculate the health of the interaction. This health calculation of every interaction on the mesh helps us establish a critical path, based on the health of all interactions in the entire topology.\nIn a big mesh, showing traffic as yet another number will make it more challenging to visualize and monitor. 
We can, with a bit of creativity, improve the entire visualization by rendering the edges that connect services with different thickness depending on the throughput of the service.\nAn unhealthy service participating in a high throughput transaction could lead to excessive consumption of resources. On the other hand, this visualization also offers a great tip to maximize investment in tuning services.\nTuning service that is a part of a high throughput transaction offers exponential benefits when compared to tuning an occasionally used service.\nIf we look at implementing such a visualization, which includes the health of interactions and throughput of such interactions, we would be looking at something like below :\nThe day is not far These capabilities are already available to users today as one of the UI features of Tetrate’s service mesh platform, using the highly configurable and performant observability and performance management framework: Apache SkyWalking (https://skywalking.apache.org), which monitors traffic across the mesh, aggregates RED metrics for both services and their interactions, continuously computes and monitors health of the services, and enables users to configure alerts and notifications when services cross specific thresholds, thereby having a comprehensive health visibility of the mesh.\nWith such tremendous visibility into our mesh performance, the day is not far when we at our NOC (Network Operations Center) for the mesh have this topology as our HUD (Heads Up Display).\nThis HUD, with the insights and patterns gathered over time, would predict situations and proactively prompt us on potential focus areas to improve customer satisfaction.\nThe visualization with rich historical data can also empower the Network Engineers to go back in time and look at the performance of the mesh on a similar day in the past.\nAn earnest implementation of such a visualization would be something like below :\nTo conclude With all the discussion so far, the health of a mesh is more about how our users feel, and what we can proactively do as service providers to sustain, if not enhance, the experience of our users.\nAs the world advances toward personalized medicine, we\u0026rsquo;re not far from a day when my doctor will text me: \u0026ldquo;How about feasting yourself with ice cream today and take the Gray Butte Trail to Mount Shasta!\u0026rdquo; Likewise, we can do more for our customers by having better insight into their overall wellness.\nTetrate’s approach to “service mesh health” is not only to offer management, monitoring and support but to make infrastructure healthy from the start to reduce the probability of incidents. Powered by the Istio, Envoy, and SkyWalking, Tetrate\u0026rsquo;s solutions enable consistent end-to-end observability, runtime security, and traffic management for any workload in any environment.\nOur customers deserve healthy systems! Please do share your thoughts on making service mesh an exciting and robust experience for our customers.\nReferences  https://en.wikipedia.org/wiki/Apdex https://www.apdex.org/overview.html https://www.apdex.org/index.php/specifications/ https://skywalking.apache.org/  ","title":"The Apdex Score for Measuring Service Mesh Health","url":"/blog/2020-07-26-apdex-and-skywalking/"},{"content":" 作者: Srinivasan Ramaswamy, tetrate 翻译:唐昊杰,南京大学在读学生 校对:吴晟 Original link, Tetrate.io blog July. 
26th, 2020  \u0026ldquo;你感觉怎么样\u0026rdquo; 比 \u0026ldquo;你的症状是什么\u0026rdquo; 更重要 背景 最近我拜访了我的医生。每次去看病,医生都会首先问我一连串轻快的问题,比如:你今天过得怎么样?上周过的怎么样?最近有什么出行吗?你打破了自己的骑车记录吗?你的锻炼计划实施如何?最后他会问:“你有什么麻烦吗?”如果这个时候我感觉自己不太好,我会说:“我这周感觉很沉闷,临近中午的时候感觉更累。”这时他就会拿出听诊器、脉搏血氧仪和血压仪。然后,如果他觉得自己需要更深入的了解情况,他就开始列出我需要做的具体检查。\n当我问他,最开始的讨论是否只是为了缓和氛围。他说:“这是必不可少的部分。它帮助我发现你感觉如何,而不是你的症状是什么。\u0026quot;。我们这样关于生活的开场聊天,帮助他组织了后续关于症状、调查和测试结果的问题。\n在回来的路上,我不停地问自己:“我们是不是也应该用这种方式管理我们的网格(service mesh)?”\n如果我把自己的健康检查和网格的健康检查进行类比,“医疗检查”就是日志分析,“调查”就是追踪,“症状”就是传统的RED指标(请求速率、请求错误和请求耗时)。那么根本的问题,就是我们在这里讨论的:健康因素(主要是网格的健康)。\n服务网格中的健康状况 我们可以通过RED指标来衡量任何被观察到的服务的性能。RED指标在了解每个服务的性能、可靠性和吞吐量方面提供了巨大的价值。这些指标在网格上的令人信服的可视化使得监控全部网格变得标准化和可扩展。此外,根据这些指标的阈值设置警报有助于在指标值异常的时候进行异常检测。\n为了建立任何服务的上下文环境并观察它们,理想的做法是将网格可视化为一个拓扑结构。\n网格的拓扑结构可视化不仅允许使用者挑选任意服务并观察其指标,还可以提供有关服务依赖和特定服务在网格上的潜在影响这些重要信息。\n虽然每个服务的RED指标为使用者提供了深刻的洞察能力,但使用者更关心网格的整体响应性,而非每个单独出来的服务的响应性。\n为了描述任意服务的性能(即从提交请求到收到完成了的http响应这段时间内的表现),我们会测量用户对响应性的感知。这种将响应时间与设定的阈值进行比较的衡量标准叫做Apdex。Apdex是衡量一个服务在网格中的健康程度的指标。\nApdex Apdex是根据设定的阈值和响应时间结合考虑的衡量标准。它是满意响应时间和不满意响应时间相对于总响应时间的比率。\nApdex是根据应用和服务的响应时间来衡量使用者满意程度的行业标准。它衡量的是用户对你的服务的满意程度,因为传统的指标(如平均响应时间)可能很快就会容易形成偏差。\n基于满意度的响应时间,表示特定服务的往返响应时间小于设定的阈值的次数。不满意响应时间虽然意思相反,但又进一步分为容忍型和失望型。容忍型包括了了任何响应时间不超过四倍阈值的表现,而任何超过四倍阈值或遇到了错误的表现都被认为是失望型。这里提到的阈值是我们对任意服务所期望的理想响应表现。我们可以设置一个全局范围的阈值,如,500ms。\nApdex得分是满意型请求和容忍型请求与做出的总请求的比率。\n每个_满意的请求_算作一个请求,而每个_容忍的请求_算作半个_满意_的请求。\n一个Apdex得分从0到1的范围内取值。0是最差的分数,表示用户总是感到失望;而'1\u0026rsquo;是最好的分数(100%的响应时间是令人满意的)。\n这个分数的百分比表示也可以用作服务的健康指标。\n数学表示 Apdex得分的实际计算是通过以下公式实现的:\n\t满意请求数 + ( 容忍请求数 / 2 ) Apdex 得分 = ------------------------------------------------------ 总请求数 此公示得到的百分率,即可视为服务的健康度。\n样例计算 在两分钟的采样时间内,主机处理200个请求。\nApdex阈值T设置为0.5秒(500ms)。\n*.\t170个请求在500ms内被处理完成,它们被分类为满意型。 *.\t20个请求在500ms和2秒间被处理,它们被分类为容忍型。 *.\t剩余的10个请求没有被正确处理或者处理时间超过了2秒,所以它们被分类为失望型。\n最终的Apdex得分是0.9,即(170 + (20 / 2))/ 200。\n深入使用 在接下来的层次,我们可以尝试通过根据节点的健康状况来着色节点以改进我们的拓扑可视化。此外,我们还可以在用户点击服务时将健康状况作为我们展示的信息的一部分。\nApdex规范推荐了以下Apdex质量评级,将Apdex得分分为优秀(0.94 - 1.00)、良好(0.85 - 0.93)、一般(0.70 - 0.84)、差(0.50 - 0.69)和不可接受(0.00 - 0.49)。\n为了可视化网格的健康状况,我们用交通灯的颜色将我们的节点标记为健康、有风险和不健康,其中不健康表示健康率低于80%。健康率在80%到95%之间的表示有风险,健康率在95%及以上的称为健康。\n让我们将这种着色融入到我们的拓扑可视化中,并将其可用性提升到一个新的水平。如果实施,我们将看到下图所示的情况。\n更进一步 Apdex为客户对我们服务响应性的满意度提供了可见性。更有甚者,通过将实施范围扩展到调用该服务的调用关系,我们可以进一步了解网格本身的健康状况。\n两个有着相似Apdex分数的服务,为客户提供了相同的客户满意度。然而,流入服务的流量大小对于优先处理哪一服务有着巨大的帮助。流量较高的服务表明这种服务体验影响了网格上更大量的使用者。\n虽然健康程度与单个服务有关,但我们也可以分析两个服务之间的交互并计算交互过程的健康程度。这种对网格上每一个交互的健康程度的计算,可以帮助我们根据整个拓扑结构中所有交互的健康程度,建立一个关键路径。\n在一个大的网格中,将流量展示为另一个数字将使可视化和监控更具挑战性。我们可以根据服务的吞吐量,通过用不同的粗细程度渲染连接服务的边来改善整个可视化的效果。\n一个位于高吞吐量事务的不健康的服务可能会导致资源的过度消耗。另一方面,这种可视化也为调整服务时获取最大化投资效果提供了一个很好的提示。\n与调整一个偶尔使用的服务相比,调整作为高吞吐量事务的一部分的那些服务会带来指数级的收益。\n实施这种包括了交互的健康状况和吞吐量的可视化,我们会看到下图所示的情况:\n这一天即将到来 目前,这些功能已经作为Tetrate服务网格平台的UI功能之一来提供给用户。该平台使用了高速可配置化、高性能的可观测性和监控性能管理平台:Apache SkyWalking (https://skywalking.apache.org),SkyWalking可以监控整个网格的流量,为服务及它们的交互合计RED指标,持续计算和监控服务的健康状况,并使用户能够在服务超过特定阈值时配置报警和通知。这些功能使得SkyWalking对网格拥有全面的健康状况可见性。\n有了这样强大的网格性能可视性,我们将可以在为网格准备的网络运营中心使用这种拓扑结构作为我们的HUD(Heads Up Display)。\nHUD随着时间的推移收集了解到的信息和模式,并将预测各种情况和主动提示我们潜在的重点领域以提高客户满意度。\n丰富的历史数据的可视化也可以使网络工程师能够看看过去中类似的一天的网格表现。\n可视化效果如下图所示。\n总结 综合到目前为止的所有讨论,网格的健康状况更多地是关于用户的感受,以及我们作为服务提供商可以采取积极行动来维持(如果不能增强)用户的体验。\n着个人化医学的发展,现在距离我的医生给我发这样短信的日子并不遥远:“要不今天享用冰淇淋并且沿着灰色小山步道到达沙斯塔山!”相似的,我们可以通过更好地了解客户的整体健康状况为他们做更多的事情。\nTetrate的“服务网格健康程度”方法不仅提供了管理,监视和支持,而且从一开始就使基础架构保持健康以减少事故发生的可能性。在Istio,Envoy和SkyWalking的支持下,Tetrate的解决方案可为任何环境中的任何工作负载提供持续的端到端可观察性,运行时安全性和流量管理。\n我们的客户应该拥有健康的系统!请分享您对使用服务网格为我们的客户带来令人兴奋和强健的体验的想法。\n引用  https://en.wikipedia.org/wiki/Apdex https://www.apdex.org/overview.html 
https://www.apdex.org/index.php/specifications/ https://skywalking.apache.org/  ","title":"度量服务网格健康度——Apdex得分","url":"/zh/2020-07-26-apdex-and-skywalking/"},{"content":"SkyWalking Python 0.1.0 is released. Go to downloads page to find release tars.\n API: agent core APIs, check the APIs and the examples Plugin: built-in libraries http, urllib.request and third-party library requests are supported. Test: agent test framework is set up, and the corresponding tests of aforementioned plugins are also added.  ","title":"Release Apache SkyWalking Python 0.1.0","url":"/events/release-apache-skywalking-python-0-1-0/"},{"content":"SkyWalking Chart 3.0.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 8.0.1  ","title":"Release Apache SkyWalking Chart 3.0.0 for SkyWalking 8.0.1","url":"/events/release-apache-skywalking-chart-3-0-0-for-skywalking-8-0-1/"},{"content":"Apache SkyWalking 8.0.1 已发布。SkyWalking 是观察性分析平台和应用性能管理系统,提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案,支持 Java, .Net Core, PHP, NodeJS, Golang, LUA 语言探针,支持 Envoy + Istio 构建的 Service Mesh。\n与 8.0.0 相比,此版本包含一个热修复程序。\nOAP-Backend\n 修复 no-init 模式在 Elasticsearch 存储中无法运行的错误  8.0.0 值得关注的变化:\n 添加并实现了 v3 协议,旧版本与 8.x 不兼容 移除服务、实例、端点注册机制和 inventory 存储实体 (inventory storage entities) 提供新的 GraphQL 查询协议,同时支持旧协议(计划在今年年底移除) 支持 Prometheus 网络协议,可将 Prometheus 格式的指标传输到 SkyWalking 中 提供 Python agent 移除所有 inventory 缓存 提供 Apache ShardingSphere (4.0.0, 4.1.1) agent 插件 UI dashboard 100% 可配置,可采用后台定义的新指标 修复 H2/MySQL 实现中的 SQL 注入漏洞 升级 Nacos 以避免 FastJson CVE 升级 jackson-databind 至 2.9.10  下载地址:http://skywalking.apache.org/downloads/\n","title":"Apache SkyWalking 8.0.1 发布","url":"/zh/2020-06-21-skywalking8-0-1-release/"},{"content":"SkyWalking Nginx LUA 0.2.0 is released. Go to downloads page to find release tars.\n Adapt the new v3 protocol. Implement correlation protocol. Support batch segment report.  ","title":"Release Apache SkyWalking Nginx LUA 0.2.0","url":"/events/release-apache-skywalking-nginx-lua-0-2-0/"},{"content":"SkyWalking APM 8.0.0 is released. Go to downloads page to find release tars.\nProject  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported (plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and Micrometer (Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entities. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics.
Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.0.0","url":"/events/release-apache-skywalking-apm-8-0-0/"},{"content":"可观察性平台和开源应用程序性能监控(APM)项目 Apache SkyWalking,今天刚宣布 8.0 的发布版本。素以强劲指标、追踪与服务网格能力见称的 SkyWalking ,在最新版本中的功能性延展到用户渴求已久的功能 —— 将指标功能和包括 Prometheus 的其他指标收集系统进行了融合。\n什么是 Apache SkyWalking? SkyWalking 是可观察性平台和 APM 工具,可以选择是否搭载服务网格的使用,为微服务、云原生和容器化应用提供自动度量功能。顶尖的 Apache 项目由来自世界各地的社区人员支持,应用在阿里巴巴、华为、腾讯、百度和大量其他企业。SkyWalking 提供记录、监控和追踪功能,同时也得力于其架构而拥有数据收集终端、分析平台,还有用户界面。\n值得关注的优化包括:  用户界面 Dashboard 上提供百分百的自由度,用户可以任意进行配置,采用后台新定义的指标。 支持 Prometheus 导出格式。Prometheus 格式的指标可以转换至 SkyWalking。 SkyWalking 现已可以自主监控服务网格,为 Istio 和 Envoy 提供指标。 服务、实例、终端地址的注册机制,和库存存储实体已经被移除了。  无须修改原始码的前提下,为用户界面加入新的指标 对于 SkyWalking 的用户,8.0 版本的亮点将会是数据模型的更新,而且传播格式也针对更多语言进行优化。再加上引进了新的 MeterSystem ,除了可以同步运行传统追踪模式,用户还可自定义需要收集的指标。追踪和服务网格专注在拓扑和服务流量的指标上,而 MeterSystem 则汇报用户感兴趣的业务指标,例如是数据库存取性能、圣诞节期间的下单率,或者用户注册或下单的百分比。这些指标数据会在 SkyWalking 的用户界面 Dashboard 上以图像显示。指标的面板数据和拓扑图可以通过 Envoy 的指标绘制,而追踪分析也可以支持 Istio 的遥测。Dashboard 还支持以 JSON 格式导入、导出,而 Dashboard 上的自定义指标也支持设定指标名称、实体种类(服务、实例、终端地址或全部)、标记值等。用户界面模板上已详细描述了用户界面的逻辑和原型配置,以及它的 Dashboard、tab 和组件。\n观察任何配备了 Prometheus 的应用 在这次最新的社区发布中,SkyWalking 可以观察任何配备了 Prometheus 或者提供了 Prometheus 终端地址的应用。这项更新为很多想采用 SkyWalking 指标和追踪的用户节省了不少时间,现在你不再需要重新设置指标工具,就可以获得 Prometheus 数据。因为 Prometheus 更简单、更为人熟悉,是不少用户的不二选择。有了 8.0 版本,Prometheus 网络协议就能够读取所有已设定在 API 上的数据,另外 Prometheus 格式的指标也可转换至 SkyWalking 上。如此一来,通过图像方式展示,所有的指标和拓扑都能一目了然。同时,也支持 Prometheus 的 fetcher。\n监控你的网格 SkyWalking 现在不再只是监控服务或平台,而是监控整个网格。有了 8.0 版本,你除了能获取关于你的网格的指标(包括 Istio 和 Envoy 在内),同时也能通过 SkyWalking 监控自身的性能。因为当监控服务在观察业务集群的同时,它也能实现自我观察,确保运维团队拥有稳定可靠的平台。\n性能优化 最后,8.0 发布移除了注册机制,也不再需要使用独一无二的整数来代表实体。这项改变将大幅优化性能。想了解完整的更新功能列表,可以阅读在 SkyWalking 社区发布的公告页面。\n额外资源  追踪 Twitter 获取更多 SkyWalking 最新资讯 SkyWalking 未来的发布会加入原生指标 API 和融合 Micrometer (Sleuth) 指标集合。  ","title":"SkyWalking 的最新动向?8.0 版本的 MeterSystem 和网格监控","url":"/zh/whats-new-in-skywalking-metersystem-and-mesh-monitoring-in-8-0/"},{"content":"作者:宋净超、张伟\n日前,云原生网络代理 MOSN v0.12.0 发布,观察性分析平台和应用性能管理系统 SkyWalking 完成了与 MOSN 的集成,作为 MOSN 中的支持的分布式追踪系统之一,旨在实现在微服务和 Service Mesh 中的更强大的可观察性。\n背景 
相比传统的巨石(Monolith)应用,微服务的一个主要变化是将应用中的不同模块拆分为了独立的进程。在微服务架构下,原来进程内的方法调用成为了跨进程的远程方法调用。相对于单一进程内的方法调用而言,跨进程调用的调试和故障分析是非常困难的,难以使用传统的代码调试程序或者日志打印来对分布式的调用过程进行查看和分析。\n如上图右边所示,微服务架构中系统中各个微服务之间存在复杂的调用关系。\n一个来自客户端的请求在其业务处理过程中经过了多个微服务进程。我们如果想要对该请求的端到端调用过程进行完整的分析,则必须将该请求经过的所有进程的相关信息都收集起来并关联在一起,这就是“分布式追踪”。\n以上关于分布式追踪的介绍引用自 Istio Handbook。\nMOSN 中 tracing 的架构 MOSN 的 tracing 框架由 Driver、Tracer 和 Span 三个部分组成。\nDriver 是 Tracer 的容器,管理注册的 Tracer 实例,Tracer 是 tracing 的入口,根据请求信息创建一个 Span,Span 存储当前跨度的链路信息。\n目前 MOSN tracing 有 SOFATracer 和 SkyWalking 两种实现。SOFATracer 支持 http1 和 xprotocol 协议的链路追踪,将 trace 数据写入本地日志文件中。SkyWalking 支持 http1 协议的链路追踪,使用原生的 Go 语言探针 go2sky 将 trace 数据通过 gRPC 上报到 SkyWalking 后端服务。\n快速开始 下面将使用 Docker 和 docker-compose 来快速开始运行一个集成了 SkyWalking 的分布式追踪示例,该示例代码请见 MOSN GitHub。\n准备 安装 docker 和 docker-compose。\n  安装 docker\n  安装 docker-compose\n  需要一个编译好的 MOSN 程序,您可以下载 MOSN 源码自行编译,或者直接下载 MOSN v0.12.0 发行版以获取 MOSN 的运行时二进制文件。\n下面将以源码编译的方式演示 MOSN 如何与 SkyWalking 集成。\ncd ${projectpath}/cmd/mosn/main go build 获取示例代码目录。\n${targetpath} = ${projectpath}/examples/codes/trace/skywalking/http/ 将编译好的程序移动到示例代码目录。\nmv main ${targetpath}/ cd ${targetpath} 目录结构 下面是 SkyWalking 的目录结构。\n* skywalking └─── http │ main # 编译完成的 MOSN 程序 | server.go # 模拟的 Http Server | clint.go # 模拟的 Http Client | config.json # MOSN 配置 | skywalking-docker-compose.yaml # skywalking docker-compose 运行说明 启动 SkyWalking oap \u0026amp; ui。\ndocker-compose -f skywalking-docker-compose.yaml up -d 启动一个 HTTP Server。\ngo run server.go 启动 MOSN。\n./main start -c config.json 启动一个 HTTP Client。\ngo run client.go 打开 http://127.0.0.1:8080 查看 SkyWalking-UI,SkyWalking Dashboard 界面如下图所示。\n在打开 Dashboard 后请点击右上角的 Auto 按钮以使页面自动刷新。\nDemo 视频 下面来看一下该 Demo 的操作视频。\n\n清理 要想销毁 SkyWalking 后台运行的 docker 容器只需要下面的命令。\ncd ${projectpath}/examples/codes/trace/skywalking/http/ docker-compose -f skywalking-docker-compose.yaml down 未来计划 在今年五月份,SkyWalking 8.0 版本会进行一次全面升级,采用新的探针协议和分析逻辑,探针将更具互感知能力,更好的在 Service Mesh 下使用探针进行监控。同时,SkyWalking 将开放之前仅存在于内核中的 metrics 指标分析体系。Prmoetheus、Spring Cloud Sleuth、Zabbix 等常用的 metrics 监控方式,都会被统一的接入进来,进行分析。此外, SkyWalking 与 MOSN 社区将继续合作:支持追踪 Dubbo 和 SOFARPC,同时适配 sidecar 模式下的链路追踪。\n关于 MOSN MOSN 是一款使用 Go 语言开发的网络代理软件,由蚂蚁金服开源并经过几十万容器的生产级验证。 MOSN 作为云原生的网络数据平面,旨在为服务提供多协议、模块化、智能化、安全的代理能力。 MOSN 是 Modular Open Smart Network 的简称。 MOSN 可以与任何支持 xDS API 的 Service Mesh 集成,亦可以作为独立的四、七层负载均衡,API Gateway、云原生 Ingress 等使用。\n GitHub:https://github.com/mosn/mosn 官网:https://mosn.io  关于 Skywalking SkyWalking 是观察性分析平台和应用性能管理系统。提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案。支持 Java、.Net Core、PHP、NodeJS、Golang、LUA 语言探针,支持 Envoy/MOSN + Istio 构建的 Service Mesh。\n GitHub:https://github.com/apache/skywalking 官网:https://skywalking.apache.org  关于本文中的示例请参考 MOSN GitHub 和 MOSN 官方文档。\n","title":"SkyWalking 支持云原生网络代理 MOSN 做分布式追踪","url":"/zh/2020-04-28-skywalking-and-mosn/"},{"content":"Based on his continuous contributions, Wei Zhang (a.k.a arugal) has been invited to join the PMC. Welcome aboard.\n","title":"Welcome Wei Zhang to join the PMC","url":"/events/welcome-wei-zhang-to-join-the-pmc/"},{"content":"目录:\n 1. 概述 2. 搭建 SkyWalking 单机环境 3. 搭建 SkyWalking 集群环境 4. 告警 5. 注意事项 6. Spring Boot 使用示例 6. Spring Cloud 使用示例    作者:芋道源码 原文地址   1. 
概述 1.1 概念 SkyWalking 是什么?\n FROM http://skywalking.apache.org/\n分布式系统的应用程序性能监视工具,专为微服务、云原生架构和基于容器(Docker、K8s、Mesos)架构而设计。\n提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案。\n 1.2 功能列表 SkyWalking 有哪些功能?\n FROM http://skywalking.apache.org/\n 多种监控手段。可以通过语言探针和 service mesh 获得监控是数据。 多个语言自动探针。包括 Java,.NET Core 和 Node.JS。 轻量高效。无需大数据平台,和大量的服务器资源。 模块化。UI、存储、集群管理都有多种机制可选。 支持告警。 优秀的可视化解决方案。   1.3 整体架构 SkyWalking 整体架构如何?\n FROM http://skywalking.apache.org/\n 整个架构,分成上、下、左、右四部分:\n 考虑到让描述更简单,我们舍弃掉 Metric 指标相关,而着重在 Tracing 链路相关功能。\n  上部分 Agent :负责从应用中,收集链路信息,发送给 SkyWalking OAP 服务器。目前支持 SkyWalking、Zikpin、Jaeger 等提供的 Tracing 数据信息。而我们目前采用的是,SkyWalking Agent 收集 SkyWalking Tracing 数据,传递给服务器。 下部分 SkyWalking OAP :负责接收 Agent 发送的 Tracing 数据信息,然后进行分析(Analysis Core) ,存储到外部存储器( Storage ),最终提供查询( Query )功能。 右部分 Storage :Tracing 数据存储。目前支持 ES、MySQL、Sharding Sphere、TiDB、H2 多种存储器。而我们目前采用的是 ES ,主要考虑是 SkyWalking 开发团队自己的生产环境采用 ES 为主。 左部分 SkyWalking UI :负责提供控台,查看链路等等。  1.4 官方文档 在 https://github.com/apache/skywalking/tree/master/docs 地址下,提供了 SkyWalking 的英文文档。\n考虑到大多数胖友的英语水平和艿艿不相伯仲,再加上胖友一开始对 SkyWalking 比较陌生,所以比较推荐先阅读 https://github.com/SkyAPM/document-cn-translation-of-skywalking 地址,提供了 SkyWalking 的中文文档。\n考虑到胖友使用 SkyWalking 的目的,是实现分布式链路追踪的功能,所以最好去了解下相关的知识。这里推荐阅读两篇文章:\n 《OpenTracing 官方标准 —— 中文版》 Google 论文 《Dapper,大规模分布式系统的跟踪系统》  2. 搭建 SkyWalking 单机环境 考虑到让胖友更快的入门,我们来搭建一个 SkyWalking 单机环境,步骤如下:\n 第一步,搭建一个 Elasticsearch 服务。 第二步,下载 SkyWalking 软件包。 第三步,搭建一个 SkyWalking OAP 服务。 第四步,启动一个 Spring Boot 应用,并配置 SkyWalking Agent。 第五步,搭建一个 SkyWalking UI 服务。  仅仅五步,按照艿艿标题党的性格,应该给本文取个《10 分钟快速搭建 SkyWalking 服务》标题才对,哈哈哈。\n2.1 Elasticsearch 搭建  FROM https://www.elastic.co/cn/products/elasticsearch\nElasticsearch 是一个分布式、RESTful 风格的搜索和数据分析引擎,能够解决不断涌现出的各种用例。 作为 Elastic Stack 的核心,它集中存储您的数据,帮助您发现意料之中以及意料之外的情况。\n 参考《Elasticsearch 极简入门》的「1. 
单机部署」小节,搭建一个 Elasticsearch 单机服务。\n不过要注意,本文使用的是 Elasticsearch 7.5.1 版本。因为 SkyWalking 6.6.0 版本,增加了对 Elasticsearch 7.X 版本的支持。当然,如果胖友使用 Elasticsearch 6.X 版本也是可以的。\n2.2 下载 SkyWalking 软件包 对于 SkyWalking 的软件包,有两种方式获取:\n 手动编译 官方包  一般情况下,我们建议使用官方包。手动编译,更多是尝鲜或者等着急修复的 BUG 的版本。\n2.2.1 官方包 在 http://skywalking.apache.org/downloads/ 下,我们下载操作系统对应的发布版。\n这里,我们选择 Binary Distribution for ElasticSearch 7 (Linux) 版本,因为艿艿是 Mac 环境,再加上想使用 Elasticsearch 7.X 版本作为存储。如果胖友想用 Elasticsearch 6.X 版本作为存储,记得下载 Binary Distribution (Linux) 版本。\n① 下载:\n# 创建目录 $ mkdir -p /Users/yunai/skywalking $ cd /Users/yunai/skywalking # 下载 $ wget http://mirror.bit.edu.cn/apache/skywalking/6.6.0/apache-skywalking-apm-es7-6.6.0.tar.gz ② 解压:\n# 解压 $ tar -zxvf apache-skywalking-apm-es7-6.6.0.tar.gz $ cd apache-skywalking-apm-bin-es7 $ ls -ls 4 drwxr-xr-x 8 root root 4096 Sep 9 15:09 agent # SkyWalking Agent 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 bin # 执行脚本 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 config # SkyWalking OAP Server 配置文件 32 -rwxr-xr-x 1 root root 28903 Sep 9 14:32 LICENSE 4 drwxr-xr-x 3 root root 4096 Sep 9 15:44 licenses 32 -rwxr-xr-x 1 root root 31850 Sep 9 14:32 NOTICE 16 drwxr-xr-x 2 root root 16384 Sep 9 15:22 oap-libs # SkyWalking OAP Server 4 -rw-r--r-- 1 root root 1978 Sep 9 14:32 README.txt 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 webapp # SkyWalking UI 2.2.2 手动编译  友情提示:如果胖友没有编译 SkyWalking 源码的诉求,可以跳过本小节。\n 参考 How to build project 文章。\n需要前置安装如下:\n GIT JDK 8+ Maven  ① 克隆代码:\n$ git clone https://github.com/apache/skywalking.git  因为网络问题,可能克隆会有点久。  ② 初始化子模块:\n$ cd skywalking $ git submodule init $ git submodule update ③ 编译\n$ ./mvnw clean package -DskipTests  编译过程,如果机子比较差,花费时间会比较久。  ④ 查看编译结果\n$ cd apm-dist # 编译结果目录 $ cd target $ tar -zxvf apache-skywalking-apm-bin.tar.gz # 解压 Linux 包 $ cd apache-skywalking-apm-bin $ ls -ls 4 drwxr-xr-x 8 root root 4096 Sep 9 15:09 agent # SkyWalking Agent 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 bin # 执行脚本 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 config # SkyWalking OAP Server 配置文件 32 -rwxr-xr-x 1 root root 28903 Sep 9 14:32 LICENSE 4 drwxr-xr-x 3 root root 4096 Sep 9 15:44 licenses 32 -rwxr-xr-x 1 root root 31850 Sep 9 14:32 NOTICE 16 drwxr-xr-x 2 root root 16384 Sep 9 15:22 oap-libs # SkyWalking OAP Server 4 -rw-r--r-- 1 root root 1978 Sep 9 14:32 README.txt 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 webapp # SkyWalking UI 2.3 SkyWalking OAP 搭建 ① 修改 OAP 配置文件\n 友情提示:如果配置文件,适合 SkyWalking 6.X 版本。\n $ vi config/application.ymlstorage:elasticsearch7:nameSpace:${SW_NAMESPACE:\u0026#34;elasticsearch\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}# trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}# trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}# Those data TTL settings will override the same settings in core module.recordDataTTL:${SW_STORAGE_ES_RECORD_DATA_TTL:7}# Unit is dayotherMetricsDataTTL:${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45}# Unit is daymonthMetricsDataTTL:${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18}# Unit is month# Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.htmlbulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the bulk every 1000 
requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}# h2:# driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}# url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}# user: ${SW_STORAGE_H2_USER:sa}# metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000} storage.elasticsearch7 配置项,设置使用 Elasticsearch 7.X 版本作为存储器。  这里,我们打开注释,并记得通过 nameSpace 设置 Elasticsearch 集群名。   storage.elasticsearch 配置项,设置使用 Elasticsearch 6.X 版本作为存储器。  这里,我们无需做任何改动。 如果胖友使用 Elasticsearch 6.X 版本作为存储器,记得设置这个配置项,而不是 storage.elasticsearch7 配置项。   storage.h2 配置项,设置使用 H2 作为存储器。  这里,我们需要手动注释掉,因为 H2 是默认配置的存储器。     友情提示:如果配置文件,适合 SkyWalking 7.X 版本。\n  重点修改 storage 配置项,通过 storage.selector 配置项来设置具体使用的存储器。 storage.elasticsearch 配置项,设置使用 Elasticsearch 6.X 版本作为存储器。胖友可以主要修改 nameSpace、clusterNodes 两个配置项即可,设置使用的 Elasticsearch 的集群和命名空间。 storage.elasticsearch7 配置项,设置使用 Elasticsearch 7.X 版本作为存储器。 还有 MySQL、H2、InfluxDB 等等存储器的配置可以选择,胖友自己根据需要去选择哈~  ② 启动 SkyWalking OAP 服务\n$ bin/oapService.sh SkyWalking OAP started successfully! 是否真正启动成功,胖友打开 logs/skywalking-oap-server.log 日志文件,查看是否有错误日志。首次启动时,因为 SkyWalking OAP 会创建 Elasticsearch 的索引,所以会“疯狂”的打印日志。最终,我们看到如下日志,基本可以代表 SkyWalking OAP 服务启动成功:\n 友情提示:因为首次启动会创建 Elasticsearch 索引,所以可能会比较慢。\n 2020-01-02 18:22:53,635 - org.eclipse.jetty.server.Server - 444 [main] INFO [] - Started @35249ms 2.4 SkyWalking UI 搭建 ① 启动 SkyWalking UI 服务\nbin/webappService.sh SkyWalking Web Application started successfully! 是否真正启动成功,胖友打开 logs/logs/webapp.log 日志文件,查看是否有错误日志。最终,我们看到如下日志,基本可以代表 SkyWalking UI 服务启动成功:\n2020-01-02 18:27:02.824 INFO 48250 --- [main] o.a.s.apm.webapp.ApplicationStartUp : Started ApplicationStartUp in 7.774 seconds (JVM running for 8.316) 如果想要修改 SkyWalking UI 服务的参数,可以编辑 webapp/webapp.yml 配置文件。例如说:\n server.port :SkyWalking UI 服务端口。 collector.ribbon.listOfServers :SkyWalking OAP 服务地址数组。因为 SkyWalking UI 界面的数据,是通过请求 SkyWalking OAP 服务来获得的。  ② 访问 UI 界面:\n浏览器打开 http://127.0.0.1:8080 。界面如下图:2.5 SkyWalking Agent 大多数情况下,我们在启动项目的 Shell 脚本上,通过 -javaagent 参数进行配置 SkyWalking Agent 。我们在 「2.3.1 Shell」 小节来看。\n考虑到偶尔我们需要在 IDE 中,也希望使用 SkyWalking Agent ,所以我们在 「2.3.2 IDEA」 小节来看。\n2.3.1 Shell ① Agent 软件包\n我们需要将 apache-skywalking-apm-bin/agent 目录,拷贝到 Java 应用所在的服务器上。这样,Java 应用才可以配置使用该 SkyWalking Agent。我们来看看 Agent 目录下有哪些:\n$ ls -ls total 35176 0 drwxr-xr-x@ 7 yunai staff 224 Dec 24 14:20 activations 0 drwxr-xr-x@ 4 yunai staff 128 Dec 24 14:21 bootstrap-plugins 0 drwxr-xr-x@ 3 yunai staff 96 Dec 24 14:12 config # SkyWalking Agent 配置 0 drwxr-xr-x@ 3 yunai staff 96 Jan 2 19:29 logs # SkyWalking Agent 日志 0 drwxr-xr-x@ 13 yunai staff 416 Dec 24 14:22 optional-plugins # 可选插件 0 drwxr-xr-x@ 68 yunai staff 2176 Dec 24 14:20 plugins # 插件 35176 -rw-r--r--@ 1 yunai staff 18006420 Dec 24 14:12 skywalking-agent.jar # SkyWalking Agent  关于 SkyWalking Agent 提供的插件列表,可以看看《SkyWalking 文档 —— 插件支持列表》。  因为艿艿是在本机测试,所以无需拷贝,SkyWalking Agent 目录是 /Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/。\n考虑到方便胖友,艿艿这里提供了一个最简的 Spring Boot 应用 lab-39-demo-2.2.2.RELEASE.jar。对应 Github 仓库是 lab-39-demo。\n② 配置 Java 启动脚本\n# SkyWalking Agent 配置 export SW_AGENT_NAME=demo-application # 配置 Agent 名字。一般来说,我们直接使用 Spring Boot 项目的 `spring.application.name` 。 export SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800 # 配置 
Collector 地址。 export SW_AGENT_SPAN_LIMIT=2000 # 配置链路的最大 Span 数量。一般情况下,不需要配置,默认为 300 。主要考虑,有些新上 SkyWalking Agent 的项目,代码可能比较糟糕。 export JAVA_AGENT=-javaagent:/Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/skywalking-agent.jar # SkyWalking Agent jar 地址。 # Jar 启动 java -jar $JAVA_AGENT -jar lab-39-demo-2.2.2.RELEASE.jar  通过环境变量,进行配置。 更多的变量,可以在 /work/programs/skywalking/apache-skywalking-apm-bin/agent/config/agent.config 查看。要注意,可能有些变量是被注释掉的,例如说 SW_AGENT_SPAN_LIMIT 对应的 agent.span_limit_per_segment 。  ③ 执行脚本:\n直接执行上述的 Shell 脚本,启动 Java 项目。在启动日志中,我们可以看到 SkyWalking Agent 被加载的日志。日志示例如下:\nDEBUG 2020-01-02 19:29:29:400 main AgentPackagePath : The beacon class location is jar:file:/Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/skywalking-agent.jar!/org/apache/skywalking/apm/agent/core/boot/AgentPackagePath.class. INFO 2020-01-02 19:29:29:402 main SnifferConfigInitializer : Config file found in /Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/config/agent.config. 同时,也可以在 /Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/agent/logs/skywalking-api.log 查看对应的 SkyWalking Agent 日志。日志示例如下:\nDEBUG 2020-01-02 19:37:22:539 SkywalkingAgent-5-ServiceAndEndpointRegisterClient-0 ServiceAndEndpointRegisterClient : ServiceAndEndpointRegisterClient running, status:CONNECTED.  这里,我们看到 status:CONNECTED ,表示 SkyWalking Agent 连接 SkyWalking OAP 服务成功。  ④ 简单测试\n完事,可以去 SkyWalking UI 查看是否链路收集成功。\n1、首先,使用浏览器,访问下 http://127.0.0.1:8079/demo/echo 地址,请求下 Spring Boot 应用提供的 API。因为,我们要追踪下该链路。\n2、然后,继续使用浏览器,打开 http://127.0.0.1:8080/ 地址,进入 SkyWalking UI 界面。如下图所示:这里,我们会看到 SkyWalking 中非常重要的三个概念:\n  服务(Service) :表示对请求提供相同行为的一系列或一组工作负载。在使用 Agent 或 SDK 的时候,你可以定义服务的名字。如果不定义的话,SkyWalking 将会使用你在平台(例如说 Istio)上定义的名字。\n 这里,我们可以看到 Spring Boot 应用的服务为 \u0026quot;demo-application\u0026quot;,就是我们在环境变量 SW_AGENT_NAME 中所定义的。\n   服务实例(Service Instance) :上述的一组工作负载中的每一个工作负载称为一个实例。就像 Kubernetes 中的 pods 一样, 服务实例未必就是操作系统上的一个进程。但当你在使用 Agent 的时候, 一个服务实例实际就是操作系统上的一个真实进程。\n 这里,我们可以看到 Spring Boot 应用的服务为 {agent_name}-pid:{pid}@{hostname},由 Agent 自动生成。关于它,我们在「5.1 hostname」小节中,有进一步的讲解,胖友可以瞅瞅。\n   端点(Endpoint) :对于特定服务所接收的请求路径, 如 HTTP 的 URI 路径和 gRPC 服务的类名 + 方法签名。\n 这里,我们可以看到 Spring Boot 应用的一个端点,为 API 接口 /demo/echo。\n   3、之后,点击「拓扑图」菜单,进入查看拓扑图的界面。如下图所示:4、再之后,点击「追踪」菜单,进入查看链路数据的界面。如下图所示:2.3.2 IDEA 我们统一使用 IDEA 作为开发 IDE ,所以忽略 Eclipse 的配置方式。\n具体参考下图,比较简单:3. 搭建 SkyWalking 集群环境 在生产环境下,我们一般推荐搭建 SkyWalking 集群环境。😈 当然,如果公司比较抠门,也可以在生产环境下使用 SkyWalking 单机环境,毕竟 SkyWalking 挂了之后,不影响业务的正常运行。\n搭建一个 SkyWalking 集群环境,步骤如下:\n 第一步,搭建一个 Elasticsearch 服务的集群。 第二步,搭建一个注册中心的集群。目前 SkyWalking 支持 Zookeeper、Kubernetes、Consul、Nacos 作为注册中心。 第三步,搭建一个 SkyWalking OAP 服务的集群,同时参考《SkyWalking 文档 —— 集群管理》,将 SkyWalking OAP 服务注册到注册中心上。 第四步,启动一个 Spring Boot 应用,并配置 SkyWalking Agent。另外,在设置 SkyWaling Agent 的 SW_AGENT_COLLECTOR_BACKEND_SERVICES 地址时,需要设置多个 SkyWalking OAP 服务的地址数组。 第五步,搭建一个 SkyWalking UI 服务的集群,同时使用 Nginx 进行负载均衡。另外,在设置 SkyWalking UI 的 collector.ribbon.listOfServers 地址时,也需要设置多个 SkyWalking OAP 服务的地址数组。  😈 具体的搭建过程,并不复杂,胖友自己去尝试下。\n4. 告警 在 SkyWaling 中,已经提供了告警功能,具体可见《SkyWalking 文档 —— 告警》。\n默认情况下,SkyWalking 已经内置告警规则。同时,我们可以参考告警规则,进行自定义。\n在满足 SkyWalking 告警规则的触发规则时,我们在 SkyWaling UI 的告警界面,可以看到告警内容。如下图所示:同时,我们自定义 Webhook ,对接 SkyWalking 的告警请求。而具体的邮箱、钉钉等告警方式,需要自己进行开发。至于自定义 WebHook 如何实现,可以参考:\n Java 语言:  《基于 SkyWalking 的分布式跟踪系统 - 异常告警》   Go 语言:  dingding-notify-for-skywalking infra-skywalking-webhook    5. 
注意事项 5.1 hostname 配置 在 SkyWalking 中,每个被监控的实例的名字,会包含 hostname 。格式为:{agent_name}-pid:{pid}@{hostname} ,例如说:\u0026quot;scrm-scheduler-pid:27629@iZbp1e2xlyvr7fh67qi59oZ\u0026quot; 。\n因为有些服务器未正确设置 hostname ,所以我们一定要去修改,不然都不知道是哪个服务器上的实例(😈 鬼知道 \u0026quot;iZbp1e2xlyvr7fh67qi59oZ\u0026quot; 一串是哪个服务器啊)。\n修改方式如下:\n1、修改 /etc/hosts 的 hostname :\n127.0.0.1 localhost ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 10.80.62.151 pre-app-01 # 就是这个,其中 10.80.62.151 是本机内网 IP ,pre-app-01 是 hostname 。 2、修改本机 hostname :\n参考 《CentOS7 修改主机名(hostname)》\n$ hostname pre-app-01 # 其中 pre-app-01 就是你希望的 hostname 。 $ hostnamectl set-hostname pre-app-01 # 其中 pre-app-01 就是你希望的 hostname 。 6. Spring Boot 使用示例 在 《芋道 Spring Boot 链路追踪 SkyWalking 入门》 中,我们来详细学习如何在 Spring Boot 中,整合并使用 SkyWalking 收集链路数据。😈 相比「2.5 SkyWaling Agent」来说,我们会提供更加丰富的示例哟。\n7. Spring Cloud 使用示例 在 《芋道 Spring Cloud 链路追踪 SkyWalking 入门》 中,我们来详细学习如何在 Spring Cloud 中,整合并使用 SkyWalking 收集链路数据。😈 相比「2.5 SkyWaling Agent」来说,我们会提供更加丰富的示例哟。\n666. 彩蛋 本文仅仅是简单的 SkyWalking 入门文章,如果胖友想要更好的使用 SkyWalking,推荐通读下《SkyWalking 文档》。\n想要进一步深入的胖友,也可以阅读如下资料:\n 《SkyWalking 源码解析》 《APM 巅峰对决:Apache Skywalking P.K. Pinpoint》 《SkyWalking 官方 —— 博客合集》  😈 最后弱弱的问一句,上完 SkyWaling 之后,有没发现自己系统各种地方慢慢慢!嘻嘻。\n","title":"SkyWalking 极简入门","url":"/zh/2020-04-19-skywalking-quick-start/"},{"content":"This post originally appears on The New Stack\nThis post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. 
Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling with Apache SkyWalking 7 Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedExceptione) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. 
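Before looking at what SkyWalking reports for this demo, it may help to make the snapshot-merging idea from the profiling section above concrete. The following is a minimal, illustrative sketch, not SkyWalking's actual implementation: the class, method and variable names are invented for this example. It estimates a method's duration by summing the gaps between consecutive thread dumps in which the method appears, exactly the "dumps 4-8" style estimate described earlier.

// Illustrative sketch only; not SkyWalking's real profiler code.
public final class DumpBasedEstimator {

    // timestamps[i] is when snapshot i was taken; present[i] says whether the
    // profiled method appeared in that snapshot's stack trace.
    static long estimateDurationMillis(long[] timestamps, boolean[] present) {
        long estimated = 0;
        for (int i = 1; i < timestamps.length; i++) {
            if (present[i - 1] && present[i]) {
                estimated += timestamps[i] - timestamps[i - 1];
            }
        }
        return estimated;
    }

    public static void main(String[] args) {
        long[] ts = new long[10];
        boolean[] seen = new boolean[10];
        for (int i = 0; i < 10; i++) {
            ts[i] = i * 10L;              // one dump every 10 ms
            seen[i] = (i >= 3 && i <= 7); // the method is visible in dumps 4..8 (1-based)
        }
        // Prints 40: four 10 ms intervals, between dumps 4-5, 5-6, 6-7 and 7-8.
        System.out.println(estimateDurationMillis(ts, seen));
    }
}

The real agent additionally matches stack depth and method signature across dumps, respects the configured dump period and parallel-degree limits, and attaches the estimate to the active trace segment; all of that is deliberately left out of this sketch.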
In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nHow to Try This Everything we discussed, including the Apache SkyWalking Java Agent, profile analysis code, and UI, could be found in our GitHub repository. We hope you enjoyed this new profile method, and love Apache SkyWalking. If so, give us a star on GitHub to encourage us.\nSkyWalking 7 has just been released. You can contact the project team through the following channels:\n Follow SkyWalking twitter. Subscribe mailing list: dev@skywalking.apache.org. Send to dev-subscribe@kywalking.apache.org to subscribe to the mail list.  Co-author Sheng Wu is a Tetrate founding engineer and the founder and VP of Apache SkyWalking. He is solving the problem of observability for large-scale service meshes in hybrid and multi-cloud environments.\nAdrian Cole works in the Spring Cloud team at VMware, mostly on Zipkin\nHan Liu is a tech expert at Lagou. He is an Apache SkyWalking committer\n","title":"Apache SkyWalking: Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/blog/2020-04-13-apache-skywalking-profiling/"},{"content":"SkyWalking Chart 2.0.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 7.0.0 Support set ES user/password Add CI for release  ","title":"Release Apache SkyWalking Chart 2.0.0 for SkyWalking 7.0.0","url":"/events/release-apache-skywalking-chart-2-0-0-for-skywalking-7-0-0/"},{"content":"SkyWalking APM 7.0.0 is release. 
Go to downloads page to find release tars.\n Upgrade JDK minimal JDK requirement to JDK8 Support profiling code level performance Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. V6 is required.  ","title":"Release Apache SkyWalking APM 7.0.0","url":"/events/release-apache-skywalking-apm-7-0-0/"},{"content":" 作者:吴晟,刘晗 原文地址  在本文中,我们详细介绍了代码级的性能剖析方法,以及我们在 Apache SkyWalking 中的实践。希望能够帮助大家在线定位系统性能短板,缓解系统压力。\n分布式链路追踪的局限性 在传统的监控系统中,我们如果想要得知系统中的业务是否正常,会采用进程监控、日志收集分析等方式来对系统进行监控。当机器或者服务出现问题时,则会触发告警及时通知负责人。通过这种方式,我们可以得知具体哪些服务出现了问题。但是这时我们并不能得知具体的错误原因出在了哪里,开发人员或者运维人员需要到日志系统里面查看错误日志,甚至需要到真实的业务服务器上查看执行情况来解决问题。\n如此一来,仅仅是发现问题的阶段,可能就会耗费相当长的时间;另外,发现问题但是并不能追溯到问题产生具体原因的情况,也常有发生。这样反反复复极其耗费时间和精力,为此我们便有了基于分布式追踪的 APM 系统。\n通过将业务系统接入分布式追踪中,我们就像是给程序增加了一个放大镜功能,可以清晰看到真实业务请求的整体链路,包括请求时间、请求路径,甚至是操作数据库的语句都可以看得一清二楚。通过这种方式,我们结合告警便可以快速追踪到真实用户请求的完整链路信息,并且这些数据信息完全是持久化的,可以随时进行查询,复盘错误的原因。\n然而随着我们对服务监控理解的加深,我们发现事情并没有那么简单。在分布式链路追踪中我们有这样的两个流派:代码埋点和字节码增强。无论使用哪种方式,底层逻辑一定都逃不过面向切面这个基础逻辑。因为只有这样才可以做到大面积的使用。这也就决定了它只能做到框架级别和 RPC 粒度的监控。这时我们可能依旧会遇到程序执行缓慢或者响应时间不稳定等情况,但无法具体查询到原因。这时候,大家很自然的会考虑到增加埋点粒度,比如对所有的 Spring Bean 方法、甚至主要的业务层方法都加上埋点。但是这种思路会遇到不小的挑战:\n第一,增加埋点时系统开销大,埋点覆盖不够全面。通过这种方式我们确实可以做到具体业务场景具体分析。但随着业务不断迭代上线,弊端也很明显:大量的埋点无疑会加大系统资源的开销,造成 CPU、内存使用率增加,更有可能拖慢整个链路的执行效率。虽然每个埋点消耗的性能很小,在微秒级别,但是因为数量的增加,甚至因为业务代码重用造成重复埋点或者循环使用,此时的性能开销已经无法忽略。\n第二,动态埋点作为一项埋点技术,和手动埋点的性能消耗上十分类似,只是减少的代码修改量,但是因为通用技术的特别,上一个挑战中提到的循环埋点和重复使用的场景甚至更为严重。比如选择所有方法或者特定包下的所有方法埋点,很可能造成系统性能彻底崩溃。\n第三,即使我们通过合理设计和埋点,解决了上述问题,但是 JDK 函数是广泛使用的,我们很难限制对 JDK API 的使用场景。对 JDK 过多方法、特别是非 RPC 方法的监控会造成系统的巨大延迟风险。而且有一些基础类型和底层工具类,是很难通过字节码进行增强的。当我们的 SDK 使用不当或者出现 bug 时,我们无法具体得知真实的错误原因。\n代码级性能剖析方法 方法介绍 基于以上问题,在系统性能监控方法上,我们提出了代码级性能剖析这种在线诊断方法。这种方法基于一个高级语言编程模型共性,即使再复杂的系统,再复杂的业务逻辑,都是基于线程去进行执行的,而且多数逻辑是在单个线程状态下执行的。\n代码级性能剖析就是利用方法栈快照,并对方法执行情况进行分析和汇总。并结合有限的分布式追踪 span 上下文,对代码执行速度进行估算。\n性能剖析激活时,会对指定线程周期性的进行线程栈快照,并将所有的快照进行汇总分析,如果两个连续的快照含有同样的方法栈,则说明此栈中的方法大概率在这个时间间隔内都处于执行状态。从而,通过这种连续快照的时间间隔累加成为估算的方法执行时间。时间估算方法如下图所示:\n在上图中,d0-d10 代表 10 次连续的内存栈快照,实际方法执行时间在 d3-d4 区间,结束时间在 d8-d9 之间。性能剖析无法告诉你方法的准确执行时间,但是他会估算出方法执行时间为 d4-d8 的 4 个快照采集间隔时间之和,这已经是非常的精确的时间估算了。\n而这个过程因为不涉及代码埋点,所以自然性能消耗是稳定和可控的,也无需担心是否被埋点,是否是 JDK 方法等问题。同时,由于上层已经在分布式追踪之下,性能剖析方法可以明确地确定分析开始和结束时间,减少不必要的性能开销。\n性能剖析可以很好的对线程的堆栈信息进行监控,主要有以下几点优势:\n 精确的问题定位,直接到代码方法和代码行; 无需反复的增删埋点,大大减少了人力开发成本; 不用承担过多埋点对目标系统和监控系统的压力和性能风险; 按需使用,平时对系统无消耗,使用时的消耗稳定可能。  SkyWalking 实践实例 我们首先在 Apache SkyWalking APM 中实现此技术方法,下面我们就以一个真实的例子来说明此方法的执行效果。\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedExceptione) { } 这是我们故意加入的问题代码,我们使用 CountDownLanth 设置了两个任务完成后方法执行结束,Task1 和 Task2 是两个执行时间不稳定的任务,所以主任务也会执行速度不稳定。但对于运维和监控团队来说,很难定位到这个方法片段。\n针对于这种情况,我们看看性能剖析会怎样直接定位此问题。\n上图所示的就是我们在进行链路追踪时所看到的真实执行情况,其中我们可以看到在 service/processWithThreadPool 执行速度缓慢,这正是我们植入问题代码的方法。此时在这个调用中没有后续链路了,所以并没有更细致的原因,我们也不打算去 review 代码,从而增加新埋点。这时,我们可以对 HelloService 进行性能剖析,并执行只剖析响应速度大于 500 毫秒的请求。\n注意,指定特定响应时间的剖析是保证剖析有效性的重要特性,如果方法在平均响应时间上已经出现问题,往往通过分布式链路可以快速定位,因为此时链路总时间长,新埋点带来的性能影响相对可控。但是方法性能抖动是不容易用新增埋点来解决的,而且往往只发生在生产环境。\n上图就是我们进行性能剖析后的真实结果图。从左到右分别表示:栈帧名称、该栈帧总计耗时(包含其下面所有自栈帧)、当前栈帧自身耗时和监控次数。我们可以在最后一行看到,线程卡在了 sun.misc.Unsafe.park 中了。如果你熟悉 Java 就可以知道此时进行了锁等待,我们继续按照树的结构向上推,便可以看到线程真正是卡在了 CountDownLatch.await 方法中。\n方法局限性 当然任何的方法都不是万能的,性能剖析也有一些局限性。\n第一, 对于高频反复执行的方法,如循环调用,可能会误报为缓慢方法。但这并不是大问题,因为如果反复执行的耗时较长,必然是系统需要关注的性能瓶颈。\n第二, 由于性能栈快照有一定的性能消耗,所以采集周期不宜过密,如 SkyWalking 实践中,不支持小于 10ms 的采集间隔。所以如果问题方法执行时间过小(比如在 10 
毫秒内波动),此方法并不适用。我们也再此强调,方法论和工具的强大,始终不能代替程序员。\n","title":"在线代码级性能剖析,补全分布式追踪的最后一块“短板”","url":"/zh/2020-03-23-using-profiling-to-fix-the-blind-spot-of-distributed-tracing/"},{"content":"SkyWalking CLI 0.2.0 is released. Go to downloads page to find release tars.\n Support visualization of heat map Support top N entities, swctl metrics top 5 --name service_sla Support thermodynamic metrics, swctl metrics thermodynamic --name all_heatmap Support multiple linear metrics, swctl --display=graph --debug metrics multiple-linear --name all_percentile  ","title":"Release Apache SkyWalking CLI 0.2.0","url":"/events/release-apache-skywalking-cli-0-2-0/"},{"content":"SkyWalking Chart 1.1.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 6.6.0 Support deploy Elasticsearch 7 The official helm repo was changed to the official Elasticsearch repo (https://helm.elastic.co/)  ","title":"Release Apache SkyWalking Chart 1.1.0 for SkyWalking 6.6.0","url":"/events/release-apache-skywalking-chart-1-1-0-for-skywalking-6-6-0/"},{"content":"Support tracing and collect metrics from Nginx server. Require SkyWalking APM 7.0+.\n","title":"SkyWalking Nginx LUA 0.1.0 release","url":"/events/skywalking-nginx-lua-0-1-0-release/"},{"content":"Based on his continuous contributions, Ming Wen (a.k.a moonming) has been voted as a new committer.\n","title":"Welcome Ming Wen as new committer","url":"/events/welcome-ming-wen-as-new-committer/"},{"content":"Based on his continuous contributions, Haochao Zhuang (a.k.a dmsolr) has been invited to join the PMC. Welcome aboard.\n","title":"Welcome Haochao Zhuang to join the PMC","url":"/events/welcome-haochao-zhuang-to-join-the-pmc/"},{"content":"Based on his continuous contributions, Zhusheng Xu (a.k.a aderm) has been voted as a new committer.\n","title":"Welcome Zhusheng Xu as new committer","url":"/events/welcome-zhusheng-xu-as-new-committer/"},{"content":"Based on his continuous contributions, Han Liu (a.k.a mrproliu) has been voted as a new committer.\n","title":"Welcome Han Liu as new committer","url":"/events/welcome-han-liu-as-new-committer/"},{"content":" Author: Wu Sheng, tetrate.io, SkyWalking original creator, SkyWalking V.P. GitHub, Twitter, Linkedin  The SkyWalking project provides distributed tracing, topology map analysis, service mesh telemetry analysis, metrics analysis and a super cool visualization targeting distributed systems in k8s or traditional VM deployments.\nThe project is widely used in Alibaba, Huawei, Tencent, DiDi, xiaomi, Pingan, China’s top 3 telecom companies (China Mobile, China telecom, China Unicom), airlines, banks and more. It has over 140 company users listed on our powered by page.\nToday, we welcome and celebrate reaching 200 code contributors on our main repo. We hereby mark this milestone as official today, : Jan. 20th 2020.\nAt this great moment, I would like to share SkyWalking’s 4-year open source journey.\nI wrote the first line on Nov. 1st, 2015, guiding people to understand a distributed system just as micro-services and distributed architecture were becoming popular. In the first 2 years, I never thought it would become such a big and active community. I didn’t even expect it would be an open source project. Initially, the goal was primarily to teach others about distributed tracing and analysis.\nIt was a typical open source project in obscurity in its first two years. But people still showed up, asked questions, and tried to improve the project. 
I got several invitations to share the project at local meetups.All these made me realize people really needed a good open source APM project.\nIn 2017, I decided to dedicate myself as much as possible to make the project successful, and it became my day job. To be honest, I had no clue about how to do that; at that time in China, it was rare to have this kind of job. So, I began to ask friends around me, “Do you want to collaborate on the open source APM with me?” Most people were busy and gave a clear NO, but two of them agreed to help: Xin Zhang and Yongsheng Peng. We built SkyWalking 3.x and shared the 3.2 release at GOPS Shanghai, China.\nIt became the first adoption version used in production\nCompared to today\u0026rsquo;s SkyWalking, it was a toy prototype, but it had the same tracing design, protocol and analysis method.\nThat year the contributor team was 15-20, and the project had obvious potential to expand. I began to consider bringing the project into a worldwide, top-level open source foundation. Thanks to our initial incubator mentors, Michael Semb Wever, William Jiang, and Luke Han, this really worked. At the end of 2017, SkyWalking joined the Apache Incubator, and kept following the Apache Way to build community. More contributors joined the community.\nWith more people spending time on the project collaborations, including codes, tests, blogs, conference talks, books and uses of the project, a chemical reaction happens. New developers begin to provide bug fixes, new feature requirements and new proposals. At the moment of graduation in spring 2019, the project had 100 contributors. Now, only 9 months later, it’s surged to 200 super quickly. They enhance the project and extend it to frontiers we never imaged: 5 popular language agents, service mesh adoption, CLI tool, super cool visualization. We are even moving on thread profiling, browser performance and Nginx tracing NOW.\nOver the whole 4+ years open source journey, we have had supports from leaders in the tracing open source community around the world, including Adrian Cole, William Jiang, Luke Han, Michael Semb Wever, Ben Sigelman, and Jonah Kowall. And we’ve had critical foundations' help, especially Apache Software Foundation and the Cloud Native Computing Foundation.\nOur contributors also have their support from their employers, including, to the best of my knowledge, Alibaba, Huawei, China Mobile, ke.com, DaoCloud, Lizhi.fm, Yonghui Supermarket, and dangdang.com. I also have support from my employers, tetrate.io, Huawei, and OneAPM.\nThanks to our 200+ contributors and the companies behind them. You make this magic happen.\n","title":"SkyWalking hits 200 contributors mark","url":"/blog/2020-01-20-celebrate-200th-contributor/"},{"content":"Based on his continuous contributions, Hongwei Zhai (a.k.a innerpeacez) has been invited to join the PMC. Welcome aboard.\n","title":"Welcome Hongwei Zhai to join the PMC","url":"/events/welcome-hongwei-zhai-to-join-the-pmc/"},{"content":"Apache APM 6.6.0 release. Go to downloads page to find release tars.\n Service Instance dependency detection are available. Support ElasticSearch 7 as a storage option. Reduce the register load.  ","title":"Release Apache SkyWalking APM 6.6.0","url":"/events/release-apache-skywalking-apm-6-6-0/"},{"content":"SkyWalking Chart 1.0.0 is released. Go to downloads page to find release tars.\n Deploy SkyWalking 6.5.0 by Chart. Elasticsearch deploy optional.  
","title":"Release Apache SkyWalking Chart 1.0.0 for SkyWalking 6.5.0","url":"/events/release-apache-skywalking-chart-1-0-0-for-skywalking-6-5-0/"},{"content":"SkyWalking CLI 0.1.0 is released. Go to downloads page to find release tars.\n Add command swctl service to list services Add command swctl instance and swctl search to list and search instances of service. Add command swctl endpoint to list endpoints of service. Add command swctl linear-metrics to query linear metrics and plot the metrics in Ascii Graph mode. Add command swctl single-metrics to query single-value metrics.  ","title":"Release Apache SkyWalking CLI 0.1.0","url":"/events/release-apache-skywalking-cli-0-1-0/"},{"content":"Based on his continuous contributions, Weiyi Liu (a.k.a wayilau) has been voted as a new committer.\n","title":"Welcome Weiyi Liu as new committer","url":"/events/welcome-weiyi-liu-as-new-committer/"},{"content":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome aboard.\n","title":"Welcome Lang Li as a new committer","url":"/events/welcome-lang-li-as-a-new-committer/"},{"content":"Based on her continuous contributions, Qiuxia Fan (a.k.a Fine0830) has been voted as a new committer.\n","title":"Welcome Qiuxia Fan as new committer","url":"/events/welcome-qiuxia-fan-as-new-committer/"},{"content":"6.5.0 release. Go to downloads page to find release tars.\n New metrics comparison view in UI. Dynamic Alert setting supported. JDK9-12 supported in backend.  ","title":"Release Apache SkyWalking APM 6.5.0","url":"/events/release-apache-skywalking-apm-6-5-0/"},{"content":"Based on his continuous contributions, Wei Zhang (a.k.a arugal) has been voted as a new committer.\n","title":"Welcome Wei Zhang as new committer","url":"/events/welcome-wei-zhang-as-new-committer/"},{"content":"PS:本文仅仅是在我的测试环境实验过,如果有问题,请自行优化调整\n前记:记得skywlking还是6.0版本的时候我就在试用,当时是skywalking基本在两三天左右就会监控数据完全查不出来,elasticsearch日志报错,由于当时也算是初用es,主要用来日志收集,并且时间有限,没有继续深入研究,最近空闲,更新到最新的6.5.0(开发版本)还是会出现同样的问题,下定决心解决下,于是有了本文的浅知拙见\n本次调优环境 skywalking: 6.5.0 elasticsearch:6.3.2(下文用es代替)\n调优过程   当然是百度了,百度后其实翻来翻去就找到一个相关的文章https://my.oschina.net/keking/blog/3025303 ,参考之。\n  调整skywalking的这两个参数试试 bulkActions: 4000 # Execute the bulk every 2000 requests  bulkSize: 60 # flush the bulk every 20mb 然后es还是继续挂,继续频繁的重启\n  继续看这个文章,发现了另外一篇https://www.easyice.cn/archives/207 ,继续参考之\n  这篇文章发现每一个字我都认识,看起来也能懂,但是对于es小白的我来说,着实不知道怎么调整这些参数,姑且先加到es的配置文件里边试试看吧,于是就加了,然后重启es的时候说发现index参数配置,自从5.0之后就不支持这样配置了,还给调了个es的接口去设置,但是设置失败(真够不错的),朝着这个思路去百度,百度到快放弃,后来就寻思,再试试看吧,(百度的结果是知道了index有静态参数和动态参数,动态的参数是可以随时设置,静态的只能创建或者关闭状态的索引才可以设置) 然鹅并不知道怎么关闭索引,继续百度,(怎么全特么百度,好吧不百度了,直接来干货)\n 关闭索引(我的skywalking索引命名空间是dry_trace) curl -XPOST \u0026quot;http://localhost:9200/dry_trace*/_close\u0026quot; 设置参数 curl -XPUT 'http://localhost:9200/dry_trace*/_settings?preserve_existing=true' -H 'Content-type:application/json' -d '{ \u0026quot;index.refresh_interval\u0026quot; : \u0026quot;10s\u0026quot;, \u0026quot;index.translog.durability\u0026quot; : \u0026quot;async\u0026quot;, \u0026quot;index.translog.flush_threshold_size\u0026quot; : \u0026quot;1024mb\u0026quot;, \u0026quot;index.translog.sync_interval\u0026quot; : \u0026quot;120s\u0026quot; }'  打开索引 curl -XPOST \u0026quot;http://localhost:9200/dry_trace*/_open\u0026quot;    还有一点,第四步的方式只适用于现有的索引设置,那么新的索引设置呢,总不能每天重复下第四步吧。当然不需要,来干货 首先登陆kinaba控制台找到开发工具 贴入以下代码\n   PUT /_template/dry_trace_tmp { \u0026quot;index_patterns\u0026quot;: \u0026quot;dry_trace*\u0026quot;, \u0026quot;order\u0026quot;: 1, \u0026quot;settings\u0026quot;: 
{ \u0026quot;index\u0026quot;: { \u0026quot;refresh_interval\u0026quot;: \u0026quot;30s\u0026quot;, \u0026quot;translog\u0026quot;: { \u0026quot;flush_threshold_size\u0026quot;: \u0026quot;1GB\u0026quot;, \u0026quot;sync_interval\u0026quot;: \u0026quot;60s\u0026quot;, \u0026quot;durability\u0026quot;: \u0026quot;async\u0026quot; } } } } 截止目前为止运行一周,还未发现挂掉,一切看起来正常   完结\u0026mdash; 于 2019年11月\n","title":"SkyWalking 使用 ElasticSearch 存储的优化","url":"/zh/2019-11-07-skywalking-elasticsearch-storage-optimization/"},{"content":"Based on his continuous contributions, Haochao Zhuang (a.k.a dmsolr) has been voted as a new committer.\n","title":"Welcome Haochao Zhuang as new committer","url":"/events/welcome-haochao-zhuang-as-new-committer/"},{"content":" 作者:innerpeacez 原文地址  本文主要讲述的是如何使用 Helm Charts 将 SkyWalking 部署到 Kubernetes 集群中,相关文档可以参考skywalking-kubernetes 和 backend-k8s 文档 。\n目前推荐的四种方式:\n 使用 helm 2 提供的 helm serve 启动本地 helm repo 使用本地 chart 文件部署 使用 harbor 提供的 repo 功能 直接从官方 repo 进行部署  注意:目前 skywalking 的 chart 还没有提交到官方仓库,请先参照前三种方式进行部署\nHelm 2 提供的 helm serve 打包对应版本的 skywalking chart 1.配置 helm 环境,参考 Helm 环境配置 ,如果你要部署 helm2 相关 chart 可以直接配置 helm2 的相关环境\n2.克隆/下载ZIP skywalking-kubernetes 这个仓库,仓库关于chart的目录结构如下\n helm-chart\n helm2  6.0.0-GA 6.1.0   helm3  6.3.0 6.4.0     克隆/下载ZIP 完成后进入指定目录打包对应版本的chart\ncd skywalking-kubernetes/helm-chart/\u0026lt;helm-version\u0026gt;/\u0026lt;skywalking-version\u0026gt; 注意:helm-version 为对应的 helm 版本目录,skywalking-version 为对应的 skywalking 版本目录,下面以helm3 和 skywalking 6.3.0 为例\ncd skywalking-kubernetes/helm-chart/helm3/6.3.0 3.由于skywalking 依赖 elasticsearch 作为存储库,执行以下命令更新依赖,默认会从官方repo进行拉取\nhelm dep up skywalking  Hang tight while we grab the latest from your chart repositories\u0026hellip; \u0026hellip;Successfully got an update from the \u0026ldquo;stable\u0026rdquo; chart repository Update Complete. 
⎈Happy Helming!⎈ Saving 1 charts Downloading elasticsearch from repo https://kubernetes-charts.storage.googleapis.com/ Deleting outdated charts\n 如果官方 repo 不存在,请先添加官方仓库\nhelm repo add stable https://kubernetes-charts.storage.googleapis.com  \u0026ldquo;stable\u0026rdquo; has been added to your repositories\n 4.打包 skywalking , 执行以下命令\nhelm package skywalking/  Successfully packaged chart and saved it to: C:\\code\\innerpeacez_github\\skywalking-kubernetes\\helm-chart\\helm3\\6.3.0\\skywalking-0.1.0.tgz\n 打包完成后会在当前目录的同级目录生成 .tgz 文件\n ls  skywalking/ skywalking-0.1.0.tgz\n 启动 helm serve 由于上文配置的 helm 为 helm3 ,但是 helm 3中移除了 helm serve 的相关命令,所以需要另外一个环境配置helm2 的相关环境,下载 helm 2.14.3 的二进制文件,配置基本上没有大的差别,不在赘述\n初始化 helm\nhelm init 将上文生成的 skywalking-0.1.0.tgz 文件复制到 helm 相关目录 /root/.helm/repository/local,启动 serve\nhelm serve --address \u0026lt;ip\u0026gt;:8879 --repo-path /root/.helm/repository/local 注意: ip 为要能够被上文配置 helm 3 环境的机器访问到\n可以访问一下看看服务 serve 是否启动成功\ncurl ip:8879 部署 skywalking 1.在helm3 环境中添加启动的本地 repo\nhelm repo add local http://\u0026lt;ip\u0026gt;:8879 2.查看 skywalking chart 是否存在于本地仓库中\nhelm search skywalking  NAME CHART VERSION\tAPP VERSION\tDESCRIPTION local/skywalking 0.1.0 6.3.0 Apache SkyWalking APM System\n 3.部署\nhelm -n test install skywalking local/skywalking 这样 skywalking 就部署到了 k8s 集群中的 test 命名空间了,至此本地安装skywalking 就完成了。\n本地文件部署 如果你不想存储到 chart 到仓库中也可以直接使用本地文件部署 skywalking,按照上面的步骤将skywalking chart 打包完成之后,直接使用以下命令进行部署\nhelm -n test install skywalking skywalking-0.1.0.tgz harbor 作为 repo 存储 charts harbor 目前已经提供了,charts repo 的能力,这样就可以将 docker 镜像和 chart 存储在一个仓库中了,方便维护,具体harbor 的部署方法参考 Harbor 作为存储仓库存储 chart\n官方 repo 部署 目前没有发布到官方 repo 中,后续发布完成后,只需要执行下面命令即可\nhelm install -n test stable/skywalking 总结 四种方式都可以进行部署,如果你想要自定义 chart ,需要使用上述两种本地方法及 harbor 存储的方式,以便你修改好 chart 之后进行部署.\n","title":"使用 chart 部署 SkyWalking","url":"/zh/2019-10-08-how-to-use-sw-chart/"},{"content":" Author: Wei Qiang GitHub  Background SkyWalking backend provides the alarm function, we can define some Alarm rules, call webhook after the rule is triggered. I share my implementation\nDemonstration SkyWalking alarm UI\ndingtalk message body\nIntroduction  install  go get -u github.com/weiqiang333/infra-skywalking-webhook cd $GOPATH/src/github.com/weiqiang333/infra-skywalking-webhook/ bash build/build.sh ./bin/infra-skywalking-webhook help  Configuration  main configs file:configs/production.ymldingtalk:p3:token... Example  ./bin/infra-skywalking-webhook --config configs/production.yml --address 0.0.0.0:8000  SkyWalking backend alarm settings  webhooks:- http://127.0.0.1:8000/dingtalkCollaboration Hope that we can improve together webhook\nSkyWalking alarm rules may add more metric names (eg priority name), we can send different channels by locating different levels of alerts (dingtalk / SMS / phone)\nThanks.\n","title":"SkyWalking alarm webhook sharing","url":"/blog/2019-09-25-alarm-webhook-share/"},{"content":"作者: SkyWalking committer,Kdump\n本文介绍申请Apache SkyWalking Committer流程, 流程包括以下步骤\n 与PMC成员表达想成为committer的意愿(主动/被动) PMC内部投票 PMC正式邮件邀请 填写Apache iCLA申请表 设置ApacheID和邮箱 设置GitHub加入Apache组织 GitHub其它一些不重要设置  前期过程  与PMC成员表达想成为committer的意愿(主动/被动) PMC内部投票  当你对项目的贡献活跃度足够高或足够多时, Skywalking项目的PMC(项目管理委员会)会找到你并询问你是否有意愿成为项目的Committer, 或者也可以主动联系项目的PMC表达自己的意向, 在此之后PMC们会进行内部讨论和投票并告知你是否可以进入下一个环节.这个过程可能需要一周. 
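Returning briefly to the alarm webhook post above: the following is a minimal sketch of what a webhook receiver could look like in Java, using only the JDK's built-in com.sun.net.httpserver server (Java 9+ for readAllBytes). It is not the Go implementation referenced in that post; the port and the /alarm path are arbitrary examples, and the JSON payload is logged rather than parsed, because the exact alarm message fields should be taken from the SkyWalking alarm documentation rather than assumed here.

import com.sun.net.httpserver.HttpServer;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;

// Minimal alarm-webhook receiver sketch: SkyWalking's alarm module POSTs a JSON body
// to every URL listed under "webhooks:" in alarm-settings.yml; this server just logs
// the raw body and leaves parsing/forwarding (DingTalk, mail, SMS, ...) to the reader.
public final class AlarmWebhookServer {
    public static void main(String[] args) throws Exception {
        HttpServer server = HttpServer.create(new InetSocketAddress(8000), 0);
        server.createContext("/alarm", exchange -> {
            try (InputStream in = exchange.getRequestBody()) {
                String body = new String(in.readAllBytes(), StandardCharsets.UTF_8);
                System.out.println("Received alarm payload: " + body);
            }
            exchange.sendResponseHeaders(200, -1); // -1 means no response body
            exchange.close();
        });
        server.start();
        System.out.println("Listening on http://127.0.0.1:8000/alarm");
    }
}

With something like this running, the OAP side only needs its webhooks list to point at http://127.0.0.1:8000/alarm, in the same way the dingtalk endpoint is registered in the post above.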
如果PMC主动邀请你进行非正式的意愿咨询, 你可以选择接受或拒绝.\nPS:PMC会向你索要你的个人邮箱, 建议提供Gmail, 因为后期绑定Apache邮箱需要用到, 其它邮箱我不确定是否能绑定.\nPS:从Apache官方的流程来讲, 现有的PMC会在没有通知候选人的情况下先进行候选人投票, 但是Skywalking项目的PMC有可能更倾向于先得到候选人的意愿再进行投票.\n正式阶段   PMC正式邮件邀请\n 当你收到PMC正式的邀请邮件时, 恭喜你, 你已经通过了PMC的内部投票, 你需要用英文回答接受邀请或者拒绝邀请, 记住回复的时候一定要选择全部回复.    填写Apache iCLA申请表\n  在你收到的PMC邮件中, 有几个ASF官方链接需要你去浏览, 重点的内容是查看CLAs, 并填写Individual Contributor License Agreement, 你可以将icla.pdf文件下载到本地, 使用PDF工具填写里面所需的信息, 并打印出来签名(一定要手写签名, 否则会被要求重新签名), 再扫描(或手机拍照)成电子文档(需要回复PDF格式, 文件名建议重命名为你的名字-icla.pdf), 使用gpg对电子文档进行签名(参考[HOW-TO: SUBMITTING LICENSE AGREEMENTS AND GRANTS\n](http://www.apache.org/licenses/contributor-agreements.html#submitting)), Window可以使用GnuPG或者Gpg4win.\n  完成gpg签名后, 请将你签名用的公钥上送到pool.sks-keyservers.net服务器, 并在这个页面中验证你的公钥是否可以被搜索到, 搜索关键词可以是你秘钥中填写的名字或者邮箱地址.\n  gpg签名后, 会生成.pdf.asc的文件, 需要将你的你的名字-icla.pdf和你的名字-icla.pdf.asc以附件的方式一起发送到secretary@apache.org, 并抄送给private@skywalking.apache.org.\n    设置ApacheID和邮箱\n 大概5个工作日内, 你会收到一封来至于root@apache.org的邮件, 主题为Welcome to the Apache Software Foundation (ASF)!, 恭喜你, 你已经获得了ApacheID, 这时候你需要根据邮件内容的提示去设置你的ApacheID密码, 密码设置完成后, 需要在Apache Account Utility页面中重点设置Forwarding email address和Your GitHub Username两个信息.保存信息的时候需要你填写当前的ApacheID的密码. 现在进入Gmail, 选择右上角的齿轮-\u0026gt;设置-\u0026gt;账号和导入-\u0026gt;添加其他电子邮件地址-\u0026gt;参考Sending email from your apache.org email address给出的信息根据向导填写Apache邮箱.    设置GitHub加入Apache组织\n 进入Welcome to the GitBox Account Linking Utility!, 按照顺序将Apache Account和GitHub Account点绿, 想点绿MFA Status, 需要去GitHub开启2FA, 请参考配置双重身份验证完成2FA的功能. 等待1~2小时后登陆自己的GitHub的dashboard界面, 你应该会看到一条Apache组织邀请你加入的通知, 这个时候接受即可享有Skywalking相关GitHub项目权限了.    其它提示  GitHub其它一些不重要设置  在GitHub首页展示Apache组织的logo: 进入Apache GitHub组织-\u0026gt;People-\u0026gt;搜索自己的GitHubID-\u0026gt;将Private改成Public    ","title":"Apache SkyWalking Committer申请流程","url":"/zh/2019-09-12-apache-skywalking-committer-apply-process/"},{"content":"Based on his contributions to the skywalking ui project, Weijie Zou (a.k.a Kdump) has been accepted as a new committer.\n","title":"Welcome Weijie Zou as a new committer","url":"/events/welcome-weijie-zou-as-a-new-committer/"},{"content":"6.4.0 release. Go to downloads page to find release tars.\n Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  
Read changelog for the details.\n","title":"Release Apache SkyWalking APM 6.4.0","url":"/events/release-apache-skywalking-apm-6-4-0/"},{"content":"  作者:innerpeacez 原文地址   如果你还不知道 Skywalking agent 是什么,请点击这里查看 Probe 或者这里查看快速了解agent,由于我这边大部分都是 JAVA 服务,所以下文以 Java 中使用 agent 为例,提供了以下三种方式供你选择\n三种方式:  使用官方提供的基础镜像 将 agent 包构建到已经存在的基础镜像中 sidecar 模式挂载 agent  1.使用官方提供的基础镜像 查看官方 docker hub 提供的基础镜像,只需要在你构建服务镜像是 From 这个镜像即可,直接集成到 Jenkins 中可以更加方便\n2.将 agent 包构建到已经存在的基础镜像中 提供这种方式的原因是:官方的镜像属于精简镜像,并且是 openjdk ,可能很多命令没有,需要自己二次安装,以下是我构建的过程\n  下载 oracle jdk\n这个现在 oracle 有点恶心了,wget 各种不行,然后我放弃了,直接从官网下载了\n  下载 skywalking 官方发行包,并解压(以6.3.0为例)\nwget https://www.apache.org/dyn/closer.cgi/skywalking/6.3.0/apache-skywalking-apm-6.3.0.tar.gz \u0026amp;\u0026amp; tar -zxvf apache-skywalking-apm-6.3.0.tar.gz   通过以下 dockerfile 构建基础镜像\nFROMalpine:3.8  ENV LANG=C.UTF-8 RUN set -eux \u0026amp;\u0026amp; \\  apk update \u0026amp;\u0026amp; apk upgrade \u0026amp;\u0026amp; \\  wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub \u0026amp;\u0026amp;\\  wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.30-r0/glibc-2.30-r0.apk \u0026amp;\u0026amp;\\  apk --no-cache add unzip vim curl git bash ca-certificates glibc-2.30-r0.apk file \u0026amp;\u0026amp; \\  rm -rf /var/lib/apk/* \u0026amp;\u0026amp;\\  mkdir -p /usr/skywalking/agent/ # A streamlined jreADD jdk1.8.0_221/ /usr/java/jdk1.8.0_221/ADD apache-skywalking-apm-bin/agent/ /usr/skywalking/agent/ # set envENV JAVA_HOME /usr/java/jdk1.8.0_221ENV PATH ${PATH}:${JAVA_HOME}/bin # run container with base path:/WORKDIR/ CMD bash  这里由于 alpine 是基于mini lib 的,但是 java 需要 glibc ,所以加入了 glibc 相关的东西,最后构建出的镜像大小在 490M 左右,因为加了挺多命令还是有点大,仅供参考,同样构建出的镜像也可以直接配置到 jenkins 中。\n3.sidecar 模式挂载 agent 如果你们的服务是部署在 Kubernetes 中,你还可以使用这种方式来使用 Skywalking Agent ,这种方式的好处在与不需要修改原来的基础镜像,也不用重新构建新的服务镜像,而是以sidecar 模式,通过共享volume的方式将agent 所需的相关文件挂载到已经存在的服务镜像中\n构建 skywalking agent sidecar 镜像的方法\n  下载skywalking 官方发行包,并解压\nwget https://www.apache.org/dyn/closer.cgi/skywalking/6.3.0/apache-skywalking-apm-6.3.0.tar.gz \u0026amp;\u0026amp; tar -zxvf apache-skywalking-apm-6.3.0.tar.gz   通过以下 dockerfile 进行构建\nFROMbusybox:latest  ENV LANG=C.UTF-8 RUN set -eux \u0026amp;\u0026amp; mkdir -p /usr/skywalking/agent/ ADD apache-skywalking-apm-bin/agent/ /usr/skywalking/agent/ WORKDIR/  注意:这里我没有在dockerfile中下载skywalking 发行包是因为保证构建出的 sidecar 镜像保持最小,bosybox 只有700 k左右,加上 agent 最后大小小于20M\n如何使用 sidecar 呢?\napiVersion:apps/v1kind:Deploymentmetadata:labels:name:demo-swname:demo-swspec:replicas:1selector:matchLabels:name:demo-swtemplate:metadata:labels:name:demo-swspec:initContainers:- image:innerpeacez/sw-agent-sidecar:latestname:sw-agent-sidecarimagePullPolicy:IfNotPresentcommand:[\u0026#39;sh\u0026#39;]args:[\u0026#39;-c\u0026#39;,\u0026#39;mkdir -p /skywalking/agent \u0026amp;\u0026amp; cp -r /usr/skywalking/agent/* /skywalking/agent\u0026#39;]volumeMounts:- mountPath:/skywalking/agentname:sw-agentcontainers:- image:nginx:1.7.9name:nginxvolumeMounts:- mountPath:/usr/skywalking/agentname:sw-agentports:- containerPort:80volumes:- name:sw-agentemptyDir:{}以上是挂载 sidecar 的 deployment.yaml 文件,以nginx 作为服务为例,主要是通过共享 volume 的方式挂载 agent,首先 initContainers 通过 sw-agent 卷挂载了 sw-agent-sidecar 中的 /skywalking/agent ,并且将上面构建好的镜像中的 agent 目录 cp 到了 /skywalking/agent 目录,完成之后 nginx 启动时也挂载了 sw-agent 卷,并将其挂载到了容器的 /usr/skywalking/agent 目录,这样就完成了共享过程。\n总结 这样除去 ServiceMesh 以外,我能想到的方式就介绍完了,希望可以帮助到你。最后给 Skywalking 一个 Star 吧,国人的骄傲。\n","title":"如何使用 SkyWalking Agent 
?","url":"/zh/2019-08-30-how-to-use-skywalking-agent/"},{"content":"Based on his continuous contributions, Yuguang Zhao (a.k.a zhaoyuguang) has been invited to join the PMC. Welcome aboard.\n","title":"Welcome Yuguang Zhao to join the PMC","url":"/events/welcome-yuguang-zhao-to-join-the-pmc/"},{"content":"Based on his continuous contributions, Zhenxu Ke (a.k.a kezhenxu94) has been invited to join the PMC. Welcome aboard.\n","title":"Welcome Zhenxu Ke to join the PMC","url":"/events/welcome-zhenxu-ke-to-join-the-pmc/"},{"content":"Based on his contributions to the skywalking PHP project, Yanlong He (a.k.a heyanlong has been accepted as a new committer.\n","title":"Welcome Yanlong He as a new committer","url":"/events/welcome-yanlong-he-as-a-new-committer/"},{"content":"6.3.0 release. Go to downloads page to find release tars.\n Improve ElasticSearch storage implementation performance again. OAP backend re-install w/o agent reboot required.  Read changelog for the details.\n","title":"Release Apache SkyWalking APM 6.3.0","url":"/events/release-apache-skywalking-apm-6-3-0/"},{"content":"6.2.0 release. Go to downloads page to find release tars. ElasticSearch storage implementation changed, high reduce payload to ElasticSearch cluster.\nRead changelog for the details.\n","title":"Release Apache SkyWalking APM 6.2.0","url":"/events/release-apache-skywalking-apm-6-2-0/"},{"content":"Based on his continuous contributions, Zhenxu Ke (a.k.a kezhenxu94) has been voted as a new committer.\n","title":"Welcome Zhenxu Ke as a new committer","url":"/events/welcome-zhenxu-ke-as-a-new-committer/"},{"content":"6.1.0 release. Go to downloads page to find release tars. This is the first top level project version.\nKey updates\n RocketBot UI OAP performance improvement  ","title":"Release Apache SkyWalking APM 6.1.0","url":"/events/release-apache-skywalking-apm-6-1-0/"},{"content":"Apache SkyWalking PMC accept the RocketBot UI contributions. 
After IP clearance, it will be released in SkyWalking 6.1 soon.\n","title":"RocketBot UI has been accepted as SkyWalking primary UI","url":"/events/rocketbot-ui-has-been-accepted-as-skywalking-primary-ui/"},{"content":"Apache board approved SkyWalking graduated as TLP at April 17th 2019.\n","title":"SkyWalking graduated as Apache Top Level Project","url":"/events/skywalking-graduated-as-apache-top-level-project/"},{"content":"Based on his continuous contributions, he has been accepted as a new committer.\n","title":"Welcome Yuguang Zhao as a new committer","url":"/events/welcome-yuguang-zhao-as-a-new-committer/"},{"content":"APM和调用链跟踪 随着企业经营规模的扩大,以及对内快速诊断效率和对外SLA(服务品质协议,service-level agreement)的追求,对于业务系统的掌控度的要求越来越高,主要体现在:\n 对于第三方依赖的监控,实时/准实时了解第三方的健康状况/服务品质,降低第三方依赖对于自身系统的扰动(服务降级、故障转移) 对于容器的监控,实时/准实时的了解应用部署环境(CPU、内存、进程、线程、网络、带宽)情况,以便快速扩容/缩容、流量控制、业务迁移 业务方对于自己的调用情况,方便作容量规划,同时对于突发的请求也能进行异常告警和应急准备 自己业务的健康、性能监控,实时/准实时的了解自身的业务运行情况,排查业务瓶颈,快速诊断和定位异常,增加对自己业务的掌控力  同时,对于企业来说,能够更精确的了解资源的使用情况,对于成本核算和控制也有非常大的裨益。\n在这种情况下,一般都会引入APM(Application Performance Management \u0026amp; Monitoring)系统,通过各种探针采集数据,收集关键指标,同时搭配数据呈现和监控告警,能够解决上述的大部分问题。\n然而随着RPC框架、微服务、云计算、大数据的发展,同时业务的规模和深度相比过往也都增加了很多,一次业务可能横跨多个模块/服务/容器,依赖的中间件也越来越多,其中任何一个节点出现异常,都可能导致业务出现波动或者异常,这就导致服务质量监控和异常诊断/定位变得异常复杂,于是催生了新的业务监控模式:调用链跟踪\n 能够分布式的抓取多个节点的业务记录,并且通过统一的业务id(traceId,messageId,requestId等)将一次业务在各个节点的记录串联起来,方便排查业务的瓶颈或者异常点  产品对比 APM和调用链跟踪均不是新诞生事务,很多公司已经有了大量的实践,不过开源的并且能够开箱即用的产品并不多,这里主要选取了Pinpoint,Skywalking,CAT来进行对比(当然也有其他的例如Zipkin,Jaeger等产品,不过总体来说不如前面选取的3个完成度高),了解一下APM和调用链跟踪在开源方面的发展状态。\nPinpoint Pinpoint是一个比较早并且成熟度也非常高的APM+调用链监控的项目,在全世界范围内均有用户使用,支持Java和PHP的探针,数据容器为HBase,其界面参考:\nSkywalking Skywalking是一个新晋的项目,最近一两年发展非常迅猛,本身支持OpenTracing规范,优秀的设计提供了良好的扩展性,支持Java、PHP、.Net、NodeJs探针,数据容器为ElasticSearch,其界面参考:\nCAT CAT是由美团开源的一个APM项目,也历经了多年的迭代升级,拥有大量的企业级用户,对于监控和报警整合比较紧密,支持Java、C/C++、.Net、Python、Go、NodeJs,不过CAT目前主要通过侵入性的方式接入,数据容器包括HDFS(存储原始数据)和mysql(二次统计),其界面参考:\n横向对比 上面只是做了一个简介,那这三个项目各自有什么特色或者优势/劣势呢(三者的主要产品均针对Java,这里也主要针对Java的特性)?\n Pinpoint  优势  大企业/长时间验证,稳定性和完成度高 探针收集的数据粒度比较细 HBase的数据密度较大,支持PB级别下的数据查询 代码设计考虑的扩展性较弱,二次开发难度较大(探针为插件式,开发比较简单) 拥有完整的APM和调用链跟踪功能   劣势  代码针对性强,扩展较难 容器为HBase,查询功能较弱(主要为时间维度) 探针的额外消耗较多(探针采集粒度细,大概10%~20%) 项目趋于成熟,而扩展难度较大,目前社区活跃度偏低,基本只进行探针的增加或者升级 缺少自定义指标的设计     Skywalking  优势  数据容器为ES,查询支持的维度较多并且扩展潜力大 项目设计采用微内核+插件,易读性和扩展性都比较强 主要的研发人员为华人并且均比较活跃,能够进行更加直接的沟通 拥有完整的APM和调用链跟踪功能   劣势  项目发展非常快,稳定性有待验证 ES数据密度较小,在PB级别可能会有性能压力 缺少自定义指标的设计     CAT  优势  大企业/长时间验证,稳定性和完成度高 采用手动数据埋点而不是探针,数据采集的灵活性更强 支持自定义指标 代码设计考虑的扩展性较弱,并且数据结构复杂,二次开发难度较大 拥有完善的监控告警机制   劣势  代码针对性强,扩展较难 需要手动接入埋点,代码侵入性强 APM功能完善,但是不支持调用链跟踪      基本组件 如果分别去看Pinpoint/Skywalking/CAT的整体设计,我们会发现三者更像是一个规范的三种实现,虽然各自有不同的机制和特性,但是从模块划分和功能基本是一致的:\n当然也有一些微小的区别:\n Pinpoint基本没有aggregator,同时query和alarm集成在了web中,只有agent,collector和web Skywalking则是把collector、aggregator、alarm集成为OAP(Observability Analysis Platform),并且可以通过集群部署,不同的实例可以分别承担collector或者aggregator+alarm的角色 CAT则和Skywalking类似,把collector、aggregator、alarm集成为cat-consumer,而由于CAT有比较复杂的配置管理,所以query和配置一起集成为cat-home 当然最大的区别是Pinpoint和Skywalking均是通过javaagent做字节码的扩展,通过切面编程采集数据,类似于探针,而CAT的agent则更像是一个工具集,用于手动埋点  Skywalking 前戏这么多,终于开始进入主题,介绍今天的主角:Skywalking,不过通过之前的铺垫,我们基本都知道了Skywalking期望解决的问题以及总体的结构,下面我们则从细节来看Skywalking是怎么一步一步实现的。\n模块构成 首先,Skywalking进行了精准的领域模型划分:\n整个系统分为三部分:\n agent:采集tracing(调用链数据)和metric(指标)信息并上报 OAP:收集tracing和metric信息通过analysis core模块将数据放入持久化容器中(ES,H2(内存数据库),mysql等等),并进行二次统计和监控告警 webapp:前后端分离,前端负责呈现,并将查询请求封装为graphQL提交给后端,后端通过ribbon做负载均衡转发给OAP集群,再将查询结果渲染展示  而整个Skywalking(包括agent和OAP,而webapp后端业务非常简单主要就是认证和请求转发)均通过微内核+插件式的模式进行编码,代码结构和扩展性均非常强,具体设计可以参考: 
从Skywalking看如何设计一个微核+插件式扩展的高扩展框架 ,Spring Cloud Gateway的GatewayFilterFactory的扩展也是通过这种plugin define的方式来实现的。\nSkywalking也提供了其他的一些特性:\n 配置重载:支持通过jvm参数覆写默认配置,支持动态配置管理 集群管理:这个主要体现在OAP,通过集群部署分担数据上报的流量压力和二次计算的计算压力,同时集群也可以通过配置切换角色,分别面向数据采集(collector)和计算(aggregator,alarm),需要注意的是agent目前不支持多collector负载均衡,而是随机从集群中选择一个实例进行数据上报 支持k8s和mesh 支持数据容器的扩展,例如官方主推是ES,通过扩展接口,也可以实现插件去支持其他的数据容器 支持数据上报receiver的扩展,例如目前主要是支持gRPC接受agent的上报,但是也可以实现插件支持其他类型的数据上报(官方默认实现了对Zipkin,telemetry和envoy的支持) 支持客户端采样和服务端采样,不过服务端采样最有意义 官方制定了一个数据查询脚本规范:OAL(Observability Analysis Language),语法类似Linq,以简化数据查询扩展的工作量 支持监控预警,通过OAL获取数据指标和阈值进行对比来触发告警,支持webhook扩展告警方式,支持统计周期的自定义,以及告警静默防止重复告警  数据容器 由于Skywalking并没有自己定制的数据容器或者使用多种数据容器增加复杂度,而是主要使用ElasticSearch(当然开源的基本上都是这样来保持简洁,例如Pinpoint也只使用了HBase),所以数据容器的特性以及自己数据结构基本上就限制了业务的上限,以ES为例:\n ES查询功能异常强大,在数据筛选方面碾压其他所有容器,在数据筛选潜力巨大(Skywalking默认的查询维度就比使用HBase的Pinpoint强很多) 支持sharding分片和replicas数据备份,在高可用/高性能/大数据支持都非常好 支持批量插入,高并发下的插入性能大大增强 数据密度低,源于ES会提前构建大量的索引来优化搜索查询,这是查询功能强大和性能好的代价,但是链路跟踪往往有非常多的上下文需要记录,所以Skywalking把这些上下文二进制化然后通过Base64编码放入data_binary字段并且将字段标记为not_analyzed来避免进行预处理建立查询索引  总体来说,Skywalking尽量使用ES在大数据和查询方面的优势,同时尽量减少ES数据密度低的劣势带来的影响,从目前来看,ES在调用链跟踪方面是不二的数据容器,而在数据指标方面,ES也能中规中矩的完成业务,虽然和时序数据库相比要弱一些,但在PB级以下的数据支持也不会有太大问题。\n数据结构 如果说数据容器决定了上限,那么数据结构则决定了实际到达的高度。Skywalking的数据结构主要为:\n 数据维度(ES索引为skywalking_*_inventory)  service:服务 instance:实例 endpoint:接口 network_adress:外部依赖   数据内容  原始数据  调用链跟踪数据(调用链的trace信息,ES索引为skywalking_segment,Skywalking主要的数据消耗都在这里) 指标(主要是jvm或者envoy的运行时指标,例如ES索引skywalking_instance_jvm_cpu)   二次统计指标  指标(按维度/时间二次统计出来的例如pxx、sla等指标,例如ES索引skywalking_database_access_p75_month) 数据库慢查询记录(数据库索引:skywalking_top_n_database_statement)   关联关系(维度/指标之间的关联关系,ES索引为skywalking_*_relation_*) 特别记录  告警信息(ES索引为skywalking_alarm_record) 并发控制(ES索引为skywalking_register_lock)      其中数量占比最大的就是调用链跟踪数据和各种指标,而这些数据均可以通过OAP设置过期时间,以降低历史数据的对磁盘占用和查询效率的影响。\n调用链跟踪数据 作为Skywalking的核心数据,调用链跟踪数据(skywalking_segment)基本上奠定了整个系统的基础,而如果要详细的了解调用链跟踪的话,就不得不提到openTracing。\nopenTracing基本上是目前开源调用链跟踪系统的一个事实标准,它制定了调用链跟踪的基本流程和基本的数据结构,同时也提供了各个语言的实现。如果用一张图来表现openTracing,则是如下:\n其中:\n SpanContext:一个类似于MDC(Slfj)或者ThreadLocal的组件,负责整个调用链数据采集过程中的上下文保持和传递 Trace:一次调用的完整记录  Span:一次调用中的某个节点/步骤,类似于一层堆栈信息,Trace是由多个Span组成,Span和Span之间也有父子或者并列的关系来标志这个节点/步骤在整个调用中的位置  Tag:节点/步骤中的关键信息 Log:节点/步骤中的详细记录,例如异常时的异常堆栈   Baggage:和SpanContext一样并不属于数据结构而是一种机制,主要用于跨Span或者跨实例的上下文传递,Baggage的数据更多是用于运行时,而不会进行持久化    以一个Trace为例:\n首先是外部请求调用A,然后A依次同步调用了B和C,而B被调用时会去同步调用D,C被调用的时候会依次同步调用E和F,F被调用的时候会通过异步调用G,G则会异步调用H,最终完成一次调用。\n上图是通过Span之间的依赖关系来表现一个Trace,而在时间线上,则可以有如下的表达:\n当然,如果是同步调用的话,父Span的时间占用是包括子Span的时间消耗的。\n而落地到Skywalking中,我们以一条skywalking_segment的记录为例:\n{ \u0026quot;trace_id\u0026quot;: \u0026quot;52.70.15530767312125341\u0026quot;, \u0026quot;endpoint_name\u0026quot;: \u0026quot;Mysql/JDBI/Connection/commit\u0026quot;, \u0026quot;latency\u0026quot;: 0, \u0026quot;end_time\u0026quot;: 1553076731212, \u0026quot;endpoint_id\u0026quot;: 96142, \u0026quot;service_instance_id\u0026quot;: 52, \u0026quot;version\u0026quot;: 2, \u0026quot;start_time\u0026quot;: 1553076731212, \u0026quot;data_binary\u0026quot;: \u0026quot;CgwKCjRGnPvp5eikyxsSXhD///////////8BGMz62NSZLSDM+tjUmS0wju8FQChQAVgBYCF6DgoHZGIudHlwZRIDc3FsehcKC2RiLmluc3RhbmNlEghyaXNrZGF0YXoOCgxkYi5zdGF0ZW1lbnQYAiA0\u0026quot;, \u0026quot;service_id\u0026quot;: 2, \u0026quot;time_bucket\u0026quot;: 20190320181211, \u0026quot;is_error\u0026quot;: 0, \u0026quot;segment_id\u0026quot;: \u0026quot;52.70.15530767312125340\u0026quot; } 其中:\n trace_id:本次调用的唯一id,通过snowflake模式生成 endpoint_name:被调用的接口 latency:耗时 end_time:结束时间戳 endpoint_id:被调用的接口的唯一id 
service_instance_id:被调用的实例的唯一id version:本数据结构的版本号 start_time:开始时间戳 data_binary:里面保存了本次调用的所有Span的数据,序列化并用Base64编码,不会进行分析和用于查询 service_id:服务的唯一id time_bucket:调用所处的时段 is_error:是否失败 segment_id:数据本身的唯一id,类似于主键,通过snowflake模式生成  这里可以看到,目前Skywalking虽然相较于Pinpoint来说查询的维度要多一些,但是也很有限,而且除了endPoint,并没有和业务有关联的字段,只能通过时间/服务/实例/接口/成功标志/耗时来进行非业务相关的查询,如果后续要增强业务相关的搜索查询的话,应该还需要增加一些用于保存动态内容(如messageId,orderId等业务关键字)的字段用于快速定位。\n指标 指标数据相对于Tracing则要简单得多了,一般来说就是指标标志、时间戳、指标值,而Skywalking中的指标有两种:一种是采集的原始指标值,例如jvm的各种运行时指标(例如cpu消耗、内存结构、GC信息等);一种是各种二次统计指标(例如tp性能指标、SLA等,当然也有为了便于查询的更高时间维度的指标,例如基于分钟、小时、天、周、月)\n例如以下是索引skywalking_endpoint_cpm_hour中的一条记录,用于标志一个小时内某个接口的cpm指标:\n{ \u0026quot;total\u0026quot;: 8900, \u0026quot;service_id\u0026quot;: 5, \u0026quot;time_bucket\u0026quot;: 2019031816, \u0026quot;service_instance_id\u0026quot;: 5, \u0026quot;entity_id\u0026quot;: \u0026quot;7\u0026quot;, \u0026quot;value\u0026quot;: 148 } 各个字段的释义如下:\n total:一分钟内的调用总量 service_id:所属服务的唯一id time_bucket:统计的时段 service_instance_id:所属实例的唯一id entity_id:接口(endpoint)的唯一id value:cpm的指标值(cpm=call per minute,即total/60)  工程实现 Skywalking的工程实现堪比Dubbo,框架设计和代码质量都达到非常高的水准,以dubbo为例,即使2012年发布的老版本放到当今,其设计和编码看起来也依然赏心悦目,设计简洁但是覆盖了所有的核心需求,同时又具备非常强的扩展性,二次开发非常简单,然而却又不会像Spring那样过度封装(当然Spring作为一个更加高度通用的框架,更高的封装也是有必要的)导致代码阅读异常困难。\nagent agent(apm-sniffer)是Skywalking的Java探针实现,主要负责:\n 采集应用实例的jvm指标 通过切向编程进行数据埋点,采集调用链数据 通过RPC将采集的数据上报  当然,agent还实现了客户端采样,不过在APM监控系统里进行客户端数据采样都是没有灵魂的,所以这里就不再赘述了。\n首先,agent通过 org.apache.skywalking.apm.agent.core.boot.BootService 实现了整体的插件化,agent启动会加载所有的BootService实现,并通过 ServiceManager 来管理这些插件的生命周期,采集jvm指标、gRPC连接管理、调用链数据维护、数据上报OAP这些服务均是通过这种方式扩展。\n然后,agent还通过bytebuddy以javaagent的模式,通过字节码增强的机制来构造AOP环境,再提供PluginDefine的规范方便探针的开发,最终实现非侵入性的数据埋点,采集调用链数据。\n最终落地到代码上则异常清晰:\n//通过bytebuddy的AgentBuilder构造javaagent增强classLoader new AgentBuilder.Default(byteBuddy) .ignore( //忽略这些包的内容,不进行增强 nameStartsWith(\u0026quot;net.bytebuddy.\u0026quot;) .or(nameStartsWith(\u0026quot;org.slf4j.\u0026quot;)) .or(nameStartsWith(\u0026quot;org.apache.logging.\u0026quot;)) .or(nameStartsWith(\u0026quot;org.groovy.\u0026quot;)) .or(nameContains(\u0026quot;javassist\u0026quot;)) .or(nameContains(\u0026quot;.asm.\u0026quot;)) .or(nameStartsWith(\u0026quot;sun.reflect\u0026quot;)) .or(allSkyWalkingAgentExcludeToolkit()) .or(ElementMatchers.\u0026lt;TypeDescription\u0026gt;isSynthetic())) //通过pluginFinder加载所有的探针扩展,并获取所有可以增强的class .type(pluginFinder.buildMatch()) //按照pluginFinder的实现,去改变字节码增强类 .transform(new Transformer(pluginFinder)) //通过listener订阅增强的操作记录,方便调试 .with(new Listener()) .installOn(instrumentation); try { //加载所有的service实现并启动 ServiceManager.INSTANCE.boot(); } catch (Exception e) { logger.error(e, \u0026quot;Skywalking agent boot failure.\u0026quot;); } agent也提供了非常简单的扩展实现机制,以增强一个普通类的方法为例,首先你需要定义一个切向点:\npublic interface InstanceMethodsInterceptPoint { //定义切向方法的适配器,符合适配器的class将被增强 ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); //增强的具体实现类,classReference String getMethodsInterceptor(); //是否重写参数 boolean isOverrideArgs(); } 然后你还需要一个增强的实现类:\npublic interface InstanceMethodsAroundInterceptor { //方法真正执行前执行 void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; //方法真正执行后执行 Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; //当异常发生时执行 void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, 
Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } 一般在执行前和执行后进行数据埋点,就可以采集到想要的数据,当然实际编程要稍微复杂一点,不过官方也实现了对应的abstract类和数据埋点工具类,所以探针的二次开发在Skywalking这个级别确实是非常简单,只需要处理好资源占用和并发问题即可。真正的难点是要对需要增强的对象非常了解,熟悉其运作机制,才能找准切向点,既要所有的流程都需要经过这个点,又可以抓取到期望抓取的上下文信息。同时,多版本的适配和测试也是非常大的工作量,官方虽然提供witness的机制(通过验证某个class是否存在来验证版本),但是作为影响全局的探针,开发和测试都是需要慎之又慎的。\nOAP 同agent类似,OAP作为Skywalking最核心的模块,也实现了自己的扩展机制,不过在这里叫做Module,具体可以参考library-module,在module的机制下,Skywalking实现了自己必须核心组件:\n core:整个OAP核心业务(remoting、cluster、storage、analysis、query、alarm)的规范和接口 cluster:集群管理的具体实现 storage:数据容器的具体实现 query:为前端提供的查询接口的具体实现 receiver:接收探针上报数据的接收器的具体实现 alarm:监控告警的具体实现  以及一个可选组件:\n telemetry:用于监控OAP自身的健康状况  而前面提到的OAP的高扩展性则体现在核心业务的规范均定义在了core中,如果有需要自己扩展的,只需要自己单独做自己的实现,而不需要做侵入式的改动,最典型的示例则是官方支持的storage,不仅支持单机demo的内存数据库H2和经典的ES,连目前开源的Tidb都可以接入。\n初步实践 对于Skywalking的实践我们经历了三个阶段\n 线下测试 第一次生产环境小规模测试 第二次生产环境小规模测试+全量接入  线下测试 环境 由于是线下测试,所以我们直接使用物理机(E5-2680v2 x2, 128G)虚拟了一个集群(实际性能相比云服务器应该偏好一些):\n ES:单机实例,v6.5,4C8G,jvm内存分配为4G OAP:单机实例,v6.1.0-SNAPSHOT,4C8G,jvm内存分配为4G 应用:基于SpringCloud的4个测试实例,调用关系为A-\u0026gt;B-\u0026gt;C-\u0026gt;D,QPS为200  测试结果 拓扑图:\nOAP机器监控:\nES机器监控:\n服务监控面板:\n其中一个调用链记录:\n可以看出,Skywalking非常依赖CPU(不论是OAP还是ES),同时对于网络IO也有一定的要求,至于ES的文件IO在可接受范围内,毕竟确实有大量内容需要持久化。测试结果也基本达到预期要求,调用链和各个指标的监控都工作良好。\n第一次生产环境测试 在线下测试之后,我们再进行了一次基于实际业务针对探针的测试,测试没有发现探针的异常问题,也没有影响业务的正常运作,同时对于jvm实例影响也不是很大,CPU大概提高了5%左右,并不很明显。在这个基础上我们选择了线上的一台服务器,进行了我们第一次生产环境的测试。\n环境  ES:基于现有的一个ES集群,node x 3,v6.0 OAP:2C4G x 2,v6.1.0-SNAPSHOT,jvm内存分配为2G 应用:两个jvm实例  测试时间:03.11-03.16\n测试结果 业务机器负载情况:\n从最敏感的CPU指标上来看,增加agent并没有导致可见的CPU使用率的变化,而其他的内存、网络IO、连接数也基本没有变化。\nOAP负载情况:\n可以看到机器的CPU和网络均有较大的波动,但是也都没有真正打爆服务器,但是我们的实例却经常出现两种日志:\n One trace segment has been abandoned, cause by buffer is full.\n  Collector traceSegment service doesn\u0026rsquo;t response in xxx seconds.\n 通过阅读源码发现:\n agent和OAP只会使用一个长连接阻塞式的交换数据,如果某次数据交换没有得到响应,则会阻塞后续的上报流程(一般长连接的RPC请求会在数据传输期间互相阻塞,但是不会在等待期间互相阻塞,当然这也是源于agent并没有并发上报的机制),所以一旦OAP在接收数据的过程中发生阻塞,就会导致agent本地的缓冲区满,最终只能将监控数据直接丢弃防止内存泄漏  而导致OAP没有及时响应的一方面是OAP本身性能不够(OAP需要承担大量的二次统计工作,通过Jstack统计,长期有超过几十个线程处于RUNNABLE状态,据吴晟描述目前OAP都是高性能模式,后续将会提供配置来支持低性能模式),另一方面可能是ES批量插入效率不够,因此我们修改了OAP的批量插入参数来增加插入频率,降低单次插入数量:\n bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:2000 -\u0026gt; 20} # Execute the bulk every 2000 requests bulkSize: ${SW_STORAGE_ES_BULK_SIZE:20 -\u0026gt; 2} # flush the bulk every 20mb flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10 -\u0026gt; 2} # flush the bulk every 10 seconds whatever the number of requests  虽然 service doesn\u0026rsquo;t response 出现的频率明显降低,但是依然还是会偶尔出现,而每一次出现都会伴随大量的 trace segment has been abandoned ,推测OAP和ES可能都存在性能瓶颈(应该进行更进一步的诊断确定问题,不过当时直接和吴晟沟通,确认确实OAP非常消耗CPU资源,考虑到当时部署只是2C,并且还部署有其他业务,就没有进一步的测试)。\n同时,在频繁的数据丢弃过程中,也偶发了一个bug:当agent上报数据超时并且大量丢弃数据之后,即使后续恢复正常也能通过日志看到数据正常上报,在查询界面查询的时候,会查不到这个实例上报的数据,不过在重启OAP和agent之后,之前上报的数据又能查询到,这个也和吴晟沟通过,没有其他的案例,后续想重现却也一直没有成功。\n而同时还发现两个更加严重的问题:\n 我们使用的是线上已经部署好的ES集群,其版本只有6.0,而新的Skywalking使用了6.3的查询特性,导致很多查询执行报错,只能使用最简单的查询 我们的kafka集群版本也非常古老,不支持v1或者更高版本的header,而kafka的探针强依赖header来传输上下文信息,导致kafka客户端直接报错影响业务,所以也立即移除了kafka的探针  在这一次测试中,我们基本确认了agent对于应用的影响,同时也发现了一些我们和Skywalking的一些问题,留待后续测试确认。\n第二次生产环境测试 为了排除性能和ES版本的影响,测试Skywalking本身的可用性,参考吴晟的建议(这也是在最初技术选型的时候没有选择Pinpoint和CAT的部分原因:一方面Skywalking的功能符合我们的要求,更重要的是有更加直接和效率的和项目维护者直接沟通的渠道),所以这一次我们新申请了ES集群和OAP机器。\n环境  ES:腾讯云托管ES集群,4C16G x 3 SSD,v6.4 OAP:16C32G,standalone,jvm分配24G 应用:2~8个jvm实例  测试时间:03.18-至今\n测试结果 
OAP负载情况:\nES集群负载:\n测试过程中,我们先接入了一台机器上的两个实例,完全没有遇到一测中的延迟或者数据丢弃的问题,三天后我们又接入了另外两台机器的4个实例,这之后两天我们又接入了另外两台机器的2个实例。依然没有遇到一测中的延迟或者数据丢弃的问题。\n而ES负载的监控也基本验证了一测延迟的问题,Skywalking由于较高的并发插入,对于ES的性能压力很大(批量插入时需要针对每条数据分析并且构建查询索引),大概率是ES批量插入性能不够导致延迟,考虑到我们仅仅接入了8个实例,日均segment插入量大概5000万条(即日均5000万次独立调用),如果想支持更大规模的监控,对于ES容量规划势必要留够足够的冗余。同时OAP和ES集群的网络开销也不容忽视,在支撑大规模的监控时,需要集群并且receiver和aggregattor分离部署来分担网络IO的压力。\n而在磁盘容量占用上,我们设置的原始数据7天过期,目前刚刚开始滚动过期,目前segment索引已经累计了314757240条记录总计158G数据,当然我们目前异常记录较少,如果异常记录较多的话,其磁盘开销将会急剧增加(span中会记录异常堆栈信息)。而由于选择的SSD,磁盘的写入和查询性能都很高,即使只有3个节点,也完全没有任何压力。\n而在新版本的ES集群下,Skywalking的所有查询功能都变得可用,和我们之前自己的单独编写的异常指标监控都能完美对照。当然我们也遇到一个问题:Skywalking仅采集了调用记录,但是对于调用过程中的过程数据,除了异常堆栈其他均没有采集,导致真的出现异常也缺少充足的上下文信息还原现场,于是我们扩展了Skywalking的两个探针(我们项目目前重度依赖的组件):OkHttp(增加对requestBody和responseBody的采集)和SpringMVC(增加了对requestBody的采集),目前工作正常,如果进一步的增加其他的探针,采集到足够的数据,那么我们基本可以脱离ELK了。\n而OAP方面,CPU和内存的消耗远远低于预期的估计,CPU占用率一直较低,而分配的24G内存也仅使用了10+G,完全可以支持更大规模的接入量,不过在网络IO方面可能存在一定的风险,推测应该8C16G的容器就足以支持十万CPM级别的数据接入。\n当然我们在查询也遇到了一些瓶颈,最大的问题就是无法精确的命中某一条调用记录,就如前面的分析,因为segment的数据结构问题,无法进行面向业务的查询(例如messageId、requestId、orderId等),所以如果想精确匹配某一次调用请求,需要通过各个维度的条件约束慢慢缩小范围最后定位。\nSkywalking展望 通过上述对Skywalking的剖析和实践,Skywalking确实是一个优秀的APM+调用链跟踪监控系统,能够覆盖大部分使用场景,让研发和运维能够更加实时/准实时的了解线上服务的运行情况。当然Skywailking也不是尽善尽美,例如下面就是个人觉得目前可见的不满足我们期望的:\n 数据准实时通过gRPC上报,本地缓存的瓶颈(当然官方主要是为了简化模型,减少依赖,否则Skywalking还依赖ELK就玩得有点大了)  缓存队列的长度,过长占据内存,过短容易buffer满丢弃数据 优雅停机同时又不丢失缓存   数据上报需要在起点上报,链路回传的时候需要携带SPAN及子SPAN的信息,当链路较长或者SPAN保存的信息较多时,会额外消耗一定的带宽 skywalking更多是一个APM系统而不是分布式调用链跟踪系统  在整个链路的探针上均缺少输入输出的抓取 在调用链的筛查上并没用进行增强,并且体现在数据结构的设计,例如TAG信息均保存在SPAN信息中,而SPAN信息均被BASE64编码作为数据保存,无法检索,最终trace的筛查只能通过时间/traceId/service/endPoint/state进行非业务相关的搜索   skywalking缺少对三方接口依赖的指标,这个对于系统稳定往往非常重要  而作为一个初级的使用者,个人觉得我们可以使用有限的人力在以下方向进行扩展:\n 增加receiver:整合ELK,通过日志采集采集数据,降低异构系统的采集开发成本 优化数据结构,提供基于业务关键数据的查询接口 优化探针,采集更多的业务数据,争取代替传统的ELK日志简单查询,绝大部分异常诊断和定位均可以通过Skywalking即可完成 增加业务指标监控的模式,能够自定义业务指标(目前官方已经在实现 Metric Exporter )  ","title":"SkyWalking调研与初步实践","url":"/zh/2019-03-29-introduction-of-skywalking-and-simple-practice/"},{"content":"前言 首先描述下问题的背景,博主有个习惯,每天上下班的时候看下skywalking的trace页面的error情况。但是某天突然发现生产环境skywalking页面没有任何数据了,页面也没有显示任何的异常,有点慌,我们线上虽然没有全面铺开对接skywalking,但是也有十多个应用。看了应用agent端日志后,其实也不用太担心,对应用毫无影响。大概情况就是这样,但是问题还是要解决,下面就开始排查skywalking不可用的问题。\n使用到的工具arthas Arthas是阿里巴巴开源的一款在线诊断java应用程序的工具,是greys工具的升级版本,深受开发者喜爱。当你遇到以下类似问题而束手无策时,Arthas可以帮助你解决:\n 这个类从哪个 jar 包加载的?为什么会报各种类相关的 Exception? 我改的代码为什么没有执行到?难道是我没 commit?分支搞错了? 遇到问题无法在线上 debug,难道只能通过加日志再重新发布吗? 线上遇到某个用户的数据处理有问题,但线上同样无法 debug,线下无法重现! 是否有一个全局视角来查看系统的运行状况? 有什么办法可以监控到JVM的实时运行状态? 
Arthas采用命令行交互模式,同时提供丰富的 Tab 自动补全功能,进一步方便进行问题的定位和诊断。  项目地址:https://github.com/alibaba/arthas\n先定位问题一 查看skywalking-oap-server.log的日志,发现会有一条异常疯狂的在输出,异常详情如下:\n2019-03-01 09:12:11,578 - org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker -3264081149 [DataCarrier.IndicatorPersistentWorker.endpoint_inventory.Consumser.0.Thread] ERROR [] - Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; org.elasticsearch.action.ActionRequestValidationException: Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; at org.elasticsearch.action.ValidateActions.addValidationError(ValidateActions.java:26) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.action.index.IndexRequest.validate(IndexRequest.java:183) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:515) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequestAndParseEntity(RestHighLevelClient.java:508) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.index(RestHighLevelClient.java:348) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient.forceInsert(ElasticSearchClient.java:141) ~[library-client-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.storage.plugin.elasticsearch.base.RegisterEsDAO.forceInsert(RegisterEsDAO.java:66) ~[storage-elasticsearch-plugin-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.lambda$onWork$0(RegisterPersistentWorker.java:83) ~[server-core-6.0.0-alpha.jar:6.0.0-alpha] at java.util.HashMap$Values.forEach(HashMap.java:981) [?:1.8.0_201] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.onWork(RegisterPersistentWorker.java:74) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.access$100(RegisterPersistentWorker.java:35) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker$PersistentConsumer.consume(RegisterPersistentWorker.java:120) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.consume(ConsumerThread.java:101) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.run(ConsumerThread.java:68) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] 2019-03-01 09:12:11,627 - org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker -3264081198 [DataCarrier.IndicatorPersistentWorker.endpoint_inventory.Consumser.0.Thread] ERROR [] - Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; org.elasticsearch.action.ActionRequestValidationException: Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; at org.elasticsearch.action.ValidateActions.addValidationError(ValidateActions.java:26) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.action.index.IndexRequest.validate(IndexRequest.java:183) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:515) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at 
org.elasticsearch.client.RestHighLevelClient.performRequestAndParseEntity(RestHighLevelClient.java:508) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.index(RestHighLevelClient.java:348) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient.forceInsert(ElasticSearchClient.java:141) ~[library-client-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.storage.plugin.elasticsearch.base.RegisterEsDAO.forceInsert(RegisterEsDAO.java:66) ~[storage-elasticsearch-plugin-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.lambda$onWork$0(RegisterPersistentWorker.java:83) ~[server-core-6.0.0-alpha.jar:6.0.0-alpha] at java.util.HashMap$Values.forEach(HashMap.java:981) [?:1.8.0_201] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.onWork(RegisterPersistentWorker.java:74) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.access$100(RegisterPersistentWorker.java:35) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker$PersistentConsumer.consume(RegisterPersistentWorker.java:120) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.consume(ConsumerThread.java:101) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.run(ConsumerThread.java:68) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] 可以看到,上面的异常输出的时间节点,以这种频率在疯狂的刷新。通过异常message,得知到是因为skywalking在写elasticsearch时,索引的id太长了。下面是elasticsearch的源码:\nif (id != null \u0026amp;\u0026amp; id.getBytes(StandardCharsets.UTF_8).length \u0026gt; 512) { validationException = addValidationError(\u0026#34;id is too long, must be no longer than 512 bytes but was: \u0026#34; + id.getBytes(StandardCharsets.UTF_8).length, validationException); } 具体可见:elasticsearch/action/index/IndexRequest.java#L240\n问题一: 通过日志,初步定位是哪个系统的url太长,skywalking在注册url数据时触发elasticsearch针对索引id校验的异常,而skywalking注册失败后会不断的重试,所以才有了上面日志不断刷的现象。\n问题解决: elasticsearch client在写es前通过硬编码的方式写死了索引id的长度不能超过512字节大小。也就是我们不能通过从ES侧找解决方案了。回到异常的message,只能看到提示id太长,并没有写明id具体是什么,这个异常提示其实是不合格的,博主觉得应该把id的具体内容抛出来,问题就简单了。因为异常没有明确提示,系统又比较多,不能十多个系统依次关闭重启来验证到底是哪个系统的哪个url有问题。这个时候Arthas就派上用场了,在不重启应用不开启debug模式下,查看实例中的属性对象。下面通过Arthas找到具体的url。\n从异常中得知,org.elasticsearch.action.index.IndexRequest这个类的validate方法触发的,这个方法是没有入参的,校验的id属性其实是对象本身的属性,那么我们使用Arthas的watch指令来看下这个实例id属性。先介绍下watch的用法:\n功能说明 让你能方便的观察到指定方法的调用情况。能观察到的范围为:返回值、抛出异常、入参,通过编写 \u0008OGNL 表达式进行对应变量的查看。\n参数说明 watch 的参数比较多,主要是因为它能在 4 个不同的场景观察对象\n   参数名称 参数说明     class-pattern 类名表达式匹配   method-pattern 方法名表达式匹配   express 观察表达式   condition-express 条件表达式   [b] 在方法调用之前观察   [e] 在方法异常之后观察   [s] 在方法返回之后观察   [f] 在方法结束之后(正常返回和异常返回)观察   [E] 开启正则表达式匹配,默认为通配符匹配   [x:] 指定输出结果的属性遍历深度,默认为 1    从上面的用法说明结合异常信息,我们得到了如下的指令脚本:\nwatch org.elasticsearch.action.index.IndexRequest validate \u0026ldquo;target\u0026rdquo;\n执行后,就看到了我们希望了解到的内容,如:\n索引id的具体内容看到后,就好办了。我们暂时把定位到的这个应用启动脚本中的的skywalking agent移除后(计划后面重新设计下接口)重启了下系统验证下。果然疯狂输出的日志停住了,但是问题并没完全解决,skywalking页面上的数据还是没有恢复。\n定位问题二 skywalking数据存储使用了elasticsearch,页面没有数据,很有可能是elasticsearch出问题了。查看elasticsearch日志后,发现elasticsearch正在疯狂的GC,日志如:\n: 139939K-\u0026gt;3479K(153344K), 0.0285655 secs] 473293K-\u0026gt;336991K(5225856K), 0.0286918 secs] [Times: 
user=0.05 sys=0.00, real=0.03 secs] 2019-02-28T20:05:38.276+0800: 3216940.387: Total time for which application threads were stopped: 0.0301495 seconds, Stopping threads took: 0.0001549 seconds 2019-02-28T20:05:38.535+0800: 3216940.646: [GC (Allocation Failure) 2019-02-28T20:05:38.535+0800: 3216940.646: [ParNew Desired survivor size 8716288 bytes, new threshold 6 (max 6) - age 1: 1220136 bytes, 1220136 total - age 2: 158496 bytes, 1378632 total - age 3: 88200 bytes, 1466832 total - age 4: 46240 bytes, 1513072 total - age 5: 126584 bytes, 1639656 total - age 6: 159224 bytes, 1798880 total : 139799K-\u0026gt;3295K(153344K), 0.0261667 secs] 473311K-\u0026gt;336837K(5225856K), 0.0263158 secs] [Times: user=0.06 sys=0.00, real=0.03 secs] 2019-02-28T20:05:38.562+0800: 3216940.673: Total time for which application threads were stopped: 0.0276971 seconds, Stopping threads took: 0.0001030 seconds 2019-02-28T20:05:38.901+0800: 3216941.012: [GC (Allocation Failure) 2019-02-28T20:05:38.901+0800: 3216941.012: [ParNew Desired survivor size 8716288 bytes, new threshold 6 (max 6) 问题二: 查询后得知,elasticsearch的内存配置偏大了,GC时间太长,导致elasticsearch脱离服务了。elasticsearch所在主机的内存是8G的实际内存7.6G,刚开始配置了5G的堆内存大小,可能Full GC的时候耗时太久了。查询elasticsearch官方文档后,得到如下的jvm优化建议:\n 将最小堆大小(Xms)和最大堆大小(Xmx)设置为彼此相等。 Elasticsearch可用的堆越多,它可用于缓存的内存就越多。但请注意,过多的堆可能会使您陷入长时间的垃圾收集暂停。 设置Xmx为不超过物理RAM的50%,以确保有足够的物理RAM用于内核文件系统缓存。 不要设置Xmx为JVM用于压缩对象指针(压缩oops)的截止值之上; 确切的截止值变化但接近32 GB。  详情见:https://www.elastic.co/guide/en/elasticsearch/reference/6.5/heap-size.html\n问题解决: 根据Xmx不超过物理RAM的50%上面的jvm优化建议。后面将Xms和Xmx都设置成了3G。然后先停掉skywalking(由于skywalking中会缓存部分数据,如果直接先停ES,会报索引找不到的类似异常,这个大部分skywalking用户应该有遇到过),清空skywalking缓存目录下的内容,如:\n在重启elasticsearch,接着启动skywalking后页面终于恢复了\n结语 整个问题排查到解决大概花了半天时间,幸好一点也不影响线上应用的使用,这个要得益于skywalking的设计,不然就是大灾难了。然后要感谢下Arthas的技术团队,写了这么好用的一款产品并且开源了,如果没有Arthas,这个问题真的不好定位,甚至一度想到了换掉elasticsearch,采用mysql来解决索引id过长的问题。Arthas真的是线上找问题的利器,博主在Arthas刚面世的时候就关注了,并且一直在公司推广使用,在这里在硬推一波。\n作者简介: 陈凯玲,2016年5月加入凯京科技。曾任职高级研发和项目经理,现任凯京科技研发中心架构\u0026amp;运维部负责人。pmp项目管理认证,阿里云MVP。热爱开源,先后开源过多个热门项目。热爱分享技术点滴,独立博客KL博客(http://www.kailing.pub)博主。\n","title":"SkyWalking线上问题排查定位","url":"/zh/2019-03-01-skywalking-troubleshoot/"},{"content":" 作者:王振飞, 写于:2019-02-24 说明:此文是个人所写,版本归属作者,代表个人观点,仅供参考,不代表skywalking官方观点。 说明:本次对比基于skywalking-6.0.0-GA和Pinpoint-1.8.2(截止2019-02-19最新版本)。另外,我们这次技术选型直接否定了Zipkin,其最大原因是它对代码有侵入性,CAT也是一样。这是我们所完全无法接受的。\n 这应该是目前最优秀的两款开源APM产品了,而且两款产品都通过字节码注入的方式,实现了对代码完全无任何侵入,他们的对比信息如下:\nOAP说明: skywalking6.x才有OAP这个概念,skywalking5.x叫collector。\n接下来,对每个PK项进行深入分析和对比。更多精彩和首发内容请关注公众号:【阿飞的博客】。\n社区比较\n这一点上面skywalking肯定完胜。一方面,skywalking已经进入apache孵化,社区相当活跃。而且项目发起人是中国人,我们能够进入官方群(Apache SkyWalking交流群:392443393)和项目发起人吴晟零距离沟通,很多问题能第一时间得到大家的帮助(玩过开源的都知道,这个价值有多大)。 而Pinpoint是韩国人开发的,免不了有沟通障碍。至于github上最近一年的commit频率,skywalking和Pinpoint旗鼓相当,都是接近20的水平: 所以,社区方面,skywalking更胜一筹。\n支持语言比较 Pinpoint只支持Java和PHP,而skywalking支持5种语言:Java, C#, PHP, Node.js, Go。如果公司的服务涉及到多个开发语言,那么skywalking会是你更好的选择。并且,如果你要实现自己的探针(比如python语言),skywalking的二次开发成本也比Pinpoint更低。\n 说明:Github上有开发者为Pinpoint贡献了对Node.js的支持,请戳链接:https://github.com/peaksnail/pinpoint-node-agent。但是已经停止维护,几年没更新了!\n 所以,支持语言方面,skywalking更胜一筹。\n协议比较 SkyWalking支持gRPC和http,不过建议使用gRPC,skywalking6.x版本已经不提供http方式(但是还会保留接收5.x的数据),以后会考虑删除。 而Pinpoint使用的是thrift协议。 协议本身没有谁好谁坏。\n存储比较(重要) 
笔者认为,存储是skywalking和Pinpoint最大的差异所在,因为底层存储决定了上层功能。\nPinpoint只支持HBase,且扩展代价较大。这就意味着,如果选择Pinpoint,还要有能力hold住一套HBase集群(daocloud从Pinpoint切换到skywalking就是因为HBase的维护代价有点大)。在这方面,skywalking支持的存储就多很多,这样的话,技术选型时可以根据团队技术特点选择合适的存储,而且还可以自行扩展(不过生产环境上应该大部分是以es存储为主)。\nPinpoint只支持HBase的另一个缺陷就是,HBase本身查询能力有限(HBase只能支持三种方式查询:RowKey精确查找,SCAN范围查找,全表扫描)限制了Pinpoint的查询能力,所以其支持的查询一定是在时间的基础上(Pinpoint通过鼠标圈定一个时间范围后查看这个范围内的Trace信息)。而skywalking可以多个维度任意组合查询,例如:时间范围,服务名,Trace状态,请求路径,TraceId等。\n另外,Pinpoint和skywalking都支持TTL,即历史数据保留策略。skywalking是在OAP模块的application.yml中配置从而指定保留时间。而Pinpoint是通过HBase的ttl功能实现,通过Pinpoint提供的hbase脚本https://github.com/naver/pinpoint/blob/master/hbase/scripts/hbase-create.hbase可以看到:ApplicationTraceIndex配置了TTL =\u0026gt; 5184000,SqlMetaData_Ver2配合了TTL =\u0026gt; 15552000,单位是秒。\n 说明:es并不是完全碾压HBase,es和HBase没有绝对的好和坏。es强在检索能力,存储能力偏弱(千亿以下,es还是完全有能力hold的住的)。HBase强在存储能力,检索能力偏弱。如果搜集的日志量非常庞大,那么es存储就比较吃力。当然,没有蹩脚的中间件,只有蹩脚的程序员,无论是es还是HBase,调优才是最关键的。同样的,如果对检索能力有一定的要求,那么HBase肯定满足不了你。所以,又到了根据你的业务和需求决定的时刻了,trade-off真是无所不在。\n UI比较 Pinpoint的UI确实比skywalking稍微好些,尤其是服务的拓扑图展示。不过daocloud根据Pinpoint的风格为skywalking定制了一款UI。请戳链接:https://github.com/TinyAllen/rocketbot,项目介绍是:rocketbot: A UI for Skywalking。截图如下所示; 所以,只比较原生UI的话,Pinpoint更胜一筹。\n扩展性比较 Pinpoint好像设计之初就没有过多考虑扩展性,无论是底层的存储,还是自定义探针实现等。而skywalking核心设计目标之一就是Pluggable,即可插拔。\n以存储为例,pinpoint完全没有考虑扩展性,而skywalking如果要自定义实现一套存储,只需要定义一个类实现接口org.apache.skywalking.oap.server.library.module.ModuleProvider,然后实现一些DAO即可。至于Pinpoint则完全没有考虑过扩展底层存储。\n再以实现一个自己的探针为例(比如我要实现python语言的探针),Pinpoint选择thrift作为数据传输协议标准,而且为了节省数据传输大小,在传递常量的时候也尽量使用数据参考字典,传递一个数字而不是直接传递字符串等等。这些优化也增加了系统的复杂度:包括使用 Thrift 接口的难度、UDP 数据传输的问题、以及数据常量字典的注册问题等等。Pinpoint发展这么年才支持Java和PHP,可见一斑。而skywalking的数据接口就标准很多,并且支持OpenTracing协议,除了官方支持Java以外,C#、PHP和Node.js的支持都是由社区开发并维护。\n还有后面会提到的告警,skywalking的可扩展性也要远好于Pinpoint。\n最后,Pinpoint和skywalking都支持插件开发,Pinpoint插件开发参考:http://naver.github.io/pinpoint/1.8.2/plugindevguide.html。skywalking插件开发参考:https://github.com/apache/incubator-skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md。\n所以,扩展性方面skywalking更胜一筹。\n告警比较 Pinpoint和skywalking都支持自定义告警规则。\n但是恼人的是,Pinpoint如果要配置告警规则,还需要安装MySQL(配置告警时的用户,用户组信息以及告警规则都持久化保存在MySQL中),这就导致Pinpoint的维护成本又高了一些,既要维护HBase又要维护MySQL。\nPinpoint支持的告警规则有:SLOW COUNT|RATE, ERROR COUNT|RATE, TOTAL COUNT, SLOW COUNT|RATE TO CALLEE, ERROR COUNT|RATE TO CALLEE, ERROR RATE TO CALLEE, HEAP USAGE RATE, JVM CPU USAGE RATE, DATASOURCE CONNECTION USAGE RATE。\nPinpoint每3分钟周期性检查过去5分钟的数据,如果有符合规则的告警,就会发送sms/email给用户组下的所有用户。需要说明的是,实现发送sms/email的逻辑需要自己实现,Pinpoint只提供了接口com.navercorp.pinpoint.web.alarm.AlarmMessageSender。并且Pinpoint发现告警持续时,会递增发送sms/email的时间间隔 3min -\u0026gt; 6min -\u0026gt; 12min -\u0026gt; 24min,防止sms/email狂刷。\n Pinpoint告警参考:http://naver.github.io/pinpoint/1.8.2/alarm.html\n skywalking配置告警不需要引入任何其他存储。skywalking在config/alarm-settings.xml中可以配置告警规则,告警规则支持自定义。\nskywalking支持的告警规则(配置项中的名称是indicator-name)有:service_resp_time, service_sla, service_cpm, service_p99, service_p95, service_p90, service_p75, service_p50, service_instance_sla, service_instance_resp_time, service_instance_cpm, endpoint_cpm, endpoint_avg, endpoint_sla, endpoint_p99, endpoint_p95, endpoint_p90, endpoint_p75, endpoint_p50。\nSkywalking通过HttpClient的方式远程调用在配置项webhooks中定义的告警通知服务地址。skywalking也支持silence-period配置,假设在TN这个时间点触发了告警,那么TN -\u0026gt; TN+period 这段时间内不会再重复发送该告警。\n skywalking告警参考:https://github.com/apache/incubator-skywalking/blob/master/docs/en/setup/backend/backend-alarm.md。目前只支持official_analysis.oal脚本中Service, Service Instance, Endpoint scope的metric,其他scope的metric需要等待后续扩展。\n 
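结合上文提到的 indicator-name、op、threshold、period、counts、silence-period、message 与 webhooks 这些配置项,这里给出一个按描述拼出来的告警规则示意片段(规则名、阈值、周期与 webhook 地址均为假设值,具体键名与文件格式请以官方 backend-alarm 文档为准,不代表真实生产配置):

rules:
  service_resp_time_rule:
    indicator-name: service_resp_time
    op: ">"
    threshold: 1000
    period: 10
    counts: 3
    silence-period: 5
    message: 服务响应时间在最近的统计周期内超过 1000ms,请关注
webhooks:
  - http://127.0.0.1:8080/alarm/notify

按这份示意配置,OAP 每个检查周期将 OAL 统计出的 service_resp_time 与 1000ms 比较,连续达到 counts 次即向 webhooks 中的地址发送告警,silence-period 内不再重复发送。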
Pinpoint和skywalking都支持常用的告警规则配置,但是skywalking采用webhooks的方式就灵活很多:短信通知,邮件通知,微信通知都是可以支持的。而Pinpoint只能sms/email通知,并且还需要引入MySQL存储,增加了整个系统复杂度。所以,告警方面,skywalking更胜一筹。\nJVM监控 skywalking支持监控:Heap, Non-Heap, GC(YGC和FGC)。 Pinpoint能够监控的指标主要有:Heap, Non-Heap, FGC, DirectBufferMemory, MappedBufferMemory,但是没有YGC。另外,Pinpoint还支持多个指标同一时间点查看的功能。如下图所示:\n所以,对JVM的监控方面,Pinpoint更胜一筹。\n服务监控 包括操作系统,和部署的服务实例的监控。 Pinpoint支持的维度有:CPU使用率,Open File Descriptor,数据源,活动线程数,RT,TPS。 skywalking支持的维度有:CPU使用率,SLA,RT,CPM(Call Per Minutes)。 所以,这方面两者旗鼓相当,没有明显的差距。\n跟踪粒度比较 Pinpoint在这方面做的非常好,跟踪粒度非常细。如下图所示,是Pinpoint对某个接口的trace信息: 而同一个接口skywalking的trace信息如下图所示:  备注: 此截图是skywalking加载了插件apm-spring-annotation-plugin-6.0.0-GA.jar(这个插件允许跟踪加了@Bean, @Service, @Component and @Repository注解的spring context中的bean的方法)。\n 通过对比发现,在跟踪粒度方面,Pinpoint更胜一筹。\n过滤追踪 Pinpoint和skywalking都可以实现,而且配置的表达式都是基于ant风格。 Pinpoint在Web UI上配置 filter wizard 即可自定义过滤追踪。 skywalking通过加载apm-trace-ignore-plugin插件就能自定义过滤跟踪,skywalking这种方式更灵活,比如一台高配服务器上有若干个服务,在共用的agent配置文件apm-trace-ignore-plugin.config中可以配置通用的过滤规则,然后通过-D的方式为每个服务配置个性化过滤。\n所以,在过滤追踪方面,skywalking更胜一筹。\n性能损耗 由于Pinpoint采集信息太过详细,所以,它对性能的损耗最大。而skywalking默认策略比较保守,对性能损耗很小。 有网友做过压力测试,对比如下:\n 图片来源于:https://juejin.im/post/5a7a9e0af265da4e914b46f1\n 所以,在性能损耗方面,skywalking更胜一筹。\n发布包比较 skywalking与时俱进,全系标配jar包,部署只需要执行start.sh脚本即可。而Pinpoint的collector和web还是war包,部署时依赖web容器(比如Tomcat)。拜托,都9012年了。\n所以,在发布包方面,skywalking更胜一筹。\n支持组件比较 skywalking和Pinpoint支持的中间件对比说明:\n WEB容器说明:Pinpoint支持几乎所有的WEB容器,包括开源和商业的。而wkywalking只支持开源的WEB容器,对2款大名鼎鼎的商业WEB容器Weblogic和Wevsphere都不支持。 RPC框架说明:对RPC框架的支持,skywalking简直秒杀Pinpoint。连小众的motan和sofarpc都支持。 MQ说明:skywalking比Pinpoint多支持一个国产的MQ中间件RocketMQ,毕竟RocketMQ在国内名气大,而在国外就一般了。加之skywalking也是国产的。 RDBMS/NoSQL说明:Pinpoint对RDBMS和NoSQL的支持都要略好于skywalking,RDBMS方面,skywalking不支持MSSQL和MariaDB。而NoSQL方面,skywalking不支持Cassandra和HBase。至于Pinpoint不支持的H2,完全不是问题,毕竟生产环境是肯定不会使用H2作为底层存储的。 Redis客户端说明:虽然skywalking和Pinpoint都支持Redis,但是skywalking支持三种流行的Redis客户端:Jedis,Redisson,Lettuce。而Pinpoint只支持Jedis和Lettuce,再一次,韩国人开发的Pinpoint无视了目前中国人开发的GitHub上star最多的Redis Client \u0026ndash; Redisson。 日志框架说明:Pinpoint居然不支持log4j2?但是已经有人开发了相关功能,详情请戳链接:log4j plugin support log4j2 or not? https://github.com/naver/pinpoint/issues/3055  通过对skywalking和Pinpoint支持中间件的对比我们发现,skywalking对国产软件的支持真的是全方位秒杀Pinpoint,比如小众化的RPC框架:motan(微博出品),sofarpc,阿里的RocketMQ,Redis客户端Redisson,以及分布式任务调度框架elastic-job等。当然也从另一方面反应国产开源软件在世界上的影响力还很小。\n这方面没有谁好谁坏,毕竟每个公司使用的技术栈不一样。如果你对RocketMQ有强需求,那么skywalking是你的最佳选择。如果你对es有强需求,那么skywalking也是你的最佳选择。如果HBase是你的强需求,那么Pinpoint就是你的最佳选择。如果MSSQL是你的强需求,那么Pinpoint也是你的最佳选择。总之,这里完全取决你的项目了。\n总结 经过前面对skywalking和Pinpoint全方位对比后我们发现,对于两款非常优秀的APM软件,有一种既生瑜何生亮的感觉。Pinpoint的优势在于:追踪数据粒度非常细、功能强大的用户界面,以及使用HBase作为存储带来的海量存储能力。而skywalking的优势在于:非常活跃的中文社区,支持多种语言的探针,对国产开源软件非常全面的支持,以及使用es作为底层存储带来的强大的检索能力,并且skywalking的扩展性以及定制化要更优于Pinpoint:\n 如果你有海量的日志存储需求,推荐Pinpoint。 如果你更看重二次开发的便捷性,推荐skywalking。  最后,参考上面的对比,结合你的需求,哪些不能妥协,哪些可以舍弃,从而更好的选择一款最适合你的APM软件。\n参考链接  参考[1]. https://github.com/apache/incubator-skywalking/blob/master/docs/en/setup/service-agent/java-agent/Supported-list.md 参考[2]. http://naver.github.io/pinpoint/1.8.2/main.html#supported-modules 参考[3]. https://juejin.im/post/5a7a9e0af265da4e914b46f1    如果觉得本文不错,请关注作者公众号:【阿飞的博客】,多谢!\n ","title":"APM巅峰对决:SkyWalking P.K. Pinpoint","url":"/zh/2019-02-24-skywalking-pk-pinpoint/"},{"content":"According to Apache Software Foundation branding policy all docker images of Apache Skywalking should be transferred from skywalking to apache with a prefix skywalking-. 
The transfer details are as follows\n skywalking/base -\u0026gt; apache/skywalking-base skywalking/oap -\u0026gt; apache/skywalking-oap-server skywalking/ui -\u0026gt; apache/skywalking-ui  All of repositories in skywalking will be removed after one week.\n","title":"Transfer Docker Images to Apache Official Repository","url":"/events/transfer-docker-images-to-apache-official-repository/"},{"content":"6.0.0-GA release. Go to downloads page to find release tars. This is an important milestone version, we recommend all users upgrade to this version.\nKey updates\n Bug fixed Register bug fix, refactor and performance improvement New trace UI  ","title":"Release Apache SkyWalking APM 6.0.0-GA","url":"/events/release-apache-skywalking-apm-6-0-0-ga/"},{"content":"Based on his contributions to the project, he has been accepted as SkyWalking PPMC. Welcome aboard.\n","title":"Welcome Jian Tan as a new PPMC","url":"/events/welcome-jian-tan-as-a-new-ppmc/"},{"content":" Author: Hongtao Gao, Apache SkyWalking \u0026amp; ShardingShpere PMC GitHub, Twitter, Linkedin  Service mesh receiver was first introduced in Apache SkyWalking 6.0.0-beta. It is designed to provide a common entrance for receiving telemetry data from service mesh framework, for instance, Istio, Linkerd, Envoy etc. What’s the service mesh? According to Istio’s explain:\nThe term service mesh is used to describe the network of microservices that make up such applications and the interactions between them.\nAs a PMC member of Apache SkyWalking, I tested trace receiver and well understood the performance of collectors in trace scenario. I also would like to figure out the performance of service mesh receiver.\nDifferent between trace and service mesh Following chart presents a typical trace map:\nYou could find a variety of elements in it just like web service, local method, database, cache, MQ and so on. But service mesh only collect service network telemetry data that contains the entrance and exit data of a service for now(more elements will be imported soon, just like Database). A smaller quantity of data is sent to the service mesh receiver than the trace.\nBut using sidecar is a little different.The client requesting “A” that will send a segment to service mesh receiver from “A”’s sidecar. If “A” depends on “B”, another segment will be sent from “A”’s sidecar. But for a trace system, only one segment is received by the collector. The sidecar model splits one segment into small segments, that will increase service mesh receiver network overhead.\nDeployment Architecture In this test, I will pick two different backend deployment. One is called mini unit, consist of one collector and one elasticsearch instance. Another is a standard production cluster, contains three collectors and three elasticsearch instances.\nMini unit is a suitable architecture for dev or test environment. It saves your time and VM resources, speeds up depolyment process.\nThe standard cluster provides good performance and HA for a production scenario. Though you will pay more money and take care of the cluster carefully, the reliability of the cluster will be a good reward to you.\nI pick 8 CPU and 16GB VM to set up the test environment. This test targets the performance of normal usage scenarios, so that choice is reasonable. The cluster is built on Google Kubernetes Engine(GKE), and every node links each other with a VPC network. 
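As a rough sketch only (the Deployment name and image tag below are placeholders, not the exact manifests used in this test), reserving a whole 8 CPU / 16GB node for a single collector instance on Kubernetes could look like:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: skywalking-oap
spec:
  replicas: 1
  selector:
    matchLabels:
      app: skywalking-oap
  template:
    metadata:
      labels:
        app: skywalking-oap
    spec:
      containers:
        - name: oap
          image: apache/skywalking-oap-server:6.0.0-beta  # tag is illustrative
          resources:
            requests:
              cpu: "8"      # one collector per 8-CPU node
              memory: 12Gi  # leave headroom on a 16GB node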
For running collector is a CPU intensive task, the resource request of collector deployment should be 8 CPU, which means every collector instance occupy a VM node.\nTesting Process Receiving mesh fragments per second(MPS) depends on the following variables.\n Ingress query per second(QPS) The topology of a microservice cluster Service mesh mode(proxy or sidecar)  In this test, I use Bookinfo app as a demo cluster.\nSo every request will touch max 4 nodes. Plus picking the sidecar mode(every request will send two telemetry data), the MPS will be QPS * 4 *2.\nThere are also some important metrics that should be explained\n Client Query Latency: GraphQL API query response time heatmap. Client Mesh Sender: Send mesh segments per second. The total line represents total send amount and the error line is the total number of failed send. Mesh telemetry latency: service mesh receiver handling data heatmap. Mesh telemetry received: received mesh telemetry data per second.  Mini Unit You could find collector can process up to 25k data per second. The CPU usage is about 4 cores. Most of the query latency is less than 50ms. After login the VM on which collector instance running, I know that system load is reaching the limit(max is 8).\nAccording to the previous formula, a single collector instance could process 3k QPS of Bookinfo traffic.\nStandard Cluster Compare to the mini-unit, cluster’s throughput increases linearly. Three instances provide total 80k per second processing power. Query latency increases slightly, but it’s also very small(less than 500ms). I also checked every collector instance system load that all reached the limit. 10k QPS of BookInfo telemetry data could be processed by the cluster.\nConclusion Let’s wrap them up. There are some important things you could get from this test.\n QPS varies by the there variables. The test results in this blog are not important. The user should pick property value according to his system. Collector cluster’s processing power could scale out. The collector is CPU intensive application. So you should provide sufficient CPU resource to it.  This blog gives people a common method to evaluate the throughput of Service Mesh Receiver. 
Users could use this to design their Apache Skywalking backend deployment architecture.\n","title":"SkyWalking performance in Service Mesh scenario","url":"/blog/2019-01-25-mesh-loadtest/"},{"content":"ps:本文仅写给菜鸟,以及不知道如何远程调试的程序员,并且仅仅适用skywalking的远程调试\n概述 远程调试的目的是为了解决代码或者说程序包部署在服务器上运行,只能通过log来查看问题,以及不能跟在本地IDE运行debug那样查找问题,观看程序运行流程\u0026hellip; 想想当你的程序运行在服务器上,你在本地的IDE随时debug,是不是很爽的感觉。\n好了不废话,切入正题。\n环境篇 IDE:推荐 IntelliJ IDEA\n开发语言: 本文仅限于java,其他语言请自行询问google爸爸或者baidu娘娘\n源代码:自行从github下载,并且确保你运行的skywalking包也源代码的一致,(也就是说你自己从源代码编译打包运行,虽然不一样也可以调试,但是你想想你在本地开发,更改完代码,没有重新运行,debug出现的诡异情况)\n场景篇 假定有如下三台机器\n   IP 用途 备注     10.193.78.1 oap-server skywalking 的oap服务(或者说collector所在的服务器)   10.193.78.2 agent skywalking agent运行所在的服务器   10.193.78.0 IDE 你自己装IDE也就是IntelliJ IDEA的机器    以上环境,场景请自行安装好,并确认正常运行。本文不在赘述\n废话终于说完了\n操作篇 首要条件,下载源码后,先用maven 打包编译。然后使用Idea打开源码的父目录,整体结构大致如下图 1 :agent调试 1)Idea 配置部分 点击Edit Configurations 在弹出窗口中依次找到(红色线框的部分)并点击 打开的界面如下 修改Name值,自己随意,好记即可 然后Host输入10.193.78.2 Port默认或者其他的,重要的是这个端口在10.193.78.2上没有被占用\n然后找到Use module classpath 选择 apm-agent 最终的结果如下: 注意选择目标agent运行的jdk版本,很重要\n然后点击Apply,并找到如下内容,并且复制待用 2)agent配置部分 找到agent配置的脚本,并打开,找到配置agent的地方, 就这个地方,在这个后边加上刚才复制的内容 最终的结果如下 提供一个我配置的weblogic的配置(仅供参考) 然后重启应用(agent)\n3)调试 回到Idea中找到这个地方,并点击debug按钮,你没看错,就是红色圈住的地方 然后控制台如果出现以下字样: 那么恭喜你,可以愉快的加断点调试了。 ps:需要注意的是agent的、 service instance的注册可能不能那么愉快的调试。因为这个注册比较快,而且是在agent启动的时候就发生的, 而远程调试也需要agent打开后才可以调试,所以,如果你手快当我没说这句话。\n2 :oap-server的调试(也就是collector的调试) 具体过程不在赘述,和上一步的agent调试大同小异,不同的是 Use module classpath需要选择oap-server\n","title":"SkyWalking的远程调试","url":"/zh/2019-01-24-skywalking-remote-debug/"},{"content":"引言 《SkyWalking Java 插件贡献实践》:本文将基于SkyWalking 6.0.0-GA-SNAPSHOT版本,以编写Redis客户端Lettuce的SkyWalking Java Agent 插件为例,与大家分享我贡献PR的过程,希望对大家了解SkyWalking Java Agent插件有所帮助。\n基础概念 OpenTracing和SkyWalking链路模块几个很重要的语义概念。\n  Span:可理解为一次方法调用,一个程序块的调用,或一次RPC/数据库访问。只要是一个具有完整时间周期的程序访问,都可以被认为是一个span。SkyWalking Span对象中的重要属性\n   属性 名称 备注     component 组件 插件的组件名称,如:Lettuce,详见:ComponentsDefine.Class。   tag 标签 k-v结构,关键标签,key详见:Tags.Class。   peer 对端资源 用于拓扑图,若DB组件,需记录集群信息。   operationName 操作名称 若span=0,operationName将会搜索的下拉列表。   layer 显示 在链路页显示,详见SpanLayer.Class。      Trace:调用链,通过归属于其的Span来隐性的定义。一条Trace可被认为是一个由多个Span组成的有向无环图(DAG图),在SkyWalking链路模块你可以看到,Trace又由多个归属于其的trace segment组成。\n  Trace segment:Segment是SkyWalking中的一个概念,它应该包括单个OS进程中每个请求的所有范围,通常是基于语言的单线程。由多个归属于本线程操作的Span组成。\n  核心API 跨进程ContextCarrier核心API  为了实现分布式跟踪,需要绑定跨进程的跟踪,并且应该传播上下文 整个过程。 这就是ContextCarrier的职责。 以下是实现有关跨进程传播的步骤:  在客户端,创建一个新的空的ContextCarrier,将ContextCarrier所有信息放到HTTP heads、Dubbo attachments 或者Kafka messages。 通过服务调用,将ContextCarrier传递到服务端。 在服务端,在对应组件的heads、attachments或messages获取ContextCarrier所有消息。将服务端和客户端的链路信息绑定。    跨线程ContextSnapshot核心API  除了跨进程,跨线程也是需要支持的,例如异步线程(内存中的消息队列)和批处理在Java中很常见,跨进程和跨线程十分相似,因为都是需要传播 上下文。 唯一的区别是,不需要跨线程序列化。 以下是实现有关跨线程传播的步骤:  使用ContextManager#capture获取ContextSnapshot对象。 让子线程以任何方式,通过方法参数或由现有参数携带来访问ContextSnapshot。 在子线程中使用ContextManager#continued。    详尽的核心API相关知识,可点击阅读 《插件开发指南-中文版本》\n插件实践 Lettuce操作redis代码 @PostMapping(\u0026#34;/ping\u0026#34;) public String ping(HttpServletRequest request) throws ExecutionException, InterruptedException { RedisClient redisClient = RedisClient.create(\u0026#34;redis://\u0026#34; + \u0026#34;127.0.0.1\u0026#34; + \u0026#34;:6379\u0026#34;); StatefulRedisConnection\u0026lt;String, String\u0026gt; connection0 = redisClient.connect(); RedisAsyncCommands\u0026lt;String, String\u0026gt; asyncCommands0 = connection0.async(); AsyncCommand\u0026lt;String, String, String\u0026gt; future = (AsyncCommand\u0026lt;String, String, 
String\u0026gt;)asyncCommands0.set(\u0026#34;key_a\u0026#34;, \u0026#34;value_a\u0026#34;); future.onComplete(s -\u0026gt; OkHttpClient.call(\u0026#34;http://skywalking.apache.org\u0026#34;)); future.get(); connection0.close(); redisClient.shutdown(); return \u0026#34;pong\u0026#34;; } 插件源码架构 Lettuce对Redis封装与Redisson Redisson 类似,目的均是实现简单易用,且无学习曲线的Java的Redis客户端。所以要是先对Redis操作的拦截,需要学习对应客户端的源码。\n设计插件 理解插件实现过程,找到最佳InterceptPoint位置是实现插件融入SkyWalking的核心所在。\n代码实现 PR的url:Support lettuce plugin\n实践中遇到的问题  多线程编程使用debug断点会将链路变成同步,建议使用run模式增加log,或者远程debug来解决。 多线程编程,需要使用跨线程ContextSnapshot核心API,否则链路会断裂。 CompleteableCommand.onComplete方法有时会同步执行,这个和内部机制有关,有时候不分离线程。 插件编译版本若为1.7+,需要将插件放到可选插件中。因为sniffer支持的版本是1.6。  插件兼容 为了插件得到插件最终的兼容兼容版本,我们需要使用docker对所有插件版本的测试,具体步骤如下:\n 编写测试用例:关于如何编写测试用例,请按照如何编写文档来实现。 提供自动测试用例。 如:Redisson插件testcase 确保本地几个流行的插件版本,在本地运行起来是和自己的预期是一致的。 在提供自动测试用例并在CI中递交测试后,插件提交者会批准您的插件。 最终得到完整的插件测试报告。  Pull Request 提交PR 提交PR的时候,需要简述自己对插件的设计,这样有助于与社区的贡献者讨论完成codereview。\n申请自动化测试 测试用例编写完成后,可以申请自动化测试,在自己的PR中会生成插件兼容版本的报告。\n插件文档 插件文档需要更新:Supported-list.md相关插件信息的支持。\n插件如果为可选插件需要在agent-optional-plugins可选插件文档中增加对应的描述。\n注释 Lettuce是一个完全无阻塞的Redis客户端,使用netty构建,提供反应,异步和同步数据访问。了解细节可点击阅读 lettuce.io;\nOpenTracing是一个跨编程语言的标准,了解细节可点击阅读 《OpenTracing语义标准》;\nspan:org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan接口定义了所有Span实现需要完成的方法;\nRedisson是一个非常易用Java的Redis客户端, 它没有学习曲线,无需知道任何Redis命令即可开始使用它。了解细节可点击阅读 redisson.org;\n","title":"SkyWalking Java 插件贡献实践","url":"/zh/2019-01-21-agent-plugin-practice/"},{"content":"Jinlin Fu has contributed 4 new plugins, including gson, activemq, rabbitmq and canal, which made SkyWalking supporting all mainstream OSS MQ. Also provide several documents and bug fixes. The SkyWalking PPMC based on these, promote him as new committer. Welcome on board.\n","title":"Welcome Jinlin Fu as new committer","url":"/events/welcome-jinlin-fu-as-new-committer/"},{"content":" 作者:赵瑞栋 原文地址  引言 微服务框架落地后,分布式部署架构带来的问题就会迅速凸显出来。服务之间的相互调用过程中,如果业务出现错误或者异常,如何快速定位问题?如何跟踪业务调用链路?如何分析解决业务瓶颈?\u0026hellip;本文我们来看看如何解决以上问题。\n一、SkyWalking初探 Skywalking 简介 Skywalking是一款国内开源的应用性能监控工具,支持对分布式系统的监控、跟踪和诊断。\n它提供了如下的主要功能特性: Skywalking 技术架构 SW总体可以分为四部分:\n1.Skywalking Agent:使用Javaagent做字节码植入,无侵入式的收集,并通过HTTP或者gRPC方式发送数据到Skywalking Collector。\nSkywalking Collector :链路数据收集器,对agent传过来的数据进行整合分析处理并落入相关的数据存储中。 Storage:Skywalking的存储,时间更迭,sw已经开发迭代到了6.x版本,在6.x版本中支持以ElasticSearch、Mysql、TiDB、H2、作为存储介质进行数据存储。 UI :Web可视化平台,用来展示落地的数据。  Skywalking Agent配置 通过了解配置,可以对一个组件功能有一个大致的了解。让我们一起看一下skywalking的相关配置。\n解压开skywalking的压缩包,在agent/config文件夹中可以看到agent的配置文件。\n从skywalking支持环境变量配置加载,在启动的时候优先读取环境变量中的相关配置。\n agent.namespace: 跨进程链路中的header,不同的namespace会导致跨进程的链路中断 agent.service_name:一个服务(项目)的唯一标识,这个字段决定了在sw的UI上的关于service的展示名称 agent.sample_n_per_3_secs: 客户端采样率,默认是-1代表全采样 agent.authentication: 与collector进行通信的安全认证,需要同collector中配置相同 agent.ignore_suffix: 忽略特定请求后缀的trace collecttor.backend_service: agent需要同collector进行数据传输的IP和端口 logging.level: agent记录日志级别  skywalking agent使用javaagent无侵入式的配合collector实现对分布式系统的追踪和相关数据的上下文传递。\nSkywalking Collector关键配置 Collector支持集群部署,zookeeper、kubernetes(如果你的应用是部署在容器中的)、consul(GO语言开发的服务发现工具)是sw可选的集群管理工具,结合大家具体的部署方式进行选择。详细配置大家可以去Skywalking官网下载介质包进行了解。\nCollector端口设置\n downsampling: 采样汇总统计维度,会分别按照分钟、【小时、天、月】(可选)来统计各项指标数据。 通过设置TTL相关配置项可以对数据进行自动清理。  Skywalking 在6.X中简化了配置。collector提供了gRPC和HTTP两种通信方式。\nUI使用rest http通信,agent在大多数场景下使用grpc方式通信,在语言不支持的情况下会使用http通信。\n关于绑定IP和端口需要注意的一点是,通过绑定IP,agent和collector必须配置对应ip才可以正常通信。\nCollector存储配置\n在application.yml中配置的storage模块配置中选择要使用的数据库类型,并填写相关的配置信息。\nCollector 
Receiver\nReceiver是Skywalking在6.x提出的新的概念,负责从被监控的系统中接受指标数据。用户完全可以参照OpenTracing规范来上传自定义的监控数据。Skywalking官方提供了service-mesh、istio、zipkin的相关能力。\n现在Skywalking支持服务端采样,配置项为sampleRate,比例采样,如果配置为5000则采样率就是50%。\n关于采样设置的一点注意事项\n关于服务采样配置的一点建议,如果Collector以集群方式部署,比如:Acollector和Bcollector,建议Acollector.sampleRate = Bcollector.sampleRate。如果采样率设置不相同可能会出现数据丢失问题。\n假设Agent端将所有数据发送到后端Collector处,A采样率设置为30%,B采样率为50%。\n假设有30%的数据,发送到A上,这些数据被全部正确接受并存储,极端情况(与期望的采样数据量相同)下,如果剩下20%待采样的数据发送到了B,这个时候一切都是正常的,如果这20%中有一部分数据被送到了A那么,这些数据将是被忽略的,由此就会造成数据丢失。\n二、业务调用链路监控 Service Topology监控 调用链路监控可以从两个角度去看待。我们先从整体上来认识一下我们所监控的系统。\n通过给服务添加探针并产生实际的调用之后,我们可以通过Skywalking的前端UI查看服务之间的调用关系。\n我们简单模拟一次服务之间的调用。新建两个服务,service-provider以及service-consumer,服务之间简单的通过Feign Client 来模拟远程调用。\n从图中可以看到:\n 有两个服务节点:provider \u0026amp; consumer 有一个数据库节点:localhost【mysql】 一个注册中心节点  consumer消费了provider提供出来的接口。\n一个系统的拓扑图让我们清晰的认识到系统之间的应用的依赖关系以及当前状态下的业务流转流程。细心的可能发现图示节点consumer上有一部分是红色的,红色是什么意思呢?\n红色代表当前流经consumer节点的请求有一断时间内是响应异常的。当节点全部变红的时候证明服务现阶段内就彻底不可用了。运维人员可以通过Topology迅速发现某一个服务潜在的问题,并进行下一步的排查并做到预防。\nSkywalking Trace监控 Skywalking通过业务调用监控进行依赖分析,提供给我们了服务之间的服务调用拓扑关系、以及针对每个endpoint的trace记录。\n我们在之前看到consumer节点服务中发生了错误,让我们一起来定位下错误是发生在了什么地方又是什么原因呢?\n在每一条trace的信息中都可以看到当前请求的时间、GloableId、以及请求被调用的时间。我们分别看一看正确的调用和异常的调用。\nTrace调用链路监控 图示展示的是一次正常的响应,这条响应总耗时19ms,它有4个span:\n span1 /getStore = 19ms 响应的总流转时间 span2 /demo2/stores = 14ms feign client 开始调用远程服务后的响应的总时间 span3 /stores = 14ms 接口服务响应总时间 span4 Mysql = 1ms 服务提供端查询数据库的时间  这里span2和span3的时间表现相同,其实是不同的,因为这里时间取了整。\n在每个Span中可以查看当前Span的相关属性。\n 组件类型: SpringMVC、Feign Span状态: false HttpMethod: GET Url: http://192.168.16.125:10002/demo2/stores  这是一次正常的请求调用Trace日志,可能我们并不关心正常的时候,毕竟一切正常不就是我们期待的么!\n我们再来看下,异常状态下我们的Trace以及Span又是什么样的呢。\n发生错误的调用链中Span中的is error标识变为true,并且在名为Logs的TAB中可以看到错误发生的具体原因。根据异常情况我们就可以轻松定位到影响业务的具体原因,从而快速定位问题,解决问题。\n通过Log我们看到连接被拒,那么可能是我们的网络出现了问题(可能性小,因为实际情况如果网络出现问题我们连这个trace都看不到了),也有可能是服务端配置问题无法正确建立连接。通过异常日志,我们迅速就找到了问题的关键。\n实际情况是,我把服务方停掉了,做了一次简单的模拟。可见,通过拓扑图示我们可以清晰的看到众多服务中哪个服务是出现了问题的,通过trace日志我们可以很快就定位到问题所在,在最短的时间内解决问题。\n三、服务性能指标监控 Skywalking还可以查看具体Service的性能指标,根据相关的性能指标可以分析系统的瓶颈所在并提出优化方案。\nSkywalking 性能监控 在服务调用拓扑图上点击相应的节点我们可以看到该服务的\n SLA: 服务可用性(主要是通过请求成功与失败次数来计算) CPM: 每分钟调用次数 Avg Response Time: 平均响应时间  从应用整体外部来看我们可以监测到应用在一定时间段内的\n 服务可用性指标SLA 每分钟平均响应数 平均响应时间 服务进程PID 服务所在物理机的IP、HostName、Operation System  Service JVM信息监控 还可以监控到Service运行时的CPU、堆内存、非堆内存使用率、以及GC情况。这些信息来源于JVM。注意这里的数据可不是机器本身的数据。\n四、服务告警 前文我们提到了通过查看拓扑图以及调用链路可以定位问题,可是运维人员又不可能一直盯着这些数据,那么我们就需要告警能力,在异常达到一定阈值的时候主动的提示我们去查看系统状态。\n在Sywalking 6.x版本中新增了对服务状态的告警能力。它通过webhook的方式让我们可以自定义我们告警信息的通知方式。诸如:邮件通知、微信通知、短信通知等。\nSkywalking 服务告警 先来看一下告警的规则配置。在alarm-settings.xml中可以配置告警规则,告警规则支持自定义。\n一份告警配置由以下几部分组成:\n service_resp_time_rule:告警规则名称 ***_rule (规则名称可以自定义但是必须以’_rule’结尾 indicator-name:指标数据名称: 定义参见http://t.cn/EGhfbmd op: 操作符: \u0026gt; , \u0026lt; , = 【当然你可以自己扩展开发其他的操作符】 threshold:目标值:指标数据的目标数据 如sample中的1000就是服务响应时间,配合上操作符就是大于1000ms的服务响应 period: 告警检查周期:多久检查一次当前的指标数据是否符合告警规则 counts: 达到告警阈值的次数 silence-period:忽略相同告警信息的周期 message:告警信息 webhooks:服务告警通知服务地址  Skywalking通过HttpClient的方式远程调用在配置项webhooks中定义的告警通知服务地址。\n了解了SW所传送的数据格式我们就可以对告警信息进行接收处理,实现我们需要的告警通知服务啦!\n我们将一个服务停掉,并将另外一个服务的某个对外暴露的接口让他休眠一定的时间。然后调用一定的次数观察服务的状态信息以及告警情况。\n总结 本文简单的通过skwaylking的配置来对skywlaking的功能进行一次初步的了解,对skwaylking新提出的概念以及新功能进行简单的诠释,方便大家了解和使用。通过使用APM工具,可以让我们方便的查看微服务架构中系统瓶颈以及性能问题等。\n精选提问 问1:想问问选型的时候用pinpoint还是SK好?\n答:选型问题\n 要结合具体的业务场景, 比如你的代码运行环境 是java、php、net还是什么。 pinpoint在安装部署上要比skywalking略微复杂 pinpoint和sw支持的组件列表是不同的。 
https://github.com/apache/incubator-skywalking/blob/master/docs/en/setup/service-agent/java-agent/Supported-list.md你可以参照这里的支持列表对比下pinpoint的支持对象做一个简单对比。 sw经过测试在并发量较高的情况下比pinpoint的吞吐量更好一些。  问2:有没有指标统计,比如某个url 的top10 请求、响应最慢的10个请求?某个服务在整个链条中的耗时占比?\n答:1.sw自带有响应最慢的请求top10统计针对所有的endpoint的统计。 2.针对每个url的top10统计,sw本身没有做统计,数据都是现成的通过简单的检索就可以搜到你想要的结果。 3.没有具体的耗时占比,但是有具体总链路时间统计以及某个服务的耗时统计,至于占比自己算吧,可以看ppt中的调用链路监控的span时间解释。\n问3:能不能具体说一下在你们系统中的应用?\n答:EOS8LA版本中,我们整合sw对应用提供拓扑、调用链路、性能指标的监控、并在sw数据的基础上增加系统的维度。 当服务数很庞大的时候,整体的拓扑其实就是一张密密麻麻的蜘蛛网。我们可以通过系统来选择具体某个系统下的应用。 8LA中SW是5.0.0alpha版本,受限于sw功能,我们并没有提供告警能力,这在之后会是我们的考虑目标。\n问4:业务访问日志大概每天100G,kubernetes 环境中部署,使用稳定吗?\n答:监控数据没有长时间的存储必要,除非你有特定的需求。它有一定的时效性,你可以设置ttl自动清除过时信息。100g,es集群还是能轻松支撑的。\n问5:和pinpoint相比有什么优势吗?\n答:\n 部署方式、使用方式简单 功能特性支持的更多 高并发性能会更好一些  问6:skywalking的侵入式追踪功能方便进行单服务链的服务追踪。但是跨多台服务器多项目的整体服务链追踪是否有整体设计考虑?\n答:sw本身特性就是对分布式系统的追踪,他是无侵入式的。无关你的应用部署在多少台服务器上。\n问7:应用在加上代理之后性能会下降。请问您有什么解决方法吗?\n答:性能下降是在所难免的,但是据我了解,以及官方的测试,他的性能影响是很低的。这是sw的测试数据供你参考。 https://skywalkingtest.github.io/Agent-Benchmarks/README_zh.html。\n问8:有异构系统需求的话可以用sw吗?\n答:只要skywalking的探针支持的应该都是可以的。\n问9:sw对于商用的web中间件,如bes、tongweb、websphere、weblogic的支持如何?\n答:商业组件支持的比较少,因为涉及到相关license的问题,sw项目组需要获得他们的支持来进行数据上报,据我了解,支持不是很好。\n","title":"SkyWalking 微服务监控分析","url":"/zh/2019-01-03-monitor-microservice/"},{"content":"SkyWalking 依赖 elasticsearch 集群,如果 elasticsearch 安装有 x-pack 插件的话,那么就会存在一个 Basic 认证,导致 skywalking 无法调用 elasticsearch, 解决方法是使用 nginx 做代理,让 nginx 来做这个 Basic 认证,那么这个问题就自然解决了。\n方法如下:\n 安装 nginx   yum install -y nginx\n 配置 nginx  server { listen 9200 default_server; server_name _; location / { proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_pass http://localhost:9200; #Basic字符串就是使用你的用户名(admin),密码(12345)编码后的值 #注意:在进行Basic加密的时候要使用如下格式如:admin:123456 注意中间有个冒号 proxy_set_header Authorization \u0026#34;Basic YWRtaW4gMTIzNDU2\u0026#34;; } } 验证   curl localhost:9200\n { \u0026#34;name\u0026#34; : \u0026#34;Yd0rCp9\u0026#34;, \u0026#34;cluster_name\u0026#34; : \u0026#34;es-cn-4590xv9md0009doky\u0026#34;, \u0026#34;cluster_uuid\u0026#34; : \u0026#34;jAPLrqY5R6KWWgHnGCWOAA\u0026#34;, \u0026#34;version\u0026#34; : { \u0026#34;number\u0026#34; : \u0026#34;6.3.2\u0026#34;, \u0026#34;build_flavor\u0026#34; : \u0026#34;default\u0026#34;, \u0026#34;build_type\u0026#34; : \u0026#34;tar\u0026#34;, \u0026#34;build_hash\u0026#34; : \u0026#34;053779d\u0026#34;, \u0026#34;build_date\u0026#34; : \u0026#34;2018-07-20T05:20:23.451332Z\u0026#34;, \u0026#34;build_snapshot\u0026#34; : false, \u0026#34;lucene_version\u0026#34; : \u0026#34;7.3.1\u0026#34;, \u0026#34;minimum_wire_compatibility_version\u0026#34; : \u0026#34;5.6.0\u0026#34;, \u0026#34;minimum_index_compatibility_version\u0026#34; : \u0026#34;5.0.0\u0026#34; }, \u0026#34;tagline\u0026#34; : \u0026#34;You Know, for Search\u0026#34; } 看到如上结果那么恭喜你成功了。\n","title":"关于 ElasticSearch 因 basic 认证导致 SkyWalking 无法正常调用接口问题","url":"/zh/2019-01-02-skywalking-elasticsearch-basic/"},{"content":" 作者: Wu Sheng, tetrate, SkyWalking original creator GitHub, Twitter, Linkedin 翻译: jjlu521016  背景 在当前的微服务架构中分布式链路追踪是很有必要的一部分,但是对于一些用户来说如何去理解和使用分布式链路追踪的相关数据是不清楚的。 这个博客概述了典型的分布式跟踪用例,以及Skywalking的V6版本中新的可视化功能。我们希望新的用户通过这些示例来更好的理解。\n指标和拓扑图 跟踪数据支持两个众所周知的分析特性:指标和拓扑图\n指标: 每个service, service instance, endpoint的指标都是从跟踪中的入口span派生的。指标代表响应时间的性能。所以可以有一个平均响应时间,99%的响应时间,成功率等。它们按service, service instance, endpoint进行分解。\n拓扑图: 
拓扑表示服务之间的链接,是分布式跟踪最有吸引力的特性。拓扑结构允许所有用户理解分布式服务关系和依赖关系,即使它们是不同的或复杂的。这一点很重要,因为它为所有相关方提供了一个单一的视图,无论他们是开发人员、设计者还是操作者。\n这里有一个拓扑图的例子包含了4个项目,包括kafka和两个外部依赖。\n-在skywalking的可选择UI0RocketBot的拓扑图-\nTrace 在分布式链路追踪系统中,我们花费大量资源(CPU、内存、磁盘和网络)来生成、传输和持久跟踪数据。让我们试着回答为什么要这样做?我们可以用跟踪数据回答哪些典型的诊断和系统性能问题?\nSkywalking v6包含两种追踪视图:\n   TreeMode: 第一次提供,帮助您更容易识别问题。    ListMode: 常规的时间线视图,通常也出现在其他跟踪系统中,如Zipkin。    发生错误 在trace视图,最简单的部分是定位错误,可能是由代码异常或网络故障引起的。通过span详情提供的细节,ListMode和TreeMode都能够找到错误 -ListMode 错误span-\n-TreeMode 错误span-\n慢span 一个高优先级的特性是识别跟踪中最慢的span。这将使用应用程序代理捕获的执行持续时间。在旧的ListMode跟踪视图中,由于嵌套,父span几乎总是包括子span的持续时间。换句话说,一个缓慢的span通常会导致它的父节点也变慢,在Skywalking 6中,我们提供了 最慢的前5个span 过滤器来帮助你您直接定位span。\n-最慢的前5个span-\n太多子span 在某些情况下,个别持续时间很快,但跟踪速度仍然很慢,如: -没有慢span的追踪-\n如果要了解根问题是否与太多操作相关,请使用子范围号的Top 5 of children span number,筛选器显示每个span的子级数量,突出显示前5个。 -13个数据库访问相关的span-\n在这个截图中,有一个包含13个子项的span,这些子项都是数据库访问。另外,当您看到跟踪的概述时,这个2000ms跟踪的数据库花费了1380ms。 -1380ms花费在数据库访问-\n在本例中,根本原因是数据库访问太多。这在其他场景中也很常见,比如太多的RPC或缓存访问。\n链路深度 跟踪深度也与延迟有关。像太多子span的场景一样,每个span延迟看起来不错,但整个链路追踪的过程很慢。 -链路深度-\n上图所示,最慢的span小鱼500ms,对于2000毫秒的跟踪来说,速度并不太慢。当您看到第一行时,有四种不同的颜色表示这个分布式跟踪中涉及的四个services。每一个都需要100~400ms,这四个都需要近2000ms,从这里我们知道这个缓慢的跟踪是由一个序列中的3个RPC造成的。\n结束语 分布式链路追踪和APM 工具帮助我们确定造成问题的根源,允许开发和操作团队进行相应的优化。我们希望您喜欢这一点,并且喜欢Apache Skywalking和我们的新链路追踪可视化界面。如果你喜欢的话,在github上面给我们加start来鼓励我们\nSkywakling 6计划在2019年的1月底完成release。您可以通过以下渠道联系项目团队成员\n 关注 skywalking推特 订阅邮件:dev@skywalking.apache.org。发送邮件到 dev-subscribe@kywalking.apache.org 来订阅. 加入Gitter聊天室  ","title":"更容易理解将要到来的分布式链路追踪 6.0GA (翻译)","url":"/zh/2019-01-02-understand-trace-trans2cn/"},{"content":"Background Distributed tracing is a necessary part of modern microservices architecture, but how to understand or use distributed tracing data is unclear to some end users. This blog overviews typical distributed tracing use cases with new visualization features in SkyWalking v6. We hope new users will understand more through these examples.\nMetric and topology Trace data underpins in two well known analysis features: metric and topology\nMetric of each service, service instance, endpoint are derived from entry spans in trace. Metrics represent response time performance. So, you could have average response time, 99% response time, success rate, etc. These are broken down by service, service instance, endpoint.\nTopology represents links between services and is distributed tracing\u0026rsquo;s most attractive feature. Topologies allows all users to understand distributed service relationships and dependencies even when they are varied or complex. This is important as it brings a single view to all interested parties, regardless of if they are a developer, designer or operator.\nHere\u0026rsquo;s an example topology of 4 projects, including Kafka and two outside dependencies.\nTopology in SkyWalking optional UI, RocketBot\nTrace In a distributed tracing system, we spend a lot of resources(CPU, Memory, Disk and Network) to generate, transport and persistent trace data. Let\u0026rsquo;s try to answer why we do this? What are the typical diagnosis and system performance questions we can answer with trace data?\nSkyWalking v6 includes two trace views:\n TreeMode: The first time provided. Help you easier to identify issues. ListMode: Traditional view in time line, also usually seen in other tracing system, such as Zipkin.  Error occurred In the trace view, the easiest part is locating the error, possibly caused by a code exception or network fault. 
Both ListMode and TreeMode can identify errors, while the span detail screen provides details.\nListMode error span\nTreeMode error span\nSlow span A high priority feature is identifying the slowest spans in a trace. This uses execution duration captured by application agents. In the old ListMode trace view, parent span almost always includes the child span\u0026rsquo;s duration, due to nesting. In other words, a slow span usually causes its parent to also become slow. In SkyWalking 6, we provide Top 5 of slow span filter to help you locate the spans directly.\nTop 5 slow span\nThe above screenshot highlights the top 5 slow spans, excluding child span duration. Also, this shows all spans' execution time, which helps identify the slowest ones.\nToo many child spans In some cases, individual durations are quick, but the trace is still slow, like this one:\nTrace with no slow span\nTo understand if the root problem is related to too many operations, use Top 5 of children span number. This filter shows the amount of children each span has, highlighting the top 5.\n13 database accesses of a span\nIn this screenshot, there is a span with 13 children, which are all Database accesses. Also, when you see overview of trace, database cost 1380ms of this 2000ms trace.\n1380ms database accesses\nIn this example, the root cause is too many database accesses. This is also typical in other scenarios like too many RPCs or cache accesses.\nTrace depth Trace depth is also related latency. Like the too many child spans scenario, each span latency looks good, but the whole trace is slow.\nTrace depth\nHere, the slowest spans are less than 500ms, which are not too slow for a 2000ms trace. When you see the first line, there are four different colors representing four services involved in this distributed trace. Every one of them costs 100~400ms. For all four, there nearly 2000ms. From here, we know this slow trace is caused by 3 RPCs in a serial sequence.\nAt the end Distributed tracing and APM tools help users identify root causes, allowing development and operation teams to optimize accordingly. We hope you enjoyed this, and love Apache SkyWalking and our new trace visualization. If so, give us a star on GitHub to encourage us.\nSkyWalking 6 is scheduled to release at the end of January 2019. You can contact the project team through the following channels:\n Follow SkyWalking twitter Subscribe mailing list: dev@skywalking.apache.org . Send to dev-subscribe@kywalking.apache.org to subscribe the mail list. Join Gitter room.  ","title":"Understand distributed trace easier in the incoming 6-GA","url":"/blog/2019-01-01-understand-trace/"},{"content":"6.0.0-beta release. Go to downloads page to find release tars.\nKey updates\n Bugs fixed, closed to GA New protocols provided, old still compatible. Spring 5 supported MySQL and TiDB as optional storage  ","title":"Release Apache SkyWalking APM 6.0.0-beta","url":"/events/release-apache-skywalking-apm-6-0-0-beta/"},{"content":"Based on his contributions. Including created RocketBot as our secondary UI, new website and very cool trace view page in next release. he has been accepted as SkyWalking PPMC. 
Welcome aboard.\n","title":"Welcome Yao Wang as a new PPMC","url":"/events/welcome-yao-wang-as-a-new-ppmc/"},{"content":"导读  SkyWalking 中 Java 探针是使用 JavaAgent 的两大字节码操作工具之一的 Byte Buddy(另外是 Javassist)实现的。项目还包含.Net core 和 Nodejs 自动探针,以及 Service Mesh Istio 的监控。总体上,SkyWalking 是一个多语言,多场景的适配,特别为微服务、云原生和基于容器架构设计的可观测性分析平台(Observability Analysis Platform)。 本文基于 SkyWalking 5.0.0-RC2 和 Byte Buddy 1.7.9 版本,会从以下几个章节,让大家掌握 SkyWalking Java 探针的使用,进而让 SkyWalking 在自己公司中的二次开发变得触手可及。  Byte Buddy 实现 JavaAgent 项目 迭代 JavaAgent 项目的方法论 SkyWalking agent 项目如何 Debug SkyWalking 插件开发实践   文章底部有 SkyWalking 和 Byte Buddy 相应的学习资源。  Byte Buddy 实现  首先如果你对 JavaAgent 还不是很了解可以先百度一下,或在公众号内看下《JavaAgent 原理与实践》简单入门下。 SpringMVC 分发请求的关键方法相信已经不用我在赘述了,那我们来编写 Byte Buddy JavaAgent 代码吧。  public class AgentMain { public static void premain(String agentOps, Instrumentation instrumentation) { new AgentBuilder.Default() .type(ElementMatchers.named(\u0026#34;org.springframework.web.servlet.DispatcherServlet\u0026#34;)) .transform((builder, type, classLoader, module) -\u0026gt; builder.method(ElementMatchers.named(\u0026#34;doDispatch\u0026#34;)) .intercept(MethodDelegation.to(DoDispatchInterceptor.class))) .installOn(instrumentation); } }  编写 DispatcherServlet doDispatch 拦截器代码(是不是跟 AOP 如出一辙)  public class DoDispatchInterceptor { @RuntimeType public static Object intercept(@Argument(0) HttpServletRequest request, @SuperCall Callable\u0026lt;?\u0026gt; callable) { final StringBuilder in = new StringBuilder(); if (request.getParameterMap() != null \u0026amp;\u0026amp; request.getParameterMap().size() \u0026gt; 0) { request.getParameterMap().keySet().forEach(key -\u0026gt; in.append(\u0026#34;key=\u0026#34; + key + \u0026#34;_value=\u0026#34; + request.getParameter(key) + \u0026#34;,\u0026#34;)); } long agentStart = System.currentTimeMillis(); try { return callable.call(); } catch (Exception e) { System.out.println(\u0026#34;Exception :\u0026#34; + e.getMessage()); return null; } finally { System.out.println(\u0026#34;path:\u0026#34; + request.getRequestURI() + \u0026#34; 入参:\u0026#34; + in + \u0026#34; 耗时:\u0026#34; + (System.currentTimeMillis() - agentStart)); } } }  resources/META-INF/MANIFEST.MF  Manifest-Version: 1.0 Premain-Class: com.z.test.agent.AgentMain Can-Redefine-Classes: true  pom.xml 文件  dependencies +net.bytebuddy.byte-buddy +javax.servlet.javax.servlet-api *scope=provided plugins +maven-jar-plugin *manifestFile=src/main/resources/META-INF/MANIFEST.MF +maven-shade-plugin *include:net.bytebuddy:byte-buddy:jar: +maven-compiler-plugin  小结:没几十行代码就完成了,通过 Byte Buddy 实现应用组件 SpringMVC 记录请求路径、入参、执行时间 JavaAgent 项目,是不是觉得自己很优秀。  持续迭代 JavaAgent  本章节主要介绍 JavaAgent 如何 Debug,以及持续集成的方法论。 首先我的 JavaAgent 项目目录结构如图所示: 应用项目是用几行代码实现的 SpringBootWeb 项目:  @SpringBootApplication(scanBasePackages = {\u0026#34;com\u0026#34;}) public class TestBootWeb { public static void main(String[] args) { SpringApplication.run(TestBootWeb.class, args); } @RestController public class ApiController { @PostMapping(\u0026#34;/ping\u0026#34;) public String ping(HttpServletRequest request) { return \u0026#34;pong\u0026#34;; } } }  下面是关键 JavaAgent 项目如何持续迭代与集成:  VM options增加:-JavaAgent:{$HOME}/Code/github/z_my_test/test-agent/target/test-agent-1.0-SNAPSHOT.jar=args Before launch 在Build之前增加: Working directory:{$HOME}/Code/github/incubator-skywalking Command line:-T 1C -pl test-agent -am clean package -Denforcer.skip=true -Dmaven.test.skip=true -Dmaven.compile.fork=true  小结:看到这里的将 JavaAgent 持续迭代集成方法,是不是瞬间觉得自己手心已经发痒起来,很想编写一个自己的 agent 项目了呢,等等还有一个好消息:test-demo 这 10 几行的代码实现的 Web 服务,居然有 5k 
左右的类可以使用 agent 增强。 注意 mvn 编译加速的命令是 maven3 + 版本以上才支持的哈。  SkyWalking Debug  峰回路转,到了文章的主题《SkyWalking 之高级用法》的正文啦。首先,JavaAgent 项目想 Debug,还需要将 agent 代码与接入 agent 项目至少在同一个工作空间内,网上方法有很多,这里我推荐大家一个最简单的方法。File-\u0026gt;New-\u0026gt;Module from Exisiting Sources… 引入 skywalking-agent 源码即可 详细的 idea 编辑器配置: 优化 SkyWalking agent 编译时间,我的集成时间优化到 30 秒左右:  VM options增加:-JavaAgent:-JavaAgent:{$HOME}/Code/github/incubator-skywalking/skywalking-agent/skywalking-agent.jar:不要用dist里面的skywalking-agent.jar,具体原因大家可以看看源码:apm-sniffer/apm-agent/pom.xml中的maven插件的使用。 Before launch 在Build之前增加: Working directory:{$HOME}/Code/github/incubator-skywalking Command line:-T 1C -pl apm-sniffer/apm-sdk-plugin -amd clean package -Denforcer.skip=true -Dmaven.test.skip=true -Dmaven.compile.fork=true: 这里我针对插件包,因为紧接着下文要开发插件 另外根pom注释maven-checkstyle-plugin也可加速编译 kob 之 SkyWalking 插件编写  kob(贝壳分布式作业调度框架)是贝壳找房项目微服务集群中的基础组件,通过编写贝壳分布式作业调度框架的 SkyWalking 插件,可以实时收集作业调度任务的执行链路信息,从而及时得到基础组件的稳定性,了解细节可点击阅读《贝壳分布式调度框架简介》。想详细了解 SkyWalking 插件编写可在文章底部参考链接中,跳转至对应的官方资源,好话不多说,代码一把唆起来。 apm-sdk-plugin pom.xml 增加自己的插件 model  \u0026lt;artifactId\u0026gt;apm-sdk-plugin\u0026lt;/artifactId\u0026gt; \u0026lt;modules\u0026gt; \u0026lt;module\u0026gt;kob-plugin\u0026lt;/module\u0026gt; ... \u0026lt;modules\u0026gt;  resources.skywalking-plugin.def 增加自己的描述  kob=org.apache.skywalking.apm.plugin.kob.KobInstrumentation  在 SkyWalking 的项目中,通过继承 ClassInstanceMethodsEnhancePluginDefine 可以定义需要拦截的类和增强的方法,编写作业调度方法的 instrumentation  public class KobInstrumentation extends ClassInstanceMethodsEnhancePluginDefine { private static final String ENHANCE_CLASS = \u0026#34;com.ke.kob.client.spring.core.TaskDispatcher\u0026#34;; private static final String INTERCEPT_CLASS = \u0026#34;org.apache.skywalking.apm.plugin.kob.KobInterceptor\u0026#34;; @Override protected ClassMatch enhanceClass() { return NameMatch.byName(ENHANCE_CLASS); } @Override protected ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return null; } @Override protected InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[] { new InstanceMethodsInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher() { return named(\u0026#34;dispatcher1\u0026#34;); } @Override public String getMethodsInterceptor() { return INTERCEPT_CLASS; } @Override public boolean isOverrideArgs() { return false; } } }; } }  通过实现 InstanceMethodsAroundInterceptor 后,定义 beforeMethod、afterMethod 和 handleMethodException 的实现方法,可以环绕增强指定目标方法,下面自定义 interceptor 实现 span 的跟踪(这里需要注意 SkyWalking 中 span 的生命周期,在 afterMethod 方法中结束 span)  public class KobInterceptor implements InstanceMethodsAroundInterceptor { @Override public void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable { final ContextCarrier contextCarrier = new ContextCarrier(); com.ke.kob.client.spring.model.TaskContext context = (TaskContext) allArguments[0]; CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(JSON.toJSONString(context.getUserParam())); } AbstractSpan span = ContextManager.createEntrySpan(\u0026#34;client:\u0026#34;+allArguments[1]+\u0026#34;,task:\u0026#34;+context.getTaskKey(), contextCarrier); span.setComponent(ComponentsDefine.TRANSPORT_CLIENT); SpanLayer.asRPCFramework(span); } @Override public Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, 
Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable { ContextManager.stopSpan(); return ret; } @Override public void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t) { } }  实现效果,将操作名改成任务执行节点 + 任务执行方法,实现 kob 的 SkyWalking 的插件编写,加上报警体系,可以进一步增加公司基础组件的稳定性。  参考链接  Apache SkyWalking Byte Buddy(runtime code generation for the Java virtual machine)  ","title":"SkyWalking apm-sniffer 原理学习与插件编写","url":"/zh/2018-12-21-skywalking-apm-sniffer-beginning/"},{"content":"搭建调试环境 阅读 SkyWalking 源码,从配置调试环境开始。\n一定一定一定不要干读代码,而是通过调试的方式。\n 01 通过 Skywalking-5.x 版本的源码构建并运行 👉:哔哩哔哩 | 腾讯视频 02 通过 Skywalking-6.x 版本的源码构建并运行 👉:哔哩哔哩 | 腾讯视频 03 Java 应用(探针)接入 Skywalking[6.x] 👉:哔哩哔哩 | 腾讯视频  SkyWalking 3.X 源码解析合集 虽然是基于 3.X 版本的源码解析,但是对于阅读 SkyWalking Java Agent 和插件部分,同样适用。\n对于 SkyWalking Collector 部分,可以作为一定的参考。\n 《SkyWalking 源码分析 —— 调试环境搭建》 《SkyWalking 源码分析 —— Agent 初始化》 《SkyWalking 源码分析 —— Agent 插件体系》 《SkyWalking 源码分析 —— Collector 初始化》 《SkyWalking 源码分析 —— Collector Cluster 集群管理》 《SkyWalking 源码分析 —— Collector Client Component 客户端组件》 《SkyWalking 源码分析 —— Collector Server Component 服务器组件》 《SkyWalking 源码分析 —— Collector Jetty Server Manager》 《SkyWalking 源码分析 —— Collector gRPC Server Manager》 《SkyWalking 源码分析 —— Collector Naming Server 命名服务》 《SkyWalking 源码分析 —— Collector Queue 队列组件》 《SkyWalking 源码分析 —— Collector Storage 存储组件》 《SkyWalking 源码分析 —— Collector Streaming Computing 流式处理(一)》 《SkyWalking 源码分析 —— Collector Streaming Computing 流式处理(二)》 《SkyWalking 源码分析 —— Collector Cache 缓存组件》 《SkyWalking 源码分析 —— Collector Remote 远程通信服务》 《SkyWalking 源码分析 —— DataCarrier 异步处理库》 《SkyWalking 源码分析 —— Agent Remote 远程通信服务》 《SkyWalking 源码分析 —— 应用于应用实例的注册》 《SkyWalking 源码分析 —— Agent DictionaryManager 字典管理》 《SkyWalking 源码分析 —— Agent 收集 Trace 数据》 《SkyWalking 源码分析 —— Agent 发送 Trace 数据》 《SkyWalking 源码分析 —— Collector 接收 Trace 数据》 《SkyWalking 源码分析 —— Collector 存储 Trace 数据》 《SkyWalking 源码分析 —— JVM 指标的收集与存储》 《SkyWalking 源码分析 —— 运维界面(一)之应用视角》 《SkyWalking 源码分析 —— 运维界面(二)之应用实例视角》 《SkyWalking 源码分析 —— 运维界面(三)之链路追踪视角》 《SkyWalking 源码分析 —— 运维界面(四)之操作视角》 《SkyWalking 源码分析 —— @Trace 注解想要追踪的任何方法》 《SkyWalking 源码分析 —— traceId 集成到日志组件》 《SkyWalking 源码分析 —— Agent 插件(一)之 Tomcat》 《SkyWalking 源码分析 —— Agent 插件(二)之 Dubbo》 《SkyWalking 源码分析 —— Agent 插件(三)之 SpringMVC》 《SkyWalking 源码分析 —— Agent 插件(四)之 MongoDB》  SkyWalking 6.X 源码解析合集  《SkyWalking 6.x 源码分析 —— 调试环境搭建》  ","title":"SkyWalking 源码解析合集","url":"/zh/2018-12-21-skywalking-source-code-read/"},{"content":"版本选择 我们采用的是 5.0.0-RC2 的版本,SkyWalking 的版本信息可以参考 https://github.com/apache/incubator-skywalking/blob/5.x/CHANGES.md\n那么为什么我们没有采用 5.1.0 版本呢,这是因为我们公司内部需要支持 es x-pack,但是在官方发布里面,没有支持 xpack 的版本。\n在 Apache SkyWalking 官方文档 https://github.com/CharlesMaster/incubator-skywalking/tree/master/docs/others/cn 中有提到,SkyWalking 5.x 仍受社区支持。\n对于用户计划从 5.x 升级到 6.x,您应该知道关于有一些概念的定义的变更。最重要的两个改变了的概念是:\n Application(在 5.x 中)更改为 Service(在 6.x 中),Application Instance 也更改为 Service Instance。 Service(在 5.x 中)更改为 Endpoint(在 6.x 中)。  图文详解 Apache SkyWalking 的监控界面由 Monitor 和 Trace 两者构成,Monitor 菜单又包括 Dashbord、Topology、Application、Service、Alarm 五个子菜单构成。本文就是围绕这些菜单分别逐一进行介绍。\nMonitor 当用户通过 SkyWalking 登陆界面使用用户名、密码登陆以后,就会默认进入到 SkyWalking 的 Monitor 下的 Dashboard 界面\nDashboard 下图就是用户登陆之后都会看到的关键 Dashboard 页面,在这个页面的下方的关键指标,图中都做了详细的解释。\n上图中 app 需要强调的是,52 个 app 并不代表 52 个应用,比如 paycenter 有两台 paycenter1 和 paycenter2 就算了 2 个 app,当然还有一些应用是 3 个以上的。在我们公司,paycenter1、paycenter2 这些运维都和我们跳板机管理平台上的名称设置的一样,约定大于配置,开发人员可以更加便捷的排查问题。\n 再次修正一下,关于 dashboard 页面的 app 数,语言类探针,是探针的 app_code 
来决定的。比如我们公司的线上配置就是 agent.application_code=auth-center-1\n 上图中需要解释两个概念:\n cpm 代表每分钟请求次数 SLA=(TRANSACTION_CALLS- TRANSACTION_ERROR_CALLS ) * 10000 ) / TRANSACTION_CALLS  该页面主要支持四个跳转:\n一、在上图中,App 板块上的帮助选项是可以直接跳转到 Application 监控页面的。 二、 Service 板块上的帮助选项是可以直接跳转到 Service 监控页面的。\n三、 Slow Service 列表中的每一个慢服务点击以后都会进入到其专项的 Service 监控页面。\n四、 Application Throughput 列表中的每一个 Application 点击以后也都是可以进入到其专项的 Application 监控页面。\n 关于 Application 和 Service 的详细介绍我们后续会展开\n 在 Dashboard 的页面上部分,还有一个选择功能模块: 左侧部分可以定期 refresh Dashboard 的数据,右侧则可以调整整体的查询区间。\nTopology 点击 Monitor 菜单下的 Topology 你会看到下面这张拓扑图\n当然这张图太过于夸张了,如果接入 SkyWalking 的应用并不是很多,会如下图所示: 左侧的三个小按钮可以调整你的视图,支持拖拽。右侧可以输入你所关心的应用名。比如我们输入一个支付和订单两个应用,左侧的拓扑图会变得更加清晰:\n另外,上图中的绿色圆圈都是可以点击的,如果你点击以后,还会出现节点信息: Application 点击 Monitor 菜单下的 Application 你会看到下面这张图,这张图里你可以看到的东西都做了注解。\n这张图里有一个惊喜,就是如果你点开 More Server Details,你可以看到更多的信息\n是的,除了 Host、IPv4、Pid、OS 以外,你还可以看到 CPU、Heap、Non-Heap、GC(Young GC、Old GC)等详细监控信息。\nService 点击 Monitor 菜单下的 Service 你会看到下面这张图,这张图里你可以看到的同样都做了注解。 关于 Dependency Map 这张图我们再补充一下,鼠标悬停可以看到每个阶段的执行时间,这是 Service 下的功能 我们点开图中该图中 Top 20 Slow Traces 下面的被我马赛克掉的 trace 的按钮框,可以看到如下更加详细的信息:\n这些信息可以帮助我们知道每一个方法在哪个阶段那个具体实现耗时了多久。\n如上图所示,每一行基本都是可以打开的,每一行都包含了 Tags、Logs 等监控内容\nAlarm 点击 Monitor 菜单下的 Alarm 你会看到告警菜单。目前 5.X 版本的还没有接入邮件、短信等告警方式,后续 6 支持 webhook,用户可以自己去接短信和邮件。\n告警内容中你可以看到 Applicaion、Server 和 Service 三个层面的告警内容\nTrace Trace 是一个非常实用的功能,用户可以根据精确的 TraceId 去查找\n也可以设定时间段去查找\n我在写使用手册时候,非常巧的是,看到了上图三起异常,于是我们往下拉列表看到了具体的数据\n点击进去,我们可以看到具体的失败原因 当然用户也可以直接将 Trace State 调整为 Error 级别进行查询\n再回顾一遍 一、首先我们进入首页:\n二、点击一下首页的 Slow Service 的 projectC,可以看到如下信息:\n三、如果点击首页的 Appliation Throughput 中的 projectD,可以看到如下信息:\n四、继续点进去右下角的这个 slow service 里的 Consumer,我们可以看到下图:\n参考资料  https://twitter.com/AsfSkyWalking/status/1013616673218179072 https://twitter.com/AsfSkyWalking/status/1013617100143800320  ","title":"Apache SkyWalking 5.0 中文版图文详解使用手册","url":"/zh/2018-12-18-apache-skywalking-5-0-userguide/"},{"content":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome aboard.\n","title":"Welcome Yixiong Cao as a new committer","url":"/events/welcome-yixiong-cao-as-a-new-committer/"},{"content":"Original link, Tetrate.io blog\nContext The integration of SkyWalking and Istio Service Mesh yields an essential open-source tool for resolving the chaos created by the proliferation of siloed, cloud-based services.\nApache SkyWalking is an open, modern performance management tool for distributed services, designed especially for microservices, cloud native and container-based (Docker, K8s, Mesos) architectures. We at Tetrate believe it is going to be an important project for understanding the performance of microservices. The recently released v6 integrates with Istio Service Mesh and focuses on metrics and tracing. It natively understands the most common language runtimes (Java, .Net, and NodeJS). With its new core code, SkyWalking v6 also supports Istrio telemetry data formats, providing consistent analysis, persistence, and visualization.\nSkyWalking has evolved into an Observability Analysis Platform that enables observation and monitoring of hundreds of services all at once. 
It promises solutions for some of the trickiest problems faced by system administrators using complex arrays of abundant services: Identifying why and where a request is slow, distinguishing normal from deviant system performance, comparing apples-to-apples metrics across apps regardless of programming language, and attaining a complete and meaningful view of performance.\nSkyWalking History Launched in China by Wu Sheng in 2015, SkyWalking started as just a distributed tracing system, like Zipkin, but with auto instrumentation from a Java agent. This enabled JVM users to see distributed traces without any change to their source code. In the last two years, it has been used for research and production by more than 50 companies. With its expanded capabilities, we expect to see it adopted more globally.\nWhat\u0026rsquo;s new Service Mesh Integration Istio has picked up a lot of steam as the framework of choice for distributed services. Based on all the interest in the Istio project, and community feedback, some SkyWalking (P)PMC members decided to integrate with Istio Service Mesh to move SkyWalking to a higher level.\nSo now you can use Skywalking to get metrics and understand the topology of your applications. This works not just for Java, .NET and Node using our language agents, but also for microservices running under the Istio service mesh. You can get a full topology of both kinds of applications.\nObservability analysis platform With its roots in tracing, SkyWalking is now transitioning into an open-standards based Observability Analysis Platform, which means the following:\n It can accept different kinds and formats of telemetry data from mesh like Istio telemetry. Its agents support various popular software technologies and frameworks like Tomcat, Spring, Kafka. The whole supported framework list is here. It can accept data from other compliant sources like Zipkin-formatted traces reported from Zipkin, Jaeger, or OpenCensus clients.  SkyWalking is logically split into four parts: Probes, Platform Backend, Storage and UI:\nThere are two kinds of probes:\n Language agents or SDKs following SkyWalking across-thread propagation formats and trace formats, run in the user’s application process. The Istio mixer adaptor, which collects telemetry from the Service Mesh.  The platform backend provides gRPC and RESTful HTTP endpoints for all SkyWalking-supported trace and metric telemetry data. For example, you can stream these metrics into an analysis system.\nStorage supports multiple implementations such as ElasticSearch, H2 (alpha), MySQL, and Apache ShardingSphere for MySQL Cluster. TiDB will be supported in next release.\nSkyWalking’s built-in UI with a GraphQL endpoint for data allows intuitive, customizable integration.\nSome examples of SkyWalking’s UI:\n Observe a Spring app using the SkyWalking JVM-agent   Observe on Istio without any agent, no matter what langugage the service is written in   See fine-grained metrics like request/Call per Minute, P99/95/90/75/50 latency, avg response time, heatmap   Service dependencies and metrics  Service Focused At Tetrate, we are focused on discovery, reliability, and security of your running services. 
This is why we are embracing SkyWalking, which makes service performance observable.\nBehind this admittedly cool UI, the aggregation logic is very easy to understand, making it easy to customize SkyWalking in its Observability Analysis Language (OAL) script.\nWe’ll post more about OAL for developers looking to customize SkyWalking, and you can read the official OAL introduction document.\nScripts are based on three core concepts:\n  Service represents a group of workloads that provide the same behaviours for incoming requests. You can define the service name whether you are using instrument agents or SDKs. Otherwise, SkyWalking uses the name you defined in the underlying platform, such as Istio.\n  Service Instance Each workload in the Service group is called an instance. Like Pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process. If you are using an instrument agent, an instance does map to one OS process.\n  Endpoint is a path in a certain service that handles incoming requests, such as HTTP paths or a gRPC service + method. Mesh telemetry and trace data are formatted as source objects (aka scope). These are the input for the aggregation, with the script describing how to aggregate, including input, conditions, and the resulting metric name.\n  Core Features The other core features in SkyWalking v6 are:\n Service, service instance, endpoint metrics analysis. Consistent visualization in Service Mesh and no mesh. Topology discovery, Service dependency analysis. Distributed tracing. Slow services and endpoints detected. Alarms.  Of course, SkyWalking has some more upgrades from v5, such as:\n ElasticSearch 6 as storage is supported. H2 storage implementor is back. Kubernetes cluster management is provided. You don’t need Zookeeper to keep the backend running in cluster mode. Totally new alarm core. Easier configuration. More cloud native style. MySQL will be supported in the next release.  Please: Test and Provide Feedback! We would love everyone to try and test our new version. You can find everything you need in our Apache repository; read the document for further details. You can contact the project team through the following channels:\n Submit an issue on GitHub repository Mailing list: dev@skywalking.apache.org . Send to dev-subscribe@skywalking.apache.org to subscribe to the mailing list. Gitter Project twitter  Oh, and one last thing! If you like our project, don\u0026rsquo;t forget to give us a star on GitHub.\n","title":"SkyWalking v6 is Service Mesh ready","url":"/blog/2018-12-12-skywalking-service-mesh-ready/"},{"content":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome aboard.\n","title":"Welcome Jian Tan as a new committer","url":"/events/welcome-jian-tan-as-a-new-committer/"},{"content":"APM consistently compatible in language agent(Java, .Net, NodeJS), 3rd party format(Zipkin) and service mesh telemetry(Istio). Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking 6.0.0-alpha","url":"/events/release-apache-skywalking-6-0-0-alpha/"},{"content":"A stable version of 5.x release. Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking 5.0.0-GA","url":"/events/release-apache-skywalking-5-0-0-ga/"},{"content":"5.0.0-RC2 release. Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking 5.0.0-RC2","url":"/events/release-apache-skywalking-5-0-0-rc2/"},{"content":"5.0.0-beta2 release. 
Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking 5.0.0-beta2","url":"/events/release-apache-skywalking-5-0-0-beta2/"},{"content":"Translated by Sheng Wu.\nIn many large systems, distributed and especially microservice architectures are becoming more and more popular. With the increase of modules and services, one incoming request may cross dozens of services. How to pinpoint the issues of an online system, and the bottleneck of the whole distributed system? This has become a very important problem that must be resolved.\nTo resolve these problems in distributed systems, Google published the paper “Dapper, a Large-Scale Distributed Systems Tracing Infrastructure”, which described the design and ideas behind building a distributed tracing system. Many projects have been inspired by it and created in the last 10 years. In 2015, Apache SkyWalking was created by Wu Sheng, initially as a simple distributed tracing system, and open sourced. Through almost 3 years of development, by 2018, with its 5.0.0-alpha/beta releases, it had already become a cool open source APM system for cloud native, container based systems.\nEarly this year, I was trying to build the Butterfly open source APM in .NET Core, and that is when I met the Apache SkyWalking team and its creator. I decided to join them and cooperate with them to provide a .NET Core agent natively compatible with SkyWalking. In April, I released the first version of the .NET Core agent, 0.1.0. After several weeks of iteration, we released 0.2.0, increasing stability and adding HttpClient and database driver support.\nBefore using the .NET Core agent, we need to deploy the SkyWalking collector, UI and ElasticSearch 5.x. You can download the release versions here: http://skywalking.apache.org/downloads/ and follow the docs (Deploy-backend-in-standalone-mode, Deploy-backend-in-cluster-mode) to set up the backend.\nHere, I am giving a quick start to show how to monitor a demo distributed .NET Core application. I can say, it is easy.\n git clone https://github.com/OpenSkywalking/skywalking-netcore.git\n  cd skywalking-netcore\n  dotnet restore\n  dotnet run -p sample/SkyWalking.Sample.Backend dotnet run -p sample/SkyWalking.Sample.Frontend\n Now you can open http://localhost:5001/api/values to access the demo application. 
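If you would rather script a little traffic than refresh the browser, a minimal sketch like the one below sends a handful of requests to the demo endpoint in a loop. It assumes a Java 11 (or newer) runtime and uses only the standard java.net.http client, since Java is the language used in the other posts collected here; the class name DemoTraffic and the request count are just placeholders, and any other HTTP client would do the same job.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DemoTraffic {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:5001/api/values"))
                .GET()
                .build();
        // Send a handful of requests so the demo produces several traces.
        for (int i = 0; i < 20; i++) {
            HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println("request " + i + " -> status " + response.statusCode());
            Thread.sleep(500); // small pause between calls
        }
    }
}

A few requests like this should be enough for traces and metrics to appear. 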
Then you can open SkyWalking WebUI http://localhost:8080\n  Overview of the whole distributed system   Topology of distributed system   Application view   Trace query   Span’s tags, logs and related traces   GitHub  Website: http://skywalking.apache.org/ SkyWalking Github Repo: https://github.com/apache/incubator-skywalking SkyWalking-NetCore Github Repo: https://github.com/OpenSkywalking/skywalking-netcore  ","title":"Apache SkyWalking provides open source APM and distributed tracing in .NET Core field","url":"/blog/2018-05-24-skywalking-net/"},{"content":"在大型网站系统设计中,随着分布式架构,特别是微服务架构的流行,我们将系统解耦成更小的单元,通过不断的添加新的、小的模块或者重用已经有的模块来构建复杂的系统。随着模块的不断增多,一次请求可能会涉及到十几个甚至几十个服务的协同处理,那么如何准确快速的定位到线上故障和性能瓶颈,便成为我们不得不面对的棘手问题。\n为解决分布式架构中复杂的服务定位和性能问题,Google 在论文《Dapper, a Large-Scale Distributed Systems Tracing Infrastructure》中提出了分布式跟踪系统的设计和构建思路。在这样的背景下,Apache SkyWalking 创建于 2015 年,参考 Dapper 论文实现分布式追踪功能,并逐渐进化为一个完整功能的 Application Performance Management 系统,用于追踪、监控和诊断大型分布式系统,尤其是容器和云原生下的微服务系统。\n今年初我在尝试使用.NET Core 构建分布式追踪系统 Butterfly 时接触到 SkyWalking 团队,开始和 SkyWalking 团队合作探索 SkyWalking 对.NET Core 的支持,并于 4 月发布 SkyWalking .NET Core 探针的 第一个版本,同时我也有幸加入 SkyWalking 团队共同进行 SkyWalking 在多语言生态的推动。在.NET Core 探针 v0.1 版本发布之后,得到了一些同学的尝鲜使用,也得到诸多改进的建议。经过几周的迭代,SkyWalking .NET Core 探针于今天发布 v0.2 release,在 v0.1 的基础上增加了\u0008稳定性和 HttpClient 及数据库驱动的追踪支持。\n在使用 SkyWalking 对.NET Core 应用追踪之前,我们需要先部署 SkyWalking Collector 收集分析 Trace 和 Elasticsearch 作为 Trace 数据存储。SkyWalking 支持 5.x 的 ES,所以我们需要下载安装对应版本的 ES,并配置 ES 的 cluster.name 为 CollectorDBCluster。然后部署 SkyWalking 5.0 beta 或更高版本 (下载地址:http://skywalking.apache.org/downloads/)。更详细的 Collector 部署文档,请参考 Deploy-backend-in-standalone-mode 和 Deploy-backend-in-cluster-mode。\n最后我们使用示例项目来演示在.NET Core 应用中使用 SkyWalking 进行追踪和监控,克隆 SkyWalking-NetCore 项目到本地:\ngit clone https://github.com/OpenSkywalking/skywalking-netcore.git 进入 skywalking-netcore 目录:\ncd skywalking-netcore 还原 nuget package:\ndotnet restore 启动示例项目:\ndotnet run -p sample/SkyWalking.Sample.Backend dotnet run -p sample/SkyWalking.Sample.Frontend 访问示例应用:\n打开 SkyWalking WebUI 即可看到我们的应用监控面板 http://localhost:8080\nDashboard 视图\nTopologyMap 视图\nApplication 视图\nTrace 视图\nTraceDetails 视图\nGitHub  SkyWalking Github Repo:https://github.com/apache/incubator-skywalking SkyWalking-NetCore Github Repo:https://github.com/OpenSkywalking/skywalking-netcore  ","title":"Apache SkyWalking 为.NET Core带来开箱即用的分布式追踪和应用性能监控","url":"/zh/2018-05-24-skywalking-net/"},{"content":"5.0.0-beta release. Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking 5.0.0-beta","url":"/events/release-apache-skywalking-5-0-0-beta/"},{"content":"5.0.0-alpha release. Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking APM 5.0.0-alpha","url":"/events/release-apache-skywalking-apm-5-0-0-alpha/"},{"content":"","title":"","url":"/index.json"},{"content":"10.0.0 Project  Support Java 21 runtime. Support oap-java21 image for Java 21 runtime. Upgrade OTEL collector version to 0.92.0 in all e2e tests. Switch CI macOS runner to m1. Upgrade PostgreSQL driver to 42.4.4 to fix CVE-2024-1597. Remove CLI(swctl) from the image. Remove CLI_VERSION variable from Makefile build. Add BanyanDB to docker-compose quickstart. Bump up Armeria, jackson, netty, jetcd and grpc to fix CVEs.  OAP Server  Add layer parameter to the global topology graphQL query. Add is_present function in MQE for check if the list metrics has a value or not. Remove unreasonable default configurations for gRPC thread executor. 
Remove gRPCThreadPoolQueueSize (SW_RECEIVER_GRPC_POOL_QUEUE_SIZE) configuration. Allow excluding ServiceEntries in some namespaces when looking up ServiceEntries as a final resolution method of service metadata. Set up the length of source and dest IDs in relation entities of service, instance, endpoint, and process to 250(was 200). Support build Service/Instance Hierarchy and query. Change the string field in Elasticsearch storage from keyword type to text type if it set more than 32766 length. [Break Change] Change the configuration field of ui_template and ui_menu in Elasticsearch storage from keyword type to text. Support Service Hierarchy auto matching, add auto matching layer relationships (upper -\u0026gt; lower) as following:  MESH -\u0026gt; MESH_DP MESH -\u0026gt; K8S_SERVICE MESH_DP -\u0026gt; K8S_SERVICE GENERAL -\u0026gt; K8S_SERVICE   Add namespace suffix for K8S_SERVICE_NAME_RULE/ISTIO_SERVICE_NAME_RULE and metadata-service-mapping.yaml as default. Allow using a dedicated port for ALS receiver. Fix log query by traceId in JDBCLogQueryDAO. Support handler eBPF access log protocol. Fix SumPerMinFunctionTest error function. Remove unnecessary annotations and functions from Meter Functions. Add max and min functions for MAL down sampling. Fix critical bug of uncontrolled memory cost of TopN statistics. Change topN group key from StorageId to entityId + timeBucket. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  MYSQL -\u0026gt; K8S_SERVICE POSTGRESQL -\u0026gt; K8S_SERVICE SO11Y_OAP -\u0026gt; K8S_SERVICE VIRTUAL_DATABASE -\u0026gt; MYSQL VIRTUAL_DATABASE -\u0026gt; POSTGRESQL   Add Golang as a supported language for AMQP. Support available layers of service in the topology. Add count aggregation function for MAL Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  NGINX -\u0026gt; K8S_SERVICE APISIX -\u0026gt; K8S_SERVICE GENERAL -\u0026gt; APISIX   Add Golang as a supported language for RocketMQ. Support Apache RocketMQ server monitoring. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  ROCKETMQ -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; ROCKETMQ   Fix ServiceInstance in query. Mock /api/v1/status/buildinfo for PromQL API. Fix table exists check in the JDBC Storage Plugin. Fix day-based table rolling time range strategy in JDBC storage. Add maxInboundMessageSize (SW_DCS_MAX_INBOUND_MESSAGE_SIZE) configuration to change the max inbound message size of DCS. Fix Service Layer when building Events in the EventHookCallback. Add Golang as a supported language for Pulsar. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  RABBITMQ -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; RABBITMQ   Remove Column#function mechanism in the kernel. Make query readMetricValue always return the average value of the duration. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  KAFKA -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; KAFKA   Support ClickHouse server monitoring. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  CLICKHOUSE -\u0026gt; K8S_SERVICE VIRTUAL_DATABASE -\u0026gt; CLICKHOUSE   Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  PULSAR -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; PULSAR   Add Golang as a supported language for Kafka. 
Support displaying the port services listen to from OAP and UI during server start. Refactor data-generator to support generating metrics. Fix AvgHistogramPercentileFunction legacy name. [Break Change] Labeled Metrics support multiple labels.  Storage: store all label names and values instead of only the values. MQE:  Support querying by multiple labels(name and value) instead using _ as the anonymous label name. aggregate_labels function support aggregate by specific labels. relabels function require target label and rename label name and value.   PromQL:  Support querying by multiple labels(name and value) instead using lables as the anonymous label name. Remove general labels labels/relabels/label function. API /api/v1/labels and /api/v1/label/\u0026lt;label_name\u0026gt;/values support return matched metrics labels.   OAL:  Deprecate percentile function and introduce percentile2 function instead.     Bump up Kafka to fix CVE. Fix NullPointerException in Istio ServiceEntry registry. Remove unnecessary componentIds as series ID in the ServiceRelationClientSideMetrics and ServiceRelationServerSideMetrics entities. Fix not throw error when part of expression not matched any expression node in the MQE and `PromQL. Remove kafka-fetcher/default/createTopicIfNotExist as the creation is automatically since #7326 (v8.7.0). Fix inaccuracy nginx service metrics. Fix/Change Windows metrics name(Swap -\u0026gt; Virtual Memory)  memory_swap_free -\u0026gt; memory_virtual_memory_free memory_swap_total -\u0026gt; memory_virtual_memory_total memory_swap_percentage -\u0026gt; memory_virtual_memory_percentage   Fix/Change UI init setting for Windows Swap -\u0026gt; Virtual Memory Fix Memory Swap Usage/Virtual Memory Usage display with UI init.(Linux/Windows) Fix inaccurate APISIX metrics. Fix inaccurate MongoDB Metrics. Support Apache ActiveMQ server monitoring. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  ACTIVEMQ -\u0026gt; K8S_SERVICE   Calculate Nginx service HTTP Latency by MQE. MQE query: make metadata not return null. MQE labeled metrics Binary Operation: return empty value if the labels not match rather than report error. Fix inaccurate Hierarchy of RabbitMQ Server monitoring metrics. Fix inaccurate MySQL/MariaDB, Redis, PostgreSQL metrics. Support DoubleValue,IntValue,BoolValue in OTEL metrics attributes. [Break Change] gGRPC metrics exporter unified the metric value type and support labeled metrics. Add component definition(ID=152) for c3p0(JDBC3 Connection and Statement Pooling). Fix MQE top_n global query. Fix inaccurate Pulsar and Bookkeeper metrics.  UI  Fix the mismatch between the unit and calculation of the \u0026ldquo;Network Bandwidth Usage\u0026rdquo; widget in Linux-Service Dashboard. Add theme change animation. Implement the Service and Instance hierarchy topology. Support Tabs in the widget visible when MQE expressions. Support search on Marketplace. Fix default route. Fix layout on the Log widget. Fix Trace associates with Log widget. Add isDefault to the dashboard configuration. Add expressions to dashboard configurations on the dashboard list page. Update Kubernetes related UI templates for adapt data from eBPF access log. Fix dashboard K8S-Service-Root metrics expression. Add dashboards for Service/Instance Hierarchy. Fix MQE in dashboards when using Card widget. Optimize tooltips style. Fix resizing window causes the trace graph to display incorrectly. Add the not found page(404). 
Enhance VNode logic and support multiple Trace IDs in span\u0026rsquo;s ref. Add the layers filed and associate layers dashboards for the service topology nodes. Fix Nginx-Instance metrics to instance level. Update tabs of the Kubernetes service page. Add Airflow menu i18n. Add Support for dragging in the trace panel. Add workflow icon. Metrics support multiple labels. Support the SINGLE_VALUE for table widgets. Remove the General metric mode and related logical code. Remove metrics for unreal nodes in the topology. Enhance the Trace widget for batch consuming spans. Clean the unused elements in the UI-templates.  Documentation  Update the release doc to remove the announcement as the tests are through e2e rather than manually. Update the release notification mail a little. Polish docs structure. Move customization docs separately from the introduction docs. Add webhook/gRPC hooks settings example for backend-alarm.md. Begin the process of SWIP - SkyWalking Improvement Proposal. Add SWIP-1 Create and detect Service Hierarchy Relationship. Add SWIP-2 Collecting and Gathering Kubernetes Monitoring Data. Update the Overview docs to add the Service Hierarchy Relationship section. Fix incorrect words for backend-bookkeeper-monitoring.md and backend-pulsar-monitoring.md Document a new way to load balance OAP. Add SWIP-3 Support RocketMQ monitoring. Add OpenTelemetry SkyWalking Exporter deprecated warning doc. Update i18n for rocketmq monitoring. Fix: remove click event after unmounted. Fix: end loading without query results. Update nanoid version to 3.3.7. Update postcss version to 8.4.33. Fix kafka topic name in exporter doc. Fix query-protocol.md, make it consistent with the GraphQL query protocol. Add SWIP-5 Support ClickHouse Monitoring. Remove OpenTelemetry Exporter support from meter doc, as this has been flagged as unmaintained on OTEL upstream. Add doc of one-line quick start script for different storage types. Add FAQ for Why is Clickhouse or Loki or xxx not supported as a storage option?. Add SWIP-8 Support ActiveMQ Monitoring.  All issues and pull requests are here\n","title":"10.0.0","url":"/docs/main/next/en/changes/changes/"},{"content":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. 
Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. 
Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","title":"5.1.0","url":"/docs/main/latest/en/changes/changes-5.x/"},{"content":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. 
Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. 
Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","title":"5.1.0","url":"/docs/main/next/en/changes/changes-5.x/"},{"content":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  
5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. 
Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","title":"5.1.0","url":"/docs/main/v9.1.0/en/changes/changes-5.x/"},{"content":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. 
Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. 
Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  
Issues and Pull requests\n","title":"5.1.0","url":"/docs/main/v9.2.0/en/changes/changes-5.x/"},{"content":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. 
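Several entries above and in the 6.6.0 notes mention Spring annotation and controller inheritance fixes. The changelog does not describe the exact failure; the hedged sketch below only shows the inheritance pattern involved, i.e. handler methods declared on a base class and exposed through an annotated subclass, which is the shape of code the SpringMVC plugin has to resolve endpoint names for. All class and path names here are made up for illustration.

```java
// Hedged illustration of the controller-inheritance pattern; not taken from
// the changelog or the plugin sources.
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

abstract class CrudController {
    // Handler method declared on the base class and inherited by subclasses.
    @GetMapping("/{id}")
    public String get(@PathVariable String id) {
        return load(id);
    }
    protected abstract String load(String id);
}

@RestController
@RequestMapping("/books")
class BookController extends CrudController {
    // The effective endpoint is GET /books/{id}, resolved through inheritance.
    @Override
    protected String load(String id) {
        return "book-" + id;
    }
}
```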
{"content":"6.6.0 Project  [IMPORTANT] Local spans and exit spans are no longer treated as endpoints detected at the client and local side; only entry spans are endpoints. This reduces register load and memory cost.   Support MiniKube, Istio and SkyWalking-on-K8s deployment in CI. Support Windows and macOS builds in GitHub Actions CI. Support ElasticSearch 7 in the official dist. Hundreds of plugin test cases have been added to the GitHub Actions CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK threading classes. Add plugin for Armeria. Support setting the operation name in async spans. Enhance the WebFlux plugin, related to the Spring Gateway plugin. The WebFlux plugin is optional because it requires JDK 8. Fix a possible deadlock. Fix NPE when OAL scripts differ between OAP nodes, mostly during upgrades. Fix wrong peer in the ES plugin. Fix NPE in the Spring plugin. Fix wrong class name in the Dubbo 2.7 conflict patch. Fix Spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client-side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL. Support apdex func in OAL. Support custom ES config settings at the index level. Envoy ALS proto upgraded.
Update the JODA lib due to bugs in UTC +13/+14. Make the topN sample period configurable. Ignore no-statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage.  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on the topo map. Service instance dependency view and related metrics. Support using URL parameters in the trace query page. Support apdex score in the service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide for the agent in the Jetty case. Modify Consul cluster doc. Add a document about injecting traceId into logback with logstash in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests have begun to be migrated into the main repo; this is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified on multiple JDK versions: JDK 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configuring alarm settings (#3557) Language of an instance could be null (#3485) Make the query max window size configurable. (#3765) Remove two max-size-500 limits. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Make the scope of alarm messages more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Use application.yml for MySQLStorageProvider instead of datasource-settings.properties (#3564) Provide Consul dynamic configuration center implementation (#3560) Upgrade the Guava version to support higher JDK versions (#3541) Sync the latest ALS from the Envoy API (#3507) Set telemetry instance id for the Etcd and Nacos plugins (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before the streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in the topo circle-chart view. BugFix: when jvm-maxheap & jvm-maxnonheap are -1, free has no value. Fix bug: time select operation not taking effect. Fix bug: language initialization failed. Fix bug: instance language not shown. Feature: support exporting the trace list display as PNG. Feature: metrics comparison view. BugFix: fix dashboard top throughput copy.  Java Agent  Spring async scenario optimization (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL SQL query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for Ehcache 2.x (#3575) Support RequestRateLimiterGatewayFilterFactory (#3538) Kafka plugin compatible with KafkaTemplate (#3505) Add Pulsar APM plugin (#3476) Fix Spring Cloud Gateway traceId not transmitted, #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide Cassandra Java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may not be unique (#3510) Add feature to control the maximum number of agent log files (#3475) Agent supports custom properties.
(#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. 
Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). 
Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. 
Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. 
Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","title":"6.6.0","url":"/docs/main/v9.3.0/en/changes/changes-6.x/"},{"content":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. 
(#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. 
Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. 
Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. 
Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. 
And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","title":"6.6.0","url":"/docs/main/v9.4.0/en/changes/changes-6.x/"},{"content":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. 
Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. 
(#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. 
Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). 
Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. 
Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. 
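Several Java Agent items in the releases below touch the application toolkit, for example span#error in the application toolkit (6.2.0) and injecting traceId into logging appenders. The sketch below is a rough illustration of how application code can call that toolkit; it assumes the org.apache.skywalking:apm-toolkit-trace dependency and the ActiveSpan/TraceContext/@Trace APIs as published, and the calls are expected to degrade to no-ops when the agent is not attached. Treat it as a sketch under those assumptions, not as the changelog's own example.

// Hedged sketch: how application code might use the Java agent's application
// toolkit. Assumes the org.apache.skywalking:apm-toolkit-trace dependency is on
// the classpath and the agent is attached; without the agent these calls are
// designed to behave as no-ops.
import org.apache.skywalking.apm.toolkit.trace.ActiveSpan;
import org.apache.skywalking.apm.toolkit.trace.Trace;
import org.apache.skywalking.apm.toolkit.trace.TraceContext;

public class ToolkitUsageSketch {

    @Trace(operationName = "business/placeOrder")  // creates a local span around this method
    public void placeOrder(String orderId) {
        ActiveSpan.tag("order.id", orderId);        // attach a tag to the active span
        try {
            // ... business logic ...
        } catch (RuntimeException e) {
            ActiveSpan.error(e);                    // mark the active span as errored
            throw e;
        }
    }

    public String currentTraceId() {
        return TraceContext.traceId();              // read the trace id, e.g. for manual log correlation
    }
}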
{"content":"6.6.0 Project  [IMPORTANT] Local span and exit span are no longer treated as endpoints detected at the client and local sides. Only the entry span is the endpoint. This reduces register load and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS builds in GitHub Action CI. Support ElasticSearch 7 in the official dist. Hundreds of plugin cases have been added to the GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support setting the operation name in an async span. Enhance the webflux plugin, related to the Spring Gateway plugin. The webflux plugin is optional, as it requires JDK8. Fix a possible deadlock. Fix NPE when OAL scripts differ across OAP nodes, mostly in the upgrading stage. Fix bug about wrong peer in the ES plugin. Fix NPE in the Spring plugin. Fix wrong class name in the Dubbo 2.7 conflict patch. Fix Spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client-side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL. Support apdex func in OAL. Support custom ES config settings at the index level. Envoy ALS proto upgraded. Update JODA lib due to bugs in UTC +13/+14. Support configurable topN sample period. Ignore no-statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage.  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on the topo map. Service instance dependency view and related metrics. Support using URL parameters in the trace query page. Support apdex score in the service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide for the agent in the Jetty case. Modify Consul cluster doc. Add document about injecting traceId into logback with logstash in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into the main repo, and this is in progress. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified on multiple JDK versions: 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configuring alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limits. (#3748) Parameterize the cache size. (#3741) Fix ServiceInstanceRelation setting a wrong id (#3683) Make the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix to give a more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field.
(#3595) Support namespace in Nacos cluster/configuration (#3578) Use application.yml instead of datasource-settings.properties for MySQLStorageProvider (#3564) Provide Consul dynamic configuration center implementation (#3560) Upgrade Guava version to support higher JDK versions (#3541) Sync latest ALS from the Envoy API (#3507) Set telemetry instance id for the Etcd and Nacos plugins (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before the streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: jvm-maxheap & jvm-maxnonheap show -1 and free has no value. Fix bug: time select operation not taking effect. Fix bug: language initialization failed. Fix bug: instance language not shown. Feature: support exporting the trace list display as PNG. Feature: metrics comparison view. BugFix: fix dashboard top throughput copy.  Java Agent  Optimize the Spring async scenario (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL SQL query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Support RequestRateLimiterGatewayFilterFactory (#3538) Make the Kafka plugin compatible with KafkaTemplate (#3505) Add Pulsar apm plugin (#3476) Fix Spring-cloud-gateway traceId not transmitted #3411 (#3446) Make Gateway compatible with downstream loss (#3445) Provide Cassandra Java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may not be unique (#3510) Add feature to control the maximum number of agent log files (#3475) Agent supports custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove Travis badge (#3763) Replace user wall with typical users in the readme page (#3719) Update Istio docs according to the latest Istio release (#3646) Docs for deploying sw using the chart (#3573) Reorganize the doc, and provide a catalog (#3563) Committer vote and setup document. (#3496) Update ALS setup doc as Istio 1.3 is released (#3470) Fill FAQ reply in the official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Upgrading is highly recommended due to a Pxx metrics calculation bug. Make the agent work in the JDK9+ module system.  Java Agent  Make the agent work in the JDK9+ module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceId into the logstash appender in logback. Add OperationName (including endpoint name) max length threshold. Support using Regex to group operation names. Support Undertow routing handler. RestTemplate plugin supports operation name grouping. Fix ClassCastException in the Webflux plugin. Order the zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatibility issue. Fix MySQL 5 plugin issue. Make the log writer cached. Optimize the Spring Cloud Gateway plugin. Fix and improve the gRPC reconnect mechanism. Remove the Disruptor dependency from the agent.  Backend  Fix Pxx (p50, p75, p90, p95, p99) metrics func bug. (Critical) Support Gateway in backend analysis, even when it doesn't have a suitable language agent. Support HTTPS SSL access to ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Set up slow SQL length threshold. Fix TTL settings not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize.
Support trace auto completion when partial traces are abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in the topology page. Add trace table mode. Fix topology page bug. Fix calendar js bug. Fix “the topo-services component did not update the data in time after modifying the time range on the topology page” issue.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in the document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and they verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false sharing.  Java Agent  Java agent supports JDK 9 - 12, but doesn't support the Java Module system yet. Support JVM class auto instrumentation, cataloged as bootstrap plugins. Support JVM HttpClient and HttpsClient plugin (optional). Support backend upgrade without requiring a reboot. Open Redefine and Retransform to other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender printing tid. Fix gRPC reconnect bug. Fix trace segment service not reporting onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics (Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistence of ElasticSearch storage, by leveraging the ElasticSearch bulk queue. OAP internal communication protocol changed; it is not compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove the IDs can't be null log, to avoid confusion. Fix provider being initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh not working after endpoint and instance changed. Fix endpoint selector bug. Fix wrong copy value in slow traces. Fix trace not shown when it is partially broken (because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. All existing indexes must be deleted to upgrade. CI and Integration tests provided by ASF INFRA.
Plan to enhance tests, including e2e and plugin tests, in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement: 3-5 times quicker than before. Add Windows compile support in CI.  Java Agent  Support collecting SQL parameters in the MySQL plugin (optional). Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x (optional). TracingContext performance improvement. Support Apache ShardingSphere (incubating) plugin. Support span#error in the application toolkit. Fix OOM caused by an empty exception stack. Fix wrong cause exception of stack in span log. Fix running context not cleared in the SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stopping when doing HTTP forward. Fix lettuce plugin async command bug and NPE. Fix webflux plugin cast exception. [CI] Support import check.  Backend  Support time series ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. The Dynamic Configuration module provides multiple implementations: DCS (gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts on the topology edge. New topology query protocol and implementation. Support Envoy ALS in the Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider being loaded repeatedly. Change TOP slow SQL storage in ES from Keyword to Text, due to the too-long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug. (No feature provided yet) Filter pods not in the 'Running' phase in the mesh scenario. Fix query alarm bug in MySQL and H2 storage. Code refactor.  UI  Fix some ID is null queries. Page refactor, especially the time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts on the topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from the document. Add FAQ for WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as an Apache Top Level Project.\n Support compiling the project agent, backend, and UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in the SpringMVC plugin. Support span async close APIs in core, used in the Vert.x plugin. Support MySQL 5 and 8 plugins. Support setting the instance id manually (optional). Support customized enhance trace plugins in the optional list. Support setting the peer in an Entry Span. Support Zookeeper plugin. Fix Webflux plugin creating an unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+. Fix wrong operation name in the postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent not running in JVM 6/7; remove module-info.class. Fix agent not working well if there is whitespace in the agent path. Fix Spring annotation bug and inheritance enhancement issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU-limited environments. 3x improvement in the service mesh scenario (no trace) on an 8C16G VM. Significantly less CPU cost at low payload.\n Support database metrics and SLOW SQL detection. Support setting the max size of metadata queries, and change the default to 5000 from 100.
Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. 
And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","title":"6.6.0","url":"/docs/main/v9.6.0/en/changes/changes-6.x/"},{"content":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. 
Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. 
(#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. 
Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). 
Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. 
Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. 
Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","title":"6.6.0","url":"/docs/main/v9.7.0/en/changes/changes-6.x/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/latest/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. 
Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/next/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. 
Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.1.0/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.2.0/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. 
Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.3.0/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. 
Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.4.0/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  
All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.5.0/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.6.0/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. 
Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.7.0/en/changes/changes-7.0.0/"},{"content":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. 
Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","title":"8.0.0","url":"/docs/main/latest/en/changes/changes-8.0.0/"},{"content":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","title":"8.0.0","url":"/docs/main/next/en/changes/changes-8.0.0/"},{"content":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. 
{"content":"8.0.1 OAP-Backend  Fix no-init mode not working in ElasticSearch storage.  ","title":"8.0.1","url":"/docs/main/latest/en/changes/changes-8.0.1/"},
{"content":"8.1.0 Project  Support Kafka as an optional transport layer for traces, JVM metrics, profiling snapshots, and meter system data. Support the Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the concurrency ClassLoader case. [Core] Separate the config of the plugins from the core level. [Core] Support caching instrumented classes in memory or in a file, to be compatible with other agents, such as Arthas. Add the logic endpoint concept; any span or tags flagged by a logic endpoint can be analyzed. Add Spring annotation component name for UI visualization only. Add support to trace CALL procedures in the MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB Java client plugin. Support brpc Java plugin. Support ConsoleAppender in the logback v1 plugin. Enhance Vert.x endpoint names. Optimize the code to prevent Mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix Lettuce plugin issue where traces sometimes don't show the span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label-based filters in the Prometheus fetcher and OpenCensus receiver. Support using k8s ConfigMap as the configuration center. Support OAP health check and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for the Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for the ElasticSearch implementation to increase stability. Reduce the length of storage entity names in self-observability for MySQL and TiDB storage. Fix labels missing in the Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd-level aggregation in self-observability. Fix searchService bug in the ES implementation. Fix wrong validation of the endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0.  UI  Support endpoint dependency graph. Support x-scroll of the trace/profile page. Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat 9 on Windows. Add istioctl ALS commands to the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query SQL injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","title":"8.1.0","url":"/docs/main/latest/en/changes/changes-8.1.0/"},
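(Editorial aside: the 8.1.0 entry above introduces the Meter system with native metrics APIs in the Java agent. The sketch below shows one plausible way to register and increment a counter through the apm-toolkit-meter module; the metric name, tag, and surrounding class are invented, and the MeterFactory/Counter names and signatures are stated as assumptions to be checked against the toolkit documentation for the agent version in use.)

```java
// Hedged sketch only: MeterFactory, Counter, and increment(double) are assumptions
// based on the apm-toolkit-meter module; verify against the official toolkit docs.
import org.apache.skywalking.apm.toolkit.meter.Counter;
import org.apache.skywalking.apm.toolkit.meter.MeterFactory;

public class CheckoutMetrics {

    // A labeled counter; the attached Java agent is expected to report it to the OAP meter receiver.
    private static final Counter ORDERS_TOTAL = MeterFactory
            .counter("checkout_orders_total")   // invented metric name
            .tag("region", "demo")              // invented label
            .build();

    public void onOrderPlaced() {
        ORDERS_TOTAL.increment(1d);             // count one placed order
    }
}
```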
{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for the ALS solution of service mesh observability. Support compiling (including testing) in JDK 11. Support building a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka (when the agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the instance properties being cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for the MongoDB plugin. Enhance the parameter truncation mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke. Enhance the configuration initialization core. A plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugins. Fix inconsistent logic bug in PrefixMatch. Fix duplicate exit spans in the Feign LoadBalancer mechanism. Fix the target service being blocked by the Kafka reporter. Fix Kafka reporter configurations not working. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between the Kafka reporter and the sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases in the Kafka plugin. Fix wrong time format in serviceTraffic update. Upgrade bytebuddy to 1.10.14.  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing; the Envoy tracer depends on the Envoy community. Support querying traces by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for the Prometheus fetcher and self telemetry. Support labeled histogram in the Prometheus format. Support the status of a segment based on the entry span or the first span only. Support the error segment in the sampling mechanism. Support SSL certs of the gRPC server. Support labeled metrics in the alarm rule setting. Support querying all labeled data if no explicit label is in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the Prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of the slow SQL column in the MySQL storage. Optimize the topology query; use the client-side component name when there is no server-side mapping. Add component IDs for Python components. Add component ID range for C++. Fix Slack notification setting NPE. Fix missing module checks in the module manager core. Fix authentication not working in the sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode not working. Fix an error when executing startup.bat when the log directory exists. Add syncBulkActions configuration to set up the batch size of the metrics persistence. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support querying traces by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/latest/en/changes/changes-8.2.0/"},
Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.2.0/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. 
Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.3.0/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. 
Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.4.0/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. 
Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.5.0/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. 
Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  
Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.6.0/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. 
Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.7.0/en/changes/changes-8.2.0/"},{"content":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. 
Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","title":"8.3.0","url":"/docs/main/latest/en/changes/changes-8.3.0/"},{"content":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. 
Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","title":"8.3.0","url":"/docs/main/v9.7.0/en/changes/changes-8.3.0/"},{"content":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. 
Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port. Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gRPC.  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some errors when using JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix that the group name can\u0026rsquo;t be queried in GraphQL. Fix potential gRPC connection leak (not closed) for the channels among OAP instances. Filter OAP instances (unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues. Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix that the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in the Envoy metrics service analyzer. Fix that the priority setting of the ALS analyzers doesn\u0026rsquo;t work. Fix the bug that endpoint-name-grouping.yml is not customizable in the Dockerized case. Fix the bug that the Istio version metric type on the UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance. Fix the bug that a NumberFormatException may occur when using JSON as InfluxDB.ResponseFormat. Fix timeBucket not taking effect in the EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s property that is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix the bug that endpoint name grouping does not work due to setting the service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to Feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix the bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the OpenCensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix the bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying logs in the ES storage. Chore: Remove duplicate code in the Envoy ALS handler. Remove the strict rule of the OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configuration, supporting Prometheus node-exporter VM metrics pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on the single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separate Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement the group selector in the topology. Fix all as the default parameter for the services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology rendering all service nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. 
Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","title":"8.4.0","url":"/docs/main/latest/en/changes/changes-8.4.0/"},{"content":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. 
Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. 
Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. 
{"content":"8.5.0
Project
Incompatible Change. Indices and templates of the ElasticSearch (6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0 to download the node x64 binary on Apple Silicon. Add E2E test for VM monitoring with metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build the Docker image for the Java agent.
Java Agent
Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detailed messages when redefinition errors occur. Fix ClassCastException of the log4j gRPC reporter. Fix NPE when the Kafka reporter is activated. Enhance the gRPC log appender to allow a layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin getting a null host in Redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor implement the EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support changing the config agent.span_limit_per_segment at runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin (a usage sketch follows this entry). Fix apm-toolkit-log4j-2.x-activation missing the trace ID in async logs. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt to hbase client 2.x. Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug in springmvc-annotation-4.x-plugin where the witness class does not exist in some versions. Add Redis command parameters to the 'db.statement' field on the Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive API not collecting the HTTP status code. Fix bug where the asynchttpclient plugin does not record the response status code. Fix spanLayer being null in optional plugins (gateway-2.0.x-plugin, gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags working for static methods (a usage sketch follows this entry).
OAP-Backend
Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept "Event" and its implementations to collect events. Add some defensive code for NPE and bump up the Kubernetes client version to expose the exception stack trace. Update the timestamp field type for LogQuery. Support the Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with the same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove the filename suffix in the meter active file config. Introduce the log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in the meter system. Remove the Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7.
Require the Zipkin receiver to work with the zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement possibly being null. Remove fields of the parent entity in the relation sources. Save Envoy HTTP access logs when an error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics-in, metrics-scraping, mesh-error and trace-in metrics to the zipkin receiver. Fix tags storage of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch storage. Fix receiver not needing to query itself during healthCheck. Remove the group concept from AvgHistogramFunction. Heatmap (function result) doesn't support labels. Support metrics grouped by scope labelValue in MAL; no need for a globally identical labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self-monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Use "service.istio.io/canonical-name" to replace the "app" label to resolve the Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry not removing the correct entry. Append the root slash (/) to getIndex and getTemplate requests in the ES (6 and 7) client. Fix the disable statement not working; this bug has existed since 8.0.0. Remove the useless metric in vm.yaml.
UI
Update the selector scroller to show on all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace ID when clearing search conditions. Search endpoints with keywords. Fix pageSize on the logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log page tags column not updated. Fix the problem that the footer and topology group are shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, dynamically import routing components, and improve first-page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topology.
Documentation
Polish documentation since we have covered all tracing, logging, and metrics fields. Adjust documentation about the Zipkin receiver. Add backend-infrastructure-monitoring doc.
All issues and pull requests are here\n","title":"8.5.0","url":"/docs/main/latest/en/changes/changes-8.5.0/"},
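One of the 8.5.0 Java Agent items above is support for @Trace, @Tag and @Tags on static methods. A hedged sketch of what that usage could look like with the apm-toolkit-trace annotations follows; the class, method and tag names are made up for illustration, and the arg[0]/returnedObj value expressions are used as commonly documented for the toolkit.

```java
// Sketch of toolkit annotations on a static method (hypothetical class and tag values).
import org.apache.skywalking.apm.toolkit.trace.Tag;
import org.apache.skywalking.apm.toolkit.trace.Tags;
import org.apache.skywalking.apm.toolkit.trace.Trace;

public class PriceService {
    // With the 8.5.0 agent, the annotations below are honored even though the
    // method is static: a local span named "PriceService/quote" is created and
    // tagged with the first argument and the returned value.
    @Trace(operationName = "PriceService/quote")
    @Tags({
        @Tag(key = "sku", value = "arg[0]"),
        @Tag(key = "price", value = "returnedObj")
    })
    public static String quote(String sku) {
        return "9.99";
    }
}
```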
Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","title":"8.5.0","url":"/docs/main/v9.7.0/en/changes/changes-8.5.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. 
perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/latest/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. 
Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/next/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. 
Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. 
Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.1.0/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. 
Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.2.0/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. 
Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.3.0/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. 
new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. 
Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.4.0/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. 
Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.5.0/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. 
Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.6.0/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. 
fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. 
Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.7.0/en/changes/changes-8.6.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. 
Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envoy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk flush is not controlled by the persistent timer anymore. 
The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: remove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/latest/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. 
Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envoy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. 
Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk flush is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: remove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. 
Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/next/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. 
Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. 
Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/v9.1.0/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. 
Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. 
Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. 
Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/v9.2.0/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. 
Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. 
Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/v9.3.0/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. 
DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. 
Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. 
The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/v9.4.0/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. 
Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. 
Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. 
Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/v9.5.0/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. 
Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. 
{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to the test matrix. DataCarrier consumer adds a new event notification: the nothingToConsume method is called if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry; this is only for people who want to help test the master branch code, please don\u0026rsquo;t use them in production environments.  Java Agent  Supports modifying span attributes in async mode. The agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for the log report channel. This feature and the grpc channel are going to be removed after the Satellite 0.2.0 release. Remove the deprecated gRPC method io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception errors.
Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. 
Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. 
Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/v9.7.0/en/changes/changes-8.7.0/"},{"content":"8.8.0 Project  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. 
Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","title":"8.8.0","url":"/docs/main/latest/en/changes/changes-8.8.0/"},{"content":"8.8.0 Project  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. 
kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. 
Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","title":"8.8.0","url":"/docs/main/next/en/changes/changes-8.8.0/"},{"content":"8.8.0 Project  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. 
Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  
Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","title":"8.8.0","url":"/docs/main/v9.1.0/en/changes/changes-8.8.0/"},{"content":"8.8.0 Project  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. 
Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","title":"8.8.0","url":"/docs/main/v9.2.0/en/changes/changes-8.8.0/"},{"content":"8.8.0 Project  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. 
Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. 
Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","title":"8.8.0","url":"/docs/main/v9.3.0/en/changes/changes-8.8.0/"},{"content":"8.8.0 Project  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. 
Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","title":"8.8.0","url":"/docs/main/v9.4.0/en/changes/changes-8.8.0/"},{"content":"8.8.0 Project  Split javaagent into skywalking-java repository. 
https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. 
Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","title":"8.8.0","url":"/docs/main/v9.5.0/en/changes/changes-8.8.0/"},{"content":"8.8.0 Project  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. 
Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. 
Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","title":"8.8.0","url":"/docs/main/v9.6.0/en/changes/changes-8.8.0/"},{"content":"8.8.0 Project  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. 
Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","title":"8.8.0","url":"/docs/main/v9.7.0/en/changes/changes-8.8.0/"},{"content":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  Documentation All issues and pull requests are here\n","title":"8.8.1","url":"/docs/main/latest/en/changes/changes-8.8.1/"},{"content":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. 
Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  Documentation All issues and pull requests are here\n","title":"8.8.1","url":"/docs/main/next/en/changes/changes-8.8.1/"},{"content":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. 
Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. 
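The 8.9.0 notes above include a MicroBench module intended to make JMH tests easier for developers to write. A minimal JMH benchmark of the kind such a module would host might look like the following sketch; the benchmarked method and class name are purely illustrative, not OAP code:

```java
// Illustrative JMH micro-benchmark; a real benchmark would target actual OAP code paths.
import java.util.concurrent.TimeUnit;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;

@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
public class StringConcatBenchmark {
    @Benchmark
    public String concat() {
        // Placeholder workload measured per invocation.
        return "service-" + System.nanoTime();
    }
}
```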
Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","title":"8.9.0","url":"/docs/main/latest/en/changes/changes-8.9.0/"},
{"content":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","title":"8.9.1","url":"/docs/main/latest/en/changes/changes-8.9.1/"},{"content":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. 
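The 9.0.0 Project notes above point out that the -Dlog4j2.formatMsgNoLookups=true JVM option or the LOG4J_FORMAT_MSG_NO_LOOKUPS environment variable also mitigates the log4j2 CVEs. As a hedged sketch only, the same property can be set programmatically, provided it runs before Log4j2 initializes; upgrading log4j2, as the release itself does, remains the actual fix:

```java
// Illustrative stop-gap: disables message lookups before any Log4j2 initialization.
// Equivalent to passing -Dlog4j2.formatMsgNoLookups=true on the JVM command line.
public final class DisableLog4jLookups {
    public static void main(String[] args) {
        System.setProperty("log4j2.formatMsgNoLookups", "true");
        // ... bootstrap the application only after the property is in place ...
    }
}
```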
Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. 
[Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. 
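The 9.0.0 notes above introduce Column.shardingKeyIdx and explain how a sharding key groups one entity's time series together in BanyanDB. The following is a simplified, hypothetical stand-in (not the real OAP annotation, whose definition lives in the storage module) that only illustrates the idea:

```java
// Illustrative only: a stand-in @Column annotation with a sharding key index.
// All names here are hypothetical; they mirror the concept, not SkyWalking's actual types.
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

public class ShardingKeyExample {
    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.FIELD)
    @interface Column {
        String columnName();
        int shardingKeyIdx() default -1; // -1 means the column is not part of the sharding key
    }

    // Per-minute traffic gauge of one service: sharding by service_id keeps all of
    // ServiceA's data points in the same shard/segment, as the note above describes.
    static class ServiceTrafficGauge {
        @Column(columnName = "service_id", shardingKeyIdx = 0)
        String serviceId;
        @Column(columnName = "time_bucket")
        long timeBucket;
        @Column(columnName = "value")
        long value;
    }
}
```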
Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","title":"9.0.0","url":"/docs/main/latest/en/changes/changes-9.0.0/"},{"content":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. 
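The DataCarrier#MultipleChannelsConsumer item above describes letting one consumer thread drive several channels while giving some channels priority, so enabling every analyzer by default does not starve the hot paths. That class is internal SkyWalking machinery; the sketch below only illustrates the general idea with a hypothetical queue-based channel type, and is not the OAP implementation.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

/** Illustrative only: one thread drains several queues, visiting higher-weight queues more often. */
public class PrioritizedMultiChannelConsumer implements Runnable {
    /** A channel is just a queue plus a weight: higher weight = more items drained per cycle. */
    record Channel(Queue<String> queue, int weight) { }

    private final List<Channel> channels = new ArrayList<>();
    private volatile boolean running = true;

    public void addChannel(Queue<String> queue, int weight) {
        channels.add(new Channel(queue, Math.max(1, weight)));
    }

    @Override
    public void run() {
        while (running) {
            boolean consumedSomething = false;
            for (Channel channel : channels) {
                // Drain up to `weight` items before moving on, so high-priority
                // channels get proportionally more service time per cycle.
                for (int i = 0; i < channel.weight(); i++) {
                    String item = channel.queue().poll();
                    if (item == null) {
                        break;
                    }
                    consumedSomething = true;
                    System.out.println("consumed: " + item);
                }
            }
            if (!consumedSomething) {
                Thread.onSpinWait();   // back off briefly when every channel is empty
            }
        }
    }

    public void stop() {
        running = false;
    }

    public static void main(String[] args) throws InterruptedException {
        PrioritizedMultiChannelConsumer consumer = new PrioritizedMultiChannelConsumer();
        consumer.addChannel(new ConcurrentLinkedQueue<>(List.of("trace-1", "trace-2", "trace-3")), 3); // high priority
        consumer.addChannel(new ConcurrentLinkedQueue<>(List.of("meter-1")), 1);                       // low priority
        Thread worker = new Thread(consumer);
        worker.start();
        Thread.sleep(100);
        consumer.stop();
        worker.join();
    }
}
```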
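The sharding-key paragraph above is about keeping every data point of one entity's metric physically together, as in the ServiceA example. The sketch below is plain Java, not BanyanDB or the real Column.shardingKeyIdx definition; it only shows how grouping by the encoded service ID (the assumed sharding key) puts consecutive points of one service into the same bucket.

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/** Illustrative only: group time-series points by a sharding key (here, the encoded service ID). */
public class ShardingKeySketch {
    record MetricPoint(String encodedServiceId, String minute, double value) { }

    public static void main(String[] args) {
        List<MetricPoint> points = List.of(
            new MetricPoint("ServiceA", "01-28 18:30", 1),
            new MetricPoint("ServiceB", "01-28 18:30", 7),
            new MetricPoint("ServiceA", "01-28 18:31", 2),
            new MetricPoint("ServiceA", "01-28 18:32", 3),
            new MetricPoint("ServiceA", "01-28 18:32", 4));

        // All points sharing a sharding key land in one bucket, mirroring how a
        // column-oriented store can keep one entity's series in one shard/row
        // where it compresses well, instead of scattering it across the table.
        Map<String, List<MetricPoint>> byShardingKey = new LinkedHashMap<>();
        for (MetricPoint p : points) {
            byShardingKey.computeIfAbsent(p.encodedServiceId(), k -> new ArrayList<>()).add(p);
        }
        byShardingKey.forEach((serviceId, series) ->
            System.out.println(serviceId + " -> " + series.size() + " consecutive points"));
    }
}
```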
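The Builder refactor noted above removes the hard requirement that storage plugins shuttle data through a HashMap between the data object and the database-native structure. The actual OAP interfaces are not quoted in the entry; the sketch below uses invented types (ServiceTraffic, EsDocument, StorageConverter) purely to illustrate the converter idea, under the assumption that each plugin maps directly to its own native representation.

```java
import java.util.HashMap;
import java.util.Map;

/** Illustrative only: a storage plugin ships its own converter instead of a shared HashMap contract. */
public class ConverterSketch {

    /** Hypothetical domain object produced by the analysis core. */
    record ServiceTraffic(String id, String name, short layer) { }

    /** Hypothetical database-native structure of an ElasticSearch-style plugin. */
    record EsDocument(String index, String id, Map<String, Object> source) { }

    /** The converter contract each storage plugin implements for its own native structure. */
    interface StorageConverter<N> {
        N toNative(ServiceTraffic entity);
        ServiceTraffic fromNative(N nativeData);
    }

    static final StorageConverter<EsDocument> ES_CONVERTER = new StorageConverter<>() {
        @Override
        public EsDocument toNative(ServiceTraffic entity) {
            Map<String, Object> source = new HashMap<>();
            source.put("name", entity.name());
            source.put("layer", entity.layer());
            return new EsDocument("service_traffic", entity.id(), source);
        }

        @Override
        public ServiceTraffic fromNative(EsDocument doc) {
            return new ServiceTraffic(
                doc.id(),
                (String) doc.source().getOrDefault("name", ""),
                ((Number) doc.source().getOrDefault("layer", (short) 0)).shortValue());
        }
    };

    public static void main(String[] args) {
        EsDocument doc = ES_CONVERTER.toNative(new ServiceTraffic("svc-a", "ServiceA", (short) 1));
        System.out.println(doc.index() + "/" + doc.id() + " -> " + doc.source());
        System.out.println(ES_CONVERTER.fromNative(doc));
    }
}
```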
This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","title":"9.0.0","url":"/docs/main/v9.5.0/en/changes/changes-9.0.0/"},
{"content":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. 
Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. 
Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","title":"9.1.0","url":"/docs/main/latest/en/changes/changes-9.1.0/"},
[Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. 
Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","title":"9.1.0","url":"/docs/main/next/en/changes/changes-9.1.0/"},{"content":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. 
Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. 
Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","title":"9.1.0","url":"/docs/main/v9.1.0/en/changes/changes/"},{"content":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. 
Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. 
Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","title":"9.1.0","url":"/docs/main/v9.2.0/en/changes/changes-9.1.0/"},{"content":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. 
Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. 
Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","title":"9.1.0","url":"/docs/main/v9.3.0/en/changes/changes-9.1.0/"},{"content":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. 
[Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","title":"9.2.0","url":"/docs/main/v9.7.0/en/changes/changes-9.2.0/"},{"content":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. 
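The DurationUtils.convertToTimeBucket fix above deals with SkyWalking's time-bucket representation, where a timestamp is flattened into a numeric bucket such as yyyyMMddHHmm at minute precision. The snippet below is only an illustration of that conversion idea, assuming UTC and standard java.time APIs; it is not the project's actual DurationUtils code.

    import java.time.Instant;
    import java.time.ZoneOffset;
    import java.time.format.DateTimeFormatter;

    public class TimeBucketExample {
        private static final DateTimeFormatter MINUTE_BUCKET =
            DateTimeFormatter.ofPattern("yyyyMMddHHmm").withZone(ZoneOffset.UTC);

        // Flatten an epoch-millisecond timestamp into a minute-level bucket, e.g. 202210121530.
        static long toMinuteBucket(long epochMillis) {
            return Long.parseLong(MINUTE_BUCKET.format(Instant.ofEpochMilli(epochMillis)));
        }

        public static void main(String[] args) {
            System.out.println(toMinuteBucket(System.currentTimeMillis()));
        }
    }

Hour- and day-level buckets follow the same idea with shorter patterns (yyyyMMddHH, yyyyMMdd).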
Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. 
This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. 
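The MAL histogram fix above spells out OpenTelemetry-style bucket boundaries: (-infinity, explicit_bounds[0]] for the first bucket, (explicit_bounds[i-1], explicit_bounds[i]] for the middle buckets, and (explicit_bounds[last], +infinity) for the overflow bucket. A small, self-contained illustration of that boundary rule, independent of SkyWalking's internal implementation:

    public class OtelHistogramBuckets {
        // Returns the bucket index for a value under OpenTelemetry-style boundaries:
        //   (-inf, bounds[0]], (bounds[i-1], bounds[i]], ..., (bounds[last], +inf)
        static int bucketIndex(double value, double[] explicitBounds) {
            for (int i = 0; i < explicitBounds.length; i++) {
                if (value <= explicitBounds[i]) {   // upper bound is inclusive
                    return i;
                }
            }
            return explicitBounds.length;           // overflow bucket with an open upper bound
        }

        public static void main(String[] args) {
            double[] bounds = {10, 50, 100};
            System.out.println(bucketIndex(10, bounds));    // 0 -> (-inf, 10]
            System.out.println(bucketIndex(10.5, bounds));  // 1 -> (10, 50]
            System.out.println(bucketIndex(500, bounds));   // 3 -> (100, +inf)
        }
    }

Only the index lookup is shown; accumulating counts per bucket is straightforward from here.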
Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. 
Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","title":"9.3.0","url":"/docs/main/latest/en/changes/changes-9.3.0/"},{"content":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). 
Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. 
However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. 
Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. 
UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","title":"9.3.0","url":"/docs/main/next/en/changes/changes-9.3.0/"},{"content":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. 
Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. 
Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. 
Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. 
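Background for the "Self-Observability: Fix GC Time calculation" item above: the JVM exposes GC time through GarbageCollectorMXBean as a cumulative counter per collector, so a self-observability gauge generally has to report the delta between two readings rather than the raw value. A small illustrative sketch of that delta approach (not the OAP telemetry code):

```java
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.util.HashMap;
import java.util.Map;

public class GcTimeDelta {
    private final Map<String, Long> lastCollectionTime = new HashMap<>();

    // Returns, per collector, the GC time (milliseconds) accumulated since the previous call.
    public Map<String, Long> collect() {
        Map<String, Long> delta = new HashMap<>();
        for (GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) {
            long total = gc.getCollectionTime(); // cumulative milliseconds, -1 if unsupported
            if (total < 0) {
                continue;
            }
            long previous = lastCollectionTime.getOrDefault(gc.getName(), 0L);
            delta.put(gc.getName(), total - previous);
            lastCollectionTime.put(gc.getName(), total);
        }
        return delta;
    }
}
```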
Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","title":"9.3.0","url":"/docs/main/v9.3.0/en/changes/changes/"},{"content":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. 
Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. 
This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. 
Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. 
Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","title":"9.3.0","url":"/docs/main/v9.4.0/en/changes/changes-9.3.0/"},{"content":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. 
Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). 
gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. 
This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. 
Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","title":"9.3.0","url":"/docs/main/v9.5.0/en/changes/changes-9.3.0/"},{"content":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. 
Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. 
[Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  
This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. 
Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","title":"9.3.0","url":"/docs/main/v9.6.0/en/changes/changes-9.3.0/"},{"content":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. 
Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. 
[Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  
This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. 
Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to the General-Instance and FaaS-Instance dashboards. Enhance the legend of the metrics graph widget with the summary table. Add Apache EventMesh logo file. Fix conditions for trace profiling. Fix the tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance the tags component to search tags with the input value. Fix topology loading style. Fix the update-metric processor for readRecords and remove readSampledRecords from the metrics selector. Add trace association for FaaS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement the task creation UI for the network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add a metadata-uid setup doc about the Kubernetes coordinator in cluster management. Add a doc for adding menus to booster UI. Move general good-read blogs from Agent Introduction to Academy. Add a re-post of the blog Scaling with Apache SkyWalking in the academy list. Add a re-post of the blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for the Report Span Attached Events data collecting protocol. Add new docs for the Record query protocol. Update Server Agents and Compatibility for the PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","title":"9.3.0","url":"/docs/main/v9.7.0/en/changes/changes-9.3.0/"},{"content":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom version to 29. Bump up Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Add Micrometer as a new component (ID=141). Refactor the session cache in MetricsPersistentWorker. Cache enhancement: don't read new metrics from the database in the minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. // 1.3 the OAP cluster is rebalanced due to scaling // (2) the metrics are from the time after the timeOfLatestStabilitySts // (3) the metrics don't exist in the cache // the kernel should NOT try to load it from the database. // // Notice, about condition (2), // for the specific minute of booted successfully, the metrics are expected to load from database when // it doesn't exist in the cache.  Remove the offset of the metric session timeout according to worker creation sequence. Correct MetricsExtension annotation declarations in manual entities. Support component ID priority in process relation metrics. Remove abandoned logic in MergableBufferedData, which caused unexpected no-updates. Fix a missing LastUpdateTimestamp setting that caused the metrics session to expire. Rename the MAL rule spring-sleuth.yaml to spring-micrometer.yaml. Fix a memory leak in the Zipkin API. Remove the dependency of ElasticSearch indices' refresh_interval on the elasticsearch/flushInterval config. Now, it uses core/persistentPeriod + 5s as the refresh_interval for all indices instead. Change elasticsearch/flushInterval to 5s (was 15s).
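The cache-read conditions quoted in the MetricsPersistentWorker entry above boil down to a single predicate. Below is a condensed, hypothetical restatement of those conditions in plain Java; the class, method and parameter names are illustrative and do not come from the OAP source:

```java
// Hypothetical restatement of the quoted cache conditions; not the actual MetricsPersistentWorker code.
public final class MetricsCachePolicy {
    /**
     * Returns true when the kernel should fall back to the database,
     * i.e. when any of the quoted conditions (1)-(3) is NOT met.
     */
    static boolean shouldReadFromDatabase(boolean latestStabilityTimeBucketKnown, // condition (1)
                                          long metricTimeBucket,
                                          long timeOfLatestStabilitySts,          // condition (2)
                                          boolean existsInCache) {                // condition (3)
        boolean skipDatabase = latestStabilityTimeBucketKnown
                && metricTimeBucket > timeOfLatestStabilitySts
                && !existsInCache;
        return !skipDatabase;
    }

    public static void main(String[] args) {
        // A metric arriving after the stability point and missing from the cache is NOT loaded from the DB.
        System.out.println(shouldReadFromDatabase(true, 202301011201L, 202301011200L, false)); // false
        // For the exact minute of a successful boot, the metric is still expected to be loaded from the DB.
        System.out.println(shouldReadFromDatabase(true, 202301011200L, 202301011200L, false)); // true
    }
}
```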
Optimize the flushInterval of the ElasticSearch BulkProcessor to avoid extra periodic flushes in continuous bulk streams. Fix an unexpected dot being added when exp is a pure metric name and expPrefix != null. Support monitoring MariaDB. Remove measure/stream specific interval settings in BanyanDB. Add global-specific settings used to override global configurations (e.g. segmentIntervalDays, blockIntervalHours) in BanyanDB. Use TTL-driven interval settings for the measure-default group in BanyanDB. Fix the wrong group of non-time-relative metadata in BanyanDB. Refactor StorageData#id from a String to the new StorageID object. Support multiple component IDs at the service topology level. Add the ElasticSearch.Keyword annotation to declare the target field type as keyword. [Breaking Change] The component_id column of service_relation_client_side and service_relation_server_side has been replaced by component_ids. Support priority definition in the component-libraries.yml. Enhance the service topology query: when multiple components are detected from the server side, the node's component type is determined by the priority, which was random in the previous release. Remove component_id from service_instance_relation_client_side and service_instance_relation_server_side. Make the satellite E2E test more stable. Add Istio 1.16 to the test matrix. Register ValueColumn as a Tag for Record in the BanyanDB storage plugin. Bump up Netty to 4.1.86. Remove unnecessary additional columns when the storage is in logical sharding mode. The cluster coordinator supports a watch mechanism for notifying RemoteClientManager and ServerStatusService. Fix ServiceMeshServiceDispatcher overwriting the ServiceDispatcher debug file when SW_OAL_ENGINE_DEBUG is enabled. Use groupBy and in operators to optimize the topology query for the BanyanDB storage plugin. Support a server status watcher for MetricsPersistentWorker to check whether the metrics require initialization. Fix incorrect meter values when using the sumPerMinLabeld or sumHistogramPercentile MAL functions. Fix attached events not being displayed when querying traces with the Zipkin Lens UI. Remove time_bucket for both Stream and Measure kinds in the BanyanDB plugin. Merge TIME_BUCKET of Metrics and Record into StorageData. Support no layer in the listServices query. Fix the time_bucket of ServiceTraffic not being set correctly in slowSql of MAL. Correct the TopN record query DAO of BanyanDB. Tweak interval settings of BanyanDB. Support monitoring AWS Cloud EKS. Bump the BanyanDB Java client to 0.3.0-rc1. Remove the id tag from measures. Add BanyanDB.MeasureField to mark a column as a BanyanDB Measure field. Add BanyanDB.StoreIDTag to store a process's id for searching. [Breaking Change] The supported version of ShardingSphere-Proxy is upgraded from 5.1.2 to 5.3.1. Due to the changes of ShardingSphere's API, versions before 5.3.1 are not compatible. Add the eBPF network profiling E2E test per storage. Fix TCP service instances lacking instance properties like pod and namespace, which caused Pod log not to work for TCP workloads. Add the Python HBase happybase module component ID (94). Fix the gRPC alarm not updating settings from the dynamic configuration source. Add the batchOfBytes configuration to limit the size of bulk flushes. Add the Python Websocket module component ID (7018). [Optional] Optimize single trace query performance by customizing routing in ElasticSearch. SkyWalking trace segments and Zipkin spans use the trace ID for routing.
This is OFF by default, controlled by storage/elasticsearch/enableCustomRouting. Enhance the OAP HTTP server to support HTTPS. Remove handler scanning in the otel receiver; use manual initialization instead. Add aws-firehose-receiver to support collecting AWS CloudWatch metrics (OpenTelemetry format). Notice: no HTTPS/TLS setup is supported. Following the AWS Firehose request, it uses a proxy request (https://... instead of /aws/firehose/metrics), so there must be a proxy (Nginx, Envoy, etc.). Avoid Antlr dependency versions differing between compile time and runtime. Now PrometheusMetricConverter#escapedName also supports converting / to _ (an escaping sketch follows below). Add missing TCP throughput metrics. Refactor the @Column annotation: swap Column#name and ElasticSearch.Column#columnAlias, and rename ElasticSearch.Column#columnAlias to ElasticSearch.Column#legacyName (an annotation usage sketch follows after this release entry). Add the Python HTTPX module component ID (7019). Migrate tests from JUnit 4 to JUnit 5. Refactor http-based alarm plugins and extract common logic to HttpAlarmCallback. Support Amazon Simple Storage Service (Amazon S3) metrics monitoring. Support processing Sum metrics with the AGGREGATION_TEMPORALITY_DELTA case. Support Amazon DynamoDB monitoring. Support the Prometheus HTTP API and PromQL. The scope in the Entity of the metrics query v1 protocol is not required and is corrected automatically; the scope is determined based on the metric itself. Add an explicit ReadTimeout for ConsulConfigurationWatcher to avoid IllegalArgumentException: Cache watchInterval=10sec >= networkClientReadTimeout=10000ms. Fix DurationUtils.getDurationPoints exceeding bounds when startTimeBucket equals endTimeBucket. Support processing OpenTelemetry ExponentialHistogram metrics. Add FreeRedis component ID (3018).  UI  Add the Zipkin Lens UI to the webapp, and proxy it to the context path /zipkin. Migrate the build tool from vue cli to Vite4. Fix the display of the Instance Relation and Endpoint Relation dashboards. Add Micrometer icon. Update the MySQL UI to support MariaDB. Add AWS menu for supporting AWS monitoring. Add missing FastAPI logo. Update the log details page to support the formatted display of JSON content. Fix build config. Avoid being unable to drag process nodes for the first time. Add the node folder to the ignore list. Add ElPopconfirm to component types. Add an iframe widget for the Zipkin UI. Optimize graph tooltips to make them more friendly. Bump json5 from 1.0.1 to 1.0.2. Add websockets icon. Implement independent mode for widgets. Bump http-cache-semantics from 4.1.0 to 4.1.1. Update menus for OpenFunction. Add auto refresh to the widgets' independent mode. Fix: clear the trace ID on the Log and Trace widgets after using association. Fix: reset the duration for query conditions after time range changes. Add AWS S3 menu. Refactor: optimize the sidebar component to make it more friendly. Fix: remove duplicate popup message for query result. Add logo for HTTPX. Refactor: optimize the attached events visualization in the trace widget. Update the BanyanDB client to 0.3.1. Add AWS DynamoDB menu. Fix: add auto period to the independent mode for widgets. Optimize menus and add Windows monitoring menu. Add a cpm5dAvg calculation. Add a cpm5d calculation. Fix a data processing error in the eBPF profiling widget. Support double quotes in SlowSQL statements. Fix: the wrong position of the menu when clicking the topology node.  Documentation  Remove Spring Sleuth docs, and add Spring MicroMeter Observations Analysis with the latest Java agent side enhancement. Update the MySQL monitoring document to add the MariaDB part. Reorganize the protocol docs into clearer API docs.
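Regarding the PrometheusMetricConverter#escapedName item above: Prometheus-style metric names only allow characters in [a-zA-Z0-9_:], so characters such as / have to be rewritten. The following is a rough, self-contained illustration of that kind of escaping, not the actual converter implementation:

```java
// Illustrative escaping only; the real PrometheusMetricConverter#escapedName may differ in detail.
public final class MetricNameEscaper {
    static String escapedName(String name) {
        // Replace every character outside the Prometheus-legal set, including '/', with '_'.
        return name.replaceAll("[^a-zA-Z0-9_:]", "_");
    }

    public static void main(String[] args) {
        System.out.println(escapedName("http.server/requests")); // http_server_requests
        System.out.println(escapedName("jvm.gc.pause"));         // jvm_gc_pause
    }
}
```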
Add documentation about replacing the Zipkin server with SkyWalking OAP. Add Lens UI related docs in the Zipkin trace section. Add Profiling APIs. Fix the backend telemetry doc and so11y dashboard doc, as the OAP Prometheus fetcher was removed in 9.3.0.  All issues and pull requests are here\n","title":"9.4.0","url":"/docs/main/latest/en/changes/changes-9.4.0/"},
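To make the 9.4.0 annotation changes above concrete (Column#name, ElasticSearch.Column#legacyName, ElasticSearch.Keyword), here is a sketch of how a storage entity's fields could be declared after the refactor. The annotations are declared locally as minimal stand-ins so the snippet compiles on its own; the real annotation classes live in the OAP storage modules and may carry additional attributes, and the entity and field names are hypothetical:

```java
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.lang.reflect.Field;

// Minimal stand-ins so this sketch is self-contained; not the real OAP annotation definitions.
@Retention(RetentionPolicy.RUNTIME) @Target(ElementType.FIELD) @interface Column { String name(); }
@Retention(RetentionPolicy.RUNTIME) @Target(ElementType.FIELD) @interface EsColumn { String legacyName(); }
@Retention(RetentionPolicy.RUNTIME) @Target(ElementType.FIELD) @interface EsKeyword { }

// Hypothetical entity showing the post-refactor layout: Column#name is the primary column name,
// the legacyName attribute keeps the pre-refactor alias, and the keyword marker forces a keyword
// mapping instead of a full-text field (mirroring ElasticSearch.Column/ElasticSearch.Keyword).
class SampleSlowSqlRecord {
    @Column(name = "trace_id")
    @EsColumn(legacyName = "traceId")
    String traceId;

    @Column(name = "service_name")
    @EsKeyword
    String serviceName;

    public static void main(String[] args) {
        for (Field f : SampleSlowSqlRecord.class.getDeclaredFields()) {
            System.out.println(f.getName() + " -> " + f.getAnnotation(Column.class).name());
        }
    }
}
```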
{"content":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when the OAP is running in no-init mode. Support binding TLS status as a part of the component for service topology. Fix a component ID priority bug. Fix component IDs of the topology overlapping due to storage layer bugs. [Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. [Breaking Change] Sharding-MySQL implementations and tests are removed, since the day-based rolling mechanism is now the default. Fix the otel k8s-cluster rule: add the namespace dimension for MAL aggregation calculation (Deployment Status, Deployment Spec Replicas). Support the continuous profiling feature. Support collecting process-level related metrics. Fix K8sRetag reading the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix the PromQL HTTP API /api/v1/labels response missing the service label. Fix a possible NPE when initializing IntList. Support parsing PromQL expressions with empty labels in the braces for metadata queries. Support the alarm metric OP !=. Support the metrics query indicating whether value == 0 represents an actual zero or no data. Fix an NPE when querying non-existent series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: remove empty values from the query result; fix the /api/v1/metadata param limit potentially causing out-of-bound access. Support monitoring the total-number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix a CVE. Bump up the Kubernetes Java client. Support Redis monitoring. Add component IDs for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for the aws-firehose receiver. Bump up armeria to 1.23.1. Support Elasticsearch monitoring. Fix the PromQL HTTP API /api/v1/series response missing the service label when matching a metric. Support ServerSide TopN for BanyanDB. Add component ID for Jersey. Remove OpenCensus support and the related code and docs, as it is sunsetting. Support dynamic configuration of searchableTracesTags. Support exportErrorStatusTraceOnly for exporting error-status trace segments through the Kafka channel. Add component ID for Grizzly. Fix a potential NPE in the Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ monitoring. Support Redis slow log collection. Fix data loss when querying continuous profiling task records. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression (MQE), allowing users to do simple query-stage calculations through the expression. Deprecate the metrics query v2 protocol. Deprecate the record query protocol.
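Several of the 9.5.0 fixes above touch the PromQL-compatible HTTP API (/api/v1/labels, /api/v1/series, /api/v1/metadata). A minimal client-side sketch of calling such an endpoint with the JDK HTTP client follows; the host and port are deployment-specific placeholders, not values defined by this changelog:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Sketch of querying the PromQL-compatible API exposed by the OAP.
// "localhost:9090" is a placeholder; use the address your PromQL service is actually bound to.
public class PromqlLabelsQuery {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(
                        URI.create("http://localhost:9090/api/v1/labels"))
                .GET()
                .build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode());
        System.out.println(response.body()); // JSON body; service should appear among the returned labels
    }
}
```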
Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates. Add Continuous Profiling tab on Mesh layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","title":"9.5.0","url":"/docs/main/latest/en/changes/changes-9.5.0/"},{"content":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP running in no-init mode. Support to bind TLS status as a part of component for service topology. Fix component ID priority bug. Fix component ID of topology overlap due to storage layer bugs. [Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. [Breaking Change] Sharding-MySQL implementations and tests get removed due to we have the day-based rolling mechanism by default Fix otel k8s-cluster rule add namespace dimension for MAL aggregation calculation(Deployment Status,Deployment Spec Replicas) Support continuous profiling feature. Support collect process level related metrics. 
Fix K8sRetag reads the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing service label. Fix possible NPE when initialize IntList. Support parse PromQL expression has empty labels in the braces for metadata query. Support alarm metric OP !=. Support metrics query indicates whether value == 0 represents actually zero or no data. Fix NPE when query the not exist series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: Remove empty values from the query result, fix /api/v1/metadata param limit could cause out of bound. Support monitoring the total number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix cve. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for aws-firehose receiver Bump up armeria to 1.23.1 Support Elasticsearch Monitoring. Fix PromQL HTTP API /api/v1/series response missing service label when matching metric. Support ServerSide TopN for BanyanDB. Add component ID for Jersey. Remove OpenCensus support, the related codes and docs as it\u0026rsquo;s sunsetting. Support dynamic configuration of searchableTracesTags Support exportErrorStatusTraceOnly for export the error status trace segments through the Kafka channel Add component ID for Grizzly. Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. 
Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates. Add Continuous Profiling tab on Mesh layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","title":"9.5.0","url":"/docs/main/next/en/changes/changes-9.5.0/"},{"content":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP running in no-init mode. Support to bind TLS status as a part of component for service topology. Fix component ID priority bug. Fix component ID of topology overlap due to storage layer bugs. [Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. [Breaking Change] Sharding-MySQL implementations and tests get removed due to we have the day-based rolling mechanism by default Fix otel k8s-cluster rule add namespace dimension for MAL aggregation calculation(Deployment Status,Deployment Spec Replicas) Support continuous profiling feature. Support collect process level related metrics. Fix K8sRetag reads the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing service label. Fix possible NPE when initialize IntList. Support parse PromQL expression has empty labels in the braces for metadata query. Support alarm metric OP !=. Support metrics query indicates whether value == 0 represents actually zero or no data. Fix NPE when query the not exist series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: Remove empty values from the query result, fix /api/v1/metadata param limit could cause out of bound. Support monitoring the total number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix cve. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for aws-firehose receiver Bump up armeria to 1.23.1 Support Elasticsearch Monitoring. Fix PromQL HTTP API /api/v1/series response missing service label when matching metric. Support ServerSide TopN for BanyanDB. Add component ID for Jersey. Remove OpenCensus support, the related codes and docs as it\u0026rsquo;s sunsetting. Support dynamic configuration of searchableTracesTags Support exportErrorStatusTraceOnly for export the error status trace segments through the Kafka channel Add component ID for Grizzly. 
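The enabledOtelRules to enabledOtelMetricsRules rename above means deployments that still export the old environment variable silently stop taking effect. The sketch below shows the kind of backward-compatible lookup a deployment helper script or wrapper could perform; the helper class and its fallback behaviour are hypothetical and not part of SkyWalking itself, only the two variable names come from the entry above.

```java
public class OtelRulesEnvExample {
    public static void main(String[] args) {
        // New variable name after the breaking change.
        String value = System.getenv("SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES");
        if (value == null || value.isEmpty()) {
            // Old, pre-rename variable; warn so operators notice the stale configuration.
            String legacy = System.getenv("SW_OTEL_RECEIVER_ENABLED_OTEL_RULES");
            if (legacy != null && !legacy.isEmpty()) {
                System.err.println("SW_OTEL_RECEIVER_ENABLED_OTEL_RULES is no longer read; "
                        + "set SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES instead.");
                value = legacy;
            }
        }
        System.out.println("enabledOtelMetricsRules = " + value);
    }
}
```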
Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates. Add Continuous Profiling tab on Mesh layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","title":"9.5.0","url":"/docs/main/v9.5.0/en/changes/changes/"},{"content":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP running in no-init mode. Support to bind TLS status as a part of component for service topology. Fix component ID priority bug. Fix component ID of topology overlap due to storage layer bugs. 
[Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. [Breaking Change] Sharding-MySQL implementations and tests get removed due to we have the day-based rolling mechanism by default Fix otel k8s-cluster rule add namespace dimension for MAL aggregation calculation(Deployment Status,Deployment Spec Replicas) Support continuous profiling feature. Support collect process level related metrics. Fix K8sRetag reads the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing service label. Fix possible NPE when initialize IntList. Support parse PromQL expression has empty labels in the braces for metadata query. Support alarm metric OP !=. Support metrics query indicates whether value == 0 represents actually zero or no data. Fix NPE when query the not exist series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: Remove empty values from the query result, fix /api/v1/metadata param limit could cause out of bound. Support monitoring the total number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix cve. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for aws-firehose receiver Bump up armeria to 1.23.1 Support Elasticsearch Monitoring. Fix PromQL HTTP API /api/v1/series response missing service label when matching metric. Support ServerSide TopN for BanyanDB. Add component ID for Jersey. Remove OpenCensus support, the related codes and docs as it\u0026rsquo;s sunsetting. Support dynamic configuration of searchableTracesTags Support exportErrorStatusTraceOnly for export the error status trace segments through the Kafka channel Add component ID for Grizzly. Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. 
Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates. Add Continuous Profiling tab on Mesh layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","title":"9.5.0","url":"/docs/main/v9.6.0/en/changes/changes-9.5.0/"},{"content":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP running in no-init mode. Support to bind TLS status as a part of component for service topology. Fix component ID priority bug. Fix component ID of topology overlap due to storage layer bugs. [Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. [Breaking Change] Sharding-MySQL implementations and tests get removed due to we have the day-based rolling mechanism by default Fix otel k8s-cluster rule add namespace dimension for MAL aggregation calculation(Deployment Status,Deployment Spec Replicas) Support continuous profiling feature. Support collect process level related metrics. Fix K8sRetag reads the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing service label. Fix possible NPE when initialize IntList. Support parse PromQL expression has empty labels in the braces for metadata query. Support alarm metric OP !=. Support metrics query indicates whether value == 0 represents actually zero or no data. Fix NPE when query the not exist series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: Remove empty values from the query result, fix /api/v1/metadata param limit could cause out of bound. Support monitoring the total number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix cve. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for aws-firehose receiver Bump up armeria to 1.23.1 Support Elasticsearch Monitoring. 
Fix PromQL HTTP API /api/v1/series response missing service label when matching metric. Support ServerSide TopN for BanyanDB. Add component ID for Jersey. Remove OpenCensus support, the related codes and docs as it\u0026rsquo;s sunsetting. Support dynamic configuration of searchableTracesTags Support exportErrorStatusTraceOnly for export the error status trace segments through the Kafka channel Add component ID for Grizzly. Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates. Add Continuous Profiling tab on Mesh layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  
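The HTTP URI grouping entries in 9.5.0 above replace the old regex-based endpoint grouping with a pattern-recognition engine in the ai-pipeline module. The underlying idea is to collapse high-cardinality URIs such as /orders/12345 into a shared template. The toy sketch below illustrates that idea with a single made-up rule (numeric path segments become a placeholder); it is not the actual ai-pipeline engine or its recognition algorithm.

```java
import java.util.Arrays;
import java.util.stream.Collectors;

public class UriGroupingExample {
    // Collapse path segments that look like identifiers into a shared placeholder,
    // so /orders/12345 and /orders/67890 map to the same endpoint name.
    static String group(String uri) {
        return Arrays.stream(uri.split("/"))
                .map(segment -> segment.matches("\\d+") ? "{var}" : segment)
                .collect(Collectors.joining("/"));
    }

    public static void main(String[] args) {
        System.out.println(group("/orders/12345"));        // /orders/{var}
        System.out.println(group("/orders/12345/items"));  // /orders/{var}/items
        System.out.println(group("/health"));              // /health
    }
}
```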
All issues and pull requests are here\n","title":"9.5.0","url":"/docs/main/v9.7.0/en/changes/changes-9.5.0/"},{"content":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. 
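The 9.6.0 entry "Tolerate parameter value in illegal JSON format" above is about not rejecting a whole record when a single field fails to parse as JSON. The sketch below shows that defensive pattern with Jackson (which these notes bump to 2.15.2); the fallback-to-raw-text behaviour is an illustration of the idea, not the exact OAP logic.

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.TextNode;

public class TolerantJsonExample {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Parse a parameter value as JSON when possible; otherwise keep it as plain text
    // instead of failing the whole record.
    static JsonNode parseLeniently(String raw) {
        try {
            return MAPPER.readTree(raw);
        } catch (Exception e) {
            return TextNode.valueOf(raw);
        }
    }

    public static void main(String[] args) {
        System.out.println(parseLeniently("{\"user\":\"alice\"}")); // parsed as an object
        System.out.println(parseLeniently("not-json{{"));           // kept as a text node
    }
}
```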
Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. 
Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","title":"9.6.0","url":"/docs/main/latest/en/changes/changes-9.6.0/"},{"content":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. 
This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. 
Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","title":"9.6.0","url":"/docs/main/next/en/changes/changes-9.6.0/"},{"content":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. 
Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. 
Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","title":"9.6.0","url":"/docs/main/v9.6.0/en/changes/changes/"},{"content":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. 
Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  
Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","title":"9.6.0","url":"/docs/main/v9.7.0/en/changes/changes-9.6.0/"},{"content":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. 
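The 9.7.0 DataCarrier entries above switch the L1 metric aggregation queue to the IF_POSSIBLE policy backed by ArrayBlockingQueue, with abandoned items counted by the new metrics_aggregator_abandon metric. Below is a self-contained sketch of that buffering pattern (never block the producer; drop and count when full); it is an illustration of the policy, not the actual DataCarrier code.

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.atomic.AtomicLong;

public class IfPossibleBufferExample {
    private final ArrayBlockingQueue<String> queue;
    private final AtomicLong abandonedCounter = new AtomicLong();

    IfPossibleBufferExample(int capacity) {
        this.queue = new ArrayBlockingQueue<>(capacity);
    }

    // IF_POSSIBLE semantics: never block the producer; if the queue is full,
    // drop the element and count it (the role played by metrics_aggregator_abandon).
    boolean produce(String metric) {
        boolean accepted = queue.offer(metric);
        if (!accepted) {
            abandonedCounter.incrementAndGet();
        }
        return accepted;
    }

    public static void main(String[] args) {
        IfPossibleBufferExample buffer = new IfPossibleBufferExample(2);
        buffer.produce("m1");
        buffer.produce("m2");
        buffer.produce("m3"); // queue is full, so this one is abandoned
        System.out.println("buffered=" + buffer.queue.size()
                + " abandoned=" + buffer.abandonedCounter.get());
    }
}
```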
Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","title":"9.7.0","url":"/docs/main/latest/en/changes/changes/"},{"content":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. 
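The last OAP entry above adds timestamp extraction from patterned datetime strings in LAL (SkyWalking's Log Analysis Language). The underlying operation is plain pattern-based datetime parsing; the sketch below shows that operation in Java, with the pattern and the log fragment purely illustrative and not LAL syntax.

```java
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;

public class PatternedTimestampExample {
    public static void main(String[] args) {
        // Hypothetical log fragment and pattern; real deployments configure this in LAL.
        String raw = "2023-11-01 10:15:30.123";
        DateTimeFormatter pattern = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");

        LocalDateTime parsed = LocalDateTime.parse(raw, pattern);
        long epochMillis = parsed.toInstant(ZoneOffset.UTC).toEpochMilli();

        // The extracted timestamp can then be attached to the log record.
        System.out.println(epochMillis);
    }
}
```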
Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","title":"9.7.0","url":"/docs/main/next/en/changes/changes-9.7.0/"},{"content":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. 
Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. 
Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","title":"9.7.0","url":"/docs/main/v9.7.0/en/changes/changes/"},{"content":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the fundamental theory of SkyWalking tracing models.\n  Blog about Scaling SkyWalking server automatically in kubernetes.\n  Blog about Use Profiling to Fix the Blind Spot of Distributed Tracing.\n  Blog about observing Istio + Envoy service mesh with ALS solution.\n  Blog about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).\n  ","title":"Academy","url":"/docs/main/v9.0.0/en/academy/list/"},{"content":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the fundamental theory of SkyWalking tracing models.\n  Blog about Scaling SkyWalking server automatically in kubernetes.\n  Blog about Use Profiling to Fix the Blind Spot of Distributed Tracing.\n  Blog about observing Istio + Envoy service mesh with ALS solution.\n  Blog about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).\n  ","title":"Academy","url":"/docs/main/v9.1.0/en/academy/list/"},{"content":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the fundamental theory of SkyWalking tracing models.\n  Blog about Scaling SkyWalking server automatically in kubernetes.\n  Blog about Use Profiling to Fix the Blind Spot of Distributed Tracing.\n  Blog about observing Istio + Envoy service mesh with ALS solution.\n  Blog about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).\n  Blog about using eBPF Profiling to pinpoint service mesh critical performance Impact.\n  ","title":"Academy","url":"/docs/main/v9.2.0/en/academy/list/"},{"content":"ActiveMQ classic monitoring SkyWalking leverages jmx prometheus exporter for collecting metrics data from ActiveMQ classic. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  ActiveMQ classic has extensive support for JMX to allow you to monitor and control the behavior of the broker via the JMX MBeans. The jmx prometheus exporter collects metrics data from ActiveMQ classic, this exporter is intended to be run as a Java Agent, exposing a HTTP server and serving metrics of the local JVM. OpenTelemetry Collector fetches metrics from jmx prometheus exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Enable JMX in activemq.xml, the JMX remote port defaults to 1616, you can change it through ACTIVEMQ_SUNJMX_START. The example for ActiveMQ configuration, refer to here. Set up jmx prometheus exporter which runs as a Java Agent(recommended) of ActiveMQ classic. If you work with docker, you also can set up a single server for exporter, refer to here(note the configuration of includeObjectNames). Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  
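The ActiveMQ setup list above ends at "Config SkyWalking OpenTelemetry receiver" without showing the collector side of the data flow. Below is a minimal sketch of an OpenTelemetry Collector configuration for that flow; the scrape target activemq-host:12345, the job name, and the OAP endpoint oap-host:11800 are assumptions to be replaced with your own values, and any extra labels expected by the shipped otel-rules should be added per that rule file.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        # Scrape the JMX Prometheus exporter attached to ActiveMQ classic.
        - job_name: activemq
          scrape_interval: 10s
          static_configs:
            - targets: ["activemq-host:12345"]

processors:
  batch: {}

exporters:
  otlp:
    # Push the collected metrics to the SkyWalking OAP OTLP gRPC receiver.
    endpoint: oap-host:11800
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
```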
ActiveMQ classic Monitoring ActiveMQ classic monitoring provides multidimensional metrics monitoring of ActiveMQ Exporter as Layer: ActiveMQ Service in the OAP. In each cluster, the broker is represented as Instance and the destination is represented as Endpoint.\nActiveMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     System Load Average Count meter_activemq_cluster_system_load_average The average system load, range:[0, 10000]. JMX Prometheus Exporter   Thread Count Count meter_activemq_cluster_thread_count Threads currently used by the JVM. JMX Prometheus Exporter   Init Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_init The initial amount of heap memory available. JMX Prometheus Exporter   Committed Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_committed The memory is guaranteed to be available for the JVM to use. JMX Prometheus Exporter   Used Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_used The amount of JVM heap memory currently in use. JMX Prometheus Exporter   Max Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_max The maximum possible size of the heap memory. JMX Prometheus Exporter   GC G1 Old Collection Count Count meter_activemq_cluster_gc_g1_old_collection_count The gc count of G1 Old Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Count Count meter_activemq_cluster_gc_g1_young_collection_count The gc count of G1 Young Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Old Collection Time ms meter_activemq_cluster_gc_g1_old_collection_time The gc time spent in G1 Old Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Time ms meter_activemq_cluster_gc_g1_young_collection_time The gc time spent in G1 Young Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC Parallel Old Collection Count Count meter_activemq_cluster_gc_parallel_old_collection_count The gc count of Parallel Old Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Count Count meter_activemq_cluster_gc_parallel_young_collection_count The gc count of Parallel Young Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Old Collection Time ms meter_activemq_cluster_gc_parallel_old_collection_time The gc time spent in Parallel Old Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Time ms meter_activemq_cluster_gc_parallel_young_collection_time The gc time spent in Parallel Young Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   Enqueue Rate Count/s meter_activemq_cluster_enqueue_rate Number of messages that have been sent to the cluster per second(JDK[6,8]). JMX Prometheus Exporter   Dequeue Rate Count/s meter_activemq_cluster_dequeue_rate Number of messages that have been acknowledged or discarded on the cluster per second. JMX Prometheus Exporter   Dispatch Rate Count/s meter_activemq_cluster_dispatch_rate Number of messages that has been delivered to consumers per second. JMX Prometheus Exporter   Expired Rate Count/s meter_activemq_cluster_expired_rate Number of messages that have been expired per second. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_cluster_average_enqueue_time The average time a message was held on this cluster. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_cluster_max_enqueue_time The max time a message was held on this cluster. 
JMX Prometheus Exporter    ActiveMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime sec meter_activemq_broker_uptime Uptime of the broker in day. JMX Prometheus Exporter   State  meter_activemq_broker_state If slave broker 1 else 0. JMX Prometheus Exporter   Current Connections Count meter_activemq_broker_current_connections The number of clients connected to the broker currently. JMX Prometheus Exporter   Current Producer Count Count meter_activemq_broker_current_producer_count The number of producers currently attached to the broker. JMX Prometheus Exporter   Current Consumer Count Count meter_activemq_broker_current_consumer_count The number of consumers consuming messages from the broker. JMX Prometheus Exporter   Producer Count Count meter_activemq_broker_producer_count Number of message producers active on destinations. JMX Prometheus Exporter   Consumer Count Count meter_activemq_broker_consumer_count Number of message consumers subscribed to destinations. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_broker_enqueue_count The total number of messages sent to the broker. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_broker_dequeue_count The total number of messages the broker has delivered to consumers. JMX Prometheus Exporter   Enqueue Rate Count/sec meter_activemq_broker_enqueue_rate The total number of messages sent to the broker per second. JMX Prometheus Exporter   Dequeue Rate Count/sec meter_activemq_broker_dequeue_rate The total number of messages the broker has delivered to consumers per second. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_broker_memory_percent_usage Percentage of configured memory used by the broker. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_broker_memory_percent_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Limit Bytes meter_activemq_broker_memory_limit Memory limited used for holding undelivered messages before paging to temporary storage. JMX Prometheus Exporter   Store Percent Usage % meter_activemq_broker_store_percent_usage Percentage of available disk space used for persistent message storage. JMX Prometheus Exporter   Store Limit Bytes meter_activemq_broker_store_limit Disk limited used for persistent messages before producers are blocked. JMX Prometheus Exporter   Temp Percent Usage Bytes meter_activemq_broker_temp_percent_usage Percentage of available disk space used for non-persistent message storage. JMX Prometheus Exporter   Temp Limit Bytes meter_activemq_broker_temp_limit Disk limited used for non-persistent messages and temporary data before producers are blocked. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_broker_average_message_size Average message size on this broker. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_broker_max_message_size Max message size on this broker. JMX Prometheus Exporter   Queue Size Count meter_activemq_broker_queue_size Number of messages on this broker that have been dispatched but not acknowledged. JMX Prometheus Exporter    ActiveMQ Destination Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Producer Count Count meter_activemq_destination_producer_count Number of producers attached to this destination. JMX Prometheus Exporter   Consumer Count Count meter_activemq_destination_consumer_count Number of consumers subscribed to this destination. 
JMX Prometheus Exporter   Topic Consumer Count Count meter_activemq_destination_topic_consumer_count Number of consumers subscribed to the topics. JMX Prometheus Exporter   Queue Size Count meter_activemq_destination_queue_size The number of messages that have not been acknowledged by a consumer. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_destination_memory_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_destination_memory_percent_usage Percentage of configured memory used by the destination. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_destination_enqueue_count The number of messages sent to the destination. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_destination_dequeue_count The number of messages the destination has delivered to consumers. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_destination_average_enqueue_time The average time a message was held on this destination. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_destination_max_enqueue_time The max time a message was held on this destination. JMX Prometheus Exporter   Dispatch Count Count meter_activemq_destination_dispatch_count Number of messages that has been delivered to consumers. JMX Prometheus Exporter   Expired Count Count meter_activemq_destination_expired_count Number of messages that have been expired. JMX Prometheus Exporter   Inflight Count Count meter_activemq_destination_inflight_count Number of messages that have been dispatched to but not acknowledged by consumers. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_destination_average_message_size Average message size on this destination. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_destination_max_message_size Max message size on this destination. JMX Prometheus Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/activemq/activemq-cluster.yaml, otel-rules/activemq/activemq-broker.yaml, otel-rules/activemq/activemq-destination.yaml. The ActiveMQ dashboard panel configurations are found in ui-initialized-templates/activemq.\n","title":"ActiveMQ classic monitoring","url":"/docs/main/next/en/setup/backend/backend-activemq-monitoring/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. 
There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/latest/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/next/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.0.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. 
L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.1.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.2.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.3.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. 
L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.4.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.5.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.6.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. 
In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.7.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","title":"Advanced Features","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/advanced-features/"},{"content":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. 
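To make the two-deployment layout described above concrete, here is a minimal sketch of the receiver-role Deployment. It assumes the role is switched through the SW_CORE_ROLE environment variable and that the native Kubernetes coordinator discovers aggregator pods via SW_CLUSTER_K8S_LABEL; both names follow common OAP application.yml conventions but should be verified against your release. The aggregator Deployment would look the same except for its labels and SW_CORE_ROLE=Aggregator.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: oap-receiver
  labels: {app: oap, role: receiver}
spec:
  replicas: 3
  selector:
    matchLabels: {app: oap, role: receiver}
  template:
    metadata:
      labels: {app: oap, role: receiver}
    spec:
      containers:
        - name: oap
          image: apache/skywalking-oap-server:9.7.0   # assumed tag
          env:
            - name: SW_CLUSTER
              value: kubernetes            # use the native Kubernetes coordinator
            - name: SW_CORE_ROLE
              value: Receiver              # set to Aggregator in the other Deployment
            - name: SW_CLUSTER_K8S_LABEL
              # labelSelector: receiver nodes discover and forward to aggregator pods only
              value: app=oap,role=aggregator
```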
If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","title":"Advanced Features","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-features/"},{"content":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","title":"Advanced Features","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/advanced-features/"},{"content":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","title":"Advanced Features","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/advanced-features/"},{"content":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. 
See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","title":"Advanced Features","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/advanced-features/"},{"content":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. 
Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. */ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","title":"Advanced Reporters","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/advanced-reporters/"},{"content":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. 
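The reporter docs above note that the Kafka fetcher of the OAP server must be opened before the agent-side Kafka reporter is useful. A minimal sketch of that OAP setting follows; the module name kafka-fetcher and the bootstrapServers property follow the usual application.yml layout, but the exact property and environment-variable names should be checked against the OAP version in use.

```yaml
# application.yml (OAP side): consume traces/metrics/logs that agents publish to Kafka.
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
```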
In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. 
*/ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","title":"Advanced Reporters","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-reporters/"},{"content":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. 
Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. */ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","title":"Advanced Reporters","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/advanced-reporters/"},{"content":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. 
In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. 
*/ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","title":"Advanced Reporters","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/advanced-reporters/"},{"content":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. 
Notice that the JSON form is overridden by plugin.kafka.producer_config[key]=value when both define the same key.\nSince 8.16.0, users can implement their own decoder for Kafka producer configurations instead of keeping them in plain text (such as passwords), whether they are set through plugin.kafka.producer_config_json, plugin.kafka.producer_config, or the environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nTo do that, add the kafka-config-extension dependency to your decoder project and implement the decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method. For example:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. */ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move it to agent/plugins.\nNote that the jar package should contain all the dependencies required by your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure it by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from outside the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","title":"Advanced Reporters","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/advanced-reporters/"},
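To make the decode skeleton above concrete, here is a hedged sketch (not from the official docs) of a decoder that assumes a selected producer value, such as a SASL password, was stored Base64-encoded in agent.config; the chosen key ("sasl.jaas.config") and the Base64 scheme are illustrative assumptions only:

package org.apache.skywalking.apm.agent.sample;

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;

import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension;

/**
 * Hypothetical decoder: assumes the value of "sasl.jaas.config" was stored
 * Base64-encoded in plugin.kafka.producer_config / producer_config_json,
 * and decodes it before the Kafka producer is created.
 */
public class Base64DecodeUtil implements KafkaConfigExtension {
    @Override
    public Map<String, String> decode(Map<String, String> config) {
        Map<String, String> decoded = new HashMap<>(config);
        String encoded = decoded.get("sasl.jaas.config");
        if (encoded != null) {
            // Only the known-encoded key is touched; every other entry passes through unchanged.
            decoded.put("sasl.jaas.config",
                    new String(Base64.getDecoder().decode(encoded), StandardCharsets.UTF_8));
        }
        return decoded;
    }
}

Such a decoder would be packaged together with its dependencies, dropped into agent/plugins, and activated via plugin.kafka.decrypt_class as described above.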
{"content":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technologies for identifying patterns in data. This module provides a standardized way to integrate these technologies by shipping data from the OAP kernel to 3rd parties.\nFrom industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated; they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","title":"AI Pipeline","url":"/docs/main/latest/en/setup/ai-pipeline/introduction/"},{"content":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technologies for identifying patterns in data. This module provides a standardized way to integrate these technologies by shipping data from the OAP kernel to 3rd parties.\nFrom industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated; they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","title":"AI Pipeline","url":"/docs/main/next/en/setup/ai-pipeline/introduction/"},{"content":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technologies for identifying patterns in data. This module provides a standardized way to integrate these technologies by shipping data from the OAP kernel to 3rd parties.\nFrom industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated; they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","title":"AI Pipeline","url":"/docs/main/v9.5.0/en/setup/ai-pipeline/introduction/"},{"content":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technologies for identifying patterns in data. This module provides a standardized way to integrate these technologies by shipping data from the OAP kernel to 3rd parties.\nFrom industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated; they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","title":"AI Pipeline","url":"/docs/main/v9.6.0/en/setup/ai-pipeline/introduction/"},{"content":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technologies for identifying patterns in data. 
This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","title":"AI Pipeline","url":"/docs/main/v9.7.0/en/setup/ai-pipeline/introduction/"},{"content":"Alarm Alarm core is driven by a collection of rules, which are defined in config/alarm-settings.yml. There are three parts in alarm rule definition.\n Alarm rules. They define how metrics alarm should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after the alarm is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after the alarm is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alarm rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can be also configured as the source of alarm, please refer to the event doc for more details. Include names. Entity names which are included in this rule. Please follow the entity name definitions. Exclude names. Entity names which are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. If both exclude-name list and exclude-name regex are set, both rules will take effect. Include labels. Metric labels which are included in this rule. Exclude labels. Metric labels which are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. A regex that exclude labels. If both the exclude-label list and exclude-label regex are set, both rules will take effect. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you would like to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags, or through system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  
Label settings are required by the meter-system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the alarm by one or more of the values.\nFor example in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications, or if it simply serves as an condition of the composite rule. Silence period. After the alarm is triggered in Time-N, there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Composite rules NOTE: Composite rules are only applicable to alarm rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. 
Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
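As a rough, illustrative sketch (not part of the official docs), a receiving web container endpoint for this webhook could look like the following plain-Java server; the port and path are placeholders to align with whatever URL you register under webhooks, and the body carries the fields listed above in the example payload shown next:

import java.io.IOException;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;

import com.sun.net.httpserver.HttpServer;

public class AlarmWebhookReceiver {
    public static void main(String[] args) throws IOException {
        // Placeholder port/path; http://<host>:8778/alarm/receive would be listed under webhooks.
        HttpServer server = HttpServer.create(new InetSocketAddress(8778), 0);
        server.createContext("/alarm/receive", exchange -> {
            try (InputStream in = exchange.getRequestBody()) {
                // The body is a JSON array of AlarmMessage objects with the fields described above.
                String body = new String(in.readAllBytes(), StandardCharsets.UTF_8);
                System.out.println("Received alarm payload: " + body);
            }
            exchange.sendResponseHeaders(200, -1); // respond 200 with an empty body
            exchange.close();
        });
        server.start();
    }
}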
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingtalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. For security purposes, you can config an optional secret for an individual webhook URL. The alarm message will be sent through HTTP post by application/json content type if you have configured Dingtalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. 
For security purposes, you can config an optional secret for an individual webhook URL. If you would like to direct a text to a user, you can config ats which is the feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotUpdate the settings dynamically Since 6.5.0, the alarm settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alarm rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alarm rule. If any attribute (metrics-name, op, threshold, period, count, etc.) of a rule is changed, the sliding window will be destroyed and re-created, causing the alarm of this specific rule to restart again.\n","title":"Alarm","url":"/docs/main/v9.0.0/en/setup/backend/backend-alarm/"},{"content":"Alarm The alarm core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alarm rule definitions.\n Alarm rules. They define how metrics alarm should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alarm is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alarm is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alarm rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. 
Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Composite rules NOTE: Composite rules are only applicable to alarm rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. 
Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. 
The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. 
The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotUpdate the settings dynamically Since 6.5.0, the alarm settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alarm rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alarm rule. If any attribute (metrics-name, op, threshold, period, count, etc.) of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\n","title":"Alarm","url":"/docs/main/v9.1.0/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. 
Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. A MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate the request latency is slower than 1s. The typical illegal expressions are  avg(service_resp_time \u0026gt; 1000) + 1 expression root doesn\u0026rsquo;t use Compare Operation service_resp_time \u0026gt; 1000 expression return a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.    The metrics names in the expression could be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Hooks. Binding the specific names of the hooks when the alarm is triggered. The name format is {hookType}.{hookName} (slack.custom1 e.g.) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  
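To make the period and silence-period mechanics above concrete, here is a simplified, hedged Java sketch of a minute-level sliding-window check; it is only a conceptual illustration of the documented behaviour (assuming an avg(...) > threshold style expression), not the actual OAP implementation:

import java.util.ArrayDeque;
import java.util.Deque;

/** Conceptual sketch: keep the last `period` minute-values and fire when the compare result is true. */
class SlidingWindowCheck {
    private final int period;                       // window size in minutes, e.g. 10
    private final Deque<Double> window = new ArrayDeque<>();
    private int silenceCountdown = 0;               // minutes left in the silence period

    SlidingWindowCheck(int period) {
        this.period = period;
    }

    /** Called once per minute with the newest metric value; returns true if an alarm should fire. */
    boolean onMinute(double value, double threshold) {
        window.addLast(value);
        if (window.size() > period) {
            window.removeFirst();                   // T1/Value1 drops out when T8/Value8 arrives
        }
        if (silenceCountdown > 0) {
            silenceCountdown--;
            return false;                           // already alarmed recently, stay silent
        }
        double avg = window.stream().mapToDouble(Double::doubleValue).average().orElse(0);
        boolean triggered = avg > threshold;        // e.g. avg(service_resp_time) > 1000
        if (triggered) {
            silenceCountdown = period;              // by default the silence period equals the period
        }
        return triggered;
    }
}

Because the window is stateful, destroying and re-creating it when a rule attribute changes effectively restarts the evaluation of that rule, which is the behaviour noted under the dynamic-configuration section.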
Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Expression is calculated based on the metric values(Value1 ~ Value7). For example, expression avg(service_resp_time) \u0026gt; 1000, if the value are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 10001 + ... + 1001) / 7) \u0026gt; 1000 and the result would be 1(true). Then the alarm would be triggered. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  NOTE:\n If the expression include labeled metrics and result has multiple labeled value(e.g. sum(service_percentile{_='0,1'} \u0026gt; 1000) \u0026gt;= 3), the alarm will be triggered if any of the labeled value result matches 3 times of the condition(P50 \u0026gt; 1000 or P75 \u0026gt; 1000). One alarm rule is targeting the same entity level, such as service-level expression (avg(service_resp_time) \u0026gt; 1000). Set entity names(Include/Exclude names\u0026hellip;) according to metrics entity levels, do not include different entity levels metrics in the same expression, such as service metrics and endpoint metrics.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# A MQE expression and the root operation of the expression must be a Compare Operation.expression:sum((endpoint_sla / 100) \u0026lt; 75) \u0026gt;= 3# The length of time to evaluate the metricsperiod:10# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10message:Successful rate of endpoint {name} is lower than 75%tags:level:WARNINGservice_percent_rule:expression:sum((service_sla / 100) \u0026lt; 85) \u0026gt;= 4# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_cperiod:10message:Service {name} successful rate is less than 85%service_resp_time_percentile_rule:expression:sum(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} \u0026gt; 1000) \u0026gt;= 3period:10silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000meter_service_status_code_rule:expression:sum(aggregate_labels(meter_status_code{_=\u0026#39;4xx,5xx\u0026#39;},sum) \u0026gt; 10) \u0026gt; 3period:10count:3silence-period:5message:The request number of entity {name} 4xx and 5xx status is more than expected.hooks:- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;comp_rule:expression:(avg(service_sla / 100) \u0026gt; 80) * (avg(service_percentile{_=\u0026#39;0\u0026#39;}) \u0026gt; 1000) == 1period:10message:Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.tags:level:CRITICALhooks:- \u0026#34;slack.default\u0026#34;- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;Default alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. 
Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nHooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type after you have set up Webhook hooks as follows:\nwebhook:default:is-default:trueurls:- http://ip:port/xxx- http://ip:port/yyyThe JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
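If the receiving side is written in Java, the posted body could be mapped onto a small DTO; this sketch assumes the Jackson library (an assumption, any JSON binding works) and mirrors the fields listed above and the example payload shown next:

import java.util.List;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;

/** Minimal DTO mirroring the documented fields; unknown fields are ignored. */
class AlarmMessageDto {
    public long scopeId;
    public String scope;
    public String name;
    public String id0;
    public String id1;
    public String ruleName;
    public String alarmMessage;
    public long startTime;
    public List<TagDto> tags;

    static class TagDto {
        public String key;
        public String value;
    }
}

class AlarmPayloadParser {
    private static final ObjectMapper MAPPER = new ObjectMapper()
            .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    static List<AlarmMessageDto> parse(String json) throws Exception {
        // The webhook body is a JSON array, hence the List target type.
        return MAPPER.readValue(json, new TypeReference<List<AlarmMessageDto>>() { });
    }
}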
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type after you have set up gRPC hooks as follows:\ngRPC:default:is-default:truetarget-host:iptarget-port:portThe message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalk:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishu:default:is-default:truetext-template:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelink:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client-id:\u0026#34;dummy_client_id\u0026#34;client-secret:dummy_secret_keyaccess-token-url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage-url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group-ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot-name:robotPagerDuty The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerduty:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integration-keys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscord:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Expression expression string MQE expression   Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Tags tags key-value pair    Period Period int    Silence period silence-period int    Message message string    Hooks hooks string array     ","title":"Alerting","url":"/docs/main/latest/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. A MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate the request latency is slower than 1s. The typical illegal expressions are  avg(service_resp_time \u0026gt; 1000) + 1 expression root doesn\u0026rsquo;t use Compare Operation service_resp_time \u0026gt; 1000 expression return a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.    The metrics names in the expression could be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. 
The key level is supported by default. Period. The size of the metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Hooks. Binds the specific names of the hooks to be called when the alarm is triggered. The name format is {hookType}.{hookName} (e.g. slack.custom1) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  For example, for a metric there is a shifting window as follows at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period (time points T1 ~ T7) covers continuous data points, one per minute. Note that alerts are not supported above minute-by-minute periods, as they would not be efficient. Values (Value1 ~ Value7) are the values or labeled values for every time point. The expression is calculated based on the metric values (Value1 ~ Value7). For example, with the expression avg(service_resp_time) \u0026gt; 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) \u0026gt; 1000 and the result would be 1 (true), so the alarm would be triggered. Every minute, the window shifts automatically: at T8, Value8 would be cached, and T1/Value1 would be removed from the window.  NOTE:\n If the expression includes labeled metrics and the result has multiple labeled values (e.g. sum(service_percentile{p='50,75'} \u0026gt; 1000) \u0026gt;= 3), the alarm will be triggered if any of the labeled values matches the condition 3 times (P50 \u0026gt; 1000 or P75 \u0026gt; 1000). One alarm rule targets a single entity level, such as a service-level expression (avg(service_resp_time) \u0026gt; 1000). Set entity names (Include/Exclude names\u0026hellip;) according to the metrics entity level, and do not mix metrics of different entity levels in the same expression, such as service metrics and endpoint metrics.  
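To make the sliding-window arithmetic above concrete, here is a minimal sketch in plain Java. It is illustrative only, not the OAP implementation; the class and method names are invented. It mimics a period-sized window that re-evaluates the example condition avg(service_resp_time) greater than 1000 once per minute.

```java
import java.util.ArrayDeque;
import java.util.Deque;

// Illustrative sketch only: a fixed-size window that mimics the Period /
// expression behaviour described above for avg(service_resp_time) > 1000.
public class SlidingWindowSketch {
    private final int period;                         // window size in minutes, e.g. 7
    private final Deque<Long> values = new ArrayDeque<>();

    public SlidingWindowSketch(int period) {
        this.period = period;
    }

    // Called once per minute with the newest data point; T8 pushes out T1.
    public boolean addAndEvaluate(long latestValue) {
        values.addLast(latestValue);
        if (values.size() > period) {
            values.removeFirst();
        }
        double avg = values.stream().mapToLong(Long::longValue).average().orElse(0);
        return avg > 1000;                            // 1 (true) means the condition matches
    }

    public static void main(String[] args) {
        SlidingWindowSketch window = new SlidingWindowSketch(7);
        boolean triggered = false;
        for (long v : new long[] {1001, 1001, 1001, 1001, 1001, 1001, 1001}) {
            triggered = window.addAndEvaluate(v);
        }
        // (1001 * 7) / 7 = 1001 > 1000, so the condition evaluates to true at T7.
        System.out.println("Alarm triggered: " + triggered);
    }
}
```

A complete rules configuration example follows: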
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# A MQE expression and the root operation of the expression must be a Compare Operation.expression:sum((endpoint_sla / 100) \u0026lt; 75) \u0026gt;= 3# The length of time to evaluate the metricsperiod:10# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10message:Successful rate of endpoint {name} is lower than 75%tags:level:WARNINGservice_percent_rule:expression:sum((service_sla / 100) \u0026lt; 85) \u0026gt;= 4# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_cperiod:10message:Service {name} successful rate is less than 85%service_resp_time_percentile_rule:expression:sum(service_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;} \u0026gt; 1000) \u0026gt;= 3period:10silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000meter_service_status_code_rule:expression:sum(aggregate_labels(meter_status_code{_=\u0026#39;4xx,5xx\u0026#39;},sum) \u0026gt; 10) \u0026gt; 3period:10count:3silence-period:5message:The request number of entity {name} 4xx and 5xx status is more than expected.hooks:- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;comp_rule:expression:(avg(service_sla / 100) \u0026gt; 80) * (avg(service_percentile{_=\u0026#39;0\u0026#39;}) \u0026gt; 1000) == 1period:10message:Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.tags:level:CRITICALhooks:- \u0026#34;slack.default\u0026#34;- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;Default alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nHooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. 
Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type after you have set up Webhook hooks as follows:\nwebhook:default:is-default:trueurls:- http://ip:port/xxx- http://ip:port/yyyThe JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type after you have set up gRPC hooks as follows:\ngRPC:default:is-default:truetarget-host:iptarget-port:portThe message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalk:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishu:default:is-default:truetext-template:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelink:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client-id:\u0026#34;dummy_client_id\u0026#34;client-secret:dummy_secret_keyaccess-token-url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage-url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group-ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot-name:robotPagerDuty The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerduty:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integration-keys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscord:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Expression expression string MQE expression   Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Tags tags key-value pair    Period Period int    Silence period silence-period int    Message message string    Hooks hooks string array     ","title":"Alerting","url":"/docs/main/next/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alerting is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. 
If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Count\u0026rsquo;s value(N) represents there are N values in the window matched the operator and threshold. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  Composite rules NOTE: Composite rules are only applicable to alerting rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  
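Before the full configuration example, here is a minimal sketch in plain Java of the threshold/op/count check described above. It is illustrative only, not OAP code, and the window values are hypothetical: within one period window, the rule matches when at least count values satisfy the operator against the threshold.

```java
import java.util.Arrays;

// Illustrative sketch only: counts how many values in the current Period
// window match "op threshold" and fires when that count reaches Count.
public class CountBasedRuleSketch {
    public static void main(String[] args) {
        int threshold = 75;                            // e.g. endpoint_percent_rule: threshold: 75
        int count = 3;                                 // count: 3
        int[] window = {80, 70, 90, 60, 72, 88, 65};   // hypothetical per-minute values

        long matches = Arrays.stream(window)
                .filter(v -> v < threshold)            // op: "<"
                .count();

        boolean alarm = matches >= count;
        System.out.println("matches=" + matches + ", alarm=" + alarm);  // matches=4, alarm=true
    }
}
```

The full example rule configuration follows: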
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. 
The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotPagerDuty Hook The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerDutyHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integrationKeys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Hook Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscordHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (metrics-name, op, threshold, period, count, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Include labels include-labels string array    Exclude labels exclude-labels string array    Include labels regex include-labels-regex string Java regex Pattern   Exclude labels regex exclude-labels-regex string Java regex Pattern   Tags tags key-value pair    Threshold threshold number    OP op operator example: \u0026gt;, \u0026gt;=   Period Period int    Count count int    Only as condition only-as-condition boolean    Silence period silence-period int    Message message string     ","title":"Alerting","url":"/docs/main/v9.2.0/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alerting is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. 
A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Count\u0026rsquo;s value(N) represents there are N values in the window matched the operator and threshold. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  Composite rules NOTE: Composite rules are only applicable to alerting rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  
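As a hedged illustration of how a composite rule combines the boolean outcomes of its individual rules, consider the following plain-Java sketch. It is not OAP code, and the per-rule outcomes are hypothetical; it only shows that the composite expression is ordinary boolean composition over rules targeting the same entity.

```java
import java.util.Map;

// Illustrative sketch only: a composite rule combines the boolean outcomes of
// individual rules targeting the same entity with &&, || and ().
public class CompositeRuleSketch {
    public static void main(String[] args) {
        // Hypothetical outcomes of individual rules for one service in the current window.
        Map<String, Boolean> ruleResults = Map.of(
                "service_percent_rule", true,
                "service_resp_time_percentile_rule", true);

        // expression: service_percent_rule && service_resp_time_percentile_rule
        boolean compRule = ruleResults.get("service_percent_rule")
                && ruleResults.get("service_resp_time_percentile_rule");

        System.out.println("comp_rule triggered: " + compRule);   // true
    }
}
```

The example configuration below shows both individual and composite rules: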
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. 
The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotPagerDuty Hook The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerDutyHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integrationKeys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Hook Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscordHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (metrics-name, op, threshold, period, count, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Include labels include-labels string array    Exclude labels exclude-labels string array    Include labels regex include-labels-regex string Java regex Pattern   Exclude labels regex exclude-labels-regex string Java regex Pattern   Tags tags key-value pair    Threshold threshold number    OP op operator example: \u0026gt;, \u0026gt;=   Period Period int    Count count int    Only as condition only-as-condition boolean    Silence period silence-period int    Message message string     ","title":"Alerting","url":"/docs/main/v9.3.0/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alerting is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. 
A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Count\u0026rsquo;s value(N) represents there are N values in the window matched the operator and threshold. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  Composite rules NOTE: Composite rules are only applicable to alerting rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  
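The multiple-value threshold format described above (for example -, -, 1000, 1000, 1000 against service_percentile) can be illustrated with the following plain-Java sketch. It is not OAP code, and the percentile values are hypothetical; a "-" entry simply means that position never contributes to the condition.

```java
// Illustrative sketch only: a multiple-value threshold such as "-, -, 1000, 1000, 1000"
// checks P90/P95/P99 but never triggers on P50 or P75.
public class PercentileThresholdSketch {
    public static void main(String[] args) {
        String[] labels = {"P50", "P75", "P90", "P95", "P99"};
        long[] values = {420, 780, 1200, 1500, 2100};            // hypothetical percentile values in ms
        String[] thresholds = {"-", "-", "1000", "1000", "1000"};

        boolean matched = false;
        for (int i = 0; i < labels.length; i++) {
            if ("-".equals(thresholds[i])) {
                continue;                                        // "-" means: skip this value entirely
            }
            if (values[i] > Long.parseLong(thresholds[i])) {     // op: ">"
                System.out.println(labels[i] + " exceeds its threshold");
                matched = true;
            }
        }
        System.out.println("condition matched this minute: " + matched);
    }
}
```

The example rule configuration follows: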
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. 
The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotPagerDuty Hook The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerDutyHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integrationKeys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Hook Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscordHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (metrics-name, op, threshold, period, count, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Include labels include-labels string array    Exclude labels exclude-labels string array    Include labels regex include-labels-regex string Java regex Pattern   Exclude labels regex exclude-labels-regex string Java regex Pattern   Tags tags key-value pair    Threshold threshold number    OP op operator example: \u0026gt;, \u0026gt;=   Period Period int    Count count int    Only as condition only-as-condition boolean    Silence period silence-period int    Message message string     ","title":"Alerting","url":"/docs/main/v9.4.0/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alerting is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. 
A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==, !=. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Count\u0026rsquo;s value(N) represents there are N values in the window matched the operator and threshold. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  Composite rules NOTE: Composite rules are only applicable to alerting rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  
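The expression element is easiest to see in a concrete composite rule. The following sketch combines rules with ||, && and parentheses; service_resp_time_rule is a hypothetical individual rule that would also have to be defined under rules, while the other two appear in the full example below:
composite-rules:
  service_unhealthy_rule:
    # Triggers when the success-rate rule OR the percentile rule fires, AND the hypothetical response-time rule also fires.
    expression: (service_percent_rule || service_resp_time_percentile_rule) && service_resp_time_rule
    message: Service {name} is responding slowly or failing too often
    tags:
      level: CRITICAL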
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. 
The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotPagerDuty Hook The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerDutyHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integrationKeys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Hook Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscordHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (metrics-name, op, threshold, period, count, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Include labels include-labels string array    Exclude labels exclude-labels string array    Include labels regex include-labels-regex string Java regex Pattern   Exclude labels regex exclude-labels-regex string Java regex Pattern   Tags tags key-value pair    Threshold threshold number    OP op operator example: \u0026gt;, \u0026gt;=   Period Period int    Count count int    Only as condition only-as-condition boolean    Silence period silence-period int    Message message string     ","title":"Alerting","url":"/docs/main/v9.5.0/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. A MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate the request latency is slower than 1s. The typical illegal expressions are  avg(service_resp_time \u0026gt; 1000) + 1 expression root doesn\u0026rsquo;t use Compare Operation service_resp_time \u0026gt; 1000 expression return a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.    The metrics names in the expression could be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. 
Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default. Period. The size of the metrics cache, in minutes, for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Hooks. The specific hooks to call when the alarm is triggered. The name format is {hookType}.{hookName} (e.g. slack.custom1) and the hook must be defined in the hooks section of the alarm-settings.yml file. If no hook name is specified, the global hook will be used. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  For example, for a metric there is a shifting window as follows at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period (time points T1 ~ T7) is a series of continuous per-minute data points. Note that alerts are not supported above minute-by-minute periods, as they would not be efficient. Values (Value1 ~ Value7) are the values or labeled values for every time point. The expression is calculated based on the metric values (Value1 ~ Value7). For example, for the expression avg(service_resp_time) \u0026gt; 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) \u0026gt; 1000, the result is 1 (true), and the alarm is triggered. Every minute, the window shifts automatically: at T8, Value8 is cached, and T1/Value1 is removed from the window.  NOTE:\n If the expression includes labeled metrics and the result has multiple labeled values (e.g. sum(service_percentile{_='0,1'} \u0026gt; 1000) \u0026gt;= 3), the alarm is triggered if any one of the labeled values matches the condition 3 times (P50 \u0026gt; 1000 or P75 \u0026gt; 1000). One alarm rule targets a single entity level, such as a service-level expression (avg(service_resp_time) \u0026gt; 1000). Set entity names (Include/Exclude names\u0026hellip;) according to the entity level of the metrics, and do not mix metrics of different entity levels in the same expression, such as service metrics and endpoint metrics.  
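As a small aside on the Tags element above, making an additional tag key searchable is expected to be a one-line change under core/default in the OAP application.yml; a hedged sketch (severity is an invented example key, level is searchable by default):
core:
  default:
    # Keep the default `level` key and add a custom `severity` key.
    searchableAlarmTags: ${SW_SEARCHABLE_ALARM_TAG_KEYS:level,severity}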
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# A MQE expression and the root operation of the expression must be a Compare Operation.expression:sum((endpoint_sla / 100) \u0026lt; 75) \u0026gt;= 3# The length of time to evaluate the metricsperiod:10# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10message:Successful rate of endpoint {name} is lower than 75%tags:level:WARNINGservice_percent_rule:expression:sum((service_sla / 100) \u0026lt; 85) \u0026gt;= 4# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_cperiod:10message:Service {name} successful rate is less than 85%service_resp_time_percentile_rule:expression:sum(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} \u0026gt; 1000) \u0026gt;= 3period:10silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000meter_service_status_code_rule:expression:sum(aggregate_labels(meter_status_code{_=\u0026#39;4xx,5xx\u0026#39;},sum) \u0026gt; 10) \u0026gt; 3period:10count:3silence-period:5message:The request number of entity {name} 4xx and 5xx status is more than expected.hooks:- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;comp_rule:expression:(avg(service_sla / 100) \u0026gt; 80) * (avg(service_percentile{_=\u0026#39;0\u0026#39;}) \u0026gt; 1000) == 1period:10message:Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.tags:level:CRITICALhooks:- \u0026#34;slack.default\u0026#34;- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;Default alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nHooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. 
Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type. 
The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalk:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishu:default:is-default:truetext-template:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Follow the WeLink Webhooks guide and create new Webhooks. 
The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelink:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client-id:\u0026#34;dummy_client_id\u0026#34;client-secret:dummy_secret_keyaccess-token-url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage-url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group-ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot-name:robotPagerDuty The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerduty:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integration-keys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscord:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Expression expression string MQE expression   Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Tags tags key-value pair    Period Period int    Silence period silence-period int    Message message string    Hooks hooks string array     ","title":"Alerting","url":"/docs/main/v9.6.0/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alerting is triggered.  
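Putting those parts together, a minimal alarm-settings.yml sketch pairs one MQE-based rule with one default hook (the rule is illustrative and the Slack URL is a dummy):
rules:
  service_resp_time_rule:
    # Root operation must be a Compare Operation that yields 1 (true) or 0 (false).
    expression: avg(service_resp_time) > 1000
    period: 10
    message: Average response time of service {name} is over 1s in the last 10 minutes
hooks:
  slack:
    default:
      is-default: true
      text-template: |-
        {
          "type": "section",
          "text": {
            "type": "mrkdwn",
            "text": ":alarm_clock: *Apache Skywalking Alarm* \n **%s**."
          }
        }
      webhooks:
        - https://hooks.slack.com/services/x/y/z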
Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. A MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate the request latency is slower than 1s. The typical illegal expressions are  avg(service_resp_time \u0026gt; 1000) + 1 expression root doesn\u0026rsquo;t use Compare Operation service_resp_time \u0026gt; 1000 expression return a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.    The metrics names in the expression could be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Hooks. Binding the specific names of the hooks when the alarm is triggered. The name format is {hookType}.{hookName} (slack.custom1 e.g.) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Expression is calculated based on the metric values(Value1 ~ Value7). 
For example, for the expression avg(service_resp_time) \u0026gt; 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) \u0026gt; 1000, the result is 1 (true), and the alarm is triggered. Every minute, the window shifts automatically: at T8, Value8 is cached, and T1/Value1 is removed from the window.  NOTE:\n If the expression includes labeled metrics and the result has multiple labeled values (e.g. sum(service_percentile{_='0,1'} \u0026gt; 1000) \u0026gt;= 3), the alarm is triggered if any one of the labeled values matches the condition 3 times (P50 \u0026gt; 1000 or P75 \u0026gt; 1000). One alarm rule targets a single entity level, such as a service-level expression (avg(service_resp_time) \u0026gt; 1000). Set entity names (Include/Exclude names\u0026hellip;) according to the entity level of the metrics, and do not mix metrics of different entity levels in the same expression, such as service metrics and endpoint metrics.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# A MQE expression and the root operation of the expression must be a Compare Operation.expression:sum((endpoint_sla / 100) \u0026lt; 75) \u0026gt;= 3# The length of time to evaluate the metricsperiod:10# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10message:Successful rate of endpoint {name} is lower than 75%tags:level:WARNINGservice_percent_rule:expression:sum((service_sla / 100) \u0026lt; 85) \u0026gt;= 4# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_cperiod:10message:Service {name} successful rate is less than 85%service_resp_time_percentile_rule:expression:sum(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} \u0026gt; 1000) \u0026gt;= 3period:10silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000meter_service_status_code_rule:expression:sum(aggregate_labels(meter_status_code{_=\u0026#39;4xx,5xx\u0026#39;},sum) \u0026gt; 10) \u0026gt; 3period:10count:3silence-period:5message:The request number of entity {name} 4xx and 5xx status is more than expected.hooks:- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;comp_rule:expression:(avg(service_sla / 100) \u0026gt; 80) * (avg(service_percentile{_=\u0026#39;0\u0026#39;}) \u0026gt; 1000) == 1period:10message:Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.tags:level:CRITICALhooks:- \u0026#34;slack.default\u0026#34;- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;Default alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-settings.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes. Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  
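Instance-scoped rules, such as the default instance rule above, list entities in the {Instance name} of {Service name} format; a hedged sketch (the metric choice, names, and regex are illustrative):
service_instance_resp_time_rule:
  # Fires when at least 2 of the last 10 per-minute values exceed 1s.
  expression: sum(service_instance_resp_time > 1000) >= 2
  include-names:
    - instance_a of service_a
  include-names-regex: ".* of service_a"
  period: 10
  message: Response time of instance {name} is over 1s at least twice in the last 10 minutes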
List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nHooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type after you have set up Webhook hooks as follows:\nwebhook:default:is-default:trueurls:- http://ip:port/xxx- http://ip:port/yyyThe JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type after you have set up gRPC hooks as follows:\ngRPC:default:is-default:truetarget-host:iptarget-port:portThe message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalk:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishu:default:is-default:truetext-template:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelink:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client-id:\u0026#34;dummy_client_id\u0026#34;client-secret:dummy_secret_keyaccess-token-url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage-url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group-ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot-name:robotPagerDuty The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerduty:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integration-keys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscord:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Expression expression string MQE expression   Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Tags tags key-value pair    Period Period int    Silence period silence-period int    Message message string    Hooks hooks string array     ","title":"Alerting","url":"/docs/main/v9.7.0/en/setup/backend/backend-alarm/"},{"content":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the problem of unbalanced messages received by OAP.\nIn this case, we mainly use memory queues for intermediate data storage.\nDeference Envoy Count, OAP performance could impact the Satellite transmit performance.\n   Envoy Instance Concurrent User ALS OPS Satellite CPU Satellite Memory     150 100 ~50K 1.2C 0.5-1.0G   150 300 ~80K 1.8C 1.0-1.5G   300 100 ~50K 1.4C 0.8-1.2G   300 300 ~100K 2.2C 1.3-2.0G   800 100 ~50K 1.5C 0.9-1.5G   800 300 ~100K 2.6C 1.7-2.7G   1500 100 ~50K 1.7C 1.4-2.4G   1500 300 ~100K 2.7C 2.3-3.0G   2300 150 ~50K 1.8C 1.9-3.1G   2300 300 ~90K 2.5C 2.3-4.0G   2300 500 ~110K 3.2C 2.8-4.7G    Detail Environment Using GKE Environment, helm to build cluster.\n   Module Version Replicate Count CPU Limit Memory Limit Description     OAP 8.9.0 6 12C 32Gi Using ElasticSearch as Storage   Satellite 0.4.0 1 8C 16Gi    ElasticSearch 7.5.1 3 8 16Gi     Setting 800 Envoy, 100K QPS ALS.\n   Module Environment Config Use Value Default Value Description Recommend Value     Satellite SATELLITE_QUEUE_PARTITION 50 4 Support several goroutines concurrently to consume the queue Satellite CPU number * 4-6, It could help improve throughput, but the default value also could handle 800 Envoy Instance and 100K QPS ALS message.   Satellite SATELLITE_QUEUE_EVENT_BUFFER_SIZE 3000 1000 The size of the queue in each concurrency This is related to the number of Envoys. If the number of Envoys is large, it is recommended to increase the value.   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME 3000 1000 When the Satellite receives the message, how long(millisecond) will the ALS message be merged into an Event. 
If a certain time delay is accepted, the value can be adjusted larger, which can effectively reduce CPU usage and make the Satellite more stable   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_SENDER_FLUSH_TIME 3000 1000 How long(millisecond) is the memory queue data for each Goroutine to be summarized and sent to OAP This depends on the amount of data in your queue, you can keep it consistent with SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME   OAP SW_CORE_GRPC_MAX_CONCURRENT_CALL 50 4 A link between Satellite and OAP, how many requests parallelism is supported Same with SATELLITE_QUEUE_PARTITION in Satellite    ","title":"ALS Load Balance","url":"/docs/skywalking-satellite/latest/en/setup/performance/als-load-balance/readme/"},{"content":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the problem of unbalanced messages received by OAP.\nIn this case, we mainly use memory queues for intermediate data storage.\nDeference Envoy Count, OAP performance could impact the Satellite transmit performance.\n   Envoy Instance Concurrent User ALS OPS Satellite CPU Satellite Memory     150 100 ~50K 1.2C 0.5-1.0G   150 300 ~80K 1.8C 1.0-1.5G   300 100 ~50K 1.4C 0.8-1.2G   300 300 ~100K 2.2C 1.3-2.0G   800 100 ~50K 1.5C 0.9-1.5G   800 300 ~100K 2.6C 1.7-2.7G   1500 100 ~50K 1.7C 1.4-2.4G   1500 300 ~100K 2.7C 2.3-3.0G   2300 150 ~50K 1.8C 1.9-3.1G   2300 300 ~90K 2.5C 2.3-4.0G   2300 500 ~110K 3.2C 2.8-4.7G    Detail Environment Using GKE Environment, helm to build cluster.\n   Module Version Replicate Count CPU Limit Memory Limit Description     OAP 8.9.0 6 12C 32Gi Using ElasticSearch as Storage   Satellite 0.4.0 1 8C 16Gi    ElasticSearch 7.5.1 3 8 16Gi     Setting 800 Envoy, 100K QPS ALS.\n   Module Environment Config Use Value Default Value Description Recommend Value     Satellite SATELLITE_QUEUE_PARTITION 50 4 Support several goroutines concurrently to consume the queue Satellite CPU number * 4-6, It could help improve throughput, but the default value also could handle 800 Envoy Instance and 100K QPS ALS message.   Satellite SATELLITE_QUEUE_EVENT_BUFFER_SIZE 3000 1000 The size of the queue in each concurrency This is related to the number of Envoys. If the number of Envoys is large, it is recommended to increase the value.   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME 3000 1000 When the Satellite receives the message, how long(millisecond) will the ALS message be merged into an Event. 
If a certain time delay is accepted, the value can be adjusted larger, which can effectively reduce CPU usage and make the Satellite more stable   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_SENDER_FLUSH_TIME 3000 1000 How long(millisecond) is the memory queue data for each Goroutine to be summarized and sent to OAP This depends on the amount of data in your queue, you can keep it consistent with SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME   OAP SW_CORE_GRPC_MAX_CONCURRENT_CALL 50 4 A link between Satellite and OAP, how many requests parallelism is supported Same with SATELLITE_QUEUE_PARTITION in Satellite    ","title":"ALS Load Balance","url":"/docs/skywalking-satellite/next/en/setup/performance/als-load-balance/readme/"},{"content":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the problem of unbalanced messages received by OAP.\nIn this case, we mainly use memory queues for intermediate data storage.\nDeference Envoy Count, OAP performance could impact the Satellite transmit performance.\n   Envoy Instance Concurrent User ALS OPS Satellite CPU Satellite Memory     150 100 ~50K 1.2C 0.5-1.0G   150 300 ~80K 1.8C 1.0-1.5G   300 100 ~50K 1.4C 0.8-1.2G   300 300 ~100K 2.2C 1.3-2.0G   800 100 ~50K 1.5C 0.9-1.5G   800 300 ~100K 2.6C 1.7-2.7G   1500 100 ~50K 1.7C 1.4-2.4G   1500 300 ~100K 2.7C 2.3-3.0G   2300 150 ~50K 1.8C 1.9-3.1G   2300 300 ~90K 2.5C 2.3-4.0G   2300 500 ~110K 3.2C 2.8-4.7G    Detail Environment Using GKE Environment, helm to build cluster.\n   Module Version Replicate Count CPU Limit Memory Limit Description     OAP 8.9.0 6 12C 32Gi Using ElasticSearch as Storage   Satellite 0.4.0 1 8C 16Gi    ElasticSearch 7.5.1 3 8 16Gi     Setting 800 Envoy, 100K QPS ALS.\n   Module Environment Config Use Value Default Value Description Recommend Value     Satellite SATELLITE_QUEUE_PARTITION 50 4 Support several goroutines concurrently to consume the queue Satellite CPU number * 4-6, It could help improve throughput, but the default value also could handle 800 Envoy Instance and 100K QPS ALS message.   Satellite SATELLITE_QUEUE_EVENT_BUFFER_SIZE 3000 1000 The size of the queue in each concurrency This is related to the number of Envoys. If the number of Envoys is large, it is recommended to increase the value.   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME 3000 1000 When the Satellite receives the message, how long(millisecond) will the ALS message be merged into an Event. 
If a certain time delay is accepted, the value can be adjusted larger, which can effectively reduce CPU usage and make the Satellite more stable   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_SENDER_FLUSH_TIME 3000 1000 How long(millisecond) is the memory queue data for each Goroutine to be summarized and sent to OAP This depends on the amount of data in your queue, you can keep it consistent with SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME   OAP SW_CORE_GRPC_MAX_CONCURRENT_CALL 50 4 A link between Satellite and OAP, how many requests parallelism is supported Same with SATELLITE_QUEUE_PARTITION in Satellite    ","title":"ALS Load Balance","url":"/docs/skywalking-satellite/v1.2.0/en/setup/performance/als-load-balance/readme/"},{"content":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and Service Mesh Traffic(Access Log in gRPC) are able to be analyzed by OAL, to build metrics of services, service instances and endpoints, and to build topology/dependency of services, service instances and endpoints(traces-oriented analysis only).\nThe spans of traces relative with RPC, such as HTTP, gRPC, Dubbo, RocketMQ, Kafka, would be converted to service input/output traffic, like access logs collected from service mesh. Both of those traffic would be cataloged as the defined sources in the Observability Analysis Language engine.\nThe metrics are customizable through Observability Analysis Language(OAL) scripts, and the topology/dependency is built by the SkyWalking OAP kernel automatically without explicit OAL scripts.\nObservability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nOAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically. Don\u0026rsquo;t expect to mount the changes of those scripts in the runtime. If your OAP servers are running in a cluster mode, these script defined metrics should be aligned.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. 
The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. 
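Before continuing with casts, here is a minimal sketch of how the pieces introduced so far compose, chaining a filter into an aggregation function. The metric names are illustrative, and the fields (Service.latency, Endpoint.httpResponseStatusCode) are the ones used in the examples later in this section:

```
// Sketch only: illustrative metric names; fields are taken from the examples in this section.
// Average latency per service (longAvg requires a long input field).
service_resp_time_sketch = from(Service.latency).longAvg()
// Count of server-error responses per endpoint, applying a filter before the function.
endpoint_server_error_sum_sketch = from(Endpoint.*).filter(httpResponseStatusCode >= 500).count()
```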
In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. 
service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Analysis Native Streaming Traces and Service Mesh Traffic","url":"/docs/main/latest/en/concepts-and-designs/oal/"},{"content":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and Service Mesh Traffic(Access Log in gRPC) are able to be analyzed by OAL, to build metrics of services, service instances and endpoints, and to build topology/dependency of services, service instances and endpoints(traces-oriented analysis only).\nThe spans of traces relative with RPC, such as HTTP, gRPC, Dubbo, RocketMQ, Kafka, would be converted to service input/output traffic, like access logs collected from service mesh. Both of those traffic would be cataloged as the defined sources in the Observability Analysis Language engine.\nThe metrics are customizable through Observability Analysis Language(OAL) scripts, and the topology/dependency is built by the SkyWalking OAP kernel automatically without explicit OAL scripts.\nObservability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nOAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically. Don\u0026rsquo;t expect to mount the changes of those scripts in the runtime. If your OAP servers are running in a cluster mode, these script defined metrics should be aligned.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   
instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile2(10);\n percentile (deprecated since 10.0.0) is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. percentile2 Since 10.0.0, the percentile function has been instead by percentile2. The percentile2 function is a labeled-value metric with default label name p and label values 50,75,90,95,99. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. 
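Before the cast notes resume, here is a small sketch of the percentile2 function described above, combined with a name filter. The metric name is illustrative; per the description, the result is a labeled-value metric with label p and values 50, 75, 90, 95, and 99:

```
// Sketch only: p50/p75/p90/p95/p99 for endpoints whose name starts with "serv".
// The single parameter sets the latency precision, as explained above.
serv_endpoint_percentile_sketch = from(Endpoint.latency).filter(name like "serv%").percentile2(10)
```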
In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile2(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. 
service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Analysis Native Streaming Traces and Service Mesh Traffic","url":"/docs/main/next/en/concepts-and-designs/oal/"},{"content":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and Service Mesh Traffic(Access Log in gRPC) are able to be analyzed by OAL, to build metrics of services, service instances and endpoints, and to build topology/dependency of services, service instances and endpoints(traces-oriented analysis only).\nThe spans of traces relative with RPC, such as HTTP, gRPC, Dubbo, RocketMQ, Kafka, would be converted to service input/output traffic, like access logs collected from service mesh. Both of those traffic would be cataloged as the defined sources in the Observability Analysis Language engine.\nThe metrics are customizable through Observability Analysis Language(OAL) scripts, and the topology/dependency is built by the SkyWalking OAP kernel automatically without explicit OAL scripts.\nObservability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nOAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically. Don\u0026rsquo;t expect to mount the changes of those scripts in the runtime. If your OAP servers are running in a cluster mode, these script defined metrics should be aligned.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   
instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  
mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Analysis Native Streaming Traces and Service Mesh Traffic","url":"/docs/main/v9.7.0/en/concepts-and-designs/oal/"},{"content":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... 
build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","title":"Apache SkyWalking Agent Containerized Scenarios","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/containerization/"},{"content":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","title":"Apache SkyWalking Agent Containerized Scenarios","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/containerization/"},{"content":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... 
build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","title":"Apache SkyWalking Agent Containerized Scenarios","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/containerization/"},{"content":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","title":"Apache SkyWalking Agent Containerized Scenarios","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/containerization/"},{"content":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... 
build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","title":"Apache SkyWalking Agent Containerized Scenarios","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/containerization/"},{"content":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release the SkyWalking BanyanDB in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-banyandb and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-banyandb \u0026amp;\u0026amp; cd skywalking-banyandb git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking BanyanDB $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release-assembly The skywalking-banyandb-${VERSION}-bin.tgz, skywalking-banyandb-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.asc skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.sha512 skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cd skywalking/banyandb \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking BanyanDB release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking BanyanDB version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking BanyanDB version $VERSION. 
Release notes: * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-banyandb-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-banyandb-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-banyandb/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/docs/installation.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-banyandb-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.asc apache-skywalking-banyandb-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking BanyanDB version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking BanyanDB $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking BanyanDB $VERSION is now released. SkyWalking BanyanDB: An observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking BanyanDB Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-banyandb/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking BanyanDB release guide","url":"/docs/skywalking-banyandb/latest/release/"},{"content":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release the SkyWalking BanyanDB in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-banyandb and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-banyandb \u0026amp;\u0026amp; cd skywalking-banyandb git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking BanyanDB $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release-assembly The skywalking-banyandb-${VERSION}-bin.tgz, skywalking-banyandb-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.asc skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.sha512 skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cd skywalking/banyandb \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking BanyanDB release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking BanyanDB version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking BanyanDB version $VERSION. 
Release notes: * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-banyandb-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-banyandb-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-banyandb/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/docs/installation/binaries.md#Build-From-Source Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-banyandb-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.asc apache-skywalking-banyandb-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking BanyanDB version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking BanyanDB $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking BanyanDB $VERSION is now released. 
SkyWalking BanyanDB: An observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking BanyanDB Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-banyandb/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking BanyanDB release guide","url":"/docs/skywalking-banyandb/next/release/"},{"content":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release the SkyWalking BanyanDB in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-banyandb and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-banyandb \u0026amp;\u0026amp; cd skywalking-banyandb git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking BanyanDB $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release-assembly The skywalking-banyandb-${VERSION}-bin.tgz, skywalking-banyandb-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.asc skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.sha512 skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cd skywalking/banyandb \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking BanyanDB release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking BanyanDB version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking BanyanDB version $VERSION. 
Release notes: * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-banyandb-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-banyandb-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-banyandb/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/docs/installation.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-banyandb-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.asc apache-skywalking-banyandb-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking BanyanDB version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking BanyanDB $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking BanyanDB $VERSION is now released. SkyWalking BanyanDB: An observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking BanyanDB Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-banyandb/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking BanyanDB release guide","url":"/docs/skywalking-banyandb/v0.5.0/release/"},{"content":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to release the SkyWalking Cloud on Kubernetes in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-swck and skywalking, create a new milestone if needed. Update CHANGES.md. Update image tags of adapter and operator.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking Cloud on Kubernetes $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release The skywalking-swck-${VERSION}-bin.tgz, skywalking-swck-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.asc skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.sha512 skywalking/swck/\u0026#34;$VERSION\u0026#34; cd skywalking/swck \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-SWCK release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list.\nSubject: [ANNOUNCEMENT] SkyWalking Cloud on Kubernetes $VERSION test build available Content: The test build of SkyWalking Cloud on Kubernetes $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/operator.md#build-from-sources * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/custom-metrics-adapter.md#use-kustomize-to-customise-your-deployment * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz Release Tag : * (Git Tag) $VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-swck-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-swck-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-swck-{src,bin}-$VERSION.tgz.asc apache-skywalking-swck-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. 
When counting the binding and non-binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source code tarballs and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck # .... # enter your apache password # ....   Remove the last released tarballs from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files that need to be modified.\n  Update the GitHub release page, following the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permalink of the vote thread here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Cloud on Kubernetes $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Cloud on Kubernetes $VERSION is now released. SkyWalking Cloud on Kubernetes: A bridge platform between Apache SkyWalking and Kubernetes. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking Cloud on Kubernetes Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-swck/blob/$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Cloud on Kubernetes release guide","url":"/docs/skywalking-swck/latest/release/"},{"content":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to release the SkyWalking Cloud on Kubernetes in the Apache Way, and also helps people to check the release for the vote.\nPrerequisites  Close (if finished, or move to the next milestone otherwise) all issues in the current milestone from skywalking-swck and skywalking, create a new milestone if needed. Update CHANGES.md. Update image tags of adapter and operator.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existing KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking Cloud on Kubernetes $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release The skywalking-swck-${VERSION}-bin.tgz, skywalking-swck-${VERSION}-src.tgz, and their corresponding .asc and .sha512 files. 
In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.asc skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.sha512 skywalking/swck/\u0026#34;$VERSION\u0026#34; cd skywalking/swck \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-SWCK release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list.\nSubject: [ANNOUNCEMENT] SkyWalking Cloud on Kubernetes $VERSION test build available Content: The test build of SkyWalking Cloud on Kubernetes $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/operator.md#build-from-sources * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/custom-metrics-adapter.md#use-kustomize-to-customise-your-deployment * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz Release Tag : * (Git Tag) $VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. 
[1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-swck-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-swck-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-swck-{src,bin}-$VERSION.tgz.asc apache-skywalking-swck-{src,bin}-$VERSION.tgz Build distribution from source code package by following the build guide. Licenses header check.  Vote result should follow these:\n  A PMC vote is +1 binding; all others are +1 non-binding.\n  Within 72 hours, you get at least 3 (+1 binding) votes and more +1 votes than -1 votes. The vote passes.\n  Send the closing vote mail to announce the result. When counting the binding and non-binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source code tarballs and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck # .... # enter your apache password # ....   Remove the last released tarballs from https://dist.apache.org/repos/dist/release/skywalking (a publish-and-cleanup sketch appears below).\n  Refer to the previous PR, update news and links on the website. There are seven files that need to be modified.\n  Update the GitHub release page, following the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permalink of the vote thread here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Cloud on Kubernetes $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Cloud on Kubernetes $VERSION is now released. SkyWalking Cloud on Kubernetes: A bridge platform between Apache SkyWalking and Kubernetes. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. 
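Aside for the release manager: the publish step above (the svn mv plus the removal of the previous release) can be performed entirely against the remote SVN URLs. This is a minimal sketch, not part of the official guide; it assumes $VERSION is exported, the caller is a PMC member with dist.apache.org credentials, and $LAST_VERSION is a hypothetical variable naming the release directory being superseded.
# move the approved release candidate from dev to release (PMC members only)
svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck -m \u0026#34;Release Apache SkyWalking SWCK $VERSION\u0026#34;
# remove the previous release directory ($LAST_VERSION is a placeholder, not defined by the guide)
svn rm https://dist.apache.org/repos/dist/release/skywalking/swck/$LAST_VERSION -m \u0026#34;Remove old SWCK release $LAST_VERSION\u0026#34;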
Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking Cloud on Kubernetes Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-swck/blob/$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Cloud on Kubernetes release guide","url":"/docs/skywalking-swck/next/release/"},{"content":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to release the SkyWalking Cloud on Kubernetes in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-swck and skywalking, create a new milestone if needed. Update CHANGES.md. Update image tags of adapter and operator.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking Cloud on Kubernetes $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release The skywalking-swck-${VERSION}-bin.tgz, skywalking-swck-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.asc skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.sha512 skywalking/swck/\u0026#34;$VERSION\u0026#34; cd skywalking/swck \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-SWCK release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list.\nSubject: [ANNOUNCEMENT] SkyWalking Cloud on Kubernetes $VERSION test build available Content: The test build of SkyWalking Cloud on Kubernetes $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/operator.md#build-from-sources * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/custom-metrics-adapter.md#use-kustomize-to-customise-your-deployment * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz Release Tag : * (Git Tag) $VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-swck-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-swck-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-swck-{src,bin}-$VERSION.tgz.asc apache-skywalking-swck-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. 
When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Cloud on Kubernetes $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Cloud on Kubernetes $VERSION is now released. SkyWalking Cloud on Kubernetes: A bridge platform between Apache SkyWalking and Kubernetes. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking Cloud on Kubernetes Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-swck/blob/$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Cloud on Kubernetes release guide","url":"/docs/skywalking-swck/v0.9.0/release/"},{"content":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. 
Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). 
Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. 
But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t in most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/latest/en/guides/asf/committer/"},{"content":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcomes all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. 
Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. 
Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... 
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/next/en/guides/asf/committer/"},{"content":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. 
Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  
If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/v9.0.0/en/guides/asf/committer/"},{"content":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. 
If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss it. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so that you can gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result a Consensus Approval if there are at least three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking project and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. 
Choose an Apache ID that is not on the Apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, a CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print it, sign it by hand, scan it as a PDF, and send it as an attachment to secretary@apache.org. (If an electronic signature is preferred, please follow the steps on this page.) The PMC will wait for the Apache secretary to confirm that the ICLA (or CCLA) has been filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P. The V.P. could request it through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to the Apache Account Utility Platform, create your password, and set up your personal mailbox (Forwarding email address) and GitHub account (Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authentication (2FA) on GitHub. If you turn 2FA \u0026ldquo;off\u0026rdquo;, you will be removed from the corresponding Apache committer write-permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use the GitBox Account Linking Utility to obtain write permission to the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and set Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you can\n Review and merge pull requests to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended that new committers request a recheck of the pull request by senior committers. Create and push code to new branches in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 (non-binding). 
Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) members do not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t in most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/v9.1.0/en/guides/asf/committer/"},{"content":"Apache SkyWalking committer The SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcomes all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nominations can only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss it. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so that you can gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result a Consensus Approval if there are at least three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  
Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. 
could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. 
For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/v9.2.0/en/guides/asf/committer/"},{"content":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. 
Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. 
When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... 
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/v9.3.0/en/guides/asf/committer/"},{"content":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. 
Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  
If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/v9.4.0/en/guides/asf/committer/"},{"content":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. 
If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. 
Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. 
Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/v9.5.0/en/guides/asf/committer/"},{"content":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  
Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. 
could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. 
For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/v9.6.0/en/guides/asf/committer/"},{"content":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. 
Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. 
When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... 
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/v9.7.0/en/guides/asf/committer/"},{"content":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the SkyWalking Go in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-go and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-go \u0026amp;\u0026amp; cd skywalking-go git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go v$VERSION\u0026#34; git tag -a \u0026#34;toolkit/v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go Toolkit v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-go-${VERSION}-bin.tgz, apache-skywalking-go-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.asc skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.sha512 skywalking/go/\u0026#34;$VERSION\u0026#34; cd skywalking/go \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Go release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Go version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Go version $VERSION. Release notes: * https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-go-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-go-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-go/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-go/blob/v$VERSION/docs/en/development-and-contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. 
[ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-go-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-go-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-go-$VERSION-{src,bin}.tgz.asc skywalking-go-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Go version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION https://dist.apache.org/repos/dist/release/skywalking/go   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Go $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Go $VERSION is now released. SkyWalking Go: The Golang auto-instrument Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Golang projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Go Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-go/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. 
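A hedged sketch of that cleanup (the version to drop is a placeholder; running svn rm against the release URL commits the removal directly):
# remove a superseded release from the dist mirror; <old-version> is a placeholder
svn rm -m "Remove obsolete apache-skywalking-go release" https://dist.apache.org/repos/dist/release/skywalking/go/<old-version>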
If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Go Release Guide","url":"/docs/skywalking-go/latest/en/development-and-contribution/how-to-release/"},{"content":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the SkyWalking Go in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-go and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-go \u0026amp;\u0026amp; cd skywalking-go git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go v$VERSION\u0026#34; git tag -a \u0026#34;toolkit/v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go Toolkit v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-go-${VERSION}-bin.tgz, apache-skywalking-go-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.asc skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.sha512 skywalking/go/\u0026#34;$VERSION\u0026#34; cd skywalking/go \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Go release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Go version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Go version $VERSION. Release notes: * https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-go-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-go-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-go/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-go/blob/v$VERSION/docs/en/development-and-contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. 
[1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-go-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-go-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-go-$VERSION-{src,bin}.tgz.asc skywalking-go-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Go version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION https://dist.apache.org/repos/dist/release/skywalking/go   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Go $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Go $VERSION is now released. SkyWalking Go: The Golang auto-instrument Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Golang projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Go Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-go/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. 
If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Go Release Guide","url":"/docs/skywalking-go/next/en/development-and-contribution/how-to-release/"},{"content":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the SkyWalking Go in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-go and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-go \u0026amp;\u0026amp; cd skywalking-go git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go v$VERSION\u0026#34; git tag -a \u0026#34;toolkit/v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go Toolkit v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-go-${VERSION}-bin.tgz, apache-skywalking-go-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.asc skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.sha512 skywalking/go/\u0026#34;$VERSION\u0026#34; cd skywalking/go \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Go release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Go version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Go version $VERSION. Release notes: * https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-go-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-go-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-go/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-go/blob/v$VERSION/docs/en/development-and-contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. 
[1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-go-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-go-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-go-$VERSION-{src,bin}.tgz.asc skywalking-go-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Go version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION https://dist.apache.org/repos/dist/release/skywalking/go   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Go $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Go $VERSION is now released. SkyWalking Go: The Golang auto-instrument Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Golang projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Go Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-go/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. 
If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Go Release Guide","url":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/how-to-release/"},{"content":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release the SkyWalking Infra E2E in the Apache Way, and also helps people to check the release for voting.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-infra-e2e and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-infra-e2e.git \u0026amp;\u0026amp; cd skywalking-infra-e2e git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Infra-E2E $VERSION\u0026#34; git push --tags make clean make test # this is optional, it runs sanity checks to verify the features make release Upload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e release/skywalking/infra-e2e mkdir -p release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.asc release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.sha512 release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add infra-e2e/$VERSION \u0026amp;\u0026amp; svn commit infra-e2e -m \u0026#34;Draft Apache SkyWalking-Infra-E2E release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Infra E2E version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Infra E2E version $VERSION. Release notes: * https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-e2e-$VERSION-bin.tgz - sha512xxxxyyyzzz skywalking-e2e-$VERSION-src.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-infra-e2e/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-infra-e2e/blob/main/docs/en/contribution/Release-Guidance.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. 
All artifacts in staging repository are published with .asc, and sha files. Source codes and distribution packages (skywalking-e2e-$VERSION-src.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-e2e-$VERSION-src.tgz.sha512. Check gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz. Build distribution from source code package by following this the build guide.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Infra E2E version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Infra E2E $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Infra E2E $VERSION is now released. SkyWalking Infra E2E: An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Infra E2E Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Infra E2E Release Guide","url":"/docs/skywalking-infra-e2e/latest/en/contribution/release-guidance/"},{"content":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release the SkyWalking Infra E2E in the Apache Way, and also helps people to check the release for voting.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-infra-e2e and skywalking, create a new milestone if needed. Update CHANGES.md.  
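As a convenience for the first prerequisite, the GitHub CLI (not something this guide requires) can list what is still open in the release milestone; the milestone title below is only a placeholder:
# show issues still open in the hypothetical "1.3.0" milestone of both repositories
gh issue list -R apache/skywalking-infra-e2e --milestone "1.3.0" --state open
gh issue list -R apache/skywalking --milestone "1.3.0" --state open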
Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-infra-e2e.git \u0026amp;\u0026amp; cd skywalking-infra-e2e git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Infra-E2E $VERSION\u0026#34; git push --tags make clean make test # this is optional, it runs sanity checks to verify the features make release Upload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e release/skywalking/infra-e2e mkdir -p release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.asc release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.sha512 release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add infra-e2e/$VERSION \u0026amp;\u0026amp; svn commit infra-e2e -m \u0026#34;Draft Apache SkyWalking-Infra-E2E release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Infra E2E version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Infra E2E version $VERSION. Release notes: * https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-e2e-$VERSION-bin.tgz - sha512xxxxyyyzzz skywalking-e2e-$VERSION-src.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-infra-e2e/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-infra-e2e/blob/main/docs/en/contribution/Release-Guidance.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, and sha files. Source codes and distribution packages (skywalking-e2e-$VERSION-src.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-e2e-$VERSION-src.tgz.sha512. Check gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz. Build distribution from source code package by following this the build guide.  
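Taken together, a voter's local verification of the items above might look roughly like this sketch ($VERSION is a placeholder, and importing the KEYS file is only needed once):
# check out the staged release candidate
svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/"$VERSION" && cd "$VERSION"
# import the release managers' public keys, then verify checksum and signature
curl -L https://dist.apache.org/repos/dist/release/skywalking/KEYS | gpg --import
shasum -c skywalking-e2e-"$VERSION"-src.tgz.sha512
gpg --verify skywalking-e2e-"$VERSION"-src.tgz.asc skywalking-e2e-"$VERSION"-src.tgz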
Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Infra E2E version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Infra E2E $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Infra E2E $VERSION is now released. SkyWalking Infra E2E: An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Infra E2E Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Infra E2E Release Guide","url":"/docs/skywalking-infra-e2e/next/en/contribution/release-guidance/"},{"content":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release the SkyWalking Infra E2E in the Apache Way, and also helps people to check the release for voting.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-infra-e2e and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-infra-e2e.git \u0026amp;\u0026amp; cd skywalking-infra-e2e git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Infra-E2E $VERSION\u0026#34; git push --tags make clean make test # this is optional, it runs sanity checks to verify the features make release Upload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e release/skywalking/infra-e2e mkdir -p release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.asc release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.sha512 release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add infra-e2e/$VERSION \u0026amp;\u0026amp; svn commit infra-e2e -m \u0026#34;Draft Apache SkyWalking-Infra-E2E release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Infra E2E version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Infra E2E version $VERSION. Release notes: * https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-e2e-$VERSION-bin.tgz - sha512xxxxyyyzzz skywalking-e2e-$VERSION-src.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-infra-e2e/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-infra-e2e/blob/main/docs/en/contribution/Release-Guidance.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, and sha files. Source codes and distribution packages (skywalking-e2e-$VERSION-src.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-e2e-$VERSION-src.tgz.sha512. Check gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz. Build distribution from source code package by following this the build guide.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Infra E2E version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Infra E2E $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Infra E2E $VERSION is now released. SkyWalking Infra E2E: An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Infra E2E Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Infra E2E Release Guide","url":"/docs/skywalking-infra-e2e/v1.3.0/en/contribution/release-guidance/"},{"content":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking SDK in The Apache Way and start the voting process by reading this document.\nRequirements  Rust(rustc) Cargo PHP(php, php-config) Pecl GPG shasum  Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file.(Notice, only PMC member could update this file) Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Draft a new release Open Create a new release page, choose the tag, and click the Generate release notes button, then copy the generated text to local /tmp/notes.txt.\nTest your settings and package ## Make sure local compiling passed \u0026gt; cargo build ## Create package.xml from package.xml.tpl \u0026gt; cargo run -p scripts --release -- create-package-xml --version x.y.z --notes \u0026#34;`cat /tmp/notes.txt`\u0026#34; ## Create local package. The skywalking_agent-x.y.z.tgz should be found in project root \u0026gt; pecl package Sign the package Tag the commit ID of this release as vx.y.z.\nAfter set the version in Cargo.toml with the release number, package locally. 
Then run the following commands to sign your package.\n\u0026gt; export RELEASE_VERSION=x.y.z ## The package should be signed by your Apache committer mail. \u0026gt; gpg --armor --detach-sig skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; shasum -a 512 skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; skywalking_agent-$RELEASE_VERSION.tgz.sha512 After these, the source tar with its signed asc and sha512 are ready.\nUpload to Apache SVN and tag a release  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/php. Create a folder and name it by the release version and round, such as: x.y.z Upload tar ball, asc, sha512 files to the new folder.  Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking PHP version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking PHP version x.y.z. Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z/ * sha512 checksums - xxxxxxxx skywalking_agent-x.y.z.tgz Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking-php/tree/{commit-id} Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-php/blob/master/docs/en/contribution/compiling.md Voting will start now (Date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release   Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.\n\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z https://dist.apache.org/repos/dist/release/skywalking/php .... enter your apache password ....   Pecl publish package on skywalking_agent.\nMake sure you have a PECL account, and list in package.tpl.xml as \u0026lt;developer\u0026gt;, or reach private@skywalking.apache.org if you are a committer/PMC but not listed.\nYou can request a PECL account via https://pecl.php.net/account-request.php.\n  Add an release event, update download and doc releases on the SkyWalking website.\n  Add the new release on ASF addrelease site.\n  Remove the old releases on https://dist.apache.org/repos/dist/release/skywalking/php/{previous-version}.\n  Send a release announcement Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.\nMail title: [ANNOUNCE] Apache SkyWalking PHP x.y.z released Mail content: Hi all, SkyWalking PHP Agent provides the native tracing abilities for PHP project. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team ","title":"Apache SkyWalking PHP Agent release guide","url":"/docs/skywalking-php/latest/en/contribution/release-agent/"},{"content":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking SDK in The Apache Way and start the voting process by reading this document.\nRequirements  Rust(rustc) Cargo PHP(php, php-config) Pecl GPG shasum  Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file.(Notice, only PMC member could update this file) Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Draft a new release Open Create a new release page, choose the tag, and click the Generate release notes button, then copy the generated text to local /tmp/notes.txt.\nTest your settings and package ## Make sure local compiling passed \u0026gt; cargo build ## Create package.xml from package.xml.tpl \u0026gt; cargo run -p scripts --release -- create-package-xml --version x.y.z --notes \u0026#34;`cat /tmp/notes.txt`\u0026#34; ## Create local package. The skywalking_agent-x.y.z.tgz should be found in project root \u0026gt; pecl package Sign the package Tag the commit ID of this release as vx.y.z.\nAfter set the version in Cargo.toml with the release number, package locally. Then run the following commands to sign your package.\n\u0026gt; export RELEASE_VERSION=x.y.z ## The package should be signed by your Apache committer mail. \u0026gt; gpg --armor --detach-sig skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; shasum -a 512 skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; skywalking_agent-$RELEASE_VERSION.tgz.sha512 After these, the source tar with its signed asc and sha512 are ready.\nUpload to Apache SVN and tag a release  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/php. Create a folder and name it by the release version and round, such as: x.y.z Upload tar ball, asc, sha512 files to the new folder.  Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking PHP version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking PHP version x.y.z. Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z/ * sha512 checksums - xxxxxxxx skywalking_agent-x.y.z.tgz Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking-php/tree/{commit-id} Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-php/blob/master/docs/en/contribution/compiling.md Voting will start now (Date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... 
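Before replying +1, a reviewer might run a quick local check of the candidate. A hedged sketch (x.y.z is a placeholder, and building the extension needs the Rust toolchain plus the PHP development headers):
# fetch the release candidate and verify checksum and signature
svn co https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z && cd x.y.z
shasum -c skywalking_agent-x.y.z.tgz.sha512
gpg --verify skywalking_agent-x.y.z.tgz.asc skywalking_agent-x.y.z.tgz
# build and install the extension from the candidate tarball, then confirm it loads
pecl install ./skywalking_agent-x.y.z.tgz
php -d extension=skywalking_agent -m | grep -i skywalking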
Vote Check The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release   Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.\n\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z https://dist.apache.org/repos/dist/release/skywalking/php .... enter your apache password ....   Pecl publish package on skywalking_agent.\nMake sure you have a PECL account, and list in package.tpl.xml as \u0026lt;developer\u0026gt;, or reach private@skywalking.apache.org if you are a committer/PMC but not listed.\nYou can request a PECL account via https://pecl.php.net/account-request.php.\n  Add an release event, update download and doc releases on the SkyWalking website.\n  Add the new release on ASF addrelease site.\n  Remove the old releases on https://dist.apache.org/repos/dist/release/skywalking/php/{previous-version}.\n  Send a release announcement Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.\nMail title: [ANNOUNCE] Apache SkyWalking PHP x.y.z released Mail content: Hi all, SkyWalking PHP Agent provides the native tracing abilities for PHP project. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team ","title":"Apache SkyWalking PHP Agent release guide","url":"/docs/skywalking-php/next/en/contribution/release-agent/"},{"content":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking SDK in The Apache Way and start the voting process by reading this document.\nRequirements  Rust(rustc) Cargo PHP(php, php-config) Pecl GPG shasum  Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file.(Notice, only PMC member could update this file) Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Draft a new release Open Create a new release page, choose the tag, and click the Generate release notes button, then copy the generated text to local /tmp/notes.txt.\nTest your settings and package ## Make sure local compiling passed \u0026gt; cargo build ## Create package.xml from package.xml.tpl \u0026gt; cargo run -p scripts --release -- create-package-xml --version x.y.z --notes \u0026#34;`cat /tmp/notes.txt`\u0026#34; ## Create local package. 
The skywalking_agent-x.y.z.tgz should be found in project root \u0026gt; pecl package Sign the package Tag the commit ID of this release as vx.y.z.\nAfter set the version in Cargo.toml with the release number, package locally. Then run the following commands to sign your package.\n\u0026gt; export RELEASE_VERSION=x.y.z ## The package should be signed by your Apache committer mail. \u0026gt; gpg --armor --detach-sig skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; shasum -a 512 skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; skywalking_agent-$RELEASE_VERSION.tgz.sha512 After these, the source tar with its signed asc and sha512 are ready.\nUpload to Apache SVN and tag a release  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/php. Create a folder and name it by the release version and round, such as: x.y.z Upload tar ball, asc, sha512 files to the new folder.  Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking PHP version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking PHP version x.y.z. Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z/ * sha512 checksums - xxxxxxxx skywalking_agent-x.y.z.tgz Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking-php/tree/{commit-id} Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-php/blob/master/docs/en/contribution/compiling.md Voting will start now (Date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release   Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.\n\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z https://dist.apache.org/repos/dist/release/skywalking/php .... enter your apache password ....   Pecl publish package on skywalking_agent.\nMake sure you have a PECL account, and list in package.tpl.xml as \u0026lt;developer\u0026gt;, or reach private@skywalking.apache.org if you are a committer/PMC but not listed.\nYou can request a PECL account via https://pecl.php.net/account-request.php.\n  Add an release event, update download and doc releases on the SkyWalking website.\n  Add the new release on ASF addrelease site.\n  Remove the old releases on https://dist.apache.org/repos/dist/release/skywalking/php/{previous-version}.\n  Send a release announcement Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.\nMail title: [ANNOUNCE] Apache SkyWalking PHP x.y.z released Mail content: Hi all, SkyWalking PHP Agent provides the native tracing abilities for PHP project. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. 
This release contains a number of new features, bug fixes and improvements compared to version a.b.c (last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Apache SkyWalking Team ","title":"Apache SkyWalking PHP Agent release guide","url":"/docs/skywalking-php/v0.7.0/en/contribution/release-agent/"},{"content":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but are provided for convenience. The recommended usage is always to build from the source.\nThis image hosts the SkyWalking Python agent package on top of official Python base images (full \u0026amp; slim), supporting Python 3.7 - 3.11.\nHow to use this image The images are hosted at Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\nskywalking.docker.scarf.sh/apache/skywalking-python\nBuild your Python application image on top of this image Start by pulling the skywalking-python image as the base of your application image. Refer to Docker Hub for the list of tags available.\nFROM apache/skywalking-python:0.7.0-grpc-py3.9\n# ... build your Python application\nYou could start your Python application with CMD. The Python image already sets an entry point ENTRYPOINT [\u0026quot;sw-python\u0026quot;].\nFor example - CMD ['run', '-p', 'gunicorn', 'app.wsgi'] -p is always needed when used with Gunicorn/uWSGI -\u0026gt; This will be translated to sw-python run -p gunicorn app.wsgi\nYou don\u0026rsquo;t need to care about enabling the SkyWalking Python agent manually; it should be adopted and bootstrapped automatically through the sw-python CLI.\nEnvironment variables should be provided to customize the agent behavior.\nBuild an image from the dockerfile Provide the following arguments to build your own image from the dockerfile.\nBASE_PYTHON_IMAGE # the Python base image to build upon SW_PYTHON_AGENT_VERSION # agent version to be pulled from PyPI SW_PYTHON_AGENT_PROTOCOL # agent protocol - grpc/ http/ kafka ","title":"Apache SkyWalking Python Agent dockerfile and images","url":"/docs/skywalking-python/latest/en/setup/container/"},{"content":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but are provided for convenience. The recommended usage is always to build from the source.\nThis image hosts the SkyWalking Python agent package on top of official Python base images (full \u0026amp; slim), supporting Python 3.7 - 3.11.\nHow to use this image The images are hosted at Docker Hub.\nThe images come with protocol variants (gRPC, Kafka, HTTP) and base Python variants (Full, Slim).\nBuild your Python application image on top of this image Start by pulling the skywalking-python image as the base of your application image. Refer to Docker Hub for the list of tags available.\nFROM apache/skywalking-python:1.1.0-grpc-py3.10\n# ... build your Python application\nYou could start your Python application with CMD. 
The Python image already sets an entry point ENTRYPOINT [\u0026quot;sw-python\u0026quot;].\nFor example - CMD ['run', '-p', 'gunicorn', 'app.wsgi'] -p is always needed when used with Gunicorn/uWSGI -\u0026gt; This will be translated to sw-python run -p gunicorn app.wsgi\nYou don\u0026rsquo;t need to care about enabling the SkyWalking Python agent manually; it should be adopted and bootstrapped automatically through the sw-python CLI.\nEnvironment variables should be provided to customize the agent behavior.\nBuild an image from the dockerfile Provide the following arguments to build your own image from the dockerfile.\nBASE_PYTHON_IMAGE # the Python base image to build upon SW_PYTHON_AGENT_VERSION # agent version to be pulled from PyPI SW_PYTHON_AGENT_PROTOCOL # agent protocol - grpc/ http/ kafka ","title":"Apache SkyWalking Python Agent dockerfile and images","url":"/docs/skywalking-python/next/en/setup/container/"},{"content":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but are provided for convenience. The recommended usage is always to build from the source.\nThis image hosts the SkyWalking Python agent package on top of official Python base images (full \u0026amp; slim), supporting Python 3.7 - 3.11.\nHow to use this image The images are hosted at Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\nskywalking.docker.scarf.sh/apache/skywalking-python\nBuild your Python application image on top of this image Start by pulling the skywalking-python image as the base of your application image. Refer to Docker Hub for the list of tags available.\nFROM apache/skywalking-python:0.7.0-grpc-py3.9\n# ... build your Python application\nYou could start your Python application with CMD. The Python image already sets an entry point ENTRYPOINT [\u0026quot;sw-python\u0026quot;].\nFor example - CMD ['run', '-p', 'gunicorn', 'app.wsgi'] -p is always needed when used with Gunicorn/uWSGI -\u0026gt; This will be translated to sw-python run -p gunicorn app.wsgi\nYou don\u0026rsquo;t need to care about enabling the SkyWalking Python agent manually; it should be adopted and bootstrapped automatically through the sw-python CLI.\nEnvironment variables should be provided to customize the agent behavior.\nBuild an image from the dockerfile Provide the following arguments to build your own image from the dockerfile.\nBASE_PYTHON_IMAGE # the Python base image to build upon SW_PYTHON_AGENT_VERSION # agent version to be pulled from PyPI SW_PYTHON_AGENT_PROTOCOL # agent protocol - grpc/ http/ kafka ","title":"Apache SkyWalking Python Agent dockerfile and images","url":"/docs/skywalking-python/v1.0.1/en/setup/container/"},{"content":"Apache SkyWalking Python Image Release Guide This documentation shows the way to build and push the SkyWalking Python images to DockerHub.\nPrerequisites Before building the latest release of images, make sure an official release has been pushed to PyPI, which the dockerfile depends on.\nImages This process will generate a list of images covering the most used Python versions and variations (grpc/http/kafka) of the Python agent.\nThe convenience images are published to Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\n skywalking.docker.scarf.sh/apache/skywalking-python (Docker Hub)  How to build Issue the following commands to build relevant docker images for the Python agent. 
The make command will generate three images (grpc, http, kafka) for each Python version supported.\nAt the root folder -\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image Or at the docker folder -\ncd docker export AGENT_VERSION=\u0026lt;version\u0026gt; make How to publish images After a SkyWalking Apache release for the Python agent and wheels have been pushed to PyPI:\n  Build images from the project root; this step pulls the agent wheel from PyPI and installs it:\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image   Verify the built images.\n  Push the built images to the Docker Hub repos:\nmake push-image   ","title":"Apache SkyWalking Python Image Release Guide","url":"/docs/skywalking-python/latest/en/contribution/how-to-release-docker/"},{"content":"Apache SkyWalking Python Image Release Guide The official process, which generates a list of images covering the most used Python versions and variations (grpc/http/kafka) of the Python agent, is deployed to our GitHub Actions and therefore does not rely on this documentation.\nThis documentation shows the way to build and push the SkyWalking Python images manually.\nHow to build manually Before building the latest release of images, make sure an official release has been pushed to PyPI, which the dockerfile depends on.\nImages The process generating a list of images covering the most used Python versions and variations (grpc/http/kafka) of the Python agent is deployed to our GitHub Actions.\nThe convenience images are published to DockerHub.\nHow to build Issue the following commands to build relevant docker images for the Python agent. The make command will generate three images (grpc, http, kafka) for each Python version supported.\nAt the root folder -\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image Or at the docker folder -\ncd docker export AGENT_VERSION=\u0026lt;version\u0026gt; make How to publish images After a SkyWalking Apache release for the Python agent and wheels have been pushed to PyPI:\n  Build images from the project root; this step pulls the agent wheel from PyPI and installs it:\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image   Verify the built images.\n  Push the built images to the Docker Hub repos:\nmake push-image   ","title":"Apache SkyWalking Python Image Release Guide","url":"/docs/skywalking-python/next/en/contribution/how-to-release-docker/"},{"content":"Apache SkyWalking Python Image Release Guide This documentation shows the way to build and push the SkyWalking Python images to DockerHub.\nPrerequisites Before building the latest release of images, make sure an official release has been pushed to PyPI, which the dockerfile depends on.\nImages This process will generate a list of images covering the most used Python versions and variations (grpc/http/kafka) of the Python agent.\nThe convenience images are published to Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\n skywalking.docker.scarf.sh/apache/skywalking-python (Docker Hub)  How to build Issue the following commands to build relevant docker images for the Python agent. 
The make command will generate three images(grpc, http, kafka) for each Python version supported.\nAt the root folder -\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image Or at the docker folder -\ncd docker export AGENT_VERSION=\u0026lt;version\u0026gt; make How to publish images After a SkyWalking Apache release for the Python agent and wheels have been pushed to PyPI:\n  Build images from the project root, this step pulls agent wheel from PyPI and installs it:\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image   Verify the images built.\n  Push built images to docker hub repos:\nmake push-image   ","title":"Apache SkyWalking Python Image Release Guide","url":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-release-docker/"},{"content":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the SkyWalking Python in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-python and skywalking, create a new milestone if needed. Update CHANGELOG.md and version in pyproject.toml.  Add your GPG public key to Apache SVN   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-python \u0026amp;\u0026amp; cd skywalking-python git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Python $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release Upload to Apache SVN svn co https://dist.apache.org/repos/dist/dev/skywalking/python release/skywalking/python mkdir -p release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz.asc release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking-python*.tgz.sha512 release/skywalking/python/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add python/$VERSION \u0026amp;\u0026amp; svn commit python -m \u0026#34;Draft Apache SkyWalking-Python release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email, the same below.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION test build available Content: The test build of Apache SkyWalking Python $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Python version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Python version $VERSION. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-python-src-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-python-src-$VERSION.tgz.sha512. Check gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz. Build distribution from source code package by following this the build guide. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Python version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   
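Putting the checklist together, a reviewer's verification session might look like the following sketch (a non-authoritative example; it assumes curl and gpg are available and uses the candidate file names listed above):
export VERSION=x.y.z
curl -s https://dist.apache.org/repos/dist/release/skywalking/KEYS | gpg --import
curl -LO https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION/skywalking-python-src-$VERSION.tgz
curl -LO https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION/skywalking-python-src-$VERSION.tgz.asc
curl -LO https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION/skywalking-python-src-$VERSION.tgz.sha512
# verify the sha512 checksum and the GPG signature
shasum -c skywalking-python-src-$VERSION.tgz.sha512
gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz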
Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/python/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Python $VERSION is now released. SkyWalking Python: The Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Website: http://skywalking.apache.org/ SkyWalking Python Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-python/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Python Release Guide","url":"/docs/skywalking-python/latest/en/contribution/how-to-release/"},{"content":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the SkyWalking Python in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-python and skywalking, create a new milestone if needed. Update CHANGELOG.md and version in pyproject.toml.  Add your GPG public key to Apache SVN   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-python \u0026amp;\u0026amp; cd skywalking-python git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Python $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release Upload to Apache SVN svn co https://dist.apache.org/repos/dist/dev/skywalking/python release/skywalking/python mkdir -p release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking*.tgz release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking*.tgz.asc release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python*.tgz.sha512 release/skywalking/python/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add python/$VERSION \u0026amp;\u0026amp; svn commit python -m \u0026#34;Draft Apache SkyWalking-Python release $VERSION\u0026#34; Make the internal announcement First, generate a sha512sum for the source code package generated in last step:\nsha512sum release/skywalking/python/\u0026#34;$VERSION\u0026#34;/skywalking-python-src-\u0026#34;$VERSION\u0026#34;.tgz Send an announcement email to dev@ mailing list, please check all links before sending the email, the same as below.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION test build available Content: The test build of Apache SkyWalking Python $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Python version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Python version $VERSION. 
Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-python-src-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-python-src-$VERSION.tgz.sha512. Check gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz. Build distribution from source code package by following this the build guide. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Python version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/python/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Publish PyPI package After the official ASF release, we publish the packaged wheel to the PyPI index.\n Make sure the final upload is correct by using the test PyPI index make upload-test. Upload the final artifacts by running make upload.  
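Once the wheel is on PyPI, a quick smoke test in a throwaway virtual environment is a cheap sanity check. A sketch, assuming the agent is published under the apache-skywalking package name on PyPI:
# install the just-published agent into a clean venv and inspect its metadata
python -m venv /tmp/sw-pypi-check
/tmp/sw-pypi-check/bin/pip install apache-skywalking==$VERSION
/tmp/sw-pypi-check/bin/pip show apache-skywalking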
Publish Docker images After the release on GitHub, a GitHub Action will be triggered to build Docker images based on the latest code.\nImportant We announce the new release by drafting one on Github release page, following the previous convention.\nAn automation via GitHub Actions will automatically trigger upon the mentioned release event to build and upload Docker images to DockerHub.\nSee How-to-release-docker for a detailed description of manual release.\n Send ANNOUNCEMENT email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Python $VERSION is now released. SkyWalking Python: The Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Website: http://skywalking.apache.org/ SkyWalking Python Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-python/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Python Release Guide","url":"/docs/skywalking-python/next/en/contribution/how-to-release/"},{"content":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the SkyWalking Python in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-python and skywalking, create a new milestone if needed. Update CHANGELOG.md and version in pyproject.toml.  Add your GPG public key to Apache SVN   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-python \u0026amp;\u0026amp; cd skywalking-python git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Python $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release Upload to Apache SVN svn co https://dist.apache.org/repos/dist/dev/skywalking/python release/skywalking/python mkdir -p release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz.asc release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking-python*.tgz.sha512 release/skywalking/python/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add python/$VERSION \u0026amp;\u0026amp; svn commit python -m \u0026#34;Draft Apache SkyWalking-Python release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email, the same below.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION test build available Content: The test build of Apache SkyWalking Python $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Python version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Python version $VERSION. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. 
[ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-python-src-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-python-src-$VERSION.tgz.sha512. Check gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz. Build distribution from source code package by following this the build guide. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Python version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/python/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Python $VERSION is now released. SkyWalking Python: The Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. 
Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Website: http://skywalking.apache.org/ SkyWalking Python Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-python/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Python Release Guide","url":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-release/"},{"content":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the SkyWalking Rover in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-rover and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-rover \u0026amp;\u0026amp; cd skywalking-rover git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Rover v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-rover-${VERSION}-bin.tgz, apache-skywalking-rover-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz.asc skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking-rover*.tgz.sha512 skywalking/rover/\u0026#34;$VERSION\u0026#34; cd skywalking/rover \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Rover release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Rover version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Rover version $VERSION. 
Release notes: * https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-rover-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-rover-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-rover/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-rover/blob/v$VERSION/docs/en/guides/contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-rover-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-rover-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-rover-$VERSION-{src,bin}.tgz.asc skywalking-rover-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make container-generate build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Rover version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION https://dist.apache.org/repos/dist/release/skywalking/rover   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Rover $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Rover $VERSION is now released. SkyWalking Rover: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Rover Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-rover/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Rover Release Guide","url":"/docs/skywalking-rover/latest/en/guides/contribution/how-to-release/"},{"content":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the SkyWalking Rover in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-rover and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-rover \u0026amp;\u0026amp; cd skywalking-rover git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Rover v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-rover-${VERSION}-bin.tgz, apache-skywalking-rover-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz.asc skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking-rover*.tgz.sha512 skywalking/rover/\u0026#34;$VERSION\u0026#34; cd skywalking/rover \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Rover release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Rover version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Rover version $VERSION. 
Release notes: * https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-rover-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-rover-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-rover/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-rover/blob/v$VERSION/docs/en/guides/contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-rover-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-rover-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-rover-$VERSION-{src,bin}.tgz.asc skywalking-rover-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make container-generate build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Rover version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION https://dist.apache.org/repos/dist/release/skywalking/rover   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Rover $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Rover $VERSION is now released. SkyWalking Rover: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Rover Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-rover/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Rover Release Guide","url":"/docs/skywalking-rover/next/en/guides/contribution/how-to-release/"},{"content":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the SkyWalking Rover in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-rover and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-rover \u0026amp;\u0026amp; cd skywalking-rover git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Rover v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-rover-${VERSION}-bin.tgz, apache-skywalking-rover-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz.asc skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking-rover*.tgz.sha512 skywalking/rover/\u0026#34;$VERSION\u0026#34; cd skywalking/rover \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Rover release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Rover version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Rover version $VERSION. 
Release notes: * https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-rover-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-rover-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-rover/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-rover/blob/v$VERSION/docs/en/guides/contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-rover-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-rover-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-rover-$VERSION-{src,bin}.tgz.asc skywalking-rover-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make container-generate build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Rover version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION https://dist.apache.org/repos/dist/release/skywalking/rover   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Rover $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Rover $VERSION is now released. SkyWalking Rover: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Rover Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-rover/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Rover Release Guide","url":"/docs/skywalking-rover/v0.6.0/en/guides/contribution/how-to-release/"},{"content":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release the SkyWalking Satellite in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-satellite and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-satellite \u0026amp;\u0026amp; cd skywalking-satellite git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Satellite v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-satellite-${VERSION}-bin.tgz, apache-skywalking-satellite-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz.asc skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking-satellite*.tgz.sha512 skywalking/satellite/\u0026#34;$VERSION\u0026#34; cd skywalking/satellite \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Satellite release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] SkyWalking Satellite $VERSION test build available Content: The test build of SkyWalking Satellite $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-satellite-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-satellite-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/v$VERSION/docs/en/guides/contribution/How-to-release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Satellite version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Satellite version $VERSION. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-satellite-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-satellite-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/$VERSION/docs/en/guides/contribuation/How-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-satellite-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-satellite-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-satellite-$VERSION-{src,bin}.tgz.asc skywalking-satellite-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Satellite version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION https://dist.apache.org/repos/dist/release/skywalking/satellite   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Satellite $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Satellite $VERSION is now released. SkyWalking Satellite: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Satellite Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-satellite/blob/$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Satellite Release Guide","url":"/docs/skywalking-satellite/latest/en/guides/contribution/how-to-release/"},{"content":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release the SkyWalking Satellite in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-satellite and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
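For illustration only, appending a key could look like the following sketch (your Apache ID is a placeholder, and the final commit requires the PMC permission mentioned above):
# fetch only the top-level files of the release area (including KEYS), not the release artifacts
svn co --depth files https://dist.apache.org/repos/dist/release/skywalking/ skywalking-dist
cd skywalking-dist
# append (never overwrite) your key to the existing KEYS file
(gpg --list-sigs your-id@apache.org; gpg --armor --export your-id@apache.org) >> KEYS
svn commit -m \u0026#34;Append GPG key of your-id\u0026#34;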
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-satellite \u0026amp;\u0026amp; cd skywalking-satellite git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Satellite v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-satellite-${VERSION}-bin.tgz, apache-skywalking-satellite-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz.asc skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking-satellite*.tgz.sha512 skywalking/satellite/\u0026#34;$VERSION\u0026#34; cd skywalking/satellite \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Satellite release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] SkyWalking Satellite $VERSION test build available Content: The test build of SkyWalking Satellite $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-satellite-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-satellite-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/v$VERSION/docs/en/guides/contribution/How-to-release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Satellite version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Satellite version $VERSION. 
Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-satellite-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-satellite-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/$VERSION/docs/en/guides/contribuation/How-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-satellite-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-satellite-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-satellite-$VERSION-{src,bin}.tgz.asc skywalking-satellite-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Satellite version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION https://dist.apache.org/repos/dist/release/skywalking/satellite   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Satellite $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Satellite $VERSION is now released. SkyWalking Satellite: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Satellite Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-satellite/blob/$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Satellite Release Guide","url":"/docs/skywalking-satellite/next/en/guides/contribution/how-to-release/"},{"content":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release the SkyWalking Satellite in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-satellite and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-satellite \u0026amp;\u0026amp; cd skywalking-satellite git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Satellite v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-satellite-${VERSION}-bin.tgz, apache-skywalking-satellite-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz.asc skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking-satellite*.tgz.sha512 skywalking/satellite/\u0026#34;$VERSION\u0026#34; cd skywalking/satellite \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Satellite release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] SkyWalking Satellite $VERSION test build available Content: The test build of SkyWalking Satellite $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-satellite-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-satellite-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/v$VERSION/docs/en/guides/contribution/How-to-release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Satellite version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Satellite version $VERSION. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-satellite-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-satellite-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/$VERSION/docs/en/guides/contribuation/How-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-satellite-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-satellite-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-satellite-$VERSION-{src,bin}.tgz.asc skywalking-satellite-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Satellite version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION https://dist.apache.org/repos/dist/release/skywalking/satellite   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Satellite $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Satellite $VERSION is now released. SkyWalking Satellite: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Satellite Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-satellite/blob/$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Satellite Release Guide","url":"/docs/skywalking-satellite/v1.2.0/en/guides/contribution/how-to-release/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. 
The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/latest/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/next/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.0.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. 
Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.1.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.2.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.3.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. 
All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.4.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.5.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.6.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. 
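For reference (the page does not spell out the formula), the conventional Apdex score behind this ratio is: Apdex(T) = (satisfied_count + tolerating_count / 2) / total_count, where satisfied responses complete within T, tolerating responses complete within 4T, and slower responses count as frustrated.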
The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.7.0/en/setup/backend/apdex-threshold/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APISIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring The APISIX Prometheus plugin provides multi-dimensional metrics for the APISIX server, upstream, route, etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances.
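As a quick sanity check of the Set up steps above (a sketch only; the address and path shown are the plugin's usual defaults and must be adjusted to your export/plugin_attr configuration), you can confirm that APISIX is exposing Prometheus metrics before wiring up the collector:
curl -s http://127.0.0.1:9091/apisix/prometheus/metrics | head
# the output should contain apisix_* metric families, such as apisix_http_status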
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/latest/en/setup/backend/backend-apisix-monitoring/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/next/en/setup/backend/backend-apisix-monitoring/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/v9.3.0/en/setup/backend/backend-apisix-monitoring/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-apisix-monitoring/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-apisix-monitoring/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-apisix-monitoring/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-apisix-monitoring/"},{"content":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","title":"AWS API Gateway monitoring","url":"/docs/main/latest/en/setup/backend/backend-aws-api-gateway-monitoring/"},{"content":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. 
SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","title":"AWS API Gateway monitoring","url":"/docs/main/next/en/setup/backend/backend-aws-api-gateway-monitoring/"},{"content":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. 
The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","title":"AWS API Gateway monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-aws-api-gateway-monitoring/"},{"content":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","title":"AWS API Gateway monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-aws-api-gateway-monitoring/"},{"content":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. 
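The last Set up step above (create a CloudWatch metric stream that feeds the Firehose delivery stream, filtered to the AWS/ApiGateway namespace and using the OpenTelemetry 0.7 output format) can also be scripted instead of done in the console. A hedged AWS CLI sketch is shown below; the stream name, region, account ID, delivery-stream name, and IAM role are placeholders, and the delivery stream pointing at the AWS Kinesis Data Firehose receiver's HTTP endpoint is assumed to exist already.

```bash
# Create a CloudWatch metric stream for API Gateway metrics, delivered via Firehose
# in OpenTelemetry 0.7 format (placeholders in angle brackets must be replaced).
aws cloudwatch put-metric-stream \
  --name apigateway-metric-stream \
  --firehose-arn arn:aws:firehose:<region>:<account-id>:deliverystream/<delivery-stream-name> \
  --role-arn arn:aws:iam::<account-id>:role/<metric-stream-role> \
  --output-format opentelemetry0.7 \
  --include-filters Namespace=AWS/ApiGateway
```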
Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS API Gateway dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","title":"AWS API Gateway monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-aws-api-gateway-monitoring/"},{"content":"AWS Cloud EKS monitoring SkyWalking leverages the OpenTelemetry Collector with the AWS Container Insights Receiver to transfer EKS metrics to the OpenTelemetry receiver and into the Meter System.\nData flow  The OpenTelemetry Collector fetches metrics from EKS via the AWS Container Insights Receiver and pushes them to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expressions with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with the AWS Container Insights Receiver to EKS. Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details.\nEKS Monitoring The AWS Container Insights Receiver provides multi-dimensional metrics for the EKS cluster, nodes, services, etc. Accordingly, SkyWalking observes the status and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances, and the k8s services would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by the attribute job_name, whose value is aws-cloud-eks-monitoring.
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","title":"AWS Cloud EKS monitoring","url":"/docs/main/latest/en/setup/backend/backend-aws-eks-monitoring/"},{"content":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
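Reformatted for readability, the resource/job-name processor described next and the full collector configuration from the "OTEL Configuration Sample With AWS Container Insights Receiver" section of these pages reconstruct to roughly the following. The endpoint oap-service:11800 comes from that sample; replace it with your own OAP address.

```yaml
extensions:
  health_check:

receivers:
  awscontainerinsightreceiver:

processors:
  resource/job-name:
    attributes:
      - key: job_name
        value: aws-cloud-eks-monitoring # without this attribute, the OAP ignores the metrics
        action: insert

exporters:
  otlp:
    endpoint: oap-service:11800
    tls:
      insecure: true
  logging:
    loglevel: debug

service:
  pipelines:
    metrics:
      receivers: [awscontainerinsightreceiver]
      processors: [resource/job-name]
      exporters: [otlp, logging]
  extensions: [health_check]
```

This matches the amazon/aws-otel-collector deployment described in the Set up steps; only the exporter endpoint normally needs to change per environment.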
You could leverage an OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Note that if you don't specify the job_name attribute, SkyWalking OAP will ignore the metrics.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pods in the EKS cluster (namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pods in the EKS cluster (service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pods running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network TX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Instance Network TX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of the pods that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of the pods that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pods that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network RX error count of the pods that belong to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pods that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pods that belong to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information.\n","title":"AWS Cloud EKS monitoring","url":"/docs/main/next/en/setup/backend/backend-aws-eks-monitoring/"},{"content":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to the OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS. Config the SkyWalking OpenTelemetry receiver.  EKS Monitoring AWS Container Insights Receiver provides metrics in multiple dimensions for the EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s services would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by the attribute job_name, whose value is aws-cloud-eks-monitoring.
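Rendered as readable YAML, the flattened resource-processor snippet referenced throughout these entries corresponds to roughly the following OpenTelemetry Collector fragment (a sketch reconstructed from the text above; only the processor is shown):

```yaml
# Tags every metric with the job_name attribute that SkyWalking OAP
# uses to match the aws-cloud-eks-monitoring rules.
processors:
  resource/job-name:
    attributes:
      - key: job_name
        value: aws-cloud-eks-monitoring
        action: insert
```

The processor only takes effect once it is listed in the metrics pipeline, as the full configuration sample later in these entries shows.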
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","title":"AWS Cloud EKS monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-aws-eks-monitoring/"},{"content":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
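The OTEL Configuration Sample that each entry flattens into one run of text reconstructs to roughly the following collector configuration (the oap-service:11800 endpoint is the value used in the sample and stands in for your own OAP gRPC address):

```yaml
extensions:
  health_check:

receivers:
  awscontainerinsightreceiver:

processors:
  resource/job-name:
    attributes:
      - key: job_name
        value: aws-cloud-eks-monitoring
        action: insert

exporters:
  otlp:
    endpoint: oap-service:11800   # SkyWalking OAP OTLP/gRPC endpoint from the sample
    tls:
      insecure: true
  logging:
    loglevel: debug

service:
  pipelines:
    metrics:
      receivers: [awscontainerinsightreceiver]
      processors: [resource/job-name]
      exporters: [otlp, logging]
  extensions: [health_check]
```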
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","title":"AWS Cloud EKS monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-aws-eks-monitoring/"},{"content":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
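The Customizations note points at /config/otel-rules/aws-eks/, which holds MAL expression rules. Purely as an illustrative sketch of that rule format (the label names, expSuffix, and expression below are assumptions, not the rules SkyWalking ships), a rule deriving one of the node metrics from the table could look like:

```yaml
# Illustrative MAL rule sketch; verify against the shipped aws-eks rules.
metricPrefix: eks_cluster
expSuffix: instance(['ClusterName'], ['NodeName'], Layer.AWS_EKS)   # labels are assumed
metricsRules:
  - name: node_cpu_utilization   # exposed as eks_cluster_node_cpu_utilization
    exp: node_cpu_utilization.sum(['ClusterName', 'NodeName'])
```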
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","title":"AWS Cloud EKS monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-aws-eks-monitoring/"},{"content":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
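For the "Config SkyWalking OpenTelemetry receiver" step in the Set up list, the OAP side amounts to enabling the OTLP metrics handler and the aws-eks rules in application.yml. The fragment below is a from-memory sketch only; every key name and default value here is an assumption to verify against your OAP version:

```yaml
# Assumed shape of the OAP application.yml fragment; check your version.
receiver-otel:
  selector: ${SW_OTEL_RECEIVER:default}
  default:
    enabledHandlers: ${SW_OTEL_RECEIVER_ENABLED_HANDLERS:"otlp-metrics"}
    enabledOtelMetricsRules: ${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:"aws-eks/*"}
```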
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","title":"AWS Cloud EKS monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-aws-eks-monitoring/"},{"content":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
Refer to CloudWatch Metric Streams.  Read Monitoring AWS EKS and S3 with SkyWalking for more details.\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx server error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency ms aws_s3_request_latency Service The average elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency ms aws_s3_request_latency Service The average per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests count aws_s3_delete_requests Service The total number of HTTP requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests count aws_s3_delete_requests Service The number of HTTP GET requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests count aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests count aws_s3_delete_requests Service The number of HTTP DELETE requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud S3 dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","title":"AWS Cloud S3 monitoring","url":"/docs/main/latest/en/setup/backend/backend-aws-s3-monitoring/"},{"content":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages the AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of S3 to the OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collects metrics for S3, refer to S3 monitoring with CloudWatch. CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose. AWS Kinesis Data Firehose delivers metrics to the AWS Kinesis Data Firehose receiver through the HTTP endpoint.  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration. Create an Amazon Kinesis Data Firehose Delivery Stream, and set the AWS Kinesis Data Firehose receiver's address as the HTTP(s) Destination, refer to Create Delivery Stream. Create a CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above; set Select namespaces to AWS/S3 and Select output format to OpenTelemetry 0.7.
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","title":"AWS Cloud S3 monitoring","url":"/docs/main/next/en/setup/backend/backend-aws-s3-monitoring/"},{"content":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
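The metric-stream step above can also be expressed declaratively. The CloudFormation sketch below is an illustration under assumptions (the stream name and the Firehose/IAM role ARNs are placeholders to replace with your own), showing the AWS/S3 namespace filter and the OpenTelemetry 0.7 output format:

```yaml
# Illustrative CloudFormation sketch of the CloudWatch metric stream;
# the name and ARNs are placeholders.
Resources:
  S3MetricStream:
    Type: AWS::CloudWatch::MetricStream
    Properties:
      Name: skywalking-s3-metrics
      FirehoseArn: arn:aws:firehose:us-east-1:123456789012:deliverystream/skywalking-metrics
      RoleArn: arn:aws:iam::123456789012:role/metric-stream-to-firehose
      OutputFormat: opentelemetry0.7
      IncludeFilters:
        - Namespace: AWS/S3
```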
refer to CloudWatch Metric Streams  S3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","title":"AWS Cloud S3 monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-aws-s3-monitoring/"},{"content":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","title":"AWS Cloud S3 monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-aws-s3-monitoring/"},{"content":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","title":"AWS Cloud S3 monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-aws-s3-monitoring/"},{"content":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","title":"AWS Cloud S3 monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-aws-s3-monitoring/"},{"content":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. 
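Under the same assumptions as the S3 metric-stream sketch shown earlier, the only part of that definition that changes for DynamoDB is the namespace filter:

```yaml
# Relative to the S3 metric-stream sketch, only the filter changes.
OutputFormat: opentelemetry0.7
IncludeFilters:
  - Namespace: AWS/DynamoDB
```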
Refer to CloudWatch Metric Streams.  Read Monitoring DynamoDB with SkyWalking for more details.\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of DynamoDB. The AWS user ID is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful requests Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Count  scan_returned_item_count query_returned_item_count The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors write_system_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Conditional Check Failed Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","title":"AWS DynamoDb monitoring","url":"/docs/main/latest/en/setup/backend/backend-aws-dynamodb-monitoring/"},{"content":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Firehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to the SkyWalking OAP Server via Amazon Kinesis Data Firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration. Create an Amazon Kinesis Data Firehose Delivery Stream, and set the AWS Kinesis Data Firehose receiver's address as the HTTP(s) Destination, refer to Create Delivery Stream.
Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","title":"AWS DynamoDb monitoring","url":"/docs/main/next/en/setup/backend/backend-aws-dynamodb-monitoring/"},{"content":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  DynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. 
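As a rough illustration of what a rule under that directory looks like, the following sketch uses the general MAL rule layout (metricPrefix, an expSuffix, and metricsRules holding MAL expressions). The incoming metric name, the grouping label, and the expression are assumptions for illustration, not copies of the shipped aws-dynamodb files.
# Illustrative sketch only -- verify all names against the real files in /config/otel-rules/aws-dynamodb.
metricPrefix: aws_dynamodb
expSuffix: service(['aws_account_id'], Layer.AWS_DYNAMODB)
metricsRules:
  # Sum the consumed read capacity units reported through CloudWatch, grouped per AWS account.
  - name: consumed_read_capacity_units
    exp: consumed_read_capacity_units_sum.sum(['aws_account_id']).downsampling(SUM)
A matching dashboard panel would then query the resulting metric name (here, aws_dynamodb_consumed_read_capacity_units), so the panel configuration and the rule need to stay in sync.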
The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","title":"AWS DynamoDb monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-aws-dynamodb-monitoring/"},{"content":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. 
Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","title":"AWS DynamoDb monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-aws-dynamodb-monitoring/"},{"content":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. 
Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","title":"AWS DynamoDb monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-aws-dynamodb-monitoring/"},{"content":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). 
Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","title":"AWS DynamoDb monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-aws-dynamodb-monitoring/"},{"content":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose 
receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","title":"AWS Firehose receiver","url":"/docs/main/latest/en/setup/backend/aws-firehose-receiver/"},{"content":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. 
(Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","title":"AWS Firehose receiver","url":"/docs/main/next/en/setup/backend/aws-firehose-receiver/"},{"content":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443 (refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications. 
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","title":"AWS Firehose receiver","url":"/docs/main/v9.4.0/en/setup/backend/aws-firehose-receiver/"},{"content":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","title":"AWS Firehose receiver","url":"/docs/main/v9.5.0/en/setup/backend/aws-firehose-receiver/"},{"content":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","title":"AWS Firehose receiver","url":"/docs/main/v9.6.0/en/setup/backend/aws-firehose-receiver/"},{"content":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","title":"AWS Firehose receiver","url":"/docs/main/v9.7.0/en/setup/backend/aws-firehose-receiver/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. 
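Before moving on from the aws-firehose receiver entries above: if you take the direct-TLS option from their Notice rather than fronting the OAP with a proxy, the settings named there (enableTLS, tlsKeyPath, tlsCertChainPath) correspond to an application.yml fragment roughly like the sketch below. The aws-firehose module name matches the module.aws-firehose.provider entry shown in the boot-parameter listing later on this page; the certificate paths are hypothetical placeholders, and the exact keys should be verified against the config/application.yml in your distribution.
aws-firehose:
  default:
    enableTLS: true                              # accept HTTPS from Kinesis Data Firehose directly
    tlsKeyPath: /path/to/oap-firehose.key        # hypothetical private key path
    tlsCertChainPath: /path/to/oap-firehose.crt  # hypothetical certificate chain path
The selector line is omitted in this sketch because only one provider is assumed, which application.yml treats as optional.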
SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/latest/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly by default, the OAP server cluster would face the problem of load imbalance. This issue becomes severe in high-traffic load scenarios. In this doc, we will introduce two means to solve the problem.\nSkyWalking Satellite Project SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\nEnvoy Filter to Limit Connections Per OAP Instance If you don\u0026rsquo;t want to deploy skywalking-satellite, you can enable Istio sidecar injection for SkyWalking OAP Pods,\nkubectl label namespace $SKYWALKING_NAMESPACE istio-injection=enabled kubectl -n $SKYWALKING_NAMESPACE rollout restart -l app=skywalking,component=oap and apply an EnvoyFilter to limit the connections per OAP instance, so that each of the OAP instance can have similar amount of gRPC connections.\nBefore that, you need to calculate the number of connections for each OAP instance as follows:\nNUMBER_OF_SERVICE_PODS=\u0026lt;the-number-of-service-pods-that-are-monitored-by-skywalking\u0026gt; # Each service Pod has 2 connections to OAP NUMBER_OF_TOTAL_CONNECTIONS=$((NUMBER_OF_SERVICE_PODS * 2)) # Divide the total connections by the replicas of OAP NUMBER_OF_CONNECTIONS_PER_OAP=$((NUMBER_OF_TOTAL_CONNECTIONS / $NUMBER_OF_OAP_REPLICAS)) And you can apply an EnvoyFilter to limit connections:\nkubectl -n $SKYWALKING_NAMESPACE apply -f - \u0026lt;\u0026lt;EOF apiVersion: networking.istio.io/v1alpha3 kind: EnvoyFilter metadata: name: oap-limit-connections namespace: istio-system spec: configPatches: - applyTo: NETWORK_FILTER match: context: ANY listener: filterChain: filter: name: envoy.filters.network.http_connection_manager portNumber: 11800 patch: operation: INSERT_BEFORE value: name: envoy.filters.network.ConnectionLimit typed_config: \u0026#39;@type\u0026#39;: type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit max_connections: $NUMBER_OF_CONNECTIONS_PER_OAP stat_prefix: envoy_filters_network_connection_limit workloadSelector: labels: app: oap EOF By this approach, we can limit the connections to port 11800 per OAP instance, but there is another corner case when the amount 
of service Pods is huge. Because the limit applies at the connection level, and each service Pod has 2 connections to OAP port 11800 (one for Envoy ALS to send access logs, the other for Envoy metrics), the traffic on the 2 connections can vary greatly. If the number of service Pods is large enough, an extreme case might happen where one OAP instance serves all Envoy metrics connections while another serves all Envoy ALS connections, which would be unbalanced again. To solve this, we can split the ALS connections onto a dedicated port and limit the connections to that port only.\nYou can set the environment variable SW_ALS_GRPC_PORT to a port number other than 0 when deploying SkyWalking, and limit connections to that port only in the EnvoyFilter:\nexport SW_ALS_GRPC_PORT=11802 kubectl -n $SKYWALKING_NAMESPACE apply -f - \u0026lt;\u0026lt;EOF apiVersion: networking.istio.io/v1alpha3 kind: EnvoyFilter metadata: name: oap-limit-connections namespace: istio-system spec: configPatches: - applyTo: NETWORK_FILTER match: context: ANY listener: filterChain: filter: name: envoy.filters.network.http_connection_manager portNumber: $SW_ALS_GRPC_PORT patch: operation: INSERT_BEFORE value: name: envoy.filters.network.ConnectionLimit typed_config: \u0026#39;@type\u0026#39;: type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit max_connections: $NUMBER_OF_CONNECTIONS_PER_OAP stat_prefix: envoy_filters_network_connection_limit workloadSelector: labels: app: oap EOF ","title":"Backend Load Balancer","url":"/docs/main/next/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be very serious in high-traffic load scenarios. Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/Envoy reaches the OAP. The major difference between Satellite and other widely used proxies, like Envoy, is that Satellite would route the data according to contents rather than connections, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.0.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.1.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.2.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.3.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.4.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.5.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.6.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.7.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 or JDK17.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\nKey Parameters In The Booting Logs After the OAP booting process completed, you should be able to see all important parameters listed in the logs.\n2023-11-06 21:10:45,988 org.apache.skywalking.oap.server.starter.OAPServerBootstrap 67 [main] INFO [] - The key booting parameters of Apache SkyWalking OAP are listed as following. 
Running Mode | null TTL.metrics | 7 TTL.record | 3 Version | 9.7.0-SNAPSHOT-92af797 module.agent-analyzer.provider | default module.ai-pipeline.provider | default module.alarm.provider | default module.aws-firehose.provider | default module.cluster.provider | standalone module.configuration-discovery.provider | default module.configuration.provider | none module.core.provider | default module.envoy-metric.provider | default module.event-analyzer.provider | default module.log-analyzer.provider | default module.logql.provider | default module.promql.provider | default module.query.provider | graphql module.receiver-browser.provider | default module.receiver-clr.provider | default module.receiver-ebpf.provider | default module.receiver-event.provider | default module.receiver-jvm.provider | default module.receiver-log.provider | default module.receiver-meter.provider | default module.receiver-otel.provider | default module.receiver-profile.provider | default module.receiver-register.provider | default module.receiver-sharing-server.provider | default module.receiver-telegraf.provider | default module.receiver-trace.provider | default module.service-mesh.provider | default module.storage.provider | h2 module.telemetry.provider | none oap.external.grpc.host | 0.0.0.0 oap.external.grpc.port | 11800 oap.external.http.host | 0.0.0.0 oap.external.http.port | 12800 oap.internal.comm.host | 0.0.0.0 oap.internal.comm.port | 11800  oap.external.grpc.host:oap.external.grpc.port is for reporting telemetry data through gRPC channel, including native agents, OTEL. oap.external.http.host:oap.external.http.port is for reporting telemetry data through HTTP channel and query, including native GraphQL(UI), PromQL, LogQL. oap.internal.comm.host:oap.internal.comm.port is for OAP cluster internal communication via gRPC/HTTP2 protocol. The default host(0.0.0.0) is not suitable for the cluster mode, unless in k8s deployment. Please read Cluster Doc to understand how to set up the SkyWalking backend in the cluster mode.  application.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\n","title":"Backend setup","url":"/docs/main/latest/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: Java 11/17/21.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\nKey Parameters In The Booting Logs After the OAP booting process completed, you should be able to see all important parameters listed in the logs.\n2023-11-06 21:10:45,988 org.apache.skywalking.oap.server.starter.OAPServerBootstrap 67 [main] INFO [] - The key booting parameters of Apache SkyWalking OAP are listed as following. Running Mode | null TTL.metrics | 7 TTL.record | 3 Version | 9.7.0-SNAPSHOT-92af797 module.agent-analyzer.provider | default module.ai-pipeline.provider | default module.alarm.provider | default module.aws-firehose.provider | default module.cluster.provider | standalone module.configuration-discovery.provider | default module.configuration.provider | none module.core.provider | default module.envoy-metric.provider | default module.event-analyzer.provider | default module.log-analyzer.provider | default module.logql.provider | default module.promql.provider | default module.query.provider | graphql module.receiver-browser.provider | default module.receiver-clr.provider | default module.receiver-ebpf.provider | default module.receiver-event.provider | default module.receiver-jvm.provider | default module.receiver-log.provider | default module.receiver-meter.provider | default module.receiver-otel.provider | default module.receiver-profile.provider | default module.receiver-register.provider | default module.receiver-sharing-server.provider | default module.receiver-telegraf.provider | default module.receiver-trace.provider | default module.service-mesh.provider | default module.storage.provider | h2 module.telemetry.provider | none oap.external.grpc.host | 0.0.0.0 oap.external.grpc.port | 11800 oap.external.http.host | 0.0.0.0 oap.external.http.port | 12800 oap.internal.comm.host | 0.0.0.0 oap.internal.comm.port | 11800  oap.external.grpc.host:oap.external.grpc.port is for reporting telemetry data through gRPC channel, including native agents, OTEL. 
oap.external.http.host:oap.external.http.port is for reporting telemetry data through HTTP channel and query, including native GraphQL(UI), PromQL, LogQL. oap.internal.comm.host:oap.internal.comm.port is for OAP cluster internal communication via gRPC/HTTP2 protocol. The default host(0.0.0.0) is not suitable for the cluster mode, unless in k8s deployment. Please read Cluster Doc to understand how to set up the SkyWalking backend in the cluster mode.  application.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  
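As a concrete illustration of the optional-module behaviour described above, the following sketch is based on the telemetry module as it appears in the shipped application.yml; treat the exact environment-variable names and defaults as illustrative rather than authoritative:

telemetry:
  selector: ${SW_TELEMETRY:none}  # "none" keeps an empty shell provider with no actual logic
  none:
  prometheus:
    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
  # Setting the selector to "-" (for example, SW_TELEMETRY=-) excludes the whole telemetry
  # module at runtime, as if it were deleted from this file.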
FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Queries and storage at each time dimension (minute, hour, day, and month metrics indexes) are timezone-related when the time is formatted.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking's OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\n","title":"Backend setup","url":"/docs/main/next/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking's backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. The UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you start, you should know that the main purpose of the quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are not our goals.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and ip/host should be where your backend is.\n UI listens on port 8080 and requests 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying SkyWalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n All native agents and probes, either language-based or mesh probes, use the gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the Jetty service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in the Jetty service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn about other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document. The core concept behind this settings file is that the SkyWalking collector is based on a pure modular design. End users can switch or assemble the collector features according to their own requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If there is only one provider listed, the selector is optional and can be omitted. Level 3. 
Settings of the provider.  Example:\nstorage:\n  selector: mysql # the mysql storage will actually be activated, while the h2 storage takes no effect\n  h2:\n    driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}\n    url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}\n    user: ${SW_STORAGE_H2_USER:sa}\n    metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}\n  mysql:\n    properties:\n      jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}\n      dataSource.user: ${SW_DATA_SOURCE_USER:root}\n      dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}\n      dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}\n      dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}\n      dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}\n      dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}\n    metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}\n  # other configurations\nstorage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, ... metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic; a typical example is telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against trying to change the APIs of those modules, unless you understand the SkyWalking project and its code very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high-throughput processing capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details. Query. Provides query interfaces to the UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Queries and storage at each time dimension (minute, hour, day, and month metrics indexes) are timezone-related when the time is formatted.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking's OAP backend chooses the OS default timezone. If you want to override it, please follow the Java and OS documents.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on the browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, in order to reduce memory, network and storage space usages, SkyWalking saves base64-encoded ID(s) only in metrics entities. But these tools usually don't support nested queries, and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.0.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:driver:${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}url:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.1.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:driver:${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}url:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.2.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.3.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.4.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.5.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.6.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 or JDK17.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\nKey Parameters In The Booting Logs After the OAP booting process completed, you should be able to see all important parameters listed in the logs.\n2023-11-06 21:10:45,988 org.apache.skywalking.oap.server.starter.OAPServerBootstrap 67 [main] INFO [] - The key booting parameters of Apache SkyWalking OAP are listed as following. 
Running Mode | null TTL.metrics | 7 TTL.record | 3 Version | 9.7.0-SNAPSHOT-92af797 module.agent-analyzer.provider | default module.ai-pipeline.provider | default module.alarm.provider | default module.aws-firehose.provider | default module.cluster.provider | standalone module.configuration-discovery.provider | default module.configuration.provider | none module.core.provider | default module.envoy-metric.provider | default module.event-analyzer.provider | default module.log-analyzer.provider | default module.logql.provider | default module.promql.provider | default module.query.provider | graphql module.receiver-browser.provider | default module.receiver-clr.provider | default module.receiver-ebpf.provider | default module.receiver-event.provider | default module.receiver-jvm.provider | default module.receiver-log.provider | default module.receiver-meter.provider | default module.receiver-otel.provider | default module.receiver-profile.provider | default module.receiver-register.provider | default module.receiver-sharing-server.provider | default module.receiver-telegraf.provider | default module.receiver-trace.provider | default module.service-mesh.provider | default module.storage.provider | h2 module.telemetry.provider | none oap.external.grpc.host | 0.0.0.0 oap.external.grpc.port | 11800 oap.external.http.host | 0.0.0.0 oap.external.http.port | 12800 oap.internal.comm.host | 0.0.0.0 oap.internal.comm.port | 11800  oap.external.grpc.host:oap.external.grpc.port is for reporting telemetry data through gRPC channel, including native agents, OTEL. oap.external.http.host:oap.external.http.port is for reporting telemetry data through HTTP channel and query, including native GraphQL(UI), PromQL, LogQL. oap.internal.comm.host:oap.internal.comm.port is for OAP cluster internal communication via gRPC/HTTP2 protocol. The default host(0.0.0.0) is not suitable for the cluster mode, unless in k8s deployment. Please read Cluster Doc to understand how to set up the SkyWalking backend in the cluster mode.  application.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. Some optional modules have a provider implementation called none, meaning that it only provides a shell with no actual logic; telemetry is a typical example. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high-throughput processing capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details. Query. Provides query interfaces to the UI. Receiver and Fetcher. Exposes the service to the agents and probes, or reads telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Queries and storage at each time dimension (minute, hour, day, and month metrics indexes) are related to the timezone when formatting time.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\n","title":"Backend setup","url":"/docs/main/v9.7.0/en/setup/backend/backend-setup/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL and its compatible databases PostgreSQL and its compatible databases BanyanDB(alpha stage)  H2 is the default storage option in the distribution package. It is recommended to use H2 for testing and development ONLY.
Elasticsearch and OpenSearch are recommended for production environments, especially for large-scale deployments. MySQL and PostgreSQL are recommended for production environments with medium-scale deployments, especially with low trace and log sampling rates. Some of their compatible databases, such as TiDB and AWS Aurora, may support a larger scale better.\nBanyanDB is going to be our next-generation storage solution. It is still in the alpha stage, but it has shown high potential performance improvements: less than 50% CPU usage and 50% memory usage, with 40% of the disk volume, compared to Elasticsearch at the same scale with 100% sampling. We are looking for early adopters, and it is expected to become our first-class recommended storage option from 2024.\n","title":"Backend storage","url":"/docs/main/latest/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL and its compatible databases PostgreSQL and its compatible databases BanyanDB(alpha stage)  H2 is the default storage option in the distribution package. It is recommended to use H2 for testing and development ONLY. Elasticsearch and OpenSearch are recommended for production environments, especially for large-scale deployments. MySQL and PostgreSQL are recommended for production environments with medium-scale deployments, especially with low trace and log sampling rates. Some of their compatible databases, such as TiDB and AWS Aurora, may support a larger scale better.\nBanyanDB is going to be our next-generation storage solution. It is still in the alpha stage, but it has shown high potential performance improvements: less than 50% CPU usage and 50% memory usage, with 40% of the disk volume, compared to Elasticsearch at the same scale with 100% sampling. We are looking for early adopters, and it is expected to become our first-class recommended storage option from 2024.\n","title":"Backend storage","url":"/docs/main/next/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL TiDB InfluxDB PostgreSQL IoTDB  H2 Activate H2 as storage, and set storage provider to H2 In-Memory Databases. It is the default in the distribution package. Please read Database URL Overview in the H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11.
So please choose the suitable ElasticSearch version according to your usage.\nSince 8.8.0, SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selector for different ElasticSearch server side version anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_SUPERDATASET_STORAGE_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. Also, this factor effects Zipkin and Jaeger traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. 
It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more), if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are commended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. 
If the information exists in the file, the user/password will be overridden.\nAdvanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following the ElasticSearch doc.\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face query error at trace page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official document, since they have a direct impact on the performance of ElasticSearch.\nElasticSearch with Zipkin trace extension This implementation is very similar to elasticsearch, except that it extends to support Zipkin span storage. The configurations are largely the same.\nstorage:selector:${SW_STORAGE:zipkin-elasticsearch}zipkin-elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}# Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.htmlbulkActions:${SW_STORAGE_ES_BULK_ACTIONS:2000}# Execute the bulk every 2000 requestsbulkSize:${SW_STORAGE_ES_BULK_SIZE:20}# flush the bulk every 20mbflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsAbout Namespace When namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. Only part of the settings are listed here.
See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nTiDB Tested TiDB Server 4.0.8 version and MySQL Client driver 8.0.13 version are currently available. Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nInfluxDB InfluxDB storage provides a time-series database as a new storage option.\nstorage:selector:${SW_STORAGE:influxdb}influxdb:url:${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}user:${SW_STORAGE_INFLUXDB_USER:root}password:${SW_STORAGE_INFLUXDB_PASSWORD:}database:${SW_STORAGE_INFLUXDB_DATABASE:skywalking}actions:${SW_STORAGE_INFLUXDB_ACTIONS:1000}# the number of actions to collectduration:${SW_STORAGE_INFLUXDB_DURATION:1000}# the time to wait at most (milliseconds)fetchTaskLogMaxSize:${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000}# the max number of fetch task log in a requestAll connection related settings, including URL link, username, and password are found in application.yml. For metadata storage provider settings, refer to the configurations of H2/MySQL above.\nPostgreSQL PostgreSQL jdbc driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. 
Only part of the settings are listed here. Please follow HikariCP connection pool document for full settings.\nIoTDB IoTDB is a time-series database from Apache, which is one of the storage plugin options.\nIoTDB storage plugin is still in progress. Its efficiency will improve in the future.\nstorage:selector:${SW_STORAGE:iotdb}iotdb:host:${SW_STORAGE_IOTDB_HOST:127.0.0.1}rpcPort:${SW_STORAGE_IOTDB_RPC_PORT:6667}username:${SW_STORAGE_IOTDB_USERNAME:root}password:${SW_STORAGE_IOTDB_PASSWORD:root}storageGroup:${SW_STORAGE_IOTDB_STORAGE_GROUP:root.skywalking}sessionPoolSize:${SW_STORAGE_IOTDB_SESSIONPOOL_SIZE:8}# If it\u0026#39;s zero, the SessionPool size will be 2*CPU_CoresfetchTaskLogMaxSize:${SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE:1000}# the max number of fetch task log in a requestAll connection related settings, including host, rpcPort, username, and password are found in application.yml. Please ensure the IoTDB version \u0026gt;= 0.12.3.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","title":"Backend storage","url":"/docs/main/v9.0.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. 
So please choose the suitable ElasticSearch version according to your usage.\nSince 8.8.0, SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_SUPERDATASET_STORAGE_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. Also, this factor effects Zipkin and Jaeger traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. 
It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. 
If the information exists in the file, the user/password will be overridden.\nAdvanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nTiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. 
Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requestFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","title":"Backend storage","url":"/docs/main/v9.1.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nSince 9.2.0, SkyWalking provides no-sharding/one-index mode to merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. After merge all indices, the following indices are available:\n sw_ui_template sw_metrics-all-${day-format} sw_log-${day-format} sw_segment-${day-format} sw_browser_error_log-${day-format} sw_zipkin_span-${day-format} sw_records-all-${day-format}   Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING). 
Set it to true could shard metrics indices into multi-physical indices as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.\n Since 8.8.0, SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_SUPERDATASET_STORAGE_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin and Jaeger traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. 
This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nAdvanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. 
See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nTiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requestFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","title":"Backend storage","url":"/docs/main/v9.2.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL MySQL-Sharding(Shardingsphere-Proxy 5.1.2) TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.6 2.4.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. 
slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. 
Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including the URL, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nMySQL-Sharding The MySQL-Sharding plugin provides MySQL database sharding and table sharding. This feature leverages Shardingsphere-Proxy to manage the JDBC connections between the OAP server and multiple database instances, and routes data to the sharded databases and tables according to the sharding rules.\nShardingsphere-Proxy 5.1.2 and MySQL Client driver 8.0.13 have been tested and are currently available. Activate MySQL and Shardingsphere-Proxy as storage, and set storage provider to mysql-sharding.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql-sharding}mysql-sharding:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:13307/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}# The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml# The dataSource name should include the prefix \u0026#34;ds_\u0026#34; and be separated by \u0026#34;,\u0026#34;dataSources:${SW_JDBC_SHARDING_DATA_SOURCES:ds_0,ds_1}TiDB TiDB Server 4.0.8 and MySQL Client driver 8.0.13 have been tested and are currently available. 
Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
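Like the other storage options on this page, it is activated through the storage selector; a minimal sketch of that selector line (assuming the same SW_STORAGE variable used throughout this page, with the full option list shown next):\nstorage:selector:${SW_STORAGE:banyandb}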
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","title":"Backend storage","url":"/docs/main/v9.3.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL MySQL-Sharding(Shardingsphere-Proxy 5.3.1) TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.6 2.4.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. 
slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
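# For example (an illustrative note, not part of the shipped configuration): with the defaults above, indexShardsNumber 1 and superDatasetIndexShardsFactor 5 give each super-dataset index, such as sw_segment, 1 * 5 = 5 shards.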
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nMySQL-Sharding MySQL-Sharding plugin provides the MySQL database sharding and table sharding, this feature leverage Shardingsphere-Proxy to manage the JDBC between OAP and multi-database instances, and according to the sharding rules do routing to the database and table sharding.\nTested Shardingsphere-Proxy 5.3.1 version, and MySQL Client driver 8.0.13 version is currently available. Activate MySQL and Shardingsphere-Proxy as storage, and set storage provider to mysql-sharding.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql-sharding}mysql-sharding:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:13307/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}# The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml# The dataSource name should include the prefix \u0026#34;ds_\u0026#34; and separated by \u0026#34;,\u0026#34;dataSources:${SW_JDBC_SHARDING_DATA_SOURCES:ds_0,ds_1}TiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. 
Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","title":"Backend storage","url":"/docs/main/v9.4.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.6 2.4.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. 
slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
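# For example (an illustrative note, not part of the shipped configuration): with the defaults above, indexShardsNumber 1 and superDatasetIndexShardsFactor 5 give each super-dataset index, such as sw_segment, 1 * 5 = 5 shards.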
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","title":"Backend storage","url":"/docs/main/v9.5.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. 
If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
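# For example (an illustrative note, not part of the shipped configuration): with the defaults above, indexShardsNumber 1 and superDatasetIndexShardsFactor 5 give each super-dataset index, such as sw_segment, 1 * 5 = 5 shards.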
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
The value could be increased to 5 (or more) if users can ensure that a single index is able to hold the metrics and traces for those days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This affects the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than that of the records (traces), so this helps balance the shards in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should add an extra dayStep to your TTL. For example, if you want TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.
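For instance, a minimal application.yml sketch of that scenario, assuming the usual core-module TTL settings recordDataTTL and metricsDataTTL (the values below are illustrative for this example, not defaults):\ncore:default:recordDataTTL:${SW_CORE_RECORD_DATA_TTL:40}# 30 days of retention plus one extra dayStepmetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:40}storage:elasticsearch:dayStep:${SW_STORAGE_DAY_STEP:10}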
Secrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the absolute path of the secrets management file. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference from setting the user, password, and trustStorePass configs in the application.yaml file is that the Secrets Management File is watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update is only applied after the OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and the Elasticsearch index number_of_shards/number_of_replicas. You can also specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following the ElasticSearch doc.\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in the index settings doc.\nThe settings are in JSON format. The index name here is the logical entity name, which should exclude the ${SW_NAMESPACE} prefix (sw by default), e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } In the YAML file, this configuration looks like this:\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml and set the values based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation, since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: The MySQL driver is NOT allowed in the Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including the URL, username, and password, are found in application.yml. Only part of the settings are listed here; see the HikariCP connection pool document for the full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nIn theory, all other databases that are compatible with the MySQL protocol, such as TiDB, should be able to use this storage plugin. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.
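For example, a minimal sketch of pointing the MySQL storage provider at TiDB (the host name tidb-host and the swtest database are placeholders, and 4000 is TiDB\u0026rsquo;s usual default port):\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://tidb-host:4000/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}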
PostgreSQL The PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including the URL, username, and password, are found in application.yml. Only part of the settings are listed here; please follow the HikariCP connection pool document for the full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of the BanyanDB and BanyanDB Java Client subprojects.\n","title":"Backend storage","url":"/docs/main/v9.6.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL and its compatible databases PostgreSQL and its compatible databases BanyanDB (alpha stage)  H2 is the default storage option in the distribution package. It is recommended to use H2 for testing and development ONLY. Elasticsearch and OpenSearch are recommended for production environments, especially for large-scale deployments. MySQL and PostgreSQL are recommended for production environments with medium-scale deployments, especially for low trace and log sampling rates. Some of their compatible databases may support larger scale better, such as TiDB and AWS Aurora.\nBanyanDB is going to be our next-generation storage solution. It is still in the alpha stage, but it has shown high potential performance improvement: less than 50% CPU usage and 50% memory usage, with 40% of the disk volume, compared to Elasticsearch at the same scale with 100% sampling. We are looking for early adopters, and it is expected to become our first-class recommended storage option from 2024.\n","title":"Backend storage","url":"/docs/main/v9.7.0/en/setup/backend/backend-storage/"},{"content":"Background Write Ahead Logging (WAL) is a technique used in databases to ensure that data is not lost due to system crashes or other failures. The basic idea of WAL is to log changes to a database in a separate file before applying them to the database itself. This way, if there is a system failure, the database can be recovered by replaying the log of changes from the WAL file. BanyanDB leverages the WAL to enhance the data buffer for schema resource writing. In such a system, write operations are first written to the WAL file before being applied to the internal buffer. This ensures that the log is written to disk before the actual data is written. Hence the term \u0026ldquo;write ahead\u0026rdquo;.\nFormat A segment refers to a block of data in the WAL file that contains a sequence of database changes. Once rotate is invoked, a new segment is created to continue logging subsequent changes. 
A \u0026ldquo;WALEntry\u0026rdquo; is a data unit representing a series of changes to a Series. Each WALEntry is written to a segment.\nA WALEntry contains the following fields:\n Length:8 bytes, the length of the WALEntry. Series ID:8 bytes, the same as the request Series ID. Count:4 bytes, how many binary values/timestamps are in one WALEntry. Timestamp:8 bytes. Binary Length:2 bytes. Binary: value in the write request.  Write process The writing process in WAL is as follows:\n First, the changes are written to the write buffer. Those with the same series ID go into the same WALEntry. When the buffer is full, the WALEntry is created, then flushed to the disk. WAL can optionally use the compression algorithm snappy to compress the data on disk. Each WALEntry is appended to the tail of the WAL file on the disk.  When entries in the buffer are flushed to the disk, the callback function returned by the write operation is invoked. You can ignore this function to improve the writing performance, but it risks losing data.\nRead WAL A client can read a single segment by its segment ID. When opening the segment file, the reader will decompress the WAL file if the data was compressed during writing.\nRotation WAL supports a rotation operation to switch to a new segment. The operation closes the currently open segment and opens a new one, returning the closed segment details.\nDelete A client can delete a segment closed by the rotate operation.\nConfiguration BanyanDB WAL has the following configuration options:\n   Name Default Value Introduction     wal_compression true Compress the WAL entry or not   wal_file_size 64MB The size of the WAL file   wal_buffer_size 16kB The size of the WAL buffer.    ","title":"Background","url":"/docs/skywalking-banyandb/latest/concept/wal/"},{"content":"Background Write Ahead Logging (WAL) is a technique used in databases to ensure that data is not lost due to system crashes or other failures. The basic idea of WAL is to log changes to a database in a separate file before applying them to the database itself. This way, if there is a system failure, the database can be recovered by replaying the log of changes from the WAL file. BanyanDB leverages the WAL to enhance the data buffer for schema resource writing. In such a system, write operations are first written to the WAL file before being applied to the internal buffer. This ensures that the log is written to disk before the actual data is written. Hence the term \u0026ldquo;write ahead\u0026rdquo;.\nFormat A segment refers to a block of data in the WAL file that contains a sequence of database changes. Once rotate is invoked, a new segment is created to continue logging subsequent changes. A \u0026ldquo;WALEntry\u0026rdquo; is a data unit representing a series of changes to a Series. Each WALEntry is written to a segment.\nA WALEntry contains the following fields:\n Length:8 bytes, the length of the WALEntry. Series ID:8 bytes, the same as the request Series ID. Count:4 bytes, how many binary values/timestamps are in one WALEntry. Timestamp:8 bytes. Binary Length:2 bytes. Binary: value in the write request.  Write process The writing process in WAL is as follows:\n First, the changes are written to the write buffer. Those with the same series ID go into the same WALEntry. When the buffer is full, the WALEntry is created, then flushed to the disk. WAL can optionally use the compression algorithm snappy to compress the data on disk. Each WALEntry is appended to the tail of the WAL file on the disk.  
When entries in the buffer are flushed to the disk, the callback function returned by the write operation is invoked. You can ignore this function to improve the writing performance, but it risks losing data.\nRead WAL A client could read a single segment by a segment id. When opening the segment file, the reader will decompress the WAL file if the writing compresses the data.\nRotation WAL supports rotation operation to switch to a new segment. The operation closes the currently open segment and opens a new one, returning the closed segment details.\nDelete A client could delete a segment closed by the rotate operation.\nconfiguration BanyanDB WAL has the following configuration options:\n   Name Default Value Introduction     wal_compression true Compress the WAL entry or not   wal_file_size 64MB The size of the WAL file   wal_buffer_size 16kB The size of WAL buffer.    ","title":"Background","url":"/docs/skywalking-banyandb/v0.5.0/concept/wal/"},{"content":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nThe OAP requires BanyanDB 0.5 server. As BanyanDB is still in the beta phase, we don\u0026rsquo;t provide any compatibility besides the required version.\nstorage:banyandb:targets:${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","title":"BanyanDB","url":"/docs/main/latest/en/setup/backend/storages/banyandb/"},{"content":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nThe OAP requires BanyanDB 0.5 server. 
As BanyanDB is still in the beta phase, we don\u0026rsquo;t provide any compatibility besides the required version.\nstorage:banyandb:targets:${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","title":"BanyanDB","url":"/docs/main/next/en/setup/backend/storages/banyandb/"},{"content":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nThe OAP requires BanyanDB 0.5 server. As BanyanDB is still in the beta phase, we don\u0026rsquo;t provide any compatibility besides the required version.\nstorage:banyandb:targets:${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","title":"BanyanDB","url":"/docs/main/v9.7.0/en/setup/backend/storages/banyandb/"},{"content":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises \u0026ldquo;Liaison Nodes\u0026rdquo;, \u0026ldquo;Data Nodes\u0026rdquo;, and \u0026ldquo;Meta Nodes\u0026rdquo;. This structure allows for effectively distributing and managing time-series data within the system.\n1. Architectural Overview A BanyanDB installation includes three distinct types of nodes: Data Nodes, Meta Nodes, and Liaison Nodes.\n1.1 Data Nodes Data Nodes hold all the raw time series data, metadata, and indexed data. They handle the storage and management of data, including streams and measures, tag keys and values, as well as field keys and values.\nData Nodes also handle the local query execution. 
When a query is made, it is directed to a Liaison, which then interacts with Data Nodes to execute the distributed query and return results.\nIn addition to persistent raw data, Data Nodes also handle TopN aggregation calculation or other computational tasks.\n1.2 Meta Nodes Meta Nodes is implemented by etcd. They are responsible for maintaining high-level metadata of the cluster, which includes:\n All nodes in the cluster All database schemas  1.3 Liaison Nodes Liaison Nodes serve as gateways, routing traffic to Data Nodes. In addition to routing, they also provide authentication, TTL, and other security services to ensure secure and effective communication without the cluster.\nLiaison Nodes are also responsible for handling computational tasks associated with distributed querying the database. They build query tasks and search for data from Data Nodes.\n1.4 Standalone Mode BanyanDB integrates multiple roles into a single process in the standalone mode, making it simpler and faster to deploy. This mode is especially useful for scenarios with a limited number of data points or for testing and development purposes.\nIn this mode, the single process performs the roles of the Liaison Node, Data Node, and Meta Node. It receives requests, maintains metadata, processes queries, and handles data, all within a unified setup.\n2. Communication within a Cluster All nodes within a BanyanDB cluster communicate with other nodes according to their roles:\n Meta Nodes share high-level metadata about the cluster. Data Nodes store and manage the raw time series data and communicate with Meta Nodes. Liaison Nodes distribute incoming data to the appropriate Data Nodes. They also handle distributed query execution and communicate with Meta Nodes.  Nodes Discovery All nodes in the cluster are discovered by the Meta Nodes. When a node starts up, it registers itself with the Meta Nodes. The Meta Nodes then share this information with the Liaison Nodes which use it to route requests to the appropriate nodes.\n3. Data Organization Different nodes in BanyanDB are responsible for different parts of the database, while Query and Liaison Nodes manage the routing and processing of queries.\n3.1 Meta Nodes Meta Nodes store all high-level metadata that describes the cluster. This data is kept in an etcd-backed database on disk, including information about the shard allocation of each Data Node. This information is used by the Liaison Nodes to route data to the appropriate Data Nodes, based on the sharding key of the data.\nBy storing shard allocation information, Meta Nodes help ensure that data is routed efficiently and accurately across the cluster. This information is constantly updated as the cluster changes, allowing for dynamic allocation of resources and efficient use of available capacity.\n3.2 Data Nodes Data Nodes store all raw time series data, metadata, and indexed data. On disk, the data is organized by \u0026lt;group\u0026gt;/shard-\u0026lt;shard_id\u0026gt;/\u0026lt;segment_id\u0026gt;/. The segment is designed to support retention policy.\n3.3 Liaison Nodes Liaison Nodes do not store data but manage the routing of incoming requests to the appropriate Query or Data Nodes. They also provide authentication, TTL, and other security services.\nThey also handle the computational tasks associated with data queries, interacting directly with Data Nodes to execute queries and return results.\n4. 
Determining Optimal Node Counts When creating a BanyanDB cluster, choosing the appropriate number of each node type to configure and connect is crucial. The number of Meta Nodes should always be odd, for instance, “3”. The number of Data Nodes scales based on your storage and query needs. The number of Liaison Nodes depends on the expected query load and routing complexity.\nIf the write and read load is from different sources, it is recommended to separate the Liaison Nodes for write and read. For instance, if the write load is from metrics, trace or log collectors and the read load is from a web application, it is recommended to separate the Liaison Nodes for write and read.\nThis separation allows for more efficient routing of requests and better performance. It also allows for scaling out of the cluster based on the specific needs of each type of request. For instance, if the write load is high, you can scale out the write Liaison Nodes to handle the increased load.\nThe BanyanDB architecture allows for efficient clustering, scaling, and high availability, making it a robust choice for time series data management.\n5. Writes in a Cluster In BanyanDB, writing data in a cluster is designed to take advantage of the robust capabilities of underlying storage systems, such as Google Compute Persistent Disk or Amazon S3(TBD). These platforms ensure high levels of data durability, making them an optimal choice for storing raw time series data.\n5.1 Data Replication Unlike some other systems, BanyanDB does not support application-level replication, which can consume significant disk space. Instead, it delegates the task of replication to these underlying storage systems. This approach simplifies the BanyanDB architecture and reduces the complexity of managing replication at the application level. This approach also results in significant data savings.\nThe comparison between using a storage system and application-level replication boils down to several key factors: reliability, scalability, and complexity.\nReliability: A storage system provides built-in data durability by automatically storing data across multiple systems. It\u0026rsquo;s designed to deliver 99.999999999% durability, ensuring data is reliably stored and available when needed. While replication can increase data availability, it\u0026rsquo;s dependent on the application\u0026rsquo;s implementation. Any bugs or issues in the replication logic can lead to data loss or inconsistencies.\nScalability: A storage system is highly scalable by design and can store and retrieve any amount of data from anywhere. As your data grows, the system grows with you. You don\u0026rsquo;t need to worry about outgrowing your storage capacity. Scaling application-level replication can be challenging. As data grows, so does the need for more disk space and compute resources, potentially leading to increased costs and management complexity.\nComplexity: With the storage system handling replication, the complexity is abstracted away from the user. The user need not concern themselves with the details of how replication is handled. Managing replication at the application level can be complex. It requires careful configuration, monitoring, and potentially significant engineering effort to maintain.\nFuthermore, the storage system might be cheaper. For instance, S3 can be more cost-effective because it eliminates the need for additional resources required for application-level replication. 
Application-level replication also requires ongoing maintenance, potentially increasing operational costs.\n5.2 Data Sharding Data distribution across the cluster is determined based on the shard_num setting for a group and the specified entity in each resource, be it a stream or measure. The resource’s name with its entity is the sharding key, guiding data distribution to the appropriate Data Node during write operations.\nLiaison Nodes retrieve shard mapping information from Meta Nodes to achieve efficient data routing. This information is used to route data to the appropriate Data Nodes based on the sharding key of the data.\nThis sharding strategy ensures the write load is evenly distributed across the cluster, enhancing write performance and overall system efficiency. BanyanDB uses a hash algorithm for sharding. The hash function maps the sharding key (resource name and entity) to a node in the cluster. Each shard is assigned to the node returned by the hash function.\n5.3 Data Write Path Here\u0026rsquo;s a text-based diagram illustrating the data write path in BanyanDB:\nUser | | API Request (Write) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Routes Request | (Identifies relevant Data Nodes | | and dispatches write request) | ------------------------------------ | v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request is a write request, containing the data to be written to the database. The Liaison Node, which is stateless, identifies the relevant Data Nodes that will store the data based on the entity specified in the request. The write request is executed across the identified Data Nodes. Each Data Node writes the data to its shard.  This architecture allows BanyanDB to execute write requests efficiently across a distributed system, leveraging the stateless nature and routing/writing capabilities of the Liaison Node, and the distributed storage of Data Nodes.\n6. Queries in a Cluster BanyanDB utilizes a distributed architecture that allows for efficient query processing. When a query is made, it is directed to a Liaison Node.\n6.1 Query Routing Liaison Nodes do not use shard mapping information from Meta Nodes to execute distributed queries. Instead, they access all Data Nodes to retrieve the necessary data for queries. As the query load is lower, it is practical for liaison nodes to access all data nodes for this purpose. It may increase network traffic, but simplifies scaling out of the cluster.\nCompared to the write load, the query load is relatively low. For instance, in a time series database, the write load is typically 100x higher than the query load. This is because the write load is driven by the number of devices sending data to the database, while the query load is driven by the number of users accessing the data.\nThis strategy enables scaling out of the cluster. When the cluster scales out, the liaison node can access all data nodes without any mapping info changes. It eliminates the need to backup previous shard mapping information, reducing complexity of scaling out.\n6.2 Query Execution Parallel execution significantly enhances the efficiency of data retrieval and reduces the overall query processing time. 
It allows for faster response times as the workload of the query is shared across multiple shards, each working on their part of the problem simultaneously. This feature makes BanyanDB particularly effective for large-scale data analysis tasks.\nIn summary, BanyanDB\u0026rsquo;s approach to querying leverages its unique distributed architecture, enabling high-performance data retrieval across multiple shards in parallel.\n6.3 Query Path User | | API Request (Query) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Distributes Query | (Access all Data nodes to | | execute distributed queries) | ------------------------------------ | | | v v v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request may be a query for specific data. The Liaison Node builds a distributed query to select all data nodes. The query is executed in parallel across all Data Nodes. Each Data Node execute a local query plan to process the data stored in its shard concurrently with the others. The results from each shard are then returned to the Liaison Node, which consolidates them into a single response to the user.  This architecture allows BanyanDB to execute queries efficiently across a distributed system, leveraging the distributed query capabilities of the Liaison Node and the parallel processing of Data Nodes.\n","title":"BanyanDB Clustering","url":"/docs/skywalking-banyandb/latest/concept/clustering/"},{"content":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises \u0026ldquo;Liaison Nodes\u0026rdquo;, \u0026ldquo;Data Nodes\u0026rdquo;, and \u0026ldquo;Meta Nodes\u0026rdquo;. This structure allows for effectively distributing and managing time-series data within the system.\n1. Architectural Overview A BanyanDB installation includes three distinct types of nodes: Data Nodes, Meta Nodes, and Liaison Nodes.\n1.1 Data Nodes Data Nodes hold all the raw time series data, metadata, and indexed data. They handle the storage and management of data, including streams and measures, tag keys and values, as well as field keys and values.\nData Nodes also handle the local query execution. When a query is made, it is directed to a Liaison, which then interacts with Data Nodes to execute the distributed query and return results.\nIn addition to persistent raw data, Data Nodes also handle TopN aggregation calculation or other computational tasks.\n1.2 Meta Nodes Meta Nodes is implemented by etcd. They are responsible for maintaining high-level metadata of the cluster, which includes:\n All nodes in the cluster All database schemas  1.3 Liaison Nodes Liaison Nodes serve as gateways, routing traffic to Data Nodes. In addition to routing, they also provide authentication, TTL, and other security services to ensure secure and effective communication without the cluster.\nLiaison Nodes are also responsible for handling computational tasks associated with distributed querying the database. They build query tasks and search for data from Data Nodes.\n1.4 Standalone Mode BanyanDB integrates multiple roles into a single process in the standalone mode, making it simpler and faster to deploy. 
This mode is especially useful for scenarios with a limited number of data points or for testing and development purposes.\nIn this mode, the single process performs the roles of the Liaison Node, Data Node, and Meta Node. It receives requests, maintains metadata, processes queries, and handles data, all within a unified setup.\n2. Communication within a Cluster All nodes within a BanyanDB cluster communicate with other nodes according to their roles:\n Meta Nodes share high-level metadata about the cluster. Data Nodes store and manage the raw time series data and communicate with Meta Nodes. Liaison Nodes distribute incoming data to the appropriate Data Nodes. They also handle distributed query execution and communicate with Meta Nodes.  Nodes Discovery All nodes in the cluster are discovered by the Meta Nodes. When a node starts up, it registers itself with the Meta Nodes. The Meta Nodes then share this information with the Liaison Nodes which use it to route requests to the appropriate nodes.\nIf data nodes are unable to connect to the meta nodes due to network partition or other issues, they will be removed from the meta nodes. However, the liaison nodes will not remove the data nodes from their routing list until the data nodes are also unreachable from the liaison nodes' perspective. This approach ensures that the system can continue to function even if some data nodes are temporarily unavailable from the meta nodes.\n3. Data Organization Different nodes in BanyanDB are responsible for different parts of the database, while Query and Liaison Nodes manage the routing and processing of queries.\n3.1 Meta Nodes Meta Nodes store all high-level metadata that describes the cluster. This data is kept in an etcd-backed database on disk, including information about the shard allocation of each Data Node. This information is used by the Liaison Nodes to route data to the appropriate Data Nodes, based on the sharding key of the data.\nBy storing shard allocation information, Meta Nodes help ensure that data is routed efficiently and accurately across the cluster. This information is constantly updated as the cluster changes, allowing for dynamic allocation of resources and efficient use of available capacity.\n3.2 Data Nodes Data Nodes store all raw time series data, metadata, and indexed data. On disk, the data is organized by \u0026lt;group\u0026gt;/shard-\u0026lt;shard_id\u0026gt;/\u0026lt;segment_id\u0026gt;/. The segment is designed to support retention policy.\n3.3 Liaison Nodes Liaison Nodes do not store data but manage the routing of incoming requests to the appropriate Query or Data Nodes. They also provide authentication, TTL, and other security services.\nThey also handle the computational tasks associated with data queries, interacting directly with Data Nodes to execute queries and return results.\n4. Determining Optimal Node Counts When creating a BanyanDB cluster, choosing the appropriate number of each node type to configure and connect is crucial. The number of Meta Nodes should always be odd, for instance, “3”. The number of Data Nodes scales based on your storage and query needs. The number of Liaison Nodes depends on the expected query load and routing complexity.\nIf the write and read load is from different sources, it is recommended to separate the Liaison Nodes for write and read. 
For instance, if the write load is from metrics, trace or log collectors and the read load is from a web application, it is recommended to separate the Liaison Nodes for write and read.\nThis separation allows for more efficient routing of requests and better performance. It also allows for scaling out of the cluster based on the specific needs of each type of request. For instance, if the write load is high, you can scale out the write Liaison Nodes to handle the increased load.\nThe BanyanDB architecture allows for efficient clustering, scaling, and high availability, making it a robust choice for time series data management.\n5. Writes in a Cluster In BanyanDB, writing data in a cluster is designed to take advantage of the robust capabilities of underlying storage systems, such as Google Compute Persistent Disk or Amazon S3(TBD). These platforms ensure high levels of data durability, making them an optimal choice for storing raw time series data.\n5.1 Data Replication Unlike some other systems, BanyanDB does not support application-level replication, which can consume significant disk space. Instead, it delegates the task of replication to these underlying storage systems. This approach simplifies the BanyanDB architecture and reduces the complexity of managing replication at the application level. This approach also results in significant data savings.\nThe comparison between using a storage system and application-level replication boils down to several key factors: reliability, scalability, and complexity.\nReliability: A storage system provides built-in data durability by automatically storing data across multiple systems. It\u0026rsquo;s designed to deliver 99.999999999% durability, ensuring data is reliably stored and available when needed. While replication can increase data availability, it\u0026rsquo;s dependent on the application\u0026rsquo;s implementation. Any bugs or issues in the replication logic can lead to data loss or inconsistencies.\nScalability: A storage system is highly scalable by design and can store and retrieve any amount of data from anywhere. As your data grows, the system grows with you. You don\u0026rsquo;t need to worry about outgrowing your storage capacity. Scaling application-level replication can be challenging. As data grows, so does the need for more disk space and compute resources, potentially leading to increased costs and management complexity.\nComplexity: With the storage system handling replication, the complexity is abstracted away from the user. The user need not concern themselves with the details of how replication is handled. Managing replication at the application level can be complex. It requires careful configuration, monitoring, and potentially significant engineering effort to maintain.\nFuthermore, the storage system might be cheaper. For instance, S3 can be more cost-effective because it eliminates the need for additional resources required for application-level replication. Application-level replication also requires ongoing maintenance, potentially increasing operational costs.\n5.2 Data Sharding Data distribution across the cluster is determined based on the shard_num setting for a group and the specified entity in each resource, be it a stream or measure. The resource’s name with its entity is the sharding key, guiding data distribution to the appropriate Data Node during write operations.\nLiaison Nodes retrieve shard mapping information from Meta Nodes to achieve efficient data routing. 
This information is used to route data to the appropriate Data Nodes based on the sharding key of the data.\nThis sharding strategy ensures the write load is evenly distributed across the cluster, enhancing write performance and overall system efficiency. BanyanDB uses a hash algorithm for sharding. The hash function maps the sharding key (resource name and entity) to a node in the cluster. Each shard is assigned to the node returned by the hash function.\n5.3 Data Write Path Here\u0026rsquo;s a text-based diagram illustrating the data write path in BanyanDB:\nUser | | API Request (Write) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Routes Request | (Identifies relevant Data Nodes | | and dispatches write request) | ------------------------------------ | v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request is a write request, containing the data to be written to the database. The Liaison Node, which is stateless, identifies the relevant Data Nodes that will store the data based on the entity specified in the request. The write request is executed across the identified Data Nodes. Each Data Node writes the data to its shard.  This architecture allows BanyanDB to execute write requests efficiently across a distributed system, leveraging the stateless nature and routing/writing capabilities of the Liaison Node, and the distributed storage of Data Nodes.\n6. Queries in a Cluster BanyanDB utilizes a distributed architecture that allows for efficient query processing. When a query is made, it is directed to a Liaison Node.\n6.1 Query Routing Liaison Nodes do not use shard mapping information from Meta Nodes to execute distributed queries. Instead, they access all Data Nodes to retrieve the necessary data for queries. As the query load is lower, it is practical for liaison nodes to access all data nodes for this purpose. It may increase network traffic, but simplifies scaling out of the cluster.\nCompared to the write load, the query load is relatively low. For instance, in a time series database, the write load is typically 100x higher than the query load. This is because the write load is driven by the number of devices sending data to the database, while the query load is driven by the number of users accessing the data.\nThis strategy enables scaling out of the cluster. When the cluster scales out, the liaison node can access all data nodes without any mapping info changes. It eliminates the need to backup previous shard mapping information, reducing complexity of scaling out.\n6.2 Query Execution Parallel execution significantly enhances the efficiency of data retrieval and reduces the overall query processing time. It allows for faster response times as the workload of the query is shared across multiple shards, each working on their part of the problem simultaneously. 
This feature makes BanyanDB particularly effective for large-scale data analysis tasks.\nIn summary, BanyanDB\u0026rsquo;s approach to querying leverages its unique distributed architecture, enabling high-performance data retrieval across multiple shards in parallel.\n6.3 Query Path User | | API Request (Query) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Distributes Query | (Access all Data nodes to | | execute distributed queries) | ------------------------------------ | | | v v v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request may be a query for specific data. The Liaison Node builds a distributed query to select all data nodes. The query is executed in parallel across all Data Nodes. Each Data Node execute a local query plan to process the data stored in its shard concurrently with the others. The results from each shard are then returned to the Liaison Node, which consolidates them into a single response to the user.  This architecture allows BanyanDB to execute queries efficiently across a distributed system, leveraging the distributed query capabilities of the Liaison Node and the parallel processing of Data Nodes.\n7. Failover BanyanDB is designed to be highly available and fault-tolerant.\nIn case of a Data Node failure, the system can automatically recover and continue to operate.\nLiaison nodes have a built-in mechanism to detect the failure of a Data Node. When a Data Node fails, the Liaison Node will automatically route requests to other available Data Nodes with the same shard. This ensures that the system remains operational even in the face of node failures. Thanks to the query mode, which allows Liaison Nodes to access all Data Nodes, the system can continue to function even if some Data Nodes are unavailable. When the failed data nodes are restored, the system won\u0026rsquo;t reply data to them since the data is still retrieved from other nodes.\nIn the case of a Liaison Node failure, the system can be configured to have multiple Liaison Nodes for redundancy. If one Liaison Node fails, the other Liaison Nodes can take over its responsibilities, ensuring that the system remains available.\n Please note that any written request which triggers the failover process will be rejected, and the client should re-send the request.\n ","title":"BanyanDB Clustering","url":"/docs/skywalking-banyandb/next/concept/clustering/"},{"content":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises \u0026ldquo;Liaison Nodes\u0026rdquo;, \u0026ldquo;Data Nodes\u0026rdquo;, and \u0026ldquo;Meta Nodes\u0026rdquo;. This structure allows for effectively distributing and managing time-series data within the system.\n1. Architectural Overview A BanyanDB installation includes three distinct types of nodes: Data Nodes, Meta Nodes, and Liaison Nodes.\n1.1 Data Nodes Data Nodes hold all the raw time series data, metadata, and indexed data. They handle the storage and management of data, including streams and measures, tag keys and values, as well as field keys and values.\nData Nodes also handle the local query execution. 
When a query is made, it is directed to a Liaison, which then interacts with Data Nodes to execute the distributed query and return results.\nIn addition to persistent raw data, Data Nodes also handle TopN aggregation calculation or other computational tasks.\n1.2 Meta Nodes Meta Nodes is implemented by etcd. They are responsible for maintaining high-level metadata of the cluster, which includes:\n All nodes in the cluster All database schemas  1.3 Liaison Nodes Liaison Nodes serve as gateways, routing traffic to Data Nodes. In addition to routing, they also provide authentication, TTL, and other security services to ensure secure and effective communication without the cluster.\nLiaison Nodes are also responsible for handling computational tasks associated with distributed querying the database. They build query tasks and search for data from Data Nodes.\n1.4 Standalone Mode BanyanDB integrates multiple roles into a single process in the standalone mode, making it simpler and faster to deploy. This mode is especially useful for scenarios with a limited number of data points or for testing and development purposes.\nIn this mode, the single process performs the roles of the Liaison Node, Data Node, and Meta Node. It receives requests, maintains metadata, processes queries, and handles data, all within a unified setup.\n2. Communication within a Cluster All nodes within a BanyanDB cluster communicate with other nodes according to their roles:\n Meta Nodes share high-level metadata about the cluster. Data Nodes store and manage the raw time series data and communicate with Meta Nodes. Liaison Nodes distribute incoming data to the appropriate Data Nodes. They also handle distributed query execution and communicate with Meta Nodes.  Nodes Discovery All nodes in the cluster are discovered by the Meta Nodes. When a node starts up, it registers itself with the Meta Nodes. The Meta Nodes then share this information with the Liaison Nodes which use it to route requests to the appropriate nodes.\n3. Data Organization Different nodes in BanyanDB are responsible for different parts of the database, while Query and Liaison Nodes manage the routing and processing of queries.\n3.1 Meta Nodes Meta Nodes store all high-level metadata that describes the cluster. This data is kept in an etcd-backed database on disk, including information about the shard allocation of each Data Node. This information is used by the Liaison Nodes to route data to the appropriate Data Nodes, based on the sharding key of the data.\nBy storing shard allocation information, Meta Nodes help ensure that data is routed efficiently and accurately across the cluster. This information is constantly updated as the cluster changes, allowing for dynamic allocation of resources and efficient use of available capacity.\n3.2 Data Nodes Data Nodes store all raw time series data, metadata, and indexed data. On disk, the data is organized by \u0026lt;group\u0026gt;/shard-\u0026lt;shard_id\u0026gt;/\u0026lt;segment_id\u0026gt;/. The segment is designed to support retention policy.\n3.3 Liaison Nodes Liaison Nodes do not store data but manage the routing of incoming requests to the appropriate Query or Data Nodes. They also provide authentication, TTL, and other security services.\nThey also handle the computational tasks associated with data queries, interacting directly with Data Nodes to execute queries and return results.\n4. 
Determining Optimal Node Counts When creating a BanyanDB cluster, choosing the appropriate number of each node type to configure and connect is crucial. The number of Meta Nodes should always be odd, for instance, “3”. The number of Data Nodes scales based on your storage and query needs. The number of Liaison Nodes depends on the expected query load and routing complexity.\nIf the write and read load is from different sources, it is recommended to separate the Liaison Nodes for write and read. For instance, if the write load is from metrics, trace or log collectors and the read load is from a web application, it is recommended to separate the Liaison Nodes for write and read.\nThis separation allows for more efficient routing of requests and better performance. It also allows for scaling out of the cluster based on the specific needs of each type of request. For instance, if the write load is high, you can scale out the write Liaison Nodes to handle the increased load.\nThe BanyanDB architecture allows for efficient clustering, scaling, and high availability, making it a robust choice for time series data management.\n5. Writes in a Cluster In BanyanDB, writing data in a cluster is designed to take advantage of the robust capabilities of underlying storage systems, such as Google Compute Persistent Disk or Amazon S3(TBD). These platforms ensure high levels of data durability, making them an optimal choice for storing raw time series data.\n5.1 Data Replication Unlike some other systems, BanyanDB does not support application-level replication, which can consume significant disk space. Instead, it delegates the task of replication to these underlying storage systems. This approach simplifies the BanyanDB architecture and reduces the complexity of managing replication at the application level. This approach also results in significant data savings.\nThe comparison between using a storage system and application-level replication boils down to several key factors: reliability, scalability, and complexity.\nReliability: A storage system provides built-in data durability by automatically storing data across multiple systems. It\u0026rsquo;s designed to deliver 99.999999999% durability, ensuring data is reliably stored and available when needed. While replication can increase data availability, it\u0026rsquo;s dependent on the application\u0026rsquo;s implementation. Any bugs or issues in the replication logic can lead to data loss or inconsistencies.\nScalability: A storage system is highly scalable by design and can store and retrieve any amount of data from anywhere. As your data grows, the system grows with you. You don\u0026rsquo;t need to worry about outgrowing your storage capacity. Scaling application-level replication can be challenging. As data grows, so does the need for more disk space and compute resources, potentially leading to increased costs and management complexity.\nComplexity: With the storage system handling replication, the complexity is abstracted away from the user. The user need not concern themselves with the details of how replication is handled. Managing replication at the application level can be complex. It requires careful configuration, monitoring, and potentially significant engineering effort to maintain.\nFuthermore, the storage system might be cheaper. For instance, S3 can be more cost-effective because it eliminates the need for additional resources required for application-level replication. 
Application-level replication also requires ongoing maintenance, potentially increasing operational costs.\n5.2 Data Sharding Data distribution across the cluster is determined based on the shard_num setting for a group and the specified entity in each resource, be it a stream or measure. The resource’s name with its entity is the sharding key, guiding data distribution to the appropriate Data Node during write operations.\nLiaison Nodes retrieve shard mapping information from Meta Nodes to achieve efficient data routing. This information is used to route data to the appropriate Data Nodes based on the sharding key of the data.\nThis sharding strategy ensures the write load is evenly distributed across the cluster, enhancing write performance and overall system efficiency. BanyanDB uses a hash algorithm for sharding. The hash function maps the sharding key (resource name and entity) to a node in the cluster. Each shard is assigned to the node returned by the hash function.\n5.3 Data Write Path Here\u0026rsquo;s a text-based diagram illustrating the data write path in BanyanDB:\nUser | | API Request (Write) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Routes Request | (Identifies relevant Data Nodes | | and dispatches write request) | ------------------------------------ | v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request is a write request, containing the data to be written to the database. The Liaison Node, which is stateless, identifies the relevant Data Nodes that will store the data based on the entity specified in the request. The write request is executed across the identified Data Nodes. Each Data Node writes the data to its shard.  This architecture allows BanyanDB to execute write requests efficiently across a distributed system, leveraging the stateless nature and routing/writing capabilities of the Liaison Node, and the distributed storage of Data Nodes.\n6. Queries in a Cluster BanyanDB utilizes a distributed architecture that allows for efficient query processing. When a query is made, it is directed to a Liaison Node.\n6.1 Query Routing Liaison Nodes do not use shard mapping information from Meta Nodes to execute distributed queries. Instead, they access all Data Nodes to retrieve the necessary data for queries. As the query load is lower, it is practical for liaison nodes to access all data nodes for this purpose. It may increase network traffic, but simplifies scaling out of the cluster.\nCompared to the write load, the query load is relatively low. For instance, in a time series database, the write load is typically 100x higher than the query load. This is because the write load is driven by the number of devices sending data to the database, while the query load is driven by the number of users accessing the data.\nThis strategy enables scaling out of the cluster. When the cluster scales out, the liaison node can access all data nodes without any mapping info changes. It eliminates the need to backup previous shard mapping information, reducing complexity of scaling out.\n6.2 Query Execution Parallel execution significantly enhances the efficiency of data retrieval and reduces the overall query processing time. 
It allows for faster response times as the workload of the query is shared across multiple shards, each working on its part of the problem simultaneously. This feature makes BanyanDB particularly effective for large-scale data analysis tasks.\nIn summary, BanyanDB\u0026rsquo;s approach to querying leverages its unique distributed architecture, enabling high-performance data retrieval across multiple shards in parallel.\n6.3 Query Path User | | API Request (Query) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Distributes Query | (Access all Data nodes to | | execute distributed queries) | ------------------------------------ | | | v v v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request may be a query for specific data. The Liaison Node builds a distributed query to select all data nodes. The query is executed in parallel across all Data Nodes. Each Data Node executes a local query plan to process the data stored in its shard concurrently with the others. The results from each shard are then returned to the Liaison Node, which consolidates them into a single response to the user.  This architecture allows BanyanDB to execute queries efficiently across a distributed system, leveraging the distributed query capabilities of the Liaison Node and the parallel processing of Data Nodes.\n","title":"BanyanDB Clustering","url":"/docs/skywalking-banyandb/v0.5.0/concept/clustering/"},{"content":"BookKeeper monitoring SkyWalking leverages the OpenTelemetry Collector to collect metrics data from BookKeeper and transfer the metrics to the OpenTelemetry receiver and into the Meter System. The BookKeeper cluster is cataloged as a Service in OAP on the Layer: BOOKKEEPER.\nData flow  BookKeeper exposes metrics through a Prometheus endpoint. OpenTelemetry Collector fetches metrics from the BookKeeper cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up BookKeeper Cluster. Set up OpenTelemetry Collector. For an example OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  BookKeeper Monitoring BookKeeper monitoring provides multidimensional metrics monitoring of a BookKeeper cluster as a Layer: BOOKKEEPER Service in the OAP. In each cluster, the nodes are represented as Instances.\nBookKeeper Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Bookie Ledgers Count meter_bookkeeper_bookie_ledgers_count The number of the bookie ledgers. Bookkeeper Cluster   Bookie Ledger Writable Dirs meter_bookkeeper_bookie_ledger_writable_dirs The number of writable directories in the bookie. Bookkeeper Cluster   Bookie Ledger Dir Usage meter_bookkeeper_bookie_ledger_dir_data_bookkeeper_ledgers_usage The usage of the bookie ledger data directories. Bookkeeper Cluster   Bookie Entries Count meter_bookkeeper_bookie_entries_count The number of the bookie write entries. Bookkeeper Cluster   Bookie Write Cache Size meter_bookkeeper_bookie_write_cache_size The size of the bookie write cache (MB). 
Bookkeeper Cluster   Bookie Write Cache Entry Count meter_bookkeeper_bookie_write_cache_count The entry count in the bookie write cache. Bookkeeper Cluster   Bookie Read Cache Size meter_bookkeeper_bookie_read_cache_size The size of the bookie read cache (MB). Bookkeeper Cluster   Bookie Read Cache Entry Count meter_bookkeeper_bookie_read_cache_count The entry count in the bookie read cache. Bookkeeper Cluster   Bookie Read Rate meter_bookkeeper_bookie_read_rate The bookie read rate (bytes/s). Bookkeeper Cluster   Bookie Write Rate meter_bookkeeper_bookie_write_rate The bookie write rate (bytes/s). Bookkeeper Cluster    BookKeeper Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     JVM Memory Pool Used meter_bookkeeper_node_jvm_memory_pool_used The usage of the bookie JVM memory pool. Bookkeeper Bookie   JVM Memory meter_bookkeeper_node_jvm_memory_used meter_bookkeeper_node_jvm_memory_committed meter_bookkeeper_node_jvm_memory_init The usage of the bookie JVM memory. Bookkeeper Bookie   JVM Threads meter_bookkeeper_node_jvm_threads_current meter_bookkeeper_node_jvm_threads_daemon meter_bookkeeper_node_jvm_threads_peak meter_bookkeeper_node_jvm_threads_deadlocked The count of the JVM threads. Bookkeeper Bookie   GC Time meter_bookkeeper_node_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Bookkeeper Bookie   GC Count meter_bookkeeper_node_jvm_gc_collection_seconds_count The collection count of a given JVM garbage collector. Bookkeeper Bookie   Thread Executor Completed meter_bookkeeper_node_thread_executor_completed The count of completed executor threads. Bookkeeper Bookie   Thread Executor Tasks meter_bookkeeper_node_thread_executor_tasks_completed meter_bookkeeper_node_thread_executor_tasks_rejected meter_bookkeeper_node_thread_executor_tasks_failed The count of the executor tasks. Bookkeeper Bookie   Pooled Threads meter_bookkeeper_node_high_priority_threads meter_bookkeeper_node_read_thread_pool_threads The count of the pooled threads. Bookkeeper Bookie   Pooled Threads Max Queue Size meter_bookkeeper_node_high_priority_thread_max_queue_size meter_bookkeeper_node_read_thread_pool_max_queue_size The max queue size of the pooled threads. Bookkeeper Bookie    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/bookkeeper/bookkeeper-cluster.yaml, otel-rules/bookkeeper/bookkeeper-node.yaml. The BookKeeper dashboard panel configurations are found in /config/ui-initialized-templates/bookkeeper.\n","title":"BookKeeper monitoring","url":"/docs/main/latest/en/setup/backend/backend-bookkeeper-monitoring/"},{"content":"BookKeeper monitoring SkyWalking leverages the OpenTelemetry Collector to collect metrics data from BookKeeper and transfer the metrics to the OpenTelemetry receiver and into the Meter System. The BookKeeper cluster is cataloged as a Service in OAP on the Layer: BOOKKEEPER.\nData flow  BookKeeper exposes metrics through a Prometheus endpoint. OpenTelemetry Collector fetches metrics from the BookKeeper cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up BookKeeper Cluster. Set up OpenTelemetry Collector. For an example OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  
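As a rough illustration of the OpenTelemetry Collector step above, a configuration wiring a Prometheus receiver to the OAP\u0026rsquo;s gRPC endpoint could look like the sketch below. The bookie target bookie-1:8000 and the OAP address oap:11800 are assumptions for this example, not guaranteed defaults; adapt them to your deployment.\nreceivers:\n  prometheus:\n    config:\n      scrape_configs:\n        - job_name: bookkeeper\n          static_configs:\n            - targets: [ \u0026#39;bookie-1:8000\u0026#39; ] # assumed bookie Prometheus port\nexporters:\n  otlp:\n    endpoint: oap:11800 # assumed SkyWalking OAP gRPC address\n    tls:\n      insecure: true\nservice:\n  pipelines:\n    metrics:\n      receivers: [ prometheus ]\n      exporters: [ otlp ]\n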
BookKeeper Monitoring Bookkeeper monitoring provides multidimensional metrics monitoring of BookKeeper cluster as Layer: BOOKKEEPER Service in the OAP. In each cluster, the nodes are represented as Instance.\nBookKeeper Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Bookie Ledgers Count meter_bookkeeper_bookie_ledgers_count The number of the bookie ledgers. Bookkeeper Cluster   Bookie Ledger Writable Dirs meter_bookkeeper_bookie_ledger_writable_dirs The number of writable directories in the bookie. Bookkeeper Cluster   Bookie Ledger Dir Usage meter_bookkeeper_bookie_ledger_dir_data_bookkeeper_ledgers_usage The number of successfully created connections. Bookkeeper Cluster   Bookie Entries Count meter_bookkeeper_bookie_entries_count The number of the bookie write entries. Bookkeeper Cluster   Bookie Write Cache Size meter_bookkeeper_bookie_write_cache_size The size of the bookie write cache (MB). Bookkeeper Cluster   Bookie Write Cache Entry Count meter_bookkeeper_bookie_write_cache_count The entry count in the bookie write cache. Bookkeeper Cluster   Bookie Read Cache Size meter_bookkeeper_bookie_read_cache_size The size of the bookie read cache (MB). Bookkeeper Cluster   Bookie Read Cache Entry Count meter_bookkeeper_bookie_read_cache_count The entry count in the bookie read cache. Bookkeeper Cluster   Bookie Read Rate meter_bookkeeper_bookie_read_rate The bookie read rate (bytes/s). Bookkeeper Cluster   Bookie Write Rate meter_bookkeeper_bookie_write_rate The bookie write rate (bytes/s). Bookkeeper Cluster    BookKeeper Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     JVM Memory Pool Used meter_bookkeeper_node_jvm_memory_pool_used The usage of the broker jvm memory pool. Bookkeeper Bookie   JVM Memory meter_bookkeeper_node_jvm_memory_used meter_bookkeeper_node_jvm_memory_committed meter_bookkeeper_node_jvm_memory_init The usage of the broker jvm memory. Bookkeeper Bookie   JVM Threads meter_bookkeeper_node_jvm_threads_current meter_bookkeeper_node_jvm_threads_daemon meter_bookkeeper_node_jvm_threads_peak meter_bookkeeper_node_jvm_threads_deadlocked The count of the jvm threads. Bookkeeper Bookie   GC Time meter_bookkeeper_node_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Bookkeeper Bookie   GC Count meter_bookkeeper_node_jvm_gc_collection_seconds_count The count of a given JVM garbage. Bookkeeper Bookie   Thread Executor Completed meter_bookkeeper_node_thread_executor_completed The count of the executor thread. Bookkeeper Bookie   Thread Executor Tasks meter_bookkeeper_node_thread_executor_tasks_completed meter_bookkeeper_node_thread_executor_tasks_rejected meter_bookkeeper_node_thread_executor_tasks_failed The count of the executor tasks. Bookkeeper Bookie   Pooled Threads meter_bookkeeper_node_high_priority_threads meter_bookkeeper_node_read_thread_pool_threads The count of the pooled thread. Bookkeeper Bookie   Pooled Threads Max Queue Size meter_bookkeeper_node_high_priority_thread_max_queue_size meter_bookkeeper_node_read_thread_pool_max_queue_size The count of the pooled threads max queue size. Bookkeeper Bookie    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/bookkeeper/bookkeeper-cluster.yaml, otel-rules/bookkeeper/bookkeeper-node.yaml. 
The Bookkeeper dashboard panel configurations are found in /config/ui-initialized-templates/bookkeeper.\n","title":"BookKeeper monitoring","url":"/docs/main/next/en/setup/backend/backend-bookkeeper-monitoring/"},{"content":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the BookKeeper and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the `Layer: BOOKKEEPER.\nData flow  BookKeeper exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from BookKeeper cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up BookKeeper Cluster. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  BookKeeper Monitoring Bookkeeper monitoring provides multidimensional metrics monitoring of BookKeeper cluster as Layer: BOOKKEEPER Service in the OAP. In each cluster, the nodes are represented as Instance.\nBookKeeper Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Bookie Ledgers Count meter_bookkeeper_bookie_ledgers_count The number of the bookie ledgers. Bookkeeper Cluster   Bookie Ledger Writable Dirs meter_bookkeeper_bookie_ledger_writable_dirs The number of writable directories in the bookie. Bookkeeper Cluster   Bookie Ledger Dir Usage meter_bookkeeper_bookie_ledger_dir_data_bookkeeper_ledgers_usage The number of successfully created connections. Bookkeeper Cluster   Bookie Entries Count meter_bookkeeper_bookie_entries_count The number of the bookie write entries. Bookkeeper Cluster   Bookie Write Cache Size meter_bookkeeper_bookie_write_cache_size The size of the bookie write cache (MB). Bookkeeper Cluster   Bookie Write Cache Entry Count meter_bookkeeper_bookie_write_cache_count The entry count in the bookie write cache. Bookkeeper Cluster   Bookie Read Cache Size meter_bookkeeper_bookie_read_cache_size The size of the bookie read cache (MB). Bookkeeper Cluster   Bookie Read Cache Entry Count meter_bookkeeper_bookie_read_cache_count The entry count in the bookie read cache. Bookkeeper Cluster   Bookie Read Rate meter_bookkeeper_bookie_read_rate The bookie read rate (bytes/s). Bookkeeper Cluster   Bookie Write Rate meter_bookkeeper_bookie_write_rate The bookie write rate (bytes/s). Bookkeeper Cluster    BookKeeper Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     JVM Memory Pool Used meter_bookkeeper_node_jvm_memory_pool_used The usage of the broker jvm memory pool. Bookkeeper Bookie   JVM Memory meter_bookkeeper_node_jvm_memory_used meter_bookkeeper_node_jvm_memory_committed meter_bookkeeper_node_jvm_memory_init The usage of the broker jvm memory. Bookkeeper Bookie   JVM Threads meter_bookkeeper_node_jvm_threads_current meter_bookkeeper_node_jvm_threads_daemon meter_bookkeeper_node_jvm_threads_peak meter_bookkeeper_node_jvm_threads_deadlocked The count of the jvm threads. Bookkeeper Bookie   GC Time meter_bookkeeper_node_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Bookkeeper Bookie   GC Count meter_bookkeeper_node_jvm_gc_collection_seconds_count The count of a given JVM garbage. 
Bookkeeper Bookie   Thread Executor Completed meter_bookkeeper_node_thread_executor_completed The count of the executor thread. Bookkeeper Bookie   Thread Executor Tasks meter_bookkeeper_node_thread_executor_tasks_completed meter_bookkeeper_node_thread_executor_tasks_rejected meter_bookkeeper_node_thread_executor_tasks_failed The count of the executor tasks. Bookkeeper Bookie   Pooled Threads meter_bookkeeper_node_high_priority_threads meter_bookkeeper_node_read_thread_pool_threads The count of the pooled thread. Bookkeeper Bookie   Pooled Threads Max Queue Size meter_bookkeeper_node_high_priority_thread_max_queue_size meter_bookkeeper_node_read_thread_pool_max_queue_size The count of the pooled threads max queue size. Bookkeeper Bookie    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/bookkeeper/bookkeeper-cluster.yaml, otel-rules/bookkeeper/bookkeeper-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/bookkeeper.\n","title":"BookKeeper monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-bookkeeper-monitoring/"},{"content":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","title":"Bootstrap class plugins","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/bootstrap-plugins/"},{"content":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   
16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","title":"Bootstrap class plugins","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/bootstrap-plugins/"},{"content":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","title":"Bootstrap class plugins","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/bootstrap-plugins/"},{"content":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","title":"Bootstrap class plugins","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/bootstrap-plugins/"},{"content":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. 
When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","title":"Bootstrap class plugins","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/bootstrap-plugins/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/latest/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/next/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. No browser plugin required. A simple JavaScript library. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.0.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. 
It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.1.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.2.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.3.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.4.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 
10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.5.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.6.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.7.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/latest/en/api/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/next/en/api/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  
For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.0.0/en/protocols/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.1.0/en/protocols/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.2.0/en/protocols/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.3.0/en/protocols/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.4.0/en/api/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  
","title":"Browser Protocol","url":"/docs/main/v9.5.0/en/api/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.6.0/en/api/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.7.0/en/api/browser-protocol/"},{"content":"Build and use the Agent from source codes When you want to build and use the Agent from source code, please follow these steps.\nInstall SkyWalking Go Use go get to import the skywalking-go program.\n// latest or any commit ID go get github.com/apache/skywalking-go@latest Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; Build the Agent When building the project, you need to clone the project and build it.\n// git clone the same version(tag or commit ID) as your dependency version. git clone https://github.com/apache/skywalking-go.git cd skywalking-go \u0026amp;\u0026amp; make build Next, you would find several versions of the Go Agent program for different systems in the bin directory of the current project. When you need to compile the program, please add the following statement with the agent program which matches your system:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a ","title":"Build and use the Agent from source codes","url":"/docs/skywalking-go/latest/en/development-and-contribution/build-and-use-agent/"},{"content":"Build and use the Agent from source codes When you want to build and use the Agent from source code, please follow these steps.\nInstall SkyWalking Go Use go get to import the skywalking-go program.\n// latest or any commit ID go get github.com/apache/skywalking-go@latest Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; Build the Agent When building the project, you need to clone the project and build it.\n// git clone the same version(tag or commit ID) as your dependency version. 
git clone https://github.com/apache/skywalking-go.git cd skywalking-go \u0026amp;\u0026amp; make build Next, you would find several versions of the Go Agent program for different systems in the bin directory of the current project. When you need to compile the program, please add the following statement with the agent program which matches your system:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a ","title":"Build and use the Agent from source codes","url":"/docs/skywalking-go/next/en/development-and-contribution/build-and-use-agent/"},{"content":"Build and use the Agent from source codes When you want to build and use the Agent from source code, please follow these steps.\nInstall SkyWalking Go Use go get to import the skywalking-go program.\n// latest or any commit ID go get github.com/apache/skywalking-go@latest Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; Build the Agent When building the project, you need to clone the project and build it.\n// git clone the same version(tag or commit ID) as your dependency version. git clone https://github.com/apache/skywalking-go.git cd skywalking-go \u0026amp;\u0026amp; make build Next, you would find several versions of the Go Agent program for different systems in the bin directory of the current project. When you need to compile the program, please add the following statement with the agent program which matches your system:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  
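For example, assuming the enhancement binary you built above is at /path/to/skywalking-go/bin/go-agent (the actual file names in the bin directory vary by platform and version), a complete build invocation could look like:\ngo build -toolexec=\u0026#34;/path/to/skywalking-go/bin/go-agent\u0026#34; -a .\nThis is only a sketch; point -toolexec at whichever agent binary from the bin directory matches your system.\n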
If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a ","title":"Build and use the Agent from source codes","url":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/build-and-use-agent/"},{"content":"Building This document will help you compile and build the project in golang environment.\nPlatform Linux, macOS, and Windows are supported in SkyWalking Infra E2E.\nCommand git clone https://github.com/apache/skywalking-infra-e2e.git cd skywalking-infra-e2e make build After these commands, the e2e execute file path is bin/$PLATFORM/e2e.\n","title":"Building","url":"/docs/skywalking-infra-e2e/latest/en/contribution/compiling-guidance/"},{"content":"Building This document will help you compile and build the project in golang environment.\nPlatform Linux, macOS, and Windows are supported in SkyWalking Infra E2E.\nCommand git clone https://github.com/apache/skywalking-infra-e2e.git cd skywalking-infra-e2e make build After these commands, the e2e execute file path is bin/$PLATFORM/e2e.\n","title":"Building","url":"/docs/skywalking-infra-e2e/next/en/contribution/compiling-guidance/"},{"content":"Building This document will help you compile and build the project in golang environment.\nPlatform Linux, macOS, and Windows are supported in SkyWalking Infra E2E.\nCommand git clone https://github.com/apache/skywalking-infra-e2e.git cd skywalking-infra-e2e make build After these commands, the e2e execute file path is bin/$PLATFORM/e2e.\n","title":"Building","url":"/docs/skywalking-infra-e2e/v1.3.0/en/contribution/compiling-guidance/"},{"content":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","title":"CDS - Configuration Discovery Service","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configuration-discovery/"},{"content":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. 
The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","title":"CDS - Configuration Discovery Service","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/configuration-discovery/"},{"content":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","title":"CDS - Configuration Discovery Service","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/configuration-discovery/"},{"content":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. 
Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","title":"CDS - Configuration Discovery Service","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/configuration-discovery/"},{"content":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","title":"CDS - Configuration Discovery Service","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/configuration-discovery/"},{"content":"ClickHouse monitoring ClickHouse server performance from built-in metrics data SkyWalking leverages ClickHouse built-in metrics data since v20.1.2.4. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  Configure ClickHouse to expose metrics data for scraping from Prometheus. OpenTelemetry Collector fetches metrics from ClickeHouse server through Prometheus endpoint, and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up built-in prometheus endpoint . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  ClickHouse Monitoring ClickHouse monitoring provides monitoring of the metrics 、events and asynchronous_metrics of the ClickHouse server. 
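These metric families come from ClickHouse\u0026rsquo;s built-in Prometheus endpoint. As a minimal sketch, assuming your server uses the YAML configuration format and the commonly used port 9363 (adjust both to your deployment), the relevant server settings look like:\nprometheus:\n  endpoint: /metrics\n  port: 9363\n  metrics: true\n  events: true\n  asynchronous_metrics: true\n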
ClickHouse cluster is cataloged as a Layer: CLICKHOUSE Service in OAP. Each ClickHouse server is cataloged as an Instance in OAP.\nClickHouse Instance Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CpuUsage count meter_clickhouse_instance_cpu_usage CPU time spent seen by OS per second(according to ClickHouse.system.dashboard.CPU Usage (cores)). ClickHouse   MemoryUsage percentage meter_clickhouse_instance_memory_usage Total amount of memory (bytes) allocated by the server/ total amount of OS memory. ClickHouse   MemoryAvailable percentage meter_clickhouse_instance_memory_available Total amount of memory (bytes) available for program / total amount of OS memory. ClickHouse   Uptime sec meter_clickhouse_instance_uptime The server uptime in seconds. It includes the time spent for server initialization before accepting connections. ClickHouse   Version string meter_clickhouse_instance_version Version of the server in a single integer number in base-1000. ClickHouse   FileOpen count meter_clickhouse_instance_file_open Number of files opened. ClickHouse    ClickHouse Network Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     TcpConnections count meter_clickhouse_instance_tcp_connectionsmeter_clickhouse_tcp_connections Number of connections to TCP server. ClickHouse   MysqlConnections count meter_clickhouse_instance_mysql_connectionsmeter_clickhouse_mysql_connections Number of client connections using MySQL protocol. ClickHouse   HttpConnections count meter_clickhouse_instance_http_connectionsmeter_clickhouse_mysql_connections Number of connections to HTTP server. ClickHouse   InterserverConnections count meter_clickhouse_instance_interserver_connectionsmeter_clickhouse_interserver_connections Number of connections from other replicas to fetch parts. ClickHouse   PostgresqlConnections count meter_clickhouse_instance_postgresql_connectionsmeter_clickhouse_postgresql_connections Number of client connections using PostgreSQL protocol. ClickHouse   ReceiveBytes bytes meter_clickhouse_instance_network_receive_bytesmeter_clickhouse_network_receive_bytes Total number of bytes received from network. ClickHouse   SendBytes bytes meter_clickhouse_instance_network_send_bytesmeter_clickhouse_network_send_bytes Total number of bytes send to network. ClickHouse    ClickHouse Query Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     QueryCount count meter_clickhouse_instance_querymeter_clickhouse_query Number of executing queries. ClickHouse   SelectQueryCount count meter_clickhouse_instance_query_selectmeter_clickhouse_query_select Number of executing queries, but only for SELECT queries. ClickHouse   InsertQueryCount count meter_clickhouse_instance_query_insertmeter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   SelectQueryRate count/sec meter_clickhouse_instance_query_select_ratemeter_clickhouse_query_select_rate Number of SELECT queries per second. ClickHouse   InsertQueryRate count/sec meter_clickhouse_instance_query_insert_ratemeter_clickhouse_query_insert_rate Number of INSERT queries per second. ClickHouse   Querytime microsec meter_clickhouse_instance_querytime_microsecondsmeter_clickhouse_querytime_microseconds Total time of all queries. ClickHouse   SelectQuerytime microsec meter_clickhouse_instance_querytime_select_microsecondsmeter_clickhouse_querytime_select_microseconds Total time of SELECT queries. 
ClickHouse   InsertQuerytime microsec meter_clickhouse_instance_querytime_insert_microsecondsmeter_clickhouse_querytime_insert_microseconds Total time of INSERT queries. ClickHouse   OtherQuerytime microsec meter_clickhouse_instance_querytime_other_microsecondsmeter_clickhouse_querytime_other_microseconds Total time of queries that are not SELECT or INSERT. ClickHouse   QuerySlowCount count meter_clickhouse_instance_query_slowmeter_clickhouse_query_slow Number of reads from a file that were slow. ClickHouse    ClickHouse Insertion Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     InsertQueryCount count meter_clickhouse_instance_query_insertmeter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   InsertedRowCount count meter_clickhouse_instance_inserted_rowsmeter_clickhouse_inserted_rows Number of rows INSERTed to all tables. ClickHouse   InsertedBytes bytes meter_clickhouse_instance_inserted_bytesmeter_clickhouse_inserted_bytes Number of bytes INSERTed to all tables. ClickHouse   DelayedInsertCount count meter_clickhouse_instance_delayed_insertmeter_clickhouse_delayed_insert Number of times the INSERT of a block to a MergeTree table was throttled due to high number of active data parts for partition. ClickHouse    ClickHouse Replicas Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     ReplicatedChecks count meter_clickhouse_instance_replicated_checksmeter_clickhouse_replicated_checks Number of data parts checking for consistency. ClickHouse   ReplicatedFetch count meter_clickhouse_instance_replicated_fetchmeter_clickhouse_replicated_fetch Number of data parts being fetched from replica. ClickHouse   ReplicatedSend count meter_clickhouse_instance_replicated_sendmeter_clickhouse_replicated_send Number of data parts being sent to replicas. ClickHouse    ClickHouse MergeTree Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     BackgroundMergeCount count meter_clickhouse_instance_background_mergemeter_clickhouse_background_merge Number of executing background merges. ClickHouse   MergeRows count meter_clickhouse_instance_merge_rowsmeter_clickhouse_merge_rows Rows read for background merges. This is the number of rows before merge. ClickHouse   MergeUncompressedBytes bytes meter_clickhouse_instance_merge_uncompressed_bytesmeter_clickhouse_merge_uncompressed_bytes Uncompressed bytes (for columns as they stored in memory) that was read for background merges. This is the number before merge. ClickHouse   MoveCount count meter_clickhouse_instance_movemeter_clickhouse_move Number of currently executing moves. ClickHouse   PartsActive Count meter_clickhouse_instance_parts_activemeter_clickhouse_parts_active Active data part, used by current and upcoming SELECTs. ClickHouse   MutationsCount count meter_clickhouse_instance_mutationsmeter_clickhouse_mutations Number of mutations (ALTER DELETE/UPDATE). ClickHouse    ClickHouse Kafka Table Engine Supported Metrics When table engine works with Apache Kafka.\nKafka lets you:\n Publish or subscribe to data flows. Organize fault-tolerant storage. Process streams as they become available.     Monitoring Panel Unit Metric Name Description Data Source     KafkaMessagesRead count meter_clickhouse_instance_kafka_messages_readmeter_clickhouse_kafka_messages_read Number of Kafka messages already processed by ClickHouse. 
ClickHouse   KafkaWrites count meter_clickhouse_instance_kafka_writesmeter_clickhouse_kafka_writes Number of writes (inserts) to Kafka tables. ClickHouse   KafkaConsumers count meter_clickhouse_instance_kafka_consumersmeter_clickhouse_kafka_consumers Number of active Kafka consumers. ClickHouse   KafkProducers count meter_clickhouse_instance_kafka_producersmeter_clickhouse_kafka_producers Number of active Kafka producer created. ClickHouse    ClickHouse ZooKeeper Supported Metrics ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted.\n   Monitoring Panel Unit Metric Name Description Data Source     ZookeeperSession count meter_clickhouse_instance_zookeeper_sessionmeter_clickhouse_zookeeper_session Number of sessions (connections) to ZooKeeper. ClickHouse   ZookeeperWatch count meter_clickhouse_instance_zookeeper_watchmeter_clickhouse_zookeeper_watch Number of watches (event subscriptions) in ZooKeeper. ClickHouse   ZookeeperBytesSent bytes meter_clickhouse_instance_zookeeper_bytes_sentmeter_clickhouse_zookeeper_bytes_sent Number of bytes send over network while communicating with ZooKeeper. ClickHouse   ZookeeperBytesReceive bytes meter_clickhouse_instance_zookeeper_bytes_receivedmeter_clickhouse_zookeeper_bytes_received Number of bytes send over network while communicating with ZooKeeper. ClickHouse    ClickHouse Keeper Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     KeeperAliveConnections count meter_clickhouse_instance_keeper_connections_alivemeter_clickhouse_keeper_connections_alive Number of alive connections for embedded ClickHouse Keeper. ClickHouse   KeeperOutstandingRequets count meter_clickhouse_instance_keeper_outstanding_requestsmeter_clickhouse_keeper_outstanding_requests Number of outstanding requests for embedded ClickHouse Keeper. ClickHouse    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/clickhouse. The ClickHouse dashboard panel configurations are found in /config/ui-initialized-templates/clickhouse.\n","title":"ClickHouse monitoring","url":"/docs/main/next/en/setup/backend/backend-clickhouse-monitoring/"},{"content":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC server and delivery the data to it.\nDefaultConfig # The gRPC client finder typefinder_type:\u0026#34;static\u0026#34;# The gRPC server address (default localhost:11800), multiple addresses are split by \u0026#34;,\u0026#34;.server_addr:localhost:11800# The gRPC kubernetes server address finderkubernetes_config:# The kind of resourcekind:pod# The resource namespacesnamespaces:- default# How to get the address exported portextra_port:# Resource target portport:11800# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:true# The auth value when send requestauthentication:\u0026#34;\u0026#34;# How frequently to check the connection(second)check_period:5# The gRPC send request timeouttimeout:# The timeout for unary single requestunary:5s# The timeout for unary stream requeststream:20sConfiguration    Name Type Description     finder_type string The gRPC server address finder type, support \u0026ldquo;static\u0026rdquo; and \u0026ldquo;kubernetes\u0026rdquo;   server_addr string The gRPC server address, only works for \u0026ldquo;static\u0026rdquo; address finder   kubernetes_config *resolvers.KubernetesConfig The kubernetes config to lookup addresses, only works for \u0026ldquo;kubernetes\u0026rdquo; address finder   kubernetes_config.api_server string The kubernetes API server address, If not define means using in kubernetes mode to connect   kubernetes_config.basic_auth *resolvers.BasicAuth The HTTP basic authentication credentials for the targets.   kubernetes_config.basic_auth.username string    kubernetes_config.basic_auth.password resolvers.Secret    kubernetes_config.basic_auth.password_file string    kubernetes_config.bearer_token resolvers.Secret The bearer token for the targets.   kubernetes_config.bearer_token_file string The bearer token file for the targets.   kubernetes_config.proxy_url string HTTP proxy server to use to connect to the targets.   kubernetes_config.tls_config resolvers.TLSConfig TLSConfig to use to connect to the targets.   kubernetes_config.namespaces []string Support to lookup namespaces   kubernetes_config.kind string The kind of api   kubernetes_config.selector resolvers.Selector The kind selector   kubernetes_config.extra_port resolvers.ExtraPort How to get the address exported port   enable_TLS bool Enable TLS connect to server   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   authentication string The auth value when send request   check_period int How frequently to check the connection(second)   timeout grpc.TimeoutConfig The gRPC send request timeout    ","title":"Client/grpc-client","url":"/docs/skywalking-satellite/latest/en/setup/plugins/client_grpc-client/"},{"content":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC server and delivery the data to it.\nDefaultConfig # The gRPC client finder typefinder_type:\u0026#34;static\u0026#34;# The gRPC server address (default localhost:11800), multiple addresses are split by \u0026#34;,\u0026#34;.server_addr:localhost:11800# The gRPC kubernetes server address finderkubernetes_config:# The kind of resourcekind:pod# The resource namespacesnamespaces:- default# How to get the address exported portextra_port:# Resource target portport:11800# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. 
The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:true# The auth value when send requestauthentication:\u0026#34;\u0026#34;# How frequently to check the connection(second)check_period:5# The gRPC send request timeouttimeout:# The timeout for unary single requestunary:5s# The timeout for unary stream requeststream:20sConfiguration    Name Type Description     finder_type string The gRPC server address finder type, support \u0026ldquo;static\u0026rdquo; and \u0026ldquo;kubernetes\u0026rdquo;   server_addr string The gRPC server address, only works for \u0026ldquo;static\u0026rdquo; address finder   kubernetes_config *resolvers.KubernetesConfig The kubernetes config to lookup addresses, only works for \u0026ldquo;kubernetes\u0026rdquo; address finder   kubernetes_config.api_server string The kubernetes API server address, If not define means using in kubernetes mode to connect   kubernetes_config.basic_auth *resolvers.BasicAuth The HTTP basic authentication credentials for the targets.   kubernetes_config.basic_auth.username string    kubernetes_config.basic_auth.password resolvers.Secret    kubernetes_config.basic_auth.password_file string    kubernetes_config.bearer_token resolvers.Secret The bearer token for the targets.   kubernetes_config.bearer_token_file string The bearer token file for the targets.   kubernetes_config.proxy_url string HTTP proxy server to use to connect to the targets.   kubernetes_config.tls_config resolvers.TLSConfig TLSConfig to use to connect to the targets.   kubernetes_config.namespaces []string Support to lookup namespaces   kubernetes_config.kind string The kind of api   kubernetes_config.selector resolvers.Selector The kind selector   kubernetes_config.extra_port resolvers.ExtraPort How to get the address exported port   enable_TLS bool Enable TLS connect to server   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   authentication string The auth value when send request   check_period int How frequently to check the connection(second)   timeout grpc.TimeoutConfig The gRPC send request timeout    ","title":"Client/grpc-client","url":"/docs/skywalking-satellite/next/en/setup/plugins/client_grpc-client/"},{"content":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC server and delivery the data to it.\nDefaultConfig # The gRPC client finder typefinder_type:\u0026#34;static\u0026#34;# The gRPC server address (default localhost:11800), multiple addresses are split by \u0026#34;,\u0026#34;.server_addr:localhost:11800# The gRPC kubernetes server address finderkubernetes_config:# The kind of resourcekind:pod# The resource namespacesnamespaces:- default# How to get the address exported portextra_port:# Resource target portport:11800# The TLS switch (default false).enable_TLS:false# The file path of client.pem. 
The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:true# The auth value when send requestauthentication:\u0026#34;\u0026#34;# How frequently to check the connection(second)check_period:5# The gRPC send request timeouttimeout:# The timeout for unary single requestunary:5s# The timeout for unary stream requeststream:20sConfiguration    Name Type Description     finder_type string The gRPC server address finder type, support \u0026ldquo;static\u0026rdquo; and \u0026ldquo;kubernetes\u0026rdquo;   server_addr string The gRPC server address, only works for \u0026ldquo;static\u0026rdquo; address finder   kubernetes_config *resolvers.KubernetesConfig The kubernetes config to lookup addresses, only works for \u0026ldquo;kubernetes\u0026rdquo; address finder   kubernetes_config.api_server string The kubernetes API server address, If not define means using in kubernetes mode to connect   kubernetes_config.basic_auth *resolvers.BasicAuth The HTTP basic authentication credentials for the targets.   kubernetes_config.basic_auth.username string    kubernetes_config.basic_auth.password resolvers.Secret    kubernetes_config.basic_auth.password_file string    kubernetes_config.bearer_token resolvers.Secret The bearer token for the targets.   kubernetes_config.bearer_token_file string The bearer token file for the targets.   kubernetes_config.proxy_url string HTTP proxy server to use to connect to the targets.   kubernetes_config.tls_config resolvers.TLSConfig TLSConfig to use to connect to the targets.   kubernetes_config.namespaces []string Support to lookup namespaces   kubernetes_config.kind string The kind of api   kubernetes_config.selector resolvers.Selector The kind selector   kubernetes_config.extra_port resolvers.ExtraPort How to get the address exported port   enable_TLS bool Enable TLS connect to server   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   authentication string The auth value when send request   check_period int How frequently to check the connection(second)   timeout grpc.TimeoutConfig The gRPC send request timeout    ","title":"Client/grpc-client","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/client_grpc-client/"},{"content":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the Kafka brokers and delivery the data to it.\nDefaultConfig # The Kafka broker addresses (default localhost:9092). Multiple values are separated by commas.brokers:localhost:9092# The Kafka version should follow this pattern, which is major_minor_veryMinor_patch (default 1.0.0.0).version:1.0.0.0# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. 
The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).required_acks:1# The producer max retry times (default 3).producer_max_retry:3# The meta max retry times (default 3).meta_max_retry:3# How long to wait for the cluster to settle between retries (default 100ms). Time unit is ms.retry_backoff:100# The max message bytes.max_message_bytes:1000000# If enabled, the producer will ensure that exactly one copy of each message is written (default false).idempotent_writes:false# A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes (default Satellite).client_id:Satellite# Compression codec represents the various compression codecs recognized by Kafka in messages. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTDcompression_codec:0# How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. The unit is minute.refresh_period:10# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:trueConfiguration    Name Type Description     brokers string The Kafka broker addresses (default localhost:9092).   version string The version should follow this pattern, which is major.minor.veryMinor.patch.   enable_TLS bool The TLS switch (default false).   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   required_acks int16 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).   producer_max_retry int The producer max retry times (default 3).   meta_max_retry int The meta max retry times (default 3).   retry_backoff int How long to wait for the cluster to settle between retries (default 100ms).   max_message_bytes int The max message bytes.   idempotent_writes bool Ensure that exactly one copy of each message is written when is true.   client_id string A user-provided string sent with every request to the brokers.   compression_codec int Represents the various compression codecs recognized by Kafka in messages.   refresh_period int How frequently to refresh the cluster metadata.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.    ","title":"Client/kafka-client","url":"/docs/skywalking-satellite/latest/en/setup/plugins/client_kafka-client/"},{"content":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the Kafka brokers and delivery the data to it.\nDefaultConfig # The Kafka broker addresses (default localhost:9092). Multiple values are separated by commas.brokers:localhost:9092# The Kafka version should follow this pattern, which is major_minor_veryMinor_patch (default 1.0.0.0).version:1.0.0.0# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).required_acks:1# The producer max retry times (default 3).producer_max_retry:3# The meta max retry times (default 3).meta_max_retry:3# How long to wait for the cluster to settle between retries (default 100ms). Time unit is ms.retry_backoff:100# The max message bytes.max_message_bytes:1000000# If enabled, the producer will ensure that exactly one copy of each message is written (default false).idempotent_writes:false# A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes (default Satellite).client_id:Satellite# Compression codec represents the various compression codecs recognized by Kafka in messages. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTDcompression_codec:0# How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. The unit is minute.refresh_period:10# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:trueConfiguration    Name Type Description     brokers string The Kafka broker addresses (default localhost:9092).   version string The version should follow this pattern, which is major.minor.veryMinor.patch.   enable_TLS bool The TLS switch (default false).   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   required_acks int16 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).   producer_max_retry int The producer max retry times (default 3).   meta_max_retry int The meta max retry times (default 3).   retry_backoff int How long to wait for the cluster to settle between retries (default 100ms).   max_message_bytes int The max message bytes.   idempotent_writes bool Ensure that exactly one copy of each message is written when is true.   client_id string A user-provided string sent with every request to the brokers.   compression_codec int Represents the various compression codecs recognized by Kafka in messages.   refresh_period int How frequently to refresh the cluster metadata.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.    ","title":"Client/kafka-client","url":"/docs/skywalking-satellite/next/en/setup/plugins/client_kafka-client/"},{"content":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the Kafka brokers and delivery the data to it.\nDefaultConfig # The Kafka broker addresses (default localhost:9092). Multiple values are separated by commas.brokers:localhost:9092# The Kafka version should follow this pattern, which is major_minor_veryMinor_patch (default 1.0.0.0).version:1.0.0.0# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).required_acks:1# The producer max retry times (default 3).producer_max_retry:3# The meta max retry times (default 3).meta_max_retry:3# How long to wait for the cluster to settle between retries (default 100ms). Time unit is ms.retry_backoff:100# The max message bytes.max_message_bytes:1000000# If enabled, the producer will ensure that exactly one copy of each message is written (default false).idempotent_writes:false# A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes (default Satellite).client_id:Satellite# Compression codec represents the various compression codecs recognized by Kafka in messages. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTDcompression_codec:0# How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. The unit is minute.refresh_period:10# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:trueConfiguration    Name Type Description     brokers string The Kafka broker addresses (default localhost:9092).   version string The version should follow this pattern, which is major.minor.veryMinor.patch.   enable_TLS bool The TLS switch (default false).   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   required_acks int16 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).   producer_max_retry int The producer max retry times (default 3).   meta_max_retry int The meta max retry times (default 3).   retry_backoff int How long to wait for the cluster to settle between retries (default 100ms).   max_message_bytes int The max message bytes.   idempotent_writes bool Ensure that exactly one copy of each message is written when is true.   client_id string A user-provided string sent with every request to the brokers.   compression_codec int Represents the various compression codecs recognized by Kafka in messages.   refresh_period int How frequently to refresh the cluster metadata.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.    ","title":"Client/kafka-client","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/client_kafka-client/"},{"content":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The examples listed in this folder show how to use this command to create, update, read and delete schemas. Furthermore, bydbctl could help in querying data stored in streams, measures and properties.\nThese are several ways to install:\n Get binaries from download. Build from sources to get latest features.  The config file named .bydbctl.yaml will be created in $HOME folder after the first CRUD command is applied.\n\u0026gt; more ~/.bydbctl.yaml addr: http://127.0.0.1:64299 group: \u0026#34;\u0026#34; bydbctl leverages HTTP endpoints to retrieve data instead of gRPC.\nHTTP client Users could select any HTTP client to access the HTTP based endpoints. 
The default address is localhost:17913/api\nJava Client The Java native client is hosted at skywalking-banyandb-java-client.\nWeb application The web application is hosted at skywalking-banyandb-webapp when you boot up the BanyanDB server.\ngRPC command-line tool Users can use any command-line tool to interact with the Banyand server\u0026rsquo;s gRPC endpoints. The only limitation is that the CLI tool has to support file descriptor files, since the database server does not support server reflection.\nBuf is a Protobuf build tool that BanyanDB relies on. It can provide FileDescriptorSets usable by gRPC CLI tools like grpcurl.\nBanyanDB recommends installing Buf by issuing\n$ make -C api generate The above command compiles the Protobuf schema files (*.proto) after downloading buf into \u0026lt;project_root\u0026gt;/bin\nUsers could leverage buf\u0026rsquo;s internal compiler to generate the FileDescriptorSets:\n$ cd api $ ../bin/buf build -o image.bin If grpcurl is the CLI tool used to access the APIs of BanyanDB, image.bin can be used with it on the fly:\n$ grpcurl -plaintext -protoset image.bin localhost:17912 ... ","title":"Clients","url":"/docs/skywalking-banyandb/latest/clients/"},{"content":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The examples listed in this folder show how to use this command to create, update, read and delete schemas. Furthermore, bydbctl could help in querying data stored in streams, measures and properties.\nThere are several ways to install:\n Get binaries from download. Build from sources to get the latest features.  The config file named .bydbctl.yaml will be created in the $HOME folder after the first CRUD command is applied.\n\u0026gt; more ~/.bydbctl.yaml addr: http://127.0.0.1:64299 group: \u0026#34;\u0026#34; bydbctl leverages HTTP endpoints to retrieve data instead of gRPC.\nHTTP client Users could select any HTTP client to access the HTTP-based endpoints. The default address is localhost:17913/api\nJava Client The Java native client is hosted at skywalking-banyandb-java-client.\nWeb application The web application is hosted at skywalking-banyandb-webapp when you boot up the BanyanDB server.\ngRPC command-line tool Users can use any command-line tool to interact with the Banyand server\u0026rsquo;s gRPC endpoints. The only limitation is that the CLI tool has to support file descriptor files, since the database server does not support server reflection.\nBuf is a Protobuf build tool that BanyanDB relies on. It can provide FileDescriptorSets usable by gRPC CLI tools like grpcurl.\nBanyanDB recommends installing Buf by issuing\n$ make -C api generate The above command compiles the Protobuf schema files (*.proto) after downloading buf into \u0026lt;project_root\u0026gt;/bin\nUsers could leverage buf\u0026rsquo;s internal compiler to generate the FileDescriptorSets:\n$ cd api $ ../bin/buf build -o image.bin If grpcurl is the CLI tool used to access the APIs of BanyanDB, image.bin can be used with it on the fly:\n$ grpcurl -plaintext -protoset image.bin localhost:17912 ... ","title":"Clients","url":"/docs/skywalking-banyandb/next/clients/"},{"content":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The examples listed in this folder show how to use this command to create, update, read and delete schemas.
Furthermore, bydbctl could help in querying data stored in streams, measures and properties.\nThese are several ways to install:\n Get binaries from download. Build from sources to get latest features.  The config file named .bydbctl.yaml will be created in $HOME folder after the first CRUD command is applied.\n\u0026gt; more ~/.bydbctl.yaml addr: http://127.0.0.1:64299 group: \u0026#34;\u0026#34; bydbctl leverages HTTP endpoints to retrieve data instead of gRPC.\nHTTP client Users could select any HTTP client to access the HTTP based endpoints. The default address is localhost:17913/api\nJava Client The java native client is hosted at skywalking-banyandb-java-client.\nWeb application The web application is hosted at skywalking-banyandb-webapp when you boot up the BanyanDB server.\ngRPC command-line tool Users have a chance to use any command-line tool to interact with the Banyand server\u0026rsquo;s gRPC endpoints. The only limitation is the CLI tool has to support file descriptor files since the database server does not support server reflection.\nBuf is a Protobuf building tooling the BanyanDB relies on. It can provide FileDescriptorSets usable by gRPC CLI tools like grpcurl\nBanyanDB recommends installing Buf by issuing\n$ make -C api generate Protobuf schema files are compiled Above command will compile *.proto after downloading buf into \u0026lt;project_root\u0026gt;/bin\nUsers could leverage buf\u0026rsquo;s internal compiler to generate the FileDescriptorSets\n$ cd api $ ../bin/buf build -o image.bin If grpcurl is the CLI tool to access the APIs of BanyanDb. To use image.bin with it on the fly:\n$ grpcurl -plaintext -protoset image.bin localhost:17912 ... ","title":"Clients","url":"/docs/skywalking-banyandb/v0.5.0/clients/"},{"content":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe etcd cluster can be setup by the etcd installation guide\nRole-base Banyand Cluster There is an example: The etcd cluster is spread across three nodes with the addresses 10.0.0.1:2379, 10.0.0.2:2379, and 10.0.0.3:2379.\nData nodes and liaison nodes are running as independent processes by\n$ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Node Discovery The node discovery is based on the etcd cluster. The etcd cluster is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe host is registered to the etcd cluster by the banyand-server automatically based on node-host-provider :\n node-host-provider=hostname : Default. The OS\u0026rsquo;s hostname is registered as the host part in the address. node-host-provider=ip : The OS\u0026rsquo;s the first non-loopback active IP address(IPv4) is registered as the host part in the address. node-host-provider=flag : node-host is registered as the host part in the address.  
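For instance, a minimal sketch of registering a data node under an explicit address; it assumes node-host-provider and node-host are exposed as command-line flags in the same style as --etcd-endpoints (the exact flag spelling is an assumption here), and 10.0.0.4 is only an example address:
$ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 --node-host-provider=flag --node-host=10.0.0.4 \u0026lt;flags\u0026gt;   # flag spelling assumed; 10.0.0.4 is an example address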
","title":"Cluster Installation","url":"/docs/skywalking-banyandb/latest/installation/cluster/"},{"content":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe etcd cluster can be setup by the etcd installation guide\nRole-base Banyand Cluster There is an example: The etcd cluster is spread across three nodes with the addresses 10.0.0.1:2379, 10.0.0.2:2379, and 10.0.0.3:2379.\nData nodes and liaison nodes are running as independent processes by\n$ ./banyand-server-static storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server-static storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server-static storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Node Discovery The node discovery is based on the etcd cluster. The etcd cluster is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe host is registered to the etcd cluster by the banyand-server-static automatically based on node-host-provider :\n node-host-provider=hostname : Default. The OS\u0026rsquo;s hostname is registered as the host part in the address. node-host-provider=ip : The OS\u0026rsquo;s the first non-loopback active IP address(IPv4) is registered as the host part in the address. node-host-provider=flag : node-host is registered as the host part in the address.  Etcd Authentication etcd supports through tls certificates and RBAC-based authentication for both clients to server communication. This section tends to help users set up authentication for BanyanDB.\nAuthentication with username/password The etcd user can be setup by the etcd authentication guide\nThe username/password is configured in the following command:\n etcd-username: The username for etcd client authentication. etcd-password: The password for etcd client authentication.  Note: recommended using environment variables to set username/password for higher security.\n$ ./banyand-server-static storage --etcd-endpoints=your-endpoints --etcd-username=your-username --etcd-password=your-password \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=your-endpoints --etcd-username=your-username --etcd-password=your-password \u0026lt;flags\u0026gt; Transport security with HTTPS The etcd trusted certificate file can be setup by the etcd transport security model\n etcd-tls-ca-file: The path of the trusted certificate file.  $ ./banyand-server-static storage --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=youf-file-path \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=youf-file-path \u0026lt;flags\u0026gt; Authentication with HTTPS client certificates The etcd client certificates can be setup by the etcd transport security model\n etcd-tls-ca-file: The path of the trusted certificate file. etcd-tls-cert-file: Certificate used for SSL/TLS connections to etcd. When this option is set, advertise-client-urls can use the HTTPS schema. etcd-tls-key-file: Key for the certificate. Must be unencrypted.  
$ ./banyand-server-static storage --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=your-file-path --etcd-tls-cert-file=your-file-path --etcd-tls-key-file=your-file-path \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=your-file-path --etcd-tls-cert-file=your-file-path --etcd-tls-key-file=your-file-path \u0026lt;flags\u0026gt; ","title":"Cluster Installation","url":"/docs/skywalking-banyandb/next/installation/cluster/"},{"content":"Cluster Installation Setup Meta Nodes Meta nodes are an etcd cluster, which is required for the metadata module to provide the metadata service and node discovery service for the whole cluster.\nThe etcd cluster can be set up by following the etcd installation guide\nRole-based Banyand Cluster Here is an example: the etcd cluster is spread across three nodes with the addresses 10.0.0.1:2379, 10.0.0.2:2379, and 10.0.0.3:2379.\nData nodes and liaison nodes run as independent processes:\n$ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Node Discovery The node discovery is based on the etcd cluster, which is required for the metadata module to provide the metadata service and node discovery service for the whole cluster.\nThe host is registered to the etcd cluster automatically by banyand-server, based on node-host-provider :\n node-host-provider=hostname : Default. The OS\u0026rsquo;s hostname is registered as the host part in the address. node-host-provider=ip : The OS\u0026rsquo;s first non-loopback active IP address (IPv4) is registered as the host part in the address. node-host-provider=flag : node-host is registered as the host part in the address.  ","title":"Cluster Installation","url":"/docs/skywalking-banyandb/v0.5.0/installation/cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to set up CLUSTER management in a production environment. Otherwise, you would face inaccurate metrics.\ncore/gRPCHost listens on 0.0.0.0 for a quick start in the single mode, which covers most cases. Besides the Kubernetes coordinator, which uses the cloud-native mode to establish the cluster, all other coordinators require core/gRPCHost to be updated to real IP addresses, or refer to internalComHost and internalComPort in each coordinator\u0026rsquo;s doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes use a gateway as a load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. 
Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nCloud Native Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nTraditional Coordinator NOTICE In all the following coordinators, oap.internal.comm.host:oap.internal.comm.port is registered as the ID and address for the current OAP node. By default, because they are same in all OAP nodes, the registrations are conflicted, and (may) show as one registered node, which actually would be the node itself. In this case, the cluster mode is NOT working.\nPlease check the registered nodes on your coordinator servers, to make the registration information unique for every node. You could have two options\n Change core/gRPCHost(oap.internal.comm.host) and core/gRPCPort(oap.internal.comm.port) for internal, and setup external communication channels for data reporting and query. Use internalComHost and internalComPort in the config to provide a unique host and port for every OAP node. This host name port should be accessible for other OAP nodes.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  
zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  
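As a concrete illustration, here is a sketch of bootstrapping one OAP node with the Nacos coordinator purely through environment variables; it assumes the SW_CLUSTER and SW_CLUSTER_INTERNAL_COM_* variables shown in the Zookeeper example above are also honored by the Nacos cluster block, and that the server is started with the bundled startup script:
$ export SW_CLUSTER=nacos
$ export SW_CLUSTER_INTERNAL_COM_HOST=10.0.0.5   # example LAN address of this OAP node
$ export SW_CLUSTER_INTERNAL_COM_PORT=11800
$ bash bin/oapService.sh   # bundled OAP startup script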
","title":"Cluster Management","url":"/docs/main/latest/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nCloud Native Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nTraditional Coordinator NOTICE In all the following coordinators, oap.internal.comm.host:oap.internal.comm.port is registered as the ID and address for the current OAP node. By default, because they are same in all OAP nodes, the registrations are conflicted, and (may) show as one registered node, which actually would be the node itself. In this case, the cluster mode is NOT working.\nPlease check the registered nodes on your coordinator servers, to make the registration information unique for every node. You could have two options\n Change core/gRPCHost(oap.internal.comm.host) and core/gRPCPort(oap.internal.comm.port) for internal, and setup external communication channels for data reporting and query. Use internalComHost and internalComPort in the config to provide a unique host and port for every OAP node. This host name port should be accessible for other OAP nodes.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. 
Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  cluster:selector:${SW_CLUSTER:zookeeper}...zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}...consul:serviceName:${SW_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}# Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500hostPort:${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}aclToken:${SW_CLUSTER_CONSUL_ACLTOKEN:\u0026#34;\u0026#34;}internalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:\u0026#34;\u0026#34;}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:-1}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. 
Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  ","title":"Cluster Management","url":"/docs/main/next/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many product environments, the backend needs to support high throughput and provide HA to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. 
Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must make sure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking and all permissions to the specified user will be granted. Simultaneously, znode grants the READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The require backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registed host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the oap nodes. 
The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","title":"Cluster Management","url":"/docs/main/v9.0.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  
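For example, to let SkyWalking authenticate with a digest ACL as described in the note above, the ACL-related keys of the zookeeper block can be overridden as in the following sketch (the skywalking:skywalking credential is only an illustrative value and should be replaced):
cluster:
  selector: ${SW_CLUSTER:zookeeper}
  zookeeper:
    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
    # enable ACL and authenticate with the digest schema (the only supported schema)
    enableACL: ${SW_ZK_ENABLE_ACL:true}
    schema: ${SW_ZK_SCHEMA:digest}
    expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}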
In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. 
Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","title":"Cluster Management","url":"/docs/main/v9.1.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  
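Alternatively, instead of exposing a separate internal address, the gRPC host and port in the core module itself can be pointed at a LAN address that the other OAP nodes can reach. A minimal sketch of that core section (the environment variable names and the sample address are assumptions, not values taken from this page; check the bundled application.yml):
core:
  selector: ${SW_CORE:default}
  default:
    gRPCHost: ${SW_CORE_GRPC_HOST:172.10.4.10}   # must not stay 0.0.0.0 in cluster mode
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}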
zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  
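Putting the pieces above together, a Nacos coordinator with username/password authentication and an explicit internal communication address could be sketched as follows (the credential and address values are placeholders rather than defaults shipped with SkyWalking):
cluster:
  selector: ${SW_CLUSTER:nacos}
  nacos:
    username: nacos            # leave empty when authentication is not required
    password: nacos
    internalComHost: 172.10.4.10   # an address the other OAP nodes can reach
    internalComPort: 11800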
","title":"Cluster Management","url":"/docs/main/v9.2.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. 
Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  
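Similarly, for the Etcd coordinator shown earlier on this page, authentication against a multi-node etcd cluster can be enabled by overriding the same keys; a sketch with placeholder endpoints and credentials:
cluster:
  selector: ${SW_CLUSTER:etcd}
  etcd:
    endpoints: ${SW_CLUSTER_ETCD_ENDPOINTS:10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379}
    namespace: ${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}
    serviceName: ${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}
    authentication: ${SW_CLUSTER_ETCD_AUTHENTICATION:true}
    user: ${SW_CLUSTER_ETCD_USER:skywalking}
    password: ${SW_CLUSTER_ETCD_PASSWORD:skywalking}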
","title":"Cluster Management","url":"/docs/main/v9.3.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. 
Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  
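For the Consul coordinator described above, usually only the selector, the Consul agent address and, where needed, the internal communication address have to be set. A minimal sketch (the hostPort key, its environment variable and its default value are assumptions and should be verified against the bundled application.yml):
cluster:
  selector: ${SW_CLUSTER:consul}
  consul:
    # assumed key and environment variable for the Consul agent address
    hostPort: ${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}
    internalComHost: 172.10.4.10
    internalComPort: 11800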
","title":"Cluster Management","url":"/docs/main/v9.4.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nKubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nZookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  
In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. 
The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","title":"Cluster Management","url":"/docs/main/v9.5.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support distributed aggregation and high throughput, and provide high availability (HA) to maintain robustness, so you always need to set up CLUSTER management in production environments. Otherwise, you would face inaccurate metrics.\ncore/gRPCHost listens on 0.0.0.0 for a quick start in single-node mode, which covers most cases. Besides the Kubernetes coordinator, which uses the cloud-native mode to establish the cluster, all other coordinators require core/gRPCHost to be updated to a real IP address, or refer to internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend that agents/probes use a gateway as the load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nKubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID, which is located at metadata.uid, as the value of the system environment variable SKYWALKING_COLLECTOR_UID.\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nZookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enables Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is an expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. 
If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. 
Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","title":"Cluster Management","url":"/docs/main/v9.6.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nCloud Native Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nTraditional Coordinator NOTICE In all the following coordinators, oap.internal.comm.host:oap.internal.comm.port is registered as the ID and address for the current OAP node. By default, because they are same in all OAP nodes, the registrations are conflicted, and (may) show as one registered node, which actually would be the node itself. In this case, the cluster mode is NOT working.\nPlease check the registered nodes on your coordinator servers, to make the registration information unique for every node. 
You could have two options\n Change core/gRPCHost(oap.internal.comm.host) and core/gRPCPort(oap.internal.comm.port) for internal, and setup external communication channels for data reporting and query. Use internalComHost and internalComPort in the config to provide a unique host and port for every OAP node. This host name port should be accessible for other OAP nodes.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. 
Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  ","title":"Cluster Management","url":"/docs/main/v9.7.0/en/setup/backend/backend-cluster/"},{"content":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully utilize the clarity and performance boost brought by f-strings. Please do not use other styles - +, % or .format unless f-string is absolutely unfeasible in the context, or it is a logger message, which is optimized for the % style\nRun make dev-fix to invoke flynt to convert other formats to f-string, pay extra care to possible corner cases leading to a semantically different conversion.\nQuotes As we know both single quotes and double quotes are both acceptable in Python. For a better coding style, we enforce a check for using single quotes when possible.\nPlease only use double quotes on the outside when there are inevitable single quotes inside the string, or when there are nest quotes.\nFor example -\nfoo = f\u0026#34;I\u0026#39;m a string\u0026#34; bar = f\u0026#34;This repo is called \u0026#39;skywalking-python\u0026#39;\u0026#34; Run make dev-fix to invoke unify to deal with your quotes if flake8 complaints about it.\nDebug messages Please import the logger_debug_enabled variable and wrap your debug messages with a check.\nThis should be done for all performance critical components.\nif logger_debug_enabled: logger.debug(\u0026#39;Message - %s\u0026#39;, some_func()) Imports Please make sure the imports are placed in a good order, or flake8-isort will notify you of the violations.\nRun make dev-fix to automatically fix the sorting problem.\nNaming In PEP8 convention, we are required to use snake_case as the accepted style.\nHowever, there are special cases. 
For example, you are overriding/monkey-patching a method which happens to use the old style camelCase naming, then it is acceptable to have the original naming convention to preserve context.\nPlease mark the line with # noqa to avoid linting.\n","title":"Coding Style for SkyWalking Python","url":"/docs/skywalking-python/latest/en/contribution/codingstyle/"},{"content":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully utilize the clarity and performance boost brought by f-strings. Please do not use other styles - +, % or .format unless f-string is absolutely unfeasible in the context, or it is a logger message, which is optimized for the % style\nRun make dev-fix to invoke flynt to convert other formats to f-string, pay extra care to possible corner cases leading to a semantically different conversion.\nQuotes As we know both single quotes and double quotes are both acceptable in Python. For a better coding style, we enforce a check for using single quotes when possible.\nPlease only use double quotes on the outside when there are inevitable single quotes inside the string, or when there are nest quotes.\nFor example -\nfoo = f\u0026#34;I\u0026#39;m a string\u0026#34; bar = f\u0026#34;This repo is called \u0026#39;skywalking-python\u0026#39;\u0026#34; Run make dev-fix to invoke unify to deal with your quotes if flake8 complaints about it.\nDebug messages Please import the logger_debug_enabled variable and wrap your debug messages with a check.\nThis should be done for all performance critical components.\nif logger_debug_enabled: logger.debug(\u0026#39;Message - %s\u0026#39;, some_func()) Imports Please make sure the imports are placed in a good order, or flake8-isort will notify you of the violations.\nRun make dev-fix to automatically fix the sorting problem.\nNaming In PEP8 convention, we are required to use snake_case as the accepted style.\nHowever, there are special cases. For example, you are overriding/monkey-patching a method which happens to use the old style camelCase naming, then it is acceptable to have the original naming convention to preserve context.\nPlease mark the line with # noqa to avoid linting.\n","title":"Coding Style for SkyWalking Python","url":"/docs/skywalking-python/next/en/contribution/codingstyle/"},{"content":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully utilize the clarity and performance boost brought by f-strings. Please do not use other styles - +, % or .format unless f-string is absolutely unfeasible in the context, or it is a logger message, which is optimized for the % style\nRun make dev-fix to invoke flynt to convert other formats to f-string, pay extra care to possible corner cases leading to a semantically different conversion.\nQuotes As we know both single quotes and double quotes are both acceptable in Python. 
For a better coding style, we enforce a check for using single quotes when possible.\nPlease only use double quotes on the outside when there are inevitable single quotes inside the string, or when there are nest quotes.\nFor example -\nfoo = f\u0026#34;I\u0026#39;m a string\u0026#34; bar = f\u0026#34;This repo is called \u0026#39;skywalking-python\u0026#39;\u0026#34; Run make dev-fix to invoke unify to deal with your quotes if flake8 complaints about it.\nDebug messages Please import the logger_debug_enabled variable and wrap your debug messages with a check.\nThis should be done for all performance critical components.\nif logger_debug_enabled: logger.debug(\u0026#39;Message - %s\u0026#39;, some_func()) Imports Please make sure the imports are placed in a good order, or flake8-isort will notify you of the violations.\nRun make dev-fix to automatically fix the sorting problem.\nNaming In PEP8 convention, we are required to use snake_case as the accepted style.\nHowever, there are special cases. For example, you are overriding/monkey-patching a method which happens to use the old style camelCase naming, then it is acceptable to have the original naming convention to preserve context.\nPlease mark the line with # noqa to avoid linting.\n","title":"Coding Style for SkyWalking Python","url":"/docs/skywalking-python/v1.0.1/en/contribution/codingstyle/"},{"content":"Collecting and Gathering Kubernetes Monitoring Data Motivation SkyWalking has provided an access log collector based on the Agent layer and Service Mesh layer, and can generate corresponding topology maps and metrics based on the data. However, the Kubernetes Layer still lacks corresponding access log collector and analysis work.\nThis proposal is dedicated to collecting and analyzing network access logs in Kubernetes.\nArchitecture Graph There is no significant architecture-level change. Still using the Rover project to collect data and report it to SkyWalking OAP using the gRPC protocol.\nPropose Changes Based on the content in Motivation, if we want to ignore the application types(different program languages) and only monitor network logs, using eBPF is a good choice. It mainly reflects in the following aspects:\n Non-intrusive: When monitoring network access logs with eBPF, the application do not need to make any changes to be monitored. Language-unrestricted: Regardless of which programming language is used in the application, network data will ultimately be accessed through Linux Syscalls. Therefore, we can monitor network data by attaching eBPF to the syscalls layer, thus ignoring programming languages. Kernel interception: Since eBPF can attach to the kernel methods, it can obtain the execution status of each packet at L2-L4 layers and generate more detailed metrics.  Based on these reasons and collected data, they can be implemented in SkyWalking Rover and collected and monitored based on the following steps:\n Monitor the network execution status of all processes in Kubernetes when the Rover system starts. Periodically report data content via gRPC protocol to SkyWalking OAP. SkyWalking OAP parses network access logs and generates corresponding network topology, metrics, etc.  Limitation For content that uses TLS for data transmission, Rover will detect whether the current language uses libraries such as OpenSSL. 
If it is used, it will asynchronously intercept relevant OpenSSL methods when the process starts to perceive the original data content.\nHowever, this approach is not feasible for Java because Java does not use the OpenSSL library but performs encryption/decryption through Java code. Currently, eBPF cannot intercept Java method calls. Therefore, it results in an inability to perceive the TLS data protocol in Java.\nService with Istio sidecar scenario If the Service is deployed in Istio sidecar, it will still monitor each process. If the Service is a Java service and uses TLS, it can analyze the relevant traffic generated in the sidecar (envoy).\nImported Dependencies libs and their licenses. No new library is planned to be added to the codebase.\nCompatibility About the protocol, there should be no breaking changes, but enhancements only:\n Rover: adding a new gRPC data collection protocol for reporting the access logs. OAP: It should have no protocol updates. The existing query protocols are already sufficient for querying Kubernetes topology and metric data.  Data Generation Entity  service_traffic     column data type value description     name string kubernetes service name   short_name string same with name   service_id string base64(name).1   group string empty string   layer string KUBERNETES     instance_traffic     column data type value description     service_id string base64(service_name).1   name string pod name   last_ping long last access log message timestamp(millisecond)   properties json empty string     endpoint_traffic     column data type value description     service_id string base64(service_name).1   name string access log endpoint name(for HTTP1, is URI)    Entity Relation All entity information is built on connections. If the target address is remote, the name will be resolved in the following order:\n If it is a pod IP, it will be resolved as pod information. If it is a service IP, it will be resolved as service information. If neither exists, only pod information will be displayed.  Different entities have different displays for remote addresses. Please refer to the following table.\n   table name remote info(display by following order)     service_relation service name, remote IP address   instance_relation pod name, remote IP address    NOTICE: If it is the internal data interaction within the pod, such as exchanging data between services and sidecar (envoy), no corresponding traffic will be generated. We only generate and interact with external pods.\nLimitation If the service IP is used to send requests to the upstream, we will use eBPF to perceive the real target PodIP by perceiving relevant conntrack records.\nHowever, if conntrack technology is not used, it is difficult to perceive the real target IP address. In this case, instance relation data of this kind will be dropped, but we will mark all discarded relationship generation counts through a metric for better understanding of the situation.\nMetrics Integrate the data into the OAL system and generate corresponding metrics through predefined data combined with OAL statements.\nGeneral usage docs This proposal will only add a module to Rover that explains the configuration of access logs, and changes in the Kubernetes module on the UI.\nIn the Kubernetes UI, users can see the following additions:\n Topology: A topology diagram showing the calling relationships between services, instances, and processes. Entity Metrics: Metric data for services, instances, and processes. 
Call Relationship Metrics: Metrics for call relationships between different entities.  ","title":"Collecting and Gathering Kubernetes Monitoring Data","url":"/docs/main/next/en/swip/swip-2/"},{"content":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","title":"Collecting File Log","url":"/docs/main/latest/en/setup/backend/filelog-native/"},{"content":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","title":"Collecting File Log","url":"/docs/main/next/en/setup/backend/filelog-native/"},{"content":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. 
Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","title":"Collecting File Log","url":"/docs/main/v9.5.0/en/setup/backend/filelog-native/"},{"content":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","title":"Collecting File Log","url":"/docs/main/v9.6.0/en/setup/backend/filelog-native/"},{"content":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). 
Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","title":"Collecting File Log","url":"/docs/main/v9.7.0/en/setup/backend/filelog-native/"},{"content":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","title":"Collecting Logs by Agents","url":"/docs/main/latest/en/setup/backend/log-agent-native/"},{"content":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","title":"Collecting Logs by Agents","url":"/docs/main/next/en/setup/backend/log-agent-native/"},{"content":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). 
When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","title":"Collecting Logs by Agents","url":"/docs/main/v9.5.0/en/setup/backend/log-agent-native/"},{"content":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","title":"Collecting Logs by Agents","url":"/docs/main/v9.6.0/en/setup/backend/log-agent-native/"},{"content":"Collecting Logs by Agents Some of SkyWalking native agents support collecting logs and sending them to OAP server without local files and/or file agents, which are listed in here.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","title":"Collecting Logs by Agents","url":"/docs/main/v9.7.0/en/setup/backend/log-agent-native/"},{"content":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment Key Description     logger.level INFO ROVER_LOGGER_LEVEL The lowest level of printing allowed.    Core Core is used to communicate with the backend server. It provides APIs for other modules to establish connections with the backend.\n   Name Default Environment Key Description     core.cluster_name  ROVER_CORE_CLUSTER_NAME The name of the cluster.   
core.backend.addr localhost:11800 ROVER_BACKEND_ADDR The backend server address.   core.backend.enable_TLS false ROVER_BACKEND_ENABLE_TLS The TLS switch.   core.backend.client_pem_path client.pem ROVER_BACKEND_PEM_PATH The file path of client.pem. The config only works when the TLS switch is enabled.   core.backend.client_key_path client.key ROVER_BACKEND_KEY_PATH The file path of client.key. The config only works when the TLS switch is enabled.   core.backend.insecure_skip_verify false ROVER_BACKEND_INSECURE_SKIP_VERIFY InsecureSkipVerify controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   core.backend.ca_pem_path ca.pem ROVER_BACKEND_CA_PEM_PATH The file path of ca.pem. The config only works when the TLS switch is enabled.   core.backend.check_period 5 ROVER_BACKEND_CHECK_PERIOD How frequently to check the connection (seconds).   core.backend.authentication  ROVER_BACKEND_AUTHENTICATION The auth value used when sending requests.    ","title":"Common configuration","url":"/docs/skywalking-rover/latest/en/setup/configuration/common/"},{"content":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment Key Description     logger.level INFO ROVER_LOGGER_LEVEL The lowest level of printing allowed.    Core Core is used to communicate with the backend server. It provides APIs for other modules to establish connections with the backend.\n   Name Default Environment Key Description     core.cluster_name  ROVER_CORE_CLUSTER_NAME The name of the cluster.   core.backend.addr localhost:11800 ROVER_BACKEND_ADDR The backend server address.   core.backend.enable_TLS false ROVER_BACKEND_ENABLE_TLS The TLS switch.   core.backend.client_pem_path client.pem ROVER_BACKEND_PEM_PATH The file path of client.pem. The config only works when the TLS switch is enabled.   core.backend.client_key_path client.key ROVER_BACKEND_KEY_PATH The file path of client.key. The config only works when the TLS switch is enabled.   core.backend.insecure_skip_verify false ROVER_BACKEND_INSECURE_SKIP_VERIFY InsecureSkipVerify controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   core.backend.ca_pem_path ca.pem ROVER_BACKEND_CA_PEM_PATH The file path of ca.pem. The config only works when the TLS switch is enabled.   core.backend.check_period 5 ROVER_BACKEND_CHECK_PERIOD How frequently to check the connection (seconds).   core.backend.authentication  ROVER_BACKEND_AUTHENTICATION The auth value used when sending requests.    ","title":"Common configuration","url":"/docs/skywalking-rover/next/en/setup/configuration/common/"},{"content":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment Key Description     logger.level INFO ROVER_LOGGER_LEVEL The lowest level of printing allowed.    Core Core is used to communicate with the backend server. It provides APIs for other modules to establish connections with the backend.\n   Name Default Environment Key Description     core.cluster_name  ROVER_CORE_CLUSTER_NAME The name of the cluster.   core.backend.addr localhost:11800 ROVER_BACKEND_ADDR The backend server address.   core.backend.enable_TLS false ROVER_BACKEND_ENABLE_TLS The TLS switch.   core.backend.client_pem_path client.pem ROVER_BACKEND_PEM_PATH The file path of client.pem. The config only works when the TLS switch is enabled.   core.backend.client_key_path client.key ROVER_BACKEND_KEY_PATH The file path of client.key. The config only works when the TLS switch is enabled.   
core.backend.insecure_skip_verify false ROVER_BACKEND_INSECURE_SKIP_VERIFY InsecureSkipVerify controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   core.backend.ca_pem_path ca.pem ROVER_BACKEND_CA_PEM_PATH The file path of ca.pem. The config only works when the TLS switch is enabled.   core.backend.check_period 5 ROVER_BACKEND_CHECK_PERIOD How frequently to check the connection (seconds).   core.backend.authentication  ROVER_BACKEND_AUTHENTICATION The auth value used when sending requests.    ","title":"Common configuration","url":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/common/"},{"content":"Common configuration The common configuration has 2 parts: the logger configuration and the telemetry configuration.\nLogger    Config Default Description     log_pattern %time [%level][%field] - %msg The log format pattern configuration.   time_pattern 2006-01-02 15:04:05.000 The time format pattern configuration.   level info The lowest level of printing allowed.    Self Telemetry    Config Default Description     cluster default-cluster The space concept for the deployment, such as the namespace concept in Kubernetes.   service default-service The group concept for the deployment, such as the service resource concept in Kubernetes.   instance default-instance The minimum running unit, such as the pod concept in Kubernetes.    ","title":"Common configuration","url":"/docs/skywalking-satellite/latest/en/setup/configuration/common/"},{"content":"Common configuration The common configuration has 2 parts: the logger configuration and the telemetry configuration.\nLogger    Config Default Description     log_pattern %time [%level][%field] - %msg The log format pattern configuration.   time_pattern 2006-01-02 15:04:05.000 The time format pattern configuration.   level info The lowest level of printing allowed.    Self Telemetry    Config Default Description     cluster default-cluster The space concept for the deployment, such as the namespace concept in Kubernetes.   service default-service The group concept for the deployment, such as the service resource concept in Kubernetes.   instance default-instance The minimum running unit, such as the pod concept in Kubernetes.    ","title":"Common configuration","url":"/docs/skywalking-satellite/next/en/setup/configuration/common/"},{"content":"Common configuration The common configuration has 2 parts: the logger configuration and the telemetry configuration.\nLogger    Config Default Description     log_pattern %time [%level][%field] - %msg The log format pattern configuration.   time_pattern 2006-01-02 15:04:05.000 The time format pattern configuration.   level info The lowest level of printing allowed.    Self Telemetry    Config Default Description     cluster default-cluster The space concept for the deployment, such as the namespace concept in Kubernetes.   service default-service The group concept for the deployment, such as the service resource concept in Kubernetes.   instance default-instance The minimum running unit, such as the pod concept in Kubernetes.    ","title":"Common configuration","url":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/common/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/latest/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/next/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the same versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.4.0   9.0.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.4.0    Ecosystem Agents All following agent implementations are a part of SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky PHP agent     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0 \u0026gt; = 3.0.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0    All these projects are maintained by their own communities, please reach them if you face any compatibility issue.\n All above compatibility are only references, if you face unimplemented error, it means you need to upgrade OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.0.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agnet) Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky PHP agent     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0 \u0026gt; = 3.0.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.1.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agnet) Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky PHP agent     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0 \u0026gt; = 3.0.0 \u0026amp;\u0026amp; \u0026lt; 5.0.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 5.0.0   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 5.0.0    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.2.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agent) Satellite PHP     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No All   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0 All    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.3.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agent) Satellite PHP     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No All   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0 All    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.4.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agent) Satellite PHP     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No All   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0 All   9.5.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.5.0 \u0026gt; = 1.2.0 All    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.5.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.6.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.7.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. 
https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/latest/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. 
Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/next/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. 
Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.0.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.1.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. 
When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.2.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. 
-Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.3.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.4.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. 
https://github.com/apache/skywalking/pull/4858\n  Class retransformation in another Java agent conflicts with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since ByteBuddy has regenerated the bytecode, the fields and imported class names have been modified, so the JVM verification of the class bytecode fails and the retransformation is unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, the instrumented class bytecode is saved to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it is used directly without regenerating an auxiliary class with a new random name, so the processing of the subsequent Java agent is not affected.\n2. Class cache save mode\nWe recommend saving cached classes to memory, even though it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the following ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cached classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cached classes to the SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.5.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem  When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  Class retransformation in another Java agent conflicts with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. 
Since ByteBuddy has regenerated the bytecode, the fields and imported class names have been modified, so the JVM verification of the class bytecode fails and the retransformation is unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, the instrumented class bytecode is saved to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it is used directly without regenerating an auxiliary class with a new random name, so the processing of the subsequent Java agent is not affected.\n2. Class cache save mode\nWe recommend saving cached classes to memory, even though it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the following ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cached classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cached classes to the SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.6.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem  When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  Class retransformation in another Java agent conflicts with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since ByteBuddy has regenerated the bytecode, the fields and imported class names have been modified, so the JVM verification of the class bytecode fails and the retransformation is unsuccessful.\nResolution 1. 
Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, the instrumented class bytecode is saved to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it is used directly without regenerating an auxiliary class with a new random name, so the processing of the subsequent Java agent is not affected.\n2. Class cache save mode\nWe recommend saving cached classes to memory, even though it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the following ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cached classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cached classes to the SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.7.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux version \u0026gt;= 4.4, and depends on these tools:\n llvm \u0026gt;= 13. libbpf-dev.  MacOS or Windows Make sure a Docker environment is available.\nCommand git clone https://github.com/apache/skywalking-rover cd skywalking-rover # Linux platform make generate build # MacOS or Windows make container-generate build ","title":"Compiling","url":"/docs/skywalking-rover/latest/en/guides/compile/how-to-compile/"},{"content":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux version \u0026gt;= 4.4, and depends on these tools:\n llvm \u0026gt;= 13. libbpf-dev.  MacOS or Windows Make sure a Docker environment is available.\nCommand git clone https://github.com/apache/skywalking-rover cd skywalking-rover # Linux platform make generate build # MacOS or Windows make container-generate build ","title":"Compiling","url":"/docs/skywalking-rover/next/en/guides/compile/how-to-compile/"},{"content":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux version \u0026gt;= 4.4, and depends on these tools:\n llvm \u0026gt;= 13. libbpf-dev.  
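On a Debian-based Linux host, the two build prerequisites listed above can usually be installed with apt; the package names below are an assumption inferred from the tool names in this entry, so verify them against your distribution:
# assumed Debian/Ubuntu package names for llvm (13 or newer) and libbpf
sudo apt install llvm libbpf-dev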
MacOS or Windows Make sure it already has a docker environment.\nCommand git clone https://github.com/apache/skywalking-rover cd skywalking-rover # Linux platform make generate build # MacOS or Windows make container-generate build ","title":"Compiling","url":"/docs/skywalking-rover/v0.6.0/en/guides/compile/how-to-compile/"},{"content":"Compiling Go version Go version 1.18 and 1.19 are supported for compilation.\nPlatform Linux, MacOS and Windows are supported in SkyWalking Satellite. However, some components don\u0026rsquo;t fit the Windows platform, including:\n mmap-queue  Command git clone https://github.com/apache/skywalking-satellite cd skywalking-satellite make build ","title":"Compiling","url":"/docs/skywalking-satellite/latest/en/guides/compile/how-to-compile/"},{"content":"Compiling Go version Go version 1.19 is required for compilation.\nPlatform Linux, MacOS and Windows are supported in SkyWalking Satellite. However, some components don\u0026rsquo;t fit the Windows platform, including:\n mmap-queue  Command git clone https://github.com/apache/skywalking-satellite cd skywalking-satellite make build ","title":"Compiling","url":"/docs/skywalking-satellite/next/en/guides/compile/how-to-compile/"},{"content":"Compiling Go version Go version 1.18 and 1.19 are supported for compilation.\nPlatform Linux, MacOS and Windows are supported in SkyWalking Satellite. However, some components don\u0026rsquo;t fit the Windows platform, including:\n mmap-queue  Command git clone https://github.com/apache/skywalking-satellite cd skywalking-satellite make build ","title":"Compiling","url":"/docs/skywalking-satellite/v1.2.0/en/guides/compile/how-to-compile/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. 
After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/latest/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/next/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. 
You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.0.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.1.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. 
[ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.2.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. 
After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.3.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.4.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. 
You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.5.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.6.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. 
[ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.7.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. 
make docker.push.alpine docker.push.java8 docker.push.java11   ","title":"Compiling project","url":"/docs/skywalking-java/latest/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. make docker.push.alpine docker.push.java8 docker.push.java11   ","title":"Compiling project","url":"/docs/skywalking-java/next/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 8+.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. 
make docker.push.alpine docker.push.java8 docker.push.java11   ","title":"Compiling project","url":"/docs/skywalking-java/v9.0.0/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. make docker.push.alpine docker.push.java8 docker.push.java11   ","title":"Compiling project","url":"/docs/skywalking-java/v9.1.0/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. 
make docker.push.alpine docker.push.java8 docker.push.java11   ","title":"Compiling project","url":"/docs/skywalking-java/v9.2.0/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and Rust environments.\nInstall PHP Environment For Debian user:\nsudo apt install php-cli php-dev For MacOS user:\nbrew install php Install Rust Environment Install Rust 1.65.0+.\nFor Linux user:\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh For MacOS user:\nbrew install rust Install requirement For Debian user:\nsudo apt install gcc make llvm-dev libclang-dev clang protobuf-compiler For MacOS user:\nbrew install protobuf Build and install Skywalking PHP Agent from archive file For Linux user:\nsudo pecl install skywalking_agent-x.y.z.tgz For MacOS user:\n Running the pecl install command with the php installed in brew may encounter the problem of mkdir, please refer to Installing PHP and PECL Extensions on MacOS.\n pecl install skywalking_agent-x.y.z.tgz The extension file skywalking_agent.so is generated in the php extension folder, get it by run php-config --extension-dir.\n","title":"Compiling project","url":"/docs/skywalking-php/latest/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and Rust environments.\nInstall PHP Environment For Debian user:\nsudo apt install php-cli php-dev For MacOS user:\nbrew install php Install Rust Environment Install Rust 1.65.0+.\nFor Linux user:\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh For MacOS user:\nbrew install rust Install requirement For Debian user:\nsudo apt install gcc make llvm-dev libclang-dev clang protobuf-compiler For MacOS user:\nbrew install protobuf Build and install Skywalking PHP Agent from archive file For Linux user:\nsudo pecl install skywalking_agent-x.y.z.tgz For MacOS user:\n Running the pecl install command with the php installed in brew may encounter the problem of mkdir, please refer to Installing PHP and PECL Extensions on MacOS.\n pecl install skywalking_agent-x.y.z.tgz The extension file skywalking_agent.so is generated in the php extension folder, get it by run php-config --extension-dir.\n","title":"Compiling project","url":"/docs/skywalking-php/next/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and Rust environments.\nInstall PHP Environment For Debian user:\nsudo apt install php-cli php-dev For MacOS user:\nbrew install php Install Rust Environment Install Rust 1.65.0+.\nFor Linux user:\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh For MacOS user:\nbrew install rust Install requirement For Debian user:\nsudo apt install gcc make llvm-dev libclang-dev clang protobuf-compiler For MacOS user:\nbrew install protobuf Build and install Skywalking PHP Agent from archive file For Linux user:\nsudo pecl install skywalking_agent-x.y.z.tgz For MacOS user:\n Running the pecl install command with the php installed in brew may encounter the problem of mkdir, please refer to Installing PHP and PECL Extensions on MacOS.\n pecl install skywalking_agent-x.y.z.tgz The extension file skywalking_agent.so is generated in the php extension folder, get it by run php-config --extension-dir.\n","title":"Compiling 
project","url":"/docs/skywalking-php/v0.7.0/en/contribution/compiling/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/latest/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  
ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/next/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.0.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. 
For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.1.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. 
The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.2.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.3.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. 
This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.4.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. 
The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.5.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.6.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. 
This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.7.0/en/guides/component-library-settings/"},{"content":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E and the landscape.\n What is SkyWalking Infra E2E?  Project Goals. Provides the goals, which SkyWalking Infra E2E is trying to focus on and provides features about them.    After you read the above documents, you should understand the basic goals of the SkyWalking Infra E2E. Now, you can choose which following parts you are interested, then dive in.\n Module Design  ","title":"Concepts and Designs","url":"/docs/skywalking-infra-e2e/latest/en/concepts-and-designs/readme/"},{"content":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E and the landscape.\n What is SkyWalking Infra E2E?  Project Goals. Provides the goals, which SkyWalking Infra E2E is trying to focus on and provides features about them.    After you read the above documents, you should understand the basic goals of the SkyWalking Infra E2E. Now, you can choose which following parts you are interested, then dive in.\n Module Design  ","title":"Concepts and Designs","url":"/docs/skywalking-infra-e2e/next/en/concepts-and-designs/readme/"},{"content":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E and the landscape.\n What is SkyWalking Infra E2E?  Project Goals. Provides the goals, which SkyWalking Infra E2E is trying to focus on and provides features about them.    After you read the above documents, you should understand the basic goals of the SkyWalking Infra E2E. Now, you can choose which following parts you are interested, then dive in.\n Module Design  ","title":"Concepts and Designs","url":"/docs/skywalking-infra-e2e/v1.3.0/en/concepts-and-designs/readme/"},{"content":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite and the landscape.\n What is SkyWalking Satellite?  Overview and Core concepts. Provides a high-level description and introduction, including the problems the project solves. Project Goals. Provides the goals, which SkyWalking Satellite is trying to focus and provide features about them.    
After you read the above documents, you should understand basic goals of the SkyWalking Satellite. Now, you can choose which following parts you are interested, then dive in.\n Module Design Plugin Mechanism Project Structure Memory mapped Queue  ","title":"Concepts and Designs","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/readme/"},{"content":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite and the landscape.\n What is SkyWalking Satellite?  Overview and Core concepts. Provides a high-level description and introduction, including the problems the project solves. Project Goals. Provides the goals, which SkyWalking Satellite is trying to focus and provide features about them.    After you read the above documents, you should understand basic goals of the SkyWalking Satellite. Now, you can choose which following parts you are interested, then dive in.\n Module Design Plugin Mechanism Project Structure Memory mapped Queue  ","title":"Concepts and Designs","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/readme/"},{"content":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite and the landscape.\n What is SkyWalking Satellite?  Overview and Core concepts. Provides a high-level description and introduction, including the problems the project solves. Project Goals. Provides the goals, which SkyWalking Satellite is trying to focus and provide features about them.    After you read the above documents, you should understand basic goals of the SkyWalking Satellite. Now, you can choose which following parts you are interested, then dive in.\n Module Design Plugin Mechanism Project Structure Memory mapped Queue  ","title":"Concepts and Designs","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/readme/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. 
SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. 
Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. 
SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. 
SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. 
- Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - targets Hosts with ports of the BanyanDB. SW_STORAGE_BANYANDB_TARGETS 127.0.0.1:17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. 
SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. It designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTP RESTful services that provide service, service instance and endpoint registration. - -    receiver-trace default gRPC and HTTP RESTful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. 
SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. 
SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. 
SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allows users to add, disable and update UI templates. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog On-demand Pod log: fetches the Pod logs on users' demand; the logs are fetched and displayed in real time, and are not persisted in any way. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default. Please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for the Zipkin query API and supports the zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DCS server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DCS server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. 
SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). 
SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   debugging-query default       - - keywords4MaskingSecretsOfConfig Include the list of keywords to filter configurations including secrets. Separate keywords by a comma. SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS user,password,token,accessKey,secretKey,authentication   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle event data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 response SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether to enable HTTPS for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/latest/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. 
SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. 
SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE Default to gRPC\u0026rsquo;s implementation, which is a cached thread pool that can grow infinitely.   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   - - enableHierarchy If disable the hierarchy, the service and instance hierarchy relation will not be built. And the query of hierarchy will return empty result. All the hierarchy relations are defined in the hierarchy-definition.yml. Notice: some of the configurations only available for kubernetes environments. SW_CORE_ENABLE_HIERARCHY true   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. 
SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. 
SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. 
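To show how the ElasticSearch rows above fit together, here is a minimal, hedged application.yml sketch; it picks only a few of the listed settings and the exact layout of the shipped file may differ:
storage:
  selector: ${SW_STORAGE:elasticsearch}
  elasticsearch:
    namespace: ${SW_NAMESPACE:""}
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost}
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
    user: ${SW_ES_USER:""}
    password: ${SW_ES_PASSWORD:""}
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}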
SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - targets Hosts with ports of the BanyanDB. SW_STORAGE_BANYANDB_TARGETS 127.0.0.1:17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. 
FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE Default to gRPC\u0026rsquo;s implementation, which is a cached thread pool that can grow infinitely.   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. 
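A common use of the receiver-sharing-server rows above is to expose a separate endpoint for agent traffic while the core ports stay internal. The sketch below is illustrative only: the selector variable name is an assumption, and the port values are examples rather than defaults (the table lists these as not activated by default):
receiver-sharing-server:
  selector: ${SW_RECEIVER_SHARING_SERVER:default}
  default:
    restHost: ${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}
    restPort: ${SW_RECEIVER_SHARING_REST_PORT:0}    # example value; no default listed in the table
    gRPCHost: ${SW_RECEIVER_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_RECEIVER_GRPC_PORT:0}            # example value; "Not Activated" per the table
    authentication: ${SW_AUTHENTICATION:""}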
SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}.${pod.metadata.namespace}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}.${serviceEntry.metadata.namespace}   - - istioServiceEntryIgnoredNamespaces When looking up service informations from the Istio ServiceEntries, some of the ServiceEntries might be created in several namespaces automatically by some components, and OAP will randomly pick one of them to build the service name, users can use this config to exclude ServiceEntries that they don\u0026rsquo;t want to be used. Comma separated. SW_ISTIO_SERVICE_ENTRY_IGNORED_NAMESPACES -   - - gRPCHost Binding IP of gRPC service for Envoy access log service. SW_ALS_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC service for Envoy access log service. SW_ALS_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_ALS_GRPC_THREAD_POOL_SIZE Default to gRPC\u0026rsquo;s implementation, which is a cached thread pool that can grow infinitely.   - - gRPCSslEnabled Activates SSL for gRPC services. 
SW_ALS_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_ALS_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_ALS_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_ALS_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_ALS_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. 
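For the Zipkin receiver rows above, the Kafka collector is switched on alongside the HTTP one; an illustrative sketch, assuming the usual selector pattern (the selector variable name is not confirmed by the table):
receiver-zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:default}
  default:
    restPort: ${SW_RECEIVER_ZIPKIN_REST_PORT:9411}
    enableHttpCollector: ${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}
    enableKafkaCollector: ${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:false}
    kafkaBootstrapServers: ${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}
    kafkaTopic: ${SW_ZIPKIN_KAFKA_TOPIC:zipkin}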
SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error logs. - - -   - - sampleRate Sampling rate for receiving traces. Precision is 1/10000. 10000 means a sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allows users to add, disable, and update UI templates. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog On-demand Pod log: fetches the Pod logs on users' demand. The logs are fetched and displayed in real time, and are not persisted in any way. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default; set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for the Zipkin query API and supports the zipkin-lens UI. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services.
SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   - - buildInfoVersion Mock version for API buildInfo SW_PROMQL_BUILD_INFO_VERSION 2.45.0   - - buildInfoRevision Mock revision for API buildInfo SW_PROMQL_BUILD_INFO_REVISION    - - buildInfoBranch Mock branch for API buildInfo SW_PROMQL_BUILD_INFO_BRANCH    - - buildInfoBuildUser Mock build user for API buildInfo SW_PROMQL_BUILD_INFO_BUILD_USER    - - buildInfoBuildDate Mock build date for API buildInfo SW_PROMQL_BUILD_INFO_BUILD_DATE    - - buildInfoGoVersion Mock go version for API buildInfo SW_PROMQL_BUILD_INFO_GO_VERSION    alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - - maxInboundMessageSize The max inbound message size of gRPC. SW_DCS_MAX_INBOUND_MESSAGE_SIZE 4194304   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). 
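To tie the telemetry rows above to concrete configuration, a minimal hedged sketch that exposes the Prometheus self-telemetry endpoint (the selector variable name and layout are assumptions):
telemetry:
  selector: ${SW_TELEMETRY:none}   # e.g. set SW_TELEMETRY=prometheus to expose the endpoint below
  prometheus:
    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}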
SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   debugging-query default       - - keywords4MaskingSecretsOfConfig Include the list of keywords to filter configurations including secrets. Separate keywords by a comma. SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS user,password,token,accessKey,secretKey,authentication   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. 
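As an illustration of the exporter rows above (setting and variable names taken from the table; the selector variable is an assumption), enabling the gRPC metrics and Kafka trace/log exporters could look like:
exporter:
  selector: ${SW_EXPORTER:default}
  default:
    enableGRPCMetrics: ${SW_EXPORTER_ENABLE_GRPC_METRICS:false}
    gRPCTargetHost: ${SW_EXPORTER_GRPC_HOST:127.0.0.1}
    gRPCTargetPort: ${SW_EXPORTER_GRPC_PORT:9870}
    enableKafkaTrace: ${SW_EXPORTER_ENABLE_KAFKA_TRACE:false}
    enableKafkaLog: ${SW_EXPORTER_ENABLE_KAFKA_LOG:false}
    kafkaBootstrapServers: ${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}
    kafkaTopicTrace: ${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}
    kafkaTopicLog: ${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}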
- -    aws-firehose-receiver default host Binding IP of the HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of the HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of the HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of the HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections. SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 request. SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose. SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server. SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path. SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path. SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/next/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMinThreads Minimum thread number of RESTful services. SW_CORE_REST_JETTY_MIN_THREADS 1   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_JETTY_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_JETTY_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_JETTY_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain.
SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - enableDatabaseSession Cache metrics data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute. SW_CORE_ENABLE_DATABASE_SESSION true   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_TAG_KEYS http.method,status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). 
- 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI true   - - maxDurationOfAnalyzeEBPFProfiling The maximum duration(in minute) of analyze the eBPF profiling data. - 10   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. 
SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_SUPERDATASET_STORAGE_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. 
SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - driver H2 JDBC driver. SW_STORAGE_H2_DRIVER org.h2.jdbcx.JdbcDataSource   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In H2, we use multiple physical columns to host the values: e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have the same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. 
This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5 SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - influxdb - InfluxDB storage. - -   - - url InfluxDB connection URL. SW_STORAGE_INFLUXDB_URL http://localhost:8086   - - user User name of InfluxDB. SW_STORAGE_INFLUXDB_USER root   - - password Password of InfluxDB. SW_STORAGE_INFLUXDB_PASSWORD -   - - database Database of InfluxDB. SW_STORAGE_INFLUXDB_DATABASE skywalking   - - actions The number of actions to collect. SW_STORAGE_INFLUXDB_ACTIONS 1000   - - duration The maximum waiting time (in milliseconds). SW_STORAGE_INFLUXDB_DURATION 1000   - - batchEnabled If true, write points with batch API. SW_STORAGE_INFLUXDB_BATCH_ENABLED true   - - fetchTaskLogMaxSize The maximum number of fetch task log in a request. SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE 5000   - - connectionResponseFormat The response format of connection to influxDB. It can only be MSGPACK or JSON. SW_STORAGE_INFLUXDB_CONNECTION_RESPONSE_FORMAT MSGPACK   - iotdb - IoTDB storage. - -   - - host The host of IoTDB server. SW_STORAGE_IOTDB_HOST 127.0.0.1   - - rpcPort The port listened by IoTDB server. SW_STORAGE_IOTDB_RPC_PORT 6667   - - username The username of IoTDB SW_STORAGE_IOTDB_USERNAME root   - - password The password of IoTDB SW_STORAGE_IOTDB_PASSWORD root   - - storageGroup The path of Storage Group and it must start with root. SW_STORAGE_IOTDB_STORAGE_GROUP root.skywalking   - - sessionPoolSize The connection pool size for IoTDB. If the value is 0, the size of SessionPool will be 2 * CPU_Cores SW_STORAGE_IOTDB_SESSIONPOOL_SIZE 8   - - fetchTaskLogMaxSize the max number of fetch task log in a request SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE 1000   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). 
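For the InfluxDB storage rows above (present only in this older configuration set), a hedged sketch of the corresponding block, using the setting names exactly as listed and treating the overall layout as an assumption:
storage:
  selector: ${SW_STORAGE:influxdb}
  influxdb:
    url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
    user: ${SW_STORAGE_INFLUXDB_USER:root}
    password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
    database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
    actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000}
    duration: ${SW_STORAGE_INFLUXDB_DURATION:1000}
    batchEnabled: ${SW_STORAGE_INFLUXDB_BATCH_ENABLED:true}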
SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMinThreads Minimum thread number of RESTful services. SW_RECEIVER_SHARING_JETTY_MIN_THREADS 1   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_JETTY_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_JETTY_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_JETTY_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. 
SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOcRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OC_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_CONTEXT_PATH /   prometheus-fetcher default Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine. - -    - - enabledRules Enabled rules. SW_PROMETHEUS_FETCHER_ENABLED_RULES self   - - maxConvertWorker The maximize meter convert worker. SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER -1(by default, half the number of CPU core(s))   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. 
You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - consumePartitions Indicates which PartitionId(s) of the topics is/are assigned to the OAP server. Separated by commas if multiple. SW_KAFKA_FETCHER_CONSUME_PARTITIONS -   - - isSharding True when OAP Server is in cluster. SW_KAFKA_FETCHER_IS_SHARDING false   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 100   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. 
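The kafka-fetcher settings above translate into a block like the following. The broker addresses and the staging namespace are placeholders; the topic names keep their documented defaults and are therefore omitted.

```yaml
kafka-fetcher:
  selector: default
  default:
    bootstrapServers: broker-1:9092,broker-2:9092   # SW_KAFKA_FETCHER_SERVERS (hypothetical brokers)
    namespace: staging           # SW_NAMESPACE; prefixes every SkyWalking topic name
    createTopicIfNotExist: true
    partitions: 3                # SW_KAFKA_FETCHER_PARTITIONS
    replicationFactor: 2         # SW_KAFKA_FETCHER_PARTITIONS_FACTOR
    isSharding: true             # SW_KAFKA_FETCHER_IS_SHARDING; true when OAP runs as a cluster
    enableNativeProtoLog: true   # SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG
    enableNativeJsonLog: true    # SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG
```

When a namespace is set here, the same value has to be configured on the agent side through plugin.kafka.namespace in agent.config, as noted above.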
SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter grpc targetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - targetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. 
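To make the dynamic configuration rows more tangible, here is one possible way to select the Zookeeper provider. The ensemble addresses and the non-root namespace are illustrative; everything else mirrors the documented defaults.

```yaml
configuration:
  selector: zookeeper                      # pick exactly one provider: grpc, apollo, zookeeper, etcd, consul, k8s-configmap or nacos
  zookeeper:
    namespace: /skywalking-config          # SW_CONFIG_ZK_NAMESPACE (root path by default)
    hostPort: zk-1:2181,zk-2:2181          # SW_CONFIG_ZK_HOST_PORT (hypothetical ensemble)
    baseSleepTimeMs: 1000                  # SW_CONFIG_ZK_BASE_SLEEP_TIME_MS
    maxRetries: 3                          # SW_CONFIG_ZK_MAX_RETRIES
    period: 60                             # SW_CONFIG_ZK_PERIOD, sync interval in seconds
```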
The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.0.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - enableDatabaseSession Cache metrics data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute. 
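A minimal core block corresponding to the first rows of this table might look as follows; all values shown are the documented defaults except the TTLs, which are shortened purely for illustration.

```yaml
core:
  selector: default
  default:
    role: Mixed              # SW_CORE_ROLE: Mixed / Receiver / Aggregator
    restHost: 0.0.0.0        # SW_CORE_REST_HOST
    restPort: 12800          # SW_CORE_REST_PORT
    restContextPath: /       # SW_CORE_REST_CONTEXT_PATH
    gRPCHost: 0.0.0.0        # SW_CORE_GRPC_HOST
    gRPCPort: 11800          # SW_CORE_GRPC_PORT
    recordDataTTL: 2         # SW_CORE_RECORD_DATA_TTL, days; traces, top N records and logs (minimum 2)
    metricsDataTTL: 3        # SW_CORE_METRICS_DATA_TTL, days; keep it >= recordDataTTL
```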
SW_CORE_ENABLE_DATABASE_SESSION true   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). 
SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). 
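Putting the cluster coordination rows together, a Zookeeper-based cluster section could look like this; the ensemble addresses and the ACL credentials are placeholders.

```yaml
cluster:
  selector: zookeeper
  zookeeper:
    namespace: /skywalking                     # SW_NAMESPACE (root path by default)
    hostPort: zk-1:2181,zk-2:2181,zk-3:2181    # SW_CLUSTER_ZK_HOST_PORT (hypothetical ensemble)
    baseSleepTimeMs: 1000                      # SW_CLUSTER_ZK_SLEEP_TIME
    maxRetries: 3                              # SW_CLUSTER_ZK_MAX_RETRIES
    enableACL: true                            # SW_ZK_ENABLE_ACL
    schema: digest                             # SW_ZK_SCHEMA
    expression: skywalking:skywalking          # SW_ZK_EXPRESSION (placeholder credentials)
```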
SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_SUPERDATASET_STORAGE_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. 
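The ElasticSearch rows above map onto a storage block such as the one below. The node addresses, index prefix and credentials are invented for the example; the numeric settings repeat the documented defaults.

```yaml
storage:
  selector: elasticsearch
  elasticsearch:
    namespace: sw-prod                    # SW_NAMESPACE, prefix of the indexes (illustrative)
    clusterNodes: es-1:9200,es-2:9200     # SW_STORAGE_ES_CLUSTER_NODES (hypothetical nodes)
    protocol: http                        # SW_STORAGE_ES_HTTP_PROTOCOL
    user: elastic                         # SW_ES_USER (placeholder)
    password: changeme                    # SW_ES_PASSWORD (placeholder)
    dayStep: 1                            # SW_STORAGE_DAY_STEP
    indexShardsNumber: 1                  # SW_STORAGE_ES_INDEX_SHARDS_NUMBER
    indexReplicasNumber: 0                # SW_STORAGE_ES_INDEX_REPLICAS_NUMBER
    bulkActions: 5000                     # SW_STORAGE_ES_BULK_ACTIONS
    flushInterval: 15                     # SW_STORAGE_ES_FLUSH_INTERVAL, seconds
    concurrentRequests: 2                 # SW_STORAGE_ES_CONCURRENT_REQUESTS
```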
SW_STORAGE_ES_ADVANCED -   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - driver H2 JDBC driver. SW_STORAGE_H2_DRIVER org.h2.jdbcx.JdbcDataSource   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5 SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. 
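For comparison, switching the same module to MySQL could look roughly like this. The Hikari property keys under properties are an assumption based on the "listed in the application.yaml" remark above, and the URL and credentials are placeholders; remember that the MySQL JDBC driver is not bundled and has to be copied into the oap-lib folder first.

```yaml
storage:
  selector: mysql
  mysql:
    properties:                                   # Hikari connection pool settings (keys assumed)
      jdbcUrl: jdbc:mysql://mysql.example.internal:3306/swtest
      dataSource.user: skywalking
      dataSource.password: changeme
    metadataQueryMaxSize: 5000                    # SW_STORAGE_MYSQL_QUERY_MAX_SIZE
    maxSizeOfArrayColumn: 20                      # SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN
    numOfSearchableValuesPerTag: 2                # SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG
    maxSizeOfBatchSql: 2000                       # SW_STORAGE_MAX_SIZE_OF_BATCH_SQL
    asyncBatchPersistentPoolSize: 4               # SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE
```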
SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. 
SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOcRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OC_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - restHost Binding IP of RESTful services. 
SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   prometheus-fetcher default Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine. - -    - - enabledRules Enabled rules. SW_PROMETHEUS_FETCHER_ENABLED_RULES self   - - maxConvertWorker The maximize meter convert worker. SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER -1(by default, half the number of CPU core(s))   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. 
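The Zipkin receiver rows above can be read as the following sketch; all values are the documented defaults, so the block only needs editing when the port or the searchable tag set should change.

```yaml
receiver-zipkin:
  selector: default
  default:
    restHost: 0.0.0.0                 # SW_RECEIVER_ZIPKIN_REST_HOST
    restPort: 9411                    # SW_RECEIVER_ZIPKIN_REST_PORT
    restContextPath: /                # SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH
    sampleRate: 10000                 # SW_ZIPKIN_SAMPLE_RATE, precision 1/10000 (10000 = 100%)
    searchableTracesTags: http.method # SW_ZIPKIN_SEARCHABLE_TAG_KEYS
```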
- - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query graphql - GraphQL query implementation. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. 
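The GraphQL query flags described above fit into a block like the one below; every value shown is the documented default, and the comments restate why the two risky switches are off.

```yaml
query:
  selector: graphql
  graphql:
    enableLogTestTool: false        # SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL; runs untrusted LAL on the OAP, enable only for fully trusted users
    maxQueryComplexity: 1000        # SW_QUERY_MAX_QUERY_COMPLEXITY
    enableUpdateUITemplate: false   # SW_ENABLE_UPDATE_UI_TEMPLATE
    enableOnDemandPodLog: false     # SW_ENABLE_ON_DEMAND_POD_LOG; live Pod logs may expose printed secrets in the UI
```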
SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter grpc targetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - targetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. 
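Tying the last few modules together, a sketch that exports metrics over gRPC and keeps the self health check on its documented interval could look like this; the target host is a placeholder.

```yaml
exporter:
  selector: grpc
  grpc:
    targetHost: metrics-bridge.example.internal   # SW_EXPORTER_GRPC_HOST (hypothetical receiver)
    targetPort: 9870                              # SW_EXPORTER_GRPC_PORT
health-checker:
  selector: default
  default:
    checkIntervalSeconds: 5                       # SW_HEALTH_CHECKER_INTERVAL_SECONDS
```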
The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.1.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - enableDatabaseSession Cache metrics data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute. 
SW_CORE_ENABLE_DATABASE_SESSION true   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). 
SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). 
SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_SUPERDATASET_STORAGE_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. 
SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - driver H2 JDBC driver. SW_STORAGE_H2_DRIVER org.h2.jdbcx.JdbcDataSource   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5 SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. 
SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. 
SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. 
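As a sketch of how the envoy-metric settings above are written in application.yml: the k8sServiceNameRule value below is the illustrative template quoted in the table, and when the rule is supplied through an environment variable it should be wrapped in single quotes so the shell does not expand the ${...} placeholders. The selector line follows the module/provider pattern used throughout the file.

```yaml
envoy-metric:
  selector: ${SW_ENVOY_METRIC:default}
  default:
    acceptMetricsService: ${SW_ENVOY_METRIC_SERVICE:true}
    alsHTTPAnalysis: ${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:""}   # set to k8s-mesh to start HTTP ALS analysis
    alsTCPAnalysis: ${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:""}     # set to k8s-mesh to start TCP ALS analysis
    # Illustrative rule: append the pod's version label to the service name.
    k8sServiceNameRule: "${service.metadata.name}-${pod.metadata.labels.version}"
```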
SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OTEL_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   prometheus-fetcher default Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine. - -    - - enabledRules Enabled rules. SW_PROMETHEUS_FETCHER_ENABLED_RULES self   - - maxConvertWorker The maximize meter convert worker. SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER -1(by default, half the number of CPU core(s))   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. 
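The Zipkin receiver's Kafka collector settings above map to application.yml roughly as in this sketch. kafkaConsumerConfig is a JSON string of standard Kafka consumer properties, and a key that also exists as a dedicated option overrides it; the selector default shown here is an assumption.

```yaml
receiver-zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:-}            # '-' keeps the receiver disabled unless selected (assumed default)
  default:
    enableKafkaCollector: ${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:false}
    kafkaBootstrapServers: ${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}
    kafkaGroupId: ${SW_ZIPKIN_KAFKA_GROUP_ID:zipkin}
    kafkaTopic: ${SW_ZIPKIN_KAFKA_TOPIC:zipkin}
    # JSON map of extra Kafka consumer properties; same-named keys override the options above.
    kafkaConsumerConfig: ${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:"{\"auto.offset.reset\":\"earliest\",\"enable.auto.commit\":true}"}
    kafkaConsumers: ${SW_ZIPKIN_KAFKA_CONSUMERS:1}
```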
SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query graphql - GraphQL query implementation. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. 
SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. 
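The dynamic configuration providers above follow the same pattern: the configuration module selects one provider, and each setting carries the environment-variable placeholder named in the table. A sketch for the zookeeper provider, using the names and defaults listed above:

```yaml
configuration:
  selector: ${SW_CONFIGURATION:none}            # none / grpc / apollo / zookeeper / etcd / consul / k8s-configmap / nacos
  zookeeper:
    namespace: ${SW_CONFIG_ZK_NAMESPACE:/}      # root path isolating this cluster's configurations
    hostPort: ${SW_CONFIG_ZK_HOST_PORT:localhost:2181}
    baseSleepTimeMs: ${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}
    maxRetries: ${SW_CONFIG_ZK_MAX_RETRIES:3}
    period: ${SW_CONFIG_ZK_PERIOD:60}           # data sync period in seconds
```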
SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter grpc targetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - targetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.2.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. 
ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. 
- 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. 
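The cluster coordination settings above are declared the same way: the cluster module picks one coordinator through selector, and each provider block holds the settings listed in the table. A sketch for the kubernetes provider, with the environment variables and defaults from the table:

```yaml
cluster:
  selector: ${SW_CLUSTER:standalone}            # standalone / zookeeper / kubernetes / consul / etcd / nacos
  kubernetes:
    namespace: ${SW_CLUSTER_K8S_NAMESPACE:default}            # namespace SkyWalking is deployed in
    labelSelector: ${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}   # filters the OAP pods
    uidEnvName: ${SW_CLUSTER_K8S_UID:SKYWALKING_COLLECTOR_UID} # env var that carries each pod's uid
```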
SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. 
SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - mysql-sharding - Sharding-Proxy for MySQL properties. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. 
SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - - dataSources The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml.The dataSource name should include the prefix \u0026ldquo;ds_\u0026rdquo; and separated by \u0026ldquo;,\u0026rdquo; and start from ds_0 SW_JDBC_SHARDING_DATA_SOURCES ds_0,ds_1   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. 
SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. 
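For the agent-analyzer thresholds above, including the slow cache read/write thresholds added in this version, the application.yml shape is roughly the following sketch; the selector line simply follows the module/provider pattern used throughout the file, and the threshold values repeat the table's defaults.

```yaml
agent-analyzer:
  selector: ${SW_AGENT_ANALYZER:default}
  default:
    traceSamplingPolicySettingsFile: ${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}
    slowDBAccessThreshold: ${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100}   # per-DB-type latency thresholds in ms
    forceSampleErrorSegment: ${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}
    segmentStatusAnalysisStrategy: ${SW_SEGMENT_STATUS_ANALYSIS_STRATEGY:FROM_SPAN_STATUS}
    slowCacheWriteThreshold: ${SW_SLOW_CACHE_WRITE_THRESHOLD:default:20,redis:10}   # slow write command, ms
    slowCacheReadThreshold: ${SW_SLOW_CACHE_READ_THRESHOLD:default:20,redis:10}     # slow read command, ms
```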
- -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OTEL_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. 
SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. 
SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query graphql - GraphQL query implementation. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. 
SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.3.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. 
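The exporter settings above can be sketched as the following application.yml block, pairing each option with the environment variable and default from the table; the selector environment-variable name is an assumption here.

```yaml
exporter:
  selector: ${SW_EXPORTER:default}              # selector env var name assumed for this sketch
  default:
    enableGRPCMetrics: ${SW_EXPORTER_ENABLE_GRPC_METRICS:false}
    gRPCTargetHost: ${SW_EXPORTER_GRPC_HOST:127.0.0.1}
    gRPCTargetPort: ${SW_EXPORTER_GRPC_PORT:9870}
    enableKafkaTrace: ${SW_EXPORTER_ENABLE_KAFKA_TRACE:false}
    enableKafkaLog: ${SW_EXPORTER_ENABLE_KAFKA_LOG:false}
    kafkaBootstrapServers: ${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}
    kafkaTopicTrace: ${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}
    kafkaTopicLog: ${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}
```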
Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. 
The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. 
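For orientation, the core settings listed above map onto application.yml roughly as sketched below. The module/provider layout and the ${VAR:default} placeholder form follow the default file that the Note at the end of this vocabulary refers to; the SW_CORE selector variable is an assumption, while the setting names, environment variables and values are the documented defaults. Exporting any of the environment variables overrides the corresponding default.

core:
  selector: ${SW_CORE:default}                    # provider selection; selector variable name assumed
  default:
    role: ${SW_CORE_ROLE:Mixed}                   # Mixed / Receiver / Aggregator
    restHost: ${SW_CORE_REST_HOST:0.0.0.0}
    restPort: ${SW_CORE_REST_PORT:12800}
    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
    recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3}   # days; traces, top-N records and logs
    metricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7} # days; keep >= recordDataTTL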
SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. 
SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. 
SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - mysql-sharding - Sharding-Proxy for MySQL properties. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - - dataSources The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml.The dataSource name should include the prefix \u0026ldquo;ds_\u0026rdquo; and separated by \u0026ldquo;,\u0026rdquo; and start from ds_0 SW_JDBC_SHARDING_DATA_SOURCES ds_0,ds_1   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. 
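As an illustration of the storage section above, a minimal ElasticSearch storage block might look like the following sketch; the setting names, environment variables and defaults are taken from the vocabulary, while the SW_STORAGE selector line is an assumption about the default file's layout.

storage:
  selector: ${SW_STORAGE:h2}                      # assumed selector; h2 is the demo-only default
  elasticsearch:
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost}
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:HTTP}
    user: ${SW_ES_USER:""}
    password: ${SW_ES_PASSWORD:""}
    dayStep: ${SW_STORAGE_DAY_STEP:1}             # days covered by one index
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}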
SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. 
SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OTEL_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. 
The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. 
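A comparable sketch for the kafka-fetcher settings above, under the same assumed layout (setting names and defaults are the documented ones; the selector variable and its "-" disabled value are assumptions):

kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:-}                 # assumed; '-' leaves the module inactive
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    partitions: ${SW_KAFKA_FETCHER_PARTITIONS:3}
    consumers: ${SW_KAFKA_FETCHER_CONSUMERS:1}
    enableNativeProtoLog: ${SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG:true}
    enableNativeJsonLog: ${SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG:true}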
- skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. 
SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. 
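To show how the dynamic configuration providers above are selected and tuned, here is a hedged sketch of the configuration module with the Zookeeper provider; names and defaults come from the vocabulary, while the SW_CONFIGURATION selector and its "none" default are assumptions about the default file.

configuration:
  selector: ${SW_CONFIGURATION:none}              # assumed; 'none' keeps dynamic configuration off
  zookeeper:
    namespace: ${SW_CONFIG_ZK_NAMESPACE:/}
    hostPort: ${SW_CONFIG_ZK_HOST_PORT:localhost:2181}
    baseSleepTimeMs: ${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}
    maxRetries: ${SW_CONFIG_ZK_MAX_RETRIES:3}
    period: ${SW_CONFIG_ZK_PERIOD:60}             # data sync period in seconds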
SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max Thtread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 response SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS firhose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY     Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.4.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. 
SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. 
Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. 
SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. 
SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. 
SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. 
SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. 
- -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. 
SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. 
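A sketch of the kafka-fetcher settings described above, with a namespace prefix (illustrative values; the module is not selected by default, and the namespace value here is an example). The agent side would need plugin.kafka.namespace set to the same value in agent.config so both ends use the prefixed topic names:

```yaml
# Illustrative sketch only.
kafka-fetcher:
  selector: default
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    namespace: ${SW_NAMESPACE:production}          # becomes a topic-name prefix
    partitions: ${SW_KAFKA_FETCHER_PARTITIONS:3}
    replicationFactor: ${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}
    enableNativeProtoLog: ${SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG:true}
```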
SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow users to add, disable and update UI templates. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog On-demand Pod log: fetch the Pod logs on users' demand; the logs are fetched and displayed in real time and are not persisted in any way. This is helpful when users want to do some experiments and monitor the logs to see what\u0026rsquo;s happening inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default; please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for the Zipkin query API and supports the zipkin-lens UI. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis. SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames. SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size. SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis. SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for the PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from the DCS server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from the DCS server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo.
SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. 
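A sketch of the exporter settings listed above, pushing traces and logs to an external Kafka cluster while keeping only error-status trace segments (illustrative values only; property names and env vars come from the table):

```yaml
# Illustrative sketch only.
exporter:
  selector: default
  default:
    enableKafkaTrace: ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}
    enableKafkaLog: ${SW_EXPORTER_ENABLE_KAFKA_LOG:true}
    kafkaBootstrapServers: ${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}
    kafkaTopicTrace: ${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}
    kafkaTopicLog: ${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}
    exportErrorStatusTraceOnly: ${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:true}
```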
SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, the agent receives the latest configuration every time, even if it has not changed. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections. SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 request. SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose. SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server. SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path. SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path. SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.5.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services.
SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. 
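A sketch of common core tuning knobs from the table above (illustrative values; the selector variable and nesting follow the shipped application.yml layout):

```yaml
# Illustrative sketch only.
core:
  selector: ${SW_CORE:default}
  default:
    restPort: ${SW_CORE_REST_PORT:12800}
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
    recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3}    # traces, top-N records, logs (days)
    metricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7}  # recommended >= recordDataTTL
    searchableTracesTags: ${SW_SEARCHABLE_TAG_KEYS:http.method,http.status_code,db.type}
```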
SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M (based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum number of network addresses detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data for the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration (in seconds) of querying eBPF profiling data from the database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count for querying eBPF profiling data from the database. - System CPU core size   - - uiMenuRefreshInterval The period (in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period (in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   cluster standalone - Standalone mode runs the OAP as a single node without cluster coordination. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum number of retries. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token.
SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. 
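A sketch pointing the OAP at an external ElasticSearch/OpenSearch cluster with basic auth, using the storage settings listed above (illustrative values; the node address and the SW_STORAGE selector default shown here are assumptions):

```yaml
# Illustrative sketch only.
storage:
  selector: ${SW_STORAGE:elasticsearch}
  elasticsearch:
    namespace: ${SW_NAMESPACE:""}
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
    user: ${SW_ES_USER:""}
    password: ${SW_ES_PASSWORD:""}
    connectTimeout: ${SW_STORAGE_ES_CONNECT_TIMEOUT:3000}
```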
Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. 
SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum batch size of SQL execution. SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize Thread pool size for asynchronously flushing data into the database. SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum batch size of SQL execution. SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize Thread pool size for asynchronously flushing data into the database. SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum batch size of SQL execution. SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize Thread pool size for asynchronously flushing data into the database. SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Flush period (in seconds). SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shard number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shard number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shard factor for a super dataset record, i.e. the shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy, including the sampling rate and the trace segment latency threshold, is configured in the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When the sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS, FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span.
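A sketch of the agent-analyzer knobs just listed, raising the slow-DB thresholds per database type and keeping the default status strategy (illustrative values only; names and env vars come from the table):

```yaml
# Illustrative sketch only.
agent-analyzer:
  selector: ${SW_AGENT_ANALYZER:default}
  default:
    traceSamplingPolicySettingsFile: ${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}
    slowDBAccessThreshold: ${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100}
    forceSampleErrorSegment: ${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}
    segmentStatusAnalysisStrategy: ${SW_SEGMENT_STATUS_ANALYSIS_STRATEGY:FROM_SPAN_STATUS}
```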
SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. 
- -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). 
SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 
10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. 
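A sketch of the telemetry settings described above, exposing the OAP's own metrics to Prometheus for self-observability (illustrative values; see the telemetry doc for details):

```yaml
# Illustrative sketch only.
telemetry:
  selector: ${SW_TELEMETRY:prometheus}
  prometheus:
    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
```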
-    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. 
SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, the agent receives the latest configuration every time, even if it has not changed. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections. SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 request. SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose. SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server. SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path. SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path. SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.6.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services.
SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. 
Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. 
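A sketch coordinating several OAP nodes through Zookeeper, using the cluster settings listed above (illustrative values; the SW_CLUSTER selector default shown here is an assumption):

```yaml
# Illustrative sketch only.
cluster:
  selector: ${SW_CLUSTER:zookeeper}
  zookeeper:
    namespace: ${SW_NAMESPACE:""}
    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000}
    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3}
```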
SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. 
SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. 
Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - targets Hosts with ports of the BanyanDB. SW_STORAGE_BANYANDB_TARGETS 127.0.0.1:17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. 
FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. 
SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. 
SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. 
- skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. 
-    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. 
SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   debugging-query default       - - keywords4MaskingSecretsOfConfig The list of keywords used to filter configurations that include secrets. Separate keywords by a comma. SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS user,password,token,accessKey,secretKey,authentication   configuration-discovery default disableMessageDigest If true, the agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle event data. - -    aws-firehose-receiver default host Binding IP of the HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of the HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of the HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Maximum thread number of the HTTP server. SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections. SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 request. SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose. SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server. SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path. SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path. SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ The system environment variable name could be declared and changed in application.yml. The names listed here are simply those provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.7.0/en/setup/backend/configuration-vocabulary/"},{"content":"Context injection If you want to fetch the SkyWalking context in your PHP code, which is super helpful for debugging and observability, you can enable the configuration item skywalking_agent.inject_context.\nDescription skywalking_agent.inject_context\nWhether to enable automatic injection of SkyWalking context variables (such as SW_TRACE_ID). For php-fpm mode, it will be injected into the $_SERVER variable. 
For swoole mode, it will be injected into the $request-\u0026gt;server variable.\nConfiguration [skywalking_agent] extension = skywalking_agent.so skywalking_agent.inject_context = On Usage For php-fpm mode:\n\u0026lt;?php echo $_SERVER[\u0026#34;SW_SERVICE_NAME\u0026#34;]; // get service name echo $_SERVER[\u0026#34;SW_INSTANCE_NAME\u0026#34;]; // get instance name echo $_SERVER[\u0026#34;SW_TRACE_ID\u0026#34;]; // get trace id For swoole mode:\n\u0026lt;?php $http = new Swoole\\Http\\Server(\u0026#39;127.0.0.1\u0026#39;, 9501); $http-\u0026gt;on(\u0026#39;request\u0026#39;, function ($request, $response) { echo $request-\u0026gt;server[\u0026#34;SW_SERVICE_NAME\u0026#34;]; // get service name  echo $request-\u0026gt;server[\u0026#34;SW_INSTANCE_NAME\u0026#34;]; // get instance name  echo $request-\u0026gt;server[\u0026#34;SW_TRACE_ID\u0026#34;]; // get trace id }); ","title":"Context injection","url":"/docs/skywalking-php/next/en/configuration/context-injection/"},{"content":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   
Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","title":"Continuous Profiling","url":"/docs/main/latest/en/setup/backend/backend-continuous-profiling/"},{"content":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. 
Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","title":"Continuous Profiling","url":"/docs/main/next/en/setup/backend/backend-continuous-profiling/"},{"content":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. 
This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period for monitoring data, which can also be understood as the most recent duration. Count: The number of times the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration. Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. 
By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","title":"Continuous Profiling","url":"/docs/main/v9.5.0/en/setup/backend/backend-continuous-profiling/"},{"content":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   
Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","title":"Continuous Profiling","url":"/docs/main/v9.6.0/en/setup/backend/backend-continuous-profiling/"},{"content":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. 
Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. 
This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","title":"Continuous Profiling","url":"/docs/main/v9.7.0/en/setup/backend/backend-continuous-profiling/"},{"content":"Contribution If you want to debug or develop SkyWalking Infra E2E, the following documents will guide you.\n  Compiling\n Compiling Guidance    Release\n Release Guidance    ","title":"Contribution","url":"/docs/skywalking-infra-e2e/latest/en/contribution/readme/"},{"content":"Contribution If you want to debug or develop SkyWalking Infra E2E, the following documents will guide you.\n  Compiling\n Compiling Guidance    Release\n Release Guidance    ","title":"Contribution","url":"/docs/skywalking-infra-e2e/next/en/contribution/readme/"},{"content":"Contribution If you want to debug or develop SkyWalking Infra E2E, the following documents will guide you.\n  Compiling\n Compiling Guidance    Release\n Release Guidance    ","title":"Contribution","url":"/docs/skywalking-infra-e2e/v1.3.0/en/contribution/readme/"},{"content":"Create and detect Service Hierarchy Relationship Motivation Service relationship is one of the most important parts of correlating data in APM. Service Map has been supported for years, from tracing to trace analysis. Still, due to the nature of the probes, the same service could be detected by multiple methods and therefore appears in multiple layers. The v9 proposal introduced the concept of the layer. Through this proposal, we plan to establish a kernel-level concept to connect services detected in different layers.\nArchitecture Graph There is no significant architecture-level change.\nPropose Changes The data sources of SkyWalking APM already cover traditional agent-installed services, VMs, cloud infra, k8s, etc.\nFor example, a Java service is built into a Docker image and deployed in a k8s cluster, with a sidecar injected because it is managed by a service mesh. The following services would be detected across layers:\n The Java service, detected because the Java agent is installed. A pod of the k8s service, detected through k8s layer monitoring. A sidecar-perspective service. A general process from VM Linux monitoring, as the container of the Java service is deployed on that specific k8s node. Virtual databases, caches, and queues conjectured by agents, also observed through k8s monitoring, and even traffic monitored by the service mesh.  All these services have logical connections or are identical from a physical perspective, but currently they may merely share similar names, with no further metadata connection.\nBy those, we have a chance to move one step ahead and connect the dots of the whole infrastructure. This means, for the first time, we are going to establish the connections among services detected from various layers.\nIn v10, I am proposing a new concept: Service Hierarchy. Service Hierarchy defines the relationships of existing services in various layers. With more kinds of agent technologies involved (such as eBPF) and deployment tools (such as the operator and agent injector), we could inject relevant service/instance metadata and try to build the connections, including:\n The agent injector injects the pod ID into the system environment, so the Java agent can report the relationship through system properties. Rover (the eBPF agent) moves its next iteration toward k8s monitoring rather than profiling. 
And add the capabilities to establish connections among k8s pods and service mesh srv.  Meanwhile, as usual with the new major version change, I would expect UI side changes as well. UI should have flexible capabilities to show hierarchy services from the service view and topology view. Also, we could consider a deeper view of the instance part as well.\nImported Dependencies libs and their licenses. No new library is planned to be added to the codebase.\nCompatibility About the protocol, there should be no breaking changes, but enhancements only. New query protocols( service-hierarchy and instance-hierarchy) are considered to be added, some new fields should be added on things like topology query and instance dependencies to list relative services/instances from other layers directly rather than an extra query.\nAbout the data structure, due to the new data concept is going to be created, service hierarchy relative data models are going to be added. If the user is using Elasticsearch and BanyanDB, this should be compatible, they just need to re-run init-mode OAP to extend the existing models. But for SQL database users(MySQL, PostgreSQL), this could require new tables.\nGraphQL query protocol New query protocol hierarchy.graphqls is going to be added.\ntypeHierarchyRelatedService{# The related service ID.id:ID!# The literal name of the #id.name:String!# The related service\u0026#39;s Layer name.layer:String!normal:Boolean!}typeHierarchyRelatedInstance{# The related instance ID.id:ID!# The literal name of the #id. Instance Name.name:String!# Service idserviceId:ID!# The literal name of the #serviceId.serviceName:String!# The service\u0026#39;s Layer name.# Service could have multiple layers, this is the layer of the service that the instance belongs to.layer:String!normal:Boolean!}typeHierarchyServiceRelation{upperService:HierarchyRelatedService!lowerService:HierarchyRelatedService!}typeHierarchyInstanceRelation{upperInstance:HierarchyRelatedInstance!lowerInstance:HierarchyRelatedInstance!}typeServiceHierarchy{relations:[HierarchyServiceRelation!]!}typeInstanceHierarchy{relations:[HierarchyInstanceRelation!]!}typeLayerLevel{# The layer name.layer:String!# The layer level.# The level of the upper service should greater than the level of the lower service.level:Int!}extendtypeQuery{# Query the service hierarchy, based on the given service. Will recursively return all related layers services in the hierarchy.getServiceHierarchy(serviceId:ID!,layer:String!):ServiceHierarchy!# Query the instance hierarchy, based on the given instance. Will return all direct related layers instances in the hierarchy, no recursive.getInstanceHierarchy(instanceId:ID!,layer:String!):InstanceHierarchy!# List layer hierarchy levels. 
The layer levels are defined in the `hierarchy-definition.yml`.listLayerLevels:[LayerLevel!]!}New data models   service_hierarchy_relation\n   Column name Data type Description     id String serviceId.servicelayer-relatedServiceId.relatedServiceLayer   service_id String upper service id   service_layer int upper service layer value   related_service_id String lower service id   related_service_layer int lower service layer value   time_bucket long       instance_hierarchy_relation\n   Column name Data type Description     id String instanceId.servicelayer-relateInstanceId.relatedServiceLayer   instance_id String upper instance id   service_layer int upper service layer value   related_instance_id String lower instance id   related_service_layer int lower service layer value   time_bucket long       Internal APIs Internal APIs should be exposed in the Core module to support building the hierarchy relationship.\npublic void toServiceHierarchyRelation(String upperServiceName, Layer upperServiceLayer, String lowerServiceName, Layer lowerServiceLayer); public void toInstanceHierarchyRelation(String upperInstanceName, String upperServiceName, Layer upperServiceLayer, String lowerInstanceName, String lowerServiceName, Layer lowerServiceLayer); Hierarchy Definition All layers hierarchy relations are defined in the hierarchy-definition.yml file. OAP will check the hierarchy relations before building and use the matching rules to auto match the relations. Here is an example:\n# Define the hierarchy of service layers, the layers under the specific layer are related lower of the layer.# The relation could have a matching rule for auto matching, which are defined in the `auto-matching-rules` section.# All the layers are defined in the file `org.apache.skywalking.oap.server.core.analysis.Layers.java`.hierarchy:MESH:MESH_DP:nameK8S_SERVICE:short-nameMESH_DP:K8S_SERVICE:short-nameGENERAL:K8S_SERVICE:lower-short-name-remove-nsMYSQL:K8S_SERVICE:~VIRTUAL_DATABASE:MYSQL:~# Use Groovy script to define the matching rules, the input parameters are the upper service(u) and the lower service(l) and the return value is a boolean.# which are used to match the relation between the upper service(u) and the lower service(l) on the different layers.auto-matching-rules:# the name of the upper service is equal to the name of the lower servicename:\u0026#34;{ (u, l) -\u0026gt; u.name == l.name }\u0026#34;# the short name of the upper service is equal to the short name of the lower serviceshort-name:\u0026#34;{ (u, l) -\u0026gt; u.shortName == l.shortName }\u0026#34;# remove the namespace from the lower service short namelower-short-name-remove-ns:\u0026#34;{ (u, l) -\u0026gt; u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf(\u0026#39;.\u0026#39;)) }\u0026#34;# The hierarchy level of the service layer, the level is used to define the order of the service layer for UI presentation,# The level of the upper service should greater than the level of the lower service in `hierarchy` section.layer-levels:MESH:3GENERAL:3VIRTUAL_DATABASE:3MYSQL:2MESH_DP:1K8S_SERVICE:0General usage docs This proposal doesn\u0026rsquo;t impact the end user in any way of using SkyWalking. The remarkable change will be in the UI. On the service dashboard and topology map, the user should be able to see the hierarchy relationship, which means other services in other layers are logically the same as the current one. 
UI would provide the link to jump to the relative service\u0026rsquo;s dashboard.\nNo Goal This proposal doesn\u0026rsquo;t cover all the logic about how to detect the service hierarchy structure. All those should be in a separate SWIP.\n","title":"Create and detect Service Hierarchy Relationship","url":"/docs/main/next/en/swip/swip-1/"},{"content":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... 
ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... 
spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","title":"Create Span","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-tracer/"},{"content":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. 
Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... 
SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${operationName}\u0026#34;); Sample codes only\n","title":"Create Span","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-tracer/"},{"content":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. 
This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... // Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); for (Map.Entry\u0026lt;String, String\u0026gt; entry : map.entrySet()) { if (next.hasNext()) { next = next.next(); if (entry.getKey().equals(next.getHeadKey())) next.setHeadValue(entry.getValue()); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); for (Map.Entry\u0026lt;String, String\u0026gt; entry : map.entrySet()) { if (next.hasNext()) { next = next.next(); if (entry.getKey().equals(next.getHeadKey())) next.setHeadValue(entry.getValue()); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... 
}); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","title":"Create Span","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-tracer/"},{"content":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... 
SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... 
ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... 
spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","title":"Create Span","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-tracer/"},{"content":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. 
Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... 
SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","title":"Create Span","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-tracer/"},{"content":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a collection of a class of resources. Each resource has a name unique to a group.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new group to the database\u0026rsquo;s metadata registry repository. If the group does not currently exist, create operation will create the schema.\nExamples of creating $ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store group data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nGet operation Get operation gets a group\u0026rsquo;s schema.\nExamples of getting $ bydbctl group get -g sw_metric Update operation Update operation updates a group\u0026rsquo;s schema.\nExamples of updating If we want to change the ttl of the data in this group to be 1 day, use the command:\n$ bydbctl group update -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 1 EOF Delete operation Delete operation deletes a group\u0026rsquo;s schema.\nExamples of deleting $ bydbctl group delete -g sw_metric List operation The list operation shows all groups' schema.\nExamples $ bydbctl group list API Reference GroupService v1\n","title":"CRUD Groups","url":"/docs/skywalking-banyandb/latest/crud/group/"},{"content":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a collection of a class of resources. Each resource has a name unique to a group.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new group to the database\u0026rsquo;s metadata registry repository. If the group does not currently exist, create operation will create the schema.\nExamples of creating $ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store group data points. Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nGet operation Get operation gets a group\u0026rsquo;s schema.\nExamples of getting $ bydbctl group get -g sw_metric Update operation Update operation updates a group\u0026rsquo;s schema.\nExamples of updating If we want to change the ttl of the data in this group to be 1 day, use the command:\n$ bydbctl group update -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 1 EOF Delete operation Delete operation deletes a group\u0026rsquo;s schema.\nExamples of deleting $ bydbctl group delete -g sw_metric List operation The list operation shows all groups' schema.\nExamples $ bydbctl group list API Reference GroupService v1\n","title":"CRUD Groups","url":"/docs/skywalking-banyandb/next/crud/group/"},{"content":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a collection of a class of resources. Each resource has a name unique to a group.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new group to the database\u0026rsquo;s metadata registry repository. If the group does not currently exist, create operation will create the schema.\nExamples of creating $ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store group data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nGet operation Get operation gets a group\u0026rsquo;s schema.\nExamples of getting $ bydbctl group get -g sw_metric Update operation Update operation updates a group\u0026rsquo;s schema.\nExamples of updating If we want to change the ttl of the data in this group to be 1 day, use the command:\n$ bydbctl group update -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 1 EOF Delete operation Delete operation deletes a group\u0026rsquo;s schema.\nExamples of deleting $ bydbctl group delete -g sw_metric List operation The list operation shows all groups' schema.\nExamples $ bydbctl group list API Reference GroupService v1\n","title":"CRUD Groups","url":"/docs/skywalking-banyandb/v0.5.0/crud/group/"},{"content":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index rule binding is a bridge to connect several index rules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule binding to the database\u0026rsquo;s metadata registry repository. If the index rule binding does not currently exist, create operation will create the schema.\nExamples An index rule binding belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a index rule binding. The subject(stream/measure) and index rule MUST live in the same group with the binding.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRuleBinding data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new indexRuleBinding:\n$ bydbctl indexRuleBinding create -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The YAML contains:\n rules: references to the name of index rules. subject: stream or measure\u0026rsquo;s name and catalog. begin_at and expire_at: the TTL of this binding.  
Get operation Get(Read) operation gets an index rule binding\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRuleBinding get -g sw_stream -n stream_binding Update operation Update operation updates an index rule binding\u0026rsquo;s schema.\nExamples of updating $ bydbctl indexRuleBinding update -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic # Remove this rule # - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The new YAML removes the index rule extended_tags\u0026rsquo;s binding.\nDelete operation Delete operation deletes an index rule binding\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRuleBinding delete -g sw_stream -n stream_binding List operation List operation lists all index rule bindings in a group.\nExamples of listing $ bydbctl indexRuleBinding list -g sw_stream API Reference indexRuleBindingService v1\n","title":"CRUD indexRuleBindings","url":"/docs/skywalking-banyandb/latest/crud/index_rule_binding/"},{"content":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index rule binding is a bridge to connect several index rules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, which provides flexible strategies to control how to generate time series indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule binding to the database\u0026rsquo;s metadata registry repository. If the index rule binding does not currently exist, create operation will create the schema.\nExamples An index rule binding belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating an index rule binding. The subject(stream/measure) and index rule MUST live in the same group as the binding.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRuleBinding data points. Every day, it creates a segment which will generate a block every 2 hours.\nThe data in this group is kept for 7 days.\nThen, the command below will create a new indexRuleBinding:\n$ bydbctl indexRuleBinding create -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The YAML contains:\n rules: references to the names of index rules. subject: stream or measure\u0026rsquo;s name and catalog. begin_at and expire_at: the TTL of this binding.  
Get operation Get(Read) operation gets an index rule binding\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRuleBinding get -g sw_stream -n stream_binding Update operation Update operation update an index rule binding\u0026rsquo;s schema.\nExamples updating $ bydbctl indexRuleBinding update -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic # Remove this rule # - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The new YAML removed the index rule extended_tags\u0026rsquo;s binding.\nDelete operation Delete operation delete an index rule binding\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRuleBinding delete -g sw_stream -n stream_binding List operation List operation list all index rule bindings in a group.\nExamples of listing $ bydbctl indexRuleBinding list -g sw_stream API Reference indexRuleBindingService v1\n","title":"CRUD indexRuleBindings","url":"/docs/skywalking-banyandb/next/crud/index_rule_binding/"},{"content":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index rule binding is a bridge to connect several index rules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule binding to the database\u0026rsquo;s metadata registry repository. If the index rule binding does not currently exist, create operation will create the schema.\nExamples An index rule binding belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a index rule binding. The subject(stream/measure) and index rule MUST live in the same group with the binding.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRuleBinding data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new indexRuleBinding:\n$ bydbctl indexRuleBinding create -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The YAML contains:\n rules: references to the name of index rules. subject: stream or measure\u0026rsquo;s name and catalog. begin_at and expire_at: the TTL of this binding.  
Get operation Get(Read) operation gets an index rule binding\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRuleBinding get -g sw_stream -n stream_binding Update operation Update operation update an index rule binding\u0026rsquo;s schema.\nExamples updating $ bydbctl indexRuleBinding update -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic # Remove this rule # - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The new YAML removed the index rule extended_tags\u0026rsquo;s binding.\nDelete operation Delete operation delete an index rule binding\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRuleBinding delete -g sw_stream -n stream_binding List operation List operation list all index rule bindings in a group.\nExamples of listing $ bydbctl indexRuleBinding list -g sw_stream API Reference indexRuleBindingService v1\n","title":"CRUD indexRuleBindings","url":"/docs/skywalking-banyandb/v0.5.0/crud/index_rule_binding/"},{"content":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how to generate indices based on tags and the index type. IndexRule should bind to a subject(stream or measure) through an IndexRuleBinding to generate proper indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule to the database\u0026rsquo;s metadata registry repository. If the index rule does not currently exist, create operation will create the schema.\nExamples of creating An index rule belongs to its subjects' group. We should create such a group if there is no such group.\nThe command supposes that the index rule will bind to streams. So it creates a CATALOG_STREAM group here.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_stream catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRule data points. 
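For readability, here is the same resource_opts block with each field annotated; this is only a restatement of the description above, not additional options:

resource_opts:
  shard_num: 2          # the group spreads its data across two shards
  block_interval:       # each segment produces a new block every 2 hours
    unit: UNIT_HOUR
    num: 2
  segment_interval:     # a new segment is created every day
    unit: UNIT_DAY
    num: 1
  ttl:                  # data older than 7 days is dropped
    unit: UNIT_DAY
    num: 7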
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the next command will create a new index rule:\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_TREE location: LOCATION_GLOBAL EOF This YAML creates an index rule which uses the tag trace_id to generate a TREE_TYPE index which is located at GLOBAL.\nGet operation Get(Read) operation gets an index rule\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRule get -g sw_stream -n trace_id Update operation Update operation updates an index rule\u0026rsquo;s schema.\nExamples of updating This example changes the type from TREE to INVERTED.\n$ bydbctl indexRule update -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED location: LOCATION_GLOBAL EOF Delete operation Delete operation deletes an index rule\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRule delete -g sw_stream -n trace_id List operation List operation list all index rules' schema in a group.\nExamples of listing $ bydbctl indexRule list -g sw_stream API Reference indexRuleService v1\n","title":"CRUD IndexRules","url":"/docs/skywalking-banyandb/latest/crud/index_rule/"},{"content":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how to generate indices based on tags and the index type. IndexRule should bind to a subject(stream or measure) through an IndexRuleBinding to generate proper indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule to the database\u0026rsquo;s metadata registry repository. If the index rule does not currently exist, create operation will create the schema.\nExamples of creating An index rule belongs to its subjects' group. We should create such a group if there is no such group.\nThe command supposes that the index rule will bind to streams. So it creates a CATALOG_STREAM group here.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_stream catalog: CATALOG_STREAM resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRule data points. 
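Reading the group back should show exactly the options submitted above. This assumes bydbctl exposes the same get and list verbs for groups as it does for the other resources on this page, so treat the exact flags as a sketch:

$ bydbctl group get -g sw_stream
$ bydbctl group list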
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the next command will create a new index rule:\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED EOF This YAML creates an index rule which uses the tag trace_id to generate a TYPE_INVERTED index.\nGet operation Get(Read) operation gets an index rule\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRule get -g sw_stream -n trace_id Update operation Update operation updates an index rule\u0026rsquo;s schema.\nExamples of updating This example changes the type from TREE to INVERTED.\n$ bydbctl indexRule update -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED EOF Delete operation Delete operation deletes an index rule\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRule delete -g sw_stream -n trace_id List operation List operation list all index rules' schema in a group.\nExamples of listing $ bydbctl indexRule list -g sw_stream API Reference indexRuleService v1\n","title":"CRUD IndexRules","url":"/docs/skywalking-banyandb/next/crud/index_rule/"},{"content":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how to generate indices based on tags and the index type. IndexRule should bind to a subject(stream or measure) through an IndexRuleBinding to generate proper indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule to the database\u0026rsquo;s metadata registry repository. If the index rule does not currently exist, create operation will create the schema.\nExamples of creating An index rule belongs to its subjects' group. We should create such a group if there is no such group.\nThe command supposes that the index rule will bind to streams. So it creates a CATALOG_STREAM group here.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_stream catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRule data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the next command will create a new index rule:\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_TREE location: LOCATION_GLOBAL EOF This YAML creates an index rule which uses the tag trace_id to generate a TREE_TYPE index which is located at GLOBAL.\nGet operation Get(Read) operation gets an index rule\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRule get -g sw_stream -n trace_id Update operation Update operation updates an index rule\u0026rsquo;s schema.\nExamples of updating This example changes the type from TREE to INVERTED.\n$ bydbctl indexRule update -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED location: LOCATION_GLOBAL EOF Delete operation Delete operation deletes an index rule\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRule delete -g sw_stream -n trace_id List operation List operation list all index rules' schema in a group.\nExamples of listing $ bydbctl indexRule list -g sw_stream API Reference indexRuleService v1\n","title":"CRUD IndexRules","url":"/docs/skywalking-banyandb/v0.5.0/crud/index_rule/"},{"content":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new measure to the database\u0026rsquo;s metadata registry repository. If the measure does not currently exist, create operation will create the schema.\nExamples of creating A measure belongs to a unique group. We should create such a group with a catalog CATALOG_MEASURE before creating a measure.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the below command will create a new measure:\n$ bydbctl measure create -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tag_families: - name: default tags: - name: id type: TAG_TYPE_STRING - name: entity_id type: TAG_TYPE_STRING fields: - name: total field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD - name: value field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD entity: tag_names: - entity_id interval: 1m EOF service_cpm_minute expects to ingest a series of data points with a minute interval.\nGet operation Get(Read) operation gets a measure\u0026rsquo;s schema.\nExamples of getting $ bydbctl measure get -g sw_metric -n service_cpm_minute Update operation Update operation changes a measure\u0026rsquo;s schema.\nExamples of updating $ bydbctl measure update -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tag_names: - entity_id EOF Delete operation Delete operation removes a measure\u0026rsquo;s schema.\nExamples of deleting $ bydbctl measure delete -g sw_metric -n service_cpm_minute List operation The list operation shows all measures' schema in a group.\nExamples of listing $ bydbctl measure list -g sw_metric API Reference MeasureService v1\n","title":"CRUD Measures","url":"/docs/skywalking-banyandb/latest/crud/measure/schema/"},{"content":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new measure to the database\u0026rsquo;s metadata registry repository. If the measure does not currently exist, create operation will create the schema.\nExamples of creating A measure belongs to a unique group. We should create such a group with a catalog CATALOG_MEASURE before creating a measure.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the below command will create a new measure:\n$ bydbctl measure create -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tag_families: - name: default tags: - name: id type: TAG_TYPE_STRING - name: entity_id type: TAG_TYPE_STRING fields: - name: total field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD - name: value field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD entity: tag_names: - entity_id interval: 1m EOF service_cpm_minute expects to ingest a series of data points with a minute interval.\nGet operation Get(Read) operation gets a measure\u0026rsquo;s schema.\nExamples of getting $ bydbctl measure get -g sw_metric -n service_cpm_minute Update operation Update operation changes a measure\u0026rsquo;s schema.\nExamples of updating $ bydbctl measure update -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tag_names: - entity_id EOF Delete operation Delete operation removes a measure\u0026rsquo;s schema.\nExamples of deleting $ bydbctl measure delete -g sw_metric -n service_cpm_minute List operation The list operation shows all measures' schema in a group.\nExamples of listing $ bydbctl measure list -g sw_metric API Reference MeasureService v1\n","title":"CRUD Measures","url":"/docs/skywalking-banyandb/next/crud/measure/schema/"},{"content":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new measure to the database\u0026rsquo;s metadata registry repository. If the measure does not currently exist, create operation will create the schema.\nExamples of creating A measure belongs to a unique group. We should create such a group with a catalog CATALOG_MEASURE before creating a measure.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store data points. 
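Although the examples on this page focus on the measure schema itself, a measure's tags can be indexed the same way as stream tags: an IndexRule plus an IndexRuleBinding whose subject uses catalog CATALOG_MEASURE. A hedged sketch for indexing the id tag of service_cpm_minute (the rule and binding names are illustrative):

$ bydbctl indexRule create -f - <<EOF
metadata:
  name: id
  group: sw_metric
tags:
  - id
type: TYPE_INVERTED
EOF

$ bydbctl indexRuleBinding create -f - <<EOF
metadata:
  name: service_cpm_minute_binding
  group: sw_metric
rules:
  - id
subject:
  catalog: CATALOG_MEASURE
  name: service_cpm_minute
begin_at: '2021-04-15T01:30:15.01Z'
expire_at: '2121-04-15T01:30:15.01Z'
EOF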
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the below command will create a new measure:\n$ bydbctl measure create -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tag_families: - name: default tags: - name: id type: TAG_TYPE_STRING - name: entity_id type: TAG_TYPE_STRING fields: - name: total field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD - name: value field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD entity: tag_names: - entity_id interval: 1m EOF service_cpm_minute expects to ingest a series of data points with a minute interval.\nGet operation Get(Read) operation gets a measure\u0026rsquo;s schema.\nExamples of getting $ bydbctl measure get -g sw_metric -n service_cpm_minute Update operation Update operation changes a measure\u0026rsquo;s schema.\nExamples of updating $ bydbctl measure update -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tag_names: - entity_id EOF Delete operation Delete operation removes a measure\u0026rsquo;s schema.\nExamples of deleting $ bydbctl measure delete -g sw_metric -n service_cpm_minute List operation The list operation shows all measures' schema in a group.\nExamples of listing $ bydbctl measure list -g sw_metric API Reference MeasureService v1\n","title":"CRUD Measures","url":"/docs/skywalking-banyandb/v0.5.0/crud/measure/schema/"},{"content":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user defined data.\nbydbctl is the command line tool in examples.\nApply (Create/Update) operation Apply creates a property if it\u0026rsquo;s absent, or updates an existed one based on a strategy. If the property does not currently exist, create operation will create the property.\nExamples of applying A property belongs to a unique group. 
We should create such a group before creating a property.\nThe group\u0026rsquo;s catalog should be empty.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw EOF Then, below command will create a new property:\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: name value: str: value: \u0026#34;hello\u0026#34; - key: state value: str: value: \u0026#34;succeed\u0026#34; EOF The operation supports updating partial tags.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; EOF TTL is supported in the operation.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; Get operation Get operation gets a property.\nExamples of getting $ bydbctl property get -g sw -n temp_data --id General-Service The operation could filter data by tags.\n$ bydbctl property get -g sw -n temp_data --id General-Service --tags state Delete operation Delete operation delete a property.\nExamples of deleting $ bydbctl property delete -g sw -n temp_data --id General-Service The delete operation could remove specific tags instead of the whole property.\n$ bydbctl property delete -g sw -n temp_data --id General-Service --tags state List operation List operation lists all properties in a group.\nExamples of listing in a group $ bydbctl property list -g sw List operation lists all properties in a group with a name.\nExamples of listing in a group with a name $ bydbctl property list -g sw -n temp_data TTL field in a property TTL field in a property is used to set the time to live of the property. The property will be deleted automatically after the TTL.\nThis functionality is supported by the lease mechanism. The readonly lease_id field is used to identify the lease of the property.\nExamples of setting TTL $ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; EOF The lease_id is returned in the response. You can use get operation to get the property with the lease_id as well.\n$ bydbctl property get -g sw -n temp_data --id General-Service The lease_id is used to keep the property alive. You can use keepalive operation to keep the property alive. When the keepalive operation is called, the property\u0026rsquo;s TTL will be reset to the original value.\n$ bydbctl property keepalive --lease_id 1 API Reference MeasureService v1\n","title":"CRUD Property","url":"/docs/skywalking-banyandb/latest/crud/property/"},{"content":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user defined data.\nbydbctl is the command line tool in examples.\nApply (Create/Update) operation Apply creates a property if it\u0026rsquo;s absent, or updates an existed one based on a strategy. If the property does not currently exist, create operation will create the property.\nExamples of applying A property belongs to a unique group. 
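To recap the TTL workflow shown earlier on this page: after applying the property with a ttl, read it back to obtain the read-only lease_id, then call keepalive before it expires to reset the TTL to its original value. Both commands are the ones shown above; the lease id 1 is the illustrative value from that example:

$ bydbctl property get -g sw -n temp_data --id General-Service
$ bydbctl property keepalive --lease_id 1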
We should create such a group before creating a property.\nThe group\u0026rsquo;s catalog should be empty.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw EOF Then, below command will create a new property:\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: name value: str: value: \u0026#34;hello\u0026#34; - key: state value: str: value: \u0026#34;succeed\u0026#34; EOF The operation supports updating partial tags.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; EOF TTL is supported in the operation.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; Get operation Get operation gets a property.\nExamples of getting $ bydbctl property get -g sw -n temp_data --id General-Service The operation could filter data by tags.\n$ bydbctl property get -g sw -n temp_data --id General-Service --tags state Delete operation Delete operation delete a property.\nExamples of deleting $ bydbctl property delete -g sw -n temp_data --id General-Service The delete operation could remove specific tags instead of the whole property.\n$ bydbctl property delete -g sw -n temp_data --id General-Service --tags state List operation List operation lists all properties in a group.\nExamples of listing in a group $ bydbctl property list -g sw List operation lists all properties in a group with a name.\nExamples of listing in a group with a name $ bydbctl property list -g sw -n temp_data TTL field in a property TTL field in a property is used to set the time to live of the property. The property will be deleted automatically after the TTL.\nThis functionality is supported by the lease mechanism. The readonly lease_id field is used to identify the lease of the property.\nExamples of setting TTL $ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; EOF The lease_id is returned in the response. You can use get operation to get the property with the lease_id as well.\n$ bydbctl property get -g sw -n temp_data --id General-Service The lease_id is used to keep the property alive. You can use keepalive operation to keep the property alive. When the keepalive operation is called, the property\u0026rsquo;s TTL will be reset to the original value.\n$ bydbctl property keepalive --lease_id 1 API Reference MeasureService v1\n","title":"CRUD Property","url":"/docs/skywalking-banyandb/next/crud/property/"},{"content":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user defined data.\nbydbctl is the command line tool in examples.\nApply (Create/Update) operation Apply creates a property if it\u0026rsquo;s absent, or updates an existed one based on a strategy. If the property does not currently exist, create operation will create the property.\nExamples of applying A property belongs to a unique group. 
We should create such a group before creating a property.\nThe group\u0026rsquo;s catalog should be empty.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw EOF Then, below command will create a new property:\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: name value: str: value: \u0026#34;hello\u0026#34; - key: state value: str: value: \u0026#34;succeed\u0026#34; EOF The operation supports updating partial tags.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; EOF TTL is supported in the operation.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; Get operation Get operation gets a property.\nExamples of getting $ bydbctl property get -g sw -n temp_data --id General-Service The operation could filter data by tags.\n$ bydbctl property get -g sw -n temp_data --id General-Service --tags state Delete operation Delete operation delete a property.\nExamples of deleting $ bydbctl property delete -g sw -n temp_data --id General-Service The delete operation could remove specific tags instead of the whole property.\n$ bydbctl property delete -g sw -n temp_data --id General-Service --tags state List operation List operation lists all properties in a group.\nExamples of listing in a group $ bydbctl property list -g sw List operation lists all properties in a group with a name.\nExamples of listing in a group with a name $ bydbctl property list -g sw -n temp_data TTL field in a property TTL field in a property is used to set the time to live of the property. The property will be deleted automatically after the TTL.\nThis functionality is supported by the lease mechanism. The readonly lease_id field is used to identify the lease of the property.\nExamples of setting TTL $ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; EOF The lease_id is returned in the response. You can use get operation to get the property with the lease_id as well.\n$ bydbctl property get -g sw -n temp_data --id General-Service The lease_id is used to keep the property alive. You can use keepalive operation to keep the property alive. When the keepalive operation is called, the property\u0026rsquo;s TTL will be reset to the original value.\n$ bydbctl property keepalive --lease_id 1 API Reference MeasureService v1\n","title":"CRUD Property","url":"/docs/skywalking-banyandb/v0.5.0/crud/property/"},{"content":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line tool in examples.\nStream intends to store streaming data, for example, traces or logs.\nCreate operation Create operation adds a new stream to the database\u0026rsquo;s metadata registry repository. If the stream does not currently exist, create operation will create the schema.\nExamples of creating A stream belongs to a unique group. 
We should create such a group with a catalog CATALOG_STREAM before creating a stream.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store stream data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new stream:\n$ bydbctl stream create -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Get operation Get(Read) operation get a stream\u0026rsquo;s schema.\nExamples of getting $ bydbctl stream get -g default -n sw Update operation Update operation update a stream\u0026rsquo;s schema.\nExamples of updating bydbctl is the command line tool to update a stream in this example.\n$ bydbctl stream update -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Delete operation Delete operation delete a stream\u0026rsquo;s schema.\nExamples of deleting bydbctl is the command line tool to delete a stream in this example.\n$ bydbctl stream delete -g default -n sw List operation List operation list all streams' schema in a group.\nExamples of listing $ bydbctl stream list -g default API Reference StreamService v1\n","title":"CRUD Streams","url":"/docs/skywalking-banyandb/latest/crud/stream/schema/"},{"content":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line tool in examples.\nStream intends to store streaming data, for example, traces or logs.\nCreate operation Create operation adds a new stream to the database\u0026rsquo;s metadata registry repository. If the stream does not currently exist, create operation will create the schema.\nExamples of creating A stream belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a stream.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store stream data points. 
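The stream created below declares a single searchable tag family, but a stream may declare more than one. A hedged sketch with an extra family for raw payloads; the family name data, the tag name data_binary and the enum spelling TAG_TYPE_DATA_BINARY are assumptions based on the DATA_BINARY tag type listed in the data-model chapter:

$ bydbctl stream create -f - <<EOF
metadata:
  name: sw
  group: default
tagFamilies:
  - name: searchable
    tags:
      - name: trace_id
        type: TAG_TYPE_STRING
  - name: data
    tags:
      - name: data_binary
        type: TAG_TYPE_DATA_BINARY
entity:
  tagNames:
    - stream_id
EOF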
Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new stream:\n$ bydbctl stream create -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Get operation Get(Read) operation get a stream\u0026rsquo;s schema.\nExamples of getting $ bydbctl stream get -g default -n sw Update operation Update operation update a stream\u0026rsquo;s schema.\nExamples of updating bydbctl is the command line tool to update a stream in this example.\n$ bydbctl stream update -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Delete operation Delete operation delete a stream\u0026rsquo;s schema.\nExamples of deleting bydbctl is the command line tool to delete a stream in this example.\n$ bydbctl stream delete -g default -n sw List operation List operation list all streams' schema in a group.\nExamples of listing $ bydbctl stream list -g default API Reference StreamService v1\n","title":"CRUD Streams","url":"/docs/skywalking-banyandb/next/crud/stream/schema/"},{"content":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line tool in examples.\nStream intends to store streaming data, for example, traces or logs.\nCreate operation Create operation adds a new stream to the database\u0026rsquo;s metadata registry repository. If the stream does not currently exist, create operation will create the schema.\nExamples of creating A stream belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a stream.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store stream data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new stream:\n$ bydbctl stream create -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Get operation Get(Read) operation get a stream\u0026rsquo;s schema.\nExamples of getting $ bydbctl stream get -g default -n sw Update operation Update operation update a stream\u0026rsquo;s schema.\nExamples of updating bydbctl is the command line tool to update a stream in this example.\n$ bydbctl stream update -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Delete operation Delete operation delete a stream\u0026rsquo;s schema.\nExamples of deleting bydbctl is the command line tool to delete a stream in this example.\n$ bydbctl stream delete -g default -n sw List operation List operation list all streams' schema in a group.\nExamples of listing $ bydbctl stream list -g default API Reference StreamService v1\n","title":"CRUD Streams","url":"/docs/skywalking-banyandb/v0.5.0/crud/stream/schema/"},{"content":"Custom metrics Adapter This adapter contains an implementation of external metrics API. 
It is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.9+.\nUse kustomize to customise your deployment  Clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Edit file adapter/config/adapter/kustomization.yaml file to change your preferences. If you prefer to your private docker image, a quick path to override ADAPTER_IMG environment variable : export ADAPTER_IMG=\u0026lt;private registry\u0026gt;/metrics-adapter:\u0026lt;tag\u0026gt;\n  Use make to generate the final manifests and deploy:\n  make -C adapter deploy Configuration The adapter takes the standard Kubernetes generic API server arguments (including those for authentication and authorization). By default, it will attempt to using Kubernetes in-cluster config to connect to the cluster.\nIt takes the following addition arguments specific to configuring how the adapter talks to SkyWalking OAP cluster:\n --oap-addr The address of OAP cluster. --metric-filter-regex A regular expression to filter metrics retrieved from OAP cluster. --refresh-interval This is the interval at which to update the cache of available metrics from OAP cluster. --namespace A prefix to which metrics are appended. The format is \u0026lsquo;namespace|metric_name\u0026rsquo;, defaults to skywalking.apache.org  HPA Configuration External metrics allow you to autoscale your cluster based on any metric available in OAP cluster. Just provide a metric block with a name and selector, and use the External metric type.\n- type:Externalexternal:metric:name:\u0026lt;metric_name\u0026gt;selector:matchLabels:\u0026lt;label_key\u0026gt;:\u0026lt;label_value\u0026gt;...target:.... metric_name: The name of metric generated by OAL or other subsystem. label: label_key is the entity name of skywalking metrics. if the label value contains special characters more than ., - and _, service.str.\u0026lt;number\u0026gt; represent the literal of label value, and service.byte.\u0026lt;number\u0026gt; could encode these special characters to hex bytes.  Supposing the service name is v1|productpage|bookinfo|demo, the matchLabels should be like the below piece:\nmatchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;// the hex byte of \u0026#34;|\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34; Caveats: byte label only accept a single character. That means || should be transformed to service.byte.0:\u0026quot;7c\u0026quot; and service.byte.1:\u0026quot;7c\u0026quot; instead of service.byte.0:\u0026quot;7c7c\u0026quot;\n The options of label keys are:\n service, service.str.\u0026lt;number\u0026gt; or service.byte.\u0026lt;number\u0026gt; The name of the service. instance, instance.str.\u0026lt;number\u0026gt; or instance.byte.\u0026lt;number\u0026gt; The name of the service instance. endpoint, endpoint.str.\u0026lt;number\u0026gt; or endpoint.byte.\u0026lt;number\u0026gt; The name of the endpoint. label, label.str.\u0026lt;number\u0026gt; or label.byte.\u0026lt;number\u0026gt; is optional, The labels you need to query, used for querying multi-labels metrics. Unlike swctl, this key only supports a single label due to the specification of the custom metrics API.  
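The snippets on this page show only the metrics entry itself. For orientation, a complete autoscaling/v2 HorizontalPodAutoscaler wrapping such an entry might look like the sketch below; the Deployment name, replica bounds and target value are illustrative, and the metric name follows the namespace|metric_name format described above:

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: front-gateway-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: front-gateway
  minReplicas: 1
  maxReplicas: 10
  metrics:
    - type: External
      external:
        metric:
          name: "skywalking.apache.org|service_cpm"
          selector:
            matchLabels:
              service: front_gateway
        target:
          type: Value
          value: "80"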
For example, if your application name is front_gateway, you could add the following section to your HorizontalPodAutoscaler manifest to specify that you need less than 80ms of 90th latency.\n- type:Externalexternal:metric:name:skywalking.apache.org|service_percentileselector:matchLabels:service:front_gateway# The index of [P50, P75, P90, P95, P99]. 2 is the index of P90(90%)label:\u0026#34;2\u0026#34;target:type:Valuevalue:80If the service is v1|productpage|bookinfo|demo|-:\n- type:Externalexternal:metric:name:skywalking.apache.org|service_cpmselector:matchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34;\u0026#34;service.byte.7\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.byte.8\u0026#34;: \u0026#34;2d\u0026#34;target:type:Valuevalue:80","title":"Custom metrics Adapter","url":"/docs/skywalking-swck/latest/custom-metrics-adapter/"},{"content":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.9+.\nUse kustomize to customise your deployment  Clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Edit file adapter/config/adapter/kustomization.yaml file to change your preferences. If you prefer to your private docker image, a quick path to override ADAPTER_IMG environment variable : export ADAPTER_IMG=\u0026lt;private registry\u0026gt;/metrics-adapter:\u0026lt;tag\u0026gt;\n  Use make to generate the final manifests and deploy:\n  make -C adapter deploy Configuration The adapter takes the standard Kubernetes generic API server arguments (including those for authentication and authorization). By default, it will attempt to using Kubernetes in-cluster config to connect to the cluster.\nIt takes the following addition arguments specific to configuring how the adapter talks to SkyWalking OAP cluster:\n --oap-addr The address of OAP cluster. --metric-filter-regex A regular expression to filter metrics retrieved from OAP cluster. --refresh-interval This is the interval at which to update the cache of available metrics from OAP cluster. --namespace A prefix to which metrics are appended. The format is \u0026lsquo;namespace|metric_name\u0026rsquo;, defaults to skywalking.apache.org  HPA Configuration External metrics allow you to autoscale your cluster based on any metric available in OAP cluster. Just provide a metric block with a name and selector, and use the External metric type.\n- type:Externalexternal:metric:name:\u0026lt;metric_name\u0026gt;selector:matchLabels:\u0026lt;label_key\u0026gt;:\u0026lt;label_value\u0026gt;...target:.... metric_name: The name of metric generated by OAL or other subsystem. label: label_key is the entity name of skywalking metrics. if the label value contains special characters more than ., - and _, service.str.\u0026lt;number\u0026gt; represent the literal of label value, and service.byte.\u0026lt;number\u0026gt; could encode these special characters to hex bytes.  
Supposing the service name is v1|productpage|bookinfo|demo, the matchLabels should be like the below piece:\nmatchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;// the hex byte of \u0026#34;|\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34; Caveats: byte label only accept a single character. That means || should be transformed to service.byte.0:\u0026quot;7c\u0026quot; and service.byte.1:\u0026quot;7c\u0026quot; instead of service.byte.0:\u0026quot;7c7c\u0026quot;\n The options of label keys are:\n service, service.str.\u0026lt;number\u0026gt; or service.byte.\u0026lt;number\u0026gt; The name of the service. instance, instance.str.\u0026lt;number\u0026gt; or instance.byte.\u0026lt;number\u0026gt; The name of the service instance. endpoint, endpoint.str.\u0026lt;number\u0026gt; or endpoint.byte.\u0026lt;number\u0026gt; The name of the endpoint. label, label.str.\u0026lt;number\u0026gt; or label.byte.\u0026lt;number\u0026gt; is optional, The labels you need to query, used for querying multi-labels metrics. Unlike swctl, this key only supports a single label due to the specification of the custom metrics API.  For example, if your application name is front_gateway, you could add the following section to your HorizontalPodAutoscaler manifest to specify that you need less than 80ms of 90th latency.\n- type:Externalexternal:metric:name:skywalking.apache.org|service_percentileselector:matchLabels:service:front_gateway# The index of [P50, P75, P90, P95, P99]. 2 is the index of P90(90%)label:\u0026#34;2\u0026#34;target:type:Valuevalue:80If the service is v1|productpage|bookinfo|demo|-:\n- type:Externalexternal:metric:name:skywalking.apache.org|service_cpmselector:matchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34;\u0026#34;service.byte.7\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.byte.8\u0026#34;: \u0026#34;2d\u0026#34;target:type:Valuevalue:80","title":"Custom metrics Adapter","url":"/docs/skywalking-swck/next/custom-metrics-adapter/"},{"content":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.9+.\nUse kustomize to customise your deployment  Clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Edit file adapter/config/adapter/kustomization.yaml file to change your preferences. If you prefer to your private docker image, a quick path to override ADAPTER_IMG environment variable : export ADAPTER_IMG=\u0026lt;private registry\u0026gt;/metrics-adapter:\u0026lt;tag\u0026gt;\n  Use make to generate the final manifests and deploy:\n  make -C adapter deploy Configuration The adapter takes the standard Kubernetes generic API server arguments (including those for authentication and authorization). 
By default, it will attempt to using Kubernetes in-cluster config to connect to the cluster.\nIt takes the following addition arguments specific to configuring how the adapter talks to SkyWalking OAP cluster:\n --oap-addr The address of OAP cluster. --metric-filter-regex A regular expression to filter metrics retrieved from OAP cluster. --refresh-interval This is the interval at which to update the cache of available metrics from OAP cluster. --namespace A prefix to which metrics are appended. The format is \u0026lsquo;namespace|metric_name\u0026rsquo;, defaults to skywalking.apache.org  HPA Configuration External metrics allow you to autoscale your cluster based on any metric available in OAP cluster. Just provide a metric block with a name and selector, and use the External metric type.\n- type:Externalexternal:metric:name:\u0026lt;metric_name\u0026gt;selector:matchLabels:\u0026lt;label_key\u0026gt;:\u0026lt;label_value\u0026gt;...target:.... metric_name: The name of metric generated by OAL or other subsystem. label: label_key is the entity name of skywalking metrics. if the label value contains special characters more than ., - and _, service.str.\u0026lt;number\u0026gt; represent the literal of label value, and service.byte.\u0026lt;number\u0026gt; could encode these special characters to hex bytes.  Supposing the service name is v1|productpage|bookinfo|demo, the matchLabels should be like the below piece:\nmatchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;// the hex byte of \u0026#34;|\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34; Caveats: byte label only accept a single character. That means || should be transformed to service.byte.0:\u0026quot;7c\u0026quot; and service.byte.1:\u0026quot;7c\u0026quot; instead of service.byte.0:\u0026quot;7c7c\u0026quot;\n The options of label keys are:\n service, service.str.\u0026lt;number\u0026gt; or service.byte.\u0026lt;number\u0026gt; The name of the service. instance, instance.str.\u0026lt;number\u0026gt; or instance.byte.\u0026lt;number\u0026gt; The name of the service instance. endpoint, endpoint.str.\u0026lt;number\u0026gt; or endpoint.byte.\u0026lt;number\u0026gt; The name of the endpoint. label, label.str.\u0026lt;number\u0026gt; or label.byte.\u0026lt;number\u0026gt; is optional, The labels you need to query, used for querying multi-labels metrics. Unlike swctl, this key only supports a single label due to the specification of the custom metrics API.  For example, if your application name is front_gateway, you could add the following section to your HorizontalPodAutoscaler manifest to specify that you need less than 80ms of 90th latency.\n- type:Externalexternal:metric:name:skywalking.apache.org|service_percentileselector:matchLabels:service:front_gateway# The index of [P50, P75, P90, P95, P99]. 
2 is the index of P90(90%)label:\u0026#34;2\u0026#34;target:type:Valuevalue:80If the service is v1|productpage|bookinfo|demo|-:\n- type:Externalexternal:metric:name:skywalking.apache.org|service_cpmselector:matchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34;\u0026#34;service.byte.7\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.byte.8\u0026#34;: \u0026#34;2d\u0026#34;target:type:Valuevalue:80","title":"Custom metrics Adapter","url":"/docs/skywalking-swck/v0.9.0/custom-metrics-adapter/"},{"content":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the high-level data organization data model data retrieval  You can also find examples of how to interact with BanyanDB using bydbctl, how to create and drop groups, or how to create, read, update and drop streams/measures.\nStructure of BanyanDB The hierarchy that data is organized into streams, measures and properties in groups.\nGroups Group does not provide a mechanism for isolating groups of resources within a single banyand-server but is the minimal unit to manage physical structures. Each group contains a set of options, like retention policy, shard number, etc. Several shards distribute in a group.\nmetadata:name:othersor\nmetadata:name:sw_metriccatalog:CATALOG_MEASUREresource_opts:shard_num:2block_interval:unit:UNIT_HOURnum:2segment_interval:unit:UNIT_DAYnum:1ttl:unit:UNIT_DAYnum:7The group creates two shards to store data points. Every day, it would create a segment that will generate a block every 2 hours. The available units are HOUR and DAY. The data in this group will keep 7 days.\nEvery other resource should belong to a group. The catalog indicates which kind of data model the group contains.\n UNSPECIFIED: Property or other data models. MEASURE: Measure. STREAM: Stream.  Group Registration Operations\nMeasures BanyanDB lets you define a measure as follows:\nmetadata:name:service_cpm_minutegroup:sw_metrictag_families:- name:defaulttags:- name:idtype:TAG_TYPE_STRING- name:entity_idtype:TAG_TYPE_STRINGfields:- name:totalfield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTD- name:valuefield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTDentity:tag_names:- entity_idinterval:1mMeasure consists of a sequence of data points. Each data point contains tags and fields.\nTags are key-value pairs. The database engine can index tag values by referring to the index rules and rule bindings, confining the query to filtering data points based on tags bound to an index rule.\nTags are grouped into unique tag_families which are the logical and physical grouping of tags.\nMeasure supports the following tag types:\n STRING : Text INT : 64 bits long integer STRING_ARRAY : A group of strings INT_ARRAY : A group of integers DATA_BINARY : Raw binary  A group of selected tags composite an entity that points out a specific time series the data point belongs to. The database engine has capacities to encode and compress values in the same time series. Users should select appropriate tag combinations to optimize the data size. 
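For instance, an entity may combine several tags. A sketch of a measure keyed by both the service and the instance (the tag names are illustrative, echoing the service/operation/instance example in the index-granularity section below):

entity:
  tag_names:        # together these tags identify one specific time series
    - service_id
    - instance_id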
Another role of entity is the sharding key of data points, determining how to fragment data between shards.\nFields are also key-value pairs like tags. But the value of each field is the actual value of a single data point. The database engine would encode and compress the field\u0026rsquo;s values in the same time series. The query operation is forbidden to filter data points based on a field\u0026rsquo;s value. You could apply aggregation functions to them.\nMeasure supports the following fields types:\n STRING : Text INT : 64 bits long integer DATA_BINARY : Raw binary FLOAT : 64 bits double-precision floating-point number  Measure supports the following encoding methods:\n GORILLA : GORILLA encoding is lossless. It is more suitable for a numerical sequence with similar values and is not recommended for sequence data with large fluctuations.  Measure supports the types of the following fields:\n ZSTD : Zstandard is a real-time compression algorithm, that provides high compression ratios. It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder. For BanyanDB focus on speed.  Another option named interval plays a critical role in encoding. It indicates the time range between two adjacent data points in a time series and implies that all data points belonging to the same time series are distributed based on a fixed interval. A better practice for the naming measure is to append the interval literal to the tail, for example, service_cpm_minute. It\u0026rsquo;s a parameter of GORILLA encoding method.\nMeasure Registration Operations\nTopNAggregation Find the Top-N entities from a dataset in a time range is a common scenario. We could see the diagrams like \u0026ldquo;Top 10 throughput endpoints\u0026rdquo;, and \u0026ldquo;Most slow 20 endpoints\u0026rdquo;, etc on SkyWalking\u0026rsquo;s UI. Exploring and analyzing the top entities can always reveal some high-value information.\nBanyanDB introduces the TopNAggregation, aiming to pre-calculate the top/bottom entities during the measure writing phase. In the query phase, BanyanDB can quickly retrieve the top/bottom records. The performance would be much better than top() function which is based on the query phase aggregation procedure.\n Caveat: TopNAggregation is an approximate realization, to use it well you need have a good understanding with the algorithm as well as the data distribution.\n ---metadata:name:endpoint_cpm_minute_top_bottomgroup:sw_metricsource_measure:name:endpoint_cpm_minutegroup:sw_metricfield_name:valuefield_value_sort:SORT_UNSPECIFIEDgroup_by_tag_names:- entity_idcounters_number:10000lru_size:10endpoint_cpm_minute_top_bottom is watching the data ingesting of the source measure endpoint_cpm_minute to generate both top 1000 and bottom 1000 entity cardinalities. If only Top 1000 or Bottom 1000 is needed, the field_value_sort could be DESC or ASC respectively.\n SORT_DESC: Top-N. In a series of 1,2,3...1000. Top10\u0026rsquo;s result is 1000,999...991. SORT_ASC: Bottom-N. In a series of 1,2,3...1000. Bottom10\u0026rsquo;s result is 1,2...10.  Tags in group_by_tag_names are used as dimensions. These tags can be searched (only equality is supported) in the query phase. Tags do not exist in group_by_tag_names will be dropped in the pre-calculating phase.\ncounters_number denotes the number of entity cardinality. As the above example shows, calculating the Top 100 among 10 thousands is easier than among 10 millions.\nlru_size is a late data optimizing flag. 
The higher the number, the more late data, but the more memory space is consumed.\nTopNAggregation Registration Operations\nStreams Stream shares many details with Measure except for abandoning field. Stream focuses on high throughput data collection, for example, tracing and logging. The database engine also supports compressing stream entries based on entity, but no encoding process is involved.\nStream Registration Operations\nProperties Property is a schema-less or schema-free data model. That means you DO NOT have to define a schema before writing a Property\nProperty is a standard key-value store. Users could store their metadata or items on a property and get a sequential consistency guarantee. BanyanDB\u0026rsquo;s motivation for introducing such a particular structure is to support most APM scenarios that need to store critical data, especially for a distributed database cluster.\nWe should create a group before creating a property.\nCreating group.\nmetadata:name:swCreating property.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;Property supports a three-level hierarchy, group/name/id, that is more flexible than schemaful data models.\nThe property supports the TTL mechanism. You could set the ttl field to specify the time to live.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;ttl:\u0026#34;1h\u0026#34;\u0026ldquo;General-Service\u0026rdquo; will be dropped after 1 hour. If you want to extend the TTL, you could use the \u0026ldquo;keepalive\u0026rdquo; operation. The \u0026ldquo;lease_id\u0026rdquo; is returned in the apply response. You can use get operation to get the property with the lease_id as well.\nlease_id:1\u0026ldquo;General-Service\u0026rdquo; lives another 1 hour.\nYou could Create, Read, Update and Drop a property, and update or drop several tags instead of the entire property.\nProperty Operations\nData Models Data models in BanyanDB derive from some classic data models.\nTimeSeries Model A time series is a series of data points indexed in time order. Most commonly, a time series is a sequence taken at successive equally spaced points in time. Thus it is a sequence of discrete-time data.\nYou can store time series data points through Stream or Measure. Examples of Stream are logs, traces and events. Measure could ingest metrics, profiles, etc.\nKey-Value Model The key-value data model is a subset of the Property data model. Every property has a key \u0026lt;group\u0026gt;/\u0026lt;name\u0026gt;/\u0026lt;id\u0026gt; that identifies a property within a collection. This key acts as the primary key to retrieve the data. You can set it when creating a key. It cannot be changed later because the attribute is immutable.\nThere are several Key-Value pairs in a property, named Tags. You could add, update and drop them based on the tag\u0026rsquo;s key.\nData Retrieval Queries and Writes are used to filter schemaful data models, Stream, Measure or TopNAggregation based on certain criteria, as well as to compute or store new data.\n MeasureService provides Write, Query and TopN StreamService provides Write, Query  IndexRule \u0026amp; IndexRuleBinding An IndexRule indicates which tags are indexed. An IndexRuleBinding binds an index rule to the target resources or the subject. 
There might be several rule bindings to a single resource, but their effective time range could NOT overlap.\nmetadata:name:trace_idgroup:sw_streamtags:- trace_idtype:TYPE_TREElocation:LOCATION_GLOBALIndexRule supports selecting two distinct kinds of index structures. The INVERTED index is the primary option when users set up an index rule. It\u0026rsquo;s suitable for most tag indexing due to a better memory usage ratio and query performance. When there are many unique tag values here, such as the ID tag and numeric duration tag, the TREE index could be better. This index saves much memory space with high-cardinality data sets.\nMost IndexRule\u0026rsquo;s location is LOCAL which places indices with their indexed data together. IndexRule also provides a GLOBAL location to place some indices on a higher layer of hierarchical structure. This option intends to optimize the full-scan operation for some querying cases of no time range specification, such as finding spans from a trace by trace_id.\nmetadata:name:stream_bindinggroup:sw_streamrules:- trace_id- duration- endpoint_id- status_code- http.method- db.instance- db.type- mq.broker- mq.queue- mq.topic- extended_tagssubject:catalog:CATALOG_STREAMname:swbegin_at:\u0026#39;2021-04-15T01:30:15.01Z\u0026#39;expire_at:\u0026#39;2121-04-15T01:30:15.01Z\u0026#39;IndexRuleBinding binds IndexRules to a subject, Stream or Measure. The time range between begin_at and expire_at is the effective time.\nIndexRule Registration Operations\nIndexRuleBinding Registration Operations\nIndex Granularity In BanyanDB, Stream and Measure have different levels of index granularity.\nFor Measure, the indexed target is a data point with specific tag values. The query processor uses the tag values defined in the entity field of the Measure to compose a series ID, which is used to find the several series that match the query criteria. The entity field is a set of tags that defines the unique identity of a time series, and it restricts the tags that can be used as indexed target.\nEach series contains a sequence of data points that share the same tag values. Once the query processor has identified the relevant series, it scans the data points between the desired time range in those series to find the data that matches the query criteria.\nFor example, suppose we have a Measure with the following entity field: {service, operation, instance}. If we get a data point with the following tag values: service=shopping, operation=search, and instance=prod-1, then the query processor would use those tag values to construct a series ID that uniquely identifies the series containing that data point. The query processor would then scan the relevant data points in that series to find the data that matches the query criteria.\nThe side effect of the measure index is that each indexed value has to represent a unique seriesID. This is because the series ID is constructed by concatenating the indexed tag values in the entity field. If two series have the same entity field, they would have the same series ID and would be indistinguishable from one another. This means that if you want to index a tag that is not part of the entity field, you would need to ensure that it is unique across all series. One way to do this would be to include the tag in the entity field, but this may not always be feasible or desirable depending on your use case.\nFor Stream, the indexed target is an element that is a combination of the series ID and timestamp. 
The Stream query processor uses the time range to find target files. The indexed result points to the target element. The processor doesn\u0026rsquo;t have to scan a series of elements in this time range, which reduces the query time.\nFor example, suppose we have a Stream with the following tags: service, operation, instance, and status_code. If we get a data point with the following tag values: service=shopping, operation=search, instance=prod-1, and status_code=200, and the data point\u0026rsquo;s time is 1:00pm on January 1st, 2022, then the series ID for this data point would be shopping_search_prod-1_200_1641052800, where 1641052800 is the Unix timestamp representing 1:00pm on January 1st, 2022.\nThe indexed target would be the combination of the series ID and timestamp, which in this case would be shopping_search_prod-1_200_1641052800. The Stream query processor would use the time range specified in the query to find target files and then search within those files for the indexed target.\nThe following is a comparison of the indexing granularity, performance, and flexibility of Stream and Measure indices:\n   Indexing Granularity Performance Flexibility     Measure indices are constructed for each series and are based on the entity field of the Measure. Each indexed value has to represent a unique seriesID. Measure index is faster than Stream index. Measure index is less flexible and requires more care when indexing tags that are not part of the entity field.   Stream indices are constructed for each element and are based on the series ID and timestamp. Stream index is slower than Measure index. Stream index is more flexible than Measure index and can index any tag value.    In general, Measure indices are faster and more efficient, but they require more care when indexing tags that are not part of the entity field. Stream indices, on the other hand, are slower and take up more space, but they can index any tag value and do not have the same side effects as Measure indices.\n","title":"Data Model","url":"/docs/skywalking-banyandb/latest/concept/data-model/"},{"content":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the high-level data organization data model data retrieval  You can also find examples of how to interact with BanyanDB using bydbctl, how to create and drop groups, or how to create, read, update and drop streams/measures.\nStructure of BanyanDB The hierarchy that data is organized into streams, measures and properties in groups.\nGroups Group does not provide a mechanism for isolating groups of resources within a single banyand-server but is the minimal unit to manage physical structures. Each group contains a set of options, like retention policy, shard number, etc. Several shards distribute in a group.\nmetadata:name:othersor\nmetadata:name:sw_metriccatalog:CATALOG_MEASUREresource_opts:shard_num:2segment_interval:unit:UNIT_DAYnum:1ttl:unit:UNIT_DAYnum:7The group creates two shards to store data points. Every day, it would create a segment that will generate a block every 2 hours. The available units are HOUR and DAY. The data in this group will keep 7 days.\nEvery other resource should belong to a group. The catalog indicates which kind of data model the group contains.\n UNSPECIFIED: Property or other data models. MEASURE: Measure. STREAM: Stream.  
Group Registration Operations\nMeasures BanyanDB lets you define a measure as follows:\nmetadata:name:service_cpm_minutegroup:sw_metrictag_families:- name:defaulttags:- name:idtype:TAG_TYPE_STRING- name:entity_idtype:TAG_TYPE_STRINGfields:- name:totalfield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTD- name:valuefield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTDentity:tag_names:- entity_idinterval:1mMeasure consists of a sequence of data points. Each data point contains tags and fields.\nTags are key-value pairs. The database engine can index tag values by referring to the index rules and rule bindings, confining the query to filtering data points based on tags bound to an index rule.\nTags are grouped into unique tag_families which are the logical and physical grouping of tags.\nMeasure supports the following tag types:\n STRING : Text INT : 64 bits long integer STRING_ARRAY : A group of strings INT_ARRAY : A group of integers DATA_BINARY : Raw binary  A group of selected tags composite an entity that points out a specific time series the data point belongs to. The database engine has capacities to encode and compress values in the same time series. Users should select appropriate tag combinations to optimize the data size. Another role of entity is the sharding key of data points, determining how to fragment data between shards.\nFields are also key-value pairs like tags. But the value of each field is the actual value of a single data point. The database engine would encode and compress the field\u0026rsquo;s values in the same time series. The query operation is forbidden to filter data points based on a field\u0026rsquo;s value. You could apply aggregation functions to them.\nMeasure supports the following fields types:\n STRING : Text INT : 64 bits long integer DATA_BINARY : Raw binary FLOAT : 64 bits double-precision floating-point number  Measure supports the following encoding methods:\n GORILLA : GORILLA encoding is lossless. It is more suitable for a numerical sequence with similar values and is not recommended for sequence data with large fluctuations.  Measure supports the types of the following fields:\n ZSTD : Zstandard is a real-time compression algorithm, that provides high compression ratios. It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder. For BanyanDB focus on speed.  Another option named interval plays a critical role in encoding. It indicates the time range between two adjacent data points in a time series and implies that all data points belonging to the same time series are distributed based on a fixed interval. A better practice for the naming measure is to append the interval literal to the tail, for example, service_cpm_minute. It\u0026rsquo;s a parameter of GORILLA encoding method.\nMeasure Registration Operations\nTopNAggregation Find the Top-N entities from a dataset in a time range is a common scenario. We could see the diagrams like \u0026ldquo;Top 10 throughput endpoints\u0026rdquo;, and \u0026ldquo;Most slow 20 endpoints\u0026rdquo;, etc on SkyWalking\u0026rsquo;s UI. Exploring and analyzing the top entities can always reveal some high-value information.\nBanyanDB introduces the TopNAggregation, aiming to pre-calculate the top/bottom entities during the measure writing phase. In the query phase, BanyanDB can quickly retrieve the top/bottom records. 
The performance would be much better than top() function which is based on the query phase aggregation procedure.\n Caveat: TopNAggregation is an approximate realization, to use it well you need have a good understanding with the algorithm as well as the data distribution.\n ---metadata:name:endpoint_cpm_minute_top_bottomgroup:sw_metricsource_measure:name:endpoint_cpm_minutegroup:sw_metricfield_name:valuefield_value_sort:SORT_UNSPECIFIEDgroup_by_tag_names:- entity_idcounters_number:10000lru_size:10endpoint_cpm_minute_top_bottom is watching the data ingesting of the source measure endpoint_cpm_minute to generate both top 1000 and bottom 1000 entity cardinalities. If only Top 1000 or Bottom 1000 is needed, the field_value_sort could be DESC or ASC respectively.\n SORT_DESC: Top-N. In a series of 1,2,3...1000. Top10\u0026rsquo;s result is 1000,999...991. SORT_ASC: Bottom-N. In a series of 1,2,3...1000. Bottom10\u0026rsquo;s result is 1,2...10.  Tags in group_by_tag_names are used as dimensions. These tags can be searched (only equality is supported) in the query phase. Tags do not exist in group_by_tag_names will be dropped in the pre-calculating phase.\ncounters_number denotes the number of entity cardinality. As the above example shows, calculating the Top 100 among 10 thousands is easier than among 10 millions.\nlru_size is a late data optimizing flag. The higher the number, the more late data, but the more memory space is consumed.\nTopNAggregation Registration Operations\nStreams Stream shares many details with Measure except for abandoning field. Stream focuses on high throughput data collection, for example, tracing and logging. The database engine also supports compressing stream entries based on entity, but no encoding process is involved.\nStream Registration Operations\nProperties Property is a schema-less or schema-free data model. That means you DO NOT have to define a schema before writing a Property\nProperty is a standard key-value store. Users could store their metadata or items on a property and get a sequential consistency guarantee. BanyanDB\u0026rsquo;s motivation for introducing such a particular structure is to support most APM scenarios that need to store critical data, especially for a distributed database cluster.\nWe should create a group before creating a property.\nCreating group.\nmetadata:name:swCreating property.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;Property supports a three-level hierarchy, group/name/id, that is more flexible than schemaful data models.\nThe property supports the TTL mechanism. You could set the ttl field to specify the time to live.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;ttl:\u0026#34;1h\u0026#34;\u0026ldquo;General-Service\u0026rdquo; will be dropped after 1 hour. If you want to extend the TTL, you could use the \u0026ldquo;keepalive\u0026rdquo; operation. The \u0026ldquo;lease_id\u0026rdquo; is returned in the apply response. 
You can use get operation to get the property with the lease_id as well.\nlease_id:1\u0026ldquo;General-Service\u0026rdquo; lives another 1 hour.\nYou could Create, Read, Update and Drop a property, and update or drop several tags instead of the entire property.\nProperty Operations\nData Models Data models in BanyanDB derive from some classic data models.\nTimeSeries Model A time series is a series of data points indexed in time order. Most commonly, a time series is a sequence taken at successive equally spaced points in time. Thus it is a sequence of discrete-time data.\nYou can store time series data points through Stream or Measure. Examples of Stream are logs, traces and events. Measure could ingest metrics, profiles, etc.\nKey-Value Model The key-value data model is a subset of the Property data model. Every property has a key \u0026lt;group\u0026gt;/\u0026lt;name\u0026gt;/\u0026lt;id\u0026gt; that identifies a property within a collection. This key acts as the primary key to retrieve the data. You can set it when creating a key. It cannot be changed later because the attribute is immutable.\nThere are several Key-Value pairs in a property, named Tags. You could add, update and drop them based on the tag\u0026rsquo;s key.\nData Retrieval Queries and Writes are used to filter schemaful data models, Stream, Measure or TopNAggregation based on certain criteria, as well as to compute or store new data.\n MeasureService provides Write, Query and TopN StreamService provides Write, Query  IndexRule \u0026amp; IndexRuleBinding An IndexRule indicates which tags are indexed. An IndexRuleBinding binds an index rule to the target resources or the subject. There might be several rule bindings to a single resource, but their effective time range could NOT overlap.\nmetadata:name:trace_idgroup:sw_streamtags:- trace_idtype:TYPE_INVERTEDIndexRule supports selecting two distinct kinds of index structures. The INVERTED index is the primary option when users set up an index rule. It\u0026rsquo;s suitable for most tag indexing due to a better memory usage ratio and query performance.\nmetadata:name:stream_bindinggroup:sw_streamrules:- trace_id- duration- endpoint_id- status_code- http.method- db.instance- db.type- mq.broker- mq.queue- mq.topic- extended_tagssubject:catalog:CATALOG_STREAMname:swbegin_at:\u0026#39;2021-04-15T01:30:15.01Z\u0026#39;expire_at:\u0026#39;2121-04-15T01:30:15.01Z\u0026#39;IndexRuleBinding binds IndexRules to a subject, Stream or Measure. The time range between begin_at and expire_at is the effective time.\nIndexRule Registration Operations\nIndexRuleBinding Registration Operations\nIndex Granularity In BanyanDB, Stream and Measure have different levels of index granularity.\nFor Measure, the indexed target is a data point with specific tag values. The query processor uses the tag values defined in the entity field of the Measure to compose a series ID, which is used to find the several series that match the query criteria. The entity field is a set of tags that defines the unique identity of a time series, and it restricts the tags that can be used as indexed target.\nEach series contains a sequence of data points that share the same tag values. Once the query processor has identified the relevant series, it scans the data points between the desired time range in those series to find the data that matches the query criteria.\nFor example, suppose we have a Measure with the following entity field: {service, operation, instance}. 
If we get a data point with the following tag values: service=shopping, operation=search, and instance=prod-1, then the query processor would use those tag values to construct a series ID that uniquely identifies the series containing that data point. The query processor would then scan the relevant data points in that series to find the data that matches the query criteria.\nThe side effect of the measure index is that each indexed value has to represent a unique seriesID. This is because the series ID is constructed by concatenating the indexed tag values in the entity field. If two series have the same entity field, they would have the same series ID and would be indistinguishable from one another. This means that if you want to index a tag that is not part of the entity field, you would need to ensure that it is unique across all series. One way to do this would be to include the tag in the entity field, but this may not always be feasible or desirable depending on your use case.\nFor Stream, the indexed target is an element that is a combination of the series ID and timestamp. The Stream query processor uses the time range to find target files. The indexed result points to the target element. The processor doesn\u0026rsquo;t have to scan a series of elements in this time range, which reduces the query time.\nFor example, suppose we have a Stream with the following tags: service, operation, instance, and status_code. If we get a data point with the following tag values: service=shopping, operation=search, instance=prod-1, and status_code=200, and the data point\u0026rsquo;s time is 1:00pm on January 1st, 2022, then the series ID for this data point would be shopping_search_prod-1_200_1641052800, where 1641052800 is the Unix timestamp representing 1:00pm on January 1st, 2022.\nThe indexed target would be the combination of the series ID and timestamp, which in this case would be shopping_search_prod-1_200_1641052800. The Stream query processor would use the time range specified in the query to find target files and then search within those files for the indexed target.\nThe following is a comparison of the indexing granularity, performance, and flexibility of Stream and Measure indices:\n   Indexing Granularity Performance Flexibility     Measure indices are constructed for each series and are based on the entity field of the Measure. Each indexed value has to represent a unique seriesID. Measure index is faster than Stream index. Measure index is less flexible and requires more care when indexing tags that are not part of the entity field.   Stream indices are constructed for each element and are based on the series ID and timestamp. Stream index is slower than Measure index. Stream index is more flexible than Measure index and can index any tag value.    In general, Measure indices are faster and more efficient, but they require more care when indexing tags that are not part of the entity field. 
Stream indices, on the other hand, are slower and take up more space, but they can index any tag value and do not have the same side effects as Measure indices.\n","title":"Data Model","url":"/docs/skywalking-banyandb/next/concept/data-model/"},{"content":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the high-level data organization data model data retrieval  You can also find examples of how to interact with BanyanDB using bydbctl, how to create and drop groups, or how to create, read, update and drop streams/measures.\nStructure of BanyanDB The hierarchy that data is organized into streams, measures and properties in groups.\nGroups Group does not provide a mechanism for isolating groups of resources within a single banyand-server but is the minimal unit to manage physical structures. Each group contains a set of options, like retention policy, shard number, etc. Several shards distribute in a group.\nmetadata:name:othersor\nmetadata:name:sw_metriccatalog:CATALOG_MEASUREresource_opts:shard_num:2block_interval:unit:UNIT_HOURnum:2segment_interval:unit:UNIT_DAYnum:1ttl:unit:UNIT_DAYnum:7The group creates two shards to store data points. Every day, it would create a segment that will generate a block every 2 hours. The available units are HOUR and DAY. The data in this group will keep 7 days.\nEvery other resource should belong to a group. The catalog indicates which kind of data model the group contains.\n UNSPECIFIED: Property or other data models. MEASURE: Measure. STREAM: Stream.  Group Registration Operations\nMeasures BanyanDB lets you define a measure as follows:\nmetadata:name:service_cpm_minutegroup:sw_metrictag_families:- name:defaulttags:- name:idtype:TAG_TYPE_STRING- name:entity_idtype:TAG_TYPE_STRINGfields:- name:totalfield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTD- name:valuefield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTDentity:tag_names:- entity_idinterval:1mMeasure consists of a sequence of data points. Each data point contains tags and fields.\nTags are key-value pairs. The database engine can index tag values by referring to the index rules and rule bindings, confining the query to filtering data points based on tags bound to an index rule.\nTags are grouped into unique tag_families which are the logical and physical grouping of tags.\nMeasure supports the following tag types:\n STRING : Text INT : 64 bits long integer STRING_ARRAY : A group of strings INT_ARRAY : A group of integers DATA_BINARY : Raw binary  A group of selected tags composite an entity that points out a specific time series the data point belongs to. The database engine has capacities to encode and compress values in the same time series. Users should select appropriate tag combinations to optimize the data size. Another role of entity is the sharding key of data points, determining how to fragment data between shards.\nFields are also key-value pairs like tags. But the value of each field is the actual value of a single data point. The database engine would encode and compress the field\u0026rsquo;s values in the same time series. The query operation is forbidden to filter data points based on a field\u0026rsquo;s value. 
You could apply aggregation functions to them.\nMeasure supports the following fields types:\n STRING : Text INT : 64 bits long integer DATA_BINARY : Raw binary FLOAT : 64 bits double-precision floating-point number  Measure supports the following encoding methods:\n GORILLA : GORILLA encoding is lossless. It is more suitable for a numerical sequence with similar values and is not recommended for sequence data with large fluctuations.  Measure supports the types of the following fields:\n ZSTD : Zstandard is a real-time compression algorithm, that provides high compression ratios. It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder. For BanyanDB focus on speed.  Another option named interval plays a critical role in encoding. It indicates the time range between two adjacent data points in a time series and implies that all data points belonging to the same time series are distributed based on a fixed interval. A better practice for the naming measure is to append the interval literal to the tail, for example, service_cpm_minute. It\u0026rsquo;s a parameter of GORILLA encoding method.\nMeasure Registration Operations\nTopNAggregation Find the Top-N entities from a dataset in a time range is a common scenario. We could see the diagrams like \u0026ldquo;Top 10 throughput endpoints\u0026rdquo;, and \u0026ldquo;Most slow 20 endpoints\u0026rdquo;, etc on SkyWalking\u0026rsquo;s UI. Exploring and analyzing the top entities can always reveal some high-value information.\nBanyanDB introduces the TopNAggregation, aiming to pre-calculate the top/bottom entities during the measure writing phase. In the query phase, BanyanDB can quickly retrieve the top/bottom records. The performance would be much better than top() function which is based on the query phase aggregation procedure.\n Caveat: TopNAggregation is an approximate realization, to use it well you need have a good understanding with the algorithm as well as the data distribution.\n ---metadata:name:endpoint_cpm_minute_top_bottomgroup:sw_metricsource_measure:name:endpoint_cpm_minutegroup:sw_metricfield_name:valuefield_value_sort:SORT_UNSPECIFIEDgroup_by_tag_names:- entity_idcounters_number:10000lru_size:10endpoint_cpm_minute_top_bottom is watching the data ingesting of the source measure endpoint_cpm_minute to generate both top 1000 and bottom 1000 entity cardinalities. If only Top 1000 or Bottom 1000 is needed, the field_value_sort could be DESC or ASC respectively.\n SORT_DESC: Top-N. In a series of 1,2,3...1000. Top10\u0026rsquo;s result is 1000,999...991. SORT_ASC: Bottom-N. In a series of 1,2,3...1000. Bottom10\u0026rsquo;s result is 1,2...10.  Tags in group_by_tag_names are used as dimensions. These tags can be searched (only equality is supported) in the query phase. Tags do not exist in group_by_tag_names will be dropped in the pre-calculating phase.\ncounters_number denotes the number of entity cardinality. As the above example shows, calculating the Top 100 among 10 thousands is easier than among 10 millions.\nlru_size is a late data optimizing flag. The higher the number, the more late data, but the more memory space is consumed.\nTopNAggregation Registration Operations\nStreams Stream shares many details with Measure except for abandoning field. Stream focuses on high throughput data collection, for example, tracing and logging. 
The database engine also supports compressing stream entries based on entity, but no encoding process is involved.\nStream Registration Operations\nProperties Property is a schema-less or schema-free data model. That means you DO NOT have to define a schema before writing a Property\nProperty is a standard key-value store. Users could store their metadata or items on a property and get a sequential consistency guarantee. BanyanDB\u0026rsquo;s motivation for introducing such a particular structure is to support most APM scenarios that need to store critical data, especially for a distributed database cluster.\nWe should create a group before creating a property.\nCreating group.\nmetadata:name:swCreating property.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;Property supports a three-level hierarchy, group/name/id, that is more flexible than schemaful data models.\nThe property supports the TTL mechanism. You could set the ttl field to specify the time to live.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;ttl:\u0026#34;1h\u0026#34;\u0026ldquo;General-Service\u0026rdquo; will be dropped after 1 hour. If you want to extend the TTL, you could use the \u0026ldquo;keepalive\u0026rdquo; operation. The \u0026ldquo;lease_id\u0026rdquo; is returned in the apply response. You can use get operation to get the property with the lease_id as well.\nlease_id:1\u0026ldquo;General-Service\u0026rdquo; lives another 1 hour.\nYou could Create, Read, Update and Drop a property, and update or drop several tags instead of the entire property.\nProperty Operations\nData Models Data models in BanyanDB derive from some classic data models.\nTimeSeries Model A time series is a series of data points indexed in time order. Most commonly, a time series is a sequence taken at successive equally spaced points in time. Thus it is a sequence of discrete-time data.\nYou can store time series data points through Stream or Measure. Examples of Stream are logs, traces and events. Measure could ingest metrics, profiles, etc.\nKey-Value Model The key-value data model is a subset of the Property data model. Every property has a key \u0026lt;group\u0026gt;/\u0026lt;name\u0026gt;/\u0026lt;id\u0026gt; that identifies a property within a collection. This key acts as the primary key to retrieve the data. You can set it when creating a key. It cannot be changed later because the attribute is immutable.\nThere are several Key-Value pairs in a property, named Tags. You could add, update and drop them based on the tag\u0026rsquo;s key.\nData Retrieval Queries and Writes are used to filter schemaful data models, Stream, Measure or TopNAggregation based on certain criteria, as well as to compute or store new data.\n MeasureService provides Write, Query and TopN StreamService provides Write, Query  IndexRule \u0026amp; IndexRuleBinding An IndexRule indicates which tags are indexed. An IndexRuleBinding binds an index rule to the target resources or the subject. There might be several rule bindings to a single resource, but their effective time range could NOT overlap.\nmetadata:name:trace_idgroup:sw_streamtags:- trace_idtype:TYPE_TREElocation:LOCATION_GLOBALIndexRule supports selecting two distinct kinds of index structures. The INVERTED index is the primary option when users set up an index rule. 
It\u0026rsquo;s suitable for most tag indexing due to a better memory usage ratio and query performance. When there are many unique tag values here, such as the ID tag and numeric duration tag, the TREE index could be better. This index saves much memory space with high-cardinality data sets.\nMost IndexRule\u0026rsquo;s location is LOCAL which places indices with their indexed data together. IndexRule also provides a GLOBAL location to place some indices on a higher layer of hierarchical structure. This option intends to optimize the full-scan operation for some querying cases of no time range specification, such as finding spans from a trace by trace_id.\nmetadata:name:stream_bindinggroup:sw_streamrules:- trace_id- duration- endpoint_id- status_code- http.method- db.instance- db.type- mq.broker- mq.queue- mq.topic- extended_tagssubject:catalog:CATALOG_STREAMname:swbegin_at:\u0026#39;2021-04-15T01:30:15.01Z\u0026#39;expire_at:\u0026#39;2121-04-15T01:30:15.01Z\u0026#39;IndexRuleBinding binds IndexRules to a subject, Stream or Measure. The time range between begin_at and expire_at is the effective time.\nIndexRule Registration Operations\nIndexRuleBinding Registration Operations\nIndex Granularity In BanyanDB, Stream and Measure have different levels of index granularity.\nFor Measure, the indexed target is a data point with specific tag values. The query processor uses the tag values defined in the entity field of the Measure to compose a series ID, which is used to find the several series that match the query criteria. The entity field is a set of tags that defines the unique identity of a time series, and it restricts the tags that can be used as indexed target.\nEach series contains a sequence of data points that share the same tag values. Once the query processor has identified the relevant series, it scans the data points between the desired time range in those series to find the data that matches the query criteria.\nFor example, suppose we have a Measure with the following entity field: {service, operation, instance}. If we get a data point with the following tag values: service=shopping, operation=search, and instance=prod-1, then the query processor would use those tag values to construct a series ID that uniquely identifies the series containing that data point. The query processor would then scan the relevant data points in that series to find the data that matches the query criteria.\nThe side effect of the measure index is that each indexed value has to represent a unique seriesID. This is because the series ID is constructed by concatenating the indexed tag values in the entity field. If two series have the same entity field, they would have the same series ID and would be indistinguishable from one another. This means that if you want to index a tag that is not part of the entity field, you would need to ensure that it is unique across all series. One way to do this would be to include the tag in the entity field, but this may not always be feasible or desirable depending on your use case.\nFor Stream, the indexed target is an element that is a combination of the series ID and timestamp. The Stream query processor uses the time range to find target files. The indexed result points to the target element. The processor doesn\u0026rsquo;t have to scan a series of elements in this time range, which reduces the query time.\nFor example, suppose we have a Stream with the following tags: service, operation, instance, and status_code. 
If we get a data point with the following tag values: service=shopping, operation=search, instance=prod-1, and status_code=200, and the data point\u0026rsquo;s time is 1:00pm on January 1st, 2022, then the series ID for this data point would be shopping_search_prod-1_200_1641052800, where 1641052800 is the Unix timestamp representing 1:00pm on January 1st, 2022.\nThe indexed target would be the combination of the series ID and timestamp, which in this case would be shopping_search_prod-1_200_1641052800. The Stream query processor would use the time range specified in the query to find target files and then search within those files for the indexed target.\nThe following is a comparison of the indexing granularity, performance, and flexibility of Stream and Measure indices:\n   Indexing Granularity Performance Flexibility     Measure indices are constructed for each series and are based on the entity field of the Measure. Each indexed value has to represent a unique seriesID. Measure index is faster than Stream index. Measure index is less flexible and requires more care when indexing tags that are not part of the entity field.   Stream indices are constructed for each element and are based on the series ID and timestamp. Stream index is slower than Measure index. Stream index is more flexible than Measure index and can index any tag value.    In general, Measure indices are faster and more efficient, but they require more care when indexing tags that are not part of the entity field. Stream indices, on the other hand, are slower and take up more space, but they can index any tag value and do not have the same side effects as Measure indices.\n","title":"Data Model","url":"/docs/skywalking-banyandb/v0.5.0/concept/data-model/"},{"content":"Define Service Hierarchy SkyWalking v10 introduces a new concept Service Hierarchy which defines the relationships of existing logically same services in various layers. The concept and design could be found here.\nService Hierarchy Configuration All the relationships defined in the config/hierarchy-definition.yml file. You can customize it according to your own needs. 
Here is an example:\nhierarchy:MESH:MESH_DP:nameK8S_SERVICE:short-nameMESH_DP:K8S_SERVICE:short-nameGENERAL:K8S_SERVICE:lower-short-name-remove-nsMYSQL:K8S_SERVICE:short-namePOSTGRESQL:K8S_SERVICE:short-nameSO11Y_OAP:K8S_SERVICE:short-nameVIRTUAL_DATABASE:MYSQL:lower-short-name-with-fqdnPOSTGRESQL:lower-short-name-with-fqdnauto-matching-rules:# the name of the upper service is equal to the name of the lower servicename:\u0026#34;{ (u, l) -\u0026gt; u.name == l.name }\u0026#34;# the short name of the upper service is equal to the short name of the lower serviceshort-name:\u0026#34;{ (u, l) -\u0026gt; u.shortName == l.shortName }\u0026#34;# remove the k8s namespace from the lower service short name# this rule is only works on k8s env.lower-short-name-remove-ns:\u0026#34;{ (u, l) -\u0026gt; { if(l.shortName.lastIndexOf(\u0026#39;.\u0026#39;) \u0026gt; 0) return u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf(\u0026#39;.\u0026#39;)); return false; } }\u0026#34;# the short name of the upper remove port is equal to the short name of the lower service with fqdn suffix# this rule is only works on k8s env.lower-short-name-with-fqdn:\u0026#34;{ (u, l) -\u0026gt; { if(u.shortName.lastIndexOf(\u0026#39;:\u0026#39;) \u0026gt; 0) return u.shortName.substring(0, u.shortName.lastIndexOf(\u0026#39;:\u0026#39;)) == l.shortName.concat(\u0026#39;.svc.cluster.local\u0026#39;); return false; } }\u0026#34;layer-levels:# The hierarchy level of the service layer, the level is used to define the order of the service layer for UI presentation.# The level of the upper service should greater than the level of the lower service in `hierarchy` section.MESH:3GENERAL:3SO11Y_OAP:3VIRTUAL_DATABASE:3MYSQL:2POSTGRESQL:2MESH_DP:1K8S_SERVICE:0Hierarchy  The hierarchy of service layers are defined in the hierarchy section. The layers under the specific layer are related lower of the layer. The relation could have a matching rule for auto matching, which are defined in the auto-matching-rules section. The relation without a matching rule should be built through the internal API. All the layers are defined in the file org.apache.skywalking.oap.server.core.analysis.Layers.java. If the hierarchy is not defined, the service hierarchy relationship will not be built. If you want to add a new relationship, you should certainly know they can be matched automatically by Auto Matching Rules. Notice: some hierarchy relations and auto matching rules are only works on k8s env.  Auto Matching Rules  The auto matching rules are defined in the auto-matching-rules section. Use Groovy script to define the matching rules, the input parameters are the upper service(u) and the lower service(l) and the return value is a boolean, which are used to match the relation between the upper service(u) and the lower service(l) on the different layers. The default matching rules required the service name configured as SkyWalking default and follow the Showcase. If you customized the service name in any layer, you should customize the related matching rules according your service name rules.  Layer Levels  Define the hierarchy level of the service layer in the layer-levels section. The level is used to define the order of the service layer for UI presentation. The level of the upper service should greater than the level of the lower service in hierarchy section.  
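As a quick reference, the sketch below trims the example above down to a single relation and shows the three sections that must stay consistent: the relation in hierarchy, the Groovy matching rule it names in auto-matching-rules, and the ordering in layer-levels (the upper layer's level must be greater than the lower layer's). It is only a reduced copy of the configuration shown in this document, not an additional rule.
```yaml
# Sketch: one relation from config/hierarchy-definition.yml and the pieces it depends on.
hierarchy:
  MESH:
    MESH_DP: name             # MESH (upper) relates to MESH_DP (lower) via the "name" rule
auto-matching-rules:
  # the name of the upper service (u) is equal to the name of the lower service (l)
  name: "{ (u, l) -> u.name == l.name }"
layer-levels:
  MESH: 3                     # upper layer: higher level
  MESH_DP: 1                  # lower layer: lower level
```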
","title":"Define Service Hierarchy","url":"/docs/main/next/en/concepts-and-designs/service-hierarchy-configuration/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. 
For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. 
Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. 
Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-meter/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace/"},{"content":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/opentracing/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. 
For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. 
Transmitting formatted data is the default, but it can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false With this setting, the content field holds the raw log pattern, with additional log tags argument.0, argument.1, and so on representing each logged argument, plus an exception tag that is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring Sleuth, you can use the Spring Sleuth Setup at the OAP server.\n The Counter API represents a single monotonically increasing counter; it automatically collects data and reports it to the backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode; RATE mode means the rate is reported to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment the Counter by count; the value should be positive.   The Gauge API represents a single numerical value.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and a supplier function; this function needs to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   The Histogram API represents a summary of sample observations with customized buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. 
Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set the maximum value of each histogram bucket. Histogram.Builder.minValue(double value) Set the minimum value of this histogram; the default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add a value to the histogram, automatically determining which bucket count needs to be incremented (rule: a value is counted into [step1, step2)).  ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-meter/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use the TraceContext.traceId() API to obtain the traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use the TraceContext.segmentId() API to obtain the segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use the TraceContext.spanId() API to obtain the spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample code only\n  Add @Trace to any method you want to trace. After that, you can see the span in the stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and value (Tag#value()); if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in combination with @Trace; see the examples below. 
The value of Tag supports the same expressions as Customize Enhance Trace.\n  Add a custom tag in the context of the traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span with an error status.\n  ActiveSpan.error(String errorMsg) Mark the current span with an error status and a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span with an error status and a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug-level log message to the current span.\n  ActiveSpan.info(String infoMsg) Add an info-level log message to the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The code below generates a span and two groups of tags: * one whose keys are `tag1` and `tag2` with the passed-in parameters as values, * the other whose keys are `username` and `age` with fields of the returned User as values. */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use the TraceContext.putCorrelation() API to put custom data in the tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use the TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions can be found in the agent configuration documentation, with correlation. as the prefix.\n","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace/"},{"content":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It has been archived by the CNCF TOC. Learn more.\nThe SkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/opentracing/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. 
For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. 
Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. 
Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-meter/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace/"},{"content":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/opentracing/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. 
For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. 
Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. 
Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-meter/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace/"},{"content":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/opentracing/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. 
For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. 
Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. 
Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-meter/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace/"},{"content":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/opentracing/"},{"content":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI to Kubernetes cluster with default settings specified by their Custom Resource Defines(CRD).\nInstall Operator Follow Operator installation instrument to install the operator.\nDeploy OAP server and UI with default setting Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f default.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] View the UI In order to view the UI from your browser, you should get the external address from the ingress generated by the UI custom resource firstly.\n$ kubectl get ingresses NAME HOSTS ADDRESS PORTS AGE default-ui demo.ui.skywalking \u0026lt;External_IP\u0026gt; 80 33h Edit your local /etc/hosts to append the following host-ip mapping.\ndemo.ui.skywalking \u0026lt;External_IP\u0026gt; Finally, navigate your browser to demo.ui.skywalking to access UI service.\nNotice, please install an ingress controller to your Kubernetes environment.\n","title":"Deploy OAP server and UI with default settings","url":"/docs/skywalking-swck/latest/examples/default-backend/"},{"content":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI to Kubernetes cluster with default settings specified by their Custom Resource Defines(CRD).\nInstall Operator Follow Operator installation instrument to install the operator.\nDeploy OAP server and UI with default setting Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f default.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] View the UI In order to view the UI from your browser, you should get the external address from the ingress generated by the UI custom resource firstly.\n$ kubectl get ingresses NAME HOSTS ADDRESS PORTS AGE default-ui demo.ui.skywalking \u0026lt;External_IP\u0026gt; 80 33h Edit your local /etc/hosts to append the following host-ip mapping.\ndemo.ui.skywalking \u0026lt;External_IP\u0026gt; Finally, navigate your browser to demo.ui.skywalking to access UI service.\nNotice, please install an ingress controller to your Kubernetes environment.\n","title":"Deploy OAP server and UI with 
default settings","url":"/docs/skywalking-swck/next/examples/default-backend/"},{"content":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI to Kubernetes cluster with default settings specified by their Custom Resource Defines(CRD).\nInstall Operator Follow Operator installation instrument to install the operator.\nDeploy OAP server and UI with default setting Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f default.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] View the UI In order to view the UI from your browser, you should get the external address from the ingress generated by the UI custom resource firstly.\n$ kubectl get ingresses NAME HOSTS ADDRESS PORTS AGE default-ui demo.ui.skywalking \u0026lt;External_IP\u0026gt; 80 33h Edit your local /etc/hosts to append the following host-ip mapping.\ndemo.ui.skywalking \u0026lt;External_IP\u0026gt; Finally, navigate your browser to demo.ui.skywalking to access UI service.\nNotice, please install an ingress controller to your Kubernetes environment.\n","title":"Deploy OAP server and UI with default settings","url":"/docs/skywalking-swck/v0.9.0/examples/default-backend/"},{"content":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment.\nStartup Kubernetes Make sure that you already have a Kubernetes cluster.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nDeploy Rover Please follow the rover-daemonset.yml to deploy the rover in your Kubernetes cluster. Update the comment in the file, which includes two configs:\n Rover docker image: You could use make docker to build an image and upload it to your private registry, or update from the public image. OAP address: Update the OAP address.  Then, you could use kuberctl apply -f rover-daemonset.yml to deploy the skywalking-rover into your cluster. It would deploy in each node as a DaemonSet.\n","title":"Deploy on Kubernetes","url":"/docs/skywalking-rover/latest/en/setup/deployment/kubernetes/readme/"},{"content":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment.\nStartup Kubernetes Make sure that you already have a Kubernetes cluster.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nDeploy Rover Please follow the rover-daemonset.yml to deploy the rover in your Kubernetes cluster. Update the comment in the file, which includes two configs:\n Rover docker image: You could use make docker to build an image and upload it to your private registry, or update from the public image. OAP address: Update the OAP address.  Then, you could use kubectl apply -f rover-daemonset.yml to deploy the skywalking-rover into your cluster. 
It would deploy in each node as a DaemonSet.\n","title":"Deploy on Kubernetes","url":"/docs/skywalking-rover/next/en/setup/deployment/kubernetes/readme/"},{"content":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment.\nStartup Kubernetes Make sure that you already have a Kubernetes cluster.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nDeploy Rover Please follow the rover-daemonset.yml to deploy the rover in your Kubernetes cluster. Update the comment in the file, which includes two configs:\n Rover docker image: You could use make docker to build an image and upload it to your private registry, or update from the public image. OAP address: Update the OAP address.  Then, you could use kubectl apply -f rover-daemonset.yml to deploy the skywalking-rover into your cluster. It would deploy in each node as a DaemonSet.\n","title":"Deploy on Kubernetes","url":"/docs/skywalking-rover/v0.6.0/en/setup/deployment/kubernetes/readme/"},{"content":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment.\nInstall We recommend install the Satellite by helm, follow command below, it could start the latest release version of SkyWalking Backend, UI and Satellite.\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 \\  --set satellite.enabled=true \\  --set satellite.image.tag=v0.4.0 Change Address After the Satellite and Backend started, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to this: skywalking-satellite.${SKYWALKING_RELEASE_NAMESPACE}:11800.\n","title":"Deploy on Kubernetes","url":"/docs/skywalking-satellite/latest/en/setup/examples/deploy/kubernetes/readme/"},{"content":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment.\nInstall We recommend install the Satellite by helm, follow command below, it could start the latest release version of SkyWalking Backend, UI and Satellite.\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 \\  --set satellite.enabled=true \\  --set satellite.image.tag=v0.4.0 Change Address After the Satellite and Backend started, need to change the address from agent/node. 
Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to this: skywalking-satellite.${SKYWALKING_RELEASE_NAMESPACE}:11800.\n","title":"Deploy on Kubernetes","url":"/docs/skywalking-satellite/next/en/setup/examples/deploy/kubernetes/readme/"},{"content":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment.\nInstall We recommend install the Satellite by helm, follow command below, it could start the latest release version of SkyWalking Backend, UI and Satellite.\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 \\  --set satellite.enabled=true \\  --set satellite.image.tag=v0.4.0 Change Address After the Satellite and Backend started, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to this: skywalking-satellite.${SKYWALKING_RELEASE_NAMESPACE}:11800.\n","title":"Deploy on Kubernetes","url":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/deploy/kubernetes/readme/"},{"content":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows instance.\nInstall Download Download the latest release version from SkyWalking Release Page.\nChange OAP Server addresses Update the OAP Server address in the config file, then satellite could connect to them and use round-robin policy for load-balance server before send each request.\nSupport two ways to locate the server list, using finder_type to change the type to find:\n static: Define the server address list. kubernetes: Define kubernetes pod/service/endpoint, it could be found addresses and dynamic update automatically.  Static server list You could see there define two server address and split by \u0026ldquo;,\u0026rdquo;.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:static}# The gRPC server address (default localhost:11800).server_addr:${SATELLITE_GRPC_CLIENT:127.0.0.1:11800,127.0.0.2:11800}# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Kubernetes selector Using kubernetes_config to define the address\u0026rsquo;s finder.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:kubernetes}# The kubernetes config to lookup addresseskubernetes_config:# The kubernetes API server address, If not define means using in kubernetes mode to connectapi_server:http://localhost:8001/# The kind of apikind:endpoints# Support to lookup namespacesnamespaces:- default# The kind selectorselector:label:app=productpage# How to get the address exported portextra_port:port:9080# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Start Satellite Execute the script bin/startup.sh(linux) or bin/startup.cmd(windows) to start. Then It could start these port:\n gRPC port(11800): listen the gRPC request, It could handle request from SkyWalking Agent protocol and Envoy ALS/Metrics protocol. Prometheus(1234): listen the HTTP request, It could get all SO11Y metrics from /metrics endpoint using Prometheus format.  Change Address After the satellite start, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to the satellite gRPC port.\n","title":"Deploy on Linux and Windows","url":"/docs/skywalking-satellite/latest/en/setup/examples/deploy/linux-windows/readme/"},{"content":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows instance.\nInstall Download Download the latest release version from SkyWalking Release Page.\nChange OAP Server addresses Update the OAP Server address in the config file, then satellite could connect to them and use round-robin policy for load-balance server before send each request.\nSupport two ways to locate the server list, using finder_type to change the type to find:\n static: Define the server address list. kubernetes: Define kubernetes pod/service/endpoint, it could be found addresses and dynamic update automatically.  
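For example (a sketch, assuming the shipped config file is kept unchanged), the finder type and the server list can be supplied through the environment variables referenced in the configs below before starting the Satellite, e.g. export SATELLITE_GRPC_CLIENT_FINDER=static and export SATELLITE_GRPC_CLIENT=127.0.0.1:11800,127.0.0.2:11800; the full option lists for both finder types are shown next.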
Static server list You could see there define two server address and split by \u0026ldquo;,\u0026rdquo;.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:static}# The gRPC server address (default localhost:11800).server_addr:${SATELLITE_GRPC_CLIENT:127.0.0.1:11800,127.0.0.2:11800}# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Kubernetes selector Using kubernetes_config to define the address\u0026rsquo;s finder.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:kubernetes}# The kubernetes config to lookup addresseskubernetes_config:# The kubernetes API server address, If not define means using in kubernetes mode to connectapi_server:http://localhost:8001/# The kind of apikind:endpoints# Support to lookup namespacesnamespaces:- default# The kind selectorselector:label:app=productpage# How to get the address exported portextra_port:port:9080# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Start Satellite Execute the script bin/startup.sh(linux) or bin/startup.cmd(windows) to start. Then It could start these port:\n gRPC port(11800): listen the gRPC request, It could handle request from SkyWalking Agent protocol and Envoy ALS/Metrics protocol. Prometheus(1234): listen the HTTP request, It could get all SO11Y metrics from /metrics endpoint using Prometheus format.  Change Address After the satellite start, need to change the address from agent/node. 
Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to the satellite gRPC port.\n","title":"Deploy on Linux and Windows","url":"/docs/skywalking-satellite/next/en/setup/examples/deploy/linux-windows/readme/"},{"content":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows instance.\nInstall Download Download the latest release version from SkyWalking Release Page.\nChange OAP Server addresses Update the OAP Server address in the config file, then satellite could connect to them and use round-robin policy for load-balance server before send each request.\nSupport two ways to locate the server list, using finder_type to change the type to find:\n static: Define the server address list. kubernetes: Define kubernetes pod/service/endpoint, it could be found addresses and dynamic update automatically.  Static server list You could see there define two server address and split by \u0026ldquo;,\u0026rdquo;.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:static}# The gRPC server address (default localhost:11800).server_addr:${SATELLITE_GRPC_CLIENT:127.0.0.1:11800,127.0.0.2:11800}# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Kubernetes selector Using kubernetes_config to define the address\u0026rsquo;s finder.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:kubernetes}# The kubernetes config to lookup addresseskubernetes_config:# The kubernetes API server address, If not define means using in kubernetes mode to connectapi_server:http://localhost:8001/# The kind of apikind:endpoints# Support to lookup namespacesnamespaces:- default# The kind selectorselector:label:app=productpage# How to get the address exported portextra_port:port:9080# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Start Satellite Execute the script bin/startup.sh(linux) or bin/startup.cmd(windows) to start. Then It could start these port:\n gRPC port(11800): listen the gRPC request, It could handle request from SkyWalking Agent protocol and Envoy ALS/Metrics protocol. Prometheus(1234): listen the HTTP request, It could get all SO11Y metrics from /metrics endpoint using Prometheus format.  Change Address After the satellite start, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to the satellite gRPC port.\n","title":"Deploy on Linux and Windows","url":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/deploy/linux-windows/readme/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/latest/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/next/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.1.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. 
Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.2.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.3.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.4.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.5.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.6.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.7.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. 
Most SkyWalking OAP settings are controlled through System environment variables when apply helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy oap and ui to a kubernetes cluster.\nPlease read the Readme file.\n","title":"Deploy SkyWalking backend and UI in kubernetes","url":"/docs/main/v9.0.0/en/setup/backend/backend-k8s/"},{"content":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
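For illustration only, a hypothetical V1 call might look like the following sketch; the metric name, the id placeholder, and the exact input fields of MetricCondition and Duration are assumptions here and should be checked against the query protocol definitions shown below: query { getLinearIntValues(metric: { name: \u0026#34;service_resp_time\u0026#34;, id: \u0026#34;\u0026lt;service-id\u0026gt;\u0026#34; }, duration: { start: \u0026#34;2022-01-01 1200\u0026#34;, end: \u0026#34;2022-01-01 1300\u0026#34;, step: MINUTE }) { values { id value } } }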
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","title":"Deprecated Query Protocol","url":"/docs/main/latest/en/api/query-protocol-deprecated/"},{"content":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nV1 APIs V1 APIs were introduced since 6.x. 
Now they are a shell to V2 APIs since 9.0.0.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","title":"Deprecated Query Protocol","url":"/docs/main/next/en/api/query-protocol-deprecated/"},{"content":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. 
You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","title":"Deprecated Query Protocol","url":"/docs/main/v9.5.0/en/api/query-protocol-deprecated/"},{"content":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. 
A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","title":"Deprecated Query Protocol","url":"/docs/main/v9.6.0/en/api/query-protocol-deprecated/"},{"content":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. 
You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","title":"Deprecated Query Protocol","url":"/docs/main/v9.7.0/en/api/query-protocol-deprecated/"},{"content":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One mmap-queue has a directory to store the whole data. The queue directory is made up of many segments and 1 metafile. This is originally implemented by bigqueue project, we changed it a little for fitting the Satellite project requirements.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  Meta Metadata only needs 80B to store the Metadata for the pipe. 
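A minimal Go sketch of such an 80-byte metafile follows: ten 8-byte slots holding a version, four segment-ID/offset pairs (writing, watermark, committed, reading), and the capacity, matching the layout diagram shown below. Field names are inferred from that diagram and may not match the actual Satellite code.

// Sketch of the 80-byte meta layout; field names inferred from the layout diagram.
package main

import (
	"encoding/binary"
	"fmt"
)

type position struct {
	SegmentID uint64 // which segment file the cursor points into
	Offset    uint64 // byte offset inside that segment
}

type meta struct {
	Version   uint64
	Writing   position
	Watermark position
	Committed position
	Reading   position
	Capacity  uint64
}

// encode packs the meta into the 10 x 8-byte slots (= 80 bytes) shown in the layout.
func (m meta) encode() []byte {
	buf := make([]byte, 80)
	fields := []uint64{
		m.Version,
		m.Writing.SegmentID, m.Writing.Offset,
		m.Watermark.SegmentID, m.Watermark.Offset,
		m.Committed.SegmentID, m.Committed.Offset,
		m.Reading.SegmentID, m.Reading.Offset,
		m.Capacity,
	}
	for i, v := range fields {
		binary.LittleEndian.PutUint64(buf[i*8:], v)
	}
	return buf
}

func main() {
	m := meta{Version: 1, Capacity: 10000}
	fmt.Println(len(m.encode())) // 80
}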
But for memory alignment, it takes at least one memory page size, which is generally 4K.\n[ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ] [metaVersion][ ID ][ offset][ ID ][ offset][ ID ][ offset][ ID ][ offset][capacity] [metaVersion][writing offset][watermark offset][committed offset][reading offset][capacity] Transforming BenchmarkTest Test machine: macbook pro 2018\nModel Name:\tMacBook Pro Model Identifier:\tMacBookPro15,1 Processor Name:\t6-Core Intel Core i7 Processor Speed:\t2.2 GHz Number of Processors:\t1 Total Number of Cores:\t6 L2 Cache (per Core):\t256 KB L3 Cache:\t9 MB Hyper-Threading Technology:\tEnabled Memory:\t16 GB System Firmware Version:\t1554.60.15.0.0 (iBridge: 18.16.13030.0.0,0 push operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueue BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 27585\t43559 ns/op\t9889 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 39326\t31773 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 56770\t22990 ns/op\t9816 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 43803\t29778 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 16870\t80576 ns/op\t18944 B/op\t10 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 36922\t39085 ns/op\t9889 B/op\t9 allocs/op PASS push and pop operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueueAndDequeue BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 21030\t60728 ns/op\t28774 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 30327\t41274 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 32738\t37923 ns/op\t28700 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 28209\t41169 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 14677\t89637 ns/op\t54981 B/op\t43 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 22228\t54963 ns/op\t28774 B/op\t42 allocs/op PASS ","title":"Design","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/mmap-queue/"},{"content":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One mmap-queue has a directory to store the whole data. The queue directory is made up of many segments and 1 metafile. This is originally implemented by bigqueue project, we changed it a little for fitting the Satellite project requirements.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  Meta Metadata only needs 80B to store the Metadata for the pipe. 
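Each segment is an ordinary file mapped into memory. The rough Go sketch below shows the basic idea of mapping a fixed-size segment file for read and write; it is illustrative only and not the Satellite implementation, and the file name and size are arbitrary.

// Sketch: map a fixed-size segment file into memory and write into it (unix only).
// Illustrative only; error handling is minimal.
package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	const segmentSize = 128 * 1024 // 128KB, matching one of the benchmark configurations

	f, err := os.OpenFile("segment-0.dat", os.O_RDWR|os.O_CREATE, 0o644)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := f.Truncate(segmentSize); err != nil {
		panic(err)
	}

	data, err := syscall.Mmap(int(f.Fd()), 0, segmentSize, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(data)

	copy(data, []byte("hello queue")) // writes go straight to the page cache backing the file
	fmt.Println("mapped", len(data), "bytes")
}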
But for memory alignment, it takes at least one memory page size, which is generally 4K.\n[ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ] [metaVersion][ ID ][ offset][ ID ][ offset][ ID ][ offset][ ID ][ offset][capacity] [metaVersion][writing offset][watermark offset][committed offset][reading offset][capacity] Transforming BenchmarkTest Test machine: macbook pro 2018\nModel Name:\tMacBook Pro Model Identifier:\tMacBookPro15,1 Processor Name:\t6-Core Intel Core i7 Processor Speed:\t2.2 GHz Number of Processors:\t1 Total Number of Cores:\t6 L2 Cache (per Core):\t256 KB L3 Cache:\t9 MB Hyper-Threading Technology:\tEnabled Memory:\t16 GB System Firmware Version:\t1554.60.15.0.0 (iBridge: 18.16.13030.0.0,0 push operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueue BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 27585\t43559 ns/op\t9889 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 39326\t31773 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 56770\t22990 ns/op\t9816 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 43803\t29778 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 16870\t80576 ns/op\t18944 B/op\t10 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 36922\t39085 ns/op\t9889 B/op\t9 allocs/op PASS push and pop operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueueAndDequeue BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 21030\t60728 ns/op\t28774 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 30327\t41274 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 32738\t37923 ns/op\t28700 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 28209\t41169 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 14677\t89637 ns/op\t54981 B/op\t43 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 22228\t54963 ns/op\t28774 B/op\t42 allocs/op PASS ","title":"Design","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/mmap-queue/"},{"content":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One mmap-queue has a directory to store the whole data. The queue directory is made up of many segments and 1 metafile. This is originally implemented by bigqueue project, we changed it a little for fitting the Satellite project requirements.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  Meta Metadata only needs 80B to store the Metadata for the pipe. 
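The benchmark figures above come from Go's standard benchmarking harness; the schematic below shows how such an enqueue benchmark is typically structured. The Queue interface and newQueue constructor are hypothetical stand-ins, not the real plugin API.

// Schematic enqueue benchmark; Queue and newQueue are hypothetical stand-ins.
package mmap_test

import "testing"

type Queue interface {
	Enqueue([]byte) error
}

type noopQueue struct{}

func (noopQueue) Enqueue([]byte) error { return nil }

// newQueue would construct the mmap queue under test; stubbed here for illustration.
func newQueue(segmentSizeKB, maxInMemSegments, capacity int) Queue { return noopQueue{} }

func BenchmarkEnqueue(b *testing.B) {
	q := newQueue(128, 18, 10000)
	msg := make([]byte, 8*1024) // 8KB message, as in the published numbers
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := q.Enqueue(msg); err != nil {
			b.Fatal(err)
		}
	}
}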
But for memory alignment, it takes at least one memory page size, which is generally 4K.\n[ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ] [metaVersion][ ID ][ offset][ ID ][ offset][ ID ][ offset][ ID ][ offset][capacity] [metaVersion][writing offset][watermark offset][committed offset][reading offset][capacity] Transforming BenchmarkTest Test machine: macbook pro 2018\nModel Name:\tMacBook Pro Model Identifier:\tMacBookPro15,1 Processor Name:\t6-Core Intel Core i7 Processor Speed:\t2.2 GHz Number of Processors:\t1 Total Number of Cores:\t6 L2 Cache (per Core):\t256 KB L3 Cache:\t9 MB Hyper-Threading Technology:\tEnabled Memory:\t16 GB System Firmware Version:\t1554.60.15.0.0 (iBridge: 18.16.13030.0.0,0 push operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueue BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 27585\t43559 ns/op\t9889 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 39326\t31773 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 56770\t22990 ns/op\t9816 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 43803\t29778 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 16870\t80576 ns/op\t18944 B/op\t10 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 36922\t39085 ns/op\t9889 B/op\t9 allocs/op PASS push and pop operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueueAndDequeue BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 21030\t60728 ns/op\t28774 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 30327\t41274 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 32738\t37923 ns/op\t28700 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 28209\t41169 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 14677\t89637 ns/op\t54981 B/op\t43 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 22228\t54963 ns/op\t28774 B/op\t42 allocs/op PASS ","title":"Design","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/mmap-queue/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. 
For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/latest/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. 
So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/next/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. 
(4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.0.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  
See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.1.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.2.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. 
The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.3.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. 
There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.4.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. 
SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.5.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. 
The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.6.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  
","title":"Design Goals","url":"/docs/main/v9.7.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project.\n Support various E2E testing requirements in SkyWalking main repository with other ecosystem repositories. Support both docker-compose and KinD to orchestrate the tested services under different environments. Be language-independent as much as possible, users only need to configure YAMLs and run commands, without writing code.  Non-Goal  This framework is not involved with the build process, i.e. it won’t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn’t take the plugin tests into account, at least for now;  ","title":"Design Goals","url":"/docs/skywalking-infra-e2e/latest/en/concepts-and-designs/project-goals/"},{"content":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project.\n Support various E2E testing requirements in SkyWalking main repository with other ecosystem repositories. Support both docker-compose and KinD to orchestrate the tested services under different environments. Be language-independent as much as possible, users only need to configure YAMLs and run commands, without writing code.  Non-Goal  This framework is not involved with the build process, i.e. it won’t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn’t take the plugin tests into account, at least for now;  ","title":"Design Goals","url":"/docs/skywalking-infra-e2e/next/en/concepts-and-designs/project-goals/"},{"content":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project.\n Support various E2E testing requirements in SkyWalking main repository with other ecosystem repositories. Support both docker-compose and KinD to orchestrate the tested services under different environments. Be language-independent as much as possible, users only need to configure YAMLs and run commands, without writing code.  Non-Goal  This framework is not involved with the build process, i.e. it won’t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn’t take the plugin tests into account, at least for now;  ","title":"Design Goals","url":"/docs/skywalking-infra-e2e/v1.3.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light Weight. SkyWalking Satellite has a limited cost for resources and high-performance because of the requirements of the sidecar deployment model.\n  Pluggability. SkyWalking Satellite core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking Satellite can run in multiple environments, including:\n Use traditional deployment as a daemon process to collect data. Use cloud services as a sidecar, such as in the Kubernetes platform.    Interoperability. Observability is a big landscape, SkyWalking is impossible to support all, even by its community. So SkyWalking Satellite is compatible with many protocols, including:\n SkyWalking protocol (WIP) Prometheus protocol.    
","title":"Design Goals","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/project-goals/"},{"content":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light Weight. SkyWalking Satellite has a limited cost for resources and high-performance because of the requirements of the sidecar deployment model.\n  Pluggability. SkyWalking Satellite core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking Satellite can run in multiple environments, including:\n Use traditional deployment as a daemon process to collect data. Use cloud services as a sidecar, such as in the Kubernetes platform.    Interoperability. Observability is a big landscape, SkyWalking is impossible to support all, even by its community. So SkyWalking Satellite is compatible with many protocols, including:\n SkyWalking protocol (WIP) Prometheus protocol.    ","title":"Design Goals","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/project-goals/"},{"content":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light Weight. SkyWalking Satellite has a limited cost for resources and high-performance because of the requirements of the sidecar deployment model.\n  Pluggability. SkyWalking Satellite core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking Satellite can run in multiple environments, including:\n Use traditional deployment as a daemon process to collect data. Use cloud services as a sidecar, such as in the Kubernetes platform.    Interoperability. Observability is a big landscape, SkyWalking is impossible to support all, even by its community. So SkyWalking Satellite is compatible with many protocols, including:\n SkyWalking protocol (WIP) Prometheus protocol.    ","title":"Design Goals","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/project-goals/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. 
However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. 
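One reason kprobes correlate well with system-level metrics is that the kernel already tracks per-socket statistics such as RTT and retransmits. The Linux-only Go sketch below reads those TCP_INFO counters from user space purely for illustration; Rover collects the equivalent data inside eBPF rather than with this call, and the golang.org/x/sys/unix usage here is a convenience assumption for the example.

// Sketch (Linux only): read the kernel's TCP_INFO counters for an open socket from user space.
// Shown only to illustrate the layer-4 statistics available; not how Rover collects them.
package main

import (
	"fmt"
	"net"

	"golang.org/x/sys/unix"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	raw, err := conn.(*net.TCPConn).SyscallConn()
	if err != nil {
		panic(err)
	}
	_ = raw.Control(func(fd uintptr) {
		info, err := unix.GetsockoptTCPInfo(int(fd), unix.IPPROTO_TCP, unix.TCP_INFO)
		if err != nil {
			fmt.Println("getsockopt:", err)
			return
		}
		fmt.Printf("rtt=%dus retransmits=%d\n", info.Rtt, info.Total_retrans)
	})
}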
So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
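To illustrate the Protocol analysis step from the Send/Receive section above, here is a simplified user-space Go sketch that classifies a captured buffer as HTTP/1.x by inspecting its first bytes. The real eBPF-based analysis covers more protocols and operates on kernel-intercepted data, so treat this as a toy example.

// Toy protocol sniffing over a captured send/receive buffer (HTTP/1.x only).
// The real eBPF-based analysis covers more protocols and runs on kernel-intercepted data.
package main

import (
	"bytes"
	"fmt"
)

// classify returns a best-effort protocol label for the first bytes of a buffer.
func classify(buf []byte) string {
	methods := [][]byte{
		[]byte("GET "), []byte("POST "), []byte("PUT "), []byte("DELETE "),
		[]byte("HEAD "), []byte("PATCH "), []byte("OPTIONS "),
	}
	for _, m := range methods {
		if bytes.HasPrefix(buf, m) {
			return "http/1.x request"
		}
	}
	if bytes.HasPrefix(buf, []byte("HTTP/1.")) {
		return "http/1.x response"
	}
	return "tcp (unrecognized payload)"
}

func main() {
	fmt.Println(classify([]byte("GET /productpage HTTP/1.1\r\nHost: bookinfo\r\n\r\n")))
	fmt.Println(classify([]byte{0x16, 0x03, 0x01})) // an encrypted TLS record is not readable here
}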
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-helm.git cd skywalking-helm cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/latest/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. 
There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. 
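As a concrete illustration of the send/receive interception described in the previous section, the following is a minimal, hypothetical sketch in libbpf-style C (not SkyWalking Rover's actual code). For brevity it attaches a kprobe to the kernel's tcp_sendmsg rather than the raw syscall entry points, and simply counts the bytes each process asks the kernel to send. Note that on a TLS connection everything it sees is ciphertext, which is exactly the limitation discussed next.

// send_bytes.bpf.c -- hypothetical sketch of the kprobe approach, not Rover's code.
// Counts bytes passed to tcp_sendmsg, keyed by the calling process.
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 10240);
    __type(key, __u32);    /* pid */
    __type(value, __u64);  /* total bytes requested to send */
} sent_bytes SEC(".maps");

/* int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); */
SEC("kprobe/tcp_sendmsg")
int BPF_KPROBE(trace_tcp_sendmsg, struct sock *sk, struct msghdr *msg, size_t size)
{
    __u32 pid = bpf_get_current_pid_tgid() >> 32;
    __u64 *total = bpf_map_lookup_elem(&sent_bytes, &pid);

    if (total) {
        __sync_fetch_and_add(total, size);
    } else {
        __u64 init = size;
        bpf_map_update_elem(&sent_bytes, &pid, &init, BPF_ANY);
    }
    return 0;
}

char LICENSE[] SEC("license") = "GPL";

A real collector would key the map by socket rather than by process and also measure execution time, but the shape of the hook is the same.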
In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   
Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-helm.git cd skywalking-helm cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. 
When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/next/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. 
To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  
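The next section walks through the specific hooks in more detail. As a taste of the kind of connection information a kprobe can recover, here is a minimal, hypothetical libbpf-style sketch (again, not the actual SkyWalking Rover source) that records the remote IPv4 address and port of every outgoing TCP connection attempt.

// connect_addr.bpf.c -- hypothetical sketch, not SkyWalking Rover's code.
// Logs the destination of every outgoing IPv4 TCP connection attempt.
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_endian.h>

/* int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); */
SEC("kprobe/tcp_v4_connect")
int BPF_KPROBE(trace_connect, struct sock *sk, struct sockaddr *uaddr)
{
    struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
    __u32 pid   = bpf_get_current_pid_tgid() >> 32;
    __u32 daddr = BPF_CORE_READ(sin, sin_addr.s_addr);  /* network byte order */
    __u16 dport = BPF_CORE_READ(sin, sin_port);         /* network byte order */

    /* A real collector would push this into a map or ring buffer;
       bpf_printk is only for illustration (readable via trace_pipe). */
    bpf_printk("pid %d -> addr %x port %d", pid, daddr, bpf_ntohs(dport));
    return 0;
}

char LICENSE[] SEC("license") = "GPL";

This is one piece of the "connection information" discussed below; mapping the socket to its file descriptor and collecting RTT or retransmit statistics follow the same pattern with different hook points.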
Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/v9.3.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. 
There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. 
In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   
Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. 
When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/v9.4.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. 
To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  
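Whichever side is hooked, it is the user-space loader that decides where a given eBPF program attaches. The fragment below is a hypothetical sketch of how a collector using libbpf might attach one program as a kprobe on tcp_sendmsg and another as a uprobe on the SSL_write function of OpenSSL; the object file name, program names, and libssl path are placeholders, and exact helper signatures can vary between libbpf versions.

// loader.c -- hypothetical user-space sketch; object/program names are placeholders.
// Attaches one eBPF program as a kernel kprobe and one as an OpenSSL uprobe.
#include <stdio.h>
#include <unistd.h>
#include <bpf/libbpf.h>

int main(void)
{
    struct bpf_object *obj = bpf_object__open_file("net_probes.bpf.o", NULL);
    if (!obj || bpf_object__load(obj)) {
        fprintf(stderr, "failed to open/load BPF object\n");
        return 1;
    }

    /* Kernel-space hook: no knowledge of the application is required. */
    struct bpf_program *kp = bpf_object__find_program_by_name(obj, "trace_tcp_sendmsg");
    bpf_program__attach_kprobe(kp, false /* not a kretprobe */, "tcp_sendmsg");

    /* User-space hook: needs the library path and the exported symbol name. */
    LIBBPF_OPTS(bpf_uprobe_opts, uopts, .func_name = "SSL_write");
    struct bpf_program *up = bpf_object__find_program_by_name(obj, "probe_ssl_write");
    bpf_program__attach_uprobe_opts(up, -1 /* any pid */,
                                    "/usr/lib/x86_64-linux-gnu/libssl.so.3",
                                    0 /* offset resolved from func_name */, &uopts);

    /* Error handling and event polling omitted; the attachments stay
       active for the lifetime of this process. */
    for (;;)
        pause();
}

The contrast is visible in the arguments: the kprobe only needs a kernel symbol name, while the uprobe needs the concrete library path and symbol of whatever the application links against, which is why the uprobe approach is reserved for cases such as TLS where the kernel cannot see the plaintext.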
Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between a process and the external environment (another Pod or service). Additionally, it can identify the direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are internal processes of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between the Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the network monitoring metrics between two processes. There are four metrics on each line: the two on the left are from the client side, and the two on the right are from the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the totals over a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of the data across buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round-trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept (with another server/client) counter.   Close Counter and histogram Millisecond The socket close counter.   Retransmit Counter Millisecond The socket packet retransmit counter.   Drop Counter Millisecond The socket packet drop counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and it comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace so that Istio automatically injects Envoy sidecar proxies when we deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature was released in version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, SkyWalking Rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open the SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance and see the list of tasks in the network profiling tab. When we click the start button, SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: the lack of context between layers in the network stack. These are the cases where eBPF really starts to help with debugging and productivity, where the existing service mesh and Envoy telemetry cannot. Then, we researched how eBPF could be applied to common communication patterns, such as TLS. Finally, we demonstrated an implementation of this approach with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/v9.5.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. 
There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. 
In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   
Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. 
When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/v9.6.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. 
To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  
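To make the correlation point above concrete, here is a small, hypothetical sketch (again, not SkyWalking Rover's actual implementation) that pairs the entry and return probes of a receive syscall by thread id, so a single call's byte count and execution time can be measured even though they arrive in two separate probes. It assumes a libbpf/CO-RE build with vmlinux.h.

```c
// Hedged sketch: correlate kprobe/kretprobe on a receive syscall via pid_tgid.
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 10240);
    __type(key, __u64);    /* pid_tgid of the calling thread */
    __type(value, __u64);  /* entry timestamp in ns */
} start_ts SEC(".maps");

SEC("kprobe/__sys_recvfrom")
int BPF_KPROBE(recv_enter)
{
    __u64 id = bpf_get_current_pid_tgid();
    __u64 ts = bpf_ktime_get_ns();

    /* Remember when this thread entered the syscall. */
    bpf_map_update_elem(&start_ts, &id, &ts, BPF_ANY);
    return 0;
}

SEC("kretprobe/__sys_recvfrom")
int BPF_KRETPROBE(recv_exit, long ret)
{
    __u64 id = bpf_get_current_pid_tgid();
    __u64 *tsp = bpf_map_lookup_elem(&start_ts, &id);

    if (!tsp)
        return 0;

    __u64 duration = bpf_ktime_get_ns() - *tsp;
    bpf_map_delete_elem(&start_ts, &id);

    /* ret is the number of bytes received (negative errno on failure);
       duration is the execution time of this single call. */
    bpf_printk("recvfrom: %d bytes in %llu ns", (int)ret, duration);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
```

A real collector would feed these pairs into per-socket counters and histograms rather than printing them, which is how metrics like the ones described later in this article could be assembled.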
Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-helm.git cd skywalking-helm cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/v9.7.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","title":"Disable plugins","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/how-to-disable-plugin/"},{"content":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","title":"Disable plugins","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-disable-plugin/"},{"content":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","title":"Disable plugins","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/how-to-disable-plugin/"},{"content":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","title":"Disable plugins","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/how-to-disable-plugin/"},{"content":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","title":"Disable plugins","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/how-to-disable-plugin/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... 
other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/latest/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. 
serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/next/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables. At the same time, some of them support dynamic settings from upstream management system.\nCurrently, SkyWalking supports the 2 types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configVaule} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides receiver-trace/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key that corresponds to a group sub config items. A sub config item is a key value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kuberbetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configVaule} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kuberbetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. 
serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. 
If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... 
For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/latest/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/next/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in apollo are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configVaule    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/latest/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. 
Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/next/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /, see: https://www.consul.io/docs/dynamic-app-config/kv#using-consul-kv\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If use Consul UI we can see keys organized like folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). 
To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /, see: https://www.consul.io/docs/dynamic-app-config/kv#using-consul-kv\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /, see: https://www.consul.io/docs/dynamic-app-config/kv#using-consul-kv\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. 
Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/latest/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/next/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Only the v3 protocol is supported since 8.7.0.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period.
Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g.
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as Dynamic Configuration Center (DCC). 
To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs is configmap data items as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. 
{labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. 
we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/latest/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/next/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/latest/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/next/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as Dynamic Configuration Center (DCC). 
To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking the config in nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace, if you set the config by Nacos UI each subItemkey should in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API each subItemkey should separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking the config in nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/latest/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/next/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. 
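A minimal, hypothetical sketch of such an upstream implementation in Java follows. The generated gRPC class and message builder names used below (ConfigurationServiceGrpc, Config, GroupConfigItems, and friends) are assumptions inferred from the rpc and message shapes shown on this page, not taken verbatim from the SkyWalking proto files; only the io.grpc server API is used as-is.

import io.grpc.Server;
import io.grpc.ServerBuilder;
import io.grpc.stub.StreamObserver;

// Sketch of an upstream config server answering the DCS "call" and "callGroup" rpcs.
public class DcsUpstreamSketch extends ConfigurationServiceGrpc.ConfigurationServiceImplBase {

    @Override
    public void call(ConfigurationRequest request, StreamObserver<ConfigurationResponse> observer) {
        // Single config: one configTable entry per key. An unchanged uuid tells the OAP
        // the data has not changed, so it does not need to be re-applied.
        ConfigurationResponse response = ConfigurationResponse.newBuilder()
            .setUuid("2f6c6a3e")
            .addConfigTable(Config.newBuilder()
                .setName("agent-analyzer.default.slowDBAccessThreshold")
                .setValue("default:200,mongodb:50"))
            .build();
        observer.onNext(response);
        observer.onCompleted();
    }

    @Override
    public void callGroup(ConfigurationRequest request, StreamObserver<GroupConfigurationResponse> observer) {
        // Group config: a groupConfigTable whose items carry the sub keys of the group.
        GroupConfigurationResponse response = GroupConfigurationResponse.newBuilder()
            .addGroupConfigTable(GroupConfigItems.newBuilder()
                .setGroupName("core.default.endpoint-name-grouping-openapi")
                .addItems(Config.newBuilder().setName("customerAPI-v1").setValue("value of customerAPI-v1"))
                .addItems(Config.newBuilder().setName("productAPI-v1").setValue("value of productAPI-v1")))
            .build();
        observer.onNext(response);
        observer.onCompleted();
    }

    public static void main(String[] args) throws Exception {
        // Expose the service on the port the OAP points at via SW_DCS_SERVER_PORT (80 above).
        Server server = ServerBuilder.forPort(80).addService(new DcsUpstreamSketch()).build().start();
        server.awaitTermination();
    }
}

The OAP polls this service every period seconds (20 in the snippet above) and uses the returned uuid to decide whether the configuration actually changed.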
The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. 
The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/latest/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/next/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/latest/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. 
log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/next/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to update the XML configuration file manually, which could be time-consuming and prone to manmade mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP has started, you have to wait for a while for the changes to be applied. 
The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.0.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.1.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.2.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.3.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.4.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.5.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.6.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.7.0/en/setup/backend/dynamical-logging/"},{"content":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. 
Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. 
Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. 
The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","title":"eBPF Profiling","url":"/docs/main/latest/en/setup/backend/backend-ebpf-profiling/"},{"content":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. 
When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. 
Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. 
Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. 
Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","title":"eBPF Profiling","url":"/docs/main/next/en/setup/backend/backend-ebpf-profiling/"},{"content":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. 
At this point, a schedule for the task is generated. The schedule data contains the following information:\n Schedule ID: The ID of the current schedule. Task: The task to which the current schedule data belongs. Process: The process for which the current schedule's profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing schedule ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent kernel mode and user mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of a symbol, the longer the method's execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocking on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives an Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to a process and, based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance (Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network behavior of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  
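For illustration only, the parameters above could be pictured as the small YAML-like sketch below. This is not an actual SkyWalking API payload or configuration file; the field names are hypothetical and simply mirror the task options and the sampling fields explained in the following paragraphs.

    instance: mesh-svc-instance-0      # hypothetical instance entity (e.g. a Pod)
    sampling:
      uriRegex: /api/.*                # only collect requests matching this URI
      minDuration: 100                 # only sample responses at least this slow (ms)
      when4xx: true                    # sample responses with status 400-499
      when5xx: true                    # sample responses with status 500-599
      settings:
        requireCompleteRequest: true   # collect request data
        maxRequestSize: 2048           # max bytes collected per request; empty means all
        requireCompleteResponse: true  # collect response data
        maxResponseSize: 2048          # max bytes collected per response; empty means all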
Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. 
The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","title":"eBPF Profiling","url":"/docs/main/v9.5.0/en/setup/backend/backend-ebpf-profiling/"},{"content":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. 
When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. 
Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. 
Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining the performance loss caused by blocking on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives an Off CPU Profiling task, it also collects data and generates a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to a process and, based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance (Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two processes. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, the OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, the data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using the HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transmission, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then the eBPF agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. 
Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","title":"eBPF Profiling","url":"/docs/main/v9.6.0/en/setup/backend/backend-ebpf-profiling/"},{"content":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. 
At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  
Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. 
The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","title":"eBPF Profiling","url":"/docs/main/v9.7.0/en/setup/backend/backend-ebpf-profiling/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/latest/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/next/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.0.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.1.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.2.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.3.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.4.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.5.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.6.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.7.0/en/faq/es-server-faq/"},{"content":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. The storage provider is elasticsearch. This storage option is recommended for a large scale production environment, such as more than 1000 services, 10000 endpoints, and 100000 traces per minute, and plan to 100% sampling rate for the persistent in the storage.\nOpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  Elasticsearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_management (All SkyWalking management data, e.g. 
UI dashboard settings, UI Menu, Continuous profiling policy) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
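For readability, the HTTPS example above can be written as a conventional YAML block. This is a minimal sketch using the same settings; the environment variable names follow the general configuration listed earlier on this page (SW_STORAGE_ES_SSL_JKS_PATH and SW_STORAGE_ES_SSL_JKS_PASS), and the truststore path is only a placeholder:

    storage:
      selector: ${SW_STORAGE:elasticsearch}
      elasticsearch:
        namespace: ${SW_NAMESPACE:""}
        user: ${SW_ES_USER:""}            # required when HTTP Basic authentication is enabled
        password: ${SW_ES_PASSWORD:""}    # required when HTTP Basic authentication is enabled
        clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}
        trustStorePath: ${SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
        trustStorePass: ${SW_STORAGE_ES_SSL_JKS_PASS:""}
        protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"https"}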
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\n","title":"Elasticsearch and OpenSearch","url":"/docs/main/latest/en/setup/backend/storages/elasticsearch/"},{"content":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. The storage provider is elasticsearch. 
This storage option is recommended for a large scale production environment, such as more than 1000 services, 10000 endpoints, and 100000 traces per minute, and plan to 100% sampling rate for the persistent in the storage.\nOpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  Elasticsearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_management (All SkyWalking management data, e.g. UI dashboard settings, UI Menu, Continuous profiling policy) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config 
would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  
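The Secrets Management File mentioned above is a plain properties file (its format is described in the next section); a minimal sketch with placeholder values:

    user=xxx
    password=yyy
    trustStorePass=zzz

Because the OAP server watches this file, updating the credentials in it, manually or through a tool such as Vault, lets the storage provider reconnect with the new username, password, and JKS password without a restart.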
Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\n","title":"Elasticsearch and OpenSearch","url":"/docs/main/next/en/setup/backend/storages/elasticsearch/"},{"content":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. The storage provider is elasticsearch. 
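To make the shard arithmetic in the Index Settings table above concrete, here is a hedged sketch with hypothetical values: super datasets (segment, log, browser error log, Zipkin span indices) get indexShardsNumber * superDatasetIndexShardsFactor shards, while ordinary metrics/records indices keep indexShardsNumber.

```yaml
# Sketch only: hypothetical shard/replica settings and the index sizes they would produce
storage:
  elasticsearch:
    indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
    indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}
    superDatasetIndexShardsFactor: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}
    superDatasetIndexReplicasNumber: ${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}
# Resulting values per the table above:
#   sw_metrics-all-${day-format}, sw_records-all-${day-format}: number_of_shards=2, number_of_replicas=1
#   sw_segment-${day-format}, sw_log-${day-format}, sw_browser_error_log-${day-format},
#   sw_zipkin_span-${day-format}: number_of_shards=2*5=10, number_of_replicas=0
```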
This storage option is recommended for a large scale production environment, such as more than 1000 services, 10000 endpoints, and 100000 traces per minute, and plan to 100% sampling rate for the persistent in the storage.\nOpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  Elasticsearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_management (All SkyWalking management data, e.g. UI dashboard settings, UI Menu, Continuous profiling policy) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config 
would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.
Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\n","title":"Elasticsearch and OpenSearch","url":"/docs/main/v9.7.0/en/setup/backend/storages/elasticsearch/"},{"content":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce 
Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0","url":"/docs/main/latest/en/faq/es-version-conflict/"},{"content":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0","url":"/docs/main/next/en/faq/es-version-conflict/"},{"content":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0","url":"/docs/main/v9.0.0/en/faq/es-version-conflict/"},{"content":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0","url":"/docs/main/v9.1.0/en/faq/es-version-conflict/"},{"content":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0","url":"/docs/main/v9.2.0/en/faq/es-version-conflict/"},{"content":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0","url":"/docs/main/v9.3.0/en/faq/es-version-conflict/"},{"content":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0","url":"/docs/main/v9.4.0/en/faq/es-version-conflict/"},{"content":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0","url":"/docs/main/v9.5.0/en/faq/es-version-conflict/"},{"content":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0","url":"/docs/main/v9.6.0/en/faq/es-version-conflict/"},{"content":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0","url":"/docs/main/v9.7.0/en/faq/es-version-conflict/"},{"content":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collect metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup elasticsearch-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. 
elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  
meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only 
primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shards on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with only all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. 
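The Data flow and Setup steps above describe scraping elasticsearch-exporter with the OpenTelemetry Collector's Prometheus receiver and forwarding the metrics to the OAP over OTLP gRPC. A minimal Collector sketch follows, assuming the exporter is reachable at elasticsearch-exporter:9114 and the OAP OTLP/gRPC endpoint at oap:11800; both hostnames and the exporter port are placeholders for your own deployment.

```yaml
# otel-collector-config.yaml -- sketch only; hostnames and ports are assumptions
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: elasticsearch-monitoring
          scrape_interval: 30s
          static_configs:
            - targets: ['elasticsearch-exporter:9114']   # elasticsearch-exporter metrics endpoint

processors:
  batch: {}

exporters:
  otlp:
    endpoint: oap:11800          # SkyWalking OAP OTLP gRPC receiver
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
```

The official example configuration linked from the Setup list may add relabeling or resource attributes that the OAP OTEL rules expect, so treat this only as the overall pipeline shape rather than a drop-in config.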
The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","title":"Elasticsearch monitoring","url":"/docs/main/latest/en/setup/backend/backend-elasticsearch-monitoring/"},
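For the data flow described in this entry (the Prometheus Receiver scraping elasticsearch-exporter and the OpenTelemetry gRPC exporter pushing to the OAP), a minimal OpenTelemetry Collector pipeline could look like the sketch below; the scrape target, job name, and OAP address are assumptions to adjust for your deployment.

```yaml
# Minimal OpenTelemetry Collector pipeline sketch for Elasticsearch monitoring.
# Host names, ports, and the job name are assumptions; adjust them for your environment.
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: elasticsearch-monitoring        # assumed job name
          scrape_interval: 30s
          static_configs:
            - targets: ['elasticsearch-exporter:9114']   # elasticsearch-exporter's default port
processors:
  batch: {}
exporters:
  otlp:
    endpoint: oap:11800        # SkyWalking OAP gRPC receiver
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
```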
{"content":"Enable/Disable Channel Different channels allow different protocols to be transparently transmitted to the upstream service (OAP).\nConfig In the Satellite configuration, a channel is represented under the configured pipes. 
By default, all channels are open and all known protocols are processed.\nYou can delete a channel if you don’t want Satellite to receive and transmit that protocol.\nAfter the Satellite service is restarted, the deleted channel is disabled.\n","title":"Enable/Disable Channel","url":"/docs/skywalking-satellite/latest/en/setup/examples/feature/enable-disable-channel/readme/"},{"content":"End to End Tests (E2E) SkyWalking relies heavily on automated tests for software quality assurance, and E2E tests are an integral part of that.\n End-to-end testing is a methodology used to test whether the flow of an application performs as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between the various system components and systems.\n E2E tests in SkyWalking always set up the OAP, the monitored services, and the relevant remote server dependencies in a real environment, and verify the data flow and the final query results.\nThe E2E test involves some or all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we have migrated to e2e-v2, which leverages skywalking-infra-e2e and skywalking-cli for the whole E2E process: skywalking-infra-e2e controls the E2E process, and skywalking-cli interacts with the OAP, for example to request metrics and check the responses from the OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e. Set up skywalking-cli and yq (generally these two are enough) and any other tools your cases need. You can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), in single-node mode as well as cluster mode. 
Therefore, the first step is to determine what case we are going to verify and how to orchestrate the components.\nTo make the orchestration process easier, we’re using docker-compose, which provides a simple file format (docker-compose.yml) for orchestrating the required containers and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you’ll need > 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml (see the sketches after this section). Write the expected data (yml) for verification.   Run the E2E test  All E2E cases should be under skywalking/test/e2e-v2/cases. You can execute the e2e run command in skywalking/, e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at “Artifacts/Download artifacts/logs” (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","title":"End to End Tests (E2E)","url":"/docs/main/latest/en/guides/e2e/"},
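As referenced in the steps above, the e2e.yaml of a case points the runner at the environment file and lists the verification queries. The snippet below is a rough sketch of that layout with assumed file names and a sample swctl query; consult the existing cases under test/e2e-v2/cases for the authoritative format.

```yaml
# Rough sketch of an e2e.yaml case file; names and values are assumptions.
setup:
  env: compose                     # orchestrate the case with docker-compose
  file: docker-compose.yml         # the compose file written in the previous step
  timeout: 20m
verify:
  retry:
    count: 20
    interval: 10s
  cases:
    # Query the OAP through swctl and compare the output with the expected data file.
    - query: swctl --display yaml --base-url=http://${oap_host}:${oap_12800}/graphql service ls
      expected: expected/service.yml
```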
Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","title":"End to End Tests (E2E)","url":"/docs/main/next/en/guides/e2e/"},{"content":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking is always setting the OAP, monitored services and relative remote server dependencies in a real environment, and verify the dataflow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. 
Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","title":"End to End Tests (E2E)","url":"/docs/main/v9.6.0/en/guides/e2e/"},{"content":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking is always setting the OAP, monitored services and relative remote server dependencies in a real environment, and verify the dataflow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. 
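The expected data files referenced in the steps above are verification templates matched against the query output. A hypothetical expected/service.yml might look like the sketch below; the service name is a placeholder and the exact fields depend on the query and the SkyWalking version.

# Sketch of an expected-data template for a `service ls` query.
{{- contains . }}
- id: {{ b64enc "e2e-service-provider" }}.1    # hypothetical service name
  name: e2e-service-provider
  group: ""
{{- end }}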
Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","title":"End to End Tests (E2E)","url":"/docs/main/v9.7.0/en/guides/e2e/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. 
This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/latest/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. 
This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. 
Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/next/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. 
For example, when exporting events from a third-party system, the start time and end time are already known, so you may simply call the report function once.\nHow to Configure Alarms for Events Events are aggregated into metrics and can therefore be used as a source to trigger alarms. For example, if a specific event occurs a certain number of times within a period, alarms can be triggered and sent.\nEvery event has a default value = 1; when n events with the same name are reported, they are aggregated into value = n as follows.\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nEvent{name=Unhealthy, source={service=A,instance=a}, ...}\nwill be aggregated into\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} \u0026lt;value = 6\u0026gt;\nso you can configure the following alarm rule to trigger an alarm when the Unhealthy event occurs more than 5 times within 10 minutes.\nrules:\n  unhealthy_event_rule:\n    metrics-name: Unhealthy\n    # Healthiness check is usually a scheduled task,\n    # they may be unhealthy for the first few times,\n    # and can be unhealthy occasionally due to network jitter,\n    # please adjust the threshold as per your actual situation.\n    threshold: 5\n    op: \u0026#34;\u0026gt;\u0026#34;\n    period: 10\n    count: 1\n    message: Service instance has been unhealthy for 10 minutes\nFor more alarm configuration details, please refer to the alarm doc.\nNote that the Unhealthy event above is only for demonstration; it is not detected by default in SkyWalking. However, you can use the methods in How to Report Events to report this kind of event.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nBy default, SkyWalking also generates some metrics for events by using OAL. The default list of event metrics may change over time; you can find the complete list in event.oal. If you want to generate your custom metrics from events, please refer to OAL for how to write OAL rules.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will be created. Reported from internal SkyWalking OAP.    The following events are all reported by the Kubernetes Event Exporter; in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killed. Reported by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reported by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reported by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reported by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reported by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reported by Kubernetes Event Exporter.    
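To show how an Unhealthy event like the one in the table above could be fed into the backend, here is a sketch that posts a single event to the HTTP endpoint described in the Events Report Protocol. The OAP address, UUID, source names and timestamps are placeholders; the payload mirrors the JSON record shown in that protocol doc.

# Sketch only: report one hypothetical Unhealthy event via the HTTP API.
curl -X POST http://localhost:12800/v3/events \
  -H 'Content-Type: application/json' \
  -d '[
    {
      "uuid": "b1d7b2a0-0000-4000-8000-000000000001",
      "source": { "service": "A", "instance": "a" },
      "name": "Unhealthy",
      "type": "Error",
      "message": "Readiness probe failed.",
      "parameters": {},
      "startTime": 1628044330000,
      "endTime": 1628044331000
    }
  ]'
# Note: per the protocol definition, releases since 9.0.0 also expect a layer field on each event.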
The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.0.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nHow to Configure Alarms for Events Events derive from metrics, and can be the source to trigger alarms. 
For example, if a specific event occurs for a certain times in a period, alarms can be triggered and sent.\nEvery event has a default value = 1, when n events with the same name are reported, they are aggregated into value = n as follows.\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} will be aggregated into\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} \u0026lt;value = 6\u0026gt; so you can configure the following alarm rule to trigger alarm when Unhealthy event occurs more than 5 times within 10 minutes.\nrules:unhealthy_event_rule:metrics-name:Unhealthy# Healthiness check is usually a scheduled task,# they may be unhealthy for the first few times,# and can be unhealthy occasionally due to network jitter,# please adjust the threshold as per your actual situation.threshold:5op:\u0026#34;\u0026gt;\u0026#34;period:10count:1message:Service instance has been unhealthy for 10 minutesFor more alarm configuration details, please refer to the alarm doc.\nNote that the Unhealthy event above is only for demonstration, they are not detected by default in SkyWalking, however, you can use the methods in How to Report Events to report this kind of events.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.1.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. 
This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. 
Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.2.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. 
For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.3.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. 
Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.4.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. 
Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. 
Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.5.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. 
For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.6.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. 
Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.7.0/en/concepts-and-designs/event/"},{"content":"Events Report Protocol The protocol is used to report events to the backend. 
The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Events Report Protocol","url":"/docs/main/latest/en/api/event/"},{"content":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  
string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Events Report Protocol","url":"/docs/main/next/en/api/event/"},{"content":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  
map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Events Report Protocol","url":"/docs/main/v9.4.0/en/api/event/"},{"content":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. 
This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Events Report Protocol","url":"/docs/main/v9.5.0/en/api/event/"},{"content":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  
// There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Events Report Protocol","url":"/docs/main/v9.6.0/en/api/event/"},{"content":"Events Report Protocol The protocol is used to report events to the backend. 
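A minimal client-side sketch of the collect stream defined below, assuming the stubs generated from event.proto and common/Command.proto by the standard protoc/grpc-java plugins (the package names follow the java_package options in the protos); the OAP address, the 11800 port, and the event values are assumptions for illustration only:
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.stub.StreamObserver;
import org.apache.skywalking.apm.network.common.v3.Commands;
import org.apache.skywalking.apm.network.event.v3.Event;
import org.apache.skywalking.apm.network.event.v3.EventServiceGrpc;
import org.apache.skywalking.apm.network.event.v3.Source;
import org.apache.skywalking.apm.network.event.v3.Type;

public class EventReportSketch {
    public static void main(String[] args) {
        // Assumed OAP gRPC address; 11800 is the usual OAP gRPC port.
        ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 11800).usePlaintext().build();
        EventServiceGrpc.EventServiceStub stub = EventServiceGrpc.newStub(channel);

        // The response observer simply drains the Commands returned by the OAP.
        StreamObserver<Event> events = stub.collect(new StreamObserver<Commands>() {
            @Override public void onNext(Commands commands) { }
            @Override public void onError(Throwable t) { t.printStackTrace(); }
            @Override public void onCompleted() { }
        });

        String uuid = java.util.UUID.randomUUID().toString();
        Source source = Source.newBuilder().setService("SERVICE-A").build();
        Event.Builder upgrade = Event.newBuilder()
                .setUuid(uuid)                         // same UUID for both calls
                .setSource(source)
                .setName("Upgrade")
                .setType(Type.Normal)
                .setMessage("Upgrade from 9.6.0 to 9.7.0")
                .setLayer("GENERAL")                   // assumed layer name
                .setStartTime(System.currentTimeMillis());

        events.onNext(upgrade.build());                                        // first call: event started
        events.onNext(upgrade.setEndTime(System.currentTimeMillis()).build()); // second call: event ended
        events.onCompleted();
        channel.shutdown();
    }
}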
The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Events Report Protocol","url":"/docs/main/v9.7.0/en/api/event/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. 
message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and configure the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true means that only trace segments with error status are exported through the Kafka channel.  Log Kafka Exporter Log The Kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and configure the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/latest/en/setup/backend/exporter/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER:default} and ${SW_EXPORTER_ENABLE_GRPC_METRICS:true}, and configure the target gRPC server address.\nexporter:selector:${SW_EXPORTER:default}default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You can set any gRPC server to receive the data. The target gRPC service needs to be on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. 
Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER:default} and ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}, configure the Kafka server addresses.\nexporter:selector:${SW_EXPORTER:default}default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER:default} and ${SW_EXPORTER_ENABLE_KAFKA_LOG:true}, configure the Kafka server addresses.\nexporter:selector:${SW_EXPORTER:default}default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/next/en/setup/backend/exporter/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics   Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. 
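A rough sketch of the receiving side for this native export service, assuming the base class generated from metric-exporter.proto by the standard grpc-java plugin; the package name and the 9870 port (the default gRPCTargetPort in the config shown above) are assumptions:
import io.grpc.Server;
import io.grpc.ServerBuilder;
import io.grpc.stub.StreamObserver;
import org.apache.skywalking.oap.server.exporter.grpc.ExportMetricValue;
import org.apache.skywalking.oap.server.exporter.grpc.ExportResponse;
import org.apache.skywalking.oap.server.exporter.grpc.MetricExportServiceGrpc;
import org.apache.skywalking.oap.server.exporter.grpc.SubscriptionReq;
import org.apache.skywalking.oap.server.exporter.grpc.SubscriptionsResp;

public class MetricReceiverSketch extends MetricExportServiceGrpc.MetricExportServiceImplBase {

    // Returning an empty subscription asks the OAP to export all metrics in the incremental event type.
    @Override
    public void subscription(SubscriptionReq request, StreamObserver<SubscriptionsResp> responseObserver) {
        responseObserver.onNext(SubscriptionsResp.newBuilder().build());
        responseObserver.onCompleted();
    }

    // The OAP streams the subscribed metric values here on its core schedule; this sketch only counts them.
    @Override
    public StreamObserver<ExportMetricValue> export(StreamObserver<ExportResponse> responseObserver) {
        return new StreamObserver<ExportMetricValue>() {
            private long received;
            @Override public void onNext(ExportMetricValue value) { received++; }
            @Override public void onError(Throwable t) { t.printStackTrace(); }
            @Override public void onCompleted() {
                System.out.println("received " + received + " metric values");
                responseObserver.onNext(ExportResponse.newBuilder().build());
                responseObserver.onCompleted();
            }
        };
    }

    public static void main(String[] args) throws Exception {
        Server server = ServerBuilder.forPort(9870).addService(new MetricReceiverSketch()).build().start();
        server.awaitTermination();
    }
}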
Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}...Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. 
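For reference, a downstream consumer of these export topics might look like the following sketch. It uses the standard Kafka Java client and assumes the LogData class generated from Logging.proto of the SkyWalking data collect protocol (the package name and consumer group are assumptions):
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.skywalking.apm.network.logging.v3.LogData; // assumed generated class

public class ExportLogConsumerSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // matches the default kafkaBootstrapServers
        props.put("group.id", "sw-export-log-consumer");  // assumed group id
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props)) {
            // Topic name matches the default kafkaTopicLog in the exporter config.
            consumer.subscribe(Collections.singletonList("skywalking-export-log"));
            while (true) {
                ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, byte[]> record : records) {
                    // The key is the LogRecordId; the value is the serialized LogData message.
                    LogData logData = LogData.parseFrom(record.value());
                    System.out.println(record.key() + " -> " + logData.getService() + "/" + logData.getEndpoint());
                }
            }
        }
    }
}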
Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/v9.3.0/en/setup/backend/exporter/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics   Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. 
message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}...Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/v9.4.0/en/setup/backend/exporter/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. 
For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/v9.5.0/en/setup/backend/exporter/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. 
Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. 
Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/v9.6.0/en/setup/backend/exporter/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. 
message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and configure the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true means that only trace segments with error status are exported through the Kafka channel.  Log Kafka Exporter Log The Kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and configure the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/v9.7.0/en/setup/backend/exporter/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory to which the data is exported   Enter the SkyWalking root path Execute the shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing the shell command.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/latest/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. 
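As a concrete invocation of the export command described above (the task ID, trace ID, and export directory are placeholders for illustration):
# Run from the SkyWalking root path.
bash tools/profile-exporter/profile_exporter.sh --taskid=b1a2c3d4e5f6 --traceid=6f8aa4a5-7b9c-4b6e-8f00-000000000000 /tmp/profile-export
# Produces /tmp/profile-export/6f8aa4a5-7b9c-4b6e-8f00-000000000000.tar.gz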
This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/next/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.0.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. 
Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.1.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.2.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  
Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.3.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.4.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.5.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. 
This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.6.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.7.0/en/guides/backend-profile-export/"},{"content":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","title":"Extend storage","url":"/docs/main/v9.0.0/en/guides/storage-extention/"},{"content":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","title":"Extend storage","url":"/docs/main/v9.1.0/en/guides/storage-extention/"},{"content":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","title":"Extend storage","url":"/docs/main/v9.2.0/en/guides/storage-extention/"},{"content":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","title":"Extend storage","url":"/docs/main/v9.3.0/en/guides/storage-extention/"},{"content":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","title":"Extend storage","url":"/docs/main/v9.4.0/en/guides/storage-extention/"},{"content":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","title":"Extend storage","url":"/docs/main/v9.5.0/en/guides/storage-extention/"},{"content":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Fallbacker/none-fallbacker","url":"/docs/skywalking-satellite/latest/en/setup/plugins/fallbacker_none-fallbacker/"},{"content":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Fallbacker/none-fallbacker","url":"/docs/skywalking-satellite/next/en/setup/plugins/fallbacker_none-fallbacker/"},{"content":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data.\nDefaultConfig yaml\nConfiguration    Name Type Description    
","title":"Fallbacker/none-fallbacker","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/fallbacker_none-fallbacker/"},{"content":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward failure data.\nDefaultConfig # The forwarder max attempt times.max_attempts:3# The exponential_backoff is the standard retry duration, and the time for each retry is expanded# by 2 times until the number of retries reaches the maximum.(Time unit is millisecond.)exponential_backoff:2000# The max backoff time used in retrying, which would override the latency time when the latency time# with exponential increasing larger than it.(Time unit is millisecond.)max_backoff:5000Configuration    Name Type Description     max_attempts int    exponential_backoff int    max_backoff int     ","title":"Fallbacker/timer-fallbacker","url":"/docs/skywalking-satellite/latest/en/setup/plugins/fallbacker_timer-fallbacker/"},{"content":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward failure data.\nDefaultConfig # The forwarder max attempt times.max_attempts:3# The exponential_backoff is the standard retry duration, and the time for each retry is expanded# by 2 times until the number of retries reaches the maximum.(Time unit is millisecond.)exponential_backoff:2000# The max backoff time used in retrying, which would override the latency time when the latency time# with exponential increasing larger than it.(Time unit is millisecond.)max_backoff:5000Configuration    Name Type Description     max_attempts int    exponential_backoff int    max_backoff int     ","title":"Fallbacker/timer-fallbacker","url":"/docs/skywalking-satellite/next/en/setup/plugins/fallbacker_timer-fallbacker/"},{"content":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward failure data.\nDefaultConfig # The forwarder max attempt times.max_attempts:3# The exponential_backoff is the standard retry duration, and the time for each retry is expanded# by 2 times until the number of retries reaches the maximum.(Time unit is millisecond.)exponential_backoff:2000# The max backoff time used in retrying, which would override the latency time when the latency time# with exponential increasing larger than it.(Time unit is millisecond.)max_backoff:5000Configuration    Name Type Description     max_attempts int    exponential_backoff int    max_backoff int     ","title":"Fallbacker/timer-fallbacker","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/fallbacker_timer-fallbacker/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? 
Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/latest/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default? Why is Clickhouse or Loki or xxx not supported as a storage option?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/next/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? 
Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.0.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.1.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? 
Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.2.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.3.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? 
Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.4.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.5.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? 
Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.6.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  
","title":"FAQs","url":"/docs/main/v9.7.0/en/faq/readme/"},{"content":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a Fetcher to fetch Istio control plane metrics, then push them to OAP server.\nInstall Operator Follow Operator installation instrument to install the operator.\nInstall Istio control plane Follow Install with istioctl to install a istiod.\nDeploy Fetcher, OAP server and UI with default settings Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f fetcher.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui,fetcher NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] NAME AGE fetcher.operator.skywalking.apache.org/istio-prod-cluster 36h View Istio Control Plane Dashboard from UI Follow View the UI to access the UI service.\nNavigate to Dashboard-\u0026gt;Istio Control Plane to view relevant metric diagrams.\n","title":"Fetch metrics from the Istio control plane(istiod)","url":"/docs/skywalking-swck/latest/examples/istio-controlplane/"},{"content":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a Fetcher to fetch Istio control plane metrics, then push them to OAP server.\nInstall Operator Follow Operator installation instrument to install the operator.\nInstall Istio control plane Follow Install with istioctl to install a istiod.\nDeploy Fetcher, OAP server and UI with default settings Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f fetcher.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui,fetcher NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] NAME AGE fetcher.operator.skywalking.apache.org/istio-prod-cluster 36h View Istio Control Plane Dashboard from UI Follow View the UI to access the UI service.\nNavigate to Dashboard-\u0026gt;Istio Control Plane to view relevant metric diagrams.\n","title":"Fetch metrics from the Istio control plane(istiod)","url":"/docs/skywalking-swck/next/examples/istio-controlplane/"},{"content":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a Fetcher to fetch Istio control plane metrics, then push them to OAP server.\nInstall Operator Follow Operator installation instrument to install the operator.\nInstall Istio control plane Follow Install with istioctl to install a istiod.\nDeploy Fetcher, OAP server and UI with default settings Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f fetcher.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui,fetcher NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] NAME AGE fetcher.operator.skywalking.apache.org/istio-prod-cluster 36h View Istio 
Control Plane Dashboard from UI Follow View the UI to access the UI service.\nNavigate to Dashboard-\u0026gt;Istio Control Plane to view relevant metric diagrams.\n","title":"Fetch metrics from the Istio control plane(istiod)","url":"/docs/skywalking-swck/v0.9.0/examples/istio-controlplane/"},{"content":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-als-v2-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-als-v2-grpc-forwarder/"},{"content":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-als-v2-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-als-v2-grpc-forwarder/"},{"content":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-als-v2-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-als-v2-grpc-forwarder/"},{"content":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-als-v3-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-als-v3-grpc-forwarder/"},{"content":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-als-v3-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-als-v3-grpc-forwarder/"},{"content":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-als-v3-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-als-v3-grpc-forwarder/"},{"content":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-metrics-v2-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-metrics-v2-grpc-forwarder/"},{"content":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-metrics-v2-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-metrics-v2-grpc-forwarder/"},{"content":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    
","title":"Forwarder/envoy-metrics-v2-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-metrics-v2-grpc-forwarder/"},{"content":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-metrics-v3-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-metrics-v3-grpc-forwarder/"},{"content":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-metrics-v3-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-metrics-v3-grpc-forwarder/"},{"content":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-metrics-v3-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-metrics-v3-grpc-forwarder/"},{"content":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-cds-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-cds-grpc-forwarder/"},{"content":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-cds-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-cds-grpc-forwarder/"},{"content":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-cds-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-cds-grpc-forwarder/"},{"content":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-clr-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-clr-grpc-forwarder/"},{"content":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-clr-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-clr-grpc-forwarder/"},{"content":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    
","title":"Forwarder/native-clr-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-clr-grpc-forwarder/"},{"content":"Forwarder/native-ebpf-accesslog-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native eBPF access log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-ebpf-accesslog-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-ebpf-accesslog-grpc-forwarder/"},{"content":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-ebpf-profiling-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-ebpf-profiling-grpc-forwarder/"},{"content":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-ebpf-profiling-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-ebpf-profiling-grpc-forwarder/"},{"content":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-ebpf-profiling-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-ebpf-profiling-grpc-forwarder/"},{"content":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native event protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-event-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-event-grpc-forwarder/"},{"content":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native event protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-event-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-event-grpc-forwarder/"},{"content":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native event protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-event-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-event-grpc-forwarder/"},{"content":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-jvm-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-jvm-grpc-forwarder/"},{"content":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    
","title":"Forwarder/native-jvm-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-jvm-grpc-forwarder/"},{"content":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-jvm-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-jvm-grpc-forwarder/"},{"content":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-log-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-log-grpc-forwarder/"},{"content":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-log-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-log-grpc-forwarder/"},{"content":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-log-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-log-grpc-forwarder/"},{"content":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the SkyWalking native log protocol.\nDefaultConfig # The remote topic. topic:\u0026#34;log-topic\u0026#34;Configuration    Name Type Description     topic string The forwarder topic.    ","title":"Forwarder/native-log-kafka-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-log-kafka-forwarder/"},{"content":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the SkyWalking native log protocol.\nDefaultConfig # The remote topic. topic:\u0026#34;log-topic\u0026#34;Configuration    Name Type Description     topic string The forwarder topic.    ","title":"Forwarder/native-log-kafka-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-log-kafka-forwarder/"},{"content":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the SkyWalking native log protocol.\nDefaultConfig # The remote topic. topic:\u0026#34;log-topic\u0026#34;Configuration    Name Type Description     topic string The forwarder topic.    
","title":"Forwarder/native-log-kafka-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-log-kafka-forwarder/"},{"content":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native management protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-management-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-management-grpc-forwarder/"},{"content":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native management protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-management-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-management-grpc-forwarder/"},{"content":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native management protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-management-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-management-grpc-forwarder/"},{"content":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder with the SkyWalking meter protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180Configuration    Name Type Description     routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","title":"Forwarder/native-meter-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-meter-grpc-forwarder/"},{"content":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder with the SkyWalking meter protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180Configuration    Name Type Description     routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","title":"Forwarder/native-meter-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-meter-grpc-forwarder/"},{"content":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder with the SkyWalking meter protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180Configuration    Name Type Description     routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    
","title":"Forwarder/native-meter-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-meter-grpc-forwarder/"},{"content":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-process-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-process-grpc-forwarder/"},{"content":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-process-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-process-grpc-forwarder/"},{"content":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-process-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-process-grpc-forwarder/"},{"content":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-profile-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-profile-grpc-forwarder/"},{"content":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-profile-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-profile-grpc-forwarder/"},{"content":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-profile-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-profile-grpc-forwarder/"},{"content":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native tracing protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-tracing-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-tracing-grpc-forwarder/"},{"content":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native tracing protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-tracing-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-tracing-grpc-forwarder/"},{"content":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native tracing protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-tracing-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-tracing-grpc-forwarder/"},{"content":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with the OpenTelemetry 
metrics v1 protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180# The label key of the routing data, multiple keys are split by \u0026#34;,\u0026#34;routing_label_keys:net.host.name,host.name,job,service.nameConfiguration    Name Type Description     routing_label_keys string The label key of the routing data, multiple keys are split by \u0026ldquo;,\u0026rdquo;   routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","title":"Forwarder/otlp-metrics-v1-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_otlp-metrics-v1-grpc-forwarder/"},{"content":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with the OpenTelemetry metrics v1 protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180# The label key of the routing data, multiple keys are split by \u0026#34;,\u0026#34;routing_label_keys:net.host.name,host.name,job,service.nameConfiguration    Name Type Description     routing_label_keys string The label key of the routing data, multiple keys are split by \u0026ldquo;,\u0026rdquo;   routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","title":"Forwarder/otlp-metrics-v1-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_otlp-metrics-v1-grpc-forwarder/"},{"content":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with the OpenTelemetry metrics v1 protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180# The label key of the routing data, multiple keys are split by \u0026#34;,\u0026#34;routing_label_keys:net.host.name,host.name,job,service.nameConfiguration    Name Type Description     routing_label_keys string The label key of the routing data, multiple keys are split by \u0026ldquo;,\u0026rdquo;   routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","title":"Forwarder/otlp-metrics-v1-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_otlp-metrics-v1-grpc-forwarder/"},{"content":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. 
Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries Issue the below commands to get the basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the compiler\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls bydbctl/build/bin bydbctl ","title":"Get Binaries","url":"/docs/skywalking-banyandb/latest/installation/binaries/"},{"content":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.22 Node 20.12 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS, which introduces several platform-specific characteristics to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries Issue the below commands to get the basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release or make -C banyand static builds out a static binary banyand-server-static for releasing. make -C banyand debug gives a binary for debugging without the compiler\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  
Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static $ ls bydbctl/build/bin bydbctl bydbctl--darwin-amd64 bydbctl--darwin-arm64 bydbctl--linux-386 bydbctl--linux-amd64 bydbctl--linux-arm64 bydbctl--windows-386 bydbctl--windows-amd64 ","title":"Get Binaries","url":"/docs/skywalking-banyandb/next/installation/binaries/"},{"content":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS, which introduces several platform-specific characteristics to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries Issue the below commands to get the basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the compiler\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls bydbctl/build/bin bydbctl ","title":"Get Binaries","url":"/docs/skywalking-banyandb/v0.5.0/installation/binaries/"},{"content":"Getting Started This document introduces how to create a kubernetes cluster locally using kind and how to deploy the basic skywalking components to the cluster.\nPrerequisites  docker \u0026gt;= v20.10.6 kubectl \u0026gt;= v1.21.0 kind \u0026gt;= v0.20.0 swctl \u0026gt;= v0.10.0  Step1: Create a kubernetes cluster locally using kind  Note: If you have a kubernetes cluster (\u0026gt; v1.21.10) already, you can skip this step.\n Here we create a kubernetes cluster with 1 control-plane node and 1 worker node.\n$ cat \u0026lt;\u0026lt;EOF | kind create cluster --config=- kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane image: kindest/node:v1.21.10 - role: worker image: kindest/node:v1.21.10 EOF  Expected output Creating cluster \u0026#34;kind\u0026#34; ... ✓ Ensuring node image (kindest/node:v1.21.10) 🖼 ✓ Preparing nodes 📦 📦 ✓ Writing configuration 📜 ✓ Starting control-plane 🕹️ ✓ Installing CNI 🔌 ✓ Installing StorageClass 💾 ✓ Joining worker nodes 🚜 Set kubectl context to \u0026#34;kind-kind\u0026#34; You can now use your cluster with: kubectl cluster-info --context kind-kind Not sure what to do next? 
😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/  Check all pods in the cluster.\n$ kubectl get pods -A  Expected output NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-558bd4d5db-h5gxt 1/1 Running 0 106s kube-system coredns-558bd4d5db-lhnvz 1/1 Running 0 106s kube-system etcd-kind-control-plane 1/1 Running 0 116s kube-system kindnet-fxlkm 1/1 Running 0 106s kube-system kindnet-vmcvl 1/1 Running 0 91s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 116s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 116s kube-system kube-proxy-nr4f4 1/1 Running 0 91s kube-system kube-proxy-zl4h2 1/1 Running 0 106s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 116s local-path-storage local-path-provisioner-74567d47b4-kmtjh 1/1 Running 0 106s  Step2: Build the operator image Check into the root directory of SWCK and build the operator image as follows.\n$ cd operator # Build the operator image $ make docker-build You will get the operator image controller:latest as follows.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE controller latest 84da7509092a 22 seconds ago 53.6MB Load the operator image into the kind cluster or push the image to a registry that your kubernetes cluster can access.\n$ kind load docker-image controller or\n$ docker push $(YOUR_REGISTRY)/controller Step3: Deploy operator on the kubernetes cluster Install the CRDs as follows.\n$ make install Check the CRDs are installed successfully.\n Expected output kubectl get crd | grep skywalking banyandbs.operator.skywalking.apache.org 2023-11-05T03:30:43Z fetchers.operator.skywalking.apache.org 2023-11-05T03:30:43Z javaagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverdynamicconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapservers.operator.skywalking.apache.org 2023-11-05T03:30:43Z satellites.operator.skywalking.apache.org 2023-11-05T03:30:43Z storages.operator.skywalking.apache.org 2023-11-05T03:30:43Z swagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z uis.operator.skywalking.apache.org 2023-11-05T03:30:43Z  Deploy the SWCK operator to the cluster.\n$ make deploy Or deploy the SWCK operator to the cluster with your own image.\n$ make deploy OPERATOR_IMG=$(YOUR_REGISTRY)/controller Get the status of the SWCK operator pod.\n$ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-5f5bbd4fd-9wdw6 2/2 Running 0 34s Step4: Deploy skywalking componentes on the kubernetes cluster Create the skywalking-system namespace.\n$ kubectl create namespace skywalking-system Deploy the skywalking components to the cluster.\n$ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: OAPServer metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-oap-server:9.5.0 service: template: type: ClusterIP --- apiVersion: operator.skywalking.apache.org/v1alpha1 kind: UI metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-ui:9.5.0 OAPServerAddress: http://skywalking-system-oap.skywalking-system:12800 service: template: type: ClusterIP ingress: host: demo.ui.skywalking EOF Check the status of the skywalking components.\n$ kubectl get pod -n skywalking-system NAME READY STATUS RESTARTS AGE skywalking-system-oap-68bd877f57-fhzdz 1/1 Running 0 6m23s 
skywalking-system-ui-6db8579b47-rphtl 1/1 Running 0 6m23s Step5: Use the java agent injector to inject the java agent into the application pod Label the namespace where the application pod is located with swck-injection=enabled.\n$ kubectl label namespace skywalking-system swck-injection=enabled Create the application pod.\n Note: The application pod must be labeled with swck-java-agent-injected=true and the agent.skywalking.apache.org/collector.backend_service annotation must be set to the address of the OAP server. For more configurations, please refer to the guide.\n $ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: apps/v1 kind: Deployment metadata: name: demo namespace: skywalking-system spec: selector: matchLabels: app: demo template: metadata: labels: # enable the java agent injector swck-java-agent-injected: \u0026#34;true\u0026#34; app: demo annotations: agent.skywalking.apache.org/collector.backend_service: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34; spec: containers: - name: demo1 imagePullPolicy: IfNotPresent image: ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1 command: [\u0026#34;java\u0026#34;] args: [\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;] ports: - containerPort: 8085 readinessProbe: httpGet: path: /hello port: 8085 initialDelaySeconds: 3 periodSeconds: 3 failureThreshold: 10 --- apiVersion: v1 kind: Service metadata: name: demo namespace: skywalking-system spec: type: ClusterIP ports: - name: 8085-tcp port: 8085 protocol: TCP targetPort: 8085 selector: app: demo EOF Check the status of the application pod and make sure the java agent is injected into the application pod.\n$ kubectl get pod -n skywalking-system -l app=demo -ojsonpath=\u0026#39;{.items[0].spec.initContainers[0]}\u0026#39;  Expected output {\u0026#34;args\u0026#34;:[\u0026#34;-c\u0026#34;,\u0026#34;mkdir -p /sky/agent \\u0026\\u0026 cp -r /skywalking/agent/* /sky/agent\u0026#34;],\u0026#34;command\u0026#34;:[\u0026#34;sh\u0026#34;],\u0026#34;image\u0026#34;:\u0026#34;apache/skywalking-java-agent:8.16.0-java8\u0026#34;,\u0026#34;imagePullPolicy\u0026#34;:\u0026#34;IfNotPresent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;inject-skywalking-agent\u0026#34;,\u0026#34;resources\u0026#34;:{},\u0026#34;terminationMessagePath\u0026#34;:\u0026#34;/dev/termination-log\u0026#34;,\u0026#34;terminationMessagePolicy\u0026#34;:\u0026#34;File\u0026#34;,\u0026#34;volumeMounts\u0026#34;:[{\u0026#34;mountPath\u0026#34;:\u0026#34;/sky/agent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;sky-agent\u0026#34;},{\u0026#34;mountPath\u0026#34;:\u0026#34;/var/run/secrets/kubernetes.io/serviceaccount\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;kube-api-access-4qk26\u0026#34;,\u0026#34;readOnly\u0026#34;:true}]}  Also, you could check the final java agent configurations with the following command.\n$ kubectl get javaagent -n skywalking-system -l app=demo -oyaml  Expected output apiVersion: v1 items: - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2023-11-19T05:34:03Z\u0026#34; generation: 1 labels: app: demo name: app-demo-javaagent namespace: skywalking-system ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo-75d8d995cc uid: 8cb64abc-9b50-4f67-9304-2e09de476168 resourceVersion: \u0026#34;21515\u0026#34; uid: 6cbafb3d-9f43-4448-95e8-bda1f7c72bc3 spec: agentConfiguration: collector.backend_service: skywalking-system-oap.skywalking-system:11800 optional-plugin: 
webflux|cloud-gateway-2.1.x backendService: skywalking-system-oap.skywalking-system:11800 podSelector: app=demo serviceName: Your_ApplicationName status: creationTime: \u0026#34;2023-11-19T05:34:03Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2023-11-19T05:34:46Z\u0026#34; realInjectedNum: 1 kind: List metadata: resourceVersion: \u0026#34;\u0026#34; selfLink: \u0026#34;\u0026#34;  If you want to check the logs of the java agent, you can run the following command.\n$ kubectl logs -f -n skywalking-system -l app=demo -c inject-skywalking-agent Step6: Check the application metrics in the skywalking UI First, port-forward the demo service to your local machine.\n$ kubectl port-forward svc/demo 8085:8085 -n skywalking-system Then, trigger the application to generate some metrics.\n$ for i in {1..10}; do curl http://127.0.0.1:8085/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done After that, you can port-forward the skywalking UI to your local machine.\n$ kubectl port-forward svc/skywalking-system-ui 8080:80 -n skywalking-system Open the skywalking UI in your browser and navigate to http://127.0.0.1:8080 to check the application metrics.\n Expected output  Also, if you want to expose the external metrics to the kubernetes HPA, you can follow the guide to deploy the custom metrics adapter and you may get some inspiration from the e2e test.\n","title":"Getting Started","url":"/docs/skywalking-swck/next/getting-started/"},{"content":"Getting Started This document introduces how to create a kubernetes cluster locally using kind and how to deploy the basic skywalking components to the cluster.\nPrerequisites  docker \u0026gt;= v20.10.6 kubectl \u0026gt;= v1.21.0 kind \u0026gt;= v0.20.0 swctl \u0026gt;= v0.10.0  Step1: Create a kubernetes cluster locally using kind  Note: If you have a kubernetes cluster (\u0026gt; v1.21.10) already, you can skip this step.\n Here we create a kubernetes cluster with 1 control-plane node and 1 worker nodes.\n$ cat \u0026lt;\u0026lt;EOF | kind create cluster --config=- kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane image: kindest/node:v1.21.10 - role: worker image: kindest/node:v1.21.10 EOF  Expected output Creating cluster \u0026#34;kind\u0026#34; ... ✓ Ensuring node image (kindest/node:v1.21.10) 🖼 ✓ Preparing nodes 📦 📦 ✓ Writing configuration 📜 ✓ Starting control-plane 🕹️ ✓ Installing CNI 🔌 ✓ Installing StorageClass 💾 ✓ Joining worker nodes 🚜 Set kubectl context to \u0026#34;kind-kind\u0026#34; You can now use your cluster with: kubectl cluster-info --context kind-kind Not sure what to do next? 
😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/  Check all pods in the cluster.\n$ kubectl get pods -A  Expected output NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-558bd4d5db-h5gxt 1/1 Running 0 106s kube-system coredns-558bd4d5db-lhnvz 1/1 Running 0 106s kube-system etcd-kind-control-plane 1/1 Running 0 116s kube-system kindnet-fxlkm 1/1 Running 0 106s kube-system kindnet-vmcvl 1/1 Running 0 91s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 116s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 116s kube-system kube-proxy-nr4f4 1/1 Running 0 91s kube-system kube-proxy-zl4h2 1/1 Running 0 106s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 116s local-path-storage local-path-provisioner-74567d47b4-kmtjh 1/1 Running 0 106s  Step2: Build the operator image Check into the root directory of SWCK and build the operator image as follows.\n$ cd operator # Build the operator image $ make docker-build You will get the operator image controller:latest as follows.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE controller latest 84da7509092a 22 seconds ago 53.6MB Load the operator image into the kind cluster or push the image to a registry that your kubernetes cluster can access.\n$ kind load docker-image controller or\n$ docker push $(YOUR_REGISTRY)/controller Step3: Deploy operator on the kubernetes cluster Install the CRDs as follows.\n$ make install Check the CRDs are installed successfully.\n Expected output kubectl get crd | grep skywalking banyandbs.operator.skywalking.apache.org 2023-11-05T03:30:43Z fetchers.operator.skywalking.apache.org 2023-11-05T03:30:43Z javaagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverdynamicconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapservers.operator.skywalking.apache.org 2023-11-05T03:30:43Z satellites.operator.skywalking.apache.org 2023-11-05T03:30:43Z storages.operator.skywalking.apache.org 2023-11-05T03:30:43Z swagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z uis.operator.skywalking.apache.org 2023-11-05T03:30:43Z  Deploy the SWCK operator to the cluster.\n$ make deploy Or deploy the SWCK operator to the cluster with your own image.\n$ make deploy OPERATOR_IMG=$(YOUR_REGISTRY)/controller Get the status of the SWCK operator pod.\n$ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-5f5bbd4fd-9wdw6 2/2 Running 0 34s Step4: Deploy skywalking componentes on the kubernetes cluster Create the skywalking-system namespace.\n$ kubectl create namespace skywalking-system Deploy the skywalking components to the cluster.\n$ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: OAPServer metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-oap-server:9.5.0 service: template: type: ClusterIP --- apiVersion: operator.skywalking.apache.org/v1alpha1 kind: UI metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-ui:9.5.0 OAPServerAddress: http://skywalking-system-oap.skywalking-system:12800 service: template: type: ClusterIP ingress: host: demo.ui.skywalking EOF Check the status of the skywalking components.\n$ kubectl get pod -n skywalking-system NAME READY STATUS RESTARTS AGE skywalking-system-oap-68bd877f57-fhzdz 1/1 Running 0 6m23s 
skywalking-system-ui-6db8579b47-rphtl 1/1 Running 0 6m23s Step5: Use the java agent injector to inject the java agent into the application pod Label the namespace where the application pod is located with swck-injection=enabled.\n$ kubectl label namespace skywalking-system swck-injection=enabled Create the application pod.\n Note: The application pod must be labeled with swck-java-agent-injected=true and the agent.skywalking.apache.org/collector.backend_service annotation must be set to the address of the OAP server. For more configurations, please refer to the guide.\n $ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: apps/v1 kind: Deployment metadata: name: demo namespace: skywalking-system spec: selector: matchLabels: app: demo template: metadata: labels: # enable the java agent injector swck-java-agent-injected: \u0026#34;true\u0026#34; app: demo annotations: agent.skywalking.apache.org/collector.backend_service: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34; spec: containers: - name: demo1 imagePullPolicy: IfNotPresent image: ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1 command: [\u0026#34;java\u0026#34;] args: [\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;] ports: - containerPort: 8085 readinessProbe: httpGet: path: /hello port: 8085 initialDelaySeconds: 3 periodSeconds: 3 failureThreshold: 10 --- apiVersion: v1 kind: Service metadata: name: demo namespace: skywalking-system spec: type: ClusterIP ports: - name: 8085-tcp port: 8085 protocol: TCP targetPort: 8085 selector: app: demo EOF Check the status of the application pod and make sure the java agent is injected into the application pod.\n$ kubectl get pod -n skywalking-system -l app=demo -ojsonpath=\u0026#39;{.items[0].spec.initContainers[0]}\u0026#39;  Expected output {\u0026#34;args\u0026#34;:[\u0026#34;-c\u0026#34;,\u0026#34;mkdir -p /sky/agent \\u0026\\u0026 cp -r /skywalking/agent/* /sky/agent\u0026#34;],\u0026#34;command\u0026#34;:[\u0026#34;sh\u0026#34;],\u0026#34;image\u0026#34;:\u0026#34;apache/skywalking-java-agent:8.16.0-java8\u0026#34;,\u0026#34;imagePullPolicy\u0026#34;:\u0026#34;IfNotPresent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;inject-skywalking-agent\u0026#34;,\u0026#34;resources\u0026#34;:{},\u0026#34;terminationMessagePath\u0026#34;:\u0026#34;/dev/termination-log\u0026#34;,\u0026#34;terminationMessagePolicy\u0026#34;:\u0026#34;File\u0026#34;,\u0026#34;volumeMounts\u0026#34;:[{\u0026#34;mountPath\u0026#34;:\u0026#34;/sky/agent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;sky-agent\u0026#34;},{\u0026#34;mountPath\u0026#34;:\u0026#34;/var/run/secrets/kubernetes.io/serviceaccount\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;kube-api-access-4qk26\u0026#34;,\u0026#34;readOnly\u0026#34;:true}]}  Also, you could check the final java agent configurations with the following command.\n$ kubectl get javaagent -n skywalking-system -l app=demo -oyaml  Expected output apiVersion: v1 items: - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2023-11-19T05:34:03Z\u0026#34; generation: 1 labels: app: demo name: app-demo-javaagent namespace: skywalking-system ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo-75d8d995cc uid: 8cb64abc-9b50-4f67-9304-2e09de476168 resourceVersion: \u0026#34;21515\u0026#34; uid: 6cbafb3d-9f43-4448-95e8-bda1f7c72bc3 spec: agentConfiguration: collector.backend_service: skywalking-system-oap.skywalking-system:11800 optional-plugin: 
webflux|cloud-gateway-2.1.x backendService: skywalking-system-oap.skywalking-system:11800 podSelector: app=demo serviceName: Your_ApplicationName status: creationTime: \u0026#34;2023-11-19T05:34:03Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2023-11-19T05:34:46Z\u0026#34; realInjectedNum: 1 kind: List metadata: resourceVersion: \u0026#34;\u0026#34; selfLink: \u0026#34;\u0026#34;  If you want to check the logs of the java agent, you can run the following command.\n$ kubectl logs -f -n skywalking-system -l app=demo -c inject-skywalking-agent Step6: Check the application metrics in the skywalking UI First, port-forward the demo service to your local machine.\n$ kubectl port-forward svc/demo 8085:8085 -n skywalking-system Then, trigger the application to generate some metrics.\n$ for i in {1..10}; do curl http://127.0.0.1:8085/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done After that, you can port-forward the skywalking UI to your local machine.\n$ kubectl port-forward svc/skywalking-system-ui 8080:80 -n skywalking-system Open the skywalking UI in your browser and navigate to http://127.0.0.1:8080 to check the application metrics.\n Expected output  Also, if you want to expose the external metrics to the kubernetes HPA, you can follow the guide to deploy the custom metrics adapter and you may get some inspiration from the e2e test.\n","title":"Getting Started","url":"/docs/skywalking-swck/v0.9.0/getting-started/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. 
For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products/{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions; the endpoint grouping rules from OpenAPI will be re-created according to the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","title":"Group Parameterized Endpoints","url":"/docs/main/latest/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products/{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions; the endpoint grouping rules from OpenAPI will be re-created according to the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","title":"Group Parameterized Endpoints","url":"/docs/main/next/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path being matched. E.g. /products or /products/inventory The path which has less variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpiont Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","title":"Group Parameterized Endpoints","url":"/docs/main/v9.0.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
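For readability, the SkyWalking Specification Extensions shown inline above can be sketched as a stand-alone OpenAPI definition header; all three x-sw-* keys are optional and the values below only restate the full-custom-config example already given, nothing new is assumed:
openapi: 3.0.0
# The extensions sit at the root (OpenAPI Object) level of the definition document.
x-sw-service-name: serviceB                         # defaults to the directory name when omitted
x-sw-endpoint-name-match-rule: "${METHOD}:${PATH}"  # default match rule
x-sw-endpoint-name-format: "${METHOD}:${PATH}"      # default grouped endpoint name
info:
  title: Product API
  version: v2
  description: OpenAPI definition for SkyWalking test.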
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpiont Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","title":"Group Parameterized Endpoints","url":"/docs/main/v9.1.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
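Separately from the OpenAPI-based approach, the endpoint-name-grouping.yml example referenced on this page is hard to read in flattened form; here is the same rule as a readable sketch (the regex form used by these releases, with nothing added):
grouping:
  # Endpoints of the service would follow the rules below.
  - service-name: serviceA
    rules:
      # Logical name used when the regex expression matches.
      - endpoint-name: /prod/{id}
        regex: \/prod\/.+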
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","title":"Group Parameterized Endpoints","url":"/docs/main/v9.2.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","title":"Group Parameterized Endpoints","url":"/docs/main/v9.3.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
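As the use cases below show, the match rule and the name format can be overridden independently; a minimal sketch of a format-only override taken from one of those rows, with the match rule left at its default:
# With the default match rule but this format, an incoming GET:/products/123
# is grouped as /products/{id}:<GET> instead of GET:/products/{id}.
x-sw-endpoint-name-format: "${PATH}:<${METHOD}>"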
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","title":"Group Parameterized Endpoints","url":"/docs/main/v9.4.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","title":"Group Parameterized Endpoints","url":"/docs/main/v9.5.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
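In the releases covered here, the custom-configuration rules use a {var} placeholder instead of a regex; the endpoint-name-grouping.yml example on this page reads, as a readable sketch:
grouping:
  - service-name: serviceA
    rules:
      # {var} represents any variable string in the URI.
      - /prod/{var}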
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","title":"Group Parameterized Endpoints","url":"/docs/main/v9.6.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products/{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions; the endpoint grouping rules from OpenAPI will be re-created according to the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","title":"Group Parameterized Endpoints","url":"/docs/main/v9.7.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. 
That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/latest/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. 
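The core/default SSL settings quoted above are flattened by the search-index extraction, so here is a rough sketch of how they sit inside application.yml when laid out as YAML; the surrounding core/selector structure is an assumption about the module layout, and the certificate paths are placeholders that must point at the real files.
core:
  selector: ${SW_CORE:default}   # assumed module selector; adjust to your deployment
  default:
    # ... other core settings stay unchanged ...
    gRPCSslEnabled: true
    gRPCSslKeyPath: /path/to/server.pem        # private key loaded by the OAP gRPC server
    gRPCSslCertChainPath: /path/to/server.crt  # certificate chain presented to clients
    gRPCSslTrustedCAPath: /path/to/ca.crt      # CA used by the gRPC client to verify servers in cluster mode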
The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. 
Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/next/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. 
More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.0.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. 
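For readability, the flattened receiver-sharing-server snippet shown above for mTLS can be laid out roughly as follows; every key and environment variable comes from that snippet, while the default values remain placeholders (in particular, SW_RECEIVER_GRPC_PORT must be set to a non-zero port for the sharing server to start).
receiver-sharing-server:
  selector: ${SW_RECEIVER_SHARING_SERVER:default}
  default:
    # For gRPC server
    gRPCHost: ${SW_RECEIVER_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_RECEIVER_GRPC_PORT:"changeMe"}          # set to a real, non-zero port
    maxConcurrentCallsPerConnection: ${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}
    maxMessageSize: ${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}
    gRPCThreadPoolQueueSize: ${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}
    gRPCThreadPoolSize: ${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}
    gRPCSslEnabled: ${SW_RECEIVER_GRPC_SSL_ENABLED:true}
    gRPCSslKeyPath: ${SW_RECEIVER_GRPC_SSL_KEY_PATH:"/path/to/server.pem"}
    gRPCSslCertChainPath: ${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:"/path/to/server.crt"}
    gRPCSslTrustedCAsPath: ${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:"/path/to/ca.crt"}
    authentication: ${SW_AUTHENTICATION:""}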
Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. 
Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.1.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. 
More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.2.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. 
Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. 
Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.3.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. 
More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.4.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. 
Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. 
Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.5.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. 
More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.6.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. 
Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. 
Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.7.0/en/setup/backend/grpc-security/"},{"content":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","title":"Guide","url":"/docs/main/latest/en/guides/i18n/"},{"content":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","title":"Guide","url":"/docs/main/next/en/guides/i18n/"},{"content":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. 
The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","title":"Guide","url":"/docs/main/v9.6.0/en/guides/i18n/"},{"content":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","title":"Guide","url":"/docs/main/v9.7.0/en/guides/i18n/"},{"content":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","title":"Guides","url":"/docs/main/latest/en/guides/community/"},{"content":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","title":"Guides","url":"/docs/main/next/en/guides/community/"},{"content":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. 
Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. 
E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we have migrated to e2e-v2, which leverages skywalking-infra-e2e and skywalking-cli to drive the whole e2e process. skywalking-infra-e2e is used to control the e2e process, and skywalking-cli is used to interact with the OAP, such as requesting metrics from the OAP and checking the responses.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these two are enough), and other tools if your cases need them. You can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), in single-node mode as well as cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using docker-compose, which provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data (yml) for verification.   Run e2e test  All e2e cases should be under skywalking/test/e2e-v2/cases. You can execute the e2e run command in skywalking/, e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, and save and upload them to the GitHub server in CI mode. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interested in writing extensions, read the following guides.\nThis guides you through developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. 
Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe have a simple script to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Build a distribution package and unzip/untar it to folder dist. Run the script in the root directory. It will print out all new dependencies. Check the LICENSE and NOTICE of those dependencies to make sure that they can be included in an ASF project. Add them to the apm-dist/release-docs/{LICENSE,NOTICE} file. Add the names of these dependencies to the tools/dependencies/known-oap-backend-dependencies.txt file (in alphabetical order). check-LICENSE.sh should pass in the next run.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","title":"Guides","url":"/docs/main/v9.0.0/en/guides/readme/"},{"content":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. 
Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. 
E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metris from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. 
Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe have a simple script to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Build a distribution package and unzip/untar it to folder dist. Run the script in the root directory. It will print out all new dependencies. Check the LICENSE and NOTICE of those dependencies to make sure that they can be included in an ASF project. Add them to the apm-dist/release-docs/{LICENSE,NOTICE} file. Add the names of these dependencies to the tools/dependencies/known-oap-backend-dependencies.txt file (in alphabetical order). check-LICENSE.sh should pass in the next run.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","title":"Guides","url":"/docs/main/v9.1.0/en/guides/readme/"},{"content":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. 
Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. 
E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. 
Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","title":"Guides","url":"/docs/main/v9.2.0/en/guides/readme/"},{"content":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. 
See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. 
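For orientation, a minimal benchmark following that convention might look like the sketch below. The class name and workload are made up for illustration; the only assumption carried over from the text above is the AbstractMicrobenchmark base class, and real benchmarks in the microbench module pick their own workloads and tuning annotations.

```java
import org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.infra.Blackhole;

// Hypothetical micro-benchmark: measures string concatenation as a stand-in workload.
public class StringConcatBenchmark extends AbstractMicrobenchmark {
    @Benchmark
    public void concat(Blackhole blackhole) {
        // Blackhole keeps the JIT from optimizing the result away.
        blackhole.consume("sky" + System.nanoTime());
    }
}
```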
And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. 
However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document. If you would like to add a new root menu or sub-menu to booster UI, read the UI menu control document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  
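As a rough intuition for the thread-dump approach described above (and not SkyWalking's actual profiler), the sketch below samples stack traces with the standard ThreadMXBean API; frames that show up in many consecutive samples are the ones consuming wall-clock time. The class name and sampling parameters are illustrative only.

```java
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.util.HashMap;
import java.util.Map;

// Toy sampling profiler: counts how often each top stack frame appears across samples.
public class SamplingSketch {
    public static void main(String[] args) throws InterruptedException {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        Map<String, Integer> hits = new HashMap<>();
        for (int i = 0; i < 200; i++) {
            for (ThreadInfo info : threads.dumpAllThreads(false, false)) {
                StackTraceElement[] stack = info.getStackTrace();
                if (stack.length > 0) {
                    hits.merge(stack[0].toString(), 1, Integer::sum);
                }
            }
            Thread.sleep(10); // sampling interval; smaller intervals give finer estimates
        }
        hits.forEach((frame, count) -> System.out.println(count + "\t" + frame));
    }
}
```

This is roughly the idea the agent applies to a single traced thread, with the per-sample dumps merged on the OAP side as described in the documents listed above.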
Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","title":"Guides","url":"/docs/main/v9.3.0/en/guides/readme/"},{"content":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. 
if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. 
set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document. If you would like to add a new root menu or sub-menu to booster UI, read the UI menu control document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Profile The performance profile is an enhancement feature in the APM system. 
We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","title":"Guides","url":"/docs/main/v9.4.0/en/guides/readme/"},{"content":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. 
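The split between the two plugins is purely name-based, as described next. For reference, a hypothetical integration-test class that failsafe (rather than surefire) would pick up might look like the sketch below; the class name and assertion are illustrative only, and a real IT would talk to a running backend.

```java
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertTrue;

// The *IT suffix keeps this class out of the unit-test (surefire) run.
public class StorageQueryIT {
    @Test
    public void shouldReachTheBackend() {
        // Placeholder assertion; a real IT would start or connect to OAP/storage here.
        assertTrue(true);
    }
}
```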
maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. 
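Before moving on to how an E2E case is orchestrated, here is a brief aside on the JMH module above: a minimal benchmark sketch might look as follows. It is only an illustration; the class name and the measured workload are made up, and it assumes the AbstractMicrobenchmark base class mentioned earlier supplies the default Measurement/Fork/Warmup settings.

import org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark;
import org.openjdk.jmh.annotations.Benchmark;

// Hypothetical micro-benchmark: a real one would exercise an OAP code path.
public class StringConcatBenchmark extends AbstractMicrobenchmark {
    @Benchmark
    public String concat() {
        return "sky" + "walking";
    }
}

Such a class runs as an ordinary unit test, or from the benchmarks uber jar as described above (e.g. java -jar benchmarks.jar StringConcatBenchmark -rf json). With that aside done, back to the E2E tests, whose goal is to cover the single node and cluster modes end to end.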
Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document. If you would like to add a new root menu or sub-menu to booster UI, read the UI menu control document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. 
Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","title":"Guides","url":"/docs/main/v9.5.0/en/guides/readme/"},{"content":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","title":"Guides","url":"/docs/main/v9.6.0/en/guides/community/"},{"content":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","title":"Guides","url":"/docs/main/v9.7.0/en/guides/community/"},{"content":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide you.\n Contribution  How to contribute a module?   Compile  How to compile SkyWalking Rover?    ","title":"Guides","url":"/docs/skywalking-rover/latest/en/guides/readme/"},{"content":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide you.\n Contribution  How to contribute a module?   Compile  How to compile SkyWalking Rover?    ","title":"Guides","url":"/docs/skywalking-rover/next/en/guides/readme/"},{"content":"Guides If you want to debug or develop SkyWalking Rover, The following documentations would guide you.\n Contribution  How to contribute a module?   Compile  How to compile SkyWalking Rover?    ","title":"Guides","url":"/docs/skywalking-rover/v0.6.0/en/guides/readme/"},{"content":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would guide you.\n Contribution  How to contribute a plugin? How to release SkyWalking Satellite?   
Compile  How to compile SkyWalking Satellite?   Test  How to add unit test for a plugin?    ","title":"Guides","url":"/docs/skywalking-satellite/latest/en/guides/readme/"},{"content":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would guide you.\n Contribution  How to contribute a plugin? How to release SkyWalking Satellite?   Compile  How to compile SkyWalking Satellite?   Test  How to add unit test for a plugin?    ","title":"Guides","url":"/docs/skywalking-satellite/next/en/guides/readme/"},{"content":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would guide you.\n Contribution  How to contribute a plugin? How to release SkyWalking Satellite?   Compile  How to compile SkyWalking Satellite?   Test  How to add unit test for a plugin?    ","title":"Guides","url":"/docs/skywalking-satellite/v1.2.0/en/guides/readme/"},{"content":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}","title":"H2","url":"/docs/main/latest/en/setup/backend/storages/h2/"},{"content":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}","title":"H2","url":"/docs/main/next/en/setup/backend/storages/h2/"},{"content":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}","title":"H2","url":"/docs/main/v9.7.0/en/setup/backend/storages/h2/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/latest/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/next/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to check the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.0.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.1.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.2.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.3.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.4.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.5.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.6.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. 
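Because this is the standard gRPC health service, any gRPC client can also query it directly. Below is a minimal, hypothetical Java sketch using the grpc-java health-check stubs; the address and port (11800 is assumed here as the usual OAP gRPC default) must be adjusted to your deployment.

import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.health.v1.HealthCheckRequest;
import io.grpc.health.v1.HealthCheckResponse;
import io.grpc.health.v1.HealthGrpc;

public class OapGrpcHealthProbe {
    public static void main(String[] args) {
        // Assumes the OAP gRPC endpoint is reachable at localhost:11800.
        ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 11800)
                .usePlaintext()
                .build();
        HealthCheckResponse response = HealthGrpc.newBlockingStub(channel)
                .check(HealthCheckRequest.newBuilder().setService("").build());
        // SERVING means the OAP gRPC services are ready.
        System.out.println(response.getStatus());
        channel.shutdownNow();
    }
}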
You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.7.0/en/setup/backend/backend-health-check/"},{"content":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and skywalking-python-profiling described how the threading-profiler works\nAnd this figure demonstrates how the profiler works as well:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread: get: /api/v1/user/ rect rgb(0,200,0) API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;-working thread: snapshot end working thread--\u0026gt;\u0026gt;-API: response It works well with threading mode because the whole process will be executed in the same thread, so the profiling thread can fetch the complete profiling info of the process of the API request.\nWhy doesn\u0026rsquo;t threading-profiler work in greenlet mode When the python program runs with gevent + greenlet, the process would be like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;-working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response In this circumstance, the snapshot of the working thread includes multi contexts of different greenlets, which will make skywalking confused to build the trace stack.\nFortunately, greenlet has an API for profiling, the doc is here. 
We can implement a greenlet profiler to solve this issue.\nHow the greenlet profiler works A greenlet profiler leverages the trace callback of greenlet, it works like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet and snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet and snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response We can set a callback function to the greenlet that we need to profiling, then when the greenlet.HUB switches the context in/out to the working thread, the callback will build a snapshot of the greenlet\u0026rsquo;s traceback and send it to skywalking.\nThe difference between these two profilers The greenlet profiler will significantly reduce the snapshot times of the profiling process, which means that it will cost less CPU time than the threading profiler.\n","title":"How does threading-profiler (the default mode) work","url":"/docs/skywalking-python/latest/en/profiling/profiling/"},{"content":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and skywalking-python-profiling described how the threading-profiler works\nAnd this figure demonstrates how the profiler works as well:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread: get: /api/v1/user/ rect rgb(0,200,0) API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;-working thread: snapshot end working thread--\u0026gt;\u0026gt;-API: response It works well with threading mode because the whole process will be executed in the same thread, so the profiling thread can fetch the complete profiling info of the process of the API request.\nWhy doesn\u0026rsquo;t threading-profiler work in greenlet mode When the python program runs with gevent + greenlet, the process would be like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;-working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response In this 
circumstance, the snapshot of the working thread includes multi contexts of different greenlets, which will make skywalking confused to build the trace stack.\nFortunately, greenlet has an API for profiling, the doc is here. We can implement a greenlet profiler to solve this issue.\nHow the greenlet profiler works A greenlet profiler leverages the trace callback of greenlet, it works like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet and snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet and snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response We can set a callback function to the greenlet that we need to profiling, then when the greenlet.HUB switches the context in/out to the working thread, the callback will build a snapshot of the greenlet\u0026rsquo;s traceback and send it to skywalking.\nThe difference between these two profilers The greenlet profiler will significantly reduce the snapshot times of the profiling process, which means that it will cost less CPU time than the threading profiler.\n","title":"How does threading-profiler (the default mode) work","url":"/docs/skywalking-python/next/en/profiling/profiling/"},{"content":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and skywalking-python-profiling described how the threading-profiler works\nAnd this figure demonstrates how the profiler works as well:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread: get: /api/v1/user/ rect rgb(0,200,0) API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;-working thread: snapshot end working thread--\u0026gt;\u0026gt;-API: response It works well with threading mode because the whole process will be executed in the same thread, so the profiling thread can fetch the complete profiling info of the process of the API request.\nWhy doesn\u0026rsquo;t threading-profiler work in greenlet mode When the python program runs with gevent + greenlet, the process would be like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet profiling thread-\u0026gt;\u0026gt;working 
thread 1: snapshot profiling thread-\u0026gt;\u0026gt;-working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response In this circumstance, the snapshot of the working thread includes multi contexts of different greenlets, which will make skywalking confused to build the trace stack.\nFortunately, greenlet has an API for profiling, the doc is here. We can implement a greenlet profiler to solve this issue.\nHow the greenlet profiler works A greenlet profiler leverages the trace callback of greenlet, it works like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet and snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet and snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response We can set a callback function to the greenlet that we need to profiling, then when the greenlet.HUB switches the context in/out to the working thread, the callback will build a snapshot of the greenlet\u0026rsquo;s traceback and send it to skywalking.\nThe difference between these two profilers The greenlet profiler will significantly reduce the snapshot times of the profiling process, which means that it will cost less CPU time than the threading profiler.\n","title":"How does threading-profiler (the default mode) work","url":"/docs/skywalking-python/v1.0.1/en/profiling/profiling/"},{"content":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or sub-menu, you should add data to src/router/data/xx and add translation contents for the title to src/locales/lang/xx in booster UI.\n Create a new file called xxx.ts in src/router/data. Add configurations to the xxx.ts, configurations should be like this.  export default [ { // Add `Infrastructure` menu  path: \u0026#34;\u0026#34;, name: \u0026#34;Infrastructure\u0026#34;, meta: { title: \u0026#34;infrastructure\u0026#34;, icon: \u0026#34;scatter_plot\u0026#34;, hasGroup: true, }, redirect: \u0026#34;/linux\u0026#34;, children: [ // Add a sub menu of the `Infrastructure`  { path: \u0026#34;/linux\u0026#34;, name: \u0026#34;Linux\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, // If there are Tabs widgets in your dashboards, add following extra configuration to provide static links to the specific tab.  { path: \u0026#34;/linux/tab/:activeTabIndex\u0026#34;, name: \u0026#34;LinuxActiveTabIndex\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, notShow: true, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, ], }, ]; import configurations in src/router/data/index.ts.  
import name from \u0026#34;./xxx\u0026#34;; ","title":"How to add a new root menu or sub-menu to booster UI","url":"/docs/main/v9.3.0/en/guides/how-to-add-menu/"},{"content":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or sub-menu, you should add data to src/router/data/xx and add translation contents for the title to src/locales/lang/xx in booster UI.\n Create a new file called xxx.ts in src/router/data. Add configurations to the xxx.ts, configurations should be like this.  export default [ { // Add `Infrastructure` menu  path: \u0026#34;\u0026#34;, name: \u0026#34;Infrastructure\u0026#34;, meta: { title: \u0026#34;infrastructure\u0026#34;, icon: \u0026#34;scatter_plot\u0026#34;, hasGroup: true, }, redirect: \u0026#34;/linux\u0026#34;, children: [ // Add a sub menu of the `Infrastructure`  { path: \u0026#34;/linux\u0026#34;, name: \u0026#34;Linux\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, // If there are Tabs widgets in your dashboards, add following extra configuration to provide static links to the specific tab.  { path: \u0026#34;/linux/tab/:activeTabIndex\u0026#34;, name: \u0026#34;LinuxActiveTabIndex\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, notShow: true, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, ], }, ]; import configurations in src/router/data/index.ts.  import name from \u0026#34;./xxx\u0026#34;; ","title":"How to add a new root menu or sub-menu to booster UI","url":"/docs/main/v9.4.0/en/guides/how-to-add-menu/"},{"content":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or sub-menu, you should add data to src/router/data/xx and add translation contents for the title to src/locales/lang/xx in booster UI.\n Create a new file called xxx.ts in src/router/data. Add configurations to the xxx.ts, configurations should be like this.  export default [ { // Add `Infrastructure` menu  path: \u0026#34;\u0026#34;, name: \u0026#34;Infrastructure\u0026#34;, meta: { title: \u0026#34;infrastructure\u0026#34;, icon: \u0026#34;scatter_plot\u0026#34;, hasGroup: true, }, redirect: \u0026#34;/linux\u0026#34;, children: [ // Add a sub menu of the `Infrastructure`  { path: \u0026#34;/linux\u0026#34;, name: \u0026#34;Linux\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, // If there are Tabs widgets in your dashboards, add following extra configuration to provide static links to the specific tab.  { path: \u0026#34;/linux/tab/:activeTabIndex\u0026#34;, name: \u0026#34;LinuxActiveTabIndex\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, notShow: true, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, ], }, ]; import configurations in src/router/data/index.ts.  import name from \u0026#34;./xxx\u0026#34;; ","title":"How to add a new root menu or sub-menu to booster UI","url":"/docs/main/v9.5.0/en/guides/how-to-add-menu/"},{"content":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs and Controllers in SWCK.\n1. Install the kubebuilder  Notice, SWCK is built by kubebuilder v3.2.0, so you need to install it at first.\n SWCK is based on the kubebuilder, and you could download the kubebuilder by the script.\n2. Create CRD and Controller You can use kubebuilder create api to scaffold a new Kind and corresponding controller. 
Here we use the Demo as an example.\n$ cd operator \u0026amp;\u0026amp; kubebuilder create api --group operator --version v1alpha1 --kind Demo(Your CRD) Then you need to input twice y to create the Resource and Controller, and there will be some newly added files.\n$ git status On branch master Your branch is up to date with \u0026#39;origin/master\u0026#39;. Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: apis/operator/v1alpha1/zz_generated.deepcopy.go modified: config/crd/bases/operator.skywalking.apache.org_swagents.yaml modified: config/crd/kustomization.yaml modified: config/rbac/role.yaml modified: go.mod modified: go.sum modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_types.go config/crd/bases/operator.skywalking.apache.org_demoes.yaml config/crd/patches/cainjection_in_operator_demoes.yaml config/crd/patches/webhook_in_operator_demoes.yaml config/rbac/operator_demo_editor_role.yaml config/rbac/operator_demo_viewer_role.yaml config/samples/operator_v1alpha1_demo.yaml controllers/operator/demo_controller.go controllers/operator/suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) Next, we need to focus on the file apis/operator/v1alpha1/demo_types.go which defines your CRD, and the file controllers/operator/configuration_controller.go which defines the Controller. The others files are some configurations generated by the kubebuilder markers. Here are some references:\n  Kubebuilder project demo, in which you can understand the overall architecture.\n  How to add new-api, which you can find more details for oapserverconfig_types.go.\n  Controller-overview, where you can find more details about oapserverconfig_controller.go.\n  3. Create webhook If you want to fields or set defaults to CRs, creating webhooks is a good practice:\nkubebuilder create webhook --group operator --version v1alpha1 --kind Demo --defaulting --programmatic-validation The newly generated files are as follows.\n$ git status On branch master Your branch is ahead of \u0026#39;origin/master\u0026#39; by 1 commit. (use \u0026#34;git push\u0026#34; to publish your local commits) Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: config/webhook/manifests.yaml modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_webhook.go apis/operator/v1alpha1/webhook_suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) You can get more details through webhook-overview.\n4. Create the template Generally, a controller would generate a series of resources, such as workload, rbac, service, etc based on CRDs. SWCK is using the Go standard template engine to generate these resources. All template files are stored in the ./operator/pkg/operator/manifests. You could create a directory there such as demo to hold templates. The framework would transfer the CR as the arguments to these templates. 
More than CR, it supports passing custom rendering functions by setting up the TmplFunc. At last, you need to change the comment and add a field demo there to embed the template files into golang binaries.\n Notice, every file under the template directory can only contain one resource and we can\u0026rsquo;t use the --- to create multiple resources in a single file.\n 5. Build and Test SWCK needs to run in the k8s environment, so we highly recommend using the kind if you don\u0026rsquo;t have a cluster in hand. There are currently two ways to test your implementation.\n Before testing, please make sure you have the kind installed.\n  Test locally. After finishing your implementation, you could use the following steps to test locally:   Disable the webhook  export ENABLE_WEBHOOKS=false Run the main.go with the kubeconfig file.  go run main.go --kubeconfig=(use your kubeconfig file here, and the default is ~/.kube/config)  If you want to test the webhook, please refer the guide.\n  Test in-cluster.   Before testing the swck, please install cert-manager to provide the certificate for webhook in swck.  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml At first, you should build the swck image and load it into the kind cluster, and then you could install the crds and the operator as follows.  make docker-build \u0026amp;\u0026amp; kind load docker-image controller:latest \u0026amp;\u0026amp; make install \u0026amp;\u0026amp; make deploy After the swck is installed, and then you could use the following command to get the logs produced by the operator.  kubectl logs -f [skywalking-swck-controller-manager-*](../use the swck deployment name) -n skywalking-swck-system ","title":"How to add CRD and Controller in SWCK?","url":"/docs/skywalking-swck/latest/how-to-add-new-crd-and-controller/"},{"content":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs and Controllers in SWCK.\n1. Install the kubebuilder  Notice, SWCK is built by kubebuilder v3.2.0, so you need to install it at first.\n SWCK is based on the kubebuilder, and you could download the kubebuilder by the script.\n2. Create CRD and Controller You can use kubebuilder create api to scaffold a new Kind and corresponding controller. Here we use the Demo as an example.\n$ cd operator \u0026amp;\u0026amp; kubebuilder create api --group operator --version v1alpha1 --kind Demo(Your CRD) Then you need to input twice y to create the Resource and Controller, and there will be some newly added files.\n$ git status On branch master Your branch is up to date with \u0026#39;origin/master\u0026#39;. 
Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: apis/operator/v1alpha1/zz_generated.deepcopy.go modified: config/crd/bases/operator.skywalking.apache.org_swagents.yaml modified: config/crd/kustomization.yaml modified: config/rbac/role.yaml modified: go.mod modified: go.sum modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_types.go config/crd/bases/operator.skywalking.apache.org_demoes.yaml config/crd/patches/cainjection_in_operator_demoes.yaml config/crd/patches/webhook_in_operator_demoes.yaml config/rbac/operator_demo_editor_role.yaml config/rbac/operator_demo_viewer_role.yaml config/samples/operator_v1alpha1_demo.yaml controllers/operator/demo_controller.go controllers/operator/suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) Next, we need to focus on the file apis/operator/v1alpha1/demo_types.go which defines your CRD, and the file controllers/operator/configuration_controller.go which defines the Controller. The others files are some configurations generated by the kubebuilder markers. Here are some references:\n  Kubebuilder project demo, in which you can understand the overall architecture.\n  How to add new-api, which you can find more details for oapserverconfig_types.go.\n  Controller-overview, where you can find more details about oapserverconfig_controller.go.\n  3. Create webhook If you want to fields or set defaults to CRs, creating webhooks is a good practice:\nkubebuilder create webhook --group operator --version v1alpha1 --kind Demo --defaulting --programmatic-validation The newly generated files are as follows.\n$ git status On branch master Your branch is ahead of \u0026#39;origin/master\u0026#39; by 1 commit. (use \u0026#34;git push\u0026#34; to publish your local commits) Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: config/webhook/manifests.yaml modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_webhook.go apis/operator/v1alpha1/webhook_suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) You can get more details through webhook-overview.\n4. Create the template Generally, a controller would generate a series of resources, such as workload, rbac, service, etc based on CRDs. SWCK is using the Go standard template engine to generate these resources. All template files are stored in the ./operator/pkg/operator/manifests. You could create a directory there such as demo to hold templates. The framework would transfer the CR as the arguments to these templates. More than CR, it supports passing custom rendering functions by setting up the TmplFunc. 
At last, you need to change the comment and add a field demo there to embed the template files into golang binaries.\n Notice, every file under the template directory can only contain one resource and we can\u0026rsquo;t use the --- to create multiple resources in a single file.\n 5. Build and Test SWCK needs to run in the k8s environment, so we highly recommend using the kind if you don\u0026rsquo;t have a cluster in hand. There are currently two ways to test your implementation.\n Before testing, please make sure you have the kind installed.\n  Test locally. After finishing your implementation, you could use the following steps to test locally:   Disable the webhook  export ENABLE_WEBHOOKS=false Run the main.go with the kubeconfig file.  go run main.go --kubeconfig=(use your kubeconfig file here, and the default is ~/.kube/config)  If you want to test the webhook, please refer the guide.\n  Test in-cluster.   Before testing the swck, please install cert-manager to provide the certificate for webhook in swck.  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml At first, you should build the swck image and load it into the kind cluster, and then you could install the crds and the operator as follows.  make docker-build \u0026amp;\u0026amp; kind load docker-image controller:latest \u0026amp;\u0026amp; make install \u0026amp;\u0026amp; make deploy After the swck is installed, and then you could use the following command to get the logs produced by the operator.  kubectl logs -f [skywalking-swck-controller-manager-*](../use the swck deployment name) -n skywalking-swck-system ","title":"How to add CRD and Controller in SWCK?","url":"/docs/skywalking-swck/next/how-to-add-new-crd-and-controller/"},{"content":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs and Controllers in SWCK.\n1. Install the kubebuilder  Notice, SWCK is built by kubebuilder v3.2.0, so you need to install it at first.\n SWCK is based on the kubebuilder, and you could download the kubebuilder by the script.\n2. Create CRD and Controller You can use kubebuilder create api to scaffold a new Kind and corresponding controller. Here we use the Demo as an example.\n$ cd operator \u0026amp;\u0026amp; kubebuilder create api --group operator --version v1alpha1 --kind Demo(Your CRD) Then you need to input twice y to create the Resource and Controller, and there will be some newly added files.\n$ git status On branch master Your branch is up to date with \u0026#39;origin/master\u0026#39;. 
Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: apis/operator/v1alpha1/zz_generated.deepcopy.go modified: config/crd/bases/operator.skywalking.apache.org_swagents.yaml modified: config/crd/kustomization.yaml modified: config/rbac/role.yaml modified: go.mod modified: go.sum modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_types.go config/crd/bases/operator.skywalking.apache.org_demoes.yaml config/crd/patches/cainjection_in_operator_demoes.yaml config/crd/patches/webhook_in_operator_demoes.yaml config/rbac/operator_demo_editor_role.yaml config/rbac/operator_demo_viewer_role.yaml config/samples/operator_v1alpha1_demo.yaml controllers/operator/demo_controller.go controllers/operator/suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) Next, we need to focus on the file apis/operator/v1alpha1/demo_types.go which defines your CRD, and the file controllers/operator/configuration_controller.go which defines the Controller. The others files are some configurations generated by the kubebuilder markers. Here are some references:\n  Kubebuilder project demo, in which you can understand the overall architecture.\n  How to add new-api, which you can find more details for oapserverconfig_types.go.\n  Controller-overview, where you can find more details about oapserverconfig_controller.go.\n  3. Create webhook If you want to fields or set defaults to CRs, creating webhooks is a good practice:\nkubebuilder create webhook --group operator --version v1alpha1 --kind Demo --defaulting --programmatic-validation The newly generated files are as follows.\n$ git status On branch master Your branch is ahead of \u0026#39;origin/master\u0026#39; by 1 commit. (use \u0026#34;git push\u0026#34; to publish your local commits) Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: config/webhook/manifests.yaml modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_webhook.go apis/operator/v1alpha1/webhook_suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) You can get more details through webhook-overview.\n4. Create the template Generally, a controller would generate a series of resources, such as workload, rbac, service, etc based on CRDs. SWCK is using the Go standard template engine to generate these resources. All template files are stored in the ./operator/pkg/operator/manifests. You could create a directory there such as demo to hold templates. The framework would transfer the CR as the arguments to these templates. More than CR, it supports passing custom rendering functions by setting up the TmplFunc. 
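For reference, step 2 above scaffolds apis/operator/v1alpha1/demo_types.go. Below is a rough, hedged sketch of the shape kubebuilder gives that file; the Version and Ready fields are illustrative placeholders you would replace with your CRD's real fields, and they are not part of SWCK.

```go
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// DemoSpec defines the desired state of Demo.
type DemoSpec struct {
	// Version is an example field; replace it with your CRD's real fields.
	Version string `json:"version,omitempty"`
}

// DemoStatus defines the observed state of Demo.
type DemoStatus struct {
	// Ready is an example status field.
	Ready bool `json:"ready,omitempty"`
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// Demo is the Schema for the demoes API.
type Demo struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   DemoSpec   `json:"spec,omitempty"`
	Status DemoStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// DemoList contains a list of Demo.
type DemoList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Demo `json:"items"`
}

// The scaffolded file also registers Demo and DemoList with the SchemeBuilder
// that kubebuilder generates in groupversion_info.go; that part is omitted here.
```

After editing the Spec/Status fields, running make manifests regenerates the CRD YAML under config/crd/bases from these kubebuilder markers.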
At last, you need to change the comment and add a field demo there to embed the template files into golang binaries.\n Notice, every file under the template directory can only contain one resource and we can\u0026rsquo;t use the --- to create multiple resources in a single file.\n 5. Build and Test SWCK needs to run in the k8s environment, so we highly recommend using the kind if you don\u0026rsquo;t have a cluster in hand. There are currently two ways to test your implementation.\n Before testing, please make sure you have the kind installed.\n  Test locally. After finishing your implementation, you could use the following steps to test locally:   Disable the webhook  export ENABLE_WEBHOOKS=false Run the main.go with the kubeconfig file.  go run main.go --kubeconfig=(use your kubeconfig file here, and the default is ~/.kube/config)  If you want to test the webhook, please refer the guide.\n  Test in-cluster.   Before testing the swck, please install cert-manager to provide the certificate for webhook in swck.  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml At first, you should build the swck image and load it into the kind cluster, and then you could install the crds and the operator as follows.  make docker-build \u0026amp;\u0026amp; kind load docker-image controller:latest \u0026amp;\u0026amp; make install \u0026amp;\u0026amp; make deploy After the swck is installed, and then you could use the following command to get the logs produced by the operator.  kubectl logs -f [skywalking-swck-controller-manager-*](../use the swck deployment name) -n skywalking-swck-system ","title":"How to add CRD and Controller in SWCK?","url":"/docs/skywalking-swck/v0.9.0/how-to-add-new-crd-and-controller/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK 11 or 17 (LTS versions), and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. 
If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/latest/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK 11, 17, 21 (LTS versions), and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  
Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/next/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. 
Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.0.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. 
There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.1.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  
For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.2.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... 
while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  
grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.3.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK11+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. 
Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.4.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK11+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. 
If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.5.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK11+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. 
If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.6.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK 11 or 17 (LTS versions), and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  
Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.7.0/en/guides/how-to-build/"},{"content":"How to build from sources? Download the source tar from the official website, and run the following commands to build from source\nMake sure you have Python 3.7+ and the python3 command available\n$ tar -zxf skywalking-python-src-\u0026lt;version\u0026gt;.tgz $ cd skywalking-python-src-\u0026lt;version\u0026gt; $ make install If you want to build from the latest source codes from GitHub for some reasons, for example, you want to try the latest features that are not released yet, please clone the source codes from GitHub and make install it:\n$ git clone https://github.com/apache/skywalking-python $ cd skywalking-python $ git submodule update --init $ make install NOTE that only releases from the website are official Apache releases.\n","title":"How to build from sources?","url":"/docs/skywalking-python/latest/en/setup/faq/how-to-build-from-sources/"},{"content":"How to build from sources? 
Download the source tar from the official website, and run the following commands to build from source\nMake sure you have Python 3.7+ and the python3 command available\n$ tar -zxf skywalking-python-src-\u0026lt;version\u0026gt;.tgz $ cd skywalking-python-src-\u0026lt;version\u0026gt; $ make install If you want to build from the latest source codes from GitHub for some reasons, for example, you want to try the latest features that are not released yet, please clone the source codes from GitHub and make install it:\n$ git clone https://github.com/apache/skywalking-python $ cd skywalking-python $ git submodule update --init $ make install NOTE that only releases from the website are official Apache releases.\n","title":"How to build from sources?","url":"/docs/skywalking-python/next/en/setup/faq/how-to-build-from-sources/"},{"content":"How to build from sources? Download the source tar from the official website, and run the following commands to build from source\nMake sure you have Python 3.7+ and the python3 command available\n$ tar -zxf skywalking-python-src-\u0026lt;version\u0026gt;.tgz $ cd skywalking-python-src-\u0026lt;version\u0026gt; $ make install If you want to build from the latest source codes from GitHub for some reasons, for example, you want to try the latest features that are not released yet, please clone the source codes from GitHub and make install it:\n$ git clone https://github.com/apache/skywalking-python $ cd skywalking-python $ git submodule update --init $ make install NOTE that only releases from the website are official Apache releases.\n","title":"How to build from sources?","url":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-build-from-sources/"},{"content":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  
node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","title":"How to bump up Zipkin Lens dependency","url":"/docs/main/latest/en/guides/how-to-bump-up-zipkin/"},{"content":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","title":"How to bump up Zipkin Lens dependency","url":"/docs/main/next/en/guides/how-to-bump-up-zipkin/"},{"content":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  
ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","title":"How to bump up Zipkin Lens dependency","url":"/docs/main/v9.4.0/en/guides/how-to-bump-up-zipkin/"},{"content":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. 
{{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","title":"How to bump up Zipkin Lens dependency","url":"/docs/main/v9.5.0/en/guides/how-to-bump-up-zipkin/"},{"content":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  
Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","title":"How to bump up Zipkin Lens dependency","url":"/docs/main/v9.6.0/en/guides/how-to-bump-up-zipkin/"},{"content":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","title":"How to bump up Zipkin Lens dependency","url":"/docs/main/v9.7.0/en/guides/how-to-bump-up-zipkin/"},{"content":"How to disable some plugins? You can find the plugin name in the list and disable one or more plugins by following methods.\nfrom skywalking import config config.agent_disable_plugins = [\u0026#39;sw_http_server\u0026#39;, \u0026#39;sw_urllib_request\u0026#39;] # can be also CSV format, i.e. \u0026#39;sw_http_server,sw_urllib_request\u0026#39; You can also disable the plugins via environment variables SW_AGENT_DISABLE_PLUGINS, please check the Environment Variables List for an explanation.\n","title":"How to disable some plugins?","url":"/docs/skywalking-python/latest/en/setup/faq/how-to-disable-plugin/"},{"content":"How to disable some plugins? 
You can find the plugin name in the list and disable one or more plugins by following methods.\nfrom skywalking import config config.agent_disable_plugins = [\u0026#39;sw_http_server\u0026#39;, \u0026#39;sw_urllib_request\u0026#39;] # can be also CSV format, i.e. \u0026#39;sw_http_server,sw_urllib_request\u0026#39; You can also disable the plugins via environment variables SW_AGENT_DISABLE_PLUGINS, please check the Environment Variables List for an explanation.\n","title":"How to disable some plugins?","url":"/docs/skywalking-python/next/en/setup/faq/how-to-disable-plugin/"},{"content":"How to disable some plugins? You can find the plugin name in the list and disable one or more plugins by following methods.\nfrom skywalking import config config.agent_disable_plugins = [\u0026#39;sw_http_server\u0026#39;, \u0026#39;sw_urllib_request\u0026#39;] # can be also CSV format, i.e. \u0026#39;sw_http_server,sw_urllib_request\u0026#39; You can also disable the plugins via environment variables SW_AGENT_DISABLE_PLUGINS, please check the Environment Variables List for an explanation.\n","title":"How to disable some plugins?","url":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-disable-plugin/"},{"content":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. 
Equinox) enable them by default  ","title":"How to make SkyWalking agent works in `OSGI` environment?","url":"/docs/skywalking-java/latest/en/faq/osgi/"},{"content":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","title":"How to make SkyWalking agent works in `OSGI` environment?","url":"/docs/skywalking-java/next/en/faq/osgi/"},{"content":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","title":"How to make SkyWalking agent works in `OSGI` environment?","url":"/docs/skywalking-java/v9.0.0/en/faq/osgi/"},{"content":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","title":"How to make SkyWalking agent works in `OSGI` environment?","url":"/docs/skywalking-java/v9.1.0/en/faq/osgi/"},{"content":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
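As a concrete reference for the resolution steps listed later in this entry, the two framework properties can often be supplied when the container is launched; a rough sketch (the agent path, launcher jar, and the use of plain JVM system properties are illustrative and depend on the OSGi implementation and launcher):

```bash
# Illustrative only: the property names come from the resolution steps below;
# whether they can be passed as JVM system properties, and the launcher jar name,
# depend on the OSGi implementation (Felix/Equinox) and how it is started.
java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar \
     -Dorg.osgi.framework.bundle.parent=app \
     '-Dorg.osgi.framework.bootdelegation=org.apache.skywalking.apm.*' \
     -jar bin/felix.jar
```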
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","title":"How to make SkyWalking agent works in `OSGI` environment?","url":"/docs/skywalking-java/v9.2.0/en/faq/osgi/"},{"content":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes.\nAfter cloning the repo, make sure you also have cloned the submodule for protocol. Otherwise, run the command below.\ngit submodule update --init Please first refer to the Developer Guide to set up a development environment.\nTL;DR: run make env. This will create virtual environments for python and generate the protocol folder needed for the agent.\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nBy now, you can do what you want. Let\u0026rsquo;s get to the topic of how to test.\nThe test process requires docker and docker-compose throughout. If you haven\u0026rsquo;t installed them, please install them first.\nThen run make test, which will generate a list of plugin versions based on the support_matrix variable in each Plugin and orchestrate the tests automatically. Remember to inspect the outcomes carefully to debug your plugin.\nAlternatively, you can run full tests via our GitHub action workflow on your own GitHub fork, it is usually easier since local environment can be tricky to setup for new contributors.\nTo do so, you need to fork this repo on GitHub and enable GitHub actions on your forked repo. 
Then, you can simply push your changes and open a Pull Request to the fork\u0026rsquo;s master branch.\nNote: GitHub automatically targets Pull Requests to the upstream repo, be careful when you open them to avoid accidental PRs to upstream.\n","title":"How to test locally?","url":"/docs/skywalking-python/latest/en/contribution/how-to-test-locally/"},{"content":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes.\nAfter cloning the repo, make sure you also have cloned the submodule for protocol. Otherwise, run the command below.\ngit submodule update --init Please first refer to the Developer Guide to set up a development environment.\nTL;DR: run make env. This will create virtual environments for python and generate the protocol folder needed for the agent.\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nBy now, you can do what you want. Let\u0026rsquo;s get to the topic of how to test.\nThe test process requires docker and docker-compose throughout. If you haven\u0026rsquo;t installed them, please install them first.\nThen run make test, which will generate a list of plugin versions based on the support_matrix variable in each Plugin and orchestrate the tests automatically. Remember to inspect the outcomes carefully to debug your plugin.\nAlternatively, you can run full tests via our GitHub action workflow on your own GitHub fork, it is usually easier since local environment can be tricky to setup for new contributors.\nTo do so, you need to fork this repo on GitHub and enable GitHub actions on your forked repo. Then, you can simply push your changes and open a Pull Request to the fork\u0026rsquo;s master branch.\nNote: GitHub automatically targets Pull Requests to the upstream repo, be careful when you open them to avoid accidental PRs to upstream.\n","title":"How to test locally?","url":"/docs/skywalking-python/next/en/contribution/how-to-test-locally/"},{"content":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes.\nAfter cloning the repo, make sure you also have cloned the submodule for protocol. Otherwise, run the command below.\ngit submodule update --init Please first refer to the Developer Guide to set up a development environment.\nTL;DR: run make env. This will create virtual environments for python and generate the protocol folder needed for the agent.\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nBy now, you can do what you want. Let\u0026rsquo;s get to the topic of how to test.\nThe test process requires docker and docker-compose throughout. If you haven\u0026rsquo;t installed them, please install them first.\nThen run make test, which will generate a list of plugin versions based on the support_matrix variable in each Plugin and orchestrate the tests automatically. Remember to inspect the outcomes carefully to debug your plugin.\nAlternatively, you can run full tests via our GitHub action workflow on your own GitHub fork, it is usually easier since local environment can be tricky to setup for new contributors.\nTo do so, you need to fork this repo on GitHub and enable GitHub actions on your forked repo. 
Then, you can simply push your changes and open a Pull Request to the fork\u0026rsquo;s master branch.\nNote: GitHub automatically targets Pull Requests to the upstream repo, be careful when you open them to avoid accidental PRs to upstream.\n","title":"How to test locally?","url":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-test-locally/"},{"content":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... }   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... 
}   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","title":"How to tolerate custom exceptions","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/"},{"content":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... 
}  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... }   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","title":"How to tolerate custom exceptions","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/"},{"content":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... 
}   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","title":"How to tolerate custom exceptions","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/"},{"content":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... 
}   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","title":"How to tolerate custom exceptions","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/"},{"content":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... 
}   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","title":"How to tolerate custom exceptions","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
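For instance, a minimal sketch of mounting both directories (the host-side paths are illustrative; the container-side paths and image tag follow the examples above):

```bash
# Illustrative host paths; files land in /skywalking/ext-config and /skywalking/ext-libs,
# where the entrypoint picks them up as described above.
docker run --name oap --restart always -d \
  -v /host/path/ext-config:/skywalking/ext-config \
  -v /host/path/ext-libs:/skywalking/ext-libs \
  apache/skywalking-oap-server:9.0.0
```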
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/latest/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start the storage, OAP and Booster UI with docker-compose As a quick start, you can use our one-liner script to start ElasticSearch or BanyanDB as the storage, OAP server and Booster UI, please make sure you have installed Docker.\nLinux, macOS, Windows (WSL)\nbash \u0026lt;(curl -sSL https://skywalking.apache.org/quickstart-docker.sh) Windows (Powershell)\nInvoke-Expression ([System.Text.Encoding]::UTF8.GetString((Invoke-WebRequest -Uri https://skywalking.apache.org/quickstart-docker.ps1 -UseBasicParsing).Content)) You will be prompted to choose the storage type, and then the script will start the backend cluster with the selected storage.\nTo tear down the cluster, run the following command:\ndocker compose --project-name=skywalking-quickstart down Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.7.0 Start a standalone container with BanyanDB as storage, whose address is banyandb:17912 docker run --name oap --restart always -d -e SW_STORAGE=banyandb -e SW_STORAGE_BANYANDB_TARGETS=banyandb:17912 apache/skywalking-oap-server:9.7.0 Start a standalone container with ElasticSearch 7 as storage, whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.7.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/next/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standlone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:8.8.0 Start a standlone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:8.8.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden, otherwise, they will be added in /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.0.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standlone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standlone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.1.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.2.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.3.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.4.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.5.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.6.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.7.0/en/setup/backend/backend-docker/"},{"content":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used in production. The state-of-the-art practice is to use Gunicorn as the process manager for ASGI applications such as FastAPI to get resilient \u0026amp; blazing fast services.\nSince Gunicorn is a prefork server, it will fork a new process for each worker, and the forked process will be the one that actually serves requests.\n Tired of understanding these complicated multiprocessing behaviors? Try the new sw-python run --prefork/-p support for Gunicorn first! You can always fall back to the manual approach (although it\u0026rsquo;s also non-intrusive for application).\n Automatic Injection Approach (Non-intrusive)  Caveat: Although E2E test passes for Python3.7, there\u0026rsquo;s a small chance that this approach won\u0026rsquo;t work on Python 3.7 if your application uses gPRC protocol AND subprocess AND fork together (you will immediately see service is not starting normally, not randomly breaking after)\nThis is due to an unfixed bug in gRPC core that leads to deadlock if Python 3.7 application involves subprocess (like debug mode). 
You should upgrade to Python 3.8+ soon since the EOL is approaching on 2023 June 27th, or fallback to manual approach should this case happen, or simply use HTTP/Kafka protocol.\n TL;DR: specify -p or --prefork in sw-python run -p and all Gunicorn workers and master will get their own working agent.\nImportant: if the call to gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 Long version: (notice this is different from how uWSGI equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, the agent_experimental_fork_support agent option will be turned on automatically.\nStartup flow: sw-python -\u0026gt; gunicorn -\u0026gt; master process (agent starts) -\u0026gt; fork -\u0026gt; worker process (agent restarts due to os.register_at_fork)\nThe master process will get its own agent, although it won\u0026rsquo;t report any trace, since obviously it doesn\u0026rsquo;t take requests, it still reports metrics that is useful for debugging\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) Limitation: Using normal postfork hook will not add observability to the master process, you could also define a prefork hook to start an agent in the master process, with a instance name like instance-name-master(\u0026lt;pid\u0026gt;)\nThe following is just an example, since Gunicorn\u0026rsquo;s automatic injection approach is likely to work in many situations.\n The manual approach should not be used together with the agent\u0026rsquo;s fork support. Otherwise, agent will be dual booted and raise an error saying that you should not do so.\n # Usage explained here: https://docs.gunicorn.org/en/stable/settings.html#post-fork bind = '0.0.0.0:8088' workers = 3 def post_fork(server, worker): # Important: The import of skywalking should be inside the post_fork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() Run Gunicorn normally without sw-python CLI:\ngunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 ","title":"How to use with Gunicorn?","url":"/docs/skywalking-python/latest/en/setup/faq/how-to-use-with-gunicorn/"},{"content":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used in production. The state-of-the-art practice is to use Gunicorn as the process manager for ASGI applications such as FastAPI to get resilient \u0026amp; blazing fast services.\nSince Gunicorn is a prefork server, it will fork a new process for each worker, and the forked process will be the one that actually serves requests.\n Tired of understanding these complicated multiprocessing behaviors? Try the new sw-python run --prefork/-p support for Gunicorn first! 
You can always fall back to the manual approach (although it\u0026rsquo;s also non-intrusive for application).\n Automatic Injection Approach (Non-intrusive)  Caveat: Although E2E test passes for Python3.7, there\u0026rsquo;s a small chance that this approach won\u0026rsquo;t work on Python 3.7 if your application uses gPRC protocol AND subprocess AND fork together (you will immediately see service is not starting normally, not randomly breaking after)\nThis is due to an unfixed bug in gRPC core that leads to deadlock if Python 3.7 application involves subprocess (like debug mode). You should upgrade to Python 3.8+ soon since the EOL is approaching on 2023 June 27th, or fallback to manual approach should this case happen, or simply use HTTP/Kafka protocol.\n TL;DR: specify -p or --prefork in sw-python run -p and all Gunicorn workers and master will get their own working agent.\nImportant: if the call to gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 Long version: (notice this is different from how uWSGI equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, the agent_experimental_fork_support agent option will be turned on automatically.\nStartup flow: sw-python -\u0026gt; gunicorn -\u0026gt; master process (agent starts) -\u0026gt; fork -\u0026gt; worker process (agent restarts due to os.register_at_fork)\nThe master process will get its own agent, although it won\u0026rsquo;t report any trace, since obviously it doesn\u0026rsquo;t take requests, it still reports metrics that is useful for debugging\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) Limitation: Using normal postfork hook will not add observability to the master process, you could also define a prefork hook to start an agent in the master process, with a instance name like instance-name-master(\u0026lt;pid\u0026gt;)\nThe following is just an example, since Gunicorn\u0026rsquo;s automatic injection approach is likely to work in many situations.\n The manual approach should not be used together with the agent\u0026rsquo;s fork support. Otherwise, agent will be dual booted and raise an error saying that you should not do so.\n # Usage explained here: https://docs.gunicorn.org/en/stable/settings.html#post-fork bind = '0.0.0.0:8088' workers = 3 def post_fork(server, worker): # Important: The import of skywalking should be inside the post_fork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. 
agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() Run Gunicorn normally without sw-python CLI:\ngunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 ","title":"How to use with Gunicorn?","url":"/docs/skywalking-python/next/en/setup/faq/how-to-use-with-gunicorn/"},{"content":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used in production. The state-of-the-art practice is to use Gunicorn as the process manager for ASGI applications such as FastAPI to get resilient \u0026amp; blazing fast services.\nSince Gunicorn is a prefork server, it will fork a new process for each worker, and the forked process will be the one that actually serves requests.\n Tired of understanding these complicated multiprocessing behaviors? Try the new sw-python run --prefork/-p support for Gunicorn first! You can always fall back to the manual approach (although it\u0026rsquo;s also non-intrusive for application).\n Automatic Injection Approach (Non-intrusive)  Caveat: Although E2E test passes for Python3.7, there\u0026rsquo;s a small chance that this approach won\u0026rsquo;t work on Python 3.7 if your application uses gPRC protocol AND subprocess AND fork together (you will immediately see service is not starting normally, not randomly breaking after)\nThis is due to an unfixed bug in gRPC core that leads to deadlock if Python 3.7 application involves subprocess (like debug mode). You should upgrade to Python 3.8+ soon since the EOL is approaching on 2023 June 27th, or fallback to manual approach should this case happen, or simply use HTTP/Kafka protocol.\n TL;DR: specify -p or --prefork in sw-python run -p and all Gunicorn workers and master will get their own working agent.\nImportant: if the call to gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 Long version: (notice this is different from how uWSGI equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, the agent_experimental_fork_support agent option will be turned on automatically.\nStartup flow: sw-python -\u0026gt; gunicorn -\u0026gt; master process (agent starts) -\u0026gt; fork -\u0026gt; worker process (agent restarts due to os.register_at_fork)\nThe master process will get its own agent, although it won\u0026rsquo;t report any trace, since obviously it doesn\u0026rsquo;t take requests, it still reports metrics that is useful for debugging\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) Limitation: Using normal postfork hook will not add observability to the master process, you could also define a prefork hook to start an agent in the master process, with a instance name like instance-name-master(\u0026lt;pid\u0026gt;)\nThe following is just an example, since Gunicorn\u0026rsquo;s automatic injection approach is likely to work in many situations.\n The manual approach should not be used together with the agent\u0026rsquo;s fork support. 
Otherwise, agent will be dual booted and raise an error saying that you should not do so.\n # Usage explained here: https://docs.gunicorn.org/en/stable/settings.html#post-fork bind = '0.0.0.0:8088' workers = 3 def post_fork(server, worker): # Important: The import of skywalking should be inside the post_fork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() Run Gunicorn normally without sw-python CLI:\ngunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 ","title":"How to use with Gunicorn?","url":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-use-with-gunicorn/"},{"content":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and easy-to-use web server.\nSince uWSGI is relatively old and offers multi-language support, it can get quite troublesome due to the usage of a system-level fork.\nSome of the original discussion can be found here:\n [Python] Apache Skywalking, flask uwsgi, no metrics send to server · Issue #6324 · apache/skywalking [Bug] skywalking-python not work with uwsgi + flask in master workers mode and threads mode · Issue #8566 · apache/skywalking   Tired of understanding these complicated multiprocessing behaviours? Try the new sw-python run --prefork/-p support for uWSGI first! You can always fall back to the manual approach. (although it\u0026rsquo;s also possible to pass postfork hook without changing code, which is essentially how sw-python is implemented)\n  Limitation: regardless of the approach used, uWSGI master process cannot be safely monitored. Since it doesn\u0026rsquo;t take any requests, it is generally acceptable. Alternatively, you could switch to Gunicorn, where its master process can be monitored properly along with all child workers.\n Important: The --enable-threads and --master option must be given to allow the usage of post_fork hooks and threading in workers. In the sw-python CLI, these two options will be automatically injected for you in addition to the post_fork hook.\nAutomatic Injection Approach (Non-intrusive) TL;DR: specify -p or --prefork in sw-python run -p and all uWSGI workers will get their own working agent.\nImportant: if the call to uwsgi is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p uwsgi --die-on-term \\  --http 0.0.0.0:9090 \\  --http-manage-expect \\  --master --workers 2 \\  --enable-threads \\  --threads 2 \\  --manage-script-name \\  --mount /=flask_consumer_prefork:app Long version: (notice this is different from how Gunicorn equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, a uwsgi_hook will be registered by the CLI by adding the environment variable into one of [\u0026lsquo;UWSGI_SHARED_PYTHON_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PYIMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PY_IMPORT\u0026rsquo;]. 
uWSGI will then import the module and start the agent in forked workers.\nStartup flow: sw-python -\u0026gt; uwsgi -\u0026gt; master process (agent doesn\u0026rsquo;t start here) -\u0026gt; fork -\u0026gt; worker process (agent starts due to post_fork hook)\nThe master process (which doesn\u0026rsquo;t accept requests) currently does not get its own agent as it can not be safely started and handled by os.register_at_fork() handlers.\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) If you get some problems when using SkyWalking Python agent, you can try to use the following manual method to call @postfork, the low-level API of uWSGI to initialize the agent.\nThe following is an example of the use of uWSGI and flask.\nImportant: Never directly start the agent in the app, forked workers are unlikely to work properly (even if they do, it\u0026rsquo;s out of luck) you should either add the following postfork, or try our new experimental automatic startup through sw-python CLI (see above).\n# main.py # Note: The --master uwsgi flag must be on, otherwise the decorators will not be available to import from uwsgidecorators import postfork @postfork def init_tracing(): # Important: The import of skywalking must be inside the postfork function from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello World!' if __name__ == '__main__': app.run() Run uWSGI normally without sw-python CLI:\nuwsgi --die-on-term \\  --http 0.0.0.0:5000 \\  --http-manage-expect \\  --master --workers 3 \\  --enable-threads \\  --threads 3 \\  --manage-script-name \\  --mount /=main:app ","title":"How to use with uWSGI?","url":"/docs/skywalking-python/latest/en/setup/faq/how-to-use-with-uwsgi/"},{"content":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and easy-to-use web server.\nSince uWSGI is relatively old and offers multi-language support, it can get quite troublesome due to the usage of a system-level fork.\nSome of the original discussion can be found here:\n [Python] Apache Skywalking, flask uwsgi, no metrics send to server · Issue #6324 · apache/skywalking [Bug] skywalking-python not work with uwsgi + flask in master workers mode and threads mode · Issue #8566 · apache/skywalking   Tired of understanding these complicated multiprocessing behaviours? Try the new sw-python run --prefork/-p support for uWSGI first! You can always fall back to the manual approach. (although it\u0026rsquo;s also possible to pass postfork hook without changing code, which is essentially how sw-python is implemented)\n  Limitation: regardless of the approach used, uWSGI master process cannot be safely monitored. Since it doesn\u0026rsquo;t take any requests, it is generally acceptable. 
Alternatively, you could switch to Gunicorn, where its master process can be monitored properly along with all child workers.\n Important: The --enable-threads and --master option must be given to allow the usage of post_fork hooks and threading in workers. In the sw-python CLI, these two options will be automatically injected for you in addition to the post_fork hook.\nAutomatic Injection Approach (Non-intrusive) TL;DR: specify -p or --prefork in sw-python run -p and all uWSGI workers will get their own working agent.\nImportant: if the call to uwsgi is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p uwsgi --die-on-term \\  --http 0.0.0.0:9090 \\  --http-manage-expect \\  --master --workers 2 \\  --enable-threads \\  --threads 2 \\  --manage-script-name \\  --mount /=flask_consumer_prefork:app Long version: (notice this is different from how Gunicorn equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, a uwsgi_hook will be registered by the CLI by adding the environment variable into one of [\u0026lsquo;UWSGI_SHARED_PYTHON_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PYIMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PY_IMPORT\u0026rsquo;]. uWSGI will then import the module and start the agent in forked workers.\nStartup flow: sw-python -\u0026gt; uwsgi -\u0026gt; master process (agent doesn\u0026rsquo;t start here) -\u0026gt; fork -\u0026gt; worker process (agent starts due to post_fork hook)\nThe master process (which doesn\u0026rsquo;t accept requests) currently does not get its own agent as it can not be safely started and handled by os.register_at_fork() handlers.\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) If you get some problems when using SkyWalking Python agent, you can try to use the following manual method to call @postfork, the low-level API of uWSGI to initialize the agent.\nThe following is an example of the use of uWSGI and flask.\nImportant: Never directly start the agent in the app, forked workers are unlikely to work properly (even if they do, it\u0026rsquo;s out of luck) you should either add the following postfork, or try our new experimental automatic startup through sw-python CLI (see above).\n# main.py # Note: The --master uwsgi flag must be on, otherwise the decorators will not be available to import from uwsgidecorators import postfork @postfork def init_tracing(): # Important: The import of skywalking must be inside the postfork function from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello World!' 
if __name__ == '__main__': app.run() Run uWSGI normally without sw-python CLI:\nuwsgi --die-on-term \\  --http 0.0.0.0:5000 \\  --http-manage-expect \\  --master --workers 3 \\  --enable-threads \\  --threads 3 \\  --manage-script-name \\  --mount /=main:app ","title":"How to use with uWSGI?","url":"/docs/skywalking-python/next/en/setup/faq/how-to-use-with-uwsgi/"},{"content":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and easy-to-use web server.\nSince uWSGI is relatively old and offers multi-language support, it can get quite troublesome due to the usage of a system-level fork.\nSome of the original discussion can be found here:\n [Python] Apache Skywalking, flask uwsgi, no metrics send to server · Issue #6324 · apache/skywalking [Bug] skywalking-python not work with uwsgi + flask in master workers mode and threads mode · Issue #8566 · apache/skywalking   Tired of understanding these complicated multiprocessing behaviours? Try the new sw-python run --prefork/-p support for uWSGI first! You can always fall back to the manual approach. (although it\u0026rsquo;s also possible to pass postfork hook without changing code, which is essentially how sw-python is implemented)\n  Limitation: regardless of the approach used, uWSGI master process cannot be safely monitored. Since it doesn\u0026rsquo;t take any requests, it is generally acceptable. Alternatively, you could switch to Gunicorn, where its master process can be monitored properly along with all child workers.\n Important: The --enable-threads and --master option must be given to allow the usage of post_fork hooks and threading in workers. In the sw-python CLI, these two options will be automatically injected for you in addition to the post_fork hook.\nAutomatic Injection Approach (Non-intrusive) TL;DR: specify -p or --prefork in sw-python run -p and all uWSGI workers will get their own working agent.\nImportant: if the call to uwsgi is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p uwsgi --die-on-term \\  --http 0.0.0.0:9090 \\  --http-manage-expect \\  --master --workers 2 \\  --enable-threads \\  --threads 2 \\  --manage-script-name \\  --mount /=flask_consumer_prefork:app Long version: (notice this is different from how Gunicorn equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, a uwsgi_hook will be registered by the CLI by adding the environment variable into one of [\u0026lsquo;UWSGI_SHARED_PYTHON_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PYIMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PY_IMPORT\u0026rsquo;]. 
uWSGI will then import the module and start the agent in forked workers.\nStartup flow: sw-python -\u0026gt; uwsgi -\u0026gt; master process (agent doesn\u0026rsquo;t start here) -\u0026gt; fork -\u0026gt; worker process (agent starts due to post_fork hook)\nThe master process (which doesn\u0026rsquo;t accept requests) currently does not get its own agent as it can not be safely started and handled by os.register_at_fork() handlers.\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) If you get some problems when using SkyWalking Python agent, you can try to use the following manual method to call @postfork, the low-level API of uWSGI to initialize the agent.\nThe following is an example of the use of uWSGI and flask.\nImportant: Never directly start the agent in the app, forked workers are unlikely to work properly (even if they do, it\u0026rsquo;s out of luck) you should either add the following postfork, or try our new experimental automatic startup through sw-python CLI (see above).\n# main.py # Note: The --master uwsgi flag must be on, otherwise the decorators will not be available to import from uwsgidecorators import postfork @postfork def init_tracing(): # Important: The import of skywalking must be inside the postfork function from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello World!' if __name__ == '__main__': app.run() Run uWSGI normally without sw-python CLI:\nuwsgi --die-on-term \\  --http 0.0.0.0:5000 \\  --http-manage-expect \\  --master --workers 3 \\  --enable-threads \\  --threads 3 \\  --manage-script-name \\  --mount /=main:app ","title":"How to use with uWSGI?","url":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-use-with-uwsgi/"},{"content":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following contents would guide you. Let\u0026rsquo;s use the profiling module as an example of how to write a module.\n Please read the Module Design to understand what is module. The module should be written in the skywalking-rover/pkg directory. So we create a new directory called profiling as the module codes space. Implement the interface in the skywalking-rover/pkg/module. Each module has 6 methods, which are Name, RequiredModules, Config, Start, NotifyStartSuccess, and Shutdown.  Name returns the unique name of the module, also this name is used to define in the configuration file. RequiredModules returns this needs depended on module names. In the profiling module, it needs to query the existing process and send snapshots to the backend, so it needs the core and process module. Config returns the config content of this module, which relate to the configuration file, and you could declare the tag(mapstructure) with the field to define the name in the configuration file. Start is triggered when the module needs to start. if this module start failure, please return the error. 
NotifyStartSuccess is triggered after all the active modules are Start method success. Shutdown   Add the configuration into the skywalking-rover/configs/rover_configs.yaml. It should same as the config declaration. Register the module into skywalking-rover/pkg/boot/register.go. Add the Unit test or E2E testing for testing the module is works well. Write the documentation under the skywalking-rover/docs/en directory and add it to the documentation index file skywalking-rover/docs/menu.yml.  ","title":"How to write a new module?","url":"/docs/skywalking-rover/latest/en/guides/contribution/how-to-write-module/"},{"content":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following contents would guide you. Let\u0026rsquo;s use the profiling module as an example of how to write a module.\n Please read the Module Design to understand what is module. The module should be written in the skywalking-rover/pkg directory. So we create a new directory called profiling as the module codes space. Implement the interface in the skywalking-rover/pkg/module. Each module has 6 methods, which are Name, RequiredModules, Config, Start, NotifyStartSuccess, and Shutdown.  Name returns the unique name of the module, also this name is used to define in the configuration file. RequiredModules returns this needs depended on module names. In the profiling module, it needs to query the existing process and send snapshots to the backend, so it needs the core and process module. Config returns the config content of this module, which relate to the configuration file, and you could declare the tag(mapstructure) with the field to define the name in the configuration file. Start is triggered when the module needs to start. if this module start failure, please return the error. NotifyStartSuccess is triggered after all the active modules are Start method success. Shutdown   Add the configuration into the skywalking-rover/configs/rover_configs.yaml. It should same as the config declaration. Register the module into skywalking-rover/pkg/boot/register.go. Add the Unit test or E2E testing for testing the module is works well. Write the documentation under the skywalking-rover/docs/en directory and add it to the documentation index file skywalking-rover/docs/menu.yml.  ","title":"How to write a new module?","url":"/docs/skywalking-rover/next/en/guides/contribution/how-to-write-module/"},{"content":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following contents would guide you. Let\u0026rsquo;s use the profiling module as an example of how to write a module.\n Please read the Module Design to understand what is module. The module should be written in the skywalking-rover/pkg directory. So we create a new directory called profiling as the module codes space. Implement the interface in the skywalking-rover/pkg/module. Each module has 6 methods, which are Name, RequiredModules, Config, Start, NotifyStartSuccess, and Shutdown.  Name returns the unique name of the module, also this name is used to define in the configuration file. RequiredModules returns this needs depended on module names. In the profiling module, it needs to query the existing process and send snapshots to the backend, so it needs the core and process module. Config returns the config content of this module, which relate to the configuration file, and you could declare the tag(mapstructure) with the field to define the name in the configuration file. 
Start is triggered when the module needs to start. if this module start failure, please return the error. NotifyStartSuccess is triggered after all the active modules are Start method success. Shutdown   Add the configuration into the skywalking-rover/configs/rover_configs.yaml. It should same as the config declaration. Register the module into skywalking-rover/pkg/boot/register.go. Add the Unit test or E2E testing for testing the module is works well. Write the documentation under the skywalking-rover/docs/en directory and add it to the documentation index file skywalking-rover/docs/menu.yml.  ","title":"How to write a new module?","url":"/docs/skywalking-rover/v0.6.0/en/guides/contribution/how-to-write-module/"},{"content":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following contents would guide you. Let\u0026rsquo;s use memory-queue as an example of how to write a plugin.\n  Choose the plugin category. As the memory-queue is a queue, the plugin should be written in the skywalking-satellite/plugins/queue directory. So we create a new directory called memory as the plugin codes space.\n  Implement the interface in the skywalking-satellite/plugins/queue/api. Each plugin has 3 common methods, which are Name(), Description(), DefaultConfig().\n Name() returns the unique name in the plugin category. Description() returns the description of the plugin, which would be used to generate the plugin documentation. DefaultConfig() returns the default plugin config with yaml pattern, which would be used as the default value in the plugin struct and to generate the plugin documentation.  type Queue struct { config.CommonFields // config  EventBufferSize int `mapstructure:\u0026#34;event_buffer_size\u0026#34;` // The maximum buffer event size.  // components  buffer *goconcurrentqueue.FixedFIFO } func (q *Queue) Name() string { return Name } func (q *Queue) Description() string { return \u0026#34;this is a memory queue to buffer the input event.\u0026#34; } func (q *Queue) DefaultConfig() string { return ` # The maximum buffer event size. event_buffer_size: 5000   Add unit test.\n  Generate the plugin docs.\n  make gen-docs ","title":"How to write a new plugin?","url":"/docs/skywalking-satellite/latest/en/guides/contribution/how-to-write-plugin/"},{"content":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following contents would guide you. Let\u0026rsquo;s use memory-queue as an example of how to write a plugin.\n  Choose the plugin category. As the memory-queue is a queue, the plugin should be written in the skywalking-satellite/plugins/queue directory. So we create a new directory called memory as the plugin codes space.\n  Implement the interface in the skywalking-satellite/plugins/queue/api. Each plugin has 3 common methods, which are Name(), Description(), DefaultConfig().\n Name() returns the unique name in the plugin category. Description() returns the description of the plugin, which would be used to generate the plugin documentation. DefaultConfig() returns the default plugin config with yaml pattern, which would be used as the default value in the plugin struct and to generate the plugin documentation.  type Queue struct { config.CommonFields // config  EventBufferSize int `mapstructure:\u0026#34;event_buffer_size\u0026#34;` // The maximum buffer event size.  
// components  buffer *goconcurrentqueue.FixedFIFO } func (q *Queue) Name() string { return Name } func (q *Queue) Description() string { return \u0026#34;this is a memory queue to buffer the input event.\u0026#34; } func (q *Queue) DefaultConfig() string { return ` # The maximum buffer event size. event_buffer_size: 5000   Add unit test.\n  Generate the plugin docs.\n  make gen-docs ","title":"How to write a new plugin?","url":"/docs/skywalking-satellite/next/en/guides/contribution/how-to-write-plugin/"},{"content":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following contents would guide you. Let\u0026rsquo;s use memory-queue as an example of how to write a plugin.\n  Choose the plugin category. As the memory-queue is a queue, the plugin should be written in the skywalking-satellite/plugins/queue directory. So we create a new directory called memory as the plugin codes space.\n  Implement the interface in the skywalking-satellite/plugins/queue/api. Each plugin has 3 common methods, which are Name(), Description(), DefaultConfig().\n Name() returns the unique name in the plugin category. Description() returns the description of the plugin, which would be used to generate the plugin documentation. DefaultConfig() returns the default plugin config with yaml pattern, which would be used as the default value in the plugin struct and to generate the plugin documentation.  type Queue struct { config.CommonFields // config  EventBufferSize int `mapstructure:\u0026#34;event_buffer_size\u0026#34;` // The maximum buffer event size.  // components  buffer *goconcurrentqueue.FixedFIFO } func (q *Queue) Name() string { return Name } func (q *Queue) Description() string { return \u0026#34;this is a memory queue to buffer the input event.\u0026#34; } func (q *Queue) DefaultConfig() string { return ` # The maximum buffer event size. event_buffer_size: 5000   Add unit test.\n  Generate the plugin docs.\n  make gen-docs ","title":"How to write a new plugin?","url":"/docs/skywalking-satellite/v1.2.0/en/guides/contribution/how-to-write-plugin/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/latest/en/api/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/next/en/api/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.0.0/en/protocols/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [{ \u0026#34;language\u0026#34;: \u0026#34;Lua\u0026#34; }] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: 
\u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"HTTP API Protocol","url":"/docs/main/v9.0.0/en/protocols/http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.1.0/en/protocols/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: 
[{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"HTTP API Protocol","url":"/docs/main/v9.1.0/en/protocols/http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
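As a rough, unofficial sketch of calling the browser performance endpoint documented below: it posts a perfData object to a locally running receiver on the default port 12800 and prints the response status (HTTP 204 is the documented success status). The payload values simply mirror the sample on this page.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Performance-data payload copied from the sample below; all timings are placeholder values.
	payload := []byte(`{
	  "service": "web",
	  "serviceVersion": "v0.0.1",
	  "pagePath": "/index.html",
	  "redirectTime": 10,
	  "dnsTime": 10,
	  "ttfbTime": 10,
	  "tcpTime": 10,
	  "transTime": 10,
	  "domAnalysisTime": 10,
	  "fptTime": 10,
	  "domReadyTime": 10,
	  "loadPageTime": 10
	}`)

	resp, err := http.Post("http://localhost:12800/browser/perfData",
		"application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("perfData status:", resp.StatusCode) // 204 is the documented success status
}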
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.2.0/en/protocols/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
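A minimal sketch of reporting one trace segment to the /v3/segment endpoint documented below, assuming an OAP receiver on the default localhost:12800. The identifiers, timestamps, and componentId are taken from the sample payloads on this page and are illustrative only.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// A single-span segment; IDs and timestamps come from the sample below and carry no real meaning.
	segment := []byte(`{
	  "traceId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
	  "traceSegmentId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
	  "service": "User_Service_Name",
	  "serviceInstance": "User_Service_Instance_Name",
	  "spans": [{
	    "operationName": "/ingress",
	    "startTime": 1588664577013,
	    "endTime": 1588664577028,
	    "spanType": "Entry",
	    "spanId": 0,
	    "parentSpanId": -1,
	    "isError": false,
	    "spanLayer": "Http",
	    "componentId": 6000
	  }]
	}`)

	resp, err := http.Post("http://localhost:12800/v3/segment",
		"application/json", bytes.NewReader(segment))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("segment report status:", resp.StatusCode)
}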
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: 
[{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"HTTP API Protocol","url":"/docs/main/v9.2.0/en/protocols/http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
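Before the browser protocol below, a small sketch of the instance-property report shown above: it posts a properties array to /v3/management/reportProperties on the default receiver address; the "language"/"Lua" pair is just the sample value from this page.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Properties payload mirroring the example above; key/value are sample data.
	props := []byte(`{
	  "service": "User Service Name",
	  "serviceInstance": "User Service Instance Name",
	  "properties": [
	    { "key": "language", "value": "Lua" }
	  ]
	}`)

	resp, err := http.Post("http://localhost:12800/v3/management/reportProperties",
		"application/json", bytes.NewReader(props))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// The documented response body is an empty JSON object.
	fmt.Println("reportProperties status:", resp.StatusCode)
}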
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.3.0/en/protocols/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
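The service-instance ping documented below is meant to be sent periodically while the instance is alive. This sketch assumes the default receiver address and an arbitrary 30-second interval; the service names are the placeholders used throughout this page.

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"
)

func main() {
	body := []byte(`{
	  "service": "User Service Name",
	  "serviceInstance": "User Service Instance Name"
	}`)

	// Send a keep-alive ping every 30 seconds; the interval is an arbitrary choice for this sketch.
	for range time.Tick(30 * time.Second) {
		resp, err := http.Post("http://localhost:12800/v3/management/keepAlive",
			"application/json", bytes.NewReader(body))
		if err != nil {
			fmt.Println("keepAlive failed:", err)
			continue
		}
		resp.Body.Close()
		fmt.Println("keepAlive status:", resp.StatusCode)
	}
}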
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: 
[{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"HTTP API Protocol","url":"/docs/main/v9.3.0/en/protocols/http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.4.0/en/api/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
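A sketch of the bulk error-log report documented below, built from a helper struct whose fields mirror the sample payload; the struct is a convenience for this example only, not an official client type.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// BrowserErrorLog mirrors the fields shown in the error-log samples on this page.
type BrowserErrorLog struct {
	UniqueID       string `json:"uniqueId"`
	Service        string `json:"service"`
	ServiceVersion string `json:"serviceVersion"`
	PagePath       string `json:"pagePath"`
	Category       string `json:"category"`
	Message        string `json:"message"`
	Line           int    `json:"line"`
	Col            int    `json:"col"`
	Stack          string `json:"stack"`
	ErrorURL       string `json:"errorUrl"`
}

func main() {
	logs := []BrowserErrorLog{
		{UniqueID: "55ec6178-3fb7-43ef-899c-a26944407b01", Service: "web",
			ServiceVersion: "v0.0.1", PagePath: "/index.html", Category: "ajax",
			Message: "error", Line: 1, Col: 1, Stack: "error", ErrorURL: "/index.html"},
	}

	payload, err := json.Marshal(logs)
	if err != nil {
		panic(err)
	}

	resp, err := http.Post("http://localhost:12800/browser/errorLogs",
		"application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("errorLogs status:", resp.StatusCode) // 204 is the documented success status
}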
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.5.0/en/api/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.6.0/en/api/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.7.0/en/api/browser-http-api-protocol/"},{"content":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. 
With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","title":"HTTP Restful URI recognition","url":"/docs/main/latest/en/setup/ai-pipeline/http-restful-uri-pattern/"},{"content":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  
rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","title":"HTTP Restful URI recognition","url":"/docs/main/next/en/setup/ai-pipeline/http-restful-uri-pattern/"},{"content":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. No configuration to set periods of feedRawData and fetchAllPatterns services.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","title":"HTTP Restful URI recognition","url":"/docs/main/v9.5.0/en/setup/ai-pipeline/http-restful-uri-pattern/"},{"content":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. 
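As a concrete illustration of this endpoint grouping, the sketch below collapses numeric path segments into a shared placeholder, so /users/123/orders and /users/456/orders become one endpoint pattern. The numeric-segment heuristic and the {var} placeholder are illustrative assumptions; an actual recognition server may apply trained models or other rules as described here.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// numericSegment is a simple heuristic for parameter-like path segments.
var numericSegment = regexp.MustCompile(`^[0-9]+$`)

// groupURI replaces numeric path segments with a placeholder so that
// different parameter values map to the same endpoint pattern.
func groupURI(uri string) string {
	parts := strings.Split(uri, "/")
	for i, p := range parts {
		if numericSegment.MatchString(p) {
			parts[i] = "{var}"
		}
	}
	return strings.Join(parts, "/")
}

func main() {
	for _, uri := range []string{"/users/123/orders", "/users/456/orders", "/health"} {
		fmt.Printf("%s -> %s\n", uri, groupURI(uri))
	}
}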
With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","title":"HTTP Restful URI recognition","url":"/docs/main/v9.6.0/en/setup/ai-pipeline/http-restful-uri-pattern/"},{"content":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  
rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","title":"HTTP Restful URI recognition","url":"/docs/main/v9.7.0/en/setup/ai-pipeline/http-restful-uri-pattern/"},{"content":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s implementation.\nIt utilizes the -toolexec flag during Golang compilation to introduce custom programs that intercept all original files in the compilation stage. This allows for the modification or addition of files to be completed seamlessly.\nToolchain in Golang The -toolexec flag in Golang is a powerful feature that can be used during stages such as build, test, and others. When this flag is used, developers can provide a custom program or script to replace the default go tools functionality. This offers greater flexibility and control over the build, test, or analysis processes.\nWhen passing this flag during a go build, it can intercept the execution flow of commands such as compile, asm, and link, which are required during Golang\u0026rsquo;s compilation process. These commands are also referred to as the toolchain within Golang.\nInformation about the Toolchain The following command demonstrates the parameter information for the specified -toolexec program when it is invoked:\n/usr/bin/skywalking-go /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile -o /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/_pkg_.a -trimpath /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011=\u0026gt; -p runtime -std -+ -buildid zSeDyjJh0lgXlIqBZScI/zSeDyjJh0lgXlIqBZScI -goversion go1.19.2 -symabis /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/symabis -c=4 -nolocalimports -importcfg /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/importcfg -pack -asmhdr /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/go_asm.h /usr/local/opt/go/libexec/src/runtime/alg.go /usr/local/opt/go/libexec/src/runtime/asan0.go ... The code above demonstrates the parameters used when a custom program is executed, which mainly includes the following information:\n Current toolchain tool: In this example, it is a compilation tool with the path: /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile. Target file of the tool: The final target file that the current tool needs to generate. Package information: The module package path information being compiled, which is the parameter value of the -p flag. The current package path is runtime. Temporary directory address: For each compilation, the Go program would generate a corresponding temporary directory. 
This directory contains all the temporary files required for the compilation. Files to be compiled: Many .go file paths can be seen at the end of the command, which are the file path list of the module that needs to be compiled.  Toolchain with SkyWalking Go Agent SkyWalking Go Agent works by intercepting the compile program through the toolchain and making changes to the program based on the information above. The main parts include:\n AST: Using AST to parse and manipulate the codes. File copying/generation: Copy or generate files to the temporary directory required for the compilation, and add file path addresses when the compilation command is executed. Proxy command execution: After completing the modification of the specified package, the new codes are weaved into the target.  Hybrid Compilation After enhancing the program with SkyWalking Go Agent, the following parts of the program will be enhanced:\n SkyWalking Go: The agent core part of the code would be dynamically copied to the agent path for plugin use. Plugins: Enhance the specified framework code according to the enhancement rules of the plugins. Runtime: Enhance the runtime package in Go, including extensions for goroutines and other content. Main: Enhance the main package during system startup, for stating the system with Agent.  ","title":"Hybrid Compilation","url":"/docs/skywalking-go/latest/en/concepts-and-designs/hybrid-compilation/"},{"content":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s implementation.\nIt utilizes the -toolexec flag during Golang compilation to introduce custom programs that intercept all original files in the compilation stage. This allows for the modification or addition of files to be completed seamlessly.\nToolchain in Golang The -toolexec flag in Golang is a powerful feature that can be used during stages such as build, test, and others. When this flag is used, developers can provide a custom program or script to replace the default go tools functionality. This offers greater flexibility and control over the build, test, or analysis processes.\nWhen passing this flag during a go build, it can intercept the execution flow of commands such as compile, asm, and link, which are required during Golang\u0026rsquo;s compilation process. These commands are also referred to as the toolchain within Golang.\nInformation about the Toolchain The following command demonstrates the parameter information for the specified -toolexec program when it is invoked:\n/usr/bin/skywalking-go /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile -o /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/_pkg_.a -trimpath /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011=\u0026gt; -p runtime -std -+ -buildid zSeDyjJh0lgXlIqBZScI/zSeDyjJh0lgXlIqBZScI -goversion go1.19.2 -symabis /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/symabis -c=4 -nolocalimports -importcfg /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/importcfg -pack -asmhdr /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/go_asm.h /usr/local/opt/go/libexec/src/runtime/alg.go /usr/local/opt/go/libexec/src/runtime/asan0.go ... The code above demonstrates the parameters used when a custom program is executed, which mainly includes the following information:\n Current toolchain tool: In this example, it is a compilation tool with the path: /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile. 
Target file of the tool: The final target file that the current tool needs to generate. Package information: The module package path information being compiled, which is the parameter value of the -p flag. The current package path is runtime. Temporary directory address: For each compilation, the Go program would generate a corresponding temporary directory. This directory contains all the temporary files required for the compilation. Files to be compiled: Many .go file paths can be seen at the end of the command, which are the file path list of the module that needs to be compiled.  Toolchain with SkyWalking Go Agent SkyWalking Go Agent works by intercepting the compile program through the toolchain and making changes to the program based on the information above. The main parts include:\n AST: Using AST to parse and manipulate the codes. File copying/generation: Copy or generate files to the temporary directory required for the compilation, and add file path addresses when the compilation command is executed. Proxy command execution: After completing the modification of the specified package, the new codes are weaved into the target.  Hybrid Compilation After enhancing the program with SkyWalking Go Agent, the following parts of the program will be enhanced:\n SkyWalking Go: The agent core part of the code would be dynamically copied to the agent path for plugin use. Plugins: Enhance the specified framework code according to the enhancement rules of the plugins. Runtime: Enhance the runtime package in Go, including extensions for goroutines and other content. Main: Enhance the main package during system startup, for stating the system with Agent.  ","title":"Hybrid Compilation","url":"/docs/skywalking-go/next/en/concepts-and-designs/hybrid-compilation/"},{"content":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s implementation.\nIt utilizes the -toolexec flag during Golang compilation to introduce custom programs that intercept all original files in the compilation stage. This allows for the modification or addition of files to be completed seamlessly.\nToolchain in Golang The -toolexec flag in Golang is a powerful feature that can be used during stages such as build, test, and others. When this flag is used, developers can provide a custom program or script to replace the default go tools functionality. This offers greater flexibility and control over the build, test, or analysis processes.\nWhen passing this flag during a go build, it can intercept the execution flow of commands such as compile, asm, and link, which are required during Golang\u0026rsquo;s compilation process. 
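A minimal pass-through sketch of the -toolexec interception just described: the wrapper receives the real toolchain binary (compile, asm, link, ...) and its arguments, can inspect or rewrite them, and then must execute the original tool so the build proceeds. It only logs the invocation and is not the SkyWalking Go Agent; a real agent would rewrite the argument list and source files before delegating.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: toolexec-logger <tool> [args...]")
		os.Exit(2)
	}

	// os.Args[1] is the real toolchain binary, os.Args[2:] are its original arguments.
	tool := os.Args[1]
	args := os.Args[2:]
	fmt.Fprintf(os.Stderr, "toolexec: %s with %d args\n", tool, len(args))

	// A real agent would parse flags such as -p and -o here, add or replace
	// source files, and then invoke the tool with the modified argument list.
	cmd := exec.Command(tool, args...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}

Built as, say, /usr/local/bin/toolexec-logger (the name and path are arbitrary), it would be wired in with: go build -toolexec /usr/local/bin/toolexec-logger ./...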
These commands are also referred to as the toolchain within Golang.\nInformation about the Toolchain The following command demonstrates the parameter information for the specified -toolexec program when it is invoked:\n/usr/bin/skywalking-go /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile -o /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/_pkg_.a -trimpath /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011=\u0026gt; -p runtime -std -+ -buildid zSeDyjJh0lgXlIqBZScI/zSeDyjJh0lgXlIqBZScI -goversion go1.19.2 -symabis /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/symabis -c=4 -nolocalimports -importcfg /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/importcfg -pack -asmhdr /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/go_asm.h /usr/local/opt/go/libexec/src/runtime/alg.go /usr/local/opt/go/libexec/src/runtime/asan0.go ... The code above demonstrates the parameters used when a custom program is executed, which mainly includes the following information:\n Current toolchain tool: In this example, it is a compilation tool with the path: /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile. Target file of the tool: The final target file that the current tool needs to generate. Package information: The module package path information being compiled, which is the parameter value of the -p flag. The current package path is runtime. Temporary directory address: For each compilation, the Go program would generate a corresponding temporary directory. This directory contains all the temporary files required for the compilation. Files to be compiled: Many .go file paths can be seen at the end of the command, which are the file path list of the module that needs to be compiled.  Toolchain with SkyWalking Go Agent SkyWalking Go Agent works by intercepting the compile program through the toolchain and making changes to the program based on the information above. The main parts include:\n AST: Using AST to parse and manipulate the codes. File copying/generation: Copy or generate files to the temporary directory required for the compilation, and add file path addresses when the compilation command is executed. Proxy command execution: After completing the modification of the specified package, the new codes are weaved into the target.  Hybrid Compilation After enhancing the program with SkyWalking Go Agent, the following parts of the program will be enhanced:\n SkyWalking Go: The agent core part of the code would be dynamically copied to the agent path for plugin use. Plugins: Enhance the specified framework code according to the enhancement rules of the plugins. Runtime: Enhance the runtime package in Go, including extensions for goroutines and other content. Main: Enhance the main package during system startup, for stating the system with Agent.  ","title":"Hybrid Compilation","url":"/docs/skywalking-go/v0.4.0/en/concepts-and-designs/hybrid-compilation/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/latest/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/next/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occured because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.0.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occured because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.1.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.2.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.3.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.4.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.5.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.6.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.7.0/en/faq/install_agent_on_websphere/"},{"content":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description Default Value     skywalking_agent.enable Enable skywalking_agent extension or not. Off   skywalking_agent.log_file Log file path. /tmp/skywalking-agent.log   skywalking_agent.log_level Log level: one of OFF, TRACE, DEBUG, INFO, WARN, ERROR. INFO   skywalking_agent.runtime_dir Skywalking agent runtime directory. /tmp/skywalking-agent   skywalking_agent.server_addr Address of skywalking oap server. Only available when reporter_type is grpc. 127.0.0.1:11800   skywalking_agent.service_name Application service name. hello-skywalking   skywalking_agent.skywalking_version Skywalking version, 8 or 9. 8   skywalking_agent.authentication Skywalking authentication token, let it empty if the backend isn\u0026rsquo;t enabled. Only available when reporter_type is grpc.    skywalking_agent.worker_threads Skywalking worker threads, 0 will auto set as the cpu core size. 0   skywalking_agent.enable_tls Wether to enable tls for gPRC, default is false. 
Only available when reporter_type is grpc. Off   skywalking_agent.ssl_trusted_ca_path The gRPC SSL trusted ca file. Only available when reporter_type is grpc.    skywalking_agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.heartbeat_period Agent heartbeat report period. Unit, second. 30   skywalking_agent.properties_report_period_factor The agent sends the instance properties to the backend every heartbeat_period * properties_report_period_factor seconds. 10   skywalking_agent.enable_zend_observer Whether to use zend observer instead of zend_execute_ex to hook the functions, this feature is only available for PHP8+. Off   skywalking_agent.reporter_type Reporter type, optional values are grpc and kafka. grpc   skywalking_agent.kafka_bootstrap_servers A list of host/port pairs to use for connect to the Kafka cluster. Only available when reporter_type is kafka.    skywalking_agent.kafka_producer_config Configure Kafka Producer configuration in JSON format {\u0026quot;key\u0026quot;: \u0026quot;value}. Only available when reporter_type is kafka. {}    ","title":"INI Settings","url":"/docs/skywalking-php/latest/en/configuration/ini-settings/"},{"content":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description Default Value     skywalking_agent.enable Enable skywalking_agent extension or not. Off   skywalking_agent.log_file Log file path. /tmp/skywalking-agent.log   skywalking_agent.log_level Log level: one of OFF, TRACE, DEBUG, INFO, WARN, ERROR. INFO   skywalking_agent.runtime_dir Skywalking agent runtime directory. /tmp/skywalking-agent   skywalking_agent.server_addr Address of skywalking oap server. Only available when reporter_type is grpc. 127.0.0.1:11800   skywalking_agent.service_name Application service name. hello-skywalking   skywalking_agent.skywalking_version Skywalking version, 8 or 9. 8   skywalking_agent.authentication Skywalking authentication token, let it empty if the backend isn\u0026rsquo;t enabled. Only available when reporter_type is grpc.    skywalking_agent.worker_threads Skywalking worker threads, 0 will auto set as the cpu core size. 0   skywalking_agent.enable_tls Wether to enable tls for gPRC, default is false. Only available when reporter_type is grpc. Off   skywalking_agent.ssl_trusted_ca_path The gRPC SSL trusted ca file. Only available when reporter_type is grpc.    skywalking_agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.heartbeat_period Agent heartbeat report period. Unit, second. 30   skywalking_agent.properties_report_period_factor The agent sends the instance properties to the backend every heartbeat_period * properties_report_period_factor seconds. 10   skywalking_agent.enable_zend_observer Whether to use zend observer instead of zend_execute_ex to hook the functions, this feature is only available for PHP8+. Off   skywalking_agent.reporter_type Reporter type, optional values are grpc and kafka. 
grpc   skywalking_agent.kafka_bootstrap_servers A list of host/port pairs to use for connect to the Kafka cluster. Only available when reporter_type is kafka.    skywalking_agent.kafka_producer_config Configure Kafka Producer configuration in JSON format {\u0026quot;key\u0026quot;: \u0026quot;value}. Only available when reporter_type is kafka. {}   skywalking_agent.inject_context Whether to enable automatic injection of skywalking context variables (such as SW_TRACE_ID). For php-fpm mode, it will be injected into the $_SERVER variable. For swoole mode, it will be injected into the $request-\u0026gt;server variable. Off    ","title":"INI Settings","url":"/docs/skywalking-php/next/en/configuration/ini-settings/"},{"content":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description Default Value     skywalking_agent.enable Enable skywalking_agent extension or not. Off   skywalking_agent.log_file Log file path. /tmp/skywalking-agent.log   skywalking_agent.log_level Log level: one of OFF, TRACE, DEBUG, INFO, WARN, ERROR. INFO   skywalking_agent.runtime_dir Skywalking agent runtime directory. /tmp/skywalking-agent   skywalking_agent.server_addr Address of skywalking oap server. Only available when reporter_type is grpc. 127.0.0.1:11800   skywalking_agent.service_name Application service name. hello-skywalking   skywalking_agent.skywalking_version Skywalking version, 8 or 9. 8   skywalking_agent.authentication Skywalking authentication token, let it empty if the backend isn\u0026rsquo;t enabled. Only available when reporter_type is grpc.    skywalking_agent.worker_threads Skywalking worker threads, 0 will auto set as the cpu core size. 0   skywalking_agent.enable_tls Wether to enable tls for gPRC, default is false. Only available when reporter_type is grpc. Off   skywalking_agent.ssl_trusted_ca_path The gRPC SSL trusted ca file. Only available when reporter_type is grpc.    skywalking_agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.heartbeat_period Agent heartbeat report period. Unit, second. 30   skywalking_agent.properties_report_period_factor The agent sends the instance properties to the backend every heartbeat_period * properties_report_period_factor seconds. 10   skywalking_agent.enable_zend_observer Whether to use zend observer instead of zend_execute_ex to hook the functions, this feature is only available for PHP8+. Off   skywalking_agent.reporter_type Reporter type, optional values are grpc and kafka. grpc   skywalking_agent.kafka_bootstrap_servers A list of host/port pairs to use for connect to the Kafka cluster. Only available when reporter_type is kafka.    skywalking_agent.kafka_producer_config Configure Kafka Producer configuration in JSON format {\u0026quot;key\u0026quot;: \u0026quot;value}. Only available when reporter_type is kafka. {}    ","title":"INI Settings","url":"/docs/skywalking-php/v0.7.0/en/configuration/ini-settings/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. 
Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/latest/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/next/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up at first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would start up at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only one single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. 
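For example, from the root of an unpacked OAP distribution this is typically a one-off invocation (the bin/ layout shown here is the conventional one; adjust the path to your installation):

```shell
# Run a single OAP instance in Init mode; it initializes the storage and then exits.
bin/oapServiceInit.sh
```
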
You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.0.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.1.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.2.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. 
This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.3.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.4.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.5.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. 
Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.6.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.7.0/en/setup/backend/backend-init-mode/"},{"content":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our website, currently we have pre-built binaries for macOS, Linux and Windows. Extract the tarball and add bin/\u0026lt;os\u0026gt;/e2e to you PATH environment variable.\nInstall from source codes If you want to try some features that are not released yet, you can compile from the source code.\nmkdir skywalking-infra-e2e \u0026amp;\u0026amp; cd skywalking-infra-e2e git clone https://github.com/apache/skywalking-infra-e2e.git . 
make build Then add the binary in bin/\u0026lt;os\u0026gt;/e2e to your PATH.\nInstall via go install If you already have Go SDK installed, you can also directly install e2e via go install.\ngo install github.com/apache/skywalking-infra-e2e/cmd/e2e@\u0026lt;revision\u0026gt; Note that installation via go install is only supported after Git commit 2a33478 so you can only go install a revision afterwards.\n","title":"Install SkyWalking Infra E2E","url":"/docs/skywalking-infra-e2e/latest/en/setup/install/"},{"content":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our website, currently we have pre-built binaries for macOS, Linux and Windows. Extract the tarball and add bin/\u0026lt;os\u0026gt;/e2e to you PATH environment variable.\nInstall from source codes If you want to try some features that are not released yet, you can compile from the source code.\nmkdir skywalking-infra-e2e \u0026amp;\u0026amp; cd skywalking-infra-e2e git clone https://github.com/apache/skywalking-infra-e2e.git . make build Then add the binary in bin/\u0026lt;os\u0026gt;/e2e to your PATH.\nInstall via go install If you already have Go SDK installed, you can also directly install e2e via go install.\ngo install github.com/apache/skywalking-infra-e2e/cmd/e2e@\u0026lt;revision\u0026gt; Note that installation via go install is only supported after Git commit 2a33478 so you can only go install a revision afterwards.\n","title":"Install SkyWalking Infra E2E","url":"/docs/skywalking-infra-e2e/next/en/setup/install/"},{"content":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our website, currently we have pre-built binaries for macOS, Linux and Windows. Extract the tarball and add bin/\u0026lt;os\u0026gt;/e2e to you PATH environment variable.\nInstall from source codes If you want to try some features that are not released yet, you can compile from the source code.\nmkdir skywalking-infra-e2e \u0026amp;\u0026amp; cd skywalking-infra-e2e git clone https://github.com/apache/skywalking-infra-e2e.git . make build Then add the binary in bin/\u0026lt;os\u0026gt;/e2e to your PATH.\nInstall via go install If you already have Go SDK installed, you can also directly install e2e via go install.\ngo install github.com/apache/skywalking-infra-e2e/cmd/e2e@\u0026lt;revision\u0026gt; Note that installation via go install is only supported after Git commit 2a33478 so you can only go install a revision afterwards.\n","title":"Install SkyWalking Infra E2E","url":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/install/"},{"content":"Installation Banyand is the daemon server of the BanyanDB database. This section will show several paths installing it in your environment.\nGet Binaries Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... 
chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the complier\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls banyand/build/bin bydbctl Setup Banyand Banyand shows its available commands and arguments by\n$ ./banyand-server ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ BanyanDB, as an observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data Usage: [command] Available Commands: completion generate the autocompletion script for the specified shell help Help about any command liaison Run as the liaison server meta Run as the meta server standalone Run as the standalone server storage Run as the storage server Flags: -h, --help help for this command -v, --version version for this command Use \u0026#34; [command] --help\u0026#34; for more information about a command. Banyand is running as a standalone server by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 if no errors occurred.\nSetup Multiple Banyand as Cluster Firstly, you need to setup a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster. The etcd cluster can be setup by the etcd installation guide. 
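As an optional sanity check before starting any BanyanDB nodes (not part of the official guide; the endpoints reuse the placeholder addresses from the cluster example below), you can confirm the etcd members are reachable with etcdctl:

```shell
# List the status (including the server version) of each etcd member.
ETCDCTL_API=3 etcdctl \
  --endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \
  endpoint status --write-out=table
```
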
The etcd version should be v3.1 or above.\nThen, you can start the metadata module by\nConsidering the etcd cluster is spread across three nodes with the addresses `10.0.0.1:2379`, `10.0.0.2:2379`, and `10.0.0.3:2379`, Data nodes and liaison nodes are running as independent processes by ```shell $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Docker \u0026amp; Kubernetes The docker image of banyandb is available on Docker Hub.\nIf you want to onboard banyandb to the Kubernetes, you can refer to the banyandb-helm.\n","title":"Installation","url":"/docs/skywalking-banyandb/latest/installation/"},{"content":"Installation Banyand is the daemon server of the BanyanDB database. This section will show several paths installing it in your environment.\nGet Binaries Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the complier\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  
Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls banyand/build/bin bydbctl Setup Banyand Banyand shows its available commands and arguments by\n$ ./banyand-server ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ BanyanDB, as an observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data Usage: [command] Available Commands: completion generate the autocompletion script for the specified shell help Help about any command liaison Run as the liaison server meta Run as the meta server standalone Run as the standalone server storage Run as the storage server Flags: -h, --help help for this command -v, --version version for this command Use \u0026#34; [command] --help\u0026#34; for more information about a command. Banyand is running as a standalone server by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 if no errors occurred.\nSetup Multiple Banyand as Cluster Firstly, you need to setup a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster. The etcd cluster can be setup by the etcd installation guide. The etcd version should be v3.1 or above.\nThen, you can start the metadata module by\nConsidering the etcd cluster is spread across three nodes with the addresses `10.0.0.1:2379`, `10.0.0.2:2379`, and `10.0.0.3:2379`, Data nodes and liaison nodes are running as independent processes by ```shell $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Docker \u0026amp; Kubernetes The docker image of banyandb is available on Docker Hub.\nIf you want to onboard banyandb to the Kubernetes, you can refer to the banyandb-helm.\n","title":"Installation","url":"/docs/skywalking-banyandb/v0.5.0/installation/"},{"content":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the SkyWalking Python agent via various ways described next.\n Already installed? 
Check out easy ways to start the agent in your application\n  Non-intrusive  | Intrusive  | Containerization\n  All available configurations are listed here\n Important Note on Different Reporter Protocols Currently only gRPC protocol fully supports all available telemetry capabilities in the Python agent.\nWhile gRPC is highly recommended, we provide alternative protocols to suit your production requirements.\nPlease refer to the table below before deciding which report protocol suits best for you.\n   Reporter Protocol Trace Reporter Log Reporter Meter Reporter Profiling     gRPC ✅ ✅ ✅ ✅   HTTP ✅ ✅ ❌ ❌   Kafka ✅ ✅ ✅ ❌    From PyPI  If you want to try out the latest features that are not released yet, please refer to this guide to build from sources.\n The Python agent module is published to PyPI, from where you can use pip to install:\n# Install the latest version, using the default gRPC protocol to report data to OAP pip install \u0026#34;apache-skywalking\u0026#34; # Install support for every protocol (gRPC, HTTP, Kafka) pip install \u0026#34;apache-skywalking[all]\u0026#34; # Install the latest version, using the http protocol to report data to OAP pip install \u0026#34;apache-skywalking[http]\u0026#34; # Install the latest version, using the kafka protocol to report data to OAP pip install \u0026#34;apache-skywalking[kafka]\u0026#34; # Install a specific version x.y.z # pip install apache-skywalking==x.y.z pip install apache-skywalking==0.1.0 # For example, install version 0.1.0 no matter what the latest version is From Docker Hub SkyWalking Python agent provides convenient dockerfile and images for easy integration utilizing its auto-bootstrap capability.\nSimply pull SkyWalking Python image from Docker Hub based on desired agent version, protocol and Python version.\nFROMapache/skywalking-python:0.8.0-grpc-py3.10# ... build your Python application# If you prefer compact images (built from official Python slim image)FROMapache/skywalking-python:0.8.0-grpc-py3.10-slim# ... build your Python applicationThen, You can build your Python application image based on our agent-enabled Python images and start your applications with SkyWalking agent enabled for you. Please refer to our Containerization Guide for further instructions on integration and configuring.\nFrom Source Code Please refer to the How-to-build-from-sources FAQ.\n","title":"Installation","url":"/docs/skywalking-python/latest/en/setup/installation/"},{"content":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the SkyWalking Python agent via various ways described next.\n Already installed? 
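Referring to the PyPI route above, a minimal sketch of installing the agent and starting an application non-intrusively follows. The `sw-python run` entry point and the `SW_AGENT_*` variables reflect the agent's usual configuration mechanism, but treat the service name and backend address as placeholders.

```shell
# Install the agent with the default gRPC reporter
pip install "apache-skywalking"

# Point the agent at an OAP backend and start the application without code changes.
# "your-service" and "oap:11800" are placeholders for this sketch.
export SW_AGENT_NAME=your-service
export SW_AGENT_COLLECTOR_BACKEND_SERVICES=oap:11800
sw-python run python3 app.py
```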
Check out easy ways to start the agent in your application\n  Non-intrusive  | Intrusive  | Containerization\n  All available configurations are listed here\n Important Note on Different Reporter Protocols Currently only gRPC protocol fully supports all available telemetry capabilities in the Python agent.\nWhile gRPC is highly recommended, we provide alternative protocols to suit your production requirements.\nPlease refer to the table below before deciding which report protocol suits best for you.\n   Reporter Protocol Trace Reporter Log Reporter Meter Reporter Profiling     gRPC ✅ ✅ ✅ ✅   HTTP ✅ ✅ ❌ ❌   Kafka ✅ ✅ ✅ ❌    From PyPI  If you want to try out the latest features that are not released yet, please refer to this guide to build from sources.\n The Python agent module is published to PyPI, from where you can use pip to install:\n# Install the latest version, using the default gRPC protocol to report data to OAP pip install \u0026#34;apache-skywalking\u0026#34; # Install support for every protocol (gRPC, HTTP, Kafka) pip install \u0026#34;apache-skywalking[all]\u0026#34; # Install the latest version, using the http protocol to report data to OAP pip install \u0026#34;apache-skywalking[http]\u0026#34; # Install the latest version, using the kafka protocol to report data to OAP pip install \u0026#34;apache-skywalking[kafka]\u0026#34; # Install a specific version x.y.z # pip install apache-skywalking==x.y.z pip install apache-skywalking==0.1.0 # For example, install version 0.1.0 no matter what the latest version is From Docker Hub SkyWalking Python agent provides convenient dockerfile and images for easy integration utilizing its auto-bootstrap capability.\nSimply pull SkyWalking Python image from Docker Hub based on desired agent version, protocol and Python version.\nFROMapache/skywalking-python:0.8.0-grpc-py3.10# ... build your Python application# If you prefer compact images (built from official Python slim image)FROMapache/skywalking-python:0.8.0-grpc-py3.10-slim# ... build your Python applicationThen, You can build your Python application image based on our agent-enabled Python images and start your applications with SkyWalking agent enabled for you. Please refer to our Containerization Guide for further instructions on integration and configuring.\nFrom Source Code Please refer to the How-to-build-from-sources FAQ.\n","title":"Installation","url":"/docs/skywalking-python/next/en/setup/installation/"},{"content":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the SkyWalking Python agent via various ways described next.\n Already installed? 
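For the Docker Hub route, a hedged sketch of building an agent-enabled application image: the base image tag is the one quoted above, while the application file and start command are placeholders.

```shell
# Minimal Dockerfile on top of the agent-enabled Python image shown above.
cat > Dockerfile <<'EOF'
FROM apache/skywalking-python:0.8.0-grpc-py3.10
# Placeholder application; replace with your own sources and entrypoint
COPY app.py /app/app.py
CMD ["python3", "/app/app.py"]
EOF

# Build the application image with the agent bootstrapped for you
docker build -t my-python-app .
```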
Check out easy ways to start the agent in your application\n  Non-intrusive  | Intrusive  | Containerization\n  All available configurations are listed here\n Important Note on Different Reporter Protocols Currently only gRPC protocol fully supports all available telemetry capabilities in the Python agent.\nWhile gRPC is highly recommended, we provide alternative protocols to suit your production requirements.\nPlease refer to the table below before deciding which report protocol suits best for you.\n   Reporter Protocol Trace Reporter Log Reporter Meter Reporter Profiling     gRPC ✅ ✅ ✅ ✅   HTTP ✅ ✅ ❌ ❌   Kafka ✅ ✅ ✅ ❌    From PyPI  If you want to try out the latest features that are not released yet, please refer to this guide to build from sources.\n The Python agent module is published to PyPI, from where you can use pip to install:\n# Install the latest version, using the default gRPC protocol to report data to OAP pip install \u0026#34;apache-skywalking\u0026#34; # Install support for every protocol (gRPC, HTTP, Kafka) pip install \u0026#34;apache-skywalking[all]\u0026#34; # Install the latest version, using the http protocol to report data to OAP pip install \u0026#34;apache-skywalking[http]\u0026#34; # Install the latest version, using the kafka protocol to report data to OAP pip install \u0026#34;apache-skywalking[kafka]\u0026#34; # Install a specific version x.y.z # pip install apache-skywalking==x.y.z pip install apache-skywalking==0.1.0 # For example, install version 0.1.0 no matter what the latest version is From Docker Hub SkyWalking Python agent provides convenient dockerfile and images for easy integration utilizing its auto-bootstrap capability.\nSimply pull SkyWalking Python image from Docker Hub based on desired agent version, protocol and Python version.\nFROMapache/skywalking-python:0.8.0-grpc-py3.10# ... build your Python application# If you prefer compact images (built from official Python slim image)FROMapache/skywalking-python:0.8.0-grpc-py3.10-slim# ... build your Python applicationThen, You can build your Python application image based on our agent-enabled Python images and start your applications with SkyWalking agent enabled for you. Please refer to our Containerization Guide for further instructions on integration and configuring.\nFrom Source Code Please refer to the How-to-build-from-sources FAQ.\n","title":"Installation","url":"/docs/skywalking-python/v1.0.1/en/setup/installation/"},{"content":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. 
Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","title":"Integration Tests","url":"/docs/main/latest/en/guides/it-guide/"},{"content":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","title":"Integration Tests","url":"/docs/main/next/en/guides/it-guide/"},{"content":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. 
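Collected in one place, the Maven invocations described in this guide look like the following; `-Pbackend` is the oap-server example used above, so substitute the profiles of the modules whose ITs you want to run.

```shell
# Unit tests only (classes named IT* / *IT are excluded by maven-surefire-plugin)
./mvnw clean test

# Integration tests for the oap-server modules, skipping unit tests
./mvnw -Pbackend clean verify -DskipUTs=true

# All integration tests, skipping unit tests
./mvnw clean integration-test -DskipUTs=true
```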
Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","title":"Integration Tests","url":"/docs/main/v9.6.0/en/guides/it-guide/"},{"content":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","title":"Integration Tests","url":"/docs/main/v9.7.0/en/guides/it-guide/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. The menu definition supports one and two levels items. 
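The menu definition quoted next is hard to read once flattened. As a rough reconstruction of its shape (indentation, spacing, and item names are inferred from that flattened snippet, so treat this as illustrative only), written as a shell heredoc so it stays pasteable:

```shell
# Illustrative reconstruction of ui-initialized-templates/menu.yaml;
# entries and layer names are taken from the flattened snippet that follows.
cat > menu.yaml <<'EOF'
menus:
  - name: General Service
    icon: general_service
    menus:
      - name: Services
        layer: GENERAL
      - name: Visual Database
        layer: VIRTUAL_DATABASE
      - name: Visual Cache
        layer: VIRTUAL_CACHE
      - name: Visual MQ
        layer: VIRTUAL_MQ
  # ...
  - name: Self Observability
    icon: self_observability
    menus:
      - name: SkyWalking Server
        layer: SO11Y_OAP
      - name: Satellite
        layer: SO11Y_SATELLITE
EOF
```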
The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/latest/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. The menu definition supports one and two levels items. 
The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/next/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack application.\nThe left side menu lists all available supported stack, with default dashboards.\nFollow Official Dashboards menu explores all default dashboards about how to monitor different tech stacks.\nCustom Dashboard Besides, official dashboards, Dashboards provides customization to end users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, log analysis are relative to OAL, MAL, and LAL engines in SkyWalking kernel. You should learn them first too.\nService and All entity type dashboard could be set as root(set this to root), which mean this dashboard would be used as the entrance of its layer. 
If you have multiple root dashboards, UI could choose one randomly(Don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release, set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory, closing tab would LOSE the change permanently.\nSettings Settings provide language, server time zone, and auto-fresh option. These settings are stored in browser local storage. Unless you clear them manually, those would not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.0.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nSettings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.1.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. 
It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nWidget The widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nSettings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. 
If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.2.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nSettings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.3.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. 
Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. 
If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.4.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.5.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. The menu definition supports one and two levels items. 
The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.6.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. The menu definition supports one and two levels items. 
The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.7.0/en/ui/readme/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. 
For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/latest/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/next/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service, because gRPC may be not supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users who are not familiar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.0.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. 
For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.1.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.2.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.3.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. 
For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.4.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.5.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.6.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. 
For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.7.0/en/setup/backend/backend-ip-port/"},{"content":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector to inject the agent sidecar into a pod.\nWhen enabled in a pod\u0026rsquo;s namespace, the injector injects the java agent container at pod creation time using a mutating webhook admission controller. By rendering the java agent to a shared volume, containers within the pod can use the java agent.\nThe following sections describe how to configure the agent, if you want to try it directly, please see Usage for more details.\nInstall Injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nActive the java agent injection We have two granularities here: namespace and pod.\n   Resource Label Enabled value Disabled value     Namespace swck-injection enabled disabled   Pod swck-java-agent-injected \u0026ldquo;true\u0026rdquo; \u0026ldquo;false\u0026rdquo;    The injector is configured with the following logic:\n If either label is disabled, the pod is not injected. If two labels are enabled, the pod is injected.  Follow the next steps to active java agent injection.\n Label the namespace with swck-injection=enabled  $ kubectl label namespace default(your namespace) swck-injection=enabled  Add label swck-java-agent-injected: \u0026quot;true\u0026quot; to the pod, and get the result as below.  $ kubectl get pod -l swck-java-agent-injected=true NAME READY STATUS RESTARTS AGE inject-demo 1/1 Running 0 2d2h The ways to configure the agent The java agent injector supports a precedence order to configure the agent:\n Annotations \u0026gt; SwAgent \u0026gt; Configmap (Deprecated) \u0026gt; Default Configmap (Deprecated)\nAnnotations Annotations are described in kubernetes annotations doc.\nWe support annotations in agent annotations and sidecar annotations.\nSwAgent SwAgent is a Customer Resource defined by SWCK.\nWe support SwAgent in SwAgent usage guide\nConfigmap (Deprecated) Configmap is described in kubernetes configmap doc.\nWe need to use configmap to set agent.config so that we can modify the agent configuration without entering the container.\nIf there are different configmap in the namepsace, you can choose a configmap by setting sidecar annotations; If there is no configmap, the injector will create a default configmap.\nDefault configmap (Deprecated) The injector will create the default configmap to overlay the agent.config in the agent container.\nThe default configmap is shown as below, one is agent.service_name and the string can\u0026rsquo;t be empty; the other is collector.backend_service and it needs to be a legal IP address and port, the other fields need to be guaranteed by users themselves. Users can change it as their default configmap.\ndata: agent.config: | # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. 
collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. To avoid the default configmap deleting by mistake, we use a configmap controller to watch the default configmap. In addition, if the user applies an invalid configuration, such as a malformed backend_service, the controller will use the default configmap.\nConfigure the agent The injector supports two methods to configure agent:\n Only use the default configuration. Use annotations to overlay the default configuration.  Use the default agent configuration After activating the java agent injection, if not set the annotations, the injector will use the default agent configuration directly as below.\ninitContainers: - args: - -c - mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent command: - sh image: apache/skywalking-java-agent:8.16.0-java8 name: inject-skywalking-agent volumeMounts: - mountPath: /sky/agent name: sky-agent volumes: - emptyDir: {} name: sky-agent - configMap: name: skywalking-swck-java-agent-configmap name: java-agent-configmap-volume Use SwAgent to overlay default agent configuration The injector will read the SwAgent CR when pods creating.\nSwAgent CRD basic structure is like:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;There are three kind of configs in SwAgent CR.\n1. label selector and container matcher label selector and container matcher decides which pod and container should be injected.\n   key path description default value     spec.selector label selector for pods which should be effected during injection. if no label selector was set, SwAgent CR config will affect every pod during injection. no default value   spec.containerMatcher container matcher is used to decide which container to be inject during injection. regular expression is supported. default value \u0026lsquo;.*\u0026rsquo; would match any container name. .*    2. injection configuration injection configuration will affect on agent injection behaviour\n   key path description default value     javaSidecar javaSidecar is the configs for init container, which holds agent sdk and take agent sdk to the target containers.    javaSidecar.name the name of the init container. inject-skywalking-agent   javaSidecar.image the image of the init container. apache/skywalking-java-agent:8.16.0-java8   SharedVolumeName SharedVolume is the name of an empty volume which shared by initContainer and target containers. sky-agent   OptionalPlugins Select the optional plugin which needs to be moved to the directory(/plugins). Such as trace,webflux,cloud-gateway-2.1.x. no default value   OptionalReporterPlugins Select the optional reporter plugin which needs to be moved to the directory(/plugins). such as kafka. no default value    3. 
skywalking agent configuration skywalking agent configuration is for agent SDK.\n   key path description default value     javaSidecar.env the env list to be appended to target containers. usually we can use it to setup agent configuration at container level. no default value.    Use annotations to overlay default agent configuration The injector can recognize five kinds of annotations to configure the agent as below.\n1. strategy configuration The strategy configuration is the annotation as below.\n   Annotation key Description Annotation Default value     strategy.skywalking.apache.org/inject.Container Select the injected container, if not set, inject all containers. not set    2. agent configuration The agent configuration is the annotation like agent.skywalking.apache.org/{option}: {value}, and the option support agent.xxx 、osinfo.xxx 、collector.xxx 、 logging.xxx 、statuscheck.xxx 、correlation.xxx 、jvm.xxx 、buffer.xxx 、 profile.xxx 、 meter.xxx 、 log.xxx in agent.config, such as agent.skywalking.apache.org/agent.namespace, agent.skywalking.apache.org/meter.max_meter_size, etc.\n3. plugins configuration The plugins configuration is the annotation like plugins.skywalking.apache.org/{option}: {value}, and the option only support plugin.xxx in the agent.config, such as plugins.skywalking.apache.org/plugin.mount, plugins.skywalking.apache.org/plugin.mongodb.trace_param, etc.\n4. optional plugin configuration The optional plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional.skywalking.apache.org Select the optional plugin which needs to be moved to the directory(/plugins). Users can select several optional plugins by separating from |, such as trace|webflux|cloud-gateway-2.1.x. not set    5. optional reporter plugin configuration The optional reporter plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional-reporter.skywalking.apache.org Select the optional reporter plugin which needs to be moved to the directory(/plugins). Users can select several optional reporter plugins by separating from |, such as kafka. not set    Configure sidecar The injector can recognize the following annotations to configure the sidecar:\n   Annotation key Description Annotation Default value     sidecar.skywalking.apache.org/initcontainer.Name The name of the injected java agent container. inject-skywalking-agent   sidecar.skywalking.apache.org/initcontainer.Image The container image of the injected java agent container. apache/skywalking-java-agent:8.16.0-java8   sidecar.skywalking.apache.org/initcontainer.Command The command of the injected java agent container. sh   sidecar.skywalking.apache.org/initcontainer.args.Option The args option of the injected java agent container. -c   sidecar.skywalking.apache.org/initcontainer.args.Command The args command of the injected java agent container. mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent   sidecar.skywalking.apache.org/initcontainer.resources.limits The resources limits of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/initcontainer.resources.requests The resources requests of the injected java agent container. 
You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/sidecarVolume.Name The name of sidecar Volume. sky-agent   sidecar.skywalking.apache.org/sidecarVolumeMount.MountPath Mount path of the agent directory in the injected container. /sky/agent   sidecar.skywalking.apache.org/env.Name Environment Name used by the injected container (application container). JAVA_TOOL_OPTIONS   sidecar.skywalking.apache.org/env.Value Environment variables used by the injected container (application container). -javaagent:/sky/agent/skywalking-agent.jar    The ways to get the final injected agent\u0026rsquo;s configuration Please see javaagent introduction for details.\n","title":"Java agent injector Manual","url":"/docs/skywalking-swck/latest/java-agent-injector/"},{"content":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector to inject the agent sidecar into a pod.\nWhen enabled in a pod\u0026rsquo;s namespace, the injector injects the java agent container at pod creation time using a mutating webhook admission controller. By rendering the java agent to a shared volume, containers within the pod can use the java agent.\nThe following sections describe how to configure the agent, if you want to try it directly, please see Usage for more details.\nInstall Injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nActive the java agent injection We have two granularities here: namespace and pod.\n   Resource Label Enabled value Disabled value     Namespace swck-injection enabled disabled   Pod swck-java-agent-injected \u0026ldquo;true\u0026rdquo; \u0026ldquo;false\u0026rdquo;    The injector is configured with the following logic:\n If either label is disabled, the pod is not injected. If two labels are enabled, the pod is injected.  Follow the next steps to active java agent injection.\n Label the namespace with swck-injection=enabled  $ kubectl label namespace default(your namespace) swck-injection=enabled  Add label swck-java-agent-injected: \u0026quot;true\u0026quot; to the pod, and get the result as below.  
$ kubectl get pod -l swck-java-agent-injected=true NAME READY STATUS RESTARTS AGE inject-demo 1/1 Running 0 2d2h The ways to configure the agent The java agent injector supports a precedence order to configure the agent:\n Annotations \u0026gt; SwAgent \u0026gt; Configmap (Deprecated) \u0026gt; Default Configmap (Deprecated)\nAnnotations Annotations are described in kubernetes annotations doc.\nWe support annotations in agent annotations and sidecar annotations.\nSwAgent SwAgent is a Customer Resource defined by SWCK.\nWe support SwAgent in SwAgent usage guide\nConfigmap (Deprecated) Configmap is described in kubernetes configmap doc.\nWe need to use configmap to set agent.config so that we can modify the agent configuration without entering the container.\nIf there are different configmap in the namepsace, you can choose a configmap by setting sidecar annotations; If there is no configmap, the injector will create a default configmap.\nDefault configmap (Deprecated) The injector will create the default configmap to overlay the agent.config in the agent container.\nThe default configmap is shown as below, one is agent.service_name and the string can\u0026rsquo;t be empty; the other is collector.backend_service and it needs to be a legal IP address and port, the other fields need to be guaranteed by users themselves. Users can change it as their default configmap.\ndata: agent.config: | # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. To avoid the default configmap deleting by mistake, we use a configmap controller to watch the default configmap. In addition, if the user applies an invalid configuration, such as a malformed backend_service, the controller will use the default configmap.\nConfigure the agent The injector supports two methods to configure agent:\n Only use the default configuration. Use annotations to overlay the default configuration.  
Use the default agent configuration After activating the java agent injection, if not set the annotations, the injector will use the default agent configuration directly as below.\ninitContainers: - args: - -c - mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent command: - sh image: apache/skywalking-java-agent:8.16.0-java8 name: inject-skywalking-agent volumeMounts: - mountPath: /sky/agent name: sky-agent volumes: - emptyDir: {} name: sky-agent - configMap: name: skywalking-swck-java-agent-configmap name: java-agent-configmap-volume Use SwAgent to overlay default agent configuration The injector will read the SwAgent CR when pods creating.\nSwAgent CRD basic structure is like:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;bootstrapPlugins:- \u0026#34;jdk-threading\u0026#34;There are three kind of configs in SwAgent CR.\n1. label selector and container matcher label selector and container matcher decides which pod and container should be injected.\n   key path description default value     spec.selector label selector for pods which should be effected during injection. if no label selector was set, SwAgent CR config will affect every pod during injection. no default value   spec.containerMatcher container matcher is used to decide which container to be inject during injection. regular expression is supported. default value \u0026lsquo;.*\u0026rsquo; would match any container name. .*    2. injection configuration injection configuration will affect on agent injection behaviour\n   key path description default value     javaSidecar javaSidecar is the configs for init container, which holds agent sdk and take agent sdk to the target containers.    javaSidecar.name the name of the init container. inject-skywalking-agent   javaSidecar.image the image of the init container. apache/skywalking-java-agent:8.16.0-java8   SharedVolumeName SharedVolume is the name of an empty volume which shared by initContainer and target containers. sky-agent   OptionalPlugins Select the optional plugin which needs to be moved to the directory(/plugins). Such as trace,webflux,cloud-gateway-2.1.x. no default value   OptionalReporterPlugins Select the optional reporter plugin which needs to be moved to the directory(/plugins). such as kafka. no default value   BootstrapPlugins Select the bootstrap plugin which needs to be moved to the directory(/plugins). such as jdk-threading. no default value    3. skywalking agent configuration skywalking agent configuration is for agent SDK.\n   key path description default value     javaSidecar.env the env list to be appended to target containers. usually we can use it to setup agent configuration at container level. no default value.    Use annotations to overlay default agent configuration The injector can recognize five kinds of annotations to configure the agent as below.\n1. 
strategy configuration The strategy configuration is the annotation as below.\n   Annotation key Description Annotation Default value     strategy.skywalking.apache.org/inject.Container Select the injected container, if not set, inject all containers. not set    2. agent configuration The agent configuration is the annotation like agent.skywalking.apache.org/{option}: {value}, and the option support agent.xxx 、osinfo.xxx 、collector.xxx 、 logging.xxx 、statuscheck.xxx 、correlation.xxx 、jvm.xxx 、buffer.xxx 、 profile.xxx 、 meter.xxx 、 log.xxx in agent.config, such as agent.skywalking.apache.org/agent.namespace, agent.skywalking.apache.org/meter.max_meter_size, etc.\n3. plugins configuration The plugins configuration is the annotation like plugins.skywalking.apache.org/{option}: {value}, and the option only support plugin.xxx in the agent.config, such as plugins.skywalking.apache.org/plugin.mount, plugins.skywalking.apache.org/plugin.mongodb.trace_param, etc.\n4. optional plugin configuration The optional plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional.skywalking.apache.org Select the optional plugin which needs to be moved to the directory(/plugins). Users can select several optional plugins by separating from |, such as trace|webflux|cloud-gateway-2.1.x. not set    5. optional reporter plugin configuration The optional reporter plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional-reporter.skywalking.apache.org Select the optional reporter plugin which needs to be moved to the directory(/plugins). Users can select several optional reporter plugins by separating from |, such as kafka. not set    6. bootstrap plugin configuration The bootstrap plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     bootstrap.skywalking.apache.org Select the bootstrap plugin which needs to be moved to the directory(/plugins). Users can select several bootstrap plugins by separating from |, such as jdk-threading. not set    Configure sidecar The injector can recognize the following annotations to configure the sidecar:\n   Annotation key Description Annotation Default value     sidecar.skywalking.apache.org/initcontainer.Name The name of the injected java agent container. inject-skywalking-agent   sidecar.skywalking.apache.org/initcontainer.Image The container image of the injected java agent container. apache/skywalking-java-agent:8.16.0-java8   sidecar.skywalking.apache.org/initcontainer.Command The command of the injected java agent container. sh   sidecar.skywalking.apache.org/initcontainer.args.Option The args option of the injected java agent container. -c   sidecar.skywalking.apache.org/initcontainer.args.Command The args command of the injected java agent container. mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent   sidecar.skywalking.apache.org/initcontainer.resources.limits The resources limits of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/initcontainer.resources.requests The resources requests of the injected java agent container. 
You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/sidecarVolume.Name The name of sidecar Volume. sky-agent   sidecar.skywalking.apache.org/sidecarVolumeMount.MountPath Mount path of the agent directory in the injected container. /sky/agent   sidecar.skywalking.apache.org/env.Name Environment Name used by the injected container (application container). JAVA_TOOL_OPTIONS   sidecar.skywalking.apache.org/env.Value Environment variables used by the injected container (application container). -javaagent:/sky/agent/skywalking-agent.jar    The ways to get the final injected agent\u0026rsquo;s configuration Please see javaagent introduction for details.\n","title":"Java agent injector Manual","url":"/docs/skywalking-swck/next/java-agent-injector/"},{"content":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector to inject the agent sidecar into a pod.\nWhen enabled in a pod\u0026rsquo;s namespace, the injector injects the java agent container at pod creation time using a mutating webhook admission controller. By rendering the java agent to a shared volume, containers within the pod can use the java agent.\nThe following sections describe how to configure the agent, if you want to try it directly, please see Usage for more details.\nInstall Injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nActive the java agent injection We have two granularities here: namespace and pod.\n   Resource Label Enabled value Disabled value     Namespace swck-injection enabled disabled   Pod swck-java-agent-injected \u0026ldquo;true\u0026rdquo; \u0026ldquo;false\u0026rdquo;    The injector is configured with the following logic:\n If either label is disabled, the pod is not injected. If two labels are enabled, the pod is injected.  Follow the next steps to active java agent injection.\n Label the namespace with swck-injection=enabled  $ kubectl label namespace default(your namespace) swck-injection=enabled  Add label swck-java-agent-injected: \u0026quot;true\u0026quot; to the pod, and get the result as below.  
$ kubectl get pod -l swck-java-agent-injected=true NAME READY STATUS RESTARTS AGE inject-demo 1/1 Running 0 2d2h The ways to configure the agent The java agent injector supports a precedence order to configure the agent:\n Annotations \u0026gt; SwAgent \u0026gt; Configmap (Deprecated) \u0026gt; Default Configmap (Deprecated)\nAnnotations Annotations are described in kubernetes annotations doc.\nWe support annotations in agent annotations and sidecar annotations.\nSwAgent SwAgent is a Customer Resource defined by SWCK.\nWe support SwAgent in SwAgent usage guide\nConfigmap (Deprecated) Configmap is described in kubernetes configmap doc.\nWe need to use configmap to set agent.config so that we can modify the agent configuration without entering the container.\nIf there are different configmap in the namepsace, you can choose a configmap by setting sidecar annotations; If there is no configmap, the injector will create a default configmap.\nDefault configmap (Deprecated) The injector will create the default configmap to overlay the agent.config in the agent container.\nThe default configmap is shown as below, one is agent.service_name and the string can\u0026rsquo;t be empty; the other is collector.backend_service and it needs to be a legal IP address and port, the other fields need to be guaranteed by users themselves. Users can change it as their default configmap.\ndata: agent.config: | # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. To avoid the default configmap deleting by mistake, we use a configmap controller to watch the default configmap. In addition, if the user applies an invalid configuration, such as a malformed backend_service, the controller will use the default configmap.\nConfigure the agent The injector supports two methods to configure agent:\n Only use the default configuration. Use annotations to overlay the default configuration.  
Use the default agent configuration After activating the java agent injection, if not set the annotations, the injector will use the default agent configuration directly as below.\ninitContainers: - args: - -c - mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent command: - sh image: apache/skywalking-java-agent:8.16.0-java8 name: inject-skywalking-agent volumeMounts: - mountPath: /sky/agent name: sky-agent volumes: - emptyDir: {} name: sky-agent - configMap: name: skywalking-swck-java-agent-configmap name: java-agent-configmap-volume Use SwAgent to overlay default agent configuration The injector will read the SwAgent CR when pods creating.\nSwAgent CRD basic structure is like:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;bootstrapPlugins:- \u0026#34;jdk-threading\u0026#34;There are three kind of configs in SwAgent CR.\n1. label selector and container matcher label selector and container matcher decides which pod and container should be injected.\n   key path description default value     spec.selector label selector for pods which should be effected during injection. if no label selector was set, SwAgent CR config will affect every pod during injection. no default value   spec.containerMatcher container matcher is used to decide which container to be inject during injection. regular expression is supported. default value \u0026lsquo;.*\u0026rsquo; would match any container name. .*    2. injection configuration injection configuration will affect on agent injection behaviour\n   key path description default value     javaSidecar javaSidecar is the configs for init container, which holds agent sdk and take agent sdk to the target containers.    javaSidecar.name the name of the init container. inject-skywalking-agent   javaSidecar.image the image of the init container. apache/skywalking-java-agent:8.16.0-java8   SharedVolumeName SharedVolume is the name of an empty volume which shared by initContainer and target containers. sky-agent   OptionalPlugins Select the optional plugin which needs to be moved to the directory(/plugins). Such as trace,webflux,cloud-gateway-2.1.x. no default value   OptionalReporterPlugins Select the optional reporter plugin which needs to be moved to the directory(/plugins). such as kafka. no default value   BootstrapPlugins Select the bootstrap plugin which needs to be moved to the directory(/plugins). such as jdk-threading. no default value    3. skywalking agent configuration skywalking agent configuration is for agent SDK.\n   key path description default value     javaSidecar.env the env list to be appended to target containers. usually we can use it to setup agent configuration at container level. no default value.    Use annotations to overlay default agent configuration The injector can recognize five kinds of annotations to configure the agent as below.\n1. 
strategy configuration The strategy configuration is the annotation as below.\n   Annotation key Description Annotation Default value     strategy.skywalking.apache.org/inject.Container Select the injected container, if not set, inject all containers. not set    2. agent configuration The agent configuration is the annotation like agent.skywalking.apache.org/{option}: {value}, and the option support agent.xxx 、osinfo.xxx 、collector.xxx 、 logging.xxx 、statuscheck.xxx 、correlation.xxx 、jvm.xxx 、buffer.xxx 、 profile.xxx 、 meter.xxx 、 log.xxx in agent.config, such as agent.skywalking.apache.org/agent.namespace, agent.skywalking.apache.org/meter.max_meter_size, etc.\n3. plugins configuration The plugins configuration is the annotation like plugins.skywalking.apache.org/{option}: {value}, and the option only support plugin.xxx in the agent.config, such as plugins.skywalking.apache.org/plugin.mount, plugins.skywalking.apache.org/plugin.mongodb.trace_param, etc.\n4. optional plugin configuration The optional plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional.skywalking.apache.org Select the optional plugin which needs to be moved to the directory(/plugins). Users can select several optional plugins by separating from |, such as trace|webflux|cloud-gateway-2.1.x. not set    5. optional reporter plugin configuration The optional reporter plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional-reporter.skywalking.apache.org Select the optional reporter plugin which needs to be moved to the directory(/plugins). Users can select several optional reporter plugins by separating from |, such as kafka. not set    6. bootstrap plugin configuration The bootstrap plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     bootstrap.skywalking.apache.org Select the bootstrap plugin which needs to be moved to the directory(/plugins). Users can select several bootstrap plugins by separating from |, such as jdk-threading. not set    Configure sidecar The injector can recognize the following annotations to configure the sidecar:\n   Annotation key Description Annotation Default value     sidecar.skywalking.apache.org/initcontainer.Name The name of the injected java agent container. inject-skywalking-agent   sidecar.skywalking.apache.org/initcontainer.Image The container image of the injected java agent container. apache/skywalking-java-agent:8.16.0-java8   sidecar.skywalking.apache.org/initcontainer.Command The command of the injected java agent container. sh   sidecar.skywalking.apache.org/initcontainer.args.Option The args option of the injected java agent container. -c   sidecar.skywalking.apache.org/initcontainer.args.Command The args command of the injected java agent container. mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent   sidecar.skywalking.apache.org/initcontainer.resources.limits The resources limits of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/initcontainer.resources.requests The resources requests of the injected java agent container. 
You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/sidecarVolume.Name The name of sidecar Volume. sky-agent   sidecar.skywalking.apache.org/sidecarVolumeMount.MountPath Mount path of the agent directory in the injected container. /sky/agent   sidecar.skywalking.apache.org/env.Name Environment Name used by the injected container (application container). JAVA_TOOL_OPTIONS   sidecar.skywalking.apache.org/env.Value Environment variables used by the injected container (application container). -javaagent:/sky/agent/skywalking-agent.jar    The ways to get the final injected agent\u0026rsquo;s configuration Please see javaagent introduction for details.\n","title":"Java agent injector Manual","url":"/docs/skywalking-swck/v0.9.0/java-agent-injector/"},{"content":"Java agent injector Usage In this example, you will learn how to use the java agent injector.\nInstall injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nDeployment Example Let\u0026rsquo;s take a demo deployment for example.\n# demo1.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:labels:app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Enable Injection for Namespace and Deployments/StatefulSets. Firstly, set the injection label in your namespace as below.\nkubectl label namespace default(your namespace) swck-injection=enabled Secondly, set the injection label for your target Deployment/StatefulSet.\nkubectl -n default patch deployment demo1 --patch \u0026#39;{ \u0026#34;spec\u0026#34;: { \u0026#34;template\u0026#34;: { \u0026#34;metadata\u0026#34;: { \u0026#34;labels\u0026#34;: { \u0026#34;swck-java-agent-injected\u0026#34;: \u0026#34;true\u0026#34; } } } } }\u0026#39; Then the pods create by the Deployments/StatefulSets would be recreated with agent injected.\nThe injected pods would be like this:\nspec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jarimage:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agentcommand:- shimage:apache/skywalking-java-agent:8.10.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 demo1 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-5fbb6fcd98-cq5ws 1/1 Running 0 54s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:09:34Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true 
controller: true kind: ReplicaSet name: demo1-7fdffc7b95 uid: 417c413f-0cc0-41f9-b6eb-0192eb8c8622 resourceVersion: \u0026#34;25067\u0026#34; uid: 1cdab012-784c-4efb-b5d2-c032eb2fb22a spec: backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026#34;2022-08-16T12:09:34Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:10:04Z\u0026#34; realInjectedNum: 1 Use SwAgent CR to setup override default configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nApply SwAgent CR with correct label selector and container matcher:\n# SwAgent.yamlapiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;kubectl -n default apply swagent.yaml You can also get SwAgent CR by:\nkubectl -n default get SwAgent NAME AGE swagent-demo 38s Now the pod is still the old one, because pod could not load the SwAgent config automatically.\nSo you need to recreate pod to load SwAgent config. For the pods created by Deployment/StatefulSet, you can just simply delete the old pod.\n# verify pods to be delete  kubectl -n default get pods -l app=demo1 # delete pods kubectl -n default delete pods -l app=demo1 After the pods recreated, we can get injected pod as below.\nkubectl -n default get pods -l app=demo1 spec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.service_name=demo1,collector.backend_service=skywalking-system-oap.skywalking-system:11800- name:SW_LOGGING_LEVELvalue:DEBUG- name:SW_AGENT_COLLECTOR_BACKEND_SERVICESvalue:skywalking-system-oap.default.svc:11800image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agent-demoinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:swagent-demovolumeMounts:- mountPath:/sky/agentname:sky-agent-demovolumes:- emptyDir:{}name:sky-agent-demoUse annotation to override sidecar configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nThen add agent configuration and sidecar configuration to annotations as below.\n# 
demo1_anno.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:annotations:strategy.skywalking.apache.org/inject.Container:\u0026#34;demo1\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;app\u0026#34;agent.skywalking.apache.org/agent.sample_n_per_3_secs:\u0026#34;6\u0026#34;agent.skywalking.apache.org/agent.class_cache_mode:\u0026#34;MEMORY\u0026#34;agent.skywalking.apache.org/agent.ignore_suffix:\u0026#34;\u0026#39;jpg,.jpeg\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mount:\u0026#34;\u0026#39;plugins,activations\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mongodb.trace_param:\u0026#34;true\u0026#34;plugins.skywalking.apache.org/plugin.influxdb.trace_influxql:\u0026#34;false\u0026#34;optional.skywalking.apache.org:\u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34;optional-reporter.skywalking.apache.org:\u0026#34;kafka\u0026#34;labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Then we can get injected pod as below:\nkubectl -n default get pods -l app=demo1 spec:containers:- image:nginx:1.16.1imagePullPolicy:IfNotPresentname:nginx- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.ignore_suffix=\u0026#39;jpg,.jpeg\u0026#39;,agent.service_name=app,agent.class_cache_mode=MEMORY,agent.sample_n_per_3_secs=6,plugin.mongodb.trace_param=true,plugin.influxdb.trace_influxql=false,plugin.mount=\u0026#39;plugins,activations\u0026#39;image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1ports:- containerPort:8085protocol:TCPreadinessProbe:failureThreshold:10httpGet:path:/helloport:8085scheme:HTTPinitialDelaySeconds:3periodSeconds:3successThreshold:1timeoutSeconds:1volumeMounts:- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/\u0026amp;\u0026amp;cd /sky/agent/optional-reporter-plugins/ \u0026amp;\u0026amp; ls | grep -E \u0026#34;kafka\u0026#34; | xargs-i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 app 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-d48b96467-p7zrv 1/1 Running 0 5m25s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:18:53Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-d48b96467 uid: 2b7f1ac4-b459-41cd-8568-ecd4578ca457 resourceVersion: 
\u0026#34;26187\u0026#34; uid: c2b2f3e2-9442-4465-9423-d24249b2c53b spec: agentConfiguration: agent.class_cache_mode: MEMORY agent.ignore_suffix: \u0026#39;\u0026#39;\u0026#39;jpg,.jpeg\u0026#39;\u0026#39;\u0026#39; agent.sample_n_per_3_secs: \u0026#34;6\u0026#34; agent.service_name: app optional-plugin: trace|webflux|cloud-gateway-2.1.x optional-reporter-plugin: kafka plugin.influxdb.trace_influxql: \u0026#34;false\u0026#34; plugin.mongodb.trace_param: \u0026#34;true\u0026#34; plugin.mount: \u0026#39;\u0026#39;\u0026#39;plugins,activations\u0026#39;\u0026#39;\u0026#39; backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: app status: creationTime: \u0026#34;2022-08-16T12:18:53Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:19:18Z\u0026#34; realInjectedNum: 1 ","title":"Java agent injector Usage","url":"/docs/skywalking-swck/latest/examples/java-agent-injector-usage/"},{"content":"Java agent injector Usage In this example, you will learn how to use the java agent injector.\nInstall injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nDeployment Example Let\u0026rsquo;s take a demo deployment for example.\n# demo1.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:labels:app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Enable Injection for Namespace and Deployments/StatefulSets. Firstly, set the injection label in your namespace as below.\nkubectl label namespace default(your namespace) swck-injection=enabled Secondly, set the injection label for your target Deployment/StatefulSet.\nkubectl -n default patch deployment demo1 --patch \u0026#39;{ \u0026#34;spec\u0026#34;: { \u0026#34;template\u0026#34;: { \u0026#34;metadata\u0026#34;: { \u0026#34;labels\u0026#34;: { \u0026#34;swck-java-agent-injected\u0026#34;: \u0026#34;true\u0026#34; } } } } }\u0026#39; Then the pods create by the Deployments/StatefulSets would be recreated with agent injected.\nThe injected pods would be like this:\nspec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jarimage:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agentcommand:- shimage:apache/skywalking-java-agent:8.10.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 demo1 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-5fbb6fcd98-cq5ws 1/1 Running 0 54s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:09:34Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 
blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-7fdffc7b95 uid: 417c413f-0cc0-41f9-b6eb-0192eb8c8622 resourceVersion: \u0026#34;25067\u0026#34; uid: 1cdab012-784c-4efb-b5d2-c032eb2fb22a spec: backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026#34;2022-08-16T12:09:34Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:10:04Z\u0026#34; realInjectedNum: 1 Use SwAgent CR to setup override default configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nApply SwAgent CR with correct label selector and container matcher:\n# SwAgent.yamlapiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;kubectl -n default apply swagent.yaml You can also get SwAgent CR by:\nkubectl -n default get SwAgent NAME AGE swagent-demo 38s Now the pod is still the old one, because pod could not load the SwAgent config automatically.\nSo you need to recreate pod to load SwAgent config. For the pods created by Deployment/StatefulSet, you can just simply delete the old pod.\n# verify pods to be delete  kubectl -n default get pods -l app=demo1 # delete pods kubectl -n default delete pods -l app=demo1 After the pods recreated, we can get injected pod as below.\nkubectl -n default get pods -l app=demo1 spec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.service_name=demo1,collector.backend_service=skywalking-system-oap.skywalking-system:11800- name:SW_LOGGING_LEVELvalue:DEBUG- name:SW_AGENT_COLLECTOR_BACKEND_SERVICESvalue:skywalking-system-oap.default.svc:11800image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agent-demoinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:swagent-demovolumeMounts:- mountPath:/sky/agentname:sky-agent-demovolumes:- emptyDir:{}name:sky-agent-demoUse annotation to override sidecar configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nThen add agent configuration and sidecar configuration to annotations as below.\n# 
demo1_anno.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:annotations:strategy.skywalking.apache.org/inject.Container:\u0026#34;demo1\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;app\u0026#34;agent.skywalking.apache.org/agent.sample_n_per_3_secs:\u0026#34;6\u0026#34;agent.skywalking.apache.org/agent.class_cache_mode:\u0026#34;MEMORY\u0026#34;agent.skywalking.apache.org/agent.ignore_suffix:\u0026#34;\u0026#39;jpg,.jpeg\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mount:\u0026#34;\u0026#39;plugins,activations\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mongodb.trace_param:\u0026#34;true\u0026#34;plugins.skywalking.apache.org/plugin.influxdb.trace_influxql:\u0026#34;false\u0026#34;optional.skywalking.apache.org:\u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34;optional-reporter.skywalking.apache.org:\u0026#34;kafka\u0026#34;labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Then we can get injected pod as below:\nkubectl -n default get pods -l app=demo1 spec:containers:- image:nginx:1.16.1imagePullPolicy:IfNotPresentname:nginx- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.ignore_suffix=\u0026#39;jpg,.jpeg\u0026#39;,agent.service_name=app,agent.class_cache_mode=MEMORY,agent.sample_n_per_3_secs=6,plugin.mongodb.trace_param=true,plugin.influxdb.trace_influxql=false,plugin.mount=\u0026#39;plugins,activations\u0026#39;image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1ports:- containerPort:8085protocol:TCPreadinessProbe:failureThreshold:10httpGet:path:/helloport:8085scheme:HTTPinitialDelaySeconds:3periodSeconds:3successThreshold:1timeoutSeconds:1volumeMounts:- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/\u0026amp;\u0026amp;cd /sky/agent/optional-reporter-plugins/ \u0026amp;\u0026amp; ls | grep -E \u0026#34;kafka\u0026#34; | xargs-i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 app 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-d48b96467-p7zrv 1/1 Running 0 5m25s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:18:53Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-d48b96467 uid: 2b7f1ac4-b459-41cd-8568-ecd4578ca457 resourceVersion: 
\u0026#34;26187\u0026#34; uid: c2b2f3e2-9442-4465-9423-d24249b2c53b spec: agentConfiguration: agent.class_cache_mode: MEMORY agent.ignore_suffix: \u0026#39;\u0026#39;\u0026#39;jpg,.jpeg\u0026#39;\u0026#39;\u0026#39; agent.sample_n_per_3_secs: \u0026#34;6\u0026#34; agent.service_name: app optional-plugin: trace|webflux|cloud-gateway-2.1.x optional-reporter-plugin: kafka plugin.influxdb.trace_influxql: \u0026#34;false\u0026#34; plugin.mongodb.trace_param: \u0026#34;true\u0026#34; plugin.mount: \u0026#39;\u0026#39;\u0026#39;plugins,activations\u0026#39;\u0026#39;\u0026#39; backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: app status: creationTime: \u0026#34;2022-08-16T12:18:53Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:19:18Z\u0026#34; realInjectedNum: 1 ","title":"Java agent injector Usage","url":"/docs/skywalking-swck/next/examples/java-agent-injector-usage/"},{"content":"Java agent injector Usage In this example, you will learn how to use the java agent injector.\nInstall injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nDeployment Example Let\u0026rsquo;s take a demo deployment for example.\n# demo1.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:labels:app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Enable Injection for Namespace and Deployments/StatefulSets. Firstly, set the injection label in your namespace as below.\nkubectl label namespace default(your namespace) swck-injection=enabled Secondly, set the injection label for your target Deployment/StatefulSet.\nkubectl -n default patch deployment demo1 --patch \u0026#39;{ \u0026#34;spec\u0026#34;: { \u0026#34;template\u0026#34;: { \u0026#34;metadata\u0026#34;: { \u0026#34;labels\u0026#34;: { \u0026#34;swck-java-agent-injected\u0026#34;: \u0026#34;true\u0026#34; } } } } }\u0026#39; Then the pods create by the Deployments/StatefulSets would be recreated with agent injected.\nThe injected pods would be like this:\nspec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jarimage:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agentcommand:- shimage:apache/skywalking-java-agent:8.10.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 demo1 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-5fbb6fcd98-cq5ws 1/1 Running 0 54s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:09:34Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 
blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-7fdffc7b95 uid: 417c413f-0cc0-41f9-b6eb-0192eb8c8622 resourceVersion: \u0026#34;25067\u0026#34; uid: 1cdab012-784c-4efb-b5d2-c032eb2fb22a spec: backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026#34;2022-08-16T12:09:34Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:10:04Z\u0026#34; realInjectedNum: 1 Use SwAgent CR to setup override default configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nApply SwAgent CR with correct label selector and container matcher:\n# SwAgent.yamlapiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;kubectl -n default apply swagent.yaml You can also get SwAgent CR by:\nkubectl -n default get SwAgent NAME AGE swagent-demo 38s Now the pod is still the old one, because pod could not load the SwAgent config automatically.\nSo you need to recreate pod to load SwAgent config. For the pods created by Deployment/StatefulSet, you can just simply delete the old pod.\n# verify pods to be delete  kubectl -n default get pods -l app=demo1 # delete pods kubectl -n default delete pods -l app=demo1 After the pods recreated, we can get injected pod as below.\nkubectl -n default get pods -l app=demo1 spec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.service_name=demo1,collector.backend_service=skywalking-system-oap.skywalking-system:11800- name:SW_LOGGING_LEVELvalue:DEBUG- name:SW_AGENT_COLLECTOR_BACKEND_SERVICESvalue:skywalking-system-oap.default.svc:11800image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agent-demoinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:swagent-demovolumeMounts:- mountPath:/sky/agentname:sky-agent-demovolumes:- emptyDir:{}name:sky-agent-demoUse annotation to override sidecar configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nThen add agent configuration and sidecar configuration to annotations as below.\n# 
demo1_anno.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:annotations:strategy.skywalking.apache.org/inject.Container:\u0026#34;demo1\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;app\u0026#34;agent.skywalking.apache.org/agent.sample_n_per_3_secs:\u0026#34;6\u0026#34;agent.skywalking.apache.org/agent.class_cache_mode:\u0026#34;MEMORY\u0026#34;agent.skywalking.apache.org/agent.ignore_suffix:\u0026#34;\u0026#39;jpg,.jpeg\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mount:\u0026#34;\u0026#39;plugins,activations\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mongodb.trace_param:\u0026#34;true\u0026#34;plugins.skywalking.apache.org/plugin.influxdb.trace_influxql:\u0026#34;false\u0026#34;optional.skywalking.apache.org:\u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34;optional-reporter.skywalking.apache.org:\u0026#34;kafka\u0026#34;labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Then we can get injected pod as below:\nkubectl -n default get pods -l app=demo1 spec:containers:- image:nginx:1.16.1imagePullPolicy:IfNotPresentname:nginx- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.ignore_suffix=\u0026#39;jpg,.jpeg\u0026#39;,agent.service_name=app,agent.class_cache_mode=MEMORY,agent.sample_n_per_3_secs=6,plugin.mongodb.trace_param=true,plugin.influxdb.trace_influxql=false,plugin.mount=\u0026#39;plugins,activations\u0026#39;image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1ports:- containerPort:8085protocol:TCPreadinessProbe:failureThreshold:10httpGet:path:/helloport:8085scheme:HTTPinitialDelaySeconds:3periodSeconds:3successThreshold:1timeoutSeconds:1volumeMounts:- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/\u0026amp;\u0026amp;cd /sky/agent/optional-reporter-plugins/ \u0026amp;\u0026amp; ls | grep -E \u0026#34;kafka\u0026#34; | xargs-i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 app 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-d48b96467-p7zrv 1/1 Running 0 5m25s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:18:53Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-d48b96467 uid: 2b7f1ac4-b459-41cd-8568-ecd4578ca457 resourceVersion: 
\u0026#34;26187\u0026#34; uid: c2b2f3e2-9442-4465-9423-d24249b2c53b spec: agentConfiguration: agent.class_cache_mode: MEMORY agent.ignore_suffix: \u0026#39;\u0026#39;\u0026#39;jpg,.jpeg\u0026#39;\u0026#39;\u0026#39; agent.sample_n_per_3_secs: \u0026#34;6\u0026#34; agent.service_name: app optional-plugin: trace|webflux|cloud-gateway-2.1.x optional-reporter-plugin: kafka plugin.influxdb.trace_influxql: \u0026#34;false\u0026#34; plugin.mongodb.trace_param: \u0026#34;true\u0026#34; plugin.mount: \u0026#39;\u0026#39;\u0026#39;plugins,activations\u0026#39;\u0026#39;\u0026#39; backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: app status: creationTime: \u0026#34;2022-08-16T12:18:53Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:19:18Z\u0026#34; realInjectedNum: 1 ","title":"Java agent injector Usage","url":"/docs/skywalking-swck/v0.9.0/examples/java-agent-injector-usage/"},{"content":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","title":"Java Microbenchmark Harness (JMH)","url":"/docs/main/latest/en/guides/benchmark/"},{"content":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. 
And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","title":"Java Microbenchmark Harness (JMH)","url":"/docs/main/next/en/guides/benchmark/"},{"content":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","title":"Java Microbenchmark Harness (JMH)","url":"/docs/main/v9.6.0/en/guides/benchmark/"},{"content":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","title":"Java Microbenchmark Harness (JMH)","url":"/docs/main/v9.7.0/en/guides/benchmark/"},{"content":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a CustomDefinitionResource called JavaAgent.\nWhen the pod is injected, the pod will be labeled with sidecar.skywalking.apache.org/succeed, then the controller will watch the specific pod labeled with sidecar.skywalking.apache.org/succeed. 
After the pod is created, the controller will create JavaAgent(custom resource), which contains the final agent configuration as below.\nSpec    Field Name Description     podSelector We hope users can use workloads to create pods, the podSelector is the selector label of workload.   serviceName serviceName is an important attribute that needs to be printed.   backendService backendService is an important attribute that needs to be printed.   agentConfiguration agentConfiguration contains serviceName、backendService and covered agent configuration, other default configurations will not be displayed, please see agent.config for details.    Status    Field Name Description     creationTime The creation time of the JavaAgent   lastUpdateTime The last Update time of the JavaAgent   expectedInjectiedNum The number of the pod that need to be injected   realInjectedNum The real number of injected pods.    Demo This demo shows the usage of javaagent. If you want to see the complete process, please see java-agent-injector-usagefor details.\nWhen we use java-agent-injector, we can get custom resources as below.\n$ kubectl get javaagent -A NAMESPACE NAME PODSELECTOR SERVICENAME BACKENDSERVICE default app-demo1-javaagent app=demo1 Your_ApplicationName 127.0.0.1:11800 default app-demo2-javaagent app=demo2 Your_ApplicationName 127.0.0.1:11800 $ kubectl get pod -l app=demo1 NAME READY STATUS RESTARTS AGE demo1-bb97b8b4d-bkwm4 1/1 Running 0 28s demo1-bb97b8b4d-wxgs2 1/1 Running 0 28s $ kubectl get pod -l app=demo2 NAME READY STATUS RESTARTS AGE app2-0 1/1 Running 0 27s app2-1 1/1 Running 0 25s app2-2 1/1 Running 0 23s If we want to see more information, we can get the specific javaagent\u0026rsquo;s yaml as below.\n$ kubectl get javaagent app-demo1-javaagent -oyaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026quot;2021-10-14T07:07:12Z\u0026quot; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-bb97b8b4d uid: c712924f-4652-4c07-8332-b3938ad72392 resourceVersion: \u0026quot;330808\u0026quot; selfLink: /apis/operator.skywalking.apache.org/v1alpha1/namespaces/default/javaagents/app-demo1-javaagent uid: 9350338f-15a5-4832-84d1-530f8d0e1c3b spec: agentConfiguration: agent.namespace: default-namespace agent.service_name: Your_ApplicationName collector.backend_service: 127.0.0.1:11800 backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026quot;2021-10-14T07:07:12Z\u0026quot; expectedInjectiedNum: 2 lastUpdateTime: \u0026quot;2021-10-14T07:07:14Z\u0026quot; realInjectedNum: 2 ","title":"JavaAgent Introduction","url":"/docs/skywalking-swck/latest/javaagent/"},{"content":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a CustomDefinitionResource called JavaAgent.\nWhen the pod is injected, the pod will be labeled with sidecar.skywalking.apache.org/succeed, then the controller will watch the specific pod labeled with sidecar.skywalking.apache.org/succeed. After the pod is created, the controller will create JavaAgent(custom resource), which contains the final agent configuration as below.\nSpec    Field Name Description     podSelector We hope users can use workloads to create pods, the podSelector is the selector label of workload.   serviceName serviceName is an important attribute that needs to be printed.   
backendService backendService is an important attribute that needs to be printed.   agentConfiguration agentConfiguration contains serviceName、backendService and covered agent configuration, other default configurations will not be displayed, please see agent.config for details.    Status    Field Name Description     creationTime The creation time of the JavaAgent   lastUpdateTime The last Update time of the JavaAgent   expectedInjectiedNum The number of the pod that need to be injected   realInjectedNum The real number of injected pods.    Demo This demo shows the usage of javaagent. If you want to see the complete process, please see java-agent-injector-usagefor details.\nWhen we use java-agent-injector, we can get custom resources as below.\n$ kubectl get javaagent -A NAMESPACE NAME PODSELECTOR SERVICENAME BACKENDSERVICE default app-demo1-javaagent app=demo1 Your_ApplicationName 127.0.0.1:11800 default app-demo2-javaagent app=demo2 Your_ApplicationName 127.0.0.1:11800 $ kubectl get pod -l app=demo1 NAME READY STATUS RESTARTS AGE demo1-bb97b8b4d-bkwm4 1/1 Running 0 28s demo1-bb97b8b4d-wxgs2 1/1 Running 0 28s $ kubectl get pod -l app=demo2 NAME READY STATUS RESTARTS AGE app2-0 1/1 Running 0 27s app2-1 1/1 Running 0 25s app2-2 1/1 Running 0 23s If we want to see more information, we can get the specific javaagent\u0026rsquo;s yaml as below.\n$ kubectl get javaagent app-demo1-javaagent -oyaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026quot;2021-10-14T07:07:12Z\u0026quot; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-bb97b8b4d uid: c712924f-4652-4c07-8332-b3938ad72392 resourceVersion: \u0026quot;330808\u0026quot; selfLink: /apis/operator.skywalking.apache.org/v1alpha1/namespaces/default/javaagents/app-demo1-javaagent uid: 9350338f-15a5-4832-84d1-530f8d0e1c3b spec: agentConfiguration: agent.namespace: default-namespace agent.service_name: Your_ApplicationName collector.backend_service: 127.0.0.1:11800 backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026quot;2021-10-14T07:07:12Z\u0026quot; expectedInjectiedNum: 2 lastUpdateTime: \u0026quot;2021-10-14T07:07:14Z\u0026quot; realInjectedNum: 2 ","title":"JavaAgent Introduction","url":"/docs/skywalking-swck/next/javaagent/"},{"content":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a CustomDefinitionResource called JavaAgent.\nWhen the pod is injected, the pod will be labeled with sidecar.skywalking.apache.org/succeed, then the controller will watch the specific pod labeled with sidecar.skywalking.apache.org/succeed. After the pod is created, the controller will create JavaAgent(custom resource), which contains the final agent configuration as below.\nSpec    Field Name Description     podSelector We hope users can use workloads to create pods, the podSelector is the selector label of workload.   serviceName serviceName is an important attribute that needs to be printed.   backendService backendService is an important attribute that needs to be printed.   agentConfiguration agentConfiguration contains serviceName、backendService and covered agent configuration, other default configurations will not be displayed, please see agent.config for details.    
Status    Field Name Description     creationTime The creation time of the JavaAgent   lastUpdateTime The last Update time of the JavaAgent   expectedInjectiedNum The number of the pod that need to be injected   realInjectedNum The real number of injected pods.    Demo This demo shows the usage of javaagent. If you want to see the complete process, please see java-agent-injector-usagefor details.\nWhen we use java-agent-injector, we can get custom resources as below.\n$ kubectl get javaagent -A NAMESPACE NAME PODSELECTOR SERVICENAME BACKENDSERVICE default app-demo1-javaagent app=demo1 Your_ApplicationName 127.0.0.1:11800 default app-demo2-javaagent app=demo2 Your_ApplicationName 127.0.0.1:11800 $ kubectl get pod -l app=demo1 NAME READY STATUS RESTARTS AGE demo1-bb97b8b4d-bkwm4 1/1 Running 0 28s demo1-bb97b8b4d-wxgs2 1/1 Running 0 28s $ kubectl get pod -l app=demo2 NAME READY STATUS RESTARTS AGE app2-0 1/1 Running 0 27s app2-1 1/1 Running 0 25s app2-2 1/1 Running 0 23s If we want to see more information, we can get the specific javaagent\u0026rsquo;s yaml as below.\n$ kubectl get javaagent app-demo1-javaagent -oyaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026quot;2021-10-14T07:07:12Z\u0026quot; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-bb97b8b4d uid: c712924f-4652-4c07-8332-b3938ad72392 resourceVersion: \u0026quot;330808\u0026quot; selfLink: /apis/operator.skywalking.apache.org/v1alpha1/namespaces/default/javaagents/app-demo1-javaagent uid: 9350338f-15a5-4832-84d1-530f8d0e1c3b spec: agentConfiguration: agent.namespace: default-namespace agent.service_name: Your_ApplicationName collector.backend_service: 127.0.0.1:11800 backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026quot;2021-10-14T07:07:12Z\u0026quot; expectedInjectiedNum: 2 lastUpdateTime: \u0026quot;2021-10-14T07:07:14Z\u0026quot; realInjectedNum: 2 ","title":"JavaAgent Introduction","url":"/docs/skywalking-swck/v0.9.0/javaagent/"},{"content":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5; ZHEAP_USAGE = 6; COMPRESSED_CLASS_SPACE_USAGE = 7; CODEHEAP_NON_NMETHODS_USAGE = 8; CODEHEAP_PROFILED_NMETHODS_USAGE = 9; CODEHEAP_NON_PROFILED_NMETHODS_USAGE = 10;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","title":"JVM Metrics APIs","url":"/docs/main/latest/en/api/jvm-protocol/"},{"content":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5; ZHEAP_USAGE = 6; COMPRESSED_CLASS_SPACE_USAGE = 7; CODEHEAP_NON_NMETHODS_USAGE = 8; CODEHEAP_PROFILED_NMETHODS_USAGE = 9; CODEHEAP_NON_PROFILED_NMETHODS_USAGE = 10;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","title":"JVM Metrics APIs","url":"/docs/main/next/en/api/jvm-protocol/"},{"content":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","title":"JVM Metrics APIs","url":"/docs/main/v9.4.0/en/api/jvm-protocol/"},{"content":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","title":"JVM Metrics APIs","url":"/docs/main/v9.5.0/en/api/jvm-protocol/"},{"content":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","title":"JVM Metrics APIs","url":"/docs/main/v9.6.0/en/api/jvm-protocol/"},{"content":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5; ZHEAP_USAGE = 6; COMPRESSED_CLASS_SPACE_USAGE = 7; CODEHEAP_NON_NMETHODS_USAGE = 8; CODEHEAP_PROFILED_NMETHODS_USAGE = 9; CODEHEAP_NON_PROFILED_NMETHODS_USAGE = 10;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","title":"JVM Metrics APIs","url":"/docs/main/v9.7.0/en/api/jvm-protocol/"},{"content":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","title":"JVM Metrics Service","url":"/docs/main/v9.0.0/en/protocols/jvm-protocol/"},{"content":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","title":"JVM Metrics Service","url":"/docs/main/v9.1.0/en/protocols/jvm-protocol/"},{"content":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","title":"JVM Metrics Service","url":"/docs/main/v9.2.0/en/protocols/jvm-protocol/"},{"content":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","title":"JVM Metrics Service","url":"/docs/main/v9.3.0/en/protocols/jvm-protocol/"},{"content":"K8s monitoring SkyWalking leverages K8s kube-state-metrics and cAdvisor for collecting metrics data from K8s, and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus GRPC Exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . 
For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a full example of configuration and recommended version , you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  K8s Cluster Monitoring K8s cluster monitoring provide monitoring of the status and resources of the K8S Cluster, including the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as a Instance in OAP, and land on the Layer: K8S.\nK8s Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    K8s Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage 
The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    K8s Service Monitoring K8s Service Monitoring provide observe service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nK8s Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizing You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/k8s-cluster.yaml,/config/otel-oc-rules/k8s-node.yaml, /config/otel-oc-rules/k8s-service.yaml.\nThe K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"K8s monitoring","url":"/docs/main/v9.0.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","title":"Kafka Fetcher","url":"/docs/main/latest/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","title":"Kafka Fetcher","url":"/docs/main/next/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agent is delivered. Check the agent documentation for details. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported. Kafka Fetcher can work with gRPC/HTTP Receivers at the same time for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure as follows.\nNamespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to topic name. You should also set namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. 
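Laid out readably, Kafka client properties nest under kafkaConsumerConfig with their usual names, roughly like this (a sketch; auto.offset.reset is an illustrative extra property, not part of the original example):

```yaml
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    kafkaConsumerConfig:
      # property from the documented example
      enable.auto.commit: true
      # any other standard Kafka consumer property can be added the same way (illustrative)
      auto.offset.reset: latest
```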
For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You could find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","title":"Kafka Fetcher","url":"/docs/main/v9.0.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. 
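For instance, an OAP node that should consume only partitions 1, 3 and 5 (the example values used in this page's flattened snippets) might be configured roughly as follows; this is a readable sketch, not a verbatim copy of application.yml:

```yaml
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    # enable partition sharding across OAP nodes in cluster mode
    isSharding: ${SW_KAFKA_FETCHER_IS_SHARDING:true}
    # comma-separated partitions assigned to this OAP node
    consumePartitions: ${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}
```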
Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","title":"Kafka Fetcher","url":"/docs/main/v9.1.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","title":"Kafka Fetcher","url":"/docs/main/v9.2.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. 
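For example, isolating one OAP cluster under its own namespace might look roughly like this (a sketch; the value product-a is purely illustrative):

```yaml
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    # topics consumed by this OAP cluster are prefixed with this namespace;
    # the agents reporting to it must use the same value (see plugin.kafka.namespace below)
    namespace: ${SW_NAMESPACE:product-a}
```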
You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","title":"Kafka Fetcher","url":"/docs/main/v9.3.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). 
Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. 
You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","title":"Kafka Fetcher","url":"/docs/main/v9.4.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. 
For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","title":"Kafka Fetcher","url":"/docs/main/v9.5.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","title":"Kafka Fetcher","url":"/docs/main/v9.6.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
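When relying on that automatic creation, the partition and replication settings described just below map onto application.yml roughly like this (a readable sketch of the documented defaults):

```yaml
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    # topic layout used when the OAP auto-creates the skywalking-* topics
    partitions: ${SW_KAFKA_FETCHER_PARTITIONS:3}
    replicationFactor: ${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}
    # consumers setting as shown in the flattened snippets (default 1)
    consumers: ${SW_KAFKA_FETCHER_CONSUMERS:1}
```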
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","title":"Kafka Fetcher","url":"/docs/main/v9.7.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the Layer: KAFKA.\nData flow  The prometheus_JMX_Exporter collect metrics data from Kafka. Note: Running the exporter as a Java agent. OpenTelemetry Collector fetches metrics from prometheus_JMX_Exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup prometheus_JMX_Exporter. This is an example for JMX Exporter configuration kafka-2_0_0.yml. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Kafka Monitoring Kafka monitoring provides multidimensional metrics monitoring of Kafka cluster as Layer: KAFKA Service in the OAP. In each cluster, the kafka brokers are represented as Instance.\nKafka Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Under-Replicated Partitions meter_kafka_under_replicated_partitions Number of under-replicated partitions in the broker. A higher number is a sign of potential issues. Prometheus JMX Exporter   Offline Partitions Count meter_kafka_offline_partitions_count Number of partitions that are offline. Non-zero values indicate a problem. Prometheus JMX Exporter   Partition Count meter_kafka_partition_count Total number of partitions on the broker. Prometheus JMX Exporter   Leader Count meter_kafka_leader_count Number of leader partitions on this broker. Prometheus JMX Exporter   Active Controller Count meter_kafka_active_controller_count The number of active controllers in the cluster. Typically should be 1. Prometheus JMX Exporter   Leader Election Rate meter_kafka_leader_election_rate The rate of leader elections per minute. High rate could be a sign of instability. 
Prometheus JMX Exporter   Unclean Leader Elections Per Second meter_kafka_unclean_leader_elections_per_second The rate of unclean leader elections per second. Non-zero values indicate a serious problem. Prometheus JMX Exporter   Max Lag meter_kafka_max_lag The maximum lag between the leader and followers in terms of messages still needed to be sent. Higher lag indicates delays. Prometheus JMX Exporter    Kafka Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_kafka_broker_cpu_time_total CPU usage in percentage Prometheus JMX Exporter   Memory Usage % meter_kafka_broker_memory_usage_percentage JVM heap memory usage in percentage Prometheus JMX Exporter   Incoming Messages Msg/sec meter_kafka_broker_messages_per_second Rate of incoming messages Prometheus JMX Exporter   Bytes In Bytes/sec meter_kafka_broker_bytes_in_per_second Rate of incoming bytes Prometheus JMX Exporter   Bytes Out Bytes/sec meter_kafka_broker_bytes_out_per_second Rate of outgoing bytes Prometheus JMX Exporter   Replication Bytes In Bytes/sec meter_kafka_broker_replication_bytes_in_per_second Rate of incoming bytes for replication Prometheus JMX Exporter   Replication Bytes Out Bytes/sec meter_kafka_broker_replication_bytes_out_per_second Rate of outgoing bytes for replication Prometheus JMX Exporter   Under-Replicated Partitions Count meter_kafka_broker_under_replicated_partitions Number of under-replicated partitions Prometheus JMX Exporter   Under Min ISR Partition Count Count meter_kafka_broker_under_min_isr_partition_count Number of partitions below the minimum ISR (In-Sync Replicas) Prometheus JMX Exporter   Partition Count Count meter_kafka_broker_partition_count Total number of partitions Prometheus JMX Exporter   Leader Count Count meter_kafka_broker_leader_count Number of partitions for which this broker is the leader Prometheus JMX Exporter   ISR Shrinks Count/sec meter_kafka_broker_isr_shrinks_per_second Rate of ISR (In-Sync Replicas) shrinking Prometheus JMX Exporter   ISR Expands Count/sec meter_kafka_broker_isr_expands_per_second Rate of ISR (In-Sync Replicas) expanding Prometheus JMX Exporter   Max Lag Count meter_kafka_broker_max_lag Maximum lag between the leader and follower for a partition Prometheus JMX Exporter   Purgatory Size Count meter_kafka_broker_purgatory_size Size of purgatory for Produce and Fetch operations Prometheus JMX Exporter   Garbage Collector Count Count/sec meter_kafka_broker_garbage_collector_count Rate of garbage collection cycles Prometheus JMX Exporter   Requests Per Second Req/sec meter_kafka_broker_requests_per_second Rate of requests to the broker Prometheus JMX Exporter   Request Queue Time ms meter_kafka_broker_request_queue_time_ms Average time a request spends in the request queue Prometheus JMX Exporter   Remote Time ms meter_kafka_broker_remote_time_ms Average time taken for a remote operation Prometheus JMX Exporter   Response Queue Time ms meter_kafka_broker_response_queue_time_ms Average time a response spends in the response queue Prometheus JMX Exporter   Response Send Time ms meter_kafka_broker_response_send_time_ms Average time taken to send a response Prometheus JMX Exporter   Network Processor Avg Idle % meter_kafka_broker_network_processor_avg_idle_percent Percentage of idle time for the network processor Prometheus JMX Exporter   Topic Messages In Total Count meter_kafka_broker_topic_messages_in_total Total number of messages per topic Prometheus JMX Exporter   Topic Bytes Out Per Second Bytes/sec 
meter_kafka_broker_topic_bytesout_per_second Rate of outgoing bytes per topic Prometheus JMX Exporter   Topic Bytes In Per Second Bytes/sec meter_kafka_broker_topic_bytesin_per_second Rate of incoming bytes per topic Prometheus JMX Exporter   Topic Fetch Requests Per Second Req/sec meter_kafka_broker_topic_fetch_requests_per_second Rate of fetch requests per topic Prometheus JMX Exporter   Topic Produce Requests Per Second Req/sec meter_kafka_broker_topic_produce_requests_per_second Rate of produce requests per topic Prometheus JMX Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/kafka/kafka-cluster.yaml, /config/otel-rules/kafka/kafka-node.yaml. The Kafka dashboard panel configurations are found in /config/ui-initialized-templates/kafka.\nReference For more details on monitoring Kafka and the metrics to focus on, see the following articles:\n Monitoring Kafka Streams Applications Kafka Monitoring  ","title":"Kafka monitoring","url":"/docs/main/latest/en/setup/backend/backend-kafka-monitoring/"},{"content":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the Layer: KAFKA.\nData flow  The prometheus_JMX_Exporter collect metrics data from Kafka. Note: Running the exporter as a Java agent. OpenTelemetry Collector fetches metrics from prometheus_JMX_Exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup prometheus_JMX_Exporter. This is an example for JMX Exporter configuration kafka-2_0_0.yml. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Kafka Monitoring Kafka monitoring provides multidimensional metrics monitoring of Kafka cluster as Layer: KAFKA Service in the OAP. In each cluster, the kafka brokers are represented as Instance.\nKafka Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Under-Replicated Partitions meter_kafka_under_replicated_partitions Number of under-replicated partitions in the broker. A higher number is a sign of potential issues. Prometheus JMX Exporter   Offline Partitions Count meter_kafka_offline_partitions_count Number of partitions that are offline. Non-zero values indicate a problem. Prometheus JMX Exporter   Partition Count meter_kafka_partition_count Total number of partitions on the broker. Prometheus JMX Exporter   Leader Count meter_kafka_leader_count Number of leader partitions on this broker. Prometheus JMX Exporter   Active Controller Count meter_kafka_active_controller_count The number of active controllers in the cluster. Typically should be 1. Prometheus JMX Exporter   Leader Election Rate meter_kafka_leader_election_rate The rate of leader elections per minute. High rate could be a sign of instability. Prometheus JMX Exporter   Unclean Leader Elections Per Second meter_kafka_unclean_leader_elections_per_second The rate of unclean leader elections per second. Non-zero values indicate a serious problem. 
Prometheus JMX Exporter   Max Lag meter_kafka_max_lag The maximum lag between the leader and followers in terms of messages still needed to be sent. Higher lag indicates delays. Prometheus JMX Exporter    Kafka Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_kafka_broker_cpu_time_total CPU usage in percentage Prometheus JMX Exporter   Memory Usage % meter_kafka_broker_memory_usage_percentage JVM heap memory usage in percentage Prometheus JMX Exporter   Incoming Messages Msg/sec meter_kafka_broker_messages_per_second Rate of incoming messages Prometheus JMX Exporter   Bytes In Bytes/sec meter_kafka_broker_bytes_in_per_second Rate of incoming bytes Prometheus JMX Exporter   Bytes Out Bytes/sec meter_kafka_broker_bytes_out_per_second Rate of outgoing bytes Prometheus JMX Exporter   Replication Bytes In Bytes/sec meter_kafka_broker_replication_bytes_in_per_second Rate of incoming bytes for replication Prometheus JMX Exporter   Replication Bytes Out Bytes/sec meter_kafka_broker_replication_bytes_out_per_second Rate of outgoing bytes for replication Prometheus JMX Exporter   Under-Replicated Partitions Count meter_kafka_broker_under_replicated_partitions Number of under-replicated partitions Prometheus JMX Exporter   Under Min ISR Partition Count Count meter_kafka_broker_under_min_isr_partition_count Number of partitions below the minimum ISR (In-Sync Replicas) Prometheus JMX Exporter   Partition Count Count meter_kafka_broker_partition_count Total number of partitions Prometheus JMX Exporter   Leader Count Count meter_kafka_broker_leader_count Number of partitions for which this broker is the leader Prometheus JMX Exporter   ISR Shrinks Count/sec meter_kafka_broker_isr_shrinks_per_second Rate of ISR (In-Sync Replicas) shrinking Prometheus JMX Exporter   ISR Expands Count/sec meter_kafka_broker_isr_expands_per_second Rate of ISR (In-Sync Replicas) expanding Prometheus JMX Exporter   Max Lag Count meter_kafka_broker_max_lag Maximum lag between the leader and follower for a partition Prometheus JMX Exporter   Purgatory Size Count meter_kafka_broker_purgatory_size Size of purgatory for Produce and Fetch operations Prometheus JMX Exporter   Garbage Collector Count Count/sec meter_kafka_broker_garbage_collector_count Rate of garbage collection cycles Prometheus JMX Exporter   Requests Per Second Req/sec meter_kafka_broker_requests_per_second Rate of requests to the broker Prometheus JMX Exporter   Request Queue Time ms meter_kafka_broker_request_queue_time_ms Average time a request spends in the request queue Prometheus JMX Exporter   Remote Time ms meter_kafka_broker_remote_time_ms Average time taken for a remote operation Prometheus JMX Exporter   Response Queue Time ms meter_kafka_broker_response_queue_time_ms Average time a response spends in the response queue Prometheus JMX Exporter   Response Send Time ms meter_kafka_broker_response_send_time_ms Average time taken to send a response Prometheus JMX Exporter   Network Processor Avg Idle % meter_kafka_broker_network_processor_avg_idle_percent Percentage of idle time for the network processor Prometheus JMX Exporter   Topic Messages In Total Count meter_kafka_broker_topic_messages_in_total Total number of messages per topic Prometheus JMX Exporter   Topic Bytes Out Per Second Bytes/sec meter_kafka_broker_topic_bytesout_per_second Rate of outgoing bytes per topic Prometheus JMX Exporter   Topic Bytes In Per Second Bytes/sec meter_kafka_broker_topic_bytesin_per_second Rate of incoming bytes 
per topic Prometheus JMX Exporter   Topic Fetch Requests Per Second Req/sec meter_kafka_broker_topic_fetch_requests_per_second Rate of fetch requests per topic Prometheus JMX Exporter   Topic Produce Requests Per Second Req/sec meter_kafka_broker_topic_produce_requests_per_second Rate of produce requests per topic Prometheus JMX Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/kafka/kafka-cluster.yaml, /config/otel-rules/kafka/kafka-node.yaml. The Kafka dashboard panel configurations are found in /config/ui-initialized-templates/kafka.\nReference For more details on monitoring Kafka and the metrics to focus on, see the following articles:\n Monitoring Kafka Streams Applications Kafka Monitoring  ","title":"Kafka monitoring","url":"/docs/main/next/en/setup/backend/backend-kafka-monitoring/"},{"content":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the Layer: KAFKA.\nData flow  The prometheus_JMX_Exporter collect metrics data from Kafka. Note: Running the exporter as a Java agent. OpenTelemetry Collector fetches metrics from prometheus_JMX_Exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup prometheus_JMX_Exporter. This is an example for JMX Exporter configuration kafka-2_0_0.yml. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Kafka Monitoring Kafka monitoring provides multidimensional metrics monitoring of Kafka cluster as Layer: KAFKA Service in the OAP. In each cluster, the kafka brokers are represented as Instance.\nKafka Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Under-Replicated Partitions meter_kafka_under_replicated_partitions Number of under-replicated partitions in the broker. A higher number is a sign of potential issues. Prometheus JMX Exporter   Offline Partitions Count meter_kafka_offline_partitions_count Number of partitions that are offline. Non-zero values indicate a problem. Prometheus JMX Exporter   Partition Count meter_kafka_partition_count Total number of partitions on the broker. Prometheus JMX Exporter   Leader Count meter_kafka_leader_count Number of leader partitions on this broker. Prometheus JMX Exporter   Active Controller Count meter_kafka_active_controller_count The number of active controllers in the cluster. Typically should be 1. Prometheus JMX Exporter   Leader Election Rate meter_kafka_leader_election_rate The rate of leader elections per minute. High rate could be a sign of instability. Prometheus JMX Exporter   Unclean Leader Elections Per Second meter_kafka_unclean_leader_elections_per_second The rate of unclean leader elections per second. Non-zero values indicate a serious problem. Prometheus JMX Exporter   Max Lag meter_kafka_max_lag The maximum lag between the leader and followers in terms of messages still needed to be sent. Higher lag indicates delays. 
Prometheus JMX Exporter    Kafka Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_kafka_broker_cpu_time_total CPU usage in percentage Prometheus JMX Exporter   Memory Usage % meter_kafka_broker_memory_usage_percentage JVM heap memory usage in percentage Prometheus JMX Exporter   Incoming Messages Msg/sec meter_kafka_broker_messages_per_second Rate of incoming messages Prometheus JMX Exporter   Bytes In Bytes/sec meter_kafka_broker_bytes_in_per_second Rate of incoming bytes Prometheus JMX Exporter   Bytes Out Bytes/sec meter_kafka_broker_bytes_out_per_second Rate of outgoing bytes Prometheus JMX Exporter   Replication Bytes In Bytes/sec meter_kafka_broker_replication_bytes_in_per_second Rate of incoming bytes for replication Prometheus JMX Exporter   Replication Bytes Out Bytes/sec meter_kafka_broker_replication_bytes_out_per_second Rate of outgoing bytes for replication Prometheus JMX Exporter   Under-Replicated Partitions Count meter_kafka_broker_under_replicated_partitions Number of under-replicated partitions Prometheus JMX Exporter   Under Min ISR Partition Count Count meter_kafka_broker_under_min_isr_partition_count Number of partitions below the minimum ISR (In-Sync Replicas) Prometheus JMX Exporter   Partition Count Count meter_kafka_broker_partition_count Total number of partitions Prometheus JMX Exporter   Leader Count Count meter_kafka_broker_leader_count Number of partitions for which this broker is the leader Prometheus JMX Exporter   ISR Shrinks Count/sec meter_kafka_broker_isr_shrinks_per_second Rate of ISR (In-Sync Replicas) shrinking Prometheus JMX Exporter   ISR Expands Count/sec meter_kafka_broker_isr_expands_per_second Rate of ISR (In-Sync Replicas) expanding Prometheus JMX Exporter   Max Lag Count meter_kafka_broker_max_lag Maximum lag between the leader and follower for a partition Prometheus JMX Exporter   Purgatory Size Count meter_kafka_broker_purgatory_size Size of purgatory for Produce and Fetch operations Prometheus JMX Exporter   Garbage Collector Count Count/sec meter_kafka_broker_garbage_collector_count Rate of garbage collection cycles Prometheus JMX Exporter   Requests Per Second Req/sec meter_kafka_broker_requests_per_second Rate of requests to the broker Prometheus JMX Exporter   Request Queue Time ms meter_kafka_broker_request_queue_time_ms Average time a request spends in the request queue Prometheus JMX Exporter   Remote Time ms meter_kafka_broker_remote_time_ms Average time taken for a remote operation Prometheus JMX Exporter   Response Queue Time ms meter_kafka_broker_response_queue_time_ms Average time a response spends in the response queue Prometheus JMX Exporter   Response Send Time ms meter_kafka_broker_response_send_time_ms Average time taken to send a response Prometheus JMX Exporter   Network Processor Avg Idle % meter_kafka_broker_network_processor_avg_idle_percent Percentage of idle time for the network processor Prometheus JMX Exporter   Topic Messages In Total Count meter_kafka_broker_topic_messages_in_total Total number of messages per topic Prometheus JMX Exporter   Topic Bytes Out Per Second Bytes/sec meter_kafka_broker_topic_bytesout_per_second Rate of outgoing bytes per topic Prometheus JMX Exporter   Topic Bytes In Per Second Bytes/sec meter_kafka_broker_topic_bytesin_per_second Rate of incoming bytes per topic Prometheus JMX Exporter   Topic Fetch Requests Per Second Req/sec meter_kafka_broker_topic_fetch_requests_per_second Rate of fetch requests per topic Prometheus JMX 
Exporter   Topic Produce Requests Per Second Req/sec meter_kafka_broker_topic_produce_requests_per_second Rate of produce requests per topic Prometheus JMX Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/kafka/kafka-cluster.yaml, /config/otel-rules/kafka/kafka-node.yaml. The Kafka dashboard panel configurations are found in /config/ui-initialized-templates/kafka.\nReference For more details on monitoring Kafka and the metrics to focus on, see the following articles:\n Monitoring Kafka Streams Applications Kafka Monitoring  ","title":"Kafka monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-kafka-monitoring/"},{"content":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","title":"Kafka Poll And Invoke","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-kafka/"},{"content":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  
public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","title":"Kafka Poll And Invoke","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-kafka/"},{"content":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","title":"Kafka Poll And Invoke","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-kafka/"},{"content":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  
public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","title":"Kafka Poll And Invoke","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-kafka/"},{"content":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","title":"Kafka Poll And Invoke","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-kafka/"},{"content":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means that the skywalking agent will report the traces, metrics, logs etc. to SkyWalking OAP Server by gPRC protocol.\nAt the same time, SkyWalking also supports kafka-fetcher, so you can report traces, metrics, logs, etc. by kafka.\nBut the skywalking agent does not compile the kafka-reporter feature by default, you need to enable the it.\nSteps   Compile the skywalking agent with feature kafka-reporter.\nFor pecl:\npecl install skywalking_agent Enable the kafka reporter interactively:\n68 source files, building running: phpize Configuring for: PHP Api Version: 20220829 Zend Module Api No: 20220829 Zend Extension Api No: 420220829 enable cargo debug? 
[no] : enable kafka reporter? [no] : yes Or, build from sources:\nphpize ./configure --enable-kafka-reporter make make install   Config php.ini.\nSwitch to use kafka reporter.\n[skywalking_agent] extension = skywalking_agent.so skywalking_agent.reporter_type = kafka skywalking_agent.kafka_bootstrap_servers = 127.0.0.1:9092,127.0.0.2:9092,127.0.0.3:9092 If you want to customize the kafka reporter properties, you can specify them in JSON format:\nskywalking_agent.kafka_producer_config = {\u0026#34;delivery.timeout.ms\u0026#34;: \u0026#34;12000\u0026#34;}   ","title":"Kafka reporter","url":"/docs/skywalking-php/latest/en/reporter/kafka-reporter/"},{"content":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, which means that the skywalking agent will report traces, metrics, logs, etc. to the SkyWalking OAP Server via the gRPC protocol.\nAt the same time, SkyWalking also supports kafka-fetcher, so you can report traces, metrics, logs, etc. via kafka.\nHowever, the skywalking agent does not compile the kafka-reporter feature by default; you need to enable it.\nSteps   Compile the skywalking agent with feature kafka-reporter.\nFor pecl:\npecl install skywalking_agent Enable the kafka reporter interactively:\n68 source files, building running: phpize Configuring for: PHP Api Version: 20220829 Zend Module Api No: 20220829 Zend Extension Api No: 420220829 enable cargo debug? [no] : enable kafka reporter? [no] : yes Or, build from sources:\nphpize ./configure --enable-kafka-reporter make make install   Config php.ini.\nSwitch to use kafka reporter.\n[skywalking_agent] extension = skywalking_agent.so skywalking_agent.reporter_type = kafka skywalking_agent.kafka_bootstrap_servers = 127.0.0.1:9092,127.0.0.2:9092,127.0.0.3:9092 If you want to customize the kafka reporter properties, you can specify them in JSON format:\nskywalking_agent.kafka_producer_config = {\u0026#34;delivery.timeout.ms\u0026#34;: \u0026#34;12000\u0026#34;}   ","title":"Kafka reporter","url":"/docs/skywalking-php/next/en/reporter/kafka-reporter/"},{"content":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, which means that the skywalking agent will report traces, metrics, logs, etc. to the SkyWalking OAP Server via the gRPC protocol.\nAt the same time, SkyWalking also supports kafka-fetcher, so you can report traces, metrics, logs, etc. via kafka.\nHowever, the skywalking agent does not compile the kafka-reporter feature by default; you need to enable it.\nSteps   Compile the skywalking agent with feature kafka-reporter.\nFor pecl:\npecl install skywalking_agent Enable the kafka reporter interactively:\n68 source files, building running: phpize Configuring for: PHP Api Version: 20220829 Zend Module Api No: 20220829 Zend Extension Api No: 420220829 enable cargo debug? [no] : enable kafka reporter? 
[no] : yes Or, build from sources:\nphpize ./configure --enable-kafka-reporter make make install   Config php.ini.\nSwitch to use kafka reporter.\n[skywalking_agent] extension = skywalking_agent.so skywalking_agent.reporter_type = kafka skywalking_agent.kafka_bootstrap_servers = 127.0.0.1:9092,127.0.0.2:9092,127.0.0.3:9092 If you want to custom the kafka reporter properties, you can specify it by JSON format:\nskywalking_agent.kafka_producer_config = {\u0026#34;delivery.timeout.ms\u0026#34;: \u0026#34;12000\u0026#34;}   ","title":"Kafka reporter","url":"/docs/skywalking-php/v0.7.0/en/reporter/kafka-reporter/"},{"content":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the developers and end users understand how the agent works easier.\nMethod Interceptor Method interception is particularly important in SkyWalking Go, as it enables the creation of plugins. In SkyWalking Go, method interception mainly involves the following key points:\n Finding Method: Using AST to find method information in the target code to be enhanced. Modifying Methods: Enhancing the specified methods and embedding interceptor code. Saving and Compiling: Updating the modified files in the compilation arguments.  Finding Method When looking for methods, the SkyWalking Go Agent requires to search according to the provided compilation arguments, which mainly include the following two parts:\n Package information: Based on the package name provided by the arguments, the Agent can find the specific plugin. Go files: When a matching plugin is found, the Agent reads the .go files and uses AST to parse the method information from these source files. When the method information matches the method information required by the plugin for the interception, the agent would consider the method found.  Modifying Methods After finding the method, the SkyWalking Go Agent needs to modify the method implication and embed the interceptor code.\nChange Method Body When intercepting a method, the first thing to do is to modify the method and embed the template code. This code segment includes two method executions:\n Before method execution: Pass in the current method\u0026rsquo;s arguments, instances, and other information. After method execution: Using the defer method, intercept the result parameters after the code execution is completed.  Based on these two methods, the agent can intercept before and after method execution.\nIn order not to affect the line of code execution, this code segment will only be executed in the same line as the first statement in the method. This ensures that when an exception occurs in the framework code execution, the exact location can still be found without being affected by the enhanced code.\nWrite Delegator File After the agent enhances the method body, it needs to implement the above two methods and write them into a single file, called the delegator file. These two methods would do the following:\n Before method execution: Build by the template. Build the context for before and after interception, and pass the parameter information during execution to the interceptor in each plugin. After method execution: Build by the template. Pass the method return value to the interceptor and execute the method.  Copy Files After completing the delegator file, the agent would perform the following copy operations:\n Plugin Code: Copy the Go files containing the interceptors in the plugin to the same level directory as the current framework. 
Plugin Development API Code: Copy the operation APIs required by the interceptors in the plugin to the same level directory as the current framework, such as tracing.  After copying the files, they cannot be immediately added to the compilation parameters, because they may have the same name as the existing framework code. Therefore, we need to perform some rewriting operations, which include the following parts:\n Types: Rename created structures, interfaces, methods, and other types by adding a unified prefix. Static Methods: Add a prefix to non-instance methods. Static methods do not need to be rewritten since they have already been processed in the types. Variables: Add a prefix to global variables. It\u0026rsquo;s not necessary to add a prefix to variables inside methods because they can ensure no conflicts would arise and are helpful for debugging.  In the Tracing API, we can see several methods, such as:\nvar ( errParameter = operator.NewError(\u0026#34;parameter are nil\u0026#34;) ) func CreateLocalSpan(operationName string, opts ...SpanOption) (s Span, err error) type SpanOption interface { Apply(interface{}) } After performed rewriting operations, they would become:\nvar ( skywalkingOperatorVarTracingerrParameter = skywalkingOperatorStaticMethodOperatorNewError(\u0026#34;parameter are nil\u0026#34;) ) func skywalkingOperatorStaticMethodTracingCreateLocalSpan(operationName string, opts ...skywalkingOperatorTypeTracingSpanOption) (s skywalkingOperatorTypeTracingSpan, err error) type skywalkingOperatorTypeTracingSpanOption interface { Apply(interface{}) } Saving and Compiling After the above steps are completed, the agent needs to save the modified files and add them to the compilation parameters.\nAt this point, when the framework executes the enhanced method, it can have the following capabilities:\n Execute Plugin Code: Custom code can be embedded before and after the method execution, and real-time parameter information can be obtained. Operate Agent: By calling the Agent API, interaction with the Agent Core can be achieved, enabling functions such as distributed tracing.  Propagation Context SkyWalking uses a new and internal mechanism to propagate context(e.g. tracing context) instead of relying on go native context.Context. This reduces the requirement for the target codes.\nContext Propagation between Methods In the agent, it would enhance the g structure in the runtime package. The g structure in Golang represents the internal data of the current goroutine. By enhancing this structure and using the runtime.getg() method, we can obtain the enhanced data in the current structure in real-time.\nEnhancement includes the following steps:\n Add Attributes to g: Add a new field to the g struct, and value as interface{}. Export Methods: Export methods for real-time setting and getting of custom field values in the current goroutine through go:linkname. Import methods: In the Agent Core, import the setting and getting methods for custom fields.  Through these, the agent has a shared context in any place within the same goroutine, similar to Java\u0026rsquo;s Thread Local.\nContext Propagation between Goroutines Besides using g object as the in-goroutine context propagation, SkyWalking builds a mechanism to propagate context between Goroutines.\nWhen a new goroutine is started on an existing goroutine, the runtime.newproc1 method is called to create a new goroutine based on the existing one. The agent would do context-copy from the previous goroutine to the newly created goroutine. 
The new context in the goroutine only shares limited information to help continue tracing.\nThe specific operation process is as follows:\n Write the copy method: Create a method for copying data from the previous goroutine. Insert code into newproc1: Insert the defer code, intercept the g objects before and after the execution, and call the copy method to assign values to the custom fields' data.  Agent with Dependency Since SkyWalking Go Agent is based on compile-time enhancement, it cannot introduce third-party modules. For example, when SkyWalking Agent communicates with OAP, it needs to exchange data through the gRPC protocol. If the user does not introduce the gRPC module, it cannot be completed.\nTo resolve this problem, users need to introduce relevant modules to complete the basic dependency functions. This is why import _ \u0026quot;github.com/apache/skywalking-go\u0026quot; is required. The key modules that users currently need to introduce include:\n uuid: Used to generate UUIDs, mainly for TraceID generation. errors: To encapsulate error content. gRPC: The basic library used for communication between SkyWalking Go Agent and the Server. skywalking-goapi: The data protocol for communication between Agent and Server in SkyWalking.  Agent Core Copy To simplify the complexity of using Agent, the SkyWalking Go module introduced by users only contains the user-facing API and code imports. The Agent Core code would be dynamically added during hybrid compilation, so when the Agent releases new features, users only need to upgrade the Agent enhancement program without modifying the references in the program.\nCode Import You can see a lot of imports.go files anywhere in the SkyWalking Go, such as imports.go in the root directory, but there is no actual code. This is because, during hybrid compilation, if the code to be compiled references other libraries, such as os, fmt, etc., they need to be referenced through the importcfg file during compilation.\nThe content of the importcfg file is shown below, which specifies the package dependency information required for all Go files to be compiled in the current package path.\npackagefile errors=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b006/_pkg_.a packagefile internal/itoa=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b027/_pkg_.a packagefile internal/oserror=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b035/_pkg_.a So when the file is copied and added to the compilation process, the relevant dependency libraries need to be declared in importcfg. Therefore, by predefining import in the project, the compiler can be forced to introduce the relevant libraries during compilation, thus completing the dynamic enhancement operation.\nPlugin with Agent Core As mentioned in the previous section, it is not possible to dynamically add dependencies between modules. Agent can only modify the importcfg file to reference dependencies if we are sure that the previous dependencies have already been loaded, but this is often impractical. For example, Agent cannot introduce dependencies from the plugin code into the Agent Core, because the plugin is unaware of the Agent\u0026rsquo;s existence. This raises a question: how can the agent enable communication between plugins and Agent Core?\nCurrently, the agent employs the following method: a global object is introduced in the runtime package, provided by Agent Core. 
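A minimal, hypothetical sketch of such a shared global object follows; the identifiers are illustrative, not the agent's actual names, and in practice the agent injects this code into the rewritten runtime package during compilation rather than shipping it as an ordinary file:

// Hypothetical file the agent writes into the rewritten runtime package.
package runtime

// skywalkingOperator holds the object provided by Agent Core. It is deliberately
// typed as interface{} so the runtime package needs no knowledge of Agent Core types.
var skywalkingOperator interface{}

// setSkywalkingOperator is reached (via go:linkname) by Agent Core during its initialization.
func setSkywalkingOperator(op interface{}) { skywalkingOperator = op }

// getSkywalkingOperator is reached by every plugin that needs to call into Agent Core.
func getSkywalkingOperator() interface{} { return skywalkingOperator }

Because the variable lives in the runtime package, which every Go program links against, both Agent Core and the copied plugin code can reach it without importing each other.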
When a plugin needs to interact with Agent Core, it simply searches for this global object from runtime package. The specific steps are as follows:\n Global object definition: Add a global variable when the runtime package is loaded and provide corresponding set and get methods. Set the variable when the Agent loads: When the Agent Core is copied and enhanced, import the method for setting the global variable and initialize the object in the global variable. Plugins: When the plugin is built, import the methods for reading the global variables and APIs. At this point, we can access the object set in Agent Core and use the defined interface for the plugin to access methods in Agent Core.  Limitation Since the communication between the plugin API and Agent Core is through an interface, and the plugin API is copied in each plugin, they can only transfer basic data types or any(interface{}) type. The reason is that when additional types are transferred, agent would be copied multiple times, so the types transferred in the plugin are not consistent with the types in Agent Core, as the types also need to be defined multiple times.\nTherefore, when communicating, they only pass structured data through any type, and when the Agent Core or plugin obtains the data, a type cast is simply required.\nDebugging Based on the introductions in the previous sections, both Agent Core and plugin code are dynamically copied/modified into the target package. So, how can we debug the program during development to identify issues?\nOur current approach consists of the following steps:\n Inform the source code location during flag: Enhance the debug parameters during compilation and inform the system path, for example: -toolexec \u0026quot;/path/to/agent -debug /path/to/code\u0026quot; Get the original file path: Find the absolute location of the source code of the file to be copied based on the rules. Introduce the //line directive: Add the //line directive to the copied target file to inform the compiler of the location of the original file after copying.  At this point, when the program is executed, developer can find the original file to be copied in the source code.\n","title":"Key Principle","url":"/docs/skywalking-go/latest/en/concepts-and-designs/key-principles/"},{"content":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the developers and end users understand how the agent works easier.\nMethod Interceptor Method interception is particularly important in SkyWalking Go, as it enables the creation of plugins. In SkyWalking Go, method interception mainly involves the following key points:\n Finding Method: Using AST to find method information in the target code to be enhanced. Modifying Methods: Enhancing the specified methods and embedding interceptor code. Saving and Compiling: Updating the modified files in the compilation arguments.  Finding Method When looking for methods, the SkyWalking Go Agent requires to search according to the provided compilation arguments, which mainly include the following two parts:\n Package information: Based on the package name provided by the arguments, the Agent can find the specific plugin. Go files: When a matching plugin is found, the Agent reads the .go files and uses AST to parse the method information from these source files. When the method information matches the method information required by the plugin for the interception, the agent would consider the method found.  
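As an illustration of this lookup, the following self-contained sketch uses the standard go/parser and go/ast packages to locate a function declaration by name. It is deliberately simplified: the real matching also checks receiver types, parameter lists, and the enclosing package, and the file and method names used here are only examples.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

// findFunc reports whether a function named target is declared in the given Go source file.
func findFunc(filename, target string) (bool, error) {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
	if err != nil {
		return false, err
	}
	found := false
	ast.Inspect(file, func(n ast.Node) bool {
		if fn, ok := n.(*ast.FuncDecl); ok && fn.Name.Name == target {
			found = true
			return false // stop descending once the method is found
		}
		return true
	})
	return found, nil
}

func main() {
	ok, err := findFunc("server.go", "ServeHTTP") // illustrative file and method names
	fmt.Println(ok, err)
}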
Modifying Methods After finding the method, the SkyWalking Go Agent needs to modify the method implication and embed the interceptor code.\nChange Method Body When intercepting a method, the first thing to do is to modify the method and embed the template code. This code segment includes two method executions:\n Before method execution: Pass in the current method\u0026rsquo;s arguments, instances, and other information. After method execution: Using the defer method, intercept the result parameters after the code execution is completed.  Based on these two methods, the agent can intercept before and after method execution.\nIn order not to affect the line of code execution, this code segment will only be executed in the same line as the first statement in the method. This ensures that when an exception occurs in the framework code execution, the exact location can still be found without being affected by the enhanced code.\nWrite Delegator File After the agent enhances the method body, it needs to implement the above two methods and write them into a single file, called the delegator file. These two methods would do the following:\n Before method execution: Build by the template. Build the context for before and after interception, and pass the parameter information during execution to the interceptor in each plugin. After method execution: Build by the template. Pass the method return value to the interceptor and execute the method.  Copy Files After completing the delegator file, the agent would perform the following copy operations:\n Plugin Code: Copy the Go files containing the interceptors in the plugin to the same level directory as the current framework. Plugin Development API Code: Copy the operation APIs required by the interceptors in the plugin to the same level directory as the current framework, such as tracing.  After copying the files, they cannot be immediately added to the compilation parameters, because they may have the same name as the existing framework code. Therefore, we need to perform some rewriting operations, which include the following parts:\n Types: Rename created structures, interfaces, methods, and other types by adding a unified prefix. Static Methods: Add a prefix to non-instance methods. Static methods do not need to be rewritten since they have already been processed in the types. Variables: Add a prefix to global variables. It\u0026rsquo;s not necessary to add a prefix to variables inside methods because they can ensure no conflicts would arise and are helpful for debugging.  
In the Tracing API, we can see several methods, such as:\nvar ( errParameter = operator.NewError(\u0026#34;parameter are nil\u0026#34;) ) func CreateLocalSpan(operationName string, opts ...SpanOption) (s Span, err error) type SpanOption interface { Apply(interface{}) } After performed rewriting operations, they would become:\nvar ( skywalkingOperatorVarTracingerrParameter = skywalkingOperatorStaticMethodOperatorNewError(\u0026#34;parameter are nil\u0026#34;) ) func skywalkingOperatorStaticMethodTracingCreateLocalSpan(operationName string, opts ...skywalkingOperatorTypeTracingSpanOption) (s skywalkingOperatorTypeTracingSpan, err error) type skywalkingOperatorTypeTracingSpanOption interface { Apply(interface{}) } Saving and Compiling After the above steps are completed, the agent needs to save the modified files and add them to the compilation parameters.\nAt this point, when the framework executes the enhanced method, it can have the following capabilities:\n Execute Plugin Code: Custom code can be embedded before and after the method execution, and real-time parameter information can be obtained. Operate Agent: By calling the Agent API, interaction with the Agent Core can be achieved, enabling functions such as distributed tracing.  Propagation Context SkyWalking uses a new and internal mechanism to propagate context(e.g. tracing context) instead of relying on go native context.Context. This reduces the requirement for the target codes.\nContext Propagation between Methods In the agent, it would enhance the g structure in the runtime package. The g structure in Golang represents the internal data of the current goroutine. By enhancing this structure and using the runtime.getg() method, we can obtain the enhanced data in the current structure in real-time.\nEnhancement includes the following steps:\n Add Attributes to g: Add a new field to the g struct, and value as interface{}. Export Methods: Export methods for real-time setting and getting of custom field values in the current goroutine through go:linkname. Import methods: In the Agent Core, import the setting and getting methods for custom fields.  Through these, the agent has a shared context in any place within the same goroutine, similar to Java\u0026rsquo;s Thread Local.\nContext Propagation between Goroutines Besides using g object as the in-goroutine context propagation, SkyWalking builds a mechanism to propagate context between Goroutines.\nWhen a new goroutine is started on an existing goroutine, the runtime.newproc1 method is called to create a new goroutine based on the existing one. The agent would do context-copy from the previous goroutine to the newly created goroutine. The new context in the goroutine only shares limited information to help continues tracing.\nThe specific operation process is as follows:\n Write the copy method: Create a method for copying data from the previous goroutine. Insert code into newproc1: Insert the defer code, intercept the g objects before and after the execution, and call the copy method to assign values to the custom fields' data.  Agent with Dependency Since SkyWalking Go Agent is based on compile-time enhancement, it cannot introduce third-party modules. For example, when SkyWalking Agent communicates with OAP, it needs to exchange data through the gRPC protocol. If the user does not introduce the gRPC module, it cannot be completed.\nDue to resolve this problem, users need to introduce relevant modules to complete the basic dependency functions. 
This is why import _ \u0026quot;github.com/apache/skywalking-go\u0026quot; is required. The main key modules that users currently need to introduce include:\n uuid: Used to generate UUIDs, mainly for TraceID generation. errors: To encapsulate error content. gRPC: The basic library used for communication between SkyWalking Go Agent and the Server. skywalking-goapi: The data protocol for communication between Agent and Server in SkyWalking.  Agent Core Copy To simplify the complexity of using Agent, the SkyWalking Go introduced by users only contains the user usage API and code import. The Agent Core code would be dynamically added during hybrid compilation, so when the Agent releases new features, users only need to upgrade the Agent enhancement program without modifying the references in the program.\nCode Import You can see a lot of imports.go files anywhere in the SkyWalking Go, such as imports.go in the root directory, but there is no actual code. This is because, during hybrid compilation, if the code to be compiled references other libraries, such as os, fmt, etc., they need to be referenced through the importcfg file during compilation.\nThe content of the importcfg file is shown below, which specifies the package dependency information required for all Go files to be compiled in the current package path.\npackagefile errors=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b006/_pkg_.a packagefile internal/itoa=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b027/_pkg_.a packagefile internal/oserror=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b035/_pkg_.a So when the file is copied and added to the compilation process, the relevant dependency libraries need to be declared in importcfg. Therefore, by predefining import in the project, the compiler can be forced to introduce the relevant libraries during compilation, thus completing the dynamic enhancement operation.\nPlugin with Agent Core As mentioned in the previous section, it is not possible to dynamically add dependencies between modules. Agent can only modify the importcfg file to reference dependencies if we are sure that the previous dependencies have already been loaded, but this is often impractical. For example, Agent cannot introduce dependencies from the plugin code into the Agent Core, because the plugin is unaware of the Agent\u0026rsquo;s existence. This raises a question: how can agent enable communication between plugins and Agent Core?\nCurrently, agent employ the following method: a global object is introduced in the runtime package, provided by Agent Core. When a plugin needs to interact with Agent Core, it simply searches for this global object from runtime package. The specific steps are as follows:\n Global object definition: Add a global variable when the runtime package is loaded and provide corresponding set and get methods. Set the variable when the Agent loads: When the Agent Core is copied and enhanced, import the method for setting the global variable and initialize the object in the global variable. Plugins: When the plugin is built, import the methods for reading the global variables and APIs. At this point, we can access the object set in Agent Core and use the defined interface for the plugin to access methods in Agent Core.  Limitation Since the communication between the plugin API and Agent Core is through an interface, and the plugin API is copied in each plugin, they can only transfer basic data types or any(interface{}) type. 
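A small sketch of this convention, with hypothetical names: the plugin-side API and Agent Core agree only on interfaces whose methods take and return interface{}, and each side casts what it receives.

package main

import "fmt"

// Operator is the only shape shared across the boundary; nothing richer than
// basic types and interface{} crosses it.
type Operator interface {
	Tracing() interface{}
}

// TracingOperator is what the plugin-side API expects the opaque value to support.
type TracingOperator interface {
	CreateLocalSpan(operationName string) (interface{}, error)
}

// noopOperator stands in for the object Agent Core would normally provide.
type noopOperator struct{}

func (noopOperator) Tracing() interface{} { return nil }

// createSpan shows the plugin-side pattern: fetch the opaque value, then type-cast it.
func createSpan(op Operator, name string) (interface{}, error) {
	tracer, ok := op.Tracing().(TracingOperator)
	if !ok {
		return nil, fmt.Errorf("tracing is not provided by this operator")
	}
	return tracer.CreateLocalSpan(name)
}

func main() {
	_, err := createSpan(noopOperator{}, "demo")
	fmt.Println(err)
}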
The reason is that when additional types are transferred, agent would be copied multiple times, so the types transferred in the plugin are not consistent with the types in Agent Core, as the types also need to be defined multiple times.\nTherefore, when communicating, they only pass structured data through any type, and when the Agent Core or plugin obtains the data, a type cast is simply required.\nDebugging Based on the introductions in the previous sections, both Agent Core and plugin code are dynamically copied/modified into the target package. So, how can we debug the program during development to identify issues?\nOur current approach consists of the following steps:\n Inform the source code location during flag: Enhance the debug parameters during compilation and inform the system path, for example: -toolexec \u0026quot;/path/to/agent -debug /path/to/code\u0026quot; Get the original file path: Find the absolute location of the source code of the file to be copied based on the rules. Introduce the //line directive: Add the //line directive to the copied target file to inform the compiler of the location of the original file after copying.  At this point, when the program is executed, developer can find the original file to be copied in the source code.\n","title":"Key Principle","url":"/docs/skywalking-go/next/en/concepts-and-designs/key-principles/"},{"content":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the developers and end users understand how the agent works easier.\nMethod Interceptor Method interception is particularly important in SkyWalking Go, as it enables the creation of plugins. In SkyWalking Go, method interception mainly involves the following key points:\n Finding Method: Using AST to find method information in the target code to be enhanced. Modifying Methods: Enhancing the specified methods and embedding interceptor code. Saving and Compiling: Updating the modified files in the compilation arguments.  Finding Method When looking for methods, the SkyWalking Go Agent requires to search according to the provided compilation arguments, which mainly include the following two parts:\n Package information: Based on the package name provided by the arguments, the Agent can find the specific plugin. Go files: When a matching plugin is found, the Agent reads the .go files and uses AST to parse the method information from these source files. When the method information matches the method information required by the plugin for the interception, the agent would consider the method found.  Modifying Methods After finding the method, the SkyWalking Go Agent needs to modify the method implication and embed the interceptor code.\nChange Method Body When intercepting a method, the first thing to do is to modify the method and embed the template code. This code segment includes two method executions:\n Before method execution: Pass in the current method\u0026rsquo;s arguments, instances, and other information. After method execution: Using the defer method, intercept the result parameters after the code execution is completed.  Based on these two methods, the agent can intercept before and after method execution.\nIn order not to affect the line of code execution, this code segment will only be executed in the same line as the first statement in the method. 
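For illustration only, an enhanced method might end up looking roughly like the sketch below. Every identifier is hypothetical (the real template and delegator functions are generated), and the two inserted calls share one line with the first statement so that the original line numbers stay unchanged.

package main

import "fmt"

// invocation carries data between the before and after hooks; hypothetical type.
type invocation struct{ args []interface{} }

func beforeHandle(args ...interface{}) *invocation {
	fmt.Println("before:", args) // a real interceptor would build tracing context here
	return &invocation{args: args}
}

func afterHandle(inv *invocation, result *string, err *error) {
	fmt.Println("after:", *result, *err) // a real interceptor receives the return values
}

// handle is a stand-in for a framework method picked up by a plugin.
func handle(name string) (result string, err error) {
	inv := beforeHandle(name); defer afterHandle(inv, &result, &err)
	result = "hello " + name // original method body, untouched
	return result, nil
}

func main() { fmt.Println(handle("skywalking")) }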
This ensures that when an exception occurs in the framework code execution, the exact location can still be found without being affected by the enhanced code.\nWrite Delegator File After the agent enhances the method body, it needs to implement the above two methods and write them into a single file, called the delegator file. These two methods would do the following:\n Before method execution: Build by the template. Build the context for before and after interception, and pass the parameter information during execution to the interceptor in each plugin. After method execution: Build by the template. Pass the method return value to the interceptor and execute the method.  Copy Files After completing the delegator file, the agent would perform the following copy operations:\n Plugin Code: Copy the Go files containing the interceptors in the plugin to the same level directory as the current framework. Plugin Development API Code: Copy the operation APIs required by the interceptors in the plugin to the same level directory as the current framework, such as tracing.  After copying the files, they cannot be immediately added to the compilation parameters, because they may have the same name as the existing framework code. Therefore, we need to perform some rewriting operations, which include the following parts:\n Types: Rename created structures, interfaces, methods, and other types by adding a unified prefix. Static Methods: Add a prefix to non-instance methods. Static methods do not need to be rewritten since they have already been processed in the types. Variables: Add a prefix to global variables. It\u0026rsquo;s not necessary to add a prefix to variables inside methods because they can ensure no conflicts would arise and are helpful for debugging.  In the Tracing API, we can see several methods, such as:\nvar ( errParameter = operator.NewError(\u0026#34;parameter are nil\u0026#34;) ) func CreateLocalSpan(operationName string, opts ...SpanOption) (s Span, err error) type SpanOption interface { Apply(interface{}) } After performed rewriting operations, they would become:\nvar ( skywalkingOperatorVarTracingerrParameter = skywalkingOperatorStaticMethodOperatorNewError(\u0026#34;parameter are nil\u0026#34;) ) func skywalkingOperatorStaticMethodTracingCreateLocalSpan(operationName string, opts ...skywalkingOperatorTypeTracingSpanOption) (s skywalkingOperatorTypeTracingSpan, err error) type skywalkingOperatorTypeTracingSpanOption interface { Apply(interface{}) } Saving and Compiling After the above steps are completed, the agent needs to save the modified files and add them to the compilation parameters.\nAt this point, when the framework executes the enhanced method, it can have the following capabilities:\n Execute Plugin Code: Custom code can be embedded before and after the method execution, and real-time parameter information can be obtained. Operate Agent: By calling the Agent API, interaction with the Agent Core can be achieved, enabling functions such as distributed tracing.  Propagation Context SkyWalking uses a new and internal mechanism to propagate context(e.g. tracing context) instead of relying on go native context.Context. This reduces the requirement for the target codes.\nContext Propagation between Methods In the agent, it would enhance the g structure in the runtime package. The g structure in Golang represents the internal data of the current goroutine. 
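Before walking through the enhancement steps below, a hypothetical sketch of the accessors that end up exported from the patched runtime may help. None of these symbols exist in a stock Go toolchain; they are assumed to be generated once the agent has rewritten the g struct, and an empty .s file is typically required in a package that declares body-less functions.

package gls

import (
	_ "unsafe" // required so the go:linkname directives below are honored
)

//go:linkname getGoroutineLocal runtime.getSkywalkingGLS
func getGoroutineLocal() interface{}

//go:linkname setGoroutineLocal runtime.setSkywalkingGLS
func setGoroutineLocal(v interface{})

// Get returns the context attached to the current goroutine, similar to a Java ThreadLocal.
func Get() interface{} { return getGoroutineLocal() }

// Set attaches a context value to the current goroutine.
func Set(v interface{}) { setGoroutineLocal(v) }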
By enhancing this structure and using the runtime.getg() method, we can obtain the enhanced data in the current structure in real-time.\nEnhancement includes the following steps:\n Add Attributes to g: Add a new field to the g struct, and value as interface{}. Export Methods: Export methods for real-time setting and getting of custom field values in the current goroutine through go:linkname. Import methods: In the Agent Core, import the setting and getting methods for custom fields.  Through these, the agent has a shared context in any place within the same goroutine, similar to Java\u0026rsquo;s Thread Local.\nContext Propagation between Goroutines Besides using g object as the in-goroutine context propagation, SkyWalking builds a mechanism to propagate context between Goroutines.\nWhen a new goroutine is started on an existing goroutine, the runtime.newproc1 method is called to create a new goroutine based on the existing one. The agent would do context-copy from the previous goroutine to the newly created goroutine. The new context in the goroutine only shares limited information to help continues tracing.\nThe specific operation process is as follows:\n Write the copy method: Create a method for copying data from the previous goroutine. Insert code into newproc1: Insert the defer code, intercept the g objects before and after the execution, and call the copy method to assign values to the custom fields' data.  Agent with Dependency Since SkyWalking Go Agent is based on compile-time enhancement, it cannot introduce third-party modules. For example, when SkyWalking Agent communicates with OAP, it needs to exchange data through the gRPC protocol. If the user does not introduce the gRPC module, it cannot be completed.\nDue to resolve this problem, users need to introduce relevant modules to complete the basic dependency functions. This is why import _ \u0026quot;github.com/apache/skywalking-go\u0026quot; is required. The main key modules that users currently need to introduce include:\n uuid: Used to generate UUIDs, mainly for TraceID generation. errors: To encapsulate error content. gRPC: The basic library used for communication between SkyWalking Go Agent and the Server. skywalking-goapi: The data protocol for communication between Agent and Server in SkyWalking.  Agent Core Copy To simplify the complexity of using Agent, the SkyWalking Go introduced by users only contains the user usage API and code import. The Agent Core code would be dynamically added during hybrid compilation, so when the Agent releases new features, users only need to upgrade the Agent enhancement program without modifying the references in the program.\nCode Import You can see a lot of imports.go files anywhere in the SkyWalking Go, such as imports.go in the root directory, but there is no actual code. 
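An imports.go of this kind is nothing more than a list of blank imports; a hypothetical example is shown below, with illustrative module paths matching the modules listed earlier.

// imports.go: no executable code, only blank imports.
// Their sole purpose is to make the Go toolchain list these packages in importcfg,
// so the code copied in during hybrid compilation can link against them.
package skywalking // illustrative package name

import (
	_ "github.com/google/uuid" // TraceID generation (illustrative path)
	_ "github.com/pkg/errors"  // error wrapping (illustrative path)
	_ "google.golang.org/grpc" // agent-to-OAP transport
)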
This is because, during hybrid compilation, if the code to be compiled references other libraries, such as os, fmt, etc., they need to be referenced through the importcfg file during compilation.\nThe content of the importcfg file is shown below, which specifies the package dependency information required for all Go files to be compiled in the current package path.\npackagefile errors=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b006/_pkg_.a packagefile internal/itoa=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b027/_pkg_.a packagefile internal/oserror=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b035/_pkg_.a So when the file is copied and added to the compilation process, the relevant dependency libraries need to be declared in importcfg. Therefore, by predefining import in the project, the compiler can be forced to introduce the relevant libraries during compilation, thus completing the dynamic enhancement operation.\nPlugin with Agent Core As mentioned in the previous section, it is not possible to dynamically add dependencies between modules. Agent can only modify the importcfg file to reference dependencies if we are sure that the previous dependencies have already been loaded, but this is often impractical. For example, Agent cannot introduce dependencies from the plugin code into the Agent Core, because the plugin is unaware of the Agent\u0026rsquo;s existence. This raises a question: how can agent enable communication between plugins and Agent Core?\nCurrently, agent employ the following method: a global object is introduced in the runtime package, provided by Agent Core. When a plugin needs to interact with Agent Core, it simply searches for this global object from runtime package. The specific steps are as follows:\n Global object definition: Add a global variable when the runtime package is loaded and provide corresponding set and get methods. Set the variable when the Agent loads: When the Agent Core is copied and enhanced, import the method for setting the global variable and initialize the object in the global variable. Plugins: When the plugin is built, import the methods for reading the global variables and APIs. At this point, we can access the object set in Agent Core and use the defined interface for the plugin to access methods in Agent Core.  Limitation Since the communication between the plugin API and Agent Core is through an interface, and the plugin API is copied in each plugin, they can only transfer basic data types or any(interface{}) type. The reason is that when additional types are transferred, agent would be copied multiple times, so the types transferred in the plugin are not consistent with the types in Agent Core, as the types also need to be defined multiple times.\nTherefore, when communicating, they only pass structured data through any type, and when the Agent Core or plugin obtains the data, a type cast is simply required.\nDebugging Based on the introductions in the previous sections, both Agent Core and plugin code are dynamically copied/modified into the target package. 
So, how can we debug the program during development to identify issues?\nOur current approach consists of the following steps:\n Inform the source code location via a flag: Enhance the debug parameters during compilation and inform the system path, for example: -toolexec \u0026quot;/path/to/agent -debug /path/to/code\u0026quot; Get the original file path: Find the absolute location of the source code of the file to be copied based on the rules. Introduce the //line directive: Add the //line directive to the copied target file to inform the compiler of the location of the original file after copying.  At this point, when the program is executed, developers can find the original file that was copied in the source code.\n","title":"Key Principle","url":"/docs/skywalking-go/v0.4.0/en/concepts-and-designs/key-principles/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to get meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up kube-state-metrics. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. 
K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s 
k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/latest/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring Kubernetes is an open-source container-orchestration system for automating computer application deployment, scaling, and management. It was originally designed by Google and is now maintained by the Cloud Native Computing Foundation. It aims to provide a \u0026ldquo;platform for automating deployment, scaling, and operations of application containers across clusters of hosts\u0026rdquo;. It works with a range of container tools, including Docker.\nNowadays, Kubernetes is the fundamental infrastructure for cloud native applications. SkyWalking provides the following ways to monitor deployments on Kubernetes.\n Use kube-state-metrics (KSM) and cAdvisor to collect metrics of Kubernetes resources, such as CPU, service, pod, and node. Read kube-state-metrics and cAdvisor setup guide for more details. Rover is a SkyWalking native eBPF agent to collect network Access Logs to support topology-aware and metrics analysis. Meanwhile, due to the power of eBPF, it could profile running services written by C++, Rust, Golang, etc. Read Rover setup guide for more details.  SkyWalking deeply integrates with Kubernetes to help users understand the status of their applications on Kubernetes. 
Cillium with Hubble is in our v10 plan.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/next/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus GRPC Exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  
k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/k8s-cluster.yaml,/config/otel-oc-rules/k8s-node.yaml, /config/otel-oc-rules/k8s-service.yaml.\nThe K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.1.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod 
Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s-cluster.yaml,/config/otel-rules/k8s-node.yaml, /config/otel-rules/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.2.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. 
OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m 
k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s-cluster.yaml,/config/otel-rules/k8s-node.yaml, /config/otel-rules/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.3.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi 
k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . 
For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s 
kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. 
For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi 
k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. 
Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total 
usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring from kube-state-metrics and cAdvisor SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  
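As a concrete starting point, the Collector side of this setup can look roughly like the sketch below: a Prometheus receiver that scrapes kube-state-metrics and the kubelet's cAdvisor endpoint, plus an OTLP/gRPC exporter pointed at the OAP. This is only an illustrative outline, not the officially tested configuration from the showcase; the service addresses, scrape jobs, OAP endpoint, and any relabeling required by the bundled MAL rules are placeholders and assumptions.

```yaml
# Illustrative OpenTelemetry Collector config. Addresses, jobs, and auth settings
# are placeholders; use the showcase repository for the officially tested setup.
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: kube-state-metrics
          static_configs:
            # assumed in-cluster kube-state-metrics service address
            - targets: ["kube-state-metrics.kube-system.svc:8080"]
        - job_name: cadvisor
          # cAdvisor metrics are exposed by the kubelet on each node
          scheme: https
          metrics_path: /metrics/cadvisor
          kubernetes_sd_configs:
            - role: node
          tls_config:
            insecure_skip_verify: true
          bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
processors:
  batch: {}
exporters:
  otlp:
    # SkyWalking OAP gRPC endpoint (11800 by default)
    endpoint: "skywalking-oap.skywalking.svc:11800"
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
```

On the OAP side, the OpenTelemetry receiver module (receiver-otel in application.yml) must also be enabled with the matching K8s rule files; the exact option names vary between the versions indexed here, so check the OpenTelemetry receiver documentation for your release.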
Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the 
maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The total memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observability into service status and resources from Kubernetes. A K8s Service is modeled as a Service in OAP and lands on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per-container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml, /config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring from kube-state-metrics and cAdvisor","url":"/docs/main/next/en/setup/backend/backend-k8s-monitoring-metrics-cadvisor/"},{"content":"Kubernetes (K8s) monitoring from Rover SkyWalking uses the SkyWalking Rover system to collect access logs from Kubernetes clusters and hands them over to the OAL system for metrics and entity analysis.\nData flow  SkyWalking Rover monitors access log data from K8s and sends it to the OAP. The SkyWalking OAP Server receives access logs from Rover through gRPC, analyzes them to generate entities, and uses OAL to generate metrics.  Setup  Set up Rover in the Kubernetes cluster and enable the access log service. Set up the eBPF receiver module with the following configuration.  receiver-ebpf:\n  selector: ${SW_RECEIVER_EBPF:default}\n  default:\nGenerated Entities SkyWalking receives the access logs from Rover and analyzes the Kubernetes connection information to parse out the following corresponding entities:\n Service Service Instance Service Endpoint Service Relation Service Instance Relation Service Endpoint Relation  Generate Metrics For each of the above-mentioned entities, metrics such as connection, transmission, and protocol can be analyzed.\nConnection Metrics Record the relevant metrics for every service establishing/closing connections with other services.\n   Name Unit Description     Connect CPM Count Total Connect to other Service counts per minute.   
Connect Duration Nanoseconds Total Connect to other Service use duration.   Connect Success CPM Count Success to connect to other Service counts per minutes.   Accept CPM Count Accept new connection from other Service counts per minutes.   Accept Duration Nanoseconds Total accept new connection from other Service use duration.   Close CPM Count Close one connection counts per minutes.   Close Duration Nanoseconds Total Close connections use duration.    Transfer Metrics Record the basic information and L2-L4 layer details for each syscall made during network requests by every service to other services.\nRead Data from Connection    Name Unit Description     Read CPM Count Read from connection counts per minutes.   Read Duration Nanoseconds Total read data use duration.   Read Package CPM Count Total read TCP Package count per minutes.   Read Package Size Bytes Total read TCP package size per minutes.   Read Layer 4 Duration Nanoseconds Total read data on the Layer 4 use duration.   Read Layer 3 Duration Nanoseconds Total read data on the Layer 3 use duration.   Read Layer 3 Recv Duration Nanoseconds Total read data on the Layer 3 receive use duration.   Read Layer 3 Local Duration Nanoseconds Total read data on the Layer 3 local use duration.   Read Package To Queue Duration Nanoseconds Total duration between TCP package received and send to Queue.   Read Package From Queue Duration Nanoseconds Total duration between send to Queue and receive from Queue.   Read Net Filter CPM Count Total Net Filtered count when read data.   Read Net Filter Duration Nanoseconds Total Net Filtered use duration.    Write Data to Connection    Name Unit Description     Write CPM Count Write to connection counts per minutes.   Write Duration Nanoseconds Total write data to connection use duration.   Write Package CPM Count Total write TCP Package count per minutes.   Write Package Size Bytes Total write TCP Package size per minutes.   Write L4 Duration Nanoseconds Total write data to connection Layer 4 use duration.   Write L3 Duration Nanoseconds Total write data to connection Layer 3 use duration.   Write L3 Local Duration Nanoseconds Total write data to the connection Layer 3 Local use duration.   Write L3 Output Duration Nanoseconds Total write data to the connection Layer 3 Output use duration.   Write L2 Duration Nanoseconds Total write data to connection Layer 2 use duration.   Write L2 Ready Send Duration Nanoseconds Total write data to the connection Layer 2 ready send data queue use duration.   Write L2 Send NetDevice Duration Nanoseconds Total write data to the connection Layer 2 send data to net device use duration.    Protocol Based on each transfer data analysis, extract the information of the 7-layer network protocol.\nHTTP/1.x or HTTP/2.x    Name Init Description     Call CPM Count HTTP Request calls per minutes.   Duration Nanoseconds Total HTTP Response use duration.   Success CPM Count Total HTTP Response success(status \u0026lt; 500) count.   Request Header Size Bytes Total Request Header size.   Request Body Size Bytes Total Request Body size.   Response Header Size Bytes Total Response Header size.   Response Body Size Bytes Total Response Body size.    Customizations You can customize your own metrics/dashboard panel. The metrics definition and expression rules are found in /config/oal/ebpf.oal, please refer the Scope Declaration Documentation. 
The K8s dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring from Rover","url":"/docs/main/next/en/setup/backend/backend-k8s-monitoring-rover/"},{"content":"Kubernetes Network monitoring SkyWalking leverages the SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4 (TCP) and L7 (HTTP) traffic and raw data of HTTP requests and responses. Under the hood, SkyWalking Rover converts socket data into metrics using eBPF technology.\nData flow  The SkyWalking OAP server observes which specific K8s pods need network monitoring. SkyWalking Rover receives tasks from the SkyWalking OAP server, executes them, and converts the network data into metrics sent to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expressions with MAL to aggregate.  Setup  Set up SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:\n  selector: ${SW_AGENT_ANALYZER:default}\n  default:\n    meterAnalyzerActiveFiles: ${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}\nSampling config Note the precondition: the HTTP request must have the trace header in SkyWalking (sw8 header) or Zipkin (b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When an HTTP call is sampled, SkyWalking Rover collects the raw HTTP request/response data and uploads it as a span-attached event.\nThe sampling config contains multiple rules, and each rule has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample HTTP requests whose latency is slower than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After the SkyWalking OAP server receives the metrics from SkyWalking Rover, it supports analyzing the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or a local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including the following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The execution time of the socket read or write. Connect: The count and execution time of socket connect/accept with the peer address. Close: The count and execution time of socket close operations. RTT: The RTT (Round Trip Time) of socket communication with the peer address.   Exception data for local processes communicating with peer addresses, including the following types:  Retransmit: The count of retransmitted TCP packages. Drop: The count of dropped TCP packages.   HTTP/1.x request/response related metrics, including the following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size (KB) of the request package. Response Package Size: The size (KB) of the response package. Client Side Response Duration: The duration (ms) for the client to receive the response. 
Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","title":"Kubernetes Network monitoring","url":"/docs/main/latest/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   
HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","title":"Kubernetes Network monitoring","url":"/docs/main/next/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature for collecting metrics data from the network. SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.    ","title":"Kubernetes Network monitoring","url":"/docs/main/v9.2.0/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. 
The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","title":"Kubernetes Network monitoring","url":"/docs/main/v9.3.0/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. 
Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    
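For reference, the flattened agent-analyzer snippet shown in the Setup step above corresponds to the following application.yml fragment (indentation reconstructed from that snippet; a minimal sketch assuming the default OAP configuration layout):

```yaml
agent-analyzer:
  selector: ${SW_AGENT_ANALYZER:default}
  default:
    # activate the network-profiling MAL file so Rover's socket data is analyzed
    meterAnalyzerActiveFiles: ${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}
```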
","title":"Kubernetes Network monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   
HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","title":"Kubernetes Network monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. 
Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","title":"Kubernetes Network monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   
Exception data for local process communication with the peer address, including the following types:  Retransmit: The count of retransmitted TCP packages. Drop: The count of dropped TCP packages.   HTTP/1.x request/response related metrics, including the following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size (KB) of the request package. Response Package Size: The size (KB) of the response package. Client Side Response Duration: The duration (ms) for the client to receive the response. Server Side Response Duration: The duration (ms) for the server to send the response.   HTTP sampled requests with traces, including the following types:  Slow traces: Traces with slow durations. Traces from HTTP Code in [400, 500) (ms): Traces whose response status code is in [400, 500). Traces from HTTP Code in [500, 600) (ms): Traces whose response status code is in [500, 600).    ","title":"Kubernetes Network monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, which is by importing SkyWalking into your project and starting the agent.\nDefaults By default, the SkyWalking Python agent uses the gRPC protocol to report data to the SkyWalking backend; in the SkyWalking backend, the port of the gRPC protocol is 11800, and the port of the HTTP protocol is 12800.\nSee all default configuration values in the Configuration Vocabulary.\nYou could configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) and set agent_protocol (or environment variable SW_AGENT_PROTOCOL) to one of grpc, http or kafka according to the protocol you would like to use.\nReport data via gRPC protocol (Default) For example, if you want to use the gRPC protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:11800, such as 127.0.0.1:11800:\nfrom skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:11800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via HTTP protocol However, if you want to use the HTTP protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:12800, such as 127.0.0.1:12800, and further set agent_protocol (or environment variable SW_AGENT_PROTOCOL) to http:\n Remember to install skywalking-python with the extra requires http: pip install \u0026quot;apache-skywalking[http]\u0026quot;.\n from skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:12800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;http\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via Kafka protocol Please make sure OAP is consuming the same Kafka topic as your agent produces to; kafka_namespace must match the OAP-side configuration plugin.kafka.namespace.\nFinally, if you want to use the Kafka protocol to report data, configure kafka_bootstrap_servers (or environment variable SW_KAFKA_BOOTSTRAP_SERVERS) to your Kafka brokers, such as 127.0.0.1:9200, and further set agent_protocol (or 
environment variable SW_AGENT_PROTOCOL to kafka):\n Remember you should install skywalking-python with extra requires kafka, pip install \u0026quot;apache-skywalking[kafka]\u0026quot;.\n from skywalking import agent, config config.init(kafka_bootstrap_servers=\u0026#39;127.0.0.1:9200\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;kafka\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Alternatively, you can also pass the configurations via environment variables (such as SW_AGENT_NAME, SW_AGENT_COLLECTOR_BACKEND_SERVICES, etc.) so that you don\u0026rsquo;t need to call config.init.\nAll supported environment variables can be found in the Environment Variables List.\n","title":"Legacy Setup","url":"/docs/skywalking-python/latest/en/setup/intrusive/"},{"content":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, which is by importing SkyWalking into your project and starting the agent.\nDefaults By default, SkyWalking Python agent uses gRPC protocol to report data to SkyWalking backend, in SkyWalking backend, the port of gRPC protocol is 11800, and the port of HTTP protocol is 12800,\nSee all default configuration values in the Configuration Vocabulary\nYou could configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) and set agent_protocol (or environment variable SW_AGENT_PROTOCOL to one of gprc, http or kafka according to the protocol you would like to use.\nReport data via gRPC protocol (Default) For example, if you want to use gRPC protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:11800, such as 127.0.0.1:11800:\nfrom skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:11800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via HTTP protocol However, if you want to use HTTP protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:12800, such as 127.0.0.1:12800, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to http):\n Remember you should install skywalking-python with extra requires http, pip install \u0026quot;apache-skywalking[http].\n from skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:12800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;http\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via Kafka protocol Please make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nFinally, if you want to use Kafka protocol to report data, configure kafka_bootstrap_servers (or environment variable SW_KAFKA_BOOTSTRAP_SERVERS) to kafka-brokers, such as 127.0.0.1:9200, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to kafka):\n Remember you should install skywalking-python with extra requires kafka, pip install \u0026quot;apache-skywalking[kafka]\u0026quot;.\n from skywalking import 
agent, config config.init(kafka_bootstrap_servers=\u0026#39;127.0.0.1:9200\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;kafka\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Alternatively, you can also pass the configurations via environment variables (such as SW_AGENT_NAME, SW_AGENT_COLLECTOR_BACKEND_SERVICES, etc.) so that you don\u0026rsquo;t need to call config.init.\nAll supported environment variables can be found in the Environment Variables List.\n","title":"Legacy Setup","url":"/docs/skywalking-python/next/en/setup/intrusive/"},{"content":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, which is by importing SkyWalking into your project and starting the agent.\nDefaults By default, SkyWalking Python agent uses gRPC protocol to report data to SkyWalking backend, in SkyWalking backend, the port of gRPC protocol is 11800, and the port of HTTP protocol is 12800,\nSee all default configuration values in the Configuration Vocabulary\nYou could configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) and set agent_protocol (or environment variable SW_AGENT_PROTOCOL to one of gprc, http or kafka according to the protocol you would like to use.\nReport data via gRPC protocol (Default) For example, if you want to use gRPC protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:11800, such as 127.0.0.1:11800:\nfrom skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:11800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via HTTP protocol However, if you want to use HTTP protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:12800, such as 127.0.0.1:12800, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to http):\n Remember you should install skywalking-python with extra requires http, pip install \u0026quot;apache-skywalking[http].\n from skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:12800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;http\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via Kafka protocol Please make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nFinally, if you want to use Kafka protocol to report data, configure kafka_bootstrap_servers (or environment variable SW_KAFKA_BOOTSTRAP_SERVERS) to kafka-brokers, such as 127.0.0.1:9200, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to kafka):\n Remember you should install skywalking-python with extra requires kafka, pip install \u0026quot;apache-skywalking[kafka]\u0026quot;.\n from skywalking import agent, config config.init(kafka_bootstrap_servers=\u0026#39;127.0.0.1:9200\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;kafka\u0026#39;, 
agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Alternatively, you can also pass the configurations via environment variables (such as SW_AGENT_NAME, SW_AGENT_COLLECTOR_BACKEND_SERVICES, etc.) so that you don\u0026rsquo;t need to call config.init.\nAll supported environment variables can be found in the Environment Variables List.\n","title":"Legacy Setup","url":"/docs/skywalking-python/v1.0.1/en/setup/intrusive/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data through the Telegraf receiver. The Telegraf receiver plugin receives, processes, and converts the metrics, then sends the converted metrics to the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expressions with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collect various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in the telegraf.conf file. InfluxDB Telegraf sends JSON-format metrics via HTTP messages to the Telegraf Receiver, which then pushes the converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expressions with MAL to filter/calculate/aggregate and store the results. The meter_vm_cpu_average_used metric indicates the average usage of each CPU core for the Telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example of the OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to the Telegraf official document. Setup the specific rules in InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to the Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk data read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network data received and transmitted Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel configurations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/latest/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data through the Telegraf receiver. The Telegraf receiver plugin receives, processes, and converts the metrics, then sends the converted metrics to the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expressions with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collect various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in the telegraf.conf file. 
The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. 
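As an illustration of what an entry in those rule files looks like, the fragment below follows the general MAL rule layout (metricPrefix, expSuffix, metricsRules); the expression and label names are hypothetical placeholders, not copied from the shipped vm.yaml:

```yaml
# hypothetical fragment in the style of /config/otel-rules/vm.yaml
expSuffix: tag({tags -> tags.host_name = 'vm::' + tags.host_name}).service(['host_name'], Layer.OS_LINUX)
metricPrefix: meter_vm
metricsRules:
  # total RAM in MB, derived from a node-exporter gauge (illustrative expression only)
  - name: memory_total
    exp: node_memory_MemTotal_bytes.sum(['host_name']) / 1048576
```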
The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/next/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs, and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nVM entity as a Service in OAP, and on the Layer: OS_LINUX.\nData flow  The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/vm.yaml.\nThe dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.0.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter 
System.\nVM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow  The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/vm.yaml.\nThe dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.1.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow  The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup Prometheus node-exporter. 
Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.2.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. 
The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. 
The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.3.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and write rates Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit rates Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel configurations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also supports receiving VMs' metrics data from InfluxDB Telegraf via the Telegraf receiver. The Telegraf receiver plugin receives, processes, and converts the metrics, then sends the converted metrics to the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collect various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in the telegraf.conf file. 
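Both transport paths end at receiver modules in the OAP. As a companion to the "Config SkyWalking OpenTelemetry receiver" and "Config SkyWalking Telegraf receiver" steps in the Setup section, here is a minimal sketch of what activating those receivers in the OAP application.yml might look like. The module keys, selector variables, and handler value are assumptions based on recent OAP releases rather than values taken from this page; verify them against the application.yml shipped with your distribution.

receiver-otel:                # assumed module name; check your application.yml
  selector: ${SW_OTEL_RECEIVER:default}
  default:
    enabledHandlers: ${SW_OTEL_RECEIVER_ENABLED_HANDLERS:otlp-metrics}   # handler name is an assumption
receiver-telegraf:            # assumed module name; check your application.yml
  selector: ${SW_RECEIVER_TELEGRAF:default}
  default: {}

With both modules active, OTLP metrics typically arrive on the OAP gRPC port and Telegraf HTTP payloads on the OAP REST port, after which the MAL rules in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml are applied.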
InfluxDB Telegraf sends JSON-format metrics via HTTP messages to the Telegraf receiver, which then pushes the converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results. The meter_vm_cpu_average_used metric indicates the average usage of each CPU core for the Telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example of the OpenTelemetry Collector configuration: otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to the Telegraf official documentation. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file with the specific rules according to the Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and write rates Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit rates Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. 
The dashboard panel configurations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also supports receiving VMs' metrics data from InfluxDB Telegraf via the Telegraf receiver. The Telegraf receiver plugin receives, processes, and converts the metrics, then sends the converted metrics to the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collect various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in the telegraf.conf file. InfluxDB Telegraf sends JSON-format metrics via HTTP messages to the Telegraf receiver, which then pushes the converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results. The meter_vm_cpu_average_used metric indicates the average usage of each CPU core for the Telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example of the OpenTelemetry Collector configuration: otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to the Telegraf official documentation. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file with the specific rules according to the Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and write rates Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit rates Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel configurations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also supports receiving VMs' metrics data from InfluxDB Telegraf via the Telegraf receiver. The Telegraf receiver plugin receives, processes, and converts the metrics, then sends the converted metrics to the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collect various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in the telegraf.conf file. 
InfluxDB Telegraf sends JSON-format metrics via HTTP messages to the Telegraf receiver, which then pushes the converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results. The meter_vm_cpu_average_used metric indicates the average usage of each CPU core for the Telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example of the OpenTelemetry Collector configuration: otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to the Telegraf official documentation. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file with the specific rules according to the Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and write rates Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit rates Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel configurations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property? 
By default, the agent will try to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through system properties, the agent will try to load the file from there. Note that this function has no conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as that of the default config.\nUse System.Properties(-D) to set the specified config path\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","title":"Locate agent config file by system property","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/specified-agent-config/"},{"content":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property? By default, the agent will try to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through system properties, the agent will try to load the file from there. Note that this function has no conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as that of the default config.\nUse System.Properties(-D) to set the specified config path\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","title":"Locate agent config file by system property","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/specified-agent-config/"},{"content":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property? By default, the agent will try to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through system properties, the agent will try to load the file from there. Note that this function has no conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as that of the default config.\nUse System.Properties(-D) to set the specified config path\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","title":"Locate agent config file by system property","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/specified-agent-config/"},{"content":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property? By default, the agent will try to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through system properties, the agent will try to load the file from there. 
Note that this function has no conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as that of the default config.\nUse System.Properties(-D) to set the specified config path\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","title":"Locate agent config file by system property","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/specified-agent-config/"},{"content":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property? By default, the agent will try to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through system properties, the agent will try to load the file from there. Note that this function has no conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as that of the default config.\nUse System.Properties(-D) to set the specified config path\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","title":"Locate agent config file by system property","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/specified-agent-config/"},{"content":"Log Analysis The log analyzer of the OAP server supports native log data. OAP can use the Log Analysis Language to structure log content through parsing, extracting, and saving logs. The analyzer also uses the Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;} Read the doc on Log Analysis Language (LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extraction provides the capability to generate new metrics from the raw log text for further calculation.\n","title":"Log Analysis","url":"/docs/main/latest/en/setup/backend/log-analyzer/"},{"content":"Log Analysis The log analyzer of the OAP server supports native log data. OAP can use the Log Analysis Language to structure log content through parsing, extracting, and saving logs. The analyzer also uses the Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;} Read the doc on Log Analysis Language (LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extraction provides the capability to generate new metrics from the raw log text for further calculation.\n","title":"Log Analysis","url":"/docs/main/next/en/setup/backend/log-analyzer/"},{"content":"Log Analysis The log analyzer of the OAP server supports native log data. OAP can use the Log Analysis Language to structure log content through parsing, extracting, and saving logs. The analyzer also uses the Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;} Read the doc on Log Analysis Language (LAL) for more on log structuring and metrics analysis. 
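The log-analyzer configuration shown above is rendered with its original line breaks collapsed. Reformatted as ordinary YAML (the same keys and defaults as in the snippet, nothing added), it reads:

log-analyzer:
  selector: ${SW_LOG_ANALYZER:default}
  default:
    lalFiles: ${SW_LOG_LAL_FILES:default}
    malFiles: ${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}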
The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","title":"Log Analysis","url":"/docs/main/v9.5.0/en/setup/backend/log-analyzer/"},{"content":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","title":"Log Analysis","url":"/docs/main/v9.6.0/en/setup/backend/log-analyzer/"},{"content":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","title":"Log Analysis","url":"/docs/main/v9.7.0/en/setup/backend/log-analyzer/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. 
abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe parameter of timestamp can be a millisecond:\nfilter { // ... parser  extractor { timestamp parsed.time as String } } or a datetime string with a specified pattern:\nfilter { // ... parser  extractor { timestamp parsed.time as String, \u0026#34;yyyy-MM-dd HH:mm:ss\u0026#34; } }  layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... 
} The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency 
parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. 
However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/latest/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. 
You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe parameter of timestamp can be a millisecond:\nfilter { // ... parser  extractor { timestamp parsed.time as String } } or a datetime string with a specified pattern:\nfilter { // ... parser  extractor { timestamp parsed.time as String, \u0026#34;yyyy-MM-dd HH:mm:ss\u0026#34; } }  layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/next/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. 
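A minimal end-to-end sketch of such a filter (for illustration only; the fields parsed.serviceName and parsed.level and the sampler ID \u0026#34;DemoSampler\u0026#34; are hypothetical placeholders, not names required by LAL):\nfilter { json { } extractor { service parsed.serviceName tag level: parsed.level } sink { sampler { rateLimit(\u0026#34;DemoSampler\u0026#34;) { rpm 600 } } } }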
A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the order they are declared.\nGlobal Functions Globally available functions may be used in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\nParser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text, parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp; all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service / instance.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99])Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. 
A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.0.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as correlate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the order they are declared.\nGlobal Functions Globally available functions may be used in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\nParser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. 
For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text, parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp; all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99])Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. 
A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.1.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as correlate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the order they are declared.\nGlobal Functions Globally available functions may be used in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\nParser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. 
For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99])Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. 
A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.2.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. 
There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. 
SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. 
Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.3.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. 
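For instance (a rough sketch only, assuming a hypothetical LAL file named my-lal-config placed under that lal directory), activating it in application.yml would look roughly like:\nlog-analyzer: selector: ${SW_LOG_ANALYZER:default} default: lalFiles: ${SW_LOG_LAL_FILES:my-lal-config}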
You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.4.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. 
Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.5.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. 
Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.6.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. 
Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the endpoint name from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe parameter of timestamp can be a millisecond timestamp:\nfilter { // ... parser  extractor { timestamp parsed.time as String } } or a datetime string with a specified pattern:\nfilter { // ... parser  extractor { timestamp parsed.time as String, \u0026#34;yyyy-MM-dd HH:mm:ss\u0026#34; } }  layer  layer extracts the layer from the parsed result, and sets it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and sets them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nfilter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.7.0/en/concepts-and-designs/lal/"},{"content":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following filebeat config yaml as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). 
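The HTTP path is the same one described in the Log Data Protocol pages later in this document: a JSON array of log records posted to http://<oap-address>:12800/v3/logs. For reference, a minimal hand-rolled sketch in Java (Java 11+ HttpClient; the localhost address, service name, and instance name below are placeholders, and the record fields mirror the JSON example from the Log Data Protocol pages):

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class HttpLogReportSketch {
    public static void main(String[] args) throws Exception {
        // One native-json record, wrapped in the JSON array that /v3/logs expects.
        String payload = "[{"
                + "\"timestamp\":" + System.currentTimeMillis() + ","
                + "\"service\":\"Your_ApplicationName\","
                + "\"serviceInstance\":\"instance-1\","
                + "\"layer\":\"GENERAL\","
                + "\"body\":{\"text\":{\"text\":\"log message\"}}"
                + "}]";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12800/v3/logs")) // <oap-address>:12800 is the OAP REST port
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(payload))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println("OAP responded with HTTP " + response.statusCode());
    }
}
```

Fluent-bit delivers this same payload shape over HTTP.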
Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting, and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","title":"Log Collection and Analysis","url":"/docs/main/v9.0.0/en/setup/backend/log-analyzer/"},{"content":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). 
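Whichever component produces to Kafka, the payload consumed by kafka-fetcher for the JSON flavour is just the native-json record shown in the Log Data Protocol pages. A minimal producer sketch in Java; the broker address and topic name here are placeholders, so use the JSON log topic your kafka-fetcher module is configured to consume and make sure enableNativeJsonLog is enabled as described for Filebeat/Fluentd above:

```java
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class KafkaLogReportSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // your Kafka brokers
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // One native-json record, shaped like the "Json log record example" in the Log Data Protocol pages.
        String record = "{"
                + "\"timestamp\":" + System.currentTimeMillis() + ","
                + "\"service\":\"Your_ApplicationName\","
                + "\"serviceInstance\":\"instance-1\","
                + "\"layer\":\"GENERAL\","
                + "\"body\":{\"text\":{\"text\":\"log message\"}}"
                + "}";

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Topic name is a placeholder; use the JSON log topic your kafka-fetcher consumes.
            producer.send(new ProducerRecord<>("skywalking-logs-json", record));
        }
    }
}
```

Satellite itself forwards the native proto format rather than JSON.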
When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","title":"Log Collection and Analysis","url":"/docs/main/v9.1.0/en/setup/backend/log-analyzer/"},{"content":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. 
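All of the transports above (gRPC from the agent toolkits, Kafka, HTTP) converge on the same LogData records that this analyzer consumes. For completeness, a minimal gRPC reporting sketch: it assumes the Java stubs generated from the LogReportService definition shown in the Log Data Protocol pages below are on the classpath, the package and class names follow the java_package options in that definition and should be verified against the generated artifacts, and the address, port, service, and instance values are placeholders:

```java
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.stub.StreamObserver;
import org.apache.skywalking.apm.network.common.v3.Commands;
import org.apache.skywalking.apm.network.logging.v3.LogData;
import org.apache.skywalking.apm.network.logging.v3.LogDataBody;
import org.apache.skywalking.apm.network.logging.v3.LogReportServiceGrpc;
import org.apache.skywalking.apm.network.logging.v3.TextLog;

public class GrpcLogReportSketch {
    public static void main(String[] args) {
        // OAP gRPC port (typically 11800); point this at Satellite instead if the sidecar is in the path.
        ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 11800)
                .usePlaintext()
                .build();

        LogReportServiceGrpc.LogReportServiceStub stub = LogReportServiceGrpc.newStub(channel);

        // collect() is a client-streaming RPC: push LogData messages into the returned observer.
        StreamObserver<LogData> logStream = stub.collect(new StreamObserver<Commands>() {
            @Override public void onNext(Commands commands) { /* commands from OAP, ignored in this sketch */ }
            @Override public void onError(Throwable t) { t.printStackTrace(); }
            @Override public void onCompleted() { }
        });

        logStream.onNext(LogData.newBuilder()
                .setTimestamp(System.currentTimeMillis())
                .setService("Your_ApplicationName")   // later records in the same stream may omit it
                .setServiceInstance("instance-1")
                .setBody(LogDataBody.newBuilder()
                        .setText(TextLog.newBuilder().setText("log message")))
                .build());

        logStream.onCompleted();
        channel.shutdown();
    }
}
```

Once records arrive by any of these routes, the log-analyzer configuration decides which LAL and MAL rules process them.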
The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","title":"Log Collection and Analysis","url":"/docs/main/v9.2.0/en/setup/backend/log-analyzer/"},{"content":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","title":"Log Collection and Analysis","url":"/docs/main/v9.3.0/en/setup/backend/log-analyzer/"},{"content":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. 
Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","title":"Log Collection and Analysis","url":"/docs/main/v9.4.0/en/setup/backend/log-analyzer/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  
int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/latest/en/api/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  
// If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/next/en/api/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP 
API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.0.0/en/protocols/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: 
\u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.1.0/en/protocols/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.2.0/en/protocols/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { 
\u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.3.0/en/protocols/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. 
This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.4.0/en/api/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  
{"content":"logback plugin  Declare the toolkit dependency, e.g. via Maven or Gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Set %tid in the Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  With the MDC, set %X{tid} in the Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder 
class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Logback AsyncAppender is supported (MDC as well); no additional configuration is required. Refer to the demo of logback.xml below. For details: Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to activate the SkyWalking tracer, logback will output the traceId if it exists. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   You only need to replace the pattern %tid or %X{tid} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to activate the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it exists. 
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Declare the toolkit dependency, e.g. via Maven or Gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Set LogstashEncoder in logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  Set the logstash LoggingEventCompositeJsonEncoder in logback-spring.xml for a custom JSON format  1. Add a converter for %tid or %sw_ctx as a child of the \u0026lt;configuration\u0026gt; node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2. Add the JSON encoder for the custom JSON format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter can forward collected logs to the SkyWalking OAP server or the SkyWalking Satellite sidecar. Trace id, segment id, and span id will be attached to logs automatically. 
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add the plugin config or use the default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern, with additional log tags argument.0, argument.1, and so on representing each logged argument, as well as an additional exception tag that is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"logback plugin","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/"},
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"logback plugin","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/"},{"content":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current service. It currently supports the recognition of Logrus and Zap frameworks. If neither of these frameworks is present, it would output logs using Std Error.\nYou can learn about the configuration details through the \u0026ldquo;log\u0026rdquo; configuration item in the default settings.\nLogging Detection Log detection means that the logging plugin would automatically detect the usage of logs in your application. When the log type is set to auto, it would choose the appropriate log based on the creation rules of different frameworks. The selection rules vary depending on the framework:\n Logrus: It automatically selects the current logger when executing functions such as logrus.New, logger.SetOutput, or logger.SetFormatter. Zap: It automatically selects the current logger when executing functions such as zap.New, zap.NewNop, zap.NewProduction, zap.NewDevelopment, or zap.NewExample.  If there are multiple different logging systems in your current application, the last-called logging system would be chosen.\nThe configuration information is as follows:\n   Name Environment Key Default Value Description     log.type SW_LOG_TYPE auto The type of logging system. It currently supports auto, logrus, zap, and std.    
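To make the detection rules above concrete, here is a minimal, illustrative Go sketch (not taken from the official examples) of a service that builds its logger with zap.NewProduction; when the binary is built with the SkyWalking Go agent injected and log.type is left at auto (or set to zap), this is the logger the plugin would select:

package main

import (
	"go.uber.org/zap"
)

func main() {
	// zap.NewProduction is one of the constructors the logging plugin
	// watches for, so the returned logger becomes the integration point.
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	// Plain log call; no SkyWalking-specific API is required here.
	logger.Info("service started", zap.String("component", "demo"))
}

If several loggers are created, remember that the last-created one is the one chosen, as noted above.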
Agent with Logging system The integration of the Agent with logs includes the two parts as following.\n Integrating Agent logs into the Service: Integrating the logs from the Agent into the framework used by the service. Integrating Tracing information into the Service: Integrating the information from Tracing into the service logs.  Agent logs into the Service Agent logs output the current running status of the Agent system, most of which are execution exceptions. For example, communication anomalies between the Agent and the backend service, plugin execution exceptions, etc.\nIntegrating Agent logs into the service\u0026rsquo;s logging system can effectively help users quickly troubleshoot whether there are issues with the current Agent execution.\nTracing information into the Service The Agent would also enhance the existing logging system. When the service outputs log, if the current goroutine contains Tracing data, it would be outputted together with the current logs. This helps users to quickly locate the link based on the Tracing data.\nTracing data The Tracing includes the following information:\n ServiceName: Current service name. ServiceInstanceName: Current service instance name. TraceID: The current Trace ID. If there is no link, it outputs N/A. SegmentID: The Segment ID in the current Trace. If there is no link, it outputs N/A. SpanID: The Span ID currently being operated on. If there is no link, it outputs -1.  The output format is as follows: [${ServiceName},${ServiceInstanceName},${TraceID},${SegmentID},${SpanID}].\nThe following is an example of a log output when using Zap.NewProduction:\n{\u0026quot;level\u0026quot;:\u0026quot;info\u0026quot;,\u0026quot;ts\u0026quot;:1683641507.052247,\u0026quot;caller\u0026quot;:\u0026quot;gin/main.go:45\u0026quot;,\u0026quot;msg\u0026quot;:\u0026quot;test log\u0026quot;,\u0026quot;SW_CTX\u0026quot;:\u0026quot;[Your_ApplicationName,681e4178ee7311ed864facde48001122@192.168.50.193,6f13069eee7311ed864facde48001122,6f13070cee7311ed864facde48001122,0]\u0026quot;} The configuration information is as follows:\n   Name Environment Key Default Value Description     log.tracing.enable SW_AGENT_LOG_TRACING_ENABLE true Whether to automatically integrate Tracing information into the logs.   log.tracing.key SW_AGENT_LOG_TRACING_KEY SW_CTX The key of the Tracing information in the log.    Log Upload The Agent would report the following two types of logs to the SkyWalking backend for storage and querying:\n Application Logs: It provides support for various logging frameworks and reports logs along with the corresponding distributed tracing information related to the current request. Only the relevant logs matching the current system log level would be output. Agent Logs: These are the logs generated by the Agent itself.  The current configuration options available are as follows:\n   Name Environment Key Default Value Description     log.reporter.enable SW_LOG_REPORTER_ENABLE true Whether to enable log reporting.   log.reporter.label_keys SW_LOG_REPORTER_LABEL_KEYS  By default, all fields are not reported. To specify the fields that need to be reported, please provide a comma-separated list of configuration item keys.    ","title":"Logging Setup","url":"/docs/skywalking-go/latest/en/advanced-features/logging-setup/"},{"content":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current service. It currently supports the recognition of Logrus and Zap frameworks. 
If neither of these frameworks is present, it would output logs using Std Error.\nYou can learn about the configuration details through the \u0026ldquo;log\u0026rdquo; configuration item in the default settings.\nLogging Detection Log detection means that the logging plugin would automatically detect the usage of logs in your application. When the log type is set to auto, it would choose the appropriate log based on the creation rules of different frameworks. The selection rules vary depending on the framework:\n Logrus: It automatically selects the current logger when executing functions such as logrus.New, logger.SetOutput, or logger.SetFormatter. Zap: It automatically selects the current logger when executing functions such as zap.New, zap.NewNop, zap.NewProduction, zap.NewDevelopment, or zap.NewExample.  If there are multiple different logging systems in your current application, the last-called logging system would be chosen.\nThe configuration information is as follows:\n   Name Environment Key Default Value Description     log.type SW_LOG_TYPE auto The type of logging system. It currently supports auto, logrus, zap, and std.    Agent with Logging system The integration of the Agent with logs includes the two parts as following.\n Integrating Agent logs into the Service: Integrating the logs from the Agent into the framework used by the service. Integrating Tracing information into the Service: Integrating the information from Tracing into the service logs.  Agent logs into the Service Agent logs output the current running status of the Agent system, most of which are execution exceptions. For example, communication anomalies between the Agent and the backend service, plugin execution exceptions, etc.\nIntegrating Agent logs into the service\u0026rsquo;s logging system can effectively help users quickly troubleshoot whether there are issues with the current Agent execution.\nTracing information into the Service The Agent would also enhance the existing logging system. When the service outputs log, if the current goroutine contains Tracing data, it would be outputted together with the current logs. This helps users to quickly locate the link based on the Tracing data.\nTracing data The Tracing includes the following information:\n ServiceName: Current service name. ServiceInstanceName: Current service instance name. TraceID: The current Trace ID. If there is no link, it outputs N/A. SegmentID: The Segment ID in the current Trace. If there is no link, it outputs N/A. SpanID: The Span ID currently being operated on. If there is no link, it outputs -1.  The output format is as follows: [${ServiceName},${ServiceInstanceName},${TraceID},${SegmentID},${SpanID}].\nThe following is an example of a log output when using Zap.NewProduction:\n{\u0026quot;level\u0026quot;:\u0026quot;info\u0026quot;,\u0026quot;ts\u0026quot;:1683641507.052247,\u0026quot;caller\u0026quot;:\u0026quot;gin/main.go:45\u0026quot;,\u0026quot;msg\u0026quot;:\u0026quot;test log\u0026quot;,\u0026quot;SW_CTX\u0026quot;:\u0026quot;[Your_ApplicationName,681e4178ee7311ed864facde48001122@192.168.50.193,6f13069eee7311ed864facde48001122,6f13070cee7311ed864facde48001122,0]\u0026quot;} The configuration information is as follows:\n   Name Environment Key Default Value Description     log.tracing.enable SW_AGENT_LOG_TRACING_ENABLE true Whether to automatically integrate Tracing information into the logs.   log.tracing.key SW_AGENT_LOG_TRACING_KEY SW_CTX The key of the Tracing information in the log.    
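As a rough illustration of the behavior described above (assuming a service built with the SkyWalking Go agent and using the Gin and Logrus plugins; the route, port, and messages below are made up), a log written inside a traced request handler carries the SW_CTX field automatically:

package main

import (
	"github.com/gin-gonic/gin"
	"github.com/sirupsen/logrus"
)

func main() {
	// logrus.New is one of the calls the logging plugin detects,
	// so this logger is wired up for tracing-context injection.
	log := logrus.New()

	r := gin.Default()
	r.GET("/ping", func(c *gin.Context) {
		// Inside a traced request, the agent appends the tracing context
		// under the key configured by log.tracing.key (SW_CTX by default).
		// Outside a trace, TraceID/SegmentID print as N/A and SpanID as -1.
		log.Info("handling ping")
		c.String(200, "pong")
	})
	r.Run(":8080")
}

No code change is needed to produce the SW_CTX field; the agent injects it during the build-time instrumentation.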
Log Upload The Agent would report the following two types of logs to the SkyWalking backend for storage and querying:\n Application Logs: It provides support for various logging frameworks and reports logs along with the corresponding distributed tracing information related to the current request. Only the relevant logs matching the current system log level would be output. Agent Logs: These are the logs generated by the Agent itself.  The current configuration options available are as follows:\n   Name Environment Key Default Value Description     log.reporter.enable SW_LOG_REPORTER_ENABLE true Whether to enable log reporting.   log.reporter.label_keys SW_LOG_REPORTER_LABEL_KEYS  By default, all fields are not reported. To specify the fields that need to be reported, please provide a comma-separated list of configuration item keys.    ","title":"Logging Setup","url":"/docs/skywalking-go/next/en/advanced-features/logging-setup/"},{"content":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current service. It currently supports the recognition of Logrus and Zap frameworks. If neither of these frameworks is present, it would output logs using Std Error.\nYou can learn about the configuration details through the \u0026ldquo;log\u0026rdquo; configuration item in the default settings.\nLogging Detection Log detection means that the logging plugin would automatically detect the usage of logs in your application. When the log type is set to auto, it would choose the appropriate log based on the creation rules of different frameworks. The selection rules vary depending on the framework:\n Logrus: It automatically selects the current logger when executing functions such as logrus.New, logger.SetOutput, or logger.SetFormatter. Zap: It automatically selects the current logger when executing functions such as zap.New, zap.NewNop, zap.NewProduction, zap.NewDevelopment, or zap.NewExample.  If there are multiple different logging systems in your current application, the last-called logging system would be chosen.\nThe configuration information is as follows:\n   Name Environment Key Default Value Description     log.type SW_LOG_TYPE auto The type of logging system. It currently supports auto, logrus, zap, and std.    Agent with Logging system The integration of the Agent with logs includes the two parts as following.\n Integrating Agent logs into the Service: Integrating the logs from the Agent into the framework used by the service. Integrating Tracing information into the Service: Integrating the information from Tracing into the service logs.  Agent logs into the Service Agent logs output the current running status of the Agent system, most of which are execution exceptions. For example, communication anomalies between the Agent and the backend service, plugin execution exceptions, etc.\nIntegrating Agent logs into the service\u0026rsquo;s logging system can effectively help users quickly troubleshoot whether there are issues with the current Agent execution.\nTracing information into the Service The Agent would also enhance the existing logging system. When the service outputs log, if the current goroutine contains Tracing data, it would be outputted together with the current logs. This helps users to quickly locate the link based on the Tracing data.\nTracing data The Tracing includes the following information:\n ServiceName: Current service name. ServiceInstanceName: Current service instance name. TraceID: The current Trace ID. 
If there is no link, it outputs N/A. SegmentID: The Segment ID in the current Trace. If there is no link, it outputs N/A. SpanID: The Span ID currently being operated on. If there is no link, it outputs -1.  The output format is as follows: [${ServiceName},${ServiceInstanceName},${TraceID},${SegmentID},${SpanID}].\nThe following is an example of a log output when using Zap.NewProduction:\n{\u0026quot;level\u0026quot;:\u0026quot;info\u0026quot;,\u0026quot;ts\u0026quot;:1683641507.052247,\u0026quot;caller\u0026quot;:\u0026quot;gin/main.go:45\u0026quot;,\u0026quot;msg\u0026quot;:\u0026quot;test log\u0026quot;,\u0026quot;SW_CTX\u0026quot;:\u0026quot;[Your_ApplicationName,681e4178ee7311ed864facde48001122@192.168.50.193,6f13069eee7311ed864facde48001122,6f13070cee7311ed864facde48001122,0]\u0026quot;} The configuration information is as follows:\n   Name Environment Key Default Value Description     log.tracing.enable SW_AGENT_LOG_TRACING_ENABLE true Whether to automatically integrate Tracing information into the logs.   log.tracing.key SW_AGENT_LOG_TRACING_KEY SW_CTX The key of the Tracing information in the log.    Log Upload The Agent would report the following two types of logs to the SkyWalking backend for storage and querying:\n Application Logs: It provides support for various logging frameworks and reports logs along with the corresponding distributed tracing information related to the current request. Only the relevant logs matching the current system log level would be output. Agent Logs: These are the logs generated by the Agent itself.  The current configuration options available are as follows:\n   Name Environment Key Default Value Description     log.reporter.enable SW_LOG_REPORTER_ENABLE true Whether to enable log reporting.   log.reporter.label_keys SW_LOG_REPORTER_LABEL_KEYS  By default, all fields are not reported. To specify the fields that need to be reported, please provide a comma-separated list of configuration item keys.    ","title":"Logging Setup","url":"/docs/skywalking-go/v0.4.0/en/advanced-features/logging-setup/"},{"content":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. 
labels format: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log queries:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs containing a keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not containing a keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs containing keyword A but not keyword B in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries are used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL (Log Analysis Language) instead. So LogQL metric queries won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported HTTP Query API List Labels Query log tags within a range of time. This differs from Loki: in Loki, this API queries all labels used in stream selectors, but in SkyWalking, this API is only for log tag queries. 
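As an illustrative sketch only (the base URL below is an assumption; point it at wherever your OAP deployment exposes the LogQL service), a Go client could call the labels endpoint described next like this:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumed address of the SkyWalking LogQL service; adjust to your deployment.
	base := "http://localhost:3100"

	// start/end are nanosecond timestamps, as required by the API (see below).
	url := base + "/loki/api/v1/labels?start=1690947455457000000&end=1690947671936000000"

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// Expected shape: {"status":"success","data":["level", ...]}
	fmt.Println(string(body))
}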
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","title":"LogQL Service","url":"/docs/main/latest/en/api/logql-service/"},{"content":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. 
If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","title":"LogQL Service","url":"/docs/main/next/en/api/logql-service/"},{"content":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. 
If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","title":"LogQL Service","url":"/docs/main/v9.6.0/en/api/logql-service/"},{"content":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. 
If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","title":"LogQL Service","url":"/docs/main/v9.7.0/en/api/logql-service/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  Below is the archived list.\n Go2Sky. Since Jun 14, 2023.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/latest/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  Below is the archived list.\n Go2Sky. Since Jun 14, 2023.  
What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/next/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.0.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.1.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.2.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.3.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.4.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols?  
Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.5.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.6.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  Below is the archived list.\n Go2Sky. Since Jun 14, 2023.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.7.0/en/concepts-and-designs/manual-sdk/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/latest/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/next/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an important role in today\u0026rsquo;s distributed system, in order to reduce the length and latency of blocking RPC, and eventually improve user experience. 
But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 , to provide performance monitoring for Message Queue system.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.0.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.1.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.2.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.3.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. 
But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.4.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.5.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.6.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.7.0/en/setup/backend/mq/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. 
The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four types of operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for the us-west and asia-north regions and the az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six types of operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values not equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. 
Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
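For orientation, here is a minimal hypothetical rule that follows the scheme below; the metric name, label, and layer are illustrative only and are not taken from the bundled rule files:\nexpSuffix: service(['host_name'], Layer.OS_LINUX)\nmetricsRules:\n- name: cpu_total\n exp: node_cpu_seconds_total.sum(['host_name']).rate('PT1M')\nWith such a rule, the result would be stored as meter_cpu_total (the rule name combined with the meter_ prefix) at the service level of the given layer.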
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/latest/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions) count (calculate the count over dimensions, the last tag will be counted)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20  Note, aggregation operations affect the samples from one bulk only. If the metrics are reported parallel from multiple instances/nodes through different SampleFamily, this aggregation would NOT work.\nIn the best practice for this scenario, build the metric with labels that represent each instance/node. Then use the AggregateLabels Operation in MQE to aggregate the metrics.\n Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. 
le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN MAX MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. 
The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/next/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. 
valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate over all label dimensions or preserve distinct dimensions by inputting the by parameter.\n\u0026lt;aggr-op\u0026gt;(by: \u0026lt;tag1, tag2, ...\u0026gt;) Example expression:\ninstance_trace_count.sum(by: ['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. Users can add, drop, rename, and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nDown Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument.  More Examples Please refer to OAP Self-Observability\n","title":"Meter Analysis Language","url":"/docs/main/v9.0.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  
For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. 
Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter.\n\u0026lt;aggr-op\u0026gt;(by: \u0026lt;tag1, tag2, ...\u0026gt;) Example expression:\ninstance_trace_count.sum(by: ['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). 
Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nDown Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument.  More Examples Please refer to OAP Self-Observability\n","title":"Meter Analysis Language","url":"/docs/main/v9.1.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. 
tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. 
Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter.\n\u0026lt;aggr-op\u0026gt;(by: \u0026lt;tag1, tag2, ...\u0026gt;) Example expression:\ninstance_trace_count.sum(by: ['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). 
Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  More Examples Please refer to OAP Self-Observability\n","title":"Meter Analysis Language","url":"/docs/main/v9.2.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  
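For instance, the literal 10 in an expression such as instance_trace_count / 10 is a scalar, while instance_trace_count itself (introduced just below) is a sample family; the division yields another sample family whose sample values are each divided by 10.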
Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. 
For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. 
The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. 
endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/v9.3.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. 
For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. 
For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. 
The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. 
endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/v9.4.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. 
For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. 
For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. 
The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. 
endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/v9.5.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. 
The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. 
Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
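Read together with the scheme that follows, here is a minimal, hedged sketch of what such a rule file can look like. The metric name node_cpu_seconds_total, the host_name label, the layer, and the prefixes are illustrative assumptions, not shipped defaults:

# only analyze samples from this job (the example closure given in the scheme below)
filter: '{ tags -> tags.job_name == "vm-monitoring" }'
# appended to every exp in this file; the layer value is an assumption
expSuffix: service(['host_name'], Layer.GENERAL)
metricPrefix: vm
metricsRules:
  # hypothetical rule: per-second CPU usage per host
  - name: cpu_usage_rate
    exp: node_cpu_seconds_total.sum(['host_name']).rate('PT1M')

With expSuffix applied, the expression the engine evaluates is node_cpu_seconds_total.sum(['host_name']).rate('PT1M').service(['host_name'], Layer.GENERAL).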
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/v9.6.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
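As a concrete reading of the expSuffix field in the scheme that follows (the metric name, label names, and layer here are illustrative assumptions): if expSuffix is instance(['service'], ['host'], Layer.GENERAL) and a rule declares exp: http_requests_total.sum(['service', 'host']).rate('PT1M'), the expression actually evaluated is the rule expression with the suffix appended:

http_requests_total.sum(['service', 'host']).rate('PT1M').instance(['service'], ['host'], Layer.GENERAL)

expPrefix works the same way at the front of the expression, which keeps per-rule exp entries short when every rule in a file shares the same tag manipulation and level function.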
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/v9.7.0/en/concepts-and-designs/mal/"},{"content":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  
string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. Zabbix receiver  ","title":"Meter APIs","url":"/docs/main/latest/en/api/meter/"},{"content":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","title":"Meter APIs","url":"/docs/main/next/en/api/meter/"},{"content":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","title":"Meter APIs","url":"/docs/main/v9.4.0/en/api/meter/"},{"content":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","title":"Meter APIs","url":"/docs/main/v9.5.0/en/api/meter/"},{"content":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","title":"Meter APIs","url":"/docs/main/v9.6.0/en/api/meter/"},{"content":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","title":"Meter APIs","url":"/docs/main/v9.7.0/en/api/meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/latest/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nThere are following known API libs to report meter telemetry data:\n SkyWalking Java Meter toolkit APIs Spring MicroMeter Observations APIs works with OAP MicroMeter Observations setup  Agents Bundled Meters All following agents and components have built-in meters reporting to the OAP through Meter APIs.\n Go agent for Go VM metrics Python agent for PVM metrics Java agent with Spring micrometer toolkit Java agent for datasource metrics Java agent for thread-pool metrics Rover(eBPF) agent for metrics used continues profiling Satellite proxy self-observability metrics  Configuration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  
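For illustration only (the metric name, labels, and layer below are hypothetical and not one of the bundled rule files), a minimal meter-analyzer-config file following the schema above might look like:\nmetricPrefix: meter\nexpSuffix: instance(['service'], ['instance'], Layer.GENERAL)\nmetricsRules:\n- name: jvm_memory_used\n  exp: jvm_memory_used_bytes.sum(['service', 'instance'])\nWith such a file active, the rule would be stored under the name meter_jvm_memory_used, which can then be referenced in dashboards. 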
","title":"Meter receiver","url":"/docs/main/next/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by Manual Meter API. Custom metrics collected cannot be used directly, they should be configured in meter-analyzer-config configuration files, which is described in next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.0.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. 
Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. 
Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.1.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.2.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. 
If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.3.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   
MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. 
This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.4.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.5.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. 
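For orientation only, here is a rough sketch of what such a rule file can look like. The metric name my_pool_active_threads and its tags are invented for illustration and are not part of the shipped scripts; the full schema is described below and the available MAL functions are listed in mal.md:

expSuffix: instance(['service'], ['instance'], Layer.GENERAL)
metricPrefix: meter
metricsRules:
  # Stored and queried as meter_pool_active_threads.
  - name: pool_active_threads
    # my_pool_active_threads is an assumed raw metric name reported by the agent.
    exp: my_pool_active_threads.avg(['service', 'instance'])

Saved as, for example, my-pool.yaml under meter-analyzer-config, it would be activated by adding my-pool to meterAnalyzerActiveFiles as shown above.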
If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.6.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   
MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. 
This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.7.0/en/setup/backend/backend-meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/latest/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. 
Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/next/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.0.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. 
The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.1.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.2.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. 
Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.3.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.4.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. 
Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.5.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  
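To make these three attributes concrete, here is a hypothetical meter-analyzer rule fragment; all names are invented for illustration, and the exact MAL syntax and function list live in the backend setup documents. It sketches the declaration of a percentile-function metric at the instance scope:

# Scope Type: instance (service + instance labels), assuming the MAL instance() scope function.
expSuffix: instance(['service'], ['instance'], Layer.GENERAL)
metricsRules:
  # Metrics Name: must be globally unique across OAL and MAL.
  - name: my_http_latency_percentile
    # Function: percentile, with ranks inside the (0, 100) range.
    exp: my_http_latency.histogram().histogram_percentile([50,75,90,95,99])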
","title":"Meter System","url":"/docs/main/v9.6.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.7.0/en/concepts-and-designs/meter/"},{"content":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","title":"Metrics","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-micrometer/"},{"content":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","title":"Metrics","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer/"},{"content":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","title":"Metrics","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-micrometer/"},{"content":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","title":"Metrics","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-micrometer/"},{"content":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","title":"Metrics","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-micrometer/"},{"content":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and analysis. In the real world, many may want to forward their data to a 3rd party system for an in-depth analysis or otherwise. Metrics Exporter has made that possible.\nMetrics exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporters:\n gRPC exporter  gRPC exporter gRPC exporter uses SkyWalking\u0026rsquo;s native exporter service definition. Here is the proto definition.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}message ExportMetricValue { string metricName = 1; string entityName = 2; string entityId = 3; ValueType type = 4; int64 timeBucket = 5; int64 longValue = 6; double doubleValue = 7; repeated int64 longValues = 8;}message SubscriptionsResp { repeated SubscriptionMetric metrics = 1;}message SubscriptionMetric { string metricName = 1; EventType eventType = 2;}enum ValueType { LONG = 0; DOUBLE = 1; MULTI_LONG = 2;}enum EventType { // The metrics aggregated in this bulk, not include the existing persistent data.  INCREMENT = 0; // Final result of the metrics at this moment.  TOTAL = 1;}message SubscriptionReq {}message ExportResponse {}To activate the exporter, you should add this into your application.yml\nexporter:grpc:targetHost:127.0.0.1targetPort:9870 targetHost:targetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service Subscription implementation Return the expected metrics name list with event type (incremental or total). 
All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\nExport implementation Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n","title":"Metrics Exporter","url":"/docs/main/v9.0.0/en/setup/backend/metrics-exporter/"},{"content":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and analysis. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Metrics Exporter has made that possible.\nThe metrics exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporters:\n gRPC exporter  gRPC exporter gRPC exporter uses SkyWalking\u0026rsquo;s native exporter service definition. Here is the proto definition.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}message ExportMetricValue { string metricName = 1; string entityName = 2; string entityId = 3; ValueType type = 4; int64 timeBucket = 5; int64 longValue = 6; double doubleValue = 7; repeated int64 longValues = 8;}message SubscriptionsResp { repeated SubscriptionMetric metrics = 1;}message SubscriptionMetric { string metricName = 1; EventType eventType = 2;}enum ValueType { LONG = 0; DOUBLE = 1; MULTI_LONG = 2;}enum EventType { // The metrics aggregated in this bulk, not include the existing persistent data.  INCREMENT = 0; // Final result of the metrics at this moment.  TOTAL = 1;}message SubscriptionReq {}message ExportResponse {}To activate the exporter, you should add this into your application.yml\nexporter:grpc:targetHost:127.0.0.1targetPort:9870 targetHost:targetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service Subscription implementation Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\nExport implementation Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n","title":"Metrics Exporter","url":"/docs/main/v9.1.0/en/setup/backend/metrics-exporter/"},{"content":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and analysis. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Metrics Exporter has made that possible.\nThe metrics exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporters:\n gRPC exporter  gRPC exporter gRPC exporter uses SkyWalking\u0026rsquo;s native exporter service definition. 
Here is the proto definition.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}message ExportMetricValue { string metricName = 1; string entityName = 2; string entityId = 3; ValueType type = 4; int64 timeBucket = 5; int64 longValue = 6; double doubleValue = 7; repeated int64 longValues = 8;}message SubscriptionsResp { repeated SubscriptionMetric metrics = 1;}message SubscriptionMetric { string metricName = 1; EventType eventType = 2;}enum ValueType { LONG = 0; DOUBLE = 1; MULTI_LONG = 2;}enum EventType { // The metrics aggregated in this bulk, not include the existing persistent data.  INCREMENT = 0; // Final result of the metrics at this moment.  TOTAL = 1;}message SubscriptionReq {}message ExportResponse {}To activate the exporter, you should add this into your application.yml\nexporter:grpc:targetHost:127.0.0.1targetPort:9870 targetHost:targetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service Subscription implementation Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\nExport implementation Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n","title":"Metrics Exporter","url":"/docs/main/v9.2.0/en/setup/backend/metrics-exporter/"},{"content":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics For now, we only have a single anonymous label with multi label values in a labeled metric. To be able to use it in expressions, define _ as the anonymous label name (key).\nExpression:\n\u0026lt;metric_name\u0026gt;{_=\u0026#39;\u0026lt;label_value_1\u0026gt;,...\u0026#39;} {_='\u0026lt;label_value_1\u0026gt;,...'} is the selected label value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, we can use the following expression:\nservice_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. 
The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result 
SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results.\nExpression:\nrelabel(Expression, _=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) _ is the new label of the metric after the label is relabeled, the order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, and rename the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;}, _=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. 
It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, parameter)    parameter Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value):\naggregate_labels(total_commands_rate, SUM) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nTrend Operation Trend Operation takes an expression and performs a trend calculation on its results.\nExpression:\n\u0026lt;Trend-Operator\u0026gt;(Metrics Expression, time_range) time_range is the positive int of the calculated range. The unit will automatically align with to the query Step, for example, if the query Step is MINUTE, the unit of time_range is minute.\n   Operator Definition ExpressionResultType     increase returns the increase in the time range in the time series TIME_SERIES_VALUES   rate returns the per-second average rate of increase in the time range in the time series TIME_SERIES_VALUES    For example: If we want to query the increase value of the service_cpm metric in 2 minute(assume the query Step is MINUTE), we can use the following expression:\nincrease(service_cpm, 2) If the query duration is 3 minutes, from (T1 to T3) and the metric has values in time series:\nV(T1-2), V(T1-1), V(T1), V(T2), V(T3) then the expression result is:\nV(T1)-V(T1-2), V(T2)-V(T1-1), V(T3)-V(T1) Note:\n If the calculated metric value is empty, the result will be empty. Assume in the T3 point, the increase value = V(T3)-V(T1), If the metric V(T3) or V(T1) is empty, the result value in T3 will be empty.  
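For illustration only (hypothetical numbers, assuming the query Step is MINUTE and time_range is 2): if service_cpm is 100, 110, 120, 135, 150 at T1-2, T1-1, T1, T2, T3, then increase(service_cpm, 2) returns 120-100=20, 135-110=25, 150-120=30 at T1, T2, T3, and rate(service_cpm, 2) divides each increase by the 120-second range, returning about 0.17, 0.21, 0.25 per second.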
Result Type TIME_SERIES_VALUES.\nExpression Query Example Labeled Value Metrics service_percentile{_=\u0026#39;0,1\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference 
between the percentile and the average value, the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} - avg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - service_percentile{_=\u0026#39;0,1\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","title":"Metrics Query Expression(MQE) Syntax","url":"/docs/main/latest/en/api/metrics-query-expression/"},{"content":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. 
Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics Since v10.0.0, SkyWalking supports multiple labels metrics. We could query the specific labels of the metric by the following expression.\nExpression:\n\u0026lt;metric_name\u0026gt;{\u0026lt;label1_name\u0026gt;=\u0026#39;\u0026lt;label1_value_1\u0026gt;,...\u0026#39;, \u0026lt;label2_name\u0026gt;=\u0026#39;\u0026lt;label2_value_1\u0026gt;,...\u0026#39;,\u0026lt;label2...} {\u0026lt;label1_name\u0026gt;='\u0026lt;label_value_1\u0026gt;,...'} is the selected label name/value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: The k8s_cluster_deployment_status metric has labels namespace, deployment and status. If we want to query all deployment metric value with namespace=skywalking-showcase and status=true, we can use the following expression:\nk8s_cluster_deployment_status{namespace=\u0026#39;skywalking-showcase\u0026#39;, status=\u0026#39;true\u0026#39;} We also could query the label with multiple values by separating the values with ,: If we want to query the service_percentile metric with the label name p and values 50,75,90,95,99, we can use the following expression:\nservice_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions result on both sides of the operator are with labels, they should have the same labels for calculation. If the labels match, will reserve left expression result labels and the calculated value. 
Otherwise, will return empty value.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns 
the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs calculation to get the TopN of Services/Instances/Endpoints. The result depends on the entity condition in the query.\n Global TopN:  The entity is empty. The result is the topN Services/Instances/Endpoints in the whole traffics. Notice: If query the Endpoints metric, the global candidate set could be huge, please use it carefully.   Service\u0026rsquo;s Instances/Endpoints TopN:  The serviceName in the entity is not empty. The result is the topN Instances/Endpoints of the service.    Expression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the current service\u0026rsquo;s top 10 instances with the highest service_instance_cpm metric value, we can use the following expression under specific service:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results. Since v10.0.0, SkyWalking supports relabel multiple labels.\nExpression:\nrelabel(Expression, \u0026lt;target_label_name\u0026gt;=\u0026#39;\u0026lt;origin_label_value_1\u0026gt;,...\u0026#39;, \u0026lt;new_label_name\u0026gt;=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) The order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 50,75,90,95,99, and rename the label name to percentile and the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;}, p=\u0026#39;50,75,90,95,99\u0026#39;, percentile=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, AggregateType\u0026lt;Optional\u0026gt;(\u0026lt;label1_name\u0026gt;,\u0026lt;label2_name\u0026gt;...))  AggregateType is the type of the aggregation operation. \u0026lt;label1_name\u0026gt;,\u0026lt;label2_name\u0026gt;... is the label names that need to be aggregated. If not specified, all labels will be aggregated.     
AggregateType Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value): Aggregating all the labels:\naggregate_labels(total_commands_rate, sum) Also, we can aggregate by the cmd label:\naggregate_labels(total_commands_rate, sum(cmd)) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nIsPresent Operation IsPresent operation represents that in a list of metrics, if any expression has a value, it would return 1 in the result; otherwise, it would return 0.\nExpression:\nis_present([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: When the meter does not exist or the metrics has no value, it would return 0. However, if the metrics list contains meter with values, it would return 1.\nis_present(not_existing, existing_without_value, existing_with_value) Result Type The result type is SINGLE_VALUE, and the result(1 or 0) in the first value.\nTrend Operation Trend Operation takes an expression and performs a trend calculation on its results.\nExpression:\n\u0026lt;Trend-Operator\u0026gt;(Metrics Expression, time_range) time_range is the positive int of the calculated range. The unit will automatically align with to the query Step, for example, if the query Step is MINUTE, the unit of time_range is minute.\n   Operator Definition ExpressionResultType     increase returns the increase in the time range in the time series TIME_SERIES_VALUES   rate returns the per-second average rate of increase in the time range in the time series TIME_SERIES_VALUES    For example: If we want to query the increase value of the service_cpm metric in 2 minute(assume the query Step is MINUTE), we can use the following expression:\nincrease(service_cpm, 2) If the query duration is 3 minutes, from (T1 to T3) and the metric has values in time series:\nV(T1-2), V(T1-1), V(T1), V(T2), V(T3) then the expression result is:\nV(T1)-V(T1-2), V(T2)-V(T1-1), V(T3)-V(T1) Note:\n If the calculated metric value is empty, the result will be empty. Assume in the T3 point, the increase value = V(T3)-V(T1), If the metric V(T3) or V(T1) is empty, the result value in T3 will be empty.  
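As a hypothetical illustration (assuming the query Step is MINUTE and time_range is 2): if service_cpm is 60, 66, 72, 81, 90 at T1-2, T1-1, T1, T2, T3, then increase(service_cpm, 2) returns 72-60=12, 81-66=15, 90-72=18 at T1, T2, T3, and rate(service_cpm, 2) divides each of those increases by the 120-second range, returning 0.1, 0.125, 0.15 per second.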
Result Type TIME_SERIES_VALUES.\nExpression Query Example Labeled Value Metrics service_percentile{p=\u0026#39;50,95\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{p=\u0026#39;50,75\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{p=\u0026#39;50,75\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the 
difference between the percentile and the average value, the expression is:\nservice_percentile{p=\u0026#39;50,75\u0026#39;} - avg(service_percentile{p=\u0026#39;50,75\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - service_percentile{p=\u0026#39;50,75\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","title":"Metrics Query Expression(MQE) Syntax","url":"/docs/main/next/en/api/metrics-query-expression/"},{"content":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. 
Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics Expression:\n\u0026lt;metric_name\u0026gt;{label=\u0026#39;\u0026lt;label_1\u0026gt;,...\u0026#39;} label is the selected label of the metric. If label is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the labels 0,1,2,3,4, we can use the following expression:\nservice_percentile{label=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the labels to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation Binary Operation is an operation that takes two expressions and performs a calculation on their results. The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type The result type of the expression please refer to the following table.\nBinary Operation Rules The following table listed if the difference result types of the input expressions could do this operation and the result type after the operation. The expression could on the left or right side of the operator. 
Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Aggregation Operation Aggregation Operation takes an expression and performs aggregate calculation on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operator could impact the ExpressionResultType, please refer to the above table.\nFunction Operation Function Operation takes an expression and performs function calculation on its results.\nExpression:\n\u0026lt;Function-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operator could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. 
The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replace the labels to new labels on its results.\nExpression:\nrelabel(Expression, label=\u0026#39;\u0026lt;new_label_1\u0026gt;,...\u0026#39;) label is the new labels of the metric after the label is relabeled, the order of the new labels should be the same as the order of the labels in the input expression result.\nFor example: If we want to query the service_percentile metric with the labels 0,1,2,3,4, and rename the labels to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{label=\u0026#39;0,1,2,3,4\u0026#39;}, label=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\n","title":"Metrics Query Expression(MQE) Syntax","url":"/docs/main/v9.5.0/en/api/metrics-query-expression/"},{"content":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics For now, we only have a single anonymous label with multi label values in a labeled metric. To be able to use it in expressions, define _ as the anonymous label name (key).\nExpression:\n\u0026lt;metric_name\u0026gt;{_=\u0026#39;\u0026lt;label_value_1\u0026gt;,...\u0026#39;} {_='\u0026lt;label_value_1\u0026gt;,...'} is the selected label value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, we can use the following expression:\nservice_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. 
The expression could be on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute 
value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results.\nExpression:\nrelabel(Expression, _=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) _ is the new label of the metric after the label is relabeled, the order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, and rename the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;}, _=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, parameter)    parameter Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value):\naggregate_labels(total_commands_rate, SUM) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). 
It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nExpression Query Example Labeled Value Metrics service_percentile{_=\u0026#39;0,1\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: 
\u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the percentile and the average value, the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} - avg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - service_percentile{_=\u0026#39;0,1\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: 
\u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","title":"Metrics Query Expression(MQE) Syntax","url":"/docs/main/v9.6.0/en/api/metrics-query-expression/"},{"content":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics For now, we only have a single anonymous label with multi label values in a labeled metric. To be able to use it in expressions, define _ as the anonymous label name (key).\nExpression:\n\u0026lt;metric_name\u0026gt;{_=\u0026#39;\u0026lt;label_value_1\u0026gt;,...\u0026#39;} {_='\u0026lt;label_value_1\u0026gt;,...'} is the selected label value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, we can use the following expression:\nservice_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. 
The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare whether the service_resp_time metric value is greater than 3000. If the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is less than or equal to the result  follow the input expression   round returns the result rounded to specific decimal places places: a positive integer specifying the decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the 
ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs a TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results; it should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results.\nExpression:\nrelabel(Expression, _=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) _ is the new label of the metric after the label is relabeled. The order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, and rename the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;}, _=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, parameter)    parameter Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression (total_commands_rate is a metric which records every command's rate as a labeled value):\naggregate_labels(total_commands_rate, SUM) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence (left to right). It can also be considered a short-circuit among the given metrics that returns the first metric with existing values.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of the selected not-null metric expression.\nTrend Operation Trend Operation takes an expression and performs a trend calculation on its results.\nExpression:\n\u0026lt;Trend-Operator\u0026gt;(Metrics Expression, time_range) time_range is the positive int of the calculated range. 
The unit will automatically align with to the query Step, for example, if the query Step is MINUTE, the unit of time_range is minute.\n   Operator Definition ExpressionResultType     increase returns the increase in the time range in the time series TIME_SERIES_VALUES   rate returns the per-second average rate of increase in the time range in the time series TIME_SERIES_VALUES    For example: If we want to query the increase value of the service_cpm metric in 2 minute(assume the query Step is MINUTE), we can use the following expression:\nincrease(service_cpm, 2) If the query duration is 3 minutes, from (T1 to T3) and the metric has values in time series:\nV(T1-2), V(T1-1), V(T1), V(T2), V(T3) then the expression result is:\nV(T1)-V(T1-2), V(T2)-V(T1-1), V(T3)-V(T1) Note:\n If the calculated metric value is empty, the result will be empty. Assume in the T3 point, the increase value = V(T3)-V(T1), If the metric V(T3) or V(T1) is empty, the result value in T3 will be empty.  Result Type TIME_SERIES_VALUES.\nExpression Query Example Labeled Value Metrics service_percentile{_=\u0026#39;0,1\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: 
null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the percentile and the average value, the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} - avg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - service_percentile{_=\u0026#39;0,1\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, 
\u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","title":"Metrics Query Expression(MQE) Syntax","url":"/docs/main/v9.7.0/en/api/metrics-query-expression/"},{"content":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"MicroMeter Observations setup","url":"/docs/main/latest/en/setup/backend/micrometer-observations/"},{"content":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the SkyWalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. 
If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"MicroMeter Observations setup","url":"/docs/main/next/en/setup/backend/micrometer-observations/"},{"content":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"MicroMeter Observations setup","url":"/docs/main/v9.4.0/en/setup/backend/micrometer-observations/"},{"content":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. 
It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"MicroMeter Observations setup","url":"/docs/main/v9.5.0/en/setup/backend/micrometer-observations/"},{"content":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"MicroMeter Observations setup","url":"/docs/main/v9.6.0/en/setup/backend/micrometer-observations/"},{"content":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  
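The application.yml fragments on this page are rendered on a single line; a readable sketch of the same settings, assuming the usual module/selector/provider nesting of application.yml (indentation is illustrative and not verified against a specific release, values are exactly those quoted here), is:
receiver-meter:
  selector: ${SW_RECEIVER_METER:default}
  default:
agent-analyzer:
  selector: ${SW_AGENT_ANALYZER:default}
  default:
    meterAnalyzerActiveFiles: ${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}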
receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"MicroMeter Observations setup","url":"/docs/main/v9.7.0/en/setup/backend/micrometer-observations/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. 
/mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. 
generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/latest/en/setup/backend/backend-data-generator/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. 
qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 
0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/next/en/setup/backend/backend-data-generator/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. 
You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. 
a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. 
Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/v9.1.0/en/setup/backend/backend-data-generator/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. 
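As a rough illustration only (the complete schema comes from the linked segment/log template files; someNumericField below is a purely hypothetical field name, not part of any documented template), several of the generators described on this page can be combined in one template fragment:
"traceId": { "type": "uuid", "changingFrequency": "5" },
"error": { "type": "randomBool", "possibility": "0.9" },
"someNumericField": { "type": "sequence", "min": 10, "max": 15, "step": 1, "fluctuation": 2 }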
By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. 
the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/v9.2.0/en/setup/backend/backend-data-generator/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. 
You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. 
generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/v9.3.0/en/setup/backend/backend-data-generator/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. 
qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 
0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/v9.4.0/en/setup/backend/backend-data-generator/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. 
You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. 
a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. 
Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/v9.5.0/en/setup/backend/backend-data-generator/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. 
By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. 
the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/v9.6.0/en/setup/backend/backend-data-generator/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. 
You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. 
generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/v9.7.0/en/setup/backend/backend-data-generator/"},{"content":"Module Design Controller The controller means composing all the steps declared in the configuration file, it progressive and display which step is currently running. If it failed in a step, the error message could be shown, as much comprehensive as possible. An example of the output might be.\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? Generating Traffic - HTTP localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up the environment (by executing the cleanup command) no matter what status other commands are, even if they are failed, the controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Steps According to the content in the Controller, E2E Testing can be divided into the following steps.\nSetup Start the environment required for this E2E Testing, such as database, back-end process, API, etc.\nSupport two ways to set up the environment:\n compose:  Start the docker-compose services. Check the services' healthiness. 
Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify, such as yq help to eval the YAML format.   kind:  Start the KinD cluster according to the config files or Start on an existing kubernetes cluster. Apply the resources files (--manifests) or/and run the custom init command (--commands). Check the pods' readiness. Wait until all pods are ready according to the interval, etc.    Trigger Generate traffic by trigger the action, It could access HTTP API or execute commands with interval.\nIt could have these settings:\n interval: How frequency to trigger the action. times: How many times the operation is triggered before aborting on the condition that the trigger had failed always. 0=infinite. action: The action of the trigger.  Verify Verify that the data content is matching with the expected results. such as unit test assert, etc.\nIt could have these settings:\n actual: The actual data file. query: The query to get the actual data, could run shell commands to generate the data. expected: The expected data file, could specify some matching rules to verify the actual content.  Cleanup This step requires the same options in the setup step so that it can clean up all things necessarily. Such as destroy the environment, etc.\n","title":"Module Design","url":"/docs/skywalking-infra-e2e/latest/en/concepts-and-designs/module-design/"},{"content":"Module Design Controller The controller means composing all the steps declared in the configuration file, it progressive and display which step is currently running. If it failed in a step, the error message could be shown, as much comprehensive as possible. An example of the output might be.\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? Generating Traffic - HTTP localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up the environment (by executing the cleanup command) no matter what status other commands are, even if they are failed, the controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Steps According to the content in the Controller, E2E Testing can be divided into the following steps.\nSetup Start the environment required for this E2E Testing, such as database, back-end process, API, etc.\nSupport two ways to set up the environment:\n compose:  Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify, such as yq help to eval the YAML format.   kind:  Start the KinD cluster according to the config files or Start on an existing kubernetes cluster. Apply the resources files (--manifests) or/and run the custom init command (--commands). Check the pods' readiness. Wait until all pods are ready according to the interval, etc.    Trigger Generate traffic by trigger the action, It could access HTTP API or execute commands with interval.\nIt could have these settings:\n interval: How frequency to trigger the action. 
times: How many times the operation is triggered before aborting on the condition that the trigger had failed always. 0=infinite. action: The action of the trigger.  Verify Verify that the data content is matching with the expected results. such as unit test assert, etc.\nIt could have these settings:\n actual: The actual data file. query: The query to get the actual data, could run shell commands to generate the data. expected: The expected data file, could specify some matching rules to verify the actual content.  Cleanup This step requires the same options in the setup step so that it can clean up all things necessarily. Such as destroy the environment, etc.\n","title":"Module Design","url":"/docs/skywalking-infra-e2e/next/en/concepts-and-designs/module-design/"},{"content":"Module Design Controller The controller means composing all the steps declared in the configuration file, it progressive and display which step is currently running. If it failed in a step, the error message could be shown, as much comprehensive as possible. An example of the output might be.\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? Generating Traffic - HTTP localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up the environment (by executing the cleanup command) no matter what status other commands are, even if they are failed, the controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Steps According to the content in the Controller, E2E Testing can be divided into the following steps.\nSetup Start the environment required for this E2E Testing, such as database, back-end process, API, etc.\nSupport two ways to set up the environment:\n compose:  Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify, such as yq help to eval the YAML format.   kind:  Start the KinD cluster according to the config files or Start on an existing kubernetes cluster. Apply the resources files (--manifests) or/and run the custom init command (--commands). Check the pods' readiness. Wait until all pods are ready according to the interval, etc.    Trigger Generate traffic by trigger the action, It could access HTTP API or execute commands with interval.\nIt could have these settings:\n interval: How frequency to trigger the action. times: How many times the operation is triggered before aborting on the condition that the trigger had failed always. 0=infinite. action: The action of the trigger.  Verify Verify that the data content is matching with the expected results. such as unit test assert, etc.\nIt could have these settings:\n actual: The actual data file. query: The query to get the actual data, could run shell commands to generate the data. expected: The expected data file, could specify some matching rules to verify the actual content.  Cleanup This step requires the same options in the setup step so that it can clean up all things necessarily. 
Such as destroy the environment, etc.\n","title":"Module Design","url":"/docs/skywalking-infra-e2e/v1.3.0/en/concepts-and-designs/module-design/"},{"content":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to process the telemetry data(metrics/traces/logs). Two pipes are not sharing data.\n Satellite --------------------------------------------------------------------- | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | --------------------------------------------------------------------- Modules Module is the core workers in Satellite. Module is constituted by the specific extension plugins. There are 3 modules in one namespace, which are Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing the data to Queue. So there are 2 kinds of Gatherer, which are ReceiverGatherer and FetcherGatherer. The Processor module is responsible for reading data from the queue and processing data by a series of filter chains. The Sender module is responsible for async processing and forwarding the data to the external services in the batch mode. After sending success, Sender would also acknowledge the offset of Queue in Gatherer.   Pipe -------------------------------------------------------------------- | ---------- ----------- -------- | | | Gatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ---------- ----------- -------- | -------------------------------------------------------------------- LifeCycle\n Prepare: Prepare phase is to do some preparation works, such as register the client status listener to the client in ReceiverGatherer. Boot: Boot phase is to start the current module until receives a close signal. ShutDown: ShutDown phase is to close the used resources.  Plugins Plugin is the minimal components in the module. Satellite has 2 plugin catalogs, which are sharing plugins and normal plugins.\n a sharing plugin instance could be sharing with multiple modules in the different pipes. a normal plugin instance is only be used in a fixed module of the fixed pipes.  Sharing plugin Nowadays, there are 2 kinds of sharing plugins in Satellite, which are server plugins and client plugins. The reason why they are sharing plugins is to reduce the resource cost in connection. Server plugins are sharing with the ReceiverGatherer modules in the different pipes to receive the external requests. 
And the client plugins is sharing with the Sender modules in the different pipes to connect with external services, such as Kafka and OAP.\n Sharing Server Sharing Client -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- Normal plugin There are 7 kinds of normal plugins in Satellite, which are Receiver, Fetcher, Queue, Parser, Filter, Forwarder, and Fallbacker.\n Receiver: receives the input APM data from the request. Fetcher: fetch the APM data by fetching. Queue: store the APM data to ensure the data stability. Parser: supports some ways to parse data, such parse a csv file. Filter: processes the APM data. Forwarder: forwards the APM data to the external receiver, such as Kafka and OAP. Fallbacker: supports some fallback strategies, such as timer retry strategy.   Gatherer Processor ------------------------------- ------------------------------------------- | ----------- --------- | | ----------- ----------- | | | Receiver | ==\u0026gt; | Queue | |==\u0026gt;| | Filter | ==\u0026gt; ... ==\u0026gt; | Filter | | | | /Fetcher | | Mem/File | | | ----------- ----------- | | ----------- ---------- | | || || | -------------------------------- | \\/\t\\/ | | --------------------------------------- | | | OutputEventContext | | | --------------------------------------- | ------------------------------------------- || \\/ Sender ------------------------------------------ | --- --- | | | B | | D | ----------------- | | | A | | I | |Segment Forwarder| | | | T | | S | | (Fallbacker) | | | | C | | P | ----------------- | | | H | =\u0026gt; | A | | ===\u0026gt; Kafka/OAP | | B | | T | =\u0026gt; ...... | | | U | | C | | | | F | | H | ----------------- | | | F | | E | | Meter Forwarder| | | | E | | R | | (Fallbacker | | | | R | | | ----------------- | | --- --- | ------------------------------------------ 1. The Fetcher/Receiver plugin would fetch or receive the input data. 2. The Parser plugin would parse the input data to SerializableEvent that is supported to be stored in Queue. 3. The Queue plugin stores the SerializableEvent. However, whether serializing depends on the Queue implements. For example, the serialization is unnecessary when using a Memory Queue. Once an event is pulled by the consumer of Queue, the event will be processed by the filters in Processor. 4. The Filter plugin would process the event to create a new event. Next, the event is passed to the next filter to do the same things until the whole filters are performed. All created events would be stored in the OutputEventContext. However, only the events labeled with RemoteEvent type would be forwarded by Forwarder. 5. After processing, the events in OutputEventContext would be stored in the BatchBuffer. 
When the timer is triggered or the capacity limit is reached, the events in BatchBuffer would be partitioned by EventType and sent to the different Forwarders, such as Segment Forwarder and Meter Forwarder. 6. The Follower in different Senders would share with the remote client to avoid make duplicate connections and have the same Fallbacker(FallBack strategy) to process data. When all forwarders send success or process success in Fallbacker, the dispatcher would also ack the batch is a success. ============================================================================================ ","title":"Module Design","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/module_design/"},{"content":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to process the telemetry data(metrics/traces/logs). Two pipes are not sharing data.\n Satellite --------------------------------------------------------------------- | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | --------------------------------------------------------------------- Modules Module is the core workers in Satellite. Module is constituted by the specific extension plugins. There are 3 modules in one namespace, which are Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing the data to Queue. So there are 2 kinds of Gatherer, which are ReceiverGatherer and FetcherGatherer. The Processor module is responsible for reading data from the queue and processing data by a series of filter chains. The Sender module is responsible for async processing and forwarding the data to the external services in the batch mode. After sending success, Sender would also acknowledge the offset of Queue in Gatherer.   Pipe -------------------------------------------------------------------- | ---------- ----------- -------- | | | Gatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ---------- ----------- -------- | -------------------------------------------------------------------- LifeCycle\n Prepare: Prepare phase is to do some preparation works, such as register the client status listener to the client in ReceiverGatherer. Boot: Boot phase is to start the current module until receives a close signal. ShutDown: ShutDown phase is to close the used resources.  Plugins Plugin is the minimal components in the module. Satellite has 2 plugin catalogs, which are sharing plugins and normal plugins.\n a sharing plugin instance could be sharing with multiple modules in the different pipes. a normal plugin instance is only be used in a fixed module of the fixed pipes.  Sharing plugin Nowadays, there are 2 kinds of sharing plugins in Satellite, which are server plugins and client plugins. The reason why they are sharing plugins is to reduce the resource cost in connection. Server plugins are sharing with the ReceiverGatherer modules in the different pipes to receive the external requests. 
And the client plugins is sharing with the Sender modules in the different pipes to connect with external services, such as Kafka and OAP.\n Sharing Server Sharing Client -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- Normal plugin There are 7 kinds of normal plugins in Satellite, which are Receiver, Fetcher, Queue, Parser, Filter, Forwarder, and Fallbacker.\n Receiver: receives the input APM data from the request. Fetcher: fetch the APM data by fetching. Queue: store the APM data to ensure the data stability. Parser: supports some ways to parse data, such parse a csv file. Filter: processes the APM data. Forwarder: forwards the APM data to the external receiver, such as Kafka and OAP. Fallbacker: supports some fallback strategies, such as timer retry strategy.   Gatherer Processor ------------------------------- ------------------------------------------- | ----------- --------- | | ----------- ----------- | | | Receiver | ==\u0026gt; | Queue | |==\u0026gt;| | Filter | ==\u0026gt; ... ==\u0026gt; | Filter | | | | /Fetcher | | Mem/File | | | ----------- ----------- | | ----------- ---------- | | || || | -------------------------------- | \\/\t\\/ | | --------------------------------------- | | | OutputEventContext | | | --------------------------------------- | ------------------------------------------- || \\/ Sender ------------------------------------------ | --- --- | | | B | | D | ----------------- | | | A | | I | |Segment Forwarder| | | | T | | S | | (Fallbacker) | | | | C | | P | ----------------- | | | H | =\u0026gt; | A | | ===\u0026gt; Kafka/OAP | | B | | T | =\u0026gt; ...... | | | U | | C | | | | F | | H | ----------------- | | | F | | E | | Meter Forwarder| | | | E | | R | | (Fallbacker | | | | R | | | ----------------- | | --- --- | ------------------------------------------ 1. The Fetcher/Receiver plugin would fetch or receive the input data. 2. The Parser plugin would parse the input data to SerializableEvent that is supported to be stored in Queue. 3. The Queue plugin stores the SerializableEvent. However, whether serializing depends on the Queue implements. For example, the serialization is unnecessary when using a Memory Queue. Once an event is pulled by the consumer of Queue, the event will be processed by the filters in Processor. 4. The Filter plugin would process the event to create a new event. Next, the event is passed to the next filter to do the same things until the whole filters are performed. All created events would be stored in the OutputEventContext. However, only the events labeled with RemoteEvent type would be forwarded by Forwarder. 5. After processing, the events in OutputEventContext would be stored in the BatchBuffer. 
When the timer is triggered or the capacity limit is reached, the events in BatchBuffer would be partitioned by EventType and sent to the different Forwarders, such as Segment Forwarder and Meter Forwarder. 6. The Follower in different Senders would share with the remote client to avoid make duplicate connections and have the same Fallbacker(FallBack strategy) to process data. When all forwarders send success or process success in Fallbacker, the dispatcher would also ack the batch is a success. ============================================================================================ ","title":"Module Design","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/module_design/"},{"content":"Module Design Pipe The pipe is an isolation concept in Satellite. Each pipe has one pipeline to process the telemetry data(metrics/traces/logs). Two pipes are not sharing data.\n Satellite --------------------------------------------------------------------- | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | | ------------------------------------------- | | | Pipe | | | ------------------------------------------- | --------------------------------------------------------------------- Modules Module is the core workers in Satellite. Module is constituted by the specific extension plugins. There are 3 modules in one namespace, which are Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing the data to Queue. So there are 2 kinds of Gatherer, which are ReceiverGatherer and FetcherGatherer. The Processor module is responsible for reading data from the queue and processing data by a series of filter chains. The Sender module is responsible for async processing and forwarding the data to the external services in the batch mode. After sending success, Sender would also acknowledge the offset of Queue in Gatherer.   Pipe -------------------------------------------------------------------- | ---------- ----------- -------- | | | Gatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ---------- ----------- -------- | -------------------------------------------------------------------- LifeCycle\n Prepare: Prepare phase is to do some preparation works, such as register the client status listener to the client in ReceiverGatherer. Boot: Boot phase is to start the current module until receives a close signal. ShutDown: ShutDown phase is to close the used resources.  Plugins Plugin is the minimal components in the module. Satellite has 2 plugin catalogs, which are sharing plugins and normal plugins.\n a sharing plugin instance could be sharing with multiple modules in the different pipes. a normal plugin instance is only be used in a fixed module of the fixed pipes.  Sharing plugin Nowadays, there are 2 kinds of sharing plugins in Satellite, which are server plugins and client plugins. The reason why they are sharing plugins is to reduce the resource cost in connection. Server plugins are sharing with the ReceiverGatherer modules in the different pipes to receive the external requests. 
And the client plugins is sharing with the Sender modules in the different pipes to connect with external services, such as Kafka and OAP.\n Sharing Server Sharing Client -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- -------------------------------------------------------------------- | ------------------ ----------- -------- | | | ReceiverGatherer | =\u0026gt; | Processor | =\u0026gt; | Sender | | | ------------------ ----------- -------- | -------------------------------------------------------------------- Normal plugin There are 7 kinds of normal plugins in Satellite, which are Receiver, Fetcher, Queue, Parser, Filter, Forwarder, and Fallbacker.\n Receiver: receives the input APM data from the request. Fetcher: fetch the APM data by fetching. Queue: store the APM data to ensure the data stability. Parser: supports some ways to parse data, such parse a csv file. Filter: processes the APM data. Forwarder: forwards the APM data to the external receiver, such as Kafka and OAP. Fallbacker: supports some fallback strategies, such as timer retry strategy.   Gatherer Processor ------------------------------- ------------------------------------------- | ----------- --------- | | ----------- ----------- | | | Receiver | ==\u0026gt; | Queue | |==\u0026gt;| | Filter | ==\u0026gt; ... ==\u0026gt; | Filter | | | | /Fetcher | | Mem/File | | | ----------- ----------- | | ----------- ---------- | | || || | -------------------------------- | \\/\t\\/ | | --------------------------------------- | | | OutputEventContext | | | --------------------------------------- | ------------------------------------------- || \\/ Sender ------------------------------------------ | --- --- | | | B | | D | ----------------- | | | A | | I | |Segment Forwarder| | | | T | | S | | (Fallbacker) | | | | C | | P | ----------------- | | | H | =\u0026gt; | A | | ===\u0026gt; Kafka/OAP | | B | | T | =\u0026gt; ...... | | | U | | C | | | | F | | H | ----------------- | | | F | | E | | Meter Forwarder| | | | E | | R | | (Fallbacker | | | | R | | | ----------------- | | --- --- | ------------------------------------------ 1. The Fetcher/Receiver plugin would fetch or receive the input data. 2. The Parser plugin would parse the input data to SerializableEvent that is supported to be stored in Queue. 3. The Queue plugin stores the SerializableEvent. However, whether serializing depends on the Queue implements. For example, the serialization is unnecessary when using a Memory Queue. Once an event is pulled by the consumer of Queue, the event will be processed by the filters in Processor. 4. The Filter plugin would process the event to create a new event. Next, the event is passed to the next filter to do the same things until the whole filters are performed. All created events would be stored in the OutputEventContext. However, only the events labeled with RemoteEvent type would be forwarded by Forwarder. 5. After processing, the events in OutputEventContext would be stored in the BatchBuffer. 
When the timer is triggered or the capacity limit is reached, the events in BatchBuffer would be partitioned by EventType and sent to the different Forwarders, such as Segment Forwarder and Meter Forwarder. 6. The Follower in different Senders would share with the remote client to avoid make duplicate connections and have the same Fallbacker(FallBack strategy) to process data. When all forwarders send success or process success in Fallbacker, the dispatcher would also ack the batch is a success. ============================================================================================ ","title":"Module Design","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/module_design/"},{"content":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Avg QPS  meter_mongodb_cluster_document_avg_qps Avg document operations rate of nodes mongodb-exporter   Operation Avg QPS  meter_mongodb_cluster_operation_avg_qps Avg operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Avg  meter_mongodb_cluster_cursor_avg Avg Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Avg Data Size Per Shard (GB) GB meter_mongodb_cluster_db_data_size Avg data size per shard (replSet) of every database mongodb-exporter   DB Avg Index Size Per Shard (GB) GB meter_mongodb_cluster_db_index_size Avg index size per shard (replSet) of every database mongodb-exporter   DB Avg Collection Count Per Shard  meter_mongodb_cluster_db_collection_count Avg collection count per shard (replSet) of every database mongodb-exporter   DB Avg Index Count Per Shard  meter_mongodb_cluster_db_index_count Avg index count per shard (replSet) of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  
meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_latency Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_operation_latency Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","title":"MongoDB monitoring","url":"/docs/main/latest/en/setup/backend/backend-mongodb-monitoring/"},{"content":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. 
The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Total QPS  meter_mongodb_cluster_document_avg_qps Total document operations rate of nodes mongodb-exporter   Operation Total QPS  meter_mongodb_cluster_operation_avg_qps Total operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Total  meter_mongodb_cluster_cursor_avg Total Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Total Data Size (GB) GB meter_mongodb_cluster_db_data_size Total data size of every database mongodb-exporter   DB Total Index Size (GB) GB meter_mongodb_cluster_db_index_size Total index size per of every database mongodb-exporter   DB Total Collection Count  meter_mongodb_cluster_db_collection_count Total collection count of every database mongodb-exporter   DB Total Index Count  meter_mongodb_cluster_db_index_count Total index count of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_op_ratemeter_mongodb_node_latency_rate Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active 
Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_op_ratemeter_mongodb_node_latency_rate Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","title":"MongoDB monitoring","url":"/docs/main/next/en/setup/backend/backend-mongodb-monitoring/"},{"content":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. 
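The setup list above only names the pieces (mongodb-exporter, OpenTelemetry Collector, the SkyWalking receiver); a minimal Collector configuration along those lines might look like the sketch below, where the exporter and OAP addresses are placeholders and the example linked from the docs remains the authoritative reference.

    # Minimal sketch; target addresses are placeholders for your environment.
    receivers:
      prometheus:
        config:
          scrape_configs:
            - job_name: mongodb-monitoring
              scrape_interval: 30s
              static_configs:
                - targets: [mongodb-exporter:9216]   # mongodb-exporter sidecar
    processors:
      batch: {}
    exporters:
      otlp:
        endpoint: oap:11800                          # SkyWalking OAP OTLP gRPC receiver
        tls:
          insecure: true
    service:
      pipelines:
        metrics:
          receivers: [prometheus]
          processors: [batch]
          exporters: [otlp]
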
In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Avg QPS  meter_mongodb_cluster_document_avg_qps Avg document operations rate of nodes mongodb-exporter   Operation Avg QPS  meter_mongodb_cluster_operation_avg_qps Avg operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Avg  meter_mongodb_cluster_cursor_avg Avg Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Avg Data Size Per Shard (GB) GB meter_mongodb_cluster_db_data_size Avg data size per shard (replSet) of every database mongodb-exporter   DB Avg Index Size Per Shard (GB) GB meter_mongodb_cluster_db_index_size Avg index size per shard (replSet) of every database mongodb-exporter   DB Avg Collection Count Per Shard  meter_mongodb_cluster_db_collection_count Avg collection count per shard (replSet) of every database mongodb-exporter   DB Avg Index Count Per Shard  meter_mongodb_cluster_db_index_count Avg index count per shard (replSet) of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_latency Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per 
second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_operation_latency Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","title":"MongoDB monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-mongodb-monitoring/"},{"content":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. 
In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Avg QPS  meter_mongodb_cluster_document_avg_qps Avg document operations rate of nodes mongodb-exporter   Operation Avg QPS  meter_mongodb_cluster_operation_avg_qps Avg operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Avg  meter_mongodb_cluster_cursor_avg Avg Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Avg Data Size Per Shard (GB) GB meter_mongodb_cluster_db_data_size Avg data size per shard (replSet) of every database mongodb-exporter   DB Avg Index Size Per Shard (GB) GB meter_mongodb_cluster_db_index_size Avg index size per shard (replSet) of every database mongodb-exporter   DB Avg Collection Count Per Shard  meter_mongodb_cluster_db_collection_count Avg collection count per shard (replSet) of every database mongodb-exporter   DB Avg Index Count Per Shard  meter_mongodb_cluster_db_index_count Avg index count per shard (replSet) of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_latency Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per 
second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_operation_latency Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","title":"MongoDB monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-mongodb-monitoring/"},{"content":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document for more details.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\n","title":"MySQL","url":"/docs/main/latest/en/setup/backend/storages/mysql/"},{"content":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document for more details.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\n","title":"MySQL","url":"/docs/main/next/en/setup/backend/storages/mysql/"},{"content":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document for more details.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\n","title":"MySQL","url":"/docs/main/v9.7.0/en/setup/backend/storages/mysql/"},{"content":"MySQL monitoring SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data from MySQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL. 
OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL Monitoring MySQL monitoring provides monitoring of the status and resources of the MySQL server. MySQL server as a Service in OAP, and land on the Layer: MYSQL.\nMySQL Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql.yaml. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL monitoring","url":"/docs/main/v9.2.0/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL monitoring MySQL server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . 
For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL Monitoring MySQL monitoring provides monitoring of the status and resources of the MySQL server. MySQL cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql.yaml. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL.\nData flow  fluentbit agent collects slow sql logs from MySQL. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config MySQL to enable slow log.example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL server. MySQL server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. 
The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL monitoring","url":"/docs/main/v9.3.0/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. 
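As a rough illustration of the "Config SkyWalking OpenTelemetry receiver" step in the set up list above, the OTLP metrics handler and the otel-rules to activate are typically selected on the OAP side; the keys and variable names below reflect recent OAP releases and are a hypothetical sketch, so treat the application.yml shipped with your version as the source of truth.

    # Hypothetical sketch: handler values and rule lists vary between OAP releases.
    receiver-otel:
      selector: ${SW_OTEL_RECEIVER:default}
      default:
        enabledHandlers: ${SW_OTEL_RECEIVER_ENABLED_HANDLERS:"otlp-metrics"}
        enabledOtelMetricsRules: ${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:"mysql/*"}
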
The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL/MariaDB monitoring","url":"/docs/main/latest/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. 
mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_max_connections\nmeter_mysql_status_thread_connected\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL/MariaDB monitoring","url":"/docs/main/next/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. 
OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. 
MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL/MariaDB monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. 
mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL/MariaDB monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. 
OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. 
MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL/MariaDB monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. 
mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL/MariaDB monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-mysql-monitoring/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. 
It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records (excluding super datasets, such as segments) indices into the physical index templates metrics-all and records-all, respectively. The logic index name would be present in the metric_table or record_table column. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super datasets, such as traces and logs, would not be affected by this.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true)  OAP shards metrics/meter indices into multiple physical indices as in previous versions (one index template per metric/meter aggregation function). Records and metrics without an aggregation function configured through the @MetricsFunction or @MeterFunction annotation would not be merged; they would be kept in a separate index template. The sharded template name would be metrics- or meter- followed by the aggregation function name, such as metrics-count, and the logic index name would be present in the metric_table column. The OAP would not use the column alias; the logic column name would be the real physical column name.   Notice: Users can still choose to adjust ElasticSearch\u0026rsquo;s shard number (SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/latest/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides two storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records (excluding super datasets, such as segments) indices into the physical index templates metrics-all and records-all, respectively. The logic index name would be present in the metric_table or record_table column. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super datasets, such as traces and logs, would not be affected by this.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true)  OAP shards metrics/meter indices into multiple physical indices as in previous versions (one index template per metric/meter aggregation function). Records and metrics without an aggregation function configured through the @MetricsFunction or @MeterFunction annotation would not be merged; they would be kept in a separate index template. The sharded template name would be metrics- or meter- followed by the aggregation function name, such as metrics-count, and the logic index name would be present in the metric_table column. The OAP would not use the column alias; the logic column name would be the real physical column name.   
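For a concrete picture, the physical index templates produced by each mode would look roughly like the sketch below; metrics-all, records-all and metrics-count come from the description above, while the remaining names are illustrative assumptions that depend on which aggregation functions are actually in use:
no_sharding_mode:   # SW_STORAGE_ES_LOGIC_SHARDING = false
  - metrics-all     # all metrics/meter logic indices, identified by the metric_table column
  - records-all     # all non-super-dataset records, identified by the record_table column
sharding_mode:      # SW_STORAGE_ES_LOGIC_SHARDING = true
  - metrics-count   # one template per aggregation function, e.g. count
  - metrics-sum     # illustrative assumption
  - meter-avg       # illustrative assumption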
Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/next/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/v9.2.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). 
Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/v9.3.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/v9.4.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. 
If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/v9.5.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/v9.6.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. 
OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records (excluding super datasets, such as segments) indices into the physical index templates metrics-all and records-all, respectively. The logic index name would be present in the metric_table or record_table column. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super datasets, such as traces and logs, would not be affected by this.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true)  OAP shards metrics/meter indices into multiple physical indices as in previous versions (one index template per metric/meter aggregation function). Records and metrics without an aggregation function configured through the @MetricsFunction or @MeterFunction annotation would not be merged; they would be kept in a separate index template. The sharded template name would be metrics- or meter- followed by the aggregation function name, such as metrics-count, and the logic index name would be present in the metric_table column. The OAP would not use the column alias; the logic column name would be the real physical column name.   Notice: Users can still choose to adjust ElasticSearch\u0026rsquo;s shard number (SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/v9.7.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a Lua library that can be used with Nginx to collect metrics and expose them on a separate web page. To use this library, you will need Nginx with lua-nginx-module or directly OpenResty.\nSkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  nginx-lua-prometheus collects metrics from Nginx and exposes them on an endpoint. OpenTelemetry Collector fetches metrics from the endpoint exposed above via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Collect Nginx metrics and expose the following four metrics via nginx-lua-prometheus. For details on the metrics definition, refer to here.   histogram: nginx_http_latency gauge: nginx_http_connections counter: nginx_http_size_bytes counter: nginx_http_requests_total  Set up OpenTelemetry Collector. For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Nginx Monitoring SkyWalking observes the status, payload, and latency of the Nginx server, which is cataloged as a LAYER: Nginx Service in the OAP, and instances would be recognized as LAYER: Nginx instances.\nAbout the LAYER: Nginx endpoint, it depends on how precisely you want to monitor Nginx. 
We do not recommend expose every request path metrics, because it will cause explosion of metrics endpoint data.\nYou can collect host metrics:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or grouped urls and upstream metrics:\nupstream backend { server ip:port; } server { location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } Nginx Service Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_service_http_requests Service The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_service_http_latency Service The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_service_bandwidth Service The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_service_http_connections Service The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_service_http_status Service The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_service_http_4xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_service_http_5xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Instance Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_instance_http_requests Instance The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_instance_http_latency Instance The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_instance_http_connections Instance The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_instance_http_status Instance The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 
4xx Percent % meter_nginx_instance_http_4xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_instance_http_5xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 5xx status of HTTP requests nginx-lua-prometheus    Nginx Endpoint Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_endpoint_http_requests Endpoint The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_endpoint_http_latency Endpoint The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_endpoint_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Status Trend  meter_nginx_endpoint_http_status Endpoint The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_endpoint_http_4xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_endpoint_http_5xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 5xx status of HTTP requests nginx-lua-prometheus    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-rules/nginx-service.yaml, /config/otel-rules/nginx-instance.yaml, /config/otel-rules/nginx-endpoint.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\nCollect Nginx access and error logs SkyWalking leverages fluentbit or other log agents for collecting the access log and error log of Nginx.\nData flow  fluentbit agent collects the access log and error log from Nginx. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Install fluentbit. Config fluent bit with fluent-bit.conf, refer to here.  Error Log Monitoring Error Log monitoring provides monitoring of the error.log of the Nginx server.\nSupported Metrics    Monitoring Panel Metric Name Catalog Description Data Source     Service Error Log Count meter_nginx_service_error_log_count Service The count per log level of the Nginx error.log fluent bit   Instance Error Log Count meter_nginx_instance_error_log_count Instance The count per log level of the Nginx error.log fluent bit    Customizations You can customize your own metrics/expression/dashboard panel.\nThe log collection and analysis rules are found in /config/lal/nginx.yaml, /config/log-mal-rules/nginx.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\n","title":"Nginx monitoring","url":"/docs/main/latest/en/setup/backend/backend-nginx-monitoring/"},{"content":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a Lua library that can be used with Nginx to collect metrics and expose them on a separate web page. 
To use this library, you will need Nginx with lua-nginx-module or directly OpenResty.\nSkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  nginx-lua-prometheus collects metrics from Nginx and expose them to an endpoint. OpenTelemetry Collector fetches metrics from the endpoint expose above via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Collect Nginx metrics and expose the following four metrics by nginx-lua-prometheus. For details on metrics definition, refer to here.   histogram: nginx_http_latency gauge: nginx_http_connections counter: nginx_http_size_bytes counter: nginx_http_requests_total  Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Nginx Monitoring SkyWalking observes the status, payload, and latency of the Nginx server, which is cataloged as a LAYER: Nginx Service in the OAP and instances would be recognized as LAYER: Nginx instance.\nAbout LAYER: Nginx endpoint, it depends on how precision you want to monitor the nginx. We do not recommend expose every request path metrics, because it will cause explosion of metrics endpoint data.\nYou can collect host metrics:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or grouped urls and upstream metrics:\nupstream backend { server ip:port; } server { location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } Nginx Service Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_service_http_requests Service The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_service_http_latency Service The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_service_bandwidth Service The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_service_http_connections Service The avg number of the connections 
nginx-lua-prometheus   HTTP Status Trend  meter_nginx_service_http_status Service The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_service_http_4xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_service_http_5xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 5xx status of HTTP requests nginx-lua-prometheus    Nginx Instance Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_instance_http_requests Instance The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_instance_http_latency Instance The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_instance_http_connections Instance The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_instance_http_status Instance The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_instance_http_4xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_instance_http_5xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 5xx status of HTTP requests nginx-lua-prometheus    Nginx Endpoint Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_endpoint_http_requests Endpoint The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_endpoint_http_latency Endpoint The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_endpoint_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Status Trend  meter_nginx_endpoint_http_status Endpoint The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_endpoint_http_4xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_endpoint_http_5xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 5xx status of HTTP requests nginx-lua-prometheus    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-rules/nginx-service.yaml, /config/otel-rules/nginx-instance.yaml, /config/otel-rules/nginx-endpoint.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\nCollect Nginx access and error logs SkyWalking leverages fluentbit or other log agents for collecting the access log and error log of Nginx.\nData flow  fluentbit agent collects the access log and error log from Nginx. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Install fluentbit. 
Config fluent bit with fluent-bit.conf, refer to here.  Error Log Monitoring Error Log monitoring provides monitoring of the error.log of the Nginx server.\nSupported Metrics    Monitoring Panel Metric Name Catalog Description Data Source     Service Error Log Count meter_nginx_service_error_log_count Service The count of log level of nginx error.log fluent bit   Instance Error Log Count meter_nginx_instance_error_log_count Instance The count of log level of nginx error.log fluent bit    Customizations You can customize your own metrics/expression/dashboard panel.\nThe log collect and analyse rules are found in /config/lal/nginx.yaml, /config/log-mal-rules/nginx.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\n","title":"Nginx monitoring","url":"/docs/main/next/en/setup/backend/backend-nginx-monitoring/"},{"content":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua library that can be used with Nginx to collect metrics and expose them on a separate web page. To use this library, you will need Nginx with lua-nginx-module or directly OpenResty.\nSkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  nginx-lua-prometheus collects metrics from Nginx and expose them to an endpoint. OpenTelemetry Collector fetches metrics from the endpoint expose above via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Collect Nginx metrics and expose the following four metrics by nginx-lua-prometheus. For details on metrics definition, refer to here.   histogram: nginx_http_latency gauge: nginx_http_connections counter: nginx_http_size_bytes counter: nginx_http_requests_total  Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Nginx Monitoring SkyWalking observes the status, payload, and latency of the Nginx server, which is cataloged as a LAYER: Nginx Service in the OAP and instances would be recognized as LAYER: Nginx instance.\nAbout LAYER: Nginx endpoint, it depends on how precision you want to monitor the nginx. 
We do not recommend expose every request path metrics, because it will cause explosion of metrics endpoint data.\nYou can collect host metrics:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or grouped urls and upstream metrics:\nupstream backend { server ip:port; } server { location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } Nginx Service Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_service_http_requests Service The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_service_http_latency Service The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_service_bandwidth Service The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_service_http_connections Service The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_service_http_status Service The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_service_http_4xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_service_http_5xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Instance Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_instance_http_requests Instance The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_instance_http_latency Instance The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_instance_http_connections Instance The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_instance_http_status Instance The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 
4xx Percent % meter_nginx_instance_http_4xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_instance_http_5xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Endpoint Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_endpoint_http_requests Endpoint The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_endpoint_http_latency Endpoint The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_endpoint_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Status Trend  meter_nginx_endpoint_http_status Endpoint The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_endpoint_http_4xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_endpoint_http_5xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-rules/nginx-service.yaml, /config/otel-rules/nginx-instance.yaml, /config/otel-rules/nginx-endpoint.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\nCollect nginx access and error log SkyWalking leverages fluentbit or other log agents for collecting access log and error log of Nginx.\nData flow  fluentbit agent collects access log and error log from Nginx. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Install fluentbit. Config fluent bit with fluent-bit.conf, refer to here.  Error Log Monitoring Error Log monitoring provides monitoring of the error.log of the Nginx server.\nSupported Metrics    Monitoring Panel Metric Name Catalog Description Data Source     Service Error Log Count meter_nginx_service_error_log_count Service The count of log level of nginx error.log fluent bit   Instance Error Log Count meter_nginx_instance_error_log_count Instance The count of log level of nginx error.log fluent bit    Customizations You can customize your own metrics/expression/dashboard panel.\nThe log collect and analyse rules are found in /config/lal/nginx.yaml, /config/log-mal-rules/nginx.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\n","title":"Nginx monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-nginx-monitoring/"},{"content":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. 
So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","title":"OAP backend dependency management","url":"/docs/main/latest/en/guides/dependencies/"},{"content":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","title":"OAP backend dependency management","url":"/docs/main/next/en/guides/dependencies/"},{"content":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. 
Copy the new dependencies' license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 licenses.  ","title":"OAP backend dependency management","url":"/docs/main/v9.6.0/en/guides/dependencies/"},{"content":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy the new dependencies' license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 licenses.  ","title":"OAP backend dependency management","url":"/docs/main/v9.7.0/en/guides/dependencies/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consumption, and it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus HTTP endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, preferred in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow the OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
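As a reference for the data flow and set up steps above, a minimal sketch of the two configuration pieces involved is shown below; the environment variable names, hostnames, the self-telemetry port 1234, and the OTLP/gRPC port 11800 are assumptions based on common SkyWalking defaults rather than a verified configuration:
# --- OAP application.yml: expose self-observability metrics in Prometheus format (variable names assumed) ---
telemetry:
  selector: ${SW_TELEMETRY:prometheus}
  prometheus:
    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
# --- OpenTelemetry Collector: scrape that endpoint and forward to the OAP via OTLP/gRPC (hosts/ports assumed) ---
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: skywalking-so11y   # job name is an assumption
          scrape_interval: 10s
          static_configs:
            - targets: [oap:1234]
exporters:
  otlp:
    endpoint: oap:11800
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]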
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/latest/en/setup/backend/dashboards-so11y/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. 
SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. 
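Those rule files are MAL definitions written in YAML; a stripped-down, hypothetical sketch of their general shape is shown below (the source metric and expression are placeholders, not the bundled rules, so consult the files above for the real definitions):
metricPrefix: meter_oap
metricsRules:
  - name: instance_jvm_memory_bytes_used   # would surface as meter_oap_instance_jvm_memory_bytes_used
    exp: jvm_memory_bytes_used.sum(['service', 'instance'])   # placeholder expression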
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/next/en/setup/backend/dashboards-so11y/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Self Observability. (Optional) Set up OpenTelemetry Collector .. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute 
meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/v9.3.0/en/setup/backend/dashboards-so11y/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count 
meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/v9.4.0/en/setup/backend/dashboards-so11y/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
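When the OpenTelemetry Collector handles steps (2) and (3) of the data flow above, its configuration could look roughly like the sketch below. This is a minimal illustration only: the job name, the OAP telemetry address oap:1234 and the OTLP gRPC receiver address oap:11800 are assumptions, not values taken from this page.
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'skywalking-so11y'       # assumed job name
          scrape_interval: 30s
          static_configs:
            - targets: ['oap:1234']          # assumed OAP Prometheus telemetry endpoint
exporters:
  otlp:
    endpoint: 'oap:11800'                    # assumed OAP OTLP gRPC receiver address
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]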
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/v9.5.0/en/setup/backend/dashboards-so11y/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. 
SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. 
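As a rough illustration of what a rule in config/otel-rules/oap.yaml looks like, the sketch below follows the expSuffix / metricPrefix / metricsRules layout shown elsewhere in these docs; the layer argument, source metric name and expression are illustrative assumptions rather than the shipped rule.
# Illustrative MAL rule; metric name and expression are assumptions.
expSuffix: instance(['service'], ['instance'], Layer.SO11Y_OAP)
metricPrefix: meter_oap
metricsRules:
  - name: instance_cpu_percentage
    exp: process_cpu_usage.sum(['service', 'instance']) * 100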
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/v9.6.0/en/setup/backend/dashboards-so11y/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count 
per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/v9.7.0/en/setup/backend/dashboards-so11y/"},{"content":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs:\n OAPServerConfig: The CRD holds all static configuration, including environment variable and file configuration. OAPServerDynamicConfig: The CRD holds all dynamic configuration.  Spec of OAPServerConfig    Field Name Description     Version The version of OAP server, the default value is 9.5.0   Env The environment variable of OAP server   File The static file in OAP Server, which contains three fieldsfile.path、file.name and file.data. The file.path plus the file.name is the real file that needs to be replaced in the container image, and the file.data is the final data in the specific file.    Status of OAPServerConfig    Field Name Description     Desired The number of oapserver that need to be configured   Ready The number of oapserver that configured successfully   CreationTime The time the OAPServerConfig was created.   LastUpdateTime The last time this condition was updated.    Demo of OAPServerConfig  When using the file, please don\u0026rsquo;t set the same name\n # static configuration of OAPServerapiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerConfigmetadata:name:oapserverconfig-samplenamespace:skywalking-systemspec:# The version of OAPServerversion:9.5.0# The env configuration of OAPServerenv:- name:JAVA_OPTSvalue:-Xmx2048M- name:SW_CLUSTERvalue:kubernetes- name:SW_CLUSTER_K8S_NAMESPACEvalue:skywalking-system# enable the dynamic configuration- name:SW_CONFIGURATIONvalue:k8s-configmap# set the labelselector of the dynamic configuration- name:SW_CLUSTER_K8S_LABELvalue:app=collector,release=skywalking- name:SW_TELEMETRYvalue:prometheus- name:SW_HEALTH_CHECKERvalue:default- name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uid- name:SW_LOG_LAL_FILESvalue:test1- name:SW_LOG_MAL_FILESvalue:test2# The file configuration of OAPServer# we should avoid setting the same file name in the filefile:- name:test1.yamlpath:/skywalking/config/laldata:|rules: - name: example dsl: | filter { text { abortOnFailure false // for test purpose, we want to persist all logs regexp $/(?s)(?\u0026lt;timestamp\u0026gt;\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3}) \\[TID:(?\u0026lt;tid\u0026gt;.+?)] \\[(?\u0026lt;thread\u0026gt;.+?)] (?\u0026lt;level\u0026gt;\\w{4,}) (?\u0026lt;logger\u0026gt;.{1,36}) (?\u0026lt;msg\u0026gt;.+)/$ } extractor { metrics { timestamp log.timestamp as Long labels level: parsed.level, service: log.service, instance: log.serviceInstance name \u0026#34;log_count\u0026#34; value 1 } } sink { } }- name:test2.yamlpath:/skywalking/config/log-mal-rulesdata:|expSuffix: instance([\u0026#39;service\u0026#39;], [\u0026#39;instance\u0026#39;], Layer.GENERAL) metricPrefix: log metricsRules: - name: count_info exp: 
log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;INFO\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).downsampling(SUM)Spec of OAPServerDynamicConfig    Field Name Description     Version The version of the OAP server, the default value is 9.5.0   LabelSelector The label selector of the specific configmap, the default value is \u0026ldquo;app=collector,release=skywalking\u0026rdquo;   Data All configurations' key and value    Status of OAPServerDynamicConfig    Field Name Description     State The state of dynamic configuration, running or stopped   CreationTime All configurations in one CR, the default value is false   LastUpdateTime The last time this condition was updated    Usage of OAPServerDynamicConfig  Notice, the CR\u0026rsquo;s name cannot contain capital letters.\n Users can split all configurations into several CRs. when using the OAPServerDynamicConfig, users can not only put some configurations in a CR, but also put a configuration in a CR, and the spec.data.name in CR represents one dynamic configuration.\nDemo of Global configuration apiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:oapserverdynamicconfig-samplespec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:agent-analyzer.default.slowDBAccessThresholdvalue:default:200,mongodb:50- name:alarm.default.alarm-settingsvalue:|-rules: # Rule unique name, must be ended with `_rule`. service_resp_time_rule: metrics-name: service_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 3 silence-period: 5 message: Response time of service {name} is more than 1000ms in 3 minutes of last 10 minutes. service_sla_rule: # Metrics value need to be long, double or int metrics-name: service_sla op: \u0026#34;\u0026lt;\u0026#34; threshold: 8000 # The length of time to evaluate the metrics period: 10 # How many times after the metrics match the condition, will trigger alarm count: 2 # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period. 
silence-period: 3 message: Successful rate of service {name} is lower than 80% in 2 minutes of last 10 minutes service_resp_time_percentile_rule: # Metrics value need to be long, double or int metrics-name: service_percentile op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000,1000,1000,1000,1000 period: 10 count: 3 silence-period: 5 message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000 service_instance_resp_time_rule: metrics-name: service_instance_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 2 silence-period: 5 message: Response time of service instance {name} is more than 1000ms in 2 minutes of last 10 minutes database_access_resp_time_rule: metrics-name: database_access_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of database access {name} is more than 1000ms in 2 minutes of last 10 minutes endpoint_relation_resp_time_rule: metrics-name: endpoint_relation_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of endpoint relation {name} is more than 1000ms in 2 minutes of last 10 minutes # Active endpoint related metrics alarm will cost more memory than service and service instance metrics alarm. # Because the number of endpoint is much more than service and instance. # # endpoint_resp_time_rule: # metrics-name: endpoint_resp_time # op: \u0026#34;\u0026gt;\u0026#34; # threshold: 1000 # period: 10 # count: 2 # silence-period: 5 # message: Response time of endpoint {name} is more than 1000ms in 2 minutes of last 10 minutes webhooks: # - http://127.0.0.1/notify/ # - http://127.0.0.1/go-wechat/- name:core.default.apdexThresholdvalue:|-default: 500 # example: # the threshold of service \u0026#34;tomcat\u0026#34; is 1s # tomcat: 1000 # the threshold of service \u0026#34;springboot1\u0026#34; is 50ms # springboot1: 50- name:agent-analyzer.default.uninstrumentedGatewaysvalue:|-#gateways: # - name: proxy0 # instances: # - host: 127.0.0.1 # the host/ip of this gateway instance # port: 9099 # the port of this gateway instance, defaults to 80Demo of Single configuration Set the dynamic configuration agent-analyzer.default.slowDBAccessThreshold as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:agent-analyzer.defaultspec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:slowDBAccessThresholdvalue:default:200,mongodb:50Set the dynamic configuration core.default.endpoint-name-grouping-openapi.customerAPI-v1 and core.default.endpoint-name-grouping-openapi.productAPI-v1 as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:core.default.endpoint-name-grouping-openapispec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:customerAPI-v1value:value of customerAPI-v1- name:productAPI-v1value:value of productAPI-v1","title":"OAPSever Configuration Introduction","url":"/docs/skywalking-swck/latest/oapserver-configuration/"},{"content":"OAPSever Configuration Introduction 
To configure the OAP Sever, we propose two CRDs:\n OAPServerConfig: The CRD holds all static configuration, including environment variable and file configuration. OAPServerDynamicConfig: The CRD holds all dynamic configuration.  Spec of OAPServerConfig    Field Name Description     Version The version of OAP server, the default value is 9.5.0   Env The environment variable of OAP server   File The static file in OAP Server, which contains three fieldsfile.path、file.name and file.data. The file.path plus the file.name is the real file that needs to be replaced in the container image, and the file.data is the final data in the specific file.    Status of OAPServerConfig    Field Name Description     Desired The number of oapserver that need to be configured   Ready The number of oapserver that configured successfully   CreationTime The time the OAPServerConfig was created.   LastUpdateTime The last time this condition was updated.    Demo of OAPServerConfig  When using the file, please don\u0026rsquo;t set the same name\n # static configuration of OAPServerapiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerConfigmetadata:name:oapserverconfig-samplenamespace:skywalking-systemspec:# The version of OAPServerversion:9.5.0# The env configuration of OAPServerenv:- name:JAVA_OPTSvalue:-Xmx2048M- name:SW_CLUSTERvalue:kubernetes- name:SW_CLUSTER_K8S_NAMESPACEvalue:skywalking-system# enable the dynamic configuration- name:SW_CONFIGURATIONvalue:k8s-configmap# set the labelselector of the dynamic configuration- name:SW_CLUSTER_K8S_LABELvalue:app=collector,release=skywalking- name:SW_TELEMETRYvalue:prometheus- name:SW_HEALTH_CHECKERvalue:default- name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uid- name:SW_LOG_LAL_FILESvalue:test1- name:SW_LOG_MAL_FILESvalue:test2# The file configuration of OAPServer# we should avoid setting the same file name in the filefile:- name:test1.yamlpath:/skywalking/config/laldata:|rules: - name: example dsl: | filter { text { abortOnFailure false // for test purpose, we want to persist all logs regexp $/(?s)(?\u0026lt;timestamp\u0026gt;\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3}) \\[TID:(?\u0026lt;tid\u0026gt;.+?)] \\[(?\u0026lt;thread\u0026gt;.+?)] (?\u0026lt;level\u0026gt;\\w{4,}) (?\u0026lt;logger\u0026gt;.{1,36}) (?\u0026lt;msg\u0026gt;.+)/$ } extractor { metrics { timestamp log.timestamp as Long labels level: parsed.level, service: log.service, instance: log.serviceInstance name \u0026#34;log_count\u0026#34; value 1 } } sink { } }- name:test2.yamlpath:/skywalking/config/log-mal-rulesdata:|expSuffix: instance([\u0026#39;service\u0026#39;], [\u0026#39;instance\u0026#39;], Layer.GENERAL) metricPrefix: log metricsRules: - name: count_info exp: log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;INFO\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).downsampling(SUM)Spec of OAPServerDynamicConfig    Field Name Description     Version The version of the OAP server, the default value is 9.5.0   LabelSelector The label selector of the specific configmap, the default value is \u0026ldquo;app=collector,release=skywalking\u0026rdquo;   Data All configurations' key and value    Status of OAPServerDynamicConfig    Field Name Description     State The state of dynamic configuration, running or stopped   CreationTime All configurations in one CR, the default value is false   LastUpdateTime The last time this condition was updated    Usage of OAPServerDynamicConfig  Notice, the CR\u0026rsquo;s name cannot contain capital 
letters.\n Users can split all configurations into several CRs. when using the OAPServerDynamicConfig, users can not only put some configurations in a CR, but also put a configuration in a CR, and the spec.data.name in CR represents one dynamic configuration.\nDemo of Global configuration apiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:oapserverdynamicconfig-samplespec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:agent-analyzer.default.slowDBAccessThresholdvalue:default:200,mongodb:50- name:alarm.default.alarm-settingsvalue:|-rules: # Rule unique name, must be ended with `_rule`. service_resp_time_rule: metrics-name: service_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 3 silence-period: 5 message: Response time of service {name} is more than 1000ms in 3 minutes of last 10 minutes. service_sla_rule: # Metrics value need to be long, double or int metrics-name: service_sla op: \u0026#34;\u0026lt;\u0026#34; threshold: 8000 # The length of time to evaluate the metrics period: 10 # How many times after the metrics match the condition, will trigger alarm count: 2 # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period. silence-period: 3 message: Successful rate of service {name} is lower than 80% in 2 minutes of last 10 minutes service_resp_time_percentile_rule: # Metrics value need to be long, double or int metrics-name: service_percentile op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000,1000,1000,1000,1000 period: 10 count: 3 silence-period: 5 message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000 service_instance_resp_time_rule: metrics-name: service_instance_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 2 silence-period: 5 message: Response time of service instance {name} is more than 1000ms in 2 minutes of last 10 minutes database_access_resp_time_rule: metrics-name: database_access_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of database access {name} is more than 1000ms in 2 minutes of last 10 minutes endpoint_relation_resp_time_rule: metrics-name: endpoint_relation_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of endpoint relation {name} is more than 1000ms in 2 minutes of last 10 minutes # Active endpoint related metrics alarm will cost more memory than service and service instance metrics alarm. # Because the number of endpoint is much more than service and instance. 
# # endpoint_resp_time_rule: # metrics-name: endpoint_resp_time # op: \u0026#34;\u0026gt;\u0026#34; # threshold: 1000 # period: 10 # count: 2 # silence-period: 5 # message: Response time of endpoint {name} is more than 1000ms in 2 minutes of last 10 minutes webhooks: # - http://127.0.0.1/notify/ # - http://127.0.0.1/go-wechat/- name:core.default.apdexThresholdvalue:|-default: 500 # example: # the threshold of service \u0026#34;tomcat\u0026#34; is 1s # tomcat: 1000 # the threshold of service \u0026#34;springboot1\u0026#34; is 50ms # springboot1: 50- name:agent-analyzer.default.uninstrumentedGatewaysvalue:|-#gateways: # - name: proxy0 # instances: # - host: 127.0.0.1 # the host/ip of this gateway instance # port: 9099 # the port of this gateway instance, defaults to 80Demo of Single configuration Set the dynamic configuration agent-analyzer.default.slowDBAccessThreshold as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:agent-analyzer.defaultspec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:slowDBAccessThresholdvalue:default:200,mongodb:50Set the dynamic configuration core.default.endpoint-name-grouping-openapi.customerAPI-v1 and core.default.endpoint-name-grouping-openapi.productAPI-v1 as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:core.default.endpoint-name-grouping-openapispec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:customerAPI-v1value:value of customerAPI-v1- name:productAPI-v1value:value of productAPI-v1","title":"OAPSever Configuration Introduction","url":"/docs/skywalking-swck/next/oapserver-configuration/"},{"content":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs:\n OAPServerConfig: The CRD holds all static configuration, including environment variable and file configuration. OAPServerDynamicConfig: The CRD holds all dynamic configuration.  Spec of OAPServerConfig    Field Name Description     Version The version of OAP server, the default value is 9.5.0   Env The environment variable of OAP server   File The static file in OAP Server, which contains three fieldsfile.path、file.name and file.data. The file.path plus the file.name is the real file that needs to be replaced in the container image, and the file.data is the final data in the specific file.    Status of OAPServerConfig    Field Name Description     Desired The number of oapserver that need to be configured   Ready The number of oapserver that configured successfully   CreationTime The time the OAPServerConfig was created.   LastUpdateTime The last time this condition was updated.    
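Pulling the Spec fields above together, a minimal OAPServerConfig resource might look roughly like the sketch below; the namespace, env values and the file entry are placeholders, and the full demo that follows shows a complete example.
apiVersion: operator.skywalking.apache.org/v1alpha1
kind: OAPServerConfig
metadata:
  name: oapserverconfig-minimal        # placeholder name
  namespace: skywalking-system
spec:
  version: "9.5.0"
  env:
    - name: SW_TELEMETRY               # expose self-observability telemetry
      value: prometheus
  file:
    - name: example.yaml               # placeholder file name
      path: /skywalking/config/lal     # path + name is the file replaced in the image
      data: |
        rules: []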
Demo of OAPServerConfig  When using the file, please don\u0026rsquo;t set the same name\n # static configuration of OAPServerapiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerConfigmetadata:name:oapserverconfig-samplenamespace:skywalking-systemspec:# The version of OAPServerversion:9.5.0# The env configuration of OAPServerenv:- name:JAVA_OPTSvalue:-Xmx2048M- name:SW_CLUSTERvalue:kubernetes- name:SW_CLUSTER_K8S_NAMESPACEvalue:skywalking-system# enable the dynamic configuration- name:SW_CONFIGURATIONvalue:k8s-configmap# set the labelselector of the dynamic configuration- name:SW_CLUSTER_K8S_LABELvalue:app=collector,release=skywalking- name:SW_TELEMETRYvalue:prometheus- name:SW_HEALTH_CHECKERvalue:default- name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uid- name:SW_LOG_LAL_FILESvalue:test1- name:SW_LOG_MAL_FILESvalue:test2# The file configuration of OAPServer# we should avoid setting the same file name in the filefile:- name:test1.yamlpath:/skywalking/config/laldata:|rules: - name: example dsl: | filter { text { abortOnFailure false // for test purpose, we want to persist all logs regexp $/(?s)(?\u0026lt;timestamp\u0026gt;\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3}) \\[TID:(?\u0026lt;tid\u0026gt;.+?)] \\[(?\u0026lt;thread\u0026gt;.+?)] (?\u0026lt;level\u0026gt;\\w{4,}) (?\u0026lt;logger\u0026gt;.{1,36}) (?\u0026lt;msg\u0026gt;.+)/$ } extractor { metrics { timestamp log.timestamp as Long labels level: parsed.level, service: log.service, instance: log.serviceInstance name \u0026#34;log_count\u0026#34; value 1 } } sink { } }- name:test2.yamlpath:/skywalking/config/log-mal-rulesdata:|expSuffix: instance([\u0026#39;service\u0026#39;], [\u0026#39;instance\u0026#39;], Layer.GENERAL) metricPrefix: log metricsRules: - name: count_info exp: log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;INFO\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).downsampling(SUM)Spec of OAPServerDynamicConfig    Field Name Description     Version The version of the OAP server, the default value is 9.5.0   LabelSelector The label selector of the specific configmap, the default value is \u0026ldquo;app=collector,release=skywalking\u0026rdquo;   Data All configurations' key and value    Status of OAPServerDynamicConfig    Field Name Description     State The state of dynamic configuration, running or stopped   CreationTime All configurations in one CR, the default value is false   LastUpdateTime The last time this condition was updated    Usage of OAPServerDynamicConfig  Notice, the CR\u0026rsquo;s name cannot contain capital letters.\n Users can split all configurations into several CRs. when using the OAPServerDynamicConfig, users can not only put some configurations in a CR, but also put a configuration in a CR, and the spec.data.name in CR represents one dynamic configuration.\nDemo of Global configuration apiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:oapserverdynamicconfig-samplespec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:agent-analyzer.default.slowDBAccessThresholdvalue:default:200,mongodb:50- name:alarm.default.alarm-settingsvalue:|-rules: # Rule unique name, must be ended with `_rule`. 
service_resp_time_rule: metrics-name: service_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 3 silence-period: 5 message: Response time of service {name} is more than 1000ms in 3 minutes of last 10 minutes. service_sla_rule: # Metrics value need to be long, double or int metrics-name: service_sla op: \u0026#34;\u0026lt;\u0026#34; threshold: 8000 # The length of time to evaluate the metrics period: 10 # How many times after the metrics match the condition, will trigger alarm count: 2 # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period. silence-period: 3 message: Successful rate of service {name} is lower than 80% in 2 minutes of last 10 minutes service_resp_time_percentile_rule: # Metrics value need to be long, double or int metrics-name: service_percentile op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000,1000,1000,1000,1000 period: 10 count: 3 silence-period: 5 message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000 service_instance_resp_time_rule: metrics-name: service_instance_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 2 silence-period: 5 message: Response time of service instance {name} is more than 1000ms in 2 minutes of last 10 minutes database_access_resp_time_rule: metrics-name: database_access_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of database access {name} is more than 1000ms in 2 minutes of last 10 minutes endpoint_relation_resp_time_rule: metrics-name: endpoint_relation_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of endpoint relation {name} is more than 1000ms in 2 minutes of last 10 minutes # Active endpoint related metrics alarm will cost more memory than service and service instance metrics alarm. # Because the number of endpoint is much more than service and instance. 
# # endpoint_resp_time_rule: # metrics-name: endpoint_resp_time # op: \u0026#34;\u0026gt;\u0026#34; # threshold: 1000 # period: 10 # count: 2 # silence-period: 5 # message: Response time of endpoint {name} is more than 1000ms in 2 minutes of last 10 minutes webhooks: # - http://127.0.0.1/notify/ # - http://127.0.0.1/go-wechat/- name:core.default.apdexThresholdvalue:|-default: 500 # example: # the threshold of service \u0026#34;tomcat\u0026#34; is 1s # tomcat: 1000 # the threshold of service \u0026#34;springboot1\u0026#34; is 50ms # springboot1: 50- name:agent-analyzer.default.uninstrumentedGatewaysvalue:|-#gateways: # - name: proxy0 # instances: # - host: 127.0.0.1 # the host/ip of this gateway instance # port: 9099 # the port of this gateway instance, defaults to 80Demo of Single configuration Set the dynamic configuration agent-analyzer.default.slowDBAccessThreshold as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:agent-analyzer.defaultspec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:slowDBAccessThresholdvalue:default:200,mongodb:50Set the dynamic configuration core.default.endpoint-name-grouping-openapi.customerAPI-v1 and core.default.endpoint-name-grouping-openapi.productAPI-v1 as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:core.default.endpoint-name-grouping-openapispec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:customerAPI-v1value:value of customerAPI-v1- name:productAPI-v1value:value of productAPI-v1","title":"OAPSever Configuration Introduction","url":"/docs/skywalking-swck/v0.9.0/oapserver-configuration/"},{"content":"Observability This document outlines the observability features of BanyanDB, which include metrics, profiling, and tracing. These features help monitor and understand the performance, behavior, and overall health of BanyanDB.\nMetrics BanyanDB has built-in support for metrics collection through the use of build tags. The metrics provider can be enabled by specifying the build tag during the compilation process.\nCurrently, there is only one supported metrics provider: Prometheus. To use Prometheus as the metrics client, include the prometheus build tag when building BanyanDB:\nBUILD_TAGS=prometheus make -C banyand banyand-server\nIf no build tag is specified, the metrics server will not be started, and no metrics will be collected:\nmake -C banyand banyand-server\nWhen the Prometheus metrics provider is enabled, the metrics server listens on port 2121. This allows Prometheus to scrape metrics data from BanyanDB for monitoring and analysis.\nThe Docker image is tagged as \u0026ldquo;prometheus\u0026rdquo; to facilitate cloud-native operations and simplify deployment on Kubernetes. This allows users to directly deploy the Docker image onto their Kubernetes cluster without having to rebuild it with the \u0026ldquo;prometheus\u0026rdquo; tag.\nProfiling Banyand, the server of BanyanDB, supports profiling automatically. The profiling data is collected by the pprof package and can be accessed through the /debug/pprof endpoint. 
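Going back to the metrics endpoint described above, a minimal Prometheus scrape configuration for a BanyanDB server built with the prometheus tag might look like this sketch; the banyandb host name and job name are assumptions.
scrape_configs:
  - job_name: 'banyandb'                # assumed job name
    static_configs:
      - targets: ['banyandb:2121']      # BanyanDB metrics port per the text above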
The port of the profiling server is 2122 by default.\nTracing TODO: Add details about the tracing support in BanyanDB, such as how to enable tracing, available tracing tools, and how to analyze tracing data.\n","title":"Observability","url":"/docs/skywalking-banyandb/latest/observability/"},{"content":"Observability This document outlines the observability features of BanyanDB, which include metrics, profiling, and tracing. These features help monitor and understand the performance, behavior, and overall health of BanyanDB.\nMetrics BanyanDB has built-in support for metrics collection through the use of build tags. The metrics provider can be enabled by specifying the build tag during the compilation process.\nCurrently, there is only one supported metrics provider: Prometheus. To use Prometheus as the metrics client, include the prometheus build tag when building BanyanDB:\nBUILD_TAGS=prometheus make -C banyand banyand-server\nIf no build tag is specified, the metrics server will not be started, and no metrics will be collected:\nmake -C banyand banyand-server\nWhen the Prometheus metrics provider is enabled, the metrics server listens on port 2121. This allows Prometheus to scrape metrics data from BanyanDB for monitoring and analysis.\nThe Docker image is tagged as \u0026ldquo;prometheus\u0026rdquo; to facilitate cloud-native operations and simplify deployment on Kubernetes. This allows users to directly deploy the Docker image onto their Kubernetes cluster without having to rebuild it with the \u0026ldquo;prometheus\u0026rdquo; tag.\nProfiling Banyand, the server of BanyanDB, supports profiling automatically. The profiling data is collected by the pprof package and can be accessed through the /debug/pprof endpoint. The port of the profiling server is 2122 by default.\nTracing TODO: Add details about the tracing support in BanyanDB, such as how to enable tracing, available tracing tools, and how to analyze tracing data.\n","title":"Observability","url":"/docs/skywalking-banyandb/next/observability/"},{"content":"Observability This document outlines the observability features of BanyanDB, which include metrics, profiling, and tracing. These features help monitor and understand the performance, behavior, and overall health of BanyanDB.\nMetrics BanyanDB has built-in support for metrics collection through the use of build tags. The metrics provider can be enabled by specifying the build tag during the compilation process.\nCurrently, there is only one supported metrics provider: Prometheus. To use Prometheus as the metrics client, include the prometheus build tag when building BanyanDB:\nBUILD_TAGS=prometheus make -C banyand banyand-server\nIf no build tag is specified, the metrics server will not be started, and no metrics will be collected:\nmake -C banyand banyand-server\nWhen the Prometheus metrics provider is enabled, the metrics server listens on port 2121. This allows Prometheus to scrape metrics data from BanyanDB for monitoring and analysis.\nThe Docker image is tagged as \u0026ldquo;prometheus\u0026rdquo; to facilitate cloud-native operations and simplify deployment on Kubernetes. This allows users to directly deploy the Docker image onto their Kubernetes cluster without having to rebuild it with the \u0026ldquo;prometheus\u0026rdquo; tag.\nProfiling Banyand, the server of BanyanDB, supports profiling automatically. The profiling data is collected by the pprof package and can be accessed through the /debug/pprof endpoint. 
The port of the profiling server is 2122 by default.\nTracing TODO: Add details about the tracing support in BanyanDB, such as how to enable tracing, available tracing tools, and how to analyze tracing data.\n","title":"Observability","url":"/docs/skywalking-banyandb/v0.5.0/observability/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. 
The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time. Before 7.0.0, p99, p95, p90, p75, p50 func(s) are used to calculate metrics separately. They are still supported in 7.x, but they are no longer recommended and are not included in the current official OAL script.\n service_p99 = from(Service.latency).p99(10);\n In this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. 
By default, none of them are disabled.\nNOTICE: all disable statements should be in the oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint names starting with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of requests whose response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response codes in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count() // Calculate the sum of request types in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint names in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service. The value is made up of `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with HTTP methods other than GET for each service. The value is made up of `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.0.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Language OAL (Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in the OAP server runtime as oal-rt (OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates Java code dynamically.\nYou can set SW_OAL_ENGINE_DEBUG=Y in the system environment to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. 
There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. 
The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time. Before 7.0.0, p99, p95, p90, p75, p50 func(s) are used to calculate metrics separately. They are still supported in 7.x, but they are no longer recommended and are not included in the current official OAL script.\n service_p99 = from(Service.latency).p99(10);\n In this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. 
endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.1.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. 
The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. 
Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.2.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. 
METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. 
See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. 
endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.3.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   
endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. 
.filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.4.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. 
METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. 
See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. 
endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.5.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   
endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. 
.filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.6.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, and Zipkin traces of v1 and v2 formats are supported. Metrics. SkyWalking supports mature metrics formats, including native meter format, OTEL metrics format, and Telegraf format. SkyWalking integrates with Service Mesh platforms, typically Istio and Envoy, to build observability into the data plane or control plane. 
Also, SkyWalking native agents can run in the metrics mode, which greatly improves performance. Logging. Includes logs collected from disk or through the network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborates with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/latest/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking OAP and UI provide dozens of features to support observability analysis for your services, cloud infrastructure, open-source components, and more.\nBesides those out-of-the-box features for monitoring, users could leverage the powerful and flexible analysis languages to build their own analysis and visualization.\nThere are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data to build metrics for entities and the topology map. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on analyzing log contents to format and label them, and extracts metrics from them to feed Meter Analysis Language for further analysis.  The SkyWalking community is willing to accept your monitoring extension powered by these languages, if the monitoring targets are publicly available and generally usable.\n","title":"Observability Analysis Platform","url":"/docs/main/next/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger, are supported. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performance. Logging. Includes logs collected from disk or through the network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborates with Meter Analysis Language.  
","title":"Observability Analysis Platform","url":"/docs/main/v9.0.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/v9.1.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/v9.2.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. 
SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/v9.3.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/v9.4.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. 
Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/v9.5.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/v9.6.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, and Zipkin traces of v1 and v2 formats are supported. Metrics. SkyWalking supports mature metrics formats, including native meter format, OTEL metrics format, and Telegraf format. SkyWalking integrates with Service Mesh platforms, typically Istio and Envoy, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  
","title":"Observability Analysis Platform","url":"/docs/main/v9.7.0/en/concepts-and-designs/backend-overview/"},{"content":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  // Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","title":"Observations","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/"},{"content":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  
// Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","title":"Observations","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/"},{"content":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  // Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","title":"Observations","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/"},{"content":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  
// Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","title":"Observations","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/"},{"content":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  // Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","title":"Observations","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. 
In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-helm.git cd skywalking-helm/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. 
This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/latest/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. 
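For readability, the flattened receiver snippet that follows can be laid out as it would appear in application.yaml. This is only a re-layout of the same keys and defaults quoted in the docs, not a complete configuration file; the example analyzer values in the comments are the ones used elsewhere in this page.

envoy-metric:
  selector: ${SW_ENVOY_METRIC:default}
  default:
    acceptMetricsService: ${SW_ENVOY_METRIC_SERVICE:true}
    # Setting the system env variable would override this.
    alsHTTPAnalysis: ${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:""}   # e.g. mx-mesh or mx-mesh,persistence
    alsTCPAnalysis: ${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:""}     # e.g. mx-mesh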
For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-helm.git cd skywalking-helm/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/next/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example on installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. 
When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes cluster, hence in this analyzer OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works, and a step-by-step tutorial to apply it into the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works, and a step-by-step tutorial to apply it into the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.0.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. 
Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.1.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. 
When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.2.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. 
Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.3.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. 
When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.4.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. 
Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.5.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. 
When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.6.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. 
Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-helm.git cd skywalking-helm/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
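Relatedly, activating the envoy-als LAL rule mentioned above might be sketched in application.yml as follows (only the log-analyzer/default/lalFiles item and SW_LOG_LAL_FILES come from this page; the SW_LOG_ANALYZER selector variable is an assumption):
log-analyzer:
  selector: ${SW_LOG_ANALYZER:default}
  default:
    # Keep any other rule names you already rely on in this list.
    lalFiles: ${SW_LOG_LAL_FILES:envoy-als}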
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.7.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","title":"Observe Service Mesh through Zipkin traces","url":"/docs/main/latest/en/setup/zipkin/tracing/"},{"content":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
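Put together, the receiver and query settings described in the preceding section might be sketched in application.yml as follows ("# Other configurations" stands for options not covered here):
receiver-zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:default}
  default:
    # Other configurations...
query-zipkin:
  selector: ${SW_QUERY_ZIPKIN:default}
  default:
    # Other configurations...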
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has a built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling the Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and send them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","title":"Observe Service Mesh through Zipkin traces","url":"/docs/main/next/en/setup/zipkin/tracing/"},{"content":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","title":"Observe Service Mesh through Zipkin traces","url":"/docs/main/v9.4.0/en/setup/zipkin/tracing/"},{"content":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","title":"Observe Service Mesh through Zipkin traces","url":"/docs/main/v9.5.0/en/setup/zipkin/tracing/"},{"content":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","title":"Observe Service Mesh through Zipkin traces","url":"/docs/main/v9.6.0/en/setup/zipkin/tracing/"},{"content":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
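The istioctl flags shown above can also be expressed as an IstioOperator manifest and applied with istioctl install -f; this is a rough sketch under the assumption that the field paths mirror the --set flags:
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  profile: demo
  meshConfig:
    enableTracing: true
    defaultConfig:
      tracing:
        sampling: 100
        zipkin:
          # Replace with the real address of your SkyWalking OAP deployment.
          address: oap.istio-system.svc.cluster.local:9411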
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","title":"Observe Service Mesh through Zipkin traces","url":"/docs/main/v9.7.0/en/setup/zipkin/tracing/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  
","title":"Official OAL script","url":"/docs/main/latest/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/next/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.0.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. 
You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.1.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.2.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. 
Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.3.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.4.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. 
Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.5.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.6.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. 
There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.7.0/en/guides/backend-oal-scripts/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/latest/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and uses the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/next/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any way. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and uses the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.1.0/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any way. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.2.0/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.3.0/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.4.0/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.5.0/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
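A rough sketch of the cluster role mentioned above (the role name is illustrative; only the resources and verbs come from this page, and a matching ClusterRoleBinding for the OAP service account is still needed):
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: skywalking-oap-on-demand-logs   # illustrative name
rules:
  - apiGroups: [""]
    resources: ["namespaces", "services", "pods", "pods/log"]
    verbs: ["get", "list"]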
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.6.0/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.7.0/en/setup/backend/on-demand-pod-log/"},{"content":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. 
An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the OpenTelemetry language SDKs do not support the logging feature (yet), or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data. The OpenTelemetry Java SDK (experimental) sets this attribute; if you\u0026rsquo;re using another SDK or agent, please check the corresponding doc.  ","title":"OpenTelemetry Logging Format","url":"/docs/main/latest/en/setup/backend/log-otlp/"},{"content":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E We recommend using the OpenTelemetry OTLP Exporter to forward collected logs to the OAP server in OTLP format; SkyWalking OAP is then responsible for transforming the data into its native log format, with analysis support powered by LAL scripts.\n Deprecated: unmaintained and not recommended for use; it will be removed.\nOpenTelemetry SkyWalking Exporter was first added into open-telemetry/opentelemetry-collector-contrib before OAP OTLP support. It transforms the logs to SkyWalking format before sending them to SkyWalking OAP. It is currently not well maintained by the OpenTelemetry community, has already been marked as unmaintained, and may be removed in 2024.\n OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the OpenTelemetry language SDKs do not support the logging feature (yet), or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data.  Several other attributes are optional and provide add-on information for the logs before analysis.\n service.layer: the layer of the service that generates the logs. The default value is the GENERAL layer, which is 100% sampled as defined by the LAL general rule. service.instance: the instance name that generates the logs. The default value is empty.  
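As the note below mentions, these attributes can be injected in the OpenTelemetry Collector; a hedged sketch using the attributes processor (all values are illustrative):
processors:
  attributes/skywalking:
    actions:
      - key: service.name
        value: my-service              # illustrative
        action: insert
      - key: service.layer
        value: GENERAL
        action: insert
      - key: service.instance
        value: my-service-instance-01  # illustrative
        action: insert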
Note, that these attributes should be set manually through OpenTelemetry SDK or through attribute#insert in OpenTelemetry Collector.\n","title":"OpenTelemetry Logging Format","url":"/docs/main/next/en/setup/backend/log-otlp/"},{"content":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","title":"OpenTelemetry Logging Format","url":"/docs/main/v9.5.0/en/setup/backend/log-otlp/"},{"content":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  
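As a minimal sketch of the log pipeline described above (not an official configuration), the following OpenTelemetry Collector fragment forwards application log files to SkyWalking OAP over OTLP and uses the resource processor to insert the service.name attribute that OAP requires; the filelog source, the inserted values, and the OAP address oap.skywalking.svc:11800 are illustrative assumptions to be replaced with your own.

    receivers:
      filelog:                                  # hypothetical source of application log files
        include: [ /var/log/myapp/*.log ]
    processors:
      resource:
        attributes:
          - key: service.name                   # required by SkyWalking OAP to attribute the logs
            value: my-service                   # assumption: replace with your real service name
            action: insert
    exporters:
      otlp:
        endpoint: oap.skywalking.svc:11800      # assumption: your OAP gRPC address
        tls:
          insecure: true
    service:
      pipelines:
        logs:
          receivers: [ filelog ]
          processors: [ resource ]
          exporters: [ otlp ]

Adjust the receiver and the inserted attribute values to match your environment; any receiver that produces OTLP log records can take the place of filelog.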
OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","title":"OpenTelemetry Logging Format","url":"/docs/main/v9.6.0/en/setup/backend/log-otlp/"},{"content":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","title":"OpenTelemetry Logging Format","url":"/docs/main/v9.7.0/en/setup/backend/log-otlp/"},{"content":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. 
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Linux OS otel-rules/vm.yaml prometheus/node_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Windows OS otel-rules/windows.yaml prometheus-community/windows_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml 
AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-cluster.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-index.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-node.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-service.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-instance.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-cluster.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-node.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-cluster.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-node.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-clusteryaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-broker.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.      ","title":"OpenTelemetry Metrics Format","url":"/docs/main/latest/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. 
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\nNotice: In the resource scope, dots (.) in the attributes' key names are converted to underscores (_), whereas in the metrics scope, they are not converted.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Linux OS otel-rules/vm.yaml prometheus/node_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Windows OS otel-rules/windows.yaml prometheus-community/windows_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver 
-\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-cluster.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-index.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-node.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-service.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-instance.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-cluster.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-node.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-cluster.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-node.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-cluster.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-broker.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of ClickHouse otel-rules/clickhouse/clickhouse-instance.yaml ClickHouse(embedded prometheus endpoint) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of ClickHouse otel-rules/clickhouse/clickhouse-service.yaml ClickHouse(embedded prometheus endpoint) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RocketMQ otel-rules/rocketmq/rocketmq-cluster.yaml rocketmq-exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RocketMQ otel-rules/rocketmq/rocketmq-broker.yaml rocketmq-exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RocketMQ otel-rules/rocketmq/rocketmq-topic.yaml rocketmq-exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    
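For any of the rows above, the Collector side of the pipeline has the same shape. The snippet below is a sketch only, not taken from the SkyWalking repository: it assumes a prometheus receiver scraping a node_exporter at node-exporter:9100 and an OAP gRPC endpoint of oap.skywalking.svc:11800, and it inserts a net.host.name resource attribute so that OAP can derive the node_identifier_host_name label described earlier.

    receivers:
      prometheus:
        config:
          scrape_configs:
            - job_name: vm-monitoring               # hypothetical scrape job
              scrape_interval: 30s
              static_configs:
                - targets: [ node-exporter:9100 ]   # assumption: your exporter address
    processors:
      resource:
        attributes:
          - key: net.host.name                      # mapped by OAP to node_identifier_host_name
            value: my-vm-01                         # assumption: identifies this node
            action: insert
    exporters:
      otlp:
        endpoint: oap.skywalking.svc:11800          # assumption: your OAP gRPC address
        tls:
          insecure: true
    service:
      pipelines:
        metrics:
          receivers: [ prometheus ]
          processors: [ resource ]
          exporters: [ otlp ]

Swap the scrape target for the exporter listed in the table row you are enabling; the processor and exporter sections stay the same.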
","title":"OpenTelemetry Metrics Format","url":"/docs/main/next/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking 
OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","title":"OpenTelemetry Metrics Format","url":"/docs/main/v9.6.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Linux OS otel-rules/vm.yaml prometheus/node_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Windows OS otel-rules/windows.yaml prometheus-community/windows_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server 
  Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-cluster.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-index.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-node.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-service.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-instance.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-cluster.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-node.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-cluster.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-node.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-clusteryaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; 
OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-broker.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.      ","title":"OpenTelemetry Metrics Format","url":"/docs/main/v9.7.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-\u0026lt;handler\u0026gt;-rules. E.g. The oc handler loads rules from $CLASSPATH/otel-oc-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in prometheus-fetcher. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc\u0026#34;}enabledOcRules:${SW_OTEL_RECEIVER_ENABLED_OC_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds labels with key = node_identifier_host_name and key = node_identifier_pid to the collected data samples, and values from Node.identifier.host_name and Node.identifier.pid defined in OpenCensus Agent Proto, for identification of the metric data.\n   Rule Name Description Configuration File Data Source     istio-controlplane Metrics of Istio Control Plane otel-oc-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   oap Metrics of SkyWalking OAP server itself otel-oc-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   vm Metrics of VMs otel-oc-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-cluster Metrics of K8s cluster otel-oc-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-node Metrics of K8s cluster otel-oc-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-service Metrics of K8s cluster otel-oc-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to directly transport the metrics to SkyWalking OAP. See OpenTelemetry Exporter.\n","title":"OpenTelemetry receiver","url":"/docs/main/v9.0.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. 
The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-\u0026lt;handler\u0026gt;-rules. E.g. The oc handler loads rules from $CLASSPATH/otel-oc-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in prometheus-fetcher. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc\u0026#34;}enabledOcRules:${SW_OTEL_RECEIVER_ENABLED_OC_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds labels with key = node_identifier_host_name and key = node_identifier_pid to the collected data samples, and values from Node.identifier.host_name and Node.identifier.pid defined in OpenCensus Agent Proto, for identification of the metric data.\n   Rule Name Description Configuration File Data Source     istio-controlplane Metrics of Istio Control Plane otel-oc-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   oap Metrics of SkyWalking OAP server itself otel-oc-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   vm Metrics of VMs otel-oc-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-cluster Metrics of K8s cluster otel-oc-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-node Metrics of K8s cluster otel-oc-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-service Metrics of K8s cluster otel-oc-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","title":"OpenTelemetry receiver","url":"/docs/main/v9.1.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler. otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in prometheus-fetcher. 
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc,otlp\u0026#34;}enabledOtelRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from Node.identifier.host_name defined in OpenCensus Agent Proto, or net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Rule Name Description Configuration File Data Source     istio-controlplane Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   oap Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   vm Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-cluster Metrics of K8s cluster otel-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-node Metrics of K8s cluster otel-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-service Metrics of K8s cluster otel-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   mysql Metrics of MYSQL otel-rules/mysql.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   postgresql Metrics of PostgreSQL otel-rules/postgresql.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","title":"OpenTelemetry receiver","url":"/docs/main/v9.2.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler. otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. 
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc,otlp\u0026#34;}enabledOtelRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from Node.identifier.host_name defined in OpenCensus Agent Proto, or net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","title":"OpenTelemetry receiver","url":"/docs/main/v9.3.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler. otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. 
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc,otlp\u0026#34;}enabledOtelRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from Node.identifier.host_name defined in OpenCensus Agent Proto, or net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; 
OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","title":"OpenTelemetry receiver","url":"/docs/main/v9.4.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP 
Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","title":"OpenTelemetry receiver","url":"/docs/main/v9.5.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml for config the zipkin.  Setup Query and Lens UI Please read deploy Lens UI documentation for query OTLP traces.\n","title":"OpenTelemetry Trace Format","url":"/docs/main/latest/en/setup/backend/otlp-trace/"},{"content":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml for config the zipkin.  Setup Query and Lens UI Please read deploy Lens UI documentation for query OTLP traces.\n","title":"OpenTelemetry Trace Format","url":"/docs/main/next/en/setup/backend/otlp-trace/"},{"content":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml for config the zipkin.  
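Put together, the two steps above amount to an application.yml fragment along the following lines. This is a sketch only: the receiver-otel part mirrors the snippet shown above, while the Zipkin receiver and query module names are assumptions that should be checked against the application.yml shipped with your OAP release.

    receiver-otel:
      selector: default
      default:
        enabledHandlers: otlp-traces

    receiver-zipkin:          # assumption: module name of the Zipkin receiver in your release
      selector: default
      default:

    query-zipkin:             # assumption: module name of the Zipkin query service in your release
      selector: default
      default:
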
Setup Query and Lens UI Please read deploy Lens UI documentation for query OTLP traces.\n","title":"OpenTelemetry Trace Format","url":"/docs/main/v9.6.0/en/setup/backend/otlp-trace/"},{"content":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml for config the zipkin.  Setup Query and Lens UI Please read deploy Lens UI documentation for query OTLP traces.\n","title":"OpenTelemetry Trace Format","url":"/docs/main/v9.7.0/en/setup/backend/otlp-trace/"},{"content":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released package or scratch The core CRDs the operator supports  Operator Deployment You could provision the operator from a binary package or build from sources.\nBinary Package  Go to the download page to download the latest release binary, skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin.tgz. Unarchive the package to a folder named skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin To install the operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin/config/operator-bundle.yaml Build from sources  Download released source package or clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Build docker image from scratch. If you prefer to your private docker image, a quick path to override OPERATOR_IMG environment variable : export OPERATOR_IMG=\u0026lt;private registry\u0026gt;/controller:\u0026lt;tag\u0026gt;  export OPERATOR_IMG=controller make -C operator docker-build Then, push this image controller:latest to a repository where the operator\u0026rsquo;s pod could pull from. If you use a local KinD cluster:\nkind load docker-image controller   Customize resource configurations based the templates laid in operator/config. 
We use kustomize to build them; please refer to kustomize in case you aren\u0026rsquo;t familiar with its syntax.\n  Install the CRDs to Kubernetes:\n  make -C operator install  Use make to generate the final manifests and deploy:  make -C operator deploy Test your deployment  Deploy a sample OAP server; this will create an OAP server in the default namespace:  curl https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml | kubectl apply -f -  Check the OAP server in Kubernetes:  kubectl get oapserver  Check the UI server in Kubernetes:  kubectl get ui Troubleshooting If you encounter any issues, you can check the logs of the controller by pulling them from Kubernetes:\n# get the pod name of your controller kubectl --namespace skywalking-swck-system get pods # pull the logs kubectl --namespace skywalking-swck-system logs -f [name_of_the_controller_pod] Custom Resource Definition (CRD) The custom resources that the operator introduces are:\nJavaAgent The JavaAgent custom resource definition (CRD) declaratively defines a view for tracing the injection result.\nThe java-agent-injector creates JavaAgents once it injects agents into some workloads. Refer to Java Agent for more details.\nOAP The OAP custom resource definition (CRD) declaratively defines a desired OAP setup to run in a Kubernetes cluster. It provides options to configure environment variables and how to connect to a Storage.\nUI The UI custom resource definition (CRD) declaratively defines a desired UI setup to run in a Kubernetes cluster. It provides options for how to connect to an OAP.\nStorage The Storage custom resource definition (CRD) declaratively defines a desired storage setup to run in a Kubernetes cluster. The Storage could be managed instances onboarded by the operator or an external service. The OAP has options to select which Storage it should connect to.\n Caveat: Storage only supports Elasticsearch.\n Satellite The Satellite custom resource definition (CRD) declaratively defines a desired Satellite setup to run in a Kubernetes cluster. It provides options for how to connect to an OAP.\nFetcher The Fetcher custom resource definition (CRD) declaratively defines a desired Fetcher setup to run in a Kubernetes cluster. It provides options to configure the OpenTelemetry collector, which fetches metrics and delivers them to the deployed OAP.\nExamples of the Operator There are some ready-made examples that demonstrate the functions and features of the Operator.\n Deploy OAP server and UI with default settings Fetch metrics from the Istio control plane (istiod) Inject the java agent into pods Deploy a storage Deploy a Satellite  ","title":"Operator Usage Guide","url":"/docs/skywalking-swck/latest/operator/"},{"content":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released package or from scratch The core CRDs the operator supports  Operator Deployment You can provision the operator from a binary package or build it from sources.\nBinary Package  Go to the download page to download the latest release binary, skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin.tgz. Unarchive the package to a folder named skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin. To install the operator in an existing cluster, make sure you have cert-manager installed. 
Apply the manifests for the Controller and CRDs in config:  kubectl apply -f skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin/config/operator-bundle.yaml Build from sources  Download released source package or clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Build docker image from scratch. If you prefer to your private docker image, a quick path to override OPERATOR_IMG environment variable : export OPERATOR_IMG=\u0026lt;private registry\u0026gt;/controller:\u0026lt;tag\u0026gt;  export OPERATOR_IMG=controller make -C operator docker-build Then, push this image controller:latest to a repository where the operator\u0026rsquo;s pod could pull from. If you use a local KinD cluster:\nkind load docker-image controller   Customize resource configurations based the templates laid in operator/config. We use kustomize to build them, please refer to kustomize in case you don\u0026rsquo;t familiar with its syntax.\n  Install the CRDs to Kubernetes:\n  make -C operator install  Use make to generate the final manifests and deploy:  make -C operator deploy Test your deployment  Deploy a sample OAP server, this will create an OAP server in the default namespace:  curl https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml | kubectl apply -f -  Check the OAP server in Kubernetes:  kubectl get oapserver  Check the UI server in Kubernetes:  kubectl get ui Troubleshooting If you encounter any issue, you can check the log of the controller by pulling it from Kubernetes:\n# get the pod name of your controller kubectl --namespace skywalking-swck-system get pods # pull the logs kubectl --namespace skywalking-swck-system logs -f [name_of_the_controller_pod] Custom Resource Define(CRD) The custom resources that the operator introduced are:\nJavaAgent The JavaAgent custom resource definition (CRD) declaratively defines a view to tracing the injection result.\nThe java-agent-injector creat JavaAgents once it injects agents into some workloads. Refer to Java Agent for more details.\nOAP The OAP custom resource definition (CRD) declaratively defines a desired OAP setup to run in a Kubernetes cluster. It provides options to configure environment variables and how to connect a Storage.\nUI The UI custom resource definition (CRD) declaratively defines a desired UI setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nStorage The Storage custom resource definition (CRD) declaratively defines a desired storage setup to run in a Kubernetes cluster. The Storage could be managed instances onboarded by the operator or an external service. The OAP has options to select which Storage it would connect.\n Caveat: Stroage only supports the Elasticsearch.\n Satellite The Satellite custom resource definition (CRD) declaratively defines a desired Satellite setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nFetcher The Fetcher custom resource definition (CRD) declaratively defines a desired Fetcher setup to run in a Kubernetes cluster. 
It provides options to configure OpenTelemetry collector, which fetches metrics to the deployed OAP.\nExamples of the Operator There are some instant examples to represent the functions or features of the Operator.\n Deploy OAP server and UI with default settings Fetch metrics from the Istio control plane(istiod) Inject the java agent to pods Deploy a storage Deploy a Satellite  ","title":"Operator Usage Guide","url":"/docs/skywalking-swck/next/operator/"},{"content":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released package or scratch The core CRDs the operator supports  Operator Deployment You could provision the operator from a binary package or build from sources.\nBinary Package  Go to the download page to download the latest release binary, skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin.tgz. Unarchive the package to a folder named skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin To install the operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin/config/operator-bundle.yaml Build from sources  Download released source package or clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Build docker image from scratch. If you prefer to your private docker image, a quick path to override OPERATOR_IMG environment variable : export OPERATOR_IMG=\u0026lt;private registry\u0026gt;/controller:\u0026lt;tag\u0026gt;  export OPERATOR_IMG=controller make -C operator docker-build Then, push this image controller:latest to a repository where the operator\u0026rsquo;s pod could pull from. If you use a local KinD cluster:\nkind load docker-image controller   Customize resource configurations based the templates laid in operator/config. We use kustomize to build them, please refer to kustomize in case you don\u0026rsquo;t familiar with its syntax.\n  Install the CRDs to Kubernetes:\n  make -C operator install  Use make to generate the final manifests and deploy:  make -C operator deploy Test your deployment  Deploy a sample OAP server, this will create an OAP server in the default namespace:  curl https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml | kubectl apply -f -  Check the OAP server in Kubernetes:  kubectl get oapserver  Check the UI server in Kubernetes:  kubectl get ui Troubleshooting If you encounter any issue, you can check the log of the controller by pulling it from Kubernetes:\n# get the pod name of your controller kubectl --namespace skywalking-swck-system get pods # pull the logs kubectl --namespace skywalking-swck-system logs -f [name_of_the_controller_pod] Custom Resource Define(CRD) The custom resources that the operator introduced are:\nJavaAgent The JavaAgent custom resource definition (CRD) declaratively defines a view to tracing the injection result.\nThe java-agent-injector creat JavaAgents once it injects agents into some workloads. Refer to Java Agent for more details.\nOAP The OAP custom resource definition (CRD) declaratively defines a desired OAP setup to run in a Kubernetes cluster. It provides options to configure environment variables and how to connect a Storage.\nUI The UI custom resource definition (CRD) declaratively defines a desired UI setup to run in a Kubernetes cluster. 
It provides options for how to connect an OAP.\nStorage The Storage custom resource definition (CRD) declaratively defines a desired storage setup to run in a Kubernetes cluster. The Storage could be managed instances onboarded by the operator or an external service. The OAP has options to select which Storage it would connect.\n Caveat: Stroage only supports the Elasticsearch.\n Satellite The Satellite custom resource definition (CRD) declaratively defines a desired Satellite setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nFetcher The Fetcher custom resource definition (CRD) declaratively defines a desired Fetcher setup to run in a Kubernetes cluster. It provides options to configure OpenTelemetry collector, which fetches metrics to the deployed OAP.\nExamples of the Operator There are some instant examples to represent the functions or features of the Operator.\n Deploy OAP server and UI with default settings Fetch metrics from the Istio control plane(istiod) Inject the java agent to pods Deploy a storage Deploy a Satellite  ","title":"Operator Usage Guide","url":"/docs/skywalking-swck/v0.9.0/operator/"},{"content":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins and expired-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known 2 kinds of optional plugins.\nOptional Level 2 Plugins These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x and 4.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. 
The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  Optional Level 3 Plugins. Expired Plugins These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\nWarning, there is no guarantee of working and maintenance. The committer team may remove them from the agent package in the future without further notice.\n Plugin of Spring Impala 2.6.x was tested through parrot-stream released images. The images are not available since Mar. 2024. This plugin is expired due to lack of testing.  ","title":"Optional Plugins","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/optional-plugins/"},{"content":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins and expired-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known 2 kinds of optional plugins.\nOptional Level 2 Plugins These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. 
Plugin of Spring Cloud Gateway 2.x and 3.x and 4.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  Optional Level 3 Plugins. Expired Plugins These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\nWarning, there is no guarantee of working and maintenance. The committer team may remove them from the agent package in the future without further notice.\n Plugin of Spring Impala 2.6.x was tested through parrot-stream released images. The images are not available since Mar. 2024. This plugin is expired due to lack of testing.  
","title":"Optional Plugins","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/optional-plugins/"},{"content":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known optional plugins.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. 
Plugin of nacos-client 2.x lib in optional plugin folder.The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network.  ","title":"Optional Plugins","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/optional-plugins/"},{"content":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known optional plugins.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). 
Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  ","title":"Optional Plugins","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/optional-plugins/"},{"content":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins and expired-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known 2 kinds of optional plugins.\nOptional Level 2 Plugins These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x and 4.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. 
The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  Optional Level 3 Plugins. Expired Plugins These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\nWarning, there is no guarantee of working and maintenance. The committer team may remove them from the agent package in the future without further notice.\n Plugin of Spring Impala 2.6.x was tested through parrot-stream released images. The images are not available since Mar. 2024. This plugin is expired due to lack of testing.  ","title":"Optional Plugins","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/optional-plugins/"},{"content":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","title":"Oracle and Resin plugins","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/"},{"content":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","title":"Oracle and Resin plugins","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/"},{"content":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. 
If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","title":"Oracle and Resin plugins","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/"},{"content":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","title":"Oracle and Resin plugins","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/"},{"content":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","title":"Oracle and Resin plugins","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. 
In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using Zipkin. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry, Telegraf.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/latest/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nSkyWalking covers all the observability needs in Cloud Native world, including:\n Tracing. SkyWalking native data formats, and Zipkin traces of v1 and v2 formats are supported. Metrics. SkyWalking supports mature metrics formats, including native meter format, OTEL metrics format, and Telegraf format. SkyWalking integrates with Service Mesh platforms, typically Istio and Envoy, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content. Profiling. Profiling is a powerful tool to help developers understand the performance of their applications from lines of codes perspective. SkyWalking provides profiling feature bundled in native language agents and independent ebpf agents. Event. Event is a special kind of data, which is used to record the important moments in the system, such as version upgrade, configuration change, etc. Linking the events with metrics could help on explain the peaks or valleys in the metrics, and linking the events with traces and logs could help on troubleshooting root cause.  Why use SkyWalking? 
SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Python, and manually SDKs for C++, Rust, and Nginx LUA. In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy, allowing users to understand the entire distributed system. Powered by eBPF stack, SkyWalking provides k8s monitoring. Also, by adopting OpenTelemetry, Telegraf, Zabbix, Zipkin, Prometheus, SkyWalking can integrate with other distributed tracing, metrics and logging systems and build a unified APM system to host all data.\nBesides the support of various kinds of telemetry formats, the hierarchy structure of objects in SkyWalking is defined as service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), and Kubernetes(k8s layer). A layer is an abstract collection of services. A service typically only belongs to one layer, but in some scenarios, a service could belong to multiple layers. For example, a service could be deployed in an Istio service mesh, it could belong to mesh and mesh-dp(mesh data plane) layer. Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, also detect API dependencies in the distributed environment if you use our native agents.,\nBesides topology map, SkyWalking provides Service Hierarchy Relationship , which defines the relationships of existing logically same services in various layers. For example, a service could be deployed in a Kubernetes cluster with Istio mesh, services are detected by k8s monitoring and Istio mesh, this hierarchy relationship could connect the services in k8s layer and mesh layer.\nArchitecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. 
You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/next/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect data and reformat them for SkyWalking requirements (different probes support different sources). 
Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, and logs. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, InfluxDB, or implement your own. Patches for new storage implementors welcome! UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.0.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  
Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect data and reformat them for SkyWalking requirements (different probes support different sources). Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, and logs. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, InfluxDB, or implement your own. Patches for new storage implementors welcome! UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.1.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). 
All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.2.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  
SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.3.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. 
A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.4.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. 
Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin and Jaeger. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.5.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). 
The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin and Jaeger. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.6.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. 
SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using Zipkin. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry, Telegraf.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.7.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking Rover is an open-source collector, which provides a eBPF-based monitor and profiler in the Kubernetes.\nWhy use SkyWalking Rover? On the Kubernetes platform, we could collect a lot of telemetry data. Rover could collect them based on the eBPF technology, and upload them to the SkyWalking backend for analysis, aggregate, and visualize them.\n EBPF-based profiling for C, C++, Golang, and Rust. Network profiling for L4(TCP) and L7(HTTP) traffic, including with TLS. Tracing enhancement. 
Collect extra information from OS level as attached events for the existing tracing system, such as attach raw data of HTTP request and response. Network monitoring for generating network access logs.  Architecture  Process represents the data monitored by Rover. Rover is deployed in the VM instance, collects data in VM and Process, and reports it to the OAP cluster. OAP collect data from the rover side, analysis, and stores them.  ","title":"Overview","url":"/docs/skywalking-rover/latest/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking Rover is an open-source collector, which provides a eBPF-based monitor and profiler in the Kubernetes.\nWhy use SkyWalking Rover? On the Kubernetes platform, we could collect a lot of telemetry data. Rover could collect them based on the eBPF technology, and upload them to the SkyWalking backend for analysis, aggregate, and visualize them.\n EBPF-based profiling for C, C++, Golang, and Rust. Network profiling for L4(TCP) and L7(HTTP) traffic, including with TLS. Tracing enhancement. Collect extra information from OS level as attached events for the existing tracing system, such as attach raw data of HTTP request and response. Network monitoring for generating network access logs.  Architecture  Process represents the data monitored by Rover. Rover is deployed in the VM instance, collects data in VM and Process, and reports it to the OAP cluster. OAP collect data from the rover side, analysis, and stores them.  ","title":"Overview","url":"/docs/skywalking-rover/next/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking Rover is an open-source collector, which provides a eBPF-based monitor and profiler in the Kubernetes.\nWhy use SkyWalking Rover? On the Kubernetes platform, we could collect a lot of telemetry data. Rover could collect them based on the eBPF technology, and upload them to the SkyWalking backend for analysis, aggregate, and visualize them.\n EBPF-based profiling for C, C++, Golang, and Rust. Network profiling for L4(TCP) and L7(HTTP) traffic, including with TLS. Tracing enhancement. Collect extra information from OS level as attached events for the existing tracing system, such as attach raw data of HTTP request and response. Network monitoring for generating network access logs.  Architecture  Process represents the data monitored by Rover. Rover is deployed in the VM instance, collects data in VM and Process, and reports it to the OAP cluster. OAP collect data from the rover side, analysis, and stores them.  ","title":"Overview","url":"/docs/skywalking-rover/v0.6.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, which provides a low-cost, high-efficient, and more secure way to collect telemetry data, such that Trace Segments, Logs, or Metrics.\nWhy use SkyWalking Satellite? Observability is the solution to the complex scenario of cloud-native services. However, we may encounter different telemetry data scenarios, different language services, big data analysis, etc. Satellite provides a unified data collection layer for cloud-native services. You can easily use it to connect to the SkyWalking ecosystem and enhance the capacity of SkyWalking. There are some enhance features on the following when using Satellite.\n Provide a unified data collection layer to collect logs, traces, and metrics. Provide a safer local cache to reduce the memory cost of the service. 
Provide the unified transfer way shields the functional differences in the different language libs, such as MQ. Provides the preprocessing functions to ensure accuracy of the metrics, such as sampling.  Architecture SkyWalking Satellite is logically split into three parts: Gatherer, Processor, and Sender.\n Gatherer collect data and reformat them for SkyWalking requirements. Processor processes the input data to generate the new data for Observability. Sender would transfer the downstream data to the SkyWalking OAP with different protocols.  ","title":"Overview","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, which provides a low-cost, high-efficient, and more secure way to collect telemetry data, such that Trace Segments, Logs, or Metrics.\nWhy use SkyWalking Satellite? Observability is the solution to the complex scenario of cloud-native services. However, we may encounter different telemetry data scenarios, different language services, big data analysis, etc. Satellite provides a unified data collection layer for cloud-native services. You can easily use it to connect to the SkyWalking ecosystem and enhance the capacity of SkyWalking. There are some enhance features on the following when using Satellite.\n Provide a unified data collection layer to collect logs, traces, and metrics. Provide a safer local cache to reduce the memory cost of the service. Provide the unified transfer way shields the functional differences in the different language libs, such as MQ. Provides the preprocessing functions to ensure accuracy of the metrics, such as sampling.  Architecture SkyWalking Satellite is logically split into three parts: Gatherer, Processor, and Sender.\n Gatherer collect data and reformat them for SkyWalking requirements. Processor processes the input data to generate the new data for Observability. Sender would transfer the downstream data to the SkyWalking OAP with different protocols.  ","title":"Overview","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, which provides a low-cost, high-efficient, and more secure way to collect telemetry data, such that Trace Segments, Logs, or Metrics.\nWhy use SkyWalking Satellite? Observability is the solution to the complex scenario of cloud-native services. However, we may encounter different telemetry data scenarios, different language services, big data analysis, etc. Satellite provides a unified data collection layer for cloud-native services. You can easily use it to connect to the SkyWalking ecosystem and enhance the capacity of SkyWalking. There are some enhance features on the following when using Satellite.\n Provide a unified data collection layer to collect logs, traces, and metrics. Provide a safer local cache to reduce the memory cost of the service. Provide the unified transfer way shields the functional differences in the different language libs, such as MQ. Provides the preprocessing functions to ensure accuracy of the metrics, such as sampling.  Architecture SkyWalking Satellite is logically split into three parts: Gatherer, Processor, and Sender.\n Gatherer collect data and reformat them for SkyWalking requirements. Processor processes the input data to generate the new data for Observability. 
Sender would transfer the downstream data to the SkyWalking OAP with different protocols.  ","title":"Overview","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/overview/"},{"content":"Performance best practices  Following changes are expected in the next official release (v1.1.0).\n The Python agent currently uses a number of threads to communicate with SkyWalking OAP, it is planned to be refactored using AsyncIO (Uvloop) along with an async version of gRPC(aio-client)/HTTP(aiohttp/httpx)/Kafka(aio-kafka) to further minimize the cost of thread switching and IO time.\nFor now, we still have a few points to mention to keep the overhead to your application minimal.\n When using the gRPC protocol to report data, a higher version of gRPC is always recommended. Please also make sure that:  By running python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._implementation_type)\u0026quot;, or python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._default_implementation_type)\u0026quot; you should either see upb or cpp as the returned value. It means the Protobuf library is using a much faster implementation than Python native. If not, try setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION='cpp' or 'upb' or upgrade the gRPC dependency (SkyWalking Python will use whatever version your application uses).   Though HTTP is provided as an alternative, it could be slower compared to other protocols, Kafka is often a good choice when gRPC is not suitable. When some features are not needed in your use case, you could turn them off either via config.init(agent_some_reporter_active=False) or environment variables. Use ignore_path, ignore_method, and log filters to avoid reporting less valuable data that is of large amount. Log reporter safe mode is designed for situations where HTTP basic auth info could be visible in traceback and logs but shouldn\u0026rsquo;t be reported to OAP. You should keep the option as OFF if it\u0026rsquo;s not your case because frequent regular expression searches will inevitably introduce overhead to the CPU. Do not turn on sw-python CLI or agent debug logging in production, otherwise large amount of log will be produced.  sw-python CLI debug mode will automatically turn on agent debug log (override from sitecustomize.py).    ","title":"Performance best practices","url":"/docs/skywalking-python/latest/en/setup/faq/performance/"},{"content":"Performance best practices  Following changes are expected in the next official release (v1.1.0).\n The Python agent currently uses a number of threads to communicate with SkyWalking OAP, it is planned to be refactored using AsyncIO (Uvloop) along with an async version of gRPC(aio-client)/HTTP(aiohttp/httpx)/Kafka(aio-kafka) to further minimize the cost of thread switching and IO time.\nFor now, we still have a few points to mention to keep the overhead to your application minimal.\n When using the gRPC protocol to report data, a higher version of gRPC is always recommended. Please also make sure that:  By running python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._implementation_type)\u0026quot;, or python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._default_implementation_type)\u0026quot; you should either see upb or cpp as the returned value. It means the Protobuf library is using a much faster implementation than Python native. 
If not, try setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION='cpp' or 'upb' or upgrade the gRPC dependency (SkyWalking Python will use whatever version your application uses).   Though HTTP is provided as an alternative, it could be slower compared to other protocols, Kafka is often a good choice when gRPC is not suitable. When some features are not needed in your use case, you could turn them off either via config.init(agent_some_reporter_active=False) or environment variables. Use ignore_path, ignore_method, and log filters to avoid reporting less valuable data that is of large amount. Log reporter safe mode is designed for situations where HTTP basic auth info could be visible in traceback and logs but shouldn\u0026rsquo;t be reported to OAP. You should keep the option as OFF if it\u0026rsquo;s not your case because frequent regular expression searches will inevitably introduce overhead to the CPU. Do not turn on sw-python CLI or agent debug logging in production, otherwise large amount of log will be produced.  sw-python CLI debug mode will automatically turn on agent debug log (override from sitecustomize.py).    ","title":"Performance best practices","url":"/docs/skywalking-python/next/en/setup/faq/performance/"},{"content":"Performance best practices  Following changes are expected in the next official release (v1.1.0).\n The Python agent currently uses a number of threads to communicate with SkyWalking OAP, it is planned to be refactored using AsyncIO (Uvloop) along with an async version of gRPC(aio-client)/HTTP(aiohttp/httpx)/Kafka(aio-kafka) to further minimize the cost of thread switching and IO time.\nFor now, we still have a few points to mention to keep the overhead to your application minimal.\n When using the gRPC protocol to report data, a higher version of gRPC is always recommended. Please also make sure that:  By running python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._implementation_type)\u0026quot;, or python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._default_implementation_type)\u0026quot; you should either see upb or cpp as the returned value. It means the Protobuf library is using a much faster implementation than Python native. If not, try setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION='cpp' or 'upb' or upgrade the gRPC dependency (SkyWalking Python will use whatever version your application uses).   Though HTTP is provided as an alternative, it could be slower compared to other protocols, Kafka is often a good choice when gRPC is not suitable. When some features are not needed in your use case, you could turn them off either via config.init(agent_some_reporter_active=False) or environment variables. Use ignore_path, ignore_method, and log filters to avoid reporting less valuable data that is of large amount. Log reporter safe mode is designed for situations where HTTP basic auth info could be visible in traceback and logs but shouldn\u0026rsquo;t be reported to OAP. You should keep the option as OFF if it\u0026rsquo;s not your case because frequent regular expression searches will inevitably introduce overhead to the CPU. Do not turn on sw-python CLI or agent debug logging in production, otherwise large amount of log will be produced.  sw-python CLI debug mode will automatically turn on agent debug log (override from sitecustomize.py).    
","title":"Performance best practices","url":"/docs/skywalking-python/v1.0.1/en/setup/faq/performance/"},{"content":"Performance Tests Performance testing is used to verify the impact on application performance when using SkyWalking Go.\nTest Objective By launching both the agent and non-agent compiled applications, we subject them to the same QPS under stress testing, evaluating the CPU, memory, and network latency of the machine during the testing period.\nThe application has been saved and submitted to the test/benchmark-codebase directory, with the following topology:\ntraffic generator -\u0026gt; consumer -\u0026gt; provider The payload(traffic) generator uses multithreading to send HTTP requests to the consumer service. When the consumer receives a request, it sends three requests to the provider service to obtain return data results. Based on these network requests, when using SkyWalking Go, the consumer service generates four Spans (1 Entry Span, 3 Exit Spans).\nApplication The application\u0026rsquo;s integration with SkyWalking Go follows the same process as other applications. For more information, please refer to the documentation.\nIn the application, we use loops and mathematical calculations (math.Log) to simulate the execution of the business program. This consumes a certain amount of CPU usage, preventing idle processing during service stress testing and amplifying the impact of the Agent program on the business application.\nStress Testing Service We use the Vegeta service for stress testing, which launches traffic at a specified QPS to the application. It is based on the Go language and uses goroutines to provide a more efficient stress testing solution.\nTest Environment A total of 4 GCP machines are launched, all instances are running on tbe 4C8G VM.\n traffic generator: Used for deploying traffic to the consumer machine. consumer: Used for deploying the consumer service. provider: Used for deploying the provider service. skywalking: Used for deploying the SkyWalking backend cluster, providing a standalone OAP node (in-memory H2 storage) and a UI interface.  Each service is deployed on a separate machine to ensure there is no interference with one another.\nTest Process Preparation Phase The preparation phase is used to ensure that all machines and test case preparations are completed.\nTraffic Generator Install the Vegeta service on the stress testing instance and create the following file(request.txt) to simulate traffic usage.\nGET http://${CONSUMER_IP}:8080/consumer Sw8: 1-MWYyZDRiZjQ3YmY3MTFlYWI3OTRhY2RlNDgwMDExMjI=-MWU3YzIwNGE3YmY3MTFlYWI4NThhY2RlNDgwMDExMjI=-0-c2VydmljZQ==-aW5zdGFuY2U=-cHJvcGFnYXRpb24=-cHJvcGFnYXRpb246NTU2Ng== Please replace the above CONSUMER_IP with the real IP address of the consumer instance.\nConsumer and Provider Install the skywalking-go service on the machines to be tested, and compile with and without the Agent.\nModify the machine\u0026rsquo;s file limit to prevent the inability to create new connections due to excessive handles: ulimit -n 65536.\nStart the provider service(without Agent) and obtain the provider machine\u0026rsquo;s IP address. Please provide this address when starting the consumer machine later.\nSkyWalking Download the SkyWalking service, modify the SkyWalking OAP startup script to increase the memory size, preventing OAP crashes due to insufficient memory.\nTesting without Agent  Start the Consumer service without the Agent version. 
Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Testing with Agent The only difference in the test without the Agent is the version of the consumer that is compiled and launched.\n Add the SW_AGENT_REPORTER_GRPC_BACKEND_SERVICE environment variables to the consumer service, for setting the IP address of the SkyWalking OAP service. Start the Consumer service with the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Test Results In the tests, we used 1000 QPS as a benchmark to stress test both the Consumer services with and without the Agent.\n In the non-Agent version, the CPU usage was around 74%, memory usage was 2.53%, and the average response time for a single request was 4.18ms. In the Agent-compiled version, the CPU usage was around 81%, memory usage was 2.61%, and the average response time for a single request was 4.32ms.  From these results, we can conclude that after adding the Agent, the CPU usage increased by about 9%, memory usage experienced almost no growth, and the average response time for requests increased by approximately 0.15ms.\nExplanation, approximately 0.15ms is the in-band cost. The most of CPU(extra 9%) cost are due to the amount of out of band data being sent to the collectors from the application(consumer), which is 4000 spans/s in our test case.\n","title":"Performance Tests","url":"/docs/skywalking-go/latest/en/agent/performance-tests/"},{"content":"Performance Tests Performance testing is used to verify the impact on application performance when using SkyWalking Go.\nTest Objective By launching both the agent and non-agent compiled applications, we subject them to the same QPS under stress testing, evaluating the CPU, memory, and network latency of the machine during the testing period.\nThe application has been saved and submitted to the test/benchmark-codebase directory, with the following topology:\ntraffic generator -\u0026gt; consumer -\u0026gt; provider The payload(traffic) generator uses multithreading to send HTTP requests to the consumer service. When the consumer receives a request, it sends three requests to the provider service to obtain return data results. Based on these network requests, when using SkyWalking Go, the consumer service generates four Spans (1 Entry Span, 3 Exit Spans).\nApplication The application\u0026rsquo;s integration with SkyWalking Go follows the same process as other applications. For more information, please refer to the documentation.\nIn the application, we use loops and mathematical calculations (math.Log) to simulate the execution of the business program. 
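A minimal sketch of that kind of CPU busy-work in Go (this is not the actual benchmark code in test/benchmark-codebase; the iteration count is an arbitrary value chosen only for illustration):

```go
package main

import (
	"fmt"
	"math"
)

// simulateWork burns CPU with a simple loop of math.Log calls,
// mimicking the synthetic business logic described above so that the
// agent's overhead is measured against a non-idle application.
// The iteration count (1,000,000) is an arbitrary example value.
func simulateWork() float64 {
	sum := 0.0
	for i := 1; i <= 1_000_000; i++ {
		sum += math.Log(float64(i))
	}
	return sum
}

func main() {
	fmt.Println("simulated work result:", simulateWork())
}
```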
This consumes a certain amount of CPU usage, preventing idle processing during service stress testing and amplifying the impact of the Agent program on the business application.\nStress Testing Service We use the Vegeta service for stress testing, which launches traffic at a specified QPS to the application. It is based on the Go language and uses goroutines to provide a more efficient stress testing solution.\nTest Environment A total of 4 GCP machines are launched, all instances are running on tbe 4C8G VM.\n traffic generator: Used for deploying traffic to the consumer machine. consumer: Used for deploying the consumer service. provider: Used for deploying the provider service. skywalking: Used for deploying the SkyWalking backend cluster, providing a standalone OAP node (in-memory H2 storage) and a UI interface.  Each service is deployed on a separate machine to ensure there is no interference with one another.\nTest Process Preparation Phase The preparation phase is used to ensure that all machines and test case preparations are completed.\nTraffic Generator Install the Vegeta service on the stress testing instance and create the following file(request.txt) to simulate traffic usage.\nGET http://${CONSUMER_IP}:8080/consumer Sw8: 1-MWYyZDRiZjQ3YmY3MTFlYWI3OTRhY2RlNDgwMDExMjI=-MWU3YzIwNGE3YmY3MTFlYWI4NThhY2RlNDgwMDExMjI=-0-c2VydmljZQ==-aW5zdGFuY2U=-cHJvcGFnYXRpb24=-cHJvcGFnYXRpb246NTU2Ng== Please replace the above CONSUMER_IP with the real IP address of the consumer instance.\nConsumer and Provider Install the skywalking-go service on the machines to be tested, and compile with and without the Agent.\nModify the machine\u0026rsquo;s file limit to prevent the inability to create new connections due to excessive handles: ulimit -n 65536.\nStart the provider service(without Agent) and obtain the provider machine\u0026rsquo;s IP address. Please provide this address when starting the consumer machine later.\nSkyWalking Download the SkyWalking service, modify the SkyWalking OAP startup script to increase the memory size, preventing OAP crashes due to insufficient memory.\nTesting without Agent  Start the Consumer service without the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Testing with Agent The only difference in the test without the Agent is the version of the consumer that is compiled and launched.\n Add the SW_AGENT_REPORTER_GRPC_BACKEND_SERVICE environment variables to the consumer service, for setting the IP address of the SkyWalking OAP service. Start the Consumer service with the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. 
The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Test Results In the tests, we used 1000 QPS as a benchmark to stress test both the Consumer services with and without the Agent.\n In the non-Agent version, the CPU usage was around 74%, memory usage was 2.53%, and the average response time for a single request was 4.18ms. In the Agent-compiled version, the CPU usage was around 81%, memory usage was 2.61%, and the average response time for a single request was 4.32ms.  From these results, we can conclude that after adding the Agent, the CPU usage increased by about 9%, memory usage experienced almost no growth, and the average response time for requests increased by approximately 0.15ms.\nExplanation, approximately 0.15ms is the in-band cost. The most of CPU(extra 9%) cost are due to the amount of out of band data being sent to the collectors from the application(consumer), which is 4000 spans/s in our test case.\n","title":"Performance Tests","url":"/docs/skywalking-go/next/en/agent/performance-tests/"},{"content":"Performance Tests Performance testing is used to verify the impact on application performance when using SkyWalking Go.\nTest Objective By launching both the agent and non-agent compiled applications, we subject them to the same QPS under stress testing, evaluating the CPU, memory, and network latency of the machine during the testing period.\nThe application has been saved and submitted to the test/benchmark-codebase directory, with the following topology:\ntraffic generator -\u0026gt; consumer -\u0026gt; provider The payload(traffic) generator uses multithreading to send HTTP requests to the consumer service. When the consumer receives a request, it sends three requests to the provider service to obtain return data results. Based on these network requests, when using SkyWalking Go, the consumer service generates four Spans (1 Entry Span, 3 Exit Spans).\nApplication The application\u0026rsquo;s integration with SkyWalking Go follows the same process as other applications. For more information, please refer to the documentation.\nIn the application, we use loops and mathematical calculations (math.Log) to simulate the execution of the business program. This consumes a certain amount of CPU usage, preventing idle processing during service stress testing and amplifying the impact of the Agent program on the business application.\nStress Testing Service We use the Vegeta service for stress testing, which launches traffic at a specified QPS to the application. It is based on the Go language and uses goroutines to provide a more efficient stress testing solution.\nTest Environment A total of 4 GCP machines are launched, all instances are running on tbe 4C8G VM.\n traffic generator: Used for deploying traffic to the consumer machine. consumer: Used for deploying the consumer service. provider: Used for deploying the provider service. skywalking: Used for deploying the SkyWalking backend cluster, providing a standalone OAP node (in-memory H2 storage) and a UI interface.  
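The vegeta attack commands in this test can also be driven programmatically through vegeta's Go library instead of the CLI. The sketch below assumes the github.com/tsenart/vegeta/v12/lib import path and the library's Target/Targeter/Attacker/Metrics API; the consumer address and the Sw8 header value are placeholders you would replace with the real values from request.txt:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	vegeta "github.com/tsenart/vegeta/v12/lib"
)

func main() {
	// Mirror request.txt: GET the consumer endpoint with an Sw8 propagation header.
	// Both the URL and the header value are placeholders in this sketch.
	header := http.Header{}
	header.Set("Sw8", "1-...") // elided: use the full Sw8 value from request.txt

	targeter := vegeta.NewStaticTargeter(vegeta.Target{
		Method: "GET",
		URL:    "http://CONSUMER_IP:8080/consumer",
		Header: header,
	})

	// -max-workers=2000 from the CLI maps to the MaxWorkers option here.
	attacker := vegeta.NewAttacker(vegeta.MaxWorkers(2000))
	rate := vegeta.Rate{Freq: 1000, Per: time.Second} // 1000 QPS, as in the test

	var metrics vegeta.Metrics
	// Use a shorter duration (e.g. 1 minute) for the preheat run; this is the 20 minute test.
	for res := range attacker.Attack(targeter, rate, 20*time.Minute, "skywalking-go-benchmark") {
		metrics.Add(res)
	}
	metrics.Close()

	fmt.Printf("p99 latency: %s, success ratio: %.2f%%\n",
		metrics.Latencies.P99, metrics.Success*100)
}
```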
Each service is deployed on a separate machine to ensure there is no interference with one another.\nTest Process Preparation Phase The preparation phase is used to ensure that all machines and test case preparations are completed.\nTraffic Generator Install the Vegeta service on the stress testing instance and create the following file(request.txt) to simulate traffic usage.\nGET http://${CONSUMER_IP}:8080/consumer Sw8: 1-MWYyZDRiZjQ3YmY3MTFlYWI3OTRhY2RlNDgwMDExMjI=-MWU3YzIwNGE3YmY3MTFlYWI4NThhY2RlNDgwMDExMjI=-0-c2VydmljZQ==-aW5zdGFuY2U=-cHJvcGFnYXRpb24=-cHJvcGFnYXRpb246NTU2Ng== Please replace the above CONSUMER_IP with the real IP address of the consumer instance.\nConsumer and Provider Install the skywalking-go service on the machines to be tested, and compile with and without the Agent.\nModify the machine\u0026rsquo;s file limit to prevent the inability to create new connections due to excessive handles: ulimit -n 65536.\nStart the provider service(without Agent) and obtain the provider machine\u0026rsquo;s IP address. Please provide this address when starting the consumer machine later.\nSkyWalking Download the SkyWalking service, modify the SkyWalking OAP startup script to increase the memory size, preventing OAP crashes due to insufficient memory.\nTesting without Agent  Start the Consumer service without the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Testing with Agent The only difference in the test without the Agent is the version of the consumer that is compiled and launched.\n Add the SW_AGENT_REPORTER_GRPC_BACKEND_SERVICE environment variables to the consumer service, for setting the IP address of the SkyWalking OAP service. Start the Consumer service with the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Test Results In the tests, we used 1000 QPS as a benchmark to stress test both the Consumer services with and without the Agent.\n In the non-Agent version, the CPU usage was around 74%, memory usage was 2.53%, and the average response time for a single request was 4.18ms. In the Agent-compiled version, the CPU usage was around 81%, memory usage was 2.61%, and the average response time for a single request was 4.32ms.  From these results, we can conclude that after adding the Agent, the CPU usage increased by about 9%, memory usage experienced almost no growth, and the average response time for requests increased by approximately 0.15ms.\nExplanation, approximately 0.15ms is the in-band cost. 
The most of CPU(extra 9%) cost are due to the amount of out of band data being sent to the collectors from the application(consumer), which is 4000 spans/s in our test case.\n","title":"Performance Tests","url":"/docs/skywalking-go/v0.4.0/en/agent/performance-tests/"},{"content":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including write-ahead logging(WAL), index, and data collected from skywalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the need of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. By exposing necessary interfaces, upper components do not need to care how persistence is implemented and avoid dealing with differences between different operating systems.\nArchitecture BanyanDB uses third-party storage for actual storage, and the file system shields the differences between different platforms and storage systems, allowing developers to operate files as easily as the local file system without worrying about specific details.\nFor different data models, stored in different locations, such as for meta and wal data, BanyanDB uses a local file system for storage. For index and data, the architecture of the file system is divided into three layers.\n The first layer is the API interface, which developers only need to care about how to operate the remote file system. The second layer is the storage system adapter, which is used to mask the differences between different storage systems. The last layer is the actual storage system. With the use of remote storage architecture, the local system can still play its role and can borrow the local system to speed up reading and writing.  IO Mode Persistence storage offers a range of IO modes to cater to various throughput requirements. The interface can be accessed by developers and can be configured through settings, which can be set in the configuration file.\nIo_uring Io_uring is a new feature in Linux 5.1, which is fully asynchronous and offers high throughput. In the scene of massive storage, io_uring can bring significant benefits. The following is the diagram about how io_uring works. If the user sets io_uring for use, the read and write requests will first be placed in the submission queue buffer when calling the operation API. When the threshold is reached, batch submissions will be made to SQ. After the kernel threads complete execution, the requests will be placed in the CQ, and the user can obtain the request results.\nSynchronous IO The most common IO mode is Synchronous IO, but it has a relatively low throughput. BanyanDB provides a nonblocking mode that is compatible with lower Linux versions.\nOperation Directory Create Create the specified directory and return the file descriptor, the error will happen if the directory already exists. The following is the pseudocode that calls the API in the go style.、\nparam:\nname: The name of the directory.\npermisson: Permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nCreateDirectory(name String, permission Mode) (error)\nOpen Open the directory and return an error if the file descriptor does not exist. 
The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\nreturn: Directory pointer, you can use it for various operations.\nOpenDirectory(name String) (*Dir, error)\nDelete Delete the directory and all files and return an error if the directory does not exist or the directory not reading or writing. The following is the pseudocode that calls the API in the go style.\nDir.DeleteDirectory() (error)\nRename Rename the directory and return an error if the directory already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\nDir.RenameDirectory(newName String) (error)\nRead Get all lists of files or children\u0026rsquo;s directories in the directory and an error if the directory does not exist. The following is the pseudocode that calls the API in the go style.\nreturn: List of files belonging to the directory.\nDir.ReadDirectory() (FileList, error)\nPermission When creating a file, the default owner is the user who created the directory. The owner can specify read and write permissions of the directory. If not specified, the default is read and write permissions, which include permissions for all files in the directory. The following is the pseudocode that calls the API in the go style.\nparam:\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nDir.SetDirectoryPermission(permission Mode) (error)\nFile Create Create the specified file and return the file descriptor, the error will happen if the file already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nCreateFile(name String, permission Mode) (error)\nOpen Open the file and return an error if the file descriptor does not exist. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\nreturn: File pointer, you can use it for various operations.\nOpenFile(name String) (*File, error)\nWrite BanyanDB provides two methods for writing files. Append mode, which adds new data to the end of a file. This mode is typically used for WAL. And BanyanDB supports vector Append mode, which supports appending consecutive buffers to the end of the file. Flush mode, which flushes all data to one file. It will return an error when writing a directory, the file does not exist or there is not enough space, and the incomplete file will be discarded. The flush operation is atomic, which means the file won\u0026rsquo;t be created if an error happens during the flush process. The following is the pseudocode that calls the API in the go style.\nFor append mode:\nparam:\nbuffer: The data append to the file.\nFile.AppendWriteFile(buffer []byte) (error)\nFor vector append mode:\nparam:\niov: The data in consecutive buffers.\nFile.AppendWritevFile(iov *[][]byte) (error)\nFor flush mode:\nparam:\nbuffer: The data append to the file.\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nreturn: File pointer, you can use it for various operations.\nFlushWriteFile(buffer []byte, permission Mode) (*File, error)\nDelete BanyanDB provides the deleting operation, which can delete a file at once. 
it will return an error if the directory does not exist or the file not reading or writing.\nThe following is the pseudocode that calls the API in the go style.\nFile.DeleteFile() (error)\nRead For reading operation, two read methods are provided: Reading a specified location of data, which relies on a specified offset and a buffer. And BanyanDB supports reading contiguous regions of a file and dispersing them into discontinuous buffers. Read the entire file, BanyanDB provides stream reading, which can use when the file is too large, the size gets each time can be set when using stream reading. If entering incorrect parameters such as incorrect offset or non-existent file, it will return an error. The following is the pseudocode that calls the API in the go style.\nFor reading specified location of data:\nparam:\noffset: Read begin location of the file.\nbuffer: The read length is the same as the buffer length.\nFile.ReadFile(offset int, buffer []byte) (error)\nFor vector reading:\nparam:\niov: Discontinuous buffers in memory.\nFile.ReadvFile(iov *[][]byte) (error)\nFor stream reading:\nparam:\noffset: Read begin location of the file.\nbuffer: Every read length in the stream is the same as the buffer length.\nreturn: A Iterator, the size of each iteration is the length of the buffer.\nFile.StreamReadFile(offset int, buffer []byte) (*iter, error)\nRename Rename the file and return an error if the directory exists in this directory. The following is the pseudocode that calls the API in the go style.\nparam:\nnewName: The new name of the file.\nFile.RenameFile(newName String) (error)\nGet size Get the file written data\u0026rsquo;s size and return an error if the file does not exist. The unit of file size is Byte. The following is the pseudocode that calls the API in the go style.\nreturn: the file written data\u0026rsquo;s size.\nFile.GetFileSize() (int, error)\nPermission When creating a file, the default owner is the user who created the file. The owner can specify the read and write permissions of the file. If not specified, the default is read and write permissions. The following is the pseudocode that calls the API in the go style.\nparam:\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nFile.SetFilePermission(permission Mode) (error)\n","title":"Persistence Storage","url":"/docs/skywalking-banyandb/latest/concept/persistence-storage/"},{"content":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including index, and data collected from skywalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the need of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. By exposing necessary interfaces, upper components do not need to care how persistence is implemented and avoid dealing with differences between different operating systems.\nArchitecture BanyanDB uses third-party storage for actual storage, and the file system shields the differences between different platforms and storage systems, allowing developers to operate files as easily as the local file system without worrying about specific details.\nFor different data models, stored in different locations, such as for meta data, BanyanDB uses a local file system for storage. 
\n","title":"Persistence Storage","url":"/docs/skywalking-banyandb/latest/concept/persistence-storage/"},{"content":"Persistence Storage Persistence storage is used for unifying the data that BanyanDB persists, including the index and the data collected from SkyWalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the needs of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. By exposing only the necessary interfaces, upper components do not need to care how persistence is implemented and avoid dealing with differences between operating systems.\nArchitecture BanyanDB uses third-party storage for the actual storage, and the file system shields the differences between platforms and storage systems, allowing developers to operate files as easily as on the local file system without worrying about specific details.\nDifferent data models are stored in different locations; for example, for meta data, BanyanDB uses the local file system for storage. For the index and data, the architecture of the file system is divided into three layers.\n The first layer is the API interface; developers only need to care about how to operate the remote file system. The second layer is the storage system adapter, which is used to mask the differences between storage systems. The last layer is the actual storage system. With the remote storage architecture, the local file system still plays its role and can be borrowed to speed up reading and writing.  IO Mode Persistence storage offers a range of IO modes to cater to various throughput requirements. The interface can be accessed by developers and can be configured through settings in the configuration file.\nIo_uring Io_uring is a new feature in Linux 5.1 that is fully asynchronous and offers high throughput. In massive-storage scenarios, io_uring can bring significant benefits. The following diagram shows how io_uring works. If the user enables io_uring, read and write requests are first placed in the submission queue buffer when the operation API is called. When the threshold is reached, they are submitted to the SQ in batches. After the kernel threads complete execution, the requests are placed in the CQ, and the user can obtain the results.\nSynchronous IO The most common IO mode is synchronous IO, but it has a relatively low throughput. BanyanDB provides a nonblocking mode that is compatible with lower Linux versions.\nOperation File Create Create the specified file and return the file descriptor; an error is returned if the file already exists. The following is the pseudocode that calls the API in the Go style.\nparam:\nname: The name of the file.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. You can use it as Mode.Read.\nreturn: The file instance, which can be used for various file operations.\nCreateFile(name String, permission Mode) (File, error)\nWrite BanyanDB provides two methods for writing files. Append mode adds new data to the end of a file. BanyanDB also supports a vector append mode, which appends consecutive buffers to the end of the file. Flush mode flushes all data to one file. It returns an error when the target is a directory, the file does not exist, or there is not enough space, and the incomplete file is discarded. The flush operation is atomic, which means the file won\u0026rsquo;t be created if an error happens during the flush process. The following is the pseudocode that calls the API in the Go style.\nFor append mode:\nparam:\nbuffer: The data to append to the file.\nreturn: Actual length of the written data.\nFile.Write(buffer []byte) (int, error)\nFor vector append mode:\nparam:\niov: The data in consecutive buffers.\nreturn: Actual length of the written data.\nFile.Writev(iov *[][]byte) (int, error)\nFor flush mode:\nparam:\nbuffer: The data to write to the file.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. You can use it as Mode.Read.\nreturn: Actual length of the flushed data.\nWrite(buffer []byte, permission Mode) (int, error)
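Because the append calls above return the number of bytes actually written, a caller can detect short writes. The sketch below is illustrative Go only; the Appender interface and the writeAll helper are assumptions for this example, not the concrete BanyanDB implementation.

// Appender mirrors the append-mode signature above that reports bytes written.
type Appender interface {
	Write(buffer []byte) (int, error)
}

// writeAll appends the buffer and reports whether the full buffer was written.
func writeAll(f Appender, buffer []byte) (bool, error) {
	n, err := f.Write(buffer)
	if err != nil {
		return false, err
	}
	return n == len(buffer), nil
}

A vector write through File.Writev would be checked the same way against the summed length of all buffers.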
Delete BanyanDB provides the delete operation, which can delete a file at once. It returns an error if the directory does not exist or the file is not readable or writable.\nThe following is the pseudocode that calls the API in the Go style.\nDeleteFile(name string) (error)\nRead For the reading operation, two read methods are provided: reading data at a specified location, which relies on a specified offset and a buffer, and vector reading, in which BanyanDB reads contiguous regions of a file and disperses them into discontinuous buffers. For reading an entire file, BanyanDB provides stream reading, which can be used when the file is too large; the size fetched on each read can be set when using stream reading. If incorrect parameters are given, such as a wrong offset or a non-existent file, an error is returned. The following is the pseudocode that calls the API in the Go style.\nFor reading a specified location of data:\nparam:\noffset: The location in the file where reading begins.\nbuffer: The read length is the same as the buffer length.\nreturn: Actual length of the data read.\nFile.Read(offset int64, buffer []byte) (int, error)\nFor vector reading:\nparam:\niov: Discontinuous buffers in memory.\nreturn: Actual length of the data read.\nFile.Readv(iov *[][]byte) (int, error)\nFor stream reading:\nparam:\nbuffer: Every read in the stream is the same length as the buffer.\nreturn: An iterator; the size of each iteration is the length of the buffer.\nFile.StreamRead(buffer []byte) (*iter, error)\nGet size Get the size of the data written to the file and return an error if the file does not exist. The unit of file size is bytes. The following is the pseudocode that calls the API in the Go style.\nreturn: The size of the data written to the file.\nFile.Size() (int, error)\nClose Close the file. The following is the pseudocode that calls the API in the Go style.\nFile.Close() error
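To show how these return values compose, here is a small illustrative Go sketch that reads a fixed-size header from the start of a file and then closes it. The HeaderFile interface and the readHeader helper are assumptions introduced for this example, not part of BanyanDB.

// HeaderFile mirrors the read and close signatures described above.
type HeaderFile interface {
	Read(offset int64, buffer []byte) (int, error)
	Close() error
}

// readHeader reads the first headerSize bytes of the file and then closes it.
func readHeader(f HeaderFile, headerSize int) ([]byte, error) {
	defer f.Close() // the close error is ignored here to keep the sketch short
	buf := make([]byte, headerSize)
	n, err := f.Read(0, buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil // n can be smaller when the file is shorter than headerSize
}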
\n","title":"Persistence Storage","url":"/docs/skywalking-banyandb/next/concept/persistence-storage/"},{"content":"Persistence Storage Persistence storage is used for unifying the data that BanyanDB persists, including write-ahead logging (WAL), the index, and the data collected from SkyWalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the needs of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. By exposing only the necessary interfaces, upper components do not need to care how persistence is implemented and avoid dealing with differences between operating systems.\nArchitecture BanyanDB uses third-party storage for the actual storage, and the file system shields the differences between platforms and storage systems, allowing developers to operate files as easily as on the local file system without worrying about specific details.\nDifferent data models are stored in different locations; for example, for meta and WAL data, BanyanDB uses the local file system for storage. For the index and data, the architecture of the file system is divided into three layers.\n The first layer is the API interface; developers only need to care about how to operate the remote file system. The second layer is the storage system adapter, which is used to mask the differences between storage systems. The last layer is the actual storage system. With the remote storage architecture, the local file system still plays its role and can be borrowed to speed up reading and writing.  IO Mode Persistence storage offers a range of IO modes to cater to various throughput requirements. The interface can be accessed by developers and can be configured through settings in the configuration file.\nIo_uring Io_uring is a new feature in Linux 5.1 that is fully asynchronous and offers high throughput. In massive-storage scenarios, io_uring can bring significant benefits. The following diagram shows how io_uring works. If the user enables io_uring, read and write requests are first placed in the submission queue buffer when the operation API is called. When the threshold is reached, they are submitted to the SQ in batches. After the kernel threads complete execution, the requests are placed in the CQ, and the user can obtain the results.\nSynchronous IO The most common IO mode is synchronous IO, but it has a relatively low throughput. BanyanDB provides a nonblocking mode that is compatible with lower Linux versions.\nOperation Directory Create Create the specified directory and return the file descriptor; an error is returned if the directory already exists. The following is the pseudocode that calls the API in the Go style.\nparam:\nname: The name of the directory.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. You can use it as Mode.Read.\nCreateDirectory(name String, permission Mode) (error)\nOpen Open the directory and return an error if the file descriptor does not exist. The following is the pseudocode that calls the API in the Go style.\nparam:\nname: The name of the directory.\nreturn: Directory pointer, which you can use for various operations.\nOpenDirectory(name String) (*Dir, error)\nDelete Delete the directory and all of its files, and return an error if the directory does not exist or is not readable or writable. The following is the pseudocode that calls the API in the Go style.\nDir.DeleteDirectory() (error)\nRename Rename the directory and return an error if a directory with the new name already exists. The following is the pseudocode that calls the API in the Go style.\nparam:\nnewName: The new name of the directory.\nDir.RenameDirectory(newName String) (error)\nRead Get the list of files and child directories in the directory, and return an error if the directory does not exist. The following is the pseudocode that calls the API in the Go style.\nreturn: List of files belonging to the directory.\nDir.ReadDirectory() (FileList, error)\nPermission When creating a directory, the default owner is the user who created it. The owner can specify the read and write permissions of the directory. If not specified, the default is read and write permissions, which apply to all files in the directory. The following is the pseudocode that calls the API in the Go style.\nparam:\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. You can use it as Mode.Read.\nDir.SetDirectoryPermission(permission Mode) (error)\nFile Create Create the specified file and return the file descriptor; an error is returned if the file already exists. The following is the pseudocode that calls the API in the Go style.\nparam:\nname: The name of the file.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. You can use it as Mode.Read.\nCreateFile(name String, permission Mode) (error)
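As an illustration of how the directory and file creation calls above could be combined, here is a small Go-style sketch. The Mode constants, the FS interface, and the setupSegment helper are assumptions introduced only for this example; they mirror the pseudocode signatures above and are not the concrete BanyanDB implementation.

// Mode mirrors the three permission modes described above.
type Mode int

const (
	Read Mode = iota
	Write
	ReadAndWrite
)

// FS mirrors the directory and file creation signatures described above.
type FS interface {
	CreateDirectory(name string, permission Mode) error
	CreateFile(name string, permission Mode) error
}

// setupSegment creates a read-write directory and one initial file inside it.
// Both names are supplied by the caller; CreateDirectory and CreateFile return
// an error if the target already exists, as described above.
func setupSegment(fs FS, dirName, fileName string) error {
	if err := fs.CreateDirectory(dirName, ReadAndWrite); err != nil {
		return err
	}
	return fs.CreateFile(fileName, ReadAndWrite)
}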
Open Open the file and return an error if the file descriptor does not exist. The following is the pseudocode that calls the API in the Go style.\nparam:\nname: The name of the file.\nreturn: File pointer, which you can use for various operations.\nOpenFile(name String) (*File, error)\nWrite BanyanDB provides two methods for writing files. Append mode adds new data to the end of a file; this mode is typically used for the WAL. BanyanDB also supports a vector append mode, which appends consecutive buffers to the end of the file. Flush mode flushes all data to one file. It returns an error when the target is a directory, the file does not exist, or there is not enough space, and the incomplete file is discarded. The flush operation is atomic, which means the file won\u0026rsquo;t be created if an error happens during the flush process. The following is the pseudocode that calls the API in the Go style.\nFor append mode:\nparam:\nbuffer: The data to append to the file.\nFile.AppendWriteFile(buffer []byte) (error)\nFor vector append mode:\nparam:\niov: The data in consecutive buffers.\nFile.AppendWritevFile(iov *[][]byte) (error)\nFor flush mode:\nparam:\nbuffer: The data to write to the file.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. You can use it as Mode.Read.\nreturn: File pointer, which you can use for various operations.\nFlushWriteFile(buffer []byte, permission Mode) (*File, error)\nDelete BanyanDB provides the delete operation, which can delete a file at once. It returns an error if the directory does not exist or the file is not readable or writable.\nThe following is the pseudocode that calls the API in the Go style.\nFile.DeleteFile() (error)\nRead For the reading operation, two read methods are provided: reading data at a specified location, which relies on a specified offset and a buffer, and vector reading, in which BanyanDB reads contiguous regions of a file and disperses them into discontinuous buffers. For reading an entire file, BanyanDB provides stream reading, which can be used when the file is too large; the size fetched on each read can be set when using stream reading. If incorrect parameters are given, such as a wrong offset or a non-existent file, an error is returned. The following is the pseudocode that calls the API in the Go style.\nFor reading a specified location of data:\nparam:\noffset: The location in the file where reading begins.\nbuffer: The read length is the same as the buffer length.\nFile.ReadFile(offset int, buffer []byte) (error)\nFor vector reading:\nparam:\niov: Discontinuous buffers in memory.\nFile.ReadvFile(iov *[][]byte) (error)\nFor stream reading:\nparam:\noffset: The location in the file where reading begins.\nbuffer: Every read in the stream is the same length as the buffer.\nreturn: An iterator; the size of each iteration is the length of the buffer.\nFile.StreamReadFile(offset int, buffer []byte) (*iter, error)\nRename Rename the file and return an error if a file with the new name already exists in the directory. The following is the pseudocode that calls the API in the Go style.\nparam:\nnewName: The new name of the file.\nFile.RenameFile(newName String) (error)\nGet size Get the size of the data written to the file and return an error if the file does not exist. The unit of file size is bytes. The following is the pseudocode that calls the API in the Go style.\nreturn: The size of the data written to the file.\nFile.GetFileSize() (int, error)\nPermission When creating a file, the default owner is the user who created the file. The owner can specify the read and write permissions of the file. 
If not specified, the default is read and write permissions. The following is the pseudocode that calls the API in the go style.\nparam:\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nFile.SetFilePermission(permission Mode) (error)\n","title":"Persistence Storage","url":"/docs/skywalking-banyandb/v0.5.0/concept/persistence-storage/"},{"content":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF","url":"/docs/main/latest/en/concepts-and-designs/ebpf-cpu-profiling/"},{"content":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF","url":"/docs/main/next/en/concepts-and-designs/ebpf-cpu-profiling/"},{"content":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF","url":"/docs/main/v9.2.0/en/concepts-and-designs/ebpf-cpu-profiling/"},{"content":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it against the relevant C, C++, Golang, and Rust programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg’s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. The more often a thread stack appears in the sampled dumps, the more CPU resources it is consuming.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy uses 100% sampling as the default tracing policy. How does that impact the performance?\nAs shown in the figure below, using on-CPU profiling, we found that Zipkin tracing accounts for about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing vs. Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
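If tracing cannot simply be switched off, the reduced-sampling option mentioned above is the middle ground. A sketch of that variant follows; the value 1.0 means 1% in the percentage-based Istio sampling setting, and the exact flags should be treated as an assumption to verify against your Istio version:
istioctl install -y --set profile=demo --set meshConfig.enableTracing=true --set meshConfig.defaultConfig.tracing.sampling=1.0
This keeps a small fraction of requests traced in Zipkin while giving back most of the CPU that 100% sampling consumes.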
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us improve performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; entry shown below also indicates that this call happens inside the Linux kernel.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature, which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even with access log file writing disabled, we can still use ALS to capture and aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared, as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache SkyWalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, which helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF","url":"/docs/main/v9.7.0/en/concepts-and-designs/ebpf-cpu-profiling/"},{"content":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe configuration has 5 parts, which are common_config, gatherer, processor and the sender.\ncommon_config    Config Description     pipe_name The unique collect space name.    Gatherer The gatherer has 2 roles, which are the receiver and fetcher.\nReceiver Role    Config Description     server_name The server name in the sharing pipe, which would be used in the receiver plugin.   receiver The receiver configuration. Please read the doc to find all receiver plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    Fetcher Role    Config Description     fetch_interval The time interval between two fetch operations. The time unit is millisecond.   fetcher The fetcher configuration. Please read the doc to find all fetcher plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    processor The filter configuration. Please read the doc to find all filter plugins.\nsender    Config Description     flush_time The time interval between two flush operations. And the time unit is millisecond.   max_buffer_size The maximum buffer elements.   min_flush_events The minimum flush elements.   client_name The client name used in the forwarders of the sharing pipe.   forwarders The forwarder plugin list. Please read the doc to find all forwarders plugins.   fallbacker The fallbacker plugin. Please read the doc to find all fallbacker plugins.    Example pipes:- common_config:pipe_name:pipe1gatherer:server_name:\u0026#34;grpc-server\u0026#34;receiver:plugin_name:\u0026#34;grpc-native-log-receiver\u0026#34;queue:plugin_name:\u0026#34;mmap-queue\u0026#34;segment_size:${SATELLITE_MMAP_QUEUE_SIZE:524288}max_in_mem_segments:${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}queue_dir:\u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;processor:filters:sender:fallbacker:plugin_name:none-fallbackerflush_time:${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}max_buffer_size:${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}min_flush_events:${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}client_name:kafka-clientforwarders:- plugin_name:native-log-kafka-forwardertopic:${SATELLITE_NATIVELOG-TOPIC:log-topic}","title":"Pipe Plugins","url":"/docs/skywalking-satellite/latest/en/setup/configuration/pipe-plugins/"},{"content":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe configuration has 5 parts, which are common_config, gatherer, processor and the sender.\ncommon_config    Config Description     pipe_name The unique collect space name.    Gatherer The gatherer has 2 roles, which are the receiver and fetcher.\nReceiver Role    Config Description     server_name The server name in the sharing pipe, which would be used in the receiver plugin.   receiver The receiver configuration. Please read the doc to find all receiver plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    Fetcher Role    Config Description     fetch_interval The time interval between two fetch operations. 
The time unit is millisecond.   fetcher The fetcher configuration. Please read the doc to find all fetcher plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    processor The filter configuration. Please read the doc to find all filter plugins.\nsender    Config Description     flush_time The time interval between two flush operations. And the time unit is millisecond.   max_buffer_size The maximum buffer elements.   min_flush_events The minimum flush elements.   client_name The client name used in the forwarders of the sharing pipe.   forwarders The forwarder plugin list. Please read the doc to find all forwarders plugins.   fallbacker The fallbacker plugin. Please read the doc to find all fallbacker plugins.    Example pipes:- common_config:pipe_name:pipe1gatherer:server_name:\u0026#34;grpc-server\u0026#34;receiver:plugin_name:\u0026#34;grpc-native-log-receiver\u0026#34;queue:plugin_name:\u0026#34;mmap-queue\u0026#34;segment_size:${SATELLITE_MMAP_QUEUE_SIZE:524288}max_in_mem_segments:${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}queue_dir:\u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;processor:filters:sender:fallbacker:plugin_name:none-fallbackerflush_time:${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}max_buffer_size:${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}min_flush_events:${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}client_name:kafka-clientforwarders:- plugin_name:native-log-kafka-forwardertopic:${SATELLITE_NATIVELOG-TOPIC:log-topic}","title":"Pipe Plugins","url":"/docs/skywalking-satellite/next/en/setup/configuration/pipe-plugins/"},{"content":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe configuration has 5 parts, which are common_config, gatherer, processor and the sender.\ncommon_config    Config Description     pipe_name The unique collect space name.    Gatherer The gatherer has 2 roles, which are the receiver and fetcher.\nReceiver Role    Config Description     server_name The server name in the sharing pipe, which would be used in the receiver plugin.   receiver The receiver configuration. Please read the doc to find all receiver plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    Fetcher Role    Config Description     fetch_interval The time interval between two fetch operations. The time unit is millisecond.   fetcher The fetcher configuration. Please read the doc to find all fetcher plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    processor The filter configuration. Please read the doc to find all filter plugins.\nsender    Config Description     flush_time The time interval between two flush operations. And the time unit is millisecond.   max_buffer_size The maximum buffer elements.   min_flush_events The minimum flush elements.   client_name The client name used in the forwarders of the sharing pipe.   forwarders The forwarder plugin list. Please read the doc to find all forwarders plugins.   fallbacker The fallbacker plugin. Please read the doc to find all fallbacker plugins.    
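For readability, the flattened "Example pipes" entry below corresponds roughly to the following YAML sketch; the plugin names and environment-variable defaults are taken from that example, and the indentation is reconstructed, so it may differ slightly from the shipped default configuration:
pipes:
- common_config:
    pipe_name: pipe1
  gatherer:
    server_name: "grpc-server"
    receiver:
      plugin_name: "grpc-native-log-receiver"
    queue:
      plugin_name: "mmap-queue"
      segment_size: ${SATELLITE_MMAP_QUEUE_SIZE:524288}
      max_in_mem_segments: ${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}
      queue_dir: "pipe1-log-grpc-receiver-queue"
  processor:
    filters:
  sender:
    fallbacker:
      plugin_name: none-fallbacker
    flush_time: ${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}
    max_buffer_size: ${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}
    min_flush_events: ${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}
    client_name: kafka-client
    forwarders:
    - plugin_name: native-log-kafka-forwarder
      topic: ${SATELLITE_NATIVELOG-TOPIC:log-topic}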
Example pipes:- common_config:pipe_name:pipe1gatherer:server_name:\u0026#34;grpc-server\u0026#34;receiver:plugin_name:\u0026#34;grpc-native-log-receiver\u0026#34;queue:plugin_name:\u0026#34;mmap-queue\u0026#34;segment_size:${SATELLITE_MMAP_QUEUE_SIZE:524288}max_in_mem_segments:${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}queue_dir:\u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;processor:filters:sender:fallbacker:plugin_name:none-fallbackerflush_time:${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}max_buffer_size:${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}min_flush_events:${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}client_name:kafka-clientforwarders:- plugin_name:native-log-kafka-forwardertopic:${SATELLITE_NATIVELOG-TOPIC:log-topic}","title":"Pipe Plugins","url":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/pipe-plugins/"},{"content":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... 
|- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. (HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. 
You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   
parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. 
All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. --\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... \u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  
+-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  
Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml. If a test case required to run in JDK 21 environment, please add you test case into file plugins-jdk21-test.\u0026lt;n\u0026gt;.yaml.\njobs:PluginsTest:name:Pluginruns-on:ubuntu-latesttimeout-minutes:90strategy:fail-fast:truematrix:case:# ...- \u0026lt;your scenario test directory name\u0026gt;# ...","title":"Plugin automatic test framework","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/plugin-test/"},{"content":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. 
As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. 
(HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. 
DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. 
The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. 
--\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... \u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  +-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  
segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  
meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml. If a test case required to run in JDK 21 environment, please add you test case into file plugins-jdk21-test.\u0026lt;n\u0026gt;.yaml.\njobs:PluginsTest:name:Pluginruns-on:ubuntu-latesttimeout-minutes:90strategy:fail-fast:truematrix:case:# ...- \u0026lt;your scenario test directory name\u0026gt;# ...","title":"Plugin automatic test framework","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-test/"},{"content":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. 
JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. (HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    
Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    
Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. 
Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. --\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... 
\u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  +-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  
segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  
meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml.\njobs:PluginsTest:name:Pluginruns-on:ubuntu-latesttimeout-minutes:90strategy:fail-fast:truematrix:case:# ...- \u0026lt;your scenario test directory name\u0026gt;# ...","title":"Plugin automatic test framework","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/plugin-test/"},{"content":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. 
It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. (HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   
with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    
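The number and string operators above are written in front of the expected value in expectedData.yaml. A short sketch of how they read in practice (field values are placeholders):

```yaml
# Operator usage sketch (placeholder values).
segmentSize: ge 1        # number operator: greater than or equal
startTime: nq 0          # number operator: not equal
segmentId: not null      # string operator: must be present
operationName: /case/entry   # no operator means eq (equal), the default
```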
Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. 
Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. --\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... 
\u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  +-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  
segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  
meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml. If a test case required to run in JDK 21 environment, please add you test case into file plugins-jdk21-test.\u0026lt;n\u0026gt;.yaml.\njobs:PluginsTest:name:Pluginruns-on:ubuntu-latesttimeout-minutes:90strategy:fail-fast:truematrix:case:# ...- \u0026lt;your scenario test directory name\u0026gt;# ...","title":"Plugin automatic test framework","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/plugin-test/"},{"content":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. 
JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. (HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    
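As a sketch of how the optional fields in the table above fit together, a case that activates an optional plugin and declares one dependency service might look like the following; the scenario name, endpoints, and dependency image are placeholders chosen only for illustration.

```yaml
# configuration.yml sketch with an optional plugin and one dependency service
# (all names and values are placeholders).
type: jvm
entryService: http://localhost:8080/optional-scenario/case/entry
healthCheck: http://localhost:8080/optional-scenario/case/healthCheck
startScript: ./bin/startup.sh
runningMode: with_optional
withPlugins: apm-spring-annotation-plugin-*.jar
dependencies:
  service1:
    image: some-backend:${CASE_SERVER_IMAGE_VERSION}
    hostname: some-backend
    environment:
      - SOME_OPTION=true
```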
Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    
Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. 
Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. --\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... 
\u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  +-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  
segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  
meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml. If a test case required to run in JDK 21 environment, please add you test case into file plugins-jdk21-test.\u0026lt;n\u0026gt;.yaml.\njobs:PluginsTest:name:Pluginruns-on:ubuntu-latesttimeout-minutes:90strategy:fail-fast:truematrix:case:# ...- \u0026lt;your scenario test directory name\u0026gt;# ...","title":"Plugin automatic test framework","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/plugin-test/"},{"content":"Plugin Configurations    key environment key default value description     http.server_collect_parameters SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS false Collect the parameters of the HTTP request on the server side.   mongo.collect_statement SW_AGENT_PLUGIN_CONFIG_MONGO_COLLECT_STATEMENT false Collect the statement of the MongoDB request.   sql.collect_parameter SW_AGENT_PLUGIN_CONFIG_SQL_COLLECT_PARAMETER false Collect the parameter of the SQL request.   redis.max_args_bytes SW_AGENT_PLUGIN_CONFIG_REDIS_MAX_ARGS_BYTES 1024 Limit the bytes size of redis args request.   reporter.discard SW_AGENT_REPORTER_DISCARD false Discard the reporter.    
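Every option in the table above has an environment key, so it can be set by exporting that variable before the instrumented Go service starts. A minimal sketch, assuming a hypothetical service binary name:

```shell
# Enable two plugin options via their environment keys, then start the service.
export SW_AGENT_PLUGIN_CONFIG_SQL_COLLECT_PARAMETER=true
export SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS=true
./your-go-service   # hypothetical binary built with the SkyWalking Go agent
```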
","title":"Plugin Configurations","url":"/docs/skywalking-go/latest/en/agent/plugin-configurations/"},{"content":"Plugin Configurations    key environment key default value description     http.server_collect_parameters SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS false Collect the parameters of the HTTP request on the server side.   mongo.collect_statement SW_AGENT_PLUGIN_CONFIG_MONGO_COLLECT_STATEMENT false Collect the statement of the MongoDB request.   sql.collect_parameter SW_AGENT_PLUGIN_CONFIG_SQL_COLLECT_PARAMETER false Collect the parameter of the SQL request.   redis.max_args_bytes SW_AGENT_PLUGIN_CONFIG_REDIS_MAX_ARGS_BYTES 1024 Limit the bytes size of redis args request.   reporter.discard SW_AGENT_REPORTER_DISCARD false Discard the reporter.   gin.collect_request_headers SW_AGENT_PLUGIN_CONFIG_GIN_COLLECT_REQUEST_HEADERS  Collect the http header of gin request.   gin.header_length_threshold SW_AGENT_PLUGIN_CONFIG_GIN_HEADER_LENGTH_THRESHOLD 2048 Controlling the length limitation of all header values.    ","title":"Plugin Configurations","url":"/docs/skywalking-go/next/en/agent/plugin-configurations/"},{"content":"Plugin Configurations    key environment key default value description     http.server_collect_parameters SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS false Collect the parameters of the HTTP request on the server side.   mongo.collect_statement SW_AGENT_PLUGIN_CONFIG_MONGO_COLLECT_STATEMENT false Collect the statement of the MongoDB request.   sql.collect_parameter SW_AGENT_PLUGIN_CONFIG_SQL_COLLECT_PARAMETER false Collect the parameter of the SQL request.   redis.max_args_bytes SW_AGENT_PLUGIN_CONFIG_REDIS_MAX_ARGS_BYTES 1024 Limit the bytes size of redis args request.   reporter.discard SW_AGENT_REPORTER_DISCARD false Discard the reporter.    ","title":"Plugin Configurations","url":"/docs/skywalking-go/v0.4.0/en/agent/plugin-configurations/"},{"content":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll plugins must follow these steps:\n Create a new plugin module: Create a new project in the specified directory and import the plugin API module. Define the enhancement object: Define the description for the plugin. Invoke the plugin API: Call the API provided by the core to complete the core invocation. Import the plugin module: Import the plugin into the management module for users to use.  Create a new plugin module The plugin must create a new module, which is currently stored in the project\u0026rsquo;s plugins directory.\nPlugins can import the following two modules:\n Agent core: This module provides all the dependencies needed for the plugin, including the plugin API, enhancement declaration objects, etc. Agent core plugin should be github.com/apache/skywalking-go/plugins/core and replaced by the relative location. Framework to be enhanced: Import the framework you wish to enhance.  Note: Plugins should NOT import and use any other modules, as this may cause compilation issues for users. If certain tools are needed, they should be provided by the agent core.\nDefine the enhancement object In the root directory of the project, create a new go file to define the basic information of the plugin. The basic information includes the following methods, corresponding to the Instrument interface:\n Name: The name of the plugin. Please keep this name consistent with the newly created project name. The reason will be explained later. Base Package: Declare which package this plugin intercepts. 
For example, if you want to intercept gin, you can write: \u0026ldquo;github.com/gin-gonic/gin\u0026rdquo;. Version Checker: This method passes the version number to the enhancement object to verify whether the specified version of the framework is supported. If not, the enhancement program will not be executed. Points: A plugin can define one or more enhancement points. This will be explained in more detail in the following sections. File System: Use //go:embed * in the current file to import all files in this module, which will be used for file copying during the mixed compilation process.  Note: Please declare //skywalking:nocopy at any position in this file to indicate that the file would not be copied. This file is only used for guidance during hybrid compilation. Also, this file involves the use of the embed package, and if the target framework does not import the package embed, a compilation error may occur.\nManage Instrument and Interceptor codes in hierarchy structure Instrument and interceptor codes are placed in root by default. In complex instrumentation scenarios, there could be dozens of interceptors, we provide PluginSourceCodePath to build a hierarchy folder structure to manage those codes.\nNotice: The instrumentation still works without proper setting of this, but the debug tool would lose the location of the source codes.\nExample For example, the framework needs to enhance two packages, as shown in the following directory structure:\n- plugins - test - go.mod - package1 - instrument.go - interceptor.go - package2 - instrument.go - interceptor.go ... In the above directory structure, the test framework needs to provide multiple different enhancement objects. In this case, a PluginSourceCodePath Source Code Path** method needs to be added for each enhancement object, the values of this method should be package1 and package2.\nInstrument Point Instrument points are used to declare that which methods and structs in the current package should be instrumented. They mainly include the following information:\n Package path: If the interception point that needs to be intercepted is not in the root directory of the current package, you need to fill in the relative path to the package. For example, if this interception point wants to instrument content in the github.com/gin-gonic/gin/render directory, you need to fill in render here. Package Name(optional): Define the package name of the current package. If the package name is not defined, the package name of the current package would be used by default. It\u0026rsquo;s used when the package path and package name are not same, such as the name of github.com/emicklei/go-restful/v3 is restful. Matcher(At): Specify which eligible content in the current package path needs to be enhanced. Interceptor: If the current method is being intercepted (whether it\u0026rsquo;s a static method or an instance method), the name of the interceptor must be specified.  Method Matcher Method matchers are used to intercept both static and non-static methods. The specific definitions are as follows:\n// NewStaticMethodEnhance creates a new EnhanceMatcher for static method. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewStaticMethodEnhance(name string, filters ...MethodFilterOption) // NewMethodEnhance creates a new EnhanceMatcher for method. // receiver: receiver type name of method needs to be enhanced. 
// name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewMethodEnhance(receiver, name string, filters ...MethodFilterOption) Filter Option Filter Options are used to validate the parameters or return values in the method. If the method name matches but the Options validation fails, the enhancement would not be performed.\n// WithArgsCount filter methods with specific count of arguments. func WithArgsCount(argsCount int) // WithResultCount filter methods with specific count of results. func WithResultCount(resultCount int) // WithArgType filter methods with specific type of the index of the argument. func WithArgType(argIndex int, dataType string) // WithResultType filter methods with specific type of the index of the result. func WithResultType(argIndex int, dataType string) Demo For example, if you have the following method that needs to be intercepted:\nfunc (c *Context) HandleMethod(name string) bool you can describe it using this condition:\ninstrument.NewMethodEnhance(\u0026#34;*Context\u0026#34;, \u0026#34;HandleMethod\u0026#34;, instrument.WithArgsCount(1), instrument.WithArgType(0, \u0026#34;string\u0026#34;), instrument.WithResultCount(1), instrument.WithResultType(0, \u0026#34;bool\u0026#34;)) Struct Matcher Enhancement structures can embed enhanced fields within specified structs. After the struct is instantiated, custom data content can be added to the specified struct in the method interceptor.\nStruct matchers are used to intercept struct methods. The specific definitions are as follows:\n// NewStructEnhance creates a new EnhanceMatcher for struct. // name: struct name needs to be enhanced.(Public and private structs are supported) // filters: filters for struct. func NewStructEnhance(name string, filters ...StructFilterOption) Filter Option Filter Options are used to validate the fields in the structure.\n// WithFieldExists filter the struct has the field with specific name. func WithFieldExists(fieldName string) // WithFiledType filter the struct has the field with specific name and type. func WithFiledType(filedName, filedType string) Enhanced Instance After completing the definition of the struct enhancement, you can convert the specified instance into the following interface when intercepting methods, and get or set custom field information. 
The interface definition is as follows:\ntype EnhancedInstance interface { // GetSkyWalkingDynamicField get the customized data from instance \tGetSkyWalkingDynamicField() interface{} // SetSkyWalkingDynamicField set the customized data into the instance \tSetSkyWalkingDynamicField(interface{}) } Demo For example, if you have the following struct that needs to be enhanced:\ntype Test struct { value *Context } you can describe it using this condition:\ninstrument.NewStructEnhance(\u0026#34;Test\u0026#34;, instrument.WithFieldExists(\u0026#34;value\u0026#34;), instrument.WithFiledType(\u0026#34;value\u0026#34;, \u0026#34;*Context\u0026#34;)) Next, you can set custom content for the specified enhanced instance when intercepting methods.\nins := testInstance.(instrument.EnhancedInstance) // setting custom content ins.SetSkyWalkingDynamicField(\u0026#34;custom content\u0026#34;) // getting custom content res := ins.GetSkyWalkingDynamicField() Interceptor Interceptors are used to define custom business logic before and after method execution, allowing you to access data from before and after method execution and interact with the Agent Core by using the Agent API.\nThe interceptor definition is as follows, you need to create a new structure and implement it:\ntype Interceptor interface { // BeforeInvoke would be called before the target method invocation.  BeforeInvoke(invocation Invocation) error // AfterInvoke would be called after the target method invocation.  AfterInvoke(invocation Invocation, result ...interface{}) error } Within the interface, you can see the Invocation interface, which defines the context of an interception. The specific definition is as follows:\ntype Invocation interface { // CallerInstance is the instance of the caller, nil if the method is static method.  CallerInstance() interface{} // Args is get the arguments of the method, please cast to the specific type to get more information.  Args() []interface{} // ChangeArg is change the argument value of the method  ChangeArg(int, interface{}) // IsContinue is the flag to control the method invocation, if it is true, the target method would not be invoked.  IsContinue() bool // DefineReturnValues are defined the return value of the method, and continue the method invoked  DefineReturnValues(...interface{}) // SetContext is the customized context of the method invocation, it should be propagated the tracing span.  SetContext(interface{}) // GetContext is get the customized context of the method invocation  GetContext() interface{} } Thread safe The Interceptor instance would define new instance at the current package level, rather than creating a new instance each time a method is intercepted.\nTherefore, do not declare objects in the interceptor, and instead use Invocation.Context to pass data.\nPackage Path If the method you want to intercept is not located in the root directory of the framework, place your interceptor code in the relative location within the plugin. The Agent would only copy files from the same package directory.\nFor example, if you want to intercept a method in github.com/gin-gonic/gin/render, create a render directory in the root of your plugin, and put the interceptor inside it. This ensures that the interceptor is properly included during the copy operation and can be correctly applied to the target package.\nPlugin Configuration Plugin configuration is used to add custom configuration parameters to a specified plugin. 
When users specify configuration items, the plugin can dynamically adapt the content needed in the plugin according to the user\u0026rsquo;s configuration items.\nDeclaration Please declare the configuration file you need in the package you want to use. Declare it using var, and add the //skywalking:config directive to specify that this variable requires dynamic updating.\nBy default, the configuration item belongs to the configuration of the current plugin. For example, if the name of my current plugin is gin, then this configuration item is under the gin plugin. Of course, you can also change it to the http plugin to reference the configuration information of the relevant plugin, in which case you need to specify it as //skywalking:config http.\nItem Each configuration item needs to add a config tag. This is used to specify the name of the current configuration content. By default, it would lowercase all letters and add an _ identifier before each uppercase letter.\nCurrently, it supports basic data types and struct types, and it also supports obtaining data values through environment variables.\nDemo For example, I have declared the following configuration item:\n//skywalking:config http var config struct { ServerCollectParameters bool `config:\u0026#34;server_collect_parameters\u0026#34;` Client struct{ CollectParameters bool `config:\u0026#34;collect_parameters\u0026#34;` } `config:\u0026#34;client\u0026#34;` } In the above example, I created a plugin configuration for http, which includes two configuration items.\n config.ServerCollectParameters: Its configuration is located at http.server_collect_parameters. config.Client.CollectParameter: Its configuration is located at http.client.collect_parameter.  When the plugin needs to be used, it can be accessed directly by reading the config configuration.\nAgent API The Agent API is used when a method is intercepted and interacts with the Agent Core.\nTracing API The Tracing API is used for building distributed tracing, and currently supports the following methods:\n// CreateEntrySpan creates a new entry span. // operationName is the name of the span. // extractor is the extractor to extract the context from the carrier. // opts is the options to create the span. func CreateEntrySpan(operationName string, extractor Extractor, opts ...SpanOption) // CreateLocalSpan creates a new local span. // operationName is the name of the span. // opts is the options to create the span. func CreateLocalSpan(operationName string, opts ...SpanOption) // CreateExitSpan creates a new exit span. // operationName is the name of the span. // peer is the peer address of the span. // injector is the injector to inject the context into the carrier. // opts is the options to create the span. func CreateExitSpan(operationName, peer string, injector Injector, opts ...SpanOption) // ActiveSpan returns the current active span, it can be got the current span in the current goroutine. // If the current goroutine is not in the context of the span, it will return nil. // If get the span from other goroutine, it can only get information but cannot be operated. func ActiveSpan() // GetRuntimeContextValue returns the value of the key in the runtime context, which is current goroutine. // The value can also read from the goroutine which is created by the current goroutine func GetRuntimeContextValue(key string) // SetRuntimeContextValue sets the value of the key in the runtime context. 
func SetRuntimeContextValue(key string, val interface{})

Context Carrier
The context carrier is used to pass the context between different applications.
When creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.

// Extractor is a tool specification which defines how to
// extract the trace parent context from the propagation context
type Extractor func(headerKey string) (string, error)
// Injector is a tool specification which defines how to
// inject the trace context into the propagation context
type Injector func(headerKey, headerValue string) error

The following demo demonstrates how to pass the Context Carrier in the Tracing API:

// create a new entry span and extract the context carrier from the request
tracing.CreateEntrySpan(fmt.Sprintf("%s:%s", request.Method, request.URL.Path), func(headerKey string) (string, error) {
	return request.Header.Get(headerKey), nil
})
// create a new exit span and inject the context carrier into the request
tracing.CreateExitSpan(fmt.Sprintf("%s:%s", request.Method, request.URL.Path), request.Host, func(headerKey, headerValue string) error {
	request.Header.Add(headerKey, headerValue)
	return nil
})

Span Option
Span Options can be passed when creating a Span to configure the information in the Span.
The following options are currently supported:

// WithLayer sets the SpanLayer of the Span
func WithLayer(layer SpanLayer)
// WithComponent sets the component id of the Span
func WithComponent(componentID int32)
// WithTag sets the Tag of the Span
func WithTag(key Tag, value string)

Span Component
The Component ID in a Span is used to identify the current component, with its data defined in SkyWalking OAP. If the framework you are writing does not exist in this file, please submit a PR in the SkyWalking project to add the definition of this plugin.

Span Operation
After creating a Span, you can perform additional operations on it.

// Span for plugin API
type Span interface {
	// AsyncSpan for the async API
	AsyncSpan
	// Tag sets the Tag of the Span
	Tag(Tag, string)
	// SetSpanLayer sets the SpanLayer of the Span
	SetSpanLayer(SpanLayer)
	// SetOperationName re-sets the operation name of the Span
	SetOperationName(string)
	// SetPeer re-sets the peer address of the Span
	SetPeer(string)
	// Log adds a log to the Span
	Log(...string)
	// Error adds an error log to the Span
	Error(...string)
	// End ends the Span
	End()
}

Async Span
There is a set of advanced APIs in Span which is specifically designed for async use cases. When setting the name, tags, logs, and other operations (including ending the span) in another goroutine, you should use these APIs.

type AsyncSpan interface {
	// PrepareAsync finishes the span in the current tracing context, but the current span stays alive until AsyncFinish is called
	PrepareAsync()
	// AsyncFinish finishes the current async span
	AsyncFinish()
}

Following the previous API definitions, you should follow these steps to use the async API:
 Call span.PrepareAsync() to prepare the span for operations in another goroutine.
 Use Span.End() in the original goroutine when the job in the current goroutine is complete.
 Propagate the span to any other goroutine in your plugin.
 Once the above steps are all set, call span.AsyncFinish() in any goroutine.
 When span.AsyncFinish() is complete for all spans, all spans will be finished and reported to the backend.
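A minimal sketch of these four steps, assuming the plugin-side tracing package is imported as tracing and that CreateLocalSpan returns a Span together with an error (the return values are not shown in the signatures above); the operation name is illustrative only:

span, err := tracing.CreateLocalSpan("example/async-job") // assumed (Span, error) return
if err != nil {
	return err
}
span.PrepareAsync() // step 1: the span may now be operated in another goroutine
go func() {
	// step 3: the span has been propagated to this goroutine
	span.Log("async work finished")
	span.AsyncFinish() // step 4: only after this is the span reported to the backend
}()
span.End() // step 2: end the span in the original goroutine; it stays alive until AsyncFinish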
Tracing Context Operation In the Go Agent, Trace Context would continue cross goroutines automatically by default. However, in some cases, goroutine would be context sharing due to be scheduled by the pool mechanism. Consider these advanced APIs to manipulate context and switch the current context.\n// CaptureContext capture current tracing context in the current goroutine. func CaptureContext() ContextSnapshot // ContinueContext continue the tracing context in the current goroutine. func ContinueContext(ctx ContextSnapshot) // CleanContext clean the tracing context in the current goroutine. func CleanContext() Typically, use APIs as following to control or switch the context:\n Use tracing.CaptureContext() to get the ContextSnapshot object. Propagate the snapshot context to any other goroutine in your plugin. Use tracing.ContinueContext(snapshot) to continue the snapshot context in the target goroutine.  Meter API The Meter API is used to record the metrics of the target program, and currently supports the following methods:\n// NewCounter creates a new counter metrics. // name is the name of the metrics // opts is the options for the metrics func NewCounter(name string, opts ...Opt) Counter // NewGauge creates a new gauge metrics. // name is the name of the metrics // getter is the function to get the value of the gauge meter // opts is the options for the metrics func NewGauge(name string, getter func() float64, opts ...Opt) Gauge // NewHistogram creates a new histogram metrics. // name is the name of the metrics // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogram(name string, steps []float64, opts ...Opt) Histogram // NewHistogramWithMinValue creates a new histogram metrics. // name is the name of the metrics // minVal is the min value of the histogram bucket // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogramWithMinValue(name string, minVal float64, steps []float64, opts ...Opt) Histogram // RegisterBeforeCollectHook registers a hook function which will be called before metrics collect. func RegisterBeforeCollectHook(f func()) Meter Option The Meter Options can be passed when creating a Meter to configure the information in the Meter.\n// WithLabel adds a label to the metrics. func WithLabel(key, value string) Opt Meter Type Counter Counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase.\ntype Counter interface { // Get returns the current value of the counter. \tGet() float64 // Inc increments the counter with value. \tInc(val float64) } Gauge Gauge is a metric that represents a single numerical value that can arbitrarily go up and down.\ntype Gauge interface { // Get returns the current value of the gauge.  Get() float64 } Histogram Histogram is a metric that represents the distribution of a set of values.\ntype Histogram interface { // Observe find the value associate bucket and add 1. \tObserve(val float64) // ObserveWithCount find the value associate bucket and add specific count. \tObserveWithCount(val float64, count int64) } Import Plugin Once you have finished developing the plugin, you need to import the completed module into the Agent program and define it in the corresponding file.\nAt this point, your plugin development process is complete. 
When the Agent performs hybrid compilation on the target program, your plugin will be executed as expected.\n","title":"Plugin Development Guide","url":"/docs/skywalking-go/latest/en/development-and-contribution/development-guide/"},{"content":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll plugins must follow these steps:\n Create a new plugin module: Create a new project in the specified directory and import the plugin API module. Define the enhancement object: Define the description for the plugin. Invoke the plugin API: Call the API provided by the core to complete the core invocation. Import the plugin module: Import the plugin into the management module for users to use.  Create a new plugin module The plugin must create a new module, which is currently stored in the project\u0026rsquo;s plugins directory.\nPlugins can import the following two modules:\n Agent core: This module provides all the dependencies needed for the plugin, including the plugin API, enhancement declaration objects, etc. Agent core plugin should be github.com/apache/skywalking-go/plugins/core and replaced by the relative location. Framework to be enhanced: Import the framework you wish to enhance.  Note: Plugins should NOT import and use any other modules, as this may cause compilation issues for users. If certain tools are needed, they should be provided by the agent core.\nDefine the enhancement object In the root directory of the project, create a new go file to define the basic information of the plugin. The basic information includes the following methods, corresponding to the Instrument interface:\n Name: The name of the plugin. Please keep this name consistent with the newly created project name. The reason will be explained later. Base Package: Declare which package this plugin intercepts. For example, if you want to intercept gin, you can write: \u0026ldquo;github.com/gin-gonic/gin\u0026rdquo;. Version Checker: This method passes the version number to the enhancement object to verify whether the specified version of the framework is supported. If not, the enhancement program will not be executed. Points: A plugin can define one or more enhancement points. This will be explained in more detail in the following sections. File System: Use //go:embed * in the current file to import all files in this module, which will be used for file copying during the mixed compilation process.  Note: Please declare //skywalking:nocopy at any position in this file to indicate that the file would not be copied. This file is only used for guidance during hybrid compilation. Also, this file involves the use of the embed package, and if the target framework does not import the package embed, a compilation error may occur.\nManage Instrument and Interceptor codes in hierarchy structure Instrument and interceptor codes are placed in root by default. In complex instrumentation scenarios, there could be dozens of interceptors, we provide PluginSourceCodePath to build a hierarchy folder structure to manage those codes.\nNotice: The instrumentation still works without proper setting of this, but the debug tool would lose the location of the source codes.\nExample For example, the framework needs to enhance two packages, as shown in the following directory structure:\n- plugins - test - go.mod - package1 - instrument.go - interceptor.go - package2 - instrument.go - interceptor.go ... 
In the above directory structure, the test framework needs to provide multiple different enhancement objects. In this case, a PluginSourceCodePath Source Code Path** method needs to be added for each enhancement object, the values of this method should be package1 and package2.\nInstrument Point Instrument points are used to declare that which methods and structs in the current package should be instrumented. They mainly include the following information:\n Package path: If the interception point that needs to be intercepted is not in the root directory of the current package, you need to fill in the relative path to the package. For example, if this interception point wants to instrument content in the github.com/gin-gonic/gin/render directory, you need to fill in render here. Package Name(optional): Define the package name of the current package. If the package name is not defined, the package name of the current package would be used by default. It\u0026rsquo;s used when the package path and package name are not same, such as the name of github.com/emicklei/go-restful/v3 is restful. Matcher(At): Specify which eligible content in the current package path needs to be enhanced. Interceptor: If the current method is being intercepted (whether it\u0026rsquo;s a static method or an instance method), the name of the interceptor must be specified.  Method Matcher Method matchers are used to intercept both static and non-static methods. The specific definitions are as follows:\n// NewStaticMethodEnhance creates a new EnhanceMatcher for static method. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewStaticMethodEnhance(name string, filters ...MethodFilterOption) // NewMethodEnhance creates a new EnhanceMatcher for method. // receiver: receiver type name of method needs to be enhanced. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewMethodEnhance(receiver, name string, filters ...MethodFilterOption) Filter Option Filter Options are used to validate the parameters or return values in the method. If the method name matches but the Options validation fails, the enhancement would not be performed.\n// WithArgsCount filter methods with specific count of arguments. func WithArgsCount(argsCount int) // WithResultCount filter methods with specific count of results. func WithResultCount(resultCount int) // WithArgType filter methods with specific type of the index of the argument. func WithArgType(argIndex int, dataType string) // WithResultType filter methods with specific type of the index of the result. func WithResultType(argIndex int, dataType string) Demo For example, if you have the following method that needs to be intercepted:\nfunc (c *Context) HandleMethod(name string) bool you can describe it using this condition:\ninstrument.NewMethodEnhance(\u0026#34;*Context\u0026#34;, \u0026#34;HandleMethod\u0026#34;, instrument.WithArgsCount(1), instrument.WithArgType(0, \u0026#34;string\u0026#34;), instrument.WithResultCount(1), instrument.WithResultType(0, \u0026#34;bool\u0026#34;)) Struct Matcher Enhancement structures can embed enhanced fields within specified structs. After the struct is instantiated, custom data content can be added to the specified struct in the method interceptor.\nStruct matchers are used to intercept struct methods. The specific definitions are as follows:\n// NewStructEnhance creates a new EnhanceMatcher for struct. 
// name: struct name needs to be enhanced.(Public and private structs are supported) // filters: filters for struct. func NewStructEnhance(name string, filters ...StructFilterOption) Filter Option Filter Options are used to validate the fields in the structure.\n// WithFieldExists filter the struct has the field with specific name. func WithFieldExists(fieldName string) // WithFiledType filter the struct has the field with specific name and type. func WithFiledType(filedName, filedType string) Enhanced Instance After completing the definition of the struct enhancement, you can convert the specified instance into the following interface when intercepting methods, and get or set custom field information. The interface definition is as follows:\ntype EnhancedInstance interface { // GetSkyWalkingDynamicField get the customized data from instance \tGetSkyWalkingDynamicField() interface{} // SetSkyWalkingDynamicField set the customized data into the instance \tSetSkyWalkingDynamicField(interface{}) } Demo For example, if you have the following struct that needs to be enhanced:\ntype Test struct { value *Context } you can describe it using this condition:\ninstrument.NewStructEnhance(\u0026#34;Test\u0026#34;, instrument.WithFieldExists(\u0026#34;value\u0026#34;), instrument.WithFiledType(\u0026#34;value\u0026#34;, \u0026#34;*Context\u0026#34;)) Next, you can set custom content for the specified enhanced instance when intercepting methods.\nins := testInstance.(instrument.EnhancedInstance) // setting custom content ins.SetSkyWalkingDynamicField(\u0026#34;custom content\u0026#34;) // getting custom content res := ins.GetSkyWalkingDynamicField() Interceptor Interceptors are used to define custom business logic before and after method execution, allowing you to access data from before and after method execution and interact with the Agent Core by using the Agent API.\nThe interceptor definition is as follows, you need to create a new structure and implement it:\ntype Interceptor interface { // BeforeInvoke would be called before the target method invocation.  BeforeInvoke(invocation Invocation) error // AfterInvoke would be called after the target method invocation.  AfterInvoke(invocation Invocation, result ...interface{}) error } Within the interface, you can see the Invocation interface, which defines the context of an interception. The specific definition is as follows:\ntype Invocation interface { // CallerInstance is the instance of the caller, nil if the method is static method.  CallerInstance() interface{} // Args is get the arguments of the method, please cast to the specific type to get more information.  Args() []interface{} // ChangeArg is change the argument value of the method  ChangeArg(int, interface{}) // IsContinue is the flag to control the method invocation, if it is true, the target method would not be invoked.  IsContinue() bool // DefineReturnValues are defined the return value of the method, and continue the method invoked  DefineReturnValues(...interface{}) // SetContext is the customized context of the method invocation, it should be propagated the tracing span.  
SetContext(interface{}) // GetContext is get the customized context of the method invocation  GetContext() interface{} } Thread safe The Interceptor instance would define new instance at the current package level, rather than creating a new instance each time a method is intercepted.\nTherefore, do not declare objects in the interceptor, and instead use Invocation.Context to pass data.\nPackage Path If the method you want to intercept is not located in the root directory of the framework, place your interceptor code in the relative location within the plugin. The Agent would only copy files from the same package directory.\nFor example, if you want to intercept a method in github.com/gin-gonic/gin/render, create a render directory in the root of your plugin, and put the interceptor inside it. This ensures that the interceptor is properly included during the copy operation and can be correctly applied to the target package.\nPlugin Configuration Plugin configuration is used to add custom configuration parameters to a specified plugin. When users specify configuration items, the plugin can dynamically adapt the content needed in the plugin according to the user\u0026rsquo;s configuration items.\nDeclaration Please declare the configuration file you need in the package you want to use. Declare it using var, and add the //skywalking:config directive to specify that this variable requires dynamic updating.\nBy default, the configuration item belongs to the configuration of the current plugin. For example, if the name of my current plugin is gin, then this configuration item is under the gin plugin. Of course, you can also change it to the http plugin to reference the configuration information of the relevant plugin, in which case you need to specify it as //skywalking:config http.\nItem Each configuration item needs to add a config tag. This is used to specify the name of the current configuration content. By default, it would lowercase all letters and add an _ identifier before each uppercase letter.\nCurrently, it supports basic data types and struct types, and it also supports obtaining data values through environment variables.\nDemo For example, I have declared the following configuration item:\n//skywalking:config http var config struct { ServerCollectParameters bool `config:\u0026#34;server_collect_parameters\u0026#34;` Client struct{ CollectParameters bool `config:\u0026#34;collect_parameters\u0026#34;` } `config:\u0026#34;client\u0026#34;` } In the above example, I created a plugin configuration for http, which includes two configuration items.\n config.ServerCollectParameters: Its configuration is located at http.server_collect_parameters. config.Client.CollectParameter: Its configuration is located at http.client.collect_parameter.  When the plugin needs to be used, it can be accessed directly by reading the config configuration.\nAgent API The Agent API is used when a method is intercepted and interacts with the Agent Core.\nTracing API The Tracing API is used for building distributed tracing, and currently supports the following methods:\n// CreateEntrySpan creates a new entry span. // operationName is the name of the span. // extractor is the extractor to extract the context from the carrier. // opts is the options to create the span. func CreateEntrySpan(operationName string, extractor Extractor, opts ...SpanOption) // CreateLocalSpan creates a new local span. // operationName is the name of the span. // opts is the options to create the span. 
func CreateLocalSpan(operationName string, opts ...SpanOption) // CreateExitSpan creates a new exit span. // operationName is the name of the span. // peer is the peer address of the span. // injector is the injector to inject the context into the carrier. // opts is the options to create the span. func CreateExitSpan(operationName, peer string, injector Injector, opts ...SpanOption) // ActiveSpan returns the current active span, it can be got the current span in the current goroutine. // If the current goroutine is not in the context of the span, it will return nil. // If get the span from other goroutine, it can only get information but cannot be operated. func ActiveSpan() // GetRuntimeContextValue returns the value of the key in the runtime context, which is current goroutine. // The value can also read from the goroutine which is created by the current goroutine func GetRuntimeContextValue(key string) // SetRuntimeContextValue sets the value of the key in the runtime context. func SetRuntimeContextValue(key string, val interface{}) Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\n// Extractor is a tool specification which define how to // extract trace parent context from propagation context type Extractor func(headerKey string) (string, error) // Injector is a tool specification which define how to // inject trace context into propagation context type Injector func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request tracing.CreateEntrySpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request tracing.CreateExitSpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil } Span Option Span Options can be passed when creating a Span to configure the information in the Span.\nThe following options are currently supported:\n// WithLayer set the SpanLayer of the Span func WithLayer(layer SpanLayer) // WithComponent set the component id of the Span func WithComponent(componentID int32) // WithTag set the Tag of the Span func WithTag(key Tag, value string) Span Component The Component ID in Span is used to identify the current component, with its data defined in SkyWalking OAP. 
If the framework you are writing does not exist in this file, please submit a PR in the SkyWalking project to add the definition of this plugin.\nSpan Operation After creating a Span, you can perform additional operations on it.\n// Span for plugin API type Span interface { // AsyncSpan for the async API \tAsyncSpan // Tag set the Tag of the Span \tTag(Tag, string) // SetSpanLayer set the SpanLayer of the Span \tSetSpanLayer(SpanLayer) // SetOperationName re-set the operation name of the Span \tSetOperationName(string) // SetPeer re-set the peer address of the Span \tSetPeer(string) // Log add log to the Span \tLog(...string) // Error add error log to the Span \tError(...string) // End end the Span \tEnd() } Async Span There is a set of advanced APIs in Span which is specifically designed for async use cases. When setting name, tags, logs, and other operations (including end span) of the span in another goroutine, you should use these APIs.\ntype AsyncSpan interface { // PrepareAsync the span finished at current tracing context, but current span is still alive until AsyncFinish called  PrepareAsync() // AsyncFinish to finished current async span  AsyncFinish() } Following the previous API define, you should following these steps to use the async API:\n Call span.PrepareAsync() to prepare the span to do any operation in another goroutine. Use Span.End() in the original goroutine when your job in the current goroutine is complete. Propagate the span to any other goroutine in your plugin. Once the above steps are all set, call span.AsyncFinish() in any goroutine. When the span.AsyncFinish() is complete for all spans, the all spans would be finished and report to the backend.  Tracing Context Operation In the Go Agent, Trace Context would continue cross goroutines automatically by default. However, in some cases, goroutine would be context sharing due to be scheduled by the pool mechanism. Consider these advanced APIs to manipulate context and switch the current context.\n// CaptureContext capture current tracing context in the current goroutine. func CaptureContext() ContextSnapshot // ContinueContext continue the tracing context in the current goroutine. func ContinueContext(ctx ContextSnapshot) // CleanContext clean the tracing context in the current goroutine. func CleanContext() Typically, use APIs as following to control or switch the context:\n Use tracing.CaptureContext() to get the ContextSnapshot object. Propagate the snapshot context to any other goroutine in your plugin. Use tracing.ContinueContext(snapshot) to continue the snapshot context in the target goroutine.  Meter API The Meter API is used to record the metrics of the target program, and currently supports the following methods:\n// NewCounter creates a new counter metrics. // name is the name of the metrics // opts is the options for the metrics func NewCounter(name string, opts ...Opt) Counter // NewGauge creates a new gauge metrics. // name is the name of the metrics // getter is the function to get the value of the gauge meter // opts is the options for the metrics func NewGauge(name string, getter func() float64, opts ...Opt) Gauge // NewHistogram creates a new histogram metrics. // name is the name of the metrics // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogram(name string, steps []float64, opts ...Opt) Histogram // NewHistogramWithMinValue creates a new histogram metrics. 
// name is the name of the metrics // minVal is the min value of the histogram bucket // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogramWithMinValue(name string, minVal float64, steps []float64, opts ...Opt) Histogram // RegisterBeforeCollectHook registers a hook function which will be called before metrics collect. func RegisterBeforeCollectHook(f func()) Meter Option The Meter Options can be passed when creating a Meter to configure the information in the Meter.\n// WithLabel adds a label to the metrics. func WithLabel(key, value string) Opt Meter Type Counter Counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase.\ntype Counter interface { // Get returns the current value of the counter. \tGet() float64 // Inc increments the counter with value. \tInc(val float64) } Gauge Gauge is a metric that represents a single numerical value that can arbitrarily go up and down.\ntype Gauge interface { // Get returns the current value of the gauge.  Get() float64 } Histogram Histogram is a metric that represents the distribution of a set of values.\ntype Histogram interface { // Observe find the value associate bucket and add 1. \tObserve(val float64) // ObserveWithCount find the value associate bucket and add specific count. \tObserveWithCount(val float64, count int64) } Import Plugin Once you have finished developing the plugin, you need to import the completed module into the Agent program and define it in the corresponding file.\nAt this point, your plugin development process is complete. When the Agent performs hybrid compilation on the target program, your plugin will be executed as expected.\n","title":"Plugin Development Guide","url":"/docs/skywalking-go/next/en/development-and-contribution/development-guide/"},{"content":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll plugins must follow these steps:\n Create a new plugin module: Create a new project in the specified directory and import the plugin API module. Define the enhancement object: Define the description for the plugin. Invoke the plugin API: Call the API provided by the core to complete the core invocation. Import the plugin module: Import the plugin into the management module for users to use.  Create a new plugin module The plugin must create a new module, which is currently stored in the project\u0026rsquo;s plugins directory.\nPlugins can import the following two modules:\n Agent core: This module provides all the dependencies needed for the plugin, including the plugin API, enhancement declaration objects, etc. Agent core plugin should be github.com/apache/skywalking-go/plugins/core and replaced by the relative location. Framework to be enhanced: Import the framework you wish to enhance.  Note: Plugins should NOT import and use any other modules, as this may cause compilation issues for users. If certain tools are needed, they should be provided by the agent core.\nDefine the enhancement object In the root directory of the project, create a new go file to define the basic information of the plugin. The basic information includes the following methods, corresponding to the Instrument interface:\n Name: The name of the plugin. Please keep this name consistent with the newly created project name. The reason will be explained later. Base Package: Declare which package this plugin intercepts. 
For example, if you want to intercept gin, you can write: \u0026ldquo;github.com/gin-gonic/gin\u0026rdquo;. Version Checker: This method passes the version number to the enhancement object to verify whether the specified version of the framework is supported. If not, the enhancement program will not be executed. Points: A plugin can define one or more enhancement points. This will be explained in more detail in the following sections. File System: Use //go:embed * in the current file to import all files in this module, which will be used for file copying during the mixed compilation process.  Note: Please declare //skywalking:nocopy at any position in this file to indicate that the file would not be copied. This file is only used for guidance during hybrid compilation. Also, this file involves the use of the embed package, and if the target framework does not import the package embed, a compilation error may occur.\nManage Instrument and Interceptor codes in hierarchy structure Instrument and interceptor codes are placed in root by default. In complex instrumentation scenarios, there could be dozens of interceptors, we provide PluginSourceCodePath to build a hierarchy folder structure to manage those codes.\nNotice: The instrumentation still works without proper setting of this, but the debug tool would lose the location of the source codes.\nExample For example, the framework needs to enhance two packages, as shown in the following directory structure:\n- plugins - test - go.mod - package1 - instrument.go - interceptor.go - package2 - instrument.go - interceptor.go ... In the above directory structure, the test framework needs to provide multiple different enhancement objects. In this case, a PluginSourceCodePath Source Code Path** method needs to be added for each enhancement object, the values of this method should be package1 and package2.\nInstrument Point Instrument points are used to declare that which methods and structs in the current package should be instrumented. They mainly include the following information:\n Package path: If the interception point that needs to be intercepted is not in the root directory of the current package, you need to fill in the relative path to the package. For example, if this interception point wants to instrument content in the github.com/gin-gonic/gin/render directory, you need to fill in render here. Package Name(optional): Define the package name of the current package. If the package name is not defined, the package name of the current package would be used by default. It\u0026rsquo;s used when the package path and package name are not same, such as the name of github.com/emicklei/go-restful/v3 is restful. Matcher(At): Specify which eligible content in the current package path needs to be enhanced. Interceptor: If the current method is being intercepted (whether it\u0026rsquo;s a static method or an instance method), the name of the interceptor must be specified.  Method Matcher Method matchers are used to intercept both static and non-static methods. The specific definitions are as follows:\n// NewStaticMethodEnhance creates a new EnhanceMatcher for static method. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewStaticMethodEnhance(name string, filters ...MethodFilterOption) // NewMethodEnhance creates a new EnhanceMatcher for method. // receiver: receiver type name of method needs to be enhanced. 
// name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewMethodEnhance(receiver, name string, filters ...MethodFilterOption) Filter Option Filter Options are used to validate the parameters or return values in the method. If the method name matches but the Options validation fails, the enhancement would not be performed.\n// WithArgsCount filter methods with specific count of arguments. func WithArgsCount(argsCount int) // WithResultCount filter methods with specific count of results. func WithResultCount(resultCount int) // WithArgType filter methods with specific type of the index of the argument. func WithArgType(argIndex int, dataType string) // WithResultType filter methods with specific type of the index of the result. func WithResultType(argIndex int, dataType string) Demo For example, if you have the following method that needs to be intercepted:\nfunc (c *Context) HandleMethod(name string) bool you can describe it using this condition:\ninstrument.NewMethodEnhance(\u0026#34;*Context\u0026#34;, \u0026#34;HandleMethod\u0026#34;, instrument.WithArgsCount(1), instrument.WithArgType(0, \u0026#34;string\u0026#34;), instrument.WithResultCount(1), instrument.WithResultType(0, \u0026#34;bool\u0026#34;)) Struct Matcher Enhancement structures can embed enhanced fields within specified structs. After the struct is instantiated, custom data content can be added to the specified struct in the method interceptor.\nStruct matchers are used to intercept struct methods. The specific definitions are as follows:\n// NewStructEnhance creates a new EnhanceMatcher for struct. // name: struct name needs to be enhanced.(Public and private structs are supported) // filters: filters for struct. func NewStructEnhance(name string, filters ...StructFilterOption) Filter Option Filter Options are used to validate the fields in the structure.\n// WithFieldExists filter the struct has the field with specific name. func WithFieldExists(fieldName string) // WithFiledType filter the struct has the field with specific name and type. func WithFiledType(filedName, filedType string) Enhanced Instance After completing the definition of the struct enhancement, you can convert the specified instance into the following interface when intercepting methods, and get or set custom field information. 
The interface definition is as follows:\ntype EnhancedInstance interface { // GetSkyWalkingDynamicField get the customized data from instance \tGetSkyWalkingDynamicField() interface{} // SetSkyWalkingDynamicField set the customized data into the instance \tSetSkyWalkingDynamicField(interface{}) } Demo For example, if you have the following struct that needs to be enhanced:\ntype Test struct { value *Context } you can describe it using this condition:\ninstrument.NewStructEnhance(\u0026#34;Test\u0026#34;, instrument.WithFieldExists(\u0026#34;value\u0026#34;), instrument.WithFiledType(\u0026#34;value\u0026#34;, \u0026#34;*Context\u0026#34;)) Next, you can set custom content for the specified enhanced instance when intercepting methods.\nins := testInstance.(instrument.EnhancedInstance) // setting custom content ins.SetSkyWalkingDynamicField(\u0026#34;custom content\u0026#34;) // getting custom content res := ins.GetSkyWalkingDynamicField() Interceptor Interceptors are used to define custom business logic before and after method execution, allowing you to access data from before and after method execution and interact with the Agent Core by using the Agent API.\nThe interceptor definition is as follows, you need to create a new structure and implement it:\ntype Interceptor interface { // BeforeInvoke would be called before the target method invocation.  BeforeInvoke(invocation Invocation) error // AfterInvoke would be called after the target method invocation.  AfterInvoke(invocation Invocation, result ...interface{}) error } Within the interface, you can see the Invocation interface, which defines the context of an interception. The specific definition is as follows:\ntype Invocation interface { // CallerInstance is the instance of the caller, nil if the method is static method.  CallerInstance() interface{} // Args is get the arguments of the method, please cast to the specific type to get more information.  Args() []interface{} // ChangeArg is change the argument value of the method  ChangeArg(int, interface{}) // IsContinue is the flag to control the method invocation, if it is true, the target method would not be invoked.  IsContinue() bool // DefineReturnValues are defined the return value of the method, and continue the method invoked  DefineReturnValues(...interface{}) // SetContext is the customized context of the method invocation, it should be propagated the tracing span.  SetContext(interface{}) // GetContext is get the customized context of the method invocation  GetContext() interface{} } Thread safe The Interceptor instance would define new instance at the current package level, rather than creating a new instance each time a method is intercepted.\nTherefore, do not declare objects in the interceptor, and instead use Invocation.Context to pass data.\nPackage Path If the method you want to intercept is not located in the root directory of the framework, place your interceptor code in the relative location within the plugin. The Agent would only copy files from the same package directory.\nFor example, if you want to intercept a method in github.com/gin-gonic/gin/render, create a render directory in the root of your plugin, and put the interceptor inside it. This ensures that the interceptor is properly included during the copy operation and can be correctly applied to the target package.\nPlugin Configuration Plugin configuration is used to add custom configuration parameters to a specified plugin. 
When users specify configuration items, the plugin can dynamically adapt the content needed in the plugin according to the user\u0026rsquo;s configuration items.\nDeclaration Please declare the configuration file you need in the package you want to use. Declare it using var, and add the //skywalking:config directive to specify that this variable requires dynamic updating.\nBy default, the configuration item belongs to the configuration of the current plugin. For example, if the name of my current plugin is gin, then this configuration item is under the gin plugin. Of course, you can also change it to the http plugin to reference the configuration information of the relevant plugin, in which case you need to specify it as //skywalking:config http.\nItem Each configuration item needs to add a config tag. This is used to specify the name of the current configuration content. By default, it would lowercase all letters and add an _ identifier before each uppercase letter.\nCurrently, it supports basic data types and struct types, and it also supports obtaining data values through environment variables.\nDemo For example, I have declared the following configuration item:\n//skywalking:config http var config struct { ServerCollectParameters bool `config:\u0026#34;server_collect_parameters\u0026#34;` Client struct{ CollectParameters bool `config:\u0026#34;collect_parameters\u0026#34;` } `config:\u0026#34;client\u0026#34;` } In the above example, I created a plugin configuration for http, which includes two configuration items.\n config.ServerCollectParameters: Its configuration is located at http.server_collect_parameters. config.Client.CollectParameter: Its configuration is located at http.client.collect_parameter.  When the plugin needs to be used, it can be accessed directly by reading the config configuration.\nAgent API The Agent API is used when a method is intercepted and interacts with the Agent Core.\nTracing API The Tracing API is used for building distributed tracing, and currently supports the following methods:\n// CreateEntrySpan creates a new entry span. // operationName is the name of the span. // extractor is the extractor to extract the context from the carrier. // opts is the options to create the span. func CreateEntrySpan(operationName string, extractor Extractor, opts ...SpanOption) // CreateLocalSpan creates a new local span. // operationName is the name of the span. // opts is the options to create the span. func CreateLocalSpan(operationName string, opts ...SpanOption) // CreateExitSpan creates a new exit span. // operationName is the name of the span. // peer is the peer address of the span. // injector is the injector to inject the context into the carrier. // opts is the options to create the span. func CreateExitSpan(operationName, peer string, injector Injector, opts ...SpanOption) // ActiveSpan returns the current active span, it can be got the current span in the current goroutine. // If the current goroutine is not in the context of the span, it will return nil. // If get the span from other goroutine, it can only get information but cannot be operated. func ActiveSpan() // GetRuntimeContextValue returns the value of the key in the runtime context, which is current goroutine. // The value can also read from the goroutine which is created by the current goroutine func GetRuntimeContextValue(key string) // SetRuntimeContextValue sets the value of the key in the runtime context. 
Context Carrier The context carrier is used to pass the context between different applications.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\n// Extractor is a tool specification which defines how to // extract the trace parent context from the propagation context type Extractor func(headerKey string) (string, error) // Injector is a tool specification which defines how to // inject the trace context into the propagation context type Injector func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request tracing.CreateEntrySpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request tracing.CreateExitSpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Span Option Span Options can be passed when creating a Span to configure the information in the Span.\nThe following options are currently supported:\n// WithLayer set the SpanLayer of the Span func WithLayer(layer SpanLayer) // WithComponent set the component id of the Span func WithComponent(componentID int32) // WithTag set the Tag of the Span func WithTag(key Tag, value string) Span Component The Component ID in the Span is used to identify the current component, with its data defined in SkyWalking OAP. If the framework you are writing a plugin for is not defined in that component definition file, please submit a PR to the SkyWalking project to add a definition for this plugin.\nSpan Operation After creating a Span, you can perform additional operations on it.\n// Span for plugin API type Span interface { // AsyncSpan for the async API \tAsyncSpan // Tag set the Tag of the Span \tTag(Tag, string) // SetSpanLayer set the SpanLayer of the Span \tSetSpanLayer(SpanLayer) // SetOperationName re-set the operation name of the Span \tSetOperationName(string) // SetPeer re-set the peer address of the Span \tSetPeer(string) // Log add log to the Span \tLog(...string) // Error add error log to the Span \tError(...string) // End end the Span \tEnd() } Async Span There is a set of advanced APIs in Span which is specifically designed for async use cases. When you need to set the name, tags, or logs of the span, or end it, in another goroutine, you should use these APIs.\ntype AsyncSpan interface { // PrepareAsync finishes the span in the current tracing context, but the span stays alive until AsyncFinish is called  PrepareAsync() // AsyncFinish finishes the current async span  AsyncFinish() } Following the API definition above, take these steps to use the async API:\n Call span.PrepareAsync() to prepare the span for operations in another goroutine. Use Span.End() in the original goroutine when your job in the current goroutine is complete. Propagate the span to any other goroutine in your plugin. Once the above steps are all set, call span.AsyncFinish() in any goroutine. When span.AsyncFinish() has completed for all spans, the spans are finished and reported to the backend, as shown in the sketch below.  
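This minimal sketch applies the steps above and assumes only the APIs listed in this section; the operation name and log content are hypothetical:
// step 1: create the span and prepare it for operations in another goroutine
span, err := tracing.CreateLocalSpan(\u0026#34;example/async-job\u0026#34;)
if err != nil {
    return err
}
span.PrepareAsync()
// step 3: propagate the span to another goroutine (here via the closure)
go func() {
    // the span can still be modified here until AsyncFinish is called
    span.Log(\u0026#34;event\u0026#34;, \u0026#34;async job finished\u0026#34;)
    span.AsyncFinish() // step 4: the span is reported once AsyncFinish completes
}()
span.End() // step 2: end the span in the original goroutine when the synchronous work is done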
Tracing Context Operation In the Go Agent, the trace context is propagated across goroutines automatically by default. However, in some cases goroutines share context unexpectedly because they are scheduled through a pool mechanism. Consider these advanced APIs to manipulate the context and switch the current context.\n// CaptureContext captures the current tracing context in the current goroutine. func CaptureContext() ContextSnapshot // ContinueContext continues the tracing context in the current goroutine. func ContinueContext(ctx ContextSnapshot) // CleanContext cleans the tracing context in the current goroutine. func CleanContext() Typically, use the APIs as follows to control or switch the context:\n Use tracing.CaptureContext() to get the ContextSnapshot object. Propagate the snapshot context to any other goroutine in your plugin. Use tracing.ContinueContext(snapshot) to continue the snapshot context in the target goroutine.  Meter API The Meter API is used to record the metrics of the target program, and currently supports the following methods:\n// NewCounter creates a new counter metric. // name is the name of the metric // opts is the options for the metric func NewCounter(name string, opts ...Opt) Counter // NewGauge creates a new gauge metric. // name is the name of the metric // getter is the function to get the value of the gauge meter // opts is the options for the metric func NewGauge(name string, getter func() float64, opts ...Opt) Gauge // NewHistogram creates a new histogram metric. // name is the name of the metric // steps is the buckets of the histogram // opts is the options for the metric func NewHistogram(name string, steps []float64, opts ...Opt) Histogram // NewHistogramWithMinValue creates a new histogram metric. // name is the name of the metric // minVal is the min value of the histogram bucket // steps is the buckets of the histogram // opts is the options for the metric func NewHistogramWithMinValue(name string, minVal float64, steps []float64, opts ...Opt) Histogram // RegisterBeforeCollectHook registers a hook function which will be called before metrics are collected. func RegisterBeforeCollectHook(f func()) Meter Option Meter Options can be passed when creating a Meter to configure the information in the Meter.\n// WithLabel adds a label to the metric. func WithLabel(key, value string) Opt Meter Type Counter Counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase.\ntype Counter interface { // Get returns the current value of the counter. \tGet() float64 // Inc increments the counter by the given value. \tInc(val float64) } Gauge Gauge is a metric that represents a single numerical value that can arbitrarily go up and down.\ntype Gauge interface { // Get returns the current value of the gauge.  Get() float64 } Histogram Histogram is a metric that represents the distribution of a set of values.\ntype Histogram interface { // Observe finds the bucket associated with the value and adds 1. \tObserve(val float64) // ObserveWithCount finds the bucket associated with the value and adds the specified count. \tObserveWithCount(val float64, count int64) } Import Plugin Once you have finished developing the plugin, you need to import the completed module into the Agent program and define it in the corresponding file.\nAt this point, your plugin development process is complete. 
When the Agent performs hybrid compilation on the target program, your plugin will be executed as expected.\n","title":"Plugin Development Guide","url":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/development-guide/"},{"content":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. 
For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. * @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. 
Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. setnx) cache.op indicates the command is used for write or read operation , usually the value is converting from command cache.key indicates the cache key that would be sent to cache server , this tag maybe null , as string type key would be collected usually.  In order to decide which op should be converted to flexibly , It\u0026rsquo;s better that providing config property . Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analysis MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates MQ queue name mq.topic indicates MQ topic name , It\u0026rsquo;s optional as some MQ don\u0026rsquo;t hava concept of topic transmission.latency The transmission latency from consumer to producer. Usually you needn\u0026rsquo;t to record this tag manually, instead to call contextCarrier.extensionInjector().injectSendingTimestamp(); to record tag sendingTimestamp on producer side , and SkyWalking would record this tag on consumer side if sw8-x context carrier(from producer side) contains sendingTimestamp  Notice , you should set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. 
When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation tech and AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC(MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  
Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because of the fact that ThirdPartyClass dose not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but it doesn\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, Use Full Qualified Class Name String Literature Instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexcepted methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClassName() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs, if you want to change the argument ref in interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn Skywalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. 
@Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. 
SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. 
*/ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. 
This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","title":"Plugin Development Guide","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/java-plugin-development-guide/"},{"content":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  
See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. * @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. 
For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. setnx) cache.op indicates the command is used for write or read operation , usually the value is converting from command cache.key indicates the cache key that would be sent to cache server , this tag maybe null , as string type key would be collected usually.  In order to decide which op should be converted to flexibly , It\u0026rsquo;s better that providing config property . 
Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analysis MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates MQ queue name mq.topic indicates MQ topic name , It\u0026rsquo;s optional as some MQ don\u0026rsquo;t hava concept of topic transmission.latency The transmission latency from consumer to producer. Usually you needn\u0026rsquo;t to record this tag manually, instead to call contextCarrier.extensionInjector().injectSendingTimestamp(); to record tag sendingTimestamp on producer side , and SkyWalking would record this tag on consumer side if sw8-x context carrier(from producer side) contains sendingTimestamp  Notice , you should set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation tech and AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC(MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. 
Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because of the fact that ThirdPartyClass dose not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but it doesn\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, Use Full Qualified Class Name String Literature Instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexcepted methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClassName() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs, if you want to change the argument ref in interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn Skywalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. 
For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. 
* * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","title":"Plugin Development Guide","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/java-plugin-development-guide/"},{"content":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. 
Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. * @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. 
* * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statements is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check the Slow Database Statement document of the OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but still requires statistics. The value of x-le should be in JSON format. There are two options:\n Define a separate logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local spans.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span as representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analyzes database (SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records the database type, such as sql, cassandra, Elasticsearch. db.statement records the SQL statement of the database access.  Read the backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analyzes cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type, usually the official name of the cache (e.g. Redis). cache.cmd indicates the cache command that is sent to the cache server (e.g. setnx). cache.op indicates whether the command is a write or a read operation; the value is usually converted from the command. cache.key indicates the cache key that is sent to the cache server; this tag may be null, as usually only string-type keys are collected.  To decide flexibly which commands map to which op, it is better to provide a config property.
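As a concrete illustration, the following hedged sketch shows how a cache plugin could populate these tags on an exit span. The operation name, peer address and helper class are hypothetical; the tags are written through the plain AbstractSpan#tag(String, String) overload listed above, whereas real plugins usually go through the StringTag constants and also set the span's component and layer.

import org.apache.skywalking.apm.agent.core.context.ContextCarrier;
import org.apache.skywalking.apm.agent.core.context.ContextManager;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;

public class CacheTagExample {
    // Hypothetical helper showing how a cache plugin could populate the tags above.
    public static void traceSet(String key) {
        ContextCarrier carrier = new ContextCarrier();   // nothing to inject for a cache call
        AbstractSpan span = ContextManager.createExitSpan("Redis/SET", carrier, "redis-cluster:6379");
        span.tag("cache.type", "Redis");  // official name of the cache
        span.tag("cache.cmd", "set");     // command sent to the cache server
        span.tag("cache.op", "write");    // converted from the command
        span.tag("cache.key", key);       // usually only string keys are collected
        ContextManager.stopSpan();        // in a real interceptor this belongs in afterMethod
    }
}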
Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analyzes MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates the MQ queue name. mq.topic indicates the MQ topic name; it is optional, as some MQs don\u0026rsquo;t have the concept of a topic. transmission.latency indicates the transmission latency from producer to consumer. Usually you don\u0026rsquo;t need to record this tag manually; instead, call contextCarrier.extensionInjector().injectSendingTimestamp(); on the producer side to record the sendingTimestamp tag, and SkyWalking will record this tag on the consumer side if the sw8-x context carrier (from the producer side) contains sendingTimestamp.  Notice that you should set the peer on both sides (producer and consumer), and the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #asyncFinish has been called for every #prepareForAsync, the tracing context will be finished and will report to the backend (based on the count of API executions).  Develop a plugin Abstract The basic way to trace is to intercept a Java method, using byte code manipulation technology and the AOP concept. SkyWalking has packaged the byte code manipulation technology and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructors, instance methods and class methods.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which can propagate context through the MIC (MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points.
Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class) or byName(ThirdPartyClass.class.getName()), because ThirdPartyClass does not necessarily exist in the target application and this will break the agent. We have import checks to assist in catching this in CI, but they don\u0026rsquo;t cover all scenarios of this limitation, so never try to work around it by using the fully-qualified class name (FQCN); i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, use a fully qualified class name string literal instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still do not use *.class.getName() to get the class String name. We recommend using a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexpected methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClass() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use a Matcher to set the target methods. Return true in isOverrideArgs if you want to change the argument refs in the interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn SkyWalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Check the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (the array type descriptor format defined in the JVM Specification) to define an Array type.
For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. 
* * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
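Before the concrete config keys below, here is a small, hypothetical sketch of how interceptor code consumes such static config fields at runtime: the agent core has already filled them from the agent.config file, system properties or environment variables, so the plugin only reads them. The helper class, method and tag key are illustrative; the two fields are the ones declared in the SpringMVCPluginConfig class shown next, and the import of that class is an assumption about its package.

import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;
// Assumed package of the config class shown below; adjust to the actual plugin module.
import org.apache.skywalking.apm.plugin.spring.mvc.commons.SpringMVCPluginConfig;

public class HttpParamTagger {
    // Hypothetical helper: tags the span with request parameters only when collection is
    // enabled, truncated to the configured threshold (negative means unlimited).
    static void tagParamsIfEnabled(AbstractSpan span, String rawParams) {
        if (!SpringMVCPluginConfig.Plugin.SpringMVC.COLLECT_HTTP_PARAMS) {
            return; // feature switched off (the default)
        }
        int limit = SpringMVCPluginConfig.Plugin.Http.HTTP_PARAMS_LENGTH_THRESHOLD;
        String value = (limit >= 0 && rawParams.length() > limit)
                ? rawParams.substring(0, limit)
                : rawParams;
        span.tag("http.params", value); // tag key is illustrative
    }
}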
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","title":"Plugin Development Guide","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/java-plugin-development-guide/"},{"content":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. 
Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. * @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. 
* * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. setnx) cache.op indicates the command is used for write or read operation , usually the value is converting from command cache.key indicates the cache key that would be sent to cache server , this tag maybe null , as string type key would be collected usually.  In order to decide which op should be converted to flexibly , It\u0026rsquo;s better that providing config property . 
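For the database side, a hedged sketch of how a JDBC-style plugin could tag a statement execution on an exit span follows. The operation name, peer address and helper class are hypothetical; the tags are set through the plain AbstractSpan#tag(String, String) overload, while real plugins normally use the DB_TYPE / DB_STATEMENT StringTag constants shown above and also set a component.

import org.apache.skywalking.apm.agent.core.context.ContextCarrier;
import org.apache.skywalking.apm.agent.core.context.ContextManager;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;
import org.apache.skywalking.apm.agent.core.context.trace.SpanLayer;

public class DatabaseTagExample {
    // Hypothetical helper showing how a JDBC-style plugin could tag a statement execution.
    public static void traceQuery(String sql) {
        ContextCarrier carrier = new ContextCarrier();   // nothing to inject for a database call
        AbstractSpan span = ContextManager.createExitSpan("H2/JDBC/Statement/executeQuery", carrier, "localhost:9092");
        span.setLayer(SpanLayer.DB);    // DB layer, as listed above
        span.tag("db.type", "sql");     // drives the virtual-database analysis
        span.tag("db.statement", sql);  // slow statements are sampled from this value
        ContextManager.stopSpan();      // in a real interceptor this belongs in afterMethod
    }
}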
Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analysis MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates MQ queue name mq.topic indicates MQ topic name , It\u0026rsquo;s optional as some MQ don\u0026rsquo;t hava concept of topic transmission.latency The transmission latency from consumer to producer. Usually you needn\u0026rsquo;t to record this tag manually, instead to call contextCarrier.extensionInjector().injectSendingTimestamp(); to record tag sendingTimestamp on producer side , and SkyWalking would record this tag on consumer side if sw8-x context carrier(from producer side) contains sendingTimestamp  Notice , you should set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation tech and AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC(MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. 
Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because of the fact that ThirdPartyClass dose not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but it doesn\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, Use Full Qualified Class Name String Literature Instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexcepted methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClassName() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs, if you want to change the argument ref in interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn Skywalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. 
For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. 
* * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
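Before the configuration example below, here is a hedged sketch of a v2 interceptor that uses the per-invocation MethodInvocationContext to pass state from beforeMethod to afterMethod. The class name and the printed message are illustrative; it assumes MethodInvocationContext exposes setContext/getContext for user state and that the v2 types live in the enhance.v2 package of the agent core, so adjust the imports if the actual API differs.

import java.lang.reflect.Method;

import org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.EnhancedInstance;
import org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.v2.InstanceMethodsAroundInterceptorV2;
import org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.v2.MethodInvocationContext;

/**
 * Illustrative only: measures the wall time of the intercepted call by stashing the
 * start timestamp in the per-invocation MethodInvocationContext.
 */
public class TimingInterceptorV2 implements InstanceMethodsAroundInterceptorV2 {

    @Override
    public void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments,
                             Class<?>[] argumentsTypes, MethodInvocationContext context) {
        context.setContext(System.nanoTime()); // safe: the context is not shared across invocations
    }

    @Override
    public Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments,
                              Class<?>[] argumentsTypes, Object ret, MethodInvocationContext context) {
        long startNanos = (Long) context.getContext();
        long elapsedMicros = (System.nanoTime() - startNanos) / 1_000;
        // A real plugin would put this on a span or a meter; printing keeps the sketch minimal.
        System.out.println(method.getName() + " took " + elapsedMicros + " us");
        return ret;
    }

    @Override
    public void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments,
                                      Class<?>[] argumentsTypes, Throwable t, MethodInvocationContext context) {
        // No span was opened in this sketch; a real plugin would log t on the active span here.
    }
}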
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","title":"Plugin Development Guide","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/java-plugin-development-guide/"},{"content":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. 
Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initialize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Pass the ContextSnapshot to the sub-thread through method arguments, or carry it in existing arguments. Use ContextManager#continued in the sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri), the new ContextCarrier, and the peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. * @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. 
* * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and the layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statements is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check the Slow Database Statement document of the OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but still requires statistics. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analyzes database (SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records the database type, such as sql, cassandra, Elasticsearch. db.statement records the SQL statement of the database access.  Read the backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analyzes cache performance-related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type, usually the official name of the cache (e.g. Redis). cache.cmd indicates the cache command that would be sent to the cache server (e.g. setnx). cache.op indicates whether the command is a write or a read operation; the value is usually converted from the command. cache.key indicates the cache key that would be sent to the cache server; this tag may be null, as usually only string-type keys are collected.  To keep the command-to-op conversion flexible, it is better to expose it as a plugin config property. 
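For illustration, the following is a minimal sketch of how an interceptor might tag an exit span for a Redis SETNX call with these cache tags, so that the backend can derive the cache-related metrics described above. It is not taken from an official plugin: the operation name, peer address, and component constant are illustrative choices, it assumes the StringTag#set(AbstractSpan, String) helper from the agent core, and the tag constants shown above are repeated locally only to keep the sketch self-contained.

```java
import org.apache.skywalking.apm.agent.core.context.ContextManager;
import org.apache.skywalking.apm.agent.core.context.tag.StringTag;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;
import org.apache.skywalking.apm.agent.core.context.trace.SpanLayer;
import org.apache.skywalking.apm.network.trace.component.ComponentsDefine;

public class CacheCommandTracingSketch {
    // Same ids/keys as the constants documented above; normally you would reuse the agent core's constants.
    private static final StringTag CACHE_TYPE = new StringTag(15, "cache.type");
    private static final StringTag CACHE_CMD = new StringTag(17, "cache.cmd");
    private static final StringTag CACHE_OP = new StringTag(16, "cache.op");
    private static final StringTag CACHE_KEY = new StringTag(18, "cache.key");

    void traceSetnx(String key) {
        // "Redis/SETNX" and the peer address are placeholder values for the example.
        AbstractSpan span = ContextManager.createExitSpan("Redis/SETNX", "127.0.0.1:6379");
        span.setComponent(ComponentsDefine.JEDIS); // pick the component matching the client library
        SpanLayer.asDB(span);                      // layer choice may differ by agent version
        CACHE_TYPE.set(span, "Redis");             // official name of the cache system
        CACHE_CMD.set(span, "setnx");              // raw command sent to the cache server
        CACHE_OP.set(span, "write");               // read/write, ideally converted via a plugin config property
        CACHE_KEY.set(span, key);                  // may be skipped when the key is not a plain string
        ContextManager.stopSpan();
    }
}
```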
Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analyzes MQ performance-related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates the MQ queue name. mq.topic indicates the MQ topic name; it is optional, as some MQs don\u0026rsquo;t have the concept of a topic. transmission.latency records the transmission latency from the producer to the consumer. Usually you don\u0026rsquo;t need to record this tag manually; instead, call contextCarrier.extensionInjector().injectSendingTimestamp(); to record the sendingTimestamp tag on the producer side, and SkyWalking will record this tag on the consumer side if the sw8-x context carrier (from the producer side) contains sendingTimestamp.  Notice: you should set the peer on both sides (producer and consumer), and the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which are specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #asyncFinish has been called for all prepared spans, the tracing context will be finished and reported to the backend (based on the count of API executions).  Develop a plugin Abstract The basic method of tracing is to intercept a Java method, by using byte code manipulation technology and the AOP concept. SkyWalking has packaged the byte code manipulation technology and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructors, instance methods and class methods.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC (MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. 
Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because ThirdPartyClass does not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but they don\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using the fully-qualified class name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, use the fully qualified class name string literal instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexpected methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClass() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs if you want to change the argument refs in the interceptor. Please refer to bytebuddy for details on defining an ElementMatcher.\nIn SkyWalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (the format defined in the JVM Specification) to define an Array type. 
For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. 
* * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","title":"Plugin Development Guide","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/java-plugin-development-guide/"},{"content":"Plugin Development Guide You can always take the existing plugins as examples, while there are some general ideas for all plugins.\n  A plugin is a module under the directory skywalking/plugins with an install method;\n  Inside the install method, you find out the relevant method(s) of the libraries that you plan to instrument, and create/close spans before/after those method(s).\n  You should also provide version rules in the plugin module, which means the version of package your plugin aim to test.\nAll below variables will be used by the tools/plugin_doc_gen.py to produce a latest Plugin Doc.\nlink_vector = [\u0026#39;https://www.python-httpx.org/\u0026#39;] # This should link to the official website/doc of this lib # The support matrix is for scenarios where some libraries don\u0026#39;t work for certain Python versions # Therefore, we use the matrix to instruct the CI testing pipeline to skip over plugin test for such Python version # The right side versions, should almost always use A.B.* to test the latest minor version of two recent major versions.  support_matrix = { \u0026#39;httpx\u0026#39;: { \u0026#39;\u0026gt;=3.7\u0026#39;: [\u0026#39;0.23.*\u0026#39;, \u0026#39;0.22.*\u0026#39;] } } # The note will be used when generating the plugin documentation for users. note = \u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;   Every plugin requires a corresponding test under tests/plugin before it can be merged, refer to the Plugin Test Guide when writing a plugin test.\n  Add the corresponding configuration options added/modified by the new plugin to the config.py and add new comments for each, then regenerate the configuration.md by make doc-gen.\n  Steps after coding If your PR introduces the need for a new non-standard library which needs to be pulled via pip or if it removes the need for a previously-used library:\n Run poetry add library --group plugins to pin the dependency to the plugins group, Do not add it to the main dependency! 
Run make doc-gen to generate a test matrix documentation for the plugin.  ","title":"Plugin Development Guide","url":"/docs/skywalking-python/latest/en/contribution/how-to-develop-plugin/"},{"content":"Plugin Development Guide You can always take the existing plugins as examples, while there are some general ideas for all plugins.\n  A plugin is a module under the directory skywalking/plugins with an install method;\n  Inside the install method, you find out the relevant method(s) of the libraries that you plan to instrument, and create/close spans before/after those method(s).\n  You should also provide version rules in the plugin module, which means the version of package your plugin aim to test.\nAll below variables will be used by the tools/plugin_doc_gen.py to produce a latest Plugin Doc.\nlink_vector = [\u0026#39;https://www.python-httpx.org/\u0026#39;] # This should link to the official website/doc of this lib # The support matrix is for scenarios where some libraries don\u0026#39;t work for certain Python versions # Therefore, we use the matrix to instruct the CI testing pipeline to skip over plugin test for such Python version # The right side versions, should almost always use A.B.* to test the latest minor version of two recent major versions.  support_matrix = { \u0026#39;httpx\u0026#39;: { \u0026#39;\u0026gt;=3.7\u0026#39;: [\u0026#39;0.23.*\u0026#39;, \u0026#39;0.22.*\u0026#39;] } } # The note will be used when generating the plugin documentation for users. note = \u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;   Every plugin requires a corresponding test under tests/plugin before it can be merged, refer to the Plugin Test Guide when writing a plugin test.\n  Add the corresponding configuration options added/modified by the new plugin to the config.py and add new comments for each, then regenerate the configuration.md by make doc-gen.\n  Steps after coding If your PR introduces the need for a new non-standard library which needs to be pulled via pip or if it removes the need for a previously-used library:\n Run poetry add library --group plugins to pin the dependency to the plugins group, Do not add it to the main dependency! Run make doc-gen to generate a test matrix documentation for the plugin.  ","title":"Plugin Development Guide","url":"/docs/skywalking-python/next/en/contribution/how-to-develop-plugin/"},{"content":"Plugin Development Guide You can always take the existing plugins as examples, while there are some general ideas for all plugins.\n  A plugin is a module under the directory skywalking/plugins with an install method;\n  Inside the install method, you find out the relevant method(s) of the libraries that you plan to instrument, and create/close spans before/after those method(s).\n  You should also provide version rules in the plugin module, which means the version of package your plugin aim to test.\nAll below variables will be used by the tools/plugin_doc_gen.py to produce a latest Plugin Doc.\nlink_vector = [\u0026#39;https://www.python-httpx.org/\u0026#39;] # This should link to the official website/doc of this lib # The support matrix is for scenarios where some libraries don\u0026#39;t work for certain Python versions # Therefore, we use the matrix to instruct the CI testing pipeline to skip over plugin test for such Python version # The right side versions, should almost always use A.B.* to test the latest minor version of two recent major versions.  
support_matrix = { \u0026#39;httpx\u0026#39;: { \u0026#39;\u0026gt;=3.7\u0026#39;: [\u0026#39;0.23.*\u0026#39;, \u0026#39;0.22.*\u0026#39;] } } # The note will be used when generating the plugin documentation for users. note = \u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;   Every plugin requires a corresponding test under tests/plugin before it can be merged, refer to the Plugin Test Guide when writing a plugin test.\n  Add the corresponding configuration options added/modified by the new plugin to the config.py and add new comments for each, then regenerate the configuration.md by make doc-gen.\n  Steps after coding If your PR introduces the need for a new non-standard library which needs to be pulled via pip or if it removes the need for a previously-used library:\n Run poetry add library --group plugins to pin the dependency to the plugins group, Do not add it to the main dependency! Run make doc-gen to generate a test matrix documentation for the plugin.  ","title":"Plugin Development Guide","url":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-develop-plugin/"},{"content":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific plugins, through their names. Consequently, the codes of these excluded plugins will not be weaved in, then, no relative tracing and metrics.\nConfiguration plugin:# List the names of excluded plugins, multiple plugin names should be splitted by \u0026#34;,\u0026#34;# NOTE: This parameter only takes effect during the compilation phase.excluded:${SW_AGENT_PLUGIN_EXCLUDES:}This configuration option is also located in the existing configuration files and supports configuration based on environment variables. However, this environment variable only takes effect during the compilation phase.\nThe plugins name please refer to the Support Plugins Documentation.\n","title":"Plugin Exclusion","url":"/docs/skywalking-go/latest/en/advanced-features/plugin-exclusion/"},{"content":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific plugins, through their names. Consequently, the codes of these excluded plugins will not be weaved in, then, no relative tracing and metrics.\nConfiguration plugin:# List the names of excluded plugins, multiple plugin names should be splitted by \u0026#34;,\u0026#34;# NOTE: This parameter only takes effect during the compilation phase.excluded:${SW_AGENT_PLUGIN_EXCLUDES:}This configuration option is also located in the existing configuration files and supports configuration based on environment variables. However, this environment variable only takes effect during the compilation phase.\nThe plugins name please refer to the Support Plugins Documentation.\n","title":"Plugin Exclusion","url":"/docs/skywalking-go/next/en/advanced-features/plugin-exclusion/"},{"content":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific plugins, through their names. Consequently, the codes of these excluded plugins will not be weaved in, then, no relative tracing and metrics.\nConfiguration plugin:# List the names of excluded plugins, multiple plugin names should be splitted by \u0026#34;,\u0026#34;# NOTE: This parameter only takes effect during the compilation phase.excluded:${SW_AGENT_PLUGIN_EXCLUDES:}This configuration option is also located in the existing configuration files and supports configuration based on environment variables. 
However, this environment variable only takes effect during the compilation phase.\nThe plugins name please refer to the Support Plugins Documentation.\n","title":"Plugin Exclusion","url":"/docs/skywalking-go/v0.4.0/en/advanced-features/plugin-exclusion/"},{"content":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker   Fetcher Filter Forwarder  Envoy ALS v2 GRPC Forwarder Envoy ALS v3 GRPC Forwarder Envoy Metrics v2 GRPC Forwarder Envoy Metrics v3 GRPC Forwarder Native CDS GRPC Forwarder Native EBPF Profiling GRPC Forwarder Native Event GRPC Forwarder Native JVM GRPC Forwarder Native CLR GRPC Forwarder Native Log GRPC Forwarder Native Log Kafka Forwarder Native Management GRPC Forwarder Native Meter GRPC Forwarder Native Process GRPC Forwarder Native Profile GRPC Forwarder Native Tracing GRPC Forwarder OpenTelemetry Metrics v1 GRPC Forwarder   Parser Queue  Memory Queue Mmap Queue None Queue   Receiver  GRPC Envoy ALS v2 Receiver GRPC Envoy ALS v3 Receiver GRPC Envoy Metrics v2 Receiver GRPC Envoy Metrics v3 Receiver GRPC Native CDS Receiver GRPC Native EBFP Profiling Receiver GRPC Native Event Receiver GRPC Native JVM Receiver GRPC Native CLR Receiver GRPC Native Log Receiver GRPC Native Management Receiver GRPC Native Meter Receiver GRPC Native Process Receiver GRPC Native Profile Receiver GRPC Native Tracing Receiver GRPC OpenTelemetry Metrics v1 Receiver HTTP Native Log Receiver   Server  GRPC Server HTTP Server    ","title":"Plugin List","url":"/docs/skywalking-satellite/latest/en/setup/plugins/plugin-list/"},{"content":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker   Fetcher Filter Forwarder  Envoy ALS v2 GRPC Forwarder Envoy ALS v3 GRPC Forwarder Envoy Metrics v2 GRPC Forwarder Envoy Metrics v3 GRPC Forwarder Native CDS GRPC Forwarder Native CLR GRPC Forwarder GRPC Native EBFP Access Log Forwarder Native EBPF Profiling GRPC Forwarder Native Event GRPC Forwarder Native JVM GRPC Forwarder Native Log GRPC Forwarder Native Log Kafka Forwarder Native Management GRPC Forwarder Native Meter GRPC Forwarder Native Process GRPC Forwarder Native Profile GRPC Forwarder Native Tracing GRPC Forwarder OpenTelemetry Metrics v1 GRPC Forwarder   Parser Queue  Memory Queue Mmap Queue None Queue   Receiver  GRPC Envoy ALS v2 Receiver GRPC Envoy ALS v3 Receiver GRPC Envoy Metrics v2 Receiver GRPC Envoy Metrics v3 Receiver GRPC Native CDS Receiver GRPC Native CLR Receiver GRPC Native EBFP Accesslog Receiver GRPC Native EBFP Profiling Receiver GRPC Native Event Receiver GRPC Native JVM Receiver GRPC Native Log Receiver GRPC Native Management Receiver GRPC Native Meter Receiver GRPC Native Process Receiver GRPC Native Profile Receiver GRPC Native Tracing Receiver GRPC OpenTelemetry Metrics v1 Receiver HTTP Native Log Receiver   Server  GRPC Server HTTP Server    ","title":"Plugin List","url":"/docs/skywalking-satellite/next/en/setup/plugins/plugin-list/"},{"content":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker   Fetcher Filter Forwarder  Envoy ALS v2 GRPC Forwarder Envoy ALS v3 GRPC Forwarder Envoy Metrics v2 GRPC Forwarder Envoy Metrics v3 GRPC Forwarder Native CDS GRPC Forwarder Native EBPF Profiling GRPC Forwarder Native Event GRPC Forwarder Native JVM GRPC Forwarder Native CLR GRPC Forwarder Native Log GRPC Forwarder Native Log Kafka Forwarder Native Management GRPC Forwarder Native Meter GRPC Forwarder Native Process GRPC Forwarder Native Profile GRPC Forwarder 
Native Tracing GRPC Forwarder OpenTelemetry Metrics v1 GRPC Forwarder   Parser Queue  Memory Queue Mmap Queue None Queue   Receiver  GRPC Envoy ALS v2 Receiver GRPC Envoy ALS v3 Receiver GRPC Envoy Metrics v2 Receiver GRPC Envoy Metrics v3 Receiver GRPC Native CDS Receiver GRPC Native EBFP Profiling Receiver GRPC Native Event Receiver GRPC Native JVM Receiver GRPC Native CLR Receiver GRPC Native Log Receiver GRPC Native Management Receiver GRPC Native Meter Receiver GRPC Native Process Receiver GRPC Native Profile Receiver GRPC Native Tracing Receiver GRPC OpenTelemetry Metrics v1 Receiver HTTP Native Log Receiver   Server  GRPC Server HTTP Server    ","title":"Plugin List","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/plugin-list/"},{"content":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins.\nRegistration mechanism The Plugin registration mechanism in Satellite is similar to the SPI registration mechanism of Java. Plugin registration mechanism supports to register an interface and its implementation, that means different interfaces have different registration spaces. We can easily find the type of a specific plugin according to the interface and the plugin name and initialize it according to the type.\nstructure:\n code: map[reflect.Type]map[string]reflect.Value meaning: map[interface type]map[plugin name] plugin type  Initialization mechanism Users can easily find a plugin type and initialize an empty plugin instance according to the previous registration mechanism. For setting up the configuration of the extension convenience, we define the initialization mechanism in Plugin structure.\nIn the initialization mechanism, the plugin category(interface) and the init config is required.\nInitialize processing is like the following.\n Find the plugin name in the input config according to the fixed key plugin_name. Find plugin type according to the plugin category(interface) and the plugin name. Create an empty plugin. Initialize the plugin according to the merged config, which is created by the input config and the default config.  Plugin usage in Satellite Nowadays, the numbers of the Plugin categories is 2. One is the sharing Plugin, and another is the other normal Plugin.\n Extension Plugins:  sharing plugins  Server Plugin Client Plugin   normal plugins  Receiver Plugin Fetcher Plugin Parser Plugin Queue Plugin Filter Plugin Fallbacker Plugin Forwarder Plugin      ","title":"plugin structure","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/plugin_mechanism/"},{"content":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins.\nRegistration mechanism The Plugin registration mechanism in Satellite is similar to the SPI registration mechanism of Java. Plugin registration mechanism supports to register an interface and its implementation, that means different interfaces have different registration spaces. We can easily find the type of a specific plugin according to the interface and the plugin name and initialize it according to the type.\nstructure:\n code: map[reflect.Type]map[string]reflect.Value meaning: map[interface type]map[plugin name] plugin type  Initialization mechanism Users can easily find a plugin type and initialize an empty plugin instance according to the previous registration mechanism. 
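As a rough illustration of that two-level lookup (interface type first, then plugin name), here is a sketch of the registry shape. It is written in Java only because that is the sample language used elsewhere on this page; Satellite itself implements the registry in Go with reflect.Type, and the names below (PluginRegistrySketch, register, initialize) are invented for the example.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

public class PluginRegistrySketch {
    interface Plugin { void initialize(Map<String, Object> config); }

    // Mirrors map[interface type]map[plugin name]plugin type: category -> (name -> factory of an empty plugin).
    private static final Map<Class<? extends Plugin>, Map<String, Supplier<Plugin>>> REGISTRY = new HashMap<>();

    static void register(Class<? extends Plugin> category, String name, Supplier<Plugin> factory) {
        REGISTRY.computeIfAbsent(category, c -> new HashMap<>()).put(name, factory);
    }

    // Lookup, create an empty instance, then initialize it with the input config;
    // merging the input config over the plugin's default config is omitted in this sketch.
    static Plugin initialize(Class<? extends Plugin> category, Map<String, Object> inputConfig) {
        String name = (String) inputConfig.get("plugin_name"); // the fixed key described in this section
        Plugin plugin = REGISTRY.get(category).get(name).get();
        plugin.initialize(inputConfig);
        return plugin;
    }
}
```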
For setting up the configuration of the extension convenience, we define the initialization mechanism in Plugin structure.\nIn the initialization mechanism, the plugin category(interface) and the init config is required.\nInitialize processing is like the following.\n Find the plugin name in the input config according to the fixed key plugin_name. Find plugin type according to the plugin category(interface) and the plugin name. Create an empty plugin. Initialize the plugin according to the merged config, which is created by the input config and the default config.  Plugin usage in Satellite Nowadays, the numbers of the Plugin categories is 2. One is the sharing Plugin, and another is the other normal Plugin.\n Extension Plugins:  sharing plugins  Server Plugin Client Plugin   normal plugins  Receiver Plugin Fetcher Plugin Parser Plugin Queue Plugin Filter Plugin Fallbacker Plugin Forwarder Plugin      ","title":"plugin structure","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/plugin_mechanism/"},{"content":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins.\nRegistration mechanism The Plugin registration mechanism in Satellite is similar to the SPI registration mechanism of Java. Plugin registration mechanism supports to register an interface and its implementation, that means different interfaces have different registration spaces. We can easily find the type of a specific plugin according to the interface and the plugin name and initialize it according to the type.\nstructure:\n code: map[reflect.Type]map[string]reflect.Value meaning: map[interface type]map[plugin name] plugin type  Initialization mechanism Users can easily find a plugin type and initialize an empty plugin instance according to the previous registration mechanism. For setting up the configuration of the extension convenience, we define the initialization mechanism in Plugin structure.\nIn the initialization mechanism, the plugin category(interface) and the init config is required.\nInitialize processing is like the following.\n Find the plugin name in the input config according to the fixed key plugin_name. Find plugin type according to the plugin category(interface) and the plugin name. Create an empty plugin. Initialize the plugin according to the merged config, which is created by the input config and the default config.  Plugin usage in Satellite Nowadays, the numbers of the Plugin categories is 2. One is the sharing Plugin, and another is the other normal Plugin.\n Extension Plugins:  sharing plugins  Server Plugin Client Plugin   normal plugins  Receiver Plugin Fetcher Plugin Parser Plugin Queue Plugin Filter Plugin Fallbacker Plugin Forwarder Plugin      ","title":"plugin structure","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/plugin_mechanism/"},{"content":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the master branch. 
Specify a support matrix in each plugin in the skywalking/plugins folder, along with their website links, the matrix and links will be used for plugin support table documentation generation for this doc Plugins.md.\nUse make doc-gen to generate a table and paste into Plugins.md after all test passes.\nSkyWalking Agent Test Tool (Mock Collector) SkyWalking Agent Test Tool respects the same protocol as the SkyWalking backend, and thus receives the report data from the agent side, besides, it also exposes some HTTP endpoints for verification.\nTested Service A tested service is a service involving the plugin that is to be tested, and exposes some endpoints to trigger the instrumented code and report log/trace/meter data to the mock collector.\nDocker Compose docker-compose is used to orchestrate the mock collector and the tested service(s), the docker-compose.yml should be able to run with docker-compose -f docker-compose.yml up in standalone mode, which can be used in debugging too.\nExpected Data The expected.data.yml file contains the expected segment/log/meter data after we have triggered the instrumentation and report to mock collector.\nOnce the mock collector receives data, we post the expected data to the mock collector and verify whether they match.\nThis can be done through the /dataValidate of the mock collector, say http://collector:12800/dataValidate, for example.\nExample If we want to test the plugin for the built-in library http, we will:\n Build a tested service, which sets up an HTTP server by http library, and exposes an HTTP endpoint to be triggered in the test codes, say /trigger, take this provider service as example. Compose a docker-compose.yml file, orchestrating the service built in step 1 and the mock collector, take this docker-compose.yml as an example. Write test codes to trigger the endpoint in step 1, and send the expected data file to the mock collector to verify, take this test as example.  ","title":"Plugin Test","url":"/docs/skywalking-python/latest/en/contribution/how-to-test-plugin/"},{"content":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the master branch. 
Specify a support matrix in each plugin in the skywalking/plugins folder, along with their website links, the matrix and links will be used for plugin support table documentation generation for this doc Plugins.md.\nUse make doc-gen to generate a table and paste into Plugins.md after all test passes.\nSkyWalking Agent Test Tool (Mock Collector) SkyWalking Agent Test Tool respects the same protocol as the SkyWalking backend, and thus receives the report data from the agent side, besides, it also exposes some HTTP endpoints for verification.\nTested Service A tested service is a service involving the plugin that is to be tested, and exposes some endpoints to trigger the instrumented code and report log/trace/meter data to the mock collector.\nDocker Compose docker-compose is used to orchestrate the mock collector and the tested service(s), the docker-compose.yml should be able to run with docker-compose -f docker-compose.yml up in standalone mode, which can be used in debugging too.\nExpected Data The expected.data.yml file contains the expected segment/log/meter data after we have triggered the instrumentation and report to mock collector.\nOnce the mock collector receives data, we post the expected data to the mock collector and verify whether they match.\nThis can be done through the /dataValidate of the mock collector, say http://collector:12800/dataValidate, for example.\nExample If we want to test the plugin for the built-in library http, we will:\n Build a tested service, which sets up an HTTP server by http library, and exposes an HTTP endpoint to be triggered in the test codes, say /trigger, take this provider service as example. Compose a docker-compose.yml file, orchestrating the service built in step 1 and the mock collector, take this docker-compose.yml as an example. Write test codes to trigger the endpoint in step 1, and send the expected data file to the mock collector to verify, take this test as example.  ","title":"Plugin Test","url":"/docs/skywalking-python/next/en/contribution/how-to-test-plugin/"},{"content":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the master branch. 
Specify a support matrix in each plugin in the skywalking/plugins folder, along with their website links, the matrix and links will be used for plugin support table documentation generation for this doc Plugins.md.\nUse make doc-gen to generate a table and paste into Plugins.md after all test passes.\nSkyWalking Agent Test Tool (Mock Collector) SkyWalking Agent Test Tool respects the same protocol as the SkyWalking backend, and thus receives the report data from the agent side, besides, it also exposes some HTTP endpoints for verification.\nTested Service A tested service is a service involving the plugin that is to be tested, and exposes some endpoints to trigger the instrumented code and report log/trace/meter data to the mock collector.\nDocker Compose docker-compose is used to orchestrate the mock collector and the tested service(s), the docker-compose.yml should be able to run with docker-compose -f docker-compose.yml up in standalone mode, which can be used in debugging too.\nExpected Data The expected.data.yml file contains the expected segment/log/meter data after we have triggered the instrumentation and report to mock collector.\nOnce the mock collector receives data, we post the expected data to the mock collector and verify whether they match.\nThis can be done through the /dataValidate of the mock collector, say http://collector:12800/dataValidate, for example.\nExample If we want to test the plugin for the built-in library http, we will:\n Build a tested service, which sets up an HTTP server by http library, and exposes an HTTP endpoint to be triggered in the test codes, say /trigger, take this provider service as example. Compose a docker-compose.yml file, orchestrating the service built in step 1 and the mock collector, take this docker-compose.yml as an example. Write test codes to trigger the endpoint in step 1, and send the expected data file to the mock collector to verify, take this test as example.  ","title":"Plugin Test","url":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-test-plugin/"},{"content":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\n","title":"PostgreSQL","url":"/docs/main/latest/en/setup/backend/storages/postgresql/"},{"content":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. 
Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\n","title":"PostgreSQL","url":"/docs/main/next/en/setup/backend/storages/postgresql/"},{"content":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\n","title":"PostgreSQL","url":"/docs/main/v9.7.0/en/setup/backend/storages/postgresql/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. 
Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/latest/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. 
Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/next/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  
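The Set up list above stays at the level of pointers. As a rough illustration of the collector step only, a minimal OpenTelemetry Collector configuration that scrapes postgres-exporter and forwards the metrics to the OAP gRPC port could look like the sketch below; the host names postgres-exporter and oap, the port 9187, and the scrape interval are placeholders, and the exact scrape labels expected by the OAP rules should be taken from the linked Prometheus Receiver and SkyWalking OpenTelemetry receiver guides.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: postgres-monitoring            # placeholder job name
          scrape_interval: 10s
          static_configs:
            - targets: ['postgres-exporter:9187']  # assumed exporter address
exporters:
  otlp:
    endpoint: oap:11800                            # assumed OAP gRPC address
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]
```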
PostgreSQL Monitoring PostgreSQL monitoring provides monitoring of the status and resources of the PostgreSQL server.PostgreSQL server as a Service in OAP, and land on the Layer: POSTGRESQL.\nPostgreSQL Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql.yaml. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/v9.2.0/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL monitoring provides monitoring of the status and resources of the PostgreSQL server.PostgreSQL server as a Service in OAP, and land on the Layer: POSTGRESQL.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. 
postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql.yaml. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. 
Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/v9.3.0/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL monitoring provides monitoring of the status and resources of the PostgreSQL server.PostgreSQL server as a Service in OAP, and land on the Layer: POSTGRESQL.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-postgresql-monitoring/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. 
Depending on the target system tech stack, the probe may perform such tasks in very different ways. But ultimately, they all work towards the same goal — to collect and reformat data, and then send them to the backend.\nOn a high level, there are four typical categories of SkyWalking probes.\n  Language based native agent. These agents run in the target service user space, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes at runtime, where manipulate means to change and inject the user\u0026rsquo;s codes (see the attachment sketch below). Another example is the SkyWalking Go agent, which leverages the Golang compiling mechanism to weave codes at compile time. For some statically compiled languages, such as C++, a manual library is the only choice. As you can see, these agents are based on languages and libraries, whether we provide auto-instrument or manual agents.\n  Service Mesh probes. Service Mesh probes collect data from the sidecar, the control plane in the service mesh, or the proxy. In the old days, a proxy was only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts the data formats of many widely used instrument libraries. The SkyWalking community is closely connected with the Zipkin community, and SkyWalking can work as an alternative server for both v1 and v2 Zipkin traces. The OTEL trace format over gRPC is also supported and converted to the Zipkin format inside SkyWalking. As an alternative Zipkin server, the Zipkin Lens UI can be used to visualize accepted traces when they are in the Zipkin format. See Receiver for Zipkin traces and Receiver for OTEL traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiles the target service, powered by the eBPF technology of the Linux kernel.\n  You don\u0026rsquo;t have to install all probes to get SkyWalking up and running. There are several recommended ways to use these probes:\n Use Language based native agent only to build topology and metrics for your business application. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe if you prefer the Service Mesh stack and don\u0026rsquo;t want to use native agents. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in pure tracing status. (Advanced usage) Use eBPF agent only if you just want to profile on demand and/or activate automatic performance analysis. Use eBPF agent with Language based native agent collaboratively. Enhance the traces with the eBPF agent to collect extra information.  What is the meaning of in pure tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In pure tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, but doesn\u0026rsquo;t run the metrics analysis from traces. As a result, there would be no data for service/instance/endpoint metrics and relationships.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent, Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  
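For the Language based native agent category described above, attaching the agent is usually a deployment concern rather than a code change. A hypothetical docker-compose fragment for the Java agent is sketched below; the service name, image, mount path, and backend address are placeholders, not values taken from this document.

```yaml
services:
  my-service:                      # hypothetical service
    image: my-service:latest       # hypothetical image
    volumes:
      - ./skywalking-agent:/skywalking/agent   # agent distribution mounted into the container
    environment:
      # Activate the agent through the standard -javaagent JVM argument.
      JAVA_TOOL_OPTIONS: "-javaagent:/skywalking/agent/skywalking-agent.jar"
      SW_AGENT_NAME: my-service                         # service name shown in SkyWalking
      SW_AGENT_COLLECTOR_BACKEND_SERVICES: oap:11800    # assumed OAP gRPC address
```

Other language agents follow the same pattern: ship the agent alongside the workload and point it at the OAP backend.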
","title":"Probe Introduction","url":"/docs/main/latest/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another example is SkyWalking agent, which leverage Golang compiling mechanism to weaves codes in the compiling time. For some static compilation languages, such as C++, manual library is the only choice. As you can see, these agents are based on languages and libraries, no matter we provide auto instrument or manual agents.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. SkyWalking community is connected closely with Zipkin community, it could work as an alternative server for both v1 and v2 Zipkin traces. Also, OTEL trace format in gRPC is supported, and converted to Zipkin format inside SkyWalking. As an alternative Zipkin server, Zipkin lens UI could be used to visualize accepted traces when they are in Zipkin format. See Receiver for Zipkin traces and Receiver for OTEL traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t have to install all probes to make SkyWalking up and running. There are several recommended ways on how to use these probes:\n Use Language based native agent only to build topology and metrics for your business application. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe if you prefer Service Mesh stack and don\u0026rsquo;t want to use native agents. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in pure tracing status. (Advanced usage) Use eBPF agent only if you only want to profile on demand and/or activating automatic performance analysis. Use eBPF agent with Language based native agent collaboratively. Enhance the traces with the eBPF agent to collect extra information.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In pure tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, but doesn\u0026rsquo;t run the metrics analysis from traces. As a result, there would not have data of service/instance/endpoint metrics and relationships.\nWhat is next?  
Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/next/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are three typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage)  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.0.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. 
Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiles the target service, powered by the eBPF technology of the Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.1.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. 
For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.2.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. 
Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.3.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. 
It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.4.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. 
The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.5.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. 
Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.6.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another example is SkyWalking agent, which leverage Golang compiling mechanism to weaves codes in the compiling time. For some static compilation languages, such as C++, manual library is the only choice. As you can see, these agents are based on languages and libraries, no matter we provide auto instrument or manual agents.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. SkyWalking community is connected closely with Zipkin community, it could work as an alternative server for both v1 and v2 Zipkin traces. Also, OTEL trace format in gRPC is supported, and converted to Zipkin format inside SkyWalking. As an alternative Zipkin server, Zipkin lens UI could be used to visualize accepted traces when they are in Zipkin format. See Receiver for Zipkin traces and Receiver for OTEL traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t have to install all probes to make SkyWalking up and running. 
There are several recommended ways on how to use these probes:\n Use Language based native agent only to build topology and metrics for your business application. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe if you prefer Service Mesh stack and don\u0026rsquo;t want to use native agents. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in pure tracing status. (Advanced usage) Use eBPF agent only if you only want to profile on demand and/or activating automatic performance analysis. Use eBPF agent with Language based native agent collaboratively. Enhance the traces with the eBPF agent to collect extra information.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In pure tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, but doesn\u0026rsquo;t run the metrics analysis from traces. As a result, there would not have data of service/instance/endpoint metrics and relationships.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.7.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, and Zabbix.\nThe native metrics format definition could be found here. Typically, the agent meter plugin (e.g. 
Java Meter Plugin) and Satellite Prometheus fetcher would convert metrics into native format and forward them to SkyWalking OAP server.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Probe Protocols","url":"/docs/main/v9.0.0/en/protocols/readme/"},{"content":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, and Zabbix.\nThe native metrics format definition could be found here. Typically, the agent meter plugin (e.g. Java Meter Plugin) and Satellite Prometheus fetcher would convert metrics into native format and forward them to SkyWalking OAP server.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. 
This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Probe Protocols","url":"/docs/main/v9.1.0/en/protocols/readme/"},{"content":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, OpenTelemetry, and Zabbix.\nThe native metrics format definition could be found here. Typically, the agent meter plugin (e.g. Java Meter Plugin) and Satellite Prometheus fetcher would convert metrics into native format and forward them to SkyWalking OAP server.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. 
The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Probe Protocols","url":"/docs/main/v9.2.0/en/protocols/readme/"},{"content":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3.1 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, OpenTelemetry, and Zabbix.\nThe native metrics format definition could be found here. The agent meter plugin (e.g. Java Meter Plugin) uses the native metric format to report metrics.\nOpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to MAL engine.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
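As a rough illustration of this HTTP API, the sketch below posts the same sample record shown next using Java's built-in HttpClient. The host name oap, the class name EventReportSketch, and the event values are placeholder assumptions, not part of the official protocol docs.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class EventReportSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder payload; it mirrors the sample event record documented below.
        String payload = "[{\"uuid\":\"f498b3c0-8bca-438d-a5b0-3701826ae21c\","
                + "\"source\":{\"service\":\"SERVICE-A\",\"instance\":\"INSTANCE-1\"},"
                + "\"name\":\"Reboot\",\"type\":\"Normal\",\"message\":\"App reboot.\","
                + "\"parameters\":{},\"startTime\":1628044330000,\"endTime\":1628044331000}]";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://oap:12800/v3/events")) // assumed OAP address; replace with yours
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(payload))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode()); // a 2xx status indicates the events were accepted
    }
}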
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Probe Protocols","url":"/docs/main/v9.3.0/en/protocols/readme/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/latest/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. 
The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/latest/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/latest/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/latest/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  
","title":"Problem","url":"/docs/main/latest/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/latest/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/latest/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","title":"Problem","url":"/docs/main/next/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/next/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/next/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/next/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","title":"Problem","url":"/docs/main/next/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/next/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/next/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/v9.0.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.0.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. 
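For illustration only, a minimal sketch of that manual instrumentation with a plain Kafka consumer; the topic name, class name, and exact annotation package are assumptions that should be verified against the apm-toolkit-kafka documentation for your agent version.
import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.skywalking.apm.toolkit.kafka.KafkaPollAndInvoke; // assumed package from apm-toolkit-kafka

public class OrderConsumerSketch {
    private final KafkaConsumer<String, String> consumer;

    public OrderConsumerSketch(KafkaConsumer<String, String> consumer) {
        this.consumer = consumer;
        this.consumer.subscribe(Collections.singletonList("orders")); // assumed topic
    }

    // Annotating the method that both polls and processes keeps the whole unit of work
    // inside one traced scope, so the consumer side appears in the distributed trace.
    @KafkaPollAndInvoke
    public void pollAndProcess() {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
        for (ConsumerRecord<String, String> record : records) {
            handle(record.value());
        }
    }

    private void handle(String value) {
        // business processing goes here
    }
}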
If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.0.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.0.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","title":"Problem","url":"/docs/main/v9.0.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.0.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  
Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.0.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/v9.1.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. 
The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.1.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.1.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.1.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  
","title":"Problem","url":"/docs/main/v9.1.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.1.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.1.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","title":"Problem","url":"/docs/main/v9.2.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.2.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.2.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.2.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","title":"Problem","url":"/docs/main/v9.2.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.2.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.2.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/v9.3.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.3.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. 
If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.3.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.3.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","title":"Problem","url":"/docs/main/v9.3.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.3.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  
Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.3.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/v9.4.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. 
The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.4.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.4.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.4.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  
","title":"Problem","url":"/docs/main/v9.4.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.4.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.4.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","title":"Problem","url":"/docs/main/v9.5.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.5.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.5.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.5.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","title":"Problem","url":"/docs/main/v9.5.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.5.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.5.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/v9.6.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.6.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. 
If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.6.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.6.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","title":"Problem","url":"/docs/main/v9.6.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.6.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  
Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.6.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/v9.7.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. 
The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.7.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.7.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.7.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  
","title":"Problem","url":"/docs/main/v9.7.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.7.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.7.0/en/faq/why-have-traces-no-others/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! 
stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/latest/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! 
stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. 
","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/next/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. 
Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.0.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... 
SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.1.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! 
stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.2.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! 
stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. 
","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.3.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. 
Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.4.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... 
SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.5.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! 
stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.6.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! 
,{"content":"Profiling Profiling is an on-demand diagnostic method for locating the bottlenecks of services. The following typical scenarios are usually suitable for profiling with various profiling tools:\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network and cause slow responses. Unexpected network requests are caused by security issues or code bugs.  In the SkyWalking landscape, we provide three ways to support profiling at a reasonable resource cost.\n In-process profiling is bundled with the auto-instrument agents. Out-of-process profiling is powered by the eBPF agent. Continuous profiling is powered by the eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in VM-based runtimes. This feature resolves issue \u0026lt;1\u0026gt; by capturing snapshots of the thread stacks periodically. The OAP then aggregates the thread stacks per RPC request and provides a hierarchy graph that indicates the slow methods, based on the continuous snapshots.\nThe period is usually 10-100 milliseconds and is not recommended to be shorter, because each capture usually causes a classic stop-the-world pause in the VM, which impacts the performance of the whole process.\nLearn more technical details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, the Java and Python agents support this.
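To make the periodic snapshot idea concrete, here is a minimal, hypothetical Java sketch (not the SkyWalking agent's actual code): it dumps every live thread's stack on a fixed period, whereas the real agent samples only the threads serving profiled requests and ships aggregated snapshots to the OAP.

```java
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Minimal sketch of periodic thread-stack sampling; illustrative only.
public class StackSnapshotSampler {
    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // Sample every 20 ms, i.e. inside the 10-100 ms window mentioned above.
        scheduler.scheduleAtFixedRate(() -> {
            Map<Thread, StackTraceElement[]> snapshot = Thread.getAllStackTraces();
            snapshot.forEach((thread, frames) -> {
                // A real profiler would keep only the threads bound to the profiled RPC request
                // and merge identical stacks into a hierarchy instead of printing them.
                System.out.println(thread.getName() + " -> " + frames.length + " frames");
            });
        }, 0, 20, TimeUnit.MILLISECONDS);
    }
}
```

In practice, the sampling period and the per-request aggregation are what keep the overhead of this approach acceptable.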
Out-of-process profiling Out-of-process profiling leverages eBPF technology, which originates in the Linux kernel and provides a way to extend the kernel's capabilities safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when the service's CPU usage is high.\nThe more often a stack is dumped, the more CPU resources that thread stack occupies.\nThis is quite similar to in-process profiling and also targets issue \u0026lt;1\u0026gt;, but it runs out of process and is based on Linux eBPF. It also serves languages without a VM mechanism, which therefore cannot be supported by in-process agents, such as C/C++ and Rust. Golang is a special case: it exposes its runtime metadata for eBPF, so it can be profiled as well.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage but may appear under high CPU load. This profiling aims to resolve issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, off-CPU profiling can reveal which threads spend more time on context switching. Code that relies heavily on disk I/O or on the performance of remote services can slow down the whole process.  Off-CPU profiling provides two perspectives (sketched below):\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time a thread spends switching context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more technical details about on/off-CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF
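As a rough illustration of the two perspectives above, the following hypothetical Java sketch aggregates switch count and off-CPU time per stack (names and structure are assumptions for illustration, not SkyWalking Rover's data model):

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical per-stack aggregation of the two off-CPU perspectives:
// how many context switches a stack completed and how long it stayed off-CPU.
public class OffCpuAggregator {
    static final class Perspective {
        long switchCount;      // times the stack completed a context switch
        long switchDurationNs; // total time the stack spent off-CPU, in nanoseconds
    }

    private final Map<String, Perspective> byStack = new HashMap<>();

    /** Record one context switch: the stack left the CPU for offCpuNanos before returning. */
    public void record(String stackSignature, long offCpuNanos) {
        Perspective p = byStack.computeIfAbsent(stackSignature, k -> new Perspective());
        p.switchCount += 1;
        p.switchDurationNs += offCpuNanos;
    }

    public Map<String, Perspective> snapshot() {
        return byStack;
    }
}
```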
\nNetwork Profiling Network profiling captures network packets to analyze traffic at L4 (TCP) and L7 (HTTP) and to recognize network traffic from a specific process or Kubernetes pod. Through this traffic analysis, it locates the root causes of issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and process identification. TCP traffic metrics with TLS status. HTTP traffic metrics. Sampled HTTP request/response raw data within the tracing context. Time costs of local I/O on the OS, such as the time a Linux process spends on an HTTP request/response.  Learn more technical details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous profiling monitors the system, processes, and network, and automatically initiates profiling tasks when the collected metrics meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitors the current system load value. Process CPU: Monitors the process CPU usage percentage, value in [0-100]. Process Thread Count: Monitors the process thread count. HTTP Error Rate: Monitors the percentage of process HTTP(/1.x) error responses (response status \u0026gt;= 500), value in [0-100]. HTTP Avg Response Time: Monitors the process HTTP(/1.x) response duration (ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks can be triggered (see the sketch after this list):\n On CPU Profiling: Perform eBPF on-CPU profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF off-CPU profiling on processes that meet the threshold. Network Profiling: Perform eBPF network profiling on all processes within the same instance as the processes that meet the threshold.  
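The threshold, time-window, and silence-duration behaviour described under Trigger Target can be pictured with the following hypothetical Java sketch (the class name, fields, and window semantics are illustrative assumptions, not the eBPF agent's implementation):

```java
import java.time.Duration;
import java.time.Instant;

// Hypothetical continuous-profiling trigger: check a metric against a threshold over a
// time window, start a task when the window is satisfied, then stay silent for a while.
public class ContinuousTrigger {
    private final double threshold;   // e.g. process CPU percent
    private final int windowSize;     // how many consecutive checks must exceed the threshold
    private final Duration silence;   // minimal gap between two executions of the same task
    private int overThresholdCount = 0;
    private Instant lastTriggered = Instant.EPOCH;

    public ContinuousTrigger(double threshold, int windowSize, Duration silence) {
        this.threshold = threshold;
        this.windowSize = windowSize;
        this.silence = silence;
    }

    /** Called on every periodic metric collection; returns true when a profiling task should start. */
    public boolean onMetric(double value, Instant now) {
        overThresholdCount = value >= threshold ? overThresholdCount + 1 : 0;
        boolean windowReached = overThresholdCount >= windowSize;
        boolean outOfSilence = Duration.between(lastTriggered, now).compareTo(silence) >= 0;
        if (windowReached && outOfSilence) {
            lastTriggered = now;
            overThresholdCount = 0;
            return true; // e.g. start an eBPF on-CPU profiling task for the offending process
        }
        return false;
    }
}
```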
","title":"Profiling","url":"/docs/main/latest/en/concepts-and-designs/profiling/"}
When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","title":"Profiling","url":"/docs/main/v9.7.0/en/concepts-and-designs/profiling/"},{"content":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the snapshot to the backend server.\nConfiguration    Name Default Environment Key Description     profiling.active true ROVER_PROFILING_ACTIVE Is active the process profiling.   profiling.check_interval 10s ROVER_PROFILING_CHECK_INTERVAL Check the profiling task interval.   profiling.flush_interval 5s ROVER_PROFILING_FLUSH_INTERVAL Combine existing profiling data and report to the backend interval.   profiling.task.on_cpu.dump_period 9ms ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD The profiling stack dump period.   profiling.task.network.report_interval 2s ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_REPORT_INTERVAL The interval of send metrics to the backend.   profiling.task.network.meter_prefix rover_net_p ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_METER_PREFIX The prefix of network profiling metrics name.   profiling.task.network.protocol_analyze.per_cpu_buffer 400KB ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   
profiling.task.network.protocol_analyze.parallels 2 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzers.   profiling.task.network.protocol_analyze.queue_size 5000 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_QUEUE_SIZE The queue size of each parallel analyzer.   profiling.task.network.protocol_analyze.sampling.http.default_request_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_REQUEST_ENCODING The default body encoding when sampling the request.   profiling.task.network.protocol_analyze.sampling.http.default_response_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_RESPONSE_ENCODING The default body encoding when sampling the response.   profiling.continuous.meter_prefix rover_con_p ROVER_PROFILING_CONTINUOUS_METER_PREFIX The prefix name of the continuous-profiling-related meters.   profiling.continuous.fetch_interval 1s ROVER_PROFILING_CONTINUOUS_FETCH_INTERVAL The interval of fetching metrics from the system, such as Process CPU, System Load, etc.   profiling.continuous.check_interval 5s ROVER_PROFILING_CONTINUOUS_CHECK_INTERVAL The interval of checking whether the metrics reach the thresholds.   profiling.continuous.trigger.execute_duration 10m ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION The duration of the profiling task.   profiling.continuous.trigger.silence_duration 20m ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION The minimal duration between two executions of the same profiling task.    Prepare service Before profiling your service, please make sure the symbol data is included in the service binary file, so the stack symbols can be resolved. It can be checked in the following ways:\n objdump: Using objdump --syms path/to/service. readelf: Using readelf --syms path/to/service.  Profiling Type All profiling tasks use the Linux perf event interface together with kprobe or uprobe, and attach an eBPF program to dump stacks.\nOn CPU The On CPU Profiling task uses PERF_COUNT_SW_CPU_CLOCK to profile the process with the CPU clock.\nOff CPU The Off CPU Profiling task attaches to finish_task_switch via kprobe to profile the process.\nNetwork The Network Profiling task intercepts I/O-related syscalls and uses uprobes in the process to identify the network traffic and generate the metrics. The following protocols are supported for analysis via the OpenSSL library, BoringSSL library, GoTLS, NodeTLS, or plaintext:\n HTTP/1.x HTTP/2 MySQL CQL(The Cassandra Query Language) MongoDB Kafka DNS  Collecting data Network profiling sends metrics and logs to the backend service.\nData Type Network profiling defines the following types of metrics to represent the network data:\n Counter: Records the total amount of data in a certain period of time. Each counter contains the following data:  Count: The count of the execution. Bytes: The package size of the execution. Exe Time: The consumed time(nanosecond) of the execution.   Histogram: Records the distribution of the data in buckets. TopN: Records the highest latency data in a certain period of time.  Labels Each metric contains the following labels to identify the process relationship:\n   Name Type Description     client_process_id or server_process_id string The ID of the current process, which is determined by the role of the current process in the connection as server or client.   client_local or server_local boolean Whether the remote process is a local process.   
client_address or server_address string The remote process address. ex: IP:port.   side enum The current process is either \u0026ldquo;client\u0026rdquo; or \u0026ldquo;server\u0026rdquo; in this connection.   protocol string Identification the protocol based on the package data content.   is_ssl bool Is the current connection using SSL.    Layer-4 Data Based on the above two data types, the following metrics are provided.\n   Name Type Unit Description     write Counter nanosecond The socket write counter   read Counter nanosecond The socket read counter   write RTT Counter microsecond The socket write RTT counter   connect Counter nanosecond The socket connect/accept with other server/client counter   close Counter nanosecond The socket close counter   retransmit Counter nanosecond The socket retransmit package counter   drop Counter nanosecond The socket drop package counter   write RTT Histogram microsecond The socket write RTT execute time histogram   write execute time Histogram nanosecond The socket write data execute time histogram   read execute time Histogram nanosecond The socket read data execute time histogram   connect execute time Histogram nanosecond The socket connect/accept with other server/client execute time histogram   close execute time Histogram nanosecond The socket close execute time histogram    HTTP/1.x Data Metrics    Name Type Unit Description     http1_request_cpm Counter count The HTTP request counter   http1_response_status_cpm Counter count The count of per HTTP response code   http1_request_package_size Histogram Byte size The request package size   http1_response_package_size Histogram Byte size The response package size   http1_client_duration Histogram millisecond The duration of single HTTP response on the client side   http1_server_duration Histogram millisecond The duration of single HTTP response on the server side    Logs    Name Type Unit Description     slow_traces TopN millisecond The Top N slow trace(id)s   status_4xx TopN millisecond The Top N trace(id)s with response status in 400-499   status_5xx TopN millisecond The Top N trace(id)s with response status in 500-599    Span Attached Event    Name Description     HTTP Request Sampling Complete information about the HTTP request, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   HTTP Response Sampling Complete information about the HTTP response, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   Syscall xxx The methods to use when the process invoke with the network-related syscall method. It\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.    Continuous Profiling The continuous profiling feature monitors low-power target process information, including process CPU usage and network requests, based on configuration passed from the backend. When a threshold is met, it automatically initiates a profiling task(on/off CPU, Network) to provide more detailed analysis.\nMonitor Type System Load Monitor the average system load for the last minute, which is equivalent to using the first value of the load average in the uptime command.\nProcess CPU The target process utilizes a certain percentage of the CPU on the current host.\nProcess Thread Count The real-time number of threads in the target process.\nNetwork Network monitoring uses eBPF technology to collect real-time performance data of the current process responding to requests. 
Requests sent upstream are not monitored by the system.\nCurrently, network monitoring supports parsing of the HTTP/1.x protocol and supports the following types of monitoring:\n Error Rate: The percentage of network request errors, such as HTTP status codes within the range of [500-600), is considered as erroneous. Avg Response Time: Average response time(ms) for specified URI.  Metrics Rover would periodically send collected monitoring data to the backend using the Native Meter Protocol.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    ","title":"Profiling","url":"/docs/skywalking-rover/latest/en/setup/configuration/profiling/"},{"content":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the snapshot to the backend server.\nConfiguration    Name Default Environment Key Description     profiling.active true ROVER_PROFILING_ACTIVE Is active the process profiling.   profiling.check_interval 10s ROVER_PROFILING_CHECK_INTERVAL Check the profiling task interval.   profiling.flush_interval 5s ROVER_PROFILING_FLUSH_INTERVAL Combine existing profiling data and report to the backend interval.   profiling.task.on_cpu.dump_period 9ms ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD The profiling stack dump period.   profiling.task.network.report_interval 2s ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_REPORT_INTERVAL The interval of send metrics to the backend.   profiling.task.network.meter_prefix rover_net_p ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_METER_PREFIX The prefix of network profiling metrics name.   profiling.task.network.protocol_analyze.per_cpu_buffer 400KB ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   profiling.task.network.protocol_analyze.parallels 2 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   profiling.task.network.protocol_analyze.queue_size 5000 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyzer queue.   profiling.task.network.protocol_analyze.sampling.http.default_request_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_REQUEST_ENCODING The default body encoding when sampling the request.   profiling.task.network.protocol_analyze.sampling.http.default_response_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_RESPONSE_ENCODING The default body encoding when sampling the response.   profiling.continuous.meter_prefix rover_con_p ROVER_PROFILING_CONTINUOUS_METER_PREFIX The continuous related meters prefix name.   profiling.continuous.fetch_interval 1s ROVER_PROFILING_CONTINUOUS_FETCH_INTERVAL The interval of fetch metrics from the system, such as Process CPU, System Load, etc.   profiling.continuous.check_interval 5s ROVER_PROFILING_CONTINUOUS_CHECK_INTERVAL The interval of check metrics is reach the thresholds.   profiling.continuous.trigger.execute_duration 10m ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION The duration of the profiling task.   profiling.continuous.trigger.silence_duration 20m ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION The minimal duration between the execution of the same profiling task.    
Prepare service Before profiling your service, please make sure your service already has the symbol data inside the binary file. So we could locate the stack symbol, It could be checked following these ways:\n objdump: Using objdump --syms path/to/service. readelf: Using readelf --syms path/to/service.  Profiling Type All the profiling tasks are using the Linux Official Function and kprobe or uprobe to open perf event, and attach the eBPF Program to dump stacks.\nOn CPU On CPU Profiling task is using PERF_COUNT_SW_CPU_CLOCK to profiling the process with the CPU clock.\nOff CPU Off CPU Profiling task is attach the finish_task_switch in krobe to profiling the process.\nNetwork Network Profiling task is intercept IO-related syscall and urprobe in process to identify the network traffic and generate the metrics. Also, the following protocol are supported for analyzing using OpenSSL library, BoringSSL library, GoTLS, NodeTLS or plaintext:\n HTTP/1.x HTTP/2 MySQL CQL(The Cassandra Query Language) MongoDB Kafka DNS  Collecting data Network profiling uses metrics, logs send to the backend service.\nData Type The network profiling has customized the following two types of metrics to represent the network data:\n Counter: Records the total number of data in a certain period of time. Each counter containers the following data:  Count: The count of the execution. Bytes: The package size of the execution. Exe Time: The consumed time(nanosecond) of the execution.   Histogram: Records the distribution of the data in the bucket. TopN: Record the highest latency data in a certain period of time.  Labels Each metric contains the following labels to identify the process relationship:\n   Name Type Description     client_process_id or server_process_id string The ID of the current process, which is determined by the role of the current process in the connection as server or client.   client_local or server_local boolean The remote process is a local process.   client_address or server_address string The remote process address. ex: IP:port.   side enum The current process is either \u0026ldquo;client\u0026rdquo; or \u0026ldquo;server\u0026rdquo; in this connection.   protocol string Identification the protocol based on the package data content.   is_ssl bool Is the current connection using SSL.    
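To make the Counter data type described above more concrete, here is a minimal Go sketch of how a Layer-4 counter could accumulate Count/Bytes/Exe Time per connection and be merged before a flush to the backend. The type and method names (L4Counter, Observe, Merge) are illustrative assumptions, not the actual Rover implementation.

```go
package main

import (
	"fmt"
	"time"
)

// L4Counter is an illustrative stand-in for the "Counter" data type described
// above: it records how many operations happened, how many bytes they moved,
// and how long they took, within one flush interval.
type L4Counter struct {
	Count   int64         // number of executions (e.g. socket writes)
	Bytes   int64         // total package size of those executions
	ExeTime time.Duration // total consumed time of those executions
}

// Observe records a single execution, e.g. one socket write.
func (c *L4Counter) Observe(bytes int64, cost time.Duration) {
	c.Count++
	c.Bytes += bytes
	c.ExeTime += cost
}

// Merge folds another counter into this one, mirroring how per-connection
// data could be combined before being reported at the flush interval.
func (c *L4Counter) Merge(o L4Counter) {
	c.Count += o.Count
	c.Bytes += o.Bytes
	c.ExeTime += o.ExeTime
}

func main() {
	var write L4Counter
	write.Observe(512, 120*time.Microsecond)
	write.Observe(2048, 340*time.Microsecond)

	var total L4Counter
	total.Merge(write)
	fmt.Printf("count=%d bytes=%d exeTime=%s\n", total.Count, total.Bytes, total.ExeTime)
}
```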
Layer-4 Data Based on the above two data types, the following metrics are provided.\n   Name Type Unit Description     write Counter nanosecond The socket write counter   read Counter nanosecond The socket read counter   write RTT Counter microsecond The socket write RTT counter   connect Counter nanosecond The socket connect/accept with other server/client counter   close Counter nanosecond The socket close counter   retransmit Counter nanosecond The socket retransmit package counter   drop Counter nanosecond The socket drop package counter   write RTT Histogram microsecond The socket write RTT execute time histogram   write execute time Histogram nanosecond The socket write data execute time histogram   read execute time Histogram nanosecond The socket read data execute time histogram   connect execute time Histogram nanosecond The socket connect/accept with other server/client execute time histogram   close execute time Histogram nanosecond The socket close execute time histogram    HTTP/1.x Data Metrics    Name Type Unit Description     http1_request_cpm Counter count The HTTP request counter   http1_response_status_cpm Counter count The count of per HTTP response code   http1_request_package_size Histogram Byte size The request package size   http1_response_package_size Histogram Byte size The response package size   http1_client_duration Histogram millisecond The duration of single HTTP response on the client side   http1_server_duration Histogram millisecond The duration of single HTTP response on the server side    Logs    Name Type Unit Description     slow_traces TopN millisecond The Top N slow trace(id)s   status_4xx TopN millisecond The Top N trace(id)s with response status in 400-499   status_5xx TopN millisecond The Top N trace(id)s with response status in 500-599    Span Attached Event    Name Description     HTTP Request Sampling Complete information about the HTTP request, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   HTTP Response Sampling Complete information about the HTTP response, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   Syscall xxx The methods to use when the process invoke with the network-related syscall method. It\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.    Continuous Profiling The continuous profiling feature monitors low-power target process information, including process CPU usage and network requests, based on configuration passed from the backend. When a threshold is met, it automatically initiates a profiling task(on/off CPU, Network) to provide more detailed analysis.\nMonitor Type System Load Monitor the average system load for the last minute, which is equivalent to using the first value of the load average in the uptime command.\nProcess CPU The target process utilizes a certain percentage of the CPU on the current host.\nProcess Thread Count The real-time number of threads in the target process.\nNetwork Network monitoring uses eBPF technology to collect real-time performance data of the current process responding to requests. Requests sent upstream are not monitored by the system.\nCurrently, network monitoring supports parsing of the HTTP/1.x protocol and supports the following types of monitoring:\n Error Rate: The percentage of network request errors, such as HTTP status codes within the range of [500-600), is considered as erroneous. Avg Response Time: Average response time(ms) for specified URI.  
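As a rough illustration of the two network monitor values above (Error Rate and Avg Response Time), the following Go sketch reduces the HTTP/1.x exchanges observed in one fetch window into those two numbers. The sample and windowStats names are hypothetical; the real eBPF-based collection in Rover is far more involved.

```go
package main

import "fmt"

// sample is a hypothetical record of one observed HTTP/1.x exchange.
type sample struct {
	status     int
	durationMs float64
}

// windowStats reduces the samples of one fetch interval into the two monitor
// values described above: error rate (0-100) and average response time (ms).
// Statuses in [500, 600) are counted as errors, as in the documentation.
func windowStats(samples []sample) (errorRate, avgRespMs float64) {
	if len(samples) == 0 {
		return 0, 0
	}
	var errors int
	var totalMs float64
	for _, s := range samples {
		if s.status >= 500 && s.status < 600 {
			errors++
		}
		totalMs += s.durationMs
	}
	errorRate = float64(errors) / float64(len(samples)) * 100
	avgRespMs = totalMs / float64(len(samples))
	return errorRate, avgRespMs
}

func main() {
	window := []sample{
		{status: 200, durationMs: 12},
		{status: 503, durationMs: 90},
		{status: 200, durationMs: 18},
		{status: 500, durationMs: 120},
	}
	errRate, avg := windowStats(window)
	fmt.Printf("http_error_rate=%.1f%% http_avg_response_time=%.1fms\n", errRate, avg)
}
```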
Metrics Rover would periodically send collected monitoring data to the backend using the Native Meter Protocol.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    ","title":"Profiling","url":"/docs/skywalking-rover/next/en/setup/configuration/profiling/"},{"content":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the snapshot to the backend server.\nConfiguration    Name Default Environment Key Description     profiling.active true ROVER_PROFILING_ACTIVE Is active the process profiling.   profiling.check_interval 10s ROVER_PROFILING_CHECK_INTERVAL Check the profiling task interval.   profiling.flush_interval 5s ROVER_PROFILING_FLUSH_INTERVAL Combine existing profiling data and report to the backend interval.   profiling.task.on_cpu.dump_period 9ms ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD The profiling stack dump period.   profiling.task.network.report_interval 2s ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_REPORT_INTERVAL The interval of send metrics to the backend.   profiling.task.network.meter_prefix rover_net_p ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_METER_PREFIX The prefix of network profiling metrics name.   profiling.task.network.protocol_analyze.per_cpu_buffer 400KB ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   profiling.task.network.protocol_analyze.parallels 2 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   profiling.task.network.protocol_analyze.queue_size 5000 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyzer queue.   profiling.task.network.protocol_analyze.sampling.http.default_request_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_REQUEST_ENCODING The default body encoding when sampling the request.   profiling.task.network.protocol_analyze.sampling.http.default_response_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_RESPONSE_ENCODING The default body encoding when sampling the response.   profiling.continuous.meter_prefix rover_con_p ROVER_PROFILING_CONTINUOUS_METER_PREFIX The continuous related meters prefix name.   profiling.continuous.fetch_interval 1s ROVER_PROFILING_CONTINUOUS_FETCH_INTERVAL The interval of fetch metrics from the system, such as Process CPU, System Load, etc.   profiling.continuous.check_interval 5s ROVER_PROFILING_CONTINUOUS_CHECK_INTERVAL The interval of check metrics is reach the thresholds.   profiling.continuous.trigger.execute_duration 10m ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION The duration of the profiling task.   profiling.continuous.trigger.silence_duration 20m ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION The minimal duration between the execution of the same profiling task.    Prepare service Before profiling your service, please make sure your service already has the symbol data inside the binary file. So we could locate the stack symbol, It could be checked following these ways:\n objdump: Using objdump --syms path/to/service. readelf: Using readelf --syms path/to/service.  
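Besides objdump and readelf, the symbol-data check described in the Prepare service step can also be done programmatically. The following is a small, hedged Go sketch using the standard debug/elf package; it only verifies that an ELF symbol table is present (the precondition for resolving stack symbols) and is not part of Rover itself.

```go
package main

import (
	"debug/elf"
	"errors"
	"fmt"
	"os"
)

// main checks whether the given ELF binary still carries a symbol table,
// i.e. whether stack symbols can be resolved during profiling.
func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: symcheck path/to/service")
		os.Exit(2)
	}
	f, err := elf.Open(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, "not a readable ELF file:", err)
		os.Exit(1)
	}
	defer f.Close()

	syms, err := f.Symbols()
	switch {
	case errors.Is(err, elf.ErrNoSymbols):
		fmt.Println("no symbol table found: the binary looks stripped")
	case err != nil:
		fmt.Fprintln(os.Stderr, "failed to read symbols:", err)
		os.Exit(1)
	default:
		fmt.Printf("symbol table present: %d symbols\n", len(syms))
	}
}
```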
Profiling Type All the profiling tasks are using the Linux Official Function and kprobe or uprobe to open perf event, and attach the eBPF Program to dump stacks.\nOn CPU On CPU Profiling task is using PERF_COUNT_SW_CPU_CLOCK to profiling the process with the CPU clock.\nOff CPU Off CPU Profiling task is attach the finish_task_switch in krobe to profiling the process.\nNetwork Network Profiling task is intercept IO-related syscall and urprobe in process to identify the network traffic and generate the metrics. Also, the following protocol are supported for analyzing using OpenSSL library, BoringSSL library, GoTLS, NodeTLS or plaintext:\n HTTP/1.x HTTP/2 MySQL CQL(The Cassandra Query Language) MongoDB Kafka DNS  Collecting data Network profiling uses metrics, logs send to the backend service.\nData Type The network profiling has customized the following two types of metrics to represent the network data:\n Counter: Records the total number of data in a certain period of time. Each counter containers the following data:  Count: The count of the execution. Bytes: The package size of the execution. Exe Time: The consumed time(nanosecond) of the execution.   Histogram: Records the distribution of the data in the bucket. TopN: Record the highest latency data in a certain period of time.  Labels Each metric contains the following labels to identify the process relationship:\n   Name Type Description     client_process_id or server_process_id string The ID of the current process, which is determined by the role of the current process in the connection as server or client.   client_local or server_local boolean The remote process is a local process.   client_address or server_address string The remote process address. ex: IP:port.   side enum The current process is either \u0026ldquo;client\u0026rdquo; or \u0026ldquo;server\u0026rdquo; in this connection.   protocol string Identification the protocol based on the package data content.   is_ssl bool Is the current connection using SSL.    
Layer-4 Data Based on the above two data types, the following metrics are provided.\n   Name Type Unit Description     write Counter nanosecond The socket write counter   read Counter nanosecond The socket read counter   write RTT Counter microsecond The socket write RTT counter   connect Counter nanosecond The socket connect/accept with other server/client counter   close Counter nanosecond The socket close counter   retransmit Counter nanosecond The socket retransmit package counter   drop Counter nanosecond The socket drop package counter   write RTT Histogram microsecond The socket write RTT execute time histogram   write execute time Histogram nanosecond The socket write data execute time histogram   read execute time Histogram nanosecond The socket read data execute time histogram   connect execute time Histogram nanosecond The socket connect/accept with other server/client execute time histogram   close execute time Histogram nanosecond The socket close execute time histogram    HTTP/1.x Data Metrics    Name Type Unit Description     http1_request_cpm Counter count The HTTP request counter   http1_response_status_cpm Counter count The count of per HTTP response code   http1_request_package_size Histogram Byte size The request package size   http1_response_package_size Histogram Byte size The response package size   http1_client_duration Histogram millisecond The duration of single HTTP response on the client side   http1_server_duration Histogram millisecond The duration of single HTTP response on the server side    Logs    Name Type Unit Description     slow_traces TopN millisecond The Top N slow trace(id)s   status_4xx TopN millisecond The Top N trace(id)s with response status in 400-499   status_5xx TopN millisecond The Top N trace(id)s with response status in 500-599    Span Attached Event    Name Description     HTTP Request Sampling Complete information about the HTTP request, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   HTTP Response Sampling Complete information about the HTTP response, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   Syscall xxx The methods to use when the process invoke with the network-related syscall method. It\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.    Continuous Profiling The continuous profiling feature monitors low-power target process information, including process CPU usage and network requests, based on configuration passed from the backend. When a threshold is met, it automatically initiates a profiling task(on/off CPU, Network) to provide more detailed analysis.\nMonitor Type System Load Monitor the average system load for the last minute, which is equivalent to using the first value of the load average in the uptime command.\nProcess CPU The target process utilizes a certain percentage of the CPU on the current host.\nProcess Thread Count The real-time number of threads in the target process.\nNetwork Network monitoring uses eBPF technology to collect real-time performance data of the current process responding to requests. Requests sent upstream are not monitored by the system.\nCurrently, network monitoring supports parsing of the HTTP/1.x protocol and supports the following types of monitoring:\n Error Rate: The percentage of network request errors, such as HTTP status codes within the range of [500-600), is considered as erroneous. Avg Response Time: Average response time(ms) for specified URI.  
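To show how the continuous-profiling knobs above could interact (a metric fetched every fetch_interval, checked every check_interval against a threshold, a task run for execute_duration, and a silence_duration between runs), here is a hedged Go sketch of a single threshold check with a silence window. The trigger type and its fields are assumptions for illustration only; the real scheduler also evaluates the threshold over a configured time window of consecutive checks.

```go
package main

import (
	"fmt"
	"time"
)

// trigger is an illustrative threshold check for one continuous-profiling rule,
// e.g. "Process CPU above 80% triggers an On CPU profiling task".
type trigger struct {
	threshold     float64       // e.g. 80 for 80% process CPU
	executeFor    time.Duration // configured task duration (execute_duration)
	silenceFor    time.Duration // minimal gap between two runs (silence_duration)
	lastTriggered time.Time
}

// check is called at every check interval with the latest fetched metric value.
// It reports whether a profiling task should be started now.
func (t *trigger) check(value float64, now time.Time) bool {
	if value < t.threshold {
		return false
	}
	// Respect the silence duration between two executions of the same task.
	if !t.lastTriggered.IsZero() && now.Sub(t.lastTriggered) < t.silenceFor {
		return false
	}
	t.lastTriggered = now
	return true
}

func main() {
	tr := &trigger{threshold: 80, executeFor: 10 * time.Minute, silenceFor: 20 * time.Minute}
	now := time.Now()

	fmt.Println(tr.check(65, now))                     // false: below threshold
	fmt.Println(tr.check(92, now))                     // true: start a task for executeFor
	fmt.Println(tr.check(95, now.Add(5*time.Minute)))  // false: still inside silence window
	fmt.Println(tr.check(95, now.Add(25*time.Minute))) // true: silence window elapsed
}
```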
Metrics Rover would periodically send collected monitoring data to the backend using the Native Meter Protocol.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    ","title":"Profiling","url":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/profiling/"},{"content":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","title":"Profiling APIs","url":"/docs/main/latest/en/api/profiling-protocol/"},{"content":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","title":"Profiling APIs","url":"/docs/main/next/en/api/profiling-protocol/"},{"content":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","title":"Profiling APIs","url":"/docs/main/v9.4.0/en/api/profiling-protocol/"},{"content":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","title":"Profiling APIs","url":"/docs/main/v9.5.0/en/api/profiling-protocol/"},{"content":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","title":"Profiling APIs","url":"/docs/main/v9.6.0/en/api/profiling-protocol/"},{"content":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","title":"Profiling APIs","url":"/docs/main/v9.7.0/en/api/profiling-protocol/"},{"content":"Project Structure  agent: The agent core files copied when hybrid compilation. bin: The binary files of Go agent program. docs: The documentation of Go agent. log: The log configuration for adapt the Golang agent. plugins: The plugins for adapt the frameworks.  core: Agent core and API for the SkyWalking Agent, the plugins should import this module. xxx: The plugins for adapt the framework.   reporter: The reporter for adapt the SkyWalking backend. tools/go-agent: The Golang Agent enhancement program.  cmd: The agent starter. config: The application register configuration for agent. instrument: Perform enhancement on different packages during hybrid compilation.  agentcore: When compiling SkyWalking Go, enhance its code, mainly for Agent Core file copying. api: The API of the instrument. entry: When compiling the main package, enhance its code, mainly focusing on starting the Agent system. plugins: When detecting a framework that requires enhancement, enhance its. For specific operation details, please refer to the Key Principle document. reporter: When compiling the reporter package under agent, enhance its code, mainly focusing on starting the reporter. runtime: When compiling the runtime package, enhance its code. For specific operation details, please refer to the Key Principle document.   tools: helps to build the agent.    ","title":"Project Structure","url":"/docs/skywalking-go/latest/en/concepts-and-designs/project-structure/"},{"content":"Project Structure  agent: The agent core files copied when hybrid compilation. bin: The binary files of Go agent program. docs: The documentation of Go agent. log: The log configuration for adapt the Golang agent. plugins: The plugins for adapt the frameworks.  core: Agent core and API for the SkyWalking Agent, the plugins should import this module. xxx: The plugins for adapt the framework.   
reporter: The reporter for adapt the SkyWalking backend. tools/go-agent: The Golang Agent enhancement program.  cmd: The agent starter. config: The application register configuration for agent. instrument: Perform enhancement on different packages during hybrid compilation.  agentcore: When compiling SkyWalking Go, enhance its code, mainly for Agent Core file copying. api: The API of the instrument. entry: When compiling the main package, enhance its code, mainly focusing on starting the Agent system. plugins: When detecting a framework that requires enhancement, enhance its. For specific operation details, please refer to the Key Principle document. reporter: When compiling the reporter package under agent, enhance its code, mainly focusing on starting the reporter. runtime: When compiling the runtime package, enhance its code. For specific operation details, please refer to the Key Principle document.   tools: helps to build the agent.    ","title":"Project Structure","url":"/docs/skywalking-go/next/en/concepts-and-designs/project-structure/"},{"content":"Project Structure  agent: The agent core files copied when hybrid compilation. bin: The binary files of Go agent program. docs: The documentation of Go agent. log: The log configuration for adapt the Golang agent. plugins: The plugins for adapt the frameworks.  core: Agent core and API for the SkyWalking Agent, the plugins should import this module. xxx: The plugins for adapt the framework.   reporter: The reporter for adapt the SkyWalking backend. tools/go-agent: The Golang Agent enhancement program.  cmd: The agent starter. config: The application register configuration for agent. instrument: Perform enhancement on different packages during hybrid compilation.  agentcore: When compiling SkyWalking Go, enhance its code, mainly for Agent Core file copying. api: The API of the instrument. entry: When compiling the main package, enhance its code, mainly focusing on starting the Agent system. plugins: When detecting a framework that requires enhancement, enhance its. For specific operation details, please refer to the Key Principle document. reporter: When compiling the reporter package under agent, enhance its code, mainly focusing on starting the reporter. runtime: When compiling the runtime package, enhance its code. For specific operation details, please refer to the Key Principle document.   tools: helps to build the agent.    ","title":"Project Structure","url":"/docs/skywalking-go/v0.4.0/en/concepts-and-designs/project-structure/"},{"content":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, and common utils.  internal/pkg: Sharing with Core and Plugins, such as api and utils. internal/satellite: The core of Satellite.   plugins: Contains all plugins.  plugins/{type}: Contains the plugins of this {type}. Satellite has 9 plugin types. plugins/{type}/api: Contains the plugin definition and initializer. plugins/{type}/{plugin-name}: Contains the specific plugin. init.go: Register the plugins to the plugin registry.    . ├── CHANGES.md ├── cmd ├── configs ├── docs ├── go.sum ├── internal │ ├── pkg │ └── satellite ├── plugins │ ├── client │ ├── fallbacker │ ├── fetcher │ ├── filter │ ├── forwarder │ ├── init.go │ ├── parser │ ├── queue │ ├── receiver │ └── server ","title":"Project Structure","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/project_structue/"},{"content":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. 
internal: Core, API, and common utils.  internal/pkg: Sharing with Core and Plugins, such as api and utils. internal/satellite: The core of Satellite.   plugins: Contains all plugins.  plugins/{type}: Contains the plugins of this {type}. Satellite has 9 plugin types. plugins/{type}/api: Contains the plugin definition and initializer. plugins/{type}/{plugin-name}: Contains the specific plugin. init.go: Register the plugins to the plugin registry.    . ├── CHANGES.md ├── cmd ├── configs ├── docs ├── go.sum ├── internal │ ├── pkg │ └── satellite ├── plugins │ ├── client │ ├── fallbacker │ ├── fetcher │ ├── filter │ ├── forwarder │ ├── init.go │ ├── parser │ ├── queue │ ├── receiver │ └── server ","title":"Project Structure","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/project_structue/"},{"content":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, and common utils.  internal/pkg: Sharing with Core and Plugins, such as api and utils. internal/satellite: The core of Satellite.   plugins: Contains all plugins.  plugins/{type}: Contains the plugins of this {type}. Satellite has 9 plugin types. plugins/{type}/api: Contains the plugin definition and initializer. plugins/{type}/{plugin-name}: Contains the specific plugin. init.go: Register the plugins to the plugin registry.    . ├── CHANGES.md ├── cmd ├── configs ├── docs ├── go.sum ├── internal │ ├── pkg │ └── satellite ├── plugins │ ├── client │ ├── fallbacker │ ├── fetcher │ ├── filter │ ├── forwarder │ ├── init.go │ ├── parser │ ├── queue │ ├── receiver │ └── server ","title":"Project Structure","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/project_structue/"},{"content":"Prometheus Fetcher Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine.\nConfiguration file Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching services and their instances, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/fetcher-prom-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;duration\u0026gt;: This is parsed into a textual representation of a duration. The formats accepted are based on the ISO-8601 duration format PnDTnHnMn.nS with days considered to be exactly 24 hours. \u0026lt;labelname\u0026gt;: A string matching the regular expression [a-zA-Z_][a-zA-Z0-9_]*. \u0026lt;labelvalue\u0026gt;: A string of unicode characters. \u0026lt;host\u0026gt;: A valid string consisting of a hostname or IP followed by an optional port number. \u0026lt;path\u0026gt;: A valid URL path. \u0026lt;string\u0026gt;: A regular string.  # How frequently to fetch targets.fetcherInterval:\u0026lt;duration\u0026gt;# Per-fetch timeout when fetching this target.fetcherTimeout:\u0026lt;duration\u0026gt;# The HTTP resource path on which to fetch metrics from targets.metricsPath:\u0026lt;path\u0026gt;#Statically configured targets.staticConfig:# The targets specified by the static config.targets:[- \u0026lt;target\u0026gt; ]# Labels assigned to all metrics fetched from the targets.labels:[ \u0026lt;labelname\u0026gt;:\u0026lt;labelvalue\u0026gt; ... 
]# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ] # The url of target exporter. the format should be complied with \u0026#34;java.net.URI\u0026#34;url:\u0026lt;string\u0026gt;# The path of root CA file.sslCaFilePath:\u0026lt;string\u0026gt;\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;To know more about MAL, please refer to mal.md\nActive Fetcher Rules Suppose you want to enable some metric-custom.yaml files stored at fetcher-prom-rules, append its name to enabledRules of prometheus-fetcher as follows:\nprometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self,metric-custom\u0026#34;}","title":"Prometheus Fetcher","url":"/docs/main/v9.0.0/en/setup/backend/prometheus-metrics/"},{"content":"Prometheus Fetcher Prometheus fetcher reads metrics from the Prometheus endpoint and transfers the metrics into SkyWalking native format for the MAL engine.\nConfiguration file Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching services and their instances, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/fetcher-prom-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;duration\u0026gt;: This is parsed into a textual representation of a duration. The accepted formats are based on the ISO-8601 duration format PnDTnHnMn.nS with days of exactly 24 hours. \u0026lt;labelname\u0026gt;: A string matching the regular expression [a-zA-Z_][a-zA-Z0-9_]*. \u0026lt;labelvalue\u0026gt;: A string of Unicode characters. \u0026lt;host\u0026gt;: A valid string consisting of a hostname or IP followed by an optional port number. \u0026lt;path\u0026gt;: A valid URL path. \u0026lt;string\u0026gt;: A regular string.  # How frequently to fetch targets.fetcherInterval:\u0026lt;duration\u0026gt;# Per-fetch timeout when fetching this target.fetcherTimeout:\u0026lt;duration\u0026gt;# The HTTP resource path on which to fetch metrics from targets.metricsPath:\u0026lt;path\u0026gt;#Statically configured targets.staticConfig:# The targets specified by the static config.targets:[- \u0026lt;target\u0026gt; ]# Labels assigned to all metrics fetched from the targets.labels:[ \u0026lt;labelname\u0026gt;:\u0026lt;labelvalue\u0026gt; ... 
]# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ] # The url of target exporter. the format should be complied with \u0026#34;java.net.URI\u0026#34;url:\u0026lt;string\u0026gt;# The path of root CA file.sslCaFilePath:\u0026lt;string\u0026gt;\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;To know more about MAL, please refer to mal.md\nActive Fetcher Rules Suppose you want to enable some metric-custom.yaml files stored at fetcher-prom-rules, append its name to enabledRules of prometheus-fetcher as follows:\nprometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self,metric-custom\u0026#34;}","title":"Prometheus Fetcher","url":"/docs/main/v9.1.0/en/setup/backend/prometheus-metrics/"},{"content":"Prometheus Fetcher Prometheus fetcher reads metrics from the Prometheus endpoint and transfers the metrics into SkyWalking native format for the MAL engine.\nConfiguration file Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching services and their instances, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/fetcher-prom-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;duration\u0026gt;: This is parsed into a textual representation of a duration. The accepted formats are based on the ISO-8601 duration format PnDTnHnMn.nS with days of exactly 24 hours. \u0026lt;labelname\u0026gt;: A string matching the regular expression [a-zA-Z_][a-zA-Z0-9_]*. \u0026lt;labelvalue\u0026gt;: A string of Unicode characters. \u0026lt;host\u0026gt;: A valid string consisting of a hostname or IP followed by an optional port number. \u0026lt;path\u0026gt;: A valid URL path. \u0026lt;string\u0026gt;: A regular string.  # How frequently to fetch targets.fetcherInterval:\u0026lt;duration\u0026gt;# Per-fetch timeout when fetching this target.fetcherTimeout:\u0026lt;duration\u0026gt;# The HTTP resource path on which to fetch metrics from targets.metricsPath:\u0026lt;path\u0026gt;#Statically configured targets.staticConfig:# The targets specified by the static config.targets:[- \u0026lt;target\u0026gt; ]# Labels assigned to all metrics fetched from the targets.labels:[ \u0026lt;labelname\u0026gt;:\u0026lt;labelvalue\u0026gt; ... 
]# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ] # The url of target exporter. the format should be complied with \u0026#34;java.net.URI\u0026#34;url:\u0026lt;string\u0026gt;# The path of root CA file.sslCaFilePath:\u0026lt;string\u0026gt;\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;To know more about MAL, please refer to mal.md\nActive Fetcher Rules Suppose you want to enable some metric-custom.yaml files stored at fetcher-prom-rules, append its name to enabledRules of prometheus-fetcher as follows:\nprometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self,metric-custom\u0026#34;}","title":"Prometheus Fetcher","url":"/docs/main/v9.2.0/en/setup/backend/prometheus-metrics/"},{"content":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. 
If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. 
no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    
For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  
Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. --top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  
top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","title":"PromQL Service","url":"/docs/main/latest/en/api/promql-service/"},{"content":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. 
If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. 
no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp, if end time is not present, use current time as default end time yes yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector 
yes yes   start start timestamp no yes   end end timestamp, if end time is not present, use current time as default end time yes yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --metric labels: Used to filter the value labels to be returned  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, p=\u0026#39;50,75,90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;p\u0026#34;: \u0026#34;50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;p\u0026#34;: \u0026#34;75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;p\u0026#34;: \u0026#34;90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","title":"PromQL Service","url":"/docs/main/next/en/api/promql-service/"},{"content":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromeQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;scope\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) All scopes are not supported completely, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","title":"PromQL Service","url":"/docs/main/v9.4.0/en/api/promql-service/"},{"content":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
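A further illustrative sketch (an assumption, not taken from the upstream examples): assuming the same localhost:9090 endpoint and Python requests, a range query over the window used in the examples above returns one (timestamp, value) pair per automatically chosen step:
import requests  # assumed HTTP client

# Assumption: PromQL Service at localhost:9090; start/end are Unix seconds as documented above.
resp = requests.get(
    "http://localhost:9090/api/v1/query_range",
    params={
        "query": "service_cpm{service='agent::songs', layer='GENERAL'}",
        "start": 1677479336,
        "end": 1677479636,
    },
)
for series in resp.json()["data"]["result"]:
    print(series["metric"]["service"], series["values"])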
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","title":"PromQL Service","url":"/docs/main/v9.5.0/en/api/promql-service/"},{"content":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","title":"PromQL Service","url":"/docs/main/v9.6.0/en/api/promql-service/"},{"content":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
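The expression and metadata endpoints above can be exercised from any HTTP client. Below is a minimal sketch using only the Python standard library; the base URL is an assumption (the PromQL Service address and port depend on how the OAP server is deployed), and the service/layer names simply reuse the examples above.

```python
# Sketch only: query the PromQL Service HTTP API documented above.
# BASE_URL is an assumption -- replace it with the address of your OAP PromQL endpoint.
import json
import time
import urllib.parse
import urllib.request

BASE_URL = "http://localhost:9090"


def promql_get(path: str, params: dict) -> dict:
    """GET a PromQL Service endpoint and decode the JSON response."""
    url = f"{BASE_URL}{path}?{urllib.parse.urlencode(params)}"
    with urllib.request.urlopen(url) as resp:
        return json.load(resp)


# Instant query: without `time`, the default look-back window is 2 minutes.
instant = promql_get(
    "/api/v1/query",
    {"query": "service_cpm{service='agent::songs', layer='GENERAL'}"},
)

# Range query: `start`/`end` are Unix timestamps in seconds; the step is fitted automatically.
now = int(time.time())
ranged = promql_get(
    "/api/v1/query_range",
    {
        "query": "service_cpm{service='agent::songs', layer='GENERAL'}",
        "start": now - 300,
        "end": now,
    },
)

if instant["status"] == "success":
    for series in instant["data"]["result"]:
        print(series["metric"]["__name__"], series["value"])
```

Remember that the label matching operators only support `=`, so expressions must name the service, layer, and any other labels exactly.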
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","title":"PromQL Service","url":"/docs/main/v9.7.0/en/api/promql-service/"},{"content":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest\n  SendResponse\n  Service\n    banyandb/common/v1/common.proto\n  Group\n  IntervalRule\n  Metadata\n  ResourceOpts\n  Catalog\n  IntervalRule.Unit\n    banyandb/database/v1/database.proto\n  Node\n  Shard\n  Role\n    banyandb/model/v1/common.proto\n  FieldValue\n  Float\n  Int\n  IntArray\n  Str\n  StrArray\n  TagFamilyForWrite\n  TagValue\n  AggregationFunction\n    banyandb/model/v1/query.proto\n  Condition\n  Criteria\n  LogicalExpression\n  QueryOrder\n  Tag\n  TagFamily\n  TagProjection\n  TagProjection.TagFamily\n  TimeRange\n  Condition.BinaryOp\n  LogicalExpression.LogicalOp\n  Sort\n    banyandb/database/v1/schema.proto\n  Entity\n  FieldSpec\n  IndexRule\n  IndexRuleBinding\n  Measure\n  Stream\n  Subject\n  TagFamilySpec\n  TagSpec\n  TopNAggregation\n  CompressionMethod\n  EncodingMethod\n  FieldType\n  IndexRule.Analyzer\n  IndexRule.Location\n  IndexRule.Type\n  TagType\n    banyandb/database/v1/rpc.proto\n  GroupRegistryServiceCreateRequest\n  GroupRegistryServiceCreateResponse\n  GroupRegistryServiceDeleteRequest\n  GroupRegistryServiceDeleteResponse\n  GroupRegistryServiceExistRequest\n  GroupRegistryServiceExistResponse\n  GroupRegistryServiceGetRequest\n  GroupRegistryServiceGetResponse\n  GroupRegistryServiceListRequest\n  GroupRegistryServiceListResponse\n  GroupRegistryServiceUpdateRequest\n  GroupRegistryServiceUpdateResponse\n  IndexRuleBindingRegistryServiceCreateRequest\n  IndexRuleBindingRegistryServiceCreateResponse\n  IndexRuleBindingRegistryServiceDeleteRequest\n  IndexRuleBindingRegistryServiceDeleteResponse\n  IndexRuleBindingRegistryServiceExistRequest\n  IndexRuleBindingRegistryServiceExistResponse\n  IndexRuleBindingRegistryServiceGetRequest\n  IndexRuleBindingRegistryServiceGetResponse\n  IndexRuleBindingRegistryServiceListRequest\n  IndexRuleBindingRegistryServiceListResponse\n  IndexRuleBindingRegistryServiceUpdateRequest\n  IndexRuleBindingRegistryServiceUpdateResponse\n  IndexRuleRegistryServiceCreateRequest\n  IndexRuleRegistryServiceCreateResponse\n  IndexRuleRegistryServiceDeleteRequest\n  IndexRuleRegistryServiceDeleteResponse\n  IndexRuleRegistryServiceExistRequest\n  IndexRuleRegistryServiceExistResponse\n  IndexRuleRegistryServiceGetRequest\n  IndexRuleRegistryServiceGetResponse\n  IndexRuleRegistryServiceListRequest\n  IndexRuleRegistryServiceListResponse\n  IndexRuleRegistryServiceUpdateRequest\n  IndexRuleRegistryServiceUpdateResponse\n  MeasureRegistryServiceCreateRequest\n  MeasureRegistryServiceCreateResponse\n  MeasureRegistryServiceDeleteRequest\n  MeasureRegistryServiceDeleteResponse\n  MeasureRegistryServiceExistRequest\n  MeasureRegistryServiceExistResponse\n  MeasureRegistryServiceGetRequest\n  MeasureRegistryServiceGetResponse\n  MeasureRegistryServiceListRequest\n  MeasureRegistryServiceListResponse\n  MeasureRegistryServiceUpdateRequest\n  MeasureRegistryServiceUpdateResponse\n  StreamRegistryServiceCreateRequest\n  StreamRegistryServiceCreateResponse\n  StreamRegistryServiceDeleteRequest\n  StreamRegistryServiceDeleteResponse\n  StreamRegistryServiceExistRequest\n  StreamRegistryServiceExistResponse\n  
StreamRegistryServiceGetRequest\n  StreamRegistryServiceGetResponse\n  StreamRegistryServiceListRequest\n  StreamRegistryServiceListResponse\n  StreamRegistryServiceUpdateRequest\n  StreamRegistryServiceUpdateResponse\n  TopNAggregationRegistryServiceCreateRequest\n  TopNAggregationRegistryServiceCreateResponse\n  TopNAggregationRegistryServiceDeleteRequest\n  TopNAggregationRegistryServiceDeleteResponse\n  TopNAggregationRegistryServiceExistRequest\n  TopNAggregationRegistryServiceExistResponse\n  TopNAggregationRegistryServiceGetRequest\n  TopNAggregationRegistryServiceGetResponse\n  TopNAggregationRegistryServiceListRequest\n  TopNAggregationRegistryServiceListResponse\n  TopNAggregationRegistryServiceUpdateRequest\n  TopNAggregationRegistryServiceUpdateResponse\n  GroupRegistryService\n  IndexRuleBindingRegistryService\n  IndexRuleRegistryService\n  MeasureRegistryService\n  StreamRegistryService\n  TopNAggregationRegistryService\n    banyandb/measure/v1/query.proto\n DataPoint DataPoint.Field QueryRequest QueryRequest.Aggregation QueryRequest.FieldProjection QueryRequest.GroupBy QueryRequest.Top QueryResponse    banyandb/measure/v1/topn.proto\n TopNList TopNList.Item TopNRequest TopNResponse    banyandb/model/v1/write.proto\n Status    banyandb/measure/v1/write.proto\n DataPointValue InternalWriteRequest WriteRequest WriteResponse    banyandb/measure/v1/rpc.proto\n MeasureService    banyandb/property/v1/property.proto\n Metadata Property    banyandb/property/v1/rpc.proto\n  ApplyRequest\n  ApplyResponse\n  DeleteRequest\n  DeleteResponse\n  GetRequest\n  GetResponse\n  KeepAliveRequest\n  KeepAliveResponse\n  ListRequest\n  ListResponse\n  ApplyRequest.Strategy\n  PropertyService\n    banyandb/stream/v1/query.proto\n Element QueryRequest QueryResponse    banyandb/stream/v1/write.proto\n ElementValue InternalWriteRequest WriteRequest WriteResponse    banyandb/stream/v1/rpc.proto\n StreamService    Scalar Value Types\n  \nTop\nbanyandb/cluster/v1/rpc.proto \nSendRequest    Field Type Label Description     topic string     message_id uint64     body google.protobuf.Any      \nSendResponse    Field Type Label Description     message_id uint64     error string     body google.protobuf.Any      \nService    Method Name Request Type Response Type Description     Send SendRequest stream SendResponse stream     \nTop\nbanyandb/common/v1/common.proto \nGroup Group is an internal object for Group management\n   Field Type Label Description     metadata Metadata  metadata define the group's identity   catalog Catalog  catalog denotes which type of data the group contains   resource_opts ResourceOpts  resourceOpts indicates the structure of the underlying kv storage   updated_at google.protobuf.Timestamp  updated_at indicates when resources of the group are updated    \nIntervalRule IntervalRule is a structured duration\n   Field Type Label Description     unit IntervalRule.Unit  unit can only be UNIT_HOUR or UNIT_DAY   num uint32      \nMetadata Metadata is for multi-tenant, multi-model use\n   Field Type Label Description     group string  group contains a set of options, like retention policy, max   name string  name of the entity   id uint32     create_revision int64  readonly. create_revision is the revision of last creation on this key.   mod_revision int64  readonly. mod_revision is the revision of last modification on this key.    
\nResourceOpts    Field Type Label Description     shard_num uint32  shard_num is the number of shards   block_interval IntervalRule  block_interval indicates the length of a block block_interval should be less than or equal to segment_interval   segment_interval IntervalRule  segment_interval indicates the length of a segment   ttl IntervalRule  ttl indicates time to live, how long the data will be cached    \nCatalog    Name Number Description     CATALOG_UNSPECIFIED 0    CATALOG_STREAM 1    CATALOG_MEASURE 2     \nIntervalRule.Unit    Name Number Description     UNIT_UNSPECIFIED 0    UNIT_HOUR 1    UNIT_DAY 2     \nTop\nbanyandb/database/v1/database.proto \nNode    Field Type Label Description     metadata banyandb.common.v1.Metadata     roles Role repeated    grpc_address string     http_address string     created_at google.protobuf.Timestamp      \nShard    Field Type Label Description     id uint64     metadata banyandb.common.v1.Metadata     catalog banyandb.common.v1.Catalog     node string     total uint32     updated_at google.protobuf.Timestamp     created_at google.protobuf.Timestamp      \nRole    Name Number Description     ROLE_UNSPECIFIED 0    ROLE_META 1    ROLE_DATA 2    ROLE_LIAISON 3     \nTop\nbanyandb/model/v1/common.proto \nFieldValue    Field Type Label Description     null google.protobuf.NullValue     str Str     int Int     binary_data bytes     float Float      \nFloat    Field Type Label Description     value double      \nInt    Field Type Label Description     value int64      \nIntArray    Field Type Label Description     value int64 repeated     \nStr    Field Type Label Description     value string      \nStrArray    Field Type Label Description     value string repeated     \nTagFamilyForWrite    Field Type Label Description     tags TagValue repeated     \nTagValue    Field Type Label Description     null google.protobuf.NullValue     str Str     str_array StrArray     int Int     int_array IntArray     binary_data bytes      \nAggregationFunction    Name Number Description     AGGREGATION_FUNCTION_UNSPECIFIED 0    AGGREGATION_FUNCTION_MEAN 1    AGGREGATION_FUNCTION_MAX 2    AGGREGATION_FUNCTION_MIN 3    AGGREGATION_FUNCTION_COUNT 4    AGGREGATION_FUNCTION_SUM 5     \nTop\nbanyandb/model/v1/query.proto \nCondition Condition consists of the query condition with a single binary operator to be imposed For 1:1 BinaryOp, values in condition must be an array with length = 1, while for 1:N BinaryOp, values can be an array with length \u0026gt;= 1.\n   Field Type Label Description     name string     op Condition.BinaryOp     value TagValue      \nCriteria tag_families are indexed.\n   Field Type Label Description     le LogicalExpression     condition Condition      \nLogicalExpression LogicalExpression supports logical operation\n   Field Type Label Description     op LogicalExpression.LogicalOp  op is a logical operation   left Criteria     right Criteria      \nQueryOrder QueryOrder means a Sort operation to be done for a given index rule. The index_rule_name refers to the name of a index rule bound to the subject.\n   Field Type Label Description     index_rule_name string     sort Sort      \nTag Pair is the building block of a record which is equivalent to a key-value pair. In the context of Trace, it could be metadata of a trace such as service_name, service_instance, etc. Besides, other tags are organized in key-value pair in the underlying storage layer. 
One should notice that the values can be a multi-value.\n   Field Type Label Description     key string     value TagValue      \nTagFamily    Field Type Label Description     name string     tags Tag repeated     \nTagProjection TagProjection is used to select the names of keys to be returned.\n   Field Type Label Description     tag_families TagProjection.TagFamily repeated     \nTagProjection.TagFamily    Field Type Label Description     name string     tags string repeated     \nTimeRange TimeRange is a range query for uint64, the range here follows left-inclusive and right-exclusive rule, i.e. [begin, end) if both edges exist\n   Field Type Label Description     begin google.protobuf.Timestamp     end google.protobuf.Timestamp      \nCondition.BinaryOp BinaryOp specifies the operation imposed to the given query condition For EQ, NE, LT, GT, LE and GE, only one operand should be given, i.e. one-to-one relationship. HAVING and NOT_HAVING allow multi-value to be the operand such as array/vector, i.e. one-to-many relationship. For example, \u0026quot;keyA\u0026quot; contains \u0026quot;valueA\u0026quot; and \u0026quot;valueB\u0026quot; MATCH performances a full-text search if the tag is analyzed. The string value applies to the same analyzer as the tag, but string array value does not. Each item in a string array is seen as a token instead of a query expression.\n   Name Number Description     BINARY_OP_UNSPECIFIED 0    BINARY_OP_EQ 1    BINARY_OP_NE 2    BINARY_OP_LT 3    BINARY_OP_GT 4    BINARY_OP_LE 5    BINARY_OP_GE 6    BINARY_OP_HAVING 7    BINARY_OP_NOT_HAVING 8    BINARY_OP_IN 9    BINARY_OP_NOT_IN 10    BINARY_OP_MATCH 11     \nLogicalExpression.LogicalOp    Name Number Description     LOGICAL_OP_UNSPECIFIED 0    LOGICAL_OP_AND 1    LOGICAL_OP_OR 2     \nSort    Name Number Description     SORT_UNSPECIFIED 0    SORT_DESC 1    SORT_ASC 2     \nTop\nbanyandb/database/v1/schema.proto \nEntity    Field Type Label Description     tag_names string repeated     \nFieldSpec FieldSpec is the specification of field\n   Field Type Label Description     name string  name is the identity of a field   field_type FieldType  field_type denotes the type of field value   encoding_method EncodingMethod  encoding_method indicates how to encode data during writing   compression_method CompressionMethod  compression_method indicates how to compress data during writing    \nIndexRule IndexRule defines how to generate indices based on tags and the index type IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata define the rule's identity   tags string repeated tags are the combination that refers to an indexed object If the elements in tags are more than 1, the object will generate a multi-tag index Caveat: All tags in a multi-tag MUST have an identical IndexType   type IndexRule.Type  type is the IndexType of this IndexObject.   location IndexRule.Location  location indicates where to store index.   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRule is updated   analyzer IndexRule.Analyzer  analyzer analyzes tag value to support the full-text searching for TYPE_INVERTED indices.    
\nIndexRuleBinding IndexRuleBinding is a bridge to connect severalIndexRules to a subject This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of this binding   rules string repeated rules refers to the IndexRule   subject Subject  subject indicates the subject of binding action   begin_at google.protobuf.Timestamp  begin_at_nanoseconds is the timestamp, after which the binding will be active   expire_at google.protobuf.Timestamp  expire_at_nanoseconds it the timestamp, after which the binding will be inactive expire_at_nanoseconds must be larger than begin_at_nanoseconds   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRuleBinding is updated    \nMeasure Measure intends to store data point\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a measure   tag_families TagFamilySpec repeated tag_families are for filter measures   fields FieldSpec repeated fields denote measure values   entity Entity  entity indicates which tags will be to generate a series and shard a measure   interval string  interval indicates how frequently to send a data point valid time units are \u0026quot;ns\u0026quot;, \u0026quot;us\u0026quot; (or \u0026quot;µs\u0026quot;), \u0026quot;ms\u0026quot;, \u0026quot;s\u0026quot;, \u0026quot;m\u0026quot;, \u0026quot;h\u0026quot;, \u0026quot;d\u0026quot;.   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nStream Stream intends to store streaming data, for example, traces or logs\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a trace series   tag_families TagFamilySpec repeated tag_families   entity Entity  entity indicates how to generate a series and shard a stream   updated_at google.protobuf.Timestamp  updated_at indicates when the stream is updated    \nSubject Subject defines which stream or measure would generate indices\n   Field Type Label Description     catalog banyandb.common.v1.Catalog  catalog is where the subject belongs to todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   name string  name refers to a stream or measure in a particular catalog    \nTagFamilySpec    Field Type Label Description     name string     tags TagSpec repeated tags defines accepted tags    \nTagSpec    Field Type Label Description     name string     type TagType     indexed_only bool  indexed_only indicates whether the tag is stored True: It's indexed only, but not stored False: it's stored and indexed    \nTopNAggregation TopNAggregation generates offline TopN statistics for a measure's TopN approximation\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of an aggregation   source_measure banyandb.common.v1.Metadata  source_measure denotes the data source of this aggregation   field_name string  field_name is the name of field used for ranking   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN + bottomN todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   group_by_tag_names string repeated group_by_tag_names groups data points into statistical counters   criteria banyandb.model.v1.Criteria  criteria 
select partial data points from measure   counters_number int32  counters_number sets the number of counters to be tracked. The default value is 1000   lru_size int32  lru_size defines how much entry is allowed to be maintained in the memory   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nCompressionMethod    Name Number Description     COMPRESSION_METHOD_UNSPECIFIED 0    COMPRESSION_METHOD_ZSTD 1     \nEncodingMethod    Name Number Description     ENCODING_METHOD_UNSPECIFIED 0    ENCODING_METHOD_GORILLA 1     \nFieldType    Name Number Description     FIELD_TYPE_UNSPECIFIED 0    FIELD_TYPE_STRING 1    FIELD_TYPE_INT 2    FIELD_TYPE_DATA_BINARY 3    FIELD_TYPE_FLOAT 4     \nIndexRule.Analyzer    Name Number Description     ANALYZER_UNSPECIFIED 0    ANALYZER_KEYWORD 1 Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.   ANALYZER_STANDARD 2 Standard analyzer provides grammar based tokenization   ANALYZER_SIMPLE 3 Simple analyzer breaks text into tokens at any non-letter character, such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, and changes uppercase to lowercase.    \nIndexRule.Location    Name Number Description     LOCATION_UNSPECIFIED 0    LOCATION_SERIES 1    LOCATION_GLOBAL 2     \nIndexRule.Type Type determine the index structure under the hood\n   Name Number Description     TYPE_UNSPECIFIED 0    TYPE_TREE 1    TYPE_INVERTED 2     \nTagType    Name Number Description     TAG_TYPE_UNSPECIFIED 0    TAG_TYPE_STRING 1    TAG_TYPE_INT 2    TAG_TYPE_STRING_ARRAY 3    TAG_TYPE_INT_ARRAY 4    TAG_TYPE_DATA_BINARY 5     \nTop\nbanyandb/database/v1/rpc.proto \nGroupRegistryServiceCreateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceCreateResponse \nGroupRegistryServiceDeleteRequest    Field Type Label Description     group string      \nGroupRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nGroupRegistryServiceExistRequest    Field Type Label Description     group string      \nGroupRegistryServiceExistResponse    Field Type Label Description     has_group bool      \nGroupRegistryServiceGetRequest    Field Type Label Description     group string      \nGroupRegistryServiceGetResponse    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceListRequest \nGroupRegistryServiceListResponse    Field Type Label Description     group banyandb.common.v1.Group repeated     \nGroupRegistryServiceUpdateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceUpdateResponse \nIndexRuleBindingRegistryServiceCreateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceCreateResponse \nIndexRuleBindingRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleBindingRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule_binding bool      \nIndexRuleBindingRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceGetResponse    Field Type Label Description 
    index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleBindingRegistryServiceListResponse    Field Type Label Description     index_rule_binding IndexRuleBinding repeated     \nIndexRuleBindingRegistryServiceUpdateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceUpdateResponse \nIndexRuleRegistryServiceCreateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceCreateResponse \nIndexRuleRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule bool      \nIndexRuleRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceGetResponse    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleRegistryServiceListResponse    Field Type Label Description     index_rule IndexRule repeated     \nIndexRuleRegistryServiceUpdateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceUpdateResponse \nMeasureRegistryServiceCreateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nMeasureRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nMeasureRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_measure bool      \nMeasureRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceGetResponse    Field Type Label Description     measure Measure      \nMeasureRegistryServiceListRequest    Field Type Label Description     group string      \nMeasureRegistryServiceListResponse    Field Type Label Description     measure Measure repeated     \nMeasureRegistryServiceUpdateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceCreateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nStreamRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_stream bool      \nStreamRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata 
     \nStreamRegistryServiceGetResponse    Field Type Label Description     stream Stream      \nStreamRegistryServiceListRequest    Field Type Label Description     group string      \nStreamRegistryServiceListResponse    Field Type Label Description     stream Stream repeated     \nStreamRegistryServiceUpdateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nTopNAggregationRegistryServiceCreateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceCreateResponse \nTopNAggregationRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nTopNAggregationRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_top_n_aggregation bool      \nTopNAggregationRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceGetResponse    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceListRequest    Field Type Label Description     group string      \nTopNAggregationRegistryServiceListResponse    Field Type Label Description     top_n_aggregation TopNAggregation repeated     \nTopNAggregationRegistryServiceUpdateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceUpdateResponse \nGroupRegistryService    Method Name Request Type Response Type Description     Create GroupRegistryServiceCreateRequest GroupRegistryServiceCreateResponse    Update GroupRegistryServiceUpdateRequest GroupRegistryServiceUpdateResponse    Delete GroupRegistryServiceDeleteRequest GroupRegistryServiceDeleteResponse    Get GroupRegistryServiceGetRequest GroupRegistryServiceGetResponse    List GroupRegistryServiceListRequest GroupRegistryServiceListResponse    Exist GroupRegistryServiceExistRequest GroupRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nIndexRuleBindingRegistryService    Method Name Request Type Response Type Description     Create IndexRuleBindingRegistryServiceCreateRequest IndexRuleBindingRegistryServiceCreateResponse    Update IndexRuleBindingRegistryServiceUpdateRequest IndexRuleBindingRegistryServiceUpdateResponse    Delete IndexRuleBindingRegistryServiceDeleteRequest IndexRuleBindingRegistryServiceDeleteResponse    Get IndexRuleBindingRegistryServiceGetRequest IndexRuleBindingRegistryServiceGetResponse    List IndexRuleBindingRegistryServiceListRequest IndexRuleBindingRegistryServiceListResponse    Exist IndexRuleBindingRegistryServiceExistRequest IndexRuleBindingRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. 
Please use HEAD method to touch Get instead    \nIndexRuleRegistryService    Method Name Request Type Response Type Description     Create IndexRuleRegistryServiceCreateRequest IndexRuleRegistryServiceCreateResponse    Update IndexRuleRegistryServiceUpdateRequest IndexRuleRegistryServiceUpdateResponse    Delete IndexRuleRegistryServiceDeleteRequest IndexRuleRegistryServiceDeleteResponse    Get IndexRuleRegistryServiceGetRequest IndexRuleRegistryServiceGetResponse    List IndexRuleRegistryServiceListRequest IndexRuleRegistryServiceListResponse    Exist IndexRuleRegistryServiceExistRequest IndexRuleRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nMeasureRegistryService    Method Name Request Type Response Type Description     Create MeasureRegistryServiceCreateRequest MeasureRegistryServiceCreateResponse    Update MeasureRegistryServiceUpdateRequest MeasureRegistryServiceUpdateResponse    Delete MeasureRegistryServiceDeleteRequest MeasureRegistryServiceDeleteResponse    Get MeasureRegistryServiceGetRequest MeasureRegistryServiceGetResponse    List MeasureRegistryServiceListRequest MeasureRegistryServiceListResponse    Exist MeasureRegistryServiceExistRequest MeasureRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nStreamRegistryService    Method Name Request Type Response Type Description     Create StreamRegistryServiceCreateRequest StreamRegistryServiceCreateResponse    Update StreamRegistryServiceUpdateRequest StreamRegistryServiceUpdateResponse    Delete StreamRegistryServiceDeleteRequest StreamRegistryServiceDeleteResponse    Get StreamRegistryServiceGetRequest StreamRegistryServiceGetResponse    List StreamRegistryServiceListRequest StreamRegistryServiceListResponse    Exist StreamRegistryServiceExistRequest StreamRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTopNAggregationRegistryService    Method Name Request Type Response Type Description     Create TopNAggregationRegistryServiceCreateRequest TopNAggregationRegistryServiceCreateResponse    Update TopNAggregationRegistryServiceUpdateRequest TopNAggregationRegistryServiceUpdateResponse    Delete TopNAggregationRegistryServiceDeleteRequest TopNAggregationRegistryServiceDeleteResponse    Get TopNAggregationRegistryServiceGetRequest TopNAggregationRegistryServiceGetResponse    List TopNAggregationRegistryServiceListRequest TopNAggregationRegistryServiceListResponse    Exist TopNAggregationRegistryServiceExistRequest TopNAggregationRegistryServiceExistResponse     \nTop\nbanyandb/measure/v1/query.proto \nDataPoint DataPoint is stored in Measures\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamily repeated tag_families contains tags selected in the projection   fields DataPoint.Field repeated fields contains fields selected in the projection    \nDataPoint.Field    Field Type Label Description     name string     value banyandb.model.v1.FieldValue      \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   criteria banyandb.model.v1.Criteria  tag_families are indexed.   
tag_projection banyandb.model.v1.TagProjection  tag_projection can be used to select tags of the data points in the response   field_projection QueryRequest.FieldProjection  field_projection can be used to select fields of the data points in the response   group_by QueryRequest.GroupBy  group_by groups data points based on their field value for a specific tag and use field_name as the projection name   agg QueryRequest.Aggregation  agg aggregates data points based on a field   top QueryRequest.Top  top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by's output   offset uint32  offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top's output   limit uint32  limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top's output   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a tag.    \nQueryRequest.Aggregation    Field Type Label Description     function banyandb.model.v1.AggregationFunction     field_name string  field_name must be one of files indicated by the field_projection    \nQueryRequest.FieldProjection    Field Type Label Description     names string repeated     \nQueryRequest.GroupBy    Field Type Label Description     tag_projection banyandb.model.v1.TagProjection  tag_projection must be a subset of the tag_projection of QueryRequest   field_name string  field_name must be one of fields indicated by field_projection    \nQueryRequest.Top    Field Type Label Description     number int32  number set the how many items should be returned   field_name string  field_name must be one of files indicated by the field_projection   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     data_points DataPoint repeated data_points are the actual data returned    \nTop\nbanyandb/measure/v1/topn.proto \nTopNList TopNList contains a series of topN items\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   items TopNList.Item repeated items contains top-n items in a list    \nTopNList.Item    Field Type Label Description     entity banyandb.model.v1.Tag repeated    value banyandb.model.v1.FieldValue      \nTopNRequest TopNRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   top_n int32  top_n set the how many items should be returned in each list.   agg banyandb.model.v1.AggregationFunction  agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only   conditions banyandb.model.v1.Condition repeated criteria select counters. Only equals are acceptable.   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields    \nTopNResponse TopNResponse is the response for a query to the Query module.\n   Field Type Label Description     lists TopNList repeated lists contain a series topN lists ranked by timestamp if agg_func in query request is specified, lists' size should be one.    
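To show how the measure query messages above compose, here is an illustrative sketch (plain Python data, not a client call): the nesting follows the QueryRequest, TimeRange, Criteria/Condition, TagProjection, and FieldProjection fields documented in this reference, while the group, measure, tag, and field names are hypothetical placeholders. A real application would populate the generated protobuf types from banyandb/measure/v1/query.proto and send them through MeasureService.Query.

```python
# Illustrative only: mirrors the QueryRequest structure documented above.
# "sw_metric", "service_cpm", "default", "service_id", and "value" are hypothetical
# placeholders; a real client builds the generated protobuf messages instead of dicts.
query_request = {
    "metadata": {"group": "sw_metric", "name": "service_cpm"},  # required: which measure to query
    "time_range": {  # TimeRange: [begin, end), shown here as RFC 3339 timestamps
        "begin": "2023-02-27T00:00:00Z",
        "end": "2023-02-27T01:00:00Z",
    },
    "criteria": {  # Criteria holding a single Condition; 1:1 ops such as EQ take exactly one value
        "condition": {
            "name": "service_id",
            "op": "BINARY_OP_EQ",
            "value": {"str": {"value": "agent::songs"}},
        }
    },
    "tag_projection": {  # which tag families/tags to return
        "tag_families": [{"name": "default", "tags": ["service_id"]}]
    },
    "field_projection": {"names": ["value"]},  # which measure fields to return
    "offset": 0,
    "limit": 20,
}
```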
\nTop\nbanyandb/model/v1/write.proto \nStatus Status is the response status for write\n   Name Number Description     STATUS_UNSPECIFIED 0    STATUS_SUCCEED 1    STATUS_INVALID_TIMESTAMP 2    STATUS_NOT_FOUND 3    STATUS_EXPIRED_SCHEMA 4    STATUS_INTERNAL_ERROR 5     \nTop\nbanyandb/measure/v1/write.proto \nDataPointValue DataPointValue is the data point for writing. It only contains values.\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the measure schema   fields banyandb.model.v1.FieldValue repeated the order of fields match the measure schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest WriteRequest is the request contract for write\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   data_point DataPointValue  the data_point is required.   message_id uint64  the message_id is required.    \nWriteResponse WriteResponse is the response contract for write\n   Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/measure/v1/rpc.proto \nMeasureService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream    TopN TopNRequest TopNResponse     \nTop\nbanyandb/property/v1/property.proto \nMetadata Metadata is for multi-tenant use\n   Field Type Label Description     container banyandb.common.v1.Metadata  container is created when it receives the first property   id string  id identifies a property    \nProperty Property stores the user defined data\n   Field Type Label Description     metadata Metadata  metadata is the identity of a property   tags banyandb.model.v1.Tag repeated tag stores the content of a property   updated_at google.protobuf.Timestamp  updated_at indicates when the property is updated   lease_id int64  readonly. lease_id is the ID of the lease that attached to key.   ttl string  ttl indicates the time to live of the property. It's a string in the format of \u0026quot;1h\u0026quot;, \u0026quot;2m\u0026quot;, \u0026quot;3s\u0026quot;, \u0026quot;1500ms\u0026quot;. It defaults to 0s, which means the property never expires. The minimum allowed ttl is 1s.    \nTop\nbanyandb/property/v1/rpc.proto \nApplyRequest    Field Type Label Description     property Property     strategy ApplyRequest.Strategy  strategy indicates how to update a property. It defaults to STRATEGY_MERGE    \nApplyResponse    Field Type Label Description     created bool  created indicates whether the property existed. True: the property is absent. False: the property existed.   
tags_num uint32     lease_id int64      \nDeleteRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nDeleteResponse    Field Type Label Description     deleted bool     tags_num uint32      \nGetRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nGetResponse    Field Type Label Description     property Property      \nKeepAliveRequest    Field Type Label Description     lease_id int64      \nKeepAliveResponse \nListRequest    Field Type Label Description     container banyandb.common.v1.Metadata     ids string repeated    tags string repeated     \nListResponse    Field Type Label Description     property Property repeated     \nApplyRequest.Strategy    Name Number Description     STRATEGY_UNSPECIFIED 0    STRATEGY_MERGE 1    STRATEGY_REPLACE 2     \nPropertyService    Method Name Request Type Response Type Description     Apply ApplyRequest ApplyResponse Apply creates a property if it's absent, or update a existed one based on a strategy.   Delete DeleteRequest DeleteResponse    Get GetRequest GetResponse    List ListRequest ListResponse    KeepAlive KeepAliveRequest KeepAliveResponse     \nTop\nbanyandb/stream/v1/query.proto \nElement Element represents (stream context) a Span defined in Google Dapper paper or equivalently a Segment in Skywalking. (Log context) a log\n   Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp represents a millisecond 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamily repeated fields contains all indexed Field. Some typical names, - stream_id - duration - service_name - service_instance_id - end_time_milliseconds    \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds. In the context of stream, it represents the range of the startTime for spans/segments, while in the context of Log, it means the range of the timestamp(s) for logs. it is always recommended to specify time range for performance reason   offset uint32  offset is used to support pagination, together with the following limit   limit uint32  limit is used to impose a boundary on the number of records being returned   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a field. So far, only fields in the type of Integer are supported   criteria banyandb.model.v1.Criteria  tag_families are indexed.   projection banyandb.model.v1.TagProjection  projection can be used to select the key names of the element in the response    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     elements Element repeated elements are the actual data returned    \nTop\nbanyandb/stream/v1/write.proto \nElementValue    Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds. 
It represents 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the stream schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   element ElementValue  the element is required.   message_id uint64  the message_id is required.    \nWriteResponse    Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/stream/v1/rpc.proto \nStreamService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream     Scalar Value Types    .proto Type Notes C++ Java Python Go C# PHP Ruby     double  double double float float64 double float Float   float  float float float float32 float float Float   int32 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. int32 int int int32 int integer Bignum or Fixnum (as required)   int64 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. int64 long int/long int64 long integer/string Bignum   uint32 Uses variable-length encoding. uint32 int int/long uint32 uint integer Bignum or Fixnum (as required)   uint64 Uses variable-length encoding. uint64 long int/long uint64 ulong integer/string Bignum or Fixnum (as required)   sint32 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. int32 int int int32 int integer Bignum or Fixnum (as required)   sint64 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. int64 long int/long int64 long integer/string Bignum   fixed32 Always four bytes. More efficient than uint32 if values are often greater than 2^28. uint32 int int uint32 uint integer Bignum or Fixnum (as required)   fixed64 Always eight bytes. More efficient than uint64 if values are often greater than 2^56. uint64 long int/long uint64 ulong integer/string Bignum   sfixed32 Always four bytes. int32 int int int32 int integer Bignum or Fixnum (as required)   sfixed64 Always eight bytes. int64 long int/long int64 long integer/string Bignum   bool  bool boolean boolean bool bool boolean TrueClass/FalseClass   string A string must always contain UTF-8 encoded or 7-bit ASCII text. string String str/unicode string string string String (UTF-8)   bytes May contain any arbitrary sequence of bytes. 
string ByteString str []byte ByteString string String (ASCII-8BIT)    ","title":"Protocol Documentation","url":"/docs/skywalking-banyandb/latest/api-reference/"},{"content":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest\n  SendResponse\n  Service\n    banyandb/common/v1/common.proto\n  Group\n  IntervalRule\n  Metadata\n  ResourceOpts\n  Catalog\n  IntervalRule.Unit\n    banyandb/database/v1/database.proto\n  Node\n  Shard\n  Role\n    banyandb/model/v1/common.proto\n  FieldValue\n  Float\n  Int\n  IntArray\n  Str\n  StrArray\n  TagFamilyForWrite\n  TagValue\n  AggregationFunction\n    banyandb/model/v1/query.proto\n  Condition\n  Criteria\n  LogicalExpression\n  QueryOrder\n  Tag\n  TagFamily\n  TagProjection\n  TagProjection.TagFamily\n  TimeRange\n  Condition.BinaryOp\n  LogicalExpression.LogicalOp\n  Sort\n    banyandb/database/v1/schema.proto\n  Entity\n  FieldSpec\n  IndexRule\n  IndexRuleBinding\n  Measure\n  Stream\n  Subject\n  TagFamilySpec\n  TagSpec\n  TopNAggregation\n  CompressionMethod\n  EncodingMethod\n  FieldType\n  IndexRule.Analyzer\n  IndexRule.Type\n  TagType\n    banyandb/database/v1/rpc.proto\n  GroupRegistryServiceCreateRequest\n  GroupRegistryServiceCreateResponse\n  GroupRegistryServiceDeleteRequest\n  GroupRegistryServiceDeleteResponse\n  GroupRegistryServiceExistRequest\n  GroupRegistryServiceExistResponse\n  GroupRegistryServiceGetRequest\n  GroupRegistryServiceGetResponse\n  GroupRegistryServiceListRequest\n  GroupRegistryServiceListResponse\n  GroupRegistryServiceUpdateRequest\n  GroupRegistryServiceUpdateResponse\n  IndexRuleBindingRegistryServiceCreateRequest\n  IndexRuleBindingRegistryServiceCreateResponse\n  IndexRuleBindingRegistryServiceDeleteRequest\n  IndexRuleBindingRegistryServiceDeleteResponse\n  IndexRuleBindingRegistryServiceExistRequest\n  IndexRuleBindingRegistryServiceExistResponse\n  IndexRuleBindingRegistryServiceGetRequest\n  IndexRuleBindingRegistryServiceGetResponse\n  IndexRuleBindingRegistryServiceListRequest\n  IndexRuleBindingRegistryServiceListResponse\n  IndexRuleBindingRegistryServiceUpdateRequest\n  IndexRuleBindingRegistryServiceUpdateResponse\n  IndexRuleRegistryServiceCreateRequest\n  IndexRuleRegistryServiceCreateResponse\n  IndexRuleRegistryServiceDeleteRequest\n  IndexRuleRegistryServiceDeleteResponse\n  IndexRuleRegistryServiceExistRequest\n  IndexRuleRegistryServiceExistResponse\n  IndexRuleRegistryServiceGetRequest\n  IndexRuleRegistryServiceGetResponse\n  IndexRuleRegistryServiceListRequest\n  IndexRuleRegistryServiceListResponse\n  IndexRuleRegistryServiceUpdateRequest\n  IndexRuleRegistryServiceUpdateResponse\n  MeasureRegistryServiceCreateRequest\n  MeasureRegistryServiceCreateResponse\n  MeasureRegistryServiceDeleteRequest\n  MeasureRegistryServiceDeleteResponse\n  MeasureRegistryServiceExistRequest\n  MeasureRegistryServiceExistResponse\n  MeasureRegistryServiceGetRequest\n  MeasureRegistryServiceGetResponse\n  MeasureRegistryServiceListRequest\n  MeasureRegistryServiceListResponse\n  MeasureRegistryServiceUpdateRequest\n  MeasureRegistryServiceUpdateResponse\n  StreamRegistryServiceCreateRequest\n  StreamRegistryServiceCreateResponse\n  StreamRegistryServiceDeleteRequest\n  StreamRegistryServiceDeleteResponse\n  StreamRegistryServiceExistRequest\n  StreamRegistryServiceExistResponse\n  StreamRegistryServiceGetRequest\n  StreamRegistryServiceGetResponse\n  StreamRegistryServiceListRequest\n  StreamRegistryServiceListResponse\n  StreamRegistryServiceUpdateRequest\n  
StreamRegistryServiceUpdateResponse\n  TopNAggregationRegistryServiceCreateRequest\n  TopNAggregationRegistryServiceCreateResponse\n  TopNAggregationRegistryServiceDeleteRequest\n  TopNAggregationRegistryServiceDeleteResponse\n  TopNAggregationRegistryServiceExistRequest\n  TopNAggregationRegistryServiceExistResponse\n  TopNAggregationRegistryServiceGetRequest\n  TopNAggregationRegistryServiceGetResponse\n  TopNAggregationRegistryServiceListRequest\n  TopNAggregationRegistryServiceListResponse\n  TopNAggregationRegistryServiceUpdateRequest\n  TopNAggregationRegistryServiceUpdateResponse\n  GroupRegistryService\n  IndexRuleBindingRegistryService\n  IndexRuleRegistryService\n  MeasureRegistryService\n  StreamRegistryService\n  TopNAggregationRegistryService\n    banyandb/measure/v1/query.proto\n DataPoint DataPoint.Field QueryRequest QueryRequest.Aggregation QueryRequest.FieldProjection QueryRequest.GroupBy QueryRequest.Top QueryResponse    banyandb/measure/v1/topn.proto\n TopNList TopNList.Item TopNRequest TopNResponse    banyandb/model/v1/write.proto\n Status    banyandb/measure/v1/write.proto\n DataPointValue InternalWriteRequest WriteRequest WriteResponse    banyandb/measure/v1/rpc.proto\n MeasureService    banyandb/property/v1/property.proto\n Metadata Property    banyandb/property/v1/rpc.proto\n  ApplyRequest\n  ApplyResponse\n  DeleteRequest\n  DeleteResponse\n  GetRequest\n  GetResponse\n  KeepAliveRequest\n  KeepAliveResponse\n  ListRequest\n  ListResponse\n  ApplyRequest.Strategy\n  PropertyService\n    banyandb/stream/v1/query.proto\n Element QueryRequest QueryResponse    banyandb/stream/v1/write.proto\n ElementValue InternalWriteRequest WriteRequest WriteResponse    banyandb/stream/v1/rpc.proto\n StreamService    Scalar Value Types\n  \nTop\nbanyandb/cluster/v1/rpc.proto \nSendRequest    Field Type Label Description     topic string     message_id uint64     body google.protobuf.Any     batch_mod bool      \nSendResponse    Field Type Label Description     message_id uint64     error string     body google.protobuf.Any      \nService    Method Name Request Type Response Type Description     Send SendRequest stream SendResponse stream     \nTop\nbanyandb/common/v1/common.proto \nGroup Group is an internal object for Group management\n   Field Type Label Description     metadata Metadata  metadata define the group's identity   catalog Catalog  catalog denotes which type of data the group contains   resource_opts ResourceOpts  resourceOpts indicates the structure of the underlying kv storage   updated_at google.protobuf.Timestamp  updated_at indicates when resources of the group are updated    \nIntervalRule IntervalRule is a structured duration\n   Field Type Label Description     unit IntervalRule.Unit  unit can only be UNIT_HOUR or UNIT_DAY   num uint32      \nMetadata Metadata is for multi-tenant, multi-model use\n   Field Type Label Description     group string  group contains a set of options, like retention policy, max   name string  name of the entity   id uint32  id is the unique identifier of the entity if id is not set, the system will generate a unique id   create_revision int64  readonly. create_revision is the revision of last creation on this key.   mod_revision int64  readonly. mod_revision is the revision of last modification on this key.    
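For illustration only, the following sketch in protobuf text format shows how a Group ties together the Metadata, Catalog, and IntervalRule messages described above with the ResourceOpts message documented below; the group name and numbers are hypothetical and are not taken from the BanyanDB sources.

metadata { name: "sw_stream_group" }           # Metadata: the group's identity
catalog: CATALOG_STREAM                        # the group stores stream data
resource_opts {
  shard_num: 2                                 # number of shards
  segment_interval { unit: UNIT_DAY num: 1 }   # IntervalRule: one-day segments
  ttl { unit: UNIT_DAY num: 7 }                # IntervalRule: keep data for seven days
}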
\nResourceOpts    Field Type Label Description     shard_num uint32  shard_num is the number of shards   segment_interval IntervalRule  segment_interval indicates the length of a segment   ttl IntervalRule  ttl indicates time to live, how long the data will be cached    \nCatalog    Name Number Description     CATALOG_UNSPECIFIED 0    CATALOG_STREAM 1    CATALOG_MEASURE 2     \nIntervalRule.Unit    Name Number Description     UNIT_UNSPECIFIED 0    UNIT_HOUR 1    UNIT_DAY 2     \nTop\nbanyandb/database/v1/database.proto \nNode    Field Type Label Description     metadata banyandb.common.v1.Metadata     roles Role repeated    grpc_address string     http_address string     created_at google.protobuf.Timestamp      \nShard    Field Type Label Description     id uint64     metadata banyandb.common.v1.Metadata     catalog banyandb.common.v1.Catalog     node string     total uint32     updated_at google.protobuf.Timestamp     created_at google.protobuf.Timestamp      \nRole    Name Number Description     ROLE_UNSPECIFIED 0    ROLE_META 1    ROLE_DATA 2    ROLE_LIAISON 3     \nTop\nbanyandb/model/v1/common.proto \nFieldValue    Field Type Label Description     null google.protobuf.NullValue     str Str     int Int     binary_data bytes     float Float      \nFloat    Field Type Label Description     value double      \nInt    Field Type Label Description     value int64      \nIntArray    Field Type Label Description     value int64 repeated     \nStr    Field Type Label Description     value string      \nStrArray    Field Type Label Description     value string repeated     \nTagFamilyForWrite    Field Type Label Description     tags TagValue repeated     \nTagValue    Field Type Label Description     null google.protobuf.NullValue     str Str     str_array StrArray     int Int     int_array IntArray     binary_data bytes      \nAggregationFunction    Name Number Description     AGGREGATION_FUNCTION_UNSPECIFIED 0    AGGREGATION_FUNCTION_MEAN 1    AGGREGATION_FUNCTION_MAX 2    AGGREGATION_FUNCTION_MIN 3    AGGREGATION_FUNCTION_COUNT 4    AGGREGATION_FUNCTION_SUM 5     \nTop\nbanyandb/model/v1/query.proto \nCondition Condition consists of the query condition with a single binary operator to be imposed For 1:1 BinaryOp, values in condition must be an array with length = 1, while for 1:N BinaryOp, values can be an array with length \u0026gt;= 1.\n   Field Type Label Description     name string     op Condition.BinaryOp     value TagValue      \nCriteria tag_families are indexed.\n   Field Type Label Description     le LogicalExpression     condition Condition      \nLogicalExpression LogicalExpression supports logical operation\n   Field Type Label Description     op LogicalExpression.LogicalOp  op is a logical operation   left Criteria     right Criteria      \nQueryOrder QueryOrder means a Sort operation to be done for a given index rule. The index_rule_name refers to the name of a index rule bound to the subject.\n   Field Type Label Description     index_rule_name string     sort Sort      \nTag Pair is the building block of a record which is equivalent to a key-value pair. In the context of Trace, it could be metadata of a trace such as service_name, service_instance, etc. Besides, other tags are organized in key-value pair in the underlying storage layer. 
One should notice that the values can be a multi-value.\n   Field Type Label Description     key string     value TagValue      \nTagFamily    Field Type Label Description     name string     tags Tag repeated     \nTagProjection TagProjection is used to select the names of keys to be returned.\n   Field Type Label Description     tag_families TagProjection.TagFamily repeated     \nTagProjection.TagFamily    Field Type Label Description     name string     tags string repeated     \nTimeRange TimeRange is a range query for uint64, the range here follows left-inclusive and right-exclusive rule, i.e. [begin, end) if both edges exist\n   Field Type Label Description     begin google.protobuf.Timestamp     end google.protobuf.Timestamp      \nCondition.BinaryOp BinaryOp specifies the operation imposed to the given query condition For EQ, NE, LT, GT, LE and GE, only one operand should be given, i.e. one-to-one relationship. HAVING and NOT_HAVING allow multi-value to be the operand such as array/vector, i.e. one-to-many relationship. For example, \u0026quot;keyA\u0026quot; contains \u0026quot;valueA\u0026quot; and \u0026quot;valueB\u0026quot; MATCH performances a full-text search if the tag is analyzed. The string value applies to the same analyzer as the tag, but string array value does not. Each item in a string array is seen as a token instead of a query expression.\n   Name Number Description     BINARY_OP_UNSPECIFIED 0    BINARY_OP_EQ 1    BINARY_OP_NE 2    BINARY_OP_LT 3    BINARY_OP_GT 4    BINARY_OP_LE 5    BINARY_OP_GE 6    BINARY_OP_HAVING 7    BINARY_OP_NOT_HAVING 8    BINARY_OP_IN 9    BINARY_OP_NOT_IN 10    BINARY_OP_MATCH 11     \nLogicalExpression.LogicalOp    Name Number Description     LOGICAL_OP_UNSPECIFIED 0    LOGICAL_OP_AND 1    LOGICAL_OP_OR 2     \nSort    Name Number Description     SORT_UNSPECIFIED 0    SORT_DESC 1    SORT_ASC 2     \nTop\nbanyandb/database/v1/schema.proto \nEntity    Field Type Label Description     tag_names string repeated     \nFieldSpec FieldSpec is the specification of field\n   Field Type Label Description     name string  name is the identity of a field   field_type FieldType  field_type denotes the type of field value   encoding_method EncodingMethod  encoding_method indicates how to encode data during writing   compression_method CompressionMethod  compression_method indicates how to compress data during writing    \nIndexRule IndexRule defines how to generate indices based on tags and the index type IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata define the rule's identity   tags string repeated tags are the combination that refers to an indexed object If the elements in tags are more than 1, the object will generate a multi-tag index Caveat: All tags in a multi-tag MUST have an identical IndexType   type IndexRule.Type  type is the IndexType of this IndexObject.   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRule is updated   analyzer IndexRule.Analyzer  analyzer analyzes tag value to support the full-text searching for TYPE_INVERTED indices.    
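As an informal illustration of the IndexRule message documented above (protobuf text format; the group, rule, and tag names are hypothetical, not taken from the BanyanDB sources), a rule that builds an inverted index over a single tag might look like:

metadata { group: "sw_stream_group" name: "trace_id_index" }  # identity of the rule
tags: "trace_id"                # a single-tag index; more entries would create a multi-tag index
type: TYPE_INVERTED             # index structure, see IndexRule.Type below
analyzer: ANALYZER_UNSPECIFIED  # no full-text analysis for this tag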
\nIndexRuleBinding IndexRuleBinding is a bridge to connect several IndexRules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, which provides flexible strategies to control how to generate time series indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of this binding   rules string repeated rules refer to the IndexRules   subject Subject  subject indicates the subject of binding action   begin_at google.protobuf.Timestamp  begin_at_nanoseconds is the timestamp, after which the binding will be active   expire_at google.protobuf.Timestamp  expire_at_nanoseconds is the timestamp, after which the binding will be inactive. expire_at_nanoseconds must be larger than begin_at_nanoseconds   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRuleBinding is updated    \nMeasure Measure intends to store data points\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a measure   tag_families TagFamilySpec repeated tag_families are for filtering measures   fields FieldSpec repeated fields denote measure values   entity Entity  entity indicates which tags will be used to generate a series and shard a measure   interval string  interval indicates how frequently to send a data point. Valid time units are \u0026quot;ns\u0026quot;, \u0026quot;us\u0026quot; (or \u0026quot;µs\u0026quot;), \u0026quot;ms\u0026quot;, \u0026quot;s\u0026quot;, \u0026quot;m\u0026quot;, \u0026quot;h\u0026quot;, \u0026quot;d\u0026quot;.   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nStream Stream intends to store streaming data, for example, traces or logs\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a trace series   tag_families TagFamilySpec repeated tag_families   entity Entity  entity indicates how to generate a series and shard a stream   updated_at google.protobuf.Timestamp  updated_at indicates when the stream is updated    \nSubject Subject defines which stream or measure would generate indices\n   Field Type Label Description     catalog banyandb.common.v1.Catalog  catalog is where the subject belongs to todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   name string  name refers to a stream or measure in a particular catalog    \nTagFamilySpec    Field Type Label Description     name string     tags TagSpec repeated tags define accepted tags    \nTagSpec    Field Type Label Description     name string     type TagType     indexed_only bool  indexed_only indicates whether the tag is stored. True: it's indexed only, but not stored. False: it's stored and indexed    \nTopNAggregation TopNAggregation generates offline TopN statistics for a measure's TopN approximation\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of an aggregation   source_measure banyandb.common.v1.Metadata  source_measure denotes the data source of this aggregation   field_name string  field_name is the name of the field used for ranking   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields. ASC: bottomN; DESC: topN; UNSPECIFIED: topN + bottomN. todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   group_by_tag_names string repeated group_by_tag_names groups data points into statistical counters   criteria banyandb.model.v1.Criteria  criteria 
selects partial data points from the measure   counters_number int32  counters_number sets the number of counters to be tracked. The default value is 1000   lru_size int32  lru_size defines how many entries are allowed to be maintained in memory   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nCompressionMethod    Name Number Description     COMPRESSION_METHOD_UNSPECIFIED 0    COMPRESSION_METHOD_ZSTD 1     \nEncodingMethod    Name Number Description     ENCODING_METHOD_UNSPECIFIED 0    ENCODING_METHOD_GORILLA 1     \nFieldType    Name Number Description     FIELD_TYPE_UNSPECIFIED 0    FIELD_TYPE_STRING 1    FIELD_TYPE_INT 2    FIELD_TYPE_DATA_BINARY 3    FIELD_TYPE_FLOAT 4     \nIndexRule.Analyzer    Name Number Description     ANALYZER_UNSPECIFIED 0    ANALYZER_KEYWORD 1 Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.   ANALYZER_STANDARD 2 Standard analyzer provides grammar-based tokenization   ANALYZER_SIMPLE 3 Simple analyzer breaks text into tokens at any non-letter character, such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, and changes uppercase to lowercase.    \nIndexRule.Type Type determines the index structure under the hood\n   Name Number Description     TYPE_UNSPECIFIED 0    TYPE_INVERTED 1     \nTagType    Name Number Description     TAG_TYPE_UNSPECIFIED 0    TAG_TYPE_STRING 1    TAG_TYPE_INT 2    TAG_TYPE_STRING_ARRAY 3    TAG_TYPE_INT_ARRAY 4    TAG_TYPE_DATA_BINARY 5     \nTop\nbanyandb/database/v1/rpc.proto \nGroupRegistryServiceCreateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceCreateResponse \nGroupRegistryServiceDeleteRequest    Field Type Label Description     group string      \nGroupRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nGroupRegistryServiceExistRequest    Field Type Label Description     group string      \nGroupRegistryServiceExistResponse    Field Type Label Description     has_group bool      \nGroupRegistryServiceGetRequest    Field Type Label Description     group string      \nGroupRegistryServiceGetResponse    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceListRequest \nGroupRegistryServiceListResponse    Field Type Label Description     group banyandb.common.v1.Group repeated     \nGroupRegistryServiceUpdateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceUpdateResponse \nIndexRuleBindingRegistryServiceCreateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceCreateResponse \nIndexRuleBindingRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleBindingRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule_binding bool      \nIndexRuleBindingRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceGetResponse    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceListRequest    Field Type Label Description     group 
string      \nIndexRuleBindingRegistryServiceListResponse    Field Type Label Description     index_rule_binding IndexRuleBinding repeated     \nIndexRuleBindingRegistryServiceUpdateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceUpdateResponse \nIndexRuleRegistryServiceCreateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceCreateResponse \nIndexRuleRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule bool      \nIndexRuleRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceGetResponse    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleRegistryServiceListResponse    Field Type Label Description     index_rule IndexRule repeated     \nIndexRuleRegistryServiceUpdateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceUpdateResponse \nMeasureRegistryServiceCreateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nMeasureRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nMeasureRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_measure bool      \nMeasureRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceGetResponse    Field Type Label Description     measure Measure      \nMeasureRegistryServiceListRequest    Field Type Label Description     group string      \nMeasureRegistryServiceListResponse    Field Type Label Description     measure Measure repeated     \nMeasureRegistryServiceUpdateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceCreateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nStreamRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_stream bool      \nStreamRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceGetResponse    Field Type Label Description     stream Stream      \nStreamRegistryServiceListRequest   
 Field Type Label Description     group string      \nStreamRegistryServiceListResponse    Field Type Label Description     stream Stream repeated     \nStreamRegistryServiceUpdateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nTopNAggregationRegistryServiceCreateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceCreateResponse \nTopNAggregationRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nTopNAggregationRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_top_n_aggregation bool      \nTopNAggregationRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceGetResponse    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceListRequest    Field Type Label Description     group string      \nTopNAggregationRegistryServiceListResponse    Field Type Label Description     top_n_aggregation TopNAggregation repeated     \nTopNAggregationRegistryServiceUpdateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceUpdateResponse \nGroupRegistryService    Method Name Request Type Response Type Description     Create GroupRegistryServiceCreateRequest GroupRegistryServiceCreateResponse    Update GroupRegistryServiceUpdateRequest GroupRegistryServiceUpdateResponse    Delete GroupRegistryServiceDeleteRequest GroupRegistryServiceDeleteResponse    Get GroupRegistryServiceGetRequest GroupRegistryServiceGetResponse    List GroupRegistryServiceListRequest GroupRegistryServiceListResponse    Exist GroupRegistryServiceExistRequest GroupRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nIndexRuleBindingRegistryService    Method Name Request Type Response Type Description     Create IndexRuleBindingRegistryServiceCreateRequest IndexRuleBindingRegistryServiceCreateResponse    Update IndexRuleBindingRegistryServiceUpdateRequest IndexRuleBindingRegistryServiceUpdateResponse    Delete IndexRuleBindingRegistryServiceDeleteRequest IndexRuleBindingRegistryServiceDeleteResponse    Get IndexRuleBindingRegistryServiceGetRequest IndexRuleBindingRegistryServiceGetResponse    List IndexRuleBindingRegistryServiceListRequest IndexRuleBindingRegistryServiceListResponse    Exist IndexRuleBindingRegistryServiceExistRequest IndexRuleBindingRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. 
Please use HEAD method to touch Get instead    \nIndexRuleRegistryService    Method Name Request Type Response Type Description     Create IndexRuleRegistryServiceCreateRequest IndexRuleRegistryServiceCreateResponse    Update IndexRuleRegistryServiceUpdateRequest IndexRuleRegistryServiceUpdateResponse    Delete IndexRuleRegistryServiceDeleteRequest IndexRuleRegistryServiceDeleteResponse    Get IndexRuleRegistryServiceGetRequest IndexRuleRegistryServiceGetResponse    List IndexRuleRegistryServiceListRequest IndexRuleRegistryServiceListResponse    Exist IndexRuleRegistryServiceExistRequest IndexRuleRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nMeasureRegistryService    Method Name Request Type Response Type Description     Create MeasureRegistryServiceCreateRequest MeasureRegistryServiceCreateResponse    Update MeasureRegistryServiceUpdateRequest MeasureRegistryServiceUpdateResponse    Delete MeasureRegistryServiceDeleteRequest MeasureRegistryServiceDeleteResponse    Get MeasureRegistryServiceGetRequest MeasureRegistryServiceGetResponse    List MeasureRegistryServiceListRequest MeasureRegistryServiceListResponse    Exist MeasureRegistryServiceExistRequest MeasureRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nStreamRegistryService    Method Name Request Type Response Type Description     Create StreamRegistryServiceCreateRequest StreamRegistryServiceCreateResponse    Update StreamRegistryServiceUpdateRequest StreamRegistryServiceUpdateResponse    Delete StreamRegistryServiceDeleteRequest StreamRegistryServiceDeleteResponse    Get StreamRegistryServiceGetRequest StreamRegistryServiceGetResponse    List StreamRegistryServiceListRequest StreamRegistryServiceListResponse    Exist StreamRegistryServiceExistRequest StreamRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTopNAggregationRegistryService    Method Name Request Type Response Type Description     Create TopNAggregationRegistryServiceCreateRequest TopNAggregationRegistryServiceCreateResponse    Update TopNAggregationRegistryServiceUpdateRequest TopNAggregationRegistryServiceUpdateResponse    Delete TopNAggregationRegistryServiceDeleteRequest TopNAggregationRegistryServiceDeleteResponse    Get TopNAggregationRegistryServiceGetRequest TopNAggregationRegistryServiceGetResponse    List TopNAggregationRegistryServiceListRequest TopNAggregationRegistryServiceListResponse    Exist TopNAggregationRegistryServiceExistRequest TopNAggregationRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTop\nbanyandb/measure/v1/query.proto \nDataPoint DataPoint is stored in Measures\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamily repeated tag_families contains tags selected in the projection   fields DataPoint.Field repeated fields contains fields selected in the projection    \nDataPoint.Field    Field Type Label Description     name string     value banyandb.model.v1.FieldValue      \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     groups string repeated groups indicate where the data points are stored.   name string  name is the identity of a measure.   
time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   criteria banyandb.model.v1.Criteria  tag_families are indexed.   tag_projection banyandb.model.v1.TagProjection  tag_projection can be used to select tags of the data points in the response   field_projection QueryRequest.FieldProjection  field_projection can be used to select fields of the data points in the response   group_by QueryRequest.GroupBy  group_by groups data points based on their field value for a specific tag and uses field_name as the projection name   agg QueryRequest.Aggregation  agg aggregates data points based on a field   top QueryRequest.Top  top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by's output   offset uint32  offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top's output   limit uint32  limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top's output   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a tag.    \nQueryRequest.Aggregation    Field Type Label Description     function banyandb.model.v1.AggregationFunction     field_name string  field_name must be one of the fields indicated by the field_projection    \nQueryRequest.FieldProjection    Field Type Label Description     names string repeated     \nQueryRequest.GroupBy    Field Type Label Description     tag_projection banyandb.model.v1.TagProjection  tag_projection must be a subset of the tag_projection of QueryRequest   field_name string  field_name must be one of the fields indicated by field_projection    \nQueryRequest.Top    Field Type Label Description     number int32  number sets how many items should be returned   field_name string  field_name must be one of the fields indicated by the field_projection   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields. ASC: bottomN; DESC: topN; UNSPECIFIED: topN    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     data_points DataPoint repeated data_points are the actual data returned    \nTop\nbanyandb/measure/v1/topn.proto \nTopNList TopNList contains a series of topN items\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   items TopNList.Item repeated items contains top-n items in a list    \nTopNList.Item    Field Type Label Description     entity banyandb.model.v1.Tag repeated    value banyandb.model.v1.FieldValue      \nTopNRequest TopNRequest is the request contract for query.\n   Field Type Label Description     groups string repeated groups indicate where the data points are stored.   name string  name is the identity of a measure.   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   top_n int32  top_n sets how many items should be returned in each list.   agg banyandb.model.v1.AggregationFunction  agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only   conditions banyandb.model.v1.Condition repeated criteria select counters. Only equality conditions are acceptable.   
field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields    \nTopNResponse TopNResponse is the response for a query to the Query module.\n   Field Type Label Description     lists TopNList repeated lists contain a series topN lists ranked by timestamp if agg_func in query request is specified, lists' size should be one.    \nTop\nbanyandb/model/v1/write.proto \nStatus Status is the response status for write\n   Name Number Description     STATUS_UNSPECIFIED 0    STATUS_SUCCEED 1    STATUS_INVALID_TIMESTAMP 2    STATUS_NOT_FOUND 3    STATUS_EXPIRED_SCHEMA 4    STATUS_INTERNAL_ERROR 5     \nTop\nbanyandb/measure/v1/write.proto \nDataPointValue DataPointValue is the data point for writing. It only contains values.\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the measure schema   fields banyandb.model.v1.FieldValue repeated the order of fields match the measure schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest WriteRequest is the request contract for write\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   data_point DataPointValue  the data_point is required.   message_id uint64  the message_id is required.    \nWriteResponse WriteResponse is the response contract for write\n   Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/measure/v1/rpc.proto \nMeasureService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream    TopN TopNRequest TopNResponse     \nTop\nbanyandb/property/v1/property.proto \nMetadata Metadata is for multi-tenant use\n   Field Type Label Description     container banyandb.common.v1.Metadata  container is created when it receives the first property   id string  id identifies a property    \nProperty Property stores the user defined data\n   Field Type Label Description     metadata Metadata  metadata is the identity of a property   tags banyandb.model.v1.Tag repeated tag stores the content of a property   updated_at google.protobuf.Timestamp  updated_at indicates when the property is updated   lease_id int64  readonly. lease_id is the ID of the lease that attached to key.   ttl string  ttl indicates the time to live of the property. It's a string in the format of \u0026quot;1h\u0026quot;, \u0026quot;2m\u0026quot;, \u0026quot;3s\u0026quot;, \u0026quot;1500ms\u0026quot;. It defaults to 0s, which means the property never expires. The minimum allowed ttl is 1s.    \nTop\nbanyandb/property/v1/rpc.proto \nApplyRequest    Field Type Label Description     property Property     strategy ApplyRequest.Strategy  strategy indicates how to update a property. It defaults to STRATEGY_MERGE    \nApplyResponse    Field Type Label Description     created bool  created indicates whether the property existed. True: the property is absent. False: the property existed.   
tags_num uint32     lease_id int64      \nDeleteRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nDeleteResponse    Field Type Label Description     deleted bool     tags_num uint32      \nGetRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nGetResponse    Field Type Label Description     property Property      \nKeepAliveRequest    Field Type Label Description     lease_id int64      \nKeepAliveResponse \nListRequest    Field Type Label Description     container banyandb.common.v1.Metadata     ids string repeated    tags string repeated     \nListResponse    Field Type Label Description     property Property repeated     \nApplyRequest.Strategy    Name Number Description     STRATEGY_UNSPECIFIED 0    STRATEGY_MERGE 1    STRATEGY_REPLACE 2     \nPropertyService    Method Name Request Type Response Type Description     Apply ApplyRequest ApplyResponse Apply creates a property if it's absent, or update a existed one based on a strategy.   Delete DeleteRequest DeleteResponse    Get GetRequest GetResponse    List ListRequest ListResponse    KeepAlive KeepAliveRequest KeepAliveResponse     \nTop\nbanyandb/stream/v1/query.proto \nElement Element represents (stream context) a Span defined in Google Dapper paper or equivalently a Segment in Skywalking. (Log context) a log\n   Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp represents a millisecond 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamily repeated fields contains all indexed Field. Some typical names, - stream_id - duration - service_name - service_instance_id - end_time_milliseconds    \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     groups string repeated groups indicate where the elements are stored.   name string  name is the identity of a stream.   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds. In the context of stream, it represents the range of the startTime for spans/segments, while in the context of Log, it means the range of the timestamp(s) for logs. it is always recommended to specify time range for performance reason   offset uint32  offset is used to support pagination, together with the following limit   limit uint32  limit is used to impose a boundary on the number of records being returned   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a field. So far, only fields in the type of Integer are supported   criteria banyandb.model.v1.Criteria  tag_families are indexed.   projection banyandb.model.v1.TagProjection  projection can be used to select the key names of the element in the response    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     elements Element repeated elements are the actual data returned    \nTop\nbanyandb/stream/v1/write.proto \nElementValue    Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds. 
It represents 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the stream schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   element ElementValue  the element is required.   message_id uint64  the message_id is required.    \nWriteResponse    Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/stream/v1/rpc.proto \nStreamService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream     Scalar Value Types    .proto Type Notes C++ Java Python Go C# PHP Ruby     double  double double float float64 double float Float   float  float float float float32 float float Float   int32 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. int32 int int int32 int integer Bignum or Fixnum (as required)   int64 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. int64 long int/long int64 long integer/string Bignum   uint32 Uses variable-length encoding. uint32 int int/long uint32 uint integer Bignum or Fixnum (as required)   uint64 Uses variable-length encoding. uint64 long int/long uint64 ulong integer/string Bignum or Fixnum (as required)   sint32 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. int32 int int int32 int integer Bignum or Fixnum (as required)   sint64 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. int64 long int/long int64 long integer/string Bignum   fixed32 Always four bytes. More efficient than uint32 if values are often greater than 2^28. uint32 int int uint32 uint integer Bignum or Fixnum (as required)   fixed64 Always eight bytes. More efficient than uint64 if values are often greater than 2^56. uint64 long int/long uint64 ulong integer/string Bignum   sfixed32 Always four bytes. int32 int int int32 int integer Bignum or Fixnum (as required)   sfixed64 Always eight bytes. int64 long int/long int64 long integer/string Bignum   bool  bool boolean boolean bool bool boolean TrueClass/FalseClass   string A string must always contain UTF-8 encoded or 7-bit ASCII text. string String str/unicode string string string String (UTF-8)   bytes May contain any arbitrary sequence of bytes. 
string ByteString str []byte ByteString string String (ASCII-8BIT)    ","title":"Protocol Documentation","url":"/docs/skywalking-banyandb/next/api-reference/"},{"content":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest\n  SendResponse\n  Service\n    banyandb/common/v1/common.proto\n  Group\n  IntervalRule\n  Metadata\n  ResourceOpts\n  Catalog\n  IntervalRule.Unit\n    banyandb/database/v1/database.proto\n  Node\n  Shard\n  Role\n    banyandb/model/v1/common.proto\n  FieldValue\n  Float\n  Int\n  IntArray\n  Str\n  StrArray\n  TagFamilyForWrite\n  TagValue\n  AggregationFunction\n    banyandb/model/v1/query.proto\n  Condition\n  Criteria\n  LogicalExpression\n  QueryOrder\n  Tag\n  TagFamily\n  TagProjection\n  TagProjection.TagFamily\n  TimeRange\n  Condition.BinaryOp\n  LogicalExpression.LogicalOp\n  Sort\n    banyandb/database/v1/schema.proto\n  Entity\n  FieldSpec\n  IndexRule\n  IndexRuleBinding\n  Measure\n  Stream\n  Subject\n  TagFamilySpec\n  TagSpec\n  TopNAggregation\n  CompressionMethod\n  EncodingMethod\n  FieldType\n  IndexRule.Analyzer\n  IndexRule.Location\n  IndexRule.Type\n  TagType\n    banyandb/database/v1/rpc.proto\n  GroupRegistryServiceCreateRequest\n  GroupRegistryServiceCreateResponse\n  GroupRegistryServiceDeleteRequest\n  GroupRegistryServiceDeleteResponse\n  GroupRegistryServiceExistRequest\n  GroupRegistryServiceExistResponse\n  GroupRegistryServiceGetRequest\n  GroupRegistryServiceGetResponse\n  GroupRegistryServiceListRequest\n  GroupRegistryServiceListResponse\n  GroupRegistryServiceUpdateRequest\n  GroupRegistryServiceUpdateResponse\n  IndexRuleBindingRegistryServiceCreateRequest\n  IndexRuleBindingRegistryServiceCreateResponse\n  IndexRuleBindingRegistryServiceDeleteRequest\n  IndexRuleBindingRegistryServiceDeleteResponse\n  IndexRuleBindingRegistryServiceExistRequest\n  IndexRuleBindingRegistryServiceExistResponse\n  IndexRuleBindingRegistryServiceGetRequest\n  IndexRuleBindingRegistryServiceGetResponse\n  IndexRuleBindingRegistryServiceListRequest\n  IndexRuleBindingRegistryServiceListResponse\n  IndexRuleBindingRegistryServiceUpdateRequest\n  IndexRuleBindingRegistryServiceUpdateResponse\n  IndexRuleRegistryServiceCreateRequest\n  IndexRuleRegistryServiceCreateResponse\n  IndexRuleRegistryServiceDeleteRequest\n  IndexRuleRegistryServiceDeleteResponse\n  IndexRuleRegistryServiceExistRequest\n  IndexRuleRegistryServiceExistResponse\n  IndexRuleRegistryServiceGetRequest\n  IndexRuleRegistryServiceGetResponse\n  IndexRuleRegistryServiceListRequest\n  IndexRuleRegistryServiceListResponse\n  IndexRuleRegistryServiceUpdateRequest\n  IndexRuleRegistryServiceUpdateResponse\n  MeasureRegistryServiceCreateRequest\n  MeasureRegistryServiceCreateResponse\n  MeasureRegistryServiceDeleteRequest\n  MeasureRegistryServiceDeleteResponse\n  MeasureRegistryServiceExistRequest\n  MeasureRegistryServiceExistResponse\n  MeasureRegistryServiceGetRequest\n  MeasureRegistryServiceGetResponse\n  MeasureRegistryServiceListRequest\n  MeasureRegistryServiceListResponse\n  MeasureRegistryServiceUpdateRequest\n  MeasureRegistryServiceUpdateResponse\n  StreamRegistryServiceCreateRequest\n  StreamRegistryServiceCreateResponse\n  StreamRegistryServiceDeleteRequest\n  StreamRegistryServiceDeleteResponse\n  StreamRegistryServiceExistRequest\n  StreamRegistryServiceExistResponse\n  StreamRegistryServiceGetRequest\n  StreamRegistryServiceGetResponse\n  StreamRegistryServiceListRequest\n  StreamRegistryServiceListResponse\n  
StreamRegistryServiceUpdateRequest\n  StreamRegistryServiceUpdateResponse\n  TopNAggregationRegistryServiceCreateRequest\n  TopNAggregationRegistryServiceCreateResponse\n  TopNAggregationRegistryServiceDeleteRequest\n  TopNAggregationRegistryServiceDeleteResponse\n  TopNAggregationRegistryServiceExistRequest\n  TopNAggregationRegistryServiceExistResponse\n  TopNAggregationRegistryServiceGetRequest\n  TopNAggregationRegistryServiceGetResponse\n  TopNAggregationRegistryServiceListRequest\n  TopNAggregationRegistryServiceListResponse\n  TopNAggregationRegistryServiceUpdateRequest\n  TopNAggregationRegistryServiceUpdateResponse\n  GroupRegistryService\n  IndexRuleBindingRegistryService\n  IndexRuleRegistryService\n  MeasureRegistryService\n  StreamRegistryService\n  TopNAggregationRegistryService\n    banyandb/measure/v1/query.proto\n DataPoint DataPoint.Field QueryRequest QueryRequest.Aggregation QueryRequest.FieldProjection QueryRequest.GroupBy QueryRequest.Top QueryResponse    banyandb/measure/v1/topn.proto\n TopNList TopNList.Item TopNRequest TopNResponse    banyandb/model/v1/write.proto\n Status    banyandb/measure/v1/write.proto\n DataPointValue InternalWriteRequest WriteRequest WriteResponse    banyandb/measure/v1/rpc.proto\n MeasureService    banyandb/property/v1/property.proto\n Metadata Property    banyandb/property/v1/rpc.proto\n  ApplyRequest\n  ApplyResponse\n  DeleteRequest\n  DeleteResponse\n  GetRequest\n  GetResponse\n  KeepAliveRequest\n  KeepAliveResponse\n  ListRequest\n  ListResponse\n  ApplyRequest.Strategy\n  PropertyService\n    banyandb/stream/v1/query.proto\n Element QueryRequest QueryResponse    banyandb/stream/v1/write.proto\n ElementValue InternalWriteRequest WriteRequest WriteResponse    banyandb/stream/v1/rpc.proto\n StreamService    Scalar Value Types\n  \nTop\nbanyandb/cluster/v1/rpc.proto \nSendRequest    Field Type Label Description     topic string     message_id uint64     body google.protobuf.Any      \nSendResponse    Field Type Label Description     message_id uint64     error string     body google.protobuf.Any      \nService    Method Name Request Type Response Type Description     Send SendRequest stream SendResponse stream     \nTop\nbanyandb/common/v1/common.proto \nGroup Group is an internal object for Group management\n   Field Type Label Description     metadata Metadata  metadata define the group's identity   catalog Catalog  catalog denotes which type of data the group contains   resource_opts ResourceOpts  resourceOpts indicates the structure of the underlying kv storage   updated_at google.protobuf.Timestamp  updated_at indicates when resources of the group are updated    \nIntervalRule IntervalRule is a structured duration\n   Field Type Label Description     unit IntervalRule.Unit  unit can only be UNIT_HOUR or UNIT_DAY   num uint32      \nMetadata Metadata is for multi-tenant, multi-model use\n   Field Type Label Description     group string  group contains a set of options, like retention policy, max   name string  name of the entity   id uint32     create_revision int64  readonly. create_revision is the revision of last creation on this key.   mod_revision int64  readonly. mod_revision is the revision of last modification on this key.    
\nResourceOpts    Field Type Label Description     shard_num uint32  shard_num is the number of shards   block_interval IntervalRule  block_interval indicates the length of a block block_interval should be less than or equal to segment_interval   segment_interval IntervalRule  segment_interval indicates the length of a segment   ttl IntervalRule  ttl indicates time to live, how long the data will be cached    \nCatalog    Name Number Description     CATALOG_UNSPECIFIED 0    CATALOG_STREAM 1    CATALOG_MEASURE 2     \nIntervalRule.Unit    Name Number Description     UNIT_UNSPECIFIED 0    UNIT_HOUR 1    UNIT_DAY 2     \nTop\nbanyandb/database/v1/database.proto \nNode    Field Type Label Description     metadata banyandb.common.v1.Metadata     roles Role repeated    grpc_address string     http_address string     created_at google.protobuf.Timestamp      \nShard    Field Type Label Description     id uint64     metadata banyandb.common.v1.Metadata     catalog banyandb.common.v1.Catalog     node string     total uint32     updated_at google.protobuf.Timestamp     created_at google.protobuf.Timestamp      \nRole    Name Number Description     ROLE_UNSPECIFIED 0    ROLE_META 1    ROLE_DATA 2    ROLE_LIAISON 3     \nTop\nbanyandb/model/v1/common.proto \nFieldValue    Field Type Label Description     null google.protobuf.NullValue     str Str     int Int     binary_data bytes     float Float      \nFloat    Field Type Label Description     value double      \nInt    Field Type Label Description     value int64      \nIntArray    Field Type Label Description     value int64 repeated     \nStr    Field Type Label Description     value string      \nStrArray    Field Type Label Description     value string repeated     \nTagFamilyForWrite    Field Type Label Description     tags TagValue repeated     \nTagValue    Field Type Label Description     null google.protobuf.NullValue     str Str     str_array StrArray     int Int     int_array IntArray     binary_data bytes      \nAggregationFunction    Name Number Description     AGGREGATION_FUNCTION_UNSPECIFIED 0    AGGREGATION_FUNCTION_MEAN 1    AGGREGATION_FUNCTION_MAX 2    AGGREGATION_FUNCTION_MIN 3    AGGREGATION_FUNCTION_COUNT 4    AGGREGATION_FUNCTION_SUM 5     \nTop\nbanyandb/model/v1/query.proto \nCondition Condition consists of the query condition with a single binary operator to be imposed For 1:1 BinaryOp, values in condition must be an array with length = 1, while for 1:N BinaryOp, values can be an array with length \u0026gt;= 1.\n   Field Type Label Description     name string     op Condition.BinaryOp     value TagValue      \nCriteria tag_families are indexed.\n   Field Type Label Description     le LogicalExpression     condition Condition      \nLogicalExpression LogicalExpression supports logical operation\n   Field Type Label Description     op LogicalExpression.LogicalOp  op is a logical operation   left Criteria     right Criteria      \nQueryOrder QueryOrder means a Sort operation to be done for a given index rule. The index_rule_name refers to the name of a index rule bound to the subject.\n   Field Type Label Description     index_rule_name string     sort Sort      \nTag Pair is the building block of a record which is equivalent to a key-value pair. In the context of Trace, it could be metadata of a trace such as service_name, service_instance, etc. Besides, other tags are organized in key-value pair in the underlying storage layer. 
One should notice that the values can be a multi-value.\n   Field Type Label Description     key string     value TagValue      \nTagFamily    Field Type Label Description     name string     tags Tag repeated     \nTagProjection TagProjection is used to select the names of keys to be returned.\n   Field Type Label Description     tag_families TagProjection.TagFamily repeated     \nTagProjection.TagFamily    Field Type Label Description     name string     tags string repeated     \nTimeRange TimeRange is a range query for uint64, the range here follows left-inclusive and right-exclusive rule, i.e. [begin, end) if both edges exist\n   Field Type Label Description     begin google.protobuf.Timestamp     end google.protobuf.Timestamp      \nCondition.BinaryOp BinaryOp specifies the operation imposed to the given query condition For EQ, NE, LT, GT, LE and GE, only one operand should be given, i.e. one-to-one relationship. HAVING and NOT_HAVING allow multi-value to be the operand such as array/vector, i.e. one-to-many relationship. For example, \u0026quot;keyA\u0026quot; contains \u0026quot;valueA\u0026quot; and \u0026quot;valueB\u0026quot; MATCH performances a full-text search if the tag is analyzed. The string value applies to the same analyzer as the tag, but string array value does not. Each item in a string array is seen as a token instead of a query expression.\n   Name Number Description     BINARY_OP_UNSPECIFIED 0    BINARY_OP_EQ 1    BINARY_OP_NE 2    BINARY_OP_LT 3    BINARY_OP_GT 4    BINARY_OP_LE 5    BINARY_OP_GE 6    BINARY_OP_HAVING 7    BINARY_OP_NOT_HAVING 8    BINARY_OP_IN 9    BINARY_OP_NOT_IN 10    BINARY_OP_MATCH 11     \nLogicalExpression.LogicalOp    Name Number Description     LOGICAL_OP_UNSPECIFIED 0    LOGICAL_OP_AND 1    LOGICAL_OP_OR 2     \nSort    Name Number Description     SORT_UNSPECIFIED 0    SORT_DESC 1    SORT_ASC 2     \nTop\nbanyandb/database/v1/schema.proto \nEntity    Field Type Label Description     tag_names string repeated     \nFieldSpec FieldSpec is the specification of field\n   Field Type Label Description     name string  name is the identity of a field   field_type FieldType  field_type denotes the type of field value   encoding_method EncodingMethod  encoding_method indicates how to encode data during writing   compression_method CompressionMethod  compression_method indicates how to compress data during writing    \nIndexRule IndexRule defines how to generate indices based on tags and the index type IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata define the rule's identity   tags string repeated tags are the combination that refers to an indexed object If the elements in tags are more than 1, the object will generate a multi-tag index Caveat: All tags in a multi-tag MUST have an identical IndexType   type IndexRule.Type  type is the IndexType of this IndexObject.   location IndexRule.Location  location indicates where to store index.   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRule is updated   analyzer IndexRule.Analyzer  analyzer analyzes tag value to support the full-text searching for TYPE_INVERTED indices.    
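To make the recursive Criteria/LogicalExpression structure from banyandb/model/v1/query.proto above more concrete, here is an informal sketch in protobuf text format of a criteria tree meaning service_name equals order AND duration is greater than 500; the tag names and values are hypothetical.

le {
  op: LOGICAL_OP_AND
  left { condition { name: "service_name" op: BINARY_OP_EQ value { str { value: "order" } } } }
  right { condition { name: "duration" op: BINARY_OP_GT value { int { value: 500 } } } }
}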
\nIndexRuleBinding IndexRuleBinding is a bridge to connect several IndexRules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, which provides flexible strategies to control how to generate time series indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of this binding   rules string repeated rules refer to the IndexRule   subject Subject  subject indicates the subject of binding action   begin_at google.protobuf.Timestamp  begin_at_nanoseconds is the timestamp, after which the binding will be active   expire_at google.protobuf.Timestamp  expire_at_nanoseconds is the timestamp, after which the binding will be inactive. expire_at_nanoseconds must be larger than begin_at_nanoseconds   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRuleBinding is updated    \nMeasure Measure intends to store data points\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a measure   tag_families TagFamilySpec repeated tag_families are for filtering measures   fields FieldSpec repeated fields denote measure values   entity Entity  entity indicates which tags will be used to generate a series and shard a measure   interval string  interval indicates how frequently to send a data point. Valid time units are \u0026quot;ns\u0026quot;, \u0026quot;us\u0026quot; (or \u0026quot;µs\u0026quot;), \u0026quot;ms\u0026quot;, \u0026quot;s\u0026quot;, \u0026quot;m\u0026quot;, \u0026quot;h\u0026quot;, \u0026quot;d\u0026quot;.   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nStream Stream intends to store streaming data, for example, traces or logs\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a trace series   tag_families TagFamilySpec repeated tag_families   entity Entity  entity indicates how to generate a series and shard a stream   updated_at google.protobuf.Timestamp  updated_at indicates when the stream is updated    \nSubject Subject defines which stream or measure would generate indices\n   Field Type Label Description     catalog banyandb.common.v1.Catalog  catalog is where the subject belongs to todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   name string  name refers to a stream or measure in a particular catalog    \nTagFamilySpec    Field Type Label Description     name string     tags TagSpec repeated tags define accepted tags    \nTagSpec    Field Type Label Description     name string     type TagType     indexed_only bool  indexed_only indicates whether the tag is stored. True: it's indexed only, but not stored. False: it's stored and indexed    \nTopNAggregation TopNAggregation generates offline TopN statistics for a measure's TopN approximation\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of an aggregation   source_measure banyandb.common.v1.Metadata  source_measure denotes the data source of this aggregation   field_name string  field_name is the name of the field used for ranking   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN + bottomN todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   group_by_tag_names string repeated group_by_tag_names groups data points into statistical counters   criteria banyandb.model.v1.Criteria  criteria 
select partial data points from measure   counters_number int32  counters_number sets the number of counters to be tracked. The default value is 1000   lru_size int32  lru_size defines how much entry is allowed to be maintained in the memory   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nCompressionMethod    Name Number Description     COMPRESSION_METHOD_UNSPECIFIED 0    COMPRESSION_METHOD_ZSTD 1     \nEncodingMethod    Name Number Description     ENCODING_METHOD_UNSPECIFIED 0    ENCODING_METHOD_GORILLA 1     \nFieldType    Name Number Description     FIELD_TYPE_UNSPECIFIED 0    FIELD_TYPE_STRING 1    FIELD_TYPE_INT 2    FIELD_TYPE_DATA_BINARY 3    FIELD_TYPE_FLOAT 4     \nIndexRule.Analyzer    Name Number Description     ANALYZER_UNSPECIFIED 0    ANALYZER_KEYWORD 1 Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.   ANALYZER_STANDARD 2 Standard analyzer provides grammar based tokenization   ANALYZER_SIMPLE 3 Simple analyzer breaks text into tokens at any non-letter character, such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, and changes uppercase to lowercase.    \nIndexRule.Location    Name Number Description     LOCATION_UNSPECIFIED 0    LOCATION_SERIES 1    LOCATION_GLOBAL 2     \nIndexRule.Type Type determine the index structure under the hood\n   Name Number Description     TYPE_UNSPECIFIED 0    TYPE_TREE 1    TYPE_INVERTED 2     \nTagType    Name Number Description     TAG_TYPE_UNSPECIFIED 0    TAG_TYPE_STRING 1    TAG_TYPE_INT 2    TAG_TYPE_STRING_ARRAY 3    TAG_TYPE_INT_ARRAY 4    TAG_TYPE_DATA_BINARY 5     \nTop\nbanyandb/database/v1/rpc.proto \nGroupRegistryServiceCreateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceCreateResponse \nGroupRegistryServiceDeleteRequest    Field Type Label Description     group string      \nGroupRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nGroupRegistryServiceExistRequest    Field Type Label Description     group string      \nGroupRegistryServiceExistResponse    Field Type Label Description     has_group bool      \nGroupRegistryServiceGetRequest    Field Type Label Description     group string      \nGroupRegistryServiceGetResponse    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceListRequest \nGroupRegistryServiceListResponse    Field Type Label Description     group banyandb.common.v1.Group repeated     \nGroupRegistryServiceUpdateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceUpdateResponse \nIndexRuleBindingRegistryServiceCreateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceCreateResponse \nIndexRuleBindingRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleBindingRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule_binding bool      \nIndexRuleBindingRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceGetResponse    Field Type Label Description 
    index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleBindingRegistryServiceListResponse    Field Type Label Description     index_rule_binding IndexRuleBinding repeated     \nIndexRuleBindingRegistryServiceUpdateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceUpdateResponse \nIndexRuleRegistryServiceCreateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceCreateResponse \nIndexRuleRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule bool      \nIndexRuleRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceGetResponse    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleRegistryServiceListResponse    Field Type Label Description     index_rule IndexRule repeated     \nIndexRuleRegistryServiceUpdateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceUpdateResponse \nMeasureRegistryServiceCreateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nMeasureRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nMeasureRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_measure bool      \nMeasureRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceGetResponse    Field Type Label Description     measure Measure      \nMeasureRegistryServiceListRequest    Field Type Label Description     group string      \nMeasureRegistryServiceListResponse    Field Type Label Description     measure Measure repeated     \nMeasureRegistryServiceUpdateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceCreateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nStreamRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_stream bool      \nStreamRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata 
     \nStreamRegistryServiceGetResponse    Field Type Label Description     stream Stream      \nStreamRegistryServiceListRequest    Field Type Label Description     group string      \nStreamRegistryServiceListResponse    Field Type Label Description     stream Stream repeated     \nStreamRegistryServiceUpdateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nTopNAggregationRegistryServiceCreateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceCreateResponse \nTopNAggregationRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nTopNAggregationRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_top_n_aggregation bool      \nTopNAggregationRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceGetResponse    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceListRequest    Field Type Label Description     group string      \nTopNAggregationRegistryServiceListResponse    Field Type Label Description     top_n_aggregation TopNAggregation repeated     \nTopNAggregationRegistryServiceUpdateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceUpdateResponse \nGroupRegistryService    Method Name Request Type Response Type Description     Create GroupRegistryServiceCreateRequest GroupRegistryServiceCreateResponse    Update GroupRegistryServiceUpdateRequest GroupRegistryServiceUpdateResponse    Delete GroupRegistryServiceDeleteRequest GroupRegistryServiceDeleteResponse    Get GroupRegistryServiceGetRequest GroupRegistryServiceGetResponse    List GroupRegistryServiceListRequest GroupRegistryServiceListResponse    Exist GroupRegistryServiceExistRequest GroupRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nIndexRuleBindingRegistryService    Method Name Request Type Response Type Description     Create IndexRuleBindingRegistryServiceCreateRequest IndexRuleBindingRegistryServiceCreateResponse    Update IndexRuleBindingRegistryServiceUpdateRequest IndexRuleBindingRegistryServiceUpdateResponse    Delete IndexRuleBindingRegistryServiceDeleteRequest IndexRuleBindingRegistryServiceDeleteResponse    Get IndexRuleBindingRegistryServiceGetRequest IndexRuleBindingRegistryServiceGetResponse    List IndexRuleBindingRegistryServiceListRequest IndexRuleBindingRegistryServiceListResponse    Exist IndexRuleBindingRegistryServiceExistRequest IndexRuleBindingRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. 
Please use HEAD method to touch Get instead    \nIndexRuleRegistryService    Method Name Request Type Response Type Description     Create IndexRuleRegistryServiceCreateRequest IndexRuleRegistryServiceCreateResponse    Update IndexRuleRegistryServiceUpdateRequest IndexRuleRegistryServiceUpdateResponse    Delete IndexRuleRegistryServiceDeleteRequest IndexRuleRegistryServiceDeleteResponse    Get IndexRuleRegistryServiceGetRequest IndexRuleRegistryServiceGetResponse    List IndexRuleRegistryServiceListRequest IndexRuleRegistryServiceListResponse    Exist IndexRuleRegistryServiceExistRequest IndexRuleRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nMeasureRegistryService    Method Name Request Type Response Type Description     Create MeasureRegistryServiceCreateRequest MeasureRegistryServiceCreateResponse    Update MeasureRegistryServiceUpdateRequest MeasureRegistryServiceUpdateResponse    Delete MeasureRegistryServiceDeleteRequest MeasureRegistryServiceDeleteResponse    Get MeasureRegistryServiceGetRequest MeasureRegistryServiceGetResponse    List MeasureRegistryServiceListRequest MeasureRegistryServiceListResponse    Exist MeasureRegistryServiceExistRequest MeasureRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nStreamRegistryService    Method Name Request Type Response Type Description     Create StreamRegistryServiceCreateRequest StreamRegistryServiceCreateResponse    Update StreamRegistryServiceUpdateRequest StreamRegistryServiceUpdateResponse    Delete StreamRegistryServiceDeleteRequest StreamRegistryServiceDeleteResponse    Get StreamRegistryServiceGetRequest StreamRegistryServiceGetResponse    List StreamRegistryServiceListRequest StreamRegistryServiceListResponse    Exist StreamRegistryServiceExistRequest StreamRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTopNAggregationRegistryService    Method Name Request Type Response Type Description     Create TopNAggregationRegistryServiceCreateRequest TopNAggregationRegistryServiceCreateResponse    Update TopNAggregationRegistryServiceUpdateRequest TopNAggregationRegistryServiceUpdateResponse    Delete TopNAggregationRegistryServiceDeleteRequest TopNAggregationRegistryServiceDeleteResponse    Get TopNAggregationRegistryServiceGetRequest TopNAggregationRegistryServiceGetResponse    List TopNAggregationRegistryServiceListRequest TopNAggregationRegistryServiceListResponse    Exist TopNAggregationRegistryServiceExistRequest TopNAggregationRegistryServiceExistResponse     \nTop\nbanyandb/measure/v1/query.proto \nDataPoint DataPoint is stored in Measures\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamily repeated tag_families contains tags selected in the projection   fields DataPoint.Field repeated fields contains fields selected in the projection    \nDataPoint.Field    Field Type Label Description     name string     value banyandb.model.v1.FieldValue      \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   criteria banyandb.model.v1.Criteria  tag_families are indexed.   
tag_projection banyandb.model.v1.TagProjection  tag_projection can be used to select tags of the data points in the response   field_projection QueryRequest.FieldProjection  field_projection can be used to select fields of the data points in the response   group_by QueryRequest.GroupBy  group_by groups data points based on their field value for a specific tag and use field_name as the projection name   agg QueryRequest.Aggregation  agg aggregates data points based on a field   top QueryRequest.Top  top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by's output   offset uint32  offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top's output   limit uint32  limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top's output   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a tag.    \nQueryRequest.Aggregation    Field Type Label Description     function banyandb.model.v1.AggregationFunction     field_name string  field_name must be one of files indicated by the field_projection    \nQueryRequest.FieldProjection    Field Type Label Description     names string repeated     \nQueryRequest.GroupBy    Field Type Label Description     tag_projection banyandb.model.v1.TagProjection  tag_projection must be a subset of the tag_projection of QueryRequest   field_name string  field_name must be one of fields indicated by field_projection    \nQueryRequest.Top    Field Type Label Description     number int32  number set the how many items should be returned   field_name string  field_name must be one of files indicated by the field_projection   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     data_points DataPoint repeated data_points are the actual data returned    \nTop\nbanyandb/measure/v1/topn.proto \nTopNList TopNList contains a series of topN items\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   items TopNList.Item repeated items contains top-n items in a list    \nTopNList.Item    Field Type Label Description     entity banyandb.model.v1.Tag repeated    value banyandb.model.v1.FieldValue      \nTopNRequest TopNRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   top_n int32  top_n set the how many items should be returned in each list.   agg banyandb.model.v1.AggregationFunction  agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only   conditions banyandb.model.v1.Condition repeated criteria select counters. Only equals are acceptable.   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields    \nTopNResponse TopNResponse is the response for a query to the Query module.\n   Field Type Label Description     lists TopNList repeated lists contain a series topN lists ranked by timestamp if agg_func in query request is specified, lists' size should be one.    
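As a rough illustration of how the measure query messages above fit together, the following sketch assembles a QueryRequest as a Python dict using the field names from the tables. The group, measure name, tag/field names and time range are placeholder assumptions; only the layout of the message comes from the reference.
# Illustrative sketch only: a measure QueryRequest in the protobuf JSON mapping.
# All concrete names and timestamps below are assumed placeholders.
query_request = {
    "metadata": {"group": "example-group", "name": "example_measure"},
    "time_range": {  # [begin, end)
        "begin": "2024-01-01T00:00:00Z",
        "end": "2024-01-01T01:00:00Z",
    },
    "criteria": {  # a single Condition; a LogicalExpression could combine several
        "condition": {
            "name": "entity_id",
            "op": "BINARY_OP_EQ",
            "value": {"str": {"value": "service-a"}},
        }
    },
    "tag_projection": {"tag_families": [{"name": "default", "tags": ["entity_id"]}]},
    "field_projection": {"names": ["value"]},
    "offset": 0,
    "limit": 20,
}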
\nTop\nbanyandb/model/v1/write.proto \nStatus Status is the response status for write\n   Name Number Description     STATUS_UNSPECIFIED 0    STATUS_SUCCEED 1    STATUS_INVALID_TIMESTAMP 2    STATUS_NOT_FOUND 3    STATUS_EXPIRED_SCHEMA 4    STATUS_INTERNAL_ERROR 5     \nTop\nbanyandb/measure/v1/write.proto \nDataPointValue DataPointValue is the data point for writing. It only contains values.\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the measure schema   fields banyandb.model.v1.FieldValue repeated the order of fields match the measure schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest WriteRequest is the request contract for write\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   data_point DataPointValue  the data_point is required.   message_id uint64  the message_id is required.    \nWriteResponse WriteResponse is the response contract for write\n   Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/measure/v1/rpc.proto \nMeasureService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream    TopN TopNRequest TopNResponse     \nTop\nbanyandb/property/v1/property.proto \nMetadata Metadata is for multi-tenant use\n   Field Type Label Description     container banyandb.common.v1.Metadata  container is created when it receives the first property   id string  id identifies a property    \nProperty Property stores the user defined data\n   Field Type Label Description     metadata Metadata  metadata is the identity of a property   tags banyandb.model.v1.Tag repeated tag stores the content of a property   updated_at google.protobuf.Timestamp  updated_at indicates when the property is updated   lease_id int64  readonly. lease_id is the ID of the lease that attached to key.   ttl string  ttl indicates the time to live of the property. It's a string in the format of \u0026quot;1h\u0026quot;, \u0026quot;2m\u0026quot;, \u0026quot;3s\u0026quot;, \u0026quot;1500ms\u0026quot;. It defaults to 0s, which means the property never expires. The minimum allowed ttl is 1s.    \nTop\nbanyandb/property/v1/rpc.proto \nApplyRequest    Field Type Label Description     property Property     strategy ApplyRequest.Strategy  strategy indicates how to update a property. It defaults to STRATEGY_MERGE    \nApplyResponse    Field Type Label Description     created bool  created indicates whether the property existed. True: the property is absent. False: the property existed.   
tags_num uint32     lease_id int64      \nDeleteRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nDeleteResponse    Field Type Label Description     deleted bool     tags_num uint32      \nGetRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nGetResponse    Field Type Label Description     property Property      \nKeepAliveRequest    Field Type Label Description     lease_id int64      \nKeepAliveResponse \nListRequest    Field Type Label Description     container banyandb.common.v1.Metadata     ids string repeated    tags string repeated     \nListResponse    Field Type Label Description     property Property repeated     \nApplyRequest.Strategy    Name Number Description     STRATEGY_UNSPECIFIED 0    STRATEGY_MERGE 1    STRATEGY_REPLACE 2     \nPropertyService    Method Name Request Type Response Type Description     Apply ApplyRequest ApplyResponse Apply creates a property if it's absent, or update a existed one based on a strategy.   Delete DeleteRequest DeleteResponse    Get GetRequest GetResponse    List ListRequest ListResponse    KeepAlive KeepAliveRequest KeepAliveResponse     \nTop\nbanyandb/stream/v1/query.proto \nElement Element represents (stream context) a Span defined in Google Dapper paper or equivalently a Segment in Skywalking. (Log context) a log\n   Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp represents a millisecond 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamily repeated fields contains all indexed Field. Some typical names, - stream_id - duration - service_name - service_instance_id - end_time_milliseconds    \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds. In the context of stream, it represents the range of the startTime for spans/segments, while in the context of Log, it means the range of the timestamp(s) for logs. it is always recommended to specify time range for performance reason   offset uint32  offset is used to support pagination, together with the following limit   limit uint32  limit is used to impose a boundary on the number of records being returned   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a field. So far, only fields in the type of Integer are supported   criteria banyandb.model.v1.Criteria  tag_families are indexed.   projection banyandb.model.v1.TagProjection  projection can be used to select the key names of the element in the response    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     elements Element repeated elements are the actual data returned    \nTop\nbanyandb/stream/v1/write.proto \nElementValue    Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds. 
It represents 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items matches the stream schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   element ElementValue  the element is required.   message_id uint64  the message_id is required.    \nWriteResponse    Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/stream/v1/rpc.proto \nStreamService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream     Scalar Value Types    .proto Type Notes C++ Java Python Go C# PHP Ruby     double  double double float float64 double float Float   float  float float float float32 float float Float   int32 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. int32 int int int32 int integer Bignum or Fixnum (as required)   int64 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. int64 long int/long int64 long integer/string Bignum   uint32 Uses variable-length encoding. uint32 int int/long uint32 uint integer Bignum or Fixnum (as required)   uint64 Uses variable-length encoding. uint64 long int/long uint64 ulong integer/string Bignum or Fixnum (as required)   sint32 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. int32 int int int32 int integer Bignum or Fixnum (as required)   sint64 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. int64 long int/long int64 long integer/string Bignum   fixed32 Always four bytes. More efficient than uint32 if values are often greater than 2^28. uint32 int int uint32 uint integer Bignum or Fixnum (as required)   fixed64 Always eight bytes. More efficient than uint64 if values are often greater than 2^56. uint64 long int/long uint64 ulong integer/string Bignum   sfixed32 Always four bytes. int32 int int int32 int integer Bignum or Fixnum (as required)   sfixed64 Always eight bytes. int64 long int/long int64 long integer/string Bignum   bool  bool boolean boolean bool bool boolean TrueClass/FalseClass   string A string must always contain UTF-8 encoded or 7-bit ASCII text. string String str/unicode string string string String (UTF-8)   bytes May contain any arbitrary sequence of bytes. string ByteString str []byte ByteString string String (ASCII-8BIT)    ","title":"Protocol Documentation","url":"/docs/skywalking-banyandb/v0.5.0/api-reference/"},{"content":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus format from Pulsar and transfers the metrics to the OpenTelemetry receiver and into the Meter System. The Pulsar cluster is cataloged as a Layer: PULSAR Service in OAP.\nData flow  Pulsar exposes metrics through the Prometheus endpoint. 
OpenTelemetry Collector fetches metrics from the Pulsar cluster via the Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up the Pulsar Cluster. (A Pulsar cluster includes a Pulsar broker cluster and a BookKeeper bookie cluster.) Set up the OpenTelemetry Collector. For an example OpenTelemetry Collector configuration, refer to here. Configure the SkyWalking OpenTelemetry receiver.  Pulsar Monitoring Pulsar monitoring provides multidimensional metrics monitoring of the Pulsar cluster as a Layer: PULSAR Service in the OAP. In each cluster, the nodes are represented as Instance.\nPulsar Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Total Topics meter_pulsar_total_topics The number of Pulsar topics in this cluster. Pulsar Cluster   Total Subscriptions meter_pulsar_total_subscriptions The number of Pulsar subscriptions in this cluster. Pulsar Cluster   Total Producers meter_pulsar_total_producers The number of active producers connected to this cluster. Pulsar Cluster   Total Consumers meter_pulsar_total_consumers The number of active consumers connected to this cluster. Pulsar Cluster   Message Rate In meter_pulsar_message_rate_in The total message rate coming into this cluster (message per second). Pulsar Cluster   Message Rate Out meter_pulsar_message_rate_out The total message rate going out from this cluster (message per second). Pulsar Cluster   Throughput In meter_pulsar_throughput_in The total throughput coming into this cluster (byte per second). Pulsar Cluster   Throughput Out meter_pulsar_throughput_out The total throughput going out from this cluster (byte per second). Pulsar Cluster   Storage Size meter_pulsar_storage_size The total storage size of all topics in this broker (in bytes). Pulsar Cluster   Storage Logical Size meter_pulsar_storage_logical_size The storage size of all topics in this broker without replicas (in bytes). Pulsar Cluster   Storage Write Rate meter_pulsar_storage_write_rate The total message batches (entries) written to the storage for this broker (message batch per second). Pulsar Cluster   Storage Read Rate meter_pulsar_storage_read_rate The total message batches (entries) read from the storage for this broker (message batch per second). Pulsar Cluster    Pulsar Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     Active Connections meter_pulsar_broker_active_connections The number of active connections. Pulsar Broker   Total Connections meter_pulsar_broker_total_connections The total number of connections. Pulsar Broker   Connection Create Success Count meter_pulsar_broker_connection_create_success_count The number of successfully created connections. Pulsar Broker   Connection Create Fail Count meter_pulsar_broker_connection_create_fail_count The number of failed connections. Pulsar Broker   Connection Closed Total Count meter_pulsar_broker_connection_closed_total_count The total number of closed connections. Pulsar Broker   JVM Buffer Pool Used meter_pulsar_broker_jvm_buffer_pool_used_bytes The usage of jvm buffer pool. Pulsar Broker   JVM Memory Pool Used meter_pulsar_broker_jvm_memory_pool_used The usage of jvm memory pool. Pulsar Broker   JVM Memory meter_pulsar_broker_jvm_memory_init meter_pulsar_broker_jvm_memory_used meter_pulsar_broker_jvm_memory_committed The usage of jvm memory. 
Pulsar Broker   JVM Threads meter_pulsar_broker_jvm_threads_current meter_pulsar_broker_jvm_threads_daemon meter_pulsar_broker_jvm_threads_peak meter_pulsar_broker_jvm_threads_deadlocked The usage of jvm threads. Pulsar Broker   GC Time meter_pulsar_broker_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Pulsar Broker   GC Count meter_pulsar_broker_jvm_gc_collection_seconds_count The count of a given JVM garbage collector. Pulsar Broker    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/pulsar/pulsar-cluster.yaml, otel-rules/pulsar/pulsar-broker.yaml. The Pulsar dashboard panel configurations are found in ui-initialized-templates/pulsar.\n","title":"Pulsar monitoring","url":"/docs/main/latest/en/setup/backend/backend-pulsar-monitoring/"},{"content":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus format from Pulsar and transfers the metrics to the OpenTelemetry receiver and into the Meter System. The Pulsar cluster is cataloged as a Layer: PULSAR Service in OAP.\nData flow  Pulsar exposes metrics through the Prometheus endpoint. OpenTelemetry Collector fetches metrics from the Pulsar cluster via the Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up the Pulsar Cluster. (A Pulsar cluster includes a Pulsar broker cluster and a BookKeeper bookie cluster.) Set up the OpenTelemetry Collector. For an example OpenTelemetry Collector configuration, refer to here. Configure the SkyWalking OpenTelemetry receiver.  Pulsar Monitoring Pulsar monitoring provides multidimensional metrics monitoring of the Pulsar cluster as a Layer: PULSAR Service in the OAP. In each cluster, the nodes are represented as Instance.\nPulsar Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Total Topics meter_pulsar_total_topics The number of Pulsar topics in this cluster. Pulsar Cluster   Total Subscriptions meter_pulsar_total_subscriptions The number of Pulsar subscriptions in this cluster. Pulsar Cluster   Total Producers meter_pulsar_total_producers The number of active producers connected to this cluster. Pulsar Cluster   Total Consumers meter_pulsar_total_consumers The number of active consumers connected to this cluster. Pulsar Cluster   Message Rate In meter_pulsar_message_rate_in The total message rate coming into this cluster (message per second). Pulsar Cluster   Message Rate Out meter_pulsar_message_rate_out The total message rate going out from this cluster (message per second). Pulsar Cluster   Throughput In meter_pulsar_throughput_in The total throughput coming into this cluster (byte per second). Pulsar Cluster   Throughput Out meter_pulsar_throughput_out The total throughput going out from this cluster (byte per second). Pulsar Cluster   Storage Size meter_pulsar_storage_size The total storage size of all topics in this broker (in bytes). Pulsar Cluster   Storage Logical Size meter_pulsar_storage_logical_size The storage size of all topics in this broker without replicas (in bytes). Pulsar Cluster   Storage Write Rate meter_pulsar_storage_write_rate The total message batches (entries) written to the storage for this broker (message batch per second). 
Pulsar Cluster   Storage Read Rate meter_pulsar_storage_read_rate The total message batches (entries) read from the storage for this broker (message batch per second). Pulsar Cluster    Pulsar Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     Active Connections meter_pulsar_broker_active_connections The number of active connections. Pulsar Broker   Total Connections meter_pulsar_broker_total_connections The total number of connections. Pulsar Broker   Connection Create Success Count meter_pulsar_broker_connection_create_success_count The number of successfully created connections. Pulsar Broker   Connection Create Fail Count meter_pulsar_broker_connection_create_fail_count The number of failed connections. Pulsar Broker   Connection Closed Total Count meter_pulsar_broker_connection_closed_total_count The total number of closed connections. Pulsar Broker   JVM Buffer Pool Used meter_pulsar_broker_jvm_buffer_pool_used_bytes The usage of jvm buffer pool. Pulsar Broker   JVM Memory Pool Used meter_pulsar_broker_jvm_memory_pool_used The usage of jvm memory pool. Pulsar Broker   JVM Memory meter_pulsar_broker_jvm_memory_init meter_pulsar_broker_jvm_memory_used meter_pulsar_broker_jvm_memory_committed The usage of jvm memory. Pulsar Broker   JVM Threads meter_pulsar_broker_jvm_threads_current meter_pulsar_broker_jvm_threads_daemon meter_pulsar_broker_jvm_threads_peak meter_pulsar_broker_jvm_threads_deadlocked The usage of jvm threads. Pulsar Broker   GC Time meter_pulsar_broker_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Pulsar Broker   GC Count meter_pulsar_broker_jvm_gc_collection_seconds_count The count of a given JVM garbage collector. Pulsar Broker    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/pulsar/pulsar-cluster.yaml, otel-rules/pulsar/pulsar-broker.yaml. The Pulsar dashboard panel configurations are found in ui-initialized-templates/pulsar.\n","title":"Pulsar monitoring","url":"/docs/main/next/en/setup/backend/backend-pulsar-monitoring/"},{"content":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus format from Pulsar and transfers the metrics to the OpenTelemetry receiver and into the Meter System. The Pulsar cluster is cataloged as a Layer: PULSAR Service in OAP.\nData flow  Pulsar exposes metrics through the Prometheus endpoint. OpenTelemetry Collector fetches metrics from the Pulsar cluster via the Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up the Pulsar Cluster. (A Pulsar cluster includes a Pulsar broker cluster and a BookKeeper bookie cluster.) Set up the OpenTelemetry Collector. For an example OpenTelemetry Collector configuration, refer to here. Configure the SkyWalking OpenTelemetry receiver.  Pulsar Monitoring Pulsar monitoring provides multidimensional metrics monitoring of the Pulsar cluster as a Layer: PULSAR Service in the OAP. In each cluster, the nodes are represented as Instance.\nPulsar Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Total Topics meter_pulsar_total_topics The number of Pulsar topics in this cluster. Pulsar Cluster   Total Subscriptions meter_pulsar_total_subscriptions The number of Pulsar subscriptions in this cluster. 
Pulsar Cluster   Total Producers meter_pulsar_total_producers The number of active producers connected to this cluster. Pulsar Cluster   Total Consumers meter_pulsar_total_consumers The number of active consumers connected to this cluster. Pulsar Cluster   Message Rate In meter_pulsar_message_rate_in The total message rate coming into this cluster (message per second). Pulsar Cluster   Message Rate Out meter_pulsar_message_rate_out The total message rate going out from this cluster (message per second). Pulsar Cluster   Throughput In meter_pulsar_throughput_in The total throughput coming into this cluster (byte per second). Pulsar Cluster   Throughput Out meter_pulsar_throughput_out The total throughput going out from this cluster (byte per second). Pulsar Cluster   Storage Size meter_pulsar_storage_size The total storage size of all topics in this broker (in bytes). Pulsar Cluster   Storage Logical Size meter_pulsar_storage_logical_size The storage size of all topics in this broker without replicas (in bytes). Pulsar Cluster   Storage Write Rate meter_pulsar_storage_write_rate The total message batches (entries) written to the storage for this broker (message batch per second). Pulsar Cluster   Storage Read Rate meter_pulsar_storage_read_rate The total message batches (entries) read from the storage for this broker (message batch per second). Pulsar Cluster    Pulsar Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     Active Connections meter_pulsar_broker_active_connections The number of active connections. Pulsar Broker   Total Connections meter_pulsar_broker_total_connections The total number of connections. Pulsar Broker   Connection Create Success Count meter_pulsar_broker_connection_create_success_count The number of successfully created connections. Pulsar Broker   Connection Create Fail Count meter_pulsar_broker_connection_create_fail_count The number of failed connections. Pulsar Broker   Connection Closed Total Count meter_pulsar_broker_connection_closed_total_count The total number of closed connections. Pulsar Broker   JVM Buffer Pool Used meter_pulsar_broker_jvm_buffer_pool_used_bytes The usage of jvm buffer pool. Pulsar Broker   JVM Memory Pool Used meter_pulsar_broker_jvm_memory_pool_used The usage of jvm memory pool. Pulsar Broker   JVM Memory meter_pulsar_broker_jvm_memory_init meter_pulsar_broker_jvm_memory_used meter_pulsar_broker_jvm_memory_committed The usage of jvm memory. Pulsar Broker   JVM Threads meter_pulsar_broker_jvm_threads_current meter_pulsar_broker_jvm_threads_daemon meter_pulsar_broker_jvm_threads_peak meter_pulsar_broker_jvm_threads_deadlocked The usage of jvm threads. Pulsar Broker   GC Time meter_pulsar_broker_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Pulsar Broker   GC Count meter_pulsar_broker_jvm_gc_collection_seconds_count The count of a given JVM garbage collector. Pulsar Broker    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/pulsar/pulsar-cluster.yaml, otel-rules/pulsar/pulsar-broker.yaml. The Pulsar dashboard panel configurations are found in ui-initialized-templates/pulsar.\n","title":"Pulsar monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-pulsar-monitoring/"},{"content":"Python Agent Asynchronous Enhancement Since 1.1.0, the Python agent supports asynchronous reporting of ALL telemetry data, including traces, metrics, logs and profile. 
This feature is disabled by default, since it is still in the experimental stage. You can enable it by setting the SW_AGENT_ASYNCIO_ENHANCEMENT environment variable to true. See the configuration document for more information.\nexport SW_AGENT_ASYNCIO_ENHANCEMENT=true Why we need this feature Before version 1.1.0, the SkyWalking Python agent had only an implementation based on the threading module to provide data reporters. Yet with the growth of the Python agent, it is now fully capable and requires more resources than when only tracing was supported (we start many threads, and gRPC itself creates even more threads when streaming).\nAs is well known, the Global Interpreter Lock (GIL) in Python can limit the true parallel execution of threads. This issue also affects the Python agent, especially on network communication with the SkyWalking OAP (gRPC, HTTP and Kafka).\nTherefore, we have decided to implement the reporter code for the SkyWalking Python agent based on the asyncio library. asyncio is an officially supported asynchronous programming library in Python that operates on a single-threaded, coroutine-driven model. Currently, it enjoys widespread adoption and boasts a rich ecosystem, making it the preferred choice for enhancing asynchronous capabilities in many Python projects.\nHow it works To keep the API unchanged, we have written a new class called SkyWalkingAgentAsync (with an interface identical to the SkyWalkingAgent class). We use the environment variable mentioned above, SW_AGENT_ASYNCIO_ENHANCEMENT, to control which class implements the agent\u0026rsquo;s interface.\nIn the SkyWalkingAgentAsync class, we have employed asyncio coroutines and their related functions to replace the Python threading implementation in nearly all instances, and we have applied asyncio enhancements to all three primary reporting protocols of the current SkyWalking Python agent:\n  gRPC: We use the grpc.aio module to replace the grpc module. Since the grpc.aio module is also officially supported and included in the grpc package, we can use it directly without any additional installation.\n  HTTP: We use the aiohttp module to replace the requests module.\n  Kafka: We use the aiokafka module to replace the kafka-python module.\n  Performance improvement We use wrk to load-test the network throughput of the Python agent in a FastAPI application.\n gRPC  The performance has been improved by about 32.8%\n   gRPC QPS TPS Avg Latency     sync (original) 899.26 146.66KB 545.97ms   async (new) 1194.55 194.81KB 410.97ms     HTTP  The performance has been improved by about 9.8%\n   HTTP QPS TPS Avg Latency     sync (original) 530.95 86.59KB 1.53s   async (new) 583.37 95.14KB 1.44s     Kafka  The performance has been improved by about 89.6%\n   Kafka QPS TPS Avg Latency     sync (original) 345.89 56.41KB 1.09s   async (new) 655.67 106.93KB 1.24s     In fact, only the gRPC result is a meaningful reference, because the other two protocols use third-party libraries with completely different implementations, so the performance improvement depends to a certain extent on the performance of these third-party libraries.\n For more details, see this PR.\nPotential problems We have shown that the asynchronous enhancement function improves the transmission efficiency of metrics, traces and logs. 
But it improves the performance of profile data very little, and even causes performance degradation.\nThis is mainly because a large part of the profile data comes from the monitoring and measurement of Python threads, which is exactly what we need to avoid in the asynchronous enhancement. Since operations on threads cannot be bypassed, we may need additional overhead to support cross-thread coroutine communication, which may lead to performance degradation instead of an improvement.\nAsynchronous enhancements involve many code changes and introduce some new dependencies. Since this feature is relatively new, it may cause some unexpected errors and problems. If you encounter them, please feel free to contact us or submit issues and PRs!\n","title":"Python Agent Asynchronous Enhancement","url":"/docs/skywalking-python/next/en/setup/advanced/asyncenhancement/"},{"content":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module (in theory, also logging libraries depending on the core logging module) and loguru module.\nFrom Python agent 1.0.0, the log reporter is automatically enabled and can be disabled through agent_log_reporter_active=False or SW_AGENT_LOG_REPORTER_ACTIVE=False.\nThe log reporter supports all three protocols, including grpc, http and kafka, which share the same config agent_protocol with the trace reporter.\nIf the http protocol is chosen, the logs will be batch-reported to the collector REST endpoint oap/v3/logs.\nIf the kafka protocol is chosen, please make sure to configure kafka-fetcher on the OAP side, and make sure the Python agent config kafka_bootstrap_servers points to your Kafka brokers.\nPlease make sure OAP is consuming the same Kafka topic as your agent produces to; kafka_namespace must match the OAP-side configuration plugin.kafka.namespace\nagent_log_reporter_active=True - Enables the log reporter.\nagent_log_reporter_max_buffer_size - The maximum queue backlog size for sending log data to the backend; logs beyond this are silently dropped.\nAlternatively, you can pass configurations through environment variables. Please refer to the Configuration Vocabulary for the list of environment variables associated with the log reporter.\nSpecify a logging level  [Important] The agent will only report logs that pass the default level threshold logging.getLogger().setLevel(logging.WARNING) For example, if your logger level is logging.INFO, the agent will not report info logs even if you set agent_log_reporter_level to INFO\n In addition to the code-level configuration, only the logs with a level equal to or higher than the specified configuration will be collected and reported.\nIn other words, the agent skips reporting some unwanted logs based on your level threshold even though they are still logged.\nlog_reporter_level - The string name of a logger level.\nNote that it also works with your custom logger levels; simply specify its string name in the config.\nIgnore log filters The following config is disabled by default. 
When enabled, the log reporter will collect logs disregarding your custom log filters.\nFor example, if you attach the filter below to the logger - the default behavior of log reporting aligns with the filter (not reporting any logs with a message starting with SW test)\nclass AppFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(\u0026#39;SW test\u0026#39;) logger.addFilter(AppFilter()) However, if you would like to report those filtered logs, set the log_reporter_ignore_filter to True.\nFormatting Note that regardless of the formatting, the Python agent will always report the following three tags -\nlevel - the logger level name\nlogger - the logger name\nthread - the thread name\nLimit stacktrace depth You can set the cause_exception_depth config entry to a desired level (defaults to 10), which limits the output depth of exception stacktrace in reporting.\nThis config limits the agent to reporting at most that many levels of stacktrace; please refer to Python traceback for more explanation.\nCustomize the reported log format You can choose to report collected logs in a custom layout.\nIf not set, the agent uses the layout below by default; otherwise the agent uses your custom layout set in log_reporter_layout.\n'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s'\nIf the layout is set to None, the reported log content will only contain the pre-formatted LogRecord.message(msg % args) without any additional styles or extra fields; a stacktrace will be attached if an exception was raised.\nTransmit un-formatted logs You can also choose to report the log messages without any formatting. It separates the raw log msg logRecord.msg and logRecord.args, then puts them into message content and tags starting from argument.0, respectively, along with an exception tag if an exception was raised.\nNote when you set log_reporter_formatted to False, it ignores your custom layout introduced above.\nAs an example, the following code:\nlogger.info(\u0026#34;SW test log %s%s%s\u0026#34;, \u0026#39;arg0\u0026#39;, \u0026#39;arg1\u0026#39;, \u0026#39;arg2\u0026#39;) Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;SW test log %s %s %s\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg0\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg2\u0026#34; } ] } ","title":"Python Agent Log Reporter","url":"/docs/skywalking-python/latest/en/setup/advanced/logreporter/"},{"content":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module (in theory, also logging libraries depending on the core logging module) and loguru module.\nFrom Python agent 1.0.0, the log reporter is automatically enabled and can be disabled through agent_log_reporter_active=False or SW_AGENT_LOG_REPORTER_ACTIVE=False.\nThe log reporter supports all three protocols, including grpc, http and kafka, which share the same config agent_protocol with the trace reporter.\nIf the http protocol is chosen, the logs will be batch-reported to the collector REST endpoint oap/v3/logs.\nIf the kafka protocol is chosen, please make sure to configure kafka-fetcher on the OAP side, and make sure the Python agent config kafka_bootstrap_servers points to your Kafka brokers.\nPlease make sure OAP is consuming the same Kafka topic as your agent produces to; 
kafka_namespace must match the OAP-side configuration plugin.kafka.namespace\nagent_log_reporter_active=True - Enables the log reporter.\nagent_log_reporter_max_buffer_size - The maximum queue backlog size for sending log data to the backend; logs beyond this are silently dropped.\nAlternatively, you can pass configurations through environment variables. Please refer to the Configuration Vocabulary for the list of environment variables associated with the log reporter.\nSpecify a logging level  [Important] The agent will only report logs that pass the default level threshold logging.getLogger().setLevel(logging.WARNING) For example, if your logger level is logging.INFO, the agent will not report info logs even if you set agent_log_reporter_level to INFO\n In addition to the code-level configuration, only the logs with a level equal to or higher than the specified configuration will be collected and reported.\nIn other words, the agent skips reporting some unwanted logs based on your level threshold even though they are still logged.\nlog_reporter_level - The string name of a logger level.\nNote that it also works with your custom logger levels; simply specify its string name in the config.\nIgnore log filters The following config is disabled by default. When enabled, the log reporter will collect logs disregarding your custom log filters.\nFor example, if you attach the filter below to the logger - the default behavior of log reporting aligns with the filter (not reporting any logs with a message starting with SW test)\nclass AppFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(\u0026#39;SW test\u0026#39;) logger.addFilter(AppFilter()) However, if you would like to report those filtered logs, set the log_reporter_ignore_filter to True.\nFormatting Note that regardless of the formatting, the Python agent will always report the following three tags -\nlevel - the logger level name\nlogger - the logger name\nthread - the thread name\nLimit stacktrace depth You can set the cause_exception_depth config entry to a desired level (defaults to 10), which limits the output depth of exception stacktrace in reporting.\nThis config limits the agent to reporting at most that many levels of stacktrace; please refer to Python traceback for more explanation.\nCustomize the reported log format You can choose to report collected logs in a custom layout.\nIf not set, the agent uses the layout below by default; otherwise the agent uses your custom layout set in log_reporter_layout.\n'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s'\nIf the layout is set to None, the reported log content will only contain the pre-formatted LogRecord.message(msg % args) without any additional styles or extra fields; a stacktrace will be attached if an exception was raised.\nTransmit un-formatted logs You can also choose to report the log messages without any formatting. 
It separates the raw log msg logRecord.msg and logRecord.args, then puts them into message content and tags starting from argument.0, respectively, along with an exception tag if an exception was raised.\nNote when you set log_reporter_formatted to False, it ignores your custom layout introduced above.\nAs an example, the following code:\nlogger.info(\u0026#34;SW test log %s%s%s\u0026#34;, \u0026#39;arg0\u0026#39;, \u0026#39;arg1\u0026#39;, \u0026#39;arg2\u0026#39;) Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;SW test log %s %s %s\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg0\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg2\u0026#34; } ] } Print trace ID in your logs To print out the trace IDs in the logs, simply add %(tid)s to the agent_log_reporter_layout.\nYou can take advantage of this feature to print out the trace IDs on any channel you desire, not limited to reporting logs to OAP, this can be achieved by using any formatter you prefer in your own application logic.\n","title":"Python Agent Log Reporter","url":"/docs/skywalking-python/next/en/setup/advanced/logreporter/"},{"content":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module (in theory, also logging libraries depending on the core logging module) and loguru module.\nFrom Python agent 1.0.0, the log reporter is automatically enabled and can be disabled through agent_log_reporter_active=False or SW_AGENT_LOG_REPORTER_ACTIVE=False.\nLog reporter supports all three protocols including grpc, http and kafka, which shares the same config agent_protocol with trace reporter.\nIf chosen http protocol, the logs will be batch-reported to the collector REST endpoint oap/v3/logs.\nIf chosen kafka protocol, please make sure to config kafka-fetcher on the OAP side, and make sure Python agent config kafka_bootstrap_servers points to your Kafka brokers.\nPlease make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nagent_log_reporter_active=True - Enables the log reporter.\nagent_log_reporter_max_buffer_size - The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.\nAlternatively, you can pass configurations through environment variables. Please refer to the Configuration Vocabulary for the list of environment variables associated with the log reporter.\nSpecify a logging level  [Important] Agent will only report logs that passes the default level threshold logging.getLogger().setLevel(logging.WARNING) For example, if your logger level is logging.INFO, agent will not report info logs even if you set agent_log_reporter_level to INFO\n Additional to the code level configuration, only the logs with a level equal to or higher than the specified configuration will be collected and reported.\nIn other words, the agent skips reporting some unwanted logs based on your level threshold even though they are still logged.\nlog_reporter_level - The string name of a logger level.\nNote that it also works with your custom logger levels, simply specify its string name in the config.\nIgnore log filters The following config is disabled by default. 
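To tie the layout, un-formatted mode, and trace-ID options together, the snippet below is a small sketch using the config names shown on this page (agent_log_reporter_layout for the layout with %(tid)s, log_reporter_formatted for raw reporting); the exact prefixed names should be confirmed in the Configuration Vocabulary before use.

from skywalking import agent, config

# Report logs with a custom layout that also carries the trace ID via %(tid)s.
config.agent_log_reporter_layout = \
    '%(asctime)s [%(threadName)s] %(levelname)s %(name)s [%(tid)s] - %(message)s'

# Alternatively, transmit un-formatted records: logRecord.msg becomes the
# content, each argument becomes an argument.N tag, and the layout above is
# ignored (uncomment to use; check the exact config name first).
# config.log_reporter_formatted = False

agent.start()

Whether %(tid)s also works in formatters attached to your own local handlers depends on the agent injecting that field into the log records, so treat that usage as something to verify in your deployment.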
When enabled, the log reporter will collect logs disregarding your custom log filters.\nFor example, if you attach the filter below to the logger - the default behavior of log reporting aligns with the filter (not reporting any logs with a message starting with SW test)\nclass AppFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(\u0026#39;SW test\u0026#39;) logger.addFilter(AppFilter()) However, if you do would like to report those filtered logs, set the log_reporter_ignore_filter to True.\nFormatting Note that regardless of the formatting, Python agent will always report the following three tags -\nlevel - the logger level name\nlogger - the logger name\nthread - the thread name\nLimit stacktrace depth You can set the cause_exception_depth config entry to a desired level(defaults to 10), which limits the output depth of exception stacktrace in reporting.\nThis config limits agent to report up to limit stacktrace, please refer to Python traceback for more explanations.\nCustomize the reported log format You can choose to report collected logs in a custom layout.\nIf not set, the agent uses the layout below by default, else the agent uses your custom layout set in log_reporter_layout.\n'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s'\nIf the layout is set to None, the reported log content will only contain the pre-formatted LogRecord.message(msg % args) without any additional styles or extra fields, stacktrace will be attached if an exception was raised.\nTransmit un-formatted logs You can also choose to report the log messages without any formatting. It separates the raw log msg logRecord.msg and logRecord.args, then puts them into message content and tags starting from argument.0, respectively, along with an exception tag if an exception was raised.\nNote when you set log_reporter_formatted to False, it ignores your custom layout introduced above.\nAs an example, the following code:\nlogger.info(\u0026#34;SW test log %s%s%s\u0026#34;, \u0026#39;arg0\u0026#39;, \u0026#39;arg1\u0026#39;, \u0026#39;arg2\u0026#39;) Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;SW test log %s %s %s\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg0\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg2\u0026#34; } ] } ","title":"Python Agent Log Reporter","url":"/docs/skywalking-python/v1.0.1/en/setup/advanced/logreporter/"},{"content":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC and Kafka protocol, HTTP protocol is not implemented yet (requires additional handler on SkyWalking OAP side).\nEnabling the feature (default is enabled) PVM Reporter is also by default enabled, meaning useful Python metrics such as thread count/GC info will be shown in OAP General Services - Instance - PVM Tab) If you really don\u0026rsquo;t need such a feature, disable them through config.agent_pvm_meter_reporter_active or SW_AGENT_PVM_METER_REPORTER_ACTIVE\nconfig.agent_meter_reporter_active = True # Or os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;True\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=True Disable the feature os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;False\u0026#39; 
or\nexport SW_AGENT_METER_REPORTER_ACTIVE=False Counter  Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT, ((\u0026#34;k1\u0026#34;, \u0026#34;v1\u0026#34;), (\u0026#34;k2\u0026#34;, \u0026#34;v2\u0026#34;))) # or this way # builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT).tag(\u0026#39;key1\u0026#39;, \u0026#39;value1\u0026#39;).tag(\u0026#39;key2\u0026#39;, \u0026#39;value2\u0026#39;) c = builder.build() c.increment(2) Syntactic sugars builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time the with-wrapped codes consumed with c.create_timer(): # some codes may consume a certain time builder = Counter.Builder(\u0026#39;c3\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by num once counter_decorator_test gets called @Counter.increase(name=\u0026#39;c3\u0026#39;, num=2) def counter_decorator_test(): # some codes builder = Counter.Builder(\u0026#39;c4\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time counter_decorator_test consumed @Counter.timer(name=\u0026#39;c4\u0026#39;) def counter_decorator_test(s): # some codes may consume a certain time  Counter.Builder(name, tags) Create a new counter builder with the meter name and optional tags. Counter.tag(key: str, value) Mark a tag key/value pair. Counter.mode(mode: CounterMode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.increment(count) Increment count to the Counter, It could be a positive value.  Gauge  Gauge API represents a single numerical value.  # producer: iterable object builder = Gauge.Builder(\u0026#39;g1\u0026#39;, producer, ((\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;))) g = Builder.build()  Gauge.Builder(name, tags) Create a new gauge builder with the meter name and iterable object, this iterable object need to produce numeric value. Gauge.tag(key: str, value) Mark a tag key/value pair. Gauge.build() Build a new Gauge which is collected and reported to the backend.  Histogram  Histogram API represents a summary sample observations with customize buckets.  builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)], (\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;)) h = builder.build() Syntactic sugars builder = Histogram.Builder(\u0026#39;h3\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time the with-wprapped codes consumed with h.create_timer(): # some codes may consume a certain time builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time histogram_decorator_test consumed @Histogram.timer(name=\u0026#39;h2\u0026#39;) def histogram_decorator_test(s): time.sleep(s)  Histogram.Builder(name, tags) Create a new histogram builder with the meter name and optional tags. Histogram.tag(key: str, value) Mark a tag key/value pair. Histogram.minValue(value) Set up the minimal value of this histogram, default is 0. Histogram.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  
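The Gauge section above only shows the builder call, so here is a short, hedged sketch of what an iterable producer can look like; the generator polling a queue length is purely illustrative, and the import path is an assumption since this page only shows the class names.

# Import path is an assumption; adjust to your agent version.
from skywalking.meter.gauge import Gauge

pending_jobs = []

def queue_depth():
    # Any iterable that keeps yielding numeric values works as a producer;
    # the agent pulls the next value on each report cycle.
    while True:
        yield len(pending_jobs)

g = Gauge.Builder('job_queue_depth', queue_depth(), (('queue', 'default'),)).build()

A generator is convenient here because the gauge then always reports the current queue length rather than a value captured at build time.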
","title":"Python Agent Meter Reporter","url":"/docs/skywalking-python/latest/en/setup/advanced/meterreporter/"},{"content":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC and Kafka protocol, HTTP protocol is not implemented yet (requires additional handler on SkyWalking OAP side).\nEnabling the feature (default is enabled) PVM Reporter is also by default enabled, meaning useful Python metrics such as thread count/GC info will be shown in OAP General Services - Instance - PVM Tab) If you really don\u0026rsquo;t need such a feature, disable them through config.agent_pvm_meter_reporter_active or SW_AGENT_PVM_METER_REPORTER_ACTIVE\nconfig.agent_meter_reporter_active = True # Or os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;True\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=True Disable the feature os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;False\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=False Counter  Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT, ((\u0026#34;k1\u0026#34;, \u0026#34;v1\u0026#34;), (\u0026#34;k2\u0026#34;, \u0026#34;v2\u0026#34;))) # or this way # builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT).tag(\u0026#39;key1\u0026#39;, \u0026#39;value1\u0026#39;).tag(\u0026#39;key2\u0026#39;, \u0026#39;value2\u0026#39;) c = builder.build() c.increment(2) Syntactic sugars builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time the with-wrapped codes consumed with c.create_timer(): # some codes may consume a certain time builder = Counter.Builder(\u0026#39;c3\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by num once counter_decorator_test gets called @Counter.increase(name=\u0026#39;c3\u0026#39;, num=2) def counter_decorator_test(): # some codes builder = Counter.Builder(\u0026#39;c4\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time counter_decorator_test consumed @Counter.timer(name=\u0026#39;c4\u0026#39;) def counter_decorator_test(s): # some codes may consume a certain time  Counter.Builder(name, tags) Create a new counter builder with the meter name and optional tags. Counter.tag(key: str, value) Mark a tag key/value pair. Counter.mode(mode: CounterMode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.increment(count) Increment count to the Counter, It could be a positive value.  Gauge  Gauge API represents a single numerical value.  # producer: iterable object builder = Gauge.Builder(\u0026#39;g1\u0026#39;, producer, ((\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;))) g = Builder.build()  Gauge.Builder(name, tags) Create a new gauge builder with the meter name and iterable object, this iterable object need to produce numeric value. Gauge.tag(key: str, value) Mark a tag key/value pair. Gauge.build() Build a new Gauge which is collected and reported to the backend.  Histogram  Histogram API represents a summary sample observations with customize buckets.  
builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)], (\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;)) h = builder.build() Syntactic sugars builder = Histogram.Builder(\u0026#39;h3\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time the with-wrapped codes consumed with h.create_timer(): # some codes may consume a certain time builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time histogram_decorator_test consumed @Histogram.timer(name=\u0026#39;h2\u0026#39;) def histogram_decorator_test(s): time.sleep(s)  Histogram.Builder(name, tags) Create a new histogram builder with the meter name and optional tags. Histogram.tag(key: str, value) Mark a tag key/value pair. Histogram.minValue(value) Set the minimal value of this histogram; the default is 0. Histogram.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(value) Add a value into the histogram and automatically determine which bucket count needs to be incremented. Rule: a value is counted into [step1, step2).  ","title":"Python Agent Meter Reporter","url":"/docs/skywalking-python/next/en/setup/advanced/meterreporter/"},{"content":"Python Agent Meter Reporter Important Note: Meter reporter is currently only available over the gRPC and Kafka protocols; the HTTP protocol is not implemented yet (requires an additional handler on the SkyWalking OAP side).\nEnabling the feature (default is enabled) The PVM reporter is also enabled by default, meaning useful Python metrics such as thread count/GC info will be shown in the OAP General Services - Instance - PVM tab. If you really don\u0026rsquo;t need such a feature, disable it through config.agent_pvm_meter_reporter_active or SW_AGENT_PVM_METER_REPORTER_ACTIVE.\nconfig.agent_meter_reporter_active = True # Or os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;True\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=True Disable the feature os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;False\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=False Counter  The Counter API represents a single monotonically increasing counter; it automatically collects data and reports it to the backend.  
builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT, ((\u0026#34;k1\u0026#34;, \u0026#34;v1\u0026#34;), (\u0026#34;k2\u0026#34;, \u0026#34;v2\u0026#34;))) # or this way # builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT).tag(\u0026#39;key1\u0026#39;, \u0026#39;value1\u0026#39;).tag(\u0026#39;key2\u0026#39;, \u0026#39;value2\u0026#39;) c = builder.build() c.increment(2) Syntactic sugars builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time the with-wrapped codes consumed with c.create_timer(): # some codes may consume a certain time builder = Counter.Builder(\u0026#39;c3\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by num once counter_decorator_test gets called @Counter.increase(name=\u0026#39;c3\u0026#39;, num=2) def counter_decorator_test(): # some codes builder = Counter.Builder(\u0026#39;c4\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time counter_decorator_test consumed @Counter.timer(name=\u0026#39;c4\u0026#39;) def counter_decorator_test(s): # some codes may consume a certain time  Counter.Builder(name, tags) Create a new counter builder with the meter name and optional tags. Counter.tag(key: str, value) Mark a tag key/value pair. Counter.mode(mode: CounterMode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.increment(count) Increment count to the Counter, It could be a positive value.  Gauge  Gauge API represents a single numerical value.  # producer: iterable object builder = Gauge.Builder(\u0026#39;g1\u0026#39;, producer, ((\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;))) g = Builder.build()  Gauge.Builder(name, tags) Create a new gauge builder with the meter name and iterable object, this iterable object need to produce numeric value. Gauge.tag(key: str, value) Mark a tag key/value pair. Gauge.build() Build a new Gauge which is collected and reported to the backend.  Histogram  Histogram API represents a summary sample observations with customize buckets.  builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)], (\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;)) h = builder.build() Syntactic sugars builder = Histogram.Builder(\u0026#39;h3\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time the with-wprapped codes consumed with h.create_timer(): # some codes may consume a certain time builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time histogram_decorator_test consumed @Histogram.timer(name=\u0026#39;h2\u0026#39;) def histogram_decorator_test(s): time.sleep(s)  Histogram.Builder(name, tags) Create a new histogram builder with the meter name and optional tags. Histogram.tag(key: str, value) Mark a tag key/value pair. Histogram.minValue(value) Set up the minimal value of this histogram, default is 0. Histogram.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  
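To make the bucket rule concrete, here is a small hedged example of feeding values into a histogram directly with addValue; bucket boundaries follow the [step1, step2) rule described above, and the import path is an assumption.

# Import path is an assumption; adjust to your agent version.
from skywalking.meter.histogram import Histogram

# Buckets start at 0.0, 0.1, ..., 0.9.
latency = (
    Histogram.Builder('request_latency', [i / 10 for i in range(10)])
    .tag('endpoint', '/users')
    .build()
)

latency.addValue(0.23)  # counted into the [0.2, 0.3) bucket
latency.addValue(0.95)  # counted into the last bucket, which starts at 0.9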
","title":"Python Agent Meter Reporter","url":"/docs/skywalking-python/v1.0.1/en/setup/advanced/meterreporter/"},{"content":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve a series of data points between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These data points contain tags: id and entity_id that belong to a family default. They also choose fields: total and value.\n$ bydbctl measure query -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] timeRange: begin: 2022-10-15T22:32:48Z end: 2022-10-15T23:32:48Z EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl measure query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] EOF API Reference MeasureService v1\n","title":"Query Measures","url":"/docs/skywalking-banyandb/latest/crud/measure/query/"},{"content":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. 
bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve a series of data points between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These data points contain tags: id and entity_id that belong to a family default. They also choose fields: total and value.\n$ bydbctl measure query -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] timeRange: begin: 2022-10-15T22:32:48Z end: 2022-10-15T23:32:48Z EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl measure query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] EOF API Reference MeasureService v1\n","title":"Query Measures","url":"/docs/skywalking-banyandb/next/crud/measure/query/"},{"content":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. 
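The defaulting rules for start and end are easy to express in code; the helper below is only an illustration of that arithmetic using the 30-minute window from the examples, and is not part of bydbctl.

from datetime import datetime, timedelta, timezone

WINDOW = timedelta(minutes=30)

def resolve_time_range(start=None, end=None):
    # Apply the defaulting rules for absolute start/end times.
    now = datetime.now(timezone.utc)
    if start is None and end is None:
        return now - WINDOW, now              # past 30 minutes
    if start is None:
        return end - WINDOW, end              # start = end - 30 minutes
    if end is None:
        return start, start + WINDOW          # end = start + 30 minutes
    return start, end

# The example from the text: end = 2022-11-09T12:34:00Z gives start = 12:04:00Z.
start, end = resolve_time_range(end=datetime(2022, 11, 9, 12, 34, tzinfo=timezone.utc))
print(start.isoformat(), end.isoformat())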
They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve a series of data points between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These data points contain tags: id and entity_id that belong to a family default. They also choose fields: total and value.\n$ bydbctl measure query -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] timeRange: begin: 2022-10-15T22:32:48Z end: 2022-10-15T23:32:48Z EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl measure query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] EOF API Reference MeasureService v1\n","title":"Query Measures","url":"/docs/skywalking-banyandb/v0.5.0/crud/measure/query/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. 
Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
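Since the query protocol is plain GraphQL, any HTTP client can drive it. The sketch below posts one of the documented queries (typeOfMetrics, which returns an enum and therefore needs no selection set); the /graphql path on the OAP REST port (12800 by default) and the metric name are assumptions to adapt to your deployment.

import requests

query = 'query ($name: String!) { typeOfMetrics(name: $name) }'

resp = requests.post(
    'http://127.0.0.1:12800/graphql',            # assumed OAP GraphQL endpoint
    json={'query': query, 'variables': {'name': 'service_cpm'}},
    timeout=10,
)
print(resp.json())

execExpression and the other queries above are driven the same way; only the query text, the selection set, and the variables change.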
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/latest/en/api/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nV2 APIs Provide Metadata V2 query APIs since 9.0.0, including Layer concept.\nextendtypeQuery{# Read all available layers# UI could use this list to determine available dashboards/panels# The available layers would change with time in the runtime, because new service could be detected in any time.# This list should be loaded periodically.listLayers:[String!]!# Read the service list according to layer.listServices(layer:String):[Service!]!# Find service according to given ID. Return null if not existing.getService(serviceId:String!):Service# Search and find service according to given name. Return null if not existing.findService(serviceName:String!):Service# Read service instance list.listInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Search and find service instance according to given ID. Return null if not existing.getInstance(instanceId:String!):ServiceInstance# Search and find matched endpoints according to given service and keyword(optional)# If no keyword, randomly choose endpoint based on `limit` value.findEndpoint(keyword:String,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
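The step-dependent time formats in the Duration block map directly onto strftime patterns. The helper below is only an illustration of those formats, not part of any SkyWalking client library.

from datetime import datetime

# strftime patterns mirroring the formats listed for each Step.
STEP_FORMATS = {
    'SECOND': '%Y-%m-%d %H%M%S',
    'MINUTE': '%Y-%m-%d %H%M',
    'HOUR':   '%Y-%m-%d %H',
    'DAY':    '%Y-%m-%d',
    'MONTH':  '%Y-%m',
}

def duration(start: datetime, end: datetime, step: str) -> dict:
    fmt = STEP_FORMATS[step]
    return {'start': start.strftime(fmt), 'end': end.strftime(fmt), 'step': step}

# The HOUR example from the comment block: 2017-11-08 09 .. 2017-11-08 19,
# a span that contains 11 hourly data points.
print(duration(datetime(2017, 11, 8, 9), datetime(2017, 11, 8, 19), 'HOUR'))
# {'start': '2017-11-08 09', 'end': '2017-11-08 19', 'step': 'HOUR'}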
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topology# When layer is specified, the topology of this layer would be queriedgetGlobalTopology(duration:Duration!,layer:String):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology# Query the topology, based on the given instancegetProcessTopology(serviceInstanceId:ID!,duration:Duration!):ProcessTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!# Read the list of searchable keysqueryLogTagAutocompleteKeys(duration:Duration!):[String!]# Search the available value options of the given key.queryLogTagAutocompleteValues(tagKey:String!,duration:Duration!):[String!]}Log implementations vary between different database options. 
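The autocomplete queries added here return plain string lists, which makes them a convenient way to see how the Duration input is passed as a GraphQL variable; as before, the /graphql path and port are assumptions about your OAP deployment.

import requests

# queryLogTagAutocompleteKeys returns [String!], so no selection set is needed.
query = 'query ($duration: Duration!) { queryLogTagAutocompleteKeys(duration: $duration) }'
variables = {'duration': {'start': '2022-11-09 1200', 'end': '2022-11-09 1230', 'step': 'MINUTE'}}

resp = requests.post(
    'http://127.0.0.1:12800/graphql',            # assumed OAP GraphQL endpoint
    json={'query': query, 'variables': variables},
    timeout=10,
)
print(resp.json())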
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{# Search segment list with given conditionsqueryBasicTraces(condition:TraceQueryCondition):TraceBrief# Read the specific trace ID with given trace IDqueryTrace(traceId:ID!):Trace# Read the list of searchable keysqueryTraceTagAutocompleteKeys(duration:Duration!):[String!]# Search the available value options of the given key.queryTraceTagAutocompleteValues(tagKey:String!,duration:Duration!):[String!]}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegments(taskID:ID!):[ProfiledTraceSegments!]!# analyze multiple profiled segments, start and end time use timestamp(millisecond)getSegmentsProfileAnalyze(queries:[SegmentProfileAnalyzeQuery!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task list# query `triggerType == FIXED_TIME` when triggerType is absentqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!],triggerType:EBPFProfilingTriggerType,duration:Duration):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. 
analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}On-Demand Pod Logs Provide APIs to query on-demand pod logs since 9.1.0.\nextendtypeQuery{listContainers(condition:OndemandContainergQueryCondition):PodContainersondemandPodLogs(condition:OndemandLogQueryCondition):Logs}Hierarchy Provide Hierarchy query APIs since 10.0.0, including service and instance hierarchy.\nextendtypeQuery{# Query the service hierarchy, based on the given service. Will recursively return all related layers services in the hierarchy.getServiceHierarchy(serviceId:ID!,layer:String!):ServiceHierarchy!# Query the instance hierarchy, based on the given instance. Will return all direct related layers instances in the hierarchy, no recursive.getInstanceHierarchy(instanceId:ID!,layer:String!):InstanceHierarchy!# List layer hierarchy levels. The layer levels are defined in the `hierarchy-definition.yml`.listLayerLevels:[LayerLevel!]!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/next/en/api/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple linears.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responsed, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.0.0/en/protocols/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. 
Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple linears.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responsed, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
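As a quick illustration of this TopN style of query, a client request against the getServiceTopN field defined above might look like the sketch below; the operation name, the metric name service_sla, the duration literals, and the selected result fields are assumptions for this example only, not part of the protocol definition.
query Top10ServicesBySla {
  getServiceTopN(
    # service_sla is assumed here to be a single-value metric defined in OAL
    name: "service_sla",
    topN: 10,
    duration: { start: "2017-11-08 09", end: "2017-11-08 19", step: HOUR },
    # DES = descending per the protocol's Order enum
    order: DES
  ) {
    name
    value
  }
}
The duration argument reuses the Duration input described under Condition below.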
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.1.0/en/protocols/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. 
You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. 
getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple linears.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responsed, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. 
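Before the field-by-field description, here is a minimal sketch of a Duration being passed to one of the metadata queries above (getAllServices); the literal values and the selected Service fields are assumptions for illustration only.
query ServicesOfTheDay {
  getAllServices(
    # DAY step, so start/end use the yyyy-MM-dd format described below
    duration: { start: "2017-11-07", end: "2017-11-08", step: DAY }
  ) {
    id
    name
  }
}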
Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.2.0/en/protocols/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. 
Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different from the metrics query.# All aggregation queries require the backend and/or storage to aggregate at query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In observability, traces and logs carry specific, well-defined meanings, while general records represent other collected data, such as sampled slow SQL statements and raw HTTP request data (request/response headers and bodies).\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\nThe test API serves as a debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span.
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.3.0/en/protocols/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
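For orientation, a global topology request built on the getGlobalTopology field (defined in the schema just below) might look like this sketch; the duration literals and the node/call field selections are assumptions for illustration, not a definitive shape of the Topology type.
query GlobalTopologyOfBusyHours {
  getGlobalTopology(
    duration: { start: "2017-11-08 09", end: "2017-11-08 19", step: HOUR }
  ) {
    # nodes/calls and their fields are assumed selections for this example
    nodes {
      id
      name
    }
    calls {
      source
      target
    }
  }
}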
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
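As a minimal usage sketch, a UI can first probe supportQueryLogsByKeywords before exposing a keyword search box; only the operation name below is an assumption.
query LogSearchCapability {
  # Returns true when the configured storage supports fuzzy/keyword log search
  supportQueryLogsByKeywords
}
A full queryLogs call would additionally pass a LogQueryCondition, whose fields are defined in the query protocol repository.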
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.4.0/en/api/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.5.0/en/api/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.6.0/en/api/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.7.0/en/api/query-protocol/"},{"content":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve elements in a stream named sw between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These elements also choose a tag trace_id which lives in a family named searchable.\n$ bydbctl stream query -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] timeRange: begin: 2022-10-15T22:32:48+08:00 end: 2022-10-15T23:32:48+08:00 EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl stream query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] EOF API Reference StreamService v1\n","title":"Query Streams","url":"/docs/skywalking-banyandb/latest/crud/stream/query/"},{"content":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  
Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve elements in a stream named sw between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These elements also choose a tag trace_id which lives in a family named searchable.\n$ bydbctl stream query -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] timeRange: begin: 2022-10-15T22:32:48+08:00 end: 2022-10-15T23:32:48+08:00 EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl stream query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] EOF API Reference StreamService v1\n","title":"Query Streams","url":"/docs/skywalking-banyandb/next/crud/stream/query/"},{"content":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. 
\u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve elements in a stream named sw between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These elements also choose a tag trace_id which lives in a family named searchable.\n$ bydbctl stream query -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] timeRange: begin: 2022-10-15T22:32:48+08:00 end: 2022-10-15T23:32:48+08:00 EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl stream query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] EOF API Reference StreamService v1\n","title":"Query Streams","url":"/docs/skywalking-banyandb/v0.5.0/crud/stream/query/"},{"content":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The maximum buffer event size.event_buffer_size:5000# The partition count of queue.partition:1Configuration    Name Type Description     event_buffer_size int configThe maximum buffer event size.   partition int The total partition count.    ","title":"Queue/memory-queue","url":"/docs/skywalking-satellite/latest/en/setup/plugins/queue_memory-queue/"},{"content":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The maximum buffer event size.event_buffer_size:5000# The partition count of queue.partition:1Configuration    Name Type Description     event_buffer_size int configThe maximum buffer event size.   partition int The total partition count.    ","title":"Queue/memory-queue","url":"/docs/skywalking-satellite/next/en/setup/plugins/queue_memory-queue/"},{"content":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The maximum buffer event size.event_buffer_size:5000# The partition count of queue.partition:1Configuration    Name Type Description     event_buffer_size int configThe maximum buffer event size.   partition int The total partition count.    ","title":"Queue/memory-queue","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/queue_memory-queue/"},{"content":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the input event. Please note that this plugin does not support Windows platform.\nDefaultConfig # The size of each segment. Default value is 256K. The unit is Byte.segment_size:262114# The max num of segments in memory. Default value is 10.max_in_mem_segments:10# The capacity of Queue = segment_size * queue_capacity_segments.queue_capacity_segments:2000# The period flush time. The unit is ms. Default value is 1 second.flush_period:1000# The max number in one flush time. Default value is 10000.flush_ceiling_num:10000# The max size of the input event. 
Default value is 20k.max_event_size:20480# The partition count of queue.partition:1Configuration    Name Type Description     segment_size int The size of each segment. The unit is byte.   max_in_mem_segments int32 The max num of segments in memory.   queue_capacity_segments int The capacity of Queue = segment_size * queue_capacity_segments.   flush_period int The period flush time. The unit is ms.   flush_ceiling_num int The max number in one flush time.   max_event_size int The max size of the input event.   partition int The total partition count.    ","title":"Queue/mmap-queue","url":"/docs/skywalking-satellite/latest/en/setup/plugins/queue_mmap-queue/"},{"content":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the input event. Please note that this plugin does not support Windows platform.\nDefaultConfig # The size of each segment. Default value is 256K. The unit is Byte.segment_size:262114# The max num of segments in memory. Default value is 10.max_in_mem_segments:10# The capacity of Queue = segment_size * queue_capacity_segments.queue_capacity_segments:2000# The period flush time. The unit is ms. Default value is 1 second.flush_period:1000# The max number in one flush time. Default value is 10000.flush_ceiling_num:10000# The max size of the input event. Default value is 20k.max_event_size:20480# The partition count of queue.partition:1Configuration    Name Type Description     segment_size int The size of each segment. The unit is byte.   max_in_mem_segments int32 The max num of segments in memory.   queue_capacity_segments int The capacity of Queue = segment_size * queue_capacity_segments.   flush_period int The period flush time. The unit is ms.   flush_ceiling_num int The max number in one flush time.   max_event_size int The max size of the input event.   partition int The total partition count.    ","title":"Queue/mmap-queue","url":"/docs/skywalking-satellite/next/en/setup/plugins/queue_mmap-queue/"},{"content":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the input event. Please note that this plugin does not support Windows platform.\nDefaultConfig # The size of each segment. Default value is 256K. The unit is Byte.segment_size:262114# The max num of segments in memory. Default value is 10.max_in_mem_segments:10# The capacity of Queue = segment_size * queue_capacity_segments.queue_capacity_segments:2000# The period flush time. The unit is ms. Default value is 1 second.flush_period:1000# The max number in one flush time. Default value is 10000.flush_ceiling_num:10000# The max size of the input event. Default value is 20k.max_event_size:20480# The partition count of queue.partition:1Configuration    Name Type Description     segment_size int The size of each segment. The unit is byte.   max_in_mem_segments int32 The max num of segments in memory.   queue_capacity_segments int The capacity of Queue = segment_size * queue_capacity_segments.   flush_period int The period flush time. The unit is ms.   flush_ceiling_num int The max number in one flush time.   max_event_size int The max size of the input event.   partition int The total partition count.    
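As a rough sanity check on the defaults above, applying the documented formula capacity = segment_size * queue_capacity_segments gives 262114 B x 2000 = 524,228,000 B, i.e. about 524 MB (roughly 500 MiB) of mapped queue space; the listed 262114 appears intended to be 262144 (256 KiB), matching the "Default value is 256K" comment. This is only back-of-the-envelope arithmetic on the listed defaults, not a measured figure.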
","title":"Queue/mmap-queue","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/queue_mmap-queue/"},{"content":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as SkyWalking native configuration discovery service protocol.\nDefaultConfig # The partition count of queue.partition:1Configuration    Name Type Description     partition int The total partition count.    ","title":"Queue/none-queue","url":"/docs/skywalking-satellite/latest/en/setup/plugins/queue_none-queue/"},{"content":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as SkyWalking native configuration discovery service protocol.\nDefaultConfig # The partition count of queue.partition:1Configuration    Name Type Description     partition int The total partition count.    ","title":"Queue/none-queue","url":"/docs/skywalking-satellite/next/en/setup/plugins/queue_none-queue/"},{"content":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as SkyWalking native configuration discovery service protocol.\nDefaultConfig # The partition count of queue.partition:1Configuration    Name Type Description     partition int The total partition count.    ","title":"Queue/none-queue","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/queue_none-queue/"},{"content":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including setting up environments, testing and releasing.\nFirst you need to have the make command available:\n# ubuntu/wsl sudo apt-get update sudo apt-get -y install make or\n# windows powershell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time irm get.scoop.sh | iex scoop install make Poetry We have migrated from basic pip to Poetry to manage dependencies and package our project.\nOnce you have make ready, run make env, this will automatically install the right Poetry release, and create (plus manage) a .venv virtual environment for us based on the currently activated Python 3 version. 
Enjoy coding!\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nSwitching between Multiple Python Versions Do not develop/test on Python \u0026lt; 3.7, since Poetry and some other functionalities we implement rely on Python 3.7+\nIf you would like to test on multiple Python versions, run the following to switch and recreate virtual environment:\nWithout Python Version Tools poetry env use python3.x poetry install With Python Version Tools pyenv shell 3.9.11 poetry env use $(pyenv which python) poetry install Or try: virtualenvs.prefer-active-python, which is an experimental poetry feature that can be set to true so that it will automatically follow environment.\nNext Refer to the Plugin Development Guide to learn how to build a new plugin for a library.\n","title":"Quick Start for Contributors","url":"/docs/skywalking-python/latest/en/contribution/developer/"},{"content":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including setting up environments, testing and releasing.\nFirst you need to have the make command available:\n# ubuntu/wsl sudo apt-get update sudo apt-get -y install make or\n# windows powershell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time irm get.scoop.sh | iex scoop install make Poetry We have migrated from basic pip to Poetry to manage dependencies and package our project.\nOnce you have make ready, run make env, this will automatically install the right Poetry release, and create (plus manage) a .venv virtual environment for us based on the currently activated Python 3 version. Enjoy coding!\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nSwitching between Multiple Python Versions Do not develop/test on Python \u0026lt; 3.7, since Poetry and some other functionalities we implement rely on Python 3.7+\nIf you would like to test on multiple Python versions, run the following to switch and recreate virtual environment:\nWithout Python Version Tools poetry env use python3.x poetry install With Python Version Tools pyenv shell 3.9.11 poetry env use $(pyenv which python) poetry install Or try: virtualenvs.prefer-active-python, which is an experimental poetry feature that can be set to true so that it will automatically follow environment.\nNext Refer to the Plugin Development Guide to learn how to build a new plugin for a library.\n","title":"Quick Start for Contributors","url":"/docs/skywalking-python/next/en/contribution/developer/"},{"content":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including setting up environments, testing and releasing.\nFirst you need to have the make command available:\n# ubuntu/wsl sudo apt-get update sudo apt-get -y install make or\n# windows powershell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time irm get.scoop.sh | iex scoop install make Poetry We have migrated from basic pip to Poetry to manage dependencies and package our project.\nOnce you have make ready, run make env, this will automatically install the right Poetry release, and create (plus manage) a .venv virtual environment for us based on the currently activated Python 3 version. 
Enjoy coding!\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nSwitching between Multiple Python Versions Do not develop/test on Python \u0026lt; 3.7, since Poetry and some other functionalities we implement rely on Python 3.7+\nIf you would like to test on multiple Python versions, run the following to switch and recreate virtual environment:\nWithout Python Version Tools poetry env use python3.x poetry install With Python Version Tools pyenv shell 3.9.11 poetry env use $(pyenv which python) poetry install Or try: virtualenvs.prefer-active-python, which is an experimental poetry feature that can be set to true so that it will automatically follow environment.\nNext Refer to the Plugin Development Guide to learn how to build a new plugin for a library.\n","title":"Quick Start for Contributors","url":"/docs/skywalking-python/v1.0.1/en/contribution/developer/"},{"content":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. 
Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","title":"RabbitMQ monitoring","url":"/docs/main/latest/en/setup/backend/backend-rabbitmq-monitoring/"},{"content":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). 
rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","title":"RabbitMQ monitoring","url":"/docs/main/next/en/setup/backend/backend-rabbitmq-monitoring/"},{"content":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). 
rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","title":"RabbitMQ monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-rabbitmq-monitoring/"},{"content":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). 
rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","title":"RabbitMQ monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-rabbitmq-monitoring/"},{"content":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). 
rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connected rabbitmq_prometheus plugin   Connections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) as reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block unused rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","title":"RabbitMQ monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-rabbitmq-monitoring/"},{"content":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... 
modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","title":"Reading Context","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/"},{"content":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","title":"Reading Context","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/"},{"content":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","title":"Reading Context","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/"},{"content":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","title":"Reading Context","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/"},{"content":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... 
modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","title":"Reading Context","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/"},{"content":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/accesslog/v2/als.proto.\nSupport Forwarders  envoy-als-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-als-v2-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-als-v2-receiver/"},{"content":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/accesslog/v2/als.proto.\nSupport Forwarders  envoy-als-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-als-v2-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-als-v2-receiver/"},{"content":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/accesslog/v2/als.proto.\nSupport Forwarders  envoy-als-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-als-v2-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-als-v2-receiver/"},{"content":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/3791753e94edbac8a90c5485c68136886c40e719/api/envoy/config/accesslog/v3/accesslog.proto.\nSupport Forwarders  envoy-als-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   
limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-als-v3-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-als-v3-receiver/"},{"content":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/3791753e94edbac8a90c5485c68136886c40e719/api/envoy/config/accesslog/v3/accesslog.proto.\nSupport Forwarders  envoy-als-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-als-v3-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-als-v3-receiver/"},{"content":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/3791753e94edbac8a90c5485c68136886c40e719/api/envoy/config/accesslog/v3/accesslog.proto.\nSupport Forwarders  envoy-als-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-als-v3-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-als-v3-receiver/"},{"content":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/metrics/v2/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-metrics-v2-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-metrics-v2-receiver/"},{"content":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/metrics/v2/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   
limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-metrics-v2-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-metrics-v2-receiver/"},{"content":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/metrics/v2/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-metrics-v2-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-metrics-v2-receiver/"},{"content":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/5f7d6efb5786ee3de31b1fb37c78fa281718b704/api/envoy/service/metrics/v3/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-metrics-v3-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-metrics-v3-receiver/"},{"content":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/5f7d6efb5786ee3de31b1fb37c78fa281718b704/api/envoy/service/metrics/v3/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-metrics-v3-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-metrics-v3-receiver/"},{"content":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/5f7d6efb5786ee3de31b1fb37c78fa281718b704/api/envoy/service/metrics/v3/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   
limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-metrics-v3-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-metrics-v3-receiver/"},{"content":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration Discovery Service format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/ConfigurationDiscoveryService.proto.\nSupport Forwarders  native-cds-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-cds-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-cds-receiver/"},{"content":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration Discovery Service format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/ConfigurationDiscoveryService.proto.\nSupport Forwarders  native-cds-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-cds-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-cds-receiver/"},{"content":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration Discovery Service format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/ConfigurationDiscoveryService.proto.\nSupport Forwarders  native-cds-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-cds-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-cds-receiver/"},{"content":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/CLRMetric.proto.\nSupport Forwarders  native-clr-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-clr-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-clr-receiver/"},{"content":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/CLRMetric.proto.\nSupport Forwarders  native-clr-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-clr-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-clr-receiver/"},{"content":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/CLRMetric.proto.\nSupport Forwarders  native-clr-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-clr-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-clr-receiver/"},{"content":"Receiver/grpc-native-ebpf-accesslog-receiver Description This is a receiver for SkyWalking native accesslog format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/accesslog.proto.\nSupport Forwarders  
native-ebpf-accesslog-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-ebpf-accesslog-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-ebpf-accesslog-receiver/"},{"content":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-ebpf-profiling-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-ebpf-profiling-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-ebpf-profiling-receiver/"},{"content":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-ebpf-profiling-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-ebpf-profiling-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-ebpf-profiling-receiver/"},{"content":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-ebpf-profiling-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-ebpf-profiling-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-ebpf-profiling-receiver/"},{"content":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-event-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-event-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-event-receiver/"},{"content":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-event-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-event-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-event-receiver/"},{"content":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-event-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-event-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-event-receiver/"},{"content":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto.\nSupport Forwarders  
native-jvm-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-jvm-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-jvm-receiver/"},{"content":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto.\nSupport Forwarders  native-jvm-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-jvm-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-jvm-receiver/"},{"content":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto.\nSupport Forwarders  native-jvm-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-jvm-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-jvm-receiver/"},{"content":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-log-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-log-receiver/"},{"content":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-log-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-log-receiver/"},{"content":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-log-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-log-receiver/"},{"content":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native management format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto.\nSupport Forwarders  native-management-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-management-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-management-receiver/"},{"content":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native management format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto.\nSupport Forwarders  native-management-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    
","title":"Receiver/grpc-native-management-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-management-receiver/"},{"content":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native management format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto.\nSupport Forwarders  native-management-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-management-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-management-receiver/"},{"content":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-meter-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-meter-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-meter-receiver/"},{"content":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-meter-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-meter-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-meter-receiver/"},{"content":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-meter-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-meter-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-meter-receiver/"},{"content":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-process-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-process-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-process-receiver/"},{"content":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-process-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-process-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-process-receiver/"},{"content":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-process-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    
","title":"Receiver/grpc-native-process-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-process-receiver/"},{"content":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/profile/Profile.proto.\nSupport Forwarders  native-profile-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-profile-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-profile-receiver/"},{"content":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/profile/Profile.proto.\nSupport Forwarders  native-profile-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-profile-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-profile-receiver/"},{"content":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/profile/Profile.proto.\nSupport Forwarders  native-profile-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-profile-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-profile-receiver/"},{"content":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing and span attached event format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto.\nSupport Forwarders  native-tracing-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-tracing-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-tracing-receiver/"},{"content":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing and span attached event format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto.\nSupport Forwarders  native-tracing-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-tracing-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-tracing-receiver/"},{"content":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing and span attached event format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto.\nSupport Forwarders  native-tracing-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-tracing-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-tracing-receiver/"},{"content":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 format, which is defined at https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/collector/metrics/v1/metrics_service.proto.\nSupport Forwarders  
otlp-metrics-v1-grpc-forwarder  DefaultConfig yaml \nConfiguration    Name Type Description    ","title":"Receiver/grpc-otlp-metrics-v1-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-otlp-metrics-v1-receiver/"},{"content":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 format, which is defined at https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/collector/metrics/v1/metrics_service.proto.\nSupport Forwarders  otlp-metrics-v1-grpc-forwarder  DefaultConfig yaml \nConfiguration    Name Type Description    ","title":"Receiver/grpc-otlp-metrics-v1-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-otlp-metrics-v1-receiver/"},{"content":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 format, which is defined at https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/collector/metrics/v1/metrics_service.proto.\nSupport Forwarders  otlp-metrics-v1-grpc-forwarder  DefaultConfig yaml \nConfiguration    Name Type Description    ","title":"Receiver/grpc-otlp-metrics-v1-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-otlp-metrics-v1-receiver/"},{"content":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig # The native log request URI.uri:\u0026#34;/logging\u0026#34;# The request timeout seconds.timeout:5Configuration    Name Type Description     uri string config   timeout int     ","title":"Receiver/http-native-log-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_http-native-log-receiver/"},{"content":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig # The native log request URI.uri:\u0026#34;/logging\u0026#34;# The request timeout seconds.timeout:5Configuration    Name Type Description     uri string config   timeout int     ","title":"Receiver/http-native-log-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_http-native-log-receiver/"},{"content":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig # The native log request URI.uri:\u0026#34;/logging\u0026#34;# The request timeout seconds.timeout:5Configuration    Name Type Description     uri string config   timeout int     ","title":"Receiver/http-native-log-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_http-native-log-receiver/"},{"content":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. 
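For illustration only, the remaining steps of this data flow (an OpenTelemetry Collector scraping redis-exporter and pushing the samples to the OAP) could be configured roughly as follows. Port 9121 is the redis-exporter default; the host names redis-exporter and oap-server and the job name are placeholders, and any labels expected by the bundled /config/otel-rules/redis rules are not shown here and should be taken from the linked example configuration.

receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: redis-monitoring             # illustrative job name
          scrape_interval: 30s
          static_configs:
            - targets: [ "redis-exporter:9121" ] # redis-exporter default port; host name is a placeholder
exporters:
  otlp:
    endpoint: "oap-server:11800"                 # OAP gRPC address (placeholder host); 11800 is the default shared gRPC port
    tls:
      insecure: true                             # plain-text gRPC; enable TLS where required
service:
  pipelines:
    metrics:
      receivers: [ prometheus ]
      exporters: [ otlp ]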
OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. 
You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","title":"Redis monitoring","url":"/docs/main/latest/en/setup/backend/backend-redis-monitoring/"},{"content":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_used_bytes  meter_redis_memory_max_bytes Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration  meter_redis_commands_total The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. 
The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","title":"Redis monitoring","url":"/docs/main/next/en/setup/backend/backend-redis-monitoring/"},{"content":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. 
redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","title":"Redis monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-redis-monitoring/"},{"content":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. 
OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. 
You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","title":"Redis monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-redis-monitoring/"},{"content":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. 
The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","title":"Redis monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-redis-monitoring/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/latest/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. 
This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/next/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.0.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.1.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.2.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. 
This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.3.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.4.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.5.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.6.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. 
This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.7.0/en/faq/unexpected-endpoint-register/"},{"content":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","title":"Report service instance status","url":"/docs/main/latest/en/api/instance-properties/"},{"content":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. 
The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","title":"Report service instance status","url":"/docs/main/next/en/api/instance-properties/"},{"content":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  
// Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","title":"Report service instance status","url":"/docs/main/v9.4.0/en/api/instance-properties/"},{"content":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","title":"Report service instance status","url":"/docs/main/v9.5.0/en/api/instance-properties/"},{"content":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","title":"Report service instance status","url":"/docs/main/v9.6.0/en/api/instance-properties/"},{"content":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","title":"Report service instance status","url":"/docs/main/v9.7.0/en/api/instance-properties/"},{"content":"RocketMQ monitoring SkyWalking leverages rocketmq-exporter for collecting metrics data from RocketMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rocketmq-exporter (https://github.com/apache/rocketmq-exporter?tab=readme-ov-file#readme) collects metrics data from RocketMQ, The RocketMQ version is required to be 4.3.2+. OpenTelemetry Collector fetches metrics from rocketmq-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rocketmq-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RocketMQ Monitoring RocketMQ monitoring provides multidimensional metrics monitoring of RocketMQ Exporter as Layer: RocketMQ Service in the OAP. In each cluster, the broker is represented as Instance and the topic is represented as Endpoint.\nRocketMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Messages Produced Today Count meter_rocketmq_cluster_messages_produced_today The number of cluster messages produced today. RocketMQ Exporter   Messages Consumed Today Count meter_rocketmq_cluster_messages_consumed_today The number of cluster messages consumed today. RocketMQ Exporter   Total Producer Tps Msg/sec meter_rocketmq_cluster_total_producer_tps The number of messages produced per second. RocketMQ Exporter   Total Consume Tps Msg/sec meter_rocketmq_cluster_total_consumer_tps The number of messages consumed per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_cluster_producer_message_size The max size of a message produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_cluster_consumer_message_size The max size of the consumed message per second. RocketMQ Exporter   Messages Produced Until Yesterday Count meter_rocketmq_cluster_messages_produced_until_yesterday The total number of messages put until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Messages Consumed Until Yesterday Count meter_rocketmq_cluster_messages_consumed_until_yesterday The total number of messages read until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Max Consumer Latency ms meter_rocketmq_cluster_max_consumer_latency The max number of consumer latency. RocketMQ Exporter   Max CommitLog Disk Ratio % meter_rocketmq_cluster_max_commitLog_disk_ratio The max utilization ratio of the commit log disk. 
RocketMQ Exporter   CommitLog Disk Ratio % meter_rocketmq_cluster_commitLog_disk_ratio The utilization ratio of the commit log disk per broker IP. RocketMQ Exporter   Pull ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_pull_threadPool_queue_head_wait_time The wait time in milliseconds for pulling threadPool queue per broker IP. RocketMQ Exporter   Send ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_send_threadPool_queue_head_wait_time The wait time in milliseconds for sending threadPool queue per broker IP. RocketMQ Exporter    RocketMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Produce TPS Msg/sec meter_rocketmq_broker_produce_tps The number of broker produces messages per second. RocketMQ Exporter   Consume QPS Msg/sec meter_rocketmq_broker_consume_qps The number of broker consumes messages per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_broker_producer_message_size The max size of the messages produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_broker_consumer_message_size The max size of the messages consumed per second. RocketMQ Exporter    RocketMQ Topic Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Max Producer Message Size Byte meter_rocketmq_topic_max_producer_message_size The maximum number of messages produced. RocketMQ Exporter   Max Consumer Message Size Byte meter_rocketmq_topic_max_consumer_message_size The maximum number of messages consumed. RocketMQ Exporter   Consumer Latency ms meter_rocketmq_topic_consumer_latency Consumption delay time of a consumer group. RocketMQ Exporter   Producer Tps Msg/sec meter_rocketmq_topic_producer_tps The number of messages produced per second. RocketMQ Exporter   Consumer Group Tps Msg/sec meter_rocketmq_topic_consumer_group_tps The number of messages consumed per second per consumer group. RocketMQ Exporter   Producer Offset Count meter_rocketmq_topic_producer_offset The max progress of a topic\u0026rsquo;s production message. RocketMQ Exporter   Consumer Group Offset Count meter_rocketmq_topic_consumer_group_offset The max progress of a topic\u0026rsquo;s consumption message per consumer group. RocketMQ Exporter   Producer Message Size Byte/sec meter_rocketmq_topic_producer_message_size The max size of messages produced per second. RocketMQ Exporter   Consumer Message Size Byte/sec meter_rocketmq_topic_consumer_message_size The max size of messages consumed per second. RocketMQ Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/rocketmq/rocketmq-cluster.yaml, otel-rules/rocketmq/rocketmq-broker.yaml, otel-rules/rocketmq/rocketmq-topic.yaml. The RocketMQ dashboard panel configurations are found in ui-initialized-templates/rocketmq.\n","title":"RocketMQ monitoring","url":"/docs/main/next/en/setup/backend/backend-rocketmq-monitoring/"},{"content":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your plugin logic. If you want to perform debugging, follow these steps:\n Write test code: Write a sample application that includes the framework content you need to test. Build the Agent: In the project root directory, run the make build command to compile the Agent program into a binary file. 
Adjust the test program\u0026rsquo;s Debug configuration: Modify the test program\u0026rsquo;s Debug configuration, which will be explained in more detail later. Launch the program and add breakpoints: Start your sample application and add breakpoints in your plugin code where you want to pause the execution and inspect the program state.  Write test code Please make sure that you have imported github.com/apache/skywalking-go in your test code. You can refer to the documentation on how to compile using go build for specific steps.\nAdjust the test program\u0026rsquo;s Debug configuration Please locate the following two paths:\n Go Agent: Locate the binary file generated through make build in the previous step. Current project path: Find the root directory of the current project, which will be used to search for source files in subsequent steps.  Then, please enter the following command in the tool arguments section of the debug configuration:\n-toolexec '/path/to/skywalking-go-agent -debug /path/to/current-project-path' -a\u0026quot;. ","title":"Running and Debugging","url":"/docs/skywalking-go/latest/en/development-and-contribution/running-and-debugging/"},{"content":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your plugin logic. If you want to perform debugging, follow these steps:\n Write test code: Write a sample application that includes the framework content you need to test. Build the Agent: In the project root directory, run the make build command to compile the Agent program into a binary file. Adjust the test program\u0026rsquo;s Debug configuration: Modify the test program\u0026rsquo;s Debug configuration, which will be explained in more detail later. Launch the program and add breakpoints: Start your sample application and add breakpoints in your plugin code where you want to pause the execution and inspect the program state.  Write test code Please make sure that you have imported github.com/apache/skywalking-go in your test code. You can refer to the documentation on how to compile using go build for specific steps.\nAdjust the test program\u0026rsquo;s Debug configuration Please locate the following two paths:\n Go Agent: Locate the binary file generated through make build in the previous step. Current project path: Find the root directory of the current project, which will be used to search for source files in subsequent steps.  Then, please enter the following command in the tool arguments section of the debug configuration:\n-toolexec '/path/to/skywalking-go-agent -debug /path/to/current-project-path' -a\u0026quot;. ","title":"Running and Debugging","url":"/docs/skywalking-go/next/en/development-and-contribution/running-and-debugging/"},{"content":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your plugin logic. If you want to perform debugging, follow these steps:\n Write test code: Write a sample application that includes the framework content you need to test. Build the Agent: In the project root directory, run the make build command to compile the Agent program into a binary file. Adjust the test program\u0026rsquo;s Debug configuration: Modify the test program\u0026rsquo;s Debug configuration, which will be explained in more detail later. Launch the program and add breakpoints: Start your sample application and add breakpoints in your plugin code where you want to pause the execution and inspect the program state.  
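For the "Write test code" step above, the following is a minimal sketch of a sample application, assuming the standard-library net/http server is the framework under test; the route, port, and module layout are illustrative assumptions, not part of the official guide. The blank import of github.com/apache/skywalking-go is the import the guide asks for.

```go
// Minimal sample application for plugin debugging (a sketch, assuming
// net/http is the framework under test; route and port are illustrative).
package main

import (
	"net/http"

	_ "github.com/apache/skywalking-go" // required import so the agent can instrument the build
)

func main() {
	http.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello"))
	})
	_ = http.ListenAndServe(":8080", nil)
}
```

Build this program with go build while passing the -toolexec argument described in the following paragraphs, then start it under the debugger and set breakpoints inside the plugin code you want to inspect.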
Write test code Please make sure that you have imported github.com/apache/skywalking-go in your test code. You can refer to the documentation on how to compile using go build for specific steps.\nAdjust the test program\u0026rsquo;s Debug configuration Please locate the following two paths:\n Go Agent: Locate the binary file generated through make build in the previous step. Current project path: Find the root directory of the current project, which will be used to search for source files in subsequent steps.  Then, please enter the following command in the tool arguments section of the debug configuration:\n-toolexec '/path/to/skywalking-go-agent -debug /path/to/current-project-path' -a\u0026quot;. ","title":"Running and Debugging","url":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/running-and-debugging/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/latest/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/next/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/v9.3.0/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  
Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/v9.4.0/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. 
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/v9.5.0/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/v9.6.0/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/v9.7.0/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install the Satellite component.\nInstall Operator And Backend  Follow Operator installation instrument to install the operator. Follow Deploy OAP server and UI to install backend.  Deploy Satellite with default setting  Deploy the Storage use the below command:  Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f satellite.yaml Check the Satellite in Kubernetes:  $ kubectl get satellite NAME INSTANCES RUNNING ADDRESS default 1 1 default-satellite.default Satellite With HPA  Follow Custom Metrics Adapter to install the metrics adapter. Update the config in the Satellite CRD and re-apply it to activate the metrics service in satellite.  config: - name: SATELLITE_TELEMETRY_EXPORT_TYPE value: metrics_service Update the config in the OAP CRD and re-apply it to activate the satellite MAL.  config: - name: SW_METER_ANALYZER_ACTIVE_FILES value: satellite Add the HorizontalPodAutoScaler CRD, and update the config file the service and target to your excepted config. It\u0026rsquo;s recommend to set the stabilizationWindowSeconds and selectPolicy of scaling up in HPA, which would help prevent continuous scaling up of pods due to metric delay fluctuations. Check the HorizontalPodAutoScaler in the Kubernetes:  $ kubectl get HorizontalPodAutoscaler NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 2/1900, 5/75 1 3 1 92m ","title":"Satellite Usage","url":"/docs/skywalking-swck/latest/examples/satellite/"},{"content":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install the Satellite component.\nInstall Operator And Backend  Follow Operator installation instrument to install the operator. Follow Deploy OAP server and UI to install backend.  
Deploy Satellite with default setting  Deploy the Storage use the below command:  Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f satellite.yaml Check the Satellite in Kubernetes:  $ kubectl get satellite NAME INSTANCES RUNNING ADDRESS default 1 1 default-satellite.default Satellite With HPA  Follow Custom Metrics Adapter to install the metrics adapter. Update the config in the Satellite CRD and re-apply it to activate the metrics service in satellite.  config: - name: SATELLITE_TELEMETRY_EXPORT_TYPE value: metrics_service Update the config in the OAP CRD and re-apply it to activate the satellite MAL.  config: - name: SW_METER_ANALYZER_ACTIVE_FILES value: satellite Add the HorizontalPodAutoScaler CRD, and update the config file the service and target to your excepted config. It\u0026rsquo;s recommend to set the stabilizationWindowSeconds and selectPolicy of scaling up in HPA, which would help prevent continuous scaling up of pods due to metric delay fluctuations. Check the HorizontalPodAutoScaler in the Kubernetes:  $ kubectl get HorizontalPodAutoscaler NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 2/1900, 5/75 1 3 1 92m ","title":"Satellite Usage","url":"/docs/skywalking-swck/next/examples/satellite/"},{"content":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install the Satellite component.\nInstall Operator And Backend  Follow Operator installation instrument to install the operator. Follow Deploy OAP server and UI to install backend.  Deploy Satellite with default setting  Deploy the Storage use the below command:  Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f satellite.yaml Check the Satellite in Kubernetes:  $ kubectl get satellite NAME INSTANCES RUNNING ADDRESS default 1 1 default-satellite.default Satellite With HPA  Follow Custom Metrics Adapter to install the metrics adapter. Update the config in the Satellite CRD and re-apply it to activate the metrics service in satellite.  config: - name: SATELLITE_TELEMETRY_EXPORT_TYPE value: metrics_service Update the config in the OAP CRD and re-apply it to activate the satellite MAL.  config: - name: SW_METER_ANALYZER_ACTIVE_FILES value: satellite Add the HorizontalPodAutoScaler CRD, and update the config file the service and target to your excepted config. It\u0026rsquo;s recommend to set the stabilizationWindowSeconds and selectPolicy of scaling up in HPA, which would help prevent continuous scaling up of pods due to metric delay fluctuations. Check the HorizontalPodAutoScaler in the Kubernetes:  $ kubectl get HorizontalPodAutoscaler NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 2/1900, 5/75 1 3 1 92m ","title":"Satellite Usage","url":"/docs/skywalking-swck/v0.9.0/examples/satellite/"},{"content":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. 
When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  
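To make the asynchronous batch steps listed above concrete, here is a minimal, self-contained Go sketch of an event queue that flushes either when the batch is full or when the cycle timer fires. It only illustrates the policy, not Satellite's actual implementation; the event type, the sizes, and the send callback are all assumptions for the example.

```go
package main

import (
	"fmt"
	"time"
)

// event stands in for the wrapped telemetry data the proxy receives.
type event struct{ payload string }

// batcher buffers events and flushes them when the batch size is reached or
// when the flush interval elapses. Names and sizes are illustrative only.
type batcher struct {
	in        chan event
	batchSize int
	interval  time.Duration
	send      func([]event) // where a real proxy would call its gRPC client toward the OAP
}

func (b *batcher) run() {
	buf := make([]event, 0, b.batchSize)
	ticker := time.NewTicker(b.interval)
	defer ticker.Stop()
	flush := func() {
		if len(buf) == 0 {
			return
		}
		b.send(buf)
		buf = make([]event, 0, b.batchSize)
	}
	for {
		select {
		case e, ok := <-b.in:
			if !ok { // input closed: flush what is left and stop
				flush()
				return
			}
			buf = append(buf, e)
			if len(buf) >= b.batchSize {
				flush()
			}
		case <-ticker.C:
			flush()
		}
	}
}

func main() {
	b := &batcher{
		in:        make(chan event),
		batchSize: 3,
		interval:  200 * time.Millisecond,
		send: func(es []event) {
			fmt.Printf("sending %d events to OAP\n", len(es))
		},
	}
	go b.run()
	for i := 0; i < 7; i++ {
		b.in <- event{payload: fmt.Sprintf("log-%d", i)}
	}
	close(b.in)
	time.Sleep(300 * time.Millisecond)
}
```

The send callback is where the queued batch would be handed to the upstream client, which is what lets receiving and sending proceed independently.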
The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. 
For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. 
Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS messages to the satellite, which we will deploy later. Add the label into the default namespace so Istio can automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enable envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking-related components based on Kubernetes. The automatic scaling function of Satellite also mainly relies on SWCK. For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the SkyWalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on the SkyWalking WebUI. At this point, you can see that the Satellite is working!\nDeploy Monitor We need to install the OpenTelemetry Collector to collect metrics from the OAP instances and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scale the number of OAP instances via the deployment:\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! 
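Once the new OAP instances join the cluster, the Satellite spreads traffic across them using the routing policies described in the Routing section above. As a simplified illustration of the Fixed policy (the same identification string always maps to the same OAP node, with the mapping cached), here is a minimal Go sketch; a plain map stands in for the LRU cache of the real design, and every name in it is made up for the example.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// fixedRouter sketches the Fixed routing policy: hash the identification
// string onto the upstream list and cache the result so the same data
// content keeps landing on the same OAP node.
type fixedRouter struct {
	upstreams []string
	cache     map[string]string // identification -> chosen upstream node
}

func (r *fixedRouter) pick(id string) string {
	if node, ok := r.cache[id]; ok {
		return node // reuse the cached mapping
	}
	h := fnv.New32a()
	h.Write([]byte(id))
	node := r.upstreams[h.Sum32()%uint32(len(r.upstreams))]
	r.cache[id] = node
	return node
}

func main() {
	r := &fixedRouter{
		upstreams: []string{"oap-0:11800", "oap-1:11800", "oap-2:11800"},
		cache:     map[string]string{},
	}
	// Metrics from the same service instance always land on the same OAP node.
	fmt.Println(r.pick("service-a/instance-1"))
	fmt.Println(r.pick("service-a/instance-1")) // same node as the line above
	fmt.Println(r.pick("service-b/instance-9"))
}
```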
After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","title":"Scaling with Apache SkyWalking","url":"/docs/main/latest/en/academy/scaling-with-apache-skywalking/"},{"content":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. 
The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. 
The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. 
Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. 
Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! 
After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","title":"Scaling with Apache SkyWalking","url":"/docs/main/next/en/academy/scaling-with-apache-skywalking/"},{"content":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. 
The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. 
The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. 
Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. 
Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! 
After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","title":"Scaling with Apache SkyWalking","url":"/docs/main/v9.3.0/en/academy/scaling-with-apache-skywalking/"},{"content":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. 
The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. 
The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. 
How to balance the load balancer itself? The proxy still has to deal with the load balancing problem from the clients to itself, especially when a proxy cluster is deployed in a production environment.\nThere are three ways to solve this problem (a sketch of the resource-limit idea appears after this comparison):\n Connection management: use the max_connection config on the client side to specify the maximum duration of each connection. For more information, please read the proposal. Cluster awareness: the proxy is aware of its own cluster and actively closes connections when the load is unbalanced, so that the client re-picks a proxy. Resource limit+HPA: restrict the connection resources of each proxy and stop accepting new connections once the limit is reached, then use the Kubernetes HPA mechanism to dynamically scale out the number of proxies.  Comparing the three options: Connection management — Pros: simple to use. Cons: each client needs to ensure that data is not lost, and the client is required to handle GOAWAY responses. Cluster awareness — Pros: keeps the number of connections on each proxy relatively balanced. Cons: may cause a sudden traffic increase on some nodes, and each client needs to ensure that data is not lost. Resource limit+HPA — Pros: simple to use. Cons: traffic will not be particularly balanced across instances.\nWe choose Limit+HPA for these reasons:\n The proxy is easy to configure and use, and the behavior is easy to understand because it is based on basic metrics. No data is lost due to broken connections. The client does not need to implement any extra protocol to prevent data loss, which matters especially when the client is a commercial product. The connections across the proxy cluster do not need to be particularly balanced, as long as each proxy node itself is high-performance. 
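As a rough sketch of the resource-limit half of this choice, the Go code below wraps a listener so that a proxy stops accepting new connections once a fixed number of clients are active; the HPA then reacts to a connection-count metric and adds proxy instances. The limitedListener type and the limit of 10 are assumptions for illustration and do not reflect Satellite's actual configuration options.

package main

import (
	"fmt"
	"net"
	"sync"
)

// limitedListener stops accepting new connections while maxConns clients are active.
type limitedListener struct {
	net.Listener
	sem chan struct{} // counting semaphore sized to the connection limit
}

func newLimitedListener(l net.Listener, maxConns int) *limitedListener {
	return &limitedListener{Listener: l, sem: make(chan struct{}, maxConns)}
}

func (l *limitedListener) Accept() (net.Conn, error) {
	l.sem <- struct{}{} // blocks here once the limit is reached
	conn, err := l.Listener.Accept()
	if err != nil {
		<-l.sem
		return nil, err
	}
	return &countedConn{Conn: conn, sem: l.sem}, nil
}

// countedConn releases its slot when the client disconnects.
type countedConn struct {
	net.Conn
	sem  chan struct{}
	once sync.Once
}

func (c *countedConn) Close() error {
	c.once.Do(func() { <-c.sem })
	return c.Conn.Close()
}

func main() {
	inner, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	lis := newLimitedListener(inner, 10) // e.g. at most 10 client connections per proxy
	fmt.Println("listening on", lis.Addr())
	// A gRPC server could then be served on top of this listener so that the
	// limit applies to the proxy's receiving endpoint.
}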
SkyWalking-Satellite We have implemented this proxy in the SkyWalking-Satellite project. It sits between the clients and the SkyWalking OAP and effectively solves the load balancing problem.\nAfter the system is deployed, Satellite accepts the traffic from the clients, discovers all OAP nodes through a Kubernetes label selector or manual configuration, and load balances the traffic to the upstream OAP nodes.\nAs shown below, a single client still maintains a connection with a single Satellite; Satellite establishes a connection to each OAP and load balances messages across the OAP nodes.\nTo scale Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for SkyWalking users that provisions, upgrades, and maintains the relevant SkyWalking components and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps are performed:\n Read metrics from OAP: the HPA asks the SWCK metrics adapter to dynamically read the metrics stored in the OAP. Scale the Satellite: when the Kubernetes HPA sees that the metric values match the expectations, the Satellite is scaled automatically.  As shown below, the dotted line divides the picture into two parts: the HPA uses the SWCK adapter to read the metrics in the OAP, and when the threshold is met, the HPA scales the Satellite deployment.\nExample In this section, we demonstrate two cases:\n SkyWalking Scaling: after the SkyWalking OAP scales, the traffic is automatically load balanced through Satellite. Satellite Scaling: load balancing of Satellite’s own traffic.  NOTE: All commands can be found on GitHub.\nSkyWalking Scaling We will use the Bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The steps are:\n Install istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with the demo configuration profile, and enable Envoy ALS. Transmit the ALS messages to the Satellite, which we will deploy later. Add the label to the default namespace so that Istio automatically injects Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enable envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK makes it convenient for users to deploy and upgrade SkyWalking-related components on Kubernetes. The automatic scaling of Satellite also relies on SWCK. For more information, you can refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck && cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the SkyWalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should see the Bookinfo application. Refresh the page several times to generate enough access logs.\nThen you can see the topology and metrics of the Bookinfo application on the SkyWalking Web UI. At this point, you can see that Satellite is working!\nDeploy Monitor We need to install the OpenTelemetry Collector to collect metrics from the OAP instances and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser, visit http://localhost:8080/, and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data is presented.\nScaling OAP Scale the number of OAP replicas in the deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced across the OAPs.\nSatellite Scaling After completing the SkyWalking scaling demo, we carry out the Satellite scaling demo.\nDeploy SWCK HPA SWCK provides an adapter that implements the Kubernetes external metrics API, so the HPA can read metrics from the SkyWalking OAP. We expose Satellite’s metrics service to the OAP and configure an HPA resource to auto-scale the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, which limits each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then you can see there are 9 connections on the single Satellite instance; one Envoy proxy may establish multiple connections to the Satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application Scaling the application establishes more connections to the Satellite, which lets us verify that the HPA takes effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite is deployed as a single instance, and in this demo that single instance ends up accepting 11 connections. The HPA resource limits each Satellite to 10 connections and uses a stabilization window so that Satellite scales up smoothly. In this case, the Bookinfo application runs with 10+ instances after scaling, which means 10+ connections are established to the Satellite.\nSo after the HPA resource is running, the Satellite is automatically scaled up to 2 instances. You can learn about the replica calculation algorithm in the official documentation.
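For reference, the scaling decision follows the standard Kubernetes HPA formula, desiredReplicas = ceil(currentReplicas × currentMetric / targetMetric). The small Go sketch below plugs in the numbers from this demo (11 observed connections against a target of 10 per instance); the function name is ours for illustration, not a Kubernetes API.

package main

import (
	"fmt"
	"math"
)

// desiredReplicas mirrors the replica formula documented for the Kubernetes HPA.
func desiredReplicas(currentReplicas int, currentMetric, targetMetric float64) int {
	return int(math.Ceil(float64(currentReplicas) * currentMetric / targetMetric))
}

func main() {
	// 11 connections observed on 1 Satellite instance, target of 10 per instance.
	fmt.Println(desiredReplicas(1, 11, 10)) // prints 2, so Satellite scales to 2 pods
}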
Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we can see that when the gRPC connection count of a Satellite exceeds 10, the Satellite is automatically scaled by the HPA rule, and the per-instance connection count drops back to a normal level (in this example, fewer than 10):\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","title":"Scaling with Apache SkyWalking","url":"/docs/main/v9.4.0/en/academy/scaling-with-apache-skywalking/"},
After a period of time, you will see that the number of OAPs becomes 3 and the ALS traffic is balanced across the OAP instances.
Satellite Scaling After we have completed the SkyWalking scaling, we can carry out the Satellite scaling demo.
Deploy SWCK HPA SWCK provides an adapter that implements the Kubernetes external metrics API by reading the metrics from the SkyWalking OAP, so that the HPA can consume them. We expose the Satellite's metrics service to the OAP and configure an HPA resource to auto-scale the Satellite.
Install the SWCK adapter into the Kubernetes environment:
kubectl apply -f skywalking-swck/config/adapter-bundle.yaml
Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:
kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml
Then you can see that there are 9 connections on one Satellite; one Envoy proxy may establish multiple connections to the Satellite.
$ kubectl get HorizontalPodAutoscaler -n skywalking-system
NAME       REFERENCE                                 TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
hpa-demo   Deployment/skywalking-system-satellite    9/10      1         3         1          5m18s
Scaling Application Scaling the application establishes more connections to the Satellite, which lets us verify whether the HPA takes effect.
kubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1
Done! By default, the Satellite is deployed as a single instance, and a single instance will only accept 11 connections. The HPA resource limits one Satellite to 10 connections and uses a stabilization window so that the Satellite scales up steadily. In this case, we deploy the Bookinfo application with 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.
So once the HPA resource is running, the Satellite is automatically scaled up to 2 instances. You can learn about the replica calculation algorithm in the official documentation. Run the following command to view the running status:
$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch
NAME       REFERENCE                                 TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
hpa-demo   Deployment/skywalking-system-satellite    11/10     1         3         1          3m31s
hpa-demo   Deployment/skywalking-system-satellite    11/10     1         3         1          4m20s
hpa-demo   Deployment/skywalking-system-satellite    11/10     1         3         2          4m38s
hpa-demo   Deployment/skywalking-system-satellite    11/10     1         3         2          5m8s
hpa-demo   Deployment/skywalking-system-satellite    6/10      1         3         2          5m23s
By observing the “number of connections” metric, we can see that when the number of gRPC connections per Satellite exceeds 10, the Satellite automatically scales out through the HPA rule. As a result, the connection count drops back to a normal level (in this example, fewer than 10).
swctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service
","title":"Scaling with Apache SkyWalking","url":"/docs/main/v9.7.0/en/academy/scaling-with-apache-skywalking/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  
string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  
string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  
long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  
int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  
long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  
string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  
boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","title":"Scopes and Fields","url":"/docs/main/latest/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  
bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  
string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  
bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  
int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  
int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPES with K8S Prefix All metrics starting with K8S are derived from Kubernetes monitoring by Rover(eBPF agent).\nService, Service Instance and relations For all K8SService, K8SServiceInstance, K8SServiceRelation and K8SServiceInstanceRelation, they all have the following package/protocol level metric contents.\n   Name Remarks Group Key Type     type The metrics from log type, the following names should have the type prefix. The value may be connect, accept, close, write, read, protocol.  string   connect.duration Connect to other service use duration.  long(in nanoseconds)   connect.success The connect is success or not.  boolean   accept.duration Accept connection from client use duration.  long(in nanoseconds)   close.duration Close one connection use duration.  long(in nanoseconds)   close.success Close one connection is success or not.  boolean   write.duration Write data to the connection use duration.  long(in nanoseconds)   write.syscall Write data to the connection syscall name. The value should be Write, Writev, Send, SendTo, SendMsg, SendMmsg, SendFile, SendFile64.  string   write.l4.duration Write data to the connection use duration on Linux Layer 4.  long(in nanoseconds)   write.l4.transmitPackageCount Total package count on write data to the connection.  long   write.l4.retransmitPackageCount Total retransmit package count on write data to the connection.  long   write.l4.totalPackageSize Total transmit package size on write data to the connection.  
long(bytes)   write.l3.duration Write data to the connection use duration on Linux Layer 3.  long(in nanoseconds)   write.l3.localDuration Write data to the connection use local duration on Linux Layer 3.  long(in nanoseconds)   write.l3.outputDuration Write data to the connection use output duration on Linux Layer 3.  long(in nanoseconds)   write.l3.resolveMACCount Total resolve remote MAC address count on write data to the connection.  long   write.l3.resolveMACDuration Total resolve remote MAC address use duration on write data to the connection.  long(in nanoseconds)   write.l3.netFilterCount Total do net filtering count on write data to the connection.  long   write.l3.netFilterDuration Total do net filtering use duration on write data to the connection.  long(in nanoseconds)   write.l2.duration Write data to the connection use duration on Linux L2.  long(nanoseconds)   write.l2.networkDeviceName The network device name on write data to the connection.  string   write.l2.enterQueueBufferCount The write package count to the network device queue on write data to the connection.  long   write.l2.readySendDuration Total ready send buffer duration on write data to the connection.  long(in nanoseconds)   write.l2.networkDeviceSendDuration Total network send buffer use duration on write data to the connection.  long(in nanoseconds)   read.duration Read data from the connection use duration.  long(in nanoseconds)   read.syscall Read data from the connection syscall name. The value should Read, Readv, Recv, RecvFrom, RecvMsg, RecvMmsg.  string   read.l4.duration Read data to the connection use duration on Linux Layer 4.  long(in nanoseconds)   read.l3.duration Read data to the connection use duration on Linux Layer 3.  long(in nanoseconds)   read.l3.rcvDuration Read data to the connection use receive duration on Linux Layer 3.  long(in nanoseconds)   read.l3.localDuration Read data to the connection use local duration on Linux Layer 3.  long(in nanoseconds)   read.l3.netFilterCount Total do net filtering count on read data from the connection.  long   read.l3.netFilterDuration Total do net filtering use duration on read data from the connection.  long(in nanoseconds)   read.l2.netDeviceName The network device name on read data from the connection.  string   read.l2.packageCount Total read package count on the connection.  long   read.l2.totalPackageSize Total read package size on the connection.  long(bytes)   read.l2.packageToQueueDuration Total read package to the queue duration on the connection.  long(in nanoseconds)   read.l2.rcvPackageFromQueueDuration Total read package from the queue duration on the connection.  long(in nanoseconds)   protocol.type The protocol type name, the following names should have the type prefix. The value should be HTTP.  string   protocol.success This protocol request and response is success or not.  boolean   protocol.http.latency The latency of HTTP response.  long(in nanoseconds)   protocol.http.url The url path of HTTP request.  string   protocol.http.method The method name of HTTP request.  string   protocol.http.statusCode The response code of HTTP response.  int   protocol.http.sizeOfRequestHeader The header size of HTTP request.  long(bytes)   protocol.http.sizeOfRequestBody The body size of HTTP request.  long(bytes)   protocol.http.sizeOfResponseHeader The header size of HTTP response.  long(bytes)   protocol.http.sizeOfResponseBody The body size of HTTP response.  
long(bytes)    SCOPE K8SService    Name Remarks Group Key Type     name The service name in kubernetes.  string   layer The layer in kubernetes service.  string   detectPoint Where the relation is detected. The value may be client or server.  enum    SCOPE K8SServiceInstance    Name Remarks Group Key Type     serviceName The service name in kubernetes.  string   serviceInstanceName The pod name in kubernetes.  string   layer The layer of kubernetes service.  string   detectPoint Where the relation is detected. The value may be client or server.  enum    SCOPE K8SServiceRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceLayer The source layer service in kubernetes.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   tlsMode The TLS mode of relation. The value may be Plain or TLS.  enum   destServiceName The dest service name in kubernetes.  string   destLayer The dest layer service in kubernetes.  string    SCOPE K8SServiceRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceLayer The source layer service in kubernetes.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   tlsMode The TLS mode of relation. The value may be Plain or TLS.  enum   destServiceName The dest service name in kubernetes.  string   destLayer The dest layer service in kubernetes.  string    SCOPE K8SServiceInstanceRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceServiceInstanceName The source pod name in kubernetes.  string   sourceLayer The source layer service in kubernetes.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   tlsMode The TLS mode of relation. The value may be Plain or TLS.  enum   destServiceName The dest service name in kubernetes.  string   destServiceInstanceName The dest pod name in kubernetes.  string   destLayer The dest layer service in kubernetes.  string    Endpoint and Endpoint Relation For K8SEndpoint and K8SEndpointRelation, they only have the following protocol level metric contents.\n   Name Remarks Group Key Type     protocol.type The protocol type name, the following names should have the type prefix. The value should be HTTP.  string   protocol.success This protocol request and response is success or not.  boolean   protocol.http.latency The latency of HTTP response.  long(in nanoseconds)   protocol.http.url The url path of HTTP request.  string   protocol.http.method The method name of HTTP request.  string   protocol.http.statusCode The response code of HTTP response.  int   protocol.http.sizeOfRequestHeader The header size of HTTP request.  long(bytes)   protocol.http.sizeOfRequestBody The body size of HTTP request.  long(bytes)   protocol.http.sizeOfResponseHeader The header size of HTTP response.  long(bytes)   protocol.http.sizeOfResponseBody The body size of HTTP response.  long(bytes)    SCOPE K8SEndpoint    Name Remarks Group Key Type     serviceName The service name in kubernetes.  string   layer The layer in kubernetes service.  string   endpointName The endpoint name detect in kubernetes service.  
string   duration The duration of the service endpoint response latency.  long    SCOPE K8SEndpointRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceServiceName The layer in kubernetes source service.  string   sourceEndpointName The endpoint name detect in kubernetes source service.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   destServiceName The dest service name in kubernetes.  string   destServiceName The layer in kubernetes dest service.  string   destEndpointName The endpoint name detect in kubernetes dest service.  string    ","title":"Scopes and Fields","url":"/docs/main/next/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  
string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  
long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  
string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  
string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    ","title":"Scopes and Fields","url":"/docs/main/v9.0.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  
long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  
long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  
long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  
string    ","title":"Scopes and Fields","url":"/docs/main/v9.1.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. 
Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  
long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  
string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. 
yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    ","title":"Scopes and Fields","url":"/docs/main/v9.2.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  
string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. 
Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  
long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  
string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  
string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  
boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","title":"Scopes and Fields","url":"/docs/main/v9.3.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. 
Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  
long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  
string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. 
The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  
boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","title":"Scopes and Fields","url":"/docs/main/v9.4.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  
List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  
string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  
string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. 
Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  
int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  
string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","title":"Scopes and Fields","url":"/docs/main/v9.5.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  
List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  
string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  
string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  
enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  
int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","title":"Scopes and Fields","url":"/docs/main/v9.6.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  
string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  
long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment. 
 List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  
string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  
string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  
string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","title":"Scopes and Fields","url":"/docs/main/v9.7.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scratch The OAP Config Dump SkyWalking OAP behaviors could be controlled through hundreds of configurations. It is hard to know what is the final configuration as all the configurations could be overrided by system environments.\nThe core config file application.yml lists all the configurations and their default values. However, it is still hard to know the runtime value.\nScratch is a tool to dump the final configuration. 
It is provided within the OAP REST server and can be accessed through HTTP GET http://{core restHost}:{core restPort}/debugging/config/dump.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump cluster.provider=standalone core.provider=default core.default.prepareThreads=2 core.default.restHost=0.0.0.0 core.default.searchableLogsTags=level,http.status_code core.default.role=Mixed core.default.persistentPeriod=25 core.default.syncPeriodHttpUriRecognitionPattern=10 core.default.restIdleTimeOut=30000 core.default.dataKeeperExecutePeriod=5 core.default.topNReportPeriod=10 core.default.gRPCSslTrustedCAPath= core.default.downsampling=[Hour, Day] core.default.serviceNameMaxLength=70 core.default.gRPCSslEnabled=false core.default.restPort=12800 core.default.serviceCacheRefreshInterval=10 ... All booting configurations with their runtime values are listed, including the selected provider for each module.\nProtect The Secrets Some of the configurations contain sensitive values, such as username, password, token, etc. These values would be masked in the dump result. For example, the storage.elasticsearch.password in the following configuration,\nstorage:selector:${SW_STORAGE:h2}elasticsearch:password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}It would be masked and shown as ******** in the dump result.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump ... storage.elasticsearch.password=******** ... By default, we mask the config keys through the following configuration.\n# Include the list of keywords to filter configurations including secrets. Separate keywords by a comma.keywords4MaskingSecretsOfConfig:${SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS:user,password,token,accessKey,secretKey,authentication}Disable The Config Dump Service By default, this service is open to help users debug and diagnose. If you want to disable it, you need to disable the whole debugging-query module by setting selector=-.\ndebugging-query:selector:${SW_DEBUGGING_QUERY:-}","title":"Scratch The OAP Config Dump","url":"/docs/main/latest/en/debugging/config_dump/"},{"content":"Scratch The OAP Config Dump SkyWalking OAP behavior can be controlled through hundreds of configurations. It is hard to know what the final configuration is, as any configuration can be overridden by system environment variables.\nThe core config file application.yml lists all the configurations and their default values. However, it is still hard to know the runtime values.\nScratch is a tool to dump the final configuration. It is provided within the OAP REST server and can be accessed through HTTP GET http://{core restHost}:{core restPort}/debugging/config/dump.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump cluster.provider=standalone core.provider=default core.default.prepareThreads=2 core.default.restHost=0.0.0.0 core.default.searchableLogsTags=level,http.status_code core.default.role=Mixed core.default.persistentPeriod=25 core.default.syncPeriodHttpUriRecognitionPattern=10 core.default.restIdleTimeOut=30000 core.default.dataKeeperExecutePeriod=5 core.default.topNReportPeriod=10 core.default.gRPCSslTrustedCAPath= core.default.downsampling=[Hour, Day] core.default.serviceNameMaxLength=70 core.default.gRPCSslEnabled=false core.default.restPort=12800 core.default.serviceCacheRefreshInterval=10 ...
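Because the dump is plain key=value text, standard shell tools are enough to slice or compare it; for instance (host names below are placeholders, 12800 is the default restPort shown in the sample output above, and the diff uses bash process substitution):

```shell
# Show only the effective storage settings (key prefixes follow the dump format above).
curl -s http://127.0.0.1:12800/debugging/config/dump | grep '^storage\.'

# Compare the effective configuration of two OAP nodes (node names are placeholders).
diff <(curl -s http://oap-node-1:12800/debugging/config/dump | sort) \
     <(curl -s http://oap-node-2:12800/debugging/config/dump | sort)
```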
All booting configurations with their runtime values are listed, including the selected provider for each module.\nProtect The Secrets Some of the configurations contain sensitive values, such as username, password, token, etc. These values would be masked in the dump result. For example, the storage.elasticsearch.password in the following configuration,\nstorage:selector:${SW_STORAGE:h2}elasticsearch:password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}It would be masked and shown as ******** in the dump result.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump ... storage.elasticsearch.password=******** ... By default, we mask the config keys through the following configuration.\n# Include the list of keywords to filter configurations including secrets. Separate keywords by a comma.keywords4MaskingSecretsOfConfig:${SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS:user,password,token,accessKey,secretKey,authentication}Disable The Config Dump Service By default, this service is open to help users debug and diagnose. If you want to disable it, you need to disable the whole debugging-query module by setting selector=-.\ndebugging-query:selector:${SW_DEBUGGING_QUERY:-}","title":"Scratch The OAP Config Dump","url":"/docs/main/next/en/debugging/config_dump/"},{"content":"Scratch The OAP Config Dump SkyWalking OAP behavior can be controlled through hundreds of configurations. It is hard to know what the final configuration is, as any configuration can be overridden by system environment variables.\nThe core config file application.yml lists all the configurations and their default values. However, it is still hard to know the runtime values.\nScratch is a tool to dump the final configuration. It is provided within the OAP REST server and can be accessed through HTTP GET http://{core restHost}:{core restPort}/debugging/config/dump.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump cluster.provider=standalone core.provider=default core.default.prepareThreads=2 core.default.restHost=0.0.0.0 core.default.searchableLogsTags=level,http.status_code core.default.role=Mixed core.default.persistentPeriod=25 core.default.syncPeriodHttpUriRecognitionPattern=10 core.default.restIdleTimeOut=30000 core.default.dataKeeperExecutePeriod=5 core.default.topNReportPeriod=10 core.default.gRPCSslTrustedCAPath= core.default.downsampling=[Hour, Day] core.default.serviceNameMaxLength=70 core.default.gRPCSslEnabled=false core.default.restPort=12800 core.default.serviceCacheRefreshInterval=10 ... All booting configurations with their runtime values are listed, including the selected provider for each module.\nProtect The Secrets Some of the configurations contain sensitive values, such as username, password, token, etc. These values would be masked in the dump result. For example, the storage.elasticsearch.password in the following configuration,\nstorage:selector:${SW_STORAGE:h2}elasticsearch:password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}It would be masked and shown as ******** in the dump result.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump ... storage.elasticsearch.password=******** ... By default, we mask the config keys through the following configuration.\n# Include the list of keywords to filter configurations including secrets.
Separate keywords by a comma.keywords4MaskingSecretsOfConfig:${SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS:user,password,token,accessKey,secretKey,authentication}Disable The Config Dump Service By default, this service is open to help users debug and diagnose. If you want to disable it, you need to disable the whole debugging-query module by setting selector=-.\ndebugging-query:selector:${SW_DEBUGGING_QUERY:-}","title":"Scratch The OAP Config Dump","url":"/docs/main/v9.7.0/en/debugging/config_dump/"},{"content":"","title":"Search Results","url":"/search/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. In the default deployment, the OAP server, UI, and agent deployments should only be reachable by the operations team.\nAll telemetry data are trusted. The OAP server does not validate any field of the telemetry data, to avoid extra load on the server.\nIt is up to the operator (OPS team) whether to expose the OAP server, UI, or some agent deployments to an unsecured environment. The following security policies should be considered to secure your SkyWalking deployment.\n HTTPS and gRPC+TLS should be used between agents and OAP servers, as well as the UI. Set up TOKEN or username/password based authentication for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC (including HTTP 1/2 and MQ) headers (header names are sw8, sw8-x and sw8-correlation) when requests come from outside the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data (HTTP in raw text or encoded Protobuf format) should be validated, and malicious data should be rejected.  Without these protections, an attacker could embed executable JavaScript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor sensitive environments, consider limiting the telemetry report frequency to mitigate DoS/DDoS against exposed OAP and UI services.\nAppendix The SkyWalking client-js agent always runs outside the secured environment. Please follow its security notice for more details.\n","title":"Security Notice","url":"/docs/main/latest/en/security/readme/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. In the default deployment, the OAP server, UI, and agent deployments should only be reachable by the operations team.\nAll telemetry data are trusted. The OAP server does not validate any field of the telemetry data, to avoid extra load on the server.\nIt is up to the operator (OPS team) whether to expose the OAP server, UI, or some agent deployments to an unsecured environment. The following security policies should be considered to secure your SkyWalking deployment.\n HTTPS and gRPC+TLS should be used between agents and OAP servers, as well as the UI. Set up TOKEN or username/password based authentication for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC (including HTTP 1/2 and MQ) headers (header names are sw8, sw8-x and sw8-correlation) when requests come from outside the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data (HTTP in raw text or encoded Protobuf format) should be validated, and malicious data should be rejected.
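One way to apply the header rule above is to strip the propagation headers at an edge gateway before traffic enters the trusted zone; a hypothetical Envoy route snippet, not taken from the SkyWalking docs, with placeholder names:

```yaml
# Edge-gateway route config (sketch): remove SkyWalking propagation headers from
# external traffic so untrusted clients cannot inject trace context.
# Keep the headers if you rely on the client-js agent from browsers.
route_config:
  virtual_hosts:
    - name: public-ingress              # placeholder virtual host
      domains: ["*"]
      request_headers_to_remove: ["sw8", "sw8-x", "sw8-correlation"]
      routes:
        - match: { prefix: "/" }
          route: { cluster: internal_services }   # placeholder upstream cluster
```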
Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","title":"Security Notice","url":"/docs/main/next/en/security/readme/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","title":"Security Notice","url":"/docs/main/v9.3.0/en/security/readme/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  
Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","title":"Security Notice","url":"/docs/main/v9.4.0/en/security/readme/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","title":"Security Notice","url":"/docs/main/v9.5.0/en/security/readme/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  
Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","title":"Security Notice","url":"/docs/main/v9.6.0/en/security/readme/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","title":"Security Notice","url":"/docs/main/v9.7.0/en/security/readme/"},{"content":"Send Envoy metrics to SkyWalking with / without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking does not only receive and store the metrics emitted by Envoy, but it also analyzes the topology of services and service instances.\nAttention: There are two versions of Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with / without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nIn order to let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
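Laid out as YAML, the noteworthy parts of that configuration look like this (the cluster name service_skywalking and the address skywalking with port 11800 are the placeholders used throughout these docs):

    stats_sinks:
    - name: envoy.metrics_service
      config:
        grpc_service:
          # Note: we can use the google_grpc implementation as well.
          envoy_grpc:
            cluster_name: service_skywalking

    static_resources:
      ...
      clusters:
      - name: service_skywalking
        connect_timeout: 5s
        type: LOGICAL_DNS
        http2_protocol_options: {}
        dns_lookup_family: V4_ONLY
        lb_policy: ROUND_ROBIN
        load_assignment:
          cluster_name: service_skywalking
          endpoints:
          - lb_endpoints:
            - endpoint:
                address:
                  socket_address:
                    address: skywalking
                    # This is the port where SkyWalking serves the Envoy Metrics Service gRPC stream.
                    port_value: 11800

and the node metadata that lets SkyWalking name the service and the service instance:

    node:
      # ... other configs
      metadata:
        LABELS:
          app: test-app
        NAME: service-instance-name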
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics, since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics which need to be analyzed, in order to reduce memory usage and avoid CPU overhead. 
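If you prefer a declarative install over a chain of --set flags, the same mesh settings can be written as an IstioOperator overlay and applied with istioctl install -f. This is a sketch under the assumption that the OAP service is reachable at skywalking-oap.skywalking.svc:11800 inside the cluster; replace the address and the profile as per your need.

    apiVersion: install.istio.io/v1alpha1
    kind: IstioOperator
    spec:
      profile: demo
      meshConfig:
        defaultConfig:
          # Equivalent to --set meshConfig.defaultConfig.envoyMetricsService.address=...
          envoyMetricsService:
            address: skywalking-oap.skywalking.svc:11800
          # Equivalent to --set 'meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=...'
          proxyStatsMatcher:
            inclusionRegexps:
            - .*membership_healthy.*
            - .*upstream_cx_active.*

Whichever form you use, keep the inclusionRegexps list limited to the metrics you actually analyze.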
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\n","title":"Send Envoy metrics to SkyWalking with / without Istio","url":"/docs/main/v9.0.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. 
To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. 
Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/latest/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. 
Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/next/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. 
SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. 
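For reference, the install command quoted above is easier to read with one option per line; this is the same command re-wrapped (the inline comments are dropped so that the line continuations actually work, and <skywalking.address.port.11800> is still the placeholder to replace with your actual SkyWalking OAP address):

    istioctl install -y \
      --set profile=demo \
      --set meshConfig.defaultConfig.envoyMetricsService.address=<skywalking.address.port.11800> \
      --set 'meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*'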
We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.1.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.2.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.3.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.4.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.5.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.6.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.7.0/en/setup/envoy/metrics_service_setting/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
$ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/latest/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 
| } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/next/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; 
skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.0.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
$ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.1.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 
| } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.2.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: 
\u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.3.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
$ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.4.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 
| } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.5.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: 
\u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.6.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.7.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. 
They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/latest/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/next/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanism to integrate with target services. They support collecting traces, logs, metrics and events by using SkyWalking\u0026rsquo;s native format, and maximum the analysis capabilities of SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service.\n  Node.js agent. 
Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the rust agent in a rust service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols, but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project document for more details.\n  SkyAPM PHP agent. See PHP agent project document for more details.\n  SkyAPM Go SDK. See go2sky project document for more details.\n  SkyAPM C++ SDK. See cpp2sky project document for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.0.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM PHP agent. See PHP agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.1.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM PHP agent. See PHP agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. 
See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.2.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.3.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.4.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. 
Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.5.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.6.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. 
Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.7.0/en/setup/service-agent/server-agents/"},{"content":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server.\nDefaultConfig # The address of grpc server. Default value is :11800address::11800# The network of grpc. Default value is :tcpnetwork:tcp# The max size of receiving log. Default value is 2M. The unit is Byte.max_recv_msg_size:2097152# The max concurrent stream channels.max_concurrent_streams:32# The TLS cert file path.tls_cert_file:\u0026#34;\u0026#34;# The TLS key file path.tls_key_file:\u0026#34;\u0026#34;# To Accept Connection Limiter when reach the resourceaccept_limit:# The max CPU utilization limitcpu_utilization:75# The max connection countconnection_count:4000Configuration    Name Type Description     address string The address of grpc server.   network string The network of grpc.   max_recv_msg_size int The max size of the received log.   max_concurrent_streams uint32 The max concurrent stream channels.   tls_cert_file string The TLS cert file path.   tls_key_file string The TLS key file path.   accept_limit grpc.AcceptConnectionConfig To Accept Connection Limiter when reach the resource    ","title":"Server/grpc-server","url":"/docs/skywalking-satellite/latest/en/setup/plugins/server_grpc-server/"},{"content":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server.\nDefaultConfig # The address of grpc server. Default value is :11800address::11800# The network of grpc. Default value is :tcpnetwork:tcp# The max size of receiving log. Default value is 2M. The unit is Byte.max_recv_msg_size:2097152# The max concurrent stream channels.max_concurrent_streams:32# The TLS cert file path.tls_cert_file:\u0026#34;\u0026#34;# The TLS key file path.tls_key_file:\u0026#34;\u0026#34;# To Accept Connection Limiter when reach the resourceaccept_limit:# The max CPU utilization limitcpu_utilization:75# The max connection countconnection_count:4000Configuration    Name Type Description     address string The address of grpc server.   network string The network of grpc.   max_recv_msg_size int The max size of the received log.   max_concurrent_streams uint32 The max concurrent stream channels.   tls_cert_file string The TLS cert file path.   tls_key_file string The TLS key file path.   accept_limit grpc.AcceptConnectionConfig To Accept Connection Limiter when reach the resource    ","title":"Server/grpc-server","url":"/docs/skywalking-satellite/next/en/setup/plugins/server_grpc-server/"},{"content":"Server/grpc-server Description This is a sharing plugin, which would start a gRPC server.\nDefaultConfig # The address of grpc server. Default value is :11800address::11800# The network of grpc. Default value is :tcpnetwork:tcp# The max size of receiving log. Default value is 2M. 
The unit is Byte.max_recv_msg_size:2097152# The max concurrent stream channels.max_concurrent_streams:32# The TLS cert file path.tls_cert_file:\u0026#34;\u0026#34;# The TLS key file path.tls_key_file:\u0026#34;\u0026#34;# To Accept Connection Limiter when reach the resourceaccept_limit:# The max CPU utilization limitcpu_utilization:75# The max connection countconnection_count:4000Configuration    Name Type Description     address string The address of grpc server.   network string The network of grpc.   max_recv_msg_size int The max size of the received log.   max_concurrent_streams uint32 The max concurrent stream channels.   tls_cert_file string The TLS cert file path.   tls_key_file string The TLS key file path.   accept_limit grpc.AcceptConnectionConfig To Accept Connection Limiter when reach the resource    ","title":"Server/grpc-server","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/server_grpc-server/"},{"content":"Server/http-server Description This is a sharing plugin, which would start a http server.\nDefaultConfig # The http server address.address:\u0026#34;:12800\u0026#34;Configuration    Name Type Description     address string     ","title":"Server/http-server","url":"/docs/skywalking-satellite/latest/en/setup/plugins/server_http-server/"},{"content":"Server/http-server Description This is a sharing plugin, which would start a http server.\nDefaultConfig # The http server address.address:\u0026#34;:12800\u0026#34;Configuration    Name Type Description     address string     ","title":"Server/http-server","url":"/docs/skywalking-satellite/next/en/setup/plugins/server_http-server/"},{"content":"Server/http-server Description This is a sharing plugin, which would start a http server.\nDefaultConfig # The http server address.address:\u0026#34;:12800\u0026#34;Configuration    Name Type Description     address string     ","title":"Server/http-server","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/server_http-server/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/latest/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. 
For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/next/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an option parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.0.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.1.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. 
For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.2.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.3.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.4.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. 
For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.5.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.6.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.7.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. 
For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/latest/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. 
The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/next/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. 
But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.0.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. 
There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.1.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. 
Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.2.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? 
If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.3.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.4.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. 
SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.5.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. 
The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.6.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. 
But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.7.0/en/concepts-and-designs/service-agent/"},{"content":"Service Discovery Service discovery is used to discover all Kubernetes services process in the current node and report them to backend services. After the process upload is completed, the other modules could perform more operations with the process, such as process profiling and collecting process metrics.\nConfiguration    Name Default Environment Key Description     process_discovery.heartbeat_period 20s ROVER_PROCESS_DISCOVERY_HEARTBEAT_PERIOD The period of report or keep-alive process to the backend.   process_discovery.properties_report_period 10 ROVER_PROCESS_DISCOVERY_PROPERTIES_REPORT_PERIOD The agent sends the process properties to the backend every: heartbeart period * properties report period.   process_discovery.kubernetes.active false ROVER_PROCESS_DISCOVERY_KUBERNETES_ACTIVE Is active the kubernetes process discovery.   process_discovery.kubernetes.node_name  ROVER_PROCESS_DISCOVERY_KUBERNETES_NODE_NAME Current deployed node name, it could be inject by spec.nodeName.   process_discovery.kubernetes.namespaces  ROVER_PROCESS_DISCOVERY_KUBERNETES_NAMESPACES Including pod by namespaces, if empty means including all namespaces. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;.   process_discovery.kubernetes.analyzers   Declare how to build the process. The istio and k8s resources are active by default.   process_discovery.kubernetes.analyzers.active   Set is active analyzer.   process_discovery.kubernetes.analyzers.filters   Define which process is match to current process builder.   process_discovery.kubernetes.analyzers.service_name   The Service Name of the process entity.   process_discovery.kubernetes.analyzers.instance_name   The Service Instance Name of the process entity, by default, the instance name is the host IP v4 address from \u0026ldquo;en0\u0026rdquo; net interface.   
process_discovery.kubernetes.analyzers.process_name   The Process Name of the process entity, by default, the process name is the executable name of the process.   process_discovery.kubernetes.analyzers.labels   The Process Labels, used to aggregate similar process from service entity. Multiple labels split by \u0026ldquo;,\u0026rdquo;.    Kubernetes Process Detector The Kubernetes process detector could detect any process under the Kubernetes container. If active the Kubernetes process detector, the rover must be deployed in the Kubernetes cluster. After finding the process, it would collect the metadata of the process when the report to the backend.\nProcess Analyze The process analysis declares which process could be profiled and how to build the process entity. The Istio and Kubernetes resources are active on default.\nFilter The filter provides an expression(go template) mechanism to match the process that can build the entity. Multiple expressions work together to determine whether the process can create the entity. Each expression must return the boolean value. Otherwise, the decision throws an error.\nThe context is similar to the entity builder. Using context could help the rover understand which process could build the entity.\nProcess Context Is the same with the process context in scanner, but doesn\u0026rsquo;t need to add the {{ and }} in prefix and suffix.\nPod Context Provide current pod information and judgments.\n   Name Argument Example Description     Name None eq .Pod.Name \u0026quot;test-pod-name\u0026quot; The name of the current pod. The example shows the pod name is equal to test-pod-name.   Namespace None eq .Pod.Namespace \u0026quot;test-namesapce\u0026quot; The name of the current pod namespace. The example shows the pod namespace name is equal to test-namespace.   Node None eq .Pod.Node \u0026quot;test-node\u0026quot; The name of the node deployed. The example shows the pod node name is equal to test-node.   LabelValue KeyNames eq .Pod.LavelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot; The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. The example shows the pod has anyone a or b label key, and the value matches to v.   ServiceName None eq .Pod.ServiceName \u0026quot;test-service\u0026quot; The service name of the pod. The example shows current pods matched service name is test-service.   HasContainer Container name .Pod.HasContainer \u0026quot;istio-proxy\u0026quot; The pod has the appointed container name.   LabelSelector selector .Pod.LabelSelector The pod is matches the label selector. For more details, please read the official documentation.   HasServiceName None .Pod.HasServiceName The pod has the matched service.   HasOwnerName kindNames .Pod.HasOwnerName \u0026quot;Service,Deployment\u0026quot; The pod has the matched owner name.    Container Context Provide current container(under the pod) information.\n   Name Argument Example Description     Name None eq .Container.Name \u0026quot;istio-proxy\u0026quot; The name of the current container under the pod. The examples show the container name is equal to istio-proxy.    
Entity The entity including layer, serviceName, instanceName, processName and labels properties.\nThe entity also could use expression to build(serviceName, instanceName and processName).\nRover Rover context provides the context of the rover process instance and VM data.\n   Name Argument Example Description     InstanceID None {{.Rover.InstanceID}} Get the Instance ID of the rover.   HostIPV4 The Interface name {{.Rover.HostIPV4 \u0026quot;en0\u0026quot;}} Get the ipv4 address from the appointed network interface name.   HostIPV6 The Interface name {{.Rover.HostIPV6 \u0026quot;en0\u0026quot;}} Get the ipv6 address from the appointed network interface name.   HostName None {{.Rover.HostName}} Get the host name of current machine.    Process Process context provides the context relate to which process is matched.\n   Name Argument Example Description     ExeFilePath None {{.Process.ExeFilePath}} The execute file path of process.   ExeName None {{.Process.ExeName}} The execute file name.   CommandLine None {{.Process.CommandLine}} The command line of process.   Pid None {{.Process.Pid}} The id of the process.   WorkDir None {{.Process.WorkDir}} The work directory path of the process.    Pod The information on the current pod.\n   Name Argument Example Description     Name None {{.Pod.Name}} The name of current pod.   Namespace None {{.Pod.Namespace}} The name of current pod namespace.   Node None {{.Pod.Node}} The name of the node deployed.   LabelValue KeyNames, Default {{.Pod.LabelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot;}} The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. If all keys don\u0026rsquo;t have value, then return the default value.   ServiceName None {{.Pod.ServiceName}} The service name of the pod. If the pod hasn\u0026rsquo;t matched service, then return an empty string.   FindContainer ContainerName {{.Pod.FindContainer \u0026quot;test\u0026quot;}} Find the Container context by container name.   OwnerName KindNames {{.Pod.OwnerName \u0026quot;Service,Deployment\u0026quot;}} Find the Owner name by owner kind name.    Container The information of the current container under the pod.\n   Name Argument Example Description     Name None {{.Container.Name}} The name of the current container under the pod.    ID None {{.Container.ID}} The id of the current container under the pod.   EnvValue KeyNames {{.Container.EnvValue \u0026quot;a,b\u0026quot;}} The environment value of the first non-value key in the provided candidates(Iterate from left to right).    ","title":"Service Discovery","url":"/docs/skywalking-rover/latest/en/setup/configuration/service-discovery/"},{"content":"Service Discovery Service discovery is used to discover all Kubernetes services process in the current node and report them to backend services. After the process upload is completed, the other modules could perform more operations with the process, such as process profiling and collecting process metrics.\nConfiguration    Name Default Environment Key Description     process_discovery.heartbeat_period 20s ROVER_PROCESS_DISCOVERY_HEARTBEAT_PERIOD The period of report or keep-alive process to the backend.   process_discovery.properties_report_period 10 ROVER_PROCESS_DISCOVERY_PROPERTIES_REPORT_PERIOD The agent sends the process properties to the backend every: heartbeart period * properties report period.   
process_discovery.kubernetes.active false ROVER_PROCESS_DISCOVERY_KUBERNETES_ACTIVE Is active the kubernetes process discovery.   process_discovery.kubernetes.node_name  ROVER_PROCESS_DISCOVERY_KUBERNETES_NODE_NAME Current deployed node name, it could be inject by spec.nodeName.   process_discovery.kubernetes.namespaces  ROVER_PROCESS_DISCOVERY_KUBERNETES_NAMESPACES Including pod by namespaces, if empty means including all namespaces. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;.   process_discovery.kubernetes.analyzers   Declare how to build the process. The istio and k8s resources are active by default.   process_discovery.kubernetes.analyzers.active   Set is active analyzer.   process_discovery.kubernetes.analyzers.filters   Define which process is match to current process builder.   process_discovery.kubernetes.analyzers.service_name   The Service Name of the process entity.   process_discovery.kubernetes.analyzers.instance_name   The Service Instance Name of the process entity, by default, the instance name is the host IP v4 address from \u0026ldquo;en0\u0026rdquo; net interface.   process_discovery.kubernetes.analyzers.process_name   The Process Name of the process entity, by default, the process name is the executable name of the process.   process_discovery.kubernetes.analyzers.labels   The Process Labels, used to aggregate similar process from service entity. Multiple labels split by \u0026ldquo;,\u0026rdquo;.    Kubernetes Process Detector The Kubernetes process detector could detect any process under the Kubernetes container. If active the Kubernetes process detector, the rover must be deployed in the Kubernetes cluster. After finding the process, it would collect the metadata of the process when the report to the backend.\nProcess Analyze The process analysis declares which process could be profiled and how to build the process entity. The Istio and Kubernetes resources are active on default.\nFilter The filter provides an expression(go template) mechanism to match the process that can build the entity. Multiple expressions work together to determine whether the process can create the entity. Each expression must return the boolean value. Otherwise, the decision throws an error.\nThe context is similar to the entity builder. Using context could help the rover understand which process could build the entity.\nProcess Context Is the same with the process context in scanner, but doesn\u0026rsquo;t need to add the {{ and }} in prefix and suffix.\nPod Context Provide current pod information and judgments.\n   Name Argument Example Description     Name None eq .Pod.Name \u0026quot;test-pod-name\u0026quot; The name of the current pod. The example shows the pod name is equal to test-pod-name.   Namespace None eq .Pod.Namespace \u0026quot;test-namesapce\u0026quot; The name of the current pod namespace. The example shows the pod namespace name is equal to test-namespace.   Node None eq .Pod.Node \u0026quot;test-node\u0026quot; The name of the node deployed. The example shows the pod node name is equal to test-node.   LabelValue KeyNames eq .Pod.LavelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot; The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. The example shows the pod has anyone a or b label key, and the value matches to v.   ServiceName None eq .Pod.ServiceName \u0026quot;test-service\u0026quot; The service name of the pod. 
The example shows current pods matched service name is test-service.   HasContainer Container name .Pod.HasContainer \u0026quot;istio-proxy\u0026quot; The pod has the appointed container name.   LabelSelector selector .Pod.LabelSelector The pod is matches the label selector. For more details, please read the official documentation.   HasServiceName None .Pod.HasServiceName The pod has the matched service.   HasOwnerName kindNames .Pod.HasOwnerName \u0026quot;Service,Deployment\u0026quot; The pod has the matched owner name.    Container Context Provide current container(under the pod) information.\n   Name Argument Example Description     Name None eq .Container.Name \u0026quot;istio-proxy\u0026quot; The name of the current container under the pod. The examples show the container name is equal to istio-proxy.    Entity The entity including layer, serviceName, instanceName, processName and labels properties.\nThe entity also could use expression to build(serviceName, instanceName and processName).\nRover Rover context provides the context of the rover process instance and VM data.\n   Name Argument Example Description     InstanceID None {{.Rover.InstanceID}} Get the Instance ID of the rover.   HostIPV4 The Interface name {{.Rover.HostIPV4 \u0026quot;en0\u0026quot;}} Get the ipv4 address from the appointed network interface name.   HostIPV6 The Interface name {{.Rover.HostIPV6 \u0026quot;en0\u0026quot;}} Get the ipv6 address from the appointed network interface name.   HostName None {{.Rover.HostName}} Get the host name of current machine.    Process Process context provides the context relate to which process is matched.\n   Name Argument Example Description     ExeFilePath None {{.Process.ExeFilePath}} The execute file path of process.   ExeName None {{.Process.ExeName}} The execute file name.   CommandLine None {{.Process.CommandLine}} The command line of process.   Pid None {{.Process.Pid}} The id of the process.   WorkDir None {{.Process.WorkDir}} The work directory path of the process.    Pod The information on the current pod.\n   Name Argument Example Description     Name None {{.Pod.Name}} The name of current pod.   Namespace None {{.Pod.Namespace}} The name of current pod namespace.   Node None {{.Pod.Node}} The name of the node deployed.   LabelValue KeyNames, Default {{.Pod.LabelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot;}} The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. If all keys don\u0026rsquo;t have value, then return the default value.   ServiceName None {{.Pod.ServiceName}} The service name of the pod. If the pod hasn\u0026rsquo;t matched service, then return an empty string.   FindContainer ContainerName {{.Pod.FindContainer \u0026quot;test\u0026quot;}} Find the Container context by container name.   OwnerName KindNames {{.Pod.OwnerName \u0026quot;Service,Deployment\u0026quot;}} Find the Owner name by owner kind name.    Container The information of the current container under the pod.\n   Name Argument Example Description     Name None {{.Container.Name}} The name of the current container under the pod.    ID None {{.Container.ID}} The id of the current container under the pod.   EnvValue KeyNames {{.Container.EnvValue \u0026quot;a,b\u0026quot;}} The environment value of the first non-value key in the provided candidates(Iterate from left to right).    
","title":"Service Discovery","url":"/docs/skywalking-rover/next/en/setup/configuration/service-discovery/"},{"content":"Service Discovery Service discovery is used to discover all Kubernetes services process in the current node and report them to backend services. After the process upload is completed, the other modules could perform more operations with the process, such as process profiling and collecting process metrics.\nConfiguration    Name Default Environment Key Description     process_discovery.heartbeat_period 20s ROVER_PROCESS_DISCOVERY_HEARTBEAT_PERIOD The period of report or keep-alive process to the backend.   process_discovery.properties_report_period 10 ROVER_PROCESS_DISCOVERY_PROPERTIES_REPORT_PERIOD The agent sends the process properties to the backend every: heartbeart period * properties report period.   process_discovery.kubernetes.active false ROVER_PROCESS_DISCOVERY_KUBERNETES_ACTIVE Is active the kubernetes process discovery.   process_discovery.kubernetes.node_name  ROVER_PROCESS_DISCOVERY_KUBERNETES_NODE_NAME Current deployed node name, it could be inject by spec.nodeName.   process_discovery.kubernetes.namespaces  ROVER_PROCESS_DISCOVERY_KUBERNETES_NAMESPACES Including pod by namespaces, if empty means including all namespaces. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;.   process_discovery.kubernetes.analyzers   Declare how to build the process. The istio and k8s resources are active by default.   process_discovery.kubernetes.analyzers.active   Set is active analyzer.   process_discovery.kubernetes.analyzers.filters   Define which process is match to current process builder.   process_discovery.kubernetes.analyzers.service_name   The Service Name of the process entity.   process_discovery.kubernetes.analyzers.instance_name   The Service Instance Name of the process entity, by default, the instance name is the host IP v4 address from \u0026ldquo;en0\u0026rdquo; net interface.   process_discovery.kubernetes.analyzers.process_name   The Process Name of the process entity, by default, the process name is the executable name of the process.   process_discovery.kubernetes.analyzers.labels   The Process Labels, used to aggregate similar process from service entity. Multiple labels split by \u0026ldquo;,\u0026rdquo;.    Kubernetes Process Detector The Kubernetes process detector could detect any process under the Kubernetes container. If active the Kubernetes process detector, the rover must be deployed in the Kubernetes cluster. After finding the process, it would collect the metadata of the process when the report to the backend.\nProcess Analyze The process analysis declares which process could be profiled and how to build the process entity. The Istio and Kubernetes resources are active on default.\nFilter The filter provides an expression(go template) mechanism to match the process that can build the entity. Multiple expressions work together to determine whether the process can create the entity. Each expression must return the boolean value. Otherwise, the decision throws an error.\nThe context is similar to the entity builder. Using context could help the rover understand which process could build the entity.\nProcess Context Is the same with the process context in scanner, but doesn\u0026rsquo;t need to add the {{ and }} in prefix and suffix.\nPod Context Provide current pod information and judgments.\n   Name Argument Example Description     Name None eq .Pod.Name \u0026quot;test-pod-name\u0026quot; The name of the current pod. 
The example shows the pod name is equal to test-pod-name.   Namespace None eq .Pod.Namespace \u0026quot;test-namesapce\u0026quot; The name of the current pod namespace. The example shows the pod namespace name is equal to test-namespace.   Node None eq .Pod.Node \u0026quot;test-node\u0026quot; The name of the node deployed. The example shows the pod node name is equal to test-node.   LabelValue KeyNames eq .Pod.LavelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot; The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. The example shows the pod has anyone a or b label key, and the value matches to v.   ServiceName None eq .Pod.ServiceName \u0026quot;test-service\u0026quot; The service name of the pod. The example shows current pods matched service name is test-service.   HasContainer Container name .Pod.HasContainer \u0026quot;istio-proxy\u0026quot; The pod has the appointed container name.   LabelSelector selector .Pod.LabelSelector The pod is matches the label selector. For more details, please read the official documentation.   HasServiceName None .Pod.HasServiceName The pod has the matched service.   HasOwnerName kindNames .Pod.HasOwnerName \u0026quot;Service,Deployment\u0026quot; The pod has the matched owner name.    Container Context Provide current container(under the pod) information.\n   Name Argument Example Description     Name None eq .Container.Name \u0026quot;istio-proxy\u0026quot; The name of the current container under the pod. The examples show the container name is equal to istio-proxy.    Entity The entity including layer, serviceName, instanceName, processName and labels properties.\nThe entity also could use expression to build(serviceName, instanceName and processName).\nRover Rover context provides the context of the rover process instance and VM data.\n   Name Argument Example Description     InstanceID None {{.Rover.InstanceID}} Get the Instance ID of the rover.   HostIPV4 The Interface name {{.Rover.HostIPV4 \u0026quot;en0\u0026quot;}} Get the ipv4 address from the appointed network interface name.   HostIPV6 The Interface name {{.Rover.HostIPV6 \u0026quot;en0\u0026quot;}} Get the ipv6 address from the appointed network interface name.   HostName None {{.Rover.HostName}} Get the host name of current machine.    Process Process context provides the context relate to which process is matched.\n   Name Argument Example Description     ExeFilePath None {{.Process.ExeFilePath}} The execute file path of process.   ExeName None {{.Process.ExeName}} The execute file name.   CommandLine None {{.Process.CommandLine}} The command line of process.   Pid None {{.Process.Pid}} The id of the process.   WorkDir None {{.Process.WorkDir}} The work directory path of the process.    Pod The information on the current pod.\n   Name Argument Example Description     Name None {{.Pod.Name}} The name of current pod.   Namespace None {{.Pod.Namespace}} The name of current pod namespace.   Node None {{.Pod.Node}} The name of the node deployed.   LabelValue KeyNames, Default {{.Pod.LabelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot;}} The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. If all keys don\u0026rsquo;t have value, then return the default value.   ServiceName None {{.Pod.ServiceName}} The service name of the pod. If the pod hasn\u0026rsquo;t matched service, then return an empty string.   
FindContainer ContainerName {{.Pod.FindContainer \u0026quot;test\u0026quot;}} Find the Container context by container name.   OwnerName KindNames {{.Pod.OwnerName \u0026quot;Service,Deployment\u0026quot;}} Find the owner name by owner kind name.    Container The information of the current container under the pod.\n   Name Argument Example Description     Name None {{.Container.Name}} The name of the current container under the pod.    ID None {{.Container.ID}} The id of the current container under the pod.   EnvValue KeyNames {{.Container.EnvValue \u0026quot;a,b\u0026quot;}} The environment value of the first provided key that has a value (iterating from left to right).    ","title":"Service Discovery","url":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/service-discovery/"},{"content":"Service Hierarchy SkyWalking v10 introduces a new concept, Service Hierarchy, which defines the relationships of logically identical services across various layers. OAP will detect the services from different layers and try to build the connections.\nDetect Service Hierarchy Connections There are two ways to detect the connections:\n Automatically matching through the OAP internal mechanism; no extra work is required. Build the connections through specific agents.  Note: All the relationships and auto-matching rules should be defined in the config/hierarchy-definition.yml file. If you want to customize it according to your own needs, please refer to Service Hierarchy Configuration.\nAutomatically Matching    Upper layer Lower layer Matching rule     GENERAL K8S_SERVICE GENERAL On K8S_SERVICE   GENERAL APISIX GENERAL On APISIX   VIRTUAL_DATABASE MYSQL VIRTUAL_DATABASE On MYSQL   VIRTUAL_DATABASE POSTGRESQL VIRTUAL_DATABASE On POSTGRESQL   VIRTUAL_DATABASE CLICKHOUSE VIRTUAL_DATABASE On CLICKHOUSE   VIRTUAL_MQ RABBITMQ VIRTUAL_MQ On RABBITMQ   VIRTUAL_MQ ROCKETMQ VIRTUAL_MQ On ROCKETMQ   VIRTUAL_MQ KAFKA VIRTUAL_MQ On KAFKA   VIRTUAL_MQ PULSAR VIRTUAL_MQ On PULSAR   MESH MESH_DP MESH On MESH_DP   MESH K8S_SERVICE MESH On K8S_SERVICE   MESH_DP K8S_SERVICE MESH_DP On K8S_SERVICE   MYSQL K8S_SERVICE MYSQL On K8S_SERVICE   POSTGRESQL K8S_SERVICE POSTGRESQL On K8S_SERVICE   CLICKHOUSE K8S_SERVICE CLICKHOUSE On K8S_SERVICE   NGINX K8S_SERVICE NGINX On K8S_SERVICE   APISIX K8S_SERVICE APISIX On K8S_SERVICE   ROCKETMQ K8S_SERVICE ROCKETMQ On K8S_SERVICE   RABBITMQ K8S_SERVICE RABBITMQ On K8S_SERVICE   KAFKA K8S_SERVICE KAFKA On K8S_SERVICE   PULSAR K8S_SERVICE PULSAR On K8S_SERVICE   SO11Y_OAP K8S_SERVICE SO11Y_OAP On K8S_SERVICE     The following sections describe the default matching rules in detail, using the upper-layer On lower-layer format. The example service names are based on the SkyWalking Showcase default deployment. In SkyWalking, a service name can be composed of a group and a short name with the :: separator.  
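Before diving into those rules, purely as an illustration (the actual schema of config/hierarchy-definition.yml may differ; refer to the Service Hierarchy Configuration document for the authoritative format), a layer relation and its auto-matching rule could be sketched roughly like this:
hierarchy:
  MESH:
    K8S_SERVICE: short-name   # assumed layout: upper layer MESH matches lower layer K8S_SERVICE via the short-name rule
auto-matching-rules:
  # Groovy closure taken from the rules below: the services match when their short names are equal
  short-name: "{ (u, l) -> u.shortName == l.shortName }"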
GENERAL On K8S_SERVICE  Rule name: lower-short-name-remove-ns Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf('.')) } Description: GENERAL.service.shortName == K8S_SERVICE.service.shortName without namespace Matched Example:  GENERAL.service.name: agent::songs K8S_SERVICE.service.name: skywalking-showcase::songs.sample-services    GENERAL On APISIX  Rule name: lower-short-name-remove-ns Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf('.')) } Description: GENERAL.service.shortName == APISIX.service.shortName without namespace Matched Example:  GENERAL.service.name: agent::frontend APISIX.service.name: APISIX::frontend.sample-services    VIRTUAL_DATABASE On MYSQL  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_DATABASE.service.shortName remove port == MYSQL.service.shortName with fqdn suffix Matched Example:  VIRTUAL_DATABASE.service.name: mysql.skywalking-showcase.svc.cluster.local:3306 MYSQL.service.name: mysql::mysql.skywalking-showcase    VIRTUAL_DATABASE On POSTGRESQL  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_DATABASE.service.shortName remove port == POSTGRESQL.service.shortName with fqdn suffix Matched Example:  VIRTUAL_DATABASE.service.name: psql.skywalking-showcase.svc.cluster.local:5432 POSTGRESQL.service.name: postgresql::psql.skywalking-showcase    VIRTUAL_DATABASE On CLICKHOUSE  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_DATABASE.service.shortName remove port == CLICKHOUSE.service.shortName with fqdn suffix Matched Example:  VIRTUAL_DATABASE.service.name: clickhouse.skywalking-showcase.svc.cluster.local:8123 CLICKHOUSE.service.name: clickhouse::clickhouse.skywalking-showcase    VIRTUAL_MQ On ROCKETMQ  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == ROCKETMQ.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: rocketmq.skywalking-showcase.svc.cluster.local:9876 ROCKETMQ.service.name: rocketmq::rocketmq.skywalking-showcase    VIRTUAL_MQ On RABBITMQ  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == RABBITMQ.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: rabbitmq.skywalking-showcase.svc.cluster.local:5672 RABBITMQ.service.name: rabbitmq::rabbitmq.skywalking-showcase     VIRTUAL_MQ On KAFKA  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == KAFKA.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: kafka.skywalking-showcase.svc.cluster.local:9092 KAFKA.service.name: kafka::rocketmq.skywalking-showcase    VIRTUAL_MQ On PULSAR 
 Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == PULSAR.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: pulsar.skywalking-showcase.svc.cluster.local:6650 PULSAR.service.name: pulsar::pulsar.skywalking-showcase    MESH On MESH_DP  Rule name: name Groovy script: { (u, l) -\u0026gt; u.name == l.name } Description: MESH.service.name == MESH_DP.service.name Matched Example:  MESH.service.name: mesh-svr::songs.sample-services MESH_DP.service.name: mesh-svr::songs.sample-services    MESH On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: MESH.service.shortName == K8S_SERVICE.service.shortName Matched Example:  MESH.service.name: mesh-svr::songs.sample-services K8S_SERVICE.service.name: skywalking-showcase::songs.sample-services    MESH_DP On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: MESH_DP.service.shortName == K8S_SERVICE.service.shortName Matched Example:  MESH_DP.service.name: mesh-svr::songs.sample-services K8S_SERVICE.service.name: skywalking-showcase::songs.sample-services    MYSQL On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: MYSQL.service.shortName == K8S_SERVICE.service.shortName Matched Example:  MYSQL.service.name: mysql::mysql.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::mysql.skywalking-showcase    POSTGRESQL On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: POSTGRESQL.service.shortName == K8S_SERVICE.service.shortName Matched Example:  POSTGRESQL.service.name: postgresql::psql.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::psql.skywalking-showcase    CLICKHOUSE On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: CLICKHOUSE.service.shortName == K8S_SERVICE.service.shortName Matched Example:  CLICKHOUSE.service.name: clickhouse::clickhouse.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::clickhouse.skywalking-showcase    NGINX On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: NGINX.service.shortName == K8S_SERVICE.service.shortName Matched Example:  NGINX.service.name: nginx::nginx.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::nginx.skywalking-showcase    APISIX On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: APISIX.service.shortName == K8S_SERVICE.service.shortName Matched Example:  APISIX.service.name: APISIX::frontend.sample-services K8S_SERVICE.service.name: skywalking-showcase::frontend.sample-services    ROCKETMQ On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: ROCKETMQ.service.shortName == K8S_SERVICE.service.shortName Matched Example:  ROCKETMQ.service.name: rocketmq::rocketmq.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::rocketmq.skywalking-showcase    RABBITMQ On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: RABBITMQ.service.shortName == K8S_SERVICE.service.shortName Matched 
Example:  RABBITMQ.service.name: rabbitmq::rabbitmq.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::rabbitmq.skywalking-showcase    KAFKA On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: KAFKA.service.shortName == K8S_SERVICE.service.shortName Matched Example:  KAFKA.service.name: kafka::kafka.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::kafka.skywalking-showcase    PULSAR On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: PULSAR.service.shortName == K8S_SERVICE.service.shortName Matched Example:  PULSAR.service.name: pulsar::pulsar.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::pulsar.skywalking-showcase    SO11Y_OAP On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: SO11Y_OAP.service.shortName == K8S_SERVICE.service.shortName Matched Example:  SO11Y_OAP.service.name: demo-oap.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::demo-oap.skywalking-showcase    Build Through Specific Agents Use agent tech involved(such as eBPF) and deployment tools(such as operator and agent injector) to detect the service hierarchy relations.\n   Upper layer Lower layer Agent    Instance Hierarchy Instance Hierarchy relationship follows the same definition as Service Hierarchy.\nAutomatically Matching If the service hierarchy is built, the instance hierarchy relationship could be detected automatically through the following rules:\n The upper instance name equals the lower instance name. The upper instance attribute pod/hostname equals the lower instance attribute pod/hostname. The upper instance attribute pod/hostname equals the lower instance name. The upper instance name equals the lower instance attribute pod/hostname.  Build Through Specific Agents ","title":"Service Hierarchy","url":"/docs/main/next/en/concepts-and-designs/service-hierarchy/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. 
In this case, if the REST_HOST environment variable does not exist, but the ANOTHER_REST_HOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/latest/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST environment variable does not exist, but the ANOTHER_REST_HOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/next/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST environment variable does not exist, but the ANOTHER_REST_HOST environment variable exists and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.0.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. 
You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.1.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.2.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. 
You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.3.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.4.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. 
You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.5.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.6.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. 
You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.7.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the default configuration options.\nThis configuration file is used during hybrid compilation to write the configuration information of the Agent into the program. When the program boots, the agent would read the pre-configured content.\nConfiguration Changes The values in the config file should be updated by following the user requirements. They are applied during the hybrid compilation process.\nFor missing configuration items in the custom file, the Agent would use the values from the default configuration.\nEnvironment Variables In the default configuration, you can see that most of the configurations are in the format ${xxx:config_value}. It means that when the program starts, the agent would first read the xxx from the system environment variables in the runtime. If it cannot be found, the value would be used as the config_value as value.\nNote: that the search for environment variables is at runtime, not compile time.\n","title":"Setting Override","url":"/docs/skywalking-go/latest/en/advanced-features/settings-override/"},{"content":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the default configuration options.\nThis configuration file is used during hybrid compilation to write the configuration information of the Agent into the program. When the program boots, the agent would read the pre-configured content.\nConfiguration Changes The values in the config file should be updated by following the user requirements. They are applied during the hybrid compilation process.\nFor missing configuration items in the custom file, the Agent would use the values from the default configuration.\nEnvironment Variables In the default configuration, you can see that most of the configurations are in the format ${xxx:config_value}. It means that when the program starts, the agent would first read the xxx from the system environment variables in the runtime. 
If it cannot be found, the value would be used as the config_value as value.\nNote: that the search for environment variables is at runtime, not compile time.\n","title":"Setting Override","url":"/docs/skywalking-go/next/en/advanced-features/settings-override/"},{"content":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the default configuration options.\nThis configuration file is used during hybrid compilation to write the configuration information of the Agent into the program. When the program boots, the agent would read the pre-configured content.\nConfiguration Changes The values in the config file should be updated by following the user requirements. They are applied during the hybrid compilation process.\nFor missing configuration items in the custom file, the Agent would use the values from the default configuration.\nEnvironment Variables In the default configuration, you can see that most of the configurations are in the format ${xxx:config_value}. It means that when the program starts, the agent would first read the xxx from the system environment variables in the runtime. If it cannot be found, the value would be used as the config_value as value.\nNote: that the search for environment variables is at runtime, not compile time.\n","title":"Setting Override","url":"/docs/skywalking-go/v0.4.0/en/advanced-features/settings-override/"},{"content":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. + key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. 
In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","title":"Setting Override","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/setting-override/"},{"content":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. + key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","title":"Setting Override","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/setting-override/"},{"content":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","title":"Setting Override","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/setting-override/"},{"content":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","title":"Setting Override","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/setting-override/"},{"content":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","title":"Setting Override","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/setting-override/"},{"content":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You could override the settings in rover_configs.yaml\nSystem environment variables   Example\nOverride core.backend.addr in this setting segment through environment variables\n  core:backend:addr:${ROVER_BACKEND_ADDR:localhost:11800}If the ROVER_BACKEND_ADDR  environment variable exists in your operating system and its value is oap:11800, then the value of core.backend.addr here will be overwritten to oap:11800, otherwise, it will be set to localhost:11800.\n","title":"Setting Override","url":"/docs/skywalking-rover/latest/en/setup/configuration/override-settings/"},{"content":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You could override the settings in rover_configs.yaml\nSystem environment variables   Example\nOverride core.backend.addr in this setting segment through environment variables\n  core:backend:addr:${ROVER_BACKEND_ADDR:localhost:11800}If the ROVER_BACKEND_ADDR  environment variable exists in your operating system and its value is oap:11800, then the value of core.backend.addr here will be overwritten to oap:11800, otherwise, it will be set to localhost:11800.\n","title":"Setting Override","url":"/docs/skywalking-rover/next/en/setup/configuration/override-settings/"},{"content":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. 
You could override the settings in rover_configs.yaml\nSystem environment variables   Example\nOverride core.backend.addr in this setting segment through environment variables\n  core:backend:addr:${ROVER_BACKEND_ADDR:localhost:11800}If the ROVER_BACKEND_ADDR  environment variable exists in your operating system and its value is oap:11800, then the value of core.backend.addr here will be overwritten to oap:11800, otherwise, it will be set to localhost:11800.\n","title":"Setting Override","url":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/override-settings/"},{"content":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. You could override the settings in satellite_config.yaml\nSystem environment variables   Example\nOverride log_pattern in this setting segment through environment variables\n  logger:log_pattern:${SATELLITE_LOGGER_LOG_PATTERN:%time [%level][%field] - %msg}time_pattern:${SATELLITE_LOGGER_TIME_PATTERN:2006-01-02 15:04:05.000}level:${SATELLITE_LOGGER_LEVEL:info}If the SATELLITE_LOGGER_LOG_PATTERN  environment variable exists in your operating system and its value is %msg, then the value of log_pattern here will be overwritten to %msg, otherwise, it will be set to %time [%level][%field] - %msg.\n","title":"Setting Override","url":"/docs/skywalking-satellite/latest/en/setup/configuration/override-settings/"},{"content":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. You could override the settings in satellite_config.yaml\nSystem environment variables   Example\nOverride log_pattern in this setting segment through environment variables\n  logger:log_pattern:${SATELLITE_LOGGER_LOG_PATTERN:%time [%level][%field] - %msg}time_pattern:${SATELLITE_LOGGER_TIME_PATTERN:2006-01-02 15:04:05.000}level:${SATELLITE_LOGGER_LEVEL:info}If the SATELLITE_LOGGER_LOG_PATTERN  environment variable exists in your operating system and its value is %msg, then the value of log_pattern here will be overwritten to %msg, otherwise, it will be set to %time [%level][%field] - %msg.\n","title":"Setting Override","url":"/docs/skywalking-satellite/next/en/setup/configuration/override-settings/"},{"content":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. You could override the settings in satellite_config.yaml\nSystem environment variables   Example\nOverride log_pattern in this setting segment through environment variables\n  logger:log_pattern:${SATELLITE_LOGGER_LOG_PATTERN:%time [%level][%field] - %msg}time_pattern:${SATELLITE_LOGGER_TIME_PATTERN:2006-01-02 15:04:05.000}level:${SATELLITE_LOGGER_LEVEL:info}If the SATELLITE_LOGGER_LOG_PATTERN  environment variable exists in your operating system and its value is %msg, then the value of log_pattern here will be overwritten to %msg, otherwise, it will be set to %time [%level][%field] - %msg.\n","title":"Setting Override","url":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/override-settings/"},{"content":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and command to execute. If you haven\u0026rsquo;t read the Module Design, recommend read this document first.\n Installation Configuration file Run E2E Tests  ","title":"Setup","url":"/docs/skywalking-infra-e2e/latest/en/setup/readme/"},{"content":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and command to execute. 
If you haven\u0026rsquo;t read the Module Design, recommend read this document first.\n Installation Configuration file Run E2E Tests  ","title":"Setup","url":"/docs/skywalking-infra-e2e/next/en/setup/readme/"},{"content":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and command to execute. If you haven\u0026rsquo;t read the Module Design, recommend read this document first.\n Installation Configuration file Run E2E Tests  ","title":"Setup","url":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/readme/"},{"content":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by configs/rover_configs.yaml. Understanding the setting file will help you to read this document.\nFollow Deploy on Kubernetes document to run rover in your cluster.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demos. Usually, the process to be monitored is first declared.\nThen, you can use bin/startup.sh to start up the rover with their config.\nSkyWalking OAP Compatibility The SkyWalking Rover requires specialized protocols to communicate with SkyWalking OAP.\n   SkyWalking Rover Version SkyWalking OAP Notice     0.6.0+ \u0026gt; = 10.0.0 Only support Kubernetes.   0.1.0+ \u0026gt; = 9.1.0     Configuration  Common configurations about logs, backend address, cert files, etc. Service Discovery includes advanced setups about the ways of discovering services on your Kubernetes cluster. Access logs reports L2 to L4 network traffic relative information through access logs, to help OAP backend to do topology and metrics analysis. Profiling is an on-demand feature to enhance general observability besides access logs. It provides eBPF powered process ON_CPU, OFF_CPU profiling and network advanced profiling to link HTTP traffic with SkyWalking and Zipkin traces.  To adjust the configurations, refer to Overriding Setting document for more details.\nPrerequisites Currently, Linux operating systems are supported from version 4.9 and above, except for network profiling which requires version 4.16 or higher.\nThe following table lists currently supported/tested operating systems.\n   System Kernel Version On CPU Profiling Off CPU Profiling Network Profiling     CentOS 7 3.10.0 No No No   CentOS Stream 8 4.18.0 Yes Yes Yes   CentOS Stream 9 5.47.0 Yes Yes Yes   Debian 10 4.19.0 Yes Yes Yes   Debian 11 5.10.0 Yes Yes Yes(TCP Drop Monitor Excluded)   Fedora 35 5.14.10 Yes Yes Yes(TCP Drop Monitor Excluded)   RHEL 7 3.10.0 No No No   RHEL 8 4.18.0 Yes Yes Yes   RHEL 9 5.14.0 Yes Yes Yes   Rocky Linux 8 4.18.0 Yes Yes Yes   Rocky Linux 9 5.14.0 Yes Yes Yes   Ubuntu 1804 5.4.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.10 5.19.0 Yes Yes Yes   Ubuntu Pro 16.04 4.15.0 Yes Yes No   Ubuntu Pro 18.04 5.4.0 Yes Yes Yes   Ubuntu Pro 20.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes    ","title":"Setup","url":"/docs/skywalking-rover/latest/en/setup/overview/"},{"content":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by configs/rover_configs.yaml. 
Understanding the setting file will help you to read this document.\nFollow Deploy on Kubernetes document to run rover in your cluster.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demos. Usually, the process to be monitored is first declared.\nThen, you can use bin/startup.sh to start up the rover with their config.\nSkyWalking OAP Compatibility The SkyWalking Rover requires specialized protocols to communicate with SkyWalking OAP.\n   SkyWalking Rover Version SkyWalking OAP Notice     0.6.0+ \u0026gt; = 10.0.0 Only support Kubernetes.   0.1.0+ \u0026gt; = 9.1.0     Configuration  Common configurations about logs, backend address, cert files, etc. Service Discovery includes advanced setups about the ways of discovering services on your Kubernetes cluster. Access logs reports L2 to L4 network traffic relative information through access logs, to help OAP backend to do topology and metrics analysis. Profiling is an on-demand feature to enhance general observability besides access logs. It provides eBPF powered process ON_CPU, OFF_CPU profiling and network advanced profiling to link HTTP traffic with SkyWalking and Zipkin traces.  To adjust the configurations, refer to Overriding Setting document for more details.\nPrerequisites Currently, Linux operating systems are supported from version 4.9 and above, except for network profiling which requires version 4.16 or higher.\nThe following table lists currently supported/tested operating systems.\n   System Kernel Version On CPU Profiling Off CPU Profiling Network Profiling     CentOS 7 3.10.0 No No No   CentOS Stream 8 4.18.0 Yes Yes Yes   CentOS Stream 9 5.47.0 Yes Yes Yes   Debian 10 4.19.0 Yes Yes Yes   Debian 11 5.10.0 Yes Yes Yes(TCP Drop Monitor Excluded)   Fedora 35 5.14.10 Yes Yes Yes(TCP Drop Monitor Excluded)   RHEL 7 3.10.0 No No No   RHEL 8 4.18.0 Yes Yes Yes   RHEL 9 5.14.0 Yes Yes Yes   Rocky Linux 8 4.18.0 Yes Yes Yes   Rocky Linux 9 5.14.0 Yes Yes Yes   Ubuntu 1804 5.4.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.10 5.19.0 Yes Yes Yes   Ubuntu Pro 16.04 4.15.0 Yes Yes No   Ubuntu Pro 18.04 5.4.0 Yes Yes Yes   Ubuntu Pro 20.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes    ","title":"Setup","url":"/docs/skywalking-rover/next/en/setup/overview/"},{"content":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by configs/rover_configs.yaml. Understanding the setting file will help you to read this document.\nFollow Deploy on Kubernetes document to run rover in your cluster.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demos. Usually, the process to be monitored is first declared.\nThen, you can use bin/startup.sh to start up the rover with their config.\nSkyWalking OAP Compatibility The SkyWalking Rover requires specialized protocols to communicate with SkyWalking OAP.\n   SkyWalking Rover Version SkyWalking OAP Notice     0.6.0+ \u0026gt; = 10.0.0 Only support Kubernetes.   0.1.0+ \u0026gt; = 9.1.0     Configuration  Common configurations about logs, backend address, cert files, etc. Service Discovery includes advanced setups about the ways of discovering services on your Kubernetes cluster. 
Access logs reports L2 to L4 network traffic relative information through access logs, to help OAP backend to do topology and metrics analysis. Profiling is an on-demand feature to enhance general observability besides access logs. It provides eBPF powered process ON_CPU, OFF_CPU profiling and network advanced profiling to link HTTP traffic with SkyWalking and Zipkin traces.  To adjust the configurations, refer to Overriding Setting document for more details.\nPrerequisites Currently, Linux operating systems are supported from version 4.9 and above, except for network profiling which requires version 4.16 or higher.\nThe following table lists currently supported/tested operating systems.\n   System Kernel Version On CPU Profiling Off CPU Profiling Network Profiling     CentOS 7 3.10.0 No No No   CentOS Stream 8 4.18.0 Yes Yes Yes   CentOS Stream 9 5.47.0 Yes Yes Yes   Debian 10 4.19.0 Yes Yes Yes   Debian 11 5.10.0 Yes Yes Yes(TCP Drop Monitor Excluded)   Fedora 35 5.14.10 Yes Yes Yes(TCP Drop Monitor Excluded)   RHEL 7 3.10.0 No No No   RHEL 8 4.18.0 Yes Yes Yes   RHEL 9 5.14.0 Yes Yes Yes   Rocky Linux 8 4.18.0 Yes Yes Yes   Rocky Linux 9 5.14.0 Yes Yes Yes   Ubuntu 1804 5.4.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.10 5.19.0 Yes Yes Yes   Ubuntu Pro 16.04 4.15.0 Yes Yes No   Ubuntu Pro 18.04 5.4.0 Yes Yes Yes   Ubuntu Pro 20.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes    ","title":"Setup","url":"/docs/skywalking-rover/v0.6.0/en/setup/overview/"},{"content":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by configs/satellite_config.yaml. Understanding the setting file will help you to read this document.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nYou can use bin/startup.sh (or cmd) to start up the satellite with their default settings, set out as follows:\n Receive SkyWalking related protocols through grpc(listens on 0.0.0.0/11800) and transmit them to SkyWalking backend(to 0.0.0.0/11800). Expose Self-Observability telemetry data to Prometheus(listens on 0.0.0.0/1234)  Startup script Startup Script\nbin/startup.sh Examples You can quickly build your satellite according to the following examples:\nDeploy  Deploy on Linux and Windows Deploy on Kubernetes  More Use Cases  Transmit Log to Kafka Enable/Disable Channel Telemetry Exporter  satellite_config.yaml The core concept behind this setting file is, SkyWalking Satellite is based on pure modularization design. End user can switch or assemble the collector features by their own requirements.\nSo, in satellite_config.yaml, there are three parts.\n The common configurations. The sharing plugin configurations. The pipe plugin configurations.  Advanced feature document link list  Overriding settings in satellite_config.yaml is supported  Performance  ALS Load Balance.  ","title":"Setup","url":"/docs/skywalking-satellite/latest/en/setup/readme/"},{"content":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by configs/satellite_config.yaml. 
Understanding the setting file will help you to read this document.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nYou can use bin/startup.sh (or cmd) to start up the satellite with their default settings, set out as follows:\n Receive SkyWalking related protocols through grpc(listens on 0.0.0.0/11800) and transmit them to SkyWalking backend(to 0.0.0.0/11800). Expose Self-Observability telemetry data to Prometheus(listens on 0.0.0.0/1234)  Startup script Startup Script\nbin/startup.sh Examples You can quickly build your satellite according to the following examples:\nDeploy  Deploy on Linux and Windows Deploy on Kubernetes  More Use Cases  Transmit Log to Kafka Enable/Disable Channel Telemetry Exporter  satellite_config.yaml The core concept behind this setting file is, SkyWalking Satellite is based on pure modularization design. End user can switch or assemble the collector features by their own requirements.\nSo, in satellite_config.yaml, there are three parts.\n The common configurations. The sharing plugin configurations. The pipe plugin configurations.  Advanced feature document link list  Overriding settings in satellite_config.yaml is supported  Performance  ALS Load Balance.  ","title":"Setup","url":"/docs/skywalking-satellite/next/en/setup/readme/"},{"content":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by configs/satellite_config.yaml. Understanding the setting file will help you to read this document.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nYou can use bin/startup.sh (or cmd) to start up the satellite with their default settings, set out as follows:\n Receive SkyWalking related protocols through grpc(listens on 0.0.0.0/11800) and transmit them to SkyWalking backend(to 0.0.0.0/11800). Expose Self-Observability telemetry data to Prometheus(listens on 0.0.0.0/1234)  Startup script Startup Script\nbin/startup.sh Examples You can quickly build your satellite according to the following examples:\nDeploy  Deploy on Linux and Windows Deploy on Kubernetes  More Use Cases  Transmit Log to Kafka Enable/Disable Channel Telemetry Exporter  satellite_config.yaml The core concept behind this setting file is, SkyWalking Satellite is based on pure modularization design. End user can switch or assemble the collector features by their own requirements.\nSo, in satellite_config.yaml, there are three parts.\n The common configurations. The sharing plugin configurations. The pipe plugin configurations.  Advanced feature document link list  Overriding settings in satellite_config.yaml is supported  Performance  ALS Load Balance.  
","title":"Setup","url":"/docs/skywalking-satellite/v1.2.0/en/setup/readme/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/latest/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other 
relative settings. In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/next/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For Jetty serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMinThreads:${SW_RECEIVER_SHARING_JETTY_MIN_THREADS:1}restMaxThreads:${SW_RECEIVER_SHARING_JETTY_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_JETTY_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_JETTY_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.0.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.1.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.2.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.3.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.4.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.5.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.6.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.7.0/en/setup/backend/backend-expose/"},{"content":"Setup in build When you want to integrate the Agent using the original go build command, you need to follow these steps.\n1. Download Agent Download the Agent from the official website.\n2. Install SkyWalking Go SkyWalking Go offers two ways for integration into your project.\n2.1 Agent Injector Agent injector is recommended when you only want to include SkyWalking Go agent in the compiling pipeline or shell.\nPlease execute the following command, which would automatically import SkyWalking Go into your project.\n/path/to/agent -inject /path/to/your/project [-all]  /path/to/agent is the path to the agent which your downloaded. /path/to/your/project is the home path to your project, support absolute and related with current directory path. -all is the parameter for injecting all submodules in your project.  2.2 Code Dependency Use go get to import the skywalking-go program.\ngo get github.com/apache/skywalking-go Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; NOTICE: Please ensure that the version of the Agent you downloaded is consistent with the version installed via go get in the previous section, to prevent errors such as missing package references during compilation.\n3. Build with SkyWalking Go Agent Add the following parameters in go build:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. 
-a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a Binary Output The binary would be weaved and instrumented by SkyWalking Go.\n","title":"Setup in build","url":"/docs/skywalking-go/latest/en/setup/gobuild/"},{"content":"Setup in build When you want to integrate the Agent using the original go build command, you need to follow these steps.\n1. Download Agent Download the Agent from the official website.\n2. Install SkyWalking Go SkyWalking Go offers two ways for integration into your project.\n2.1 Agent Injector Agent injector is recommended when you only want to include SkyWalking Go agent in the compiling pipeline or shell.\nPlease execute the following command, which would automatically import SkyWalking Go into your project.\n/path/to/agent -inject /path/to/your/project [-all]  /path/to/agent is the path to the agent which your downloaded. /path/to/your/project is the home path to your project, support absolute and related with current directory path. -all is the parameter for injecting all submodules in your project.  2.2 Code Dependency Use go get to import the skywalking-go program.\ngo get github.com/apache/skywalking-go Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; NOTICE: Please ensure that the version of the Agent you downloaded is consistent with the version installed via go get in the previous section, to prevent errors such as missing package references during compilation.\n3. Build with SkyWalking Go Agent Add the following parameters in go build:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a Binary Output The binary would be weaved and instrumented by SkyWalking Go.\n","title":"Setup in build","url":"/docs/skywalking-go/next/en/setup/gobuild/"},{"content":"Setup in build When you want to integrate the Agent using the original go build command, you need to follow these steps.\n1. Download Agent Download the Agent from the official website.\n2. Install SkyWalking Go SkyWalking Go offers two ways for integration into your project.\n2.1 Agent Injector Agent injector is recommended when you only want to include SkyWalking Go agent in the compiling pipeline or shell.\nPlease execute the following command, which would automatically import SkyWalking Go into your project.\n/path/to/agent -inject /path/to/your/project [-all]  /path/to/agent is the path to the agent which your downloaded. /path/to/your/project is the home path to your project, support absolute and related with current directory path. -all is the parameter for injecting all submodules in your project.  
2.2 Code Dependency Use go get to import the skywalking-go program.\ngo get github.com/apache/skywalking-go Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; NOTICE: Please ensure that the version of the Agent you downloaded is consistent with the version installed via go get in the previous section, to prevent errors such as missing package references during compilation.\n3. Build with SkyWalking Go Agent Add the following parameters in go build:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a Binary Output The binary would be weaved and instrumented by SkyWalking Go.\n","title":"Setup in build","url":"/docs/skywalking-go/v0.4.0/en/setup/gobuild/"},{"content":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container image.\nCustomized Dockerfile Using the SkyWalking Go provided image as the base image, perform file copying and other operations in the Dockerfile.\n# import the skywalking go base imageFROMapache/skywalking-go:\u0026lt;version\u0026gt;-go\u0026lt;go version\u0026gt;# Copy application codeCOPY /path/to/project /path/to/project# Inject the agent into the project or get dependencies by application selfRUN skywalking-go-agent -inject /path/to/project# Building the project including the agentRUN go build -toolexec=\u0026#34;skywalking-go-agent\u0026#34; -a /path/to/project# More operations...In the above code, we have performed the following actions:\n Used the SkyWalking Go provided image as the base image, which currently supports the following Go versions: 1.16, 1.17, 1.18, 1.19, 1.20. Copied the project into the Docker image. Installed SkyWalking Go and compiled the project, read this documentation for more detail. The SkyWalking Go agent is already installed in the /usr/local/bin directory with the name skywalking-go-agent.  ","title":"Setup in docker","url":"/docs/skywalking-go/latest/en/setup/docker/"},{"content":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container image.\nCustomized Dockerfile Using the SkyWalking Go provided image as the base image, perform file copying and other operations in the Dockerfile.\n# import the skywalking go base imageFROMapache/skywalking-go:\u0026lt;version\u0026gt;-go\u0026lt;go version\u0026gt;# Copy application codeCOPY /path/to/project /path/to/project# Inject the agent into the project or get dependencies by application selfRUN skywalking-go-agent -inject /path/to/project# Building the project including the agentRUN go build -toolexec=\u0026#34;skywalking-go-agent\u0026#34; -a /path/to/project# More operations...In the above code, we have performed the following actions:\n Used the SkyWalking Go provided image as the base image, which currently supports the following Go versions: 1.16, 1.17, 1.18, 1.19, 1.20. Copied the project into the Docker image. Installed SkyWalking Go and compiled the project, read this documentation for more detail. The SkyWalking Go agent is already installed in the /usr/local/bin directory with the name skywalking-go-agent.  
","title":"Setup in docker","url":"/docs/skywalking-go/next/en/setup/docker/"},{"content":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container image.\nCustomized Dockerfile Using the SkyWalking Go provided image as the base image, perform file copying and other operations in the Dockerfile.\n# import the skywalking go base imageFROMapache/skywalking-go:\u0026lt;version\u0026gt;-go\u0026lt;go version\u0026gt;# Copy application codeCOPY /path/to/project /path/to/project# Inject the agent into the project or get dependencies by application selfRUN skywalking-go-agent -inject /path/to/project# Building the project including the agentRUN go build -toolexec=\u0026#34;skywalking-go-agent\u0026#34; -a /path/to/project# More operations...In the above code, we have performed the following actions:\n Used the SkyWalking Go provided image as the base image, which currently supports the following Go versions: 1.16, 1.17, 1.18, 1.19, 1.20. Copied the project into the Docker image. Installed SkyWalking Go and compiled the project, read this documentation for more detail. The SkyWalking Go agent is already installed in the /usr/local/bin directory with the name skywalking-go-agent.  ","title":"Setup in docker","url":"/docs/skywalking-go/v0.4.0/en/setup/docker/"},{"content":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- expired-plugins # Expired plugins are moved to this folder. No guarantee of working and maintenance. apm-impala-2.6.x-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. 
If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","title":"Setup java agent","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/readme/"},{"content":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- expired-plugins # Expired plugins are moved to this folder. No guarantee of working and maintenance. apm-impala-2.6.x-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","title":"Setup java agent","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/readme/"},{"content":"Setup java agent  Agent is available for JDK 8 - 17. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  The agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... 
+-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","title":"Setup java agent","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/readme/"},{"content":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. 
Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","title":"Setup java agent","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/readme/"},{"content":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- expired-plugins # Expired plugins are moved to this folder. No guarantee of working and maintenance. apm-impala-2.6.x-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","title":"Setup java agent","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/readme/"},{"content":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini.  
Requirements  GCC Rustc 1.65+ Cargo Libclang 9.0+ Make Protoc  Install dependencies For Debian-base OS sudo apt install gcc make llvm-13-dev libclang-13-dev protobuf-c-compiler protobuf-compiler For Alpine Linux apk add gcc make musl-dev llvm15-dev clang15-dev protobuf-c-compiler Install Rust globally The officially recommended way to install Rust is via rustup.\nBut because the source code toolchain is override by rust-toolchain.toml, so if you don\u0026rsquo;t need multi version Rust, we recommend to install Rust by these way:\n  Install through OS package manager (The Rust version in the source must be \u0026gt;= 1.65).\n  Through standalone installers.\nFor linux x86_64 user:\nwget https://static.rust-lang.org/dist/rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz tar zxvf rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz cd rust-1.65.0-x86_64-unknown-linux-gnu ./install.sh   Through rustup but set default-toolchain to none.\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none   Install  Notice: If you compile skywalking_agent in Alpine Linux, you have to disable crt-static, otherwise the problem will be throw: \u0026ldquo;the libclang shared library at /usr/lib/libclang.so.15.0.7 could not be opened: Dynamic loading not supported\u0026rdquo;.\nYou can disable crt-static by environment variable:\nexport RUSTFLAGS=\u0026#34;-C target-feature=-crt-static\u0026#34;  Install from pecl.net pecl install skywalking_agent Install from the source codes git clone --recursive https://github.com/apache/skywalking-php.git cd skywalking-php phpize ./configure make make install Configure Configure skywalking agent in your php.ini.\n[skywalking_agent] extension = skywalking_agent.so ; Enable skywalking_agent extension or not. skywalking_agent.enable = Off ; Log file path. skywalking_agent.log_file = /tmp/skywalking-agent.log ; Log level: one of `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. skywalking_agent.log_level = INFO ; Address of skywalking oap server. skywalking_agent.server_addr = 127.0.0.1:11800 ; Application service name. skywalking_agent.service_name = hello-skywalking Refer to the Configuration section for more configuration items.\n Notice: It is not recommended to enable skywalking_agent.enable by default globally, because skywalking agent will modify the hook function and fork a new process to be a worker. Enabling it by default will cause extra meaningless consumption when skywalking agent is not needed (such as simply executing a php script).\n Run Start php-fpm server:\nphp-fpm -F -d \u0026#34;skywalking_agent.enable=On\u0026#34;  Notice: It is necessary to keep the php-fpm process running in the foreground (by specifying the \u0026gt; -F parameter, etc.), running php-fpm as a daemon will cause the skywalking-agent reporter process immediately exit.\n ","title":"Setup PHP Agent","url":"/docs/skywalking-php/latest/en/setup/service-agent/php-agent/readme/"},{"content":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini.  
Requirements  GCC Rustc 1.65+ Cargo Libclang 9.0+ Make Protoc  Install dependencies For Debian-base OS sudo apt install gcc make llvm-13-dev libclang-13-dev protobuf-c-compiler protobuf-compiler For Alpine Linux apk add gcc make musl-dev llvm15-dev clang15-dev protobuf-c-compiler Install Rust globally The officially recommended way to install Rust is via rustup.\nBut because the source code toolchain is override by rust-toolchain.toml, so if you don\u0026rsquo;t need multi version Rust, we recommend to install Rust by these way:\n  Install through OS package manager (The Rust version in the source must be \u0026gt;= 1.65).\n  Through standalone installers.\nFor linux x86_64 user:\nwget https://static.rust-lang.org/dist/rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz tar zxvf rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz cd rust-1.65.0-x86_64-unknown-linux-gnu ./install.sh   Through rustup but set default-toolchain to none.\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none   Install  Notice: If you compile skywalking_agent in Alpine Linux, you have to disable crt-static, otherwise the problem will be throw: \u0026ldquo;the libclang shared library at /usr/lib/libclang.so.15.0.7 could not be opened: Dynamic loading not supported\u0026rdquo;.\nYou can disable crt-static by environment variable:\nexport RUSTFLAGS=\u0026#34;-C target-feature=-crt-static\u0026#34;  Install from pecl.net pecl install skywalking_agent Install from the source codes git clone --recursive https://github.com/apache/skywalking-php.git cd skywalking-php phpize ./configure make make install Configure Configure skywalking agent in your php.ini.\n[skywalking_agent] extension = skywalking_agent.so ; Enable skywalking_agent extension or not. skywalking_agent.enable = Off ; Log file path. skywalking_agent.log_file = /tmp/skywalking-agent.log ; Log level: one of `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. skywalking_agent.log_level = INFO ; Address of skywalking oap server. skywalking_agent.server_addr = 127.0.0.1:11800 ; Application service name. skywalking_agent.service_name = hello-skywalking Refer to the Configuration section for more configuration items.\n Notice: It is not recommended to enable skywalking_agent.enable by default globally, because skywalking agent will modify the hook function and fork a new process to be a worker. Enabling it by default will cause extra meaningless consumption when skywalking agent is not needed (such as simply executing a php script).\n Run Start php-fpm server:\nphp-fpm -F -d \u0026#34;skywalking_agent.enable=On\u0026#34;  Notice: It is necessary to keep the php-fpm process running in the foreground (by specifying the \u0026gt; -F parameter, etc.), running php-fpm as a daemon will cause the skywalking-agent reporter process immediately exit.\n ","title":"Setup PHP Agent","url":"/docs/skywalking-php/next/en/setup/service-agent/php-agent/readme/"},{"content":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini.  
Requirements  GCC Rustc 1.65+ Cargo Libclang 9.0+ Make Protoc  Install dependencies For Debian-base OS sudo apt install gcc make llvm-13-dev libclang-13-dev protobuf-c-compiler protobuf-compiler For Alpine Linux apk add gcc make musl-dev llvm15-dev clang15-dev protobuf-c-compiler Install Rust globally The officially recommended way to install Rust is via rustup.\nBut because the source code toolchain is override by rust-toolchain.toml, so if you don\u0026rsquo;t need multi version Rust, we recommend to install Rust by these way:\n  Install through OS package manager (The Rust version in the source must be \u0026gt;= 1.65).\n  Through standalone installers.\nFor linux x86_64 user:\nwget https://static.rust-lang.org/dist/rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz tar zxvf rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz cd rust-1.65.0-x86_64-unknown-linux-gnu ./install.sh   Through rustup but set default-toolchain to none.\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none   Install  Notice: If you compile skywalking_agent in Alpine Linux, you have to disable crt-static, otherwise the problem will be throw: \u0026ldquo;the libclang shared library at /usr/lib/libclang.so.15.0.7 could not be opened: Dynamic loading not supported\u0026rdquo;.\nYou can disable crt-static by environment variable:\nexport RUSTFLAGS=\u0026#34;-C target-feature=-crt-static\u0026#34;  Install from pecl.net pecl install skywalking_agent Install from the source codes git clone --recursive https://github.com/apache/skywalking-php.git cd skywalking-php phpize ./configure make make install Configure Configure skywalking agent in your php.ini.\n[skywalking_agent] extension = skywalking_agent.so ; Enable skywalking_agent extension or not. skywalking_agent.enable = Off ; Log file path. skywalking_agent.log_file = /tmp/skywalking-agent.log ; Log level: one of `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. skywalking_agent.log_level = INFO ; Address of skywalking oap server. skywalking_agent.server_addr = 127.0.0.1:11800 ; Application service name. skywalking_agent.service_name = hello-skywalking Refer to the Configuration section for more configuration items.\n Notice: It is not recommended to enable skywalking_agent.enable by default globally, because skywalking agent will modify the hook function and fork a new process to be a worker. Enabling it by default will cause extra meaningless consumption when skywalking agent is not needed (such as simply executing a php script).\n Run Start php-fpm server:\nphp-fpm -F -d \u0026#34;skywalking_agent.enable=On\u0026#34;  Notice: It is necessary to keep the php-fpm process running in the foreground (by specifying the \u0026gt; -F parameter, etc.), running php-fpm as a daemon will cause the skywalking-agent reporter process immediately exit.\n ","title":"Setup PHP Agent","url":"/docs/skywalking-php/v0.7.0/en/setup/service-agent/php-agent/readme/"},{"content":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients and servers.\nCommon Configuration    Config Default Description     pipe_name sharing The group name of sharing plugins    Clients Clients have a series of client plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all client plugin configurations.\nServers Servers have a series of server plugins, which would be sharing with the plugins of the other pipes. 
Please read the doc to find all server plugin configurations.\nExample # The sharing plugins referenced by the specific plugins in the different pipes.sharing:common_config:pipe_name:sharingclients:- plugin_name:"kafka-client"brokers:${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}version:${SATELLITE_KAFKA_VERSION:"2.1.1"}servers:- plugin_name:"grpc-server"- plugin_name:"prometheus-server"address:${SATELLITE_PROMETHEUS_ADDRESS:":8090"}","title":"Sharing Plugins","url":"/docs/skywalking-satellite/latest/en/setup/configuration/sharing-plugins/"},{"content":"Sharing Plugins Sharing plugin configuration has three parts, which are common_config, clients and servers.\nCommon Configuration    Config Default Description     pipe_name sharing The group name of sharing plugins    Clients Clients have a series of client plugins, which are shared with the plugins of the other pipes. Please read the doc to find all client plugin configurations.\nServers Servers have a series of server plugins, which are shared with the plugins of the other pipes. Please read the doc to find all server plugin configurations.\nExample # The sharing plugins referenced by the specific plugins in the different pipes.sharing:common_config:pipe_name:sharingclients:- plugin_name:"kafka-client"brokers:${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}version:${SATELLITE_KAFKA_VERSION:"2.1.1"}servers:- plugin_name:"grpc-server"- plugin_name:"prometheus-server"address:${SATELLITE_PROMETHEUS_ADDRESS:":8090"}","title":"Sharing Plugins","url":"/docs/skywalking-satellite/next/en/setup/configuration/sharing-plugins/"},{"content":"Sharing Plugins Sharing plugin configuration has three parts, which are common_config, clients and servers.\nCommon Configuration    Config Default Description     pipe_name sharing The group name of sharing plugins    Clients Clients have a series of client plugins, which are shared with the plugins of the other pipes. Please read the doc to find all client plugin configurations.\nServers Servers have a series of server plugins, which are shared with the plugins of the other pipes. Please read the doc to find all server plugin configurations.\nExample # The sharing plugins referenced by the specific plugins in the different pipes.sharing:common_config:pipe_name:sharingclients:- plugin_name:"kafka-client"brokers:${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}version:${SATELLITE_KAFKA_VERSION:"2.1.1"}servers:- plugin_name:"grpc-server"- plugin_name:"prometheus-server"address:${SATELLITE_PROMETHEUS_ADDRESS:":8090"}","title":"Sharing Plugins","url":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/sharing-plugins/"},{"content":"SkyWalking 9.x showcase This showcase follows the latest changes of SkyWalking 9.x, even before the official release.\nThis showcase repository includes an example music application and other manifests to demonstrate the main features of SkyWalking. The music application is composed of several microservices that are written in different programming languages. Here is the architecture:\n%% please read this doc in our official website, otherwise the graph is not correctly rendered. 
graph LR; loadgen[load generator] --\u0026gt; ui(\u0026quot;UI (React)\u0026quot;) --\u0026gt; Traffic1(\u0026quot;HTTP Request for backend serv\u0026quot;) --\u0026gt; apisix(\u0026quot;APISIX as UI container\u0026quot;) --\u0026gt; app(\u0026quot;app server (NodeJS)\u0026quot;) --\u0026gt; gateway(\u0026quot;gateway (Spring)\u0026quot;); ui(\u0026quot;UI (React)\u0026quot;) --\u0026gt; Traffic2(\u0026quot;HTTP Request for UI codes\u0026quot;) --\u0026gt; apisix(\u0026quot;APISIX with UI container\u0026quot;) gateway --\u0026gt; songs(\u0026quot;songs (Spring)\u0026quot;) \u0026amp; rcmd(\u0026quot;recommendations (Python)\u0026quot;); rcmd --\u0026gt; rating(\u0026quot;rating (Go)\u0026quot;); songs --\u0026gt; activeMQ activeMQ --\u0026gt; songs rcmd --\u0026gt; songs; songs --\u0026gt; db(\u0026quot;database (H2)\u0026quot;); Usage Please run the showcase in a brand new test cluster, otherwise the undeploy process may delete some resources that you have installed before running this showcase (for example cert-manager). If you don\u0026rsquo;t do this in a new test cluster, it\u0026rsquo;s all on your own risks!\nThe showcase uses GNU Make and Docker containers to run commands, so please make sure you have make installed and Docker daemon running.\nPrerequisites To deploy the full features of this showcase application, you may need up to 8 CPU cores and 32 GB memory, please increase the Docker daemon resources or Kubernetes cluster resources if you find containers / Pods failed to start up. Alternatively, you can also only deploy part of the features that interest you if you don\u0026rsquo;t want to increase the resources, via the guide in Customization.\nQuick Start Make sure you have a running Kubernetes cluster and kubectl can access to that cluster.\ngit clone https://github.com/apache/skywalking-showcase.git cd skywalking-showcase make deploy.kubernetes This will install SkyWalking components, including OAP in cluster mode with 2 nodes, SkyWalking UI, microservices with SkyWalking agent, microservices without SkyWalking agent but managed by Istio, 2 Pods to mimic virtual machines and export metrics to SkyWalking, and enable kubernetes cluster monitoring as well as SkyWalking self observability.\nFor more advanced deployments, check Customization documentation below.\nNotice, when run this showcase locally such as KinD, the images are downloaded inside the KinD, which could take over 10 mins(depend on local network). Rerun make deploy.kubernetes if some timeout errors break the process.\nCustomization The variables defined in Makefile.in can be overridden to customize the showcase, by specifying an environment variable with the same name, e.g.:\nexport ES_VERSION=7.14.0 make \u0026lt;target\u0026gt; or directly specifying in the make command, e.g.: make \u0026lt;target\u0026gt; ES_VERSION=7.14.0.\nRun make help to get more information.\nFeatures The showcase is composed of a set of scenarios with feature flags, you can deploy some of them that interest you by overriding the FEATURE_FLAGS variable defined in Makefile.in, as documented in Customization, e.g.:\nmake deploy.kubernetes FEATURE_FLAGS=single-node,agent Feature flags for different platforms (Kubernetes and Docker Compose) are not necessarily the same so make sure to specify the right feature flags.\nCurrently, the features supported are:\n   Name Description Note     java-agent-injector Use the java agent injector to inject the Skywalking Java agent and deploy microservices with other SkyWalking agent enabled. 
The microservices include agents for Java, NodeJS server, browser, Python.   agent Deploy microservices with SkyWalking agent pre-installed. In Kubernetes scenarios, please use java-agent-injector instead of this, if possible.   cluster Deploy SkyWalking OAP in cluster mode, with 2 nodes, and SkyWalking UI. Only one of cluster or single-node can be enabled.   single-node Deploy only one single node of SkyWalking OAP, and SkyWalking UI, ElasticSearch as storage. Only one of cluster or single-node can be enabled.   elasticsearch Deploy ElasticSearch as storage, you may want to disable this if you want to use your own ElasticSearch deployments.    postgresql Deploy PostgreSQL as storage, you may want to disable this if you want to use your own PostgreSQL deployments.    so11y Enable SkyWalking self observability. This is enabled by default for platform Docker Compose.   vm-monitor Start 2 virtual machines and export their metrics to SkyWalking. The \u0026ldquo;virtual machines\u0026rdquo; are mimicked by Docker containers or Pods.   als Start microservices WITHOUT SkyWalking agent enabled, and configure SkyWalking to analyze the topology and metrics from their access logs. Command istioctl is required to run this feature. The agentless microservices will be running at namespace ${NAMESPACE}-agentless   kubernetes-monitor Deploy OpenTelemetry and export Kubernetes monitoring metrics to SkyWalking for analysis and display on UI.    istiod-monitor Deploy OpenTelemetry and export Istio control plane metrics to SkyWalking for analysis and display on UI.    event Deploy tools to trigger events, and SkyWalking Kubernetes event exporter to export events into SkyWalking.    satellite Deploy SkyWalking Satellite to load balance the monitoring data.    trace-profiling Deploy tools to submit trace profiling tasks. Only support deployment with SkyWalking agents installed, currently Java agent and Python agent support trace profiling.   rover Deploy SkyWalking Rover and detect the processes in the Kubernetes environment. Only support deployment in the Kubernetes environment, docker is not supported.   mysql-monitor Start a MySQL server and load generator to execute the sample SQLs periodically, set up fluent bit to fetch slow logs and export to OAP, and export their metrics to SkyWalking.    postgresql-monitor Start a PostgreSQL server, and load generator to execute the sample SQLs periodically, set up fluent bit to fetch slow logs and export to OAP, and export their metrics to SkyWalking.    elasticsearch-monitor Deploy OpenTelemetry and export Elasticsearch monitoring metrics to SkyWalking for analysis and display on UI.    mongodb-monitor Deploy OpenTelemetry and export MongoDB monitoring metrics to SkyWalking for analysis and display on UI.    nginx-monitor Deploy OpenTelemetry and export Nginx metrics and logs to SkyWalking for analysis and display on UI    apisix-monitor Deploy OpenTelemetry and export APISIX metrics to SkyWalking for analysis and display on UI    mesh-with-agent Deploy services with java agent in the service mesh environment. Only support deployment in the Kubernetes environment, docker is not supported.   grafana Deploy a Grafana to show SkyWalking metrics and logs on the Grafana UI. Feel free to modify the Grafana config when deploy your own environment.   r3 Deploy R3 as RESTful URL recognition service.    rocketmq-monitor Deploy OpenTelemetry and export RocketMQ monitoring metrics to SkyWalking for analysis and display on UI.    
pulsar-monitor Deploy OpenTelemetry and export Pulsar monitoring metrics to SkyWalking for analysis and display on UI.    rabbitmq-monitor Deploy OpenTelemetry and export RabbitMQ monitoring metrics to SkyWalking for analysis and display on UI.     Kubernetes To deploy the example application in Kubernetes, please make sure that you have kubectl command available, and it can connect to the Kubernetes cluster successfully.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nRun kubectl get nodes to check the connectivity before going to next step. The typical error message that indicates your kubectl cannot connect to a cluster is:\nThe connection to the server localhost:8080 was refused - did you specify the right host or port? Deploy # Deploy make deploy.kubernetes # Undeploy make undeploy.kubernetes # Redeploy make redeploy.kubernetes # equivalent to make undeploy.kubernetes deploy.kubernetes Docker Compose Deploy # Deploy make deploy.docker # Undeploy make undeploy.docker # Redeploy make redeploy.docker # equivalent to make undeploy.docker deploy.docker Traffic Flow After deploy the showcase, the business system would send monitoring traffic to the OAP node, and one agent/sidecar connect to one OAP node directly.\nSatellite If the business traffic is unbalanced, it would cause the OAP node receive unbalanced monitoring data. So, you could add the Satellite component. After deploy the showcase with the satellite component, the monitoring traffic would send to the Satellite service, and satellite load balances the traffic to the OAP nodes.\n%% please read this doc in our official website, otherwise the graph is not correctly rendered. graph LR; agent[\u0026quot;business app(agent)\u0026quot;] --\u0026gt; satellite(\u0026quot;satellite\u0026quot;) --\u0026gt; oap(\u0026quot;oap\u0026quot;); envoy[\u0026quot;sidecar(envoy)\u0026quot;] --\u0026gt; satellite; Troubleshooting If you encounter any problems, please add DEBUG=true to the command line to get the output of the resources that will be applied.\nmake deploy.kubernetes DEBUG=true # this will print the resources that will be applied to Kubernetes make deploy.docker DEBUG=true # this will print the merged docker-compose.yaml content that will be used to run in Docker Compose ","title":"SkyWalking 9.x showcase","url":"/docs/skywalking-showcase/next/readme/"},{"content":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x 
postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x spring-cloud-gateway-4.x spring-webflux-6.x spring-webflux-6.x-webclient activemq-artemis-jakarta-client-2.x  ","title":"Skywalking Agent List","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/plugin-list/"},{"content":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x 
spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x spring-cloud-gateway-4.x spring-webflux-6.x spring-webflux-6.x-webclient activemq-artemis-jakarta-client-2.x c3p0-0.9.x  ","title":"Skywalking Agent List","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-list/"},{"content":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter 
toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x  ","title":"Skywalking Agent List","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/plugin-list/"},{"content":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x 
micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x  ","title":"Skywalking Agent List","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/plugin-list/"},{"content":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x spring-cloud-gateway-4.x spring-webflux-6.x spring-webflux-6.x-webclient activemq-artemis-jakarta-client-2.x  ","title":"Skywalking Agent List","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/plugin-list/"},{"content":"Apache SkyWalking Cloud on Kubernetes A bridge 
project between Apache SkyWalking and Kubernetes.\nSWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nFeatures  Java Agent Injector: Inject the java agent into the application pod natively. Operator: Provision and maintain SkyWalking backend components. Custom Metrics Adapter: Provides custom metrics come from SkyWalking OAP cluster for autoscaling by Kubernetes HPA  Build images Issue below instrument to get the docker image:\nmake or\nmake build To onboard operator or adapter, you should push the image to a registry where the kubernetes cluster can pull it.\nOnboard Java Agent Injector and Operator The java agent injector and operator share a same binary. To onboard them, you should follow:\n To install the java agent injector and operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f config/operator-bundle.yaml Onboard Custom Metrics Adapter  Deploy OAP server by referring to Operator Quick Start. Apply the manifests for an adapter in config:  kubectl apply -f config/adapter-bundle.yaml License Apache 2.0 License.\n","title":"SkyWalking Cloud on Kubernetes","url":"/docs/skywalking-swck/latest/binary-readme/"},{"content":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes.\nSWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nFeatures  Java Agent Injector: Inject the java agent into the application pod natively. Operator: Provision and maintain SkyWalking backend components. Custom Metrics Adapter: Provides custom metrics come from SkyWalking OAP cluster for autoscaling by Kubernetes HPA  Build images Issue below instrument to get the docker image:\nmake or\nmake build To onboard operator or adapter, you should push the image to a registry where the kubernetes cluster can pull it.\nOnboard Java Agent Injector and Operator The java agent injector and operator share a same binary. To onboard them, you should follow:\n To install the java agent injector and operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f config/operator-bundle.yaml Onboard Custom Metrics Adapter  Deploy OAP server by referring to Operator Quick Start. Apply the manifests for an adapter in config:  kubectl apply -f config/adapter-bundle.yaml License Apache 2.0 License.\n","title":"SkyWalking Cloud on Kubernetes","url":"/docs/skywalking-swck/next/binary-readme/"},{"content":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes.\nSWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nFeatures  Java Agent Injector: Inject the java agent into the application pod natively. Operator: Provision and maintain SkyWalking backend components. Custom Metrics Adapter: Provides custom metrics come from SkyWalking OAP cluster for autoscaling by Kubernetes HPA  Build images Issue below instrument to get the docker image:\nmake or\nmake build To onboard operator or adapter, you should push the image to a registry where the kubernetes cluster can pull it.\nOnboard Java Agent Injector and Operator The java agent injector and operator share a same binary. 
To onboard them, you should follow:\n To install the java agent injector and operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f config/operator-bundle.yaml Onboard Custom Metrics Adapter  Deploy OAP server by referring to Operator Quick Start. Apply the manifests for an adapter in config:  kubectl apply -f config/adapter-bundle.yaml License Apache 2.0 License.\n","title":"SkyWalking Cloud on Kubernetes","url":"/docs/skywalking-swck/v0.9.0/binary-readme/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/latest/en/api/x-process-correlation-headers-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  
","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/next/en/api/x-process-correlation-headers-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.0.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.1.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. 
The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.2.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.3.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  
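To make the encoding above concrete, the following is a minimal, illustrative sketch (not taken from any SkyWalking agent) of building the sw8-correlation value from a key/value map with plain java.util.Base64; the class name, the encode helper, and the sample keys are made up for this example.

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

// Hypothetical helper, for illustration only: builds the sw8-correlation value
// "base64(key):base64(value),base64(key2):base64(value2)" from a key/value map.
public final class CorrelationHeaderSketch {

    static String encode(Map<String, String> correlation) {
        Base64.Encoder b64 = Base64.getEncoder();
        return correlation.entrySet().stream()
                .map(e -> b64.encodeToString(e.getKey().getBytes(StandardCharsets.UTF_8))
                        + ":"
                        + b64.encodeToString(e.getValue().getBytes(StandardCharsets.UTF_8)))
                .collect(Collectors.joining(","));
    }

    public static void main(String[] args) {
        // Sample entries, kept within the recommended limits
        // (fewer than 3 keys, each value shorter than 128 bytes).
        Map<String, String> ctx = new LinkedHashMap<>();
        ctx.put("tenant", "acme");
        ctx.put("flag", "gray");
        // Prints: sw8-correlation: dGVuYW50:YWNtZQ==,ZmxhZw==:Z3JheQ==
        System.out.println("sw8-correlation: " + encode(ctx));
    }
}
```

A receiving tracer would split the value on , and :, base64-decode each part, and merge the pairs into its own correlation context while applying the same key-count and value-length limits.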
","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.4.0/en/api/x-process-correlation-headers-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.5.0/en/api/x-process-correlation-headers-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.6.0/en/api/x-process-correlation-headers-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. 
The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.7.0/en/api/x-process-correlation-headers-v1/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. 
The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/latest/en/api/x-process-propagation-headers-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
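As an illustration of the field layout only (this is not code from any SkyWalking agent), the sketch below assembles the eight dash-separated fields of an sw8 value with java.util.Base64; the service, instance, endpoint, and address strings are hypothetical placeholders.

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.UUID;

// Illustrative sketch of composing an sw8 header value from the 8 documented fields.
public final class Sw8HeaderSketch {

    private static String b64(String s) {
        return Base64.getEncoder().encodeToString(s.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        String sample = "1";                                 // 1 = sample and send to the backend
        String traceId = b64(UUID.randomUUID().toString());  // globally unique trace ID
        String parentSegmentId = b64(UUID.randomUUID().toString());
        String parentSpanId = "3";                           // integer, starting from 0
        String parentService = b64("order-service");              // <= 50 UTF-8 characters
        String parentInstance = b64("order-service-instance-01"); // <= 50 UTF-8 characters
        String parentEndpoint = b64("/api/orders");               // first entry span of the parent segment
        String targetAddress = b64("payment-service:8080");       // address used on the client end

        String sw8 = String.join("-",
                sample, traceId, parentSegmentId, parentSpanId,
                parentService, parentInstance, parentEndpoint, targetAddress);

        System.out.println("sw8: " + sw8);
    }
}
```

A receiving agent would split the value on - and base64-decode the string fields; keeping the whole header value under the default 2k length limit is the sender's responsibility.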
","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/next/en/api/x-process-propagation-headers-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.0.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. 
The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.1.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. 
The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.2.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. 
String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.3.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. 
Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.4.0/en/api/x-process-propagation-headers-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. 
It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.5.0/en/api/x-process-propagation-headers-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. 
This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.6.0/en/api/x-process-propagation-headers-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
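For illustration only, the following sketch shows how the eight sw8 fields described above could be assembled on the sending side. It is not taken from the SkyWalking agent source; the trace ID, segment ID, service names, endpoint, and address are made-up placeholder values, and a real agent generates globally unique IDs and keeps the whole header value under the 2k default limit.

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class Sw8HeaderSketch {
    // All string-typed sw8 fields are Base64 encoded, per the protocol description above.
    private static String b64(String value) {
        return Base64.getEncoder().encodeToString(value.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        // Placeholder values only; a real agent generates globally unique IDs.
        String sw8 = String.join("-",
                "1",                              // sample flag: 1 = sample and send to the backend
                b64("a1b2c3d4e5f6"),              // trace ID
                b64("f6e5d4c3b2a1"),              // parent trace segment ID
                "3",                              // parent span ID (plain integer, not encoded)
                b64("order-service"),             // parent service (<= 50 UTF-8 characters)
                b64("order-service-instance-01"), // parent service instance (<= 50 UTF-8 characters)
                b64("/api/orders"),               // parent endpoint: first entry span of the parent segment
                b64("10.0.0.5:8080"));            // target address used on the client end
        System.out.println("sw8: " + sw8);
    }
}
```

On the receiving side, an agent would split the value on `-` and Base64-decode the string-typed fields to rebuild the parent context.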
","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.7.0/en/api/x-process-propagation-headers-v3/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/latest/en/setup/backend/metrics-exporter/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/next/en/setup/backend/metrics-exporter/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/v9.3.0/en/setup/backend/metrics-exporter/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/v9.4.0/en/setup/backend/metrics-exporter/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/v9.5.0/en/setup/backend/metrics-exporter/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/v9.6.0/en/setup/backend/metrics-exporter/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/v9.7.0/en/setup/backend/metrics-exporter/"},{"content":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the SkyWalking community!\nSkyWalking Go is an open-source Golang auto-instrument agent that provides support for distributed tracing across different frameworks within the Golang language.\nTo use SkyWalking Go, simply import the base dependencies into your code and take advantage of the -toolexec parameter in Golang to enable hybrid compilation capabilities for various frameworks in your application.\n","title":"SkyWalking Go Agent","url":"/docs/skywalking-go/latest/readme/"},{"content":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the SkyWalking community!\nSkyWalking Go is an open-source Golang auto-instrument agent that provides support for distributed tracing across different frameworks within the Golang language.\nTo use SkyWalking Go, simply import the base dependencies into your code and take advantage of the -toolexec parameter in Golang to enable hybrid compilation capabilities for various frameworks in your application.\n","title":"SkyWalking Go Agent","url":"/docs/skywalking-go/next/readme/"},{"content":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. 
Welcome to the SkyWalking community!\nSkyWalking Go is an open-source Golang auto-instrument agent that provides support for distributed tracing across different frameworks within the Golang language.\nTo use SkyWalking Go, simply import the base dependencies into your code and take advantage of the -toolexec parameter in Golang to enable hybrid compilation capabilities for various frameworks in your application.\n","title":"SkyWalking Go Agent","url":"/docs/skywalking-go/v0.4.0/readme/"},{"content":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step configuration content. You can see the sample configuration files for different environments in the examples directory.\nThere is a quick view about the configuration file, and using the yaml format.\nsetup:# set up the environmentcleanup:# clean up the environmenttrigger:# generate trafficverify:# test casesSetup Support two kinds of the environment to set up the system.\nKinD setup:env:kindfile:path/to/kind.yaml # Specified kinD manifest file pathkubeconfig:path/.kube/config # The path of kubeconfigtimeout:20m # timeout durationinit-system-environment:path/to/env # Import environment filesteps:# customize steps for prepare the environment- name:customize setups # step name# one of command line or kinD manifest filecommand:command lines # use command line to setup path:/path/to/manifest.yaml # the manifest file pathwait:# how to verify the manifest is set up finish- namespace:# The pod namespaceresource:# The pod resource namelabel-selector:# The resource label selectorfor:# The wait conditionkind:import-images:# import docker images to KinD- image:version # support using env to expand image, such as `${env_key}` or `$env_key`expose-ports:# Expose resource for host access- namespace:# The resource namespaceresource:# The resource name, such as `pod/foo` or `service/foo`port:# Want to expose port from resource NOTE: The fields file and kubeconfig are mutually exclusive.\n The KinD environment follow these steps:\n [optional]Start the KinD cluster according to the config file, expose KUBECONFIG to environment for help execute kubectl in the next steps. [optional]Setup the kubeconfig field for help execute kubectl in the next steps. Load docker images from kind.import-images if needed. Apply the resources files (--manifests) or/and run the custom init command (--commands) by steps. Wait until all steps are finished and all services are ready with the timeout(second). Expose all resource ports for host access.  Import docker image If you want to import docker image from private registries, there are several ways to do this:\n Using imagePullSecrets to pull images, please take reference from document. Using kind.import-images to load images from host. kind:import-images:- skywalking/oap:${OAP_HASH}# support using environment to expand the image name  Resource Export If you want to access the resource from host, should follow these steps:\n Declare which resource and ports need to be accessible from host. setup:kind:expose-ports:- namespace:default # Need to expose resource namespaceresource:pod/foo # Resource description, such as `pod/foo` or `service/foo`port:8080# Resource port want to expose, support `\u0026lt;resource_port\u0026gt;`, `\u0026lt;bind_to_host_port\u0026gt;:\u0026lt;resource_port\u0026gt;` Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). 
trigger:# trigger with specified mapped port, the resource name replace all `/` or `-` as `_`# host format: \u0026lt;resource_name\u0026gt;_host# port format: \u0026lt;resource_name\u0026gt;_\u0026lt;container_port\u0026gt;url:http://${pod_foo_host}:${pod_foo_8080}/  Log The console output of each pod could be found in ${workDir}/logs/${namespace}/${podName}.log.\nCompose setup:env:composefile:path/to/compose.yaml # Specified docker-compose file pathtimeout:20m # Timeout durationinit-system-environment:path/to/env # Import environment filesteps:# Customize steps for prepare the environment- name:customize setups # Step namecommand:command lines # Use command line to setup The docker-compose environment follow these steps:\n Import init-system-environment file for help build service and execute steps. Each line of the file content is an environment variable, and the key value is separate by \u0026ldquo;=\u0026rdquo;. Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify.  Service Export If you want to get the service host and port mapping, should follow these steps:\n declare the port in the docker-compose service ports config. oap:image:xx.xx:1.0.0ports:# define the port- 8080 Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mappinged porturl:http://${oap_host}:${oap_8080}/  Log The console output of each service could be found in ${workDir}/logs/{serviceName}/std.log.\nTrigger After the Setup step is finished, use the Trigger step to generate traffic.\ntrigger:action:http # The action of the trigger. support HTTP invoke.interval:3s # Trigger the action every 3 seconds.times:5# The retry count before the request success.url:http://apache.skywalking.com/# Http trigger url link.method:GET # Http trigger method.headers:\u0026#34;Content-Type\u0026#34;: \u0026#34;application/json\u0026#34;\u0026#34;Authorization\u0026#34;: \u0026#34;Basic whatever\u0026#34;body:\u0026#39;{\u0026#34;k1\u0026#34;:\u0026#34;v1\u0026#34;, \u0026#34;k2\u0026#34;:\u0026#34;v2\u0026#34;}\u0026#39;The Trigger executed successfully at least once, after success, the next stage could be continued. Otherwise, there is an error and exit.\nVerify After the Trigger step is finished, running test cases.\nverify:retry:# verify with retry strategycount:10# max retry countinterval:10s # the interval between two attempts, e.g. 10s, 1m.fail-fast:true# when a case fails, whether to stop verifying other cases. This property defaults to true.concurrency:false# whether to verify cases concurrently. This property defaults to false.cases:# verify test cases- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file path- includes:# including cases- path/to/cases.yaml # cases file pathThe test cases are executed in the order of declaration from top to bottom. When the execution of a case fails and the retry strategy is exceeded, it will stop verifying other cases if fail-fast is true. 
Otherwise, the process will continue to verify other cases.\nRetry strategy The retry strategy could retry automatically on the test case failure, and restart by the failed test case.\nCase source Support two kind source to verify, one case only supports one kind source type:\n source file: verify by generated yaml format file. command: use command line output as they need to verify content, also only support yaml format.  Excepted verify template After clarifying the content that needs to be verified, you need to write content to verify the real content and ensure that the data is correct.\nYou need to use the form of Go Template to write the verification file, and the data content to be rendered comes from the real data. By verifying whether the rendered data is consistent with the real data, it is verified whether the content is consistent. You could see many test cases in this directory.\nWe use go-cmp to show the parts where excepted do not match the actual data. - prefix represents the expected data content, + prefix represents the actual data content.\nWe have done a lot of extension functions for verification functions on the original Go Template.\nExtension functions Extension functions are used to help users quickly locate the problem content and write test cases that are easier to use.\nBasic Matches Verify that the number fits the range.\n   Function Description Grammar Verify success Verify failure     gt Verify the first param is greater than second param {{gt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   ge Verify the first param is greater than or equals second param {{ge param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   lt Verify the first param is less than second param {{lt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   le Verify the first param is less than or equals second param {{le param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   regexp Verify the first param matches the second regular expression {{regexp param1 param2}} param1 \u0026lt;\u0026quot;$param1\u0026quot; does not match the pattern $param2\u0026quot;\u0026gt;   notEmpty Verify The param is not empty {{notEmpty param}} param \u0026lt;\u0026quot;\u0026quot; is empty, wanted is not empty\u0026gt;   hasPrefix Verify The string param has the same prefix. {{hasPrefix param1 param2}} true false   hasSuffix Verify The string param has the same suffix. 
{{hasSuffix param1 param2}} true false    List Matches Verify the data in the condition list, Currently, it is only supported when all the conditions in the list are executed, it is considered as successful.\nHere is an example, It\u0026rsquo;s means the list values must have value is greater than 0, also have value greater than 1, Otherwise verify is failure.\n{{- contains .list }}- key:{{gt .value 0 }}- key:{{gt .value 1 }}{{- end }}Encoding In order to make the program easier for users to read and use, some code conversions are provided.\n   Function Description Grammar Result     b64enc Base64 encode {{ b64enc \u0026ldquo;Foo\u0026rdquo; }} Zm9v   sha256enc Sha256 encode {{ sha256enc \u0026ldquo;Foo\u0026rdquo; }} 1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa   sha512enc Sha512 encode {{ sha512enc \u0026ldquo;Foo\u0026rdquo; }} 4abcd2639957cb23e33f63d70659b602a5923fafcfd2768ef79b0badea637e5c837161aa101a557a1d4deacbd912189e2bb11bf3c0c0c70ef7797217da7e8207    Reuse cases You could include multiple cases into one single E2E verify, It\u0026rsquo;s helpful for reusing the same verify cases.\nHere is the reused verify cases, and using includes configuration item to include this into E2E config.\ncases:- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file pathCleanup After the E2E finished, how to clean up the environment.\ncleanup:on:always # Clean up strategyIf the on option under cleanup is not set, it will be automatically set to always if there is environment variable CI=true, which is present on many popular CI services, such as GitHub Actions, CircleCI, etc., otherwise it will be set to success, so the testing environment can be preserved when tests failed in your local machine.\nAll available strategies:\n always: No matter the execution result is success or failure, cleanup will be performed. success: Only when the execution succeeds. failure: Only when the execution failed. never: Never clean up the environment.  ","title":"SkyWalking Infra E2E Configuration Guide","url":"/docs/skywalking-infra-e2e/latest/en/setup/configuration-file/"},{"content":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step configuration content. 
You can see the sample configuration files for different environments in the examples directory.\nThere is a quick view about the configuration file, and using the yaml format.\nsetup:# set up the environmentcleanup:# clean up the environmenttrigger:# generate trafficverify:# test casesSetup Support two kinds of the environment to set up the system.\nKinD setup:env:kindfile:path/to/kind.yaml # Specified kinD manifest file pathkubeconfig:path/.kube/config # The path of kubeconfigtimeout:20m # timeout durationinit-system-environment:path/to/env # Import environment filesteps:# customize steps for prepare the environment- name:customize setups # step name# one of command line or kinD manifest filecommand:command lines # use command line to setup path:/path/to/manifest.yaml # the manifest file pathwait:# how to verify the manifest is set up finish- namespace:# The pod namespaceresource:# The pod resource namelabel-selector:# The resource label selectorfor:# The wait conditionkind:import-images:# import docker images to KinD- image:version # support using env to expand image, such as `${env_key}` or `$env_key`expose-ports:# Expose resource for host access- namespace:# The resource namespaceresource:# The resource name, such as `pod/foo` or `service/foo`port:# Want to expose port from resource NOTE: The fields file and kubeconfig are mutually exclusive.\n The KinD environment follow these steps:\n [optional]Start the KinD cluster according to the config file, expose KUBECONFIG to environment for help execute kubectl in the next steps. [optional]Setup the kubeconfig field for help execute kubectl in the next steps. Load docker images from kind.import-images if needed. Apply the resources files (--manifests) or/and run the custom init command (--commands) by steps. Wait until all steps are finished and all services are ready with the timeout(second). Expose all resource ports for host access.  Import docker image If you want to import docker image from private registries, there are several ways to do this:\n Using imagePullSecrets to pull images, please take reference from document. Using kind.import-images to load images from host. kind:import-images:- skywalking/oap:${OAP_HASH}# support using environment to expand the image name  Resource Export If you want to access the resource from host, should follow these steps:\n Declare which resource and ports need to be accessible from host. setup:kind:expose-ports:- namespace:default # Need to expose resource namespaceresource:pod/foo # Resource description, such as `pod/foo` or `service/foo`port:8080# Resource port want to expose, support `\u0026lt;resource_port\u0026gt;`, `\u0026lt;bind_to_host_port\u0026gt;:\u0026lt;resource_port\u0026gt;` Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). 
trigger:# trigger with specified mapped port, the resource name replace all `/` or `-` as `_`# host format: \u0026lt;resource_name\u0026gt;_host# port format: \u0026lt;resource_name\u0026gt;_\u0026lt;container_port\u0026gt;url:http://${pod_foo_host}:${pod_foo_8080}/  Log The console output of each pod could be found in ${workDir}/logs/${namespace}/${podName}.log.\nCompose setup:env:composefile:path/to/compose.yaml # Specified docker-compose file pathtimeout:20m # Timeout durationinit-system-environment:path/to/env # Import environment filesteps:# Customize steps for prepare the environment- name:customize setups # Step namecommand:command lines # Use command line to setup The docker-compose environment follow these steps:\n Import init-system-environment file for help build service and execute steps. Each line of the file content is an environment variable, and the key value is separate by \u0026ldquo;=\u0026rdquo;. Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify.  Service Export If you want to get the service host and port mapping, should follow these steps:\n declare the port in the docker-compose service ports config. oap:image:xx.xx:1.0.0ports:# define the port- 8080 Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mappinged porturl:http://${oap_host}:${oap_8080}/  Log The console output of each service could be found in ${workDir}/logs/{serviceName}/std.log.\nTrigger After the Setup step is finished, use the Trigger step to generate traffic.\ntrigger:action:http # The action of the trigger. support HTTP invoke.interval:3s # Trigger the action every 3 seconds.times:5# The retry count before the request success.url:http://apache.skywalking.com/# Http trigger url link.method:GET # Http trigger method.headers:\u0026#34;Content-Type\u0026#34;: \u0026#34;application/json\u0026#34;\u0026#34;Authorization\u0026#34;: \u0026#34;Basic whatever\u0026#34;body:\u0026#39;{\u0026#34;k1\u0026#34;:\u0026#34;v1\u0026#34;, \u0026#34;k2\u0026#34;:\u0026#34;v2\u0026#34;}\u0026#39;The Trigger executed successfully at least once, after success, the next stage could be continued. Otherwise, there is an error and exit.\nVerify After the Trigger step is finished, running test cases.\nverify:retry:# verify with retry strategycount:10# max retry countinterval:10s # the interval between two attempts, e.g. 10s, 1m.fail-fast:true# when a case fails, whether to stop verifying other cases. This property defaults to true.concurrency:false# whether to verify cases concurrently. This property defaults to false.cases:# verify test cases- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file path- includes:# including cases- path/to/cases.yaml # cases file pathThe test cases are executed in the order of declaration from top to bottom. When the execution of a case fails and the retry strategy is exceeded, it will stop verifying other cases if fail-fast is true. 
Otherwise, the process will continue to verify other cases.\nRetry strategy The retry strategy could retry automatically on the test case failure, and restart by the failed test case.\nCase source Support two kind source to verify, one case only supports one kind source type:\n source file: verify by generated yaml format file. command: use command line output as they need to verify content, also only support yaml format.  Excepted verify template After clarifying the content that needs to be verified, you need to write content to verify the real content and ensure that the data is correct.\nYou need to use the form of Go Template to write the verification file, and the data content to be rendered comes from the real data. By verifying whether the rendered data is consistent with the real data, it is verified whether the content is consistent. You could see many test cases in this directory.\nWe use go-cmp to show the parts where excepted do not match the actual data. - prefix represents the expected data content, + prefix represents the actual data content.\nWe have done a lot of extension functions for verification functions on the original Go Template.\nExtension functions Extension functions are used to help users quickly locate the problem content and write test cases that are easier to use.\nBasic Matches Verify that the number fits the range.\n   Function Description Grammar Verify success Verify failure     gt Verify the first param is greater than second param {{gt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   ge Verify the first param is greater than or equals second param {{ge param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   lt Verify the first param is less than second param {{lt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   le Verify the first param is less than or equals second param {{le param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   regexp Verify the first param matches the second regular expression {{regexp param1 param2}} param1 \u0026lt;\u0026quot;$param1\u0026quot; does not match the pattern $param2\u0026quot;\u0026gt;   notEmpty Verify The param is not empty {{notEmpty param}} param \u0026lt;\u0026quot;\u0026quot; is empty, wanted is not empty\u0026gt;   hasPrefix Verify The string param has the same prefix. {{hasPrefix param1 param2}} true false   hasSuffix Verify The string param has the same suffix. 
{{hasSuffix param1 param2}} true false    List Matches Verify the data in the condition list, Currently, it is only supported when all the conditions in the list are executed, it is considered as successful.\nHere is an example, It\u0026rsquo;s means the list values must have value is greater than 0, also have value greater than 1, Otherwise verify is failure.\n{{- contains .list }}- key:{{gt .value 0 }}- key:{{gt .value 1 }}{{- end }}Encoding In order to make the program easier for users to read and use, some code conversions are provided.\n   Function Description Grammar Result     b64enc Base64 encode {{ b64enc \u0026ldquo;Foo\u0026rdquo; }} Zm9v   sha256enc Sha256 encode {{ sha256enc \u0026ldquo;Foo\u0026rdquo; }} 1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa   sha512enc Sha512 encode {{ sha512enc \u0026ldquo;Foo\u0026rdquo; }} 4abcd2639957cb23e33f63d70659b602a5923fafcfd2768ef79b0badea637e5c837161aa101a557a1d4deacbd912189e2bb11bf3c0c0c70ef7797217da7e8207    Reuse cases You could include multiple cases into one single E2E verify, It\u0026rsquo;s helpful for reusing the same verify cases.\nHere is the reused verify cases, and using includes configuration item to include this into E2E config.\ncases:- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file pathCleanup After the E2E finished, how to clean up the environment.\ncleanup:on:always # Clean up strategyIf the on option under cleanup is not set, it will be automatically set to always if there is environment variable CI=true, which is present on many popular CI services, such as GitHub Actions, CircleCI, etc., otherwise it will be set to success, so the testing environment can be preserved when tests failed in your local machine.\nAll available strategies:\n always: No matter the execution result is success or failure, cleanup will be performed. success: Only when the execution succeeds. failure: Only when the execution failed. never: Never clean up the environment.  ","title":"SkyWalking Infra E2E Configuration Guide","url":"/docs/skywalking-infra-e2e/next/en/setup/configuration-file/"},{"content":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step configuration content. 
You can see the sample configuration files for different environments in the examples directory.\nThere is a quick view about the configuration file, and using the yaml format.\nsetup:# set up the environmentcleanup:# clean up the environmenttrigger:# generate trafficverify:# test casesSetup Support two kinds of the environment to set up the system.\nKinD setup:env:kindfile:path/to/kind.yaml # Specified kinD manifest file pathkubeconfig:path/.kube/config # The path of kubeconfigtimeout:20m # timeout durationinit-system-environment:path/to/env # Import environment filesteps:# customize steps for prepare the environment- name:customize setups # step name# one of command line or kinD manifest filecommand:command lines # use command line to setup path:/path/to/manifest.yaml # the manifest file pathwait:# how to verify the manifest is set up finish- namespace:# The pod namespaceresource:# The pod resource namelabel-selector:# The resource label selectorfor:# The wait conditionkind:import-images:# import docker images to KinD- image:version # support using env to expand image, such as `${env_key}` or `$env_key`expose-ports:# Expose resource for host access- namespace:# The resource namespaceresource:# The resource name, such as `pod/foo` or `service/foo`port:# Want to expose port from resource NOTE: The fields file and kubeconfig are mutually exclusive.\n The KinD environment follow these steps:\n [optional]Start the KinD cluster according to the config file, expose KUBECONFIG to environment for help execute kubectl in the next steps. [optional]Setup the kubeconfig field for help execute kubectl in the next steps. Load docker images from kind.import-images if needed. Apply the resources files (--manifests) or/and run the custom init command (--commands) by steps. Wait until all steps are finished and all services are ready with the timeout(second). Expose all resource ports for host access.  Import docker image If you want to import docker image from private registries, there are several ways to do this:\n Using imagePullSecrets to pull images, please take reference from document. Using kind.import-images to load images from host. kind:import-images:- skywalking/oap:${OAP_HASH}# support using environment to expand the image name  Resource Export If you want to access the resource from host, should follow these steps:\n Declare which resource and ports need to be accessible from host. setup:kind:expose-ports:- namespace:default # Need to expose resource namespaceresource:pod/foo # Resource description, such as `pod/foo` or `service/foo`port:8080# Resource port want to expose, support `\u0026lt;resource_port\u0026gt;`, `\u0026lt;bind_to_host_port\u0026gt;:\u0026lt;resource_port\u0026gt;` Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). 
trigger:# trigger with specified mapped port, the resource name replace all `/` or `-` as `_`# host format: \u0026lt;resource_name\u0026gt;_host# port format: \u0026lt;resource_name\u0026gt;_\u0026lt;container_port\u0026gt;url:http://${pod_foo_host}:${pod_foo_8080}/  Log The console output of each pod could be found in ${workDir}/logs/${namespace}/${podName}.log.\nCompose setup:env:composefile:path/to/compose.yaml # Specified docker-compose file pathtimeout:20m # Timeout durationinit-system-environment:path/to/env # Import environment filesteps:# Customize steps for prepare the environment- name:customize setups # Step namecommand:command lines # Use command line to setup The docker-compose environment follow these steps:\n Import init-system-environment file for help build service and execute steps. Each line of the file content is an environment variable, and the key value is separate by \u0026ldquo;=\u0026rdquo;. Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify.  Service Export If you want to get the service host and port mapping, should follow these steps:\n declare the port in the docker-compose service ports config. oap:image:xx.xx:1.0.0ports:# define the port- 8080 Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mappinged porturl:http://${oap_host}:${oap_8080}/  Log The console output of each service could be found in ${workDir}/logs/{serviceName}/std.log.\nTrigger After the Setup step is finished, use the Trigger step to generate traffic.\ntrigger:action:http # The action of the trigger. support HTTP invoke.interval:3s # Trigger the action every 3 seconds.times:5# The retry count before the request success.url:http://apache.skywalking.com/# Http trigger url link.method:GET # Http trigger method.headers:\u0026#34;Content-Type\u0026#34;: \u0026#34;application/json\u0026#34;\u0026#34;Authorization\u0026#34;: \u0026#34;Basic whatever\u0026#34;body:\u0026#39;{\u0026#34;k1\u0026#34;:\u0026#34;v1\u0026#34;, \u0026#34;k2\u0026#34;:\u0026#34;v2\u0026#34;}\u0026#39;The Trigger executed successfully at least once, after success, the next stage could be continued. Otherwise, there is an error and exit.\nVerify After the Trigger step is finished, running test cases.\nverify:retry:# verify with retry strategycount:10# max retry countinterval:10s # the interval between two attempts, e.g. 10s, 1m.fail-fast:true# when a case fails, whether to stop verifying other cases. This property defaults to true.concurrency:false# whether to verify cases concurrently. This property defaults to false.cases:# verify test cases- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file path- includes:# including cases- path/to/cases.yaml # cases file pathThe test cases are executed in the order of declaration from top to bottom. When the execution of a case fails and the retry strategy is exceeded, it will stop verifying other cases if fail-fast is true. 
Otherwise, the process will continue to verify other cases.\nRetry strategy The retry strategy could retry automatically on the test case failure, and restart by the failed test case.\nCase source Support two kind source to verify, one case only supports one kind source type:\n source file: verify by generated yaml format file. command: use command line output as they need to verify content, also only support yaml format.  Excepted verify template After clarifying the content that needs to be verified, you need to write content to verify the real content and ensure that the data is correct.\nYou need to use the form of Go Template to write the verification file, and the data content to be rendered comes from the real data. By verifying whether the rendered data is consistent with the real data, it is verified whether the content is consistent. You could see many test cases in this directory.\nWe use go-cmp to show the parts where excepted do not match the actual data. - prefix represents the expected data content, + prefix represents the actual data content.\nWe have done a lot of extension functions for verification functions on the original Go Template.\nExtension functions Extension functions are used to help users quickly locate the problem content and write test cases that are easier to use.\nBasic Matches Verify that the number fits the range.\n   Function Description Grammar Verify success Verify failure     gt Verify the first param is greater than second param {{gt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   ge Verify the first param is greater than or equals second param {{ge param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   lt Verify the first param is less than second param {{lt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   le Verify the first param is less than or equals second param {{le param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   regexp Verify the first param matches the second regular expression {{regexp param1 param2}} param1 \u0026lt;\u0026quot;$param1\u0026quot; does not match the pattern $param2\u0026quot;\u0026gt;   notEmpty Verify The param is not empty {{notEmpty param}} param \u0026lt;\u0026quot;\u0026quot; is empty, wanted is not empty\u0026gt;   hasPrefix Verify The string param has the same prefix. {{hasPrefix param1 param2}} true false   hasSuffix Verify The string param has the same suffix. 
{{hasSuffix param1 param2}} true false    List Matches Verify the data in the condition list, Currently, it is only supported when all the conditions in the list are executed, it is considered as successful.\nHere is an example, It\u0026rsquo;s means the list values must have value is greater than 0, also have value greater than 1, Otherwise verify is failure.\n{{- contains .list }}- key:{{gt .value 0 }}- key:{{gt .value 1 }}{{- end }}Encoding In order to make the program easier for users to read and use, some code conversions are provided.\n   Function Description Grammar Result     b64enc Base64 encode {{ b64enc \u0026ldquo;Foo\u0026rdquo; }} Zm9v   sha256enc Sha256 encode {{ sha256enc \u0026ldquo;Foo\u0026rdquo; }} 1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa   sha512enc Sha512 encode {{ sha512enc \u0026ldquo;Foo\u0026rdquo; }} 4abcd2639957cb23e33f63d70659b602a5923fafcfd2768ef79b0badea637e5c837161aa101a557a1d4deacbd912189e2bb11bf3c0c0c70ef7797217da7e8207    Reuse cases You could include multiple cases into one single E2E verify, It\u0026rsquo;s helpful for reusing the same verify cases.\nHere is the reused verify cases, and using includes configuration item to include this into E2E config.\ncases:- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file pathCleanup After the E2E finished, how to clean up the environment.\ncleanup:on:always # Clean up strategyIf the on option under cleanup is not set, it will be automatically set to always if there is environment variable CI=true, which is present on many popular CI services, such as GitHub Actions, CircleCI, etc., otherwise it will be set to success, so the testing environment can be preserved when tests failed in your local machine.\nAll available strategies:\n always: No matter the execution result is success or failure, cleanup will be performed. success: Only when the execution succeeds. failure: Only when the execution failed. never: Never clean up the environment.  ","title":"SkyWalking Infra E2E Configuration Guide","url":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/configuration-file/"},{"content":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for local debugging and operation. GitHub Action: Suitable for automated execution in GitHub projects.  
Command Through commands, you can execute a complete Controller.\n# e2e.yaml configuration file in current directory e2e run # or  # Specified the e2e.yaml file path e2e run -c /path/to/the/test/e2e.yaml Also, could run the separate step in the command line, these commands are all done by reading the configuration.\ne2e setup e2e trigger e2e verify e2e cleanup GitHub Action To use skywalking-infra-e2e in GitHub Actions, add a step in your GitHub workflow.\nThe working directory could be uploaded to GitHub Action Artifact after the task is completed, which contains environment variables and container logs in the environment.\n- name:Run E2E Testuses:apache/skywalking-infra-e2e@main # always prefer to use a revision instead of `main`.with:e2e-file:e2e.yaml # (required)need to run E2E file pathlog-dir:/path/to/log/dir # (Optional)Use `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;_\u0026lt;matrix_value\u0026gt;`(if have GHA matrix) or `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;` in GHA, and output logs into `\u0026lt;work_dir\u0026gt;/logs` out of GHA env, such as running locally.If you want to upload the log directory to the GitHub Action Artifact when this E2E test failure, you could define the below content in your GitHub Action Job.\n- name:Upload E2E Loguses:actions/upload-artifact@v2if:${{ failure() }} # Only upload the artifact when E2E testing failurewith:name:e2e-logpath:\u0026#34;${{ env.SW_INFRA_E2E_LOG_DIR }}\u0026#34;# The SkyWalking Infra E2E action sets SW_INFRA_E2E_LOG_DIR automatically. ","title":"SkyWalking Infra E2E Execute Guide","url":"/docs/skywalking-infra-e2e/latest/en/setup/run-e2e-tests/"},{"content":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for local debugging and operation. GitHub Action: Suitable for automated execution in GitHub projects.  Command Through commands, you can execute a complete Controller.\n# e2e.yaml configuration file in current directory e2e run # or  # Specified the e2e.yaml file path e2e run -c /path/to/the/test/e2e.yaml Also, could run the separate step in the command line, these commands are all done by reading the configuration.\ne2e setup e2e trigger e2e verify e2e cleanup GitHub Action To use skywalking-infra-e2e in GitHub Actions, add a step in your GitHub workflow.\nThe working directory could be uploaded to GitHub Action Artifact after the task is completed, which contains environment variables and container logs in the environment.\n- name:Run E2E Testuses:apache/skywalking-infra-e2e@main # always prefer to use a revision instead of `main`.with:e2e-file:e2e.yaml # (required)need to run E2E file pathlog-dir:/path/to/log/dir # (Optional)Use `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;_\u0026lt;matrix_value\u0026gt;`(if have GHA matrix) or `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;` in GHA, and output logs into `\u0026lt;work_dir\u0026gt;/logs` out of GHA env, such as running locally.If you want to upload the log directory to the GitHub Action Artifact when this E2E test failure, you could define the below content in your GitHub Action Job.\n- name:Upload E2E Loguses:actions/upload-artifact@v2if:${{ failure() }} # Only upload the artifact when E2E testing failurewith:name:e2e-logpath:\u0026#34;${{ env.SW_INFRA_E2E_LOG_DIR }}\u0026#34;# The SkyWalking Infra E2E action sets SW_INFRA_E2E_LOG_DIR automatically. 
","title":"SkyWalking Infra E2E Execute Guide","url":"/docs/skywalking-infra-e2e/next/en/setup/run-e2e-tests/"},{"content":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for local debugging and operation. GitHub Action: Suitable for automated execution in GitHub projects.  Command Through commands, you can execute a complete Controller.\n# e2e.yaml configuration file in current directory e2e run # or  # Specified the e2e.yaml file path e2e run -c /path/to/the/test/e2e.yaml Also, could run the separate step in the command line, these commands are all done by reading the configuration.\ne2e setup e2e trigger e2e verify e2e cleanup GitHub Action To use skywalking-infra-e2e in GitHub Actions, add a step in your GitHub workflow.\nThe working directory could be uploaded to GitHub Action Artifact after the task is completed, which contains environment variables and container logs in the environment.\n- name:Run E2E Testuses:apache/skywalking-infra-e2e@main # always prefer to use a revision instead of `main`.with:e2e-file:e2e.yaml # (required)need to run E2E file pathlog-dir:/path/to/log/dir # (Optional)Use `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;_\u0026lt;matrix_value\u0026gt;`(if have GHA matrix) or `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;` in GHA, and output logs into `\u0026lt;work_dir\u0026gt;/logs` out of GHA env, such as running locally.If you want to upload the log directory to the GitHub Action Artifact when this E2E test failure, you could define the below content in your GitHub Action Job.\n- name:Upload E2E Loguses:actions/upload-artifact@v2if:${{ failure() }} # Only upload the artifact when E2E testing failurewith:name:e2e-logpath:\u0026#34;${{ env.SW_INFRA_E2E_LOG_DIR }}\u0026#34;# The SkyWalking Infra E2E action sets SW_INFRA_E2E_LOG_DIR automatically. ","title":"SkyWalking Infra E2E Execute Guide","url":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/run-e2e-tests/"},{"content":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","title":"SkyWalking Java Agent","url":"/docs/skywalking-java/latest/readme/"},{"content":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","title":"SkyWalking Java Agent","url":"/docs/skywalking-java/next/readme/"},{"content":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","title":"SkyWalking Java Agent","url":"/docs/skywalking-java/v9.0.0/readme/"},{"content":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. 
Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","title":"SkyWalking Java Agent","url":"/docs/skywalking-java/v9.1.0/readme/"},{"content":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","title":"SkyWalking Java Agent","url":"/docs/skywalking-java/v9.2.0/readme/"},{"content":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  
Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. 
Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 docker.push.java21 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  ","title":"SkyWalking Java Agent Release Guide","url":"/docs/skywalking-java/latest/en/contribution/release-java-agent/"},{"content":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. 
Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. 
Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. 
Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 docker.push.java21 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  ","title":"SkyWalking Java Agent Release Guide","url":"/docs/skywalking-java/next/en/contribution/release-java-agent/"},{"content":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  
Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. 
(No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  
","title":"SkyWalking Java Agent Release Guide","url":"/docs/skywalking-java/v9.0.0/en/contribution/release-java-agent/"},{"content":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. 
apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. 
LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  ","title":"SkyWalking Java Agent Release Guide","url":"/docs/skywalking-java/v9.1.0/en/contribution/release-java-agent/"},{"content":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. 
If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. 
Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c (the last release). The notable changes since a.b.c include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 docker.push.java21 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  
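For the Upload to Apache svn step above, the upload can also be done entirely with the svn client. A minimal sketch, assuming it is run from the root of the skywalking-java checkout right after create_release.sh finished, so the artifacts are still under tools/releasing; adjust the paths if yours differ:
svn co --depth empty https://dist.apache.org/repos/dist/dev/skywalking/java-agent java-agent-dist
mkdir java-agent-dist/x.y.z
# source package plus its .asc and .sha512
cp tools/releasing/apache-skywalking-java-agent-x.y.z-src.tgz* java-agent-dist/x.y.z/
# binary package plus its .asc and .sha512
cp tools/releasing/apache-skywalking-java-agent-x.y.z/apache-skywalking-java-agent-x.y.z.tgz* java-agent-dist/x.y.z/
cd java-agent-dist
svn add x.y.z
svn commit -m "Add SkyWalking Java Agent x.y.z release candidate" --username YOUR_APACHE_ID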
","title":"SkyWalking Java Agent Release Guide","url":"/docs/skywalking-java/v9.2.0/en/contribution/release-java-agent/"},{"content":"SkyWalking Kubernetes Event Exporter User Guide SkyWalking Kubernetes Event Exporter is able to watch, filter, and send Kubernetes events into the Apache SkyWalking backend.\nDemo Step 1: Create a Local Kubernetes Cluster Please follow step 1 to 3 in getting started to create a cluster.\nStep 2: Deploy OAP server and Event Exporter Create the skywalking-system namespace.\n$ kubectl create namespace skywalking-system Deploy an OAP server and an event exporter.\ncat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: OAPServer metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-oap-server:9.5.0 service: template: type: ClusterIP --- apiVersion: operator.skywalking.apache.org/v1alpha1 kind: EventExporter metadata: name: skywalking-system namespace: skywalking-system spec: replicas: 1 config: | filters: - reason: \u0026#34;\u0026#34; message: \u0026#34;\u0026#34; minCount: 1 type: \u0026#34;\u0026#34; action: \u0026#34;\u0026#34; kind: \u0026#34;Pod|Service\u0026#34; namespace: \u0026#34;^skywalking-system$\u0026#34; name: \u0026#34;\u0026#34; service: \u0026#34;[^\\\\s]{1,}\u0026#34; exporters: - skywalking exporters: skywalking: template: source: service: \u0026#34;{{ .Service.Name }}\u0026#34; serviceInstance: \u0026#34;{{ .Pod.Name }}\u0026#34; endpoint: \u0026#34;\u0026#34; message: \u0026#34;{{ .Event.Message }}\u0026#34; address: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34; EOF Wait until both components are ready\u0026hellip;\n$ kubectl get pod -n skywalking-system NAME READY STATUS RESTARTS AGE skywalking-system-eventexporter-566db46fb6-npx8v 1/1 Running 0 50s skywalking-system-oap-68bd877f57-zs8hw 1/1 Running 0 50s Step 3: Check Reported Events We can verify k8s events is reported to the OAP server by using skywalking-cli.\nFirst, port-forward the OAP http service to your local machine.\n$ kubectl port-forward svc/skywalking-system-oap 12800:12800 -n skywalking-system Next, use swctl to list reported events in YAML format.\n$ swctl --display yaml event ls The output should contain k8s events of the OAP server.\nevents:- uuid:1d5bfe48-bc8d-4f5a-9680-188f59793459source:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Pulledtype:Normalmessage:Successfully pulled image \u0026#34;apache/skywalking-oap-server:9.5.0\u0026#34; in 6m4.108914335sparameters:[]starttime:1713793327000endtime:1713793327000layer:K8S- uuid:f576f6ad-748d-4cec -9260-6587c145550esource:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Createdtype:Normalmessage:Created container oapparameters:[]starttime:1713793327000endtime:1713793327000layer:K8S- uuid:0cec5b55-4cb0-4ff7-a670-a097609c531fsource:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Startedtype:Normalmessage:Started container oapparameters:[]starttime:1713793327000endtime:1713793327000layer:K8S- uuid:28f0d004-befe-4c27-a7b7-dfdc4dd755fasource:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Pullingtype:Normalmessage:Pulling image 
\n- uuid: 6d766801-5057-42c0-aa63-93ce1e201418 source: service: skywalking-system-oap serviceinstance: skywalking-system-oap-68bd877f57-cvkjb endpoint: \u0026#34;\u0026#34; name: Scheduled type: Normal message: Successfully assigned skywalking-system/skywalking-system-oap-68bd877f57-cvkjb to kind-worker parameters: [] starttime: 1713792963000 endtime: 1713792963000 layer: K8S\nWe can also verify this by checking the logs of the event exporter.\nkubectl logs -f skywalking-system-eventexporter-566db46fb6-npx8v -n skywalking-system ... DEBUG done: rendered event is: uuid:\u0026#34;8d8c2bd1-1812-4b0c-8237-560688366280\u0026#34; source:{service:\u0026#34;skywalking-system-oap\u0026#34; serviceInstance:\u0026#34;skywalking-system-oap-68bd877f57-zs8hw\u0026#34;} name:\u0026#34;Started\u0026#34; message:\u0026#34;Started container oap\u0026#34; startTime:1713795214000 endTime:1713795214000 layer:\u0026#34;K8S\u0026#34; Spec    name description default value     image Docker image of the event exporter. apache/skywalking-kubernetes-event-exporter:latest   replicas Number of event exporter pods. 1   config Configuration of filters and exporters in YAML format. \u0026quot;\u0026quot;    Please note: if you ignore the config field, no filters or exporters will be created.\nThis is because the EventExporter controller creates a configMap for all config values and attaches the configMap to the event exporter container as the configuration file. Ignoring the config field means an empty configuration file (with content \u0026quot;\u0026quot;) is provided to the event exporter.\nStatus    name description     availableReplicas Total number of available event exporter pods.   conditions Latest available observations of the underlying deployment\u0026rsquo;s current state.   configMapName Name of the underlying configMap.    
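If you want to read these Status fields back from a live deployment, kubectl can show them. This is only an illustrative sketch based on the demo above; it assumes the CRD is served under the group shown in the demo manifests and that the resource is named skywalking-system as deployed there:
# availableReplicas, conditions and configMapName appear under .status
kubectl get eventexporters.operator.skywalking.apache.org skywalking-system -n skywalking-system -o yaml
# the generated configMap carries the rendered filters/exporters configuration
kubectl get configmap -n skywalking-system
kubectl describe configmap CONFIGMAP-NAME-FROM-STATUS -n skywalking-system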
Configuration The event exporter supports reporting specific events by different exporters. We can add filter configs to choose which events we are interested in, and include exporter names in each filter config to tell the event exporter how to export the filtered events.\nAn example configuration is listed below:\nfilters: - reason: \u0026#34;\u0026#34; message: \u0026#34;\u0026#34; minCount: 1 type: \u0026#34;\u0026#34; action: \u0026#34;\u0026#34; kind: \u0026#34;Pod|Service\u0026#34; namespace: \u0026#34;^default$\u0026#34; name: \u0026#34;\u0026#34; service: \u0026#34;[^\\\\s]{1,}\u0026#34; exporters: - skywalking exporters: skywalking: template: source: service: \u0026#34;{{ .Service.Name }}\u0026#34; serviceInstance: \u0026#34;{{ .Pod.Name }}\u0026#34; endpoint: \u0026#34;\u0026#34; message: \u0026#34;{{ .Event.Message }}\u0026#34; address: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34;\nFilter Config    name description example     reason Filter events of the specified reason, regular expression like \u0026quot;Killing\\|Killed\u0026quot; is supported. \u0026quot;\u0026quot;   message Filter events of the specified message, regular expression like \u0026quot;Pulling container.*\u0026quot; is supported. \u0026quot;\u0026quot;   minCount Filter events whose count is \u0026gt;= the specified value. 1   type Filter events of the specified type, regular expression like \u0026quot;Normal\\|Error\u0026quot; is supported. \u0026quot;\u0026quot;   action Filter events of the specified action, regular expression is supported. \u0026quot;\u0026quot;   kind Filter events of the specified kind, regular expression like \u0026quot;Pod\\|Service\u0026quot; is supported. \u0026quot;Pod\\|Service\u0026quot;   namespace Filter events from the specified namespace, regular expression like \u0026quot;default\\|bookinfo\u0026quot; is supported, empty means all namespaces. \u0026quot;^default$\u0026quot;   name Filter events of the specified involved object name, regular expression like \u0026quot;.*bookinfo.*\u0026quot; is supported. \u0026quot;\u0026quot;   service Filter events belonging to services whose name is not empty. \u0026quot;[^\\\\s]{1,}\u0026quot;   exporters Events satisfying this filter can be exported to any of the exporters defined below. [\u0026quot;skywalking\u0026quot;]    SkyWalking Exporter Config The SkyWalking exporter exports the events to the Apache SkyWalking OAP server using gRPC.\n   name description example     address The SkyWalking backend address that this exporter will export to. \u0026quot;skywalking-system-oap.skywalking-system:11800\u0026quot;   enableTLS Whether to use TLS for gRPC server connection validation.  If TLS is enabled, the trustedCertPath is required, but clientCertPath and clientKeyPath are optional. false   clientCertPath Path of the X.509 certificate file. \u0026quot;\u0026quot;   clientKeyPath Path of the X.509 private key file. \u0026quot;\u0026quot;   trustedCertPath Path of the root certificate file. \u0026quot;\u0026quot;   insecureSkipVerify Whether a client verifies the server\u0026rsquo;s certificate chain and host name. Check tls.Config for more details. false   template The event template of the SkyWalking exporter; it can be composed of metadata like Event, Pod, and Service.    template.source Event source information.    template.source.service Service name, can be a template string. \u0026quot;{{ .Service.Name }}\u0026quot;   template.source.serviceInstance Service instance name, can be a template string. \u0026quot;{{ .Pod.Name }}\u0026quot;   template.source.endpoint Endpoint, can be a template string. \u0026quot;\u0026quot;   template.message Message format, can be a template string. \u0026quot;{{ .Event.Message }}\u0026quot;    Console Exporter Config The console exporter exports the events into console logs; it is typically used for debugging.\n   name description example     template The event template of the exporter; it can be composed of metadata like Event, Pod, and Service.    template.source Event source information.    template.source.service Service name, can be a template string. \u0026quot;{{ .Service.Name }}\u0026quot;   template.source.serviceInstance Service instance name, can be a template string. \u0026quot;{{ .Pod.Name }}\u0026quot;   template.source.endpoint Endpoint, can be a template string. \u0026quot;\u0026quot;   template.message Message format, can be a template string. \u0026quot;{{ .Event.Message }}\u0026quot;    ","title":"SkyWalking Kubernetes Event Exporter User Guide","url":"/docs/skywalking-swck/next/examples/event-exporter/"},{"content":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the SkyWalking community!\nIn here, you could learn how to set up PHP agent for the PHP services.\n","title":"SkyWalking PHP Agent","url":"/docs/skywalking-php/latest/readme/"},{"content":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. 
Welcome to the SkyWalking community!\nIn here, you could learn how to set up PHP agent for the PHP services.\n","title":"SkyWalking PHP Agent","url":"/docs/skywalking-php/next/readme/"},{"content":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the SkyWalking community!\nIn here, you could learn how to set up PHP agent for the PHP services.\n","title":"SkyWalking PHP Agent","url":"/docs/skywalking-php/v0.7.0/readme/"},{"content":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to the SkyWalking community!\nThe Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects.\nThis documentation covers a number of ways to set up the Python agent for various use cases.\n \nCapabilities The following table demonstrates the currently supported telemetry collection capabilities in SkyWalking Python agent:\n   Reporter Supported? Details     Trace ✅ (default: ON) Automatic instrumentation + Manual SDK   Log ✅ (default: ON) Direct reporter only. (Tracing context in log planned)   Meter ✅ (default: ON) Meter API + Automatic PVM metrics   Event ❌ (Planned) Report lifecycle events of your awesome Python application   Profiling ✅ (default: ON) Threading and Greenlet Profiler    Live Demo  Find the live demo with Python agent on our website. Follow the showcase to set up preview deployment quickly.  ","title":"SkyWalking Python Agent","url":"/docs/skywalking-python/latest/readme/"},{"content":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to the SkyWalking community!\nThe Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects.\nThis documentation covers a number of ways to set up the Python agent for various use cases.\n \nCapabilities The following table demonstrates the currently supported telemetry collection capabilities in SkyWalking Python agent:\n   Reporter Supported? Details     Trace ✅ (default: ON) Automatic instrumentation + Manual SDK   Log ✅ (default: ON) Direct reporter only. (Tracing context in log planned)   Meter ✅ (default: ON) Meter API + Automatic PVM metrics   Event ❌ (Planned) Report lifecycle events of your awesome Python application   Profiling ✅ (default: ON) Threading and Greenlet Profiler    Live Demo  Find the live demo with Python agent on our website. Follow the showcase to set up preview deployment quickly.  ","title":"SkyWalking Python Agent","url":"/docs/skywalking-python/next/readme/"},{"content":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to the SkyWalking community!\nThe Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects.\nThis documentation covers a number of ways to set up the Python agent for various use cases.\n \nCapabilities The following table demonstrates the currently supported telemetry collection capabilities in SkyWalking Python agent:\n   Reporter Supported? Details     Trace ✅ (default: ON) Automatic instrumentation + Manual SDK   Log ✅ (default: ON) Direct reporter only. (Tracing context in log planned)   Meter ✅ (default: ON) Meter API + Automatic PVM metrics   Event ❌ (Planned) Report lifecycle events of your awesome Python application   Profiling ✅ (default: ON) Threading and Greenlet Profiler    Live Demo  Find the live demo with Python agent on our website. 
Follow the showcase to set up preview deployment quickly.  ","title":"SkyWalking Python Agent","url":"/docs/skywalking-python/v1.0.1/readme/"},{"content":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is the recommended way of running your application with Python agent, the CLI is well-tested and used by all agent E2E \u0026amp; Plugin tests.\nIn releases before 0.7.0, you would at least need to add the following lines to your applications to get the agent attached and running, this can be tedious in many cases due to large number of services, DevOps practices and can cause problem when used with prefork servers.\nfrom skywalking import agent, config config.init(SomeConfig) agent.start() The SkyWalking Python agent implements a command-line interface that can be utilized to attach the agent to your awesome applications during deployment without changing any application code, just like the SkyWalking Java Agent.\n The following feature is added in v1.0.0 as experimental flag, so you need to specify the -p flag to sw-python run -p. In the future, this flag will be removed and agent will automatically enable prefork/fork support in a more comprehensive manner.\n Especially with the new automatic postfork injection feature, you no longer have to worry about threading and forking incompatibility.\nCheck How to use with uWSGI and How to use with Gunicorn to understand the detailed background on what is post_fork, why you need them and how to easily overcome the trouble with sw-python CLI.\nYou should still read the legacy way to integrate agent in case the sw-python CLI is not working for you.\nUsage Upon successful installation of the SkyWalking Python agent via pip, a command-line script sw-python is installed in your environment (virtual env preferred).\n run sw-python to see if it is available, you will need to pass configuration by environment variables.\n For example: export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800\nThe run option The sw-python CLI provides a run option, which you can use to execute your applications (either begins with the python command or Python-based programs like gunicorn on your path) just like you invoke them normally, plus a prefix, the following example demonstrates the usage.\nIf your previous command to run your gunicorn/uwsgi application is:\ngunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nuwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nPlease change it to (the -p option starts one agent in each process, which is the correct behavior):\nImportant: if the call to uwsgi/gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nsw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nThe SkyWalking Python agent will start up along with all your application workers shortly.\nNote that sw-python also work with spawned subprocess (os.exec*/subprocess) as long as the PYTHONPATH is inherited.\nAdditionally, sw-python started agent works well with os.fork when your application forks workers, as long as the 
SW_AGENT_EXPERIMENTAL_FORK_SUPPORT is turned on. (It will be automatically turned on when gunicorn is detected)\nConfiguring the agent You would normally want to provide additional configurations other than the default ones.\nThrough environment variables The currently supported method is to provide the environment variables listed and explained in the Environment Variables List.\nThrough a sw-config.toml (TBD) Currently, only environment variable configuration is supported; an optional toml configuration is to be implemented.\nEnabling CLI DEBUG mode Note the CLI is a feature that manipulates the Python interpreter bootstrap behaviour, there could be unsupported cases.\nIf you encounter unexpected problems, please turn on the DEBUG mode by adding the -d or --debug flag to your sw-python command, as shown below.\nFrom: sw-python run command\nTo: sw-python -d run command\nPlease attach the debug logs to the SkyWalking Issues section if you believe it is a bug, idea discussions and pull requests are always welcomed.\nAdditional Remarks When executing commands with sw-python run command, your command\u0026rsquo;s Python interpreter will pick up the SkyWalking loader module.\nIt is not safe to attach SkyWalking Agent to those commands that resides in another Python installation because incompatible Python versions and mismatched SkyWalking versions can cause problems. Therefore, any attempt to pass a command that uses a different Python interpreter/ environment will not bring up SkyWalking Python Agent even if another SkyWalking Python agent is installed there(no matter the version), and will force exit with an error message indicating the reasoning.\nDisabling spawned processes from starting new agents Sometimes you don\u0026rsquo;t actually need the agent to monitor anything in a new process (when it\u0026rsquo;s not a web service worker). (here we mean process spawned by subprocess and os.exec*(), os.fork() is not controlled by this flag but experimental_fork_support)\nIf you do not need the agent to get loaded for application child processes, you can turn off the behavior by setting an environment variable.\nSW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE to False\nNote the auto bootstrap depends on the environment inherited by child processes, thus prepending a new sitecustomize path to or removing the loader path from the PYTHONPATH could also prevent the agent from loading in a child process.\nKnown limitations  The CLI may not work properly with arguments that involve double quotation marks in some shells. The CLI and bootstrapper stdout logs could get messy in Windows shells.  
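Putting the pieces above together, a typical session looks like the sketch below. Nothing here is new: SW_AGENT_COLLECTOR_BACKEND_SERVICES and the gunicorn command line are the examples already shown on this page, repeated in one place:
# point the agent at your OAP backend
export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800
# start the application with one agent per worker process
sw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088
# if something looks wrong, repeat the same command with CLI debug logging
sw-python -d run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088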
","title":"SkyWalking Python Agent Command Line Interface (sw-python CLI)","url":"/docs/skywalking-python/latest/en/setup/cli/"},{"content":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is the recommended way of running your application with Python agent, the CLI is well-tested and used by all agent E2E \u0026amp; Plugin tests.\nIn releases before 0.7.0, you would at least need to add the following lines to your applications to get the agent attached and running, this can be tedious in many cases due to large number of services, DevOps practices and can cause problem when used with prefork servers.\nfrom skywalking import agent, config config.init(SomeConfig) agent.start() The SkyWalking Python agent implements a command-line interface that can be utilized to attach the agent to your awesome applications during deployment without changing any application code, just like the SkyWalking Java Agent.\n The following feature is added in v1.0.0 as experimental flag, so you need to specify the -p flag to sw-python run -p. In the future, this flag will be removed and agent will automatically enable prefork/fork support in a more comprehensive manner.\n Especially with the new automatic postfork injection feature, you no longer have to worry about threading and forking incompatibility.\nCheck How to use with uWSGI and How to use with Gunicorn to understand the detailed background on what is post_fork, why you need them and how to easily overcome the trouble with sw-python CLI.\nYou should still read the legacy way to integrate agent in case the sw-python CLI is not working for you.\nUsage Upon successful installation of the SkyWalking Python agent via pip, a command-line script sw-python is installed in your environment (virtual env preferred).\n run sw-python to see if it is available, you will need to pass configuration by environment variables.\n For example: export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800\nThe run option The sw-python CLI provides a run option, which you can use to execute your applications (either begins with the python command or Python-based programs like gunicorn on your path) just like you invoke them normally, plus a prefix, the following example demonstrates the usage.\nIf your previous command to run your gunicorn/uwsgi application is:\ngunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nuwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nPlease change it to (the -p option starts one agent in each process, which is the correct behavior):\nImportant: if the call to uwsgi/gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nsw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nThe SkyWalking Python agent will start up along with all your application workers shortly.\nNote that sw-python also work with spawned subprocess (os.exec*/subprocess) as long as the PYTHONPATH is inherited.\nAdditionally, sw-python started agent works well with os.fork when your application forks workers, as long as the 
SW_AGENT_EXPERIMENTAL_FORK_SUPPORT is turned on. (It will be automatically turned on when gunicorn is detected)\nConfiguring the agent You would normally want to provide additional configurations other than the default ones.\nThrough environment variables The currently supported method is to provide the environment variables listed and explained in the Environment Variables List.\nThrough a sw-config.toml (TBD) Currently, only environment variable configuration is supported; an optional toml configuration is to be implemented.\nEnabling CLI DEBUG mode Note the CLI is a feature that manipulates the Python interpreter bootstrap behaviour, there could be unsupported cases.\nIf you encounter unexpected problems, please turn on the DEBUG mode by adding the -d or --debug flag to your sw-python command, as shown below.\nFrom: sw-python run command\nTo: sw-python -d run command\nPlease attach the debug logs to the SkyWalking Issues section if you believe it is a bug, idea discussions and pull requests are always welcomed.\nAdditional Remarks When executing commands with sw-python run command, your command\u0026rsquo;s Python interpreter will pick up the SkyWalking loader module.\nIt is not safe to attach SkyWalking Agent to those commands that resides in another Python installation because incompatible Python versions and mismatched SkyWalking versions can cause problems. Therefore, any attempt to pass a command that uses a different Python interpreter/ environment will not bring up SkyWalking Python Agent even if another SkyWalking Python agent is installed there(no matter the version), and will force exit with an error message indicating the reasoning.\nDisabling spawned processes from starting new agents Sometimes you don\u0026rsquo;t actually need the agent to monitor anything in a new process (when it\u0026rsquo;s not a web service worker). (here we mean process spawned by subprocess and os.exec*(), os.fork() is not controlled by this flag but experimental_fork_support)\nIf you do not need the agent to get loaded for application child processes, you can turn off the behavior by setting an environment variable.\nSW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE to False\nNote the auto bootstrap depends on the environment inherited by child processes, thus prepending a new sitecustomize path to or removing the loader path from the PYTHONPATH could also prevent the agent from loading in a child process.\nKnown limitations  The CLI may not work properly with arguments that involve double quotation marks in some shells. The CLI and bootstrapper stdout logs could get messy in Windows shells.  
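As a concrete illustration of the two switches discussed above, the sketch below combines them with the uWSGI example from earlier on this page; the variable names come from the text above, while the exact True/False values follow the usual boolean-style environment convention and should be checked against the Environment Variables List:
# keep subprocess/os.exec* children from bootstrapping their own agents
export SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE=False
# explicitly enable os.fork support (turned on automatically when gunicorn is detected)
export SW_AGENT_EXPERIMENTAL_FORK_SUPPORT=True
sw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app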
","title":"SkyWalking Python Agent Command Line Interface (sw-python CLI)","url":"/docs/skywalking-python/next/en/setup/cli/"},{"content":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is the recommended way of running your application with Python agent, the CLI is well-tested and used by all agent E2E \u0026amp; Plugin tests.\nIn releases before 0.7.0, you would at least need to add the following lines to your applications to get the agent attached and running, this can be tedious in many cases due to large number of services, DevOps practices and can cause problem when used with prefork servers.\nfrom skywalking import agent, config config.init(SomeConfig) agent.start() The SkyWalking Python agent implements a command-line interface that can be utilized to attach the agent to your awesome applications during deployment without changing any application code, just like the SkyWalking Java Agent.\n The following feature is added in v1.0.0 as experimental flag, so you need to specify the -p flag to sw-python run -p. In the future, this flag will be removed and agent will automatically enable prefork/fork support in a more comprehensive manner.\n Especially with the new automatic postfork injection feature, you no longer have to worry about threading and forking incompatibility.\nCheck How to use with uWSGI and How to use with Gunicorn to understand the detailed background on what is post_fork, why you need them and how to easily overcome the trouble with sw-python CLI.\nYou should still read the legacy way to integrate agent in case the sw-python CLI is not working for you.\nUsage Upon successful installation of the SkyWalking Python agent via pip, a command-line script sw-python is installed in your environment (virtual env preferred).\n run sw-python to see if it is available, you will need to pass configuration by environment variables.\n For example: export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800\nThe run option The sw-python CLI provides a run option, which you can use to execute your applications (either begins with the python command or Python-based programs like gunicorn on your path) just like you invoke them normally, plus a prefix, the following example demonstrates the usage.\nIf your previous command to run your gunicorn/uwsgi application is:\ngunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nuwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nPlease change it to (the -p option starts one agent in each process, which is the correct behavior):\nImportant: if the call to uwsgi/gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nsw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nThe SkyWalking Python agent will start up along with all your application workers shortly.\nNote that sw-python also work with spawned subprocess (os.exec*/subprocess) as long as the PYTHONPATH is inherited.\nAdditionally, sw-python started agent works well with os.fork when your application forks workers, as long as the 
SW_AGENT_EXPERIMENTAL_FORK_SUPPORT is turned on. (It will be automatically turned on when gunicorn is detected)\nConfiguring the agent You would normally want to provide additional configurations other than the default ones.\nThrough environment variables The currently supported method is to provide the environment variables listed and explained in the Environment Variables List.\nThrough a sw-config.toml (TBD) Currently, only environment variable configuration is supported; an optional toml configuration is to be implemented.\nEnabling CLI DEBUG mode Note the CLI is a feature that manipulates the Python interpreter bootstrap behaviour, there could be unsupported cases.\nIf you encounter unexpected problems, please turn on the DEBUG mode by adding the -d or --debug flag to your sw-python command, as shown below.\nFrom: sw-python run command\nTo: sw-python -d run command\nPlease attach the debug logs to the SkyWalking Issues section if you believe it is a bug, idea discussions and pull requests are always welcomed.\nAdditional Remarks When executing commands with sw-python run command, your command\u0026rsquo;s Python interpreter will pick up the SkyWalking loader module.\nIt is not safe to attach SkyWalking Agent to those commands that resides in another Python installation because incompatible Python versions and mismatched SkyWalking versions can cause problems. Therefore, any attempt to pass a command that uses a different Python interpreter/ environment will not bring up SkyWalking Python Agent even if another SkyWalking Python agent is installed there(no matter the version), and will force exit with an error message indicating the reasoning.\nDisabling spawned processes from starting new agents Sometimes you don\u0026rsquo;t actually need the agent to monitor anything in a new process (when it\u0026rsquo;s not a web service worker). (here we mean process spawned by subprocess and os.exec*(), os.fork() is not controlled by this flag but experimental_fork_support)\nIf you do not need the agent to get loaded for application child processes, you can turn off the behavior by setting an environment variable.\nSW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE to False\nNote the auto bootstrap depends on the environment inherited by child processes, thus prepending a new sitecustomize path to or removing the loader path from the PYTHONPATH could also prevent the agent from loading in a child process.\nKnown limitations  The CLI may not work properly with arguments that involve double quotation marks in some shells. The CLI and bootstrapper stdout logs could get messy in Windows shells.  
","title":"SkyWalking Python Agent Command Line Interface (sw-python CLI)","url":"/docs/skywalking-python/v1.0.1/en/setup/cli/"},{"content":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented automatically, SkyWalking also provides some APIs to enable manual instrumentation.\nCreate Spans The code snippet below shows how to create entry span, exit span and local span.\nfrom skywalking import Component from skywalking.trace.context import SpanContext, get_context from skywalking.trace.tags import Tag context: SpanContext = get_context() # get a tracing context # create an entry span, by using `with` statement, # the span automatically starts/stops when entering/exiting the context with context.new_entry_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.component = Component.Flask # the span automatically stops when exiting the `with` context class TagSinger(Tag): key = \u0026#39;Singer\u0026#39; with context.new_exit_span(op=\u0026#39;https://github.com/apache\u0026#39;, peer=\u0026#39;localhost:8080\u0026#39;, component=Component.Flask) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) with context.new_local_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) Decorators from time import sleep from skywalking import Component from skywalking.decorators import trace, runnable from skywalking.trace.context import SpanContext, get_context @trace() # the operation name is the method name(\u0026#39;some_other_method\u0026#39;) by default def some_other_method(): sleep(1) @trace(op=\u0026#39;awesome\u0026#39;) # customize the operation name to \u0026#39;awesome\u0026#39; def some_method(): some_other_method() @trace(op=\u0026#39;async_functions_are_also_supported\u0026#39;) async def async_func(): return \u0026#39;asynchronous\u0026#39; @trace() async def async_func2(): return await async_func() @runnable() # cross thread propagation def some_method(): some_other_method() from threading import Thread t = Thread(target=some_method) t.start() context: SpanContext = get_context() with context.new_entry_span(op=str(\u0026#39;https://github.com/apache/skywalking\u0026#39;)) as span: span.component = Component.Flask some_method() ","title":"SkyWalking Python Instrumentation API","url":"/docs/skywalking-python/latest/en/setup/advanced/api/"},{"content":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented automatically, SkyWalking also provides some APIs to enable manual instrumentation.\nCreate Spans The code snippet below shows how to create entry span, exit span and local span.\nfrom skywalking import Component from skywalking.trace.context import SpanContext, get_context from skywalking.trace.tags import Tag context: SpanContext = get_context() # get a tracing context # create an entry span, by using `with` statement, # the span automatically starts/stops when entering/exiting the context with context.new_entry_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.component = Component.Flask # the span automatically stops when exiting the `with` context class TagSinger(Tag): key = \u0026#39;Singer\u0026#39; with context.new_exit_span(op=\u0026#39;https://github.com/apache\u0026#39;, peer=\u0026#39;localhost:8080\u0026#39;, component=Component.Flask) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) with context.new_local_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: 
span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) Decorators from time import sleep from skywalking import Component from skywalking.decorators import trace, runnable from skywalking.trace.context import SpanContext, get_context @trace() # the operation name is the method name(\u0026#39;some_other_method\u0026#39;) by default def some_other_method(): sleep(1) @trace(op=\u0026#39;awesome\u0026#39;) # customize the operation name to \u0026#39;awesome\u0026#39; def some_method(): some_other_method() @trace(op=\u0026#39;async_functions_are_also_supported\u0026#39;) async def async_func(): return \u0026#39;asynchronous\u0026#39; @trace() async def async_func2(): return await async_func() @runnable() # cross thread propagation def some_method(): some_other_method() from threading import Thread t = Thread(target=some_method) t.start() context: SpanContext = get_context() with context.new_entry_span(op=str(\u0026#39;https://github.com/apache/skywalking\u0026#39;)) as span: span.component = Component.Flask some_method() ","title":"SkyWalking Python Instrumentation API","url":"/docs/skywalking-python/next/en/setup/advanced/api/"},{"content":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented automatically, SkyWalking also provides some APIs to enable manual instrumentation.\nCreate Spans The code snippet below shows how to create entry span, exit span and local span.\nfrom skywalking import Component from skywalking.trace.context import SpanContext, get_context from skywalking.trace.tags import Tag context: SpanContext = get_context() # get a tracing context # create an entry span, by using `with` statement, # the span automatically starts/stops when entering/exiting the context with context.new_entry_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.component = Component.Flask # the span automatically stops when exiting the `with` context class TagSinger(Tag): key = \u0026#39;Singer\u0026#39; with context.new_exit_span(op=\u0026#39;https://github.com/apache\u0026#39;, peer=\u0026#39;localhost:8080\u0026#39;, component=Component.Flask) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) with context.new_local_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) Decorators from time import sleep from skywalking import Component from skywalking.decorators import trace, runnable from skywalking.trace.context import SpanContext, get_context @trace() # the operation name is the method name(\u0026#39;some_other_method\u0026#39;) by default def some_other_method(): sleep(1) @trace(op=\u0026#39;awesome\u0026#39;) # customize the operation name to \u0026#39;awesome\u0026#39; def some_method(): some_other_method() @trace(op=\u0026#39;async_functions_are_also_supported\u0026#39;) async def async_func(): return \u0026#39;asynchronous\u0026#39; @trace() async def async_func2(): return await async_func() @runnable() # cross thread propagation def some_method(): some_other_method() from threading import Thread t = Thread(target=some_method) t.start() context: SpanContext = get_context() with context.new_entry_span(op=str(\u0026#39;https://github.com/apache/skywalking\u0026#39;)) as span: span.component = Component.Flask some_method() ","title":"SkyWalking Python Instrumentation API","url":"/docs/skywalking-python/v1.0.1/en/setup/advanced/api/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and 
start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. 
See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/latest/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
Create a folder and name it after the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours. Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in the staging repository are published with .asc, .md5, and .sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz. Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in the source code.)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move the source code tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  
\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release the repository in the Nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c (the last release). The notable changes since a.b.c include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://skywalking.apache.org/docs/main/vx.y.z/en/changes/changes/ Apache SkyWalking website: http://skywalking.apache.org/ Downloads: https://skywalking.apache.org/downloads/#SkyWalkingAPM Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow that automatically publishes the Docker images to Docker Hub after you set the version from pre-release to release. All you need to do is watch that workflow and confirm that it succeeds; if it fails, use the following steps to publish the Docker images from your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  
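For illustration only, a minimal sketch of this clean-up step, assuming the new release is x.y.z and the previous release was a.b.c (both placeholders). Anything removed from dist/release remains available under https://archive.apache.org/dist/skywalking/, which is where the website download links should point after the switch.

```bash
# Sketch: confirm the old release is reachable in the archive before removing it from dist/release.
curl -sI https://archive.apache.org/dist/skywalking/a.b.c/ | head -n 1

# Remove the superseded release from the release area; this commits directly to the dist SVN.
svn delete -m "Remove old release a.b.c, superseded by x.y.z" \
  https://dist.apache.org/repos/dist/release/skywalking/a.b.c
```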
","title":"SkyWalking release guide","url":"/docs/main/next/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. 
Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.zip Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-rocketbot-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.zip Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-rocketbot-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. 
Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.0.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... 
\u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. 
We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. 
LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. 
apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.1.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. 
Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. 
Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. 
http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.2.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.3.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. 
If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
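Returning to the "Upload to Apache svn" step above, one possible way to script it is shown below. This is a sketch only: x.y.z, the artifact names, and the sw-dev working-copy name are placeholders.
shasum -a 512 apache-skywalking-bin-x.y.z.tar.gz > apache-skywalking-bin-x.y.z.tar.gz.sha512
svn co --depth empty https://dist.apache.org/repos/dist/dev/skywalking/ sw-dev
mkdir sw-dev/x.y.z
cp apache-skywalking-x.y.z-src.tar.gz* apache-skywalking-bin-x.y.z.tar.gz* sw-dev/x.y.z/
cd sw-dev && svn add x.y.z && svn commit -m "Add SkyWalking x.y.z release candidate"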
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. 
Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.4.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. 
If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
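As a concrete form of the gpg tip above (warming up the agent so the passphrase is cached before release:prepare signs artifacts), the file name here is arbitrary and only used as signing input.
touch /tmp/warmup.txt
gpg --sign /tmp/warmup.txt   # prompts for the passphrase once; gpg-agent caches it
./mvnw release:clean
./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall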
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. 
Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.5.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. 
If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
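Relating to the Nexus staging step above, downloading the closed staging artifacts might look like the sketch below. Here xxxx is the staging repository id shown in the Nexus UI, and the artifact file names are assumptions patterned after the DIST example later in this guide.
export V=x.y.z
export REPO_URL=https://repository.apache.org/content/repositories/xxxx
for ext in tar.gz tar.gz.asc tar.gz.sha1 zip zip.asc zip.sha1; do
  curl -fLO "$REPO_URL/org/apache/skywalking/apache-skywalking-apm/$V/apache-skywalking-apm-$V.$ext"
done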
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. 
Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.6.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. 
If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. 
Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.7.0/en/guides/how-to-release/"},{"content":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. We can get all info of one client call.\n","title":"Skywalking with Kotlin coroutine","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/"},{"content":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. 
In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. We can get all info of one client call.\n","title":"Skywalking with Kotlin coroutine","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/"},{"content":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. We can get all info of one client call.\n","title":"Skywalking with Kotlin coroutine","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/"},{"content":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  
Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. We can get all info of one client call.\n","title":"Skywalking with Kotlin coroutine","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/"},{"content":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. We can get all info of one client call.\n","title":"Skywalking with Kotlin coroutine","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/"},{"content":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/latest/en/setup/backend/slow-cache-command/"},{"content":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. 
Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/next/en/setup/backend/slow-cache-command/"},{"content":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/v9.3.0/en/setup/backend/slow-cache-command/"},{"content":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/v9.4.0/en/setup/backend/slow-cache-command/"},{"content":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. 
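For illustration only, here is a minimal Java sketch of how such a threshold string could be parsed and applied. The class and method names are hypothetical and are not the OAP's actual implementation; only the setting format default:20,redis:10 comes from the documentation above.

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical helper: parse "default:20,redis:10" into per-type thresholds (ms)
// and decide whether a sampled command counts as "slow".
public final class SlowThresholds {
    private final Map<String, Integer> thresholds = new HashMap<>();

    public SlowThresholds(String settings) {
        for (String entry : settings.split(",")) {
            String[] kv = entry.split(":");
            thresholds.put(kv[0].trim(), Integer.parseInt(kv[1].trim()));
        }
    }

    // A command is slow when its duration exceeds the threshold for its type,
    // falling back to the reserved "default" type when the type is not listed.
    public boolean isSlow(String type, long durationMillis) {
        int threshold = thresholds.getOrDefault(type, thresholds.getOrDefault("default", 0));
        return durationMillis > threshold;
    }
}
```

With the default settings, a redis command taking 15ms would be reported as slow, while one taking 8ms would not.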
Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/v9.5.0/en/setup/backend/slow-cache-command/"},{"content":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/v9.6.0/en/setup/backend/slow-cache-command/"},{"content":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/v9.7.0/en/setup/backend/slow-cache-command/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/latest/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/next/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial in order for you to identify bottlenecks of a system which relies on the database.\nSlow DB statements are based on sampling. Right now, the core samples the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote: The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms.\n","title":"Slow Database Statement","url":"/docs/main/v9.0.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote: The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms.\n","title":"Slow Database Statement","url":"/docs/main/v9.1.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. 
Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote: The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms.\n","title":"Slow Database Statement","url":"/docs/main/v9.2.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/v9.3.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/v9.4.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. 
Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/v9.5.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/v9.6.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/v9.7.0/en/setup/backend/slow-db-statement/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
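As a rough illustration of how a receiver hands data to the core, here is a minimal sketch. It assumes the OAP's ModuleManager/CoreModule service-lookup pattern, and the receiver class and its field values are made up for the example; only SourceReceiver, ServiceInstance, and its setters come from the documentation.

```java
import org.apache.skywalking.oap.server.core.CoreModule;
import org.apache.skywalking.oap.server.core.source.ServiceInstance;
import org.apache.skywalking.oap.server.core.source.SourceReceiver;
import org.apache.skywalking.oap.server.library.module.ModuleManager;

// Hypothetical receiver fragment: obtain SourceReceiver from the core module,
// build a source from the original metric data, and send it for OAL analysis.
public class MyMetricReceiver {
    private final SourceReceiver sourceReceiver;

    public MyMetricReceiver(ModuleManager moduleManager) {
        this.sourceReceiver = moduleManager.find(CoreModule.NAME)
                                           .provider()
                                           .getService(SourceReceiver.class);
    }

    public void onMetric(int serviceInstanceId, int latencyMillis, boolean success) {
        ServiceInstance source = new ServiceInstance();
        source.setId(serviceInstanceId);   // example values only
        source.setLatency(latencyMillis);
        source.setStatus(success);
        sourceReceiver.receive(source);    // the OAL runtime aggregates it into metrics
    }
}
```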
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/latest/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/next/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.0.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.1.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.2.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.3.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.4.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.5.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.6.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.7.0/en/guides/source-extension/"},{"content":"Spring annotation plugin This plugin allows to trace all methods of beans in Spring context, which are annotated with @Bean, @Service, @Component and @Repository.\n Why does this plugin optional?  Tracing all methods in Spring context all creates a lot of spans, which also spend more CPU, memory and network. Of course you want to have spans as many as possible, but please make sure your system payload can support these.\n","title":"Spring annotation plugin","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/"},{"content":"Spring annotation plugin This plugin allows to trace all methods of beans in Spring context, which are annotated with @Bean, @Service, @Component and @Repository.\n Why does this plugin optional?  Tracing all methods in Spring context all creates a lot of spans, which also spend more CPU, memory and network. 
Of course you want to have spans as many as possible, but please make sure your system payload can support these.\n","title":"Spring annotation plugin","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/"},{"content":"Spring annotation plugin This plugin allows to trace all methods of beans in Spring context, which are annotated with @Bean, @Service, @Component and @Repository.\n Why does this plugin optional?  Tracing all methods in Spring context all creates a lot of spans, which also spend more CPU, memory and network. Of course you want to have spans as many as possible, but please make sure your system payload can support these.\n","title":"Spring annotation plugin","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/"},{"content":"Spring annotation plugin This plugin allows to trace all methods of beans in Spring context, which are annotated with @Bean, @Service, @Component and @Repository.\n Why does this plugin optional?  Tracing all methods in Spring context all creates a lot of spans, which also spend more CPU, memory and network. Of course you want to have spans as many as possible, but please make sure your system payload can support these.\n","title":"Spring annotation plugin","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/"},{"content":"Spring annotation plugin This plugin allows to trace all methods of beans in Spring context, which are annotated with @Bean, @Service, @Component and @Repository.\n Why does this plugin optional?  Tracing all methods in Spring context all creates a lot of spans, which also spend more CPU, memory and network. Of course you want to have spans as many as possible, but please make sure your system payload can support these.\n","title":"Spring annotation plugin","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/"},{"content":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. 
If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Add UI dashboard   Open the dashboard view. Click edit button to edit the templates.\n  Create a new template. Template type: Standard -\u0026gt; Template Configuration: Spring -\u0026gt; Input the Template Name.\n  Click view button. You\u0026rsquo;ll see the spring sleuth dashboard.\n  Supported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"Spring sleuth setup","url":"/docs/main/v9.0.0/en/setup/backend/spring-sleuth-setup/"},{"content":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Add UI dashboard   Open the dashboard view. Click edit button to edit the templates.\n  Create a new template. Template type: Standard -\u0026gt; Template Configuration: Spring -\u0026gt; Input the Template Name.\n  Click view button. You\u0026rsquo;ll see the spring sleuth dashboard.\n  Supported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. 
JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"Spring sleuth setup","url":"/docs/main/v9.1.0/en/setup/backend/spring-sleuth-setup/"},{"content":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"Spring sleuth setup","url":"/docs/main/v9.2.0/en/setup/backend/spring-sleuth-setup/"},{"content":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  
\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create the Skywalking meter registry in Spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs if you need them; otherwise use the no-args constructor  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable the meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configured the meter config file in the backend, please follow the customized dashboard documentation to add the metrics to the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"Spring sleuth setup","url":"/docs/main/v9.3.0/en/setup/backend/spring-sleuth-setup/"},{"content":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing systems and the Dapper[1] paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. 
More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. 
So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition means the analysis cannot react to a dependency mutation at the second level; in production, the window duration sometimes has to be set to 3-5 minutes. Also, because of the window-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. In order to make the analysis as fast as possible, the analysis period is less than 5 minutes, but some spans can’t match their parents or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce new span and context propagation models, with a new analysis method. These new models add the peer network address (IP or hostname) used at the client side, the client service instance name and the client service name into the context propagation model. This context passes with the RPC call from client to server, just as the original trace id and span id do in the existing tracing system, and is collected in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the cross-cluster-node data sync, the client-side span analysis can use this alias metadata to generate the client-server relationship directly too. By using these new models and this method in Apache SkyWalking, we remove the time window-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds of latency and consistent accuracy.\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp. Service and Service Instance names of the current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. A reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields to the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in networking related libraries. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent a client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address used by the client library to access the server.\nThese fields are usually optional in many tracing systems, but in STAM, we require them in all RPC cases.\nThe Context Model is used to propagate the client-side information to the server side, carried by the original RPC call, usually in a header, such as an HTTP header or MQ header. In the old design, it carries the trace id and span id of the client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of the exit span. The names could be literal strings. All these extra fields help to remove the blocking in the streaming analysis. 
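As a minimal, non-normative sketch of the enhanced models just described (the class and field names below are illustrative, not the actual SkyWalking types):
// Span type enumeration added by STAM.
enum SpanType { ENTRY, EXIT, LOCAL }

// Extra fields a STAM span carries on top of the traditional trace id, span id, tags and logs.
final class StamSpan {
    SpanType type;          // ENTRY for server-side libraries, EXIT for client-side libraries
    String peer;            // peer network address, e.g. the address used by the client to reach the server
    String service;         // service name of the reporting side
    String serviceInstance; // service instance name of the reporting side
}

// Context carried inside the RPC header (HTTP/MQ), in addition to the trace id and span id.
final class StamContextCarrier {
    String parentService;         // client-side service name
    String parentServiceInstance; // client-side service instance name
    String peer;                  // network address the client used for this call
}
Only the three parent/peer fields are new relative to the old context model, which is why the bandwidth overhead discussed next stays small.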
Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. 
After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. 
As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System","url":"/docs/main/latest/en/papers/stam/"},{"content":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. 
Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. 
The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. 
Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. 
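To make the entry-span and exit-span rules above concrete, here is a rough sketch of the streaming analysis, simplified to the service level. The class and method names are invented for illustration, the in-memory map stands in for the cluster-synced alias storage, and the multi-alias case is omitted; this is not the actual SkyWalking OAP code.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative streaming topology analysis of STAM spans.
final class TopologyAnalyzer {
    // peer network address -\u0026gt; server service name, synced across analysis nodes in a real deployment
    private final Map\u0026lt;String, String\u0026gt; serviceAlias = new ConcurrentHashMap\u0026lt;\u0026gt;();

    // Entry (server-side) span: the carried context already names the client, so traffic is emitted immediately.
    void onEntrySpan(String service, String parentService, String peerUsedByClient) {
        serviceAlias.put(peerUsedByClient, service); // register the peer address as an alias of this service
        emit(parentService, service);
    }

    // Exit (client-side) span: use the alias when it is already known, otherwise fall back to the raw peer address.
    void onExitSpan(String service, String peer) {
        emit(service, serviceAlias.getOrDefault(peer, peer));
    }

    private void emit(String source, String target) {
        // A real backend aggregates source/target traffic per time bucket instead of printing it.
        System.out.println(source + \u0026#34; depends on \u0026#34; + target);
    }
}
A real implementation also keeps instance-level aliases and handles the case where one address maps to several services, which exit-span rule (3) above and the proxy case in the Evaluation section cover.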
Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. 
It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. 
https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System","url":"/docs/main/next/en/papers/stam/"},{"content":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. 
Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. 
By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. 
In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. 
And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration [14]. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. 
Figure 3, Span analysis by using the new topology analysis method

2. Existing Uninstrumented Nodes
Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can't be instrumented. Causes for this might include: (1) Restrictions of the technology. In some applications written in Golang or C++, there is no easy way, unlike in Java or .NET, to do auto instrumentation with an agent, so the code may not be instrumented automatically. (2) Middleware, such as an MQ or database server, has not adopted the tracing system, which would make it difficult or time-consuming to implement the middleware instrumentation. (3) A 3rd-party service or cloud service doesn't work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.
The STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.
If the client side hasn't been instrumented, the server-side span won't get any reference through the RPC context, so it simply uses the peer to generate traffic, as shown in Figure 4.

Figure 4, STAM traffic generation when no client-side instrumentation

As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn't need any special handling. The STAM analysis core simply keeps generating client service -> peer network address traffic. As no mapping for the peer network address is generated, there is no merging.

Figure 5, STAM traffic generation when no server-side instrumentation

3. Uninstrumented Node Having Header Forward Capability
Besides the cases we evaluated in (2) Existing Uninstrumented Nodes, there is one complex and special case: the uninstrumented node has the capability to propagate headers from downstream to upstream, which is typical for proxies such as Envoy [11], Nginx [12] and Spring Cloud Gateway [13]. As a proxy, it can forward all headers from downstream to upstream to keep information in the header, including the tracing context, authentication, browser information and routing information, accessible to the business services behind the proxy, as in the Envoy route configuration [14]. When a proxy can't be instrumented, for whatever reason, it should not affect the topology detection.
In this case, the proxy address is used at the client side and propagated through the RPC context as the peer network address, and the proxy forwards it to different upstream services. STAM detects this case and generates the proxy as a conjectural node. In STAM, more than one alias name for this network address will be generated. After those aliases are detected and synchronized to the analysis nodes, the analysis core knows there is at least one uninstrumented service standing between the client and the servers. So, it generates the relationships client service -> peer network address, peer network address -> server service B and peer network address -> server service C, as shown in Figure 6.

Figure 6, STAM traffic generation when the proxy is uninstrumented

Conclusion
This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It permanently removes the disk and memory cost of time-window based analysis, as well as the barriers to horizontal scaling. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generate over 100 TB of tracing data per day and build the topology for over 200 services in real time.

Acknowledgments
We thank all contributors of the Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environments.

License
This paper and the STAM are licensed under the Apache 2.0 License.

References
[1] Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z
[2] Apache SkyWalking, http://skywalking.apache.org/
[3] Apache Open Users, https://skywalking.apache.org/users/
[4] Zipkin, https://zipkin.io/
[5] Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/
[6] OpenTracing Specification, https://github.com/opentracing/specification/blob/master/specification.md
[7] Apache Tomcat, http://tomcat.apache.org/
[8] Apache HttpComponents, https://hc.apache.org/
[9] Zipkin doc, 'Instrumenting a library' section, 'Communicating trace information' paragraph. https://zipkin.io/pages/instrumenting
[10] Jaeger Tracing, https://jaegertracing.io/
[11] Envoy Proxy, http://envoyproxy.io/
[12] Nginx, http://nginx.org/
[13] Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway
[14] Envoy Route Configuration, https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/rds.proto.html?highlight=request_headers_to_
STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System

Sheng Wu 吴 晟 wusheng@apache.org

Editor's note: This paper was written by Sheng Wu, the project founder, in 2017, to describe the fundamental theory behind all current agent core concepts. Readers can learn why SkyWalking agents are significantly different from other tracing systems and from the Dapper [1] paper's description.

Abstract
Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper) [1], with topology and metrics detected based on the tracing data. One big limitation of today's topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependencies of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched among millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of the RPC between client and server is longer than the preset time window, or crosses two windows.
In this paper, we present STAM, the Streaming Topology Analysis Method.
In STAM, we use either auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPCs at both the client side and the server side. With auto instrumentation, STAM manipulates application code at runtime, for example through a Java agent, so the monitoring system requires no source code changes from the application development team or the RPC framework development team. STAM injects the RPC network address used at the client side, the client service name and the client service instance name into the RPC context, and binds the server-side service name and service instance name as aliases of that network address. Freed from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate an accurate topology.

STAM has been implemented in Apache SkyWalking[2], an open source APM (application performance monitoring) project of the Apache Software Foundation, which is widely used in many large enterprises[3], including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and others (airlines, financial institutions and more), to support their large-scale distributed systems in production. It reduces load and memory cost significantly, with better horizontal scaling capability.

Introduction

Monitoring a highly distributed system, especially one with a microservice architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, cache and database accesses, sit behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and the first step toward observability of the whole distributed system. A distributed tracing system is capable of collecting traces covering all distributed request paths, and dependency relationships are logically included in the trace data. A distributed tracing system such as Zipkin[4] or Jaeger Tracing[10] provides built-in dependency analysis, and many analysis features build on top of it, but there are at least two fundamental limitations: timeliness and consistent accuracy.

Strong timeliness is required to keep up with the mutability of dependency relationships in a distributed application system, at both the service level and the service instance level.

A Service is a logical group of instances that run the same functions or code.

A Service Instance is usually an OS-level process, such as a JVM process. The relationships between services and instances are mutable, depending on configuration, code and network status; the dependencies can change over time.

Figure 1, Generated spans in a traditional Dapper-based tracing system

The span model in the Dapper paper and in existing tracing systems, such as Zipkin's instrumenting mode[9], only propagates the span id to the server side. With this model, dependency analysis requires a certain time window. Tracing spans are collected at both the client and server sides because that is where the relationship is recorded, so the analysis process has to wait for the client and server spans to match within the same time window before it can output the result that Service A depends on Service B. The time window therefore must be longer than the duration of the RPC request; otherwise the conclusion is lost. As a consequence, the analysis cannot react to dependency mutation at second-level granularity; in production, the window duration sometimes has to be set to 3-5 minutes.
Also, because of the window-based design, consistent accuracy is hard to achieve when one side involves a long-running task. To keep the analysis as fast as possible, the analysis period is usually less than 5 minutes, but some spans cannot be matched to their parents or children if the analysis is incomplete or crosses two time windows. Even with a mechanism to process the spans left over from previous stages, some would still have to be abandoned to keep the dataset size and memory usage reasonable.

In STAM, we introduce new span and context propagation models, together with a new analysis method. The new models add the peer network address (IP or hostname) used at the client side, the client service instance name and the client service name to the context propagation model. This context is passed with the RPC call from client to server, just like the original trace id and span id in existing tracing systems, and is collected in the server-side span. The new analysis method can then generate the client-server relationship directly, without waiting for the client span. It also registers the peer network address as an alias of the server service. After the data is synchronized across cluster nodes, the client-side span analysis can use this alias metadata to generate the client-server relationship directly as well. By using these new models and this method in Apache SkyWalking, we remove the time-window based analysis permanently and run fully in streaming mode, with less than 5 seconds of latency and consistent accuracy.

New Span Model and Context Model

The traditional span of a tracing system includes the following fields [1][6][10]:

- A trace id to represent the whole trace.
- A span id to represent the current span.
- An operation name to describe what operation this span did.
- A start timestamp.
- A finish timestamp.
- Service and Service Instance names of the current span.
- A set of zero or more key:value Span Tags.
- A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp.
- References to zero or more causally related Spans. A reference includes the parent span id and trace id.

In the new span model of STAM, we add the following fields to the span.

Span type. An enumeration of exit, local and entry. Entry and exit spans are used in networking-related libraries: an entry span represents a server-side networking library, such as Apache Tomcat[7], and an exit span represents a client-side networking library, such as Apache HttpComponents[8].

Peer Network Address. The remote address, suitable for use in exit and entry spans. In an exit span, the peer network address is the address used by the client library to access the server.

These fields are usually optional in many tracing systems, but in STAM we require them in all RPC cases.

The Context Model is used to propagate client-side information to the server side, carried by the original RPC call, usually in a header such as an HTTP header or MQ header. In the old design it carries the trace id and span id of the client-side span. In STAM, we enhance this model by adding the parent service name, the parent service instance name and the peer of the exit span. The names could be literal strings. All these extra fields help remove the blocking in streaming analysis. Compared to the existing context model this uses a little more bandwidth, but it can be optimized: in Apache SkyWalking we designed a register mechanism that exchanges unique IDs to represent these names. As a result, only 3 integers are added to the RPC context, so the bandwidth increase is well under 1% in the production environment.
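As a rough illustration of this enhanced context model, the sketch below shows the kind of carrier an agent could inject into an RPC header: the trace context plus the three registered integer ids (parent service, parent service instance, peer address). The encoding, field layout and class name are assumptions made for this example, not SkyWalking's actual propagation format.

```java
// Illustrative only: a context carrier holding the extra STAM fields.
// The encoding and field layout are assumptions for this sketch, not
// SkyWalking's real propagation header.
public class StamContextCarrierSketch {
    long traceId;
    long spanId;
    int parentServiceId;         // registered id of the client-side service name
    int parentServiceInstanceId; // registered id of the client-side instance name
    int peerId;                  // registered id of the network address used by the client

    /** Client side: serialize into a single header value before sending the RPC. */
    String encode() {
        return traceId + "." + spanId + "."
                + parentServiceId + "." + parentServiceInstanceId + "." + peerId;
    }

    /** Server side: parse the header value and attach it to the entry span. */
    static StamContextCarrierSketch decode(String headerValue) {
        String[] parts = headerValue.split("\\.");
        StamContextCarrierSketch c = new StamContextCarrierSketch();
        c.traceId = Long.parseLong(parts[0]);
        c.spanId = Long.parseLong(parts[1]);
        c.parentServiceId = Integer.parseInt(parts[2]);
        c.parentServiceInstanceId = Integer.parseInt(parts[3]);
        c.peerId = Integer.parseInt(parts[4]);
        return c;
    }

    public static void main(String[] args) {
        StamContextCarrierSketch c = new StamContextCarrierSketch();
        c.traceId = 1001; c.spanId = 2;
        c.parentServiceId = 7; c.parentServiceInstanceId = 42; c.peerId = 13;
        String header = c.encode();                         // injected into e.g. an HTTP header
        StamContextCarrierSketch received = decode(header); // extracted on the server side
        System.out.println("peer id carried across the RPC: " + received.peerId);
    }
}
```

Only the three id fields are additions over a traditional carrier, which matches the bandwidth argument above.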
The changes to these two models eliminate the time windows in the analysis process; the server-side span analysis gains the context awareness it needs.

New Topology Analysis Method

The new topology analysis method at the core of STAM processes spans in stream mode. The server-side span, also called the entry span, now includes the parent service name, the parent service instance name and the peer of the exit span, so its analysis can establish the following results:

- Record the peer of the exit span, the address the client used, as an alias of the current service and service instance. The aliases peer network address <-> service name and peer network address <-> service instance name are created; they are synchronized to all analysis nodes and persisted in storage, so that every analysis processor has this alias information.
- Generate the relationships parent service name -> current service name and parent service instance name -> current service instance name, unless a different peer network address <-> service instance name mapping is found. In that case, only generate the relationships peer network address <-> service name and peer network address <-> service instance name.

For the analysis of the client-side span (exit span), there are three possibilities:

- The peer in the exit span already has alias names established by the server-side span analysis in step (1). Then the alias names replace the peer, and traffic is generated for current service name -> alias service name and current service instance name -> alias service instance name.
- If no alias can be found, traffic is simply generated for current service name -> peer and current service instance name -> peer.
- If multiple peer network address <-> service instance name aliases are found, traffic keeps being generated for current service name -> peer network address and current service instance name -> peer network address.

Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems
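The two analysis paths can be condensed into a small stream-processing routine, sketched below. This is an editorial illustration under assumed names (the in-memory alias map, the string edges); it is not the code of SkyWalking's analysis core, which also handles instance-level edges, persistence and cross-node synchronization.

```java
import java.util.*;

// Condensed sketch of the streaming topology analysis: entry spans register
// aliases and emit parent -> current edges; exit spans resolve peers through
// the alias map. Names are illustrative, not SkyWalking OAP classes.
public class StreamTopologyAnalysisSketch {

    /** peer network address -> alias service names, synced across analysis nodes. */
    static final Map<String, Set<String>> SERVICE_ALIASES = new HashMap<>();

    /** Entry (server-side) span; parentService is null when the client is uninstrumented. */
    static List<String> onEntrySpan(String currentService, String peerUsedByClient, String parentService) {
        if (parentService == null) {
            // No reference arrived in the RPC context: fall back to the peer address.
            return List.of(peerUsedByClient + " -> " + currentService);
        }
        Set<String> aliases = SERVICE_ALIASES.computeIfAbsent(peerUsedByClient, k -> new HashSet<>());
        aliases.add(currentService); // register the alias mapping
        if (aliases.size() == 1) {
            return List.of(parentService + " -> " + currentService);
        }
        // Conflicting mappings for the same address (e.g. behind a proxy): keep peer-based edges.
        return List.of(parentService + " -> " + peerUsedByClient,
                       peerUsedByClient + " -> " + currentService);
    }

    /** Exit (client-side) span: replace the peer with its alias when exactly one is known. */
    static List<String> onExitSpan(String currentService, String peer) {
        Set<String> aliases = SERVICE_ALIASES.getOrDefault(peer, Set.of());
        if (aliases.size() == 1) {
            return List.of(currentService + " -> " + aliases.iterator().next());
        }
        return List.of(currentService + " -> " + peer); // no alias yet, or multiple aliases
    }

    public static void main(String[] args) {
        System.out.println(onExitSpan("service-A", "10.0.0.9:9090"));               // before any mapping exists
        System.out.println(onEntrySpan("service-B", "10.0.0.9:9090", "service-A")); // registers the alias
        System.out.println(onExitSpan("service-A", "10.0.0.9:9090"));               // peer now resolved to service-B
    }
}
```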
Evaluation

In this section, we evaluate the new models and the analysis method in several typical cases where the old method loses timeliness or consistent accuracy.

1. New Service Online or Auto Scale-Out

New services can be added to the topology by the development team at any time, or automatically by a container orchestration platform such as Kubernetes[5] through a scale-out policy. The monitoring system cannot be expected to be notified manually in every case. With STAM, we detect the new node automatically while keeping the analysis process unblocked and consistent with the detected nodes. In this case, a new service and a new network address (IP, port or both) are used. Since the peer network address <-> service mapping does not exist yet, traffic for client service -> peer network address is generated and persisted in storage first. After the mapping is generated, further traffic is identified, generated and aggregated as client service -> server service in the analysis platform.

To fill the gap for the small amount of traffic produced before the mapping existed, we perform the peer network address <-> service mapping translation again at query stage, merging client service -> peer network address with client service -> server service. In production, the whole SkyWalking analysis platform is deployed on fewer than 100 VMs, and syncing among them finishes within 10 seconds, in most cases in only 3-5 seconds. At query stage, the data has already been aggregated at minute or second granularity. The cost of the query merge is not related to how much traffic happened before the mapping was generated; it is affected only by the sync duration, here about 3 seconds. Consequently, in a minute-level aggregated topology, it adds only 1 or 2 relationship records to the whole topology relationship dataset. Considering that a topology of over 100 services has over 500 relationship records per minute, the extra payload of this query merge is very limited and affordable. This property matters in a large, heavily loaded distributed system, because we do not need to worry about its scaling capability. Some forked versions instead choose to update the existing client service -> peer network address records to client service -> server service once the new mapping for the peer is detected, which removes the extra query-stage load permanently.

Figure 3, Span analysis by using the new topology analysis method
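The query-stage merge described in this case can be pictured with the short sketch below. It is a hypothetical example with invented names, showing only the idea of folding peer-addressed edges into service-addressed edges at read time once the alias mapping is known.

```java
import java.util.*;

// Illustrative sketch of the query-stage merge: traffic recorded against a raw
// peer address before the alias existed is folded into the
// client service -> server service edge when the topology is queried.
public class QueryStageMergeSketch {

    record Edge(String source, String target, long calls) {}

    static Collection<Edge> mergeAtQueryTime(List<Edge> stored, Map<String, String> peerToService) {
        Map<String, Edge> merged = new LinkedHashMap<>();
        for (Edge e : stored) {
            // Translate a peer network address into its service alias if one is known by now.
            String target = peerToService.getOrDefault(e.target(), e.target());
            merged.merge(e.source() + "->" + target,
                    new Edge(e.source(), target, e.calls()),
                    (a, b) -> new Edge(a.source(), a.target(), a.calls() + b.calls()));
        }
        return merged.values();
    }

    public static void main(String[] args) {
        List<Edge> stored = List.of(
                new Edge("client-service", "10.0.0.9:9090", 12),   // recorded before the mapping existed
                new Edge("client-service", "server-service", 4800) // recorded after
        );
        Map<String, String> aliases = Map.of("10.0.0.9:9090", "server-service");
        mergeAtQueryTime(stored, aliases).forEach(System.out::println);
        // prints a single client-service -> server-service edge with 4812 calls
    }
}
```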
2. Existing Uninstrumented Nodes

Every topology detection method has to handle this case. In many production environments there are nodes that cannot be instrumented. Causes include: (1) Technology restrictions. For applications written in Go or C++, there is no agent-based auto instrumentation as convenient as in Java or .NET, so the code may not be instrumented automatically. (2) Middleware, such as an MQ or a database server, has not adopted the tracing system, and instrumenting it would be difficult or time consuming. (3) A third-party or cloud service does not work with the current tracing system. (4) Lack of resources: the development or operations team has no time to make the instrumentation ready.

STAM works well even if the client or server side has no instrumentation, and keeps the topology as accurate as possible.

If the client side is not instrumented, the server-side span receives no reference through the RPC context, so the analysis simply uses the peer network address to generate traffic, as shown in Figure 4.

Figure 4, STAM traffic generation when there is no client-side instrumentation

As shown in Figure 5, in the opposite case, with no server-side instrumentation, the client span analysis needs no special handling. The STAM analysis core simply keeps generating client service -> peer network address traffic; since no mapping is ever created for the peer network address, no merging happens.

Figure 5, STAM traffic generation when there is no server-side instrumentation

3. Uninstrumented Node Having Header-Forwarding Capability

Besides the cases evaluated in (2) Existing Uninstrumented Nodes, there is one complex and special case: the uninstrumented node can propagate headers from downstream to upstream, as proxies such as Envoy[11], Nginx[12] and Spring Cloud Gateway[13] typically do. A proxy forwards all headers from downstream to upstream so that information carried in them, including the tracing context, authentication, browser information and routing information, remains accessible to the business services behind it, as in an Envoy route configuration. When such a proxy cannot be instrumented, for whatever reason, topology detection should not be affected.

In this case, the proxy address is used at the client side and propagated through the RPC context as the peer network address, and the proxy forwards it to different upstream services. STAM detects this case and generates the proxy as a conjectural node. More than one alias name is generated for this network address; once these aliases are detected and synchronized to the analysis nodes, the analysis core knows that at least one uninstrumented service stands between the client and the servers. So it generates the relationships client service -> peer network address, peer network address -> server service B and peer network address -> server service C, as shown in Figure 6.

Figure 6, STAM traffic generation when the proxy is uninstrumented

Conclusion

This paper described STAM which, to the best of our knowledge, is the most effective topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method of tracing-based monitoring systems, permanently removing the disk and memory cost of time-window based analysis as well as the barriers to horizontal scaling. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of these deployments generate over 100 TB of tracing data per day and compute the topology for over 200 services in real time.

Acknowledgments

We thank all contributors of the Apache SkyWalking project for their suggestions, their code contributions implementing STAM, and their feedback from using STAM and SkyWalking in production environments.

License

This paper and STAM are licensed under the Apache 2.0 License.

References

1. Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z
2. Apache SkyWalking, http://skywalking.apache.org/
3. Apache SkyWalking Open Users, https://skywalking.apache.org/users/
4. Zipkin, https://zipkin.io/
5. Kubernetes, Production-Grade Container Orchestration, https://kubernetes.io/
6. OpenTracing Specification, https://github.com/opentracing/specification/blob/master/specification.md
7. Apache Tomcat, http://tomcat.apache.org/
8. Apache HttpComponents, https://hc.apache.org/
9. Zipkin documentation, "Instrumenting a library", section "Communicating trace information", https://zipkin.io/pages/instrumenting
10. Jaeger Tracing, https://jaegertracing.io/
11. Envoy Proxy, http://envoyproxy.io/
12. Nginx, http://nginx.org/
13. Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway

","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System","url":"/docs/main/v9.4.0/en/papers/stam/"}
Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. 
The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. 
Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. 
Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. 
It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. 
https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System","url":"/docs/main/v9.5.0/en/papers/stam/"},{"content":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. 
Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. 
By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. 
In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. 
And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. 
So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System","url":"/docs/main/v9.6.0/en/papers/stam/"},{"content":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. 
In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. 
Also, because of the window-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy: in order to make the analysis as fast as possible, the analysis period is less than 5 minutes, and some spans can’t match their parents or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left over from previous stages, some would still have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce new span and context propagation models, together with a new analysis method. These new models add the peer network address (IP or hostname) used at the client side, the client service instance name and the client service name into the context propagation model. This information is passed with the RPC call from client to server, just as the original trace id and span id are in existing tracing systems, and it is collected in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the data sync across cluster nodes, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. By using these new models and this method in Apache SkyWalking, we remove the time window-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds of latency and consistent accuracy.\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp. Service and Service Instance names of the current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. A reference includes the parent span id and trace id.  In the new span model of STAM, we add the following fields to the span.\nSpan type. Enumeration, including exit, local and entry. Entry and exit spans are used in networking related libraries. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent a client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In exit spans, the peer network address is the address used by the client library to access the server.\nThese fields are usually optional in many tracing systems, but in STAM, we require them in all RPC cases.\nThe Context Model is used to propagate the client-side information to the server side, carried by the original RPC call, usually in a header, such as an HTTP header or MQ header. In the old design, it carries the trace id and span id of the client-side span. In the STAM, we enhance this model, adding the parent service name, the parent service instance name and the peer of the exit span. The names could be literal strings. All these extra fields help to remove the blocking of streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. 
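For illustration only, the following is a minimal sketch of what such a propagation context could carry; the class and field names are hypothetical and do not reflect SkyWalking's actual header format.

import java.util.StringJoiner;

// Hypothetical sketch of the STAM context carrier described above.
public class StamContextCarrier {
    String traceId;                    // existing tracing fields
    String parentSpanId;
    String parentServiceName;          // STAM additions: literal names, or registered integer IDs
    String parentServiceInstanceName;
    String peerNetworkAddress;         // address the client-side exit span used, e.g. "10.0.2.15:8080"

    // Serialize into a single RPC header value (HTTP header, MQ property, etc.).
    public String serialize() {
        return new StringJoiner("-")
                .add(traceId).add(parentSpanId)
                .add(parentServiceName).add(parentServiceInstanceName)
                .add(peerNetworkAddress)
                .toString();
    }
}

The register mechanism mentioned above would replace the three name fields with integer IDs before serialization, which is what keeps the header small.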
As a result, only 3 integers are added to the RPC context, so the bandwidth increase is less than 1% in the production environment.\nThe changes to these two models eliminate the time windows in the analysis process, and the server-side span analysis gains context-aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM processes the spans in stream mode. The analysis of the server-side span, also named the entry span, includes the parent service name, the parent service instance name and the peer of the exit span, so the analysis process can establish the following results.\n Set the peer of the exit span as an alias name of the current service and instance, creating the peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; service instance name aliases. These two will be synced to all analysis nodes and persisted in the storage, allowing more analysis processors to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless another, different peer network address \u0026lt;-\u0026gt; service instance name mapping is found. In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; service instance name.  For the analysis of the client-side span (exit span), there are three possibilities.\n The peer in the exit span already has the alias names established by the server-side span analysis from step (1). Then use the alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; service instance name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team at any time, or automatically by a container orchestration platform such as Kubernetes [5] through some scale-out policy. In neither case can the monitoring system be notified manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with the detected nodes. In this case, a new service and network address (IP, port or both) are used. Since the peer network address \u0026lt;-\u0026gt; service mapping does not exist yet, the traffic of client service -\u0026gt; peer network address will be generated and persisted in the storage first. After the mapping is generated, further client-service to server-service traffic can be identified, generated and aggregated in the analysis platform. 
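To make the analysis steps above concrete, here is a minimal, hypothetical sketch (not SkyWalking's actual implementation) of how entry and exit spans could be processed in stream mode against the shared alias mapping; it omits the multiple-alias and conflicting-mapping handling described above.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical sketch of the STAM streaming analysis core.
class StamStreamAnalyzer {
    // peer network address -> [server service name, server instance name], synced across analysis nodes
    private final Map<String, String[]> aliasMap = new ConcurrentHashMap<>();

    // Entry (server-side) span: register the alias and emit the relationship directly.
    void onEntrySpan(String service, String instance, String parentService,
                     String parentInstance, String peerAddress) {
        aliasMap.put(peerAddress, new String[]{service, instance});
        emit(parentService, service);
        emit(parentInstance, instance);
    }

    // Exit (client-side) span: use the alias if it is already known, otherwise fall back to the peer address.
    void onExitSpan(String service, String instance, String peerAddress) {
        String[] alias = aliasMap.get(peerAddress);
        if (alias != null) {
            emit(service, alias[0]);
            emit(instance, alias[1]);
        } else {
            emit(service, peerAddress);
            emit(instance, peerAddress);
        }
    }

    private void emit(String source, String target) {
        // Aggregate and persist the source -> target traffic record.
    }
}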
To fill the gap for the small amount of traffic generated before the mapping exists, we require the peer network address \u0026lt;-\u0026gt; service mapping translation to be done again at the query stage, to merge client service-\u0026gt;peer network address and client-service to server-service traffic. In production, the number of VMs for the whole SkyWalking analysis platform deployment is less than 100; syncing among them finishes in less than 10 seconds, and in most cases it only takes 3-5 seconds. And at the query stage, the data has already been aggregated at the minute or second level. The query merge performance is not related to how much traffic happens before the mapping is generated; it is only affected by the sync duration, here only about 3 seconds. Due to that, in a minute-level aggregation topology, it only adds 1 or 2 relationship records to the whole topology relationship dataset. Considering that a topology of over 100 services has over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high-load distributed system, as we don’t need to be concerned about its scaling capability. In some fork versions, the existing client service-\u0026gt;peer network address traffic is updated to client-service to server-service traffic after the new peer mapping is detected, in order to remove the extra load at the query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include: (1) Restrictions of the technology. Applications written in golang or C++ have no easy agent-based auto instrumentation like Java or .Net, so the code may not be instrumented automatically. (2) Middleware, such as an MQ or database server, has not adopted the tracing system, which makes it difficult or time-consuming to implement middleware instrumentation. (3) A 3rd-party service or cloud service doesn’t support working with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t been instrumented, the server-side span won’t get any reference through the RPC context, so it simply uses the peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to do anything special for this case. The STAM analysis core simply keeps generating client service-\u0026gt;peer network address traffic. As no mapping for the peer network address is generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the uninstrumented node has the capability to propagate the header from downstream to upstream, which is typical of proxies such as Envoy[11], Nginx[12] and Spring Cloud Gateway[13]. 
As a proxy, it has the capability to forward all headers from downstream to upstream, keeping certain information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible to the business services behind the proxy, as in an Envoy route configuration. When such a proxy can’t be instrumented, no matter the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagated through the RPC context as the peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias name for this network address would be generated. After those aliases are detected and synchronized to the analysis nodes, the analysis core knows there is at least one uninstrumented service standing between the client and the servers. So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy is uninstrumented Conclusion This paper described the STAM, which is, to the best of our knowledge, the most effective topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It permanently removes the disk and memory cost of time-window based analysis, as well as the barriers to horizontal scaling. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generate over 100 TB of tracing data per day and build the topology of over 200 services in real time.\nAcknowledgments We thank all contributors of the Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environments.\nLicense This paper and the STAM are licensed under the Apache 2.0 License.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification, https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph, https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System","url":"/docs/main/v9.7.0/en/papers/stam/"},{"content":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the development and testing environment. 
The standalone mode is running as a standalone process by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 to access gRPC requests. if no errors occurred.\nAt the same time, the banyand-server would be listening on the 0.0.0.0:17913 to access HTTP requests. if no errors occurred. The HTTP server is used for CLI and Web UI.\n","title":"Standalone Mode","url":"/docs/skywalking-banyandb/latest/installation/standalone/"},{"content":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the development and testing environment. Once you unpack and extract the skywalking-banyandb-x.x.x-bin.tgz, you could startup BanyanDB server, the standalone mode is running as a standalone process.\n$ cd skywalking-banyandb-x.x.x-bin/bin $ ./banyand-server-static standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand server would be listening on the 0.0.0.0:17912 to access gRPC requests. if no errors occurred.\nAt the same time, the banyand server would be listening on the 0.0.0.0:17913 to access HTTP requests. if no errors occurred. The HTTP server is used for CLI and Web UI.\n","title":"Standalone Mode","url":"/docs/skywalking-banyandb/next/installation/standalone/"},{"content":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the development and testing environment. The standalone mode is running as a standalone process by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 to access gRPC requests. if no errors occurred.\nAt the same time, the banyand-server would be listening on the 0.0.0.0:17913 to access HTTP requests. if no errors occurred. The HTTP server is used for CLI and Web UI.\n","title":"Standalone Mode","url":"/docs/skywalking-banyandb/v0.5.0/installation/standalone/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. 
We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/latest/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/next/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode In different deployment tools, such as k8s, you may need different startup modes. We provide two other optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provide services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization, and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables), as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, and TiDB tables, starts to listen, and provide services. In other words, the OAP server would anticipate having another OAP server to carry out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.0.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. 
We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.1.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.2.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.3.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. 
We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.4.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.5.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.6.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. 
We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.7.0/en/setup/backend/backend-start-up-mode/"},{"content":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow Operator installation instrument to install the operator.\nDefine Storage with default setting  sample.yaml(use the internal type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:internalversion:7.5.1instances:3image:docker.elastic.co/elasticsearch/elasticsearch:7.5.1security:user:secretName:defaulttls:truesample.yaml(use the external type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:externaladdress:\u0026#34;https://elasticsearch\u0026#34;security:user:secretName:defaultDeploy Storage  Deploy the Storage use the below command:  $ kubectl apply -f sample.yaml Check the Storage in Kubernetes:   If you deploy the storage with the internal type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample 3 elasticsearch 7.5.1 internal  If you deploy the storage with the external type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample elasticsearch 7.5.1 external Check the Statefulset in Kubernetes:  $ kubectl get statefulset NAME READY AGE sample-elasticsearch 3/3 7s Specify Storage Name in OAP server Here we modify the default OAP server configuration file,the new yaml file as follows:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServermetadata:name:defaultspec:version:9.5.0instances:1image:apache/skywalking-oap-server:9.5.0service:template:type:ClusterIPstorage:name:sample Deploy the OAP server use the new yaml file:  $ kubectl apply -f oap.yaml Check the OAP server in Kubernetes:  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS sample 1 1 sample-oap.default Check whether the pod generated by OAP server is running correctly.  
$ kubectl get pod -l app=oap NAME READY STATUS RESTARTS AGE sample-oap-5bc79567b7-tkw6q 1/1 Running 0 6m31s ","title":"Storage Usage","url":"/docs/skywalking-swck/latest/examples/storage/"},{"content":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow Operator installation instrument to install the operator.\nDefine Storage with default setting  sample.yaml(use the internal type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:internalversion:7.5.1instances:3image:docker.elastic.co/elasticsearch/elasticsearch:7.5.1security:user:secretName:defaulttls:truesample.yaml(use the external type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:externaladdress:\u0026#34;https://elasticsearch\u0026#34;security:user:secretName:defaultDeploy Storage  Deploy the Storage use the below command:  $ kubectl apply -f sample.yaml Check the Storage in Kubernetes:   If you deploy the storage with the internal type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample 3 elasticsearch 7.5.1 internal  If you deploy the storage with the external type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample elasticsearch 7.5.1 external Check the Statefulset in Kubernetes:  $ kubectl get statefulset NAME READY AGE sample-elasticsearch 3/3 7s Specify Storage Name in OAP server Here we modify the default OAP server configuration file,the new yaml file as follows:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServermetadata:name:defaultspec:version:9.5.0instances:1image:apache/skywalking-oap-server:9.5.0service:template:type:ClusterIPstorage:name:sample Deploy the OAP server use the new yaml file:  $ kubectl apply -f oap.yaml Check the OAP server in Kubernetes:  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS sample 1 1 sample-oap.default Check whether the pod generated by OAP server is running correctly.  
$ kubectl get pod -l app=oap NAME READY STATUS RESTARTS AGE sample-oap-5bc79567b7-tkw6q 1/1 Running 0 6m31s ","title":"Storage Usage","url":"/docs/skywalking-swck/next/examples/storage/"},{"content":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow Operator installation instrument to install the operator.\nDefine Storage with default setting  sample.yaml(use the internal type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:internalversion:7.5.1instances:3image:docker.elastic.co/elasticsearch/elasticsearch:7.5.1security:user:secretName:defaulttls:truesample.yaml(use the external type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:externaladdress:\u0026#34;https://elasticsearch\u0026#34;security:user:secretName:defaultDeploy Storage  Deploy the Storage use the below command:  $ kubectl apply -f sample.yaml Check the Storage in Kubernetes:   If you deploy the storage with the internal type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample 3 elasticsearch 7.5.1 internal  If you deploy the storage with the external type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample elasticsearch 7.5.1 external Check the Statefulset in Kubernetes:  $ kubectl get statefulset NAME READY AGE sample-elasticsearch 3/3 7s Specify Storage Name in OAP server Here we modify the default OAP server configuration file,the new yaml file as follows:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServermetadata:name:defaultspec:version:9.5.0instances:1image:apache/skywalking-oap-server:9.5.0service:template:type:ClusterIPstorage:name:sample Deploy the OAP server use the new yaml file:  $ kubectl apply -f oap.yaml Check the OAP server in Kubernetes:  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS sample 1 1 sample-oap.default Check whether the pod generated by OAP server is running correctly.  $ kubectl get pod -l app=oap NAME READY STATUS RESTARTS AGE sample-oap-5bc79567b7-tkw6q 1/1 Running 0 6m31s ","title":"Storage Usage","url":"/docs/skywalking-swck/v0.9.0/examples/storage/"},{"content":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling backend cluster capacity up and down Changing backend cluster configuration Injecting configuration into the target cluster. Securing traffic between target clusters and backend cluster, or between backend cluster with TLS certificate  Motivation If the user of SkyWalking decided to deploy it into Kubernetes, there’re some critical challenges for them.\nFirst of them is the complex of deployment, it doesn’t only mean the OAP server and storage cluster, but also include configuring target cluster to send data to backend. Then they might struggle to keep all of them reliable. The size of the data transferred is very big and the cost of data stored is very high. The user usually faces some problems, for instance, OAP server stuck, Elasticsearch cluster GC rate sharply increases, the system load of some OAP instances is much more than others, and etc.\nWith the help of CRDs and the Controller, we can figure out the above problems and give users a more pleasing experience when using SWCK.\nProposal Production Design I proposed two crucial components for SWCK, backend operator and target injector. 
The first one intends to solve the problems of the backend operation, and another focus on simplifying the configuration of the target cluster.\nThey should be built as two separate binary/image, then are installed according to user’s requirements.\nBackend Operator The operator might be a GO application that manages and monitors other components, for example, OAP pods, storage pods(ES, MySQL, and etc.), ingress/entry and configuration.\nIt should be capable of HA, performance, and scalability.\nIt should also have the following capabilities:\n Defining CRDs for provisioning and configuring Provisioning backend automatically Splitting OAP instances according to their type(L1/L2), improving the ratio of them. Performance tuning of OAP and storage. Updating configuration dynamically, irrespectively it’s dynamic or not. Upgrading mirror version seamlessly. Health checking and failure recovery Collecting and analyzing metrics and logs, abnormal detection Horizontal scaling and scheduling tuning. Loadbalancing input gPRC stream and GraphQL querying. Supporting externally hosted storage service. Securing traffic  The above items should be accomplished in several versions/releases. The developer should sort the priority of them and grind the design.\nTarget injector The injector can inject agent lib and configuration into the target cluster automatically, enable/disable distributed tracing according to labels marked on resources or namespace.\nIt also integrates backend with service mesh platform, for example, Istio.\nIt should be a GO application and a GO lib to be invoked by swctl to generate pod YAMLs manually.\nTechnology Selection  Development Language: GO Operator dev tool: TBD Building tool: Make(Docker for windows) Installation: Helm3 chart Repository: github.com/apache/skywalking-swck CI: Github action  ","title":"Summary","url":"/docs/skywalking-swck/latest/design/proposal/"},{"content":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling backend cluster capacity up and down Changing backend cluster configuration Injecting configuration into the target cluster. Securing traffic between target clusters and backend cluster, or between backend cluster with TLS certificate  Motivation If the user of SkyWalking decided to deploy it into Kubernetes, there’re some critical challenges for them.\nFirst of them is the complex of deployment, it doesn’t only mean the OAP server and storage cluster, but also include configuring target cluster to send data to backend. Then they might struggle to keep all of them reliable. The size of the data transferred is very big and the cost of data stored is very high. The user usually faces some problems, for instance, OAP server stuck, Elasticsearch cluster GC rate sharply increases, the system load of some OAP instances is much more than others, and etc.\nWith the help of CRDs and the Controller, we can figure out the above problems and give users a more pleasing experience when using SWCK.\nProposal Production Design I proposed two crucial components for SWCK, backend operator and target injector. 
The first one intends to solve the problems of the backend operation, and another focus on simplifying the configuration of the target cluster.\nThey should be built as two separate binary/image, then are installed according to user’s requirements.\nBackend Operator The operator might be a GO application that manages and monitors other components, for example, OAP pods, storage pods(ES, MySQL, and etc.), ingress/entry and configuration.\nIt should be capable of HA, performance, and scalability.\nIt should also have the following capabilities:\n Defining CRDs for provisioning and configuring Provisioning backend automatically Splitting OAP instances according to their type(L1/L2), improving the ratio of them. Performance tuning of OAP and storage. Updating configuration dynamically, irrespectively it’s dynamic or not. Upgrading mirror version seamlessly. Health checking and failure recovery Collecting and analyzing metrics and logs, abnormal detection Horizontal scaling and scheduling tuning. Loadbalancing input gPRC stream and GraphQL querying. Supporting externally hosted storage service. Securing traffic  The above items should be accomplished in several versions/releases. The developer should sort the priority of them and grind the design.\nTarget injector The injector can inject agent lib and configuration into the target cluster automatically, enable/disable distributed tracing according to labels marked on resources or namespace.\nIt also integrates backend with service mesh platform, for example, Istio.\nIt should be a GO application and a GO lib to be invoked by swctl to generate pod YAMLs manually.\nTechnology Selection  Development Language: GO Operator dev tool: TBD Building tool: Make(Docker for windows) Installation: Helm3 chart Repository: github.com/apache/skywalking-swck CI: Github action  ","title":"Summary","url":"/docs/skywalking-swck/next/design/proposal/"},{"content":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling backend cluster capacity up and down Changing backend cluster configuration Injecting configuration into the target cluster. Securing traffic between target clusters and backend cluster, or between backend cluster with TLS certificate  Motivation If the user of SkyWalking decided to deploy it into Kubernetes, there’re some critical challenges for them.\nFirst of them is the complex of deployment, it doesn’t only mean the OAP server and storage cluster, but also include configuring target cluster to send data to backend. Then they might struggle to keep all of them reliable. The size of the data transferred is very big and the cost of data stored is very high. The user usually faces some problems, for instance, OAP server stuck, Elasticsearch cluster GC rate sharply increases, the system load of some OAP instances is much more than others, and etc.\nWith the help of CRDs and the Controller, we can figure out the above problems and give users a more pleasing experience when using SWCK.\nProposal Production Design I proposed two crucial components for SWCK, backend operator and target injector. 
The first one intends to solve the problems of the backend operation, and another focus on simplifying the configuration of the target cluster.\nThey should be built as two separate binary/image, then are installed according to user’s requirements.\nBackend Operator The operator might be a GO application that manages and monitors other components, for example, OAP pods, storage pods(ES, MySQL, and etc.), ingress/entry and configuration.\nIt should be capable of HA, performance, and scalability.\nIt should also have the following capabilities:\n Defining CRDs for provisioning and configuring Provisioning backend automatically Splitting OAP instances according to their type(L1/L2), improving the ratio of them. Performance tuning of OAP and storage. Updating configuration dynamically, irrespectively it’s dynamic or not. Upgrading mirror version seamlessly. Health checking and failure recovery Collecting and analyzing metrics and logs, abnormal detection Horizontal scaling and scheduling tuning. Loadbalancing input gPRC stream and GraphQL querying. Supporting externally hosted storage service. Securing traffic  The above items should be accomplished in several versions/releases. The developer should sort the priority of them and grind the design.\nTarget injector The injector can inject agent lib and configuration into the target cluster automatically, enable/disable distributed tracing according to labels marked on resources or namespace.\nIt also integrates backend with service mesh platform, for example, Istio.\nIt should be a GO application and a GO lib to be invoked by swctl to generate pod YAMLs manually.\nTechnology Selection  Development Language: GO Operator dev tool: TBD Building tool: Make(Docker for windows) Installation: Helm3 chart Repository: github.com/apache/skywalking-swck CI: Github action  ","title":"Summary","url":"/docs/skywalking-swck/v0.9.0/design/proposal/"},{"content":"Support ActiveMQ classic Monitoring Motivation Apache ActiveMQ Classic is a popular and powerful open source messaging and Integration Patterns server. It supports many Cross Language Clients and Protocols, comes with easy to use Enterprise Integration Patterns and many advanced features.\nNow I want to add ActiveMQ Classic monitoring via the OpenTelemetry Collector which fetches metrics from jmx prometheus exporter run as a Java Agent.\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes Apache ActiveMQ Classic has extensive support for JMX to allow you to monitor and control the behavior of the broker via the JMX MBeans.\nJmx prometheus exporter collects metrics data from ActiveMQ classic, this exporter is intended to be run as a Java Agent, exposing a HTTP server and serving metrics of the local JVM.\nUsing openTelemetry receiver to fetch these metrics to SkyWalking OAP server.\nActiveMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     System Load Average Count meter_activemq_cluster_system_load_average The average system load, range:[0, 10000]. JMX Prometheus Exporter   Thread Count Count meter_activemq_cluster_thread_count Threads currently used by the JVM. JMX Prometheus Exporter   Init Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_init The initial amount of heap memory available. JMX Prometheus Exporter   Committed Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_committed The memory is guaranteed to be available for the JVM to use. 
JMX Prometheus Exporter   Used Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_used The amount of JVM heap memory currently in use. JMX Prometheus Exporter   Max Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_max The maximum possible size of the heap memory. JMX Prometheus Exporter   GC G1 Old Collection Count Count meter_activemq_cluster_gc_g1_old_collection_count The gc count of G1 Old Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Count Count meter_activemq_cluster_gc_g1_young_collection_count The gc count of G1 Young Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Old Collection Time ms meter_activemq_cluster_gc_g1_old_collection_time The gc time spent in G1 Old Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Time ms meter_activemq_cluster_gc_g1_young_collection_time The gc time spent in G1 Young Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC Parallel Old Collection Count Count meter_activemq_cluster_gc_parallel_old_collection_count The gc count of Parallel Old Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Count Count meter_activemq_cluster_gc_parallel_young_collection_count The gc count of Parallel Young Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Old Collection Time ms meter_activemq_cluster_gc_parallel_old_collection_time The gc time spent in Parallel Old Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Time ms meter_activemq_cluster_gc_parallel_young_collection_time The gc time spent in Parallel Young Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   Enqueue Rate Count/s meter_activemq_cluster_enqueue_rate Number of messages that have been sent to the cluster per second(JDK[6,8]). JMX Prometheus Exporter   Dequeue Rate Count/s meter_activemq_cluster_dequeue_rate Number of messages that have been acknowledged or discarded on the cluster per second. JMX Prometheus Exporter   Dispatch Rate Count/s meter_activemq_cluster_dispatch_rate Number of messages that has been delivered to consumers per second. JMX Prometheus Exporter   Expired Rate Count/s meter_activemq_cluster_expired_rate Number of messages that have been expired per second. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_cluster_average_enqueue_time The average time a message was held on this cluster. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_cluster_max_enqueue_time The max time a message was held on this cluster. JMX Prometheus Exporter    ActiveMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime sec meter_activemq_broker_uptime Uptime of the broker in day. JMX Prometheus Exporter   State  meter_activemq_broker_state If slave broker 1 else 0. JMX Prometheus Exporter   Current Connections Count meter_activemq_broker_current_connections The number of clients connected to the broker currently. JMX Prometheus Exporter   Current Producer Count Count meter_activemq_broker_current_producer_count The number of producers currently attached to the broker. JMX Prometheus Exporter   Current Consumer Count Count meter_activemq_broker_current_consumer_count The number of consumers consuming messages from the broker. JMX Prometheus Exporter   Producer Count Count meter_activemq_broker_producer_count Number of message producers active on destinations. 
JMX Prometheus Exporter   Consumer Count Count meter_activemq_broker_consumer_count Number of message consumers subscribed to destinations. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_broker_enqueue_count The total number of messages sent to the broker. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_broker_dequeue_count The total number of messages the broker has delivered to consumers. JMX Prometheus Exporter   Enqueue Rate Count/sec meter_activemq_broker_enqueue_rate The total number of messages sent to the broker per second. JMX Prometheus Exporter   Dequeue Rate Count/sec meter_activemq_broker_dequeue_rate The total number of messages the broker has delivered to consumers per second. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_broker_memory_percent_usage Percentage of configured memory used by the broker. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_broker_memory_percent_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Limit Bytes meter_activemq_broker_memory_limit Memory limited used for holding undelivered messages before paging to temporary storage. JMX Prometheus Exporter   Store Percent Usage % meter_activemq_broker_store_percent_usage Percentage of available disk space used for persistent message storage. JMX Prometheus Exporter   Store Limit Bytes meter_activemq_broker_store_limit Disk limited used for persistent messages before producers are blocked. JMX Prometheus Exporter   Temp Percent Usage Bytes meter_activemq_broker_temp_percent_usage Percentage of available disk space used for non-persistent message storage. JMX Prometheus Exporter   Temp Limit Bytes meter_activemq_broker_temp_limit Disk limited used for non-persistent messages and temporary data before producers are blocked. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_broker_average_message_size Average message size on this broker. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_broker_max_message_size Max message size on this broker. JMX Prometheus Exporter   Queue Size Count meter_activemq_broker_queue_size Number of messages on this broker that have been dispatched but not acknowledged. JMX Prometheus Exporter    ActiveMQ Destination Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Producer Count Count meter_activemq_destination_producer_count Number of producers attached to this destination. JMX Prometheus Exporter   Consumer Count Count meter_activemq_destination_consumer_count Number of consumers subscribed to this destination. JMX Prometheus Exporter   Topic Consumer Count Count meter_activemq_destination_topic_consumer_count Number of consumers subscribed to the topics. JMX Prometheus Exporter   Queue Size Count meter_activemq_destination_queue_size The number of messages that have not been acknowledged by a consumer. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_destination_memory_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_destination_memory_percent_usage Percentage of configured memory used by the destination. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_destination_enqueue_count The number of messages sent to the destination. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_destination_dequeue_count The number of messages the destination has delivered to consumers. 
JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_destination_average_enqueue_time The average time a message was held on this destination. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_destination_max_enqueue_time The max time a message was held on this destination. JMX Prometheus Exporter   Dispatch Count Count meter_activemq_destination_dispatch_count Number of messages that has been delivered to consumers. JMX Prometheus Exporter   Expired Count Count meter_activemq_destination_expired_count Number of messages that have been expired. JMX Prometheus Exporter   Inflight Count Count meter_activemq_destination_inflight_count Number of messages that have been dispatched to but not acknowledged by consumers. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_destination_average_message_size Average message size on this destination. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_destination_max_message_size Max message size on this destination. JMX Prometheus Exporter    Imported Dependencies libs and their licenses. No new dependency.\nCompatibility no breaking changes.\nGeneral usage docs ","title":"Support ActiveMQ classic Monitoring","url":"/docs/main/next/en/swip/swip-8/"},{"content":"Support available layers of service in the topology. Motivation UI could jump to the service dashboard and query service hierarchy from the topology node. For now topology node includes name and ID but without layer, as the service could have multiple layers, the limitation is that it is only works on the current layer which the topology represents:\n UI could not jump into another layer\u0026rsquo;s dashboard of the service. UI could not query the service hierarchy from the topology node if the node is not in current layer.  Here are typical use cases: should have a chance to jump into another layer\u0026rsquo;s dashboard of the service:\n In the mesh topology, mesh(layer MESH) and mesh-dp(layer MESH_DP) share a similar topology, one node will have two layers. In the mesh topology, agent(layer GENERAL) + virtual database(layer VIRTUAL_DATABASE), the node is in different layers.  Both of these two cases have hybrid layer topology. If we could support that, we could have a better x-layer interaction.\nArchitecture Graph There is no significant architecture-level change.\nPropose Changes Add the layers info into topology node:\n When building the topology node fetch the layers info from the service according to the service id. Return layers info in the Node when query the topology.  Imported Dependencies libs and their licenses. No new library is planned to be added to the codebase.\nCompatibility About the protocol, there should be no breaking changes, but enhancements only. New field layers is going to be added to the Node in the query protocol topology.graphqls.\ntypeNode{# The service ID of the node.id:ID!# The literal name of the #id.name:String!# The type name may be# 1. The service provider/middleware tech, such as: Tomcat, SpringMVC# 2. Conjectural Service, e.g. MySQL, Redis, Kafkatype:String# It is a conjecture node or real node, to represent a service or endpoint.isReal:Boolean!# The layers of the service.layers:[String!]!}General usage docs This proposal doesn\u0026rsquo;t impact the end user in any way of using SkyWalking. 
The remarkable change will be in the UI topology map, users could jump into the proper layer\u0026rsquo;s service dashboard and query the service hierarchy from the topology node.\n","title":"Support available layers of service in the topology.","url":"/docs/main/next/en/swip/swip-4/"},{"content":"Support ClickHouse Monitoring Motivation ClickHouse is a high-performance, column-oriented SQL database management system (DBMS) for online analytical processing (OLAP). It is available as both an open-source software and a cloud offering.\nNow I want to add ClickHouse monitoring via the OpenTelemetry Collector which fetches metrics from it\u0026rsquo;s own HTTP endpoint to expose metrics data for Prometheus (since ClickHouse v20.1.2.4). Clickhouse Exporter used only for old ClickHouse versions, modern versions have embedded prometheus endpoint.\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes ClickHouse expose own metrics via HTTP endpoint to opentelemetry collector, using skyWalking openTelemetry receiver to fetch these metrics.\nThe exposed metrics are from the system.metrics table / the system.events table / the system.asynchronous_metrics table.\nClickHouse Instance Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CpuUsage count meter_clickhouse_instance_cpu_usage CPU time spent seen by OS per second(according to ClickHouse.system.dashboard.CPU Usage (cores)). ClickHouse   MemoryUsage percentage meter_clickhouse_instance_memory_usage Total amount of memory (bytes) allocated by the server/ total amount of OS memory. ClickHouse   MemoryAvailable percentage meter_clickhouse_instance_memory_available Total amount of memory (bytes) available for program / total amount of OS memory. ClickHouse   Uptime sec meter_clickhouse_instance_uptime The server uptime in seconds. It includes the time spent for server initialization before accepting connections. ClickHouse   Version string meter_clickhouse_instance_version Version of the server in a single integer number in base-1000. ClickHouse   FileOpen count meter_clickhouse_instance_file_open Number of files opened. ClickHouse    ClickHouse Network Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     TcpConnections count meter_clickhouse_instance_tcp_connectionsmeter_clickhouse_tcp_connections Number of connections to TCP server. ClickHouse   MysqlConnections count meter_clickhouse_instance_mysql_connectionsmeter_clickhouse_mysql_connections Number of client connections using MySQL protocol. ClickHouse   HttpConnections count meter_clickhouse_instance_http_connectionsmeter_clickhouse_mysql_connections Number of connections to HTTP server. ClickHouse   InterserverConnections count meter_clickhouse_instance_interserver_connectionsmeter_clickhouse_interserver_connections Number of connections from other replicas to fetch parts. ClickHouse   PostgresqlConnections count meter_clickhouse_instance_postgresql_connectionsmeter_clickhouse_postgresql_connections Number of client connections using PostgreSQL protocol. ClickHouse   ReceiveBytes bytes meter_clickhouse_instance_network_receive_bytesmeter_clickhouse_network_receive_bytes Total number of bytes received from network. ClickHouse   SendBytes bytes meter_clickhouse_instance_network_send_bytesmeter_clickhouse_network_send_bytes Total number of bytes send to network. 
ClickHouse    ClickHouse Query Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     QueryCount count meter_clickhouse_instance_query meter_clickhouse_query Number of executing queries. ClickHouse   SelectQueryCount count meter_clickhouse_instance_query_select meter_clickhouse_query_select Number of executing queries, but only for SELECT queries. ClickHouse   InsertQueryCount count meter_clickhouse_instance_query_insert meter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   SelectQueryRate count/sec meter_clickhouse_instance_query_select_rate meter_clickhouse_query_select_rate Number of SELECT queries per second. ClickHouse   InsertQueryRate count/sec meter_clickhouse_instance_query_insert_rate meter_clickhouse_query_insert_rate Number of INSERT queries per second. ClickHouse   Querytime microsec meter_clickhouse_instance_querytime_microseconds meter_clickhouse_querytime_microseconds Total time of all queries. ClickHouse   SelectQuerytime microsec meter_clickhouse_instance_querytime_select_microseconds meter_clickhouse_querytime_select_microseconds Total time of SELECT queries. ClickHouse   InsertQuerytime microsec meter_clickhouse_instance_querytime_insert_microseconds meter_clickhouse_querytime_insert_microseconds Total time of INSERT queries. ClickHouse   OtherQuerytime microsec meter_clickhouse_instance_querytime_other_microseconds meter_clickhouse_querytime_other_microseconds Total time of queries that are not SELECT or INSERT. ClickHouse   QuerySlowCount count meter_clickhouse_instance_query_slow meter_clickhouse_query_slow Number of reads from a file that were slow. ClickHouse    ClickHouse Insertion Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     InsertQueryCount count meter_clickhouse_instance_query_insert meter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   InsertedRowCount count meter_clickhouse_instance_inserted_rows meter_clickhouse_inserted_rows Number of rows INSERTed to all tables. ClickHouse   InsertedBytes bytes meter_clickhouse_instance_inserted_bytes meter_clickhouse_inserted_bytes Number of bytes INSERTed to all tables. ClickHouse   DelayedInsertCount count meter_clickhouse_instance_delayed_insert meter_clickhouse_delayed_insert Number of times the INSERT of a block to a MergeTree table was throttled due to a high number of active data parts for the partition. ClickHouse    ClickHouse Replicas Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     ReplicatedChecks count meter_clickhouse_instance_replicated_checks meter_clickhouse_replicated_checks Number of data parts checking for consistency. ClickHouse   ReplicatedFetch count meter_clickhouse_instance_replicated_fetch meter_clickhouse_replicated_fetch Number of data parts being fetched from replica. ClickHouse   ReplicatedSend count meter_clickhouse_instance_replicated_send meter_clickhouse_replicated_send Number of data parts being sent to replicas. ClickHouse    ClickHouse MergeTree Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     BackgroundMergeCount count meter_clickhouse_instance_background_merge meter_clickhouse_background_merge Number of executing background merges. ClickHouse   MergeRows count meter_clickhouse_instance_merge_rows meter_clickhouse_merge_rows Rows read for background merges. This is the number of rows before merge. 
ClickHouse   MergeUncompressedBytes bytes meter_clickhouse_instance_merge_uncompressed_bytes meter_clickhouse_merge_uncompressed_bytes Uncompressed bytes (for columns as they are stored in memory) that were read for background merges. This is the number before merge. ClickHouse   MoveCount count meter_clickhouse_instance_move meter_clickhouse_move Number of currently executing moves. ClickHouse   PartsActive count meter_clickhouse_instance_parts_active meter_clickhouse_parts_active Active data parts, used by current and upcoming SELECTs. ClickHouse   MutationsCount count meter_clickhouse_instance_mutations meter_clickhouse_mutations Number of mutations (ALTER DELETE/UPDATE). ClickHouse    ClickHouse Kafka Table Engine Supported Metrics When the table engine works with Apache Kafka.\nKafka lets you:\n Publish or subscribe to data flows. Organize fault-tolerant storage. Process streams as they become available.     Monitoring Panel Unit Metric Name Description Data Source     KafkaMessagesRead count meter_clickhouse_instance_kafka_messages_read meter_clickhouse_kafka_messages_read Number of Kafka messages already processed by ClickHouse. ClickHouse   KafkaWrites count meter_clickhouse_instance_kafka_writes meter_clickhouse_kafka_writes Number of writes (inserts) to Kafka tables. ClickHouse   KafkaConsumers count meter_clickhouse_instance_kafka_consumers meter_clickhouse_kafka_consumers Number of active Kafka consumers. ClickHouse   KafkaProducers count meter_clickhouse_instance_kafka_producers meter_clickhouse_kafka_producers Number of active Kafka producers created. ClickHouse    ClickHouse ZooKeeper Supported Metrics ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted.\n   Monitoring Panel Unit Metric Name Description Data Source     ZookeeperSession count meter_clickhouse_instance_zookeeper_session meter_clickhouse_zookeeper_session Number of sessions (connections) to ZooKeeper. ClickHouse   ZookeeperWatch count meter_clickhouse_instance_zookeeper_watch meter_clickhouse_zookeeper_watch Number of watches (event subscriptions) in ZooKeeper. ClickHouse   ZookeeperBytesSent bytes meter_clickhouse_instance_zookeeper_bytes_sent meter_clickhouse_zookeeper_bytes_sent Number of bytes sent over network while communicating with ZooKeeper. ClickHouse   ZookeeperBytesReceive bytes meter_clickhouse_instance_zookeeper_bytes_received meter_clickhouse_zookeeper_bytes_received Number of bytes received over network while communicating with ZooKeeper. ClickHouse    ClickHouse Keeper Supported Metrics ClickHouse Keeper provides the coordination system for data replication and distributed DDL query execution. ClickHouse Keeper is compatible with ZooKeeper.\nClickHouse Keeper can work in embedded mode or standalone cluster mode; the metrics below are for embedded mode.\n   Monitoring Panel Unit Metric Name Description Data Source     KeeperAliveConnections count meter_clickhouse_instance_keeper_connections_alive meter_clickhouse_keeper_connections_alive Number of alive connections for embedded ClickHouse Keeper. ClickHouse   KeeperOutstandingRequests count meter_clickhouse_instance_keeper_outstanding_requests meter_clickhouse_keeper_outstanding_requests Number of outstanding requests for embedded ClickHouse Keeper. ClickHouse    Imported Dependencies libs and their licenses. 
No new dependency.\nCompatibility no breaking changes.\nGeneral usage docs ","title":"Support ClickHouse Monitoring","url":"/docs/main/next/en/swip/swip-5/"},{"content":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; 
static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   
arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","title":"Support custom enhance","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/customize-enhance-trace/"},{"content":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag 
key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default 
operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","title":"Support custom enhance","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/customize-enhance-trace/"},{"content":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; 
static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log 
key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","title":"Support custom enhance","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/customize-enhance-trace/"},{"content":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. 
\u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; 
static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","title":"Support custom enhance","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/customize-enhance-trace/"},{"content":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. 
The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag 
key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      
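As a quick illustration of step 2 above, the agent.config entry is just the rule-file path (the absolute path here is a placeholder, not a recommended location):
plugin.customize.enhance_file=/absolute/path/to/customize_enhance.xml
If JVM system properties are preferred, the usual skywalking.-prefixed override (for example -Dskywalking.plugin.customize.enhance_file=/absolute/path/to/customize_enhance.xml) should also work, assuming the standard agent config override mechanism applies.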
","title":"Support custom enhance","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/customize-enhance-trace/"},{"content":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","title":"Support custom trace ignore","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/"},{"content":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","title":"Support custom trace ignore","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/"},{"content":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. 
Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","title":"Support custom trace ignore","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/"},{"content":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","title":"Support custom trace ignore","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/"},{"content":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","title":"Support custom trace ignore","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/"},{"content":"Support RocketMQ Monitoring Motivation RocketMQ is a cloud native messaging and streaming platform, making it simple to build event-driven applications. 
Now that Skywalking can monitor OpenTelemetry metrics, I want to add RocketMQ monitoring via the OpenTelemetry Collector, which fetches metrics from the RocketMQ Exporter\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes rocketmq-exporter collects metrics from RocketMQ and transport the data to OpenTelemetry collector, using SkyWalking openTelemetry receiver to receive these metrics。 Provide cluster, broker, and topic dimensions monitoring.\nRocketMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Messages Produced Today Count meter_rocketmq_cluster_messages_produced_today The number of cluster messages produced today. RocketMQ Exporter   Messages Consumed Today Count meter_rocketmq_cluster_messages_consumed_today The number of cluster messages consumed today. RocketMQ Exporter   Total Producer Tps Msg/sec meter_rocketmq_cluster_total_producer_tps The number of messages produced per second. RocketMQ Exporter   Total Consume Tps Msg/sec meter_rocketmq_cluster_total_consumer_tps The number of messages consumed per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_cluster_producer_message_size The max size of a message produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_cluster_consumer_message_size The max size of the consumed message per second. RocketMQ Exporter   Messages Produced Until Yesterday Count meter_rocketmq_cluster_messages_produced_until_yesterday The total number of messages put until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Messages Consumed Until Yesterday Count meter_rocketmq_cluster_messages_consumed_until_yesterday The total number of messages read until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Max Consumer Latency ms meter_rocketmq_cluster_max_consumer_latency The max number of consumer latency. RocketMQ Exporter   Max CommitLog Disk Ratio % meter_rocketmq_cluster_max_commitLog_disk_ratio The max utilization ratio of the commit log disk. RocketMQ Exporter   CommitLog Disk Ratio % meter_rocketmq_cluster_commitLog_disk_ratio The utilization ratio of the commit log disk per broker IP. RocketMQ Exporter   Pull ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_pull_threadPool_queue_head_wait_time The wait time in milliseconds for pulling threadPool queue per broker IP. RocketMQ Exporter   Send ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_send_threadPool_queue_head_wait_time The wait time in milliseconds for sending threadPool queue per broker IP. RocketMQ Exporter   Topic Count Count meter_rocketmq_cluster_topic_count The number of topics that received messages from the producer. RocketMQ Exporter   Broker Count Count meter_rocketmq_cluster_broker_count The number of brokers that received messages from the producer. RocketMQ Exporter    RocketMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Produce TPS Msg/sec meter_rocketmq_broker_produce_tps The number of broker produces messages per second. RocketMQ Exporter   Consume QPS Msg/sec meter_rocketmq_broker_consume_qps The number of broker consumes messages per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_broker_producer_message_size The max size of the messages produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_broker_consumer_message_size The max size of the messages consumed per second. 
RocketMQ Exporter    RocketMQ Topic Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Max Producer Message Size Byte meter_rocketmq_topic_max_producer_message_size The maximum number of messages produced. RocketMQ Exporter   Max Consumer Message Size Byte meter_rocketmq_topic_max_consumer_message_size The maximum number of messages consumed. RocketMQ Exporter   Consumer Latency ms meter_rocketmq_topic_consumer_latency Consumption delay time of a consumer group. RocketMQ Exporter   Producer Tps Msg/sec meter_rocketmq_topic_producer_tps The number of messages produced per second. RocketMQ Exporter   Consumer Group Tps Msg/sec meter_rocketmq_topic_consumer_group_tps The number of messages consumed per second per consumer group. RocketMQ Exporter   Producer Offset Count meter_rocketmq_topic_producer_offset The max progress of a topic\u0026rsquo;s production message. RocketMQ Exporter   Consumer Group Offset Count meter_rocketmq_topic_consumer_group_offset The max progress of a topic\u0026rsquo;s consumption message per consumer group. RocketMQ Exporter   Producer Message Size Byte/sec meter_rocketmq_topic_producer_message_size The max size of messages produced per second. RocketMQ Exporter   Consumer Message Size Byte/sec meter_rocketmq_topic_consumer_message_size The max size of messages consumed per second. RocketMQ Exporter   Consumer Group_Count Count meter_rocketmq_topic_consumer_group_count The number of consumer groups. RocketMQ Exporter   Broker Count Count meter_rocketmq_topic_broker_count The number of topics that received messages from the producer. RocketMQ Exporter    Imported Dependencies libs and their licenses. No new dependency.\nCompatibility no breaking changes.\nGeneral usage docs This feature is out of the box.\n","title":"Support RocketMQ Monitoring","url":"/docs/main/next/en/swip/swip-3/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Enable TLS  Enable (m)TLS on the OAP server side, read more on this documentation. Following the configuration to enable (m)TLS on the agent side.     Name Environment Variable Required Type Description     reporter.grpc.tls.enable SW_AGENT_REPORTER_GRPC_TLS_ENABLE TLS/mTLS Enable (m)TLS on the gRPC reporter.   reporter.grpc.tls.ca_path SW_AGENT_REPORTER_GRPC_TLS_CA_PATH TLS The path of the CA certificate file. eg: /path/to/ca.cert.   reporter.grpc.tls.client.key_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_KEY_PATH mTLS The path of the client private key file, eg: /path/to/client.pem.   
reporter.grpc.tls.client.client_cert_chain_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_CERT_CHAIN_PATH mTLS The path of the client certificate file, eg: /path/to/client.crt.   reporter.grpc.tls.insecure_skip_verify SW_AGENT_REPORTER_GRPC_TLS_INSECURE_SKIP_VERIFY TLS/mTLS Skip the server certificate and domain name verification.    ","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-go/latest/en/advanced-features/grpc-tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Enable TLS  Enable (m)TLS on the OAP server side, read more on this documentation. Following the configuration to enable (m)TLS on the agent side.     Name Environment Variable Required Type Description     reporter.grpc.tls.enable SW_AGENT_REPORTER_GRPC_TLS_ENABLE TLS/mTLS Enable (m)TLS on the gRPC reporter.   reporter.grpc.tls.ca_path SW_AGENT_REPORTER_GRPC_TLS_CA_PATH TLS The path of the CA certificate file. eg: /path/to/ca.cert.   reporter.grpc.tls.client.key_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_KEY_PATH mTLS The path of the client private key file, eg: /path/to/client.pem.   reporter.grpc.tls.client.client_cert_chain_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_CERT_CHAIN_PATH mTLS The path of the client certificate file, eg: /path/to/client.crt.   reporter.grpc.tls.insecure_skip_verify SW_AGENT_REPORTER_GRPC_TLS_INSECURE_SKIP_VERIFY TLS/mTLS Skip the server certificate and domain name verification.    ","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-go/next/en/advanced-features/grpc-tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. 
Please refer to gRPC Security of the OAP server doc for more details.  Enable TLS  Enable (m)TLS on the OAP server side, read more on this documentation. Following the configuration to enable (m)TLS on the agent side.     Name Environment Variable Required Type Description     reporter.grpc.tls.enable SW_AGENT_REPORTER_GRPC_TLS_ENABLE TLS/mTLS Enable (m)TLS on the gRPC reporter.   reporter.grpc.tls.ca_path SW_AGENT_REPORTER_GRPC_TLS_CA_PATH TLS The path of the CA certificate file. eg: /path/to/ca.cert.   reporter.grpc.tls.client.key_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_KEY_PATH mTLS The path of the client private key file, eg: /path/to/client.pem.   reporter.grpc.tls.client.client_cert_chain_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_CERT_CHAIN_PATH mTLS The path of the client certificate file, eg: /path/to/client.crt.   reporter.grpc.tls.insecure_skip_verify SW_AGENT_REPORTER_GRPC_TLS_INSECURE_SKIP_VERIFY TLS/mTLS Skip the server certificate and domain name verification.    ","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-go/v0.4.0/en/advanced-features/grpc-tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  
For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Note that the client-side certificate and private key must be issued by the same CA as the server-side certificate.\n","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a common way to secure data transported over the Internet. In some use cases, end users report the following background:\n Target (monitored) applications are in one region (also called a VPC), while the SkyWalking backend is in another region (VPC).\nBecause of that, the security requirement is obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8 (PEM) or PKCS#1 (DER). client.crt: A self-signed X.509 public key for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at the client side. In mTLS mode, client.crt and client.pem are required at the client side. Find server.crt, server.pem and ca.crt. Use them at the server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  The agent enables TLS automatically after the ca.crt file (by default in the /ca folder of the agent package) is detected. The no-CA TLS mode can be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  The sharing gRPC server must be started with mTLS enabled. More details can be found in the receiver-sharing-server section of application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy the CA certificate and the client certificate and private key into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES to target the host and port of the receiver-sharing-server.  For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Note that the client-side certificate and private key must be issued by the same CA as the server-side certificate.\n","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a common way to secure data transported over the Internet. 
In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. 
Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  
For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Note that the client-side certificate and private key must be issued by the same CA as the server-side certificate.\n","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/tls/"},{"content":"Supported Agent Configuration Options Below is the full list of supported configurations you can set to customize the agent behavior; please take some time to read the descriptions of what they can achieve.\n Usage: (Pass in intrusive setup; see the concrete example after the Agent Core Danger Zone table below)\n from skywalking import config, agent config.init(YourConfiguration=YourValue) agent.start()  Usage: (Pass by environment variables)\n export SW_AGENT_YourConfiguration=YourValue Agent Core Configuration Options    Configuration Environment Variable Type Default Value Description     agent_collector_backend_services SW_AGENT_COLLECTOR_BACKEND_SERVICES \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; oap_host:oap_port The backend OAP server address, 11800 is the default OAP gRPC port, 12800 is HTTP, Kafka ignores this option and uses the kafka_bootstrap_servers option. This option should be changed according to the selected protocol   agent_protocol SW_AGENT_PROTOCOL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; grpc The protocol to communicate with the backend OAP, http, grpc or kafka, we highly suggest using grpc in production as it\u0026rsquo;s better optimized than http. The kafka protocol provides an alternative way to submit data to the backend.   agent_name SW_AGENT_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; Python Service Name The name of your awesome Python service   agent_instance_name SW_AGENT_INSTANCE_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; str(uuid.uuid1()).replace('-', \u0026lsquo;') The name of this particular awesome Python service instance   agent_namespace SW_AGENT_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The agent namespace of the Python service (available as a tag and the suffix of the service name)   kafka_bootstrap_servers SW_KAFKA_BOOTSTRAP_SERVERS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; localhost:9092 A list of host/port pairs to use for establishing the initial connection to your Kafka cluster. It is in the form of host1:port1,host2:port2,\u0026hellip; (used for the Kafka reporter protocol)   kafka_namespace SW_KAFKA_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The kafka namespace specified by OAP side SW_NAMESPACE, prepends the following kafka topic names with a -.   
kafka_topic_management SW_KAFKA_TOPIC_MANAGEMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-managements Specifying Kafka topic name for service instance reporting and registering, this should be in sync with OAP   kafka_topic_segment SW_KAFKA_TOPIC_SEGMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-segments Specifying Kafka topic name for Tracing data, this should be in sync with OAP   kafka_topic_log SW_KAFKA_TOPIC_LOG \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-logs Specifying Kafka topic name for Log data, this should be in sync with OAP   kafka_topic_meter SW_KAFKA_TOPIC_METER \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-meters Specifying Kafka topic name for Meter data, this should be in sync with OAP   kafka_reporter_custom_configurations SW_KAFKA_REPORTER_CUSTOM_CONFIGURATIONS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The configs to init KafkaProducer, supports the basic arguments (whose type is either str, bool, or int) listed here This config only works from env variables, each one should be passed in SW_KAFKA_REPORTER_CONFIG_\u0026lt;KEY_NAME\u0026gt;   agent_force_tls SW_AGENT_FORCE_TLS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Use TLS for communication with SkyWalking OAP (no cert required)   agent_authentication SW_AGENT_AUTHENTICATION \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The authentication token to verify that the agent is trusted by the backend OAP, as for how to configure the backend, refer to the yaml.   agent_logging_level SW_AGENT_LOGGING_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; INFO The level of agent self-logs, could be one of CRITICAL, FATAL, ERROR, WARN(WARNING), INFO, DEBUG. Please turn on debug if an issue is encountered to find out what\u0026rsquo;s going on    Agent Core Danger Zone    Configuration Environment Variable Type Default Value Description     agent_collector_heartbeat_period SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 30 The agent will exchange heartbeat message with SkyWalking OAP backend every period seconds   agent_collector_properties_report_period_factor SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The agent will report service instance properties every factor * heartbeat period seconds default: 10*30 = 300 seconds   agent_instance_properties_json SW_AGENT_INSTANCE_PROPERTIES_JSON \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  A custom JSON string to be reported as service instance properties, e.g. {\u0026quot;key\u0026quot;: \u0026quot;value\u0026quot;}   agent_experimental_fork_support SW_AGENT_EXPERIMENTAL_FORK_SUPPORT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False The agent will restart itself in any os.fork()-ed child process. Important Note: it\u0026rsquo;s not suitable for short-lived processes as each one will create a new instance in SkyWalking dashboard in format of service_instance-child(pid). This feature may not work when a precise combination of gRPC + Python 3.7 + subprocess (not fork) is used together. The agent will output a warning log when using on Python 3.7 for such a reason.   agent_queue_timeout SW_AGENT_QUEUE_TIMEOUT \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1 DANGEROUS - This option controls the interval of each bulk report from telemetry data queues Do not modify unless you have evaluated its impact given your service load.    
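As a concrete illustration of the intrusive setup shown above, the short sketch below passes a few of the core options from the tables; the backend address and service name are placeholder values, not defaults:
from skywalking import agent, config
# Hypothetical values; any option documented in the tables above can be passed to config.init() the same way.
config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='my-python-service', agent_protocol='grpc')
agent.start()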
SW_PYTHON Auto Instrumentation CLI    Configuration Environment Variable Type Default Value Description     agent_sw_python_bootstrap_propagate SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the child process agent bootstrap behavior in sw-python CLI, if set to False, a valid child process will not boot up a SkyWalking Agent. Please refer to the CLI Guide for details.   agent_sw_python_cli_debug_enabled SW_AGENT_SW_PYTHON_CLI_DEBUG_ENABLED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the CLI and agent logging debug mode, if set to True, the CLI and agent will print out debug logs. Please refer to the CLI Guide for details. Important: this config will set agent logging level to DEBUG as well, do not use it in production otherwise it will flood your logs. This normally shouldn\u0026rsquo;t be pass as a simple flag -d will be the same.    Trace Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_trace_reporter_max_buffer_size SW_AGENT_TRACE_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending the segment data to backend, segments beyond this are silently dropped   agent_trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. the current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?.   agent_ignore_suffix SW_AGENT_IGNORE_SUFFIX \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.   correlation_element_max_number SW_CORRELATION_ELEMENT_MAX_NUMBER \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 3 Max element count of the correlation context.   correlation_value_max_length SW_CORRELATION_VALUE_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 128 Max value length of correlation context element.    Profiling Configurations    Configuration Environment Variable Type Default Value Description     agent_profile_active SW_AGENT_PROFILE_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will enable profiler when user create a new profiling task.   agent_collector_get_profile_task_interval SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The number of seconds between two profile task query.   agent_profile_max_parallel SW_AGENT_PROFILE_MAX_PARALLEL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 5 The number of parallel monitor segment count.   agent_profile_duration SW_AGENT_PROFILE_DURATION \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The maximum monitor segment time(minutes), if current segment monitor time out of limit, then stop it.   
agent_profile_dump_max_stack_depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 500 The number of max dump thread stack depth   agent_profile_snapshot_transport_buffer_size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 50 The number of snapshot transport to backend buffer size    Log Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_log_reporter_active SW_AGENT_LOG_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected logs to the OAP or Satellite. Otherwise, it disables the feature.   agent_log_reporter_safe_mode SW_AGENT_LOG_REPORTER_SAFE_MODE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If True, Python agent will filter out HTTP basic auth information from log records. By default, it disables the feature due to potential performance impact brought by regular expression   agent_log_reporter_max_buffer_size SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.   agent_log_reporter_level SW_AGENT_LOG_REPORTER_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; WARNING This config specifies the logger levels of concern, any logs with a level below the config will be ignored.   agent_log_reporter_ignore_filter SW_AGENT_LOG_REPORTER_IGNORE_FILTER \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config customizes whether to ignore the application-defined logger filters, if True, all logs are reported disregarding any filter rules.   agent_log_reporter_formatted SW_AGENT_LOG_REPORTER_FORMATTED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, the log reporter will transmit the logs as formatted. Otherwise, puts logRecord.msg and logRecord.args into message content and tags(argument.n), respectively. Along with an exception tag if an exception was raised. Only applies to logging module.   agent_log_reporter_layout SW_AGENT_LOG_REPORTER_LAYOUT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; %(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s The log reporter formats the logRecord message based on the layout given. Only applies to logging module.   agent_cause_exception_depth SW_AGENT_CAUSE_EXCEPTION_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 This configuration is shared by log reporter and tracer. This config limits agent to report up to limit stacktrace, please refer to [Python traceback](../ https://docs.python.org/3/library/traceback.html#traceback.print_tb) for more explanations.    Meter Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_meter_reporter_active SW_AGENT_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected meters to the OAP or Satellite. Otherwise, it disables the feature.   agent_meter_reporter_max_buffer_size SW_AGENT_METER_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending meter data to backend, meters beyond this are silently dropped.   
agent_meter_reporter_period SW_AGENT_METER_REPORTER_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The interval in seconds between each meter data report   agent_pvm_meter_reporter_active SW_AGENT_PVM_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected Python Virtual Machine (PVM) meters to the OAP or Satellite. Otherwise, it disables the feature.    Plugin Related configurations    Configuration Environment Variable Type Default Value Description     agent_disable_plugins SW_AGENT_DISABLE_PLUGINS \u0026lt;class \u0026lsquo;list\u0026rsquo;\u0026gt; [''] The name patterns in comma-separated pattern, plugins whose name matches one of the pattern won\u0026rsquo;t be installed   plugin_http_http_params_length_threshold SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1024 When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.   plugin_http_ignore_method SW_PLUGIN_HTTP_IGNORE_METHOD \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  Comma-delimited list of http methods to ignore (GET, POST, HEAD, OPTIONS, etc\u0026hellip;)   plugin_sql_parameters_max_length SW_PLUGIN_SQL_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 0 The maximum length of the collected parameter, parameters longer than the specified length will be truncated, length 0 turns off parameter tracing   plugin_pymongo_trace_parameters SW_PLUGIN_PYMONGO_TRACE_PARAMETERS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Indicates whether to collect the filters of pymongo   plugin_pymongo_parameters_max_length SW_PLUGIN_PYMONGO_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of the collected filters, filters longer than the specified length will be truncated   plugin_elasticsearch_trace_dsl SW_PLUGIN_ELASTICSEARCH_TRACE_DSL \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false   plugin_flask_collect_http_params SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Flask plugin should collect the parameters of the request.   plugin_sanic_collect_http_params SW_PLUGIN_SANIC_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Sanic plugin should collect the parameters of the request.   plugin_django_collect_http_params SW_PLUGIN_DJANGO_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Django plugin should collect the parameters of the request.   plugin_fastapi_collect_http_params SW_PLUGIN_FASTAPI_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the FastAPI plugin should collect the parameters of the request.   plugin_bottle_collect_http_params SW_PLUGIN_BOTTLE_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Bottle plugin should collect the parameters of the request.   
plugin_celery_parameters_length SW_PLUGIN_CELERY_PARAMETERS_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of celery functions parameters, longer than this will be truncated, 0 turns off    ","title":"Supported Agent Configuration Options","url":"/docs/skywalking-python/latest/en/setup/configuration/"},{"content":"Supported Agent Configuration Options Below is the full list of supported configurations you can set to customize the agent behavior, please take some time to read the descriptions for what they can achieve.\n Usage: (Pass in intrusive setup)\n from skywalking import config, agent config.init(YourConfiguration=YourValue)) agent.start()  Usage: (Pass by environment variables)\n export SW_AGENT_YourConfiguration=YourValue Agent Core Configuration Options    Configuration Environment Variable Type Default Value Description     agent_collector_backend_services SW_AGENT_COLLECTOR_BACKEND_SERVICES \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; oap_host:oap_port The backend OAP server address, 11800 is default OAP gRPC port, 12800 is HTTP, Kafka ignores this option and uses kafka_bootstrap_servers option. This option should be changed accordingly with selected protocol   agent_protocol SW_AGENT_PROTOCOL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; grpc The protocol to communicate with the backend OAP, http, grpc or kafka, we highly suggest using grpc in production as it\u0026rsquo;s well optimized than http. The kafka protocol provides an alternative way to submit data to the backend.   agent_name SW_AGENT_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; Python Service Name The name of your awesome Python service   agent_instance_name SW_AGENT_INSTANCE_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; str(uuid.uuid1()).replace('-', \u0026lsquo;') The name of this particular awesome Python service instance   agent_namespace SW_AGENT_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The agent namespace of the Python service (available as tag and the suffix of service name)   kafka_bootstrap_servers SW_KAFKA_BOOTSTRAP_SERVERS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; localhost:9092 A list of host/port pairs to use for establishing the initial connection to your Kafka cluster. It is in the form of host1:port1,host2:port2,\u0026hellip; (used for Kafka reporter protocol)   kafka_namespace SW_KAFKA_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The kafka namespace specified by OAP side SW_NAMESPACE, prepends the following kafka topic names with a -.   
kafka_topic_management SW_KAFKA_TOPIC_MANAGEMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-managements Specifying Kafka topic name for service instance reporting and registering, this should be in sync with OAP   kafka_topic_segment SW_KAFKA_TOPIC_SEGMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-segments Specifying Kafka topic name for Tracing data, this should be in sync with OAP   kafka_topic_log SW_KAFKA_TOPIC_LOG \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-logs Specifying Kafka topic name for Log data, this should be in sync with OAP   kafka_topic_meter SW_KAFKA_TOPIC_METER \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-meters Specifying Kafka topic name for Meter data, this should be in sync with OAP   kafka_reporter_custom_configurations SW_KAFKA_REPORTER_CUSTOM_CONFIGURATIONS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The configs to init KafkaProducer, supports the basic arguments (whose type is either str, bool, or int) listed here This config only works from env variables, each one should be passed in SW_KAFKA_REPORTER_CONFIG_\u0026lt;KEY_NAME\u0026gt;   agent_force_tls SW_AGENT_FORCE_TLS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Use TLS for communication with SkyWalking OAP (no cert required)   agent_authentication SW_AGENT_AUTHENTICATION \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The authentication token to verify that the agent is trusted by the backend OAP, as for how to configure the backend, refer to the yaml.   agent_logging_level SW_AGENT_LOGGING_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; INFO The level of agent self-logs, could be one of CRITICAL, FATAL, ERROR, WARN(WARNING), INFO, DEBUG. Please turn on debug if an issue is encountered to find out what\u0026rsquo;s going on    Agent Core Danger Zone    Configuration Environment Variable Type Default Value Description     agent_collector_heartbeat_period SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 30 The agent will exchange heartbeat message with SkyWalking OAP backend every period seconds   agent_collector_properties_report_period_factor SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The agent will report service instance properties every factor * heartbeat period seconds default: 10*30 = 300 seconds   agent_instance_properties_json SW_AGENT_INSTANCE_PROPERTIES_JSON \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  A custom JSON string to be reported as service instance properties, e.g. {\u0026quot;key\u0026quot;: \u0026quot;value\u0026quot;}   agent_experimental_fork_support SW_AGENT_EXPERIMENTAL_FORK_SUPPORT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False The agent will restart itself in any os.fork()-ed child process. Important Note: it\u0026rsquo;s not suitable for short-lived processes as each one will create a new instance in SkyWalking dashboard in format of service_instance-child(pid). This feature may not work when a precise combination of gRPC + Python 3.7 + subprocess (not fork) is used together. The agent will output a warning log when using on Python 3.7 for such a reason.   agent_queue_timeout SW_AGENT_QUEUE_TIMEOUT \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1 DANGEROUS - This option controls the interval of each bulk report from telemetry data queues Do not modify unless you have evaluated its impact given your service load.   
agent_asyncio_enhancement SW_AGENT_ASYNCIO_ENHANCEMENT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Replace the threads to asyncio coroutines to report telemetry data to the OAP. This option is experimental and may not work as expected.    SW_PYTHON Auto Instrumentation CLI    Configuration Environment Variable Type Default Value Description     agent_sw_python_bootstrap_propagate SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the child process agent bootstrap behavior in sw-python CLI, if set to False, a valid child process will not boot up a SkyWalking Agent. Please refer to the CLI Guide for details.   agent_sw_python_cli_debug_enabled SW_AGENT_SW_PYTHON_CLI_DEBUG_ENABLED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the CLI and agent logging debug mode, if set to True, the CLI and agent will print out debug logs. Please refer to the CLI Guide for details. Important: this config will set agent logging level to DEBUG as well, do not use it in production otherwise it will flood your logs. This normally shouldn\u0026rsquo;t be pass as a simple flag -d will be the same.    Trace Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_trace_reporter_max_buffer_size SW_AGENT_TRACE_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending the segment data to backend, segments beyond this are silently dropped   agent_trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. the current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?.   agent_ignore_suffix SW_AGENT_IGNORE_SUFFIX \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.   correlation_element_max_number SW_CORRELATION_ELEMENT_MAX_NUMBER \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 3 Max element count of the correlation context.   correlation_value_max_length SW_CORRELATION_VALUE_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 128 Max value length of correlation context element.    Profiling Configurations    Configuration Environment Variable Type Default Value Description     agent_profile_active SW_AGENT_PROFILE_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will enable profiler when user create a new profiling task.   agent_collector_get_profile_task_interval SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The number of seconds between two profile task query.   agent_profile_max_parallel SW_AGENT_PROFILE_MAX_PARALLEL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 5 The number of parallel monitor segment count.   agent_profile_duration SW_AGENT_PROFILE_DURATION \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The maximum monitor segment time(minutes), if current segment monitor time out of limit, then stop it.   
agent_profile_dump_max_stack_depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 500 The number of max dump thread stack depth   agent_profile_snapshot_transport_buffer_size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 50 The number of snapshot transport to backend buffer size    Log Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_log_reporter_active SW_AGENT_LOG_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected logs to the OAP or Satellite. Otherwise, it disables the feature.   agent_log_reporter_safe_mode SW_AGENT_LOG_REPORTER_SAFE_MODE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If True, Python agent will filter out HTTP basic auth information from log records. By default, it disables the feature due to potential performance impact brought by regular expression   agent_log_reporter_max_buffer_size SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.   agent_log_reporter_level SW_AGENT_LOG_REPORTER_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; WARNING This config specifies the logger levels of concern, any logs with a level below the config will be ignored.   agent_log_reporter_ignore_filter SW_AGENT_LOG_REPORTER_IGNORE_FILTER \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config customizes whether to ignore the application-defined logger filters, if True, all logs are reported disregarding any filter rules.   agent_log_reporter_formatted SW_AGENT_LOG_REPORTER_FORMATTED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, the log reporter will transmit the logs as formatted. Otherwise, puts logRecord.msg and logRecord.args into message content and tags(argument.n), respectively. Along with an exception tag if an exception was raised. Only applies to logging module.   agent_log_reporter_layout SW_AGENT_LOG_REPORTER_LAYOUT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; %(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s The log reporter formats the logRecord message based on the layout given. Only applies to logging module.   agent_cause_exception_depth SW_AGENT_CAUSE_EXCEPTION_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 This configuration is shared by log reporter and tracer. This config limits agent to report up to limit stacktrace, please refer to [Python traceback](../ https://docs.python.org/3/library/traceback.html#traceback.print_tb) for more explanations.    Meter Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_meter_reporter_active SW_AGENT_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected meters to the OAP or Satellite. Otherwise, it disables the feature.   agent_meter_reporter_max_buffer_size SW_AGENT_METER_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending meter data to backend, meters beyond this are silently dropped.   
agent_meter_reporter_period SW_AGENT_METER_REPORTER_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The interval in seconds between each meter data report   agent_pvm_meter_reporter_active SW_AGENT_PVM_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected Python Virtual Machine (PVM) meters to the OAP or Satellite. Otherwise, it disables the feature.    Plugin Related configurations    Configuration Environment Variable Type Default Value Description     agent_disable_plugins SW_AGENT_DISABLE_PLUGINS \u0026lt;class \u0026lsquo;list\u0026rsquo;\u0026gt; [''] The name patterns in comma-separated pattern, plugins whose name matches one of the pattern won\u0026rsquo;t be installed   plugin_http_http_params_length_threshold SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1024 When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.   plugin_http_ignore_method SW_PLUGIN_HTTP_IGNORE_METHOD \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  Comma-delimited list of http methods to ignore (GET, POST, HEAD, OPTIONS, etc\u0026hellip;)   plugin_sql_parameters_max_length SW_PLUGIN_SQL_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 0 The maximum length of the collected parameter, parameters longer than the specified length will be truncated, length 0 turns off parameter tracing   plugin_pymongo_trace_parameters SW_PLUGIN_PYMONGO_TRACE_PARAMETERS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Indicates whether to collect the filters of pymongo   plugin_pymongo_parameters_max_length SW_PLUGIN_PYMONGO_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of the collected filters, filters longer than the specified length will be truncated   plugin_elasticsearch_trace_dsl SW_PLUGIN_ELASTICSEARCH_TRACE_DSL \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false   plugin_flask_collect_http_params SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Flask plugin should collect the parameters of the request.   plugin_sanic_collect_http_params SW_PLUGIN_SANIC_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Sanic plugin should collect the parameters of the request.   plugin_django_collect_http_params SW_PLUGIN_DJANGO_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Django plugin should collect the parameters of the request.   plugin_fastapi_collect_http_params SW_PLUGIN_FASTAPI_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the FastAPI plugin should collect the parameters of the request.   plugin_bottle_collect_http_params SW_PLUGIN_BOTTLE_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Bottle plugin should collect the parameters of the request.   
plugin_celery_parameters_length SW_PLUGIN_CELERY_PARAMETERS_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of celery functions parameters, longer than this will be truncated, 0 turns off    ","title":"Supported Agent Configuration Options","url":"/docs/skywalking-python/next/en/setup/configuration/"},{"content":"Supported Agent Configuration Options Below is the full list of supported configurations you can set to customize the agent behavior, please take some time to read the descriptions for what they can achieve.\n Usage: (Pass in intrusive setup)\n from skywalking import config, agent config.init(YourConfiguration=YourValue)) agent.start()  Usage: (Pass by environment variables)\n export SW_AGENT_YourConfiguration=YourValue Agent Core Configuration Options    Configuration Environment Variable Type Default Value Description     agent_collector_backend_services SW_AGENT_COLLECTOR_BACKEND_SERVICES \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; oap_host:oap_port The backend OAP server address, 11800 is default OAP gRPC port, 12800 is HTTP, Kafka ignores this option and uses kafka_bootstrap_servers option. This option should be changed accordingly with selected protocol   agent_protocol SW_AGENT_PROTOCOL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; grpc The protocol to communicate with the backend OAP, http, grpc or kafka, we highly suggest using grpc in production as it\u0026rsquo;s well optimized than http. The kafka protocol provides an alternative way to submit data to the backend.   agent_name SW_AGENT_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; Python Service Name The name of your awesome Python service   agent_instance_name SW_AGENT_INSTANCE_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; str(uuid.uuid1()).replace('-', \u0026lsquo;') The name of this particular awesome Python service instance   agent_namespace SW_AGENT_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The agent namespace of the Python service (available as tag and the suffix of service name)   kafka_bootstrap_servers SW_KAFKA_BOOTSTRAP_SERVERS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; localhost:9092 A list of host/port pairs to use for establishing the initial connection to your Kafka cluster. It is in the form of host1:port1,host2:port2,\u0026hellip; (used for Kafka reporter protocol)   kafka_namespace SW_KAFKA_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The kafka namespace specified by OAP side SW_NAMESPACE, prepends the following kafka topic names with a -.   
kafka_topic_management SW_KAFKA_TOPIC_MANAGEMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-managements Specifying Kafka topic name for service instance reporting and registering, this should be in sync with OAP   kafka_topic_segment SW_KAFKA_TOPIC_SEGMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-segments Specifying Kafka topic name for Tracing data, this should be in sync with OAP   kafka_topic_log SW_KAFKA_TOPIC_LOG \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-logs Specifying Kafka topic name for Log data, this should be in sync with OAP   kafka_topic_meter SW_KAFKA_TOPIC_METER \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-meters Specifying Kafka topic name for Meter data, this should be in sync with OAP   kafka_reporter_custom_configurations SW_KAFKA_REPORTER_CUSTOM_CONFIGURATIONS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The configs to init KafkaProducer, supports the basic arguments (whose type is either str, bool, or int) listed here This config only works from env variables, each one should be passed in SW_KAFKA_REPORTER_CONFIG_\u0026lt;KEY_NAME\u0026gt;   agent_force_tls SW_AGENT_FORCE_TLS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Use TLS for communication with SkyWalking OAP (no cert required)   agent_authentication SW_AGENT_AUTHENTICATION \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The authentication token to verify that the agent is trusted by the backend OAP, as for how to configure the backend, refer to the yaml.   agent_logging_level SW_AGENT_LOGGING_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; INFO The level of agent self-logs, could be one of CRITICAL, FATAL, ERROR, WARN(WARNING), INFO, DEBUG. Please turn on debug if an issue is encountered to find out what\u0026rsquo;s going on    Agent Core Danger Zone    Configuration Environment Variable Type Default Value Description     agent_collector_heartbeat_period SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 30 The agent will exchange heartbeat message with SkyWalking OAP backend every period seconds   agent_collector_properties_report_period_factor SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The agent will report service instance properties every factor * heartbeat period seconds default: 10*30 = 300 seconds   agent_instance_properties_json SW_AGENT_INSTANCE_PROPERTIES_JSON \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  A custom JSON string to be reported as service instance properties, e.g. {\u0026quot;key\u0026quot;: \u0026quot;value\u0026quot;}   agent_experimental_fork_support SW_AGENT_EXPERIMENTAL_FORK_SUPPORT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False The agent will restart itself in any os.fork()-ed child process. Important Note: it\u0026rsquo;s not suitable for short-lived processes as each one will create a new instance in SkyWalking dashboard in format of service_instance-child(pid). This feature may not work when a precise combination of gRPC + Python 3.7 + subprocess (not fork) is used together. The agent will output a warning log when using on Python 3.7 for such a reason.   agent_queue_timeout SW_AGENT_QUEUE_TIMEOUT \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1 DANGEROUS - This option controls the interval of each bulk report from telemetry data queues Do not modify unless you have evaluated its impact given your service load.    
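For the TLS and authentication options in the core configuration table above, a minimal sketch of the intrusive setup (the backend address and token below are placeholders, not real values):
from skywalking import agent, config
# agent_force_tls and agent_authentication are the core options documented above; the values here are hypothetical.
config.init(agent_collector_backend_services='oap.example.com:11800', agent_force_tls=True, agent_authentication='your-token-here')
agent.start()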
SW_PYTHON Auto Instrumentation CLI    Configuration Environment Variable Type Default Value Description     agent_sw_python_bootstrap_propagate SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the child process agent bootstrap behavior in sw-python CLI, if set to False, a valid child process will not boot up a SkyWalking Agent. Please refer to the CLI Guide for details.   agent_sw_python_cli_debug_enabled SW_AGENT_SW_PYTHON_CLI_DEBUG_ENABLED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the CLI and agent logging debug mode, if set to True, the CLI and agent will print out debug logs. Please refer to the CLI Guide for details. Important: this config will set agent logging level to DEBUG as well, do not use it in production otherwise it will flood your logs. This normally shouldn\u0026rsquo;t be pass as a simple flag -d will be the same.    Trace Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_trace_reporter_max_buffer_size SW_AGENT_TRACE_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending the segment data to backend, segments beyond this are silently dropped   agent_trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. the current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?.   agent_ignore_suffix SW_AGENT_IGNORE_SUFFIX \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.   correlation_element_max_number SW_CORRELATION_ELEMENT_MAX_NUMBER \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 3 Max element count of the correlation context.   correlation_value_max_length SW_CORRELATION_VALUE_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 128 Max value length of correlation context element.    Profiling Configurations    Configuration Environment Variable Type Default Value Description     agent_profile_active SW_AGENT_PROFILE_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will enable profiler when user create a new profiling task.   agent_collector_get_profile_task_interval SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The number of seconds between two profile task query.   agent_profile_max_parallel SW_AGENT_PROFILE_MAX_PARALLEL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 5 The number of parallel monitor segment count.   agent_profile_duration SW_AGENT_PROFILE_DURATION \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The maximum monitor segment time(minutes), if current segment monitor time out of limit, then stop it.   
agent_profile_dump_max_stack_depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 500 The number of max dump thread stack depth   agent_profile_snapshot_transport_buffer_size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 50 The number of snapshot transport to backend buffer size    Log Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_log_reporter_active SW_AGENT_LOG_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected logs to the OAP or Satellite. Otherwise, it disables the feature.   agent_log_reporter_safe_mode SW_AGENT_LOG_REPORTER_SAFE_MODE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If True, Python agent will filter out HTTP basic auth information from log records. By default, it disables the feature due to potential performance impact brought by regular expression   agent_log_reporter_max_buffer_size SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.   agent_log_reporter_level SW_AGENT_LOG_REPORTER_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; WARNING This config specifies the logger levels of concern, any logs with a level below the config will be ignored.   agent_log_reporter_ignore_filter SW_AGENT_LOG_REPORTER_IGNORE_FILTER \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config customizes whether to ignore the application-defined logger filters, if True, all logs are reported disregarding any filter rules.   agent_log_reporter_formatted SW_AGENT_LOG_REPORTER_FORMATTED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, the log reporter will transmit the logs as formatted. Otherwise, puts logRecord.msg and logRecord.args into message content and tags(argument.n), respectively. Along with an exception tag if an exception was raised. Only applies to logging module.   agent_log_reporter_layout SW_AGENT_LOG_REPORTER_LAYOUT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; %(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s The log reporter formats the logRecord message based on the layout given. Only applies to logging module.   agent_cause_exception_depth SW_AGENT_CAUSE_EXCEPTION_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 This configuration is shared by log reporter and tracer. This config limits agent to report up to limit stacktrace, please refer to [Python traceback](../ https://docs.python.org/3/library/traceback.html#traceback.print_tb) for more explanations.    Meter Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_meter_reporter_active SW_AGENT_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected meters to the OAP or Satellite. Otherwise, it disables the feature.   agent_meter_reporter_max_buffer_size SW_AGENT_METER_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending meter data to backend, meters beyond this are silently dropped.   
agent_meter_reporter_period SW_AGENT_METER_REPORTER_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The interval in seconds between each meter data report   agent_pvm_meter_reporter_active SW_AGENT_PVM_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected Python Virtual Machine (PVM) meters to the OAP or Satellite. Otherwise, it disables the feature.    Plugin Related configurations    Configuration Environment Variable Type Default Value Description     agent_disable_plugins SW_AGENT_DISABLE_PLUGINS \u0026lt;class \u0026lsquo;list\u0026rsquo;\u0026gt; [''] The name patterns in comma-separated pattern, plugins whose name matches one of the pattern won\u0026rsquo;t be installed   plugin_http_http_params_length_threshold SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1024 When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.   plugin_http_ignore_method SW_PLUGIN_HTTP_IGNORE_METHOD \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  Comma-delimited list of http methods to ignore (GET, POST, HEAD, OPTIONS, etc\u0026hellip;)   plugin_sql_parameters_max_length SW_PLUGIN_SQL_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 0 The maximum length of the collected parameter, parameters longer than the specified length will be truncated, length 0 turns off parameter tracing   plugin_pymongo_trace_parameters SW_PLUGIN_PYMONGO_TRACE_PARAMETERS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Indicates whether to collect the filters of pymongo   plugin_pymongo_parameters_max_length SW_PLUGIN_PYMONGO_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of the collected filters, filters longer than the specified length will be truncated   plugin_elasticsearch_trace_dsl SW_PLUGIN_ELASTICSEARCH_TRACE_DSL \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false   plugin_flask_collect_http_params SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Flask plugin should collect the parameters of the request.   plugin_sanic_collect_http_params SW_PLUGIN_SANIC_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Sanic plugin should collect the parameters of the request.   plugin_django_collect_http_params SW_PLUGIN_DJANGO_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Django plugin should collect the parameters of the request.   plugin_fastapi_collect_http_params SW_PLUGIN_FASTAPI_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the FastAPI plugin should collect the parameters of the request.   plugin_bottle_collect_http_params SW_PLUGIN_BOTTLE_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Bottle plugin should collect the parameters of the request.   
plugin_celery_parameters_length SW_PLUGIN_CELERY_PARAMETERS_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of celery functions parameters, longer than this will be truncated, 0 turns off    ","title":"Supported Agent Configuration Options","url":"/docs/skywalking-python/v1.0.1/en/setup/configuration/"},{"content":"Supported Libraries This document is automatically generated from the SkyWalking Python testing matrix.\nThe column of versions only indicates the set of library versions tested in a best-effort manner.\nIf you find newer major versions that are missing from the following table, and it\u0026rsquo;s not documented as a limitation, please PR to update the test matrix in the plugin.\nVersions marked as NOT SUPPORTED may be due to an incompatible version with Python in the original library or a limitation of SkyWalking auto-instrumentation (welcome to contribute!)\nPlugin Support Table    Library Python Version - Lib Version Plugin Name     aiohttp Python \u0026gt;=3.7 - [\u0026lsquo;3.7.*']; sw_aiohttp   aioredis Python \u0026gt;=3.7 - [\u0026lsquo;2.0.*']; sw_aioredis   aiormq Python \u0026gt;=3.7 - [\u0026lsquo;6.3\u0026rsquo;, \u0026lsquo;6.4\u0026rsquo;]; sw_aiormq   amqp Python \u0026gt;=3.7 - [\u0026lsquo;2.6.1\u0026rsquo;]; sw_amqp   asyncpg Python \u0026gt;=3.7 - [\u0026lsquo;0.25.0\u0026rsquo;]; sw_asyncpg   bottle Python \u0026gt;=3.7 - [\u0026lsquo;0.12.23\u0026rsquo;]; sw_bottle   celery Python \u0026gt;=3.7 - [\u0026lsquo;5.1\u0026rsquo;]; sw_celery   confluent_kafka Python \u0026gt;=3.7 - [\u0026lsquo;1.5.0\u0026rsquo;, \u0026lsquo;1.7.0\u0026rsquo;, \u0026lsquo;1.8.2\u0026rsquo;]; sw_confluent_kafka   django Python \u0026gt;=3.7 - [\u0026lsquo;3.2\u0026rsquo;]; sw_django   elasticsearch Python \u0026gt;=3.7 - [\u0026lsquo;7.13\u0026rsquo;, \u0026lsquo;7.14\u0026rsquo;, \u0026lsquo;7.15\u0026rsquo;]; sw_elasticsearch   hug Python \u0026gt;=3.11 - NOT SUPPORTED YET; Python \u0026gt;=3.10 - [\u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; Python \u0026gt;=3.7 - [\u0026lsquo;2.4.1\u0026rsquo;, \u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; sw_falcon   fastapi Python \u0026gt;=3.7 - [\u0026lsquo;0.89.\u0026rsquo;, \u0026lsquo;0.88.']; sw_fastapi   flask Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_flask   happybase Python \u0026gt;=3.7 - [\u0026lsquo;1.2.0\u0026rsquo;]; sw_happybase   http_server Python \u0026gt;=3.7 - ['*']; sw_http_server   werkzeug Python \u0026gt;=3.7 - [\u0026lsquo;1.0.1\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_http_server   httpx Python \u0026gt;=3.7 - [\u0026lsquo;0.23.\u0026rsquo;, \u0026lsquo;0.22.']; sw_httpx   kafka-python Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_kafka   loguru Python \u0026gt;=3.7 - [\u0026lsquo;0.6.0\u0026rsquo;, \u0026lsquo;0.7.0\u0026rsquo;]; sw_loguru   mysqlclient Python \u0026gt;=3.7 - [\u0026lsquo;2.1.*']; sw_mysqlclient   psycopg[binary] Python \u0026gt;=3.11 - [\u0026lsquo;3.1.']; Python \u0026gt;=3.7 - [\u0026lsquo;3.0.18\u0026rsquo;, \u0026lsquo;3.1.']; sw_psycopg   psycopg2-binary Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;2.9\u0026rsquo;]; sw_psycopg2   pymongo Python \u0026gt;=3.7 - [\u0026lsquo;3.11.*']; sw_pymongo   pymysql Python \u0026gt;=3.7 - [\u0026lsquo;1.0\u0026rsquo;]; sw_pymysql   pyramid Python \u0026gt;=3.7 - [\u0026lsquo;1.10\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_pyramid   pika Python \u0026gt;=3.7 - [\u0026lsquo;1.2\u0026rsquo;]; sw_rabbitmq   redis Python 
\u0026gt;=3.7 - [\u0026lsquo;3.5.*\u0026rsquo;, \u0026lsquo;4.5.1\u0026rsquo;]; sw_redis   requests Python \u0026gt;=3.7 - [\u0026lsquo;2.26\u0026rsquo;, \u0026lsquo;2.25\u0026rsquo;]; sw_requests   sanic Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;20.12\u0026rsquo;]; sw_sanic   tornado Python \u0026gt;=3.7 - [\u0026lsquo;6.0\u0026rsquo;, \u0026lsquo;6.1\u0026rsquo;]; sw_tornado   urllib3 Python \u0026gt;=3.7 - [\u0026lsquo;1.26\u0026rsquo;, \u0026lsquo;1.25\u0026rsquo;]; sw_urllib3   urllib_request Python \u0026gt;=3.7 - ['*']; sw_urllib_request   websockets Python \u0026gt;=3.7 - [\u0026lsquo;10.3\u0026rsquo;, \u0026lsquo;10.4\u0026rsquo;]; sw_websockets    Notes  The celery server running with \u0026ldquo;celery -A \u0026hellip;\u0026rdquo; should be run with the HTTP protocol as it uses multiprocessing by default which is not compatible with the gRPC protocol implementation in SkyWalking currently. Celery clients can use whatever protocol they want. While Falcon is instrumented, only Hug is tested. Hug is believed to be abandoned project, use this plugin with a bit more caution. Instead of Hug, plugin test should move to test actual Falcon. The websocket instrumentation only traces client side connection handshake, the actual message exchange (send/recv) is not traced since injecting headers to socket message body is the only way to propagate the trace context, which requires customization of message structure and extreme care. (Feel free to add this feature by instrumenting the send/recv methods commented out in the code by either injecting sw8 headers or propagate the trace context in a separate message)  ","title":"Supported Libraries","url":"/docs/skywalking-python/latest/en/setup/plugins/"},{"content":"Supported Libraries This document is automatically generated from the SkyWalking Python testing matrix.\nThe column of versions only indicates the set of library versions tested in a best-effort manner.\nIf you find newer major versions that are missing from the following table, and it\u0026rsquo;s not documented as a limitation, please PR to update the test matrix in the plugin.\nVersions marked as NOT SUPPORTED may be due to an incompatible version with Python in the original library or a limitation of SkyWalking auto-instrumentation (welcome to contribute!)\nPlugin Support Table    Library Python Version - Lib Version Plugin Name     aiohttp Python \u0026gt;=3.7 - [\u0026lsquo;3.7.*']; sw_aiohttp   aioredis Python \u0026gt;=3.7 - [\u0026lsquo;2.0.*']; sw_aioredis   aiormq Python \u0026gt;=3.7 - [\u0026lsquo;6.3\u0026rsquo;, \u0026lsquo;6.4\u0026rsquo;]; sw_aiormq   amqp Python \u0026gt;=3.7 - [\u0026lsquo;2.6.1\u0026rsquo;]; sw_amqp   asyncpg Python \u0026gt;=3.7 - [\u0026lsquo;0.25.0\u0026rsquo;]; sw_asyncpg   bottle Python \u0026gt;=3.7 - [\u0026lsquo;0.12.23\u0026rsquo;]; sw_bottle   celery Python \u0026gt;=3.7 - [\u0026lsquo;5.1\u0026rsquo;]; sw_celery   confluent_kafka Python \u0026gt;=3.7 - [\u0026lsquo;1.5.0\u0026rsquo;, \u0026lsquo;1.7.0\u0026rsquo;, \u0026lsquo;1.8.2\u0026rsquo;]; sw_confluent_kafka   django Python \u0026gt;=3.7 - [\u0026lsquo;3.2\u0026rsquo;]; sw_django   elasticsearch Python \u0026gt;=3.7 - [\u0026lsquo;7.13\u0026rsquo;, \u0026lsquo;7.14\u0026rsquo;, \u0026lsquo;7.15\u0026rsquo;]; sw_elasticsearch   hug Python \u0026gt;=3.11 - NOT SUPPORTED YET; Python \u0026gt;=3.10 - [\u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; Python \u0026gt;=3.7 - [\u0026lsquo;2.4.1\u0026rsquo;, \u0026lsquo;2.5\u0026rsquo;, 
\u0026lsquo;2.6\u0026rsquo;]; sw_falcon   fastapi Python \u0026gt;=3.7 - [\u0026lsquo;0.89.\u0026rsquo;, \u0026lsquo;0.88.']; sw_fastapi   flask Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_flask   happybase Python \u0026gt;=3.7 - [\u0026lsquo;1.2.0\u0026rsquo;]; sw_happybase   http_server Python \u0026gt;=3.7 - ['*']; sw_http_server   werkzeug Python \u0026gt;=3.7 - [\u0026lsquo;1.0.1\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_http_server   httpx Python \u0026gt;=3.7 - [\u0026lsquo;0.23.\u0026rsquo;, \u0026lsquo;0.22.']; sw_httpx   kafka-python Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_kafka   loguru Python \u0026gt;=3.7 - [\u0026lsquo;0.6.0\u0026rsquo;, \u0026lsquo;0.7.0\u0026rsquo;]; sw_loguru   mysqlclient Python \u0026gt;=3.7 - [\u0026lsquo;2.1.*']; sw_mysqlclient   neo4j Python \u0026gt;=3.7 - [\u0026lsquo;5.*']; sw_neo4j   psycopg[binary] Python \u0026gt;=3.11 - [\u0026lsquo;3.1.']; Python \u0026gt;=3.7 - [\u0026lsquo;3.0.18\u0026rsquo;, \u0026lsquo;3.1.']; sw_psycopg   psycopg2-binary Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;2.9\u0026rsquo;]; sw_psycopg2   pymongo Python \u0026gt;=3.7 - [\u0026lsquo;3.11.*']; sw_pymongo   pymysql Python \u0026gt;=3.7 - [\u0026lsquo;1.0\u0026rsquo;]; sw_pymysql   pyramid Python \u0026gt;=3.7 - [\u0026lsquo;1.10\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_pyramid   pika Python \u0026gt;=3.7 - [\u0026lsquo;1.2\u0026rsquo;]; sw_rabbitmq   redis Python \u0026gt;=3.7 - [\u0026lsquo;3.5.*\u0026rsquo;, \u0026lsquo;4.5.1\u0026rsquo;]; sw_redis   requests Python \u0026gt;=3.7 - [\u0026lsquo;2.26\u0026rsquo;, \u0026lsquo;2.25\u0026rsquo;]; sw_requests   sanic Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;20.12\u0026rsquo;]; sw_sanic   tornado Python \u0026gt;=3.7 - [\u0026lsquo;6.0\u0026rsquo;, \u0026lsquo;6.1\u0026rsquo;]; sw_tornado   urllib3 Python \u0026gt;=3.7 - [\u0026lsquo;1.26\u0026rsquo;, \u0026lsquo;1.25\u0026rsquo;]; sw_urllib3   urllib_request Python \u0026gt;=3.7 - ['*']; sw_urllib_request   websockets Python \u0026gt;=3.7 - [\u0026lsquo;10.3\u0026rsquo;, \u0026lsquo;10.4\u0026rsquo;]; sw_websockets    Notes  The celery server running with \u0026ldquo;celery -A \u0026hellip;\u0026rdquo; should be run with the HTTP protocol as it uses multiprocessing by default which is not compatible with the gRPC protocol implementation in SkyWalking currently. Celery clients can use whatever protocol they want. While Falcon is instrumented, only Hug is tested. Hug is believed to be abandoned project, use this plugin with a bit more caution. Instead of Hug, plugin test should move to test actual Falcon. The Neo4j plugin integrates neo4j python driver 5.x.x versions which support both Neo4j 5 and 4.4 DBMS. The websocket instrumentation only traces client side connection handshake, the actual message exchange (send/recv) is not traced since injecting headers to socket message body is the only way to propagate the trace context, which requires customization of message structure and extreme care. 
(Feel free to add this feature by instrumenting the send/recv methods commented out in the code by either injecting sw8 headers or propagate the trace context in a separate message)  ","title":"Supported Libraries","url":"/docs/skywalking-python/next/en/setup/plugins/"},{"content":"Supported Libraries This document is automatically generated from the SkyWalking Python testing matrix.\nThe column of versions only indicates the set of library versions tested in a best-effort manner.\nIf you find newer major versions that are missing from the following table, and it\u0026rsquo;s not documented as a limitation, please PR to update the test matrix in the plugin.\nVersions marked as NOT SUPPORTED may be due to an incompatible version with Python in the original library or a limitation of SkyWalking auto-instrumentation (welcome to contribute!)\nPlugin Support Table    Library Python Version - Lib Version Plugin Name     aiohttp Python \u0026gt;=3.7 - [\u0026lsquo;3.7.*']; sw_aiohttp   aioredis Python \u0026gt;=3.7 - [\u0026lsquo;2.0.*']; sw_aioredis   aiormq Python \u0026gt;=3.7 - [\u0026lsquo;6.3\u0026rsquo;, \u0026lsquo;6.4\u0026rsquo;]; sw_aiormq   amqp Python \u0026gt;=3.7 - [\u0026lsquo;2.6.1\u0026rsquo;]; sw_amqp   asyncpg Python \u0026gt;=3.7 - [\u0026lsquo;0.25.0\u0026rsquo;]; sw_asyncpg   bottle Python \u0026gt;=3.7 - [\u0026lsquo;0.12.23\u0026rsquo;]; sw_bottle   celery Python \u0026gt;=3.7 - [\u0026lsquo;5.1\u0026rsquo;]; sw_celery   confluent_kafka Python \u0026gt;=3.7 - [\u0026lsquo;1.5.0\u0026rsquo;, \u0026lsquo;1.7.0\u0026rsquo;, \u0026lsquo;1.8.2\u0026rsquo;]; sw_confluent_kafka   django Python \u0026gt;=3.7 - [\u0026lsquo;3.2\u0026rsquo;]; sw_django   elasticsearch Python \u0026gt;=3.7 - [\u0026lsquo;7.13\u0026rsquo;, \u0026lsquo;7.14\u0026rsquo;, \u0026lsquo;7.15\u0026rsquo;]; sw_elasticsearch   hug Python \u0026gt;=3.11 - NOT SUPPORTED YET; Python \u0026gt;=3.10 - [\u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; Python \u0026gt;=3.7 - [\u0026lsquo;2.4.1\u0026rsquo;, \u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; sw_falcon   fastapi Python \u0026gt;=3.7 - [\u0026lsquo;0.89.\u0026rsquo;, \u0026lsquo;0.88.']; sw_fastapi   flask Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_flask   happybase Python \u0026gt;=3.7 - [\u0026lsquo;1.2.0\u0026rsquo;]; sw_happybase   http_server Python \u0026gt;=3.7 - ['*']; sw_http_server   werkzeug Python \u0026gt;=3.7 - [\u0026lsquo;1.0.1\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_http_server   httpx Python \u0026gt;=3.7 - [\u0026lsquo;0.23.\u0026rsquo;, \u0026lsquo;0.22.']; sw_httpx   kafka-python Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_kafka   loguru Python \u0026gt;=3.7 - [\u0026lsquo;0.6.0\u0026rsquo;, \u0026lsquo;0.7.0\u0026rsquo;]; sw_loguru   mysqlclient Python \u0026gt;=3.7 - [\u0026lsquo;2.1.*']; sw_mysqlclient   psycopg[binary] Python \u0026gt;=3.11 - [\u0026lsquo;3.1.']; Python \u0026gt;=3.7 - [\u0026lsquo;3.0.18\u0026rsquo;, \u0026lsquo;3.1.']; sw_psycopg   psycopg2-binary Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;2.9\u0026rsquo;]; sw_psycopg2   pymongo Python \u0026gt;=3.7 - [\u0026lsquo;3.11.*']; sw_pymongo   pymysql Python \u0026gt;=3.7 - [\u0026lsquo;1.0\u0026rsquo;]; sw_pymysql   pyramid Python \u0026gt;=3.7 - [\u0026lsquo;1.10\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_pyramid   pika Python \u0026gt;=3.7 - [\u0026lsquo;1.2\u0026rsquo;]; sw_rabbitmq   redis Python \u0026gt;=3.7 - [\u0026lsquo;3.5.*\u0026rsquo;, 
\u0026lsquo;4.5.1\u0026rsquo;]; sw_redis   requests Python \u0026gt;=3.7 - [\u0026lsquo;2.26\u0026rsquo;, \u0026lsquo;2.25\u0026rsquo;]; sw_requests   sanic Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;20.12\u0026rsquo;]; sw_sanic   tornado Python \u0026gt;=3.7 - [\u0026lsquo;6.0\u0026rsquo;, \u0026lsquo;6.1\u0026rsquo;]; sw_tornado   urllib3 Python \u0026gt;=3.7 - [\u0026lsquo;1.26\u0026rsquo;, \u0026lsquo;1.25\u0026rsquo;]; sw_urllib3   urllib_request Python \u0026gt;=3.7 - ['*']; sw_urllib_request   websockets Python \u0026gt;=3.7 - [\u0026lsquo;10.3\u0026rsquo;, \u0026lsquo;10.4\u0026rsquo;]; sw_websockets    Notes  The celery server running with \u0026ldquo;celery -A \u0026hellip;\u0026rdquo; should be run with the HTTP protocol as it uses multiprocessing by default which is not compatible with the gRPC protocol implementation in SkyWalking currently. Celery clients can use whatever protocol they want. While Falcon is instrumented, only Hug is tested. Hug is believed to be abandoned project, use this plugin with a bit more caution. Instead of Hug, plugin test should move to test actual Falcon. The websocket instrumentation only traces client side connection handshake, the actual message exchange (send/recv) is not traced since injecting headers to socket message body is the only way to propagate the trace context, which requires customization of message structure and extreme care. (Feel free to add this feature by instrumenting the send/recv methods commented out in the code by either injecting sw8 headers or propagate the trace context in a separate message)  ","title":"Supported Libraries","url":"/docs/skywalking-python/v1.0.1/en/setup/plugins/"},{"content":"Supported SAPI, extension and library The following plugins provide the distributed tracing capability.\nSupported SAPI  PHP-FPM CLI under Swoole  Supported PHP extension  cURL PDO MySQL Improved Memcached phpredis MongoDB Memcache  Supported PHP library  predis php-amqplib for Message Queuing Producer  ","title":"Supported SAPI, extension and library","url":"/docs/skywalking-php/latest/en/setup/service-agent/php-agent/supported-list/"},{"content":"Supported SAPI, extension and library The following plugins provide the distributed tracing capability.\nSupported SAPI  PHP-FPM CLI under Swoole  Supported PHP extension  cURL PDO MySQL Improved Memcached phpredis MongoDB Memcache  Supported PHP library  predis php-amqplib for Message Queuing Producer  ","title":"Supported SAPI, extension and library","url":"/docs/skywalking-php/next/en/setup/service-agent/php-agent/supported-list/"},{"content":"Supported SAPI, extension and library The following plugins provide the distributed tracing capability.\nSupported SAPI  PHP-FPM CLI under Swoole  Supported PHP extension  cURL PDO MySQL Improved Memcached phpredis MongoDB Memcache  Supported PHP library  predis php-amqplib for Message Queuing Producer  ","title":"Supported SAPI, extension and library","url":"/docs/skywalking-php/v0.7.0/en/setup/service-agent/php-agent/supported-list/"},{"content":"SWIP - SkyWalking Improvement Proposal SWIP - SkyWalking Improvement Proposal, is an official document to propose a new feature and/or feature improvement, which are relative to end users and developers.\nSkyWalking has been very stable since v9.x. We are getting over the rapid changing stage. The core concepts, protocols for reporting telemetry and query, 3rd party integration, and the streaming process kernel are very stable. 
From now(2024) on, SkyWalking community would focus more on improvement and controllable improvement. All major changes should be evaluated more seriously, and try as good as possible to avoid incompatible breaking changes.\nWhat is considered a major change? The catalogs of a major change are listed as follows\n New Feature. A feature doesn\u0026rsquo;t exist for the latest version. Any change of the network Interfaces, especially for Query Protocol, Data Collect Protocols, Dynamic Configuration APIs, Exporting APIs, AI pipeline APIs. Any change of storage structure.  Q: Is Agent side feature or change considered a SWIP?\nA: Right now, SWIP targets OAP and UI side changes. All agent side changes are pending on the reviews from the committers of those agents.\nSWIP Template The purpose of this template should not be considered a hard requirement. The major purpose of SWIP is helping the PMC and community member to understand the proposal better.\n# Title: SWIP-1234 xxxx  ## Motivation The description of new feature or improvement. ## Architecture Graph Describe the relationship between your new proposal part and existing components. ## Proposed Changes State your proposal in detail. ## Imported Dependencies libs and their licenses.  ## Compatibility Whether breaking configuration, storage structure, or protocols. ## General usage docs This doesn\u0026#39;t have to be a final version, but helps the reviewers to understand how to use this new feature. SWIP Process Here is the process for starting a SWIP.\n Start a SWIP discussion at GitHub Discussion Page with title [DISCUSS] xxxx. Fill in the sections as described above in SWIP Template. At least one SkyWalking committer commented on the discussion to show interest in adopting it. This committer could update this page to grant a SWIP ID, and update the title to [SWIP-ID NO.] [DISCUSS] xxxx. All further discussion could happen on the discussion page. Once the consensus is made by enough committer supporters, and/or through a mail list vote, this SWIP should be added here as SWIP-ID NO.md and listed in the below as Known SWIPs.  All accepted and proposed SWIPs can be found in here.\nKnown SWIPs Next SWIP Number: 8\nAccepted SWIPs  SWIP-8 Support ActiveMQ Monitoring SWIP-5 Support ClickHouse Monitoring SWIP-4 Support available layers of service in the topology SWIP-3 Support RocketMQ Monitoring SWIP-2 Collecting and Gathering Kubernetes Monitoring Data SWIP-1 Create and detect Service Hierarchy Relationship  ","title":"SWIP - SkyWalking Improvement Proposal","url":"/docs/main/next/en/swip/readme/"},{"content":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. 
SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. 
Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. 
SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. 
SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. 
SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. 
This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Reference Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Reference Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Reference Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and the trace sampler CPU policy plugin is activated, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. 
SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.rocketmqclient.collect_message_keys If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.     plugin.rocketmqclient.collect_message_tags If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.            Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. 
using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","title":"Table of Agent Configuration Properties","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/"},{"content":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. 
SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. 
SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. 
SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. 
SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. 
SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace Isolate multiple OAP servers when using the same Kafka cluster (the namespace is prepended to Kafka topic names, separated by -). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to use to decode the encoded configuration of Kafka. You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need to. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Reference Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Reference Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. 
SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. 
SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.rocketmqclient.collect_message_keys If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.     plugin.rocketmqclient.collect_message_tags If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.            Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","title":"Table of Agent Configuration Properties","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/configurations/"},{"content":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. 
SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. 
{theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. 
The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. 
will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. 
SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, only the leading characters of the arguments, up to this length, are sent to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key] if the keys are duplicated. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace Isolate multiple OAP servers when using the same Kafka cluster (the namespace is prepended to Kafka topic names, separated by -). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to use to decode the encoded configuration of Kafka. You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need to. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. 
SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Reference Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Reference Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Reference Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. 
SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp    Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","title":"Table of Agent Configuration Properties","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/configurations/"},{"content":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. 
SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. 
SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. 
SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. 
SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. 
SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. sw_plugin_kafka_producer_config    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. 
SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Referenc Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Referenc Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. 
SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/          Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","title":"Table of Agent Configuration Properties","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/configurations/"},{"content":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. 
SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as a Kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or the same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default. SAMPLE_N_PER_3_SECS means sampling N TraceSegments in 3 seconds at most. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on the backend setting, see application.yml for more details. For most scenarios, this needs backend extensions; only basic match auth is provided in the default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRefs in a single span, to keep the memory cost predictable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keeps your application's memory cost predictable. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, the SkyWalking agent will save all instrumented class files in the /debugging folder. The SkyWalking team may ask for these files in order to resolve compatibility problems. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance and should be unique within the service. If empty, the SkyWalking agent generates a 32-bit UUID. By default, UUID@hostname is used as the instance name. Max length is 50 (UTF-8 chars). SW_AGENT_INSTANCE_NAME ""   agent.instance_properties_json={"key":"value"} Add service instance custom properties in JSON format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How deep the agent goes when logging all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period Force reconnection period of gRPC, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold The max length of operationName; setting this value > 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing If true, keep tracing even when the backend is not available. SW_AGENT_KEEP_TRACING false   agent.force_tls If true, force TLS for the gRPC channel. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted CA file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH ""   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH ""   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the IPv4 list. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval gRPC channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period Agent heartbeat report period. Unit: second. 
SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. 
SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. 
SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. 
SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). 
SW_KAFKA_NAMESPACE ``   plugin.kafka.decode_class Specify which class to use to decode encoded Kafka configuration. You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if needed. SW_KAFKA_DECODE_CLASS ``   plugin.springannotation.classname_match_regex Match Spring beans by regular expression on the class name. Multiple expressions could be separated by a comma. This only works when the Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the Spring beans tagged with @Bean, @Service, @Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether to transmit logged data formatted or unformatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by the Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to a positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which commands should be converted to write operations SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read Specify which commands should be converted to read operations SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Reference: Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by the Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to a positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which commands should be converted to write operations SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read Specify which commands should be converted to read operations SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Reference: Jedis-4.x-plugin, jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by the Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to a positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which commands should be converted to write operations SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read Specify which commands should be converted to read operations SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Reference: Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the Cypher statement would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to a positive number, the db.cypher.parameters would be truncated to this length; otherwise it would be completely saved, which may cause a performance problem. 
SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.rocketmqclient.collect_message_keys If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.     
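(Aside for illustration; this is not an additional row of the property table.) Every property above can be set in agent/config/agent.config or overridden through the environment variable listed next to it. A minimal sketch, assuming a hypothetical service named order-service that also wants SQL parameters collected:

    # agent/config/agent.config
    agent.service_name=${SW_AGENT_NAME:order-service}
    plugin.jdbc.trace_sql_parameters=${SW_JDBC_TRACE_SQL_PARAMETERS:true}

    # or equivalently, exported before the JVM starts
    export SW_AGENT_NAME=order-service
    export SW_JDBC_TRACE_SQL_PARAMETERS=true

The ${ENV_NAME:default} placeholder form shown here follows the style the shipped agent.config uses to pair each property with its environment variable and default.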
plugin.rocketmqclient.collect_message_tags If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.            Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","title":"Table of Agent Configuration Properties","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/configurations/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/latest/en/setup/backend/telegraf-receiver/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. 
Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/next/en/setup/backend/telegraf-receiver/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. 
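For illustration, a minimal telegraf.conf output section that satisfies the three notices above (HTTP output, JSON data format, second-level timestamps); the URL is a placeholder and should point at your OAP server's Telegraf receiver endpoint:

    [[outputs.http]]
      # Placeholder address; replace with the OAP server's Telegraf receiver endpoint.
      url = "http://oap.example.com:12800/telegraf"
      data_format = "json"
      json_timestamp_units = "1s"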
Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/v9.3.0/en/setup/backend/telegraf-receiver/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/v9.4.0/en/setup/backend/telegraf-receiver/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. 
Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/v9.5.0/en/setup/backend/telegraf-receiver/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. 
Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/v9.6.0/en/setup/backend/telegraf-receiver/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/v9.7.0/en/setup/backend/telegraf-receiver/"},{"content":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, metrics-service or pprof.\nMultiple export methods are supported simultaneously, separated by commas.\nPrometheus Start HTTP port to export the satellite telemetry metrics.\nWhen the following configuration is completed, then the satellite telemetry metrics export to: http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, and all the metrics contain the cluster, service and instance tag.\n# The Satellite self telemetry configuration. 
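# (Illustrative comment, not part of the shipped default configuration.) As noted above, several export
# methods can be enabled at the same time by separating them with commas, for example:
# export_type: prometheus,metrics_service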
telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus} # Export telemetry data through Prometheus server, only works on \u0026#34;export_type=prometheus\u0026#34;. prometheus: # The prometheus server address. address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234} # The prometheus server metrics endpoint. endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics} Metrics Service Send the message to the gRPC service that supports SkyWalking\u0026rsquo;s native Meter protocol with interval.\nWhen the following configuration is completed, send the message to the specified grpc-client component at the specified time interval. Among them, service and instance will correspond to the services and service instances in SkyWalking.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:metrics_service} # Export telemetry data through native meter format to OAP backend, only works on \u0026#34;export_type=metrics_service\u0026#34;. metrics_service: # The grpc-client plugin name, using the SkyWalking native batch meter protocol client_name: ${SATELLITE_TELEMETRY_METRICS_SERVICE_CLIENT_NAME:grpc-client} # The interval second for sending metrics interval: ${SATELLITE_TELEMETRY_METRICS_SERVICE_INTERVAL:10} # The prefix of telemetry metric name metric_prefix: ${SATELLITE_TELEMETRY_METRICS_SERVICE_METRIC_PREFIX:sw_stl_} pprof pprof can provide HTTP services to allow remote viewing of service execution status, helping you discover performance issues.\n# The Satellite self telemetry configuration. telemetry: # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:pprof} # Export pprof service for detect performance issue pprof: # The pprof server address. 
address: ${SATELLITE_TELEMETRY_PPROF_ADDRESS::6060} ","title":"Telemetry Exporter","url":"/docs/skywalking-satellite/latest/en/setup/examples/feature/telemetry-exporter/readme/"},{"content":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, metrics-service or pprof.\nMultiple export methods are supported simultaneously, separated by commas.\nPrometheus Start HTTP port to export the satellite telemetry metrics.\nWhen the following configuration is completed, then the satellite telemetry metrics export to: http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, and all the metrics contain the cluster, service and instance tag.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus} # Export telemetry data through Prometheus server, only works on \u0026#34;export_type=prometheus\u0026#34;. prometheus: # The prometheus server address. address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234} # The prometheus server metrics endpoint. endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics} Metrics Service Send the message to the gRPC service that supports SkyWalking\u0026rsquo;s native Meter protocol with interval.\nWhen the following configuration is completed, send the message to the specified grpc-client component at the specified time interval. Among them, service and instance will correspond to the services and service instances in SkyWalking.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:metrics_service} # Export telemetry data through native meter format to OAP backend, only works on \u0026#34;export_type=metrics_service\u0026#34;. metrics_service: # The grpc-client plugin name, using the SkyWalking native batch meter protocol client_name: ${SATELLITE_TELEMETRY_METRICS_SERVICE_CLIENT_NAME:grpc-client} # The interval second for sending metrics interval: ${SATELLITE_TELEMETRY_METRICS_SERVICE_INTERVAL:10} # The prefix of telemetry metric name metric_prefix: ${SATELLITE_TELEMETRY_METRICS_SERVICE_METRIC_PREFIX:sw_stl_} pprof pprof can provide HTTP services to allow remote viewing of service execution status, helping you discover performance issues.\n# The Satellite self telemetry configuration. 
telemetry: # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:pprof} # Export pprof service for detect performance issue pprof: # The pprof server address. address: ${SATELLITE_TELEMETRY_PPROF_ADDRESS::6060} ","title":"Telemetry Exporter","url":"/docs/skywalking-satellite/next/en/setup/examples/feature/telemetry-exporter/readme/"},{"content":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, metrics-service or pprof.\nMultiple export methods are supported simultaneously, separated by commas.\nPrometheus Start HTTP port to export the satellite telemetry metrics.\nWhen the following configuration is completed, then the satellite telemetry metrics export to: http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, and all the metrics contain the cluster, service and instance tag.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus} # Export telemetry data through Prometheus server, only works on \u0026#34;export_type=prometheus\u0026#34;. prometheus: # The prometheus server address. address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234} # The prometheus server metrics endpoint. endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics} Metrics Service Send the message to the gRPC service that supports SkyWalking\u0026rsquo;s native Meter protocol with interval.\nWhen the following configuration is completed, send the message to the specified grpc-client component at the specified time interval. Among them, service and instance will correspond to the services and service instances in SkyWalking.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:metrics_service} # Export telemetry data through native meter format to OAP backend, only works on \u0026#34;export_type=metrics_service\u0026#34;. 
metrics_service: # The grpc-client plugin name, using the SkyWalking native batch meter protocol client_name: ${SATELLITE_TELEMETRY_METRICS_SERVICE_CLIENT_NAME:grpc-client} # The interval second for sending metrics interval: ${SATELLITE_TELEMETRY_METRICS_SERVICE_INTERVAL:10} # The prefix of telemetry metric name metric_prefix: ${SATELLITE_TELEMETRY_METRICS_SERVICE_METRIC_PREFIX:sw_stl_} pprof pprof can provide HTTP services to allow remote viewing of service execution status, helping you discover performance issues.\n# The Satellite self telemetry configuration. telemetry: # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:pprof} # Export pprof service for detect performance issue pprof: # The pprof server address. address: ${SATELLITE_TELEMETRY_PPROF_ADDRESS::6060} ","title":"Telemetry Exporter","url":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/feature/telemetry-exporter/readme/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/latest/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y).\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through a Prometheus endpoint. Users could set up an OpenTelemetry collector to scrape and forward telemetry data to the OAP server for further analysis, eventually showing up in the UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section; otherwise, you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to the OAP OpenTelemetry receiver.\nFor instructions on how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/next/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
To assist the Ops team, we provide the telemetry for the OAP backend itself.\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up prometheus fetcher.  prometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self\u0026#34;}Make sure config/fetcher-prom-rules/self.yaml exists.  Once you deploy an oap-server cluster, the target host should be replaced with a dedicated IP or hostname. For instances, there are three OAP servers in your cluster. Their host is service1, service2, and service3 respectively. You should update each self.yaml to switch the target host.\nservice1:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service1:1234labels:service:oap-server...service2:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service2:1234labels:service:oap-server...service3:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service3:1234labels:service:oap-server...Service discovery (k8s) If you deploy an oap-server cluster on k8s, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port - source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1 For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional, rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.0.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. 
For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into the OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up Prometheus fetcher.  prometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self\u0026#34;}Make sure config/fetcher-prom-rules/self.yaml exists.  Once you deploy an OAP server cluster, the target host should be replaced with a dedicated IP or hostname. For instance, if there are three OAP servers in your cluster, their hosts are service1, service2, and service3, respectively. You should update each self.yaml to switch the target host.\nservice1:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service1:1234labels:service:oap-server...service2:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service2:1234labels:service:oap-server...service3:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service3:1234labels:service:oap-server...Service discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port - source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1 For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.1.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. 
For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into the OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up Prometheus fetcher.  prometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self\u0026#34;}Make sure config/fetcher-prom-rules/self.yaml exists.  Once you deploy an OAP server cluster, the target host should be replaced with a dedicated IP or hostname. For instance, if there are three OAP servers in your cluster, their hosts are service1, service2, and service3, respectively. You should update each self.yaml to switch the target host.\nservice1:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service1:1234labels:service:oap-server...service2:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service2:1234labels:service:oap-server...service3:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service3:1234labels:service:oap-server...Service discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.2.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. 
For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into the OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. 
Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.3.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.4.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.5.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.6.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.7.0/en/setup/backend/backend-telemetry/"},{"content":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. 
Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","title":"The Logic Endpoint","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/logic-endpoint/"},{"content":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","title":"The Logic Endpoint","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/logic-endpoint/"},{"content":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. 
The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","title":"The Logic Endpoint","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/logic-endpoint/"},{"content":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","title":"The Logic Endpoint","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/logic-endpoint/"},{"content":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","title":"The Logic Endpoint","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/logic-endpoint/"},{"content":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. 
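As an illustration of marking a logic endpoint through the trace toolkit, the following is a minimal sketch, assuming the SkyWalking Java agent is attached and the apm-toolkit-trace dependency shown below is on the classpath. The class and method names are hypothetical; only `@Trace` and `ActiveSpan.tag` are toolkit APIs, and the `x-le` tag value follows the format quoted above.

```java
import org.apache.skywalking.apm.toolkit.trace.ActiveSpan;
import org.apache.skywalking.apm.toolkit.trace.Trace;

public class ReportGenerator {
    // @Trace creates a local span for this method when the SkyWalking Java agent is attached.
    @Trace(operationName = "ReportGenerator/nightlyBatch")
    public void nightlyBatch() {
        // Tag the active local span so the analysis stage treats it as a logic endpoint.
        ActiveSpan.tag("x-le", "{\"logic-span\":true}");
        // ... business logic ...
    }
}
```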
Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","title":"the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-dependency/"},{"content":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","title":"the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-dependency/"},{"content":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","title":"the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-dependency/"},{"content":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","title":"the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-dependency/"},{"content":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","title":"the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-dependency/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. 
We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. 
Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/latest/en/setup/backend/backend-profile-thread-merging/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. 
If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/next/en/setup/backend/backend-profile-thread-merging/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   
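A hypothetical Java sketch of the combine step just described: two stack trees are merged with an explicit stack instead of recursion, and children are matched by code signature. The class and field names are illustrative only and are not the actual OAP implementation.

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Illustrative node type: one frame of the merged stack tree.
class StackNode {
    final String codeSignature;
    final List<StackNode> children = new ArrayList<>();
    final List<Integer> sequences = new ArrayList<>();  // dump sequences observed at this frame
    final List<Long> timestamps = new ArrayList<>();     // timestamps paired with the sequences

    StackNode(String codeSignature) {
        this.codeSignature = codeSignature;
    }
}

class StackTreeMerger {
    /** Merge {@code source} into {@code target} without recursion, as the combine step describes. */
    static void combine(StackNode target, StackNode source) {
        Deque<StackNode[]> pending = new ArrayDeque<>();
        pending.push(new StackNode[]{target, source});
        while (!pending.isEmpty()) {
            StackNode[] pair = pending.pop();
            StackNode into = pair[0];
            StackNode from = pair[1];
            // Same code signature and same parent: keep the dump sequences and timestamps in this node.
            into.sequences.addAll(from.sequences);
            into.timestamps.addAll(from.timestamps);
            for (StackNode child : from.children) {
                StackNode match = null;
                for (StackNode existing : into.children) {
                    if (existing.codeSignature.equals(child.codeSignature)) {
                        match = existing;
                        break;
                    }
                }
                if (match == null) {
                    // No matching child: attach the whole subtree as a new child.
                    into.children.add(child);
                } else {
                    // Matching child: merge its subtree later via the explicit stack.
                    pending.push(new StackNode[]{match, child});
                }
            }
        }
    }
}
```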
Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.0.0/en/guides/backend-profile/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. 
Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.1.0/en/guides/backend-profile/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. 
If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.2.0/en/guides/backend-profile/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. 
The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.3.0/en/guides/backend-profile/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. 
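A minimal sketch of that grouping step, assuming each record loaded from storage exposes its stack frames as a list of code signatures; the record type and field names here are hypothetical, not the actual OAP model.

```java
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

// Hypothetical shape of one thread-dump record loaded from storage.
record ThreadSnapshot(List<String> stack, int sequence, long timestamp) {}

class SnapshotGrouping {
    /** Group snapshots by the first (root) stack element using a parallel stream, as described above. */
    static Map<String, List<ThreadSnapshot>> groupByRootFrame(List<ThreadSnapshot> snapshots) {
        return snapshots.parallelStream()
                .collect(Collectors.groupingBy(s -> s.stack().get(0)));
    }
}
```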
Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.4.0/en/guides/backend-profile/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). 
Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.5.0/en/setup/backend/backend-profile-thread-merging/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. 
This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. 
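The duration rule described in the steps above can be sketched as follows: after sorting a node's dump sequences, only gaps of exactly one sequence contribute the corresponding timestamp delta, and the time consumed by children is deducted afterwards. This is a hypothetical illustration with invented names, not the OAP code.

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Illustrative sample: one (sequence, timestamp) observation kept on a stack-tree node.
record DumpSample(int sequence, long timestampMillis) {}

class NodeDuration {
    /** Sum the deltas of consecutive sequences, as the "calculate durations" step describes. */
    static long rawDuration(List<DumpSample> samples) {
        List<DumpSample> sorted = new ArrayList<>(samples);
        sorted.sort(Comparator.comparingInt(DumpSample::sequence));
        long duration = 0;
        for (int i = 1; i < sorted.size(); i++) {
            // Two continuous sequences: the node stayed on the stack for the whole interval between dumps.
            if (sorted.get(i).sequence() == sorted.get(i - 1).sequence() + 1) {
                duration += sorted.get(i).timestampMillis() - sorted.get(i - 1).timestampMillis();
            }
        }
        return duration;
    }

    /** Self time: the node's own duration minus the time consumed by all of its children. */
    static long selfDuration(long ownDuration, List<Long> childDurations) {
        long children = childDurations.stream().mapToLong(Long::longValue).sum();
        return ownDuration - children;
    }
}
```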
Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.6.0/en/setup/backend/backend-profile-thread-merging/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. 
If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.7.0/en/setup/backend/backend-profile-thread-merging/"},{"content":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large volumes of time-series data. One of the key features of TSDB is its ability to automatically manage data storage over time, optimize performance and ensure that the system can scale to handle large workloads. TSDB empowers Measure and Stream relevant data.\nShard In TSDB, the data in a group is partitioned into shards based on a configurable sharding scheme. Each shard is assigned to a specific set of storage nodes, and those nodes store and process the data within that shard. This allows BanyanDB to scale horizontally by adding more storage nodes to the cluster as needed.\nshard\n Buffer: It is typically implemented as an in-memory queue managed by a shard. When new time-series data is ingested into the system, it is added to the end of the queue, and when the buffer reaches a specific size, the data is flushed to disk in batches. SST: When a bucket of buffer becomes full or reaches a certain size threshold, it is flushed to disk as a new Sorted String Table (SST) file. This process is known as compaction. Segments and Blocks: Time-series data is stored in data segments/blocks within each shard. Blocks contain a fixed number of data points and are organized into time windows. Each data segment includes an index that efficiently retrieves data within the block. Block Cache: It manages the in-memory cache of data blocks, improving query performance by caching frequently accessed data blocks in memory.  Write Path The write path of TSDB begins when time-series data is ingested into the system. TSDB will consult the schema repository to check if the group exists, and if it does, then it will hash the SeriesID to determine which shard it belongs to.\nEach shard in TSDB is responsible for storing a subset of the time-series data, and it uses a write-ahead log to record incoming writes in a durable and fault-tolerant manner. The shard also holds an in-memory index allowing fast lookups of time-series data.\nWhen a shard receives a write request, the data is written to the buffer as a series of buckets. Each bucket is a fixed-size chunk of time-series data typically configured to be several minutes or hours long. As new data is written to the buffer, it is appended to the current bucket until it is full. Once the bucket is full, it is closed, and a new bucket is created to continue buffering writes.\nOnce a bucket is closed, it is stored as a single SST in a shard. The file is indexed and added to the index for the corresponding time range and resolution.\nRead Path The read path in TSDB retrieves time-series data from disk or memory and returns it to the query engine. The read path comprises several components: the buffer, cache, and SST file. The following is a high-level overview of how these components work together to retrieve time-series data in TSDB.\nThe first step in the read path is to perform an index lookup to determine which blocks contain the desired time range. 
The index contains metadata about each data block, including its start and end time and its location on disk.\nIf the requested data is present in the buffer (i.e., it has been recently written but not yet persisted to disk), the buffer is checked to see if the data can be returned directly from memory. The read path determines which bucket(s) contain the requested time range. If the data is not present in the buffer, the read path proceeds to the next step.\nIf the requested data is present in the cache (i.e., it has been recently read from disk and is still in memory), it is checked to see if the data can be returned directly from memory. The read path proceeds to the next step if the data is not in the cache.\nThe final step in the read path is to look up the appropriate SST file on disk. Files are the on-disk representation of data blocks and are organized by shard and time range. The read path determines which SST files contain the requested time range and reads the appropriate data blocks from the disk.\n","title":"TimeSeries Database(TSDB)","url":"/docs/skywalking-banyandb/latest/concept/tsdb/"},{"content":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large volumes of time-series data. One of the key features of TSDB is its ability to automatically manage data storage over time, optimize performance and ensure that the system can scale to handle large workloads. TSDB empowers Measure and Stream relevant data.\nShard In TSDB, the data in a group is partitioned into shards based on a configurable sharding scheme. Each shard is assigned to a specific set of storage nodes, and those nodes store and process the data within that shard. This allows BanyanDB to scale horizontally by adding more storage nodes to the cluster as needed.\nWithin each shard, data is stored in different segments based on time ranges. The series indexes are generated based on entities, and the indexes generated based on indexing rules of the Measure types are also stored under the shard.\nSegment Each segment is composed of multiple parts. Whenever SkyWalking sends a batch of data, BanyanDB writes this batch of data into a new part. For data of the Stream type, the inverted indexes generated based on the indexing rules are also stored in the segment. Since BanyanDB adopts a snapshot approach for data read and write operations, the segment also needs to maintain additional snapshot information to record the validity of the parts.\nPart Within a part, data is split into multiple files in a columnar manner. The timestamps are stored in the timestamps.bin file, tags are organized in persistent tag families as various files with the .tf suffix, and fields are stored separately in the fields.bin file.\nIn addition, each part maintains several metadata files. Among them, metadata.json is the metadata file for the part, storing descriptive information, such as start and end times, part size, etc.\nThe meta.bin is a skipping index file that serves as the entry file for the entire part, helping to index the primary.bin file.\nThe primary.bin file contains the index of each block. Through it, the actual data files or the tagFamily metadata files ending with .tfm can be indexed, which in turn helps to locate the data in blocks.\nNotably, for data of the Stream type, since there are no field columns, the fields.bin file does not exist, while the rest of the structure is entirely consistent with the Measure type.\nBlock Each block holds data with the same series ID. 
The max size of the measure block is controlled by data volume and the number of rows. Meanwhile, the max size of the stream block is controlled by data volume. The diagram below shows the detailed fields within each block. The block is the minimal unit of TSDB, which contains several rows of data. Due to the column-based design, each block is spread over several files.\nWrite Path The write path of TSDB begins when time-series data is ingested into the system. TSDB will consult the schema repository to check if the group exists, and if it does, then it will hash the SeriesID to determine which shard it belongs to.\nEach shard in TSDB is responsible for storing a subset of the time-series data. The shard also holds an in-memory index allowing fast lookups of time-series data.\nWhen a shard receives a write request, the data is written to the buffer as a memory part. Meanwhile, the series index and inverted index will also be updated. The worker in the background periodically flushes data, writing the memory part to the disk. After the flush operation is completed, it triggers a merge operation to combine the parts and remove invalid data.\nWhenever a new memory part is generated, or when a flush or merge operation is triggered, they initiate an update of the snapshot and delete outdated snapshots. The parts in a persistent snapshot could be accessible to the reader.\nRead Path The read path in TSDB retrieves time-series data from disk or memory, and returns it to the query engine. The read path comprises several components: the buffer and parts. The following is a high-level overview of how these components work together to retrieve time-series data in TSDB.\nThe first step in the read path is to perform an index lookup to determine which parts contain the desired time range. The index contains metadata about each data part, including its start and end time.\nIf the requested data is present in the buffer (i.e., it has been recently written but not yet persisted to disk), the buffer is checked to see if the data can be returned directly from memory. The read path determines which memory part(s) contain the requested time range. If the data is not present in the buffer, the read path proceeds to the next step.\nThe next step in the read path is to look up the appropriate parts on disk. Files are the on-disk representation of blocks and are organized by shard and time range. The read path determines which parts contain the requested time range and reads the appropriate blocks from the disk. Due to the column-based storage design, it may be necessary to read multiple data files.\n","title":"TimeSeries Database(TSDB)","url":"/docs/skywalking-banyandb/next/concept/tsdb/"},{"content":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large volumes of time-series data. One of the key features of TSDB is its ability to automatically manage data storage over time, optimize performance and ensure that the system can scale to handle large workloads. TSDB empowers Measure and Stream relevant data.\nShard In TSDB, the data in a group is partitioned into shards based on a configurable sharding scheme. Each shard is assigned to a specific set of storage nodes, and those nodes store and process the data within that shard. This allows BanyanDB to scale horizontally by adding more storage nodes to the cluster as needed.\nshard\n Buffer: It is typically implemented as an in-memory queue managed by a shard. 
When new time-series data is ingested into the system, it is added to the end of the queue, and when the buffer reaches a specific size, the data is flushed to disk in batches. SST: When a bucket of buffer becomes full or reaches a certain size threshold, it is flushed to disk as a new Sorted String Table (SST) file. This process is known as compaction. Segments and Blocks: Time-series data is stored in data segments/blocks within each shard. Blocks contain a fixed number of data points and are organized into time windows. Each data segment includes an index that efficiently retrieves data within the block. Block Cache: It manages the in-memory cache of data blocks, improving query performance by caching frequently accessed data blocks in memory.  Write Path The write path of TSDB begins when time-series data is ingested into the system. TSDB will consult the schema repository to check if the group exists, and if it does, then it will hash the SeriesID to determine which shard it belongs to.\nEach shard in TSDB is responsible for storing a subset of the time-series data, and it uses a write-ahead log to record incoming writes in a durable and fault-tolerant manner. The shard also holds an in-memory index allowing fast lookups of time-series data.\nWhen a shard receives a write request, the data is written to the buffer as a series of buckets. Each bucket is a fixed-size chunk of time-series data typically configured to be several minutes or hours long. As new data is written to the buffer, it is appended to the current bucket until it is full. Once the bucket is full, it is closed, and a new bucket is created to continue buffering writes.\nOnce a bucket is closed, it is stored as a single SST in a shard. The file is indexed and added to the index for the corresponding time range and resolution.\nRead Path The read path in TSDB retrieves time-series data from disk or memory and returns it to the query engine. The read path comprises several components: the buffer, cache, and SST file. The following is a high-level overview of how these components work together to retrieve time-series data in TSDB.\nThe first step in the read path is to perform an index lookup to determine which blocks contain the desired time range. The index contains metadata about each data block, including its start and end time and its location on disk.\nIf the requested data is present in the buffer (i.e., it has been recently written but not yet persisted to disk), the buffer is checked to see if the data can be returned directly from memory. The read path determines which bucket(s) contain the requested time range. If the data is not present in the buffer, the read path proceeds to the next step.\nIf the requested data is present in the cache (i.e., it has been recently read from disk and is still in memory), it is checked to see if the data can be returned directly from memory. The read path proceeds to the next step if the data is not in the cache.\nThe final step in the read path is to look up the appropriate SST file on disk. Files are the on-disk representation of data blocks and are organized by shard and time range. The read path determines which SST files contain the requested time range and reads the appropriate data blocks from the disk.\n","title":"TimeSeries Database(TSDB)","url":"/docs/skywalking-banyandb/v0.5.0/concept/tsdb/"},{"content":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need to know about Apache SkyWalking Cloud on Kubernetes(SWCK). 
This repository provides how to onboard and customize the agent injector, operator and adapter.\n Design. Some materials regarding the design decision under the hood. Setup. Several instruments to onboard the agent injector, operator and adapter. Examples. A number of examples of how to use SWCK.  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"to Apache SkyWalking Cloud on Kubernetes Document Repository","url":"/docs/skywalking-swck/latest/readme/"},{"content":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need to know about Apache SkyWalking Cloud on Kubernetes(SWCK). This repository provides how to onboard and customize the agent injector, operator and adapter.\n Design. Some materials regarding the design decision under the hood. Setup. Several instruments to onboard the agent injector, operator and adapter. Examples. A number of examples of how to use SWCK.  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"to Apache SkyWalking Cloud on Kubernetes Document Repository","url":"/docs/skywalking-swck/next/readme/"},{"content":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need to know about Apache SkyWalking Cloud on Kubernetes(SWCK). This repository provides how to onboard and customize the agent injector, operator and adapter.\n Design. Some materials regarding the design decision under the hood. Setup. Several instruments to onboard the agent injector, operator and adapter. Examples. A number of examples of how to use SWCK.  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"to Apache SkyWalking Cloud on Kubernetes Document Repository","url":"/docs/skywalking-swck/v0.9.0/readme/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. 
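The flattened application.yml fragment in the Set Token steps above is easier to read with its indentation restored. A sketch showing only the authentication key (the other receiver-sharing-server settings are omitted here):

```yaml
# application.yml on the OAP side; only the authentication-related key is shown.
receiver-sharing-server:
  default:
    # Empty means token authentication is disabled; set a token to enable it.
    authentication: ${SW_AUTHENTICATION:""}
```

In practice the token is usually supplied through the SW_AUTHENTICATION environment variable when the OAP server starts, and the agent reads its copy from SW_AGENT_AUTHENTICATION, so the two values only need to match.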
Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/latest/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/next/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, which makes sure that a network can be trusted. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent, and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. 
But we welcome contributions on this feature.\n","title":"Token Authentication","url":"/docs/main/v9.0.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.1.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.2.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. 
On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.3.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.4.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. 
agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.5.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.6.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. 
agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.7.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","title":"Token Authentication","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/token-auth/"},{"content":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. 
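As one concrete way to apply the agent.config setting above without editing the file, the Java agent accepts skywalking.-prefixed JVM system properties as overrides (assuming your agent version supports them); the paths and token below are placeholders:

```shell
java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar \
     -Dskywalking.agent.authentication=your-token \
     -jar your-service.jar
```

Whichever way the token is set, it must match the value configured for token authentication on the backend, or the PERMISSION_DENIED error shown above is logged by the agent.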
But we appreciate someone contributes this feature.\n","title":"Token Authentication","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/token-auth/"},{"content":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","title":"Token Authentication","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/token-auth/"},{"content":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","title":"Token Authentication","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/token-auth/"},{"content":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. 
But we appreciate someone contributes this feature.\n","title":"Token Authentication","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/token-auth/"},{"content":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","title":"Trace Correlation Context","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/"},{"content":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","title":"Trace Correlation Context","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/"},{"content":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","title":"Trace Correlation Context","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/"},{"content":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  
Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","title":"Trace Correlation Context","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/"},{"content":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","title":"Trace Correlation Context","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/"},{"content":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  
CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","title":"Trace Cross Thread","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/"},{"content":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","title":"Trace Cross Thread","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/"},{"content":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  
ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","title":"Trace Cross Thread","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/"},{"content":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  
CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","title":"Trace Cross Thread","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/"},{"content":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","title":"Trace Cross Thread","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/"},{"content":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. 
HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  
// If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  
// All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. 
// Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. 
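As a small illustration of the HTTP reporting option described below, this sketch posts a one-segment array to the bulk endpoint using Java's built-in HTTP client. The payload mirrors the JSON examples that follow, and the OAP address is assumed to be the default localhost:12800.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SegmentReportExample {
    public static void main(String[] args) throws Exception {
        // A single-element segment array, matching the bulk /v3/segments endpoint below.
        String payload = "[{\"traceId\":\"a12ff60b-5807-463b-a1f8-fb1c8608219e\","
                + "\"traceSegmentId\":\"a12ff60b-5807-463b-a1f8-fb1c8608219e\","
                + "\"service\":\"User_Service_Name\","
                + "\"serviceInstance\":\"User_Service_Instance_Name\","
                + "\"spans\":[{\"operationName\":\"/ingress\",\"startTime\":1588664577013,"
                + "\"endTime\":1588664577028,\"spanType\":\"Entry\",\"spanId\":0,"
                + "\"parentSpanId\":-1,\"isError\":false,\"spanLayer\":\"Http\",\"componentId\":6000}]}]";

        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12800/v3/segments"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(payload))
                .build();

        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println("OAP responded with HTTP " + response.statusCode());
    }
}
```

Switching the URI to /v3/segment and sending a single object instead of an array targets the per-segment endpoint instead.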
There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, 
\u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"Trace Data Protocol","url":"/docs/main/latest/en/api/trace-data-protocol-v3/"},{"content":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. 
// At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. 
// We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  
repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. // Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  
Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: 
\u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"Trace Data Protocol","url":"/docs/main/next/en/api/trace-data-protocol-v3/"},{"content":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. 
Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  
int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  
// All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. 
// Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. 
There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, 
\u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"Trace Data Protocol","url":"/docs/main/v9.4.0/en/api/trace-data-protocol-v3/"},{"content":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. 
// At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. 
// We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  
repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. // Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  
Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: 
\u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"Trace Data Protocol","url":"/docs/main/v9.5.0/en/api/trace-data-protocol-v3/"},{"content":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. 
Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  
int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  
// All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. 
// Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. 
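As a quick orientation before the endpoint details that follow, here is a minimal, unofficial sketch of reporting one segment over the HTTP 1.1 receiver. It is illustrative only: it assumes a local OAP server listening on port 12800 with the HTTP receiver enabled, and it reuses field values taken from the JSON examples below (componentId 6000, spanLayer Http, the /ingress operation name); it is not an official SkyWalking client.
```python
import json
import time
import urllib.request
import uuid

# Hypothetical reporter sketch, not an official SkyWalking agent/SDK.
# Assumes an OAP HTTP receiver on localhost:12800, as in the examples below.
segment_id = str(uuid.uuid4())
now_ms = int(time.time() * 1000)

segment = {
    "traceId": segment_id,
    "traceSegmentId": segment_id,
    "service": "User_Service_Name",
    "serviceInstance": "User_Service_Instance_Name",
    "spans": [{
        "operationName": "/ingress",
        "startTime": now_ms,
        "endTime": now_ms + 15,
        "spanType": "Entry",      # server side of the traced request
        "spanId": 0,
        "parentSpanId": -1,       # -1 marks the root span of the segment
        "isError": False,
        "spanLayer": "Http",
        "componentId": 6000,
        "tags": [{"key": "http.method", "value": "GET"}],
    }],
}

req = urllib.request.Request(
    "http://localhost:12800/v3/segment",  # single-segment endpoint described below
    data=json.dumps(segment).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(resp.status)  # the receiver returns an empty body on success
```
For bulk mode, the same objects would be wrapped in a JSON array and sent to the /v3/segments endpoint, as described below.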
There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, 
\u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"Trace Data Protocol","url":"/docs/main/v9.6.0/en/api/trace-data-protocol-v3/"},{"content":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. 
// At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. 
// We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  
repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. // Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  
Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: 
\u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"Trace Data Protocol","url":"/docs/main/v9.7.0/en/api/trace-data-protocol-v3/"},{"content":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nOverview Trace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. 
The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Protocol Definition // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  
// In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  
// If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  
RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consuer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}","title":"Trace Data Protocol v3","url":"/docs/main/v9.0.0/en/protocols/trace-data-protocol-v3/"},{"content":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nOverview Trace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Protocol Definition // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. 
Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  
// -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefinited number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping defintions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more informance, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  
bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consuer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}","title":"Trace Data Protocol v3","url":"/docs/main/v9.1.0/en/protocols/trace-data-protocol-v3/"},{"content":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nOverview Trace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. 
For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Protocol Definition // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  
// **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  
// Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more informance, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consuer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}","title":"Trace Data Protocol v3","url":"/docs/main/v9.2.0/en/protocols/trace-data-protocol-v3/"},{"content":"Trace Data Protocol v3.1 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. 
The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and JVM metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  
// In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  
// If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  
RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. // Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  
string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}","title":"Trace Data Protocol v3.1","url":"/docs/main/v9.3.0/en/protocols/trace-data-protocol-v3/"},{"content":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. 
The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","title":"Trace Profiling","url":"/docs/main/latest/en/setup/backend/backend-trace-profiling/"},{"content":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. 
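As an illustration of how the analysis fields above (Count, Duration, Duration Child Excluded) relate to the raw samples, the following sketch merges periodically sampled thread stacks into a frame tree and estimates those values, assuming one sample per dump period. The class and method names are hypothetical; this is not the OAP implementation.
import java.util.*;

// Illustrative sketch only: merges sampled thread stacks (outermost frame first)
// into a tree and estimates the analysis fields described above.
public class StackMergeSketch {
    static class Frame {
        final String codeSignature;                       // method signature of this stack frame
        int count;                                        // number of samples containing this frame
        final Map<String, Frame> children = new LinkedHashMap<>();
        Frame(String codeSignature) { this.codeSignature = codeSignature; }
    }

    // Merge every sampled stack into a single tree, counting occurrences per frame.
    static Frame merge(List<List<String>> sampledStacks) {
        Frame root = new Frame("<root>");
        for (List<String> stack : sampledStacks) {
            Frame current = root;
            current.count++;
            for (String signature : stack) {
                current = current.children.computeIfAbsent(signature, Frame::new);
                current.count++;
            }
        }
        return root;
    }

    // Estimated duration = samples * dump period; self time excludes samples attributed to children.
    static void print(Frame frame, long dumpPeriodMillis, int depth) {
        long duration = frame.count * dumpPeriodMillis;
        long childSamples = frame.children.values().stream().mapToInt(c -> c.count).sum();
        long selfDuration = (frame.count - childSamples) * dumpPeriodMillis;
        System.out.printf("%s%s count=%d duration=%dms durationChildExcluded=%dms%n",
                "  ".repeat(depth), frame.codeSignature, frame.count, duration, selfDuration);
        for (Frame child : frame.children.values()) {
            print(child, dumpPeriodMillis, depth + 1);
        }
    }

    public static void main(String[] args) {
        // Two samples taken 10 ms apart: both are inside Controller.handle(),
        // one of them is further down in Repository.query().
        List<List<String>> samples = List.of(
                List.of("Controller.handle()", "Service.process()", "Repository.query()"),
                List.of("Controller.handle()", "Service.process()"));
        print(merge(samples), 10, 0);
    }
}
In this toy run, Controller.handle() is counted in both samples (duration 20 ms, self time 0 ms), while Repository.query() appears once (duration 10 ms), which mirrors how the merged view points at the code that actually consumes the time.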
Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. 
Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","title":"Trace Profiling","url":"/docs/main/next/en/setup/backend/backend-trace-profiling/"},{"content":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. 
Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. 
Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","title":"Trace Profiling","url":"/docs/main/v9.5.0/en/setup/backend/backend-trace-profiling/"},{"content":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. 
Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. 
Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","title":"Trace Profiling","url":"/docs/main/v9.6.0/en/setup/backend/backend-trace-profiling/"},{"content":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. 
Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. 
time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","title":"Trace Profiling","url":"/docs/main/v9.7.0/en/setup/backend/backend-trace-profiling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. 
Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific service default.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration have a higher priority than default.rate and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-InstanceB will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/latest/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. 
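The 1/10000 precision mentioned above means a configured rate of 10000 keeps every trace and 1000 keeps roughly one in ten. Below is a minimal Java sketch of how such a rate could be applied per trace; the hash-based bucketing is an assumption for illustration, not the OAP's actual sampler.

```java
// Illustrative sketch only: applying a sample rate with 1/10000 precision.
// The hash-based bucketing is an assumption, not the OAP implementation.
public final class SampleRateSketch {
    // rate: 10000 keeps 100% of traces, 1000 keeps ~10%, 0 keeps none.
    static boolean shouldKeep(String traceId, int rate) {
        int bucket = Math.floorMod(traceId.hashCode(), 10_000);
        return bucket < rate;
    }

    public static void main(String[] args) {
        System.out.println(shouldKeep("trace-0001", 10_000)); // always true (100% sampling)
        System.out.println(shouldKeep("trace-0001", 1_000));  // true for roughly 10% of trace IDs
    }
}
```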
See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/next/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage. If you enable the trace sampling mechanism at the server side, you will find that the metrics of the service, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces into storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would do their best not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism activated. The default value is `-1`, which means would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when sampling mechanism is activated. When sampling mechanism is activated, this config would cause the error status segment to be sampled, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments which are reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote also if most of the access have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.0.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. 
The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.1.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. 
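As described above, forceSampleErrorSegment and the slow-trace latency threshold take effect regardless of the configured rate. The sketch below illustrates that documented precedence for a single segment; the ordering and method names are illustrative assumptions, not the OAP source.

```java
// Sketch of the effective keep/drop decision described in the text above.
// The ordering and names are an illustration of the documented behavior,
// not a copy of the OAP implementation.
final class SegmentSamplingSketch {
    static boolean keepSegment(boolean hasError, long latencyMs, int traceBucket,
                               boolean forceSampleErrorSegment, long slowThresholdMs, int rate) {
        if (forceSampleErrorSegment && hasError) {
            return true;                       // error segments are always saved
        }
        if (slowThresholdMs >= 0 && latencyMs >= slowThresholdMs) {
            return true;                       // slow segments are saved; -1 disables this check
        }
        return traceBucket < rate;             // otherwise the 1/10000-precision rate applies
    }
}
```

This also explains why the actual sample rate may exceed the configured one: error and slow segments bypass the rate entirely.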
See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.2.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.3.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. 
The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.4.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. 
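Since services.[].rate and services.[].duration have a higher priority than the default settings, the effective policy for a service is the per-service entry when one exists, otherwise the default. A hedged Java sketch of that lookup follows; the Policy type and field names are hypothetical.

```java
import java.util.Map;

// Hypothetical helper illustrating the documented priority: a per-service policy,
// when present, wins over the default policy. Class and field names are made up.
final class PolicyLookupSketch {
    record Policy(int rate, long durationMs) {}

    static Policy effectivePolicy(String serviceName, Policy defaults, Map<String, Policy> perService) {
        // services.[].rate / services.[].duration take priority over default.rate / default.duration.
        return perService.getOrDefault(serviceName, defaults);
    }

    public static void main(String[] args) {
        Policy defaults = new Policy(10_000, -1);
        Map<String, Policy> perService = Map.of("serverName", new Policy(1_000, 10_000));
        System.out.println(effectivePolicy("serverName", defaults, perService)); // per-service policy
        System.out.println(effectivePolicy("other-service", defaults, perService)); // falls back to defaults
    }
}
```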
See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.5.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.6.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. 
The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. 
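Because each backend instance applies its own rate independently, segments of one trace that are routed to differently configured instances can be stored only partially, which is why mismatched rates lead to incomplete traces. The sketch below illustrates this effect; the routing, bucket value, and rates are assumptions for illustration and do not reuse the sampleRate values from the example above.

```java
// Illustrative only: two backend instances with different 1/10000-precision rates
// evaluating segments of the same trace independently. Routing per segment is assumed.
final class MixedRateSketch {
    static boolean keptBy(int instanceRate, int traceBucket) {
        return traceBucket < instanceRate;
    }

    public static void main(String[] args) {
        int traceBucket = 4_200;  // derived once from the trace ID, identical for all its segments
        int rateA = 3_500;        // hypothetical rate on one backend instance
        int rateB = 5_500;        // hypothetical rate on another backend instance
        // Segments routed to the instance with the higher rate are stored, the others are
        // dropped, so the stored trace ends up incomplete.
        System.out.println("Instance A keeps segment: " + keptBy(rateA, traceBucket)); // false
        System.out.println("Instance B keeps segment: " + keptBy(rateB, traceBucket)); // true
    }
}
```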
This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.7.0/en/setup/backend/trace-sampling/"},{"content":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) -\u0026gt; 6.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ Spring Webflux WebClient 5.x -\u0026gt; 6.x JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 4.1.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x (Optional³)   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x ActiveMQ-Artemis 2.30.0 -\u0026gt; 2.31.2 Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   
Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n³These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. 
removed from DockerHub).\n","title":"Tracing and Tracing based Metrics Analyze Plugins","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/supported-list/"},{"content":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) -\u0026gt; 6.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ Spring Webflux WebClient 5.x -\u0026gt; 6.x JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 4.1.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x (Optional³)   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x ActiveMQ-Artemis 2.30.0 -\u0026gt; 2.31.2 Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  
Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x C3P0 0.9.0 -\u0026gt; 0.10.0   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x   Connection Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x C3P0 0.9.0 -\u0026gt; 0.10.0     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n³These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. 
removed from DockerHub).\n","title":"Tracing and Tracing based Metrics Analyze Plugins","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/supported-list/"},{"content":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 3.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, 
@Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. 
So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n","title":"Tracing and Tracing based Metrics Analyze Plugins","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/supported-list/"},{"content":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 3.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  
Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. 
So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n","title":"Tracing and Tracing based Metrics Analyze Plugins","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/supported-list/"},{"content":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) -\u0026gt; 6.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ Spring Webflux WebClient 5.x -\u0026gt; 6.x JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 4.1.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x (Optional³)   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x ActiveMQ-Artemis 2.30.0 -\u0026gt; 2.31.2 Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  
hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n³These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\n","title":"Tracing and Tracing based Metrics Analyze Plugins","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/supported-list/"},{"content":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\nimport \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Use Native Tracing Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. 
When creating an Exit Span, you need to write the context carrier into the target RPC request.\ntype ExtractorRef func(headerKey string) (string, error) type InjectorRef func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request trace.CreateEntrySpan(\u0026#34;EntrySpan\u0026#34;, func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request trace.CreateExitSpan(\u0026#34;ExitSpan\u0026#34;, request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Create Span Use trace.CreateEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel.\n The first parameter is operation name of span the second parameter is InjectorRef.  spanRef, err := trace.CreateEntrySpan(\u0026#34;operationName\u0026#34;, InjectorRef) Use trace.CreateLocalSpan() API to create local span\n the only parameter is the operation name of span.  spanRef, err := trace.CreateLocalSpan(\u0026#34;operationName\u0026#34;) Use trace.CreateExitSpan() API to create exit span.\n the first parameter is the operation name of span the second parameter is the remote peer which means the peer address of exit operation. the third parameter is the ExtractorRef  spanRef, err := trace.CreateExitSpan(\u0026#34;operationName\u0026#34;, \u0026#34;peer\u0026#34;, ExtractorRef) Use trace.StopSpan() API to stop current span\ntrace.StopSpan() Add Span’s Tag and Log Use trace.AddLog() to record log in span.\nUse trace.SetTag() to add tag to span, the parameters of tag are two String which are key and value respectively.\ntrace.AddLog(...string) trace.SetTag(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;) Set ComponentID Use trace.SetComponent() to set the component id of the Span\n the type of parameter is int32.  trace.SetComponent(ComponentID) The Component ID in Span is used to identify the current component, which is declared in the component libraries YAML from the OAP server side.\nAsync Prepare/Finish SpanRef is the return value of CreateSpan.Use SpanRef.PrepareAsync() to make current span still alive until SpanRef.AsyncFinish() called.\n Call PrepareAsync(). Use trace.StopSpan() to stop span in the original goroutine. Propagate the SpanRef to any other goroutine. Call SpanRef.AsyncFinish() in any goroutine.  Capture/Continue Context Snapshot  Use trace.CaptureContext() to get the segment info and store it in ContextSnapshotRef. Propagate the snapshot context to any other goroutine. Use trace.ContinueContext(snapshotRef) to load the snapshotRef in the target goroutine.  Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n  Use trace.GetTraceID() API to get traceID.\ntraceID := trace.GetTraceID()   Use trace.GetSegmentID() API to get segmentID.\nsegmentID := trace.GetSegmentID()   Use trace.GetSpanID() API to get spanID.\nspanID := trace.GetSpanID()   Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. 
All the data in the context will be propagated with the in-wire process automatically.\nUse trace.SetCorrelation() API to set custom data in tracing context.\ntrace.SetCorrelation(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;)  Max element count in the correlation context is 3 Max value length of each element is 128  CorrelationContext will remove the key when the value is empty.\nUse trace.GetCorrelation() API to get custom data.\nvalue := trace.GetCorrelation(\u0026#34;key\u0026#34;) ","title":"Tracing APIs","url":"/docs/skywalking-go/latest/en/advanced-features/manual-apis/toolkit-trace/"},{"content":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\nimport \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Use Native Tracing Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\ntype ExtractorRef func(headerKey string) (string, error) type InjectorRef func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request trace.CreateEntrySpan(\u0026#34;EntrySpan\u0026#34;, func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request trace.CreateExitSpan(\u0026#34;ExitSpan\u0026#34;, request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Create Span Use trace.CreateEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel.\n The first parameter is operation name of span the second parameter is InjectorRef.  spanRef, err := trace.CreateEntrySpan(\u0026#34;operationName\u0026#34;, InjectorRef) Use trace.CreateLocalSpan() API to create local span\n the only parameter is the operation name of span.  spanRef, err := trace.CreateLocalSpan(\u0026#34;operationName\u0026#34;) Use trace.CreateExitSpan() API to create exit span.\n the first parameter is the operation name of span the second parameter is the remote peer which means the peer address of exit operation. the third parameter is the ExtractorRef  spanRef, err := trace.CreateExitSpan(\u0026#34;operationName\u0026#34;, \u0026#34;peer\u0026#34;, ExtractorRef) Use trace.StopSpan() API to stop current span\ntrace.StopSpan() Add Span’s Tag and Log Use trace.AddLog() to record log in span.\nUse trace.SetTag() to add tag to span, the parameters of tag are two String which are key and value respectively.\ntrace.AddLog(...string) trace.SetTag(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;) Set ComponentID Use trace.SetComponent() to set the component id of the Span\n the type of parameter is int32.  trace.SetComponent(ComponentID) The Component ID in Span is used to identify the current component, which is declared in the component libraries YAML from the OAP server side.\nAsync Prepare/Finish SpanRef is the return value of CreateSpan.Use SpanRef.PrepareAsync() to make current span still alive until SpanRef.AsyncFinish() called.\n Call PrepareAsync(). 
Use trace.StopSpan() to stop span in the original goroutine. Propagate the SpanRef to any other goroutine. Call SpanRef.AsyncFinish() in any goroutine.  Capture/Continue Context Snapshot  Use trace.CaptureContext() to get the segment info and store it in ContextSnapshotRef. Propagate the snapshot context to any other goroutine. Use trace.ContinueContext(snapshotRef) to load the snapshotRef in the target goroutine.  Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n  Use trace.GetTraceID() API to get traceID.\ntraceID := trace.GetTraceID()   Use trace.GetSegmentID() API to get segmentID.\nsegmentID := trace.GetSegmentID()   Use trace.GetSpanID() API to get spanID.\nspanID := trace.GetSpanID()   Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\nUse trace.SetCorrelation() API to set custom data in tracing context.\ntrace.SetCorrelation(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;)  Max element count in the correlation context is 3 Max value length of each element is 128  CorrelationContext will remove the key when the value is empty.\nUse trace.GetCorrelation() API to get custom data.\nvalue := trace.GetCorrelation(\u0026#34;key\u0026#34;) ","title":"Tracing APIs","url":"/docs/skywalking-go/next/en/advanced-features/manual-apis/toolkit-trace/"},{"content":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\nimport \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Use Native Tracing Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\ntype ExtractorRef func(headerKey string) (string, error) type InjectorRef func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request trace.CreateEntrySpan(\u0026#34;EntrySpan\u0026#34;, func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request trace.CreateExitSpan(\u0026#34;ExitSpan\u0026#34;, request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Create Span Use trace.CreateEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel.\n The first parameter is operation name of span the second parameter is InjectorRef.  spanRef, err := trace.CreateEntrySpan(\u0026#34;operationName\u0026#34;, InjectorRef) Use trace.CreateLocalSpan() API to create local span\n the only parameter is the operation name of span.  spanRef, err := trace.CreateLocalSpan(\u0026#34;operationName\u0026#34;) Use trace.CreateExitSpan() API to create exit span.\n the first parameter is the operation name of span the second parameter is the remote peer which means the peer address of exit operation. 
the third parameter is the ExtractorRef  spanRef, err := trace.CreateExitSpan(\u0026#34;operationName\u0026#34;, \u0026#34;peer\u0026#34;, ExtractorRef) Use trace.StopSpan() API to stop current span\ntrace.StopSpan() Add Span’s Tag and Log Use trace.AddLog() to record log in span.\nUse trace.SetTag() to add tag to span, the parameters of tag are two String which are key and value respectively.\ntrace.AddLog(...string) trace.SetTag(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;) Set ComponentID Use trace.SetComponent() to set the component id of the Span\n the type of parameter is int32.  trace.SetComponent(ComponentID) The Component ID in Span is used to identify the current component, which is declared in the component libraries YAML from the OAP server side.\nAsync Prepare/Finish SpanRef is the return value of CreateSpan.Use SpanRef.PrepareAsync() to make current span still alive until SpanRef.AsyncFinish() called.\n Call PrepareAsync(). Use trace.StopSpan() to stop span in the original goroutine. Propagate the SpanRef to any other goroutine. Call SpanRef.AsyncFinish() in any goroutine.  Capture/Continue Context Snapshot  Use trace.CaptureContext() to get the segment info and store it in ContextSnapshotRef. Propagate the snapshot context to any other goroutine. Use trace.ContinueContext(snapshotRef) to load the snapshotRef in the target goroutine.  Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n  Use trace.GetTraceID() API to get traceID.\ntraceID := trace.GetTraceID()   Use trace.GetSegmentID() API to get segmentID.\nsegmentID := trace.GetSegmentID()   Use trace.GetSpanID() API to get spanID.\nspanID := trace.GetSpanID()   Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\nUse trace.SetCorrelation() API to set custom data in tracing context.\ntrace.SetCorrelation(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;)  Max element count in the correlation context is 3 Max value length of each element is 128  CorrelationContext will remove the key when the value is empty.\nUse trace.GetCorrelation() API to get custom data.\nvalue := trace.GetCorrelation(\u0026#34;key\u0026#34;) ","title":"Tracing APIs","url":"/docs/skywalking-go/v0.4.0/en/advanced-features/manual-apis/toolkit-trace/"},{"content":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  gin: Gin tested v1.7.0 to v1.9.0. http: Native HTTP tested go v1.17 to go v1.20. go-restfulv3: Go-Restful tested v3.7.1 to 3.10.2. mux: Mux tested v1.7.0 to v1.8.0. iris: Iris tested v12.1.0 to 12.2.5. fasthttp: FastHttp tested v1.10.0 to v1.50.0. fiber: Fiber tested v2.49.0 to v2.50.0. echov4: Echov4 tested v4.0.0 to v4.11.4   HTTP Client  http: Native HTTP tested go v1.17 to go v1.20. fasthttp: FastHttp tested v1.10.0 to v1.50.0.   RPC Frameworks  dubbo: Dubbo tested v3.0.1 to v3.0.5. kratosv2: Kratos tested v2.3.1 to v2.6.2. microv4: Go-Micro tested v4.6.0 to v4.10.2. grpc : gRPC tested v1.55.0 to v1.57.0.   Database Client  gorm: GORM tested v1.22.0 to v1.25.1.  MySQL Driver   mongo: Mongo tested v1.11.1 to v1.11.7. sql: Native SQL tested go v1.17 to go v1.20.  MySQL Driver tested v1.4.0 to v1.7.1.     
Cache Client  go-redisv9: go-redis tested v9.0.3 to v9.0.5.   MQ Client  rocketMQ: rocketmq-client-go tested v2.1.2. amqp: AMQP tested v1.9.0.    Metrics Plugins The meter plugin provides the advanced metrics collections.\n runtimemetrics: Native Runtime Metrics tested go v1.17 to go v1.20.  Logging Plugins The logging plugin provides the advanced logging collections.\n logrus: Logrus tested v1.8.2 to v1.9.3. zap: Zap tested v1.17.0 to v1.24.0.  ","title":"Tracing Plugins","url":"/docs/skywalking-go/latest/en/agent/support-plugins/"},{"content":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  gin: Gin tested v1.7.0 to v1.9.0. http: Native HTTP tested go v1.17 to go v1.20. go-restfulv3: Go-Restful tested v3.7.1 to 3.10.2. mux: Mux tested v1.7.0 to v1.8.0. iris: Iris tested v12.1.0 to 12.2.5. fasthttp: FastHttp tested v1.10.0 to v1.50.0. fiber: Fiber tested v2.49.0 to v2.50.0. echov4: Echov4 tested v4.0.0 to v4.11.4   HTTP Client  http: Native HTTP tested go v1.17 to go v1.20. fasthttp: FastHttp tested v1.10.0 to v1.50.0.   RPC Frameworks  dubbo: Dubbo tested v3.0.1 to v3.0.5. kratosv2: Kratos tested v2.3.1 to v2.6.2. microv4: Go-Micro tested v4.6.0 to v4.10.2. grpc : gRPC tested v1.55.0 to v1.57.0.   Database Client  gorm: GORM tested v1.22.0 to v1.25.1.  MySQL Driver   mongo: Mongo tested v1.11.1 to v1.11.7. sql: Native SQL tested go v1.17 to go v1.20.  MySQL Driver tested v1.4.0 to v1.7.1.     Cache Client  go-redisv9: go-redis tested v9.0.3 to v9.0.5.   MQ Client  rocketMQ: rocketmq-client-go tested v2.1.2. amqp: AMQP tested v1.9.0. pulsar: pulsar-client-go tested v0.12.0. segmentio-kafka: segmentio-kafka tested v0.4.47.    Metrics Plugins The meter plugin provides the advanced metrics collections.\n runtimemetrics: Native Runtime Metrics tested go v1.17 to go v1.20.  Logging Plugins The logging plugin provides the advanced logging collections.\n logrus: Logrus tested v1.8.2 to v1.9.3. zap: Zap tested v1.17.0 to v1.24.0.  ","title":"Tracing Plugins","url":"/docs/skywalking-go/next/en/agent/support-plugins/"},{"content":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  gin: Gin tested v1.7.0 to v1.9.0. http: Native HTTP tested go v1.17 to go v1.20. go-restfulv3: Go-Restful tested v3.7.1 to 3.10.2. mux: Mux tested v1.7.0 to v1.8.0. iris: Iris tested v12.1.0 to 12.2.5. fasthttp: FastHttp tested v1.10.0 to v1.50.0. fiber: Fiber tested v2.49.0 to v2.50.0. echov4: Echov4 tested v4.0.0 to v4.11.4   HTTP Client  http: Native HTTP tested go v1.17 to go v1.20. fasthttp: FastHttp tested v1.10.0 to v1.50.0.   RPC Frameworks  dubbo: Dubbo tested v3.0.1 to v3.0.5. kratosv2: Kratos tested v2.3.1 to v2.6.2. microv4: Go-Micro tested v4.6.0 to v4.10.2. grpc : gRPC tested v1.55.0 to v1.57.0.   Database Client  gorm: GORM tested v1.22.0 to v1.25.1.  MySQL Driver   mongo: Mongo tested v1.11.1 to v1.11.7. sql: Native SQL tested go v1.17 to go v1.20.  MySQL Driver tested v1.4.0 to v1.7.1.     Cache Client  go-redisv9: go-redis tested v9.0.3 to v9.0.5.   MQ Client  rocketMQ: rocketmq-client-go tested v2.1.2. amqp: AMQP tested v1.9.0.    Metrics Plugins The meter plugin provides the advanced metrics collections.\n runtimemetrics: Native Runtime Metrics tested go v1.17 to go v1.20.  
Logging Plugins The logging plugin provides the advanced logging collections.\n logrus: Logrus tested v1.8.2 to v1.9.3. zap: Zap tested v1.17.0 to v1.24.0.  ","title":"Tracing Plugins","url":"/docs/skywalking-go/v0.4.0/en/agent/support-plugins/"},{"content":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to provide functionality for distributed tracing, metrics, and logging data. For a detailed list of supported plugins, please refer to the documentation. This document aims to provide you with some configuration information for your usage. Please ensure that you have followed the documentation to successfully install the SkyWalking Go Agent into your application.\nMetadata Mechanism The Go Agent would be identified by the SkyWalking backend after startup and maintain a heartbeat to keep alive.\n   Name Environment Key Default Value Description     agent.service_name SW_AGENT_NAME Your_Application_Name The name of the service which showed in UI.   agent.instance_env_name  SW_AGENT_INSTANCE_NAME To obtain the environment variable key for the instance name, if it cannot be obtained, an instance name will be automatically generated.    Tracing Distributed tracing is the most common form of plugin in the Go Agent, and it becomes active with each new incoming request. By default, all plugins are enabled. For a specific list of plugins, please refer to the documentation.\nIf you wish to disable a particular plugin to prevent enhancements related to that plugin, please consult the documentation on how to disable plugins.\nThe basic configuration is as follows:\n   Name Environment Key Default Value Description     agent.sampler SW_AGENT_SAMPLER 1 Sampling rate of tracing data, which is a floating-point value that must be between 0 and 1.   agent.ignore_suffix SW_AGENT_IGNORE_SUFFIX .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).    Metrics The metrics plugin can dynamically monitor the execution status of the current program and aggregate the data into corresponding metrics. Eventually, the data is reported to the SkyWalking backend at a specified interval. For a specific list of plugins, please refer to the documentation.\nThe current configuration information is as follows:\n   Name Environment Key Default Value Description     agent.meter.collect_interval SW_AGENT_METER_COLLECT_INTERVAL 20 The interval of collecting metrics, in seconds.    Logging The logging plugin in SkyWalking Go Agent are used to handle agent and application logs, as well as application log querying. They primarily consist of the following three functionalities:\n Agent Log Adaptation: The plugin detects the logging framework used in the current system and integrates the agent\u0026rsquo;s logs with the system\u0026rsquo;s logging framework. Distributed Tracing Enhancement: It combines the distributed tracing information from the current request with the application logs, allowing you to have real-time visibility into all log contents related to specific requests. Log Reporting: The plugin reports both application and agent logs to the SkyWalking backend for data retrieval and display purposes.  
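A minimal sketch consolidating the toolkit/trace calls quoted in the "Tracing APIs" entries earlier in this section. The HTTP handler shape, the span and tag names, and the downstream request are illustrative assumptions; only the trace.* calls are taken from the API described above.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/apache/skywalking-go/toolkit/trace"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Entry span: extract the context carrier from the incoming request headers.
	trace.CreateEntrySpan("GET:/orders", func(headerKey string) (string, error) {
		return r.Header.Get(headerKey), nil
	})
	defer trace.StopSpan() // stop the entry span when the handler returns

	// Attach custom data to the active span (names here are hypothetical).
	trace.SetTag("order.channel", "web")
	trace.AddLog("order lookup started")
	trace.SetCorrelation("tenant", "demo")

	// Exit span: inject the context carrier into the outgoing request headers.
	out, _ := http.NewRequest(http.MethodGet, "http://inventory:8080/stock", nil)
	trace.CreateExitSpan("GET:/stock", out.Host, func(headerKey, headerValue string) error {
		out.Header.Add(headerKey, headerValue)
		return nil
	})
	_, _ = http.DefaultClient.Do(out)
	trace.StopSpan() // stop the exit span before the deferred entry-span stop

	// Read-only context APIs are only populated when the request is traced.
	fmt.Fprintln(w, trace.GetTraceID())
}
```

As in the documented demo, the CreateEntrySpan/CreateExitSpan return values are discarded here; capture the returned SpanRef only when the async prepare/finish pattern is needed.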
For more details, please refer to the documentation to learn more detail.\n","title":"Tracing, Metrics and Logging with Go Agent","url":"/docs/skywalking-go/latest/en/agent/tracing-metrics-logging/"},{"content":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to provide functionality for distributed tracing, metrics, and logging data. For a detailed list of supported plugins, please refer to the documentation. This document aims to provide you with some configuration information for your usage. Please ensure that you have followed the documentation to successfully install the SkyWalking Go Agent into your application.\nMetadata Mechanism The Go Agent would be identified by the SkyWalking backend after startup and maintain a heartbeat to keep alive.\n   Name Environment Key Default Value Description     agent.service_name SW_AGENT_NAME Your_Application_Name The name of the service which showed in UI.   agent.instance_env_name  SW_AGENT_INSTANCE_NAME To obtain the environment variable key for the instance name, if it cannot be obtained, an instance name will be automatically generated.    Tracing Distributed tracing is the most common form of plugin in the Go Agent, and it becomes active with each new incoming request. By default, all plugins are enabled. For a specific list of plugins, please refer to the documentation.\nIf you wish to disable a particular plugin to prevent enhancements related to that plugin, please consult the documentation on how to disable plugins.\nThe basic configuration is as follows:\n   Name Environment Key Default Value Description     agent.sampler SW_AGENT_SAMPLER 1 Sampling rate of tracing data, which is a floating-point value that must be between 0 and 1.   agent.ignore_suffix SW_AGENT_IGNORE_SUFFIX .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the suffix obtained by splitting the operation name by the last index of \u0026ldquo;.\u0026rdquo; in this set, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).   agent.trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH  If the operation name of the first span is matching, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).    Metrics The metrics plugin can dynamically monitor the execution status of the current program and aggregate the data into corresponding metrics. Eventually, the data is reported to the SkyWalking backend at a specified interval. For a specific list of plugins, please refer to the documentation.\nThe current configuration information is as follows:\n   Name Environment Key Default Value Description     agent.meter.collect_interval SW_AGENT_METER_COLLECT_INTERVAL 20 The interval of collecting metrics, in seconds.    Logging The logging plugin in SkyWalking Go Agent are used to handle agent and application logs, as well as application log querying. They primarily consist of the following three functionalities:\n Agent Log Adaptation: The plugin detects the logging framework used in the current system and integrates the agent\u0026rsquo;s logs with the system\u0026rsquo;s logging framework. Distributed Tracing Enhancement: It combines the distributed tracing information from the current request with the application logs, allowing you to have real-time visibility into all log contents related to specific requests. Log Reporting: The plugin reports both application and agent logs to the SkyWalking backend for data retrieval and display purposes.  
For more details, please refer to the documentation to learn more detail.\n","title":"Tracing, Metrics and Logging with Go Agent","url":"/docs/skywalking-go/next/en/agent/tracing-metrics-logging/"},{"content":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to provide functionality for distributed tracing, metrics, and logging data. For a detailed list of supported plugins, please refer to the documentation. This document aims to provide you with some configuration information for your usage. Please ensure that you have followed the documentation to successfully install the SkyWalking Go Agent into your application.\nMetadata Mechanism The Go Agent would be identified by the SkyWalking backend after startup and maintain a heartbeat to keep alive.\n   Name Environment Key Default Value Description     agent.service_name SW_AGENT_NAME Your_Application_Name The name of the service which showed in UI.   agent.instance_env_name  SW_AGENT_INSTANCE_NAME To obtain the environment variable key for the instance name, if it cannot be obtained, an instance name will be automatically generated.    Tracing Distributed tracing is the most common form of plugin in the Go Agent, and it becomes active with each new incoming request. By default, all plugins are enabled. For a specific list of plugins, please refer to the documentation.\nIf you wish to disable a particular plugin to prevent enhancements related to that plugin, please consult the documentation on how to disable plugins.\nThe basic configuration is as follows:\n   Name Environment Key Default Value Description     agent.sampler SW_AGENT_SAMPLER 1 Sampling rate of tracing data, which is a floating-point value that must be between 0 and 1.   agent.ignore_suffix SW_AGENT_IGNORE_SUFFIX .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).    Metrics The metrics plugin can dynamically monitor the execution status of the current program and aggregate the data into corresponding metrics. Eventually, the data is reported to the SkyWalking backend at a specified interval. For a specific list of plugins, please refer to the documentation.\nThe current configuration information is as follows:\n   Name Environment Key Default Value Description     agent.meter.collect_interval SW_AGENT_METER_COLLECT_INTERVAL 20 The interval of collecting metrics, in seconds.    Logging The logging plugin in SkyWalking Go Agent are used to handle agent and application logs, as well as application log querying. They primarily consist of the following three functionalities:\n Agent Log Adaptation: The plugin detects the logging framework used in the current system and integrates the agent\u0026rsquo;s logs with the system\u0026rsquo;s logging framework. Distributed Tracing Enhancement: It combines the distributed tracing information from the current request with the application logs, allowing you to have real-time visibility into all log contents related to specific requests. Log Reporting: The plugin reports both application and agent logs to the SkyWalking backend for data retrieval and display purposes.  
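A second hedged sketch, following the async-span and context-snapshot steps listed in the "Tracing APIs" entries above. The goroutine structure, span name, and error handling are assumptions; PrepareAsync/AsyncFinish on the returned SpanRef and CaptureContext/ContinueContext follow the documented steps, though the exact return types are inferred from that description.

```go
package main

import (
	"sync"

	"github.com/apache/skywalking-go/toolkit/trace"
)

func processBatch() {
	// Create a local span and keep its reference for the asynchronous finish.
	spanRef, err := trace.CreateLocalSpan("process-batch")
	if err != nil {
		return
	}
	spanRef.PrepareAsync() // keep the span alive after StopSpan
	trace.StopSpan()       // stop it in the original goroutine

	// Capture the current tracing context so another goroutine can continue it.
	snapshot := trace.CaptureContext()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		trace.ContinueContext(snapshot) // load the snapshot in the target goroutine
		// ... perform the actual asynchronous work here ...
		spanRef.AsyncFinish() // finish the span from this goroutine
	}()
	wg.Wait()
}
```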
For more details, please refer to the documentation to learn more detail.\n","title":"Tracing, Metrics and Logging with Go Agent","url":"/docs/skywalking-go/v0.4.0/en/agent/tracing-metrics-logging/"},{"content":"Traffic The traffic is used to collecting the network access logs from services through the Service Discovery, and send access logs to the backend server for analyze.\nConfiguration    Name Default Environment Key Description     access_log.active false ROVER_ACCESS_LOG_ACTIVE Is active the access log monitoring.   access_log.exclude_namespaces istio-system,cert-manager,kube-system ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES Exclude processes in the specified Kubernetes namespace. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;   access_log.exclude_cluster  ROVER_ACCESS_LOG_EXCLUDE_CLUSTER Exclude processes in the specified cluster which defined in the process module. Multiple clusters split by \u0026ldquo;,\u0026rdquo;   access_log.flush.max_count 2000 ROVER_ACCESS_LOG_FLUSH_MAX_COUNT The max count of the access log when flush to the backend.   access_log.flush.period 5s ROVER_ACCESS_LOG_FLUSH_PERIOD The period of flush access log to the backend.   access_log_protocol_analyze.per_cpu_buffer 400KB ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   access_log.protocol_analyze.parallels 2 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   access_log.protocol_analyze.queue_size 5000 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyze queue.    Collectors Socket Connect/Accept/Close Monitor all socket connect, accept, and close events from monitored processes by attaching eBPF program to the respective trace points.\nSocket traffic Capture all socket traffic from monitored processes by attaching eBPF program to network syscalls.\nProtocol Data collection is followed by protocol analysis. Currently, the supported protocols include:\n HTTP/1.x HTTP/2  Note: As HTTP2 is a stateful protocol, it only supports monitoring processes that start after monitor. Processes already running at the time of monitoring may fail to provide complete data, leading to unsuccessful analysis.\nTLS When a process uses the TLS protocol for data transfer, Rover monitors libraries such as OpenSSL, BoringSSL, GoTLS, and NodeTLS to access the raw content. This feature is also applicable for protocol analysis.\nNote: the parsing of TLS protocols in Java is currently not supported.\nL2-L4 During data transmission, Rover records each packet\u0026rsquo;s through the network layers L2 to L4 using kprobes. This approach enhances the understanding of each packet\u0026rsquo;s transmission process, facilitating easier localization and troubleshooting of network issues.\n","title":"Traffic","url":"/docs/skywalking-rover/latest/en/setup/configuration/traffic/"},{"content":"Traffic The traffic is used to collecting the network access logs from services through the Service Discovery, and send access logs to the backend server for analyze.\nConfiguration    Name Default Environment Key Description     access_log.active false ROVER_ACCESS_LOG_ACTIVE Is active the access log monitoring.   access_log.exclude_namespaces istio-system,cert-manager,kube-system ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES Exclude processes in the specified Kubernetes namespace. 
Multiple namespaces split by \u0026ldquo;,\u0026rdquo;   access_log.exclude_cluster  ROVER_ACCESS_LOG_EXCLUDE_CLUSTER Exclude processes in the specified cluster which defined in the process module. Multiple clusters split by \u0026ldquo;,\u0026rdquo;   access_log.flush.max_count 2000 ROVER_ACCESS_LOG_FLUSH_MAX_COUNT The max count of the access log when flush to the backend.   access_log.flush.period 5s ROVER_ACCESS_LOG_FLUSH_PERIOD The period of flush access log to the backend.   access_log_protocol_analyze.per_cpu_buffer 400KB ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   access_log.protocol_analyze.parallels 2 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   access_log.protocol_analyze.queue_size 5000 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyze queue.    Collectors Socket Connect/Accept/Close Monitor all socket connect, accept, and close events from monitored processes by attaching eBPF program to the respective trace points.\nSocket traffic Capture all socket traffic from monitored processes by attaching eBPF program to network syscalls.\nProtocol Data collection is followed by protocol analysis. Currently, the supported protocols include:\n HTTP/1.x HTTP/2  Note: As HTTP2 is a stateful protocol, it only supports monitoring processes that start after monitor. Processes already running at the time of monitoring may fail to provide complete data, leading to unsuccessful analysis.\nTLS When a process uses the TLS protocol for data transfer, Rover monitors libraries such as OpenSSL, BoringSSL, GoTLS, and NodeTLS to access the raw content. This feature is also applicable for protocol analysis.\nNote: the parsing of TLS protocols in Java is currently not supported.\nL2-L4 During data transmission, Rover records each packet\u0026rsquo;s through the network layers L2 to L4 using kprobes. This approach enhances the understanding of each packet\u0026rsquo;s transmission process, facilitating easier localization and troubleshooting of network issues.\n","title":"Traffic","url":"/docs/skywalking-rover/next/en/setup/configuration/traffic/"},{"content":"Traffic The traffic is used to collecting the network access logs from services through the Service Discovery, and send access logs to the backend server for analyze.\nConfiguration    Name Default Environment Key Description     access_log.active false ROVER_ACCESS_LOG_ACTIVE Is active the access log monitoring.   access_log.exclude_namespaces istio-system,cert-manager,kube-system ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES Exclude processes in the specified Kubernetes namespace. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;   access_log.exclude_cluster  ROVER_ACCESS_LOG_EXCLUDE_CLUSTER Exclude processes in the specified cluster which defined in the process module. Multiple clusters split by \u0026ldquo;,\u0026rdquo;   access_log.flush.max_count 2000 ROVER_ACCESS_LOG_FLUSH_MAX_COUNT The max count of the access log when flush to the backend.   access_log.flush.period 5s ROVER_ACCESS_LOG_FLUSH_PERIOD The period of flush access log to the backend.   access_log_protocol_analyze.per_cpu_buffer 400KB ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   access_log.protocol_analyze.parallels 2 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   
access_log.protocol_analyze.queue_size 5000 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyze queue.    Collectors Socket Connect/Accept/Close Monitor all socket connect, accept, and close events from monitored processes by attaching eBPF program to the respective trace points.\nSocket traffic Capture all socket traffic from monitored processes by attaching eBPF program to network syscalls.\nProtocol Data collection is followed by protocol analysis. Currently, the supported protocols include:\n HTTP/1.x HTTP/2  Note: As HTTP2 is a stateful protocol, it only supports monitoring processes that start after monitor. Processes already running at the time of monitoring may fail to provide complete data, leading to unsuccessful analysis.\nTLS When a process uses the TLS protocol for data transfer, Rover monitors libraries such as OpenSSL, BoringSSL, GoTLS, and NodeTLS to access the raw content. This feature is also applicable for protocol analysis.\nNote: the parsing of TLS protocols in Java is currently not supported.\nL2-L4 During data transmission, Rover records each packet\u0026rsquo;s through the network layers L2 to L4 using kprobes. This approach enhances the understanding of each packet\u0026rsquo;s transmission process, facilitating easier localization and troubleshooting of network issues.\n","title":"Traffic","url":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/traffic/"},{"content":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and transport data to the Kafka Topic.\nConfig Here is config file, set out as follows:\n Declare gRPC server and kafka client to receive and transmit data. Declare the SkyWalking Log protocol gatherer and sender to transmit protocol via pipeline. Expose Self-Observability telemetry data to Prometheus.  ","title":"Transmit Log to Kafka","url":"/docs/skywalking-satellite/latest/en/setup/examples/feature/transmit-log-to-kafka/readme/"},{"content":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and transport data to the Kafka Topic.\nConfig Here is config file, set out as follows:\n Declare gRPC server and kafka client to receive and transmit data. Declare the SkyWalking Log protocol gatherer and sender to transmit protocol via pipeline. Expose Self-Observability telemetry data to Prometheus.  ","title":"Transmit Log to Kafka","url":"/docs/skywalking-satellite/next/en/setup/examples/feature/transmit-log-to-kafka/readme/"},{"content":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and transport data to the Kafka Topic.\nConfig Here is config file, set out as follows:\n Declare gRPC server and kafka client to receive and transmit data. Declare the SkyWalking Log protocol gatherer and sender to transmit protocol via pipeline. Expose Self-Observability telemetry data to Prometheus.  ","title":"Transmit Log to Kafka","url":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/feature/transmit-log-to-kafka/readme/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. 
After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/latest/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/next/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.0.0/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.1.0/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.2.0/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  
These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.3.0/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.4.0/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.5.0/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.6.0/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.7.0/en/setup/backend/ttl/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  
serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","title":"UI","url":"/docs/main/latest/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","title":"UI","url":"/docs/main/next/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings Settings file of UI is webapp/webapp.yml in distribution package. It has three parts.\n Listening port. Backend connect info.  server:port:8080spring:cloud:gateway:routes:- id:oap-routeuri:lb://oap-servicepredicates:- Path=/graphql/**discovery:client:simple:instances:oap-service:# Point to all backend\u0026#39;s restHost:restPort, split by URI arrays.- uri:http://127.0.0.1:12800- uri:http://instance-2:12800Start with Docker Image Start a container to connect oap server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of OAP server. Default value is http://127.0.0.1:12800.\n","title":"UI","url":"/docs/main/v9.0.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  
server:port:8080spring:cloud:gateway:routes:- id:oap-routeuri:lb://oap-servicepredicates:- Path=/graphql/**discovery:client:simple:instances:oap-service:# Point to all backend\u0026#39;s restHost:restPort, split by URI arrays.- uri:http://127.0.0.1:12800- uri:http://instance-2:12800Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\n","title":"UI","url":"/docs/main/v9.1.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  server:port:8080spring:cloud:gateway:routes:- id:oap-routeuri:lb://oap-servicepredicates:- Path=/graphql/**discovery:client:simple:instances:oap-service:# Point to all backend\u0026#39;s restHost:restPort, split by URI arrays.- uri:http://127.0.0.1:12800- uri:http://instance-2:12800Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\n","title":"UI","url":"/docs/main/v9.2.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, without http:// prefix.oapServices:${SW_OAP_ADDRESS:-localhost:12800}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\n","title":"UI","url":"/docs/main/v9.3.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  
serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, without http:// prefix.oapServices:${SW_OAP_ADDRESS:-localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","title":"UI","url":"/docs/main/v9.4.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","title":"UI","url":"/docs/main/v9.5.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","title":"UI","url":"/docs/main/v9.6.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  
serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","title":"UI","url":"/docs/main/v9.7.0/en/setup/backend/ui-setup/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/latest/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/next/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by SkyWalking agent plugin when they are started, but they can be configured in gateways.yml file or via Dynamic Configuration. 
The reason why they can\u0026rsquo;t register to backend automatically is that there\u0026rsquo;re no suitable agent plugins. For example, there are no agent plugins for Nginx, haproxy, etc. So in order to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/ip of this gateway instanceport:9099# the port of this gateway instance, defaults to 80Note: The host of the instance must be the one that is actually used at client side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.0.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.1.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. 
For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.2.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.3.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.4.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. 
So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.5.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.6.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.7.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Unit Test For Satellite, the specific plugin may have some common dependencies. 
So we provide a global test initializer to init the dependencies.\nimport ( _ \u0026quot;github.com/apache/skywalking-satellite/internal/satellite/test\u0026quot; ) ","title":"Unit Test","url":"/docs/skywalking-satellite/latest/en/guides/test/how-to-unit-test/"},{"content":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a global test initializer to init the dependencies.\nimport ( _ \u0026quot;github.com/apache/skywalking-satellite/internal/satellite/test\u0026quot; ) ","title":"Unit Test","url":"/docs/skywalking-satellite/next/en/guides/test/how-to-unit-test/"},{"content":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a global test initializer to init the dependencies.\nimport ( _ \u0026quot;github.com/apache/skywalking-satellite/internal/satellite/test\u0026quot; ) ","title":"Unit Test","url":"/docs/skywalking-satellite/v1.2.0/en/guides/test/how-to-unit-test/"},{"content":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","title":"Use annotation to mark the method you want to trace.","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/"},{"content":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... 
} Sample codes only\n","title":"Use annotation to mark the method you want to trace.","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/"},{"content":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","title":"Use annotation to mark the method you want to trace.","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/"},{"content":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","title":"Use annotation to mark the method you want to trace.","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/"},{"content":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  
/** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","title":"Use annotation to mark the method you want to trace.","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/"},{"content":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. SkyWalking Data Source Before you start, please install the SkyWalking data source plugin. In the data source config panel, chose the SkyWalking and set the url to the OAP server graphql service address, the default port is 12800. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. 
And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Topology Dashboards Dashboards Settings For now, SkyWalking support General Service and Service Mesh topology dashboards, the layer is GENERAL and MESH respectively. The following configuration can reuse the above General Service dashboard and add a new variable Plugin_SkyWalking for the dashboard: Add Topology Panel  Chose the Node Graph chart. Set Layer and Service by the variables. If you want to show all services in this layer, set Service empty. Set Node Metrics and Edge Metrics which you want to show on the topology.   Configure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","title":"Use Grafana As The UI","url":"/docs/main/latest/en/setup/backend/ui-grafana/"},{"content":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. SkyWalking Data Source Before you start, please install the SkyWalking data source plugin. 
In the data source config panel, chose the SkyWalking and set the url to the OAP server graphql service address, the default port is 12800. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Topology Dashboards Dashboards Settings For now, SkyWalking support General Service and Service Mesh topology dashboards, the layer is GENERAL and MESH respectively. The following configuration can reuse the above General Service dashboard and add a new variable Plugin_SkyWalking for the dashboard: Add Topology Panel  Chose the Node Graph chart. Set Layer and Service by the variables. If you want to show all services in this layer, set Service empty. Set Node Metrics and Edge Metrics which you want to show on the topology.   Configure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). 
Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","title":"Use Grafana As The UI","url":"/docs/main/next/en/setup/backend/ui-grafana/"},{"content":"Use Grafana As The UI Since 9.4.0, SkyWalking provide PromQL Service. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. Configure Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   
Sampled Records Same as the Sort Metrics.\nPreview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","title":"Use Grafana As The UI","url":"/docs/main/v9.4.0/en/setup/backend/ui-grafana/"},{"content":"Use Grafana As The UI Since 9.4.0, SkyWalking provide PromQL Service. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. Configure Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nPreview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. 
You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","title":"Use Grafana As The UI","url":"/docs/main/v9.5.0/en/setup/backend/ui-grafana/"},{"content":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. 
Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","title":"Use Grafana As The UI","url":"/docs/main/v9.6.0/en/setup/backend/ui-grafana/"},{"content":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. SkyWalking Data Source Before you start, please install the SkyWalking data source plugin. In the data source config panel, chose the SkyWalking and set the url to the OAP server graphql service address, the default port is 12800. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. 
Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Topology Dashboards Dashboards Settings For now, SkyWalking support General Service and Service Mesh topology dashboards, the layer is GENERAL and MESH respectively. The following configuration can reuse the above General Service dashboard and add a new variable Plugin_SkyWalking for the dashboard: Add Topology Panel  Chose the Node Graph chart. Set Layer and Service by the variables. If you want to show all services in this layer, set Service empty. Set Node Metrics and Edge Metrics which you want to show on the topology.   Configure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","title":"Use Grafana As The UI","url":"/docs/main/v9.7.0/en/setup/backend/ui-grafana/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. 
We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservices and distributed system-based architectures, distributed tracing has become more important. Distributed tracing provides relevant service context, such as the system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but had no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that with more data points you would eventually find the root cause. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. Instrumenting a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have a way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC overhead caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third-party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, diving deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction Reusing distributed tracing to achieve method-scope precision requires an understanding of the above limitations and a different approach. We call it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in them. The key for distributed tracing is the tracing context: the identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces.
This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number, etc.) to be the same operation. We derive span timestamps from the thread dumps the same operation appears in. Let’s put this visually:\nThe figure above represents 10 successive thread dumps. If this method appears in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped, but the timestamps of the thread dumps are close enough.\nTo reduce the overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance, as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of a performance issue.\nfinal CountDownLatch countDownLatch = new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People on the OPS/SRE team are not usually aware of all code changes and who made them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: the slowdown only happens when the latch is not counted down in time and await blocks. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so it is easy to find that the p99 of this endpoint is far from the average response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result says.\nThis is the profile analysis stack view. We see the stack element names, the durations (including/excluding children), and the slowest methods highlighted. It clearly shows that “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If one method is called in a loop, the profile analysis result would say the target method is slow, because it is captured in every dump. There could be another reason.
A method called many times can also end up captured in each thread dump. Even so, the profile did what it was designed for. It still helps the OPS/SRE team locate the code causing the issue.\nThe second consideration is overhead: the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is that profiling wouldn’t work for a low-latency trace, because the trace could complete before profiling starts. In reality, this is not an issue; profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in the Java agent since 7.0. The Python agent has supported it since 0.7.0. Read this for more details.\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/latest/en/concepts-and-designs/sdk-profiling/"},
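To make the dump-merging idea described in the article concrete, here is a minimal, hypothetical Java sketch of the estimation step; the class and field names are illustrative only and are not SkyWalking's actual implementation. It assumes each dump carries a timestamp and the stack signatures (method plus line number) captured at that moment, and it bounds a method's duration by the first and last dump the method appears in.

import java.util.List;

final class ThreadDump {
    final long timestampMillis;          // when this dump was taken
    final List<String> stackSignatures;  // e.g. "com.example.Foo.bar:42"
    ThreadDump(long timestampMillis, List<String> stackSignatures) {
        this.timestampMillis = timestampMillis;
        this.stackSignatures = stackSignatures;
    }
}

final class DumpMerger {
    // Estimate how long 'signature' ran: find the first and last periodic dump
    // that contains it. As the post notes, the method really started before the
    // first hit and finished after the last one, so this is a close lower bound.
    static long estimateDurationMillis(List<ThreadDump> dumps, String signature) {
        int first = -1, last = -1;
        for (int i = 0; i < dumps.size(); i++) {
            if (dumps.get(i).stackSignatures.contains(signature)) {
                if (first == -1) first = i;
                last = i;
            }
        }
        if (first == -1) return 0;  // never captured in any dump
        return dumps.get(last).timestampMillis - dumps.get(first).timestampMillis;
    }
}

For example, with a 10ms dump period, a method seen in dumps 4 through 8 would be estimated at roughly 40ms, while its true duration lies somewhere between 40ms and 60ms.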
Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/v9.4.0/en/concepts-and-designs/sdk-profiling/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/v9.5.0/en/concepts-and-designs/sdk-profiling/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. 
Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  
Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. 
Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/v9.6.0/en/concepts-and-designs/sdk-profiling/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/v9.7.0/en/concepts-and-designs/sdk-profiling/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. 
For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this hot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team’s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/latest/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of the SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace (agent.namespace in the Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters can be discarded.  The Canary Release method works for any version upgrade.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for the new OAP clusters. In version 6.5.0+ (especially for agent versions), we have an agent hot reboot trigger mechanism. This streamlines the upgrade process: we deploy a new cluster using the latest version of the SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on this mechanism, all agents will enter the cool_down mode and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this hot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team’s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/next/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of the SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. 
the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.0.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. 
For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.1.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.2.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. 
the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.3.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. 
For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.4.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.5.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. 
the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.6.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. 
For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.7.0/en/faq/v6-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/latest/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/next/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.0.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. 
Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.1.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.2.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.3.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.4.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. 
Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.5.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.6.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.7.0/en/faq/v8-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. 
OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/latest/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/next/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. 
If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic, add layer column to table InstanceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic and InstanceTraffic tables before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services and instances when traffic comes. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.0.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.1.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. 
You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.2.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.3.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. 
All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.4.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.5.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). 
This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.6.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  
","title":"V9 upgrade","url":"/docs/main/v9.7.0/en/faq/v9-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/latest/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/next/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.0.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.1.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.2.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. 
When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.3.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.4.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.5.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.6.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.7.0/en/faq/v3-version-upgrade/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. 
redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/latest/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/next/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/v9.3.0/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. 
The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/v9.4.0/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/v9.5.0/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  
Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/v9.6.0/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/v9.7.0/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/latest/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/next/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represents the database nodes detected by server agents' plugins. 
The performance metrics of the databases are also from Database client side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, Mariadb, MSSQL) in the Java agent could detect the latency of SQL performance, as well as SQL statements. As a result, in this dashboard, SkyWalking would show database traffic, latency, success rate and sampled slow SQLs powered by backend analysis capabilities.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  ","title":"Virtual Database","url":"/docs/main/v9.0.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  ","title":"Virtual Database","url":"/docs/main/v9.1.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  ","title":"Virtual Database","url":"/docs/main/v9.2.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/v9.3.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. 
The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/v9.4.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/v9.5.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/v9.6.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  
Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/v9.7.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/latest/en/setup/service-agent/virtual-mq/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/next/en/setup/service-agent/virtual-mq/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/v9.3.0/en/setup/service-agent/virtual-mq/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. 
The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/v9.4.0/en/setup/service-agent/virtual-mq/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/v9.5.0/en/setup/service-agent/virtual-mq/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/v9.6.0/en/setup/service-agent/virtual-mq/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. 
The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/v9.7.0/en/setup/service-agent/virtual-mq/"},{"content":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","title":"Webflux Tracing Assistant APIs","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-webflux/"},{"content":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","title":"Webflux Tracing Assistant APIs","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-webflux/"},{"content":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","title":"Webflux Tracing Assistant APIs","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-webflux/"},{"content":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","title":"Webflux Tracing Assistant APIs","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-webflux/"},{"content":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","title":"Webflux Tracing Assistant APIs","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-webflux/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/latest/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 10. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking\u0026rsquo;s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to install SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. 
If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplinking telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/next/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.0.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. 
If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.1.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.2.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. 
If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.3.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.4.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. 
If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.5.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.6.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. 
If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.7.0/readme/"},{"content":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB.\n Installation. Instruments about how to download and onboard BanyanDB server, Banyand. Clients. Some native clients to access Banyand. Observability. Learn how to effectively monitor, diagnose and optimize Banyand. Concept. Learn the concepts of Banyand. Includes the architecture, data model, and so on. CRUD Operations. To create, read, update, and delete data points or entities on resources in the schema.  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/skywalking-banyandb/latest/readme/"},{"content":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB.\n Installation. Instruments about how to download and onboard BanyanDB server, Banyand. Clients. Some native clients to access Banyand. Observability. Learn how to effectively monitor, diagnose and optimize Banyand. Concept. Learn the concepts of Banyand. Includes the architecture, data model, and so on. CRUD Operations. To create, read, update, and delete data points or entities on resources in the schema.  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/skywalking-banyandb/next/readme/"},{"content":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB.\n Installation. 
Instruments about how to download and onboard BanyanDB server, Banyand. Clients. Some native clients to access Banyand. Observability. Learn how to effectively monitor, diagnose and optimize Banyand. Concept. Learn the concepts of Banyand. Includes the architecture, data model, and so on. CRUD Operations. To create, read, update, and delete data points or entities on resources in the schema.  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/skywalking-banyandb/v0.5.0/readme/"},{"content":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra E2E is an End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It’s built based on the lessons learned from tens of hundreds of test cases in the SkyWalking main repo.\nFrom here you can learn all about SkyWalking Infra E2E\u0026rsquo;s architecture, how to set up E2E testing.\n  Concepts and Designs. The most important core ideas about SkyWalking Infra E2E. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up and running E2E testing.\n  Contribution. Introduce how to contribute SkyWalking Infra E2E.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through the pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-infra-e2e/latest/readme/"},{"content":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra E2E is an End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It’s built based on the lessons learned from tens of hundreds of test cases in the SkyWalking main repo.\nFrom here you can learn all about SkyWalking Infra E2E\u0026rsquo;s architecture, how to set up E2E testing.\n  Concepts and Designs. The most important core ideas about SkyWalking Infra E2E. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up and running E2E testing.\n  Contribution. Introduce how to contribute SkyWalking Infra E2E.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through the pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-infra-e2e/next/readme/"},{"content":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra E2E is an End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. 
It’s built based on the lessons learned from tens of hundreds of test cases in the SkyWalking main repo.\nFrom here you can learn all about SkyWalking Infra E2E\u0026rsquo;s architecture, how to set up E2E testing.\n  Concepts and Designs. The most important core ideas about SkyWalking Infra E2E. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up and running E2E testing.\n  Contribution. Introduce how to contribute SkyWalking Infra E2E.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through the pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-infra-e2e/v1.3.0/readme/"},{"content":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Rover\u0026rsquo;s architecture, and how to deploy and use SkyWalking Rover.\n  Concepts and Designs. The most important core ideas about SkyWalking Rover. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Rover.\n  Guides. Guide users to develop or debug SkyWalking Rover.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through a pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-rover/latest/readme/"},{"content":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Rover\u0026rsquo;s architecture, and how to deploy and use SkyWalking Rover.\n  Concepts and Designs. The most important core ideas about SkyWalking Rover. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Rover.\n  Guides. Guide users to develop or debug SkyWalking Rover.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through a pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-rover/next/readme/"},{"content":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Rover\u0026rsquo;s architecture, and how to deploy and use SkyWalking Rover.\n  Concepts and Designs. The most important core ideas about SkyWalking Rover. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Rover.\n  Guides. Guide users to develop or debug SkyWalking Rover.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through a pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-rover/v0.6.0/readme/"},{"content":"Welcome Here are SkyWalking Satellite official documentations. 
You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Satellite\u0026rsquo;s architecture, how to deploy and use SkyWalking Satellite.\n  Concepts and Designs. The most important core ideas about SkyWalking Satellite. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Satellite.\n  Guides. Guide users to develop or debug SkyWalking Satellite.\n  Protocols. Protocols show the communication ways between agents/probes, Satellite and SkyWalking. Anyone interested in uplink telemetry data should definitely read this.\n  Change logs. The feature records of the different versions.\n  FAQs. A manifest of already known setup problems, secondary developments experiments. When you are facing a problem, check here first.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your own contributions through pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-satellite/latest/readme/"},{"content":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Satellite\u0026rsquo;s architecture, how to deploy and use SkyWalking Satellite.\n  Concepts and Designs. The most important core ideas about SkyWalking Satellite. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Satellite.\n  Guides. Guide users to develop or debug SkyWalking Satellite.\n  Protocols. Protocols show the communication ways between agents/probes, Satellite and SkyWalking. Anyone interested in uplink telemetry data should definitely read this.\n  Change logs. The feature records of the different versions.\n  FAQs. A manifest of already known setup problems, secondary developments experiments. When you are facing a problem, check here first.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your own contributions through pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-satellite/next/readme/"},{"content":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Satellite\u0026rsquo;s architecture, how to deploy and use SkyWalking Satellite.\n  Concepts and Designs. The most important core ideas about SkyWalking Satellite. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Satellite.\n  Guides. Guide users to develop or debug SkyWalking Satellite.\n  Protocols. Protocols show the communication ways between agents/probes, Satellite and SkyWalking. Anyone interested in uplink telemetry data should definitely read this.\n  Change logs. The feature records of the different versions.\n  FAQs. A manifest of already known setup problems, secondary developments experiments. When you are facing a problem, check here first.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. 
Or better yet, submit your own contributions through pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-satellite/v1.2.0/readme/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/latest/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/next/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  
In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.0.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.1.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.2.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  
In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.3.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.4.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.5.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  
In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.6.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.7.0/en/faq/vnode/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/latest/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/next/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  
No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.0.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.1.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.2.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.3.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.4.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. 
Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.5.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.6.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.7.0/en/faq/time-and-timezone/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/latest/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/next/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. 
Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.0.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.1.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.2.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.3.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.4.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.5.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. 
Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.6.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.7.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? Log and trace exporters are using MQ as transport channel. And metrics exporter uses gRPC, as considering the scale.\n","title":"Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default?","url":"/docs/main/next/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. 
Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/latest/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. 
Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.0.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? 
Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.1.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.2.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? 
This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.3.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. 
Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.4.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. 
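For reference, the optional kafka-fetcher mentioned above is enabled on the OAP side through its module configuration. A minimal sketch, assuming the module, key, and environment-variable names commonly seen in application.yml (verify them against your release):

```yaml
# Minimal sketch: enabling the Kafka fetcher in the OAP application.yml.
# Module, key, and env-var names are assumptions; check your distribution.
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}                          # empty/disabled by default
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092} # Kafka brokers the agents report to
```

The agent side would then use the corresponding kafka-reporter plugin pointed at the same brokers.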
The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.5.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.6.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? 
Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.7.0/en/faq/why_mq_not_involved/"},{"content":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","title":"Why is `-Djava.ext.dirs` not supported?","url":"/docs/skywalking-java/latest/en/faq/ext-dirs/"},{"content":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. 
According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","title":"Why is `-Djava.ext.dirs` not supported?","url":"/docs/skywalking-java/next/en/faq/ext-dirs/"},{"content":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","title":"Why is `-Djava.ext.dirs` not supported?","url":"/docs/skywalking-java/v9.0.0/en/faq/ext-dirs/"},{"content":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. 
When -Djava.ext.dirs is used on JDK 11+, the JVM fails to boot with the following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, the SkyWalking agent does not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK 8 and -Djava.ext.dirs, follow the JRE recommendation and use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping -Djava.ext.dirs, the community had a pull request which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but SkyWalking did not officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation remains: use -classpath instead.\n","title":"Why is `-Djava.ext.dirs` not supported?","url":"/docs/skywalking-java/v9.1.0/en/faq/ext-dirs/"},{"content":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism, which was introduced in JDK 1.2, released in 1998. According to JEP 220: Modular Run-Time Images, it was removed in JDK 9 to simplify both the Java SE Platform and the JDK, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK 11, which is the most active LTS JDK version. When -Djava.ext.dirs is used on JDK 11+, the JVM fails to boot with the following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, the SkyWalking agent does not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK 8 and -Djava.ext.dirs, follow the JRE recommendation and use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping -Djava.ext.dirs, the community had a pull request which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but SkyWalking did not officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation remains: use -classpath instead.\n","title":"Why is `-Djava.ext.dirs` not supported?","url":"/docs/skywalking-java/v9.2.0/en/faq/ext-dirs/"},{"content":"Why is Clickhouse or Loki or xxx not supported as a storage option? Background In the past several years, community users have asked why Clickhouse, Loki, or some other storage is not supported in the upstream. We have repeated the answer many times, but the question keeps coming up, so here is a summary to help people understand the reasoning.\nPrevious Discussions All the following issues were about discussing new storage extension topics.\n Loki as storage  https://github.com/apache/skywalking/discussions/9836   ClickHouse  https://github.com/apache/skywalking/issues/11924 https://github.com/apache/skywalking/discussions/9011   Vertica  https://github.com/apache/skywalking/discussions/8817    Generally, all of those requests are about adding a new kind of storage.\nWhy don\u0026rsquo;t they exist? 
First of all, WHY is not quite the right question. SkyWalking is a volunteer-driven community; the volunteers build this project, including bug fixes, maintenance work, and new features, out of their personal and employer interests. The current status is the combination of all those interests rather than of assigned responsibilities. So, in SkyWalking, anything that exists is/was someone\u0026rsquo;s interest that was contributed to the upstream.\nThe same logic applies to this question: the active SkyWalking maintainers are focusing on JDBC (MySQL and PostgreSQL ecosystem) databases and Elasticsearch for existing users, and moving forward on BanyanDB as the native option. For now, we don\u0026rsquo;t have people interested in ClickHouse or any other database. That is why they are not there.\nHow could one be added? To add a new feature, including a new storage plugin, you should go through the SWIP - SkyWalking Improvement Proposal workflow and have a full discussion with the maintenance team. SkyWalking has a pluggable storage system, so ideally a new storage option only needs a new provider for the storage module. In practice, however, a storage implementation should be highly performant and well optimized; considering our experiences with the JDBC and Elasticsearch implementations, some flags and annotations may need to be added at the kernel level and in the data model declarations.\nFurthermore, as the current maintainers are not fans of ClickHouse or the others (otherwise, you would have seen those implementations already), they are not going to be involved in the code implementation, and they don\u0026rsquo;t know, from a general perspective, which kind of implementation in that specific database would have better behavior and performance. So, if you want to propose this to the upstream, you should be very experienced with that database and have enough scale and environments to provide a solid benchmark.\nWhat happens next if the new implementation gets accepted/merged/released? Whoever proposed the new implementation (such as ClickHouse storage) has to take responsibility for its maintenance. The maintenance means they need to\n Join storage-related discussions to make sure SkyWalking can move forward on kernel-level optimizations without being blocked by these specific storage options. Respond to questions, bugs, CVEs, and performance issues related to this storage. Make the implementation\u0026rsquo;s performance match the expectations of the original proposal. For example, regarding ClickHouse, people talk about how it is faster and more efficient than Elasticsearch for large-scale deployments; we should then always be able to see better benchmarks and production-side practice.  Even if the storage gets accepted/merged/released, if no one takes the above responsibilities or the community doesn\u0026rsquo;t receive feedback and questions about that storage, the SkyWalking PMC (Project Management Committee) will start the process to remove the implementation. This happened before for the Apache IoTDB and InfluxDB storage options. 
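The pluggable storage system mentioned above is wired up through the storage module of the OAP configuration; a new plugin ultimately shows up as one more selectable provider there. A rough sketch, assuming the key and environment-variable names commonly seen in application.yml (they can differ between releases):

```yaml
# Sketch of selecting a storage provider in the OAP application.yml.
# Key and env-var names are assumptions; verify them against your release.
storage:
  selector: ${SW_STORAGE:elasticsearch}   # a new plugin would add its own provider name here
  elasticsearch:
    namespace: ${SW_NAMESPACE:""}
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
```

A hypothetical clickhouse provider would add its own block alongside elasticsearch and be chosen via the same selector.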
Here is the last vote about this,\n https://github.com/apache/skywalking/discussions/9059  ","title":"Why is Clickhouse or Loki or xxx not supported as a storage option?","url":"/docs/main/next/en/faq/why-clickhouse-not-supported/"},{"content":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","title":"Windows Monitoring","url":"/docs/main/latest/en/setup/backend/backend-win-monitoring/"},{"content":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . 
This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","title":"Windows Monitoring","url":"/docs/main/next/en/setup/backend/backend-win-monitoring/"},{"content":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","title":"Windows Monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-win-monitoring/"},{"content":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. 
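The Setup list above links to an example otel-collector-config.yaml without showing it. A minimal sketch of such a file, assuming windows_exporter on its usual port 9182 and the OAP gRPC address used elsewhere in these docs (the job name and host are placeholders; adjust both for your environment):

```yaml
# Sketch of an otel-collector-config.yaml for Windows monitoring.
# The scrape target, job name, and OAP address below are placeholders/assumptions.
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: windows-monitoring          # hypothetical job name
          scrape_interval: 10s
          static_configs:
            - targets: ["windows-host:9182"]    # windows_exporter default port; adjust host
exporters:
  otlp:
    endpoint: oap.skywalking:11800              # replace with your OAP gRPC service address
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]
```

The OAP then matches the received metrics against the rules in /config/otel-rules/windows.yaml.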
The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","title":"Windows Monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-win-monitoring/"},{"content":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","title":"Windows Monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-win-monitoring/"},{"content":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . 
This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","title":"Windows Monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-win-monitoring/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. 
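Laid out as YAML, the scrape job and exporter described above look roughly like this; the cluster name and OAP address are the same placeholders used in the original configuration:

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'istiod-monitor'
          kubernetes_sd_configs:
            - role: endpoints
          relabel_configs:
            - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
              action: keep
              regex: istiod;http-monitoring
            - action: labelmap
              regex: __meta_kubernetes_service_label_(.+)
            - source_labels: []
              target_label: cluster
              replacement: your-cluster    # replace this with your cluster name
exporters:
  otlp:
    endpoint: oap.skywalking:11800         # replace this with the OAP gRPC service address
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp, logging]           # the original also routes to a logging exporter;
                                           # declare it under exporters or drop it here
```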
You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/latest/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/next/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in the Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which is then processed and sent to SkyWalking backend.\nFollow the steps in Getting Started in OpenTelemetry Collector to deploy this collector. There are several components available in the collector, and they could be combined for different use cases. For the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. 
You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNOTE: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.0.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. 
Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.1.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.2.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. 
Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.3.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. 
You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.4.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. 
You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.5.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.6.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. 
Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.7.0/en/setup/istio/readme/"},{"content":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running well across multiple versions. If you haven\u0026rsquo;t started developing your plugin yet, please read this Plugin Development Guide first.\nDeveloping a plugin involves the following steps:\n Create a new module: Please create a new module in the specified directory, and it is recommended to name the module the same as the plugin for easy reference. Write the configuration file: This file serves as the declaration file for the plugin, and test cases would be run based on this file. Write the test code: Simulate the actual service operation, including the plugin you want to test. Test execution: Check if the plugin is running properly.  Write Configuration File The configuration file is used to define the basic information of the test plugin. You can use the gin plugin configuration file as an example to write your own. It includes the following information:\n entry-service: The test HTTP service entry URL. When this address is accessed, the plugin code should be triggered. health-checker: Executed before the entry-service is accessed to ensure that the service starts without any issues. Status code of 200 is considered a successful service start. start-script: The script execution file path. Please compile and start the service in this file. framework: The access address of the current framework to be tested. During testing, this address would be used to switch between different framework versions. export-port: The port number for the external service entry. support-version: The version information supported by the current plugin.  go: The supported Golang language version for the current plugin. framework: A list of plugin version information. It would be used to switch between multiple framework versions.   dependencies: If your program relies on certain containers, please declare them here. The syntax is largely similar to the services in docker-compose.  image: The image name of service. hostname: The hostname of the container which deployed. port: The port list of the container which deployed. 
expose: The export port list of the container which deployed. environment: The environment variables of the container which deployed. command: The start command of the container. healthcheck: The health check command of the container. If the service defines a healthcheck, then the service being tested would depend on the current service\u0026rsquo;s service_healthy status. Otherwise, it depends on the service_started status.    URL Access When the service address is accessed, please use ${HTTP_HOST} and ${HTTP_PORT} to represent the domain name and port number to be accessed. The port number corresponds to the export-port field.\nStart Script The startup script is used to compile and execute the program.\nWhen starting, please add the ${GO_BUILD_OPTS} parameter, which specifies the Go Agent program information for hybrid compilation.\nWhen starting, just let the program keep running.\nVersion Matrix Multi-version support is a crucial step in plugin testing. It can test whether the plugin runs stably across multiple framework versions and go versions.\nPlugin testing would use the go get command to modify the plugin version. Please make sure you have filled in the correct framework and support-version.framework. The format is: ${framework}@${support-version.framework}\nDuring plugin execution, the specified official Golang image would be used, allowing the plugin to run in the designated Golang version.\nExcepted File For each plugin, you need to define the config/expected.yml file, which is used to define the observable data generated after the plugin runs. After the plugin runs, this file would be used to validate the data.\nPlease refer to the documentation to write this file.\nWrite Test Code In the test code, please start an HTTP service and expose the following two interfaces:\n Check service: Used to ensure that the service is running properly. This corresponds to the health-checker address in configuration. Entry service: Write the complete framework business logic at this address. Validate all the features provided by the plugin as much as possible. This corresponds to the entry-service address in configuration.  The test code, like a regular program, needs to import the github.com/apache/skywalking-go package.\nTest Execution Once you have completed the plugin configuration and test code writing, you can proceed to test the framework. Please follow these steps:\n Build tools: Execute the make build command in the test/plugins directory. It would generate some tools needed for testing in the dist folder of this directory. Run the plugin locally: Start the plugin test program and iterate through all framework versions for testing on your local environment. Add to GitHub Action: Fill in the name of the test plugin in this file, and the plugin test would be executed and validated each time a pull request is submitted.  Run the Plugin Test Locally Please execute the run.sh script in the test/plugins directory and pass in the name of the plugin you wrote (the folder name). At this point, the script would read the configuration file of the plugin test and create a workspace directory in this location for temporarily storing files generated by each plugin. Finally, it would start the test code and validate the data sequentially according to the supported version information.\nThe script supports the following two parameters:\n \u0026ndash;clean: Clean up the files and containers generated by the current running environment. \u0026ndash;debug: Enable debug mode for plugin testing. 
In this mode, the content generated by each framework in the workspace would not be cleared, and the temporary files generated during hybrid compilation would be saved.  ","title":"Write Plugin Test","url":"/docs/skywalking-go/latest/en/development-and-contribution/write-plugin-testing/"},{"content":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running well across multiple versions. If you haven\u0026rsquo;t started developing your plugin yet, please read this Plugin Development Guide first.\nDeveloping a plugin involves the following steps:\n Create a new module: Please create a new module in the specified directory, and it is recommended to name the module the same as the plugin for easy reference. Write the configuration file: This file serves as the declaration file for the plugin, and test cases would be run based on this file. Write the test code: Simulate the actual service operation, including the plugin you want to test. Test execution: Check if the plugin is running properly.  Write Configuration File The configuration file is used to define the basic information of the test plugin. You can use the gin plugin configuration file as an example to write your own. It includes the following information:\n entry-service: The test HTTP service entry URL. When this address is accessed, the plugin code should be triggered. health-checker: Executed before the entry-service is accessed to ensure that the service starts without any issues. Status code of 200 is considered a successful service start. start-script: The script execution file path. Please compile and start the service in this file. framework: The access address of the current framework to be tested. During testing, this address would be used to switch between different framework versions. export-port: The port number for the external service entry. support-version: The version information supported by the current plugin.  go: The supported Golang language version for the current plugin. framework: A list of plugin version information. It would be used to switch between multiple framework versions.   dependencies: If your program relies on certain containers, please declare them here. The syntax is largely similar to the services in docker-compose.  image: The image name of service. hostname: The hostname of the container which deployed. port: The port list of the container which deployed. expose: The export port list of the container which deployed. environment: The environment variables of the container which deployed. command: The start command of the container. healthcheck: The health check command of the container. If the service defines a healthcheck, then the service being tested would depend on the current service\u0026rsquo;s service_healthy status. Otherwise, it depends on the service_started status.    URL Access When the service address is accessed, please use ${HTTP_HOST} and ${HTTP_PORT} to represent the domain name and port number to be accessed. The port number corresponds to the export-port field.\nStart Script The startup script is used to compile and execute the program.\nWhen starting, please add the ${GO_BUILD_OPTS} parameter, which specifies the Go Agent program information for hybrid compilation.\nWhen starting, just let the program keep running.\nVersion Matrix Multi-version support is a crucial step in plugin testing. 
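As a rough illustration of how that matrix is declared, a support-version block might look like the sketch below; the Go version and framework versions are hypothetical and only show the shape of the declaration, with each framework entry later applied via go get in the ${framework}@${support-version.framework} form:

support-version:
  - go: "1.19"            # hypothetical Go language version
    framework:
      - v1.8.2            # hypothetical framework versions to iterate over
      - v1.9.1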
It can test whether the plugin runs stably across multiple framework versions and go versions.\nPlugin testing would use the go get command to modify the plugin version. Please make sure you have filled in the correct framework and support-version.framework. The format is: ${framework}@${support-version.framework}\nDuring plugin execution, the specified official Golang image would be used, allowing the plugin to run in the designated Golang version.\nExcepted File For each plugin, you need to define the config/expected.yml file, which is used to define the observable data generated after the plugin runs. After the plugin runs, this file would be used to validate the data.\nPlease refer to the documentation to write this file.\nWrite Test Code In the test code, please start an HTTP service and expose the following two interfaces:\n Check service: Used to ensure that the service is running properly. This corresponds to the health-checker address in configuration. Entry service: Write the complete framework business logic at this address. Validate all the features provided by the plugin as much as possible. This corresponds to the entry-service address in configuration.  The test code, like a regular program, needs to import the github.com/apache/skywalking-go package.\nTest Execution Once you have completed the plugin configuration and test code writing, you can proceed to test the framework. Please follow these steps:\n Build tools: Execute the make build command in the test/plugins directory. It would generate some tools needed for testing in the dist folder of this directory. Run the plugin locally: Start the plugin test program and iterate through all framework versions for testing on your local environment. Add to GitHub Action: Fill in the name of the test plugin in this file, and the plugin test would be executed and validated each time a pull request is submitted.  Run the Plugin Test Locally Please execute the run.sh script in the test/plugins directory and pass in the name of the plugin you wrote (the folder name). At this point, the script would read the configuration file of the plugin test and create a workspace directory in this location for temporarily storing files generated by each plugin. Finally, it would start the test code and validate the data sequentially according to the supported version information.\nThe script supports the following two parameters:\n \u0026ndash;clean: Clean up the files and containers generated by the current running environment. \u0026ndash;debug: Enable debug mode for plugin testing. In this mode, the content generated by each framework in the workspace would not be cleared, and the temporary files generated during hybrid compilation would be saved.  ","title":"Write Plugin Test","url":"/docs/skywalking-go/next/en/development-and-contribution/write-plugin-testing/"},{"content":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running well across multiple versions. If you haven\u0026rsquo;t started developing your plugin yet, please read this Plugin Development Guide first.\nDeveloping a plugin involves the following steps:\n Create a new module: Please create a new module in the specified directory, and it is recommended to name the module the same as the plugin for easy reference. Write the configuration file: This file serves as the declaration file for the plugin, and test cases would be run based on this file. 
Write the test code: Simulate the actual service operation, including the plugin you want to test. Test execution: Check if the plugin is running properly.  Write Configuration File The configuration file is used to define the basic information of the test plugin. You can use the gin plugin configuration file as an example to write your own. It includes the following information:\n entry-service: The test HTTP service entry URL. When this address is accessed, the plugin code should be triggered. health-checker: Executed before the entry-service is accessed to ensure that the service starts without any issues. Status code of 200 is considered a successful service start. start-script: The script execution file path. Please compile and start the service in this file. framework: The access address of the current framework to be tested. During testing, this address would be used to switch between different framework versions. export-port: The port number for the external service entry. support-version: The version information supported by the current plugin.  go: The supported Golang language version for the current plugin. framework: A list of plugin version information. It would be used to switch between multiple framework versions.   dependencies: If your program relies on certain containers, please declare them here. The syntax is largely similar to the services in docker-compose.  image: The image name of service. hostname: The hostname of the container which deployed. port: The port list of the container which deployed. expose: The export port list of the container which deployed. environment: The environment variables of the container which deployed. command: The start command of the container. healthcheck: The health check command of the container. If the service defines a healthcheck, then the service being tested would depend on the current service\u0026rsquo;s service_healthy status. Otherwise, it depends on the service_started status.    URL Access When the service address is accessed, please use ${HTTP_HOST} and ${HTTP_PORT} to represent the domain name and port number to be accessed. The port number corresponds to the export-port field.\nStart Script The startup script is used to compile and execute the program.\nWhen starting, please add the ${GO_BUILD_OPTS} parameter, which specifies the Go Agent program information for hybrid compilation.\nWhen starting, just let the program keep running.\nVersion Matrix Multi-version support is a crucial step in plugin testing. It can test whether the plugin runs stably across multiple framework versions and go versions.\nPlugin testing would use the go get command to modify the plugin version. Please make sure you have filled in the correct framework and support-version.framework. The format is: ${framework}@${support-version.framework}\nDuring plugin execution, the specified official Golang image would be used, allowing the plugin to run in the designated Golang version.\nExcepted File For each plugin, you need to define the config/expected.yml file, which is used to define the observable data generated after the plugin runs. After the plugin runs, this file would be used to validate the data.\nPlease refer to the documentation to write this file.\nWrite Test Code In the test code, please start an HTTP service and expose the following two interfaces:\n Check service: Used to ensure that the service is running properly. This corresponds to the health-checker address in configuration. 
Entry service: Write the complete framework business logic at this address. Validate all the features provided by the plugin as much as possible. This corresponds to the entry-service address in configuration.  The test code, like a regular program, needs to import the github.com/apache/skywalking-go package.\nTest Execution Once you have completed the plugin configuration and test code writing, you can proceed to test the framework. Please follow these steps:\n Build tools: Execute the make build command in the test/plugins directory. It would generate some tools needed for testing in the dist folder of this directory. Run the plugin locally: Start the plugin test program and iterate through all framework versions for testing on your local environment. Add to GitHub Action: Fill in the name of the test plugin in this file, and the plugin test would be executed and validated each time a pull request is submitted.  Run the Plugin Test Locally Please execute the run.sh script in the test/plugins directory and pass in the name of the plugin you wrote (the folder name). At this point, the script would read the configuration file of the plugin test and create a workspace directory in this location for temporarily storing files generated by each plugin. Finally, it would start the test code and validate the data sequentially according to the supported version information.\nThe script supports the following two parameters:\n \u0026ndash;clean: Clean up the files and containers generated by the current running environment. \u0026ndash;debug: Enable debug mode for plugin testing. In this mode, the content generated by each framework in the workspace would not be cleared, and the temporary files generated during hybrid compilation would be saved.  ","title":"Write Plugin Test","url":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/write-plugin-testing/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
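To make the scheme below more concrete, a minimal rule file might look like the following sketch; the item keys, host pattern, label, and metric expression are illustrative assumptions rather than the rules shipped with SkyWalking:

metricPrefix: meter_vm
expSuffix: <MAL fragment appended to every expression>
requiredZabbixItemKeys:
  - system.cpu.load[all,avg15]        # example Zabbix agent item key
entities:
  hostPatterns:
    - /.+/                            # accept metrics from any agent hostname
  labels:
    - name: host
      fromItem: agent.hostname        # label value read from a Zabbix item
metrics:
  - name: cpu_load_avg15              # combined with the 'meter_' prefix as the index/table name
    exp: <MAL expression computed from the item keys above>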
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/latest/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. 
The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/next/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You could find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.0.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. 
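For readability, the flattened receiver-zabbix module definition quoted above corresponds to the following application.yml fragment (same values, only re-indented):

receiver-zabbix:
  selector: ${SW_RECEIVER_ZABBIX:default}
  default:
    # Exported TCP port; Zabbix agents connect to it and transport data
    port: 10051
    # Bind to host
    host: 0.0.0.0
    # Config enabled when an agent request is received
    activeFiles: agent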
If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.1.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.2.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. 
The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.3.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.4.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. 
Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.5.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.6.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. 
The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.7.0/en/setup/backend/backend-zabbix/"},{"content":"Zend observer  Refer to: https://www.datadoghq.com/blog/engineering/php-8-observability-baked-right-in/#the-observability-landscape-before-php-8\n By default, skywalking-php hooks the zend_execute_internal and zend_execute_ex functions to implement auto instrumentation.\nBut there are some drawbacks:\n All PHP function calls are placed on the native C stack, which is limited by the value set in ulimit -s. Not compatible with the new JIT added in PHP 8.  The observer API in PHP 8+ Now, zend observer api is a new generation method, and it is also a method currently recommended by PHP8.\nThis method has no stack problem and will not affect JIT.\nConfiguration The following configuration example enables JIT in PHP8 and zend observer support in skywalking-php at the same time.\n[opcache] zend_extension = opcache ; Enable JIT opcache.jit = tracing [skywalking_agent] extension = skywalking_agent.so ; Switch to use zend observer api to implement auto instrumentation. skywalking_agent.enable_zend_observer = On ","title":"Zend observer","url":"/docs/skywalking-php/latest/en/configuration/zend-observer/"},{"content":"Zend observer  Refer to: https://www.datadoghq.com/blog/engineering/php-8-observability-baked-right-in/#the-observability-landscape-before-php-8\n By default, skywalking-php hooks the zend_execute_internal and zend_execute_ex functions to implement auto instrumentation.\nBut there are some drawbacks:\n All PHP function calls are placed on the native C stack, which is limited by the value set in ulimit -s. Not compatible with the new JIT added in PHP 8.  The observer API in PHP 8+ Now, zend observer api is a new generation method, and it is also a method currently recommended by PHP8.\nThis method has no stack problem and will not affect JIT.\nConfiguration The following configuration example enables JIT in PHP8 and zend observer support in skywalking-php at the same time.\n[opcache] zend_extension = opcache ; Enable JIT opcache.jit = tracing [skywalking_agent] extension = skywalking_agent.so ; Switch to use zend observer api to implement auto instrumentation. skywalking_agent.enable_zend_observer = On ","title":"Zend observer","url":"/docs/skywalking-php/next/en/configuration/zend-observer/"},{"content":"Zend observer  Refer to: https://www.datadoghq.com/blog/engineering/php-8-observability-baked-right-in/#the-observability-landscape-before-php-8\n By default, skywalking-php hooks the zend_execute_internal and zend_execute_ex functions to implement auto instrumentation.\nBut there are some drawbacks:\n All PHP function calls are placed on the native C stack, which is limited by the value set in ulimit -s. Not compatible with the new JIT added in PHP 8.  
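The Zabbix rule-file scheme repeated in the entries above reads more clearly when expanded into a concrete file. The following is a minimal sketch only, assuming illustrative item keys, label names, and MAL expressions (it is not the exact agent.yaml shipped with SkyWalking); it simply instantiates the documented fields metricPrefix, requiredZabbixItemKeys, entities.hostPatterns, labels (value / fromItem), and metrics (name / exp):

# Sketch of a file under $CLASSPATH/zabbix-rules, following the scheme above.
metricPrefix: zabbix
requiredZabbixItemKeys:
  - system.cpu.load[all,avg1]        # assumed Zabbix Agent item keys
  - vm.memory.size[available]
entities:
  hostPatterns:
    - .*                             # build metrics for every reporting hostname
  labels:
    - name: source
      value: zabbix-agent            # a label with a fixed value
    - name: host
      fromItem: agent.hostname       # a label queried from a Zabbix Agent item key
metrics:
  - name: cpu_load_avg1              # stored as meter_cpu_load_avg1 (the 'meter_' prefix is added by the OAP)
    exp: zabbix_system_cpu_load.avg(['host'])   # illustrative MAL expression; see mal.md
  - name: memory_available
    exp: zabbix_vm_memory_size.avg(['host'])

How the raw metric names are derived from item keys depends on the OAP's normalization, so treat the expressions as placeholders; the sketch only shows where each field from the documented scheme lands.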
The observer API in PHP 8+ Now, zend observer api is a new generation method, and it is also a method currently recommended by PHP8.\nThis method has no stack problem and will not affect JIT.\nConfiguration The following configuration example enables JIT in PHP8 and zend observer support in skywalking-php at the same time.\n[opcache] zend_extension = opcache ; Enable JIT opcache.jit = tracing [skywalking_agent] extension = skywalking_agent.so ; Switch to use zend observer api to implement auto instrumentation. skywalking_agent.enable_zend_observer = On ","title":"Zend observer","url":"/docs/skywalking-php/v0.7.0/en/configuration/zend-observer/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. 
It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","title":"Zipkin receiver","url":"/docs/main/latest/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. 
If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","title":"Zipkin receiver","url":"/docs/main/next/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation. It supports Zipkin v1/v2 formats through HTTP service. Make sure you use this with SW_STORAGE=zipkin-elasticsearch option to activate Zipkin storage implementation. Once this receiver and storage are activated, SkyWalking\u0026rsquo;s native traces would be ignored, and SkyWalking wouldn\u0026rsquo;t analyze topology, metrics, and endpoint dependency from Zipkin\u0026rsquo;s trace.\nUse the following config to activate it.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:-}default:host:${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}port:${SW_RECEIVER_ZIPKIN_PORT:9411}contextPath:${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}jettyMinThreads:${SW_RECEIVER_ZIPKIN_JETTY_MIN_THREADS:1}jettyMaxThreads:${SW_RECEIVER_ZIPKIN_JETTY_MAX_THREADS:200}jettyIdleTimeOut:${SW_RECEIVER_ZIPKIN_JETTY_IDLE_TIMEOUT:30000}jettyAcceptorPriorityDelta:${SW_RECEIVER_ZIPKIN_JETTY_DELTA:0}jettyAcceptQueueSize:${SW_RECEIVER_ZIPKIN_QUEUE_SIZE:0}NOTE: Zipkin receiver requires zipkin-elasticsearch storage implementation to be activated. Read this doc to learn about Zipkin as a storage option.\n","title":"Zipkin receiver","url":"/docs/main/v9.0.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. 
It supports Zipkin v1/v2 formats through the HTTP service.\nUse the following config to activate it.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI. Notice: Zipkin query API implementation does not support BanyanDB yet.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}","title":"Zipkin receiver","url":"/docs/main/v9.1.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. 
If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI. Notice: Zipkin query API implementation does not support BanyanDB yet.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}","title":"Zipkin receiver","url":"/docs/main/v9.2.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. 
If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}","title":"Zipkin receiver","url":"/docs/main/v9.3.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. 
If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","title":"Zipkin receiver","url":"/docs/main/v9.4.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. 
Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","title":"Zipkin receiver","url":"/docs/main/v9.5.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. 
It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. 
Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","title":"Zipkin receiver","url":"/docs/main/v9.6.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. 
SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","title":"Zipkin receiver","url":"/docs/main/v9.7.0/en/setup/backend/zipkin-trace/"}]
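The receiver-zipkin and query-zipkin blocks quoted in the entries above are stored flattened in this search index, which makes them hard to scan. Re-indented as ordinary YAML and trimmed to the keys most relevant for activation (all values are the environment-variable defaults already shown above, so nothing new is introduced), a minimal sketch of the two module sections looks like this:

receiver-zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:default}
  default:
    searchableTracesTags: ${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}
    sampleRate: ${SW_ZIPKIN_SAMPLE_RATE:10000}            # precision is 1/10000
    enableHttpCollector: ${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}
    restHost: ${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}
    restPort: ${SW_RECEIVER_ZIPKIN_REST_PORT:9411}
    enableKafkaCollector: ${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}
    kafkaBootstrapServers: ${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}
    kafkaTopic: ${SW_ZIPKIN_KAFKA_TOPIC:zipkin}

query-zipkin:
  selector: ${SW_QUERY_ZIPKIN:default}
  default:
    restHost: ${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}
    restPort: ${SW_QUERY_ZIPKIN_REST_PORT:9412}
    restContextPath: ${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}
    lookback: ${SW_QUERY_ZIPKIN_LOOKBACK:86400000}        # 1 day, in millis
    uiQueryLimit: ${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}
    uiDefaultLookback: ${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}  # 15 minutes, in millis

The indentation here (selector at module level, the other keys under default) follows the usual SkyWalking application.yml layout; the full flattened config quoted above lists the remaining REST and Kafka tuning keys.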
\ No newline at end of file
+[{"content":"Apache SkyWalking从2015年开源到2024年,已经走过了9个年头,项目的规模和功能也得到了极大的丰富。 2024年4月至6月,SkyWalking社区联合纵目,举办线上的联合直播,分多个主题介绍SkyWalking的核心特性,也提供更多的答疑时间。\n2024年4月25日,SkyWalking创始人带来了第一次分享和Q\u0026amp;A\n 熟悉SkyWalking项目结构 介绍项目工程划分,边界,定位 SkyWalking文档使用,以及如何使用AI助手 Q\u0026amp;A  B站视频地址\n想参与直播的小伙伴,可以关注后续的直播安排和我们的B站直播预约\n","title":"SkyWalking从入门到精通 - 2024系列线上分享活动(第一讲)","url":"/zh/2024-04-26-skywalking-in-practice-s01e01/"},{"content":"Introduction Apache ActiveMQ Classic is a popular and powerful open-source messaging and integration pattern server. Founded in 2004, it has evolved into a mature and widely used open-source messaging middleware that complies with the Java Message Service (JMS). Today, with its stability and wide range of feature support, it still has a certain number of users of small and medium-sized enterprises. It‘s high-performance version Apache Artemis is developing rapidly and is also attracting attention from users of ActiveMQ.\nActiveMQ has broad support for JMX (Java Management Extensions), allowing to be monitored through JMX MBean. After enabling JMX, you can use JAVA\u0026rsquo;s built-in jconsole or VisualVM to view the metrics. In addition, some Collector components can also be used to convert JMX-style data into Prometheus-style data, which is suitable for more tools.\nOpenTelemetry as an industry-recognized, standardized solution that provides consistent and interoperable telemetry data collection, transmission, and analysis capabilities for distributed systems, and is also used here for data collection and transmission. Although it can directly accept JMX type data, the JMX indicators for collecting ActiveMQ are not in the standard library, and some versions are incompatible, so this article adopts two steps: convert JMX data into Prometheus-style indicator data, and then use OpenTelemetry to scrape HTTP endpoint data.\nSkyWalking as a one-stop distributed system monitoring solution, it accepts metrics from ActiveMQ and provides a basic monitoring dashboard.\nDeployment Please set up the following services:\n SkyWalking OAP, v10.0+. ActiveMQ v6.0.X+. JMX Exporter v0.20.0. If using docker, refer bitnami/jmx-exporter. OpenTelmetry-Collector v0.92.0.  Preparation The following describes how to deploy ActiveMQ with 2 single-node brokers and SkyWalking OAP with one single node. JMX Exporter runs in agent mode (recommended).\nConfiguration  Enable JMX in ActiveMQ, the JMX remote port defaults to 1616, you can change it through ACTIVEMQ_SUNJMX_START. Set up the exporter:  [Recommended] If run exporter in agent mode, need to append the startup parameter -DACTIVEMQ_OPTS=-javaagent:{activemqPath}/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:{activemqPath}/conf/config.yaml in ActiveMQ env, then exporter server starts at the same time. If run exporter in single server, refer here to deploy the server alone. 2345 is open HTTP port that can be customized. JMX\u0026rsquo;s metrics can be queried through http://localhost:2345/metrics.    
example of docker-compose.yml with agent exporter for ActiveMQ:\nversion:\u0026#39;3.8\u0026#39;services:amq1:image:apache/activemq-classic:latestcontainer_name:amq1hostname:amq1volumes:- ~/activemq1/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq1/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq1/conf/config.yaml:/opt/apache-activemq/conf/config.yamlports:- \u0026#34;61616:61616\u0026#34;- \u0026#34;8161:8161\u0026#34;- \u0026#34;2345:2345\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-1networks:- amqtest amq2:image:apache/activemq-classic:latestcontainer_name:amq2hostname:amq2volumes:- ~/activemq2/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq2/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq2/conf/config.yaml:/opt/apache-activemq/conf/config.yaml ports:- \u0026#34;61617:61616\u0026#34;- \u0026#34;8162:8161\u0026#34;- \u0026#34;2346:2346\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2346:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-2 networks:- amqtestotel-collector1:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector1command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config1.yaml:/etc/otel-collector-config.yamldepends_on:- amq1networks:- amqtest otel-collector2:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector2command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config2.yaml:/etc/otel-collector-config.yamldepends_on:- amq2networks:- amqtest networks:amqtest:example of otel-collector-config.yaml for OpenTelemetry:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;activemq-monitoring\u0026#39;scrape_interval:30sstatic_configs:- targets:[\u0026#39;amq1:2345\u0026#39;]labels:cluster:activemq-broker1processors:batch:exporters:otlp:endpoint:oap:11800tls:insecure:trueservice:pipelines:metrics:receivers:- prometheusprocessors:- batchexporters:- otlpexample of config.yaml for ActiveMQ Exporter:\n---startDelaySeconds:10username:adminpassword:activemqssl:falselowercaseOutputName:falselowercaseOutputLabelNames:falseincludeObjectNames:[\u0026#34;org.apache.activemq:*\u0026#34;,\u0026#34;java.lang:type=OperatingSystem\u0026#34;,\u0026#34;java.lang:type=GarbageCollector,*\u0026#34;,\u0026#34;java.lang:type=Threading\u0026#34;,\u0026#34;java.lang:type=Runtime\u0026#34;,\u0026#34;java.lang:type=Memory\u0026#34;,\u0026#34;java.lang:name=*\u0026#34;]excludeObjectNames:[\u0026#34;org.apache.activemq:type=ColumnFamily,*\u0026#34;]autoExcludeObjectNameAttributes:trueexcludeObjectNameAttributes:\u0026#34;java.lang:type=OperatingSystem\u0026#34;:- \u0026#34;ObjectName\u0026#34;\u0026#34;java.lang:type=Runtime\u0026#34;:- \u0026#34;ClassPath\u0026#34;- \u0026#34;SystemProperties\u0026#34;rules:- pattern:\u0026#34;.*\u0026#34;Steps  Start ActiveMQ, and the Exporter(agent) and the service start at the same time. Start SkyWalking OAP and SkyWalking UI. Start OpenTelmetry-Collector.  
After completed, node metrics will be captured and pushed to SkyWalking.\nMetrics Monitoring metrics involve in Cluster Metrics, Broker Metrics, and Destination Metrics.\n Cluster Metrics: including memory usage, rates of write/read, and average/max duration of write. Broker Metrics: including node state, number of connections, number of producers/consumers, and rate of write/read under the broker. Depending on the cluster mode, one cluster may include one or more brokers. Destination Metrics: including number of producers/consumers, messages in different states, queues, and enqueue duration in a queue/topic.  Cluster Metrics  System Load: range in [0, 100]. Thread Count: the number of threads currently used by the JVM. Heap Memory: capacity of heap memory. GC: memory of ActiveMQ is managed by Java\u0026rsquo;s garbage collection (GC) process. Enqueue/Dequeue/Dispatch/Expired Rate: growth rate of messages in different states. Average/Max Enqueue Time: time taken to join the queue.  Broker Metrics  Uptime: duration of the node. State: 1 = slave node, 0 = master node. Current Connentions: number of connections. Current Producer/Consumer Count: number of current producers/consumers. Increased Producer/Consumer Count: number of increased producers/consumers. Enqueue/Dequeue Count: number of enqueue and dequeue. Enqueue/Dequeue Rate: rate of enqueue and dequeue. Memory Percent Usage: amount of memory space used by undelivered messages. Store Percent Usage: space used by pending persistent messages. Temp Percent Usage: space used by non-persistent messages. Average/Max Message Size: number of messages. Queue Size: number of messages in the queue.  Destination Metrics  Produser/Consumer Count: number of producers/Consumers. Queue Size: unacknowledged messages of the queue. Memory usage: usage of memory. Enqueue/Dequeue/Dispatch/Expired/Inflight Count: number of messages in different states. Average/Max Message Size: number of messages. Average/Max Enqueue Time: time taken to join the queue.  
Reference  ActiveMQ Classic clustering JMX Exporter Configuration JMX Exporter-Running the Standalone HTTP Server OpenTelemetry Collector Contrib Jmxreceiver  ","title":"Monitoring ActiveMQ through SkyWalking","url":"/blog/2024-04-19-monitoring-activemq-through-skywalking/"},{"content":"引言 Apache ActiveMQ Classic 是一个流行且功能强大的开源消息传递和集成模式服务器。始于2004年,逐渐发展成为了一个成熟且广泛使用的开源消息中间件,符合Java消息服务(JMS)规范。 发展至今,凭借其稳定性和广泛的特性支持,仍然拥有一定数量的中小型企业的使用者。其高性能版本 Apache Artemis 目前处于快速发展阶段,也受到了 ActiveMQ 现有使用者的关注。\nActiveMQ 对 JMX(Java Management Extensions) 有广泛的支持,允许通过 JMX MBean 监视和控制代理的行为。 开启JMX之后,就可以使用 JAVA 自带的 jconsole 工具或者 VisualVM 等工具直观查看指标。此外也可以通过一些 Collector 组件,将 JMX 风格的数据转换为 prometheus 风格的数据,适配更多查询与展示工具。\nOpenTelemetry 作为业界公认的标准化解决方案,可为分布式系统提供一致且可互操作的遥测数据收集、传输和分析能力,这里也主要借助它实现数据的采集和传输。 它虽然可以直接接受 JMX 类型的数据,但是关于采集 ActiveMQ 的 JMX 指标并不在标准库,存在部分版本不兼容,因此本文采用两步:将 JMX 数据转换为 Prometheus 风格的指标数据,再使用 OpenTelemetry 传递。\nSkyWalking 作为一站式的分布式系统监控解决方案,接纳来自 ActiveMQ 的指标数据,并提供基础的指标监控面板。\n服务部署 请准备以下服务\n SkyWalking OAP, v10.0+。 ActiveMQ v6.0.X+。 JMX Exporter v0.20.0。如果你使用docker,参考使用 bitnami/jmx-exporter。 OpenTelmetry-Collector v0.92.0。  服务准备 以下通过 SkyWalking OAP 单节点、ActiveMQ 2个单节点服务的部署方式介绍。JMX Exporter 采用推荐的 agent 方式启动。\n配置流程  在 ActiveMQ 中开启JMX,其中 JMX 远程端口默认1616,如需修改可通过 ACTIVEMQ_SUNJMX_START 参数调整。 设置 Exporter:  如果采用推荐的 Agent 方式启动,需要追加启动参数 -DACTIVEMQ_OPTS=-javaagent:{activemqPath}/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:{activemqPath}/conf/config.yaml 如果采用单独服务的方式启动,可以参考这里独立部署 Exporter 服务。 其中 2345 为开放的 HTTP 端口可自定义。最终可通过访问 http://localhost:2345/metrics 查询到 JMX 的指标数据。    采用 Agent Exporter 方式的 docker-compose.yml 配置样例:\nversion:\u0026#39;3.8\u0026#39;services:amq1:image:apache/activemq-classic:latestcontainer_name:amq1hostname:amq1volumes:- ~/activemq1/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq1/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq1/conf/config.yaml:/opt/apache-activemq/conf/config.yamlports:- \u0026#34;61616:61616\u0026#34;- \u0026#34;8161:8161\u0026#34;- \u0026#34;2345:2345\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2345:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-1networks:- amqtest amq2:image:apache/activemq-classic:latestcontainer_name:amq2hostname:amq2volumes:- ~/activemq2/conf/activemq.xml:/opt/apache-activemq/conf/activemq.xml- ~/activemq2/bin/jmx_prometheus_javaagent-0.20.0.jar:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar- ~/activemq2/conf/config.yaml:/opt/apache-activemq/conf/config.yaml ports:- \u0026#34;61617:61616\u0026#34;- \u0026#34;8162:8161\u0026#34;- \u0026#34;2346:2346\u0026#34;environment:ACTIVEMQ_OPTS:\u0026#34;-javaagent:/opt/apache-activemq/bin/jmx_prometheus_javaagent-0.20.0.jar=2346:/opt/apache-activemq/conf/config.yaml\u0026#34;ACTIVEMQ_BROKER_NAME:broker-2 networks:- amqtestotel-collector1:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector1command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config1.yaml:/etc/otel-collector-config.yamldepends_on:- amq1networks:- amqtest otel-collector2:image:otel/opentelemetry-collector:latestcontainer_name:otel-collector2command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]volumes:- ./otel-collector-config2.yaml:/etc/otel-collector-config.yamldepends_on:- amq2networks:- amqtest networks:amqtest:OpenTelemetry otel-collector-config.yaml 
配置样例:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;activemq-monitoring\u0026#39;scrape_interval:30sstatic_configs:- targets:[\u0026#39;amq1:2345\u0026#39;]labels:cluster:activemq-broker1processors:batch:exporters:otlp:endpoint:oap:11800tls:insecure:trueservice:pipelines:metrics:receivers:- prometheusprocessors:- batchexporters:- otlpActiveMQ Exporter config.yaml 配置样例:\n---startDelaySeconds:10username:adminpassword:activemqssl:falselowercaseOutputName:falselowercaseOutputLabelNames:falseincludeObjectNames:[\u0026#34;org.apache.activemq:*\u0026#34;,\u0026#34;java.lang:type=OperatingSystem\u0026#34;,\u0026#34;java.lang:type=GarbageCollector,*\u0026#34;,\u0026#34;java.lang:type=Threading\u0026#34;,\u0026#34;java.lang:type=Runtime\u0026#34;,\u0026#34;java.lang:type=Memory\u0026#34;,\u0026#34;java.lang:name=*\u0026#34;]excludeObjectNames:[\u0026#34;org.apache.activemq:type=ColumnFamily,*\u0026#34;]autoExcludeObjectNameAttributes:trueexcludeObjectNameAttributes:\u0026#34;java.lang:type=OperatingSystem\u0026#34;:- \u0026#34;ObjectName\u0026#34;\u0026#34;java.lang:type=Runtime\u0026#34;:- \u0026#34;ClassPath\u0026#34;- \u0026#34;SystemProperties\u0026#34;rules:- pattern:\u0026#34;.*\u0026#34;启动步骤  启动 ActiveMQ,Exporter 和服务同时启动。 启动 SkyWalking OAP 和 SkyWalking UI。 启动 OpenTelmetry-Collector。  以上步骤执行完成后,节点指标就会定时抓取后推送到 SkyWalking,经过分组聚合后前端页面可查看到 ActiveMQ 的面板数据。\n监控指标 监控指标主要分为3类:Cluster 指标、Broker 指标、Destination 指标\n Cluster 指标:主要关注集群的内存使用情况、数据写入与读取速率平均情况、平均与最大的写入时长等。 Broker 指标:主要关注 Broker 下节点状态、连接数、生产者消费者数量、写入读取速率等。根据集群形式不同,一个Cluster可能包括一个或多个Broker。 Destination 指标:主要关注 Queue/Topic 下的生产者消费者数量、不同状态消息数量、队列数量、入队时长等。  Cluster 指标  System Load:[0, 100]的值来反馈系统负载。 Thread Count:JVM 当前使用的线程数。 Heap Memory:堆内存的容量一定程度反映服务的处理性能。 GC:ActiveMQ 在 JVM 中运行,其内存由 Java 的垃圾回收 (GC) 进程管理,GC能直接反映服务的状态。 Enqueue/Dequeue/Dispatch/Expired Rate:不同状态信息的增长速率能直接反映生产活动。 Average/Max Enqueue Time:入队的耗时能一定程度影响生产者。  Broker 指标  Uptime:节点存活时长。 State:是否为从节点,1=从节点,0=主节点。 Current Connentions:目前的连接数。 Current Producer/Consumer Count:目前生产者消费者数量。 Increased Producer/Consumer Count:增长的生产者消费者数量。 Enqueue/Dequeue Count: 入队出队数量。 Enqueue/Dequeue Rate: 入队出队速率。 Memory Percent Usage:未送达消息使用的内存空间。 Store Percent Usage: 挂起的持久性消息占用的空间。 Temp Percent Usage:非持久化消息占用的空间。 Average/Max Message Size:消息量。 Queue Size:队列中消息量。  Destination 指标  Producer/Consumer Count:生产者/消费者数量。 Queue Size:队列的未消费数量。 Memory Usage:内存的使用。 Enqueue/Dequeue/Dispatch/Expired/Inflight Count:不同状态消息数。 Average/Max Enqueue Time:入队的耗时。 Average/Max Message Size:消息量。  参考文档  ActiveMQ Classic clustering JMX Exporter Configuration JMX Exporter-Running the Standalone HTTP Server OpenTelemetry Collector Contrib Jmxreceiver  ","title":"使用 SkyWalking 监控 ActiveMQ","url":"/zh/2024-04-19-monitoring-activemq-through-skywalking/"},{"content":"Zixin Zhou(GitHub ID, CodePrometheus[1]) began the code contributions since Oct 28, 2023.\nUp to date, he has submitted 8 PRs in the Go agent repository, 7 PRs in the main repo, 1 PR in the UI repository and 2 PRs in the showcase repository.\nAt Apr 15th, 2024, the project management committee(PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome Zixin Zhou join the committer team.\n[1] https://github.com/CodePrometheus\n","title":"Welcome Zixin Zhou as new committer","url":"/events/welcome-zixin-zhou-as-new-committer/"},{"content":"SkyWalking Eyes 0.6.0 is released. 
Go to downloads page to find release tars.\n Add | as comment indicator by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/168 Correct the way of joining slack channels by @wu-sheng in https://github.com/apache/skywalking-eyes/pull/169 update: add weak-compatible to dependency check by @Two-Hearts in https://github.com/apache/skywalking-eyes/pull/171 feature: add support for Protocol Buffer by @spacewander in https://github.com/apache/skywalking-eyes/pull/172 feature: add support for OPA policy files by @spacewander in https://github.com/apache/skywalking-eyes/pull/174 add Eclipse Foundation specific Apache 2.0 license header by @gdams in https://github.com/apache/skywalking-eyes/pull/178 add instructions to fix header issues in markdown comment by @gdams in https://github.com/apache/skywalking-eyes/pull/179 bump action/setup-go to v5 by @gdams in https://github.com/apache/skywalking-eyes/pull/180 Draft release notes for 0.6.0 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/181  Full Changelog: https://github.com/apache/skywalking-eyes/compare/v0.5.0...v0.6.0\n","title":"Release Apache SkyWalking Eyes 0.6.0","url":"/events/release-apache-skywalking-eyes-0-6-0/"},{"content":"SkyWalking Java Agent 9.2.0 is released. Go to downloads page to find release tars. Changes by Version\n9.2.0  Fix NoSuchMethodError in mvc-annotation-commons and change deprecated method. Fix forkjoinpool plugin in JDK11. Support for tracing spring-cloud-gateway 4.x in gateway-4.x-plugin. Fix re-transform bug when plugin enhanced class proxy parent method. Fix error HTTP status codes not recording as SLA failures in Vert.x plugins. Support for HttpExchange request tracing. Support tracing for async producing, batch sync consuming, and batch async consuming in rocketMQ-client-java-5.x-plugin. Convert the Redisson span into an async span. Rename system env name from sw_plugin_kafka_producer_config to SW_PLUGIN_KAFKA_PRODUCER_CONFIG. Support for ActiveMQ-Artemis messaging tracing. Archive the expired plugins impala-jdbc-2.6.x-plugin. Fix a bug in Spring Cloud Gateway if HttpClientFinalizer#send does not invoke, the span created at NettyRoutingFilterInterceptor can not stop. Fix not tracing in HttpClient v5 when HttpHost(arg[0]) is null but RoutingSupport#determineHost works. Support across thread tracing for SOFA-RPC. Update Jedis 4.x plugin to support Sharding and Cluster models.  Documentation  Update docs to describe expired-plugins.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 9.2.0","url":"/events/release-apache-skywalking-java-agent-9-2-0/"},{"content":"SkyWalking Rover 0.6.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance compatibility when profiling with SSL. Update LabelValue obtain pod information function to add default value parameter. Add HasOwnerName to judgement pod has owner name. Publish the latest Docker image tag. Improve the stability of Off CPU Profiling. Support collecting the access log from Kubernetes. Remove the scanner mode in the process discovery module. Upgrade Go library to 1.21, eBPF library to 0.13.2. Support using make docker.debug to building the debug docker image.  Bug Fixes Documentation  Update architecture diagram. Delete module design and project structure document. Adjust configuration modules during setup.  
Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Rover 0.6.0","url":"/events/release-apache-skwaylking-rover-0-6-0/"},{"content":"SkyWalking Cloud on Kubernetes 0.9.0 is released. Go to downloads page to find release tars.\n0.9.0 Features  Add a getting started document about how to deploy swck on the kubernetes cluster.  Bugs  Fix the bug that the java agent is duplicated injected when update the pod.  Chores  Bump up custom-metrics-apiserver Bump up golang to v1.22 Bump up controller-gen to v0.14.0  ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.9.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-9-0/"},{"content":"Background Apache SkyWalking is an open-source Application Performance Management system that helps users gather logs, traces, metrics, and events from various platforms and display them on the UI. With version 9.7.0, SkyWalking can collect access logs from probes in multiple languages and from Service Mesh, generating corresponding topologies, tracing, and other data. However, it could not initially collect and map access logs from applications in Kubernetes environments. This article explores how the 10.0.0 version of Apache SkyWalking employs eBPF technology to collect and store application access logs, addressing this limitation.\nWhy eBPF? To monitor the network traffic in Kubernetes, the following features support be support:\n Cross Language: Applications deployed in Kubernetes may be written in any programming language, making support for diverse languages important. Non-Intrusiveness: It\u0026rsquo;s imperative to monitor network traffic without making any modifications to the applications, as direct intervention with applications in Kubernetes is not feasible. Kernel Metrics Monitoring: Often, diagnosing network issues by analyzing traffic performance at the user-space level is insufficient. A deeper analysis incorporating kernel-space network traffic metrics is frequently necessary. Support for Various Network Protocols: Applications may communicate using different transport protocols, necessitating support for a range of protocols.  Given these requirements, eBPF emerges as a capable solution. In the next section, we will delve into detailed explanations of how Apache SkyWalking Rover resolves these aspects.\nKernel Monitoring and Protocol Analysis In previous articles, we\u0026rsquo;ve discussed how to monitor network traffic from programs written in various languages. This technique remains essential for network traffic monitoring, allowing for the collection of traffic data without language limitations. However, due to the unique aspects of our monitoring trigger mechanism and the specific features of kernel monitoring, these two areas warrant separate explanations.\nKernel Monitoring Kernel monitoring allows users to gain insights into network traffic performance based on the execution at the kernel level, specifically from Layer 2 (Data Link) to Layer 4 (Transport) of the OSI model.\nNetwork monitoring at the kernel layer is deference from the syscall (user-space) layer in terms of the metrics and identifiers used. While the syscalls layer can utilize file descriptors to correlate various operations, kernel layer network operations primarily use packets as unique identifiers. 
This discrepancy necessitates a mapping relationship that SkyWalking Rover can use to bind these two layers together for comprehensive monitoring.\nLet\u0026rsquo;s dive into the details of how data is monitored in both sending and receiving modes.\nObserve Sending When sending data, tracking the status and timing of each packet is crucial for understanding the state of each transmission. Within the kernel, operations progress from Layer 4 (L4) down to Layer 2 (L2), maintaining the same thread ID as during the syscalls layer, which simplifies data correlation.\nSkyWalking Rover monitors several key kernel functions to observe packet transmission dynamics, listed from L4 to L2:\n kprobe/tcp_sendmsg: Captures the time when a packet enters the L4 protocol stack for sending and the time it finishes processing. This function is essential for tracking the initial handling of packets at the transport layer. kprobe/tcp_transmit_skb: Records the total number of packet transmissions and the size of each packet sent. This function helps identify how many times a packet or a batch of packets is attempted to be sent, which is critical for understanding network throughput and congestion. tracepoint/tcp/tcp_retransmit_skb: Notes whether packet retransmission occurs, providing insights into network reliability and connection quality. Retransmissions can significantly impact application performance and user experience. tracepoint/skb/kfree_skb: Records packet loss during transmission and logs the reason for such occurrences. Understanding packet loss is crucial for diagnosing network issues and ensuring data integrity. kprobe/__ip_queue_xmit: Records the start and end times of processing by the L3 protocol. This function is vital for understanding the time taken for IP-level operations, including routing decisions. kprobe/nf_hook_slow: Records the total time and number of occurrences spent in Netfilter hooks, such as iptables rule evaluations. This monitoring point is important for assessing the impact of firewall rules and other filtering mechanisms on packet flow. kprobe/neigh_resolve_output: If resolving an unknown MAC address is necessary before sending a network request, this function records the occurrences and total time spent on this resolution. MAC address resolution times can affect the initial packet transmission delay. kprobe/__dev_queue_xmit: Records the start and end times of entering the L2 protocol stack, providing insights into the data link layer\u0026rsquo;s processing times. tracepoint/net/net_dev_start_xmit and tracepoint/net/net_dev_xmit: Records the actual time taken to transmit each packet at the network interface card (NIC). These functions are crucial for understanding the hardware-level performance and potential bottlenecks at the point of sending data to the physical network.  According to the interception of the above method, Apache SkyWalking Rover can provide key execution time and metrics for each level when sending network data, from the application layer (Layer 7) to the transport layer (Layer 4), and finally to the data link layer (Layer 2).\nObserve Receiving When receiving data, the focus is often on the time it takes for packets to travel from the network interface card (NIC) to the user space. Unlike the process of sending data, data receiving in the kernel proceeds from the data link layer (Layer 2) up to the transport layer (Layer 4), until the application layer (Layer 7) retrieves the packet\u0026rsquo;s content. 
In SkyWalking Rover, monitors the following key system functions to observe this process, listed from L2 to L4:\n tracepoint/net/netif_receive_skb: Records the time when a packet is received by the network interface card. This tracepoint is crucial for understanding the initial point of entry for incoming data into the system. kprobe/ip_rcv: Records the start and end times of packet processing at the network layer (Layer 3). This probe provides insights into how long it takes for the IP layer to handle routing, forwarding, and delivering packets to the correct application. kprobe/nf_hook_slow: Records the total time and occurrences spent in Netfilter hooks, same with the sending traffic flow. kprobe/tcp_v4_rcv: Records the start and end times of packet processing at the transport layer (Layer 4). This probe is key to understanding the efficiency of TCP operations, including connection management, congestion control, and data flow. tracepoint/skb/skb_copy_datagram_iovec: When application layer protocols use the data, this tracepoint binds the packet to the syscall layer data at Layer 7. This connection is essential for correlating the kernel\u0026rsquo;s handling of packets with their consumption by user-space applications.  Based on the above methods, network monitoring can help you understand the complete execution process and execution time from when data is received by the network card to when it is used by the program.\nMetrics By intercepting the methods mentioned above, we can gather key metrics that provide insights into network performance and behavior. These metrics include:\n Packets: The size of the packets and the frequency of their transmission or reception. These metric offers a fundamental understanding of the network load and the efficiency of data movement between the sender and receiver. Connections: The number of connections established or accepted between services and the time taken for these connections to be set up. This metric is crucial for analyzing the efficiency of communication and connection management between different services within the network. L2-L4 Events: The time spent on key events within the Layer 2 to Layer 4 protocols. This metric sheds light on the processing efficiency and potential bottlenecks within the lower layers of the network stack, which are essential for data transmission and reception.  Protocol Analyzing In previous articles, we have discussed parsing HTTP/1.x protocols. However, with HTTP/2.x, the protocol\u0026rsquo;s stateful nature and the pre-established connections between services complicate network profiling. This complexity makes it challenging for Apache SkyWalking Rover to fully perceive the connection context, hindering protocol parsing operations.\nTransitioning network monitoring to Daemon mode offers a solution to this challenge. By continuously observing service operations around the clock, SkyWalking Rover can begin monitoring as soon as a service starts. This immediate initiation allows for the tracking of the complete execution context, making the observation of stateful protocols like HTTP/2.x feasible.\nProbes To detect when a process is started, monitoring a specific trace point (tracepoint/sched/sched_process_fork) is essential. This approach enables the system to be aware of process initiation events. Given the necessity to filter process traffic based on certain criteria such as the process\u0026rsquo;s namespace, Apache SkyWalking Rover follows a series of steps to ensure accurate and efficient monitoring. 
These steps include:\n Monitoring Activation: The process is immediately added to a monitoring whitelist upon detection. This step ensures that the process is considered for monitoring from the moment it starts, without delay. Push to Queue: The process\u0026rsquo;s PID (Process ID) is pushed into a monitoring confirmation queue. This queue holds the PIDs of newly detected processes that are pending further confirmation from a user-space program. This asynchronous approach allows for the separation of immediate detection and subsequent processing, optimizing the monitoring workflow. User-Space Program Confirmation: The user-space program retrieves process PIDs from the queue and assesses whether each process should continue to be monitored. If a process is deemed unnecessary for monitoring, it is removed from the whitelist.  This process ensures that SkyWalking Rover can dynamically adapt its monitoring scope based on real-time conditions and configurations, allowing for both comprehensive coverage and efficient resource use.\nLimitations The monitoring of stateful protocols like HTTP/2.x currently faces certain limitations:\n Inability to Observe Pre-existing Connections: Monitoring the complete request and response cycle requires that monitoring be initiated before any connections are established. This requirement means that connections set up before the start of monitoring cannot be observed. Challenges with TLS Requests: Observing TLS encrypted traffic is complex because it relies on asynchronously attaching uprobes (user-space attaching) for observation. If new requests are made before these uprobes are successfully attached, it becomes impossible to access the data before encryption or after decryption.  Demo Next, let’s quickly demonstrate the Kubernetes monitoring feature, so you can understand more specifically what it accomplishes.\nDeploy SkyWalking Showcase SkyWalking Showcase contains a complete set of example services and can be monitored using SkyWalking. For more information, please check the official documentation.\nIn this demo, we only deploy service, the latest released SkyWalking OAP, and UI.\nexport FEATURE_FLAGS=java-agent-injector,single-node,elasticsearch,rover make deploy.kubernetes After deployment is complete, please run the following script to open SkyWalking UI: http://localhost:8080/.\nkubectl port-forward svc/ui 8080:8080 --namespace default Done Once deployed, Apache SkyWalking Rover automatically begins monitoring traffic within the system upon startup. Then, reports this traffic data to SkyWalking OAP, where it is ultimately stored in a database.\nIn the Service Dashboard within Kubernetes, you can view a list of monitored Kubernetes services. If any of these services have HTTP traffic, this information would be displayed alongside them in the dashboard.\nFigure 1: Kubernetes Service List\nAdditionally, within the Topology Tab, you can observe the topology among related services. 
In each service or call relationship, there would display relevant TCP and HTTP metrics.\nFigure 2: Kubernetes Service Topology\nWhen you select a specific service from the Service list, you can view service metrics at both the TCP and HTTP levels for the chosen service.\nFigure 3: Kubernetes Service TCP Metrics\nFigure 4: Kubernetes Service HTTP Metrics\nFurthermore, by using the Endpoint Tab, you can see which URIs have been accessed for the current service.\nFigure 5: Kubernetes Service Endpoint List\nConclusion In this article, I\u0026rsquo;ve detailed how to utilize eBPF technology for network monitoring of services within a Kubernetes cluster, a capability that has been implemented in Apache SkyWalking Rover. This approach leverages the power of eBPF to provide deep insights into network traffic and service interactions, enhancing visibility and observability across the cluster.\n","title":"Monitoring Kubernetes network traffic by using eBPF","url":"/blog/2024-03-18-monitor-kubernetes-network-by-ebpf/"},{"content":"SkyWalking Client JS 0.11.0 is released. Go to downloads page to find release tars.\n Fixed the bug that navigator.sendBeacon sent json to backend report \u0026ldquo;No suitable request converter found for a @RequestObject List\u0026rdquo;. Fix reading property from null. Pin selenium version and update license CI. Bump dependencies. Update README.  ","title":"Release Apache SkyWalking Client JS 0.11.0","url":"/events/release-apache-skywalking-client-js-0-11-0/"},{"content":"背景 Apache SkyWalking 是一个开源的应用性能管理系统,帮助用户从各种平台收集日志、跟踪、指标和事件,并在用户界面上展示它们。\n在9.7.0版本中,Apache SkyWalking 可以从多语言的探针和 Service Mesh 中收集访问日志,并生成相应的拓扑图、链路和其他数据。 但是对于Kubernetes环境,暂时无法提供对应用程序的访问日志进行采集并生成拓扑图。本文探讨了Apache SkyWalking 10.0.0版本如何采用eBPF技术来收集和存储应用访问日志,解决了这一限制。\n为什么使用 eBPF? 
为了在Kubernetes中监控网络流量,以下特性需得到支持:\n 跨语言: 在Kubernetes部署的应用可能使用任何编程语言编写,因此对多种语言的支持十分重要。 非侵入性: 监控网络流量时不对应用程序进行任何修改是必要的,因为直接干预Kubernetes中的应用程序是不可行的。 内核指标监控: 通常,仅通过分析用户空间级别的流量来诊断网络问题是不够的。经常需要深入分析,结合内核空间的网络流量指标。 支持多种网络协议: 应用程序可能使用不同的传输协议进行通信,这就需要支持一系列的协议。  鉴于这些要求,eBPF显现出作为一个有能力的解决方案。在下一节中,我们将深入讨论Apache SkyWalking Rover是如何解决这些方面作出更详细解释。\n内核监控与协议分析 在之前的文章中,我们讨论了如何对不同编程语言的程序进行网络流量获取。在网络流量监控中,我们仍然会使用该技术进行流量采集。 但是由于这次监控触发方式和内核监控方面的不同特性,所以这两部分会单独进行说明。\n内核监控 内核监控允许用户根据在内核层面的执行,洞察网络流量性能,特别是从OSI模型的第2层(数据链路层)到第4层(传输层)。\n内核层的网络监控与syscall(用户空间系统调用)层在关联指标不同。虽然syscall层可以利用文件描述符来关联各种操作,但内核层的网络操作主要使用数据包作为唯一标识符。 这种差异需要映射关系,Apache SkyWalking Rover可以使用它将这两层绑定在一起,进行全面监控。\n让我们深入了解数据在发送和接收模式下是如何被监控的。\n监控数据发送 在发送数据时,跟踪每个数据包的状态和时间对于理解每次传输的状态至关重要。在内核中,操作从第4层(L4)一直调用到第2层(L2),并且会保持与在syscall层相同的线程ID,这简化了数据的相关性分析。\nSkyWalking Rover监控了几个关键的内核函数,以观察数据包传输动态,顺序从L4到L2:\n kprobe/tcp_sendmsg: 记录数据包进入L4协议栈进行发送以及完成处理的时间。这个函数对于跟踪传输层对数据包的初始处理至关重要。 kprobe/tcp_transmit_skb: 记录数据包传输的总次数和每个发送的数据包的大小。这个函数有助于识别尝试发送一个数据包或一段时间内发送一批数据包的次数,这对于理解网络吞吐量和拥塞至关重要。 tracepoint/tcp/tcp_retransmit_skb: 记录是否发生数据包重传,提供网络可靠性和连接质量的见解。重传可以显著影响应用性能和用户体验。 tracepoint/skb/kfree_skb: 记录传输过程中的数据包丢失,并记录发生这种情况的原因。理解数据包丢失对于诊断网络问题和确保数据完整性至关重要。 kprobe/__ip_queue_xmit: 记录L3协议处理的开始和结束时间。这个功能对于理解IP级操作所需的时间至关重要,包括路由决策。 kprobe/nf_hook_slow: 记录在Netfilter钩子中花费的总时间和发生次数,例如 iptables 规则评估。这个函数对于评估防火墙规则和其他过滤机制对数据流的影响非常重要。 kprobe/neigh_resolve_output: 如果在发送网络请求之前需要解析未知的MAC地址,这个函数会记录发生的次数和在这个解析上花费的总时间。MAC地址解析时间可以影响初始数据包传输的延迟。 kprobe/__dev_queue_xmit: 记录进入L2协议栈的开始和结束时间,提供对数据链路层处理时间的见解。 tracepoint/net/net_dev_start_xmit and tracepoint/net/net_dev_xmit: 记录在网卡(NIC)上传输每个数据包所需的实际时间。这些功能对于理解硬件级性能和在将数据发送到物理网络时可能出现的瓶颈至关重要。  根据上述方法的拦截,Apache SkyWalking Rover可以在发送网络数据时为每个层级提供关键的执行时间和指标,从应用层(第7层)到传输层(第4层),最终到数据链路层(第2层)。\n监控数据接收 在接收数据时,通常关注的是数据包从网卡(NIC)到用户空间的传输时间。与发送数据的过程不同,在内核中接收数据是从数据链路层(第2层)开始,一直上升到传输层(第4层),直到应用层(第7层)检索到数据包的内容。\n在SkyWalking Rover中,监控以下关键系统功能以观察这一过程,顺序从L2到L4:\n tracepoint/net/netif_receive_skb: 记录网卡接收到数据包的时间。这个追踪点对于理解进入系统的传入数据的初始入口点至关重要。 kprobe/ip_rcv: 记录网络层(第3层)数据包处理的开始和结束时间。这个探针提供了IP层处理路由、转发和将数据包正确传递给应用程序所需时间的见解。 kprobe/nf_hook_slow: 记录在Netfilter钩子中花费的总时间和发生次数,与发送流量的情况相同。 kprobe/tcp_v4_rcv: 记录传输层(第4层)数据包处理的开始和结束时间。这个探针对于理解TCP操作的效率至关重要,包括连接管理、拥塞控制和数据流。 tracepoint/skb/skb_copy_datagram_iovec: 当应用层协议使用数据时,这个追踪点在第7层将数据包与syscall层的数据绑定。这种连接对于将内核对数据包的处理与用户空间应用程序的消费相关联是至关重要的。  基于上述方法,网络监控可以帮助您理解从网卡接收数据到程序使用数据的完整执行过程和执行时间。\n指标 通过拦截上述提到的方法,我们可以收集提供网络性能的关键指标。这些指标包括:\n 数据包: 数据包的大小及其传输或接收的频率。这些指标提供了对网络负载和数据在发送者与接收者之间传输效率的基本理解。 连接: 服务之间建立或接收的连接数量,以及设置这些连接所需的时间。这个指标对于分析网络内不同服务之间的通信效率和连接管理至关重要。 L2-L4 事件: 在第2层到第4层协议中关键事件上所花费的时间。这个指标揭示了网络堆栈较低层的处理效率和潜在瓶颈,这对于数据传输至关重要。  协议分析 在之前的文章中,我们已经讨论了解析 HTTP/1.x 协议。然而,对于 HTTP/2.x,协议的有状态性质和服务之间预先建立的连接使得网络分析变得复杂。 这种复杂性使得Apache SkyWalking Rover很难完全感知连接上下文,阻碍了协议解析操作。\n将网络监控转移到守护进程模式提供了一种解决这一挑战的方法。通过全天候不断观察服务,Apache SkyWalking Rover可以在服务启动时立即开始监控。 这种立即启动允许跟踪完整的执行上下文,使得观察像 HTTP/2.x 这样的有状态协议变得可行。\n追踪 为了检测到一个进程何时启动,监控一个特定的追踪点 (tracepoint/sched/sched_process_fork) 是必不可少的。这追踪点使系统能够意识到进程启动事件。\n鉴于需要根据某些标准(如进程的命名空间)过滤进程流量,Apache SkyWalking Rover遵循一系列步骤来确保准确和高效的监控。这些步骤包括:\n 启动监控: 一旦检测到进程,立即将其添加到监控白名单中。这一步确保从进程启动的那一刻起就考虑对其进行监控,不会有延迟。 推送队列: 进程的PID(进程ID)被推送到一个监控确认队列中。这个队列保存了新检测到的进程的PID,这些进程等待来自用户空间程序的进一步确认。这种异步方法对立即检测和后续处理进行分离,优化了监控工作流程。 用户态程序确认: 用户空间程序从队列中检索进程PID,并评估每个进程是否应该继续被监控。如果一个进程被认为不必要进行监控,它将被从白名单中移除。  这个过程确保了Apache SkyWalking Rover可以根据实时条件和配置动态调整其监控范围,允许既全面覆盖又有效的资源监控。\n限制 像 HTTP/2.x 这样的有状态协议的监控目前仍然面临一些限制:\n 无法观察现有连接: 要监控完整的请求和响应周期,需要在建立任何连接之前启动监控。这个要求意味着在监控开始之前建立的连接无法被观察到。 TLS请求的挑战: 
观察TLS加密流量是复杂的,因为它依赖于异步加载uprobes(用户空间加载)进行观察。如果在成功加载这些uprobes之前发出新的请求,那么在加密之前或解密之后访问数据就变得不可能。  演示 接下来,让我们快速演示Kubernetes监控功能,以便更具体地了解它的功能。\n部署 SkyWalking Showcase SkyWalking Showcase 包含完整的示例服务,并可以使用 SkyWalking 进行监视。有关详细信息,请查看官方文档。\n在此演示中,我们只部署服务、最新发布的 SkyWalking OAP,UI和Rover。\nexport FEATURE_FLAGS=java-agent-injector,single-node,elasticsearch,rover make deploy.kubernetes 部署完成后,请运行以下脚本以打开 SkyWalking UI:http://localhost:8080/ 。\nkubectl port-forward svc/ui 8080:8080 --namespace default 完成 一旦部署,Apache SkyWalking Rover在启动时会自动开始监控系统中的流量。然后,它将这些流量数据报告给SkyWalking OAP,并最终存储在数据库中。\n在Kubernetes中的服务仪表板中,您可以查看被监控的Kubernetes服务列表。如果其中任何服务具有HTTP流量,这些指标信息将在列表中显示。\n图 1: Kubernetes 服务列表\n此外,在拓扑图选项卡中,您可以观察相关服务之间的拓扑关系。在每个服务节点或服务之间调用关系中,将显示相关的TCP和HTTP指标。\n图 2: Kubernetes 服务拓扑图\n当您从服务列表中选择特定服务时,您可以查看所选服务在TCP和HTTP级别的服务指标。\n图 3: Kubernetes 服务 TCP 指标\n图 4: Kubernetes 服务 HTTP 指标\n此外,通过使用端点选项卡,您可以查看当前服务所访问的URI。\n图 5: Kubernetes 服务端点列表\n结论 在本文中,我详细介绍了如何利用eBPF技术对Kubernetes集群中的服务进行网络流量监控,这是Apache SkyWalking Rover中实现的一项功能。\n这项功能利用了eBPF的强大功能,提供了对网络流量和服务交互的深入洞察,增强了对整个集群的可观测性。\n","title":"使用 eBPF 监控 Kubernetes 网络流量","url":"/zh/2024-03-18-monitor-kubernetes-network-by-ebpf/"},{"content":"Background ClickHouse is an open-source column-oriented database management system that allows generating analytical data reports in real-time, so it is widely used for online analytical processing (OLAP).\nApache SkyWalking is an open-source APM system that provides monitoring, tracing and diagnosing capabilities for distributed systems in Cloud Native architectures. Increasingly, App Service architectures incorporate Skywalking as an essential monitoring component of a service or instance.\nBoth ClickHouse and Skywalking are popular frameworks, and it would be great to monitor your ClickHouse database through Skywalking. Next, let\u0026rsquo;s share how to monitor ClickHouse database with Skywalking.\nPrerequisites and configurations Make sure you\u0026rsquo;ve met the following prerequisites before you start onboarding your monitor.\nConfig steps:\n Exposing prometheus endpoint. Fetching ClickHouse metrics by OpenTelemetry. Exporting metrics to Skywalking OAP server.  Prerequisites for setup The monitoring for ClickHouse relies on the embedded prometheus endpoint of ClickHouse and will not be supported in previous versions starting from v20.1.2.4.\nYou can check the version of your server:\n:) select version(); SELECT version() Query id: 2d3773ca-c320-41f6-b2ac-7ebe37eddc58 ┌─version()───┐ │ 24.2.1.2248 │ └─────────────┘ If your ClickHouse version is earlier than v20.1.2.4, you need to set up ClickHouse-exporter to access data.\nExpose prometheus Endpoint The embedded prometheus endpoint will make it easy for data collection, you just need to open the required configuration in the core configuration file config.xml of ClickHouse. In addition to your original configuration, you only need to modify the configuration of Prometheus.\n/etc/clickhouse-server/config.xml:\n\u0026lt;clickhouse\u0026gt; ...... \u0026lt;prometheus\u0026gt; \u0026lt;endpoint\u0026gt;/metrics\u0026lt;/endpoint\u0026gt; \u0026lt;port\u0026gt;9363\u0026lt;/port\u0026gt; \u0026lt;metrics\u0026gt;true\u0026lt;/metrics\u0026gt; \u0026lt;events\u0026gt;true\u0026lt;/events\u0026gt; \u0026lt;asynchronous_metrics\u0026gt;true\u0026lt;/asynchronous_metrics\u0026gt; \u0026lt;errors\u0026gt;true\u0026lt;/errors\u0026gt; \u0026lt;/prometheus\u0026gt; \u0026lt;/clickhouse\u0026gt; Settings:\n endpoint – HTTP endpoint for scraping metrics by prometheus server. Start from ‘/’. 
port – Port for endpoint. metrics – Expose metrics from the system.metrics table. events – Expose metrics from the system.events table. asynchronous_metrics – Expose current metrics values from the system.asynchronous_metrics table. errors - Expose the number of errors by error codes occurred since the last server restart. This information could be obtained from the system.errors as well.  Save the config and restart the ClickHouse server.\nIt contains more than 1,000 metrics, covering services、networks、disk、MergeTree、errors and so on. For more details, after restarting the server, you can call curl 127.0.0.1:9363/metrics to know about the metrics.\nYou also can check the metrics by tables to make a contrast.\n:) select * from system.metrics limit 10 SELECT * FROM system.metrics LIMIT 10 Query id: af677622-960e-4589-b2ca-0b6a40c443aa ┌─metric───────────────────────────────┬─value─┬─description─────────────────────────────────────────────────────────────────────┐ │ Query │ 1 │ Number of executing queries │ │ Merge │ 0 │ Number of executing background merges │ │ Move │ 0 │ Number of currently executing moves │ │ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │ │ ReplicatedFetch │ 0 │ Number of data parts being fetched from replica │ │ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │ │ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │ │ BackgroundMergesAndMutationsPoolTask │ 0 │ Number of active merges and mutations in an associated background pool │ │ BackgroundMergesAndMutationsPoolSize │ 64 │ Limit on number of active merges and mutations in an associated background pool │ │ BackgroundFetchesPoolTask │ 0 │ Number of active fetches in an associated background pool │ └──────────────────────────────────────┴───────┴─────────────────────────────────────────────────────────────────────────────────┘ :) select * from system.events limit 10; SELECT * FROM system.events LIMIT 10 Query id: 32c618d0-037a-400a-92a4-59fde832e4e2 ┌─event────────────────────────────┬──value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ │ Query │ 7 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │ │ SelectQuery │ 7 │ Same as Query, but only for SELECT queries. │ │ InitialQuery │ 7 │ Same as Query, but only counts initial queries (see is_initial_query). │ │ QueriesWithSubqueries │ 40 │ Count queries with all subqueries │ │ SelectQueriesWithSubqueries │ 40 │ Count SELECT queries with all subqueries │ │ QueryTimeMicroseconds │ 202862 │ Total time of all queries. │ │ SelectQueryTimeMicroseconds │ 202862 │ Total time of SELECT queries. │ │ FileOpen │ 40473 │ Number of files opened. │ │ Seek │ 100 │ Number of times the \u0026#39;lseek\u0026#39; function was called. │ │ ReadBufferFromFileDescriptorRead │ 67995 │ Number of reads (read/pread) from a file descriptor. Does not include sockets. 
│ └──────────────────────────────────┴────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ Start up Opentelemetry-Collector Configure OpenTelemetry based on your own requirements. Following the example below:\notel-collector-config.yaml:\nreceivers: prometheus: config: scrape_configs: - job_name: \u0026#39;clickhouse-monitoring\u0026#39; scrape_interval: 15s static_configs: - targets: [\u0026#39;127.0.0.1:9363\u0026#39;,\u0026#39;127.0.0.1:9364\u0026#39;,\u0026#39;127.0.0.1:9365\u0026#39;] labels: host_name: prometheus-clickhouse processors: batch: exporters: otlp: endpoint: 127.0.0.1:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp Please ensure:\n job_name: 'clickhouse-monitoring' that marked the data from ClickHouse, If modified, it will be ignored. host_name defines the service name, you have to make one. endpoint point to the oap server address. the network between ClickHouse, OpenTelemetry Collector, and Skywalking OAP Server must be accessible.  If goes well, refresh the Skywalking-ui home page in a few seconds and you can see ClickHouse under the database menu.\nsuccess log:\n2024-03-12T03:57:39.407Z\tinfo\tservice@v0.93.0/telemetry.go:76\tSetting up own telemetry... 2024-03-12T03:57:39.412Z\tinfo\tservice@v0.93.0/telemetry.go:146\tServing metrics\t{\u0026quot;address\u0026quot;: \u0026quot;:8888\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;Basic\u0026quot;} 2024-03-12T03:57:39.416Z\tinfo\tservice@v0.93.0/service.go:139\tStarting otelcol...\t{\u0026quot;Version\u0026quot;: \u0026quot;0.93.0\u0026quot;, \u0026quot;NumCPU\u0026quot;: 4} 2024-03-12T03:57:39.416Z\tinfo\textensions/extensions.go:34\tStarting extensions... 2024-03-12T03:57:39.423Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:240\tStarting discovery manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:231\tScrape job added\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;, \u0026quot;jobName\u0026quot;: \u0026quot;clickhouse-monitoring\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tservice@v0.93.0/service.go:165\tEverything is ready. Begin running and processing data. 
2024-03-12T03:57:59.432Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:282\tStarting scrape manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} ClickHouse monitoring dashboard About the dashboard The dashboard includes the service dashboard and the instance dashboard.\nMetrics include servers, queries, networks, insertions, replicas, MergeTree, ZooKeeper and embedded ClickHouse Keeper.\nThe service dashboard displays the metrics of the entire cluster.\nThe instance dashboard displays the metrics of an instance.\nAbout the metrics Here are some meanings of ClickHouse Instance metrics, more here.\n   Monitoring Panel Unit Description Data Source     CpuUsage count CPU time spent seen by OS per second(according to ClickHouse.system.dashboard.CPU Usage (cores)). ClickHouse   MemoryUsage percentage Total amount of memory (bytes) allocated by the server/ total amount of OS memory. ClickHouse   MemoryAvailable percentage Total amount of memory (bytes) available for program / total amount of OS memory. ClickHouse   Uptime sec The server uptime in seconds. It includes the time spent for server initialization before accepting connections. ClickHouse   Version string Version of the server in a single integer number in base-1000. ClickHouse   FileOpen count Number of files opened. ClickHouse     metrics about ZooKeeper are valid when managing cluster by ZooKeeper metrics about embedded ClickHouse Keeper are valid when ClickHouse Keeper is enabled  References  ClickHouse prometheus endpoint ClickHouse built-in observability dashboard ClickHouse Keeper  ","title":"Monitoring Clickhouse Server through SkyWalking","url":"/blog/2024-03-12-monitoring-clickhouse-through-skywalking/"},{"content":"背景介绍 ClickHouse 是一个开源的面向列的数据库管理系统,可以实时生成分析数据报告,因此被广泛用于在线分析处理(OLAP)。\nApache SkyWalking 是一个开源的 APM 系统,为云原生架构中的分布式系统提供监控、跟踪和诊断能力。应用服务体系越来越多地将 Skywalking 作为服务或实例的基本监视组件。\nClickHouse 和 Skywalking 框架都是当下流行的服务组件,通过 Skywalking 监控您的 ClickHouse 数据库将是一个不错的选择。接下来,就来分享一下如何使用 Skywalking 监控 ClickHouse 数据库。\n前提与配置 在开始接入监控之前,请先确认以下前提条件。\n配置步骤:\n 暴露 Prometheus 端点。 通过 OpenTelemetry 拉取 ClickHouse 的指标数据。 将指标数据发送到 Skywalking OAP server.  使用的前提 ClickHouse 的监控依赖于 ClickHouse 的内嵌 Prometheus 端点配置,配置从 v20.1.2.4 开始支持,因此之前的老版本将无法支持。\n您可以检查 ClickHouse 服务的版本:\n:) select version(); SELECT version() Query id: 2d3773ca-c320-41f6-b2ac-7ebe37eddc58 ┌─version()───┐ │ 24.2.1.2248 │ └─────────────┘ 如果您的 ClickHouse 版本低于 v20.1.2.4,则需要依靠 ClickHouse-exporter 获取数据。\n暴露 Prometheus 端点 内嵌的 Prometheus 端点简化了数据采集流程,您只需要在 ClickHouse 的核心配置文件 config.xml 打开所需的配置即可。除了您原来的配置,您只需要参考如下修改 Prometheus 的配置。\n/etc/clickhouse-server/config.xml:\n\u0026lt;clickhouse\u0026gt; ...... 
\u0026lt;prometheus\u0026gt; \u0026lt;endpoint\u0026gt;/metrics\u0026lt;/endpoint\u0026gt; \u0026lt;port\u0026gt;9363\u0026lt;/port\u0026gt; \u0026lt;metrics\u0026gt;true\u0026lt;/metrics\u0026gt; \u0026lt;events\u0026gt;true\u0026lt;/events\u0026gt; \u0026lt;asynchronous_metrics\u0026gt;true\u0026lt;/asynchronous_metrics\u0026gt; \u0026lt;errors\u0026gt;true\u0026lt;/errors\u0026gt; \u0026lt;/prometheus\u0026gt; \u0026lt;/clickhouse\u0026gt; 配置说明:\n endpoint – 通过 prometheus 服务器抓取指标的 HTTP 端点。从/开始。 port – 端点的端口。 metrics – 暴露 system.metrics 表中的指标。 events – 暴露 system.events 表中的指标。 asynchronous_metrics – 暴露 system.asynchronous_metrics 表中的当前指标值。 errors - 按错误代码暴露自上次服务器重新启动以来发生的错误数。此信息也可以从 system.errors 中获得。  保存配置并重启 ClickHouse 服务。\n端点数据包含1000多个指标,涵盖服务、网络、磁盘、MergeTree、错误等。想了解更多指标细节,在重启服务后,可以调用 curl 127.0.0.1:9363/metrics 看到具体指标的内容。\n您还可以通过数据库表的数据与端点数据进行检查对比。\n:) select * from system.metrics limit 10 SELECT * FROM system.metrics LIMIT 10 Query id: af677622-960e-4589-b2ca-0b6a40c443aa ┌─metric───────────────────────────────┬─value─┬─description─────────────────────────────────────────────────────────────────────┐ │ Query │ 1 │ Number of executing queries │ │ Merge │ 0 │ Number of executing background merges │ │ Move │ 0 │ Number of currently executing moves │ │ PartMutation │ 0 │ Number of mutations (ALTER DELETE/UPDATE) │ │ ReplicatedFetch │ 0 │ Number of data parts being fetched from replica │ │ ReplicatedSend │ 0 │ Number of data parts being sent to replicas │ │ ReplicatedChecks │ 0 │ Number of data parts checking for consistency │ │ BackgroundMergesAndMutationsPoolTask │ 0 │ Number of active merges and mutations in an associated background pool │ │ BackgroundMergesAndMutationsPoolSize │ 64 │ Limit on number of active merges and mutations in an associated background pool │ │ BackgroundFetchesPoolTask │ 0 │ Number of active fetches in an associated background pool │ └──────────────────────────────────────┴───────┴─────────────────────────────────────────────────────────────────────────────────┘ :) select * from system.events limit 10; SELECT * FROM system.events LIMIT 10 Query id: 32c618d0-037a-400a-92a4-59fde832e4e2 ┌─event────────────────────────────┬──value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ │ Query │ 7 │ Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries. │ │ SelectQuery │ 7 │ Same as Query, but only for SELECT queries. │ │ InitialQuery │ 7 │ Same as Query, but only counts initial queries (see is_initial_query). │ │ QueriesWithSubqueries │ 40 │ Count queries with all subqueries │ │ SelectQueriesWithSubqueries │ 40 │ Count SELECT queries with all subqueries │ │ QueryTimeMicroseconds │ 202862 │ Total time of all queries. │ │ SelectQueryTimeMicroseconds │ 202862 │ Total time of SELECT queries. │ │ FileOpen │ 40473 │ Number of files opened. │ │ Seek │ 100 │ Number of times the \u0026#39;lseek\u0026#39; function was called. │ │ ReadBufferFromFileDescriptorRead │ 67995 │ Number of reads (read/pread) from a file descriptor. Does not include sockets. 
│ └──────────────────────────────────┴────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ 启动 Opentelemetry-Collector 根据自身环境 配置 OpenTelemetry。 您可参照下面的例子:\notel-collector-config.yaml:\nreceivers: prometheus: config: scrape_configs: - job_name: \u0026#39;clickhouse-monitoring\u0026#39; scrape_interval: 15s static_configs: - targets: [\u0026#39;127.0.0.1:9363\u0026#39;,\u0026#39;127.0.0.1:9364\u0026#39;,\u0026#39;127.0.0.1:9365\u0026#39;] labels: host_name: prometheus-clickhouse processors: batch: exporters: otlp: endpoint: 127.0.0.1:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp 请着重关注:\n job_name: 'clickhouse-monitoring' 标记着来自 ClickHouse 的数据,如果自行修改,数据会被服务忽略。 host_name 定义服务的名称。 endpoint 指向您的 OAP 服务地址. ClickHouse、OpenTelemetry Collector 和 Skywalking OAP Server 之间的网络必须可访问。  如果进展顺利,几秒钟后刷新 Skywalking-ui 网页,您可以在数据库的菜单下看到 ClickHouse。\n启动成功日志样例:\n2024-03-12T03:57:39.407Z\tinfo\tservice@v0.93.0/telemetry.go:76\tSetting up own telemetry... 2024-03-12T03:57:39.412Z\tinfo\tservice@v0.93.0/telemetry.go:146\tServing metrics\t{\u0026quot;address\u0026quot;: \u0026quot;:8888\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;Basic\u0026quot;} 2024-03-12T03:57:39.416Z\tinfo\tservice@v0.93.0/service.go:139\tStarting otelcol...\t{\u0026quot;Version\u0026quot;: \u0026quot;0.93.0\u0026quot;, \u0026quot;NumCPU\u0026quot;: 4} 2024-03-12T03:57:39.416Z\tinfo\textensions/extensions.go:34\tStarting extensions... 2024-03-12T03:57:39.423Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:240\tStarting discovery manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:231\tScrape job added\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;, \u0026quot;jobName\u0026quot;: \u0026quot;clickhouse-monitoring\u0026quot;} 2024-03-12T03:57:59.431Z\tinfo\tservice@v0.93.0/service.go:165\tEverything is ready. Begin running and processing data. 
2024-03-12T03:57:59.432Z\tinfo\tprometheusreceiver@v0.93.0/metrics_receiver.go:282\tStarting scrape manager\t{\u0026quot;kind\u0026quot;: \u0026quot;receiver\u0026quot;, \u0026quot;name\u0026quot;: \u0026quot;prometheus\u0026quot;, \u0026quot;data_type\u0026quot;: \u0026quot;metrics\u0026quot;} ClickHouse 监控面板 关于面板 这个仪表盘包含服务仪表盘和实例仪表盘。\n指标涵盖服务器、查询、网络、插入、副本、MergeTree、ZooKeeper 和内嵌 ClickHouse Keeper。\n服务仪表盘主要展示整个集群相关的指标。\n实例仪表盘主要展示单个实例相关的指标。\n关于指标 以下是ClickHouse实例指标的一些含义,前往了解完整的指标列表。\n   面板名称 单位 指标含义 数据源     CpuUsage count 操作系统每秒花费的 CPU 时间(根据 ClickHouse.system.dashboard.CPU 使用率(核心数))。 ClickHouse   MemoryUsage percentage 服务器分配的内存总量(字节)/操作系统内存总量。 ClickHouse   MemoryAvailable percentage 可用于程序的内存总量(字节)/操作系统内存总量。 ClickHouse   Uptime sec 服务器正常运行时间(以秒为单位)。它包括在接受连接之前进行服务器初始化所花费的时间。 ClickHouse   Version string 以 base-1000 样式展示的服务器版本。 ClickHouse   FileOpen count 打开的文件数。 ClickHouse     ZooKeeper 的指标在 ZooKeeper 管理集群时有效。 内嵌ClickHouse Keeper的指标在开启内嵌 ClickHouse Keeper 配置时有效。  参考文档  ClickHouse prometheus endpoint ClickHouse built-in observability dashboard ClickHouse Keeper  ","title":"使用 SkyWalking 监控 ClickHouse Server","url":"/zh/2024-03-12-monitoring-clickhouse-through-skywalking/"},{"content":"背景介绍 Apache RocketMQ 是一个开源的低延迟、高并发、高可用、高可靠的分布式消息中间件, 从SkyWalking OAP 10.0 版本开始, 新增了 对 RocketMQ Server的监控面板。本文将展示并介绍如何使用 Skywalking来监控RocketMQ\n部署 流程 通过RocketMQ官方提供的RocketMQ exporter来采集RocketMQ Server数据,再通过opentelmetry-collector来拉取RocketMQ exporter并传输到skywalking oap服务来处理\nDataFlow: 准备  Skywalking oap服务,v10.0 + RocketMQ v4.3.2 + RocketMQ exporter v0.0.2+ Opentelmetry-collector v0.87+  启动顺序  启动 RocketMQ namesrv 和 broker 启动 skywalking oap 和 ui 启动 RocketMQ exporter 启动 opentelmetry-collector  具体如何启动和配置请参考以上链接中官方教程.\n需要注意下的是 opentelmetry-collector 的配置文件.\njob_name: \u0026quot;rocketmq-monitoring\u0026quot; 请不要修改,否则 skywalking 不会处理这部分数据.\nrocketmq-exporter 替换成RocketMQ exporter 的地址.\nreplacement: rocketmq-cluster 中的rocketmq-cluster如果想要使用下文介绍的服务分层功能,请自行定义为其他服务层级相匹配的名称.\noap 为 skywalking oap 地址,请自行替换.\nreceivers: prometheus: config: scrape_configs: - job_name: \u0026quot;rocketmq-monitoring\u0026quot; scrape_interval: 30s static_configs: - targets: ['rocketmq-exporter:5557'] relabel_configs: - source_labels: [ ] target_label: cluster replacement: rocketmq-cluster exporters: otlp: endpoint: oap:11800 tls: insecure: true processors: batch: service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp 监控指标 指标分为 三个维度, cluster,broker,topic\ncluster监控 cluster 主要是站在集群的角度来统计展示,比如\nMessages Produced Today 今日集群产生的消息数\nMax CommitLog Disk Ratio 展示集群中磁盘使用率最高的broker\nTotal Producer Tps 集群生产者tps\nbroker 监控 broker 主要是站在节点的角度来统计展示,比如\nProduce Tps 节点生产者tps\nProducer Message Size(MB)节点生产消息大小\ntopic 监控 topic 主要是站在主题的角度来统计展示,比如\nConsumer Group Count 消费该主题的消费者组个数\nConsumer Latency(s) 消费者组的消费延时时间\nBacklogged Messages 消费者组消费消息堆积\n注意:topic 维度是整个 topic 来聚合,并不是在一个 broker 上的 topic 聚合,在 dashboard 上你也可以看到 broker 跟 topic 是平级的。\n各个指标的含义可以在图标的 tip 上找到解释\n更多指标可以参考文档\ndemo 已经在 skywalking showcase 上线,可以在上面看到展示效果\n服务分层 skywalking 10 新增了重要功能Service Hierarchy,接收来自不同层级的服务数据,比如 java agent 上报,k8s 监控数据或者 otel 的监控数据. 根据设置规则如果发现这些服务名称符合匹配规则,则可以将这些不同层级的服务联系起来。\n如下图所示:\nskywalking 采集部署在 k8s 的 RocketMQ 服务端的k8s 数据,并接收来自 otel 的 RocketMQ 服务端监控数据,根据匹配规则这些服务具有相同的服务名称,则可以在 ui 上观察到它们的联系\n","title":"使用 SkyWalking 监控 RocketMQ Server","url":"/zh/2024-02-29-rocketmq-monitoring-by-skywalking/"},{"content":"SkyWalking Go 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Add support ignore suffix for span name. 
Adding go 1.21 and 1.22 in docker image.  Plugins  Support setting a discard type of reporter. Add redis.max_args_bytes parameter for redis plugin. Changing intercept point for gin, make sure interfaces could be grouped when params defined in relativePath. Support RocketMQ MQ. Support AMQP MQ. support Echov4 framework.  Documentation Bug Fixes  Fix users can not use async api in toolkit-trace. Fix cannot enhance the vendor management project. Fix SW_AGENT_REPORTER_GRPC_MAX_SEND_QUEUE not working on metricsSendCh \u0026amp; logSendCh chans of gRPC reporter. Fix ParseVendorModule error for special case in vendor/modules.txt. Fix enhance method error when unknown parameter type. Fix wrong tracing context when trace have been sampled. Fix enhance param error when there are multiple params. Fix lost trace when multi middleware handlerFunc in gin plugin. Fix DBQueryContext execute error in sql plugin. Fix stack overflow as endless logs triggered.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Go 0.4.0","url":"/events/release-apache-skwaylking-go-0.4.0/"},{"content":"背景介绍 在 Scala 中,纯函数式中主要使用 Fiber,而不是线程,诸如 Cats-Effect、ZIO 等 Effect 框架。 您可以将 Fiber 视为轻量级线程,它是一种并发模型,由框架本身掌控控制权,从而消除了上下文切换的开销。 基于这些 Effect 框架开发的 HTTP、gRCP、GraphQL 库而开发的应用,我们一般称为 纯函数式应用程序。\n我们以 ZIO 为切入点, 演示 SkyWalking Scala 如何支持 Effect 生态。\nZIO Trace 首先,我们想要实现 Fiber 上下文传递,而不是监控 Fiber 本身。对于一个大型应用来说,可能存在成千上万个 Fiber,监控 Fiber 本身的意义不大。\n虽然 Fiber 的 Span 是在活跃时才会创建,但难免会有目前遗漏的场景,所以提供了一个配置 plugin.ziov2.ignore_fiber_regexes。 它将使用正则去匹配 Fiber location,匹配上的 Fiber 将不会创建 Span。\nFiber Span的信息如下:\n下面是我们使用本 ZIO 插件,和一些官方插件(hikaricp、jdbc、pulsar)完成的 Trace:\n分析 在 ZIO 中,Fiber可以有两种方式被调度,它们都是 zio.Executor 的子类。当然您也可以使用自己的线程池,这样也需被 ZIO 包装,其实就类似下面的 blockingExecutor。\nabstract class Executor extends ExecutorPlatformSpecific { self =\u0026gt; def submit(runnable: Runnable)(implicit unsafe: Unsafe): Boolean } 一种是系统默认线程池 defaultExecutor:\nprivate[zio] trait RuntimePlatformSpecific { final val defaultExecutor: Executor = Executor.makeDefault() } 另一种是专用于阻塞 IO 的线程池 blockingExecutor:\nprivate[zio] trait RuntimePlatformSpecific { final val defaultBlockingExecutor: Executor = Blocking.blockingExecutor } 默认线程池 defaultExecutor 对于 defaultExecutor,其本身是很复杂的,但它就是一个 ZIO 的 Fiber 调度(执行)器:\n/** * A `ZScheduler` is an `Executor` that is optimized for running ZIO * applications. Inspired by \u0026#34;Making the Tokio Scheduler 10X Faster\u0026#34; by Carl * Lerche. 
[[https://tokio.rs/blog/2019-10-scheduler]] */ private final class ZScheduler extends Executor 由于它们都是 zio.Executor 的子类,我们只需要对其及其子类进行增强:\nfinal val ENHANCE_CLASS = LogicalMatchOperation.or( HierarchyMatch.byHierarchyMatch(\u0026#34;zio.Executor\u0026#34;), MultiClassNameMatch.byMultiClassMatch(\u0026#34;zio.Executor\u0026#34;) ) 它们都是线程池,我们只需要在 zio.Executor 的 submit 方法上进行类似 ThreadPoolExecutor 上下文捕获的操作,可以参考 jdk-threadpool-plugin\n这里需要注意,因为 Fiber 也是一种 Runnable:\nprivate[zio] trait FiberRunnable extends Runnable { def location: Trace def run(depth: Int): Unit } zio-v2x-plugin\n阻塞线程池 blockingExecutor 对于 blockingExecutor,其实它只是对 Java 线程池进行了一个包装:\nobject Blocking { val blockingExecutor: zio.Executor = zio.Executor.fromThreadPoolExecutor { val corePoolSize = 0 val maxPoolSize = Int.MaxValue val keepAliveTime = 60000L val timeUnit = TimeUnit.MILLISECONDS val workQueue = new SynchronousQueue[Runnable]() val threadFactory = new NamedThreadFactory(\u0026#34;zio-default-blocking\u0026#34;, true) val threadPool = new ThreadPoolExecutor( corePoolSize, maxPoolSize, keepAliveTime, timeUnit, workQueue, threadFactory ) threadPool } } 由于其本身是对 ThreadPoolExecutor 的封装,所以,当我们已经实现了 zio.Executor 的增强后,只需要使用官方 jdk-threadpool-plugin 插件即可。 这里我们还想要对代码进行定制修改和复用,所以重新使用 Scala 实现了一个 executors-plugin 插件。\n串连 Fiber 上下文 最后,上面谈到过,Fiber 也是一种 Runnable,因此还需要对 zio.internal.FiberRunnable 进行增强。大致分为两点,其实与 jdk-threading-plugin 是一样的。\n 每次创建 zio.internal.FiberRunnable 实例时,都需要保存 现场,即构造函数增强。 每次运行时创建一个过渡的 Span,将当前线程上下文与之前保存在构造函数中的上下文进行关联。Fiber 可能被不同线程执行,所以这是必须的。  zio-v2x-plugin\n说明 当我们完成了对 ZIO Fiber 的上下文传播处理后,任意基于 ZIO 的应用层框架都可以按照普通的 Java 插件思路去开发。 我们只需要找到一个全局切入点,这个切入点应该是每个请求都会调用的方法,然后对这个方法进行增强。\n要想激活插件,只需要在 Release Notes 下载插件,放到您的 skywalking-agent/plugins 目录,重新启动服务即可。\n如果您的项目使用 sbt assembly 打包,您可以参考这个 示例。该项目使用了下列技术栈:\nlibraryDependencies ++= Seq( \u0026#34;io.d11\u0026#34; %% \u0026#34;zhttp\u0026#34; % zioHttp2Version, \u0026#34;dev.zio\u0026#34; %% \u0026#34;zio\u0026#34; % zioVersion, \u0026#34;io.grpc\u0026#34; % \u0026#34;grpc-netty\u0026#34; % \u0026#34;1.50.1\u0026#34;, \u0026#34;com.thesamet.scalapb\u0026#34; %% \u0026#34;scalapb-runtime-grpc\u0026#34; % scalapb.compiler.Version.scalapbVersion ) ++ Seq( \u0026#34;dev.profunktor\u0026#34; %% \u0026#34;redis4cats-effects\u0026#34; % \u0026#34;1.3.0\u0026#34;, \u0026#34;dev.profunktor\u0026#34; %% \u0026#34;redis4cats-log4cats\u0026#34; % \u0026#34;1.3.0\u0026#34;, \u0026#34;dev.profunktor\u0026#34; %% \u0026#34;redis4cats-streams\u0026#34; % \u0026#34;1.3.0\u0026#34;, \u0026#34;org.typelevel\u0026#34; %% \u0026#34;log4cats-slf4j\u0026#34; % \u0026#34;2.5.0\u0026#34;, \u0026#34;dev.zio\u0026#34; %% \u0026#34;zio-interop-cats\u0026#34; % \u0026#34;23.0.03\u0026#34;, \u0026#34;ch.qos.logback\u0026#34; % \u0026#34;logback-classic\u0026#34; % \u0026#34;1.2.11\u0026#34;, \u0026#34;dev.zio\u0026#34; %% \u0026#34;zio-cache\u0026#34; % zioCacheVersion ) ","title":"SkyWalking 如何支持 ZIO 等 Scala Effect Runtime","url":"/zh/2024-01-04-skywalking-for-scala-effect-runtime/"},{"content":"Xiang Wei(GitHub ID, weixiang1862) made a lot of significant contributions to SkyWalking since 2023. He made dozens of pull requests to multiple SkyWalking repositories, including very important features, such as Loki LogQL support, Nginx monitoring, MongoDB monitoring, as well as bug fixes, blog posts, and showcase updates.\nHere are the complete pull request list grouped by repositories.\nskywalking  Support Nginx monitoring. (https://github.com/apache/skywalking/pull/11558) Fix JDBC Log query order. 
(https://github.com/apache/skywalking/pull/11544) Isolate MAL CounterWindow cache by metric name.(https://github.com/apache/skywalking/pull/11526) Support extract timestamp from patterned datetime string in LAL.(https://github.com/apache/skywalking/pull/11489) Adjust AlarmRecord alarmMessage column length to 512. (https://github.com/apache/skywalking/pull/11404) Use listening mode for Apollo configuration.(https://github.com/apache/skywalking/pull/11186) Support LogQL HTTP query APIs. (https://github.com/apache/skywalking/pull/11168) Support MongoDB monitoring (https://github.com/apache/skywalking/pull/11111) Support reduce aggregate function in MQE.(https://github.com/apache/skywalking/pull/11036) Fix instance query in JDBC implementation.(https://github.com/apache/skywalking/pull/11024) Fix metric session cache saving after batch insert when using mysql-connector-java.(https://github.com/apache/skywalking/pull/11012) Add component ID for WebSphere.(https://github.com/apache/skywalking/pull/10974) Support sumLabeled in MAL (https://github.com/apache/skywalking/pull/10916)  skywalking-java  Optimize plugin selector logic.(https://github.com/apache/skywalking-java/pull/651) Fix config length limitation.(https://github.com/apache/skywalking-java/pull/623) Optimize spring-cloud-gateway 2.1.x, 3.x witness class.(https://github.com/apache/skywalking-java/pull/610) Add WebSphere Liberty 23.x plugin.(https://github.com/apache/skywalking-java/pull/560)  skywalking-swck  Remove SwAgent default env JAVA_TOOL_OPTIONS.(https://github.com/apache/skywalking-swck/pull/106) Fix panic in storage reconciler.(https://github.com/apache/skywalking-swck/pull/94) Support inject java agent bootstrap-plugins.(https://github.com/apache/skywalking-swck/pull/91) Fix number env value format error in template yaml.(https://github.com/apache/skywalking-swck/pull/90)  skywalking-showcase  Nginx monitoring showcase.(https://github.com/apache/skywalking-showcase/pull/153) LogQL showcase. (https://github.com/apache/skywalking-showcase/pull/146) MongoDB monitoring showcase. (https://github.com/apache/skywalking-showcase/pull/144)##  skywalking-website  Add blog: monitoring-nginx-by-skywalking.(https://github.com/apache/skywalking-website/pull/666) Add blog: collect and analyse nginx access log by LAL.(https://github.com/apache/skywalking-website/pull/652) Add blog: integrating-skywalking-with-arthas.(https://github.com/apache/skywalking-website/pull/641)   At Dec. 28th, 2023, the project management committee (PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome to join the committer team, Xiang Wei! 
We are honored to have you in the team.\n","title":"Welcome Xiang Wei as new committer","url":"/events/welcome-xiang-wei-as-new-committer/"},{"content":"Background Apache SkyWalking is an open-source application performance management system that helps users collect and aggregate logs, traces, metrics, and events, and display them on the UI.\nIn order to achieve monitoring capabilities for Nginx, we have introduced the Nginx monitoring dashboard in SkyWalking 9.7, and this article will demonstrate the use of this monitoring dashboard and introduce the meaning of related metrics.\nSetup Monitoring Dashboard Metric Define and Collection Since nginx-lua-prometheus is used to define and expose metrics, we need to install lua_nginx_module for Nginx, or use OpenResty directly.\nIn the following example, we define four metrics via nginx-lua-prometheus and expose the metrics interface via nginx ip:9145/metrics:\n histogram: nginx_http_latency,monitoring http latency gauge: nginx_http_connections,monitoring nginx http connections counter: nginx_http_size_bytes,monitoring http size of request and response counter: nginx_http_requests_total,monitoring total http request numbers  http { log_format main '$remote_addr - $remote_user [$time_local] \u0026quot;$request\u0026quot; ' '$status $body_bytes_sent \u0026quot;$http_referer\u0026quot; ' '\u0026quot;$http_user_agent\u0026quot; \u0026quot;$http_x_forwarded_for\u0026quot;'; access_log /var/log/nginx/access.log main; lua_shared_dict prometheus_metrics 10M; # lua_package_path \u0026quot;/path/to/nginx-lua-prometheus/?.lua;;\u0026quot;; init_worker_by_lua_block { prometheus = require(\u0026quot;prometheus\u0026quot;).init(\u0026quot;prometheus_metrics\u0026quot;) metric_bytes = prometheus:counter( \u0026quot;nginx_http_size_bytes\u0026quot;, \u0026quot;Total size of HTTP\u0026quot;, {\u0026quot;type\u0026quot;, \u0026quot;route\u0026quot;}) metric_requests = prometheus:counter( \u0026quot;nginx_http_requests_total\u0026quot;, \u0026quot;Number of HTTP requests\u0026quot;, {\u0026quot;status\u0026quot;, \u0026quot;route\u0026quot;}) metric_latency = prometheus:histogram( \u0026quot;nginx_http_latency\u0026quot;, \u0026quot;HTTP request latency\u0026quot;, {\u0026quot;route\u0026quot;}) metric_connections = prometheus:gauge( \u0026quot;nginx_http_connections\u0026quot;, \u0026quot;Number of HTTP connections\u0026quot;, {\u0026quot;state\u0026quot;}) } server { listen 8080; location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } } server { listen 9145; location /metrics { content_by_lua_block { metric_connections:set(ngx.var.connections_reading, {\u0026quot;reading\u0026quot;}) metric_connections:set(ngx.var.connections_waiting, {\u0026quot;waiting\u0026quot;}) metric_connections:set(ngx.var.connections_writing, {\u0026quot;writing\u0026quot;}) prometheus:collect() } } } } In the above example, we exposed the route-level metrics, and you can also choose to expose the host-level metrics according to the monitoring granularity:\nhttp { log_by_lua_block 
{ metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or upstream-level metrics:\nupstream backend { server ip:port; } server { location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } After defining the metrics, we start nginx and opentelemetry-collector to collect the metrics and send them to the SkyWalking backend for analysis and storage.\nPlease ensure that job_name: 'nginx-monitoring', otherwise the reported data will be ignored by SkyWalking. If you have multiple Nginx instances, you can distinguish them using the service and service_instance_id labels:\nreceivers: prometheus: config: scrape_configs: - job_name: 'nginx-monitoring' scrape_interval: 5s metrics_path: \u0026quot;/metrics\u0026quot; static_configs: - targets: ['nginx:9145'] labels: service: nginx service_instance_id: nginx-instance processors: batch: exporters: otlp: endpoint: oap:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp If everything goes well, you will see the metric data reported by Nginx under the gateway menu of the skywalking-ui:\nAccess \u0026amp; Error Log Collection SkyWalking Nginx monitoring provides log collection and error log analysis. We can use fluent-bit to collect and report access logs and error logs to SkyWalking for analysis and storage.\nFluent-bit configuration below defines the log collection directory as /var/log/nginx/. 
The access and error logs will be reported through rest port 12800 of oap after being processed by rewrite_access_log and rewrite_error_log functions:\n[SERVICE] Flush 5 Daemon Off Log_Level warn [INPUT] Name tail Tag access Path /var/log/nginx/access.log [INPUT] Name tail Tag error Path /var/log/nginx/error.log [FILTER] Name lua Match access Script fluent-bit-script.lua Call rewrite_access_log [FILTER] Name lua Match error Script fluent-bit-script.lua Call rewrite_error_log [OUTPUT] Name stdout Match * Format json [OUTPUT] Name http Match * Host oap Port 12800 URI /v3/logs Format json In the fluent-bit-script.lua, we use LOG_KIND tag to distinguish between access logs and error logs.\nTo associate with the metrics, please ensure that the values of service and serviceInstance are consistent with the metric collection definition in the previous section.\nfunction rewrite_access_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ACCESS_LOG\u0026quot;}}} return 1, timestamp, newRecord end function rewrite_error_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ERROR_LOG\u0026quot; }}} return 1, timestamp, newRecord end After starting fluent-it, we can see the collected log information in the Log tab of the monitoring panel:\nMeaning of Metrics    Metric Name Unit Description Data Source     HTTP Request Trend  The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  The avg number of the connections nginx-lua-prometheus   HTTP Status Trend % The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % The percentage of 4xx status of HTTP requests nginx-lua-prometheus   Error Log Count  The count of log level of nginx error.log fluent-bit    References  nginx-lua-prometheus fluent-bit-lua-filter skywalking-apisix-monitoring  ","title":"Monitoring Nginx with SkyWalking","url":"/blog/2023-12-23-monitoring-nginx-by-skywalking/"},{"content":"背景介绍 在前面的 Blog 使用 LAL 收集并分析 Nginx access log 中,我们以 Nginx access log 为切入点, 演示了 SkyWalking LAL 的日志分析能力。\n为了实现对 Nginx 更全面的监控能力,我们在 SkyWalking 9.7 中引入了 Nginx 监控面板,本文将演示该监控面板的使用,并介绍相关指标的含义。\n监控面板接入 Metric 定义与采集 由于使用了 nginx-lua-prometheus 来定义及暴露指标, 我们需要为 Nginx 安装 lua_nginx_module, 或者直接使用OpenResty。\n下面的例子中,我们通过 nginx-lua-prometheus 定义了四个指标,并通过 ip:9145/metrics 暴露指标接口:\n histogram: nginx_http_latency,监控 http 延时 gauge: nginx_http_connections,监控 http 连接数 counter: nginx_http_size_bytes,监控 http 请求和响应大小 counter: 
nginx_http_requests_total,监控 http 请求次数  http { log_format main '$remote_addr - $remote_user [$time_local] \u0026quot;$request\u0026quot; ' '$status $body_bytes_sent \u0026quot;$http_referer\u0026quot; ' '\u0026quot;$http_user_agent\u0026quot; \u0026quot;$http_x_forwarded_for\u0026quot;'; access_log /var/log/nginx/access.log main; lua_shared_dict prometheus_metrics 10M; # lua_package_path \u0026quot;/path/to/nginx-lua-prometheus/?.lua;;\u0026quot;; init_worker_by_lua_block { prometheus = require(\u0026quot;prometheus\u0026quot;).init(\u0026quot;prometheus_metrics\u0026quot;) metric_bytes = prometheus:counter( \u0026quot;nginx_http_size_bytes\u0026quot;, \u0026quot;Total size of HTTP\u0026quot;, {\u0026quot;type\u0026quot;, \u0026quot;route\u0026quot;}) metric_requests = prometheus:counter( \u0026quot;nginx_http_requests_total\u0026quot;, \u0026quot;Number of HTTP requests\u0026quot;, {\u0026quot;status\u0026quot;, \u0026quot;route\u0026quot;}) metric_latency = prometheus:histogram( \u0026quot;nginx_http_latency\u0026quot;, \u0026quot;HTTP request latency\u0026quot;, {\u0026quot;route\u0026quot;}) metric_connections = prometheus:gauge( \u0026quot;nginx_http_connections\u0026quot;, \u0026quot;Number of HTTP connections\u0026quot;, {\u0026quot;state\u0026quot;}) } server { listen 8080; location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } } server { listen 9145; location /metrics { content_by_lua_block { metric_connections:set(ngx.var.connections_reading, {\u0026quot;reading\u0026quot;}) metric_connections:set(ngx.var.connections_waiting, {\u0026quot;waiting\u0026quot;}) metric_connections:set(ngx.var.connections_writing, {\u0026quot;writing\u0026quot;}) prometheus:collect() } } } } 上面的例子中,我们暴露了 route 级别的指标,你也可以根据监控粒度的需要,选择暴露 host 指标:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } 或者 upstream 指标:\nupstream backend { server ip:port; } server { location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } 完成指标定义后,我们启动 nginx 和 opentelemetry-collector,将指标采集到 SkyWalking 后端进行分析和存储。\n请确保job_name: 'nginx-monitoring',否则上报的数据将被 SkyWalking 忽略。如果你有多个 Nginx 实例,你可以通过service及service_instance_id这两个 label 进行区分:\nreceivers: prometheus: config: scrape_configs: - job_name: 'nginx-monitoring' scrape_interval: 5s metrics_path: \u0026quot;/metrics\u0026quot; static_configs: - 
targets: ['nginx:9145'] labels: service: nginx service_instance_id: nginx-instance processors: batch: exporters: otlp: endpoint: oap:11800 tls: insecure: true service: pipelines: metrics: receivers: - prometheus processors: - batch exporters: - otlp 如果一切顺利,你将在 skywalking-ui 的网关菜单下看到 nginx 上报的指标数据:\nAccess \u0026amp; Error Log 采集 SkyWalking Nginx 监控提供了日志采集及错误日志统计功能,我们可以借助 fluent-bit 采集并上报 access log、error log 给 SkyWalking 分析存储。\n下面 fluent-bit 配置定义了日志采集目录为/var/log/nginx/,access 和 error log 经过 rewrite_access_log 和 rewrite_error_log 处理后会通过 oap 12800 端口进行上报:\n[SERVICE] Flush 5 Daemon Off Log_Level warn [INPUT] Name tail Tag access Path /var/log/nginx/access.log [INPUT] Name tail Tag error Path /var/log/nginx/error.log [FILTER] Name lua Match access Script fluent-bit-script.lua Call rewrite_access_log [FILTER] Name lua Match error Script fluent-bit-script.lua Call rewrite_error_log [OUTPUT] Name stdout Match * Format json [OUTPUT] Name http Match * Host oap Port 12800 URI /v3/logs Format json 在 fluent-bit-script.lua 中,我们通过 LOG_KIND 来区分 access log 和 error log。\n为了能够关联上文采集的 metric,请确保 service 和 serviceInstance 值与上文中指标采集定义一致。\nfunction rewrite_access_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ACCESS_LOG\u0026quot;}}} return 1, timestamp, newRecord end function rewrite_error_log(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;layer\u0026quot;] = \u0026quot;NGINX\u0026quot; newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;nginx-instance\u0026quot; newRecord[\u0026quot;body\u0026quot;] = { text = { text = record.log } } newRecord[\u0026quot;tags\u0026quot;] = { data = {{ key = \u0026quot;LOG_KIND\u0026quot;, value = \u0026quot;NGINX_ERROR_LOG\u0026quot; }}} return 1, timestamp, newRecord end 启动 fluent-it 后,我们便可以在监控面板的 Log tab 看到采集到的日志信息:\n面板指标含义    面板名称 单位 指标含义 数据源     HTTP Request Trend  每秒钟平均请求数 nginx-lua-prometheus   HTTP Latency ms 平均响应延时 nginx-lua-prometheus   HTTP Bandwidth KB 请求响应流量 nginx-lua-prometheus   HTTP Connections  nginx http 连接数 nginx-lua-prometheus   HTTP Status Trend % 每分钟 http 状态码统计 nginx-lua-prometheus   HTTP Status 4xx Percent % 4xx状态码比例 nginx-lua-prometheus   HTTP Status 5xx Percent % 5xx状态码比例 nginx-lua-prometheus   Error Log Count  每分钟错误日志数统计 fluent-bit    参考文档  nginx-lua-prometheus fluent-bit-lua-filter skywalking-apisix-monitoring  ","title":"使用 SkyWalking 监控 Nginx","url":"/zh/2023-12-23-monitoring-nginx-by-skywalking/"},{"content":"🚀 Dive into the World of Cutting-Edge Technology with Apache\u0026rsquo;s Finest! 🌐 Join me today as we embark on an exhilarating journey with two of Apache\u0026rsquo;s most brilliant minds - Sheng Wu and Trista Pan. We\u0026rsquo;re exploring the realms of Apache SkyWalking and Apache ShardingSphere, two groundbreaking initiatives that are reshaping the landscape of open-source technology. 🌟\nIn this exclusive session, we delve deep into Apache SkyWalking - an innovative observability platform that\u0026rsquo;s revolutionizing how we monitor and manage distributed systems in the cloud. 
Witness firsthand how SkyWalking is empowering developers and organizations to gain unparalleled insights into their applications, ensuring performance, reliability, and efficient troubleshooting. 🛰️🔍\nBut there\u0026rsquo;s more! We\u0026rsquo;re also unveiling the secrets of Apache ShardingSphere, a dynamic distributed database ecosystem. Learn how ShardingSphere is making waves in the world of big data, offering scalable, high-performance solutions for data sharding, encryption, and more. This is your gateway to understanding how these technologies are pivotal in handling massive data sets across various industries. 🌐💾\nWhether you\u0026rsquo;re a developer, tech enthusiast, or just curious about the future of open-source technology, this is a conversation you don\u0026rsquo;t want to miss! Get ready to be inspired and informed as we unlock new possibilities and applications of Apache SkyWalking and ShardingSphere. 🚀🌟\nJoin us, and let\u0026rsquo;s decode the future together!\n  Please join and follow Josh\u0026rsquo;s 龙之春 Youtube Coffee + Software with Josh Long Channel to learn more about technology and open source from telanted engineers and industry leads.\n","title":"[Video] Coffee + Software with Josh Long - Apache SkyWalking with Sheng Wu and Apache ShardingSphere with Trista Pan","url":"/blog/2023-12-04-coffee+software-with-josh-long/"},{"content":"SkyWalking CLI 0.13.0 is released. Go to downloads page to find release tars.\nFeatures  Add the sub-command menu get for get the ui menu items by @mrproliu in https://github.com/apache/skywalking-cli/pull/187  Bug Fixes  Fix the record list query does not support new OAP versions (with major version number \u0026gt; 9).  ","title":"Release Apache SkyWalking CLI 0.13.0","url":"/events/release-apache-skywalking-cli-0-13-0/"},{"content":"SkyWalking Java Agent 9.1.0 is released. Go to downloads page to find release tars. Changes by Version\n9.1.0  Fix hbase onConstruct NPE in the file configuration scenario Fix the issue of createSpan failure caused by invalid request URL in HttpClient 4.x/5.x plugin Optimize ElasticSearch 6.x 7.x plugin compatibility Fix an issue with the httpasyncclient component where the isError state is incorrect. Support customization for the length limitation of string configurations Add max length configurations in agent.config file for service_name and instance_name Optimize spring-cloud-gateway 2.1.x, 3.x witness class. Support report MongoDB instance info in Mongodb 4.x plugin. To compatible upper and lower case Oracle TNS url parse. Support collecting ZGC memory pool metrics. Require OAP 9.7.0 to support these new metrics. Upgrade netty-codec-http2 to 4.1.100.Final Add a netty-http 4.1.x plugin to trace HTTP requests. Fix Impala Jdbc URL (including schema without properties) parsing exception. Optimize byte-buddy type description performance. Add eclipse-temurin:21-jre as another base image. Bump byte-buddy to 1.14.9 for JDK21 support. Add JDK21 plugin tests for Spring 6. Bump Lombok to 1.18.30 to adopt JDK21 compiling. Fix PostgreSQL Jdbc URL parsing exception. Bump up grpc version. Optimize plugin selector logic.  Documentation  Fix JDK requirement in the compiling docs. Add JDK21 support in the compiling docs.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 9.1.0","url":"/events/release-apache-skywalking-java-agent-9-1-0/"},{"content":"SkyWalking 9.7.0 is released. 
Go to downloads page to find release tars.\nDark Mode The dafult style mode is changed to the dark mode, and light mode is still available.\nNew Design Log View A new design for the log view is currently available. Easier to locate the logs, and more space for the raw text.\nProject  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. 
Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.7.0","url":"/events/release-apache-skywalking-apm-9.7.0/"},{"content":"SkyWalking Summit 2023 @ Shanghai 会议时间:2023年11月4日 全天 地点:上海大华虹桥假日酒店 赞助商:纵目科技,Tetrate\n会议议程 与 PDF SkyWalking V9 In 2023 - 5 featured releases  吴晟 PDF  B站视频地址\n使用 Terraform 与 Ansible 快速部署 SkyWalking 集群  柯振旭 PDF  B站视频地址\n基于SkyWalking构建全域一体化观测平台  陈修能 PDF  B站视频地址\n云原生可观测性数据库BanyanDB  高洪涛 PDF  B站视频地址\n基于 SkyWalking Agent 的性能剖析和实时诊断  陆家靖 PDF  B站视频地址\n太保科技-多云环境下Zabbix的运用实践  田川 PDF  B站视频地址\nKubeSphere 在可观测性领域的探索与实践  霍秉杰 PDF  B站视频地址\n大型跨国企业的微服务治理  张文杰 PDF  B站视频地址\n","title":"SkyWalking Summit 2023 @ Shanghai 会议回顾","url":"/zh/2023-11-04-skywalking-summit-shanghai/"},{"content":"SkyWalking Infra E2E 1.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support sha256enc and sha512enc encoding in verify case. Support hasPrefix and hasSuffix string verifier in verify case. Bump up kind to v0.14.0. Add a field kubeconfig to support running e2e test on an existing kubernetes cluster. 
Support non-fail-fast execution of test cases support verify cases concurrently Add .exe suffix to windows build artifact Export the kubeconfig path during executing the following steps Automatically pull images before loading into KinD Support outputting the result of \u0026lsquo;verify\u0026rsquo; in YAML format and only outputting the summary of the result of \u0026lsquo;verify\u0026rsquo; Make e2e test itself in github action Support outputting the summary of \u0026lsquo;verify\u0026rsquo; in YAML format Make e2e output summary with numeric information Add \u0026lsquo;subtractor\u0026rsquo; function  Improvements  Bump up GHA to avoid too many warnings Leverage the built-in cache in setup-go@v4 Add batchOutput config to reduce outputs Disable batch mode by default, add it to GHA and enable by default Improve GitHub Actions usability and speed by using composite actions' new feature Migrate deprecated GitHub Actions command to recommended ones Bump up kind to v0.14.0 Optimization of the output information of verification verifier: notEmpty should be able to handle nil Remove invalid configuration in GitHub Actions  Bug Fixes  Fix deprecation warnings Ignore cancel error when copying container logs  Documentation  Add a doc to introduce how to use e2e to test itself  Issues and PR  All issues are here All pull requests are here  ","title":"Release Apache SkyWalking Infra E2E 1.3.0","url":"/events/release-apache-skywalking-infra-e2e-1-3-0/"},{"content":"Aapche SkyWalking PMC 和 committer团队参加了\u0026quot;开源之夏 2023\u0026quot;活动,作为导师,共获得了9个官方赞助名额。最终对学生开放如下任务\n SkyWalking 支持 GraalVM Skywalking Infra E2E 自测试 监控Apache Pulsar 统一BanyanDB的查询计划和查询执行器 使用Helm部署BanyanDB 编写go agent的gRPC插件 监控Kafka 集成SkyWalking PHP到SkyWalking E2E 测试 在线黄金指标异常检测  经过3个月的开发,上游评审,PMC成员评议,PMC Chair复议,OSPP官方委员会评审多个步骤,现公布项目参与人员与最终结果\n通过评审项目(共6个) SkyWalking 支持 GraalVM  学生:张跃骎 学校:辽宁大学 本科 合并PR:11354 后续情况说明:GraalVM因为复杂的生态,替代的代码将被分离到SkyWalking GraalVM Distro, 相关讨论,请参见Issue 11518  Skywalking Infra E2E 自测试  学生:王子忱 学校:华中师范大学 本科 合并PR:115, 116, 117, 118, 119 后续情况说明:此特性已经包含在发行版skywalking-infra-e2e v1.3.0中  统一BanyanDB的查询计划和查询执行器  学生:曾家华 学校:电子科技大学 本科 合并PR:343  使用Helm部署BanyanDB  学生:黄友亮 学校:北京邮电大学 硕士研究生 合并PR:1 情况说明:因为BanyanDB Helm为新项目,学生承接了项目初始化、功能提交、自动化测试,发布准备等多项任务。所参与功能包含在skywalking-banyandb-helm v0.1.0中  编写go agent的gRPC插件  学生:胡宇腾 学校:西安邮电大学 合并PR:88, 94 后续情况说明:该学生在开源之夏相关项目外,完成了feature: add support for iris #99和Go agent APIs功能开发。并发表文章SkyWalking Go Toolkit Trace 详解以及英文译本Detailed explanation of SkyWalking Go Toolkit Trace  监控Kafka  学生:王竹 学校:美国东北大学 ( Northeastern University) 合并PR:11282, UI 318  未通过评审项目(3个) 下列项目因为质量无法达到社区要求,违规等原因,将被标定为失败。 注:在开源之夏中失败的项目,其Pull Reqeust可能因为符合社区功能要求,也被接受合并。\n监控Apache Pulsar  学生:孟祥迎 学校:重庆邮电大学 本科 合并PR:11339 失败原因:项目申请成员,作为ASF Pulsar项目的Committer,在担任Pulsar开源之夏项目导师期间,但依然申请了学生参与项目。属于违规行为。SkyWalking PMC审查了此行为并通报开源之夏组委会。开源之夏组委会依据活动规则取消其结项奖金。  集成SkyWalking PHP到SkyWalking E2E 测试  学生:罗文 学校:San Jose State University B.S. 
合并PR:11330 失败原因:根据pull reqeust中的提交记录,SkyWalking PMC Chair审查了提交明细,学生参与代码数量大幅度小于导师的提交代码。并在考虑到这个项目难度以及明显低于SkyWalking 开源之夏项目的平均水平的情况下,通报给开源之夏组委会。经过组委会综合评定,项目不合格。  在线黄金指标异常检测  学生:黄颖 学校:同济大学 研究生 合并PR:无 失败原因:项目在进度延迟后实现较为简单且粗糙,并且没有提供算法评估结果和文档等。在 PR 开启后的为期一个月审核合并期间,学生并未能成功按预定计划改善实现的质量和文档。和导师以及 SkyWalking 社区缺少沟通。  结语 SkyWalking社区每年都有近10位PMC成员或Committer参与开源之夏中,帮助在校学生了解顶级开源项目、开源社区的运作方式。我们希望大家在每年经过3个月的时间,能够真正的帮助在校学生了解开源和参与开源。 因为,社区即使在考虑到学生能力的情况下,不会明显的降低pull request的接受标准。希望今后的学生,能够在早期,积极、主动和导师,社区其他成员保持高频率的沟通,对参与的项目有更深入、准确的了解。\n","title":"开源之夏 2023 SkyWalking 社区项目情况公示","url":"/zh/2023-11-09-ospp-summary/"},{"content":"SkyWalking NodeJS 0.7.0 is released. Go to downloads page to find release tars.\n Add deadline config for trace request (#118)  ","title":"Release Apache SkyWalking for NodeJS 0.7.0","url":"/events/release-apache-skywalking-nodejs-0-7-0/"},{"content":"背景介绍 Nginx access log 中包含了丰富的信息,例如:日志时间、状态码、响应时间、body 大小等。通过收集并分析 access log,我们可以实现对 Nginx 中接口状态的监控。\n在本案例中,将由 fluent-bit 收集 access log,并通过 HTTP 将日志信息发送给 SkyWalking OAP Server 进行进一步的分析。\n环境准备 实验需要的 Nginx 及 Fluent-bit 相关配置文件都被上传到了Github,有需要的读者可以自行 git clone 并通过 docker compose 启动,本文中将介绍配置文件中几个关键点。\nNginx日志格式配置 LAL 目前支持 JSON、YAML 及 REGEX 日志解析,为了方便获取到日志中的指标字段,我们将 Nginx 的日志格式定义为 JSON.\nhttp { ... ... log_format main '{\u0026quot;remote_addr\u0026quot;: \u0026quot;$remote_addr\u0026quot;,' '\u0026quot;remote_user\u0026quot;: \u0026quot;$remote_user\u0026quot;,' '\u0026quot;request\u0026quot;: \u0026quot;$request\u0026quot;,' '\u0026quot;time\u0026quot;: \u0026quot;$time_iso8601\u0026quot;,' '\u0026quot;status\u0026quot;: \u0026quot;$status\u0026quot;,' '\u0026quot;request_time\u0026quot;:\u0026quot;$request_time\u0026quot;,' '\u0026quot;body_bytes_sent\u0026quot;: \u0026quot;$body_bytes_sent\u0026quot;,' '\u0026quot;http_referer\u0026quot;: \u0026quot;$http_referer\u0026quot;,' '\u0026quot;http_user_agent\u0026quot;: \u0026quot;$http_user_agent\u0026quot;,' '\u0026quot;http_x_forwarded_for\u0026quot;: \u0026quot;$http_x_forwarded_for\u0026quot;}'; access_log /var/log/nginx/access.log main; ... ... 
} Fluent bit Filter 我们通过 Fluent bit 的 lua filter 进行日志格式的改写,将其调整为 SkyWalking 所需要的格式,record的各个字段含义如下:\n body:日志内容体 service:服务名称 serviceInstance:实例名称  function rewrite_body(tag, timestamp, record) local newRecord = {} newRecord[\u0026quot;body\u0026quot;] = { json = { json = record.log } } newRecord[\u0026quot;service\u0026quot;] = \u0026quot;nginx::nginx\u0026quot; newRecord[\u0026quot;serviceInstance\u0026quot;] = \u0026quot;localhost\u0026quot; return 1, timestamp, newRecord end OAP 日志分析 LAL定义 在 filter 中,我们通过条件判断,只处理 service=nginx::nginx 的服务,其他服务依旧走默认逻辑:\n第一步,使用 json 指令对日志进行解析,解析的结果会被存放到 parsed 字段中,通过 parsed 字段我们可以获取 json 日志中的字段信息。\n第二步,使用 timestamp 指令解析 parsed.time 并将其赋值给日志的 timestamp 字段,这里的 time 就是access log json 中的 time。\n第三步,使用 tag 指令给日志打上对应的标签,标签的值依然可以通过 parsed 字段获取。\n第四步,使用 metrics 指令从日志中提取出指标信息,我们共提取了四个指标:\n nginx_log_count:Nginx 每次请求都会生成一条 access log,该指标可以帮助我们统计 Nginx 当前的请求数。 nginx_request_time:access log 中会记录请求时间,该指标可以帮助我们统计上游接口的响应时长。 nginx_body_bytes_sent:body 大小指标可以帮助我们了解网关上的流量情况。 nginx_status_code:状态码指标可以实现对状态码的监控,如果出现异常上涨可以结合 alarm 进行告警。  rules:- name:defaultlayer:GENERALdsl:|filter { if (log.service == \u0026#34;nginx::nginx\u0026#34;) { json { abortOnFailure true }extractor {timestamp parsed.time as String, \u0026#34;yyyy-MM-dd\u0026#39;T\u0026#39;HH:mm:ssXXX\u0026#34;tag status:parsed.statustag remote_addr:parsed.remote_addrmetrics {timestamp log.timestamp as Longlabels service: log.service, instance:log.serviceInstancename \u0026#34;nginx_log_count\u0026#34;value 1}metrics {timestamp log.timestamp as Longlabels service: log.service, instance:log.serviceInstancename \u0026#34;nginx_request_time\u0026#34;value parsed.request_time as Double}metrics {timestamp log.timestamp as Longlabels service: log.service, instance:log.serviceInstancename \u0026#34;nginx_body_bytes_sent\u0026#34;value parsed.body_bytes_sent as Long}metrics {timestamp log.timestamp as Longlabels service: log.service, instance: log.serviceInstance, status:parsed.statusname \u0026#34;nginx_status_code\u0026#34;value 1}}}sink {}}经过 LAL 处理后,我们已经可以在日志面板看到日志信息了,接下来我们将对 LAL 中提取的指标进行进一步分析:\nMAL定义 在 MAL 中,我们可以对上一步 LAL 中提取的指标进行进一步的分析聚合,下面的例子里:\nnginx_log_count、nginx_request_time、nginx_status_code 使用 sum 聚合函数处理,并使用 SUM 方式 downsampling,\nnginx_request_time 使用 avg 聚合函数求平均值,默认使用 AVG 方式 downsampling。\n完成聚合分析后,SkyWalking Meter System 会完成对上述指标的持久化。\nexpSuffix:service([\u0026#39;service\u0026#39;], Layer.GENERAL)metricPrefix:nginxmetricsRules:- name:cpmexp:nginx_log_count.sum([\u0026#39;service\u0026#39;]).downsampling(SUM)- name:avg_request_timeexp:nginx_request_time.avg([\u0026#39;service\u0026#39;])- name:body_bytes_sent_countexp:nginx_body_bytes_sent.sum([\u0026#39;service\u0026#39;]).downsampling(SUM)- name:status_code_countexp:nginx_status_code.sum([\u0026#39;service\u0026#39;,\u0026#39;status\u0026#39;]).downsampling(SUM)最后,我们便可以来到 SkyWalking UI 页面新建 Nginx 仪表板,使用刚刚 MAL 中定义的指标信息创建 Nginx Dashboard(也可以通过上文提到仓库中的 dashboard.json 直接导入测试):\n参考文档  Fluent Bit lua Filter Log Analysis Language Meter Analysis Language  ","title":"使用 LAL 收集并分析 Nginx access log","url":"/zh/2023-10-29-collect-and-analyse-nginx-accesslog-by-lal/"},{"content":"SkyWalking BanyanDB 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  List all properties in a group. Implement Write-ahead Logging Document the clustering. Support multiple roles for banyand server. Support for recovery buffer using wal. Register the node role to the metadata registry. Implement the remote queue to spreading data to data nodes. 
Fix parse environment variables error Implement the distributed query engine. Add mod revision check to write requests. Add TTL to the property. Implement node selector (e.g. PickFirst Selector, Maglev Selector). Unified the buffers separated in blocks to a single buffer in the shard.  Bugs  BanyanDB ui unable to load icon. BanyanDB ui type error Fix timer not released BanyanDB ui misses fields when creating a group Fix data duplicate writing Syncing metadata change events from etcd instead of a local channel.  Chores  Bump several dependencies and tools. Drop redundant \u0026ldquo;discovery\u0026rdquo; module from banyand. \u0026ldquo;metadata\u0026rdquo; module is enough to play the node and shard discovery role.  ","title":"Release Apache SkyWalking BanyanDB 0.5.0","url":"/events/release-apache-skywalking-banyandb-0-5-0/"},{"content":"SkyWalking Go 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support manual tracing APIs for users.  Plugins  Support mux HTTP server framework. Support grpc server and client framework. Support iris framework.  Documentation  Add Tracing APIs document into Manual APIs.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Go 0.3.0","url":"/events/release-apache-skwaylking-go-0.3.0/"},{"content":"Background SkyWalking Go is an open-source, non-intrusive Golang agent used for monitoring, tracing, and data collection within distributed systems. It enables users to observe the flow and latency of requests within the system, collect performance data from various system components for performance monitoring, and troubleshoot issues by tracing the complete path of requests.\nIn version v0.3.0, Skywalking Go introduced the toolkit trace tool. Trace APIs allow users to include critical operations, functions, or services in the tracing scope in situations where plugins do not support them. This inclusion enables tracking and monitoring of these operations and can be used for fault analysis, diagnosis, and performance monitoring.\nBefore diving into this, you can learn how to use the Skywalking Go agent by referring to the SkyWalking Go Agent Quick Start Guide.\nThe following sections will explain how to use these interfaces in specific scenarios.\nIntroducing the Trace Toolkit Execute the following command in the project\u0026rsquo;s root directory:\ngo get github.com/apache/skywalking-go/toolkit To use the toolkit trace interface, you need to import the package into your project:\n\u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Manual Tracing A Span is the fundamental unit of an operation in Tracing. It represents an operation within a specific timeframe, such as a request, a function call, or a specific action. It records essential information about a particular operation, including start and end times, the operation\u0026rsquo;s name, tags (key-value pairs), and relationships between operations. Multiple Spans can form a hierarchical structure.\nIn situations where Skywalking-go doesn\u0026rsquo;t support a particular framework, users can manually create Spans to obtain tracing information.\n(Here, I have removed the supported frameworks for the sake of the example. These are only examples. 
You should reference this when using the APIs in private and/or unsupported frameworks)\nFor example, when you need to trace an HTTP response, you can create a span using trace.CreateEntrySpan() within the method handling the request, and end the span using trace.StopSpan() after processing. When sending an HTTP request, use trace.CreateExitSpan() to create a span, and end the span after the request returns.\nHere are two HTTP services named consumer and provider. When a user accesses the consumer service, it receives the user\u0026rsquo;s request internally and then accesses the provider to obtain resources.\n// consumer.go package main import ( \u0026#34;io\u0026#34; \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func getProvider() (*http.Response, error) { // Create an HTTP request \treq, err := http.NewRequest(\u0026#34;GET\u0026#34;, \u0026#34;http://localhost:9998/provider\u0026#34;, http.NoBody) // Create an ExitSpan before sending the HTTP request. \ttrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { // Injector adds specific header information to the request. \treq.Header.Add(headerKey, headerValue) return nil }) // Finish the ExitSpan and ensure it executes when the function returns using defer. \tdefer trace.StopSpan() // Send the request. \tclient := \u0026amp;http.Client{} resp, err := client.Do(req) if err != nil { return nil, err } return resp, nil } func consumerHandler(w http.ResponseWriter, r *http.Request) { // Create an EntrySpan to trace the execution of the consumerHandler method. \ttrace.CreateEntrySpan(r.Method+\u0026#34;/consumer\u0026#34;, func(headerKey string) (string, error) { // Extractor retrieves the header information added to the request. \treturn r.Header.Get(headerKey), nil }) // Finish the EntrySpan. \tdefer trace.StopSpan() // Prepare to send an HTTP request. \tresp, err := getProvider() body, err := io.ReadAll(resp.Body) if err != nil { return } _, _ = w.Write(body) } func main() { http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } // provider.go package main import ( \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { //Create an EntrySpan to trace the execution of the providerHandler method. \ttrace.CreateEntrySpan(\u0026#34;GET:/provider\u0026#34;, func(headerKey string) (string, error) { return r.Header.Get(headerKey), nil }) // Finish the EntrySpan. \tdefer trace.StopSpan() _, _ = w.Write([]byte(\u0026#34;success from provider\u0026#34;)) } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) _ = http.ListenAndServe(\u0026#34;:9998\u0026#34;, nil) } Then, in the terminal, execute:\ngo build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o consumer ./consumer.go ./consumer go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o provider ./provider.go ./provider curl 127.0.0.1:9999/consumer At this point, the UI will display the span information you created.\nIf you need to trace methods that are executed only locally, you can use trace.CreateLocalSpan(). 
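A minimal sketch of tracing a purely local call could look like this (doLocalWork is only an illustrative name; the CreateLocalSpan and StopSpan calls are the same toolkit APIs shown in the async example later in this post):\nfunc doLocalWork() { _, err := trace.CreateLocalSpan(\u0026#34;doLocalWork\u0026#34;) if err != nil { return } defer trace.StopSpan() // some local work }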
If you don\u0026rsquo;t need to monitor information or states from the other end, you can change ExitSpan and EntrySpan to LocalSpan.\nThe usage examples provided are for illustration purposes, and users can decide the tracing granularity and where in the program they need tracing.\nPlease note that if a program ends too quickly, it may cause tracing data to be unable to be asynchronously sent to the SkyWalking backend.\nPopulate The Span When there\u0026rsquo;s a necessity to record additional information, including creating/updating tags, appending logs, and setting a new operation name of the current traced Span, these APIs should be considered. These actions are used to enhance trace information, providing a more detailed and precise contextual description, which aids in better understanding the events or operations being traced.\nToolkit trace APIs provide a convenient way to access and manipulate trace data, including:\n Setting Tags: SetTag() Adding Logs: AddLog() Setting Span Names: SetOperationName() Getting various IDs: GetTraceID(), GetSegmentID(), GetSpanID()  For example, if you need to record the HTTP status code in a span, you can use the following interfaces while the span is not yet finished:\ntrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { r.Header.Add(headerKey, headerValue) return nil }) resp, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) trace.SetTag(\u0026#34;status_code\u0026#34;, fmt.Sprintf(\u0026#34;%d\u0026#34;, resp.StatusCode)) spanID := trace.GetSpanID() trace.StopSpan() It\u0026rsquo;s important to note that when making these method calls, the current thread should have an active span.\nAsync APIs Async APIs work for manipulating spans across Goroutines. These scenarios might include:\n Applications involving concurrency or multiple goroutines where operating on Spans across different execution contexts is necessary. Updating or logging information for a Span during asynchronous operations. Requiring a delayed completion of a Span.  To use it, follow these steps:\n Obtain the return value of CreateSpan, which is SpanRef. Call spanRef.PrepareAsync() to prepare for operations in another goroutine. When the current goroutine\u0026rsquo;s work is done, call trace.StopSpan() to end the span (affecting only in the current goroutine). Pass the spanRef to another goroutine. After the work is done in any goroutine, call spanRef.AsyncFinish().  Here\u0026rsquo;s an example:\nspanRef, err := trace.CreateLocalSpan(\u0026#34;LocalSpan\u0026#34;) if err != nil { return } spanRef.PrepareAsync() go func(){ // some work  spanRef.AsyncFinish() }() // some work trace.StopSpan() Correlation Context Correlation Context is used to pass parameters within a Span, and the parent Span will pass the Correlation Context to all its child Spans. It allows the transmission of information between spans across different applications. The default number of elements in the Correlation Context is 3, and the content\u0026rsquo;s length cannot exceed 128 bytes.\nCorrelation Context is commonly applied in the following scenarios:\n Passing Information Between Spans: It facilitates the transfer of critical information between different Spans, enabling upstream and downstream Spans to understand the correlation and context between each other. 
Passing Business Parameters: In business scenarios, it involves transmitting specific parameters or information between different Spans, such as authentication tokens, business transaction IDs, and more.  Users can set the Correlation Context using trace.SetCorrelation(key, value) and then retrieve the corresponding value in downstream spans using value := trace.GetCorrelation(key).\nFor example, in the code below, we store the value in the tag of the span, making it easier to observe the result:\npackage main import ( _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; \u0026#34;net/http\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { ctxValue := trace.GetCorrelation(\u0026#34;key\u0026#34;) trace.SetTag(\u0026#34;result\u0026#34;, ctxValue) } func consumerHandler(w http.ResponseWriter, r *http.Request) { trace.SetCorrelation(\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;) _, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) if err != nil { return } } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } Then, in the terminal, execute:\nexport SW_AGENT_NAME=server go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o server ./server.go ./server curl 127.0.0.1:9999/consumer Finally, in the providerHandler() span, you will find the information from the Correlation Context:\nConclusion This article provides an overview of Skywalking Go\u0026rsquo;s Trace APIs and their practical application. These APIs empower users with the ability to customize tracing functionality according to their specific needs.\nFor detailed information about the interfaces, please refer to the documentation: Tracing APIs.\nWelcome everyone to try out the new version.\n","title":"Detailed explanation of SkyWalking Go Toolkit Trace","url":"/blog/2023-10-18-skywalking-toolkit-trace/"},{"content":"背景介绍 SkyWalking Go是一个开源的非侵入式Golang代理程序,用于监控、追踪和在分布式系统中进行数据收集。它使用户能够观察系统内请求的流程和延迟,从各个系统组件收集性能数据以进行性能监控,并通过追踪请求的完整路径来解决问题。\n在版本v0.3.0中,Skywalking Go引入了 toolkit-trace 工具。Trace APIs 允许用户在插件不支持的情况下将关键操作、函数或服务添加到追踪范围。从而实现追踪和监控这些操作,并可用于故障分析、诊断和性能监控。\n在深入了解之前,您可以参考SkyWalking Go Agent快速开始指南来学习如何使用SkyWalking Go Agent。\n下面将会介绍如何在特定场景中使用这些接口。\n导入 Trace Toolkit 在项目的根目录中执行以下命令:\ngo get github.com/apache/skywalking-go/toolkit 使用 toolkit trace 接口前,需要将该包导入到您的项目中:\n\u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; 手动追踪 Span 是 Tracing 中单个操作的基本单元。它代表在特定时间范围内的操作,比如一个请求、一个函数调用或特定动作。Span记录了特定操作的关键信息,包括开始和结束时间、操作名称、标签(键-值对)以及操作之间的关系。多个 Span 可以形成层次结构。\n在遇到 Skywalking Go 不支持的框架的情况下,用户可以手动创建 Span 以获取追踪信息。\n(为了作为示例,我删除了已支持的框架。以下仅为示例。请在使用私有或不支持的框架的 API 时参考)\n例如,当需要追踪HTTP响应时,可以在处理请求的方法内部使用 trace.CreateEntrySpan() 来创建一个 span,在处理完成后使用 trace.StopSpan() 来结束这个 span。在发送HTTP请求时,使用 trace.CreateExitSpan() 来创建一个 span,在请求返回后结束这个 span。\n这里有两个名为 consumer 和 provider 的HTTP服务。当用户访问 consumer 服务时,它在内部接收用户的请求,然后访问 provider 以获取资源。\n// consumer.go package main import ( \u0026#34;io\u0026#34; \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func getProvider() (*http.Response, error) { // 新建 HTTP 请求 \treq, err := http.NewRequest(\u0026#34;GET\u0026#34;, \u0026#34;http://localhost:9998/provider\u0026#34;, http.NoBody) // 在发送 HTTP 请求之前创建 ExitSpan 
\ttrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { // Injector 向请求中添加特定的 header 信息 \treq.Header.Add(headerKey, headerValue) return nil }) // 结束 ExitSpan,使用 defer 确保在函数返回时执行 \tdefer trace.StopSpan() // 发送请求 \tclient := \u0026amp;http.Client{} resp, err := client.Do(req) if err != nil { return nil, err } return resp, nil } func consumerHandler(w http.ResponseWriter, r *http.Request) { // 创建 EntrySpan 来追踪 consumerHandler 方法的执行 \ttrace.CreateEntrySpan(r.Method+\u0026#34;/consumer\u0026#34;, func(headerKey string) (string, error) { // Extractor 获取请求中添加的 header 信息 \treturn r.Header.Get(headerKey), nil }) // 结束 EntrySpan \tdefer trace.StopSpan() // 准备发送 HTTP 请求 \tresp, err := getProvider() body, err := io.ReadAll(resp.Body) if err != nil { return } _, _ = w.Write(body) } func main() { http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } // provider.go package main import ( \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { // 创建 EntrySpan 来追踪 providerHandler 方法的执行 \ttrace.CreateEntrySpan(\u0026#34;GET:/provider\u0026#34;, func(headerKey string) (string, error) { return r.Header.Get(headerKey), nil }) // 结束 EntrySpan \tdefer trace.StopSpan() _, _ = w.Write([]byte(\u0026#34;success from provider\u0026#34;)) } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) _ = http.ListenAndServe(\u0026#34;:9998\u0026#34;, nil) } 然后中终端中执行:\ngo build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o consumer ./consumer.go ./consumer go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o provider ./provider.go ./provider curl 127.0.0.1:9999/consumer 此时 UI 中将会显示你所创建的span信息\n如果需要追踪仅在本地执行的方法,可以使用 trace.CreateLocalSpan()。如果不需要监控来自另一端的信息或状态,可以将 ExitSpan 和 EntrySpan 更改为 LocalSpan。\n以上方法仅作为示例,用户可以决定追踪的粒度以及程序中需要进行追踪的位置。\n注意,如果程序结束得太快,可能会导致 Tracing 数据无法异步发送到 SkyWalking 后端。\n填充 Span 当需要记录额外信息时,包括创建/更新标签、追加日志和设置当前被追踪 Span 的新操作名称时,可以使用这些API。这些操作用于增强追踪信息,提供更详细的上下文描述,有助于更好地理解被追踪的事件或操作。\nToolkit trace APIs 提供了一种简便的方式来访问和操作 Trace 数据:\n 设置标签:SetTag() 添加日志:AddLog() 设置 Span 名称:SetOperationName() 获取各种ID:GetTraceID(), GetSegmentID(), GetSpanID()  例如,如果需要在一个 Span 中记录HTTP状态码,就可以在 Span 未结束时调用以下接口:\ntrace.CreateExitSpan(\u0026#34;GET:/provider\u0026#34;, \u0026#34;localhost:9999\u0026#34;, func(headerKey, headerValue string) error { r.Header.Add(headerKey, headerValue) return nil }) resp, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) trace.SetTag(\u0026#34;status_code\u0026#34;, fmt.Sprintf(\u0026#34;%d\u0026#34;, resp.StatusCode)) spanID := trace.GetSpanID() trace.StopSpan() 在调用这些方法时,当前线程需要有正在活跃的 span。\n异步 APIs 异步API 用于跨 goroutines 操作 spans。包括以下情况:\n 包含多个 goroutines 的程序,需要在不同上下文中中操作 Span。 在异步操作时更新或记录 Span 的信息。 延迟结束 Span。  按照以下步骤使用:\n 获取 CreateSpan 的返回值 SpanRef。 调用 spanRef.PrepareAsync() ,准备在另一个 goroutine 中执行操作。 当前 goroutine 工作结束后,调用 trace.StopSpan() 结束该 span(仅影响当前 goroutine)。 将 spanRef 传递给另一个 goroutine。 完成工作后在任意 goroutine 中调用 spanRef.AsyncFinish()。  以下为示例:\nspanRef, err := trace.CreateLocalSpan(\u0026#34;LocalSpan\u0026#34;) if err != nil { return } spanRef.PrepareAsync() go func(){ // some work \tspanRef.AsyncFinish() }() // some work trace.StopSpan() Correlation Context Correlation Context 用于在 Span 间传递参数,父 Span 会把 Correlation Context 递给其所有子 Spans。它允许在不同应用程序的 
spans 之间传输信息。Correlation Context 的默认元素个数为3,其内容长度不能超过128字节。\nCorrelation Context 通常用于以下等情况:\n 在 Spans 之间传递信息:它允许关键信息在不同 Span 之间传输,使上游和下游 Spans 能够获取彼此之间的关联和上下文。 传递业务参数:在业务场景中,涉及在不同 Span 之间传输特定参数或信息,如认证令牌、交易ID等。  用户可以使用 trace.SetCorrelation(key, value) 设置 Correlation Context ,并可以使用 value := trace.GetCorrelation(key) 在下游 spans 中获取相应的值。\n例如在下面的代码中,我们将值存储在 span 的标签中,以便观察结果:\npackage main import ( _ \u0026#34;github.com/apache/skywalking-go\u0026#34; \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; \u0026#34;net/http\u0026#34; ) func providerHandler(w http.ResponseWriter, r *http.Request) { ctxValue := trace.GetCorrelation(\u0026#34;key\u0026#34;) trace.SetTag(\u0026#34;result\u0026#34;, ctxValue) } func consumerHandler(w http.ResponseWriter, r *http.Request) { trace.SetCorrelation(\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;) _, err := http.Get(\u0026#34;http://localhost:9999/provider\u0026#34;) if err != nil { return } } func main() { http.HandleFunc(\u0026#34;/provider\u0026#34;, providerHandler) http.HandleFunc(\u0026#34;/consumer\u0026#34;, consumerHandler) _ = http.ListenAndServe(\u0026#34;:9999\u0026#34;, nil) } 然后在终端执行:\nexport SW_AGENT_NAME=server go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o server ./server.go ./server curl 127.0.0.1:9999/consumer 最后在 providerHandler() 的 Span 中找到了 Correlation Context 的信息:\n总结 本文讲述了Skywalking Go的 Trace APIs 及其应用。它为用户提供了自定义追踪的功能。\n更多关于该接口的介绍见文档:Tracing APIs。\n欢迎大家来使用新版本。\n","title":"SkyWalking Go Toolkit Trace 详解","url":"/zh/2023-10-18-skywalking-toolkit-trace/"},{"content":"CommunityOverCode (原 ApacheCon) 是 Apache 软件基金会(ASF)的官方全球系列大会。自 1998 年以来\u0026ndash;在 ASF 成立之前 \u0026ndash; ApacheCon 已经吸引了各个层次的参与者,在 300 多个 Apache 项目及其不同的社区中探索 \u0026ldquo;明天的技术\u0026rdquo;。CommunityOverCode 通过动手实作、主题演讲、实际案例研究、培训、黑客松活动等方式,展示 Apache 项目的最新发展和新兴创新。\nCommunityOverCode 展示了无处不在的 Apache 项目的最新突破和 Apache 孵化器中即将到来的创新,以及开源开发和以 Apache 之道领导社区驱动的项目。与会者可以了解到独立于商业利益、企业偏见或推销话术之外的核心开源技术。\nSkyWalking的Golang自动探针实践 刘晗 分布式追踪技术在可观测领域尤为重要,促使各个语言的追踪探针的易用性获得了更多的关注。目前在golang语言探针方面大多为手动埋点探针,接入流程过于复杂,而且局限性很强。本次讨论的重点着重于简化golang语言探针的接入方式,创新性的使用了自动埋点技术,并且突破了很多框架中对于上下文信息的依赖限制。\nB站视频地址\nBanyanDB一个高扩展性的分布式追踪数据库 高洪涛 追踪数据是一种用于分析微服务系统性能和故障的重要数据源,它记录了系统中每个请求的调用链路和相关指标。随着微服务系统的规模和复杂度的增长,追踪数据的量级也呈指数级增长,给追踪数据的存储和查询带来了巨大的挑战。传统的关系型数据库或者时序数据库往往难以满足追踪数据的高效存储和灵活查询的需求。 BanyanDB是一个专为追踪数据而设计的分布式数据库,它具有高扩展性、高性能、高可用性和高灵活性的特点。BanyanDB采用了基于时间序列的分片策略,将追踪数据按照时间范围划分为多个分片,每个分片可以独立地进行存储、复制和负载均衡。BanyanDB还支持多维索引,可以根据不同的维度对追踪数据进行快速过滤和聚合。 在本次演讲中,我们将介绍BanyanDB的设计思想、架构和实现细节,以及它在实际场景中的应用和效果。我们也将展示BanyanDB与其他数据库的对比和优势,以及它未来的发展方向和计划。\nB站视频地址\n","title":"CommunityOverCode Conference 2023 Asia","url":"/zh/2023-08-20-coc-asia-2023/"},{"content":"SkyWalking PHP 0.7.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Start 0.7.0 development. by @jmjoy in https://github.com/apache/skywalking-php/pull/90 Add more info for error log. by @jmjoy in https://github.com/apache/skywalking-php/pull/91 Fix amqplib and predis argument problems. by @jmjoy in https://github.com/apache/skywalking-php/pull/92 Add Memcache plugin. by @jmjoy in https://github.com/apache/skywalking-php/pull/93 Refactor mysqli plugin, support procedural api. by @jmjoy in https://github.com/apache/skywalking-php/pull/94 Fix target address in cross process header. 
by @jmjoy in https://github.com/apache/skywalking-php/pull/95 Release SkyWalking PHP 0.7.0 by @jmjoy in https://github.com/apache/skywalking-php/pull/96  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.7.0...v0.7.0\nPECL https://pecl.php.net/package/skywalking_agent/0.7.0\n","title":"Release Apache SkyWalking PHP 0.7.0","url":"/events/release-apache-skywalking-php-0-7-0/"},{"content":"SkyWalking BanyanDB Helm 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Deploy banyandb with standalone mode by Chart  ","title":"Release Apache SkyWalking BanyanDB Helm 0.1.0","url":"/events/release-apache-skywalking-banyandb-helm-0-1-0/"},{"content":"背景介绍 Arthas 是一款常用的 Java 诊断工具,我们可以在 SkyWalking 监控到服务异常后,通过 Arthas 进一步分析和诊断以快速定位问题。\n在 Arthas 实际使用中,通常由开发人员拷贝或者下载安装包到服务对应的VM或者容器中,attach 到对应的 Java 进程进行问题排查。这一过程不可避免的会造成服务器敏感运维信息的扩散, 而且在分秒必争的问题排查过程中,这些繁琐的操作无疑会浪费大量时间。\nSkyWalking Java Agent 伴随 Java 服务一起启动,并定期上报服务、实例信息给OAP Server。我们可以借助 SkyWalking Java Agent 的插件化能力,开发一个 Arthas 控制插件, 由该插件管理 Arthas 运行生命周期,通过页面化的方式,完成Arthas的启动与停止。最终实现效果可以参考下图:\n要完成上述功能,我们需要实现以下几个关键点:\n 开发 agent arthas-control-plugin,执行 arthas 的启动与停止命令 开发 oap arthas-controller-module ,下发控制命令给 arthas agent plugin 定制 skywalking-ui, 连接 arthas-tunnel-server,发送 arthas 命令并获取执行结果  以上各个模块之间的交互流程如下图所示:\nconnect disconnect 本文涉及的所有代码均已发布在 github skywalking-x-arthas 上,如有需要,大家可以自行下载代码测试。 文章后半部分将主要介绍代码逻辑及其中包含的SkyWalking扩展点。\nagent arthas-control-plugin 首先在 skywalking-java/apm-sniffer/apm-sdk-plugin 下创建一个 arthas-control-plugin, 该模块在打包后会成为 skywalking-agent/plugins 下的一个插件, 其目录结构如下:\narthas-control-plugin/ ├── pom.xml └── src └── main ├── java │ └── org │ └── apache │ └── skywalking │ └── apm │ └── plugin │ └── arthas │ ├── config │ │ └── ArthasConfig.java # 模块配置 │ ├── service │ │ └── CommandListener.java # boot service,监听 oap command │ └── util │ ├── ArthasCtl.java # 控制 arthas 的启动与停止 │ └── ProcessUtils.java ├── proto │ └── ArthasCommandService.proto # 与oap server通信的 grpc 协议定义 └── resources └── META-INF └── services # boot service spi service └── org.apache.skywalking.apm.agent.core.boot.BootService 16 directories, 7 files 在 ArthasConfig.java 中,我们定义了以下配置,这些参数将在 arthas 启动时传递。\n以下的配置可以通过 agent.config 文件、system prop、env variable指定。 关于 skywalking-agent 配置的初始化的具体流程,大家可以参考 SnifferConfigInitializer 。\npublic class ArthasConfig { public static class Plugin { @PluginConfig(root = ArthasConfig.class) public static class Arthas { // arthas 目录  public static String ARTHAS_HOME; // arthas 启动时连接的tunnel server  public static String TUNNEL_SERVER; // arthas 会话超时时间  public static Long SESSION_TIMEOUT; // 禁用的 arthas command  public static String DISABLED_COMMANDS; } } } 接着,我们看下 CommandListener.java 的实现,CommandListener 实现了 BootService 接口, 并通过 resources/META-INF/services 下的文件暴露给 ServiceLoader。\nBootService 的定义如下,共有prepare()、boot()、onComplete()、shutdown()几个方法,这几个方法分别对应插件生命周期的不同阶段。\npublic interface BootService { void prepare() throws Throwable; void boot() throws Throwable; void onComplete() throws Throwable; void shutdown() throws Throwable; default int priority() { return 0; } } 在 ServiceManager 类的 boot() 方法中, 定义了BootService 的 load 与启动流程,该方法 由SkyWalkingAgent 的 premain 调用,在主程序运行前完成初始化与启动:\npublic enum ServiceManager { INSTANCE; ... ... public void boot() { bootedServices = loadAllServices(); prepare(); startup(); onComplete(); } ... ... } 回到我们 CommandListener 的 boot 方法,该方法在 agent 启动之初定义了一个定时任务,这个定时任务会轮询 oap ,查询是否需要启动或者停止arthas:\npublic class CommandListener implements BootService, GRPCChannelListener { ... ... 
@Override public void boot() throws Throwable { getCommandFuture = Executors.newSingleThreadScheduledExecutor( new DefaultNamedThreadFactory(\u0026#34;CommandListener\u0026#34;) ).scheduleWithFixedDelay( new RunnableWithExceptionProtection( this::getCommand, t -\u0026gt; LOGGER.error(\u0026#34;get arthas command error.\u0026#34;, t) ), 0, 2, TimeUnit.SECONDS ); } ... ... } getCommand方法中定义了start、stop的处理逻辑,分别对应页面上的 connect 和 disconnect 操作。 这两个 command 有分别转给 ArthasCtl 的 startArthas 和 stopArthas 两个方法处理,用来控制 arthas 的启停。\n在 startArthas 方法中,启动arthas-core.jar 并使用 skywalking-agent 的 serviceName 和 instanceName 注册连接至配置文件中指定的arthas-tunnel-server。\nArthasCtl 逻辑参考自 Arthas 的 BootStrap.java ,由于不是本篇文章的重点,这里不再赘述,感兴趣的小伙伴可以自行查看。\nswitch (commandResponse.getCommand()) { case START: if (alreadyAttached()) { LOGGER.warn(\u0026#34;arthas already attached, no need start again\u0026#34;); return; } try { arthasTelnetPort = SocketUtils.findAvailableTcpPort(); ArthasCtl.startArthas(PidUtils.currentLongPid(), arthasTelnetPort); } catch (Exception e) { LOGGER.info(\u0026#34;error when start arthas\u0026#34;, e); } break; case STOP: if (!alreadyAttached()) { LOGGER.warn(\u0026#34;no arthas attached, no need to stop\u0026#34;); return; } try { ArthasCtl.stopArthas(arthasTelnetPort); arthasTelnetPort = null; } catch (Exception e) { LOGGER.info(\u0026#34;error when stop arthas\u0026#34;, e); } break; } 看完 arthas 的启动与停止控制逻辑,我们回到 CommandListener 的 statusChanged 方法, 由于要和 oap 通信,这里我们按照惯例监听 grpc channel 的状态,只有状态正常时才会执行上面的getCommand轮询。\npublic class CommandListener implements BootService, GRPCChannelListener { ... ... @Override public void statusChanged(final GRPCChannelStatus status) { if (GRPCChannelStatus.CONNECTED.equals(status)) { Object channel = ServiceManager.INSTANCE.findService(GRPCChannelManager.class).getChannel(); // DO NOT REMOVE Channel CAST, or it will throw `incompatible types: org.apache.skywalking.apm.dependencies.io.grpc.Channel  // cannot be converted to io.grpc.Channel` exception when compile due to agent core\u0026#39;s shade of grpc dependencies.  commandServiceBlockingStub = ArthasCommandServiceGrpc.newBlockingStub((Channel) channel); } else { commandServiceBlockingStub = null; } this.status = status; } ... ... 
} 上面的代码,细心的小伙伴可能会发现,getChannel() 的返回值被向上转型成了 Object, 而在下面的 newBlockingStub 方法中,又强制转成了 Channel。\n看似有点多此一举,其实不然,我们将这里的转型去掉,尝试编译就会收到下面的错误:\n[ERROR] Failed to execute goal org.apache.maven.plugins:maven-compiler-plugin:3.10.1:compile (default-compile) on project arthas-control-plugin: Compilation failure [ERROR] .../CommandListener.java:[59,103] 不兼容的类型: org.apache.skywalking.apm.dependencies.io.grpc.Channel无法转换为io.grpc.Channel 上面的错误提示 ServiceManager.INSTANCE.findService(GRPCChannelManager.class).getChannel() 的返回值类型是 org.apache.skywalking.apm.dependencies.io.grpc.Channel,无法被赋值给 io.grpc.Channel 引用。\n我们查看GRPCChannelManager的getChannel()方法代码会发现,方法定义的返回值明明是 io.grpc.Channel,为什么编译时会报上面的错误?\n其实这是skywalking-agent的一个小魔法,由于 agent-core 最终会被打包进 skywalking-agent.jar,启动时由系统类装载器(或者其他父级类装载器)直接装载, 为了防止所依赖的类库和被监控服务的类发生版本冲突,agent 核心代码在打包时使用了maven-shade-plugin, 该插件会在 maven package 阶段改变 grpc 依赖的包名, 我们在源代码里看到的是 io.grpc.Channel,其实在真正运行时已经被改成了 org.apache.skywalking.apm.dependencies.io.grpc.Channel,这便可解释上面编译报错的原因。\n除了grpc以外,其他一些 well-known 的 dependency 也会进行 shade 操作,详情大家可以参考 apm-agent-core pom.xml :\n\u0026lt;plugin\u0026gt; \u0026lt;artifactId\u0026gt;maven-shade-plugin\u0026lt;/artifactId\u0026gt; \u0026lt;executions\u0026gt; \u0026lt;execution\u0026gt; \u0026lt;phase\u0026gt;package\u0026lt;/phase\u0026gt; \u0026lt;goals\u0026gt; \u0026lt;goal\u0026gt;shade\u0026lt;/goal\u0026gt; \u0026lt;/goals\u0026gt; \u0026lt;configuration\u0026gt; ... ... \u0026lt;relocations\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.com.google.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.com.google.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.grpc.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.grpc.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.netty.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.netty.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.opencensus.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.opencensus.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.io.perfmark.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.io.perfmark.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;relocation\u0026gt; \u0026lt;pattern\u0026gt;${shade.org.slf4j.source}\u0026lt;/pattern\u0026gt; \u0026lt;shadedPattern\u0026gt;${shade.org.slf4j.target}\u0026lt;/shadedPattern\u0026gt; \u0026lt;/relocation\u0026gt; \u0026lt;/relocations\u0026gt; ... ... \u0026lt;/configuration\u0026gt; \u0026lt;/execution\u0026gt; \u0026lt;/executions\u0026gt; \u0026lt;/plugin\u0026gt; 除了上面的注意点以外,我们来看一下另一个场景,假设我们需要在 agent plugin 的 interceptor 中使用 plugin 中定义的 BootService 会发生什么?\n我们回到 BootService 的加载逻辑,为了加载到 plugin 中定义的BootService,ServiceLoader 指定了类装载器为AgentClassLoader.getDefault(), (这行代码历史非常悠久,可以追溯到2018年:Allow use SkyWalking plugin to override service in Agent core. 
#1111 ), 由此可见,plugin 中定义的 BootService 的 classloader 是 AgentClassLoader.getDefault():\nvoid load(List\u0026lt;BootService\u0026gt; allServices) { for (final BootService bootService : ServiceLoader.load(BootService.class, AgentClassLoader.getDefault())) { allServices.add(bootService); } } 再来看下 interceptor 的加载逻辑,InterceptorInstanceLoader.java 的 load 方法规定了如果父加载器相同,plugin 中的 interceptor 将使用一个新创建的 AgentClassLoader (在绝大部分简单场景中,plugin 的 interceptor 都由同一个 AgentClassLoader 加载):\npublic static \u0026lt;T\u0026gt; T load(String className, ClassLoader targetClassLoader) throws IllegalAccessException, InstantiationException, ClassNotFoundException, AgentPackageNotFoundException { ... ... pluginLoader = EXTEND_PLUGIN_CLASSLOADERS.get(targetClassLoader); if (pluginLoader == null) { pluginLoader = new AgentClassLoader(targetClassLoader); EXTEND_PLUGIN_CLASSLOADERS.put(targetClassLoader, pluginLoader); } ... ... } 按照类装载器的委派机制,interceptor 中如果用到了 BootService,也会由当前的类的装载器去装载。 所以 ServiceManager 中装载的 BootService 和 interceptor 装载的 BootService 并不是同一个 (一个 class 文件被不同的 classloader 装载了两次),如果在 interceptor 中 调用 BootService 方法,同样会发生 cast 异常。 由此可见,目前的实现并不支持我们在interceptor中直接调用 plugin 中 BootService 的方法,如果需要调用,只能将 BootService 放到 agent-core 中,由更高级别的类装载器优先装载。\n这其实并不是 skywalking-agent 的问题,skywalking agent plugin 专注于自己的应用场景,只需要关注 trace、meter 以及默认 BootService 的覆盖就可以了。 只是我们如果有扩展 skywalking-agent 的需求,要对其类装载机制做到心中有数,否则可能会出现一些意想不到的问题。\noap arthas-controller-module 看完 agent-plugin 的实现,我们再来看看 oap 部分的修改,oap 同样是模块化的设计,我们可以很轻松的增加一个新的模块,在 /oap-server/ 目录下新建 arthas-controller 子模块:\narthas-controller/ ├── pom.xml └── src └── main ├── java │ └── org │ └── apache │ └── skywalking │ └── oap │ └── arthas │ ├── ArthasControllerModule.java # 模块定义 │ ├── ArthasControllerProvider.java # 模块逻辑实现者 │ ├── CommandQueue.java │ └── handler │ ├── CommandGrpcHandler.java # grpc handler,供 plugin 通信使用 │ └── CommandRestHandler.java # http handler,供 skywalking-ui 通信使用 ├── proto │ └── ArthasCommandService.proto └── resources └── META-INF └── services # 模块及模块实现的 spi service ├── org.apache.skywalking.oap.server.library.module.ModuleDefine └── org.apache.skywalking.oap.server.library.module.ModuleProvider 模块的定义非常简单,只包含一个模块名,由于我们新增的模块并不需要暴露service给其他模块调用,services 我们返回一个空数组\npublic class ArthasControllerModule extends ModuleDefine { public static final String NAME = \u0026#34;arthas-controller\u0026#34;; public ArthasControllerModule() { super(NAME); } @Override public Class\u0026lt;?\u0026gt;[] services() { return new Class[0]; } } 接着是模块实现者,实现者取名为 default,module 指定该 provider 所属模块,由于没有模块的自定义配置,newConfigCreator 我们返回null即可。 start 方法分别向 CoreModule 的 grpc 服务和 http 服务注册了两个 handler,grpc 服务和 http 服务就是我们熟知的 11800 和 12800 端口:\npublic class ArthasControllerProvider extends ModuleProvider { @Override public String name() { return \u0026#34;default\u0026#34;; } @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return ArthasControllerModule.class; } @Override public ConfigCreator\u0026lt;?\u0026gt; newConfigCreator() { return null; } @Override public void prepare() throws ServiceNotProvidedException { } @Override public void start() throws ServiceNotProvidedException, ModuleStartException { // grpc service for agent  GRPCHandlerRegister grpcService = getManager().find(CoreModule.NAME) .provider() .getService(GRPCHandlerRegister.class); grpcService.addHandler( new CommandGrpcHandler() ); // rest service for ui  HTTPHandlerRegister restService = getManager().find(CoreModule.NAME) .provider() .getService(HTTPHandlerRegister.class); restService.addHandler( new CommandRestHandler(), Collections.singletonList(HttpMethod.POST) ); } @Override public void notifyAfterCompleted() throws ServiceNotProvidedException { } @Override public String[] requiredModules() { return new String[0]; } } 最后在配置文件中注册本模块及模块实现者,下面的配置表示 arthas-controller 这个 module 由 default provider 提供实现:\narthas-controller:selector:defaultdefault:CommandGrpcHandler 和 CommandHttpHandler 的逻辑非常简单,CommandHttpHandler 定义了 connect 和 disconnect 接口, 收到请求后会放到一个 Queue 中供 CommandGrpcHandler 消费,Queue 的实现如下,这里不再赘述:\npublic class CommandQueue { private static final Map\u0026lt;String, Command\u0026gt; COMMANDS = new ConcurrentHashMap\u0026lt;\u0026gt;(); // produce by connect、disconnect public static void produceCommand(String serviceName, String instanceName, Command command) { COMMANDS.put(serviceName + instanceName, command); } // consume by agent getCommand task public static Optional\u0026lt;Command\u0026gt; consumeCommand(String serviceName, String instanceName) { return Optional.ofNullable(COMMANDS.remove(serviceName + instanceName)); } } skywalking-ui arthas console 完成了 agent 和 oap 的开发,我们再看下 ui 部分:\n connect:调用oap server connect 接口,并连接 arthas-tunnel-server disconnect:调用oap server disconnect 接口,并与 arthas-tunnel-server 断开连接 arthas 命令交互,这部分代码主要参考 arthas,大家可以查看 web-ui console 的实现  修改完skywalking-ui的代码后,我们可以直接通过 npm run dev 测试了。\n如果需要通过主项目打包,别忘了在apm-webapp 的 ApplicationStartUp.java 类中添加一条 arthas 的路由:\nServer .builder() .port(port, SessionProtocol.HTTP) .service(\u0026#34;/arthas\u0026#34;, oap) .service(\u0026#34;/graphql\u0026#34;, oap) .service(\u0026#34;/internal/l7check\u0026#34;, HealthCheckService.of()) .service(\u0026#34;/zipkin/config.json\u0026#34;, zipkin) .serviceUnder(\u0026#34;/zipkin/api\u0026#34;, zipkin) .serviceUnder(\u0026#34;/zipkin\u0026#34;, FileService.of( ApplicationStartUp.class.getClassLoader(), \u0026#34;/zipkin-lens\u0026#34;) .orElse(zipkinIndexPage)) .serviceUnder(\u0026#34;/\u0026#34;, FileService.of( ApplicationStartUp.class.getClassLoader(), \u0026#34;/public\u0026#34;) .orElse(indexPage)) .build() .start() .join(); 总结  BootService 启动及停止流程 如何利用 BootService 实现自定义逻辑 Agent Plugin 的类装载机制 maven-shade-plugin 的使用与注意点 如何利用 ModuleDefine 与 ModuleProvider 定义新的模块 如何向 GRPC、HTTP Service 添加新的 handler  如果你还有任何的疑问,欢迎大家与我交流 。\n","title":"将 Apache SkyWalking 与 Arthas 集成","url":"/zh/2023-09-17-integrating-skywalking-with-arthas/"},{"content":"SkyWalking Eyes 0.5.0 is released. Go to downloads page to find release tars.\n feat(header templates): add support for AGPL-3.0 by @elijaholmos in https://github.com/apache/skywalking-eyes/pull/125 Upgrade go version to 1.18 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/126 Add MulanPSL-2.0 support. 
by @jmjoy in https://github.com/apache/skywalking-eyes/pull/127 New Header Template: GPL-3.0-or-later by @ddlees in https://github.com/apache/skywalking-eyes/pull/128 Update README.md by @rovast in https://github.com/apache/skywalking-eyes/pull/129 Add more .env.[mode] support for VueJS project by @rovast in https://github.com/apache/skywalking-eyes/pull/130 Docker Multiple Architecture Support :fixes#9089 by @mohammedtabish0 in https://github.com/apache/skywalking-eyes/pull/132 Polish maven test for convenient debug by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/134 feat: list files by git when possible by @tisonkun in https://github.com/apache/skywalking-eyes/pull/133 Switch to npm ci for reliable builds by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/135 Fix optional dependencies are not excluded by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/136 Fix exclude not work for transitive dependencies and add recursive config by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/137 Add some tests for maven resovler by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/138 feat(header-fix): add Svelte support by @elijaholmos in https://github.com/apache/skywalking-eyes/pull/139 dep: do not write license files if they already exist by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/140 fix: not ignore *.txt to make sure files like CMakeLists.txt can be checked by @acelyc111 in https://github.com/apache/skywalking-eyes/pull/141 fix license header normalizer by @xiaoyawei in https://github.com/apache/skywalking-eyes/pull/142 Substitute variables in license content for header command by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/143 Correct indent in Apache-2.0 template by @tisonkun in https://github.com/apache/skywalking-eyes/pull/144 Add copyright-year configuration by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/145 dep/maven: use output file to store the dep tree for cleaner result by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/146 dep/maven: resolve dependencies before analysis by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/147 gha: switch to composite running mode and set up cache by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/149 gha: switch to composite running mode and set up cache by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/150 Fix GitHub Actions wrong path by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/151 Normalize license for cargo. by @jmjoy in https://github.com/apache/skywalking-eyes/pull/153 Remove space characters in license for cargo. 
by @jmjoy in https://github.com/apache/skywalking-eyes/pull/154 Bump up dependencies to fix CVE by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/155 Bump up GHA to depress warnings by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/156 Leverage the built-in cache in setup-go@v4 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/157 Dependencies check should report unknown licneses by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/158 Fix wrong indentation in doc by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/159 Add EPL-2.0 header template by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/160 Fix wrong indentation in doc about multi license config by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/161 dependency resolve with default template and specified output of license by @crholm in https://github.com/apache/skywalking-eyes/pull/163 Bump up go git to support .gitconfig user path by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/164 Draft release notes for 0.5.0 by @kezhenxu94 in https://github.com/apache/skywalking-eyes/pull/165 Remove \u0026ldquo;portions copyright\u0026rdquo; header normalizer by @antgamdia in https://github.com/apache/skywalking-eyes/pull/166  Full Changelog: https://github.com/apache/skywalking-eyes/compare/v0.4.0...v0.5.0\n","title":"Release Apache SkyWalking Eyes 0.5.0","url":"/events/release-apache-skywalking-eyes-0-5-0/"},{"content":"Abstract Apache SkyWalking hosts SkyWalking Summit 2023 on Nov. 4th, 2023, UTC+8, sponsored by ZMOps and Tetrate.\nWe are going to share SkyWalking\u0026rsquo;s roadmap, features, product experiences, and open-source culture.\nWelcome to join us.\nVenue Addr./地址 上海大华虹桥假日酒店\nDate 8:00 - 17:00, Nov 4th.\nRegister Register for IN-PERSON ticket\nCall For Proposals (CFP) The Call For Proposals open from now to 18:00 on Oct. 27th 2023, UTC+8. Submit your proposal at here\nWe have 1 open session and 8 sessions for the whole event.\n Open session is reserved for SkyWalking PMC members. 6 sessions are opened for CFP process. 2 sessions are reserved for sponsors.  Sponsors  ZMOps Inc. Tetrate Inc.  Anti-harassment policy SkyWalkingDay is dedicated to providing a harassment-free experience for everyone. We do not tolerate harassment of participants in any form. Sexual language and imagery will also not be tolerated in any event venue. Participants violating these rules may be sanctioned or expelled without a refund, at the discretion of the event organizers. Our anti-harassment policy can be found at Apache website.\nContact Us Send mail to dev@skywalking.apache.org.\n","title":"SkyWalking Summit 2023 @ Shanghai China","url":"/events/summit-23-cn/"},{"content":"SkyWalking 9.6.0 is released. Go to downloads page to find release tars.\nNew Alerting Kernel  MQE(Metrics Query Expression) and a new notification mechanism are supported.  Support Loki LogQL  Newly added support for Loki LogQL and Grafana Loki Dashboard for SkyWalking collected logs  WARNING  ElasticSearch 6 storage relative tests are removed. It worked and is not promised due to end of life officially.  Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  
OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. 
Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.6.0","url":"/events/release-apache-skywalking-apm-9.6.0/"},{"content":"SkyWalking Java Agent 9.0.0 is released. Go to downloads page to find release tars. 
Changes by Version\n9.0.0 Kernel Updates  Support re-transform/hot-swap classes with other java agents, and remove the obsolete cache enhanced class feature. Implement new naming policies for names of auxiliary type, interceptor delegate field, renamed origin method, method access name, method cache value field. All names are under sw$ name trait. They are predictable and unchanged after re-transform.  * SWAuxiliaryTypeNamingStrategy Auxiliary type name pattern: \u0026lt;origin_class_name\u0026gt;$\u0026lt;name_trait\u0026gt;$auxiliary$\u0026lt;auxiliary_type_instance_hash\u0026gt; * DelegateNamingResolver Interceptor delegate field name pattern: \u0026lt;name_trait\u0026gt;$delegate$\u0026lt;class_name_hash\u0026gt;$\u0026lt;plugin_define_hash\u0026gt;$\u0026lt;intercept_point_hash\u0026gt; * SWMethodNameTransformer Renamed origin method pattern: \u0026lt;name_trait\u0026gt;$original$\u0026lt;method_name\u0026gt;$\u0026lt;method_description_hash\u0026gt; * SWImplementationContextFactory Method cache value field pattern: cachedValue$\u0026lt;name_trait\u0026gt;$\u0026lt;origin_class_name_hash\u0026gt;$\u0026lt;field_value_hash\u0026gt; Accessor method name pattern: \u0026lt;renamed_origin_method\u0026gt;$accessor$\u0026lt;name_trait\u0026gt;$\u0026lt;origin_class_name_hash\u0026gt; Here is an example of manipulated enhanced class with new naming policies of auxiliary classes, fields, and methods\nimport sample.mybatis.controller.HotelController$sw$auxiliary$19cja42; import sample.mybatis.controller.HotelController$sw$auxiliary$p257su0; import sample.mybatis.domain.Hotel; import sample.mybatis.service.HotelService; @RequestMapping(value={\u0026#34;/hotel\u0026#34;}) @RestController public class HotelController implements EnhancedInstance { @Autowired @lazy private HotelService hotelService; private volatile Object _$EnhancedClassField_ws; // Interceptor delegate fields  public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$ain2do0$8im5jm1; public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$ain2do0$edkmf61; public static volatile /* synthetic */ ConstructorInter sw$delegate$td03673$ain2do0$qs9unv1; public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$fl4lnk1$m3ia3a2; public static volatile /* synthetic */ InstMethodsInter sw$delegate$td03673$fl4lnk1$sufrvp1; public static volatile /* synthetic */ ConstructorInter sw$delegate$td03673$fl4lnk1$cteu7s1; // Origin method cache value field  private static final /* synthetic */ Method cachedValue$sw$td03673$g5sobj1; public HotelController() { this(null); sw$delegate$td03673$ain2do0$qs9unv1.intercept(this, new Object[0]); } private /* synthetic */ HotelController(sw.auxiliary.p257su0 p257su02) { } @GetMapping(value={\u0026#34;city/{cityId}\u0026#34;}) public Hotel selectByCityId(@PathVariable(value=\u0026#34;cityId\u0026#34;) int n) { // call interceptor with auxiliary type and parameters and origin method object  return (Hotel)sw$delegate$td03673$ain2do0$8im5jm1.intercept(this, new Object[]{n}, new HotelController$sw$auxiliary$19cja42(this, n), cachedValue$sw$td03673$g5sobj1); } // Renamed origin method  private /* synthetic */ Hotel sw$origin$selectByCityId$a8458p3(int cityId) { /*22*/ return this.hotelService.selectByCityId(cityId); } // Accessor of renamed origin method, calling from auxiliary type  final /* synthetic */ Hotel sw$origin$selectByCityId$a8458p3$accessor$sw$td03673(int n) { // Calling renamed origin method  return this.sw$origin$selectByCityId$a8458p3(n); } 
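// EnhancedInstance accessors, backed by the injected _$EnhancedClassField_ws field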
@OverRide public Object getSkyWalkingDynamicField() { return this._$EnhancedClassField_ws; } @OverRide public void setSkyWalkingDynamicField(Object object) { this._$EnhancedClassField_ws = object; } static { ClassLoader.getSystemClassLoader().loadClass(\u0026#34;org.apache.skywalking.apm.dependencies.net.bytebuddy.dynamic.Nexus\u0026#34;).getMethod(\u0026#34;initialize\u0026#34;, Class.class, Integer.TYPE).invoke(null, HotelController.class, -1072476370); // Method object  cachedValue$sw$td03673$g5sobj1 = HotelController.class.getMethod(\u0026#34;selectByCityId\u0026#34;, Integer.TYPE); } } Auxiliary type of Constructor :\nclass HotelController$sw$auxiliary$p257su0 { } Auxiliary type of selectByCityId method:\nclass HotelController$sw$auxiliary$19cja42 implements Runnable, Callable { private HotelController argument0; private int argument1; public Object call() throws Exception { return this.argument0.sw$origin$selectByCityId$a8458p3$accessor$sw$td03673(this.argument1); } @OverRide public void run() { this.argument0.sw$origin$selectByCityId$a8458p3$accessor$sw$td03673(this.argument1); } HotelController$sw$auxiliary$19cja42(HotelController hotelController, int n) { this.argument0 = hotelController; this.argument1 = n; } } Features and Bug Fixes  Support Jdk17 ZGC metric collect Support Jetty 11.x plugin Support access to the sky-walking tracer context in spring gateway filter Fix the scenario of using the HBase plugin with spring-data-hadoop. Add RocketMQ 5.x plugin Fix the conflict between the logging kernel and the JDK threadpool plugin. Fix the thread safety bug of finishing operation for the span named \u0026ldquo;SpringCloudGateway/sendRequest\u0026rdquo; Fix NPE in guava-eventbus-plugin. Add WebSphere Liberty 23.x plugin Add Plugin to support aerospike Java client Add ClickHouse parsing to the jdbc-common plugin. Support to trace redisson lock Upgrade netty-codec-http2 to 4.1.94.Final Upgrade guava to 32.0.1 Fix issue with duplicate enhancement by ThreadPoolExecutor Add plugin to support for RESTeasy 6.x. Fix the conditions for resetting UUID, avoid the same uuid causing the configuration not to be updated. Fix witness class in springmvc-annotation-5.x-plugin to avoid falling into v3 use cases. Fix Jedis-2.x plugin bug and add test for Redis cluster scene Merge two instrumentation classes to avoid duplicate enhancements in MySQL plugins. Support asynchronous invocation in jetty client 9.0 and 9.x plugin Add nacos-client 2.x plugin Staticize the tags for preventing synchronization in JDK 8 Add RocketMQ-Client-Java 5.x plugin Fix NullPointerException in lettuce-5.x-plugin.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 9.0.0","url":"/events/release-apache-skywalking-java-agent-9-0-0/"},{"content":"SkyWalking PHP 0.6.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Polish doc about Swoole by @wu-sheng in https://github.com/apache/skywalking-php/pull/73 Start 0.6.0 development. by @jmjoy in https://github.com/apache/skywalking-php/pull/74 Fix hook for Doctrine PDO class by @matikij in https://github.com/apache/skywalking-php/pull/76 Log Exception in tracing span when throw. by @jmjoy in https://github.com/apache/skywalking-php/pull/75 Upgrade dependencies and adapt. by @jmjoy in https://github.com/apache/skywalking-php/pull/77 Fix required rust version and add runing php-fpm notice in docs. 
by @jmjoy in https://github.com/apache/skywalking-php/pull/78 Bump openssl from 0.10.48 to 0.10.55 by @dependabot in https://github.com/apache/skywalking-php/pull/79 Fix the situation where the redis port is string. by @jmjoy in https://github.com/apache/skywalking-php/pull/80 Optionally enable zend observer api for auto instrumentation. by @jmjoy in https://github.com/apache/skywalking-php/pull/81 Fix the empty span situation in redis after hook. by @jmjoy in https://github.com/apache/skywalking-php/pull/82 Add mongodb pluhgin. by @jmjoy in https://github.com/apache/skywalking-php/pull/83 Update rust nightly toolchain in CI and format. by @jmjoy in https://github.com/apache/skywalking-php/pull/84 Add notice document for skywalking_agent.enable. by @jmjoy in https://github.com/apache/skywalking-php/pull/85 Upgrade dependencies. by @jmjoy in https://github.com/apache/skywalking-php/pull/86 Fix docs by @heyanlong in https://github.com/apache/skywalking-php/pull/87 Add kafka reporter. by @jmjoy in https://github.com/apache/skywalking-php/pull/88 Release SkyWalking PHP Agent 0.6.0 by @jmjoy in https://github.com/apache/skywalking-php/pull/89  New Contributors  @matikij made their first contribution in https://github.com/apache/skywalking-php/pull/76  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.5.0...v0.6.0\nPECL https://pecl.php.net/package/skywalking_agent/0.6.0\n","title":"Release Apache SkyWalking PHP 0.6.0","url":"/events/release-apache-skwaylking-php-0-6-0/"},{"content":"On Aug. 10th, 2023, HashiCorp announced to adopt the Business Source License (BSL) from Mozilla Public License v2.0 (MPL 2.0), here is their post. They officially annouced they have changed the license for the ALL of their open-source products from the previous MPL 2.0 to a source-available license, BSL 1.1. Meanwhile, HashiCorp APIs, SDKs, and almost all other libraries will remain MPL 2.0.\nHashiCorp Inc. is one of the most important vendors in the cloud-native landscape, as well as Golang ecosystem. This kind of changes would have potential implications for SkyWalking, which is closely integrated with cloud-native technology stacks.\nConclusion First  What does that mean for SkyWalking users?  SkyWalking community has evaluated our dependencies from HashiCorp products and libraries, the current conclusion is\nSkyWalking users would NOT suffer any implication. 
All components of SkyWalking don\u0026rsquo;t have hard-dependency on BSL license affected codes.\nSkyWalking community have found out all following dependencies of all relative repositories, all licenses are TRUELY stayed unchanged, and compatible with Apache 2.0 License.\n OAP Server @kezhenxu94 @wu-sheng  consul-client Apache 2.0 Repo archived on Jul 27, 2023   BanyanDB @hanahmily @lujiajing1126  Server @hanahmily  hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0   CLI @hanahmily No HashiCorp Dependency   SkyWalking OAP CLI @kezhenxu94  github.com/hashicorp/hcl v1.0.0 MPL-2.0 All under swck as transitive dependencies   SWCK @hanahmily  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/go.net BSD-3 hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0   Go agent @mrproliu  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-hclog MIT hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-kms-wrapping/entropy MPL-2.0 hashicorp/go-kms-wrapping/entropy/v2 MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-plugin MPL-2.0 hashicorp/go-retryablehttp MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-secure-stdlib/base62 MPL-2.0 hashicorp/go-secure-stdlib/mlock MPL-2.0 hashicorp/go-secure-stdlib/parseutil MPL-2.0 hashicorp/go-secure-stdlib/password MPL-2.0 hashicorp/go-secure-stdlib/tlsutil MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/go-version MPL-2.0 hashicorp/go.net BSD-3-Clause hashicorp/golang-lru MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0 hashicorp/vault/api MPL-2.0 hashicorp/vault/sdk MPL-2.0 hashicorp/yamux MPL-2.0   SkyWalking eyes @kezhenxu94  none   SkyWalking Infra e2e @kezhenxu94  all under swck as transitive dependencies   SkyWalking rover(ebpf agent) @mrproliu  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-hclog MIT hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-retryablehttp MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0   SkyWalking satellite @mrproliu  hashicorp/consul/api MPL-2.0 hashicorp/consul/sdk MPL-2.0 hashicorp/errwrap MPL-2.0 hashicorp/go-cleanhttp MPL-2.0 hashicorp/go-immutable-radix MPL-2.0 hashicorp/go-msgpack MIT hashicorp/go-multierror MPL-2.0 hashicorp/go-rootcerts MPL-2.0 hashicorp/go-sockaddr MPL-2.0 hashicorp/go-syslog MIT hashicorp/go-uuid MPL-2.0 hashicorp/go.net BSD-3-Clause hashicorp/golang-lru MPL-2.0 hashicorp/hcl MPL-2.0 hashicorp/logutils MPL-2.0 hashicorp/mdns MIT hashicorp/memberlist MPL-2.0 hashicorp/serf MPL-2.0   SkyWalking Terraform (scripts) @kezhenxu94  No HashiCorp Dependency The scripts for Terraform users only. No hard requirement.    
The GitHub ID is listed about the PMC members did the evaluations.\nFAQ If I am using Consul to manage SkyWalking Cluster or configurations, does this license change bring an implication? YES, anyone using their server sides would be affected once you upgrade to later released versions after Aug. 10th, 2023.\nThis is HashiCorp\u0026rsquo;s statement\n End users can continue to copy, modify, and redistribute the code for all non-commercial and commercial use, except where providing a competitive offering to HashiCorp. Partners can continue to build integrations for our joint customers. We will continue to work closely with the cloud service providers to ensure deep support for our mutual technologies. Customers of enterprise and cloud-managed HashiCorp products will see no change as well. Vendors who provide competitive services built on our community products will no longer be able to incorporate future releases, bug fixes, or security patches contributed to our products.\n So, notice that, the implication about whether voilating BSL 1.1 is determined by the HashiCorp Inc about the status of the identified competitive relationship. We can\u0026rsquo;t provide any suggestions. Please refer to FAQs and contacts for the official explanations.\nWill SkyWalking continoue to use HashiCorp Consul as an optional cluster coordinator and/or an optional dynamic configuration server? For short term, YES, we will keep that part of codes, as the licenses of the SDK and the APIs are still in the MPL 2.0.\nBut, during the evaluation, we noticed the consul client we are using is rickfast/consul-client which had been archived by the owner on Jul 27, 2023. So, we are facing the issues that no maintaining and no version to upgrade. If there is not a new consul Java client lib available, we may have to remove this to avoid CVEs or version incompatible with new released servers.\n","title":"The Statement for SkyWalking users on HashiCorp license changes","url":"/blog/2023-08-13-hashicorp-bsl/"},{"content":"SkyWalking Rust 0.8.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Add kafka reporter. by @jmjoy in https://github.com/apache/skywalking-rust/pull/61 Rename AbstractSpan to HandleSpanObject. by @jmjoy in https://github.com/apache/skywalking-rust/pull/62 Bump to 0.8.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/63  ","title":"Release Apache SkyWalking Rust 0.8.0","url":"/events/release-apache-skywalking-rust-0-8-0/"},{"content":"SkyWalking Cloud on Kubernetes 0.8.0 is released. Go to downloads page to find release tars.\nFeatures  [Breaking Change] Remove the way to configure the agent through Configmap.  Bugs  Fix errors in banyandb e2e test.  Chores  Bump up golang to v1.20. Bump up golangci-lint to v1.53.3. Bump up skywalking-java-agent to v8.16.0. Bump up kustomize to v4.5.6. Bump up SkyWalking OAP to 9.5.0.  ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.8.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-8-0/"},{"content":"Announcing Apache SkyWalking Go 0.2.0 I\u0026rsquo;m excited to announce the release of Apache SkyWalking Go 0.2.0! This version packs several awesome new features that I\u0026rsquo;ll overview below.\nLog Reporting The log reporting feature allows the Go agent to automatically collect log content from supported logging frameworks like logrus and zap. The logs are organized and sent to the SkyWalking backend for visualization. 
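For example, a plain zap logger needs no SkyWalking-specific code at all. The minimal sketch below assumes the agent has already been injected into the build (see the Automatic Instrumentation section later in this post); the field name and value are purely illustrative and mirror the logrus example shown below:

package main

import "go.uber.org/zap"

func main() {
	// An ordinary zap logger; nothing SkyWalking-specific is imported here.
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	// With the agent injected, entries like this one are collected and
	// reported to the SkyWalking backend automatically.
	logger.Info("test log", zap.String("module", "test-service"))
}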
You can see how the logs appear for each service in the SkyWalking UI:\nMaking Logs Searchable You can configure certain log fields to make them searchable in SkyWalking. Set the SW_AGENT_LOG_REPORTER_LABEL_KEYS environment variable to include additional fields beyond the default log level.\nFor example, with logrus:\n# define log with fields logrus.WithField(\u0026#34;module\u0026#34;, \u0026#34;test-service\u0026#34;).Info(\u0026#34;test log\u0026#34;) Metrics Reporting The agent can now collect and report custom metrics data from runtime/metrics to the backend. Supported metrics are documented here.\nAutomatic Instrumentation In 0.1.0, you had to manually integrate the agent into your apps. Now, the new commands can automatically analyze and instrument projects at a specified path, no code changes needed! Try using the following command to import skywalking-go into your project:\n# inject to project at current path skywalking-go-agent -inject=./ -all Or you can still use the original manual approach if preferred.\nGet It Now! Check out the CHANGELOG for the full list of additions and fixes. I encourage you to try out SkyWalking Go 0.2.0 today! Let me know if you have any feedback.\n","title":"New Features of SkyWalking Go 0.2.0","url":"/blog/2023-07-31-skywalking-go-0.2.0-release/"},{"content":"SkyWalking Go 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance the plugin rewrite ability to support switch and if/else in the plugin codes. Support inject the skywalking-go into project through agent. Support add configuration for plugin. Support metrics report API for plugin. Support report Golang runtime metrics. Support log reporter. Enhance the logrus logger plugin to support adapt without any settings method invoke. Disable sending observing data if the gRPC connection is not established for reducing the connection error log. Support enhance vendor management project. Support using base docker image to building the application.  Plugins  Support go-redis v9 redis client framework. Support collecting Native HTTP URI parameter on server side. Support Mongo database client framework. Support Native SQL database client framework with MySQL Driver. Support Logrus log report to the backend. Support Zap log report to the backend.  Documentation  Combine Supported Libraries and Performance Test into Plugins section. Add Tracing, Metrics and Logging document into Plugins section.  Bug Fixes  Fix throw panic when log the tracing context before agent core initialized. Fix plugin version matcher tryToFindThePluginVersion to support capital letters in module paths and versions.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Go 0.2.0","url":"/events/release-apache-skwaylking-go-0.2.0/"},{"content":"今年 COSCUP 2023 在国立台湾科技大学举办。 COSCUP 是由台湾开放原始码社群联合推动的年度研讨会,起源于2006年,是台湾自由软体运动 (FOSSM) 重要的推动者之一。活动包括有讲座、摊位、社团同乐会等,除了邀请国际的重量级演讲者之外,台湾本土的自由软体推动者也经常在此发表演说,会议的发起人、工作人员与演讲者都是志愿参与的志工。COSCUP 的宗旨在于提供一个连接开放原始码开发者、使用者与推广者的平台。希望借由每年一度的研讨会来推动自由及开放原始码软体 (FLOSS)。由于有许多赞助商及热心捐助者,所有议程都是免费参加。\n在Go语言中使用自动增强探针完成链路追踪以及监控 B站视频地址\n刘晗,Tetrate\n  讲师介绍 刘晗,Tetrate 工程师,Apache SkyWalking PMC 成员,专注于应用性能可观测性领域。\n  议题概要\n   为什么需要自动增强探针 Go Agent演示 实现原理 未来展望  ","title":"[视频] 在Go语言中使用自动增强探针完成链路追踪以及监控 - COSCUP Taiwan 2023","url":"/zh/2023-07-30-complete-auto-instrumentation-go-agent-for-distributed-tracing-and-monitoring/"},{"content":"SkyWalking Kubernetes Helm Chart 4.5.0 is released. 
Go to downloads page to find release tars.\n Add helm chart for swck v0.7.0. Add pprof port export in satellite. Trunc the resource name in swck\u0026rsquo;s helm chart to no more than 63 characters. Adding the configmap into cluster role for oap init mode. Add config to set Pod securityContext. Keep the job name prefix the same as OAP Deployment name. Use startup probe option for first initialization of application Allow setting env for UI deployment. Add Istio ServiceEntry permissions.  ","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.5.0","url":"/events/release-apache-skywalking-kubernetes-helm-chart-4.5.0/"},{"content":"SkyWalking BanyanDB 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Add TSDB concept document. [UI] Add YAML editor for inputting query criteria. Refactor TopN to support NULL group while keeping seriesID from the source measure. Add a sharded buffer to TSDB to replace Badger\u0026rsquo;s memtable. Badger KV only provides SST. Add a meter system to control the internal metrics. Add multiple metrics for measuring the storage subsystem. Refactor callback of TopNAggregation schema event to avoid deadlock and reload issue. Fix max ModRevision computation with inclusion of TopNAggregation Enhance meter performance Reduce logger creation frequency Add units to memory flags Introduce TSTable to customize the block\u0026rsquo;s structure Add /system endpoint to the monitoring server that displays a list of nodes' system information. Enhance the liaison module by implementing access logging. Add the Istio scenario stress test based on the data generated by the integration access log. Generalize the index\u0026rsquo;s docID to uint64. Remove redundant ID tag type. Improve granularity of index in measure by leveling up from data point to series. [UI] Add measure CRUD operations. [UI] Add indexRule CRUD operations. [UI] Add indexRuleBinding CRUD operations.  Bugs  Fix iterator leaks and ensure proper closure and introduce a closer to guarantee all iterators are closed Fix resource corrupts caused by update indexRule operation Set the maximum integer as the limit for aggregation or grouping operations when performing aggregation or grouping operations in a query plan.  Chores  Bump go to 1.20. Set KV\u0026rsquo;s minimum memtable size to 8MB [docs] Fix docs crud examples error Modified TestGoVersion to check for CPU architecture and Go Version Bump node to 18.16  ","title":"Release Apache SkyWalking BanyanDB 0.4.0","url":"/events/release-apache-skywalking-banyandb-0-4-0/"},{"content":"Background In previous articles, We have discussed how to use SkyWalking and eBPF for performance problem detection within processes and networks. They are good methods to locate issues, but still there are some challenges:\n The timing of the task initiation: It\u0026rsquo;s always challenging to address the processes that require performance monitoring when problems occur. Typically, manual engagement is required to identify processes and the types of performance analysis necessary, which cause extra time during the crash recovery. The root cause locating and the time of crash recovery conflict with each other from time to time. In the real case, rebooting would be the first choice of recovery, meanwhile, it destroys the site of crashing. Resource consumption of tasks: The difficulties to determine the profiling scope. Wider profiling causes more resources than it should. 
We need a method to manage resource consumption and understand which processes necessitate performance analysis. Engineer capabilities: On-call is usually covered by the whole team, which have junior and senior engineers, even senior engineers have their understanding limitation of the complex distributed system, it is nearly impossible to understand the whole system by a single one person.  The Continuous Profiling is a new created mechanism to resolve the above issues.\nAutomate Profiling As profiling is resource costing and high experience required, how about introducing a method to narrow the scope and automate the profiling driven by polices creates by senior SRE engineer? So, in 9.5.0, SkyWalking first introduced preset policy rules for specific services to be monitored by the eBPF Agent in a low-energy manner, and run profiling when necessary automatically.\nPolicy Policy rules specify how to monitor target processes and determine the type of profiling task to initiate when certain threshold conditions are met.\nThese policy rules primarily consist of the following configuration information:\n Monitoring type: This specifies what kind of monitoring should be implemented on the target process. Threshold determination: This defines how to determine whether the target process requires the initiation of a profiling task. Trigger task: This specifies what kind of performance analysis task should be initiated.  Monitoring type The type of monitoring is determined by observing the data values of a specified process to generate corresponding metrics. These metric values can then facilitate subsequent threshold judgment operations. In eBPF observation, we believe the following metrics can most directly reflect the current performance of the program:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Network related monitoring Monitoring network type metrics is not as simple as obtaining basic process information. It requires the initiation of eBPF programs and attaching them to the target process for observation. This is similar to the principles of network profiling task we introduced in the previous article, except that we no longer collect the full content of the data packets. Instead, we only collect the content of messages that match specified HTTP prefixes.\nBy using this method, we can significantly reduce the number of times the kernel sends data to the user space, and the user-space program can parse the data content with less system resource usage. 
This ultimately helps in conserving system resources.\nMetrics collector The eBPF agent would report metrics of processes periodically as follows to indicate the process performance in time.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    Threshold determination For the threshold determination, the judgement is made by the eBPF Agent based on the target monitoring process in its own memory, rather than relying on calculations performed by the SkyWalking backend. The advantage of this approach is that it doesn\u0026rsquo;t have to wait for the results of complex backend computations, and it reduces potential issues brought about by complicated interactions.\nBy using this method, the eBPF Agent can swiftly initiate tasks immediately after conditions are met, without any delay.\nIt includes the following configuration items:\n Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started.  Trigger task When the eBPF Agent detects that the threshold determination in the specified policy meets the rules, it can initiate the corresponding task according to pre-configured rules. For each different target performance task, their task initiation parameters are different:\n On/Off CPU Profiling: It automatically performs performance analysis on processes that meet the conditions, defaulting to 10 minutes of monitoring. Network Profiling: It performs network performance analysis on all processes in the same Service Instance on the current machine, to prevent the cause of the issue from being unrealizable due to too few process being collected, defaulting to 10 minutes of monitoring.  Once the task is initiated, no new profiling tasks would be started for the current process for a certain period. The main reason for this is to prevent frequent task creation due to low threshold settings, which could affect program execution. The default time period is 20 minutes.\nData Flow The figure 1 illustrates the data flow of the continuous profiling feature:\nFigure 1: Data Flow of Continuous Profiling\neBPF Agent with Process Firstly, we need to ensure that the eBPF Agent and the process to be monitored are deployed on the same host machine, so that we can collect relevant data from the process. When the eBPF Agent detects a threshold validation rule that conforms to the policy, it immediately triggers the profiling task for the target process, thereby reducing any intermediate steps and accelerating the ability to pinpoint performance issues.\nSliding window The sliding window plays a crucial role in the eBPF Agent\u0026rsquo;s threshold determination process, as illustrated in the figure 2:\nFigure 2: Sliding Window in eBPF Agent\nEach element in the array represents the data value for a specified second in time. 
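As a rough sketch (illustrative Go only; the names and layout are assumptions, not the eBPF Agent's actual implementation), the window can be modeled as a fixed-size ring of per-second samples, and the paragraph that follows describes how the threshold and count checks walk over it:

package window

// slidingWindow is an illustrative model of the per-second sample ring
// described above; it is not the eBPF Agent's real data structure.
type slidingWindow struct {
	samples []float64 // one value per second; the oldest slot is overwritten first
	next    int       // index of the slot for the upcoming second
}

func newSlidingWindow(size int) *slidingWindow {
	return &slidingWindow{samples: make([]float64, size)}
}

// add records the value observed for the most recent second.
func (w *slidingWindow) add(v float64) {
	w.samples[w.next] = v
	w.next = (w.next + 1) % len(w.samples)
}

// shouldTrigger reports whether at least `count` of the last `period`
// seconds exceeded `threshold`.
func (w *slidingWindow) shouldTrigger(period, count int, threshold float64) bool {
	over := 0
	for i := 1; i <= period && i <= len(w.samples); i++ {
		idx := (w.next - i + len(w.samples)) % len(w.samples)
		if w.samples[idx] > threshold {
			over++
		}
	}
	return over >= count
}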
When the sliding window needs to verify whether it is responsible for a rule, it fetches the content of each element from a certain number of recent elements (period parameter). If an element exceeds the threshold, it is marked in red and counted. If the number of red elements exceeds a certain number, it is deemed to trigger a task.\nUsing a sliding window offers the following two advantages:\n Fast retrieval of recent content: With a sliding window, complex calculations are unnecessary. You can know the data by simply reading a certain number of recent array elements. Solving data spikes issues: Validation through count prevents situations where a data point suddenly spikes and then quickly returns to normal. Verification with multiple values can reveal whether exceeding the threshold is frequent or occasional.  eBPF Agent with SkyWalking Backend The eBPF Agent communicates periodically with the SkyWalking backend, involving three most crucial operations:\n Policy synchronization: Through periodic policy synchronization, the eBPF Agent can keep processes on the local machine updated with the latest policy rules as much as possible. Metrics sending: For processes that are already being monitored, the eBPF Agent periodically sends the collected data to the backend program. This facilitates real-time query of current data values by users, who can also compare this data with historical values or thresholds when problems arise. Profiling task reporting: When the eBPF detects that a certain process has triggered a policy rule, it automatically initiates a performance task, collects relevant information from the current process, and reports it to the SkyWalking backend. This allows users to know when, why, and what type of profiling task was triggered from the interface.  Demo Next, let\u0026rsquo;s quickly demonstrate the continuous profiling feature, so you can understand more specifically what it accomplishes.\nDeploy SkyWalking Showcase SkyWalking Showcase contains a complete set of example services and can be monitored using SkyWalking. For more information, please check the official documentation.\nIn this demo, we only deploy service, the latest released SkyWalking OAP, and UI.\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.5.0 export SW_UI_IMAGE=apache/skywalking-ui:9.5.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.5.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes After deployment is complete, please run the following script to open SkyWalking UI: http://localhost:8080/.\nkubectl port-forward svc/ui 8080:8080 --namespace default Create Continuous Profiling Policy Currently, continues profiling feature is set by default in the Service Mesh panel at the Service level.\nFigure 3: Continuous Policy Tab\nBy clicking on the edit button aside from the Policy List, the polices of current service could be created or updated.\nFigure 4: Edit Continuous Profiling Policy\nMultiple polices are supported. Every policy has the following configurations.\n Target Type: Specifies the type of profiling task to be triggered when the threshold determination is met. Items: For profiling task of the same target, one or more validation items can be specified. As long as one validation item meets the threshold determination, the corresponding performance analysis task will be launched.  Monitor Type: Specifies the type of monitoring to be carried out for the target process. 
Threshold: Depending on the type of monitoring, you need to fill in the corresponding threshold to complete the verification work. Period: Specifies the number of recent seconds of data you want to monitor. Count: Determines the total number of seconds triggered within the recent period. URI Regex/List: This is applicable to HTTP monitoring types, allowing URL filtering.    Done After clicking the save button, you can see the currently created monitoring rules, as shown in the figure 5:\nFigure 5: Continuous Profiling Monitoring Processes\nThe data can be divided into the following parts:\n Policy list: On the left, you can see the rule list you have created. Monitoring Summary List: Once a rule is selected, you can see which pods and processes would be monitored by this rule. It also summarizes how many profiling tasks have been triggered in the last 48 hours by the current pod or process, as well as the last trigger time. This list is also sorted in descending order by the number of triggers to facilitate your quick review.  When you click on a specific process, a new dashboard would show to list metrics and triggered profiling results.\nFigure 6: Continuous Profiling Triggered Tasks\nThe current figure contains the following data contents:\n Task Timeline: It lists all profiling tasks in the past 48 hours. And when the mouse hovers over a task, it would also display detailed information:  Task start and end time: It indicates when the current performance analysis task was triggered. Trigger reason: It would display the reason why the current process was profiled and list out the value of the metric exceeding the threshold when the profiling was triggered. so you can quickly understand the reason.   Task Detail: Similar to the CPU Profiling and Network Profiling introduced in previous articles, this would display the flame graph or process topology map of the current task, depending on the profiling type.  Meanwhile, on the Metrics tab, metrics relative to profiling policies are collected to retrieve the historical trend, in order to provide a comprehensive explanation of the trigger point about the profiling.\nFigure 7: Continuous Profiling Metrics\nConclusion In this article, I have detailed how the continuous profiling feature in SkyWalking and eBPF works. In general, it involves deploying the eBPF Agent service on the same machine where the process to be monitored resides, and monitoring the target process with low resource consumption. When it meets the threshold conditions, it would initiate more complex CPU Profiling and Network Profiling tasks.\nIn the future, we will offer even more features. Stay tuned!\n Twitter, ASFSkyWalking Slack. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. Subscribe to our medium list.  ","title":"Activating Automatical Performance Analysis -- Continuous Profiling","url":"/blog/2023-06-25-intruducing-continuous-profiling-skywalking-with-ebpf/"},{"content":"SkyWalking CLI 0.12.0 is released. 
Go to downloads page to find release tars.\n Add the sub-command records list for adapt the new record query API by @mrproliu in https://github.com/apache/skywalking-cli/pull/167 Add the attached events fields into the trace sub-command by @mrproliu in https://github.com/apache/skywalking-cli/pull/169 Add the sampling config file into the profiling ebpf create network sub-command by @mrproliu in https://github.com/apache/skywalking-cli/pull/171 Add the sub-command profiling continuous for adapt the new continuous profiling API by @mrproliu in https://github.com/apache/skywalking-cli/pull/173 Adapt the sub-command metrics for deprecate scope fron entity by @mrproliu in https://github.com/apache/skywalking-cli/pull/173 Add components in topology related sub-commands. @mrproliu in https://github.com/apache/skywalking-cli/pull/175 Add the sub-command metrics nullable for query the nullable metrics value. @mrproliu in https://github.com/apache/skywalking-cli/pull/176 Adapt the sub-command profiling trace for adapt the new trace profiling protocol. @mrproliu in https://github.com/apache/skywalking-cli/pull/177 Add isEmptyValue field in metrics related sub-commands. @mrproliu in https://github.com/apache/skywalking-cli/pull/180 Add the sub-command metrics execute for execute the metrics query. @mrproliu in https://github.com/apache/skywalking-cli/pull/182 Add the sub-command profiling continuous monitoring for query all continuous profiling monitoring instances. @mrproliu in https://github.com/apache/skywalking-cli/pull/182 Add continuousProfilingCauses.message field in the profiling ebpf list comamnds by @mrproliu in https://github.com/apache/skywalking-cli/pull/184  ","title":"Release Apache SkyWalking CLI 0.12.0","url":"/events/release-apache-skywalking-cli-0-12-0/"},{"content":"SkyWalking Rover 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  Enhance the protocol reader for support long socket data. Add the syscall level event to the trace. Support OpenSSL 3.0.x. Optimized the data structure in BPF. Support continuous profiling. Improve the performance when getting goid in eBPF. Support build multiple architecture docker image: x86_64, arm64.  Bug Fixes  Fix HTTP method name in protocol analyzer. Fixed submitting multiple network profiling tasks with the same uri causing the rover to restart.  Documentation Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Rover 0.5.0","url":"/events/release-apache-skwaylking-rover-0-5-0/"},{"content":"SkyWalking Satellite 1.2.0 is released. Go to downloads page to find release tars.\nFeatures  Introduce pprof module. Support export multiple telemetry service. Update the base docker image. Add timeout configuration for gRPC client. Reduce log print when the enqueue data to the pipeline error. Support transmit the Continuous Profiling protocol.  Bug Fixes  Fix CVE-2022-41721. Use Go 19 to build the Docker image to fix CVEs.  
Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 1.2.0","url":"/events/release-apache-skwaylking-satellite-1-2-0/"},{"content":"背景 在之前的文章中,我们讨论了如何使用 SkyWalking 和 eBPF 来检测性能问题,包括进程和网络。这些方法可以很好地定位问题,但仍然存在一些挑战:\n 任务启动的时间: 当需要进行性能监控时,解决需要性能监控的进程始终是一个挑战。通常需要手动参与,以标识进程和所需的性能分析类型,这会在崩溃恢复期间耗费额外的时间。根本原因定位和崩溃恢复时间有时会发生冲突。在实际情况中,重新启动可能是恢复的第一选择,同时也会破坏崩溃的现场。 任务的资源消耗: 确定分析范围的困难。过宽的分析范围会导致需要更多的资源。我们需要一种方法来管理资源消耗并了解哪些进程需要性能分析。 工程师能力: 通常由整个团队负责呼叫,其中有初级和高级工程师,即使是高级工程师也对复杂的分布式系统有其理解限制,单个人几乎无法理解整个系统。  持续剖析(Continuous Profiling) 是解决上述问题的新机制。\n自动剖析 由于性能分析的资源消耗和高经验要求,因此引入一种方法以缩小范围并由高级 SRE 工程师创建策略自动剖析。因此,在 9.5.0 中,SkyWalking 首先引入了预设策略规则,以低功耗方式监视特定服务的 eBPF 代理,并在必要时自动运行剖析。\n策略 策略规则指定了如何监视目标进程并确定在满足某些阈值条件时应启动何种类型的分析任务。\n这些策略规则主要包括以下配置信息:\n 监测类型: 这指定了应在目标进程上实施什么样的监测。 阈值确定: 这定义了如何确定目标进程是否需要启动分析任务。 触发任务: 这指定了应启动什么类型的性能分析任务。  监测类型 监测类型是通过观察指定进程的数据值来生成相应的指标来确定的。这些指标值可以促进后续的阈值判断操作。在 eBPF 观测中,我们认为以下指标最能直接反映程序的当前性能:\n   监测类型 单位 描述     系统负载 负载 在指定时间段内的系统负载平均值。   进程 CPU 百分比 进程的 CPU 使用率百分比。   进程线程计数 计数 进程中的线程数。   HTTP 错误率 百分比 导致错误响应(例如,4xx 或 5xx 状态代码)的 HTTP 请求的百分比。   HTTP 平均响应时间 毫秒 HTTP 请求的平均响应时间。    相关网络监测 监测网络类型的指标不像获取基本进程信息那么简单。它需要启动 eBPF 程序并将其附加到目标进程以进行观测。这类似于我们在先前文章中介绍的网络分析任务,不同的是我们不再收集数据包的完整内容。相反,我们仅收集与指定 HTTP 前缀匹配的消息的内容。\n通过使用此方法,我们可以大大减少内核向用户空间发送数据的次数,用户空间程序可以使用更少的系统资源来解析数据内容。这最终有助于节省系统资源。\n指标收集器 eBPF 代理会定期报告以下进程度量,以指示进程性能:\n   名称 单位 描述     process_cpu (0-100)% CPU 使用率百分比   process_thread_count 计数 进程中的线程数   system_load 计数 最近一分钟的平均系统负载,每个进程的值相同   http_error_rate (0-100)% 网络请求错误率百分比   http_avg_response_time 毫秒 网络平均响应持续时间    阈值确定 对于阈值的确定,eBPF 代理是基于其自身内存中的目标监测进程进行判断,而不是依赖于 SkyWalking 后端执行的计算。这种方法的优点在于,它不必等待复杂后端计算的结果,减少了复杂交互所带来的潜在问题。\n通过使用此方法,eBPF 代理可以在条件满足后立即启动任务,而无需任何延迟。\n它包括以下配置项:\n 阈值: 检查监测值是否符合指定的期望值。 周期: 监控数据的时间周期(秒),也可以理解为最近的持续时间。 计数: 检测期间触发阈值的次数(秒),也可以理解为最近持续时间内指定阈值规则触发的总次数(秒)。一旦满足计数检查,指定的分析任务将被开始。  触发任务 当 eBPF Agent 检测到指定策略中的阈值决策符合规则时,根据预配置的规则可以启动相应的任务。对于每个不同的目标性能任务,它们的任务启动参数都不同:\n On/Off CPU Profiling: 它会自动对符合条件的进程进行性能分析,缺省情况下监控时间为 10 分钟。 Network Profiling: 它会对当前机器上同一 Service Instance 中的所有进程进行网络性能分析,以防问题的原因因被收集进程太少而无法实现,缺省情况下监控时间为 10 分钟。  一旦任务启动,当前进程将在一定时间内不会启动新的剖析任务。主要原因是为了防止因低阈值设置而频繁创建任务,从而影响程序执行。缺省时间为 20 分钟。\n数据流 图 1 展示了持续剖析功能的数据流:\n图 1: 持续剖析的数据流\neBPF Agent进行进程跟踪 首先,我们需要确保 eBPF Agent 和要监测的进程部署在同一台主机上,以便我们可以从进程中收集相关数据。当 eBPF Agent 检测到符合策略的阈值验证规则时,它会立即为目标进程触发剖析任务,从而减少任何中间步骤并加速定位性能问题的能力。\n滑动窗口 滑动窗口在 eBPF Agent 的阈值决策过程中发挥着至关重要的作用,如图 2 所示:\n图 2: eBPF Agent 中的滑动窗口\n数组中的每个元素表示指定时间内的数据值。当滑动窗口需要验证是否负责某个规则时,它从最近的一定数量的元素 (period 参数) 中获取每个元素的内容。如果一个元素超过了阈值,则标记为红色并计数。如果红色元素的数量超过一定数量,则被认为触发了任务。\n使用滑动窗口具有以下两个优点:\n 快速检索最近的内容:使用滑动窗口,无需进行复杂的计算。你可以通过简单地读取一定数量的最近数组元素来了解数据。 解决数据峰值问题:通过计数进行验证,可以避免数据点突然增加然后快速返回正常的情况。使用多个值进行验证可以揭示超过阈值是频繁还是偶然发生的。  eBPF Agent与OAP后端通讯 eBPF Agent 定期与 SkyWalking 后端通信,涉及三个最关键的操作:\n 策略同步:通过定期的策略同步,eBPF Agent 可以尽可能地让本地机器上的进程与最新的策略规则保持同步。 指标发送:对于已经被监视的进程,eBPF Agent 定期将收集到的数据发送到后端程序。这就使用户能够实时查询当前数据值,用户也可以在出现问题时将此数据与历史值或阈值进行比较。 剖析任务报告:当 eBPF 检测到某个进程触发了策略规则时,它会自动启动性能任务,从当前进程收集相关信息,并将其报告给 SkyWalking 后端。这使用户可以从界面了解何时、为什么和触发了什么类型的剖析任务。  演示 接下来,让我们快速演示持续剖析功能,以便你更具体地了解它的功能。\n部署 SkyWalking Showcase SkyWalking Showcase 包含完整的示例服务,并可以使用 SkyWalking 进行监视。有关详细信息,请查看官方文档。\n在此演示中,我们只部署服务、最新发布的 SkyWalking OAP 和 UI。\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.5.0 export SW_UI_IMAGE=apache/skywalking-ui:9.5.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.5.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes 部署完成后,请运行以下脚本以打开 SkyWalking UI:http://localhost:8080/。\nkubectl port-forward svc/ui 8080:8080 --namespace 
default 创建持续剖析策略 目前,持续剖析功能在 Service Mesh 面板的 Service 级别中默认设置。\n图 3: 持续策略选项卡\n通过点击 Policy List 旁边的编辑按钮,可以创建或更新当前服务的策略。\n图 4: 编辑持续剖析策略\n支持多个策略。每个策略都有以下配置。\n Target Type:指定符合阈值决策时要触发的剖析任务的类型。 Items:对于相同目标的剖析任务,可以指定一个或多个验证项目。只要一个验证项目符合阈值决策,就会启动相应的性能分析任务。  Monitor Type:指定要为目标进程执行的监视类型。 Threshold:根据监视类型的不同,需要填写相应的阈值才能完成验证工作。 Period:指定你要监测的最近几秒钟的数据数量。 Count:确定最近时间段内触发的总秒数。 URI 正则表达式/列表:这适用于 HTTP 监控类型,允许 URL 过滤。    完成 单击保存按钮后,你可以看到当前已创建的监控规则,如图 5 所示:\n图 5: 持续剖析监控进程\n数据可以分为以下几个部分:\n 策略列表:在左侧,你可以看到已创建的规则列表。 监测摘要列表:选择规则后,你可以看到哪些 pod 和进程将受到该规则的监视。它还总结了当前 pod 或进程在过去 48 小时内触发的性能分析任务数量,以及最后一个触发时间。该列表还按触发次数降序排列,以便你快速查看。  当你单击特定进程时,将显示一个新的仪表板以列出指标和触发的剖析结果。\n图 6: 持续剖析触发的任务\n当前图包含以下数据内容:\n 任务时间轴:它列出了过去 48 小时的所有剖析任务。当鼠标悬停在任务上时,它还会显示详细信息:  任务的开始和结束时间:它指示当前性能分析任务何时被触发。 触发原因:它会显示为什么会对当前进程进行剖析,并列出当剖析被触发时超过阈值的度量值,以便你快速了解原因。   任务详情:与前几篇文章介绍的 CPU 剖析和网络剖析类似,它会显示当前任务的火焰图或进程拓扑图,具体取决于剖析类型。  同时,在 Metrics 选项卡中,收集与剖析策略相关的指标以检索历史趋势,以便在剖析的触发点提供全面的解释。\n图 7: 持续剖析指标\n结论 在本文中,我详细介绍了 SkyWalking 和 eBPF 中持续剖析功能的工作原理。通常情况下,它涉及将 eBPF Agent 服务部署在要监视的进程所在的同一台计算机上,并以低资源消耗监测目标进程。当它符合阈值条件时,它会启动更复杂的 CPU 剖析和网络剖析任务。\n在未来,我们将提供更多功能。敬请期待!\n Twitter:ASFSkyWalking Slack:向邮件列表 (dev@skywalking.apache.org) 发送“Request to join SkyWalking Slack”,我们会邀请你加入。 订阅我们的 Medium 列表。  ","title":"自动化性能分析——持续剖析","url":"/zh/2023-06-25-intruducing-continuous-profiling-skywalking-with-ebpf/"},{"content":"SkyWalking 9.5.0 is released. Go to downloads page to find release tars.\nNew Topology Layout Elasticsearch Server Monitoring Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP running in no-init mode. Support to bind TLS status as a part of component for service topology. Fix component ID priority bug. Fix component ID of topology overlap due to storage layer bugs. [Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. [Breaking Change] Sharding-MySQL implementations and tests get removed due to we have the day-based rolling mechanism by default Fix otel k8s-cluster rule add namespace dimension for MAL aggregation calculation(Deployment Status,Deployment Spec Replicas) Support continuous profiling feature. Support collect process level related metrics. Fix K8sRetag reads the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing service label. Fix possible NPE when initialize IntList. Support parse PromQL expression has empty labels in the braces for metadata query. Support alarm metric OP !=. Support metrics query indicates whether value == 0 represents actually zero or no data. Fix NPE when query the not exist series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: Remove empty values from the query result, fix /api/v1/metadata param limit could cause out of bound. Support monitoring the total number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix cve. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for aws-firehose receiver Bump up armeria to 1.23.1 Support Elasticsearch Monitoring. Fix PromQL HTTP API /api/v1/series response missing service label when matching metric. Support ServerSide TopN for BanyanDB. 
Add component ID for Jersey. Remove OpenCensus support, the related codes and docs as it\u0026rsquo;s sunsetting. Support dynamic configuration of searchableTracesTags Support exportErrorStatusTraceOnly for export the error status trace segments through the Kafka channel Add component ID for Grizzly. Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.5.0","url":"/events/release-apache-skywalking-apm-9.5.0/"},{"content":"Celebrating 22k Stars! The Apache SkyWalking community is thrilled to reach the milestone of 22k stars on GitHub! 
This showcases its popularity and impact as an APM and observability tool.\nSince launching in 2016 to provide an open source APM solution, SkyWalking has evolved into a full stack observability platform with distributed tracing, metrics monitoring and alerting. It\u0026rsquo;s seeing widespread adoption globally, especially in Asia where APM needs are expanding rapidly.\nThe growing user base has enabled SkyWalking to achieve massive deployments demonstrating its ability to scale to extreme levels. There have been reported deployments collecting over 100TB of data from companies' complex distributed applications, monitoring over 8000 microservices and analyzing 100 billion distributed traces - providing end-to-end visibility, performance monitoring and issue troubleshooting for some of the largest distributed systems in the world.\nThis success and widespread adoption has attracted an active community of nearly 800 contributors, thanks in part to programs like GSoC and OSPP(Open Source Promotion Plan) that bring in university contributors. The SkyWalking team remains focused on building a reliable, performant platform to observe complex distributed systems. We\u0026rsquo;ll continue innovating with features like service mesh monitoring and metric analytics.Your ongoing support, feedback and contributions inspire us!\nThank you for helping SkyWalking reach 22k stars on GitHub! This is just the beginning - we have ambitious plans and can\u0026rsquo;t wait to have you along our journey!\n","title":"Celebrate 22k stars","url":"/blog/2023-06-13-celebrate-22k-stars/"},{"content":"本文演示如何将 Dubbo-Go 应用程序与 SkyWalking Go 集成,并在 SkyWalking UI 中查看结果。\n以前,如果你想要在 SkyWalking 中监控 Golang 应用程序,需要将项目与 go2sky 项目集成,并手动编写各种带有 go2sky 插件的框架。现在,我们有一个全新的项目( Skywalking Go ),允许你将 Golang 项目集成到 SkyWalking 中,几乎不需要编码,同时提供更大的灵活性和可扩展性。\n在本文中,我们将指导你快速将 skywalking-go 项目集成到 dubbo-go 项目中。\n演示包括以下步骤:\n 部署 SkyWalking:这涉及设置 SkyWalking 后端和 UI 程序,使你能够看到最终效果。 使用 SkyWalking Go 编译程序:在这里,你将把 SkyWalking Go Agent 编译到要监控的 Golang 程序中。 应用部署:你将导出环境变量并部署应用程序,以促进你的服务与 SkyWalking 后端之间的通信。 在 SkyWalking UI 上可视化:最后,你将发送请求并在 SkyWalking UI 中观察效果。  部署 SkyWalking 请从官方 SkyWalking 网站下载 SkyWalking APM 程序 。然后执行以下两个命令来启动服务:\n# 启动 OAP 后端 \u0026gt; bin/oapService.sh # 启动 UI \u0026gt; bin/webappService.sh 接下来,你可以访问地址 http://localhost:8080/ 。此时,由于尚未部署任何应用程序,因此你将看不到任何数据。\n使用 SkyWalking GO 编译 Dubbo Go 程序 这里将演示如何将 Dubbo-go 程序与SkyWalking Go Agent集成。请依次执行如下命令来创建一个新的项目:\n# 安装dubbo-go基础环境 \u0026gt; export GOPROXY=\u0026#34;https://goproxy.cn\u0026#34; \u0026gt; go install github.com/dubbogo/dubbogo-cli@latest \u0026gt; dubbogo-cli install all # 创建demo项目 \u0026gt; mkdir demo \u0026amp;\u0026amp; cd demo \u0026gt; dubbogo-cli newDemo . 
# 升级dubbo-go依赖到最新版本 \u0026gt; go get -u dubbo.apache.org/dubbo-go/v3 在项目的根目录中执行以下命令。此命令将下载 skywalking-go 所需的依赖项:\ngo get github.com/apache/skywalking-go 接下来,请分别在服务端和客户端的main包中引入。包含之后,代码将会更新为:\n// go-server/cmd/server.go package main import ( \u0026#34;context\u0026#34; ) import ( \u0026#34;dubbo.apache.org/dubbo-go/v3/common/logger\u0026#34; \u0026#34;dubbo.apache.org/dubbo-go/v3/config\u0026#34; _ \u0026#34;dubbo.apache.org/dubbo-go/v3/imports\u0026#34; \u0026#34;helloworld/api\u0026#34; // 引入skywalking-go \t_ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) type GreeterProvider struct { api.UnimplementedGreeterServer } func (s *GreeterProvider) SayHello(ctx context.Context, in *api.HelloRequest) (*api.User, error) { logger.Infof(\u0026#34;Dubbo3 GreeterProvider get user name = %s\\n\u0026#34;, in.Name) return \u0026amp;api.User{Name: \u0026#34;Hello \u0026#34; + in.Name, Id: \u0026#34;12345\u0026#34;, Age: 21}, nil } // export DUBBO_GO_CONFIG_PATH= PATH_TO_SAMPLES/helloworld/go-server/conf/dubbogo.yaml func main() { config.SetProviderService(\u0026amp;GreeterProvider{}) if err := config.Load(); err != nil { panic(err) } select {} } 在客户端代码中除了需要引入skywalking-go之外,还需要在main方法中的最后一行增加主携程等待语句,以防止因为客户端快速关闭而无法将Tracing数据异步发送到SkyWalking后端:\npackage main import ( \u0026#34;context\u0026#34; ) import ( \u0026#34;dubbo.apache.org/dubbo-go/v3/common/logger\u0026#34; \u0026#34;dubbo.apache.org/dubbo-go/v3/config\u0026#34; _ \u0026#34;dubbo.apache.org/dubbo-go/v3/imports\u0026#34; \u0026#34;helloworld/api\u0026#34; // 引入skywalking-go \t_ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) var grpcGreeterImpl = new(api.GreeterClientImpl) // export DUBBO_GO_CONFIG_PATH= PATH_TO_SAMPLES/helloworld/go-client/conf/dubbogo.yaml func main() { config.SetConsumerService(grpcGreeterImpl) if err := config.Load(); err != nil { panic(err) } logger.Info(\u0026#34;start to test dubbo\u0026#34;) req := \u0026amp;api.HelloRequest{ Name: \u0026#34;laurence\u0026#34;, } reply, err := grpcGreeterImpl.SayHello(context.Background(), req) if err != nil { logger.Error(err) } logger.Infof(\u0026#34;client response result: %v\\n\u0026#34;, reply) // 增加主携程等待语句 \tselect {} } 接下来,请从官方 SkyWalking 网站下载 Go Agent 程序 。当你使用 go build 命令进行编译时,请在 bin 目录中找到与当前操作系统匹配的代理程序,并添加 -toolexec=\u0026quot;/path/to/go-agent -a 参数。例如,请使用以下命令:\n# 进入项目主目录 \u0026gt; cd demo # 分别编译服务端和客户端 # -toolexec 参数定义为go-agent的路径 # -a 参数用于强制重新编译所有依赖项 \u0026gt; cd go-server \u0026amp;\u0026amp; go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o go-server cmd/server.go \u0026amp;\u0026amp; cd .. \u0026gt; cd go-client \u0026amp;\u0026amp; go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o go-client cmd/client.go \u0026amp;\u0026amp; cd .. 
应用部署 在开始部署应用程序之前,你可以通过环境变量更改 SkyWalking 中当前应用程序的服务名称。你还可以更改其配置,例如服务器端的地址。有关详细信息,请参阅文档 。\n在这里,我们分别启动两个终端窗口来分别启动服务端和客户端。\n在服务端,将服务的名称更改为dubbo-server:\n# 导出dubbo-go服务端配置文件路径 export DUBBO_GO_CONFIG_PATH=/path/to/demo/go-server/conf/dubbogo.yaml # 导出skywalking-go的服务名称 export SW_AGENT_NAME=dubbo-server ./go-server/go-server 在客户端,将服务的名称更改为dubbo-client:\n# 导出dubbo-go客户端配置文件路径 export DUBBO_GO_CONFIG_PATH=/path/to/demo/go-client/conf/dubbogo.yaml # 导出skywalking-go的服务名称 export SW_AGENT_NAME=dubbo-client ./go-client/go-client 在 SkyWalking UI 上可视化 现在,由于客户端会自动像服务器端发送请求,现在就可以在 SkyWalking UI 中观察结果。\n几秒钟后,重新访问 http://localhost:8080 的 SkyWalking UI。能够在主页上看到部署的 dubbo-server 和 dubbo-client 服务。\n此外,在追踪页面上,可以看到刚刚发送的请求。\n并可以在拓扑图页面中看到服务之间的关系。\n总结 在本文中,我们指导你快速开发dubbo-go服务,并将其与 SkyWalking Go Agent 集成。这个过程也适用于你自己的任意 Golang 服务。最终,可以在 SkyWalking 服务中查看显示效果。如果你有兴趣了解 SkyWalking Go 代理当前支持的框架,请参阅此文档 。\n将来,我们将继续扩展 SkyWalking Go 的功能,添加更多插件支持。所以,请继续关注!\n","title":"使用SkyWalking go agent快速实现Dubbo Go监控","url":"/zh/2023-06-05-quick-start-using-skywalking-go-monitoring-dubbo-go/"},{"content":"SkyWalking Go 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Initialize the agent core and user import library. Support gRPC reporter for management, tracing protocols. Automatic detect the log frameworks and inject the log context.  Plugins  Support Gin framework. Support Native HTTP server and client framework. Support Go Restful v3 framework. Support Dubbo server and client framework. Support Kratos v2 server and client framework. Support Go-Micro v4 server and client framework. Support GORM v2 database client framework.  Support MySQL Driver detection.    Documentation  Initialize the documentation.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Go 0.1.0","url":"/events/release-apache-skwaylking-go-0.1.0/"},{"content":"SkyWalking Java Agent 8.16.0 is released. Go to downloads page to find release tars. Changes by Version\n8.16.0  Exclude synthetic methods for the WitnessMethod mechanism Support ForkJoinPool trace Support clickhouse-jdbc-plugin trace sql parameters Support monitor jetty server work thread pool metric Support Jersey REST framework Fix ClassCastException when SQLServer inserts data [Chore] Exclude org.checkerframework:checker-qual and com.google.j2objc:j2objc-annotations [Chore] Exclude proto files in the generated jar Fix Jedis-2.x plugin can not get host info in jedis 3.3.x+ Change the classloader to locate the agent path in AgentPackagePath, from SystemClassLoader to AgentPackagePath\u0026rsquo;s loader. Support Grizzly Trace Fix possible IllegalStateException when using Micrometer. Support Grizzly Work ThreadPool Metric Monitor Fix the gson dependency in the kafka-reporter-plugin. Fix deserialization of kafka producer json config in the kafka-reporter-plugin. Support to config custom decode methods for kafka configurations  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.16.0","url":"/events/release-apache-skywalking-java-agent-8-16-0/"},{"content":"Background Previously, if you wanted to monitor a Golang application in SkyWalking, you would integrate your project with the go2sky project and manually write various frameworks with go2sky plugins. 
Now, we have a brand-new project (SkyWalking Go) that allows you to integrate your Golang projects into SkyWalking with almost zero coding, while offering greater flexibility and scalability.\nIn this article, we will guide you through quickly integrating the skywalking-go project into your Golang project.\nQuick start This demonstration will consist of the following steps:\n Deploy SkyWalking: This involves setting up the SkyWalking backend and UI programs, enabling you to see the final effect. Compile Golang with SkyWalking Go: Here, you\u0026rsquo;ll compile the SkyWalking Go Agent into the Golang program you wish to monitor. Application Deployment: You\u0026rsquo;ll export environment variables and deploy the application to facilitate communication between your service and the SkyWalking backend. Visualization on SkyWalking UI: Finally, you\u0026rsquo;ll send requests and observe the effects within the SkyWalking UI.  Deploy SkyWalking Please download the SkyWalking APM program from the official SkyWalking website. Then execute the following two commands to start the service:\n# startup the OAP backend \u0026gt; bin/oapService.sh # startup the UI \u0026gt; bin/webappService.sh Next, you can access the address at http://localhost:8080/. At this point, as no applications have been deployed yet, you will not see any data.\nCompile Golang with SkyWalking GO Here is a simple business application that starts an HTTP service.\npackage main import \u0026#34;net/http\u0026#34; func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } Execute the following command in the project\u0026rsquo;s root directory. This command will download the dependencies required for skywalking-go:\ngo get github.com/apache/skywalking-go Also, include it in the main package of the project. After the inclusion, the code will update to:\npackage main import ( \u0026#34;net/http\u0026#34; // This is an important step. DON\u0026#39;T MISS IT. \t_ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } Next, please download the Go Agent program from the official SkyWalking website. When you compile with the go build command, find the agent program that matches your current operating system in the bin directory, and add the -toolexec=\u0026quot;/path/to/go-agent\u0026quot; -a parameters. For example, use the following command:\n# Build application with SkyWalking go agent # -toolexec parameter defines the path of go-agent # -a parameter is used to force rebuild all packages \u0026gt; go build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o test . Application Deployment Before you start to deploy the application, you can change the service name of the current application in SkyWalking through environment variables. You can also change its configuration, such as the address of the backend server. 
For specific details, please refer to the documentation.\nHere, we\u0026rsquo;re just changing the name of the current service to demo.\n# Change the service name \u0026gt; export SW_AGENT_NAME=demo Next, you can start the application:\n# Start the application \u0026gt; ./test Visualization on SkyWalking UI Now, you can send a request to the application and observe the results in the SkyWalking UI.\n# Send a request \u0026gt; curl http://localhost:8000/hello After a few seconds, you can revisit the SkyWalking UI at http://localhost:8080. You will be able to see the demo service you deployed on the homepage.\nMoreover, on the Trace page, you can see the request you just sent.\nConclusion In this article, we\u0026rsquo;ve guided you to quickly develop a demo service and integrate it with SkyWalking Go Agent. This process is also applicable to your own Golang services. Ultimately, you can view the display effect in the SkyWalking service. If you\u0026rsquo;re interested in learning which frameworks the SkyWalking Go agent currently supports, please refer to this documentation.\nIn the future, we will continue to expand the functionality of SkyWalking Go, adding more plugin support. So, stay tuned!\n","title":"Quick start with SkyWalking Go Agent","url":"/blog/2023-06-01-quick-start-with-skywalking-go-agent/"},{"content":"本文演示如何将应用程序与 SkyWalking Go 集成,并在 SkyWalking UI 中查看结果。\n以前,如果你想要在 SkyWalking 中监控 Golang 应用程序,需要将项目与 go2sky 项目集成,并手动编写各种带有 go2sky 插件的框架。现在,我们有一个全新的项目(Skywalking Go ),允许你将 Golang 项目集成到 SkyWalking 中,几乎不需要编码,同时提供更大的灵活性和可扩展性。\n在本文中,我们将指导你快速将 skywalking-go 项目集成到 Golang 项目中。\n演示包括以下步骤:\n 部署 SkyWalking:这涉及设置 SkyWalking 后端和 UI 程序,使你能够看到最终效果。 使用 SkyWalking Go 编译 Golang:在这里,你将把 SkyWalking Go Agent 编译到要监控的 Golang 程序中。 应用部署:你将导出环境变量并部署应用程序,以促进你的服务与 SkyWalking 后端之间的通信。 在 SkyWalking UI 上可视化:最后,你将发送请求并在 SkyWalking UI 中观察效果。  部署 SkyWalking 请从官方 SkyWalking 网站下载 SkyWalking APM 程序 。然后执行以下两个命令来启动服务:\n# 启动 OAP 后端 \u0026gt; bin/oapService.sh # 启动 UI \u0026gt; bin/webappService.sh 接下来,你可以访问地址 http://localhost:8080/ 。此时,由于尚未部署任何应用程序,因此你将看不到任何数据。\n使用 SkyWalking GO 编译 Golang 这里有一个简单的业务应用程序,启动了一个 HTTP 服务。\npackage main import \u0026#34;net/http\u0026#34; func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } 在项目的根目录中执行以下命令。此命令将下载 skywalking-go 所需的依赖项:\ngo get github.com/apache/skywalking-go 接下来,请将其包含在项目的 main 包中。包含之后,代码将会更新为:\npackage main import ( \u0026#34;net/http\u0026#34; _ \u0026#34;github.com/apache/skywalking-go\u0026#34; ) func main() { http.HandleFunc(\u0026#34;/hello\u0026#34;, func(writer http.ResponseWriter, request *http.Request) { writer.Write([]byte(\u0026#34;Hello World\u0026#34;)) }) err := http.ListenAndServe(\u0026#34;:8000\u0026#34;, nil) if err != nil { panic(err) } } 接下来,请从官方 SkyWalking 网站下载 Go Agent 程序 。当你使用 go build 命令进行编译时,请在 bin 目录中找到与当前操作系统匹配的代理程序,并添加 -toolexec=\u0026quot;/path/to/go-agent\u0026quot; -a 参数。例如,请使用以下命令:\ngo build -toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a -o test . 
应用部署 在开始部署应用程序之前,你可以通过环境变量更改 SkyWalking 中当前应用程序的服务名称。你还可以更改其配置,例如服务器端的地址。有关详细信息,请参阅文档 。\n在这里,我们只是将当前服务的名称更改为 demo。\n接下来,你可以启动应用程序:\nexport SW_AGENT_NAME=demo ./test 在 SkyWalking UI 上可视化 现在,向应用程序发送请求并在 SkyWalking UI 中观察结果。\n几秒钟后,重新访问 http://localhost:8080 的 SkyWalking UI。能够在主页上看到部署的 demo 服务。\n此外,在追踪页面上,可以看到刚刚发送的请求。\n总结 在本文中,我们指导你快速开发 demo 服务,并将其与 SkyWalking Go Agent 集成。这个过程也适用于你自己的 Golang 服务。最终,可以在 SkyWalking 服务中查看显示效果。如果你有兴趣了解 SkyWalking Go 代理当前支持的框架,请参阅此文档 。\n将来,我们将继续扩展 SkyWalking Go 的功能,添加更多插件支持。所以,请继续关注!\n","title":"SkyWalking Go Agent 快速开始指南","url":"/zh/2023-06-01-quick-start-with-skywalking-go-agent/"},{"content":"SkyWalking Rust 0.7.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Obtain Span object without intermediary. by @jmjoy in https://github.com/apache/skywalking-rust/pull/57 Rename module skywalking_proto to proto. by @jmjoy in https://github.com/apache/skywalking-rust/pull/59 Add Span::prepare_for_async method and AbstractSpan trait. by @jmjoy in https://github.com/apache/skywalking-rust/pull/58 Bump to 0.7.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/60  ","title":"Release Apache SkyWalking Rust 0.7.0","url":"/events/release-apache-skywalking-rust-0-7-0/"},{"content":"SkyWalking PHP 0.5.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Bump openssl from 0.10.45 to 0.10.48 by @dependabot in https://github.com/apache/skywalking-php/pull/60 Make the SKYWALKING_AGENT_ENABLE work in the request hook as well. by @jmjoy in https://github.com/apache/skywalking-php/pull/61 Support tracing curl_multi_* api. by @jmjoy in https://github.com/apache/skywalking-php/pull/62 Fix parent endpoint and peer in segment ref and tag url in entry span. by @jmjoy in https://github.com/apache/skywalking-php/pull/63 Bump h2 from 0.3.15 to 0.3.17 by @dependabot in https://github.com/apache/skywalking-php/pull/65 Add amqplib plugin for producer. by @jmjoy in https://github.com/apache/skywalking-php/pull/64 Upgrade and adapt phper. by @jmjoy in https://github.com/apache/skywalking-php/pull/66 Refactor script create_package_xml. by @jmjoy in https://github.com/apache/skywalking-php/pull/67 Refactor predis plugin to hook Client. by @jmjoy in https://github.com/apache/skywalking-php/pull/68 Canonicalize unknown. by @jmjoy in https://github.com/apache/skywalking-php/pull/69 Bump guzzlehttp/psr7 from 2.4.0 to 2.5.0 in /tests/php by @dependabot in https://github.com/apache/skywalking-php/pull/70 Enhance support for Swoole. by @jmjoy in https://github.com/apache/skywalking-php/pull/71 Bump to 0.5.0. by @jmjoy in https://github.com/apache/skywalking-php/pull/72  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.4.0...v0.5.0\nPECL https://pecl.php.net/package/skywalking_agent/0.5.0\n","title":"Release Apache SkyWalking PHP 0.5.0","url":"/events/release-apache-skwaylking-php-0-5-0/"},{"content":"SkyWalking Python 1.0.1 is released! 
Go to downloads page to find release tars.\nPyPI Wheel: https://pypi.org/project/apache-skywalking/1.0.1/\nDockerHub Image: https://hub.docker.com/r/apache/skywalking-python\n  Upgrading from v1.0.0 to v1.0.1 is strongly encouraged\n This is a critical performance-oriented patch to address a CPU surge reported in https://github.com/apache/skywalking/issues/10672    Feature:\n Add a new workflow to push docker images for arm64 and amd64 (#297)    Plugins:\n Optimize loguru reporter plugin.(#302)    Fixes:\n Fix sw8 loss when use aiohttp (#299, issue#10669) Critical: Fix a bug that leads to high cpu usage (#300, issue#10672)    Others:\n Use Kraft mode in E2E Kafka reporter tests (#303)    New Contributors  @Forstwith made their first contribution in https://github.com/apache/skywalking-python/pull/299 @FAWC438 made their first contribution in https://github.com/apache/skywalking-python/pull/300  Full Changelog: https://github.com/apache/skywalking-python/compare/v1.0.0...v1.0.1\n","title":"Release Apache SkyWalking Python 1.0.1","url":"/events/release-apache-skywalking-python-1-0-1/"},{"content":"本次活动于 2023 年 4 月 22 日在北京奥加美术馆酒店举行。该会议旨在探讨和分享有关可观测性的最佳实践, 包括在云原生应用程序和基础架构中实现可观测性的最新技术和工具。与会者将有机会了解行业领袖的最新见解,并与同行们分享经验和知识。 我们期待这次会议能够给云原生社区带来更多的启发和动力,推动我们在可观测性方面的进一步发展。\n圆桌讨论:云原生应用可观测性现状及趋势 B站视频地址\n嘉宾\n 罗广明,主持人 吴晟,Tetrate 创始工程师 向阳,云杉科技研发 VP 乔新亮,原苏宁科技副总裁,现彩食鲜 CTO 董江,中国移动云能力中心高级系统架构专家  为 Apache SkyWalking 构建 Grafana dashboards \u0026ndash; 基于对原生 PromQL 的支持 B站视频地址\n万凯,Tetrate\n  讲师介绍 万凯,Tetrate 工程师,Apache SkyWalking PMC 成员,专注于应用性能可观测性领域。\n  议题概要 本次分享将介绍 Apache SkyWalking 的新特性 PromQL Service,它将为 SkyWalking 带来更广泛的生态集成能力: 什么是 PromQL SkyWalking 的 PromQL Service 是什么,能够做什么 SkyWalking 中的基本概念和 metrics 的特性 如何使用 PromQL Service 使用 PromQL Service 构建 Grafana dashboards 的实践\n  ","title":"[视频] 可观测性峰会2023 - Observability Summit","url":"/zh/2023-04-23-obs-summit-china/"},{"content":"SkyWalking Client JS 0.10.0 is released. Go to downloads page to find release tars.\n Fix the ability of Fetch constructure. Update README. Bump up dependencies.  ","title":"Release Apache SkyWalking Client JS 0.10.0","url":"/events/release-apache-skywalking-client-js-0-10-0/"},{"content":"SkyWalking Java Agent 8.15.0 is released. Go to downloads page to find release tars. Changes by Version\n8.15.0  Enhance lettuce plugin to adopt uniform tags. Expose complete Tracing APIs in the tracing toolkit. Add plugin to trace Spring 6 and Resttemplate 6. Move the baseline to JDK 17 for development, the runtime baseline is still Java 8 compatible. Remove Powermock entirely from the test cases. Fix H2 instrumentation point Refactor pipeline in jedis-plugin. Add plugin to support ClickHouse JDBC driver (0.3.2.*). Refactor kotlin coroutine plugin with CoroutineContext. Fix OracleURLParser ignoring actual port when :SID is absent. Change gRPC instrumentation point to fix plugin not working for server side. Fix servicecomb plugin trace break. Adapt Armeria\u0026rsquo;s plugins to the latest version 1.22.x Fix tomcat-10x-plugin and add test case to support tomcat7.x-8.x-9.x. Fix thrift plugin generate duplicate traceid when sendBase error occurs Support keep trace profiling when cross-thread. Fix unexpected whitespace of the command catalogs in several Redis plugins. Fix a thread leak in SamplingService when updated sampling policy in the runtime. Support MySQL plugin tracing SQL parameters when useServerPrepStmts Update the endpoint name of Undertow plugin to Method:Path. Build a dummy(empty) javadoc of finagle and jdk-http plugins due to incompatibility.  
Documentation  Update docs of Tracing APIs, reorganize the API docs into six parts. Correct missing package name in native manual API docs. Add a FAQ doc about \u0026ldquo;How to make SkyWalking agent works in OSGI environment?\u0026rdquo;  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.15.0","url":"/events/release-apache-skywalking-java-agent-8-15-0/"},{"content":"SkyWalking PHP 0.4.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Bump tokio from 1.24.1 to 1.24.2 by @dependabot in https://github.com/apache/skywalking-php/pull/52 Bump to 0.4.0-dev by @heyanlong in https://github.com/apache/skywalking-php/pull/53 Avoid potential panic for logger. by @jmjoy in https://github.com/apache/skywalking-php/pull/54 Fix the curl plugin hook curl_setopt by mistake. by @jmjoy in https://github.com/apache/skywalking-php/pull/55 Update documents. by @jmjoy in https://github.com/apache/skywalking-php/pull/56 Upgrade dependencies and adapt the codes. by @jmjoy in https://github.com/apache/skywalking-php/pull/57 Add sub components licenses in dist material. by @jmjoy in https://github.com/apache/skywalking-php/pull/58 Bump to 0.4.0. by @jmjoy in https://github.com/apache/skywalking-php/pull/59  New Contributors  @dependabot made their first contribution in https://github.com/apache/skywalking-php/pull/52  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.3.0...v0.4.0\nPECL https://pecl.php.net/package/skywalking_agent/0.4.0\n","title":"Release Apache SkyWalking PHP 0.4.0","url":"/events/release-apache-skwaylking-php-0-4-0/"},{"content":"Background As an application performance monitoring tool for distributed systems, Apache SkyWalking provides monitoring, tracing, and diagnosing capabilities for distributed systems in Cloud Native architectures. Prometheus is an open-source systems monitoring and alerting toolkit with an active ecosystem. In particular, Prometheus metrics receive widespread support through exporters and integrations. PromQL, the Prometheus Querying Language, contains a set of expressions and exposes HTTP APIs to read metrics.\nSkyWalking supports ingesting Prometheus metrics through the OpenTelemetry collector and, through the aggregate calculation of these metrics, provides a variety of system monitoring, such as Linux Monitoring and Kubernetes monitoring. SkyWalking already provides a native UI and GraphQL API for users. But, being designed to provide wider ecosystem integration capabilities, since 9.4.0 it also provides the PromQL Service, so that third-party systems or visualization platforms that already support PromQL (such as Grafana) can obtain metrics through it. SkyWalking users will benefit from it when they integrate with different systems.\nWhat is PromQL Service in SkyWalking? PromQL Service is a query engine on top of SkyWalking\u0026rsquo;s native GraphQL query, with additional query-stage calculation capabilities powered by Prometheus expressions. It can accept PromQL HTTP API requests, parse Prometheus expressions, and transform between Prometheus metrics and SkyWalking metrics.\nThe PromQL Service follows all of PromQL\u0026rsquo;s protocols and grammar, and users can use it as they would with PromQL. As SkyWalking is fundamentally different from Prometheus in terms of metric classification, format, storage, etc., the PromQL Service doesn\u0026rsquo;t have to implement the full PromQL feature set. 
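Because the service speaks the standard Prometheus HTTP API, any Prometheus-compatible client, or even a plain HTTP call, can read SkyWalking metrics from it. The following is a minimal, illustrative Go sketch rather than an official client; the OAP host, the port, and the agent::songs service are assumptions borrowed from the query examples later in this post:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Instant-query endpoint of the PromQL Service; the host is an assumption,
	// the port defaults to 9090 and is configurable via SW_PROMQL_REST_PORT (see Setup below).
	endpoint := "http://localhost:9090/api/v1/query"
	// The same expression style as the query examples shown later in this post.
	expr := "service_cpm{service='agent::songs', layer='GENERAL'}"

	resp, err := http.Get(endpoint + "?query=" + url.QueryEscape(expr))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// Prints a Prometheus-style JSON result, e.g. {"status":"success","data":{...}}
	fmt.Println(string(body))
}

Grafana's Prometheus data source issues essentially the same kind of requests, which is what the dashboard-building part of this post relies on.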
Refer to the documentation for the detail.\nSkyWalking Basic Concepts Here are some basic concepts and differences from Prometheus that users need to understand in order to use the PromQL service: Prometheus metrics specify the naming format and structure, the actual metric names and labels are determined by the client provider, and the details are stored. The user aggregates and calculates the metrics using the expression in PromQL. Unlike Prometheus, SkyWalking\u0026rsquo;s metric mechanism is built around the following core concepts with a hierarchical structure:\n Layer: represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This layer would be the owner of different services detected from different technologies. All Layers definitions can be found here. Service: Represents a set/group of workloads which provides the same behaviors for incoming requests. Service Instance: An individual workload in the Service group. Endpoint: A path in a service for incoming requests. Process: An operating system process. In some scenarios, a service instance is not a process, such as a pod Kubernetes could contain multiple processes.  The metric name and properties (labels) are configured by the SkyWalking OAP server based on the data source as well as OAL and MAL. SkyWalking provides the ability to down-sampling time series metrics, and generate different time bucket data (minute, hour, day).\nThe SkyWalking metric stream is as follows:\nTraffic  The metadata of the Service/ServiceRelation/Instance/ServiceInstanceRelation/Endpoint/EndpointRelation/Process/ProcessRelation. Include names, layers, properties, relations between them, etc.  Metric  Name: metric name, configuration from OAL and MAL. Entity: represents the metrics' belonging and used for the query. An Entity will contain the following information depending on the Scope: Scope represents the metrics level and in query stage represents the Scope catalog, Scope catalog provides high-dimension classifications for all scopes as a hierarchy structure.     Scope Entity Info     Service Service(include layer info)   ServiceInstance Service, ServiceInstance   Endpoint Service, Endpoint   ServiceRelation Service, DestService   ServiceInstanceRelation ServiceInstance, DestServiceInstance   EndpointRelation Endpoint, DestEndpoint   Process Service, ServiceInstance, Process   ProcessRelation Process, ServiceInstance, DestProcess     Value:   single value: long. labeled value: text, label1,value1|label2,value2|..., such as L2 aggregation,5000 | L1 aggregation,8000.   TimeBucket: the time is accurate to minute, hour, day.  How to use PromQL Service Setup PromQL Service is enabled by default after v9.4.0, so no additional configuration is required. The default ports, for example, can be configured by using OAP environment variables:\nrestHost: ${SW_PROMQL_REST_HOST:0.0.0.0} restPort: ${SW_PROMQL_REST_PORT:9090} restContextPath: ${SW_PROMQL_REST_CONTEXT_PATH:/} restMaxThreads: ${SW_PROMQL_REST_MAX_THREADS:200} restIdleTimeOut: ${SW_PROMQL_REST_IDLE_TIMEOUT:30000} restAcceptQueueSize: ${SW_PROMQL_REST_QUEUE_SIZE:0} Use Prometheus expression PromQL matches metric through the Prometheus expression. Here is a typical Prometheus metric.\nTo match the metric, the Prometheus expression is as follows:\nIn the PromQL Service, these reserved labels would be parsed as the metric name and entity info fields with other labels for the query. 
The mappings are as follows.\n   SkyWalking Concepts Prometheus expression     Metric name Metric name   Layer Label   Service Label   ServiceInstance Label\u0026lt;service_instance\u0026gt;   Endpoint Label   \u0026hellip; \u0026hellip;    For example, the following expressions are used to match query metrics: service_cpm, service_instance_cpm, endpoint_cpm\nservice_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} service_instance_cpm{service=\u0026#39;agent::songs\u0026#39;, service_instance=\u0026#39;agent::songs_instance_1\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} endpoint_cpm{service=\u0026#39;agent::songs\u0026#39;, endpoint=\u0026#39;GET:/songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Typical Query Example At here, we take the SkyWalking Showcase deployment as the playground to demonstrate how to use PromQL for SkyWalking metrics.\nThe following examples can be used to query the metadata and metrics of services through PromQL Service.\nGet metrics names Query:\nhttp://localhost:9099/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, ... ] } Select a metric and get the labels Query:\nhttp://localhost:9099/api/v1/labels?match[]=service_cpm Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34; ] } Get services from a specific layer Query:\nhttp://127.0.0.1:9099/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Query specific metric for a service Query:\nhttp://127.0.0.1:9099/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, 
layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1679559960, \u0026#34;6\u0026#34; ] } ] } } About the range query and different metrics type for query can refer to the document here.\nBuild Grafana Dashboard From the above, we know the mechanism and how to query from PromQL Service, now we can build the Grafana Dashboard for the above service example. Note: All the following configurations are based on Grafana version 9.1.0.\nSkyWalking Showcase provides dashboards files such as services of General and Service Mesh layers, we can quickly create a dashboard for the General layer service by importing the dashboard JSON file.\nAfter the Grafana application is deployed, follow the steps below:\nConfigure Data Source First, we need to create a data source: In the data source config panel, chose Prometheus and set the URL to the OAP server address, the default port is 9090. Here set the data source name SkyWalking in case there are multiple Prometheus data sources.\nImport Dashboard File   Create a dashboard folder named SkyWalking.\n  Import the dashboard file into Grafana, there are two ways to get the file:\n From SkyWalking Showcase. Go to SkyWaking Demo: Preview metrics on Grafana, and export it from the General Service dashboard.    Done! Now we can see the dashboard is working, the services are in the drop-down list and the metrics are displayed on the panels.\n  This is an easy way to build, but we need to know how it works if we want to customize it.\nHow the dashboard works Dashboard Settings Open the Settings-Variables we can see the following variables:\nLet\u0026rsquo;s look at what each variable does:\n  $DS_SkyWalking\nThis is a data source ty variable that specifies the Prometheus data source which was defined earlier as SkyWalking.\n  $layer\nThis is a constant type because in the \u0026lsquo;General Service\u0026rsquo; dashboard, all services belong to the \u0026lsquo;GENERAL\u0026rsquo; layer, so they can be used directly in each query Note When you customize other layers, this value must be defined in the Layer mentioned above.\n  $service\nQuery type variable, to get all service names under this layer for the drop-down list.\nQuery expression:\nlabel_values(service_traffic{layer=\u0026#39;$layer\u0026#39;}, service) The query expression will query HTTP API /api/v1/series for service metadata in $layer and fetch the service name according to the label(service).\n  $service_instance\nSame as the $service is a query variable that is used to select all instances of the service in the drop-down list.\nQuery expression:\nlabel_values(instance_traffic{layer=\u0026#39;$layer\u0026#39;, service=\u0026#39;$service\u0026#39;}, service_instance) The query expression here not only specifies the $layer but also contains the variable $service, which is used to correlate with the services for the drop-down list.\n  $endpoint\nSame as the $service is a query variable that is used to select all endpoints of the service in the drop-down list.\nQuery expression:\nlabel_values(endpoint_traffic{layer=\u0026#39;$layer\u0026#39;, 
service=\u0026#39;$service\u0026#39;, keyword=\u0026#39;$endpoint_keyword\u0026#39;, limit=\u0026#39;$endpoint_limit\u0026#39;}, endpoint) The query expression here specifies the $layer and $service which are used to correlate with the services for the drop-down list. And also accept variables $endpoint_keyword and $endpoint_limit as filtering condition.\n  $endpoint_keyword\nA text type variable that the user can input to filter the return value of $endpoint.\n  $endpoint_limit\nCustom type, which the user can select to limit the maximum number of returned endpoints.\n  Panel Configurations There are several typical metrics panels on this dashboard, let\u0026rsquo;s see how it\u0026rsquo;s configured.\nCommon Value Metrics Select Time series chart panel Service Apdex and click edit.  Query expression service_apdex{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 10000 The metric scope is Service, add labels service and layer for the match, and the label value used the variables configured above. The calculation Divided by 10000 is used for matching the result units. The document for the query can refer to here.\n Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1 hour or 1 day SkyWalking returns the hour/day step metrics values.  Labeled Value Metrics Select Time series chart panel Service Response Time Percentile and click edit.  Query expression service_percentile{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;, labels=\u0026#39;0,1,2,3,4\u0026#39;, relabels=\u0026#39;P50,P75,P90,P95,P99\u0026#39;} The metric scope is Service, add labels service and layer for the match, and the label value used the variables configured above. Add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. The document for the query can refer to here.\n Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1 hour or 1 day SkyWalking returns the hour/day step metrics values. Set Legend to {{label}} for show up.  Sort Metrics Select Time series chart panel Service Response Time Percentile and click edit.  Query expression service_instance_cpm{parent_service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;} The expression is used for query the sore metrics under service, so add labels parent_service and layer for the match. Add top_n='10' and order='DES' filter the result. The document for the query can refer to here.\n Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Set the Calculation --\u0026gt; Latest*. Set Legend to {{service_instance}} for show up.  Conclusion In this article, we introduced what is the PromQL Service in SkyWalking and its background. 
Detailed how to use PromQL Service and the basic concepts related to SkyWalking, and show how to use PromQL Service to build Grafana dashboards for SkyWalking.\nIn the future, there will be more integrations by leveraging this protocol, such as CI/CD, HPA (scaling), etc.\n","title":"Build Grafana dashboards for Apache SkyWalking -- Native PromQL Support","url":"/blog/2023-03-17-build-grafana-dashboards-for-apache-skywalking-native-promql-support/"},{"content":"背景 Apache SkyWalking 作为分布式系统的应用性能监控工具,提供了对云原生架构下的分布式系统的监控、跟踪、诊断能力。Prometheus 是一个开源系统监控和警报工具包,具有活跃的生态系统。特别是 Prometheus 指标通过 导出器和集成 得到广泛支持。 PromQL 作为 Prometheus 查询语言,包含一组表达式并公开 HTTP API 以读取指标。\nSkyWalking 支持通过 OpenTelemetry 收集器 摄取 Prometheus 指标,并通过这些指标的聚合计算提供多种系统监控,例如 Linux 监控和 Kubernetes 监控。SkyWalking 已经为用户提供了 原生 UI 和 GraphQL API。但为了提供更广泛的生态整合能力,从 9.4.0 开始,它提供了 PromQL 服务,已经支持 PromQL 的第三方系统或可视化平台(如 Grafana),可以通过它获取指标。SkyWalking 用户在与不同系统集成时将从中受益。\nSkyWalking 中的 PromQL 服务是什么? PromQL 服务是 SkyWalking 原生 GraphQL 查询之上的查询引擎,具有由 Prometheus 表达式提供支持的附加查询阶段计算能力。它可以接受 PromQL HTTP API 请求,解析 Prometheus 表达式,并在 Prometheus 指标和 SkyWalking 指标之间进行转换。\nPromQL 服务遵循 PromQL 的所有协议和语法,用户可以像使用 PromQL 一样使用它。由于 SkyWalking 在度量分类、格式、存储等方面与 Prometheus 有根本不同,因此 PromQL 服务不必实现完整的 PromQL 功能。有关详细信息,请参阅文档。\nSkyWalking 基本概念 以下是用户使用 PromQL 服务需要了解的一些基本概念和与 Prometheus 的区别: Prometheus 指标指定命名格式和结构,实际指标名称和标签由客户端提供商确定,并存储详细信息。用户使用 PromQL 中的表达式聚合和计算指标。与 Prometheus 不同,SkyWalking 的度量机制是围绕以下具有层次结构的核心概念构建的:\n  层(Layer):表示计算机科学中的一个抽象框架,如 Operating System(OS_LINUX 层)、Kubernetes(k8s 层)。该层将是从不同技术检测到的不同服务的所有者。可以在此处\n找到所有层定义。\n  服务:表示一组 / 一组工作负载,它为传入请求提供相同的行为。\n  服务实例:服务组中的单个工作负载。\n  端点:传入请求的服务路径。\n  进程:操作系统进程。在某些场景下,service instance 不是一个进程,比如一个 Kubernetes Pod 可能包含多个进程。\n  Metric 名称和属性(标签)由 SkyWalking OAP 服务器根据数据源以及 OAL 和 MAL 配置。SkyWalking 提供了对时间序列指标进行下采样(down-sampling),并生成不同时间段数据(分钟、小时、天)的能力。\nSkyWalking 指标流如下:\n流量  Service/ServiceRelation/Instance/ServiceInstanceRelation/Endpoint/EndpointRelation/Process/ProcessRelation 的元数据。包括名称、层、属性、它们之间的关系等。  指标  名称(Name):指标名称,来自 OAL 和 MAL 的配置。 实体(Entity):表示指标的归属,用于查询。一个 Entity 根据 Scope 不同会包含如下信息: Scope 代表指标级别,在查询阶段代表 Scope catalog,Scope catalog 为所有的 scope 提供了高维的分类,层次结构。     Scope 实体信息     Service 服务(包括图层信息)   ServiceInstance 服务、服务实例   Endpoint 服务、端点   ServiceRelation 服务,目标服务   ServiceInstanceRelation 服务实例、目标服务实例   EndpointRelation 端点、目标端点   Process 服务、服务实例、流程   ProcessRelation 进程、服务实例、DestProcess     值:   单值:long 标签值:文本,label1,value1|label2,value2|... 
,例如 L2 aggregation,5000 | L1 aggregation,8000   TimeBucket:时间精确到分钟、小时、天  如何使用 PromQL 服务 设置 PromQL 服务在 v9.4.0 之后默认开启,不需要额外配置。例如,可以使用 OAP 环境变量配置默认端口:\nrestHost: ${SW_PROMQL_REST_HOST:0.0.0.0} restPort: ${SW_PROMQL_REST_PORT:9090} restContextPath: ${SW_PROMQL_REST_CONTEXT_PATH:/} restMaxThreads: ${SW_PROMQL_REST_MAX_THREADS:200} restIdleTimeOut: ${SW_PROMQL_REST_IDLE_TIMEOUT:30000} restAcceptQueueSize: ${SW_PROMQL_REST_QUEUE_SIZE:0} 使用 Prometheus 表达式 PromQL 通过 Prometheus 表达式匹配指标。这是一个典型的 Prometheus 指标。\n为了匹配指标,Prometheus 表达式如下:\n在 PromQL 服务中,这些保留的标签将被解析为度量名称和实体信息字段以及用于查询的其他标签。映射如下。\n   SkyWalking 概念 Prometheus 表达     指标名称 指标名称   层 标签   服务 标签   服务实例 标签 \u0026lt;服务实例\u0026gt;   端点 标签   …… ……    例如,以下表达式用于匹配查询指标:service_cpm、service_instance_cpm、endpoint_cpm\nservice_cpm {service='agent::songs', layer='GENERAL'} service_instance_cpm {service='agent::songs', service_instance='agent::songs_instance_1', layer='GENERAL'} endpoint_cpm {service='agent::songs', endpoint='GET:/songs', layer='GENERAL'} 典型查询示例 在这里,我们将 SkyWalking Showcase 部署作为 Playground 来演示如何使用 PromQL 获取 SkyWalking 指标。\n以下示例可用于通过 PromQL 服务查询服务的元数据和指标。\n获取指标名称 查询:\nhttp://localhost:9099/api/v1/label/__name__/values 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, ... ] } 选择一个指标并获取标签 查询:\nhttp://localhost:9099/api/v1/labels?match []=service_cpm 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34; ] } 从特定层获取服务 查询:\nhttp://127.0.0.1:9099/api/v1/series?match []=service_traffic {layer='GENERAL'}\u0026amp;start=1677479336\u0026amp;end=1677479636 结果:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, {\u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } 查询服务的特定指标 查询:\nhttp://127.0.0.1:9099/api/v1/query?query=service_cpm {service='agent::songs', layer='GENERAL'} 结果:\n{ 
\u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ {\u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; },\u0026#34;value\u0026#34;: [ 1679559960, \u0026#34;6\u0026#34; ] } ] } } 关于range query和不同的metrics type for query 可以参考 这里的 文档。\n构建 Grafana Dashboard 从上面我们知道了 PromQL 服务的机制和查询方式,现在我们可以为上面的服务示例构建 Grafana Dashboard。注:以下所有配置均基于 Grafana 9.1.0 版本。\nSkyWalking Showcase 提供了 General Service 和 Service Mesh 层等 Dashboard 文件,我们可以通过导入 Dashboard JSON 文件快速为层服务创建 Dashboard。\n部署 Grafana 应用程序后,请按照以下步骤操作:\n配置数据源 首先,我们需要创建一个数据源: 在数据源配置面板中,选择 Prometheus 并设置 URL 为 OAP 服务器地址,默认端口为 9090。 SkyWalking 如果有多个 Prometheus 数据源,请在此处设置数据源名称。\n导入 Dashboard 文件   创建一个名为 SkyWalking 的 Dashboard 文件夹。\n  将 Dashboard 文件导入到 Grafana 中,有两种获取文件的方式:\n 来自 SkyWalking Showcase 转到 SkyWaking Demo:在 Grafana 上预览指标,并将其从 General Service Dashboard 导出。    完毕!现在我们可以看到 Dashboard 正在运行,服务位于下拉列表中,指标显示在面板上。\n  这是一种简单的构建方式,但是如果我们想要自定义它,我们需要知道它是如何工作的。\nDashboard 的工作原理 Dashboard 设置 打开 Settings-Variables 我们可以看到如下变量:\n让我们看看每个变量的作用:\n  $DS_SkyWalking\n这是一个数据源 ty 变量,它指定了之前定义为 SkyWalking 的 Prometheus 数据源。\n  $layer\n这是一个常量类型,因为在 \u0026lsquo;General Service\u0026rsquo; Dashboard 中,所有服务都属于 \u0026lsquo;GENERAL\u0026rsquo; 层,因此可以在每个查询中直接使用它们。注意,当您自定义其他层时,必须在 Layer 上面定义该值。\n  $service\n查询类型变量,为下拉列表获取该层下的所有服务名称。\n查询表达式:\nlabel_values (service_traffic {layer='$layer'}, service) 查询表达式将查询 HTTP API /api/v1/series,以获取 $layer 中服务元数据,并根据标签(服务)提取服务名称。\n  $service_instance\n与 $service 一样,是一个查询变量,用于在下拉列表中选择服务的所有实例。\n查询表达式:\nlabel_values (instance_traffic {layer='$layer', service='$service'}, service_instance) 这里的查询表达式不仅指定了 $layer 还包含 $service 变量,用于关联下拉列表的服务。\n  $endpoint\n与 $service 一样,是一个查询变量,用于在下拉列表中选择服务的所有端点。\n查询表达式:\nlabel_values (endpoint_traffic {layer='$layer', service='$service', keyword='$endpoint_keyword', limit='$endpoint_limit'}, endpoint) 此处的查询表达式指定 $layer 和 $service 用于与下拉列表的服务相关联的。并且还接受 $endpoint_keyword 和 $endpoint_limit 变量作为过滤条件。\n  $endpoint_keyword\n一个文本类型的变量,用户可以输入它来过滤 $endpoint 的返回值。\n  $endpoint_limit\n自定义类型,用户可以选择它以限制返回端点的最大数量。\n  Dashboard 配置 这个 Dashboard 上有几个典型的指标面板,让我们看看它是如何配置的。\n普通值指标 选择 Time series chart 面板 Service Apdex 并单击 edit。\n  查询表达式\nservice_apdex {service='$service', layer='$layer'} / 10000 指标范围为 Service,添加 service 和 layer 标签用于匹配,label 值使用上面配置的变量。该计算 Divided by 10000 用于匹配结果单位。查询文档可以参考 这里。\n  设置 Query options --\u0026gt; Min interval = 1m,因为 SkyWalking 中的指标最小时间段是 1m。\n  设置 Connect null values --\u0026gt; AlwaysShow points --\u0026gt; Always,因为当查询间隔大于 1 小时或 1 天时,SkyWalking 返回小时 / 天步长指标值。\n  标签值指标 选择 Time series chart 面板 Service Response Time Percentile 并单击 edit。\n  查询表达式\nservice_percentile {service='$service', layer='$layer', labels='0,1,2,3,4', relabels='P50,P75,P90,P95,P99'} 指标范围为 Service,添加 service 和 layer 标签用于匹配,label 值使用上面配置的变量。添加 labels='0,1,2,3,4' 过滤结果标签,并添加 relabels='P50,P75,P90,P95,P99' 重命名结果标签。查询文档可以参考 这里。\n  设置 Query options --\u0026gt; Min interval = 1m,因为 SkyWalking 中的指标最小时间段是 1m。\n  设置 Connect null values --\u0026gt; AlwaysShow points --\u0026gt; Always,因为当查询间隔 \u0026gt; 1 小时或 1 天时,SkyWalking 返回小时 / 天步长指标值。\n  设置 Legend 为 {{label}} 来展示。\n  排序指标 选择 Time series chart 面板 Service Response Time Percentile 并单击 edit。\n  查询表达式\nservice_instance_cpm {parent_service='$service', layer='$layer', top_n='10', 
order='DES'} 该表达式用于查询服务下的排序指标,因此添加标签 parent_service 和 layer 进行匹配。添加 top_n='10' 和 order='DES' 过滤结果。查询文档可以参考 这里。\n  设置 Query options --\u0026gt; Min interval = 1m,因为 SkyWalking 中的指标最小时间段是 1m。\n  设置 Calculation --\u0026gt; Latest*。\n  设置 Legend 为 {{service_instance}} 来展示。\n  结论 在这篇文章中,我们介绍了 SkyWalking 中的 PromQL 服务是什么以及它的背景。详细介绍了 PromQL 服务的使用方法和 SkyWalking 相关的基本概念,展示了如何使用 PromQL 服务为 SkyWalking 构建 Grafana Dashboard。\n未来,将会有更多的集成利用这个协议,比如 CI/CD、HPA(缩放)等。\n","title":"为 Apache SkyWalking 构建 Grafana Dashboard —— 原生 PromQL 支持","url":"/zh/2023-03-17-build-grafana-dashboards-for-apache-skywalking-native-promql-support/"},{"content":"Background Apache SkyWalking is an open-source application performance management system that helps users collect and aggregate logs, traces, metrics, and events, and display them on the UI. Starting from OAP 9.4.0, SkyWalking has added AWS Firehose receiver, which is used to receive and calculate the data of CloudWatch metrics. In this article, we will take DynamoDB as an example to show how to use SkyWalking to receive and calculate CloudWatch metrics data for monitoring Amazon Web Services.\nWhat are Amazon CloudWatch and Amazon Kinesis Data Firehose? Amazon CloudWatch is a metrics repository, this tool can collect raw data from AWS (e.g. DynamoDB) and process it into readable metrics in near real-time. Also, we can use Metric Stream to continuously stream CloudWatch metrics to a selected target location for near real-time delivery and low latency. SkyWalking takes advantage of this feature to create metric streams and direct them to Amazon Kinesis Data Firehose transport streams for further transport processing.\nAmazon Kinesis Data Firehoseis an extract, transform, and load (ETL) service that reliably captures, transforms, and delivers streaming data to data lakes, data stores, and analytics services. SkyWalking takes advantage of this feature to eventually direct the metrics stream to the aws-firehose-receiver for OAP to calculate and ultimately display the metrics.\nThe flow chart is as follows.\nNotice  Due to Kinesis Data Firehose specifications, the URL of the HTTP endpoint must use the HTTPS protocol and must use port 443. Also, this URL must be proxied by Gateway and forwarded to the real aws-firehose-receiver. The TLS certificate must be signed by a CA and the self-signed certificate will not be trusted by Kinesis Data Firehose.  Setting up DynamoDB monitoring Next, let\u0026rsquo;s take DynamoDB as an example to illustrate the necessary settings in aws before using OAP to collect CloudWatch metrics:\n Go to Kinesis Console, create a data stream, and select Direct PUT for Source and HTTP Endpoint for Destination. And set HTTP Endpoint URL to Gateway URL. The rest of the configuration options can be configured as needed.  Go to the CloudWatch Console, select Metrics-Stream in the left control panel, and click Create metric stream. Select AWS/DynamoDB for namespace. Also, you can add other namespaces as needed. Kinesis Data Firehose selects the data stream created in the first step. Finally, set the output format to opentelemetry0.7. The rest of the configuration options can be configured as needed.  At this point, the AWS side of DynamoDB monitoring configuration is set up.\nSkyWalking OAP metrics processing analysis SkyWalking uses aws-firehose-receiver to receive and decode AWS metrics streams forwarded by Gateway, and send it to Opentelemetry-receiver for processing and transforming into SkyWalking metrics. 
Then, the metrics are analyzed and aggregated by Meter Analysis Language (MAL) and finally presented on the UI.\nBoth the MAL part and the UI part of SkyWalking support user customization to display the metrics data in more diverse ways. For details, please refer to MAL doc and UI doc.\nTypical metrics analysis Scope In SkyWalking, there is the concept of scope. By using scopes, we can classify and aggregate metrics more rationally. In the monitoring of DynamoDB, two of these scopes are used - Service and Endpoint.\nService represents a set of workloads that provide the same behavior for incoming requests. It is commonly used as a cluster-level scope for services; in AWS, a user account is the closest equivalent to a cluster, so SkyWalking uses the AWS account ID as the key to map AWS accounts to the Service type.\nSimilarly, Endpoint represents a logical concept, often used in services for the path of incoming requests, such as an HTTP URI path or a gRPC service class + method signature, and can also represent a table in a database. So SkyWalking maps DynamoDB tables to the Endpoint type.\nMetrics    Metric Name Meaning     AccountMaxReads / AccountMaxWrites The maximum number of read/write capacity units that can be used by an account.   AccountMaxTableLevelReads / AccountMaxTableLevelWrites The maximum number of read/write capacity units that can be used by a table or global secondary index of an account.   AccountProvisionedReadCapacityUtilization / AccountProvisionedWriteCapacityUtilization The percentage of provisioned read/write capacity units utilized by an account.   MaxProvisionedTableReadCapacityUtilization / MaxProvisionedTableWriteCapacityUtilization The percentage of provisioned read/write capacity utilized by the highest provisioned read/write table or global secondary index of an account.    Above are some common account metrics (Service scope). They reflect various configuration information in DynamoDB, and SkyWalking can show a complete picture of database configuration changes by monitoring these metrics.\n   Metric Name Meaning     ConsumedReadCapacityUnits / ConsumedWriteCapacityUnits The number of read/write capacity units consumed over the specified time period.   ReturnedItemCount The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period.   SuccessfulRequestLatency The latency of successful requests to DynamoDB or Amazon DynamoDB Streams during the specified time period.   TimeToLiveDeletedItemCount The number of items deleted by Time to Live (TTL) during the specified time period.    The above are some common table metrics (Endpoint scope), which are also aggregated into account metrics. These metrics are generally used to analyze the performance of the database, and users can use them to determine a reasonable level of database configuration. For example, users can track how much of their provisioned throughput is used through ConsumedReadCapacityUnits / ConsumedWriteCapacityUnits to judge whether the provisioned throughput of a table or account is reasonable. For more information about provisioned throughput, see Provisioned Throughput Intro.\n   Metric Name Meaning     UserErrors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period.   SystemErrors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period.   
ThrottledRequests Requests to DynamoDB that exceed the provisioned throughput limits on a resource.   TransactionConflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items.    The above are some common error metrics, among which UserErrors are account-level metrics and the rest are table-level metrics. Users can set alarms on these metrics, and if warnings appear, then it may indicate that there are some problems with the use of the database, and users need to check and verify by themselves.\nNotice SkyWalking\u0026rsquo;s metrics selection for DynamoDB comes directly from CloudWatch metrics, which can also be found at CloudWatch metrics doc to get metrics details.\nDemo In this section, we will demonstrate how to use terraform to create a DynamoDB table and other AWS services that can generate metrics streams, and deploy Skywalking to complete the metrics collection.\nFirst, you need a running gateway instance, such as NGINX, which is responsible for receiving metrics streams from AWS and forwarding them to the aws-firehose-receiver. Note that the gateway needs to be configured with certificates to accept HTTPS protocol requests.\nBelow is an example configuration for NGINX. The configuration does not need to be identical, as long as it can send incoming HTTPS requests to oap host:12801/aws/firehose/metrics.\nserver { listen 443 ssl; ssl_certificate /crt/test.pem; ssl_certificate_key /crt/test.key; ssl_session_timeout 5m; ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4; ssl_protocols TLSv1 TLSv1.1 TLSv1.2; ssl_prefer_server_ciphers on; location /aws/firehose/metrics { proxy_pass http://test.xyz:12801/aws/firehose/metrics; } } Deploying SkyWalking There are various ways to deploy SkyWalking, and you can get them directly from the release page.\nOf course, if you are more comfortable with Kubernetes, you can also find the appropriate deployment method from SkyWalking-kubernetes.\nPlease note that no matter which deployment method you use, please make sure that the OAP and UI version is 9.4.0 or higher and that port 12801 needs to be open.\nThe following is an example of a deployment using the helm command.\nexport SKYWALKING_RELEASE_VERSION=4.3.0 export SKYWALKING_RELEASE_NAME=skywalking export SKYWALKING_RELEASE_NAMESPACE=default helm install \u0026quot;${SKYWALKING_RELEASE_NAME}\u0026quot; \\ oci://registry-1.docker.io/apache/skywalking-helm \\ --version \u0026quot;${SKYWALKING_RELEASE_VERSION}\u0026quot; \\ -n \u0026quot;${SKYWALKING_RELEASE_NAMESPACE}\u0026quot; \\ --set oap.image.tag=9.4.0 \\ --set oap.storageType=elasticsearch \\ --set ui.image.tag=9.4.0 \\ --set oap.ports.firehose=12801 Start the corresponding AWS service The terraform configuration file is as follows (example modified inTerraform Registry - kinesis_firehose_delivery_stream):\n terraform configuration file  provider \u0026quot;aws\u0026quot; { region = \u0026quot;ap-northeast-1\u0026quot; access_key = \u0026quot;[need change]your access_key\u0026quot; secret_key = \u0026quot;[need change]your secret_key\u0026quot; } resource \u0026quot;aws_dynamodb_table\u0026quot; \u0026quot;basic-dynamodb-table\u0026quot; { name = \u0026quot;GameScores\u0026quot; billing_mode = \u0026quot;PROVISIONED\u0026quot; read_capacity = 20 write_capacity = 20 hash_key = \u0026quot;UserId\u0026quot; range_key = \u0026quot;GameTitle\u0026quot; attribute { name = \u0026quot;UserId\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = 
\u0026quot;GameTitle\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;TopScore\u0026quot; type = \u0026quot;N\u0026quot; } ttl { attribute_name = \u0026quot;TimeToExist\u0026quot; enabled = true } global_secondary_index { name = \u0026quot;GameTitleIndex\u0026quot; hash_key = \u0026quot;GameTitle\u0026quot; range_key = \u0026quot;TopScore\u0026quot; write_capacity = 10 read_capacity = 10 projection_type = \u0026quot;INCLUDE\u0026quot; non_key_attributes = [\u0026quot;UserId\u0026quot;] } tags = { Name = \u0026quot;dynamodb-table-1\u0026quot; Environment = \u0026quot;production\u0026quot; } } resource \u0026quot;aws_cloudwatch_metric_stream\u0026quot; \u0026quot;main\u0026quot; { name = \u0026quot;my-metric-stream\u0026quot; role_arn = aws_iam_role.metric_stream_to_firehose.arn firehose_arn = aws_kinesis_firehose_delivery_stream.http_stream.arn output_format = \u0026quot;opentelemetry0.7\u0026quot; include_filter { namespace = \u0026quot;AWS/DynamoDB\u0026quot; } } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;streams_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;streams.metrics.cloudwatch.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;metric_stream_to_firehose_role\u0026quot; assume_role_policy = data.aws_iam_policy_document.streams_assume_role.json } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;firehose:PutRecord\u0026quot;, \u0026quot;firehose:PutRecordBatch\u0026quot;, ] resources = [aws_kinesis_firehose_delivery_stream.http_stream.arn] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.metric_stream_to_firehose.id policy = data.aws_iam_policy_document.metric_stream_to_firehose.json } resource \u0026quot;aws_s3_bucket\u0026quot; \u0026quot;bucket\u0026quot; { bucket = \u0026quot;metric-stream-test-bucket\u0026quot; } resource \u0026quot;aws_s3_bucket_acl\u0026quot; \u0026quot;bucket_acl\u0026quot; { bucket = aws_s3_bucket.bucket.id acl = \u0026quot;private\u0026quot; } data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;firehose.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { assume_role_policy = data.aws_iam_policy_document.firehose_assume_role.json } data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;s3:AbortMultipartUpload\u0026quot;, \u0026quot;s3:GetBucketLocation\u0026quot;, \u0026quot;s3:GetObject\u0026quot;, \u0026quot;s3:ListBucket\u0026quot;, \u0026quot;s3:ListBucketMultipartUploads\u0026quot;, \u0026quot;s3:PutObject\u0026quot;, ] resources = [ 
aws_s3_bucket.bucket.arn, \u0026quot;${aws_s3_bucket.bucket.arn}/*\u0026quot;, ] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.firehose_to_s3.id policy = data.aws_iam_policy_document.firehose_to_s3.json } resource \u0026quot;aws_kinesis_firehose_delivery_stream\u0026quot; \u0026quot;http_stream\u0026quot; { name = \u0026quot;metric-stream-test-stream\u0026quot; destination = \u0026quot;http_endpoint\u0026quot; http_endpoint_configuration { name = \u0026quot;test_http_endpoint\u0026quot; url = \u0026quot;[need change]Gateway url\u0026quot; role_arn = aws_iam_role.firehose_to_s3.arn } s3_configuration { role_arn = aws_iam_role.firehose_to_s3.arn bucket_arn = aws_s3_bucket.bucket.arn } }  Steps to use.\n  Get the access_key and secret_key of the AWS account.( For how to get them, please refer to create-access-key )\n  Fill in the access_key and secret_key you got in the previous step, and fill in the corresponding URL of your gateway in the corresponding location of aws_kinesis_firehose_delivery_stream configuration.\n  Copy the above content and save it to the main.tf file.\n  Execute the following code in the corresponding path.\n  terraform init terraform apply At this point, all the required AWS services have been successfully created, and you can check your console to see if the services were successfully created.\nDone! If all the above steps were successful, please wait for about five minutes. After that, you can visit the SkyWalking UI to see the metrics.\nCurrently, the metrics collected by SkyWalking by default are displayed as follows.\naccount metrics:\ntable metrics:\nOther services Currently, SkyWalking officially supports EKS, S3, DynamoDB monitoring. Users also refer to the OpenTelemetry receiver to configure OTel rules to collect and analyze CloudWatch metrics of other AWS services and display them through a custom dashboard.\nMaterial  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","title":"Monitoring DynamoDB with SkyWalking","url":"/blog/2023-03-13-skywalking-aws-dynamodb/"},{"content":"背景 Apache SkyWalking 是一个开源应用性能管理系统,帮助用户收集和聚合日志、追踪、指标和事件,并在 UI 上显示。从 OAP 9.4.0 开始,SkyWalking 新增了 AWS Firehose receiver,用来接收,计算CloudWatch metrics的数据。本文将以DynamoDB为例,展示如何使用 SkyWalking接收并计算 CloudWatch metrics 数据,以监控Amazon Web Services。\n什么是 Amazon CloudWatch 与 Amazon Kinesis Data Firehose ? Amazon CloudWatch 是一个指标存储库, 此工具可从 AWS中 ( 如 DynamoDB ) 收集原始数据,近实时处理为可读取的指标。同时,我们也可以使用指标流持续地将 CloudWatch 指标流式传输到所选的目标位置,实现近实时传送和低延迟。SkyWalking 利用此特性,创建指标流并将其导向 Amazon Kinesis Data Firehose 传输流,并由后者进一步传输处理。\nAmazon Kinesis Data Firehose是一项提取、转换、加载服务,可以将流式处理数据以可靠方式捕获、转换和提供到数据湖、数据存储和分析服务中。SkyWalking利用此特性,将指标流最终导向 aws-firehose-receiver,交由OAP计算并最终展示指标。\n整体过程流程图如下:\n注意  由于 Kinesis Data Firehose 规定,HTTP端点的URL必须使用HTTPS协议,且必须使用443端口。同时,此URL必须由Gateway代理并转发到真正的aws-firehose-receiver。 TLS 证书必须由CA签发的,自签证书不会被 Kinesis Data Firehose 信任。  设置DynamoDB监控 接下来以DynamoDB为例说明使用OAP 收集CloudWatch metrics 前,aws中必要的设置:\n 进入 Kinesis 控制台,创建数据流, Source选择 Direct PUT, Destination 选择 HTTP Endpoint. 
并且设置HTTP Endpoint URL 为 Gateway对应URL。 其余配置选项可由需要自行配置。  进入 CloudWatch 控制台,在左侧控制面板中选择Metrics-Stream,点击Create metric stream。其中,namespace 选择 AWS/DynamoDB。同时,根据需要,也可以增加其他命名空间。 Kinesis Data Firehose选择在第一步中创建好的数据流。最后,设置输出格式为opentelemetry0.7。其余配置选项可由需要自行配置。  至此,DynamoDB监控配置的AWS方面设置完成。\nSkyWalking OAP 指标处理分析 SkyWalking 利用 aws-firehose-receiver 接收并解码由Gateway转发来的 AWS 指标流,交由Opentelemetry-receiver进行处理,转化为SkyWalking metrics。并由Meter Analysis Language (MAL)进行指标的分析与聚合,最终呈现在UI上。\n其中 MAL 部分以及 UI 部分,SkyWalking支持用户自由定制,从而更多样性的展示指标数据。详情请参考MAL doc 以及 UI doc。\n典型指标分析 作用域 SkyWalking中,有作用域 ( scope ) 的概念。通过作用域, 我们可以对指标进行更合理的分类与聚合。在对DynamoDB的监控中,使用到了其中两种作用域———Service和Endpoint。\nService表示一组工作负荷,这些工作负荷为传入请求提供相同的行为。常用作服务的集群级别作用域,在AWS中,用户的账户更接近集群的概念。 所以SkyWalking将AWS account id作为key,将AWS账户映射为Service类型。\n同理,Endpoint表示一种逻辑概念,常用于服务中用于传入请求的路径,例如 HTTP URI 路径或 gRPC 服务类 + 方法签名,也可以表示数据库中的表结构。所以SkyWalking将DynamoDB表映射为Endpoint类型。\n指标    指标名称 含义     AccountMaxReads / AccountMaxWrites 账户可以使用的最大 读取/写入 容量单位数。   AccountMaxTableLevelReads / AccountMaxTableLevelWrites 账户的表或全局二级索引可以使用的最大 读取/写入 容量单位数。   AccountProvisionedReadCapacityUtilization / AccountProvisionedWriteCapacityUtilization 账户使用的预置 读取/写入 容量单位百分比。   MaxProvisionedTableReadCapacityUtilization / MaxProvisionedTableWriteCapacityUtilization 账户的最高预调配 读取/写入 表或全局二级索引使用的预调配读取容量单位百分比。    以上为一些常用的账户指标(Serivce 作用域)。它们是DynamoDB中的各种配置信息,SkyWalking通过对这些指标的监控,可以完整的展示出数据库配置的变动情况。\n   指标名称 含义     ConsumedReadCapacityUnits / ConsumedWriteCapacityUnits 指定时间段内占用的 读取/写入 容量单位数   ReturnedItemCount Query、Scan 或 ExecuteStatement(可选择)操作在指定时段内返回的项目数。   SuccessfulRequestLatency 指定时间段内对于 DynamoDB 或 Amazon DynamoDB Streams 的成功请求的延迟。   TimeToLiveDeletedItemCount 指定时间段内按存活时间 (TTL) 删除的项目数。    以上为一些常用的表指标(Endpoint作用域),它们也会被聚合到账户指标中。这些指标一般用于分析数据库的性能,用户可以通过它们判断出数据库配置的合理程度。例如,用户可以通过ConsumedReadCapicityUnits / ConsumedReadCapicityUnits,跟踪预置吞吐量的使用,从而判断表或账户的预制吞吐量的合理性。关于预置吞吐量,请参见读/写容量模式。\n   指标名称 含义     UserErrors 在指定时间段内生成 HTTP 400 状态代码的对 DynamoDB 或 Amazon DynamoDB Streams 的请求。HTTP 400 通常表示客户端错误,如参数组合无效,尝试更新不存在的表或请求签名错误。   SystemErrors 在指定的时间段内生成 HTTP 500 状态代码的对 DynamoDB 或 Amazon DynamoDB Streams 的请求。HTTP 500 通常指示内部服务错误。   ThrottledRequests 超出资源(如表或索引)预置吞吐量限制的 DynamoDB 请求。   TransactionConflict 由于同一项目的并发请求之间的事务性冲突而被拒绝的项目级请求。    以上为一些常用的错误指标,其中UserErrors为用户级别指标,其余为表级别指标。用户可以在这些指标上设置告警,如果警告出现,那么可能说明数据库的使用出现了一些问题,需要用户自行查看验证。\n注意 SkyWalking对于DynamoDB的指标选取直接来源于CloudWatch metrics, 您也可以通过CloudWatch metrics doc来获取指标详细信息。\nDemo 在本节中,我们将演示如何利用terraform创建一个DynamoDB表,以及可以产生指标流的其他AWS服务,并部署Skywalking完成指标收集。\n首先,您需要一个正在运行的网关实例,例如 NGINX,它负责接收AWS传来的指标流并且转发到aws-firehose-receiver。注意, 网关需要配置证书以便接受HTTPS协议的请求。\n下面是一个NGINX的示例配置。配置不要求完全一致,只要能将收到的HTTPS请求发送到oap所在host:12801/aws/firehose/metrics即可。\nserver { listen 443 ssl; ssl_certificate /crt/test.pem; ssl_certificate_key /crt/test.key; ssl_session_timeout 5m; ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4; ssl_protocols TLSv1 TLSv1.1 TLSv1.2; ssl_prefer_server_ciphers on; location /aws/firehose/metrics { proxy_pass http://test.xyz:12801/aws/firehose/metrics; } } 部署SkyWalking SkyWalking的部署方式有很多种,您可以直接从release页面中直接获取。\n当然,如果您更习惯于 Kubernetes,您也可以从SkyWalking-kubernetes找到相应部署方式。\n请注意,无论使用哪种部署方式,请确保OAP和UI的版本为9.4.0以上,并且需要开放12801端口。\n下面是一个使用helm指令部署的示例:\nexport SKYWALKING_RELEASE_VERSION=4.3.0 export SKYWALKING_RELEASE_NAME=skywalking export SKYWALKING_RELEASE_NAMESPACE=default helm install \u0026quot;${SKYWALKING_RELEASE_NAME}\u0026quot; \\ oci://registry-1.docker.io/apache/skywalking-helm \\ --version \u0026quot;${SKYWALKING_RELEASE_VERSION}\u0026quot; \\ 
-n \u0026quot;${SKYWALKING_RELEASE_NAMESPACE}\u0026quot; \\ --set oap.image.tag=9.4.0 \\ --set oap.storageType=elasticsearch \\ --set ui.image.tag=9.4.0 \\ --set oap.ports.firehose=12801 开启对应AWS服务 terraform 配置文件如下(实例修改于Terraform Registry - kinesis_firehose_delivery_stream):\n terraform 配置文件  provider \u0026quot;aws\u0026quot; { region = \u0026quot;ap-northeast-1\u0026quot; access_key = \u0026quot;在这里填入您的access_key\u0026quot; secret_key = \u0026quot;在这里填入您的secret_key\u0026quot; } resource \u0026quot;aws_dynamodb_table\u0026quot; \u0026quot;basic-dynamodb-table\u0026quot; { name = \u0026quot;GameScores\u0026quot; billing_mode = \u0026quot;PROVISIONED\u0026quot; read_capacity = 20 write_capacity = 20 hash_key = \u0026quot;UserId\u0026quot; range_key = \u0026quot;GameTitle\u0026quot; attribute { name = \u0026quot;UserId\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;GameTitle\u0026quot; type = \u0026quot;S\u0026quot; } attribute { name = \u0026quot;TopScore\u0026quot; type = \u0026quot;N\u0026quot; } ttl { attribute_name = \u0026quot;TimeToExist\u0026quot; enabled = true } global_secondary_index { name = \u0026quot;GameTitleIndex\u0026quot; hash_key = \u0026quot;GameTitle\u0026quot; range_key = \u0026quot;TopScore\u0026quot; write_capacity = 10 read_capacity = 10 projection_type = \u0026quot;INCLUDE\u0026quot; non_key_attributes = [\u0026quot;UserId\u0026quot;] } tags = { Name = \u0026quot;dynamodb-table-1\u0026quot; Environment = \u0026quot;production\u0026quot; } } resource \u0026quot;aws_cloudwatch_metric_stream\u0026quot; \u0026quot;main\u0026quot; { name = \u0026quot;my-metric-stream\u0026quot; role_arn = aws_iam_role.metric_stream_to_firehose.arn firehose_arn = aws_kinesis_firehose_delivery_stream.http_stream.arn output_format = \u0026quot;opentelemetry0.7\u0026quot; include_filter { namespace = \u0026quot;AWS/DynamoDB\u0026quot; } } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;streams_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;streams.metrics.cloudwatch.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;metric_stream_to_firehose_role\u0026quot; assume_role_policy = data.aws_iam_policy_document.streams_assume_role.json } # https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-trustpolicy.html data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;firehose:PutRecord\u0026quot;, \u0026quot;firehose:PutRecordBatch\u0026quot;, ] resources = [aws_kinesis_firehose_delivery_stream.http_stream.arn] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;metric_stream_to_firehose\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.metric_stream_to_firehose.id policy = data.aws_iam_policy_document.metric_stream_to_firehose.json } resource \u0026quot;aws_s3_bucket\u0026quot; \u0026quot;bucket\u0026quot; { bucket = \u0026quot;metric-stream-test-bucket\u0026quot; } resource \u0026quot;aws_s3_bucket_acl\u0026quot; \u0026quot;bucket_acl\u0026quot; { bucket = aws_s3_bucket.bucket.id acl = \u0026quot;private\u0026quot; } data 
\u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_assume_role\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; principals { type = \u0026quot;Service\u0026quot; identifiers = [\u0026quot;firehose.amazonaws.com\u0026quot;] } actions = [\u0026quot;sts:AssumeRole\u0026quot;] } } resource \u0026quot;aws_iam_role\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { assume_role_policy = data.aws_iam_policy_document.firehose_assume_role.json } data \u0026quot;aws_iam_policy_document\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { statement { effect = \u0026quot;Allow\u0026quot; actions = [ \u0026quot;s3:AbortMultipartUpload\u0026quot;, \u0026quot;s3:GetBucketLocation\u0026quot;, \u0026quot;s3:GetObject\u0026quot;, \u0026quot;s3:ListBucket\u0026quot;, \u0026quot;s3:ListBucketMultipartUploads\u0026quot;, \u0026quot;s3:PutObject\u0026quot;, ] resources = [ aws_s3_bucket.bucket.arn, \u0026quot;${aws_s3_bucket.bucket.arn}/*\u0026quot;, ] } } resource \u0026quot;aws_iam_role_policy\u0026quot; \u0026quot;firehose_to_s3\u0026quot; { name = \u0026quot;default\u0026quot; role = aws_iam_role.firehose_to_s3.id policy = data.aws_iam_policy_document.firehose_to_s3.json } resource \u0026quot;aws_kinesis_firehose_delivery_stream\u0026quot; \u0026quot;http_stream\u0026quot; { name = \u0026quot;metric-stream-test-stream\u0026quot; destination = \u0026quot;http_endpoint\u0026quot; http_endpoint_configuration { name = \u0026quot;test_http_endpoint\u0026quot; url = \u0026quot;这里填入Gateway的url\u0026quot; role_arn = aws_iam_role.firehose_to_s3.arn } s3_configuration { role_arn = aws_iam_role.firehose_to_s3.arn bucket_arn = aws_s3_bucket.bucket.arn } }  使用步骤:\n1.获取AWS账户的access_key以及secret_key。( 关于如何获取,请参考:create-access-key )\n2.将上一步中获取的access_key与secret_key填入对应位置,并将您的网关对应 url 填入 aws_kinesis_firehose_delivery_stream 配置的对应位置中。\n3.复制以上内容并保存到main.tf文件中。\n4.在对应路径下执行以下代码。\nterraform init terraform apply 至此,需要的AWS服务已全部建立成功,您可以检查您的控制台,查看服务是否成功创建。\n完成! 如果以上步骤全部成功,请耐心等待约五分钟。之后您可以访问SkyWalking UI,查看指标变动情况\n目前,SkyWalking 默认收集的指标展示如下:\n账户指标:\n表指标:\n现已支持的服务 目前SkyWalking官方支持EKS,S3,DynamoDB监控。 用户也参考 OpenTelemetry receiver 配置OTEL rules来收集,计算AWS其他服务的CloudWatch metrics,并且通过自定义dashboard展示。\n相关的资料  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","title":"使用SkyWalking监控DynamoDB","url":"/zh/2023-03-13-skywalking-aws-dynamodb/"},{"content":"SKyWalking OAP\u0026rsquo;s existing OpenTelemetry receiver can receive metrics through the OTLP protocol, and use MAL to analyze related metrics in real time. Starting from OAP 9.4.0, SkyWalking has added an AWS Firehose receiver to receive and analyze CloudWatch metrics data. This article will take EKS and S3 as examples to introduce the process of SkyWalking OAP receiving and analyzing the indicator data of AWS services.\nEKS OpenTelemetry Collector OpenTelemetry (OTel) is a series of tools, APIs, and SDKs that can generate, collect, and export telemetry data, such as metrics, logs, and traces. OTel Collector is mainly responsible for collecting, processing, and exporting. For telemetry data, Collector consists of the following main components:\n Receiver: Responsible for obtaining telemetry data, different receivers support different data sources, such as prometheus, kafka, otlp. Processor: Process data between receiver and exporter, such as adding or deleting attributes. 
Exporter: Responsible for sending data to different backends, such as kafka, SkyWalking OAP (via OTLP). Service: Components enabled as a unit configuration, only configured components will be enabled.  OpenTelemetry Protocol Specification(OTLP) OTLP mainly describes how to receive (pull) indicator data through gRPC and HTTP protocols. The OpenTelemetry receiver of SKyWalking OAP implements the OTLP/gRPC protocol, and the indicator data can be exported to OAP through the OTLP/gRPC exporter. Usually the data flow of a Collector is as follows:\nMonitor EKS with OTel EKS monitoring is realized through OTel. You only need to deploy OpenTelemetry Collector in the EKS cluster in the way of DaemonSet  \u0026ndash; use AWS Container Insights Receiver as the receiver, and set the address of otlp exporter to the address of OAP. In addition, it should be noted that OAP is used job_name : aws-cloud-eks-monitoring as the identifier of EKS metrics according to the attribute, so it is necessary to configure a processor in the collector to add this attribute.\nOTel Collector configuration demo extensions:health_check:receivers:awscontainerinsightreceiver:processors:# To enable OAP to correctly identify EKS metrics, add the job_name attributeresource/job-name:attributes:- key:job_name value:aws-cloud-eks-monitoringaction:insert # Specify OAP as exportersexporters:otlp:endpoint:oap-service:11800 tls:insecure:truelogging:loglevel:debug service:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]By default, SkyWalking OAP counts the network, disk, CPU and other related indicator data in the three dimensions of Node, Pod, and Service. Only part of the content is shown here.\nPod dimensions Service dimensions EKS monitoring complete configuration  Click here to view complete k8s resource configuration  apiVersion:v1kind:ServiceAccountmetadata:name:aws-otel-sanamespace:aws-otel-eks---kind:ClusterRoleapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-rolerules:- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;pods\u0026#34;,\u0026#34;nodes\u0026#34;,\u0026#34;endpoints\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;apps\u0026#34;]resources:[\u0026#34;replicasets\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;batch\u0026#34;]resources:[\u0026#34;jobs\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/proxy\u0026#34;]verbs:[\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/stats\u0026#34;,\u0026#34;configmaps\u0026#34;,\u0026#34;events\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;configmaps\u0026#34;]resourceNames:[\u0026#34;otel-container-insight-clusterleader\u0026#34;]verbs:[\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]- apiGroups:[\u0026#34;coordination.k8s.io\u0026#34;]resources:[\u0026#34;leases\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]---kind:ClusterRoleBindingapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-role-bindingsubjects:- 
kind:ServiceAccountname:aws-otel-sanamespace:aws-otel-eksroleRef:kind:ClusterRolename:aoc-agent-roleapiGroup:rbac.authorization.k8s.io---apiVersion:v1kind:ConfigMapmetadata:name:otel-agent-confnamespace:aws-otel-ekslabels:app:opentelemetrycomponent:otel-agent-confdata:otel-agent-config:|extensions: health_check: receivers: awscontainerinsightreceiver: processors: resource/job-name: attributes: - key: job_name value: aws-cloud-eks-monitoring action: insert exporters: otlp: endpoint: oap-service:11800 tls: insecure: true logging: loglevel: debug service: pipelines: metrics: receivers: [awscontainerinsightreceiver] processors: [resource/job-name] exporters: [otlp,logging] extensions: [health_check]---apiVersion:apps/v1kind:DaemonSetmetadata:name:aws-otel-eks-cinamespace:aws-otel-eksspec:selector:matchLabels:name:aws-otel-eks-citemplate:metadata:labels:name:aws-otel-eks-cispec:containers:- name:aws-otel-collectorimage:amazon/aws-otel-collector:v0.23.0env:# Specify region- name:AWS_REGIONvalue:\u0026#34;ap-northeast-1\u0026#34;- name:K8S_NODE_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:HOST_IPvalueFrom:fieldRef:fieldPath:status.hostIP- name:HOST_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:K8S_NAMESPACEvalueFrom:fieldRef:fieldPath:metadata.namespaceimagePullPolicy:Alwayscommand:- \u0026#34;/awscollector\u0026#34;- \u0026#34;--config=/conf/otel-agent-config.yaml\u0026#34;volumeMounts:- name:rootfsmountPath:/rootfsreadOnly:true- name:dockersockmountPath:/var/run/docker.sockreadOnly:true- name:varlibdockermountPath:/var/lib/dockerreadOnly:true- name:containerdsockmountPath:/run/containerd/containerd.sockreadOnly:true- name:sysmountPath:/sysreadOnly:true- name:devdiskmountPath:/dev/diskreadOnly:true- name:otel-agent-config-volmountPath:/conf- name:otel-output-vol mountPath:/otel-outputresources:limits:cpu:200mmemory:200Mirequests:cpu:200mmemory:200Mivolumes:- configMap:name:otel-agent-confitems:- key:otel-agent-configpath:otel-agent-config.yamlname:otel-agent-config-vol- name:rootfshostPath:path:/- name:dockersockhostPath:path:/var/run/docker.sock- name:varlibdockerhostPath:path:/var/lib/docker- name:containerdsockhostPath:path:/run/containerd/containerd.sock- name:syshostPath:path:/sys- name:devdiskhostPath:path:/dev/disk/- name:otel-output-vol hostPath:path:/otel-outputserviceAccountName:aws-otel-sa S3 Amazon CloudWatch Amazon CloudWatch is a monitoring service provided by AWS. It is responsible for collecting indicator data of AWS services and resources. CloudWatch metrics stream is responsible for converting indicator data into stream processing data, and supports output in two formats: json and OTel v0.7.0.\nAmazon Kinesis Data Firehose (Firehose) Firehose is an extract, transform, load (ETL) service that reliably captures, transforms, and serves streaming data into data lakes, data stores (such as S3), and analytics services.\nTo ensure that external services can correctly receive indicator data, AWS provides Kinesis Data Firehose HTTP Endpoint Delivery Request and Response Specifications (Firehose Specifications) . Firhose pushes Json data by POST\nJson data example { \u0026#34;requestId\u0026#34;: \u0026#34;ed4acda5-034f-9f42-bba1-f29aea6d7d8f\u0026#34;, \u0026#34;timestamp\u0026#34;: 1578090901599 \u0026#34;records\u0026#34;: [ { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8=\u0026#34; }, { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8gd29ybGQ=\u0026#34; } ] }  requestId: Request id, which can achieve deduplication and debugging purposes. 
timestamp: The timestamp (in milliseconds) at which Firehose generated the request. records: The actual delivered records  data: The delivered data, encoded in Base64; it can be in JSON or OTel v0.7.0 format, depending on the output format of the CloudWatch metric stream (described later). SkyWalking currently supports the OTel v0.7.0 format.    aws-firehose-receiver aws-firehose-receiver provides an HTTP endpoint that implements the Firehose Specifications: /aws/firehose/metrics. The figure below shows the data flow of monitoring DynamoDB, S3 and other services through CloudWatch, and using Firehose to send the metric data to SkyWalking OAP.\nStep-by-step setup of S3 monitoring  Enter the S3 console and create a filter for Request metrics: Amazon S3 \u0026gt;\u0026gt; Buckets \u0026gt;\u0026gt; (Your Bucket) \u0026gt;\u0026gt; Metrics \u0026gt;\u0026gt; metrics \u0026gt;\u0026gt; View additional charts \u0026gt;\u0026gt; Request metrics  Enter the Amazon Kinesis console and create a delivery stream; select Direct PUT as the Source and HTTP Endpoint as the Destination, and set the HTTP endpoint URL to https://your_domain/aws/firehose/metrics. Other configuration items:   Buffer hints: set the buffer size and buffering interval. Access key: must match the AccessKey configured in aws-firehose-receiver. Retry duration: the retry period. Backup settings: optionally back up the delivered data to S3 at the same time.  Enter the CloudWatch console, open Streams, and click Create CloudWatch Stream. In Select your Kinesis Data Firehose stream, choose the delivery stream created in the second step. Note that Change output format must be set to OpenTelemetry v0.7.0.  At this point, the S3 monitoring configuration is complete. The S3 metrics currently collected by SkyWalking by default are shown below:\nOther services Currently, SkyWalking officially supports EKS, S3, and DynamoDB monitoring. 
Users also refer to the OpenTelemetry receiver to configure OTel rules to collect and analyze CloudWatch metrics of other AWS services, and display them through a custom dashboard.\nMaterial  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","title":"Monitoring AWS EKS and S3 with SkyWalking","url":"/blog/2023-03-12-skywalking-aws-s3-eks/"},{"content":"SKyWalking OAP 现有的 OpenTelemetry receiver 可以通过OTLP协议接收指标(metrics),并且使用MAL实时分析相关指标。从OAP 9.4.0开始,SkyWalking 新增了AWS Firehose receiver,用来接收,分析CloudWatch metrics数据。本文将以EKS和S3为例介绍SkyWalking OAP 接收,分析 AWS 服务的指标数据的过程\nEKS OpenTelemetry Collector OpenTelemetry (OTel) 是一系列tools,API,SDK,可以生成,收集,导出遥测数据,比如 指标(metrics),日志(logs)和链路信息(traces),而OTel Collector主要负责收集、处理和导出遥测数据,Collector由以下主要组件组成:\n receiver: 负责获取遥测数据,不同的receiver支持不同的数据源,比如prometheus ,kafka,otlp, processor:在receiver和exporter之间处理数据,比如增加或者删除attributes, exporter:负责发送数据到不同的后端,比如kafka,SkyWalking OAP(通过OTLP) service: 作为一个单元配置启用的组件,只有配置的组件才会被启用  OpenTelemetry Protocol Specification(OTLP) OTLP 主要描述了如何通过gRPC,HTTP协议接收(拉取)指标数据。SKyWalking OAP的 OpenTelemetry receiver 实现了OTLP/gRPC协议,通过OTLP/gRPC exporter可以将指标数据导出到OAP。通常一个Collector的数据流向如下:\n使用OTel监控EKS EKS的监控就是通过OTel实现的,只需在EKS集群中以DaemonSet  的方式部署 OpenTelemetry Collector,使用 AWS Container Insights Receiver 作为receiver,并且设置otlp exporter的地址为OAP的的地址即可。另外需要注意的是OAP根据attribute job_name : aws-cloud-eks-monitoring 作为EKS metrics的标识,所以还需要再collector中配置一个processor来增加这个属性\nOTel Collector配置demo extensions:health_check:receivers:awscontainerinsightreceiver:processors:# 为了OAP能够正确识别EKS metrics,增加job_name attributeresource/job-name:attributes:- key:job_name value:aws-cloud-eks-monitoringaction:insert # 指定OAP作为 exportersexporters:otlp:endpoint:oap-service:11800 tls:insecure:truelogging:loglevel:debug service:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]SkyWalking OAP 默认统计 Node,Pod,Service 三个维度的网络、磁盘、CPU等相关的指标数据,这里仅展示了部分内容\nPod 维度 Service 维度 EKS监控完整配置  Click here to view complete k8s resource configuration  apiVersion:v1kind:ServiceAccountmetadata:name:aws-otel-sanamespace:aws-otel-eks---kind:ClusterRoleapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-rolerules:- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;pods\u0026#34;,\u0026#34;nodes\u0026#34;,\u0026#34;endpoints\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;apps\u0026#34;]resources:[\u0026#34;replicasets\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;batch\u0026#34;]resources:[\u0026#34;jobs\u0026#34;]verbs:[\u0026#34;list\u0026#34;,\u0026#34;watch\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/proxy\u0026#34;]verbs:[\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;nodes/stats\u0026#34;,\u0026#34;configmaps\u0026#34;,\u0026#34;events\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;]- apiGroups:[\u0026#34;\u0026#34;]resources:[\u0026#34;configmaps\u0026#34;]resourceNames:[\u0026#34;otel-container-insight-clusterleader\u0026#34;]verbs:[\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]- 
apiGroups:[\u0026#34;coordination.k8s.io\u0026#34;]resources:[\u0026#34;leases\u0026#34;]verbs:[\u0026#34;create\u0026#34;,\u0026#34;get\u0026#34;,\u0026#34;update\u0026#34;]---kind:ClusterRoleBindingapiVersion:rbac.authorization.k8s.io/v1metadata:name:aoc-agent-role-bindingsubjects:- kind:ServiceAccountname:aws-otel-sanamespace:aws-otel-eksroleRef:kind:ClusterRolename:aoc-agent-roleapiGroup:rbac.authorization.k8s.io---apiVersion:v1kind:ConfigMapmetadata:name:otel-agent-confnamespace:aws-otel-ekslabels:app:opentelemetrycomponent:otel-agent-confdata:otel-agent-config:|extensions: health_check: receivers: awscontainerinsightreceiver: processors: resource/job-name: attributes: - key: job_name value: aws-cloud-eks-monitoring action: insert exporters: otlp: endpoint: oap-service:11800 tls: insecure: true logging: loglevel: debug service: pipelines: metrics: receivers: [awscontainerinsightreceiver] processors: [resource/job-name] exporters: [otlp,logging] extensions: [health_check]---apiVersion:apps/v1kind:DaemonSetmetadata:name:aws-otel-eks-cinamespace:aws-otel-eksspec:selector:matchLabels:name:aws-otel-eks-citemplate:metadata:labels:name:aws-otel-eks-cispec:containers:- name:aws-otel-collectorimage:amazon/aws-otel-collector:v0.23.0env:# Specify region- name:AWS_REGIONvalue:\u0026#34;ap-northeast-1\u0026#34;- name:K8S_NODE_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:HOST_IPvalueFrom:fieldRef:fieldPath:status.hostIP- name:HOST_NAMEvalueFrom:fieldRef:fieldPath:spec.nodeName- name:K8S_NAMESPACEvalueFrom:fieldRef:fieldPath:metadata.namespaceimagePullPolicy:Alwayscommand:- \u0026#34;/awscollector\u0026#34;- \u0026#34;--config=/conf/otel-agent-config.yaml\u0026#34;volumeMounts:- name:rootfsmountPath:/rootfsreadOnly:true- name:dockersockmountPath:/var/run/docker.sockreadOnly:true- name:varlibdockermountPath:/var/lib/dockerreadOnly:true- name:containerdsockmountPath:/run/containerd/containerd.sockreadOnly:true- name:sysmountPath:/sysreadOnly:true- name:devdiskmountPath:/dev/diskreadOnly:true- name:otel-agent-config-volmountPath:/conf- name:otel-output-vol mountPath:/otel-outputresources:limits:cpu:200mmemory:200Mirequests:cpu:200mmemory:200Mivolumes:- configMap:name:otel-agent-confitems:- key:otel-agent-configpath:otel-agent-config.yamlname:otel-agent-config-vol- name:rootfshostPath:path:/- name:dockersockhostPath:path:/var/run/docker.sock- name:varlibdockerhostPath:path:/var/lib/docker- name:containerdsockhostPath:path:/run/containerd/containerd.sock- name:syshostPath:path:/sys- name:devdiskhostPath:path:/dev/disk/- name:otel-output-vol hostPath:path:/otel-outputserviceAccountName:aws-otel-sa S3 Amazon CloudWatch Amazon CloudWatch 是AWS提供的监控服务,负责收集AWS 服务,资源的指标数据,CloudWatch metrics stream 负责将指标数据转换为流式处理数据,支持输出json,OTel v0.7.0 两种格式。\nAmazon Kinesis Data Firehose (Firehose) Firehose 是一项提取、转换、加载(ETL)服务,可以将流式处理数据以可靠方式捕获、转换和提供到数据湖、数据存储(比如S3)和分析服务中。\n为了确保外部服务能够正确地接收指标数据, AWS提供了 Kinesis Data Firehose HTTP Endpoint Delivery Request and Response Specifications (Firehose Specifications)。Firhose以POST的方式推送Json数据\nJson数据示例 { \u0026#34;requestId\u0026#34;: \u0026#34;ed4acda5-034f-9f42-bba1-f29aea6d7d8f\u0026#34;, \u0026#34;timestamp\u0026#34;: 1578090901599 \u0026#34;records\u0026#34;: [ { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8=\u0026#34; }, { \u0026#34;data\u0026#34;: \u0026#34;aGVsbG8gd29ybGQ=\u0026#34; } ] }  requestId: 请求id,可以实现去重,debug目的 timestamp: Firehose 产生该请求的时间戳(毫秒) records: 实际投递的记录  data: 投递的数据,以base64编码数据,可以是json或者OTel v0.7.0格式,取决于CloudWatch数据数据的格式(稍后会有描述)。Skywalking目前支持OTel v0.7.0格式    
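下面给出一个最简化的示意(仅用于说明上述投递格式,并非 aws-firehose-receiver 的真实实现):假设收到的请求体符合 Firehose HTTP Endpoint 投递规范,先把 records 中 base64 编码的 data 字段解码出来,再按规范回传带有同一个 requestId 和毫秒时间戳的应答:
import base64
import json
import time

def handle_firehose_delivery(body: str) -> str:
    request = json.loads(body)
    payloads = []
    for record in request.get('records', []):
        # data 字段为 base64 编码;按上文的输出格式设置,解码后是 OTel v0.7.0 的指标数据
        payloads.append(base64.b64decode(record['data']))
    # 真实的接收器会把 payloads 交给 OpenTelemetry-receiver 解析,再由 MAL 聚合,这里省略
    # 按 Firehose 规范,应答需要回传同一个 requestId,并附带时间戳(毫秒)
    return json.dumps({'requestId': request['requestId'], 'timestamp': int(time.time() * 1000)})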
aws-firehose-receiver aws-firehose-receiver 就是提供了一个实现了Firehose Specifications的HTTP Endpoint:/aws/firehose/metrics。下图展示了通过CloudWatch监控DynamoDB,S3等服务,并利用Firehose将指标数据发送到SKywalking OAP的数据流向\n从上图可以看到 aws-firehose-receiver 将数据转换后交由 OpenTelemetry-receiver处理 ,所以 OpenTelemetry receiver 中配置的 otel-rules 同样可以适用CloudWatch metrics\n注意  因为 Kinesis Data Firehose 要求,必须在AWS Firehose receiver 前放置一个Gateway用来建立HTTPS链接。aws-firehose-receiver 将从v9.5.0开始支持HTTPS协议 TLS 证书必须是CA签发的  逐步设置S3监控  进入 S3控制台,通过 Amazon S3 \u0026gt;\u0026gt; Buckets \u0026gt;\u0026gt; (Your Bucket) \u0026gt;\u0026gt; Metrics \u0026gt;\u0026gt; metrics \u0026gt;\u0026gt; View additional charts \u0026gt;\u0026gt; Request metrics 为 Request metrics 创建filter  进入Amazon Kinesis 控制台,创建一个delivery stream, Source选择 Direct PUT, Destination 选择 HTTP Endpoint. 并且设置HTTP endpoint URL 为 https://your_domain/aws/firehose/metrics。其他配置项:  Buffer hints: 设置缓存的大小和周期 Access key 与aws-firehose-receiver中的AccessKey一致即可 Retry duration: 重试周期 Backup settings: 备份设置,可选地将投递的数据同时备份到S3。    进入 CloudWatch控制台,Streams 标签创建CloudWatch Stream。并且在Select your Kinesis Data Firehose stream项中配置第二步创建的delivery stream。注意需要设置Change output format 为 OpenTelemetry v0.7.0。  至此,S3监控配置设置完成。目前SkyWalking默认收集的S3 metrics 展示如下\n其他服务 目前SkyWalking官方支持EKS,S3,DynamoDB监控。 用户也参考 OpenTelemetry receiver 配置OTel rules来收集,分析AWS其他服务的CloudWatch metrics,并且通过自定义dashboard展示\n资料  Monitoring S3 metrics with Amazon CloudWatch Monitoring DynamoDB metrics with Amazon CloudWatch Supported metrics in AWS Firehose receiver of OAP Configuration Vocabulary | Apache SkyWalking  ","title":"使用SkyWalking监控AWS EKS和S3","url":"/zh/2023-03-12-skywalking-aws-s3-eks/"},{"content":"SkyWalking Rust 0.6.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Refactor span object api to make it more friendly. by @jmjoy in https://github.com/apache/skywalking-rust/pull/52 Refactor management report and keep alive api. by @jmjoy in https://github.com/apache/skywalking-rust/pull/53 Use stream and completed for a bulk to collect for grpc reporter. by @jmjoy in https://github.com/apache/skywalking-rust/pull/54 Add sub components licenses in dist material. by @jmjoy in https://github.com/apache/skywalking-rust/pull/55 Bump to 0.6.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/56  ","title":"Release Apache SkyWalking Rust 0.6.0","url":"/events/release-apache-skywalking-rust-0-6-0/"},{"content":"SkyWalking 9.4.0 is released. Go to downloads page to find release tars.\nPromQL and Grafana Support Zipkin Lens UI Bundled AWS S3 and DynamoDB monitoring Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom version to 29. Bump up Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Adds Micrometer as a new component.(ID=141) Refactor session cache in MetricsPersistentWorker. Cache enhancement - don\u0026rsquo;t read new metrics from database in minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. // 1.3 the OAP cluster is rebalanced due to scaling // (2) the metrics are from the time after the timeOfLatestStabilitySts // (3) the metrics don't exist in the cache // the kernel should NOT try to load it from the database. 
// // Notice, about condition (2), // for the specific minute of booted successfully, the metrics are expected to load from database when // it doesn't exist in the cache.  Remove the offset of metric session timeout according to worker creation sequence. Correct MetricsExtension annotations declarations in manual entities. Support component IDs' priority in process relation metrics. Remove abandon logic in MergableBufferedData, which caused unexpected no-update. Fix miss set LastUpdateTimestamp that caused the metrics session to expire. Rename MAL rule spring-sleuth.yaml to spring-micrometer.yaml. Fix memory leak in Zipkin API. Remove the dependency of refresh_interval of ElasticSearch indices from elasticsearch/flushInterval config. Now, it uses core/persistentPeriod + 5s as refresh_interval for all indices instead. Change elasticsearch/flushInterval to 5s(was 15s). Optimize flushInterval of ElasticSearch BulkProcessor to avoid extra periodical flush in the continuous bulk streams. An unexpected dot is added when exp is a pure metric name and expPrefix != null. Support monitoring MariaDB. Remove measure/stream specific interval settings in BanyanDB. Add global-specific settings used to override global configurations (e.g segmentIntervalDays, blockIntervalHours) in BanyanDB. Use TTL-driven interval settings for the measure-default group in BanyanDB. Fix wrong group of non time-relative metadata in BanyanDB. Refactor StorageData#id to the new StorageID object from a String type. Support multiple component IDs in the service topology level. Add ElasticSearch.Keyword annotation to declare the target field type as keyword. [Breaking Change] Column component_id of service_relation_client_side and service_relation_server_side have been replaced by component_ids. Support priority definition in the component-libraries.yml. Enhance service topology query. When there are multiple components detected from the server side, the component type of the node would be determined by the priority, which was random in the previous release. Remove component_id from service_instance_relation_client_side and service_instance_relation_server_side. Make the satellite E2E test more stable. Add Istio 1.16 to test matrix. Register ValueColumn as Tag for Record in BanyanDB storage plugin. Bump up Netty to 4.1.86. Remove unnecessary additional columns when storage is in logical sharding mode. The cluster coordinator support watch mechanism for notifying RemoteClientManager and ServerStatusService. Fix ServiceMeshServiceDispatcher overwrite ServiceDispatcher debug file when open SW_OAL_ENGINE_DEBUG. Use groupBy and in operators to optimize topology query for BanyanDB storage plugin. Support server status watcher for MetricsPersistentWorker to check the metrics whether required initialization. Fix the meter value are not correct when using sumPerMinLabeld or sumHistogramPercentile MAL function. Fix cannot display attached events when using Zipkin Lens UI query traces. Remove time_bucket for both Stream and Measure kinds in BanyanDB plugin. Merge TIME_BUCKET of Metrics and Record into StorageData. Support no layer in the listServices query. Fix time_bucket of ServiceTraffic not set correctly in slowSql of MAL. Correct the TopN record query DAO of BanyanDB. Tweak interval settings of BanyanDB. Support monitoring AWS Cloud EKS. Bump BanyanDB Java client to 0.3.0-rc1. Remove id tag from measures. Add Banyandb.MeasureField to mark a column as a BanyanDB Measure field. 
Add BanyanDB.StoreIDTag to store a process\u0026rsquo;s id for searching. [Breaking Change] The supported version of ShardingSphere-Proxy is upgraded from 5.1.2 to 5.3.1. Due to the changes of ShardingSphere\u0026rsquo;s API, versions before 5.3.1 are not compatible. Add the eBPF network profiling E2E Test in the per storage. Fix TCP service instances are lack of instance properties like pod and namespace, which causes Pod log not to work for TCP workloads. Add Python HBase happybase module component ID(94). Fix gRPC alarm cannot update settings from dynamic configuration source. Add batchOfBytes configuration to limit the size of bulk flush. Add Python Websocket module component ID(7018). [Optional] Optimize single trace query performance by customizing routing in ElasticSearch. SkyWalking trace segments and Zipkin spans are using trace ID for routing. This is OFF by default, controlled by storage/elasticsearch/enableCustomRouting. Enhance OAP HTTP server to support HTTPS Remove handler scan in otel receiver, manual initialization instead Add aws-firehose-receiver to support collecting AWS CloudWatch metric(OpenTelemetry format). Notice, no HTTPS/TLS setup support. By following AWS Firehose request, it uses proxy request (https://... instead of /aws/firehose/metrics), there must be a proxy(Nginx, Envoy, etc.). Avoid Antlr dependencies' versions might be different in compile time and runtime. Now PrometheusMetricConverter#escapedName also support converting / to _. Add missing TCP throughput metrics. Refactor @Column annotation, swap Column#name and ElasticSearch.Column#columnAlias and rename ElasticSearch.Column#columnAlias to ElasticSearch.Column#legacyName. Add Python HTTPX module component ID(7019). Migrate tests from junit 4 to junit 5. Refactor http-based alarm plugins and extract common logic to HttpAlarmCallback. Support Amazon Simple Storage Service (Amazon S3) metrics monitoring Support process Sum metrics with AGGREGATION_TEMPORALITY_DELTA case Support Amazon DynamoDB monitoring. Support prometheus HTTP API and promQL. Scope in the Entity of Metrics query v1 protocol is not required and automatical correction. The scope is determined based on the metric itself. Add explicit ReadTimeout for ConsulConfigurationWatcher to avoid IllegalArgumentException: Cache watchInterval=10sec \u0026gt;= networkClientReadTimeout=10000ms. Fix DurationUtils.getDurationPoints exceed, when startTimeBucket equals endTimeBucket. Support process OpenTelemetry ExponentialHistogram metrics Add FreeRedis component ID(3018).  UI  Add Zipkin Lens UI to webapp, and proxy it to context path /zipkin. Migrate the build tool from vue cli to Vite4. Fix Instance Relation and Endpoint Relation dashboards show up. Add Micrometer icon. Update MySQL UI to support MariaDB. Add AWS menu for supporting AWS monitoring. Add missing FastAPI logo. Update the log details page to support the formatted display of JSON content. Fix build config. Avoid being unable to drag process nodes for the first time. Add node folder into ignore list. Add ElPopconfirm to component types. Add an iframe widget for zipkin UI. Optimize graph tooltips to make them more friendly. Bump json5 from 1.0.1 to 1.0.2. Add websockets icon. Implement independent mode for widgets. Bump http-cache-semantics from 4.1.0 to 4.1.1. Update menus for OpenFunction. Add auto fresh to widgets independent mode. Fix: clear trace ID on the Log and Trace widgets after using association. Fix: reset duration for query conditions after time range changes. Add AWS S3 menu. 
Refactor: optimize side bar component to make it more friendly. Fix: remove duplicate popup message for query result. Add logo for HTTPX. Refactor: optimize the attached events visualization in the trace widget. Update BanyanDB client to 0.3.1. Add AWS DynamoDB menu. Fix: add auto period to the independent mode for widgets. Optimize menus and add Windows monitoring menu. Add a calculation for the cpm5dAvg. add a cpm5d calculation. Fix data processing error in the eBPF profiling widget. Support for double quotes in SlowSQL statements. Fix: the wrong position of the menu when clicking the topology node.  Documentation  Remove Spring Sleuth docs, and add Spring MicroMeter Observations Analysis with the latest Java agent side enhancement. Update monitoring MySQL document to add the MariaDB part. Reorganize the protocols docs to a more clear API docs. Add documentation about replacing Zipkin server with SkyWalking OAP. Add Lens UI relative docs in Zipkin trace section. Add Profiling APIs. Fix backend telemetry doc and so11y dashboard doc as the OAP Prometheus fetcher was removed since 9.3.0  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.4.0","url":"/events/release-apache-skywalking-apm-9.4.0/"},{"content":"SkyWalking BanyanDB 0.3.1 is released. Go to downloads page to find release tars.\nBugs  Fix the broken of schema chain. Add a timeout to all go leaking checkers.  Chores  Bump golang.org/x/net from 0.2.0 to 0.7.0.  ","title":"Release Apache SkyWalking BanyanDB 0.3.1","url":"/events/release-apache-skywalking-banyandb-0-3-1/"},{"content":"SkyWalking Python 1.0.0 is released! Go to downloads page to find release tars.\nPyPI Wheel: https://pypi.org/project/apache-skywalking/1.0.0/\nDockerHub Image: https://hub.docker.com/r/apache/skywalking-python\n  Important Notes and Breaking Changes:\n The new PVM metrics reported from Python agent requires SkyWalking OAP v9.3.0 to show out-of-the-box. BREAKING: Python 3.6 is no longer supported and may not function properly, Python 3.11 support is added and tested. BREAKING: A number of common configuration options and environment variables are renamed to follow the convention of Java agent, please check with the latest official documentation before upgrading. (#273, #282) https://skywalking.apache.org/docs/skywalking-python/v1.0.0/en/setup/configuration/ BREAKING: All agent core capabilities are now covered by test cases and enabled by default (Trace, Log, PVM runtime metrics, Profiler) BREAKING: DockerHub Python agent images since v1.0.0 will no longer include the run part in ENTRYPOINT [\u0026quot;sw-python\u0026quot;, \u0026quot;run\u0026quot;], user should prefix their command with [-d/--debug] run [-p/--prefork] \u0026lt;Command\u0026gt; for extra flexibility. 
Packaged wheel now provides a extra [all] option to support all three report protocols    Feature:\n Add support for Python 3.11 (#285) Add MeterReportService (gRPC, Kafka reporter) (default:enabled) (#231, #236, #241, #243) Add reporter for PVM runtime metrics (default:enabled) (#238, #247) Add Greenlet profiler (#246) Add test and support for Python Slim base images (#249) Add support for the tags of Virtual Cache for Redis (#263) Add a new configuration kafka_namespace to prefix the kafka topic names (#277) Add log reporter support for loguru (#276) Add experimental support for explicit os.fork(), restarts agent in forked process (#286) Add experimental sw-python CLI sw-python run [-p] flag (-p/\u0026ndash;prefork) to enable non-intrusive uWSGI and Gunicorn postfork support (#288)    Plugins:\n Add aioredis, aiormq, amqp, asyncpg, aio-pika, kombu RMQ plugins (#230 Missing test coverage) Add Confluent Kafka plugin (#233 Missing test coverage) Add HBase plugin Python HappyBase model (#266) Add FastAPI plugin websocket protocol support (#269) Add Websockets (client) plugin (#269) Add HTTPX plugin (#283)    Fixes:\n Allow RabbitMQ BlockingChannel.basic_consume() to link with outgoing spans (#224) Fix RabbitMQ basic_get bug (#225, #226) Fix case when tornado socket name is None (#227) Fix misspelled text \u0026ldquo;PostgreSLQ\u0026rdquo; -\u0026gt; \u0026ldquo;PostgreSQL\u0026rdquo; in Postgres-related plugins (#234) Make sure span.component initialized as Unknown rather than 0 (#242) Ignore websocket connections inside fastapi temporarily (#244, issue#9724) Fix Kafka-python plugin SkyWalking self reporter ignore condition (#249) Add primary endpoint in tracing context and endpoint info to log reporter (#261) Enforce tag class type conversion (#262) Fix sw_logging (log reporter) potentially throw exception leading to traceback confusion (#267) Avoid reporting meaningless tracecontext with logs when there\u0026rsquo;s no active span, UI will now show empty traceID (#272) Fix exception handler in profile_context (#273) Add namespace suffix to service name (#275) Add periodical instance property report to prevent data loss (#279) Fix sw_logging when Logger.disabled is true (#281)    Docs:\n New documentation on how to test locally (#222) New documentation on the newly added meter reporter feature (#240) New documentation on the newly added greenlet profiler and the original threading profiler (#250) Overhaul documentation on development setup and testing (#249) Add tables to state currently supported features of Python agent. 
(#271) New configuration documentation generator (#273)    Others:\n Pin CI SkyWalking License Eye (#221) Fix dead link due to the \u0026lsquo;next\u0026rsquo; url change (#235) Pin CI SkyWalking Infra-E2E (#251) Sync OAP, SWCTL versions in E2E and fix test cases (#249) Overhaul development flow with Poetry (#249) Fix grpcio-tools generated message type (#253) Switch plugin tests to use slim Python images (#268) Add unit tests to sw_filters (#269)    New Contributors  @ZEALi made their first contribution in https://github.com/apache/skywalking-python/pull/242 @westarest made their first contribution in https://github.com/apache/skywalking-python/pull/246 @Jedore made their first contribution in https://github.com/apache/skywalking-python/pull/263 @alidisi made their first contribution in https://github.com/apache/skywalking-python/pull/266 @SheltonZSL made their first contribution in https://github.com/apache/skywalking-python/pull/275 @XinweiLyu made their first contribution in https://github.com/apache/skywalking-python/pull/283  Full Changelog: https://github.com/apache/skywalking-python/compare/v0.8.0...v1.0.0\n","title":"Release Apache SkyWalking Python 1.0.0","url":"/events/release-apache-skywalking-python-1-0-0/"},{"content":"SkyWalking BanyanDB 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support 64-bit float type. Web Application. Close components in tsdb gracefully. Add TLS for the HTTP server. Use the table builder to compress data.  Bugs  Open blocks concurrently. Sync index writing and shard closing. TimestampRange query throws an exception if no data in this time range.  Chores  Fixes issues related to leaked goroutines. Add validations to APIs.  ","title":"Release Apache SkyWalking BanyanDB 0.3.0","url":"/events/release-apache-skywalking-banyandb-0-3-0/"},{"content":"SkyWalking PHP 0.3.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Make explicit rust version requirement by @wu-sheng in https://github.com/apache/skywalking-php/pull/35 Update dependencies version limitation. by @jmjoy in https://github.com/apache/skywalking-php/pull/36 Startup 0.3.0 by @heyanlong in https://github.com/apache/skywalking-php/pull/37 Support PHP 8.2 by @heyanlong in https://github.com/apache/skywalking-php/pull/38 Fix php-fpm freeze after large amount of request. by @jmjoy in https://github.com/apache/skywalking-php/pull/39 Lock develop rust version to 1.65, upgrade deps. by @jmjoy in https://github.com/apache/skywalking-php/pull/41 Fix worker unexpected shutdown. by @jmjoy in https://github.com/apache/skywalking-php/pull/42 Update docs about installing rust. by @jmjoy in https://github.com/apache/skywalking-php/pull/43 Retry cargo test when failed in CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/44 Hack dtor for mysqli to cleanup resources. by @jmjoy in https://github.com/apache/skywalking-php/pull/45 Report instance properties and keep alive. by @jmjoy in https://github.com/apache/skywalking-php/pull/46 Add configuration option skywalking_agent.runtime_dir. by @jmjoy in https://github.com/apache/skywalking-php/pull/47 Add authentication support. by @jmjoy in https://github.com/apache/skywalking-php/pull/48 Support TLS. by @jmjoy in https://github.com/apache/skywalking-php/pull/49 Periodic reporting instance properties. by @jmjoy in https://github.com/apache/skywalking-php/pull/50 Bump to 0.3.0. 
by @jmjoy in https://github.com/apache/skywalking-php/pull/51  Breaking  Remove http:// scheme in skywalking_agent.server_addr.  New Contributors  @wu-sheng made their first contribution in https://github.com/apache/skywalking-php/pull/35  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.2.0...v0.3.0\nPECL https://pecl.php.net/package/skywalking_agent/0.3.0\n","title":"Release Apache SkyWalking PHP 0.3.0","url":"/events/release-apache-skwaylking-php-0-3-0/"},{"content":"SkyWalking Java Agent 8.14.0 is released. Go to downloads page to find release tars. Changes by Version\n8.14.0  Polish test framework to support arm64/v8 platforms Fix wrong config name plugin.toolkit.use_qualified_name_as_operation_name, and system variable name SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME:false. They were toolit. Rename JDBI to JDBC Support collecting dubbo thread pool metrics Bump up byte-buddy to 1.12.19 Upgrade agent test tools [Breaking Change] Compatible with 3.x and 4.x RabbitMQ Client, rename rabbitmq-5.x-plugin to rabbitmq-plugin Polish JDBC plugins to make DBType accurate Report the agent version to OAP as an instance attribute Polish jedis-4.x-plugin to change command to lowercase, which is consistent with jedis-2.x-3.x-plugin Add micronauthttpclient,micronauthttpserver,memcached,ehcache,guavacache,jedis,redisson plugin config properties to agent.config Add Micrometer Observation support Add tags mq.message.keys and mq.message.tags for RocketMQ producer span Clean the trace context which injected into Pulsar MessageImpl after the instance recycled Fix In the higher version of mysql-connector-java 8x, there is an error in the value of db.instance. Add support for KafkaClients 3.x. Support to customize the collect period of JVM relative metrics. Upgrade netty-codec-http2 to 4.1.86.Final. Put Agent-Version property reading in the premain stage to avoid deadlock when using jarsigner. Add a config agent.enable(default: true) to support disabling the agent through system property -Dskywalking.agent.disable=false or system environment variable setting SW_AGENT_ENABLE=false. Enhance redisson plugin to adopt uniform tags.  Documentation  Update Plugin-test.md, support string operators start with and end with Polish agent configurations doc to fix type error  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.14.0","url":"/events/release-apache-skywalking-java-agent-8-14-0/"},{"content":"Background Apache SkyWalking is an open-source Application Performance Management system that helps users collect and aggregate logs, traces, metrics, and events for display on a UI. In the previous article, we introduced how to use Apache SkyWalking Rover to analyze the network performance issue in the service mesh environment. However, in business scenarios, users often rely on mature layer 7 protocols, such as HTTP, for interactions between systems. In this article, we will discuss how to use eBPF techniques to analyze performance bottlenecks of layer 7 protocols and how to enhance the tracing system using network sampling.\nThis article will show how to use Apache SkyWalking with eBPF to enhance metrics and traces in HTTP observability.\nHTTP Protocol Analysis HTTP is one of the most common Layer 7 protocols and is usually used to provide services to external parties and for inter-system communication. 
In the following sections, we will show how to identify and analyze HTTP/1.x protocols.\nProtocol Identification In HTTP/1.x, the client and server communicate through a single file descriptor (FD) on each side. Figure 1 shows the process of communication involving the following steps:\n Connect/accept: The client establishes a connection with the HTTP server, or the server accepts a connection from the client. Read/write (multiple times): The client or server reads and writes HTTPS requests and responses. A single request-response pair occurs within the same connection on each side. Close: The client and server close the connection.  To obtain HTTP content, it’s necessary to read it from the second step of this process. As defined in the RFC, the content is contained within the data of the Layer 4 protocol and can be obtained by parsing the data. The request and response pair can be correlated because they both occur within the same connection on each side.\nFigure 1: HTTP communication timeline.\nHTTP Pipeline HTTP pipelining is a feature of HTTP/1.1 that enables multiple HTTP requests to be sent over a single TCP connection without waiting for the corresponding responses. This feature is important because it ensures that the order of the responses on the server side matches the order of the requests.\nFigure 2 illustrates how this works. Consider the following scenario: an HTTP client sends multiple requests to a server, and the server responds by sending the HTTP responses in the same order as the requests. This means that the first request sent by the client will receive the first response from the server, the second request will receive the second response, and so on.\nWhen designing HTTP parsing, we should follow this principle by adding request data to a list and removing the first item when parsing a response. This ensures that the responses are processed in the correct order.\nFigure 2: HTTP/1.1 pipeline.\nMetrics Based on the identification of the HTTP content and process topology diagram mentioned in the previous article, we can combine these two to generate process-to-process metrics data.\nFigure 3 shows the metrics that currently support the analysis between the two processes. Based on the HTTP request and response data, we can analyze the following data:\n   Metrics Name Type Unit Description     Request CPM(Call Per Minute) Counter count The HTTP request count   Response Status CPM(Call Per Minute) Counter count The count of per HTTP response status code   Request Package Size Counter/Histogram Byte The request package size   Response Package Size Counter/Histogram Byte The response package size   Client Duration Counter/Histogram Millisecond The duration of single HTTP response on the client side   Server Duration Counter/Histogram Millisecond The duration of single HTTP response on the server side    Figure 3: Process-to-process metrics.\nHTTP and Trace During the HTTP process, if we unpack the HTTP requests and responses from raw data, we can use this data to correlate with the existing tracing system.\nTrace Context Identification In order to track the flow of requests between multiple services, the trace system usually creates a trace context when a request enters a service and passes it along to other services during the request-response process. For example, when an HTTP request is sent to another server, the trace context is included in the request header.\nFigure 4 displays the raw content of an HTTP request intercepted by Wireshark. 
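To make the parsing step concrete, the sketch below is a minimal illustration in Python (not SkyWalking Rover's actual eBPF implementation; the sample request bytes and header values are invented) of how a captured HTTP/1.x request is split into its request line and headers before any trace context can be looked up:

def parse_http_request(raw):
    # Split a raw HTTP/1.x request into (method, path, headers dict with lower-cased names).
    head, _, _body = raw.partition(b"\r\n\r\n")
    lines = head.decode("iso-8859-1").split("\r\n")
    method, path, _version = lines[0].split(" ", 2)
    headers = {}
    for line in lines[1:]:
        name, _, value = line.partition(":")
        headers[name.strip().lower()] = value.strip()
    return method, path, headers

# Invented capture, for illustration only.
raw_request = (b"GET /productpage HTTP/1.1\r\n"
               b"Host: frontend\r\n"
               b"X-B3-TraceId: 80f198ee56343ba864fe8b2a57d3eff7\r\n"
               b"X-B3-SpanId: e457b5a2e4d86bd1\r\n"
               b"\r\n")
method, path, headers = parse_http_request(raw_request)

Pairing each parsed request with its response then follows the pipelining rule described earlier: append requests to a per-connection queue and pop the head of the queue whenever a response is parsed.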
The trace context information generated by the Zipkin Tracing system can be identified by the “X-B3” prefix in the header. By using eBPF to intercept the trace context in the HTTP header, we can connect the current request with the trace system.\nFigure 4: View of HTTP headers in Wireshark.\nTrace Event We have added the concept of an event to traces. An event can be attached to a span and consists of start and end times, tags, and summaries, allowing us to attach any desired information to the Trace.\nWhen performing eBPF network profiling, two events can be generated based on the request-response data. Figure 5 illustrates what happens when a service performs an HTTP request with profiling. The trace system generates trace context information and sends it in the request. When the service executes in the kernel, we can generate an event for the corresponding trace span by interacting with the request-response data and execution time in the kernel space.\nPreviously, we could only observe the execution status in the user space. However, by combining traces and eBPF technologies, we can now also get more information about the current trace in the kernel space, which would impact less performance for the target service if we do similar things in the tracing SDK and agent.\nFigure 5: Logical view of profiling an HTTP request and response.\nSampling To ensure efficient data storage and minimize unnecessary data sampling, we use a sampling mechanism for traces in our system. This mechanism triggers sampling only when certain conditions are met. We also provide a list of the top N traces, which allows users to quickly access the relevant request information for a specific trace.\nTo help users easily identify and analyze relevant events, we offer three different sampling rules:\n Slow Traces: Sampling is triggered when the response time for a request exceeds a specified threshold. Response Status [400, 500): Sampling is triggered when the response status code is greater than or equal to 400 and less than 500. Response Status [500, 600): Sampling is triggered when the response status code is greater than or equal to 500 and less than 600.  In addition, we recognize that not all request or response raw data may be necessary for analysis. For example, users may be more interested in requesting data when trying to identify performance issues, while they may be more interested in response data when troubleshooting errors. As such, we also provide configuration options for request or response events to allow users to specify which type of data they would like to sample.\nProfiling in a Service Mesh The SkyWalking and SkyWalking Rover projects have already implemented the HTTP protocol analyze and trace associations. How do they perform when running in a service mesh environment?\nDeployment Figure 6 demonstrates the deployment of SkyWalking and SkyWalking Rover in a service mesh environment. SkyWalking Rover is deployed as a DaemonSet on each machine where a service is located and communicates with the SkyWalking backend cluster. It automatically recognizes the services on the machine and reports metadata information to the SkyWalking backend cluster. 
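Before following a profiling task through this deployment, here is a compact sketch of how the three sampling rules listed above could be evaluated for a single request/response pair (Python; the function name and the 500 ms threshold are illustrative placeholders rather than the agent's real code, since the actual threshold is whatever the user configures):

def matching_rules(duration_ms, status_code, slow_threshold_ms=500):
    # Collect every sampling rule the request/response pair satisfies; it may appear in several lists.
    rules = []
    if duration_ms > slow_threshold_ms:
        rules.append("slow_trace")
    if 400 <= status_code < 500:
        rules.append("status_400_499")
    if 500 <= status_code < 600:
        rules.append("status_500_599")
    return rules

# Example: a 404 answered in 120 ms only lands in the 4xx list.
print(matching_rules(duration_ms=120, status_code=404))  # ['status_400_499']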
When a new network profiling task arises, SkyWalking Rover senses the task and analyzes the designated processes, collecting and aggregating network data before ultimately reporting it back to the SkyWalking backend service.\nFigure 6: SkyWalking rover deployment topology in a service mesh.\nTracing Systems Starting from version 9.3.0, the SkyWalking backend fully supports all functions in the Zipkin server. Therefore, the SkyWalking backend can collect traces from both the SkyWalking and Zipkin protocols. Similarly, SkyWalking Rover can identify and analyze trace context in both the SkyWalking and Zipkin trace systems. In the following two sections, network analysis results will be displayed in the SkyWalking and Zipkin UI respectively.\nSkyWalking When SkyWalking performs network profiling, similar to the TCP metrics in the previous article, the SkyWalking UI will first display the topology between processes. When you open the dashboard of the line representing the traffic metrics between processes, you can see the metrics of HTTP traffic from the “HTTP/1.x” tab and the sampled HTTP requests with tracing in the “HTTP Requests” tab.\nAs shown in Figure 7, there are three lists in the tab, each corresponding to a condition in the event sampling rules. Each list displays the traces that meet the pre-specified conditions. When you click on an item in the trace list, you can view the complete trace.\nFigure 7: Sampled HTTP requests within tracing context.\nWhen you click on an item in the trace list, you can quickly view the specified trace. In Figure 8, we can see that in the current service-related span, there is a tag with a number indicating how many HTTP events are related to that trace span.\nSince we are in a service mesh environment, each service involves interacting with Envoy. Therefore, the current span includes Envoy’s request and response information. Additionally, since the current service has both incoming and outgoing requests, there are events in the corresponding span.\nFigure 8: Events in the trace detail.\nWhen the span is clicked, the details of the span will be displayed. If there are events in the current span, the relevant event information will be displayed on a time axis. As shown in Figure 9, there are a total of 6 related events in the current Span. Each event represents a data sample of an HTTP request/response. One of the events spans multiple time ranges, indicating a longer system call time. It may be due to a blocked system call, depending on the implementation details of the HTTP request in different languages. This can also help us query the possible causes of errors.\nFigure 9: Events in one trace span.\nFinally, we can click on a specific event to see its complete information. As shown in Figure 10, it displays the sampling information of a request, including the SkyWalking trace context protocol contained in the request header from the HTTP raw data. The raw request data allows you to quickly re-request the request to solve any issues.\nFigure 10: The detail of the event.\nZipkin Zipkin is one of the most widely used distributed tracing systems in the world. SkyWalking can function as an alternative server to provide advanced features for Zipkin users. Here, we use this way to bring the feature into the Zipkin ecosystem out-of-box. 
The new events would also be treated as a kind of Zipkin’s tags and annotations.\nTo add events to a Zipkin span, we need to do the following:\n Split the start and end times of each event into two annotations with a canonical name. Add the sampled HTTP raw data from the event to the Zipkin span tags, using the same event name for corresponding purposes.  Figures 11 and 12 show annotations and tags in the same span. In these figures, we can see that the span includes at least two events with the same event name and sequence suffix (e.g., “Start/Finished HTTP Request/Response Sampling-x” in the figure). Both events have separate timestamps to represent their relative times within the span. In the tags, the data content of the corresponding event is represented by the event name and sequence number, respectively.\nFigure 11: Event timestamp in the Zipkin span annotation.\nFigure 12: Event raw data in the Zipkin span tag.\nDemo In this section, we demonstrate how to perform network profiling in a service mesh and complete metrics collection and HTTP raw data sampling. To follow along, you will need a running Kubernetes environment.\nDeploy SkyWalking Showcase SkyWalking Showcase contains a complete set of example services and can be monitored using SkyWalking. For more information, please check the official documentation.\nIn this demo, we only deploy service, the latest released SkyWalking OAP, and UI.\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.3.0 export SW_UI_IMAGE=apache/skywalking-ui:9.3.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.4.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes After deployment is complete, please run the following script to open SkyWalking UI: http://localhost:8080/.\nkubectl port-forward svc/ui 8080:8080 --namespace default Start Network Profiling Task Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn figure 13, we have selected an instance with a list of tasks in the network profiling tab.\nFigure 13: Network Profiling tab in the Data Plane.\nWhen we click the Start button, as shown in Figure 14, we need to specify the sampling rules for the profiling task. The sampling rules consist of one or more rules, each of which is distinguished by a different URI regular expression. When the HTTP request URI matches the regular expression, the rule is used. If the URI regular expression is empty, the default rule is used. Using multiple rules can help us make different sampling configurations for different requests.\nEach rule has three parameters to determine if sampling is needed:\n Minimal Request Duration (ms): requests with a response time exceeding the specified time will be sampled. Sampling response status code between 400 and 499: all status codes in the range [400-499) will be sampled. Sampling response status code between 500 and 599: all status codes in the range [500-599) will be sampled.  Once the sampling configuration is complete, we can create the task.\nFigure 14: Create network profiling task page.\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nWhen you click on the line between processes, you can view the data between the two processes, which is divided into three tabs:\n TCP: displays TCP-related metrics. HTTP/1.x: displays metrics in the HTTP 1 protocol. 
HTTP Requests: displays the analyzed request and saves it to a list according to the sampling rule.  Figure 16: TCP metrics in a network profiling task.\nFigure 17: HTTP/1.x metrics in a network profiling task.\nFigure 18: HTTP sampled requests in a network profiling task.\nConclusion In this article, we detailed the overview of how to analyze the Layer 7 HTTP/1.x protocol in network analysis, and how to associate it with existing trace systems. This allows us to extend the scope of data we can observe from just user space to also include kernel-space data.\nIn the future, we will delve further into the analysis of kernel data, such as collecting information on TCP packet size, transmission frequency, network card, and help on enhancing distributed tracing from another perspective.\nAdditional Resources  SkyWalking Github Repo › SkyWalking Rover Github Repo › SkyWalking Rover Documentation › Diagnose Service Mesh Network Performance with eBPF blog post \u0026gt; SkyWalking Profiling Documentation \u0026gt; SkyWalking Trace Context Propagation \u0026gt; Zipkin Trace Context Propagation \u0026gt; RFC - Hypertext Transfer Protocol – HTTP/1.1 \u0026gt;  ","title":"eBPF enhanced HTTP observability - L7 metrics and tracing","url":"/blog/ebpf-enhanced-http-observability-l7-metrics-and-tracing/"},{"content":"背景 Apache SkyWalking 是一个开源应用性能管理系统,帮助用户收集和聚合日志、追踪、指标和事件,并在 UI 上显示。在上一篇文章中,我们介绍了如何使用 Apache SkyWalking Rover 分析服务网格环境中的网络性能问题。但是,在商业场景中,用户通常依靠成熟的第 7 层协议(如 HTTP)来进行系统之间的交互。在本文中,我们将讨论如何使用 eBPF 技术来分析第 7 层协议的性能瓶颈,以及如何使用网络采样来增强追踪系统。\n本文将演示如何使用 Apache SkyWalking 与 eBPF 来增强 HTTP 可观察性中的指标和追踪。\nHTTP 协议分析 HTTP 是最常用的 7 层协议之一,通常用于为外部方提供服务和进行系统间通信。在下面的章节中,我们将展示如何识别和分析 HTTP/1.x 协议。\n协议识别 在 HTTP/1.x 中,客户端和服务器通过两端的单个文件描述符(File Descriptor)进行通信。图 1 显示了涉及以下步骤的通信过程:\n Connect/Accept:客户端与 HTTP 服务器建立连接,或者服务器接受客户端的连接。 Read/Write(多次):客户端或服务器读取和写入 HTTPS 请求和响应。单个请求 - 响应对在每边的同一连接内发生。 Close:客户端和服务器关闭连接。  为了获取 HTTP 内容,必须从此过程的第二步读取它。根据 RFC 定义,内容包含在 4 层协议的数据中,可以通过解析数据来获取。请求和响应对可以相关联,因为它们都在两端的同一连接内发生。\n图 1:HTTP 通信时间线。\nHTTP 管线化 HTTP 管线化(Pipelining)是 HTTP/1.1 的一个特性,允许在等待对应的响应的情况下在单个 TCP 连接上发送多个 HTTP 请求。这个特性很重要,因为它确保了服务器端的响应顺序必须与请求的顺序匹配。\n图 2 说明了这是如何工作的,考虑以下情况:HTTP 客户端向服务器发送多个请求,服务器通过按照请求的顺序发送 HTTP 响应来响应。这意味着客户端发送的第一个请求将收到服务器的第一个响应,第二个请求将收到第二个响应,以此类推。\n在设计 HTTP 解析时,我们应该遵循这个原则,将请求数据添加到列表中,并在解析响应时删除第一个项目。这可以确保响应按正确的顺序处理。\n图 2: HTTP/1.1 管道。\n指标 根据前文提到的 HTTP 内容和流程拓扑图的识别,我们可以将这两者结合起来生成进程间的指标数据。\n图 3 显示了目前支持两个进程间分析的指标。基于 HTTP 请求和响应数据,可以分析以下数据:\n   指标名称 类型 单位 描述     请求 CPM(Call Per Minute) 计数器 计数 HTTP 请求计数   响应状态 CPM (Call Per Minute) 计数器 计数 每个 HTTP 响应状态码的计数   请求包大小 计数器 / 直方图 字节 请求包大小   响应包大小 计数器 / 直方图 字节 响应包大小   客户端持续时间 计数器 / 直方图 毫秒 客户端单个 HTTP 响应的持续时间   服务器持续时间 计数器 / 直方图 毫秒 服务器端单个 HTTP 响应的持续时间    图 3:进程到进程指标。\nHTTP 和追踪 在 HTTP 过程中,如果我们能够从原始数据中解包 HTTP 请求和响应,就可以使用这些数据与现有的追踪系统进行关联。\n追踪上下文标识 为了追踪多个服务之间的请求流,追踪系统通常在请求进入服务时创建追踪上下文,并在请求 - 响应过程中将其传递给其他服务。例如,当 HTTP 请求发送到另一个服务器时,追踪上下文包含在请求头中。\n图 4 显示了 Wireshark 拦截的 HTTP 请求的原始内容。由 Zipkin Tracing 系统生成的追踪上下文信息可以通过头中的 “X-B3” 前缀进行标识。通过使用 eBPF 拦截 HTTP 头中的追踪上下文,可以将当前请求与追踪系统连接起来。\n图 4:Wireshark 中的 HTTP Header 视图。\nTrace 事件 我们已经将事件这个概念加入了追踪中。事件可以附加到跨度上,并包含起始和结束时间、标签和摘要,允许我们将任何所需的信息附加到追踪中。\n在执行 eBPF 网络分析时,可以根据请求 - 响应数据生成两个事件。图 5 说明了在带分析的情况下执行 HTTP 请求时发生的情况。追踪系统生成追踪上下文信息并将其发送到请求中。当服务在内核中执行时,我们可以通过与内核空间中的请求 - 响应数据和执行时间交互,为相应的追踪跨度生成事件。\n以前,我们只能观察用户空间的执行状态。现在,通过结合追踪和 eBPF 技术,我们还可以在内核空间获取更多关于当前追踪的信息,如果我们在追踪 SDK 和代理中执行类似的操作,将对目标服务的性能产生较小的影响。\n图 5:分析 HTTP 请求和响应的逻辑视图。\n抽样 该机制仅在满足特定条件时触发抽样。我们还提供了前 N 条追踪的列表,允许用户快速访问特定追踪的相关请求信息。为了帮助用户轻松识别和分析相关事件,我们提供了三种不同的抽样规则:\n 慢速追踪:当请求的响应时间超过指定阈值时触发抽样。 响应状态 [400,500):当响应状态代码大于或等于 
400 且小于 500 时触发抽样。 响应状态 [500,600):当响应状态代码大于或等于 500 且小于 600 时触发抽样。  此外,我们认识到分析时可能并不需要所有请求或响应的原始数据。例如,当试图识别性能问题时,用户可能更感兴趣于请求数据,而在解决错误时,他们可能更感兴趣于响应数据。因此,我们还提供了请求或响应事件的配置选项,允许用户指定要抽样的数据类型。\n服务网格中的分析 SkyWalking Rover 项目已经实现了 HTTP 协议的分析和追踪关联。当在服务网格环境中运行时它们的表现如何?\n部署 图 6 演示了 SkyWalking 和 SkyWalking Rover 在服务网格环境中的部署方式。SkyWalking Rover 作为一个 DaemonSet 部署在每台服务所在的机器上,并与 SkyWalking 后端集群通信。它会自动识别机器上的服务并向 SkyWalking 后端集群报告元数据信息。当出现新的网络分析任务时,SkyWalking Rover 会感知该任务并对指定的进程进行分析,在最终将数据报告回 SkyWalking 后端服务之前,收集和聚合网络数据。\n图 6:服务网格中的 SkyWalking rover 部署拓扑。\n追踪系统 从版本 9.3.0 开始,SkyWalking 后端完全支持 Zipkin 服务器中的所有功能。因此,SkyWalking 后端可以收集来自 SkyWalking 和 Zipkin 协议的追踪。同样,SkyWalking Rover 可以在 SkyWalking 和 Zipkin 追踪系统中识别和分析追踪上下文。在接下来的两节中,网络分析结果将分别在 SkyWalking 和 Zipkin UI 中显示。\nSkyWalking 当 SkyWalking 执行网络分析时,与前文中的 TCP 指标类似,SkyWalking UI 会首先显示进程间的拓扑图。当打开代表进程间流量指标的线的仪表板时,您可以在 “HTTP/1.x” 选项卡中看到 HTTP 流量的指标,并在 “HTTP Requests” 选项卡中看到带追踪的抽样的 HTTP 请求。\n如图 7 所示,选项卡中有三个列表,每个列表对应事件抽样规则中的一个条件。每个列表显示符合预先规定条件的追踪。当您单击追踪列表中的一个项目时,就可以查看完整的追踪。\n图 7:Tracing 上下文中的采样 HTTP 请求。\n当您单击追踪列表中的一个项目时,就可以快速查看指定的追踪。在图 8 中,我们可以看到在当前的服务相关的跨度中,有一个带有数字的标签,表示与该追踪跨度相关的 HTTP 事件数。\n由于我们在服务网格环境中,每个服务都涉及与 Envoy 交互。因此,当前的跨度包括 Envoy 的请求和响应信息。此外,由于当前的服务有传入和传出的请求,因此相应的跨度中有事件。\n图 8:Tracing 详细信息中的事件。\n当单击跨度时,将显示跨度的详细信息。如果当前跨度中有事件,则相关事件信息将在时间轴上显示。如图 9 所示,当前跨度中一共有 6 个相关事件。每个事件代表一个 HTTP 请求 / 响应的数据样本。其中一个事件跨越多个时间范围,表示较长的系统调用时间。这可能是由于系统调用被阻塞,具体取决于不同语言中的 HTTP 请求的实现细节。这也可以帮助我们查询错误的可能原因。\n图 9:一个 Tracing 范围内的事件。\n最后,我们可以单击特定的事件查看它的完整信息。如图 10 所示,它显示了一个请求的抽样信息,包括从 HTTP 原始数据中的请求头中包含的 SkyWalking 追踪上下文协议。原始请求数据允许您快速重新请求以解决任何问题。\n图 10:事件的详细信息。\nZipkin Zipkin 是世界上广泛使用的分布式追踪系统。SkyWalking 可以作为替代服务器,提供高级功能。在这里,我们使用这种方式将功能无缝集成到 Zipkin 生态系统中。新事件也将被视为 Zipkin 的标签和注释的一种。\n为 Zipkin 跨度添加事件,需要执行以下操作:\n 将每个事件的开始时间和结束时间分别拆分为两个具有规范名称的注释。 将抽样的 HTTP 原始数据从事件添加到 Zipkin 跨度标签中,使用相同的事件名称用于相应的目的。  图 11 和图 12 显示了同一跨度中的注释和标签。在这些图中,我们可以看到跨度包含至少两个具有相同事件名称和序列后缀的事件(例如,图中的 “Start/Finished HTTP Request/Response Sampling-x”)。这两个事件均具有单独的时间戳,用于表示其在跨度内的相对时间。在标签中,对应事件的数据内容分别由事件名称和序列号表示。\n图 11:Zipkin span 注释中的事件时间戳。\n图 12:Zipkin span 标签中的事件原始数据。\n演示 在本节中,我们将演示如何在服务网格中执行网络分析,并完成指标收集和 HTTP 原始数据抽样。要进行操作,您需要一个运行中的 Kubernetes 环境。\n部署 SkyWalking Showcase SkyWalking Showcase 包含一套完整的示例服务,可以使用 SkyWalking 进行监控。有关详细信息,请参阅官方文档。\n在本演示中,我们只部署了服务、最新发布的 SkyWalking OAP 和 UI。\nexport SW_OAP_IMAGE=apache/skywalking-oap-server:9.3.0 export SW_UI_IMAGE=apache/skywalking-ui:9.3.0 export SW_ROVER_IMAGE=apache/skywalking-rover:0.4.0 export FEATURE_FLAGS=mesh-with-agent,single-node,elasticsearch,rover make deploy.kubernetes 部署完成后,运行下面的脚本启动 SkyWalking UI:http://localhost:8080/。\nkubectl port-forward svc/ui 8080:8080 --namespace default 启动网络分析任务 目前,我们可以通过单击服务网格面板中的 Data Plane 项和 Kubernetes 面板中的 Service 项来选择要监视的特定实例。\n在图 13 中,我们已在网络分析选项卡中选择了一个具有任务列表的实例。\n图 13:数据平面中的网络分析选项卡。\n当我们单击 “开始” 按钮时,如图 14 所示,我们需要为分析任务指定抽样规则。抽样规则由一个或多个规则组成,每个规则都由不同的 URI 正则表达式区分。当 HTTP 请求的 URI 与正则表达式匹配时,将使用该规则。如果 URI 正则表达式为空,则使用默认规则。使用多个规则可以帮助我们为不同的请求配置不同的抽样配置。\n每个规则都有三个参数来确定是否需要抽样:\n 最小请求持续时间(毫秒):响应时间超过指定时间的请求将被抽样。 在 400 和 499 之间的抽样响应状态代码:范围 [400-499) 中的所有状态代码将被抽样。 在 500 和 599 之间的抽样响应状态代码:范围 [500-599) 中的所有状态码将被抽样。  抽样配置完成后,我们就可以创建任务了。\n图 14:创建网络分析任务页面。\n完成 几秒钟后,你会看到页面的右侧出现进程拓扑结构。\n图 15:网络分析任务中的流程拓扑。\n当您单击进程之间的线时,您可以查看两个过程之间的数据,它被分为三个选项卡:\n TCP:显示与 TCP 相关的指标。 HTTP/1.x:显示 HTTP 1 协议中的指标。 HTTP 请求:显示已分析的请求,并根据抽样规则保存到列表中。  图 16:网络分析任务中的 TCP 指标。\n图 17:网络分析任务中的 HTTP/1.x 指标。\n图 18:网络分析任务中的 HTTP 采样请求。\n总结 在本文中,我们详细介绍了如何在网络分析中分析 7 层 HTTP/1.x 协议,以及如何将其与现有追踪系统相关联。这使我们能够将我们能够观察到的数据从用户空间扩展到内核空间数据。\n在未来,我们将进一步探究内核数据的分析,例如收集 TCP 包大小、传输频率、网卡等信息,并从另一个角度提升分布式追踪。\n其他资源  SkyWalking Github Repo › 
SkyWalking Rover Github Repo › SkyWalking Rover Documentation › Diagnose Service Mesh Network Performance with eBPF blog post \u0026gt; SkyWalking Profiling Documentation \u0026gt; SkyWalking Trace Context Propagation \u0026gt; Zipkin Trace Context Propagation \u0026gt; RFC - Hypertext Transfer Protocol – HTTP/1.1 \u0026gt;  ","title":"使用 eBPF 提升 HTTP 可观测性 - L7 指标和追踪","url":"/zh/ebpf-enhanced-http-observability-l7-metrics-and-tracing/"},{"content":"SkyWalking Rust 0.5.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Add management support. by @jmjoy in https://github.com/apache/skywalking-rust/pull/48 Add missing_docs lint and supply documents. by @jmjoy in https://github.com/apache/skywalking-rust/pull/49 Add authentication and custom intercept support. by @jmjoy in https://github.com/apache/skywalking-rust/pull/50 Bump to 0.5.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/51  ","title":"Release Apache SkyWalking Rust 0.5.0","url":"/events/release-apache-skywalking-rust-0-5-0/"},{"content":"SkyWalking Satellite 1.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support transmit the OpenTelemetry Metrics protocol. Upgrade to GO 1.18. Add Docker images for arm64 architecture. Support transmit Span Attached Event protocol data. Support dotnet CLRMetric forward.  Bug Fixes  Fix the missing return data when receive metrics in batch mode. Fix CVE-2022-21698, CVE-2022-27664.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 1.1.0","url":"/events/release-apache-skwaylking-satellite-1-1-0/"},{"content":"Apache SkyWalking is an open-source APM for a distributed system, Apache Software Foundation top-level project.\nOn Jan. 3rd, 2023, we received reports about Aliyun Trace Analysis Service. It provides a cloud service compatible with SkyWalking trace APIs and agents.\nOn their product page, there is a best-practice document describing about their service is not SkyWalking OAP, but can work with SkyWalking agents to support SkyWalking\u0026rsquo;s In-Process(Trace) Profiling.\nBUT, they copied the whole page of SkyWalking\u0026rsquo;s profiling UI, including page layout, words, and profiling task setup. The only difference is the color schemes.\nSkyWalking UI Aliyun Trace Analysis UI on their document page  The UI visualization is a part of the copyright. Aliyun declared their backend is NOT a re-distribution of SkyWalking repeatedly on their website, and they never mentioned this page is actually copied from upstream.\nThis is a LICENSE issue, violating SkyWalking\u0026rsquo;s copyright and Apache 2.0 License. They don\u0026rsquo;t respect Apache Software Foundation and Apache SkyWalking\u0026rsquo;s IP and Branding.\n","title":"[License Issue] Aliyun(阿里云)'s trace analysis service copied SkyWalking's trace profiling page.","url":"/blog/2023-01-03-aliyun-copy-page/"},{"content":"SkyWalking Rover 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Enhancing the render context for the Kubernetes process. Simplify the logic of network protocol analysis. Upgrade Go library to 1.18, eBPF library to 0.9.3. Make the Profiling module compatible with more Linux systems. Support monitor HTTP/1.x in the NETWORK profiling.  Bug Fixes Documentation  Adding support version of Linux documentation.  
Issues and PR  All issues are here All pull requests are here  ","title":"Release Apache SkyWalking Rover 0.4.0","url":"/events/release-apache-skwaylking-rover-0-4-0/"},{"content":"Observability for modern distributed applications is critical for understanding how they behave under a variety of conditions and for troubleshooting and resolving issues when they arise. Traces, metrics, and logs are regarded as fundamental parts of the observability stack. Traces are the footprints of distributed system executions, while metrics measure system performance with numbers on a timeline. Essentially, they measure performance from two dimensions. Being able to quickly visualize the connection between traces and corresponding metrics makes it possible to quickly diagnose which process flows are correlated to potentially pathological behavior. This powerful new capability is now available in SkyWalking 9.3.0.\nThe SkyWalking project started only with tracing, with a focus on 100% sampling-based metrics and topology analysis since 2018. When users face anomaly trends in time-series metrics, like a peak on the line chart, or a histogram that shows a larger gap between p95 and p99, the immediate question is, why is this happening? One of SkyWalking\u0026rsquo;s latest features, the trace-metric association, makes it much easier to answer that question and to address the root cause.\nHow Are Metrics Generated? SkyWalking provides three ways to calculate metrics:\n Metrics built from trace spans, depending on the span’s layer, kind, and tags. Metrics extracted from logs—a kind of keyword and tags-based metrics extraction. Metrics reported from mature and mainstream metrics/meter systems, such as OpenTelemetry, Prometheus, and Zabbix.  Tracing tracks the processes of requests between an application\u0026rsquo;s services. Most systems that generate traffic and performance-related metrics also generate tracing data, either from server-side trace-based aggregations or through client SDKs.\nUse SkyWalking to Reduce the Traditional Cost of Trace Indexing Tracing data and visualization are critical troubleshooting tools for both developers and operators alike because of how helpful they are in locating issue boundaries. But, because it has traditionally been difficult to find associations between metrics and traces, teams have added more and more tags to their spans and searched through various combinations. This trend of increased instrumentation and searching has required increased infrastructure investment to support this kind of search. SkyWalking\u0026rsquo;s metrics and tracing association capabilities can help reduce the cost of indexing and searching that data.\nFind the Associated Trace When looking for associations between metrics and traces, the kind of metrics we\u0026rsquo;re dealing with determines their relationships to traces. Let’s review the standard request rate, error, and duration (RED) metrics to see how it works.\nSuccess Rate Metrics The success rate is determined by the return code, RPC response code, or exceptions of the process. 
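As a rough illustration of the idea (a simplified sketch, not SkyWalking's actual aggregation code; the span fields and values here are hypothetical), the per-minute success rate can be derived from span outcomes like this:

def success_rate(spans):
    # Percentage of spans in one minute that completed without an error flag.
    if not spans:
        return 100.0
    ok = sum(1 for span in spans if not span["is_error"])
    return 100.0 * ok / len(spans)

# Hypothetical spans collected for one service during a single minute.
minute_of_spans = [
    {"endpoint": "/productpage", "is_error": False},
    {"endpoint": "/test", "is_error": True},  # e.g. a 404 response marks the span as an error
    {"endpoint": "/productpage", "is_error": False},
]
rate = success_rate(minute_of_spans)  # roughly 66.7 for this sample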
When the success rate decreases, looking for errors in the traces of this service or pod are the first place to look to find clues.\nFigure 1: The success rate graph from SkyWalking\u0026rsquo;s 9.3.0 dashboard with the option to view related traces at a particular time.\nDrilling down from the peak of the success rate, SkyWalking lists all traces and their error status that were collected in this particular minute (Figure 2):\nFigure 2: SkyWalking shows related traces with an error status.\nRequests to /test can be located from the trace, and the span’s tag indicates a 404 response code of the HTTP request.\nFigure 3: A detail view of a request to http://frontend/test showing that the URI doesn\u0026rsquo;t exist.\nBy looking at the trace data, it becomes immediately clear that the drop in success rate is caused by requests to a nonexistent URI.\nAverage Response Time The average response time metric provides a general overview of service performance. When average response time is unstable, this usually means that the system is facing serious performance impacts.\nFigure 4: SkyWalking\u0026rsquo;s query UI for searching for related traces showing traces for requests that exceed a particular duration threshold.\nWhen you drill down from this metric, this query condition (Figure 4) will reveal the slowest traces of the service in this specific minute. Notice, at least 168ms is added as a condition automatically, to avoid scanning a large number of rows in the Database.\nApdex Apdex—the Application Performance Index—is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times (Figure 5). The response time is measured from an asset request to completed delivery back to the requestor.\nFigure 5: The Apdex formula\nA user defines a response time tolerating threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nWhen the Apdex score decreases, we need to find related traces from two perspectives: slow traces and error status traces. SkyWalking\u0026rsquo;s new related tracing features offers a quick way to view both (Figure 6) directly from the Apdex graph.\nFigure 6: Show slow trace and error status traces from the Apdex graph\nService Response Time Percentile MetricThe percentile graph (Figure 7) provides p50, p75, p90, p95, and p99 latency ranks to measure the long-tail issues of service performance.\nFigure 7: The service response time percentile graph helps to highlight long-tail issues of service performance.\nThis percentile graph shows a typical long-tail issue. P99 latency is four times slower than the P95. When we use the association, we see the traces with latency between P95 - P99 and P99 - Infinity.\nThe traces of requests causing this kind of long-tail phenomena are automatically listing from there.\nFigure 8: Query parameters to search for traces based on latency.\nAre More Associations Available? 
SkyWalking provides more than just associations between traces and metrics to help you find possible causal relationships and to avoid looking for the proverbial needle in a haystack.\nCurrently, SkyWalking 9.3.0 offers two more associations: metric-to-metric associations and event-to-metric associations.\nMetric-to-metric Associations There are dozens of metrics on the dashboard—which is great for getting a complete picture of application behavior. During a typical performance issue, the peaks of multiple metrics are affected simultaneously. But, trying to correlate peaks across all of these graphs can be difficult\u0026hellip;\nNow in SkyWalking 9.3.0, when you click the peak of one graph, the pop-out box lets you see associated metrics.\nFigure 9: SkyWalking\u0026rsquo;s option to view associated metrics.\nWhen you choose that option, axis pointers (the dotted vertical lines) appear in all associated metrics graphs, as in Figure 10. This makes it easier to correlate the peaks in different graphs with each other. Often, these correlated peaks will have the same root cause.\nFigure 10: Axis pointers (vertical dotted lines) show associations between peaks across multiple metrics graphs.\nEvent-to-Metric Associations SkyWalking provides the event concept to associate possible service performance impacts caused by the infrastructure, such as a new deployment event from Kubernetes, or an anomaly detected by alerting or an integrated AIOps engine.\nThe event-to-metric association is also automatic: it overlays the time range of the event on the metric graphs (blue areas). If the event area and the peaks match, this event most likely explains the anomaly.\nFigure 11: SkyWalking\u0026rsquo;s event to metric association view.\nSkyWalking Makes it Easier and Faster to Find Root Causes SkyWalking now makes it easy to find associations between metrics, events, and traces, ultimately making it possible to identify root causes and fix problems fast. The associations we\u0026rsquo;ve discussed in this article are available out-of-the-box in the SkyWalking 9.3.0 release.\nFigure 12: Just click on the dots to see related traces and metrics associations.\nClick the dots on any metric graph, and you will see a View Related Traces item pop out if this metric has logically mapped traces.\nConclusion In this blog, we took a look at the newly-added association feature between metrics and traces. With this new visualization, it\u0026rsquo;s now much easier to find key traces to identify the root cause of issues. Associations in SkyWalking can go even deeper: associations from metrics to traces are not the end of diagnosing system bottlenecks. In the next post, we will introduce an eBPF-powered trace enhancement where you’ll be able to see HTTP request and response details associated with tracing spans from network profiling. Stay tuned.\n","title":"Boost Root Cause Analysis Quickly With SkyWalking’s New Trace-Metrics Association Feature","url":"/blog/boost-root-cause-analysis-quickly-with-skywalking-new-trace-metrics-association-feature/"},{"content":"现代分布式应用程序工作的可观测性对于了解它们在各种条件下的行为方式以及在出现问题时进行故障排除和解决至关重要。追踪、指标和日志被视为可观测性堆栈的基本部分。Trace 是分布式系统执行的足迹,而 metric 则是用时间轴上的数字衡量系统性能。本质上,它们从两个维度衡量性能。能够快速可视化追踪和相应指标之间的联系,可以快速诊断哪些流程与潜在的异常相关。SkyWalking 9.3.0 现在提供了这一强大的新功能。\nSkyWalking 项目从 tracing 开始,从 2018 年开始专注于 100% 基于采样的指标和拓扑分析。当用户面对时间序列指标的异常趋势时,比如折线图上的峰值,或者直方图显示 p95 和 p99 之间的差距较大,直接的问题是,为什么会出现这种情况?SkyWalking 的最新功能之一,trace 与 metric 关联,使得回答这个问题和解决根本原因更加容易。\n指标是如何生成的? 
SkyWalking 提供了三种计算指标的方式:\n 根据追踪跨度构建的指标,具体取决于跨度的层、种类和标签。 从日志中提取指标—— 一种基于关键词和标签的指标提取。 从成熟和主流的指标 / 仪表系统报告的指标,例如 OpenTelemetry、Prometheus 和 Zabbix。  Tracing 追踪应用程序服务之间的请求过程。大多数生成流量和性能相关指标的系统也会生成追踪数据,这些数据来自服务器端基于追踪的聚合或通过客户端 SDK。\n使用 SkyWalking 降低追踪索引的传统成本 Trace 数据和可视化对于开发人员和运维人员来说都是至关重要的故障排除工具,因为它们在定位问题边界方面非常有帮助。但是,由于传统上很难找到指标和痕迹之间的关联,团队已经将越来越多的标签添加到跨度中,并搜索各种组合。这种增加仪器和搜索的趋势需要增加基础设施投资来支持这种搜索。SkyWalking 的指标和追踪关联功能有助于降低索引和搜索该数据的成本。\n查找关联的 trace 在寻找 metric 和 trace 之间的关联时,我们处理的指标类型决定了它们与 trace 的关系。让我们回顾一下标准请求*率、错误和持续时间(RED)*指标,看看它是如何工作的。\n成功率指标 成功率由返回码、RPC 响应码或进程异常决定。当成功率下降时,在这个服务或 Pod 的 trace 中寻找错误是第一个寻找线索的地方。\n图 1:SkyWalking 9.3.0 仪表板的成功率图表,带有在特定时间查看相关 trace 的选项。\n从成功率的峰值向下探索,SkyWalking 列出了在这一特定分钟内收集的所有 trace 及其错误状态(图 2):\n图 2:SkyWalking 显示具有错误状态的相关追踪。\n可以从 trace 中找到对 /test 的请求,并且 span 的标记指示 HTTP 请求的 404 响应代码。\n图 3:显示 URI 不存在的 http://frontend/test 请求的详细视图。\n通过查看 trace 数据,很明显成功率的下降是由对不存在的 URI 的请求引起的。\n平均响应时间 平均响应时间指标提供了服务性能的一般概览。当平均响应时间不稳定时,这通常意味着系统面临严重的性能影响。\n图 4:SkyWalking 用于搜索相关 trace 的查询 UI,显示超过特定持续时间阈值的请求的 trace。\n当您从该指标向下探索时,该查询条件(图 4)将揭示该特定分钟内服务的最慢 trace。请注意,至少 168ms 作为条件自动添加,以避免扫描数据库中的大量行。\nApdex Apdex(应用程序性能指数)是根据设定的阈值衡量响应时间的指标。它测量令人满意的响应时间与不令人满意的响应时间的比率(图 5)。响应时间是从资产请求到完成交付回请求者的时间。\n图 5:Apdex 公式\n用户定义响应时间容忍阈值 T。在 T 或更短时间内处理的所有响应都使用户满意。\n例如,如果 T 为 1.2 秒,响应在 0.5 秒内完成,则用户会感到满意。所有大于 1.2 秒的响应都会让用户不满意。超过 4.8 秒的响应会让用户感到沮丧。\n当 Apdex 分数下降时,我们需要从两个角度寻找相关的 trace:慢速和错误状态的 trace。SkyWalking 的新相关追踪功能提供了一种直接从 Apdex 图表查看两者(图 6)的快速方法。\n图 6:显示 Apdex 图中的慢速 trace 和错误状态 trace\n服务响应时间 百分位指标百分位图(图 7)提供 p50、p75、p90、p95 和 p99 延迟排名,以衡量服务性能的长尾问题。\n图 7:服务响应时间百分位图有助于突出服务性能的长尾问题。\n这个百分位数图显示了一个典型的长尾问题。P99 延迟比 P95 慢四倍。当我们使用关联时,我们会看到 P95 - P99 和 P99 - Infinity 之间具有延迟的 trace。\n造成这种长尾现象的请求 trace,就是从那里自动列出来的。\n图 8:用于根据延迟搜索 trace 的查询参数。\n是否有更多关联可用? SkyWalking 提供的不仅仅是 trace 和 metric 之间的关联,还可以帮助您找到可能的因果关系,避免大海捞针。\n目前,SkyWalking 9.3.0 提供了两种关联:metric-to-metric 关联和 event-to-metric 关联。\nMetric-to-metric 关联 仪表板上有许多指标 —— 这对于全面了解应用程序行为非常有用。在典型的性能问题中,多个指标的峰值会同时受到影响。但是,尝试关联所有这些图表中的峰值可能很困难……\n现在在 SkyWalking 9.3.0 中,当你点击一个图表的峰值时,弹出框可以让你看到相关的指标。\n图 9:SkyWalking 用于查看相关指标的选项。\n当您选择该选项时,所有关联的指标图表将在所有关联的图表中显示轴指针(垂直虚线),如图 10 所示。这使得将不同图表中的峰值相互关联起来变得更加容易。通常,这些相关的峰值具有相同的根本原因。\n图 10:轴指针(垂直虚线)显示多个指标图中峰值之间的关联。\nEvent-to-metric 关联 SkyWalking 提供了事件概念来关联可能受基础设施影响的服务性能,例如来自 Kubernetes 的新部署。或者,已通过警报或集成 AIOps 引擎检测到异常。\n事件到指标的关联也是自动的,它可以覆盖指标图上事件的时间范围(蓝色区域)。如果事件区域和峰值匹配,则很可能该事件覆盖了该异常。\n图 11:SkyWalking 的事件与指标关联视图。\nSkyWalking 使查找根本原因变得更加容易和快速 SkyWalking 现在可以轻松找到指标、事件和追踪之间的关联,最终可以确定根本原因并快速解决问题。我们在本文中讨论的关联在 SkyWalking 9.3.0 版本中开箱即用。\n图 12:只需单击圆点即可查看相关 trace 和 metric 关联。\n单击任何指标图上的点,如果该指标具有逻辑映射,您将看到一个查看相关 trace 弹出窗口。\n结论 在这篇博客中,我们了解了 metric 和 trace 之间新增的关联功能。有了这个新的可视化,现在可以更容易地找到关键 trace 来识别问题的根本原因。SkyWalking 中的关联可以更深入。从 metric 到 trace 的关联并不是诊断系统瓶颈的终点。在下一篇文章中,我们将介绍 eBPF 支持的追踪增强功能,您将看到与网络分析中的追踪跨度相关的 HTTP 请求和响应详细信息。敬请关注。\n","title":"SkyWalking 推出 trace-metric 关联功能助力快速根源问题排查","url":"/zh/boost-root-cause-analysis-quickly-with-skywalking-new-trace-metrics-association-feature/"},{"content":"In cloud native applications, a request often needs to be processed through a series of APIs or backend services, some of which are parallel and some serial and located on different platforms or nodes. How do we determine the service paths and nodes a call goes through to help us troubleshoot the problem? 
This is where distributed tracing comes into play.\nThis article covers:\n How distributed tracing works How to choose distributed tracing software How to use distributed tracing in Istio How to view distributed tracing data using Bookinfo and SkyWalking as examples  Distributed Tracing Basics Distributed tracing is a method for tracing requests in a distributed system to help users better understand, control, and optimize distributed systems. There are two concepts used in distributed tracing: TraceID and SpanID. You can see them in Figure 1 below.\n TraceID is a globally unique ID that identifies the trace information of a request. All traces of a request belong to the same TraceID, and the TraceID remains constant throughout the trace of the request. SpanID is a locally unique ID that identifies a request’s trace information at a certain time. A request generates different SpanIDs at different stages, and SpanIDs are used to distinguish the trace information of a request at those stages.  TraceID and SpanID are the basis of distributed tracing. They provide a uniform identifier for request tracing in distributed systems and facilitate users’ ability to query, manage, and analyze the trace information of requests.\nFigure 1: Trace and span\nThe following is the process of distributed tracing:\n When a system receives a request, the distributed tracing system assigns a TraceID to the request, which is used to chain together the entire invocation chain. The distributed trace system generates a SpanID and ParentID for each service call within the system for the request, which is used to record the parent-child relationship of the call; a Span without a ParentID is used as the entry point of the call chain. TraceID and SpanID must be passed along during each service call. When viewing a distributed trace, query the full process of a particular request by TraceID.  How Istio Implements Distributed Tracing Istio’s distributed tracing is based on information collected by the Envoy proxy in the data plane. After a service request is intercepted by Envoy, Envoy adds tracing information as headers to the request forwarded to the destination workload. The following headers are relevant for distributed tracing:\n As TraceID: x-request-id Used to establish parent-child relationships for Span in the LightStep trace: x-ot-span-context Used for Zipkin, also for Jaeger, SkyWalking, see b3-propagation:  x-b3-traceid x-b3-spanid x-b3-parentspanid x-b3-sampled x-b3-flags b3   For Datadog:  x-datadog-trace-id x-datadog-parent-id x-datadog-sampling-priority   For SkyWalking: sw8 For AWS X-Ray: x-amzn-trace-id  For more information on how to use these headers, please see the Envoy documentation.\nRegardless of the language of your application, Envoy will generate the appropriate tracing headers for you at the Ingress Gateway and forward these headers to the upstream cluster. However, in order to utilize the distributed tracing feature, you must modify your application code to attach the tracing headers to upstream requests. Since neither the service mesh nor the application can automatically propagate these headers, you can integrate the agent for distributed tracing into the application or manually propagate these headers in the application code itself. 
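In its simplest form, manual propagation just means copying the tracing headers from the incoming request onto every outgoing call. The sketch below (Python with the requests library; the header list is trimmed for brevity and the function name is a made-up example, not part of any SkyWalking or Istio API) shows the idea, and the Bookinfo Productpage code quoted next does the same thing in full:

import requests

# Subset of the tracing headers listed above; extend it as needed for your tracer.
TRACE_HEADERS = {"x-request-id", "x-b3-traceid", "x-b3-spanid",
                 "x-b3-parentspanid", "x-b3-sampled", "b3", "sw8"}

def call_upstream(incoming_headers, url):
    # Forward only the tracing headers of the incoming request to the upstream service.
    outgoing = {name: value for name, value in incoming_headers.items()
                if name.lower() in TRACE_HEADERS}
    return requests.get(url, headers=outgoing)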
Once the tracing headers are propagated to all upstream requests, Envoy will send the tracing data to the tracer’s back-end processing, and then you can view the tracing data in the UI.\nFor example, look at the code of the Productpage service in the Bookinfo application. You can see that it integrates the Jaeger client library and synchronizes the header generated by Envoy with the HTTP requests to the Details and Reviews services in the getForwardHeaders (request) function.\ndef getForwardHeaders(request): headers = {} # Using Jaeger agent to get the x-b3-* headers span = get_current_span() carrier = {} tracer.inject( span_context=span.context, format=Format.HTTP_HEADERS, carrier=carrier) headers.update(carrier) # Dealing with the non x-b3-* header manually if \u0026#39;user\u0026#39; in session: headers[\u0026#39;end-user\u0026#39;] = session[\u0026#39;user\u0026#39;] incoming_headers = [ \u0026#39;x-request-id\u0026#39;, \u0026#39;x-ot-span-context\u0026#39;, \u0026#39;x-datadog-trace-id\u0026#39;, \u0026#39;x-datadog-parent-id\u0026#39;, \u0026#39;x-datadog-sampling-priority\u0026#39;, \u0026#39;traceparent\u0026#39;, \u0026#39;tracestate\u0026#39;, \u0026#39;x-cloud-trace-context\u0026#39;, \u0026#39;grpc-trace-bin\u0026#39;, \u0026#39;sw8\u0026#39;, \u0026#39;user-agent\u0026#39;, \u0026#39;cookie\u0026#39;, \u0026#39;authorization\u0026#39;, \u0026#39;jwt\u0026#39;, ] for ihdr in incoming_headers: val = request.headers.get(ihdr) if val is not None: headers[ihdr] = val return headers For more information, the Istio documentation provides answers to frequently asked questions about distributed tracing in Istio.\nHow to Choose A Distributed Tracing System Distributed tracing systems are similar in principle. There are many such systems on the market, such as Apache SkyWalking, Jaeger, Zipkin, Lightstep, Pinpoint, and so on. For our purposes here, we will choose three of them and compare them in several dimensions. Here are our inclusion criteria:\n They are currently the most popular open-source distributed tracing systems. All are based on the OpenTracing specification. They support integration with Istio and Envoy.     Items Apache SkyWalking Jaeger Zipkin     Implementations Language-based probes, service mesh probes, eBPF agent, third-party instrumental libraries (Zipkin currently supported) Language-based probes Language-based probes   Database ES, H2, MySQL, TiDB, Sharding-sphere, BanyanDB ES, MySQL, Cassandra, Memory ES, MySQL, Cassandra, Memory   Supported Languages Java, Rust, PHP, NodeJS, Go, Python, C++, .Net, Lua Java, Go, Python, NodeJS, C#, PHP, Ruby, C++ Java, Go, Python, NodeJS, C#, PHP, Ruby, C++   Initiator Personal Uber Twitter   Governance Apache Foundation CNCF CNCF   Version 9.3.0 1.39.0 2.23.19   Stars 20.9k 16.8k 15.8k    Although Apache SkyWalking’s agent does not support as many languages as Jaeger and Zipkin, SkyWalking’s implementation is richer and compatible with Jaeger and Zipkin trace data, and development is more active, so it is one of the best choices for building a telemetry platform.\nDemo Refer to the Istio documentation to install and configure Apache SkyWalking.\nEnvironment Description The following is the environment for our demo:\n Kubernetes 1.24.5 Istio 1.16 SkyWalking 9.1.0  Install Istio Before installing Istio, you can check the environment for any problems:\n$ istioctl experimental precheck ✔ No issues found when checking the cluster. Istio is safe to install or upgrade! 
To get started, check out https://istio.io/latest/docs/setup/getting-started/ Then install Istio and configure the destination for sending tracing messages as SkyWalking:\n# Initialize the Istio Operator istioctl operator init # Configure tracing destination kubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: install.istio.io/v1alpha1 kind: IstioOperator metadata: namespace: istio-system name: istio-with-skywalking spec: meshConfig: defaultProviders: tracing: - \u0026#34;skywalking\u0026#34; enableTracing: true extensionProviders: - name: \u0026#34;skywalking\u0026#34; skywalking: service: tracing.istio-system.svc.cluster.local port: 11800 EOF Deploy Apache SkyWalking Istio 1.16 supports distributed tracing using Apache SkyWalking. Install SkyWalking by executing the following code:\nkubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.16/samples/addons/extras/skywalking.yaml It will install the following components under the istio-system namespace:\n SkyWalking Observability Analysis Platform (OAP): Used to receive trace data, supports SkyWalking native data formats, Zipkin v1 and v2, and the Jaeger format. UI: Used to query distributed trace data.  For more information about SkyWalking, please refer to the SkyWalking documentation.\nDeploy the Bookinfo Application Execute the following commands to install the bookinfo application:\nkubectl label namespace default istio-injection=enabled kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml Launch the SkyWalking UI:\nistioctl dashboard skywalking Figure 2 shows all the services available in the bookinfo application:\nFigure 2: SkyWalking General Service page\nYou can also see information about instances, endpoints, topology, tracing, etc. For example, Figure 3 shows the service topology of the bookinfo application:\nFigure 3: Topology diagram of the Bookinfo application\nTracing views in SkyWalking can be displayed in a variety of formats, including list, tree, table, and statistics. See Figure 4:\nFigure 4: SkyWalking General Service trace supports multiple display formats\nTo facilitate our examination, set the sampling rate of the trace to 100%:\nkubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: name: mesh-default namespace: istio-system spec: tracing: - randomSamplingPercentage: 100.00 EOF  Important: It’s generally not good practice to set the sampling rate to 100% in a production environment. To avoid the overhead of generating too many trace logs in production, please adjust the sampling strategy (sampling percentage).\n Uninstall After experimenting, uninstall Istio and SkyWalking by executing the following commands.\nsamples/bookinfo/platform/kube/cleanup.sh istioctl uninstall --purge kubectl delete namespace istio-system Understanding the Bookinfo Tracing Information Navigate to the General Service tab in the Apache SkyWalking UI, and you can see the trace information for the most recent istio-ingressgateway service, as shown in Figure 5. Click on each span to see the details.\nFigure 5: The table view shows the basic information about each span.\nSwitching to the list view, you can see the execution order and duration of each span, as shown in Figure 6:\nFigure 6: List display\nYou might want to know why such a straightforward application generates so much span data. 
Because after we inject the Envoy proxy into the pod, every request between services will be intercepted and processed by Envoy, as shown in Figure 7:\nFigure 7: Envoy intercepts requests to generate a span\nThe tracing process is shown in Figure 8:\nFigure 8: Trace of the Bookinfo application\nWe give each span a label with a serial number, and the time taken is indicated in parentheses. For illustration purposes, we have summarized all spans in the table below.\n   No. Endpoint Total Duration (ms) Component Duration (ms) Current Service Description     1 /productpage 190 0 istio-ingressgateway Envoy Outbound   2 /productpage 190 1 istio-ingressgateway Ingress -\u0026gt; Productpage network transmission   3 /productpage 189 1 productpage Envoy Inbound   4 /productpage 188 21 productpage Application internal processing   5 /details/0 8 1 productpage Envoy Outbound   6 /details/0 7 3 productpage Productpage -\u0026gt; Details network transmission   7 /details/0 4 0 details Envoy Inbound   8 /details/0 4 4 details Application internal processing   9 /reviews/0 159 0 productpage Envoy Outbound   10 /reviews/0 159 14 productpage Productpage -\u0026gt; Reviews network transmission   11 /reviews/0 145 1 reviews Envoy Inbound   12 /reviews/0 144 109 reviews Application internal processing   13 /ratings/0 35 2 reviews Envoy Outbound   14 /ratings/0 33 16 reviews Reviews -\u0026gt; Ratings network transmission   15 /ratings/0 17 1 ratings Envoy Inbound   16 /ratings/0 16 16 ratings Application internal processing    From the above information, it can be seen that:\n The total time consumed for this request is 190 ms. In Istio sidecar mode, each traffic flow in and out of the application container must pass through the Envoy proxy once, each time taking 0 to 2 ms. Network requests between Pods take between 1 and 16ms. This is because the data itself has errors and the start time of the Span is not necessarily equal to the end time of the parent Span. We can see that the most time-consuming part is the Reviews application, which takes 109 ms so that we can optimize it for that application.  Summary Distributed tracing is an indispensable tool for analyzing performance and troubleshooting modern distributed applications. In this tutorial, we’ve seen how, with just a few minor changes to your application code to propagate tracing headers, Istio makes distributed tracing simple to use. We’ve also reviewed Apache SkyWalking as one of the best distributed tracing systems that Istio supports. It is a fully functional platform for cloud native application analytics, with features such as metrics and log collection, alerting, Kubernetes monitoring, service mesh performance diagnosis using eBPF, and more.\n If you’re new to service mesh and Kubernetes security, we have a bunch of free online courses available at Tetrate Academy that will quickly get you up to speed with Istio and Envoy.\nIf you’re looking for a fast way to get to production with Istio, check out Tetrate Istio Distribution (TID). TID is Tetrate’s hardened, fully upstream Istio distribution, with FIPS-verified builds and support available. It’s a great way to get started with Istio knowing you have a trusted distribution to begin with, have an expert team supporting you, and also have the option to get to FIPS compliance quickly if you need to.\nOnce you have Istio up and running, you will probably need simpler ways to manage and secure your services beyond what’s available in Istio, that’s where Tetrate Service Bridge comes in. 
You can learn more about how Tetrate Service Bridge makes service mesh more secure, manageable, and resilient here, or contact us for a quick demo.\n","title":"How to Use SkyWalking for Distributed Tracing in Istio?","url":"/blog/how-to-use-skywalking-for-distributed-tracing-in-istio/"},{"content":"在云原生应用中,一次请求往往需要经过一系列的 API 或后台服务处理才能完成,这些服务有些是并行的,有些是串行的,而且位于不同的平台或节点。那么如何确定一次调用的经过的服务路径和节点以帮助我们进行问题排查?这时候就需要使用到分布式追踪。\n本文将向你介绍:\n 分布式追踪的原理 如何选择分布式追踪软件 在 Istio 中如何使用分布式追踪 以 Bookinfo 和 SkyWalking 为例说明如何查看分布式追踪数据  分布式追踪基础 分布式追踪是一种用来跟踪分布式系统中请求的方法,它可以帮助用户更好地理解、控制和优化分布式系统。分布式追踪中用到了两个概念:TraceID 和 SpanID。\n TraceID 是一个全局唯一的 ID,用来标识一个请求的追踪信息。一个请求的所有追踪信息都属于同一个 TraceID,TraceID 在整个请求的追踪过程中都是不变的; SpanID 是一个局部唯一的 ID,用来标识一个请求在某一时刻的追踪信息。一个请求在不同的时间段会产生不同的 SpanID,SpanID 用来区分一个请求在不同时间段的追踪信息;  TraceID 和 SpanID 是分布式追踪的基础,它们为分布式系统中请求的追踪提供了一个统一的标识,方便用户查询、管理和分析请求的追踪信息。\n下面是分布式追踪的过程:\n 当一个系统收到请求后,分布式追踪系统会为该请求分配一个 TraceID,用于串联起整个调用链; 分布式追踪系统会为该请求在系统内的每一次服务调用生成一个 SpanID 和 ParentID,用于记录调用的父子关系,没有 ParentID 的 Span 将作为调用链的入口; 每个服务调用过程中都要传递 TraceID 和 SpanID; 在查看分布式追踪时,通过 TraceID 查询某次请求的全过程;  Istio 如何实现分布式追踪 Istio 中的分布式追踪是基于数据平面中的 Envoy 代理实现的。服务请求在被劫持到 Envoy 中后,Envoy 在转发请求时会附加大量 Header,其中与分布式追踪相关的有:\n 作为 TraceID:x-request-id 用于在 LightStep 追踪系统中建立 Span 的父子关系:x-ot-span-context 用于 Zipkin,同时适用于 Jaeger、SkyWalking,详见 b3-propagation:  x-b3-traceid x-b3-spanid x-b3-parentspanid x-b3-sampled x-b3-flags b3   用于 Datadog:  x-datadog-trace-id x-datadog-parent-id x-datadog-sampling-priority   用于 SkyWalking:sw8 用于 AWS X-Ray:x-amzn-trace-id  关于这些 Header 的详细用法请参考 Envoy 文档 。\nEnvoy 会在 Ingress Gateway 中为你产生用于追踪的 Header,不论你的应用程序使用何种语言开发,Envoy 都会将这些 Header 转发到上游集群。但是,你还要对应用程序代码做一些小的修改,才能为使用分布式追踪功能。这是因为应用程序无法自动传播这些 Header,可以在程序中集成分布式追踪的 Agent,或者在代码中手动传播这些 Header。Envoy 会将追踪数据发送到 tracer 后端处理,然后就可以在 UI 中查看追踪数据了。\n例如在 Bookinfo 应用中的 Productpage 服务,如果你查看它的代码可以发现,其中集成了 Jaeger 客户端库,并在 getForwardHeaders (request) 方法中将 Envoy 生成的 Header 同步给对 Details 和 Reviews 服务的 HTTP 请求:\ndef getForwardHeaders(request): headers = {} # 使用 Jaeger agent 获取 x-b3-* header span = get_current_span() carrier = {} tracer.inject( span_context=span.context, format=Format.HTTP_HEADERS, carrier=carrier) headers.update(carrier) # 手动处理非 x-b3-* header if \u0026#39;user\u0026#39; in session: headers[\u0026#39;end-user\u0026#39;] = session[\u0026#39;user\u0026#39;] incoming_headers = [ \u0026#39;x-request-id\u0026#39;, \u0026#39;x-ot-span-context\u0026#39;, \u0026#39;x-datadog-trace-id\u0026#39;, \u0026#39;x-datadog-parent-id\u0026#39;, \u0026#39;x-datadog-sampling-priority\u0026#39;, \u0026#39;traceparent\u0026#39;, \u0026#39;tracestate\u0026#39;, \u0026#39;x-cloud-trace-context\u0026#39;, \u0026#39;grpc-trace-bin\u0026#39;, \u0026#39;sw8\u0026#39;, \u0026#39;user-agent\u0026#39;, \u0026#39;cookie\u0026#39;, \u0026#39;authorization\u0026#39;, \u0026#39;jwt\u0026#39;, ] for ihdr in incoming_headers: val = request.headers.get(ihdr) if val is not None: headers[ihdr] = val return headers 关于 Istio 中分布式追踪的常见问题请见 Istio 文档 。\n分布式追踪系统如何选择 分布式追踪系统的原理类似,市面上也有很多这样的系统,例如 Apache SkyWalking 、Jaeger 、Zipkin 、LightStep 、Pinpoint 等。我们将选择其中三个,从多个维度进行对比。之所以选择它们是因为:\n 它们是当前最流行的开源分布式追踪系统; 都是基于 OpenTracing 规范; 都支持与 Istio 及 Envoy 集成;     类别 Apache SkyWalking Jaeger Zipkin     实现方式 基于语言的探针、服务网格探针、eBPF agent、第三方指标库(当前支持 Zipkin) 基于语言的探针 基于语言的探针   数据存储 ES、H2、MySQL、TiDB、Sharding-sphere、BanyanDB ES、MySQL、Cassandra、内存 ES、MySQL、Cassandra、内存   支持语言 Java、Rust、PHP、NodeJS、Go、Python、C++、.NET、Lua Java、Go、Python、NodeJS、C#、PHP、Ruby、C++ Java、Go、Python、NodeJS、C#、PHP、Ruby、C++   发起者 个人 Uber Twitter   治理方式 Apache Foundation CNCF CNCF   
版本 9.3.0 1.39.0 2.23.19   Star 数量 20.9k 16.8k 15.8k    分布式追踪系统对比表(数据截止时间 2022-12-07)\n虽然 Apache SkyWalking 的 Agent 支持的语言没有 Jaeger 和 Zipkin 多,但是 SkyWalking 的实现方式更丰富,并且与 Jaeger、Zipkin 的追踪数据兼容,开发更为活跃,且为国人开发,中文资料丰富,是构建遥测平台的最佳选择之一。\n实验 参考 Istio 文档 来安装和配置 Apache SkyWalking。\n环境说明 以下是我们实验的环境:\n Kubernetes 1.24.5 Istio 1.16 SkyWalking 9.1.0  安装 Istio 安装之前可以先检查下环境是否有问题:\n$ istioctl experimental precheck ✔ No issues found when checking the cluster. Istio is safe to install or upgrade! To get started, check out https://istio.io/latest/docs/setup/getting-started/ 然后安装 Istio 同时配置发送追踪信息的目的地为 SkyWalking:\n# 初始化 Istio Operator istioctl operator init # 安装 Istio 并配置使用 SkyWalking kubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: install.istio.io/v1alpha1 kind: IstioOperator metadata: namespace: istio-system name: istio-with-skywalking spec: meshConfig: defaultProviders: tracing: - \u0026#34;skywalking\u0026#34; enableTracing: true extensionProviders: - name: \u0026#34;skywalking\u0026#34; skywalking: service: tracing.istio-system.svc.cluster.local port: 11800 EOF 部署 Apache SkyWalking Istio 1.16 支持使用 Apache SkyWalking 进行分布式追踪,执行下面的代码安装 SkyWalking:\nkubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.16/samples/addons/extras/skywalking.yaml 它将在 istio-system 命名空间下安装:\n SkyWalking OAP (Observability Analysis Platform) :用于接收追踪数据,支持 SkyWalking 原生数据格式,Zipkin v1 和 v2 以及 Jaeger 格式。 UI :用于查询分布式追踪数据。  关于 SkyWalking 的详细信息请参考 SkyWalking 文档 。\n部署 Bookinfo 应用 执行下面的命令安装 bookinfo 示例:\nkubectl label namespace default istio-injection=enabled kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml 打开 SkyWalking UI:\nistioctl dashboard skywalking SkyWalking 的 General Service 页面展示了 bookinfo 应用中的所有服务。\n你还可以看到实例、端点、拓扑、追踪等信息。例如下图展示了 bookinfo 应用的服务拓扑。\nSkyWalking 的追踪视图有多种显示形式,如列表、树形、表格和统计。\nSkyWalking 通用服务追踪支持多种显示样式\n为了方便我们检查,将追踪的采样率设置为 100%:\nkubectl apply -f - \u0026lt;\u0026lt;EOF apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: name: mesh-default namespace: istio-system spec: tracing: - randomSamplingPercentage: 100.00 EOF 卸载 在实验完后,执行下面的命令卸载 Istio 和 SkyWalking:\nsamples/bookinfo/platform/kube/cleanup.sh istioctl unintall --purge kubectl delete namespace istio-system Bookinfo demo 追踪信息说明 在 Apache SkyWalking UI 中导航到 General Service 分页,查看最近的 istio-ingressgateway 服务的追踪信息,表视图如下所示。图中展示了此次请求所有 Span 的基本信息,点击每个 Span 可以查看详细信息。\n切换为列表视图,可以看到每个 Span 的执行顺序及持续时间,如下图所示。\n你可能会感到困惑,为什么这么简单的一个应用会产生如此多的 Span 信息?因为我们为 Pod 注入了 Envoy 代理之后,每个服务间的请求都会被 Envoy 拦截和处理,如下图所示。\n整个追踪流程如下图所示。\n图中给每一个 Span 标记了序号,并在括号里注明了耗时。为了便于说明我们将所有 Span 汇总在下面的表格中。\n   序号 方法 总耗时(ms) 组件耗时(ms) 当前服务 说明     1 /productpage 190 0 istio-ingressgateway Envoy Outbound   2 /productpage 190 1 istio-ingressgateway Ingress -\u0026gt; Productpage 网络传输   3 /productpage 189 1 productpage Envoy Inbound   4 /productpage 188 21 productpage 应用内部处理   5 /details/0 8 1 productpage Envoy Outbound   6 /details/0 7 3 productpage Productpage -\u0026gt; Details 网络传输   7 /details/0 4 0 details Envoy Inbound   8 /details/0 4 4 details 应用内部   9 /reviews/0 159 0 productpage Envoy Outbound   10 /reviews/0 159 14 productpage Productpage -\u0026gt; Reviews 网络传输   11 /reviews/0 145 1 reviews Envoy Inbound   12 /reviews/0 144 109 reviews 应用内部处理   13 /ratings/0 35 2 reviews Envoy Outbound   14 /ratings/0 33 16 reviews Reviews -\u0026gt; Ratings 网络传输   15 /ratings/0 17 1 ratings Envoy Inbound   16 /ratings/0 16 16 ratings 应用内部处理    从以上信息可以发现:\n 本次请求总耗时 190ms; 在 Istio sidecar 模式下,每次流量在进出应用容器时都需要经过一次 
Envoy 代理,每次耗时在 0 到 2 ms; 在 Pod 间的网络请求耗时在 1 到 16ms 之间; 将耗时做多的调用链 Ingress Gateway -\u0026gt; Productpage -\u0026gt; Reviews -\u0026gt; Ratings 上的所有耗时累计 182 ms,小于请求总耗时 190ms,这是因为数据本身有误差,以及 Span 的开始时间并不一定等于父 Span 的结束时间,如果你在 SkyWalking 的追踪页面,选择「列表」样式查看追踪数据(见图 2)可以更直观的发现这个问题; 我们可以查看到最耗时的部分是 Reviews 应用,耗时 109ms,因此我们可以针对该应用进行优化;  总结 只要对应用代码稍作修改就可以在 Istio 很方便的使用分布式追踪功能。在 Istio 支持的众多分布式追踪系统中,Apache SkyWalking 是其中的佼佼者。它不仅支持分布式追踪,还支持指标和日志收集、报警、Kubernetes 和服务网格监控,使用 eBPF 诊断服务网格性能 等功能,是一个功能完备的云原生应用分析平台。本文中为了方便演示,将追踪采样率设置为了 100%,在生产使用时请根据需要调整采样策略(采样百分比),防止产生过多的追踪日志。\n 如果您不熟悉服务网格和 Kubernetes 安全性,我们在 Tetrate Academy 提供了一系列免费在线课程,可以让您快速了解 Istio 和 Envoy。\n如果您正在寻找一种快速将 Istio 投入生产的方法,请查看 Tetrate Istio Distribution (TID)。TID 是 Tetrate 的强化、完全上游的 Istio 发行版,具有经过 FIPS 验证的构建和支持。这是开始使用 Istio 的好方法,因为您知道您有一个值得信赖的发行版,有一个支持您的专家团队,并且如果需要,还可以选择快速获得 FIPS 合规性。\n一旦启动并运行 Istio,您可能需要更简单的方法来管理和保护您的服务,而不仅仅是 Istio 中可用的方法,这就是 Tetrate Service Bridge 的用武之地。您可以在这里详细了解 Tetrate Service Bridge 如何使服务网格更安全、更易于管理和弹性,或联系我们进行快速演示。\n","title":"如何在 Istio 中使用 SkyWalking 进行分布式追踪?","url":"/zh/how-to-use-skywalking-for-distributed-tracing-in-istio/"},{"content":"Introduction Apache SkyWalking is an open source APM tool for monitoring and troubleshooting distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. It provides distributed tracing, service mesh observability, metric aggregation and visualization, and alarm.\nIn this article, I will introduce how to quickly set up Apache SkyWalking on AWS EKS and RDS/Aurora, as well as a couple of sample services, monitoring services to observe SkyWalking itself.\nPrerequisites  AWS account AWS CLI Terraform kubectl  We can use the AWS web console or CLI to create all resources needed in this tutorial, but it can be too tedious and hard to debug when something goes wrong. So in this artical I will use Terraform to create all AWS resources, deploy SkyWalking, sample services, and load generator services (Locust).\nArchitecture The demo architecture is as follows:\ngraph LR subgraph AWS subgraph EKS subgraph istio-system namespace direction TB OAP[[SkyWalking OAP]] UI[[SkyWalking UI]] Istio[[istiod]] end subgraph sample namespace Service0[[Service0]] Service1[[Service1]] ServiceN[[Service ...]] end subgraph locust namespace LocustMaster[[Locust Master]] LocustWorkers0[[Locust Worker 0]] LocustWorkers1[[Locust Worker 1]] LocustWorkersN[[Locust Worker ...]] end end RDS[[RDS/Aurora]] end OAP --\u0026gt; RDS Service0 -. telemetry data -.-\u0026gt; OAP Service1 -. telemetry data -.-\u0026gt; OAP ServiceN -. telemetry data -.-\u0026gt; OAP UI --query--\u0026gt; OAP LocustWorkers0 -- traffic --\u0026gt; Service0 LocustWorkers1 -- traffic --\u0026gt; Service0 LocustWorkersN -- traffic --\u0026gt; Service0 Service0 --\u0026gt; Service1 --\u0026gt; ServiceN LocustMaster --\u0026gt; LocustWorkers0 LocustMaster --\u0026gt; LocustWorkers1 LocustMaster --\u0026gt; LocustWorkersN User --\u0026gt; LocustMaster As shown in the architecture diagram, we need to create the following AWS resources:\n EKS cluster RDS instance or Aurora cluster  Sounds simple, but there are a lot of things behind the scenes, such as VPC, subnets, security groups, etc. You have to configure them correctly to make sure the EKS cluster can connect to RDS instance/Aurora cluster otherwise the SkyWalking won\u0026rsquo;t work. 
Luckily, Terraform can help us to create and destroy all these resources automatically.\nI have created a Terraform module to create all AWS resources needed in this tutorial, you can find it in the GitHub repository.\nCreate AWS resources First, we need to clone the GitHub repository and cd into the folder:\ngit clone https://github.com/kezhenxu94/oap-load-test.git Then, we need to create a file named terraform.tfvars to specify the AWS region and other variables:\ncat \u0026gt; terraform.tfvars \u0026lt;\u0026lt;EOF aws_access_key = \u0026#34;\u0026#34; aws_secret_key = \u0026#34;\u0026#34; cluster_name = \u0026#34;skywalking-on-aws\u0026#34; region = \u0026#34;ap-east-1\u0026#34; db_type = \u0026#34;rds-postgresql\u0026#34; EOF If you have already configured the AWS CLI, you can skip the aws_access_key and aws_secret_key variables. To install SkyWalking with RDS postgresql, set the db_type to rds-postgresql, to install SkyWalking with Aurora postgresql, set the db_type to aurora-postgresql.\nThere are a lot of other variables you can configure, such as tags, sample services count, replicas, etc., you can find them in the variables.tf.\nThen, we can run the following commands to initialize the Terraform module and download the required providers, then create all AWS resources:\nterraform init terraform apply -var-file=terraform.tfvars Type yes to confirm the creation of all AWS resources, or add the -auto-approve flag to the terraform apply to skip the confirmation:\nterraform apply -var-file=terraform.tfvars -auto-approve Now what you need to do is to wait for the creation of all AWS resources to complete, it may take a few minutes. You can check the progress of the creation in the AWS web console, and check the deployment progress of the services inside the EKS cluster.\nGenerate traffic Besides creating necessary AWS resources, the Terraform module also deploys SkyWalking, sample services, and Locust load generator services to the EKS cluster.\nYou can access the Locust web UI to generate traffic to the sample services:\nopen http://$(kubectl get svc -n locust -l app=locust-master -o jsonpath=\u0026#39;{.items[0].status.loadBalancer.ingress[0].hostname}\u0026#39;):8089 The command opens the browser to the Locust web UI, you can configure the number of users and hatch rate to generate traffic.\nObserve SkyWalking You can access the SkyWalking web UI to observe the sample services.\nFirst you need to forward the SkyWalking UI port to local\nkubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=skywalking -l component=ui -o name) 8080:8080 And then open the browser to http://localhost:8080 to access the SkyWalking web UI.\nObserve RDS/Aurora You can also access the RDS/Aurora web console to observe the performance of RDS/Aurora instance/Aurora cluste.\nTest Results Test 1: SkyWalking with EKS and RDS PostgreSQL Service Traffic RDS Performance SkyWalking Performance Test 2: SkyWalking with EKS and Aurora PostgreSQL Service Traffic RDS Performance SkyWalking Performance Clean up When you are done with the demo, you can run the following command to destroy all AWS resources:\nterraform destroy -var-file=terraform.tfvars -auto-approve ","title":"How to run Apache SkyWalking on AWS EKS and RDS/Aurora","url":"/blog/2022-12-13-how-to-run-apache-skywalking-on-aws-eks-rds/"},{"content":"介绍 Apache SkyWalking 是一个开源的 APM 工具,用于监控分布式系统和排除故障,特别是为微服务、云原生和基于容器(Docker、Kubernetes、Mesos)的架构而设计。它提供分布式跟踪、服务网格可观测性、指标聚合和可视化以及警报。\n在本文中,我将介绍如何在 AWS EKS 和 RDS/Aurora 上快速设置 Apache 
SkyWalking,以及几个示例服务,监控服务以观察 SkyWalking 本身。\n先决条件  AWS 账号 AWS CLI Terraform kubectl  我们可以使用 AWS Web 控制台或 CLI 来创建本教程所需的所有资源,但是当出现问题时,它可能过于繁琐且难以调试。因此,在本文中,我将使用 Terraform 创建所有 AWS 资源、部署 SkyWalking、示例服务和负载生成器服务 (Locust)。\n架构 演示架构如下:\ngraph LR subgraph AWS subgraph EKS subgraph istio-system namespace direction TB OAP[[SkyWalking OAP]] UI[[SkyWalking UI]] Istio[[istiod]] end subgraph sample namespace Service0[[Service0]] Service1[[Service1]] ServiceN[[Service ...]] end subgraph locust namespace LocustMaster[[Locust Master]] LocustWorkers0[[Locust Worker 0]] LocustWorkers1[[Locust Worker 1]] LocustWorkersN[[Locust Worker ...]] end end RDS[[RDS/Aurora]] end OAP --\u0026gt; RDS Service0 -. telemetry data -.-\u0026gt; OAP Service1 -. telemetry data -.-\u0026gt; OAP ServiceN -. telemetry data -.-\u0026gt; OAP UI --query--\u0026gt; OAP LocustWorkers0 -- traffic --\u0026gt; Service0 LocustWorkers1 -- traffic --\u0026gt; Service0 LocustWorkersN -- traffic --\u0026gt; Service0 Service0 --\u0026gt; Service1 --\u0026gt; ServiceN LocustMaster --\u0026gt; LocustWorkers0 LocustMaster --\u0026gt; LocustWorkers1 LocustMaster --\u0026gt; LocustWorkersN User --\u0026gt; LocustMaster 如架构图所示,我们需要创建以下 AWS 资源:\n EKS 集群 RDS 实例或 Aurora 集群  听起来很简单,但背后有很多东西,比如 VPC、子网、安全组等。你必须正确配置它们以确保 EKS 集群可以连接到 RDS 实例 / Aurora 集群,否则 SkyWalking 不会不工作。幸运的是,Terraform 可以帮助我们自动创建和销毁所有这些资源。\n我创建了一个 Terraform 模块来创建本教程所需的所有 AWS 资源,您可以在 GitHub 存储库中找到它。\n创建 AWS 资源 首先,我们需要将 GitHub 存储库克隆 cd 到文件夹中:\ngit clone https://github.com/kezhenxu94/oap-load-test.git 然后,我们需要创建一个文件 terraform.tfvars 来指定 AWS 区域和其他变量:\ncat \u0026gt; terraform.tfvars \u0026lt;\u0026lt;EOF aws_access_key = \u0026#34;\u0026#34; aws_secret_key = \u0026#34;\u0026#34; cluster_name = \u0026#34;skywalking-on-aws\u0026#34; region = \u0026#34;ap-east-1\u0026#34; db_type = \u0026#34;rds-postgresql\u0026#34; EOF 如果您已经配置了 AWS CLI,则可以跳过 aws_access_key 和 aws_secret_key 变量。要使用 RDS postgresql 安装 SkyWalking,请将 db_type 设置为 rds-postgresql,要使用 Aurora postgresql 安装 SkyWalking,请将 db_type 设置为 aurora-postgresql。\n您可以配置许多其他变量,例如标签、示例服务计数、副本等,您可以在 variables.tf 中找到它们。\n然后,我们可以运行以下命令来初始化 Terraform 模块并下载所需的提供程序,然后创建所有 AWS 资源:\nterraform init terraform apply -var-file=terraform.tfvars 键入 yes 以确认所有 AWS 资源的创建,或将标志 -auto-approve 添加到 terraform apply 以跳过确认:\nterraform apply -var-file=terraform.tfvars -auto-approve 现在你需要做的就是等待所有 AWS 资源的创建完成,这可能需要几分钟的时间。您可以在 AWS Web 控制台查看创建进度,也可以查看 EKS 集群内部服务的部署进度。\n产生流量 除了创建必要的 AWS 资源外,Terraform 模块还将 SkyWalking、示例服务和 Locust 负载生成器服务部署到 EKS 集群。\n您可以访问 Locust Web UI 以生成到示例服务的流量:\nopen http://$(kubectl get svc -n locust -l app=locust-master -o jsonpath=\u0026#39;{.items[0].status.loadBalancer.ingress[0].hostname}\u0026#39;):8089 该命令将浏览器打开到 Locust web UI,您可以配置用户数量和孵化率以生成流量。\n观察 SkyWalking 您可以访问 SkyWalking Web UI 来观察示例服务。\n首先需要将 SkyWalking UI 端口转发到本地:\nkubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=skywalking -l component=ui -o name) 8080:8080 然后在浏览器中打开 http://localhost:8080 访问 SkyWalking web UI。\n观察 RDS/Aurora 您也可以访问 RDS/Aurora web 控制台,观察 RDS/Aurora 实例 / Aurora 集群的性能。\n试验结果 测试 1:使用 EKS 和 RDS PostgreSQL 的 SkyWalking 服务流量 RDS 性能 SkyWalking 性能 测试 2:使用 EKS 和 Aurora PostgreSQL 的 SkyWalking 服务流量 RDS 性能 SkyWalking 性能 清理 完成演示后,您可以运行以下命令销毁所有 AWS 资源:\nterraform destroy -var-file=terraform.tfvars -auto-approve ","title":"如何在 AWS EKS 和 RDS/Aurora 上运行 Apache SkyWalking","url":"/zh/2022-12-13-how-to-run-apache-skywalking-on-aws-eks-rds/"},{"content":"As an application performance monitoring tool for distributed systems, Apache SkyWalking observes metrics, logs, traces, 
and events in the service mesh.\nSkyWalking OAP’s dataflow processing architecture boasts high performance and is capable of dealing with massive data traffic in real-time. However, storing, updating, and querying massive amounts of data poses a great challenge to its backend storage system.\nBy default, SkyWalking provides storage methods including H2, OpenSearch, ElasticSearch, MySQL, TiDB, PostgreSQL, and BanyanDB. Among them, MySQL storage is suited to a single machine and table (MySQL cluster capability depends on your technology selection). Nevertheless, in the context of high-traffic business systems, the storage of monitoring data is put under great pressure and query performance is lowered.\nBased on MySQL storage, SkyWalking v9.3.0 provides a new storage method: MySQL-Sharding. It supports database and table sharding features thanks to ShardingSphere-Proxy, which is a mature solution for dealing with relational databases’ massive amounts of data.\n1. Architecture Deployment  SkyWalking will only interact with ShardingSphere-Proxy instead of directly connecting to the database. The connection exposed by each MySQL node is a data source managed by ShardingSphere-Proxy. ShardingSphere-Proxy will establish a virtual logical database based on the configuration and then carry out database and table sharding and routing according to the OAP provided data sharding rules. SkyWalking OAP creates data sharding rules and performs DDL and DML on a virtual logical database just like it does with MySQL.  2. Application Scenario Applicable to scenarios where MySQL is used for storage, but the single-table mode cannot meet the performance requirements created by business growth.\n3. How Does Data Sharding Work with SkyWalking? Data sharding defines the data Model in SkyWalking with the annotation @SQLDatabase.Sharding.\n@interface Sharding { ShardingAlgorithm shardingAlgorithm(); String dataSourceShardingColumn() default \u0026#34;\u0026#34;; String tableShardingColumn() default \u0026#34;\u0026#34;; } Note:\n shardingAlgorithm: Table sharding algorithm dataSourceShardingColumn: Database sharding key tableShardingColumn: Table sharding key\n SkyWalking selects database sharding key, table sharding key and table sharding algorithm based on @SQLDatabase.Sharding, in order to dynamically generate sharding rules for each table. Next, it performs rule definition by operating ShardingSphere-Proxy via DistSQL. ShardingSphere-Proxy carries out data sharding based on the rule definition.\n3.1 Database Sharding Method SkyWalking adopts a unified method to carry out database sharding. The number of databases that need to be sharded requires modulo by the hash value of the database sharding key, which should be the numeric suffix of the routing target database. Therefore, the routing target database is:\nds_{dataSourceShardingColumn.hashcode() % dataSourceList.size()} For example, we now have dataSourceList = ds_0…ds_n. If {dataSourceShardingColumn.hashcode() % dataSourceList.size() = 2}, all the data will be routed to the data source node ds_2.\n3.2 Table Sharding Method The table sharding algorithm mainly shards according to the data owing to the TTL mechanism. 
According to TTL, there will be one sharding table per day:\n{tableName = logicTableName_timeSeries (data)} To ensure that data within the TTL can be written and queried, the time series will generate the current date:\n{timeSeries = currentDate - TTL +1...currentDate + 1} For example, if TTL=3 and currentDate=20220907, sharding tables will be: logicTableName_20220905 logicTableName_20220906 logicTableName_20220907 logicTableName_20220908\nSkyWalking provides table sharding algorithms for different data models:\n   Algorithm Name Sharding Description Time Precision Requirements for Sharding Key Typical Application Data Model     NO_SHARDING No table sharding and single-table mode is maintained. N/A Data model with a small amount of data and no need for sharding.   TIME_RELATIVE_ID_SHARDING_ALGORITHM Shard by day using time_bucket in the ID column. time_bucket can be accurate to seconds, minutes, hours, or days in the same table. Various metrics.   TIME_SEC_RANGE_SHARDING_ALGORITHM Shard by day using time_bucket column. time_bucket must be accurate to seconds. SegmentRecordLogRecord, etc.   TIME_MIN_RANGE_SHARDING_ALGORITHM Shard by day using time_bucket column. time_bucket must be accurate to minutes. EndpointTraffic   TIME_BUCKET_SHARDING_ALGORITHM Shard by day using time_bucket column. time_bucket can be accurate to seconds, minutes, hours, and days in the same table. Service, Instance, Endpoint and other call relations such as ServiceRelationServerSideMetrics    4. TTL Mechanism   For sharding tables, delete the physical table deadline \u0026gt;= timeSeries according to TTL.\n{deadline = new DateTime().plusDays(-ttl)}   TTL timer will delete the expired tables according to the current date while updating sharding rules according to the new date and informing ShardingSphere-Proxy to create new sharding tables.\n  For a single table, use the previous method and delete the row record of deadline \u0026gt;=time_bucket.\n  5. Examples of Sharding Data Storage Next, we’ll take segment (Record type) and service_resp_time (Metrics type) as examples to illustrate the data storage logic and physical distribution. Here, imagine MySQL has two nodes ds_0 and ds_1.\nNote:\n The following storage table structure is just a simplified version as an example, and does not represent the real SkyWalking table structure.\n 5.1 segment The sharding configuration is as follows:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_SEC_RANGE_SHARDING_ALGORITHM, dataSourceShardingColumn = service_id, tableShardingColumn = time_bucket) The logical database, table structures and actual ones are as follows:\n5.2 service_resp_time The sharding configuration is as follows:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_RELATIVE_ID_SHARDING_ALGORITHM, tableShardingColumn = id, dataSourceShardingColumn = entity_id) The logical database and table structures and actual ones are as follows:\n6. How to Use ShardingSphere-Proxy? 6.1 Manual Deployment Here we take the deployment of a single-node SkyWalking OAP and ShardingSphere-Proxy 5.1.2 as an example. Please refer to the relevant documentation for the cluster deployment.\n Prepare the MySQL cluster. Deploy, install and configure ShardingSphere-Proxy:    conf/server.yaml and props.proxy-hint-enabled must be true. Refer to the link for the complete configuration.\n  conf/config-sharding.yaml configures logical database and dataSources list. The dataSource name must be prefixed with ds_ and start with ds_0. 
For details about the configuration, please refer to this page.\n   Deploy, install and configure SkyWalking OAP:    Set up OAP environment variables: ${SW_STORAGE:mysql-sharding},\n  Configure the connection information based on the actual deployment: ${SW_JDBC_URL} ${SW_DATA_SOURCE_USER} ${SW_DATA_SOURCE_PASSWORD}\n  Note:\n Connection information must correspond to ShardingSphere-Proxy virtual database.\n Configure the data source name configured by conf/config-sharding.yaml in ShardingSphere-Proxy to ${SW_JDBC_SHARDING_DATA_SOURCES} and separate names with commas.   Start the MySQL cluster. Start ShardingSphere-Proxy. Start SkyWalking OAP.  6.2 Running Demo with Docker Our GitHub repository provides a complete and operational demo based on Docker, allowing you to quickly grasp the operation’s effectiveness. The deployment includes the following:\n One OAP service. The TTL of Metrics and Record data set to 2 days. One sharding-proxy service with version 5.1.2. Its external port is 13307 and the logical database name is swtest. Two MySQL services. Their external ports are 3306 and 3307 respectively and they are configured as ds_0 and ds_1 in sharding-proxy’s conf/config-sharding.yaml. One provider service (simulated business programs used to verify trace and metrics and other data). Its external port is 9090. One consumer service (simulated business programs used to verify trace and metrics and other data). Its external port is 9092.  Download the demo program locally and run it directly in the directory skywalking-mysql-sharding-demo.\ndocker-compose up -d Note:\n The first startup may take some time to pull images and create all the tables.\n Once all the services are started, database tools can be used to check the creation of sharding-proxy logical tables and the actual physical sharding table in the two MySQL databases. Additionally, you can also connect the sharding-proxy logical database to view the data query routing. For example:\nPREVIEW SELECT * FROM SEGMENT The result is as follows:\nThe simulated business program provided by the demo can simulate business requests by requesting the consumer service to verify various types of data distribution:\ncurl http://127.0.0.1:9092/info 7. Conclusion In this blog, we introduced SkyWalking’s new storage feature, MySQL sharding, which leverage ShardingSphere-Proxy and covered details of its deployment architecture, application scenarios, sharding logic, and TTL mechanism. We’ve also provided sample data and deployment steps to help get started.\nSkyWalking offers a variety of storage options to fit many use cases. If you need a solution to store large volumes of telemetry data in a relational database, the new MySQL sharding feature is worth a look. For more information on the SkyWalking 9.3.0 release and where to get it, check out the release notes.\n","title":"SkyWalking's New Storage Feature Based on ShardingSphere-Proxy: MySQL-Sharding","url":"/blog/skywalkings-new-storage-feature-based-on-shardingsphere-proxy-mysql-sharding/"},{"content":"SkyWalking NodeJS 0.6.0 is released. 
Go to downloads page to find release tars.\n Add missing build doc by @kezhenxu94 in https://github.com/apache/skywalking-nodejs/pull/92 Fix invalid url error in axios plugin by @kezhenxu94 in https://github.com/apache/skywalking-nodejs/pull/93 Ignore no requests if ignoreSuffix is empty by @michaelzangl in https://github.com/apache/skywalking-nodejs/pull/94 Escape HTTP method in regexp by @michaelzangl in https://github.com/apache/skywalking-nodejs/pull/95 docs: grammar improvements by @BFergerson in https://github.com/apache/skywalking-nodejs/pull/97 fix: entry span url in endponts using Express middleware/router objects by @BFergerson in https://github.com/apache/skywalking-nodejs/pull/96 chore: use openapi format for endpoint uris by @BFergerson in https://github.com/apache/skywalking-nodejs/pull/98 AWS DynamoDB, Lambda, SQS and SNS plugins, webpack by @tom-pytel in https://github.com/apache/skywalking-nodejs/pull/100 Fix nits by @wu-sheng in https://github.com/apache/skywalking-nodejs/pull/101 Update AxiosPlugin for v1.0+ by @tom-pytel in https://github.com/apache/skywalking-nodejs/pull/102  ","title":"Release Apache SkyWalking for NodeJS 0.6.0","url":"/events/release-apache-skywalking-nodejs-0-6-0/"},{"content":"SkyWalking 9.3.0 is released. Go to downloads page to find release tars.\nMetrics Association    Dashboard Pop-up Trace Query          APISIX Dashboard Use Sharding MySQL as the Database Virtual Cache Performance Virtual MQ Performance Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. 
[Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. 
Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. 
[Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. 
Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.3.0","url":"/events/release-apache-skywalking-apm-9.3.0/"},{"content":"Apache SkyWalking 作为一个分布式系统的应用性能监控工具,它观察服务网格中的指标、日志、痕迹和事件。其中 SkyWalking OAP 高性能的数据流处理架构能够实时处理庞大的数据流量,但是这些海量数据的存储更新和后续查询对后端存储系统带来了挑战。\nSkyWalking 默认已经提供了多种存储支持包括 H2、OpenSearch、ElasticSearch、MySQL、TiDB、PostgreSQL、BanyanDB。其中 MySQL 存储提供的是针对单机和单表的存储方式(MySQL 的集群能力需要自己选型提供),在面对高流量的业务系统时,监控数据的存储存在较大压力,同时影响查询性能。\n在 MySQL 存储基础上 SkyWalking v9.3.0 提供了一种新的存储方式 MySQL-Sharding,它提供了基于 ShardingSphere-Proxy 的分库分表特性,而分库分表是关系型数据库面对大数据量处理的成熟解决方案。\n部署架构 SkyWalking 使用 ShardingSphere-Proxy 的部署方式如下图所示。\n SkyWalking OAP 由直连数据库的方式变成只与 ShardingSphere-Proxy 进行交互; 每一个 MySQL 节点暴露的连接都是一个数据源,由 ShardingSphere-Proxy 进行统一管理; ShardingSphere-Proxy 会根据配置建立一个虚拟逻辑数据库,根据 OAP 提供的分库分表规则进行库表分片和路由; SkyWalking OAP 负责生成分库分表规则并且像操作 MySQL 一样对虚拟逻辑库执行 DDL 和 DML;  适用场景 希望使用 MySQL 作为存储,随着业务规模的增长,单表模式已经无法满足性能需要。\nSkyWalking 分库分表逻辑 分库分表逻辑通过注解 @SQLDatabase.Sharding 对 SkyWalking 中的数据模型 Model 进行定义:\n@interface Sharding { ShardingAlgorithm shardingAlgorithm(); String dataSourceShardingColumn() default \u0026#34;\u0026#34;; String tableShardingColumn() default \u0026#34;\u0026#34;; } 其中:\n  shardingAlgorithm:表分片算法\n  dataSourceShardingColumn:分库键\n  tableShardingColumn:分表键\n  SkyWalking 根据注解 @SQLDatabase.Sharding 选择分库键、分表键以及表分片算法对每个表动态生成分片规则通过 DistSQL 操作 Shardingsphere-Proxy 执行规则定义 Shardingsphere-Proxy 根据规则定义进行数据分片。\n分库方式 SkyWalking 对于分库采用统一的方式,路由目标库的数字后缀使用分库键的哈希值取模需要分库的数据库数量,所以路由目标库为:\nds_{dataSourceShardingColumn.hashcode() % dataSourceList.size()} 例如我们有 dataSourceList = ds_0...ds_n,如果\n{dataSourceShardingColumn.hashcode() % dataSourceList.size() = 2} 那么所有数据将会路由到 ds_2 这个数据源节点上。\n分表方式 由于 TTL 机制的存在,分表算法主要根据时间的日期进行分片,分片表的数量是根据 TTL 每天一个表:\n分片表名 = 逻辑表名_时间序列(日期):{tableName =logicTableName_timeSeries}\n为保证在 TTL 有效期内的数据能够被写入和查询,时间序列将生成当前日期\n{timeSeries = currentDate - TTL +1...currentDate + 1} 例如:如果 TTL=3, currentDate = 20220907,则分片表为:\nlogicTableName_20220905 logicTableName_20220906 logicTableName_20220907 logicTableName_20220908 SkyWalking 提供了多种不同的分表算法用于不同的数据模型:\n   算法名称 分片说明 分片键时间精度要求 典型应用数据模型     NO_SHARDING 不做任何表分片,保持单表模式 / 数据量小无需分片的数据模型   TIME_RELATIVE_ID_SHARDING_ALGORITHM 使用 ID 列中的 time_bucket 按天分片 time_bucket 的精度可以是同一表中的秒、分、小时和天 各类 Metrics 指标   TIME_SEC_RANGE_SHARDING_ALGORITHM 使用 time_bucket 列按天分片 time_bucket 的精度必须是秒 SegmentRecordLogRecord 等   TIME_MIN_RANGE_SHARDING_ALGORITHM 使用 time_bucket 列按天分片 time_bucket 的精度必须是分钟 EndpointTraffic   
TIME_BUCKET_SHARDING_ALGORITHM 使用 time_bucket 列按天分片 time_bucket 的精度可以是同一个表中的秒、分、小时和天 Service、Instance、Endpoint 调用关系等如 ServiceRelationServerSideMetrics    TTL 机制  对于进行分片的表根据 TTL 直接删除 deadline \u0026gt;= timeSeries 的物理表 {deadline = new DateTime().plusDays(-ttl)} TTL 定时器在根据当前日期删除过期表的同时也会根据新日期更新分片规则,通知 ShardingSphere-Proxy 创建新的分片表 对于单表的延续之前的方式,删除 deadline \u0026gt;= time_bucket 的行记录  分片数据存储示例 下面以 segment(Record 类型)和 service_resp_time(Metrics 类型)两个为例说明数据存储的逻辑和物理分布。这里假设 MySQL 为 ds_0 和 ds_1 两个节点。\n注意:以下的存储表结构仅为简化后的存储示例,不表示 SkyWalking 真实的表结构。\nsegment 分片配置为:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_SEC_RANGE_SHARDING_ALGORITHM, dataSourceShardingColumn = service_id, tableShardingColumn = time_bucket) 逻辑库表结构和实际库表如下图:\nservice_resp_time 分片配置为:\n@SQLDatabase.Sharding(shardingAlgorithm = ShardingAlgorithm.TIME_RELATIVE_ID_SHARDING_ALGORITHM, tableShardingColumn = id, dataSourceShardingColumn = entity_id) 逻辑库表结构和实际库表如下图:\n如何使用 你可以选择手动或使用 Docker 来运行 Demo。\n手动部署 这里以单节点 SkyWalking OAP 和 Shardingsphere-Proxy 5.1.2 部署为例,集群部署请参考其他相关文档。\n  准备好 MySQL 集群\n  部署安装并配置 Shardingsphere-Proxy:\n conf/server.yaml,props.proxy-hint-enabled 必须为 true,完整配置可参考这里。 conf/config-sharding.yaml,配置逻辑数据库和 dataSources 列表,dataSource 的名称必须以 ds_为前缀,并且从 ds_0 开始,完整配置可参考这里。    部署安装并配置 SkyWalking OAP:\n 设置 OAP 环境变量 ${SW_STORAGE:mysql-sharding} 根据实际部署情况配置连接信息: ${SW_JDBC_URL} ${SW_DATA_SOURCE_USER} ${SW_DATA_SOURCE_PASSWORD}  注意:连接信息需对应 Shardingsphere-Proxy 虚拟数据库。\n  将 Shardingsphere-Proxy 中 conf/config-sharding.yaml 配置的数据源名称配置在 ${SW_JDBC_SHARDING_DATA_SOURCES} 中,用 , 分割\n  启动 MySQL 集群\n  启动 Shardingsphere-Proxy\n  启动 SkyWalking OAP\n  使用 Docker 运行 Demo GitHub 资源库提供了一个基于 Docker 完整可运行的 demo:skywalking-mysql-sharding-demo,可以快速尝试实际运行效果。\n其中部署包含:\n oap 服务 1 个,Metrics 和 Record 数据的 TTL 均设为 2 天 sharding-proxy 服务 1 个版本为 5.1.2,对外端口为 13307,创建的逻辑库名称为 swtest mysql 服务 2 个,对外端口分别为 3306,3307,在 sharding-proxy 的 conf/config-sharding.yaml 中配置为 ds_0 和 ds_1 provider 服务 1 个(模拟业务程序用于验证 trace 和 metrics 等数据),对外端口为 9090 consumer 服务 1 个(模拟业务程序用于验证 trace 和 metrics 等数据),对外端口为 9092  将 Demo 程序获取到本地后,在 skywalking-mysql-sharding-demo 目录下直接运行:\ndocker-compose up -d 注意:初次启动由于拉取镜像和新建所有表可能需要一定的时间。\n所有服务启动完成之后可以通过数据库工具查看 sharding-proxy 逻辑表创建情况,以及两个 MySQL 库中实际的物理分片表创建情况。也可以连接 sharding-proxy 逻辑库 swtest 查看数据查询路由情况,如:\nPREVIEW SELECT * FROM SEGMENT 显示结果如下:\nDemo 提供的模拟业务程序可以通过请求 consumer 服务模拟业务请求,用于验证各类型数据分布:\ncurl http://127.0.0.1:9092/info 总结 在这篇文章中我们详细介绍了 SkyWalking 基于 ShardingSphere-Proxy 的 MySQL-Sharding 存储特性的部署架构、适应场景、核心分库分表逻辑以及 TTL 机制,并提供了运行后的数据存储示例和详细部署配置步骤以便大家快速理解上手。SkyWalking 提供了多种存储方式以供选择,如果你目前的需求如本文所述,欢迎使用该新特性。\n","title":"SkyWalking 基于 ShardingSphere-Proxy 的 MySQL-Sharding 分库分表的存储特性介绍","url":"/zh/skywalking-shardingsphere-proxy/"},{"content":"SkyWalking Kubernetes Helm Chart 4.4.0 is released. Go to downloads page to find release tars.\n [Breaking Change]: remove .Values.oap.initEs, there is no need to use this to control whether to run init job anymore, SkyWalking Helm Chart automatically delete the init job when installing/upgrading. [Breaking Change]: remove files/config.d mechanism and use values.yaml files to put the configurations to override default config files in the /skywalking/config folder, using files/config.d is very limited and you have to clone the source codes if you want to use this mechanism, now you can simply use our Docker Helm Chart to install. Refactor oap init job, and support postgresql storage. Upgrade ElasticSearch Helm Chart dependency version.  
","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.4.0","url":"/events/release-apache-skywalking-kubernetes-helm-chart-4.4.0/"},{"content":"SkyWalking PHP 0.2.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Update PECL user by @heyanlong in https://github.com/apache/skywalking-php/pull/12 Start up 0.2.0 by @heyanlong in https://github.com/apache/skywalking-php/pull/13 Update compiling project document. by @jmjoy in https://github.com/apache/skywalking-php/pull/14 Add PDO plugin, and switch unix datagram to stream. by @jmjoy in https://github.com/apache/skywalking-php/pull/15 Update readme about creating issue. by @jmjoy in https://github.com/apache/skywalking-php/pull/17 Fix package.xml role error by @heyanlong in https://github.com/apache/skywalking-php/pull/16 Add swoole support. by @jmjoy in https://github.com/apache/skywalking-php/pull/19 Add .fleet to .gitignore by @heyanlong in https://github.com/apache/skywalking-php/pull/20 [Feature] Add Mysql Improved Extension by @heyanlong in https://github.com/apache/skywalking-php/pull/18 Add predis plugin. by @jmjoy in https://github.com/apache/skywalking-php/pull/21 Take care of PDO false and DSN tailing semicolons. by @phanalpha in https://github.com/apache/skywalking-php/pull/22 Add container by @heyanlong in https://github.com/apache/skywalking-php/pull/23 Save PDO exceptions. by @phanalpha in https://github.com/apache/skywalking-php/pull/24 Update minimal supported PHP version to 7.2. by @jmjoy in https://github.com/apache/skywalking-php/pull/25 Utilize UnixListener for the worker process to accept reports. by @phanalpha in https://github.com/apache/skywalking-php/pull/26 Kill the worker on module shutdown. by @phanalpha in https://github.com/apache/skywalking-php/pull/28 Add plugin for memcached. by @jmjoy in https://github.com/apache/skywalking-php/pull/27 Upgrade rust mini version to 1.65. by @jmjoy in https://github.com/apache/skywalking-php/pull/30 Add plugin for phpredis. by @jmjoy in https://github.com/apache/skywalking-php/pull/29 Add missing request_id. by @jmjoy in https://github.com/apache/skywalking-php/pull/31 Adapt virtual cache. by @jmjoy in https://github.com/apache/skywalking-php/pull/32 Fix permission denied of unix socket. by @jmjoy in https://github.com/apache/skywalking-php/pull/33 Bump to 0.2.0. by @jmjoy in https://github.com/apache/skywalking-php/pull/34  New Contributors  @phanalpha made their first contribution in https://github.com/apache/skywalking-php/pull/22  Full Changelog: https://github.com/apache/skywalking-php/compare/v0.1.0...v0.2.0\nPECL https://pecl.php.net/package/skywalking_agent/0.2.0\n","title":"Release Apache SkyWalking PHP 0.2.0","url":"/events/release-apache-skwaylking-php-0-2-0/"},{"content":"This is an official annoucement from SkyWalking team.\nDue to the Plan to End-of-life(EOL) all v8 releases in Nov. 2022 had been posted in 3 months, SkyWalking community doesn\u0026rsquo;t received any objection or a proposal about releasing a new patch version.\nNow, it is time to end the v8 series. All documents of v8 are not going to be hosted on the website. You only could find the artifacts and source codes from the Apache\u0026rsquo;s archive repository. The documents of each version are included in /docs/ folder in the source tars.\nThe SkyWalking community would reject the bug reports and release proposal due to its End-of-life(EOL) status. v9 provides more powerful features and covers all capabilities of the latest v8. 
Recommend upgrading to the latest.\nV8 was a memorable and significative release series, which makes the project globally adopted. It brought dev community scale up to over 500 contributors.\nWe want to highlight and thank all those contributors and end users again. You made today\u0026rsquo;s SkyWalking.\nWelcome more contributors and users to join the community, to contribute your ideas, experiences, and feedback. We need you to improve and enhance the project to a higher level.\n","title":"SkyWalking v8 OAP server End-of-life(EOL)","url":"/events/v8-eol/"},{"content":"SkyWalking BanyanDB 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Command line tool: bydbctl. Retention controller. Full-text searching. TopN aggregation. Add RESTFul style APIs based on gRPC gateway. Add \u0026ldquo;exists\u0026rdquo; endpoints to the schema registry. Support tag-based CRUD of the property. Support index-only tags. Support logical operator(and \u0026amp; or) for the query.  Bugs  \u0026ldquo;metadata\u0026rdquo; syncing pipeline complains about an \u0026ldquo;unknown group\u0026rdquo;. \u0026ldquo;having\u0026rdquo; semantic inconsistency. \u0026ldquo;tsdb\u0026rdquo; leaked goroutines.  Chores  \u0026ldquo;tsdb\u0026rdquo; structure optimization.  Merge the primary index into the LSM-based index Remove term metadata.   Memory parameters optimization. Bump go to 1.19.  ","title":"Release Apache SkyWalking BanyanDB 0.2.0","url":"/events/release-apache-skywalking-banyandb-0-2-0/"},{"content":"SkyWalking Java Agent 8.13.0 is released. Go to downloads page to find release tars. Changes by Version\n8.13.0 This release begins to adopt SkyWalking 9.3.0+ Virtual Cache Analysis,Virtual MQ Analysis\n Support set-type in the agent or plugin configurations Optimize ConfigInitializer to output warning messages when the config value is truncated. Fix the default value of the Map field would merge rather than override by new values in the config. Support to set the value of Map/List field to an empty map/list. Add plugin to support Impala JDBC 2.6.x. Update guava-cache, jedis, memcached, ehcache plugins to adopt uniform tags. Fix Apache ShenYu plugin traceId empty string value. Add plugin to support brpc-java-3.x Update compose-start-script.template to make compatible with new version docker compose Bump up grpc to 1.50.0 to fix CVE-2022-3171 Polish up nats plugin to unify MQ related tags Correct the duration of the transaction span for Neo4J 4.x. Plugin-test configuration.yml dependencies support docker service command field Polish up rabbitmq-5.x plugin to fix missing broker tag on consumer side Polish up activemq plugin to fix missing broker tag on consumer side Enhance MQ plugin relative tests to check key tags not blank. Add RocketMQ test scenarios for version 4.3 - 4.9. No 4.0 - 4.2 release images for testing. Support mannual propagation of tracing context to next operators for webflux. Add MQ_TOPIC and MQ_BROKER tags for RocketMQ consumer\u0026rsquo;s span. Polish up Pulsar plugins to remove unnecessary dynamic value , set peer at consumer side Polish Kafka plugin to set peer at the consumer side. Polish NATS plugin to set peer at the consumer side. Polish ActiveMQ plugin to set peer at the consumer side. Polish RabbitMQ plugin to set peer at the consumer side.  Documentation  Update configuration doc about overriding default value as empty map/list accordingly. Update plugin dev tags for cache relative tags. Add plugin dev docs for virtual database tags. 
Add plugin dev docs for virtual MQ tags. Add doc about kafka plugin Manual APIs.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.13.0","url":"/events/release-apache-skywalking-java-agent-8-13-0/"},{"content":"SkyWalking Client JS 0.9.0 is released. Go to downloads page to find release tars.\n Fix custom configurations when the page router changed for SPA. Fix reporting data by navigator.sendbeacon when pages is closed. Bump dependencies. Add Security Notice. Support adding custom tags to spans. Validate custom parameters for register.  ","title":"Release Apache SkyWalking Client JS 0.9.0","url":"/events/release-apache-skywalking-client-js-0-9-0/"},{"content":"I am excited to announce a new SkyWalking committer, Yueqin Zhang (GitHub ID: yswdqz). Yueqin first entered the SkyWalking community on Jul. 3rd, 2022[1]. Later, I learned he had been invited by Yihao Chen, our committer, who runs an open-source program for students who couldn't join Summer 2022 because SkyWalking had limited slots.\nHis first PR[2], for Issue #7420, took 20 days to propose. I believe he put in incredibly hard work in his own time. Every PMC member has been there: building a new feature purely by following the documents and existing code is never easy at the start.\nAfter that, we had several private talks in which he asked for more possible directions to get involved in the community more deeply. Since then, I have been honored to witness a great extension of SkyWalking's feature landscape. Powered by our MAL and the v9 kernel/UI, and adopting OpenTelemetry quickly, he built MySQL and PostgreSQL server monitoring, with metrics and slow SQL collecting (through enhancing LAL with a new layer concept), under a new menu.\nIt is unbelievable to see his contributions in the main repo: 8 PRs[3], with 4,857 lines added and 1,627 removed.\nMeanwhile, the story continues: he is trying to build a lightweight and APM-oriented SQL parser module[4] under my mentoring. This is another challenging idea, but also very useful for enhancing the existing virtual database performance analysis.\nI believe this is not the end of the story between SkyWalking and him.\nWelcome to the team.\nReferrer \u0026amp; PMC member, Sheng Wu.\n [1] https://github.com/apache/skywalking/issues/7420#issuecomment-1173061870 [2] https://github.com/apache/skywalking-java/pull/286 [3] https://github.com/apache/skywalking/commits?author=yswdqz [4] https://github.com/apache/skywalking/issues/9661  ","title":"Welcome Yueqin Zhang as a new committer","url":"/events/welcome-yueqin-zhang-as-new-committer/"},{"content":"SkyWalking PHP 0.1.0 is released. Go to downloads page to find release tars.\nWhat's Changed  [docs] Update README by @heyanlong in https://github.com/apache/skywalking-php/pull/1 Remove the CI limit first, in order to run CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/3 Setup CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/5 Implementation, with curl support. By @jmjoy in https://github.com/apache/skywalking-php/pull/4 Turn off Swoole support, and fix Makefile. By @jmjoy in https://github.com/apache/skywalking-php/pull/6 Update docs by @heyanlong in https://github.com/apache/skywalking-php/pull/7 Add PECL support. By @jmjoy in https://github.com/apache/skywalking-php/pull/8 Support macOS by replace ipc-channel with socket pair, upgrade dependencies and improve CI. by @jmjoy in https://github.com/apache/skywalking-php/pull/9 Add compile and release docs. 
By @jmjoy in https://github.com/apache/skywalking-php/pull/10 Update official documentation link. By @jmjoy in https://github.com/apache/skywalking-php/pull/11  New Contributors  @heyanlong made their first contribution in https://github.com/apache/skywalking-php/pull/1 @jmjoy made their first contribution in https://github.com/apache/skywalking-php/pull/3  Full Changelog: https://github.com/apache/skywalking-php/commits/v0.1.0\nPECL https://pecl.php.net/package/skywalking_agent/0.1.0\n","title":"Release Apache SkyWalking PHP 0.1.0","url":"/events/release-apache-skwaylking-php-0-1-0/"},{"content":"Yanlong He (GitHub: heyanlong) has been a SkyWalking committer for years. He has worked on skyapm-php for years to support the SkyWalking ecosystem. That PHP agent contributed significantly to SkyWalking's user adoption in the PHP landscape. Yanlong stays active in supporting and maintaining the project to help the community.\nJiemin Xia (GitHub: jmjoy) is a new committer voted in July 2022. He has been super active this year. He took over maintenance responsibility from Rei Shimizu, who is too busy with his daily work. He leads the Rust SDK and is also a release manager for it.\nRecently, the two of them have been working together to build a new SkyWalking PHP agent.\nWe now have PHP agent v0.1.0 for the community.\nSkyWalking PHP Agent\nNote that SkyAPM PHP is going to be archived and replaced by the SkyWalking PHP agent, according to its project maintainer, Yanlong He. Our community will work together more closely on the new PHP agent.\nLet's welcome and congratulate our 31st and 32nd PMC members, Yanlong He and Jiemin Xia. We are honored to have you.\n","title":"Welcome Yanlong He and Jiemin Xia to join the PMC","url":"/events/welcome-heyanlong-xiajiemin-join-the-pmc/"},{"content":"Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitoring tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. 
We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  
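To make the kprobe approach above concrete, here is a minimal, illustrative sketch rather than SkyWalking Rover\u0026rsquo;s actual implementation. It uses the bpftrace front end (an assumption: bpftrace is installed, you have root, and the kernel has eBPF support) to sum the bytes passed to the write(2) syscall and to the kernel\u0026rsquo;s tcp_sendmsg function, per process name, for ten seconds:
sudo bpftrace -e '
tracepoint:syscalls:sys_enter_write { @write_bytes[comm] = sum(args->count); }
kprobe:tcp_sendmsg { @tcp_send_bytes[comm] = sum(arg2); }
interval:s:10 { exit(); }
'
Rover attaches comparable probes from its own eBPF programs and correlates the events with the socket and process metadata described below; the one-liner only shows why syscall-level hooks work for any programming language without touching the application itself.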
Kprobes and network monitoring Following the Linux documentation on network syscalls, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address of the connection, which helps us understand which pod is connected. Connection statistics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1), we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, we can follow the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the data the Linux kernel transmits has already been encrypted in user space. In the figure above, the application usually transmits SSL data through a third-party library (such as OpenSSL). In this case, the Linux API only sees the encrypted data, so it cannot recognize any higher-layer protocol. To get the unencrypted data inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobes: To stay compatible with multiple languages, we use uprobes to capture the data before it is encrypted for sending, or after it is decrypted on receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate the unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods that carry the buffer data for the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the Linux API. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by its offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so it must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\nGet Started with Istio To get started with service mesh today, Tetrate Istio Distro is the easiest way to install, manage, and upgrade Istio. It provides a vetted upstream distribution of Istio that\u0026rsquo;s tested and optimized for specific platforms by Tetrate plus a CLI that facilitates acquiring, installing, and configuring multiple Istio versions. Tetrate Istio Distro also offers FIPS certified Istio builds for FedRAMP environments.\nFor enterprises that need a unified and consistent way to secure and manage services and traditional workloads across complex, heterogeneous deployment environments, we offer Tetrate Service Bridge, our flagship edge-to-workload application connectivity platform built on Istio and Envoy.\nContact us to learn more.\nAdditional Resources  SkyWalking Github Repo SkyWalking Rover Github Repo SkyWalking Rover Documentation Pinpoint Service Mesh Critical Performance impact by using eBPF blog post Apache SkyWalking with Native eBPF Agent presentation eBPF hook overview  ","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/blog/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"本文将展示如何利用 Apache SkyWalking 与 eBPF,使服务网格下的网络故障排除更加容易。\nApache SkyWalking 是一个分布式系统的应用性能监控工具。它观察服务网格中的指标、日志、痕迹和事件,并使用这些数据来生成 pod 和服务的依赖图。这个依赖关系图可以帮助你快速系统,尤其是在出现问题的时候。\n然而,在排除 SkyWalking 服务拓扑中的网络问题时,确定错误的实际位置有时候并不容易。造成这种困难的原因有两个:\n 通过 Envoy sidecar 的流量并不容易观察:来自 Envoy 的访问日志服务(ALS)的数据显示了服务之间的流量(sidecar-to-sidecar),但没有关于 Envoy sidecar 和它代理的服务之间的通信指标。如果没有这些信息,就很难理解 sidecar 的影响。 缺乏来自传输层(OSI 第 4 层)通信的数据:由于服务通常使用应用层(OSI 第 7 层)协议,如 HTTP,可观测性数据通常被限制在应用层通信中。然而,根本原因可能实际上是在传输层,而传输层对可观测性工具来说通常是不透明的。  获取 Envoy-to-service 和传输层通信的指标,可以更容易诊断服务问题。为此,SkyWalking 需要收集和分析 Kubernetes pod 内进程之间的传输层指标 —— 这项任务很适合 eBPF。我们调查了为此目的使用 eBPF 的情况,并在下面介绍了我们的结果和演示。\n用 eBPF 监控 Kubernetes 网络 eBPF 起源于 Extended Berkeley Packet Filter,是一种通用的机制,可以在 Linux 内核中注入和运行自己的代码,是监测 Kubernetes Pod 中网络流量的优秀工具。在接下来的几节中,我们将概述如何使用 eBPF 进行网络监控,作为介绍 Skywalking Rover 的背景,这是一个由 eBPF 驱动的指标收集器和分析器,用于诊断 CPU 和网络性能。\n应用程序和网络如何相互作用 应用程序和网络之间的互动一般可分为以下步骤,从较高的抽象层次到较低的抽象层次:\n 用户代码:应用程序代码使用应用程序堆栈中的高级网络库,在网络上交换数据,如发送和接收 HTTP 请求。 网络库:当网络库收到网络请求时,它与语言 API 进行交互以发送网络数据。 语言 API:每种语言都提供了一个操作网络、系统等的 API。当收到一个请求时,它与系统的 API 进行交互。在 Linux 中,这个 API 被称为系统调用(syscalls)。 Linux API:当 Linux 内核通过 API 收到请求时,它与套接字进行通信以发送数据,这通常更接近于 OSI 第四层协议,如 TCP、UDP 等。 Socket Ops:向 / 从网卡发送或接收数据。  我们的假设是,eBPF 可以监控网络。有两种方法可以实现拦截:用户空间(uprobe)或内核空间(kprobe)。下表总结了两者的区别。\n   方式 优点 缺点     uprobe • 获取更多与应用相关的上下文,例如当前请求是 HTTP 还是 HTTPS。 • 请求和响应可以通过一个方法来截获。 • 数据结构可能是不稳定的,所以更难获得所需的数据。 • 不同语言/库版本的实现可能不同。 • 在没有符号表的应用程序中不起作用。   kprobe • 可用于所有语言。 • 数据结构和方法很稳定,不需要太多调整。 • 更容易与底层数据相关联,如获得 TCP 的目标地址、OSI 第四层协议指标等。 • 一个单一的请求和响应可能被分割成多个 probe。 • 对于有状态的请求,上下文信息不容易得到。例如 HTTP/2 中的头压缩。    对于一般的网络性能监控,我们选择使用 kprobe(拦截系统调用),原因如下:\n 它可用于用任何编程语言编写的应用程序,而且很稳定,所以可以节省大量的开发 / 适应成本。 它可以与系统层面的指标相关联,这使得故障排除更加容易。 由于一个请求和响应被分割成多个 probe,我们可以利用技术将它们关联起来。 对于背景信息,它通常用于 OSI 第七层协议网络分析。因此,如果我们只是监测网络性能,那么它们可以被忽略。  Kprobes 和网络监控 按照 Linux 文档中的网络系统调用,我们可以通过两类拦截方法实现网络监控:套接字操作和发送 / 接收方法。\n套接字操作 当接受或与另一个套接字连接时,我们可以得到以下信息:\n 连接信息:包括来自连接的远程地址,这有助于我们了解哪个 pod 被连接。 连接统计 :包括来自套接字的基本指标,如往返时间(RTT)、TCP 的丢包数等。 套接字和文件描述符(FD)的映射:包括 Linux 文件描述符和套接字对象之间的关系。在通过 Linux 文件描述符发送和接收数据时,它很有用。  发送 / 接收 与发送或接收数据有关的接口是性能分析的重点。它主要包含以下参数:\n Socket 文件描述符:当前操作对应的套接字的文件描述符。 缓冲区:发送或接收的数据,以字节数组形式传递。  基于上述参数,我们可以分析以下数据:\n 字节:数据包的大小,以字节为单位。 协议:根据缓冲区的数据进行协议分析,如 HTTP、MySQL 等。 执行时间:发送 / 接收数据所需的时间。  在这一点上(图 1),我们可以分析出连接的整个生命周期的以下步骤:\n 连接 / 接受:当连接被创建时。 
转化:在连接上发送和接收数据。 关闭:当连接被关闭时。  图 1\n协议和 TLS 上一节描述了如何使用发送或接收缓冲区数据来分析连接。例如,遵循 HTTP/1.1 消息规范来分析连接。然而,这对 TLS 请求 / 响应不起作用。\n图 2\n当使用 TLS 时,Linux 内核在用户空间中传输加密的数据。在上图中,应用程序通常通过第三方库(如 OpenSSL)传输 SSL 数据。对于这种情况,Linux API 只能得到加密的数据,所以它不能识别任何高层协议。为了在 eBPF 内部解密,我们需要遵循以下步骤:\n 通过 uprobe 读取未加密的数据:兼容多种语言,使用 uprobe 来捕获发送前或接收后没有加密的数据。通过这种方式,我们可以获得原始数据并将其与套接字联系起来。 与套接字关联:我们可以将未加密的数据与套接字关联。  OpenSSL 用例 例如,发送 / 接收 SSL 数据最常见的方法是使用 OpenSSL 作为共享库,特别是 SSL_read 和 SSL_write 方法,以提交缓冲区数据与套接字。\n按照文档,我们可以截获这两种方法,这与 Linux 中的 API 几乎相同。OpenSSL 中 SSL 结构的源代码显示, Socket FD 存在于 SSL 结构的 BIO 对象中,我们可以通过 offset 得到它。\n综上所述,通过对 OpenSSL 工作原理的了解,我们可以在一个 eBPF 函数中读取未加密的数据。\nSkyWalking Rover—— 基于 eBPF 的指标收集器和分析器 SkyWalking Rover 在 SkyWalking 生态系统中引入了 eBPF 网络分析功能。目前已在 Kubernetes 环境中得到支持,所以必须在 Kubernetes 集群内部署。部署完成后,SkyWalking Rover 可以监控特定 Pod 内所有进程的网络。基于监测数据,SkyWalking 可以生成进程之间的拓扑关系图和指标。\n拓扑结构图 拓扑图可以帮助我们了解同一 Pod 内的进程之间以及进程与外部环境(其他 Pod 或服务)之间的网络访问情况。此外,它还可以根据线路的流动方向来确定流量的数据方向。\n在下面的图 3 中,六边形内的所有节点都是一个 Pod 的内部进程,六边形外的节点是外部关联的服务或 Pod。节点由线连接,表示节点之间的请求或响应方向(客户端或服务器)。线条上标明了协议,它是 HTTP (S)、TCP 或 TCP (TLS)。另外,我们可以在这个图中看到,Envoy 和 Python 应用程序之间的线是双向的,因为 Envoy 拦截了所有的应用程序流量。\n图 3\n度量 一旦我们通过拓扑结构认识到进程之间的网络调用关系,我们就可以选择一个特定的线路,查看两个进程之间的 TCP 指标。\n下图(图4)显示了两个进程之间网络监控的指标。每行有四个指标。左边的两个是在客户端,右边的两个是在服务器端。如果远程进程不在同一个 Pod 中,则只显示一边的指标。\n图 4\n有以下两种度量类型。\n 计数器(Counter):记录一定时期内的数据总数。每个计数器包含以下数据。  计数:执行次数。 字节:数据包大小,以字节为单位。 执行时间:执行时间。   柱状图(Histogram):记录数据在桶中的分布。  基于上述数据类型,暴露了以下指标:\n   名称 类型 单位 描述     Write 计数器和柱状图 毫秒 套接字写计数器。   Read 计数器和柱状图 毫秒 套接字读计数器。   Write RTT 计数器和柱状图 微秒 套接字写入往返时间(RTT)计数器。   Connect 计数器和柱状图 毫秒 套接字连接/接受另一个服务器/客户端的计数器。   Close 计数器和柱状图 毫秒 有其他套接字的计数器。   Retransmit 计数器 毫秒 套接字重发包计数器   Drop 计数器 毫秒 套接字掉包计数器。    演示 在本节中,我们将演示如何在服务网格中执行网络分析。要跟上进度,你需要一个正在运行的 Kubernetes 环境。\n注意:所有的命令和脚本都可以在这个 GitHub 资源库中找到。\n安装 Istio Istio是最广泛部署的服务网格,并附带一个完整的演示应用程序,我们可以用来测试。要安装 Istio 和演示应用程序,请遵循以下步骤:\n 使用演示配置文件安装 Istio。 标记 default 命名空间,所以当我们要部署应用程序时,Istio 会自动注入 Envoy 的 sidecar 代理。 将 bookinfo 应用程序部署到集群上。 部署流量生成器,为应用程序生成一些流量。  export ISTIO_VERSION=1.13.1 # 安装 istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # 部署 bookinfo 应用程序 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # 产生流量 kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml 安装 SkyWalking 下面将安装 SkyWalking 所需的存储、后台和用户界面。\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; 安装 SkyWalking Rover SkyWalking Rover 部署在 Kubernetes 的每个节点上,它自动检测 Kubernetes 
集群中的服务。网络剖析功能已经在 SkyWalking Rover 的 0.3.0 版本中发布。当网络监控任务被创建时,SkyWalking Rover 会将数据发送到 SkyWalking 后台。\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml 启动网络分析任务 一旦所有部署完成,我们必须在 SkyWalking UI 中为服务的特定实例创建一个网络分析任务。\n要打开 SkyWalking UI,请运行:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system 目前,我们可以通过点击服务网格面板中的数据平面项目和 Kubernetes 面板中的服务项目来选择我们想要监控的特定实例。\n在下图中,我们选择了一个实例,在网络剖析标签里有一个任务列表。当我们点击启动按钮时,SkyWalking Rover 开始监测这个实例的网络。\n图 5\n完成 几秒钟后,你会看到页面的右侧出现进程拓扑结构。\n图 6\n当你点击进程之间的线时,你可以看到两个进程之间的 TCP 指标。\n图 7\n总结 在这篇文章中,我们详细介绍了一个使服务网格故障排除困难的问题:网络堆栈中各层之间缺乏上下文。这些情况下,当现有的服务网格 /envoy 不能时,eBPF 开始真正帮助调试 / 生产。然后,我们研究了如何将 eBPF 应用于普通的通信,如 TLS。最后,我们用 SkyWalking Rover 演示了这个过程的实现。\n目前,我们已经完成了对 OSI 第四层(主要是 TCP)的性能分析。在未来,我们还将介绍对 OSI 第 7 层协议的分析,如 HTTP。\n开始使用 Istio 开始使用服务网格,Tetrate Istio Distro 是安装、管理和升级 Istio 的最简单方法。它提供了一个经过审查的 Istio 上游发布,由 Tetrate 为特定平台进行测试和优化,加上一个 CLI,方便获取、安装和配置多个 Istio 版本。Tetrate Istio Distro 还为 FedRAMP 环境提供 FIPS 认证的 Istio 构建。\n对于需要以统一和一致的方式在复杂的异构部署环境中保护和管理服务和传统工作负载的企业,我们提供 Tetrate Service Bridge,这是我们建立在 Istio 和 Envoy 上的旗舰工作负载应用连接平台。\n联系我们以了解更多。\n其他资源  SkyWalking Github Repo SkyWalking Rover Github Repo SkyWalking Rover 文件 通过使用 eBPF 博文准确定位服务网格关键性能影响 Apache SkyWalking 与本地 eBPF 代理的介绍 eBPF hook概述  ","title":"使用 eBPF 诊断服务网格网络性能","url":"/zh/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"SkyWalking CLI 0.11.0 is released. Go to downloads page to find release tars.\n Add .github/scripts to release source tarball by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/140 Let the eBPF profiling could performs by service level by @mrproliu in https://github.com/apache/skywalking-cli/pull/141 Add the sub-command for estimate the process scale by @mrproliu in https://github.com/apache/skywalking-cli/pull/142 feature: update install.sh version regex by @Alexxxing in https://github.com/apache/skywalking-cli/pull/143 Update the commands relate to the process by @mrproliu in https://github.com/apache/skywalking-cli/pull/144 Add layer to event related commands by @fgksgf in https://github.com/apache/skywalking-cli/pull/145 Add layer to events.graphql by @fgksgf in https://github.com/apache/skywalking-cli/pull/146 Add layer field to alarms.graphql by @fgksgf in https://github.com/apache/skywalking-cli/pull/147 Upgrade crypto lib to fix cve by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/148 Remove layer field in the instance and process commands by @mrproliu in https://github.com/apache/skywalking-cli/pull/149 Remove duration flag in profiling ebpf schedules by @mrproliu in https://github.com/apache/skywalking-cli/pull/150 Remove total field in trace list and logs list commands by @mrproliu in https://github.com/apache/skywalking-cli/pull/152 Remove total field in event list, browser logs, alarm list commands. 
by @mrproliu in https://github.com/apache/skywalking-cli/pull/153 Add aggregate flag in profiling ebpf analysis commands by @mrproliu in https://github.com/apache/skywalking-cli/pull/154 event: fix event query should query all types by default by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/155 Fix a possible lint error and update CI lint version by @JarvisG495 in https://github.com/apache/skywalking-cli/pull/156 Add commands for support network profiling by @mrproliu in https://github.com/apache/skywalking-cli/pull/158 Add the components field in the process relation by @mrproliu in https://github.com/apache/skywalking-cli/pull/159 Trim license headers in query string by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/160 Bump up dependency swck version to fix CVE by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/161 Bump up swck dependency for transitive dep upgrade by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/162 Add the sub-commands for query sorted metrics/records by @mrproliu in https://github.com/apache/skywalking-cli/pull/163 Add compatibility documentation by @mrproliu in https://github.com/apache/skywalking-cli/pull/164 Overhaul licenses, prepare for 0.11.0 by @kezhenxu94 in https://github.com/apache/skywalking-cli/pull/165  ","title":"Release Apache SkyWalking CLI 0.11.0","url":"/events/release-apache-skywalking-cli-0-11-0/"},{"content":"SkyWalking Kubernetes Helm Chart 4.3.0 is released. Go to downloads page to find release tars.\n Fix hasSuffix replace hasPrefix by @geffzhang in https://github.com/apache/skywalking-kubernetes/pull/86 Add \u0026ldquo;pods/log\u0026rdquo; permission to OAP so on-demand Pod log can work by @kezhenxu94 in https://github.com/apache/skywalking-kubernetes/pull/87 add .Values.oap.initEs to work with ES initial by @williamyao1982 in https://github.com/apache/skywalking-kubernetes/pull/88 Remove Istio adapter, add changelog for 4.3.0 by @kezhenxu94 in https://github.com/apache/skywalking-kubernetes/pull/89 Bump up helm chart version by @kezhenxu94 in https://github.com/apache/skywalking-kubernetes/pull/90  ","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.3.0","url":"/events/release-apache-skywalking-kubernetes-helm-chart-4.3.0/"},{"content":"SkyWalking Cloud on Kubernetes 0.7.0 is released. Go to downloads page to find release tars.\nFeatures  Replace go-bindata with embed lib. Add the OAPServerConfig CRD, webhooks and controller. Add the OAPServerDynamicConfig CRD, webhooks and controller. Add the SwAgent CRD, webhooks and controller. [Breaking Change] Remove the way to configure the agent through Configmap.  Bugs  Fix the error in e2e testing. Fix status inconsistent with CI. Bump up prometheus client version to fix cve.  Chores  Bump several dependencies of adapter. Update license eye version. Bump up SkyWalking OAP to 9.0.0. Bump up the k8s api of the e2e environment to v1.21.10.  ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.7.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-7-0/"},{"content":"SkyWalking Rover 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support NETWORK Profiling. Let the logger as a configurable module. Support analyze the data of OpenSSL, BoringSSL library, GoTLS, NodeTLS in NETWORK Profiling. Enhancing the kubernetes process finder.  Bug Fixes  Fixed reading process paths incorrect when running as a container. Fix the crash caused by multiple profiling tasks.  
Issues and PR  All issues are here. All pull requests are here.  ","title":"Release Apache SkyWalking Rover 0.3.0","url":"/events/release-apache-skwaylking-rover-0-3-0/"},{"content":"SkyWalking Java Agent 8.12.0 is released. Go to downloads page to find release tars. Changes by Version\n8.12.0  Fix Shenyu plugin\u0026rsquo;s NPE in reading trace ID when IgnoredTracerContext is used in the context. Update witness class in elasticsearch-6.x-plugin, avoid throwing NPE. Fix onHalfClose using span operation name /Request/onComplete instead of the wrong name /Request/onHalfClose. Add plugin to support RESTeasy 4.x. Add plugin to support hutool-http 5.x. Add plugin to support Tomcat 10.x. Save http status code regardless of its status. Upgrade byte-buddy to 1.12.13, and adopt byte-buddy APIs changes. Upgrade gson to 2.8.9. Upgrade netty-codec-http2 to 4.1.79.Final. Fix race condition causing agent to not reconnect after network error. Force the injected high-priority classes in order to avoid NoClassDefFoundError. Add plugin to support xxl-job 2.3.x. Add plugin to support Micronaut (HTTP Client/Server) 3.2.x-3.6.x. Add plugin to support NATS Java client 2.14.x-2.15.x. Remove inappropriate dependency from elasticsearch-7.x-plugin. Upgrade jedis plugin to support 3.x(stream), 4.x.  Documentation  Add a section in Bootstrap-plugins doc, introducing HttpURLConnection Plugin compatibility. Update Plugin automatic test framework, fix inconsistent description about configuration.yml. Update Plugin automatic test framework, add expected data format of the log items.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.12.0","url":"/events/release-apache-skywalking-java-agent-8-12-0/"},{"content":"This is an official announcement from the SkyWalking team.\nSkyWalking backend server and UI released the significant 9.2.0 version on Sep. 2nd, 2022. With the newly added Layer concept, the eBPF agent, and wider middleware server monitoring (such as MySQL and PostgreSQL servers) powered by the OpenTelemetry ecosystem, SkyWalking v9 is much more powerful than the last v8 version (8.9.1).\nAs of now, we have resolved all critical bugs found since the 9.0.0 release that could block v8 users from upgrading. v9 releases also provide the same compatibility as the 8.9.1 release, so end users should not be blocked when they upgrade. (As usual, we don\u0026rsquo;t provide storage structure compatibility; users should initialize a new version with an empty database.)\nMore importantly, we are confident that v9 can provide a more stable and higher-performance APM in production environments.\nThe 8.9.1 release was published in Dec. 2021. Since then, no one has contributed any code, and no committer has requested to begin a new iteration or planned a patch release. From the project management committee\u0026rsquo;s perspective, 8.x has become inactive.\nWe are going to wait for another 3 months before officially ending the 8.x series' life.\nNotice: this could change if at least 3 committers commit to working on further 8.x releases officially and provide a release plan.\n","title":"Plan to End-of-life(EOL) all v8 releases in Nov. 2022","url":"/events/deprecate-v8/"},{"content":"SkyWalking 9.2.0 is released. Go to downloads page to find release tars.\neBPF Network Profiling for K8s Pod Event and Metrics Association MySQL Server Monitoring PostgreSQL Server Monitoring Project  [Critical] Fix a low performance issue of metric persistence in the ElasticSearch storage implementation. 
One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. 
[Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. [Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a URL parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MySQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the UI doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.2.0","url":"/events/release-apache-skywalking-apm-9.2.0/"},{"content":"SkyWalking Rust 0.4.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Publish release doc. 
by @wu-sheng in https://github.com/apache/skywalking-rust/pull/31 Set up CI and approval requirements by @wu-sheng in https://github.com/apache/skywalking-rust/pull/32 Move skywalking_proto mod to single files. by @jmjoy in https://github.com/apache/skywalking-rust/pull/33 Polish the release doc. by @wu-sheng in https://github.com/apache/skywalking-rust/pull/34 Add serde support for protobuf generated struct. by @jmjoy in https://github.com/apache/skywalking-rust/pull/35 Improve LogReporter and fix tests. by @jmjoy in https://github.com/apache/skywalking-rust/pull/36 Split tracer inner segment sender and receiver into traits. by @jmjoy in https://github.com/apache/skywalking-rust/pull/37 Switch to use nightly rustfmt. by @jmjoy in https://github.com/apache/skywalking-rust/pull/38 Change Span to refer to SpanStack, rather than TracingContext. by @jmjoy in https://github.com/apache/skywalking-rust/pull/39 Adjust the trace structure. by @jmjoy in https://github.com/apache/skywalking-rust/pull/40 Add logging. by @jmjoy in https://github.com/apache/skywalking-rust/pull/41 Upgrade dependencies. by @jmjoy in https://github.com/apache/skywalking-rust/pull/42 Add feature vendored, to auto build protoc. by @jmjoy in https://github.com/apache/skywalking-rust/pull/43 Add metrics. by @jmjoy in https://github.com/apache/skywalking-rust/pull/44 Add more GH labels as new supports by @wu-sheng in https://github.com/apache/skywalking-rust/pull/45 Bump to 0.4.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/46 Fix trace id is not transmitted. by @jmjoy in https://github.com/apache/skywalking-rust/pull/47  ","title":"Release Apache SkyWalking Rust 0.4.0","url":"/events/release-apache-skywalking-rust-0-4-0/"},{"content":"目录  开篇 为什么需要全链路监控 为什么选择SkyWalking 预研 POC 优化 未来  1、开篇 自从SkyWalking开始在公司推广,时不时会在排查问题的人群中听到这样的话:“你咋还没接SkyWalking?接入后,一眼就看出是哪儿的问题了\u0026hellip;\u0026quot;,正如同事所说的,在许多情况下,SkyWalking就是这么秀。作为实践者,我非常感谢SkyWalking,因为这款国产全链路监控产品给公司的的伙伴们带来了实实在在的帮助;也特别感谢公司的领导和同事们,正因为他们的支持和帮助,才让这套SkyWalking(V8.5.0)系统从起初的有用进化到现在的好用;从几十亿的Segment储能上限、几十秒的查询耗时,优化到千亿级的Segment储能、毫秒级的查询耗时。\n小提示:\n SkyWalking迭代速度很快,公司使用的是8.5.0版本,其新版本的性能肯定有改善。 Segment是SkyWalking中提出的概念,表示一次请求在某个服务内的执行链路片段的合集,一个请求在多个服务中先后产生的Segment串起来构成一个完整的Trace,如下图所示:  SkyWalking的这次实践,截止到现在有一年多的时间,回顾总结一下这段历程中的些许积累和收获,愿能反哺社区,给有需求的道友提供个案例借鉴;也希望能收获到专家们的指导建议,把项目做得更好。因为安全约束,要把有些内容和谐掉,但也努力把这段历程中那些**靓丽的风景,**尽可能完整的呈现给大家。\n2、为什么需要全链路监控 随着微服务架构的演进,单体应用按照服务维度进行拆分,组织架构也随之演进以横向、纵向维度拆分;一个业务请求的执行轨迹,也从单体应用时期一个应用实例内一个接口,变成多个服务实例的多个接口;对应到组织架构,可能跨越多个BU、多个Owner。虽然微服务架构高内聚低耦合的优势是不言而喻的,但是低耦合也有明显的副作用,它在现实中给跨部门沟通、协作带来额外的不可控的开销;因此开发者尤其是终端业务侧的架构师、管理者,特别需要一些可以帮助理解系统拓扑和用于分析性能问题的工具,便于在架构调整、性能检测和发生故障时,缩减沟通协作方面的精力和时间耗费,快速定位并解决问题。\n我所在的平安健康互联网股份有限公司(文中简称公司),是微服务架构的深度实践者。公司用互联网技术搭建医疗服务平台,致力于构筑专业的医患桥梁,提供专业、全面、高品质、一站式企业健康管理服务。为了进一步提高系统服务质量、提升问题响应效率,部门在21年结合自身的一些情况,决定对现行的全链路监控系统进行升级,目的与以下网络中常见的描述基本一致:\n 快速发现问题 判断故障影响范围 梳理服务依赖并判断依赖的合理性 分析链路性能并实施容量规划  3、为什么选择SkyWalking 在做技术选型时,网络中搜集的资料显示,谷歌的 Dapper系统,算是链路追踪领域的始祖。受其公开论文中提出的概念和理念的影响,一些优秀的企业、个人先后做出不少非常nice的产品,有些还在社区开源共建,如:韩国的Pinpoint,Twitter的Zipkin,Uber的Jaeger及中国的SkyWalking 等,我司选型立项的过程中综合考虑的因素较多,这里只归纳一下SkyWalking吸引我们的2个优势:\n  产品的完善度高:\n java生态,功能丰富 社区活跃,迭代迅速    链路追踪、拓扑分析的能力强:\n 插件丰富,探针无侵入。 采用先进的流式拓扑分析设计    “好东西不需要多说,实际行动告诉你“,这句话我个人非常喜欢,关于SkyWalking的众多的优点,网络上可以找到很多,此处先不逐一比较、赘述了。\n4、预研 当时最新版本8.5.0,梳理分析8.x的发布记录后,评估此版本的核心功能是蛮稳定的,于是基于此版本开始了SkyWalking的探索之旅。当时的认知是有限的,串行思维模型驱使我将关注的问题聚焦在架构原理是怎样、有什么副作用这2个方面:\n  架构和原理:\n agent端 主要关注 Java Agent的机制、SkyWalking Agent端的配置、插件的工作机制、数据采集及上报的机制。 服务端 主要关注 角色和职责、模块和配置、数据接收的机制、指标构建的机制、指标聚合的机制及指标存储的机制。 存储端 主要关注 
数据量,存储架构要求以及资源评估。    副作用:\n 功能干扰 性能损耗    4.1 架构和原理 SkyWalking社区很棒,官网文档和官方出版的书籍有较系统化的讲解,因为自己在APM系统以及Java Agent方面有一些相关的经验沉淀,通过在这两个渠道的学习,对Agent端和OAP(服务端)很快便有了较系统化的认知。在做系统架构选型时,评估数据量会比较大(成千上万的JVM实例数,每天采集的Segment数量可能是50-100亿的级别),所以传输通道选择Kafka、存储选择Elasticsearch,如此简易版的架构以及数据流转如下图所示:\n这里有几处要解释一下:\n Agent上报数据给OAP端,有grpc通道和kafka通道,当时就盲猜grpc通道可能撑不住,所以选择kafka通道来削峰;kafka通道是在8.x里加入的。 千亿级的数据用ES来做存储肯定是可以的。 图中L1聚合的意思是:SkyWalking OAP服务端 接收数据后,构建metric并完成metric 的Level-1聚合,这里简称L1聚合。 图中L2聚合的意思是:服务端 基于metric的Level-1聚合结果,再做一次聚合,即Level-2聚合,这里简称L2聚合。后续把纯Mixed角色的集群拆成了两个集群。  4.2 副作用 对于质量团队和接入方来说,他们最关注的问题是,接入SkyWalking后:\n 是否对应用有功能性干扰 在运行期能带来哪些性能损耗  这两个问题从3个维度来得到答案:\n  网络资料显示:\n Agent带来的性能损耗在5%以内 未搜到功能性干扰相关的资料(盲猜没有这方面问题)    实现机制评估:\n 字节码增强机制是JVM提供的机制,SkyWalking使用的字节码操控框架ByteBuddy也是成熟稳定的;通过自定义ClassLoader来加载管理插件类,不会产生冲突和污染。 Agent内插件开发所使用的AOP机制是基于模板方法模式实现的,风控很到位,即使插件的实现逻辑有异常也不影响用户逻辑的执行; 插件采集数据跟上报逻辑之间用了一个轻量级的无锁环形队列进行解耦,算是一种保护机制;这个队列在MPSC场景下性能还不错;队列采用满时丢弃的策略,不会有积压阻塞和OOM。    性能测试验证\n 测试的老师针对dubbo、http 这两种常规RPC通信场景,进行压力测试和稳定性测试,结果与网络资料描述一致,符合预期。    5、POC 在POC阶段,接入几十个种子应用,在非生产环境试点观察,同时完善插件补全链路,对接公司的配置中心,对接发布系统,完善自监控.全面准备达到推广就绪状态。\n5.1 对接发布系统 为了对接公司的发布系统,方便系统的发布,将SkyWalking应用拆分为4个子应用:\n   应用 介绍     Webapp Skywalking的web端   Agent Skywalking的Agent端   OAP-Receiver skywakling的服务端,角色是Mixed或Receiver   OAP-Aggregator skywalking的服务端,角色是Aggregator    这里有个考虑,暂定先使用纯Mixed角色的单集群,有性能问题时就试试 Receiver+Aggregator双角色集群模式,最终选哪种视效果而定。\nSkyWalking Agent端是基于Java Agent机制实现的,采用的是启动挂载模式;启动挂载需在启动脚本里加入挂载Java Agent的逻辑,发布系统实现这个功能需要注意2点:\n 启动脚本挂载SkyWalking Agent的环节,尽量让用户无感知。 发布系统在挂载Agent的时候,给Agent指定应用名称和所属分组信息。  SkyWalking Agent的发布和升级也由发布系统来负责;Agent的升级采用了灰度管控的方案,控制的粒度是应用级和实例级两种:\n 按照应用灰度,可给应用指定使用什么版本的Agent 按照应用的实例灰度,可给应用指定其若干实例使用什么版本的Agent  5.2 完善插件补全链路 针对公司OLTP技术栈,量身定制了插件套,其中大部分在开源社区的插件库中有,缺失的部分通过自研快速补齐。\n这些插件给各组件的核心环节埋点,采集数据上报给SkyWalking后,Web端的【追踪】页面就能勾勒出丰满完美的请求执行链路;这对架构师理解真实架构,测试同学验证逻辑变更和分析性能损耗,开发同学精准定位问题都非常的有帮助。这里借官方在线Demo的截图一用(抱歉后端程序员,五毛特效都没做出来,丰满画面还请自行脑补)\n友情小提示:移除不用的插件对程序编译打包和减少应用启动耗时很有帮助。\n5.3压测稳测 测试的老师,针对SkyWalking Agent端的插件套,设计了丰富的用例,压力测试和稳定性测试的结果都符合预期;每家公司的标准不尽一致,此处不再赘述。\n5.4 对接自研的配置中心 把应用中繁杂的配置交给配置中心来管理是非常必要的,配置中心既能提供启动时的静态配置,又能管理运行期的动态配置,而且外部化配置的机制特别容易满足容器场景下应用的无状态化要求。啰嗦一下,举2个例子:\n 调优时,修改参数的值不用来一遍开发到测试再到生产的发布。 观测系统状态,修改日志配置后不需要来一遍开发到测试再到生产的发布。  Skywaling在外接配置中心这块儿,适配了市面中主流的配置中心产品。而公司的配置中心是自研的,需要对接一下,得益于SkyWalking提供的模块化管理机制,只用扩展一个模块即可。\n在POC阶段,梳理服务端各模块的功能,能感受到其配置化做的不错,配置项很丰富,管控的粒度也很细;在POC阶段几乎没有变动,除了对Webapp模块的外部化配置稍作改造,与配置中心打通以便在配置中心管理 Webapp模块中Ribbon和Hystrix的相关配置。\n5.5完善自监控 自监控是说监控SkyWalking系统内各模块的运转情况:\n   组件 监控方案 说明     kafka kafka-manager 它俩是老搭档了   Agent端 Skywalking Agent端会发心跳信息给服务端,可在Web端看到Agent的信息   OAP集群 prometheus 指标还算丰富,感觉缺的可以自己补充   ES集群 prometheus 指标还算丰富    完善自监控后的架构如下图所示:\n5.6 自研Native端SDK 公司移动端的应用很核心,也要使用链路追踪的功能,社区缺了这块,于是基于SkyWalking的协议,移动端的伙伴们自研了一套SDK,弥补了Native端链路数据的缺失,也在后来的秒开页面指标统计中发挥了作用。随着口口相传,不断有团队提出需求、加入建设,所以也在持续迭代中;内容很多,这里先不展开。\n5.7 小结 POC阶段数据量不大,主要是发现系统的各种功能性问题,查缺补漏。\n6、优化 SkyWalking的正式推广采用的是城市包围农村的策略;公司的核心应用作为第一批次接入,这个策略有几个好处:\n 核心应用的监管是重中之重,优先级默认最高。 核心应用的上下游应用,会随着大家对SkyWalking依赖的加深,而逐步自主接入。  当然安全是第一位的,无论新系统多好、多厉害,其引入都需遵守安全稳定的前提要求。既要安全又要快速还要方便,于是基于之前Agent灰度接入的能力,在发布系统中增加应用Owner自助式灰度接入和快速卸载SkyWalking Agent的能力,即应用负责人可自主选择哪个应用接入,接入几个实例,倘若遇到问题仅通过重启即可完成快速卸载;这个能力在推广的前期发挥了巨大的作用;毕竟安全第一,信任也需逐步建立。\n随着应用的接入、使用,我们也逐渐遇到了一些问题,这里按照时间递增的顺序将问题和优化效果快速的介绍给大家,更多技术原理的内容计划在【SkyWalking(v8.5.0)调优系列】补充。开始之前有几个事项要说明:\n 下文中提到的数字仅代表我司的情况,标注的Segment数量是处理这个问题的那段时间的情况,并不是说达到这个数量才开始出现这个现象。 这些数值以及当时的现象,受到宿主机配置、Segment数据的大小、存储处理能力等多种因素的影响;请关注调整的过程和效果,不必把数字和现象对号入座哈。  6.1 启动耗时: 问题: 
有同事反馈应用启动变慢,排查发现容器中多数应用启动的总耗时,在接入SkyWalking前是2秒,接入后变成了16秒以上,公司很多核心应用的实例数很多,这样的启动损耗对它们的发布影响太大。\n优化:  记录启动耗时并随着其他启动数据上报到服务端,方便查看对比。 优化Kafka Reporter的启动过程,将启动耗时减少了3-4秒。 优化类匹配和增强环节(重点)后,容器中的应用启动总耗时从之前16秒以上降低到了3秒内。 梳理Kafka 启动和上报的过程中,顺带调整了Agent端的数据上报到kafka的分区选择策略,将一个JVM实例中的数据全部发送到同一个的分区中,如此在L1层的聚合就完成了JVM实例级的Metric聚合,需注意调整Kafka分片数来保证负载均衡。  6.2 kafka积压-6亿segment/天 问题: SkyWalking OAP端消费慢,导致Kafka中Segment积压。未能达到能用的目标。\n优化: 从SkyWalking OAP端的监控指标中没有定位出哪个环节的问题,把服务端单集群拆为双集群,即把 Mixed角色的集群 ,修改为 Receiver 角色(接收和L1聚合)的集群 ,并加入 Aggregation角色(L2聚合)的集群,调整成了双集群模式,数据流传如下图所示:\n6.3 kafka积压-8亿segment/天 问题: SkyWalking OAP端消费慢,导致Kafka中Segment积压,监控指标能看出是在ES存储环节慢,未能达到能用的目标。\n优化:  优化segment保存到ES的批处理过程,调整BulkProcessor的线程数和批处理大小。 优化metrics保存到ES的批处理过程,调整批处理的时间间隔、线程数、批处理大小以及刷盘时间。  6.4 kafka积压-20亿segment/天 问题: Aggregation集群的实例持续Full GC,Receiver集群通过grpc 给Aggregation集群发送metric失败。未能达到能用的目标。\n优化:  增加ES节点、分片,效果不明显。 ES集群有压力,但无法精准定位出是什么数据的什么操作引发的。采用分治策略,尝试将数据拆分,从OAP服务端读写逻辑调整,将ES单集群拆分为 trace集群 和 metric集群;之后对比ES的监控指标明确看出是metric集群读写压力太大。  优化Receiver集群metric的L1聚合,完成1分钟的数据聚合后,再提交给Aggregation集群做L2聚合。 Aggregation集群metric的L2 聚合是基于db实现的,会有 空读-写-再读-累加-更新写 这样的逻辑,每次写都会有读,调整逻辑是:提升读的性能,优化缓存机制减少读的触发;调整间隔,避免触发累加和更新。 将metric批量写ES操作调整成BulkProcessor。 ES的metric集群 使用SSD存储,增加节点数和分片数。  这一次的持续优化具有里程碑式的意义,Kafka消费很快,OAP各机器的Full GC没了,ES的各方面指标也很稳定;接下来开始优化查询,提升易用性。\n6.5 trace查询慢-25亿segment/天 问题: Web端【追踪】页中的查询都很慢,仅保存了15天的数据,按照traceId查询耗时要20多秒,按照条件查询trace列表的耗时更糟糕;这给人的感受就是“一肚子墨水倒不出来”,未能达到好用的目标。\n优化: ES查询优化方面的信息挺多,但通过百度筛选出解决此问题的有效方案,就要看咱家爱犬的品类了;当时搜集整理了并尝试了N多优化条款,可惜没有跟好运偶遇,结论是颜值不可靠。言归正传,影响读写性能的基本要素有3个:读写频率,数据规模,硬件性能;trace的情况从这三个维度来套一套模板:\n   要素 trace的情况 备注     读写频率 宏观来看是写多读少的状况    数据规模 按照每天50亿个segment来算,半个月是750亿,1个月是1500亿。    硬件性能 普通硬盘速度一般     这个分析没有得出具有指导意义的结论,读写频率这里粒度太粗,用户的使用情况跟时间也有紧密的关系,情况大概是:\n 当天的数据是读多写多(当天不断有新数据写入,基于紧急响应的需求,问题出现时可能是近实时的排查处理)。 前一天的数据是读多写少(一般也会有问题隔天密集上报的情况,0点后会有前一天数据延迟到达的情况)。 再早的话无新数据写入,数据越早被读的概率也越小。  基于以上分析,增加时间维度并细化更多的参考因素后,分析模型变成了这样:\n   要素 当天 当天-1 当天-2 ~ 当天-N     写频率 多 少 无   读(查询)频率 多 多 少   读响应速度要求 快 快 慢点也行   数据规模 50亿 50亿 50亿* (N-2)   宿主机性能要求 高 高 次高   硬盘速度要求 高(SSD) 高(SSD) 次高(机械)   硬件成本 高 高 次高   期望成本 低 低 低    从上表可以看出,整体呈现出hot-warm数据架构的需求之势,近1-2天为hot数据,之前的为warm数据;恰好ES7提供了hot-warm架构支持,按照hot-warm改造后架构如下图所示:\n 恰逢公司ES中台调优版的ES发布,其内置的ZSTD压缩算法 空间压缩效果非常显著。 对 trace集群进行hot-warm架构调整,查询耗时从20多秒变成了2-3秒,效果是非常明显的。 从查询逻辑进一步调整,充分利用ES的数据分片、路由机制,把全量检索调整为精准检索,即降低检索时需要扫描的数据量,把2-3秒优化到毫秒。  这里要炫一个5毛特效,这套机制下,Segment数据即使是保留半年的,按照TraceId查询的耗时也是毫秒。\n至此完成了查询千亿级Trace数据只要毫秒级耗时的阶段性优化。\n6.6 仪表盘和拓扑查询慢 问题: Web端的【拓扑】页,在开始只有几十个应用的时候,虽然很慢,但还是能看到数据,随着应用增多后,【拓扑】页面数据请求一直是超时(配置的60s超时)的,精力有限,先通过功能降级把这个页面隐藏了;【仪表盘】的指标查询也非常的慢,未能达到好用的目标。\n优化: Web端的【仪表盘】页和【拓扑】页是对SkyWalking里metric数据的展现,metric数据同trace数据一样满足hot-warm的特征。\n metric集群采用hot-warm架构调整,之后仪表盘中的查询耗时也都减小为毫秒级。 【拓扑】页接口依然是超时(60s),对拓扑这里做了几个针对性的调整:  把内部的循环调用合并,压缩调用次数。 去除非必要的查询。 拆分隔离通用索引中的数据,避免互相干扰。 全量检索调整为精准检索,即降低检索时需要扫描的数据量。    至此完成了拓扑页数据查询毫秒级耗时的阶段性优化。\n6.7 小结 SkyWalking调优这个阶段,恰逢上海疫情封城,既要为生存抢菜,又要翻阅学习着各种ES原理、调优的文档资料,一行一行反复的品味思考SkyWalking相关的源码,尝试各种方案去优化它,梦中都在努力提升它的性能。疫情让很多人变得焦虑烦躁,但以我的感受来看在系统的性能压力下疫情不值一提。凡事贵在坚持,时间搞定了诸多困难,调优的效果是很显著的。\n可能在业务价值驱动的价值观中这些技术优化不产生直接业务价值,顶多是五毛特效,但从其他维度来看它价值显著:\n 对个人来说,技术有提升。 对团队来说,实战练兵提升战力,团队协作加深友情;特别感谢ES中台这段时间的鼎力支持! 
对公司来说,易用性的提升将充分发挥SkyWalking的价值,在问题发生时,给到同事们切实、高效的帮助,使得问题可以被快速响应;须知战争拼的是保障。  这期间其实也是有考虑过其他的2个方案的:\n 使用降低采样率的兜底方案;但为了得到更准确的指标数据,以及后续其他的规划而坚持了全采样。 采用ClickHouse优化存储;因为公司有定制优化的ES版本,所以就继续在ES上做存储优化,刚好借此机会验证一下。后续【全链路结构化日志】的存储会使用ClickHouse。  这个章节将内容聚焦在落地推广时期技术层面的准备和调优,未描述团队协调、推广等方面的情况;因每个公司情况不同,所以并未提及;但其实对多数公司来说,有些项目的推广比技术本身可能难度更大,这个项目也遇到过一些困难,PM去推广是既靠能力又靠颜值, 以后有机会再与大家探讨。\n7、未来 H5、Native以及后端应用都在持续接入中,相应的SDK也在不断的迭代;目前正在基于已建立的链路通道,完善【全链路业务状态追踪】和【全链路结构化日志追踪】,旨在给运营、客服、运维、开发等服务在一线的同事们提供多视角一站式的观测平台,全方位提升系统服务质量、提高问题响应速度。\n","title":"SkyWalking on the way - 平安健康千亿级的全链路追踪系统的建设与实践","url":"/zh/2022-08-30-pingan-jiankang/"},{"content":"Observability essential when working with distributed systems. Built on 3 pillars of metrics, logging and tracing, having the right tools in place to quickly identify and determine the root cause of an issue in production is imperative. In this Kongcast interview, we explore the benefits of having observability and demo the use of Apache SkyWalking. We walk through the capabilities that SkyWalking offers out of the box and debug a common HTTP 500 error using the tool.\nAndrew Kew is interviewed by Viktor Gamov, a developer advocate at Kong Inc\nAndrew is a highly passionate technologist with over 16 valuable years experience in building server side and cloud applications. Having spent the majority of his time in the Financial Services domain, his meritocratic rise to CTO of an Algorithmic Trading firm allowed him to not only steer the business from a technology standpoint, but build robust and scalable trading algorithms. His mantra is \u0026ldquo;right first time\u0026rdquo;, thus ensuring the projects or clients he is involved in are left in a better place than they were before he arrived.\nHe is the founder of a boutique software consultancy in the United Kingdom, QuadCorps Ltd, working in the API and Integration Ecosystem space and is currently on a residency programme at Kong Inc as a senior field engineer and technical account manager working across many of their enterprise strategic accounts.\n  ","title":"[Video] Distributed tracing demo using Apache SkyWalking and Kong API Gateway","url":"/blog/2022-08-11-kongcast-20-distributed-tracing-using-skywalking-kong/"},{"content":"SkyWalking Rust 0.3.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  Update README.md by @wu-sheng in https://github.com/apache/skywalking-rust/pull/24 Improve errors. by @jmjoy in https://github.com/apache/skywalking-rust/pull/25 Add tracer. by @jmjoy in https://github.com/apache/skywalking-rust/pull/26 Move e2e to workspace. by @jmjoy in https://github.com/apache/skywalking-rust/pull/27 Auto finalize context and span when dropped. by @jmjoy in https://github.com/apache/skywalking-rust/pull/28 Add context capture and continued methods. by @jmjoy in https://github.com/apache/skywalking-rust/pull/29 Bump to 0.3.0. by @jmjoy in https://github.com/apache/skywalking-rust/pull/30  ","title":"Release Apache SkyWalking Rust 0.3.0","url":"/events/release-apache-skywalking-rust-0-3-0/"},{"content":"SkyWalking NodeJS 0.5.1 is released. 
Go to downloads page to find release tars.\nSkyWalking NodeJS 0.5.1 is a patch release that fixed a vulnerability(CVE-2022-36127) in all previous versions \u0026lt;=0.5.0, we recommend all users who are using versions \u0026lt;=0.5.0 should upgrade to this version.\nThe vulnerability could cause NodeJS services that has this agent installed to be unavailable if the header includes an illegal SkyWalking header, such as\n OAP is unhealthy and the downstream service\u0026rsquo;s agent can\u0026rsquo;t establish the connection. Some sampling mechanism is activated in downstream agents.  ","title":"[CVE-2022-36127] Release Apache SkyWalking for NodeJS 0.5.1","url":"/events/release-apache-skywalking-nodejs-0-5-1/"},{"content":"SkyWalking Eyes 0.4.0 is released. Go to downloads page to find release tars.\n Reorganize GHA by header and dependency. (#123) Add rust cargo support for dep command. (#121) Support license expression in dep check. (#120) Prune npm packages before listing all dependencies (#119) Add support for multiple licenses in the header config section (#118) Add excludes to license resolve config (#117) maven: set group:artifact as dependency name and extend functions in summary template (#116) Stablize summary context to perform consistant output (#115) Add custom license urls for identification (#114) Lazy initialize GitHub client for comment (#111) Make license identifying threshold configurable (#110) Use Google\u0026rsquo;s licensecheck to identify licenses (#107) dep: short circuit if user declare dep license (#108)  ","title":"Release Apache SkyWalking Eyes 0.4.0","url":"/events/release-apache-skywalking-eyes-0-4-0/"},{"content":"SkyWalking NodeJS 0.5.0 is released. Go to downloads page to find release tars.\n Bump up grpc-node to 1.6.7 to fix CVE-2022-25878 (#85) Fix issue #9165 express router entry duplicated (#84) Fix skywalking s3 upload error #8824 (#82) Improved ignore path regex (#81) Upgrade data collect protocol (#78) Fix wrong instance properties (#77) Fix wrong command in release doc (#76)  ","title":"Release Apache SkyWalking for NodeJS 0.5.0","url":"/events/release-apache-skywalking-nodejs-0-5-0/"},{"content":"SkyWalking Infra E2E 1.2.0 is released. Go to downloads page to find release tars.\nFeatures  Expand kind file path with system environment. Support shutdown service during setup phase in compose mode. Expand kind file path with system environment. Support arbitrary os and arch. Support docker-compose v2 container naming. Support installing via go install and add install doc. Add retry when delete kind cluster. Upgrade to go1.18.  Bug Fixes  Fix the problem of parsing verify.retry.interval without setting value.  Documentation  Make trigger.times parameter doc more clear.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Infra E2E 1.2.0","url":"/events/release-apache-skywalking-infra-e2e-1-2-0/"},{"content":"SkyWalking Python 0.8.0 is released. Go to downloads page to find release tars.\n  Feature:\n Update mySQL plugin to support two different parameter keys. 
(#186) Add a SW_AGENT_LOG_REPORTER_SAFE_MODE option to control the HTTP basic auth credential filter (#200)    Plugins:\n Add Psycopg(3.x) support (#168) Add MySQL support (#178) Add FastAPI support (#181) Drop support for flask 1.x due to dependency issue in Jinja2 and EOL (#195) Add Bottle support (#214)    Fixes:\n Spans now correctly reference finished parents (#161) Remove potential password leak from Aiohttp outgoing url (#175) Handle error when REMOTE_PORT is missing in Flask (#176) Fix sw-rabbitmq TypeError when there are no headers (#182) Fix agent bootstrap traceback not shown in sw-python CLI (#183) Fix local log stack depth overridden by agent log formatter (#192) Fix typo that cause user sitecustomize.py not loaded (#193) Fix instance property wrongly shown as UNKNOWN in OAP (#194) Fix multiple components inconsistently named on SkyWalking UI (#199) Fix SW_AGENT_LOGGING_LEVEL not properly set during startup (#196) Unify the http tag name with other agents (#208) Remove namespace to instance properties and add pid property (#205) Fix the properties are not set correctly (#198) Improved ignore path regex (#210) Fix sw_psycopg2 register_type() (#211) Fix psycopg2 register_type() second arg default (#212) Enhance Traceback depth (#206) Set spans whose http code \u0026gt; 400 to error (#187)    Docs:\n Add a FAQ doc on how to use with uwsgi (#188)    Others:\n Refactor current Python agent docs to serve on SkyWalking official website (#162) Refactor SkyWalking Python to use the CLI for CI instead of legacy setup (#165) Add support for Python 3.10 (#167) Move flake configs all together (#169) Introduce another set of flake8 extensions (#174) Add E2E test coverage for trace and logging (#199) Now Log reporter cause_exception_depth traceback limit defaults to 10 Enable faster CI by categorical parallelism (#170)    ","title":"Release Apache SkyWalking Python 0.8.0","url":"/events/release-apache-skywalking-python-0-8-0/"},{"content":"SkyWalking Satellite 1.0.1 is released. Go to downloads page to find release tars.\nFeatures Bug Fixes  Fix metadata messed up when transferring Log data.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 1.0.1","url":"/events/release-apache-skwaylking-satellite-1-0-1/"},{"content":"Content Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. 
Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. 
Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. 
At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. 
Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF","url":"/blog/2022-07-05-pinpoint-service-mesh-critical-performance-impact-by-using-ebpf/"},{"content":"SkyWalking Rust 0.2.0 is released. Go to downloads page to find release tars.\nWhat\u0026rsquo;s Changed  add a description to compile in README.md by @Shikugawa in https://github.com/apache/skywalking-rust/pull/16 Update NOTICE to 2022 by @wu-sheng in https://github.com/apache/skywalking-rust/pull/17 fix ignore /e2e/target folder by @tisonkun in https://github.com/apache/skywalking-rust/pull/18 Remove Cargo.lock, update dependencies, update submodule, disable build grpc server api. by @jmjoy in https://github.com/apache/skywalking-rust/pull/19 Enhance Trace Context machenism. by @jmjoy in https://github.com/apache/skywalking-rust/pull/20 chore(typo): fix typo in context/propagation/context.rs by @CherishCai in https://github.com/apache/skywalking-rust/pull/21 Feature(tonic-build): set tonic-build.build_server(false), do not build Server code. by @CherishCai in https://github.com/apache/skywalking-rust/pull/22 Rename crate name skywalking_rust to skywalking? by @jmjoy in https://github.com/apache/skywalking-rust/pull/23  ","title":"Release Apache SkyWalking Rust 0.2.0","url":"/events/release-apache-skywalking-rust-0-2-0/"},{"content":"B站视频地址\n","title":"阿里云 - 可观测技术峰会 2022 - More than Tracing Logging Metrics","url":"/zh/2022-06-23-more-than-tracing-logging-metrics/"},{"content":"SkyWalking Java Agent 8.11.0 is released. Go to downloads page to find release tars. Changes by Version\n8.11.0  Fix cluster and namespace value duplicated(namespace value) in properties report. Add layer field to event when reporting. Remove redundant shade.package property. Add servicecomb-2.x plugin and Testcase. Fix NPE in gateway plugin when the timer triggers webflux webclient call. Add an optional plugin, trace-sampler-cpu-policy-plugin, which could disable trace collecting in high CPU load. Change the dateformat of logs to yyyy-MM-dd HH:mm:ss.SSS(was yyyy-MM-dd HH:mm:ss:SSS). Fix NPE in elasticsearch plugin. Grpc plugin support trace client async generic call(without grpc stubs), support Method type: UNARY、SERVER_STREAMING. Enhance Apache ShenYu (incubating) plugin: support trace grpc,sofarpc,motan,tars rpc proxy. Add primary endpoint name to log events. Fix Span not finished in gateway plugin when the gateway request timeout. 
Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report. Fix tcnative libraries relocation for aarch64. Add plugin.jdbc.trace_sql_parameters into Configuration Discovery Service. Fix argument type name of Array in postgresql-8.x-plugin from java.lang.String[] to [Ljava.lang.String; Add type name checking in ArgumentTypeNameMatch and ReturnTypeNameMatch Highlight ArgumentTypeNameMatch and ReturnTypeNameMatch type naming rule in docs/en/setup/service-agent/java-agent/Java-Plugin-Development-Guide.md Fix FileWriter scheduled task NPE Optimize gRPC Log reporter to set service name for the first element in the streaming.(No change for Kafka reporter)  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.11.0","url":"/events/release-apache-skywalking-java-agent-8-11-0/"},{"content":"SkyWalking Rover 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Support OFF_CPU Profiling. Introduce the BTFHub module. Update to using frequency mode to ON_CPU Profiling. Add logs in the profiling module logical.  Bug Fixes  Fix docker based process could not be detected.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Rover 0.2.0","url":"/events/release-apache-skwaylking-rover-0-2-0/"},{"content":"SkyWalking 9.1.0 is released. Go to downloads page to find release tars.\n eBPF agent(skywalking rover) is integrated in the first time  BanyanDB(skywalking native database) is integrated and passed MVP phase. On-demand logs are provided first time in skywalking for all mesh services and k8s deployment as a zero cost log solution  Zipkin alternative is being official, and Zipkin\u0026rsquo;s HTTP APIs are supported as well as lens UI.  Changes by Version Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. 
Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  
UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.1.0","url":"/events/release-apache-skywalking-apm-9.1.0/"},{"content":"SkyWalking BanyanDB 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  BanyanD is the server of BanyanDB  TSDB module. It provides the primary time series database with a key-value data module. Stream module. It implements the stream data model\u0026rsquo;s writing. Measure module. It implements the measure data model\u0026rsquo;s writing. Metadata module. It implements resource registering and property CRUD. Query module. It handles the querying requests of stream and measure. Liaison module. It\u0026rsquo;s the gateway to other modules and provides access endpoints to clients.   gRPC based APIs Document  API reference Installation instrument Basic concepts   Testing  UT E2E with Java Client and OAP    ","title":"Release Apache SkyWalking BanyanDB 0.1.0","url":"/events/release-apache-skywalking-banyandb-0-1-0/"},{"content":"SkyWalking BanyanDB 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support Measure, Stream and Property Query and Write APIs Support Metadata Management APIs for Measure, Stream, IndexRule and IndexRuleBinding  Chores  Set up GitHub actions to check code styles, licenses, and tests.  ","title":"Release Apache SkyWalking BanyanDB Java Client 0.1.0","url":"/events/release-apache-skywalking-banyandb-java-client-0-1-0/"},{"content":"SkyWalking Rover 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support detect processes in scanner or kubernetes mode. Support profiling C, C++, Golang, and Rust service.  
Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Rover 0.1.0","url":"/events/release-apache-skwaylking-rover-0-1-0/"},{"content":"SkyWalking Satellite 1.0.0 is released. Go to downloads page to find release tars.\nFeatures  Add the compat protocol receiver for the old version of agents. Support transmit the native eBPF Process and Profiling protocol. Change the name of plugin that is not well-named.  Bug Fixes  Fix Metadata lost in the Native Meter protocol.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 1.0.0","url":"/events/release-apache-skwaylking-satellite-1-0-0/"},{"content":"SkyWalking Eyes 0.3.0 is released. Go to downloads page to find release tars.\n  Dependency License\n Fix license check in go library testify (#93)    License Header\n fix command supports more languages:  Add comment style for cmake language (#86) Add comment style for hcl (#89) Add mpl-2.0 header template (#87) Support fix license header for tcl files (#102) Add python docstring comment style (#100) Add comment style for makefile \u0026amp; editorconfig (#90)   Support config license header comment style (#97) Trim leading and trailing newlines before rewrite license header cotent (#94) Replace already existing license header based on pattern (#98) [docs] add the usage for config the license header comment style (#99)    Project\n Obtain default github token in github actions (#82) Add tests for bare spdx license header content (#92) Add github action step summary for better experience (#104) Adds an option to the action to run in fix mode (#84) Provide --summary flag to generate the license summary file (#103) Add .exe suffix to windows binary (#101) Fix wrong file path and exclude binary files in src release (#81) Use t.tempdir to create temporary test directory (#95) Config: fix incorrect log message (#91) [docs] correct spelling mistakes (#96)    ","title":"Release Apache SkyWalking Eyes 0.3.0","url":"/events/release-apache-skywalking-eyes-0-3-0/"},{"content":"目录  SkyWalking和ShenYu介绍 ApacheShenYu插件实现原理 给gRPC插件增加泛化调用追踪并保持兼容 ShenYu网关可观测性实践 总结  1.SkyWalking和ShenYu介绍 1.1 SkyWalking SkyWalking是一个针对微服务、分布式系统、云原生的应用性能监控(APM)和可观测性分析平台(OAP), 拥有强大的功能,提供了多维度应用性能分析手段,包含分布式拓扑图、应用性能指标、分布式链路追踪、日志关联分析和告警。同时还拥有非常丰富的生态。广泛应用于各个公司和开源项目。\n1.2 Apache ShenYu (incubating) Apache ShenYu (incubating)是一个高性能,多协议,易扩展,响应式的API网关。 兼容各种主流框架体系,支持热插拔,用户可以定制化开发,满足用户各种场景的现状和未来需求,经历过大规模场景的锤炼。 支持丰富的协议:Http、Spring Cloud、gRPC、Dubbo、SOFARPC、Motan、Tars等等。\n2.ApacheShenYu插件实现原理 ShenYu的异步和以往接触的异步有一点不一样,是一种全链路异步,每一个插件的执行都是异步的,并且线程切换并不是单一固定的情况(和各个插件实现有关)。 网关会发起各种协议类型的服务调用,现有的SkyWalking插件发起服务调用的时候会创建ExitSpan(同步或异步). 网关接收到请求会创建异步的EntrySpan。 异步的EntrySpan需要和同步或异步的ExitSpan串联起来,否则链路会断。 串联方案有2种:\n 快照传递: 将创建EntrySpan之后的快照通过某种方式传递到创建ExitSpan的线程中。\n目前这种方式应用在异步的WebClient插件中,该插件能接收异步快照。ShenYu代理Http服务或SpringCloud服务便是通过快照传递实现span串联。 LocalSpan中转: 其它RPC类插件不像异步WebClient那样可以接收快照实现串联。尽管你可以改动其它RPC插件让其接收快照实现串联,但不推荐也没必要, 因为可以通过在创建ExitSpan的线程中,创建一个LocalSpan就可以实现和ExitSpan串联,然后将异步的EntrySpan和LocalSpan通过快照传递的方式串联。这样实现完全可以不改动原先插件的代码。  span连接如下图所示:\n也许你会问是否可以在一个通用的插件里面创建LocalSpan,而不是ShenYu RPC插件分别创建一个? 答案是不行,因为需要保证LocalSpan和ExitSpan在同一个线程,而ShenYu是全链路异步. 在实现上创建LocalSpan的代码是复用的。\n3. 
给gRPC插件增加泛化调用追踪并保持兼容 现有的SkyWalking gRPC插件只支持通过存根的方式发起的调用。而对于网关而言并没有proto文件,网关采取的是泛化调用(不通过存根),所以追踪rpc请求,你会发现链路会在网关节点断掉。 在这种情况下,需要让gRPC插件支持泛化调用,而同时需要保持兼容,不影响原先的追踪方式。实现上通过判断请求参数是否是动态消息(DynamicMessage),如果不是则走原先通过存根的追踪逻辑, 如果是则走泛化调用追踪逻辑。另外的兼容则是在gRPC新旧版本的差异,以及获取服务端IP各种情况的兼容,感兴趣的可以看看源码。\n4. ShenYu网关可观测性实践 上面讲解了SkyWalking ShenYu插件的实现原理,下面部署应用看下效果。SkyWalking功能强大,除了了链路追踪需要开发插件外,其它功能强大功能开箱即用。 这里只描述链路追踪和应用性能剖析部分,如果想体验SkyWalking功能的强大,请参考SkyWalking官方文档。\n版本说明:\n skywalking-java: 8.11.0-SNAPSHOT源码构建。说明:shenyu插件会在8.11.0版本发布,可能会在5月或6月初步发布它。Java代理正处于常规发布阶段。 skywalking: 9.0.0 V9 版本  用法说明:\nSkyWalking的设计非常易用,配置和激活插件请参考官方文档。\n SkyWalking Documentation SkyWalking Java Agent Documentation  4.1 向网关发起请求 通过postman客户端或者其它方式向网关发起各种服务请求\n4.2 请求拓扑图  4.3 请求链路(以gRPC为例) 正常链路: 异常链路: 点击链路节点变可以看到对应的节点信息和异常信息\n服务提供者span 网关请求span 4.4 服务指标监控 服务指标监控 4.5 网关后台指标监控 数据库监控: 线程池和连接池监控 4.6 JVM监控 4.7 接口分析 4.8 异常日志和异常链路分析 日志配置见官方文档\n日志监控 异常日志对应的分布式链路追踪详情 5. 总结 SkyWalking在可观测性方面对指标、链路追踪、日志有着非常全面的支持,功能强大,简单易用,专为大型分布式系统、微服务、云原生、容器架构而设计,拥有丰富的生态。 使用SkyWalking为Apache ShenYu (incubating)提供强大的可观测性支持,让ShenYu如虎添翼。最后,如果你对高性能响应式网关感兴趣,可以关注 Apache ShenYu (incubating) 。 同时感谢SkyWalking这么优秀的开源软件对行业所作的贡献。\n","title":"Apache ShenYu (incubating)插件实现原理和可观测性实践","url":"/zh/2022-05-08-apache-shenyuincubating-integrated-skywalking-practice-observability/"},{"content":"Content  Introduction of SkyWalking and ShenYu Apache ShenYu plugin implementation principle Adding generalized call tracking to the gRPC plugin and keeping it compatible ShenYu Gateway Observability Practice Summary  1. Introduction of SkyWalking and ShenYu 1.1 SkyWalking SkyWalking is an Application Performance Monitoring (APM) and Observability Analysis Platform (OAP) for microservices, distributed systems, and cloud natives, Has powerful features that provide a multi-dimensional means of application performance analysis, including distributed topology diagrams, application performance metrics, distributed link tracing, log correlation analysis and alerts. Also has a very rich ecology. Widely used in various companies and open source projects.\n1.2 Apache ShenYu (incubating) Apache ShenYu (incubating) High-performance,multi-protocol,extensible,responsive API Gateway. Compatible with a variety of mainstream framework systems, support hot plug, users can customize the development, meet the current situation and future needs of users in a variety of scenarios, experienced the temper of large-scale scenes. Rich protocol support: Http, Spring Cloud, gRPC, Dubbo, SOFARPC, Motan, Tars, etc.\n2. Apache ShenYu plugin implementation principle ShenYu\u0026rsquo;s asynchrony is a little different from previous exposure to asynchrony, it is a full-link asynchrony, the execution of each plug-in is asynchronous, and thread switching is not a single fixed situation (and the individual plug-in implementation is related). The gateway initiates service calls of various protocol types, and the existing SkyWalking plugins create ExitSpan (synchronous or asynchronous) when they initiate service calls. The gateway receives the request and creates an asynchronous EntrySpan. The asynchronous EntrySpan needs to be concatenated with the synchronous or asynchronous ExitSpan, otherwise the link will be broken.\nThere are 2 types of tandem solutions:\n Snapshot Delivery:\nPass the snapshot after creating the EntrySpan to the thread that created the ExitSpan in some way.\nCurrently this approach is used in the asynchronous WebClient plugin, which can receive asynchronous snapshots. 
When ShenYu proxies an Http service or a SpringCloud service, span concatenation is achieved through this snapshot passing. LocalSpan transit:\nOther RPC plugins cannot receive snapshots for concatenation the way the asynchronous WebClient plugin does. Although you could modify those RPC plugins to receive snapshots, that is neither recommended nor necessary: a LocalSpan is created in the thread where the ExitSpan is created, so it is automatically concatenated with the ExitSpan, and the asynchronous EntrySpan is then connected to that LocalSpan by snapshot passing. This works without changing the original plugin code.  The span connection is shown below:\nYou may ask whether the LocalSpan could be created in one generic plugin instead of separately in each ShenYu RPC plugin. The answer is no, because the LocalSpan and the ExitSpan must be created in the same thread, and ShenYu is asynchronous across the whole chain. In the implementation, the code that creates the LocalSpan is shared.\n3. Adding generalized call tracking to the gRPC plugin and keeping it compatible The existing SkyWalking gRPC plugin only supports calls initiated through stubs. The gateway has no proto files and uses generalized calls (without stubs), so when tracing RPC requests you will find that the trace breaks at the gateway node. In this case, the gRPC plugin needs to support generalized calls while remaining compatible and not affecting the original tracing method. This is achieved by checking whether the request parameter is a DynamicMessage: if it is not, the original stub-based tracing logic is used; if it is, the generalized call tracing logic is used. The remaining compatibility work covers the differences between old and new gRPC versions and the various ways of obtaining the server-side IP; if you are interested, please read the source code.\n4. ShenYu Gateway Observability Practice The sections above explain how the SkyWalking ShenYu plugin is implemented; now we deploy an application to see the effect. SkyWalking is powerful: apart from distributed tracing, which requires this plugin, its other features work out of the box. Here we only describe the tracing and application performance analysis parts; if you want to experience the full power of SkyWalking, please refer to the SkyWalking official documentation.\nVersion description:\n skywalking-java: 8.11.0-SNAPSHOT, built from source. Note: the shenyu plugin will be released in version 8.11.0, probably initially in May or June; the Java agent is in its regular release cycle. skywalking: 9.0.0 (V9)  Usage instructions:\nSkyWalking is designed to be very easy to use. 
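As a rough sketch of the usual activation steps, assuming the plugin ships as an optional plugin in your agent distribution (the jar names and paths below are illustrative, and shenyu-bootstrap.jar stands for your own gateway jar): optional Java agent plugins are activated by moving them from optional-plugins into plugins before starting the gateway, and the agent is attached with the -javaagent flag:\n# activate the optional plugin (jar name is illustrative and depends on the agent build)\nmv skywalking-agent/optional-plugins/apm-shenyu-plugin-8.11.0.jar skywalking-agent/plugins/\n# attach the agent to the gateway bootstrap with a service name and the OAP address\njava -javaagent:/path/to/skywalking-agent/skywalking-agent.jar \\  -Dskywalking.agent.service_name=shenyu-gateway \\  -Dskywalking.collector.backend_service=127.0.0.1:11800 \\  -jar shenyu-bootstrap.jar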
Please refer to the official documentation for configuring and activating the shenyu plugin.\n SkyWalking Documentation SkyWalking Java Agent Documentation  4.1 Sending requests to the gateway Initiate various service requests to the gateway via the postman client or other means.\n4.2 Request Topology Diagram   4.3 Request Trace (in the case of gRPC) Normal Trace: Abnormal Trace: Click on the link node to see the corresponding node information and exception information\nService Provider Span Gateway request span 4.4 Service Metrics Monitoring 4.5 Gateway background metrics monitoring Database Monitoring: Thread pool and connection pool monitoring: 4.6 JVM Monitoring 4.7 Endpoint Analysis 4.8 Exception log and exception link analysis See official documentation for log configuration\nLog monitoring Distributed link trace details corresponding to exception logs 5. Summary SkyWalking has very comprehensive support for metrics, link tracing, and logging in observability, and is powerful, easy to use, and designed for large distributed systems, microservices, cloud-native, container architectures, and has a rich ecosystem. Using SkyWalking to provide powerful observability support for Apache ShenYu (incubating) gives ShenYu a boost. Finally, if you are interested in high-performance responsive gateways, you can follow Apache ShenYu (incubating). Also, thanks to SkyWalking such an excellent open source software to the industry contributions.\n","title":"Apache ShenYu(incubating) plugin implementation principles and observability practices","url":"/blog/2022-05-08-apache-shenyuincubating-integrated-skywalking-practice-observability/"},{"content":"SkyWalking Kubernetes Event Exporter 1.0.0 is released. Go to downloads page to find release tars.\n Add Apache SkyWalking exporter to export events into SkyWalking OAP. Add console exporter for debugging purpose.  ","title":"Release Apache SkyWalking Kubernetes Event Exporter 1.0.0","url":"/events/release-apache-skywalking-kubernetes-event-exporter-1.0.0/"},{"content":"content:  Introduction Features Install SWCK Deploy a demo application Verify the injector Concluding remarks  1. Introduction 1.1 What\u0026rsquo;s SWCK? SWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nIn fact, SWCK is an operator developed based on kubebuilder, providing users with Custom Resources ( CR ) and controllers for managing resources ( Controller ), all CustomResourceDefinitions(CRDs)are as follows:\n JavaAgent OAP UI Storage Satellite Fetcher  1.2 What\u0026rsquo;s the java agent injector? For a java application, users need to inject the java agent into the application to get metadata and send it to the SkyWalking backend. To make users use the java agent more natively, we propose the java agent injector to inject the java agent sidecar into a pod. The java agent injector is actually a Kubernetes Mutation Webhook Controller. The controller intercepts pod events and applies mutations to the pod if annotations exist within the request.\n2. Features   Transparent. User’s applications generally run in normal containers while the java agent runs in the init container, and both belong to the same pod. Each container in the pod mounts a shared memory volume that provides a storage path for the java agent. When the pod starts, the java agent in the init container will run before the application container, and the injector will store the java agent file in the shared memory volume. 
When the application container starts, the injector injects the agent file into the application by setting the JVM parameter. Users can inject the java agent in this way without rebuilding the container image containing the java agent.\n  Configurability. The injector provides two ways to configure the java agent: global configuration and custom configuration. The default global configuration is stored in the configmap, you can update it as your own global configuration, such as backend_service. In addition, you can also set custom configuration for some applications via annotation, such as “service_name”. For more information, please see java-agent-injector.\n  Observability. For each injected java agent, we provide CustomDefinitionResources called JavaAgent to observe the final agent configuration. Please refer to javaagent to get more details.\n  3. Install SWCK In the next steps, we will show how to build a stand-alone Kubernetes cluster and deploy the 0.6.1 version of SWCK on the platform.\n3.1 Tool Preparation Firstly, you need to install some tools as follows:\n kind, which is used to create a stand-alone Kubernetes cluster. kubectl, which is used to communicate with the Kubernetes cluster.  3.2 Install stand-alone Kubernetes cluster After installing kind , you could use the following command to create a stand-alone Kubernetes cluster.\n Notice! If your terminal is configured with a proxy, you need to close it before the cluster is created to avoid some errors.\n $ kind create cluster --image=kindest/node:v1.19.1 After creating a cluster, you can get the pods as below.\n$ kubectl get pod -A NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-f9fd979d6-57xpc 1/1 Running 0 7m16s kube-system coredns-f9fd979d6-8zj8h 1/1 Running 0 7m16s kube-system etcd-kind-control-plane 1/1 Running 0 7m23s kube-system kindnet-gc9gt 1/1 Running 0 7m16s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 7m23s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 7m23s kube-system kube-proxy-6zbtb 1/1 Running 0 7m16s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 7m23s local-path-storage local-path-provisioner-78776bfc44-jwwcs 1/1 Running 0 7m16s 3.3 Install certificates manger(cert-manger) The certificates of SWCK are distributed and verified by the certificate manager. You need to install the cert-manager through the following command.\n$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml Verify whether cert-manager is installed successfully.\n$ kubectl get pod -n cert-manager NAME READY STATUS RESTARTS AGE cert-manager-7dd5854bb4-slcmd 1/1 Running 0 73s cert-manager-cainjector-64c949654c-tfmt2 1/1 Running 0 73s cert-manager-webhook-6bdffc7c9d-h8cfv 1/1 Running 0 73s 3.4 Install SWCK The java agent injector is a component of the operator, so please follow the next steps to install the operator first.\n Get the deployment yaml file of SWCK and deploy it.  $ curl -Ls https://archive.apache.org/dist/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz | tar -zxf - -O ./config/operator-bundle.yaml | kubectl apply -f - Check SWCK as below.  $ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-7f64f996fc-qh8s9 2/2 Running 0 94s 3.5 Install Skywalking components — OAPServer and UI  Deploy the OAPServer and UI in the default namespace.  
$ kubectl apply -f https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml Check the OAPServer.  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS default 1 1 default-oap.default Check the UI.  $ kubectl get ui NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS default 1 1 default-ui.default [80] 4. Deploy a demo application In the third step, we have installed SWCK and related Skywalking components. Next, we will show how to use the java agent injector in SWCK through two java application examples in two ways: global configuration and custom configuration.\n4.1 Set the global configuration When we have installed SWCK, the default configuration is the configmap in the system namespace, we can get it as follows.\n$ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. In the cluster created by kind, the backend_service may not be correct, we need to use the real OAPServer\u0026rsquo;s address default-oap.default to replace the default 127.0.0.1, so we can edit the configmap as follow.\n$ kubectl edit configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system configmap/skywalking-swck-java-agent-configmap edited $ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:default-oap.default:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. 4.2 Set the custom configuration In some cases, we need to use the Skywalking component to monitor different java applications, so the agent configuration of different applications may be different, such as the name of the application, and the plugins that the application needs to use, etc. Next, we will take two simple java applications developed based on spring boot and spring cloud gateway as examples for a detailed description. You can use the source code to build the image.\n# build the springboot and springcloudgateway image  $ git clone https://github.com/dashanji/swck-spring-cloud-k8s-demo $ cd swck-spring-cloud-k8s-demo \u0026amp;\u0026amp; make # check the image $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE gateway v0.0.1 51d16251c1d5 48 minutes ago 723MB app v0.0.1 62f4dbcde2ed 48 minutes ago 561MB # load the image into the cluster $ kind load docker-image app:v0.0.1 \u0026amp;\u0026amp; kind load docker-image gateway:v0.0.1 4.3 deploy spring boot application  Create the springboot-system namespace.  $ kubectl create namespace springboot-system Label the springboot-systemnamespace to enable the java agent injector.  
$ kubectl label namespace springboot-system swck-injection=enabled Deploy the corresponding deployment file springboot.yaml for the spring boot application, which uses annotation to override the default agent configuration, such as service_name.   Notice! Before using the annotation to override the agent configuration, you need to add strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; to make the override take effect.\n apiVersion:apps/v1kind:Deploymentmetadata:name:demo-springbootnamespace:springboot-systemspec:selector:matchLabels:app:demo-springboottemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;# enable the java agent injectorapp:demo-springbootannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;# enable the agent overlayagent.skywalking.apache.org/agent.service_name:\u0026#34;backend-service\u0026#34;spec:containers:- name:springbootimagePullPolicy:IfNotPresentimage:app:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:demonamespace:springboot-systemspec:type:ClusterIPports:- name:8085-tcpport:8085protocol:TCPtargetPort:8085selector:app:demo-springbootDeploy a spring boot application in the springboot-system namespace.  $ kubectl apply -f springboot.yaml Check for deployment.  $ kubectl get pod -n springboot-system NAME READY STATUS RESTARTS AGE demo-springboot-7c89f79885-dvk8m 1/1 Running 0 11s Get the finnal injected java agent configuration through JavaAgent.  $ kubectl get javaagent -n springboot-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-springboot-javaagent app=demo-springboot backend-service default-oap.default:11800 4.4 deploy spring cloud gateway application  Create the gateway-system namespace.  $ kubectl create namespace gateway-system Label the gateway-systemnamespace to enable the java agent injector.  $ kubectl label namespace gateway-system swck-injection=enabled Deploy the corresponding deployment file springgateway.yaml for the spring cloud gateway application, which uses annotation to override the default agent configuration, such as service_name. In addition, when using spring cloud gateway, we need to add the spring cloud gateway plugin to the agent configuration.   Notice! Before using the annotation to override the agent configuration, you need to add strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; to make the override take effect.\n apiVersion:apps/v1kind:Deploymentmetadata:labels:app:demo-gatewayname:demo-gatewaynamespace:gateway-systemspec:selector:matchLabels:app:demo-gatewaytemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo-gatewayannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;gateway-service\u0026#34;optional.skywalking.apache.org:\u0026#34;cloud-gateway-3.x\u0026#34;# add spring cloud gateway pluginspec:containers:- image:gateway:v0.0.1name:gatewaycommand:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/gateway.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:service-gatewaynamespace:gateway-systemspec:type:ClusterIPports:- name:9999-tcpport:9999protocol:TCPtargetPort:9999selector:app:demo-gatewayDeploy a spring cloud gateway application in the gateway-system namespace.  $ kubectl apply -f springgateway.yaml Check for deployment.  
$ kubectl get pod -n gateway-system NAME READY STATUS RESTARTS AGE demo-gateway-5bb77f6d85-9j7c6 1/1 Running 0 15s Get the finnal injected java agent configuration through JavaAgent.  $ kubectl get javaagent -n gateway-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-gateway-javaagent app=demo-gateway gateway-service default-oap.default:11800 5. Verify the injector  After completing the above steps, we can view detailed state of the injected pod, like the injected agent container.  # get all injected pod $ kubectl get pod -A -lswck-java-agent-injected=true NAMESPACE NAME READY STATUS RESTARTS AGE gateway-system demo-gateway-5bb77f6d85-lt4z7 1/1 Running 0 69s springboot-system demo-springboot-7c89f79885-lkb5j 1/1 Running 0 75s # view detailed state of the injected pod [demo-springboot] $ kubectl describe pod -l app=demo-springboot -n springboot-system ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- ... Normal Created 91s kubelet,kind-control-plane Created container inject-skywalking-agent Normal Started 91s kubelet,kind-control-plane Started container inject-skywalking-agent ... Normal Created 90s kubelet,kind-control-plane Created container springboot Normal Started 90s kubelet,kind-control-plane Started container springboot # view detailed state of the injected pod [demo-gateway]  $ kubectl describe pod -l app=demo-gateway -n gateway-system ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- ... Normal Created 2m20s kubelet,kind-control-plane Created container inject-skywalking-agent Normal Started 2m20s kubelet,kind-control-plane Started container inject-skywalking-agent ... Normal Created 2m20s kubelet,kind-control-plane Created container gateway Normal Started 2m20s kubelet,kind-control-plane Started container gateway Now we can expose the service and watch the data displayed on the web. First of all, we need to get the gateway service and the ui service as follows.  $ kubectl get service service-gateway -n gateway-system NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service-gateway ClusterIP 10.99.181.145 \u0026lt;none\u0026gt; 9999/TCP 9m19s $ kubectl get service default-ui NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default-ui ClusterIP 10.111.39.250 \u0026lt;none\u0026gt; 80/TCP 82m Then open two terminals to expose the service: service-gateway、default-ui.  $ kubectl port-forward service/service-gateway -n gateway-system 9999:9999 Forwarding from 127.0.0.1:9999 -\u0026gt; 9999 Forwarding from [::1]:9999 -\u0026gt; 9999 $ kubectl port-forward service/default-ui 8090:80 Forwarding from 127.0.0.1:8090 -\u0026gt; 8080 Forwarding from [::1]:8090 -\u0026gt; 8080 Use the following commands to access the spring boot demo 10 times through the spring cloud gateway service.  $ for i in {1..10}; do curl http://127.0.0.1:9999/gateway/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! We can see the Dashboard by accessing http://127.0.0.1:8090.  All services' topology is shown below.  We can see the trace information of gateway-service.  We can see the trace information of backend-service.  6. Concluding remarks If your application is deployed in the Kubernetes platform and requires Skywalking to provide monitoring services, SWCK can help you deploy, upgrade and maintain the Skywalking components in the Kubernetes cluster. 
In addition to this blog, you can also view swck document and Java agent injector documentation for more information. If you find this project useful, please give SWCK a star! If you have any questions, welcome to ask in Issues or Discussions.\n","title":"How to use the java agent injector?","url":"/blog/2022-04-19-how-to-use-the-java-agent-injector/"},{"content":"目录  介绍 主要特点 安装SWCK 部署demo应用 验证注入器 结束语  1. 介绍 1.1 SWCK 是什么? SWCK是部署在 Kubernetes 环境中,为 Skywalking 用户提供服务的平台,用户可以基于该平台使用、升级和维护 SkyWalking 相关组件。\n实际上,SWCK 是基于 kubebuilder 开发的Operator,为用户提供自定义资源( CR )以及管理资源的控制器( Controller ),所有的自定义资源定义(CRD)如下所示:\n JavaAgent OAP UI Storage Satellite Fetcher  1.2 java 探针注入器是什么? 对于 java 应用来说,用户需要将 java 探针注入到应用程序中获取元数据并发送到 Skywalking 后端。为了让用户在 Kubernetes 平台上更原生地使用 java 探针,我们提供了 java 探针注入器,该注入器能够将 java 探针通过 sidecar 方式注入到应用程序所在的 pod 中。 java 探针注入器实际上是一个Kubernetes Mutation Webhook控制器,如果请求中存在 annotations ,控制器会拦截 pod 事件并将其应用于 pod 上。\n2. 主要特点  透明性。用户应用一般运行在普通容器中而 java 探针则运行在初始化容器中,且两者都属于同一个 pod 。该 pod 中的每个容器都会挂载一个共享内存卷,为 java 探针提供存储路径。在 pod 启动时,初始化容器中的 java 探针会先于应用容器运行,由注入器将其中的探针文件存放在共享内存卷中。在应用容器启动时,注入器通过设置 JVM 参数将探针文件注入到应用程序中。用户可以通过这种方式实现 java 探针的注入,而无需重新构建包含 java 探针的容器镜像。 可配置性。注入器提供两种方式配置 java 探针:全局配置和自定义配置。默认的全局配置存放在 configmap 中,用户可以根据需求修改全局配置,比如修改 backend_service 的地址。此外,用户也能通过 annotation 为特定应用设置自定义的一些配置,比如不同服务的 service_name 名称。详情可见 java探针说明书。 可观察性。每个 java 探针在被注入时,用户可以查看名为 JavaAgent 的 CRD 资源,用于观测注入后的 java 探针配置。详情可见 JavaAgent说明。  3. 安装SWCK 在接下来的几个步骤中,我们将演示如何从0开始搭建单机版的 Kubernetes 集群,并在该平台部署0.6.1版本的 SWCK。\n3.1 工具准备 首先,你需要安装一些必要的工具,如下所示:\n kind,用于创建单机版 Kubernetes集群。 kubectl,用于和Kubernetes 集群交互。  3.2 搭建单机版 Kubernetes集群 在安装完 kind 工具后,可通过如下命令创建一个单机集群。\n 注意!如果你的终端配置了代理,在运行以下命令之前最好先关闭代理,防止一些意外错误的发生。\n $ kind create cluster --image=kindest/node:v1.19.1 在集群创建完毕后,可获得如下的pod信息。\n$ kubectl get pod -A NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-f9fd979d6-57xpc 1/1 Running 0 7m16s kube-system coredns-f9fd979d6-8zj8h 1/1 Running 0 7m16s kube-system etcd-kind-control-plane 1/1 Running 0 7m23s kube-system kindnet-gc9gt 1/1 Running 0 7m16s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 7m23s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 7m23s kube-system kube-proxy-6zbtb 1/1 Running 0 7m16s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 7m23s local-path-storage local-path-provisioner-78776bfc44-jwwcs 1/1 Running 0 7m16s 3.3 安装证书管理器(cert-manger) SWCK 的证书都是由证书管理器分发和验证,需要先通过如下命令安装证书管理器cert-manger。\n$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml 验证 cert-manger 是否安装成功。\n$ kubectl get pod -n cert-manager NAME READY STATUS RESTARTS AGE cert-manager-7dd5854bb4-slcmd 1/1 Running 0 73s cert-manager-cainjector-64c949654c-tfmt2 1/1 Running 0 73s cert-manager-webhook-6bdffc7c9d-h8cfv 1/1 Running 0 73s 3.4 安装SWCK java 探针注入器是 SWCK 中的一个组件,首先需要按照如下步骤安装 SWCK:\n 输入如下命令获取 SWCK 的 yaml 文件并部署在 Kubernetes 集群中。  $ curl -Ls https://archive.apache.org/dist/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz | tar -zxf - -O ./config/operator-bundle.yaml | kubectl apply -f - 检查 SWCK 是否正常运行。  $ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-7f64f996fc-qh8s9 2/2 Running 0 94s 3.5 安装 Skywalking 组件 — OAPServer 和 UI  在 default 命名空间中部署 OAPServer 组件和 UI 组件。  $ kubectl apply -f https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml 查看 OAPServer 组件部署情况。  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS default 1 1 default-oap.default 查看 
UI 组件部署情况。  $ kubectl get ui NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS default 1 1 default-ui.default [80] 4. 部署demo应用 在第3个步骤中,我们已经安装好 SWCK 以及相关的 Skywalking 组件,接下来按照全局配置以及自定义配置两种方式,通过两个 java 应用实例,分别演示如何使用 SWCK 中的 java 探针注入器。\n4.1 设置全局配置 当 SWCK 安装完成后,默认的全局配置就会以 configmap 的形式存储在系统命令空间中,可通过如下命令查看。\n$ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. 在 kind 创建的 Kubernetes 集群中, SkyWalking 后端地址和 configmap 中指定的地址可能不同,我们需要使用真正的 OAPServer 组件的地址 default-oap.default 来代替默认的 127.0.0.1 ,可通过修改 configmap 实现。\n$ kubectl edit configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system configmap/skywalking-swck-java-agent-configmap edited $ kubectl get configmap skywalking-swck-java-agent-configmap -n skywalking-swck-system -oyaml apiVersion: v1 data: agent.config: |- # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:default-oap.default:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. 4.2 设置自定义配置 在实际使用场景中,我们需要使用 Skywalking 组件监控不同的 java 应用,因此不同应用的探针配置可能有所不同,比如应用的名称、应用需要使用的插件等。为了支持自定义配置,注入器提供 annotation 来覆盖默认的全局配置。接下来我们将分别以基于 spring boot 以及 spring cloud gateway 开发的两个简单java应用为例进行详细说明,你可以使用这两个应用的源代码构建镜像。\n# build the springboot and springcloudgateway image  $ git clone https://github.com/dashanji/swck-spring-cloud-k8s-demo $ cd swck-spring-cloud-k8s-demo \u0026amp;\u0026amp; make # check the image $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE gateway v0.0.1 51d16251c1d5 48 minutes ago 723MB app v0.0.1 62f4dbcde2ed 48 minutes ago 561MB # load the image into the cluster $ kind load docker-image app:v0.0.1 \u0026amp;\u0026amp; kind load docker-image gateway:v0.0.1 4.3 部署 spring boot 应用  创建 springboot-system 命名空间。  $ kubectl create namespace springboot-system 给 springboot-system 命名空间打上标签使能 java 探针注入器。  $ kubectl label namespace springboot-system swck-injection=enabled 接下来为 spring boot 应用对应的部署文件 springboot.yaml ,其中使用了 annotation 覆盖默认的探针配置,比如 service_name ,将其覆盖为 backend-service 。   需要注意的是,在使用 annotation 覆盖探针配置之前,需要增加 strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; 来使覆盖生效。\n apiVersion:apps/v1kind:Deploymentmetadata:name:demo-springbootnamespace:springboot-systemspec:selector:matchLabels:app:demo-springboottemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;# enable the java agent injectorapp:demo-springbootannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;# enable the agent overlayagent.skywalking.apache.org/agent.service_name:\u0026#34;backend-service\u0026#34;spec:containers:- name:springbootimagePullPolicy:IfNotPresentimage:app:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:demonamespace:springboot-systemspec:type:ClusterIPports:- 
name:8085-tcpport:8085protocol:TCPtargetPort:8085selector:app:demo-springboot在 springboot-system 命名空间中部署 spring boot 应用。  $ kubectl apply -f springboot.yaml 查看部署情况。  $ kubectl get pod -n springboot-system NAME READY STATUS RESTARTS AGE demo-springboot-7c89f79885-dvk8m 1/1 Running 0 11s 通过 JavaAgent 查看最终注入的 java 探针配置。  $ kubectl get javaagent -n springboot-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-springboot-javaagent app=demo-springboot backend-service default-oap.default:11800 4.4 部署 spring cloud gateway 应用  创建 gateway-system 命名空间。  $ kubectl create namespace gateway-system 给 gateway-system 命名空间打上标签使能 java 探针注入器。  $ kubectl label namespace gateway-system swck-injection=enabled 接下来为 spring cloud gateway 应用对应的部署文件 springgateway.yaml ,其中使用了 annotation 覆盖默认的探针配置,比如 service_name ,将其覆盖为 gateway-service 。此外,在使用 spring cloud gateway 时,我们需要在探针配置中添加 spring cloud gateway 插件。   需要注意的是,在使用 annotation 覆盖探针配置之前,需要增加 strategy.skywalking.apache.org/agent.Overlay: \u0026quot;true\u0026quot; 来使覆盖生效。\n apiVersion:apps/v1kind:Deploymentmetadata:labels:app:demo-gatewayname:demo-gatewaynamespace:gateway-systemspec:selector:matchLabels:app:demo-gatewaytemplate:metadata:labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo-gatewayannotations:strategy.skywalking.apache.org/agent.Overlay:\u0026#34;true\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;gateway-service\u0026#34;optional.skywalking.apache.org:\u0026#34;cloud-gateway-3.x\u0026#34;# add spring cloud gateway pluginspec:containers:- image:gateway:v0.0.1name:gatewaycommand:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/gateway.jar\u0026#34;]---apiVersion:v1kind:Servicemetadata:name:service-gatewaynamespace:gateway-systemspec:type:ClusterIPports:- name:9999-tcpport:9999protocol:TCPtargetPort:9999selector:app:demo-gateway在 gateway-system 命名空间中部署 spring cloud gateway 应用。  $ kubectl apply -f springgateway.yaml 查看部署情况。  $ kubectl get pod -n gateway-system NAME READY STATUS RESTARTS AGE demo-gateway-758899c99-6872s 1/1 Running 0 15s 通过 JavaAgent 获取最终注入的java探针配置。  $ kubectl get javaagent -n gateway-system NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo-gateway-javaagent app=demo-gateway gateway-service default-oap.default:11800 5. 验证注入器  当完成上述步骤后,我们可以查看被注入pod的详细状态,比如被注入的agent容器。  # get all injected pod $ kubectl get pod -A -lswck-java-agent-injected=true NAMESPACE NAME READY STATUS RESTARTS AGE gateway-system demo-gateway-5bb77f6d85-lt4z7 1/1 Running 0 69s springboot-system demo-springboot-7c89f79885-lkb5j 1/1 Running 0 75s # view detailed state of the injected pod [demo-springboot] $ kubectl describe pod -l app=demo-springboot -n springboot-system ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- ... Normal Created 91s kubelet,kind-control-plane Created container inject-skywalking-agent Normal Started 91s kubelet,kind-control-plane Started container inject-skywalking-agent ... Normal Created 90s kubelet,kind-control-plane Created container springboot Normal Started 90s kubelet,kind-control-plane Started container springboot # view detailed state of the injected pod [demo-gateway]  $ kubectl describe pod -l app=demo-gateway -n gateway-system ... Events: Type Reason Age From Message ---- ------ ---- ---- ------- ... Normal Created 2m20s kubelet,kind-control-plane Created container inject-skywalking-agent Normal Started 2m20s kubelet,kind-control-plane Started container inject-skywalking-agent ... 
Normal Created 2m20s kubelet,kind-control-plane Created container gateway Normal Started 2m20s kubelet,kind-control-plane Started container gateway 现在我们可以将服务绑定在某个端口上并通过 web 浏览器查看采样数据。首先,我们需要通过以下命令获取gateway服务和ui服务的信息。  $ kubectl get service service-gateway -n gateway-system NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service-gateway ClusterIP 10.99.181.145 \u0026lt;none\u0026gt; 9999/TCP 9m19s $ kubectl get service default-ui NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default-ui ClusterIP 10.111.39.250 \u0026lt;none\u0026gt; 80/TCP 82m 接下来分别启动2个终端将service-gateway 以及 default-ui 绑定到本地端口上,如下所示:  $ kubectl port-forward service/service-gateway -n gateway-system 9999:9999 Forwarding from 127.0.0.1:9999 -\u0026gt; 9999 Forwarding from [::1]:9999 -\u0026gt; 9999 $ kubectl port-forward service/default-ui 8090:80 Forwarding from 127.0.0.1:8090 -\u0026gt; 8080 Forwarding from [::1]:8090 -\u0026gt; 8080 使用以下命令通过spring cloud gateway 网关服务暴露的端口来访问 spring boot 应用服务。  $ for i in {1..10}; do curl http://127.0.0.1:9999/gateway/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! Hello World! 我们可以在 web 浏览器中输入 http://127.0.0.1:8090 来访问探针采集到的数据。  所有服务的拓扑图如下所示。  查看 gateway-service 网关服务的 trace 信息。  查看 backend-service 应用服务的 trace 信息。  6. 结束语 如果你的应用部署在 Kubernetes 平台中,且需要 Skywalking 提供监控服务, SWCK 能够帮助你部署、升级和维护 Kubernetes 集群中的 Skywalking 组件。除了本篇博客外,你还可以查看 SWCK文档 以及 java探针注入器文档 获取更多的信息。如果你觉得这个项目好用,请给 SWCK 一个star! 如果你有任何疑问,欢迎在Issues或者Discussions中提出。\n","title":"如何使用java探针注入器?","url":"/zh/2022-04-19-how-to-use-the-java-agent-injector/"},{"content":"Apache SkyWalking 是中国首个,也是目前唯一的个人开源的 Apache 顶级项目。\n作为一个针对分布式系统的应用性能监控 APM 和可观测性分析平台, SkyWalking 提供了媲美商业APM/监控的功能。\nCSDN云原生系列在线峰会第4期,特邀SkyWalking创始人、Apache基金会首位中国董事、Tetrate创始工程师吴晟担任出品人,推出SkyWalking峰会。\nSkyWalking峰会在解读SkyWalking v9新特性的同时,还将首发解密APM的专用数据库BanyanDB,以及分享SkyWalking在原生eBPF探针、监控虚拟机和Kubernetes、云原生函数计算可观测性等方面的应用实践。\n峰会议程:\n14:00-14:30 开场演讲:SkyWalking v9解析 吴晟 Tetrate 创始工程师、Apache 基金会首位中国董事\n14:30-15:00 首发解密:APM的专用数据库BanyanDB\n高洪涛 Tetrate 创始工程师\n15:00-15:30 SkyWalking 原生eBPF探针展示\n刘晗 Tetrate 工程师\n15:30-16:00 Apache SkyWalking MAL实践-监控虚拟机和Kubernetes\n万凯 Tetrate 工程师\n16:00-16:30 SkyWalking助力云原生函数计算可观测\n霍秉杰 青云科技 资深架构师\n峰会视频 B站视频地址\n","title":"Apache SkyWalking 2022 峰会","url":"/zh/2022-04-18-meeting/"},{"content":"SkyWalking Java Agent 8.10.0 is released. Go to downloads page to find release tars. Changes by Version\n8.10.0  [Important] Namespace represents a subnet, such as kubernetes namespace, or 172.10... Make namespace concept as a part of service naming format. [Important] Add cluster concept, also as a part of service naming format. The cluster name would be  Add as {@link #SERVICE_NAME} suffix. Add as exit span\u0026rsquo;s peer, ${CLUSTER} / original peer Cross Process Propagation Header\u0026rsquo;s value addressUsedAtClient[index=8] (Target address of this request used on the client end).   Support Undertow thread pool metrics collecting. Support Tomcat thread pool metric collect. Remove plugin for ServiceComb Java Chassis 0.x Add Guava EventBus plugin. Fix Dubbo 3.x plugin\u0026rsquo;s tracing problem. Fix the bug that maybe generate multiple trace when invoke http request by spring webflux webclient. Support Druid Connection pool metrics collecting. Support HikariCP Connection pool metrics collecting. Support Dbcp2 Connection pool metrics collecting. Ignore the synthetic constructor created by the agent in the Spring patch plugin. 
Add witness class for vertx-core-3.x plugin. Add witness class for graphql plugin. Add vertx-core-4.x plugin. Renamed graphql-12.x-plugin to graphql-12.x-15.x-plugin and graphql-12.x-scenario to graphql-12.x-15.x-scenario. Add graphql-16plus plugin. [Test] Support to configure plugin test base images. [Breaking Change] Remove deprecated agent.instance_properties configuration. Recommend agent.instance_properties_json. The namespace and cluster would be reported as instance properties, keys are namespace and cluster. Notice, if instance_properties_json includes these two keys, they would be overrided by the agent core. [Breaking Change] Remove the namespace from cross process propagation key. Make sure the parent endpoint in tracing context from existing first ENTRY span, rather than first span only. Fix the bug that maybe causing memory leak and repeated traceId when use gateway-2.1.x-plugin or gateway-3.x-plugin. Fix Grpc 1.x plugin could leak context due to gRPC cancelled. Add JDK ThreadPoolExecutor Plugin. Support default database(not set through JDBC URL) in mysql-5.x plugin.  Documentation  Add link about java agent injector. Update configurations doc, remove agent.instance_properties[key]=value. Update configurations doc, add agent.cluster and update agent.namespace.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.10.0","url":"/events/release-apache-skywalking-java-agent-8-10-0/"},{"content":"Introduction  The most profound technologies are those that disappear. They weave themselves into the fabric of everyday life until they are indistinguishable from it. - Mark Weiser\n Mark Weiser prophetically argued in the late 1980s, that the most far-reaching technologies are those which vanish into thin air. According to Weiser, \u0026ldquo;Whenever people learn something sufficiently well, they cease to be aware of it.\u0026rdquo; This disappearing act, as Weiser claimed, is not limited to technology but rather human psychology. It is this very experience that allows us to escape lower-level thinking into higher-level thinking. For once we are no longer impeded by mundane details, we are then free to focus on new goals.\nThis realization becomes more relevant as APMs become increasingly popular. As more applications are deployed with APMs, the number of abstract representations of the underlying source code also increases. While this provides great value to many non-development roles within an organization, it does pose additional challenges to those in development roles who must translate these representations into concepts they can work with (i.e. source code). Weiser sums this difficultly up rather succinctly when he states that \u0026ldquo;Programmers should no more be asked to work without access to source code than auto-mechanics should be asked to work without looking at the engine.\u0026rdquo;\nStill, APMs collect more information only to produce a plethora of new abstract representations. In this article, we will introduce a new concept in Source++, the open-source live-coding platform, specifically designed to allow developers to monitor production applications more intuitively.\nLive Views  And we really don\u0026rsquo;t understand even yet, hundreds of metrics later, what make a program easier to understand or modify or reuse or borrow. I don\u0026rsquo;t think we\u0026rsquo;ll find out by looking away from programs to their abstract interfaces. The answers are in the source code. 
- Mark Weiser\n As APMs move from the \u0026ldquo;nice to have\u0026rdquo; category to the \u0026ldquo;must-have\u0026rdquo; category, there is a fundamental feature holding them back from ubiquity. They must disappear from consciousness. As developers, we should feel no impulse to open our browsers to better understand the underlying source code. The answers are literally in the source code. Instead, we should improve our tools so the source code conveniently tells us what we need to know. Think of how simple life could be if failing code always indicated how and why it failed. This is the idea behind Source++.\nIn our last blog post, we discussed Extending Apache SkyWalking with non-breaking breakpoints. In that post, we introduced a concept called Live Instruments, which developers can use to easily debug live production applications without leaving their IDE. Today, we will discuss how existing SkyWalking installations can be integrated into your IDE via a new concept called Live Views. Unlike Live Instruments, which are designed for debugging live applications, Live Views are designed for increasing application comprehension and awareness. This is accomplished through a variety of commands which are input into the Live Command Palette.\nLive Command Palette The Live Command Palette (LCP) is a contextual command prompt, included in the Source++ JetBrains Plugin, that allows developers to control and query live applications from their IDE. Opened via keyboard shortcut (Ctrl+Shift+S), the LCP allows developers to easily view metrics relevant to the source code they\u0026rsquo;re currently viewing. The following Live View commands are currently supported:\nCommand: view (overview/activity/traces/logs) The view commands display contextual popups with live operational data of the current source code. These commands allow developers to view traditional SkyWalking operational data filtered down to the relevant metrics.\nCommand: watch log The watch log command allows developers to follow individual log statements of a running application in real-time. This command allows developers to negate the need for manually scrolling through the logs to find instances of a specific log statement.\nCommand: (show/hide) quick stats The show quick stats command displays live endpoint metrics for a quick idea of an endpoint\u0026rsquo;s activity. Using this command, developers can quickly assess the status of an endpoint and determine if the endpoint is performing as expected.\nFuture Work  A good tool is an invisible tool. By invisible, I mean that the tool does not intrude on your consciousness; you focus on the task, not the tool. Eyeglasses are a good tool \u0026ndash; you look at the world, not the eyeglasses. - Mark Weiser\n Source++ aims to extend SkyWalking in such a way that SkyWalking itself becomes invisible. To accomplish this, we plan to support custom developer commands. Developers will be able to build customized commands for themselves, as well as commands to share with their team. These commands will recognize context, types, and conditions allowing for a wide possibility of operations. As more commands are added, developers will be able to expose everything SkyWalking has to offer while focusing on what matters most, the source code.\nIf you find these features useful, please consider giving Source++ a try. You can install the plugin directly from your JetBrains IDE, or through the JetBrains Marketplace. If you have any issues or questions, please open an issue. 
Feedback is always welcome!\n","title":"Integrating Apache SkyWalking with source code","url":"/blog/2022-04-14-integrating-skywalking-with-source-code/"},{"content":"Read this post in original language: English\n介绍  最具影响力的技术是那些消失的技术。他们交织在日常生活中,直到二者完全相融。 - 马克韦瑟\n 马克韦瑟在 1980 年代后期预言,影响最深远的技术是那些消失在空气中的技术。\n“当人们足够熟知它,就不会再意识到它。”\n正如韦瑟所说,这种消失的现象不只源于技术,更是人类的心理。 正是这种经验使我们能够摆脱对底层的考量,进入更高层次的思考。 一旦我们不再被平凡的细枝末节所阻碍,我们就可以自如地专注于新的目标。\n随着 APM(应用性能管理系统) 变得越来越普遍,这种认识变得更加重要。随着更多的应用程序开始使用 APM 部署,底层源代码抽象表示的数量也在同步增加。 虽然这为组织内的许多非开发角色提供了巨大的价值,但它确实也对开发人员提出了额外的挑战 - 他们必须将这些表示转化为可操作的概念(即源代码)。 对此,韦瑟相当简洁的总结道,“就像不应要求汽车机械师在不查看引擎的情况下工作一样,我们不应要求程序员在不访问源代码的情况下工作”。\n尽管如此,APM 收集更多信息只是为了产生充足的新抽象表示。 在本文中,我们将介绍开源实时编码平台 Source++ 中的一个新概念,旨在让开发人员更直观地监控生产应用程序。\n实时查看  我们尚且不理解在收集了数百个指标之后,是什么让程序更容易理解、修改、重复使用或借用。 我不认为我们能够通过原理程序本身而到它们的抽象接口中找到答案。答案就在源代码之中。 - 马克韦瑟\n 随着 APM 从“有了更好”转变为“必须拥有”,有一个基本特性阻碍了它们的普及。 它们必须从意识中消失。作为开发人员,我们不应急于打开浏览器以更好地理解底层源代码,答案就在源代码中。 相反,我们应该改进我们的工具,以便源代码直观地告诉我们需要了解的内容。 想想如果失败的代码总是表明它是如何以及为什么失败的,生活会多么简单。这就是 Source++ 背后的理念。\n在我们的上一篇博客中,我们讨论了不间断断点 Extending Apache SkyWalking。 我们介绍了一个名为 Live Instruments(实时埋点) 的概念,开发人员可以使用它轻松调试实时生产应用程序,而无需离开他们的开发环境。 而今天,我们将讨论如何通过一个名为 Live Views(实时查看)的新概念将现有部署的 SkyWalking 集成到您的 IDE 中。 与专为调试实时应用程序而设计的 Live Instruments (实时埋点) 不同,Live Views(实时查看)旨在提高对应用程序的理解和领悟。 这将通过输入到 Live Command Palette (实时命令面板) 中的各种命令来完成。\n实时命令面板 Live Command Palette (LCP) 是一个当前上下文场景下的命令行面板,这个组件包含在 Source++ JetBrains 插件中,它允许开发人员从 IDE 中直接控制和对实时应用程序发起查询。\nLCP 通过键盘快捷键 (Ctrl+Shift+S) 打开,允许开发人员轻松了解与他们当前正在查看的源代码相关的运行指标。\n目前 LCP 支持以下实时查看命令:\n命令:view(overview/activity/traces/Logs)- 查看 总览/活动/追踪/日志 view 查看命令会展示一个与当前源码的实时运维数据关联的弹窗。 这些命令允许开发人员查看根据相关指标过滤的传统 SkyWalking 的运维数据。\n命令:watch log - 实时监听日志 本日志命令允许开发人员实时跟踪正在运行的应用程序的每一条日志。 通过此命令开发人员无需手动查阅大量日志就可以查找特定日志语句的实例。\n命令:(show/hide) quick stats (显示/隐藏)快速统计 show quick stats 显示快速统计命令显示实时端点指标,以便快速了解端点的活动。 使用此命令,开发人员可以快速评估端点的状态并确定端点是否按预期正常运行。\n未来的工作  好工具是无形的。我所指的无形,是指这个工具不会侵入你的意识; 你专注于任务,而不是工具。 眼镜就是很好的工具——你看的是世界,而不是眼镜。 - 马克韦瑟\n Source++ 旨在扩展 SkyWalking,使 SkyWalking 本身变得无需感知。 为此,我们计划支持自定义的开发人员命令。 开发人员将能够构建自定义命令,以及与团队共享的命令。 这些命令将识别上下文、类型和条件,从而允许广泛的操作。 随着更多命令的添加,开发人员将能够洞悉 SkyWalking 所提供的所有功能,同时专注于最重要的源码。\n如果您觉得这些功能有用,请考虑尝试使用 Source++。 您可以通过 JetBrains Marketplace 或直接从您的 JetBrains IDE 安装插件。 如果您有任何疑问,请到这提 issue。\n欢迎随时反馈!\n","title":"将 Apache SkyWalking 与源代码集成","url":"/zh/2022-04-14-integrating-skywalking-with-source-code/"},{"content":"随着无人驾驶在行业的不断发展和技术的持续革新,规范化、常态化的真无人运营逐渐成为事实标准,而要保障各个场景下的真无人业务运作,一个迫切需要解决的现状就是业务链路长,出现问题难以定位。本文由此前于 KubeSphere 直播上的分享整理而成,主要介绍 SkyWalking 的基本概念和使用方法,以及在无人驾驶领域的一系列实践。\nB站视频地址\n行业背景 驭势科技(UISEE)是国内领先的无人驾驶公司。致力于为全行业、全场景提供 AI 驾驶服务,做赋能出行和物流新生态的 AI 驾驶员。早在三年前, 驭势科技已在机场和厂区领域实现了“去安全员” 无人驾驶常态化运营的重大突破,落地“全场景、真无人、全天候”的自动驾驶技术,并由此迈向大规模商用。要保证各个场景下没有安全员参与的业务运作,我们在链路追踪上做了一系列实践。\n对于无人驾驶来说,从云端到车端的链路长且复杂,任何一层出问题都会导致严重的后果;然而在如下图所示的链路中,准确迅速地定位故障服务并不容易,经常遇到多个服务层层排查的情况。我们希望做到的事情,就是在出现问题以后,能够尽快定位到源头,从而快速解决问题,以绝后患。\n前提条件 SkyWalking 简介 Apache SkyWalking 是一个开源的可观察性平台,用于收集、分析、聚集和可视化来自服务和云原生基础设施的数据。SkyWalking 通过简单的方法,提拱了分布式系统的清晰视图,甚至跨云。它是一个现代的 APM(Application Performence Management),专门为云原生、基于容器的分布式系统设计。它在逻辑上被分成四个部分。探针、平台后端、存储和用户界面。\n 探针收集数据并根据 SkyWalking 的要求重新格式化(不同的探针支持不同的来源)。 平台后端支持数据聚合、分析以及从探针接收数据流的过程,包括 Tracing、Logging、Metrics。 存储系统通过一个开放/可插拔接口容纳 SkyWalking 数据。用户可以选择一个现有的实现,如 ElasticSearch、H2、MySQL、TiDB、InfluxDB,或实现自定义的存储。 UI是一个高度可定制的基于网络的界面,允许 SkyWalking 终端用户可视化和管理 SkyWalking 数据。  综合考虑了对各语言、各框架的支持性、可观测性的全面性以及社区环境等因素,我们选择了 SkyWalking 进行链路追踪。\n链路追踪简介 关于链路追踪的基本概念,可以参看吴晟老师翻译的 OpenTracing 概念和术语 以及 OpenTelemetry。在这里,择取几个重要的概念供大家参考:\n Trace:代表一个潜在的分布式的存在并行数据或者并行执行轨迹的系统。一个 Trace 可以认为是多个 Span 
的有向无环图(DAG)。简单来说,在微服务体系下,一个 Trace 代表从第一个服务到最后一个服务经历的一系列的服务的调用链。   Span:在服务中埋点时,最需要关注的内容。一个 Span 代表系统中具有开始时间和执行时长的逻辑运行单元。举例来说,在一个服务发出请求时,可以认为是一个 Span 的开始;在这个服务接收到上游服务的返回值时,可以认为是这个 Span 的结束。Span 之间通过嵌套或者顺序排列建立逻辑因果关系。在 SkyWalking 中,Span 被区分为:  LocalSpan:服务内部调用方法时创建的 Span 类型 EntrySpan:请求进入服务时会创建的 Span 类型(例如处理其他服务对于本服务接口的调用) ExitSpan:请求离开服务时会创建的 Span 类型(例如调用其他服务的接口)   TraceSegment:SkyWalking 中的概念,介于 Trace 和 Span 之间,是一条 Trace 的一段,可以包含多个 Span。一个 TraceSegment 记录了一个线程中的执行过程,一个 Trace 由一个或多个 TraceSegment 组成,一个 TraceSegment 又由一个或多个 Span 组成。 SpanContext:代表跨越进程上下文,传递到下级 Span 的状态。一般包含 Trace ID、Span ID 等信息。 Baggage:存储在 SpanContext 中的一个键值对集合。它会在一条追踪链路上的所有 Span 内全局传输,包含这些 Span 对应的 SpanContext。Baggage 会随着 Trace 一同传播。  SkyWalking 中,上下文数据通过名为 sw8 的头部项进行传递,值中包含 8 个字段,由 - 进行分割(包括 Trace ID,Parent Span ID 等等) 另外 SkyWalking 中还提供名为 sw8-correlation 的扩展头部项,可以传递一些自定义的信息    快速上手 以 Go 为例,介绍如何使用 SkyWalking 在服务中埋点。\n部署 我们选择使用 Helm Chart 在 Kubernetes 中进行部署。\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 埋点 部署完以后,需要在服务中进行埋点,以生成 Span 数据:主要的方式即在服务的入口和出口创建 Span。在代码中,首先我们会创建一个 Reporter,用于向 SkyWalking 后端发送数据。接下来,我们需要创建一个名为 \u0026quot;example\u0026quot; 的 Tracer 实例。此时,我们就可以使用 Tracer 实例来创建 Span。 在 Go 中,主要利用 context.Context 来创建以及传递 Span。\nimport \u0026#34;github.com/SkyAPM/go2sky\u0026#34; // configure to export to OAP server r, err := reporter.NewGRPCReporter(\u0026#34;oap-skywalking:11800\u0026#34;) if err != nil { log.Fatalf(\u0026#34;new reporter error %v \\n\u0026#34;, err) } defer r.Close() tracer, err := go2sky.NewTracer(\u0026#34;example\u0026#34;, go2sky.WithReporter(r)) 服务内部 在下面的代码片段中,通过 context.background() 生成的 Context 创建了一个 Root Span,同时在创建该 Span 的时候,也会产生一个跟这 个 Span 相关联的 Context。利用这个新的 Context,就可以创建一个与 Root Span 相关联的 Child Span。\n// create root span span, ctx, err := tracer.CreateLocalSpan(context.Background()) // create sub span w/ context above subSpan, newCtx, err := tracer.CreateLocalSpan(ctx) 服务间通信 在服务内部,我们会利用 Context 传的递来进行 Span 的创建。但是如果是服务间通信的话,这也是链路追踪最为广泛的应用场景,肯定是没有办法直接传递 Context 参数的。这种情况下,应该怎么做呢?一般来说,SkyWalking 会把 Context 中与当前 Span 相关的键值对进行编码,后续在服务通信时进行传递。例如,在 HTTP 协议中,一般利用请求头进行链路传递。再例如 gRPC 协议,一般想到的就是利用 Metadata 进行传递。\n在服务间通信的时候,我们会利用 EntrySpan 和 ExitSpan 进行链路的串联。以 HTTP 请求为例,在创建 EntrySpan 时,会从请求头中获取到 Span 上下文信息。而在 ExitSpan 中,则在请求中注入了上下文。这里的上下文是经过了 SkyWalking 编码后的字符串,以便在服务间进行传递。除了传递 Span 信息,也可以给 Span 打上 Tag 进行标记。例如,记录 HTTP 请求的方法,URL 等等,以便于后续数据的可视化。\n//Extract context from HTTP request header `sw8` span, ctx, err := tracer.CreateEntrySpan(r.Context(), \u0026#34;/api/login\u0026#34;, func(key string) (string, error) { return r.Header.Get(key), nil }) // Some operation ... 
// Inject context into HTTP request header `sw8` span, err := tracer.CreateExitSpan(req.Context(), \u0026#34;/service/validate\u0026#34;, \u0026#34;tomcat-service:8080\u0026#34;, func(key, value string) error { req.Header.Set(key, value) return nil }) // tags span.Tag(go2sky.TagHTTPMethod, req.Method) span.Tag(go2sky.TagURL, req.URL.String()) 但是,我们可能也会用到一些不那么常用的协议,比如说 MQTT 协议。在这些情况下,应该如何传递上下文呢?关于这个问题,我们在自定义插件的部分做了实践。\nUI 经过刚才的埋点以后,就可以在 SkyWalking 的 UI 界面看到调用链。SkyWalking 官方提供了一个 Demo 页面,有兴趣可以一探究竟:\n UI http://demo.skywalking.apache.org\nUsername skywalking Password skywalking\n 插件体系 如上述埋点的方式,其实是比较麻烦的。好在 SkyWalking 官方提供了很多插件,一般情况下,直接接入插件便能达到埋点效果。SkyWalking 官方为多种语言都是提供了丰富的插件,对一些主流框架都有插件支持。由于我们部门使用的主要是 Go 和 Python 插件,下文中便主要介绍这两种语言的插件。同时,由于我们的链路复杂,用到的协议较多,不可避免的是也需要开发一些自定义插件。下图中整理了 Go 与 Python 插件的主要思想,以及我们开发的各框架协议自定义插件的研发思路。\n官方插件 Go · Gin 插件 Gin 是 Go 的 Web 框架,利用其中间件,可以进行链路追踪。由于是接收请求,所以需要在中间件中,创建一个 EntrySpan,同时从请求头中获取 Span 的上下文的信息。获取到上下文信息以后,还需要再进行一步操作:把当前请求请求的上下文 c.Request.Context(), 设置成为刚才创建完 EntrySpan 时生成的 Context。这样一来,这个请求的 Context 就会携带有 Span 上下文信息,可以用于在后续的请求处理中进行后续传递。\nfunc Middleware(engine *gin.Engine, tracer *go2sky.Tracer) gin.HandlerFunc { return func(c *gin.Context) { span, ctx, err := tracer.CreateEntrySpan(c.Request.Context(), getOperationName(c), func(key string) (string, error) { return c.Request.Header.Get(key), nil }) // some operation \tc.Request = c.Request.WithContext(ctx) c.Next() span.End() } } Python · requests Requests 插件会直接修改 Requests 库中的request函数,把它替换成 SkyWalking 自定义的_sw_request函数。在这个函数中,创建了 ExitSpan,并将 ExitSpan 上下文注入到请求头中。在服务安装该插件后,实际调用 Requests 库进行请求的时候,就会携带带有上下文的请求体进行请求。\ndef install(): from requests import Session _request = Session.request def _sw_request(this: Session, method, url, other params...): span = get_context().new_exit_span(op=url_param.path or \u0026#39;/\u0026#39;, peer=url_param.netloc, component=Component.Requests) with span: carrier = span.inject() span.layer = Layer.Http if headers is None: headers = {} for item in carrier: headers[item.key] = item.val span.tag(TagHttpMethod(method.upper())) span.tag(TagHttpURL(url_param.geturl())) res = _request(this, method, url, , other params...n) # some operation return res Session.request = _sw_request 自定义插件 Go · Gorm Gorm 框架是 Go 的 ORM 框架。我们自己在开发的时候经常用到这个框架,因此希望能对通过 Gorm 调用数据库的链路进行追踪。\nGorm 有自己的插件体系,会在数据库的操作前调用BeforeCallback函数,数据库的操作后调用AfterCallback函数。于是在BeforeCallback中,我们创建 ExitSpan,并在AfterCallback里结束先前在BeforeCallback中创建的 ExitSpan。\nfunc (s *SkyWalking) BeforeCallback(operation string) func(db *gorm.DB) { // some operation  return func(db *gorm.DB) { tableName := db.Statement.Table operation := fmt.Sprintf(\u0026#34;%s/%s\u0026#34;, tableName, operation) span, err := tracer.CreateExitSpan(db.Statement.Context, operation, peer, func(key, value string) error { return nil }) // set span from db instance\u0026#39;s context to pass span  db.Set(spanKey, span) } } 需要注意的是,因为 Gorm 的插件分为 Before 与 After 两个 Callback,所以需要在两个回调函数间传递 Span,这样我们才可以在AfterCallback中结束当前的 Span。\nfunc (s *SkyWalking) AfterCallback() func(db *gorm.DB) { // some operation  return func(db *gorm.DB) { // get span from db instance\u0026#39;s context  spanInterface, _ := db.Get(spanKey) span, ok := spanInterface.(go2sky.Span) if !ok { return } defer span.End() // some operation  } } Python · MQTT 在 IoT 领域,MQTT 是非常常用的协议,无人驾驶领域自然也相当依赖这个协议。\n以 Publish 为例,根据官方插件的示例,我们直接修改 paho.mqtt 库中的publish函数,改为自己定义的_sw_publish函数。在自定义函数中,创建 ExitSpan,并将上下文注入到 MQTT 的 Payload 中。\ndef install(): from paho.mqtt.client import Client _publish = Client.publish Client.publish = 
_sw_publish_func(_publish) def _sw_publish_func(_publish): def _sw_publish(this, topic, payload=None, qos=0, retain=False, properties=None): # some operation with get_context().new_exit_span(op=\u0026#34;EMQX/Topic/\u0026#34; + topic + \u0026#34;/Producer\u0026#34; or \u0026#34;/\u0026#34;, peer=peer) as span: carrier = span.inject() span.layer = Layer.MQ span.component = Component.RabbitmqProducer payload = {} if payload is None else json.loads(payload) payload[\u0026#39;headers\u0026#39;] = {} for item in carrier: payload[\u0026#39;headers\u0026#39;][item.key] = item.val # ... return _sw_publish 可能这个方式不是特别优雅:因为我们目前使用 MQTT 3.1 版本,此时尚未引入 Properties 属性(类似于请求头)。直到 MQTT 5.0,才对此有相关支持。我们希望在升级到 MQTT 5.0 以后,能够将上下文注入到 Properties 中进行传递。\n无人驾驶领域的实践 虽然这些插件基本上涵盖了所有的场景,但是链路追踪并不是只要接入插件就万事大吉。在一些复杂场景下,尤其无人驾驶领域的链路追踪,由于微服务架构中涉及的语言环境、中间件种类以及业务诉求通常都比较丰富,导致在接入全链路追踪的过程中,难免遇到各种主观和客观的坑。下面选取了几个典型例子和大家分享。\n【问题一】Kong 网关的插件链路接入 我们的请求在进入服务之前,都会通过 API 网关 Kong,同时我们在 Kong 中定义了一个自定义权限插件,这个插件会调用权限服务接口进行授权。如果只是单独单纯地接入 SkyWalking Kong 插件,对于权限服务的调用无法在调用链中体现。所以我们的解决思路是,直接地在权限插件里进行埋点,而不是使用官方的插件,这样就可以把对于权限服务的调用也纳入到调用链中。\n【问题二】 Context 传递 我们有这样一个场景:一个服务,使用 Gin Web 框架,同时在处理 HTTP 请求时调用上游服务的 gRPC 接口。起初以为只要接入 Gin 的插件以及 gRPC 的插件,这个场景的链路就会轻松地接上。但是结果并不如预期。\n最后发现,Gin 提供一个 Contextc;同时对于某一个请求,可以通过c.Request.Context()获取到请求的 ContextreqCtx,二者不一致;接入 SkyWalking 提供的 Gin 插件后,修改的是reqCtx,使其包含 Span 上下文信息;而现有服务,在 gRPC 调用时传入的 Context 是c,所以一开始 HTTP -\u0026gt; gRPC 无法连接。最后通过一个工具函数,复制了reqCtx的键值对到c后,解决了这个问题。\n【问题三】官方 Python·Redis 插件 Pub/Sub 断路 由于官方提供了 Python ·Redis 插件,所以一开始认为,安装了 Redis 插件,对于一切 Redis 操作,都能互相连接。但是实际上,对于 Pub/Sub 操作,链路会断开。\n查看代码后发现,对于所有的 Redis 操作,插件都创建一个 ExitSpan;也就是说该插件其实仅适用于 Redis 作缓存等情况;但是在我们的场景中,需要进行 Pub/Sub 操作。这导致两个操作都会创建 ExitSpan,而使链路无法相连。通过改造插件,在 Pub 时创建 ExitSpan,在 Sub 时创建 EntrySpan 后,解决该问题。\n【问题四】MQTT Broker 的多种 DataBridge 接入 一般来说,对 MQTT 的追踪链路是 Publisher -\u0026gt; Subscriber,但是在我们的使用场景中,存在 MQTT broker 接收到消息后,通过规则引擎调用其他服务接口这种特殊场景。这便不是 Publisher -\u0026gt; Subscriber,而是 Publisher -\u0026gt; HTTP。\n我们希望能够从 MQTT Payload 中取出 Span 上下文,再注入到 HTTP 的请求头中。然而规则引擎调用接口时,没有办法自定义请求头,所以我们最后的做法是,约定好参数名称,将上下文放到请求体中,在服务收到请求后,从请求体中提取 Context。\n【问题五】Tracing 与 Logging 如何结合 很多时候,只有 Tracing 信息,对于问题排查来说可能还是不充分的,我们非常的期望也能够把 Tracing 和 Logging 进行结合。\n如上图所示,我们会把所有服务的 Tracing 的信息发送到 SkyWalking,同时也会把这个服务产生的日志通过 Fluent Bit 以及 Fluentd 发送到 ElasticSearch。对于这种情况,我们只需要在日志中去记录 Span 的上下文,比如记录 Trace ID 或者 Span ID 等,就可以在 Kibana 里面去进行对于 Trace ID 的搜索,来快速的查看同一次调用链中的日志。\n当然,SkyWalking 它本身也提供了自己的日志收集和分析机制,可以利用 Fluentd 或者 Fluent Bit 等向 SkyWalking 后端发送日志(我们选用了 Fluentd)。当然,像 SkyWalking 后端发送日志的时候,也要符合其日志协议,即可在 UI 上查看相应日志。\n本文介绍了 SkyWalking 的使用方法、插件体系以及实践踩坑等,希望对大家有所帮助。总结一下,SkyWalking 的使用的确是有迹可循的,一般来说我们只要接入插件,基本上可以涵盖大部分的场景,达到链路追踪的目的。但是也要注意,很多时候需要具体问题具体分析,尤其是在链路复杂的情况下,很多地方还是需要根据不同场景来进行一些特殊处理。\n最后,我们正在使用的 FaaS 平台 OpenFunction 近期也接入了 SkyWalking 作为其 链路追踪的解决方案:\nOpenFunction 提供了插件体系,并预先定义了 SkyWalking pre/post 插件;编写函数时,用户无需手动埋点,只需在 OpenFunction 配置文件中简单配置,即可开启 SkyWalking 插件,达到链路追踪的目的。\n 在感叹 OpenFunction 动作迅速的同时,也能够看到 SkyWalking 已成为链路追踪领域的首要选择之一。\n参考资料  OpenTracing 文档:https://wu-sheng.gitbooks.io/opentracing-io/content/pages/spec.html SkyWalking 文档:https://skywalking.apache.org/docs/main/latest/readme/ SkyWalking GitHub:https://github.com/apache/skywalking SkyWalking go2sky GitHub:https://github.com/SkyAPM/go2sky SkyWalking Python GitHub:https://github.com/apache/skywalking-python SkyWalking Helm Chart:https://github.com/apache/skywalking-kubernetes SkyWalking Solution for OpenFunction https://openfunction.dev/docs/best-practices/skywalking-solution-for-openfunction/  ","title":"SkyWalking 
在无人驾驶领域的实践","url":"/zh/2022-04-13-skywalking-in-autonomous-driving/"},{"content":"SkyWalking Client JS 0.8.0 is released. Go to downloads page to find release tars.\n Fix fmp metric. Add e2e tese based on skywaling-infra-e2e. Update metric and events. Remove ServiceTag by following SkyWalking v9 new layer model.  ","title":"Release Apache SkyWalking Client JS 0.8.0","url":"/events/release-apache-skywalking-client-js-0-8-0/"},{"content":"SkyWalking 9.0.0 is released. Go to downloads page to find release tars.\nSkyWalking v9 is the next main stream of the OAP and UI.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nRocketBot UI has officially been replaced by the Booster UI.\nChanges by Version Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. 
Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. 
Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. 
Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 9.0.0","url":"/events/release-apache-skywalking-apm-9.0.0/"},{"content":"SkyWalking CLI 0.10.0 is released. Go to downloads page to find release tars.\nFeatures  Allow setting start and end with relative time (#128) Add some commands for the browser (#126) Add the sub-command service layer to query services according to layer (#133) Add the sub-command layer list to query layer list (#133) Add the sub-command instance get to query single instance (#134) Add the sub-command endpoint get to query single endpoint info (#134) Change the GraphQL method to the v9 version according to the server version (#134) Add normal field to Service entity (#136) Add the command process for query Process metadata (#137) Add the command profiling ebpf for process ebpf profiling (#138) Support getprofiletasklogs query (#125) Support query list alarms (#127) [Breaking Change] Update the command profile as a sub-command profiling trace, and update profiled-analyze command to analysis (#138) profiling ebpf/trace analysis generates the profiling graph HTML on default and saves it to the current work directory (#138)  Bug Fixes  Fix quick install (#131) Set correct go version in publishing snapshot docker image (#124) Stop build kit container after finishing (#130)  Chores  Add cross platform build targets (#129) Update download host (#132)  ","title":"Release Apache SkyWalking CLI 0.10.0","url":"/events/release-apache-skywalking-cli-0-10-0/"},{"content":"SkyWalking is an open-source APM system, including monitoring, tracing, and diagnosing capabilities for distributed systems in Cloud Native architecture. It covers monitoring for Linux, Kubernetes, Service Mesh, Serverless/Function-as-a-Service, agent-attached services, and browsers. With data covering traces, metrics, logs, and events, SkyWalking is a full-stack observability APM system.\nOpen Source Promotion Plan is a summer program organized and long-term supported by Open Source Software Supply Chain Promotion Plan. It aims to encourage college students to actively participate in developing and maintaining open-source software and promote the vigorous development of an excellent open-source software community.\nApache SkyWalking has been accepted in OSPP 2022\n   Project Description Difficulty Mentor / E-mail Expectation Tech. 
Requirements Repository     SkyAPM-PHP Add switches for monitoring items Advanced Level Yanlong He / heyanlong@apache.org Complete project development work C++, GO, PHP https://github.com/SkyAPM/SkyAPM-php-sdk   SkyWalking-Infra-E2E Optimize verifier Normal Level Huaxi Jiang / hoshea@apache.org 1. Continue to verify cases when other cases fail  2. Merge retry outputs  3. Prettify verify results' output Go https://github.com/apache/skywalking-infra-e2e   SkyWalking Metrics anomaly detection with machine learning Advanced Level Yihao Chen / yihaochen@apache.org An MVP version of ML-powered metrics anomaly detection using dynamic baselines and thresholds Python, Java https://github.com/apache/skywalking   SkyWalking Python Collect PVM metrics and send the metrics to OAP backend, configure dashboard in UI Normal Level Zhenxu Ke / kezhenxu94@apache.org Core Python VM metrics should be collected and displayed in SkyWalking. Python https://github.com/apache/skywalking-python issue   SkyWalking BanyanDB Command line tools for BanyanDB Normal Level Hongtao Gao / hanahmily@apache.org Command line tools should access relevant APIs to manage resources and online data. Go https://github.com/apache/skywalking-banyandb   SkyWalking SWCK CRD and controller for BanyanDB Advance Level Ye Cao / dashanji@apache.org CRD and controller provision BanyanDB as the native Storage resource. Go https://github.com/apache/skywalking-swck   SkyAPM-Go2sky Collect golang metrics such as gc, goroutines and threads, and send the the metrics to OAP backend, configure dashboard in UI Normal Level Wei Zhang / zhangwei24@apache.org Core golang metrics should be collected and displayed in SkyWalking. Go https://github.com/SkyAPM/go2sky   SkyWalking Collect system metrics such as system_load, cpu_usage, mem_usage from telegraf and send the metrics to OAP backend, configure dashboard in UI Normal Level Haoyang Liu / liuhaoyangzz@apache.org System metrics should be collected and displayed in SkyWalking. Java https://github.com/apache/skywalking    Mentors could submit pull requests to update the above list.\nContact the community You could send emails to mentor\u0026rsquo;s personal email to talk about the project and details. The official mail list of the community is dev@skywalking.apache.org. You need to subscribe to the mail list to get all replies. 
Send mail to dev-suscribe@skywalking.apache.org and follow the replies.\n","title":"Open Source Promotion Plan 2022 -- Project List","url":"/events/summer-ospp-2022/readme/"},{"content":"如果要讨论提高自己系统设计能力的方式,我想大多数人都会选择去阅读优秀开源项目的源代码。近年来我参与了多个监控服务的开发工作,并在工作中大量地使用了 SkyWalking 并对其进行二次开发。在这个过程中,我发现 SkyWalking 天然的因其国产的身份,整套源代码地组织和设计非常符合国人的编程思维。由此我录制了本套课程,旨在和大家分享我的一些浅薄的心得和体会。\n本套课程分为两个阶段,分别讲解 Agent 端和 OAP 端地设计和实现。每个阶段的内容都是以启动流程作为讲解主线,逐步展开相关的功能模块。除了对 SKyWalking 本身内容进行讲解,课程还针对 SKyWalking 使用到的一些较为生僻的知识点进行了补充讲解(如 synthetic、NBAC 机制、自定义类加载器等),以便于大家更清晰地掌握课程内容。\nSkyWalking8.7.0 源码分析 - 视频课程直达链接\n目前课程已更新完 Agent 端的讲解,目录如下:\n 01-开篇和源码环境准备 02-Agent 启动流程 03-Agent 配置加载流程 04-自定义类加载器 AgentClassLoader 05-插件定义体系 07-插件加载 06-定制 Agent 08-什么是 synthetic 09-NBAC 机制 10-服务加载 11-witness 组件版本识别 12-Transform 工作流程 13-静态方法插桩 14-构造器和实例方法插桩 15-插件拦截器加载流程(非常重要) 16-运行时插件效果的字节码讲解 17-JDK 类库插件工作原理 18-服务-GRPCChanelService 19-服务-ServiceManagementClient 20-服务-CommandService 21-服务-SamplingService 22-服务-JVMService 23-服务-KafkaXxxService 24-服务-StatusCheckService 25-链路基础知识 26-链路 ID 生成 27-TraceSegment 28-Span 基本概念 29-Span 完整模型 30-StackBasedTracingSpan 31-ExitSpan 和 LocalSpan 32-链路追踪上下文 TracerContext 33-上下文适配器 ContextManager 34-DataCarrier-Buffer 35-DataCarrier-全解 36-链路数据发送到 OAP  B站视频地址\n","title":"[视频] SkyWalking 8.7.0 源码分析","url":"/zh/2022-03-25-skywalking-source-code-analyzation/"},{"content":"SkyWalking NodeJS 0.4.0 is released. Go to downloads page to find release tars.\n Fix mysql2 plugin install error. (#74) Update IORedis Plugin, fill dbinstance tag as host if condition.select doesn\u0026rsquo;t exist. (#73) Experimental AWS Lambda Function support. (#70) Upgrade dependencies to fix vulnerabilities. (#68) Add lint pre-commit hook and migrate to eslint. (#66, #67) Bump up gRPC version, and use its new release repository. (#65) Regard baseURL when in Axios Plugin. (#63) Add an API to access the trace id. (#60) Use agent test tool snapshot Docker image instead of building in CI. (#59) Wrapped IORedisPlugin call in try/catch. 
(#58)  ","title":"Release Apache SkyWalking for NodeJS 0.4.0","url":"/events/release-apache-skywalking-nodejs-0-4-0/"},{"content":"大约二十年前我刚开始进入互联网的世界的时候,支撑起整个网络的基础设施,就包括了 Apache 软件基金会(ASF)治下的软件。\nApache Httpd 是开启这个故事的软件,巅峰时期有超过七成的市场占有率,即使是在今天 NGINX 等新技术蓬勃发展的时代,也有三成左右的市场占有率。由 Linux、Apache Httpd、MySQL 和 PHP 组成的 LAMP 技术栈,是开源吞噬软件应用的第一场大型胜利。\n我从 2018 年参与 Apache Flink 开始正式直接接触到成立于 1999 年,如今已经有二十年以上历史的 Apache 软件基金会,并在一年后的 2019 年成为 Apache Flink 项目 Committer 队伍的一员,2020 年成为 Apache Curator 项目 PMC(项目管理委员会)的一员。今年,经由姜宁老师推荐,成为了 Apache Members 之一,也就是 Apache 软件基金会层面的正式成员。\n我想系统性地做一个开源案例库已经很久了。无论怎么分类筛选优秀的开源共同体,The Apache Community 都是无法绕开的。然而,拥有三百余个开源软件项目的 Apache 软件基金会,并不是一篇文章就能讲清楚的案例。本文也没有打算写成一篇长文顾及方方面面,而是启发于自己的新角色,回顾过去近五年在 Apache Community 当中的经历和体验,简单讨论 Apache 的理念,以及这些理念是如何落实到基金会组织、项目组织以及每一个参与者的日常生活事务当中的。\n不过,尽管对讨论的对象做了如此大幅度的缩减,由我自己来定义什么是 Apache 的理念未免也太容易有失偏颇。幸运的是,Apache Community 作为优秀的开源共同体,当然做到了我在《共同创造价值》一文中提到的回答好“我能为你做什么”以及“我应该怎么做到”的问题。Apache Community 的理念之一就是 Open Communications 即开放式讨论,由此产生的公开材料以及基于公开材料整理的文档汗牛充栋。这既是研究 Apache Community 的珍贵材料,也为还原和讨论一个真实的 Apache Community 提出了不小的挑战。\n无论如何,本文将以 Apache 软件基金会在 2020 年发布的纪录片 Trillions and Trillions Served 为主线,结合其他文档和文字材料来介绍 Apache 的理念。\n以人为本 纪录片一开始就讲起了 Apache Httpd 项目的历史,当初的 Apache Group 是基于一个源代码共享的 Web Server 建立起来的邮件列表上的一群人。软件开发当初的印象如同科学研究,因此交流源码在近似科学共同体的开源共同体当中是非常自然的。\n如同 ASF 的联合创始人 Brian Behlendorf 所说,每当有人解决了一个问题或者实现了一个新功能,他出于一种朴素的分享精神,也就是“为什么不把补丁提交回共享的源代码当中呢”的念头,基于开源软件的协作就这样自然发生了。纪录片中有一位提到,她很喜欢 Apache 这个词和 a patchy software 的谐音,共享同一个软件的补丁(patches)就是开源精神最早诞生的形式。\n这是 Apache Community 的根基,我们将会看到这种朴素精神经过发展形成了一个怎样的共同体,在共同体的发展过程当中,这样的根基又是如何深刻地影响了 Apache 理念的方方面面。\nApache Group 的工作模式还有一个重要的特征,那就是每个人都是基于自己的需求修复缺陷或是新增功能,在邮件列表上交流和提交补丁的个人,仅仅只是代表他个人,而没有一个“背后的组织”或者“背后的公司”。因此,ASF 的 How it Works 文档中一直强调,在基金会当中的个体,都只是个体(individuals),或者称之为志愿者(volunteers)。\n我在某公司的分享当中提到过,商业产品可以基于开源软件打造,但是当公司的雇员出现在社群当中的时候,他应该保持自己志愿者的身份。这就像是开源软件可以被用于生产环境或者严肃场景,例如航空器的发射和运行离不开 Linux 操作系统,但是开源软件本身是具有免责条款的。商业公司或专业团队提供服务保障,而开源软件本身是 AS IS 的。同样,社群成员本人可以有商业公司雇员的身份,但是他在社群当中,就是一个志愿者。\n毫无疑问,这种论调当即受到了质疑,因为通常的认知里,我就是拿了公司的钱,就是因为在给这家公司打工,才会去关注这个项目,你非要说我是一个志愿者,我还就真不是一个志愿者,你怎么说?\n其实这个问题,同样在 How it Works 文档中已经有了解答。\n All participants in ASF projects are volunteers and nobody (not even members or officers) is paid directly by the foundation to do their job. There are many examples of committers who are paid to work on projects, but never by the foundation itself. 
Rather, companies or institutions that use the software and want to enhance it or maintain it provide the salary.\n 我当时基于这样的认识,给到质疑的回答是,如果你不想背负起因为你是员工,因此必须响应社群成员的 issue 或 PR 等信息,那么你可以试着把自己摆在一个 volunteer 的角度来观察和参与社群。实际上,你并没有这样的义务,即使公司要求你必须回答,那也是公司的规定,而不是社群的要求。如果你保持着这样的认识和心态,那么社群于你而言,才有可能是一个跨越职业生涯不同阶段的归属地,而不是工作的附庸。\n社群从来不会从你这里索取什么,因为你的参与本身也是自愿的。其他社群成员会感谢你的参与,并且如果相处得好,这会是一个可爱的去处。社群不是你的敌人,不要因为公司下达了离谱的社群指标而把怒火发泄在社群和社群成员身上。压力来源于公司,作为社群成员的你本来可以不用承受这些。\nApache Community 对个体贡献者组成社群这点有多么重视呢?只看打印出来不过 10 页 A4 纸的 How it Works 文档,volunteer 和 individuals 两个词加起来出现了 19 次。The Apache Way 文档中强调的社群特征就包括了 Independence 一条,唯一并列的另一个是经常被引用的 Community over code 原则。甚至,有一个专门的 Project independence 文档讨论了 ASF 治下的项目如何由个体志愿者开发和维护,又为何因此是中立和非商业性的。\nINDIVIDUALS COMPOSE THE ASF 集中体现了 ASF 以人为本的理念。实际上,不止上面提到的 Independence 强调了社群成员个体志愿者的属性,Community over code 这一原则也在强调 ASF 关注围绕开源软件聚集起来的人,包括开发者、用户和其他各种形式的参与者。人是维持社群常青的根本,在后面具体讨论 The Apache Way 的内容的时候还会展开。\n上善若水 众所周知,Apache License 2.0 (APL-2.0) 是所谓的宽容式软件协议。也就是说,不同于 GPL 3.0 这样的 Copyleft 软件协议要求衍生作品需要以相同的条款发布,其中包括开放源代码和自由修改从而使得软件源代码总是可以获取和修改的,Apache License 在协议内容当中仅保留了著作权和商标,并要求保留软件作者的任何声明(NOTICE)。\nASF 在软件协议上的理念是赋予最大程度的使用自由,鼓励用户和开发者参与到共同体当中来,鼓励与上游共同创造价值,共享补丁。“鼓励”而不是“要求”,是 ASF 和自由软件基金会(Free Software Foundation, FSF)最主要的区别。\n这一倾向可以追溯到 Apache Group 建立的基础。Apache Httpd 派生自伊利诺伊大学的 NCSA Httpd 项目,由于使用并开发这个 web server 的人以邮件列表为纽带聚集在一起,通过交换补丁来开发同一个项目。在项目的发起人 Robert McCool 等大学生毕业以后,Apache Group 的发起人们接过这个软件的维护和开发工作。当时他们看到的软件协议,就是一个 MIT License 精神下的宽容式软件协议。自然而然地,Apache Group 维护 Apache Httpd 的时候,也就继承了这个协议。\n后来,Apache Httpd 打下了 web server 的半壁江山,也验证了这一模式的可靠性。虽然有些路径依赖的嫌疑,但是 ASF 凭借近似“上善若水”的宽容理念,在二十年间成功创造了数以百亿计美元价值的三百多个软件项目。\n纪录片中 ASF 的元老 Ted Dunning 提到,在他早期创造的软件当中,他会在宽容式软件协议之上,添加一个商用的例外条款。这就像是著名开源领域律师 Heather Meeker 起草的 The Commons Clause 附加条款。\n Without limiting other conditions in the License, the grant of rights under the License will not include, and the License does not grant to you, the right to Sell the Software.\n 附加 The Commons Clause 条款的软件都不是符合 OSD 定义的开源软件,也不再是原来的协议了。NebulaGraph 曾经在附加 The Commons Clause 条款的情况下声称自己是 APL-2.0 协议许可的软件,当时的 ASF 董事吴晟就提 issue (vesoft-inc/nebula#3247) 指出这一问题。NebulaGraph 于是删除了所有 The Commons Clause 的字样,保证无误地以 APL-2.0 协议许可该软件。\nTed Dunning 随后提到,这样的附加条款实际上严重影响了软件的采用。他意识到自己实际上并不想为此打官司,因此加上这样的条款对他而言是毫无意义的。Ted Dunning 于是去掉了附加条款,而这使得使用他的软件的条件能够简单的被理解,从而需要这些软件的用户能够大规模的采用。“水利万物而不争”,反而是不去强迫和约束用户行为的做法,为软件赢得了更多贡献。\n我仍然很敬佩采用 GPL 系列协议发布高质量软件的开发者,Linux 和 GCC 这样的软件的成功改变了世人对软件领域的自由的认识。然而,FSF 自己也认识到需要提出修正的 LGPL 来改进应用程序以外的软件的发布和采用,例如基础库。\nAPL-2.0 的思路与之不同,它允许任何人以任何形式使用、修改和分发软件,因此 ASF 治下的项目,以及 Linux Foundation 治下采用 APL-2.0 的项目,以及更多个人或组织采用 APL-2.0 的项目,共同构成了强大的开源软件生态,涵盖了应用软件,基础库,开发工具和框架等等各个方面。事实证明,“鼓励”而不是“要求”用户秉持 upstream first 的理念,尽可能参与到开源共同体并交换知识和补丁,共同创造价值,是能够制造出高质量的软件,构建出繁荣的社群和生态的。\n匠人精神 Apache Community 关注开发者的需要。\nApache Group 成立 ASF 的原因,是在 Apache Httpd 流行起来以后,商业公司和社会团体开始寻求和这个围绕项目形成的群体交流。然而,缺少一个正式的法律实体让组织之间的往来缺乏保障和流程。因此,如同纪录片当中提到的,ASF 成立的主要原因,是为了支撑 Apache Httpd 项目。只不过当初的创始成员们很难想到的是,ASF 最终支撑了数百个开源项目。\n不同于 Linux Foundation 是行业联盟,主要目的是为了促进其成员的共同商业利益,ASF 主要服务于开发者,由此支撑开源项目的开发以及开源共同体的发展。\n举例来说,进入 ASF 孵化器的项目都能够在 ASF Infra 的支持下运行自己的 apache.org 域名的网站,将代码托管在 ASF 仓库中上,例如 Apache GitBox Repositories 和 Apache GitHub Organization 等。这些仓库上运行着自由取用的开发基础设施,例如持续集成和持续发布的工具和资源等等。ASF 还维护了自己的邮件列表和文件服务器等一系列资源,以帮助开源项目建立起自己的共同体和发布自己的构件。\n反观 Linux Foundation 的主要思路,则是关注围绕项目聚集起来的供应商,以行业联盟的形式举办联合市场活动扩大影响,协调谈判推出行业标准等等。典型地,例如 CNCF 一直致力于定义云上应用开发的标准,容器虚拟化技术的标准。上述 ASF Infra 关注的内容和资源,则大多需要项目开发者自己解决,这些开发者往往主要为一个或若干个供应商工作,他们解决的方式通常也是依赖供应商出力。\n当然,上面的对比只是为了说明区别,并无优劣之分,也不相互对立。ASF 的创始成员 Brian Behlendorf 同时是 Linux Foundation 下 Open 
Source Security Foundation 的经理,以及 Hyperledger 的执行董事。\nASF 关注开发者的需要,体现出 Apache Community 及其成员对开发者的人文关怀。纪录片中谈到 ASF 治下项目的开发体验时,几乎每个人的眼里都有光。他们谈论着匠人精神,称赞知识分享,与人合作,以及打磨技艺的愉快经历。实际上,要想从 Apache 孵化器中成功毕业,相当部分的 mentor 关注的是围绕开源软件形成的共同体,能否支撑开源软件长久的发展和采用,这其中就包括共同体成员是否能够沉下心来做技术,而不是追求花哨的数字指标和人头凑数。\n讲几个具体的开发者福利。\n每个拥有 @apache.org 邮箱的人,即成为 ASF 治下项目 Committer 或 ASF Member 的成员,JetBrains 会提供免费的全家桶订阅授权码。我从 2019 年成为 Apache Flink 项目的 Committer 以后,已经三年沉浸在 IDEA 和 CLion 的包容下,成为彻底使用 IDE 主力开发的程序员了。\nApache GitHub Organization 下的 GitHub Actions 资源是企业级支持,这部分开销也是由 ASF 作为非营利组织募资和运营得到的资金支付的。基本上,如果你的项目成为 Apache 孵化器项目或顶级项目,那么和 GitHub Actions 集成的 CI 体验是非常顺畅的。Apache SkyWalking 只算主仓库就基于 GitHub Actions 运行了十多个端到端测试作业,Apache Pulsar 也全面基于 GitHub Actions 集成了自己的 CI 作业。\n提到匠人精神,一个隐形的开发者福利,其实是 ASF 的成员尤其是孵化器的 mentor 大多是经验非常丰富的开发者。软件开发不只是写代码,Apache Community 成员之间相互帮助,能够帮你跟上全世界最前沿的开发实践。如何提问题,如何做项目管理,如何发布软件,这些平日里在学校在公司很难有机会接触的知识和实践机会,在 Apache Community 当中只要你积极承担责任,都是触手可得的。\n当然,如何写代码也是开发当中最常交流的话题。我深入接触 Maven 开始于跟 Flink Community 的 Chesnay Schepler 的交流。我对 Java 开发的理解,分布式系统开发的知识,很大程度上也得到了 Apache Flink 和 Apache ZooKeeper 等项目的成员的帮助,尤其是 Till Rohrmann 和 Enrico Olivelli 几位。上面提到的 Ted Dunning 开始攻读博士的时候,我还没出生。但是我在项目当中用到 ZooKeeper 的 multi 功能并提出疑问和改进想法的时候,也跟他有过一系列的讨论。\n谈到技艺就会想起人,这也是 ASF 一直坚持以人为本带来的社群风气。\n我跟姜宁老师在一年前认识,交流 The Apache Way 期间萌生出相互认同。姜宁老师在 Apache 孵化器当中帮助众多项目理解 The Apache Way 并予以实践,德高望重。在今年的 ASF Members 年会当中,姜宁老师也被推举为 ASF Board 的一员。\n我跟吴晟老师在去年认识。他经常会强调开发者尤其是没有强烈公司背景的开发者的视角,多次提到这些开发者是整个开源生态的重要组成部分。他作为 PMC Chair 的 Apache SkyWalking 项目相信“没有下一个版本的计划,只知道会有下一个版本”,这是最佳实践的传播,也是伴随技术的文化理念的传播。SkyWalking 项目出于自己需要,也出于为开源世界添砖加瓦的动机创建的 SkyWalking Eyes 项目,被广泛用在不止于 ASF 治下项目,而是整个开源世界的轻量级的软件协议审计和 License Header 检查上。\n主要贡献在 Apache APISIX 的琚致远同学今年也被推选成为 Apache Members 的一员。他最让我印象深刻的是在 APISIX 社群当中积极讨论社群建设的议题,以及作为 APISIX 发布的 GSoC 项目的 mentor 帮助在校学生接触开源,实践开源,锻炼技艺。巧合的是,他跟我年龄相同,于是我痛失 Youngest Apache Member 的噱头,哈哈。\n或许,参与 Apache Community 就是这样的一种体验。并不是什么复杂的叙事,只是找到志同道合的人做出好的软件。我希望能够为提升整个软件行业付出自己的努力,希望我(参与)制造的软件创造出更大的价值,这里的人看起来大都也有相似的想法,这很好。仅此而已。\n原本还想聊聊 The Apache Way 的具体内容,还有介绍 Apache Incubator 这个保持 Apache Community 理念常青,完成代际传承的重要机制,但是到此为止似乎也很好。Apache Community 的故事和经验很难用一篇文章讲完,这两个话题就留待以后再写吧。\n","title":"我眼中的 The Apache Way","url":"/zh/2022-03-14-the-apache-community/"},{"content":"SkyWalking Client Rust 0.1.0 is released. Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking Client Rust 0.1.0","url":"/events/release-apache-skywalking-client-rust-0-1-0/"},{"content":"SkyWalking Java Agent 8.9.0 is released. Go to downloads page to find release tars. Changes by Version\n8.9.0  Support Transaction and fix duplicated methods enhancements for jedis-2.x plugin. Add ConsumerWrapper/FunctionWrapper to support CompletableFuture.x.thenAcceptAsync/thenApplyAsync. Build CLI from Docker instead of source codes, add alpine based Docker image. Support set instance properties in json format. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Add doc about system environment variables to configurations.md Avoid ProfileTaskChannelService.addProfilingSnapshot throw IllegalStateException(Queue full) Increase ProfileTaskChannelService.snapshotQueue default size from 50 to 4500 Support 2.8 and 2.9 of pulsar client. Add dubbo 3.x plugin. 
Fix TracePathMatcher should match pattern \u0026ldquo;**\u0026rdquo; with paths end by \u0026ldquo;/\u0026rdquo; Add support returnedObj expression for apm-customize-enhance-plugin Fix the bug that httpasyncclient-4.x-plugin puts the dirty tracing context in the connection context Compatible with the versions after dubbo-2.7.14 Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Support ZGC GC time and count metric collect. (Require 9.0.0 OAP) Support configuration for collecting redis parameters for jedis-2.x and redisson-3.x plugin. Migrate base images to Temurin and add images for ARM. (Plugin Test) Fix compiling issues in many plugin tests due to they didn\u0026rsquo;t lock the Spring version, and Spring 3 is incompatible with 2.x APIs and JDK8 compiling. Support ShardingSphere 5.0.0 Bump up gRPC to 1.44.0, fix relative CVEs.  Documentation  Add a FAQ, Why is -Djava.ext.dirs not supported?.  All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.9.0","url":"/events/release-apache-skywalking-java-agent-8-9-0/"},{"content":"Apache SkyWalking is an open-source APM for a distributed system, Apache Software Foundation top-level project.\nOn Jan. 28th, we received a License violation report from one of the committers (anonymously). They have a cloud service called Application Performance Monitoring - Distributed Tracing (应用性能监控全链路版). At the Java service monitoring section, it provides this agent download link\n wget https://datarangers.com.cn/apminsight/repo/v2/download/java-agent/apminsight-java-agent_latest.tar.gz\n We downloaded it at 23:15 Jan. 28th UTC+8(Beijing), and archived it at here\nWe have confirmed this is a distribution of SkyWalking Java agent.\nWe listed several pieces of evidence to prove this here, every reader could compare with the official SkyWalking source codes\n The first and the easiest one is agent.config file, which is using the same config keys, and the same config format.  This is the Volcengine\u0026rsquo;s version, and check SkyWalking agent.config In the apmplus-agent.jar, Volcengine\u0026rsquo;s agent core jar, you could easily find several core classes exactly as same as SkyWalking\u0026rsquo;s.  The ComponentsDefine class is unchanged, even with component ID and name. This is Volcengine\u0026rsquo;s version, and check SkyWalking\u0026rsquo;s version\nThe whole code names, package names, and hierarchy structure are all as same as SkyWalking 6.x version.  This is the Volcengine package hierarchy structure, and check the SkyWalking\u0026rsquo;s version\n Volcengine Inc.\u0026rsquo;s team changed all package names, removed the Apache Software Foundation\u0026rsquo;s header, and don\u0026rsquo;t keep Apache Software Foundation and Apache SkyWalking\u0026rsquo;s LICENSE and NOTICE file in their redistribution.\nAlso, we can\u0026rsquo;t find anything on their website to declare they are distributing SkyWalking.\nAll above have proved they are violating the Apache 2.0 License, and don\u0026rsquo;t respect Apache Software Foundation and Apache SkyWalking\u0026rsquo;s IP and Branding.\nWe have contacted their legal team, and wait for their official response.\nResolution On Jan. 30th night, UTC+8, 2022. We received a response from Volcengine\u0026rsquo;s APMPlus team. They admitted their violation behaviors, and made the following changes.\n Volcengine\u0026rsquo;s APMPlus service page was updated on January 30th and stated that the agent is a fork version(re-distribution) of Apache SkyWalking agent. 
Below is the screenshot of Volcengine\u0026rsquo;s APMPlus product page.  Volcengine\u0026rsquo;s APMPlus agent distributions were also updated and include SkyWalking\u0026rsquo;s License and NOTICE now. Below is the screenshot of Volcengine\u0026rsquo;s APMPlus latest agent, you could download from the product page. We keep a copy of their Jan. 30th 2022 at here.  Volcengine\u0026rsquo;s APMPlus team had restored all license headers of SkyWalking in the agent, and the modifications of the project files are also listed in \u0026ldquo;SkyWalking-NOTICE\u0026rdquo;, which you could download from the product page.  We have updated the status to the PMC mail list. This license violation issue has been resolved for now.\n Appendix Inquiries of committers Q: I hope Volcengine Inc. can give a reason for this license issue, not just an afterthought PR. This will not only let us know where the issue is but also avoid similar problems in the future.\nA(apmplus apmplus@volcengine.com):\nThe developers neglected this repository during submitting compliance assessment. Currently, APMPlus team had introduced advanced tools provided by the company for compliance assessment, and we also strengthened training for our developers. In the future, the compliance assessment process will be further improved from tool assessment and manual assessment. ","title":"[Resolved][License Issue] Volcengine Inc.(火山引擎) violates the Apache 2.0 License when using SkyWalking.","url":"/blog/2022-01-28-volcengine-violates-aplv2/"},{"content":"Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. 
A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. 
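As a rough illustration of the asynchronous batch policy described in the Transmission Policy section above, here is a minimal Java sketch (the names Event, BatchForwarder and Upstream are hypothetical, not the actual SkyWalking-Satellite code): received data is wrapped as events and buffered in a bounded queue, and one or more sender threads drain the queue and forward the events in batches, either when enough events have accumulated or when the flush interval elapses.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

// Sketch of the asynchronous batch policy: receiving and sending are decoupled by a queue.
public class BatchForwarder {
    static final class Event {
        final byte[] payload;
        Event(byte[] payload) { this.payload = payload; }
    }

    interface Upstream { void send(List<Event> batch); } // e.g. a wrapper around a gRPC client

    private final BlockingQueue<Event> queue = new LinkedBlockingQueue<>(10_000);
    private final int batchSize = 100;            // flush when this many events are buffered...
    private final long flushIntervalMs = 1_000;   // ...or when this interval elapses

    // Receiving side: wrap the incoming data as an event and enqueue it without blocking.
    public boolean accept(byte[] data) {
        return queue.offer(new Event(data));
    }

    // Sending side: run by one or more dedicated threads to make fuller use of network IO.
    public void runSender(Upstream upstream) throws InterruptedException {
        List<Event> batch = new ArrayList<>(batchSize);
        while (!Thread.currentThread().isInterrupted()) {
            Event first = queue.poll(flushIntervalMs, TimeUnit.MILLISECONDS);
            if (first != null) {
                batch.add(first);
                queue.drainTo(batch, batchSize - batch.size());
            }
            if (!batch.isEmpty()) {
                upstream.send(batch); // one batched call to the upstream OAP
                batch.clear();
            }
        }
    }
}

Whichever transmission policy is used, a routing algorithm still has to decide which upstream OAP node the data goes to; the concrete steps of the Fixed algorithm are listed next.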
The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. 
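Before moving on to how Satellite is deployed, here is a minimal Java sketch of the Fixed routing steps listed above (hypothetical names, not the Satellite source): the identification string generated from the data is looked up in an LRU cache first; on a miss, its hash picks a node from the upstream list, and the mapping is cached so the same identification keeps landing on the same node.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Sketch of the "Fixed" routing algorithm: the same identification always maps
// to the same upstream node, with an LRU cache remembering the mapping.
public class FixedRouter {
    private final List<String> upstreamNodes;   // e.g. the list of OAP addresses
    private final Map<String, String> lru;      // identification -> upstream node

    public FixedRouter(List<String> upstreamNodes, int cacheSize) {
        this.upstreamNodes = upstreamNodes;
        this.lru = new LinkedHashMap<String, String>(cacheSize, 0.75f, true) {
            @Override protected boolean removeEldestEntry(Map.Entry<String, String> eldest) {
                return size() > cacheSize; // evict the least recently used mapping
            }
        };
    }

    // identification: a short, unique string generated from the data content,
    // e.g. the service instance name when routing Meter protocol data.
    public synchronized String route(String identification) {
        String cached = lru.get(identification);
        if (cached != null && upstreamNodes.contains(cached)) {
            return cached; // reuse the cached node while it still exists upstream
        }
        int idx = Math.floorMod(identification.hashCode(), upstreamNodes.size());
        String node = upstreamNodes.get(idx); // hash into the upstream node list
        lru.put(identification, node);        // remember the mapping for next time
        return node;
    }
}

The default Round-Robin policy can simply rotate an index over the same node list instead of hashing.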
It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. 
For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. 
One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","title":"Scaling with Apache SkyWalking","url":"/blog/2022-01-24-scaling-with-apache-skywalking/"},{"content":"SkyWalking Cloud on Kubernetes 0.6.1 is released. 
Go to downloads page to find release tars.\n Bugs  Fix could not deploy metrics adapter to GKE    ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.6.1","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-6-1/"},{"content":"随着业务与用户量的持续发展,系统的瓶颈也逐渐出现。尤其在一些节假日、突发的营销活动中,访问量激增可能会导致系统性能下降,甚至造成系统瘫痪。 全链路压测可以很好的帮助我们预先演练高峰流量,从而提前模拟出系统的执行情况,帮助我们预估系统容量。当流量真正来临时,也可以更从容面对。 Apache SkyWalking 联合 Apache APISIX 及 Apache ShardingSphere,三大顶级开源社区通力合作,共同打造生产级可用的全链路压测解决方案,CyborgFlow。\n介绍 CyborgFlow 是一款面向生产级可用的全链路压测解决方案。总共由三个组件组成,如下图所示。\n Flow Gateway: 压测流量网关。当流量到达该组件时,则会将请求认定为压测流量,并将压测流量标识传递至上游服务。 Database Shadow: 数据库中间件。当数据库中间件感知到当前流量为压测流量时,则会将数据库操作路由至影子表中进行操作。 Agent/Dashboard: 分布式监控系统。与业务系统紧密结合,当感知到压测请求后,自动将其标识传递至上游,无需业务代码改造。并且利用分析能力,构建Dashboard来便于查看流量情况。  以此,便覆盖了单个请求的完整生命周期,在网关层构建压测标识,到业务系统透传标识,最终将请求与影子表交互。同时整个流程拥有完整的监控分析。\n原理 依托于三大社区合作,让这一切变得简单易用。下图为全链路压测系统的运行原理,橙色和蓝色分别代表正常流量和压测流量。\nFlow Gateway Flow Gateway 作为压测流量网关,主要负责接收流量,并传递压测流量表示至上游。\n 添加 skywalking插件 构建链路入口。 依据 proxy-rewrite插件 将压测流量标识注入到上游的请求头中。  Agent/Dashboard 该组件中则分为两部分内容说明。\nAgent Agent与业务程序拥有相同生命周期,负责压测流量标识在各个业务系统之间传递,并与 Database Shadow 交互。\n SkyWalking Agent通过读取从Flow Gateway传递的压测流量标识,利用 透传协议 将该标识在应用之间传递。 当准备进行数据库调用时,则通过判断是否包含压测流量标识来决定是否SQL调用时追加压测流量标识(/* cyborg-flow: true */)。 当检测到当前请求包含压测流量标识后,将该数据与Trace绑定,用于Dashboard数据分析。  Dashboard Dashboard 用于压测过程进行中的监控数据分析,并最终以图表的方式进行展示。\n 接收来自Agent中上报的Trace数据,并依据OAL中的Tag过滤器(.filter(tags contain \u0026quot;cyborg-flow:true\u0026quot;))来生成压测与非压测的指标数据。 利用指标数据便可以在Dashboard中创建图表进行观察。  Database Shadow Database Shadow 作为 Proxy 在业务程序与数据库中间完成数据交互,当检测到压测流量时则会将SQL传递至影子表中处理。\n 检测下游传递的数据库语句中是否包含压测流量标识(/* cyborg-flow: true */),存在时则将SQL交给由用户配置的影子表中处理。  快速上手 下面将带你快速将Cyborg Flow集成至你的项目中。相关组件的下载请至 Github Release 中下载,目前已发布 0.1.0 版本。\n部署 Database Shadow  解压缩cyborg-database-shadow.tar.gz。 将 conf/config-shadow.yaml 文件中的业务数据库与影子数据库配置为自身业务中的配置。 启动 Database Shadow服务,启动脚本位于bin/start.sh中。  如需了解更详细的部署参数配置,请参考 官方文档 。\n部署 Cyborg Dashboard  解压缩cyborg-dashboard.tar.gz。 启动后端与UI界面服务,用于链路数据解析与界面展示,启动脚本位于bin/startup.sh中。 接下来就可以通过打开浏览器并访问http://localhost:8080/,此页面为Cyborg Dashboard界面,由于目前尚未部署任何业务程序,所以暂无任何数据。  如需了解更详细的部署参数配置,请参考 后端服务 与 UI界面服务 的安装文档。\n部署 Cyborg Agent 到业务程序中  解压缩cyborg-agent.tar.gz. 修改config/agent.config中的collector.backend_service为 Cyborg Dashboard 中后端地址(默认为11800端口),用于将监控数据上报至 Cyborg Dashboard 。 修改业务程序中与数据库的链接,将其更改为 Database Shadow 中的配置。默认访问端口为3307,用户名密码均为root。 当程序启动时,增加该参数到启动命令中:-jar path/to/cyborg-agent/skywalking-agent.jar。  如需了解更详细的部署参数配置,请参考 Agent安装文档 。\n部署 Flow Gateway  参考 Flow Gateway 快速开始 进行下载 Apache APISIX 并配置相关插件。 基于 APISIX 创建路由文档 进行路由创建。  完成! 最后,通过Flow Gateway访问业务系统资源,便完成了一次压测流量请求。\n 压测流量最终访问至影子表进行数据操作。 如下图所示,通过观察 Cyborg Dashboard 便可以得知压测与非压测请求的执行情况。  总结 在本文中,我们详细介绍了Cyborg Flow中的各个组件的功能、原理,最终搭配快速上手来快速将该系统与自己的业务系统结合。 如果在使用中有任何问题,欢迎来共同讨论。\n","title":"Cyborg Flow X SkyWalking: 生产环境全链路压测","url":"/zh/2022-01-18-cyborg-flow/"},{"content":"SkyWalking Cloud on Kubernetes 0.6.0 is released. Go to downloads page to find release tars.\n Features  Add the Satellite CRD, webhooks and controller   Bugs  Update release images to set numeric user id Fix the satellite config not support number error Use env JAVA_TOOL_OPTIONS to replace AGENT_OPTS   Chores  Add stabilization windows feature in satellite HPA documentation    ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.6.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-6-0/"},{"content":"SkyWalking Kong Agent 0.2.0 is released. Go to downloads page to find release tars.\n Establish the SkyWalking Kong Agent.  
","title":"Release Apache SkyWalking Kong 0.2.0","url":"/events/release-apache-skywalking-kong-0-2-0/"},{"content":"SkyWalking Satellite 0.5.0 is released. Go to downloads page to find release tars.\nFeatures  Make the gRPC client client_pem_path and client_key_path as an optional config. Remove prometheus-server sharing server plugin. Support let the telemetry metrics export to prometheus or metricsService. Add the resource limit when gRPC server accept connection.  Bug Fixes  Fix the gRPC server enable TLS failure. Fix the native meter protocol message load balance bug.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 0.5.0","url":"/events/release-apache-skwaylking-satellite-0-5-0/"},{"content":"SkyWalking LUA Nginx 0.6.0 is released. Go to downloads page to find release tars.\n fix: skywalking_tracer:finish() will not be called in some case such as upstream timeout.  ","title":"Release Apache SkyWalking LUA Nginx 0.6.0","url":"/events/release-apache-skywalking-lua-nginx-0.6.0/"},{"content":"Chaos Mesh is an open-source cloud-native chaos engineering platform. You can use Chaos Mesh to conveniently inject failures and simulate abnormalities that might occur in reality, so you can identify potential problems in your system. Chaos Mesh also offers a Chaos Dashboard which allows you to monitor the status of a chaos experiment. However, this dashboard cannot let you observe how the failures in the experiment impact the service performance of applications. This hinders us from further testing our systems and finding potential problems.\n Apache SkyWalking is an open-source application performance monitor (APM), specially designed to monitor, track, and diagnose cloud native, container-based distributed systems. It collects events that occur and then displays them on its dashboard, allowing you to observe directly the type and number of events that have occurred in your system and how different events impact the service performance.\nWhen you use SkyWalking and Chaos Mesh together during chaos experiments, you can observe how different failures impact the service performance.\nThis tutorial will show you how to configure SkyWalking and Chaos Mesh. You’ll also learn how to leverage the two systems to monitor events and observe in real time how chaos experiments impact applications’ service performance.\nPreparation Before you start to use SkyWalking and Chaos Mesh, you have to:\n Set up a SkyWalking cluster according to the SkyWalking configuration guide. Deploy Chao Mesh using Helm. Install JMeter or other Java testing tools (to increase service loads). Configure SkyWalking and Chaos Mesh according to this guide if you just want to run a demo.  Now, you are fully prepared, and we can cut to the chase.\nStep 1: Access the SkyWalking cluster After you install the SkyWalking cluster, you can access its user interface (UI). However, no service is running at this point, so before you start monitoring, you have to add one and set the agents.\nIn this tutorial, we take Spring Boot, a lightweight microservice framework, as an example to build a simplified demo environment.\n Create a SkyWalking demo in Spring Boot by referring to this document. Execute the command kubectl apply -f demo-deployment.yaml -n skywalking to deploy the demo.  After you finish deployment, you can observe the real-time monitoring results at the SkyWalking UI.\nNote: Spring Boot and SkyWalking have the same default port number: 8080. 
Be careful when you configure the port forwarding; otherise, you may have port conflicts. For example, you can set Spring Boot’s port to 8079 by using a command like kubectl port-forward svc/spring-boot-skywalking-demo 8079:8080 -n skywalking to avoid conflicts.\nStep 2: Deploy SkyWalking Kubernetes Event Exporter SkyWalking Kubernetes Event Exporter is able to watch, filter, and send Kubernetes events into the SkyWalking backend. SkyWalking then associates the events with the system metrics and displays an overview about when and how the metrics are affected by the events.\nIf you want to deploy SkyWalking Kubernetes Event Explorer with one line of commands, refer to this document to create configuration files in YAML format and then customize the parameters in the filters and exporters. Now, you can use the command kubectl apply to deploy SkyWalking Kubernetes Event Explorer.\nStep 3: Use JMeter to increase service loads To better observe the change in service performance, you need to increase the service loads on Spring Boot. In this tutorial, we use JMeter, a widely adopted Java testing tool, to increase the service loads.\nPerform a stress test on localhost:8079 using JMeter and add five threads to continuously increase the service loads.\nOpen the SkyWalking Dashboard. You can see that the access rate is 100%, and that the service loads reach about 5,300 calls per minute (CPM).\nStep 4: Inject failures via Chaos Mesh and observe results After you finish the three steps above, you can use the Chaos Dashboard to simulate stress scenarios and observe the change in service performance during chaos experiments.\nThe following sections describe how service performance varies under the stress of three chaos conditions:\n  CPU load: 10%; memory load: 128 MB\nThe first chaos experiment simulates low CPU usage. To display when a chaos experiment starts and ends, click the switching button on the right side of the dashboard. To learn whether the experiment is Applied to the system or Recovered from the system, move your cursor onto the short, green line.\nDuring the time period between the two short, green lines, the service load decreases to 4,929 CPM, but returns to normal after the chaos experiment ends.\n  CPU load: 50%; memory load: 128 MB\nWhen the application’s CPU load increases to 50%, the service load decreases to 4,307 CPM.\n  CPU load: 100%; memory load: 128 MB\nWhen the CPU usage is at 100%, the service load decreases to only 40% of what it would be if no chaos experiments were taking place.\nBecause the process scheduling under the Linux system does not allow a process to occupy the CPU all the time, the deployed Spring Boot Demo can still handle 40% of the access requests even in the extreme case of a full CPU load.\n  Summary By combining SkyWalking and Chaos Mesh, you can clearly observe when and to what extent chaos experiments affect application service performance. This combination of tools lets you observe the service performance in various extreme conditions, thus boosting your confidence in your services.\nChaos Mesh has grown a lot in 2021 thanks to the unremitting efforts of all PingCAP engineers and community contributors. 
In order to continue to upgrade our support for our wide variety of users and learn more about users’ experience in Chaos Engineering, we’d like to invite you to take this survey and give us your valuable feedback.\nIf you want to know more about Chaos Mesh, you’re welcome to join the Chaos Mesh community on GitHub or our Slack discussions (#project-chaos-mesh). If you find any bugs or missing features when using Chaos Mesh, you can submit your pull requests or issues to our GitHub repository.\n","title":"Chaos Mesh + SkyWalking: Better Observability for Chaos Engineering","url":"/blog/2021-12-21-better-observability-for-chaos-engineering/"},{"content":"SkyWalking Cloud on Kubernetes 0.5.0 is released. Go to downloads page to find release tars.\n Features  Add E2E test cases to verify OAPServer, UI, Java agent and Storage components.   Bugs  Fix operator role patch issues Fix invalid CSR signername Fix bug in the configmap controller   Chores  Bump up KubeBuilder to V3 Bump up metric adapter server to v1.21.0 Split mono-project to two independent projects    ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.5.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-5-0/"},{"content":"We can integrate SkyWalking into a Java application with the Java agent. In a typical deployment, the system runs Java web applications behind a load balancer, and the most commonly used load balancer is Nginx. What should we do if we want to bring the load balancer under observation as well? Fortunately, SkyWalking provides an Nginx agent. During integration, we found that the examples on the official website only work with OpenResty. OpenResty already bundles common modules such as LuaJIT and lua-nginx-module, so adding the SkyWalking configuration from the official examples works out of the box. With plain Nginx, however, the same configuration reports many errors at startup. Since we may not want to replace the load balancer (Nginx with OpenResty) just to use SkyWalking, we need to solve the integration between SkyWalking and plain Nginx.\nNote: OpenResty is a high-performance web development platform based on Nginx + Lua, which addresses the weakness that plain Nginx is not easy to program.\nThis guide is based on SkyWalking 8.7.0 and Nginx 1.20.1.\nUpgrade of Nginx: The Nginx agent plug-in is written in Lua, so Nginx needs Lua support, which is exactly what lua-nginx-module provides. lua-nginx-module in turn depends on LuaJIT, so we need to install LuaJIT first; version 2.1 is the best choice.\nFor plain Nginx, you need to compile the necessary modules yourself.
It depends on the following two modules:\nlua-nginx-module The version is lua-nginx-module-0.10.21rc1\nngx_devel_kit The version using ngx_devel_kit-0.3.1\nCompile nginx parameters\nconfigure arguments: --add-module=/path/to/ngx_devel_kit-0.3.1 --add-module=/path/to/lua-nginx-module-0.10.21rc1 --with-ld-opt=-Wl,-rpath,/usr/local/LuaJIT/lib The following is for skywalking-nginx-lua-0.3.0 and 0.3.0+ are described separately.\nskywalking-nginx-lua-0.3.0 After testing, skywalking-nginx-lua-0.3.0 requires the following Lua related modules\nlua-resty-core https://github.com/openresty/lua-resty-core lua-resty-lrucache https://github.com/openresty/lua-resty-lrucache lua-cjson https://github.com/openresty/lua-cjson The dependent Lua modules are as follows:\nlua_package_path \u0026#34;/path/to/lua-resty-core/lua-resty-core-master/lib/?.lua;/path/to/lua-resty-lrucache-0.11/lib/?.lua;/path/to/skywalking-nginx-lua-0.3.0/lib/?.lua;;\u0026#34;; In the process of make \u0026amp; \u0026amp; make install, Lua cjson needs to pay attention to:\nModify a path in makefile\nLUA_INCLUDE_DIR ?= /usr/local/LuaJIT/include/luajit-2.0\nReference:https://blog.csdn.net/ymeputer/article/details/50146143 \nskywalking-nginx-lua-0.3.0+ For skywalking-nginx-lua-0.3.0+, tablepool support needs to be added, but it seems that cjson is not required\nlua-resty-core https://github.com/openresty/lua-resty-core lua-resty-lrucache https://github.com/openresty/lua-resty-lrucache lua-tablepool https://github.com/openresty/lua-tablepool lua_ package_ path \u0026#34;/path/to/lua-resty-core/lua-resty-core-master/lib/?.lua;/path/to/lua-resty-lrucache-0.11/lib/?.lua;/path/to/lua-tablepool-master/lib/?.lua;/path/to/skywalking-nginx-lua-master/lib/?.lua;;\u0026#34;; tablepool introduces two APIs according to its official documents table new and table. Clear requires luajit2.1, there is a paragraph in the skywalking-nginx-lua document that says you can use \u0026lsquo;require (\u0026ldquo;skywalking. Util\u0026rdquo;) disable_ Tablepool() ` disable tablepool\nWhen you start nginx, you will be prompted to install openresty\u0026rsquo;s own [luajit version]( https://github.com/openresty/luajit2 )\ndetected a LuaJIT version which is not OpenResty\u0026#39;s; many optimizations will be disabled and performance will be compromised (see https://github.com/openresty/luajit2 for OpenResty\u0026#39;s LuaJIT or, even better, consider using the OpenResty releases from https://openresty.org/en/download.html ) here is successful configuration:\nhttp { lua_package_path \u0026#34;/path/to/lua-resty-core/lua-resty-core-master/lib/?.lua;/path/to/lua-resty-lrucache-0.11/lib/?.lua;/path/to/lua-tablepool-master/lib/?.lua;/path/to/skywalking-nginx-lua-master/lib/?.lua;;\u0026#34;; # Buffer represents the register inform and the queue of the finished segment lua_shared_dict tracing_buffer 100m; # Init is the timer setter and keeper # Setup an infinite loop timer to do register and trace report. 
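# The init_worker_by_lua_block below runs once in each Nginx worker at startup:
# it stores the service name and instance name in the tracing_buffer shared dict
# declared above, seeds the random generator, loads the skywalking.tracer module
# used in the location block further down, and starts the background timer that
# registers with and reports traces to the SkyWalking OAP HTTP endpoint
# (http://127.0.0.1:12800 in this example; point it at your own OAP address).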
init_worker_by_lua_block { local metadata_buffer = ngx.shared.tracing_buffer -- Set service name metadata_buffer:set(\u0026#39;serviceName\u0026#39;, \u0026#39;User Service Name\u0026#39;) -- Instance means the number of Nginx deployment, does not mean the worker instances metadata_buffer:set(\u0026#39;serviceInstanceName\u0026#39;, \u0026#39;User Service Instance Name\u0026#39;) -- type \u0026#39;boolean\u0026#39;, mark the entrySpan include host/domain metadata_buffer:set(\u0026#39;includeHostInEntrySpan\u0026#39;, false) -- set random seed require(\u0026#34;skywalking.util\u0026#34;).set_randomseed() require(\u0026#34;skywalking.client\u0026#34;):startBackendTimer(\u0026#34;http://127.0.0.1:12800\u0026#34;) -- If there is a bug of this `tablepool` implementation, we can -- disable it in this way -- require(\u0026#34;skywalking.util\u0026#34;).disable_tablepool() skywalking_tracer = require(\u0026#34;skywalking.tracer\u0026#34;) } server { listen 8090; location /ingress { default_type text/html; rewrite_by_lua_block { ------------------------------------------------------ -- NOTICE, this should be changed manually -- This variable represents the upstream logic address -- Please set them as service logic name or DNS name -- -- Currently, we can not have the upstream real network address ------------------------------------------------------ skywalking_tracer:start(\u0026#34;upstream service\u0026#34;) -- If you want correlation custom data to the downstream service -- skywalking_tracer:start(\u0026#34;upstream service\u0026#34;, {custom = \u0026#34;custom_value\u0026#34;}) } -- Target upstream service proxy_pass http://127.0.0.1:8080/backend; body_filter_by_lua_block { if ngx.arg[2] then skywalking_tracer:finish() end } log_by_lua_block { skywalking_tracer:prepareForReport() } } } } Original post:https://www.cnblogs.com/kebibuluan/p/14440228.html\n","title":"How to integrate skywalking-nginx-lua to Nginx?","url":"/blog/2021-12-13-skywalking-nginx-agent-integration/"},{"content":"SkyWalking 8.9.1 is released. Go to downloads page to find release tars.\nChanges by Version\nProject  Upgrade log4j2 to 2.15.0 for CVE-2021-44228. This CVE only effects on JDK versions below 6u211, 7u201, 8u191 and 11.0.1 according to the post. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true also avoids CVE if your JRE opened JNDI in default.  ","title":"Release Apache SkyWalking APM 8.9.1","url":"/events/release-apache-skywalking-apm-8-9-1/"},{"content":"In the field of observability, the three main directions of data collection and analysis, Metrics, Logger and Tracing, are usually used to achieve insight into the operational status of applications.\nApache APISIX has integrated Apache SkyWaling Tracing capabilities as early as version 1.4, with features such as error logging and access log collection added in subsequent versions. Now with Apache SkyWalking\u0026rsquo;s support for Metrics, it enables Apache APISIX to implement a one-stop observable solution in integrated mode, covering both logging, metrics and call tracing.\nFeature Development Background Those of you who are familiar with Apache APISIX should know that Apache APISIX produces two types of logs during operation, namely the access log and the error log.\nAccess logs record detailed information about each request and are logs generated within the scope of the request, so they can be directly associated with Tracing. 
Error logs, on the other hand, are Apache APISIX runtime output log messages, which are application-wide logs, but cannot be 100% associated with requests.\nAt present, Apache APISIX provides very rich log processing plug-ins, including TCP/HTTP/Kafka and other collection and reporting plug-ins, but they are weakly associated with Tracing. Take Apache SkyWalking as an example. We extract the SkyWalking Tracing Conetxt Header from the log records of Apache APISIX and export it to the file system, and then use the log processing framework (fluentbit) to convert the logs into a log format acceptable to SkyWalking. The Tracing Context is then parsed and extracted to obtain the Tracing ID to establish a connection with the Trace.\nObviously, the above way of handling the process is tedious and complicated, and requires additional conversion of log formats. For this reason, in PR#5500 we have implemented the Apache SkyWalking access log into the Apache APISIX plug-in ecosystem to make it easier for users to collect and process logs using Apache SkyWalking in Apache APISIX.\nIntroduction of the New Plugins SkyWalking Logger Pulgin The SkyWalking Logger plugin parses the SkyWalking Tracing Context Header and prints the relevant Tracing Context information to the log, thus enabling the log to be associated with the call chain.\nBy using this plug-in, Apache APISIX can get the SkyWalking Tracing Context and associate it with Tracing even if the SkyWalking Tracing plug-in is not turned on, if Apache SkyWalking is already integrated downstream.\nThe above Content is the log content, where the Apache APISIX metadata configuration is used to collect request-related information. You can later modify the Log Format to customize the log content by Plugin Metadata, please refer to the official documentation.\nHow to Use When using this plugin, since the SkyWalking plugin is \u0026ldquo;not enabled\u0026rdquo; by default, you need to manually modify the plugins section in the conf/default-apisix.yaml file to enable the plugin.\nplugins:...- error-log-logger...Then you can use the SkyWalking Tracing plug-in to get the tracing data directly, so you can verify that the Logging plug-in-related features are enabled and working properly.\nStep 1: Create a route Next, create a route and bind the SkyWalking Tracing plugin and the SkyWalking Logging plugin. More details of the plugin configuration can be found in the official Apache APISIX documentation.\ncurl -X PUT \u0026#39;http://192.168.0.108:9080/apisix/admin/routes/1001\u0026#39; \\ -H \u0026#39;X-API-KEY: edd1c9f034335f136f87ad84b625c8f1\u0026#39; \\ -H \u0026#39;Content-Type: application/json\u0026#39; \\ -d \u0026#39;{ \u0026#34;uri\u0026#34;: \u0026#34;/get\u0026#34;, \u0026#34;plugins\u0026#34;: { \u0026#34;skywalking\u0026#34;: { \u0026#34;sample_ratio\u0026#34;: 1 }, \u0026#34;skywalking-logger\u0026#34;: { \u0026#34;endpoint_addr\u0026#34;: \u0026#34;http://127.0.0.1:12800\u0026#34; } }, \u0026#34;upstream\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;roundrobin\u0026#34;, \u0026#34;nodes\u0026#34;: { \u0026#34;httpbin.org:80\u0026#34;: 1 } } }\u0026#39; Step 2: Log Processing On the Apache SkyWalking side, you can use LAL (Logger Analysis Language) scripts for log processing, such as Tag extraction, SkyWalking metadata correction, and so on.\nThe main purpose of Tag extraction here is to facilitate subsequent retrieval and to add dependencies to the Metrics statistics. 
The following code can be used to configure the SkyWalking LAL script to complete the Tag extraction. For more information on how to use the SkyWalking LAL script, please refer to the official Apache SkyWalking documentation.\n# The default LAL script to save all logs, behaving like the versions before 8.5.0.rules:- name:defaultdsl:|filter { json { abortOnFailure false } extractor { tag routeId: parsed.route_id tag upstream: parsed.upstream tag clientIp: parsed.client_ip tag latency: parsed.latency } sink { } }After configuring the above LAL script in SkyWalking OAP Server the following log will be displayed.\nDetails of the expanded log are as follows.\nAs you can see from the above, displaying routeId, upstream and clientIp as key-value pairs is much easier than searching directly in the log body. This is because the Tag format not only supports log display format and search, but also generates information such as Metrics using MAL statistics.\nSkyWalking Error Logger Plugin The error-log-logger plug-in now supports the SkyWalking log format, and you can now use the http-error-log plug-in to quickly connect Apache APISIX error logs to Apache SkyWalking. Currently, error logs do not have access to SkyWalking Tracing Context information, and therefore cannot be directly associated with SkyWalking Tracing.\nThe main reason for the error log to be integrated into SkyWalking is to centralize the Apache APISIX log data and to make it easier to view all observable data within SkyWalking.\nHow to Use Since the error-log-logger plugin is \u0026ldquo;not enabled\u0026rdquo; by default, you still need to enable the plugin in the way mentioned above.\nplugins:...- error-log-logger...Step 1: Bind the route After enabling, you need to bind the plugin to routes or global rules. Here we take \u0026ldquo;bind routes\u0026rdquo; as an example.\ncurl -X PUT \u0026#39;http://192.168.0.108:9080/apisix/admin/plugin_metadata/error-log-logger\u0026#39; \\ -H \u0026#39;X-API-KEY: edd1c9f034335f136f87ad84b625c8f1\u0026#39; \\ -H \u0026#39;Content-Type: application/json\u0026#39; \\ -d \u0026#39;{ \u0026#34;inactive_timeout\u0026#34;: 10, \u0026#34;level\u0026#34;: \u0026#34;ERROR\u0026#34;, \u0026#34;skywalking\u0026#34;: { \u0026#34;endpoint_addr\u0026#34;: \u0026#34;http://127.0.0.1:12800/v3/logs\u0026#34; } }\u0026#39;  Note that the endpoint_addr is the SkyWalking OAP Server address and needs to have the URI (i.e. /v3/logs).\n Step 2: LAL Processing In much the same way as the Access Log processing, the logs are also processed by LAL when they reach SkyWalking OAP Server. Therefore, we can still use the SkyWalking LAL script to analyze and process the log messages.\nIt is important to note that the Error Log message body is in text format. If you are extracting tags, you will need to use regular expressions to do this. 
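To give a feel for the kind of regular expression involved, here is a small, purely illustrative Java sketch (separate from the LAL script shown below; the sample log line and class name are made up) that pulls the level, module, position and function out of a typical Nginx-style APISIX error log line using named groups:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch: extract tag candidates from an APISIX (Nginx-style) error log line.
public class ErrorLogTagExtractor {
    // Same shape as the error lines handled by the LAL script below, e.g.
    // "2021/12/08 15:04:05 [error] 123#456: *789 [lua] handler.lua:42: phase_func(): some message"
    private static final Pattern LINE = Pattern.compile(
        "(?<datetime>\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2}) " +
        "\\[(?<level>\\w+)\\] \\d+#\\d+:" +
        "( \\*\\d+ \\[(?<module>\\w+)\\] (?<position>.*\\.lua:\\d+): (?<function>\\w+\\(\\)):)? (?<msg>.+)");

    public static void main(String[] args) {
        String line = "2021/12/08 15:04:05 [error] 123#456: *789 [lua] handler.lua:42: phase_func(): something went wrong";
        Matcher m = LINE.matcher(line);
        if (m.matches()) {
            System.out.println("level=" + m.group("level"));       // -> error
            System.out.println("module=" + m.group("module"));     // -> lua
            System.out.println("position=" + m.group("position")); // -> handler.lua:42
            System.out.println("msg=" + m.group("msg"));
        }
    }
}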
Unlike Access Log, which handles the message body in a slightly different way, Acces Log uses JSON format and can directly reference the fields of the JSON object using JSON parsing, but the rest of the process is largely the same.\nTags can also be used to optimize the display and retrieval for subsequent metrics calculations using SkyWalking MAL.\nrules: - name: apisix-errlog dsl: | filter { text { regexp \u0026#34;(?\u0026lt;datetime\u0026gt;\\\\d{4}/\\\\d{2}/\\\\d{2} \\\\d{2}:\\\\d{2}:\\\\d{2}) \\\\[(?\u0026lt;level\u0026gt;\\\\w+)\\\\] \\\\d+\\\\#\\\\d+:( \\\\*\\\\d+ \\\\[(?\u0026lt;module\u0026gt;\\\\w+)\\\\] (?\u0026lt;position\u0026gt;.*\\\\.lua:\\\\d+): (?\u0026lt;function\u0026gt;\\\\w+\\\\(\\\\)):)* (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level if (parsed?.module) { tag module: parsed.module tag position: parsed.position tag function: parsed.function } } sink { } } After the LAL script used by SkyWalking OAP Server, some of the Tags will be extracted from the logs, as shown below.\nSummary This article introduces two logging plug-ins for Apache APISIX that integrate with SkyWalking to provide a more convenient operation and environment for logging in Apache APISIX afterwards.\nWe hope that through this article, you will have a fuller understanding of the new features and be able to use Apache APISIX for centralized management of observable data more conveniently in the future.\n","title":"Apache APISIX Integrates with SkyWalking to Create a Full Range of Log Processing","url":"/blog/2021-12-08-apisix-integrate-skywalking-plugin/apisix-integrate-skywalking-plugin/"},{"content":"This document is one of the outcomes of Apache IoTDB - Apache SkyWalking Adapter in Summer 2021 of Open Source Promotion Plan. The design and development work is under the guidance of @jixuan1989 from IoTDB and @wu-sheng from SkyWalking. Thanks for their guidance and the help from community.\nStart with SkyWalking Showcase Before using SkyWalking Showcase to quick start with IoTDB, please ensure your have make installed and Docker daemon running.\nPlease run the command below.\ngit clone https://github.com/LIU-WEI-git/skywalking-showcase.git cd skywalking-showcase make deploy.docker FEATURE_FLAGS=single-node.iotdb,agent The former variable single-node.iotdb will deploy only one single node of SkyWalking OAP-v8.9.0, and SkyWalking RocketBot UI-v8.9.0, IoTDB-v0.12.3 as storage. The latter variable agent will deploy micro-services with SkyWalking agent enabled, which include agents for Java, NodeJS server, browser, Python.\nThese shell command maybe take a long while. After pulling and running docker image, please visit http://localhost:9999/. Then you will see the SkyWalking UI and data from OAP backend.\nIf you want to use more functions of SkyWalking Showcase, please visit its official document and clone official repository.\nStart Manually If you want to download and run IoTDB and SkyWalking manually, here is the guidance.\nInstall and Run IoTDB Apache IoTDB (Database for Internet of Things) is an IoT native database with high performance for data management and analysis, deployable on the edge and the cloud. It is a time-series database storage option for SkyWalking now. Please ensure your IoTDB server version \u0026gt;= 0.12.3 and a single node version is sufficient. For more installation details, please see official document: IoTDB Quick Start and IoTDB Download Page. 
You could download it from Docker Hub as well.\nThere is some connection tools for IoTDB\n Command Line Interface(CLI)\nIf iotdb-cli connects successfully, you will see   _____ _________ ______ ______ |_ _| | _ _ ||_ _ `.|_ _ \\ | | .--.|_/ | | \\_| | | `. \\ | |_) | | | / .'`\\ \\ | | | | | | | __'. _| |_| \\__. | _| |_ _| |_.' /_| |__) | |_____|'.__.' |_____| |______.'|_______/ version x.x.x IoTDB\u0026gt; login successfully IoTDB\u0026gt;  IoTDB-Grafana\nIoTDB-Grafana is a connector which we developed to show time series data in IoTDB by reading data from IoTDB and sends to Grafana.  Zeppelin-IoTDB\nYou could enable Zeppelin to operate IoTDB via SQL.   For more ecosystem integration, please visit official documents.\nWe will use iotdb-cli in the next examples.\nRun SkyWalking OAP Server There are some SkyWalking official documents which will help you start. Please ensure your SkyWalking version \u0026gt;= 8.9.0. We recommend you download SkyWalking OAP distributions from its official download page or pull docker images.\n SkyWalking Download Page SkyWalking Backend Setup SkyWalking UI Setup  Before starting SkyWalking backend, please edit /config/application.yml, set storage.selector: ${SW_STORAGE:iotdb} or set environment variable SW_STORAGE=iotdb. All config options about IoTDB is following, please edit it or not according to your local environment:\nstorage:selector:${SW_STORAGE:iotdb}iotdb:host:${SW_STORAGE_IOTDB_HOST:127.0.0.1}rpcPort:${SW_STORAGE_IOTDB_RPC_PORT:6667}username:${SW_STORAGE_IOTDB_USERNAME:root}password:${SW_STORAGE_IOTDB_PASSWORD:root}storageGroup:${SW_STORAGE_IOTDB_STORAGE_GROUP:root.skywalking}sessionPoolSize:${SW_STORAGE_IOTDB_SESSIONPOOL_SIZE:16}fetchTaskLogMaxSize:${SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE:1000}# the max number of fetch task log in a requestVisit IoTDB Server and Query SkyWalking Data There are some official document about data model and IoTDB-SQL language:\n Data Model and Terminology DDL (Data Definition Language) DML (Data Manipulation Language) Maintenance Command  Example Model and Insert SQL Before giving any example, we set time display type as long (CLI: set time_display_type=long).\nIn our design, we choose id, entity_id, node_type, service_id, service_group, trace_id as indexes and fix their appearance order. The value of these indexed fields store in the path with double quotation mark wrapping, just like \u0026quot;value\u0026quot;.\nThere is a model named service_traffic with fields id, time_bucket, name, node_type, service_group. In order to see its data, we could use a query SQL: select * from root.skywalking.service_traffic align by device. root.skywalking is the default storage group and align by device could return a more friendly result. The query result is following:\n   Time Device name     1637919540000 root.skywalking.service_traffic.\u0026ldquo;YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo;.\u0026ldquo;0\u0026rdquo;.\u0026quot;\u0026quot; application-demo   1637919600000 root.skywalking.service_traffic.\u0026ldquo;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026rdquo;.\u0026ldquo;0\u0026rdquo;.\u0026quot;\u0026quot; application-demo-mysql    Another example model is service_cpm which has fields id, service_id, total, value. Query its data with select * from root.skywalking.service_cpm align by device. 
The result is following:\n   Time Device total value     1637919540000 root.skywalking.service_cpm.\u0026ldquo;202111261739_YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo;.\u0026ldquo;YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo; 2 2   1637919600000 root.skywalking.service_cpm.\u0026ldquo;202111261740_YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026rdquo;.\u0026ldquo;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026rdquo; 1 1   1637917200000 root.skywalking.service_cpm.\u0026ldquo;2021112617_YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo;.\u0026ldquo;YXBwbGljYXRpb24tZGVtbw==.1\u0026rdquo; 2 0    For the first data of service_traffic, the mapping between fields and values is following. Notice, all time_bucket are converted to timestamp(also named time in IoTDB) and the value of all indexed fields are stored in the Device path.\n   Field Value     id(indexed) YXBwbGljYXRpb24tZGVtbw==.1   time(converted from time_bucket) 1637919540000   name application-demo   node_type(indexed) 0   service_group(indexed) (empty string)    You could use the SQL below to insert example data.\ncreate storage group root.skywalking insert into root.skywalking.service_traffic.\u0026#34;YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;.\u0026#34;0\u0026#34;.\u0026#34;\u0026#34;(timestamp, name) values(1637919540000, \u0026#34;application-demo\u0026#34;) insert into root.skywalking.service_traffic.\u0026#34;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026#34;.\u0026#34;0\u0026#34;.\u0026#34;\u0026#34;(timestamp, name) values(1637919600000, \u0026#34;application-demo-mysql\u0026#34;) insert into root.skywalking.service_cpm.\u0026#34;202111261739_YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;.\u0026#34;YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;(timestamp, total, value) values(1637919540000, 2, 2) insert into root.skywalking.service_cpm.\u0026#34;202111261740_YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026#34;.\u0026#34;YXBwbGljYXRpb24tZGVtby1teXNxbA==.1\u0026#34;(timestamp, total, value) values(1637919600000, 1, 1) insert into root.skywalking.service_cpm.\u0026#34;2021112617_YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;.\u0026#34;YXBwbGljYXRpb24tZGVtbw==.1\u0026#34;(timestamp, total, value) values(1637917200000, 2, 0) Query SQL Now, let\u0026rsquo;s show some query examples.\n  Filter Query\n If you want to query name field of service_traffic, the query SQL is select name from root.skywalking.service_traffic align by device. If you want to query service_traffic with id = YXBwbGljYXRpb24tZGVtbw==.1, the query SQL is select * from root.skywalking.service_traffic.\u0026quot;YXBwbGljYXRpb24tZGVtbw==.1\u0026quot; align by device. If you want to query service_traffic with name = application-demo, the query SQL is select * from root.skywalking.service_traffic where name = \u0026quot;application-demo\u0026quot; align by device. Combining the above three, the query SQL is select name from root.skywalking.service_traffic.\u0026quot;YXBwbGljYXRpb24tZGVtbw==.1\u0026quot; where name = \u0026quot;application-demo\u0026quot; align by device.    Fuzzy Query\n If you want to query service_traffic with name contains application, the query SQL is select * from root.skywalking.service_traffic.*.*.* where name like '%application%' align by device.    Aggregate Query\nIoTDB only supports group by time and group by level. The former please refer to Down-Frequency Aggregate Query and the latter please refer to Aggregation By Level. Here is an example about group by level: select sum(total) from root.skywalking.service_cpm.*.* group by level = 3. 
We couldn\u0026rsquo;t get a expected result since our design make the data of one model spread across multiple devices. So we don\u0026rsquo;t recommend using group by level to query SkyWalking backend data. You could refer to the Discussion #3907 in IoTDB community for more details.\n  Sort Query\nIoTDB only supports order by time, but we could use its select function which contains top_k and bottom_k to get top/bottom k data. For example, select top_k(total, \u0026quot;k\u0026quot;=\u0026quot;3\u0026quot;) from root.skywalking.service_cpm.*.*. We don\u0026rsquo;t recommend using this to query SkyWalking backend data since its result is not friendly. You could refer to the Discussion #3888 in IoTDB community for more details.\n  Pagination Query\nWe could use limit and offset to paginate the query result. Please refer to Row and Column Control over Query Results.\n  Delete\n Delete storage group:  delete storage group root.skywalking   Delete timeseries:  delete timeseries root.skywalking.service_cpm.*.*.total delete timeseries root.skywalking.service_cpm.\u0026quot;202111261739_YXBwbGljYXRpb24tZGVtbw==.1\u0026quot;.\u0026quot;YXBwbGljYXRpb24tZGVtbw==.1\u0026quot;.total   Delete data:  delete from root.skywalking.service_traffic delete from root.skywalking.service_traffic where time \u0026lt; 1637919540000      ","title":"The Application Guide of Apache IoTDB Storage Option","url":"/blog/2021-12-08-application-guide-of-iotdb-storage-option/"},{"content":"Non-breaking breakpoints are breakpoints specifically designed for live production environments. With non-breaking breakpoints, reproducing production bugs locally or in staging is conveniently replaced with capturing them directly in production.\nLike regular breakpoints, non-breaking breakpoints can be:\n placed almost anywhere added and removed at will set to fire on specific conditions expose internal application state persist as long as desired (even between application reboots)  The last feature is especially useful given non-breaking breakpoints can be left in production for days, weeks, and even months at a time while waiting to capture behavior that happens rarely and unpredictably.\nHow do non-breaking breakpoints work? If you\u0026rsquo;re familiar with general distributed tracing concepts, such as \u0026ldquo;traces\u0026rdquo; and \u0026ldquo;spans\u0026rdquo;, then you\u0026rsquo;re already broadly familiar with how non-breaking breakpoints work. Put simply, non-breaking breakpoints are small fragments of code added during runtime that, upon the proper conditions, save a portion of the application\u0026rsquo;s current state, and resume normal execution. In SkyWalking, this can be implemented by simply opening a new local span, adding some tags, and closing the local span.\nWhile this process is relatively simple, the range of functionality that can be achieved through this technique is quite impressive. Save the current and global variables to create a non-breaking breakpoint; add the ability to format log messages to create just-in-time logging; add the ability to trigger metric telemetry to create real-time KPI monitoring. If you keep moving in this direction, you eventually enter the realm of live debugging/coding, and this is where Source++ comes in.\nLive Coding Platform Source++ is an open-source live coding platform designed for production environments, powered by Apache SkyWalking. 
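As a rough illustration of the local-span technique described in the previous section, the following Java sketch uses the SkyWalking Java agent's tracing toolkit (the apm-toolkit-trace dependency, assuming the agent is attached and the toolkit activation is enabled): the @Trace annotation opens and closes a local span around the method, and ActiveSpan.tag records the captured state as tags, so execution never pauses. The condition, method and tag names are application-specific placeholders.

import org.apache.skywalking.apm.toolkit.trace.ActiveSpan;
import org.apache.skywalking.apm.toolkit.trace.Trace;

// Sketch of a "non-breaking breakpoint": when the condition fires, record the
// interesting local state as tags on a local span and let execution continue.
public class OrderService {

    public void placeOrder(String userId, int quantity) {
        if (quantity <= 0) {                    // the "breakpoint" condition
            captureSnapshot(userId, quantity);  // save state, never pause the application
        }
        // ... normal processing continues ...
    }

    @Trace(operationName = "nbb/placeOrder")    // opens and closes a local span around this method
    void captureSnapshot(String userId, int quantity) {
        ActiveSpan.tag("nbb.userId", userId);   // expose internal state as span tags
        ActiveSpan.tag("nbb.quantity", String.valueOf(quantity));
        ActiveSpan.tag("nbb.thread", Thread.currentThread().getName());
    }
}

Source++ builds on the same idea, but injects such fragments dynamically at runtime instead of requiring them to be written into the application ahead of time.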
Using Source++, developers can add breakpoints, logs, metrics, and distributed tracing to live production software in real-time on-demand, right from their IDE or CLI. While capable of stand-alone deployment, the latest version of Source++ makes it easier than ever to integrate into existing Apache SkyWalking installations. This process can be completed in a few minutes and is easy to customize for your specific needs.\nFor a better idea of how Source++ works, take a look at the following diagram:\nIn this diagram, blue components represent existing SkyWalking architecture, black components represent new Source++ architecture, and the red arrows show how non-breaking breakpoints make their way from production to IDEs. A process that is facilitated by Source++ components: Live Probe, Live Processors, Live Platform, and Live Interface.\nLive Probe The Live Probe is currently available for JVM and Python applications. It runs alongside the SkyWalking agent and is responsible for dynamically adding and removing code fragments based on valid instrumentation requests from developers. These code fragments in turn make use of the SkyWalking agent\u0026rsquo;s internal APIs to facilitate production instrumentation.\nLive Processors Live Processors are responsible for finding, extracting, and transforming data found in distributed traces produced via live probes. They run alongside SkyWalking collectors and implement additional post-processing logic, such as PII redaction. Live processors work via uniquely identifiable tags (prefix spp.) added previously by live probes.\nOne could easily view a non-breaking breakpoint ready for processing using Rocketbot, however, it will look like this:\nEven though the above does not resemble what\u0026rsquo;s normally thought of as a breakpoint, the necessary information is there. With live processors added to your SkyWalking installation, this data is refined and may be viewed more traditionally via live interfaces.\nLive Platform The Live Platform is the core part of the Source++ architecture. Unlike the live probe and processors, the live platform does not have a direct correlation with SkyWalking components. It is a standalone server responsible for validating and distributing production breakpoints, logs, metrics, and traces. Each component of the Source++ architecture (probes, processors, interfaces) communicates with each other through the live platform. It is important to ensure the live platform is accessible to all of these components.\nLive Interface Finally, with all the previous parts installed, we\u0026rsquo;re now at the component software developers will find the most useful. A Live Interface is what developers use to create, manage, and view non-breaking breakpoints, and so on. There are a few live interfaces available:\n JetBrains Plugin CLI  With the Live Instrument Processor enabled, and the JetBrains Plugin installed, non-breaking breakpoints appear as such:\nThe above should be a sight far more familiar to software developers. Beyond the fact that you can\u0026rsquo;t step through execution, non-breaking breakpoints look and feel just like regular breakpoints.\n For more details and complete setup instructions, please visit:\n https://github.com/sourceplusplus/deploy-skywalking  ","title":"Extending Apache SkyWalking with non-breaking breakpoints","url":"/blog/2021-12-06-extend-skywalking-with-nbb/"},{"content":"SkyWalking Kubernetes Helm Chart 4.2.0 is released. 
Go to downloads page to find release tars.\n Fix Can\u0026rsquo;t evaluate field Capabilities in type interface{}. Update the document let that all docker images use the latest version. Fix missing nodes resource permission when the OAP using k8s-mesh analyzer. Fix bug that customized config files are not loaded into es-init job. Add skywalking satellite support.  ","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.2.0","url":"/events/release-apache-skywalking-kubernetes-helm-chart-4.2.0/"},{"content":"SkyWalking Satellite 0.4.0 is released. Go to downloads page to find release tars.\nFeatures  Support partition queue. Using byte array to transmit the ALS streaming, Native tracing segment and log, reducing en/decoding cpu usage. Support using the new ALS protocol to transmit the Envoy accesslog. Support transmit the Native Meter Batch protocol.  Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 0.4.0","url":"/events/release-apache-skwaylking-satellite-0-4-0/"},{"content":"SkyWalking 8.9.0 is released. Go to downloads page to find release tars.\nChanges by Version\nProject  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. 
The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  
All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.9.0","url":"/events/release-apache-skywalking-apm-8-9-0/"},{"content":"Chaos Mesh 是一个开源的云原生混沌工程平台,借助 Chaos Mesh,用户可以很方便地对服务注入异常故障,并配合 Chaos Dashboard 实现对整个混沌实验运行状况的监测 。然而,对混沌实验运行情况的监控并不能告诉我们应用服务性能的变化。从系统可观测性的角度来说,我们可能无法单纯通过混沌实验的动态了解故障的全貌,这也阻碍了我们对系统和故障的进一步了解,调试。\nApache SkyWalking 是一个开源的 APM (Application Performance Monitor) 系统,可以对云原生服务提供监控、跟踪、诊断等功能。SkyWalking 支持收集 Event(事件),可在 Dashboard 中查看分布式系统中发生了哪些事件,并可以直观地观测到不同 Event 对服务性能造成的影响,和 Chaos Mesh 结合使用,便可为混沌实验造成的服务影响提供监控。\n本教程将分享如何通过将 SkyWalking 和 Chaos Mesh 结合,运用 Event 信息监控,实时了解混沌实验对应用服务性能造成的影响。\n准备工作  创建 Skywalking 集群,具体可以参考 SkyWalking Readme。 部署 Chaos Mesh,推荐使用 helm 安装。 安装 Java 测试工具 JMeter (其他工具亦可,仅用于增加服务负载) 如果仅作为 Demo 使用,可以参考 chaos-mesh-on-skywalking 这个仓库进行配置  Step 1 - 访问 SkyWalking 集群 安装 SkyWalking 后,就可以访问它的UI了,但因为还没有服务进行监控,这里还需要添加服务并进行 Agent 埋点设置。本文选用轻量级微服务框架 Spring Boot 作为埋点对象搭建一个简易 Demo 环境。\n可以参考 chaos-mesh-on-skywalking 仓库中的 demo-deployment.yaml 文件创建。之后使用 kubectl apply -f demo-deployment.yaml -n skywalking 进行部署。部署成功后即可在SkyWalking-UI 中看到实时监控的服务信息。\n注意:因为 Spring Boot 的端口也是8080,在端口转发时要避免和 **SkyWalking **的端口冲突,比如使用 kubectl port-forward svc/spring-boot-skywalking-demo 8079:8080 -n skywalking 。\nStep 2 - 部署 SkyWalking Kubernetes Event Exporter SkyWalking Kubernetes Event Exporter 可以用来监控和过滤 Kubernetes 集群中的 Event ,通过设置过滤条件筛选出需要的 Event,并将这些 Event 发送到 SkyWalking 后台, 这样就可以通过 SkyWalking 观察到你的 Kubernetes 集群中的Event 何时影响到服务的各项指标了。如果想要一条命令部署,可以参考此配置创建 yaml 文件 ,设置 filters 和 exporters 的参数后,使用 kubectl apply 进行部署。\nStep 3 - 使用 JMeter 对服务加压 为了达到更好的观察效果,需要先对 Spring Boot 增加服务负载,本文选择使用 JMeter 这一使用广泛的 Java 压力测试工具来对服务加压。\n通过 JMeter 对 localhost:8079 进行压测,添加5个线程持续进行加压。 通过 SkyWalking Dashboard 可以看到,目前访问成功率为100%,服务负载大约在5300 CPM (Calls Per Minute)。\nStep 4 - Chaos Mesh 注入故障,观察效果 做好了这些准备工便可以使用 Chaos Dashboard 进行压力场景模拟,并在实验进程中观察服务性能的变化。\n以下使用不同 Stress Chaos 配置,观测对应服务性能变化:\n  CPU 负载10%,内存负载128 MB 。\n混沌实验开始和结束的时间点标记可以通过右侧开关显示在在图表中,将鼠标移至短线出可以看到是实验的 Applied 或 Recovered。可以看到两个绿色短线之间的时间段里,服务处理调用的的性能降低,为4929 CPM,在实验结束后,性能恢复正常。\n  CPU load 增加到50%,发现服务负载进一步降低至4307 CPM。\n  极端情况下 CPU 负载达到100%,服务负载降至无混沌实验时的40% 。\n  因为 Linux 系统下的进程调度并不会让某个进程一直占据 CPU,所以即使实在 CPU 满载的极端情况下,该部署的 Spring Boot Demo 仍可以处理40%的访问请求。\n小结 通过 SkyWalking 与 Chaos Mesh 的结合,我们可以清晰的观察到服务在何时受到混沌实验的影响,在注入混沌后服务的表现性能又将如何。SkyWalking 与 Chaos Mesh 的结合使得我们轻松地观察到了服务在各种极端情况下的表现,增强了我们对服务的信心。\nChaos Mesh 在 2021 年成长了许多。为了更多地了解用户在实践混沌工程方面的经验,以便持续完善和提升对用户的支持,社区发起了 Chaos Mesh 用户问卷调查,点击【阅读原文】参与调查,谢谢!\nhttps://www.surveymonkey.com/r/X78WQPC\n欢迎大家加入 Chaos Mesh 社区,加入 CNCF Slack (slack.cncf.io) 底下的 Chaos Mesh 频道: project-chaos-mesh,一起参与到项目的讨论与开发中来!大家在使用过程发现 Bug 或缺失什么功能,也可以直接在 GitHub (https://github.com/chaos-mesh) 上提 Issue 或 PR。\n","title":"Chaos Mesh X SkyWalking: 可观测的混沌工程","url":"/zh/2021-11-29-better-observability-for-chaos-engineering/"},{"content":"This plugin is one of the outcomes of Apache IoTDB - Apache SkyWalking Adapter in Summer 2021 of Open Source Promotion Plan. The design and development work is under the guidance of @jixuan1989 from IoTDB and @wu-sheng from SkyWalking. Thanks for their guidance and the help from community.\nIoTDB Storage Plugin Setup IoTDB is a time-series database from Apache, which is one of the storage plugin options. If you want to use iotdb as SkyWalking backend storage, please refer to the following configuration.\nIoTDB storage plugin is still in progress. 
Its efficiency will improve in the future.\nstorage:selector:${SW_STORAGE:iotdb}iotdb:host:${SW_STORAGE_IOTDB_HOST:127.0.0.1}rpcPort:${SW_STORAGE_IOTDB_RPC_PORT:6667}username:${SW_STORAGE_IOTDB_USERNAME:root}password:${SW_STORAGE_IOTDB_PASSWORD:root}storageGroup:${SW_STORAGE_IOTDB_STORAGE_GROUP:root.skywalking}sessionPoolSize:${SW_STORAGE_IOTDB_SESSIONPOOL_SIZE:16}fetchTaskLogMaxSize:${SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE:1000}# the max number of fetch task log in a requestAll connection related settings, including host, rpcPort, username, and password are found in application.yml. Please ensure the IoTDB version \u0026gt;= 0.12.3.\nIoTDB Introduction Apache IoTDB (Database for Internet of Things) is an IoT native database with high performance for data management and analysis, deployable on the edge and the cloud. It is a time-series database donated by Tsinghua University to Apache Foundation.\nThe Data Model of IoTDB We can use the tree structure to understand the data model of iotdb. If divided according to layers, from high to low is: Storage Group \u0026ndash; (LayerName) \u0026ndash; Device \u0026ndash; Measurement. From the top layer to a certain layer below it is called a Path. The top layer is Storage Group (must start with root), the penultimate layer is Device, and the bottom layer is Measurement. There can be many layers in the middle, and each layer is called a LayerName. For more information, please refer to the Data Model and Terminology in the official document of the version 0.12.x.\nThe Design of IoTDB Storage Plugin The Data Model of SkyWalking Each storage model of SkyWalking can be considered as a Model, which contains multiple Columns. Each Column has ColumnName and ColumnType attributes, representing the name and type of Column respectively. Each Column named ColumnName stores multiple Value of the ColumnType. From a relational database perspective, Model is a relational table and Column is the field in a relational table.\nSchema Design Since each LayerName of IoTDB is stored in memory, it can be considered as an index, and this feature can be fully utilized to improve IoTDB query performance. The default storage group is root.skywalking, it will occupy the first and the second layer of the path. The model name is stored at the next layer of the storage group (the third layer of the path), such as root.skywalking.model_name.\nSkyWalking has its own index requirement, but it isn\u0026rsquo;t applicable to IoTDB. Considering query frequency and referring to the implementation of the other storage options, we choose id, entity_id, node_type, service_id, service_group, trace_id as indexes and fix their appearance order in the path. The value of these indexed columns will occupy the last few layers of the path. If we don\u0026rsquo;t fix their order, we cannot map their value to column, since we only store their value in the path but don\u0026rsquo;t store their column name. The other columns are treated as Measurements.\nThe mapping from SkyWalking data model to IoTDB data model is below.\n   SkyWalking IoTDB     Database Storage Group (1st and 2nd layer of the path)   Model LayerName (3rd layer of the path)   Indexed Column stored in memory through hard-code   Indexed Column Value LayerName (after 3rd layer of the path)   Non-indexed Column Measurement   Non-indexed Value the value of Measurement    For general example There are model1(column11, column12), model2(column21, column22, column23), model3(column31). 
Underline indicates that the column requires to be indexed. In this example, modelx_name refers to the name of modelx, columnx_name refers to the name of columnx and columnx_value refers to the value of columnx.\nBefore these 3 model storage schema, here are some points we need to know.\n In order to avoid the value of indexed column contains dot(.), all of them should be wrapped in double quotation mark since IoTDB use dot(.) as the separator in the path. We use align by device in query SQL to get a more friendly result. For more information about align by device, please see DML (Data Manipulation Language) and Query by device alignment.  The path of them is following:\n The Model with index:  root.skywalking.model1_name.column11_value.column12_name root.skywalking.model2_name.column21_value.column22_value.column23_name   The Model without index:  root.skywalking.model3_name.column31_Name    Use select * from root.skywalking.modelx_name align by device respectively to get their schema and data. The SQL result is following:\n   Time Device column12_name     1637494020000 root.skywalking.model1_name.\u0026ldquo;column11_value\u0026rdquo; column12_value       Time Device column23_name     1637494020000 root.skywalking.model2_name.\u0026ldquo;column21_value\u0026rdquo;.\u0026ldquo;column22_value\u0026rdquo; column23_value       Time Device column31_name     1637494020000 root.skywalking.model3_name column31_value    For specific example Before 5 typical examples, here are some points we need to know.\n The indexed columns and their order: id, entity_id, node_type, service_id, service_group, trace_id. Other columns are treated as non indexed and stored as Measurement. The storage entity extends Metrics or Record contains a column time_bucket. The time_bucket column in SkyWalking Model can be converted to the timestamp of IoTDB when inserting data. We don\u0026rsquo;t need to store time_bucket separately. In the next examples, we won\u0026rsquo;t list time_bucket anymore. The Time in query result corresponds to the timestamp in insert SQL and API.   Metadata: service_traffic\nservice_traffic entity has 4 columns: id, name, node_type, service_group. When service_traffic entity includes a row with timestamp 1637494020000, the row should be as following: (Notice: the value of service_group is null.)     id name node_type service_group     ZTJlLXNlcnZpY2UtcHJvdmlkZXI=.1 e2e-service-provider 0     And the row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.service_traffic align by device)\n   Time Device name     1637494020000 root.skywalking.service_traffic.\u0026ldquo;ZTJlLXNlcnZpY2UtcHJvdmlkZXI=.1\u0026rdquo;.\u0026ldquo;0\u0026rdquo;.\u0026ldquo;null\u0026rdquo; e2e-service-provider    The value of id, node_type and service_group are stored in the path in the specified order. 
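As a concrete illustration of this mapping (the real plugin is Java code inside the OAP server), the sketch below builds the device path from the indexed values of the service_traffic example and writes the remaining column as a measurement, using the Apache IoTDB Python client; the connection settings are assumed defaults.

```python
# Sketch of the mapping above for the service_traffic example: indexed values
# become quoted path layers, non-indexed columns become measurements, and
# time_bucket becomes the IoTDB timestamp. Uses the Apache IoTDB Python client
# (pip install apache-iotdb); host/port/credentials are assumed defaults.
from iotdb.Session import Session

storage_group = "root.skywalking"
model = "service_traffic"
# indexed columns in their fixed order: id, node_type, service_group (null -> "null")
indexed_values = ["ZTJlLXNlcnZpY2UtcHJvdmlkZXI=.1", "0", "null"]
timestamp = 1637494020000  # converted from time_bucket
name = "e2e-service-provider"  # non-indexed column, stored as a measurement

# wrap every indexed value in double quotes so dots inside values don't split the path
device = ".".join([storage_group, model] + ['"%s"' % v for v in indexed_values])

session = Session("127.0.0.1", "6667", "root", "root")
session.open(False)
session.execute_non_query_statement(
    'insert into %s(timestamp, name) values(%d, "%s")' % (device, timestamp, name)
)
session.close()
```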
Notice: If those index value is null, it will be transformed to a string \u0026ldquo;null\u0026rdquo;.\nMetrics: service_cpm\nservice_cpm entity has 4 columns: id, service_id, total, value.\nWhen service_cpm entity includes a row with timestamp 1637494020000, the row should be as following:     id service_id total value     202111211127_ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1 ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1 4 4    And the row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.service_cpm align by device)\n   Time Device total value     1637494020000 root.skywalking.service_cpm.\u0026ldquo;202111211127_ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1\u0026rdquo;.\u0026ldquo;ZTJlLXNlcnZpY2UtY29uc3VtZXI=.1\u0026rdquo; 4 4    The value of id and service_id are stored in the path in the specified order.\nTrace segment: segment\nsegment entity has 10 columns at least: id, segment_id, trace_id, service_id, service_instance_id, endpoint_id, start_time, latency, is_error, data_binary. In addition, it could have variable number of tags.\nWhen segment entity includes 2 rows with timestamp 1637494106000 and 1637494134000, these rows should be as following. The db.type and db.instance are two tags. The first data has two tags, and the second data doesn\u0026rsquo;t have tag.     id segment_id trace_id service_id service_instance_id endpoint_id start_time latency is_error data_binary db.type db.instance     id_1 segment_id_1 trace_id_1 service_id_1 service_instance_id_1 endpoint_id_1 1637494106515 1425 0 data_binary_1 sql testdb   id_2 segment_id_2 trace_id_2 service_id_2 service_instance_id_2 endpoint_id_2 2637494106765 1254 0 data_binary_2      And these row stored in IoTDB should be as following: (Query SQL: select from root.skywalking.segment align by device)\n   Time Device start_time data_binary latency endpoint_id is_error service_instance_id segment_id \u0026ldquo;db.type\u0026rdquo; \u0026ldquo;db.instance\u0026rdquo;     1637494106000 root.skywalking.segment.\u0026ldquo;id_1\u0026rdquo;.\u0026ldquo;service_id_1\u0026rdquo;.\u0026ldquo;trace_id_1\u0026rdquo; 1637494106515 data_binary_1 1425 endpoint_id_1 0 service_instance_id_1 segment_id_1 sql testdb   1637494106000 root.skywalking.segment.\u0026ldquo;id_2\u0026rdquo;.\u0026ldquo;service_id_2\u0026rdquo;.\u0026ldquo;trace_id_2\u0026rdquo; 1637494106765 data_binary_2 1254 endpoint_id_2 0 service_instance_id_2 segment_id_2 null null    The value of id, service_id and trace_id are stored in the path in the specified order. Notice: If the measurement contains dot(.), it will be wrapped in double quotation mark since IoTDB doesn\u0026rsquo;t allow it. In order to align, IoTDB will append null value for those data without tag in some models.\nLog\nlog entity has 12 columns at least: id, unique_id, service_id, service_instance_id, endpoint_id, trace_id, trace_segment_id, span_id, content_type, content, tags_raw_data, timestamp. In addition, it could have variable number of tags. When log entity includes a row with timestamp 1637494052000, the row should be as following and the level is a tag.     
id unique_id service_id service_instance_id endpoint_id trace_id trace_segment_id span_id content_type content tags_raw_data timestamp level     id_1 unique_id_1 service_id_1 service_instance_id_1 endpoint_id_1 trace_id_1 trace_segment_id_1 0 1 content_1 tags_raw_data_1 1637494052118 INFO    And the row stored in IoTDB should be as follows: (Query SQL: select * from root.skywalking.log align by device)\n   Time Device unique_id content_type span_id tags_raw_data \u0026ldquo;timestamp\u0026rdquo; level service_instance_id content trace_segment_id     1637494052000 root.skywalking.\u0026ldquo;id_1\u0026rdquo;.\u0026ldquo;service_id_1\u0026rdquo;.\u0026ldquo;trace_id_1\u0026rdquo; unique_id_1 1 0 tags_raw_data_1 1637494052118 INFO service_instance_id_1 content_1 trace_segment_id_1    The values of id, service_id and trace_id are stored in the path in the specified order. Notice: if a measurement is named timestamp, it will be wrapped in double quotation marks since IoTDB doesn\u0026rsquo;t allow it.\nProfiling snapshots: profile_task_segment_snapshot\nThe profile_task_segment_snapshot entity has 6 columns: id, task_id, segment_id, dump_time, sequence, stack_binary. When profile_task_segment_snapshot includes a row with timestamp 1637494131000, the row should be as follows.     id task_id segment_id dump_time sequence stack_binary     id_1 task_id_1 segment_id_1 1637494131153 0 stack_binary_1    And the row stored in IoTDB should be as follows: (Query SQL: select * from root.skywalking.profile_task_segment_snapshot align by device)\n   Time Device sequence dump_time stack_binary task_id segment_id     1637494131000 root.skywalking.profile_task_segment_snapshot.\u0026ldquo;id_1\u0026rdquo; 0 1637494131153 stack_binary_1 task_id_1 segment_id_1    The value of id is stored in the path in the specified order.\nQuery In this design, part of the data is stored in memory through LayerName, so data from the same Model is spread across multiple devices. Queries often need to cross multiple devices, but IoTDB\u0026rsquo;s support for cross-device aggregation, sort and pagination queries is not perfect. In some cases, we have to use a brute-force method that queries all the data meeting the condition and then aggregates, sorts or paginates it, so it might not be efficient. For detailed descriptions, please refer to the Discussions submitted to the IoTDB community below.\n Discussion:  一个有关排序查询的问题(A problem about sort query)#3888 一个有关聚合查询的问题(A problem about aggregation query)#3907    Query SQL for the general example above:\n-- query all data in model1 select * from root.skywalking.model1_name align by device; -- query the data in model2 with column22_value=\u0026#34;test\u0026#34; select * from root.skywalking.model2_name.*.\u0026#34;test\u0026#34; align by device; -- query the sum of column23 in model2 and group by column21 select sum(column23) from root.skywalking.model2_name.*.* group by level = 3; iotdb-cli is a useful tool to connect to and access the IoTDB server. For more information, please refer to Command Line Interface (CLI)\n","title":"The Design of Apache IoTDB Storage Option","url":"/blog/2021-11-23-design-of-iotdb-storage-option/"},{"content":"SkyWalking Infra E2E 1.1.0 is released. Go to downloads page to find release tars.\nFeatures  Support using setup.init-system-environment to import environment. Support body and headers in http trigger. Add install target in makefile. Stop trigger when cleaning up. Change interval setting to Duration style. Add reasonable default cleanup.on. 
Support float value compare when type not match Support reuse verify.cases. Ignore trigger when not set. Support export KUBECONFIG to the environment. Support using setup.kind.import-images to load local docker images. Support using setup.kind.expose-ports to declare the resource port for host access. Support save pod/container std log on the Environment.  Bug Fixes  Fix that trigger is not continuously triggered when running e2e trigger. Migrate timeout config to Duration style and wait for node ready in KinD setup. Remove manifest only could apply the default namespace resource.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Infra E2E 1.1.0","url":"/events/release-apache-skywalking-infra-e2e-1-1-0/"},{"content":"SkyWalking Cloud on Kubernetes 0.4.0 is released. Go to downloads page to find release tars.\n  Support special characters in the metric selector of HPA metric adapter.\n  Add the namespace to HPA metric name.\n  Features\n Add Java agent injector. Add JavaAgent and Storage CRDs of the operator.    Vulnerabilities\n CVE-2021-3121: An issue was discovered in GoGo Protobuf before 1.3.2. plugin/unmarshal/unmarshal.go lacks certain index validation CVE-2020-29652: A nil pointer dereference in the golang.org/x/crypto/ssh component through v0.0.0-20201203163018-be400aefbc4c for Go allows remote attackers to cause a denial of service against SSH servers.    Chores\n Bump up GO to 1.17. Bump up k8s api to 0.20.11. Polish documents. Bump up SkyWalking OAP to 8.8.1.    ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.4.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-4-0/"},{"content":"SkyWalking Satellite 0.3.0 is released. Go to downloads page to find release tars.\nFeatures  Support load-balance GRPC client with the static server list. Support load-balance GRPC client with the Kubernetes selector. Support transmit Envoy ALS v2/v3 protocol. Support transmit Envoy Metrics v2/v3 protocol.  Bug Fixes  Fix errors when converting meter data from histogram and summary.#75  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 0.3.0","url":"/events/release-apache-skwaylking-satellite-0-3-0/"},{"content":"SkyWalking Java Agent 8.8.0 is released. Go to downloads page to find release tars. Changes by Version\n8.8.0  Split Java agent from the main monorepo. It is a separate repository and going to release separately. Support JDK 8-17 through upgrading byte-buddy to 1.11.18. Upgrade JDK 11 in dockerfile and remove unused java_opts. DataCarrier changes a #consume API to add properties as a parameter to initialize consumer when use Class\u0026lt;? extends IConsumer\u0026lt;T\u0026gt;\u0026gt; consumerClass. Support Multiple DNS period resolving mechanism Modify Tags.STATUS_CODE field name to Tags.HTTP_RESPONSE_STATUS_CODE and type from StringTag to IntegerTag, add Tags.RPC_RESPONSE_STATUS_CODE field to hold rpc response code value. Fix kafka-reporter-plugin shade package conflict Add all config items to agent.conf file for convenient containerization use cases. Advanced Kafka Producer configuration enhancement. Support mTLS for gRPC channel. fix the bug that plugin record wrong time elapse for lettuce plugin fix the bug that the wrong db.instance value displayed on Skywalking-UI when existing multi-database-instance on same host port pair. Add thrift plugin support thrift TMultiplexedProcessor. 
Add benchmark result for exception-ignore plugin and polish plugin guide. Provide Alibaba Druid database connection pool plugin. Provide HikariCP database connection pool plugin. Fix NumberFormat exception in jdbc-commons plugin when MysqlURLParser parser jdbcurl Provide Alibaba Fastjson parser/generator plugin. Provide Jackson serialization and deserialization plugin. Fix a tracing context leak of SpringMVC plugin, when an internal exception throws due to response can\u0026rsquo;t be found. Make GRPC log reporter sharing GRPC channel with other reporters of agent. Remove config items of agent.conf, plugin.toolkit.log.grpc.reporter.server_host, plugin.toolkit.log.grpc.reporter.server_port, and plugin.toolkit.log.grpc.reporter.upstream_timeout. rename plugin.toolkit.log.grpc.reporter.max_message_size to log.max_message_size. Implement Kafka Log Reporter. Add config item of agnt.conf, plugin.kafka.topic_logging. Add plugin to support Apache HttpClient 5. Format SpringMVC \u0026amp; Tomcat EntrySpan operation name to METHOD:URI. Make HTTP method in the operation name according to runtime, rather than previous code-level definition, which used to have possibilities including multiple HTTP methods. Fix the bug that httpasyncclient-4.x-plugin does not take effect every time. Add plugin to support ClickHouse JDBC driver. Fix version compatibility for JsonRPC4J plugin. Add plugin to support Apache Kylin-jdbc 2.6.x 3.x 4.x Fix instrumentation v2 API doesn\u0026rsquo;t work for constructor instrumentation. Add plugin to support okhttp 2.x Optimize okhttp 3.x 4.x plugin to get span time cost precisely Adapt message header properties of RocketMQ 4.9.x  Documentation All issues and pull requests are here\n","title":"Release Apache SkyWalking Java Agent 8.8.0","url":"/events/release-apache-skywalking-java-agent-8-8-0/"},{"content":"SkyWalking CLI 0.9.0 is released. Go to downloads page to find release tars.\nFeatures  Add the sub-command dependency instance to query instance relationships (#117)  Bug Fixes  fix: multiple-linear command\u0026rsquo;s labels type can be string type (#122) Add missing dest-service-id dest-service-name to metrics linear command (#121) Fix the wrong name when getting destInstance flag (#118)  Chores  Upgrade Go version to 1.16 (#120) Migrate tests to infra-e2e, overhaul the flags names (#119) Publish Docker snapshot images to ghcr (#116) Remove dist directory when build release source tar (#115)  ","title":"Release Apache SkyWalking CLI 0.9.0","url":"/events/release-apache-skywalking-cli-0-9-0/"},{"content":"SkyWalking Eyes 0.2.0 is released. 
Go to downloads page to find release tars.\n  Dependency License\n Support resolving go.mod for Go Support resolving pom.xml for maven (#50) Support resolving jars' licenses (#53) Support resolving npm dependencies' licenses (#48) Support saving dependencies' licenses (#69) Add dependency check to check dependencies license compatibilities (#58)    License Header\n fix command supports more languages:  Add support for plantuml (#42) Add support for PHP (#40) Add support for Twig template language (#39) Add support for Smarty template language (#38) Add support for MatLab files (#37) Add support for TypeScript language files (#73) Add support for nextflow files (#65) Add support for perl files (#63) Add support for ini extension (#24) Add support for R files (#64) Add support for .rst files and allow fixing header of a single file (#25) Add support for Rust files (#29) Add support for bat files (#32)   Remove .tsx from XML language extensions Honor Python\u0026rsquo;s coding directive (#68) Fix file extension conflict between RenderScript and Rust (#66) Add comment type to cython declaration (#62) header fix: respect user configured license content (#60) Expose license-location-threshold as config item (#34) Fix infinite recursive calls when containing symbolic files (#33) defect: avoid crash when no comment style is found (#23)    Project\n Enhance license identification (#79) Support installing via go install (#76) Speed up the initialization phase (#75) Resolve absolute path in .gitignore to relative path (#67) Reduce img size and add npm env (#59) Make the config file and log level in GitHub Action configurable (#56, #57) doc: add a PlantUML activity diagram of header fixing mechanism (#41) Fix bug: license file is not found but reported message is nil (#49) Add all well-known licenses and polish normalizers (#47) Fix compatibility issues in Windows (#44) feature: add reasonable default config to allow running in a new repo without copying config file (#28) chore: only build linux binary when building inside docker (#26) chore: upgrade to go 1.16 and remove go-bindata (#22) Add documentation about how to use via docker image (#20)    ","title":"Release Apache SkyWalking Eyes 0.2.0","url":"/events/release-apache-skywalking-eyes-0-2-0/"},{"content":"SkyWalking Client JS 0.7.0 is released. Go to downloads page to find release tars.\n Support setting time interval to report segments. Fix segments report only send once. Fix apache/skywalking#7335. Fix apache/skywalking#7793. Fix firstReportedError for SPA.  ","title":"Release Apache SkyWalking Client JS 0.7.0","url":"/events/release-apache-skywalking-client-js-0-7-0/"},{"content":"SkyWalking 8.8.1 is released. Go to downloads page to find release tars.\nThis is a bugfix version that fixes several important bugs in previous version 8.8.0.\nChanges OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.8.1","url":"/events/release-apache-skywalking-apm-8-8-1/"},{"content":"Kai Wan has been involved in SkyWalking for over half a year since the first PR(Dec 21, 2020). He majorly focuses on the Service Mesh and metrics analysis engine(MAL). 
And recently add the support of OpenAPI specification into SkyWalking.\nHe learnd fast, and dedicates hours every day on the project, and has finished 37 PRs 11,168 LOC++ 1,586 LOC\u0026ndash;. In these days, he is working with PMC and infra-e2e team to upgrade our main repository\u0026rsquo;s test framework to the NGET(Next Generation E2E Test framework).\nIt is our honor to have him join the team.\n","title":"Welcome Kai Wan (万凯) to join the PMC","url":"/events/welcome-kai-wan-to-join-the-pmc/"},{"content":"SkyWalking 8.8.0 is released. Go to downloads page to find release tars.\nThis is a first OAP server + UI release, Java agent will be release independently. Check the latest compatibility document to find suitable agent releases.\nChanges by Version\nProject  Split javaagent into skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. kubernetes java client upgrade from 12.0.1 to 13.0.0 Add event http receiver Support Metric level function serviceRelation in MAL. Support envoy metrics binding into the topology. Fix openapi-definitions folder not being read correctly. Trace segment wouldn\u0026rsquo;t be recognized as a TopN sample service. Add through #4694 experimentally, but it caused performance impact. Remove version and endTime in the segment entity. Reduce indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition. Only support query by endpoint id. Fix ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition. Only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition. Only support query by endpoint id. [Break Change] Fix typo for a column page_path_id(was pate_path_id) of storage entity browser_error_log. Add component id for Python falcon plugin. Add rpcStatusCode for rpc.status_code tag. The responseCode field is marked as deprecated and replaced by httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test log analysis language. Harden the security of Groovy-based DSL, MAL and LAL. Fix distinct in Service/Instance/Endpoint query is not working. Support collection type in dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations. Add desc sort function in H2 and ElasticSearch implementations of IBrowserLogQueryDAO Support configure sampling policy by configuration module dynamically and static configuration file trace-sampling-policy-settings.yml for service dimension on the backend side. Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy. Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. 
Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.8.0","url":"/events/release-apache-skywalking-apm-8-8-0/"},{"content":"SkyWalking CLI 0.8.0 is released. 
Go to downloads page to find release tars.\n  Features\n Add profile command Add logs command Add dependency command Support query events protocol Support auto-completion for bash and powershell    Bug Fixes\n Fix missing service instance name in trace command    Chores\n Optimize output by adding color to help information Set display style explicitly for commands in the test script Set different default display style for different commands Add scripts for quick install Update release doc and add scripts for release split into multiple workflows to speed up CI    ","title":"Release Apache SkyWalking CLI 0.8.0","url":"/events/release-apache-skywalking-cli-0-8-0/"},{"content":"SkyWalking Satellite 0.2.0 is released. Go to downloads page to find release tars.\nFeatures  Set MAXPROCS according to real cpu quota. Update golangci-lint version to 1.39.0. Update protoc-gen-go version to 1.26.0. Add prometheus-metrics-fetcher plugin. Add grpc client plugin. Add nativelog-grpc-forwarder plugin. Add meter-grpc-forwarder plugin. Support native management protocol. Support native tracing protocol. Support native profile protocol. Support native CDS protocol. Support native JVM protocol. Support native Meter protocol. Support native Event protocol. Support native protocols E2E testing. Add Prometheus service discovery in Kubernetes.  Bug Fixes  Fix the data race in mmap queue. Fix channel blocking in sender module. Fix pipes.sender.min_flush_events config could not support min number. Remove service name and instance name labels from Prometheus fetcher.  Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Satellite 0.2.0","url":"/events/release-apache-skwaylking-satellite-0-2-0/"},{"content":"SkyWalking Python 0.7.0 is released. Go to downloads page to find release tars.\n  Feature:\n Support collecting and reporting logs to backend (#147) Support profiling Python method level performance (#127 Add a new sw-python CLI that enables agent non-intrusive integration (#156) Add exponential reconnection backoff strategy when OAP is down (#157) Support ignoring traces by http method (#143) NoopSpan on queue full, propagation downstream (#141) Support agent namespace. (#126) Support secure connection option for GRPC and HTTP (#134)    Plugins:\n Add Falcon Plugin (#146) Update sw_pymongo.py to be compatible with cluster mode (#150) Add Python celery plugin (#125) Support tornado5+ and tornado6+ (#119)    Fixes:\n Remove HTTP basic auth credentials from log, stacktrace, segment (#152) Fix @trace decorator not work (#136) Fix grpc disconnect, add SW_AGENT_MAX_BUFFER_SIZE to control buffer queue size (#138)    Others:\n Chore: bump up requests version to avoid license issue (#142) Fix module wrapt as normal install dependency (#123) Explicit component inheritance (#132) Provide dockerfile \u0026amp; images for easy integration in containerized scenarios (#159)    ","title":"Release Apache SkyWalking Python 0.7.0","url":"/events/release-apache-skywalking-python-0-7-0/"},{"content":"SkyWalking Infra E2E 1.0.0 is released. Go to downloads page to find release tars.\nFeatures  Support using docker-compose to setup the environment. Support using the HTTP request as trigger. Support verify test case by command-line or file with retry strategy. Support GitHub Action.  
Bug Fixes Issues and PR  All issues are here All and pull requests are here  ","title":"Release Apache SkyWalking Infra E2E 1.0.0","url":"/events/release-apache-skywalking-infra-e2e-1-0-0/"},{"content":"The Java Agent of Apache SkyWalking has supported profiling since v7.0.0, and it enables users to troubleshoot the root cause of performance issues, and now we bring it into Python Agent. In this blog, we will show you how to use it, and we will introduce the mechanism of profiling.\nHow to use profiling in Python Agent This feature is released in Python Agent at v0.7.0. It is turned on by default, so you don\u0026rsquo;t need any extra configuration to use it. You can find the environment variables about it here.\nHere are the demo codes of an intentional slow application.\nimport time def method1(): time.sleep(0.02) return \u0026#39;1\u0026#39; def method2(): time.sleep(0.02) return method1() def method3(): time.sleep(0.02) return method2() if __name__ == \u0026#39;__main__\u0026#39;: import socketserver from http.server import BaseHTTPRequestHandler class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): def do_POST(self): method3() time.sleep(0.5) self.send_response(200) self.send_header(\u0026#39;Content-Type\u0026#39;, \u0026#39;application/json\u0026#39;) self.end_headers() self.wfile.write(\u0026#39;{\u0026#34;song\u0026#34;: \u0026#34;Despacito\u0026#34;, \u0026#34;artist\u0026#34;: \u0026#34;Luis Fonsi\u0026#34;}\u0026#39;.encode(\u0026#39;ascii\u0026#39;)) PORT = 19090 Handler = SimpleHTTPRequestHandler with socketserver.TCPServer((\u0026#34;\u0026#34;, PORT), Handler) as httpd: httpd.serve_forever() We can start it with SkyWalking Python Agent CLI without changing any application code now, which is also the latest feature of v0.7.0. We just need to add sw-python run before our start command(i.e. sw-python run python3 main.py), to start the application with python agent attached. More information about sw-python can be found there.\nThen, we should add a new profile task for the / endpoint from the SkyWalking UI, as shown below.\nWe can access it by curl -X POST http://localhost:19090/, after that, we can view the result of this profile task on the SkyWalking UI.\nThe mechanism of profiling When a request lands on an application with the profile function enabled, the agent begins the profiling automatically if the request’s URI is as required by the profiling task. A new thread is spawned to fetch the thread dump periodically until the end of request.\nThe agent sends these thread dumps, called ThreadSnapshot, to SkyWalking OAPServer, and the OAPServer analyzes those ThreadSnapshot(s) and gets the final result. It will take a method invocation with the same stack depth and code signature as the same operation, and estimate the execution time of each method from this.\nLet\u0026rsquo;s demonstrate how this analysis works through the following example. Suppose we have such a program below and we profile it at 10ms intervals.\ndef main(): methodA() def methodA(): methodB() def methodB(): methodC() methodD() def methodC(): time.sleep(0.04) def methodD(): time.sleep(0.06) The agent collects a total of 10 ThreadSnapShot(s) over the entire time period(Diagram A). The first 4 snapshots represent the thread dumps during the execution of function C, and the last 6 snapshots represent the thread dumps during the execution of function D. After the analysis of OAPServer, we can see the result of this profile task on the SkyWalking Rocketbot UI as shown in the right of the diagram. 
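To make the estimation concrete, here is a small, self-contained sketch (a simplification, not the OAP server's actual analysis code) that derives per-method time from the ten snapshots of this example:

```python
# Simplified sketch of the analysis described above: each snapshot is a call
# stack sampled every `interval_ms`, and a method's execution time is estimated
# from the number of snapshots in which it appears. Not the OAP server's code.
from collections import Counter

interval_ms = 10
# 10 snapshots for the example program: 4 taken while methodC runs, 6 while methodD runs
snapshots = (
    [["main", "methodA", "methodB", "methodC"]] * 4
    + [["main", "methodA", "methodB", "methodD"]] * 6
)

estimated_ms = Counter()
for stack in snapshots:
    for method in stack:
        estimated_ms[method] += interval_ms

for method, duration in estimated_ms.items():
    print("%s: ~%d ms" % (method, duration))
# main/methodA/methodB appear in every snapshot (~100 ms), methodC in 4 (~40 ms)
# and methodD in 6 (~60 ms), matching the sleeps in the example program.
```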
With this result, we can clearly see the function call relationship and the time consumption situation of this program.\nDiagram A You can read more details of profiling theory from this blog.\nWe hope you enjoy the profile in the Python Agent, and if so, you can give us a star on Python Agent and SkyWalking on GitHub.\n","title":"SkyWalking Python Agent Supports Profiling Now","url":"/blog/2021-09-12-skywalking-python-profiling/"},{"content":"SkyWalking Kubernetes Helm Chart 4.1.0 is released. Go to downloads page to find release tars.\n Add missing service account to init job. Improve notes.txt and nodePort configuration. Improve ingress compatibility. Fix bug that customized config files are not loaded into es-init job. Add imagePullSecrets and node selector. Fix istio adapter description. Enhancement: allow mounting binary data files.  ","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.1.0","url":"/events/release-apache-skywalking-kubernetes-helm-chart-4.1.0/"},{"content":"GOUP hosted a webinar, and invited Sheng Wu to introduce Apache SkyWalking. This is a 1.5 hours presentation including the full landscape of Apache SkyWalking 8.x.\nChapter04 Session10 - Apache Skywalking by Sheng Wu   ","title":"[Webinar] SkyWalking 8.x Introduction","url":"/blog/2021-08-01-skywalking-8-intro/"},{"content":"SkyWalking 8.7.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. 
Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. 
Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.7.0","url":"/events/release-apache-skywalking-apm-8-7-0/"},{"content":"SkyWalking Client JS 0.6.0 is released. 
Go to downloads page to find release tars.\n Separate production and development environments when building. Upgrade packages to fix vulnerabilities. Fix that headers could be null. Fix catching errors for http requests. Fix that firstReportedError is calculated with more types of errors.  ","title":"Release Apache SkyWalking Client JS 0.6.0","url":"/events/release-apache-skywalking-client-js-0-6-0/"},{"content":"SkyWalking is an open source APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based architectures.\nSince 2020, it has dominated the open source APM market in China, and expanded aggressively across North America, Europe and other countries in Asia.\nWith over 6 years (2015-2021) of development, driven by the global open source community, SkyWalking now provides full stack observability covering metrics, tracing and logging, plus an event detector, all built on various native and ecosystem solutions.\n Language agent-based (Java, .NET, Golang, PHP, NodeJS, Python, C++, LUA) in-process monitoring is as powerful as commercial APM vendors' agents: mostly auto-instrumented, with good interactivity. Service Mesh Observability, working closely with the Envoy and Istio teams. Transparent integration with the popular metrics ecosystem: accepts metrics from Prometheus SDKs, OpenTelemetry collectors, Zabbix agents, etc. Log collection with analysis capability from agents such as FluentD, Fluent-bit and Filebeat. Infrastructure monitoring, such as Linux and k8s, works out of the box.  The SkyWalking ecosystem was started by very few people. The community drives the project to cover real scenarios, from tracing to the whole APM field. Even today, more professional open source developers, powered by the vendors behind them, are bringing the project to a different level.\nTypically and most attractively, SkyWalking is going to build the first known open source APM-specific database in the world, at least providing\n A time series-based database engine. Support for traces/logs and metrics at the database core level. High performance with cluster mode and HPA. Reasonable resource cost.  We nearly doubled the number of contributors in the last year, from ~300 to over 500. The whole community is very energetic. Here, we want to thank our 47 committers (28 PMC members included), listed here, and over 400 other contributors.\nTogether we built this humongous Apache Top Level project, and proved the strong competitiveness of an open-source project.\nThis is a hard-won and impressive achievement. We won't stop here. The trend is there, the ground is solid. 
We are going to build the top-level APM system relying on our open-source community.\n500 Contributors List    GitHub         1095071913 182148432** 295198088** 394102339** 437376068**   50168383 55846420** 826245622** 844067874 Ahoo-Wang   AirTrioa AlexanderWert AlseinX AngryMills Ax1an   BFergerson BZFYS CalvinKirs CharlesMaster ChaunceyLin5152   CommissarXia Cvimer DeadLion Doublemine Du-fei   ElderJames EvanLjp FatihErdem FeynmanZhou Fine0830   FingerLiu FrankyXu Gallardot GerryYuan HackerRookie   HarryFQ Heguoya Hen1ng HendSame Humbertzhang   IanCao IluckySi Indifer J-Cod3r JaredTan95   Jargon96 Jijun JoeKerouac JohnNiang Johor03   Jozdortraz Jtrust Just-maple KangZhiDong LazyLei   LiWenGu Lin1997 Linda-pan LiteSun Liu-XinYuan   MiracleDx Miss-you MoGuGuai-hzr MrYzys O-ll-O   Patrick0308 QHWG67 Qiliang QuanjieDeng RandyAbernethy   RedzRedz Runrioter SataQiu ScienJus SevenBlue2018   ShaoHans Shikugawa SoberChina SummerOfServenteen Switch-vov   TJ666 Technoboy- TerrellChen TeslaCN TheRealHaui   TinyAllen TomMD ViberW Videl WALL-E   WeihanLi WildWolfBang WillemJiang Wooo0 XhangUeiJong   Xlinlin YczYanchengzhe Yebemeto YoungHu YunaiV   YunfengGao Z-Beatles ZS-Oliver ZhHong ZhuoSiChen   a198720 a1vin-tian a526672351 acurtain adamni135   adermxzs adriancole** aeolusheath agile6v aix3   aiyanbo ajanthan alexkarezin alonelaval amogege   amwyyyy andyliyuze andyzzl aoxls arugal   ascrutae ascrutae** augustowebd aviaviavi bai-yang   beckhampu beckjin beiwangnull bigflybrother bootsrc   bostin brucewu-fly buxingzhe buzuotaxuan bwh12398**   c feng c1ay candyleer carllhw carlvine500   carrypann cheenursn cheetah012 chenbeitang chenglei**   chengshiwen chenmudu chenpengfei chenvista chess-equality   chestarss chidaodezhongsheng chopin-d clevertension clk1st   cngdkxw cnlangzi codeglzhang codelipenghui coder-yqj   coki230 compilerduck constanine coolbeevip crystaldust   cui-liqiang cuiweiwei cutePanda123 cyberdak cyejing   cyhii dafu-wu dagmom dalekliuhan** darcydai   dengliming devkanro devon-ye dickens7 dimaaan   dingdongnigetou dio divyakumarjain dmsolr dominicqi   donbing007 dsc6636926 dvsv2 dzx2018 echooymxq   efekaptan elk-g emschu eoeac evanljp**   evanxuhe feelwing1314 fgksgf fredster33 fuhuo   fulmicoton fushiqinghuan111 geektcp geomonlin ggndnn   gitter-badger givingwu glongzh gnr163 gonedays   grissom-grissom grissomsh guodongq guyukou gxthrj   gy09535 gzshilu hailin0 hanahmily haotian2015   haoyann hardzhang harvies heihaozi hepyu   heyanlong hi-sb honganan horber hsoftxl   huangyoje huliangdream huohuanhuan iluckysi innerpeacez   itsvse jasper-zsh jbampton jialong121 jinlongwang   jjlu521016 jjtyro jmjoy jsbxyyx justeene   juzhiyuan jy00464346 kaanid kagaya85 karott   kayleyang kevinyyyy kezhenxu94 kikupotter kilingzhang   killGC kkl129 klboke ksewen kuaikuai   kun-song kylixs landonzeng langke93 langyan1022   langyizhao lazycathome leemove leizhiyuan libinglong   lijial lilien1010 limfriend linkinshi linliaoy   liqiangz liu-junchi liufei** liuhaoXD liuhaoyang   liuweiyi** liuyanggithup liuzhengyang liweiv lixin40**   lizl9** lkxiaolou llissery louis-zhou lpcy   lpf32 lsyf lucperkins lujiajing1126 lunamagic1978   lunchboxav lxin96** lxliuxuankb lytscu lyzhang1999   mage3k makefriend8 makingtime mantuliu maolie   margauxcabrera masterxxo maxiaoguang64 me** membphis   mestarshine mgsheng michaelsembwever mikkeschiren ming_flycash**   minquan.chen** misaya momo0313 moonming mrproliu   mrproliu** muyun12 nacx neatlife neeuq   nic-chen nickwongwong nikitap492 nileblack nisiyong   novayoung 
oatiz oflebbe olzhy onecloud360   osiriswd panniyuyu peng-yongsheng pengweiqhca potiuk   probeyang purgeyao qijianbo010 qinhang3 qiuyu-d   qjgszzx qq362220083 qqeasonchen qxo ralphgj   raybi-asus refactor2 remicollet rlenferink rootsongjc   rovast ruibaby s00373198 scolia sdanzo   seifeHu sergicastro shiluo34 sikelangya simonlei   sk163 snakorse songzhendong songzhian songzhian**   sonxy spacewander stalary stenio2011 stevehu   stone-wlg sungitly surechen swartz-k sxzaihua   tangxqa tanjunchen tankilo tanzhen** taskmgr   tbdpmi terranhu terrymanu tevahp thanq   thebouv tianyk tianyuak tincopper tinyu0   tom-pytel tristaZero tristan-tsl trustin tsuilouis   tuohai666 tzsword-2020 tzy1316106836 vcjmhg viktoryi   vision-ken viswaramamoorthy wallezhang wang-yeliang wang_weihan**   wangrzneu wankai123 wbpcode web-xiaxia webb2019   weiqiang-w weiqiang333 wendal wengangJi wenjianzhang   whfjam whl12345 willseeyou wilsonwu wind2008hxy   wingwong-knh withlin wl4g wqr2016 wu-sheng   wuguangkuo wujun8 wuwen5 wuxingye x22x22   xbkaishui xcaspar xdRight xiaoweiyu** xiaoxiangmoe   xiaoy00 xinfeingxia85 xingren23 xinzhuxiansheng xonze   xuanyu66 xuchangjunjx xudianyang yanbw yanfch   yang-xiaodong yangxb2010000 yanickxia yanmaipian yanmingbi   yantaowu yaojingguo yaowenqiang yazong ychandu   ycoe yimeng yu199195 yuqichou yushuqiang**   yuyujulin yxudong yymoth zaunist zaygrzx   zcai2 zeaposs zhang98722 zhanghao001 zhangjianweibj   zhangkewei zhangsean zhangxin** zhaoyuguang zhe1926   zhentaoJin zhongjianno1** zhousiliang163 zhuCheer zhyyu   zifeihan zijin-m zkscpqm zoidbergwill zoumingzm   zouyx zpf1989 zshit zxbu zygfengyuwuzu    ","title":"[Community win] SkyWalking achieved 500 contributors milestone.","url":"/blog/2021-07-12-500-contributors-mark/"},{"content":"时间:2021 年 6 月 26 日\n地点:北京市海淀区西格玛大厦 B1 多功能厅\n视频回放:见 Bilibili\nApache SkyWalking Landscape  吴晟 Sheng Wu. Tetrate Founding Engineer, Apache Software Foundation board director. SkyWalking founder.  SkyWalking 2020-2021 年发展和后续计划\n微服务可观测性分析平台的探索与实践  凌若川 腾讯高级工程师  可观测性分析平台作为云原生时代微服务系统基础组件,开放性与性能是决定平台价值的核心要素。 复杂微服务应用场景与海量多维链路数据,对可观测性分析平台在开放性设计和各环节高性能实现带来诸多挑战。 本次分享中将重点梳理腾讯云微服务团队在构建云原生可观测性分析平台过程中遇到的挑战,介绍我们在架构设计与实现方面的探索与实践。\n 云原生时代微服务可观测性平台面临的性能与可用性挑战 腾讯云在构建高性能微服务可观测性分析平台的探索与实践 微服务可观测性分析平台架构的下一阶段演进方向展望  BanyanDB 数据模型背后的逻辑  高洪涛 Hongtao Gao. Tetrate SRE, SkyWalking PMC, Apache ShardingSphere PMC.  BanyanDB 作为为处理 Apache SkyWalking 产生的 trace,log 和 metric 的数据而特别设计的数据库,其背后数据模型的抉择是非常与众不同的。 在本次分享中,我将根据 RUM 猜想来讨论为什么 BanyanDB 使用的数据模型对于 APM 数据而言是更加高效和可靠的。\n通过本次分享,观众可以:\n 理解数据库设计的取舍 了解 BanyanDB 的数据模型 认识到该模型对于 APM 类数据有特定的优势  Apache SkyWalking 如何做前端监控  范秋霞 Qiuxia Fan,Tetrate FE SRE,SkyWalking PMC.  Apache SkyWalking 对前端进行了监控与跟踪,分别有 Metric, Log, Trace 三部分。本次分享我会介绍页面性能指标的收集与计算,同时用案列进行分析,也会讲解 Log 的采集方法以及 Source Map 错误定位的实施。最后介绍浏览器端 Requets 的跟踪方法。\n通过本次分享,观众可以:\n 了解页面的性能指标以及收集计算方法 了解前端如何做错误日志收集 如何对页面请求进行跟踪以及跟踪的好处  一名普通工程师,该如何正确的理解开源精神?  王晔倞 Yeliang Wang. API7 Partner / Product VP.  开源精神,那也许是一种给于和获取的平衡,有给于才能有获取,有获取才会有给于的动力。无需指责别人只会获取,我们应该懂得开源是一种创造方式,一个没有创造欲和创造力的人加入开源也是无用的。\n通过本次分享,观众可以:\n 为什么国内一些程序员会对开源产生误解? 了解 “开源≠自由≠非商业” 的来龙去脉。 一名普通工程师,如何高效地向开源社区做贡献?  可观测性技术生态和 OpenTelemetry 原理及实践  陈一枭 腾讯. OpenTelemetry docs-cn maintainer、Tencent OpenTelemetry OTeam 创始人  综述云原生可观测性技术生态,介绍 OpenTracing,OpenMetrics,OpenTelemetry 等标准演进。介绍 OpenTelemetry 存在价值意义,介绍 OpenTelemetry 原理及其整体生态规划。介绍腾讯在 OpenTelemetry 方面的实践。\n本次分享内容如下:\n 云原生可观测性技术简介 OpenTelemetry 及其它规范简介 OpenTelemetry 原理 OpenTelemetry 在腾讯的应用及实践  Apache SkyWalking 事件采集系统更快定位故障  柯振旭 Zhenxu Ke,Tetrate SRE, Apache SkyWalking PMC. 
Apache Incubator PMC. Apache Dubbo committer.  通过本次分享,听众可以:\n 了解 SkyWalking 的事件采集系统; 了解上报事件至 SkyWalking 的多种方式; 学习如何利用 SkyWalking 采集的事件结合 metrics,分析目标系统的性能问题;  可观测性自动注入技术原理探索与实践  詹启新 Tencnet OpenTelemetry Oteam PMC  在可观测领域中自动注入已经成为重要的组成部分之一,其优异简便的使用方式并且可同时覆盖到链路、指标、日志,大大降低了接入成本及运维成本,属于友好的一种接入方式; 本次分享将介绍 Java 中的字节码注入技术原理,及在可观测领域的应用实践\n 常用的自动注入技术原理简介 介绍可观测性在 Java 落地的要点 opentelemetry-java-instrumentation 的核心原理及实现 opentelemetry 自动注入的应用实践  如何利用 Apache APISIX 提升 Nginx 的可观测性  金卫 Wei Jin, API7 Engineer Apache SkyWalking committer. Apache apisix-ingress-controller Founder. Apache APISIX PMC.  在云原生时代,动态和可观测性是 API 网关的标准特性。Apache APISIX 不仅覆盖了 Nginx 的传统功能,在可观测性上也和 SkyWalking 深度合作,大大提升了服务治理能力。本次分享会介绍如何无痛的提升 Nginx 的可观测性和 APISIX 在未来可观测性方面的规划。\n通过本次分享,观众可以:\n 通过 Apache APISIX 实现观测性的几种手段. 了解 Apache APISIX 高效且易用的秘诀. 结合 Apache skywalking 进一步提升可观测性.  ","title":"[视频] SkyWalking Day 2021 演讲视频","url":"/zh/skywalking-day-2021/"},{"content":"SkyWalking CLI 0.7.0 is released. Go to downloads page to find release tars.\n  Features\n Add GitHub Action for integration of event reporter    Bug Fixes\n Fix metrics top can\u0026rsquo;t infer the scope automatically    Chores\n Upgrade dependency crypto Refactor project to use goapi Move parseScope to pkg Update release doc    ","title":"Release Apache SkyWalking CLI 0.7.0","url":"/events/release-apache-skywalking-cli-0-7-0/"},{"content":"SkyWalking 8.6.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. 
perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics infomation during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.6.0","url":"/events/release-apache-skywalking-apm-8-6-0/"},{"content":"Abstract Apache SkyWalking hosts SkyWalkingDay Conference 2021 in June 26th, jointly with Tencent and Tetrate.\nWe are going to share SkyWalking\u0026rsquo;s roadmap, features, product experiences and open source culture.\nWelcome to join us.\nVenue Addr./地址 北京市海淀区西格玛大厦B1多功能厅\nDate June 26th.\nRegistration For Free Register for onsite or online\nSessions 10:00 - 10:20 Apache SkyWalking Landscape  吴晟 Sheng Wu. Tetrate Founding Engineer, Apache Software Foundation board director. SkyWalking founder.  SkyWalking 2020-2021年发展和后续计划\n10:20 - 10:50 微服务可观测性分析平台的探索与实践  凌若川 腾讯高级工程师  可观测性分析平台作为云原生时代微服务系统基础组件,开放性与性能是决定平台价值的核心要素。 复杂微服务应用场景与海量多维链路数据,对可观测性分析平台在开放性设计和各环节高性能实现带来诸多挑战。 本次分享中将重点梳理腾讯云微服务团队在构建云原生可观测性分析平台过程中遇到的挑战,介绍我们在架构设计与实现方面的探索与实践。\n 云原生时代微服务可观测性平台面临的性能与可用性挑战 腾讯云在构建高性能微服务可观测性分析平台的探索与实践 微服务可观测性分析平台架构的下一阶段演进方向展望  10:50 - 11:20 BanyanDB数据模型背后的逻辑  高洪涛 Hongtao Gao. Tetrate SRE, SkyWalking PMC, Apache ShardingSphere PMC.  
BanyanDB作为为处理Apache SkyWalking产生的trace,log和metric的数据而特别设计的数据库,其背后数据模型的抉择是非常与众不同的。 在本次分享中,我将根据RUM猜想来讨论为什么BanyanDB使用的数据模型对于APM数据而言是更加高效和可靠的。\n通过本次分享,观众可以:\n 理解数据库设计的取舍 了解BanyanDB的数据模型 认识到该模型对于APM类数据有特定的优势  11:20 - 11:50 Apache SkyWalking 如何做前端监控  范秋霞 Qiuxia Fan,Tetrate FE SRE,SkyWalking PMC.  Apache SkyWalking对前端进行了监控与跟踪,分别有Metric, Log, Trace三部分。本次分享我会介绍页面性能指标的收集与计算,同时用案列进行分析,也会讲解Log的采集方法以及Source Map错误定位的实施。最后介绍浏览器端Requets的跟踪方法。\n通过本次分享,观众可以:\n 了解页面的性能指标以及收集计算方法 了解前端如何做错误日志收集 如何对页面请求进行跟踪以及跟踪的好处  午休 13:30 - 14:00 一名普通工程师,该如何正确的理解开源精神?  王晔倞 Yeliang Wang. API7 Partner / Product VP.  开源精神,那也许是一种给于和获取的平衡,有给于才能有获取,有获取才会有给于的动力。无需指责别人只会获取,我们应该懂得开源是一种创造方式,一个没有创造欲和创造力的人加入开源也是无用的。\n通过本次分享,观众可以:\n 为什么国内一些程序员会对开源产生误解? 了解 “开源≠自由≠非商业” 的来龙去脉。 一名普通工程师,如何高效地向开源社区做贡献?  14:00 - 14:30 可观测性技术生态和OpenTelemetry原理及实践  陈一枭 腾讯. OpenTelemetry docs-cn maintainer、Tencent OpenTelemetry OTeam创始人  综述云原生可观测性技术生态,介绍OpenTracing,OpenMetrics,OpenTelemetry等标准演进。介绍OpenTelemetry存在价值意义,介绍OpenTelemetry原理及其整体生态规划。介绍腾讯在OpenTelemetry方面的实践。\n本次分享内容如下:\n 云原生可观测性技术简介 OpenTelemetry及其它规范简介 OpenTelemetry原理 OpenTelemetry在腾讯的应用及实践  14:30 - 15:10 利用 Apache SkyWalking 事件采集系统更快定位故障  柯振旭 Zhenxu Ke,Tetrate SRE, Apache SkyWalking PMC. Apache Incubator PMC. Apache Dubbo committer.  通过本次分享,听众可以:\n 了解 SkyWalking 的事件采集系统; 了解上报事件至 SkyWalking 的多种方式; 学习如何利用 SkyWalking 采集的事件结合 metrics,分析目标系统的性能问题;  15:10 - 15:30 茶歇 15:30 - 16:00 可观测性自动注入技术原理探索与实践  詹启新 Tencnet OpenTelemetry Oteam PMC  在可观测领域中自动注入已经成为重要的组成部分之一,其优异简便的使用方式并且可同时覆盖到链路、指标、日志,大大降低了接入成本及运维成本,属于友好的一种接入方式; 本次分享将介绍Java中的字节码注入技术原理,及在可观测领域的应用实践\n 常用的自动注入技术原理简介 介绍可观测性在Java落地的要点 opentelemetry-java-instrumentation的核心原理及实现 opentelemetry自动注入的应用实践  16:00 - 16:30 如何利用 Apache APISIX 提升 Nginx 的可观测性  金卫 Wei Jin, API7 Engineer Apache SkyWalking committer. Apache apisix-ingress-controller Founder. Apache APISIX PMC.  在云原生时代,动态和可观测性是 API 网关的标准特性。Apache APISIX 不仅覆盖了 Nginx 的传统功能,在可观测性上也和 SkyWalking 深度合作,大大提升了服务治理能力。本次分享会介绍如何无痛的提升 Nginx 的可观测性和 APISIX 在未来可观测性方面的规划。\n通过本次分享,观众可以:\n 通过 Apache APISIX 实现观测性的几种手段. 了解 Apache APISIX 高效且易用的秘诀. 结合 Apache skywalking 进一步提升可观测性.  16:35 抽奖,结束 Sponsors  Tencent Tetrate SegmentFault 思否  Anti-harassment policy SkyWalkingDay is dedicated to providing a harassment-free experience for everyone. We do not tolerate harassment of participants in any form. Sexual language and imagery will also not be tolerated in any event venue. Participants violating these rules may be sanctioned or expelled without a refund, at the discretion of the event organizers. Our anti-harassment policy can be found at Apache website.\nContact Us Send mail to dev@skywalking.apache.org.\n","title":"SkyWalkingDay Conference 2021, relocating at Beijing","url":"/events/skywalkingday-2021/"},{"content":"SkyWalking NodeJS 0.3.0 is released. Go to downloads page to find release tars.\n Add ioredis plugin. (#53) Endpoint cold start detection and marking. (#52) Add mysql2 plugin. (#54) Add AzureHttpTriggerPlugin. (#51) Add Node 15 into test matrix. (#45) Segment reference and reporting overhaul. (#50) Add http ignore by method. (#49) Add secure connection option. (#48) BugFix: wrong context during many async spans. (#46) Add Node Mongoose Plugin. (#44)  ","title":"Release Apache SkyWalking for NodeJS 0.3.0","url":"/events/release-apache-skywalking-nodejs-0-3-0/"},{"content":"SkyWalking Client JS 0.5.1 is released. Go to downloads page to find release tars.\n Add noTraceOrigins option. Fix wrong URL when using relative path. Catch frames errors. Get response.body as a stream with the fetch API. Support reporting multiple logs. 
Support TypeScript projects.  ","title":"Release Apache SkyWalking Client JS 0.5.1","url":"/events/release-apache-skywalking-client-js-0-5-1/"},{"content":"SkyWalking Kong Agent 0.1.1 is released. Go to downloads page to find release tars.\n Establish the SkyWalking Kong Agent.  ","title":"Release Apache SkyWalking Kong 0.1.1","url":"/events/release-apache-skywalking-kong-0-1-1/"},{"content":"Video on Bilibili\n","title":"[视频] 大咖说开源 第二季 第4期 | Apache软件基金会20年","url":"/zh/2021-05-09-summer-2021-asf20/"},{"content":"We posted our Response to Elastic 2021 License Change blog 4 months ago. It doesn't have a big impact in the short term, but because of the incompatibility between SSPL and Apache 2.0, we lost the chance of upgrading the storage server, which concerns the community and our users. So, we have to keep looking for a new option as a replacement.\nThere was an open source project, Open Distro for Elasticsearch, maintained by the AWS team. It is an Apache 2.0-licensed distribution of Elasticsearch enhanced with enterprise security, alerting, SQL, and more. After Elastic relicensed its projects, we talked with their team, and they have an agenda to take over the community leadership and keep maintaining Elasticsearch, as it was licensed by Apache 2.0. So, they were in a good position to fork and continue.\nOn April 12th, 2021, AWS announced the new project, OpenSearch, driven by the community, which was initiated by people from AWS, Red Hat, SAP, Capital One, and Logz.io. Read this Introducing OpenSearch blog for more detail.\nOnce this news was public, we began to plan the process of evaluating and testing OpenSearch as SkyWalking's storage option. Read our issue.\nToday, we are glad to ANNOUNCE that OpenSearch can replace ElasticSearch as the storage, and it is still licensed under Apache 2.0.\nThis has been merged into the main branch, and you can find it in the dev doc already.\nOpenSearch OpenSearch storage shares the same configurations as Elasticsearch 7. In order to activate OpenSearch as storage, set the storage provider to elasticsearch7. Please download the apache-skywalking-bin-es7.tar.gz if you want to use OpenSearch as storage.\nThe SkyWalking community will keep its eyes on the OpenSearch project, and look forward to their first GA release.\n NOTE: we have to add a warning NOTICE to the Elasticsearch storage doc:\nNOTICE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable Elasticsearch version according to your usage.\n","title":"OpenSearch, a new storage option to avoid ElasticSearch's SSPL","url":"/blog/2021-05-09-opensearch-supported/"},{"content":"Hailin Wang (GitHub ID: hailin0) began his SkyWalking journey on Aug 23rd, 2020.\nHe is very active in code contributions and has brought several important features into the SkyWalking ecosystem.\nHe is the 33rd-ranked contributor in the main repository[1], focusing on plugin contributions and logging ecosystem integration; see his code contributions[2]. He also started a new and better way for other open-source projects to integrate with SkyWalking.\nHe spent over 2 months making the SkyWalking agent and its plugins a part of Apache DolphinScheduler's default binary distribution[3]; see this PR[4]. This kind of example has influenced further community development. 
Our PMC member, Yuguang Zhao, is using this approach to ship our agent and plugins into the Seata project[5]. As SkyWalking keeps growing, I have no doubt that this kind of integration will become more common.\nSkyWalking has accepted him as a new committer.\nWelcome, Hailin Wang, to the committer team.\n[1] https://github.com/apache/skywalking/graphs/contributors [2] https://github.com/apache/skywalking/commits?author=hailin0 [3] https://github.com/apache/dolphinscheduler/tree/1.3.6-prepare/ext/skywalking [4] https://github.com/apache/incubator-dolphinscheduler/pull/4852 [5] https://github.com/seata/seata/pull/3652\n","title":"Welcome Hailin Wang as new committer","url":"/events/welcome-hailin-wang-as-new-committer/"},{"content":"SkyWalking LUA Nginx 0.5.0 is released. Go to downloads page to find release tars.\n Adapt to the Kong agent. Correct the luarock version format.  ","title":"Release Apache SkyWalking LUA Nginx 0.5.0","url":"/events/release-apache-skywalking-lua-nginx-0.5.0/"},{"content":"SkyWalking 8.5.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Incompatible Change. Indices and templates of the ElasticSearch (6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, to download the node x64 binary on Apple Silicon. Add E2E test for VM monitoring with metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add a Java agent Dockerfile to build a Docker image for the Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detailed messages when redefinition errors occur. Fix ClassCastException of the log4j gRPC reporter. Fix NPE when the Kafka reporter is activated. Enhance the gRPC log appender to allow a layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin getting a null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor implement the EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support changing the config agent.span_limit_per_segment at runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation missing the trace ID in async logs. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt to hbase client 2.x. Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that the springmvc-annotation-4.x-plugin witness class does not exist in some versions. Add Redis command parameters to the 'db.statement' field on the Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix that the springmvc reactive API can't collect the HTTP statusCode. Fix bug that the asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugins (gateway-2.0.x-plugin, gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept “Event” and its implementations to collect events. Add some defensive code for NPE and bump up the Kubernetes client version to expose exception stack traces. Update the timestamp field type for LogQuery. Support the Zabbix protocol to receive agent metrics. 
Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  
All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.5.0","url":"/events/release-apache-skywalking-apm-8-5-0/"},{"content":"SkyWalking Cloud on Kubernetes 0.3.0 is released. Go to downloads page to find release tars.\n Support special characters in the metric selector of the HPA metric adapter. Add the namespace to the HPA metric name.  ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.3.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-3-0/"},{"content":"SkyWalking NodeJS 0.2.0 is released. Go to downloads page to find release tars.\n Add AMQPLib plugin (RabbitMQ). (#34) Add MongoDB plugin. (#33) Add PgPlugin - PostgreSQL. (#31) Add MySQLPlugin to plugins. (#30) Add http protocol of host to http plugins. (#28) Add tag http.method to plugins. (#26) Bugfix: child spans created on immediate cb from op. (#41) Bugfix: async and preparing child entry/exit. (#36) Bugfix: tsc error of dist lib. (#24) Bugfix: AxiosPlugin async() / resync(). (#21) Bugfix: some requests of express / axios are not closed correctly. (#20) Express plugin uses http wrap explicitly if the http plugin is disabled. (#42)  ","title":"Release Apache SkyWalking for NodeJS 0.2.0","url":"/events/release-apache-skywalking-nodejs-0-2-0/"},{"content":"SkyWalking Python 0.6.0 is released. Go to downloads page to find release tars.\n Fixes:  Segment data loss when gRPC times out. (#116) sw_tornado plugin async handler status is set correctly. (#115) sw_pymysql error when the connection has no db. (#113)    ","title":"Release Apache SkyWalking Python 0.6.0","url":"/events/release-apache-skywalking-python-0-6-0/"},{"content":" Origin: End-User Tracing in a SkyWalking-Observed Browser - The New Stack\n Apache SkyWalking: an APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based (Docker, Kubernetes, Mesos) architectures.\nskywalking-client-js: a lightweight client-side JavaScript exception, performance, and tracing library. It provides metrics and error collection to the SkyWalking backend. It also makes the browser the starting point for distributed tracing.\nBackground Web application performance affects the retention rate of users. If a page load time is too long, the user will give up. So we need to monitor the web application to understand performance and ensure that servers are stable, available and healthy. SkyWalking is an APM tool and skywalking-client-js extends its monitoring to include the browser, providing performance metrics and error collection to the SkyWalking backend.\nPerformance Metrics skywalking-client-js uses window.performance (https://developer.mozilla.org/en-US/docs/Web/API/Window/performance) for performance data collection. From the MDN doc, the performance interface provides access to performance-related information for the current page. It's part of the High Resolution Time API, but is enhanced by the Performance Timeline API, the Navigation Timing API, the User Timing API, and the Resource Timing API. In skywalking-client-js, all performance metrics are calculated according to the Navigation Timing API defined in the W3C specification. We can get a PerformanceTiming object describing our page using the window.performance.timing property. 
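As a rough illustration (a minimal sketch using the formulas listed in the table below, not the actual skywalking-client-js implementation), a few of these metrics can be derived from that object like this:
// Illustrative sketch only; skywalking-client-js computes and reports these values itself.
// Note that several of these fields are only populated after the page's load event has fired.
const t = window.performance.timing;
const ttfbTime = t.responseStart - t.requestStart;        // Time to First Byte
const dnsTime = t.domainLookupEnd - t.domainLookupStart;  // DNS query time
const tcpTime = t.connectEnd - t.connectStart;            // TCP connection time
const fptTime = t.responseEnd - t.fetchStart;             // First Paint Time
const loadPageTime = t.loadEventStart - t.fetchStart;     // full page load time
console.log({ ttfbTime, dnsTime, tcpTime, fptTime, loadPageTime });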
The PerformanceTiming interface contains properties that offer performance timing information for various events that occur during the loading and use of the current page.\nWe can better understand these attributes when we see them together in the figure below from W3C:\nThe following table contains the performance metrics in skywalking-client-js.\n Metrics Name | Description | Calculating Formula | Note\n redirectTime | Page redirection time | redirectEnd - redirectStart | If the current document and the document it is redirected to are not from the same origin, redirectStart and redirectEnd are set to 0\n ttfbTime | Time to First Byte | responseStart - requestStart | According to Google Development\n dnsTime | Time to DNS query | domainLookupEnd - domainLookupStart |\n tcpTime | Time to TCP link | connectEnd - connectStart |\n transTime | Time to content transfer | responseEnd - responseStart |\n sslTime | Time to SSL secure connection | connectEnd - secureConnectionStart | Only supports HTTPS\n resTime | Time to resource loading | loadEventStart - domContentLoadedEventEnd | Represents synchronously loaded resources in pages\n fmpTime | Time to First Meaningful Paint | - | Listen for changes in page elements. Traverse each new element and calculate the total score of these elements. If an element is visible, its score is 1 * weight; if it is not visible, the score is 0\n domAnalysisTime | Time to DOM analysis | domInteractive - responseEnd |\n fptTime | First Paint Time | responseEnd - fetchStart |\n domReadyTime | Time to DOM ready | domContentLoadedEventEnd - fetchStart |\n loadPageTime | Page full load time | loadEventStart - fetchStart |\n ttlTime | Time to interact | domInteractive - fetchStart |\n firstPackTime | Time to first package | responseStart - domainLookupStart\n skywalking-client-js collects those performance metrics and sends them to the OAP (Observability Analysis Platform) server, which aggregates the data on the back-end side; the results are then shown in visualizations on the UI side. Users can optimize the page according to these data.\nException Metrics There are five kinds of errors that can be caught in skywalking-client-js (a minimal sketch of registering these hooks follows the error metrics below):\n Resource loading errors are captured by window.addEventListener('error', callback, true). window.onerror catches JS execution errors. window.addEventListener('unhandledrejection', callback) is used to catch promise errors. Vue errors are captured by Vue.config.errorHandler. Ajax errors are captured by addEventListener('error', callback); addEventListener('abort', callback); addEventListener('timeout', callback); in the send callback.  skywalking-client-js traces error data to the OAP server, finally visualizing the data on the UI side. For an error overview of the App, there are several metrics for basic statistics and trends of errors, including the following:\n App Error Count, the total number of errors in the selected time period. App JS Error Rate, the proportion of PV with JS errors in a selected time period to total PV. All of Apps Error Count, Top N Apps error count ranking. All of Apps JS Error Rate, Top N Apps JS error rate ranking. Error Count of Versions in the Selected App, Top N Error Count of Versions in the Selected App ranking. Error Rate of Versions in the Selected App, Top N JS Error Rate of Versions in the Selected App ranking. Error Count of the Selected App, Top N Error Count of the Selected App ranking. Error Rate of the Selected App, Top N JS Error Rate of the Selected App ranking.  
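Before moving on to the page-level metrics, here is a minimal sketch of how the browser hooks listed above can be registered. It is illustrative only: sendToCollector is a hypothetical helper, and the real library normalizes and batches these reports before sending them to the OAP backend.
// Hypothetical helper standing in for the library's reporting logic.
function sendToCollector(payload: unknown): void {
  console.log('would report to the OAP backend:', payload);
}

// Resource loading errors (and uncaught runtime errors), captured in the capture phase.
window.addEventListener('error', (event) => sendToCollector(event), true);

// Uncaught JS execution errors.
window.onerror = (message, source, lineno, colno, error) => {
  sendToCollector({ message, source, lineno, colno, error });
};

// Unhandled Promise rejections.
window.addEventListener('unhandledrejection', (event) => sendToCollector(event.reason));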
For pages, we use several metrics for basic statistics and trends of errors, including the following metrics:\n Top Unstable Pages / Error Rate, Top N Error Count pages of the Selected version ranking. Top Unstable Pages / Error Count, Top N Error Count pages of the Selected version ranking. Page Error Count Layout, data display of different errors in a period of time.  User Metrics SkyWalking browser monitoring also provides metrics about how the visitors use the monitored websites, such as PV(page views), UV(unique visitors), top N PV(page views), etc.\nIn SPAs (single page applications), the page will be refreshed only once. The traditional method only reports PV once after the page loading, but cannot count the PV of each sub-page, and can\u0026rsquo;t make other types of logs aggregate by sub-page.\nSkyWalking browser monitoring provides two processing methods for SPA pages:\n  Enable SPA automatic parsing. This method is suitable for most single page application scenarios with URL hash as the route. In the initialized configuration item, set enableSPA to true, which will turn on the page\u0026rsquo;s hashchange event listener (trigger re reporting PV), and use URL hash as the page field in other data reporting.\n  Manual reporting. This method can be used in all single page application scenarios. This method can be used if the first method is not usable. The following example provides a set page method to manually update the page name when data is reported. When this method is called, the page PV will be re reported by default:\n  app.on(\u0026#39;routeChange\u0026#39;, function (to) { ClientMonitor.setPerformance({ collector: \u0026#39;http://127.0.0.1:8080\u0026#39;, service: \u0026#39;browser-app\u0026#39;, serviceVersion: \u0026#39;1.0.0\u0026#39;, pagePath: to.path, autoTracePerf: true, enableSPA: true, }); }); Let\u0026rsquo;s take a look at the result found in the following image. It shows the most popular applications and versions, and the changes of PV over a period of time.\nMake the browser the starting point for distributed tracing SkyWalking browser monitoring intercepts HTTP requests to trace segments and spans. It supports tracking these following modes of HTTP requests: XMLHttpRequest and fetch. It also supports tracking libraries and tools based on XMLHttpRequest and fetch - such as Axios, SuperAgent, OpenApi, and so on.\nLet’s see how the SkyWalking browser monitoring intercepts HTTP requests:\nAfter this, use window.addEventListener('xhrReadyStateChange', callback) and set the readyState value tosw8 = xxxx in the request header. At the same time, reporting requests information to the back-end side. Finally, we can view trace data on the trace page. The following graphic is from the trace page:\nTo see how we listen for fetch requests, let’s see the source code of fetch\nAs you can see, it creates a promise and a new XMLHttpRequest object. Because the code of the fetch is built into the browser, it must monitor the code execution first. Therefore, when we add listening events, we can\u0026rsquo;t monitor the code in the fetch. Just after monitoring the code execution, let\u0026rsquo;s rewrite the fetch:\nimport { fetch } from \u0026#39;whatwg-fetch\u0026#39;; window.fetch = fetch; In this way, we can intercept the fetch request through the above method.\nAdditional Resources  End-User Tracing in a SkyWalking-Observed Browser.  
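As a closing, assumed sketch of the XMLHttpRequest interception described above (simplified; the real sw8 value is built by the library from the active trace context and is only a placeholder here):
// Illustrative monkey-patching only, not skywalking-client-js source code.
const originalOpen = XMLHttpRequest.prototype.open;
const originalSend = XMLHttpRequest.prototype.send;

XMLHttpRequest.prototype.open = function (this: XMLHttpRequest, ...args: any[]) {
  // Remember the target URL so a span could be created for this request.
  (this as any).__swUrl = args[1];
  return (originalOpen as any).apply(this, args);
};

XMLHttpRequest.prototype.send = function (this: XMLHttpRequest, body?: any) {
  // Inject the cross-process context header before the request goes out.
  this.setRequestHeader('sw8', 'placeholder-trace-context');
  this.addEventListener('readystatechange', () => {
    if (this.readyState === 4) {
      // On completion, the finished segment would be reported to the OAP backend.
    }
  });
  return originalSend.call(this, body);
};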
","title":"End-User Tracing in a SkyWalking-Observed Browser","url":"/blog/end-user-tracing-in-a-skywalking-observed-browser/"},{"content":"SourceMarker is an open-source continuous feedback IDE plugin built on top of Apache SkyWalking, a popular open-source APM system with monitoring, tracing, and diagnosing capabilities for distributed software systems. SkyWalking, a truly holistic system, provides the means for automatically producing, storing, and querying software operation metrics. It requires little to no code changes to implement and is lightweight enough to be used in production. By itself, SkyWalking is a formidable force in the realm of continuous monitoring technology.\nSourceMarker, leveraging the continuous monitoring functionality provided by SkyWalking, creates continuous feedback technology by automatically linking software operation metrics to source code and displaying feedback directly inside of the IDE. While currently only supporting JetBrains-based IDEs and JVM-based programming languages, SourceMarker may be extended to support any number of programming languages and IDEs. Using SourceMarker, software developers can understand and validate software operation inside of their IDE. Instead of charts that indicate the health of the application, software developers can view the health of individual source code components and interpret software operation metrics from a much more familiar perspective. Such capabilities improve productivity as time spent continuously context switching from development to monitoring would be eliminated.\nLogging The benefits of continuous feedback technology are immediately apparent with the ability to view and search logs directly from source code. Instead of tailing log files or viewing logs through the browser, SourceMarker allows software developers to navigate production logs just as easily as they navigate source code. By using the source code as the primary perspective for navigating logs, SourceMarker allows software developers to view logs specific to any package, class, method, or line directly from the context of the source code which resulted in those logs.\nTracing Furthermore, continuous feedback technology offers software developers a deeper understanding of software by explicitly tying the implicit software operation to source code. Instead of visualizing software traces as Gantt charts, SourceMarker allows software developers to step through trace stacks while automatically resolving trace tags and logs. With SourceMarker, software developers can navigate production software traces in much the same way one debugs local applications.\nAlerting Most importantly, continuous feedback technology keeps software developers aware of production software operation. Armed with an APM-powered IDE, every software developer can keep track of the behavior of any method, class, package, and even the entire application itself. Moreover, this allows for source code to be the medium through which production bugs are made evident, thereby creating the feasibility of source code with the ability to self-diagnose and convey its own health.\n Download SourceMarker SourceMarker aims to bridge the theoretical and empirical practices of software development through continuous feedback. 
The goal is to make developing software with empirical data feel natural and intuitive, creating more complete software developers that understand the entire software development cycle.\n https://github.com/sourceplusplus/sourcemarker  This project is still early in its development, so if you think of any ways to improve SourceMarker, please let us know.\n","title":"SourceMarker: Continuous Feedback for Developers","url":"/blog/2021-03-16-continuous-feedback/"},{"content":"SkyWalking LUA Nginx 0.4.1 is released. Go to downloads page to find release tars.\n fix: missing constants in the rockspec.  ","title":"Release Apache SkyWalking LUA Nginx 0.4.1","url":"/events/release-apache-skywalking-lua-nginx-0.4.1/"},{"content":"SkyWalking LUA Nginx 0.4.0 is released. Go to downloads page to find release tars.\n Add a global field 'includeHostInEntrySpan', type 'boolean', to mark whether the entrySpan includes the host/domain. Add destroyBackendTimer to stop reporting metrics. Doc: set the random seed in the init_worker phase. Locally cache some variables and reuse them in the Lua module. Enable the local cache and use tablepool to reuse temporary tables.  ","title":"Release Apache SkyWalking LUA Nginx 0.4.0","url":"/events/release-apache-skywalking-lua-nginx-0.4.0/"},{"content":"SkyWalking Client JS 0.4.0 is released. Go to downloads page to find release tars.\n Update stack and message in logs. Fix wrong URL when using a relative path in xhr.  ","title":"Release Apache SkyWalking Client JS 0.4.0","url":"/events/release-apache-skywalking-client-js-0-4-0/"},{"content":"SkyWalking Satellite 0.1.0 is released. Go to downloads page to find release tars.\nFeatures  Build the Satellite core structure. Add prometheus self telemetry. Add kafka client plugin. Add none-fallbacker plugin. Add timer-fallbacker plugin. Add nativelog-kafka-forwarder plugin. Add memory-queue plugin. Add mmap-queue plugin. Add grpc-nativelog-receiver plugin. Add http-nativelog-receiver plugin. Add grpc-server plugin. Add http-server plugin. Add prometheus-server plugin.  Bug Fixes Issues and PR  All issues are here. All pull requests are here.  ","title":"Release Apache SkyWalking Satellite 0.1.0","url":"/events/release-apache-skwaylking-satellite-0-1-0/"},{"content":"Juntao Zhang led and finished the rebuild of the whole SkyWalking website, migrating it to a fully automatic website update process that is very friendly to users. During the rebuild, he spent several months of contributions bringing the documentation of our main repository to be hosted on the SkyWalking website, which can also host the documentation of other repositories. We had been waiting for this for years.\nIn the website repository alone, he has contributed 3800 LOC through 26 commits.\nWe are honored to have him on the PMC team.\n","title":"Welcome Juntao Zhang (张峻滔) to join the PMC","url":"/events/welcome-juntao-zhang-to-join-the-pmc/"},{"content":" Origin: Observe VM Service Meshes with Apache SkyWalking and the Envoy Access Log Service - The New Stack\n Apache SkyWalking: an APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based (Docker, Kubernetes, Mesos) architectures.\nEnvoy Access Log Service: Access Log Service (ALS) is an Envoy extension that emits detailed access logs of all requests going through Envoy.\nBackground In the previous post, we talked about the observability of a service mesh in a Kubernetes environment, and applied it to the bookinfo application in practice. 
We also mentioned that, in order to map the IP addresses into services, SkyWalking needs access to the service metadata from a Kubernetes cluster, which is not available for services deployed in virtual machines (VMs). In this post, we will introduce a new analyzer in SkyWalking that leverages Envoy’s metadata exchange mechanism to decouple with Kubernetes. The analyzer is designed to work in Kubernetes environments, VM environments, and hybrid environments. If there are virtual machines in your service mesh, you might want to try out this new analyzer for better observability, which we will demonstrate in this tutorial.\nHow it works The mechanism of how the analyzer works is the same as what we discussed in the previous post. What makes VMs different from Kubernetes is that, for VM services, there are no places where we can fetch the metadata to map the IP addresses into services.\nThe basic idea we present in this article is to carry the metadata along with Envoy’s access logs, which is called metadata-exchange mechanism in Envoy. When Istio pilot-agent starts an Envoy proxy as a sidecar of a service, it collects the metadata of that service from the Kubernetes platform, or a file on the VM where that service is deployed, and injects the metadata into the bootstrap configuration of Envoy. Envoy will carry the metadata transparently when emitting access logs to the SkyWalking receiver.\nBut how does Envoy compose a piece of a complete access log that involves the client side and server side? When a request goes out from Envoy, a plugin of istio-proxy named \u0026ldquo;metadata-exchange\u0026rdquo; injects the metadata into the http headers (with a prefix like x-envoy-downstream-), and the metadata is propagated to the server side. The Envoy sidecar of the server side receives the request and parses the headers into metadata, and puts the metadata into the access log, keyed by wasm.downstream_peer. The server side Envoy also puts its own metadata into the access log keyed by wasm.upstream_peer. Hence the two sides of a single request are completed.\nWith the metadata-exchange mechanism, we can use the metadata directly without any extra query.\nExample In this tutorial, we will use another demo application Online Boutique that consists of 10+ services so that we can deploy some of them in VMs and make them communicate with other services deployed in Kubernetes.\nTopology of Online Boutique In order to cover as many cases as possible, we will deploy CheckoutService and PaymentService on VM and all the other services on Kubernetes, so that we can cover the cases like Kubernetes → VM (e.g. Frontend → CheckoutService), VM → Kubernetes (e.g. CheckoutService → ShippingService), and VM → VM ( e.g. CheckoutService → PaymentService).\nNOTE: All the commands used in this tutorial are accessible on GitHub.\ngit clone https://github.com/SkyAPMTest/sw-als-vm-demo-scripts cd sw-als-vm-demo-scripts Make sure to init the gcloud SDK properly before moving on. Modify the GCP_PROJECT in file env.sh to your own project name. Most of the other variables should be OK to work if you keep them intact. 
If you would like to use ISTIO_VERSION \u0026gt;/= 1.8.0, please make sure this patch is included.\n  Prepare Kubernetes cluster and VM instances 00-create-cluster-and-vms.sh creates a new GKE cluster and 2 VM instances that will be used through the entire tutorial, and sets up some necessary firewall rules for them to communicate with each other.\n  Install Istio and SkyWalking 01a-install-istio.sh installs Istio Operator with spec resources/vmintegration.yaml. In the YAML file, we enable the meshExpansion that supports VM in mesh. We also enable the Envoy access log service and specify the address skywalking-oap.istio-system.svc.cluster.local:11800 to which Envoy emits the access logs. 01b-install-skywalking.sh installs Apache SkyWalking and sets the analyzer to mx-mesh.\n  Create files to initialize the VM 02-create-files-to-transfer-to-vm.sh creates necessary files that will be used to initialize the VMs. 03-copy-work-files-to-vm.sh securely transfers the generated files to the VMs with gcloud scp command. Now use ./ssh.sh checkoutservice and ./ssh.sh paymentservice to log into the two VMs respectively, and cd to the ~/work directory, execute ./prep-checkoutservice.sh on checkoutservice VM instance and ./prep-paymentservice.sh on paymentservice VM instance. The Istio sidecar should be installed and started properly. To verify that, use tail -f /var/logs/istio/istio.log to check the Istio logs. The output should be something like:\n2020-12-12T08:07:07.348329Z\tinfo\tsds\tresource:default new connection 2020-12-12T08:07:07.348401Z\tinfo\tsds\tSkipping waiting for gateway secret 2020-12-12T08:07:07.348401Z\tinfo\tsds\tSkipping waiting for gateway secret 2020-12-12T08:07:07.568676Z\tinfo\tcache\tRoot cert has changed, start rotating root cert for SDS clients 2020-12-12T08:07:07.568718Z\tinfo\tcache\tGenerateSecret default 2020-12-12T08:07:07.569398Z\tinfo\tsds\tresource:default pushed key/cert pair to proxy 2020-12-12T08:07:07.949156Z\tinfo\tcache\tLoaded root cert from certificate ROOTCA 2020-12-12T08:07:07.949348Z\tinfo\tsds\tresource:ROOTCA pushed root cert to proxy 2020-12-12T20:12:07.384782Z\tinfo\tsds\tresource:default pushed key/cert pair to proxy 2020-12-12T20:12:07.384832Z\tinfo\tsds\tDynamic push for secret default The dnsmasq configuration address=/.svc.cluster.local/{ISTIO_SERVICE_IP_STUB} also resolves the domain names ended with .svc.cluster.local to Istio service IP, so that you are able to access the Kubernetes services in the VM by fully qualified domain name (FQDN) such as httpbin.default.svc.cluster.local.\n  Deploy demo application Because we want to deploy CheckoutService and PaymentService manually on VM, resources/google-demo.yaml removes the two services from the original YAML . 04a-deploy-demo-app.sh deploys the other services on Kubernetes. Then log into the 2 VMs, run ~/work/deploy-checkoutservice.sh and ~/work/deploy-paymentservice.sh respectively to deploy CheckoutService and PaymentService.\n  Register VMs to Istio Services on VMs can access the services on Kubernetes by FQDN, but that’s not the case when the Kubernetes services want to talk to the VM services. The mesh has no idea where to forward the requests such as checkoutservice.default.svc.cluster.local because checkoutservice is isolated in the VM. Therefore, we need to register the services to the mesh. 
04b-register-vm-with-istio.sh registers the VM services to the mesh by creating a \u0026ldquo;dummy\u0026rdquo; service without running Pods, and a WorkloadEntry to bridge the \u0026ldquo;dummy\u0026rdquo; service with the VM service.\n  Done! The demo application contains a load generator service that performs requests repeatedly. We only need to wait a few seconds, and then open the SkyWalking web UI to check the results.\nexport POD_NAME=$(kubectl get pods --namespace istio-system -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o jsonpath=\u0026quot;{.items[0].metadata.name}\u0026quot;) echo \u0026quot;Visit http://127.0.0.1:8080 to use your application\u0026quot; kubectl port-forward $POD_NAME 8080:8080 --namespace istio-system Navigate the browser to http://localhost:8080. The metrics and topology should be there.\nTroubleshooting If you face any trouble when walking through the steps, here are some common problems and possible solutions:\n  VM service cannot access Kubernetes services? It’s likely the DNS on the VM doesn’t correctly resolve the fully qualified domain names. Try to verify that with nslookup istiod.istio-system.svc.cluster.local. If it doesn’t resolve to the Kubernetes CIDR address, recheck the step in prep-checkoutservice.sh and prep-paymentservice.sh. If the DNS works correctly, try to verify that Envoy has fetched the upstream clusters from the control plane with curl http://localhost:15000/clusters. If it doesn’t contain the target service, recheck prep-checkoutservice.sh.\n  Services are normal but nothing on SkyWalking WebUI? Check the SkyWalking OAP logs via kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=oap\u0026quot; -o name) and WebUI logs via kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o name) to see whether there are any error logs. Also, make sure the time zone at the bottom-right of the browser is set to UTC +0.\n  Additional Resources  Observe a Service Mesh with Envoy ALS.  ","title":"Observe VM Service Meshes with Apache SkyWalking and the Envoy Access Log Service","url":"/blog/obs-service-mesh-vm-with-sw-and-als/"},{"content":"When using the SkyWalking Java agent, people can usually propagate context easily. They do not even need to change the business code. However, it becomes harder when you want to propagate context between threads using ThreadPoolExecutor. You can use the RunnableWrapper in the maven artifact org.apache.skywalking:apm-toolkit-trace. This way you must change your code. Development managers usually don\u0026rsquo;t like this because there may be lots of projects, or lots of runnable code. If they stop using SkyWalking some day, the added code will be superfluous and inelegant.\nIs there a way to propagate context without changing the business code? Yes.\nThe SkyWalking Java agent enhances a class by adding a field and implementing an interface. ThreadPoolExecutor is a special class that is used widely; we don\u0026rsquo;t even know when and where it is loaded. Most JVMs do not allow changes in the class file format for classes that have been loaded previously. So SkyWalking cannot successfully enhance ThreadPoolExecutor by retransforming it once it has been loaded. However, we can apply an advice to the ThreadPoolExecutor#execute method and wrap the Runnable param using our own agent, then let the SkyWalking Java agent enhance the wrapper class. 
An advice does not change the layout of a class.\nNow we should decide how to do this. You can use the RunnableWrapper in the maven artifact org.apache.skywalking:apm-toolkit-trace to wrap the param, but then you face another problem. This RunnableWrapper has a plugin whose activation condition is checking for the @TraceCrossThread annotation. Agent core uses net.bytebuddy.pool.TypePool.Default.WithLazyResolution.LazyTypeDescription to find the annotations of a class. The LazyTypeDescription finds annotations by using a URLClassLoader with no URLs if the classloader is null (bootstrap classloader). So it cannot find the @TraceCrossThread class unless you change the LocationStrategy of the SkyWalking Java agent builder.\nIn this project, I write my own wrapper class and simply add a plugin with a name match condition. Next, let me show you how these two agents work together.\n  Move the plugin to the skywalking \u0026ldquo;plugins\u0026rdquo; directory.\n  Add this agent after the SkyWalking agent since the wrapper class should not be loaded before the SkyWalking agent instrumentation has finished. For example,\n java -javaagent:/path/to/skywalking-agent.jar -javaagent:/path/to/skywalking-tool-agent-v1.0.0.jar \u0026hellip;\n   When our application runs\n The SkyWalking Java agent adds a transformer by parsing the plugin for enhancing the wrapper class in the tool agent. The tool agent loads the wrapper class into the bootstrap classloader. This triggers the previous transformer. The tool agent applies an advice to the ThreadPoolExecutor class, wrapping the java.lang.Runnable param of the \u0026ldquo;execute\u0026rdquo; method with the wrapper class. Now SkyWalking propagates the context with the wrapper class.    Enjoy tracing with ThreadPoolExecutor in SkyWalking!\n","title":"Apache SkyWalking: How to propagate context between threads when using ThreadPoolExecutor","url":"/blog/2021-02-09-skywalking-trace-threadpool/"},{"content":"SkyWalking CLI 0.6.0 is released. Go to downloads page to find release tars.\n  Features\n Support authorization when connecting to the OAP Add install command and manifest sub-command Add event command and report sub-command    Bug Fixes\n Fix the bug that can\u0026rsquo;t query JVM instance metrics    Chores\n Set up a simple test with GitHub Actions Reorganize the project layout Update year in NOTICE Add missing license of swck Use license-eye to check license header    ","title":"Release Apache SkyWalking CLI 0.6.0","url":"/events/release-apache-skywalking-cli-0-6-0/"},{"content":" Origin: Tetrate.io blog\n Background Apache SkyWalking\u0026ndash; the APM tool for distributed systems\u0026ndash; has historically focused on providing observability around tracing and metrics, but service performance is often affected by the host. The newest release, SkyWalking 8.4.0, introduces a new feature for monitoring virtual machines. Users can easily detect possible problems from the dashboard\u0026ndash; for example, when CPU usage is overloaded, when there’s not enough memory or disk space, or when the network status is unhealthy, etc.\nHow it works SkyWalking leverages Prometheus and OpenTelemetry for collecting metrics data as we did for Istio control plane metrics; Prometheus is mature and widely used, and we expect to see increased adoption of the new CNCF project, OpenTelemetry. The SkyWalking OAP Server receives the metrics data in OpenCensus format from OpenTelemetry. The process is as follows:\n Prometheus Node Exporter collects metrics data from the VMs. 
OpenTelemetry Collector fetches metrics from Node Exporters via the Prometheus Receiver, and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter. The SkyWalking OAP Server parses the expressions with MAL to filter/calculate/aggregate and store the results. The expression rules are in /config/otel-oc-rules/vm.yaml. We can now see the data on the SkyWalking WebUI dashboard.  What to monitor SkyWalking provides default monitoring metrics including:\n CPU Usage (%) Memory RAM Usage (MB) Memory Swap Usage (MB) CPU Average Used CPU Load Memory RAM (total/available/used MB) Memory Swap (total/free MB) File System Mount point Usage (%) Disk R/W (KB/s) Network Bandwidth Usage (receive/transmit KB/s) Network Status (tcp_curr_estab/tcp_tw/tcp_alloc/sockets_used/udp_inuse) File fd Allocated  The following is how it looks when we monitor Linux:\nHow to use To enable this feature, we need to install Prometheus Node Exporter and OpenTelemetry Collector and activate the VM monitoring rules in the SkyWalking OAP Server.\nInstall Prometheus Node Exporter wget https://github.com/prometheus/node_exporter/releases/download/v1.0.1/node_exporter-1.0.1.linux-amd64.tar.gz tar xvfz node_exporter-1.0.1.linux-amd64.tar.gz cd node_exporter-1.0.1.linux-amd64 ./node_exporter On Linux, Node Exporter exposes metrics on port 9100 by default. When it is running, we can get the metrics from the /metrics endpoint. Use a web browser or the curl command to verify.\ncurl http://localhost:9100/metrics We should see all the metrics in the output, like:\n# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile=\u0026#34;0\u0026#34;} 7.7777e-05 go_gc_duration_seconds{quantile=\u0026#34;0.25\u0026#34;} 0.000113756 go_gc_duration_seconds{quantile=\u0026#34;0.5\u0026#34;} 0.000127199 go_gc_duration_seconds{quantile=\u0026#34;0.75\u0026#34;} 0.000147778 go_gc_duration_seconds{quantile=\u0026#34;1\u0026#34;} 0.000371894 go_gc_duration_seconds_sum 0.292994058 go_gc_duration_seconds_count 2029 ... Note: We only need to install Node Exporter, rather than the Prometheus server. If you want to get more information about Prometheus Node Exporter, see: https://prometheus.io/docs/guides/node-exporter/\nInstall OpenTelemetry Collector We can quickly install an OpenTelemetry Collector instance by using docker-compose with the following steps:\n Create a directory to store the configuration files, like /usr/local/otel. 
Create docker-compose.yaml and otel-collector-config.yaml in this directory represented below:  docker-compose.yaml\nversion:\u0026#34;2\u0026#34;services:# Collectorotel-collector:# Specify the image to start the container fromimage:otel/opentelemetry-collector:0.19.0# Set the otel-collector configfile command:[\u0026#34;--config=/etc/otel-collector-config.yaml\u0026#34;]# Mapping the configfile to host directoryvolumes:- ./otel-collector-config.yaml:/etc/otel-collector-config.yamlports:- \u0026#34;13133:13133\u0026#34;# health_check extension- \u0026#34;55678\u0026#34;# OpenCensus receiverotel-collector-config.yaml\nextensions:health_check:# A receiver is how data gets into the OpenTelemetry Collectorreceivers:# Set Prometheus Receiver to collects metrics from targets# It’s supports the full set of Prometheus configurationprometheus:config:scrape_configs:- job_name:\u0026#39;otel-collector\u0026#39;scrape_interval:10sstatic_configs:# Replace the IP to your VMs‘s IP which has installed Node Exporter- targets:[\u0026#39;vm1:9100\u0026#39;]- targets:[\u0026#39;vm2:9100\u0026#39;]- targets:[‘vm3:9100\u0026#39; ]processors:batch:# An exporter is how data gets sent to different systems/back-endsexporters:# Exports metrics via gRPC using OpenCensus formatopencensus:endpoint:\u0026#34;docker.for.mac.host.internal:11800\u0026#34;# The OAP Server addressinsecure:truelogging:logLevel:debugservice:pipelines:metrics:receivers:[prometheus]processors:[batch]exporters:[logging, opencensus]extensions:[health_check]In this directory use command docker-compose to start up the container:  docker-compose up -d After the container is up and running, you should see metrics already exported in the logs:\n... Metric #165 Descriptor: -\u0026gt; Name: node_network_receive_compressed_total -\u0026gt; Description: Network device statistic receive_compressed. -\u0026gt; Unit: -\u0026gt; DataType: DoubleSum -\u0026gt; IsMonotonic: true -\u0026gt; AggregationTemporality: AGGREGATION_TEMPORALITY_CUMULATIVE DoubleDataPoints #0 Data point labels: -\u0026gt; device: ens4 StartTime: 1612234754364000000 Timestamp: 1612235563448000000 Value: 0.000000 DoubleDataPoints #1 Data point labels: -\u0026gt; device: lo StartTime: 1612234754364000000 Timestamp: 1612235563448000000 Value: 0.000000 ... If you want to get more information about OpenTelemetry Collector see: https://opentelemetry.io/docs/collector/\nSet up SkyWalking OAP Server To activate the oc handler and vm relevant rules, set your environment variables:\nSW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OC_RULES=vm Note: If there are other rules already activated , you can add vm with use , as a separator.\nSW_OTEL_RECEIVER_ENABLED_OC_RULES=vm,oap Start the SkyWalking OAP Server.\nDone! After all of the above steps are completed, check out the SkyWalking WebUI. Dashboard VM provides the default metrics of all observed virtual machines. Note: Clear the browser local cache if you used it to access deployments of previous SkyWalking versions.\nAdditional Resources  Read more about the SkyWalking 8.4 release highlights. Get more SkyWalking updates on Twitter.  ","title":"SkyWalking 8.4 provides infrastructure monitoring","url":"/blog/2021-02-07-infrastructure-monitoring/"},{"content":" Origin: Tetrate.io blog\n The Apache SkyWalking team today announced the 8.4 release is generally available. This release fills the gap between all previous versions of SkyWalking and the logging domain area. 
The release also advances SkyWalking’s capabilities for infrastructure observability, starting with virtual machine monitoring.\nBackground SkyWalking has historically focused on the tracing and metrics fields of observability. As its features for tracing, metrics and service level monitoring have become more and more powerful and stable, the SkyWalking team has started to explore new scenarios covered by observability. Because service performance is reflected in the logs, and is highly impacted by the infrastructure on which it runs, SkyWalking brings these two fields into the 8.4 release. This release blog briefly introduces the two new features as well as some other notable changes.\nLogs Metrics, tracing, and logging are considered the three pillars of observability [1]. SkyWalking had the full features of metrics and tracing prior to 8.4; today, as 8.4 is released, the last piece of the jigsaw is now in place.\nFigure 1: Logs Collected By SkyWalking\nFigure 2: Logs Collected By SkyWalking\nThe Java agent firstly provides SDKs to enhance the widely-used logging frameworks, log4j (1.x and 2.x) [2] and logback [3], and send the logs to the SkyWalking backend (OAP). The latter is able to collect logs from wherever the protocol is implemented. This is not a big deal, but when it comes to the correlation between logs and traces, the traditional solution is to print the trace IDs in the logs, and pick the IDs in the error logs to query the related traces. SkyWalking just simplifies the workflow by correlating the logs and traces natively. Navigating between traces and their related logs is as simple as clicking a button.\nFigure 3: Correlation Between Logs and Traces\nInfrastructure Monitoring SkyWalking is known as an application performance monitoring tool. One of the most important factors that impacts the application’s performance is the infrastructure on which the application runs. In the 8.4 release, we added the monitoring metrics of virtual machines into the dashboard.\nFigure 4: VM Metrics\nFundamental metrics such as CPU Used, Memory Used, Disk Read / Write and Network Usage are available on the dashboard. And as usual, those metrics are also available to be configured as alarm triggers when needed.\nDynamic Configurations at Agent Side Dynamic configuration at the backend side has long existed in SkyWalking for several versions. Now, it finally comes to the agent side! Prior to 8.4, you’d have to restart the target services when you modify some configuration items of the agent \u0026ndash; for instance, sampling rate (agent side), ignorable endpoint paths, etc. Now, say goodbye to rebooting. Modifying configurations is not the only usage of the dynamic configuration mechanism. The latter gives countless possibilities to the agent side in terms of dynamic behaviours, e.g. enabling / disabling plugins, enabling / disabling the whole agent, etc. Just imagine!\nGrouped Service Topology This enhancement is from the UI. SkyWalking backend supports grouping the services by user-defined dimensions. In a real world use case, the services are usually grouped by business group or department. When a developer opens the topology map, out of hundreds of services, he or she may just want to focus on the services in charge. The grouped service topology comes to the rescue: one can now choose to display only services belonging to a specified group.\nFigure 5: Grouped Service Topology\nOther Notable Enhancements  Agent: resolves domain names to look up backend service IP addresses. 
Backend: meter receiver supports meter analysis language (MAL). Backend: several CVE fixes. Backend: supports Envoy {AccessLog,Metrics}Service API V3 and adopts MAL.  Links  [1] https://peter.bourgon.org/blog/2017/02/21/metrics-tracing-and-logging.html [2] https://logging.apache.org/log4j/2.x/ [3] http://logback.qos.ch  Additional Resources  Read more about the SkyWalking 8.4 release highlights. Get more SkyWalking updates on Twitter.  ","title":"Apache SkyWalking 8.4: Logs, VM Monitoring, and Dynamic Configurations at Agent Side","url":"/blog/skywalking8-4-release/"},{"content":"SkyWalking 8.4.0 is released. Go to downloads page to find release tars. Changes by Version\nProject  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. 
Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. 
Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.4.0","url":"/events/release-apache-skywalking-apm-8-4-0/"},{"content":"Background The verifier is an important part of the next generation End-to-End Testing framework (NGE2E), which is responsible for verifying whether the actual output satisfies the expected template.\nDesign Thinking We will implement the verifier with Go template, plus some enhancements. Firstly, users need to write a Go template file with provided functions and actions to describe how the expected data looks like. Then the verifer renders the template with the actual data object. Finally, the verifier compares the rendered output with the actual data. If the rendered output is not the same with the actual output, it means the actual data is inconsist with the expected data. Otherwise, it means the actual data match the expected data. On failure, the verifier will also print out what are different between expected and actual data.\nBranches / Actions The verifier inherits all the actions from the standard Go template, such as if, with, range, etc. In addition, we also provide some custom actions to satisfy our own needs.\nList Elements Match contains checks if the actual list contains elements that match the given template.\nExamples:\nmetrics:{{- contains .metrics }}- name:{{notEmpty .name }}id:{{notEmpty .id }}value:{{gt .value 0 }}{{- end }}It means that the list metrics must contain an element whose name and id are not empty, and value is greater than 0.\nmetrics:{{- contains .metrics }}- name:p95value:{{gt .value 0 }}- name:p99value:{{gt .value 0 }}{{- end }}This means that the list metrics must contain an element named p95 with a value greater than 0, and an element named p95 with a value greater than 0. 
Besides the two element, the list metrics may or may not have other random elements.\nFunctions Users can use these provided functions in the template to describe the expected data.\nNot Empty notEmpty checks if the string s is empty.\nExample:\nid:{{notEmpty .id }}Regexp match regexp checks if string s matches the regular expression pattern.\nExamples:\nlabel:{{regexp .label \u0026#34;ratings.*\u0026#34; }}Base64 b64enc s returns the Base64 encoded string of s.\nExamples:\nid:{{b64enc \u0026#34;User\u0026#34; }}.static-suffix# this evalutes the base64 encoded string of \u0026#34;User\u0026#34;, concatenated with a static suffix \u0026#34;.static-suffix\u0026#34;Result:\nid:VXNlcg==.static-suffixFull Example Here is an example of expected data:\n# expected.data.yamlnodes:- id:{{b64enc \u0026#34;User\u0026#34; }}.0name:Usertype:USERisReal:false- id:{{b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1name:Your_ApplicationNametype:TomcatisReal:true- id:{{$h2ID := (index .nodes 2).id }}{{ notEmpty $h2ID }}# We assert that nodes[2].id is not empty and save it to variable `h2ID` for later usename:localhost:-1type:H2isReal:falsecalls:- id:{{notEmpty (index .calls 0).id }}source:{{b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1target:{{$h2ID }}# We use the previously assigned variable `h2Id` to asert that the `target` is equal to the `id` of the nodes[2]detectPoints:- CLIENT- id:{{b64enc \u0026#34;User\u0026#34; }}.0-{{ b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1source:{{b64enc \u0026#34;User\u0026#34; }}.0target:{{b64enc \u0026#34;Your_ApplicationName\u0026#34; }}.1detectPoints:- SERVERwill validate this data:\n# actual.data.yamlnodes:- id:VXNlcg==.0name:Usertype:USERisReal:false- id:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1name:Your_ApplicationNametype:TomcatisReal:true- id:bG9jYWxob3N0Oi0x.0name:localhost:-1type:H2isReal:falsecalls:- id:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1-bG9jYWxob3N0Oi0x.0source:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1detectPoints:- CLIENTtarget:bG9jYWxob3N0Oi0x.0- id:VXNlcg==.0-WW91cl9BcHBsaWNhdGlvbk5hbWU=.1source:VXNlcg==.0detectPoints:- SERVERtarget:WW91cl9BcHBsaWNhdGlvbk5hbWU=.1# expected.data.yamlmetrics:{{- contains .metrics }}- name:{{notEmpty .name }}id:{{notEmpty .id }}value:{{gt .value 0 }}{{- end }}will validate this data:\n# actual.data.yamlmetrics:- name:business-zone::projectAid:YnVzaW5lc3Mtem9uZTo6cHJvamVjdEE=.1value:1- name:system::load balancer1id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMQ==.1value:0- name:system::load balancer2id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMg==.1value:0and will report an error when validating this data, because there is no element with a value greater than 0:\n# actual.data.yamlmetrics:- name:business-zone::projectAid:YnVzaW5lc3Mtem9uZTo6cHJvamVjdEE=.1value:0- name:system::load balancer1id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMQ==.1value:0- name:system::load balancer2id:c3lzdGVtOjpsb2FkIGJhbGFuY2VyMg==.1value:0The contains does an unordered list verification, in order to do list verifications including orders, you can simply use the basic ruls like this:\n# expected.data.yamlmetrics:- name:p99value:{{gt (index .metrics 0).value 0 }}- name:p95value:{{gt (index .metrics 1).value 0 }}which expects the actual metrics list to be exactly ordered, with first element named p99 and value greater 0, second element named p95 and value greater 0.\n","title":"[Design] The Verifier of NGE2E","url":"/blog/2021-02-01-e2e-verifier-design/"},{"content":"SkyWalking Cloud on Kubernetes 0.2.0 is released. 
Go to downloads page to find release tars.\n Introduce custom metrics adapter to SkyWalking OAP cluster for Kubernetes HPA autoscaling. Add RBAC files and service account to support Kubernetes coordination. Add default and validation webhooks to operator controllers. Add UI CRD to deploy skywalking UI server. Add Fetcher CRD to fetch metrics from other telemetry systems, for example, Prometheus.  ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.2.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0-2-0/"},{"content":"Apache SkyWalking is an open source APM for distributed systems and an Apache Software Foundation top-level project.\nOn Jan. 11th, 2021, we noticed the Tencent Cloud service, Tencent Service Watcher - TSW, for the first time. Due to the similar short name (SkyWalking is also called SW in the community), we connected with the service team of Tencent Cloud and kindly asked about it.\nThey replied that TSW is purely developed by the Tencent team itself and doesn\u0026rsquo;t have any code dependency on SkyWalking. We didn\u0026rsquo;t push harder.\nBut one week later, on Jan 18th, 2021, our V.P., Sheng, got the report again from Haoyang, a SkyWalking PMC member, through WeChat DM (direct message). He provided complete evidence to prove that TSW actually re-distributed SkyWalking\u0026rsquo;s Java agent. We keep one copy of their agent\u0026rsquo;s distribution (as of Jan. 18th), which could be downloaded here.\nSome typical pieces of evidence are here\n  ServiceManager is copied and package-name changed in the TSW\u0026rsquo;s agent.   ContextManager is copied and package-name changed in the TSW\u0026rsquo;s agent.   At the same time, we checked their tsw-client-package.zip; it didn\u0026rsquo;t include SkyWalking\u0026rsquo;s LICENSE and NOTICE. Also, they didn\u0026rsquo;t mention on their website that the TSW agent is a re-distribution of SkyWalking.\nWith all the above information, we had enough reason to believe, from the technical perspective, that they were violating the Apache 2.0 License.\nFrom Jan. 18th, 2021, we sent the mail [Apache 2.0 License Violation] Tencent Cloud TSW service doesn't follow the Apache 2.0 License to brief the SkyWalking PMC, and took the following actions to connect with Tencent.\n Made a direct call to the Tencent Open Source Office. Connected with the Tencent Cloud TVP program committee, as Sheng Wu (our V.P.) is a Tencent Cloud TVP. Talked with the Tencent Cloud team lead.  In all the above channels, we provided the evidence of the copy-and-redistribution behaviors, and requested them to re-evaluate their statements on the website and follow the License\u0026rsquo;s requirements.\nResolution On the night of Jan. 19th, 2021 (UTC+8), we received a response from the Tencent Cloud team. They admitted the violation and made the following changes\n  The Tencent Cloud TSW service page states that the agent is a fork (re-distribution) of the Apache SkyWalking agent.   TSW agent distributions include SkyWalking\u0026rsquo;s LICENSE and NOTICE. Below is the screenshot; you could download it from their product page. We keep a copy of their Jan. 19th, 2021 distribution here.   We have updated the status on the PMC mail list. This license violation issue has been resolved for now.\nThe SkyWalking community and program management committee will keep our eyes on Tencent TSW. 
","title":"[Resolved][License Issue] Tencent Cloud TSW service violates the Apache 2.0 License when using SkyWalking.","url":"/blog/2021-01-23-tencent-cloud-violates-aplv2/"},{"content":" 第一节:开篇介绍 第二节:数字游戏(Number Game) 第三节:社区原则(Community “Principles”) 第四节:基金会原则(For public good) 第五节:一些不太好的事情  B站视频地址\n","title":"[视频] 开放原子开源基金会2020年度峰会 - Educate community Over Support community","url":"/zh/2021-01-21-educate-community/"},{"content":"Elastic announced their license change, Upcoming licensing changes to Elasticsearch and Kibana.\n We are moving our Apache 2.0-licensed source code in Elasticsearch and Kibana to be dual licensed under Server Side Public License (SSPL) and the Elastic License, giving users the choice of which license to apply. This license change ensures our community and customers have free and open access to use, modify, redistribute, and collaborate on the code. It also protects our continued investment in developing products that we distribute for free and in the open by restricting cloud service providers from offering Elasticsearch and Kibana as a service without contributing back. This will apply to all maintained branches of these two products and will take place before our upcoming 7.11 release. Our releases will continue to be under the Elastic License as they have been for the last three years.\n Also, they provide the FAQ page for more information about the impact on users, developers, and vendors.\nFrom the perspective of the Apache Software Foundation, SSPL has been confirmed as a Category X license (https://www.apache.org/legal/resolved.html#category-x), which means a hard dependency as a part of the core is not allowed. With that, we can\u0026rsquo;t focus only on it anymore. We need to consider other storage options. Right now, we still have InfluxDB, TiDB, and H2 server as storage options that are still Apache 2.0 licensed.\nAs one optional plugin, we need to focus on the client driver license. Right now, we are only using ElasticSearch 7.5.0 and 6.3.2 drivers, which are both Apache 2.0 licensed. So, we are safe. For further upgrades, here is their announcement. They answer these typical cases in the FAQ page.\n  I build a SaaS application using Elasticsearch as the backend, how does this affect me?\n This source code license change should not affect you - you can use our default distribution or develop applications on top of it for free, under the Elastic License. This source-available license does not contain any copyleft provisions and the default functionality is free of charge. For a specific example, you can see our response to a question around this at Magento.\nOur users could still use, redistribute, and sell products/services based on SkyWalking, even if they are self-hosting an unmodified Elasticsearch server.\n  I\u0026rsquo;m using Elasticsearch via APIs, how does this change affect me?\n This change does not affect how you use client libraries to access Elasticsearch. Our client libraries remain licensed under Apache 2.0, with the exception of our Java High Level Rest Client (Java HLRC). The Java HLRC has dependencies on the core of Elasticsearch, and as a result this client library will be licensed under the Elastic License. Over time, we will eliminate this dependency and move the Java HLRC to be licensed under Apache 2.0. 
Until that time, for the avoidance of doubt, we do not consider using the Java HLRC as a client library in development of an application or library used to access Elasticsearch to constitute a derivative work under the Elastic License, and this will not have any impact on how you license the source code of your application using this client library or how you distribute it.\nThe client driver license incompatibility issue will exist; we can\u0026rsquo;t upgrade the driver(s) until they release Apache 2.0 licensed driver jars. But users are still safe to upgrade the drivers by themselves.\n Apache SkyWalking will discuss further actions here. If you have any questions, you are welcome to ask. Later in 2021, we will begin to investigate the possibility of creating SkyWalking\u0026rsquo;s own observability database implementation.\n","title":"Response to Elastic 2021 License Change","url":"/blog/2021-01-17-elastic-change-license/"},{"content":"SkyWalking Client JS 0.3.0 is released. Go to downloads page to find release tars.\n Support tracing starting at the browser. Add traceSDKInternal SDK for tracing SDK internal RPC. Add detailMode SDK for tracing http method and url as tags in spans. Fix conditions of http status.  ","title":"Release Apache SkyWalking Client JS 0.3.0","url":"/events/release-apache-skywalking-client-js-0-3-0/"},{"content":"SkyWalking Eyes 0.1.0 is released. Go to downloads page to find release tars.\n License Header  Add check and fix command. check results can be reported to pull request as comments. fix suggestions can be filed on pull request as edit suggestions.    ","title":"Release Apache SkyWalking Eyes 0.1.0","url":"/events/release-apache-skywalking-eyes-0-1-0/"},{"content":"SkyWalking NodeJS 0.1.0 is released. Go to downloads page to find release tars.\n Initialize project core codes. Built-in http/https plugin. Express plugin. Axios plugin.  ","title":"Release Apache SkyWalking for NodeJS 0.1.0","url":"/events/release-apache-skywalking-nodejs-0-1-0/"},{"content":"SkyWalking Python 0.5.0 is released. Go to downloads page to find release tars.\n  New plugins\n Pyramid Plugin (#102) AioHttp Plugin (#101) Sanic Plugin (#91)    API and enhancements\n @trace decorator supports async functions Supports async task context Optimized path trace ignore Moved exception check to Span.__exit__ Moved Method \u0026amp; Url tags before requests    Fixes:\n BaseExceptions not recorded as errors Allow pending data to send before exit sw_flask general exceptions handled Make skywalking logging Non-global    Chores and tests\n Make tests really run on specified Python version Deprecate 3.5 as it\u0026rsquo;s EOL    ","title":"Release Apache SkyWalking Python 0.5.0","url":"/events/release-apache-skywalking-python-0-5-0/"},{"content":"Apache SkyWalking is an open source APM for distributed systems. It provides tracing, service mesh observability, metrics analysis, alarm, and visualization.\nJust 11 months ago, on Jan. 20th, 2020, SkyWalking hit the 200 contributors mark. With the growth of the project and the community, SkyWalking now includes over 20 sub (ecosystem) projects covering multiple language agents and service mesh, integration with mature open source projects like Prometheus and Spring (Sleuth), and hundreds of libraries to support all tracing/metrics/logs fields. In the past year, the number of contributors has grown astoundingly, and all its metrics point to the vibrancy of the community. 
Many corporate titans are already using SkyWalking in large-scale production environments, including Alibaba, Huawei, Baidu, Tencent, etc.\nRecently, our SkyWalking main repository passed 300 contributors.\nOur website has thousands of views from most countries in the world every week.\nAlthough we know that metrics like GitHub stars and the numbers of open users and contributors are not a determinant of vibrancy, they do show the trend, and we are very proud to share the increased numbers here, too.\nWe doubled those numbers and are honored by the development of our community.\nThank you, all of our contributors. Not just these 300 contributors of the main repository, or nearly 400 contributors in all repositories, counted by GitHub. There are countless people contributing code to SkyWalking\u0026rsquo;s subprojects, ecosystem projects, and private fork versions; writing blogs and guides, translating documents, books, and presentations; setting up learning sessions for new users; convincing friends to join the community as end-users, contributors, even committers. Companies behind those contributors support their employees to work with the community, provide feedback, and contribute improvements and features upstream. Conference organizers share the stages with speakers from the SkyWalking community.\nSkyWalking can’t make this happen without your help. You made this community extraordinary.\nIn this crazy distributed computing and cloud native age, we as a community could make DEV, OPS, and SRE teams' work easier by locating the issue(s) in the haystack quicker than before. That is why we named the project SkyWalking: you have a clear sight line when you stand on the glass bridge Skywalk at Grand Canyon West.\n The 376 contributors counted by GitHub account are listed below (Dec. 22nd, 2020). 
Generated by a tool deveoped by Yousa\n 1095071913 50168383 Ahoo-Wang AirTrioa AlexanderWert AlseinX Ax1an BFergerson BZFYS CharlesMaster ChaunceyLin5152 CommissarXia Cvimer Doublemine ElderJames EvanLjp FatihErdem FeynmanZhou Fine0830 FingerLiu Gallardot GerryYuan HackerRookie Heguoya Hen1ng Humbertzhang IanCao IluckySi Indifer J-Cod3r JaredTan95 Jargon96 Jijun JohnNiang Jozdortraz Jtrust Just-maple KangZhiDong LazyLei LiWenGu Liu-XinYuan Miss-you O-ll-O Patrick0308 QHWG67 Qiliang RandyAbernethy RedzRedz Runrioter SataQiu ScienJus SevenPointOld ShaoHans Shikugawa SoberChina SummerOfServenteen TJ666 TerrellChen TheRealHaui TinyAllen TomMD ViberW Videl WALL-E WeihanLi WildWolfBang WillemJiang Wooo0 XhangUeiJong Xlinlin YczYanchengzhe YoungHu YunaiV ZhHong ZhuoSiChen ZS-Oliver a198720 a526672351 acurtain adamni135 adermxzs adriancole aeolusheath agile6v aix3 aiyanbo ajanthan alexkarezin alonelaval amogege amwyyyy arugal ascrutae augustowebd bai-yang beckhampu beckjin beiwangnull bigflybrother bostin brucewu-fly c1ay candyleer carlvine500 carrypann cheenursn cheetah012 chenpengfei chenvista chess-equality chestarss chidaodezhongsheng chopin-d clevertension clk1st cngdkxw codeglzhang codelipenghui coder-yqj coki230 coolbeevip crystaldust cui-liqiang cuiweiwei cyberdak cyejing dagmom dengliming devkanro devon-ye dimaaan dingdongnigetou dio dmsolr dominicqi donbing007 dsc6636926 duotai dvsv2 dzx2018 echooymxq efekaptan eoeac evanxuhe feelwing1314 fgksgf fuhuo geektcp geomonlin ggndnn gitter-badger glongzh gnr163 gonedays grissom-grissom grissomsh guodongq guyukou gxthrj gzshilu hailin0 hanahmily haotian2015 haoyann hardzhang harvies hepyu heyanlong hi-sb honganan hsoftxl huangyoje huliangdream huohuanhuan innerpeacez itsvse jasonz93 jialong121 jinlongwang jjlu521016 jjtyro jmjoy jsbxyyx justeene juzhiyuan jy00464346 kaanid karott kayleyang kevinyyyy kezhenxu94 kikupotter kilingzhang killGC klboke ksewen kuaikuai kun-song kylixs landonzeng langke93 langyan1022 langyizhao lazycathome leemove leizhiyuan libinglong lilien1010 limfriend linkinshi linliaoy liuhaoXD liuhaoyang liuyanggithup liuzhengyang liweiv lkxiaolou llissery louis-zhou lpf32 lsyf lucperkins lujiajing1126 lunamagic1978 lunchboxav lxliuxuankb lytscu lyzhang1999 magic-akari makingtime maolie masterxxo maxiaoguang64 membphis mestarshine mgsheng michaelsembwever mikkeschiren mm23504570 momo0313 moonming mrproliu muyun12 nacx neatlife neeuq nic-chen nikitap492 nileblack nisiyong novayoung oatiz oflebbe olzhy onecloud360 osiriswd peng-yongsheng pengweiqhca potiuk purgeyao qijianbo010 qinhang3 qiuyu-d qqeasonchen qxo raybi-asus refactor2 remicollet rlenferink rootsongjc rovast scolia sdanzo seifeHu shiluo34 sikelangya simonlei sk163 snakorse songzhendong songzhian sonxy spacewander stalary stenio2011 stevehu stone-wlg sungitly surechen swartz-k sxzaihua tanjunchen tankilo taskmgr tbdpmi terranhu terrymanu tevahp thanq thebouv tianyuak tincopper tinyu0 tom-pytel tristaZero tristan-tsl trustin tsuilouis tuohai666 tzsword-2020 tzy1316106836 vcjmhg vision-ken viswaramamoorthy wankai123 wbpcode web-xiaxia webb2019 weiqiang333 wendal wengangJi wenjianzhang whfjam wind2008hxy withlin wqr2016 wu-sheng wuguangkuo wujun8 wuxingye x22x22 xbkaishui xcaspar xiaoxiangmoe xiaoy00 xinfeingxia85 xinzhuxiansheng xudianyang yanbw yanfch yang-xiaodong yangxb2010000 yanickxia yanmaipian yanmingbi yantaowu yaowenqiang yazong ychandu ycoe yimeng yu199195 yuqichou yuyujulin yymoth zaunist zaygrzx zcai2 zeaposs zhang98722 zhanghao001 zhangjianweibj zhangkewei 
zhangsean zhaoyuguang zhentaoJin zhousiliang163 zhuCheer zifeihan zkscpqm zoidbergwill zoumingzm zouyx zshit zxbu zygfengyuwuzu  ","title":"Celebrate SkyWalking single repository hits the 300 contributors mark","url":"/blog/2021-01-01-300-contributors-mark/"},{"content":"Ke Zhang (a.k.a. HumbertZhang) mainly focuses on the SkyWalking Python agent, he had participated in the \u0026ldquo;Open Source Promotion Plan - Summer 2020\u0026rdquo; and completed the project smoothly, and won the award \u0026ldquo;Most Potential Students\u0026rdquo; that shows his great willingness to continuously contribute to our community.\nUp to date, he has submitted 8 PRs in the Python agent repository, 7 PRs in the main repo, all in total include ~2000 LOC.\nAt Dec. 13th, 2020, the project management committee (PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome to join the committer team, Ke Zhang!\n","title":"Welcome Ke Zhang (张可) as new committer","url":"/events/welcome-ke-zhang-as-new-committer/"},{"content":"今年暑假期间我参加了开源软件供应链点亮计划—暑期 2020 的活动,在这个活动中,我主要参加了 Apache SkyWalking 的 Python Agent 的开发,最终项目顺利结项并获得了”最具潜力奖“,今天我想分享一下我参与这个活动以及开源社区的感受与收获。\n缘起 其实我在参加暑期 2020 活动之前就听说过 SkyWalking 了。我研究生的主要研究方向是微服务和云原生,组里的学长们之前就在使用 SkyWalking 进行一些研究工作,也是通过他们,我了解到了 OpenTracing, SkyWalking 等与微服务相关的 Tracing 工具以及 APM 等,当时我就在想如果有机会可以深度参加这些开源项目就好了。 巧的是,也正是在差不多的时候,本科的一个学长发给了我暑期 2020 活动的链接,我在其中惊喜的发现了 SkyWalking 项目。\n虽然说想要参与 SkyWalking 的开发,但是真的有了机会我却有一些不自信——这可是 Star 上万的 Apache 顶级项目。万幸的是在暑期 2020 活动中,每一个社区都提供了很多题目以供选择,想参与的同学可以提前对要做的事情有所了解,并可以提前做一些准备。我当时也仔细地浏览了项目列表,最终决定申请为 Python Agent 支持 Flask 或 Django 埋点的功能。当时主要考虑的是,我对 Python 语言比较熟悉,同时也有使用 Flask 等 web 框架进行开发的经验,我认为应该可以完成项目要求。为了能让心里更有底一些,我阅读了 Python Agent 的源码,写下了对项目需要做的工作的理解,并向项目的导师柯振旭发送了自荐邮件,最终被选中去完成这个项目。\n过程 被选中后我很激动,也把这份激动化作了参与开源的动力。我在进一步阅读源码,搭建本地环境后,用了三周左右的时间完成了 Django 项目的埋点插件的开发,毕竟我选择的项目是一个低难度的项目,而我在 Python web 方面也有一些经验。在这之后,我的导师和我进行了沟通,在我表达了想要继续做贡献的意愿之后,他给我建议了一些可以进一步进行贡献的方向,我也就继续参与 Python Agent 的开发。接下来,我陆续完成了 PyMongo 埋点插件, 插件版本检查机制, 支持使用 kafka 协议进行数据上报等功能。在提交了暑期 2020 活动的结项申请书后,我又继续参与了在端到端测试中增加对百分位数的验证等功能。\n在整个过程中,我遇到过很多问题,包括对问题认识不够清晰,功能的设计不够完善等等,但是通过与导师的讨论以及 Code Review,这些问题最终都迎刃而解了。此外他还经常会和我交流项目进一步发展方向,并给我以鼓励和肯定,在这里我想特别感谢我的导师在整个项目过程中给我的各种帮助。\n收获 参加暑期 2020 的活动带给我了很多收获,主要有以下几点:\n第一是让我真正参与到了开源项目中。在之前我只向在项目代码或文档中发现的 typo 发起过一些 Pull Request,但是暑期 2020 活动通过列出项目 + 导师指导的方式,明确了所要做的事情,并提供了相应的指导,降低了参与开源的门槛,使得我们学生可以参与到项目的开发中来。\n第二是对我的专业研究方向也有很多启发,我的研究方向就是微服务与云原生相关,通过参与到 SkyWalking 的开发中使得我可以更好地理解研究问题中的一些概念,也让我更得心应手得使用 SkyWalking 来解决一些实际的问题。\n第三是通过参与 SkyWalking Python Agent 以及其他部分的开发,我的贡献得到了社区的承认,并在最近被邀请作为 Committer 加入了社区,这对我而言是很高的认可,也提升了我的自信心。\n​\t第四点就是我通过这个活动认识了不少新朋友,同时也开拓了我的视野,使得我对于开源项目与开源社区有了很多新的认识。\n建议 最后同样是我对想要参与开源社区,想要参与此类活动的同学们的一些建议:\n 虽然奖金很吸引人,但是还是希望大家能抱着长期为项目进行贡献的心态来参与开源项目,以这样的心态参与开源可以让你更好地理解开源社区的运作方式,也可以让你更有机会参与完成激动人心的功能,你在一个东西上付出的时间精力越多,你能收获的往往也越多。 在申请项目的时候,可以提前阅读一下相关功能的源码,并结合自己的思考去写一份清晰明了的 proposal ,这样可以帮助你在申请人中脱颖而出。 在开始着手去完成一个功能之前,首先理清思路,并和自己的导师或了解这一部分的人进行沟通与确认,从而尽量避免在错误的方向上浪费太多时间。  ","title":"暑期 2020 活动学生(张可)心得分享","url":"/zh/2020-12-20-summer2020-activity-sharing2/"},{"content":"背景 我是一个热爱编程、热爱技术的人,⼀直以来都向往着能参与到开源项⽬中锻炼⾃⼰,但当我面对庞大而复杂的项目代码时,却感到手足无措,不知该从何开始。⽽此次的“开源软件供应链点亮计划-暑期2020”活动则正好提供了这样⼀个机会:清晰的任务要求、开源社区成员作为导师提供指导以及一笔丰厚的奖金,让我顺利地踏上了开源这条道路。\n回顾 在“暑期2020”活动的这两个多月里,我为 SkyWalking 的命令行工具实现了一个 dashboard,此外在阅读项目源码的过程中,还发现并修复了几个 bug。到活动结束时,我共提交了11个 PR,贡献了两千多行改动,对 SkyWalking CLI 项目的贡献数量排名第二,还获得了“最具潜力奖”。\n我觉得之所以能够如此顺利地完成这个项⽬主要有两个原因。一方面,我选择的 SkyWalking CLI 
项⽬当时最新的版本号为0.3.0,还处于起步阶段,代码量相对较少,⽽且项⽬结构非常清晰,文档也较为详细,这对于我理解整个项⽬⾮常有帮助,从⽽能够更快地上⼿。另一方面,我的项目导师非常认真负责,每次我遇到问题,导师都会及时地为我解答,然后我提交的 PR 也能够很快地被 review。⽽且导师不时会给予我肯定的评论与⿎励,这极⼤地提⾼了我的成就感,让我更加积极地投⼊到下⼀阶段的⼯作,形成⼀个正向的循环。\n收获 回顾整个参与过程,觉得自己收获颇多:\n首先,我学习到了很多可能在学校里接触不到的新技术,了解了开源项目是如何进行协作,开源社区是如何运转治理的,以及开源文化、Apache way 等知识,仿佛进入了一个崭新而精彩的世界。\n其次,我的编程能力得到了锻炼。因为开源项目对于代码的质量有较高的要求,因此我会在编程时有意识地遵守相关的规范,培养良好的编码习惯。然后在导师的 code review 中也学习到了一些编程技巧。\n此外,参与开源为我的科研带来了不少灵感。因为我的研究方向是智能软件工程,旨在将人工智能技术应用在软件工程的各个环节中,这需要我在实践中发现实际问题。而开源则提供了这样一个窗口,让我足不出户即可参与到软件项目的设计、开发、测试和发布等环节。\n最后也是本次活动最大的一个收获,我的贡献得到了社区的认可,被提名成为了 SkyWalking 社区的第一位学生 committer。\n建议 最后,对于将来想要参加此类活动的同学,附上我的一些建议:\n第一,选择活跃、知名的社区。社区对你的影响将是极其深远的,好的社区意味着成熟的协作流程、良好的氛围、严谨的代码规范,以及有更大几率遇到优秀的导师,这些对于你今后在开源方面的发展都是非常有帮助的。\n第二,以兴趣为导向来选择项目,同时要敢于走出舒适区。我最初在选择项目时,初步确定了两个,一个是低难度的 Python 项目,另一个是中等难度的 Go 项目。当时我很纠结:因为我对 Python 语言比较熟悉,选择一个低难度的项目是比较稳妥的,但是项目的代码我看的并不是很懂,具体要怎么做我完全没有头绪;而 Go 项目是一个命令行工具,我对这个比较感兴趣,且有一个大致的思路,但是我对 Go 语言并不是很熟悉,实践经验为零。最后凭借清晰具体的 proposal 我成功申请到了 Go 项目并顺利地完成了,还在实践中快速掌握了一门新的编程语言。\n这次的“暑期2020”活动虽已圆满结束,但我的开源之路才刚刚开始。\n","title":"暑期2020活动心得分享","url":"/zh/2020-12-19-summer2020-activity-sharing/"},{"content":"NGE2E is the next generation End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It\u0026rsquo;s built based on the lessons learnt from tens of hundreds of test cases in the SkyWalking main repo.\nGoal  Keep the feature parity with the existing E2E framework in SkyWalking main repo; Support both docker-compose and KinD to orchestrate the tested services under different environments; Get rid of the heavy Java/Maven stack, which exists in the current E2E; be language independent as much as possible, users only need to configure YAMLs and run commands, without writing codes;  Non-Goal  This framework is not involved with the build process, i.e. 
it won\u0026rsquo;t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn\u0026rsquo;t take the plugin tests into account, at least for now; This project doesn\u0026rsquo;t mean to add/remove any new/existing test case to/from the main repo; This documentation won\u0026rsquo;t cover too much technical details of how to implement the framework, that should go into an individual documentation;  Design Before diving into the design details, let\u0026rsquo;s take a quick look at how the end user might use NGE2E.\n All the following commands are mock, and are open to debate.\n To run a test case in a directory /path/to/the/case/directory\ne2e run /path/to/the/case/directory # or cd /path/to/the/case/directory \u0026amp;\u0026amp; e2e run This will run the test case in the specified directory, this command is a wrapper that glues all the following commands, which can be executed separately, for example, to debug the case:\nNOTE: because all the options can be loaded from a configuration file, so as long as a configuration file (say e2e.yaml) is given in the directory, every command should be able to run in bare mode (without any option explicitly specified in the command line);\nSet Up e2e setup --env=compose --file=docker-compose.yaml --wait-for=service/health e2e setup --env=kind --file=kind.yaml --manifests=bookinfo.yaml,gateway.yaml --wait-for=pod/ready e2e setup # If configuration file e2e.yaml is present  --env: the environment, may be compose or kind, represents docker-compose and KinD respectively; --file: the docker-compose.yaml or kind.yaml file that declares how to set up the environment; --manifests: for KinD, the resources files/directories to apply (using kubectl apply -f); --command: a command to run after the environment is started, this may be useful when users need to install some extra tools or apply resources from command line, like istioctl install --profile=demo; --wait-for: can be specified multiple times to give a list of conditions to be met; wait until the given conditions are met; the most frequently-used strategy should be --wait-for=service/health, --wait-for=deployments/available, etc. that make the e2e setup command to wait for all conditions to be met; other possible strategies may be something like --wait-for=\u0026quot;log:Started Successfully\u0026quot;, --wait-for=\u0026quot;http:localhost:8080/healthcheck\u0026quot;, etc. if really needed;  Trigger Inputs e2e trigger --interval=3s --times=0 --action=http --url=\u0026#34;localhost:8080/users\u0026#34; e2e trigger --interval=3s --times=0 --action=cmd --cmd=\u0026#34;curl localhost:8080/users\u0026#34; e2e trigger # If configuration file e2e.yaml is present  --interval=3s: trigger the action every 3 seconds; --times=0: how many times to trigger the action, 0=infinite; --action=http: the action of the trigger, i.e. \u0026ldquo;perform an http request as an input\u0026rdquo;; --action=cmd: the action of the trigger, i.e. 
\u0026ldquo;execute the cmd as an input\u0026rdquo;;  Query Output swctl service ls this is a project-specific step, different project may use different tools to query the actual output, for SkyWalking, it uses swctl to query the actual output.\nVerify e2e verify --actual=actual.data.yaml --expected=expected.data.yaml e2e verify --query=\u0026#34;swctl service ls\u0026#34; --expected=expected.data.yaml e2e verify # If configuration file e2e.yaml is present   --actual: the actual data file, only YAML file format is supported;\n  --expected: the expected data file, only YAML file format is supported;\n  --query: the query to get the actual data, the query result must have the same format as --actual and --expected;\n The --query option will get the output into a temporary file and use the --actual under the hood;\n   Cleanup e2e cleanup --env=compose --file=docker-compose.yaml e2e cleanup --env=kind --file=kind.yaml --resources=bookinfo.yaml,gateway.yaml e2e cleanup # If configuration file e2e.yaml is present This step requires the same options in the setup step so that it can clean up all things necessarily.\nSummarize To summarize, the directory structure of a test case might be\ncase-name ├── agent-service # optional, an arbitrary project that is used in the docker-compose.yaml if needed │ ├── Dockerfile │ ├── pom.xml │ └── src ├── docker-compose.yaml ├── e2e.yaml # see a sample below └── testdata ├── expected.endpoints.service1.yaml ├── expected.endpoints.service2.yaml └── expected.services.yaml or\ncase-name ├── kind.yaml ├── bookinfo │ ├── bookinfo.yaml │ └── bookinfo-gateway.yaml ├── e2e.yaml # see a sample below └── testdata ├── expected.endpoints.service1.yaml ├── expected.endpoints.service2.yaml └── expected.services.yaml a sample of e2e.yaml may be\nsetup:env:kindfile:kind.yamlmanifests:- path:bookinfo.yamlwait:# you can have multiple conditions to wait- namespace:bookinfolabel-selector:app=productfor:deployment/available- namespace:reviewslabel-selector:app=productfor:deployment/available- namespace:ratingslabel-selector:app=productfor:deployment/availablerun:- command:|# it can be a shell script or anything executableistioctl install --profile=demo -ykubectl label namespace default istio-injection=enabledwait:- namespace:istio-systemlabel-selector:app=istiodfor:deployment/available# OR# env: compose# file: docker-compose.yamltrigger:action:httpinterval:3stimes:0url:localhost:9090/usersverify:- query:swctl service lsexpected:expected.services.yaml- query:swctl endpoint ls --service=\u0026#34;YnVzaW5lc3Mtem9uZTo6cHJvamVjdEM=.1\u0026#34;expected:expected.projectC.endpoints.yamlthen a single command should do the trick.\ne2e run Modules This project is divided into the following modules.\nController A controller command (e2e run) composes all the steps declared in the e2e.yaml, it should be progressive and clearly display which step is currently running. If it failed in a step, the error message should be as much comprehensive as possible. An example of the output might be\ne2e run ✔ Started Kind Cluster - Cluster Name ✔ Checked Pods Readiness - All pods are ready ? 
Generating Traffic - http localhost:9090/users (progress spinner) ✔ Verified Output - service ls (progress spinner) Verifying Output - endpoint ls ✘ Failed to Verify Output Data - endpoint ls \u0026lt;the diff content\u0026gt; ✔ Clean Up Compared with running the steps one by one, the controller is also responsible for cleaning up env (by executing cleanup command) no mater what status other commands are, even if they are failed, the controller has the following semantics in terms of setup and cleanup.\n// Java try { setup(); // trigger step // verify step // ... } finally { cleanup(); } // GoLang func run() { setup(); defer cleanup(); // trigger step // verify step // ... } Initializer The initializer is responsible for\n  When env==compose\n Start the docker-compose services; Check the services' healthiness; Wait until all services are ready according to the interval, etc.;    When env==kind\n Start the KinD cluster according to the config files; Apply the resources files (--manifests) or/and run the custom init command (--commands); Check the pods' readiness; Wait until all pods are ready according to the interval, etc.;    Verifier According to scenarios we have at the moment, the must-have features are:\n  Matchers\n Exact match Not null Not empty Greater than 0 Regexp match At least one of list element match    Functions\n Base64 encode/decode    in order to help to identify simple bugs from the GitHub Actions workflow, there are some \u0026ldquo;nice to have\u0026rdquo; features:\n Printing the diff content when verification failed is a super helpful bonus proved in the Python agent repo;  Logging When a test case failed, all the necessary logs should be collected into a dedicated directory, which could be uploaded to the GitHub Artifacts for downloading and analysis;\nLogs through the entire process of a test case are:\n KinD clusters logs; Containers/pods logs; The logs from the NGE2E itself;  More Planned Debugging Debugging the E2E locally has been a strong requirement and time killer that we haven\u0026rsquo;t solve up to date, though we have enhancements like https://github.com/apache/skywalking/pull/5198 , but in this framework, we will adopt a new method to \u0026ldquo;really\u0026rdquo; support debugging locally.\nThe most common case when debugging is to run the E2E tests, with one or more services forwarded into the host machine, where the services are run in the IDE or in debug mode.\nFor example, you may run the SkyWalking OAP server in an IDE and run e2e run, expecting the other services (e.g. agent services, SkyWalking WebUI, etc.) 
inside the containers to connect to your local OAP, instead of the one declared in docker-compose.yaml.\nFor Docker Desktop Mac/Windows, we can access the services running on the host machine inside containers via host.docker.internal, for Linux, it\u0026rsquo;s 172.17.0.1.\nOne possible solution is to add an option --debug-services=oap,other-service-name that rewrites all the router rules inside the containers from oap to host.docker.internal/172.17.0.1.\nCodeGen When adding new test case, a code generator would be of great value to eliminate the repeated labor and copy-pasting issues.\ne2e new \u0026lt;case-name\u0026gt; ","title":"[Design] NGE2E - Next Generation End-to-End Testing Framework","url":"/blog/e2e-design/"},{"content":"这篇文章暂时不讲告警策略, 直接看默认情况下激活的告警目标以及钉钉上的告警效果\nSkyWalking内置了很多默认的告警策略, 然后根据告警策略生成告警目标, 我们可以很容易的在界面上看到\n当我们想去让这些告警目标通知到我们时, 由于SkyWalking目前版本(8.3)已经自带了, 只需要简单配置一下即可\n我们先来钉钉群中创建机器人并勾选加签\n然后再修改告警部分的配置文件, 如果你是默认的配置文件(就像我一样), 你可以直接执行以下命令, 反之你也可以手动修改configs/alarm-settings.yml文件\ntee \u0026lt;your_skywalking_path\u0026gt;/configs/alarm-settings.yml \u0026lt;\u0026lt;-'EOF' dingtalkHooks: textTemplate: |- { \u0026quot;msgtype\u0026quot;: \u0026quot;text\u0026quot;, \u0026quot;text\u0026quot;: { \u0026quot;content\u0026quot;: \u0026quot;Apache SkyWalking Alarm: \\n %s.\u0026quot; } } webhooks: - url: https://oapi.dingtalk.com/robot/send?access_token=\u0026lt;access_token\u0026gt; secret: \u0026lt;加签值\u0026gt; EOF 最终效果如下\n参考文档:\nhttps://github.com/apache/skywalking/blob/master/docs/en/setup/backend/backend-alarm.md\nhttps://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq/uKPlK\n谢谢观看, 后续我会在SkyWalking告警这块写更多实战文章\n","title":"SkyWalking报警发送到钉钉群","url":"/zh/2020-12-13-skywalking-alarm/"},{"content":"Gui Cao began the code contributions since May 3, 2020. In the past 6 months, his 23 pull requests(GitHub, zifeihan[1]) have been accepted, which includes 5k+ lines of codes.\nMeanwhile, he took part in the tech discussion, and show the interests to contribute more to the project.\nAt Dec. 4th, 2020, the project management committee(PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome Gui Cao join the committer team.\n[1] https://github.com/apache/skywalking/commits?author=zifeihan\n","title":"Welcome Gui Cao as new committer","url":"/events/welcome-gui-cao-as-new-committer/"},{"content":" Author: Zhenxu Ke, Sheng Wu, and Tevah Platt. tetrate.io Original link, Tetrate.io blog Dec. 03th, 2020  Apache SkyWalking: an APM (application performance monitor) system, especially designed for microservices, cloud native, and container-based (Docker, Kubernetes, Mesos) architectures.\nEnvoy Access Log Service: Access Log Service (ALS) is an Envoy extension that emits detailed access logs of all requests going through Envoy.\nBackground Apache SkyWalking has long supported observability in service mesh with Istio Mixer adapter. But since v1.5, Istio began to deprecate Mixer due to its poor performance in large scale clusters. Mixer’s functionalities have been moved into the Envoy proxies, and is supported only through the 1.7 Istio release. On the other hand, Sheng Wu and Lizan Zhou presented a better solution based on the Apache SkyWalking and Envoy ALS on KubeCon China 2019, to reduce the performance impact brought by Mixer, while retaining the same observability in service mesh. This solution was initially implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman at Tetrate.io. 
If you are looking for a more efficient solution to observe your service mesh instead of using a Mixer-based solution, this is exactly what you need. In this tutorial, we will explain a little bit how the new solution works, and apply it to the bookinfo application in practice.\nHow it works From a perspective of observability, Envoy can be typically deployed in 2 modes, sidecar, and router. As a sidecar, Envoy mostly represents a single service to receive and send requests (2 and 3 in the picture below). While as a proxy, Envoy may represent many services (1 in the picture below).\nIn both modes, the logs emitted by ALS include a node identifier. The identifier starts with router~ (or ingress~) in router mode and sidecar~ in sidecar proxy mode.\nApart from the node identifier, there are several noteworthy properties in the access logs that will be used in this solution:\n  downstream_direct_remote_address: This field is the downstream direct remote address on which the request from the user was received. Note: This is always the physical peer, even if the remote address is inferred from for example the x-forwarded-for header, proxy protocol, etc.\n  downstream_remote_address: The remote/origin address on which the request from the user was received.\n  downstream_local_address: The local/destination address on which the request from the user was received.\n  upstream_remote_address: The upstream remote/destination address that handles this exchange.\n  upstream_local_address: The upstream local/origin address that handles this exchange.\n  upstream_cluster: The upstream cluster that upstream_remote_address belongs to.\n  We will discuss more about the properties in the following sections.\nSidecar When serving as a sidecar, Envoy is deployed alongside a service, and delegates all the incoming/outgoing requests to/from the service.\n  Delegating incoming requests: in this case, Envoy acts as a server side sidecar, and sets the upstream_cluster in form of inbound|portNumber|portName|Hostname[or]SidecarScopeID.\nThe SkyWalking analyzer checks whether either downstream_remote_address can be mapped to a Kubernetes service:\na. If there is a service (say Service B) whose implementation is running in this IP(and port), then we have a service-to-service relation, Service B -\u0026gt; Service A, which can be used to build the topology. Together with the start_time and duration fields in the access log, we have the latency metrics now.\nb. If there is no service that can be mapped to downstream_remote_address, then the request may come from a service out of the mesh. Since SkyWalking cannot identify the source service where the requests come from, it simply generates the metrics without source service, according to the topology analysis method. The topology can be built as accurately as possible, and the metrics detected from server side are still correct.\n  Delegating outgoing requests: in this case, Envoy acts as a client-side sidecar, and sets the upstream_cluster in form of outbound|\u0026lt;port\u0026gt;|\u0026lt;subset\u0026gt;|\u0026lt;serviceFQDN\u0026gt;.\nClient side detection is relatively simpler than (1. Delegating incoming requests). If upstream_remote_address is another sidecar or proxy, we simply get the mapped service name and generate the topology and metrics. 
Otherwise, we have no idea what it is and consider it an UNKNOWN service.\n  Proxy role When Envoy is deployed as a proxy, it is an independent service itself and doesn\u0026rsquo;t represent any other service like a sidecar does. Therefore, we can build client-side metrics as well as server-side metrics.\nExample In this section, we will use the typical bookinfo application to demonstrate how Apache SkyWalking 8.3.0+ (the latest version up to Nov. 30th, 2020) works together with Envoy ALS to observe a service mesh.\nInstalling Kubernetes SkyWalking 8.3.0 supports the Envoy ALS solution under both Kubernetes environment and virtual machines (VM) environment, in this tutorial, we’ll only focus on the Kubernetes scenario, for VM solution, please stay tuned for our next blog, so we need to install Kubernetes before taking further steps.\nIn this tutorial, we will use the Minikube tool to quickly set up a local Kubernetes(v1.17) cluster for testing. In order to run all the needed components, including the bookinfo application, the SkyWalking OAP and WebUI, the cluster may need up to 4GB RAM and 2 CPU cores.\nminikube start --memory=4096 --cpus=2 Next, run kubectl get pods --namespace=kube-system --watch to check whether all the Kubernetes components are ready. If not, wait for the readiness before going on.\nInstalling Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The built-in configuration profiles free us from lots of manual operations. So, for demonstration purposes, we will use Istio through this tutorial.\nexport ISTIO_VERSION=1.7.1 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ istioctl install --set profile=demo kubectl label namespace default istio-injection=enabled Run kubectl get pods --namespace=istio-system --watch to check whether all the Istio components are ready. If not, wait for the readiness before going on.\nEnabling ALS The demo profile doesn’t enable ALS by default. We need to reconfigure it to enable ALS via some configuration.\nistioctl manifest install \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 The example command --set meshConfig.enableEnvoyAccessLogService=true enables the Envoy access log service in the mesh. And as we said earlier, ALS is essentially a gRPC service that emits requests logs. The config meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 tells this gRPC service where to emit the logs, say skywalking-oap.istio-system:11800, where we will deploy the SkyWalking ALS receiver later.\nNOTE: You can also enable the ALS when installing Istio so that you don’t need to restart Istio after installation:\nistioctl install --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 kubectl label namespace default istio-injection=enabled Deploying Apache SkyWalking The SkyWalking community provides a Helm Chart to make it easier to deploy SkyWalking and its dependent services in Kubernetes. 
The Helm Chart can be found at the GitHub repository.\n# Install Helm curl -sSLO https://get.helm.sh/helm-v3.0.0-linux-amd64.tar.gz sudo tar xz -C /usr/local/bin --strip-components=1 linux-amd64/helm -f helm-v3.0.0-linux-amd64.tar.gz # Clone SkyWalking Helm Chart git clone https://github.com/apache/skywalking-kubernetes cd skywalking-kubernetes/chart git reset --hard dd749f25913830c47a97430618cefc4167612e75 # Update dependencies helm dep up skywalking # Deploy SkyWalking helm -n istio-system install skywalking skywalking \\  --set oap.storageType=\u0026#39;h2\u0026#39;\\  --set ui.image.tag=8.3.0 \\  --set oap.image.tag=8.3.0-es7 \\  --set oap.replicas=1 \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh \\  --set oap.env.JAVA_OPTS=\u0026#39;-Dmode=\u0026#39; \\  --set oap.envoy.als.enabled=true \\  --set elasticsearch.enabled=false We deploy SkyWalking to the namespace istio-system, so that SkyWalking OAP service can be accessed by skywalking-oap.istio-system:11800, to which we told ALS to emit their logs, in the previous step.\nWe also enable the ALS analyzer in the SkyWalking OAP: oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh. The analyzer parses the access logs and maps the IP addresses in the logs to the real service names in the Kubernetes, to build a topology.\nIn order to retrieve the metadata (such as Pod IP and service names) from a Kubernetes cluster for IP mappings, we also set oap.envoy.als.enabled=true, to apply for a ClusterRole that has access to the metadata.\nexport POD_NAME=$(kubectl get pods -A -l \u0026#34;app=skywalking,release=skywalking,component=ui\u0026#34; -o name) echo $POD_NAME kubectl -n istio-system port-forward $POD_NAME 8080:8080 Now navigate your browser to http://localhost:8080 . You should be able to see the SkyWalking dashboard. The dashboard is empty for now, but after we deploy the demo application and generate traffic, it should be filled up later.\nDeploying Bookinfo application Run:\nexport ISTIO_VERSION=1.7.1 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s minikube tunnel Then navigate your browser to http://localhost/productpage. You should be able to see the typical bookinfo application. Refresh the webpage several times to generate enough access logs.\nDone! And you’re all done! Check out the SkyWalking WebUI again. You should see the topology of the bookinfo application, as well the metrics of each individual service of the bookinfo application.\nTroubleshooting  Check all pods status: kubectl get pods -A. SkyWalking OAP logs: kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=oap\u0026quot; -o name). SkyWalking WebUI logs: kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o name). Make sure the time zone at the bottom-right of the WebUI is set to UTC +0.  Customizing Service Names The SkyWalking community brought more improvements to the ALS solution in the 8.3.0 version. You can decide how to compose the service names when mapping from the IP addresses, with variables service and pod. 
For instance, configuring K8S_SERVICE_NAME_RULE to the expression ${service.metadata.name}-${pod.metadata.labels.version} gets service names with version label such as reviews-v1, reviews-v2, and reviews-v3, instead of a single service reviews, see the PR.\nWorking ALS with VM Kubernetes is popular, but what about VMs? From what we discussed above, in order to map the IPs to services, SkyWalking needs access to the Kubernetes cluster, fetching service metadata and Pod IPs. But in a VM environment, there is no source from which we can fetch those metadata. In the next post, we will introduce another ALS analyzer based on the Envoy metadata exchange mechanism. With this analyzer, you are able to observe a service mesh in the VM environment. Stay tuned! If you want to have commercial support for the ALS solution or hybrid mesh observability, Tetrate Service Bridge, TSB is another good option out there.\nAdditional Resources  KubeCon 2019 Recorded Video. Get more SkyWalking updates on the official website.  Apache SkyWalking founder Sheng Wu, SkyWalking core maintainer Zhenxu Ke are Tetrate engineers, and Tevah Platt is a content writer for Tetrate. Tetrate helps organizations adopt open source service mesh tools, including Istio, Envoy, and Apache SkyWalking, so they can manage microservices, run service mesh on any infrastructure, and modernize their applications.\n","title":"Observe Service Mesh with SkyWalking and Envoy Access Log Service","url":"/blog/2020-12-03-obs-service-mesh-with-sw-and-als/"},{"content":" 如果你正在寻找在 Mixer 方案以外观察服务网格的更优解,本文正符合你的需要。\n Apache Skywalking︰特别为微服务、云原生和容器化(Docker、Kubernetes、Mesos)架构而设计的 APM(应用性能监控)系统。\nEnvoy 访问日志服务︰访问日志服务(ALS)是 Envoy 的扩展组件,会将所有通过 Envoy 的请求的详细访问日志发送出来。\n背景 Apache SkyWalking 一直通过 Istio Mixer 的适配器,支持服务网格的可观察性。不过自从 v1.5 版本,由于 Mixer 在大型集群中差强人意的表现,Istio 开始弃用 Mixer。Mixer 的功能现已迁至 Envoy 代理,并获 Istio 1.7 版本支持。\n在去年的中国 KubeCon 中,吴晟和周礼赞基于 Apache SkyWalking 和 Envoy ALS,发布了新的方案:不再受制于 Mixer 带来的性能影响,也同时保持服务网格中同等的可观察性。这个方案最初是由吴晟、高洪涛、周礼赞和 Dhi Aurrahman 在 Tetrate.io 实现的。\n如果你正在寻找在 Mixer 方案之外,为你的服务网格进行观察的最优解,本文正是你当前所需的。在这个教程中,我们会解释此方案的运作逻辑,并将它实践到 bookinfo 应用上。\n运作逻辑 从可观察性的角度来说,Envoy 一般有两种部署模式︰Sidecar 和路由模式。 Envoy 代理可以代表多项服务(见下图之 1),或者当它作为 Sidecar 时,一般是代表接收和发送请求的单项服务(下图之 2 和 3)。\n在两种模式中,ALS 发放的日志都会带有一个节点标记符。该标记符在路由模式时,以 router~ (或 ingress~)开头,而在 Sidecar 代理模式时,则以 sidecar~ 开头。\n除了节点标记符之外,这个方案[1]所采用的访问日志也有几个值得一提的字段︰\ndownstream_direct_remote_address︰此字段是下游的直接远程地址,用作接收来自用户的请求。注意︰它永远是对端实体的地址,即使远程地址是从 x-forwarded-for header、代理协议等推断出来的。\ndownstream_remote_address︰远程或原始地址,用作接收来自用户的请求。\ndownstream_local_address︰本地或目标地址,用作接收来自用户的请求。\nupstream_remote_address︰上游的远程或目标地址,用作处理本次交换。\nupstream_local_address︰上游的本地或原始地址,用作处理本次交换。\nupstream_cluster︰upstream_remote_address 所属的上游集群。\n我们会在下面详细讲解各个字段。\nSidecar 当 Envoy 作为 Sidecar 的时候,会搭配服务一起部署,并代理来往服务的传入或传出请求。\n  代理传入请求︰在此情况下,Envoy 会作为服务器端的 Sidecar,以 inbound|portNumber|portName|Hostname[or]SidecarScopeID 格式设定 upstream_cluster。\nSkyWalking 分析器会检查 downstream_remote_address 是否能够找到对应的 Kubernetes 服务。\n如果在此 IP(和端口)中有一个服务(例如服务 B)正在运行,那我们就会建立起服务对服务的关系(即服务 B → 服务 A),帮助建立拓扑。再配合访问日志中的 start_time 和 duration 两个字段,我们就可以获得延迟的指标数据了。\n如果没有任何服务可以和 downstream_remote_address 相对应,那请求就有可能来自网格以外的服务。由于 SkyWalking 无法识别请求的服务来源,在没有源服务的情况下,它简单地根据拓扑分析方法生成数据。拓扑依然可以准确地建立,而从服务器端侦测出来的指标数据也依然是正确的。\n  代理传出请求︰在此情况下,Envoy 会作为客户端的 Sidecar,以 outbound|\u0026lt;port\u0026gt;|\u0026lt;subset\u0026gt;|\u0026lt;serviceFQDN\u0026gt; 格式设定 upstream_cluster。\n客户端的侦测相对来说比代理传入请求容易。如果 upstream_remote_address 是另一个 Sidecar 或代理的话,我们只需要获得它相应的服务名称,便可生成拓扑和指标数据。否则,我们没有办法理解它,只能把它当作 UNKNOWN 服务。\n  
代理角色 当 Envoy 被部署为前端代理时,它是独立的服务,并不会像 Sidecar 一样,代表任何其他的服务。所以,我们可以建立客户端以及服务器端的指标数据。\n演示范例 在本章,我们会使用典型的 bookinfo 应用,来演示 Apache SkyWalking 8.3.0+ (截至 2020 年 11 月 30 日的最新版本)如何与 Envoy ALS 合作,联手观察服务网格。\n安装 Kubernetes 在 Kubernetes 和虚拟机器(VM)的环境下,SkyWalking 8.3.0 均支持 Envoy ALS 的方案。在本教程中,我们只会演示在 Kubernetes 的情境,至于 VM 方案,请耐心期待我们下一篇文章。所以在进行下一步之前,我们需要先安装 Kubernetes。\n在本教程中,我们会使用 Minikube 工具来快速设立本地的 Kubernetes(v1.17 版本)集群用作测试。要运行所有必要组件,包括 bookinfo 应用、SkyWalking OAP 和 WebUI,集群需要动用至少 4GB 内存和 2 个 CPU 的核心。\nminikube start --memory=4096 --cpus=2 然后,运行 kubectl get pods --namespace=kube-system --watch,检查所有 Kubernetes 的组件是否已准备好。如果还没,在进行下一步前,请耐心等待准备就绪。\n安装 Istio Istio 为配置 Envoy 代理和实现访问日志服务提供了一个非常方便的方案。内建的配置设定档为我们省去了不少手动的操作。所以,考虑到演示的目的,我们会在本教程全程使用 Istio。\nexport ISTIO_VERSION=1.7.1 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ istioctl install --set profile=demo kubectl label namespace default istio-injection=enabled 然后,运行 kubectl get pods --namespace=istio-system --watch,检查 Istio 的所有组件是否已准备好。如果还没,在进行下一步前,请耐心等待准备就绪。\n启动访问日志服务 演示的设定档没有预设启动 ALS,我们需要重新配置才能够启动 ALS。\nistioctl manifest install \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 范例指令 --set meshConfig.enableEnvoyAccessLogService=true 会在网格中启动访问日志服务。正如之前提到,ALS 本质上是一个会发放请求日志的 gRPC 服务。配置 meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 会告诉这个gRPC 服务往哪里发送日志,这里是往 skywalking-oap.istio-system:11800 发送,稍后我们会部署 SkyWalking ALS 接收器到这个地址。\n注意︰\n你也可以在安装 Istio 时启动 ALS,那就不需要在安装后重新启动 Istio︰\nistioctl install --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 kubectl label namespace default istio-injection=enabled 部署 Apache SkyWalking SkyWalking 社区提供了 Helm Chart ,让你更轻易地在 Kubernetes 中部署 SkyWalking 以及其依赖服务。 Helm Chart 可以在 GitHub 仓库找到。\n# Install Helm curl -sSLO https://get.helm.sh/helm-v3.0.0-linux-amd64.tar.gz sudo tar xz -C /usr/local/bin --strip-components=1 linux-amd64/helm -f helm-v3.0.0-linux-amd64.tar.gz # Clone SkyWalking Helm Chart git clone https://github.com/apache/skywalking-kubernetes cd skywalking-kubernetes/chart git reset --hard dd749f25913830c47a97430618cefc4167612e75 # Update dependencies helm dep up skywalking # Deploy SkyWalking helm -n istio-system install skywalking skywalking \\  --set oap.storageType=\u0026#39;h2\u0026#39;\\  --set ui.image.tag=8.3.0 \\  --set oap.image.tag=8.3.0-es7 \\  --set oap.replicas=1 \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh \\  --set oap.env.JAVA_OPTS=\u0026#39;-Dmode=\u0026#39; \\  --set oap.envoy.als.enabled=true \\  --set elasticsearch.enabled=false 我们在 istio-system 的命名空间内部署 SkyWalking,使 SkyWalking OAP 服务可以使用地址 skywalking-oap.istio-system:11800 访问,在上一步中,我们曾告诉过 ALS 应往此处发放它们的日志。\n我们也在 SkyWalking OAP 中启动 ALS 分析器︰oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=k8s-mesh。分析器会对访问日志进行分析,并解析日志中的 IP 地址和 Kubernetes 中的真实服务名称,以建立拓扑。\n为了从 Kubernetes 集群处获取元数据(例如 Pod IP 和服务名称),以识别相应的 IP 地址,我们还会设定 oap.envoy.als.enabled=true,用来申请一个对元数据有访问权的 ClusterRole。\nexport POD_NAME=$(kubectl get pods -A -l \u0026#34;app=skywalking,release=skywalking,component=ui\u0026#34; -o name) echo $POD_NAME kubectl -n istio-system port-forward $POD_NAME 8080:8080 现在到你的浏览器上访问 http://localhost:8080。你应该会看到 SkyWalking 的 Dashboard。 Dashboard 现在应该是空的,但稍后部署应用和生成流量后,它就会被填满。\n部署 Bookinfo 应用 运行︰\nexport ISTIO_VERSION=1.7.1 kubectl apply -f 
https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s minikube tunnel 现在到你的浏览器上进入 http://localhost/productpage。你应该会看到典型的 bookinfo 应用画面。重新整理该页面几次,以生成足够的访问日志。\n完成了! 这样做,你就成功完成设置了!再查看 SkyWalking 的 WebUI,你应该会看到 bookinfo 应用的拓扑,以及它每一个单独服务的指标数据。\n疑难解答  检查所有 pod 的状态︰kubectl get pods -A。 SkyWalking OAP 的日志︰kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=oap\u0026quot; -o name)。 SkyWalking WebUI 的日志︰kubectl -n istio-system logs -f $(kubectl get pod -A -l \u0026quot;app=skywalking,release=skywalking,component=ui\u0026quot; -o name)。 确保 WebUI 右下方的时区设定在 UTC +0。  自定义服务器名称 SkyWalking 社区在 ALS 方案的 8.3.0 版本中,作出了许多改善。你现在可以在映射 IP 地址时,决定如何用 service 和 pod 变量去自定义服务器的名称。例如,将 K8S_SERVICE_NAME_RULE 设置为 ${service.metadata.name}-${pod.metadata.labels.version},就可以使服务名称带上版本的标签,类似 reviews-v1、reviews-v2 和 reviews- v3,而不再是单个服务 review[2]。\n在 VM 上使用 ALS Kubernetes 很受欢迎,可是 VM 呢?正如我们之前所说,为了替 IP 找到对应的服务,SkyWalking 需要对 Kubernetes 集群有访问权,以获得服务的元数据和 Pod 的 IP。可是在 VM 环境中,我们并没有来源去收集这些元数据。\n在下一篇文章,我们会介绍另外一个 ALS 分析器,它是建立于 Envoy 的元数据交换机制。有了这个分析器,你就可以在 VM 环境中观察服务网格了。万勿错过!\n如果你希望在 ALS 方案或是混合式网格可观察性上获得商业支持,TSB 会是一个好选项。\n额外资源\n KubeCon 2019 的录影视频。 在官方网站上获得更多有关 SkyWalking 的最新消息吧。  如有任何问题或反馈,发送邮件至 learn@tetrate.io。\nApache SkyWalking 创始人吴晟和 SkyWalking 的核心贡献者柯振旭都是 Tetrate 的工程师。 Tetrate 的内容创造者编辑与贡献于本文章。 Tetrate 帮助企业采用开源服务网格工具,包括 Istio、Envoy 和 Apache SkyWalking,让它们轻松管理微服务,在任何架构上运行服务网格,以至现代化他们的应用。\n[1]https://github.com/envoyproxy/envoy/blob/549164c42cae84b59154ca4c36009e408aa10b52/generated_api_shadow/envoy/data/accesslog/v2/accesslog.proto\n[2]https://github.com/apache/skywalking/pull/5722\n","title":"使用 SkyWalking 和 Envoy 访问日志服务对服务网格进行观察","url":"/zh/observe-service-mesh-with-skywalking-and-envoy-access-log-service/"},{"content":"SkyWalking 8.3.0 is released. Go to downloads page to find release tars.\nProject  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. 
Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.3.0","url":"/events/release-apache-skwaylking-apm-8-3-0/"},{"content":"Python 作为一门功能强大的编程语言,被广泛的应用于计算机行业之中; 在微服务系统架构盛行的今天,Python 以其丰富的软件生态和灵活的语言特性在服务端编程领域也占有重要的一席之地。 本次分享将阐述 Apache SkyWalking 在微服务架构中要解决的问题,展示如何使用 Apache SkyWalking 来近乎自动化地监控 Python 后端应用服务,并对 Apache SkyWalking 的 Python 语言探针的实现技术进行解读。\nB站视频地址\n","title":"[视频] PyCon China 2020 - Python 微服务应用性能监控","url":"/zh/2020-11-30-pycon/"},{"content":"SkyWalking CLI 0.5.0 is released. Go to downloads page to find release tars.\n  Features\n Use template files in yaml format instead Refactor metrics command to adopt metrics-v2 protocol Use goroutine to speed up dashboard global command Add metrics list command    Bug Fixes\n Add flags of instance, endpoint and normal for metrics command Fix the problem of unable to query database metrics    Chores\n Update release guide doc Add screenshots for use cases in README.md Introduce generated codes into codebase    ","title":"Release Apache SkyWalking CLI 0.5.0","url":"/events/release-apache-skywalking-cli-0-5-0/"},{"content":" Author: Jiapeng Liu. Baidu. skywalking-satellite: The Sidecar Project of Apache SkyWalking Nov. 
25th, 2020  A lightweight collector/sidecar which can be deployed close to the target monitored system, to collect metrics, traces, and logs. It also provides advanced features, such as local cache, format transformation, and sampling.\nDesign Thinking Satellite is a 2-level system that collects observability data from other core systems. So the core element of the design is to guarantee data stability from Pod startup all the way to Pod shutdown, avoiding alarm loss. All modules are designed as plugins; if you have other ideas, you can add your own.\nSLO  A single gatherer supports \u0026gt; 1000 ops (based on 0.5 core, 50M memory) At-least-once delivery. (Optional) Data stability: 99.999%. (Optional)  Because they are influenced by the choice of plugins, some items in the SLO are optional.\nRole Satellite runs as a Sidecar. Although DaemonSet mode would take up fewer resources, it causes more trouble for the forwarding of agents, so we prefer Sidecar mode while working to reduce its costs. DaemonSet mode is also planned to be supported in the future.\nCore Modules Satellite has 3 core modules: Gatherer, Processor, and Sender.\n The Gatherer module is responsible for fetching or receiving data and pushing it to the Queue. The Processor module is responsible for reading data from the queue and processing it through a series of filter chains. The Sender module is responsible for asynchronously forwarding the data to external services in batch mode. After a successful send, the Sender also acknowledges the offset of the Queue in the Gatherer.  Detailed Structure The overall design is shown in detail in the figure below. We will explain the specific components one by one.\nGatherer Concepts The Gatherer has 4 components to support data collection: Input, Collector, Worker, and Queue. The Worker has 2 roles: Fetcher and Receiver.\n The Input is an abstraction of the input source, which is usually mapped to a configuration file. The Collector is created from the Input source, and many collectors can be created from the same source. For example, when a log path is configured as /var/*.log in an Input, the number of collectors equals the number of files under that path. The Fetcher and Receiver are the real workers that collect data. The receiver interface is an abstraction with multiple implementations, such as a gRPC receiver and an HTTP receiver. Here are some specific use cases:  Trace Receiver is a gRPC server for receiving trace data created by SkyWalking agents. Log Receiver is also a gRPC server, for receiving log data collected by SkyWalking agents. (In the future we want the SkyWalking agent to support log sending; RPC-based log sending is more efficient and needs fewer resources than file reading. For example, file reading brings I/O pressure and performance costs when multi-line splicing is involved.) Log Fetcher is like Filebeat, which fits the common log collection scenario. This fetcher has more responsibilities than the other workers because it needs to record the offset and handle multi-line splicing. This feature will be implemented in the future. Prometheus Fetcher supports a new way to fetch Prometheus data and push it upstream. \u0026hellip;\u0026hellip;   The Queue is a buffer module that decouples collection and transmission. In the first release we will use persistent storage to ensure data stability, but the implementation is a plug-in design that can support pure in-memory queues later.
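As a rough illustration of the plugin-oriented design described above, the Gatherer's core abstractions could be expressed as Go interfaces along the following lines. The interface names and method signatures here are assumptions for illustration only, not the project's actual API.

```go
package gatherer

// Event is one piece of collected observability data
// (a trace segment, a log entry, a metrics sample, ...).
type Event []byte

// Receiver passively accepts data pushed by agents, e.g. over gRPC or HTTP.
type Receiver interface {
	Start(outbound chan<- Event) error
	Close() error
}

// Fetcher actively pulls data from a source, e.g. log files or a
// Prometheus endpoint.
type Fetcher interface {
	Fetch(outbound chan<- Event) error
	Close() error
}

// Queue buffers events between the Gatherer and the Processor/Sender,
// and exposes offsets so the Sender can acknowledge delivered batches.
type Queue interface {
	Enqueue(e Event) error
	Dequeue() (Event, uint64, error) // returns the event and its offset
	Ack(offset uint64) error         // commit after a successful send
	Close() error
}
```

Because Receivers (push) and Fetchers (pull) both feed the same Queue abstraction, a memory-backed queue and the mmap-backed queue described below can be swapped without changing the workers.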
  The data flow We use the Trace Receiver as an example to introduce the data flow. Queue MmapQueue We have simplified the design of MmapQueue to reduce the resource costs in memory and on disk.\nConcepts There are 2 core concepts in MmapQueue.\n Segment: The Segment is the real data store; it provides large-capacity storage while avoiding read and write performance loss as much as possible by using mmap. We also avoid deleting files by reusing them. Meta: The purpose of the Meta is to locate the data that the consumer needs.  Segment One MmapQueue has a directory to store all the data. The Queue directory is made up of many segments and 1 meta file. The number of segments is computed from 2 parameters: the max cost of the Queue and the cost of each segment. For example, if the max cost is 512M and each segment costs 256K, the directory can hold about 2000 files. Once the capacity is exceeded, an overwrite (coverage) policy is adopted, meaning the next write wraps around and overrides the first file.\nEach segment in the Queue will be N times the size of the page cache and will be read and written in an appended sequence rather than randomly, which improves the performance of the Queue. For example, each Segment is a 128K file, as shown in the figure below.\nMeta The Meta is an mmap file that only contains 56 bits. There are 5 concepts in the Meta.\n Version: A version flag. Watermark Offset: Points to the current writing space.  ID: SegmentID Offset: The offset in the Segment.   Written Offset: Points to the latest flushed data; it is overridden by the write offset after each periodic refresh.  ID: SegmentID Offset: The offset in the Segment.   Reading Offset: Points to the current reading space.  ID: SegmentID Offset: The offset in the Segment.   Committed Offset: Points to the latest committed offset, which is equal to the latest acked offset plus one.  ID: SegmentID Offset: The offset in the Segment.    The following diagram illustrates the transformation process.\n The publisher receives data and wants to write to the Queue.  The publisher reads the Writing Offset to find a free slot and increments the offset by one. After this, the publisher writes the data to that slot.   The consumer wants to read data from the Queue.  The consumer reads the Reading Offset to find the current read position and increments the offset by one. After this, the consumer reads the data from that slot.   On each periodic flush, the flusher overrides the Watermark Offset with the Writing Offset. When the ack operation is triggered, the Committed Offset is advanced by the size of the acked batch. When facing a crash, the Writing Offset and Reading Offset are overridden by the Watermark Offset and Committed Offset, because the Reading Offset and Writing Offset alone cannot guarantee at-least-once delivery.  Mmap Performance Test The test verifies the efficiency of mmap under a low memory budget.\n The rate of data generation: 7.5K/item, 1043 items/s (based on an Aifanfan online pod). The test structure is based on Bigqueue because of its similar structure.
Test tool: Go Benchmark Test Command: go test -bench BenchmarkEnqueue -run=none -cpu=1 Result On Mac(15-inch, 2018,16 GB 2400 MHz DDR4, 2.2 GHz Intel Core i7 SSD):  BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-384KB 66501 21606 ns/op 68 B/op 1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-1.25MB 72348 16649 ns/op 67 B/op 1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-16KB/MaxMem-1.25MB 39996 33199 ns/op 103 B/op 1 allocs/op   Result On Linux(INTEL Xeon E5-2450 V2 8C 2.5GHZ2,INVENTEC PC3L-10600 16G8,INVENTEC SATA 4T 7.2K*8):  BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-384KB 126662\t12070 ns/op\t62 B/op\t1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-8KB/MaxMem-1.25MB 127393\t12097 ns/op\t62 B/op\t1 allocs/op BenchmarkEnqueue/ArenaSize-128KB/MessageSize-16KB/MaxMem-1.25MB 63292\t23806 ns/op\t92 B/op\t1 allocs/op   Conclusion: Based on the above tests, mmap is both satisfied at the write speed and at little memory with very low consumption when running as a sidecar.  Processor The Processor has 3 core components, which are Consumer, Filter, and Context.\n The Consumer is created by the downstream Queue. The consumer has its own read offset and committed offset, which is similar to the offset concept of Spark Streaming. Due to the particularity of APM data preprocessing, Context is a unique concept in the Satellite filter chain, which supports storing the intermediate event because the intermediate state event also needs to be sent in sometimes. The Filter is the core data processing part, which is similar to the processor of beats. Due to the context, the upstream/downstream filters would be logically coupling.  Sender  BatchConverter decouples the Processor and Sender by staging the Buffer structure, providing parallelization. But if BatchBuffer is full, the downstream processors would be blocked. Follower is a real send worker that has a client, such as a gRPC client or Kafka client, and a fallback strategy. Fallback strategy is an interface, we can add more strategies to resolve the abnormal conditions, such as Instability in the network, upgrade the oap cluster. When sent success, Committed Offset in Queue would plus the number of this batch.  High Performance The scenario using Satellite is to collect a lot of APM data collection. We guarantee high performance by the following ways.\n Shorten transmission path, that means only join 2 components,which are Queue and Processor, between receiving and forwarding. High Performance Queue. MmapQueue provides a big, fast and persistent queue based on memory mapped file and ring structure. Processor maintains a linear design, that could be functional processed in one go-routine to avoid too much goroutines switching.  Stability Stability is a core point in Satellite. Stability can be considered in many ways, such as stable resources cost, stable running and crash recovery.\nStable resource cost In terms of resource cost, Memory and CPU should be a concern.\nIn the aspect of the CPU, we keep a sequence structure to avoid a large number of retries occurring when facing network congestion. And Satellite avoids keep pulling when the Queue is empty based on the offset design of Queue.\nIn the aspect of the Memory, we have guaranteed only one data caching in Satellite, that is Queue. For the queue structure, we also keep the size fixed based on the ring structure to maintain stable Memory cost. 
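To make the fixed-size ring concrete, here is a small worked example (in Go) of how a monotonically growing logical position could be mapped onto the reused segment files described in the MmapQueue section above. The constants mirror the 512M/256K example from the design; the helper names are illustrative assumptions, not Satellite's real code.

```go
package queue

// position locates one write inside the mmap ring: which segment file it
// lives in and the byte offset inside that file.
type position struct {
	segmentID int64 // index of the segment file, reused in a ring
	offset    int64 // byte offset inside the segment
}

const (
	segmentSize = 256 * 1024        // 256 KB per segment file
	maxCost     = 512 * 1024 * 1024 // 512 MB budget for the whole queue
	numSegments = maxCost / segmentSize
)

// locate maps a monotonically growing logical write position onto the
// fixed set of segment files; once the ring is full, new data simply
// lands in the oldest file again (the overwrite/"coverage" policy).
func locate(logicalPos int64) position {
	segment := (logicalPos / segmentSize) % numSegments
	return position{segmentID: segment, offset: logicalPos % segmentSize}
}
```

With such a layout the disk footprint never exceeds the configured budget, and "deleting" old data is simply overwriting a segment whose contents have already been read and committed.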
Also, MmapQueue is designed to minimize memory consumption and provide persistence while keeping speed as fast as possible. In the future it may support strategies to dynamically control the size of the MmapQueue to handle more extreme conditions.\nStable running There are many causes of network congestion, such as a network problem on the host node, an OAP cluster that is being upgraded, or an unstable Kafka cluster. When facing the above cases, the Follower processes its fallback strategy and blocks the downstream processes. Once the failure strategy finishes, either by sending successfully or by giving up this batch, the Follower processes the next batch.\nCrash Recovery Crash recovery only works when the user selects the MmapQueue in the Gatherer, because of its persistent file-system design. When facing a crash, the Reading Offset is overridden by the Committed Offset, which ensures at-least-once delivery. And the Written Offset overrides the Writing Offset, which ensures the consumer always works properly and avoids encountering uncrossable defective data blocks.\nBuffer pool The Queue stores objects with a fixed structure, so an object buffer pool is an efficient way to reuse memory and avoid GC.\n ackChan batch convertor  Some metrics Satellite should also collect its own monitoring metrics. The following metrics are necessary for Satellite.\n cpu memory goroutine number gatherer_writing_offset gatherer_watermark_offset processor_reading_count sender_committed_offset sender_abandoned_count sender_retry_count  Input and Output We will reuse this diagram to explain the input and output.\n Input  Because both push and pull modes are supported, the Queue is a core component. The Queue is designed as a ring with fixed capacity, which means the oldest data is overridden by the latest data. If users find data loss, they should raise the ceiling of the memory Queue. The MmapQueue generally doesn\u0026rsquo;t face this problem unless the Sender transport is congested.   Output  If the BatchBuffer is full, the processor is blocked. If the Channel is full, the downstream components are blocked, such as the BatchConvertor and Processor. When the SenderWorker fails to send, the batch data goes through a failure strategy, which blocks pulling data from the Channel. The strategy is a part of the Sender, and its operation mode is synchronous. Once the failure strategy finishes, either by sending successfully or by giving up this batch, the SenderWorker keeps pulling data from the Channel.    Questions How do we avoid busy pulling when the Queue is empty? If the Watermark Offset is less than or equal to the Reading Offset, a signal is sent to the consumer so it stops pulling.\nWhy reuse files in the Queue? The unified model of the Queue is a ring, which keeps the resource cost in memory or on disk fixed. In the MmapQueue, reusing files turns delete operations into overwrite operations, effectively reducing file creation and deletion.\nWhat are the strategies for file creation and deletion in the MmapQueue? As Satellite runs, the number of files in the MmapQueue keeps growing until it reaches the maximum capacity. After this, the old files are overridden by new data to avoid file deletion. When the Pod dies, all its resources are recycled.\n","title":"The first design of Satellite 0.1.0","url":"/blog/2020-11-25-skywalking-satellite-0.1.0-design/"},{"content":"SkyWalking Python 0.4.0 is released.
Go to downloads page to find release tars.\n Feature: Support Kafka reporter protocol (#74) BugFix: Move generated packages into skywalking namespace to avoid conflicts (#72) BugFix: Agent cannot reconnect after server is down (#79) Test: Mitigate unsafe yaml loading (#76)  ","title":"Release Apache SkyWalking Python 0.4.0","url":"/events/release-apache-skywalking-python-0-4-0/"},{"content":"活动介绍 Apache SkyWalking 2020 开发者线下活动,社区创始人,PMC成员和Committer会亲临现场,和大家交流和分享项目中的使用经验。 以及邀请Apache Local Community 北京的成员一起分享Apache文化和Apache之道。\n日程安排 开场演讲 09:30-09:50 SkyWalking\u0026rsquo;s 2019-2020 and beyond\n吴晟,Tetrate.io创始工程师,Apache SkyWalking创始人\nB站视频地址\n 上午 09:55-10:30 贝壳全链路跟踪实践\n赵禹光,赵禹光,贝壳找房监控技术负责人,Apache SkyWalking PMC成员\n10:35-11:15 SkyWalking在百度爱番番部门实践\n刘嘉鹏,百度,SkyWalking contributor\n11:15-11:55 非计算机背景的同学如何贡献开源\n缘于一位本科在读的社会学系的同学的问题,这让我反思我们开源community的定位和Open的程度,于是,适兕从生产、分发、消费的软件供应的角度,根据涉及到的角色,然后再反观现代大学教育体系的专业,进一步对一个开源项目和community需要的专业背景多样性进行一个阐述和探究。并以ALC Beijing为例进行一个事例性的说明。\n适兕,开源布道师,ALC Beijing member,开源之道主创,开源社教育组成员。\nB站视频地址\n 下午 13:30-14:10 如何从 Apache SkyWalking 社区学习 Apache Way\n温铭,支流科技联合创始人&CEO,Apache APISIX 项目 VP, Apache SkyWalking Committer\n14:10-14:50 Apache SkyWalking 在小米公司的应用\n宋振东,小米公司小米信息技术部 skywalking 研发负责人\n14:50-15:30 Istio全生命周期监控\n高洪涛,Tetrate.io创始工程师,Apache SkyWalking PMC成员\n15:30-15:45 茶歇\n15:45-16:25 针对HikariCP数据库连接池的监控\n张鑫 Apache SkyWalking PMC 成员\n16:25-17:00 SkyWalking 与 Nginx 的优化实践\n王院生 深圳支流科技创始人兼 CTO,Apache APISIX 创始人 \u0026amp; PMC成员\nB站视频地址\n","title":"[视频] SkyWalking DevCon 2020","url":"/zh/2020-11-23-devcon/"},{"content":"The APM system provides the tracing or metrics for distributed systems or microservice architectures. Back to APM themselves, they always need backend storage to store the necessary massive data. What are the features required for backend storage? Simple, fewer dependencies, widely used query language, and the efficiency could be into your consideration. Based on that, traditional SQL databases (like MySQL) or NoSQL databases would be better choices. However, this topic will present another backend storage solution for the APM system viewing from NewSQL. Taking Apache Skywalking for instance, this talking will share how to make use of Apache ShardingSphere, a distributed database middleware ecosystem to extend the APM system\u0026rsquo;s storage capability.\nAs a senior DBA worked at JD.com, the responsibility is to develop the distributed database and middleware, and the automated management platform for database clusters. As a PMC of Apache ShardingSphere, I am willing to contribute to the OS community and explore the area of distributed databases and NewSQL.\n  ","title":"[Video] Another backend storage solution for the APM system","url":"/blog/2020-11-21-apachecon-obs-shardingsphere/"},{"content":"Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open-source and scalable platform for all your APIs and microservices. Apache SkyWalking: an APM(application performance monitor) system, especially designed for microservices, cloud-native and container-based (Docker, Kubernetes, Mesos) architectures. Through the powerful plug-in mechanism of Apache APISIX, Apache Skywalking is quickly supported, so that we can see the complete life cycle of requests from the edge to the internal service. 
Monitor and manage each request in a visual way, and improve the observability of the service.\n  ","title":"[Video] Improve Apache APISIX observability with Apache SkyWalking","url":"/blog/2020-11-21-apachecon-obs-apisix/"},{"content":"Today\u0026rsquo;s monitoring solutions are geared towards operational tasks, displaying behavior as time-series graphs inside dashboards and other abstractions. These abstractions are immensely useful but are largely designed for software operators, whose responsibilities require them to think in systems, rather than the underlying source code. This is problematic given that an ongoing trend of software development is the blurring boundaries between building and operating software. This trend makes it increasingly necessary for programming environments to not just support development-centric activities, but operation-centric activities as well. Such is the goal of the feedback-driven development approach. By combining IDE and APM technology, software developers can intuitively explore multiple dimensions of their software simultaneously with continuous feedback about their software from inception to production.\nBrandon Fergerson is an open-source software developer who does not regard himself as a specialist in the field of programming, but rather as someone who is a devoted admirer. He discovered the beauty of programming at a young age and views programming as an art and those who do it well to be artists. He has an affinity towards getting meta and combining that with admiration of programming, has found source code analysis to be exceptionally interesting. Lately, his primary focus involves researching and building AI-based pair programming technology.\n  ","title":"[Video] SourceMarker - Continuous Feedback for Developers","url":"/blog/2020-11-21-apachecon-obs-sourcemarker/"},{"content":"Over the past few years, and coupled with the growing adoption of microservices, distributed tracing has emerged as one of the most commonly used monitoring and troubleshooting methodologies. New tracing tools are increasingly being introduced, driving adoption even further. One of these tools is Apache SkyWalking, a popular open-source tracing, and APM platform. This talk explores the history of the SkyWalking storage module, shows the evolution of distributed tracing storage layers, from the traditional relational database to document-based search engine. I hope that this talk contributes to the understanding of history and also that it helps to clarify the different types of storage that are available to organizations today.\nHongtao Gao is the engineer of tetrate.io and the former Huawei Cloud expert. One of PMC members of Apache SkyWalking and participates in some popular open-source projects such as Apache ShardingSphere and Elastic-Job. He has an in-depth understanding of distributed databases, container scheduling, microservices, ServicMesh, and other technologies.\n  ","title":"[Video] The history of distributed tracing storage","url":"/blog/2020-11-21-apachecon-obs-storage/"},{"content":" 作者: 赵禹光 原文链接: 亲临百人盛况的Apache SkyWalking 2020 DevCon,看见了什么? 
2020 年 10 月 29 日  活动现场 2020年11月14日Apache SkyWalking 2020 DevCon由贝壳找房和tetrate赞助,Apache SkyWalking、云原生、Apache APISIX、Apache Pulsar 和 ALC Beijing 五大社区合作,在贝壳找房一年级会议室盛大举行,本次活动主要面对Apache SkyWalking的使用者、开发者和潜在用户。线上线下共有230多人报名。经统计,实际参加活动人数超过130人,近60%的人愿意抽出自己的休息时间,来交流学习Apache SkyWalking和开源文化。不难看见,在可预见的未来,中国的开源项目很快将进入下一个维度,那必定是更广的社区人员参与,更高技术知识体现,更强的线上稳定性和及时修复能力。\n活动历程: 09:30-09:50 SkyWalking\u0026rsquo;s 2019-2020 and beyond 吴晟老师本次分享:回顾2020年度SkyWalking发布的重要的新特性,出版的《Apache SkyWalking实战》图书,社区的进展,开源爱好者如何参与SkyWalking建设,和已知社区在主导的SkyWalking2021年孵化中的新特性。\n09:55-10:30 贝壳全链路跟踪实践 赵禹光老师(作者)本次分享:回顾了贝壳找房2018年至今,贝壳找房的全链路跟踪项目与SkyWalking的渊源,分享了SkyWalking在实践中遇到的问题,和解决方案。以及SkyWalking近10%的Committer都曾经或正在贝壳人店平台签中研发部,工作过的趣事。\n10:35-11:15 刘嘉鹏老师分享 SkyWalking在百度爱番番部门实践 刘嘉鹏老师本次分享:回顾了百度爱番番部门在使用SkyWalking的发展历程\u0026amp;现状,CRM SAAS产品在近1年使用SkyWalking实践经验,以及如何参与SkyWalking的贡献,并成为的Apache Committer。\n11:15-11:55 适兕老师分享 非计算机背景的同学如何贡献开源 适兕是国内很有名的开源布道师,本次分享从生产、分发、消费的软件供应的角度,根据涉及到的角色,然后再反观现代大学教育体系的专业,进一步对一个开源项目和community需要的专业背景多样性进行一个阐述和探究。并以ALC Beijing为例进行一个事例性的说明,非计算机背景的同学如何贡献开源。\n13:30-14:10 如何从 Apache SkyWalking 社区学习 Apache Way 14:10-14:50 Apache SkyWalking 在小米公司的应用 宋振东老师是小米信息技术部分布式链路追踪系统研发负责人,分别以小米公司,业务开发、架构师、SRE、Leader和QA等多个视角,回顾了SkyWalking在小米公司的应用实践。从APM的产品选型到实际落地,对其他公司准备使用SkyWalking落地,非常有借鉴意义。\n14:50-15:30 Istio全生命周期监控 高洪涛老师本次分享了SkyWalking和可观测云原生等非常前沿的知识布道,其中有,云原生在Logging、Metrics和Tracing的相关知识,Istio,K8S等方面的实践。对一些公司在前沿技术的落地,非常有借鉴意义。\n15:45-16:25 针对HikariCP数据库连接池的监控 张鑫老师本次分享了,以一个SkyWalking无法Tracing的实际线上故障的故事出发,讲述如何定位,和补充SkyWalking插件的不足,并将最后的实践贡献到社区。对大家参与开源很有帮助。\n16:25-17:00 SkyWalking 与 Nginx 的优化实践 王院生老师本次分享SkyWalking社区和APISIX社区合作,在Nginx插件的实践过程,对社区之间的如何开展合作,非常有借鉴意义,院生老师的工作\u0026amp;开源态度,很好的诠释Geek精神,也是我们互联网从业者需要学习恪守的。\nApache SkyWalking 2020 DevCon 讲师PPT Apache SkyWalking 2020 DevCon 讲师 PPT\nSkyWalking 后续发展计划 正如吴晟老师所说:No plan, open to the community,Apache SkyWalking是没有RoadMap。社区的后续发展,依赖于每个人在社区的贡献。与其期待,不如大胆设想,将自己的设计按照Apache Way贡献到SkyWalking,你就是下一个Apache SkyWalking Commiter,加入Member of SkyWalking大家庭,让社区因为你,而更加有活力。\n","title":"亲临百人盛况的Apache SkyWalking 2020 DevCon,看见了什么?","url":"/zh/2020-11-21-what-do-we-see-at-the-apache-skywalking-2020-devcon-event/"},{"content":"Sheng Wu is a founding engineer at tetrate.io, leads the observability for service mesh and hybrid cloud. A searcher, evangelist, and developer in the observability, distributed tracing, and APM. He is a member of the Apache Software Foundation. Love open source software and culture. Created the Apache SkyWalking project and being its VP and PMC member. Co-founder and PMC member of Apache ShardingSphere. Also as a PMC member of Apache Incubator and APISIX. He is awarded as Microsoft MVP, Alibaba Cloud MVP, Tencent Cloud TVP.\nIn the Apache FY2020 report, China is on the top of the download statistics. More China initiated projects joined the incubator, and some of them graduated as the Apache TLP. Sheng joined the Apache community since 2017, in the past 3 years, he witnessed the growth of the open-source culture and Apache way in China. Many developers have joined the ASF as new contributors, committers, foundation members. Chinese enterprises and companies paid more attention to open source contributions, rather than simply using the project like before. In the keynote, he would share the progress about China embracing the Apache culture, and willing of enhancing the whole Apache community.\n  ","title":"[Video] Apache grows in China","url":"/blog/2020-11-21-apachecon-keynote/"},{"content":"SkyWalking Client JS 0.2.0 is released. 
Go to downloads page to find release tars.\n Bug Fixes  Fixed a bug in sslTime calculate. Fixed a bug in server response status judgment.    ","title":"Release Apache SkyWalking Client JS 0.2.0","url":"/events/release-apache-skywalking-client-js-0-2-0/"},{"content":"SkyWalking Cloud on Kubernetes 0.1.0 is released. Go to downloads page to find release tars.\n Add OAPServer CRDs and controller.  ","title":"Release Apache SkyWalking Cloud on Kubernetes 0.1.0","url":"/events/release-apache-skywalking-cloud-on-kubernetes-0.1.0/"},{"content":"Based on his continuous contributions, Jiapeng Liu (a.k.a evanljp) has been voted as a new committer.\n","title":"Welcome Jiapeng Liu as new committer","url":"/events/welcome-jiapeng-liu-as-new-committer/"},{"content":"SkyWalking Kubernetes Helm Chart 4.0.0 is released. Go to downloads page to find release tars.\n Allow overriding configurations files under /skywalking/config. Unify the usages of different SkyWalking versions. Add Values for init container in case of using private regestry. Add services, endpoints resources in ClusterRole.  ","title":"Release Apache SkyWalking Kubernetes Helm Chart 4.0.0","url":"/events/release-apache-skywalking-kubernetes-helm-chart-4.0.0/"},{"content":"SkyWalking Client JS 0.1.0 is released. Go to downloads page to find release tars.\n Support Browser Side Monitoring. Require SkyWalking APM 8.2+.  ","title":"Release Apache SkyWalking Client JS 0.1.0","url":"/events/release-apache-skywalking-client-js-0-1-0/"},{"content":" Author: Zhenxu Ke, Sheng Wu, Hongtao Gao, and Tevah Platt. tetrate.io Original link, Tetrate.io blog Oct. 29th, 2020  Apache SkyWalking, the observability platform, and open-source application performance monitor (APM) project, today announced the general availability of its 8.2 release. The release extends Apache SkyWalking’s functionalities and monitoring boundary to the browser side.\nBackground SkyWalking is an observability platform and APM tool that works with or without a service mesh, providing automatic instrumentation for microservices, cloud-native and container-based applications. The top-level Apache project is supported by a global community and is used by Alibaba, Huawei, Tencent, Baidu, ByteDance, and scores of others.\nBrowser side monitoring APM helps SRE and Engineering teams to diagnose system failures, or optimize the systems before they become intolerably slow. But is it enough to always make the users happy?\nIn 8.2.0, SkyWalking extends its monitoring boundary to the browser side, e.g., Chrome, or the network between Chrome and the backend service, or the codes running in the browser. With this, not only can we monitor the backend services and requests sent by the browser as usual, but also the front end rendering speed, error logs, etc., which are the most efficient metrics for capturing the experiences of our end users. (This does not currently extend to IoT devices, but this feature moves SkyWalking a step in that direction).\nWhat\u0026rsquo;s more, SkyWalking browser monitoring also provides data about how the users use products, such as PV(page views), UV(unique visitors), top N PV(page views), etc., which can give a product team clues for optimizing their products.\nQuery traces by tags In SkyWalking\u0026rsquo;s Span data model, there are many important fields that are already indexed and can be queried by the users, but for the sake of performance, querying by Span tags was not supported until now. 
In SkyWalking 8.2.0, we allow users to query traces by specified tags, which is extremely useful. For example, SRE engineers running tests on the product environment can tag the synthetic traffic and query by this tag later.\nMeter Analysis Language In 8.2.0, the meter system provides a functional analysis language called MAL(Meter Analysis Language) that allows users to analyze and aggregate meter data in the OAP streaming system. The result of an expression can be ingested by either the agent analyzer or OpenTelemetry/Prometheus analyzer.\nComposite Alert Rules Alerting is a good way to discover system failures in time. A common problem is that we configure too many triggers just to avoid missing any possible issue. Nobody likes to be woken up by alert messages at midnight, only to find out that the trigger is too sensitive. These kinds of alerts become noisy and don\u0026rsquo;t help at all.\nIn 8.2.0, users can now configure composite alert rules, where composite rules take multiple metrics dimensions into account. With composite alert rules, we can leverage as many metrics as needed to more accurately determine whether there’s a real problem or just an occasional glitch.\nCommon scenarios like successful rate \u0026lt; 90% but there are only 1~2 requests can now be resolved by a composite rule, such as traffic(calls per minute) \u0026gt; n \u0026amp;\u0026amp; successful rate \u0026lt; m%.\nOther Notable Enhancements  The agent toolkit exposes some APIs for users to send customizable metrics. The agent exclude_plugins allows you to exclude some plugins; mount enables you to load a new set of plugins. More than 10 new plugins have been contributed to the agent. The alert system natively supports sending alert messages to Slack, WeChat, DingTalk.  Additional Resources  Read more about the SkyWalking 8.2 release highlights. Get more SkyWalking updates on Twitter.  ","title":"Features in SkyWalking 8.2: Browser Side Monitoring; Query Traces by Tags; Meter Analysis Language","url":"/blog/2020-10-29-skywalking8-2-release/"},{"content":" 作者: 柯振旭, 吴晟, 高洪涛, Tevah Platt. tetrate.io 原文链接: What\u0026rsquo;s new with Apache SkyWalking 8.2? 
Browser monitoring and more 2020 年 10 月 29 日  Apache SkyWalking,一个可观测性平台,也是一个开源的应用性能监视器(APM)项目,今日宣布 8.2 发行版全面可用。该发行版拓展了核心功能,并将其监控边界拓展到浏览器端。\n背景 SkyWalking 是一个观测平台和 APM 工具。它可以选择性的与 Service Mesh 协同工作,为微服务、云原生和基于容器的应用提供自动的指标。该项目是全球社区支持的 Apache 顶级项目,阿里巴巴、华为、腾讯、百度、字节跳动等许多公司都在使用。\n浏览器端监控 APM 可以帮助 SRE 和工程团队诊断系统故障,也能在系统异常缓慢之前优化它。但它是否足以让用户总是满意呢?\n在 8.2.0 版本中, SkyWalking 将它的监控边界拓展到了浏览器端,比如 Chrome ,或者 Chrome 和后端服务之间的网络。这样,我们不仅可以像以前一样监控浏览器发送给后端服务的与请求,还能看到前端的渲染速度、错误日志等信息——这些信息是获取最终用户体验的最有效指标。(目前此功能尚未拓展到物联网设备中,但这项功能使得 SkyWalking 向着这个方向前进了一步)\n此外,SkyWalking浏览器监视也提供以下数据: PV(page views,页面浏览量), UV(unique visitors,独立访客数),浏览量前 N 的页面(Top N Page Views)等。这些数据可以为产品队伍优化他们的产品提供线索。\n按标签 (tag) 查询链路数据 在 SkyWalking 的 Span 数据模型中,已经有了许多被索引并可供用户查询的重要字段。但出于性能考虑,使用 Span 标签查询链路数据的功能直到现在才正式提供。在 SkyWalking 8.2.0 中,我们允许用户查询被特定标签标记的链路,这非常有用。SRE 工程师可以在生产环境中运行测试,将其打上仿真流量的标签,并稍后通过该标签查找它。\n指标分析语言 在 8.2.0 中,仪表系统提供了一项名为MAL(Meter Analysis Language,指标分析语言)的强大分析语言。该语言允许用户在 OAP 流系统中分析并聚合(aggregate)指标数据。 表达式的结果可以被 Agent 分析器或 OpenTelemetry/Prometheus 分析器获取。\n复合警报规则 警报是及时发现系统失效的有效方式。一个常见的问题是,为了避免错过任何可能的问题,我们通常会配置过多的触发器(triggers)。没有人喜欢半夜被警报叫醒,结果只是因为触发系统太敏感。这种警报很嘈杂并毫无帮助。\n在 8.2.0 版本中,用户选择可以配置考虑了多个度量维度的复合警报规则。使用复合报警规则,我们可以根据需要添加尽可能多的指标来更精确地判断是否存在真正的问题,或者只是一个偶发的小问题。\n一些常见的情况,如 成功率 \u0026lt; 90% 但只有 1~2 个请求,现在可以通过复合规则解决,如流量(即每分钟调用数) \u0026gt; n \u0026amp;\u0026amp; 成功率 \u0026lt; m%。\n其它值得注意的功能增强  agent-toolkit SDK 公开了某些 API,供用户发送自定义指标。 Agent exclude_plgins 配置允许您排除某些插件(plugins); mount 配置使您能够加载一套新的插件。 社区贡献了超过 10 个新 Agent 插件。 报警系统原生支持发送消息到 Slack,企业微信,钉钉。  附加资源   阅读更多关于SkyWalkng 8.2 发行版重点.\n  在推特上获取更多关于 SkyWalking 的更新。\n  Apache SkyWalking DevCon 报名信息 Apache SkyWalking DevCon 2020 开始报名了。 2020 年 11 月 14 日,欢迎大家来线下参加活动和交流, 或者报名观看线上直播。\n","title":"SkyWalking 8.2.0 中的新特性: 浏览器端监控; 使用标签查询; 指标分析语言","url":"/zh/2020-10-29-skywalking8-2-release/"},{"content":"SkyWalking 8.2.0 is released. Go to downloads page to find release tars.\nProject  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. 
Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.2.0","url":"/events/release-apache-skywalking-apm-8-2-0/"},{"content":"高洪涛 美国ServiceMesh服务商tetrate创始工程师。原华为软件开发云技术专家。目前为Apache SkyWalking核心贡献者,参与该开源项目在软件开发云的商业化进程。曾任职当当网系统架构师,开源达人,曾参与Apache ShardingSphere,Elastic-Job等知名开源项目。对分布式数据库,容器调度,微服务,ServicMesh等技术有深入的了解。\n议题简介 定制化Operator模式在面向Kubernetes的云化平台建构中变得越来越流行。Apache SkyWalking社区已经开始尝试使用Operator模式去构建基于Kubernetes平台的PaaS云组件。本次分享给将会给听众带来该项目的初衷,实现与未来演进等相关内容。分享的内容包含:\n 项目动机与设计理念 核心功能展示,包含SkyWalking核心组件的发布,更新与维护。 观测ServiceMesh,包含于Istio的自动集成。 目前的工作进展和对未来的规划。  B站视频地址\n","title":"[视频] Apache SkyWalking Cloud on Kubernetes","url":"/zh/2020-10-25-coscon20-swck/"},{"content":"SkyWalking LUA Nginx 0.3.0 is released. Go to downloads page to find release tars.\n Load the base64 module in utils, different ENV use different library. Add prefix skywalking, avoid conflicts with other lua libraries. Chore: only expose the method of setting random seed, it is optional. Coc: use correct code block type. 
CI: add upstream_status to tag http.status Add http.status  ","title":"Release Apache SkyWalking LUA Nginx 0.3.0","url":"/events/release-apache-skywalking-lua-nginx-0.3.0/"},{"content":"SkyWalking CLI 0.4.0 is released. Go to downloads page to find release tars.\n Features  Add dashboard global command with auto-refresh Add dashboard global-metrics command Add traces search Refactor metrics thermodynamic command to adopt the new query protocol   Bug Fixes  Fix wrong golang standard time    ","title":"Release Apache SkyWalking CLI 0.4.0","url":"/events/release-apache-skywalking-cli-0-4-0/"},{"content":"Huaxi Jiang (江华禧) (a.k.a. fgksgf) mainly focuses on the SkyWalking CLI project, he had participated in the \u0026ldquo;Open Source Promotion Plan - Summer 2020\u0026rdquo; and completed the project smoothly, and won the award \u0026ldquo;Most Potential Students\u0026rdquo; that shows his great willingness to continuously contribute to our community.\nUp to date, he has submitted 26 PRs in the CLI repository, 3 PRs in the main repo, all in total include ~4000 LOC.\nAt Sep. 28th, 2020, the project management committee (PMC) passed the proposal of promoting him as a new committer. He has accepted the invitation at the same day.\nWelcome to join the committer team, Huaxi!\n","title":"Welcome Huaxi Jiang (江华禧) as new committer","url":"/events/welcome-huaxi-jiang-as-new-committer/"},{"content":"SkyWalking Python 0.3.0 is released. Go to downloads page to find release tars.\n  New plugins\n Urllib3 Plugin (#69) Elasticsearch Plugin (#64) PyMongo Plugin (#60) Rabbitmq Plugin (#53) Make plugin compatible with Django (#52)    API\n Add process propagation (#67) Add tags to decorators (#65) Add Check version of packages when install plugins (#63) Add thread propagation (#62) Add trace ignore (#59) Support snapshot context (#56) Support correlation context (#55)    Chores and tests\n Test: run multiple versions of supported libraries (#66) Chore: add pull request template for plugin (#61) Chore: add dev doc and reorganize the structure (#58) Test: update test health check (#57) Chore: add make goal to package release tar ball (#54)    ","title":"Release Apache SkyWalking Python 0.3.0","url":"/events/release-apache-skywalking-python-0-3-0/"},{"content":"吴晟 吴晟,Apache 基金会会员,Apache SkyWalking 创始人、项目 VP 和 PMC 成员,Apache 孵化器 PMC 成员,Apache ShardingSphere PMC成员,Apache APISIX PMC 成员,Apache ECharts (incubating) 和Apache DolphinScheduler (incubating) 孵化器导师,Zipkin 成员和贡献者。\n分享大纲  分布式追踪兴起的背景 SkyWalking和其他分布式追踪的异同 定位问题的流程和方法 性能剖析的由来、用途和优势  听众收获 听众能够全面的了解分布式追踪的技术背景,和技术原理。以及为什么这些年,分布式追踪和基于分布式追踪的APM系统,Apache SkyWalking,得到了广泛的使用、集成,甚至云厂商的支持。同时,除了针对追踪数据,我们应该关注更多的是,如何利用其产生的监控数据,定位系统的性能问题。以及它有哪些短板,应该如何弥补。\nB站视频地址\n","title":"[视频] 云原生学院 - 后分布式追踪时代的性能问题定位——方法级性能剖析","url":"/zh/2020-08-13-cloud-native-academy/"},{"content":"SkyWalking Chart 3.1.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 8.1.0 Support enable oap dynamic configuration through k8s configmap  ","title":"Release Apache SkyWalking Chart 3.1.0 for SkyWalking 8.1.0","url":"/events/release-apache-skywalking-chart-3-1-0-for-skywalking-8-1-0/"},{"content":" Author: Sheng Wu Original link, Tetrate.io blog  SkyWalking, a top-level Apache project, is the open source APM and observability analysis platform that is solving the problems of 21st-century systems that are increasingly large, distributed, and heterogenous. 
It\u0026rsquo;s built for the struggles system admins face today: To identify and locate needles in a haystack of interdependent services, to get apples-to-apples metrics across polyglot apps, and to get a complete and meaningful view of performance.\nSkyWalking is a holistic platform that can observe microservices on or off a mesh, and can provide consistent monitoring with a lightweight payload.\nLet\u0026rsquo;s take a look at how SkyWalking evolved to address the problem of observability at scale, and grew from a pure tracing system to a feature-rich observability platform that is now used to analyze deployments that collect tens of billions of traces per day.\nDesigning for scale When SkyWalking was first initialized back in 2015, its primary use case was monitoring the first-generation distributed core system of China Top Telecom companies, China Unicom and China Mobile. In 2013-2014, the telecom companies planned to replace their old traditional monolithic applications with a distributed system. Supporting a super-large distributed system and scaleablity were the high-priority design goals from Day one. So, what matters at scale?\nPull vs. push Pull and push modes relate to the direction of data flow. If the agent collects data and pushes them to the backend for further analysis, we call it \u0026ldquo;push\u0026rdquo; mode. Debate over pull vs. push has gone on for a long time. The key for an observability system is to minimize the cost of the agent, and to be generally suitable for different kinds of observability data.\nThe agent would send the data out a short period after it is collected. Then, we would have less concern about overloading the local cache. One typical case would be endpoint (URI of HTTP, service of gRPC) metrics. Any service could easily have hundreds, even thousands of endpoints. An APM system must have these metrics analysis capabilities.\nFurthermore, metrics aren\u0026rsquo;t the only thing in the observability landscape; traces and logs are important too. SkyWalking is designed to provide a 100% sampling rate tracing capability in the production environment. Clearly, push mode is the only solution.\nAt the same time, using push mode natively doesn\u0026rsquo;t mean SkyWalking can\u0026rsquo;t do data pulling. In recent 8.x releases, SkyWalking supports fetching data from Prometheus-instrumented services for reducing the Non-Recurring Engineering of the end users. Also, pull mode is popular in the MQ based transport, typically as a Kafka consumer. The SkyWalking agent side uses the push mode, and the OAP server uses the pull mode.\nThe conclusion: push mode is the native way, but pull mode works in some special cases too.\nMetrics analysis isn\u0026rsquo;t just mathematical calculation Metrics rely on mathematical theories and calculations. Percentile is a good measure for identifying the long tail issue, and reasonable average response time and successful rate are good SLO(s). But those are not all. Distributed tracing provides not just traces with detailed information, but high values metrics that can be analyzed.\nThe service topology map is required from Ops and SRE teams for the NOC dashboard and confirmation of system data flow. SkyWalking uses the STAM (Streaming Topology Analysis Method) to analyze topology from the traces, or based on ALS (Envoy Access Log Service) in the service mesh environment. 
This topology and metrics of nodes (services) and lines (service relationships) can\u0026rsquo;t be pulled from simple metrics SDKs.\nAs with fixing the limitation of endpoint metrics collection, SkyWalking needs to do endpoint dependency analysis from trace data too. Endpoint dependency analysis provides more important and specific information, including upstream and downstream. Those dependency relationships and metrics help the developer team to locate the boundaries of a performance issue, to specific code blocks.\nPre-calculation vs. query stage calculation? Query stage calculation provides flexibility. Pre-calculation, in the analysis stage, provides better and much more stable performance. Recall our design principle: SkyWalking targets a large-scale distributed system. Query stage calculation was very limited in scope, and most metrics calculations need to be pre-defined and pre-calculated. The key of supporting large datasets is reducing the size of datasets in the design level. Pre-calculation allows the original data to be merged into aggregated results downstream, to be used in a query or even for an alert check.\nTTL of metrics is another important business enabler. With the near linear performance offered by queries because of pre-calculation, with a similar query infrastructure, organizations can offer higher TTL, thereby providing extended visibility of performance.\nSpeaking of alerts, query-stage calculation also means the alerting query is required to be based on the query engine. But in this case, when the dataset increasing, the query performance could be inconsistent. The same thing happens in a different metrics query.\nCases today Today, SkyWalking is monitoring super large-scale distributed systems in many large enterprises, including Alibaba, Huawei, Tencent, Baidu, China Telecom, and various banks and insurance companies. The online service companies have more traffic than the traditional companies, like banks and telecom suppliers.\nSkyWalking is the observability platform used for a variety of use cases for distributed systems that are super-large by many measures:\n Lagou.com, an online job recruitment platform  SkyWalking is observing \u0026gt;100 services, 500+ JVM instances SkyWalking collects and analyzes 4+ billion traces per day to analyze performance data, including metrics of 300k+ endpoints and dependencies Monitoring \u0026gt;50k traffic per second in the whole cluster   Yonghui SuperMarket, online service  SkyWalking analyzes at least 10+ billion (3B) traces with metrics per day SkyWalking\u0026rsquo;s second, smaller deployment, analyzes 200+ million traces per day   Baidu, internet and AI company, Kubernetes deployment  SkyWalking collects 1T+ traces a day from 1,400+ pods of 120+ services Continues to scale out as more services are added   Beike Zhaofang(ke.com), a Chinese online property brokerage backed by Tencent Holdings and SoftBank Group  Has used SkyWalking from its very beginning, and has two members in the PMC team. 
Deployments collect 16+ billion traces per day   Ali Yunxiao, DevOps service on the Alibaba Cloud,  SkyWalking collects and analyzes billions of spans per day SkyWalking keeps AliCloud\u0026rsquo;s 45 services and ~300 instances stable   A department of Alibaba TMall, one of the largest business-to-consumer online retailers, spun off from Taobao  A customized version of SkyWalking monitors billions of traces per day At the same time, they are building a load testing platform based on SkyWalking\u0026rsquo;s agent tech stack, leveraging its tracing and context propagation cabilities    Conclusion SkyWalking\u0026rsquo;s approach to observability follows these principles:\n Understand the logic model: don\u0026rsquo;t treat observability as a mathematical tool. Identify dependencies first, then their metrics. Scaling should be accomplished easily and natively. Maintain consistency across different architectures, and in the performance of APM itself.  Resources  Read about the SkyWalking 8.1 release highlights. Get more SkyWalking updates on Twitter. Sign up to hear more about SkyWalking and observability from Tetrate.  ","title":"Observability at Scale: SkyWalking it is","url":"/blog/2020-08-11-observability-at-scale/"},{"content":" 作者:吴晟 翻译:董旭 金蝶医疗 原文链接:Tetrate.io blog  SkyWalking做为Apache的顶级项目,是一个开源的APM和可观测性分析平台,它解决了21世纪日益庞大、分布式和异构的系统的问题。它是为应对当前系统管理所面临的困难而构建的:就像大海捞针,SkyWalking可以在服务依赖复杂且多语言环境下,获取服务对应的指标,以及完整而有意义的性能视图。\nSkyWalking是一个非常全面的平台,无论你的微服务是否在服务网格(Service Mesh)架构下,它都可以提供高性能且一致性的监控。\n让我们来看看,SkyWalking是如何解决大规模集群的可观测性问题,并从一个纯粹的链路跟踪系统,发展成为一个每天分析百亿级跟踪数据,功能丰富的可观测性平台。\n为超大规模而生 SkyWalking的诞生,时间要追溯到2015年,当时它主要应用于监控顶级电信公司(例如:中国联通和中国移动)的第一代分布式核心系统。2013-2014年,这些电信公司计划用分布式系统取代传统的单体架构应用。从诞生那天开始,SkyWalking首要的设计目标,就是能够支持超大型分布式系统,并具有很好可扩展性。那么支撑超大规模系统要考虑什么呢?\n拉取vs推送 与数据流向息息相关的:拉取模式和推送模式。Agent(客户端)收集数据并将其推送到后端,再对数据进一步分析,我们称之为“推送”模式。究竟应该使用拉取还是推送?这个话题已经争论已久。关键因素取决于可观测性系统的目标,即:在Agent端花最小的成本,使其适配不同类型的可观测性数据。\nAgent收集数据后,可以在短时间内发送出去。这样,我们就不必担心本地缓存压力过大。举一个典型的例子,任意服务都可以轻松地拥有数百个甚至数千个端点指标(如:HTTP的URI,gRPC的服务)。那么APM系统就必须具有分析这些数量庞大指标的能力。\n此外,度量指标并不是可观测性领域中的唯一关注点,链路跟踪和日志也很重要。在生产环境下,SkyWalking为了能提供100%采样率的跟踪能力,数据推送模式是唯一可行的解决方案。\nSkyWalking即便使用了推送模式,同时也可进行数据拉取。在最近的8.x的发版本中,SkyWalking支持从已经集成Prometheus的服务中获取终端用户的数据,避免重复工程建设,减少资源浪费。另外,比较常见的是基于MQ的传输构建拉取模式,Kafka消费者就是一个比较典型的例子。SkyWalking的Agent端使用推送模式,OAP服务器端使用拉取模式。\n结论:SkyWalking的推送模式是原生方式,但拉取式模式也适用于某些特殊场景。\n度量指标分析并不仅仅是数学统计 度量指标依赖于数学理论和计算。Percentile(百分位数)是用于反映响应时间的长尾效应。服务具备合理的平均响应时间和成功率,说明服务的服务等级目标(SLO)很好。除此之外,分布式跟踪还为跟踪提供了详细的信息,以及可分析的高价值指标。\n运维团队(OPS)和系统稳定性(SRE)团队通过服务拓扑图,用来观察网络情况(当做NOC dashboard使用)、确认系统数据流。SkyWalking依靠trace(跟踪数据),使用STAM(Streaming Topology Analysis Method)方法进行分析拓扑结构。在服务网格环境下,使用ALS(Envoy Access Log Service)进行拓扑分析。节点(services)和线路(service relationships)的拓扑结构和度量指标数据,无法通过sdk轻而易举的拿到。\n为了解决端点度量指标收集的局限性,SkyWalking还要从跟踪数据中分析端点依赖关系,从而拿到链路上游、下游这些关键具体的信息。这些依赖关系和度量指标信息,有助于开发团队定位引起性能问题的边界,甚至代码块。\n预计算还是查询时计算? 
相比查询时计算的灵活性,预计算可以提供更好、更稳定的性能,这在分析场景下尤为重要。回想一下我们的设计原则:SkyWalking是为了一个大规模的分布式系统而设计。查询时计算的使用范围非常有限,大多数度量计算都需要预先定义和预先计算。支持大数据集的关键是:在设计阶段,要减小数据集。预计算允许将原始数据合并到下游的聚合结果中,用于查询,甚至用于警报检查。\n使用SkyWalking的另一个重要因素是:指标的有效期,TTL(Time To Live)。由于采用了预先计算,查询提供了近似线性的高性能。这也帮助“查询系统”这类基础设施系统,提供更好的性能扩展。\n关于警报,使用查询时计算方案,也意味着警报查询需要基于查询引擎。但在这种情况下,随着数据集增加,查询性能会随之下降,其他指标查询也是一样的结果。\n目前使用案例 如今,SkyWalking在许多大型企业的超大规模分布式系统中使用,包括阿里巴巴、华为、腾讯、百度、中国通讯企业以及多家银行和保险公司。上线SkyWalking公司的流量,比银行和电信运营商这种传统公司还要大。\n在很多行业中,SkyWalking是被应用于超大型分布式系统各种场景下的一个可观测性平台:\n  拉勾网\n  SkyWalking正在观测超过100个服务,500多个JVM实例\n  SkyWalking每天收集和分析40多亿个跟踪数据,用来分析性能,其中包括30万个端点和依赖关系的指标\n  在整个群集中监控\u0026gt;50k流量/秒\n    永辉超市\n  SkyWalking每天分析至少100多亿(3B)的跟踪数据\n  其次,SkyWalking用较小的部署,每天分析2亿多个跟踪数据\n    百度\n  SkyWalking每天从1400多个pod中,从120多个服务收集1T以上的跟踪数据\n  随着更多服务的增加,规模会持续增大\n    贝壳找房(ke.com)\n  很早就使用了SkyWalking,有两名成员已经成为PMC\n  Deployments每天收集160多亿个跟踪数据\n    阿里云效\n  SkyWalking每天收集和分析数十亿个span\n  SkyWalking使阿里云的45项服务和~300个实例保持稳定\n    阿里巴巴天猫\n  SkyWalking个性化定制版,每天监控数十亿跟踪数据\n  与此同时,他们基于SkyWalking的Agent技术栈,利用其跟踪和上下文传播能力,正在构建一个全链路压测平台\n    结论 SkyWalking针对可观测性遵循以下原则:\n 理解逻辑模型:不要把可观测性当作数学统计工具。 首先确定依赖关系,然后确定它们的度量指标。 原生和方便的支撑大规模增长。 在不同的架构情况下,APM各方面表现依然保持稳定和一致。  资源  阅读SkyWalking 8.1发布亮点。 在Twitter上获取更多SkyWalking更新。 注册Tetrate以了解更多有关SkyWalking可观测性的信息。  ","title":"SkyWalking 为超大规模而生","url":"/zh/2020-08-11-observability-at-scale-skywalking-it-is/"},{"content":" Author: Sheng Wu, Hongtao Gao, and Tevah Platt(Tetrate) Original link, Tetrate.io blog  Apache SkyWalking, the observability platform, and open-source application performance monitor (APM) project, today announced the general availability of its 8.1 release that extends its functionalities and provides a transport layer to maintain the lightweight of the platform that observes data continuously.\nBackground SkyWalking is an observability platform and APM tool that works with or without a service mesh, providing automatic instrumentation for microservices, cloud-native and container-based applications. The top-level Apache project is supported by a global community and is used by Alibaba, Huawei, Tencent, Baidu, and scores of others.\nTransport traces For a long time, SkyWalking has used gRPC and HTTP to transport traces, metrics, and logs. They provide good performance and are quite lightweight, but people kept asking about the MQ as a transport layer because they want to keep the observability data continuously as much as possible. From SkyWalking’s perspective, the MQ based transport layer consumes more resources required in the deployment and the complexity of deployment and maintenance but brings more powerful throughput capacity between the agent and backend.\nIn 8.1.0, SkyWalking officially provides the typical MQ implementation, Kafka, to transport all observability data, including traces, metrics, logs, and profiling data. At the same time, the backend can support traditional gRPC and HTTP receivers, with the new Kafka consumer at the same time. Different users could choose the transport layer(s) according to their own requirements. Also, by referring to this implementation, the community could contribute various transport plugins for Apache Pulsar, RabbitMQ.\nAutomatic endpoint dependencies detection The 8.1 SkyWalking release offers automatic detection of endpoint dependencies. SkyWalking has long offered automatic endpoint detection, but endpoint dependencies, including upstream and downstream endpoints, are critical for Ops and SRE teams’ performance analysis. 
The APM system is expected to detect the relationships powered by the distributed tracing. While SkyWalking has been designed to include this important information at the beginning the latest 8.1 release offers a cool visualization about the dependency and metrics between dependent endpoints. It provides a new drill-down angle from the topology. Once you have the performance issue from the service level, you could check on instance and endpoint perspectives:\nSpringSleuth metrics detection In the Java field, the Spring ecosystem is one of the most widely used. Micrometer, the metrics API lib included in the Spring Boot 2.0, is now adopted by SkyWalking’s native meter system APIs and agent. For applications using Micrometer with the SkyWalking agent installed, all Micrometer collected metrics could then be shipped into SkyWalking OAP. With some configurations in the OAP and UI, all metrics are analyzed and visualized in the SkyWalking UI, with all other metrics detected by SkyWalking agents automatically.\nNotable enhancements The Java agent core is enhanced in this release. It could work better in the concurrency class loader case and is more compatible with another agent solution, such as Alibaba’s Arthas.\n With the logic endpoint supported, the local span can be analyzed to get metrics. One span could carry the raw data of more than one endpoint’s performance. GraphQL, InfluxDB Java Client, and Quasar fiber libs are supported to be observed automatically. Kubernetes Configmap can now for the first time be used as the dynamic configuration center– a more cloud-native solution for k8s deployment environments. OAP supports health checks, especially including the storage health status. If the storage (e.g., ElasticSearch) is not available, you could get the unhealth status with explicit reasons through the health status query. Opencensus receiver supports ingesting OpenTelemetry/OpenCensus agent metrics by meter-system.  Additional resources  Read more about the SkyWalking 8.1 release highlights. Read more about SkyWalking from Tetrate on our blog. Get more SkyWalking updates on Twitter. Sign up to hear more about SkyWalking and observability from Tetrate.  ","title":"Features in SkyWalking 8.1: SpringSleuth metrics, endpoint dependency detection, Kafka transport traces and metrics","url":"/blog/2020-08-03-skywalking8-1-release/"},{"content":"SkyWalking APM 8.1.0 is release. Go to downloads page to find release tars.\nProject  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. 
Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.1.0","url":"/events/release-apache-skywalking-apm-8-1-0/"},{"content":"Based on his continuous contributions, Wei Hua (a.k.a alonelaval) has been voted as a new committer.\n","title":"Welcome Wei Hua as new committer","url":"/events/welcome-wei-hua-as-new-committer/"},{"content":"SkyWalking Python 0.2.0 is released. Go to downloads page to find release tars.\n  Plugins:\n Kafka Plugin (#50) Tornado Plugin (#48) Redis Plugin (#44) Django Plugin (#37) PyMsql Plugin (#35) Flask plugin (#31)    API\n Add ignore_suffix Config (#40) Add missing log method and simplify test codes (#34) Add content equality of SegmentRef (#30) Validate carrier before using it (#29)    Chores and tests\n Test: print the diff list when validation failed (#46) Created venv builders for linux/windows and req flashers + use documentation (#38)    ","title":"Release Apache SkyWalking Python 0.2.0","url":"/events/release-apache-skywalking-python-0-2-0/"},{"content":"SkyWalking CLI 0.3.0 is released. Go to downloads page to find release tars.\n Command: health check command Command: Add trace command BugFix: Fix wrong metrics graphql path  ","title":"Release Apache SkyWalking CLI 0.3.0","url":"/events/release-apache-skywalking-cli-0-3-0/"},{"content":" Author: Srinivasan Ramaswamy, tetrate Original link, Tetrate.io blog  Asking How are you is more profound than What are your symptoms Background Recently I visited my preferred doctor. Whenever I visit, the doctor greets me with a series of light questions: How’s your day? How about the week before? Any recent trips? Did I break my cycling record? How’s your workout regimen? 
_Finally _he asks, “Do you have any problems?\u0026quot; On those visits when I didn\u0026rsquo;t feel ok, I would say something like, \u0026ldquo;I\u0026rsquo;m feeling dull this week, and I\u0026rsquo;m feeling more tired towards noon….\u0026quot; It\u0026rsquo;s at this point that he takes out his stethoscope, his pulse oximeter, and blood pressure apparatus. Then, if he feels he needs a more in-depth insight, he starts listing out specific tests to be made.\nWhen I asked him if the first part of the discussion was just an ice-breaker, he said, \u0026ldquo;That\u0026rsquo;s the essential part. It helps me find out how you feel, rather than what your symptoms are.\u0026quot; So, despite appearances, our opening chat about life helped him structure subsequent questions on symptoms, investigations and test results.\nOn the way back, I couldn\u0026rsquo;t stop asking myself, \u0026ldquo;Shouldn\u0026rsquo;t we be managing our mesh this way, too?\u0026quot;\nIf I strike parallels between my own health check and a health check, “tests” would be log analysis, “investigations” would be tracing, and “symptoms” would be the traditional RED (Rate, Errors and Duration) metrics. That leaves the “essential part,” which is what we are talking about here: the Wellness Factor, primarily the health of our mesh.\nHealth in the context of service mesh We can measure the performance of any observed service through RED metrics. RED metrics offer immense value in understanding the performance, reliability, and throughput of every service. Compelling visualizations of these metrics across the mesh make monitoring the entire mesh standardized and scalable. Also, setting alerts based on thresholds for each of these metrics helps to detect anomalies as and when they arise.\nTo establish the context of any service and observe them, it\u0026rsquo;s ideal to visualize the mesh as a topology.\nA topology visualization of the mesh not only allows for picking any service and watching its metrics, but also gives vital information about service dependencies and the potential impact of a given service on the mesh.\nWhile RED metrics of each service offer tremendous insights, the user is more concerned with the overall responsiveness of the mesh rather than each of these services in isolation.\nTo describe the performance of any service, right from submitting the request to receiving a completed http response, we’d be measuring the user\u0026rsquo;s perception of responsiveness. This measure of response time compared with a set threshold is called Apdex. This Apdex is an indicator of the health of a service in the mesh.\nApdex Apdex is a measure of response time considered against a set threshold**. **It is the ratio of satisfactory response times and unsatisfactory response times to total response times.\nApdex is an industry standard to measure the satisfaction of users based on the response time of applications and services. It measures how satisfied your users are with your services, as traditional metrics such as average response time could get skewed quickly.\nSatisfactory response time indicates the number of times when the roundtrip response time of a particular service was less than this threshold. Unsatisfactory response time while meaning the opposite, is further categorized as Tolerating and Frustrating. Tolerating accommodates any performance that is up to four times the threshold, and anything over that or any errors encountered is considered Frustrating. 
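To make the bucketing concrete, here is a minimal sketch in plain Java (not SkyWalking code; the class and method names are invented for this illustration) that classifies each response time against a threshold T and derives the score from the ratio described below:

// Illustrative only: not part of SkyWalking. Buckets follow the rules above:
// satisfied <= T, tolerating <= 4T, anything slower or any error is frustrated.
public final class ApdexSketch {
    private final long thresholdMs; // T, e.g. 500 ms
    private long satisfied, tolerating, total;

    public ApdexSketch(long thresholdMs) { this.thresholdMs = thresholdMs; }

    public void record(long responseTimeMs, boolean error) {
        total++;
        if (error || responseTimeMs > 4 * thresholdMs) return; // frustrated: adds nothing to the numerator
        if (responseTimeMs <= thresholdMs) satisfied++;        // counts as one full request
        else tolerating++;                                      // counts as half a request
    }

    public double score() {
        return total == 0 ? 1.0 : (satisfied + tolerating / 2.0) / total;
    }
}

Running the example computation from the next section through this sketch (170 satisfied, 20 tolerating, and 10 frustrated requests out of 200, with T = 500ms) yields the same score of 0.9.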
The threshold mentioned here is an ideal roundtrip performance that we expect from any service. We could even start with an organization-wide limit of say, 500ms.\nThe Apdex score is a ratio of satisfied and tolerating requests to the total requests made.\nEach satisfied request counts as one request, while each tolerating request counts as half a satisfied request.\nAn Apdex score takes values from 0 to 1, with 0 being the worst possible score indicating that users were always frustrated, and ‘1’ as the best possible score (100% of response times were Satisfactory).\nA percentage representation of this score also serves as the Health Indicator of the service.\nThe Math The actual computation of this Apdex score is achieved through the following formula.\n\tSatisfiedCount + ( ToleratingCount / 2 ) Apdex Score = ------------------------------------------------------ TotalSamples A percentage representation of this score is known as the Health Indicator of a service.\nExample Computation During a 2-minute period, a host handles 200 requests.\nThe Apdex threshold T = 0.5 seconds (500ms).\n 170 of the requests were handled within 500ms, so they are classified as Satisfied. 20 of the requests were handled between 500ms and 2 seconds (2000 ms), so they are classified as Tolerating. The remaining 10 were not handled properly or took longer than 2 seconds, so they are classified as Frustrated.  The resulting Apdex score is 0.9: (170 + (20/2))/200 = 0.9.\nThe next level At the next level, we can attempt to improve our topology visualization by coloring nodes based on their health. Also, we can include health as a part of the information we show when the user taps on a service.\nApdex specifications recommend the following Apdex Quality Ratings by classifying Apdex Score as Excellent (0.94 - 1.00), Good (0.85 - 0.93), Fair (0.70 - 0.84), Poor (0.50 - 0.69) and Unacceptable (0.00 - 0.49).\nTo visualize this, let’s look at our topology using traffic light colors, marking our nodes as Healthy, At-Risk and Unhealthy, where Unhealthy indicates health that falls below 80%. A rate between 80% and 95% indicates At-Risk, and health at 95% and above is termed Healthy.\nLet’s incorporate this coloring into our topology visualization and take its usability to the next level. If implemented, we will be looking at something like this.\nMoving further Apdex provides tremendous visibility into customer satisfaction on the responsiveness of our services. Even more, by extending the implementation to the edges calling this service we get further insight into the health of the mesh itself.\nTwo services with similar Apdex scores offer the same customer satisfaction to the customer. However, the size of traffic that flows into the service can be of immense help in prioritizing between services to address. A service with higher traffic flow is an indication that this experience is impacting a significant number of users on the mesh.\nWhile health relates to a service, we can also analyze the interactions between two services and calculate the health of the interaction. This health calculation of every interaction on the mesh helps us establish a critical path, based on the health of all interactions in the entire topology.\nIn a big mesh, showing traffic as yet another number will make it more challenging to visualize and monitor. 
We can, with a bit of creativity, improve the entire visualization by rendering the edges that connect services with different thickness depending on the throughput of the service.\nAn unhealthy service participating in a high throughput transaction could lead to excessive consumption of resources. On the other hand, this visualization also offers a great tip to maximize investment in tuning services.\nTuning service that is a part of a high throughput transaction offers exponential benefits when compared to tuning an occasionally used service.\nIf we look at implementing such a visualization, which includes the health of interactions and throughput of such interactions, we would be looking at something like below :\nThe day is not far These capabilities are already available to users today as one of the UI features of Tetrate’s service mesh platform, using the highly configurable and performant observability and performance management framework: Apache SkyWalking (https://skywalking.apache.org), which monitors traffic across the mesh, aggregates RED metrics for both services and their interactions, continuously computes and monitors health of the services, and enables users to configure alerts and notifications when services cross specific thresholds, thereby having a comprehensive health visibility of the mesh.\nWith such tremendous visibility into our mesh performance, the day is not far when we at our NOC (Network Operations Center) for the mesh have this topology as our HUD (Heads Up Display).\nThis HUD, with the insights and patterns gathered over time, would predict situations and proactively prompt us on potential focus areas to improve customer satisfaction.\nThe visualization with rich historical data can also empower the Network Engineers to go back in time and look at the performance of the mesh on a similar day in the past.\nAn earnest implementation of such a visualization would be something like below :\nTo conclude With all the discussion so far, the health of a mesh is more about how our users feel, and what we can proactively do as service providers to sustain, if not enhance, the experience of our users.\nAs the world advances toward personalized medicine, we\u0026rsquo;re not far from a day when my doctor will text me: \u0026ldquo;How about feasting yourself with ice cream today and take the Gray Butte Trail to Mount Shasta!\u0026rdquo; Likewise, we can do more for our customers by having better insight into their overall wellness.\nTetrate’s approach to “service mesh health” is not only to offer management, monitoring and support but to make infrastructure healthy from the start to reduce the probability of incidents. Powered by the Istio, Envoy, and SkyWalking, Tetrate\u0026rsquo;s solutions enable consistent end-to-end observability, runtime security, and traffic management for any workload in any environment.\nOur customers deserve healthy systems! Please do share your thoughts on making service mesh an exciting and robust experience for our customers.\nReferences  https://en.wikipedia.org/wiki/Apdex https://www.apdex.org/overview.html https://www.apdex.org/index.php/specifications/ https://skywalking.apache.org/  ","title":"The Apdex Score for Measuring Service Mesh Health","url":"/blog/2020-07-26-apdex-and-skywalking/"},{"content":" 作者: Srinivasan Ramaswamy, tetrate 翻译:唐昊杰,南京大学在读学生 校对:吴晟 Original link, Tetrate.io blog July. 
26th, 2020  \u0026ldquo;你感觉怎么样\u0026rdquo; 比 \u0026ldquo;你的症状是什么\u0026rdquo; 更重要 背景 最近我拜访了我的医生。每次去看病,医生都会首先问我一连串轻快的问题,比如:你今天过得怎么样?上周过的怎么样?最近有什么出行吗?你打破了自己的骑车记录吗?你的锻炼计划实施如何?最后他会问:“你有什么麻烦吗?”如果这个时候我感觉自己不太好,我会说:“我这周感觉很沉闷,临近中午的时候感觉更累。”这时他就会拿出听诊器、脉搏血氧仪和血压仪。然后,如果他觉得自己需要更深入的了解情况,他就开始列出我需要做的具体检查。\n当我问他,最开始的讨论是否只是为了缓和氛围。他说:“这是必不可少的部分。它帮助我发现你感觉如何,而不是你的症状是什么。\u0026quot;。我们这样关于生活的开场聊天,帮助他组织了后续关于症状、调查和测试结果的问题。\n在回来的路上,我不停地问自己:“我们是不是也应该用这种方式管理我们的网格(service mesh)?”\n如果我把自己的健康检查和网格的健康检查进行类比,“医疗检查”就是日志分析,“调查”就是追踪,“症状”就是传统的RED指标(请求速率、请求错误和请求耗时)。那么根本的问题,就是我们在这里讨论的:健康因素(主要是网格的健康)。\n服务网格中的健康状况 我们可以通过RED指标来衡量任何被观察到的服务的性能。RED指标在了解每个服务的性能、可靠性和吞吐量方面提供了巨大的价值。这些指标在网格上的令人信服的可视化使得监控全部网格变得标准化和可扩展。此外,根据这些指标的阈值设置警报有助于在指标值异常的时候进行异常检测。\n为了建立任何服务的上下文环境并观察它们,理想的做法是将网格可视化为一个拓扑结构。\n网格的拓扑结构可视化不仅允许使用者挑选任意服务并观察其指标,还可以提供有关服务依赖和特定服务在网格上的潜在影响这些重要信息。\n虽然每个服务的RED指标为使用者提供了深刻的洞察能力,但使用者更关心网格的整体响应性,而非每个单独出来的服务的响应性。\n为了描述任意服务的性能(即从提交请求到收到完成了的http响应这段时间内的表现),我们会测量用户对响应性的感知。这种将响应时间与设定的阈值进行比较的衡量标准叫做Apdex。Apdex是衡量一个服务在网格中的健康程度的指标。\nApdex Apdex是根据设定的阈值和响应时间结合考虑的衡量标准。它是满意响应时间和不满意响应时间相对于总响应时间的比率。\nApdex是根据应用和服务的响应时间来衡量使用者满意程度的行业标准。它衡量的是用户对你的服务的满意程度,因为传统的指标(如平均响应时间)可能很快就会容易形成偏差。\n基于满意度的响应时间,表示特定服务的往返响应时间小于设定的阈值的次数。不满意响应时间虽然意思相反,但又进一步分为容忍型和失望型。容忍型包括了了任何响应时间不超过四倍阈值的表现,而任何超过四倍阈值或遇到了错误的表现都被认为是失望型。这里提到的阈值是我们对任意服务所期望的理想响应表现。我们可以设置一个全局范围的阈值,如,500ms。\nApdex得分是满意型请求和容忍型请求与做出的总请求的比率。\n每个_满意的请求_算作一个请求,而每个_容忍的请求_算作半个_满意_的请求。\n一个Apdex得分从0到1的范围内取值。0是最差的分数,表示用户总是感到失望;而'1\u0026rsquo;是最好的分数(100%的响应时间是令人满意的)。\n这个分数的百分比表示也可以用作服务的健康指标。\n数学表示 Apdex得分的实际计算是通过以下公式实现的:\n\t满意请求数 + ( 容忍请求数 / 2 ) Apdex 得分 = ------------------------------------------------------ 总请求数 此公示得到的百分率,即可视为服务的健康度。\n样例计算 在两分钟的采样时间内,主机处理200个请求。\nApdex阈值T设置为0.5秒(500ms)。\n*.\t170个请求在500ms内被处理完成,它们被分类为满意型。 *.\t20个请求在500ms和2秒间被处理,它们被分类为容忍型。 *.\t剩余的10个请求没有被正确处理或者处理时间超过了2秒,所以它们被分类为失望型。\n最终的Apdex得分是0.9,即(170 + (20 / 2))/ 200。\n深入使用 在接下来的层次,我们可以尝试通过根据节点的健康状况来着色节点以改进我们的拓扑可视化。此外,我们还可以在用户点击服务时将健康状况作为我们展示的信息的一部分。\nApdex规范推荐了以下Apdex质量评级,将Apdex得分分为优秀(0.94 - 1.00)、良好(0.85 - 0.93)、一般(0.70 - 0.84)、差(0.50 - 0.69)和不可接受(0.00 - 0.49)。\n为了可视化网格的健康状况,我们用交通灯的颜色将我们的节点标记为健康、有风险和不健康,其中不健康表示健康率低于80%。健康率在80%到95%之间的表示有风险,健康率在95%及以上的称为健康。\n让我们将这种着色融入到我们的拓扑可视化中,并将其可用性提升到一个新的水平。如果实施,我们将看到下图所示的情况。\n更进一步 Apdex为客户对我们服务响应性的满意度提供了可见性。更有甚者,通过将实施范围扩展到调用该服务的调用关系,我们可以进一步了解网格本身的健康状况。\n两个有着相似Apdex分数的服务,为客户提供了相同的客户满意度。然而,流入服务的流量大小对于优先处理哪一服务有着巨大的帮助。流量较高的服务表明这种服务体验影响了网格上更大量的使用者。\n虽然健康程度与单个服务有关,但我们也可以分析两个服务之间的交互并计算交互过程的健康程度。这种对网格上每一个交互的健康程度的计算,可以帮助我们根据整个拓扑结构中所有交互的健康程度,建立一个关键路径。\n在一个大的网格中,将流量展示为另一个数字将使可视化和监控更具挑战性。我们可以根据服务的吞吐量,通过用不同的粗细程度渲染连接服务的边来改善整个可视化的效果。\n一个位于高吞吐量事务的不健康的服务可能会导致资源的过度消耗。另一方面,这种可视化也为调整服务时获取最大化投资效果提供了一个很好的提示。\n与调整一个偶尔使用的服务相比,调整作为高吞吐量事务的一部分的那些服务会带来指数级的收益。\n实施这种包括了交互的健康状况和吞吐量的可视化,我们会看到下图所示的情况:\n这一天即将到来 目前,这些功能已经作为Tetrate服务网格平台的UI功能之一来提供给用户。该平台使用了高速可配置化、高性能的可观测性和监控性能管理平台:Apache SkyWalking (https://skywalking.apache.org),SkyWalking可以监控整个网格的流量,为服务及它们的交互合计RED指标,持续计算和监控服务的健康状况,并使用户能够在服务超过特定阈值时配置报警和通知。这些功能使得SkyWalking对网格拥有全面的健康状况可见性。\n有了这样强大的网格性能可视性,我们将可以在为网格准备的网络运营中心使用这种拓扑结构作为我们的HUD(Heads Up Display)。\nHUD随着时间的推移收集了解到的信息和模式,并将预测各种情况和主动提示我们潜在的重点领域以提高客户满意度。\n丰富的历史数据的可视化也可以使网络工程师能够看看过去中类似的一天的网格表现。\n可视化效果如下图所示。\n总结 综合到目前为止的所有讨论,网格的健康状况更多地是关于用户的感受,以及我们作为服务提供商可以采取积极行动来维持(如果不能增强)用户的体验。\n着个人化医学的发展,现在距离我的医生给我发这样短信的日子并不遥远:“要不今天享用冰淇淋并且沿着灰色小山步道到达沙斯塔山!”相似的,我们可以通过更好地了解客户的整体健康状况为他们做更多的事情。\nTetrate的“服务网格健康程度”方法不仅提供了管理,监视和支持,而且从一开始就使基础架构保持健康以减少事故发生的可能性。在Istio,Envoy和SkyWalking的支持下,Tetrate的解决方案可为任何环境中的任何工作负载提供持续的端到端可观察性,运行时安全性和流量管理。\n我们的客户应该拥有健康的系统!请分享您对使用服务网格为我们的客户带来令人兴奋和强健的体验的想法。\n引用  https://en.wikipedia.org/wiki/Apdex https://www.apdex.org/overview.html 
https://www.apdex.org/index.php/specifications/ https://skywalking.apache.org/  ","title":"度量服务网格健康度——Apdex得分","url":"/zh/2020-07-26-apdex-and-skywalking/"},{"content":"SkyWalking Python 0.1.0 is released. Go to downloads page to find release tars.\n API: agent core APIs, check the APIs and the examples Plugin: built-in libraries http, urllib.request and third-party library requests are supported. Test: agent test framework is setup, and the corresponding tests of aforementioned plugins are also added.  ","title":"Release Apache SkyWalking Python 0.1.0","url":"/events/release-apache-skywalking-python-0-1-0/"},{"content":"SkyWalking Chart 3.0.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 8.0.1  ","title":"Release Apache SkyWalking Chart 3.0.0 for SkyWalking 8.0.1","url":"/events/release-apache-skywalking-chart-3-0-0-for-skywalking-8-0-1/"},{"content":"Apache SkyWalking 8.0.1 已发布。SkyWalking 是观察性分析平台和应用性能管理系统,提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案,支持 Java, .Net Core, PHP, NodeJS, Golang, LUA 语言探针,支持 Envoy + Istio 构建的 Service Mesh。\n与 8.0.0 相比,此版本包含一个热修复程序。\nOAP-Backend\n 修复 no-init 模式在 Elasticsearch 存储中无法运行的错误  8.0.0 值得关注的变化:\n 添加并实现了 v3 协议,旧版本与 8.x 不兼容 移除服务、实例、端点注册机制和 inventory 存储实体 (inventory storage entities) 提供新的 GraphQL 查询协议,同时支持旧协议(计划在今年年底移除) 支持 Prometheus 网络协议,可将 Prometheus 格式的指标传输到 SkyWalking 中 提供 Python agent 移除所有 inventory 缓存 提供 Apache ShardingSphere (4.0.0, 4.1.1) agent 插件 UI dashboard 100% 可配置,可采用后台定义的新指标 修复 H2/MySQL 实现中的 SQL 注入漏洞 Upgrade Nacos to avoid the FastJson CVE in high frequency. 升级 Nacos 以避免 FastJson CVE 升级 jasckson-databind 至 2.9.10  下载地址:http://skywalking.apache.org/downloads/\n","title":"Apache SkyWalking 8.0.1 发布","url":"/zh/2020-06-21-skywalking8-0-1-release/"},{"content":"SkyWalking Nginx LUA 0.2.0 is release. Go to downloads page to find release tars.\n Adapt the new v3 protocol. Implement correlation protocol. Support batch segment report.  ","title":"Relase Apache SkyWalking Nginx LUA 0.2.0","url":"/events/release-apache-skywalking-nginx-lua-0-2-0/"},{"content":"SkyWalking APM 8.0.0 is release. Go to downloads page to find release tars.\nProject  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy procotol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. 
Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","title":"Release Apache SkyWalking APM 8.0.0","url":"/events/release-apache-skywalking-apm-8-0-0/"},{"content":"可观察性平台和开源应用程序性能监控(APM)项目 Apache SkyWalking,今天刚宣布 8.0 的发布版本。素以强劲指标、追踪与服务网格能力见称的 SkyWalking ,在最新版本中的功能性延展到用户渴求已久的功能 —— 将指标功能和包括 Prometheus 的其他指标收集系统进行了融合。\n什么是 Apache SkyWalking? SkyWalking 是可观察性平台和 APM 工具,可以选择是否搭载服务网格的使用,为微服务、云原生和容器化应用提供自动度量功能。顶尖的 Apache 项目由来自世界各地的社区人员支持,应用在阿里巴巴、华为、腾讯、百度和大量其他企业。SkyWalking 提供记录、监控和追踪功能,同时也得力于其架构而拥有数据收集终端、分析平台,还有用户界面。\n值得关注的优化包括:  用户界面 Dashboard 上提供百分百的自由度,用户可以任意进行配置,采用后台新定义的指标。 支持 Prometheus 导出格式。Prometheus 格式的指标可以转换至 SkyWalking。 SkyWalking 现已可以自主监控服务网格,为 Istio 和 Envoy 提供指标。 服务、实例、终端地址的注册机制,和库存存储实体已经被移除了。  无须修改原始码的前提下,为用户界面加入新的指标 对于 SkyWalking 的用户,8.0 版本的亮点将会是数据模型的更新,而且传播格式也针对更多语言进行优化。再加上引进了新的 MeterSystem ,除了可以同步运行传统追踪模式,用户还可自定义需要收集的指标。追踪和服务网格专注在拓扑和服务流量的指标上,而 MeterSystem 则汇报用户感兴趣的业务指标,例如是数据库存取性能、圣诞节期间的下单率,或者用户注册或下单的百分比。这些指标数据会在 SkyWalking 的用户界面 Dashboard 上以图像显示。指标的面板数据和拓扑图可以通过 Envoy 的指标绘制,而追踪分析也可以支持 Istio 的遥测。Dashboard 还支持以 JSON 格式导入、导出,而 Dashboard 上的自定义指标也支持设定指标名称、实体种类(服务、实例、终端地址或全部)、标记值等。用户界面模板上已详细描述了用户界面的逻辑和原型配置,以及它的 Dashboard、tab 和组件。\n观察任何配备了 Prometheus 的应用 在这次最新的社区发布中,SkyWalking 可以观察任何配备了 Prometheus 或者提供了 Prometheus 终端地址的应用。这项更新为很多想采用 SkyWalking 指标和追踪的用户节省了不少时间,现在你不再需要重新设置指标工具,就可以获得 Prometheus 数据。因为 Prometheus 更简单、更为人熟悉,是不少用户的不二选择。有了 8.0 版本,Prometheus 网络协议就能够读取所有已设定在 API 上的数据,另外 Prometheus 格式的指标也可转换至 SkyWalking 上。如此一来,通过图像方式展示,所有的指标和拓扑都能一目了然。同时,也支持 Prometheus 的 fetcher。\n监控你的网格 SkyWalking 现在不再只是监控服务或平台,而是监控整个网格。有了 8.0 版本,你除了能获取关于你的网格的指标(包括 Istio 和 Envoy 在内),同时也能通过 SkyWalking 监控自身的性能。因为当监控服务在观察业务集群的同时,它也能实现自我观察,确保运维团队拥有稳定可靠的平台。\n性能优化 最后,8.0 发布移除了注册机制,也不再需要使用独一无二的整数来代表实体。这项改变将大幅优化性能。想了解完整的更新功能列表,可以阅读在 SkyWalking 社区发布的公告页面。\n额外资源  追踪 Twitter 获取更多 SkyWalking 最新资讯 SkyWalking 未来的发布会加入原生指标 API 和融合 Micrometer (Sleuth) 指标集合。  ","title":"SkyWalking 的最新动向?8.0 版本的 MeterSystem 和网格监控","url":"/zh/whats-new-in-skywalking-metersystem-and-mesh-monitoring-in-8-0/"},{"content":"作者:宋净超、张伟\n日前,云原生网络代理 MOSN v0.12.0 发布,观察性分析平台和应用性能管理系统 SkyWalking 完成了与 MOSN 的集成,作为 MOSN 中的支持的分布式追踪系统之一,旨在实现在微服务和 Service Mesh 中的更强大的可观察性。\n背景 
相比传统的巨石(Monolith)应用,微服务的一个主要变化是将应用中的不同模块拆分为了独立的进程。在微服务架构下,原来进程内的方法调用成为了跨进程的远程方法调用。相对于单一进程内的方法调用而言,跨进程调用的调试和故障分析是非常困难的,难以使用传统的代码调试程序或者日志打印来对分布式的调用过程进行查看和分析。\n如上图右边所示,微服务架构中系统中各个微服务之间存在复杂的调用关系。\n一个来自客户端的请求在其业务处理过程中经过了多个微服务进程。我们如果想要对该请求的端到端调用过程进行完整的分析,则必须将该请求经过的所有进程的相关信息都收集起来并关联在一起,这就是“分布式追踪”。\n以上关于分布式追踪的介绍引用自 Istio Handbook。\nMOSN 中 tracing 的架构 MOSN 的 tracing 框架由 Driver、Tracer 和 Span 三个部分组成。\nDriver 是 Tracer 的容器,管理注册的 Tracer 实例,Tracer 是 tracing 的入口,根据请求信息创建一个 Span,Span 存储当前跨度的链路信息。\n目前 MOSN tracing 有 SOFATracer 和 SkyWalking 两种实现。SOFATracer 支持 http1 和 xprotocol 协议的链路追踪,将 trace 数据写入本地日志文件中。SkyWalking 支持 http1 协议的链路追踪,使用原生的 Go 语言探针 go2sky 将 trace 数据通过 gRPC 上报到 SkyWalking 后端服务。\n快速开始 下面将使用 Docker 和 docker-compose 来快速开始运行一个集成了 SkyWalking 的分布式追踪示例,该示例代码请见 MOSN GitHub。\n准备 安装 docker 和 docker-compose。\n  安装 docker\n  安装 docker-compose\n  需要一个编译好的 MOSN 程序,您可以下载 MOSN 源码自行编译,或者直接下载 MOSN v0.12.0 发行版以获取 MOSN 的运行时二进制文件。\n下面将以源码编译的方式演示 MOSN 如何与 SkyWalking 集成。\ncd ${projectpath}/cmd/mosn/main go build 获取示例代码目录。\n${targetpath} = ${projectpath}/examples/codes/trace/skywalking/http/ 将编译好的程序移动到示例代码目录。\nmv main ${targetpath}/ cd ${targetpath} 目录结构 下面是 SkyWalking 的目录结构。\n* skywalking └─── http │ main # 编译完成的 MOSN 程序 | server.go # 模拟的 Http Server | clint.go # 模拟的 Http Client | config.json # MOSN 配置 | skywalking-docker-compose.yaml # skywalking docker-compose 运行说明 启动 SkyWalking oap \u0026amp; ui。\ndocker-compose -f skywalking-docker-compose.yaml up -d 启动一个 HTTP Server。\ngo run server.go 启动 MOSN。\n./main start -c config.json 启动一个 HTTP Client。\ngo run client.go 打开 http://127.0.0.1:8080 查看 SkyWalking-UI,SkyWalking Dashboard 界面如下图所示。\n在打开 Dashboard 后请点击右上角的 Auto 按钮以使页面自动刷新。\nDemo 视频 下面来看一下该 Demo 的操作视频。\n\n清理 要想销毁 SkyWalking 后台运行的 docker 容器只需要下面的命令。\ncd ${projectpath}/examples/codes/trace/skywalking/http/ docker-compose -f skywalking-docker-compose.yaml down 未来计划 在今年五月份,SkyWalking 8.0 版本会进行一次全面升级,采用新的探针协议和分析逻辑,探针将更具互感知能力,更好的在 Service Mesh 下使用探针进行监控。同时,SkyWalking 将开放之前仅存在于内核中的 metrics 指标分析体系。Prmoetheus、Spring Cloud Sleuth、Zabbix 等常用的 metrics 监控方式,都会被统一的接入进来,进行分析。此外, SkyWalking 与 MOSN 社区将继续合作:支持追踪 Dubbo 和 SOFARPC,同时适配 sidecar 模式下的链路追踪。\n关于 MOSN MOSN 是一款使用 Go 语言开发的网络代理软件,由蚂蚁金服开源并经过几十万容器的生产级验证。 MOSN 作为云原生的网络数据平面,旨在为服务提供多协议、模块化、智能化、安全的代理能力。 MOSN 是 Modular Open Smart Network 的简称。 MOSN 可以与任何支持 xDS API 的 Service Mesh 集成,亦可以作为独立的四、七层负载均衡,API Gateway、云原生 Ingress 等使用。\n GitHub:https://github.com/mosn/mosn 官网:https://mosn.io  关于 Skywalking SkyWalking 是观察性分析平台和应用性能管理系统。提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案。支持 Java、.Net Core、PHP、NodeJS、Golang、LUA 语言探针,支持 Envoy/MOSN + Istio 构建的 Service Mesh。\n GitHub:https://github.com/apache/skywalking 官网:https://skywalking.apache.org  关于本文中的示例请参考 MOSN GitHub 和 MOSN 官方文档。\n","title":"SkyWalking 支持云原生网络代理 MOSN 做分布式追踪","url":"/zh/2020-04-28-skywalking-and-mosn/"},{"content":"Based on his continuous contributions, Wei Zhang (a.k.a arugal) has been invited to join the PMC. Welcome aboard.\n","title":"Welcome Wei Zhang to join the PMC","url":"/events/welcome-wei-zhang-to-join-the-pmc/"},{"content":"目录:\n 1. 概述 2. 搭建 SkyWalking 单机环境 3. 搭建 SkyWalking 集群环境 4. 告警 5. 注意事项 6. Spring Boot 使用示例 6. Spring Cloud 使用示例    作者:芋道源码 原文地址   1. 
概述 1.1 概念 SkyWalking 是什么?\n FROM http://skywalking.apache.org/\n分布式系统的应用程序性能监视工具,专为微服务、云原生架构和基于容器(Docker、K8s、Mesos)架构而设计。\n提供分布式追踪、服务网格遥测分析、度量聚合和可视化一体化解决方案。\n 1.2 功能列表 SkyWalking 有哪些功能?\n FROM http://skywalking.apache.org/\n 多种监控手段。可以通过语言探针和 service mesh 获得监控是数据。 多个语言自动探针。包括 Java,.NET Core 和 Node.JS。 轻量高效。无需大数据平台,和大量的服务器资源。 模块化。UI、存储、集群管理都有多种机制可选。 支持告警。 优秀的可视化解决方案。   1.3 整体架构 SkyWalking 整体架构如何?\n FROM http://skywalking.apache.org/\n 整个架构,分成上、下、左、右四部分:\n 考虑到让描述更简单,我们舍弃掉 Metric 指标相关,而着重在 Tracing 链路相关功能。\n  上部分 Agent :负责从应用中,收集链路信息,发送给 SkyWalking OAP 服务器。目前支持 SkyWalking、Zikpin、Jaeger 等提供的 Tracing 数据信息。而我们目前采用的是,SkyWalking Agent 收集 SkyWalking Tracing 数据,传递给服务器。 下部分 SkyWalking OAP :负责接收 Agent 发送的 Tracing 数据信息,然后进行分析(Analysis Core) ,存储到外部存储器( Storage ),最终提供查询( Query )功能。 右部分 Storage :Tracing 数据存储。目前支持 ES、MySQL、Sharding Sphere、TiDB、H2 多种存储器。而我们目前采用的是 ES ,主要考虑是 SkyWalking 开发团队自己的生产环境采用 ES 为主。 左部分 SkyWalking UI :负责提供控台,查看链路等等。  1.4 官方文档 在 https://github.com/apache/skywalking/tree/master/docs 地址下,提供了 SkyWalking 的英文文档。\n考虑到大多数胖友的英语水平和艿艿不相伯仲,再加上胖友一开始对 SkyWalking 比较陌生,所以比较推荐先阅读 https://github.com/SkyAPM/document-cn-translation-of-skywalking 地址,提供了 SkyWalking 的中文文档。\n考虑到胖友使用 SkyWalking 的目的,是实现分布式链路追踪的功能,所以最好去了解下相关的知识。这里推荐阅读两篇文章:\n 《OpenTracing 官方标准 —— 中文版》 Google 论文 《Dapper,大规模分布式系统的跟踪系统》  2. 搭建 SkyWalking 单机环境 考虑到让胖友更快的入门,我们来搭建一个 SkyWalking 单机环境,步骤如下:\n 第一步,搭建一个 Elasticsearch 服务。 第二步,下载 SkyWalking 软件包。 第三步,搭建一个 SkyWalking OAP 服务。 第四步,启动一个 Spring Boot 应用,并配置 SkyWalking Agent。 第五步,搭建一个 SkyWalking UI 服务。  仅仅五步,按照艿艿标题党的性格,应该给本文取个《10 分钟快速搭建 SkyWalking 服务》标题才对,哈哈哈。\n2.1 Elasticsearch 搭建  FROM https://www.elastic.co/cn/products/elasticsearch\nElasticsearch 是一个分布式、RESTful 风格的搜索和数据分析引擎,能够解决不断涌现出的各种用例。 作为 Elastic Stack 的核心,它集中存储您的数据,帮助您发现意料之中以及意料之外的情况。\n 参考《Elasticsearch 极简入门》的「1. 
单机部署」小节,搭建一个 Elasticsearch 单机服务。\n不过要注意,本文使用的是 Elasticsearch 7.5.1 版本。因为 SkyWalking 6.6.0 版本,增加了对 Elasticsearch 7.X 版本的支持。当然,如果胖友使用 Elasticsearch 6.X 版本也是可以的。\n2.2 下载 SkyWalking 软件包 对于 SkyWalking 的软件包,有两种方式获取:\n 手动编译 官方包  一般情况下,我们建议使用官方包。手动编译,更多是尝鲜或者等着急修复的 BUG 的版本。\n2.2.1 官方包 在 http://skywalking.apache.org/downloads/ 下,我们下载操作系统对应的发布版。\n这里,我们选择 Binary Distribution for ElasticSearch 7 (Linux) 版本,因为艿艿是 Mac 环境,再加上想使用 Elasticsearch 7.X 版本作为存储。如果胖友想用 Elasticsearch 6.X 版本作为存储,记得下载 Binary Distribution (Linux) 版本。\n① 下载:\n# 创建目录 $ mkdir -p /Users/yunai/skywalking $ cd /Users/yunai/skywalking # 下载 $ wget http://mirror.bit.edu.cn/apache/skywalking/6.6.0/apache-skywalking-apm-es7-6.6.0.tar.gz ② 解压:\n# 解压 $ tar -zxvf apache-skywalking-apm-es7-6.6.0.tar.gz $ cd apache-skywalking-apm-bin-es7 $ ls -ls 4 drwxr-xr-x 8 root root 4096 Sep 9 15:09 agent # SkyWalking Agent 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 bin # 执行脚本 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 config # SkyWalking OAP Server 配置文件 32 -rwxr-xr-x 1 root root 28903 Sep 9 14:32 LICENSE 4 drwxr-xr-x 3 root root 4096 Sep 9 15:44 licenses 32 -rwxr-xr-x 1 root root 31850 Sep 9 14:32 NOTICE 16 drwxr-xr-x 2 root root 16384 Sep 9 15:22 oap-libs # SkyWalking OAP Server 4 -rw-r--r-- 1 root root 1978 Sep 9 14:32 README.txt 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 webapp # SkyWalking UI 2.2.2 手动编译  友情提示:如果胖友没有编译 SkyWalking 源码的诉求,可以跳过本小节。\n 参考 How to build project 文章。\n需要前置安装如下:\n GIT JDK 8+ Maven  ① 克隆代码:\n$ git clone https://github.com/apache/skywalking.git  因为网络问题,可能克隆会有点久。  ② 初始化子模块:\n$ cd skywalking $ git submodule init $ git submodule update ③ 编译\n$ ./mvnw clean package -DskipTests  编译过程,如果机子比较差,花费时间会比较久。  ④ 查看编译结果\n$ cd apm-dist # 编译结果目录 $ cd target $ tar -zxvf apache-skywalking-apm-bin.tar.gz # 解压 Linux 包 $ cd apache-skywalking-apm-bin $ ls -ls 4 drwxr-xr-x 8 root root 4096 Sep 9 15:09 agent # SkyWalking Agent 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 bin # 执行脚本 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 config # SkyWalking OAP Server 配置文件 32 -rwxr-xr-x 1 root root 28903 Sep 9 14:32 LICENSE 4 drwxr-xr-x 3 root root 4096 Sep 9 15:44 licenses 32 -rwxr-xr-x 1 root root 31850 Sep 9 14:32 NOTICE 16 drwxr-xr-x 2 root root 16384 Sep 9 15:22 oap-libs # SkyWalking OAP Server 4 -rw-r--r-- 1 root root 1978 Sep 9 14:32 README.txt 4 drwxr-xr-x 2 root root 4096 Sep 9 15:44 webapp # SkyWalking UI 2.3 SkyWalking OAP 搭建 ① 修改 OAP 配置文件\n 友情提示:如果配置文件,适合 SkyWalking 6.X 版本。\n $ vi config/application.ymlstorage:elasticsearch7:nameSpace:${SW_NAMESPACE:\u0026#34;elasticsearch\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}# trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}# trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}# Those data TTL settings will override the same settings in core module.recordDataTTL:${SW_STORAGE_ES_RECORD_DATA_TTL:7}# Unit is dayotherMetricsDataTTL:${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45}# Unit is daymonthMetricsDataTTL:${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18}# Unit is month# Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.htmlbulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the bulk every 1000 
requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}# h2:# driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}# url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}# user: ${SW_STORAGE_H2_USER:sa}# metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000} storage.elasticsearch7 配置项,设置使用 Elasticsearch 7.X 版本作为存储器。  这里,我们打开注释,并记得通过 nameSpace 设置 Elasticsearch 集群名。   storage.elasticsearch 配置项,设置使用 Elasticsearch 6.X 版本作为存储器。  这里,我们无需做任何改动。 如果胖友使用 Elasticsearch 6.X 版本作为存储器,记得设置这个配置项,而不是 storage.elasticsearch7 配置项。   storage.h2 配置项,设置使用 H2 作为存储器。  这里,我们需要手动注释掉,因为 H2 是默认配置的存储器。     友情提示:如果配置文件,适合 SkyWalking 7.X 版本。\n  重点修改 storage 配置项,通过 storage.selector 配置项来设置具体使用的存储器。 storage.elasticsearch 配置项,设置使用 Elasticsearch 6.X 版本作为存储器。胖友可以主要修改 nameSpace、clusterNodes 两个配置项即可,设置使用的 Elasticsearch 的集群和命名空间。 storage.elasticsearch7 配置项,设置使用 Elasticsearch 7.X 版本作为存储器。 还有 MySQL、H2、InfluxDB 等等存储器的配置可以选择,胖友自己根据需要去选择哈~  ② 启动 SkyWalking OAP 服务\n$ bin/oapService.sh SkyWalking OAP started successfully! 是否真正启动成功,胖友打开 logs/skywalking-oap-server.log 日志文件,查看是否有错误日志。首次启动时,因为 SkyWalking OAP 会创建 Elasticsearch 的索引,所以会“疯狂”的打印日志。最终,我们看到如下日志,基本可以代表 SkyWalking OAP 服务启动成功:\n 友情提示:因为首次启动会创建 Elasticsearch 索引,所以可能会比较慢。\n 2020-01-02 18:22:53,635 - org.eclipse.jetty.server.Server - 444 [main] INFO [] - Started @35249ms 2.4 SkyWalking UI 搭建 ① 启动 SkyWalking UI 服务\nbin/webappService.sh SkyWalking Web Application started successfully! 是否真正启动成功,胖友打开 logs/logs/webapp.log 日志文件,查看是否有错误日志。最终,我们看到如下日志,基本可以代表 SkyWalking UI 服务启动成功:\n2020-01-02 18:27:02.824 INFO 48250 --- [main] o.a.s.apm.webapp.ApplicationStartUp : Started ApplicationStartUp in 7.774 seconds (JVM running for 8.316) 如果想要修改 SkyWalking UI 服务的参数,可以编辑 webapp/webapp.yml 配置文件。例如说:\n server.port :SkyWalking UI 服务端口。 collector.ribbon.listOfServers :SkyWalking OAP 服务地址数组。因为 SkyWalking UI 界面的数据,是通过请求 SkyWalking OAP 服务来获得的。  ② 访问 UI 界面:\n浏览器打开 http://127.0.0.1:8080 。界面如下图:2.5 SkyWalking Agent 大多数情况下,我们在启动项目的 Shell 脚本上,通过 -javaagent 参数进行配置 SkyWalking Agent 。我们在 「2.3.1 Shell」 小节来看。\n考虑到偶尔我们需要在 IDE 中,也希望使用 SkyWalking Agent ,所以我们在 「2.3.2 IDEA」 小节来看。\n2.3.1 Shell ① Agent 软件包\n我们需要将 apache-skywalking-apm-bin/agent 目录,拷贝到 Java 应用所在的服务器上。这样,Java 应用才可以配置使用该 SkyWalking Agent。我们来看看 Agent 目录下有哪些:\n$ ls -ls total 35176 0 drwxr-xr-x@ 7 yunai staff 224 Dec 24 14:20 activations 0 drwxr-xr-x@ 4 yunai staff 128 Dec 24 14:21 bootstrap-plugins 0 drwxr-xr-x@ 3 yunai staff 96 Dec 24 14:12 config # SkyWalking Agent 配置 0 drwxr-xr-x@ 3 yunai staff 96 Jan 2 19:29 logs # SkyWalking Agent 日志 0 drwxr-xr-x@ 13 yunai staff 416 Dec 24 14:22 optional-plugins # 可选插件 0 drwxr-xr-x@ 68 yunai staff 2176 Dec 24 14:20 plugins # 插件 35176 -rw-r--r--@ 1 yunai staff 18006420 Dec 24 14:12 skywalking-agent.jar # SkyWalking Agent  关于 SkyWalking Agent 提供的插件列表,可以看看《SkyWalking 文档 —— 插件支持列表》。  因为艿艿是在本机测试,所以无需拷贝,SkyWalking Agent 目录是 /Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/。\n考虑到方便胖友,艿艿这里提供了一个最简的 Spring Boot 应用 lab-39-demo-2.2.2.RELEASE.jar。对应 Github 仓库是 lab-39-demo。\n② 配置 Java 启动脚本\n# SkyWalking Agent 配置 export SW_AGENT_NAME=demo-application # 配置 Agent 名字。一般来说,我们直接使用 Spring Boot 项目的 `spring.application.name` 。 export SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800 # 配置 
Collector 地址。 export SW_AGENT_SPAN_LIMIT=2000 # 配置链路的最大 Span 数量。一般情况下,不需要配置,默认为 300 。主要考虑,有些新上 SkyWalking Agent 的项目,代码可能比较糟糕。 export JAVA_AGENT=-javaagent:/Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/skywalking-agent.jar # SkyWalking Agent jar 地址。 # Jar 启动 java -jar $JAVA_AGENT -jar lab-39-demo-2.2.2.RELEASE.jar  通过环境变量,进行配置。 更多的变量,可以在 /work/programs/skywalking/apache-skywalking-apm-bin/agent/config/agent.config 查看。要注意,可能有些变量是被注释掉的,例如说 SW_AGENT_SPAN_LIMIT 对应的 agent.span_limit_per_segment 。  ③ 执行脚本:\n直接执行上述的 Shell 脚本,启动 Java 项目。在启动日志中,我们可以看到 SkyWalking Agent 被加载的日志。日志示例如下:\nDEBUG 2020-01-02 19:29:29:400 main AgentPackagePath : The beacon class location is jar:file:/Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/skywalking-agent.jar!/org/apache/skywalking/apm/agent/core/boot/AgentPackagePath.class. INFO 2020-01-02 19:29:29:402 main SnifferConfigInitializer : Config file found in /Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/config/agent.config. 同时,也可以在 /Users/yunai/skywalking/apache-skywalking-apm-bin-es7/agent/agent/logs/skywalking-api.log 查看对应的 SkyWalking Agent 日志。日志示例如下:\nDEBUG 2020-01-02 19:37:22:539 SkywalkingAgent-5-ServiceAndEndpointRegisterClient-0 ServiceAndEndpointRegisterClient : ServiceAndEndpointRegisterClient running, status:CONNECTED.  这里,我们看到 status:CONNECTED ,表示 SkyWalking Agent 连接 SkyWalking OAP 服务成功。  ④ 简单测试\n完事,可以去 SkyWalking UI 查看是否链路收集成功。\n1、首先,使用浏览器,访问下 http://127.0.0.1:8079/demo/echo 地址,请求下 Spring Boot 应用提供的 API。因为,我们要追踪下该链路。\n2、然后,继续使用浏览器,打开 http://127.0.0.1:8080/ 地址,进入 SkyWalking UI 界面。如下图所示:这里,我们会看到 SkyWalking 中非常重要的三个概念:\n  服务(Service) :表示对请求提供相同行为的一系列或一组工作负载。在使用 Agent 或 SDK 的时候,你可以定义服务的名字。如果不定义的话,SkyWalking 将会使用你在平台(例如说 Istio)上定义的名字。\n 这里,我们可以看到 Spring Boot 应用的服务为 \u0026quot;demo-application\u0026quot;,就是我们在环境变量 SW_AGENT_NAME 中所定义的。\n   服务实例(Service Instance) :上述的一组工作负载中的每一个工作负载称为一个实例。就像 Kubernetes 中的 pods 一样, 服务实例未必就是操作系统上的一个进程。但当你在使用 Agent 的时候, 一个服务实例实际就是操作系统上的一个真实进程。\n 这里,我们可以看到 Spring Boot 应用的服务为 {agent_name}-pid:{pid}@{hostname},由 Agent 自动生成。关于它,我们在「5.1 hostname」小节中,有进一步的讲解,胖友可以瞅瞅。\n   端点(Endpoint) :对于特定服务所接收的请求路径, 如 HTTP 的 URI 路径和 gRPC 服务的类名 + 方法签名。\n 这里,我们可以看到 Spring Boot 应用的一个端点,为 API 接口 /demo/echo。\n   3、之后,点击「拓扑图」菜单,进入查看拓扑图的界面。如下图所示:4、再之后,点击「追踪」菜单,进入查看链路数据的界面。如下图所示:2.3.2 IDEA 我们统一使用 IDEA 作为开发 IDE ,所以忽略 Eclipse 的配置方式。\n具体参考下图,比较简单:3. 搭建 SkyWalking 集群环境 在生产环境下,我们一般推荐搭建 SkyWalking 集群环境。😈 当然,如果公司比较抠门,也可以在生产环境下使用 SkyWalking 单机环境,毕竟 SkyWalking 挂了之后,不影响业务的正常运行。\n搭建一个 SkyWalking 集群环境,步骤如下:\n 第一步,搭建一个 Elasticsearch 服务的集群。 第二步,搭建一个注册中心的集群。目前 SkyWalking 支持 Zookeeper、Kubernetes、Consul、Nacos 作为注册中心。 第三步,搭建一个 SkyWalking OAP 服务的集群,同时参考《SkyWalking 文档 —— 集群管理》,将 SkyWalking OAP 服务注册到注册中心上。 第四步,启动一个 Spring Boot 应用,并配置 SkyWalking Agent。另外,在设置 SkyWaling Agent 的 SW_AGENT_COLLECTOR_BACKEND_SERVICES 地址时,需要设置多个 SkyWalking OAP 服务的地址数组。 第五步,搭建一个 SkyWalking UI 服务的集群,同时使用 Nginx 进行负载均衡。另外,在设置 SkyWalking UI 的 collector.ribbon.listOfServers 地址时,也需要设置多个 SkyWalking OAP 服务的地址数组。  😈 具体的搭建过程,并不复杂,胖友自己去尝试下。\n4. 告警 在 SkyWaling 中,已经提供了告警功能,具体可见《SkyWalking 文档 —— 告警》。\n默认情况下,SkyWalking 已经内置告警规则。同时,我们可以参考告警规则,进行自定义。\n在满足 SkyWalking 告警规则的触发规则时,我们在 SkyWaling UI 的告警界面,可以看到告警内容。如下图所示:同时,我们自定义 Webhook ,对接 SkyWalking 的告警请求。而具体的邮箱、钉钉等告警方式,需要自己进行开发。至于自定义 WebHook 如何实现,可以参考:\n Java 语言:  《基于 SkyWalking 的分布式跟踪系统 - 异常告警》   Go 语言:  dingding-notify-for-skywalking infra-skywalking-webhook    5. 
注意事项 5.1 hostname 配置 在 SkyWalking 中,每个被监控的实例的名字,会包含 hostname 。格式为:{agent_name}-pid:{pid}@{hostname} ,例如说:\u0026quot;scrm-scheduler-pid:27629@iZbp1e2xlyvr7fh67qi59oZ\u0026quot; 。\n因为有些服务器未正确设置 hostname ,所以我们一定要去修改,不然都不知道是哪个服务器上的实例(😈 鬼知道 \u0026quot;iZbp1e2xlyvr7fh67qi59oZ\u0026quot; 一串是哪个服务器啊)。\n修改方式如下:\n1、修改 /etc/hosts 的 hostname :\n127.0.0.1 localhost ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 10.80.62.151 pre-app-01 # 就是这个,其中 10.80.62.151 是本机内网 IP ,pre-app-01 是 hostname 。 2、修改本机 hostname :\n参考 《CentOS7 修改主机名(hostname)》\n$ hostname pre-app-01 # 其中 pre-app-01 就是你希望的 hostname 。 $ hostnamectl set-hostname pre-app-01 # 其中 pre-app-01 就是你希望的 hostname 。 6. Spring Boot 使用示例 在 《芋道 Spring Boot 链路追踪 SkyWalking 入门》 中,我们来详细学习如何在 Spring Boot 中,整合并使用 SkyWalking 收集链路数据。😈 相比「2.5 SkyWaling Agent」来说,我们会提供更加丰富的示例哟。\n7. Spring Cloud 使用示例 在 《芋道 Spring Cloud 链路追踪 SkyWalking 入门》 中,我们来详细学习如何在 Spring Cloud 中,整合并使用 SkyWalking 收集链路数据。😈 相比「2.5 SkyWaling Agent」来说,我们会提供更加丰富的示例哟。\n666. 彩蛋 本文仅仅是简单的 SkyWalking 入门文章,如果胖友想要更好的使用 SkyWalking,推荐通读下《SkyWalking 文档》。\n想要进一步深入的胖友,也可以阅读如下资料:\n 《SkyWalking 源码解析》 《APM 巅峰对决:Apache Skywalking P.K. Pinpoint》 《SkyWalking 官方 —— 博客合集》  😈 最后弱弱的问一句,上完 SkyWaling 之后,有没发现自己系统各种地方慢慢慢!嘻嘻。\n","title":"SkyWalking 极简入门","url":"/zh/2020-04-19-skywalking-quick-start/"},{"content":"This post originally appears on The New Stack\nThis post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. 
Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling with Apache SkyWalking 7 Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedExceptione) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. 
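The post does not show the bodies of Task1 and Task2. As a rough illustration only (this is not the actual demo code; the class names, the shared lock, and the sleep ranges are assumptions), two tasks that contend on a single lock would produce exactly this kind of unstable latency:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;

// Hypothetical sketch: both tasks synchronize on the same lock, so whichever
// thread arrives second spends most of its time blocked, and the total time
// seen by the caller varies from run to run.
class ContendingTasks {
    static final Object SHARED_LOCK = new Object();

    static class Task1 implements Runnable {
        private final CountDownLatch latch;
        Task1(CountDownLatch latch) { this.latch = latch; }
        @Override public void run() {
            synchronized (SHARED_LOCK) {
                // unstable "work" while holding the lock
                sleepQuietly(ThreadLocalRandom.current().nextInt(50, 400));
            }
            latch.countDown();
        }
    }

    static class Task2 implements Runnable {
        private final CountDownLatch latch;
        Task2(CountDownLatch latch) { this.latch = latch; }
        @Override public void run() {
            synchronized (SHARED_LOCK) {
                sleepQuietly(ThreadLocalRandom.current().nextInt(50, 400));
            }
            latch.countDown();
        }
    }

    private static void sleepQuietly(long millis) {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}
```

With tasks shaped like this, the parent's countDownLatch.await(500, TimeUnit.MILLISECONDS) sometimes returns quickly and sometimes hits the timeout, which is the intermittent slowness the profiler is asked to explain.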
In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so it is easy to find that the p99 of this endpoint is far from the average response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let's see what the profile analysis result says.\nThis is the profile analysis stack view. We see the stack element names and durations (including/excluding the children), and the slowest methods have been highlighted. It clearly shows that “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead: the impact of repeated thread dumps is real and can't be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can't locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nHow to Try This Everything we discussed, including the Apache SkyWalking Java Agent, profile analysis code, and UI, can be found in our GitHub repository. We hope you enjoyed this new profile method, and love Apache SkyWalking. If so, give us a star on GitHub to encourage us.\nSkyWalking 7 has just been released. You can contact the project team through the following channels:\n Follow SkyWalking on Twitter. Subscribe to the mailing list: dev@skywalking.apache.org. Send a mail to dev-subscribe@skywalking.apache.org to subscribe to the mailing list.  Co-author Sheng Wu is a Tetrate founding engineer and the founder and VP of Apache SkyWalking. He is solving the problem of observability for large-scale service meshes in hybrid and multi-cloud environments.\nAdrian Cole works in the Spring Cloud team at VMware, mostly on Zipkin.\nHan Liu is a tech expert at Lagou. He is an Apache SkyWalking committer.\n","title":"Apache SkyWalking: Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/blog/2020-04-13-apache-skywalking-profiling/"},{"content":"SkyWalking Chart 2.0.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 7.0.0 Support set ES user/password Add CI for release  ","title":"Release Apache SkyWalking Chart 2.0.0 for SkyWalking 7.0.0","url":"/events/release-apache-skywalking-chart-2-0-0-for-skywalking-7-0-0/"},{"content":"SkyWalking APM 7.0.0 is released. 
Go to downloads page to find release tars.\n Upgrade JDK minimal JDK requirement to JDK8 Support profiling code level performance Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. V6 is required.  ","title":"Release Apache SkyWalking APM 7.0.0","url":"/events/release-apache-skywalking-apm-7-0-0/"},{"content":" 作者:吴晟,刘晗 原文地址  在本文中,我们详细介绍了代码级的性能剖析方法,以及我们在 Apache SkyWalking 中的实践。希望能够帮助大家在线定位系统性能短板,缓解系统压力。\n分布式链路追踪的局限性 在传统的监控系统中,我们如果想要得知系统中的业务是否正常,会采用进程监控、日志收集分析等方式来对系统进行监控。当机器或者服务出现问题时,则会触发告警及时通知负责人。通过这种方式,我们可以得知具体哪些服务出现了问题。但是这时我们并不能得知具体的错误原因出在了哪里,开发人员或者运维人员需要到日志系统里面查看错误日志,甚至需要到真实的业务服务器上查看执行情况来解决问题。\n如此一来,仅仅是发现问题的阶段,可能就会耗费相当长的时间;另外,发现问题但是并不能追溯到问题产生具体原因的情况,也常有发生。这样反反复复极其耗费时间和精力,为此我们便有了基于分布式追踪的 APM 系统。\n通过将业务系统接入分布式追踪中,我们就像是给程序增加了一个放大镜功能,可以清晰看到真实业务请求的整体链路,包括请求时间、请求路径,甚至是操作数据库的语句都可以看得一清二楚。通过这种方式,我们结合告警便可以快速追踪到真实用户请求的完整链路信息,并且这些数据信息完全是持久化的,可以随时进行查询,复盘错误的原因。\n然而随着我们对服务监控理解的加深,我们发现事情并没有那么简单。在分布式链路追踪中我们有这样的两个流派:代码埋点和字节码增强。无论使用哪种方式,底层逻辑一定都逃不过面向切面这个基础逻辑。因为只有这样才可以做到大面积的使用。这也就决定了它只能做到框架级别和 RPC 粒度的监控。这时我们可能依旧会遇到程序执行缓慢或者响应时间不稳定等情况,但无法具体查询到原因。这时候,大家很自然的会考虑到增加埋点粒度,比如对所有的 Spring Bean 方法、甚至主要的业务层方法都加上埋点。但是这种思路会遇到不小的挑战:\n第一,增加埋点时系统开销大,埋点覆盖不够全面。通过这种方式我们确实可以做到具体业务场景具体分析。但随着业务不断迭代上线,弊端也很明显:大量的埋点无疑会加大系统资源的开销,造成 CPU、内存使用率增加,更有可能拖慢整个链路的执行效率。虽然每个埋点消耗的性能很小,在微秒级别,但是因为数量的增加,甚至因为业务代码重用造成重复埋点或者循环使用,此时的性能开销已经无法忽略。\n第二,动态埋点作为一项埋点技术,和手动埋点的性能消耗上十分类似,只是减少的代码修改量,但是因为通用技术的特别,上一个挑战中提到的循环埋点和重复使用的场景甚至更为严重。比如选择所有方法或者特定包下的所有方法埋点,很可能造成系统性能彻底崩溃。\n第三,即使我们通过合理设计和埋点,解决了上述问题,但是 JDK 函数是广泛使用的,我们很难限制对 JDK API 的使用场景。对 JDK 过多方法、特别是非 RPC 方法的监控会造成系统的巨大延迟风险。而且有一些基础类型和底层工具类,是很难通过字节码进行增强的。当我们的 SDK 使用不当或者出现 bug 时,我们无法具体得知真实的错误原因。\n代码级性能剖析方法 方法介绍 基于以上问题,在系统性能监控方法上,我们提出了代码级性能剖析这种在线诊断方法。这种方法基于一个高级语言编程模型共性,即使再复杂的系统,再复杂的业务逻辑,都是基于线程去进行执行的,而且多数逻辑是在单个线程状态下执行的。\n代码级性能剖析就是利用方法栈快照,并对方法执行情况进行分析和汇总。并结合有限的分布式追踪 span 上下文,对代码执行速度进行估算。\n性能剖析激活时,会对指定线程周期性的进行线程栈快照,并将所有的快照进行汇总分析,如果两个连续的快照含有同样的方法栈,则说明此栈中的方法大概率在这个时间间隔内都处于执行状态。从而,通过这种连续快照的时间间隔累加成为估算的方法执行时间。时间估算方法如下图所示:\n在上图中,d0-d10 代表 10 次连续的内存栈快照,实际方法执行时间在 d3-d4 区间,结束时间在 d8-d9 之间。性能剖析无法告诉你方法的准确执行时间,但是他会估算出方法执行时间为 d4-d8 的 4 个快照采集间隔时间之和,这已经是非常的精确的时间估算了。\n而这个过程因为不涉及代码埋点,所以自然性能消耗是稳定和可控的,也无需担心是否被埋点,是否是 JDK 方法等问题。同时,由于上层已经在分布式追踪之下,性能剖析方法可以明确地确定分析开始和结束时间,减少不必要的性能开销。\n性能剖析可以很好的对线程的堆栈信息进行监控,主要有以下几点优势:\n 精确的问题定位,直接到代码方法和代码行; 无需反复的增删埋点,大大减少了人力开发成本; 不用承担过多埋点对目标系统和监控系统的压力和性能风险; 按需使用,平时对系统无消耗,使用时的消耗稳定可能。  SkyWalking 实践实例 我们首先在 Apache SkyWalking APM 中实现此技术方法,下面我们就以一个真实的例子来说明此方法的执行效果。\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedExceptione) { } 这是我们故意加入的问题代码,我们使用 CountDownLanth 设置了两个任务完成后方法执行结束,Task1 和 Task2 是两个执行时间不稳定的任务,所以主任务也会执行速度不稳定。但对于运维和监控团队来说,很难定位到这个方法片段。\n针对于这种情况,我们看看性能剖析会怎样直接定位此问题。\n上图所示的就是我们在进行链路追踪时所看到的真实执行情况,其中我们可以看到在 service/processWithThreadPool 执行速度缓慢,这正是我们植入问题代码的方法。此时在这个调用中没有后续链路了,所以并没有更细致的原因,我们也不打算去 review 代码,从而增加新埋点。这时,我们可以对 HelloService 进行性能剖析,并执行只剖析响应速度大于 500 毫秒的请求。\n注意,指定特定响应时间的剖析是保证剖析有效性的重要特性,如果方法在平均响应时间上已经出现问题,往往通过分布式链路可以快速定位,因为此时链路总时间长,新埋点带来的性能影响相对可控。但是方法性能抖动是不容易用新增埋点来解决的,而且往往只发生在生产环境。\n上图就是我们进行性能剖析后的真实结果图。从左到右分别表示:栈帧名称、该栈帧总计耗时(包含其下面所有自栈帧)、当前栈帧自身耗时和监控次数。我们可以在最后一行看到,线程卡在了 sun.misc.Unsafe.park 中了。如果你熟悉 Java 就可以知道此时进行了锁等待,我们继续按照树的结构向上推,便可以看到线程真正是卡在了 CountDownLatch.await 方法中。\n方法局限性 当然任何的方法都不是万能的,性能剖析也有一些局限性。\n第一, 对于高频反复执行的方法,如循环调用,可能会误报为缓慢方法。但这并不是大问题,因为如果反复执行的耗时较长,必然是系统需要关注的性能瓶颈。\n第二, 由于性能栈快照有一定的性能消耗,所以采集周期不宜过密,如 SkyWalking 实践中,不支持小于 10ms 的采集间隔。所以如果问题方法执行时间过小(比如在 10 
毫秒内波动),此方法并不适用。我们也再此强调,方法论和工具的强大,始终不能代替程序员。\n","title":"在线代码级性能剖析,补全分布式追踪的最后一块“短板”","url":"/zh/2020-03-23-using-profiling-to-fix-the-blind-spot-of-distributed-tracing/"},{"content":"SkyWalking CLI 0.2.0 is released. Go to downloads page to find release tars.\n Support visualization of heat map Support top N entities, swctl metrics top 5 --name service_sla Support thermodynamic metrics, swctl metrics thermodynamic --name all_heatmap Support multiple linear metrics, swctl --display=graph --debug metrics multiple-linear --name all_percentile  ","title":"Release Apache SkyWalking CLI 0.2.0","url":"/events/release-apache-skywalking-cli-0-2-0/"},{"content":"SkyWalking Chart 1.1.0 is released. Go to downloads page to find release tars.\n Support SkyWalking 6.6.0 Support deploy Elasticsearch 7 The official helm repo was changed to the official Elasticsearch repo (https://helm.elastic.co/)  ","title":"Release Apache SkyWalking Chart 1.1.0 for SkyWalking 6.6.0","url":"/events/release-apache-skywalking-chart-1-1-0-for-skywalking-6-6-0/"},{"content":"Support tracing and collect metrics from Nginx server. Require SkyWalking APM 7.0+.\n","title":"SkyWalking Nginx LUA 0.1.0 release","url":"/events/skywalking-nginx-lua-0-1-0-release/"},{"content":"Based on his continuous contributions, Ming Wen (a.k.a moonming) has been voted as a new committer.\n","title":"Welcome Ming Wen as new committer","url":"/events/welcome-ming-wen-as-new-committer/"},{"content":"Based on his continuous contributions, Haochao Zhuang (a.k.a dmsolr) has been invited to join the PMC. Welcome aboard.\n","title":"Welcome Haochao Zhuang to join the PMC","url":"/events/welcome-haochao-zhuang-to-join-the-pmc/"},{"content":"Based on his continuous contributions, Zhusheng Xu (a.k.a aderm) has been voted as a new committer.\n","title":"Welcome Zhusheng Xu as new committer","url":"/events/welcome-zhusheng-xu-as-new-committer/"},{"content":"Based on his continuous contributions, Han Liu (a.k.a mrproliu) has been voted as a new committer.\n","title":"Welcome Han Liu as new committer","url":"/events/welcome-han-liu-as-new-committer/"},{"content":" Author: Wu Sheng, tetrate.io, SkyWalking original creator, SkyWalking V.P. GitHub, Twitter, Linkedin  The SkyWalking project provides distributed tracing, topology map analysis, service mesh telemetry analysis, metrics analysis and a super cool visualization targeting distributed systems in k8s or traditional VM deployments.\nThe project is widely used in Alibaba, Huawei, Tencent, DiDi, xiaomi, Pingan, China’s top 3 telecom companies (China Mobile, China telecom, China Unicom), airlines, banks and more. It has over 140 company users listed on our powered by page.\nToday, we welcome and celebrate reaching 200 code contributors on our main repo. We hereby mark this milestone as official today, : Jan. 20th 2020.\nAt this great moment, I would like to share SkyWalking’s 4-year open source journey.\nI wrote the first line on Nov. 1st, 2015, guiding people to understand a distributed system just as micro-services and distributed architecture were becoming popular. In the first 2 years, I never thought it would become such a big and active community. I didn’t even expect it would be an open source project. Initially, the goal was primarily to teach others about distributed tracing and analysis.\nIt was a typical open source project in obscurity in its first two years. But people still showed up, asked questions, and tried to improve the project. 
I got several invitations to share the project at local meetups. All these made me realize people really needed a good open source APM project.\nIn 2017, I decided to dedicate myself as much as possible to make the project successful, and it became my day job. To be honest, I had no clue about how to do that; at that time in China, it was rare to have this kind of job. So, I began to ask friends around me, “Do you want to collaborate on the open source APM with me?” Most people were busy and gave a clear NO, but two of them agreed to help: Xin Zhang and Yongsheng Peng. We built SkyWalking 3.x and shared the 3.2 release at GOPS Shanghai, China.\nIt became the first adoption version used in production.\nCompared to today\u0026rsquo;s SkyWalking, it was a toy prototype, but it had the same tracing design, protocol and analysis method.\nThat year the contributor team was 15-20, and the project had obvious potential to expand. I began to consider bringing the project into a worldwide, top-level open source foundation. Thanks to our initial incubator mentors, Michael Semb Wever, William Jiang, and Luke Han, this really worked. At the end of 2017, SkyWalking joined the Apache Incubator, and kept following the Apache Way to build community. More contributors joined the community.\nWith more people spending time on the project collaborations, including code, tests, blogs, conference talks, books and uses of the project, a chemical reaction happens. New developers begin to provide bug fixes, new feature requirements and new proposals. At the moment of graduation in spring 2019, the project had 100 contributors. Now, only 9 months later, it's surged to 200 super quickly. They enhance the project and extend it to frontiers we never imagined: 5 popular language agents, service mesh adoption, CLI tool, super cool visualization. We are even moving on to thread profiling, browser performance and Nginx tracing NOW.\nOver the whole 4+ year open source journey, we have had support from leaders in the tracing open source community around the world, including Adrian Cole, William Jiang, Luke Han, Michael Semb Wever, Ben Sigelman, and Jonah Kowall. And we've had critical foundations' help, especially the Apache Software Foundation and the Cloud Native Computing Foundation.\nOur contributors also have support from their employers, including, to the best of my knowledge, Alibaba, Huawei, China Mobile, ke.com, DaoCloud, Lizhi.fm, Yonghui Supermarket, and dangdang.com. I also have support from my employers, tetrate.io, Huawei, and OneAPM.\nThanks to our 200+ contributors and the companies behind them. You make this magic happen.\n","title":"SkyWalking hits 200 contributors mark","url":"/blog/2020-01-20-celebrate-200th-contributor/"},{"content":"Based on his continuous contributions, Hongwei Zhai (a.k.a innerpeacez) has been invited to join the PMC. Welcome aboard.\n","title":"Welcome Hongwei Zhai to join the PMC","url":"/events/welcome-hongwei-zhai-to-join-the-pmc/"},{"content":"Apache APM 6.6.0 release. Go to downloads page to find release tars.\n Service Instance dependency detection is available. Support ElasticSearch 7 as a storage option. Reduce the register load.  ","title":"Release Apache SkyWalking APM 6.6.0","url":"/events/release-apache-skywalking-apm-6-6-0/"},{"content":"SkyWalking Chart 1.0.0 is released. Go to downloads page to find release tars.\n Deploy SkyWalking 6.5.0 by Chart. Elasticsearch deploy optional.  
","title":"Release Apache SkyWalking Chart 1.0.0 for SkyWalking 6.5.0","url":"/events/release-apache-skywalking-chart-1-0-0-for-skywalking-6-5-0/"},{"content":"SkyWalking CLI 0.1.0 is released. Go to downloads page to find release tars.\n Add command swctl service to list services Add command swctl instance and swctl search to list and search instances of service. Add command swctl endpoint to list endpoints of service. Add command swctl linear-metrics to query linear metrics and plot the metrics in Ascii Graph mode. Add command swctl single-metrics to query single-value metrics.  ","title":"Release Apache SkyWalking CLI 0.1.0","url":"/events/release-apache-skywalking-cli-0-1-0/"},{"content":"Based on his continuous contributions, Weiyi Liu (a.k.a wayilau) has been voted as a new committer.\n","title":"Welcome Weiyi Liu as new committer","url":"/events/welcome-weiyi-liu-as-new-committer/"},{"content":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome aboard.\n","title":"Welcome Lang Li as a new committer","url":"/events/welcome-lang-li-as-a-new-committer/"},{"content":"Based on her continuous contributions, Qiuxia Fan (a.k.a Fine0830) has been voted as a new committer.\n","title":"Welcome Qiuxia Fan as new committer","url":"/events/welcome-qiuxia-fan-as-new-committer/"},{"content":"6.5.0 release. Go to downloads page to find release tars.\n New metrics comparison view in UI. Dynamic Alert setting supported. JDK9-12 supported in backend.  ","title":"Release Apache SkyWalking APM 6.5.0","url":"/events/release-apache-skywalking-apm-6-5-0/"},{"content":"Based on his continuous contributions, Wei Zhang (a.k.a arugal) has been voted as a new committer.\n","title":"Welcome Wei Zhang as new committer","url":"/events/welcome-wei-zhang-as-new-committer/"},{"content":"PS:本文仅仅是在我的测试环境实验过,如果有问题,请自行优化调整\n前记:记得skywlking还是6.0版本的时候我就在试用,当时是skywalking基本在两三天左右就会监控数据完全查不出来,elasticsearch日志报错,由于当时也算是初用es,主要用来日志收集,并且时间有限,没有继续深入研究,最近空闲,更新到最新的6.5.0(开发版本)还是会出现同样的问题,下定决心解决下,于是有了本文的浅知拙见\n本次调优环境 skywalking: 6.5.0 elasticsearch:6.3.2(下文用es代替)\n调优过程   当然是百度了,百度后其实翻来翻去就找到一个相关的文章https://my.oschina.net/keking/blog/3025303 ,参考之。\n  调整skywalking的这两个参数试试 bulkActions: 4000 # Execute the bulk every 2000 requests  bulkSize: 60 # flush the bulk every 20mb 然后es还是继续挂,继续频繁的重启\n  继续看这个文章,发现了另外一篇https://www.easyice.cn/archives/207 ,继续参考之\n  这篇文章发现每一个字我都认识,看起来也能懂,但是对于es小白的我来说,着实不知道怎么调整这些参数,姑且先加到es的配置文件里边试试看吧,于是就加了,然后重启es的时候说发现index参数配置,自从5.0之后就不支持这样配置了,还给调了个es的接口去设置,但是设置失败(真够不错的),朝着这个思路去百度,百度到快放弃,后来就寻思,再试试看吧,(百度的结果是知道了index有静态参数和动态参数,动态的参数是可以随时设置,静态的只能创建或者关闭状态的索引才可以设置) 然鹅并不知道怎么关闭索引,继续百度,(怎么全特么百度,好吧不百度了,直接来干货)\n 关闭索引(我的skywalking索引命名空间是dry_trace) curl -XPOST \u0026quot;http://localhost:9200/dry_trace*/_close\u0026quot; 设置参数 curl -XPUT 'http://localhost:9200/dry_trace*/_settings?preserve_existing=true' -H 'Content-type:application/json' -d '{ \u0026quot;index.refresh_interval\u0026quot; : \u0026quot;10s\u0026quot;, \u0026quot;index.translog.durability\u0026quot; : \u0026quot;async\u0026quot;, \u0026quot;index.translog.flush_threshold_size\u0026quot; : \u0026quot;1024mb\u0026quot;, \u0026quot;index.translog.sync_interval\u0026quot; : \u0026quot;120s\u0026quot; }'  打开索引 curl -XPOST \u0026quot;http://localhost:9200/dry_trace*/_open\u0026quot;    还有一点,第四步的方式只适用于现有的索引设置,那么新的索引设置呢,总不能每天重复下第四步吧。当然不需要,来干货 首先登陆kinaba控制台找到开发工具 贴入以下代码\n   PUT /_template/dry_trace_tmp { \u0026quot;index_patterns\u0026quot;: \u0026quot;dry_trace*\u0026quot;, \u0026quot;order\u0026quot;: 1, \u0026quot;settings\u0026quot;: 
{ \u0026quot;index\u0026quot;: { \u0026quot;refresh_interval\u0026quot;: \u0026quot;30s\u0026quot;, \u0026quot;translog\u0026quot;: { \u0026quot;flush_threshold_size\u0026quot;: \u0026quot;1GB\u0026quot;, \u0026quot;sync_interval\u0026quot;: \u0026quot;60s\u0026quot;, \u0026quot;durability\u0026quot;: \u0026quot;async\u0026quot; } } } } 截止目前为止运行一周,还未发现挂掉,一切看起来正常   完结\u0026mdash; 于 2019年11月\n","title":"SkyWalking 使用 ElasticSearch 存储的优化","url":"/zh/2019-11-07-skywalking-elasticsearch-storage-optimization/"},{"content":"Based on his continuous contributions, Haochao Zhuang (a.k.a dmsolr) has been voted as a new committer.\n","title":"Welcome Haochao Zhuang as new committer","url":"/events/welcome-haochao-zhuang-as-new-committer/"},{"content":" 作者:innerpeacez 原文地址  本文主要讲述的是如何使用 Helm Charts 将 SkyWalking 部署到 Kubernetes 集群中,相关文档可以参考skywalking-kubernetes 和 backend-k8s 文档 。\n目前推荐的四种方式:\n 使用 helm 2 提供的 helm serve 启动本地 helm repo 使用本地 chart 文件部署 使用 harbor 提供的 repo 功能 直接从官方 repo 进行部署  注意:目前 skywalking 的 chart 还没有提交到官方仓库,请先参照前三种方式进行部署\nHelm 2 提供的 helm serve 打包对应版本的 skywalking chart 1.配置 helm 环境,参考 Helm 环境配置 ,如果你要部署 helm2 相关 chart 可以直接配置 helm2 的相关环境\n2.克隆/下载ZIP skywalking-kubernetes 这个仓库,仓库关于chart的目录结构如下\n helm-chart\n helm2  6.0.0-GA 6.1.0   helm3  6.3.0 6.4.0     克隆/下载ZIP 完成后进入指定目录打包对应版本的chart\ncd skywalking-kubernetes/helm-chart/\u0026lt;helm-version\u0026gt;/\u0026lt;skywalking-version\u0026gt; 注意:helm-version 为对应的 helm 版本目录,skywalking-version 为对应的 skywalking 版本目录,下面以helm3 和 skywalking 6.3.0 为例\ncd skywalking-kubernetes/helm-chart/helm3/6.3.0 3.由于skywalking 依赖 elasticsearch 作为存储库,执行以下命令更新依赖,默认会从官方repo进行拉取\nhelm dep up skywalking  Hang tight while we grab the latest from your chart repositories\u0026hellip; \u0026hellip;Successfully got an update from the \u0026ldquo;stable\u0026rdquo; chart repository Update Complete. 
⎈Happy Helming!⎈ Saving 1 charts Downloading elasticsearch from repo https://kubernetes-charts.storage.googleapis.com/ Deleting outdated charts\n 如果官方 repo 不存在,请先添加官方仓库\nhelm repo add stable https://kubernetes-charts.storage.googleapis.com  \u0026ldquo;stable\u0026rdquo; has been added to your repositories\n 4.打包 skywalking , 执行以下命令\nhelm package skywalking/  Successfully packaged chart and saved it to: C:\\code\\innerpeacez_github\\skywalking-kubernetes\\helm-chart\\helm3\\6.3.0\\skywalking-0.1.0.tgz\n 打包完成后会在当前目录的同级目录生成 .tgz 文件\n ls  skywalking/ skywalking-0.1.0.tgz\n 启动 helm serve 由于上文配置的 helm 为 helm3 ,但是 helm 3中移除了 helm serve 的相关命令,所以需要另外一个环境配置helm2 的相关环境,下载 helm 2.14.3 的二进制文件,配置基本上没有大的差别,不在赘述\n初始化 helm\nhelm init 将上文生成的 skywalking-0.1.0.tgz 文件复制到 helm 相关目录 /root/.helm/repository/local,启动 serve\nhelm serve --address \u0026lt;ip\u0026gt;:8879 --repo-path /root/.helm/repository/local 注意: ip 为要能够被上文配置 helm 3 环境的机器访问到\n可以访问一下看看服务 serve 是否启动成功\ncurl ip:8879 部署 skywalking 1.在helm3 环境中添加启动的本地 repo\nhelm repo add local http://\u0026lt;ip\u0026gt;:8879 2.查看 skywalking chart 是否存在于本地仓库中\nhelm search skywalking  NAME CHART VERSION\tAPP VERSION\tDESCRIPTION local/skywalking 0.1.0 6.3.0 Apache SkyWalking APM System\n 3.部署\nhelm -n test install skywalking local/skywalking 这样 skywalking 就部署到了 k8s 集群中的 test 命名空间了,至此本地安装skywalking 就完成了。\n本地文件部署 如果你不想存储到 chart 到仓库中也可以直接使用本地文件部署 skywalking,按照上面的步骤将skywalking chart 打包完成之后,直接使用以下命令进行部署\nhelm -n test install skywalking skywalking-0.1.0.tgz harbor 作为 repo 存储 charts harbor 目前已经提供了,charts repo 的能力,这样就可以将 docker 镜像和 chart 存储在一个仓库中了,方便维护,具体harbor 的部署方法参考 Harbor 作为存储仓库存储 chart\n官方 repo 部署 目前没有发布到官方 repo 中,后续发布完成后,只需要执行下面命令即可\nhelm install -n test stable/skywalking 总结 四种方式都可以进行部署,如果你想要自定义 chart ,需要使用上述两种本地方法及 harbor 存储的方式,以便你修改好 chart 之后进行部署.\n","title":"使用 chart 部署 SkyWalking","url":"/zh/2019-10-08-how-to-use-sw-chart/"},{"content":" Author: Wei Qiang GitHub  Background SkyWalking backend provides the alarm function, we can define some Alarm rules, call webhook after the rule is triggered. I share my implementation\nDemonstration SkyWalking alarm UI\ndingtalk message body\nIntroduction  install  go get -u github.com/weiqiang333/infra-skywalking-webhook cd $GOPATH/src/github.com/weiqiang333/infra-skywalking-webhook/ bash build/build.sh ./bin/infra-skywalking-webhook help  Configuration  main configs file:configs/production.ymldingtalk:p3:token... Example  ./bin/infra-skywalking-webhook --config configs/production.yml --address 0.0.0.0:8000  SkyWalking backend alarm settings  webhooks:- http://127.0.0.1:8000/dingtalkCollaboration Hope that we can improve together webhook\nSkyWalking alarm rules may add more metric names (eg priority name), we can send different channels by locating different levels of alerts (dingtalk / SMS / phone)\nThanks.\n","title":"SkyWalking alarm webhook sharing","url":"/blog/2019-09-25-alarm-webhook-share/"},{"content":"作者: SkyWalking committer,Kdump\n本文介绍申请Apache SkyWalking Committer流程, 流程包括以下步骤\n 与PMC成员表达想成为committer的意愿(主动/被动) PMC内部投票 PMC正式邮件邀请 填写Apache iCLA申请表 设置ApacheID和邮箱 设置GitHub加入Apache组织 GitHub其它一些不重要设置  前期过程  与PMC成员表达想成为committer的意愿(主动/被动) PMC内部投票  当你对项目的贡献活跃度足够高或足够多时, Skywalking项目的PMC(项目管理委员会)会找到你并询问你是否有意愿成为项目的Committer, 或者也可以主动联系项目的PMC表达自己的意向, 在此之后PMC们会进行内部讨论和投票并告知你是否可以进入下一个环节.这个过程可能需要一周. 
如果PMC主动邀请你进行非正式的意愿咨询, 你可以选择接受或拒绝.\nPS:PMC会向你索要你的个人邮箱, 建议提供Gmail, 因为后期绑定Apache邮箱需要用到, 其它邮箱我不确定是否能绑定.\nPS:从Apache官方的流程来讲, 现有的PMC会在没有通知候选人的情况下先进行候选人投票, 但是Skywalking项目的PMC有可能更倾向于先得到候选人的意愿再进行投票.\n正式阶段   PMC正式邮件邀请\n 当你收到PMC正式的邀请邮件时, 恭喜你, 你已经通过了PMC的内部投票, 你需要用英文回答接受邀请或者拒绝邀请, 记住回复的时候一定要选择全部回复.    填写Apache iCLA申请表\n  在你收到的PMC邮件中, 有几个ASF官方链接需要你去浏览, 重点的内容是查看CLAs, 并填写Individual Contributor License Agreement, 你可以将icla.pdf文件下载到本地, 使用PDF工具填写里面所需的信息, 并打印出来签名(一定要手写签名, 否则会被要求重新签名), 再扫描(或手机拍照)成电子文档(需要回复PDF格式, 文件名建议重命名为你的名字-icla.pdf), 使用gpg对电子文档进行签名(参考[HOW-TO: SUBMITTING LICENSE AGREEMENTS AND GRANTS\n](http://www.apache.org/licenses/contributor-agreements.html#submitting)), Window可以使用GnuPG或者Gpg4win.\n  完成gpg签名后, 请将你签名用的公钥上送到pool.sks-keyservers.net服务器, 并在这个页面中验证你的公钥是否可以被搜索到, 搜索关键词可以是你秘钥中填写的名字或者邮箱地址.\n  gpg签名后, 会生成.pdf.asc的文件, 需要将你的你的名字-icla.pdf和你的名字-icla.pdf.asc以附件的方式一起发送到secretary@apache.org, 并抄送给private@skywalking.apache.org.\n    设置ApacheID和邮箱\n 大概5个工作日内, 你会收到一封来至于root@apache.org的邮件, 主题为Welcome to the Apache Software Foundation (ASF)!, 恭喜你, 你已经获得了ApacheID, 这时候你需要根据邮件内容的提示去设置你的ApacheID密码, 密码设置完成后, 需要在Apache Account Utility页面中重点设置Forwarding email address和Your GitHub Username两个信息.保存信息的时候需要你填写当前的ApacheID的密码. 现在进入Gmail, 选择右上角的齿轮-\u0026gt;设置-\u0026gt;账号和导入-\u0026gt;添加其他电子邮件地址-\u0026gt;参考Sending email from your apache.org email address给出的信息根据向导填写Apache邮箱.    设置GitHub加入Apache组织\n 进入Welcome to the GitBox Account Linking Utility!, 按照顺序将Apache Account和GitHub Account点绿, 想点绿MFA Status, 需要去GitHub开启2FA, 请参考配置双重身份验证完成2FA的功能. 等待1~2小时后登陆自己的GitHub的dashboard界面, 你应该会看到一条Apache组织邀请你加入的通知, 这个时候接受即可享有Skywalking相关GitHub项目权限了.    其它提示  GitHub其它一些不重要设置  在GitHub首页展示Apache组织的logo: 进入Apache GitHub组织-\u0026gt;People-\u0026gt;搜索自己的GitHubID-\u0026gt;将Private改成Public    ","title":"Apache SkyWalking Committer申请流程","url":"/zh/2019-09-12-apache-skywalking-committer-apply-process/"},{"content":"Based on his contributions to the skywalking ui project, Weijie Zou (a.k.a Kdump) has been accepted as a new committer.\n","title":"Welcome Weijie Zou as a new committer","url":"/events/welcome-weijie-zou-as-a-new-committer/"},{"content":"6.4.0 release. Go to downloads page to find release tars.\n Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  
Read changelog for the details.\n","title":"Release Apache SkyWalking APM 6.4.0","url":"/events/release-apache-skywalking-apm-6-4-0/"},{"content":"  作者:innerpeacez 原文地址   如果你还不知道 Skywalking agent 是什么,请点击这里查看 Probe 或者这里查看快速了解agent,由于我这边大部分都是 JAVA 服务,所以下文以 Java 中使用 agent 为例,提供了以下三种方式供你选择\n三种方式:  使用官方提供的基础镜像 将 agent 包构建到已经存在的基础镜像中 sidecar 模式挂载 agent  1.使用官方提供的基础镜像 查看官方 docker hub 提供的基础镜像,只需要在你构建服务镜像是 From 这个镜像即可,直接集成到 Jenkins 中可以更加方便\n2.将 agent 包构建到已经存在的基础镜像中 提供这种方式的原因是:官方的镜像属于精简镜像,并且是 openjdk ,可能很多命令没有,需要自己二次安装,以下是我构建的过程\n  下载 oracle jdk\n这个现在 oracle 有点恶心了,wget 各种不行,然后我放弃了,直接从官网下载了\n  下载 skywalking 官方发行包,并解压(以6.3.0为例)\nwget https://www.apache.org/dyn/closer.cgi/skywalking/6.3.0/apache-skywalking-apm-6.3.0.tar.gz \u0026amp;\u0026amp; tar -zxvf apache-skywalking-apm-6.3.0.tar.gz   通过以下 dockerfile 构建基础镜像\nFROMalpine:3.8  ENV LANG=C.UTF-8 RUN set -eux \u0026amp;\u0026amp; \\  apk update \u0026amp;\u0026amp; apk upgrade \u0026amp;\u0026amp; \\  wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub \u0026amp;\u0026amp;\\  wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.30-r0/glibc-2.30-r0.apk \u0026amp;\u0026amp;\\  apk --no-cache add unzip vim curl git bash ca-certificates glibc-2.30-r0.apk file \u0026amp;\u0026amp; \\  rm -rf /var/lib/apk/* \u0026amp;\u0026amp;\\  mkdir -p /usr/skywalking/agent/ # A streamlined jreADD jdk1.8.0_221/ /usr/java/jdk1.8.0_221/ADD apache-skywalking-apm-bin/agent/ /usr/skywalking/agent/ # set envENV JAVA_HOME /usr/java/jdk1.8.0_221ENV PATH ${PATH}:${JAVA_HOME}/bin # run container with base path:/WORKDIR/ CMD bash  这里由于 alpine 是基于mini lib 的,但是 java 需要 glibc ,所以加入了 glibc 相关的东西,最后构建出的镜像大小在 490M 左右,因为加了挺多命令还是有点大,仅供参考,同样构建出的镜像也可以直接配置到 jenkins 中。\n3.sidecar 模式挂载 agent 如果你们的服务是部署在 Kubernetes 中,你还可以使用这种方式来使用 Skywalking Agent ,这种方式的好处在与不需要修改原来的基础镜像,也不用重新构建新的服务镜像,而是以sidecar 模式,通过共享volume的方式将agent 所需的相关文件挂载到已经存在的服务镜像中\n构建 skywalking agent sidecar 镜像的方法\n  下载skywalking 官方发行包,并解压\nwget https://www.apache.org/dyn/closer.cgi/skywalking/6.3.0/apache-skywalking-apm-6.3.0.tar.gz \u0026amp;\u0026amp; tar -zxvf apache-skywalking-apm-6.3.0.tar.gz   通过以下 dockerfile 进行构建\nFROMbusybox:latest  ENV LANG=C.UTF-8 RUN set -eux \u0026amp;\u0026amp; mkdir -p /usr/skywalking/agent/ ADD apache-skywalking-apm-bin/agent/ /usr/skywalking/agent/ WORKDIR/  注意:这里我没有在dockerfile中下载skywalking 发行包是因为保证构建出的 sidecar 镜像保持最小,bosybox 只有700 k左右,加上 agent 最后大小小于20M\n如何使用 sidecar 呢?\napiVersion:apps/v1kind:Deploymentmetadata:labels:name:demo-swname:demo-swspec:replicas:1selector:matchLabels:name:demo-swtemplate:metadata:labels:name:demo-swspec:initContainers:- image:innerpeacez/sw-agent-sidecar:latestname:sw-agent-sidecarimagePullPolicy:IfNotPresentcommand:[\u0026#39;sh\u0026#39;]args:[\u0026#39;-c\u0026#39;,\u0026#39;mkdir -p /skywalking/agent \u0026amp;\u0026amp; cp -r /usr/skywalking/agent/* /skywalking/agent\u0026#39;]volumeMounts:- mountPath:/skywalking/agentname:sw-agentcontainers:- image:nginx:1.7.9name:nginxvolumeMounts:- mountPath:/usr/skywalking/agentname:sw-agentports:- containerPort:80volumes:- name:sw-agentemptyDir:{}以上是挂载 sidecar 的 deployment.yaml 文件,以nginx 作为服务为例,主要是通过共享 volume 的方式挂载 agent,首先 initContainers 通过 sw-agent 卷挂载了 sw-agent-sidecar 中的 /skywalking/agent ,并且将上面构建好的镜像中的 agent 目录 cp 到了 /skywalking/agent 目录,完成之后 nginx 启动时也挂载了 sw-agent 卷,并将其挂载到了容器的 /usr/skywalking/agent 目录,这样就完成了共享过程。\n总结 这样除去 ServiceMesh 以外,我能想到的方式就介绍完了,希望可以帮助到你。最后给 Skywalking 一个 Star 吧,国人的骄傲。\n","title":"如何使用 SkyWalking Agent 
?","url":"/zh/2019-08-30-how-to-use-skywalking-agent/"},{"content":"Based on his continuous contributions, Yuguang Zhao (a.k.a zhaoyuguang) has been invited to join the PMC. Welcome aboard.\n","title":"Welcome Yuguang Zhao to join the PMC","url":"/events/welcome-yuguang-zhao-to-join-the-pmc/"},{"content":"Based on his continuous contributions, Zhenxu Ke (a.k.a kezhenxu94) has been invited to join the PMC. Welcome aboard.\n","title":"Welcome Zhenxu Ke to join the PMC","url":"/events/welcome-zhenxu-ke-to-join-the-pmc/"},{"content":"Based on his contributions to the skywalking PHP project, Yanlong He (a.k.a heyanlong has been accepted as a new committer.\n","title":"Welcome Yanlong He as a new committer","url":"/events/welcome-yanlong-he-as-a-new-committer/"},{"content":"6.3.0 release. Go to downloads page to find release tars.\n Improve ElasticSearch storage implementation performance again. OAP backend re-install w/o agent reboot required.  Read changelog for the details.\n","title":"Release Apache SkyWalking APM 6.3.0","url":"/events/release-apache-skywalking-apm-6-3-0/"},{"content":"6.2.0 release. Go to downloads page to find release tars. ElasticSearch storage implementation changed, high reduce payload to ElasticSearch cluster.\nRead changelog for the details.\n","title":"Release Apache SkyWalking APM 6.2.0","url":"/events/release-apache-skywalking-apm-6-2-0/"},{"content":"Based on his continuous contributions, Zhenxu Ke (a.k.a kezhenxu94) has been voted as a new committer.\n","title":"Welcome Zhenxu Ke as a new committer","url":"/events/welcome-zhenxu-ke-as-a-new-committer/"},{"content":"6.1.0 release. Go to downloads page to find release tars. This is the first top level project version.\nKey updates\n RocketBot UI OAP performance improvement  ","title":"Release Apache SkyWalking APM 6.1.0","url":"/events/release-apache-skywalking-apm-6-1-0/"},{"content":"Apache SkyWalking PMC accept the RocketBot UI contributions. 
After IP clearance, it will be released in SkyWalking 6.1 soon.\n","title":"RocketBot UI has been accepted as SkyWalking primary UI","url":"/events/rocketbot-ui-has-been-accepted-as-skywalking-primary-ui/"},{"content":"Apache board approved SkyWalking graduated as TLP at April 17th 2019.\n","title":"SkyWalking graduated as Apache Top Level Project","url":"/events/skywalking-graduated-as-apache-top-level-project/"},{"content":"Based on his continuous contributions, he has been accepted as a new committer.\n","title":"Welcome Yuguang Zhao as a new committer","url":"/events/welcome-yuguang-zhao-as-a-new-committer/"},{"content":"APM和调用链跟踪 随着企业经营规模的扩大,以及对内快速诊断效率和对外SLA(服务品质协议,service-level agreement)的追求,对于业务系统的掌控度的要求越来越高,主要体现在:\n 对于第三方依赖的监控,实时/准实时了解第三方的健康状况/服务品质,降低第三方依赖对于自身系统的扰动(服务降级、故障转移) 对于容器的监控,实时/准实时的了解应用部署环境(CPU、内存、进程、线程、网络、带宽)情况,以便快速扩容/缩容、流量控制、业务迁移 业务方对于自己的调用情况,方便作容量规划,同时对于突发的请求也能进行异常告警和应急准备 自己业务的健康、性能监控,实时/准实时的了解自身的业务运行情况,排查业务瓶颈,快速诊断和定位异常,增加对自己业务的掌控力  同时,对于企业来说,能够更精确的了解资源的使用情况,对于成本核算和控制也有非常大的裨益。\n在这种情况下,一般都会引入APM(Application Performance Management \u0026amp; Monitoring)系统,通过各种探针采集数据,收集关键指标,同时搭配数据呈现和监控告警,能够解决上述的大部分问题。\n然而随着RPC框架、微服务、云计算、大数据的发展,同时业务的规模和深度相比过往也都增加了很多,一次业务可能横跨多个模块/服务/容器,依赖的中间件也越来越多,其中任何一个节点出现异常,都可能导致业务出现波动或者异常,这就导致服务质量监控和异常诊断/定位变得异常复杂,于是催生了新的业务监控模式:调用链跟踪\n 能够分布式的抓取多个节点的业务记录,并且通过统一的业务id(traceId,messageId,requestId等)将一次业务在各个节点的记录串联起来,方便排查业务的瓶颈或者异常点  产品对比 APM和调用链跟踪均不是新诞生事务,很多公司已经有了大量的实践,不过开源的并且能够开箱即用的产品并不多,这里主要选取了Pinpoint,Skywalking,CAT来进行对比(当然也有其他的例如Zipkin,Jaeger等产品,不过总体来说不如前面选取的3个完成度高),了解一下APM和调用链跟踪在开源方面的发展状态。\nPinpoint Pinpoint是一个比较早并且成熟度也非常高的APM+调用链监控的项目,在全世界范围内均有用户使用,支持Java和PHP的探针,数据容器为HBase,其界面参考:\nSkywalking Skywalking是一个新晋的项目,最近一两年发展非常迅猛,本身支持OpenTracing规范,优秀的设计提供了良好的扩展性,支持Java、PHP、.Net、NodeJs探针,数据容器为ElasticSearch,其界面参考:\nCAT CAT是由美团开源的一个APM项目,也历经了多年的迭代升级,拥有大量的企业级用户,对于监控和报警整合比较紧密,支持Java、C/C++、.Net、Python、Go、NodeJs,不过CAT目前主要通过侵入性的方式接入,数据容器包括HDFS(存储原始数据)和mysql(二次统计),其界面参考:\n横向对比 上面只是做了一个简介,那这三个项目各自有什么特色或者优势/劣势呢(三者的主要产品均针对Java,这里也主要针对Java的特性)?\n Pinpoint  优势  大企业/长时间验证,稳定性和完成度高 探针收集的数据粒度比较细 HBase的数据密度较大,支持PB级别下的数据查询 代码设计考虑的扩展性较弱,二次开发难度较大(探针为插件式,开发比较简单) 拥有完整的APM和调用链跟踪功能   劣势  代码针对性强,扩展较难 容器为HBase,查询功能较弱(主要为时间维度) 探针的额外消耗较多(探针采集粒度细,大概10%~20%) 项目趋于成熟,而扩展难度较大,目前社区活跃度偏低,基本只进行探针的增加或者升级 缺少自定义指标的设计     Skywalking  优势  数据容器为ES,查询支持的维度较多并且扩展潜力大 项目设计采用微内核+插件,易读性和扩展性都比较强 主要的研发人员为华人并且均比较活跃,能够进行更加直接的沟通 拥有完整的APM和调用链跟踪功能   劣势  项目发展非常快,稳定性有待验证 ES数据密度较小,在PB级别可能会有性能压力 缺少自定义指标的设计     CAT  优势  大企业/长时间验证,稳定性和完成度高 采用手动数据埋点而不是探针,数据采集的灵活性更强 支持自定义指标 代码设计考虑的扩展性较弱,并且数据结构复杂,二次开发难度较大 拥有完善的监控告警机制   劣势  代码针对性强,扩展较难 需要手动接入埋点,代码侵入性强 APM功能完善,但是不支持调用链跟踪      基本组件 如果分别去看Pinpoint/Skywalking/CAT的整体设计,我们会发现三者更像是一个规范的三种实现,虽然各自有不同的机制和特性,但是从模块划分和功能基本是一致的:\n当然也有一些微小的区别:\n Pinpoint基本没有aggregator,同时query和alarm集成在了web中,只有agent,collector和web Skywalking则是把collector、aggregator、alarm集成为OAP(Observability Analysis Platform),并且可以通过集群部署,不同的实例可以分别承担collector或者aggregator+alarm的角色 CAT则和Skywalking类似,把collector、aggregator、alarm集成为cat-consumer,而由于CAT有比较复杂的配置管理,所以query和配置一起集成为cat-home 当然最大的区别是Pinpoint和Skywalking均是通过javaagent做字节码的扩展,通过切面编程采集数据,类似于探针,而CAT的agent则更像是一个工具集,用于手动埋点  Skywalking 前戏这么多,终于开始进入主题,介绍今天的主角:Skywalking,不过通过之前的铺垫,我们基本都知道了Skywalking期望解决的问题以及总体的结构,下面我们则从细节来看Skywalking是怎么一步一步实现的。\n模块构成 首先,Skywalking进行了精准的领域模型划分:\n整个系统分为三部分:\n agent:采集tracing(调用链数据)和metric(指标)信息并上报 OAP:收集tracing和metric信息通过analysis core模块将数据放入持久化容器中(ES,H2(内存数据库),mysql等等),并进行二次统计和监控告警 webapp:前后端分离,前端负责呈现,并将查询请求封装为graphQL提交给后端,后端通过ribbon做负载均衡转发给OAP集群,再将查询结果渲染展示  而整个Skywalking(包括agent和OAP,而webapp后端业务非常简单主要就是认证和请求转发)均通过微内核+插件式的模式进行编码,代码结构和扩展性均非常强,具体设计可以参考: 
从Skywalking看如何设计一个微核+插件式扩展的高扩展框架 ,Spring Cloud Gateway的GatewayFilterFactory的扩展也是通过这种plugin define的方式来实现的。\nSkywalking也提供了其他的一些特性:\n 配置重载:支持通过jvm参数覆写默认配置,支持动态配置管理 集群管理:这个主要体现在OAP,通过集群部署分担数据上报的流量压力和二次计算的计算压力,同时集群也可以通过配置切换角色,分别面向数据采集(collector)和计算(aggregator,alarm),需要注意的是agent目前不支持多collector负载均衡,而是随机从集群中选择一个实例进行数据上报 支持k8s和mesh 支持数据容器的扩展,例如官方主推是ES,通过扩展接口,也可以实现插件去支持其他的数据容器 支持数据上报receiver的扩展,例如目前主要是支持gRPC接受agent的上报,但是也可以实现插件支持其他类型的数据上报(官方默认实现了对Zipkin,telemetry和envoy的支持) 支持客户端采样和服务端采样,不过服务端采样最有意义 官方制定了一个数据查询脚本规范:OAL(Observability Analysis Language),语法类似Linq,以简化数据查询扩展的工作量 支持监控预警,通过OAL获取数据指标和阈值进行对比来触发告警,支持webhook扩展告警方式,支持统计周期的自定义,以及告警静默防止重复告警  数据容器 由于Skywalking并没有自己定制的数据容器或者使用多种数据容器增加复杂度,而是主要使用ElasticSearch(当然开源的基本上都是这样来保持简洁,例如Pinpoint也只使用了HBase),所以数据容器的特性以及自己数据结构基本上就限制了业务的上限,以ES为例:\n ES查询功能异常强大,在数据筛选方面碾压其他所有容器,在数据筛选潜力巨大(Skywalking默认的查询维度就比使用HBase的Pinpoint强很多) 支持sharding分片和replicas数据备份,在高可用/高性能/大数据支持都非常好 支持批量插入,高并发下的插入性能大大增强 数据密度低,源于ES会提前构建大量的索引来优化搜索查询,这是查询功能强大和性能好的代价,但是链路跟踪往往有非常多的上下文需要记录,所以Skywalking把这些上下文二进制化然后通过Base64编码放入data_binary字段并且将字段标记为not_analyzed来避免进行预处理建立查询索引  总体来说,Skywalking尽量使用ES在大数据和查询方面的优势,同时尽量减少ES数据密度低的劣势带来的影响,从目前来看,ES在调用链跟踪方面是不二的数据容器,而在数据指标方面,ES也能中规中矩的完成业务,虽然和时序数据库相比要弱一些,但在PB级以下的数据支持也不会有太大问题。\n数据结构 如果说数据容器决定了上限,那么数据结构则决定了实际到达的高度。Skywalking的数据结构主要为:\n 数据维度(ES索引为skywalking_*_inventory)  service:服务 instance:实例 endpoint:接口 network_adress:外部依赖   数据内容  原始数据  调用链跟踪数据(调用链的trace信息,ES索引为skywalking_segment,Skywalking主要的数据消耗都在这里) 指标(主要是jvm或者envoy的运行时指标,例如ES索引skywalking_instance_jvm_cpu)   二次统计指标  指标(按维度/时间二次统计出来的例如pxx、sla等指标,例如ES索引skywalking_database_access_p75_month) 数据库慢查询记录(数据库索引:skywalking_top_n_database_statement)   关联关系(维度/指标之间的关联关系,ES索引为skywalking_*_relation_*) 特别记录  告警信息(ES索引为skywalking_alarm_record) 并发控制(ES索引为skywalking_register_lock)      其中数量占比最大的就是调用链跟踪数据和各种指标,而这些数据均可以通过OAP设置过期时间,以降低历史数据的对磁盘占用和查询效率的影响。\n调用链跟踪数据 作为Skywalking的核心数据,调用链跟踪数据(skywalking_segment)基本上奠定了整个系统的基础,而如果要详细的了解调用链跟踪的话,就不得不提到openTracing。\nopenTracing基本上是目前开源调用链跟踪系统的一个事实标准,它制定了调用链跟踪的基本流程和基本的数据结构,同时也提供了各个语言的实现。如果用一张图来表现openTracing,则是如下:\n其中:\n SpanContext:一个类似于MDC(Slfj)或者ThreadLocal的组件,负责整个调用链数据采集过程中的上下文保持和传递 Trace:一次调用的完整记录  Span:一次调用中的某个节点/步骤,类似于一层堆栈信息,Trace是由多个Span组成,Span和Span之间也有父子或者并列的关系来标志这个节点/步骤在整个调用中的位置  Tag:节点/步骤中的关键信息 Log:节点/步骤中的详细记录,例如异常时的异常堆栈   Baggage:和SpanContext一样并不属于数据结构而是一种机制,主要用于跨Span或者跨实例的上下文传递,Baggage的数据更多是用于运行时,而不会进行持久化    以一个Trace为例:\n首先是外部请求调用A,然后A依次同步调用了B和C,而B被调用时会去同步调用D,C被调用的时候会依次同步调用E和F,F被调用的时候会通过异步调用G,G则会异步调用H,最终完成一次调用。\n上图是通过Span之间的依赖关系来表现一个Trace,而在时间线上,则可以有如下的表达:\n当然,如果是同步调用的话,父Span的时间占用是包括子Span的时间消耗的。\n而落地到Skywalking中,我们以一条skywalking_segment的记录为例:\n{ \u0026quot;trace_id\u0026quot;: \u0026quot;52.70.15530767312125341\u0026quot;, \u0026quot;endpoint_name\u0026quot;: \u0026quot;Mysql/JDBI/Connection/commit\u0026quot;, \u0026quot;latency\u0026quot;: 0, \u0026quot;end_time\u0026quot;: 1553076731212, \u0026quot;endpoint_id\u0026quot;: 96142, \u0026quot;service_instance_id\u0026quot;: 52, \u0026quot;version\u0026quot;: 2, \u0026quot;start_time\u0026quot;: 1553076731212, \u0026quot;data_binary\u0026quot;: \u0026quot;CgwKCjRGnPvp5eikyxsSXhD///////////8BGMz62NSZLSDM+tjUmS0wju8FQChQAVgBYCF6DgoHZGIudHlwZRIDc3FsehcKC2RiLmluc3RhbmNlEghyaXNrZGF0YXoOCgxkYi5zdGF0ZW1lbnQYAiA0\u0026quot;, \u0026quot;service_id\u0026quot;: 2, \u0026quot;time_bucket\u0026quot;: 20190320181211, \u0026quot;is_error\u0026quot;: 0, \u0026quot;segment_id\u0026quot;: \u0026quot;52.70.15530767312125340\u0026quot; } 其中:\n trace_id:本次调用的唯一id,通过snowflake模式生成 endpoint_name:被调用的接口 latency:耗时 end_time:结束时间戳 endpoint_id:被调用的接口的唯一id 
service_instance_id:被调用的实例的唯一id version:本数据结构的版本号 start_time:开始时间戳 data_binary:里面保存了本次调用的所有Span的数据,序列化并用Base64编码,不会进行分析和用于查询 service_id:服务的唯一id time_bucket:调用所处的时段 is_error:是否失败 segment_id:数据本身的唯一id,类似于主键,通过snowflake模式生成  这里可以看到,目前Skywalking虽然相较于Pinpoint来说查询的维度要多一些,但是也很有限,而且除了endPoint,并没有和业务有关联的字段,只能通过时间/服务/实例/接口/成功标志/耗时来进行非业务相关的查询,如果后续要增强业务相关的搜索查询的话,应该还需要增加一些用于保存动态内容(如messageId,orderId等业务关键字)的字段用于快速定位。\n指标 指标数据相对于Tracing则要简单得多了,一般来说就是指标标志、时间戳、指标值,而Skywalking中的指标有两种:一种是采集的原始指标值,例如jvm的各种运行时指标(例如cpu消耗、内存结构、GC信息等);一种是各种二次统计指标(例如tp性能指标、SLA等,当然也有为了便于查询的更高时间维度的指标,例如基于分钟、小时、天、周、月)\n例如以下是索引skywalking_endpoint_cpm_hour中的一条记录,用于标志一个小时内某个接口的cpm指标:\n{ \u0026quot;total\u0026quot;: 8900, \u0026quot;service_id\u0026quot;: 5, \u0026quot;time_bucket\u0026quot;: 2019031816, \u0026quot;service_instance_id\u0026quot;: 5, \u0026quot;entity_id\u0026quot;: \u0026quot;7\u0026quot;, \u0026quot;value\u0026quot;: 148 } 各个字段的释义如下:\n total:一分钟内的调用总量 service_id:所属服务的唯一id time_bucket:统计的时段 service_instance_id:所属实例的唯一id entity_id:接口(endpoint)的唯一id value:cpm的指标值(cpm=call per minute,即total/60)  工程实现 Skywalking的工程实现堪比Dubbo,框架设计和代码质量都达到非常高的水准,以dubbo为例,即使2012年发布的老版本放到当今,其设计和编码看起来也依然赏心悦目,设计简洁但是覆盖了所有的核心需求,同时又具备非常强的扩展性,二次开发非常简单,然而却又不会像Spring那样过度封装(当然Spring作为一个更加高度通用的框架,更高的封装也是有必要的)导致代码阅读异常困难。\nagent agent(apm-sniffer)是Skywalking的Java探针实现,主要负责:\n 采集应用实例的jvm指标 通过切向编程进行数据埋点,采集调用链数据 通过RPC将采集的数据上报  当然,agent还实现了客户端采样,不过在APM监控系统里进行客户端数据采样都是没有灵魂的,所以这里就不再赘述了。\n首先,agent通过 org.apache.skywalking.apm.agent.core.boot.BootService 实现了整体的插件化,agent启动会加载所有的BootService实现,并通过 ServiceManager 来管理这些插件的生命周期,采集jvm指标、gRPC连接管理、调用链数据维护、数据上报OAP这些服务均是通过这种方式扩展。\n然后,agent还通过bytebuddy以javaagent的模式,通过字节码增强的机制来构造AOP环境,再提供PluginDefine的规范方便探针的开发,最终实现非侵入性的数据埋点,采集调用链数据。\n最终落地到代码上则异常清晰:\n//通过bytebuddy的AgentBuilder构造javaagent增强classLoader new AgentBuilder.Default(byteBuddy) .ignore( //忽略这些包的内容,不进行增强 nameStartsWith(\u0026quot;net.bytebuddy.\u0026quot;) .or(nameStartsWith(\u0026quot;org.slf4j.\u0026quot;)) .or(nameStartsWith(\u0026quot;org.apache.logging.\u0026quot;)) .or(nameStartsWith(\u0026quot;org.groovy.\u0026quot;)) .or(nameContains(\u0026quot;javassist\u0026quot;)) .or(nameContains(\u0026quot;.asm.\u0026quot;)) .or(nameStartsWith(\u0026quot;sun.reflect\u0026quot;)) .or(allSkyWalkingAgentExcludeToolkit()) .or(ElementMatchers.\u0026lt;TypeDescription\u0026gt;isSynthetic())) //通过pluginFinder加载所有的探针扩展,并获取所有可以增强的class .type(pluginFinder.buildMatch()) //按照pluginFinder的实现,去改变字节码增强类 .transform(new Transformer(pluginFinder)) //通过listener订阅增强的操作记录,方便调试 .with(new Listener()) .installOn(instrumentation); try { //加载所有的service实现并启动 ServiceManager.INSTANCE.boot(); } catch (Exception e) { logger.error(e, \u0026quot;Skywalking agent boot failure.\u0026quot;); } agent也提供了非常简单的扩展实现机制,以增强一个普通类的方法为例,首先你需要定义一个切向点:\npublic interface InstanceMethodsInterceptPoint { //定义切向方法的适配器,符合适配器的class将被增强 ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); //增强的具体实现类,classReference String getMethodsInterceptor(); //是否重写参数 boolean isOverrideArgs(); } 然后你还需要一个增强的实现类:\npublic interface InstanceMethodsAroundInterceptor { //方法真正执行前执行 void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; //方法真正执行后执行 Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; //当异常发生时执行 void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, 
Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } 一般在执行前和执行后进行数据埋点,就可以采集到想要的数据,当然实际编程要稍微复杂一点,不过官方也实现了对应的abstract类和数据埋点工具类,所以探针的二次开发在Skywalking这个级别确实是非常简单,只需要处理好资源占用和并发问题即可。真正的难点是要对需要增强的对象非常了解,熟悉其运作机制,才能找准切向点,既要所有的流程都需要经过这个点,又可以抓取到期望抓取的上下文信息。同时,多版本的适配和测试也是非常大的工作量,官方虽然提供witness的机制(通过验证某个class是否存在来验证版本),但是作为影响全局的探针,开发和测试都是需要慎之又慎的。\nOAP 同agent类似,OAP作为Skywalking最核心的模块,也实现了自己的扩展机制,不过在这里叫做Module,具体可以参考library-module,在module的机制下,Skywalking实现了自己必须核心组件:\n core:整个OAP核心业务(remoting、cluster、storage、analysis、query、alarm)的规范和接口 cluster:集群管理的具体实现 storage:数据容器的具体实现 query:为前端提供的查询接口的具体实现 receiver:接收探针上报数据的接收器的具体实现 alarm:监控告警的具体实现  以及一个可选组件:\n telemetry:用于监控OAP自身的健康状况  而前面提到的OAP的高扩展性则体现在核心业务的规范均定义在了core中,如果有需要自己扩展的,只需要自己单独做自己的实现,而不需要做侵入式的改动,最典型的示例则是官方支持的storage,不仅支持单机demo的内存数据库H2和经典的ES,连目前开源的Tidb都可以接入。\n初步实践 对于Skywalking的实践我们经历了三个阶段\n 线下测试 第一次生产环境小规模测试 第二次生产环境小规模测试+全量接入  线下测试 环境 由于是线下测试,所以我们直接使用物理机(E5-2680v2 x2, 128G)虚拟了一个集群(实际性能相比云服务器应该偏好一些):\n ES:单机实例,v6.5,4C8G,jvm内存分配为4G OAP:单机实例,v6.1.0-SNAPSHOT,4C8G,jvm内存分配为4G 应用:基于SpringCloud的4个测试实例,调用关系为A-\u0026gt;B-\u0026gt;C-\u0026gt;D,QPS为200  测试结果 拓扑图:\nOAP机器监控:\nES机器监控:\n服务监控面板:\n其中一个调用链记录:\n可以看出,Skywalking非常依赖CPU(不论是OAP还是ES),同时对于网络IO也有一定的要求,至于ES的文件IO在可接受范围内,毕竟确实有大量内容需要持久化。测试结果也基本达到预期要求,调用链和各个指标的监控都工作良好。\n第一次生产环境测试 在线下测试之后,我们再进行了一次基于实际业务针对探针的测试,测试没有发现探针的异常问题,也没有影响业务的正常运作,同时对于jvm实例影响也不是很大,CPU大概提高了5%左右,并不很明显。在这个基础上我们选择了线上的一台服务器,进行了我们第一次生产环境的测试。\n环境  ES:基于现有的一个ES集群,node x 3,v6.0 OAP:2C4G x 2,v6.1.0-SNAPSHOT,jvm内存分配为2G 应用:两个jvm实例  测试时间:03.11-03.16\n测试结果 业务机器负载情况:\n从最敏感的CPU指标上来看,增加agent并没有导致可见的CPU使用率的变化,而其他的内存、网络IO、连接数也基本没有变化。\nOAP负载情况:\n可以看到机器的CPU和网络均有较大的波动,但是也都没有真正打爆服务器,但是我们的实例却经常出现两种日志:\n One trace segment has been abandoned, cause by buffer is full.\n  Collector traceSegment service doesn\u0026rsquo;t response in xxx seconds.\n 通过阅读源码发现:\n agent和OAP只会使用一个长连接阻塞式的交换数据,如果某次数据交换没有得到响应,则会阻塞后续的上报流程(一般长连接的RPC请求会在数据传输期间互相阻塞,但是不会在等待期间互相阻塞,当然这也是源于agent并没有并发上报的机制),所以一旦OAP在接收数据的过程中发生阻塞,就会导致agent本地的缓冲区满,最终只能将监控数据直接丢弃防止内存泄漏  而导致OAP没有及时响应的一方面是OAP本身性能不够(OAP需要承担大量的二次统计工作,通过Jstack统计,长期有超过几十个线程处于RUNNABLE状态,据吴晟描述目前OAP都是高性能模式,后续将会提供配置来支持低性能模式),另一方面可能是ES批量插入效率不够,因此我们修改了OAP的批量插入参数来增加插入频率,降低单次插入数量:\n bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:2000 -\u0026gt; 20} # Execute the bulk every 2000 requests bulkSize: ${SW_STORAGE_ES_BULK_SIZE:20 -\u0026gt; 2} # flush the bulk every 20mb flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10 -\u0026gt; 2} # flush the bulk every 10 seconds whatever the number of requests  虽然 service doesn\u0026rsquo;t response 出现的频率明显降低,但是依然还是会偶尔出现,而每一次出现都会伴随大量的 trace segment has been abandoned ,推测OAP和ES可能都存在性能瓶颈(应该进行更进一步的诊断确定问题,不过当时直接和吴晟沟通,确认确实OAP非常消耗CPU资源,考虑到当时部署只是2C,并且还部署有其他业务,就没有进一步的测试)。\n同时,在频繁的数据丢弃过程中,也偶发了一个bug:当agent上报数据超时并且大量丢弃数据之后,即使后续恢复正常也能通过日志看到数据正常上报,在查询界面查询的时候,会查不到这个实例上报的数据,不过在重启OAP和agent之后,之前上报的数据又能查询到,这个也和吴晟沟通过,没有其他的案例,后续想重现却也一直没有成功。\n而同时还发现两个更加严重的问题:\n 我们使用的是线上已经部署好的ES集群,其版本只有6.0,而新的Skywalking使用了6.3的查询特性,导致很多查询执行报错,只能使用最简单的查询 我们的kafka集群版本也非常古老,不支持v1或者更高版本的header,而kafka的探针强依赖header来传输上下文信息,导致kafka客户端直接报错影响业务,所以也立即移除了kafka的探针  在这一次测试中,我们基本确认了agent对于应用的影响,同时也发现了一些我们和Skywalking的一些问题,留待后续测试确认。\n第二次生产环境测试 为了排除性能和ES版本的影响,测试Skywalking本身的可用性,参考吴晟的建议(这也是在最初技术选型的时候没有选择Pinpoint和CAT的部分原因:一方面Skywalking的功能符合我们的要求,更重要的是有更加直接和效率的和项目维护者直接沟通的渠道),所以这一次我们新申请了ES集群和OAP机器。\n环境  ES:腾讯云托管ES集群,4C16G x 3 SSD,v6.4 OAP:16C32G,standalone,jvm分配24G 应用:2~8个jvm实例  测试时间:03.18-至今\n测试结果 
OAP负载情况:\nES集群负载:\n测试过程中,我们先接入了一台机器上的两个实例,完全没有遇到一测中的延迟或者数据丢弃的问题,三天后我们又接入了另外两台机器的4个实例,这之后两天我们又接入了另外两台机器的2个实例。依然没有遇到一测中的延迟或者数据丢弃的问题。\n而ES负载的监控也基本验证了一测延迟的问题,Skywalking由于较高的并发插入,对于ES的性能压力很大(批量插入时需要针对每条数据分析并且构建查询索引),大概率是ES批量插入性能不够导致延迟,考虑到我们仅仅接入了8个实例,日均segment插入量大概5000万条(即日均5000万次独立调用),如果想支持更大规模的监控,对于ES容量规划势必要留够足够的冗余。同时OAP和ES集群的网络开销也不容忽视,在支撑大规模的监控时,需要集群并且receiver和aggregattor分离部署来分担网络IO的压力。\n而在磁盘容量占用上,我们设置的原始数据7天过期,目前刚刚开始滚动过期,目前segment索引已经累计了314757240条记录总计158G数据,当然我们目前异常记录较少,如果异常记录较多的话,其磁盘开销将会急剧增加(span中会记录异常堆栈信息)。而由于选择的SSD,磁盘的写入和查询性能都很高,即使只有3个节点,也完全没有任何压力。\n而在新版本的ES集群下,Skywalking的所有查询功能都变得可用,和我们之前自己的单独编写的异常指标监控都能完美对照。当然我们也遇到一个问题:Skywalking仅采集了调用记录,但是对于调用过程中的过程数据,除了异常堆栈其他均没有采集,导致真的出现异常也缺少充足的上下文信息还原现场,于是我们扩展了Skywalking的两个探针(我们项目目前重度依赖的组件):OkHttp(增加对requestBody和responseBody的采集)和SpringMVC(增加了对requestBody的采集),目前工作正常,如果进一步的增加其他的探针,采集到足够的数据,那么我们基本可以脱离ELK了。\n而OAP方面,CPU和内存的消耗远远低于预期的估计,CPU占用率一直较低,而分配的24G内存也仅使用了10+G,完全可以支持更大规模的接入量,不过在网络IO方面可能存在一定的风险,推测应该8C16G的容器就足以支持十万CPM级别的数据接入。\n当然我们在查询也遇到了一些瓶颈,最大的问题就是无法精确的命中某一条调用记录,就如前面的分析,因为segment的数据结构问题,无法进行面向业务的查询(例如messageId、requestId、orderId等),所以如果想精确匹配某一次调用请求,需要通过各个维度的条件约束慢慢缩小范围最后定位。\nSkywalking展望 通过上述对Skywalking的剖析和实践,Skywalking确实是一个优秀的APM+调用链跟踪监控系统,能够覆盖大部分使用场景,让研发和运维能够更加实时/准实时的了解线上服务的运行情况。当然Skywailking也不是尽善尽美,例如下面就是个人觉得目前可见的不满足我们期望的:\n 数据准实时通过gRPC上报,本地缓存的瓶颈(当然官方主要是为了简化模型,减少依赖,否则Skywalking还依赖ELK就玩得有点大了)  缓存队列的长度,过长占据内存,过短容易buffer满丢弃数据 优雅停机同时又不丢失缓存   数据上报需要在起点上报,链路回传的时候需要携带SPAN及子SPAN的信息,当链路较长或者SPAN保存的信息较多时,会额外消耗一定的带宽 skywalking更多是一个APM系统而不是分布式调用链跟踪系统  在整个链路的探针上均缺少输入输出的抓取 在调用链的筛查上并没用进行增强,并且体现在数据结构的设计,例如TAG信息均保存在SPAN信息中,而SPAN信息均被BASE64编码作为数据保存,无法检索,最终trace的筛查只能通过时间/traceId/service/endPoint/state进行非业务相关的搜索   skywalking缺少对三方接口依赖的指标,这个对于系统稳定往往非常重要  而作为一个初级的使用者,个人觉得我们可以使用有限的人力在以下方向进行扩展:\n 增加receiver:整合ELK,通过日志采集采集数据,降低异构系统的采集开发成本 优化数据结构,提供基于业务关键数据的查询接口 优化探针,采集更多的业务数据,争取代替传统的ELK日志简单查询,绝大部分异常诊断和定位均可以通过Skywalking即可完成 增加业务指标监控的模式,能够自定义业务指标(目前官方已经在实现 Metric Exporter )  ","title":"SkyWalking调研与初步实践","url":"/zh/2019-03-29-introduction-of-skywalking-and-simple-practice/"},{"content":"前言 首先描述下问题的背景,博主有个习惯,每天上下班的时候看下skywalking的trace页面的error情况。但是某天突然发现生产环境skywalking页面没有任何数据了,页面也没有显示任何的异常,有点慌,我们线上虽然没有全面铺开对接skywalking,但是也有十多个应用。看了应用agent端日志后,其实也不用太担心,对应用毫无影响。大概情况就是这样,但是问题还是要解决,下面就开始排查skywalking不可用的问题。\n使用到的工具arthas Arthas是阿里巴巴开源的一款在线诊断java应用程序的工具,是greys工具的升级版本,深受开发者喜爱。当你遇到以下类似问题而束手无策时,Arthas可以帮助你解决:\n 这个类从哪个 jar 包加载的?为什么会报各种类相关的 Exception? 我改的代码为什么没有执行到?难道是我没 commit?分支搞错了? 遇到问题无法在线上 debug,难道只能通过加日志再重新发布吗? 线上遇到某个用户的数据处理有问题,但线上同样无法 debug,线下无法重现! 是否有一个全局视角来查看系统的运行状况? 有什么办法可以监控到JVM的实时运行状态? 
Arthas采用命令行交互模式,同时提供丰富的 Tab 自动补全功能,进一步方便进行问题的定位和诊断。  项目地址:https://github.com/alibaba/arthas\n先定位问题一 查看skywalking-oap-server.log的日志,发现会有一条异常疯狂的在输出,异常详情如下:\n2019-03-01 09:12:11,578 - org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker -3264081149 [DataCarrier.IndicatorPersistentWorker.endpoint_inventory.Consumser.0.Thread] ERROR [] - Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; org.elasticsearch.action.ActionRequestValidationException: Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; at org.elasticsearch.action.ValidateActions.addValidationError(ValidateActions.java:26) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.action.index.IndexRequest.validate(IndexRequest.java:183) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:515) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequestAndParseEntity(RestHighLevelClient.java:508) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.index(RestHighLevelClient.java:348) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient.forceInsert(ElasticSearchClient.java:141) ~[library-client-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.storage.plugin.elasticsearch.base.RegisterEsDAO.forceInsert(RegisterEsDAO.java:66) ~[storage-elasticsearch-plugin-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.lambda$onWork$0(RegisterPersistentWorker.java:83) ~[server-core-6.0.0-alpha.jar:6.0.0-alpha] at java.util.HashMap$Values.forEach(HashMap.java:981) [?:1.8.0_201] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.onWork(RegisterPersistentWorker.java:74) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.access$100(RegisterPersistentWorker.java:35) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker$PersistentConsumer.consume(RegisterPersistentWorker.java:120) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.consume(ConsumerThread.java:101) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.run(ConsumerThread.java:68) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] 2019-03-01 09:12:11,627 - org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker -3264081198 [DataCarrier.IndicatorPersistentWorker.endpoint_inventory.Consumser.0.Thread] ERROR [] - Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; org.elasticsearch.action.ActionRequestValidationException: Validation Failed: 1: id is too long, must be no longer than 512 bytes but was: 684; at org.elasticsearch.action.ValidateActions.addValidationError(ValidateActions.java:26) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.action.index.IndexRequest.validate(IndexRequest.java:183) ~[elasticsearch-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:515) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at 
org.elasticsearch.client.RestHighLevelClient.performRequestAndParseEntity(RestHighLevelClient.java:508) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.index(RestHighLevelClient.java:348) ~[elasticsearch-rest-high-level-client-6.3.2.jar:6.3.2] at org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient.forceInsert(ElasticSearchClient.java:141) ~[library-client-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.storage.plugin.elasticsearch.base.RegisterEsDAO.forceInsert(RegisterEsDAO.java:66) ~[storage-elasticsearch-plugin-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.lambda$onWork$0(RegisterPersistentWorker.java:83) ~[server-core-6.0.0-alpha.jar:6.0.0-alpha] at java.util.HashMap$Values.forEach(HashMap.java:981) [?:1.8.0_201] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.onWork(RegisterPersistentWorker.java:74) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker.access$100(RegisterPersistentWorker.java:35) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.oap.server.core.register.worker.RegisterPersistentWorker$PersistentConsumer.consume(RegisterPersistentWorker.java:120) [server-core-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.consume(ConsumerThread.java:101) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] at org.apache.skywalking.apm.commons.datacarrier.consumer.ConsumerThread.run(ConsumerThread.java:68) [apm-datacarrier-6.0.0-alpha.jar:6.0.0-alpha] 可以看到,上面的异常输出的时间节点,以这种频率在疯狂的刷新。通过异常message,得知到是因为skywalking在写elasticsearch时,索引的id太长了。下面是elasticsearch的源码:\nif (id != null \u0026amp;\u0026amp; id.getBytes(StandardCharsets.UTF_8).length \u0026gt; 512) { validationException = addValidationError(\u0026#34;id is too long, must be no longer than 512 bytes but was: \u0026#34; + id.getBytes(StandardCharsets.UTF_8).length, validationException); } 具体可见:elasticsearch/action/index/IndexRequest.java#L240\n问题一: 通过日志,初步定位是哪个系统的url太长,skywalking在注册url数据时触发elasticsearch针对索引id校验的异常,而skywalking注册失败后会不断的重试,所以才有了上面日志不断刷的现象。\n问题解决: elasticsearch client在写es前通过硬编码的方式写死了索引id的长度不能超过512字节大小。也就是我们不能通过从ES侧找解决方案了。回到异常的message,只能看到提示id太长,并没有写明id具体是什么,这个异常提示其实是不合格的,博主觉得应该把id的具体内容抛出来,问题就简单了。因为异常没有明确提示,系统又比较多,不能十多个系统依次关闭重启来验证到底是哪个系统的哪个url有问题。这个时候Arthas就派上用场了,在不重启应用不开启debug模式下,查看实例中的属性对象。下面通过Arthas找到具体的url。\n从异常中得知,org.elasticsearch.action.index.IndexRequest这个类的validate方法触发的,这个方法是没有入参的,校验的id属性其实是对象本身的属性,那么我们使用Arthas的watch指令来看下这个实例id属性。先介绍下watch的用法:\n功能说明 让你能方便的观察到指定方法的调用情况。能观察到的范围为:返回值、抛出异常、入参,通过编写 \u0008OGNL 表达式进行对应变量的查看。\n参数说明 watch 的参数比较多,主要是因为它能在 4 个不同的场景观察对象\n   参数名称 参数说明     class-pattern 类名表达式匹配   method-pattern 方法名表达式匹配   express 观察表达式   condition-express 条件表达式   [b] 在方法调用之前观察   [e] 在方法异常之后观察   [s] 在方法返回之后观察   [f] 在方法结束之后(正常返回和异常返回)观察   [E] 开启正则表达式匹配,默认为通配符匹配   [x:] 指定输出结果的属性遍历深度,默认为 1    从上面的用法说明结合异常信息,我们得到了如下的指令脚本:\nwatch org.elasticsearch.action.index.IndexRequest validate \u0026ldquo;target\u0026rdquo;\n执行后,就看到了我们希望了解到的内容,如:\n索引id的具体内容看到后,就好办了。我们暂时把定位到的这个应用启动脚本中的的skywalking agent移除后(计划后面重新设计下接口)重启了下系统验证下。果然疯狂输出的日志停住了,但是问题并没完全解决,skywalking页面上的数据还是没有恢复。\n定位问题二 skywalking数据存储使用了elasticsearch,页面没有数据,很有可能是elasticsearch出问题了。查看elasticsearch日志后,发现elasticsearch正在疯狂的GC,日志如:\n: 139939K-\u0026gt;3479K(153344K), 0.0285655 secs] 473293K-\u0026gt;336991K(5225856K), 0.0286918 secs] [Times: 
user=0.05 sys=0.00, real=0.03 secs] 2019-02-28T20:05:38.276+0800: 3216940.387: Total time for which application threads were stopped: 0.0301495 seconds, Stopping threads took: 0.0001549 seconds 2019-02-28T20:05:38.535+0800: 3216940.646: [GC (Allocation Failure) 2019-02-28T20:05:38.535+0800: 3216940.646: [ParNew Desired survivor size 8716288 bytes, new threshold 6 (max 6) - age 1: 1220136 bytes, 1220136 total - age 2: 158496 bytes, 1378632 total - age 3: 88200 bytes, 1466832 total - age 4: 46240 bytes, 1513072 total - age 5: 126584 bytes, 1639656 total - age 6: 159224 bytes, 1798880 total : 139799K-\u0026gt;3295K(153344K), 0.0261667 secs] 473311K-\u0026gt;336837K(5225856K), 0.0263158 secs] [Times: user=0.06 sys=0.00, real=0.03 secs] 2019-02-28T20:05:38.562+0800: 3216940.673: Total time for which application threads were stopped: 0.0276971 seconds, Stopping threads took: 0.0001030 seconds 2019-02-28T20:05:38.901+0800: 3216941.012: [GC (Allocation Failure) 2019-02-28T20:05:38.901+0800: 3216941.012: [ParNew Desired survivor size 8716288 bytes, new threshold 6 (max 6) 问题二: 查询后得知,elasticsearch的内存配置偏大了,GC时间太长,导致elasticsearch脱离服务了。elasticsearch所在主机的内存是8G的实际内存7.6G,刚开始配置了5G的堆内存大小,可能Full GC的时候耗时太久了。查询elasticsearch官方文档后,得到如下的jvm优化建议:\n 将最小堆大小(Xms)和最大堆大小(Xmx)设置为彼此相等。 Elasticsearch可用的堆越多,它可用于缓存的内存就越多。但请注意,过多的堆可能会使您陷入长时间的垃圾收集暂停。 设置Xmx为不超过物理RAM的50%,以确保有足够的物理RAM用于内核文件系统缓存。 不要设置Xmx为JVM用于压缩对象指针(压缩oops)的截止值之上; 确切的截止值变化但接近32 GB。  详情见:https://www.elastic.co/guide/en/elasticsearch/reference/6.5/heap-size.html\n问题解决: 根据Xmx不超过物理RAM的50%上面的jvm优化建议。后面将Xms和Xmx都设置成了3G。然后先停掉skywalking(由于skywalking中会缓存部分数据,如果直接先停ES,会报索引找不到的类似异常,这个大部分skywalking用户应该有遇到过),清空skywalking缓存目录下的内容,如:\n在重启elasticsearch,接着启动skywalking后页面终于恢复了\n结语 整个问题排查到解决大概花了半天时间,幸好一点也不影响线上应用的使用,这个要得益于skywalking的设计,不然就是大灾难了。然后要感谢下Arthas的技术团队,写了这么好用的一款产品并且开源了,如果没有Arthas,这个问题真的不好定位,甚至一度想到了换掉elasticsearch,采用mysql来解决索引id过长的问题。Arthas真的是线上找问题的利器,博主在Arthas刚面世的时候就关注了,并且一直在公司推广使用,在这里在硬推一波。\n作者简介: 陈凯玲,2016年5月加入凯京科技。曾任职高级研发和项目经理,现任凯京科技研发中心架构\u0026amp;运维部负责人。pmp项目管理认证,阿里云MVP。热爱开源,先后开源过多个热门项目。热爱分享技术点滴,独立博客KL博客(http://www.kailing.pub)博主。\n","title":"SkyWalking线上问题排查定位","url":"/zh/2019-03-01-skywalking-troubleshoot/"},{"content":" 作者:王振飞, 写于:2019-02-24 说明:此文是个人所写,版本归属作者,代表个人观点,仅供参考,不代表skywalking官方观点。 说明:本次对比基于skywalking-6.0.0-GA和Pinpoint-1.8.2(截止2019-02-19最新版本)。另外,我们这次技术选型直接否定了Zipkin,其最大原因是它对代码有侵入性,CAT也是一样。这是我们所完全无法接受的。\n 这应该是目前最优秀的两款开源APM产品了,而且两款产品都通过字节码注入的方式,实现了对代码完全无任何侵入,他们的对比信息如下:\nOAP说明: skywalking6.x才有OAP这个概念,skywalking5.x叫collector。\n接下来,对每个PK项进行深入分析和对比。更多精彩和首发内容请关注公众号:【阿飞的博客】。\n社区比较\n这一点上面skywalking肯定完胜。一方面,skywalking已经进入apache孵化,社区相当活跃。而且项目发起人是中国人,我们能够进入官方群(Apache SkyWalking交流群:392443393)和项目发起人吴晟零距离沟通,很多问题能第一时间得到大家的帮助(玩过开源的都知道,这个价值有多大)。 而Pinpoint是韩国人开发的,免不了有沟通障碍。至于github上最近一年的commit频率,skywalking和Pinpoint旗鼓相当,都是接近20的水平: 所以,社区方面,skywalking更胜一筹。\n支持语言比较 Pinpoint只支持Java和PHP,而skywalking支持5种语言:Java, C#, PHP, Node.js, Go。如果公司的服务涉及到多个开发语言,那么skywalking会是你更好的选择。并且,如果你要实现自己的探针(比如python语言),skywalking的二次开发成本也比Pinpoint更低。\n 说明:Github上有开发者为Pinpoint贡献了对Node.js的支持,请戳链接:https://github.com/peaksnail/pinpoint-node-agent。但是已经停止维护,几年没更新了!\n 所以,支持语言方面,skywalking更胜一筹。\n协议比较 SkyWalking支持gRPC和http,不过建议使用gRPC,skywalking6.x版本已经不提供http方式(但是还会保留接收5.x的数据),以后会考虑删除。 而Pinpoint使用的是thrift协议。 协议本身没有谁好谁坏。\n存储比较(重要) 
笔者认为,存储是skywalking和Pinpoint最大的差异所在,因为底层存储决定了上层功能。\nPinpoint只支持HBase,且扩展代价较大。这就意味着,如果选择Pinpoint,还要有能力hold住一套HBase集群(daocloud从Pinpoint切换到skywalking就是因为HBase的维护代价有点大)。在这方面,skywalking支持的存储就多很多,这样的话,技术选型时可以根据团队技术特点选择合适的存储,而且还可以自行扩展(不过生产环境上应该大部分是以es存储为主)。\nPinpoint只支持HBase的另一个缺陷就是,HBase本身查询能力有限(HBase只能支持三种方式查询:RowKey精确查找,SCAN范围查找,全表扫描)限制了Pinpoint的查询能力,所以其支持的查询一定是在时间的基础上(Pinpoint通过鼠标圈定一个时间范围后查看这个范围内的Trace信息)。而skywalking可以多个维度任意组合查询,例如:时间范围,服务名,Trace状态,请求路径,TraceId等。\n另外,Pinpoint和skywalking都支持TTL,即历史数据保留策略。skywalking是在OAP模块的application.yml中配置从而指定保留时间。而Pinpoint是通过HBase的ttl功能实现,通过Pinpoint提供的hbase脚本https://github.com/naver/pinpoint/blob/master/hbase/scripts/hbase-create.hbase可以看到:ApplicationTraceIndex配置了TTL =\u0026gt; 5184000,SqlMetaData_Ver2配合了TTL =\u0026gt; 15552000,单位是秒。\n 说明:es并不是完全碾压HBase,es和HBase没有绝对的好和坏。es强在检索能力,存储能力偏弱(千亿以下,es还是完全有能力hold的住的)。HBase强在存储能力,检索能力偏弱。如果搜集的日志量非常庞大,那么es存储就比较吃力。当然,没有蹩脚的中间件,只有蹩脚的程序员,无论是es还是HBase,调优才是最关键的。同样的,如果对检索能力有一定的要求,那么HBase肯定满足不了你。所以,又到了根据你的业务和需求决定的时刻了,trade-off真是无所不在。\n UI比较 Pinpoint的UI确实比skywalking稍微好些,尤其是服务的拓扑图展示。不过daocloud根据Pinpoint的风格为skywalking定制了一款UI。请戳链接:https://github.com/TinyAllen/rocketbot,项目介绍是:rocketbot: A UI for Skywalking。截图如下所示; 所以,只比较原生UI的话,Pinpoint更胜一筹。\n扩展性比较 Pinpoint好像设计之初就没有过多考虑扩展性,无论是底层的存储,还是自定义探针实现等。而skywalking核心设计目标之一就是Pluggable,即可插拔。\n以存储为例,pinpoint完全没有考虑扩展性,而skywalking如果要自定义实现一套存储,只需要定义一个类实现接口org.apache.skywalking.oap.server.library.module.ModuleProvider,然后实现一些DAO即可。至于Pinpoint则完全没有考虑过扩展底层存储。\n再以实现一个自己的探针为例(比如我要实现python语言的探针),Pinpoint选择thrift作为数据传输协议标准,而且为了节省数据传输大小,在传递常量的时候也尽量使用数据参考字典,传递一个数字而不是直接传递字符串等等。这些优化也增加了系统的复杂度:包括使用 Thrift 接口的难度、UDP 数据传输的问题、以及数据常量字典的注册问题等等。Pinpoint发展这么年才支持Java和PHP,可见一斑。而skywalking的数据接口就标准很多,并且支持OpenTracing协议,除了官方支持Java以外,C#、PHP和Node.js的支持都是由社区开发并维护。\n还有后面会提到的告警,skywalking的可扩展性也要远好于Pinpoint。\n最后,Pinpoint和skywalking都支持插件开发,Pinpoint插件开发参考:http://naver.github.io/pinpoint/1.8.2/plugindevguide.html。skywalking插件开发参考:https://github.com/apache/incubator-skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md。\n所以,扩展性方面skywalking更胜一筹。\n告警比较 Pinpoint和skywalking都支持自定义告警规则。\n但是恼人的是,Pinpoint如果要配置告警规则,还需要安装MySQL(配置告警时的用户,用户组信息以及告警规则都持久化保存在MySQL中),这就导致Pinpoint的维护成本又高了一些,既要维护HBase又要维护MySQL。\nPinpoint支持的告警规则有:SLOW COUNT|RATE, ERROR COUNT|RATE, TOTAL COUNT, SLOW COUNT|RATE TO CALLEE, ERROR COUNT|RATE TO CALLEE, ERROR RATE TO CALLEE, HEAP USAGE RATE, JVM CPU USAGE RATE, DATASOURCE CONNECTION USAGE RATE。\nPinpoint每3分钟周期性检查过去5分钟的数据,如果有符合规则的告警,就会发送sms/email给用户组下的所有用户。需要说明的是,实现发送sms/email的逻辑需要自己实现,Pinpoint只提供了接口com.navercorp.pinpoint.web.alarm.AlarmMessageSender。并且Pinpoint发现告警持续时,会递增发送sms/email的时间间隔 3min -\u0026gt; 6min -\u0026gt; 12min -\u0026gt; 24min,防止sms/email狂刷。\n Pinpoint告警参考:http://naver.github.io/pinpoint/1.8.2/alarm.html\n skywalking配置告警不需要引入任何其他存储。skywalking在config/alarm-settings.xml中可以配置告警规则,告警规则支持自定义。\nskywalking支持的告警规则(配置项中的名称是indicator-name)有:service_resp_time, service_sla, service_cpm, service_p99, service_p95, service_p90, service_p75, service_p50, service_instance_sla, service_instance_resp_time, service_instance_cpm, endpoint_cpm, endpoint_avg, endpoint_sla, endpoint_p99, endpoint_p95, endpoint_p90, endpoint_p75, endpoint_p50。\nSkywalking通过HttpClient的方式远程调用在配置项webhooks中定义的告警通知服务地址。skywalking也支持silence-period配置,假设在TN这个时间点触发了告警,那么TN -\u0026gt; TN+period 这段时间内不会再重复发送该告警。\n skywalking告警参考:https://github.com/apache/incubator-skywalking/blob/master/docs/en/setup/backend/backend-alarm.md。目前只支持official_analysis.oal脚本中Service, Service Instance, Endpoint scope的metric,其他scope的metric需要等待后续扩展。\n 
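为了更直观地说明 webhooks 的对接方式,下面给出一个极简的告警接收端示意(基于 Spring MVC 的假设性示例,并非官方实现;AlarmMessage 的字段名仅为示意,实际字段请以所用版本实际推送的 JSON 及官方告警文档为准):

```java
import java.util.List;

import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;

// 告警 webhook 接收端示意:把该接口地址(例如 http://host:port/alarm/receive)配置到 webhooks 中,
// skywalking 触发告警时会以 HTTP POST 方式把告警消息列表推送到这里。
@RestController
public class AlarmWebhookController {

    @PostMapping("/alarm/receive")
    public void receive(@RequestBody List<AlarmMessage> messages) {
        for (AlarmMessage message : messages) {
            // 在这里对接自己的通知渠道:短信、邮件、微信等
            System.out.println("收到告警: " + message.getAlarmMessage());
        }
    }

    // 示意用的消息结构,字段名为假设
    public static class AlarmMessage {
        private String name;         // 触发告警的 service/endpoint 名称
        private String alarmMessage; // 告警规则中配置的 message 内容
        private long startTime;      // 告警触发时间(毫秒时间戳)

        public String getName() { return name; }
        public void setName(String name) { this.name = name; }
        public String getAlarmMessage() { return alarmMessage; }
        public void setAlarmMessage(String alarmMessage) { this.alarmMessage = alarmMessage; }
        public long getStartTime() { return startTime; }
        public void setStartTime(long startTime) { this.startTime = startTime; }
    }
}
```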
Pinpoint和skywalking都支持常用的告警规则配置,但是skywalking采用webhooks的方式就灵活很多:短信通知,邮件通知,微信通知都是可以支持的。而Pinpoint只能sms/email通知,并且还需要引入MySQL存储,增加了整个系统复杂度。所以,告警方面,skywalking更胜一筹。\nJVM监控 skywalking支持监控:Heap, Non-Heap, GC(YGC和FGC)。 Pinpoint能够监控的指标主要有:Heap, Non-Heap, FGC, DirectBufferMemory, MappedBufferMemory,但是没有YGC。另外,Pinpoint还支持多个指标同一时间点查看的功能。如下图所示:\n所以,对JVM的监控方面,Pinpoint更胜一筹。\n服务监控 包括操作系统,和部署的服务实例的监控。 Pinpoint支持的维度有:CPU使用率,Open File Descriptor,数据源,活动线程数,RT,TPS。 skywalking支持的维度有:CPU使用率,SLA,RT,CPM(Call Per Minutes)。 所以,这方面两者旗鼓相当,没有明显的差距。\n跟踪粒度比较 Pinpoint在这方面做的非常好,跟踪粒度非常细。如下图所示,是Pinpoint对某个接口的trace信息: 而同一个接口skywalking的trace信息如下图所示:  备注: 此截图是skywalking加载了插件apm-spring-annotation-plugin-6.0.0-GA.jar(这个插件允许跟踪加了@Bean, @Service, @Component and @Repository注解的spring context中的bean的方法)。\n 通过对比发现,在跟踪粒度方面,Pinpoint更胜一筹。\n过滤追踪 Pinpoint和skywalking都可以实现,而且配置的表达式都是基于ant风格。 Pinpoint在Web UI上配置 filter wizard 即可自定义过滤追踪。 skywalking通过加载apm-trace-ignore-plugin插件就能自定义过滤跟踪,skywalking这种方式更灵活,比如一台高配服务器上有若干个服务,在共用的agent配置文件apm-trace-ignore-plugin.config中可以配置通用的过滤规则,然后通过-D的方式为每个服务配置个性化过滤。\n所以,在过滤追踪方面,skywalking更胜一筹。\n性能损耗 由于Pinpoint采集信息太过详细,所以,它对性能的损耗最大。而skywalking默认策略比较保守,对性能损耗很小。 有网友做过压力测试,对比如下:\n 图片来源于:https://juejin.im/post/5a7a9e0af265da4e914b46f1\n 所以,在性能损耗方面,skywalking更胜一筹。\n发布包比较 skywalking与时俱进,全系标配jar包,部署只需要执行start.sh脚本即可。而Pinpoint的collector和web还是war包,部署时依赖web容器(比如Tomcat)。拜托,都9012年了。\n所以,在发布包方面,skywalking更胜一筹。\n支持组件比较 skywalking和Pinpoint支持的中间件对比说明:\n WEB容器说明:Pinpoint支持几乎所有的WEB容器,包括开源和商业的。而wkywalking只支持开源的WEB容器,对2款大名鼎鼎的商业WEB容器Weblogic和Wevsphere都不支持。 RPC框架说明:对RPC框架的支持,skywalking简直秒杀Pinpoint。连小众的motan和sofarpc都支持。 MQ说明:skywalking比Pinpoint多支持一个国产的MQ中间件RocketMQ,毕竟RocketMQ在国内名气大,而在国外就一般了。加之skywalking也是国产的。 RDBMS/NoSQL说明:Pinpoint对RDBMS和NoSQL的支持都要略好于skywalking,RDBMS方面,skywalking不支持MSSQL和MariaDB。而NoSQL方面,skywalking不支持Cassandra和HBase。至于Pinpoint不支持的H2,完全不是问题,毕竟生产环境是肯定不会使用H2作为底层存储的。 Redis客户端说明:虽然skywalking和Pinpoint都支持Redis,但是skywalking支持三种流行的Redis客户端:Jedis,Redisson,Lettuce。而Pinpoint只支持Jedis和Lettuce,再一次,韩国人开发的Pinpoint无视了目前中国人开发的GitHub上star最多的Redis Client \u0026ndash; Redisson。 日志框架说明:Pinpoint居然不支持log4j2?但是已经有人开发了相关功能,详情请戳链接:log4j plugin support log4j2 or not? https://github.com/naver/pinpoint/issues/3055  通过对skywalking和Pinpoint支持中间件的对比我们发现,skywalking对国产软件的支持真的是全方位秒杀Pinpoint,比如小众化的RPC框架:motan(微博出品),sofarpc,阿里的RocketMQ,Redis客户端Redisson,以及分布式任务调度框架elastic-job等。当然也从另一方面反应国产开源软件在世界上的影响力还很小。\n这方面没有谁好谁坏,毕竟每个公司使用的技术栈不一样。如果你对RocketMQ有强需求,那么skywalking是你的最佳选择。如果你对es有强需求,那么skywalking也是你的最佳选择。如果HBase是你的强需求,那么Pinpoint就是你的最佳选择。如果MSSQL是你的强需求,那么Pinpoint也是你的最佳选择。总之,这里完全取决你的项目了。\n总结 经过前面对skywalking和Pinpoint全方位对比后我们发现,对于两款非常优秀的APM软件,有一种既生瑜何生亮的感觉。Pinpoint的优势在于:追踪数据粒度非常细、功能强大的用户界面,以及使用HBase作为存储带来的海量存储能力。而skywalking的优势在于:非常活跃的中文社区,支持多种语言的探针,对国产开源软件非常全面的支持,以及使用es作为底层存储带来的强大的检索能力,并且skywalking的扩展性以及定制化要更优于Pinpoint:\n 如果你有海量的日志存储需求,推荐Pinpoint。 如果你更看重二次开发的便捷性,推荐skywalking。  最后,参考上面的对比,结合你的需求,哪些不能妥协,哪些可以舍弃,从而更好的选择一款最适合你的APM软件。\n参考链接  参考[1]. https://github.com/apache/incubator-skywalking/blob/master/docs/en/setup/service-agent/java-agent/Supported-list.md 参考[2]. http://naver.github.io/pinpoint/1.8.2/main.html#supported-modules 参考[3]. https://juejin.im/post/5a7a9e0af265da4e914b46f1    如果觉得本文不错,请关注作者公众号:【阿飞的博客】,多谢!\n ","title":"APM巅峰对决:SkyWalking P.K. Pinpoint","url":"/zh/2019-02-24-skywalking-pk-pinpoint/"},{"content":"According to Apache Software Foundation branding policy all docker images of Apache Skywalking should be transferred from skywalking to apache with a prefix skywalking-. 
The transfer details are as follows\n skywalking/base -\u0026gt; apache/skywalking-base skywalking/oap -\u0026gt; apache/skywalking-oap-server skywalking/ui -\u0026gt; apache/skywalking-ui  All of repositories in skywalking will be removed after one week.\n","title":"Transfer Docker Images to Apache Official Repository","url":"/events/transfer-docker-images-to-apache-official-repository/"},{"content":"6.0.0-GA release. Go to downloads page to find release tars. This is an important milestone version, we recommend all users upgrade to this version.\nKey updates\n Bug fixed Register bug fix, refactor and performance improvement New trace UI  ","title":"Release Apache SkyWalking APM 6.0.0-GA","url":"/events/release-apache-skywalking-apm-6-0-0-ga/"},{"content":"Based on his contributions to the project, he has been accepted as SkyWalking PPMC. Welcome aboard.\n","title":"Welcome Jian Tan as a new PPMC","url":"/events/welcome-jian-tan-as-a-new-ppmc/"},{"content":" Author: Hongtao Gao, Apache SkyWalking \u0026amp; ShardingShpere PMC GitHub, Twitter, Linkedin  Service mesh receiver was first introduced in Apache SkyWalking 6.0.0-beta. It is designed to provide a common entrance for receiving telemetry data from service mesh framework, for instance, Istio, Linkerd, Envoy etc. What’s the service mesh? According to Istio’s explain:\nThe term service mesh is used to describe the network of microservices that make up such applications and the interactions between them.\nAs a PMC member of Apache SkyWalking, I tested trace receiver and well understood the performance of collectors in trace scenario. I also would like to figure out the performance of service mesh receiver.\nDifferent between trace and service mesh Following chart presents a typical trace map:\nYou could find a variety of elements in it just like web service, local method, database, cache, MQ and so on. But service mesh only collect service network telemetry data that contains the entrance and exit data of a service for now(more elements will be imported soon, just like Database). A smaller quantity of data is sent to the service mesh receiver than the trace.\nBut using sidecar is a little different.The client requesting “A” that will send a segment to service mesh receiver from “A”’s sidecar. If “A” depends on “B”, another segment will be sent from “A”’s sidecar. But for a trace system, only one segment is received by the collector. The sidecar model splits one segment into small segments, that will increase service mesh receiver network overhead.\nDeployment Architecture In this test, I will pick two different backend deployment. One is called mini unit, consist of one collector and one elasticsearch instance. Another is a standard production cluster, contains three collectors and three elasticsearch instances.\nMini unit is a suitable architecture for dev or test environment. It saves your time and VM resources, speeds up depolyment process.\nThe standard cluster provides good performance and HA for a production scenario. Though you will pay more money and take care of the cluster carefully, the reliability of the cluster will be a good reward to you.\nI pick 8 CPU and 16GB VM to set up the test environment. This test targets the performance of normal usage scenarios, so that choice is reasonable. The cluster is built on Google Kubernetes Engine(GKE), and every node links each other with a VPC network. 
Because running the collector is a CPU-intensive task, the resource request of the collector deployment should be 8 CPU, which means every collector instance occupies a VM node.\nTesting Process The number of mesh fragments received per second (MPS) depends on the following variables.\n Ingress queries per second (QPS) The topology of the microservice cluster Service mesh mode (proxy or sidecar)  In this test, I use the Bookinfo app as a demo cluster.\nSo every request will touch at most 4 nodes. Plus, picking the sidecar mode (every request will send two telemetry records), the MPS will be QPS * 4 * 2.\nThere are also some important metrics that should be explained\n Client Query Latency: GraphQL API query response time heatmap. Client Mesh Sender: mesh segments sent per second. The total line represents the total send amount and the error line is the total number of failed sends. Mesh telemetry latency: service mesh receiver data-handling heatmap. Mesh telemetry received: received mesh telemetry data per second.  Mini Unit You can see the collector can process up to 25k records per second. The CPU usage is about 4 cores. Most of the query latency is less than 50ms. After logging into the VM on which the collector instance runs, I found that the system load was reaching the limit (max is 8).\nAccording to the previous formula, a single collector instance could process about 3k QPS of Bookinfo traffic (25,000 MPS ÷ (4 × 2) ≈ 3,100 QPS).\nStandard Cluster Compared to the mini unit, the cluster’s throughput increases linearly. Three instances provide a total of 80k per second processing power. Query latency increases slightly, but it’s also very small (less than 500ms). I also checked every collector instance’s system load; all reached the limit. 10k QPS of Bookinfo telemetry data could be processed by the cluster.\nConclusion Let’s wrap it up. There are some important things you can take away from this test.\n QPS varies by the three variables above. The test results in this blog are not the point; users should pick proper values according to their own systems. The collector cluster’s processing power can scale out. The collector is a CPU-intensive application, so you should provide sufficient CPU resources to it.  This blog gives people a common method to evaluate the throughput of the Service Mesh Receiver. 
Users could use this to design their Apache Skywalking backend deployment architecture.\n","title":"SkyWalking performance in Service Mesh scenario","url":"/blog/2019-01-25-mesh-loadtest/"},{"content":"ps:本文仅写给菜鸟,以及不知道如何远程调试的程序员,并且仅仅适用skywalking的远程调试\n概述 远程调试的目的是为了解决代码或者说程序包部署在服务器上运行,只能通过log来查看问题,以及不能跟在本地IDE运行debug那样查找问题,观看程序运行流程\u0026hellip; 想想当你的程序运行在服务器上,你在本地的IDE随时debug,是不是很爽的感觉。\n好了不废话,切入正题。\n环境篇 IDE:推荐 IntelliJ IDEA\n开发语言: 本文仅限于java,其他语言请自行询问google爸爸或者baidu娘娘\n源代码:自行从github下载,并且确保你运行的skywalking包也源代码的一致,(也就是说你自己从源代码编译打包运行,虽然不一样也可以调试,但是你想想你在本地开发,更改完代码,没有重新运行,debug出现的诡异情况)\n场景篇 假定有如下三台机器\n   IP 用途 备注     10.193.78.1 oap-server skywalking 的oap服务(或者说collector所在的服务器)   10.193.78.2 agent skywalking agent运行所在的服务器   10.193.78.0 IDE 你自己装IDE也就是IntelliJ IDEA的机器    以上环境,场景请自行安装好,并确认正常运行。本文不在赘述\n废话终于说完了\n操作篇 首要条件,下载源码后,先用maven 打包编译。然后使用Idea打开源码的父目录,整体结构大致如下图 1 :agent调试 1)Idea 配置部分 点击Edit Configurations 在弹出窗口中依次找到(红色线框的部分)并点击 打开的界面如下 修改Name值,自己随意,好记即可 然后Host输入10.193.78.2 Port默认或者其他的,重要的是这个端口在10.193.78.2上没有被占用\n然后找到Use module classpath 选择 apm-agent 最终的结果如下: 注意选择目标agent运行的jdk版本,很重要\n然后点击Apply,并找到如下内容,并且复制待用 2)agent配置部分 找到agent配置的脚本,并打开,找到配置agent的地方, 就这个地方,在这个后边加上刚才复制的内容 最终的结果如下 提供一个我配置的weblogic的配置(仅供参考) 然后重启应用(agent)\n3)调试 回到Idea中找到这个地方,并点击debug按钮,你没看错,就是红色圈住的地方 然后控制台如果出现以下字样: 那么恭喜你,可以愉快的加断点调试了。 ps:需要注意的是agent的、 service instance的注册可能不能那么愉快的调试。因为这个注册比较快,而且是在agent启动的时候就发生的, 而远程调试也需要agent打开后才可以调试,所以,如果你手快当我没说这句话。\n2 :oap-server的调试(也就是collector的调试) 具体过程不在赘述,和上一步的agent调试大同小异,不同的是 Use module classpath需要选择oap-server\n","title":"SkyWalking的远程调试","url":"/zh/2019-01-24-skywalking-remote-debug/"},{"content":"引言 《SkyWalking Java 插件贡献实践》:本文将基于SkyWalking 6.0.0-GA-SNAPSHOT版本,以编写Redis客户端Lettuce的SkyWalking Java Agent 插件为例,与大家分享我贡献PR的过程,希望对大家了解SkyWalking Java Agent插件有所帮助。\n基础概念 OpenTracing和SkyWalking链路模块几个很重要的语义概念。\n  Span:可理解为一次方法调用,一个程序块的调用,或一次RPC/数据库访问。只要是一个具有完整时间周期的程序访问,都可以被认为是一个span。SkyWalking Span对象中的重要属性\n   属性 名称 备注     component 组件 插件的组件名称,如:Lettuce,详见:ComponentsDefine.Class。   tag 标签 k-v结构,关键标签,key详见:Tags.Class。   peer 对端资源 用于拓扑图,若DB组件,需记录集群信息。   operationName 操作名称 若span=0,operationName将会搜索的下拉列表。   layer 显示 在链路页显示,详见SpanLayer.Class。      Trace:调用链,通过归属于其的Span来隐性的定义。一条Trace可被认为是一个由多个Span组成的有向无环图(DAG图),在SkyWalking链路模块你可以看到,Trace又由多个归属于其的trace segment组成。\n  Trace segment:Segment是SkyWalking中的一个概念,它应该包括单个OS进程中每个请求的所有范围,通常是基于语言的单线程。由多个归属于本线程操作的Span组成。\n  核心API 跨进程ContextCarrier核心API  为了实现分布式跟踪,需要绑定跨进程的跟踪,并且应该传播上下文 整个过程。 这就是ContextCarrier的职责。 以下是实现有关跨进程传播的步骤:  在客户端,创建一个新的空的ContextCarrier,将ContextCarrier所有信息放到HTTP heads、Dubbo attachments 或者Kafka messages。 通过服务调用,将ContextCarrier传递到服务端。 在服务端,在对应组件的heads、attachments或messages获取ContextCarrier所有消息。将服务端和客户端的链路信息绑定。    跨线程ContextSnapshot核心API  除了跨进程,跨线程也是需要支持的,例如异步线程(内存中的消息队列)和批处理在Java中很常见,跨进程和跨线程十分相似,因为都是需要传播 上下文。 唯一的区别是,不需要跨线程序列化。 以下是实现有关跨线程传播的步骤:  使用ContextManager#capture获取ContextSnapshot对象。 让子线程以任何方式,通过方法参数或由现有参数携带来访问ContextSnapshot。 在子线程中使用ContextManager#continued。    详尽的核心API相关知识,可点击阅读 《插件开发指南-中文版本》\n插件实践 Lettuce操作redis代码 @PostMapping(\u0026#34;/ping\u0026#34;) public String ping(HttpServletRequest request) throws ExecutionException, InterruptedException { RedisClient redisClient = RedisClient.create(\u0026#34;redis://\u0026#34; + \u0026#34;127.0.0.1\u0026#34; + \u0026#34;:6379\u0026#34;); StatefulRedisConnection\u0026lt;String, String\u0026gt; connection0 = redisClient.connect(); RedisAsyncCommands\u0026lt;String, String\u0026gt; asyncCommands0 = connection0.async(); AsyncCommand\u0026lt;String, String, String\u0026gt; future = (AsyncCommand\u0026lt;String, String, 
String\u0026gt;)asyncCommands0.set(\u0026#34;key_a\u0026#34;, \u0026#34;value_a\u0026#34;); future.onComplete(s -\u0026gt; OkHttpClient.call(\u0026#34;http://skywalking.apache.org\u0026#34;)); future.get(); connection0.close(); redisClient.shutdown(); return \u0026#34;pong\u0026#34;; } 插件源码架构 Lettuce对Redis封装与Redisson Redisson 类似,目的均是实现简单易用,且无学习曲线的Java的Redis客户端。所以要是先对Redis操作的拦截,需要学习对应客户端的源码。\n设计插件 理解插件实现过程,找到最佳InterceptPoint位置是实现插件融入SkyWalking的核心所在。\n代码实现 PR的url:Support lettuce plugin\n实践中遇到的问题  多线程编程使用debug断点会将链路变成同步,建议使用run模式增加log,或者远程debug来解决。 多线程编程,需要使用跨线程ContextSnapshot核心API,否则链路会断裂。 CompleteableCommand.onComplete方法有时会同步执行,这个和内部机制有关,有时候不分离线程。 插件编译版本若为1.7+,需要将插件放到可选插件中。因为sniffer支持的版本是1.6。  插件兼容 为了插件得到插件最终的兼容兼容版本,我们需要使用docker对所有插件版本的测试,具体步骤如下:\n 编写测试用例:关于如何编写测试用例,请按照如何编写文档来实现。 提供自动测试用例。 如:Redisson插件testcase 确保本地几个流行的插件版本,在本地运行起来是和自己的预期是一致的。 在提供自动测试用例并在CI中递交测试后,插件提交者会批准您的插件。 最终得到完整的插件测试报告。  Pull Request 提交PR 提交PR的时候,需要简述自己对插件的设计,这样有助于与社区的贡献者讨论完成codereview。\n申请自动化测试 测试用例编写完成后,可以申请自动化测试,在自己的PR中会生成插件兼容版本的报告。\n插件文档 插件文档需要更新:Supported-list.md相关插件信息的支持。\n插件如果为可选插件需要在agent-optional-plugins可选插件文档中增加对应的描述。\n注释 Lettuce是一个完全无阻塞的Redis客户端,使用netty构建,提供反应,异步和同步数据访问。了解细节可点击阅读 lettuce.io;\nOpenTracing是一个跨编程语言的标准,了解细节可点击阅读 《OpenTracing语义标准》;\nspan:org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan接口定义了所有Span实现需要完成的方法;\nRedisson是一个非常易用Java的Redis客户端, 它没有学习曲线,无需知道任何Redis命令即可开始使用它。了解细节可点击阅读 redisson.org;\n","title":"SkyWalking Java 插件贡献实践","url":"/zh/2019-01-21-agent-plugin-practice/"},{"content":"Jinlin Fu has contributed 4 new plugins, including gson, activemq, rabbitmq and canal, which made SkyWalking supporting all mainstream OSS MQ. Also provide several documents and bug fixes. The SkyWalking PPMC based on these, promote him as new committer. Welcome on board.\n","title":"Welcome Jinlin Fu as new committer","url":"/events/welcome-jinlin-fu-as-new-committer/"},{"content":" 作者:赵瑞栋 原文地址  引言 微服务框架落地后,分布式部署架构带来的问题就会迅速凸显出来。服务之间的相互调用过程中,如果业务出现错误或者异常,如何快速定位问题?如何跟踪业务调用链路?如何分析解决业务瓶颈?\u0026hellip;本文我们来看看如何解决以上问题。\n一、SkyWalking初探 Skywalking 简介 Skywalking是一款国内开源的应用性能监控工具,支持对分布式系统的监控、跟踪和诊断。\n它提供了如下的主要功能特性: Skywalking 技术架构 SW总体可以分为四部分:\n1.Skywalking Agent:使用Javaagent做字节码植入,无侵入式的收集,并通过HTTP或者gRPC方式发送数据到Skywalking Collector。\nSkywalking Collector :链路数据收集器,对agent传过来的数据进行整合分析处理并落入相关的数据存储中。 Storage:Skywalking的存储,时间更迭,sw已经开发迭代到了6.x版本,在6.x版本中支持以ElasticSearch、Mysql、TiDB、H2、作为存储介质进行数据存储。 UI :Web可视化平台,用来展示落地的数据。  Skywalking Agent配置 通过了解配置,可以对一个组件功能有一个大致的了解。让我们一起看一下skywalking的相关配置。\n解压开skywalking的压缩包,在agent/config文件夹中可以看到agent的配置文件。\n从skywalking支持环境变量配置加载,在启动的时候优先读取环境变量中的相关配置。\n agent.namespace: 跨进程链路中的header,不同的namespace会导致跨进程的链路中断 agent.service_name:一个服务(项目)的唯一标识,这个字段决定了在sw的UI上的关于service的展示名称 agent.sample_n_per_3_secs: 客户端采样率,默认是-1代表全采样 agent.authentication: 与collector进行通信的安全认证,需要同collector中配置相同 agent.ignore_suffix: 忽略特定请求后缀的trace collecttor.backend_service: agent需要同collector进行数据传输的IP和端口 logging.level: agent记录日志级别  skywalking agent使用javaagent无侵入式的配合collector实现对分布式系统的追踪和相关数据的上下文传递。\nSkywalking Collector关键配置 Collector支持集群部署,zookeeper、kubernetes(如果你的应用是部署在容器中的)、consul(GO语言开发的服务发现工具)是sw可选的集群管理工具,结合大家具体的部署方式进行选择。详细配置大家可以去Skywalking官网下载介质包进行了解。\nCollector端口设置\n downsampling: 采样汇总统计维度,会分别按照分钟、【小时、天、月】(可选)来统计各项指标数据。 通过设置TTL相关配置项可以对数据进行自动清理。  Skywalking 在6.X中简化了配置。collector提供了gRPC和HTTP两种通信方式。\nUI使用rest http通信,agent在大多数场景下使用grpc方式通信,在语言不支持的情况下会使用http通信。\n关于绑定IP和端口需要注意的一点是,通过绑定IP,agent和collector必须配置对应ip才可以正常通信。\nCollector存储配置\n在application.yml中配置的storage模块配置中选择要使用的数据库类型,并填写相关的配置信息。\nCollector 
Receiver\nReceiver是Skywalking在6.x提出的新的概念,负责从被监控的系统中接受指标数据。用户完全可以参照OpenTracing规范来上传自定义的监控数据。Skywalking官方提供了service-mesh、istio、zipkin的相关能力。\n现在Skywalking支持服务端采样,配置项为sampleRate,比例采样,如果配置为5000则采样率就是50%。\n关于采样设置的一点注意事项\n关于服务采样配置的一点建议,如果Collector以集群方式部署,比如:Acollector和Bcollector,建议Acollector.sampleRate = Bcollector.sampleRate。如果采样率设置不相同可能会出现数据丢失问题。\n假设Agent端将所有数据发送到后端Collector处,A采样率设置为30%,B采样率为50%。\n假设有30%的数据,发送到A上,这些数据被全部正确接受并存储,极端情况(与期望的采样数据量相同)下,如果剩下20%待采样的数据发送到了B,这个时候一切都是正常的,如果这20%中有一部分数据被送到了A那么,这些数据将是被忽略的,由此就会造成数据丢失。\n二、业务调用链路监控 Service Topology监控 调用链路监控可以从两个角度去看待。我们先从整体上来认识一下我们所监控的系统。\n通过给服务添加探针并产生实际的调用之后,我们可以通过Skywalking的前端UI查看服务之间的调用关系。\n我们简单模拟一次服务之间的调用。新建两个服务,service-provider以及service-consumer,服务之间简单的通过Feign Client 来模拟远程调用。\n从图中可以看到:\n 有两个服务节点:provider \u0026amp; consumer 有一个数据库节点:localhost【mysql】 一个注册中心节点  consumer消费了provider提供出来的接口。\n一个系统的拓扑图让我们清晰的认识到系统之间的应用的依赖关系以及当前状态下的业务流转流程。细心的可能发现图示节点consumer上有一部分是红色的,红色是什么意思呢?\n红色代表当前流经consumer节点的请求有一断时间内是响应异常的。当节点全部变红的时候证明服务现阶段内就彻底不可用了。运维人员可以通过Topology迅速发现某一个服务潜在的问题,并进行下一步的排查并做到预防。\nSkywalking Trace监控 Skywalking通过业务调用监控进行依赖分析,提供给我们了服务之间的服务调用拓扑关系、以及针对每个endpoint的trace记录。\n我们在之前看到consumer节点服务中发生了错误,让我们一起来定位下错误是发生在了什么地方又是什么原因呢?\n在每一条trace的信息中都可以看到当前请求的时间、GloableId、以及请求被调用的时间。我们分别看一看正确的调用和异常的调用。\nTrace调用链路监控 图示展示的是一次正常的响应,这条响应总耗时19ms,它有4个span:\n span1 /getStore = 19ms 响应的总流转时间 span2 /demo2/stores = 14ms feign client 开始调用远程服务后的响应的总时间 span3 /stores = 14ms 接口服务响应总时间 span4 Mysql = 1ms 服务提供端查询数据库的时间  这里span2和span3的时间表现相同,其实是不同的,因为这里时间取了整。\n在每个Span中可以查看当前Span的相关属性。\n 组件类型: SpringMVC、Feign Span状态: false HttpMethod: GET Url: http://192.168.16.125:10002/demo2/stores  这是一次正常的请求调用Trace日志,可能我们并不关心正常的时候,毕竟一切正常不就是我们期待的么!\n我们再来看下,异常状态下我们的Trace以及Span又是什么样的呢。\n发生错误的调用链中Span中的is error标识变为true,并且在名为Logs的TAB中可以看到错误发生的具体原因。根据异常情况我们就可以轻松定位到影响业务的具体原因,从而快速定位问题,解决问题。\n通过Log我们看到连接被拒,那么可能是我们的网络出现了问题(可能性小,因为实际情况如果网络出现问题我们连这个trace都看不到了),也有可能是服务端配置问题无法正确建立连接。通过异常日志,我们迅速就找到了问题的关键。\n实际情况是,我把服务方停掉了,做了一次简单的模拟。可见,通过拓扑图示我们可以清晰的看到众多服务中哪个服务是出现了问题的,通过trace日志我们可以很快就定位到问题所在,在最短的时间内解决问题。\n三、服务性能指标监控 Skywalking还可以查看具体Service的性能指标,根据相关的性能指标可以分析系统的瓶颈所在并提出优化方案。\nSkywalking 性能监控 在服务调用拓扑图上点击相应的节点我们可以看到该服务的\n SLA: 服务可用性(主要是通过请求成功与失败次数来计算) CPM: 每分钟调用次数 Avg Response Time: 平均响应时间  从应用整体外部来看我们可以监测到应用在一定时间段内的\n 服务可用性指标SLA 每分钟平均响应数 平均响应时间 服务进程PID 服务所在物理机的IP、HostName、Operation System  Service JVM信息监控 还可以监控到Service运行时的CPU、堆内存、非堆内存使用率、以及GC情况。这些信息来源于JVM。注意这里的数据可不是机器本身的数据。\n四、服务告警 前文我们提到了通过查看拓扑图以及调用链路可以定位问题,可是运维人员又不可能一直盯着这些数据,那么我们就需要告警能力,在异常达到一定阈值的时候主动的提示我们去查看系统状态。\n在Sywalking 6.x版本中新增了对服务状态的告警能力。它通过webhook的方式让我们可以自定义我们告警信息的通知方式。诸如:邮件通知、微信通知、短信通知等。\nSkywalking 服务告警 先来看一下告警的规则配置。在alarm-settings.xml中可以配置告警规则,告警规则支持自定义。\n一份告警配置由以下几部分组成:\n service_resp_time_rule:告警规则名称 ***_rule (规则名称可以自定义但是必须以’_rule’结尾 indicator-name:指标数据名称: 定义参见http://t.cn/EGhfbmd op: 操作符: \u0026gt; , \u0026lt; , = 【当然你可以自己扩展开发其他的操作符】 threshold:目标值:指标数据的目标数据 如sample中的1000就是服务响应时间,配合上操作符就是大于1000ms的服务响应 period: 告警检查周期:多久检查一次当前的指标数据是否符合告警规则 counts: 达到告警阈值的次数 silence-period:忽略相同告警信息的周期 message:告警信息 webhooks:服务告警通知服务地址  Skywalking通过HttpClient的方式远程调用在配置项webhooks中定义的告警通知服务地址。\n了解了SW所传送的数据格式我们就可以对告警信息进行接收处理,实现我们需要的告警通知服务啦!\n我们将一个服务停掉,并将另外一个服务的某个对外暴露的接口让他休眠一定的时间。然后调用一定的次数观察服务的状态信息以及告警情况。\n总结 本文简单的通过skwaylking的配置来对skywlaking的功能进行一次初步的了解,对skwaylking新提出的概念以及新功能进行简单的诠释,方便大家了解和使用。通过使用APM工具,可以让我们方便的查看微服务架构中系统瓶颈以及性能问题等。\n精选提问 问1:想问问选型的时候用pinpoint还是SK好?\n答:选型问题\n 要结合具体的业务场景, 比如你的代码运行环境 是java、php、net还是什么。 pinpoint在安装部署上要比skywalking略微复杂 pinpoint和sw支持的组件列表是不同的。 
https://github.com/apache/incubator-skywalking/blob/master/docs/en/setup/service-agent/java-agent/Supported-list.md你可以参照这里的支持列表对比下pinpoint的支持对象做一个简单对比。 sw经过测试在并发量较高的情况下比pinpoint的吞吐量更好一些。  问2:有没有指标统计,比如某个url 的top10 请求、响应最慢的10个请求?某个服务在整个链条中的耗时占比?\n答:1.sw自带有响应最慢的请求top10统计针对所有的endpoint的统计。 2.针对每个url的top10统计,sw本身没有做统计,数据都是现成的通过简单的检索就可以搜到你想要的结果。 3.没有具体的耗时占比,但是有具体总链路时间统计以及某个服务的耗时统计,至于占比自己算吧,可以看ppt中的调用链路监控的span时间解释。\n问3:能不能具体说一下在你们系统中的应用?\n答:EOS8LA版本中,我们整合sw对应用提供拓扑、调用链路、性能指标的监控、并在sw数据的基础上增加系统的维度。 当服务数很庞大的时候,整体的拓扑其实就是一张密密麻麻的蜘蛛网。我们可以通过系统来选择具体某个系统下的应用。 8LA中SW是5.0.0alpha版本,受限于sw功能,我们并没有提供告警能力,这在之后会是我们的考虑目标。\n问4:业务访问日志大概每天100G,kubernetes 环境中部署,使用稳定吗?\n答:监控数据没有长时间的存储必要,除非你有特定的需求。它有一定的时效性,你可以设置ttl自动清除过时信息。100g,es集群还是能轻松支撑的。\n问5:和pinpoint相比有什么优势吗?\n答:\n 部署方式、使用方式简单 功能特性支持的更多 高并发性能会更好一些  问6:skywalking的侵入式追踪功能方便进行单服务链的服务追踪。但是跨多台服务器多项目的整体服务链追踪是否有整体设计考虑?\n答:sw本身特性就是对分布式系统的追踪,他是无侵入式的。无关你的应用部署在多少台服务器上。\n问7:应用在加上代理之后性能会下降。请问您有什么解决方法吗?\n答:性能下降是在所难免的,但是据我了解,以及官方的测试,他的性能影响是很低的。这是sw的测试数据供你参考。 https://skywalkingtest.github.io/Agent-Benchmarks/README_zh.html。\n问8:有异构系统需求的话可以用sw吗?\n答:只要skywalking的探针支持的应该都是可以的。\n问9:sw对于商用的web中间件,如bes、tongweb、websphere、weblogic的支持如何?\n答:商业组件支持的比较少,因为涉及到相关license的问题,sw项目组需要获得他们的支持来进行数据上报,据我了解,支持不是很好。\n","title":"SkyWalking 微服务监控分析","url":"/zh/2019-01-03-monitor-microservice/"},{"content":"SkyWalking 依赖 elasticsearch 集群,如果 elasticsearch 安装有 x-pack 插件的话,那么就会存在一个 Basic 认证,导致 skywalking 无法调用 elasticsearch, 解决方法是使用 nginx 做代理,让 nginx 来做这个 Basic 认证,那么这个问题就自然解决了。\n方法如下:\n 安装 nginx   yum install -y nginx\n 配置 nginx  server { listen 9200 default_server; server_name _; location / { proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_pass http://localhost:9200; #Basic字符串就是使用你的用户名(admin),密码(12345)编码后的值 #注意:在进行Basic加密的时候要使用如下格式如:admin:123456 注意中间有个冒号 proxy_set_header Authorization \u0026#34;Basic YWRtaW4gMTIzNDU2\u0026#34;; } } 验证   curl localhost:9200\n { \u0026#34;name\u0026#34; : \u0026#34;Yd0rCp9\u0026#34;, \u0026#34;cluster_name\u0026#34; : \u0026#34;es-cn-4590xv9md0009doky\u0026#34;, \u0026#34;cluster_uuid\u0026#34; : \u0026#34;jAPLrqY5R6KWWgHnGCWOAA\u0026#34;, \u0026#34;version\u0026#34; : { \u0026#34;number\u0026#34; : \u0026#34;6.3.2\u0026#34;, \u0026#34;build_flavor\u0026#34; : \u0026#34;default\u0026#34;, \u0026#34;build_type\u0026#34; : \u0026#34;tar\u0026#34;, \u0026#34;build_hash\u0026#34; : \u0026#34;053779d\u0026#34;, \u0026#34;build_date\u0026#34; : \u0026#34;2018-07-20T05:20:23.451332Z\u0026#34;, \u0026#34;build_snapshot\u0026#34; : false, \u0026#34;lucene_version\u0026#34; : \u0026#34;7.3.1\u0026#34;, \u0026#34;minimum_wire_compatibility_version\u0026#34; : \u0026#34;5.6.0\u0026#34;, \u0026#34;minimum_index_compatibility_version\u0026#34; : \u0026#34;5.0.0\u0026#34; }, \u0026#34;tagline\u0026#34; : \u0026#34;You Know, for Search\u0026#34; } 看到如上结果那么恭喜你成功了。\n","title":"关于 ElasticSearch 因 basic 认证导致 SkyWalking 无法正常调用接口问题","url":"/zh/2019-01-02-skywalking-elasticsearch-basic/"},{"content":" 作者: Wu Sheng, tetrate, SkyWalking original creator GitHub, Twitter, Linkedin 翻译: jjlu521016  背景 在当前的微服务架构中分布式链路追踪是很有必要的一部分,但是对于一些用户来说如何去理解和使用分布式链路追踪的相关数据是不清楚的。 这个博客概述了典型的分布式跟踪用例,以及Skywalking的V6版本中新的可视化功能。我们希望新的用户通过这些示例来更好的理解。\n指标和拓扑图 跟踪数据支持两个众所周知的分析特性:指标和拓扑图\n指标: 每个service, service instance, endpoint的指标都是从跟踪中的入口span派生的。指标代表响应时间的性能。所以可以有一个平均响应时间,99%的响应时间,成功率等。它们按service, service instance, endpoint进行分解。\n拓扑图: 
拓扑表示服务之间的链接,是分布式跟踪最有吸引力的特性。拓扑结构允许所有用户理解分布式服务关系和依赖关系,即使它们是不同的或复杂的。这一点很重要,因为它为所有相关方提供了一个单一的视图,无论他们是开发人员、设计者还是操作者。\n这里有一个拓扑图的例子包含了4个项目,包括kafka和两个外部依赖。\n-在skywalking的可选择UI0RocketBot的拓扑图-\nTrace 在分布式链路追踪系统中,我们花费大量资源(CPU、内存、磁盘和网络)来生成、传输和持久跟踪数据。让我们试着回答为什么要这样做?我们可以用跟踪数据回答哪些典型的诊断和系统性能问题?\nSkywalking v6包含两种追踪视图:\n   TreeMode: 第一次提供,帮助您更容易识别问题。    ListMode: 常规的时间线视图,通常也出现在其他跟踪系统中,如Zipkin。    发生错误 在trace视图,最简单的部分是定位错误,可能是由代码异常或网络故障引起的。通过span详情提供的细节,ListMode和TreeMode都能够找到错误 -ListMode 错误span-\n-TreeMode 错误span-\n慢span 一个高优先级的特性是识别跟踪中最慢的span。这将使用应用程序代理捕获的执行持续时间。在旧的ListMode跟踪视图中,由于嵌套,父span几乎总是包括子span的持续时间。换句话说,一个缓慢的span通常会导致它的父节点也变慢,在Skywalking 6中,我们提供了 最慢的前5个span 过滤器来帮助你您直接定位span。\n-最慢的前5个span-\n太多子span 在某些情况下,个别持续时间很快,但跟踪速度仍然很慢,如: -没有慢span的追踪-\n如果要了解根问题是否与太多操作相关,请使用子范围号的Top 5 of children span number,筛选器显示每个span的子级数量,突出显示前5个。 -13个数据库访问相关的span-\n在这个截图中,有一个包含13个子项的span,这些子项都是数据库访问。另外,当您看到跟踪的概述时,这个2000ms跟踪的数据库花费了1380ms。 -1380ms花费在数据库访问-\n在本例中,根本原因是数据库访问太多。这在其他场景中也很常见,比如太多的RPC或缓存访问。\n链路深度 跟踪深度也与延迟有关。像太多子span的场景一样,每个span延迟看起来不错,但整个链路追踪的过程很慢。 -链路深度-\n上图所示,最慢的span小鱼500ms,对于2000毫秒的跟踪来说,速度并不太慢。当您看到第一行时,有四种不同的颜色表示这个分布式跟踪中涉及的四个services。每一个都需要100~400ms,这四个都需要近2000ms,从这里我们知道这个缓慢的跟踪是由一个序列中的3个RPC造成的。\n结束语 分布式链路追踪和APM 工具帮助我们确定造成问题的根源,允许开发和操作团队进行相应的优化。我们希望您喜欢这一点,并且喜欢Apache Skywalking和我们的新链路追踪可视化界面。如果你喜欢的话,在github上面给我们加start来鼓励我们\nSkywakling 6计划在2019年的1月底完成release。您可以通过以下渠道联系项目团队成员\n 关注 skywalking推特 订阅邮件:dev@skywalking.apache.org。发送邮件到 dev-subscribe@kywalking.apache.org 来订阅. 加入Gitter聊天室  ","title":"更容易理解将要到来的分布式链路追踪 6.0GA (翻译)","url":"/zh/2019-01-02-understand-trace-trans2cn/"},{"content":"Background Distributed tracing is a necessary part of modern microservices architecture, but how to understand or use distributed tracing data is unclear to some end users. This blog overviews typical distributed tracing use cases with new visualization features in SkyWalking v6. We hope new users will understand more through these examples.\nMetric and topology Trace data underpins in two well known analysis features: metric and topology\nMetric of each service, service instance, endpoint are derived from entry spans in trace. Metrics represent response time performance. So, you could have average response time, 99% response time, success rate, etc. These are broken down by service, service instance, endpoint.\nTopology represents links between services and is distributed tracing\u0026rsquo;s most attractive feature. Topologies allows all users to understand distributed service relationships and dependencies even when they are varied or complex. This is important as it brings a single view to all interested parties, regardless of if they are a developer, designer or operator.\nHere\u0026rsquo;s an example topology of 4 projects, including Kafka and two outside dependencies.\nTopology in SkyWalking optional UI, RocketBot\nTrace In a distributed tracing system, we spend a lot of resources(CPU, Memory, Disk and Network) to generate, transport and persistent trace data. Let\u0026rsquo;s try to answer why we do this? What are the typical diagnosis and system performance questions we can answer with trace data?\nSkyWalking v6 includes two trace views:\n TreeMode: The first time provided. Help you easier to identify issues. ListMode: Traditional view in time line, also usually seen in other tracing system, such as Zipkin.  Error occurred In the trace view, the easiest part is locating the error, possibly caused by a code exception or network fault. 
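For readers curious where these error marks come from on the agent side, here is a rough sketch (not the exact SkyWalking source) of how a Java agent plugin interceptor typically records an exception on the active span, which is what the UI later renders as an error span with its logs:

```java
import java.lang.reflect.Method;

import org.apache.skywalking.apm.agent.core.context.ContextManager;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;

// Sketch of the exception-handling hook a plugin usually implements.
// The active span is marked as failed and the throwable is attached as a span log,
// so the backend can flag the span (and its trace) as an error.
public class ExampleExceptionHandler {

    public void handleMethodException(Object objInst, Method method, Object[] allArguments,
        Class<?>[] argumentsTypes, Throwable t) {
        AbstractSpan activeSpan = ContextManager.activeSpan();
        activeSpan.errorOccurred();
        activeSpan.log(t);
    }
}
```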
Both ListMode and TreeMode can identify errors, while the span detail screen provides details.\nListMode error span\nTreeMode error span\nSlow span A high priority feature is identifying the slowest spans in a trace. This uses execution duration captured by application agents. In the old ListMode trace view, parent span almost always includes the child span\u0026rsquo;s duration, due to nesting. In other words, a slow span usually causes its parent to also become slow. In SkyWalking 6, we provide Top 5 of slow span filter to help you locate the spans directly.\nTop 5 slow span\nThe above screenshot highlights the top 5 slow spans, excluding child span duration. Also, this shows all spans' execution time, which helps identify the slowest ones.\nToo many child spans In some cases, individual durations are quick, but the trace is still slow, like this one:\nTrace with no slow span\nTo understand if the root problem is related to too many operations, use Top 5 of children span number. This filter shows the amount of children each span has, highlighting the top 5.\n13 database accesses of a span\nIn this screenshot, there is a span with 13 children, which are all Database accesses. Also, when you see overview of trace, database cost 1380ms of this 2000ms trace.\n1380ms database accesses\nIn this example, the root cause is too many database accesses. This is also typical in other scenarios like too many RPCs or cache accesses.\nTrace depth Trace depth is also related latency. Like the too many child spans scenario, each span latency looks good, but the whole trace is slow.\nTrace depth\nHere, the slowest spans are less than 500ms, which are not too slow for a 2000ms trace. When you see the first line, there are four different colors representing four services involved in this distributed trace. Every one of them costs 100~400ms. For all four, there nearly 2000ms. From here, we know this slow trace is caused by 3 RPCs in a serial sequence.\nAt the end Distributed tracing and APM tools help users identify root causes, allowing development and operation teams to optimize accordingly. We hope you enjoyed this, and love Apache SkyWalking and our new trace visualization. If so, give us a star on GitHub to encourage us.\nSkyWalking 6 is scheduled to release at the end of January 2019. You can contact the project team through the following channels:\n Follow SkyWalking twitter Subscribe mailing list: dev@skywalking.apache.org . Send to dev-subscribe@kywalking.apache.org to subscribe the mail list. Join Gitter room.  ","title":"Understand distributed trace easier in the incoming 6-GA","url":"/blog/2019-01-01-understand-trace/"},{"content":"6.0.0-beta release. Go to downloads page to find release tars.\nKey updates\n Bugs fixed, closed to GA New protocols provided, old still compatible. Spring 5 supported MySQL and TiDB as optional storage  ","title":"Release Apache SkyWalking APM 6.0.0-beta","url":"/events/release-apache-skywalking-apm-6-0-0-beta/"},{"content":"Based on his contributions. Including created RocketBot as our secondary UI, new website and very cool trace view page in next release. he has been accepted as SkyWalking PPMC. 
Welcome aboard.\n","title":"Welcome Yao Wang as a new PPMC","url":"/events/welcome-yao-wang-as-a-new-ppmc/"},{"content":"导读  SkyWalking 中 Java 探针是使用 JavaAgent 的两大字节码操作工具之一的 Byte Buddy(另外是 Javassist)实现的。项目还包含.Net core 和 Nodejs 自动探针,以及 Service Mesh Istio 的监控。总体上,SkyWalking 是一个多语言,多场景的适配,特别为微服务、云原生和基于容器架构设计的可观测性分析平台(Observability Analysis Platform)。 本文基于 SkyWalking 5.0.0-RC2 和 Byte Buddy 1.7.9 版本,会从以下几个章节,让大家掌握 SkyWalking Java 探针的使用,进而让 SkyWalking 在自己公司中的二次开发变得触手可及。  Byte Buddy 实现 JavaAgent 项目 迭代 JavaAgent 项目的方法论 SkyWalking agent 项目如何 Debug SkyWalking 插件开发实践   文章底部有 SkyWalking 和 Byte Buddy 相应的学习资源。  Byte Buddy 实现  首先如果你对 JavaAgent 还不是很了解可以先百度一下,或在公众号内看下《JavaAgent 原理与实践》简单入门下。 SpringMVC 分发请求的关键方法相信已经不用我在赘述了,那我们来编写 Byte Buddy JavaAgent 代码吧。  public class AgentMain { public static void premain(String agentOps, Instrumentation instrumentation) { new AgentBuilder.Default() .type(ElementMatchers.named(\u0026#34;org.springframework.web.servlet.DispatcherServlet\u0026#34;)) .transform((builder, type, classLoader, module) -\u0026gt; builder.method(ElementMatchers.named(\u0026#34;doDispatch\u0026#34;)) .intercept(MethodDelegation.to(DoDispatchInterceptor.class))) .installOn(instrumentation); } }  编写 DispatcherServlet doDispatch 拦截器代码(是不是跟 AOP 如出一辙)  public class DoDispatchInterceptor { @RuntimeType public static Object intercept(@Argument(0) HttpServletRequest request, @SuperCall Callable\u0026lt;?\u0026gt; callable) { final StringBuilder in = new StringBuilder(); if (request.getParameterMap() != null \u0026amp;\u0026amp; request.getParameterMap().size() \u0026gt; 0) { request.getParameterMap().keySet().forEach(key -\u0026gt; in.append(\u0026#34;key=\u0026#34; + key + \u0026#34;_value=\u0026#34; + request.getParameter(key) + \u0026#34;,\u0026#34;)); } long agentStart = System.currentTimeMillis(); try { return callable.call(); } catch (Exception e) { System.out.println(\u0026#34;Exception :\u0026#34; + e.getMessage()); return null; } finally { System.out.println(\u0026#34;path:\u0026#34; + request.getRequestURI() + \u0026#34; 入参:\u0026#34; + in + \u0026#34; 耗时:\u0026#34; + (System.currentTimeMillis() - agentStart)); } } }  resources/META-INF/MANIFEST.MF  Manifest-Version: 1.0 Premain-Class: com.z.test.agent.AgentMain Can-Redefine-Classes: true  pom.xml 文件  dependencies +net.bytebuddy.byte-buddy +javax.servlet.javax.servlet-api *scope=provided plugins +maven-jar-plugin *manifestFile=src/main/resources/META-INF/MANIFEST.MF +maven-shade-plugin *include:net.bytebuddy:byte-buddy:jar: +maven-compiler-plugin  小结:没几十行代码就完成了,通过 Byte Buddy 实现应用组件 SpringMVC 记录请求路径、入参、执行时间 JavaAgent 项目,是不是觉得自己很优秀。  持续迭代 JavaAgent  本章节主要介绍 JavaAgent 如何 Debug,以及持续集成的方法论。 首先我的 JavaAgent 项目目录结构如图所示: 应用项目是用几行代码实现的 SpringBootWeb 项目:  @SpringBootApplication(scanBasePackages = {\u0026#34;com\u0026#34;}) public class TestBootWeb { public static void main(String[] args) { SpringApplication.run(TestBootWeb.class, args); } @RestController public class ApiController { @PostMapping(\u0026#34;/ping\u0026#34;) public String ping(HttpServletRequest request) { return \u0026#34;pong\u0026#34;; } } }  下面是关键 JavaAgent 项目如何持续迭代与集成:  VM options增加:-JavaAgent:{$HOME}/Code/github/z_my_test/test-agent/target/test-agent-1.0-SNAPSHOT.jar=args Before launch 在Build之前增加: Working directory:{$HOME}/Code/github/incubator-skywalking Command line:-T 1C -pl test-agent -am clean package -Denforcer.skip=true -Dmaven.test.skip=true -Dmaven.compile.fork=true  小结:看到这里的将 JavaAgent 持续迭代集成方法,是不是瞬间觉得自己手心已经发痒起来,很想编写一个自己的 agent 项目了呢,等等还有一个好消息:test-demo 这 10 几行的代码实现的 Web 服务,居然有 5k 
左右的类可以使用 agent 增强。 注意 mvn 编译加速的命令是 maven3 + 版本以上才支持的哈。  SkyWalking Debug  峰回路转,到了文章的主题《SkyWalking 之高级用法》的正文啦。首先,JavaAgent 项目想 Debug,还需要将 agent 代码与接入 agent 项目至少在同一个工作空间内,网上方法有很多,这里我推荐大家一个最简单的方法。File-\u0026gt;New-\u0026gt;Module from Exisiting Sources… 引入 skywalking-agent 源码即可 详细的 idea 编辑器配置: 优化 SkyWalking agent 编译时间,我的集成时间优化到 30 秒左右:  VM options增加:-JavaAgent:-JavaAgent:{$HOME}/Code/github/incubator-skywalking/skywalking-agent/skywalking-agent.jar:不要用dist里面的skywalking-agent.jar,具体原因大家可以看看源码:apm-sniffer/apm-agent/pom.xml中的maven插件的使用。 Before launch 在Build之前增加: Working directory:{$HOME}/Code/github/incubator-skywalking Command line:-T 1C -pl apm-sniffer/apm-sdk-plugin -amd clean package -Denforcer.skip=true -Dmaven.test.skip=true -Dmaven.compile.fork=true: 这里我针对插件包,因为紧接着下文要开发插件 另外根pom注释maven-checkstyle-plugin也可加速编译 kob 之 SkyWalking 插件编写  kob(贝壳分布式作业调度框架)是贝壳找房项目微服务集群中的基础组件,通过编写贝壳分布式作业调度框架的 SkyWalking 插件,可以实时收集作业调度任务的执行链路信息,从而及时得到基础组件的稳定性,了解细节可点击阅读《贝壳分布式调度框架简介》。想详细了解 SkyWalking 插件编写可在文章底部参考链接中,跳转至对应的官方资源,好话不多说,代码一把唆起来。 apm-sdk-plugin pom.xml 增加自己的插件 model  \u0026lt;artifactId\u0026gt;apm-sdk-plugin\u0026lt;/artifactId\u0026gt; \u0026lt;modules\u0026gt; \u0026lt;module\u0026gt;kob-plugin\u0026lt;/module\u0026gt; ... \u0026lt;modules\u0026gt;  resources.skywalking-plugin.def 增加自己的描述  kob=org.apache.skywalking.apm.plugin.kob.KobInstrumentation  在 SkyWalking 的项目中,通过继承 ClassInstanceMethodsEnhancePluginDefine 可以定义需要拦截的类和增强的方法,编写作业调度方法的 instrumentation  public class KobInstrumentation extends ClassInstanceMethodsEnhancePluginDefine { private static final String ENHANCE_CLASS = \u0026#34;com.ke.kob.client.spring.core.TaskDispatcher\u0026#34;; private static final String INTERCEPT_CLASS = \u0026#34;org.apache.skywalking.apm.plugin.kob.KobInterceptor\u0026#34;; @Override protected ClassMatch enhanceClass() { return NameMatch.byName(ENHANCE_CLASS); } @Override protected ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return null; } @Override protected InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[] { new InstanceMethodsInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher() { return named(\u0026#34;dispatcher1\u0026#34;); } @Override public String getMethodsInterceptor() { return INTERCEPT_CLASS; } @Override public boolean isOverrideArgs() { return false; } } }; } }  通过实现 InstanceMethodsAroundInterceptor 后,定义 beforeMethod、afterMethod 和 handleMethodException 的实现方法,可以环绕增强指定目标方法,下面自定义 interceptor 实现 span 的跟踪(这里需要注意 SkyWalking 中 span 的生命周期,在 afterMethod 方法中结束 span)  public class KobInterceptor implements InstanceMethodsAroundInterceptor { @Override public void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable { final ContextCarrier contextCarrier = new ContextCarrier(); com.ke.kob.client.spring.model.TaskContext context = (TaskContext) allArguments[0]; CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(JSON.toJSONString(context.getUserParam())); } AbstractSpan span = ContextManager.createEntrySpan(\u0026#34;client:\u0026#34;+allArguments[1]+\u0026#34;,task:\u0026#34;+context.getTaskKey(), contextCarrier); span.setComponent(ComponentsDefine.TRANSPORT_CLIENT); SpanLayer.asRPCFramework(span); } @Override public Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, 
Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable { ContextManager.stopSpan(); return ret; } @Override public void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t) { } }  实现效果,将操作名改成任务执行节点 + 任务执行方法,实现 kob 的 SkyWalking 的插件编写,加上报警体系,可以进一步增加公司基础组件的稳定性。  参考链接  Apache SkyWalking Byte Buddy(runtime code generation for the Java virtual machine)  ","title":"SkyWalking apm-sniffer 原理学习与插件编写","url":"/zh/2018-12-21-skywalking-apm-sniffer-beginning/"},{"content":"搭建调试环境 阅读 SkyWalking 源码,从配置调试环境开始。\n一定一定一定不要干读代码,而是通过调试的方式。\n 01 通过 Skywalking-5.x 版本的源码构建并运行 👉:哔哩哔哩 | 腾讯视频 02 通过 Skywalking-6.x 版本的源码构建并运行 👉:哔哩哔哩 | 腾讯视频 03 Java 应用(探针)接入 Skywalking[6.x] 👉:哔哩哔哩 | 腾讯视频  SkyWalking 3.X 源码解析合集 虽然是基于 3.X 版本的源码解析,但是对于阅读 SkyWalking Java Agent 和插件部分,同样适用。\n对于 SkyWalking Collector 部分,可以作为一定的参考。\n 《SkyWalking 源码分析 —— 调试环境搭建》 《SkyWalking 源码分析 —— Agent 初始化》 《SkyWalking 源码分析 —— Agent 插件体系》 《SkyWalking 源码分析 —— Collector 初始化》 《SkyWalking 源码分析 —— Collector Cluster 集群管理》 《SkyWalking 源码分析 —— Collector Client Component 客户端组件》 《SkyWalking 源码分析 —— Collector Server Component 服务器组件》 《SkyWalking 源码分析 —— Collector Jetty Server Manager》 《SkyWalking 源码分析 —— Collector gRPC Server Manager》 《SkyWalking 源码分析 —— Collector Naming Server 命名服务》 《SkyWalking 源码分析 —— Collector Queue 队列组件》 《SkyWalking 源码分析 —— Collector Storage 存储组件》 《SkyWalking 源码分析 —— Collector Streaming Computing 流式处理(一)》 《SkyWalking 源码分析 —— Collector Streaming Computing 流式处理(二)》 《SkyWalking 源码分析 —— Collector Cache 缓存组件》 《SkyWalking 源码分析 —— Collector Remote 远程通信服务》 《SkyWalking 源码分析 —— DataCarrier 异步处理库》 《SkyWalking 源码分析 —— Agent Remote 远程通信服务》 《SkyWalking 源码分析 —— 应用于应用实例的注册》 《SkyWalking 源码分析 —— Agent DictionaryManager 字典管理》 《SkyWalking 源码分析 —— Agent 收集 Trace 数据》 《SkyWalking 源码分析 —— Agent 发送 Trace 数据》 《SkyWalking 源码分析 —— Collector 接收 Trace 数据》 《SkyWalking 源码分析 —— Collector 存储 Trace 数据》 《SkyWalking 源码分析 —— JVM 指标的收集与存储》 《SkyWalking 源码分析 —— 运维界面(一)之应用视角》 《SkyWalking 源码分析 —— 运维界面(二)之应用实例视角》 《SkyWalking 源码分析 —— 运维界面(三)之链路追踪视角》 《SkyWalking 源码分析 —— 运维界面(四)之操作视角》 《SkyWalking 源码分析 —— @Trace 注解想要追踪的任何方法》 《SkyWalking 源码分析 —— traceId 集成到日志组件》 《SkyWalking 源码分析 —— Agent 插件(一)之 Tomcat》 《SkyWalking 源码分析 —— Agent 插件(二)之 Dubbo》 《SkyWalking 源码分析 —— Agent 插件(三)之 SpringMVC》 《SkyWalking 源码分析 —— Agent 插件(四)之 MongoDB》  SkyWalking 6.X 源码解析合集  《SkyWalking 6.x 源码分析 —— 调试环境搭建》  ","title":"SkyWalking 源码解析合集","url":"/zh/2018-12-21-skywalking-source-code-read/"},{"content":"版本选择 我们采用的是 5.0.0-RC2 的版本,SkyWalking 的版本信息可以参考 https://github.com/apache/incubator-skywalking/blob/5.x/CHANGES.md\n那么为什么我们没有采用 5.1.0 版本呢,这是因为我们公司内部需要支持 es x-pack,但是在官方发布里面,没有支持 xpack 的版本。\n在 Apache SkyWalking 官方文档 https://github.com/CharlesMaster/incubator-skywalking/tree/master/docs/others/cn 中有提到,SkyWalking 5.x 仍受社区支持。\n对于用户计划从 5.x 升级到 6.x,您应该知道关于有一些概念的定义的变更。最重要的两个改变了的概念是:\n Application(在 5.x 中)更改为 Service(在 6.x 中),Application Instance 也更改为 Service Instance。 Service(在 5.x 中)更改为 Endpoint(在 6.x 中)。  图文详解 Apache SkyWalking 的监控界面由 Monitor 和 Trace 两者构成,Monitor 菜单又包括 Dashbord、Topology、Application、Service、Alarm 五个子菜单构成。本文就是围绕这些菜单分别逐一进行介绍。\nMonitor 当用户通过 SkyWalking 登陆界面使用用户名、密码登陆以后,就会默认进入到 SkyWalking 的 Monitor 下的 Dashboard 界面\nDashboard 下图就是用户登陆之后都会看到的关键 Dashboard 页面,在这个页面的下方的关键指标,图中都做了详细的解释。\n上图中 app 需要强调的是,52 个 app 并不代表 52 个应用,比如 paycenter 有两台 paycenter1 和 paycenter2 就算了 2 个 app,当然还有一些应用是 3 个以上的。在我们公司,paycenter1、paycenter2 这些运维都和我们跳板机管理平台上的名称设置的一样,约定大于配置,开发人员可以更加便捷的排查问题。\n 再次修正一下,关于 dashboard 页面的 app 数,语言类探针,是探针的 app_code 
来决定的。比如我们公司的线上配置就是 agent.application_code=auth-center-1\n 上图中需要解释两个概念:\n cpm 代表每分钟请求次数 SLA=(TRANSACTION_CALLS- TRANSACTION_ERROR_CALLS ) * 10000 ) / TRANSACTION_CALLS  该页面主要支持四个跳转:\n一、在上图中,App 板块上的帮助选项是可以直接跳转到 Application 监控页面的。 二、 Service 板块上的帮助选项是可以直接跳转到 Service 监控页面的。\n三、 Slow Service 列表中的每一个慢服务点击以后都会进入到其专项的 Service 监控页面。\n四、 Application Throughput 列表中的每一个 Application 点击以后也都是可以进入到其专项的 Application 监控页面。\n 关于 Application 和 Service 的详细介绍我们后续会展开\n 在 Dashboard 的页面上部分,还有一个选择功能模块: 左侧部分可以定期 refresh Dashboard 的数据,右侧则可以调整整体的查询区间。\nTopology 点击 Monitor 菜单下的 Topology 你会看到下面这张拓扑图\n当然这张图太过于夸张了,如果接入 SkyWalking 的应用并不是很多,会如下图所示: 左侧的三个小按钮可以调整你的视图,支持拖拽。右侧可以输入你所关心的应用名。比如我们输入一个支付和订单两个应用,左侧的拓扑图会变得更加清晰:\n另外,上图中的绿色圆圈都是可以点击的,如果你点击以后,还会出现节点信息: Application 点击 Monitor 菜单下的 Application 你会看到下面这张图,这张图里你可以看到的东西都做了注解。\n这张图里有一个惊喜,就是如果你点开 More Server Details,你可以看到更多的信息\n是的,除了 Host、IPv4、Pid、OS 以外,你还可以看到 CPU、Heap、Non-Heap、GC(Young GC、Old GC)等详细监控信息。\nService 点击 Monitor 菜单下的 Service 你会看到下面这张图,这张图里你可以看到的同样都做了注解。 关于 Dependency Map 这张图我们再补充一下,鼠标悬停可以看到每个阶段的执行时间,这是 Service 下的功能 我们点开图中该图中 Top 20 Slow Traces 下面的被我马赛克掉的 trace 的按钮框,可以看到如下更加详细的信息:\n这些信息可以帮助我们知道每一个方法在哪个阶段那个具体实现耗时了多久。\n如上图所示,每一行基本都是可以打开的,每一行都包含了 Tags、Logs 等监控内容\nAlarm 点击 Monitor 菜单下的 Alarm 你会看到告警菜单。目前 5.X 版本的还没有接入邮件、短信等告警方式,后续 6 支持 webhook,用户可以自己去接短信和邮件。\n告警内容中你可以看到 Applicaion、Server 和 Service 三个层面的告警内容\nTrace Trace 是一个非常实用的功能,用户可以根据精确的 TraceId 去查找\n也可以设定时间段去查找\n我在写使用手册时候,非常巧的是,看到了上图三起异常,于是我们往下拉列表看到了具体的数据\n点击进去,我们可以看到具体的失败原因 当然用户也可以直接将 Trace State 调整为 Error 级别进行查询\n再回顾一遍 一、首先我们进入首页:\n二、点击一下首页的 Slow Service 的 projectC,可以看到如下信息:\n三、如果点击首页的 Appliation Throughput 中的 projectD,可以看到如下信息:\n四、继续点进去右下角的这个 slow service 里的 Consumer,我们可以看到下图:\n参考资料  https://twitter.com/AsfSkyWalking/status/1013616673218179072 https://twitter.com/AsfSkyWalking/status/1013617100143800320  ","title":"Apache SkyWalking 5.0 中文版图文详解使用手册","url":"/zh/2018-12-18-apache-skywalking-5-0-userguide/"},{"content":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome aboard.\n","title":"Welcome Yixiong Cao as a new committer","url":"/events/welcome-yixiong-cao-as-a-new-committer/"},{"content":"Original link, Tetrate.io blog\nContext The integration of SkyWalking and Istio Service Mesh yields an essential open-source tool for resolving the chaos created by the proliferation of siloed, cloud-based services.\nApache SkyWalking is an open, modern performance management tool for distributed services, designed especially for microservices, cloud native and container-based (Docker, K8s, Mesos) architectures. We at Tetrate believe it is going to be an important project for understanding the performance of microservices. The recently released v6 integrates with Istio Service Mesh and focuses on metrics and tracing. It natively understands the most common language runtimes (Java, .Net, and NodeJS). With its new core code, SkyWalking v6 also supports Istrio telemetry data formats, providing consistent analysis, persistence, and visualization.\nSkyWalking has evolved into an Observability Analysis Platform that enables observation and monitoring of hundreds of services all at once. 
It promises solutions for some of the trickiest problems faced by system administrators using complex arrays of abundant services: Identifying why and where a request is slow, distinguishing normal from deviant system performance, comparing apples-to-apples metrics across apps regardless of programming language, and attaining a complete and meaningful view of performance.\nSkyWalking History Launched in China by Wu Sheng in 2015, SkyWalking started as just a distributed tracing system, like Zipkin, but with auto instrumentation from a Java agent. This enabled JVM users to see distributed traces without any change to their source code. In the last two years, it has been used for research and production by more than 50 companies. With its expanded capabilities, we expect to see it adopted more globally.\nWhat\u0026rsquo;s new Service Mesh Integration Istio has picked up a lot of steam as the framework of choice for distributed services. Based on all the interest in the Istio project, and community feedback, some SkyWalking (P)PMC members decided to integrate with Istio Service Mesh to move SkyWalking to a higher level.\nSo now you can use Skywalking to get metrics and understand the topology of your applications. This works not just for Java, .NET and Node using our language agents, but also for microservices running under the Istio service mesh. You can get a full topology of both kinds of applications.\nObservability analysis platform With its roots in tracing, SkyWalking is now transitioning into an open-standards based Observability Analysis Platform, which means the following:\n It can accept different kinds and formats of telemetry data from mesh like Istio telemetry. Its agents support various popular software technologies and frameworks like Tomcat, Spring, Kafka. The whole supported framework list is here. It can accept data from other compliant sources like Zipkin-formatted traces reported from Zipkin, Jaeger, or OpenCensus clients.  SkyWalking is logically split into four parts: Probes, Platform Backend, Storage and UI:\nThere are two kinds of probes:\n Language agents or SDKs following SkyWalking across-thread propagation formats and trace formats, run in the user’s application process. The Istio mixer adaptor, which collects telemetry from the Service Mesh.  The platform backend provides gRPC and RESTful HTTP endpoints for all SkyWalking-supported trace and metric telemetry data. For example, you can stream these metrics into an analysis system.\nStorage supports multiple implementations such as ElasticSearch, H2 (alpha), MySQL, and Apache ShardingSphere for MySQL Cluster. TiDB will be supported in next release.\nSkyWalking’s built-in UI with a GraphQL endpoint for data allows intuitive, customizable integration.\nSome examples of SkyWalking’s UI:\n Observe a Spring app using the SkyWalking JVM-agent   Observe on Istio without any agent, no matter what langugage the service is written in   See fine-grained metrics like request/Call per Minute, P99/95/90/75/50 latency, avg response time, heatmap   Service dependencies and metrics  Service Focused At Tetrate, we are focused on discovery, reliability, and security of your running services. 
This is why we are embracing SkyWalking, which makes service performance observable.\nBehind this admittedly cool UI, the aggregation logic is very easy to understand, making it easy to customize SkyWalking in its Observability Analysis Language (OAL) script.\nWe’ll post more about OAL for developers looking to customize SkyWalking, and you can read the official OAL introduction document.\nScripts are based on three core concepts:\n  Service represents a group of workloads that provide the same behaviours for incoming requests. You can define the service name whether you are using instrument agents or SDKs. Otherwise, SkyWalking uses the name you defined in the underlying platform, such as Istio.\n  Service Instance Each workload in the Service group is called an instance. Like Pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process. If you are using an instrument agent, an instance does map to one OS process.\n  Endpoint is a path in a certain service that handles incoming requests, such as HTTP paths or a gRPC service + method. Mesh telemetry and trace data are formatted as source objects (aka scope). These are the input for the aggregation, with the script describing how to aggregate, including input, conditions, and the resulting metric name. For example, a one-line script like service_resp_time = from(Service.latency).longAvg() aggregates the latency of a service into an average response-time metric.\n  Core Features The other core features in SkyWalking v6 are:\n Service, service instance, endpoint metrics analysis. Consistent visualization in Service Mesh and non-mesh deployments. Topology discovery, Service dependency analysis. Distributed tracing. Slow service and endpoint detection. Alarms.  Of course, SkyWalking has some more upgrades from v5, such as:\n ElasticSearch 6 as storage is supported. H2 storage implementor is back. Kubernetes cluster management is provided. You don’t need Zookeeper to keep the backend running in cluster mode. Totally new alarm core. Easier configuration. More cloud native style. MySQL will be supported in the next release.  Please: Test and Provide Feedback! We would love everyone to try out and test our new version. You can find everything you need in our Apache repository; read the document for further details. You can contact the project team through the following channels:\n Submit an issue on GitHub repository Mailing list: dev@skywalking.apache.org. Send to dev-subscribe@skywalking.apache.org to subscribe to the mailing list. Gitter Project twitter  Oh, and one last thing! If you like our project, don\u0026rsquo;t forget to give us a star on GitHub.\n","title":"SkyWalking v6 is Service Mesh ready","url":"/blog/2018-12-12-skywalking-service-mesh-ready/"},{"content":"Based on his contributions to the project, he has been accepted as SkyWalking committer. Welcome aboard.\n","title":"Welcome Jian Tan as a new committer","url":"/events/welcome-jian-tan-as-a-new-committer/"},{"content":"APM consistently compatible in language agent (Java, .Net, NodeJS), 3rd party format (Zipkin) and service mesh telemetry (Istio). Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking 6.0.0-alpha","url":"/events/release-apache-skywalking-6-0-0-alpha/"},{"content":"A stable version of 5.x release. Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking 5.0.0-GA","url":"/events/release-apache-skywalking-5-0-0-ga/"},{"content":"5.0.0-RC2 release. Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking 5.0.0-RC2","url":"/events/release-apache-skywalking-5-0-0-rc2/"},{"content":"5.0.0-beta2 release. 
Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking 5.0.0-beta2","url":"/events/release-apache-skywalking-5-0-0-beta2/"},{"content":"Translated by Sheng Wu.\nIn many big systems, distributed and especially microservice architectures have become more and more popular. With the increase of modules and services, one incoming request can cross dozens of services. How do you pinpoint issues in the online system, and the bottlenecks of the whole distributed system? This has become a very important problem that must be resolved.\nTo address these problems in distributed systems, Google published the paper “Dapper, a Large-Scale Distributed Systems Tracing Infrastructure”, which described the design and ideas behind building a distributed tracing system. Many projects inspired by it have been created in the last 10 years. In 2015, Apache SkyWalking was created by Wu Sheng, at first as a simple distributed tracing system, and open sourced. Through almost 3 years of development, by 2018, with its 5.0.0-alpha/beta releases, it had already become a cool open source APM system for cloud native, container-based systems.\nEarly this year, I was trying to build the Butterfly open source APM in .NET Core, and that is when I met the Apache SkyWalking team and its creator. I decided to join them and cooperate with them to provide a .NET Core agent natively compatible with SkyWalking. In April, I released the first version of the .NET Core agent, 0.1.0. After several weeks of iteration, we released 0.2.0, improving stability and adding HttpClient and database driver support.\nBefore using the .NET Core agent, we need to deploy the SkyWalking collector, UI and ElasticSearch 5.x. You can download the release versions here: http://skywalking.apache.org/downloads/ and follow the docs (Deploy-backend-in-standalone-mode, Deploy-backend-in-cluster-mode) to set up the backend.\nHere is a quick start showing how to monitor a demo distributed .NET Core application. I can say that it is easy.\n git clone https://github.com/OpenSkywalking/skywalking-netcore.git\n  cd skywalking-netcore\n  dotnet restore\n  dotnet run -p sample/SkyWalking.Sample.Backend\n dotnet run -p sample/SkyWalking.Sample.Frontend\n Now you can open http://localhost:5001/api/values to access the demo application. 
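To have a few sample traces ready before opening the UI, you could call the demo endpoint in a loop; a minimal sketch, assuming curl is installed (the endpoint URL is the one shown above, and the loop count is arbitrary):\n for i in $(seq 1 20); do curl -s http://localhost:5001/api/values; done\n 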
Then you can open SkyWalking WebUI http://localhost:8080\n  Overview of the whole distributed system   Topology of distributed system   Application view   Trace query   Span’s tags, logs and related traces   GitHub  Website: http://skywalking.apache.org/ SkyWalking Github Repo: https://github.com/apache/incubator-skywalking SkyWalking-NetCore Github Repo: https://github.com/OpenSkywalking/skywalking-netcore  ","title":"Apache SkyWalking provides open source APM and distributed tracing in .NET Core field","url":"/blog/2018-05-24-skywalking-net/"},{"content":"在大型网站系统设计中,随着分布式架构,特别是微服务架构的流行,我们将系统解耦成更小的单元,通过不断的添加新的、小的模块或者重用已经有的模块来构建复杂的系统。随着模块的不断增多,一次请求可能会涉及到十几个甚至几十个服务的协同处理,那么如何准确快速的定位到线上故障和性能瓶颈,便成为我们不得不面对的棘手问题。\n为解决分布式架构中复杂的服务定位和性能问题,Google 在论文《Dapper, a Large-Scale Distributed Systems Tracing Infrastructure》中提出了分布式跟踪系统的设计和构建思路。在这样的背景下,Apache SkyWalking 创建于 2015 年,参考 Dapper 论文实现分布式追踪功能,并逐渐进化为一个完整功能的 Application Performance Management 系统,用于追踪、监控和诊断大型分布式系统,尤其是容器和云原生下的微服务系统。\n今年初我在尝试使用.NET Core 构建分布式追踪系统 Butterfly 时接触到 SkyWalking 团队,开始和 SkyWalking 团队合作探索 SkyWalking 对.NET Core 的支持,并于 4 月发布 SkyWalking .NET Core 探针的 第一个版本,同时我也有幸加入 SkyWalking 团队共同进行 SkyWalking 在多语言生态的推动。在.NET Core 探针 v0.1 版本发布之后,得到了一些同学的尝鲜使用,也得到诸多改进的建议。经过几周的迭代,SkyWalking .NET Core 探针于今天发布 v0.2 release,在 v0.1 的基础上增加了\u0008稳定性和 HttpClient 及数据库驱动的追踪支持。\n在使用 SkyWalking 对.NET Core 应用追踪之前,我们需要先部署 SkyWalking Collector 收集分析 Trace 和 Elasticsearch 作为 Trace 数据存储。SkyWalking 支持 5.x 的 ES,所以我们需要下载安装对应版本的 ES,并配置 ES 的 cluster.name 为 CollectorDBCluster。然后部署 SkyWalking 5.0 beta 或更高版本 (下载地址:http://skywalking.apache.org/downloads/)。更详细的 Collector 部署文档,请参考 Deploy-backend-in-standalone-mode 和 Deploy-backend-in-cluster-mode。\n最后我们使用示例项目来演示在.NET Core 应用中使用 SkyWalking 进行追踪和监控,克隆 SkyWalking-NetCore 项目到本地:\ngit clone https://github.com/OpenSkywalking/skywalking-netcore.git 进入 skywalking-netcore 目录:\ncd skywalking-netcore 还原 nuget package:\ndotnet restore 启动示例项目:\ndotnet run -p sample/SkyWalking.Sample.Backend dotnet run -p sample/SkyWalking.Sample.Frontend 访问示例应用:\n打开 SkyWalking WebUI 即可看到我们的应用监控面板 http://localhost:8080\nDashboard 视图\nTopologyMap 视图\nApplication 视图\nTrace 视图\nTraceDetails 视图\nGitHub  SkyWalking Github Repo:https://github.com/apache/incubator-skywalking SkyWalking-NetCore Github Repo:https://github.com/OpenSkywalking/skywalking-netcore  ","title":"Apache SkyWalking 为.NET Core带来开箱即用的分布式追踪和应用性能监控","url":"/zh/2018-05-24-skywalking-net/"},{"content":"5.0.0-beta release. Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking 5.0.0-beta","url":"/events/release-apache-skywalking-5-0-0-beta/"},{"content":"5.0.0-alpha release. Go to downloads page to find release tars.\n","title":"Release Apache SkyWalking APM 5.0.0-alpha","url":"/events/release-apache-skywalking-apm-5-0-0-alpha/"},{"content":"","title":"","url":"/index.json"},{"content":"10.0.0 Project  Support Java 21 runtime. Support oap-java21 image for Java 21 runtime. Upgrade OTEL collector version to 0.92.0 in all e2e tests. Switch CI macOS runner to m1. Upgrade PostgreSQL driver to 42.4.4 to fix CVE-2024-1597. Remove CLI(swctl) from the image. Remove CLI_VERSION variable from Makefile build. Add BanyanDB to docker-compose quickstart. Bump up Armeria, jackson, netty, jetcd and grpc to fix CVEs.  OAP Server  Add layer parameter to the global topology graphQL query. Add is_present function in MQE for check if the list metrics has a value or not. Remove unreasonable default configurations for gRPC thread executor. 
Remove gRPCThreadPoolQueueSize (SW_RECEIVER_GRPC_POOL_QUEUE_SIZE) configuration. Allow excluding ServiceEntries in some namespaces when looking up ServiceEntries as a final resolution method of service metadata. Set up the length of source and dest IDs in relation entities of service, instance, endpoint, and process to 250(was 200). Support build Service/Instance Hierarchy and query. Change the string field in Elasticsearch storage from keyword type to text type if it set more than 32766 length. [Break Change] Change the configuration field of ui_template and ui_menu in Elasticsearch storage from keyword type to text. Support Service Hierarchy auto matching, add auto matching layer relationships (upper -\u0026gt; lower) as following:  MESH -\u0026gt; MESH_DP MESH -\u0026gt; K8S_SERVICE MESH_DP -\u0026gt; K8S_SERVICE GENERAL -\u0026gt; K8S_SERVICE   Add namespace suffix for K8S_SERVICE_NAME_RULE/ISTIO_SERVICE_NAME_RULE and metadata-service-mapping.yaml as default. Allow using a dedicated port for ALS receiver. Fix log query by traceId in JDBCLogQueryDAO. Support handler eBPF access log protocol. Fix SumPerMinFunctionTest error function. Remove unnecessary annotations and functions from Meter Functions. Add max and min functions for MAL down sampling. Fix critical bug of uncontrolled memory cost of TopN statistics. Change topN group key from StorageId to entityId + timeBucket. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  MYSQL -\u0026gt; K8S_SERVICE POSTGRESQL -\u0026gt; K8S_SERVICE SO11Y_OAP -\u0026gt; K8S_SERVICE VIRTUAL_DATABASE -\u0026gt; MYSQL VIRTUAL_DATABASE -\u0026gt; POSTGRESQL   Add Golang as a supported language for AMQP. Support available layers of service in the topology. Add count aggregation function for MAL Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  NGINX -\u0026gt; K8S_SERVICE APISIX -\u0026gt; K8S_SERVICE GENERAL -\u0026gt; APISIX   Add Golang as a supported language for RocketMQ. Support Apache RocketMQ server monitoring. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  ROCKETMQ -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; ROCKETMQ   Fix ServiceInstance in query. Mock /api/v1/status/buildinfo for PromQL API. Fix table exists check in the JDBC Storage Plugin. Fix day-based table rolling time range strategy in JDBC storage. Add maxInboundMessageSize (SW_DCS_MAX_INBOUND_MESSAGE_SIZE) configuration to change the max inbound message size of DCS. Fix Service Layer when building Events in the EventHookCallback. Add Golang as a supported language for Pulsar. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  RABBITMQ -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; RABBITMQ   Remove Column#function mechanism in the kernel. Make query readMetricValue always return the average value of the duration. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  KAFKA -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; KAFKA   Support ClickHouse server monitoring. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  CLICKHOUSE -\u0026gt; K8S_SERVICE VIRTUAL_DATABASE -\u0026gt; CLICKHOUSE   Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  PULSAR -\u0026gt; K8S_SERVICE VIRTUAL_MQ -\u0026gt; PULSAR   Add Golang as a supported language for Kafka. 
Support displaying the port services listen to from OAP and UI during server start. Refactor data-generator to support generating metrics. Fix AvgHistogramPercentileFunction legacy name. [Break Change] Labeled Metrics support multiple labels.  Storage: store all label names and values instead of only the values. MQE:  Support querying by multiple labels(name and value) instead using _ as the anonymous label name. aggregate_labels function support aggregate by specific labels. relabels function require target label and rename label name and value.   PromQL:  Support querying by multiple labels(name and value) instead using lables as the anonymous label name. Remove general labels labels/relabels/label function. API /api/v1/labels and /api/v1/label/\u0026lt;label_name\u0026gt;/values support return matched metrics labels.   OAL:  Deprecate percentile function and introduce percentile2 function instead.     Bump up Kafka to fix CVE. Fix NullPointerException in Istio ServiceEntry registry. Remove unnecessary componentIds as series ID in the ServiceRelationClientSideMetrics and ServiceRelationServerSideMetrics entities. Fix not throw error when part of expression not matched any expression node in the MQE and `PromQL. Remove kafka-fetcher/default/createTopicIfNotExist as the creation is automatically since #7326 (v8.7.0). Fix inaccuracy nginx service metrics. Fix/Change Windows metrics name(Swap -\u0026gt; Virtual Memory)  memory_swap_free -\u0026gt; memory_virtual_memory_free memory_swap_total -\u0026gt; memory_virtual_memory_total memory_swap_percentage -\u0026gt; memory_virtual_memory_percentage   Fix/Change UI init setting for Windows Swap -\u0026gt; Virtual Memory Fix Memory Swap Usage/Virtual Memory Usage display with UI init.(Linux/Windows) Fix inaccurate APISIX metrics. Fix inaccurate MongoDB Metrics. Support Apache ActiveMQ server monitoring. Add Service Hierarchy auto matching layer relationships (upper -\u0026gt; lower) as following:  ACTIVEMQ -\u0026gt; K8S_SERVICE   Calculate Nginx service HTTP Latency by MQE. MQE query: make metadata not return null. MQE labeled metrics Binary Operation: return empty value if the labels not match rather than report error. Fix inaccurate Hierarchy of RabbitMQ Server monitoring metrics. Fix inaccurate MySQL/MariaDB, Redis, PostgreSQL metrics. Support DoubleValue,IntValue,BoolValue in OTEL metrics attributes. [Break Change] gGRPC metrics exporter unified the metric value type and support labeled metrics. Add component definition(ID=152) for c3p0(JDBC3 Connection and Statement Pooling). Fix MQE top_n global query. Fix inaccurate Pulsar and Bookkeeper metrics. MQE support sort_values and sort_label_values functions.  UI  Fix the mismatch between the unit and calculation of the \u0026ldquo;Network Bandwidth Usage\u0026rdquo; widget in Linux-Service Dashboard. Add theme change animation. Implement the Service and Instance hierarchy topology. Support Tabs in the widget visible when MQE expressions. Support search on Marketplace. Fix default route. Fix layout on the Log widget. Fix Trace associates with Log widget. Add isDefault to the dashboard configuration. Add expressions to dashboard configurations on the dashboard list page. Update Kubernetes related UI templates for adapt data from eBPF access log. Fix dashboard K8S-Service-Root metrics expression. Add dashboards for Service/Instance Hierarchy. Fix MQE in dashboards when using Card widget. Optimize tooltips style. Fix resizing window causes the trace graph to display incorrectly. 
Add the not found page(404). Enhance VNode logic and support multiple Trace IDs in span\u0026rsquo;s ref. Add the layers filed and associate layers dashboards for the service topology nodes. Fix Nginx-Instance metrics to instance level. Update tabs of the Kubernetes service page. Add Airflow menu i18n. Add Support for dragging in the trace panel. Add workflow icon. Metrics support multiple labels. Support the SINGLE_VALUE for table widgets. Remove the General metric mode and related logical code. Remove metrics for unreal nodes in the topology. Enhance the Trace widget for batch consuming spans. Clean the unused elements in the UI-templates.  Documentation  Update the release doc to remove the announcement as the tests are through e2e rather than manually. Update the release notification mail a little. Polish docs structure. Move customization docs separately from the introduction docs. Add webhook/gRPC hooks settings example for backend-alarm.md. Begin the process of SWIP - SkyWalking Improvement Proposal. Add SWIP-1 Create and detect Service Hierarchy Relationship. Add SWIP-2 Collecting and Gathering Kubernetes Monitoring Data. Update the Overview docs to add the Service Hierarchy Relationship section. Fix incorrect words for backend-bookkeeper-monitoring.md and backend-pulsar-monitoring.md Document a new way to load balance OAP. Add SWIP-3 Support RocketMQ monitoring. Add OpenTelemetry SkyWalking Exporter deprecated warning doc. Update i18n for rocketmq monitoring. Fix: remove click event after unmounted. Fix: end loading without query results. Update nanoid version to 3.3.7. Update postcss version to 8.4.33. Fix kafka topic name in exporter doc. Fix query-protocol.md, make it consistent with the GraphQL query protocol. Add SWIP-5 Support ClickHouse Monitoring. Remove OpenTelemetry Exporter support from meter doc, as this has been flagged as unmaintained on OTEL upstream. Add doc of one-line quick start script for different storage types. Add FAQ for Why is Clickhouse or Loki or xxx not supported as a storage option?. Add SWIP-8 Support ActiveMQ Monitoring.  All issues and pull requests are here\n","title":"10.0.0","url":"/docs/main/next/en/changes/changes/"},{"content":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. 
Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. 
Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","title":"5.1.0","url":"/docs/main/latest/en/changes/changes-5.x/"},{"content":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. 
Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. 
Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","title":"5.1.0","url":"/docs/main/next/en/changes/changes-5.x/"},{"content":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  
5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. 
Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","title":"5.1.0","url":"/docs/main/v9.1.0/en/changes/changes-5.x/"},{"content":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. 
Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. 
Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  
Issues and Pull requests\n","title":"5.1.0","url":"/docs/main/v9.2.0/en/changes/changes-5.x/"},{"content":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. Support Integer Array, Long Array, String Array, Double Array in streaming data model. Support multiple entry span and multiple service name in one segment durtaion record. Use BulkProcessor to control the linear writing of data by multiple threads. Determine the log is enabled for the DEBUG level before printing message. Add static modifier to Logger. Add AspNet component. Filter inactive service in query. Support to query service based on Application. Fix RemoteDataMappingIdNotFoundException Exclude component-libaries.xml file in collector-*.jar, make sure it is in /conf only. 
Separate a single TTL in minute to in minute, hour, day, month metric and trace. Add order and status in trace query. Add folder lock to buffer folder. Modify operationName search from match to match_phrase. [Incubating] Add Zipkin span receiver. Support analysis Zipkin v1/v2 formats. [Incubating] Support sharding-sphere as storage implementor.  UI Changes  Support login and access control. Add new webapp.yml configuration file. Modify webapp startup script. Link to trace query from Thermodynamic graph Add application selector in service view. Add order and status in trace query.  Documentation  Add architecture design doc. Reformat deploy document. Adjust Tomcat deploy document. Remove all Apache licenses files in dist release packages. Update user cases. Update UI licenses. Add incubating sections in doc.  Issues and Pull requests\n5.0.0-beta UI -\u0026gt; Collector GraphQL query protocol  Replace all tps to throughput/cpm(calls per min) Add getThermodynamic service Update version to beta  Agent Changes  Support TLS. Support namespace. Support direct link. Support token. Add across thread toolkit. Add new plugin extend machenism to override agent core implementations. Fix an agent start up sequence bug. Fix wrong gc count. Remove system env override. Add Spring AOP aspect patch to avoid aop conflicts.  Collector Changes  Trace query based on timeline. Delete JVM aggregation in second. Support TLS. Support namespace. Support token auth. Group and aggregate requests based on response time and timeline, support Thermodynamic chart query Support component librariy setting through yml file for better extendibility. Optimize performance. Support short column name in ES or other storage implementor. Add a new cache module implementor, based on Caffeine. Support system property override settings. Refactor settings initialization. Provide collector instrumentation agent. Support .NET core component libraries. Fix divide zero in query. Fix Data don't remove as expected in ES implementor. Add some checks in collector modulization core. Add some test cases.  UI Changes  New trace query UI. New Application UI, merge server tab(removed) into application as sub page. New Topology UI. New response time / throughput TopN list. Add Thermodynamic chart in overview page. Change all tps to cpm(calls per minutes). Fix wrong osName in server view. Fix wrong startTime in trace view. Fix some icons internet requirements.  Documentation  Add TLS document. Add namespace document. Add direct link document. Add token document. Add across thread toolkit document. Add a FAQ about, Agent or collector version upgrade. Sync all English document to Chinese.  Issues and Pull requests\n5.0.0-alpha Agent -\u0026gt; Collector protocol  Remove C++ keywords Move Ref into Span from Segment Add span type, when register an operation  UI -\u0026gt; Collector GraphQL query protocol  First version protocol  Agent Changes  Support gRPC 1.x plugin Support kafka 0.11 and 1.x plugin Support ServiceComb 0.x plugin Support optional plugin mechanism. Support Spring 3.x and 4.x bean annotation optional plugin Support Apache httpcomponent AsyncClient 4.x plugin Provide automatic agent daily tests, and release reports here. Refactor Postgresql, Oracle, MySQL plugin for compatible. 
Fix jetty client 9 plugin error Fix async APIs of okhttp plugin error Fix log config didn\u0026rsquo;t work Fix a class loader error in okhttp plugin  Collector Changes  Support metrics analysis and aggregation for application, application instance and service in minute, hour, day and month. Support new GraphQL query protocol Support alarm Provide a prototype instrument for collector. Support node speculate in cluster and application topology. (Provider Node -\u0026gt; Consumer Node) -\u0026gt; (Provider Node -\u0026gt; MQ Server -\u0026gt; Consumer Node)  UI Changes  New 5.0.0 UI!!!  Issues and Pull requests\n","title":"5.1.0","url":"/docs/main/v9.3.0/en/changes/changes-5.x/"},{"content":"5.1.0 Agent Changes  Fix spring inherit issue in another way Fix classloader dead lock in jdk7+ - 5.x Support Spring mvc 5.x Support Spring webflux 5.x  Collector Changes  Fix too many open files. Fix the buffer file cannot delete.  5.0.0-GA Agent Changes  Add several package names ignore in agent settings. Classes in these packages would be enhanced, even plugin declared. Support Undertow 2.x plugin. Fix wrong class names of Motan plugin, not a feature related issue, just naming.  Collector Changes  Make buffer file handler close more safety. Fix NPE in AlarmService  Documentation  Fix compiling doc link. Update new live demo address.  5.0.0-RC2 Agent Changes  Support ActiveMQ 5.x Support RuntimeContext used out of TracingContext. Support Oracle ojdbc8 Plugin. Support ElasticSearch client transport 5.2-5.6 Plugin Support using agent.config with given path through system properties. Add a new way to transmit the Request and Response, to avoid bugs in Hytrix scenarios. Fix HTTPComponent client v4 operation name is empty. Fix 2 possible NPEs in Spring plugin. Fix a possible span leak in SpringMVC plugin. Fix NPE in Spring callback plugin.  Collector Changes  Add GZip support for Zipkin receiver. Add new component IDs for nodejs. Fix Zipkin span receiver may miss data in request. Optimize codes in heatmap calculation. Reduce unnecessary divide. Fix NPE in Alarm content generation. Fix the precision lost in ServiceNameService#startTimeMillis. Fix GC count is 0. Fix topology breaks when RPC client uses the async thread call.  UI Changes  Fix UI port can\u0026rsquo;t be set by startup script in Windows. Fix Topology self link error. Fix stack color mismatch label color in gc time chart.  Documentation  Add users list. Fix several document typo. Sync the Chinese documents. Add OpenAPM badge. Add icon/font documents to NOTICE files.  Issues and Pull requests\n5.0.0-beta2 UI -\u0026gt; Collector GraphQL query protocol  Add order and status in trace query.  Agent Changes  Add SOFA plugin. Add witness class for Kafka plugin. Add RuntimeContext in Context. Fix RuntimeContext fail in Tomcat plugin. Fix incompatible for getPropertyDescriptors in Spring core. Fix spymemcached plugin bug. Fix database URL parser bug. Fix StringIndexOutOfBoundsException when mysql jdbc url without databaseName。 Fix duplicate slash in Spring MVC plugin bug. Fix namespace bug. Fix NPE in Okhttp plugin when connect failed. FIx MalformedURLException in httpClientComponent plugin. Remove unused dependencies in Dubbo plugin. Remove gRPC timeout to avoid out of memory leak. Rewrite Async http client plugin. [Incubating] Add trace custom ignore optional plugin.  Collector Changes  Topology query optimization for more than 100 apps. Error rate alarm is not triggered. Tolerate unsupported segments. 
{"content":"6.6.0 Project  [IMPORTANT] Local spans and exit spans are no longer treated as endpoints detected at the client and local sides; only the entry span is the endpoint. This reduces register load and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS builds in GitHub Action CI. Support ElasticSearch 7 in the official dist. Hundreds of plugin cases have been added to the GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK threading classes. Add plugin for Armeria. Support setting the operation name in an async span. Enhance the webflux plugin, related to the Spring Gateway plugin. The webflux plugin is optional, due to the JDK8 requirement. Fix a possible deadlock. Fix NPE when OAL scripts differ across OAP nodes, mostly in the upgrading stage. Fix bug about wrong peer in the ES plugin. Fix NPE in Spring plugin. Fix wrong class name in the Dubbo 2.7 conflict patch. Fix Spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client-side endpoint register in service mesh. Service instance dependency and related metrics.
Support min func in OAL. Support apdex func in OAL. Support custom ES config settings at the index level. Envoy ALS proto upgraded. Update JODA lib due to bugs in UTC +13/+14. Support a configurable topN sample period. Ignore no-statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage.  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on the topo map. Service instance dependency view and related metrics. Support using URL parameters in the trace query page. Support apdex score in the service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide for the agent in the Jetty case. Modify Consul cluster doc. Add document about injecting traceId into logback with logstash in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into the main repo; this is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified on multiple JDK versions: 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configuring alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limits. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Make the scope of alarm messages more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Use application.yml for MySQLStorageProvider instead of datasource-settings.properties (#3564) Provide Consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher JDK versions (#3541) Sync latest ALS from Envoy API (#3507) Set telemetry instance id for Etcd and Nacos plugins (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before the streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap & jvm-maxnonheap is -1, free has no value. Fix bug: time select operation not in effect. Fix bug: language initialization failed. Fix bug: instance language not shown. Feature: support exporting the trace list display as PNG. Feature: Metrics comparison view. BugFix: Fix dashboard top throughput copy.  Java Agent  Spring async scenario optimization (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL SQL query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Support RequestRateLimiterGatewayFilterFactory (#3538) Kafka plugin compatible with KafkaTemplate (#3505) Add Pulsar APM plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide Cassandra Java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may not be unique (#3510) Add feature to control the maximum number of agent log files (#3475) Agent supports custom properties. 
(#3367) Add Light4j plugin (#3323)  Document  Remove Travis badge (#3763) Replace user wall with typical users in the readme page (#3719) Update Istio docs according to the latest Istio release (#3646) Use chart to deploy sw docs (#3573) Reorganize the doc, and provide a catalog (#3563) Committer vote and set up document. (#3496) Update ALS setup doc as Istio 1.3 released (#3470) Fill FAQ reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommended to upgrade due to the Pxx metrics calculation bug. Make the agent work in the JDK9+ module system.  Java Agent  Make the agent work in the JDK9+ module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceId into the logstash appender in logback. Add OperationName (including endpoint name) max length threshold. Support using regex to group operation names. Support Undertow routing handler. RestTemplate plugin supports operation name grouping. Fix ClassCastException in Webflux plugin. Order the zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatibility issue. Fix MySQL 5 plugin issue. Make the log writer cached. Optimize Spring Cloud Gateway plugin. Fix and improve the gRPC reconnect mechanism. Remove Disruptor dependency from the agent.  Backend  Fix Pxx (p50, p75, p90, p95, p99) metrics func bug. (Critical) Support Gateway in backend analysis, even when it doesn't have a suitable language agent. Support using HTTPS SSL to access ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Set up slow SQL length threshold. Fix TTL settings not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces are abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calendar js bug. Fix: the topo-services component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false sharing.  Java Agent  The Java agent supports JDK 9 - 12, but doesn't support the Java Module system yet. Support JVM class auto instrumentation, cataloged as bootstrap plugins. Support JVM HttpClient and HttpsClient plugin [Optional]. Support backend upgrade without requiring a reboot. Open Redefine and Retransform to other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender printing tid. Fix gRPC reconnect bug. Fix trace segment service doesn't report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. 
Backend stream flow refactor. High-dimensionality metrics (Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistence of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed; not compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self-observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm webhook. Fix buffer loss. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove the 'IDs can't be null' log, to avoid misleading. Fix provider being initialized repeatedly. Adjust providers conflict log message. Fix using wrong GC time metrics in OAL.  UI  Fix refresh not working after endpoint and instance changed. Fix endpoint selector bug. Fix wrong copy value in slow traces. Fix can't show trace when it is broken partially (because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do the upgrade. CI and integration tests provided by ASF INFRA. Plan to enhance tests, including e2e and plugin tests, in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement; 3-5 times quicker than before. Add Windows compile support in CI.  Java Agent  Support collecting SQL parameters in MySQL plugin [Optional]. Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x [Optional]. TracingContext performance improvement. Support Apache ShardingSphere (incubating) plugin. Support span#error in application toolkit (see the Java sketch after this entry). Fix OOM caused by empty exception stack. Fix wrong cause exception of stack in span log. Fix unclear running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span-not-stopped bug when doing HTTP forward. Fix lettuce plugin async command bug and NPE. Fix webflux plugin cast exception. [CI] Support import check.  Backend  Support time series ElasticSearch storage. Provide dynamic configuration module and implementation. The slow SQL threshold supports dynamic config today. The Dynamic Configuration module provides multiple implementations: DCS (gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts on topology edges. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, due to a too-long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug (no feature provided yet). Filter pods not in 'Running' phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Code refactor.  UI  Fix some 'ID is null' query(s). 
Page refactor, especially the time-picker; more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts on topology edges. Change the style of all P99/95/90/75/50 charts; more readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM; change document link. Add FAQ for ElasticSearch storage, and links from the document. Add FAQ for WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as an Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5, 8 plugins. Support setting instance id manually (optional). Support customized enhance trace plugin in optional list. Support setting peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin creating an unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+. Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can't run in JVM 6/7; remove module-info.class. Fix agent can't work well if there is whitespace in the agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU-limited environments. 3x improvement in the service mesh scenario (no trace) in an 8C16G VM. Significantly less CPU cost at low payload.\n Support database metrics and SLOW SQL detection. Support setting the max size of metadata query, and change the default to 5000 from 100. Support ElasticSearch template for new features in the future. Support shutting down Zipkin trace analysis, because it doesn't fit the production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepared for future versions. Support .NET CLR receiver. Support Jaeger trace format, no analysis. Support grouping endpoint names by regex rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support >, <, >=, <= in OAL. Support role mode in backend. Support Envoy metrics. Support querying segments by service instance. Support setting host/port manually at the cluster coordinator, rather than based on core settings. Make sure OAP shuts down when it faces a startup error. Support setting separated gRPC/Jetty ip:port for receivers; default still uses core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn't work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET. Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bound in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirements. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed; only considered to be provided by volunteers outside Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin (optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. 
Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity to add properties. Adjust service instance inventory entity to add properties. Add nodeType to service inventory entity. Fix segment lost when the operation name of local and exit spans is in ref. Fix the index names not showing right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor of the register API in the storage module. Fix H2 and MySQL endpoint dependency map missing the upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and remove unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019. User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2. Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in a compatible way; needs to be declared open explicitly. Support SpringMVC 5. Support webflux. Support a new way to override agent.config by system env. Span tags can be overridden explicitly. Fix Spring Controller inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader deadlock in certain situations. Fix agent log typo. Fix wrong component id in resttemplate plugin. Fix use of transform ignore() in the wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2, and Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput (cpm), successful rate (sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. The OAL generate tool has been integrated into the main repo, in the maven compile stage. Optimize trace paging query. Fix trace query not using fuzzy query in ElasticSearch storage. Fix alarm can't be activated in the right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero. Fix segment TTL is not working.  UI  Support service throughput (cpm), successful rate (sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn't work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is a totally new milestone for the project. At this point, we are not just a distributed tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform (OAP).\nThe core and most important features in v6 are\n Support collecting telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. 
Make SQL and cache analysis available at the core level, although not provided in this release. Provide Observability Analysis Language (OAL) to make analysis metrics customization available. New GraphQL query protocol, not bound to the UI now. UI topology is better now. New alarm core provided; in alpha, only on service related metrics.  All issues and pull requests are here\n","title":"6.6.0","url":"/docs/main/latest/en/changes/changes-6.x/"},
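The 6.2.0 item "Support span#error in application toolkit" in the entry above is only a one-liner. As a hedged illustration of what using that toolkit looks like, here is a minimal sketch assuming the apm-toolkit-trace dependency: the @Trace annotation, ActiveSpan, and TraceContext names come from that module, while the OrderService/placeOrder example and its operation name are hypothetical; exact signatures may differ between 6.x releases.

```java
import org.apache.skywalking.apm.toolkit.trace.ActiveSpan;
import org.apache.skywalking.apm.toolkit.trace.Trace;
import org.apache.skywalking.apm.toolkit.trace.TraceContext;

public class OrderService {

    // @Trace asks the agent to create a local span around this method.
    @Trace(operationName = "OrderService/placeOrder")
    public void placeOrder(String orderId) {
        ActiveSpan.tag("order.id", orderId); // tags show up on the active span in the trace view
        try {
            process(orderId);
        } catch (RuntimeException e) {
            ActiveSpan.error(e); // span#error: mark the active span as errored and attach the exception
            throw e;
        }
        // The trace id can be pulled out for correlation with business logs.
        System.out.println("traceId=" + TraceContext.traceId());
    }

    private void process(String orderId) { /* business logic */ }
}
```

The point of span#error is that a caught-and-rethrown (or swallowed) exception can still flag the span, so the trace view shows the failure even when the agent's automatic instrumentation never sees the throw.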
(#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. 
Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. 
Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. 
Support ElasticSearch template for new features in the future. Support shutting down Zipkin trace analysis, because it doesn\u0026rsquo;t fit the production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided; prepared for future versions. Support .NET CLR receiver. Support Jaeger trace format, no analysis. Support grouping endpoint names by regex rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support querying segments by service instance. Support setting host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shuts down when it faces a startup error. Support setting separate gRPC/Jetty ip:port for receiver; default still uses core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search not working as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET. Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consumption.  UI  RocketBot UI has been accepted and bound in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirements. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed; only considered to be provided by volunteers outside of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin (optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. 
And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in Istio receiver. Support service throughput (cpm), successful rate (sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generation tool has been integrated into the main repo, in the maven compile stage. Optimize trace paging query. Fix trace query not using fuzzy query in ElasticSearch storage. Fix alarm not being activated in the right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero. Fix segment TTL not working.  UI  Support service throughput (cpm), successful rate (sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link not working right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is a totally new milestone for the project. At this point, we are not just a distributed tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform (OAP).\nThe core and most important features in v6 are\n Support collecting telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available at core level, although not provided in this release. Provide Observability Analysis Language (OAL) to make analysis metrics customization available. New GraphQL query protocol. Not bound with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","title":"6.6.0","url":"/docs/main/next/en/changes/changes-6.x/"},
Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. 
(#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. 
Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). 
Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. 
Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. 
Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","title":"6.6.0","url":"/docs/main/v9.1.0/en/changes/changes-6.x/"},{"content":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. 
(#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. (#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. 
Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. 
Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. 
Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. 
And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","title":"6.6.0","url":"/docs/main/v9.2.0/en/changes/changes-6.x/"},{"content":"6.6.0 Project  [IMPORTANT] Local span and exit span are not treated as endpoint detected at client and local. Only entry span is the endpoint. Reduce the load of register and memory cost.   Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds plugin cases have been added in GitHub Action CI process.  Java Agent  Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support set operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is in optional, due to JDK8 required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.  OAP-Backend  Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL Support apdex func in OAL Support custom ES config setting at the index level. Envoy ALS proto upgraded. 
Update JODA lib as bugs in UTC +13/+14. Support topN sample period configurable. Ignore no statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into the logback with logstack in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into main repo, and is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configure alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limit. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Makes the scope of alarm message more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix more reasonable error (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instanced id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap is -1, free is no value Fix bug: time select operation not in effect Fix bug: language initialization failed Fix bug: not show instance language Feature: support the trace list display export png Feature: Metrics comparison view BugFix: Fix dashboard top throughput copy  Java Agent  Spring async scenario optimize (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Supporting RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Spring-cloud-gateway traceId does not transmit #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may be not unique (#3510) Add feature to control the maximum agent log files (#3475) Agent support custom properties. 
(#3367) Add Light4j plugin (#3323)  Document  Remove travis badge (#3763) Replace user wall to typical users in readme page (#3719) Update istio docs according latest istio release (#3646) Use chart deploy sw docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and set up document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill faq reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Highly recommend to upgrade due to Pxx metrics calculation bug. Make agent working in JDK9+ Module system.  Java Agent  Make agent working in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceid into logstack appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation name. Support Undertow routing handler. RestTemplate plugin support operation name grouping. Fix ClassCastException in Webflux plugin. Ordering zookeeper server list, to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug.(Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have suitable language agent. Support using HTTPs SSL accessing ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Setup slow SQL length threshold. Fix TTL settings is not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calender js bug. Fix \u0026ldquo;The \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin develop doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but don\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender print tid Fix gRPC reconnect bug. Fix trace segment service doesn\u0026rsquo;t report onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. 
Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). 
Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. 
Fix a missing check in ignore plugin.
Backend
Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix segment lost when operation names of local and exit spans are in ref. Fix index names not showing right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map missing the upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and remove unnecessary inventory register operations.
UI
Add new trace view. Add word-break to tag value.
Document
Add two startup modes document. Add PHP agent links. Add some CN documents. Update year to 2019. User wall updated. Fix a wrong description in the how-to-build doc.
All issues and pull requests are here
6.0.0-beta
Protocol
Provide Trace Data Protocol v2. Provide SkyWalking Cross Process Propagation Headers Protocol v2.
Java Agent
Support Trace Data Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible mode; need to declare it open explicitly. Support SpringMVC 5. Support webflux. Support a new way to override agent.config by system env. Span tags can be overridden explicitly. Fix Spring Controller inheritance issue. Fix ElasticSearch plugin NPE. Fix agent classloader deadlock in certain situations. Fix agent log typo. Fix wrong component id in RestTemplate plugin. Fix wrong usage of transform ignore(). Fix H2 query bug.
Backend
Support Trace Data Protocol v2, and Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput (cpm), successful rate (sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into the main repo, in the maven compile stage. Optimize trace paging query. Fix trace query not using fuzzy query in ElasticSearch storage. Fix alarm not being activated in the right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero. Fix segment TTL not working.
UI
Support service throughput (cpm), successful rate (sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link not working right. Fix trace stack style. Fix CI.
Document
Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.
All issues and pull requests are here
6.0.0-alpha
SkyWalking 6 is a totally new milestone for the project. At this point, we are not just a distributed tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform (OAP).
The core and most important features in v6 are:
Support collecting telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. Make SQL and cache analysis available at the core level, although not provided in this release. Provide Observability Analysis Language (OAL) to make analysis metrics customization available (see the short OAL sketch below). New GraphQL query protocol, not bound to the UI now. UI topology is better now. New alarm core provided; in alpha, only on service related metrics.
All issues and pull requests are here
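Several items in these notes reference OAL: the new runtime engine in 6.3.0, the disable statement and the >, <, >=, <= filter operators in 6.1.0, and the language itself introduced in 6.0.0-alpha. As a rough illustration of how those pieces fit together, here is a minimal OAL sketch; the metric names are illustrative only, and the exact scopes and function signatures may differ between the releases above:

```
// Average latency per endpoint, aggregated with the longAvg function.
endpoint_avg = from(Endpoint.latency).longAvg();

// Comparison operators (>, <, >=, <=) are usable in filter expressions;
// p99(10) keeps the 99th percentile with 10 ms precision.
endpoint_slow_p99 = from(Endpoint.latency).filter(latency > 1000).p99(10);

// The disable statement switches off an existing analysis stream entirely.
disable(segment);
```

Definitions like these are compiled by the OAL runtime engine mentioned above, so adding or removing one changes which metrics the backend generates without touching Java code.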
","title":"6.6.0","url":"/docs/main/v9.3.0/en/changes/changes-6.x/"},{"content":"6.6.0
Project
[IMPORTANT] Local span and exit span are not treated as endpoints detected at client and local. Only the entry span is the endpoint. Reduce the load of register and memory cost.
Support MiniKube, Istio and SkyWalking on K8s deployment in CI. Support Windows and MacOS build in GitHub Action CI. Support ElasticSearch 7 in official dist. Hundreds of plugin cases have been added in the GitHub Action CI process.
Java Agent
Remove the local/exit span operation name register mechanism. Add plugin for JDK Threading classes. Add plugin for Armeria. Support setting operation name in async span. Enhance webflux plugin, related to Spring Gateway plugin. Webflux plugin is optional, due to JDK 8 being required. Fix a possible deadlock. Fix NPE when OAL scripts are different in different OAP nodes, mostly in upgrading stage. Fix bug about wrong peer in ES plugin. Fix NPE in Spring plugin. Fix wrong class name in Dubbo 2.7 conflict patch. Fix spring annotation inheritance problem.
OAP-Backend
Remove the local/exit span operation name register mechanism. Remove client side endpoint register in service mesh. Service instance dependency and related metrics. Support min func in OAL. Support apdex func in OAL. Support custom ES config setting at the index level. Envoy ALS proto upgraded.
Update JODA lib due to bugs in UTC +13/+14. Support configurable topN sample period. Ignore no-statement DB operations in slow SQL collection. Fix bug in docker-entrypoint.sh when using MySQL as storage.  UI  Service topology enhancement. Dive into service, instance and endpoint metrics on topo map. Service instance dependency view and related metrics. Support using URL parameter in trace query page. Support apdex score in service page. Add service dependency metrics into metrics comparison. Fix alarm search not working.  Document  Update user list and user wall. Add document link for CLI. Add deployment guide of agent in Jetty case. Modify Consul cluster doc. Add document about injecting traceId into logback with logstash in JSON format. ElementUI license and dependency added.  All issues and pull requests are here\n6.5.0 Project  TTL E2E test (#3437) Test coverage is back in pull request check status (#3503) Plugin tests begin to be migrated into the main repo, and this is in process. (#3528, #3756, #3751, etc.) Switch to SkyWalking CI (exclusive) nodes (#3546) MySQL storage e2e test. (#3648) E2E tests are verified in multiple jdk versions, jdk 8, 9, 11, 12 (#3657) Jenkins build jobs run only when necessary (#3662)  OAP-Backend  Support dynamically configuring alarm settings (#3557) Language of instance could be null (#3485) Make query max window size configurable. (#3765) Remove two max size 500 limits. (#3748) Parameterize the cache size. (#3741) ServiceInstanceRelation set error id (#3683) Make the scope of alarm messages more semantic. (#3680) Add register persistent worker latency metrics (#3677) Fix: more reasonable error messages (#3619) Add GraphQL getServiceInstance instanceUuid field. (#3595) Support namespace in Nacos cluster/configuration (#3578) Instead of datasource-settings.properties, use application.yml for MySQLStorageProvider (#3564) Provide consul dynamic configuration center implementation (#3560) Upgrade guava version to support higher jdk version (#3541) Sync latest als from envoy api (#3507) Set telemetry instance id for Etcd and Nacos plugin (#3492) Support timeout configuration in agent and backend. (#3491) Make sure the cluster register happens before streaming process. (#3471) Agent supports custom properties. (#3367) Miscellaneous bug fixes (#3567)  UI  Feature: node detail display in topo circle-chart view. BugFix: the jvm-maxheap \u0026amp; jvm-maxnonheap show -1, and free has no value. Fix bug: time select operation not taking effect. Fix bug: language initialization failed. Fix bug: instance language not shown. Feature: support exporting the trace list display as PNG. Feature: Metrics comparison view. BugFix: fix dashboard top throughput copy.  Java Agent  Spring async scenario optimization (#3723) Support log4j2 AsyncLogger (#3715) Add config to collect PostgreSQL sql query params (#3695) Support namespace in Nacos cluster/configuration (#3578) Provide plugin for ehcache 2.x (#3575) Support RequestRateLimiterGatewayFilterFactory (#3538) Kafka-plugin compatible with KafkaTemplate (#3505) Add pulsar apm plugin (#3476) Fix Spring-cloud-gateway traceId not transmitting #3411 (#3446) Gateway compatible with downstream loss (#3445) Provide cassandra java driver 3.x plugin (#3410) Fix SpringMVC4 NoSuchMethodError (#3408) BugFix: endpoint grouping rules may not be unique (#3510) Add feature to control the maximum agent log files (#3475) Agent supports custom properties. 
(#3367) Add Light4j plugin (#3323)  Document  Remove Travis badge (#3763) Replace user wall with typical users in readme page (#3719) Update istio docs according to the latest istio release (#3646) Use chart to deploy sw in docs (#3573) Reorganize the doc, and provide catalog (#3563) Committer vote and setup document. (#3496) Update als setup doc as istio 1.3 released (#3470) Fill FAQ reply in official document. (#3450)  All issues and pull requests are here\n6.4.0 Project  Upgrade is highly recommended due to the Pxx metrics calculation bug. Make agent work in JDK9+ Module system.  Java Agent  Make agent work in JDK9+ Module system. Support Kafka 2.x client libs. Log error in OKHTTP OnFailure callback. Support injecting traceId into logstash appender in logback. Add OperationName(including endpoint name) length max threshold. Support using Regex to group operation names. Support Undertow routing handler. RestTemplate plugin supports operation name grouping. Fix ClassCastException in Webflux plugin. Order the zookeeper server list to make it better in topology. Fix a Dubbo plugin incompatible issue. Fix MySQL 5 plugin issue. Make log writer cached. Optimize Spring Cloud Gateway plugin. Fix and improve gRPC reconnect mechanism. Remove Disruptor dependency from agent.  Backend  Fix Pxx(p50,p75,p90,p95,p99) metrics func bug. (Critical) Support Gateway in backend analysis, even when it doesn\u0026rsquo;t have a suitable language agent. Support using HTTPS SSL to access ElasticSearch storage. Support Zookeeper ACL. Make alarm records listed in order. Fix Pxx data persistence failure in some cases. Fix some bugs in MySQL storage. Set up slow SQL length threshold. Fix TTL settings not working as expected. Remove scope-meta file.  UI  Enhance alarm page layout. Support trace tree chart resize. Support trace auto completion when partial traces are abandoned somehow. Fix dashboard endpoint slow chart. Add radial chart in topology page. Add trace table mode. Fix topology page bug. Fix calendar js bug. Fix: the \u0026ldquo;topo-services\u0026rdquo; component did not update the data in time after modifying the time range on the topology page.  Document  Restore the broken Istio setup doc. Add etcd config center document. Correct span_limit_per_segment default value in document. Enhance plugin development doc. Fix error description in build document.  All issues and pull requests are here\n6.3.0 Project  e2e tests have been added, and verify every pull request. Use ArrayList to replace LinkedList in DataCarrier for much better performance. Add plugin instrumentation definition check in CI. DataCarrier performance improvement by avoiding false-sharing.  Java Agent  Java agent supports JDK 9 - 12, but doesn\u0026rsquo;t support Java Module yet. Support JVM class auto instrumentation, cataloged as bootstrap plugin. Support JVM HttpClient and HttpsClient plugin.[Optional] Support backend upgrade without rebooting required. Open Redefine and Retransform by other agents. Support Servlet 2.5 in Jetty, Tomcat and SpringMVC plugins. Support Spring @Async plugin. Add new config item to restrict the length of span#peer. Refactor ContextManager#stopSpan. Add gRPC timeout. Support Logback AsyncAppender printing tid. Fix gRPC reconnect bug. Fix trace segment service not reporting onComplete. Fix wrong logger class name. Fix gRPC plugin bug. Fix ContextManager.activeSpan() API usage error.  Backend  Support agent reset command downstream when the storage is erased, mostly because of backend upgrade. 
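Several items above (logstash appender traceId injection in 6.4.0, Logback AsyncAppender tid printing in 6.3.0) are normally wired up in logback configuration rather than in application code. Purely as a hedged Java-side illustration (assuming the toolkit's TraceContext.traceId() accessor; PaymentHandler is hypothetical), the trace id can also be pulled into a log line directly:

```java
import org.apache.skywalking.apm.toolkit.trace.TraceContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PaymentHandler {
    private static final Logger LOG = LoggerFactory.getLogger(PaymentHandler.class);

    public void handle(String paymentId) {
        // TraceContext.traceId() (assumed toolkit API) returns the id of the
        // trace the agent is currently propagating, or a placeholder when no
        // trace is active, so log lines can be correlated with trace queries.
        LOG.info("tid={} handling payment {}", TraceContext.traceId(), paymentId);
    }
}
```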
Backend stream flow refactor. High dimensionality metrics(Hour/Day/Month) are changed to lower priority, to ease the storage payload. Add OAP metrics cache to ease the storage query payload and improve performance. Remove DataCarrier in trace persistent of ElasticSearch storage, by leveraging the elasticsearch bulk queue. OAP internal communication protocol changed. Don\u0026rsquo;t be compatible with old releases. Improve ElasticSearch storage bulk performance. Support etcd as dynamic configuration center. Simplify the PxxMetrics and ThermodynamicMetrics functions for better performance and GC. Support JVM metrics self observability. Add the new OAL runtime engine. Add gRPC timeout. Add Charset in the alarm web hook. Fix buffer lost. Fix dirty read in ElasticSearch storage. Fix bug of cluster management plugins in un-Mixed mode. Fix wrong logger class name. Fix delete bug in ElasticSearch when using namespace. Fix MySQL TTL failure. Totally remove IDs can't be null log, to avoid misleading. Fix provider has been initialized repeatedly. Adjust providers conflict log message. Fix using wrong gc time metrics in OAL.  UI  Fix refresh is not working after endpoint and instance changed. Fix endpoint selector but. Fix wrong copy value in slow traces. Fix can\u0026rsquo;t show trace when it is broken partially(Because of agent sampling or fail safe). Fix database and response time graph bugs.  Document  Add bootstrap plugin development document. Alarm documentation typo fixed. Clarify the Docker file purpose. Fix a license typo.  All issues and pull requests are here\n6.2.0 Project  ElasticSearch implementation performance improved, and CHANGED totally. Must delete all existing indexes to do upgrade. CI and Integration tests provided by ASF INFRA. Plan to enhance tests including e2e, plugin tests in all pull requests, powered by ASF INFRA. DataCarrier queue write index controller performance improvement. 3-5 times quicker than before. Add windows compile support in CI.  Java Agent  Support collect SQL parameter in MySQL plugin.[Optional] Support SolrJ plugin. Support RESTEasy plugin. Support Spring Gateway plugin for 2.1.x[Optional] TracingContext performance improvement. Support Apache ShardingSphere(incubating) plugin. Support span#error in application toolkit. Fix OOM by empty stack of exception. FIx wrong cause exception of stack in span log. Fix unclear the running context in SpringMVC plugin. Fix CPU usage accessor calculation issue. Fix SpringMVC plugin span not stop bug when doing HTTP forward. Fix lettuce plugin async commend bug and NPE. Fix webflux plugin cast exception. [CI]Support import check.  Backend  Support time serious ElasticSearch storage. Provide dynamic configuration module and implementation. Slow SQL threshold supports dynamic config today. Dynamic Configuration module provide multiple implementations, DCS(gRPC based), Zookeeper, Apollo, Nacos. Provide P99/95/90/75/50 charts in topology edge. New topology query protocol and implementation. Support Envoy ALS in Service Mesh scenario. Support Nacos cluster management. Enhance metric exporter. Run in increment and total modes. Fix module provider is loaded repeatedly. Change TOP slow SQL storage in ES to Text from Keyword, as too long text issue. Fix H2TopologyQuery tiny bug. Fix H2 log query bug.(No feature provided yet) Filtering pods not in \u0026lsquo;Running\u0026rsquo; phase in mesh scenario. Fix query alarm bug in MySQL and H2 storage. Codes refactor.  UI  Fix some ID is null query(s). 
Page refactor, especially time-picker, more friendly. Login removed. Trace timestamp visualization issue fixed. Provide P99/95/90/75/50 charts in topology edge. Change all P99/95/90/75/50 charts style. More readable. Fix 404 in trace page.  Document  Go2Sky project has been donated to SkyAPM, change document link. Add FAQ for ElasticSearch storage, and links from document. Add FAQ fro WebSphere installation. Add several open users. Add alarm webhook document.  All issues and pull requests are here\n6.1.0 Project SkyWalking graduated as Apache Top Level Project.\n Support compiling project agent, backend, UI separately.  Java Agent  Support Vert.x Core 3.x plugin. Support Apache Dubbo plugin. Support use_qualified_name_as_endpoint_name and use_qualified_name_as_operation_name configs in SpringMVC plugin. Support span async close APIs in core. Used in Vert.x plugin. Support MySQL 5,8 plugins. Support set instance id manually(optional). Support customize enhance trace plugin in optional list. Support to set peer in Entry Span. Support Zookeeper plugin. Fix Webflux plugin created unexpected Entry Span. Fix Kafka plugin NPE in Kafka 1.1+ Fix wrong operation name in postgre 8.x plugin. Fix RabbitMQ plugin NPE. Fix agent can\u0026rsquo;t run in JVM 6/7, remove module-info.class. Fix agent can\u0026rsquo;t work well, if there is whitespace in agent path. Fix Spring annotation bug and inheritance enhance issue. Fix CPU accessor bug.  Backend Performance improved, especially in CPU limited environment. 3x improvement in service mesh scenario(no trace) in 8C16G VM. Significantly cost less CPU in low payload.\n Support database metrics and SLOW SQL detection. Support to set max size of metadata query. And change default to 5000 from 100. Support ElasticSearch template for new feature in the future. Support shutdown Zipkin trace analysis, because it doesn\u0026rsquo;t fit production environment. Support log type, scope HTTP_ACCESS_LOG and query. No feature provided, prepare for future versions. Support .NET clr receiver. Support Jaeger trace format, no analysis. Support group endpoint name by regax rules in mesh receiver. Support disable statement in OAL. Support basic auth in ElasticSearch connection. Support metrics exporter module and gRPC implementor. Support \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;= in OAL. Support role mode in backend. Support Envoy metrics. Support query segment by service instance. Support to set host/port manually at cluster coordinator, rather than based on core settings. Make sure OAP shutdown when it faces startup error. Support set separated gRPC/Jetty ip:port for receiver, default still use core settings. Fix JVM receiver bug. Fix wrong dest service in mesh analysis. Fix search doesn\u0026rsquo;t work as expected. Refactor ScopeDeclaration annotation. Refactor register lock mechanism. Add SmartSql component for .NET Add integration tests for ElasticSearch client. Add test cases for exporter. Add test cases for queue consume.  UI  RocketBot UI has been accepted and bind in this release. Support CLR metrics.  Document  Documents updated, matching Top Level Project requirement. UI licenses updated, according to RocketBot UI IP clearance. User wall and powered-by list updated. CN documents removed, only consider to provide by volunteer out of Apache.  All issues and pull requests are here\n6.0.0-GA Java Agent  Support gson plugin(optional). Support canal plugin. Fix missing ojdbc component id. Fix dubbo plugin conflict. Fix OpenTracing tag match bug. 
Fix a missing check in ignore plugin.  Backend  Adjust service inventory entity, to add properties. Adjust service instance inventory entity, to add properties. Add nodeType to service inventory entity. Fix when operation name of local and exit spans in ref, the segment lost. Fix the index names don\u0026rsquo;t show right in logs. Fix wrong alarm text. Add test case for span limit mechanism. Add telemetry module and prometheus implementation, with grafana setting. A refactor for register API in storage module. Fix H2 and MySQL endpoint dependency map miss upstream side. Optimize the inventory register and refactor the implementation. Speed up the trace buffer read. Fix and removed unnecessary inventory register operations.  UI  Add new trace view. Add word-break to tag value.  Document  Add two startup modes document. Add PHP agent links. Add some cn documents. Update year to 2019 User wall updated. Fix a wrong description in how-to-build doc.  All issues and pull requests are here\n6.0.0-beta Protocol  Provide Trace Data Protocol v2 Provide SkyWalking Cross Process Propagation Headers Protocol v2.  Java Agent  Support Trace Data Protocol v2 Support SkyWalking Cross Process Propagation Headers Protocol v2. Support SkyWalking Cross Process Propagation Headers Protocol v1 running in compatible way. Need declare open explicitly. Support SpringMVC 5 Support webflux Support a new way to override agent.config by system env. Span tag can override by explicit way. Fix Spring Controller Inherit issue. Fix ElasticSearch plugin NPE. Fix agent classloader dead lock in certain situation. Fix agent log typo. Fix wrong component id in resettemplete plugin. Fix use transform ignore() in wrong way. Fix H2 query bug.  Backend  Support Trace Data Protocol v2. And Trace Data Protocol v1 is still supported. Support MySQL as storage. Support TiDB as storage. Support a new way to override application.yml by system env. Support service instance and endpoint alarm. Support namespace in istio receiver. Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Support backend trace sampling. Support Zipkin format again. Support init mode. Support namespace in Zookeeper cluster management. Support consul plugin in cluster module. OAL generate tool has been integrated into main repo, in the maven compile stage. Optimize trace paging query. Fix trace query don\u0026rsquo;t use fuzzy query in ElasticSearch storage. Fix alarm can\u0026rsquo;t be active in right way. Fix unnecessary condition in database and cache number query. Fix wrong namespace bug in ElasticSearch storage. Fix Remote clients selector error: / by zero . Fix segment TTL is not working.  UI  Support service throughput(cpm), successful rate(sla), avg response time and p99/p95/p90/p75/p50 response time. Fix TopN endpoint link doesn\u0026rsquo;t work right. Fix trace stack style. Fix CI.  Document  Add more agent setting documents. Add more contribution documents. Update user wall and powered-by page. Add RocketBot UI project link in document.  All issues and pull requests are here\n6.0.0-alpha SkyWalking 6 is totally new milestone for the project. At this point, we are not just a distributing tracing system with analysis and visualization capabilities. We are an Observability Analysis Platform(OAL).\nThe core and most important features in v6 are\n Support to collect telemetry data from different sources, such as multiple language agents and service mesh. Extensible stream analysis core. 
Make SQL and cache analysis available in core level, although haven\u0026rsquo;t provided in this release. Provide Observability Analysis Language(OAL) to make analysis metrics customization available. New GraphQL query protocol. Not binding with UI now. UI topology is better now. New alarm core provided. In alpha, only on service related metrics.  All issues and pull requests are here\n","title":"6.6.0","url":"/docs/main/v9.7.0/en/changes/changes-6.x/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/latest/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. 
Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/next/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. 
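The 7.0.0 agent list above adds the @Tag annotation to the application toolkit. A minimal sketch, assuming the apm-toolkit-trace @Trace/@Tag annotations and the "arg[n]"-style value expressions (UserQueryService is hypothetical):

```java
import org.apache.skywalking.apm.toolkit.trace.Tag;
import org.apache.skywalking.apm.toolkit.trace.Trace;

public class UserQueryService {

    // @Trace creates a local span around the method; @Tag (added in 7.0.0 per
    // the note above) attaches a tag whose value is resolved from an argument
    // or the return value -- the "arg[0]" expression syntax is assumed here.
    @Trace(operationName = "UserQueryService/findUser")
    @Tag(key = "username", value = "arg[0]")
    public String findUser(String username) {
        return "user:" + username;
    }
}
```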
Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.1.0/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.2.0/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. 
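The same 7.0.0 list notes a performance tuning that replaces AtomicInteger with AtomicIntegerFieldUpdater. The plain-JDK pattern behind that change, sketched on a hypothetical SpanCounter class (not SkyWalking's actual internal type), trades one AtomicInteger allocation per instance for a single shared updater over a volatile int field:

```java
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

public class SpanCounter {
    // One static updater shared by all instances; each instance only carries
    // a plain volatile int instead of a separate AtomicInteger object.
    private static final AtomicIntegerFieldUpdater<SpanCounter> REF_COUNT =
            AtomicIntegerFieldUpdater.newUpdater(SpanCounter.class, "refCount");

    private volatile int refCount = 0;

    public int retain() {
        return REF_COUNT.incrementAndGet(this);
    }

    public int release() {
        return REF_COUNT.decrementAndGet(this);
    }
}
```

In a tracer that creates many short-lived spans and segments, dropping the extra object per counter reduces allocation and GC pressure, which is presumably the motivation for the change.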
Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.3.0/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. 
Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.4.0/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  
All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.5.0/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.6.0/en/changes/changes-7.0.0/"},{"content":"7.0.0 Project  SkyWalking discards the supports of JDK 1.6 and 1.7 on the java agent side. The minimal requirement of JDK is JDK8. Support method performance profile. Provide new E2E test framework. Remove AppVeyor from the CI, use GitHub action only. Provide new plugin test tool. Don\u0026rsquo;t support SkyWalking v5 agent in-wire and out-wire protocol. v6 is required.  Java Agent  Add lazy injection API in the agent core. Support Servlet 2.5 in the Struts plugin. Fix RestTemplate plugin ClassCastException in the Async call. Add Finagle plugin. Add test cases of H2 and struts. Add Armeria 0.98 plugin. Fix ElasticSearch plugin bug. Fix EHCache plugin bug. Fix a potential I/O leak. Support Oracle SID mode. Update Byte-buddy core. Performance tuning: replace AtomicInteger with AtomicIntegerFieldUpdater. Add AVRO plugin. Update to JDK 1.8 Optimize the ignore plugin. Enhance the gRPC plugin. Add Kotlin Coroutine plugin. 
Support HTTP parameter collection in Tomcat and SpringMVC plugin. Add @Tag annotation in the application toolkit. Move Lettuce into the default plugin list. Move Webflux into the default plugin list. Add HttpClient 3.x plugin.  OAP-Backend  Support InfluxDB as a new storage option. Add selector in the application.yml. Make the provider activation more flexible through System ENV. Support sub-topology map query. Support gRPC SSL. Support HTTP protocol for agent. Support Nginx LUA agent. Support skip the instance relationship analysis if some agents doesn\u0026rsquo;t have upstream address, currently for LUA agent. Support metrics entity name in the storage. Optional, default OFF. Merge the HOUR and DAY metrics into MINUTE in the ElasticSearch storage implementation. Reduce the payload for ElasticSearch server. Support change detection mechanism in DCS. Support Daily step in the ElasticSearch storage implementation for low traffic system. Provide profile export tool. Support alarm gRPC hook. Fix PHP language doesn\u0026rsquo;t show up on the instance page. Add more comments in the source codes. Add a new metrics type, multiple linears. Fix thread concurrency issue in the alarm core.  UI  Support custom topology definition.  Document  Add FAQ about python2 command required in the compiling. Add doc about new e2e framework. Add doc about the new profile feature. Powered-by page updated.  All issues and pull requests are here\n","title":"7.0.0","url":"/docs/main/v9.7.0/en/changes/changes-7.0.0/"},{"content":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. 
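The 8.0.0 agent list above includes support for Correlation protocol v1, which carries custom key/value pairs across process boundaries next to the trace context. A hedged sketch only, assuming toolkit-level accessors such as TraceContext.putCorrelation/getCorrelation (which may only be exposed in a later 8.x toolkit release; CheckoutService is hypothetical):

```java
import java.util.Optional;
import org.apache.skywalking.apm.toolkit.trace.TraceContext;

public class CheckoutService {
    public void checkout(String tenantId) {
        // Attach a correlation entry; the agent propagates it downstream
        // alongside the trace headers (API assumed, see note above).
        TraceContext.putCorrelation("tenant.id", tenantId);

        // Read it back anywhere in the same (or a downstream) process.
        Optional<String> tenant = TraceContext.getCorrelation("tenant.id");
        tenant.ifPresent(id -> System.out.println("propagating tenant " + id));
    }
}
```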
Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","title":"8.0.0","url":"/docs/main/latest/en/changes/changes-8.0.0/"},{"content":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","title":"8.0.0","url":"/docs/main/next/en/changes/changes-8.0.0/"},{"content":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. 
Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","title":"8.0.0","url":"/docs/main/v9.1.0/en/changes/changes-8.0.0/"},{"content":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. 
Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","title":"8.0.0","url":"/docs/main/v9.2.0/en/changes/changes-8.0.0/"},{"content":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. 
Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","title":"8.0.0","url":"/docs/main/v9.3.0/en/changes/changes-8.0.0/"},{"content":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  
All issues and pull requests are here\n","title":"8.0.0","url":"/docs/main/v9.4.0/en/changes/changes-8.0.0/"},{"content":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","title":"8.0.0","url":"/docs/main/v9.5.0/en/changes/changes-8.0.0/"},{"content":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. 
Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","title":"8.0.0","url":"/docs/main/v9.6.0/en/changes/changes-8.0.0/"},{"content":"8.0.0 Project  v3 protocol is added and implemented. All previous releases are incompatible with 8.x releases. Service, Instance, Endpoint register mechanism and inventory storage entities are removed. New GraphQL query protocol is provided, the legacy protocol is still supported(plan to remove at the end of this year). Support Prometheus network protocol. Metrics in Prometheus format could be transferred into SkyWalking. Python agent provided. All inventory caches have been removed. Apache ShardingSphere(4.1.0, 4.1.1) agent plugin provided.  Java Agent  Add MariaDB plugin. Vert.x plugin enhancement. More cases are covered. Support v3 extension header. Fix ElasticSearch 5.x plugin TransportClient error. Support Correlation protocol v1. Fix Finagle plugin bug, in processing Noop Span. Make CommandService daemon to avoid blocking target application shutting down gracefully. Refactor spring cloud gateway plugin and support tracing spring cloud gateway 2.2.x  OAP-Backend  Support meter system for Prometheus adoption. In future releases, we will add native meter APIs and MicroMeter(Sleuth) system. Support endpoint grouping. Add SuperDataSet annotation for storage entity. Add superDatasetIndexShardsFactor in the ElasticSearch storage, to provide more shards for @SuperDataSet annotated entites. Typically TraceSegment. Support alarm settings for relationship of service, instance, and endpoint level metrics. Support alarm settings for database(conjecture node in tracing scenario). 
Data Model could be added in the runtime, don\u0026rsquo;t depend on the bootstrap sequence anymore. Reduce the memory cost, due to no inventory caches. No buffer files in tracing and service mesh cases. New ReadWriteSafe cache implementation. Simplify codes. Provide default way for metrics query, even the metrics doesn\u0026rsquo;t exist. New GraphQL query protocol is provided. Support the metrics type query. Set up length rule of service, instance, and endpoint. Adjust the default jks for ElasticSearch to empty. Fix Apdex function integer overflow issue. Fix profile storage issue. Fix TTL issue. Fix H2 column type bug. Add JRE 8-14 test for the backend.  UI  UI dashboard is 100% configurable to adopt new metrics definited in the backend.  Document  Add v8 upgrade document. Make the coverage accurate including UT and e2e tests. Add miss doc about collecting parameters in the profiled traces.  CVE  Fix SQL Injection vulnerability in H2/MySQL implementation. Upgrade Nacos to avoid the FastJson CVE in high frequency. Upgrade jasckson-databind to 2.9.10.  All issues and pull requests are here\n","title":"8.0.0","url":"/docs/main/v9.7.0/en/changes/changes-8.0.0/"},{"content":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","title":"8.0.1","url":"/docs/main/latest/en/changes/changes-8.0.1/"},{"content":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","title":"8.0.1","url":"/docs/main/next/en/changes/changes-8.0.1/"},{"content":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","title":"8.0.1","url":"/docs/main/v9.1.0/en/changes/changes-8.0.1/"},{"content":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","title":"8.0.1","url":"/docs/main/v9.2.0/en/changes/changes-8.0.1/"},{"content":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","title":"8.0.1","url":"/docs/main/v9.3.0/en/changes/changes-8.0.1/"},{"content":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","title":"8.0.1","url":"/docs/main/v9.4.0/en/changes/changes-8.0.1/"},{"content":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","title":"8.0.1","url":"/docs/main/v9.5.0/en/changes/changes-8.0.1/"},{"content":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","title":"8.0.1","url":"/docs/main/v9.6.0/en/changes/changes-8.0.1/"},{"content":"8.0.1 OAP-Backend  Fix no-init mode is not working in ElasticSearch storage.  ","title":"8.0.1","url":"/docs/main/v9.7.0/en/changes/changes-8.0.1/"},{"content":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. 
Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","title":"8.1.0","url":"/docs/main/latest/en/changes/changes-8.1.0/"},{"content":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. 
Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","title":"8.1.0","url":"/docs/main/next/en/changes/changes-8.1.0/"},{"content":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. 
Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","title":"8.1.0","url":"/docs/main/v9.1.0/en/changes/changes-8.1.0/"},{"content":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. 
Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","title":"8.1.0","url":"/docs/main/v9.2.0/en/changes/changes-8.1.0/"},{"content":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  
CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","title":"8.1.0","url":"/docs/main/v9.3.0/en/changes/changes-8.1.0/"},{"content":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","title":"8.1.0","url":"/docs/main/v9.4.0/en/changes/changes-8.1.0/"},{"content":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. 
[Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","title":"8.1.0","url":"/docs/main/v9.5.0/en/changes/changes-8.1.0/"},{"content":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. 
Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","title":"8.1.0","url":"/docs/main/v9.6.0/en/changes/changes-8.1.0/"},{"content":"8.1.0 Project  Support Kafka as an optional trace, JVM metrics, profiling snapshots and meter system data transport layer. Support Meter system, including the native metrics APIs and the Spring Sleuth adoption. Support JVM thread metrics.  Java Agent  [Core] Fix the concurrency access bug in the Concurrency ClassLoader Case. [Core] Separate the config of the plugins from the core level. [Core] Support instrumented class cached in memory or file, to be compatible with other agents, such as Arthas. Add logic endpoint concept. Could analysis any span or tags flagged by the logic endpoint. Add Spring annotation component name for UI visualization only. Add support to trace Call procedures in MySQL plugin. Support GraphQL plugin. Support Quasar fiber plugin. Support InfluxDB java client plugin. Support brpc java plugin Support ConsoleAppender in the logback v1 plugin. Enhance vert.x endpoint names. Optimize the code to prevent mongo statements from being too long. Fix WebFlux plugin concurrency access bug. Fix ShardingSphere plugins internal conflicts. Fix duplicated Spring MVC endpoint. Fix lettuce plugin sometimes trace doesn‘t show span layer. Fix @Tag returnedObject bug.  OAP-Backend  Support Jetty Server advanced configurations. Support label based filter in the prometheus fetcher and OpenCensus receiver. 
Support using k8s configmap as the configuration center. Support OAP health check, and storage module health check. Support sampling rate in the dynamic configuration. Add endpoint_relation_sla and endpoint_relation_percentile for endpoint relationship metrics. Add components for Python plugins, including Kafka, Tornado, Redis, Django, PyMysql. Add components for Golang SDK. Add Nacos 1.3.1 back as an optional cluster coordinator and dynamic configuration center. Enhance the metrics query for ElasticSearch implementation to increase the stability. Reduce the length of storage entity names in the self-observability for MySQL and TiDB storage. Fix labels are missing in Prometheus analysis context. Fix column length issue in MySQL/TiDB storage. Fix no data in 2nd level aggregation in self-observability. Fix searchService bug in ES implementation. Fix wrong validation of endpoint relation entity query. Fix the bug caused by the OAL debug flag. Fix endpoint dependency bug in MQ and uninstrumented proxy cases. Fix time bucket conversion issue in the InfluxDB storage implementation. Update k8s client to 8.0.0  UI  Support endpoint dependency graph. Support x-scroll of trace/profile page Fix database selector issue. Add the bar chart in the UI templates.  Document  Update the user logo wall. Add backend configuration vocabulary document. Add agent installation doc for Tomcat9 on Windows. Add istioctl ALS commands for the document. Fix TTL documentation. Add FAQ doc about thread instrumentation.  CVE  Fix fuzzy query sql injection in the MySQL/TiDB storage.  All issues and pull requests are here\n","title":"8.1.0","url":"/docs/main/v9.7.0/en/changes/changes-8.1.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. 
Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/latest/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. 
Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/next/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. 
Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  
Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.1.0/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. 
Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.2.0/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. 
Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.3.0/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. 
Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.4.0/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. 
Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.5.0/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. 
Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  
Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.6.0/en/changes/changes-8.2.0/"},{"content":"8.2.0 Project  Support Browser monitoring. Add e2e test for ALS solution of service mesh observability. Support compiling(include testing) in JDK11. Support build a single module.  Java Agent  Support metrics plugin. Support slf4j logs of gRPC and Kafka(when agent uses them) into the agent log files. Add PROPERTIES_REPORT_PERIOD_FACTOR config to avoid the properties of instance cleared. Limit the size of traced SQL to avoid OOM. Support mount command to load a new set of plugins. Add plugin selector mechanism. Enhance the witness classes for MongoDB plugin. Enhance the parameter truncate mechanism of SQL plugins. Enhance the SpringMVC plugin in the reactive APIs. Enhance the SpringMVC plugin to collect HTTP headers as the span tags. Enhance the Kafka plugin, about @KafkaPollAndInvoke Enhance the configuration initialization core. Plugin could have its own plugins. Enhance Feign plugin to collect parameters. Enhance Dubbo plugin to collect parameters. Provide Thrift plugin. Provide XXL-job plugin. Provide MongoDB 4.x plugin. Provide Kafka client 2.1+ plugin. Provide WebFlux-WebClient plugin. Provide ignore-exception plugin. Provide quartz scheduler plugin. Provide ElasticJob 2.x plugin. Provide Spring @Scheduled plugin. Provide Spring-Kafka plugin. Provide HBase client plugin. Provide JSON log format. Move Spring WebFlux plugin to the optional plugin. Fix inconsistent logic bug in PrefixMatch Fix duplicate exit spans in Feign LoadBalancer mechanism. Fix the target service blocked by the Kafka reporter. Fix configurations of Kafka report don\u0026rsquo;t work. Fix rest template concurrent conflict. Fix NPE in the ActiveMQ plugin. Fix conflict between Kafka reporter and sampling plugin. Fix NPE in the log formatter. Fix span layer missing in certain cases, in the Kafka plugin. Fix error format of time in serviceTraffic update. Upgrade bytebuddy to 1.10.14  OAP-Backend  Support Nacos authentication. Support labeled meter in the meter receiver. Separate UI template into multiple files. Provide support for Envoy tracing. Envoy tracer depends on the Envoy community. Support query trace by tags. Support composite alarm rules. Support alarm messages to DingTalk. Support alarm messages to WeChat. Support alarm messages to Slack. Support SSL for Prometheus fetcher and self telemetry. Support labeled histogram in the prometheus format. Support the status of segment based on entry span or first span only. Support the error segment in the sampling mechanism. Support SSL certs of gRPC server. Support labeled metrics in the alarm rule setting. Support to query all labeled data, if no explicit label in the query condition. Add TLS parameters in the mesh analysis. Add health check for InfluxDB storage. Add super dataset concept for the traces/logs. Add separate replicas configuration for super dataset. Add IN operator in the OAL. Add != operator in the OAL. Add like operator in the OAL. Add latest function in the prometheus analysis. Add more configurations in the gRPC server. Optimize the trace query performance. Optimize the CPU usage rate calculation, at least to be 1. Optimize the length of slow SQL column in the MySQL storage. Optimize the topology query, use client side component name when no server side mapping. 
Add component IDs for Python component. Add component ID range for C++. Fix Slack notification setting NPE. Fix some module missing check of the module manager core. Fix authentication doesn\u0026rsquo;t work in sharing server. Fix metrics batch persistent size bug. Fix trace sampling bug. Fix CLR receiver bug. Fix end time bug in the query process. Fix Exporter INCREMENT mode is not working. Fix an error when executing startup.bat when the log directory exists Add syncBulkActions configuration to set up the batch size of the metrics persistent. Meter Analysis Language.  UI  Add browser dashboard. Add browser log query page. Support query trace by tags. Fix JVM configuration. Fix CLR configuration.  Document  Add the document about SW_NO_UPSTREAM_REAL_ADDRESS. Update ALS setup document. Add Customization Config section for plugin development.  All issues and pull requests are here\n","title":"8.2.0","url":"/docs/main/v9.7.0/en/changes/changes-8.2.0/"},{"content":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. 
Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","title":"8.3.0","url":"/docs/main/latest/en/changes/changes-8.3.0/"},{"content":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. 
Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","title":"8.3.0","url":"/docs/main/next/en/changes/changes-8.3.0/"},{"content":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. 
Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","title":"8.3.0","url":"/docs/main/v9.1.0/en/changes/changes-8.3.0/"},{"content":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. 
Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","title":"8.3.0","url":"/docs/main/v9.2.0/en/changes/changes-8.3.0/"},{"content":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. 
Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","title":"8.3.0","url":"/docs/main/v9.3.0/en/changes/changes-8.3.0/"},{"content":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. 
Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","title":"8.3.0","url":"/docs/main/v9.4.0/en/changes/changes-8.3.0/"},{"content":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. 
Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","title":"8.3.0","url":"/docs/main/v9.5.0/en/changes/changes-8.3.0/"},{"content":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. 
Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  
All issues and pull requests are here\n","title":"8.3.0","url":"/docs/main/v9.6.0/en/changes/changes-8.3.0/"},{"content":"8.3.0  Project  Test: ElasticSearch version 7.0.0 and 7.9.3 as storage are E2E tested. Test: Bump up testcontainers version to work around the Docker bug on MacOS.  Java Agent  Support propagate the sending timestamp in MQ plugins to calculate the transfer latency in the async MQ scenarios. Support auto-tag with the fixed values propagated in the correlation context. Make HttpClient 3.x, 4.x, and HttpAsyncClient 3.x plugins to support collecting HTTP parameters. Make the Feign plugin to support Java 14 Make the okhttp3 plugin to support Java 14 Polish tracing context related codes. Add the plugin for async-http-client 2.x Fix NPE in the nutz plugin. Provide Apache Commons DBCP 2.x plugin. Add the plugin for mssql-jtds 1.x. Add the plugin for mssql-jdbc 6.x -\u0026gt; 9.x. Fix the default ignore mechanism isn\u0026rsquo;t accurate enough bug. Add the plugin for spring-kafka 1.3.x. Add the plugin for Apache CXF 3.x. Fix okhttp-3.x and async-http-client-2.x did not overwrite the old trace header.  OAP-Backend  Add the @SuperDataset annotation for BrowserErrorLog. Add the thread pool to the Kafka fetcher to increase the performance. Add contain and not contain OPS in OAL. Add Envoy ALS analyzer based on metadata exchange. Add listMetrics GraphQL query. Add group name into services of so11y and istio relevant metrics Support keeping collecting the slowly segments in the sampling mechanism. Support choose files to active the meter analyzer. Support nested class definition in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Support sideCar.internalErrorCode in the Service, ServiceInstance, Endpoint, ServiceRelation, and ServiceInstanceRelation sources. Improve Kubernetes service registry for ALS analysis. Add health checker for cluster management Support the service auto grouping. Support query service list by the group name. Improve the queryable tags generation. Remove the duplicated tags to reduce the storage payload. Fix the threads of the Kafka fetcher exit if some unexpected exceptions happen. Fix the excessive timeout period set by the kubernetes-client. Fix deadlock problem when using elasticsearch-client-7.0.0. Fix storage-jdbc isExists not set dbname. Fix searchService bug in the InfluxDB storage implementation. Fix CVE in the alarm module, when activating the dynamic configuration feature. Fix CVE in the endpoint grouping, when activating the dynamic configuration feature. Fix CVE in the uninstrumented gateways configs, when activating the dynamic configuration feature. Fix CVE in the Apdex threshold configs, when activating the dynamic configuration feature. Make the codes and doc consistent in sharding server and core server. Fix that chunked string is incorrect while the tag contains colon. Fix the incorrect dynamic configuration key bug of endpoint-name-grouping. Remove unused min date timebucket in jdbc deletehistory logical Fix \u0026ldquo;transaction too large error\u0026rdquo; when use TiDB as storage. Fix \u0026ldquo;index not found\u0026rdquo; in trace query when use ES7 storage. Add otel rules to ui template to observe Istio control plane. Remove istio mixer Support close influxdb batch write model. Check SAN in the ALS (m)TLS process.  UI  Fix incorrect label in radial chart in topology. Replace node-sass with dart-sass. 
Replace serviceFilter with serviceGroup Removed \u0026ldquo;Les Miserables\u0026rdquo; from radial chart in topology. Add the Promise dropdown option  Documentation  Add VNode FAQ doc. Add logic endpoint section in the agent setup doc. Adjust configuration names and system environment names of the sharing server module Tweak Istio metrics collection doc. Add otel receiver.  All issues and pull requests are here\n","title":"8.3.0","url":"/docs/main/v9.7.0/en/changes/changes-8.3.0/"},{"content":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. 
Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. 
Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","title":"8.4.0","url":"/docs/main/latest/en/changes/changes-8.4.0/"},{"content":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. 
{"content":"8.4.0 Project  Incompatible with previous releases when use H2/MySQL/TiDB storage options, due to support multiple alarm rules triggered for one entity. 
Chore: adapt create_source_release.sh to make it runnable on Linux. Add package to .proto files, prevent polluting top-level namespace in some languages; The OAP server supports previous agent releases, whereas the previous OAP server (\u0026lt;=8.3.0) won\u0026rsquo;t recognize newer agents since this version (\u0026gt;= 8.4.0). Add ElasticSearch 7.10 to test matrix and verify it works. Replace Apache RAT with skywalking-eyes to check license headers. Set up test of Envoy ALS / MetricsService under Istio 1.8.2 to verify Envoy V3 protocol Test: fix flaky E2E test of Kafka.  Java Agent  The operation name of quartz-scheduler plugin, has been changed as the quartz-scheduler/${className} format. Fix jdk-http and okhttp-3.x plugin did not overwrite the old trace header. Add interceptors of method(analyze, searchScroll, clearScroll, searchTemplate and deleteByQuery) for elasticsearch-6.x-plugin. Fix the unexpected RunningContext recreation in the Tomcat plugin. Fix the potential NPE when trace_sql_parameters is enabled. Update byte-buddy to 1.10.19. Fix thrift plugin trace link broken when intermediate service does not mount agent Fix thrift plugin collects wrong args when the method without parameter. Fix DataCarrier\u0026rsquo;s org.apache.skywalking.apm.commons.datacarrier.buffer.Buffer implementation isn\u0026rsquo;t activated in IF_POSSIBLE mode. Fix ArrayBlockingQueueBuffer\u0026rsquo;s useless IF_POSSIBLE mode list Support building gRPC TLS channel but CA file is not required. Add witness method mechanism in the agent plugin core. Add Dolphinscheduler plugin definition. Make sampling still works when the trace ignores plug-in activation. Fix mssql-plugin occur ClassCastException when call the method of return generate key. The operation name of dubbo and dubbo-2.7.x-plugin, has been changed as the groupValue/className.methodName format Fix bug that rocketmq-plugin set the wrong tag. Fix duplicated EnhancedInstance interface added. Fix thread leaks caused by the elasticsearch-6.x-plugin plugin. Support reading segmentId and spanId with toolkit. Fix RestTemplate plugin recording url tag with wrong port Support collecting logs and forwarding through gRPC. Support config agent.sample_n_per_3_secs can be changed in the runtime. Support config agent.ignore_suffix can be changed in the runtime. Support DNS periodic resolving mechanism to update backend service. Support config agent.trace.ignore_path can be changed in the runtime. Added support for transmitting logback 1.x and log4j 2.x formatted \u0026amp; un-formatted messages via gPRC  OAP-Backend  Make meter receiver support MAL. Support influxDB connection response format option. Fix some error when use JSON as influxDB response format. Support Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters. Add the rule name field to alarm record storage entity as a part of ID, to support multiple alarm rules triggered for one entity. The scope id has been removed from the ID. Fix MAL concurrent execution issues. Fix group name can\u0026rsquo;t be queried in the GraphQL. Fix potential gRPC connection leak(not closed) for the channels among OAP instances. Filter OAP instances(unassigned in booting stage) of the empty IP in KubernetesCoordinator. Add component ID for Python aiohttp plugin requester and server. Fix H2 in-memory database table missing issues Add component ID for Python pyramid plugin server. Add component ID for NodeJS Axios plugin. Fix searchService method error in storage-influxdb-plugin. Add JavaScript component ID. 
Fix CVE of UninstrumentedGateways in Dynamic Configuration activation. Improve query performance in storage-influxdb-plugin. Fix the uuid field in GRPCConfigWatcherRegister is not updated. Support Envoy {AccessLog,Metrics}Service API V3. Adopt the MAL in Envoy metrics service analyzer. Fix the priority setting doesn\u0026rsquo;t work of the ALS analyzers. Fix bug that endpoint-name-grouping.yml is not customizable in Dockerized case. Fix bug that istio version metric type on UI template mismatches the otel rule. Improve ReadWriteSafeCache concurrency read-write performance Fix bug that if use JSON as InfluxDB.ResponseFormat then NumberFormatException maybe occur. Fix timeBucket not taking effect in EqualsAndHashCode annotation of some relationship metrics. Fix SharingServerConfig\u0026rsquo;s propertie is not correct in the application.yml, contextPath -\u0026gt; restConnextPath. Istio control plane: remove redundant metrics and polish panel layout. Fix bug endpoint name grouping not work due to setting service name and endpoint name out of order. Fix receiver analysis error count metrics. Log collecting and query implementation. Support Alarm to feishu. Add the implementation of ConfigurationDiscovery on the OAP side. Fix bug in parseInternalErrorCode where some error codes are never reached. OAL supports multiple values when as numeric. Add node information from the Openensus proto to the labels of the samples, to support the identification of the source of the Metric data. Fix bug that the same sample name in one MAL expression caused IllegalArgumentException in Analyzer.analyse. Add the text analyzer for querying log in the es storage. Chore: Remove duplicate codes in Envoy ALS handler. Remove the strict rule of OAL disable statement parameter. Fix a legal metric query adoption bug. Don\u0026rsquo;t support global level metric query. Add VM MAL and ui-template configration, support Prometheus node-exporter VM metrics that pushed from OpenTelemetry-collector. Remove unused log query parameters.  UI  Fix un-removed tags in trace query. Fix unexpected metrics name on single value component. Don\u0026rsquo;t allow negative value as the refresh period. Fix style issue in trace table view. Separation Log and Dashboard selector data to avoid conflicts. Fix trace instance selector bug. Fix Unnecessary sidebar in tooltips for charts. Refactor dashboard query in a common script. Implement refreshing data for topology by updating date. Implement group selector in the topology. Fix all as default parameter for services selector. Add icon for Python aiohttp plugin. Add icon for Python pyramid plugin. Fix topology render all services nodes when groups changed. Fix rk-footer utc input\u0026rsquo;s width. Update rk-icon and rewrite rk-header svg tags with rk-icon. Add icon for http type. Fix rk-footer utc without local storage. Sort group names in the topology. Add logo for Dolphinscheduler. Fix dashboard wrong instance. Add a legend for the topology. Update the condition of unhealthy cube. Fix: use icons to replace buttons for task list in profile. Fix: support = in the tag value in the trace query page. Add envoy proxy component logo. Chore: set up license-eye to check license headers and add missing license headers. Fix prop for instances-survey and endpoints-survey. Fix envoy icon in topology. Implement the service logs on UI. Change the flask icon to light version for a better view of topology dark theme. Implement viewing logs on trace page. Fix update props of date component. 
Fix query conditions for logs. Fix style of selectors to word wrap. Fix logs time. Fix search ui for logs.  Documentation  Update the documents of backend fetcher and self observability about the latest configurations. Add documents about the group name of service. Update docs about the latest UI. Update the document of backend trace sampling with the latest configuration. Update kafka plugin support version to 2.6.1. Add FAQ about Fix compiling on Mac M1 chip.  All issues and pull requests are here\n","title":"8.4.0","url":"/docs/main/v9.7.0/en/changes/changes-8.4.0/"},{"content":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. 
Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","title":"8.5.0","url":"/docs/main/latest/en/changes/changes-8.5.0/"},{"content":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. 
Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. 
Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","title":"8.5.0","url":"/docs/main/next/en/changes/changes-8.5.0/"},{"content":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. 
Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. 
Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","title":"8.5.0","url":"/docs/main/v9.1.0/en/changes/changes-8.5.0/"},{"content":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. 
Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","title":"8.5.0","url":"/docs/main/v9.2.0/en/changes/changes-8.5.0/"},{"content":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. 
Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. 
Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","title":"8.5.0","url":"/docs/main/v9.3.0/en/changes/changes-8.5.0/"},{"content":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. 
Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. 
Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","title":"8.5.0","url":"/docs/main/v9.4.0/en/changes/changes-8.5.0/"},{"content":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. 
Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  
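As a side note on the Java Agent list above, the entry "Support @Trace, @Tag and @Tags work for static methods" means the toolkit annotations can now be placed on static methods as well as instance methods. A minimal sketch, assuming the apm-toolkit-trace dependency and an attached 8.5.0+ agent; the class and method names are illustrative only:

```java
import org.apache.skywalking.apm.toolkit.trace.Tag;
import org.apache.skywalking.apm.toolkit.trace.Tags;
import org.apache.skywalking.apm.toolkit.trace.Trace;

public class UserLookup {
    // A local span is now created for this *static* method; "arg[0]" and "returnedObj"
    // are the expression forms the toolkit uses to reference arguments and the return value.
    @Trace(operationName = "UserLookup/findNameById")
    @Tags({@Tag(key = "userId", value = "arg[0]"),
           @Tag(key = "result", value = "returnedObj")})
    public static String findNameById(long userId) {
        return "user-" + userId;
    }
}
```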
All issues and pull requests are here\n","title":"8.5.0","url":"/docs/main/v9.5.0/en/changes/changes-8.5.0/"},{"content":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. 
Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","title":"8.5.0","url":"/docs/main/v9.6.0/en/changes/changes-8.5.0/"},{"content":"8.5.0 Project  Incompatible Change. Indices and templates of ElasticSearch(6/7, including zipkin-elasticsearch7) storage option have been changed. Update frontend-maven-plugin to 1.11.0, for Download node x64 binary on Apple Silicon. Add E2E test for VM monitoring that metrics from Prometheus node-exporter. Upgrade lombok to 1.18.16. Add Java agent Dockerfile to build Docker image for Java agent.  Java Agent  Remove invalid mysql configuration in agent.config. Add net.bytebuddy.agent.builder.AgentBuilder.RedefinitionStrategy.Listener to show detail message when redefine errors occur. Fix ClassCastException of log4j gRPC reporter. Fix NPE when Kafka reporter activated. Enhance gRPC log appender to allow layout pattern. Fix apm-dubbo-2.7.x-plugin memory leak due to some Dubbo RpcExceptions. Fix lettuce-5.x-plugin get null host in redis sentinel mode. Fix ClassCastException by making CallbackAdapterInterceptor to implement EnhancedInstance interface in the spring-kafka plugin. 
Fix NullPointerException with KafkaProducer.send(record). Support config agent.span_limit_per_segment can be changed in the runtime. Collect and report agent starting / shutdown events. Support jedis pipeline in jedis-2.x-plugin. Fix apm-toolkit-log4j-2.x-activation no trace Id in async log. Replace hbase-1.x-plugin with hbase-1.x-2.x-plugin to adapt hbase client 2.x Remove the close_before_method and close_after_method parameters of custom-enhance-plugin to avoid memory leaks. Fix bug that springmvc-annotation-4.x-plugin, witness class does not exist in some versions. Add Redis command parameters to \u0026lsquo;db.statement\u0026rsquo; field on Lettuce span UI for displaying more info. Fix NullPointerException with ReactiveRequestHolder.getHeaders. Fix springmvc reactive api can\u0026rsquo;t collect HTTP statusCode. Fix bug that asynchttpclient plugin does not record the response status code. Fix spanLayer is null in optional plugin(gateway-2.0.x-plugin gateway-2.1.x-plugin). Support @Trace, @Tag and @Tags work for static methods.  OAP-Backend  Allow user-defined JAVA_OPTS in the startup script. Metrics combination API supports abandoning results. Add a new concept \u0026ldquo;Event\u0026rdquo; and its implementations to collect events. Add some defensive codes for NPE and bump up Kubernetes client version to expose exception stack trace. Update the timestamp field type for LogQuery. Support Zabbix protocol to receive agent metrics. Update the Apdex metric combine calculator. Enhance MeterSystem to allow creating metrics with same metricName / function / scope. Storage plugin supports postgresql. Fix kubernetes.client.openapi.ApiException. Remove filename suffix in the meter active file config. Introduce log analysis language (LAL). Fix alarm httpclient connection leak. Add sum function in meter system. Remove Jaeger receiver. Remove the experimental Zipkin span analyzer. Upgrade the Zipkin Elasticsearch storage from 6 to 7. Require Zipkin receiver must work with zipkin-elasticsearch7 storage option. Fix DatabaseSlowStatementBuilder statement maybe null. Remove fields of parent entity in the relation sources. Save Envoy http access logs when error occurs. Fix wrong service_instance_sla setting in the topology-instance.yml. Fix wrong metrics name setting in the self-observability.yml. Add telemetry data about metrics in, metrics scraping, mesh error and trace in metrics to zipkin receiver. Fix tags store of log and trace on h2/mysql/pg storage. Merge indices by Metrics Function and Meter Function in Elasticsearch Storage. Fix receiver don\u0026rsquo;t need to get itself when healthCheck Remove group concept from AvgHistogramFunction. Heatmap(function result) doesn\u0026rsquo;t support labels. Support metrics grouped by scope labelValue in MAL, no need global same labelValue as before. Add functions in MAL to filter metrics according to the metric value. Optimize the self monitoring grafana dashboard. Enhance the export service. Add function retagByK8sMeta and opt type K8sRetagType.Pod2Service in MAL for k8s to relate pods and services. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. Support k8s monitoring. Make the flushing metrics operation concurrent. Fix ALS K8SServiceRegistry didn\u0026rsquo;t remove the correct entry. Using \u0026ldquo;service.istio.io/canonical-name\u0026rdquo; to replace \u0026ldquo;app\u0026rdquo; label to resolve Envoy ALS service name. 
Append the root slash(/) to getIndex and getTemplate requests in ES(6 and 7) client. Fix disable statement not working. This bug exists since 8.0.0. Remove the useless metric in vm.yaml.  UI  Update selector scroller to show in all pages. Implement searching logs with date. Add nodejs 14 compiling. Fix trace id by clear search conditions. Search endpoints with keywords. Fix pageSize on logs page. Update echarts version to 5.0.2. Fix instance dependency on the topology page. Fix resolved url for vue-property-decorator. Show instance attributes. Copywriting grammar fix. Fix log pages tags column not updated. Fix the problem that the footer and topology group is shaded when the topology radiation is displayed. When the topology radiation chart is displayed, the corresponding button should be highlighted. Refactor the route mapping, Dynamically import routing components, Improve first page loading performance. Support topology of two mutually calling services. Implement a type of table chart in the dashboard. Support event in the dashboard. Show instance name in the trace view. Fix groups of services in the topography.  Documentation  Polish documentation due to we have covered all tracing, logging, and metrics fields. Adjust documentation about Zipkin receiver. Add backend-infrastructure-monitoring doc.  All issues and pull requests are here\n","title":"8.5.0","url":"/docs/main/v9.7.0/en/changes/changes-8.5.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. 
perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/latest/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. 
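The backend entry "perf: use iterator.remove() to remove modulesWithoutProvider" refers to removing elements through the Iterator while looping, rather than mutating the collection directly. A generic, self-contained Java illustration of the pattern follows; the variable names are illustrative, not the actual OAP code:

```java
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class RemoveWhileIterating {
    public static void main(String[] args) {
        List<String> modulesWithoutProvider = new ArrayList<>(List.of("core", "storage", "telemetry"));
        for (Iterator<String> it = modulesWithoutProvider.iterator(); it.hasNext(); ) {
            if ("telemetry".equals(it.next())) {
                // Removing via the Iterator is safe; calling modulesWithoutProvider.remove(...)
                // inside this loop could throw ConcurrentModificationException.
                it.remove();
            }
        }
        System.out.println(modulesWithoutProvider); // [core, storage]
    }
}
```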
Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/next/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. 
Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. 
Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics infomation during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.1.0/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. 
Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.2.0/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. 
Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.3.0/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. 
new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. 
Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.4.0/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. 
Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.5.0/en/changes/changes-8.6.0/"},{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin. fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. 
{"content":"8.6.0 Project  Add OpenSearch as storage option. Upgrade Kubernetes Java client dependency to 11.0. Fix plugin test script error in macOS.  Java Agent  Add trace_segment_ref_limit_per_span configuration mechanism to avoid OOM. Improve GlobalIdGenerator performance. Add an agent plugin to support elasticsearch7. Add jsonrpc4j agent plugin. new options to support multi skywalking cluster use same kafka cluster(plugin.kafka.namespace) resolve agent has no retries if connect kafka cluster failed when bootstrap Add Seata in the component definition. Seata plugin hosts on Seata project. Extended Kafka plugin to properly trace consumers that have topic partitions directly assigned. Support Kafka consumer 2.8.0. Support print SkyWalking context to logs. Add MessageListener enhancement in pulsar plugin.
fix a bug that spring-mvc set an error endpoint name if the controller class annotation implements an interface. Add an optional agent plugin to support mybatis. Add spring-cloud-gateway-3.x optional plugin. Add okhttp-4.x plugin. Fix NPE when thrift field is nested in plugin thrift Fix possible NullPointerException in agent\u0026rsquo;s ES plugin. Fix the conversion problem of float type in ConfigInitializer. Fixed part of the dynamic configuration of ConfigurationDiscoveryService that does not take effect under certain circumstances. Introduce method interceptor API v2 Fix ClassCast issue for RequestHolder/ResponseHolder. fixed jdk-threading-plugin memory leak. Optimize multiple field reflection operation in Feign plugin. Fix trace-ignore-plugin TraceIgnorePathPatterns can\u0026rsquo;t set empty value  OAP-Backend  BugFix: filter invalid Envoy access logs whose socket address is empty. Fix K8s monitoring the incorrect metrics calculate. Loop alarm into event system. Support alarm tags. Support WeLink as a channel of alarm notification. Fix: Some defensive codes didn\u0026rsquo;t work in PercentileFunction combine. CVE: fix Jetty vulnerability. https://nvd.nist.gov/vuln/detail/CVE-2019-17638 Fix: MAL function would miss samples name after creating new samples. perf: use iterator.remove() to remove modulesWithoutProvider Support analyzing Envoy TCP access logs and persist error TCP logs. Fix: Envoy error logs are not persisted when no metrics are generated Fix: Memory leakage of low version etcd client. fix-issue Allow multiple definitions as fallback in metadata-service-mapping.yaml file and k8sServiceNameRule. Fix: NPE when configmap has no data. Fix: Dynamic Configuration key slowTraceSegmentThreshold not work Fix: != is not supported in oal when parameters are numbers. Include events of the entity(s) in the alarm. Support native-json format log in kafka-fetcher-plugin. Fix counter misuse in the alarm core. Alarm can\u0026rsquo;t be triggered in time. Events can be configured as alarm source. Make the number of core worker in meter converter thread pool configurable. Add HTTP implementation of logs reporting protocol. Make metrics exporter still work even when storage layer failed. Fix Jetty HTTP TRACE issue, disable HTTP methods except POST. CVE: upgrade snakeyaml to prevent billion laughs attack in dynamic configuration. polish debug logging avoids null value when the segment ignored.  UI  Add logo for kong plugin. Add apisix logo. Refactor js to ts for browser logs and style change. When creating service groups in the topology, it is better if the service names are sorted. Add tooltip for dashboard component. Fix style of endpoint dependency. Support search and visualize alarms with tags. Fix configurations on dashboard. Support to configure the maximum number of displayed items. After changing the durationTime, the topology shows the originally selected group or service. remove the no use maxItemNum for labeled-value metric, etc. Add Azure Functions logo. Support search Endpoint use keyword params in trace view. Add a function which show the statistics information during the trace query. Remove the sort button at the column of Type in the trace statistics page. Optimize the APISIX icon in the topology. Implement metrics templates in the topology. Visualize Events on the alarm page. Update duration steps in graphs for Trace and Log.  Documentation  Polish k8s monitoring otel-collector configuration example. Print SkyWalking context to logs configuration example. 
Update doc about metrics v2 APIs.  All issues and pull requests are here\n Find change logs of all versions here.\n","title":"8.6.0","url":"/docs/main/v9.7.0/en/changes/changes-8.6.0/"},
Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/v9.1.0/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. 
Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. 
Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. 
Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/v9.2.0/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. 
Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. 
Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/v9.3.0/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. 
DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. 
Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. 
The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/v9.4.0/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. 
Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. 
Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. 
Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/v9.5.0/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. 
Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. 
At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. Avoid \u0026ldquo;select *\u0026rdquo; query method Introduce dynamical logging to update log configuration at runtime Fix Kubernetes ConfigMap configuration center doesn\u0026rsquo;t send delete event Breaking Change: emove qps and add rpm in LAL  UI  Fix the date component for log conditions. Fix selector keys for duplicate options. Add Python celery plugin. Fix default config for metrics. Fix trace table for profile ui. Fix the error of server response time in the topology. Fix chart types for setting metrics configure. Fix logs pages number. Implement a timeline for Events in a new page. Fix style for event details.  Documentation  Add FAQ about Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Add Self Observability service discovery (k8s). Add sending Envoy Metrics to OAP in envoy 1.19 example and bump up to Envoy V3 api.  All issues and pull requests are here\n","title":"8.7.0","url":"/docs/main/v9.6.0/en/changes/changes-8.7.0/"},{"content":"8.7.0 Project  Extract dependency management to a bom. Add JDK 16 to test matrix. DataCarrier consumer add a new event notification, call nothingToConsume method if the queue has no element to consume. Build and push snapshot Docker images to GitHub Container Registry, this is only for people who want to help to test the master branch codes, please don\u0026rsquo;t use in production environments.  Java Agent  Supports modifying span attributes in async mode. Agent supports the collection of JVM arguments and jar dependency information. [Temporary] Support authentication for log report channel. This feature and grpc channel is going to be removed after Satellite 0.2.0 release. Remove deprecated gRPC method, io.grpc.ManagedChannelBuilder#nameResolverFactory. See gRPC-java 7133 for more details. Add Neo4j-4.x plugin. Correct profile.duration to profile.max_duration in the default agent.config file. Fix the response time of gRPC. Support parameter collection for SqlServer. Add ShardingSphere-5.0.0-beta plugin. Fix some method exception error. 
Fix async finish repeatedly in spring-webflux-5.x-webclient plugin. Add agent plugin to support Sentinel. Move ehcache-2.x plugin as an optional plugin. Support guava-cache plugin. Enhance the compatibility of mysql-8.x-plugin plugin. Support Kafka SASL login module. Fix gateway plugin async finish repeatedly when fallback url configured. Chore: polish methods naming for Spring-Kafka plugins. Remove plugins for ShardingSphere legacy version. Update agent plugin for ElasticJob GA version Remove the logic of generating instance name in KafkaServiceManagementServiceClient class. Improve okhttp plugin performance by optimizing Class.getDeclaredField(). Fix GRPCLogClientAppender no context warning. Fix spring-webflux-5.x-webclient-plugin NPE.  OAP-Backend  Disable Spring sleuth meter analyzer by default. Only count 5xx as error in Envoy ALS receiver. Upgrade apollo core caused by CVE-2020-15170. Upgrade kubernetes client caused by CVE-2020-28052. Upgrade Elasticsearch 7 client caused by CVE-2020-7014. Upgrade jackson related libs caused by CVE-2018-11307, CVE-2018-14718 ~ CVE-2018-14721, CVE-2018-19360 ~ CVE-2018-19362, CVE-2019-14379, CVE-2019-14540, CVE-2019-14892, CVE-2019-14893, CVE-2019-16335, CVE-2019-16942, CVE-2019-16943, CVE-2019-17267, CVE-2019-17531, CVE-2019-20330, CVE-2020-8840, CVE-2020-9546, CVE-2020-9547, CVE-2020-9548, CVE-2018-12022, CVE-2018-12023, CVE-2019-12086, CVE-2019-14439, CVE-2020-10672, CVE-2020-10673, CVE-2020-10968, CVE-2020-10969, CVE-2020-11111, CVE-2020-11112, CVE-2020-11113, CVE-2020-11619, CVE-2020-11620, CVE-2020-14060, CVE-2020-14061, CVE-2020-14062, CVE-2020-14195, CVE-2020-24616, CVE-2020-24750, CVE-2020-25649, CVE-2020-35490, CVE-2020-35491, CVE-2020-35728 and CVE-2020-36179 ~ CVE-2020-36190. Exclude log4j 1.x caused by CVE-2019-17571. Upgrade log4j 2.x caused by CVE-2020-9488. Upgrade nacos libs caused by CVE-2021-29441 and CVE-2021-29442. Upgrade netty caused by CVE-2019-20444, CVE-2019-20445, CVE-2019-16869, CVE-2020-11612, CVE-2021-21290, CVE-2021-21295 and CVE-2021-21409. Upgrade consul client caused by CVE-2018-1000844, CVE-2018-1000850. Upgrade zookeeper caused by CVE-2019-0201, zookeeper cluster coordinator plugin now requires zookeeper server 3.5+. Upgrade snake yaml caused by CVE-2017-18640. Upgrade embed tomcat caused by CVE-2020-13935. Upgrade commons-lang3 to avoid potential NPE in some JDK versions. OAL supports generating metrics from events. Support endpoint name grouping by OpenAPI definitions. Concurrent create PrepareRequest when persist Metrics Fix CounterWindow increase computing issue. Performance: optimize Envoy ALS analyzer performance in high traffic load scenario (reduce ~1cpu in ~10k RPS). Performance: trim useless metadata fields in Envoy ALS metadata to improve performance. Fix: slowDBAccessThreshold dynamic config error when not configured. Performance: cache regex pattern and result, optimize string concatenation in Envy ALS analyzer. Performance: cache metrics id and entity id in Metrics and ISource. Performance: enhance persistent session mechanism, about differentiating cache timeout for different dimensionality metrics. The timeout of the cache for minute and hour level metrics has been prolonged to ~5 min. Performance: Add L1 aggregation flush period, which reduce the CPU load and help young GC. Support connectTimeout and socketTimeout settings for ElasticSearch6 and ElasticSearch7 storages. 
Re-implement storage session mechanism, cached metrics are removed only according to their last access timestamp, rather than first time. This makes sure hot data never gets removed unexpectedly. Support session expired threshold configurable. Fix InfluxDB storage-plugin Metrics#multiGet issue. Replace zuul proxy with spring cloud gateway 2.x. in webapp module. Upgrade etcd cluster coordinator and dynamic configuration to v3.x. Configuration: Allow configuring server maximum request header size and ES index template order. Add thread state metric and class loaded info metric to JVMMetric. Performance: compile LAL DSL statically and run with type checked. Add pagination to event query protocol. Performance: optimize Envoy error logs persistence performance. Support envoy cluster manager metrics. Performance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026rsquo;t require the data queryable immediately after the insert and update anymore. Performance: share flushInterval setting for both metrics and record data, due to synchronous persistence mechanism removed. Record flush interval used to be hardcoded as 10s. Remove syncBulkActions in ElasticSearch storage option. Increase the default bulkActions(env, SW_STORAGE_ES_BULK_ACTIONS) to 5000(from 1000). Increase the flush interval of ElasticSearch indices to 15s(from 10s) Provide distinct for elements of metadata lists. Due to the more aggressive asynchronous flush, metadata lists have more chances including duplicate elements. Don\u0026rsquo;t need this as indicate anymore. Reduce the flush period of hour and day level metrics, only run in 4 times of regular persistent period. This means default flush period of hour and day level metrics are 25s * 4. Performance: optimize IDs read of ElasticSearch storage options(6 and 7). Use the physical index rather than template alias name. Adjust index refresh period as INT(flushInterval * 2/3), it used to be as same as bulk flush period. At the edge case, in low traffic(traffic \u0026lt; bulkActions in the whole period), there is a possible case, 2 period bulks are included in one index refresh rebuild operation, which could cause version conflicts. And this case can\u0026rsquo;t be fixed through core/persistentPeriod as the bulk fresh is not controlled by the persistent timer anymore. The core/maxSyncOperationNum setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. The core/syncThreads setting(added in 8.5.0) is removed due to metrics persistence is fully asynchronous. Optimization: Concurrency mode of execution stage for metrics is removed(added in 8.5.0). Only concurrency of prepare stage is meaningful and kept. Fix -meters metrics topic isn\u0026rsquo;t created with namespace issue Enhance persistent session timeout mechanism. Because the enhanced session could cache the metadata metrics forever, new timeout mechanism is designed for avoiding this specific case. Fix Kafka transport topics are created duplicated with and without namespace issue Fix the persistent session timeout mechanism bug. Fix possible version_conflict_engine_exception in bulk execution. Fix PrometheusMetricConverter may throw an IllegalArgumentException when convert metrics to SampleFamily Filtering NaN value samples when build SampleFamily Add Thread and ClassLoader Metrics for the self-observability and otel-oc-rules Simple optimization of trace sql query statement. 
{"content":"8.8.0 Project  Split javaagent into the skywalking-java repository. https://github.com/apache/skywalking-java Merge Dockerfiles from apache/skywalking-docker into this codebase.  OAP Server  Fix CVE-2021-35515, CVE-2021-35516, CVE-2021-35517, CVE-2021-36090. Upgrade org.apache.commons:commons-compress to 1.21. Upgrade the Kubernetes Java client from 12.0.1 to 13.0.0. Add an event HTTP receiver. Support the Metric level function serviceRelation in MAL. Support binding envoy metrics into the topology. Fix the openapi-definitions folder not being read correctly. Trace segments are no longer recognized as TopN sample services; this was added experimentally through #4694 but caused a performance impact. Remove version and endTime in the segment entity to reduce the indexing payload. Fix mapper_parsing_exception in ElasticSearch 7.14. Support component IDs for the Go-Kratos framework. [Break Change] Remove endpoint name in the trace query condition; only support query by endpoint id. Fix the ProfileSnapshotExporterTest case on OpenJDK Runtime Environment AdoptOpenJDK-11.0.11+9 (build 11.0.11+9), MacOS. [Break Change] Remove page path in the browser log query condition; only support query by page path id. [Break Change] Remove endpoint name in the backend log query condition; only support query by endpoint id. [Break Change] Fix typo for the column page_path_id (was pate_path_id) of the storage entity browser_error_log. Add a component id for the Python falcon plugin. Add rpcStatusCode for the rpc.status_code tag; the responseCode field is marked as deprecated and replaced by the httpResponseStatusCode field. Remove the duplicated tags to reduce the storage payload. Add a new API to test the log analysis language. Harden the security of the Groovy-based DSLs, MAL and LAL. Fix distinct in Service/Instance/Endpoint query not working. Support collection types in the dynamic configuration core. Support zookeeper grouped dynamic configurations. Fix NPE when OAP nodes synchronize events with each other in cluster mode. Support k8s configmap grouped dynamic configurations (a ConfigMap sketch follows further below). Add a desc sort function in the H2 and ElasticSearch implementations of IBrowserLogQueryDAO. Support configuring the sampling policy for the service dimension on the backend side, dynamically through the configuration module or statically through the trace-sampling-policy-settings.yml file (a sketch of the file follows below). Dynamic configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicy.
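The sampling-policy entry above introduces trace-sampling-policy-settings.yml for static, service-dimension sampling settings. The sketch below shows an assumed shape of that file, inferred only from the option names mentioned in this changelog (a sample rate and a slow-trace duration threshold, per default and per service); the exact keys should be verified against the backend trace sampling documentation.

```yaml
# Assumed shape of trace-sampling-policy-settings.yml, inferred from the option
# names in this entry; verify the keys against the backend sampling documentation.
default:
  rate: 10000          # samples per 10000 segments; 10000 keeps everything
  duration: -1         # slow-trace threshold in ms; -1 disables the override
services:
  - name: order-service   # hypothetical service name
    rate: 1000            # keep 10% of this service's segments
    duration: 10000       # always keep traces slower than 10 seconds
```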
Static configurations agent-analyzer.default.sampleRate and agent-analyzer.default.slowTraceSegmentThreshold are replaced by agent-analyzer.default.traceSamplingPolicySettingsFile. Fix dynamic configuration watch implementation current value not null when the config is deleted. Fix LoggingConfigWatcher return watch.value would not consistent with the real configuration content. Fix ZookeeperConfigWatcherRegister.readConfig() could cause NPE when data.getData() is null. Support nacos grouped dynamic configurations. Support for filter function filtering of int type values. Support mTLS for gRPC channel. Add yaml file suffix limit when reading ui templates. Support consul grouped dynamic configurations. Fix H2MetadataQueryDAO.searchService doesn\u0026rsquo;t support auto grouping. Rebuilt ElasticSearch client on top of their REST API. Fix ElasticSearch storage plugin doesn\u0026rsquo;t work when hot reloading from secretsManagementFile. Support etcd grouped dynamic configurations. Unified the config word namespace in the project. Switch JRE base image for dev images. Support apollo grouped dynamic configurations. Fix ProfileThreadSnapshotQuery.queryProfiledSegments adopts a wrong sort function Support gRPC sync grouped dynamic configurations. Fix H2EventQueryDAO doesn\u0026rsquo;t sort data by Event.START_TIME and uses a wrong pagination query. Fix LogHandler of kafka-fetcher-plugin cannot recognize namespace. Improve the speed of writing TiDB by batching the SQL execution. Fix wrong service name when IP is node IP in k8s-mesh. Support dynamic configurations for openAPI endpoint name grouping rule. Add component definition for Alibaba Druid and HikariCP. Fix Hour and Day dimensionality metrics not accurate, due to the cache read-then-clear mechanism conflicts with low down metrics flush period added in 8.7.0. Fix Slow SQL sampling not accurate, due to TopN works conflict with cache read-then-clear mechanism. The persistent cache is only read when necessary. Add component definition for Alibaba Fastjson. Fix entity(service/instance/endpoint) names in the MAL system(prometheus, native meter, open census, envoy metric service) are not controlled by core\u0026rsquo;s naming-control mechanism. Upgrade netty version to 4.1.68.Final avoid cve-2021-37136.  UI  Fix not found error when refresh UI. Update endpointName to endpointId in the query trace condition. Add Python falcon icon on the UI. Fix searching endpoints with keywords. Support clicking the service name in the chart to link to the trace or log page. Implement the Log Analysis Language text regexp debugger. Fix fetching nodes and calls with serviceIds on the topology side. Implement Alerts for query errors. Fixes graph parameter of query for topology metrics.  Documentation  Add a section in Log Collecting And Analysis doc, introducing the new Python agent log reporter. Add one missing step in otel-receiver doc about how to activate the default receiver. Reorganize dynamic configuration doc. Add more description about meter configurations in backend-meter doc. Fix typo in endpoint-grouping-rules doc.  All issues and pull requests are here\n","title":"8.8.0","url":"/docs/main/latest/en/changes/changes-8.8.0/"},{"content":"8.8.1 OAP Server  Fix wrong (de)serializer of ElasticSearch client for OpenSearch storage. Fix that traces query with tags will report error. Replace e2e simple cases to e2e-v2. Fix endpoint dependency breaking.  UI  Delete duplicate calls for endpoint dependency.  Documentation All issues and pull requests are here\n","title":"8.8.1","url":"/docs/main/latest/en/changes/changes-8.8.1/"},{"content":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources.
Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. Change SO11Y metric envoy_als_in_count to calculate the ALS message count. Support Istio 1.10.3, 1.11.4, 1.12.0 release.(Tested through e2e) Add filter mechanism in MAL core to filter metrics. Fix concurrency bug in MAL increase-related calculation. Fix a null pointer bug when building SampleFamily. Fix the so11y latency of persistence execution latency not correct in ElasticSearch storage. Add MeterReportService collectBatch method. Add OpenSearch 1.2.0 to test and verify it works. Upgrade grpc-java to 1.42.1 and protoc to 3.17.3 to allow using native Mac osx-aarch_64 artifacts. Fix TopologyQuery.loadEndpointRelation bug. Support using IoTDB as a new storage option. Add customized envoy ALS protocol receiver for satellite transmit batch data. Remove logback dependencies in IoTDB plugin. Fix StorageModuleElasticsearchProvider doesn\u0026rsquo;t watch on trustStorePath. Fix a wrong check about entity if GraphQL at the endpoint relation level.  UI  Optimize endpoint dependency. Show service name by hovering nodes in the sankey chart. Add Apache Kylin logo. Add ClickHouse logo. Optimize the style and add tips for log conditions. Fix the condition for trace table. Optimize profile functions. Implement a reminder to clear cache for dashboard templates. 
Support +/- hh:mm in TimeZone setting. Optimize global settings. Fix current endpoint for endpoint dependency. Add version in the global settings popup. Optimize Log page style. Avoid some abnormal settings. Fix query condition of events.  Documentation  Enhance documents about the data report and query protocols. Restructure documents about receivers and fetchers.  Remove general receiver and fetcher docs Add more specific menu with docs to help users to find documents easier.   Add a guidance doc about the logic endpoint. Link Satellite as Load Balancer documentation and compatibility with satellite.  All issues and pull requests are here\n","title":"8.9.0","url":"/docs/main/latest/en/changes/changes-8.9.0/"},{"content":"8.9.0 Project  E2E tests immigrate to e2e-v2. Support JDK 16 and 17. Add Docker images for arm64 architecture.  OAP Server  Add component definition for Jackson. Fix that zipkin-receiver plugin is not packaged into dist. Upgrade Armeria to 1.12, upgrade OpenSearch test version to 1.1.0. Add component definition for Apache-Kylin. Enhance get generation mechanism of OAL engine, support map type of source\u0026rsquo;s field. Add tag(Map) into All, Service, ServiceInstance and Endpoint sources. Fix funcParamExpression and literalExpression can\u0026rsquo;t be used in the same aggregation function. Support cast statement in the OAL core engine. Support (str-\u0026gt;long) and (long) for string to long cast statement. Support (str-\u0026gt;int) and (int) for string to int cast statement. Support Long literal number in the OAL core engine. Support literal string as parameter of aggregation function. Add attributeExpression and attributeExpressionSegment in the OAL grammar tree to support map type for the attribute expression. Refactor the OAL compiler context to improve readability. Fix wrong generated codes of hashCode and remoteHashCode methods for numeric fields. Support != null in OAL engine. Add Message Queue Consuming Count metric for MQ consuming service and endpoint. Add Message Queue Avg Consuming Latency metric for MQ consuming service and endpoint. Support -Inf as bucket in the meter system. Fix setting wrong field when combining Events. Support search browser service. Add getProfileTaskLogs to profile query protocol. Set SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG, SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG default true. Fix unexpected deleting due to TTL mechanism bug for H2, MySQL, TiDB and PostgreSQL. Add a GraphQL query to get OAP version, display OAP version in startup message and error logs. Fix TimeBucket missing in H2, MySQL, TiDB and PostgreSQL bug, which causes TTL doesn\u0026rsquo;t work for service_traffic. Fix TimeBucket missing in ElasticSearch and provide compatible storage2Entity for previous versions. Fix ElasticSearch implementation of queryMetricsValues and readLabeledMetricsValues doesn\u0026rsquo;t fill default values when no available data in the ElasticSearch server. Fix config yaml data type conversion bug when meets special character like !. Optimize metrics of minute dimensionality persistence. The value of metrics, which has declaration of the default value and current value equals the default value logically, the whole row wouldn\u0026rsquo;t be pushed into database. Fix max function in OAL doesn\u0026rsquo;t support negative long. Add MicroBench module to make it easier for developers to write JMH test. Upgrade Kubernetes Java client to 14.0.0, supports GCP token refreshing and fixes some bugs. 
{"content":"8.9.1 Project  Upgrade log4j2 to 2.15.0 for CVE-2021-44228  ","title":"8.9.1","url":"/docs/main/latest/en/changes/changes-8.9.1/"},
{"content":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. These CVEs only take effect on a JDK where JNDI is enabled by default. Notice, using the JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids the CVEs (a sketch follows this Project section). Upgrade maven-wrapper to 3.1.0 and maven to 3.8.4 for performance improvements and more native ARM support. Exclude unnecessary libs when building under JDK 9+. Migrate the base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add an E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to the E2E test matrix for verification. Upgrade the Apache parent pom version to 25. Use the plugin versions defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix an incompatibility with M1’s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for the Booster UI build. Improve CI with the new GHA feature “run failed jobs”. Fix ./mvnw compile not working if ./mvnw install has not been executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in the official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies (not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).
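The log4j2 item above names two mitigation routes besides upgrading: the JVM option -Dlog4j2.formatMsgNoLookups=true and the LOG4J_FORMAT_MSG_NO_LOOKUPS environment variable. A hedged sketch of the equivalent programmatic form follows; the class name is hypothetical, it only helps if it runs before Log4j2 initializes, and upgrading log4j2 (as the release does) remains the real fix:

```java
// Hypothetical helper, for illustration only; the documented routes are the
// JVM flag -Dlog4j2.formatMsgNoLookups=true or the LOG4J_FORMAT_MSG_NO_LOOKUPS env var.
public final class Log4jLookupGuard {

    private Log4jLookupGuard() {
    }

    // Effective only if invoked before the first LogManager.getLogger(...) call,
    // because Log4j2 reads the property during its own initialization.
    public static void disableMessageLookups() {
        System.setProperty("log4j2.formatMsgNoLookups", "true");
    }

    public static void main(String[] args) {
        disableMessageLookups();
        // ... bootstrap the rest of the application afterwards.
    }
}
```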
OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. 
Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. 
Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","title":"9.0.0","url":"/docs/main/v9.1.0/en/changes/changes-9.0.0/"},{"content":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). 
Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. 
Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. 
Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","title":"9.0.0","url":"/docs/main/v9.2.0/en/changes/changes-9.0.0/"},{"content":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). 
Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. 
Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. 
Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","title":"9.0.0","url":"/docs/main/v9.3.0/en/changes/changes-9.0.0/"},{"content":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. 
Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. Limit the length of Event#parameters. 
Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. 
This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","title":"9.0.0","url":"/docs/main/v9.4.0/en/changes/changes-9.0.0/"},{"content":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. 
Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. 
Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. 
This cause the topology is not merged correctly. Fix event type of export data is incorrect, it was EventType.TOTAL always. Reduce redundancy ThreadLocal in MAL core. Improve MAL performance. Trim tag\u0026rsquo;s key and value in log query. Refactor IoTDB storage plugin, add IoTDBDataConverter and fix ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for oap self observability E2E: Add verify OAP eBPF Profiling. Let multiGet could query without tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add Error Prone Annotations dependency to suppress warnings, which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI Templates have been redesigned totally. GraphQL query is minimal compatible for metadata and metrics query. Remove unused jars (log4j-api.jar) in classpath. Bump up netty version to fix CVE. Add Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control user edit UI template. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruction doc menu for v9. Update backend-alarm.md doc, support op \u0026ldquo;=\u0026rdquo; to \u0026ldquo;==\u0026rdquo;. Update backend-meter.md doc . Add \u0026lt;STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System\u0026gt; paper. Add Academy menu for recommending articles. Remove All source relative document and examples. Update Booster UI\u0026rsquo;s dependency licenses. Add profiling doc, and remove service mesh intro doc(not necessary). Add a doc for virtual database. Rewrite UI introduction. Update k8s-monitoring, backend-telemetry and v9-version-upgrade doc for v9.  All issues and pull requests are here\n","title":"9.0.0","url":"/docs/main/v9.5.0/en/changes/changes-9.0.0/"},{"content":"9.0.0 Project  Upgrade log4j2 to 2.17.1 for CVE-2021-44228, CVE-2021-45046, CVE-2021-45105 and CVE-2021-44832. This CVE only effects on JDK if JNDI is opened in default. Notice, using JVM option -Dlog4j2.formatMsgNoLookups=true or setting the LOG4J_FORMAT_MSG_NO_LOOKUPS=”true” environment variable also avoids CVEs. Upgrade maven-wrapper to 3.1.0, maven to 3.8.4 for performance improvements and ARM more native support. Exclude unnecessary libs when building under JDK 9+. Migrate base Docker image to eclipse-temurin as adoptopenjdk is deprecated. Add E2E test under Java 17. Upgrade protoc to 3.19.2. Add Istio 1.13.1 to E2E test matrix for verification. Upgrade Apache parent pom version to 25. Use the plugin version defined by the Apache maven parent.  Upgrade maven-dependency-plugin to 3.2.0. Upgrade maven-assembly-plugin to 3.3.0. Upgrade maven-failsafe-plugin to 2.22.2. Upgrade maven-surefire-plugin to 2.22.2. Upgrade maven-jar-plugin to 3.2.2. Upgrade maven-enforcer-plugin to 3.0.0. Upgrade maven-compiler-plugin to 3.10.0. Upgrade maven-resources-plugin to 3.2.0. Upgrade maven-source-plugin to 3.2.1.   Update codeStyle.xml to fix incompatibility on M1\u0026rsquo;s IntelliJ IDEA 2021.3.2. Update frontend-maven-plugin to 1.12 and npm to 16.14.0 for booster UI build. Improve CI with the GHA new feature \u0026ldquo;run failed jobs\u0026rdquo;. 
Fix ./mvnw compile not work if ./mvnw install is not executed at least once. Add JD_PRESERVE_LINE_FEEDS=true in official code style file. Upgrade OAP dependencies gson(2.9.0), guava(31.1), jackson(2.13.2), protobuf-java(3.18.4), commons-io(2.7), postgresql(42.3.3). Remove commons-pool and commons-dbcp from OAP dependencies(Not used before). Upgrade webapp dependencies gson(2.9.0), spring boot(2.6.6), jackson(2.13.2.2), spring cloud(2021.0.1), Apache httpclient(4.5.13).  OAP Server  Fix potential NPE in OAL string match and a bug when right-hand-side variable includes double quotes. Bump up Armeria version to 1.14.1 to fix CVE. Polish ETCD cluster config environment variables. Add the analysis of metrics in Satellite MetricsService. Fix Can't split endpoint id into 2 parts bug for endpoint ID. In the TCP in service mesh observability, endpoint name doesn\u0026rsquo;t exist in TCP traffic. Upgrade H2 version to 2.0.206 to fix CVE-2021-23463 and GHSA-h376-j262-vhq6. Extend column name override mechanism working for ValueColumnMetadata. Introduce new concept Layer and removed NodeType. More details refer to v9-version-upgrade. Fix query sort metrics failure in H2 Storage. Bump up grpc to 1.43.2 and protobuf to 3.19.2 to fix CVE-2021-22569. Add source layer and dest layer to relation. Follow protocol grammar fix GCPhrase -\u0026gt; GCPhase. Set layer to mesh relation. Add FAAS to SpanLayer. Adjust e2e case for V9 core. Support ZGC GC time and count metric collecting. Sync proto buffers files from upstream Envoy (Related to https://github.com/envoyproxy/envoy/pull/18955). Bump up GraphQL related dependencies to latest versions. Add normal to V9 service meta query. Support scope=ALL catalog for metrics. Bump up H2 to 2.1.210 to fix CVE-2022-23221. E2E: Add normal field to Service. Add FreeSql component ID(3017) of dotnet agent. E2E: verify OAP cluster model data aggregation. Fix SelfRemoteClient self observing metrics. Add env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT for cluster selectors zookeeper ,consul,etcd and nacos. Doc update: configuration-vocabulary,backend-cluster about env variables SW_CLUSTER_INTERNAL_COM_HOST and SW_CLUSTER_INTERNAL_COM_PORT. Add Python MysqlClient component ID(7013) with mapping information. Support Java thread pool metrics analysis. Fix IoTDB Storage Option insert null index value. Set the default value of SW_STORAGE_IOTDB_SESSIONPOOL_SIZE to 8. Bump up iotdb-session to 0.12.4. Bump up PostgreSQL driver to fix CVE. Add Guava EventBus component ID(123) of Java agent. Add OpenFunction component ID(5013). Expose configuration responseTimeout of ES client. Support datasource metric analysis. [Breaking Change] Keep the endpoint avg resp time meter name the same with others scope. (This may break 3rd party integration and existing alarm rule settings) Add Python FastAPI component ID(7014). Support all metrics from MAL engine in alarm core, including Prometheus, OC receiver, meter receiver. Allow updating non-metrics templates when structure changed. Set default connection timeout of ElasticSearch to 3000 milliseconds. Support ElasticSearch 8 and add it into E2E tests. Disable indexing for field alarm_record.tags_raw_data of binary type in ElasticSearch storage. Fix Zipkin receiver wrong condition for decoding gzip. Add a new sampler (possibility) in LAL. Unify module name receiver_zipkin to receiver-zipkin, remove receiver_jaeger from application.yaml. Introduce the entity of Process type. Set the length of event#parameters to 2000. 
Limit the length of Event#parameters. Support large service/instance/networkAddressAlias list query by using ElasticSearch scrolling API, add metadataQueryBatchSize to configure scrolling page size. Change default value of metadataQueryMaxSize from 5000 to 10000 Replace deprecated Armeria API BasicToken.of with AuthToken.ofBasic. Implement v9 UI template management protocol. Implement process metadata query protocol. Expose more ElasticSearch health check related logs to help to diagnose Health check fails. reason: No healthy endpoint. Add source event generated metrics to SERVICE_CATALOG_NAME catalog. [Breaking Change] Deprecate All from OAL source. [Breaking Change] Remove SRC_ALL: 'All' from OAL grammar tree. Remove all_heatmap and all_percentile metrics. Fix ElasticSearch normal index couldn\u0026rsquo;t apply mapping and update. Enhance DataCarrier#MultipleChannelsConsumer to add priority for the channels, which makes OAP server has a better performance to activate all analyzers on default. Activate receiver-otel#enabledOcRules receiver with k8s-node,oap,vm rules on default. Activate satellite,spring-sleuth for agent-analyzer#meterAnalyzerActiveFiles on default. Activate receiver-zabbix receiver with agent rule on default. Replace HTTP server (GraphQL, agent HTTP protocol) from Jetty with Armeria. [Breaking Change] Remove configuration restAcceptorPriorityDelta (env var: SW_RECEIVER_SHARING_JETTY_DELTA , SW_CORE_REST_JETTY_DELTA). [Breaking Change] Remove configuration graphql/path (env var: SW_QUERY_GRAPHQL_PATH). Add storage column attribute indexOnly, support ElasticSearch only index and not store some fields. Add indexOnly=true to SegmentRecord.tags, AlarmRecord.tags, AbstractLogRecord.tags, to reduce unnecessary storage. [Breaking Change] Remove configuration restMinThreads (env var: SW_CORE_REST_JETTY_MIN_THREADS , SW_RECEIVER_SHARING_JETTY_MIN_THREADS). Refactor the core Builder mechanism, new storage plugin could implement their own converter and get rid of hard requirement of using HashMap to communicate between data object and database native structure. [Breaking Change] Break all existing 3rd-party storage extensions. Remove hard requirement of BASE64 encoding for binary field. Add complexity limitation for GraphQL query to avoid malicious query. Add Column.shardingKeyIdx for column definition for BanyanDB.  Sharding key is used to group time series data per metric of one entity in one place (same sharding and/or same row for column-oriented database). For example, ServiceA's traffic gauge, service call per minute, includes following timestamp values, then it should be sharded by service ID [ServiceA(encoded ID): 01-28 18:30 values-1, 01-28 18:31 values-2, 01-28 18:32 values-3, 01-28 18:32 values-4] BanyanDB is the 1st storage implementation supporting this. It would make continuous time series metrics stored closely and compressed better. NOTICE, this sharding concept is NOT just for splitting data into different database instances or physical files.  Support ElasticSearch template mappings properties parameters and _source update. Implement the eBPF profiling query and data collect protocol. [Breaking Change] Remove Deprecated responseCode from sources, including Service, ServiceInstance, Endpoint Enhance endpoint dependency analysis to support cross threads cases. Refactor span analysis code structures. Remove isNotNormal service requirement when use alias to merge service topology from client side. All RPCs' peer services from client side are always normal services. 
This caused the topology not to be merged correctly. Fix the incorrect event type of export data; it was always EventType.TOTAL. Reduce redundant ThreadLocal usage in the MAL core. Improve MAL performance. Trim the tag's key and value in log queries. Refactor the IoTDB storage plugin, add IoTDBDataConverter and fix a ModifyCollectionInEnhancedForLoop bug. Bump up iotdb-session to 0.12.5. Fix the configuration of Aggregation and GC Count metrics for OAP self observability. E2E: Add verification of OAP eBPF Profiling. Let multiGet query without a tag value in the InfluxDB storage plugin. Adjust MAL for V9, remove some groups, add a new Service function for the custom delimiter. Add service catalog DatabaseSlowStatement. Add the Error Prone Annotations dependency to suppress warnings which are not errors.  UI  [Breaking Change] Introduce Booster UI, remove RocketBot UI. [Breaking Change] UI templates have been totally redesigned. The GraphQL query is minimally compatible for metadata and metrics queries. Remove unused jars (log4j-api.jar) from the classpath. Bump up the netty version to fix a CVE. Add the Database Connection pool metric. Re-implement UI template initialization for Booster UI. Add environment variable SW_ENABLE_UPDATE_UI_TEMPLATE to control whether users can edit UI templates. Add the Self Observability template of the SkyWalking Satellite. Add the template of OpenFunction observability.  Documentation  Reconstruct the doc menu for v9. Update the backend-alarm.md doc, change the supported op = to ==. Update the backend-meter.md doc. Add the STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System paper. Add an Academy menu for recommending articles. Remove All source related documents and examples. Update Booster UI's dependency licenses. Add a profiling doc, and remove the service mesh intro doc (not necessary). Add a doc for virtual database. Rewrite the UI introduction. Update the k8s-monitoring, backend-telemetry and v9-version-upgrade docs for v9.  All issues and pull requests are here\n","title":"9.0.0","url":"/docs/main/v9.6.0/en/changes/changes-9.0.0/"},
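As referenced next to the Column.shardingKeyIdx entry above, the following is a minimal, self-contained sketch of the sharding-key idea described in the 9.0.0 notes. Only the attribute name shardingKeyIdx comes from the release note; the annotation shape, class names and field names are hypothetical and are not SkyWalking's actual storage API.

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.lang.reflect.Field;

public class ShardingKeySketch {
    // Hypothetical column annotation carrying a sharding-key index.
    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.FIELD)
    @interface Column {
        String name();
        // -1 means the column is not part of the sharding key; 0, 1, ... give the key order.
        int shardingKeyIdx() default -1;
    }

    // A made-up metric row. Rows sharing service_id land in the same shard/series,
    // so continuous time series values for one service stay together and compress better.
    static class ServiceCpmMetric {
        @Column(name = "service_id", shardingKeyIdx = 0)
        String serviceId;

        @Column(name = "time_bucket")
        long timeBucket;

        @Column(name = "value")
        long value;
    }

    public static void main(String[] args) {
        // Print the columns flagged as part of the sharding key.
        for (Field field : ServiceCpmMetric.class.getDeclaredFields()) {
            Column c = field.getAnnotation(Column.class);
            if (c != null && c.shardingKeyIdx() >= 0) {
                System.out.println("sharding key[" + c.shardingKeyIdx() + "] = " + c.name());
            }
        }
    }
}

The sketch only illustrates how a per-column index can express the grouping described above; the real metric classes and their column definitions live in the OAP core.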
[Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","title":"9.1.0","url":"/docs/main/v9.5.0/en/changes/changes-9.1.0/"},{"content":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  
OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. 
[Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","title":"9.1.0","url":"/docs/main/v9.6.0/en/changes/changes-9.1.0/"},{"content":"9.1.0 Project  [IMPORTANT] Remove InfluxDB 1.x and Apache IoTDB 0.X as storage options, check details at here. Remove converter-moshi 2.5.0, influx-java 2.15, iotdb java 0.12.5, thrift 0.14.1, moshi 1.5.0, msgpack 0.8.16 dependencies. Remove InfluxDB and IoTDB relative codes and E2E tests. Upgrade OAP dependencies zipkin to 2.23.16, H2 to 2.1.212, Apache Freemarker to 2.3.31, gRPC-java 1.46.0, netty to 4.1.76. 
Upgrade Webapp dependencies, spring-cloud-dependencies to 2021.0.2, logback-classic to 1.2.11 [IMPORTANT] Add BanyanDB storage implementation. Notice BanyanDB is currently under active development and SHOULD NOT be used in production cluster.  OAP Server  Add component definition(ID=127) for Apache ShenYu (incubating). Fix Zipkin receiver: Decode spans error, missing Layer for V9 and wrong time bucket for generate Service and Endpoint. [Refactor] Move SQLDatabase(H2/MySQL/PostgreSQL), ElasticSearch and BanyanDB specific configurations out of column. Support BanyanDB global index for entities. Log and Segment record entities declare this new feature. Remove unnecessary analyzer settings in columns of templates. Many were added due to analyzer\u0026rsquo;s default value. Simplify the Kafka Fetch configuration in cluster mode. [Breaking Change] Update the eBPF Profiling task to the service level, please delete index/table: ebpf_profiling_task, process_traffic. Fix event can\u0026rsquo;t split service ID into 2 parts. Fix OAP Self-Observability metric GC Time calculation. Set SW_QUERY_MAX_QUERY_COMPLEXITY default value to 1000 Webapp module (for UI) enabled compression. [Breaking Change] Add layer field to event, report an event without layer is not allowed. Fix ES flush thread stops when flush schedule task throws exception, such as ElasticSearch flush failed. Fix ES BulkProcessor in BatchProcessEsDAO was initialized multiple times and created multiple ES flush schedule tasks. HTTPServer support the handler register with allowed HTTP methods. [Critical] Revert Enhance DataCarrier#MultipleChannelsConsumer to add priority to avoid consuming issues. Fix the problem that some configurations (such as group.id) did not take effect due to the override order when using the kafkaConsumerConfig property to extend the configuration in Kafka Fetcher. Remove build time from the OAP version. Add data-generator module to run OAP in testing mode, generating mock data for testing. Support receive Kubernetes processes from gRPC protocol. Fix the problem that es index(TimeSeriesTable, eg. endpoint_traffic, alarm_record) didn\u0026rsquo;t create even after rerun with init-mode. This problem caused the OAP server to fail to start when the OAP server was down for more than a day. Support autocomplete tags in traces query. [Breaking Change] Replace all configurations **_JETTY_** to **_REST_**. Add the support eBPF profiling field into the process entity. E2E: fix log test miss verify LAL and metrics. Enhance Converter mechanism in kernel level to make BanyanDB native feature more effective. Add TermsAggregation properties collect_mode and execution_hint. Add \u0026ldquo;execution_hint\u0026rdquo;: \u0026ldquo;map\u0026rdquo;, \u0026ldquo;collect_mode\u0026rdquo;: \u0026ldquo;breadth_first\u0026rdquo; for aggregation and topology query to improve 5-10x performance. Clean up scroll contexts after used. Support autocomplete tags in logs query. Enhance Deprecated MetricQuery(v1) getValues querying to asynchronous concurrency query Fix the pod match error when the service has multiple selector in kubernetes environment. VM monitoring adapts the 0.50.0 of the opentelemetry-collector. Add Envoy internal cost metrics. Remove Layer concept from ServiceInstance. Remove unnecessary onCompleted on gRPC onError callback. Remove Layer concept form Process. Update to list all eBPF profiling schedulers without duration. Storage(ElasticSearch): add search options to tolerate inexisting indices. 
Fix the problem that MQ has the wrong Layer type. Fix NoneStream model has wrong downsampling(was Second, should be Minute). SQL Database: provide @SQLDatabase.AdditionalEntity to support create additional tables from a model. [Breaking Change] SQL Database: remove SQL Database config maxSizeOfArrayColumn and numOfSearchableValuesPerTag. [Breaking Change] SQL Database: move Tags list from Segment,Logs,Alarms to their additional table. [Breaking Change] Remove total field in Trace, Log, Event, Browser log, and alarm list query. Support OFF_CPU eBPF Profiling. Fix SumAggregationBuilder#build should use the SumAggregation rather than MaxAggregation. Add TiDB, OpenSearch, Postgres storage optional to Trace and eBPF Profiling E2E testing. Add OFF CPU eBPF Profiling E2E Testing. Fix searchableTag as rpc.status_code and http.status_code. status_code had been removed. Fix scroll query failure exception. Add profileDataQueryBatchSize config in Elasticsearch Storage. Add APIs to query Pod log on demand. Remove OAL for events. Simplify the format index name logical in ES storage. Add instance properties extractor in MAL. Support Zipkin traces collect and zipkin traces query API. [Breaking Change] Zipkin receiver mechanism changes and traces do not stream into OAP Segment anymore.  UI  General service instance: move Thread Pool from JVM to Overview, fix JVM GC Count calculation. Add Apache ShenYu (incubating) component LOGO. Show more metrics on service/instance/endpoint list on the dashboards. Support average values of metrics on the service/list/endpoint table widgets, with pop-up linear graph. Fix viewLogs button query no data. Fix UTC when page loads. Implement the eBPF profile widget on dashboard. Optimize the trace widget. Avoid invalid query for topology metrics. Add the alarm and log tag tips. Fix spans details and task logs. Verify query params to avoid invalid queries. Mobile terminal adaptation. Fix: set dropdown for the Tab widget, init instance/endpoint relation selectors, update sankey graph. Add eBPF Profiling widget into General service, Service Mesh and Kubernetes tabs. Fix jump to endpoint-relation dashboard template. Fix set graph options. Remove the Layer filed from the Instance and Process. Fix date time picker display when set hour to 0. Implement tags auto-complete for Trace and Log. Support multiple trees for the flame graph. Fix the page doesn\u0026rsquo;t need to be re-rendered when the url changes. Remove unexpected data for exporting dashboards. Fix duration time. Remove the total field from query conditions. Fix minDuration and maxDuration for the trace filter. Add Log configuration for the browser templates. Fix query conditions for the browser logs. Add Spanish Translation. Visualize the OFF CPU eBPF profiling. Add Spanish language to UI. Sort spans with startTime or spanId in a segment. Visualize a on-demand log widget. Fix activate the correct tab index after renaming a Tabs name. FaaS dashboard support on-demand log (OpenFunction/functions-framework-go version \u0026gt; 0.3.0).  Documentation  Add eBPF agent into probe introduction.  All issues and pull requests are here\n","title":"9.1.0","url":"/docs/main/v9.7.0/en/changes/changes-9.1.0/"},{"content":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. 
Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. 
[Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","title":"9.2.0","url":"/docs/main/latest/en/changes/changes-9.2.0/"},{"content":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  
OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. 
[Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","title":"9.2.0","url":"/docs/main/next/en/changes/changes-9.2.0/"},{"content":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  
OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. 
[Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","title":"9.2.0","url":"/docs/main/v9.2.0/en/changes/changes/"},{"content":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  
OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. 
[Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","title":"9.2.0","url":"/docs/main/v9.3.0/en/changes/changes-9.2.0/"},{"content":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  
OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. 
[Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","title":"9.2.0","url":"/docs/main/v9.4.0/en/changes/changes-9.2.0/"},{"content":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  
OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. 
[Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","title":"9.2.0","url":"/docs/main/v9.5.0/en/changes/changes-9.2.0/"},{"content":"9.2.0 Project  [Critical] Fix a low performance issue of metrics persistent in the ElasticSearch storage implementation. One single metric could have to wait for an unnecessary 7~10s(System Env Variable SW_STORAGE_ES_FLUSH_INTERVAL) since 8.8.0 - 9.1.0 releases. Upgrade Armeria to 1.16.0, Kubernetes Java client to 15.0.1.  
OAP Server  Add more entities for Zipkin to improve performance. ElasticSearch: scroll id should be updated when scrolling as it may change. Mesh: fix only last rule works when multiple rules are defined in metadata-service-mapping.yaml. Support sending alarm messages to PagerDuty. Support Zipkin kafka collector. Add VIRTUAL detect type to Process for Network Profiling. Add component ID(128) for Java Hutool plugin. Add Zipkin query exception handler, response error message for illegal arguments. Fix a NullPointerException in the endpoint analysis, which would cause missing MQ-related LocalSpan in the trace. Add forEach, processRelation function to MAL expression. Add expPrefix, initExp in MAL config. Add component ID(7015) for Python Bottle plugin. Remove legacy OAL percentile functions, p99, p95, p90, p75, p50 func(s). Revert #8066. Keep all metrics persistent even it is default value. Skip loading UI templates if folder is empty or doesn\u0026rsquo;t exist. Optimize ElasticSearch query performance by using _mGet and physical index name rather than alias in these scenarios, (a) Metrics aggregation (b) Zipkin query (c) Metrics query (d) Log query Support the NETWORK type of eBPF Profiling task. Support sumHistogram in MAL. [Breaking Change] Make the eBPF Profiling task support to the service instance level, index/table ebpf_profiling_task is required to be re-created when bump up from previous releases. Fix race condition in Banyandb storage Support SUM_PER_MIN downsampling in MAL. Support sumHistogramPercentile in MAL. Add VIRTUAL_CACHE to Layer, to fix conjectured Redis server, which icon can\u0026rsquo;t show on the topology. [Breaking Change] Elasticsearch storage merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING) to shard metrics/meter indices into multi-physical indices as the previous versions(one index template per metric/meter aggregation function). In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. More details please refer to New ElasticSearch storage option explanation in 9.2.0 and backend-storage.md [Breaking Change] Index/table ebpf_profiling_schedule added a new column ebpf_profiling_schedule_id, the H2/Mysql/Tidb/Postgres storage users are required to re-created it when bump up from previous releases. Fix Zipkin trace query the max size of spans. Add tls and https component IDs for Network Profiling. Support Elasticsearch column alias for the compatibility between storage logicSharding model and no-logicSharding model. Support MySQL monitoring. Support PostgreSQL monitoring. Fix query services by serviceId error when Elasticsearch storage SW_STORAGE_ES_QUERY_MAX_SIZE \u0026gt; 10000. Support sending alarm messages to Discord. Fix query history process data failure. Optimize TTL mechanism for Elasticsearch storage, skip executed indices in one TTL rotation. Add Kubernetes support module to share codes between modules and reduce calls to Kubernetes API server. Bump up Kubernetes Java client to fix cve. Adapt OpenTelemetry native metrics protocol. [Breaking Change] rename configuration folder from otel-oc-rules to otel-rules. 
[Breaking Change] rename configuration field from enabledOcRules to enabledOtelRules and environment variable name from SW_OTEL_RECEIVER_ENABLED_OC_RULES to SW_OTEL_RECEIVER_ENABLED_OTEL_RULES. [Breaking Change] Fix JDBC TTL to delete additional tables data. SQL Database requires removing segment,segment_tag, logs, logs_tag, alarms, alarms_tag, zipkin_span, zipkin_query before OAP starts. SQL Database: add @SQLDatabase.ExtraColumn4AdditionalEntity to support add an extra column from parent to an additional table. Add component ID(131) for Java Micronaut plugin Add component ID(132) for Nats java client plugin  UI  Fix query conditions for the browser logs. Implement a url parameter to activate tab index. Fix clear interval fail when switch autoRefresh to off. Optimize log tables. Fix log detail pop-up page doesn\u0026rsquo;t work. Optimize table widget to hide the whole metric column when no metric is set. Implement the Event widget. Remove event menu. Fix span detail text overlap. Add Python Bottle Plugin Logo. Implement an association between widgets(line, bar, area graphs) with time. Fix tag dropdown style. Hide the copy button when db.statement is empty. Fix legend metrics for topology. Dashboard: Add metrics association. Dashboard: Fix FaaS-Root document link and topology service relation dashboard link. Dashboard: Fix Mesh-Instance metric Throughput. Dashboard: Fix Mesh-Service-Relation metric Throughput and Proxy Sidecar Internal Latency in Nanoseconds (Client Response). Dashboard: Fix Mesh-Instance-Relation metric Throughput. Enhance associations for the Event widget. Add event widgets in dashboard where applicable. Fix dashboard list search box not work. Fix short time range. Fix event widget incompatibility in Safari. Refactor the tags component to support searching for tag keys and values. Implement the log widget and the trace widget associate with each other, remove log tables on the trace widget. Add log widget to general service root. Associate the event widget with the trace and log widget. Add the MYSQL layer and update layer routers. Fix query order for trace list. Add a calculation to convert seconds to days. q* Add Spring Sleuth dashboard to general service instance. Support the process dashboard and create the time range text widget. Fix picking calendar with a wrong time range and setting a unique value for dashboard grid key. Add PostgreSQL to Database sub-menu. Implement the network profiling widget. Add Micronaut icon for Java plugin. Add Nats icon for Java plugin. Bump moment and @vue/cli-plugin-e2e-cypress. Add Network Profiling for Service Mesh DP instance and K8s pod panels.  Documentation  Fix invalid links in release docs. Clean up doc about event metrics. Add a table for metric calculations in the ui doc. Add an explanation for alerting kernel and its in-memory window mechanism. Add more docs for widget details. Update alarm doc introduce configuration property key Fix dependency license\u0026rsquo;s NOTICE and binary jar included issues in the source release. Add eBPF CPU profiling doc.  All issues and pull requests are here\n","title":"9.2.0","url":"/docs/main/v9.7.0/en/changes/changes-9.2.0/"},{"content":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. 
Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. 
This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. 
Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. 
Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","title":"9.3.0","url":"/docs/main/latest/en/changes/changes-9.3.0/"},{"content":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). 
Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. 
Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. 
Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. 
Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. 
Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","title":"9.3.0","url":"/docs/main/v9.3.0/en/changes/changes/"},{"content":"9.3.0 Project  Bump up the embedded swctl version in OAP Docker image.  OAP Server  Add component ID(133) for impala JDBC Java agent plugin and component ID(134) for impala server. Use prepareStatement in H2SQLExecutor#getByIDs.(No function change). Bump up snakeyaml to 1.32 for fixing CVE. Fix DurationUtils.convertToTimeBucket missed verify date format. Enhance LAL to support converting LogData to DatabaseSlowStatement. [Breaking Change] Change the LAL script format(Add layer property). Adapt ElasticSearch 8.1+, migrate from removed APIs to recommended APIs. Support monitoring MySQL slow SQLs. Support analyzing cache related spans to provide metrics and slow commands for cache services from client side Optimize virtual database, fix dynamic config watcher NPE when default value is null Remove physical index existing check and keep template existing check only to avoid meaningless retry wait in no-init mode. 
Make sure instance list ordered in TTL processor to avoid TTL timer never runs. Support monitoring PostgreSQL slow SQLs. [Breaking Change] Support sharding MySQL database instances and tables by Shardingsphere-Proxy. SQL-Database requires removing tables log_tag/segment_tag/zipkin_query before OAP starts, if bump up from previous releases. Fix meter functions avgHistogram, avgHistogramPercentile, avgLabeled, sumHistogram having data conflict when downsampling. Do sorting readLabeledMetricsValues result forcedly in case the storage(database) doesn\u0026rsquo;t return data consistent with the parameter list. Fix the wrong watch semantics in Kubernetes watchers, which causes heavy traffic to API server in some Kubernetes clusters, we should use Get State and Start at Most Recent semantic instead of Start at Exact because we don\u0026rsquo;t need the changing history events, see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-watch. Unify query services and DAOs codes time range condition to Duration. [Breaking Change]: Remove prometheus-fetcher plugin, please use OpenTelemetry to scrape Prometheus metrics and set up SkyWalking OpenTelemetry receiver instead. BugFix: histogram metrics sent to MAL should be treated as OpenTelemetry style, not Prometheus style: (-infinity, explicit_bounds[i]] for i == 0 (explicit_bounds[i-1], explicit_bounds[i]] for 0 \u0026lt; i \u0026lt; size(explicit_bounds) (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)  Support Golang runtime metrics analysis. Add APISIX metrics monitoring Support skywalking-client-js report empty service version and page path , set default version as latest and default page path as /(root). Fix the error fetching data (/browser_app_page_pv0) : Can't split endpoint id into 2 parts. [Breaking Change] Limit the max length of trace/log/alarm tag\u0026rsquo;s key=value, set the max length of column tags in tableslog_tag/segment_tag/alarm_record_tag and column query in zipkin_query and column tag_value in tag_autocomplete to 256. SQL-Database requires altering these columns' length or removing these tables before OAP starts, if bump up from previous releases. Optimize the creation conditions of profiling task. Lazy load the Kubernetes metadata and switch from event-driven to polling. Previously we set up watchers to watch the Kubernetes metadata changes, this is perfect when there are deployments changes and SkyWalking can react to the changes in real time. However when the cluster has many events (such as in large cluster or some special Kubernetes engine like OpenShift), the requests sent from SkyWalking becomes unpredictable, i.e. SkyWalking might send massive requests to Kubernetes API server, causing heavy load to the API server. This PR switches from the watcher mechanism to polling mechanism, SkyWalking polls the metadata in a specified interval, so that the requests sent to API server is predictable (~10 requests every interval, 3 minutes), and the requests count is constant regardless of the cluster\u0026rsquo;s changes. However with this change SkyWalking can\u0026rsquo;t react to the cluster changes in time, but the delay is acceptable in our case. Optimize the query time of tasks in ProfileTaskCache. Fix metrics was put into wrong slot of the window in the alerting kernel. Support sumPerMinLabeled in MAL. Bump up jackson databind, snakeyaml, grpc dependencies. Support export Trace and Log through Kafka. Add new config initialization mechanism of module provider. 
This is a ModuleManager lib kernel level change. [Breaking Change] Support new records query protocol, rename the column named service_id to entity_id for support difference entity. Please re-create top_n_database_statement index/table. Remove improper self-obs metrics in JvmMetricsHandler(for Kafka channel). gRPC stream canceling code is not logged as an error when the client cancels the stream. The client cancels the stream when the pod is terminated. [Breaking Change] Change the way of loading MAL rules(support pattern). Move k8s relative MAL files into /otel-rules/k8s. [Breaking Change] Refactor service mesh protobuf definitions and split TCP-related metrics to individual definition. Add TCP{Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation} sources and split TCP-related entities out from original Service,ServiceInstance,ServiceRelation,ServiceInstanceRelation. [Breaking Change] TCP-related source names are changed, fields of TCP-related sources are changed, please refer to the latest oal/tcp.oal file. Do not log error logs when failed to create ElasticSearch index because the index is created already. Add virtual MQ analysis for native traces. Support Python runtime metrics analysis. Support sampledTrace in LAL. Support multiple rules with different names under the same layer of LAL script. (Optimization) Reduce the buffer size(queue) of MAL(only) metric streams. Set L1 queue size as 1/20, L2 queue size as 1/2. Support monitoring MySQL/PostgreSQL in the cluster mode. [Breaking Change] Migrate to BanyanDB v0.2.0.  Adopt new OR logical operator for,  MeasureIDs query BanyanDBProfileThreadSnapshotQueryDAO query Multiple Event conditions query Metrics query   Simplify Group check and creation Partially apply UITemplate changes Support index_only Return CompletableFuture\u0026lt;Void\u0026gt; directly from BanyanDB client Optimize data binary parse methods in *LogQueryDAO Support different indexType Support configuration for TTL and (block|segment) intervals   Elasticsearch storage: Provide system environment variable(SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS) and support specify the settings (number_of_shards/number_of_replicas) for each index individually. Elasticsearch storage: Support update index settings (number_of_shards/number_of_replicas) for the index template after rebooting. Optimize MQ Topology analysis. Use entry span\u0026rsquo;s peer from the consumer side as source service when no producer instrumentation(no cross-process reference). Refactor JDBC storage implementations to reuse logics. Fix ClassCastException in LoggingConfigWatcher. Support span attached event concept in Zipkin and SkyWalking trace query. Support span attached events on Zipkin lens UI. Force UTF-8 encoding in JsonLogHandler of kafka-fetcher-plugin. Fix max length to 512 of entity, instance and endpoint IDs in trace, log, profiling, topN tables(JDBC storages). The value was 200 by default. Add component IDs(135, 136, 137) for EventMesh server and client-side plugins. Bump up Kafka client to 2.8.1 to fix CVE-2021-38153. Remove lengthEnvVariable for Column as it never works as expected. Add LongText to support longer logs persistent as a text type in ElasticSearch, instead of a keyword, to avoid length limitation. Fix wrong system variable name SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI. It was opaenapi. Fix not-time-series model blocking OAP boots in no-init mode. Fix ShardingTopologyQueryDAO.loadServiceRelationsDetectedAtServerSide invoke backend miss parameter serviceIds. 
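The histogram BugFix item above spells out the OpenTelemetry-style bucket boundaries that MAL now assumes. As a quick illustration, here is a self-contained Java sketch of that mapping; the class and method names are made up for the example and are not OAP code:

```java
import java.util.ArrayList;
import java.util.List;

// Illustrative only: turns OpenTelemetry explicit_bounds into the bucket intervals
// described in the changelog item above, i.e. bucket i covers
//   (-inf, bounds[0]]            for i == 0
//   (bounds[i-1], bounds[i]]     for 0 < i < bounds.length
//   (bounds[len-1], +inf)        for i == bounds.length
public final class OtelHistogramBuckets {
    public static List<double[]> toIntervals(double[] explicitBounds) {
        List<double[]> intervals = new ArrayList<>();
        for (int i = 0; i <= explicitBounds.length; i++) {
            double lower = (i == 0) ? Double.NEGATIVE_INFINITY : explicitBounds[i - 1];
            double upper = (i == explicitBounds.length) ? Double.POSITIVE_INFINITY : explicitBounds[i];
            intervals.add(new double[] {lower, upper});
        }
        return intervals;
    }

    public static void main(String[] args) {
        // bounds {10, 50, 100} -> 4 buckets: (-inf,10], (10,50], (50,100], (100,+inf)
        for (double[] b : toIntervals(new double[] {10, 50, 100})) {
            System.out.printf("lower=%s upper=%s%n", b[0], b[1]);
        }
    }
}
```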
Changed system variable SW_SUPERDATASET_STORAGE_DAY_STEP to SW_STORAGE_ES_SUPER_DATASET_DAY_STEP to be consistent with other ES storage related variables. Fix ESEventQueryDAO missing metric_table boolQuery criteria. Add default entity name(_blank) if absent to avoid NPE in the decoding. This caused Can't split xxx id into 2 parts. Support dynamic config the sampling strategy in network profiling. Zipkin module support BanyanDB storage. Zipkin traces query API, sort the result set by start time by default. Enhance the cache mechanism in the metric persistent process.  This cache only worked when the metric is accessible(readable) from the database. Once the insert execution is delayed due to the scale, the cache loses efficacy. It only works for the last time update per minute, considering our 25s period. Fix ID conflicts for all JDBC storage implementations. Due to the insert delay, the JDBC storage implementation would still generate another new insert statement.   [Breaking Change] Remove core/default/enableDatabaseSession config. [Breaking Change] Add @BanyanDB.TimestampColumn to identify which column in Record is providing the timestamp(milliseconds) for BanyanDB, since BanyanDB stream requires a timestamp in milliseconds. For SQL-Database: add new column timestamp for tables profile_task_log/top_n_database_statement, requires altering this column or removing these tables before OAP starts, if bump up from previous releases. Fix Elasticsearch storage: In No-Sharding Mode, add specific analyzer to the template before index creation to avoid update index error. Internal API: remove undocumented ElasticSearch API usage and use documented one. Fix BanyanDB.ShardingKey annotation missed in the generated OAL metrics classes. Fix Elasticsearch storage: Query sortMetrics missing transform real index column name. Rename BanyanDB.ShardingKey to BanyanDB.SeriesID. Self-Observability: Add counters for metrics reading from DB or cached. Dashboard:Metrics Persistent Cache Count. Self-Observability: Fix GC Time calculation. Fix Elasticsearch storage: In No-Sharding Mode, column\u0026rsquo;s property indexOnly not applied and cannot be updated. Update the trace_id field as storage only(cannot be queried) in top_n_database_statement, top_n_cache_read_command, top_n_cache_read_command index.  UI  Fix: tab active incorrectly, when click tab space Add impala icon for impala JDBC Java agent plugin. (Webapp)Bump up snakeyaml to 1.31 for fixing CVE-2022-25857 [Breaking Change]: migrate from Spring Web to Armeria, now you should use the environment variable name SW_OAP_ADDRESS to change the OAP backend service addresses, like SW_OAP_ADDRESS=localhost:12800,localhost:12801, and use environment variable SW_SERVER_PORT to change the port. Other Spring-related configurations don\u0026rsquo;t take effect anymore. Polish the endpoint list graph. Fix styles for an adaptive height. Fix setting up a new time range after clicking the refresh button. Enhance the process topology graph to support dragging nodes. UI-template: Fix metrics calculation in general-service/mesh-service/faas-function top-list dashboard. Update MySQL dashboard to visualize collected slow SQLs. Add virtual cache dashboard. Remove responseCode fields of all OAL sources, as well as examples to avoid user\u0026rsquo;s confusion. Remove All from the endpoints selector. Enhance menu configurations to make it easier to change. Update PostgreSQL dashboard to visualize collected slow SQLs. 
Add Golang runtime metrics and cpu/memory used rate panels in General-Instance dashboard. Add gateway apisix menu. Query logs with the specific service ID. Bump d3-color from 3.0.1 to 3.1.0. Add Golang runtime metrics and cpu/memory used rate panels in FaaS-Instance dashboard. Revert logs on trace widget. Add a sub-menu for virtual mq. Add readRecords to metric types. Verify dashboard names for new dashboards. Associate metrics with the trace widget on dashboards. Fix configuration panel styles. Remove a un-use icon. Support labeled value on the service/instance/endpoint list widgets. Add menu for virtual MQ. Set selector props and update configuration panel styles. Add Python runtime metrics and cpu/memory utilization panels to General-Instance and Fass-Instance dashboards. Enhance the legend of metrics graph widget with the summary table. Add apache eventMesh logo file. Fix conditions for trace profiling. Fix tag keys list and duration condition. Fix typo. Fix condition logic for trace tree data. Enhance tags component to search tags with the input value. Fix topology loading style. Fix update metric processor for the readRecords and remove readSampledRecords from metrics selector. Add trace association for FAAS dashboards. Visualize attached events on the trace widget. Add HTTP/1.x metrics and HTTP req/resp body collecting tabs on the network profiling widget. Implement creating tasks ui for network profiling widget. Fix entity types for ProcessRelation. Add trace association for general service dashboards.  Documentation  Add metadata-uid setup doc about Kubernetes coordinator in the cluster management. Add a doc for adding menus to booster UI. Move general good read blogs from Agent Introduction to Academy. Add re-post for blog Scaling with Apache SkyWalking in the academy list. Add re-post for blog Diagnose Service Mesh Network Performance with eBPF in the academy list. Add Security Notice doc. Add new docs for Report Span Attached Events data collecting protocol. Add new docs for Record query protocol Update Server Agents and Compatibility for PHP agent. Add docs for profiling. Update the network profiling documentation.  All issues and pull requests are here\n","title":"9.3.0","url":"/docs/main/v9.4.0/en/changes/changes-9.3.0/"},{"content":"9.4.0 Project  Bump up Zipkin and Zipkin lens UI dependency to 2.24.0. Bump up Apache parent pom version to 29. Bump up Armeria version to 1.21.0. Clean up maven pom.xmls. Bump up Java version to 11. Bump up snakeyaml to 2.0.  OAP Server  Add ServerStatusService in the core module to provide a new way to expose booting status to other modules. Adds Micrometer as a new component.(ID=141) Refactor session cache in MetricsPersistentWorker. Cache enhancement - don\u0026rsquo;t read new metrics from database in minute dimensionality.   // When // (1) the time bucket of the server's latest stability status is provided // 1.1 the OAP has booted successfully // 1.2 the current dimensionality is in minute. // 1.3 the OAP cluster is rebalanced due to scaling // (2) the metrics are from the time after the timeOfLatestStabilitySts // (3) the metrics don't exist in the cache // the kernel should NOT try to load it from the database. // // Notice, about condition (2), // for the specific minute of booted successfully, the metrics are expected to load from database when // it doesn't exist in the cache.  Remove the offset of metric session timeout according to worker creation sequence. Correct MetricsExtension annotations declarations in manual entities. Support component IDs' priority in process relation metrics. Remove abandon logic in MergableBufferedData, which caused unexpected no-update. Fix miss set LastUpdateTimestamp that caused the metrics session to expire. Rename MAL rule spring-sleuth.yaml to spring-micrometer.yaml. Fix memory leak in Zipkin API. Remove the dependency of refresh_interval of ElasticSearch indices from elasticsearch/flushInterval config. Now, it uses core/persistentPeriod + 5s as refresh_interval for all indices instead. Change elasticsearch/flushInterval to 5s(was 15s).
Optimize flushInterval of ElasticSearch BulkProcessor to avoid extra periodical flush in the continuous bulk streams. An unexpected dot is added when exp is a pure metric name and expPrefix != null. Support monitoring MariaDB. Remove measure/stream specific interval settings in BanyanDB. Add global-specific settings used to override global configurations (e.g segmentIntervalDays, blockIntervalHours) in BanyanDB. Use TTL-driven interval settings for the measure-default group in BanyanDB. Fix wrong group of non time-relative metadata in BanyanDB. Refactor StorageData#id to the new StorageID object from a String type. Support multiple component IDs in the service topology level. Add ElasticSearch.Keyword annotation to declare the target field type as keyword. [Breaking Change] Column component_id of service_relation_client_side and service_relation_server_side have been replaced by component_ids. Support priority definition in the component-libraries.yml. Enhance service topology query. When there are multiple components detected from the server side, the component type of the node would be determined by the priority, which was random in the previous release. Remove component_id from service_instance_relation_client_side and service_instance_relation_server_side. Make the satellite E2E test more stable. Add Istio 1.16 to test matrix. Register ValueColumn as Tag for Record in BanyanDB storage plugin. Bump up Netty to 4.1.86. Remove unnecessary additional columns when storage is in logical sharding mode. The cluster coordinator support watch mechanism for notifying RemoteClientManager and ServerStatusService. Fix ServiceMeshServiceDispatcher overwrite ServiceDispatcher debug file when open SW_OAL_ENGINE_DEBUG. Use groupBy and in operators to optimize topology query for BanyanDB storage plugin. Support server status watcher for MetricsPersistentWorker to check the metrics whether required initialization. Fix the meter value are not correct when using sumPerMinLabeld or sumHistogramPercentile MAL function. Fix cannot display attached events when using Zipkin Lens UI query traces. Remove time_bucket for both Stream and Measure kinds in BanyanDB plugin. Merge TIME_BUCKET of Metrics and Record into StorageData. Support no layer in the listServices query. Fix time_bucket of ServiceTraffic not set correctly in slowSql of MAL. Correct the TopN record query DAO of BanyanDB. Tweak interval settings of BanyanDB. Support monitoring AWS Cloud EKS. Bump BanyanDB Java client to 0.3.0-rc1. Remove id tag from measures. Add Banyandb.MeasureField to mark a column as a BanyanDB Measure field. Add BanyanDB.StoreIDTag to store a process\u0026rsquo;s id for searching. [Breaking Change] The supported version of ShardingSphere-Proxy is upgraded from 5.1.2 to 5.3.1. Due to the changes of ShardingSphere\u0026rsquo;s API, versions before 5.3.1 are not compatible. Add the eBPF network profiling E2E Test in the per storage. Fix TCP service instances are lack of instance properties like pod and namespace, which causes Pod log not to work for TCP workloads. Add Python HBase happybase module component ID(94). Fix gRPC alarm cannot update settings from dynamic configuration source. Add batchOfBytes configuration to limit the size of bulk flush. Add Python Websocket module component ID(7018). [Optional] Optimize single trace query performance by customizing routing in ElasticSearch. SkyWalking trace segments and Zipkin spans are using trace ID for routing. 
This is OFF by default, controlled by storage/elasticsearch/enableCustomRouting. Enhance OAP HTTP server to support HTTPS Remove handler scan in otel receiver, manual initialization instead Add aws-firehose-receiver to support collecting AWS CloudWatch metric(OpenTelemetry format). Notice, no HTTPS/TLS setup support. By following AWS Firehose request, it uses proxy request (https://... instead of /aws/firehose/metrics), there must be a proxy(Nginx, Envoy, etc.). Avoid Antlr dependencies' versions might be different in compile time and runtime. Now PrometheusMetricConverter#escapedName also support converting / to _. Add missing TCP throughput metrics. Refactor @Column annotation, swap Column#name and ElasticSearch.Column#columnAlias and rename ElasticSearch.Column#columnAlias to ElasticSearch.Column#legacyName. Add Python HTTPX module component ID(7019). Migrate tests from junit 4 to junit 5. Refactor http-based alarm plugins and extract common logic to HttpAlarmCallback. Support Amazon Simple Storage Service (Amazon S3) metrics monitoring Support process Sum metrics with AGGREGATION_TEMPORALITY_DELTA case Support Amazon DynamoDB monitoring. Support prometheus HTTP API and promQL. Scope in the Entity of Metrics query v1 protocol is not required and automatical correction. The scope is determined based on the metric itself. Add explicit ReadTimeout for ConsulConfigurationWatcher to avoid IllegalArgumentException: Cache watchInterval=10sec \u0026gt;= networkClientReadTimeout=10000ms. Fix DurationUtils.getDurationPoints exceed, when startTimeBucket equals endTimeBucket. Support process OpenTelemetry ExponentialHistogram metrics Add FreeRedis component ID(3018).  UI  Add Zipkin Lens UI to webapp, and proxy it to context path /zipkin. Migrate the build tool from vue cli to Vite4. Fix Instance Relation and Endpoint Relation dashboards show up. Add Micrometer icon. Update MySQL UI to support MariaDB. Add AWS menu for supporting AWS monitoring. Add missing FastAPI logo. Update the log details page to support the formatted display of JSON content. Fix build config. Avoid being unable to drag process nodes for the first time. Add node folder into ignore list. Add ElPopconfirm to component types. Add an iframe widget for zipkin UI. Optimize graph tooltips to make them more friendly. Bump json5 from 1.0.1 to 1.0.2. Add websockets icon. Implement independent mode for widgets. Bump http-cache-semantics from 4.1.0 to 4.1.1. Update menus for OpenFunction. Add auto fresh to widgets independent mode. Fix: clear trace ID on the Log and Trace widgets after using association. Fix: reset duration for query conditions after time range changes. Add AWS S3 menu. Refactor: optimize side bar component to make it more friendly. Fix: remove duplicate popup message for query result. Add logo for HTTPX. Refactor: optimize the attached events visualization in the trace widget. Update BanyanDB client to 0.3.1. Add AWS DynamoDB menu. Fix: add auto period to the independent mode for widgets. Optimize menus and add Windows monitoring menu. Add a calculation for the cpm5dAvg. add a cpm5d calculation. Fix data processing error in the eBPF profiling widget. Support for double quotes in SlowSQL statements. Fix: the wrong position of the menu when clicking the topology node.  Documentation  Remove Spring Sleuth docs, and add Spring MicroMeter Observations Analysis with the latest Java agent side enhancement. Update monitoring MySQL document to add the MariaDB part. Reorganize the protocols docs to a more clear API docs. 
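Regarding the PrometheusMetricConverter#escapedName note above: the visible effect is that characters Prometheus forbids in metric names, now including /, end up as underscores. A rough, hypothetical sketch of such escaping follows; the actual converter implementation may differ:

```java
import java.util.regex.Pattern;

// Illustrative name escaping: anything outside the Prometheus-legal set
// [a-zA-Z0-9_:], including '/' and '.', is replaced with '_', which is the
// observable behavior described in the changelog item above.
public final class MetricNameEscaper {
    private static final Pattern ILLEGAL = Pattern.compile("[^a-zA-Z0-9_:]");

    public static String escapedName(String name) {
        return ILLEGAL.matcher(name).replaceAll("_");
    }

    public static void main(String[] args) {
        // Prints: http_server_request_duration
        System.out.println(escapedName("http.server/request.duration"));
    }
}
```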
Add documentation about replacing Zipkin server with SkyWalking OAP. Add Lens UI relative docs in Zipkin trace section. Add Profiling APIs. Fix backend telemetry doc and so11y dashboard doc as the OAP Prometheus fetcher was removed since 9.3.0  All issues and pull requests are here\n","title":"9.4.0","url":"/docs/main/latest/en/changes/changes-9.4.0/"},
Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates. Add Continuous Profiling tab on Mesh layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","title":"9.5.0","url":"/docs/main/v9.5.0/en/changes/changes/"},{"content":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP running in no-init mode. Support to bind TLS status as a part of component for service topology. Fix component ID priority bug. Fix component ID of topology overlap due to storage layer bugs. 
[Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. [Breaking Change] Sharding-MySQL implementations and tests get removed due to we have the day-based rolling mechanism by default Fix otel k8s-cluster rule add namespace dimension for MAL aggregation calculation(Deployment Status,Deployment Spec Replicas) Support continuous profiling feature. Support collect process level related metrics. Fix K8sRetag reads the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing service label. Fix possible NPE when initialize IntList. Support parse PromQL expression has empty labels in the braces for metadata query. Support alarm metric OP !=. Support metrics query indicates whether value == 0 represents actually zero or no data. Fix NPE when query the not exist series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: Remove empty values from the query result, fix /api/v1/metadata param limit could cause out of bound. Support monitoring the total number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix cve. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for aws-firehose receiver Bump up armeria to 1.23.1 Support Elasticsearch Monitoring. Fix PromQL HTTP API /api/v1/series response missing service label when matching metric. Support ServerSide TopN for BanyanDB. Add component ID for Jersey. Remove OpenCensus support, the related codes and docs as it\u0026rsquo;s sunsetting. Support dynamic configuration of searchableTracesTags Support exportErrorStatusTraceOnly for export the error status trace segments through the Kafka channel Add component ID for Grizzly. Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. 
Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates. Add Continuous Profiling tab on Mesh layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  All issues and pull requests are here\n","title":"9.5.0","url":"/docs/main/v9.6.0/en/changes/changes-9.5.0/"},{"content":"9.5.0 Project  Fix Duplicate class found due to the delombok goal.  OAP Server  Fix wrong layer of metric user error in DynamoDB monitoring. ElasticSearch storage does not check field types when OAP running in no-init mode. Support to bind TLS status as a part of component for service topology. Fix component ID priority bug. Fix component ID of topology overlap due to storage layer bugs. [Breaking Change] Enhance JDBC storage through merging tables and managing day-based table rolling. [Breaking Change] Sharding-MySQL implementations and tests get removed due to we have the day-based rolling mechanism by default Fix otel k8s-cluster rule add namespace dimension for MAL aggregation calculation(Deployment Status,Deployment Spec Replicas) Support continuous profiling feature. Support collect process level related metrics. Fix K8sRetag reads the wrong k8s service from the cache due to a possible namespace mismatch. [Breaking Change] Support cross-thread trace profiling. The data structure and query APIs are changed. Fix PromQL HTTP API /api/v1/labels response missing service label. Fix possible NPE when initialize IntList. Support parse PromQL expression has empty labels in the braces for metadata query. Support alarm metric OP !=. Support metrics query indicates whether value == 0 represents actually zero or no data. Fix NPE when query the not exist series indexes in ElasticSearch storage. Support collecting memory buff/cache metrics in VM monitoring. PromQL: Remove empty values from the query result, fix /api/v1/metadata param limit could cause out of bound. Support monitoring the total number metrics of k8s StatefulSet and DaemonSet. Support Amazon API Gateway monitoring. Bump up graphql-java to fix cve. Bump up Kubernetes Java client. Support Redis Monitoring. Add component ID for amqp, amqp-producer and amqp-consumer. Support no-proxy mode for aws-firehose receiver Bump up armeria to 1.23.1 Support Elasticsearch Monitoring. 
Fix PromQL HTTP API /api/v1/series response missing service label when matching metric. Support ServerSide TopN for BanyanDB. Add component ID for Jersey. Remove OpenCensus support, the related codes and docs as it\u0026rsquo;s sunsetting. Support dynamic configuration of searchableTracesTags Support exportErrorStatusTraceOnly for export the error status trace segments through the Kafka channel Add component ID for Grizzly. Fix potential NPE in Zipkin receiver when the Span is missing some fields. Filter out unknown_cluster metric data. Support RabbitMQ Monitoring. Support Redis slow logs collection. Fix data loss when query continuous profiling task record. Adapt the continuous profiling task query GraphQL. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Deprecated metrics query v2 protocol. Deprecated record query protocol. Add component ID for go-redis. Add OpenSearch 2.8.0 to test case. Add ai-pipeline module. Support HTTP URI formatting through ai-pipeline to do pattern recognition. Add new HTTP URI grouping engine with benchmark. [Breaking Change] Use the new HTTP URI grouping engine to replace the old regex based mechanism. Support sumLabeled in MAL. Migrate from kubernetes-client/java to fabric8 client. Envoy ALS generated relation metrics considers http status codes \u0026gt;= 400 has an error at the client side. Add cause message field when query continuous profiling task.  UI  Revert: cpm5d function. This feature is cancelled from backend. Fix: alerting link breaks on the topology. Refactor Topology widget to make it more hierarchical.  Choose User as the first node. If User node is absent, choose the busiest node(which has the most calls of all). Do a left-to-right flow process. At the same level, list nodes from top to bottom in alphabetical order.   Fix filter ID when ReadRecords metric associates with trace. Add AWS API Gateway menu. Change trace profiling protocol. Add Redis menu. Optimize data types. Support isEmptyValue flag for metrics query. Add elasticsearch menu. [Clean UI templates before upgrade] Set showSymbol: true, and make the data point shows on the Line graph. Please clean ui_template index in elasticsearch storage or table in JDBC storage. [Clean UI templates before upgrade] UI templates: Simplify metric name with the label. Add MQ menu. Add Jeysey icon. Fix: set endpoint and instance selectors with url parameters correctly. Bump up dependencies versions icons-vue 1.1.4, element-plus 2.1.0, nanoid 3.3.6, postcss 8.4.23 Add OpenTelemetry log protocol support. [Breaking Change] Configuration key enabledOtelRules is renamed to enabledOtelMetricsRules and the corresponding environment variable is renamed to SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Add grizzly icon. Fix: the Instance List data display error. Fix: set topN type to Number. Support Metrics Query Expression(MQE) and allows users to do simple query-stage calculation through the expression. Bump up zipkin ui dependency to 2.24.1. Bump up vite to 4.0.5. Apply MQE on General and Virtual-Database layer UI-templates. Add Continuous Profiling tab on Mesh layer UI-templates.  Documentation  Add Profiling related documentations. Add SUM_PER_MIN to MAL documentation. Make the log relative docs more clear, and easier for further more formats support. Update the cluster management and advanced deployment docs.  
All issues and pull requests are here\n","title":"9.5.0","url":"/docs/main/v9.7.0/en/changes/changes-9.5.0/"},{"content":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. 
Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. 
Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","title":"9.6.0","url":"/docs/main/latest/en/changes/changes-9.6.0/"},{"content":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. 
This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. 
Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","title":"9.6.0","url":"/docs/main/next/en/changes/changes-9.6.0/"},{"content":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. 
Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. 
Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","title":"9.6.0","url":"/docs/main/v9.6.0/en/changes/changes/"},{"content":"9.6.0 Project  Bump up Guava to 32.0.1 to avoid the lib listed as vulnerable due to CVE-2020-8908. This API is never used. Maven artifact skywalking-log-recevier-plugin is renamed to skywalking-log-receiver-plugin. Bump up cli version 0.11 to 0.12. Bump up the version of ASF parent pom to v30. Make builds reproducible for automatic releases CI.  OAP Server  Add Neo4j component ID(112) language: Python. Add Istio ServiceEntry registry to resolve unknown IPs in ALS. Wrap deleteProperty API to the BanyanDBStorageClient. [Breaking change] Remove matchedCounter from HttpUriRecognitionService#feedRawData. Remove patterns from HttpUriRecognitionService#feedRawData and add max 10 candidates of raw URIs for each pattern. Add component ID for WebSphere. Fix AI Pipeline uri caching NullPointer and IllegalArgument Exceptions. Fix NPE in metrics query when the metric is not exist. Remove E2E tests for Istio \u0026lt; 1.15, ElasticSearch \u0026lt; 7.16.3, they might still work but are not supported as planed. Scroll all results in ElasticSearch storage and refactor scrolling logics, including Service, Instance, Endpoint, Process, etc. Improve Kubernetes coordinator to remove Terminating OAP Pods in cluster. Support SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN and SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN to control the period of training and sync HTTP URI recognition patterns. And shorten the default period to 10s for sync and 60s for training. Fix ElasticSearch scroller bug. Add component ID for Aerospike(ID=149). Packages with name recevier are renamed to receiver. BanyanDBMetricsDAO handles storeIDTag in multiGet for BanyanDBModelExtension. Fix endpoint grouping-related logic and enhance the performance of PatternTree retrieval. Fix metric session cache saving after batch insert when using mysql-connector-java. Support dynamic UI menu query. Add comment for docker/.env to explain the usage. Fix wrong environment variable name SW_OTEL_RECEIVER_ENABLED_OTEL_RULES to right SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES. Fix instance query in JDBC implementation. Set the SW_QUERY_MAX_QUERY_COMPLEXITY default value to 3000(was 1000). Accept length=4000 parameter value of the event. It was 2000. Tolerate parameter value in illegal JSON format. Update BanyanDB Java Client to 0.4.0 Support aggregate Labeled Value Metrics in MQE. [Breaking change] Change the default label name in MQE from label to _. Bump up grpc version to 1.53.0. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid OAP server process running as a background process. Revert part of #10616 to fix the unexpected changes: if there is no data we should return an array with 0s, but in #10616, an empty array is returned. Cache all service entity in memory for query. Bump up jackson version to 2.15.2. Increase the default memory size to avoid OOM. Bump up graphql-java to 21.0. Add Echo component ID(5015) language: Golang. 
Fix index out of bounds exception in aggregate_labels MQE function. Support MongoDB Server/Cluster monitoring powered by OTEL. Do not print configurations values in logs to avoid sensitive info leaked. Move created the latest index before retrieval indexes by aliases to avoid the 404 exception. This just prevents some interference from manual operations. Add more Go VM metrics, as new skywalking-go agent provided since its 0.2 release. Add component ID for Lock (ID=5016). [Breaking change] Adjust the structure of hooks in the alarm-settings.yml. Support multiple configs for each hook types and specifying the hooks in the alarm rule. Bump up Armeria to 1.24.3. Fix BooleanMatch and BooleanNotEqualMatch doing Boolean comparison. Support LogQL HTTP query APIs. Add Mux Server component ID(5017) language: Golang. Remove ElasticSearch 6.3.2 from our client lib tests. Bump up ElasticSearch server 8.8.1 to 8.9.0 for latest e2e testing. 8.1.0, 7.16.3 and 7.17.10 are still tested. Add OpenSearch 2.8.0 to our client lib tests. Use listening mode for apollo implementation of dynamic configuration. Add view_as_seq function in MQE for listing metrics in the given prioritized sequence. Fix the wrong default value of k8sServiceNameRule if it\u0026rsquo;s not explicitly set. Improve PromQL to allow for multiple metric operations within a single query. Fix MQE Binary Operation between labeled metrics and other type of value result. Add component ID for Nacos (ID=150). Support Compare Operation in MQE. Fix the Kubernetes resource cache not refreshed. Fix wrong classpath that might cause OOM in startup. Enhance the serviceRelation in MAL by adding settings for the delimiter and component fields. [Breaking change] Support MQE in the Alerting. The Alarm Rules configuration(alarm-settings.yml), add expression field and remove metrics-name/count/threshold/op/only-as-condition fields and remove composite-rules configuration. Check results in ALS as per downstream/upstream instead of per log. Fix GraphQL query listInstances not using endTime query Do not start server and Kafka consumer in init mode. Add Iris component ID(5018). Add OTLP Tracing support as a Zipkin trace input.  UI  Fix metric name browser_app_error_rate in Browser-Root dashboard. Fix display name of endpoint_cpm for endpoint list in General-Service dashboard. Implement customize menus and marketplace page. Fix minTraceDuration and maxTraceDuration types. Fix init minTime to Infinity. Bump dependencies to fix vulnerabilities. Add scss variables. Fix the title of instance list and notices in the continue profiling. Add a link to explain the expression metric, add units in the continue profiling widget. Calculate string width to set Tabs name width. [Breaking change] Removed \u0026lsquo;\u0026amp;\u0026rsquo; symbols from shell scripts to avoid web application server process running as a background process. Reset chart label. Fix service associates instances. Remove node-sass. Fix commit error on Windows. Apply MQE on MYSQL, POSTGRESQL, REDIS, ELASTICSEARCH and DYNAMODB layer UI-templates. Apply MQE on Virtual-Cache layer UI-templates Apply MQE on APISIX, AWS_EKS, AWS_GATEWAY and AWS_S3 layer UI templates. Apply MQE on RabbitMQ Dashboards. Apply MQE on Virtual-MQ layer UI-templates Apply MQE on Infra-Linux layer UI-templates Apply MQE on Infra-Windows layer UI-templates Apply MQE on Browser layer UI-templates. Implement MQE on topology widget. Fix getEndpoints keyword blank. Implement a breadcrumb component as navigation.  
Documentation  Add Go agent into the server agent documentation. Add data unit description in the configuration of continuous profiling policy. Remove storage extension doc, as it is expired. Remove how to add menu doc, as SkyWalking supports marketplace and new backend-based setup. Separate contribution docs to a new menu structure. Add a doc to explain how to manage i18n. Add a doc to explain OTLP Trace support. Fix typo in dynamic-config-configmap.md. Fix out-dated docs about Kafka fetcher. Remove 3rd part fetchers from the docs, as they are not maintained anymore.  All issues and pull requests are here\n","title":"9.6.0","url":"/docs/main/v9.7.0/en/changes/changes-9.6.0/"},{"content":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. 
Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","title":"9.7.0","url":"/docs/main/latest/en/changes/changes/"},{"content":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. 
Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain new booting parameters table in the logs, and how to setup cluster mode.  All issues and pull requests are here\n","title":"9.7.0","url":"/docs/main/next/en/changes/changes-9.7.0/"},{"content":"9.7.0 Project  Bump Java agent to 9.1-dev in the e2e tests. Bump up netty to 4.1.100. Update Groovy 3 to 4.0.15. Support packaging the project in JDK21. Compiler source and target remain in JDK11.  OAP Server  ElasticSearchClient: Add deleteById API. Fix Custom alarm rules are overwritten by \u0026lsquo;resource/alarm-settings.yml\u0026rsquo; Support Kafka Monitoring. Support Pulsar server and BookKeeper server Monitoring. [Breaking Change] Elasticsearch storage merge all management data indices into one index management, including ui_template,ui_menu,continuous_profiling_policy. Add a release mechanism for alarm windows when it is expired in case of OOM. Fix Zipkin trace receiver response: make the HTTP status code from 200 to 202. 
Update BanyanDB Java Client to 0.5.0. Fix getInstances query in the BanyanDB Metadata DAO. BanyanDBStorageClient: Add keepAliveProperty API. Fix table exists check in the JDBC Storage Plugin. Enhance extensibility of HTTP Server library. Adjust AlarmRecord alarmMessage column length to 512. Fix EventHookCallback build event: build the layer from Service's Layer. Fix AlarmCore doAlarm: catch exception for each callback to avoid interruption. Optimize queryBasicTraces in TraceQueryEsDAO. Fix WebhookCallback send incorrect messages, add catch exception for each callback HTTP Post. Fix AlarmRule expression validation: add labeled metrics mock data for check. Support collect ZGC memory pool metrics. Add a component ID for Netty-http (ID=151). Add a component ID for Fiber (ID=5021). BanyanDBStorageClient: Add define(Property property, PropertyStore.Strategy strategy) API. Correct the file format and fix typos in the filenames for monitoring Kafka\u0026rsquo;s e2e tests. Support extract timestamp from patterned datetime string in LAL. Support output key parameters in the booting logs. Fix cannot query zipkin traces with annotationQuery parameter in the JDBC related storage. Fix limit doesn\u0026rsquo;t work for findEndpoint API in ES storage. Isolate MAL CounterWindow cache by metric name. Fix JDBC Log query order. Change the DataCarrier IF_POSSIBLE strategy to use ArrayBlockingQueue implementation. Change the policy of the queue(DataCarrier) in the L1 metric aggregate worker to IF_POSSIBLE mode. Add self-observability metric metrics_aggregator_abandon to count the number of abandon metrics. Support Nginx monitoring. Fix BanyanDB Metadata Query: make query single instance/process return full tags to avoid NPE. Repleace go2sky E2E to GO agent. Replace Metrics v2 protocol with MQE in UI templates and E2E Test. Fix incorrect apisix metrics otel rules. Support Scratch The OAP Config Dump. Support increase/rate function in the MQE query language. Group service endpoints into _abandoned when endpoints have high cardinality.  UI  Add new menu for kafka monitoring. Fix independent widget duration. Fix the display height of the link tree structure. Replace the name by shortName on service widget. Refactor: update pagination style. No visualization style change. Apply MQE on K8s layer UI-templates. Fix icons display in trace tree diagram. Fix: update tooltip style to support multiple metrics scrolling view in a metrics graph. Add a new widget to show jvm memory pool detail. Fix: avoid querying data with empty parameters. Add a title and a description for trace segments. Add Netty icon for Netty HTTP plugin. Add Pulsar menu i18n files. Refactor Logs view. Implement the Dark Theme. Change UI templates for Text widgets. Add Nginx menu i18n. Fix the height for trace widget. Polish list style. Fix Log associate with Trace. Enhance layout for broken Topology widget. Fix calls metric with call type for Topology widget. Fix changing metrics config for Topology widget. Fix routes for Tab widget. Remove OpenFunction(FAAS layer) relative UI templates and menu item. Fix: change colors to match dark theme for Network Profiling. Remove the description of OpenFunction in the UI i18n. Reduce component chunks to improve page loading resource time.  Documentation  Separate storage docs to different files, and add an estimated timeline for BanyanDB(end of 2023). Add topology configuration in UI-Grafana doc. Add missing metrics to the OpenTelemetry Metrics doc. Polish docs of Concepts and Designs. 
Fix incorrect notes of slowCacheReadThreshold. Update OAP setup and cluster coordinator docs to explain the new booting parameters table in the logs, and how to set up cluster mode.  All issues and pull requests are here\n","title":"9.7.0","url":"/docs/main/v9.7.0/en/changes/changes/"},{"content":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the fundamental theory of SkyWalking tracing models.\n  Blog about Scaling SkyWalking server automatically in kubernetes.\n  Blog about Use Profiling to Fix the Blind Spot of Distributed Tracing.\n  Blog about observing Istio + Envoy service mesh with ALS solution.\n  Blog about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).\n  ","title":"Academy","url":"/docs/main/v9.0.0/en/academy/list/"},{"content":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the fundamental theory of SkyWalking tracing models.\n  Blog about Scaling SkyWalking server automatically in kubernetes.\n  Blog about Use Profiling to Fix the Blind Spot of Distributed Tracing.\n  Blog about observing Istio + Envoy service mesh with ALS solution.\n  Blog about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).\n  ","title":"Academy","url":"/docs/main/v9.1.0/en/academy/list/"},{"content":"Academy Academy is an article/video list recommended by the committer team.\n  STAM Paper about the fundamental theory of SkyWalking tracing models.\n  Blog about Scaling SkyWalking server automatically in kubernetes.\n  Blog about Use Profiling to Fix the Blind Spot of Distributed Tracing.\n  Blog about observing Istio + Envoy service mesh with ALS solution.\n  Blog about observing Istio + Envoy service mesh with ALS Metadata-Exchange mechanism (in VMs and / or Kubernetes).\n  Blog about using eBPF Profiling to pinpoint service mesh critical performance Impact.\n  ","title":"Academy","url":"/docs/main/v9.2.0/en/academy/list/"},{"content":"ActiveMQ classic monitoring SkyWalking leverages the JMX Prometheus exporter to collect metrics data from ActiveMQ classic, and leverages the OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System.\nData flow  ActiveMQ classic has extensive support for JMX to allow you to monitor and control the behavior of the broker via the JMX MBeans. The JMX Prometheus exporter collects metrics data from ActiveMQ classic; it is intended to be run as a Java agent, exposing an HTTP server that serves metrics of the local JVM. The OpenTelemetry Collector fetches metrics from the JMX Prometheus exporter via the Prometheus Receiver and pushes them to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expressions with MAL to filter/calculate/aggregate and store the results.  Setup  Enable JMX in activemq.xml; the JMX remote port defaults to 1616 and can be changed through ACTIVEMQ_SUNJMX_START. For an example ActiveMQ configuration, refer to here. Set up the JMX Prometheus exporter, which runs as a Java agent (recommended) attached to ActiveMQ classic. If you work with Docker, you can also set up a standalone server for the exporter; refer to here (note the configuration of includeObjectNames). Set up the OpenTelemetry Collector (see the Collector configuration sketch below). For an example OpenTelemetry Collector configuration, refer to here. Configure the SkyWalking OpenTelemetry receiver.  
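To make the data flow above concrete, the following is a minimal OpenTelemetry Collector configuration sketch: a Prometheus receiver scrapes the JMX Prometheus exporter and an OTLP gRPC exporter forwards the metrics to the OAP. The exporter address activemq:3000, the cluster label value, and the OAP address oap:11800 are illustrative assumptions, not values taken from this documentation; treat the linked example configurations as authoritative, and make sure the ActiveMQ-related rules are enabled on the OAP side (via SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES).

```yaml
# Minimal OpenTelemetry Collector sketch; addresses and labels are assumptions for illustration.
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: activemq-monitoring        # scrape the JMX Prometheus exporter
          scrape_interval: 30s
          static_configs:
            - targets: [ "activemq:3000" ]     # hypothetical exporter host:port
              labels:
                cluster: activemq-cluster-1    # label names expected by the MAL rules may differ

processors:
  batch:

exporters:
  otlp:
    endpoint: oap:11800                        # SkyWalking OAP gRPC endpoint (assumed host)
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [ prometheus ]
      processors: [ batch ]
      exporters: [ otlp ]
```

On the ActiveMQ side, the exporter is typically attached with a -javaagent flag pointing at the jmx_prometheus_javaagent jar plus a listen port and a small exporter config file; whatever port is chosen there is what the targets entry above must point to.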
ActiveMQ classic Monitoring ActiveMQ classic monitoring provides multidimensional metrics monitoring of ActiveMQ Exporter as Layer: ActiveMQ Service in the OAP. In each cluster, the broker is represented as Instance and the destination is represented as Endpoint.\nActiveMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     System Load Average Count meter_activemq_cluster_system_load_average The average system load, range:[0, 10000]. JMX Prometheus Exporter   Thread Count Count meter_activemq_cluster_thread_count Threads currently used by the JVM. JMX Prometheus Exporter   Init Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_init The initial amount of heap memory available. JMX Prometheus Exporter   Committed Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_committed The amount of heap memory guaranteed to be available for the JVM to use. JMX Prometheus Exporter   Used Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_used The amount of JVM heap memory currently in use. JMX Prometheus Exporter   Max Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_max The maximum possible size of the heap memory. JMX Prometheus Exporter   GC G1 Old Collection Count Count meter_activemq_cluster_gc_g1_old_collection_count The gc count of G1 Old Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Count Count meter_activemq_cluster_gc_g1_young_collection_count The gc count of G1 Young Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Old Collection Time ms meter_activemq_cluster_gc_g1_old_collection_time The gc time spent in G1 Old Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Time ms meter_activemq_cluster_gc_g1_young_collection_time The gc time spent in G1 Young Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC Parallel Old Collection Count Count meter_activemq_cluster_gc_parallel_old_collection_count The gc count of Parallel Old Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Count Count meter_activemq_cluster_gc_parallel_young_collection_count The gc count of Parallel Young Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Old Collection Time ms meter_activemq_cluster_gc_parallel_old_collection_time The gc time spent in Parallel Old Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Time ms meter_activemq_cluster_gc_parallel_young_collection_time The gc time spent in Parallel Young Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   Enqueue Rate Count/s meter_activemq_cluster_enqueue_rate Number of messages that have been sent to the cluster per second. JMX Prometheus Exporter   Dequeue Rate Count/s meter_activemq_cluster_dequeue_rate Number of messages that have been acknowledged or discarded on the cluster per second. JMX Prometheus Exporter   Dispatch Rate Count/s meter_activemq_cluster_dispatch_rate Number of messages that have been delivered to consumers per second. JMX Prometheus Exporter   Expired Rate Count/s meter_activemq_cluster_expired_rate Number of messages that have expired per second. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_cluster_average_enqueue_time The average time a message was held on this cluster. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_cluster_max_enqueue_time The max time a message was held on this cluster. 
JMX Prometheus Exporter    ActiveMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime sec meter_activemq_broker_uptime Uptime of the broker. JMX Prometheus Exporter   State  meter_activemq_broker_state 1 if the broker is a slave, otherwise 0. JMX Prometheus Exporter   Current Connections Count meter_activemq_broker_current_connections The number of clients connected to the broker currently. JMX Prometheus Exporter   Current Producer Count Count meter_activemq_broker_current_producer_count The number of producers currently attached to the broker. JMX Prometheus Exporter   Current Consumer Count Count meter_activemq_broker_current_consumer_count The number of consumers consuming messages from the broker. JMX Prometheus Exporter   Producer Count Count meter_activemq_broker_producer_count Number of message producers active on destinations. JMX Prometheus Exporter   Consumer Count Count meter_activemq_broker_consumer_count Number of message consumers subscribed to destinations. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_broker_enqueue_count The total number of messages sent to the broker. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_broker_dequeue_count The total number of messages the broker has delivered to consumers. JMX Prometheus Exporter   Enqueue Rate Count/sec meter_activemq_broker_enqueue_rate The total number of messages sent to the broker per second. JMX Prometheus Exporter   Dequeue Rate Count/sec meter_activemq_broker_dequeue_rate The total number of messages the broker has delivered to consumers per second. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_broker_memory_percent_usage Percentage of configured memory used by the broker. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_broker_memory_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Limit Bytes meter_activemq_broker_memory_limit Memory limit for holding undelivered messages before paging to temporary storage. JMX Prometheus Exporter   Store Percent Usage % meter_activemq_broker_store_percent_usage Percentage of available disk space used for persistent message storage. JMX Prometheus Exporter   Store Limit Bytes meter_activemq_broker_store_limit Disk limit for persistent messages before producers are blocked. JMX Prometheus Exporter   Temp Percent Usage % meter_activemq_broker_temp_percent_usage Percentage of available disk space used for non-persistent message storage. JMX Prometheus Exporter   Temp Limit Bytes meter_activemq_broker_temp_limit Disk limit for non-persistent messages and temporary data before producers are blocked. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_broker_average_message_size Average message size on this broker. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_broker_max_message_size Max message size on this broker. JMX Prometheus Exporter   Queue Size Count meter_activemq_broker_queue_size Number of messages on this broker that have been dispatched but not acknowledged. JMX Prometheus Exporter    ActiveMQ Destination Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Producer Count Count meter_activemq_destination_producer_count Number of producers attached to this destination. JMX Prometheus Exporter   Consumer Count Count meter_activemq_destination_consumer_count Number of consumers subscribed to this destination. 
JMX Prometheus Exporter   Topic Consumer Count Count meter_activemq_destination_topic_consumer_count Number of consumers subscribed to the topics. JMX Prometheus Exporter   Queue Size Count meter_activemq_destination_queue_size The number of messages that have not been acknowledged by a consumer. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_destination_memory_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_destination_memory_percent_usage Percentage of configured memory used by the destination. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_destination_enqueue_count The number of messages sent to the destination. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_destination_dequeue_count The number of messages the destination has delivered to consumers. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_destination_average_enqueue_time The average time a message was held on this destination. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_destination_max_enqueue_time The max time a message was held on this destination. JMX Prometheus Exporter   Dispatch Count Count meter_activemq_destination_dispatch_count Number of messages that has been delivered to consumers. JMX Prometheus Exporter   Expired Count Count meter_activemq_destination_expired_count Number of messages that have been expired. JMX Prometheus Exporter   Inflight Count Count meter_activemq_destination_inflight_count Number of messages that have been dispatched to but not acknowledged by consumers. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_destination_average_message_size Average message size on this destination. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_destination_max_message_size Max message size on this destination. JMX Prometheus Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/activemq/activemq-cluster.yaml, otel-rules/activemq/activemq-broker.yaml, otel-rules/activemq/activemq-destination.yaml. The ActiveMQ dashboard panel configurations are found in ui-initialized-templates/activemq.\n","title":"ActiveMQ classic monitoring","url":"/docs/main/next/en/setup/backend/backend-activemq-monitoring/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. 
There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/latest/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/next/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.0.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. 
L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.1.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.2.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.3.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment. In the cluster mode, you could run in different roles.\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. 
L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receive) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, the labelSelector setting is used for Aggregator role selection rules. Choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.4.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.5.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.6.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced deployment OAP servers communicate with each other in a cluster environment to do distributed aggregation. 
In the cluster mode, all OAP nodes are running in Mixed mode by default.\nThe available roles for OAP are,\n Mixed(default) Receiver Aggregator  Sometimes users may wish to deploy cluster nodes with a clearly defined role. They could then use this function.\nMixed By default, the OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending/receiving) L2 aggregation Persistence Alarm  Receiver The OAP is responsible for:\n Receiving agent traces or metrics. L1 aggregation Internal communication (sending)  Aggregator The OAP is responsible for:\n Internal communication(receiving from Receiver and Mixed roles OAP) L2 aggregation Persistence Alarm   These roles are designed for complex deployment requirements on security and network policy.\nKubernetes If you are using our native Kubernetes coordinator, and you insist to install OAP nodes with a clearly defined role. There should be two deployments for each role, one for receiver OAPs and the other for aggregator OAPs to separate different system environment settings. Then, the labelSelector should be set for Aggregator role selection rules to choose the right OAP deployment based on your needs.\n","title":"Advanced deployment","url":"/docs/main/v9.7.0/en/setup/backend/advanced-deployment/"},{"content":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","title":"Advanced Features","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/advanced-features/"},{"content":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. 
If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","title":"Advanced Features","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-features/"},{"content":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","title":"Advanced Features","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/advanced-features/"},{"content":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","title":"Advanced Features","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/advanced-features/"},{"content":"Advanced Features  Set the settings through system properties for config file override. Read setting override. Use gRPC TLS to link backend. 
See open TLS Set client token if backend open the token authentication. Application Toolkit, are a collection of libraries, provided by SkyWalking APM. Using them, you have a bridge between your application and SkyWalking APM agent.  If you want your codes to interact with SkyWalking agent, including getting trace id, setting tags, propagating custom data etc.. Try SkyWalking manual APIs. If you require customized metrics, try SkyWalking Meter System Toolkit. If you want to continue traces across thread manually, use across thread solution APIs. If you want to forward Micrometer metrics / observations, use SkyWalking Micrometer Register. If you want to use OpenTracing Java APIs, try SkyWalking OpenTracing compatible tracer. More details you could find at http://opentracing.io If you want to tolerate some exceptions, read tolerate custom exception doc. If you want to print trace context(e.g. traceId) in your logs, or collect logs, choose the log frameworks, log4j, log4j2, logback.   If you want to specify the path of your agent.config file. Read set config file through system properties  ","title":"Advanced Features","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/advanced-features/"},{"content":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. 
Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. */ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","title":"Advanced Reporters","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/advanced-reporters/"},{"content":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. 
In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. 
*/ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","title":"Advanced Reporters","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/advanced-reporters/"},{"content":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. 
Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. */ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","title":"Advanced Reporters","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/advanced-reporters/"},{"content":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. 
In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. 
*/ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","title":"Advanced Reporters","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/advanced-reporters/"},{"content":"Advanced Reporters The advanced report provides an alternative way to submit the agent collected data to the backend. All of them are in the optional-reporter-plugins folder, move the one you needed into the reporter-plugins folder for the activation. Notice, don\u0026rsquo;t try to activate multiple reporters, that could cause unexpected fatal errors.\nKafka Reporter The Kafka reporter plugin support report traces, JVM metrics, Instance Properties, and profiled snapshots to Kafka cluster, which is disabled in default. Move the jar of the plugin, kafka-reporter-plugin-x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins for activating.\nIf you configure to use compression.type such as lz4, zstd, snappy, etc., you also need to move the jar of the plugin, lz4-java-x.y.z.jar or zstd-jni-x.y.z.jar or snappy-java.x.y.z.jar, from agent/optional-reporter-plugins to agent/plugins.\nNotice, currently, the agent still needs to configure GRPC receiver for delivering the task of profiling. In other words, the following configure cannot be omitted.\n# Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Kafka producer configuration plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092} plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10} Before you activated the Kafka reporter, you have to make sure that Kafka fetcher of OAP server has been opened in service.\nAdvanced Kafka Producer Configurations Kafka reporter plugin support to customize all configurations of listed in here. For example:\nplugin.kafka.producer_config[delivery.timeout.ms]=12000 Since SkyWalking 8.8.0, support to configure advanced Producer configurations in JSON format, like this:\nplugin.kafka.producer_config_json={\u0026quot;delivery.timeout.ms\u0026quot;: 12000, \u0026quot;compression.type\u0026quot;: \u0026quot;snappy\u0026quot;} Currently, there are 2 ways to configure advanced configurations below. 
Notice that, the new way, configured in JSON format, will be overridden by plugin.kafka.producer_config[key]=value when they have the duplication keys.\nSince 8.16.0, users could implement their decoder for kafka configurations rather than using plain configurations(such as password) of Kafka producer, Including plugin.kafka.producer_config_json,plugin.kafka.producer_config or environment variable SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON.\nBy doing that, add the kafka-config-extension dependency to your decoder project and implement decode interface.\n Add the KafkaConfigExtension dependency to your project.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;kafka-config-extension\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;scope\u0026gt;provided\u0026lt;/scope\u0026gt; \u0026lt;/dependency\u0026gt;  Implement your custom decode method.Like this:  package org.apache.skywalking.apm.agent.sample; import org.apache.skywalking.apm.agent.core.kafka.KafkaConfigExtension; import java.util.Map; /** * Custom decode class */ public class DecodeUtil implements KafkaConfigExtension { /** * Custom decode method. * @param config the value of `plugin.kafka.producer_config` or `plugin.kafka.producer_config_json` in `agent.config`. * @return the decoded configuration if you implement your custom decode logic. */ public Map\u0026lt;String, String\u0026gt; decode(Map\u0026lt;String, String\u0026gt; config) { /** * implement your custom decode logic * */ return config; } } Then, package your decoder project as a jar and move to agent/plugins.\nNotice, the jar package should contain all the dependencies required for your custom decode code.\nThe last step is to activate the decoder class in agent.config like this:\nplugin.kafka.decrypt_class=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; or configure by environment variable\nSW_KAFKA_DECRYPT_CLASS=\u0026quot;org.apache.skywalking.apm.agent.sample.DecodeUtil\u0026quot; 3rd party reporters There are other reporter implementations from out of the Apache Software Foundation.\nPulsar Reporter Go to Pulsar-reporter-plugin for more details.\nRocketMQ Reporter Go to RocketMQ-reporter-plugin for more details.\n","title":"Advanced Reporters","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/advanced-reporters/"},{"content":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","title":"AI Pipeline","url":"/docs/main/latest/en/setup/ai-pipeline/introduction/"},{"content":"AI Pipeline Warning, this module is still in the ALPHA stage. 
This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","title":"AI Pipeline","url":"/docs/main/next/en/setup/ai-pipeline/introduction/"},{"content":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","title":"AI Pipeline","url":"/docs/main/v9.5.0/en/setup/ai-pipeline/introduction/"},{"content":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","title":"AI Pipeline","url":"/docs/main/v9.6.0/en/setup/ai-pipeline/introduction/"},{"content":"AI Pipeline Warning, this module is still in the ALPHA stage. This is not stable.\nPattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are common technology to identify patterns in data. 
This module provides a way to integrate these technologies in a standardized way about shipping the data from OAP kernel to 3rd party.\nFrom the industry practice, Pattern Recognition, Machine Learning(ML) and Artificial Intelligence(AI) are always overestimated, they are good at many things but have to run in a clear context.\nThe ai-pipeline module is activated by default.\nai-pipeline:selector:${SW_AI_PIPELINE:default}default:uriRecognitionServerAddr:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR:}uriRecognitionServerPort:${SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT:17128}Supported Scenarios  HTTP Restful URI recognition.  ","title":"AI Pipeline","url":"/docs/main/v9.7.0/en/setup/ai-pipeline/introduction/"},{"content":"Alarm Alarm core is driven by a collection of rules, which are defined in config/alarm-settings.yml. There are three parts in alarm rule definition.\n Alarm rules. They define how metrics alarm should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after the alarm is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after the alarm is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alarm rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can be also configured as the source of alarm, please refer to the event doc for more details. Include names. Entity names which are included in this rule. Please follow the entity name definitions. Exclude names. Entity names which are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. If both exclude-name list and exclude-name regex are set, both rules will take effect. Include labels. Metric labels which are included in this rule. Exclude labels. Metric labels which are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. A regex that exclude labels. If both the exclude-label list and exclude-label regex are set, both rules will take effect. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you would like to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags, or through system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  
Label settings are required by the meter-system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the alarm by one or more of the values.\nFor example in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications, or if it simply serves as an condition of the composite rule. Silence period. After the alarm is triggered in Time-N, there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Composite rules NOTE: Composite rules are only applicable to alarm rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. 
Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingtalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. For security purposes, you can config an optional secret for an individual webhook URL. The alarm message will be sent through HTTP post by application/json content type if you have configured Dingtalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. 
For security purposes, you can config an optional secret for an individual webhook URL. If you would like to direct a text to a user, you can config ats which is the feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotUpdate the settings dynamically Since 6.5.0, the alarm settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alarm rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alarm rule. If any attribute (metrics-name, op, threshold, period, count, etc.) of a rule is changed, the sliding window will be destroyed and re-created, causing the alarm of this specific rule to restart again.\n","title":"Alarm","url":"/docs/main/v9.0.0/en/setup/backend/backend-alarm/"},{"content":"Alarm The alarm core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alarm rule definitions.\n Alarm rules. They define how metrics alarm should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alarm is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alarm is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alarm rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. 
Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Composite rules NOTE: Composite rules are only applicable to alarm rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. 
Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. 
The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. 
The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotUpdate the settings dynamically Since 6.5.0, the alarm settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alarm rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alarm rule. If any attribute (metrics-name, op, threshold, period, count, etc.) of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\n","title":"Alarm","url":"/docs/main/v9.1.0/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. 
Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. A MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate the request latency is slower than 1s. The typical illegal expressions are  avg(service_resp_time \u0026gt; 1000) + 1 expression root doesn\u0026rsquo;t use Compare Operation service_resp_time \u0026gt; 1000 expression return a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.    The metrics names in the expression could be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Hooks. Binding the specific names of the hooks when the alarm is triggered. The name format is {hookType}.{hookName} (slack.custom1 e.g.) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  
For example, for a metric, there is a shifting window as follows at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Expression is calculated based on the metric values(Value1 ~ Value7). For example, expression avg(service_resp_time) \u0026gt; 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) \u0026gt; 1000 and the result would be 1(true). Then the alarm would be triggered. Every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  NOTE:\n If the expression includes labeled metrics and the result has multiple labeled values(e.g. sum(service_percentile{_='0,1'} \u0026gt; 1000) \u0026gt;= 3), the alarm will be triggered if any of the labeled value results matches the condition 3 times (P50 \u0026gt; 1000 or P75 \u0026gt; 1000). One alarm rule targets the same entity level, such as a service-level expression (avg(service_resp_time) \u0026gt; 1000). Set entity names(Include/Exclude names\u0026hellip;) according to the metrics entity levels; do not include metrics of different entity levels in the same expression, such as service metrics and endpoint metrics.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# A MQE expression and the root operation of the expression must be a Compare Operation.expression:sum((endpoint_sla / 100) \u0026lt; 75) \u0026gt;= 3# The length of time to evaluate the metricsperiod:10# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10message:Successful rate of endpoint {name} is lower than 75%tags:level:WARNINGservice_percent_rule:expression:sum((service_sla / 100) \u0026lt; 85) \u0026gt;= 4# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_cperiod:10message:Service {name} successful rate is less than 85%service_resp_time_percentile_rule:expression:sum(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} \u0026gt; 1000) \u0026gt;= 3period:10silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000meter_service_status_code_rule:expression:sum(aggregate_labels(meter_status_code{_=\u0026#39;4xx,5xx\u0026#39;},sum) \u0026gt; 10) \u0026gt; 3period:10count:3silence-period:5message:The request number of entity {name} 4xx and 5xx status is more than expected.hooks:- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;comp_rule:expression:(avg(service_sla / 100) \u0026gt; 80) * (avg(service_percentile{_=\u0026#39;0\u0026#39;}) \u0026gt; 1000) == 1period:10message:Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.tags:level:CRITICALhooks:- \u0026#34;slack.default\u0026#34;- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;Default alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. 
Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nHooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type after you have set up Webhook hooks as follows:\nwebhook:default:is-default:trueurls:- http://ip:port/xxx- http://ip:port/yyyThe JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type after you have set up gRPC hooks as follows:\ngRPC:default:is-default:truetarget-host:iptarget-port:portThe message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalk:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishu:default:is-default:truetext-template:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelink:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client-id:\u0026#34;dummy_client_id\u0026#34;client-secret:dummy_secret_keyaccess-token-url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage-url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group-ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot-name:robotPagerDuty The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerduty:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integration-keys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscord:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Expression expression string MQE expression   Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Tags tags key-value pair    Period Period int    Silence period silence-period int    Message message string    Hooks hooks string array     ","title":"Alerting","url":"/docs/main/latest/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. A MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate the request latency is slower than 1s. The typical illegal expressions are  avg(service_resp_time \u0026gt; 1000) + 1 expression root doesn\u0026rsquo;t use Compare Operation service_resp_time \u0026gt; 1000 expression return a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.    The metrics names in the expression could be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. 
The key level is supported by default. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Hooks. Binding the specific names of the hooks when the alarm is triggered. The name format is {hookType}.{hookName} (slack.custom1 e.g.) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  For example, for a metric, there is a shifting window as follows at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Expression is calculated based on the metric values(Value1 ~ Value7). For example, expression avg(service_resp_time) \u0026gt; 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) \u0026gt; 1000 and the result would be 1(true). Then the alarm would be triggered. Every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  NOTE:\n If the expression includes labeled metrics and the result has multiple labeled values(e.g. sum(service_percentile{p='50,75'} \u0026gt; 1000) \u0026gt;= 3), the alarm will be triggered if any of the labeled value results matches the condition 3 times (P50 \u0026gt; 1000 or P75 \u0026gt; 1000). One alarm rule targets the same entity level, such as a service-level expression (avg(service_resp_time) \u0026gt; 1000). Set entity names(Include/Exclude names\u0026hellip;) according to the metrics entity levels; do not include metrics of different entity levels in the same expression, such as service metrics and endpoint metrics.  
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# A MQE expression and the root operation of the expression must be a Compare Operation.expression:sum((endpoint_sla / 100) \u0026lt; 75) \u0026gt;= 3# The length of time to evaluate the metricsperiod:10# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10message:Successful rate of endpoint {name} is lower than 75%tags:level:WARNINGservice_percent_rule:expression:sum((service_sla / 100) \u0026lt; 85) \u0026gt;= 4# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_cperiod:10message:Service {name} successful rate is less than 85%service_resp_time_percentile_rule:expression:sum(service_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;} \u0026gt; 1000) \u0026gt;= 3period:10silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000meter_service_status_code_rule:expression:sum(aggregate_labels(meter_status_code{_=\u0026#39;4xx,5xx\u0026#39;},sum) \u0026gt; 10) \u0026gt; 3period:10count:3silence-period:5message:The request number of entity {name} 4xx and 5xx status is more than expected.hooks:- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;comp_rule:expression:(avg(service_sla / 100) \u0026gt; 80) * (avg(service_percentile{_=\u0026#39;0\u0026#39;}) \u0026gt; 1000) == 1period:10message:Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.tags:level:CRITICALhooks:- \u0026#34;slack.default\u0026#34;- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;Default alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nHooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. 
Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type after you have set up Webhook hooks as follows:\nwebhook:default:is-default:trueurls:- http://ip:port/xxx- http://ip:port/yyyThe JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type after you have set up gRPC hooks as follows:\ngRPC:default:is-default:truetarget-host:iptarget-port:portThe message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
gRPC
The alarm message is sent through a remote gRPC method in Protobuf content type after you have set up gRPC hooks as follows:

gRPC:
  default:
    is-default: true
    target-host: ip
    target-port: port

The message contains key information, which is defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.

Part of the protocol looks like this:

message AlarmMessage {
    int64 scopeId = 1;
    string scope = 2;
    string name = 3;
    string id0 = 4;
    string id1 = 5;
    string ruleName = 6;
    string alarmMessage = 7;
    int64 startTime = 8;
    AlarmTags tags = 9;
}

message AlarmTags {
    // String key, String value pair.
    repeated KeyStringValuePair data = 1;
}

message KeyStringValuePair {
    string key = 1;
    string value = 2;
}

Slack Chat
Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.
The alarm message is sent as an HTTP POST with the application/json content type if you have configured Slack Incoming Webhooks as follows:

slack:
  default:
    is-default: true
    text-template: |-
      { "type": "section", "text": { "type": "mrkdwn", "text": ":alarm_clock: *Apache Skywalking Alarm* \n **%s**." } }
    webhooks:
      - https://hooks.slack.com/services/x/y/z

WeChat
Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message is sent as an HTTP POST with the application/json content type after you have set up Wechat Webhooks as follows:

wechat:
  default:
    is-default: true
    text-template: |-
      { "msgtype": "text", "text": { "content": "Apache SkyWalking Alarm: \n %s." } }
    webhooks:
      - https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_key

DingTalk
Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. The alarm message is sent as an HTTP POST with the application/json content type if you have configured DingTalk Webhooks as follows:

dingtalk:
  default:
    is-default: true
    text-template: |-
      { "msgtype": "text", "text": { "content": "Apache SkyWalking Alarm: \n %s." } }
    webhooks:
      - url: https://oapi.dingtalk.com/robot/send?access_token=dummy_token
        secret: dummysecret

Feishu
Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to specific users, configure ats with the Feishu user_id values, separated by ",". The alarm message is sent as an HTTP POST with the application/json content type if you have configured Feishu Webhooks as follows:

feishu:
  default:
    is-default: true
    text-template: |-
      { "msg_type": "text", "content": { "text": "Apache SkyWalking Alarm: \n %s." }, "ats": "feishu_user_id_1,feishu_user_id_2" }
    webhooks:
      - url: https://open.feishu.cn/open-apis/bot/v2/hook/dummy_token
        secret: dummysecret

WeLink
Follow the WeLink Webhooks guide and create new Webhooks. The alarm message is sent as an HTTP POST with the application/json content type if you have configured WeLink Webhooks as follows:

welink:
  default:
    is-default: true
    text-template: "Apache SkyWalking Alarm: \n %s."
    webhooks:
      # You may find your own client_id and client_secret in your app; the values below are dummies and need to be changed.
      - client-id: "dummy_client_id"
        client-secret: dummy_secret_key
        access-token-url: https://open.welink.huaweicloud.com/api/auth/v2/tickets
        message-url: https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat
        # If you send to multiple groups at a time, separate the group_ids with commas, e.g. "123xx","456xx".
        group-ids: "dummy_group_id"
        # Pick any name you like for the robot; it will be displayed in the group.
        robot-name: robot

PagerDuty
The PagerDuty hook is based on Events API v2.
Follow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.
Then configure as follows:

pagerduty:
  default:
    is-default: true
    text-template: "Apache SkyWalking Alarm: \n %s."
    integration-keys:
      - 5c6d805c9dcf4e03d09dfa81e8789ba1

You can also configure multiple integration keys.

Discord
Follow the Discord Webhooks guide and create a new webhook.
Then configure as follows:

discord:
  default:
    is-default: true
    text-template: "Apache SkyWalking Alarm: \n %s."
    webhooks:
      - url: https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBl
        username: robot
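The secret accepted by the DingTalk and Feishu hooks above is used to sign outgoing requests. Purely as an illustration of what such signing looks like (this is not SkyWalking code; DingTalk, for example, expects an HMAC-SHA256 of a timestamp plus the secret, Base64- and URL-encoded, appended to the webhook URL, while each platform's own guide defines the exact string-to-sign and query parameters), a standalone JDK sketch:

    import javax.crypto.Mac;
    import javax.crypto.spec.SecretKeySpec;
    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    // Illustrative only: HMAC-SHA256 signing of a timestamped string, in the style
    // DingTalk's signed webhooks describe. Check the platform guide for the real rules.
    public class WebhookSignSketch {
        static String sign(long timestampMillis, String secret) throws Exception {
            String stringToSign = timestampMillis + "\n" + secret;
            Mac mac = Mac.getInstance("HmacSHA256");
            mac.init(new SecretKeySpec(secret.getBytes(StandardCharsets.UTF_8), "HmacSHA256"));
            byte[] digest = mac.doFinal(stringToSign.getBytes(StandardCharsets.UTF_8));
            // Base64 first, then URL-encode so the value can be appended as a query parameter.
            return URLEncoder.encode(Base64.getEncoder().encodeToString(digest), StandardCharsets.UTF_8);
        }

        public static void main(String[] args) throws Exception {
            System.out.println(sign(System.currentTimeMillis(), "dummysecret"));
        }
    }

When a secret is configured, the OAP performs this kind of signing itself; the sketch is only meant to show why the secret must match what the chat platform generated.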
Update the settings dynamically
Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which overrides the settings in alarm-settings.yml.
In order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) of a rule is changed, the sliding window is destroyed and re-created, causing the Alarm of this specific rule to restart again.

Keys with data types of the alerting rule configuration file:

  Alerting element    | Configuration property key | Type           | Description
  Expression          | expression                 | string         | MQE expression
  Include names       | include-names              | string array   |
  Exclude names       | exclude-names              | string array   |
  Include names regex | include-names-regex        | string         | Java regex Pattern
  Exclude names regex | exclude-names-regex        | string         | Java regex Pattern
  Tags                | tags                       | key-value pair |
  Period              | period                     | int            |
  Silence period      | silence-period             | int            |
  Message             | message                    | string         |
  Hooks               | hooks                      | string array   |

","title":"Alerting","url":"/docs/main/next/en/setup/backend/backend-alarm/"},{"content":"

Alerting
The alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. The alerting kernel is an in-memory, time-window-based queue.
The alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.

alerting rules. They define how metrics alerting should be triggered and what conditions should be considered.
Webhooks. The list of web service endpoints, which should be called after an alerting is triggered.
gRPCHook. The host and port of the remote gRPC method, which should be called after an alerting is triggered.

Entity name
Defines the relation between scope and entity name.

Service: Service name
Instance: {Instance name} of {Service name}
Endpoint: {Endpoint name} in {Service name}
Database: Database service name
Service Relation: {Source service name} to {Dest service name}
Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name}
Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}
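Purely as an illustration of the composed formats above (all names below are made up), an include-names or exclude-names entry for a non-service scope has to use the full composed entity name:

    // Hypothetical names; shows how entity names compose for the non-service scopes.
    public class EntityNameExamples {
        public static void main(String[] args) {
            String service = "serviceA";
            String instance = "instance-01";
            String endpoint = "/users/{id}";
            System.out.println(instance + " of " + service);   // Instance scope: "instance-01 of serviceA"
            System.out.println(endpoint + " in " + service);   // Endpoint scope: "/users/{id} in serviceA"
            System.out.println(service + " to " + "serviceB"); // Service Relation scope: "serviceA to serviceB"
        }
    }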
Rules
There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.

Individual rules
An alerting rule is made up of the following elements:

Rule name. A unique name shown in the alarm message. It must end with _rule.
Metrics name. This is also the metrics name in the OAL script. Only long, double, and int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details.
Include names. Entity names that are included in this rule. Please follow the entity name definitions.
Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions.
Include names regex. A regex that includes entity names. If both the include-name list and the include-name regex are set, both take effect.
Exclude names regex. A regex that excludes entity names. If both the exclude-name list and the exclude-name regex are set, both take effect.
Include labels. Metric labels that are included in this rule.
Exclude labels. Metric labels that are excluded from this rule.
Include labels regex. A regex that includes labels. If both the include-label list and the include-label regex are set, both take effect.
Exclude labels regex. A regex that excludes labels. If both the exclude-label list and the exclude-label regex are set, both take effect.
Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.

The label settings are required by the meter system. They are used to store metrics from label-system platforms, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.

Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array, described as value1, value2, value3, value4, value5. Each value may serve as the threshold for the corresponding value of the metrics. Set a value to - if you do not wish to trigger the Alarm on that value. For example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule.
OP. The operator. It supports >, >=, <, <=, ==. We welcome contributions of all OPs.
Period. The size of the metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time.
Count. Within a period window, if the number of times the value goes over the threshold (based on OP) reaches count, an alarm is sent.
Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of a composite rule.
Silence period. After the alarm is triggered at Time-N (TN), there will be silence during TN -> TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.

For example, for a metric there is a sliding window like the following at T7:

  T1     | T2     | T3     | T4     | T5     | T6     | T7
  Value1 | Value2 | Value3 | Value4 | Value5 | Value6 | Value7

Period (time points T1 ~ T7) are continuous data points, one per minute. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient.
Values (Value1 ~ Value7) are the values or labeled values for every time point.
Count's value (N) means that N values in the window matched the operator and threshold.
Every minute the window shifts automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.
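To make the interplay of period, count, and silence-period concrete, here is a small, self-contained simulation. It is illustrative only and not the OAP implementation; among other things, the real kernel tracks silence per alarm ID and supports labeled values:

    import java.util.ArrayDeque;
    import java.util.Deque;

    // Illustrative simulation of a single-value rule such as "op: >, threshold: 1000".
    public class SlidingWindowSketch {
        private final int period;         // window size in minutes
        private final int count;          // how many matching values trigger an alarm
        private final int silencePeriod;  // minutes to stay silent after triggering
        private final Deque<Long> window = new ArrayDeque<>();
        private int silenceCountdown = 0;

        SlidingWindowSketch(int period, int count, int silencePeriod) {
            this.period = period;
            this.count = count;
            this.silencePeriod = silencePeriod;
        }

        // Called once per minute with the latest metric value; returns true if an alarm fires.
        boolean add(long value, long threshold) {
            if (window.size() == period) {
                window.removeFirst();      // T1/Value1 drops out when T8 arrives
            }
            window.addLast(value);
            long matched = window.stream().filter(v -> v > threshold).count(); // "op: >"
            if (silenceCountdown > 0) {
                silenceCountdown--;
                return false;              // still inside the silence period
            }
            if (matched >= count) {
                silenceCountdown = silencePeriod;
                return true;
            }
            return false;
        }

        public static void main(String[] args) {
            SlidingWindowSketch rule = new SlidingWindowSketch(10, 3, 5);
            long[] respTimes = {800, 1200, 900, 1500, 1700, 950, 1100};
            for (int minute = 0; minute < respTimes.length; minute++) {
                if (rule.add(respTimes[minute], 1000)) {
                    System.out.println("alarm at minute T" + (minute + 1));
                }
            }
        }
    }

With the sample values above, the third value over the threshold arrives at T5, so a single alarm fires there and the rule then stays silent for the configured five minutes.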
Composite rules
NOTE: Composite rules are only applicable to alerting rules targeting the same entity level, such as service-level alarm rules (service_percent_rule && service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.
A composite rule is made up of the following elements:

Rule name. A unique name shown in the alarm message. Must end with _rule.
Expression. Specifies how to compose rules, and supports &&, ||, and ().
Message. The notification message to be sent out when the rule is triggered.
Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.

rules:
  # Rule unique name, must be ended with `_rule`.
  endpoint_percent_rule:
    # Metrics value needs to be long, double or int
    metrics-name: endpoint_percent
    threshold: 75
    op: <
    # The length of time to evaluate the metrics
    period: 10
    # How many times the metrics must match the condition before an alarm is triggered
    count: 3
    # How many checks the alarm keeps silent for after it is triggered; defaults to the same value as period.
    silence-period: 10
    # Specify if the rule can send a notification or only serves as a condition of a composite rule
    only-as-condition: false
    tags:
      level: WARNING
  service_percent_rule:
    metrics-name: service_percent
    # [Optional] Default, match all services in this metrics
    include-names:
      - service_a
      - service_b
    exclude-names:
      - service_c
    # Single value metrics threshold.
    threshold: 85
    op: <
    period: 10
    count: 4
    only-as-condition: false
  service_resp_time_percentile_rule:
    # Metrics value needs to be long, double or int
    metrics-name: service_percentile
    op: ">"
    # Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.
    threshold: 1000,1000,1000,1000,1000
    period: 10
    count: 3
    silence-period: 5
    message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 > 1000, p75 > 1000, p90 > 1000, p95 > 1000, p99 > 1000
    only-as-condition: false
  meter_service_status_code_rule:
    metrics-name: meter_status_code
    exclude-labels:
      - "200"
    op: ">"
    threshold: 10
    period: 10
    count: 3
    silence-period: 5
    message: The request number of entity {name} non-200 status is more than expected.
    only-as-condition: false

composite-rules:
  comp_rule:
    # Must satisfy both the percent rule and the response time rule
    expression: service_percent_rule && service_resp_time_percentile_rule
    message: Service {name} successful rate is less than 80% and P50 of response time is over 1000ms
    tags:
      level: CRITICAL

Default alarm rules
For convenience's sake, we have provided a default alarm-settings.yml in our release. It includes the following rules:

Service average response time over 1s in the last 3 minutes.
Service success rate lower than 80% in the last 2 minutes.
Percentile of service response time over 1s in the last 3 minutes.
Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex.
Endpoint average response time over 1s in the last 2 minutes.
Database access average response time over 1s in the last 2 minutes.
Endpoint relation average response time over 1s in the last 2 minutes.

List of all potential metrics name
The metrics names are defined in the official OAL scripts and MAL scripts. The Event names can also serve as metrics names; all possible event names can be found in the Event doc.
Currently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, and Endpoint Relation scopes can be used in Alarm, and the Database access scope is the same as Service.
Submit an issue or a pull request if you want to support any other scopes in Alarm.

Webhook
The Webhook requires the peer to be a web container. The alarm message is sent as an HTTP POST with the application/json content type. The JSON format is based on List<org.apache.skywalking.oap.server.core.alarm.AlarmMessage> with the following key information: scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0.
The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotPagerDuty Hook The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerDutyHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integrationKeys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Hook Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscordHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (metrics-name, op, threshold, period, count, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Include labels include-labels string array    Exclude labels exclude-labels string array    Include labels regex include-labels-regex string Java regex Pattern   Exclude labels regex exclude-labels-regex string Java regex Pattern   Tags tags key-value pair    Threshold threshold number    OP op operator example: \u0026gt;, \u0026gt;=   Period Period int    Count count int    Only as condition only-as-condition boolean    Silence period silence-period int    Message message string     ","title":"Alerting","url":"/docs/main/v9.2.0/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alerting is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. 
A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Count\u0026rsquo;s value(N) represents there are N values in the window matched the operator and threshold. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  Composite rules NOTE: Composite rules are only applicable to alerting rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. 
The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotPagerDuty Hook The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerDutyHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integrationKeys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Hook Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscordHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (metrics-name, op, threshold, period, count, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Include labels include-labels string array    Exclude labels exclude-labels string array    Include labels regex include-labels-regex string Java regex Pattern   Exclude labels regex exclude-labels-regex string Java regex Pattern   Tags tags key-value pair    Threshold threshold number    OP op operator example: \u0026gt;, \u0026gt;=   Period Period int    Count count int    Only as condition only-as-condition boolean    Silence period silence-period int    Message message string     ","title":"Alerting","url":"/docs/main/v9.3.0/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alerting is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. 
A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Count\u0026rsquo;s value(N) represents there are N values in the window matched the operator and threshold. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  Composite rules NOTE: Composite rules are only applicable to alerting rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. 
The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotPagerDuty Hook The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerDutyHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integrationKeys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Hook Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscordHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (metrics-name, op, threshold, period, count, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Include labels include-labels string array    Exclude labels exclude-labels string array    Include labels regex include-labels-regex string Java regex Pattern   Exclude labels regex exclude-labels-regex string Java regex Pattern   Tags tags key-value pair    Threshold threshold number    OP op operator example: \u0026gt;, \u0026gt;=   Period Period int    Count count int    Only as condition only-as-condition boolean    Silence period silence-period int    Message message string     ","title":"Alerting","url":"/docs/main/v9.4.0/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. Webhooks. The list of web service endpoints, which should be called after an alerting is triggered. gRPCHook. The host and port of the remote gRPC method, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Database: Database service name Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules There are two types of rules: individual rules and composite rules. A composite rule is a combination of individual rules.\nIndividual rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Metrics name. This is also the metrics name in the OAL script. Only long, double, int types are supported. See the list of all potential metrics name. Events can also be configured as the source of Alarm. Please refer to the event doc for more details. Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Include labels. Metric labels that are included in this rule. Exclude labels. Metric labels that are excluded from this rule. Include labels regex. A regex that includes labels. If both include-label list and include-label regex are set, both rules will take effect. Exclude labels regex. 
A regex that excludes labels. Both rules will take effect if both exclude-label list and exclude-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.  Label settings are required by the meter system. They are used to store metrics from the label-system platform, such as Prometheus, Micrometer, etc. The four label settings mentioned above must implement LabeledValueHolder.\n Threshold. The target value. For multiple-value metrics, such as percentile, the threshold is an array. It is described as: value1, value2, value3, value4, value5. Each value may serve as the threshold for each value of the metrics. Set the value to - if you do not wish to trigger the Alarm by one or more of the values.\nFor example, in percentile, value1 is the threshold of P50, and -, -, value3, value4, value5 means that there is no threshold for P50 and P75 in the percentile alarm rule. OP. The operator. It supports \u0026gt;, \u0026gt;=, \u0026lt;, \u0026lt;=, ==, !=. We welcome contributions of all OPs. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Count. Within a period window, if the number of times which value goes over the threshold (based on OP) reaches count, then an alarm will be sent. Only as condition. Indicates if the rule can send notifications or if it simply serves as a condition of the composite rule. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Count\u0026rsquo;s value(N) represents there are N values in the window matched the operator and threshold. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  Composite rules NOTE: Composite rules are only applicable to alerting rules targeting the same entity level, such as service-level alarm rules (service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rule). Do not compose alarm rules of different entity levels, such as an alarm rule of the service metrics with another rule of the endpoint metrics.\nA composite rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. Must end with _rule. Expression. Specifies how to compose rules, and supports \u0026amp;\u0026amp;, ||, and (). Message. The notification message to be sent out when the rule is triggered. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users.  
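The period/count window described earlier in this section can be pictured as a fixed-size queue of per-minute values. The following is a minimal, illustrative Java sketch of that idea; the class and field names are invented for this example and this is not SkyWalking OAP code, which also tracks entity IDs, labeled values, and all OP variants. The full alarm-settings.yml rule example follows right after this sketch.

// Illustrative sketch only (not OAP source): a period/count/silence-period window check.
import java.util.ArrayDeque;
import java.util.Deque;

public class WindowRuleSketch {
    private final Deque<Long> window = new ArrayDeque<>();
    private final int period;          // window size in minutes
    private final int count;           // how many matches are needed to alarm
    private final long threshold;
    private final int silencePeriod;
    private int silenceCountdown = 0;  // minutes left in the silence period

    public WindowRuleSketch(int period, int count, long threshold, int silencePeriod) {
        this.period = period;
        this.count = count;
        this.threshold = threshold;
        this.silencePeriod = silencePeriod;
    }

    /** Called once per minute with the latest metric value; returns true if an alarm should fire. */
    public boolean onMinute(long value) {
        window.addLast(value);
        if (window.size() > period) {
            window.removeFirst();          // at T8, T1/Value1 drops out of the window
        }
        if (silenceCountdown > 0) {
            silenceCountdown--;
            return false;                  // already alarmed recently, stay silent
        }
        long matches = window.stream().filter(v -> v > threshold).count(); // OP is ">" here
        if (matches >= count) {
            silenceCountdown = silencePeriod;
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        WindowRuleSketch rule = new WindowRuleSketch(10, 3, 1000, 5);
        for (long v : new long[]{900, 1200, 950, 1300, 1100, 800}) {
            System.out.println(v + " -> alarm=" + rule.onMinute(v));
        }
    }
}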
rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# Metrics value need to be long, double or intmetrics-name:endpoint_percentthreshold:75op:\u0026lt;# The length of time to evaluate the metricsperiod:10# How many times after the metrics match the condition, will trigger alarmcount:3# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10# Specify if the rule can send notification or just as an condition of composite ruleonly-as-condition:falsetags:level:WARNINGservice_percent_rule:metrics-name:service_percent# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_c# Single value metrics threshold.threshold:85op:\u0026lt;period:10count:4only-as-condition:falseservice_resp_time_percentile_rule:# Metrics value need to be long, double or intmetrics-name:service_percentileop:\u0026#34;\u0026gt;\u0026#34;# Multiple value metrics threshold. Thresholds for P50, P75, P90, P95, P99.threshold:1000,1000,1000,1000,1000period:10count:3silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000only-as-condition:falsemeter_service_status_code_rule:metrics-name:meter_status_codeexclude-labels:- \u0026#34;200\u0026#34;op:\u0026#34;\u0026gt;\u0026#34;threshold:10period:10count:3silence-period:5message:The request number of entity {name} non-200 status is more than expected.only-as-condition:falsecomposite-rules:comp_rule:# Must satisfied percent rule and resp time rule expression:service_percent_rule \u0026amp;\u0026amp; service_resp_time_percentile_rulemessage:Service {name} successful rate is less than 80% and P50 of response time is over 1000mstags:level:CRITICALDefault alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts, the Event names can also serve as the metrics names, all possible event names can be also found in the Event doc.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. 
The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPCHook The alarm message will be sent through remote gRPC method by Protobuf content type. The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Hook Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslackHooks:textTemplate:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Hook Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechatHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Hook Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalkHooks:textTemplate:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Hook Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishuHooks:textTemplate:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Hook Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelinkHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client_id:\u0026#34;dummy_client_id\u0026#34;client_secret:dummy_secret_keyaccess_token_url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage_url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group_ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot_name:robotPagerDuty Hook The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerDutyHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integrationKeys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Hook Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscordHooks:textTemplate:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (metrics-name, op, threshold, period, count, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Include labels include-labels string array    Exclude labels exclude-labels string array    Include labels regex include-labels-regex string Java regex Pattern   Exclude labels regex exclude-labels-regex string Java regex Pattern   Tags tags key-value pair    Threshold threshold number    OP op operator example: \u0026gt;, \u0026gt;=   Period Period int    Count count int    Only as condition only-as-condition boolean    Silence period silence-period int    Message message string     ","title":"Alerting","url":"/docs/main/v9.5.0/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alerting is triggered.  Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. A MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate the request latency is slower than 1s. The typical illegal expressions are  avg(service_resp_time \u0026gt; 1000) + 1 expression root doesn\u0026rsquo;t use Compare Operation service_resp_time \u0026gt; 1000 expression return a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.    The metrics names in the expression could be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. 
Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default.
Period. The size of the metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time.
Hooks. Binds the specific names of the hooks to be called when the alarm is triggered. The name format is {hookType}.{hookName} (e.g. slack.custom1) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used.
Silence period. After the alarm is triggered at Time-N (TN), there will be silence during TN -> TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.

For example, for a metric there is a shifting window as follows at T7.

T1 T2 T3 T4 T5 T6 T7
Value1 Value2 Value3 Value4 Value5 Value6 Value7

Period (time points T1 ~ T7) covers continuous data points in minutes. Note that alerts are not supported above minute-by-minute periods, as they would not be efficient.
Values (Value1 ~ Value7) are the values or labeled values for every time point.
The expression is calculated based on the metric values (Value1 ~ Value7). For example, with the expression avg(service_resp_time) > 1000, if the values are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 1001 + ... + 1001) / 7) > 1000 and the result would be 1 (true). The alarm would then be triggered.
In every minute, the window shifts automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.

NOTE:
If the expression includes labeled metrics and the result has multiple labeled values (e.g. sum(service_percentile{_='0,1'} > 1000) >= 3), the alarm will be triggered if any of the labeled value results matches the condition 3 times (P50 > 1000 or P75 > 1000).
One alarm rule targets a single entity level, such as a service-level expression (avg(service_resp_time) > 1000). Set entity names (Include/Exclude names, etc.) according to the metrics entity level; do not mix metrics of different entity levels, such as service metrics and endpoint metrics, in the same expression.
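To make the worked window calculation above concrete, here is a small, illustrative Java sketch (invented names; not MQE or OAP source) showing how a compare-rooted expression reduces the cached window to a single 1/0 result. The complete YAML rule examples for this format follow below.

// Illustrative sketch only: compare-rooted expressions over the cached window.
import java.util.List;

public class ExpressionWindowSketch {

    /** avg(values) > threshold  ->  1 (true) / 0 (false), mirroring the SINGLE_VALUE compare result. */
    static int avgGreaterThan(List<Double> window, double threshold) {
        double avg = window.stream().mapToDouble(Double::doubleValue).average().orElse(0);
        return avg > threshold ? 1 : 0;
    }

    /** sum(values < threshold) >= times  ->  1 / 0, the shape used by rules such as
        "sum((endpoint_sla / 100) < 75) >= 3" in the example below. */
    static int sumBelowAtLeast(List<Double> window, double threshold, long times) {
        long matched = window.stream().filter(v -> v < threshold).count();
        return matched >= times ? 1 : 0;
    }

    public static void main(String[] args) {
        List<Double> respTime = List.of(1001d, 1001d, 1001d, 1001d, 1001d, 1001d, 1001d);
        System.out.println(avgGreaterThan(respTime, 1000));   // 1 -> alarm triggered

        List<Double> sla = List.of(90d, 70d, 60d, 95d, 50d);  // already divided by 100
        System.out.println(sumBelowAtLeast(sla, 75, 3));      // 1 -> three values below 75
    }
}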
rules:
  # Rule unique name, must be ended with `_rule`.
  endpoint_percent_rule:
    # A MQE expression and the root operation of the expression must be a Compare Operation.
    expression: sum((endpoint_sla / 100) < 75) >= 3
    # The length of time to evaluate the metrics
    period: 10
    # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.
    silence-period: 10
    message: Successful rate of endpoint {name} is lower than 75%
    tags:
      level: WARNING
  service_percent_rule:
    expression: sum((service_sla / 100) < 85) >= 4
    # [Optional] Default, match all services in this metrics
    include-names:
      - service_a
      - service_b
    exclude-names:
      - service_c
    period: 10
    message: Service {name} successful rate is less than 85%
  service_resp_time_percentile_rule:
    expression: sum(service_percentile{_='0,1,2,3,4'} > 1000) >= 3
    period: 10
    silence-period: 5
    message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 > 1000, p75 > 1000, p90 > 1000, p95 > 1000, p99 > 1000
  meter_service_status_code_rule:
    expression: sum(aggregate_labels(meter_status_code{_='4xx,5xx'},sum) > 10) > 3
    period: 10
    count: 3
    silence-period: 5
    message: The request number of entity {name} 4xx and 5xx status is more than expected.
    hooks:
      - "slack.custom1"
      - "pagerduty.custom1"
  comp_rule:
    expression: (avg(service_sla / 100) > 80) * (avg(service_percentile{_='0'}) > 1000) == 1
    period: 10
    message: Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.
    tags:
      level: CRITICAL
    hooks:
      - "slack.default"
      - "slack.custom1"
      - "pagerduty.custom1"

Default alarm rules For convenience's sake, we have provided a default alarm-settings.yml in our release. It includes the following rules:

Service average response time over 1s in the last 3 minutes.
Service success rate lower than 80% in the last 2 minutes.
Percentile of service response time over 1s in the last 3 minutes.
Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex.
Endpoint average response time over 1s in the last 2 minutes.
Database access average response time over 1s in the last 2 minutes.
Endpoint relation average response time over 1s in the last 2 minutes.

List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.
Currently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, and Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.
Submit an issue or a pull request if you want to support any other scopes in Alarm.

Hooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, and each hook can have its own configuration. For example, you can configure two Slack hooks: one named default with is-default: true, which means this hook applies to all Alarm Rules that have no hooks configured. 
Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type. The JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type. 
The message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalk:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishu:default:is-default:truetext-template:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Follow the WeLink Webhooks guide and create new Webhooks. 
The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelink:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client-id:\u0026#34;dummy_client_id\u0026#34;client-secret:dummy_secret_keyaccess-token-url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage-url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group-ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot-name:robotPagerDuty The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerduty:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integration-keys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscord:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Expression expression string MQE expression   Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Tags tags key-value pair    Period Period int    Silence period silence-period int    Message message string    Hooks hooks string array     ","title":"Alerting","url":"/docs/main/v9.6.0/en/setup/backend/backend-alarm/"},{"content":"Alerting Alerting mechanism measures system performance according to the metrics of services/instances/endpoints from different layers. Alerting kernel is an in-memory, time-window based queue.\nThe alerting core is driven by a collection of rules defined in config/alarm-settings.yml. There are three parts to alerting rule definitions.\n alerting rules. They define how metrics alerting should be triggered and what conditions should be considered. hooks. The list of hooks, which should be called after an alerting is triggered.  
Entity name Defines the relation between scope and entity name.\n Service: Service name Instance: {Instance name} of {Service name} Endpoint: {Endpoint name} in {Service name} Service Relation: {Source service name} to {Dest service name} Instance Relation: {Source instance name} of {Source service name} to {Dest instance name} of {Dest service name} Endpoint Relation: {Source endpoint name} in {Source Service name} to {Dest endpoint name} in {Dest service name}  Rules An alerting rule is made up of the following elements:\n Rule name. A unique name shown in the alarm message. It must end with _rule. Expression. A MQE expression that defines the conditions of the rule. The result type must be SINGLE_VALUE and the root operation of the expression must be a Compare Operation which provides 1(true) or 0(false) result. When the result is 1(true), the alarm will be triggered. For example, avg(service_resp_time / 1000) \u0026gt; 1 is a valid expression to indicate the request latency is slower than 1s. The typical illegal expressions are  avg(service_resp_time \u0026gt; 1000) + 1 expression root doesn\u0026rsquo;t use Compare Operation service_resp_time \u0026gt; 1000 expression return a TIME_SERIES_VALUES type of values rather than a SINGLE_VALUE value.    The metrics names in the expression could be found in the list of all potential metrics name doc.\n Include names. Entity names that are included in this rule. Please follow the entity name definitions. Exclude names. Entity names that are excluded from this rule. Please follow the entity name definitions. Include names regex. A regex that includes entity names. If both include-name list and include-name regex are set, both rules will take effect. Exclude names regex. A regex that excludes entity names. Both rules will take effect if both include-label list and include-label regex are set. Tags. Tags are key/value pairs that are attached to alarms. Tags are used to specify distinguishing attributes of alarms that are meaningful and relevant to users. If you want to make these tags searchable on the SkyWalking UI, you may set the tag keys in core/default/searchableAlarmTags or through the system environment variable SW_SEARCHABLE_ALARM_TAG_KEYS. The key level is supported by default. Period. The size of metrics cache in minutes for checking the alarm conditions. This is a time window that corresponds to the backend deployment env time. Hooks. Binding the specific names of the hooks when the alarm is triggered. The name format is {hookType}.{hookName} (slack.custom1 e.g.) and must be defined in the hooks section of the alarm-settings.yml file. If the hook name is not specified, the global hook will be used. Silence period. After the alarm is triggered at Time-N (TN), there will be silence during the TN -\u0026gt; TN + period. By default, it works in the same manner as period. The same Alarm (having the same ID in the same metrics name) may only be triggered once within a period.  Such as for a metric, there is a shifting window as following at T7.\n   T1 T2 T3 T4 T5 T6 T7     Value1 Value2 Value3 Value4 Value5 Value6 Value7     Period(Time point T1 ~ T7) are continuous data points for minutes. Notice, alerts are not supported above minute-by-minute periods as they would not be efficient. Values(Value1 ~ Value7) are the values or labeled values for every time point. Expression is calculated based on the metric values(Value1 ~ Value7). 
For example, expression avg(service_resp_time) \u0026gt; 1000, if the value are 1001, 1001, 1001, 1001, 1001, 1001, 1001, the calculation is ((1001 + 10001 + ... + 1001) / 7) \u0026gt; 1000 and the result would be 1(true). Then the alarm would be triggered. In every minute, the window would shift automatically. At T8, Value8 would be cached, and T1/Value1 would be removed from the window.  NOTE:\n If the expression include labeled metrics and result has multiple labeled value(e.g. sum(service_percentile{_='0,1'} \u0026gt; 1000) \u0026gt;= 3), the alarm will be triggered if any of the labeled value result matches 3 times of the condition(P50 \u0026gt; 1000 or P75 \u0026gt; 1000). One alarm rule is targeting the same entity level, such as service-level expression (avg(service_resp_time) \u0026gt; 1000). Set entity names(Include/Exclude names\u0026hellip;) according to metrics entity levels, do not include different entity levels metrics in the same expression, such as service metrics and endpoint metrics.  rules:# Rule unique name, must be ended with `_rule`.endpoint_percent_rule:# A MQE expression and the root operation of the expression must be a Compare Operation.expression:sum((endpoint_sla / 100) \u0026lt; 75) \u0026gt;= 3# The length of time to evaluate the metricsperiod:10# How many times of checks, the alarm keeps silence after alarm triggered, default as same as period.silence-period:10message:Successful rate of endpoint {name} is lower than 75%tags:level:WARNINGservice_percent_rule:expression:sum((service_sla / 100) \u0026lt; 85) \u0026gt;= 4# [Optional] Default, match all services in this metricsinclude-names:- service_a- service_bexclude-names:- service_cperiod:10message:Service {name} successful rate is less than 85%service_resp_time_percentile_rule:expression:sum(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} \u0026gt; 1000) \u0026gt;= 3period:10silence-period:5message:Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000meter_service_status_code_rule:expression:sum(aggregate_labels(meter_status_code{_=\u0026#39;4xx,5xx\u0026#39;},sum) \u0026gt; 10) \u0026gt; 3period:10count:3silence-period:5message:The request number of entity {name} 4xx and 5xx status is more than expected.hooks:- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;comp_rule:expression:(avg(service_sla / 100) \u0026gt; 80) * (avg(service_percentile{_=\u0026#39;0\u0026#39;}) \u0026gt; 1000) == 1period:10message:Service {name} avg successful rate is less than 80% and P50 of avg response time is over 1000ms in last 10 minutes.tags:level:CRITICALhooks:- \u0026#34;slack.default\u0026#34;- \u0026#34;slack.custom1\u0026#34;- \u0026#34;pagerduty.custom1\u0026#34;Default alarm rules For convenience\u0026rsquo;s sake, we have provided a default alarm-setting.yml in our release. It includes the following rules:\n Service average response time over 1s in the last 3 minutes. Service success rate lower than 80% in the last 2 minutes. Percentile of service response time over 1s in the last 3 minutes Service Instance average response time over 1s in the last 2 minutes, and the instance name matches the regex. Endpoint average response time over 1s in the last 2 minutes. Database access average response time over 1s in the last 2 minutes. Endpoint relation average response time over 1s in the last 2 minutes.  
List of all potential metrics name The metrics names are defined in the official OAL scripts and MAL scripts.\nCurrently, metrics from the Service, Service Instance, Endpoint, Service Relation, Service Instance Relation, Endpoint Relation scopes could be used in Alarm, and the Database access scope is the same as Service.\nSubmit an issue or a pull request if you want to support any other scopes in Alarm.\nHooks Hooks are a way to send alarm messages to the outside world. SkyWalking supports multiple hooks of the same type, each hook can support different configurations. For example, you can configure two Slack hooks, one named default and set is-default: true means this hook will apply on all Alarm Rules without config hooks. Another named custom1 will only apply on the Alarm Rules which with config hooks and include the name slack.custom1.\nhooks:slack:# default here is just a name, set the field \u0026#39;is-default: true\u0026#39; if this notification hook is expected to be default globally.default:# If true, this hook will apply on all rules, unless a rule has its own specific hook. Could have more than one default hooks in the same hook type.is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zsssscustom1:text-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/custom1Currently, SkyWalking supports the following hook types:\nWebhook The Webhook requires the peer to be a web container. The alarm message will be sent through HTTP post by application/json content type after you have set up Webhook hooks as follows:\nwebhook:default:is-default:trueurls:- http://ip:port/xxx- http://ip:port/yyyThe JSON format is based on List\u0026lt;org.apache.skywalking.oap.server.core.alarm.AlarmMessage\u0026gt; with the following key information:\n scopeId, scope. All scopes are defined in org.apache.skywalking.oap.server.core.source.DefaultScopeDefine. name. Target scope entity name. Please follow the entity name definitions. id0. The ID of the scope entity that matches with the name. When using the relation scope, it is the source entity ID. id1. When using the relation scope, it is the destination entity ID. Otherwise, it is empty. ruleName. The rule name configured in alarm-settings.yml. alarmMessage. The alarm text message. startTime. The alarm time measured in milliseconds, which occurs between the current time and the midnight of January 1, 1970 UTC. tags. The tags configured in alarm-settings.yml.  
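The JSON payload example is shown next. As an illustration of the consuming side, a minimal sketch of a web-container endpoint that accepts this POST could look like the following; the port and path are hypothetical choices for this sketch, not SkyWalking defaults.

// Illustrative sketch only: a minimal peer that accepts the alarm webhook POST.
// It simply logs the JSON body; a real receiver would parse the
// List<AlarmMessage> payload shown in the example below.
import com.sun.net.httpserver.HttpServer;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;

public class AlarmWebhookReceiver {
    public static void main(String[] args) throws Exception {
        HttpServer server = HttpServer.create(new InetSocketAddress(12800), 0);
        server.createContext("/alarm/receive", exchange -> {
            try (InputStream in = exchange.getRequestBody()) {
                String json = new String(in.readAllBytes(), StandardCharsets.UTF_8);
                System.out.println("alarm payload: " + json);   // the array of AlarmMessage objects
            }
            exchange.sendResponseHeaders(200, -1);               // an empty 200 OK is enough
            exchange.close();
        });
        server.start();
        System.out.println("listening on http://localhost:12800/alarm/receive");
    }
}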
See the following example:\n[{ \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceA\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;12\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage xxxx\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;WARNING\u0026#34; }] }, { \u0026#34;scopeId\u0026#34;: 1, \u0026#34;scope\u0026#34;: \u0026#34;SERVICE\u0026#34;, \u0026#34;name\u0026#34;: \u0026#34;serviceB\u0026#34;, \u0026#34;id0\u0026#34;: \u0026#34;23\u0026#34;, \u0026#34;id1\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;ruleName\u0026#34;: \u0026#34;service_resp_time_rule\u0026#34;, \u0026#34;alarmMessage\u0026#34;: \u0026#34;alarmMessage yyy\u0026#34;, \u0026#34;startTime\u0026#34;: 1560524171000, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;CRITICAL\u0026#34; }] }] gRPC The alarm message will be sent through remote gRPC method by Protobuf content type after you have set up gRPC hooks as follows:\ngRPC:default:is-default:truetarget-host:iptarget-port:portThe message contains key information which are defined in oap-server/server-alarm-plugin/src/main/proto/alarm-hook.proto.\nPart of the protocol looks like this:\nmessage AlarmMessage { int64 scopeId = 1; string scope = 2; string name = 3; string id0 = 4; string id1 = 5; string ruleName = 6; string alarmMessage = 7; int64 startTime = 8; AlarmTags tags = 9;}message AlarmTags { // String key, String value pair.  repeated KeyStringValuePair data = 1;}message KeyStringValuePair { string key = 1; string value = 2;}Slack Chat Follow the Getting Started with Incoming Webhooks guide and create new Webhooks.\nThe alarm message will be sent through HTTP post by application/json content type if you have configured Slack Incoming Webhooks as follows:\nslack:default:is-default:truetext-template:|-{ \u0026#34;type\u0026#34;: \u0026#34;section\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;mrkdwn\u0026#34;, \u0026#34;text\u0026#34;: \u0026#34;:alarm_clock: *Apache Skywalking Alarm* \\n **%s**.\u0026#34; } }webhooks:- https://hooks.slack.com/services/x/y/zWeChat Note that only the WeChat Company Edition (WeCom) supports WebHooks. To use the WeChat WebHook, follow the Wechat Webhooks guide. The alarm message will be sent through HTTP post by application/json content type after you have set up Wechat Webhooks as follows:\nwechat:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=dummy_keyDingTalk Follow the Dingtalk Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. 
The alarm message will be sent through HTTP post by application/json content type if you have configured DingTalk Webhooks as follows:\ndingtalk:default:is-default:truetext-template:|-{ \u0026#34;msgtype\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;text\u0026#34;: { \u0026#34;content\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; } }webhooks:- url:https://oapi.dingtalk.com/robot/send?access_token=dummy_tokensecret:dummysecretFeishu Follow the Feishu Webhooks guide and create new Webhooks. You can configure an optional secret for an individual webhook URL for security purposes. If you want to direct a text to a user, you can configure ats, which is Feishu\u0026rsquo;s user_id and separated by \u0026ldquo;,\u0026rdquo; . The alarm message will be sent through HTTP post by application/json content type if you have configured Feishu Webhooks as follows:\nfeishu:default:is-default:truetext-template:|-{ \u0026#34;msg_type\u0026#34;: \u0026#34;text\u0026#34;, \u0026#34;content\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34; }, \u0026#34;ats\u0026#34;:\u0026#34;feishu_user_id_1,feishu_user_id_2\u0026#34; }webhooks:- url:https://open.feishu.cn/open-apis/bot/v2/hook/dummy_tokensecret:dummysecretWeLink Follow the WeLink Webhooks guide and create new Webhooks. The alarm message will be sent through HTTP post by application/json content type if you have configured WeLink Webhooks as follows:\nwelink:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:# you may find your own client_id and client_secret in your app, below are dummy, need to change.- client-id:\u0026#34;dummy_client_id\u0026#34;client-secret:dummy_secret_keyaccess-token-url:https://open.welink.huaweicloud.com/api/auth/v2/ticketsmessage-url:https://open.welink.huaweicloud.com/api/welinkim/v1/im-service/chat/group-chat# if you send to multi group at a time, separate group_ids with commas, e.g. \u0026#34;123xx\u0026#34;,\u0026#34;456xx\u0026#34;group-ids:\u0026#34;dummy_group_id\u0026#34;# make a name you like for the robot, it will display in grouprobot-name:robotPagerDuty The PagerDuty hook is based on Events API v2.\nFollow the Getting Started section to create an Events API v2 integration on your PagerDuty service and copy the integration key.\nThen configure as follows:\npagerduty:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;integration-keys:- 5c6d805c9dcf4e03d09dfa81e8789ba1You can also configure multiple integration keys.\nDiscord Follow the Discord Webhooks guide and create a new webhook.\nThen configure as follows:\ndiscord:default:is-default:truetext-template:\u0026#34;Apache SkyWalking Alarm: \\n %s.\u0026#34;webhooks:- url:https://discordapp.com/api/webhooks/1008166889777414645/8e0Am4Zb-YGbBqqbiiq0jSHPTEEaHa4j1vIC-zSSm231T8ewGxgY0_XUYpY-k1nN4HBlusername:robotUpdate the settings dynamically Since 6.5.0, the alerting settings can be updated dynamically at runtime by Dynamic Configuration, which will override the settings in alarm-settings.yml.\nIn order to determine whether an alerting rule is triggered or not, SkyWalking needs to cache the metrics of a time window for each alerting rule. If any attribute (expression, period, etc.) 
of a rule is changed, the sliding window will be destroyed and re-created, causing the Alarm of this specific rule to restart again.\nKeys with data types of alerting rule configuration file    Alerting element Configuration property key Type Description     Expression expression string MQE expression   Include names include-names string array    Exclude names exclude-names string array    Include names regex include-names-regex string Java regex Pattern   Exclude names regex exclude-names-regex string Java regex Pattern   Tags tags key-value pair    Period Period int    Silence period silence-period int    Message message string    Hooks hooks string array     ","title":"Alerting","url":"/docs/main/v9.7.0/en/setup/backend/backend-alarm/"},{"content":"ALS Load Balance Using satellite as a load balancer in envoy and OAP can effectively prevent the problem of unbalanced messages received by OAP.\nIn this case, we mainly use memory queues for intermediate data storage.\nDeference Envoy Count, OAP performance could impact the Satellite transmit performance.\n   Envoy Instance Concurrent User ALS OPS Satellite CPU Satellite Memory     150 100 ~50K 1.2C 0.5-1.0G   150 300 ~80K 1.8C 1.0-1.5G   300 100 ~50K 1.4C 0.8-1.2G   300 300 ~100K 2.2C 1.3-2.0G   800 100 ~50K 1.5C 0.9-1.5G   800 300 ~100K 2.6C 1.7-2.7G   1500 100 ~50K 1.7C 1.4-2.4G   1500 300 ~100K 2.7C 2.3-3.0G   2300 150 ~50K 1.8C 1.9-3.1G   2300 300 ~90K 2.5C 2.3-4.0G   2300 500 ~110K 3.2C 2.8-4.7G    Detail Environment Using GKE Environment, helm to build cluster.\n   Module Version Replicate Count CPU Limit Memory Limit Description     OAP 8.9.0 6 12C 32Gi Using ElasticSearch as Storage   Satellite 0.4.0 1 8C 16Gi    ElasticSearch 7.5.1 3 8 16Gi     Setting 800 Envoy, 100K QPS ALS.\n   Module Environment Config Use Value Default Value Description Recommend Value     Satellite SATELLITE_QUEUE_PARTITION 50 4 Support several goroutines concurrently to consume the queue Satellite CPU number * 4-6, It could help improve throughput, but the default value also could handle 800 Envoy Instance and 100K QPS ALS message.   Satellite SATELLITE_QUEUE_EVENT_BUFFER_SIZE 3000 1000 The size of the queue in each concurrency This is related to the number of Envoys. If the number of Envoys is large, it is recommended to increase the value.   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME 3000 1000 When the Satellite receives the message, how long(millisecond) will the ALS message be merged into an Event. 
If a certain time delay is accepted, the value can be adjusted larger, which can effectively reduce CPU usage and make the Satellite more stable   Satellite SATELLITE_ENVOY_ALS_V3_PIPE_SENDER_FLUSH_TIME 3000 1000 How long(millisecond) is the memory queue data for each Goroutine to be summarized and sent to OAP This depends on the amount of data in your queue, you can keep it consistent with SATELLITE_ENVOY_ALS_V3_PIPE_RECEIVER_FLUSH_TIME   OAP SW_CORE_GRPC_MAX_CONCURRENT_CALL 50 4 A link between Satellite and OAP, how many requests parallelism is supported Same with SATELLITE_QUEUE_PARTITION in Satellite    ","title":"ALS Load Balance","url":"/docs/skywalking-satellite/latest/en/setup/performance/als-load-balance/readme/"},
{"content":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and Service Mesh Traffic(Access Log in gRPC) are able to be analyzed by OAL, to build metrics of services, service instances and endpoints, and to build topology/dependency of services, service instances and endpoints(traces-oriented analysis only).
The spans of traces relative with RPC, such as HTTP, gRPC, Dubbo, RocketMQ, Kafka, would be converted to service input/output traffic, like access logs collected from service mesh. Both of those traffic would be cataloged as the defined sources in the Observability Analysis Language engine.
The metrics are customizable through Observability Analysis Language(OAL) scripts, and the topology/dependency is built by the SkyWalking OAP kernel automatically without explicit OAL scripts.
Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.
OAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.
OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically. Don't expect to mount the changes of those scripts in the runtime. If your OAP servers are running in a cluster mode, these script defined metrics should be aligned.
You can set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.
Grammar Scripts should be named *.oal

// Declare the metrics.
METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...]))
[.filter(CAST FIELD OP [INT | STRING])]
.FUNCTION([PARAM][, PARAM ...])
// Disable hard code
disable(METRICS_NAME);

From The from statement defines the data source of this OAL expression.
Primary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.
See Scope Definitions, where you can find all existing Scopes and Fields.
Filter Use filter to build conditions for the value of fields by using field name and expression.
The filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, >, <, >=, <=, in [...], like %..., like ...%, like %...%, contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.
Aggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.
Functions provided
longAvg. The avg of all input per scope entity. 
The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. 
In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. 
service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Analysis Native Streaming Traces and Service Mesh Traffic","url":"/docs/main/latest/en/concepts-and-designs/oal/"},{"content":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and Service Mesh Traffic(Access Log in gRPC) are able to be analyzed by OAL, to build metrics of services, service instances and endpoints, and to build topology/dependency of services, service instances and endpoints(traces-oriented analysis only).\nThe spans of traces relative with RPC, such as HTTP, gRPC, Dubbo, RocketMQ, Kafka, would be converted to service input/output traffic, like access logs collected from service mesh. Both of those traffic would be cataloged as the defined sources in the Observability Analysis Language engine.\nThe metrics are customizable through Observability Analysis Language(OAL) scripts, and the topology/dependency is built by the SkyWalking OAP kernel automatically without explicit OAL scripts.\nObservability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nOAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically. Don\u0026rsquo;t expect to mount the changes of those scripts in the runtime. If your OAP servers are running in a cluster mode, these script defined metrics should be aligned.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   
instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate is expressed as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and the denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21 (param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status (success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile2(10);\n percentile (deprecated since 10.0.0) is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. percentile2 Since 10.0.0, the percentile function has been replaced by percentile2. The percentile2 function is a labeled-value metric with default label name p and label values 50,75,90,95,99. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is the same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. 
In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile2(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. 
service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Analysis Native Streaming Traces and Service Mesh Traffic","url":"/docs/main/next/en/concepts-and-designs/oal/"},{"content":"Analysis Native Streaming Traces and Service Mesh Traffic The traces in SkyWalking native format and Service Mesh Traffic(Access Log in gRPC) are able to be analyzed by OAL, to build metrics of services, service instances and endpoints, and to build topology/dependency of services, service instances and endpoints(traces-oriented analysis only).\nThe spans of traces relative with RPC, such as HTTP, gRPC, Dubbo, RocketMQ, Kafka, would be converted to service input/output traffic, like access logs collected from service mesh. Both of those traffic would be cataloged as the defined sources in the Observability Analysis Language engine.\nThe metrics are customizable through Observability Analysis Language(OAL) scripts, and the topology/dependency is built by the SkyWalking OAP kernel automatically without explicit OAL scripts.\nObservability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nOAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically. Don\u0026rsquo;t expect to mount the changes of those scripts in the runtime. If your OAP servers are running in a cluster mode, these script defined metrics should be aligned.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   
instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  
mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Analysis Native Streaming Traces and Service Mesh Traffic","url":"/docs/main/v9.7.0/en/concepts-and-designs/oal/"},{"content":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... 
build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","title":"Apache SkyWalking Agent Containerized Scenarios","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/containerization/"},{"content":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","title":"Apache SkyWalking Agent Containerized Scenarios","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/containerization/"},{"content":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... 
build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","title":"Apache SkyWalking Agent Containerized Scenarios","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/containerization/"},{"content":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","title":"Apache SkyWalking Agent Containerized Scenarios","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/containerization/"},{"content":"Apache SkyWalking Agent Containerized Scenarios Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image only hosts the pre-built SkyWalking Java agent jars, and provides some convenient configurations for containerized scenarios.\nHow to use this image Docker FROMapache/skywalking-java-agent:8.5.0-jdk8# ... 
build your java applicationYou can start your Java application with CMD or ENTRYPOINT, but you don\u0026rsquo;t need to care about the Java options to enable SkyWalking agent, it should be adopted automatically.\nKubernetes Currently, SkyWalking provides two ways to install the java agent on your services on Kubernetes.\n  To use the java agent more natively, you can try the java agent injector to inject the java agent image as a sidecar.\n  If you think it\u0026rsquo;s hard to install the injector, you can also use this java agent image as a sidecar as below.\n  apiVersion:v1kind:Podmetadata:name:agent-as-sidecarspec:restartPolicy:Nevervolumes:- name:skywalking-agentemptyDir:{}initContainers:- name:agent-containerimage:apache/skywalking-java-agent:8.7.0-alpinevolumeMounts:- name:skywalking-agentmountPath:/agentcommand:[\u0026#34;/bin/sh\u0026#34;]args:[\u0026#34;-c\u0026#34;,\u0026#34;cp -R /skywalking/agent /agent/\u0026#34;]containers:- name:app-containerimage:springio/gs-spring-boot-dockervolumeMounts:- name:skywalking-agentmountPath:/skywalkingenv:- name:JAVA_TOOL_OPTIONSvalue:\u0026#34;-javaagent:/skywalking/agent/skywalking-agent.jar\u0026#34;","title":"Apache SkyWalking Agent Containerized Scenarios","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/containerization/"},{"content":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release the SkyWalking BanyanDB in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-banyandb and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-banyandb \u0026amp;\u0026amp; cd skywalking-banyandb git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking BanyanDB $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release-assembly The skywalking-banyandb-${VERSION}-bin.tgz, skywalking-banyandb-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.asc skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.sha512 skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cd skywalking/banyandb \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking BanyanDB release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking BanyanDB version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking BanyanDB version $VERSION. 
Release notes: * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-banyandb-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-banyandb-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-banyandb/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/docs/installation.md Voting will start now and will remain open for at least 72 hours; all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-banyandb-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.asc apache-skywalking-banyandb-{src,bin}-$VERSION.tgz Build distribution from the source code package by following the build guide. Licenses header check.  Vote results should follow these rules:\n  A PMC vote is +1 binding; all others are +1 non-binding.\n  Within 72 hours, if you get at least 3 (+1 binding) and have more +1 than -1, the vote passes.\n  Send the closing vote mail to announce the result. When counting the binding and non-binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking BanyanDB version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move the source code tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/; you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb # .... # enter your apache password # ....   Remove the last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR and update news and links on the website. There are seven files that need to be modified.\n  Update the GitHub release page, following the previous convention.\n  Send the ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org; the sender should use his/her Apache email account. You can get the permalink of the vote thread here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking BanyanDB $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking BanyanDB $VERSION is now released. SkyWalking BanyanDB: An observability database that aims to ingest, analyze and store Metrics, Tracing and Logging data. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking BanyanDB Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-banyandb/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking BanyanDB release guide","url":"/docs/skywalking-banyandb/latest/release/"},{"content":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release the SkyWalking BanyanDB in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-banyandb and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-banyandb \u0026amp;\u0026amp; cd skywalking-banyandb git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking BanyanDB $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release-assembly The skywalking-banyandb-${VERSION}-bin.tgz, skywalking-banyandb-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.asc skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.sha512 skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cd skywalking/banyandb \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking BanyanDB release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking BanyanDB version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking BanyanDB version $VERSION. 
Release notes: * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-banyandb-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-banyandb-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-banyandb/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/docs/installation/binaries.md#Build-From-Source Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-banyandb-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.asc apache-skywalking-banyandb-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking BanyanDB version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking BanyanDB $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking BanyanDB $VERSION is now released. 
SkyWalking BanyanDB: An observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking BanyanDB Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-banyandb/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking BanyanDB release guide","url":"/docs/skywalking-banyandb/next/release/"},{"content":"Apache SkyWalking BanyanDB release guide This documentation guides the release manager to release the SkyWalking BanyanDB in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-banyandb and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-banyandb \u0026amp;\u0026amp; cd skywalking-banyandb git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking BanyanDB $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release-assembly The skywalking-banyandb-${VERSION}-bin.tgz, skywalking-banyandb-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.asc skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cp skywalking-banyandb/build/skywalking-banyandb*.tgz.sha512 skywalking/banyandb/\u0026#34;$VERSION\u0026#34; cd skywalking/banyandb \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking BanyanDB release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking BanyanDB version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking BanyanDB version $VERSION. 
Release notes: * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-banyandb-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-banyandb-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-banyandb/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-banyandb/blob/v$VERSION/docs/installation.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-banyandb-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-banyandb-{src,bin}-$VERSION.tgz.asc apache-skywalking-banyandb-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking BanyanDB version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/banyandb/$VERSION https://dist.apache.org/repos/dist/release/skywalking/banyandb # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking BanyanDB $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking BanyanDB $VERSION is now released. SkyWalking BanyanDB: An observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-banyandb/blob/v$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking BanyanDB Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-banyandb/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking BanyanDB release guide","url":"/docs/skywalking-banyandb/v0.5.0/release/"},{"content":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to release the SkyWalking Cloud on Kubernetes in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-swck and skywalking, create a new milestone if needed. Update CHANGES.md. Update image tags of adapter and operator.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking Cloud on Kubernetes $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release The skywalking-swck-${VERSION}-bin.tgz, skywalking-swck-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.asc skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.sha512 skywalking/swck/\u0026#34;$VERSION\u0026#34; cd skywalking/swck \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-SWCK release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list.\nSubject: [ANNOUNCEMENT] SkyWalking Cloud on Kubernetes $VERSION test build available Content: The test build of SkyWalking Cloud on Kubernetes $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/operator.md#build-from-sources * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/custom-metrics-adapter.md#use-kustomize-to-customise-your-deployment * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC member, committer or contributor can test the features of the release candidate and provide feedback. Based on that, the PMC will decide whether or not to start a vote.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md Voting will start now and will remain open for at least 72 hours. All PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in the staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-swck-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION with .asc, .sha512. LICENSE and NOTICE are in the source code and distribution packages. Check shasum -c apache-skywalking-swck-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-swck-{src,bin}-$VERSION.tgz.asc apache-skywalking-swck-{src,bin}-$VERSION.tgz Build the distribution from the source code package by following the build guide. Licenses header check. (A consolidated sketch of the checksum and signature checks is shown below.)  Vote result should follow these:\n  A PMC vote counts as +1 binding; all other votes count as +1 non-binding.\n  The vote passes if, within 72 hours, there are at least 3 +1 binding votes and more +1 votes than -1 votes.\n  Send the closing vote mail to announce the result. 
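For convenience, the checksum and signature checks above can be run in one pass. A minimal sketch (assumptions: a bash shell, the staged file names follow the apache-skywalking-swck-{src,bin}-$VERSION.tgz pattern used above, and VERSION is set to the version under vote):
export VERSION=x.y.z
BASE=https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION
curl https://www.apache.org/dist/skywalking/KEYS -o KEYS
gpg --import KEYS
for pkg in src bin; do
  f=apache-skywalking-swck-$pkg-$VERSION.tgz
  curl -O $BASE/$f -O $BASE/$f.asc -O $BASE/$f.sha512   # fetch artifact, signature, checksum
  shasum -c $f.sha512                                   # checksum check
  gpg --batch --verify $f.asc $f                        # signature check
done
If any of these commands fails, vote -1 with the reason; otherwise continue with the remaining checks (LICENSE and NOTICE, building from source, and license headers).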
When counting the binding and non-binding votes, please list the names of the voters. An example:\n[RESULT][VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move the source code tarballs and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/; you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck # .... # enter your apache password # ....   Remove the previously released tarballs from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, and update the news and links on the website. There are seven files that need to be modified.\n  Update the GitHub release page, following the previous convention.\n  Send the ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org; the sender should use his/her Apache email account. You can get the permalink of the vote thread here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Cloud on Kubernetes $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Cloud on Kubernetes $VERSION is now released. SkyWalking Cloud on Kubernetes: A bridge platform between Apache SkyWalking and Kubernetes. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking Cloud on Kubernetes Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-swck/blob/$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Cloud on Kubernetes release guide","url":"/docs/skywalking-swck/latest/release/"},{"content":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager through releasing SkyWalking Cloud on Kubernetes in the Apache Way, and also helps people check the release for the vote.\nPrerequisites  Close (if finished, or move to the next milestone otherwise) all issues in the current milestone from skywalking-swck and skywalking; create a new milestone if needed. Update CHANGES.md. Update the image tags of the adapter and operator.  Add your GPG public key to Apache svn   Log in to id.apache.org and submit your key fingerprint.\n  Add your GPG public key to the SkyWalking GPG KEYS file; you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT overwrite the existing KEYS file content; only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking Cloud on Kubernetes $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release The skywalking-swck-${VERSION}-bin.tgz and skywalking-swck-${VERSION}-src.tgz packages, together with their corresponding .asc and .sha512 files. 
In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.asc skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.sha512 skywalking/swck/\u0026#34;$VERSION\u0026#34; cd skywalking/swck \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-SWCK release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list.\nSubject: [ANNOUNCEMENT] SkyWalking Cloud on Kubernetes $VERSION test build available Content: The test build of SkyWalking Cloud on Kubernetes $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/operator.md#build-from-sources * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/custom-metrics-adapter.md#use-kustomize-to-customise-your-deployment * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz Release Tag : * (Git Tag) $VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. 
[1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-swck-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-swck-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-swck-{src,bin}-$VERSION.tgz.asc apache-skywalking-swck-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Cloud on Kubernetes $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Cloud on Kubernetes $VERSION is now released. SkyWalking Cloud on Kubernetes: A bridge platform between Apache SkyWalking and Kubernetes. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. 
Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking Cloud on Kubernetes Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-swck/blob/$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Cloud on Kubernetes release guide","url":"/docs/skywalking-swck/next/release/"},{"content":"Apache SkyWalking Cloud on Kubernetes release guide This documentation guides the release manager to release the SkyWalking Cloud on Kubernetes in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-swck and skywalking, create a new milestone if needed. Update CHANGES.md. Update image tags of adapter and operator.  Add your GPG public key to Apache svn   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking Cloud on Kubernetes $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release The skywalking-swck-${VERSION}-bin.tgz, skywalking-swck-${VERSION}-src.tgz, and their corresponding asc, sha512. In total, six files should be automatically generated in the directory.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.asc skywalking/swck/\u0026#34;$VERSION\u0026#34; cp skywalking-swck/build/release/skywalking-swck*.tgz.sha512 skywalking/swck/\u0026#34;$VERSION\u0026#34; cd skywalking/swck \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-SWCK release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list.\nSubject: [ANNOUNCEMENT] SkyWalking Cloud on Kubernetes $VERSION test build available Content: The test build of SkyWalking Cloud on Kubernetes $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/operator.md#build-from-sources * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/custom-metrics-adapter.md#use-kustomize-to-customise-your-deployment * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org\nSubject: [VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Cloud on Kubernetes version $VERSION. Release notes: * https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-swck-src-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-swck-bin-x.x.x.tgz Release Tag : * (Git Tag) $VERSION Release Commit Hash : * https://github.com/apache/skywalking-swck/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-swck/blob/$VERSION/docs/release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (apache-skywalking-swck-{src,bin}-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c apache-skywalking-swck-{src,bin}-$VERSION.tgz.sha512. Check GPG signature. Download KEYS and import them by curl https://www.apache.org/dist/skywalking/KEYS -o KEYS \u0026amp;\u0026amp; gpg --import KEYS. Check gpg --batch --verify apache-skywalking-swck-{src,bin}-$VERSION.tgz.asc apache-skywalking-swck-{src,bin}-$VERSION.tgz Build distribution from source code package by following this the build guide. Licenses header check.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. 
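The LICENSE and NOTICE check above can also be scripted. A minimal sketch (assumption: both files are included inside each package; the exact top-level directory name inside the tarball may vary):
for pkg in src bin; do
  f=apache-skywalking-swck-$pkg-$VERSION.tgz
  tar -tzf $f | grep -E '(^|/)(LICENSE|NOTICE)$' || echo missing LICENSE or NOTICE in $f
done
Run it in the directory where the two .tgz files were downloaded; any package that prints the missing message deserves a closer look before voting.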
When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Cloud on Kubernetes version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings: xxx xxx xxx ... (list names) I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/swck/$VERSION https://dist.apache.org/repos/dist/release/skywalking/swck # .... # enter your apache password # ....   Remove last released tar balls from https://dist.apache.org/repos/dist/release/skywalking\n  Refer to the previous PR, update news and links on the website. There are seven files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account. You can get the permlink of vote thread at here.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Cloud on Kubernetes $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Cloud on Kubernetes $VERSION is now released. SkyWalking Cloud on Kubernetes: A bridge platform between Apache SkyWalking and Kubernetes. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Vote Thread: $VOTE_THREAD_PERMALINK Download Links: https://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-swck/blob/$VERSION/CHANGES.md Website: https://skywalking.apache.org/ SkyWalking Cloud on Kubernetes Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Documents: https://github.com/apache/skywalking-swck/blob/$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Cloud on Kubernetes release guide","url":"/docs/skywalking-swck/v0.9.0/release/"},{"content":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. 
Consider the result a Consensus Approval if there are at least three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose an Apache ID that is not on the Apache committers list page. Download the ICLA (if the new committer contributes to the project as a day job, a CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print it, sign it by hand, scan it as a PDF, and send it as an attachment to secretary@apache.org. (If an electronic signature is preferred, please follow the steps on this page.) The PMC will wait for the Apache secretary to confirm that the ICLA (or CCLA) has been filed. The new committer and the PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). 
Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. 
But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using the private mailing list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t in most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/latest/en/guides/asf/committer/"},{"content":"Apache SkyWalking committer The SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcomes all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, a new committer nomination can only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result a Consensus Approval if there are at least three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process.
Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. 
Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... 
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/next/en/guides/asf/committer/"},{"content":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. 
Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  
If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee The Project Management Committee (PMC) member does not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding voting for releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P. and chair of the PMC is the secretary, who is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures using a private mail list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/v9.0.0/en/guides/asf/committer/"},{"content":"Apache SkyWalking committer SkyWalking Project Management Committee (PMC) is responsible for assessing the contributions of candidates.\nLike many Apache projects, SkyWalking welcome all contributions, including code contributions, blog entries, guides for new users, public speeches, and enhancement of the project in various ways.\nCommitter Nominate new committer In SkyWalking, new committer nomination could only be officially started by existing PMC members. 
If a new committer feels that he/she is qualified, he/she should contact any existing PMC member and discuss. If this is agreed among some members of the PMC, the process will kick off.\nThe following steps are recommended (to be initiated only by an existing PMC member):\n Send an email titled [DISCUSS] Promote xxx as new committer to private@skywalking.a.o. List the important contributions of the candidate, so you could gather support from other PMC members for your proposal. Keep the discussion open for more than 3 days but no more than 1 week, unless there is any express objection or concern. If the PMC generally agrees to the proposal, send an email titled [VOTE] Promote xxx as new committer to private@skywalking.a.o. Keep the voting process open for more than 3 days, but no more than 1 week. Consider the result as Consensus Approval if there are three +1 votes and +1 votes \u0026gt; -1 votes. Send an email titled [RESULT][VOTE] Promote xxx as new committer to private@skywalking.a.o, and list the voting details, including who the voters are.  Invite new committer The PMC member who starts the promotion is responsible for sending an invitation to the new committer and guiding him/her to set up the ASF env.\nThe PMC member should send an email using the following template to the new committer:\nTo: JoeBloggs@foo.net Cc: private@skywalking.apache.org Subject: Invitation to become SkyWalking committer: Joe Bloggs Hello [invitee name], The SkyWalking Project Management Committee] (PMC) hereby offers you committer privileges to the project. These privileges are offered on the understanding that you'll use them reasonably and with common sense. We like to work on trust rather than unnecessary constraints. Being a committer enables you to more easily make changes without needing to go through the patch submission process. Being a committer does not require you to participate any more than you already do. It does tend to make one even more committed. You will probably find that you spend more time here. Of course, you can decline and instead remain as a contributor, participating as you do now. A. This personal invitation is a chance for you to accept or decline in private. Either way, please let us know in reply to the [private@skywalking.apache.org] address only. B. If you accept, the next step is to register an iCLA: 1. Details of the iCLA and the forms are found through this link: http://www.apache.org/licenses/#clas 2. Instructions for its completion and return to the Secretary of the ASF are found at http://www.apache.org/licenses/#submitting 3. When you transmit the completed iCLA, request to notify the Apache SkyWalking and choose a unique Apache id. Look to see if your preferred id is already taken at http://people.apache.org/committer-index.html This will allow the Secretary to notify the PMC when your iCLA has been recorded. When recording of your iCLA is noticed, you will receive a follow-up message with the next steps for establishing you as a committer. Invitation acceptance process The new committer should reply to private@skywalking.apache.org (choose reply all), and express his/her intention to accept the invitation. Then, this invitation will be treated as accepted by the project\u0026rsquo;s PMC. Of course, the new committer may also choose to decline the invitation.\nOnce the invitation has been accepted, the new committer has to take the following steps:\n Subscribe to dev@skywalking.apache.org. Usually this is already done. 
Choose a Apache ID that is not on the apache committers list page. Download the ICLA (If the new committer contributes to the project as a day job, CCLA is expected). After filling in the icla.pdf (or ccla.pdf) with the correct information, print, sign it by hand, scan it as an PDF, and send it as an attachment to secretary@apache.org. (If electronic signature is preferred, please follow the steps on this page) The PMC will wait for the Apache secretary to confirm the ICLA (or CCLA) filed. The new committer and PMC will receive the following email:  Dear XXX, This message acknowledges receipt of your ICLA, which has been filed in the Apache Software Foundation records. Your account has been requested for you and you should receive email with next steps within the next few days (can take up to a week). Please refer to https://www.apache.org/foundation/how-it-works.html#developers for more information about roles at Apache. In the unlikely event that the account has not yet been requested, the PMC member should contact the project V.P.. The V.P. could request through the Apache Account Submission Helper Form.\nAfter several days, the new committer will receive an email confirming creation of the account, titled Welcome to the Apache Software Foundation (ASF)!. Congratulations! The new committer now has an official Apache ID.\nThe PMC member should add the new committer to the official committer list through roster.\nSet up the Apache ID and dev env  Go to Apache Account Utility Platform, create your password, set up your personal mailbox (Forwarding email address) and GitHub account(Your GitHub Username). An organizational invite will be sent to you via email shortly thereafter (within 2 hours). If you would like to use the xxx@apache.org email service, please refer to here. Gmail is recommended, because this forwarding mode is not easy to find in most mailbox service settings. Follow the authorized GitHub 2FA wiki to enable two-factor authorization (2FA) on Github. When you set 2FA to \u0026ldquo;off\u0026rdquo;, it will be delisted by the corresponding Apache committer write permission group until you set it up again. (NOTE: Treat your recovery codes with the same level of attention as you would your password!) Use GitBox Account Linking Utility to obtain write permission of the SkyWalking project. Follow this doc to update the website.  If you would like to show up publicly in the Apache GitHub org, you need to go to the Apache GitHub org people page, search for yourself, and choose Organization visibility to Public.\nCommitter rights, duties, and responsibilities The SkyWalking project doesn\u0026rsquo;t require continuing contributions from you after you have become a committer, but we truly hope that you will continue to play a part in our community!\nAs a committer, you could\n Review and merge the pull request to the master branch in the Apache repo. A pull request often contains multiple commits. Those commits must be squashed and merged into a single commit with explanatory comments. It is recommended for new committers to request recheck of the pull request from senior committers. Create and push codes to the new branch in the Apache repo. Follow the release process to prepare a new release. Remember to confirm with the committer team that it is the right time to create the release.  The PMC hopes that the new committer will take part in the release process as well as release voting, even though their vote will be regarded as +1 no binding. 
Being familiar with the release process is key to being promoted to the role of PMC member.\nProject Management Committee Project Management Committee (PMC) members do not have any special rights in code contributions. They simply oversee the project and make sure that it follows the Apache requirements. Its functions include:\n Binding votes on releases and license checks; New committer and PMC member recognition; Identification of branding issues and brand protection; and Responding to questions raised by the ASF board, and taking necessary actions.  The V.P., as chair of the PMC, acts as the secretary and is responsible for initializing the board report.\nIn most cases, a new PMC member is nominated from the committer team. But it is also possible to become a PMC member directly, so long as the PMC agrees to the nomination and is confident that the candidate is ready. For instance, this can be demonstrated by the fact that he/she has been an Apache member, an Apache officer, or a PMC member of another project.\nThe new PMC voting process should also follow the [DISCUSS], [VOTE] and [RESULT][VOTE] procedures on the private mailing list, just like the voting process for new committers. Before sending the invitation, the PMC must also send a NOTICE mail to the Apache board.\nTo: board@apache.org Cc: private@skywalking.apache.org Subject: [NOTICE] Jane Doe for SkyWalking PMC SkyWalking proposes to invite Jane Doe (janedoe) to join the PMC. (include if a vote was held) The vote result is available here: https://lists.apache.org/... After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t in most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through the roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/v9.1.0/en/guides/asf/committer/"},
After 72 hours, if the board doesn\u0026rsquo;t object to the nomination (which it won\u0026rsquo;t most cases), an invitation may then be sent to the candidate.\nOnce the invitation is accepted, a PMC member should add the new member to the official PMC list through roster.\n","title":"Apache SkyWalking committer","url":"/docs/main/v9.7.0/en/guides/asf/committer/"},{"content":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the SkyWalking Go in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-go and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-go \u0026amp;\u0026amp; cd skywalking-go git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go v$VERSION\u0026#34; git tag -a \u0026#34;toolkit/v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go Toolkit v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-go-${VERSION}-bin.tgz, apache-skywalking-go-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.asc skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.sha512 skywalking/go/\u0026#34;$VERSION\u0026#34; cd skywalking/go \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Go release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Go version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Go version $VERSION. Release notes: * https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-go-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-go-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-go/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-go/blob/v$VERSION/docs/en/development-and-contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. 
[ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-go-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-go-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-go-$VERSION-{src,bin}.tgz.asc skywalking-go-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Go version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION https://dist.apache.org/repos/dist/release/skywalking/go   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Go $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Go $VERSION is now released. SkyWalking Go: The Golang auto-instrument Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Golang projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Go Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-go/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. 
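For instance, a superseded version can be dropped from the release area with a single svn command (a sketch; 1.2.3 is only a placeholder for the old version, not a real release to delete): svn delete -m \u0026#34;Remove superseded SkyWalking Go release\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/go/1.2.3 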
If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Go Release Guide","url":"/docs/skywalking-go/latest/en/development-and-contribution/how-to-release/"},{"content":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the SkyWalking Go in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-go and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-go \u0026amp;\u0026amp; cd skywalking-go git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go v$VERSION\u0026#34; git tag -a \u0026#34;toolkit/v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go Toolkit v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-go-${VERSION}-bin.tgz, apache-skywalking-go-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.asc skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.sha512 skywalking/go/\u0026#34;$VERSION\u0026#34; cd skywalking/go \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Go release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Go version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Go version $VERSION. Release notes: * https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-go-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-go-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-go/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-go/blob/v$VERSION/docs/en/development-and-contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. 
[1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-go-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-go-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-go-$VERSION-{src,bin}.tgz.asc skywalking-go-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Go version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION https://dist.apache.org/repos/dist/release/skywalking/go   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Go $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Go $VERSION is now released. SkyWalking Go: The Golang auto-instrument Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Golang projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Go Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-go/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. 
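For instance, a superseded version can be dropped from the release area with a single svn command (a sketch; 1.2.3 is only a placeholder for the old version, not a real release to delete): svn delete -m \u0026#34;Remove superseded SkyWalking Go release\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/go/1.2.3 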
If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Go Release Guide","url":"/docs/skywalking-go/next/en/development-and-contribution/how-to-release/"},{"content":"Apache SkyWalking Go Release Guide This documentation guides the release manager to release the SkyWalking Go in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-go and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-go \u0026amp;\u0026amp; cd skywalking-go git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go v$VERSION\u0026#34; git tag -a \u0026#34;toolkit/v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Go Toolkit v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-go-${VERSION}-bin.tgz, apache-skywalking-go-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.asc skywalking/go/\u0026#34;$VERSION\u0026#34; cp skywalking-go/apache-skywalking*.tgz.sha512 skywalking/go/\u0026#34;$VERSION\u0026#34; cd skywalking/go \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Go release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Go version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Go version $VERSION. Release notes: * https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-go-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-go-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-go/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-go/blob/v$VERSION/docs/en/development-and-contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. 
[1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-go-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-go-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-go-$VERSION-{src,bin}.tgz.asc skywalking-go-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Go version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/go/$VERSION https://dist.apache.org/repos/dist/release/skywalking/go   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Go $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Go $VERSION is now released. SkyWalking Go: The Golang auto-instrument Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Golang projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-go/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Go Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-go/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. 
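For instance, a superseded version can be dropped from the release area with a single svn command (a sketch; 1.2.3 is only a placeholder for the old version, not a real release to delete): svn delete -m \u0026#34;Remove superseded SkyWalking Go release\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/go/1.2.3 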
If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Go Release Guide","url":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/how-to-release/"},{"content":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release the SkyWalking Infra E2E in the Apache Way, and also helps people to check the release for voting.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-infra-e2e and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-infra-e2e.git \u0026amp;\u0026amp; cd skywalking-infra-e2e git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Infra-E2E $VERSION\u0026#34; git push --tags make clean make test # this is optional, it runs sanity checks to verify the features make release Upload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e release/skywalking/infra-e2e mkdir -p release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.asc release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.sha512 release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add infra-e2e/$VERSION \u0026amp;\u0026amp; svn commit infra-e2e -m \u0026#34;Draft Apache SkyWalking-Infra-E2E release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Infra E2E version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Infra E2E version $VERSION. Release notes: * https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-e2e-$VERSION-bin.tgz - sha512xxxxyyyzzz skywalking-e2e-$VERSION-src.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-infra-e2e/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-infra-e2e/blob/main/docs/en/contribution/Release-Guidance.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. 
All artifacts in staging repository are published with .asc, and sha files. Source codes and distribution packages (skywalking-e2e-$VERSION-src.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-e2e-$VERSION-src.tgz.sha512. Check gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz. Build distribution from source code package by following this the build guide.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Infra E2E version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Infra E2E $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Infra E2E $VERSION is now released. SkyWalking Infra E2E: An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Infra E2E Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Infra E2E Release Guide","url":"/docs/skywalking-infra-e2e/latest/en/contribution/release-guidance/"},{"content":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release the SkyWalking Infra E2E in the Apache Way, and also helps people to check the release for voting.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-infra-e2e and skywalking, create a new milestone if needed. Update CHANGES.md.  
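One quick way to confirm that the milestone is really empty, assuming the GitHub CLI is installed (the milestone name is a placeholder): gh issue list --repo apache/skywalking-infra-e2e --milestone \u0026#34;\u0026lt;current milestone\u0026gt;\u0026#34; --state open 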
Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-infra-e2e.git \u0026amp;\u0026amp; cd skywalking-infra-e2e git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Infra-E2E $VERSION\u0026#34; git push --tags make clean make test # this is optional, it runs sanity checks to verify the features make release Upload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e release/skywalking/infra-e2e mkdir -p release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.asc release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.sha512 release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add infra-e2e/$VERSION \u0026amp;\u0026amp; svn commit infra-e2e -m \u0026#34;Draft Apache SkyWalking-Infra-E2E release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Infra E2E version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Infra E2E version $VERSION. Release notes: * https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-e2e-$VERSION-bin.tgz - sha512xxxxyyyzzz skywalking-e2e-$VERSION-src.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-infra-e2e/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-infra-e2e/blob/main/docs/en/contribution/Release-Guidance.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, and sha files. Source codes and distribution packages (skywalking-e2e-$VERSION-src.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-e2e-$VERSION-src.tgz.sha512. Check gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz. Build distribution from source code package by following this the build guide.  
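For reference, the checksum and signature checks above can be run roughly like this from the directory holding the downloaded candidate files (a sketch; VERSION is the version under vote): export VERSION=\u0026lt;the version under vote\u0026gt; \u0026amp;\u0026amp; curl -sSL https://dist.apache.org/repos/dist/release/skywalking/KEYS | gpg --import \u0026amp;\u0026amp; shasum -c skywalking-e2e-$VERSION-src.tgz.sha512 \u0026amp;\u0026amp; gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz 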
Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Infra E2E version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Infra E2E $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Infra E2E $VERSION is now released. SkyWalking Infra E2E: An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Infra E2E Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Infra E2E Release Guide","url":"/docs/skywalking-infra-e2e/next/en/contribution/release-guidance/"},{"content":"Apache SkyWalking Infra E2E Release Guide This documentation guides the release manager to release the SkyWalking Infra E2E in the Apache Way, and also helps people to check the release for voting.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-infra-e2e and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-infra-e2e.git \u0026amp;\u0026amp; cd skywalking-infra-e2e git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Infra-E2E $VERSION\u0026#34; git push --tags make clean make test # this is optional, it runs sanity checks to verify the features make release Upload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e release/skywalking/infra-e2e mkdir -p release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.asc release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cp skywalking-infra-e2e/skywalking*.tgz.sha512 release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add infra-e2e/$VERSION \u0026amp;\u0026amp; svn commit infra-e2e -m \u0026#34;Draft Apache SkyWalking-Infra-E2E release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Infra E2E version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Infra E2E version $VERSION. Release notes: * https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-e2e-$VERSION-bin.tgz - sha512xxxxyyyzzz skywalking-e2e-$VERSION-src.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-infra-e2e/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-infra-e2e/blob/main/docs/en/contribution/Release-Guidance.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, and sha files. Source codes and distribution packages (skywalking-e2e-$VERSION-src.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-e2e-$VERSION-src.tgz.sha512. Check gpg --verify skywalking-e2e-$VERSION-src.tgz.asc skywalking-e2e-$VERSION-src.tgz. Build distribution from source code package by following this the build guide.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Infra E2E version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/infra-e2e/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Infra E2E $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Infra E2E $VERSION is now released. SkyWalking Infra E2E: An End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Infra E2E Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-infra-e2e/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Infra E2E Release Guide","url":"/docs/skywalking-infra-e2e/v1.3.0/en/contribution/release-guidance/"},{"content":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking SDK in The Apache Way and start the voting process by reading this document.\nRequirements  Rust(rustc) Cargo PHP(php, php-config) Pecl GPG shasum  Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file.(Notice, only PMC member could update this file) Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Draft a new release Open Create a new release page, choose the tag, and click the Generate release notes button, then copy the generated text to local /tmp/notes.txt.\nTest your settings and package ## Make sure local compiling passed \u0026gt; cargo build ## Create package.xml from package.xml.tpl \u0026gt; cargo run -p scripts --release -- create-package-xml --version x.y.z --notes \u0026#34;`cat /tmp/notes.txt`\u0026#34; ## Create local package. The skywalking_agent-x.y.z.tgz should be found in project root \u0026gt; pecl package Sign the package Tag the commit ID of this release as vx.y.z.\nAfter set the version in Cargo.toml with the release number, package locally. 
Then run the following commands to sign your package.\n\u0026gt; export RELEASE_VERSION=x.y.z ## The package should be signed by your Apache committer mail. \u0026gt; gpg --armor --detach-sig skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; shasum -a 512 skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; skywalking_agent-$RELEASE_VERSION.tgz.sha512 After these, the source tar with its signed asc and sha512 are ready.\nUpload to Apache SVN and tag a release  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/php. Create a folder and name it by the release version and round, such as: x.y.z Upload tar ball, asc, sha512 files to the new folder.  Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking PHP version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking PHP version x.y.z. Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z/ * sha512 checksums - xxxxxxxx skywalking_agent-x.y.z.tgz Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking-php/tree/{commit-id} Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-php/blob/master/docs/en/contribution/compiling.md Voting will start now (Date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release   Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.\n\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z https://dist.apache.org/repos/dist/release/skywalking/php .... enter your apache password ....   Pecl publish package on skywalking_agent.\nMake sure you have a PECL account, and list in package.tpl.xml as \u0026lt;developer\u0026gt;, or reach private@skywalking.apache.org if you are a committer/PMC but not listed.\nYou can request a PECL account via https://pecl.php.net/account-request.php.\n  Add an release event, update download and doc releases on the SkyWalking website.\n  Add the new release on ASF addrelease site.\n  Remove the old releases on https://dist.apache.org/repos/dist/release/skywalking/php/{previous-version}.\n  Send a release announcement Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.\nMail title: [ANNOUNCE] Apache SkyWalking PHP x.y.z released Mail content: Hi all, SkyWalking PHP Agent provides the native tracing abilities for PHP project. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team ","title":"Apache SkyWalking PHP Agent release guide","url":"/docs/skywalking-php/latest/en/contribution/release-agent/"},{"content":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking SDK in The Apache Way and start the voting process by reading this document.\nRequirements  Rust(rustc) Cargo PHP(php, php-config) Pecl GPG shasum  Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file.(Notice, only PMC member could update this file) Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Draft a new release Open Create a new release page, choose the tag, and click the Generate release notes button, then copy the generated text to local /tmp/notes.txt.\nTest your settings and package ## Make sure local compiling passed \u0026gt; cargo build ## Create package.xml from package.xml.tpl \u0026gt; cargo run -p scripts --release -- create-package-xml --version x.y.z --notes \u0026#34;`cat /tmp/notes.txt`\u0026#34; ## Create local package. The skywalking_agent-x.y.z.tgz should be found in project root \u0026gt; pecl package Sign the package Tag the commit ID of this release as vx.y.z.\nAfter set the version in Cargo.toml with the release number, package locally. Then run the following commands to sign your package.\n\u0026gt; export RELEASE_VERSION=x.y.z ## The package should be signed by your Apache committer mail. \u0026gt; gpg --armor --detach-sig skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; shasum -a 512 skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; skywalking_agent-$RELEASE_VERSION.tgz.sha512 After these, the source tar with its signed asc and sha512 are ready.\nUpload to Apache SVN and tag a release  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/php. Create a folder and name it by the release version and round, such as: x.y.z Upload tar ball, asc, sha512 files to the new folder.  Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking PHP version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking PHP version x.y.z. Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z/ * sha512 checksums - xxxxxxxx skywalking_agent-x.y.z.tgz Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking-php/tree/{commit-id} Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-php/blob/master/docs/en/contribution/compiling.md Voting will start now (Date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... 
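Before voting, reviewers can verify the candidate along these lines (a sketch; x.y.z stands for the version under vote, as in the mail above): curl -sSL https://dist.apache.org/repos/dist/release/skywalking/KEYS | gpg --import \u0026amp;\u0026amp; shasum -a 512 -c skywalking_agent-x.y.z.tgz.sha512 \u0026amp;\u0026amp; gpg --verify skywalking_agent-x.y.z.tgz.asc skywalking_agent-x.y.z.tgz 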
Vote Check The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release   Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.\n\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z https://dist.apache.org/repos/dist/release/skywalking/php .... enter your apache password ....   Pecl publish package on skywalking_agent.\nMake sure you have a PECL account, and list in package.tpl.xml as \u0026lt;developer\u0026gt;, or reach private@skywalking.apache.org if you are a committer/PMC but not listed.\nYou can request a PECL account via https://pecl.php.net/account-request.php.\n  Add an release event, update download and doc releases on the SkyWalking website.\n  Add the new release on ASF addrelease site.\n  Remove the old releases on https://dist.apache.org/repos/dist/release/skywalking/php/{previous-version}.\n  Send a release announcement Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.\nMail title: [ANNOUNCE] Apache SkyWalking PHP x.y.z released Mail content: Hi all, SkyWalking PHP Agent provides the native tracing abilities for PHP project. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team ","title":"Apache SkyWalking PHP Agent release guide","url":"/docs/skywalking-php/next/en/contribution/release-agent/"},{"content":"Apache SkyWalking PHP Agent release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking SDK in The Apache Way and start the voting process by reading this document.\nRequirements  Rust(rustc) Cargo PHP(php, php-config) Pecl GPG shasum  Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file.(Notice, only PMC member could update this file) Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Draft a new release Open Create a new release page, choose the tag, and click the Generate release notes button, then copy the generated text to local /tmp/notes.txt.\nTest your settings and package ## Make sure local compiling passed \u0026gt; cargo build ## Create package.xml from package.xml.tpl \u0026gt; cargo run -p scripts --release -- create-package-xml --version x.y.z --notes \u0026#34;`cat /tmp/notes.txt`\u0026#34; ## Create local package. 
The skywalking_agent-x.y.z.tgz should be found in project root \u0026gt; pecl package Sign the package Tag the commit ID of this release as vx.y.z.\nAfter set the version in Cargo.toml with the release number, package locally. Then run the following commands to sign your package.\n\u0026gt; export RELEASE_VERSION=x.y.z ## The package should be signed by your Apache committer mail. \u0026gt; gpg --armor --detach-sig skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; shasum -a 512 skywalking_agent-$RELEASE_VERSION.tgz \u0026gt; skywalking_agent-$RELEASE_VERSION.tgz.sha512 After these, the source tar with its signed asc and sha512 are ready.\nUpload to Apache SVN and tag a release  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/php. Create a folder and name it by the release version and round, such as: x.y.z Upload tar ball, asc, sha512 files to the new folder.  Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking PHP version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking PHP version x.y.z. Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z/ * sha512 checksums - xxxxxxxx skywalking_agent-x.y.z.tgz Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking-php/tree/{commit-id} Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-php/blob/master/docs/en/contribution/compiling.md Voting will start now (Date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release   Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.\n\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/php/x.y.z https://dist.apache.org/repos/dist/release/skywalking/php .... enter your apache password ....   Pecl publish package on skywalking_agent.\nMake sure you have a PECL account, and list in package.tpl.xml as \u0026lt;developer\u0026gt;, or reach private@skywalking.apache.org if you are a committer/PMC but not listed.\nYou can request a PECL account via https://pecl.php.net/account-request.php.\n  Add an release event, update download and doc releases on the SkyWalking website.\n  Add the new release on ASF addrelease site.\n  Remove the old releases on https://dist.apache.org/repos/dist/release/skywalking/php/{previous-version}.\n  Send a release announcement Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.\nMail title: [ANNOUNCE] Apache SkyWalking PHP x.y.z released Mail content: Hi all, SkyWalking PHP Agent provides the native tracing abilities for PHP project. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. 
This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team ","title":"Apache SkyWalking PHP Agent release guide","url":"/docs/skywalking-php/v0.7.0/en/contribution/release-agent/"},{"content":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image hosts the SkyWalking Python agent package on top of official Python base images (full \u0026amp; slim) providing support from Python 3.7 - 3.11.\nHow to use this image The images are hosted at Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\nskywalking.docker.scarf.sh/apache/skywalking-python\nBuild your Python application image on top of this image Start by pulling the skywalking-python image as the base of your application image. Refer to Docker Hub for the list of tags available.\nFROMapache/skywalking-python:0.7.0-grpc-py3.9# ... build your Python applicationYou could start your Python application with CMD. The Python image already sets an entry point ENTRYPOINT [\u0026quot;sw-python\u0026quot;].\nFor example - CMD ['run', '-p', 'gunicorn', 'app.wsgi'] -p is always needed when using with Gunicorn/uWSGI -\u0026gt; This will be translated to sw-python run -p gunicorn app.wsgi\nYou don\u0026rsquo;t need to care about enabling the SkyWalking Python agent manually, it should be adopted and bootstrapped automatically through the sw-python CLI.\nEnvironment variables should be provided to customize the agent behavior.\nBuild an image from the dockerfile Provide the following arguments to build your own image from the dockerfile.\nBASE_PYTHON_IMAGE # the Python base image to build upon SW_PYTHON_AGENT_VERSION # agent version to be pulled from PyPI SW_PYTHON_AGENT_PROTOCOL # agent protocol - grpc/ http/ kafka ","title":"Apache SkyWalking Python Agent dockerfile and images","url":"/docs/skywalking-python/latest/en/setup/container/"},{"content":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but provided for convenience. Recommended usage is always to build the source\nThis image hosts the SkyWalking Python agent package on top of official Python base images (full \u0026amp; slim) providing support from Python 3.7 - 3.11.\nHow to use this image The images are hosted at Docker Hub.\nThe images come with protocol variants(gRPC, Kafka, HTTP) and base Python variants(Full, Slim).\nBuild your Python application image on top of this image Start by pulling the skywalking-python image as the base of your application image. Refer to Docker Hub for the list of tags available.\nFROMapache/skywalking-python:1.1.0-grpc-py3.10# ... build your Python applicationYou could start your Python application with CMD. 
The Python image already sets an entry point ENTRYPOINT [\u0026quot;sw-python\u0026quot;].\nFor example - CMD ['run', '-p', 'gunicorn', 'app.wsgi'] -p is always needed when used with Gunicorn/uWSGI -\u0026gt; This will be translated to sw-python run -p gunicorn app.wsgi\nYou don\u0026rsquo;t need to care about enabling the SkyWalking Python agent manually; it should be adopted and bootstrapped automatically through the sw-python CLI.\nEnvironment variables should be provided to customize the agent behavior.\nBuild an image from the dockerfile Provide the following arguments to build your own image from the dockerfile.\nBASE_PYTHON_IMAGE # the Python base image to build upon SW_PYTHON_AGENT_VERSION # agent version to be pulled from PyPI SW_PYTHON_AGENT_PROTOCOL # agent protocol - grpc/ http/ kafka ","title":"Apache SkyWalking Python Agent dockerfile and images","url":"/docs/skywalking-python/next/en/setup/container/"},{"content":"Apache SkyWalking Python Agent dockerfile and images Docker images are not official ASF releases but are provided for convenience. The recommended usage is always to build from source.\nThis image hosts the SkyWalking Python agent package on top of official Python base images (full \u0026amp; slim), providing support for Python 3.7 - 3.11.\nHow to use this image The images are hosted at Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\nskywalking.docker.scarf.sh/apache/skywalking-python\nBuild your Python application image on top of this image Start by pulling the skywalking-python image as the base of your application image. Refer to Docker Hub for the list of tags available.\nFROM apache/skywalking-python:0.7.0-grpc-py3.9 # ... build your Python application You could start your Python application with CMD. The Python image already sets an entry point ENTRYPOINT [\u0026quot;sw-python\u0026quot;].\nFor example - CMD ['run', '-p', 'gunicorn', 'app.wsgi'] -p is always needed when used with Gunicorn/uWSGI -\u0026gt; This will be translated to sw-python run -p gunicorn app.wsgi\nYou don\u0026rsquo;t need to care about enabling the SkyWalking Python agent manually; it should be adopted and bootstrapped automatically through the sw-python CLI.\nEnvironment variables should be provided to customize the agent behavior.\nBuild an image from the dockerfile Provide the following arguments to build your own image from the dockerfile.\nBASE_PYTHON_IMAGE # the Python base image to build upon SW_PYTHON_AGENT_VERSION # agent version to be pulled from PyPI SW_PYTHON_AGENT_PROTOCOL # agent protocol - grpc/ http/ kafka ","title":"Apache SkyWalking Python Agent dockerfile and images","url":"/docs/skywalking-python/v1.0.1/en/setup/container/"},{"content":"Apache SkyWalking Python Image Release Guide This documentation shows the way to build and push the SkyWalking Python images to DockerHub.\nPrerequisites Before building the latest release of images, make sure an official release has been pushed to PyPI, which the dockerfile depends on.\nImages This process will generate a list of images covering the most used Python versions and variations (grpc/http/kafka) of the Python agent.\nThe convenience images are published to Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\n skywalking.docker.scarf.sh/apache/skywalking-python (Docker Hub)  How to build Issue the following commands to build relevant docker images for the Python agent. 
The make command will generate three images(grpc, http, kafka) for each Python version supported.\nAt the root folder -\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image Or at the docker folder -\ncd docker export AGENT_VERSION=\u0026lt;version\u0026gt; make How to publish images After a SkyWalking Apache release for the Python agent and wheels have been pushed to PyPI:\n  Build images from the project root, this step pulls agent wheel from PyPI and installs it:\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image   Verify the images built.\n  Push built images to docker hub repos:\nmake push-image   ","title":"Apache SkyWalking Python Image Release Guide","url":"/docs/skywalking-python/latest/en/contribution/how-to-release-docker/"},{"content":"Apache SkyWalking Python Image Release Guide The official process generating a list of images covering most used Python versions and variations(grpc/http/kafka) of the Python agent is deployed to our GitHub actions and therefore do not rely on this documentation.\nThis documentation shows the way to build and push the SkyWalking Python images manually.\nHow to build manually Before building the latest release of images, make sure an official release is pushed to PyPI where the dockerfile will depend on.\nImages The process generating a list of images covering most used Python versions and variations(grpc/http/kafka) of the Python agent is deployed to our GitHub actions.\nThe convenience images are published to DockerHub\nHow to build Issue the following commands to build relevant docker images for the Python agent. The make command will generate three images(grpc, http, kafka) for each Python version supported.\nAt the root folder -\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image Or at the docker folder -\ncd docker export AGENT_VERSION=\u0026lt;version\u0026gt; make How to publish images After a SkyWalking Apache release for the Python agent and wheels have been pushed to PyPI:\n  Build images from the project root, this step pulls agent wheel from PyPI and installs it:\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image   Verify the images built.\n  Push built images to docker hub repos:\nmake push-image   ","title":"Apache SkyWalking Python Image Release Guide","url":"/docs/skywalking-python/next/en/contribution/how-to-release-docker/"},{"content":"Apache SkyWalking Python Image Release Guide This documentation shows the way to build and push the SkyWalking Python images to DockerHub.\nPrerequisites Before building the latest release of images, make sure an official release is pushed to PyPI where the dockerfile will depend on.\nImages This process wil generate a list of images covering most used Python versions and variations(grpc/http/kafka) of the Python agent.\nThe convenience images are published to Docker Hub and available from the skywalking.docker.scarf.sh endpoint.\n skywalking.docker.scarf.sh/apache/skywalking-python (Docker Hub)  How to build Issue the following commands to build relevant docker images for the Python agent. 
The make command will generate three images(grpc, http, kafka) for each Python version supported.\nAt the root folder -\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image Or at the docker folder -\ncd docker export AGENT_VERSION=\u0026lt;version\u0026gt; make How to publish images After a SkyWalking Apache release for the Python agent and wheels have been pushed to PyPI:\n  Build images from the project root, this step pulls agent wheel from PyPI and installs it:\nexport AGENT_VERSION=\u0026lt;version\u0026gt; make build-image   Verify the images built.\n  Push built images to docker hub repos:\nmake push-image   ","title":"Apache SkyWalking Python Image Release Guide","url":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-release-docker/"},{"content":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the SkyWalking Python in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-python and skywalking, create a new milestone if needed. Update CHANGELOG.md and version in pyproject.toml.  Add your GPG public key to Apache SVN   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-python \u0026amp;\u0026amp; cd skywalking-python git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Python $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release Upload to Apache SVN svn co https://dist.apache.org/repos/dist/dev/skywalking/python release/skywalking/python mkdir -p release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz.asc release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking-python*.tgz.sha512 release/skywalking/python/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add python/$VERSION \u0026amp;\u0026amp; svn commit python -m \u0026#34;Draft Apache SkyWalking-Python release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email, the same below.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION test build available Content: The test build of Apache SkyWalking Python $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and give feedback. Based on that, the PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Python version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Python version $VERSION. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Feature tests. All artifacts in the staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-python-src-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION with .asc, .sha512. LICENSE and NOTICE are in the source code and distribution packages. Check shasum -c skywalking-python-src-$VERSION.tgz.sha512. Check gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz. Build the distribution from the source code package by following the build guide. Licenses check, make license. A local verification sketch is shown after the vote result example below.  The vote result should follow these rules:\n  A PMC vote is +1 binding; all other votes are +1 non-binding.\n  If within 72 hours you get at least 3 (+1 binding) votes and more +1 than -1, the vote passes.\n  Send the closing vote mail to announce the result. When counting the binding and non-binding votes, please list the names of the voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Python version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   
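Before casting a +1, a voter may want to run the checks above locally. The following is a minimal sketch, assuming the candidate files are fetched from the dist/dev area and that the release manager key is already present in the SkyWalking KEYS file; file names follow the skywalking-python-src-$VERSION.tgz convention used above.
# import the published SkyWalking KEYS (one-time)
wget https://dist.apache.org/repos/dist/release/skywalking/KEYS
gpg --import KEYS
# fetch the release candidate directory, then verify checksum and signature
svn co https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION
cd $VERSION
shasum -c skywalking-python-src-$VERSION.tgz.sha512
gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz
If both checks pass and the build guide succeeds against the extracted source, the checklist items above are satisfied.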
Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/python/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Python $VERSION is now released. SkyWalking Python: The Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Website: http://skywalking.apache.org/ SkyWalking Python Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-python/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Python Release Guide","url":"/docs/skywalking-python/latest/en/contribution/how-to-release/"},{"content":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the SkyWalking Python in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-python and skywalking, create a new milestone if needed. Update CHANGELOG.md and version in pyproject.toml.  Add your GPG public key to Apache SVN   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-python \u0026amp;\u0026amp; cd skywalking-python git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Python $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release Upload to Apache SVN svn co https://dist.apache.org/repos/dist/dev/skywalking/python release/skywalking/python mkdir -p release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking*.tgz release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking*.tgz.asc release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python*.tgz.sha512 release/skywalking/python/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add python/$VERSION \u0026amp;\u0026amp; svn commit python -m \u0026#34;Draft Apache SkyWalking-Python release $VERSION\u0026#34; Make the internal announcement First, generate a sha512sum for the source code package generated in last step:\nsha512sum release/skywalking/python/\u0026#34;$VERSION\u0026#34;/skywalking-python-src-\u0026#34;$VERSION\u0026#34;.tgz Send an announcement email to dev@ mailing list, please check all links before sending the email, the same as below.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION test build available Content: The test build of Apache SkyWalking Python $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Python version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Python version $VERSION. 
Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-python-src-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-python-src-$VERSION.tgz.sha512. Check gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz. Build distribution from source code package by following this the build guide. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Python version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/python/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Publish PyPI package After the official ASF release, we publish the packaged wheel to the PyPI index.\n Make sure the final upload is correct by using the test PyPI index make upload-test. Upload the final artifacts by running make upload.  
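Before running make upload, it can help to sanity-check the TestPyPI artifact from a clean virtual environment. A minimal sketch, assuming make upload-test has already pushed the wheel and that the project is published under the apache-skywalking name (the TestPyPI index URL below is the standard public one, not SkyWalking-specific):
# create a throwaway environment
python -m venv /tmp/sw-verify
. /tmp/sw-verify/bin/activate
# install the candidate from TestPyPI, falling back to PyPI for dependencies
pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ apache-skywalking==$VERSION
# confirm the agent module imports and the installed version metadata looks right
python -c 'import skywalking'
pip show apache-skywalking
If the import succeeds and pip show reports the expected $VERSION, proceed with make upload.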
Publish Docker images After the release on GitHub, a GitHub Action will be triggered to build Docker images based on the latest code.\nImportant We announce the new release by drafting one on Github release page, following the previous convention.\nAn automation via GitHub Actions will automatically trigger upon the mentioned release event to build and upload Docker images to DockerHub.\nSee How-to-release-docker for a detailed description of manual release.\n Send ANNOUNCEMENT email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Python $VERSION is now released. SkyWalking Python: The Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Website: http://skywalking.apache.org/ SkyWalking Python Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-python/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Python Release Guide","url":"/docs/skywalking-python/next/en/contribution/how-to-release/"},{"content":"Apache SkyWalking Python Release Guide This documentation guides the release manager to release the SkyWalking Python in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close (if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-python and skywalking, create a new milestone if needed. Update CHANGELOG.md and version in pyproject.toml.  Add your GPG public key to Apache SVN   Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone --recurse-submodules git@github.com:apache/skywalking-python \u0026amp;\u0026amp; cd skywalking-python git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Python $VERSION\u0026#34; git push --tags make clean \u0026amp;\u0026amp; make release Upload to Apache SVN svn co https://dist.apache.org/repos/dist/dev/skywalking/python release/skywalking/python mkdir -p release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking*.tgz.asc release/skywalking/python/\u0026#34;$VERSION\u0026#34; cp skywalking-python/skywalking-python*.tgz.sha512 release/skywalking/python/\u0026#34;$VERSION\u0026#34; cd release/skywalking \u0026amp;\u0026amp; svn add python/$VERSION \u0026amp;\u0026amp; svn commit python -m \u0026#34;Draft Apache SkyWalking-Python release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email, the same below.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION test build available Content: The test build of Apache SkyWalking Python $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org.\nSubject: [VOTE] Release Apache SkyWalking Python version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Python version $VERSION. Release notes: * https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-python-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-python/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-python/blob/master/CONTRIBUTING.md#compiling-and-building Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. 
[ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-python-src-$VERSION.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/python/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-python-src-$VERSION.tgz.sha512. Check gpg --verify skywalking-python-src-$VERSION.tgz.asc skywalking-python-src-$VERSION.tgz. Build distribution from source code package by following this the build guide. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Python version $VERSION 72+ hours passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nsvn mv https://dist.apache.org/repos/dist/dev/skywalking/python/\u0026#34;$VERSION\u0026#34; https://dist.apache.org/repos/dist/release/skywalking/python/\u0026#34;$VERSION\u0026#34;   Refer to the previous PR, update news and links on the website. There are several files need to modify.\n  Update Github release page, follow the previous convention.\n  Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Python $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Python $VERSION is now released. SkyWalking Python: The Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. 
Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-python/blob/v$VERSION/CHANGELOG.md Website: http://skywalking.apache.org/ SkyWalking Python Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-python/blob/v$VERSION/README.md The Apache SkyWalking Team   ","title":"Apache SkyWalking Python Release Guide","url":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-release/"},{"content":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the SkyWalking Rover in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-rover and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-rover \u0026amp;\u0026amp; cd skywalking-rover git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Rover v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-rover-${VERSION}-bin.tgz, apache-skywalking-rover-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz.asc skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking-rover*.tgz.sha512 skywalking/rover/\u0026#34;$VERSION\u0026#34; cd skywalking/rover \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Rover release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Rover version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Rover version $VERSION. 
Release notes: * https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-rover-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-rover-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-rover/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-rover/blob/v$VERSION/docs/en/guides/contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-rover-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-rover-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-rover-$VERSION-{src,bin}.tgz.asc skywalking-rover-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make container-generate build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Rover version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION https://dist.apache.org/repos/dist/release/skywalking/rover   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Rover $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Rover $VERSION is now released. SkyWalking Rover: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Rover Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-rover/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Rover Release Guide","url":"/docs/skywalking-rover/latest/en/guides/contribution/how-to-release/"},{"content":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the SkyWalking Rover in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-rover and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-rover \u0026amp;\u0026amp; cd skywalking-rover git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Rover v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-rover-${VERSION}-bin.tgz, apache-skywalking-rover-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz.asc skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking-rover*.tgz.sha512 skywalking/rover/\u0026#34;$VERSION\u0026#34; cd skywalking/rover \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Rover release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Rover version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Rover version $VERSION. 
Release notes: * https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-rover-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-rover-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-rover/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-rover/blob/v$VERSION/docs/en/guides/contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-rover-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-rover-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-rover-$VERSION-{src,bin}.tgz.asc skywalking-rover-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make container-generate build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Rover version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION https://dist.apache.org/repos/dist/release/skywalking/rover   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Rover $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Rover $VERSION is now released. SkyWalking Rover: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Rover Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-rover/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Rover Release Guide","url":"/docs/skywalking-rover/next/en/guides/contribution/how-to-release/"},{"content":"Apache SkyWalking Rover Release Guide This documentation guides the release manager to release the SkyWalking Rover in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-rover and skywalking, create a new milestone if needed. Update CHANGES.md. Check the dependency licenses including all dependencies.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-rover \u0026amp;\u0026amp; cd skywalking-rover git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Rover v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-rover-${VERSION}-bin.tgz, apache-skywalking-rover-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking*.tgz.asc skywalking/rover/\u0026#34;$VERSION\u0026#34; cp skywalking-rover/apache-skywalking-rover*.tgz.sha512 skywalking/rover/\u0026#34;$VERSION\u0026#34; cd skywalking/rover \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Rover release $VERSION\u0026#34; Call for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Rover version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Rover version $VERSION. 
Release notes: * https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-rover-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-rover-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-rover/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-rover/blob/v$VERSION/docs/en/guides/contribution/how-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-rover-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-rover-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-rover-$VERSION-{src,bin}.tgz.asc skywalking-rover-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make container-generate build.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Rover version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/rover/$VERSION https://dist.apache.org/repos/dist/release/skywalking/rover   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Rover $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Rover $VERSION is now released. SkyWalking Rover: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-rover/blob/v$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Rover Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-rover/blob/v$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Rover Release Guide","url":"/docs/skywalking-rover/v0.6.0/en/guides/contribution/how-to-release/"},{"content":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release the SkyWalking Satellite in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-satellite and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-satellite \u0026amp;\u0026amp; cd skywalking-satellite git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Satellite v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-satellite-${VERSION}-bin.tgz, apache-skywalking-satellite-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz.asc skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking-satellite*.tgz.sha512 skywalking/satellite/\u0026#34;$VERSION\u0026#34; cd skywalking/satellite \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Satellite release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] SkyWalking Satellite $VERSION test build available Content: The test build of SkyWalking Satellite $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-satellite-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-satellite-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/v$VERSION/docs/en/guides/contribution/How-to-release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Satellite version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Satellite version $VERSION. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-satellite-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-satellite-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/$VERSION/docs/en/guides/contribuation/How-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-satellite-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-satellite-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-satellite-$VERSION-{src,bin}.tgz.asc skywalking-satellite-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Satellite version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION https://dist.apache.org/repos/dist/release/skywalking/satellite   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Satellite $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Satellite $VERSION is now released. SkyWalking Satellite: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Satellite Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-satellite/blob/$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Satellite Release Guide","url":"/docs/skywalking-satellite/latest/en/guides/contribution/how-to-release/"},{"content":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release the SkyWalking Satellite in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-satellite and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. 
DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-satellite \u0026amp;\u0026amp; cd skywalking-satellite git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Satellite v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-satellite-${VERSION}-bin.tgz, apache-skywalking-satellite-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz.asc skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking-satellite*.tgz.sha512 skywalking/satellite/\u0026#34;$VERSION\u0026#34; cd skywalking/satellite \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Satellite release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] SkyWalking Satellite $VERSION test build available Content: The test build of SkyWalking Satellite $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-satellite-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-satellite-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/v$VERSION/docs/en/guides/contribution/How-to-release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Satellite version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Satellite version $VERSION. 
Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-satellite-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-satellite-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/$VERSION/docs/en/guides/contribuation/How-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-satellite-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-satellite-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-satellite-$VERSION-{src,bin}.tgz.asc skywalking-satellite-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Satellite version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION https://dist.apache.org/repos/dist/release/skywalking/satellite   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Satellite $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Satellite $VERSION is now released. SkyWalking Satellite: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Satellite Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-satellite/blob/$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Satellite Release Guide","url":"/docs/skywalking-satellite/next/en/guides/contribution/how-to-release/"},{"content":"Apache SkyWalking Satellite Release Guide This documentation guides the release manager to release the SkyWalking Satellite in the Apache Way, and also helps people to check the release for vote.\nPrerequisites  Close(if finished, or move to next milestone otherwise) all issues in the current milestone from skywalking-satellite and skywalking, create a new milestone if needed. Update CHANGES.md.  Add your GPG public key to Apache svn   Upload your GPG public key to a public GPG site, such as MIT\u0026rsquo;s site.\n  Log in id.apache.org and submit your key fingerprint.\n  Add your GPG public key into SkyWalking GPG KEYS file, you can do this only if you are a PMC member. You can ask a PMC member for help. DO NOT override the existed KEYS file content, only append your key at the end of the file.\n  Build and sign the source code package export VERSION=\u0026lt;the version to release\u0026gt; git clone git@github.com:apache/skywalking-satellite \u0026amp;\u0026amp; cd skywalking-satellite git tag -a \u0026#34;v$VERSION\u0026#34; -m \u0026#34;Release Apache SkyWalking-Satellite v$VERSION\u0026#34; git push --tags make release In total, six files should be automatically generated in the directory: apache-skywalking-satellite-${VERSION}-bin.tgz, apache-skywalking-satellite-${VERSION}-src.tgz, and their corresponding asc, sha512 files.\nUpload to Apache svn svn co https://dist.apache.org/repos/dist/dev/skywalking/ mkdir -p skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking*.tgz.asc skywalking/satellite/\u0026#34;$VERSION\u0026#34; cp skywalking-satellite/apache-skywalking-satellite*.tgz.sha512 skywalking/satellite/\u0026#34;$VERSION\u0026#34; cd skywalking/satellite \u0026amp;\u0026amp; svn add \u0026#34;$VERSION\u0026#34; \u0026amp;\u0026amp; svn commit -m \u0026#34;Draft Apache SkyWalking-Satellite release $VERSION\u0026#34; Make the internal announcement Send an announcement email to dev@ mailing list, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] SkyWalking Satellite $VERSION test build available Content: The test build of SkyWalking Satellite $VERSION is now available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-satellite-bin-x.x.x.tgz - sha512xxxxyyyzzz apache-skywalking-satellite-src-x.x.x.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * http://pgp.mit.edu:11371/pks/lookup?op=get\u0026amp;search=0x8BD99F552D9F33D7 corresponding to kezhenxu94@apache.org Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/v$VERSION/docs/en/guides/contribution/How-to-release.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait at least 48 hours for test responses Any PMC, committer or contributor can test features for releasing, and feedback. Based on that, PMC will decide whether to start a vote or not.\nCall for vote in dev@ mailing list Call for vote in dev@skywalking.apache.org, please check all links before sending the email.\nSubject: [VOTE] Release Apache SkyWalking Satellite version $VERSION Content: Hi the SkyWalking Community: This is a call for vote to release Apache SkyWalking Satellite version $VERSION. Release notes: * https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION * sha512 checksums - sha512xxxxyyyzzz skywalking-satellite-x.x.x-src.tgz - sha512xxxxyyyzzz skywalking-satellite-x.x.x-bin.tgz Release Tag : * (Git Tag) v$VERSION Release Commit Hash : * https://github.com/apache/skywalking-satellite/tree/\u0026lt;Git Commit Hash\u0026gt; Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking-satellite/blob/$VERSION/docs/en/guides/contribuation/How-to-release.md Voting will start now and will remain open for at least 72 hours, all PMC members are required to give their votes. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Thanks. [1] https://github.com/apache/skywalking/blob/master/docs/en/guides/How-to-release.md#vote-check Vote Check All PMC members and committers should check these before voting +1:\n Features test. All artifacts in staging repository are published with .asc, .md5, and sha files. Source codes and distribution packages (skywalking-satellite-$VERSION-{src,bin}.tgz) are in https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION with .asc, .sha512. LICENSE and NOTICE are in source codes and distribution package. Check shasum -c skywalking-satellite-$VERSION-{src,bin}.tgz.sha512. Check gpg --verify skywalking-satellite-$VERSION-{src,bin}.tgz.asc skywalking-satellite-$VERSION-{src,bin}.tgz. Build distribution from source code package by following this command, make build. Licenses check, make license.  Vote result should follow these:\n  PMC vote is +1 binding, all others is +1 no binding.\n  Within 72 hours, you get at least 3 (+1 binding), and have more +1 than -1. Vote pass.\n  Send the closing vote mail to announce the result. When count the binding and no binding votes, please list the names of voters. 
An example like this:\n[RESULT][VOTE] Release Apache SkyWalking Satellite version $VERSION 3 days passed, we’ve got ($NUMBER) +1 bindings (and ... +1 non-bindings): (list names) +1 bindings: xxx ... +1 non-bindings: xxx ... Thank you for voting, I’ll continue the release process.   Publish release   Move source codes tar balls and distributions to https://dist.apache.org/repos/dist/release/skywalking/, you can do this only if you are a PMC member.\nexport SVN_EDITOR=vim svn mv https://dist.apache.org/repos/dist/dev/skywalking/satellite/$VERSION https://dist.apache.org/repos/dist/release/skywalking/satellite   Refer to the previous PR, update the event and download links on the website.\n  Update Github release page, follow the previous convention.\n  Push docker image to the Docker Hub, make sure you have the write permission for push image.\nmake docker \u0026amp;\u0026amp; make docker.push   Send ANNOUNCE email to dev@skywalking.apache.org and announce@apache.org, the sender should use his/her Apache email account, please check all links before sending the email.\nSubject: [ANNOUNCEMENT] Apache SkyWalking Satellite $VERSION Released Content: Hi the SkyWalking Community On behalf of the SkyWalking Team, I’m glad to announce that SkyWalking Satellite $VERSION is now released. SkyWalking Satellite: A lightweight collector/sidecar could be deployed closing to the target monitored system, to collect metrics, traces, and logs. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. Download Links: http://skywalking.apache.org/downloads/ Release Notes : https://github.com/apache/skywalking-satellite/blob/$VERSION/CHANGES.md Website: http://skywalking.apache.org/ SkyWalking Satellite Resources: - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Documents: https://github.com/apache/skywalking-satellite/blob/$VERSION/README.md The Apache SkyWalking Team   Remove Unnecessary Releases Please remember to remove all unnecessary releases in the mirror svn (https://dist.apache.org/repos/dist/release/skywalking/), if you don\u0026rsquo;t recommend users to choose those version. For example, you have removed the download and documentation links from the website. If they want old ones, the Archive repository has all of them.\n","title":"Apache SkyWalking Satellite Release Guide","url":"/docs/skywalking-satellite/v1.2.0/en/guides/contribution/how-to-release/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. 
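For reference, the satisfied / dissatisfied / frustrated bands described above correspond to the standard Apdex score, which this page does not spell out. Under the usual Apdex definition:

\[
\mathrm{Apdex}_T = \frac{\text{Satisfied} + \tfrac{1}{2}\,\text{Tolerating}}{\text{Total samples}},
\qquad
\text{Satisfied: } t \le T,\quad
\text{Tolerating: } T < t \le 4T,\quad
\text{Frustrated: } t > 4T
\]

With T = 1.2 seconds, the frustration boundary is 4T = 4.8 seconds, which is where the 4.8-second figure above comes from.

A minimal sketch of service-apdex-threshold.yml, using the same illustrative values as the Configuration Format example below (the service names and thresholds are placeholders, not recommendations):

# default threshold is 500 ms, applied to any service not listed here
default: 500
# per-service overrides, in milliseconds
tomcat: 1000        # the threshold of service "tomcat" is 1 s
springboot1: 50     # the threshold of service "springboot1" is 50 ms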
The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/latest/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/next/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.0.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. 
Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.1.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.2.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.3.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. 
All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.4.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.5.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.6.0/en/setup/backend/apdex-threshold/"},{"content":"Apdex threshold Apdex is a measure of response time based against a set threshold. It measures the ratio of satisfactory response times to unsatisfactory response times. 
The response time is measured from an asset request to completed delivery back to the requestor.\nA user defines a response time threshold T. All responses handled in T or less time satisfy the user.\nFor example, if T is 1.2 seconds and a response completes in 0.5 seconds, then the user is satisfied. All responses greater than 1.2 seconds dissatisfy the user. Responses greater than 4.8 seconds frustrate the user.\nThe apdex threshold T can be configured in service-apdex-threshold.yml file or via Dynamic Configuration. The default item will apply to a service that isn\u0026rsquo;t defined in this configuration as the default threshold.\nConfiguration Format The configuration content includes the names and thresholds of the services:\n# default threshold is 500msdefault:500# example:# the threshold of service \u0026#34;tomcat\u0026#34; is 1s# tomcat: 1000# the threshold of service \u0026#34;springboot1\u0026#34; is 50ms# springboot1: 50","title":"Apdex threshold","url":"/docs/main/v9.7.0/en/setup/backend/apdex-threshold/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/latest/en/setup/backend/backend-apisix-monitoring/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
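For readability, here is the scrape_configs snippet shown (flattened) in these pages, reconstructed with its intended indentation; example_service_name is a placeholder for your own service name:

receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'apisix-monitoring'
          static_configs:
            - targets: ['apisix:9091']
              labels:
                skywalking_service: example_service_name  # Specify SkyWalking Service name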
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/next/en/setup/backend/backend-apisix-monitoring/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
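Likewise, the OTEL Collector resource processor snippet shown (flattened) in these pages, reconstructed with its intended indentation; again, example_service_name is a placeholder:

processors:
  resource/skywalking-service:
    attributes:
      - key: skywalking_service
        value: example_service_name  # Specify SkyWalking Service name
        action: insert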
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/v9.3.0/en/setup/backend/backend-apisix-monitoring/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-apisix-monitoring/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-apisix-monitoring/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-apisix-monitoring/"},{"content":"APISIX monitoring APISIX performance from apisix prometheus plugin SkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  APISIX Prometheus plugin collects metrics data from APSIX. OpenTelemetry Collector fetches metrics from APISIX Prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Enable APISIX APISIX Prometheus plugin . Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  APISIX Monitoring APISIX prometheus plugin provide multiple dimensions metrics for APISIX server , upstream , route , etc. Accordingly, SkyWalking observes the status, payload, and latency of the APISIX server, which is cataloged as a LAYER: APISIX Service in the OAP. Meanwhile, the instances would be recognized as LAYER: APISIX instances. 
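The processor-based alternative mentioned in these entries, written out as collector YAML. This is a sketch only: example_service_name is a placeholder, and the processor still has to be listed under the metrics pipeline in the collector's service section.

```yaml
processors:
  resource/skywalking-service:
    attributes:
      - key: skywalking_service
        value: example_service_name   # Specify the SkyWalking Service name
        action: insert
```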
The route rules and nodes would be recognized as endpoints with route/ and upstream/ prefixes.\nSpecify SkyWalking Service name SkyWalking expects OTEL Collector attribute skywalking_service to be the Service name.\nMake sure skywalking_service attribute exists through static_configs of OTEL Prometheus scape config.\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;apisix-monitoring\u0026#39;static_configs:- targets:[\u0026#39;apisix:9091\u0026#39;]labels:skywalking_service:exmple_service_name # Specify SkyWalking Service name You also could leverage OTEL Collector processor to add skywalking_service attribute , as following :\nprocessors:resource/skywalking-service:attributes:- key:skywalking_service value:exmple_service_name# Specify Skywalking Service name action:insert Notice , if you don\u0026rsquo;t specify skywalking_service attribute, SkyWalking OAP would use APISIX as the default service name\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP status  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_sv_http_status Service The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_sv_http_latency Service The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_sv_bandwidth Service The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_sv_http_connections Service The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_http_requests Service The increment rate of HTTP requests APISIX Prometheus plugin   HTTP status  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin   HTTP status of non-matched requests  meter_apisix_instance_http_status Instance The increment rate of the status of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP latency non-matched requests  meter_apisix_instance_http_latency Instance The increment rate of the latency of HTTP requests, which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP bandwidth non-matched requests KB meter_apisix_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests ,which don\u0026rsquo;t match any route APISIX Prometheus plugin   HTTP connection  meter_apisix_instance_http_connections Instance The avg number of the connections APISIX Prometheus plugin   HTTP Request Trend  meter_apisix_instance_http_requests Instance The increment rate of HTTP requests APISIX Prometheus plugin   Shared dict capacity MB meter_apisix_instance_shared_dict_capacity_bytes 
Instance The avg capacity of shared dict capacity APISIX Prometheus plugin   Shared free space MB meter_apisix_instance_shared_dict_free_space_bytes Instance The avg free space of shared dict capacity APISIX Prometheus plugin   etcd index  meter_apisix_instance_sv_etcd_indexes Instance etcd modify index for APISIX keys APISIX Prometheus plugin   etcd latest reachability  meter_apisix_instance_sv_etcd_reachable Instance etcd latest reachable , Refer to APISIX Prometheus plugin APISIX Prometheus plugin   HTTP status  meter_apisix_endpoint_node_http_status Endpoint The increment rate of the status of HTTP requests APISIX Prometheus plugin   HTTP latency  meter_apisix_endpoint_node_http_latency Endpoint The increment rate of the latency of HTTP requests APISIX Prometheus plugin   HTTP bandwidth KB meter_apisix_endpoint_node_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests APISIX Prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/apisix.yaml. The APISIX dashboard panel configurations are found in /config/ui-initialized-templates/apisix.\n","title":"APISIX monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-apisix-monitoring/"},{"content":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","title":"AWS API Gateway monitoring","url":"/docs/main/latest/en/setup/backend/backend-aws-api-gateway-monitoring/"},{"content":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. 
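The CloudWatch metric stream described in the set-up steps above can also be declared as infrastructure-as-code. The following CloudFormation sketch is not taken from the docs: the stream name and the Firehose/IAM role ARNs are placeholders for resources you have already created, and only the settings the docs name (namespace AWS/ApiGateway, output format OpenTelemetry 0.7) are pinned.

```yaml
Resources:
  ApiGatewayMetricStream:
    Type: AWS::CloudWatch::MetricStream
    Properties:
      Name: apigateway-to-skywalking   # placeholder name
      FirehoseArn: arn:aws:firehose:us-east-1:111122223333:deliverystream/example-stream   # existing delivery stream (placeholder)
      RoleArn: arn:aws:iam::111122223333:role/example-metric-stream-role                   # role allowed to put records to Firehose (placeholder)
      OutputFormat: opentelemetry0.7   # "OpenTelemetry 0.7" as required in the set-up steps
      IncludeFilters:
        - Namespace: AWS/ApiGateway    # only stream API Gateway metrics
```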
SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","title":"AWS API Gateway monitoring","url":"/docs/main/next/en/setup/backend/backend-aws-api-gateway-monitoring/"},{"content":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. 
The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","title":"AWS API Gateway monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-aws-api-gateway-monitoring/"},{"content":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. 
API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","title":"AWS API Gateway monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-aws-api-gateway-monitoring/"},{"content":"AWS API Gateway monitoring Amazon API Gateway is an AWS service for creating, publishing, maintaining, monitoring, and securing REST, HTTP, and WebSocket APIs. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of API Gateway(HTTP and REST APIs) to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for API Gateway(REST and HTTP APIs), refer to API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of API Gateway to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Enable CloudWatch metrics for API Gateway Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/ApiGateway, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Gateway Monitoring SkyWalking observes CloudWatch metrics of the AWS API Gateway, which is cataloged as a LAYER: AWS_GATEWAY Service in the OAP. 
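Similarly, the delivery stream from the "Create Delivery Stream" step above could be sketched in CloudFormation. Treat this as an assumption-laden outline rather than a verified template: the endpoint URL must point at your SkyWalking AWS Kinesis Data Firehose receiver, the S3 bucket and IAM role are placeholders, and the property names should be checked against the current AWS::KinesisFirehose::DeliveryStream reference.

```yaml
Resources:
  GatewayMetricsDeliveryStream:
    Type: AWS::KinesisFirehose::DeliveryStream
    Properties:
      DeliveryStreamName: skywalking-gateway-metrics   # placeholder
      DeliveryStreamType: DirectPut
      HttpEndpointDestinationConfiguration:
        EndpointConfiguration:
          Name: skywalking-firehose-receiver
          Url: https://firehose-receiver.example.com/metrics   # SkyWalking Firehose receiver address (placeholder)
        RoleARN: arn:aws:iam::111122223333:role/example-firehose-role   # placeholder
        S3BackupMode: FailedDataOnly
        S3Configuration:               # Firehose requires an S3 destination for backup
          BucketARN: arn:aws:s3:::example-firehose-backup
          RoleARN: arn:aws:iam::111122223333:role/example-firehose-role
```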
Meanwhile, the routes would be recognized as LAYER: AWS_GATEWAY endpoints\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Request Count count aws_gateway_service_count Service The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_service_4xx Service The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_service_5xx Service The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_service_latency Service The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_service_integration_latency Service The time between when API Gateway relays a request to the backend and when it receives a response from the backend. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Data Processed KB aws_gateway_service_data_processed Service The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_service_cache_hit_rate Service The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_service_cache_miss_rate Service The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch   Request Count count aws_gateway_endpoint_count Endpoint The total number API requests in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   4xx Count count aws_gateway_endpoint_4xx Endpoint The number of client-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   5xx Count count aws_gateway_endpoint_5xx Endpoint The number of server-side errors captured in a given period. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Latency ms aws_gateway_endpoint_latency Endpoint The time between when API Gateway receives a request from a client and when it returns a response to the client. API Gateway HTTP APIs monitoring with CloudWatch and API Gateway REST APIs monitoring with CloudWatch   Request Average Integration Latency ms aws_gateway_endpoint_integration_latency Endpoint The time between when API Gateway relays a request to the backend and when it receives a response from the backend. 
API Gateway HTTP APIs monitoring with CloudWatch   Data Processed KB aws_gateway_endpoint_data_processed Endpoint The amount of data processed API Gateway HTTP APIs monitoring with CloudWatch   Cache Hit Count Rate % aws_gateway_endpoint_cache_hit_rate Endpoint The number of requests served from the API cache API Gateway REST APIs monitoring with CloudWatch   Cache Miss Count Rate % aws_gateway_endpoint_cache_miss_rate Endpoint The number of requests served from the backend API Gateway REST APIs monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-gateway/. The AWS API Gateway dashboard panel configurations are found in /config/ui-initialized-templates/aws_gateway.\n","title":"AWS API Gateway monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-aws-api-gateway-monitoring/"},{"content":"AWS Cloud EKS monitoring SkyWalking leverages the OpenTelemetry Collector with the AWS Container Insights Receiver to transfer the metrics to the OpenTelemetry receiver and into the Meter System.\nData flow  The OpenTelemetry Collector fetches metrics from EKS via the AWS Container Insights Receiver and pushes metrics to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with the AWS Container Insights Receiver to EKS. Configure the SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details.\nEKS Monitoring The AWS Container Insights Receiver provides multi-dimensional metrics for the EKS cluster, nodes, services, etc. Accordingly, SkyWalking observes the status and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s services would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by the attribute job_name, whose value is aws-cloud-eks-monitoring. 
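As a side note on the "Configure the SkyWalking OpenTelemetry receiver" step above, the OAP side is enabled in application.yml. The snippet below is a minimal sketch assuming the module and key names of recent 9.x releases (receiver-otel, enabledHandlers, enabledOtelMetricsRules); the exact handler and rule identifiers vary by release, so verify them against the application.yml bundled with your OAP.

```yaml
receiver-otel:
  selector: default
  default:
    # handler name differs across releases ("oc", "otlp", "otlp-metrics"); check your application.yml
    enabledHandlers: otlp-metrics
    # activate the MAL rules shipped under /config/otel-rules, e.g. the aws-eks rules (exact spelling may vary)
    enabledOtelMetricsRules: aws-eks/*
```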
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","title":"AWS Cloud EKS monitoring","url":"/docs/main/latest/en/setup/backend/backend-aws-eks-monitoring/"},{"content":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
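For readability, the "OTEL Configuration Sample With AWS Container Insights Receiver" embedded above corresponds to the following collector YAML; oap-service:11800 is the OAP gRPC address placeholder used in the sample.

```yaml
extensions:
  health_check:

receivers:
  awscontainerinsightreceiver:

processors:
  resource/job-name:
    attributes:
      - key: job_name
        value: aws-cloud-eks-monitoring
        action: insert

exporters:
  otlp:
    endpoint: oap-service:11800   # SkyWalking OAP gRPC address (placeholder)
    tls:
      insecure: true
  logging:
    loglevel: debug

service:
  pipelines:
    metrics:
      receivers: [awscontainerinsightreceiver]
      processors: [resource/job-name]
      exporters: [otlp, logging]
  extensions: [health_check]
```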
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","title":"AWS Cloud EKS monitoring","url":"/docs/main/next/en/setup/backend/backend-aws-eks-monitoring/"},{"content":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  EKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","title":"AWS Cloud EKS monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-aws-eks-monitoring/"},{"content":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","title":"AWS Cloud EKS monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-aws-eks-monitoring/"},{"content":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
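The "OTEL Configuration Sample With AWS Container Insights Receiver" embedded in these entries has its YAML whitespace stripped; the sketch below restores it. The oap-service:11800 endpoint comes from the sample itself and stands in for your own OAP gRPC address.

```yaml
# OpenTelemetry Collector configuration from the sample, indentation restored.
extensions:
  health_check:

receivers:
  awscontainerinsightreceiver:

processors:
  resource/job-name:
    attributes:
      - key: job_name
        value: aws-cloud-eks-monitoring
        action: insert

exporters:
  otlp:
    endpoint: oap-service:11800   # SkyWalking OAP OTLP/gRPC endpoint
    tls:
      insecure: true
  logging:
    loglevel: debug

service:
  pipelines:
    metrics:
      receivers: [awscontainerinsightreceiver]
      processors: [resource/job-name]
      exporters: [otlp, logging]
  extensions: [health_check]
```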
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","title":"AWS Cloud EKS monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-aws-eks-monitoring/"},{"content":"AWS Cloud EKS monitoring SkyWalking leverages OpenTelemetry Collector with AWS Container Insights Receiver to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  OpenTelemetry Collector fetches metrics from EKS via AWS Container Insights Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Deploy amazon/aws-otel-collector with AWS Container Insights Receiver to EKS Config SkyWalking OpenTelemetry receiver.  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nEKS Monitoring AWS Container Insights Receiver provides multiple dimensions metrics for EKS cluster, node, service, etc. Accordingly, SkyWalking observes the status, and payload of the EKS cluster, which is cataloged as a LAYER: AWS_EKS Service in the OAP. Meanwhile, the k8s nodes would be recognized as LAYER: AWS_EKS instances. The k8s service would be recognized as endpoints.\nSpecify Job Name SkyWalking distinguishes AWS Cloud EKS metrics by attributes job_name, which value is aws-cloud-eks-monitoring. 
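The Customizations note above points at /config/otel-rules/aws-eks/ for the metric definitions and expressions. As a rough illustration of the MAL rule format used there, here is a hypothetical sketch; the field names follow the MAL rule layout, but the filter, expression, and metric shown are illustrative assumptions rather than the shipped rules.

```yaml
# Hypothetical otel-rules sketch: keep only metrics tagged with the EKS job_name
# and surface them under the AWS_EKS layer. Expressions are assumptions for illustration.
filter: "{ tags -> tags.job_name == 'aws-cloud-eks-monitoring' }"
expSuffix: tag({tags -> tags.cluster = 'eks::' + tags.ClusterName}).service(['cluster'], Layer.AWS_EKS)
metricPrefix: eks_cluster
metricsRules:
  - name: node_count            # surfaced as eks_cluster_node_count
    exp: cluster_node_count.sum(['cluster'])
```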
You could leverage OTEL Collector processor to add the attribute as follows:\nprocessors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insert Notice, if you don\u0026rsquo;t specify job_name attribute, SkyWalking OAP will ignore the metrics\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     Node Count  eks_cluster_node_count Service The node count of the EKS cluster AWS Container Insights Receiver   Failed Node Count  eks_cluster_failed_node_count Service The failed node count of the EKS cluster AWS Container Insights Receiver   Pod Count (namespace dimension)  eks_cluster_namespace_count Service The count of pod in the EKS cluster(namespace dimension) AWS Container Insights Receiver   Pod Count (service dimension)  eks_cluster_service_count Service The count of pod in the EKS cluster(service dimension) AWS Container Insights Receiver   Network RX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network RX dropped count AWS Container Insights Receiver   Network RX Error Count (per second) count/s eks_cluster_net_rx_error Service Network RX error count AWS Container Insights Receiver   Network TX Dropped Count (per second) count/s eks_cluster_net_rx_dropped Service Network TX dropped count AWS Container Insights Receiver   Network TX Error Count (per second) count/s eks_cluster_net_rx_error Service Network TX error count AWS Container Insights Receiver   Pod Count  eks_cluster_node_pod_number Instance The count of pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_cpu_utilization Instance The CPU Utilization of the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_memory_utilization Instance The Memory Utilization of the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_net_rx_bytes Instance Network RX bytes of the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network RX error count of the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_net_rx_bytes Instance Network TX bytes of the node AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_net_rx_bytes Instance Network TX error count of the node AWS Container Insights Receiver   Disk IO Write bytes/s eks_cluster_node_net_rx_bytes Instance The IO write bytes of the node AWS Container Insights Receiver   Disk IO Read bytes/s eks_cluster_node_net_rx_bytes Instance The IO read bytes of the node AWS Container Insights Receiver   FS Utilization percent eks_cluster_node_net_rx_bytes Instance The filesystem utilization of the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_node_pod_cpu_utilization Instance The CPU Utilization of the pod running on the node AWS Container Insights Receiver   Memory Utilization percent eks_cluster_node_pod_memory_utilization Instance The Memory Utilization of the pod running on the node AWS Container Insights Receiver   Network RX bytes/s eks_cluster_node_pod_net_rx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_node_pod_net_rx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   Network TX bytes/s eks_cluster_node_pod_net_tx_bytes Instance Network RX bytes of the pod running on the node AWS Container Insights Receiver   Network TX Error 
Count count/s eks_cluster_node_pod_net_tx_error Instance Network RX error count of the pod running on the node AWS Container Insights Receiver   CPU Utilization percent eks_cluster_service_pod_cpu_utilization Endpoint The CPU Utilization of pod that belong to the service AWS Container Insights Receiver   Memory Utilization percent eks_cluster_service_pod_memory_utilization Endpoint The Memory Utilization of pod that belong to the service AWS Container Insights Receiver   Network RX bytes/s eks_cluster_service_pod_net_rx_bytes Endpoint Network RX bytes of the pod that belong to the service AWS Container Insights Receiver   Network RX Error Count count/s eks_cluster_service_pod_net_rx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver   Network TX bytes/s eks_cluster_service_pod_net_tx_bytes Endpoint Network TX bytes of the pod that belong to the service AWS Container Insights Receiver   Network TX Error Count count/s eks_cluster_node_pod_net_tx_error Endpoint Network TX error count of the pod that belongs to the service AWS Container Insights Receiver    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-eks/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_eks.\nOTEL Configuration Sample With AWS Container Insights Receiver extensions:health_check:receivers:awscontainerinsightreceiver:processors:resource/job-name:attributes:- key:job_namevalue:aws-cloud-eks-monitoringaction:insertexporters:otlp:endpoint:oap-service:11800tls:insecure:truelogging:loglevel:debugservice:pipelines:metrics:receivers:[awscontainerinsightreceiver]processors:[resource/job-name]exporters:[otlp,logging]extensions:[health_check]Refer to AWS Container Insights Receiver for more information\n","title":"AWS Cloud EKS monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-aws-eks-monitoring/"},{"content":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","title":"AWS Cloud S3 monitoring","url":"/docs/main/latest/en/setup/backend/backend-aws-s3-monitoring/"},{"content":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","title":"AWS Cloud S3 monitoring","url":"/docs/main/next/en/setup/backend/backend-aws-s3-monitoring/"},{"content":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  S3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","title":"AWS Cloud S3 monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-aws-s3-monitoring/"},{"content":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","title":"AWS Cloud S3 monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-aws-s3-monitoring/"},{"content":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","title":"AWS Cloud S3 monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-aws-s3-monitoring/"},{"content":"AWS Cloud S3 monitoring Amazon Simple Storage Service (Amazon S3) is an object storage service. SkyWalking leverages AWS Kinesis Data Firehose receiver to transfer the CloudWatch metrics of s3 to OpenTelemetry receiver and into the Meter System.\nData flow  AWS CloudWatch collect metrics for S3, refer to S3 monitoring with CloudWatch CloudWatch metric streams stream CloudWatch metrics of S3 to AWS Kinesis Data Firehose AWS Kinesis Data Firehose delivery metrics to AWS Kinesis Data Firehose receiver through the HTTP endpoint  Set up  Create CloudWatch metrics configuration for S3, refer to S3 metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/S3, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring AWS EKS and S3 with SkyWalking for more details\nS3 Monitoring SkyWalking observes CloudWatch metrics of the S3 bucket, which is cataloged as a LAYER: AWS_S3 Service in the OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     4xx Errors count aws_s3_4xx Service The number of HTTP 4xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   5xx Errors count aws_s3_5xx Service The number of HTTP 5xx client error status code requests made to the S3 bucket S3 monitoring with CloudWatch   Downloaded bytes aws_s3_downloaded_bytes Service The number of bytes downloaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Uploaded bytes aws_s3_uploaded_bytes Service The number of bytes uploaded for requests made to an Amazon S3 bucket S3 monitoring with CloudWatch   Request Average Latency bytes aws_s3_request_latency Service The average of elapsed per-request time from the first byte received to the last byte sent to an Amazon S3 bucket S3 monitoring with CloudWatch   First Byte Average Latency bytes aws_s3_request_latency Service The average of per-request time from the complete request being received by an Amazon S3 bucket to when the response starts to be returned S3 monitoring with CloudWatch   All Requests bytes aws_s3_delete_requests Service The number of HTTP All requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Get Requests bytes aws_s3_delete_requests Service The number of HTTP Get requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Put Requests bytes aws_s3_delete_requests Service The number of HTTP PUT requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch   Delete Requests bytes aws_s3_delete_requests Service The number of HTTP Delete requests made for objects in an Amazon S3 bucket S3 monitoring with CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-s3/. The AWS Cloud EKS dashboard panel configurations are found in /config/ui-initialized-templates/aws_s3.\n","title":"AWS Cloud S3 monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-aws-s3-monitoring/"},{"content":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. 
refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. The AWS user ID is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful requests Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Count  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","title":"AWS DynamoDb monitoring","url":"/docs/main/latest/en/setup/backend/backend-aws-dynamodb-monitoring/"},{"content":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Firehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream. 
Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","title":"AWS DynamoDb monitoring","url":"/docs/main/next/en/setup/backend/backend-aws-dynamodb-monitoring/"},{"content":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  DynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. 
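The setup steps in this entry say to "Config aws-firehose-receiver to receive data". A minimal, hypothetical sketch of enabling that receiver in the OAP application.yml follows; the module and property names are assumptions, while the 0.0.0.0:12801 default listen address is the one documented for the AWS Firehose receiver later in this index.

```yaml
# Hypothetical application.yml fragment; module/property names are assumptions.
receiver-aws-firehose:
  selector: ${SW_RECEIVER_AWS_FIREHOSE:default}
  default:
    host: 0.0.0.0   # default bind address per the AWS Firehose receiver entry
    port: 12801     # default HTTP port exposing /aws/firehose/metrics
```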
The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","title":"AWS DynamoDb monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-aws-dynamodb-monitoring/"},{"content":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. 
Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","title":"AWS DynamoDb monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-aws-dynamodb-monitoring/"},{"content":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. 
Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","title":"AWS DynamoDb monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-aws-dynamodb-monitoring/"},{"content":"AWS DynamoDb monitoring SkyWalking leverages Amazon Kinesis Data Filehose with Amazon CloudWatch to transfer the metrics into the Meter System.\nData flow  Amazon CloudWatch fetches metrics from DynamoDB and pushes metrics to SkyWalking OAP Server via Amazon Kinesis data firehose. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Create CloudWatch metrics configuration for DynamoDB, refer to DynamoDB metrics configuration Create an Amazon Kinesis Data Firehose Delivery Stream, and set AWS Kinesis Data Firehose receiver\u0026rsquo;s address as HTTP(s) Destination, refer to Create Delivery Stream3. Create a metric stream, set namespace to DynanoDB, and set Kinesis Data Firehose to the firehose you just created. Config aws-firehose-receiver to receive data. Create CloudWatch metric stream, and select the Firehose Delivery Stream which has been created above, set Select namespaces to AWS/DynamoDB, Select output format to OpenTelemetry 0.7. refer to CloudWatch Metric Streams  Read Monitoring DynamoDB with SkyWalking for more details\nDynamoDB Monitoring DynamoDB monitoring provides monitoring of the status and resources of the DynamoDB server. AWS user id is cataloged as a Layer: AWS_DYNAMODB Service in OAP. Each DynamoDB table is cataloged as an Endpoint in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Read Usage unit/s consumed_read_capacity_units provisioned_read_capacity_units The situation of read capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Write Usage unit/s consumed_write_capacity_units provisioned_write_capacity_units The situation of write capacity units consumed and provisioned over the specified time period Amazon CloudWatch   Successful Request Latency ms get_successful_request_latency put_successful_request_latency query_successful_request_latency scan_successful_request_latency The latency of successful request Amazon CloudWatch   TTL Deleted Item count  time_to_live_deleted_item_count The count of items deleted by TTL Amazon CloudWatch   Throttle Events  read_throttle_events write_throttle_events Requests to DynamoDB that exceed the provisioned read/write capacity units for a table or a global secondary index. Amazon CloudWatch   Throttled Requests  read_throttled_requests write_throttled_requests Requests to DynamoDB that exceed the provisioned throughput limits on a resource (such as a table or an index). 
Amazon CloudWatch   Scan/Query Operation Returned Item Ccount  scan_returned_item_count query_returned_item_count\n The number of items returned by Query, Scan or ExecuteStatement (select) operations during the specified time period. Amazon CloudWatch   System Errors  read_system_errors\nwrite_system_errors The requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 500 status code during the specified time period. Amazon CloudWatch   User Errors  user_errors Requests to DynamoDB or Amazon DynamoDB Streams that generate an HTTP 400 status code during the specified time period. Amazon CloudWatch   Condition Checked Fail Requests  conditional_check_failed_requests The number of failed attempts to perform conditional writes. Amazon CloudWatch   Transaction Conflict  transaction_conflict Rejected item-level requests due to transactional conflicts between concurrent requests on the same items. Amazon CloudWatch    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/aws-dynamodb. The DynamoDB dashboard panel configurations are found in /config/ui-initialized-templates/aws_dynamodb.\n","title":"AWS DynamoDb monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-aws-dynamodb-monitoring/"},{"content":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose 
receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","title":"AWS Firehose receiver","url":"/docs/main/latest/en/setup/backend/aws-firehose-receiver/"},{"content":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. 
(Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath on the OAP side to accept requests from Firehose directly.  The AWS Firehose receiver supports setting an accessKey for Kinesis Data Firehose; please refer to the configuration vocabulary  ","title":"AWS Firehose receiver","url":"/docs/main/next/en/setup/backend/aws-firehose-receiver/"},{"content":"AWS Firehose receiver The AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows the Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications. You could leverage the receiver to collect AWS CloudWatch metrics and analyze them through MAL, as the receiver is based on the OpenTelemetry receiver.\nSetup (S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to an AWS Kinesis Data Firehose delivery stream via a CloudWatch metrics stream Specify the AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry format) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver (OAP) --\u0026gt; OpenTelemetry receiver (OAP) Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatch Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatch Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatch Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only the OpenTelemetry format is supported (refer to Metric streams output formats) A proxy (e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443 (refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications). 
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","title":"AWS Firehose receiver","url":"/docs/main/v9.4.0/en/setup/backend/aws-firehose-receiver/"},{"content":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","title":"AWS Firehose receiver","url":"/docs/main/v9.5.0/en/setup/backend/aws-firehose-receiver/"},{"content":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  
AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","title":"AWS Firehose receiver","url":"/docs/main/v9.6.0/en/setup/backend/aws-firehose-receiver/"},{"content":"AWS Firehose receiver AWS Firehose receiver listens on 0.0.0.0:12801 by default, and provides an HTTP Endpoint /aws/firehose/metrics that follows Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications You could leverage the receiver to collect AWS CloudWatch metrics, and analysis it through MAL as the receiver bases on OpenTelemetry receiver\nSetup(S3 example)  Create CloudWatch metrics configuration for S3 (refer to S3 CloudWatch metrics) Stream CloudWatch metrics to AWS Kinesis Data Firehose delivery stream by CloudWatch metrics stream Specify AWS Kinesis Data Firehose delivery stream HTTP Endpoint (refer to Choose HTTP Endpoint for Your Destination)  Usually, the AWS CloudWatch metrics process flow with OAP is as follows:\nCloudWatch metrics with S3 --\u0026gt; CloudWatch Metric Stream (OpenTelemetry formart) --\u0026gt; Kinesis Data Firehose Delivery Stream --\u0026gt; AWS Firehose receiver(OAP) --\u0026gt; OpenTelemetry receiver(OAP) The following blogs demonstrate complete setup process for AWS S3 and API Gateway:\n Monitoring DynamoDB with SkyWalking Monitoring AWS EKS and S3 with SkyWalking  Supported metrics    Description Configuration File Data Source     Metrics of AWS Cloud S3 otel-rules/aws-s3/s3-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS DynamoDB otel-rules/aws-dynamodb/dynamodb-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-service.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver   Metrics of AWS API Gateway otel-rules/aws-gateway/gateway-endpoint.yaml AWS CloudWatcher Metrics Stream -\u0026gt; AWS Firehose delivery stream -\u0026gt; SkyWalking OAP Server with AWS Firehose receiver    Notice  Only OpenTelemetry format is supported (refer to Metric streams output formats) According to HTTPS requirement by AWS Firehose(refer to Amazon Kinesis Data Firehose Delivery Stream HTTP Endpoint Delivery Specifications, users have two options   A proxy(e.g. Nginx, Envoy) is required in front of OAP\u0026rsquo;s Firehose receiver to accept HTTPS requests from AWS Firehose through port 443. (Recommended based on the general security policy) Set aws-firehose/enableTLS=true with suitable cert/key files through aws-firehose/tlsKeyPath and aws-firehose/tlsCertChainPath at OAP side to accept requests from firehose directly.  AWS Firehose receiver support setting accessKey for Kinesis Data Firehose, please refer to configuration vocabulary  ","title":"AWS Firehose receiver","url":"/docs/main/v9.7.0/en/setup/backend/aws-firehose-receiver/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. 
SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/latest/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly by default, the OAP server cluster would face the problem of load imbalance. This issue becomes severe in high-traffic load scenarios. In this doc, we will introduce two means to solve the problem.\nSkyWalking Satellite Project SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\nEnvoy Filter to Limit Connections Per OAP Instance If you don\u0026rsquo;t want to deploy skywalking-satellite, you can enable Istio sidecar injection for SkyWalking OAP Pods,\nkubectl label namespace $SKYWALKING_NAMESPACE istio-injection=enabled kubectl -n $SKYWALKING_NAMESPACE rollout restart -l app=skywalking,component=oap and apply an EnvoyFilter to limit the connections per OAP instance, so that each of the OAP instance can have similar amount of gRPC connections.\nBefore that, you need to calculate the number of connections for each OAP instance as follows:\nNUMBER_OF_SERVICE_PODS=\u0026lt;the-number-of-service-pods-that-are-monitored-by-skywalking\u0026gt; # Each service Pod has 2 connections to OAP NUMBER_OF_TOTAL_CONNECTIONS=$((NUMBER_OF_SERVICE_PODS * 2)) # Divide the total connections by the replicas of OAP NUMBER_OF_CONNECTIONS_PER_OAP=$((NUMBER_OF_TOTAL_CONNECTIONS / $NUMBER_OF_OAP_REPLICAS)) And you can apply an EnvoyFilter to limit connections:\nkubectl -n $SKYWALKING_NAMESPACE apply -f - \u0026lt;\u0026lt;EOF apiVersion: networking.istio.io/v1alpha3 kind: EnvoyFilter metadata: name: oap-limit-connections namespace: istio-system spec: configPatches: - applyTo: NETWORK_FILTER match: context: ANY listener: filterChain: filter: name: envoy.filters.network.http_connection_manager portNumber: 11800 patch: operation: INSERT_BEFORE value: name: envoy.filters.network.ConnectionLimit typed_config: \u0026#39;@type\u0026#39;: type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit max_connections: $NUMBER_OF_CONNECTIONS_PER_OAP stat_prefix: envoy_filters_network_connection_limit workloadSelector: labels: app: oap EOF By this approach, we can limit the connections to port 11800 per OAP instance, but there is another corner case when the amount 
of service Pods are huge. Because the limiting is on connection level, and each service Pod has 2 connections to OAP port 11800, one for Envoy ALS to send access log, the other one for Envoy metrics, and because the traffic of the 2 connections can vary very much, if the number of service Pods is large enough, an extreme case might happen that one OAP instance is serving all Envoy metrics connections and the other OAP instance is serving all Envoy ALS connections, which in turn might be unbalanced again, to solve this, we can split the ALS connections to a dedicated port, and limit the connections to that port only.\nYou can set the environment variable SW_ALS_GRPC_PORT to a port number other than 0 when deploying skywalking, and limit connections to that port only in the EnvoyFilter:\nexport SW_ALS_GRPC_PORT=11802 kubectl -n $SKYWALKING_NAMESPACE apply -f - \u0026lt;\u0026lt;EOF apiVersion: networking.istio.io/v1alpha3 kind: EnvoyFilter metadata: name: oap-limit-connections namespace: istio-system spec: configPatches: - applyTo: NETWORK_FILTER match: context: ANY listener: filterChain: filter: name: envoy.filters.network.http_connection_manager portNumber: $SW_ALS_GRPC_PORT patch: operation: INSERT_BEFORE value: name: envoy.filters.network.ConnectionLimit typed_config: \u0026#39;@type\u0026#39;: type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit max_connections: $NUMBER_OF_CONNECTIONS_PER_OAP stat_prefix: envoy_filters_network_connection_limit workloadSelector: labels: app: oap EOF ","title":"Backend Load Balancer","url":"/docs/main/next/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When set the Agent or Envoy connecting to OAP server directly as in default, OAP server cluster would face the problem of OAP load imbalance. This issue would be very serious in high traffic load scenarios. Satellite is recommended to be used as a native gateway proxy, to provide load balancing capabilities for data content before the data from Agent/Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that, Satellite would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details how to set up load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.0.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.1.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.2.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.3.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When setting the Agent or Envoy to connect to the OAP server directly as in default, the OAP server cluster would face the problem of load imbalance. This issue would be severe in high-traffic load scenarios. SkyWalking Satellite is recommended to be used as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/ Envoy reaches the OAP. 
The major difference between Satellite and other widely used general-purpose proxies, like Envoy, is that Satellite routes the data according to its content rather than per connection, as gRPC streaming is used widely in SkyWalking.\nFollow the instructions in Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nThe Scaling with Apache SkyWalking blog introduces the theory and technical details of how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.4.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When the Agent or Envoy is set to connect to the OAP server directly, as in the default setup, the OAP server cluster would face the problem of load imbalance. This issue can be severe in high-traffic scenarios. SkyWalking Satellite is recommended as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/Envoy reaches the OAP. The major difference between Satellite and other widely used general-purpose proxies, like Envoy, is that Satellite routes the data according to its content rather than per connection, as gRPC streaming is used widely in SkyWalking.\nFollow the instructions in Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nThe Scaling with Apache SkyWalking blog introduces the theory and technical details of how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.5.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When the Agent or Envoy is set to connect to the OAP server directly, as in the default setup, the OAP server cluster would face the problem of load imbalance. This issue can be severe in high-traffic scenarios. SkyWalking Satellite is recommended as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/Envoy reaches the OAP. The major difference between Satellite and other widely used general-purpose proxies, like Envoy, is that Satellite routes the data according to its content rather than per connection, as gRPC streaming is used widely in SkyWalking.\nFollow the instructions in Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nThe Scaling with Apache SkyWalking blog introduces the theory and technical details of how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.6.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend Load Balancer When the Agent or Envoy is set to connect to the OAP server directly, as in the default setup, the OAP server cluster would face the problem of load imbalance. This issue can be severe in high-traffic scenarios. SkyWalking Satellite is recommended as a native gateway proxy to provide load balancing capabilities for data content before the data from Agent/Envoy reaches the OAP. 
The major difference between Satellite and other general wide used proxy(s), like Envoy, is that it would route the data accordingly to contents rather than connection, as gRPC streaming is used widely in SkyWalking.\nFollow instructions in the Setup SkyWalking Satellite to deploy Satellite and connect your application to the Satellite.\nScaling with Apache SkyWalking blog introduces the theory and technology details on how to set up a load balancer for the OAP cluster.\n","title":"Backend Load Balancer","url":"/docs/main/v9.7.0/en/setup/backend/backend-load-balancer/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 or JDK17.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\nKey Parameters In The Booting Logs After the OAP booting process completed, you should be able to see all important parameters listed in the logs.\n2023-11-06 21:10:45,988 org.apache.skywalking.oap.server.starter.OAPServerBootstrap 67 [main] INFO [] - The key booting parameters of Apache SkyWalking OAP are listed as following. 
Running Mode | null TTL.metrics | 7 TTL.record | 3 Version | 9.7.0-SNAPSHOT-92af797 module.agent-analyzer.provider | default module.ai-pipeline.provider | default module.alarm.provider | default module.aws-firehose.provider | default module.cluster.provider | standalone module.configuration-discovery.provider | default module.configuration.provider | none module.core.provider | default module.envoy-metric.provider | default module.event-analyzer.provider | default module.log-analyzer.provider | default module.logql.provider | default module.promql.provider | default module.query.provider | graphql module.receiver-browser.provider | default module.receiver-clr.provider | default module.receiver-ebpf.provider | default module.receiver-event.provider | default module.receiver-jvm.provider | default module.receiver-log.provider | default module.receiver-meter.provider | default module.receiver-otel.provider | default module.receiver-profile.provider | default module.receiver-register.provider | default module.receiver-sharing-server.provider | default module.receiver-telegraf.provider | default module.receiver-trace.provider | default module.service-mesh.provider | default module.storage.provider | h2 module.telemetry.provider | none oap.external.grpc.host | 0.0.0.0 oap.external.grpc.port | 11800 oap.external.http.host | 0.0.0.0 oap.external.http.port | 12800 oap.internal.comm.host | 0.0.0.0 oap.internal.comm.port | 11800  oap.external.grpc.host:oap.external.grpc.port is for reporting telemetry data through gRPC channel, including native agents, OTEL. oap.external.http.host:oap.external.http.port is for reporting telemetry data through HTTP channel and query, including native GraphQL(UI), PromQL, LogQL. oap.internal.comm.host:oap.internal.comm.port is for OAP cluster internal communication via gRPC/HTTP2 protocol. The default host(0.0.0.0) is not suitable for the cluster mode, unless in k8s deployment. Please read Cluster Doc to understand how to set up the SkyWalking backend in the cluster mode.  application.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\n","title":"Backend setup","url":"/docs/main/latest/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. 
UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: Java 11/17/21.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\nKey Parameters In The Booting Logs After the OAP booting process completed, you should be able to see all important parameters listed in the logs.\n2023-11-06 21:10:45,988 org.apache.skywalking.oap.server.starter.OAPServerBootstrap 67 [main] INFO [] - The key booting parameters of Apache SkyWalking OAP are listed as following. Running Mode | null TTL.metrics | 7 TTL.record | 3 Version | 9.7.0-SNAPSHOT-92af797 module.agent-analyzer.provider | default module.ai-pipeline.provider | default module.alarm.provider | default module.aws-firehose.provider | default module.cluster.provider | standalone module.configuration-discovery.provider | default module.configuration.provider | none module.core.provider | default module.envoy-metric.provider | default module.event-analyzer.provider | default module.log-analyzer.provider | default module.logql.provider | default module.promql.provider | default module.query.provider | graphql module.receiver-browser.provider | default module.receiver-clr.provider | default module.receiver-ebpf.provider | default module.receiver-event.provider | default module.receiver-jvm.provider | default module.receiver-log.provider | default module.receiver-meter.provider | default module.receiver-otel.provider | default module.receiver-profile.provider | default module.receiver-register.provider | default module.receiver-sharing-server.provider | default module.receiver-telegraf.provider | default module.receiver-trace.provider | default module.service-mesh.provider | default module.storage.provider | h2 module.telemetry.provider | none oap.external.grpc.host | 0.0.0.0 oap.external.grpc.port | 11800 oap.external.http.host | 0.0.0.0 oap.external.http.port | 12800 oap.internal.comm.host | 0.0.0.0 oap.internal.comm.port | 11800  oap.external.grpc.host:oap.external.grpc.port is for reporting telemetry data through gRPC channel, including native agents, OTEL. 
oap.external.http.host:oap.external.http.port is for reporting telemetry data through HTTP channel and query, including native GraphQL(UI), PromQL, LogQL. oap.internal.comm.host:oap.internal.comm.port is for OAP cluster internal communication via gRPC/HTTP2 protocol. The default host(0.0.0.0) is not suitable for the cluster mode, unless in k8s deployment. Please read Cluster Doc to understand how to set up the SkyWalking backend in the cluster mode.  application.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  
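For instance, every ${VAR:default} placeholder in the storage example above can be overridden with an environment variable before the backend starts. A minimal sketch, assuming the mysql selector shown in the example and the default oapService.sh startup script mentioned earlier on this page; the values below are simply the defaults from that example and should be replaced with your own:
```shell
# Override the MySQL storage placeholders shown in the application.yml example above.
# These are the example's default values; substitute your real connection settings.
export SW_JDBC_URL="jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true"
export SW_DATA_SOURCE_USER=root
export SW_DATA_SOURCE_PASSWORD=root@1234
export SW_STORAGE_MYSQL_QUERY_MAX_SIZE=5000

# Start the OAP backend; the exported variables replace the ${VAR:default} placeholders.
bin/oapService.sh
```
Any placeholder left unset simply falls back to the default value after the colon in its ${VAR:default} expression.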
FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\n","title":"Backend setup","url":"/docs/main/next/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found in it.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and ip/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n All native agents and probes, either language based or mesh probe, use the gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the Jetty service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend also in Jetty service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn about other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the setting file will help you read this document. The core concept behind this setting file is that the SkyWalking collector is based on pure modular design. End users can switch or assemble the collector features according to their own requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If there is only one provider listed, the selector is optional and can be omitted. Level 3. 
Settings of the provider.  Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:driver:${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}url:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against trying to change the APIs of those modules, unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like YYYYMMDDHHmm in minute dimension metrics, which is timezone related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. If you want to override it, please follow the Java and OS documents.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, in order to reduce memory, network and storage space usages, SkyWalking saves based64-encoded ID(s) only in metrics entities. But these tools usually don\u0026rsquo;t support nested query, and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.0.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
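To make the three levels concrete, here is a minimal, schematic sketch of a module declaration in application.yml. It uses the cluster module with its standalone provider purely as an illustration; key names and defaults beyond the selector are assumptions, and the official storage example follows below.
cluster:                               # Level 1: the module is active in running mode
  selector: ${SW_CLUSTER:standalone}   # Level 2: which listed provider actually takes effect
  standalone:                          # Level 2: an available provider (needs no settings)
  zookeeper:                           # Level 2: an unselected provider is ignored, as if deleted
    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}   # Level 3: settings of that provider (illustrative)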
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:driver:${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}url:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.1.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:driver:${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}url:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.2.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK8 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.3.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.4.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.5.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 to JDK17 are tested. Other versions are not tested and may or may not work.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\napplication.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\nHow to query the storage directly from a 3rd party tool? SkyWalking provides different options based on browser UI, CLI and GraphQL to support extensions. But some users may want to query data directly from the storage. For example, in the case of ElasticSearch, Kibana is a great tool for doing this.\nBy default, SkyWalking saves based64-encoded ID(s) only in metrics entities to reduce memory, network and storage space usages. But these tools usually don\u0026rsquo;t support nested queries and are not convenient to work with. 
For these exceptional reasons, SkyWalking provides a config to add all necessary name column(s) into the final metrics entities with ID as a trade-off.\nTake a look at core/default/activeExtraModelColumns config in the application.yaml, and set it as true to enable this feature.\nNote that this feature is simply for 3rd party integration and doesn\u0026rsquo;t provide any new features to native SkyWalking use cases.\n","title":"Backend setup","url":"/docs/main/v9.6.0/en/setup/backend/backend-setup/"},{"content":"Backend setup SkyWalking\u0026rsquo;s backend distribution package consists of the following parts:\n  bin/cmd scripts: Located in the /bin folder. Includes startup Linux shell and Windows cmd scripts for the backend server and UI startup.\n  Backend config: Located in the /config folder. Includes settings files of the backend, which are:\n application.yml log4j.xml alarm-settings.yml    Libraries of backend: Located in the /oap-libs folder. All dependencies of the backend can be found there.\n  Webapp env: Located in the webapp folder. UI frontend jar file can be found here, together with its webapp.yml setting file.\n  Requirements and default settings Requirement: JDK11 or JDK17.\nBefore you begin, you should understand that the main purpose of the following quickstart is to help you obtain a basic configuration for previews/demos. Performance and long-term running are NOT among the purposes of the quickstart.\nFor production/QA/tests environments, see Backend and UI deployment documents.\nYou can use bin/startup.sh (or cmd) to start up the backend and UI with their default settings, set out as follows:\n Backend storage uses H2 by default (for an easier start) Backend listens on 0.0.0.0/11800 for gRPC APIs and 0.0.0.0/12800 for HTTP REST APIs.  In Java, DotNetCore, Node.js, and Istio agents/probes, you should set the gRPC service address to ip/host:11800, and IP/host should be where your backend is.\n UI listens on 8080 port and request 127.0.0.1/12800 to run a GraphQL query.  Interaction Before deploying Skywalking in your distributed environment, you should learn about how agents/probes, the backend, and the UI communicate with each other:\n Most native agents and probes, including language-based or mesh probes, use gRPC service (core/default/gRPC* in application.yml) to report data to the backend. Also, the REST service is supported in JSON format. UI uses GraphQL (HTTP) query to access the backend, also in REST service (core/default/rest* in application.yml).  Startup script The default startup scripts are /bin/oapService.sh(.bat). Read the start up mode document to learn other ways to start up the backend.\nKey Parameters In The Booting Logs After the OAP booting process completed, you should be able to see all important parameters listed in the logs.\n2023-11-06 21:10:45,988 org.apache.skywalking.oap.server.starter.OAPServerBootstrap 67 [main] INFO [] - The key booting parameters of Apache SkyWalking OAP are listed as following. 
Running Mode | null TTL.metrics | 7 TTL.record | 3 Version | 9.7.0-SNAPSHOT-92af797 module.agent-analyzer.provider | default module.ai-pipeline.provider | default module.alarm.provider | default module.aws-firehose.provider | default module.cluster.provider | standalone module.configuration-discovery.provider | default module.configuration.provider | none module.core.provider | default module.envoy-metric.provider | default module.event-analyzer.provider | default module.log-analyzer.provider | default module.logql.provider | default module.promql.provider | default module.query.provider | graphql module.receiver-browser.provider | default module.receiver-clr.provider | default module.receiver-ebpf.provider | default module.receiver-event.provider | default module.receiver-jvm.provider | default module.receiver-log.provider | default module.receiver-meter.provider | default module.receiver-otel.provider | default module.receiver-profile.provider | default module.receiver-register.provider | default module.receiver-sharing-server.provider | default module.receiver-telegraf.provider | default module.receiver-trace.provider | default module.service-mesh.provider | default module.storage.provider | h2 module.telemetry.provider | none oap.external.grpc.host | 0.0.0.0 oap.external.grpc.port | 11800 oap.external.http.host | 0.0.0.0 oap.external.http.port | 12800 oap.internal.comm.host | 0.0.0.0 oap.internal.comm.port | 11800  oap.external.grpc.host:oap.external.grpc.port is for reporting telemetry data through gRPC channel, including native agents, OTEL. oap.external.http.host:oap.external.http.port is for reporting telemetry data through HTTP channel and query, including native GraphQL(UI), PromQL, LogQL. oap.internal.comm.host:oap.internal.comm.port is for OAP cluster internal communication via gRPC/HTTP2 protocol. The default host(0.0.0.0) is not suitable for the cluster mode, unless in k8s deployment. Please read Cluster Doc to understand how to set up the SkyWalking backend in the cluster mode.  application.yml SkyWalking backend startup behaviours are driven by config/application.yml. Understanding the settings file will help you read this document.\nThe core concept behind this setting file is that the SkyWalking collector is based on a pure modular design. End-users can switch or assemble the collector features according to their unique requirements.\nIn application.yml, there are three levels.\n Level 1: Module name. This means that this module is active in running mode. Level 2: Provider option list and provider selector. Available providers are listed here with a selector to indicate which one will actually take effect. If only one provider is listed, the selector is optional and can be omitted. Level 3. Settings of the chosen provider.  
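Before the full storage example below, here is a minimal sketch of how an optional module can be kept inert. The telemetry module (shown with provider none in the booting parameters above) is used as the illustration; the prometheus keys are assumptions included only to show where Level 3 settings would go.
telemetry:
  selector: ${SW_TELEMETRY:none}   # the none provider is an empty shell with no actual logic
  none:
  prometheus:                      # unselected providers take no effect, as if deleted
    host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}   # assumed keys, for illustration only
    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
# Setting the selector to "-" instead excludes the whole module at runtime:
# telemetry:
#   selector: "-"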
Example:\nstorage:selector:mysql# the mysql storage will actually be activated, while the h2 storage takes no effecth2:properties:jdbcUrl:${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db;DB_CLOSE_DELAY=-1;DATABASE_TO_UPPER=FALSE}dataSource.user:${SW_STORAGE_H2_USER:sa}metadataQueryMaxSize:${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}# other configurations storage is the module. selector selects one out of all providers listed below. The unselected ones take no effect as if they were deleted. default is the default implementor of the core module. driver, url, \u0026hellip; metadataQueryMaxSize are all setting items of the implementor.  At the same time, there are two types of modules: required and optional. The required modules provide the skeleton of the backend. Even though their modular design supports pluggability, removing those modules does not serve any purpose. For optional modules, some of them have a provider implementation called none, meaning that it only provides a shell with no actual logic, typically such as telemetry. Setting - to the selector means that this whole module will be excluded at runtime. We advise against changing the APIs of those modules unless you understand the SkyWalking project and its codes very well.\nThe required modules are listed here:\n Core. Provides the basic and major skeleton of all data analysis and stream dispatch. Cluster. Manages multiple backend instances in a cluster, which could provide high throughput process capabilities. See Cluster Management for more details. Storage. Makes the analysis result persistent. See Choose storage for more details Query. Provides query interfaces to UI. Receiver and Fetcher. Expose the service to the agents and probes, or read telemetry data from a channel.  FAQs Why do we need to set the timezone? And when do we do it? SkyWalking provides downsampling time-series metrics features. Query and store at each time dimension (minute, hour, day, month metrics indexes) related to timezone when time formatting.\nFor example, metrics time will be formatted like yyyyMMddHHmm in minute dimension metrics, which is timezone-related.\nBy default, SkyWalking\u0026rsquo;s OAP backend chooses the OS default timezone. Please follow the Java and OS documents if you want to override the timezone.\n","title":"Backend setup","url":"/docs/main/v9.7.0/en/setup/backend/backend-setup/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL and its compatible databases PostgreSQL and its compatible databases BanyanDB(alpha stage)  H2 is the default storage option in the distribution package. It is recommended to use H2 for testing and development ONLY. 
Elasticsearch and OpenSearch are recommended for production environments, especially for large scale deployments. MySQL and PostgreSQL are recommended for production environments for medium scale deployments, especially for low trace and log sampling rate. Some of their compatible databases may support larger scale better, such as TiDB and AWS Aurora.\nBanyanDB is going to be our next generation storage solution. It is still in alpha stage. It has shown high potential performance improvement: less than 50% CPU usage and 50% memory usage with 40% disk volume compared to Elasticsearch at the same scale with 100% sampling. We are looking for early adoption, and it is expected to become our first-class recommended storage option from 2024.\n","title":"Backend storage","url":"/docs/main/latest/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL and its compatible databases PostgreSQL and its compatible databases BanyanDB(alpha stage)  H2 is the default storage option in the distribution package. It is recommended to use H2 for testing and development ONLY. Elasticsearch and OpenSearch are recommended for production environments, especially for large scale deployments. MySQL and PostgreSQL are recommended for production environments for medium scale deployments, especially for low trace and log sampling rate. Some of their compatible databases may support larger scale better, such as TiDB and AWS Aurora.\nBanyanDB is going to be our next generation storage solution. It is still in alpha stage. It has shown high potential performance improvement: less than 50% CPU usage and 50% memory usage with 40% disk volume compared to Elasticsearch at the same scale with 100% sampling. We are looking for early adoption, and it is expected to become our first-class recommended storage option from 2024.\n","title":"Backend storage","url":"/docs/main/next/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL TiDB InfluxDB PostgreSQL IoTDB  H2 To activate H2 as storage, set the storage provider to H2 In-Memory Databases. This is the default in the distribution package. Please read the Database URL Overview in the H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch storage shares the same configurations as ElasticSearch. To activate OpenSearch as storage, set the storage provider to elasticsearch.\nElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. 
So please choose the suitable ElasticSearch version according to your usage.\nSince 8.8.0, SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selector for different ElasticSearch server side version anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_SUPERDATASET_STORAGE_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. Also, this factor effects Zipkin and Jaeger traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. 
It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese logs, Japanese logs, etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step (storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. 
If the information exists in the file, the user/password will be overridden.\nAdvanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official document, since they have a direct impact on the performance of ElasticSearch.\nElasticSearch with Zipkin trace extension This implementation is very similar to elasticsearch, except that it extends to support Zipkin span storage. The configurations are largely the same.\nstorage:selector:${SW_STORAGE:zipkin-elasticsearch}zipkin-elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}# Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.htmlbulkActions:${SW_STORAGE_ES_BULK_ACTIONS:2000}# Execute the bulk every 2000 requestsbulkSize:${SW_STORAGE_ES_BULK_SIZE:20}# flush the bulk every 20mbflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. 
See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nTiDB Tested TiDB Server 4.0.8 version and MySQL Client driver 8.0.13 version are currently available. Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nInfluxDB InfluxDB storage provides a time-series database as a new storage option.\nstorage:selector:${SW_STORAGE:influxdb}influxdb:url:${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}user:${SW_STORAGE_INFLUXDB_USER:root}password:${SW_STORAGE_INFLUXDB_PASSWORD:}database:${SW_STORAGE_INFLUXDB_DATABASE:skywalking}actions:${SW_STORAGE_INFLUXDB_ACTIONS:1000}# the number of actions to collectduration:${SW_STORAGE_INFLUXDB_DURATION:1000}# the time to wait at most (milliseconds)fetchTaskLogMaxSize:${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000}# the max number of fetch task log in a requestAll connection related settings, including URL link, username, and password are found in application.yml. For metadata storage provider settings, refer to the configurations of H2/MySQL above.\nPostgreSQL PostgreSQL jdbc driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. 
Only part of the settings are listed here. Please follow HikariCP connection pool document for full settings.\nIoTDB IoTDB is a time-series database from Apache, which is one of the storage plugin options.\nIoTDB storage plugin is still in progress. Its efficiency will improve in the future.\nstorage:selector:${SW_STORAGE:iotdb}iotdb:host:${SW_STORAGE_IOTDB_HOST:127.0.0.1}rpcPort:${SW_STORAGE_IOTDB_RPC_PORT:6667}username:${SW_STORAGE_IOTDB_USERNAME:root}password:${SW_STORAGE_IOTDB_PASSWORD:root}storageGroup:${SW_STORAGE_IOTDB_STORAGE_GROUP:root.skywalking}sessionPoolSize:${SW_STORAGE_IOTDB_SESSIONPOOL_SIZE:8}# If it\u0026#39;s zero, the SessionPool size will be 2*CPU_CoresfetchTaskLogMaxSize:${SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE:1000}# the max number of fetch task log in a requestAll connection related settings, including host, rpcPort, username, and password are found in application.yml. Please ensure the IoTDB version \u0026gt;= 0.12.3.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","title":"Backend storage","url":"/docs/main/v9.0.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. 
So please choose the suitable ElasticSearch version according to your usage.\nSince 8.8.0, SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_SUPERDATASET_STORAGE_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. Also, this factor effects Zipkin and Jaeger traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. 
It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. 
If the information exists in the file, the user/password will be overridden.\nAdvanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nTiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. 
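Note that TiDB is accessed through the MySQL protocol, so the MySQL connection driver jar described in the NOTE of the MySQL section above also needs to be available in oap-libs when this option is selected; this is implied by the jdbc:mysql URL used in the fragment below.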
Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
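As with the other providers, it is enabled through the storage selector; assuming the same convention as the selector fragments above, a minimal sketch is:\nstorage:selector:${SW_STORAGE:banyandb}The configuration fragment below then lists only the banyandb provider settings.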
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requestFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","title":"Backend storage","url":"/docs/main/v9.1.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nSince 9.2.0, SkyWalking provides no-sharding/one-index mode to merge all metrics/meter and records(without super datasets) indices into one physical index template metrics-all and records-all on the default setting. In the current one index mode, users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out. After merge all indices, the following indices are available:\n sw_ui_template sw_metrics-all-${day-format} sw_log-${day-format} sw_segment-${day-format} sw_browser_error_log-${day-format} sw_zipkin_span-${day-format} sw_records-all-${day-format}   Provide system environment variable(SW_STORAGE_ES_LOGIC_SHARDING). 
Set it to true could shard metrics indices into multi-physical indices as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.\n Since 8.8.0, SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_SUPERDATASET_STORAGE_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin and Jaeger traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. 
This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nAdvanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. 
See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nTiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requestFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","title":"Backend storage","url":"/docs/main/v9.2.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL MySQL-Sharding(Shardingsphere-Proxy 5.1.2) TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.6 2.4.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. 
slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. 
Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nMySQL-Sharding MySQL-Sharding plugin provides the MySQL database sharding and table sharding, this feature leverage Shardingsphere-Proxy to manage the JDBC between OAP and multi-database instances, and according to the sharding rules do routing to the database and table sharding.\nTested Shardingsphere-Proxy 5.1.2 version, and MySQL Client driver 8.0.13 version is currently available. Activate MySQL and Shardingsphere-Proxy as storage, and set storage provider to mysql-sharding.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql-sharding}mysql-sharding:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:13307/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}# The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml# The dataSource name should include the prefix \u0026#34;ds_\u0026#34; and separated by \u0026#34;,\u0026#34;dataSources:${SW_JDBC_SHARDING_DATA_SOURCES:ds_0,ds_1}TiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. 
Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","title":"Backend storage","url":"/docs/main/v9.3.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL MySQL-Sharding(Shardingsphere-Proxy 5.3.1) TiDB PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.6 2.4.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. 
slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nMySQL-Sharding MySQL-Sharding plugin provides the MySQL database sharding and table sharding, this feature leverage Shardingsphere-Proxy to manage the JDBC between OAP and multi-database instances, and according to the sharding rules do routing to the database and table sharding.\nTested Shardingsphere-Proxy 5.3.1 version, and MySQL Client driver 8.0.13 version is currently available. Activate MySQL and Shardingsphere-Proxy as storage, and set storage provider to mysql-sharding.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql-sharding}mysql-sharding:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:13307/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}# The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml# The dataSource name should include the prefix \u0026#34;ds_\u0026#34; and separated by \u0026#34;,\u0026#34;dataSources:${SW_JDBC_SHARDING_DATA_SOURCES:ds_0,ds_1}TiDB Tested TiDB Server 4.0.8 version, and MySQL Client driver 8.0.13 version is currently available. 
Activate TiDB as storage, and set storage provider to tidb.\nstorage:selector:${SW_STORAGE:tidb}tidb:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:4000/swtest?rewriteBatchedStatements=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:\u0026#34;\u0026#34;}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}dataSource.useAffectedRows:${SW_DATA_SOURCE_USE_AFFECTED_ROWS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password are found in application.yml. For details on settings, refer to the configuration of MySQL above. To understand the function of the parameter rewriteBatchedStatements=true in TiDB, see the document of TiDB best practices.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","title":"Backend storage","url":"/docs/main/v9.4.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 6, 7, 8 MySQL PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.6 2.4.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL), which is incompatible with Apache License 2.0. This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. 
slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 6.x, ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\nMore storage extension solutions Follow the Storage extension development guide in the Project Extensions document.\n","title":"Backend storage","url":"/docs/main/v9.5.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL PostgreSQL BanyanDB  H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases. Default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}OpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  ElasticSearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. 
If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses the following indices for various telemetry data.\n sw_ui_template (UI dashboard settings) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, excluding browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of the ElasticSearch REST API and automatically picks up the correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x; their configurations are as follows:\nNote: ElasticSearch 6 previously worked, but support is no longer promised because it has officially reached end of life.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username and password, which are managed by a 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data sets have been defined in the codes, such as trace segments. The following 3 configs would improve ES performance when storing super-size data in ES.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
This value could be increased to 5 (or more) if users can ensure a single index can support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shard balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should add an extra dayStep to your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, it is recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the absolute path of the secrets management file. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference from using the user, password, and trustStorePass configs in the application.yaml file is that the Secrets Management File is watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update is only applied after the OAP server reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\nMySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\nPostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\nBanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. 
Activate BanyanDB as the storage, and set storage provider to banyandb.\nstorage:banyandb:host:${SW_STORAGE_BANYANDB_HOST:127.0.0.1}port:${SW_STORAGE_BANYANDB_PORT:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","title":"Backend storage","url":"/docs/main/v9.6.0/en/setup/backend/backend-storage/"},{"content":"Backend storage The SkyWalking storage is pluggable. We have provided the following storage solutions, which allow you to easily use one of them by specifying it as the selector in application.yml:\nstorage:selector:${SW_STORAGE:elasticsearch}Natively supported storage:\n H2 OpenSearch ElasticSearch 7 and 8. MySQL and its compatible databases PostgreSQL and its compatible databases BanyanDB(alpha stage)  H2 is the default storage option in the distribution package. It is recommended to use H2 for testing and development ONLY. Elasticsearch and OpenSearch are recommended for production environments, specially for large scale deployments. MySQL and PostgreSQL are recommended for production environments for medium scale deployments, especially for low trace and log sampling rate. Some of their compatible databases may support larger scale better, such as TiDB and AWS Aurora.\nBanyanDB is going to be our next generation storage solution. It is still in alpha stage. It has shown high potential performance improvement. Less than 50% CPU usage and 50% memory usage with 40% disk volume compared to Elasticsearch in the same scale with 100% sampling. We are looking for early adoption, and it would be our first-class recommended storage option since 2024.\n","title":"Backend storage","url":"/docs/main/v9.7.0/en/setup/backend/backend-storage/"},{"content":"Background Write Ahead Logging (WAL) is a technique used in databases to ensure that data is not lost due to system crashes or other failures. The basic idea of WAL is to log changes to a database in a separate file before applying them to the database itself. This way, if there is a system failure, the database can be recovered by replaying the log of changes from the WAL file. BanyanDB leverages the WAL to enhance the data buffer for schema resource writing. In such a system, write operations are first written to the WAL file before being applied to the interval buffer. This ensures that the log is written to disk before the actual data is written. Hence the term \u0026ldquo;write ahead\u0026rdquo;.\nFormat A segment refers to a block of data in the WAL file that contains a sequence of database changes. Once rotate is invoked, a new segment is created to continue logging subsequent changes. 
A \u0026ldquo;WALEntry\u0026rdquo; is a data unit representing a series of changes to a Series. Each WALEntry is written to a segment.\nA WALEntry contains the following fields:\n Length: 8 bytes, the length of the WALEntry. Series ID: 8 bytes, the same as the request Series ID. Count: 4 bytes, how many binary/timestamp pairs are in one WALEntry. Timestamp: 8 bytes. Binary Length: 2 bytes. Binary: the value in the write request.  Write process The writing process in WAL is as follows:\n The changes are first written to the write buffer. Those with the same series ID go to the identical WALEntry. When the buffer is full, the WALEntry is created, then flushed to the disk. WAL can optionally use the snappy compression algorithm to compress the data on disk. Each WALEntry is appended to the tail of the WAL file on the disk.  When entries in the buffer are flushed to the disk, the callback function returned by the write operation is invoked. You can ignore this function to improve writing performance, but doing so risks losing data.\nRead WAL A client can read a single segment by its segment ID. When opening the segment file, the reader decompresses the WAL file if the data was compressed when it was written.\nRotation WAL supports a rotation operation to switch to a new segment. The operation closes the currently open segment and opens a new one, returning the closed segment details.\nDelete A client can delete a segment closed by the rotate operation.\nConfiguration BanyanDB WAL has the following configuration options:\n   Name Default Value Introduction     wal_compression true Compress the WAL entry or not   wal_file_size 64MB The size of the WAL file   wal_buffer_size 16kB The size of the WAL buffer    ","title":"Background","url":"/docs/skywalking-banyandb/latest/concept/wal/"},{"content":"Background Write Ahead Logging (WAL) is a technique used in databases to ensure that data is not lost due to system crashes or other failures. The basic idea of WAL is to log changes to a database in a separate file before applying them to the database itself. This way, if there is a system failure, the database can be recovered by replaying the log of changes from the WAL file. BanyanDB leverages the WAL to enhance the data buffer for schema resource writing. In such a system, write operations are first written to the WAL file before being applied to the interval buffer. This ensures that the log is written to disk before the actual data is written. Hence the term \u0026ldquo;write ahead\u0026rdquo;.\nFormat A segment refers to a block of data in the WAL file that contains a sequence of database changes. Once rotate is invoked, a new segment is created to continue logging subsequent changes. A \u0026ldquo;WALEntry\u0026rdquo; is a data unit representing a series of changes to a Series. Each WALEntry is written to a segment.\nA WALEntry contains the following fields:\n Length: 8 bytes, the length of the WALEntry. Series ID: 8 bytes, the same as the request Series ID. Count: 4 bytes, how many binary/timestamp pairs are in one WALEntry. Timestamp: 8 bytes. Binary Length: 2 bytes. Binary: the value in the write request.  Write process The writing process in WAL is as follows:\n The changes are first written to the write buffer. Those with the same series ID go to the identical WALEntry. When the buffer is full, the WALEntry is created, then flushed to the disk. WAL can optionally use the snappy compression algorithm to compress the data on disk. Each WALEntry is appended to the tail of the WAL file on the disk.  
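As a concrete illustration of the entry layout above, using only the field sizes listed: a WALEntry that batches three writes for a single series, each carrying a 100-byte binary value, occupies 8 (Length) + 8 (Series ID) + 4 (Count) + 3 × (8 + 2 + 100) = 350 bytes on disk, before any optional snappy compression is applied.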
When entries in the buffer are flushed to the disk, the callback function returned by the write operation is invoked. You can ignore this function to improve the writing performance, but it risks losing data.\nRead WAL A client could read a single segment by a segment id. When opening the segment file, the reader will decompress the WAL file if the writing compresses the data.\nRotation WAL supports rotation operation to switch to a new segment. The operation closes the currently open segment and opens a new one, returning the closed segment details.\nDelete A client could delete a segment closed by the rotate operation.\nconfiguration BanyanDB WAL has the following configuration options:\n   Name Default Value Introduction     wal_compression true Compress the WAL entry or not   wal_file_size 64MB The size of the WAL file   wal_buffer_size 16kB The size of WAL buffer.    ","title":"Background","url":"/docs/skywalking-banyandb/v0.5.0/concept/wal/"},{"content":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nThe OAP requires BanyanDB 0.5 server. As BanyanDB is still in the beta phase, we don\u0026rsquo;t provide any compatibility besides the required version.\nstorage:banyandb:targets:${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","title":"BanyanDB","url":"/docs/main/latest/en/setup/backend/storages/banyandb/"},{"content":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nThe OAP requires BanyanDB 0.5 server. 
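For readability, here is the same banyandb storage block re-indented as it would appear in application.yml. The keys and default values are exactly those documented; the selector line is included only to show how the provider is activated.

```yaml
storage:
  selector: ${SW_STORAGE:banyandb}
  banyandb:
    targets: ${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}
    maxBulkSize: ${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}
    flushInterval: ${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}
    metricsShardsNumber: ${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}
    recordShardsNumber: ${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}
    superDatasetShardsFactor: ${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}
    concurrentWriteThreads: ${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}
    profileTaskQueryMaxSize: ${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}  # the max number of fetch tasks in a request
    streamBlockInterval: ${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}              # unit: hour
    streamSegmentInterval: ${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}         # unit: hour
    measureBlockInterval: ${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}            # unit: hour
    measureSegmentInterval: ${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}       # unit: hour
```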
As BanyanDB is still in the beta phase, we don\u0026rsquo;t provide any compatibility besides the required version.\nstorage:banyandb:targets:${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","title":"BanyanDB","url":"/docs/main/next/en/setup/backend/storages/banyandb/"},{"content":"BanyanDB BanyanDB is a dedicated storage implementation developed by the SkyWalking Team and the community. Activate BanyanDB as the storage, and set storage provider to banyandb.\nThe OAP requires BanyanDB 0.5 server. As BanyanDB is still in the beta phase, we don\u0026rsquo;t provide any compatibility besides the required version.\nstorage:banyandb:targets:${SW_STORAGE_BANYANDB_TARGETS:127.0.0.1:17912}maxBulkSize:${SW_STORAGE_BANYANDB_MAX_BULK_SIZE:5000}flushInterval:${SW_STORAGE_BANYANDB_FLUSH_INTERVAL:15}metricsShardsNumber:${SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER:1}recordShardsNumber:${SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER:1}superDatasetShardsFactor:${SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR:2}concurrentWriteThreads:${SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS:15}profileTaskQueryMaxSize:${SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE:200}# the max number of fetch task in a requeststreamBlockInterval:${SW_STORAGE_BANYANDB_STREAM_BLOCK_INTERVAL:4}# Unit is hourstreamSegmentInterval:${SW_STORAGE_BANYANDB_STREAM_SEGMENT_INTERVAL:24}# Unit is hourmeasureBlockInterval:${SW_STORAGE_BANYANDB_MEASURE_BLOCK_INTERVAL:4}# Unit is hourmeasureSegmentInterval:${SW_STORAGE_BANYANDB_MEASURE_SEGMENT_INTERVAL:24}# Unit is hourFor more details, please refer to the documents of BanyanDB and BanyanDB Java Client subprojects.\n","title":"BanyanDB","url":"/docs/main/v9.7.0/en/setup/backend/storages/banyandb/"},{"content":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises \u0026ldquo;Liaison Nodes\u0026rdquo;, \u0026ldquo;Data Nodes\u0026rdquo;, and \u0026ldquo;Meta Nodes\u0026rdquo;. This structure allows for effectively distributing and managing time-series data within the system.\n1. Architectural Overview A BanyanDB installation includes three distinct types of nodes: Data Nodes, Meta Nodes, and Liaison Nodes.\n1.1 Data Nodes Data Nodes hold all the raw time series data, metadata, and indexed data. They handle the storage and management of data, including streams and measures, tag keys and values, as well as field keys and values.\nData Nodes also handle the local query execution. 
When a query is made, it is directed to a Liaison, which then interacts with Data Nodes to execute the distributed query and return results.\nIn addition to persistent raw data, Data Nodes also handle TopN aggregation calculation or other computational tasks.\n1.2 Meta Nodes Meta Nodes is implemented by etcd. They are responsible for maintaining high-level metadata of the cluster, which includes:\n All nodes in the cluster All database schemas  1.3 Liaison Nodes Liaison Nodes serve as gateways, routing traffic to Data Nodes. In addition to routing, they also provide authentication, TTL, and other security services to ensure secure and effective communication without the cluster.\nLiaison Nodes are also responsible for handling computational tasks associated with distributed querying the database. They build query tasks and search for data from Data Nodes.\n1.4 Standalone Mode BanyanDB integrates multiple roles into a single process in the standalone mode, making it simpler and faster to deploy. This mode is especially useful for scenarios with a limited number of data points or for testing and development purposes.\nIn this mode, the single process performs the roles of the Liaison Node, Data Node, and Meta Node. It receives requests, maintains metadata, processes queries, and handles data, all within a unified setup.\n2. Communication within a Cluster All nodes within a BanyanDB cluster communicate with other nodes according to their roles:\n Meta Nodes share high-level metadata about the cluster. Data Nodes store and manage the raw time series data and communicate with Meta Nodes. Liaison Nodes distribute incoming data to the appropriate Data Nodes. They also handle distributed query execution and communicate with Meta Nodes.  Nodes Discovery All nodes in the cluster are discovered by the Meta Nodes. When a node starts up, it registers itself with the Meta Nodes. The Meta Nodes then share this information with the Liaison Nodes which use it to route requests to the appropriate nodes.\n3. Data Organization Different nodes in BanyanDB are responsible for different parts of the database, while Query and Liaison Nodes manage the routing and processing of queries.\n3.1 Meta Nodes Meta Nodes store all high-level metadata that describes the cluster. This data is kept in an etcd-backed database on disk, including information about the shard allocation of each Data Node. This information is used by the Liaison Nodes to route data to the appropriate Data Nodes, based on the sharding key of the data.\nBy storing shard allocation information, Meta Nodes help ensure that data is routed efficiently and accurately across the cluster. This information is constantly updated as the cluster changes, allowing for dynamic allocation of resources and efficient use of available capacity.\n3.2 Data Nodes Data Nodes store all raw time series data, metadata, and indexed data. On disk, the data is organized by \u0026lt;group\u0026gt;/shard-\u0026lt;shard_id\u0026gt;/\u0026lt;segment_id\u0026gt;/. The segment is designed to support retention policy.\n3.3 Liaison Nodes Liaison Nodes do not store data but manage the routing of incoming requests to the appropriate Query or Data Nodes. They also provide authentication, TTL, and other security services.\nThey also handle the computational tasks associated with data queries, interacting directly with Data Nodes to execute queries and return results.\n4. 
Determining Optimal Node Counts When creating a BanyanDB cluster, choosing the appropriate number of each node type to configure and connect is crucial. The number of Meta Nodes should always be odd, for instance, “3”. The number of Data Nodes scales based on your storage and query needs. The number of Liaison Nodes depends on the expected query load and routing complexity.\nIf the write and read load is from different sources, it is recommended to separate the Liaison Nodes for write and read. For instance, if the write load is from metrics, trace or log collectors and the read load is from a web application, it is recommended to separate the Liaison Nodes for write and read.\nThis separation allows for more efficient routing of requests and better performance. It also allows for scaling out of the cluster based on the specific needs of each type of request. For instance, if the write load is high, you can scale out the write Liaison Nodes to handle the increased load.\nThe BanyanDB architecture allows for efficient clustering, scaling, and high availability, making it a robust choice for time series data management.\n5. Writes in a Cluster In BanyanDB, writing data in a cluster is designed to take advantage of the robust capabilities of underlying storage systems, such as Google Compute Persistent Disk or Amazon S3(TBD). These platforms ensure high levels of data durability, making them an optimal choice for storing raw time series data.\n5.1 Data Replication Unlike some other systems, BanyanDB does not support application-level replication, which can consume significant disk space. Instead, it delegates the task of replication to these underlying storage systems. This approach simplifies the BanyanDB architecture and reduces the complexity of managing replication at the application level. This approach also results in significant data savings.\nThe comparison between using a storage system and application-level replication boils down to several key factors: reliability, scalability, and complexity.\nReliability: A storage system provides built-in data durability by automatically storing data across multiple systems. It\u0026rsquo;s designed to deliver 99.999999999% durability, ensuring data is reliably stored and available when needed. While replication can increase data availability, it\u0026rsquo;s dependent on the application\u0026rsquo;s implementation. Any bugs or issues in the replication logic can lead to data loss or inconsistencies.\nScalability: A storage system is highly scalable by design and can store and retrieve any amount of data from anywhere. As your data grows, the system grows with you. You don\u0026rsquo;t need to worry about outgrowing your storage capacity. Scaling application-level replication can be challenging. As data grows, so does the need for more disk space and compute resources, potentially leading to increased costs and management complexity.\nComplexity: With the storage system handling replication, the complexity is abstracted away from the user. The user need not concern themselves with the details of how replication is handled. Managing replication at the application level can be complex. It requires careful configuration, monitoring, and potentially significant engineering effort to maintain.\nFuthermore, the storage system might be cheaper. For instance, S3 can be more cost-effective because it eliminates the need for additional resources required for application-level replication. 
Application-level replication also requires ongoing maintenance, potentially increasing operational costs.\n5.2 Data Sharding Data distribution across the cluster is determined based on the shard_num setting for a group and the specified entity in each resource, be it a stream or measure. The resource’s name with its entity is the sharding key, guiding data distribution to the appropriate Data Node during write operations.\nLiaison Nodes retrieve shard mapping information from Meta Nodes to achieve efficient data routing. This information is used to route data to the appropriate Data Nodes based on the sharding key of the data.\nThis sharding strategy ensures the write load is evenly distributed across the cluster, enhancing write performance and overall system efficiency. BanyanDB uses a hash algorithm for sharding. The hash function maps the sharding key (resource name and entity) to a node in the cluster. Each shard is assigned to the node returned by the hash function.\n5.3 Data Write Path Here\u0026rsquo;s a text-based diagram illustrating the data write path in BanyanDB:\nUser | | API Request (Write) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Routes Request | (Identifies relevant Data Nodes | | and dispatches write request) | ------------------------------------ | v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request is a write request, containing the data to be written to the database. The Liaison Node, which is stateless, identifies the relevant Data Nodes that will store the data based on the entity specified in the request. The write request is executed across the identified Data Nodes. Each Data Node writes the data to its shard.  This architecture allows BanyanDB to execute write requests efficiently across a distributed system, leveraging the stateless nature and routing/writing capabilities of the Liaison Node, and the distributed storage of Data Nodes.\n6. Queries in a Cluster BanyanDB utilizes a distributed architecture that allows for efficient query processing. When a query is made, it is directed to a Liaison Node.\n6.1 Query Routing Liaison Nodes do not use shard mapping information from Meta Nodes to execute distributed queries. Instead, they access all Data Nodes to retrieve the necessary data for queries. As the query load is lower, it is practical for liaison nodes to access all data nodes for this purpose. It may increase network traffic, but simplifies scaling out of the cluster.\nCompared to the write load, the query load is relatively low. For instance, in a time series database, the write load is typically 100x higher than the query load. This is because the write load is driven by the number of devices sending data to the database, while the query load is driven by the number of users accessing the data.\nThis strategy enables scaling out of the cluster. When the cluster scales out, the liaison node can access all data nodes without any mapping info changes. It eliminates the need to backup previous shard mapping information, reducing complexity of scaling out.\n6.2 Query Execution Parallel execution significantly enhances the efficiency of data retrieval and reduces the overall query processing time. 
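(A brief aside on the shard_num mentioned in section 5.2 above: it is declared on the group that owns a stream or measure when the schema is created. The sketch below shows roughly what such a group definition looks like; the field names and intervals are assumptions to verify against the BanyanDB schema documentation for your version, not an authoritative example.)

```yaml
# Illustrative group definition only; verify field names against the BanyanDB schema docs.
metadata:
  name: measure-default
catalog: CATALOG_MEASURE
resource_opts:
  shard_num: 2            # resource name + entity is hashed onto one of these shards
  segment_interval:
    unit: UNIT_DAY
    num: 1
  ttl:
    unit: UNIT_DAY
    num: 7
```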
It allows for faster response times as the workload of the query is shared across multiple shards, each working on their part of the problem simultaneously. This feature makes BanyanDB particularly effective for large-scale data analysis tasks.\nIn summary, BanyanDB\u0026rsquo;s approach to querying leverages its unique distributed architecture, enabling high-performance data retrieval across multiple shards in parallel.\n6.3 Query Path User | | API Request (Query) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Distributes Query | (Access all Data nodes to | | execute distributed queries) | ------------------------------------ | | | v v v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request may be a query for specific data. The Liaison Node builds a distributed query to select all data nodes. The query is executed in parallel across all Data Nodes. Each Data Node execute a local query plan to process the data stored in its shard concurrently with the others. The results from each shard are then returned to the Liaison Node, which consolidates them into a single response to the user.  This architecture allows BanyanDB to execute queries efficiently across a distributed system, leveraging the distributed query capabilities of the Liaison Node and the parallel processing of Data Nodes.\n","title":"BanyanDB Clustering","url":"/docs/skywalking-banyandb/latest/concept/clustering/"},{"content":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises \u0026ldquo;Liaison Nodes\u0026rdquo;, \u0026ldquo;Data Nodes\u0026rdquo;, and \u0026ldquo;Meta Nodes\u0026rdquo;. This structure allows for effectively distributing and managing time-series data within the system.\n1. Architectural Overview A BanyanDB installation includes three distinct types of nodes: Data Nodes, Meta Nodes, and Liaison Nodes.\n1.1 Data Nodes Data Nodes hold all the raw time series data, metadata, and indexed data. They handle the storage and management of data, including streams and measures, tag keys and values, as well as field keys and values.\nData Nodes also handle the local query execution. When a query is made, it is directed to a Liaison, which then interacts with Data Nodes to execute the distributed query and return results.\nIn addition to persistent raw data, Data Nodes also handle TopN aggregation calculation or other computational tasks.\n1.2 Meta Nodes Meta Nodes is implemented by etcd. They are responsible for maintaining high-level metadata of the cluster, which includes:\n All nodes in the cluster All database schemas  1.3 Liaison Nodes Liaison Nodes serve as gateways, routing traffic to Data Nodes. In addition to routing, they also provide authentication, TTL, and other security services to ensure secure and effective communication without the cluster.\nLiaison Nodes are also responsible for handling computational tasks associated with distributed querying the database. They build query tasks and search for data from Data Nodes.\n1.4 Standalone Mode BanyanDB integrates multiple roles into a single process in the standalone mode, making it simpler and faster to deploy. 
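For trying the standalone mode described here, a single container is enough. The following compose fragment is a sketch only: the image name and tag, the standalone subcommand, and the 17913 HTTP port are assumptions to confirm against the BanyanDB installation guide, while 17912 matches the gRPC port used by the OAP banyandb storage configuration above.

```yaml
# Hypothetical single-process BanyanDB (liaison + data + meta roles in one) for testing.
services:
  banyandb:
    image: apache/skywalking-banyandb:0.5.0   # image and tag are assumptions
    command: standalone                       # assumed subcommand that enables standalone mode
    ports:
      - "17912:17912"   # gRPC endpoint targeted by the OAP storage settings
      - "17913:17913"   # HTTP endpoint (assumed default)
```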
This mode is especially useful for scenarios with a limited number of data points or for testing and development purposes.\nIn this mode, the single process performs the roles of the Liaison Node, Data Node, and Meta Node. It receives requests, maintains metadata, processes queries, and handles data, all within a unified setup.\n2. Communication within a Cluster All nodes within a BanyanDB cluster communicate with other nodes according to their roles:\n Meta Nodes share high-level metadata about the cluster. Data Nodes store and manage the raw time series data and communicate with Meta Nodes. Liaison Nodes distribute incoming data to the appropriate Data Nodes. They also handle distributed query execution and communicate with Meta Nodes.  Nodes Discovery All nodes in the cluster are discovered by the Meta Nodes. When a node starts up, it registers itself with the Meta Nodes. The Meta Nodes then share this information with the Liaison Nodes which use it to route requests to the appropriate nodes.\nIf data nodes are unable to connect to the meta nodes due to network partition or other issues, they will be removed from the meta nodes. However, the liaison nodes will not remove the data nodes from their routing list until the data nodes are also unreachable from the liaison nodes' perspective. This approach ensures that the system can continue to function even if some data nodes are temporarily unavailable from the meta nodes.\n3. Data Organization Different nodes in BanyanDB are responsible for different parts of the database, while Query and Liaison Nodes manage the routing and processing of queries.\n3.1 Meta Nodes Meta Nodes store all high-level metadata that describes the cluster. This data is kept in an etcd-backed database on disk, including information about the shard allocation of each Data Node. This information is used by the Liaison Nodes to route data to the appropriate Data Nodes, based on the sharding key of the data.\nBy storing shard allocation information, Meta Nodes help ensure that data is routed efficiently and accurately across the cluster. This information is constantly updated as the cluster changes, allowing for dynamic allocation of resources and efficient use of available capacity.\n3.2 Data Nodes Data Nodes store all raw time series data, metadata, and indexed data. On disk, the data is organized by \u0026lt;group\u0026gt;/shard-\u0026lt;shard_id\u0026gt;/\u0026lt;segment_id\u0026gt;/. The segment is designed to support retention policy.\n3.3 Liaison Nodes Liaison Nodes do not store data but manage the routing of incoming requests to the appropriate Query or Data Nodes. They also provide authentication, TTL, and other security services.\nThey also handle the computational tasks associated with data queries, interacting directly with Data Nodes to execute queries and return results.\n4. Determining Optimal Node Counts When creating a BanyanDB cluster, choosing the appropriate number of each node type to configure and connect is crucial. The number of Meta Nodes should always be odd, for instance, “3”. The number of Data Nodes scales based on your storage and query needs. The number of Liaison Nodes depends on the expected query load and routing complexity.\nIf the write and read load is from different sources, it is recommended to separate the Liaison Nodes for write and read. 
For instance, if the write load is from metrics, trace or log collectors and the read load is from a web application, it is recommended to separate the Liaison Nodes for write and read.\nThis separation allows for more efficient routing of requests and better performance. It also allows for scaling out of the cluster based on the specific needs of each type of request. For instance, if the write load is high, you can scale out the write Liaison Nodes to handle the increased load.\nThe BanyanDB architecture allows for efficient clustering, scaling, and high availability, making it a robust choice for time series data management.\n5. Writes in a Cluster In BanyanDB, writing data in a cluster is designed to take advantage of the robust capabilities of underlying storage systems, such as Google Compute Persistent Disk or Amazon S3(TBD). These platforms ensure high levels of data durability, making them an optimal choice for storing raw time series data.\n5.1 Data Replication Unlike some other systems, BanyanDB does not support application-level replication, which can consume significant disk space. Instead, it delegates the task of replication to these underlying storage systems. This approach simplifies the BanyanDB architecture and reduces the complexity of managing replication at the application level. This approach also results in significant data savings.\nThe comparison between using a storage system and application-level replication boils down to several key factors: reliability, scalability, and complexity.\nReliability: A storage system provides built-in data durability by automatically storing data across multiple systems. It\u0026rsquo;s designed to deliver 99.999999999% durability, ensuring data is reliably stored and available when needed. While replication can increase data availability, it\u0026rsquo;s dependent on the application\u0026rsquo;s implementation. Any bugs or issues in the replication logic can lead to data loss or inconsistencies.\nScalability: A storage system is highly scalable by design and can store and retrieve any amount of data from anywhere. As your data grows, the system grows with you. You don\u0026rsquo;t need to worry about outgrowing your storage capacity. Scaling application-level replication can be challenging. As data grows, so does the need for more disk space and compute resources, potentially leading to increased costs and management complexity.\nComplexity: With the storage system handling replication, the complexity is abstracted away from the user. The user need not concern themselves with the details of how replication is handled. Managing replication at the application level can be complex. It requires careful configuration, monitoring, and potentially significant engineering effort to maintain.\nFuthermore, the storage system might be cheaper. For instance, S3 can be more cost-effective because it eliminates the need for additional resources required for application-level replication. Application-level replication also requires ongoing maintenance, potentially increasing operational costs.\n5.2 Data Sharding Data distribution across the cluster is determined based on the shard_num setting for a group and the specified entity in each resource, be it a stream or measure. The resource’s name with its entity is the sharding key, guiding data distribution to the appropriate Data Node during write operations.\nLiaison Nodes retrieve shard mapping information from Meta Nodes to achieve efficient data routing. 
This information is used to route data to the appropriate Data Nodes based on the sharding key of the data.\nThis sharding strategy ensures the write load is evenly distributed across the cluster, enhancing write performance and overall system efficiency. BanyanDB uses a hash algorithm for sharding. The hash function maps the sharding key (resource name and entity) to a node in the cluster. Each shard is assigned to the node returned by the hash function.\n5.3 Data Write Path Here\u0026rsquo;s a text-based diagram illustrating the data write path in BanyanDB:\nUser | | API Request (Write) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Routes Request | (Identifies relevant Data Nodes | | and dispatches write request) | ------------------------------------ | v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request is a write request, containing the data to be written to the database. The Liaison Node, which is stateless, identifies the relevant Data Nodes that will store the data based on the entity specified in the request. The write request is executed across the identified Data Nodes. Each Data Node writes the data to its shard.  This architecture allows BanyanDB to execute write requests efficiently across a distributed system, leveraging the stateless nature and routing/writing capabilities of the Liaison Node, and the distributed storage of Data Nodes.\n6. Queries in a Cluster BanyanDB utilizes a distributed architecture that allows for efficient query processing. When a query is made, it is directed to a Liaison Node.\n6.1 Query Routing Liaison Nodes do not use shard mapping information from Meta Nodes to execute distributed queries. Instead, they access all Data Nodes to retrieve the necessary data for queries. As the query load is lower, it is practical for liaison nodes to access all data nodes for this purpose. It may increase network traffic, but simplifies scaling out of the cluster.\nCompared to the write load, the query load is relatively low. For instance, in a time series database, the write load is typically 100x higher than the query load. This is because the write load is driven by the number of devices sending data to the database, while the query load is driven by the number of users accessing the data.\nThis strategy enables scaling out of the cluster. When the cluster scales out, the liaison node can access all data nodes without any mapping info changes. It eliminates the need to backup previous shard mapping information, reducing complexity of scaling out.\n6.2 Query Execution Parallel execution significantly enhances the efficiency of data retrieval and reduces the overall query processing time. It allows for faster response times as the workload of the query is shared across multiple shards, each working on their part of the problem simultaneously. 
This feature makes BanyanDB particularly effective for large-scale data analysis tasks.\nIn summary, BanyanDB\u0026rsquo;s approach to querying leverages its unique distributed architecture, enabling high-performance data retrieval across multiple shards in parallel.\n6.3 Query Path User | | API Request (Query) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Distributes Query | (Access all Data nodes to | | execute distributed queries) | ------------------------------------ | | | v v v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request may be a query for specific data. The Liaison Node builds a distributed query to select all data nodes. The query is executed in parallel across all Data Nodes. Each Data Node execute a local query plan to process the data stored in its shard concurrently with the others. The results from each shard are then returned to the Liaison Node, which consolidates them into a single response to the user.  This architecture allows BanyanDB to execute queries efficiently across a distributed system, leveraging the distributed query capabilities of the Liaison Node and the parallel processing of Data Nodes.\n7. Failover BanyanDB is designed to be highly available and fault-tolerant.\nIn case of a Data Node failure, the system can automatically recover and continue to operate.\nLiaison nodes have a built-in mechanism to detect the failure of a Data Node. When a Data Node fails, the Liaison Node will automatically route requests to other available Data Nodes with the same shard. This ensures that the system remains operational even in the face of node failures. Thanks to the query mode, which allows Liaison Nodes to access all Data Nodes, the system can continue to function even if some Data Nodes are unavailable. When the failed data nodes are restored, the system won\u0026rsquo;t reply data to them since the data is still retrieved from other nodes.\nIn the case of a Liaison Node failure, the system can be configured to have multiple Liaison Nodes for redundancy. If one Liaison Node fails, the other Liaison Nodes can take over its responsibilities, ensuring that the system remains available.\n Please note that any written request which triggers the failover process will be rejected, and the client should re-send the request.\n ","title":"BanyanDB Clustering","url":"/docs/skywalking-banyandb/next/concept/clustering/"},{"content":"BanyanDB Clustering BanyanDB Clustering introduces a robust and scalable architecture that comprises \u0026ldquo;Liaison Nodes\u0026rdquo;, \u0026ldquo;Data Nodes\u0026rdquo;, and \u0026ldquo;Meta Nodes\u0026rdquo;. This structure allows for effectively distributing and managing time-series data within the system.\n1. Architectural Overview A BanyanDB installation includes three distinct types of nodes: Data Nodes, Meta Nodes, and Liaison Nodes.\n1.1 Data Nodes Data Nodes hold all the raw time series data, metadata, and indexed data. They handle the storage and management of data, including streams and measures, tag keys and values, as well as field keys and values.\nData Nodes also handle the local query execution. 
When a query is made, it is directed to a Liaison, which then interacts with Data Nodes to execute the distributed query and return results.\nIn addition to persistent raw data, Data Nodes also handle TopN aggregation calculation or other computational tasks.\n1.2 Meta Nodes Meta Nodes is implemented by etcd. They are responsible for maintaining high-level metadata of the cluster, which includes:\n All nodes in the cluster All database schemas  1.3 Liaison Nodes Liaison Nodes serve as gateways, routing traffic to Data Nodes. In addition to routing, they also provide authentication, TTL, and other security services to ensure secure and effective communication without the cluster.\nLiaison Nodes are also responsible for handling computational tasks associated with distributed querying the database. They build query tasks and search for data from Data Nodes.\n1.4 Standalone Mode BanyanDB integrates multiple roles into a single process in the standalone mode, making it simpler and faster to deploy. This mode is especially useful for scenarios with a limited number of data points or for testing and development purposes.\nIn this mode, the single process performs the roles of the Liaison Node, Data Node, and Meta Node. It receives requests, maintains metadata, processes queries, and handles data, all within a unified setup.\n2. Communication within a Cluster All nodes within a BanyanDB cluster communicate with other nodes according to their roles:\n Meta Nodes share high-level metadata about the cluster. Data Nodes store and manage the raw time series data and communicate with Meta Nodes. Liaison Nodes distribute incoming data to the appropriate Data Nodes. They also handle distributed query execution and communicate with Meta Nodes.  Nodes Discovery All nodes in the cluster are discovered by the Meta Nodes. When a node starts up, it registers itself with the Meta Nodes. The Meta Nodes then share this information with the Liaison Nodes which use it to route requests to the appropriate nodes.\n3. Data Organization Different nodes in BanyanDB are responsible for different parts of the database, while Query and Liaison Nodes manage the routing and processing of queries.\n3.1 Meta Nodes Meta Nodes store all high-level metadata that describes the cluster. This data is kept in an etcd-backed database on disk, including information about the shard allocation of each Data Node. This information is used by the Liaison Nodes to route data to the appropriate Data Nodes, based on the sharding key of the data.\nBy storing shard allocation information, Meta Nodes help ensure that data is routed efficiently and accurately across the cluster. This information is constantly updated as the cluster changes, allowing for dynamic allocation of resources and efficient use of available capacity.\n3.2 Data Nodes Data Nodes store all raw time series data, metadata, and indexed data. On disk, the data is organized by \u0026lt;group\u0026gt;/shard-\u0026lt;shard_id\u0026gt;/\u0026lt;segment_id\u0026gt;/. The segment is designed to support retention policy.\n3.3 Liaison Nodes Liaison Nodes do not store data but manage the routing of incoming requests to the appropriate Query or Data Nodes. They also provide authentication, TTL, and other security services.\nThey also handle the computational tasks associated with data queries, interacting directly with Data Nodes to execute queries and return results.\n4. 
Determining Optimal Node Counts When creating a BanyanDB cluster, choosing the appropriate number of each node type to configure and connect is crucial. The number of Meta Nodes should always be odd, for instance, “3”. The number of Data Nodes scales based on your storage and query needs. The number of Liaison Nodes depends on the expected query load and routing complexity.\nIf the write and read load is from different sources, it is recommended to separate the Liaison Nodes for write and read. For instance, if the write load is from metrics, trace or log collectors and the read load is from a web application, it is recommended to separate the Liaison Nodes for write and read.\nThis separation allows for more efficient routing of requests and better performance. It also allows for scaling out of the cluster based on the specific needs of each type of request. For instance, if the write load is high, you can scale out the write Liaison Nodes to handle the increased load.\nThe BanyanDB architecture allows for efficient clustering, scaling, and high availability, making it a robust choice for time series data management.\n5. Writes in a Cluster In BanyanDB, writing data in a cluster is designed to take advantage of the robust capabilities of underlying storage systems, such as Google Compute Persistent Disk or Amazon S3(TBD). These platforms ensure high levels of data durability, making them an optimal choice for storing raw time series data.\n5.1 Data Replication Unlike some other systems, BanyanDB does not support application-level replication, which can consume significant disk space. Instead, it delegates the task of replication to these underlying storage systems. This approach simplifies the BanyanDB architecture and reduces the complexity of managing replication at the application level. This approach also results in significant data savings.\nThe comparison between using a storage system and application-level replication boils down to several key factors: reliability, scalability, and complexity.\nReliability: A storage system provides built-in data durability by automatically storing data across multiple systems. It\u0026rsquo;s designed to deliver 99.999999999% durability, ensuring data is reliably stored and available when needed. While replication can increase data availability, it\u0026rsquo;s dependent on the application\u0026rsquo;s implementation. Any bugs or issues in the replication logic can lead to data loss or inconsistencies.\nScalability: A storage system is highly scalable by design and can store and retrieve any amount of data from anywhere. As your data grows, the system grows with you. You don\u0026rsquo;t need to worry about outgrowing your storage capacity. Scaling application-level replication can be challenging. As data grows, so does the need for more disk space and compute resources, potentially leading to increased costs and management complexity.\nComplexity: With the storage system handling replication, the complexity is abstracted away from the user. The user need not concern themselves with the details of how replication is handled. Managing replication at the application level can be complex. It requires careful configuration, monitoring, and potentially significant engineering effort to maintain.\nFuthermore, the storage system might be cheaper. For instance, S3 can be more cost-effective because it eliminates the need for additional resources required for application-level replication. 
Application-level replication also requires ongoing maintenance, potentially increasing operational costs.\n5.2 Data Sharding Data distribution across the cluster is determined based on the shard_num setting for a group and the specified entity in each resource, be it a stream or measure. The resource’s name with its entity is the sharding key, guiding data distribution to the appropriate Data Node during write operations.\nLiaison Nodes retrieve shard mapping information from Meta Nodes to achieve efficient data routing. This information is used to route data to the appropriate Data Nodes based on the sharding key of the data.\nThis sharding strategy ensures the write load is evenly distributed across the cluster, enhancing write performance and overall system efficiency. BanyanDB uses a hash algorithm for sharding. The hash function maps the sharding key (resource name and entity) to a node in the cluster. Each shard is assigned to the node returned by the hash function.\n5.3 Data Write Path Here\u0026rsquo;s a text-based diagram illustrating the data write path in BanyanDB:\nUser | | API Request (Write) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Routes Request | (Identifies relevant Data Nodes | | and dispatches write request) | ------------------------------------ | v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request is a write request, containing the data to be written to the database. The Liaison Node, which is stateless, identifies the relevant Data Nodes that will store the data based on the entity specified in the request. The write request is executed across the identified Data Nodes. Each Data Node writes the data to its shard.  This architecture allows BanyanDB to execute write requests efficiently across a distributed system, leveraging the stateless nature and routing/writing capabilities of the Liaison Node, and the distributed storage of Data Nodes.\n6. Queries in a Cluster BanyanDB utilizes a distributed architecture that allows for efficient query processing. When a query is made, it is directed to a Liaison Node.\n6.1 Query Routing Liaison Nodes do not use shard mapping information from Meta Nodes to execute distributed queries. Instead, they access all Data Nodes to retrieve the necessary data for queries. As the query load is lower, it is practical for liaison nodes to access all data nodes for this purpose. It may increase network traffic, but simplifies scaling out of the cluster.\nCompared to the write load, the query load is relatively low. For instance, in a time series database, the write load is typically 100x higher than the query load. This is because the write load is driven by the number of devices sending data to the database, while the query load is driven by the number of users accessing the data.\nThis strategy enables scaling out of the cluster. When the cluster scales out, the liaison node can access all data nodes without any mapping info changes. It eliminates the need to backup previous shard mapping information, reducing complexity of scaling out.\n6.2 Query Execution Parallel execution significantly enhances the efficiency of data retrieval and reduces the overall query processing time. 
It allows for faster response times as the workload of the query is shared across multiple shards, each working on their part of the problem simultaneously. This feature makes BanyanDB particularly effective for large-scale data analysis tasks.\nIn summary, BanyanDB\u0026rsquo;s approach to querying leverages its unique distributed architecture, enabling high-performance data retrieval across multiple shards in parallel.\n6.3 Query Path User | | API Request (Query) | v ------------------------------------ | Liaison Node | \u0026lt;--- Stateless Node, Distributes Query | (Access all Data nodes to | | execute distributed queries) | ------------------------------------ | | | v v v ----------------- ----------------- ----------------- | Data Node 1 | | Data Node 2 | | Data Node 3 | | (Shard 1) | | (Shard 2) | | (Shard 3) | ----------------- ----------------- -----------------  A user makes an API request to the Liaison Node. This request may be a query for specific data. The Liaison Node builds a distributed query to select all data nodes. The query is executed in parallel across all Data Nodes. Each Data Node execute a local query plan to process the data stored in its shard concurrently with the others. The results from each shard are then returned to the Liaison Node, which consolidates them into a single response to the user.  This architecture allows BanyanDB to execute queries efficiently across a distributed system, leveraging the distributed query capabilities of the Liaison Node and the parallel processing of Data Nodes.\n","title":"BanyanDB Clustering","url":"/docs/skywalking-banyandb/v0.5.0/concept/clustering/"},{"content":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the BookKeeper and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the `Layer: BOOKKEEPER.\nData flow  BookKeeper exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from BookKeeper cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up BookKeeper Cluster. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  BookKeeper Monitoring Bookkeeper monitoring provides multidimensional metrics monitoring of BookKeeper cluster as Layer: BOOKKEEPER Service in the OAP. In each cluster, the nodes are represented as Instance.\nBookKeeper Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Bookie Ledgers Count meter_bookkeeper_bookie_ledgers_count The number of the bookie ledgers. Bookkeeper Cluster   Bookie Ledger Writable Dirs meter_bookkeeper_bookie_ledger_writable_dirs The number of writable directories in the bookie. Bookkeeper Cluster   Bookie Ledger Dir Usage meter_bookkeeper_bookie_ledger_dir_data_bookkeeper_ledgers_usage The number of successfully created connections. Bookkeeper Cluster   Bookie Entries Count meter_bookkeeper_bookie_entries_count The number of the bookie write entries. Bookkeeper Cluster   Bookie Write Cache Size meter_bookkeeper_bookie_write_cache_size The size of the bookie write cache (MB). 
Bookkeeper Cluster   Bookie Write Cache Entry Count meter_bookkeeper_bookie_write_cache_count The entry count in the bookie write cache. Bookkeeper Cluster   Bookie Read Cache Size meter_bookkeeper_bookie_read_cache_size The size of the bookie read cache (MB). Bookkeeper Cluster   Bookie Read Cache Entry Count meter_bookkeeper_bookie_read_cache_count The entry count in the bookie read cache. Bookkeeper Cluster   Bookie Read Rate meter_bookkeeper_bookie_read_rate The bookie read rate (bytes/s). Bookkeeper Cluster   Bookie Write Rate meter_bookkeeper_bookie_write_rate The bookie write rate (bytes/s). Bookkeeper Cluster    BookKeeper Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     JVM Memory Pool Used meter_bookkeeper_node_jvm_memory_pool_used The usage of the broker jvm memory pool. Bookkeeper Bookie   JVM Memory meter_bookkeeper_node_jvm_memory_used meter_bookkeeper_node_jvm_memory_committed meter_bookkeeper_node_jvm_memory_init The usage of the broker jvm memory. Bookkeeper Bookie   JVM Threads meter_bookkeeper_node_jvm_threads_current meter_bookkeeper_node_jvm_threads_daemon meter_bookkeeper_node_jvm_threads_peak meter_bookkeeper_node_jvm_threads_deadlocked The count of the jvm threads. Bookkeeper Bookie   GC Time meter_bookkeeper_node_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Bookkeeper Bookie   GC Count meter_bookkeeper_node_jvm_gc_collection_seconds_count The count of a given JVM garbage. Bookkeeper Bookie   Thread Executor Completed meter_bookkeeper_node_thread_executor_completed The count of the executor thread. Bookkeeper Bookie   Thread Executor Tasks meter_bookkeeper_node_thread_executor_tasks_completed meter_bookkeeper_node_thread_executor_tasks_rejected meter_bookkeeper_node_thread_executor_tasks_failed The count of the executor tasks. Bookkeeper Bookie   Pooled Threads meter_bookkeeper_node_high_priority_threads meter_bookkeeper_node_read_thread_pool_threads The count of the pooled thread. Bookkeeper Bookie   Pooled Threads Max Queue Size meter_bookkeeper_node_high_priority_thread_max_queue_size meter_bookkeeper_node_read_thread_pool_max_queue_size The count of the pooled threads max queue size. Bookkeeper Bookie    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/bookkeeper/bookkeeper-cluster.yaml, otel-rules/bookkeeper/bookkeeper-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/bookkeeper.\n","title":"BookKeeper monitoring","url":"/docs/main/latest/en/setup/backend/backend-bookkeeper-monitoring/"},{"content":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the BookKeeper and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the `Layer: BOOKKEEPER.\nData flow  BookKeeper exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from BookKeeper cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up BookKeeper Cluster. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  
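The referenced OpenTelemetry Collector configuration generally takes the shape below: a Prometheus receiver that scrapes the bookies and an OTLP gRPC exporter that pushes to the OAP. The bookie scrape port and the OAP address are placeholders, and the example shipped with SkyWalking remains the authoritative version, so treat this as a sketch.

```yaml
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: bookkeeper-monitoring
          scrape_interval: 30s
          static_configs:
            - targets: ["bookie-1:8000", "bookie-2:8000"]   # bookie Prometheus port is environment-specific
processors:
  batch: {}
exporters:
  otlp:
    endpoint: oap:11800    # SkyWalking OAP gRPC/OTLP endpoint
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
```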
BookKeeper Monitoring Bookkeeper monitoring provides multidimensional metrics monitoring of BookKeeper cluster as Layer: BOOKKEEPER Service in the OAP. In each cluster, the nodes are represented as Instance.\nBookKeeper Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Bookie Ledgers Count meter_bookkeeper_bookie_ledgers_count The number of the bookie ledgers. Bookkeeper Cluster   Bookie Ledger Writable Dirs meter_bookkeeper_bookie_ledger_writable_dirs The number of writable directories in the bookie. Bookkeeper Cluster   Bookie Ledger Dir Usage meter_bookkeeper_bookie_ledger_dir_data_bookkeeper_ledgers_usage The number of successfully created connections. Bookkeeper Cluster   Bookie Entries Count meter_bookkeeper_bookie_entries_count The number of the bookie write entries. Bookkeeper Cluster   Bookie Write Cache Size meter_bookkeeper_bookie_write_cache_size The size of the bookie write cache (MB). Bookkeeper Cluster   Bookie Write Cache Entry Count meter_bookkeeper_bookie_write_cache_count The entry count in the bookie write cache. Bookkeeper Cluster   Bookie Read Cache Size meter_bookkeeper_bookie_read_cache_size The size of the bookie read cache (MB). Bookkeeper Cluster   Bookie Read Cache Entry Count meter_bookkeeper_bookie_read_cache_count The entry count in the bookie read cache. Bookkeeper Cluster   Bookie Read Rate meter_bookkeeper_bookie_read_rate The bookie read rate (bytes/s). Bookkeeper Cluster   Bookie Write Rate meter_bookkeeper_bookie_write_rate The bookie write rate (bytes/s). Bookkeeper Cluster    BookKeeper Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     JVM Memory Pool Used meter_bookkeeper_node_jvm_memory_pool_used The usage of the broker jvm memory pool. Bookkeeper Bookie   JVM Memory meter_bookkeeper_node_jvm_memory_used meter_bookkeeper_node_jvm_memory_committed meter_bookkeeper_node_jvm_memory_init The usage of the broker jvm memory. Bookkeeper Bookie   JVM Threads meter_bookkeeper_node_jvm_threads_current meter_bookkeeper_node_jvm_threads_daemon meter_bookkeeper_node_jvm_threads_peak meter_bookkeeper_node_jvm_threads_deadlocked The count of the jvm threads. Bookkeeper Bookie   GC Time meter_bookkeeper_node_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Bookkeeper Bookie   GC Count meter_bookkeeper_node_jvm_gc_collection_seconds_count The count of a given JVM garbage. Bookkeeper Bookie   Thread Executor Completed meter_bookkeeper_node_thread_executor_completed The count of the executor thread. Bookkeeper Bookie   Thread Executor Tasks meter_bookkeeper_node_thread_executor_tasks_completed meter_bookkeeper_node_thread_executor_tasks_rejected meter_bookkeeper_node_thread_executor_tasks_failed The count of the executor tasks. Bookkeeper Bookie   Pooled Threads meter_bookkeeper_node_high_priority_threads meter_bookkeeper_node_read_thread_pool_threads The count of the pooled thread. Bookkeeper Bookie   Pooled Threads Max Queue Size meter_bookkeeper_node_high_priority_thread_max_queue_size meter_bookkeeper_node_read_thread_pool_max_queue_size The count of the pooled threads max queue size. Bookkeeper Bookie    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/bookkeeper/bookkeeper-cluster.yaml, otel-rules/bookkeeper/bookkeeper-node.yaml. 
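Those rule files follow the general otel-rules layout (filter, expSuffix, metricPrefix, metricsRules). The fragment below is only an illustrative sketch of that layout, not a copy of the shipped BookKeeper rules; the job name, rule name, and MAL expression are assumptions:
filter: "{ tags -> tags.job_name == 'bookkeeper-monitoring' }"   # assumed job name
expSuffix: tag({tags -> tags.cluster = 'bookkeeper::' + tags.cluster}).service(['cluster'], Layer.BOOKKEEPER)
metricPrefix: meter_bookkeeper
metricsRules:
  - name: bookie_ledgers_count                 # illustrative rule name
    exp: bookie_ledgers_count.sum(['cluster'])  # illustrative MAL expression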
The Bookkeeper dashboard panel configurations are found in /config/ui-initialized-templates/bookkeeper.\n","title":"BookKeeper monitoring","url":"/docs/main/next/en/setup/backend/backend-bookkeeper-monitoring/"},{"content":"BookKeeper monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data from the BookKeeper and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the `Layer: BOOKKEEPER.\nData flow  BookKeeper exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from BookKeeper cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up BookKeeper Cluster. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  BookKeeper Monitoring Bookkeeper monitoring provides multidimensional metrics monitoring of BookKeeper cluster as Layer: BOOKKEEPER Service in the OAP. In each cluster, the nodes are represented as Instance.\nBookKeeper Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Bookie Ledgers Count meter_bookkeeper_bookie_ledgers_count The number of the bookie ledgers. Bookkeeper Cluster   Bookie Ledger Writable Dirs meter_bookkeeper_bookie_ledger_writable_dirs The number of writable directories in the bookie. Bookkeeper Cluster   Bookie Ledger Dir Usage meter_bookkeeper_bookie_ledger_dir_data_bookkeeper_ledgers_usage The number of successfully created connections. Bookkeeper Cluster   Bookie Entries Count meter_bookkeeper_bookie_entries_count The number of the bookie write entries. Bookkeeper Cluster   Bookie Write Cache Size meter_bookkeeper_bookie_write_cache_size The size of the bookie write cache (MB). Bookkeeper Cluster   Bookie Write Cache Entry Count meter_bookkeeper_bookie_write_cache_count The entry count in the bookie write cache. Bookkeeper Cluster   Bookie Read Cache Size meter_bookkeeper_bookie_read_cache_size The size of the bookie read cache (MB). Bookkeeper Cluster   Bookie Read Cache Entry Count meter_bookkeeper_bookie_read_cache_count The entry count in the bookie read cache. Bookkeeper Cluster   Bookie Read Rate meter_bookkeeper_bookie_read_rate The bookie read rate (bytes/s). Bookkeeper Cluster   Bookie Write Rate meter_bookkeeper_bookie_write_rate The bookie write rate (bytes/s). Bookkeeper Cluster    BookKeeper Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     JVM Memory Pool Used meter_bookkeeper_node_jvm_memory_pool_used The usage of the broker jvm memory pool. Bookkeeper Bookie   JVM Memory meter_bookkeeper_node_jvm_memory_used meter_bookkeeper_node_jvm_memory_committed meter_bookkeeper_node_jvm_memory_init The usage of the broker jvm memory. Bookkeeper Bookie   JVM Threads meter_bookkeeper_node_jvm_threads_current meter_bookkeeper_node_jvm_threads_daemon meter_bookkeeper_node_jvm_threads_peak meter_bookkeeper_node_jvm_threads_deadlocked The count of the jvm threads. Bookkeeper Bookie   GC Time meter_bookkeeper_node_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Bookkeeper Bookie   GC Count meter_bookkeeper_node_jvm_gc_collection_seconds_count The count of a given JVM garbage. 
Bookkeeper Bookie   Thread Executor Completed meter_bookkeeper_node_thread_executor_completed The count of the executor thread. Bookkeeper Bookie   Thread Executor Tasks meter_bookkeeper_node_thread_executor_tasks_completed meter_bookkeeper_node_thread_executor_tasks_rejected meter_bookkeeper_node_thread_executor_tasks_failed The count of the executor tasks. Bookkeeper Bookie   Pooled Threads meter_bookkeeper_node_high_priority_threads meter_bookkeeper_node_read_thread_pool_threads The count of the pooled thread. Bookkeeper Bookie   Pooled Threads Max Queue Size meter_bookkeeper_node_high_priority_thread_max_queue_size meter_bookkeeper_node_read_thread_pool_max_queue_size The count of the pooled threads max queue size. Bookkeeper Bookie    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/bookkeeper/bookkeeper-cluster.yaml, otel-rules/bookkeeper/bookkeeper-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/bookkeeper.\n","title":"BookKeeper monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-bookkeeper-monitoring/"},{"content":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","title":"Bootstrap class plugins","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/bootstrap-plugins/"},{"content":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   
16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","title":"Bootstrap class plugins","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/bootstrap-plugins/"},{"content":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","title":"Bootstrap class plugins","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/bootstrap-plugins/"},{"content":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","title":"Bootstrap class plugins","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/bootstrap-plugins/"},{"content":"Bootstrap class plugins All bootstrap plugins are optional, due to unexpected risk. Bootstrap plugins are provided in bootstrap-plugins folder. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known bootstrap plugins.\n Plugin of JDK HttpURLConnection. Agent is compatible with JDK 1.8+ Plugin of JDK Callable and Runnable. Agent is compatible with JDK 1.8+ Plugin of JDK ThreadPoolExecutor. Agent is compatible with JDK 1.8+ Plugin of JDK ForkJoinPool. Agent is compatible with JDK 1.8+  HttpURLConnection Plugin Notice The plugin of JDK HttpURLConnection depended on sun.net.*. 
When using Java 9+, You should add some JVM options as follows:\n   Java version JVM option     9-15 Nothing to do. Because --illegal-access default model is permitted.   16 Add --add-exports java.base/sun.net.www=ALL-UNNAMED or --illegal-access=permit   17+ Add --add-exports java.base/sun.net.www=ALL-UNNAMED    For more information\n JEP 403: Strongly Encapsulate JDK Internals A peek into Java 17: Encapsulating the Java runtime internals  ","title":"Bootstrap class plugins","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/bootstrap-plugins/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/latest/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/next/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. No browser plugin required. A simple JavaScript library. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.0.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. 
It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.1.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.2.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.3.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.4.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 
10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.5.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.6.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Monitoring Apache SkyWalking Client JS is a client-side JavaScript exception and tracing library.\nIt has these features:\n Provides metrics and error collection to SkyWalking backend. Lightweight. A simple JavaScript library. No browser plugin is required. Browser serves as a starting point for the entire distributed tracing system.  See Client JS official doc for more information.\nNote: Make sure receiver-browser is enabled. It is ON by default since version 8.2.0.\nreceiver-browser:selector:${SW_RECEIVER_BROWSER:default} // This means activated.default:# The sample rate precision is 1/10000. 10000 means 100% sample in default.sampleRate:${SW_RECEIVER_BROWSER_SAMPLE_RATE:10000}","title":"Browser Monitoring","url":"/docs/main/v9.7.0/en/setup/service-agent/browser-agent/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/latest/en/api/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/next/en/api/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  
For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.0.0/en/protocols/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.1.0/en/protocols/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.2.0/en/protocols/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.3.0/en/protocols/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.4.0/en/api/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  
","title":"Browser Protocol","url":"/docs/main/v9.5.0/en/api/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.6.0/en/api/browser-protocol/"},{"content":"Browser Protocol Browser protocol describes the data format between skywalking-client-js and the backend.\nOverview Browser protocol is defined and provided in gRPC format, and also implemented in HTTP 1.1\nSend performance data and error logs You can send performance data and error logs using the following services:\n BrowserPerfService#collectPerfData for performance data format. BrowserPerfService#collectErrorLogs for error log format.  For error log format, note that:\n BrowserErrorLog#uniqueId should be unique in all distributed environments.  ","title":"Browser Protocol","url":"/docs/main/v9.7.0/en/api/browser-protocol/"},{"content":"Build and use the Agent from source codes When you want to build and use the Agent from source code, please follow these steps.\nInstall SkyWalking Go Use go get to import the skywalking-go program.\n// latest or any commit ID go get github.com/apache/skywalking-go@latest Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; Build the Agent When building the project, you need to clone the project and build it.\n// git clone the same version(tag or commit ID) as your dependency version. git clone https://github.com/apache/skywalking-go.git cd skywalking-go \u0026amp;\u0026amp; make build Next, you would find several versions of the Go Agent program for different systems in the bin directory of the current project. When you need to compile the program, please add the following statement with the agent program which matches your system:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a ","title":"Build and use the Agent from source codes","url":"/docs/skywalking-go/latest/en/development-and-contribution/build-and-use-agent/"},{"content":"Build and use the Agent from source codes When you want to build and use the Agent from source code, please follow these steps.\nInstall SkyWalking Go Use go get to import the skywalking-go program.\n// latest or any commit ID go get github.com/apache/skywalking-go@latest Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; Build the Agent When building the project, you need to clone the project and build it.\n// git clone the same version(tag or commit ID) as your dependency version. 
git clone https://github.com/apache/skywalking-go.git cd skywalking-go \u0026amp;\u0026amp; make build Next, you would find several versions of the Go Agent program for different systems in the bin directory of the current project. When you need to compile the program, please add the following statement with the agent program which matches your system:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a ","title":"Build and use the Agent from source codes","url":"/docs/skywalking-go/next/en/development-and-contribution/build-and-use-agent/"},{"content":"Build and use the Agent from source codes When you want to build and use the Agent from source code, please follow these steps.\nInstall SkyWalking Go Use go get to import the skywalking-go program.\n// latest or any commit ID go get github.com/apache/skywalking-go@latest Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; Build the Agent When building the project, you need to clone the project and build it.\n// git clone the same version(tag or commit ID) as your dependency version. git clone https://github.com/apache/skywalking-go.git cd skywalking-go \u0026amp;\u0026amp; make build Next, you would find several versions of the Go Agent program for different systems in the bin directory of the current project. When you need to compile the program, please add the following statement with the agent program which matches your system:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  
If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a ","title":"Build and use the Agent from source codes","url":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/build-and-use-agent/"},{"content":"Building This document will help you compile and build the project in golang environment.\nPlatform Linux, macOS, and Windows are supported in SkyWalking Infra E2E.\nCommand git clone https://github.com/apache/skywalking-infra-e2e.git cd skywalking-infra-e2e make build After these commands, the e2e execute file path is bin/$PLATFORM/e2e.\n","title":"Building","url":"/docs/skywalking-infra-e2e/latest/en/contribution/compiling-guidance/"},{"content":"Building This document will help you compile and build the project in golang environment.\nPlatform Linux, macOS, and Windows are supported in SkyWalking Infra E2E.\nCommand git clone https://github.com/apache/skywalking-infra-e2e.git cd skywalking-infra-e2e make build After these commands, the e2e execute file path is bin/$PLATFORM/e2e.\n","title":"Building","url":"/docs/skywalking-infra-e2e/next/en/contribution/compiling-guidance/"},{"content":"Building This document will help you compile and build the project in golang environment.\nPlatform Linux, macOS, and Windows are supported in SkyWalking Infra E2E.\nCommand git clone https://github.com/apache/skywalking-infra-e2e.git cd skywalking-infra-e2e make build After these commands, the e2e execute file path is bin/$PLATFORM/e2e.\n","title":"Building","url":"/docs/skywalking-infra-e2e/v1.3.0/en/contribution/compiling-guidance/"},{"content":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","title":"CDS - Configuration Discovery Service","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configuration-discovery/"},{"content":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. 
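Laid out with normal indentation, the format described here reads roughly as follows; the two keys shown are taken from the table later on this page and their values are only illustrative:
configurations:
  serviceA:   # service name
    # Key and Value are determined by the agent side.
    # Check the agent setup doc for all available configurations.
    agent.sample_n_per_3_secs: '1000'      # illustrative value
    agent.span_limit_per_segment: '500'    # illustrative value
  serviceB:
    # ...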
The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","title":"CDS - Configuration Discovery Service","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/configuration-discovery/"},{"content":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s), the configuration affects only when the required plugins activated.  ","title":"CDS - Configuration Discovery Service","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/configuration-discovery/"},{"content":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. 
Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s): the configuration takes effect only when the required plugins are activated.  ","title":"CDS - Configuration Discovery Service","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/configuration-discovery/"},{"content":"CDS - Configuration Discovery Service CDS - Configuration Discovery Service provides the dynamic configuration for the agent, defined in gRPC.\nConfiguration Format The configuration content includes the service name and their configs. The\nconfigurations://service nameserviceA:// Configurations of service A// Key and Value are determined by the agent side.// Check the agent setup doc for all available configurations.key1:value1key2:value2...serviceB:...Available key(s) and value(s) in Java Agent. Java agent supports the following dynamic configurations.\n   Config Key Value Description Value Format Example Required Plugin(s)     agent.sample_n_per_3_secs The number of sampled traces per 3 seconds -1 -   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by , .txt,.log -   agent.trace.ignore_path The value is the path that you need to ignore, multiple paths should be separated by , more details /your/path/1/**,/your/path/2/** apm-trace-ignore-plugin   agent.span_limit_per_segment The max number of spans per segment. 300 -   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. false -     Required plugin(s): the configuration takes effect only when the required plugins are activated.  ","title":"CDS - Configuration Discovery Service","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/configuration-discovery/"},{"content":"ClickHouse monitoring ClickHouse server performance from built-in metrics data SkyWalking leverages ClickHouse built-in metrics data since v20.1.2.4. It leverages OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System.\nData flow  Configure ClickHouse to expose metrics data for scraping from Prometheus. OpenTelemetry Collector fetches metrics from the ClickHouse server through the Prometheus endpoint, and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up the built-in Prometheus endpoint. Set up OpenTelemetry Collector. For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  ClickHouse Monitoring ClickHouse monitoring provides monitoring of the metrics, events and asynchronous_metrics of the ClickHouse server. 
ClickHouse cluster is cataloged as a Layer: CLICKHOUSE Service in OAP. Each ClickHouse server is cataloged as an Instance in OAP.\nClickHouse Instance Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CpuUsage count meter_clickhouse_instance_cpu_usage CPU time spent seen by OS per second(according to ClickHouse.system.dashboard.CPU Usage (cores)). ClickHouse   MemoryUsage percentage meter_clickhouse_instance_memory_usage Total amount of memory (bytes) allocated by the server/ total amount of OS memory. ClickHouse   MemoryAvailable percentage meter_clickhouse_instance_memory_available Total amount of memory (bytes) available for program / total amount of OS memory. ClickHouse   Uptime sec meter_clickhouse_instance_uptime The server uptime in seconds. It includes the time spent for server initialization before accepting connections. ClickHouse   Version string meter_clickhouse_instance_version Version of the server in a single integer number in base-1000. ClickHouse   FileOpen count meter_clickhouse_instance_file_open Number of files opened. ClickHouse    ClickHouse Network Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     TcpConnections count meter_clickhouse_instance_tcp_connectionsmeter_clickhouse_tcp_connections Number of connections to TCP server. ClickHouse   MysqlConnections count meter_clickhouse_instance_mysql_connectionsmeter_clickhouse_mysql_connections Number of client connections using MySQL protocol. ClickHouse   HttpConnections count meter_clickhouse_instance_http_connectionsmeter_clickhouse_mysql_connections Number of connections to HTTP server. ClickHouse   InterserverConnections count meter_clickhouse_instance_interserver_connectionsmeter_clickhouse_interserver_connections Number of connections from other replicas to fetch parts. ClickHouse   PostgresqlConnections count meter_clickhouse_instance_postgresql_connectionsmeter_clickhouse_postgresql_connections Number of client connections using PostgreSQL protocol. ClickHouse   ReceiveBytes bytes meter_clickhouse_instance_network_receive_bytesmeter_clickhouse_network_receive_bytes Total number of bytes received from network. ClickHouse   SendBytes bytes meter_clickhouse_instance_network_send_bytesmeter_clickhouse_network_send_bytes Total number of bytes send to network. ClickHouse    ClickHouse Query Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     QueryCount count meter_clickhouse_instance_querymeter_clickhouse_query Number of executing queries. ClickHouse   SelectQueryCount count meter_clickhouse_instance_query_selectmeter_clickhouse_query_select Number of executing queries, but only for SELECT queries. ClickHouse   InsertQueryCount count meter_clickhouse_instance_query_insertmeter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   SelectQueryRate count/sec meter_clickhouse_instance_query_select_ratemeter_clickhouse_query_select_rate Number of SELECT queries per second. ClickHouse   InsertQueryRate count/sec meter_clickhouse_instance_query_insert_ratemeter_clickhouse_query_insert_rate Number of INSERT queries per second. ClickHouse   Querytime microsec meter_clickhouse_instance_querytime_microsecondsmeter_clickhouse_querytime_microseconds Total time of all queries. ClickHouse   SelectQuerytime microsec meter_clickhouse_instance_querytime_select_microsecondsmeter_clickhouse_querytime_select_microseconds Total time of SELECT queries. 
ClickHouse   InsertQuerytime microsec meter_clickhouse_instance_querytime_insert_microsecondsmeter_clickhouse_querytime_insert_microseconds Total time of INSERT queries. ClickHouse   OtherQuerytime microsec meter_clickhouse_instance_querytime_other_microsecondsmeter_clickhouse_querytime_other_microseconds Total time of queries that are not SELECT or INSERT. ClickHouse   QuerySlowCount count meter_clickhouse_instance_query_slowmeter_clickhouse_query_slow Number of reads from a file that were slow. ClickHouse    ClickHouse Insertion Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     InsertQueryCount count meter_clickhouse_instance_query_insertmeter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   InsertedRowCount count meter_clickhouse_instance_inserted_rowsmeter_clickhouse_inserted_rows Number of rows INSERTed to all tables. ClickHouse   InsertedBytes bytes meter_clickhouse_instance_inserted_bytesmeter_clickhouse_inserted_bytes Number of bytes INSERTed to all tables. ClickHouse   DelayedInsertCount count meter_clickhouse_instance_delayed_insertmeter_clickhouse_delayed_insert Number of times the INSERT of a block to a MergeTree table was throttled due to high number of active data parts for partition. ClickHouse    ClickHouse Replicas Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     ReplicatedChecks count meter_clickhouse_instance_replicated_checksmeter_clickhouse_replicated_checks Number of data parts checking for consistency. ClickHouse   ReplicatedFetch count meter_clickhouse_instance_replicated_fetchmeter_clickhouse_replicated_fetch Number of data parts being fetched from replica. ClickHouse   ReplicatedSend count meter_clickhouse_instance_replicated_sendmeter_clickhouse_replicated_send Number of data parts being sent to replicas. ClickHouse    ClickHouse MergeTree Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     BackgroundMergeCount count meter_clickhouse_instance_background_mergemeter_clickhouse_background_merge Number of executing background merges. ClickHouse   MergeRows count meter_clickhouse_instance_merge_rowsmeter_clickhouse_merge_rows Rows read for background merges. This is the number of rows before merge. ClickHouse   MergeUncompressedBytes bytes meter_clickhouse_instance_merge_uncompressed_bytesmeter_clickhouse_merge_uncompressed_bytes Uncompressed bytes (for columns as they stored in memory) that was read for background merges. This is the number before merge. ClickHouse   MoveCount count meter_clickhouse_instance_movemeter_clickhouse_move Number of currently executing moves. ClickHouse   PartsActive Count meter_clickhouse_instance_parts_activemeter_clickhouse_parts_active Active data part, used by current and upcoming SELECTs. ClickHouse   MutationsCount count meter_clickhouse_instance_mutationsmeter_clickhouse_mutations Number of mutations (ALTER DELETE/UPDATE). ClickHouse    ClickHouse Kafka Table Engine Supported Metrics When table engine works with Apache Kafka.\nKafka lets you:\n Publish or subscribe to data flows. Organize fault-tolerant storage. Process streams as they become available.     Monitoring Panel Unit Metric Name Description Data Source     KafkaMessagesRead count meter_clickhouse_instance_kafka_messages_readmeter_clickhouse_kafka_messages_read Number of Kafka messages already processed by ClickHouse. 
ClickHouse   KafkaWrites count meter_clickhouse_instance_kafka_writesmeter_clickhouse_kafka_writes Number of writes (inserts) to Kafka tables. ClickHouse   KafkaConsumers count meter_clickhouse_instance_kafka_consumersmeter_clickhouse_kafka_consumers Number of active Kafka consumers. ClickHouse   KafkProducers count meter_clickhouse_instance_kafka_producersmeter_clickhouse_kafka_producers Number of active Kafka producer created. ClickHouse    ClickHouse ZooKeeper Supported Metrics ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted.\n   Monitoring Panel Unit Metric Name Description Data Source     ZookeeperSession count meter_clickhouse_instance_zookeeper_sessionmeter_clickhouse_zookeeper_session Number of sessions (connections) to ZooKeeper. ClickHouse   ZookeeperWatch count meter_clickhouse_instance_zookeeper_watchmeter_clickhouse_zookeeper_watch Number of watches (event subscriptions) in ZooKeeper. ClickHouse   ZookeeperBytesSent bytes meter_clickhouse_instance_zookeeper_bytes_sentmeter_clickhouse_zookeeper_bytes_sent Number of bytes send over network while communicating with ZooKeeper. ClickHouse   ZookeeperBytesReceive bytes meter_clickhouse_instance_zookeeper_bytes_receivedmeter_clickhouse_zookeeper_bytes_received Number of bytes send over network while communicating with ZooKeeper. ClickHouse    ClickHouse Keeper Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     KeeperAliveConnections count meter_clickhouse_instance_keeper_connections_alivemeter_clickhouse_keeper_connections_alive Number of alive connections for embedded ClickHouse Keeper. ClickHouse   KeeperOutstandingRequets count meter_clickhouse_instance_keeper_outstanding_requestsmeter_clickhouse_keeper_outstanding_requests Number of outstanding requests for embedded ClickHouse Keeper. ClickHouse    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/clickhouse. The ClickHouse dashboard panel configurations are found in /config/ui-initialized-templates/clickhouse.\n","title":"ClickHouse monitoring","url":"/docs/main/next/en/setup/backend/backend-clickhouse-monitoring/"},{"content":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC server and delivery the data to it.\nDefaultConfig # The gRPC client finder typefinder_type:\u0026#34;static\u0026#34;# The gRPC server address (default localhost:11800), multiple addresses are split by \u0026#34;,\u0026#34;.server_addr:localhost:11800# The gRPC kubernetes server address finderkubernetes_config:# The kind of resourcekind:pod# The resource namespacesnamespaces:- default# How to get the address exported portextra_port:# Resource target portport:11800# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:true# The auth value when send requestauthentication:\u0026#34;\u0026#34;# How frequently to check the connection(second)check_period:5# The gRPC send request timeouttimeout:# The timeout for unary single requestunary:5s# The timeout for unary stream requeststream:20sConfiguration    Name Type Description     finder_type string The gRPC server address finder type, support \u0026ldquo;static\u0026rdquo; and \u0026ldquo;kubernetes\u0026rdquo;   server_addr string The gRPC server address, only works for \u0026ldquo;static\u0026rdquo; address finder   kubernetes_config *resolvers.KubernetesConfig The kubernetes config to lookup addresses, only works for \u0026ldquo;kubernetes\u0026rdquo; address finder   kubernetes_config.api_server string The kubernetes API server address, If not define means using in kubernetes mode to connect   kubernetes_config.basic_auth *resolvers.BasicAuth The HTTP basic authentication credentials for the targets.   kubernetes_config.basic_auth.username string    kubernetes_config.basic_auth.password resolvers.Secret    kubernetes_config.basic_auth.password_file string    kubernetes_config.bearer_token resolvers.Secret The bearer token for the targets.   kubernetes_config.bearer_token_file string The bearer token file for the targets.   kubernetes_config.proxy_url string HTTP proxy server to use to connect to the targets.   kubernetes_config.tls_config resolvers.TLSConfig TLSConfig to use to connect to the targets.   kubernetes_config.namespaces []string Support to lookup namespaces   kubernetes_config.kind string The kind of api   kubernetes_config.selector resolvers.Selector The kind selector   kubernetes_config.extra_port resolvers.ExtraPort How to get the address exported port   enable_TLS bool Enable TLS connect to server   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   authentication string The auth value when send request   check_period int How frequently to check the connection(second)   timeout grpc.TimeoutConfig The gRPC send request timeout    ","title":"Client/grpc-client","url":"/docs/skywalking-satellite/latest/en/setup/plugins/client_grpc-client/"},{"content":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC server and delivery the data to it.\nDefaultConfig # The gRPC client finder typefinder_type:\u0026#34;static\u0026#34;# The gRPC server address (default localhost:11800), multiple addresses are split by \u0026#34;,\u0026#34;.server_addr:localhost:11800# The gRPC kubernetes server address finderkubernetes_config:# The kind of resourcekind:pod# The resource namespacesnamespaces:- default# How to get the address exported portextra_port:# Resource target portport:11800# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. 
The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:true# The auth value when send requestauthentication:\u0026#34;\u0026#34;# How frequently to check the connection(second)check_period:5# The gRPC send request timeouttimeout:# The timeout for unary single requestunary:5s# The timeout for unary stream requeststream:20sConfiguration    Name Type Description     finder_type string The gRPC server address finder type, support \u0026ldquo;static\u0026rdquo; and \u0026ldquo;kubernetes\u0026rdquo;   server_addr string The gRPC server address, only works for \u0026ldquo;static\u0026rdquo; address finder   kubernetes_config *resolvers.KubernetesConfig The kubernetes config to lookup addresses, only works for \u0026ldquo;kubernetes\u0026rdquo; address finder   kubernetes_config.api_server string The kubernetes API server address, If not define means using in kubernetes mode to connect   kubernetes_config.basic_auth *resolvers.BasicAuth The HTTP basic authentication credentials for the targets.   kubernetes_config.basic_auth.username string    kubernetes_config.basic_auth.password resolvers.Secret    kubernetes_config.basic_auth.password_file string    kubernetes_config.bearer_token resolvers.Secret The bearer token for the targets.   kubernetes_config.bearer_token_file string The bearer token file for the targets.   kubernetes_config.proxy_url string HTTP proxy server to use to connect to the targets.   kubernetes_config.tls_config resolvers.TLSConfig TLSConfig to use to connect to the targets.   kubernetes_config.namespaces []string Support to lookup namespaces   kubernetes_config.kind string The kind of api   kubernetes_config.selector resolvers.Selector The kind selector   kubernetes_config.extra_port resolvers.ExtraPort How to get the address exported port   enable_TLS bool Enable TLS connect to server   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   authentication string The auth value when send request   check_period int How frequently to check the connection(second)   timeout grpc.TimeoutConfig The gRPC send request timeout    ","title":"Client/grpc-client","url":"/docs/skywalking-satellite/next/en/setup/plugins/client_grpc-client/"},{"content":"Client/grpc-client Description The gRPC client is a sharing plugin to keep connection with the gRPC server and delivery the data to it.\nDefaultConfig # The gRPC client finder typefinder_type:\u0026#34;static\u0026#34;# The gRPC server address (default localhost:11800), multiple addresses are split by \u0026#34;,\u0026#34;.server_addr:localhost:11800# The gRPC kubernetes server address finderkubernetes_config:# The kind of resourcekind:pod# The resource namespacesnamespaces:- default# How to get the address exported portextra_port:# Resource target portport:11800# The TLS switch (default false).enable_TLS:false# The file path of client.pem. 
The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:true# The auth value when send requestauthentication:\u0026#34;\u0026#34;# How frequently to check the connection(second)check_period:5# The gRPC send request timeouttimeout:# The timeout for unary single requestunary:5s# The timeout for unary stream requeststream:20sConfiguration    Name Type Description     finder_type string The gRPC server address finder type, support \u0026ldquo;static\u0026rdquo; and \u0026ldquo;kubernetes\u0026rdquo;   server_addr string The gRPC server address, only works for \u0026ldquo;static\u0026rdquo; address finder   kubernetes_config *resolvers.KubernetesConfig The kubernetes config to lookup addresses, only works for \u0026ldquo;kubernetes\u0026rdquo; address finder   kubernetes_config.api_server string The kubernetes API server address, If not define means using in kubernetes mode to connect   kubernetes_config.basic_auth *resolvers.BasicAuth The HTTP basic authentication credentials for the targets.   kubernetes_config.basic_auth.username string    kubernetes_config.basic_auth.password resolvers.Secret    kubernetes_config.basic_auth.password_file string    kubernetes_config.bearer_token resolvers.Secret The bearer token for the targets.   kubernetes_config.bearer_token_file string The bearer token file for the targets.   kubernetes_config.proxy_url string HTTP proxy server to use to connect to the targets.   kubernetes_config.tls_config resolvers.TLSConfig TLSConfig to use to connect to the targets.   kubernetes_config.namespaces []string Support to lookup namespaces   kubernetes_config.kind string The kind of api   kubernetes_config.selector resolvers.Selector The kind selector   kubernetes_config.extra_port resolvers.ExtraPort How to get the address exported port   enable_TLS bool Enable TLS connect to server   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   authentication string The auth value when send request   check_period int How frequently to check the connection(second)   timeout grpc.TimeoutConfig The gRPC send request timeout    ","title":"Client/grpc-client","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/client_grpc-client/"},{"content":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the Kafka brokers and delivery the data to it.\nDefaultConfig # The Kafka broker addresses (default localhost:9092). Multiple values are separated by commas.brokers:localhost:9092# The Kafka version should follow this pattern, which is major_minor_veryMinor_patch (default 1.0.0.0).version:1.0.0.0# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. 
The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).required_acks:1# The producer max retry times (default 3).producer_max_retry:3# The meta max retry times (default 3).meta_max_retry:3# How long to wait for the cluster to settle between retries (default 100ms). Time unit is ms.retry_backoff:100# The max message bytes.max_message_bytes:1000000# If enabled, the producer will ensure that exactly one copy of each message is written (default false).idempotent_writes:false# A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes (default Satellite).client_id:Satellite# Compression codec represents the various compression codecs recognized by Kafka in messages. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTDcompression_codec:0# How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. The unit is minute.refresh_period:10# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:trueConfiguration    Name Type Description     brokers string The Kafka broker addresses (default localhost:9092).   version string The version should follow this pattern, which is major.minor.veryMinor.patch.   enable_TLS bool The TLS switch (default false).   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   required_acks int16 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).   producer_max_retry int The producer max retry times (default 3).   meta_max_retry int The meta max retry times (default 3).   retry_backoff int How long to wait for the cluster to settle between retries (default 100ms).   max_message_bytes int The max message bytes.   idempotent_writes bool Ensure that exactly one copy of each message is written when is true.   client_id string A user-provided string sent with every request to the brokers.   compression_codec int Represents the various compression codecs recognized by Kafka in messages.   refresh_period int How frequently to refresh the cluster metadata.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.    ","title":"Client/kafka-client","url":"/docs/skywalking-satellite/latest/en/setup/plugins/client_kafka-client/"},{"content":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the Kafka brokers and delivery the data to it.\nDefaultConfig # The Kafka broker addresses (default localhost:9092). Multiple values are separated by commas.brokers:localhost:9092# The Kafka version should follow this pattern, which is major_minor_veryMinor_patch (default 1.0.0.0).version:1.0.0.0# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).required_acks:1# The producer max retry times (default 3).producer_max_retry:3# The meta max retry times (default 3).meta_max_retry:3# How long to wait for the cluster to settle between retries (default 100ms). Time unit is ms.retry_backoff:100# The max message bytes.max_message_bytes:1000000# If enabled, the producer will ensure that exactly one copy of each message is written (default false).idempotent_writes:false# A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes (default Satellite).client_id:Satellite# Compression codec represents the various compression codecs recognized by Kafka in messages. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTDcompression_codec:0# How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. The unit is minute.refresh_period:10# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:trueConfiguration    Name Type Description     brokers string The Kafka broker addresses (default localhost:9092).   version string The version should follow this pattern, which is major.minor.veryMinor.patch.   enable_TLS bool The TLS switch (default false).   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   required_acks int16 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).   producer_max_retry int The producer max retry times (default 3).   meta_max_retry int The meta max retry times (default 3).   retry_backoff int How long to wait for the cluster to settle between retries (default 100ms).   max_message_bytes int The max message bytes.   idempotent_writes bool Ensure that exactly one copy of each message is written when is true.   client_id string A user-provided string sent with every request to the brokers.   compression_codec int Represents the various compression codecs recognized by Kafka in messages.   refresh_period int How frequently to refresh the cluster metadata.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.    ","title":"Client/kafka-client","url":"/docs/skywalking-satellite/next/en/setup/plugins/client_kafka-client/"},{"content":"Client/kafka-client Description The Kafka client is a sharing plugin to keep connection with the Kafka brokers and delivery the data to it.\nDefaultConfig # The Kafka broker addresses (default localhost:9092). Multiple values are separated by commas.brokers:localhost:9092# The Kafka version should follow this pattern, which is major_minor_veryMinor_patch (default 1.0.0.0).version:1.0.0.0# The TLS switch (default false).enable_TLS:false# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:\u0026#34;\u0026#34;# The file path of client.key. The config only works when opening the TLS switch.client_key_path:\u0026#34;\u0026#34;# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:\u0026#34;\u0026#34;# 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).required_acks:1# The producer max retry times (default 3).producer_max_retry:3# The meta max retry times (default 3).meta_max_retry:3# How long to wait for the cluster to settle between retries (default 100ms). Time unit is ms.retry_backoff:100# The max message bytes.max_message_bytes:1000000# If enabled, the producer will ensure that exactly one copy of each message is written (default false).idempotent_writes:false# A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes (default Satellite).client_id:Satellite# Compression codec represents the various compression codecs recognized by Kafka in messages. 0 : None, 1 : Gzip, 2 : Snappy, 3 : LZ4, 4 : ZSTDcompression_codec:0# How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes. The unit is minute.refresh_period:10# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:trueConfiguration    Name Type Description     brokers string The Kafka broker addresses (default localhost:9092).   version string The version should follow this pattern, which is major.minor.veryMinor.patch.   enable_TLS bool The TLS switch (default false).   client_pem_path string The file path of client.pem. The config only works when opening the TLS switch.   client_key_path string The file path of client.key. The config only works when opening the TLS switch.   ca_pem_path string The file path oca.pem. The config only works when opening the TLS switch.   required_acks int16 0 means NoResponse, 1 means WaitForLocal and -1 means WaitForAll (default 1).   producer_max_retry int The producer max retry times (default 3).   meta_max_retry int The meta max retry times (default 3).   retry_backoff int How long to wait for the cluster to settle between retries (default 100ms).   max_message_bytes int The max message bytes.   idempotent_writes bool Ensure that exactly one copy of each message is written when is true.   client_id string A user-provided string sent with every request to the brokers.   compression_codec int Represents the various compression codecs recognized by Kafka in messages.   refresh_period int How frequently to refresh the cluster metadata.   insecure_skip_verify bool Controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.    ","title":"Client/kafka-client","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/client_kafka-client/"},{"content":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The examples listed in this folder show how to use this command to create, update, read and delete schemas. Furthermore, bydbctl could help in querying data stored in streams, measures and properties.\nThese are several ways to install:\n Get binaries from download. Build from sources to get latest features.  The config file named .bydbctl.yaml will be created in $HOME folder after the first CRUD command is applied.\n\u0026gt; more ~/.bydbctl.yaml addr: http://127.0.0.1:64299 group: \u0026#34;\u0026#34; bydbctl leverages HTTP endpoints to retrieve data instead of gRPC.\nHTTP client Users could select any HTTP client to access the HTTP based endpoints. 
The default address is localhost:17913/api.\nJava Client The Java native client is hosted at skywalking-banyandb-java-client.\nWeb application The web application is hosted at skywalking-banyandb-webapp when you boot up the BanyanDB server.\ngRPC command-line tool Users can use any command-line tool to interact with the Banyand server\u0026rsquo;s gRPC endpoints. The only limitation is that the CLI tool has to support file descriptor files, since the database server does not support server reflection.\nBuf is a Protobuf build tool that BanyanDB relies on. It can provide FileDescriptorSets usable by gRPC CLI tools like grpcurl.\nBanyanDB recommends installing Buf by issuing\n$ make -C api generate Protobuf schema files are compiled The above command compiles the *.proto files after downloading buf into \u0026lt;project_root\u0026gt;/bin\nUsers could leverage buf\u0026rsquo;s internal compiler to generate the FileDescriptorSets\n$ cd api $ ../bin/buf build -o image.bin If grpcurl is the CLI tool used to access the APIs of BanyanDB, use image.bin with it on the fly:\n$ grpcurl -plaintext -protoset image.bin localhost:17912 ... ","title":"Clients","url":"/docs/skywalking-banyandb/latest/clients/"},{"content":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The examples listed in this folder show how to use this command to create, update, read and delete schemas. Furthermore, bydbctl could help in querying data stored in streams, measures and properties.\nThere are several ways to install:\n Get binaries from download. Build from sources to get the latest features.  The config file named .bydbctl.yaml will be created in the $HOME folder after the first CRUD command is applied.\n\u0026gt; more ~/.bydbctl.yaml addr: http://127.0.0.1:64299 group: \u0026#34;\u0026#34; bydbctl leverages HTTP endpoints to retrieve data instead of gRPC.\nHTTP client Users could select any HTTP client to access the HTTP-based endpoints. The default address is localhost:17913/api.\nJava Client The Java native client is hosted at skywalking-banyandb-java-client.\nWeb application The web application is hosted at skywalking-banyandb-webapp when you boot up the BanyanDB server.\ngRPC command-line tool Users can use any command-line tool to interact with the Banyand server\u0026rsquo;s gRPC endpoints. The only limitation is that the CLI tool has to support file descriptor files, since the database server does not support server reflection.\nBuf is a Protobuf build tool that BanyanDB relies on. It can provide FileDescriptorSets usable by gRPC CLI tools like grpcurl.\nBanyanDB recommends installing Buf by issuing\n$ make -C api generate Protobuf schema files are compiled The above command compiles the *.proto files after downloading buf into \u0026lt;project_root\u0026gt;/bin\nUsers could leverage buf\u0026rsquo;s internal compiler to generate the FileDescriptorSets\n$ cd api $ ../bin/buf build -o image.bin If grpcurl is the CLI tool used to access the APIs of BanyanDB, use image.bin with it on the fly:\n$ grpcurl -plaintext -protoset image.bin localhost:17912 ... ","title":"Clients","url":"/docs/skywalking-banyandb/next/clients/"},{"content":"Clients Command Line The command line tool named bydbctl improves users' interactive experience. The examples listed in this folder show how to use this command to create, update, read and delete schemas. 
Furthermore, bydbctl could help in querying data stored in streams, measures and properties.\nThese are several ways to install:\n Get binaries from download. Build from sources to get latest features.  The config file named .bydbctl.yaml will be created in $HOME folder after the first CRUD command is applied.\n\u0026gt; more ~/.bydbctl.yaml addr: http://127.0.0.1:64299 group: \u0026#34;\u0026#34; bydbctl leverages HTTP endpoints to retrieve data instead of gRPC.\nHTTP client Users could select any HTTP client to access the HTTP based endpoints. The default address is localhost:17913/api\nJava Client The java native client is hosted at skywalking-banyandb-java-client.\nWeb application The web application is hosted at skywalking-banyandb-webapp when you boot up the BanyanDB server.\ngRPC command-line tool Users have a chance to use any command-line tool to interact with the Banyand server\u0026rsquo;s gRPC endpoints. The only limitation is the CLI tool has to support file descriptor files since the database server does not support server reflection.\nBuf is a Protobuf building tooling the BanyanDB relies on. It can provide FileDescriptorSets usable by gRPC CLI tools like grpcurl\nBanyanDB recommends installing Buf by issuing\n$ make -C api generate Protobuf schema files are compiled Above command will compile *.proto after downloading buf into \u0026lt;project_root\u0026gt;/bin\nUsers could leverage buf\u0026rsquo;s internal compiler to generate the FileDescriptorSets\n$ cd api $ ../bin/buf build -o image.bin If grpcurl is the CLI tool to access the APIs of BanyanDb. To use image.bin with it on the fly:\n$ grpcurl -plaintext -protoset image.bin localhost:17912 ... ","title":"Clients","url":"/docs/skywalking-banyandb/v0.5.0/clients/"},{"content":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe etcd cluster can be setup by the etcd installation guide\nRole-base Banyand Cluster There is an example: The etcd cluster is spread across three nodes with the addresses 10.0.0.1:2379, 10.0.0.2:2379, and 10.0.0.3:2379.\nData nodes and liaison nodes are running as independent processes by\n$ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Node Discovery The node discovery is based on the etcd cluster. The etcd cluster is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe host is registered to the etcd cluster by the banyand-server automatically based on node-host-provider :\n node-host-provider=hostname : Default. The OS\u0026rsquo;s hostname is registered as the host part in the address. node-host-provider=ip : The OS\u0026rsquo;s the first non-loopback active IP address(IPv4) is registered as the host part in the address. node-host-provider=flag : node-host is registered as the host part in the address.  
","title":"Cluster Installation","url":"/docs/skywalking-banyandb/latest/installation/cluster/"},{"content":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe etcd cluster can be setup by the etcd installation guide\nRole-base Banyand Cluster There is an example: The etcd cluster is spread across three nodes with the addresses 10.0.0.1:2379, 10.0.0.2:2379, and 10.0.0.3:2379.\nData nodes and liaison nodes are running as independent processes by\n$ ./banyand-server-static storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server-static storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server-static storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Node Discovery The node discovery is based on the etcd cluster. The etcd cluster is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe host is registered to the etcd cluster by the banyand-server-static automatically based on node-host-provider :\n node-host-provider=hostname : Default. The OS\u0026rsquo;s hostname is registered as the host part in the address. node-host-provider=ip : The OS\u0026rsquo;s the first non-loopback active IP address(IPv4) is registered as the host part in the address. node-host-provider=flag : node-host is registered as the host part in the address.  Etcd Authentication etcd supports through tls certificates and RBAC-based authentication for both clients to server communication. This section tends to help users set up authentication for BanyanDB.\nAuthentication with username/password The etcd user can be setup by the etcd authentication guide\nThe username/password is configured in the following command:\n etcd-username: The username for etcd client authentication. etcd-password: The password for etcd client authentication.  Note: recommended using environment variables to set username/password for higher security.\n$ ./banyand-server-static storage --etcd-endpoints=your-endpoints --etcd-username=your-username --etcd-password=your-password \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=your-endpoints --etcd-username=your-username --etcd-password=your-password \u0026lt;flags\u0026gt; Transport security with HTTPS The etcd trusted certificate file can be setup by the etcd transport security model\n etcd-tls-ca-file: The path of the trusted certificate file.  $ ./banyand-server-static storage --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=youf-file-path \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=youf-file-path \u0026lt;flags\u0026gt; Authentication with HTTPS client certificates The etcd client certificates can be setup by the etcd transport security model\n etcd-tls-ca-file: The path of the trusted certificate file. etcd-tls-cert-file: Certificate used for SSL/TLS connections to etcd. When this option is set, advertise-client-urls can use the HTTPS schema. etcd-tls-key-file: Key for the certificate. Must be unencrypted.  
$ ./banyand-server-static storage --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=youf-file-path --etcd-tls-cert-file=youf-file-path --etcd-tls-key-file=youf-file-path \u0026lt;flags\u0026gt; $ ./banyand-server-static liaison --etcd-endpoints=your-https-endpoints --etcd-tls-ca-file=youf-file-path --etcd-tls-cert-file=youf-file-path --etcd-tls-key-file=youf-file-path \u0026lt;flags\u0026gt; ","title":"Cluster Installation","url":"/docs/skywalking-banyandb/next/installation/cluster/"},{"content":"Cluster Installation Setup Meta Nodes Meta nodes are a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe etcd cluster can be setup by the etcd installation guide\nRole-base Banyand Cluster There is an example: The etcd cluster is spread across three nodes with the addresses 10.0.0.1:2379, 10.0.0.2:2379, and 10.0.0.3:2379.\nData nodes and liaison nodes are running as independent processes by\n$ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Node Discovery The node discovery is based on the etcd cluster. The etcd cluster is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster.\nThe host is registered to the etcd cluster by the banyand-server automatically based on node-host-provider :\n node-host-provider=hostname : Default. The OS\u0026rsquo;s hostname is registered as the host part in the address. node-host-provider=ip : The OS\u0026rsquo;s the first non-loopback active IP address(IPv4) is registered as the host part in the address. node-host-provider=flag : node-host is registered as the host part in the address.  ","title":"Cluster Installation","url":"/docs/skywalking-banyandb/v0.5.0/installation/cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. 
Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nCloud Native Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nTraditional Coordinator NOTICE In all the following coordinators, oap.internal.comm.host:oap.internal.comm.port is registered as the ID and address for the current OAP node. By default, because they are same in all OAP nodes, the registrations are conflicted, and (may) show as one registered node, which actually would be the node itself. In this case, the cluster mode is NOT working.\nPlease check the registered nodes on your coordinator servers, to make the registration information unique for every node. You could have two options\n Change core/gRPCHost(oap.internal.comm.host) and core/gRPCPort(oap.internal.comm.port) for internal, and setup external communication channels for data reporting and query. Use internalComHost and internalComPort in the config to provide a unique host and port for every OAP node. This host name port should be accessible for other OAP nodes.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  
zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  
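For illustration only, a minimal sketch of the cluster section with Nacos authentication switched on; the username/password values are placeholders rather than shipped defaults, and the internalCom settings are placed under the nacos block in the same way the other coordinators above place them:
cluster:
  selector: ${SW_CLUSTER:nacos}
  nacos:
    username: nacos                # placeholder credential, not a shipped default
    password: nacos                # placeholder credential, not a shipped default
    internalComHost: 172.10.4.10   # must be unique and reachable for every OAP node
    internalComPort: 11800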
","title":"Cluster Management","url":"/docs/main/latest/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nCloud Native Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nTraditional Coordinator NOTICE In all the following coordinators, oap.internal.comm.host:oap.internal.comm.port is registered as the ID and address for the current OAP node. By default, because they are same in all OAP nodes, the registrations are conflicted, and (may) show as one registered node, which actually would be the node itself. In this case, the cluster mode is NOT working.\nPlease check the registered nodes on your coordinator servers, to make the registration information unique for every node. You could have two options\n Change core/gRPCHost(oap.internal.comm.host) and core/gRPCPort(oap.internal.comm.port) for internal, and setup external communication channels for data reporting and query. Use internalComHost and internalComPort in the config to provide a unique host and port for every OAP node. This host name port should be accessible for other OAP nodes.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. 
Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  cluster:selector:${SW_CLUSTER:zookeeper}...zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}...consul:serviceName:${SW_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}# Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500hostPort:${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}aclToken:${SW_CLUSTER_CONSUL_ACLTOKEN:\u0026#34;\u0026#34;}internalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:\u0026#34;\u0026#34;}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:-1}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. 
Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  ","title":"Cluster Management","url":"/docs/main/next/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many product environments, the backend needs to support high throughput and provide HA to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. 
Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must make sure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking and all permissions to the specified user will be granted. Simultaneously, znode grants the READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The require backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registed host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the oap nodes. 
The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","title":"Cluster Management","url":"/docs/main/v9.0.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  
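To make the note above concrete, a minimal sketch of only the ACL-related keys, assuming the same zookeeper block shown below; turning enableACL on with the digest schema means the user:password pair in expression is stored in clear text:
zookeeper:
  enableACL: ${SW_ZK_ENABLE_ACL:true}                    # switched on for this sketch; the shipped default is false
  schema: ${SW_ZK_SCHEMA:digest}                         # only the digest schema is supported
  expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}  # user:password, clear text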
In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. 
Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","title":"Cluster Management","url":"/docs/main/v9.1.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  
zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  
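As a hedged sketch of the Nacos variant, only the keys listed above (username, password, accessKey, secretKey) are taken from this page; the placement under cluster and the optional internalComHost/internalComPort overrides mirror the Zookeeper block and are assumptions, not verbatim defaults.
cluster:
  selector: ${SW_CLUSTER:nacos}
  nacos:
    # Leave username/password or accessKey/secretKey empty to disable authentication.
    username:
    password:
    accessKey:
    secretKey:
    # Optional overrides for the address other OAP nodes use to reach this node
    # (key placement mirrors the Zookeeper block; treat it as an assumption in this sketch).
    internalComHost:
    internalComPort: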
","title":"Cluster Management","url":"/docs/main/v9.2.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. 
Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  
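The Kubernetes snippet in this entry is easier to follow when reflowed. This sketch keeps the Helm template placeholders exactly as they appear in the flattened snippet above and only restores indentation; it is not a complete pod spec.
containers:
  # Original configurations of the OAP container
  - name: {{ .Values.oap.name }}
    image: {{ .Values.oap.image.repository }}:{{ required "oap.image.tag is required" .Values.oap.image.tag }}
    # ...
    env:
      # Expose metadata.uid as the system environment variable SKYWALKING_COLLECTOR_UID
      - name: SKYWALKING_COLLECTOR_UID
        valueFrom:
          fieldRef:
            fieldPath: metadata.uid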
","title":"Cluster Management","url":"/docs/main/v9.3.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support high throughput and provide high availability (HA) to maintain robustness, so you always need cluster management in product env.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThe core feature of cluster management is supporting the whole OAP cluster running distributed aggregation and analysis for telemetry data.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances. In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. 
Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nConsul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  
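For the Etcd coordinator described in this entry, the flattened block reads more clearly as YAML. Keys, defaults, and the example endpoint list are copied from the config above; the nesting under cluster is assumed to follow the stock application.yml layout.
cluster:
  selector: ${SW_CLUSTER:etcd}
  etcd:
    # etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
    endpoints: ${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}
    namespace: ${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}
    serviceName: ${SW_CLUSTER_ETCD_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
    authentication: ${SW_CLUSTER_ETCD_AUTHENTICATION:false}
    user: ${SW_CLUSTER_ETCD_USER:}
    password: ${SW_CLUSTER_ETCD_PASSWORD:}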
","title":"Cluster Management","url":"/docs/main/v9.4.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nKubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nZookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  
In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. 
The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","title":"Cluster Management","url":"/docs/main/v9.5.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support distributed aggregation, high throughput and provide high availability (HA) to maintain robustness, so you always need to setup CLUSTER management in product env. Otherwise, you would face metrics inaccurate.\ncore/gRPCHost is listening on 0.0.0.0 for quick start as the single mode for most cases. Besides the Kubernetes coordinator, which is using the cloud-native mode to establish cluster, all other coordinators requires core/gRPCHost updated to real IP addresses or take reference of internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend agents/probes using gateway to load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nKubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID which is laid at metadata.uid as the value of the system environment variable SKYWALKING_COLLECTOR_UID\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nZookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enable Zookeeper ACL to control access to its znode. schema is Zookeeper ACL schemas. expression is a expression of ACL. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of Zookeeper curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. 
If /skywalking does not exist, it will be created by SkyWalking, and all permissions to the specified user will be granted. Simultaneously, znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: the registered port and other OAP nodes use this to communicate with the current node.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has upgraded to v3 protocol and changed to the CoreOS official library. Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. 
Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The registered host and other OAP nodes use this to communicate with the current node. internalComPort: The registered port and other OAP nodes use this to communicate with the current node.  ","title":"Cluster Management","url":"/docs/main/v9.6.0/en/setup/backend/backend-cluster/"},{"content":"Cluster Management In many production environments, the backend needs to support distributed aggregation and high throughput, and provide high availability (HA) to maintain robustness, so you always need to set up CLUSTER management in a production env. Otherwise, you would face inaccurate metrics.\ncore/gRPCHost listens on 0.0.0.0 for a quick start in single-node mode, which covers most cases. Besides the Kubernetes coordinator, which uses the cloud-native mode to establish the cluster, all other coordinators require core/gRPCHost to be updated to a real IP address, or refer to internalComHost and internalComPort in each coordinator doc.\nNOTICE, cluster management doesn\u0026rsquo;t provide a service discovery mechanism for agents and probes. We recommend that agents/probes use a gateway as a load balancer to access OAP clusters.\nThere are various ways to manage the cluster in the backend. Choose the one that best suits your needs.\n Kubernetes. When the backend clusters are deployed inside Kubernetes, you could make use of this method by using k8s native APIs to manage clusters. Zookeeper coordinator. Use Zookeeper to let the backend instances detect and communicate with each other. Consul. Use Consul as the backend cluster management implementor and coordinate backend instances. Etcd. Use Etcd to coordinate backend instances. Nacos. Use Nacos to coordinate backend instances.  In the application.yml file, there are default configurations for the aforementioned coordinators under the section cluster. You can specify any of them in the selector property to enable it.\nCloud Native Kubernetes The required backend clusters are deployed inside Kubernetes. See the guides in Deploy in kubernetes. Set the selector to kubernetes.\ncluster:selector:${SW_CLUSTER:kubernetes}# other configurationsMeanwhile, the OAP cluster requires the pod\u0026rsquo;s UID, which is found at metadata.uid, as the value of the system environment variable SKYWALKING_COLLECTOR_UID.\ncontainers:# Original configurations of OAP container- name:{{.Values.oap.name }}image:{{.Values.oap.image.repository }}:{{ required \u0026#34;oap.image.tag is required\u0026#34; .Values.oap.image.tag }}# ...# ...env:# Add metadata.uid as the system environment variable, SKYWALKING_COLLECTOR_UID - name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uidRead the complete helm for more details.\nTraditional Coordinator NOTICE In all the following coordinators, oap.internal.comm.host:oap.internal.comm.port is registered as the ID and address of the current OAP node. By default, because these values are the same on all OAP nodes, the registrations conflict and (may) show up as a single registered node, which is actually the node itself. In this case, the cluster mode is NOT working.\nPlease check the registered nodes on your coordinator servers to make sure the registration information is unique for every node. 
You have two options:\n Change core/gRPCHost (oap.internal.comm.host) and core/gRPCPort (oap.internal.comm.port) for internal communication, and set up external communication channels for data reporting and query. Use internalComHost and internalComPort in the config to provide a unique host and port for every OAP node. This host name and port must be accessible to the other OAP nodes.  Zookeeper coordinator Zookeeper is a very common and widely used cluster coordinator. Set the cluster/selector to zookeeper in the yml to enable it.\nRequired Zookeeper version: 3.5+\ncluster:selector:${SW_CLUSTER:zookeeper}# other configurations hostPort is the list of zookeeper servers. Format is IP1:PORT1,IP2:PORT2,...,IPn:PORTn enableACL enables Zookeeper ACL to control access to its znode. schema is the Zookeeper ACL schema. expression is an ACL expression. The format of the expression is specific to the schema. hostPort, baseSleepTimeMs and maxRetries are settings of the Zookeeper Curator client.  Note:\n If Zookeeper ACL is enabled and /skywalking exists, you must ensure that SkyWalking has CREATE, READ and WRITE permissions. If /skywalking does not exist, it will be created by SkyWalking, and all permissions will be granted to the specified user. Simultaneously, the znode grants READ permission to anyone. If you set schema as digest, the password of the expression is set in clear text.  In some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes; for example, the default host (0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: The exposed port for other OAP nodes in the cluster internal communication.  zookeeper:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}hostPort:${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}#Retry PolicybaseSleepTimeMs:${SW_CLUSTER_ZK_SLEEP_TIME:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CLUSTER_ZK_MAX_RETRIES:3}# max number of times to retryinternalComHost:${SW_CLUSTER_INTERNAL_COM_HOST:172.10.4.10}internalComPort:${SW_CLUSTER_INTERNAL_COM_PORT:11800}# Enable ACLenableACL:${SW_ZK_ENABLE_ACL:false}# disable ACL in defaultschema:${SW_ZK_SCHEMA:digest}# only support digest schemaexpression:${SW_ZK_EXPRESSION:skywalking:skywalking}Consul Recently, the Consul system has become more and more popular, and many companies and developers now use Consul as their service discovery solution. Set the cluster/selector to consul in the yml to enable it.\ncluster:selector:${SW_CLUSTER:consul}# other configurationsSame as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes; for example, the default host (0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: The exposed port for other OAP nodes in the cluster internal communication.  Etcd Set the cluster/selector to etcd in the yml to enable it. The Etcd client has been upgraded to the v3 protocol and switched to the official CoreOS library. 
Since 8.7.0, only the v3 protocol is supported for Etcd.\ncluster:selector:${SW_CLUSTER:etcd}# other configurationsetcd:# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379endpoints:${SW_CLUSTER_ETCD_ENDPOINTS:localhost:2379}namespace:${SW_CLUSTER_ETCD_NAMESPACE:/skywalking}serviceName:${SW_CLUSTER_ETCD_SERVICE_NAME:\u0026#34;SkyWalking_OAP_Cluster\u0026#34;}authentication:${SW_CLUSTER_ETCD_AUTHENTICATION:false}user:${SW_CLUSTER_ETCD_USER:}password:${SW_CLUSTER_ETCD_PASSWORD:}Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  Nacos Set the cluster/selector to nacos in the yml to enable it.\ncluster:selector:${SW_CLUSTER:nacos}# other configurationsNacos supports authentication by username or accessKey. Empty means that there is no need for authentication. Extra config is as follows:\nnacos:username:password:accessKey:secretKey:Same as the Zookeeper coordinator, in some cases, the OAP default gRPC host and port in the core are not suitable for internal communication among the OAP nodes, such as the default host(0.0.0.0) should not be used in cluster mode. The following settings are provided to set the host and port manually, based on your own LAN env.\n internalComHost: The exposed host name for other OAP nodes in the cluster internal communication. internalComPort: the exposed port for other OAP nodes in the cluster internal communication.  ","title":"Cluster Management","url":"/docs/main/v9.7.0/en/setup/backend/backend-cluster/"},{"content":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully utilize the clarity and performance boost brought by f-strings. Please do not use other styles - +, % or .format unless f-string is absolutely unfeasible in the context, or it is a logger message, which is optimized for the % style\nRun make dev-fix to invoke flynt to convert other formats to f-string, pay extra care to possible corner cases leading to a semantically different conversion.\nQuotes As we know both single quotes and double quotes are both acceptable in Python. For a better coding style, we enforce a check for using single quotes when possible.\nPlease only use double quotes on the outside when there are inevitable single quotes inside the string, or when there are nest quotes.\nFor example -\nfoo = f\u0026#34;I\u0026#39;m a string\u0026#34; bar = f\u0026#34;This repo is called \u0026#39;skywalking-python\u0026#39;\u0026#34; Run make dev-fix to invoke unify to deal with your quotes if flake8 complaints about it.\nDebug messages Please import the logger_debug_enabled variable and wrap your debug messages with a check.\nThis should be done for all performance critical components.\nif logger_debug_enabled: logger.debug(\u0026#39;Message - %s\u0026#39;, some_func()) Imports Please make sure the imports are placed in a good order, or flake8-isort will notify you of the violations.\nRun make dev-fix to automatically fix the sorting problem.\nNaming In PEP8 convention, we are required to use snake_case as the accepted style.\nHowever, there are special cases. 
For example, you are overriding/monkey-patching a method which happens to use the old style camelCase naming, then it is acceptable to have the original naming convention to preserve context.\nPlease mark the line with # noqa to avoid linting.\n","title":"Coding Style for SkyWalking Python","url":"/docs/skywalking-python/latest/en/contribution/codingstyle/"},{"content":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully utilize the clarity and performance boost brought by f-strings. Please do not use other styles - +, % or .format unless f-string is absolutely unfeasible in the context, or it is a logger message, which is optimized for the % style\nRun make dev-fix to invoke flynt to convert other formats to f-string, pay extra care to possible corner cases leading to a semantically different conversion.\nQuotes As we know both single quotes and double quotes are both acceptable in Python. For a better coding style, we enforce a check for using single quotes when possible.\nPlease only use double quotes on the outside when there are inevitable single quotes inside the string, or when there are nest quotes.\nFor example -\nfoo = f\u0026#34;I\u0026#39;m a string\u0026#34; bar = f\u0026#34;This repo is called \u0026#39;skywalking-python\u0026#39;\u0026#34; Run make dev-fix to invoke unify to deal with your quotes if flake8 complaints about it.\nDebug messages Please import the logger_debug_enabled variable and wrap your debug messages with a check.\nThis should be done for all performance critical components.\nif logger_debug_enabled: logger.debug(\u0026#39;Message - %s\u0026#39;, some_func()) Imports Please make sure the imports are placed in a good order, or flake8-isort will notify you of the violations.\nRun make dev-fix to automatically fix the sorting problem.\nNaming In PEP8 convention, we are required to use snake_case as the accepted style.\nHowever, there are special cases. For example, you are overriding/monkey-patching a method which happens to use the old style camelCase naming, then it is acceptable to have the original naming convention to preserve context.\nPlease mark the line with # noqa to avoid linting.\n","title":"Coding Style for SkyWalking Python","url":"/docs/skywalking-python/next/en/contribution/codingstyle/"},{"content":"Coding Style for SkyWalking Python String formatting Since Python 3.5 is end of life, we fully utilize the clarity and performance boost brought by f-strings. Please do not use other styles - +, % or .format unless f-string is absolutely unfeasible in the context, or it is a logger message, which is optimized for the % style\nRun make dev-fix to invoke flynt to convert other formats to f-string, pay extra care to possible corner cases leading to a semantically different conversion.\nQuotes As we know both single quotes and double quotes are both acceptable in Python. 
For a better coding style, we enforce a check for using single quotes when possible.\nPlease only use double quotes on the outside when there are inevitable single quotes inside the string, or when there are nest quotes.\nFor example -\nfoo = f\u0026#34;I\u0026#39;m a string\u0026#34; bar = f\u0026#34;This repo is called \u0026#39;skywalking-python\u0026#39;\u0026#34; Run make dev-fix to invoke unify to deal with your quotes if flake8 complaints about it.\nDebug messages Please import the logger_debug_enabled variable and wrap your debug messages with a check.\nThis should be done for all performance critical components.\nif logger_debug_enabled: logger.debug(\u0026#39;Message - %s\u0026#39;, some_func()) Imports Please make sure the imports are placed in a good order, or flake8-isort will notify you of the violations.\nRun make dev-fix to automatically fix the sorting problem.\nNaming In PEP8 convention, we are required to use snake_case as the accepted style.\nHowever, there are special cases. For example, you are overriding/monkey-patching a method which happens to use the old style camelCase naming, then it is acceptable to have the original naming convention to preserve context.\nPlease mark the line with # noqa to avoid linting.\n","title":"Coding Style for SkyWalking Python","url":"/docs/skywalking-python/v1.0.1/en/contribution/codingstyle/"},{"content":"Collecting and Gathering Kubernetes Monitoring Data Motivation SkyWalking has provided an access log collector based on the Agent layer and Service Mesh layer, and can generate corresponding topology maps and metrics based on the data. However, the Kubernetes Layer still lacks corresponding access log collector and analysis work.\nThis proposal is dedicated to collecting and analyzing network access logs in Kubernetes.\nArchitecture Graph There is no significant architecture-level change. Still using the Rover project to collect data and report it to SkyWalking OAP using the gRPC protocol.\nPropose Changes Based on the content in Motivation, if we want to ignore the application types(different program languages) and only monitor network logs, using eBPF is a good choice. It mainly reflects in the following aspects:\n Non-intrusive: When monitoring network access logs with eBPF, the application do not need to make any changes to be monitored. Language-unrestricted: Regardless of which programming language is used in the application, network data will ultimately be accessed through Linux Syscalls. Therefore, we can monitor network data by attaching eBPF to the syscalls layer, thus ignoring programming languages. Kernel interception: Since eBPF can attach to the kernel methods, it can obtain the execution status of each packet at L2-L4 layers and generate more detailed metrics.  Based on these reasons and collected data, they can be implemented in SkyWalking Rover and collected and monitored based on the following steps:\n Monitor the network execution status of all processes in Kubernetes when the Rover system starts. Periodically report data content via gRPC protocol to SkyWalking OAP. SkyWalking OAP parses network access logs and generates corresponding network topology, metrics, etc.  Limitation For content that uses TLS for data transmission, Rover will detect whether the current language uses libraries such as OpenSSL. 
If it is used, it will asynchronously intercept relevant OpenSSL methods when the process starts to perceive the original data content.\nHowever, this approach is not feasible for Java because Java does not use the OpenSSL library but performs encryption/decryption through Java code. Currently, eBPF cannot intercept Java method calls. Therefore, it results in an inability to perceive the TLS data protocol in Java.\nService with Istio sidecar scenario If the Service is deployed in Istio sidecar, it will still monitor each process. If the Service is a Java service and uses TLS, it can analyze the relevant traffic generated in the sidecar (envoy).\nImported Dependencies libs and their licenses. No new library is planned to be added to the codebase.\nCompatibility About the protocol, there should be no breaking changes, but enhancements only:\n Rover: adding a new gRPC data collection protocol for reporting the access logs. OAP: It should have no protocol updates. The existing query protocols are already sufficient for querying Kubernetes topology and metric data.  Data Generation Entity  service_traffic     column data type value description     name string kubernetes service name   short_name string same with name   service_id string base64(name).1   group string empty string   layer string KUBERNETES     instance_traffic     column data type value description     service_id string base64(service_name).1   name string pod name   last_ping long last access log message timestamp(millisecond)   properties json empty string     endpoint_traffic     column data type value description     service_id string base64(service_name).1   name string access log endpoint name(for HTTP1, is URI)    Entity Relation All entity information is built on connections. If the target address is remote, the name will be resolved in the following order:\n If it is a pod IP, it will be resolved as pod information. If it is a service IP, it will be resolved as service information. If neither exists, only pod information will be displayed.  Different entities have different displays for remote addresses. Please refer to the following table.\n   table name remote info(display by following order)     service_relation service name, remote IP address   instance_relation pod name, remote IP address    NOTICE: If it is the internal data interaction within the pod, such as exchanging data between services and sidecar (envoy), no corresponding traffic will be generated. We only generate and interact with external pods.\nLimitation If the service IP is used to send requests to the upstream, we will use eBPF to perceive the real target PodIP by perceiving relevant conntrack records.\nHowever, if conntrack technology is not used, it is difficult to perceive the real target IP address. In this case, instance relation data of this kind will be dropped, but we will mark all discarded relationship generation counts through a metric for better understanding of the situation.\nMetrics Integrate the data into the OAL system and generate corresponding metrics through predefined data combined with OAL statements.\nGeneral usage docs This proposal will only add a module to Rover that explains the configuration of access logs, and changes in the Kubernetes module on the UI.\nIn the Kubernetes UI, users can see the following additions:\n Topology: A topology diagram showing the calling relationships between services, instances, and processes. Entity Metrics: Metric data for services, instances, and processes. 
Call Relationship Metrics: Metrics for call relationships between different entities.  ","title":"Collecting and Gathering Kubernetes Monitoring Data","url":"/docs/main/next/en/swip/swip-2/"},{"content":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","title":"Collecting File Log","url":"/docs/main/latest/en/setup/backend/filelog-native/"},{"content":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","title":"Collecting File Log","url":"/docs/main/next/en/setup/backend/filelog-native/"},{"content":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. 
Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","title":"Collecting File Log","url":"/docs/main/v9.5.0/en/setup/backend/filelog-native/"},{"content":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","title":"Collecting File Log","url":"/docs/main/v9.6.0/en/setup/backend/filelog-native/"},{"content":"Collecting File Log Application\u0026rsquo;s logs are important data for troubleshooting, usually they are persistent through local or network file system. SkyWalking provides ways to collect logs from those files by leveraging popular open-source tools.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). 
Point the output address to restHost:restPort of receiver-sharing-server or core (if receiver-sharing-server is not activated).\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  ","title":"Collecting File Log","url":"/docs/main/v9.7.0/en/setup/backend/filelog-native/"},{"content":"Collecting Logs by Agents Some of the SkyWalking native agents support collecting logs and sending them to the OAP server without local files and/or file agents; they are listed here.\nJava agent\u0026rsquo;s toolkits The Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nThe SkyWalking Satellite sidecar is a recommended proxy/sidecar that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable the enableNativeProtoLog config.\nThe Java agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter The SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","title":"Collecting Logs by Agents","url":"/docs/main/latest/en/setup/backend/log-agent-native/"},{"content":"Collecting Logs by Agents Some of the SkyWalking native agents support collecting logs and sending them to the OAP server without local files and/or file agents; they are listed here.\nJava agent\u0026rsquo;s toolkits The Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nThe SkyWalking Satellite sidecar is a recommended proxy/sidecar that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable the enableNativeProtoLog config.\nThe Java agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter The SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","title":"Collecting Logs by Agents","url":"/docs/main/next/en/setup/backend/log-agent-native/"},{"content":"Collecting Logs by Agents Some of the SkyWalking native agents support collecting logs and sending them to the OAP server without local files and/or file agents; they are listed here.\nJava agent\u0026rsquo;s toolkits The Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nThe SkyWalking Satellite sidecar is a recommended proxy/sidecar that forwards logs (including the use of Kafka MQ to transport logs). 
When using this, open kafka-fetcher and enable the enableNativeProtoLog config.\nThe Java agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter The SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","title":"Collecting Logs by Agents","url":"/docs/main/v9.5.0/en/setup/backend/log-agent-native/"},{"content":"Collecting Logs by Agents Some of the SkyWalking native agents support collecting logs and sending them to the OAP server without local files and/or file agents; they are listed here.\nJava agent\u0026rsquo;s toolkits The Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nThe SkyWalking Satellite sidecar is a recommended proxy/sidecar that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable the enableNativeProtoLog config.\nThe Java agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter The SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","title":"Collecting Logs by Agents","url":"/docs/main/v9.6.0/en/setup/backend/log-agent-native/"},{"content":"Collecting Logs by Agents Some of the SkyWalking native agents support collecting logs and sending them to the OAP server without local files and/or file agents; they are listed here.\nJava agent\u0026rsquo;s toolkits The Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nThe SkyWalking Satellite sidecar is a recommended proxy/sidecar that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable the enableNativeProtoLog config.\nThe Java agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter The SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\n","title":"Collecting Logs by Agents","url":"/docs/main/v9.7.0/en/setup/backend/log-agent-native/"},{"content":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment Key Description     logger.level INFO ROVER_LOGGER_LEVEL The lowest level of printing allowed.    Core Core is used to communicate with the backend server. It provides APIs for other modules to establish connections with the backend.\n   Name Default Environment Key Description     core.cluster_name  ROVER_CORE_CLUSTER_NAME The name of the cluster.   
core.backend.addr localhost:11800 ROVER_BACKEND_ADDR The backend server address.   core.backend.enable_TLS false ROVER_BACKEND_ENABLE_TLS The TLS switch.   core.backend.client_pem_path client.pem ROVER_BACKEND_PEM_PATH The file path of client.pem. The config only works when the TLS switch is enabled.   core.backend.client_key_path client.key ROVER_BACKEND_KEY_PATH The file path of client.key. The config only works when the TLS switch is enabled.   core.backend.insecure_skip_verify false ROVER_BACKEND_INSECURE_SKIP_VERIFY InsecureSkipVerify controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   core.backend.ca_pem_path ca.pem ROVER_BACKEND_CA_PEM_PATH The file path of ca.pem. The config only works when the TLS switch is enabled.   core.backend.check_period 5 ROVER_BACKEND_CHECK_PERIOD How frequently to check the connection (in seconds).   core.backend.authentication  ROVER_BACKEND_AUTHENTICATION The auth value used when sending requests.    ","title":"Common configuration","url":"/docs/skywalking-rover/latest/en/setup/configuration/common/"},{"content":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment Key Description     logger.level INFO ROVER_LOGGER_LEVEL The lowest level of printing allowed.    Core Core is used to communicate with the backend server. It provides APIs for other modules to establish connections with the backend.\n   Name Default Environment Key Description     core.cluster_name  ROVER_CORE_CLUSTER_NAME The name of the cluster.   core.backend.addr localhost:11800 ROVER_BACKEND_ADDR The backend server address.   core.backend.enable_TLS false ROVER_BACKEND_ENABLE_TLS The TLS switch.   core.backend.client_pem_path client.pem ROVER_BACKEND_PEM_PATH The file path of client.pem. The config only works when the TLS switch is enabled.   core.backend.client_key_path client.key ROVER_BACKEND_KEY_PATH The file path of client.key. The config only works when the TLS switch is enabled.   core.backend.insecure_skip_verify false ROVER_BACKEND_INSECURE_SKIP_VERIFY InsecureSkipVerify controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   core.backend.ca_pem_path ca.pem ROVER_BACKEND_CA_PEM_PATH The file path of ca.pem. The config only works when the TLS switch is enabled.   core.backend.check_period 5 ROVER_BACKEND_CHECK_PERIOD How frequently to check the connection (in seconds).   core.backend.authentication  ROVER_BACKEND_AUTHENTICATION The auth value used when sending requests.    ","title":"Common configuration","url":"/docs/skywalking-rover/next/en/setup/configuration/common/"},{"content":"Common configuration Logger Logger is used to configure the system log.\n   Name Default Environment Key Description     logger.level INFO ROVER_LOGGER_LEVEL The lowest level of printing allowed.    Core Core is used to communicate with the backend server. It provides APIs for other modules to establish connections with the backend.\n   Name Default Environment Key Description     core.cluster_name  ROVER_CORE_CLUSTER_NAME The name of the cluster.   core.backend.addr localhost:11800 ROVER_BACKEND_ADDR The backend server address.   core.backend.enable_TLS false ROVER_BACKEND_ENABLE_TLS The TLS switch.   core.backend.client_pem_path client.pem ROVER_BACKEND_PEM_PATH The file path of client.pem. The config only works when the TLS switch is enabled.   core.backend.client_key_path client.key ROVER_BACKEND_KEY_PATH The file path of client.key. The config only works when the TLS switch is enabled.   
core.backend.insecure_skip_verify false ROVER_BACKEND_INSECURE_SKIP_VERIFY InsecureSkipVerify controls whether a client verifies the server\u0026rsquo;s certificate chain and host name.   core.backend.ca_pem_path ca.pem ROVER_BACKEND_CA_PEM_PATH The file path of ca.pem. The config only works when the TLS switch is enabled.   core.backend.check_period 5 ROVER_BACKEND_CHECK_PERIOD How frequently to check the connection (in seconds).   core.backend.authentication  ROVER_BACKEND_AUTHENTICATION The auth value used when sending requests.    ","title":"Common configuration","url":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/common/"},{"content":"Common configuration The common configuration has two parts: the logger configuration and the telemetry configuration.\nLogger    Config Default Description     log_pattern %time [%level][%field] - %msg The log format pattern configuration.   time_pattern 2006-01-02 15:04:05.000 The time format pattern configuration.   level info The lowest level of printing allowed.    Self Telemetry    Config Default Description     cluster default-cluster The space concept for the deployment, such as the namespace concept in Kubernetes.   service default-service The group concept for the deployment, such as the service resource concept in Kubernetes.   instance default-instance The minimum running unit, such as the pod concept in Kubernetes.    ","title":"Common configuration","url":"/docs/skywalking-satellite/latest/en/setup/configuration/common/"},{"content":"Common configuration The common configuration has two parts: the logger configuration and the telemetry configuration.\nLogger    Config Default Description     log_pattern %time [%level][%field] - %msg The log format pattern configuration.   time_pattern 2006-01-02 15:04:05.000 The time format pattern configuration.   level info The lowest level of printing allowed.    Self Telemetry    Config Default Description     cluster default-cluster The space concept for the deployment, such as the namespace concept in Kubernetes.   service default-service The group concept for the deployment, such as the service resource concept in Kubernetes.   instance default-instance The minimum running unit, such as the pod concept in Kubernetes.    ","title":"Common configuration","url":"/docs/skywalking-satellite/next/en/setup/configuration/common/"},{"content":"Common configuration The common configuration has two parts: the logger configuration and the telemetry configuration.\nLogger    Config Default Description     log_pattern %time [%level][%field] - %msg The log format pattern configuration.   time_pattern 2006-01-02 15:04:05.000 The time format pattern configuration.   level info The lowest level of printing allowed.    Self Telemetry    Config Default Description     cluster default-cluster The space concept for the deployment, such as the namespace concept in Kubernetes.   service default-service The group concept for the deployment, such as the service resource concept in Kubernetes.   instance default-instance The minimum running unit, such as the pod concept in Kubernetes.    ","title":"Common configuration","url":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/common/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/latest/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/next/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the same versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.4.0   9.0.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.4.0    Ecosystem Agents All following agent implementations are a part of SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky PHP agent     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0 \u0026gt; = 3.0.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0    All these projects are maintained by their own communities, please reach them if you face any compatibility issue.\n All above compatibility are only references, if you face unimplemented error, it means you need to upgrade OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.0.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agnet) Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky PHP agent     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0 \u0026gt; = 3.0.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 3.0.0    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.1.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agnet) Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky PHP agent     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0 \u0026gt; = 3.0.0 \u0026amp;\u0026amp; \u0026lt; 5.0.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 5.0.0   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All \u0026gt; = 5.0.0    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.2.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agent) Satellite PHP     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No All   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0 All    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.3.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agent) Satellite PHP     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No All   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0 All    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.4.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust Rover(ebpf agent) Satellite PHP     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All No No All   8.9.0+ \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.0.0 \u0026gt; = 8.0.0 All All All All All All No \u0026gt; = 0.4.0 All   9.1.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 1.0.0 All   9.5.0+ \u0026gt; = 8.0.0 All All All All All All \u0026gt; = 0.5.0 \u0026gt; = 1.2.0 All    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet Go2sky cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 0.4.0 - 0.6.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All   9.0.0+ \u0026gt; = 1.0.0 \u0026gt; = 0.4.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.5.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. 
Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.6.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility SkyWalking 8.0+ uses v3 protocols. Agents don\u0026rsquo;t have to keep the identical versions as the OAP backend.\nSkyWalking Native Agents    OAP Server Version Java Python NodeJS LUA Kong Browser Agent Rust PHP Go Rover Satellite     8.0.1 - 8.1.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All No All No No No No   8.2.0 - 8.3.0 8.0.0 - 8.3.0 \u0026lt; = 0.6.0 \u0026lt; = 0.3.0 All All All All No No No No   8.4.0 - 8.8.1 \u0026gt; = 8.0.0 All All All All All All All No No No   8.9.0+ \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.0.0 \u0026gt; = 8.0.0 All All All All All All All No No \u0026gt; = 0.4.0   9.1.0+ \u0026gt; = 8.0.0 All All All All All All All No \u0026gt; = 0.1.0 \u0026gt; = 1.0.0   9.5.0+ \u0026gt; = 8.0.0 \u0026amp; \u0026gt; = 9.0.0 All All All All All All All \u0026gt; = 0.1.0 \u0026gt; = 0.5.0 \u0026gt; = 1.2.0    Ecosystem Agents All following agent implementations are a part of the SkyWalking ecosystem. All the source codes and their distributions don\u0026rsquo;t belong to the Apache Software Foundation.\n   OAP Server Version DotNet cpp2sky     8.0.1 - 8.3.0 1.0.0 - 1.3.0 \u0026lt; = 0.2.0   8.4.0+ \u0026gt; = 1.0.0 All   9.0.0+ \u0026gt; = 1.0.0 All    All these projects are maintained by their own communities, and please reach them if you face any compatibility issues.\n All above compatibility are only references, and if you face an unimplemented error, it means you need to upgrade the OAP backend to support newer features in the agents.\n","title":"Compatibility","url":"/docs/main/v9.7.0/en/setup/service-agent/agent-compatibility/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. 
https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/latest/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. 
Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/next/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. 
Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.0.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. 
agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.1.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. 
When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.2.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. 
-Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.3.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.4.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. 
https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.5.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. 
Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.6.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compatibility with other Java agent bytecode processes Problem   When using the SkyWalking agent, some other agents, such as Arthas, can\u0026rsquo;t work properly. https://github.com/apache/skywalking/pull/4858\n  The retransform classes in the Java agent conflict with the SkyWalking agent, as illustrated in this demo\n  Cause The SkyWalking agent uses ByteBuddy to transform classes when the Java application starts. ByteBuddy generates auxiliary classes with different random names every time.\nWhen another Java agent retransforms the same class, it triggers the SkyWalking agent to enhance the class again. Since the bytecode has been regenerated by ByteBuddy, the fields and imported class names have been modified, and the JVM verifications on class bytecode have failed, the retransform classes would therefore be unsuccessful.\nResolution 1. 
Enable the class cache feature\nAdd JVM parameters:\n-Dskywalking.agent.is_cache_enhanced_class=true -Dskywalking.agent.class_cache_mode=MEMORY\nOr uncomment the following options in agent.conf:\n# If true, the SkyWalking agent will cache all instrumented classes files to memory or disk files (as determined by the class cache mode), # Allow other Java agents to enhance those classes that are enhanced by the SkyWalking agent. agent.is_cache_enhanced_class = ${SW_AGENT_CACHE_CLASS:false} # The instrumented classes cache mode: MEMORY or FILE # MEMORY: cache class bytes to memory; if there are too many instrumented classes or if their sizes are too large, it may take up more memory # FILE: cache class bytes to user temp folder starts with 'class-cache', and automatically clean up cached class files when the application exits agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} If the class cache feature is enabled, save the instrumented class bytecode to memory or a temporary file. When other Java agents retransform the same class, the SkyWalking agent first attempts to load from the cache.\nIf the cached class is found, it will be used directly without regenerating an auxiliary class with a new random name. Then, the process of the subsequent Java agent will not be affected.\n2. Class cache save mode\nWe recommend saving cache classes to memory, if it takes up more memory space. Alternatively, you can use the local file system. Set the class cache mode in one of the folliwng ways:\n-Dskywalking.agent.class_cache_mode=MEMORY : save cache classes to Java memory. -Dskywalking.agent.class_cache_mode=FILE : save cache classes to SkyWalking agent path \u0026lsquo;/class-cache\u0026rsquo;.\nOr modify these options in agent.conf:\nagent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:MEMORY} agent.class_cache_mode = ${SW_AGENT_CLASS_CACHE_MODE:FILE}\n","title":"Compatibility with other Java agent bytecode processes","url":"/docs/main/v9.7.0/en/faq/compatible-with-other-javaagent-bytecode-processing/"},{"content":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux version \u0026gt;= 4.4, and dependency these tools:\n llvm \u0026gt;= 13. libbpf-dev.  MacOS or Windows Make sure it already has a docker environment.\nCommand git clone https://github.com/apache/skywalking-rover cd skywalking-rover # Linux platform make generate build # MacOS or Windows make container-generate build ","title":"Compiling","url":"/docs/skywalking-rover/latest/en/guides/compile/how-to-compile/"},{"content":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux version \u0026gt;= 4.4, and dependency these tools:\n llvm \u0026gt;= 13. libbpf-dev.  MacOS or Windows Make sure it already has a docker environment.\nCommand git clone https://github.com/apache/skywalking-rover cd skywalking-rover # Linux platform make generate build # MacOS or Windows make container-generate build ","title":"Compiling","url":"/docs/skywalking-rover/next/en/guides/compile/how-to-compile/"},{"content":"Compiling Go version Go version 1.18 or higher is supported for compilation.\nPlatform Linux Linux version \u0026gt;= 4.4, and dependency these tools:\n llvm \u0026gt;= 13. libbpf-dev.  
MacOS or Windows Make sure it already has a docker environment.\nCommand git clone https://github.com/apache/skywalking-rover cd skywalking-rover # Linux platform make generate build # MacOS or Windows make container-generate build ","title":"Compiling","url":"/docs/skywalking-rover/v0.6.0/en/guides/compile/how-to-compile/"},{"content":"Compiling Go version Go version 1.18 and 1.19 are supported for compilation.\nPlatform Linux, MacOS and Windows are supported in SkyWalking Satellite. However, some components don\u0026rsquo;t fit the Windows platform, including:\n mmap-queue  Command git clone https://github.com/apache/skywalking-satellite cd skywalking-satellite make build ","title":"Compiling","url":"/docs/skywalking-satellite/latest/en/guides/compile/how-to-compile/"},{"content":"Compiling Go version Go version 1.19 is required for compilation.\nPlatform Linux, MacOS and Windows are supported in SkyWalking Satellite. However, some components don\u0026rsquo;t fit the Windows platform, including:\n mmap-queue  Command git clone https://github.com/apache/skywalking-satellite cd skywalking-satellite make build ","title":"Compiling","url":"/docs/skywalking-satellite/next/en/guides/compile/how-to-compile/"},{"content":"Compiling Go version Go version 1.18 and 1.19 are supported for compilation.\nPlatform Linux, MacOS and Windows are supported in SkyWalking Satellite. However, some components don\u0026rsquo;t fit the Windows platform, including:\n mmap-queue  Command git clone https://github.com/apache/skywalking-satellite cd skywalking-satellite make build ","title":"Compiling","url":"/docs/skywalking-satellite/v1.2.0/en/guides/compile/how-to-compile/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. 
After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/latest/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/next/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. 
You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.0.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.1.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. 
[ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.2.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. 
After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.3.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.4.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. 
You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.5.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. [ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.6.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling issues on Mac\u0026rsquo;s M1 chip Problem  When compiling according to How-to-build, The following problems may occur, causing the build to fail.  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.6.1:compile (grpc-build) on project apm-network: Unable to resolve artifact: Missing: [ERROR] ---------- [ERROR] 1) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] Try downloading the file manually from the project website. 
[ERROR] [ERROR] Then, install it using the command: [ERROR] mvn install:install-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file [ERROR] [ERROR] Alternatively, if you host your own repository you can deploy the file there: [ERROR] mvn deploy:deploy-file -DgroupId=com.google.protobuf -DartifactId=protoc -Dversion=3.12.0 -Dclassifier=osx-aarch_64 -Dpackaging=exe -Dfile=/path/to/file -Durl=[url] -DrepositoryId=[id] [ERROR] [ERROR] Path to dependency: [ERROR] 1) org.apache.skywalking:apm-network:jar:8.4.0-SNAPSHOT [ERROR] 2) com.google.protobuf:protoc:exe:osx-aarch_64:3.12.0 [ERROR] [ERROR] ---------- [ERROR] 1 required artifact is missing. Reason The dependent Protocol Buffers v3.14.0 does not come with an osx-aarch_64 version. You may find the osx-aarch_64 version at the Protocol Buffers Releases link here: https://github.com/protocolbuffers/protobuf/releases. Since Mac\u0026rsquo;s M1 is compatible with the osx-x86_64 version, before this version is available for downloading, you need to manually specify the osx-x86_64 version.\nResolution You may add -Dos.detected.classifier=osx-x86_64 after the original compilation parameters, such as: ./mvnw clean package -DskipTests -Dos.detected.classifier=osx-x86_64. After specifying the version, compile and run normally.\n","title":"Compiling issues on Mac's M1 chip","url":"/docs/main/v9.7.0/en/faq/how-to-build-with-mac-m1/"},{"content":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. 
make docker.push.alpine docker.push.java8 docker.push.java11   ","title":"Compiling project","url":"/docs/skywalking-java/latest/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. make docker.push.alpine docker.push.java8 docker.push.java11   ","title":"Compiling project","url":"/docs/skywalking-java/next/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 8+.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. 
make docker.push.alpine docker.push.java8 docker.push.java11   ","title":"Compiling project","url":"/docs/skywalking-java/v9.0.0/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. make docker.push.alpine docker.push.java8 docker.push.java11   ","title":"Compiling project","url":"/docs/skywalking-java/v9.1.0/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build a project in your maven and set your IDE.\nPrepare JDK 17 or 21.\n If you clone codes from https://github.com/apache/skywalking-java  git clone https://github.com/apache/skywalking-java.git cd skywalking-java ./mvnw clean package -Pall  If you download source codes tar from https://skywalking.apache.org/downloads/  ./mvnw clean package The agent binary package is generated in skywalking-agent folder.\nSet Generated Source Codes(grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf) folders if you are using IntelliJ IDE.\nBuilding Docker images After you have compiled the project and have generated the skywalking-agent folder, you can build Docker images. [make docker] builds the agent Docker images based on alpine image, java8, java11 and java 17 images by default. If you want to only build part of the images, add suffix .alpine or .java\u0026lt;x\u0026gt; to the make target, for example:\n Build Docker images based on alpine, Java 8 and Java 11. make docker.alpine docker.java8 docker.java11   You can also customize the Docker registry and Docker image names by specifying the variable HUB, NAME.\n Set private Docker registry to gcr.io/skywalking and custom name to sw-agent. make docker.alpine HUB=gcr.io/skywalking NAME=sw-agent This will name the Docker image to gcr.io/skywalking/sw-agent:latest-alpine\n  If you want to push the Docker images, add suffix to the make target docker., for example:\n Build and push images based on alpine, Java 8 and Java 11. 
make docker.push.alpine docker.push.java8 docker.push.java11   ","title":"Compiling project","url":"/docs/skywalking-java/v9.2.0/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and Rust environments.\nInstall PHP Environment For Debian user:\nsudo apt install php-cli php-dev For MacOS user:\nbrew install php Install Rust Environment Install Rust 1.65.0+.\nFor Linux user:\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh For MacOS user:\nbrew install rust Install requirement For Debian user:\nsudo apt install gcc make llvm-dev libclang-dev clang protobuf-compiler For MacOS user:\nbrew install protobuf Build and install Skywalking PHP Agent from archive file For Linux user:\nsudo pecl install skywalking_agent-x.y.z.tgz For MacOS user:\n Running the pecl install command with the php installed in brew may encounter the problem of mkdir, please refer to Installing PHP and PECL Extensions on MacOS.\n pecl install skywalking_agent-x.y.z.tgz The extension file skywalking_agent.so is generated in the php extension folder, get it by run php-config --extension-dir.\n","title":"Compiling project","url":"/docs/skywalking-php/latest/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and Rust environments.\nInstall PHP Environment For Debian user:\nsudo apt install php-cli php-dev For MacOS user:\nbrew install php Install Rust Environment Install Rust 1.65.0+.\nFor Linux user:\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh For MacOS user:\nbrew install rust Install requirement For Debian user:\nsudo apt install gcc make llvm-dev libclang-dev clang protobuf-compiler For MacOS user:\nbrew install protobuf Build and install Skywalking PHP Agent from archive file For Linux user:\nsudo pecl install skywalking_agent-x.y.z.tgz For MacOS user:\n Running the pecl install command with the php installed in brew may encounter the problem of mkdir, please refer to Installing PHP and PECL Extensions on MacOS.\n pecl install skywalking_agent-x.y.z.tgz The extension file skywalking_agent.so is generated in the php extension folder, get it by run php-config --extension-dir.\n","title":"Compiling project","url":"/docs/skywalking-php/next/en/contribution/compiling/"},{"content":"Compiling project This document will help you compile and build the package file.\nPrepare PHP and Rust environments.\nInstall PHP Environment For Debian user:\nsudo apt install php-cli php-dev For MacOS user:\nbrew install php Install Rust Environment Install Rust 1.65.0+.\nFor Linux user:\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh For MacOS user:\nbrew install rust Install requirement For Debian user:\nsudo apt install gcc make llvm-dev libclang-dev clang protobuf-compiler For MacOS user:\nbrew install protobuf Build and install Skywalking PHP Agent from archive file For Linux user:\nsudo pecl install skywalking_agent-x.y.z.tgz For MacOS user:\n Running the pecl install command with the php installed in brew may encounter the problem of mkdir, please refer to Installing PHP and PECL Extensions on MacOS.\n pecl install skywalking_agent-x.y.z.tgz The extension file skywalking_agent.so is generated in the php extension folder, get it by run php-config --extension-dir.\n","title":"Compiling 
project","url":"/docs/skywalking-php/v0.7.0/en/contribution/compiling/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/latest/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  
ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/next/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.0.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. 
For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.1.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. 
The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.2.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.3.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. 
This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.4.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. 
The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.5.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.6.0/en/guides/component-library-settings/"},{"content":"Component library settings Component library settings are about your own or third-party libraries used in the monitored application.\nIn agent or SDK, regardless of whether the library name is collected as ID or String (literally, e.g. SpringMVC), the collector formats data in ID for better performance and less storage requirements.\nAlso, the collector conjectures the remote service based on the component library. For example: if the component library is MySQL Driver library, then the remote service should be MySQL Server.\nFor these two reasons, the collector requires two parts of settings in this file:\n Component library ID, names and languages. Remote server mapping based on the local library.  All component names and IDs must be defined in this file.\nComponent Library ID Define all names and IDs from component libraries which are used in the monitored application. 
This uses a two-way mapping strategy. The agent or SDK could use the value (ID) to represent the component name in uplink data.\n Name: the component name used in agent and UI ID: Unique ID. All IDs are reserved once they are released. Languages: Program languages may use this component. Multi languages should be separated by ,.  ID rules  Java and multi languages shared: (0, 3000) .NET Platform reserved: [3000, 4000) Node.js Platform reserved: [4000, 5000) Go reserved: [5000, 6000) Lua reserved: [6000, 7000) Python reserved: [7000, 8000) PHP reserved: [8000, 9000) C++ reserved: [9000, 10000) Javascript reserved: [10000, 11000) Rust reserved: [11000, 12000)  Example:\nTomcat:id:1languages:JavaHttpClient:id:2languages:Java,C#,Node.jsDubbo:id:3languages:JavaH2:id:4languages:JavaRemote server mapping The remote server will be conjectured by the local component. The mappings are based on names in the component library.\n Key: client component library name Value: server component name  Component-Server-Mappings:Jedis:RedisStackExchange.Redis:RedisRedisson:RedisLettuce:RedisZookeeper:ZookeeperSqlClient:SqlServerNpgsql:PostgreSQLMySqlConnector:MysqlEntityFrameworkCore.InMemory:InMemoryDatabase","title":"Component library settings","url":"/docs/main/v9.7.0/en/guides/component-library-settings/"},{"content":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E and the landscape.\n What is SkyWalking Infra E2E?  Project Goals. Provides the goals, which SkyWalking Infra E2E is trying to focus on and provides features about them.    After you read the above documents, you should understand the basic goals of the SkyWalking Infra E2E. Now, you can choose which following parts you are interested, then dive in.\n Module Design  ","title":"Concepts and Designs","url":"/docs/skywalking-infra-e2e/latest/en/concepts-and-designs/readme/"},{"content":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E and the landscape.\n What is SkyWalking Infra E2E?  Project Goals. Provides the goals, which SkyWalking Infra E2E is trying to focus on and provides features about them.    After you read the above documents, you should understand the basic goals of the SkyWalking Infra E2E. Now, you can choose which following parts you are interested, then dive in.\n Module Design  ","title":"Concepts and Designs","url":"/docs/skywalking-infra-e2e/next/en/concepts-and-designs/readme/"},{"content":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Infra E2E and the landscape.\n What is SkyWalking Infra E2E?  Project Goals. Provides the goals, which SkyWalking Infra E2E is trying to focus on and provides features about them.    After you read the above documents, you should understand the basic goals of the SkyWalking Infra E2E. Now, you can choose which following parts you are interested, then dive in.\n Module Design  ","title":"Concepts and Designs","url":"/docs/skywalking-infra-e2e/v1.3.0/en/concepts-and-designs/readme/"},{"content":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite and the landscape.\n What is SkyWalking Satellite?  Overview and Core concepts. Provides a high-level description and introduction, including the problems the project solves. Project Goals. Provides the goals, which SkyWalking Satellite is trying to focus and provide features about them.    
After you read the above documents, you should understand basic goals of the SkyWalking Satellite. Now, you can choose which following parts you are interested, then dive in.\n Module Design Plugin Mechanism Project Structure Memory mapped Queue  ","title":"Concepts and Designs","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/readme/"},{"content":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite and the landscape.\n What is SkyWalking Satellite?  Overview and Core concepts. Provides a high-level description and introduction, including the problems the project solves. Project Goals. Provides the goals, which SkyWalking Satellite is trying to focus and provide features about them.    After you read the above documents, you should understand basic goals of the SkyWalking Satellite. Now, you can choose which following parts you are interested, then dive in.\n Module Design Plugin Mechanism Project Structure Memory mapped Queue  ","title":"Concepts and Designs","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/readme/"},{"content":"Concepts and Designs Concepts and Designs help you to learn and understand the SkyWalking Satellite and the landscape.\n What is SkyWalking Satellite?  Overview and Core concepts. Provides a high-level description and introduction, including the problems the project solves. Project Goals. Provides the goals, which SkyWalking Satellite is trying to focus and provide features about them.    After you read the above documents, you should understand basic goals of the SkyWalking Satellite. Now, you can choose which following parts you are interested, then dive in.\n Module Design Plugin Mechanism Project Structure Memory mapped Queue  ","title":"Concepts and Designs","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/readme/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. 
SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. 
Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. 
SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. 
SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. 
- Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - targets Hosts with ports of the BanyanDB. SW_STORAGE_BANYANDB_TARGETS 127.0.0.1:17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. 
SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. 
SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. 
SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. 
SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allows users to add, disable, and update UI templates. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog On-demand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any form. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for the Zipkin query API and supports the zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DCS server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DCS server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. 
SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). 
SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   debugging-query default       - - keywords4MaskingSecretsOfConfig Include the list of keywords to filter configurations including secrets. Separate keywords by a comma. SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS user,password,token,accessKey,secretKey,authentication   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 response SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/latest/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. 
SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. 
SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE Default to gRPC\u0026rsquo;s implementation, which is a cached thread pool that can grow infinitely.   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   - - enableHierarchy If disable the hierarchy, the service and instance hierarchy relation will not be built. And the query of hierarchy will return empty result. All the hierarchy relations are defined in the hierarchy-definition.yml. Notice: some of the configurations only available for kubernetes environments. SW_CORE_ENABLE_HIERARCHY true   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. 
SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. 
SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. 
SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - targets Hosts with ports of the BanyanDB. SW_STORAGE_BANYANDB_TARGETS 127.0.0.1:17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. 
FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE Default to gRPC\u0026rsquo;s implementation, which is a cached thread pool that can grow infinitely.   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. 
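The agent-analyzer settings above can be sketched the same way; this is an illustrative fragment using the documented defaults and the SW_AGENT_ANALYZER selector from the table, not the complete provider configuration:

```yaml
agent-analyzer:
  selector: ${SW_AGENT_ANALYZER:default}
  default:
    traceSamplingPolicySettingsFile: ${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}
    slowDBAccessThreshold: ${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100}   # milliseconds
    forceSampleErrorSegment: ${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}
    segmentStatusAnalysisStrategy: ${SW_SEGMENT_STATUS_ANALYSIS_STRATEGY:FROM_SPAN_STATUS}
```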
SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}.${pod.metadata.namespace}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}.${serviceEntry.metadata.namespace}   - - istioServiceEntryIgnoredNamespaces When looking up service informations from the Istio ServiceEntries, some of the ServiceEntries might be created in several namespaces automatically by some components, and OAP will randomly pick one of them to build the service name, users can use this config to exclude ServiceEntries that they don\u0026rsquo;t want to be used. Comma separated. SW_ISTIO_SERVICE_ENTRY_IGNORED_NAMESPACES -   - - gRPCHost Binding IP of gRPC service for Envoy access log service. SW_ALS_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC service for Envoy access log service. SW_ALS_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_ALS_GRPC_THREAD_POOL_SIZE Default to gRPC\u0026rsquo;s implementation, which is a cached thread pool that can grow infinitely.   - - gRPCSslEnabled Activates SSL for gRPC services. 
SW_ALS_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_ALS_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_ALS_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_ALS_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_ALS_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. 
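As an illustration of how the Zipkin receiver settings above fit together, here is a sketch using the documented defaults; the selector environment variable name is an assumption, and the module is typically inactive until it is selected:

```yaml
receiver-zipkin:
  selector: ${SW_RECEIVER_ZIPKIN:default}      # assumed env name; select to activate
  default:
    sampleRate: ${SW_ZIPKIN_SAMPLE_RATE:10000}                 # 10000 = 100%
    searchableTracesTags: ${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}
    enableHttpCollector: ${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}
    restHost: ${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}
    restPort: ${SW_RECEIVER_ZIPKIN_REST_PORT:9411}
    enableKafkaCollector: ${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:false}
    kafkaBootstrapServers: ${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}
    kafkaGroupId: ${SW_ZIPKIN_KAFKA_GROUP_ID:zipkin}
    kafkaTopic: ${SW_ZIPKIN_KAFKA_TOPIC:zipkin}
```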
SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error logs. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allows users to add, disable, and update UI templates. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog On-demand Pod logs: fetch the Pod logs on users' demand; the logs are fetched and displayed in real time and are not persisted in any way. This is helpful when users want to run some experiments and monitor the logs to see what\u0026rsquo;s happening inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default; please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for the Zipkin query API and supports the zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. 
SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in milliseconds SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in milliseconds SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for the PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_PROMQL_REST_QUEUE_SIZE 0   - - buildInfoVersion Mock version for API buildInfo SW_PROMQL_BUILD_INFO_VERSION 2.45.0   - - buildInfoRevision Mock revision for API buildInfo SW_PROMQL_BUILD_INFO_REVISION    - - buildInfoBranch Mock branch for API buildInfo SW_PROMQL_BUILD_INFO_BRANCH    - - buildInfoBuildUser Mock build user for API buildInfo SW_PROMQL_BUILD_INFO_BUILD_USER    - - buildInfoBuildDate Mock build date for API buildInfo SW_PROMQL_BUILD_INFO_BUILD_DATE    - - buildInfoGoVersion Mock Go version for API buildInfo SW_PROMQL_BUILD_INFO_GO_VERSION    alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No-op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from the DCS server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from the DCS server by the OAP (in seconds). SW_DCS_PERIOD 20   - - maxInboundMessageSize The max inbound message size of gRPC. SW_DCS_MAX_INBOUND_MESSAGE_SIZE 4194304   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). 
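The PromQL API settings above, sketched with their documented defaults (illustrative; the selector environment variable name is an assumption):

```yaml
promql:
  selector: ${SW_PROMQL:default}               # assumed env name
  default:
    restHost: ${SW_PROMQL_REST_HOST:0.0.0.0}
    restPort: ${SW_PROMQL_REST_PORT:9090}
    restContextPath: ${SW_PROMQL_REST_CONTEXT_PATH:/}
    restMaxThreads: ${SW_PROMQL_REST_MAX_THREADS:200}
    restIdleTimeOut: ${SW_PROMQL_REST_IDLE_TIMEOUT:30000}      # milliseconds
    buildInfoVersion: ${SW_PROMQL_BUILD_INFO_VERSION:2.45.0}   # mocked Prometheus version
```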
SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   debugging-query default       - - keywords4MaskingSecretsOfConfig Include the list of keywords to filter configurations including secrets. Separate keywords by a comma. SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS user,password,token,accessKey,secretKey,authentication   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. 
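Taking the Zookeeper provider as an example, the dynamic configuration settings listed above map onto the configuration module roughly as follows; this is a sketch, and choosing zookeeper in the selector (and the SW_CONFIGURATION name) is only for illustration:

```yaml
configuration:
  selector: ${SW_CONFIGURATION:zookeeper}      # pick one provider; illustration only
  zookeeper:
    namespace: ${SW_CONFIG_ZK_NAMESPACE:/}
    hostPort: ${SW_CONFIG_ZK_HOST_PORT:localhost:2181}
    baseSleepTimeMs: ${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}
    maxRetries: ${SW_CONFIG_ZK_MAX_RETRIES:3}
    period: ${SW_CONFIG_ZK_PERIOD:60}          # data sync period, seconds
```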
- -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Maximum thread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 request SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/next/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMinThreads Minimum thread number of RESTful services. SW_CORE_REST_JETTY_MIN_THREADS 1   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_JETTY_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_JETTY_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_JETTY_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. 
SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - enableDatabaseSession Cache metrics data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute. SW_CORE_ENABLE_DATABASE_SESSION true   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_TAG_KEYS http.method,status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). 
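A few of the core settings above, sketched with their documented defaults; this is illustrative and far from the complete core section, and the SW_CORE selector name is an assumption:

```yaml
core:
  selector: ${SW_CORE:default}
  default:
    role: ${SW_CORE_ROLE:Mixed}                # Mixed/Receiver/Aggregator
    restHost: ${SW_CORE_REST_HOST:0.0.0.0}
    restPort: ${SW_CORE_REST_PORT:12800}
    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
    recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3}    # days
    metricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7}  # days
```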
- 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI true   - - maxDurationOfAnalyzeEBPFProfiling The maximum duration(in minute) of analyze the eBPF profiling data. - 10   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. 
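The Zookeeper cluster-coordination settings above, as a sketch; the default provider is standalone, so selecting zookeeper here (and the SW_CLUSTER name) is only for illustration:

```yaml
cluster:
  selector: ${SW_CLUSTER:zookeeper}            # illustration; standalone is the default
  zookeeper:
    namespace: ${SW_NAMESPACE:/}
    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000}
    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3}
```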
SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_SUPERDATASET_STORAGE_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. 
SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - driver H2 JDBC driver. SW_STORAGE_H2_DRIVER org.h2.jdbcx.JdbcDataSource   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In H2, we use multiple physical columns to host the values: e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have the same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. 
This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5 SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - influxdb - InfluxDB storage. - -   - - url InfluxDB connection URL. SW_STORAGE_INFLUXDB_URL http://localhost:8086   - - user User name of InfluxDB. SW_STORAGE_INFLUXDB_USER root   - - password Password of InfluxDB. SW_STORAGE_INFLUXDB_PASSWORD -   - - database Database of InfluxDB. SW_STORAGE_INFLUXDB_DATABASE skywalking   - - actions The number of actions to collect. SW_STORAGE_INFLUXDB_ACTIONS 1000   - - duration The maximum waiting time (in milliseconds). SW_STORAGE_INFLUXDB_DURATION 1000   - - batchEnabled If true, write points with batch API. SW_STORAGE_INFLUXDB_BATCH_ENABLED true   - - fetchTaskLogMaxSize The maximum number of fetch task log in a request. SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE 5000   - - connectionResponseFormat The response format of connection to influxDB. It can only be MSGPACK or JSON. SW_STORAGE_INFLUXDB_CONNECTION_RESPONSE_FORMAT MSGPACK   - iotdb - IoTDB storage. - -   - - host The host of IoTDB server. SW_STORAGE_IOTDB_HOST 127.0.0.1   - - rpcPort The port listened by IoTDB server. SW_STORAGE_IOTDB_RPC_PORT 6667   - - username The username of IoTDB SW_STORAGE_IOTDB_USERNAME root   - - password The password of IoTDB SW_STORAGE_IOTDB_PASSWORD root   - - storageGroup The path of Storage Group and it must start with root. SW_STORAGE_IOTDB_STORAGE_GROUP root.skywalking   - - sessionPoolSize The connection pool size for IoTDB. If the value is 0, the size of SessionPool will be 2 * CPU_Cores SW_STORAGE_IOTDB_SESSIONPOOL_SIZE 8   - - fetchTaskLogMaxSize the max number of fetch task log in a request SW_STORAGE_IOTDB_FETCH_TASK_LOG_MAX_SIZE 1000   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). 
SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMinThreads Minimum thread number of RESTful services. SW_RECEIVER_SHARING_JETTY_MIN_THREADS 1   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_JETTY_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_JETTY_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_JETTY_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. 
SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOcRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OC_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_CONTEXT_PATH /   prometheus-fetcher default Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine. - -    - - enabledRules Enabled rules. SW_PROMETHEUS_FETCHER_ENABLED_RULES self   - - maxConvertWorker The maximize meter convert worker. SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER -1(by default, half the number of CPU core(s))   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. 
You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - consumePartitions Indicates which PartitionId(s) of the topics is/are assigned to the OAP server. Separated by commas if multiple. SW_KAFKA_FETCHER_CONSUME_PARTITIONS -   - - isSharding True when OAP Server is in cluster. SW_KAFKA_FETCHER_IS_SHARDING false   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 100   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. 
SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter grpc targetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - targetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. 
The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.0.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - enableDatabaseSession Cache metrics data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute. 
SW_CORE_ENABLE_DATABASE_SESSION true   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). 
SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). 
SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_SUPERDATASET_STORAGE_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. 
SW_STORAGE_ES_ADVANCED -   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - driver H2 JDBC driver. SW_STORAGE_H2_DRIVER org.h2.jdbcx.JdbcDataSource   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5 SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. 
SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. 
SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOcRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OC_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - restHost Binding IP of RESTful services. 
SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   prometheus-fetcher default Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine. - -    - - enabledRules Enabled rules. SW_PROMETHEUS_FETCHER_ENABLED_RULES self   - - maxConvertWorker The maximize meter convert worker. SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER -1(by default, half the number of CPU core(s))   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. 
- - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query graphql - GraphQL query implementation. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. 
SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter grpc targetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - targetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. 
The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.1.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - enableDatabaseSession Cache metrics data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute. 
SW_CORE_ENABLE_DATABASE_SESSION true   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). 
SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). 
SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_SUPERDATASET_STORAGE_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. 
SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - driver H2 JDBC driver. SW_STORAGE_H2_DRIVER org.h2.jdbcx.JdbcDataSource   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In MySQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5. SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfArrayColumn Some entities (e.g. trace segments) include the logic column with multiple values. In PostgreSQL, we use multiple physical columns to host the values, e.g. change column_a with values [1,2,3,4,5] to column_a_0 = 1, column_a_1 = 2, column_a_2 = 3 , column_a_3 = 4, column_a_4 = 5 SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN 20   - - numOfSearchableValuesPerTag In a trace segment, this includes multiple spans with multiple tags. Different spans may have same tag key, e.g. multiple HTTP exit spans all have their own http.method tags. This configuration sets the limit on the maximum number of values for the same tag key. SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG 2   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. 
SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of the flush interval (in seconds). SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shard number for measures/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shard number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shard factor for a super dataset record, i.e. the shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When the sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS, FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection, and designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. 
SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. 
SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OTEL_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   prometheus-fetcher default Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine. - -    - - enabledRules Enabled rules. SW_PROMETHEUS_FETCHER_ENABLED_RULES self   - - maxConvertWorker The maximize meter convert worker. SW_PROMETHEUS_FETCHER_NUM_CONVERT_WORKER -1(by default, half the number of CPU core(s))   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. 
SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow users to add, disable, and update UI templates. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog On-demand Pod log: fetches the Pod logs on users' demand; the logs are fetched and displayed in real time and are not persisted in any way. This is helpful when users want to run some experiments, monitor the logs, and see what\u0026rsquo;s happening inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default; please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query graphql - GraphQL query implementation. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. 
SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. 
SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter grpc targetHost The host of target gRPC server for receiving export data. SW_EXPORTER_GRPC_HOST 127.0.0.1   - - targetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.2.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. 
ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. 
- 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration (in seconds) of querying the eBPF profiling data from the database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count for querying the eBPF profiling data from the database. - System CPU core size   cluster standalone - Standalone mode runs a single OAP node and is not suitable for cluster deployments. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum number of retries. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace The namespace where SkyWalking is deployed in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. 
SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. 
SW_STORAGE_ES_BULK_ACTIONS 5000   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. INT(flushInterval * 2/3) is used for index refresh period. SW_STORAGE_ES_FLUSH_INTERVAL 15 (index refresh period = 10)   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - mysql-sharding - Sharding-Proxy for MySQL properties. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. 
SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - - dataSources The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml.The dataSource name should include the prefix \u0026ldquo;ds_\u0026rdquo; and separated by \u0026ldquo;,\u0026rdquo; and start from ds_0 SW_JDBC_SHARDING_DATA_SOURCES ds_0,ds_1   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. 
SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. 
- -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OTEL_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. 
SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. 
SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query graphql - GraphQL query implementation. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. 
SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -     Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.3.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. 
Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - persistentPeriod Execution period of the persistent timer (in seconds).  25   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. 
The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. 
SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. 
SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. 
SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - mysql-sharding - Sharding-Proxy for MySQL properties. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - - dataSources The dataSources are configured in ShardingSphere-Proxy config-sharding.yaml.The dataSource name should include the prefix \u0026ldquo;ds_\u0026rdquo; and separated by \u0026ldquo;,\u0026rdquo; and start from ds_0 SW_JDBC_SHARDING_DATA_SOURCES ds_0,ds_1   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. 
SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. 
SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelRules Enabled metric rules for OC handler. SW_OTEL_RECEIVER_ENABLED_OTEL_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. 
The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. 
- skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allows users to add, disable, and update UI templates. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog On-demand Pod log: fetches the Pod logs on users' demand; the logs are fetched and displayed in real time and are not persisted in any way. This is helpful when users want to do some experiments and monitor the logs to see what\u0026rsquo;s happening inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default; please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for the Zipkin query API and supports the zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for the PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. 
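(Illustration only: the promql rows here map one-to-one onto keys in application.yml. A minimal sketch under the same ${ENV_VAR:default} assumption as above; the selector key and SW_PROMQL variable are assumptions for illustration.)
promql:
  selector: ${SW_PROMQL:default}  # assumed selector variable
  default:
    restHost: ${SW_PROMQL_REST_HOST:0.0.0.0}          # binding IP of the PromQL RESTful service
    restPort: ${SW_PROMQL_REST_PORT:9090}             # binding port
    restContextPath: ${SW_PROMQL_REST_CONTEXT_PATH:/} # web context path
    restMaxThreads: ${SW_PROMQL_REST_MAX_THREADS:200} # maximum thread number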
SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. 
SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max Thtread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 response SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS firhose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY     Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.4.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. 
SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. 
Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tag keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tag values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data for the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration (in seconds) for querying eBPF profiling data from the database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count for querying eBPF profiling data from the database. - System CPU core size   cluster standalone - Standalone mode runs on a single node only and is not suitable for cluster deployments. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum number of retries. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. 
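(Illustration only: selecting a cluster coordinator follows the same pattern, shown here for the zookeeper provider documented above. The selector key and SW_CLUSTER variable are assumptions; defaults come from the table rows.)
cluster:
  selector: ${SW_CLUSTER:standalone}  # assumed selector variable; standalone means single-node, no coordination
  zookeeper:
    namespace: ${SW_NAMESPACE:/}                          # root path by default
    hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}   # Zookeeper cluster hosts and ports
    baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000}     # retry backoff in milliseconds
    maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3}            # maximum number of retries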
SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. 
SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. 
SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. 
SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. 
- -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. -    receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. 
SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. 
SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 1000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. 
SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - - period The period of data sync (in seconds). SW_CONFIG_APOLLO_PERIOD 60   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. 
SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, the agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 request SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.5.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. 
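To illustrate the Note above: in the default application.yml, each module/provider/setting triple from this vocabulary is declared with a ${ENV_VAR:default} placeholder, so a value can be set either in YAML or through the corresponding system environment variable. A minimal sketch for a few of the core settings listed here, assuming the standard placeholder syntax (check the application.yml shipped with your distribution for the exact keys):

```yaml
# Sketch only: core module with the "default" provider, using the
# ${ENV_VAR:default} placeholder convention described in the Note above.
core:
  selector: ${SW_CORE:default}
  default:
    restHost: ${SW_CORE_REST_HOST:0.0.0.0}
    restPort: ${SW_CORE_REST_PORT:12800}
    gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_CORE_GRPC_PORT:11800}
```

With this layout, exporting the environment variable (for example SW_CORE_REST_PORT=12801) overrides the YAML default without editing the file.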
SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. 
SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. 
SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. 
Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. 
SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - host Host of the BanyanDB. SW_STORAGE_BANYANDB_HOST 127.0.0.1   - - port Port of the BanyanDB. SW_STORAGE_BANYANDB_PORT 17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. 
SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. 
- -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). 
SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. - skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 
10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allows users to add, disable, and update UI templates. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog On-demand Pod log: fetches the Pod logs on users' demand; the logs are fetched and displayed in real time and are not persisted in any way. This is helpful when users want to run some experiments and monitor the logs to see what\u0026rsquo;s happening inside the service. Note: if you print secrets in the logs, they are also visible in the UI, so for the sake of security this feature is disabled by default; please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for the Zipkin query API and supports the zipkin-lens UI. -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for the PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No-op implementation. -    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. 
-    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. 
SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   configuration-discovery default disableMessageDigest If true, the agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max thread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 request SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS Firehose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicates whether HTTPS is enabled for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.6.0/en/setup/backend/configuration-vocabulary/"},{"content":"Configuration Vocabulary The Configuration Vocabulary lists all available configurations provided by application.yml.\n   Module Provider Settings Value(s) and Explanation System Environment Variable¹ Default     core default role Option values: Mixed/Receiver/Aggregator. Receiver mode OAP opens the service to the agents, then analyzes and aggregates the results, and forwards the results for distributed aggregation. Aggregator mode OAP receives data from Mixer and Receiver role OAP nodes, and performs 2nd level aggregation. Mixer means both Receiver and Aggregator. SW_CORE_ROLE Mixed   - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_CORE_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_CORE_REST_PORT 12800   - - restContextPath Web context path of RESTful services. SW_CORE_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. 
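As a worked example of the exporter settings listed above, here is a hedged sketch of enabling the Kafka trace and log exporters with the variables from this vocabulary; the values shown are the documented defaults apart from the two enable flags, and the exact nesting should be verified against your application.yml:

```yaml
# Sketch only: exporter module ("default" provider) with the Kafka
# exporters switched on; every setting/env-var pair comes from the table above.
exporter:
  default:
    enableKafkaTrace: ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}
    enableKafkaLog: ${SW_EXPORTER_ENABLE_KAFKA_LOG:true}
    kafkaBootstrapServers: ${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}
    kafkaTopicTrace: ${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}
    kafkaTopicLog: ${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}
    exportErrorStatusTraceOnly: ${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}
```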
SW_CORE_REST_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_CORE_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel Backlog of RESTful services. SW_CORE_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_CORE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services, including gRPC data report and internal communication among OAP nodes. SW_CORE_GRPC_HOST 0.0.0.0   - - gRPCPort Binding port of gRPC services. SW_CORE_GRPC_PORT 11800   - - gRPCSslEnabled Activates SSL for gRPC services. SW_CORE_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_CORE_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_CORE_GRPC_SSL_CERT_CHAIN_PATH -   - - gRPCSslTrustedCAPath File path of gRPC trusted CA. SW_CORE_GRPC_SSL_TRUSTED_CA_PATH -   - - downsampling Activated level of down sampling aggregation.  Hour,Day   - - enableDataKeeperExecutor Controller of TTL scheduler. Once disabled, TTL wouldn\u0026rsquo;t work. SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR true   - - dataKeeperExecutePeriod Execution period of TTL scheduler (in minutes). Execution doesn\u0026rsquo;t mean deleting data. The storage provider (e.g. ElasticSearch storage) could override this. SW_CORE_DATA_KEEPER_EXECUTE_PERIOD 5   - - recordDataTTL The lifecycle of record data (in days). Record data includes traces, top N sample records, and logs. Minimum value is 2. SW_CORE_RECORD_DATA_TTL 3   - - metricsDataTTL The lifecycle of metrics data (in days), including metadata. We recommend setting metricsDataTTL \u0026gt;= recordDataTTL. Minimum value is 2. SW_CORE_METRICS_DATA_TTL 7   - - l1FlushPeriod The period of L1 aggregation flush to L2 aggregation (in milliseconds). SW_CORE_L1_AGGREGATION_FLUSH_PERIOD 500   - - storageSessionTimeout The threshold of session time (in milliseconds). Default value is 70000. SW_CORE_STORAGE_SESSION_TIMEOUT 70000   - - persistentPeriod The period of doing data persistence. Unit is second.Default value is 25s SW_CORE_PERSISTENT_PERIOD 25   - - topNReportPeriod The execution period (in minutes) of top N sampler, which saves sampled data into the storage. SW_CORE_TOPN_REPORT_PERIOD 10   - - activeExtraModelColumns Appends entity names (e.g. service names) into metrics storage entities. SW_CORE_ACTIVE_EXTRA_MODEL_COLUMNS false   - - serviceNameMaxLength Maximum length limit of service names. SW_SERVICE_NAME_MAX_LENGTH 70   - - instanceNameMaxLength Maximum length limit of service instance names. The maximum length of service + instance names should be less than 200. SW_INSTANCE_NAME_MAX_LENGTH 70   - - endpointNameMaxLength Maximum length limit of endpoint names. The maximum length of service + endpoint names should be less than 240. SW_ENDPOINT_NAME_MAX_LENGTH 150   - - searchableTracesTags Defines a set of span tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_TAG_KEYS http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   - - searchableLogsTags Defines a set of log tag keys which are searchable through GraphQL. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_LOGS_TAG_KEYS level   - - searchableAlarmTags Defines a set of alarm tag keys which are searchable through GraphQL. 
Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_SEARCHABLE_ALARM_TAG_KEYS level   - - autocompleteTagKeysQueryMaxSize The max size of tags keys for autocomplete select. SW_AUTOCOMPLETE_TAG_KEYS_QUERY_MAX_SIZE 100   - - autocompleteTagValuesQueryMaxSize The max size of tags values for autocomplete select. SW_AUTOCOMPLETE_TAG_VALUES_QUERY_MAX_SIZE 100   - - gRPCThreadPoolSize Pool size of gRPC server. SW_CORE_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_CORE_GRPC_POOL_QUEUE_SIZE 10000   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_CORE_GRPC_MAX_CONCURRENT_CALL -   - - maxMessageSize Sets the maximum message size allowed to be received on the server. Empty means 4 MiB. SW_CORE_GRPC_MAX_MESSAGE_SIZE 4M(based on Netty)   - - remoteTimeout Timeout for cluster internal communication (in seconds). - 20   - - maxSizeOfNetworkAddressAlias The maximum size of network address detected in the system being monitored. - 1_000_000   - - maxPageSizeOfQueryProfileSnapshot The maximum size for snapshot analysis in an OAP query. - 500   - - maxSizeOfAnalyzeProfileSnapshot The maximum number of snapshots analyzed by the OAP. - 12000   - - prepareThreads The number of threads used to prepare metrics data to the storage. SW_CORE_PREPARE_THREADS 2   - - enableEndpointNameGroupingByOpenapi Automatically groups endpoints by the given OpenAPI definitions. SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI true   - - maxDurationOfQueryEBPFProfilingData The maximum duration(in second) of query the eBPF profiling data from database. - 30   - - maxThreadCountOfQueryEBPFProfilingData The maximum thread count of query the eBPF profiling data from database. - System CPU core size   - - uiMenuRefreshInterval The period(in seconds) of refreshing the status of all UI menu items. - 20   - - serviceCacheRefreshInterval The period(in seconds) of refreshing the service cache. SW_SERVICE_CACHE_REFRESH_INTERVAL 10   cluster standalone - Standalone is not suitable for running on a single node running. No configuration available. - -   - zookeeper namespace The namespace, represented by root path, isolates the configurations in Zookeeper. SW_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CLUSTER_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CLUSTER_ZK_SLEEP_TIME 1000   - - maxRetries The maximum retry time. SW_CLUSTER_ZK_MAX_RETRIES 3   - - enableACL Opens ACL using schema and expression. SW_ZK_ENABLE_ACL false   - - schema Schema for the authorization. SW_ZK_SCHEMA digest   - - expression Expression for the authorization. SW_ZK_EXPRESSION skywalking:skywalking   - - internalComHost The hostname registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Zookeeper for the internal communication of OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - kubernetes namespace Namespace deployed by SkyWalking in k8s. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels used for filtering OAP deployment in k8s. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - uidEnvName Environment variable name for reading uid. SW_CLUSTER_K8S_UID SKYWALKING_COLLECTOR_UID   - consul serviceName Service name for SkyWalking cluster. 
SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Consul cluster. SW_CLUSTER_CONSUL_HOST_PORT localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ALC token. SW_CLUSTER_CONSUL_ACLTOKEN -   - - internalComHost The hostname registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Consul for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - etcd serviceName Service name for SkyWalking cluster. SW_CLUSTER_ETCD_SERVICE_NAME SkyWalking_OAP_Cluster   - - endpoints Hosts and ports for etcd cluster. SW_CLUSTER_ETCD_ENDPOINTS localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CLUSTER_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CLUSTER_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CLUSTER_ETCD_USER    - - password Etcd auth password. SW_CLUSTER_ETCD_PASSWORD    - - internalComHost The hostname registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in etcd for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - Nacos serviceName Service name for SkyWalking cluster. SW_SERVICE_NAME SkyWalking_OAP_Cluster   - - hostPort Hosts and ports for Nacos cluster. SW_CLUSTER_NACOS_HOST_PORT localhost:8848   - - namespace Namespace used by SkyWalking node coordination. SW_CLUSTER_NACOS_NAMESPACE public   - - internalComHost The hostname registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_HOST -   - - internalComPort The port registered in Nacos for internal communications of the OAP cluster. SW_CLUSTER_INTERNAL_COM_PORT -1   - - username Nacos Auth username. SW_CLUSTER_NACOS_USERNAME -   - - password Nacos Auth password. SW_CLUSTER_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CLUSTER_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CLUSTER_NACOS_SECRETKEY -   - - syncPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern synchronization (in seconds). SW_CORE_SYNC_PERIOD_HTTP_URI_RECOGNITION_PATTERN 10   - - trainingPeriodHttpUriRecognitionPattern The period of HTTP URI recognition pattern training (in seconds). SW_CORE_TRAINING_PERIOD_HTTP_URI_RECOGNITION_PATTERN 60   - - maxHttpUrisNumberPerService The maximum number of HTTP URIs per service. SW_MAX_HTTP_URIS_NUMBER_PER_SERVICE 3000   storage elasticsearch - ElasticSearch (and OpenSearch) storage implementation. - -   - - namespace Prefix of indexes created and used by SkyWalking. SW_NAMESPACE -   - - clusterNodes ElasticSearch cluster nodes for client connection. SW_STORAGE_ES_CLUSTER_NODES localhost   - - protocol HTTP or HTTPs. SW_STORAGE_ES_HTTP_PROTOCOL HTTP   - - connectTimeout Connect timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_CONNECT_TIMEOUT 3000   - - socketTimeout Socket timeout of ElasticSearch client (in milliseconds). SW_STORAGE_ES_SOCKET_TIMEOUT 30000   - - responseTimeout Response timeout of ElasticSearch client (in milliseconds), 0 disables the timeout. SW_STORAGE_ES_RESPONSE_TIMEOUT 1500   - - numHttpClientThread The number of threads for the underlying HTTP client to perform socket I/O. If the value is \u0026lt;= 0, the number of available processors will be used. SW_STORAGE_ES_NUM_HTTP_CLIENT_THREAD 0   - - user Username of ElasticSearch cluster. 
SW_ES_USER -   - - password Password of ElasticSearch cluster. SW_ES_PASSWORD -   - - trustStorePath Trust JKS file path. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PATH -   - - trustStorePass Trust JKS file password. Only works when username and password are enabled. SW_STORAGE_ES_SSL_JKS_PASS -   - - secretsManagementFile Secrets management file in the properties format, including username and password, which are managed by a 3rd party tool. Capable of being updated them at runtime. SW_ES_SECRETS_MANAGEMENT_FILE -   - - dayStep Represents the number of days in the one-minute/hour/day index. SW_STORAGE_DAY_STEP 1   - - indexShardsNumber Shard number of new indexes. SW_STORAGE_ES_INDEX_SHARDS_NUMBER 1   - - indexReplicasNumber Replicas number of new indexes. SW_STORAGE_ES_INDEX_REPLICAS_NUMBER 0   - - specificIndexSettings Specify the settings for each index individually. If configured, this setting has the highest priority and overrides the generic settings. SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS -   - - superDatasetDayStep Represents the number of days in the super size dataset record index. Default value is the same as dayStep when the value is less than 0. SW_STORAGE_ES_SUPER_DATASET_DAY_STEP -1   - - superDatasetIndexShardsFactor Super dataset is defined in the code (e.g. trace segments). This factor provides more shards for the super dataset: shards number = indexShardsNumber * superDatasetIndexShardsFactor. This factor also affects Zipkin and Jaeger traces. SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR 5   - - superDatasetIndexReplicasNumber Represents the replicas number in the super size dataset record index. SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER 0   - - indexTemplateOrder The order of index template. SW_STORAGE_ES_INDEX_TEMPLATE_ORDER 0   - - bulkActions Async bulk size of the record data batch execution. SW_STORAGE_ES_BULK_ACTIONS 5000   - - batchOfBytes A threshold to control the max body size of ElasticSearch Bulk flush. SW_STORAGE_ES_BATCH_OF_BYTES 10485760 (10m)   - - flushInterval Period of flush (in seconds). Does not matter whether bulkActions is reached or not. SW_STORAGE_ES_FLUSH_INTERVAL 5   - - concurrentRequests The number of concurrent requests allowed to be executed. SW_STORAGE_ES_CONCURRENT_REQUESTS 2   - - resultWindowMaxSize The maximum size of dataset when the OAP loads cache, such as network aliases. SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE 10000   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_ES_QUERY_MAX_SIZE 10000   - - scrollingBatchSize The batch size of metadata per iteration when metadataQueryMaxSize or resultWindowMaxSize is too large to be retrieved in a single query. SW_STORAGE_ES_SCROLLING_BATCH_SIZE 5000   - - segmentQueryMaxSize The maximum size of trace segments per query. SW_STORAGE_ES_QUERY_SEGMENT_SIZE 200   - - profileTaskQueryMaxSize The maximum size of profile task per query. SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE 200   - - profileDataQueryScrollBatchSize The batch size of query profiling data. SW_STORAGE_ES_QUERY_PROFILE_DATA_BATCH_SIZE 100   - - advanced All settings of ElasticSearch index creation. The value should be in JSON format. SW_STORAGE_ES_ADVANCED -   - - logicSharding Shard metrics and records indices into multi-physical indices, one index template per metric/meter aggregation function or record. SW_STORAGE_ES_LOGIC_SHARDING false   - h2 - H2 storage is designed for demonstration and running in short term (i.e. 1-2 hours) only. - -   - - url H2 connection URL. 
Defaults to H2 memory mode. SW_STORAGE_H2_URL jdbc:h2:mem:skywalking-oap-db   - - user Username of H2 database. SW_STORAGE_H2_USER sa   - - password Password of H2 database. - -   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_H2_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 100   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 1   - mysql - MySQL Storage. The MySQL JDBC Driver is not in the dist. Please copy it into the oap-lib folder manually. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - postgresql - PostgreSQL storage. - -   - - properties Hikari connection pool configurations. - Listed in the application.yaml.   - - metadataQueryMaxSize The maximum size of metadata per query. SW_STORAGE_MYSQL_QUERY_MAX_SIZE 5000   - - maxSizeOfBatchSql The maximum size of batch size of SQL execution SW_STORAGE_MAX_SIZE_OF_BATCH_SQL 2000   - - asyncBatchPersistentPoolSize async flush data into database thread size SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE 4   - banyandb - BanyanDB storage. - -   - - targets Hosts with ports of the BanyanDB. SW_STORAGE_BANYANDB_TARGETS 127.0.0.1:17912   - - maxBulkSize The maximum size of write entities in a single batch write call. SW_STORAGE_BANYANDB_MAX_BULK_SIZE 5000   - - flushInterval Period of flush interval. In the timeunit of seconds. SW_STORAGE_BANYANDB_FLUSH_INTERVAL 15   - - metricsShardsNumber Shards Number for measure/metrics. SW_STORAGE_BANYANDB_METRICS_SHARDS_NUMBER 1   - - recordShardsNumber Shards Number for a normal record. SW_STORAGE_BANYANDB_RECORD_SHARDS_NUMBER 1   - - superDatasetShardsFactor Shards Factor for a super dataset record, i.e. Shard number of a super dataset is recordShardsNumber*superDatasetShardsFactor. SW_STORAGE_BANYANDB_SUPERDATASET_SHARDS_FACTOR 2   - - concurrentWriteThreads Concurrent consumer threads for batch writing. SW_STORAGE_BANYANDB_CONCURRENT_WRITE_THREADS 15   - - profileTaskQueryMaxSize Max size of ProfileTask to be fetched. SW_STORAGE_BANYANDB_PROFILE_TASK_QUERY_MAX_SIZE 200   agent-analyzer default Agent Analyzer. SW_AGENT_ANALYZER default    - - traceSamplingPolicySettingsFile The sampling policy including sampling rate and the threshold of trace segment latency can be configured by the traceSamplingPolicySettingsFile file. SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE trace-sampling-policy-settings.yml   - - slowDBAccessThreshold The slow database access threshold (in milliseconds). SW_SLOW_DB_THRESHOLD default:200,mongodb:100   - - forceSampleErrorSegment When sampling mechanism is activated, this config samples the error status segment and ignores the sampling rate. SW_FORCE_SAMPLE_ERROR_SEGMENT true   - - segmentStatusAnalysisStrategy Determines the final segment status from span status. Available values are FROM_SPAN_STATUS , FROM_ENTRY_SPAN, and FROM_FIRST_SPAN. FROM_SPAN_STATUS indicates that the segment status would be error if any span has an error status. 
FROM_ENTRY_SPAN means that the segment status would only be determined by the status of entry spans. FROM_FIRST_SPAN means that the segment status would only be determined by the status of the first span. SW_SEGMENT_STATUS_ANALYSIS_STRATEGY FROM_SPAN_STATUS   - - noUpstreamRealAddressAgents Exit spans with the component in the list would not generate client-side instance relation metrics, since some tracing plugins (e.g. Nginx-LUA and Envoy) can\u0026rsquo;t collect the real peer IP address. SW_NO_UPSTREAM_REAL_ADDRESS 6000,9000   - - meterAnalyzerActiveFiles Indicates which files could be instrumented and analyzed. Multiple files are split by \u0026ldquo;,\u0026rdquo;. SW_METER_ANALYZER_ACTIVE_FILES    - - slowCacheWriteThreshold The threshold of slow command which is used for writing operation (in milliseconds). SW_SLOW_CACHE_WRITE_THRESHOLD default:20,redis:10   - - slowCacheReadThreshold The threshold of slow command which is used for reading (getting) operation (in milliseconds). SW_SLOW_CACHE_READ_THRESHOLD default:20,redis:10   receiver-sharing-server default Sharing server provides new gRPC and restful servers for data collection. Ana designates that servers in the core module are to be used for internal communication only. - -    - - restHost Binding IP of RESTful services. Services include GraphQL query and HTTP data report. SW_RECEIVER_SHARING_REST_HOST -   - - restPort Binding port of RESTful services. SW_RECEIVER_SHARING_REST_PORT -   - - restContextPath Web context path of RESTful services. SW_RECEIVER_SHARING_REST_CONTEXT_PATH -   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_SHARING_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize ServerSocketChannel backlog of RESTful services. SW_RECEIVER_SHARING_REST_QUEUE_SIZE 0   - - httpMaxRequestHeaderSize Maximum request header size accepted. SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - gRPCHost Binding IP of gRPC services. Services include gRPC data report and internal communication among OAP nodes. SW_RECEIVER_GRPC_HOST 0.0.0.0. Not Activated   - - gRPCPort Binding port of gRPC services. SW_RECEIVER_GRPC_PORT Not Activated   - - gRPCThreadPoolSize Pool size of gRPC server. SW_RECEIVER_GRPC_THREAD_POOL_SIZE CPU core * 4   - - gRPCThreadPoolQueueSize Queue size of gRPC server. SW_RECEIVER_GRPC_POOL_QUEUE_SIZE 10000   - - gRPCSslEnabled Activates SSL for gRPC services. SW_RECEIVER_GRPC_SSL_ENABLED false   - - gRPCSslKeyPath File path of gRPC SSL key. SW_RECEIVER_GRPC_SSL_KEY_PATH -   - - gRPCSslCertChainPath File path of gRPC SSL cert chain. SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH -   - - maxConcurrentCallsPerConnection The maximum number of concurrent calls permitted for each incoming connection. Defaults to no limit. SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL -   - - authentication The token text for authentication. Works for gRPC connection only. Once this is set, the client is required to use the same token. SW_AUTHENTICATION -   log-analyzer default Log Analyzer. SW_LOG_ANALYZER default    - - lalFiles The LAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_LAL_FILES default   - - malFiles The MAL configuration file names (without file extension) to be activated. Read LAL for more details. SW_LOG_MAL_FILES \u0026quot;\u0026quot;   event-analyzer default Event Analyzer. 
SW_EVENT_ANALYZER default    receiver-register default gRPC and HTTPRestful services that provide service, service instance and endpoint register. - -    receiver-trace default gRPC and HTTPRestful services that accept SkyWalking format traces. - -    receiver-jvm default gRPC services that accept JVM metrics data. - -    receiver-clr default gRPC services that accept .Net CLR metrics data. - -    receiver-profile default gRPC services that accept profile task status and snapshot reporter. - -    receiver-zabbix default TCP receiver accepts Zabbix format metrics. - -    - - port Exported TCP port. Zabbix agent could connect and transport data. SW_RECEIVER_ZABBIX_PORT 10051   - - host Binds to host. SW_RECEIVER_ZABBIX_HOST 0.0.0.0   - - activeFiles Enables config when agent request is received. SW_RECEIVER_ZABBIX_ACTIVE_FILES agent   service-mesh default gRPC services that accept data from inbound mesh probes. - -    envoy-metric default Envoy metrics_service and ALS(access log service) are supported by this receiver. The OAL script supports all GAUGE type metrics. - -    - - acceptMetricsService Starts Envoy Metrics Service analysis. SW_ENVOY_METRIC_SERVICE true   - - alsHTTPAnalysis Starts Envoy HTTP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS -   - - alsTCPAnalysis Starts Envoy TCP Access Log Service analysis. Value = k8s-mesh means starting the analysis. SW_ENVOY_METRIC_ALS_TCP_ANALYSIS -   - - k8sServiceNameRule k8sServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are pod and service. E.g. you can use ${service.metadata.name}-${pod.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. K8S_SERVICE_NAME_RULE ${pod.metadata.labels.(service.istio.io/canonical-name)}   - - istioServiceNameRule istioServiceNameRule allows you to customize the service name in ALS via Kubernetes metadata. The available variables are serviceEntry. E.g. you can use ${serviceEntry.metadata.name}-${serviceEntry.metadata.labels.version} to append the version number to the service name. Note that when using environment variables to pass this configuration, use single quotes('') to avoid being evaluated by the shell. ISTIO_SERVICE_NAME_RULE ${serviceEntry.metadata.name}   receiver-otel default A receiver for analyzing metrics data from OpenTelemetry. - -    - - enabledHandlers Enabled handlers for otel. SW_OTEL_RECEIVER_ENABLED_HANDLERS -   - - enabledOtelMetricsRules Enabled metric rules for OTLP handler. SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES -   receiver-zipkin default A receiver for Zipkin traces. - -    - - sampleRate The sample rate precision is 1/10000, should be between 0 and 10000 SW_ZIPKIN_SAMPLE_RATE 10000   - - searchableTracesTags Defines a set of span tag keys which are searchable. Multiple values are separated by commas. The max length of key=value should be less than 256 or will be dropped. SW_ZIPKIN_SEARCHABLE_TAG_KEYS http.method   - - enableHttpCollector Enable Http Collector. SW_ZIPKIN_HTTP_COLLECTOR_ENABLED true   - - restHost Binding IP of RESTful services. SW_RECEIVER_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_RECEIVER_ZIPKIN_REST_PORT 9411   - - restContextPath Web context path of RESTful services. 
SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_RECEIVER_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE 0   - - enableKafkaCollector Enable Kafka Collector. SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED false   - - kafkaBootstrapServers Kafka ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG. SW_ZIPKIN_KAFKA_SERVERS localhost:9092   - - kafkaGroupId Kafka ConsumerConfig.GROUP_ID_CONFIG. SW_ZIPKIN_KAFKA_GROUP_ID zipkin   - - kafkaTopic Kafka Topics. SW_ZIPKIN_KAFKA_TOPIC zipkin   - - kafkaConsumerConfig Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override. SW_ZIPKIN_KAFKA_CONSUMER_CONFIG \u0026ldquo;{\u0026quot;auto.offset.reset\u0026quot;:\u0026quot;earliest\u0026quot;,\u0026quot;enable.auto.commit\u0026quot;:true}\u0026rdquo;   - - kafkaConsumers The number of consumers to create. SW_ZIPKIN_KAFKA_CONSUMERS 1   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   kafka-fetcher default Read SkyWalking\u0026rsquo;s native metrics/logs/traces through Kafka server. - -    - - bootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_FETCHER_SERVERS localhost:9092   - - namespace Namespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, OAP will add a prefix to topic name. You should also set namespace in agent.config. The property is named plugin.kafka.namespace. SW_NAMESPACE -   - - groupId A unique string that identifies the consumer group to which this consumer belongs. - skywalking-consumer   - - createTopicIfNotExist If true, this creates Kafka topic (if it does not already exist). - true   - - partitions The number of partitions for the topic being created. SW_KAFKA_FETCHER_PARTITIONS 3   - - consumers The number of consumers to create. SW_KAFKA_FETCHER_CONSUMERS 1   - - enableNativeProtoLog Enables fetching and handling native proto log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_PROTO_LOG true   - - enableNativeJsonLog Enables fetching and handling native json log data. SW_KAFKA_FETCHER_ENABLE_NATIVE_JSON_LOG true   - - replicationFactor The replication factor for each partition in the topic being created. SW_KAFKA_FETCHER_PARTITIONS_FACTOR 2   - - kafkaHandlerThreadPoolSize Pool size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_SIZE CPU core * 2   - - kafkaHandlerThreadPoolQueueSize Queue size of Kafka message handler executor. SW_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE 10000   - - topicNameOfMeters Kafka topic name for meter system data. - skywalking-meters   - - topicNameOfMetrics Kafka topic name for JVM metrics data. - skywalking-metrics   - - topicNameOfProfiling Kafka topic name for profiling data. - skywalking-profilings   - - topicNameOfTracingSegments Kafka topic name for tracing data. - skywalking-segments   - - topicNameOfManagements Kafka topic name for service instance reporting and registration. - skywalking-managements   - - topicNameOfLogs Kafka topic name for native proto log data. 
- skywalking-logs   - - topicNameOfJsonLogs Kafka topic name for native json log data. - skywalking-logs-json   receiver-browser default gRPC services that accept browser performance data and error log. - - -   - - sampleRate Sampling rate for receiving trace. Precise to 1/10000. 10000 means sampling rate of 100% by default. SW_RECEIVER_BROWSER_SAMPLE_RATE 10000   query graphql - GraphQL query implementation. -    - - enableLogTestTool Enable the log testing API to test the LAL. NOTE: This API evaluates untrusted code on the OAP server. A malicious script can do significant damage (steal keys and secrets, remove files and directories, install malware, etc). As such, please enable this API only when you completely trust your users. SW_QUERY_GRAPHQL_ENABLE_LOG_TEST_TOOL false   - - maxQueryComplexity Maximum complexity allowed for the GraphQL query that can be used to abort a query if the total number of data fields queried exceeds the defined threshold. SW_QUERY_MAX_QUERY_COMPLEXITY 3000   - - enableUpdateUITemplate Allow user add,disable and update UI template. SW_ENABLE_UPDATE_UI_TEMPLATE false   - - enableOnDemandPodLog Ondemand Pod log: fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service. Note: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please set this configuration to enable the feature manually. SW_ENABLE_ON_DEMAND_POD_LOG false   query-zipkin default - This module is for Zipkin query API and support zipkin-lens UI -    - - restHost Binding IP of RESTful services. SW_QUERY_ZIPKIN_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_QUERY_ZIPKIN_REST_PORT 9412   - - restContextPath Web context path of RESTful services. SW_QUERY_ZIPKIN_REST_CONTEXT_PATH zipkin   - - restMaxThreads Maximum thread number of RESTful services. SW_QUERY_ZIPKIN_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_QUERY_ZIPKIN_REST_QUEUE_SIZE 0   - - lookback Default look back for traces and autocompleteTags, 1 day in millis SW_QUERY_ZIPKIN_LOOKBACK 86400000   - - namesMaxAge The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNames SW_QUERY_ZIPKIN_NAMES_MAX_AGE 300   - - uiQueryLimit Default traces query max size SW_QUERY_ZIPKIN_UI_QUERY_LIMIT 10   - - uiDefaultLookback Default look back on the UI for search traces, 15 minutes in millis SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK 900000   promql default - This module is for PromQL API. -    - - restHost Binding IP of RESTful services. SW_PROMQL_REST_HOST 0.0.0.0   - - restPort Binding port of RESTful services. SW_PROMQL_REST_PORT 9090   - - restContextPath Web context path of RESTful services. SW_PROMQL_REST_CONTEXT_PATH /   - - restMaxThreads Maximum thread number of RESTful services. SW_PROMQL_REST_MAX_THREADS 200   - - restIdleTimeOut Connector idle timeout of RESTful services (in milliseconds). SW_PROMQL_REST_IDLE_TIMEOUT 30000   - - restAcceptQueueSize Maximum request header size accepted. SW_PROMQL_REST_QUEUE_SIZE 0   alarm default - Read alarm doc for more details. -    telemetry - - Read telemetry doc for more details. -    - none - No op implementation. 
-    - prometheus host Binding host for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_HOST 0.0.0.0   - - port Binding port for Prometheus server fetching data. SW_TELEMETRY_PROMETHEUS_PORT 1234   configuration - - Read dynamic configuration doc for more details. -    - grpc host DCS server binding hostname. SW_DCS_SERVER_HOST -   - - port DCS server binding port. SW_DCS_SERVER_PORT 80   - - clusterName Cluster name when reading the latest configuration from DSC server. SW_DCS_CLUSTER_NAME SkyWalking   - - period The period of reading data from DSC server by the OAP (in seconds). SW_DCS_PERIOD 20   - apollo apolloMeta apollo.meta in Apollo. SW_CONFIG_APOLLO http://localhost:8080   - - apolloCluster apollo.cluster in Apollo. SW_CONFIG_APOLLO_CLUSTER default   - - apolloEnv env in Apollo. SW_CONFIG_APOLLO_ENV -   - - appId app.id in Apollo. SW_CONFIG_APOLLO_APP_ID skywalking   - zookeeper namespace The namespace (represented by root path) that isolates the configurations in the Zookeeper. SW_CONFIG_ZK_NAMESPACE /, root path   - - hostPort Hosts and ports of Zookeeper Cluster. SW_CONFIG_ZK_HOST_PORT localhost:2181   - - baseSleepTimeMs The period of Zookeeper client between two retries (in milliseconds). SW_CONFIG_ZK_BASE_SLEEP_TIME_MS 1000   - - maxRetries The maximum retry time. SW_CONFIG_ZK_MAX_RETRIES 3   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - etcd endpoints Hosts and ports for etcd cluster (separated by commas if multiple). SW_CONFIG_ETCD_ENDPOINTS http://localhost:2379   - - namespace Namespace for SkyWalking cluster. SW_CONFIG_ETCD_NAMESPACE /skywalking   - - authentication Indicates whether there is authentication. SW_CONFIG_ETCD_AUTHENTICATION false   - - user Etcd auth username. SW_CONFIG_ETCD_USER    - - password Etcd auth password. SW_CONFIG_ETCD_PASSWORD    - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - consul hostPort Hosts and ports for Consul cluster. SW_CONFIG_CONSUL_HOST_AND_PORTS localhost:8500   - - aclToken ACL Token of Consul. Empty string means without ACL token. SW_CONFIG_CONSUL_ACL_TOKEN -   - - period The period of data sync (in seconds). SW_CONFIG_CONSUL_PERIOD 60   - k8s-configmap namespace Deployment namespace of the config map. SW_CLUSTER_K8S_NAMESPACE default   - - labelSelector Labels for locating configmap. SW_CLUSTER_K8S_LABEL app=collector,release=skywalking   - - period The period of data sync (in seconds). SW_CONFIG_ZK_PERIOD 60   - nacos serverAddr Nacos Server Host. SW_CONFIG_NACOS_SERVER_ADDR 127.0.0.1   - - port Nacos Server Port. SW_CONFIG_NACOS_SERVER_PORT 8848   - - group Nacos Configuration namespace. SW_CONFIG_NACOS_SERVER_NAMESPACE -   - - period The period of data sync (in seconds). SW_CONFIG_CONFIG_NACOS_PERIOD 60   - - username Nacos Auth username. SW_CONFIG_NACOS_USERNAME -   - - password Nacos Auth password. SW_CONFIG_NACOS_PASSWORD -   - - accessKey Nacos Auth accessKey. SW_CONFIG_NACOS_ACCESSKEY -   - - secretKey Nacos Auth secretKey. SW_CONFIG_NACOS_SECRETKEY -   exporter default enableGRPCMetrics Enable gRPC metrics exporter. SW_EXPORTER_ENABLE_GRPC_METRICS false   - - gRPCTargetHost The host of target gRPC server for receiving export data SW_EXPORTER_GRPC_HOST 127.0.0.1   - - gRPCTargetPort The port of target gRPC server for receiving export data. SW_EXPORTER_GRPC_PORT 9870   - - enableKafkaTrace Enable Kafka trace exporter. SW_EXPORTER_ENABLE_KAFKA_TRACE false   - - enableKafkaLog Enable Kafka log exporter. 
SW_EXPORTER_ENABLE_KAFKA_LOG false   - - kafkaBootstrapServers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_EXPORTER_KAFKA_SERVERS localhost:9092   - - kafkaProducerConfig Kafka producer config, JSON format as Properties. SW_EXPORTER_KAFKA_PRODUCER_CONFIG -   - - kafkaTopicTrace Kafka topic name for trace. SW_EXPORTER_KAFKA_TOPIC_TRACE skywalking-export-trace   - - kafkaTopicLog Kafka topic name for log. SW_EXPORTER_KAFKA_TOPIC_LOG skywalking-export-log   - - exportErrorStatusTraceOnly Export error status trace segments through the Kafka channel. SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR false   health-checker default checkIntervalSeconds The period of checking OAP internal health status (in seconds). SW_HEALTH_CHECKER_INTERVAL_SECONDS 5   debugging-query default       - - keywords4MaskingSecretsOfConfig Include the list of keywords to filter configurations including secrets. Separate keywords by a comma. SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS user,password,token,accessKey,secretKey,authentication   configuration-discovery default disableMessageDigest If true, agent receives the latest configuration every time, even without making any changes. By default, OAP uses the SHA512 message digest mechanism to detect changes in configuration. SW_DISABLE_MESSAGE_DIGEST false   receiver-event default gRPC services that handle events data. - -    aws-firehose-receiver default host Binding IP of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_HOST 0.0.0.0   - - port Binding port of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_PORT 12801   - - contextPath Context path of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_CONTEXT_PATH /   - - maxThreads Max Thtread number of HTTP server SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_THREADS 200   - - idleTimeOut Idle timeout of a connection for keep-alive. SW_RECEIVER_AWS_FIREHOSE_HTTP_IDLE_TIME_OUT 30000   - - acceptQueueSize Maximum allowed number of open connections SW_RECEIVER_AWS_FIREHOSE_HTTP_ACCEPT_QUEUE_SIZE 0   - - maxRequestHeaderSize Maximum length of all headers in an HTTP/1 response SW_RECEIVER_AWS_FIREHOSE_HTTP_MAX_REQUEST_HEADER_SIZE 8192   - - firehoseAccessKey The AccessKey of AWS firhose SW_RECEIVER_AWS_FIREHOSE_ACCESS_KEY    - - enableTLS Indicate if enable HTTPS for the server SW_RECEIVER_AWS_FIREHOSE_HTTP_ENABLE_TLS false   - - tlsKeyPath TLS key path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_KEY_PATH    - - tlsCertChainPath TLS certificate chain path SW_RECEIVER_AWS_FIREHOSE_HTTP_TLS_CERT_CHAIN_PATH    ai-pipeline default       - - uriRecognitionServerAddr The address of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_ADDR -   - - uriRecognitionServerPort The port of the URI recognition server. SW_AI_PIPELINE_URI_RECOGNITION_SERVER_PORT 17128    Note ¹ System Environment Variable name could be declared and changed in application.yml. The names listed here are simply provided in the default application.yml file.\n","title":"Configuration Vocabulary","url":"/docs/main/v9.7.0/en/setup/backend/configuration-vocabulary/"},{"content":"Context injection If you want to fetch the SkyWalking Context in your PHP code, which is super helpful for debugging and observability, You can enable the configuration item skywalking_agent.inject_context.\nDescription skywalking_agent.inject_context\nWhether to enable automatic injection of skywalking context variables (such as SW_TRACE_ID). For php-fpm mode, it will be injected into the $_SERVER variable. 
For swoole mode, it will be injected into the $request-\u0026gt;server variable.\nConfiguration [skywalking_agent] extension = skywalking_agent.so skywalking_agent.inject_context = On Usage For php-fpm mode:\n\u0026lt;?php echo $_SERVER[\u0026#34;SW_SERVICE_NAME\u0026#34;]; // get service name echo $_SERVER[\u0026#34;SW_INSTANCE_NAME\u0026#34;]; // get instance name echo $_SERVER[\u0026#34;SW_TRACE_ID\u0026#34;]; // get trace id For swoole mode:\n\u0026lt;?php $http = new Swoole\\Http\\Server(\u0026#39;127.0.0.1\u0026#39;, 9501); $http-\u0026gt;on(\u0026#39;request\u0026#39;, function ($request, $response) { echo $request-\u0026gt;server[\u0026#34;SW_SERVICE_NAME\u0026#34;]; // get service name  echo $request-\u0026gt;server[\u0026#34;SW_INSTANCE_NAME\u0026#34;]; // get instance name  echo $request-\u0026gt;server[\u0026#34;SW_TRACE_ID\u0026#34;]; // get trace id }); ","title":"Context injection","url":"/docs/skywalking-php/next/en/configuration/context-injection/"},{"content":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   
Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","title":"Continuous Profiling","url":"/docs/main/latest/en/setup/backend/backend-continuous-profiling/"},{"content":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. 
Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","title":"Continuous Profiling","url":"/docs/main/next/en/setup/backend/backend-continuous-profiling/"},{"content":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. 
This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period for monitoring data, which can also be understood as the most recent duration. Count: The number of times the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration. Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. 
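For readability, the flattened receiver-ebpf activation quoted earlier in this entry corresponds to the following application.yml fragment; the indentation is reconstructed here, while the selector value is taken verbatim from the text above (a sketch, not the full module configuration):

receiver-ebpf:
  selector: ${SW_RECEIVER_EBPF:default}
  default: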
By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","title":"Continuous Profiling","url":"/docs/main/v9.5.0/en/setup/backend/backend-continuous-profiling/"},{"content":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   
Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","title":"Continuous Profiling","url":"/docs/main/v9.6.0/en/setup/backend/backend-continuous-profiling/"},{"content":"Continuous Profiling Continuous profiling utilizes eBPF, process monitoring, and other technologies to collect data. When the configured threshold is met, it would automatically start profiling tasks. Corresponds to Continuous Profiling in the concepts and designs. This approach helps identify performance bottlenecks and potential issues in a proactive manner, allowing users to optimize their applications and systems more effectively.\nActive in the OAP Continuous profiling uses the same protocol service as eBPF Profiling, so you only need to ensure that the eBPF Profiling receiver is running.\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Configuration of Continuous Profiling Policy Continuous profiling can be configured on a service entity, with the following fields in the configuration:\n Service: The service entity for which you want to monitor the processes. Targets: Configuration conditions.  Target Type: Target profiling type, currently supporting On CPU Profiling, Off CPU Profiling, and Network Profiling. Check Items: Detection conditions, only one of the multiple condition rules needs to be met to start the task.  Type: Monitoring type, currently supporting \u0026ldquo;System Load\u0026rdquo;, \u0026ldquo;Process CPU\u0026rdquo;, \u0026ldquo;Process Thread Count\u0026rdquo;, \u0026ldquo;HTTP Error Rate\u0026rdquo;, \u0026ldquo;HTTP Avg Response Time\u0026rdquo;. 
Threshold: Check if the monitoring value meets the specified expectations. Period: The time period(seconds) for monitoring data, which can also be understood as the most recent duration. Count: The number of times(seconds) the threshold is triggered within the detection period, which can also be understood as the total number of times the specified threshold rule is triggered in the most recent duration(seconds). Once the count check is met, the specified Profiling task will be started. URI: For HTTP-related monitoring types, used to filter specific URIs.      Monitoring After saving the configuration, the eBPF agent can perform monitoring operations on the processes under the specified service based on the service-level configuration.\nMetrics While performing monitoring, the eBPF agent would report the monitoring data to OAP for storage, making it more convenient to understand the real-time monitoring status. The main metrics include:\n   Monitor Type Unit Description     System Load Load System load average over a specified period.   Process CPU Percentage The CPU usage of the process as a percentage.   Process Thread Count Count The number of threads in the process.   HTTP Error Rate Percentage The percentage of HTTP requests that result in error responses (e.g., 4xx or 5xx status codes).   HTTP Avg Response Time Millisecond The average response time for HTTP requests.    Threshold With Trigger In the eBPF agent, data is collected periodically, and the sliding time window technique is used to store the data from the most recent Period cycles. The Threshold rule is used to verify whether the data within each cycle meets the specified criteria. If the number of times the conditions are met within the sliding time window exceeds the Count value, the corresponding Profiling task would be triggered.\nThe sliding time window technique ensures that the most recent and relevant data is considered when evaluating the conditions. This approach allows for a more accurate and dynamic assessment of the system\u0026rsquo;s performance, making it possible to identify and respond to issues in a timely manner. By triggering Profiling tasks when specific conditions are met, the system can automatically initiate performance analysis and help uncover potential bottlenecks or areas for improvement.\nCauses When the eBPF agent reports a Profiling task, it also reports the reason for triggering the Profiling task, which mainly includes the following information:\n Process: The specific process that triggered the policy. Monitor Type: The type of monitoring that was triggered. Threshold: The configured threshold value. Current: The monitoring value at the time the rule was triggered.  Silence Period Upon triggering a continuous profiling task, the eBPF agent supports a feature that prevents re-triggering tasks within a specified period. 
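As a minimal sketch of activating the same eBPF Profiling receiver through its environment variable instead of editing application.yml (the deployment style, service name, and image are assumptions; only SW_RECEIVER_EBPF comes from the text above), a docker-compose fragment could look like this:

services:
  oap:
    image: apache/skywalking-oap-server   # assumed image name, for illustration only
    environment:
      SW_RECEIVER_EBPF: default           # selects the default eBPF Profiling receiver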
This feature is designed to prevent an unlimited number of profiling tasks from being initiated if the process continuously reaches the threshold, which could potentially cause system issues.\n","title":"Continuous Profiling","url":"/docs/main/v9.7.0/en/setup/backend/backend-continuous-profiling/"},{"content":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations would guide you.\n  Compiling\n Compiling Guidance    Release\n Release Guidance    ","title":"Contribution","url":"/docs/skywalking-infra-e2e/latest/en/contribution/readme/"},{"content":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations would guide you.\n  Compiling\n Compiling Guidance    Release\n Release Guidance    ","title":"Contribution","url":"/docs/skywalking-infra-e2e/next/en/contribution/readme/"},{"content":"Contribution If you want to debug or develop SkyWalking Infra E2E, The following documentations would guide you.\n  Compiling\n Compiling Guidance    Release\n Release Guidance    ","title":"Contribution","url":"/docs/skywalking-infra-e2e/v1.3.0/en/contribution/readme/"},{"content":"Create and detect Service Hierarchy Relationship Motivation Service relationship is one of the most important parts of collaborating data in the APM. Service Map is supported for years from tracing to trace analysis. But still due to the means of the probs, a service could be detected from multiple methods, which is the same service in multiple layers. v9 proposal mentioned the concept of the layer. Through this proposal, we plan to establish a kernel-level concept to connect services detected in different layers.\nArchitecture Graph There is no significant architecture-level change.\nPropose Changes The data sources of SkyWalking APM have covered traditional agent installed service, VMs, cloud infra, k8s, etc.\nFor example, a Java service is built in a docker image and is going to be deployed in a k8s cluster, with a sidecar injected due to service mesh managed. The following services would be able to detect cross-layers\n Java service, detected as Java agent installed. A pod of k8s service is detected, due to k8s layer monitoring. Side car perspective service is detected. VM Linux monitoring for a general process, as the container of Java service is deployed on this specific k8s node. Virtual databases, caches, and queues conjectured by agents, and also monitored through k8s monitoring, even traffic monitored by service mesh.  All these services have logic connections or are identical from a physical perspective, but currently, they may be just similar on name(s), no further metadata connection.\nBy those, we have a chance to move one step ahead to connect the dots of the whole infrastructure. This means, for the first time, we are going to establish the connections among services detected from various layers.\nIn the v10, I am proposing a new concept Service Hierarchy. Service Hierarchy defines the relationships of existing services in various layers. With more kinds of agent tech involved(such as eBPF) and deployment tools(such as operator and agent injector), we could inject relative service/instance metadata and try to build the connections, including,\n Agent injector injects the pod ID into the system env, then Java agent could report the relationship through system properties. Rover(eBPF agent) reveals its next iteration forward k8s monitoring rather than profiling. 
And add the capabilities to establish connections among k8s pods and service mesh srv.  Meanwhile, as usual with the new major version change, I would expect UI side changes as well. UI should have flexible capabilities to show hierarchy services from the service view and topology view. Also, we could consider a deeper view of the instance part as well.\nImported Dependencies libs and their licenses. No new library is planned to be added to the codebase.\nCompatibility About the protocol, there should be no breaking changes, but enhancements only. New query protocols( service-hierarchy and instance-hierarchy) are considered to be added, some new fields should be added on things like topology query and instance dependencies to list relative services/instances from other layers directly rather than an extra query.\nAbout the data structure, due to the new data concept is going to be created, service hierarchy relative data models are going to be added. If the user is using Elasticsearch and BanyanDB, this should be compatible, they just need to re-run init-mode OAP to extend the existing models. But for SQL database users(MySQL, PostgreSQL), this could require new tables.\nGraphQL query protocol New query protocol hierarchy.graphqls is going to be added.\ntypeHierarchyRelatedService{# The related service ID.id:ID!# The literal name of the #id.name:String!# The related service\u0026#39;s Layer name.layer:String!normal:Boolean!}typeHierarchyRelatedInstance{# The related instance ID.id:ID!# The literal name of the #id. Instance Name.name:String!# Service idserviceId:ID!# The literal name of the #serviceId.serviceName:String!# The service\u0026#39;s Layer name.# Service could have multiple layers, this is the layer of the service that the instance belongs to.layer:String!normal:Boolean!}typeHierarchyServiceRelation{upperService:HierarchyRelatedService!lowerService:HierarchyRelatedService!}typeHierarchyInstanceRelation{upperInstance:HierarchyRelatedInstance!lowerInstance:HierarchyRelatedInstance!}typeServiceHierarchy{relations:[HierarchyServiceRelation!]!}typeInstanceHierarchy{relations:[HierarchyInstanceRelation!]!}typeLayerLevel{# The layer name.layer:String!# The layer level.# The level of the upper service should greater than the level of the lower service.level:Int!}extendtypeQuery{# Query the service hierarchy, based on the given service. Will recursively return all related layers services in the hierarchy.getServiceHierarchy(serviceId:ID!,layer:String!):ServiceHierarchy!# Query the instance hierarchy, based on the given instance. Will return all direct related layers instances in the hierarchy, no recursive.getInstanceHierarchy(instanceId:ID!,layer:String!):InstanceHierarchy!# List layer hierarchy levels. 
The layer levels are defined in the `hierarchy-definition.yml`.listLayerLevels:[LayerLevel!]!}New data models   service_hierarchy_relation\n   Column name Data type Description     id String serviceId.servicelayer-relatedServiceId.relatedServiceLayer   service_id String upper service id   service_layer int upper service layer value   related_service_id String lower service id   related_service_layer int lower service layer value   time_bucket long       instance_hierarchy_relation\n   Column name Data type Description     id String instanceId.servicelayer-relateInstanceId.relatedServiceLayer   instance_id String upper instance id   service_layer int upper service layer value   related_instance_id String lower instance id   related_service_layer int lower service layer value   time_bucket long       Internal APIs Internal APIs should be exposed in the Core module to support building the hierarchy relationship.\npublic void toServiceHierarchyRelation(String upperServiceName, Layer upperServiceLayer, String lowerServiceName, Layer lowerServiceLayer); public void toInstanceHierarchyRelation(String upperInstanceName, String upperServiceName, Layer upperServiceLayer, String lowerInstanceName, String lowerServiceName, Layer lowerServiceLayer); Hierarchy Definition All layers hierarchy relations are defined in the hierarchy-definition.yml file. OAP will check the hierarchy relations before building and use the matching rules to auto match the relations. Here is an example:\n# Define the hierarchy of service layers, the layers under the specific layer are related lower of the layer.# The relation could have a matching rule for auto matching, which are defined in the `auto-matching-rules` section.# All the layers are defined in the file `org.apache.skywalking.oap.server.core.analysis.Layers.java`.hierarchy:MESH:MESH_DP:nameK8S_SERVICE:short-nameMESH_DP:K8S_SERVICE:short-nameGENERAL:K8S_SERVICE:lower-short-name-remove-nsMYSQL:K8S_SERVICE:~VIRTUAL_DATABASE:MYSQL:~# Use Groovy script to define the matching rules, the input parameters are the upper service(u) and the lower service(l) and the return value is a boolean.# which are used to match the relation between the upper service(u) and the lower service(l) on the different layers.auto-matching-rules:# the name of the upper service is equal to the name of the lower servicename:\u0026#34;{ (u, l) -\u0026gt; u.name == l.name }\u0026#34;# the short name of the upper service is equal to the short name of the lower serviceshort-name:\u0026#34;{ (u, l) -\u0026gt; u.shortName == l.shortName }\u0026#34;# remove the namespace from the lower service short namelower-short-name-remove-ns:\u0026#34;{ (u, l) -\u0026gt; u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf(\u0026#39;.\u0026#39;)) }\u0026#34;# The hierarchy level of the service layer, the level is used to define the order of the service layer for UI presentation,# The level of the upper service should greater than the level of the lower service in `hierarchy` section.layer-levels:MESH:3GENERAL:3VIRTUAL_DATABASE:3MYSQL:2MESH_DP:1K8S_SERVICE:0General usage docs This proposal doesn\u0026rsquo;t impact the end user in any way of using SkyWalking. The remarkable change will be in the UI. On the service dashboard and topology map, the user should be able to see the hierarchy relationship, which means other services in other layers are logically the same as the current one. 
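For readability, the flattened hierarchy-definition.yml example above reads as follows once re-indented; the nesting is reconstructed from the flattened text, and only the hierarchy and layer-levels sections are shown (a sketch, not the complete file):

hierarchy:
  MESH:
    MESH_DP: name
    K8S_SERVICE: short-name
  MESH_DP:
    K8S_SERVICE: short-name
  GENERAL:
    K8S_SERVICE: lower-short-name-remove-ns
  MYSQL:
    K8S_SERVICE: ~
  VIRTUAL_DATABASE:
    MYSQL: ~
layer-levels:
  MESH: 3
  GENERAL: 3
  VIRTUAL_DATABASE: 3
  MYSQL: 2
  MESH_DP: 1
  K8S_SERVICE: 0

Each key under hierarchy names an upper layer, each nested key a related lower layer, and the value names the auto-matching rule used to connect them (~ means no rule); layer-levels orders the layers for UI presentation, with the upper layer carrying the greater level.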
UI would provide the link to jump to the relative service\u0026rsquo;s dashboard.\nNo Goal This proposal doesn\u0026rsquo;t cover all the logic about how to detect the service hierarchy structure. All those should be in a separate SWIP.\n","title":"Create and detect Service Hierarchy Relationship","url":"/docs/main/next/en/swip/swip-1/"},{"content":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... 
ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... 
spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","title":"Create Span","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-tracer/"},{"content":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. 
Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... 
SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${operationName}\u0026#34;); Sample codes only\n","title":"Create Span","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-tracer/"},{"content":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. 
This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... // Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); for (Map.Entry\u0026lt;String, String\u0026gt; entry : map.entrySet()) { if (next.hasNext()) { next = next.next(); if (entry.getKey().equals(next.getHeadKey())) next.setHeadValue(entry.getValue()); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); for (Map.Entry\u0026lt;String, String\u0026gt; entry : map.entrySet()) { if (next.hasNext()) { next = next.next(); if (entry.getKey().equals(next.getHeadKey())) next.setHeadValue(entry.getValue()); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... 
}); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","title":"Create Span","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-tracer/"},{"content":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... 
SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... 
ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... 
spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","title":"Create Span","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-tracer/"},{"content":"Create Span   Use Tracer.createEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel. The first parameter is operation name of span and the second parameter is the ContextCarrierRef instance which is the reference of contextcarrier in agent kernel. If the second parameter is not null, the process of creating entry span will do the extract operation which will be introduced in inject/extract scenario.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, null);   Use Tracer.createLocalSpan() API to create local span, the only parameter is the operation name of span.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;);   Use Tracer.createExitSpan() API to create exit span\n  two parameters case: the first parameter is the operation name of span, the second parameter is the remote peer which means the peer address of exit operation.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... SpanRef spanRef = Tracer.createExitSpan(\u0026#34;${operationName}\u0026#34;, \u0026#34;${remotePeer}\u0026#34;);   three parameters case: the first parameter is the operation name of span, the second parameter is the ContextCarrierRef instance and the third parameter is the remote peer. This case will be introduced in inject/extract scenario.\n    Use Tracer.stopSpan() API to stop current span\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... Tracer.stopSpan();   Inject/Extract Context Carrier The Inject/extract is to pass context information between different process. The ContextCarrierRef contains the reference of ContextCarrier and the CarrierItemRef contains the reference of CarrierItem. The CarrierItem instances compose a linked list.\n Use Tracer.inject() to inject information of current context into carrier Use Tracer.extract() to extract info from contextCarrier. Use items() of ContextCarrierRef instance to get head CarrierItemRef instance. Use hasNext() of CarrierItemRef instance to judge if the CarrierItemRef has next item. 
Use next() of CarrierItemRef instance to get next item Use getHeadKey of CarrierItemRef instance to get key of current item Use getHeadValue of CarrierItemRef instance to get value of current item Use setHeadValue of CarrierItemRef instance to set value of current item  /* You can consider map as the message\u0026#39;s header/metadata, such as Http, MQ and RPC. Do the inject operation in one process and then pass the map in header/metadata. */ ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); Tracer.inject(contextCarrierRef); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... note: Inject can be done only in Exit Span\n// Receive the map representing a header/metadata and do the extract operation in another process. ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } Tracer.extract(contextCarrierRef); Also, you can do the inject/extract operation when creating exit/entry span.\nContextCarrierRef contextCarrierRef = new ContextCarrierRef(); SpanRef spanRef = Tracer.createExitSpan(\u0026#34;operationName\u0026#34;, contextCarrierRef, \u0026#34;remotePeer\u0026#34;); Map\u0026lt;String, String\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); CarrierItemRef next = contextCarrierRef.items(); while (next.hasNext()) { next = next.next(); map.put(next.getHeadKey(), next.getHeadValue()); } ... ... ContextCarrierRef contextCarrierRef = new ContextCarrierRef(); CarrierItemRef next = contextCarrierRef.items(); while ((next.hasNext())) { next = next.next(); String value = map.get(next.getHeadKey()); if (value != null){ next.setHeadValue(value); } } SpanRef spanRef = Tracer.createEntrySpan(\u0026#34;${operationName}\u0026#34;, contextCarrierRef); Capture/Continue Context Snapshot   Use Tracer.capture() to capture the segment info and store it in ContextSnapshotRef, and then use Tracer.continued() to load the snapshot as the ref segment info. The capture/continue is used for tracing context in the x-thread tracing.\nimport org.apache.skywalking.apm.toolkit.trace.Tracer; ... ContextSnapshotRef contextSnapshotRef = Tracer.capture(); Thread thread = new Thread(() -\u0026gt; { SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); Tracer.continued(contextSnapshotRef); ... }); thread.start(); thread.join();   Add Span\u0026rsquo;s Tag and Log   Use log of SpanRef instance to record log in span\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); // Throwable parameter spanRef.log(new RuntimeException(\u0026#34;${exception_message}\u0026#34;)); // Map parameter Map\u0026lt;String, String\u0026gt; logMap = new HashMap\u0026lt;\u0026gt;(); logMap.put(\u0026#34;event\u0026#34;, \u0026#34;${event_type}\u0026#34;); logMap.put(\u0026#34;message\u0026#34;, \u0026#34;${message_value}\u0026#34;); spanRef.log(logMap);   Use tag of SpanRef instance to add tag to span, the parameters of tag are two String which are key and value respectively.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... 
SpanRef spanRef = Tracer.createLocalSpan(operationName); spanRef.tag(\u0026#34;${key}\u0026#34;, \u0026#34;${value}\u0026#34;);   Async Prepare/Finish   Use prepareForAsync of SpanRef instance to make the span still alive until asyncFinish called, and then in specific time use asyncFinish of this SpanRef instance to notify this span that it could be finished.\nimport org.apache.skywalking.apm.toolkit.trace.SpanRef; ... SpanRef spanRef = Tracer.createLocalSpan(\u0026#34;${operationName}\u0026#34;); spanRef.prepareForAsync(); // the span does not finish because of the prepareForAsync() operation Tracer.stopSpan(); Thread thread = new Thread(() -\u0026gt; { ... spanRef.asyncFinish(); }); thread.start(); thread.join();   ActiveSpan You can use the ActiveSpan to get the current span and do some operations.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); ActiveSpan.setOperationName(\u0026#34;${opetationName}\u0026#34;); Sample codes only\n","title":"Create Span","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-tracer/"},{"content":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a collection of a class of resources. Each resource has a name unique to a group.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new group to the database\u0026rsquo;s metadata registry repository. If the group does not currently exist, create operation will create the schema.\nExamples of creating $ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store group data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nGet operation Get operation gets a group\u0026rsquo;s schema.\nExamples of getting $ bydbctl group get -g sw_metric Update operation Update operation updates a group\u0026rsquo;s schema.\nExamples of updating If we want to change the ttl of the data in this group to be 1 day, use the command:\n$ bydbctl group update -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 1 EOF Delete operation Delete operation deletes a group\u0026rsquo;s schema.\nExamples of deleting $ bydbctl group delete -g sw_metric List operation The list operation shows all groups' schema.\nExamples $ bydbctl group list API Reference GroupService v1\n","title":"CRUD Groups","url":"/docs/skywalking-banyandb/latest/crud/group/"},{"content":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a collection of a class of resources. Each resource has a name unique to a group.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new group to the database\u0026rsquo;s metadata registry repository. If the group does not currently exist, create operation will create the schema.\nExamples of creating $ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store group data points. Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nGet operation Get operation gets a group\u0026rsquo;s schema.\nExamples of getting $ bydbctl group get -g sw_metric Update operation Update operation updates a group\u0026rsquo;s schema.\nExamples of updating If we want to change the ttl of the data in this group to be 1 day, use the command:\n$ bydbctl group update -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 1 EOF Delete operation Delete operation deletes a group\u0026rsquo;s schema.\nExamples of deleting $ bydbctl group delete -g sw_metric List operation The list operation shows all groups' schema.\nExamples $ bydbctl group list API Reference GroupService v1\n","title":"CRUD Groups","url":"/docs/skywalking-banyandb/next/crud/group/"},{"content":"CRUD Groups CRUD operations create, read, update and delete groups.\nThe group represents a collection of a class of resources. Each resource has a name unique to a group.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new group to the database\u0026rsquo;s metadata registry repository. If the group does not currently exist, create operation will create the schema.\nExamples of creating $ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store group data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nGet operation Get operation gets a group\u0026rsquo;s schema.\nExamples of getting $ bydbctl group get -g sw_metric Update operation Update operation updates a group\u0026rsquo;s schema.\nExamples of updating If we want to change the ttl of the data in this group to be 1 day, use the command:\n$ bydbctl group update -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 1 EOF Delete operation Delete operation deletes a group\u0026rsquo;s schema.\nExamples of deleting $ bydbctl group delete -g sw_metric List operation The list operation shows all groups' schema.\nExamples $ bydbctl group list API Reference GroupService v1\n","title":"CRUD Groups","url":"/docs/skywalking-banyandb/v0.5.0/crud/group/"},{"content":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index rule binding is a bridge to connect several index rules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule binding to the database\u0026rsquo;s metadata registry repository. If the index rule binding does not currently exist, create operation will create the schema.\nExamples An index rule binding belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a index rule binding. The subject(stream/measure) and index rule MUST live in the same group with the binding.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRuleBinding data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new indexRuleBinding:\n$ bydbctl indexRuleBinding create -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The YAML contains:\n rules: references to the name of index rules. subject: stream or measure\u0026rsquo;s name and catalog. begin_at and expire_at: the TTL of this binding.  
Get operation Get(Read) operation gets an index rule binding\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRuleBinding get -g sw_stream -n stream_binding Update operation Update operation update an index rule binding\u0026rsquo;s schema.\nExamples updating $ bydbctl indexRuleBinding update -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic # Remove this rule # - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The new YAML removed the index rule extended_tags\u0026rsquo;s binding.\nDelete operation Delete operation delete an index rule binding\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRuleBinding delete -g sw_stream -n stream_binding List operation List operation list all index rule bindings in a group.\nExamples of listing $ bydbctl indexRuleBinding list -g sw_stream API Reference indexRuleBindingService v1\n","title":"CRUD indexRuleBindings","url":"/docs/skywalking-banyandb/latest/crud/index_rule_binding/"},{"content":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index rule binding is a bridge to connect several index rules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule binding to the database\u0026rsquo;s metadata registry repository. If the index rule binding does not currently exist, create operation will create the schema.\nExamples An index rule binding belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a index rule binding. The subject(stream/measure) and index rule MUST live in the same group with the binding.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRuleBinding data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new indexRuleBinding:\n$ bydbctl indexRuleBinding create -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The YAML contains:\n rules: references to the name of index rules. subject: stream or measure\u0026rsquo;s name and catalog. begin_at and expire_at: the TTL of this binding.  
Get operation Get(Read) operation gets an index rule binding\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRuleBinding get -g sw_stream -n stream_binding Update operation Update operation update an index rule binding\u0026rsquo;s schema.\nExamples updating $ bydbctl indexRuleBinding update -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic # Remove this rule # - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The new YAML removed the index rule extended_tags\u0026rsquo;s binding.\nDelete operation Delete operation delete an index rule binding\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRuleBinding delete -g sw_stream -n stream_binding List operation List operation list all index rule bindings in a group.\nExamples of listing $ bydbctl indexRuleBinding list -g sw_stream API Reference indexRuleBindingService v1\n","title":"CRUD indexRuleBindings","url":"/docs/skywalking-banyandb/next/crud/index_rule_binding/"},{"content":"CRUD indexRuleBindings CRUD operations create, read, update and delete index rule bindings.\nAn index rule binding is a bridge to connect several index rules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule binding to the database\u0026rsquo;s metadata registry repository. If the index rule binding does not currently exist, create operation will create the schema.\nExamples An index rule binding belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a index rule binding. The subject(stream/measure) and index rule MUST live in the same group with the binding.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRuleBinding data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new indexRuleBinding:\n$ bydbctl indexRuleBinding create -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The YAML contains:\n rules: references to the name of index rules. subject: stream or measure\u0026rsquo;s name and catalog. begin_at and expire_at: the TTL of this binding.  
Get operation Get(Read) operation gets an index rule binding\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRuleBinding get -g sw_stream -n stream_binding Update operation Update operation update an index rule binding\u0026rsquo;s schema.\nExamples updating $ bydbctl indexRuleBinding update -f - \u0026lt;\u0026lt;EOF metadata: name: stream_binding group: sw_stream rules: - trace_id - duration - endpoint_id - status_code - http.method - db.instance - db.type - mq.broker - mq.queue - mq.topic # Remove this rule # - extended_tags subject: catalog: CATALOG_STREAM name: sw begin_at: \u0026#39;2021-04-15T01:30:15.01Z\u0026#39; expire_at: \u0026#39;2121-04-15T01:30:15.01Z\u0026#39; EOF The new YAML removed the index rule extended_tags\u0026rsquo;s binding.\nDelete operation Delete operation delete an index rule binding\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRuleBinding delete -g sw_stream -n stream_binding List operation List operation list all index rule bindings in a group.\nExamples of listing $ bydbctl indexRuleBinding list -g sw_stream API Reference indexRuleBindingService v1\n","title":"CRUD indexRuleBindings","url":"/docs/skywalking-banyandb/v0.5.0/crud/index_rule_binding/"},{"content":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how to generate indices based on tags and the index type. IndexRule should bind to a subject(stream or measure) through an IndexRuleBinding to generate proper indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule to the database\u0026rsquo;s metadata registry repository. If the index rule does not currently exist, create operation will create the schema.\nExamples of creating An index rule belongs to its subjects' group. We should create such a group if there is no such group.\nThe command supposes that the index rule will bind to streams. So it creates a CATALOG_STREAM group here.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_stream catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRule data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the next command will create a new index rule:\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_TREE location: LOCATION_GLOBAL EOF This YAML creates an index rule which uses the tag trace_id to generate a TREE_TYPE index which is located at GLOBAL.\nGet operation Get(Read) operation gets an index rule\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRule get -g sw_stream -n trace_id Update operation Update operation updates an index rule\u0026rsquo;s schema.\nExamples of updating This example changes the type from TREE to INVERTED.\n$ bydbctl indexRule update -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED location: LOCATION_GLOBAL EOF Delete operation Delete operation deletes an index rule\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRule delete -g sw_stream -n trace_id List operation List operation list all index rules' schema in a group.\nExamples of listing $ bydbctl indexRule list -g sw_stream API Reference indexRuleService v1\n","title":"CRUD IndexRules","url":"/docs/skywalking-banyandb/latest/crud/index_rule/"},{"content":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how to generate indices based on tags and the index type. IndexRule should bind to a subject(stream or measure) through an IndexRuleBinding to generate proper indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule to the database\u0026rsquo;s metadata registry repository. If the index rule does not currently exist, create operation will create the schema.\nExamples of creating An index rule belongs to its subjects' group. We should create such a group if there is no such group.\nThe command supposes that the index rule will bind to streams. So it creates a CATALOG_STREAM group here.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_stream catalog: CATALOG_STREAM resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRule data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the next command will create a new index rule:\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED EOF This YAML creates an index rule which uses the tag trace_id to generate a TYPE_INVERTED index.\nGet operation Get(Read) operation gets an index rule\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRule get -g sw_stream -n trace_id Update operation Update operation updates an index rule\u0026rsquo;s schema.\nExamples of updating This example changes the type from TREE to INVERTED.\n$ bydbctl indexRule update -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED EOF Delete operation Delete operation deletes an index rule\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRule delete -g sw_stream -n trace_id List operation List operation list all index rules' schema in a group.\nExamples of listing $ bydbctl indexRule list -g sw_stream API Reference indexRuleService v1\n","title":"CRUD IndexRules","url":"/docs/skywalking-banyandb/next/crud/index_rule/"},{"content":"CRUD IndexRules CRUD operations create, read, update and delete index rules.\nIndexRule defines how to generate indices based on tags and the index type. IndexRule should bind to a subject(stream or measure) through an IndexRuleBinding to generate proper indices.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new index rule to the database\u0026rsquo;s metadata registry repository. If the index rule does not currently exist, create operation will create the schema.\nExamples of creating An index rule belongs to its subjects' group. We should create such a group if there is no such group.\nThe command supposes that the index rule will bind to streams. So it creates a CATALOG_STREAM group here.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_stream catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store indexRule data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the next command will create a new index rule:\n$ bydbctl indexRule create -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_TREE location: LOCATION_GLOBAL EOF This YAML creates an index rule which uses the tag trace_id to generate a TREE_TYPE index which is located at GLOBAL.\nGet operation Get(Read) operation gets an index rule\u0026rsquo;s schema.\nExamples of getting $ bydbctl indexRule get -g sw_stream -n trace_id Update operation Update operation updates an index rule\u0026rsquo;s schema.\nExamples of updating This example changes the type from TREE to INVERTED.\n$ bydbctl indexRule update -f - \u0026lt;\u0026lt;EOF metadata: name: trace_id group: sw_stream tags: - trace_id type: TYPE_INVERTED location: LOCATION_GLOBAL EOF Delete operation Delete operation deletes an index rule\u0026rsquo;s schema.\nExamples of deleting $ bydbctl indexRule delete -g sw_stream -n trace_id List operation List operation list all index rules' schema in a group.\nExamples of listing $ bydbctl indexRule list -g sw_stream API Reference indexRuleService v1\n","title":"CRUD IndexRules","url":"/docs/skywalking-banyandb/v0.5.0/crud/index_rule/"},{"content":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new measure to the database\u0026rsquo;s metadata registry repository. If the measure does not currently exist, create operation will create the schema.\nExamples of creating A measure belongs to a unique group. We should create such a group with a catalog CATALOG_MEASURE before creating a measure.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the below command will create a new measure:\n$ bydbctl measure create -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tag_families: - name: default tags: - name: id type: TAG_TYPE_STRING - name: entity_id type: TAG_TYPE_STRING fields: - name: total field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD - name: value field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD entity: tag_names: - entity_id interval: 1m EOF service_cpm_minute expects to ingest a series of data points with a minute interval.\nGet operation Get(Read) operation gets a measure\u0026rsquo;s schema.\nExamples of getting $ bydbctl measure get -g sw_metric -n service_cpm_minute Update operation Update operation changes a measure\u0026rsquo;s schema.\nExamples of updating $ bydbctl measure update -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tag_names: - entity_id EOF Delete operation Delete operation removes a measure\u0026rsquo;s schema.\nExamples of deleting $ bydbctl measure delete -g sw_metric -n service_cpm_minute List operation The list operation shows all measures' schema in a group.\nExamples of listing $ bydbctl measure list -g sw_metric API Reference MeasureService v1\n","title":"CRUD Measures","url":"/docs/skywalking-banyandb/latest/crud/measure/schema/"},{"content":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new measure to the database\u0026rsquo;s metadata registry repository. If the measure does not currently exist, create operation will create the schema.\nExamples of creating A measure belongs to a unique group. We should create such a group with a catalog CATALOG_MEASURE before creating a measure.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store data points. 
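Retention for this group comes entirely from resource_opts. If the 7-day ttl later needs to change, a hedged sketch of raising it to 30 days, assuming the group subcommand supports update in the same way the other resources on these pages do (the command and exact indentation are assumptions reconstructed from the flattened YAML above):
$ bydbctl group update -f - <<EOF
metadata:
  name: sw_metric
catalog: CATALOG_MEASURE
resource_opts:
  shard_num: 2
  segment_interval:
    unit: UNIT_DAY
    num: 1
  ttl:
    unit: UNIT_DAY
    num: 30          # extend retention from 7 to 30 days
EOF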
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the below command will create a new measure:\n$ bydbctl measure create -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tag_families: - name: default tags: - name: id type: TAG_TYPE_STRING - name: entity_id type: TAG_TYPE_STRING fields: - name: total field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD - name: value field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD entity: tag_names: - entity_id interval: 1m EOF service_cpm_minute expects to ingest a series of data points with a minute interval.\nGet operation Get(Read) operation gets a measure\u0026rsquo;s schema.\nExamples of getting $ bydbctl measure get -g sw_metric -n service_cpm_minute Update operation Update operation changes a measure\u0026rsquo;s schema.\nExamples of updating $ bydbctl measure update -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tag_names: - entity_id EOF Delete operation Delete operation removes a measure\u0026rsquo;s schema.\nExamples of deleting $ bydbctl measure delete -g sw_metric -n service_cpm_minute List operation The list operation shows all measures' schema in a group.\nExamples of listing $ bydbctl measure list -g sw_metric API Reference MeasureService v1\n","title":"CRUD Measures","url":"/docs/skywalking-banyandb/next/crud/measure/schema/"},{"content":"CRUD Measures CRUD operations create, read, update and delete measures.\nbydbctl is the command line tool in examples.\nCreate operation Create operation adds a new measure to the database\u0026rsquo;s metadata registry repository. If the measure does not currently exist, create operation will create the schema.\nExamples of creating A measure belongs to a unique group. We should create such a group with a catalog CATALOG_MEASURE before creating a measure.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw_metric catalog: CATALOG_MEASURE resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store data points. 
Every day, it would create a segment that will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, the below command will create a new measure:\n$ bydbctl measure create -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tag_families: - name: default tags: - name: id type: TAG_TYPE_STRING - name: entity_id type: TAG_TYPE_STRING fields: - name: total field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD - name: value field_type: FIELD_TYPE_INT encoding_method: ENCODING_METHOD_GORILLA compression_method: COMPRESSION_METHOD_ZSTD entity: tag_names: - entity_id interval: 1m EOF service_cpm_minute expects to ingest a series of data points with a minute interval.\nGet operation Get(Read) operation gets a measure\u0026rsquo;s schema.\nExamples of getting $ bydbctl measure get -g sw_metric -n service_cpm_minute Update operation Update operation changes a measure\u0026rsquo;s schema.\nExamples of updating $ bydbctl measure update -f - \u0026lt;\u0026lt;EOF metadata: name: service_cpm_minute group: sw_metric tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tag_names: - entity_id EOF Delete operation Delete operation removes a measure\u0026rsquo;s schema.\nExamples of deleting $ bydbctl measure delete -g sw_metric -n service_cpm_minute List operation The list operation shows all measures' schema in a group.\nExamples of listing $ bydbctl measure list -g sw_metric API Reference MeasureService v1\n","title":"CRUD Measures","url":"/docs/skywalking-banyandb/v0.5.0/crud/measure/schema/"},{"content":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user defined data.\nbydbctl is the command line tool in examples.\nApply (Create/Update) operation Apply creates a property if it\u0026rsquo;s absent, or updates an existed one based on a strategy. If the property does not currently exist, create operation will create the property.\nExamples of applying A property belongs to a unique group. 
We should create such a group before creating a property.\nThe group\u0026rsquo;s catalog should be empty.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw EOF Then, below command will create a new property:\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: name value: str: value: \u0026#34;hello\u0026#34; - key: state value: str: value: \u0026#34;succeed\u0026#34; EOF The operation supports updating partial tags.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; EOF TTL is supported in the operation.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; Get operation Get operation gets a property.\nExamples of getting $ bydbctl property get -g sw -n temp_data --id General-Service The operation could filter data by tags.\n$ bydbctl property get -g sw -n temp_data --id General-Service --tags state Delete operation Delete operation delete a property.\nExamples of deleting $ bydbctl property delete -g sw -n temp_data --id General-Service The delete operation could remove specific tags instead of the whole property.\n$ bydbctl property delete -g sw -n temp_data --id General-Service --tags state List operation List operation lists all properties in a group.\nExamples of listing in a group $ bydbctl property list -g sw List operation lists all properties in a group with a name.\nExamples of listing in a group with a name $ bydbctl property list -g sw -n temp_data TTL field in a property TTL field in a property is used to set the time to live of the property. The property will be deleted automatically after the TTL.\nThis functionality is supported by the lease mechanism. The readonly lease_id field is used to identify the lease of the property.\nExamples of setting TTL $ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; EOF The lease_id is returned in the response. You can use get operation to get the property with the lease_id as well.\n$ bydbctl property get -g sw -n temp_data --id General-Service The lease_id is used to keep the property alive. You can use keepalive operation to keep the property alive. When the keepalive operation is called, the property\u0026rsquo;s TTL will be reset to the original value.\n$ bydbctl property keepalive --lease_id 1 API Reference MeasureService v1\n","title":"CRUD Property","url":"/docs/skywalking-banyandb/latest/crud/property/"},{"content":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user defined data.\nbydbctl is the command line tool in examples.\nApply (Create/Update) operation Apply creates a property if it\u0026rsquo;s absent, or updates an existed one based on a strategy. If the property does not currently exist, create operation will create the property.\nExamples of applying A property belongs to a unique group. 
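The TTL variant of the apply command shown above appears to have lost its closing heredoc terminator on this page. Written out in full it would look like the sketch below; the indentation is reconstructed from the flattened text and is therefore an assumption:
$ bydbctl property apply -f - <<EOF
metadata:
  container:
    group: sw
    name: temp_data
  id: General-Service
tags:
  - key: state
    value:
      str:
        value: "failed"
ttl: "1h"
EOF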
We should create such a group before creating a property.\nThe group\u0026rsquo;s catalog should be empty.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw EOF Then, below command will create a new property:\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: name value: str: value: \u0026#34;hello\u0026#34; - key: state value: str: value: \u0026#34;succeed\u0026#34; EOF The operation supports updating partial tags.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; EOF TTL is supported in the operation.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; Get operation Get operation gets a property.\nExamples of getting $ bydbctl property get -g sw -n temp_data --id General-Service The operation could filter data by tags.\n$ bydbctl property get -g sw -n temp_data --id General-Service --tags state Delete operation Delete operation delete a property.\nExamples of deleting $ bydbctl property delete -g sw -n temp_data --id General-Service The delete operation could remove specific tags instead of the whole property.\n$ bydbctl property delete -g sw -n temp_data --id General-Service --tags state List operation List operation lists all properties in a group.\nExamples of listing in a group $ bydbctl property list -g sw List operation lists all properties in a group with a name.\nExamples of listing in a group with a name $ bydbctl property list -g sw -n temp_data TTL field in a property TTL field in a property is used to set the time to live of the property. The property will be deleted automatically after the TTL.\nThis functionality is supported by the lease mechanism. The readonly lease_id field is used to identify the lease of the property.\nExamples of setting TTL $ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; EOF The lease_id is returned in the response. You can use get operation to get the property with the lease_id as well.\n$ bydbctl property get -g sw -n temp_data --id General-Service The lease_id is used to keep the property alive. You can use keepalive operation to keep the property alive. When the keepalive operation is called, the property\u0026rsquo;s TTL will be reset to the original value.\n$ bydbctl property keepalive --lease_id 1 API Reference MeasureService v1\n","title":"CRUD Property","url":"/docs/skywalking-banyandb/next/crud/property/"},{"content":"CRUD Property CRUD operations create/update, read and delete property.\nProperty stores the user defined data.\nbydbctl is the command line tool in examples.\nApply (Create/Update) operation Apply creates a property if it\u0026rsquo;s absent, or updates an existed one based on a strategy. If the property does not currently exist, create operation will create the property.\nExamples of applying A property belongs to a unique group. 
We should create such a group before creating a property.\nThe group\u0026rsquo;s catalog should be empty.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: sw EOF Then, below command will create a new property:\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: name value: str: value: \u0026#34;hello\u0026#34; - key: state value: str: value: \u0026#34;succeed\u0026#34; EOF The operation supports updating partial tags.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; EOF TTL is supported in the operation.\n$ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; Get operation Get operation gets a property.\nExamples of getting $ bydbctl property get -g sw -n temp_data --id General-Service The operation could filter data by tags.\n$ bydbctl property get -g sw -n temp_data --id General-Service --tags state Delete operation Delete operation delete a property.\nExamples of deleting $ bydbctl property delete -g sw -n temp_data --id General-Service The delete operation could remove specific tags instead of the whole property.\n$ bydbctl property delete -g sw -n temp_data --id General-Service --tags state List operation List operation lists all properties in a group.\nExamples of listing in a group $ bydbctl property list -g sw List operation lists all properties in a group with a name.\nExamples of listing in a group with a name $ bydbctl property list -g sw -n temp_data TTL field in a property TTL field in a property is used to set the time to live of the property. The property will be deleted automatically after the TTL.\nThis functionality is supported by the lease mechanism. The readonly lease_id field is used to identify the lease of the property.\nExamples of setting TTL $ bydbctl property apply -f - \u0026lt;\u0026lt;EOF metadata: container: group: sw name: temp_data id: General-Service tags: - key: state value: str: value: \u0026#34;failed\u0026#34; ttl: \u0026#34;1h\u0026#34; EOF The lease_id is returned in the response. You can use get operation to get the property with the lease_id as well.\n$ bydbctl property get -g sw -n temp_data --id General-Service The lease_id is used to keep the property alive. You can use keepalive operation to keep the property alive. When the keepalive operation is called, the property\u0026rsquo;s TTL will be reset to the original value.\n$ bydbctl property keepalive --lease_id 1 API Reference MeasureService v1\n","title":"CRUD Property","url":"/docs/skywalking-banyandb/v0.5.0/crud/property/"},{"content":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line tool in examples.\nStream intends to store streaming data, for example, traces or logs.\nCreate operation Create operation adds a new stream to the database\u0026rsquo;s metadata registry repository. If the stream does not currently exist, create operation will create the schema.\nExamples of creating A stream belongs to a unique group. 
We should create such a group with a catalog CATALOG_STREAM before creating a stream.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store stream data points. Every day, it creates a segment, which generates a block every 2 hours.\nThe data in this group will be kept for 7 days.\nThen, the below command will create a new stream:\n$ bydbctl stream create -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Get operation Get(Read) operation gets a stream\u0026rsquo;s schema.\nExamples of getting $ bydbctl stream get -g default -n sw Update operation Update operation updates a stream\u0026rsquo;s schema.\nExamples of updating bydbctl is the command line tool to update a stream in this example.\n$ bydbctl stream update -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Delete operation Delete operation deletes a stream\u0026rsquo;s schema.\nExamples of deleting bydbctl is the command line tool to delete a stream in this example.\n$ bydbctl stream delete -g default -n sw List operation List operation lists all streams' schemas in a group.\nExamples of listing $ bydbctl stream list -g default API Reference StreamService v1\n","title":"CRUD Streams","url":"/docs/skywalking-banyandb/latest/crud/stream/schema/"},{"content":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line tool in examples.\nStream intends to store streaming data, for example, traces or logs.\nCreate operation Create operation adds a new stream to the database\u0026rsquo;s metadata registry repository. If the stream does not currently exist, the create operation will create the schema.\nExamples of creating A stream belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a stream.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store stream data points. 
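Before registering streams against a group like this, it can help to confirm the registration. A sketch, assuming the group subcommand follows the same get/list pattern that the stream and measure pages use; the flag layout is an assumption, not taken from this page:
$ bydbctl group get -g default
$ bydbctl group list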
Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new stream:\n$ bydbctl stream create -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Get operation Get(Read) operation get a stream\u0026rsquo;s schema.\nExamples of getting $ bydbctl stream get -g default -n sw Update operation Update operation update a stream\u0026rsquo;s schema.\nExamples of updating bydbctl is the command line tool to update a stream in this example.\n$ bydbctl stream update -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Delete operation Delete operation delete a stream\u0026rsquo;s schema.\nExamples of deleting bydbctl is the command line tool to delete a stream in this example.\n$ bydbctl stream delete -g default -n sw List operation List operation list all streams' schema in a group.\nExamples of listing $ bydbctl stream list -g default API Reference StreamService v1\n","title":"CRUD Streams","url":"/docs/skywalking-banyandb/next/crud/stream/schema/"},{"content":"CRUD Streams CRUD operations create, read, update and delete streams.\nbydbctl is the command line tool in examples.\nStream intends to store streaming data, for example, traces or logs.\nCreate operation Create operation adds a new stream to the database\u0026rsquo;s metadata registry repository. If the stream does not currently exist, create operation will create the schema.\nExamples of creating A stream belongs to a unique group. We should create such a group with a catalog CATALOG_STREAM before creating a stream.\n$ bydbctl group create -f - \u0026lt;\u0026lt;EOF metadata: name: default catalog: CATALOG_STREAM resource_opts: shard_num: 2 block_interval: unit: UNIT_HOUR num: 2 segment_interval: unit: UNIT_DAY num: 1 ttl: unit: UNIT_DAY num: 7 EOF The group creates two shards to store stream data points. Every one day, it would create a segment which will generate a block every 2 hours.\nThe data in this group will keep 7 days.\nThen, below command will create a new stream:\n$ bydbctl stream create -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Get operation Get(Read) operation get a stream\u0026rsquo;s schema.\nExamples of getting $ bydbctl stream get -g default -n sw Update operation Update operation update a stream\u0026rsquo;s schema.\nExamples of updating bydbctl is the command line tool to update a stream in this example.\n$ bydbctl stream update -f - \u0026lt;\u0026lt;EOF metadata: name: sw group: default tagFamilies: - name: searchable tags: - name: trace_id type: TAG_TYPE_STRING entity: tagNames: - stream_id EOF Delete operation Delete operation delete a stream\u0026rsquo;s schema.\nExamples of deleting bydbctl is the command line tool to delete a stream in this example.\n$ bydbctl stream delete -g default -n sw List operation List operation list all streams' schema in a group.\nExamples of listing $ bydbctl stream list -g default API Reference StreamService v1\n","title":"CRUD Streams","url":"/docs/skywalking-banyandb/v0.5.0/crud/stream/schema/"},{"content":"Custom metrics Adapter This adapter contains an implementation of external metrics API. 
It is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.9+.\nUse kustomize to customise your deployment  Clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Edit file adapter/config/adapter/kustomization.yaml file to change your preferences. If you prefer to your private docker image, a quick path to override ADAPTER_IMG environment variable : export ADAPTER_IMG=\u0026lt;private registry\u0026gt;/metrics-adapter:\u0026lt;tag\u0026gt;\n  Use make to generate the final manifests and deploy:\n  make -C adapter deploy Configuration The adapter takes the standard Kubernetes generic API server arguments (including those for authentication and authorization). By default, it will attempt to using Kubernetes in-cluster config to connect to the cluster.\nIt takes the following addition arguments specific to configuring how the adapter talks to SkyWalking OAP cluster:\n --oap-addr The address of OAP cluster. --metric-filter-regex A regular expression to filter metrics retrieved from OAP cluster. --refresh-interval This is the interval at which to update the cache of available metrics from OAP cluster. --namespace A prefix to which metrics are appended. The format is \u0026lsquo;namespace|metric_name\u0026rsquo;, defaults to skywalking.apache.org  HPA Configuration External metrics allow you to autoscale your cluster based on any metric available in OAP cluster. Just provide a metric block with a name and selector, and use the External metric type.\n- type:Externalexternal:metric:name:\u0026lt;metric_name\u0026gt;selector:matchLabels:\u0026lt;label_key\u0026gt;:\u0026lt;label_value\u0026gt;...target:.... metric_name: The name of metric generated by OAL or other subsystem. label: label_key is the entity name of skywalking metrics. if the label value contains special characters more than ., - and _, service.str.\u0026lt;number\u0026gt; represent the literal of label value, and service.byte.\u0026lt;number\u0026gt; could encode these special characters to hex bytes.  Supposing the service name is v1|productpage|bookinfo|demo, the matchLabels should be like the below piece:\nmatchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;// the hex byte of \u0026#34;|\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34; Caveats: byte label only accept a single character. That means || should be transformed to service.byte.0:\u0026quot;7c\u0026quot; and service.byte.1:\u0026quot;7c\u0026quot; instead of service.byte.0:\u0026quot;7c7c\u0026quot;\n The options of label keys are:\n service, service.str.\u0026lt;number\u0026gt; or service.byte.\u0026lt;number\u0026gt; The name of the service. instance, instance.str.\u0026lt;number\u0026gt; or instance.byte.\u0026lt;number\u0026gt; The name of the service instance. endpoint, endpoint.str.\u0026lt;number\u0026gt; or endpoint.byte.\u0026lt;number\u0026gt; The name of the endpoint. label, label.str.\u0026lt;number\u0026gt; or label.byte.\u0026lt;number\u0026gt; is optional, The labels you need to query, used for querying multi-labels metrics. Unlike swctl, this key only supports a single label due to the specification of the custom metrics API.  
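Stepping back to deployment for a moment: the adapter flags listed above (--oap-addr, --metric-filter-regex, --refresh-interval, --namespace) are normally passed to the adapter container rather than typed by hand. A hedged sketch of what that could look like inside a standard Kubernetes Deployment pod spec; the image name, address format, and values are illustrative placeholders, not taken from this page:
containers:
  - name: metrics-adapter
    image: <private registry>/metrics-adapter:<tag>          # placeholder, see ADAPTER_IMG above
    args:
      - --oap-addr=skywalking-oap.default.svc:12800           # illustrative OAP address
      - --metric-filter-regex=service_.*                      # only expose service_* metrics
      - --refresh-interval=10s                                # cache refresh period
      - --namespace=skywalking.apache.org                     # metric name prefix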
For example, if your application name is front_gateway, you could add the following section to your HorizontalPodAutoscaler manifest to specify that you need less than 80ms of 90th latency.\n- type:Externalexternal:metric:name:skywalking.apache.org|service_percentileselector:matchLabels:service:front_gateway# The index of [P50, P75, P90, P95, P99]. 2 is the index of P90(90%)label:\u0026#34;2\u0026#34;target:type:Valuevalue:80If the service is v1|productpage|bookinfo|demo|-:\n- type:Externalexternal:metric:name:skywalking.apache.org|service_cpmselector:matchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34;\u0026#34;service.byte.7\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.byte.8\u0026#34;: \u0026#34;2d\u0026#34;target:type:Valuevalue:80","title":"Custom metrics Adapter","url":"/docs/skywalking-swck/latest/custom-metrics-adapter/"},{"content":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.9+.\nUse kustomize to customise your deployment  Clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Edit file adapter/config/adapter/kustomization.yaml file to change your preferences. If you prefer to your private docker image, a quick path to override ADAPTER_IMG environment variable : export ADAPTER_IMG=\u0026lt;private registry\u0026gt;/metrics-adapter:\u0026lt;tag\u0026gt;\n  Use make to generate the final manifests and deploy:\n  make -C adapter deploy Configuration The adapter takes the standard Kubernetes generic API server arguments (including those for authentication and authorization). By default, it will attempt to using Kubernetes in-cluster config to connect to the cluster.\nIt takes the following addition arguments specific to configuring how the adapter talks to SkyWalking OAP cluster:\n --oap-addr The address of OAP cluster. --metric-filter-regex A regular expression to filter metrics retrieved from OAP cluster. --refresh-interval This is the interval at which to update the cache of available metrics from OAP cluster. --namespace A prefix to which metrics are appended. The format is \u0026lsquo;namespace|metric_name\u0026rsquo;, defaults to skywalking.apache.org  HPA Configuration External metrics allow you to autoscale your cluster based on any metric available in OAP cluster. Just provide a metric block with a name and selector, and use the External metric type.\n- type:Externalexternal:metric:name:\u0026lt;metric_name\u0026gt;selector:matchLabels:\u0026lt;label_key\u0026gt;:\u0026lt;label_value\u0026gt;...target:.... metric_name: The name of metric generated by OAL or other subsystem. label: label_key is the entity name of skywalking metrics. if the label value contains special characters more than ., - and _, service.str.\u0026lt;number\u0026gt; represent the literal of label value, and service.byte.\u0026lt;number\u0026gt; could encode these special characters to hex bytes.  
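To work out the service.byte.<number> values by hand, any tool that prints a character's hex code will do; for instance, the POSIX printf built-in, shown here for the two characters this page already encodes (| as 7c and - as 2d):
$ printf '%x\n' "'|"
7c
$ printf '%x\n' "'-"
2d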
Supposing the service name is v1|productpage|bookinfo|demo, the matchLabels should be like the below piece:\nmatchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;// the hex byte of \u0026#34;|\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34; Caveats: byte label only accept a single character. That means || should be transformed to service.byte.0:\u0026quot;7c\u0026quot; and service.byte.1:\u0026quot;7c\u0026quot; instead of service.byte.0:\u0026quot;7c7c\u0026quot;\n The options of label keys are:\n service, service.str.\u0026lt;number\u0026gt; or service.byte.\u0026lt;number\u0026gt; The name of the service. instance, instance.str.\u0026lt;number\u0026gt; or instance.byte.\u0026lt;number\u0026gt; The name of the service instance. endpoint, endpoint.str.\u0026lt;number\u0026gt; or endpoint.byte.\u0026lt;number\u0026gt; The name of the endpoint. label, label.str.\u0026lt;number\u0026gt; or label.byte.\u0026lt;number\u0026gt; is optional, The labels you need to query, used for querying multi-labels metrics. Unlike swctl, this key only supports a single label due to the specification of the custom metrics API.  For example, if your application name is front_gateway, you could add the following section to your HorizontalPodAutoscaler manifest to specify that you need less than 80ms of 90th latency.\n- type:Externalexternal:metric:name:skywalking.apache.org|service_percentileselector:matchLabels:service:front_gateway# The index of [P50, P75, P90, P95, P99]. 2 is the index of P90(90%)label:\u0026#34;2\u0026#34;target:type:Valuevalue:80If the service is v1|productpage|bookinfo|demo|-:\n- type:Externalexternal:metric:name:skywalking.apache.org|service_cpmselector:matchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34;\u0026#34;service.byte.7\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.byte.8\u0026#34;: \u0026#34;2d\u0026#34;target:type:Valuevalue:80","title":"Custom metrics Adapter","url":"/docs/skywalking-swck/next/custom-metrics-adapter/"},{"content":"Custom metrics Adapter This adapter contains an implementation of external metrics API. It is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.9+.\nUse kustomize to customise your deployment  Clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Edit file adapter/config/adapter/kustomization.yaml file to change your preferences. If you prefer to your private docker image, a quick path to override ADAPTER_IMG environment variable : export ADAPTER_IMG=\u0026lt;private registry\u0026gt;/metrics-adapter:\u0026lt;tag\u0026gt;\n  Use make to generate the final manifests and deploy:\n  make -C adapter deploy Configuration The adapter takes the standard Kubernetes generic API server arguments (including those for authentication and authorization). 
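For orientation, the External metric blocks shown earlier slot under the metrics field of an ordinary autoscaling/v2 HorizontalPodAutoscaler. A minimal skeleton around the service_cpm example, where the HPA name, workload name, and replica bounds are illustrative:
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: front-gateway-hpa        # illustrative name
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: front-gateway          # illustrative workload
  minReplicas: 1
  maxReplicas: 10
  metrics:
    - type: External
      external:
        metric:
          name: skywalking.apache.org|service_cpm
          selector:
            matchLabels:
              service: front_gateway
        target:
          type: Value
          value: 80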
By default, it will attempt to using Kubernetes in-cluster config to connect to the cluster.\nIt takes the following addition arguments specific to configuring how the adapter talks to SkyWalking OAP cluster:\n --oap-addr The address of OAP cluster. --metric-filter-regex A regular expression to filter metrics retrieved from OAP cluster. --refresh-interval This is the interval at which to update the cache of available metrics from OAP cluster. --namespace A prefix to which metrics are appended. The format is \u0026lsquo;namespace|metric_name\u0026rsquo;, defaults to skywalking.apache.org  HPA Configuration External metrics allow you to autoscale your cluster based on any metric available in OAP cluster. Just provide a metric block with a name and selector, and use the External metric type.\n- type:Externalexternal:metric:name:\u0026lt;metric_name\u0026gt;selector:matchLabels:\u0026lt;label_key\u0026gt;:\u0026lt;label_value\u0026gt;...target:.... metric_name: The name of metric generated by OAL or other subsystem. label: label_key is the entity name of skywalking metrics. if the label value contains special characters more than ., - and _, service.str.\u0026lt;number\u0026gt; represent the literal of label value, and service.byte.\u0026lt;number\u0026gt; could encode these special characters to hex bytes.  Supposing the service name is v1|productpage|bookinfo|demo, the matchLabels should be like the below piece:\nmatchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;// the hex byte of \u0026#34;|\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34; Caveats: byte label only accept a single character. That means || should be transformed to service.byte.0:\u0026quot;7c\u0026quot; and service.byte.1:\u0026quot;7c\u0026quot; instead of service.byte.0:\u0026quot;7c7c\u0026quot;\n The options of label keys are:\n service, service.str.\u0026lt;number\u0026gt; or service.byte.\u0026lt;number\u0026gt; The name of the service. instance, instance.str.\u0026lt;number\u0026gt; or instance.byte.\u0026lt;number\u0026gt; The name of the service instance. endpoint, endpoint.str.\u0026lt;number\u0026gt; or endpoint.byte.\u0026lt;number\u0026gt; The name of the endpoint. label, label.str.\u0026lt;number\u0026gt; or label.byte.\u0026lt;number\u0026gt; is optional, The labels you need to query, used for querying multi-labels metrics. Unlike swctl, this key only supports a single label due to the specification of the custom metrics API.  For example, if your application name is front_gateway, you could add the following section to your HorizontalPodAutoscaler manifest to specify that you need less than 80ms of 90th latency.\n- type:Externalexternal:metric:name:skywalking.apache.org|service_percentileselector:matchLabels:service:front_gateway# The index of [P50, P75, P90, P95, P99]. 
2 is the index of P90(90%)label:\u0026#34;2\u0026#34;target:type:Valuevalue:80If the service is v1|productpage|bookinfo|demo|-:\n- type:Externalexternal:metric:name:skywalking.apache.org|service_cpmselector:matchLabels:\u0026#34;service.str.0\u0026#34;: \u0026#34;v1\u0026#34;\u0026#34;service.byte.1\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.2\u0026#34;: \u0026#34;productpage\u0026#34;\u0026#34;service.byte.3\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.4\u0026#34;: \u0026#34;bookinfo\u0026#34;\u0026#34;service.byte.5\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.str.6\u0026#34;: \u0026#34;demo\u0026#34;\u0026#34;service.byte.7\u0026#34;: \u0026#34;7c\u0026#34;\u0026#34;service.byte.8\u0026#34;: \u0026#34;2d\u0026#34;target:type:Valuevalue:80","title":"Custom metrics Adapter","url":"/docs/skywalking-swck/v0.9.0/custom-metrics-adapter/"},{"content":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the high-level data organization data model data retrieval  You can also find examples of how to interact with BanyanDB using bydbctl, how to create and drop groups, or how to create, read, update and drop streams/measures.\nStructure of BanyanDB The hierarchy that data is organized into streams, measures and properties in groups.\nGroups Group does not provide a mechanism for isolating groups of resources within a single banyand-server but is the minimal unit to manage physical structures. Each group contains a set of options, like retention policy, shard number, etc. Several shards distribute in a group.\nmetadata:name:othersor\nmetadata:name:sw_metriccatalog:CATALOG_MEASUREresource_opts:shard_num:2block_interval:unit:UNIT_HOURnum:2segment_interval:unit:UNIT_DAYnum:1ttl:unit:UNIT_DAYnum:7The group creates two shards to store data points. Every day, it would create a segment that will generate a block every 2 hours. The available units are HOUR and DAY. The data in this group will keep 7 days.\nEvery other resource should belong to a group. The catalog indicates which kind of data model the group contains.\n UNSPECIFIED: Property or other data models. MEASURE: Measure. STREAM: Stream.  Group Registration Operations\nMeasures BanyanDB lets you define a measure as follows:\nmetadata:name:service_cpm_minutegroup:sw_metrictag_families:- name:defaulttags:- name:idtype:TAG_TYPE_STRING- name:entity_idtype:TAG_TYPE_STRINGfields:- name:totalfield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTD- name:valuefield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTDentity:tag_names:- entity_idinterval:1mMeasure consists of a sequence of data points. Each data point contains tags and fields.\nTags are key-value pairs. The database engine can index tag values by referring to the index rules and rule bindings, confining the query to filtering data points based on tags bound to an index rule.\nTags are grouped into unique tag_families which are the logical and physical grouping of tags.\nMeasure supports the following tag types:\n STRING : Text INT : 64 bits long integer STRING_ARRAY : A group of strings INT_ARRAY : A group of integers DATA_BINARY : Raw binary  A group of selected tags composite an entity that points out a specific time series the data point belongs to. The database engine has capacities to encode and compress values in the same time series. Users should select appropriate tag combinations to optimize the data size. 
Another role of entity is the sharding key of data points, determining how to fragment data between shards.\nFields are also key-value pairs like tags. But the value of each field is the actual value of a single data point. The database engine would encode and compress the field\u0026rsquo;s values in the same time series. The query operation is forbidden to filter data points based on a field\u0026rsquo;s value. You could apply aggregation functions to them.\nMeasure supports the following fields types:\n STRING : Text INT : 64 bits long integer DATA_BINARY : Raw binary FLOAT : 64 bits double-precision floating-point number  Measure supports the following encoding methods:\n GORILLA : GORILLA encoding is lossless. It is more suitable for a numerical sequence with similar values and is not recommended for sequence data with large fluctuations.  Measure supports the types of the following fields:\n ZSTD : Zstandard is a real-time compression algorithm, that provides high compression ratios. It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder. For BanyanDB focus on speed.  Another option named interval plays a critical role in encoding. It indicates the time range between two adjacent data points in a time series and implies that all data points belonging to the same time series are distributed based on a fixed interval. A better practice for the naming measure is to append the interval literal to the tail, for example, service_cpm_minute. It\u0026rsquo;s a parameter of GORILLA encoding method.\nMeasure Registration Operations\nTopNAggregation Find the Top-N entities from a dataset in a time range is a common scenario. We could see the diagrams like \u0026ldquo;Top 10 throughput endpoints\u0026rdquo;, and \u0026ldquo;Most slow 20 endpoints\u0026rdquo;, etc on SkyWalking\u0026rsquo;s UI. Exploring and analyzing the top entities can always reveal some high-value information.\nBanyanDB introduces the TopNAggregation, aiming to pre-calculate the top/bottom entities during the measure writing phase. In the query phase, BanyanDB can quickly retrieve the top/bottom records. The performance would be much better than top() function which is based on the query phase aggregation procedure.\n Caveat: TopNAggregation is an approximate realization, to use it well you need have a good understanding with the algorithm as well as the data distribution.\n ---metadata:name:endpoint_cpm_minute_top_bottomgroup:sw_metricsource_measure:name:endpoint_cpm_minutegroup:sw_metricfield_name:valuefield_value_sort:SORT_UNSPECIFIEDgroup_by_tag_names:- entity_idcounters_number:10000lru_size:10endpoint_cpm_minute_top_bottom is watching the data ingesting of the source measure endpoint_cpm_minute to generate both top 1000 and bottom 1000 entity cardinalities. If only Top 1000 or Bottom 1000 is needed, the field_value_sort could be DESC or ASC respectively.\n SORT_DESC: Top-N. In a series of 1,2,3...1000. Top10\u0026rsquo;s result is 1000,999...991. SORT_ASC: Bottom-N. In a series of 1,2,3...1000. Bottom10\u0026rsquo;s result is 1,2...10.  Tags in group_by_tag_names are used as dimensions. These tags can be searched (only equality is supported) in the query phase. Tags do not exist in group_by_tag_names will be dropped in the pre-calculating phase.\ncounters_number denotes the number of entity cardinality. As the above example shows, calculating the Top 100 among 10 thousands is easier than among 10 millions.\nlru_size is a late data optimizing flag. 
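If only the top side is needed, the same TopNAggregation can be narrowed by sort direction, as described above. A sketch derived directly from the endpoint_cpm_minute_top_bottom example, changing only field_value_sort (the aggregation name here is illustrative):
metadata:
  name: endpoint_cpm_minute_top      # illustrative name
  group: sw_metric
source_measure:
  name: endpoint_cpm_minute
  group: sw_metric
field_name: value
field_value_sort: SORT_DESC          # keep only the top side
group_by_tag_names:
  - entity_id
counters_number: 10000
lru_size: 10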
The higher the number, the more late data, but the more memory space is consumed.\nTopNAggregation Registration Operations\nStreams Stream shares many details with Measure except for abandoning field. Stream focuses on high throughput data collection, for example, tracing and logging. The database engine also supports compressing stream entries based on entity, but no encoding process is involved.\nStream Registration Operations\nProperties Property is a schema-less or schema-free data model. That means you DO NOT have to define a schema before writing a Property\nProperty is a standard key-value store. Users could store their metadata or items on a property and get a sequential consistency guarantee. BanyanDB\u0026rsquo;s motivation for introducing such a particular structure is to support most APM scenarios that need to store critical data, especially for a distributed database cluster.\nWe should create a group before creating a property.\nCreating group.\nmetadata:name:swCreating property.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;Property supports a three-level hierarchy, group/name/id, that is more flexible than schemaful data models.\nThe property supports the TTL mechanism. You could set the ttl field to specify the time to live.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;ttl:\u0026#34;1h\u0026#34;\u0026ldquo;General-Service\u0026rdquo; will be dropped after 1 hour. If you want to extend the TTL, you could use the \u0026ldquo;keepalive\u0026rdquo; operation. The \u0026ldquo;lease_id\u0026rdquo; is returned in the apply response. You can use get operation to get the property with the lease_id as well.\nlease_id:1\u0026ldquo;General-Service\u0026rdquo; lives another 1 hour.\nYou could Create, Read, Update and Drop a property, and update or drop several tags instead of the entire property.\nProperty Operations\nData Models Data models in BanyanDB derive from some classic data models.\nTimeSeries Model A time series is a series of data points indexed in time order. Most commonly, a time series is a sequence taken at successive equally spaced points in time. Thus it is a sequence of discrete-time data.\nYou can store time series data points through Stream or Measure. Examples of Stream are logs, traces and events. Measure could ingest metrics, profiles, etc.\nKey-Value Model The key-value data model is a subset of the Property data model. Every property has a key \u0026lt;group\u0026gt;/\u0026lt;name\u0026gt;/\u0026lt;id\u0026gt; that identifies a property within a collection. This key acts as the primary key to retrieve the data. You can set it when creating a key. It cannot be changed later because the attribute is immutable.\nThere are several Key-Value pairs in a property, named Tags. You could add, update and drop them based on the tag\u0026rsquo;s key.\nData Retrieval Queries and Writes are used to filter schemaful data models, Stream, Measure or TopNAggregation based on certain criteria, as well as to compute or store new data.\n MeasureService provides Write, Query and TopN StreamService provides Write, Query  IndexRule \u0026amp; IndexRuleBinding An IndexRule indicates which tags are indexed. An IndexRuleBinding binds an index rule to the target resources or the subject. 
There might be several rule bindings to a single resource, but their effective time range could NOT overlap.\nmetadata:name:trace_idgroup:sw_streamtags:- trace_idtype:TYPE_TREElocation:LOCATION_GLOBALIndexRule supports selecting two distinct kinds of index structures. The INVERTED index is the primary option when users set up an index rule. It\u0026rsquo;s suitable for most tag indexing due to a better memory usage ratio and query performance. When there are many unique tag values here, such as the ID tag and numeric duration tag, the TREE index could be better. This index saves much memory space with high-cardinality data sets.\nMost IndexRule\u0026rsquo;s location is LOCAL which places indices with their indexed data together. IndexRule also provides a GLOBAL location to place some indices on a higher layer of hierarchical structure. This option intends to optimize the full-scan operation for some querying cases of no time range specification, such as finding spans from a trace by trace_id.\nmetadata:name:stream_bindinggroup:sw_streamrules:- trace_id- duration- endpoint_id- status_code- http.method- db.instance- db.type- mq.broker- mq.queue- mq.topic- extended_tagssubject:catalog:CATALOG_STREAMname:swbegin_at:\u0026#39;2021-04-15T01:30:15.01Z\u0026#39;expire_at:\u0026#39;2121-04-15T01:30:15.01Z\u0026#39;IndexRuleBinding binds IndexRules to a subject, Stream or Measure. The time range between begin_at and expire_at is the effective time.\nIndexRule Registration Operations\nIndexRuleBinding Registration Operations\nIndex Granularity In BanyanDB, Stream and Measure have different levels of index granularity.\nFor Measure, the indexed target is a data point with specific tag values. The query processor uses the tag values defined in the entity field of the Measure to compose a series ID, which is used to find the several series that match the query criteria. The entity field is a set of tags that defines the unique identity of a time series, and it restricts the tags that can be used as indexed target.\nEach series contains a sequence of data points that share the same tag values. Once the query processor has identified the relevant series, it scans the data points between the desired time range in those series to find the data that matches the query criteria.\nFor example, suppose we have a Measure with the following entity field: {service, operation, instance}. If we get a data point with the following tag values: service=shopping, operation=search, and instance=prod-1, then the query processor would use those tag values to construct a series ID that uniquely identifies the series containing that data point. The query processor would then scan the relevant data points in that series to find the data that matches the query criteria.\nThe side effect of the measure index is that each indexed value has to represent a unique seriesID. This is because the series ID is constructed by concatenating the indexed tag values in the entity field. If two series have the same entity field, they would have the same series ID and would be indistinguishable from one another. This means that if you want to index a tag that is not part of the entity field, you would need to ensure that it is unique across all series. One way to do this would be to include the tag in the entity field, but this may not always be feasible or desirable depending on your use case.\nFor Stream, the indexed target is an element that is a combination of the series ID and timestamp. 
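Returning to the index-type discussion above: for a high-cardinality tag such as the numeric duration, a TREE rule can be sketched the same way as the trace_id rule. The rule name and tag below come from the stream_binding rules list; everything else follows the documented layout, and location is omitted here:
$ bydbctl indexRule create -f - <<EOF
metadata:
  name: duration
  group: sw_stream
tags:
  - duration
type: TYPE_TREE
EOF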
The Stream query processor uses the time range to find target files. The indexed result points to the target element. The processor doesn\u0026rsquo;t have to scan a series of elements in this time range, which reduces the query time.\nFor example, suppose we have a Stream with the following tags: service, operation, instance, and status_code. If we get a data point with the following tag values: service=shopping, operation=search, instance=prod-1, and status_code=200, and the data point\u0026rsquo;s time is 1:00pm on January 1st, 2022, then the series ID for this data point would be shopping_search_prod-1_200_1641052800, where 1641052800 is the Unix timestamp representing 1:00pm on January 1st, 2022.\nThe indexed target would be the combination of the series ID and timestamp, which in this case would be shopping_search_prod-1_200_1641052800. The Stream query processor would use the time range specified in the query to find target files and then search within those files for the indexed target.\nThe following is a comparison of the indexing granularity, performance, and flexibility of Stream and Measure indices:\n   Indexing Granularity Performance Flexibility     Measure indices are constructed for each series and are based on the entity field of the Measure. Each indexed value has to represent a unique seriesID. Measure index is faster than Stream index. Measure index is less flexible and requires more care when indexing tags that are not part of the entity field.   Stream indices are constructed for each element and are based on the series ID and timestamp. Stream index is slower than Measure index. Stream index is more flexible than Measure index and can index any tag value.    In general, Measure indices are faster and more efficient, but they require more care when indexing tags that are not part of the entity field. Stream indices, on the other hand, are slower and take up more space, but they can index any tag value and do not have the same side effects as Measure indices.\n","title":"Data Model","url":"/docs/skywalking-banyandb/latest/concept/data-model/"},{"content":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the high-level data organization data model data retrieval  You can also find examples of how to interact with BanyanDB using bydbctl, how to create and drop groups, or how to create, read, update and drop streams/measures.\nStructure of BanyanDB The hierarchy that data is organized into streams, measures and properties in groups.\nGroups Group does not provide a mechanism for isolating groups of resources within a single banyand-server but is the minimal unit to manage physical structures. Each group contains a set of options, like retention policy, shard number, etc. Several shards distribute in a group.\nmetadata:name:othersor\nmetadata:name:sw_metriccatalog:CATALOG_MEASUREresource_opts:shard_num:2segment_interval:unit:UNIT_DAYnum:1ttl:unit:UNIT_DAYnum:7The group creates two shards to store data points. Every day, it would create a segment that will generate a block every 2 hours. The available units are HOUR and DAY. The data in this group will keep 7 days.\nEvery other resource should belong to a group. The catalog indicates which kind of data model the group contains.\n UNSPECIFIED: Property or other data models. MEASURE: Measure. STREAM: Stream.  
Group Registration Operations\nMeasures BanyanDB lets you define a measure as follows:\nmetadata:name:service_cpm_minutegroup:sw_metrictag_families:- name:defaulttags:- name:idtype:TAG_TYPE_STRING- name:entity_idtype:TAG_TYPE_STRINGfields:- name:totalfield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTD- name:valuefield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTDentity:tag_names:- entity_idinterval:1mMeasure consists of a sequence of data points. Each data point contains tags and fields.\nTags are key-value pairs. The database engine can index tag values by referring to the index rules and rule bindings, confining the query to filtering data points based on tags bound to an index rule.\nTags are grouped into unique tag_families which are the logical and physical grouping of tags.\nMeasure supports the following tag types:\n STRING : Text INT : 64 bits long integer STRING_ARRAY : A group of strings INT_ARRAY : A group of integers DATA_BINARY : Raw binary  A group of selected tags composite an entity that points out a specific time series the data point belongs to. The database engine has capacities to encode and compress values in the same time series. Users should select appropriate tag combinations to optimize the data size. Another role of entity is the sharding key of data points, determining how to fragment data between shards.\nFields are also key-value pairs like tags. But the value of each field is the actual value of a single data point. The database engine would encode and compress the field\u0026rsquo;s values in the same time series. The query operation is forbidden to filter data points based on a field\u0026rsquo;s value. You could apply aggregation functions to them.\nMeasure supports the following fields types:\n STRING : Text INT : 64 bits long integer DATA_BINARY : Raw binary FLOAT : 64 bits double-precision floating-point number  Measure supports the following encoding methods:\n GORILLA : GORILLA encoding is lossless. It is more suitable for a numerical sequence with similar values and is not recommended for sequence data with large fluctuations.  Measure supports the types of the following fields:\n ZSTD : Zstandard is a real-time compression algorithm, that provides high compression ratios. It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder. For BanyanDB focus on speed.  Another option named interval plays a critical role in encoding. It indicates the time range between two adjacent data points in a time series and implies that all data points belonging to the same time series are distributed based on a fixed interval. A better practice for the naming measure is to append the interval literal to the tail, for example, service_cpm_minute. It\u0026rsquo;s a parameter of GORILLA encoding method.\nMeasure Registration Operations\nTopNAggregation Find the Top-N entities from a dataset in a time range is a common scenario. We could see the diagrams like \u0026ldquo;Top 10 throughput endpoints\u0026rdquo;, and \u0026ldquo;Most slow 20 endpoints\u0026rdquo;, etc on SkyWalking\u0026rsquo;s UI. Exploring and analyzing the top entities can always reveal some high-value information.\nBanyanDB introduces the TopNAggregation, aiming to pre-calculate the top/bottom entities during the measure writing phase. In the query phase, BanyanDB can quickly retrieve the top/bottom records. 
The performance is much better than the top() function, which is based on a query-phase aggregation procedure.\n Caveat: TopNAggregation is an approximate realization; to use it well, you need to have a good understanding of the algorithm as well as the data distribution.\n ---metadata:name:endpoint_cpm_minute_top_bottomgroup:sw_metricsource_measure:name:endpoint_cpm_minutegroup:sw_metricfield_name:valuefield_value_sort:SORT_UNSPECIFIEDgroup_by_tag_names:- entity_idcounters_number:10000lru_size:10endpoint_cpm_minute_top_bottom watches the data ingestion of the source measure endpoint_cpm_minute to generate both the top 1000 and bottom 1000 entity cardinalities. If only the Top 1000 or Bottom 1000 is needed, the field_value_sort could be DESC or ASC, respectively.\n SORT_DESC: Top-N. In a series of 1,2,3...1000, Top10\u0026rsquo;s result is 1000,999...991. SORT_ASC: Bottom-N. In a series of 1,2,3...1000, Bottom10\u0026rsquo;s result is 1,2...10.  Tags in group_by_tag_names are used as dimensions. These tags can be searched (only equality is supported) in the query phase. Tags that do not exist in group_by_tag_names will be dropped in the pre-calculating phase.\ncounters_number denotes the entity cardinality. As the above example shows, calculating the Top 100 among 10 thousand entities is easier than among 10 million.\nlru_size is a flag for optimizing late-arriving data. The higher the number, the more late data can be handled, but the more memory space is consumed.\nTopNAggregation Registration Operations\nStreams Stream shares many details with Measure, except that it abandons fields. Stream focuses on high-throughput data collection, for example, tracing and logging. The database engine also supports compressing stream entries based on entity, but no encoding process is involved.\nStream Registration Operations\nProperties Property is a schema-less or schema-free data model. That means you DO NOT have to define a schema before writing a Property.\nProperty is a standard key-value store. Users could store their metadata or items on a property and get a sequential consistency guarantee. BanyanDB\u0026rsquo;s motivation for introducing such a particular structure is to support most APM scenarios that need to store critical data, especially for a distributed database cluster.\nWe should create a group before creating a property.\nCreating a group.\nmetadata:name:swCreating a property.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;Property supports a three-level hierarchy, group/name/id, which is more flexible than schemaful data models.\nThe property supports the TTL mechanism. You could set the ttl field to specify the time to live.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;ttl:\u0026#34;1h\u0026#34;\u0026ldquo;General-Service\u0026rdquo; will be dropped after 1 hour. If you want to extend the TTL, you could use the \u0026ldquo;keepalive\u0026rdquo; operation. The \u0026ldquo;lease_id\u0026rdquo; is returned in the apply response. 
You can use get operation to get the property with the lease_id as well.\nlease_id:1\u0026ldquo;General-Service\u0026rdquo; lives another 1 hour.\nYou could Create, Read, Update and Drop a property, and update or drop several tags instead of the entire property.\nProperty Operations\nData Models Data models in BanyanDB derive from some classic data models.\nTimeSeries Model A time series is a series of data points indexed in time order. Most commonly, a time series is a sequence taken at successive equally spaced points in time. Thus it is a sequence of discrete-time data.\nYou can store time series data points through Stream or Measure. Examples of Stream are logs, traces and events. Measure could ingest metrics, profiles, etc.\nKey-Value Model The key-value data model is a subset of the Property data model. Every property has a key \u0026lt;group\u0026gt;/\u0026lt;name\u0026gt;/\u0026lt;id\u0026gt; that identifies a property within a collection. This key acts as the primary key to retrieve the data. You can set it when creating a key. It cannot be changed later because the attribute is immutable.\nThere are several Key-Value pairs in a property, named Tags. You could add, update and drop them based on the tag\u0026rsquo;s key.\nData Retrieval Queries and Writes are used to filter schemaful data models, Stream, Measure or TopNAggregation based on certain criteria, as well as to compute or store new data.\n MeasureService provides Write, Query and TopN StreamService provides Write, Query  IndexRule \u0026amp; IndexRuleBinding An IndexRule indicates which tags are indexed. An IndexRuleBinding binds an index rule to the target resources or the subject. There might be several rule bindings to a single resource, but their effective time range could NOT overlap.\nmetadata:name:trace_idgroup:sw_streamtags:- trace_idtype:TYPE_INVERTEDIndexRule supports selecting two distinct kinds of index structures. The INVERTED index is the primary option when users set up an index rule. It\u0026rsquo;s suitable for most tag indexing due to a better memory usage ratio and query performance.\nmetadata:name:stream_bindinggroup:sw_streamrules:- trace_id- duration- endpoint_id- status_code- http.method- db.instance- db.type- mq.broker- mq.queue- mq.topic- extended_tagssubject:catalog:CATALOG_STREAMname:swbegin_at:\u0026#39;2021-04-15T01:30:15.01Z\u0026#39;expire_at:\u0026#39;2121-04-15T01:30:15.01Z\u0026#39;IndexRuleBinding binds IndexRules to a subject, Stream or Measure. The time range between begin_at and expire_at is the effective time.\nIndexRule Registration Operations\nIndexRuleBinding Registration Operations\nIndex Granularity In BanyanDB, Stream and Measure have different levels of index granularity.\nFor Measure, the indexed target is a data point with specific tag values. The query processor uses the tag values defined in the entity field of the Measure to compose a series ID, which is used to find the several series that match the query criteria. The entity field is a set of tags that defines the unique identity of a time series, and it restricts the tags that can be used as indexed target.\nEach series contains a sequence of data points that share the same tag values. Once the query processor has identified the relevant series, it scans the data points between the desired time range in those series to find the data that matches the query criteria.\nFor example, suppose we have a Measure with the following entity field: {service, operation, instance}. 
If we get a data point with the following tag values: service=shopping, operation=search, and instance=prod-1, then the query processor would use those tag values to construct a series ID that uniquely identifies the series containing that data point. The query processor would then scan the relevant data points in that series to find the data that matches the query criteria.\nThe side effect of the measure index is that each indexed value has to represent a unique seriesID. This is because the series ID is constructed by concatenating the indexed tag values in the entity field. If two series have the same entity field, they would have the same series ID and would be indistinguishable from one another. This means that if you want to index a tag that is not part of the entity field, you would need to ensure that it is unique across all series. One way to do this would be to include the tag in the entity field, but this may not always be feasible or desirable depending on your use case.\nFor Stream, the indexed target is an element that is a combination of the series ID and timestamp. The Stream query processor uses the time range to find target files. The indexed result points to the target element. The processor doesn\u0026rsquo;t have to scan a series of elements in this time range, which reduces the query time.\nFor example, suppose we have a Stream with the following tags: service, operation, instance, and status_code. If we get a data point with the following tag values: service=shopping, operation=search, instance=prod-1, and status_code=200, and the data point\u0026rsquo;s time is 1:00pm on January 1st, 2022, then the series ID for this data point would be shopping_search_prod-1_200_1641052800, where 1641052800 is the Unix timestamp representing 1:00pm on January 1st, 2022.\nThe indexed target would be the combination of the series ID and timestamp, which in this case would be shopping_search_prod-1_200_1641052800. The Stream query processor would use the time range specified in the query to find target files and then search within those files for the indexed target.\nThe following is a comparison of the indexing granularity, performance, and flexibility of Stream and Measure indices:\n   Indexing Granularity Performance Flexibility     Measure indices are constructed for each series and are based on the entity field of the Measure. Each indexed value has to represent a unique seriesID. Measure index is faster than Stream index. Measure index is less flexible and requires more care when indexing tags that are not part of the entity field.   Stream indices are constructed for each element and are based on the series ID and timestamp. Stream index is slower than Measure index. Stream index is more flexible than Measure index and can index any tag value.    In general, Measure indices are faster and more efficient, but they require more care when indexing tags that are not part of the entity field. 
Stream indices, on the other hand, are slower and take up more space, but they can index any tag value and do not have the same side effects as Measure indices.\n","title":"Data Model","url":"/docs/skywalking-banyandb/next/concept/data-model/"},{"content":"Data Model This chapter introduces BanyanDB\u0026rsquo;s data models and covers the following:\n the high-level data organization data model data retrieval  You can also find examples of how to interact with BanyanDB using bydbctl, how to create and drop groups, or how to create, read, update and drop streams/measures.\nStructure of BanyanDB The hierarchy that data is organized into streams, measures and properties in groups.\nGroups Group does not provide a mechanism for isolating groups of resources within a single banyand-server but is the minimal unit to manage physical structures. Each group contains a set of options, like retention policy, shard number, etc. Several shards distribute in a group.\nmetadata:name:othersor\nmetadata:name:sw_metriccatalog:CATALOG_MEASUREresource_opts:shard_num:2block_interval:unit:UNIT_HOURnum:2segment_interval:unit:UNIT_DAYnum:1ttl:unit:UNIT_DAYnum:7The group creates two shards to store data points. Every day, it would create a segment that will generate a block every 2 hours. The available units are HOUR and DAY. The data in this group will keep 7 days.\nEvery other resource should belong to a group. The catalog indicates which kind of data model the group contains.\n UNSPECIFIED: Property or other data models. MEASURE: Measure. STREAM: Stream.  Group Registration Operations\nMeasures BanyanDB lets you define a measure as follows:\nmetadata:name:service_cpm_minutegroup:sw_metrictag_families:- name:defaulttags:- name:idtype:TAG_TYPE_STRING- name:entity_idtype:TAG_TYPE_STRINGfields:- name:totalfield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTD- name:valuefield_type:FIELD_TYPE_INTencoding_method:ENCODING_METHOD_GORILLAcompression_method:COMPRESSION_METHOD_ZSTDentity:tag_names:- entity_idinterval:1mMeasure consists of a sequence of data points. Each data point contains tags and fields.\nTags are key-value pairs. The database engine can index tag values by referring to the index rules and rule bindings, confining the query to filtering data points based on tags bound to an index rule.\nTags are grouped into unique tag_families which are the logical and physical grouping of tags.\nMeasure supports the following tag types:\n STRING : Text INT : 64 bits long integer STRING_ARRAY : A group of strings INT_ARRAY : A group of integers DATA_BINARY : Raw binary  A group of selected tags composite an entity that points out a specific time series the data point belongs to. The database engine has capacities to encode and compress values in the same time series. Users should select appropriate tag combinations to optimize the data size. Another role of entity is the sharding key of data points, determining how to fragment data between shards.\nFields are also key-value pairs like tags. But the value of each field is the actual value of a single data point. The database engine would encode and compress the field\u0026rsquo;s values in the same time series. The query operation is forbidden to filter data points based on a field\u0026rsquo;s value. 
You could apply aggregation functions to them.\nMeasure supports the following fields types:\n STRING : Text INT : 64 bits long integer DATA_BINARY : Raw binary FLOAT : 64 bits double-precision floating-point number  Measure supports the following encoding methods:\n GORILLA : GORILLA encoding is lossless. It is more suitable for a numerical sequence with similar values and is not recommended for sequence data with large fluctuations.  Measure supports the types of the following fields:\n ZSTD : Zstandard is a real-time compression algorithm, that provides high compression ratios. It offers a very wide range of compression/speed trade-offs, while being backed by a very fast decoder. For BanyanDB focus on speed.  Another option named interval plays a critical role in encoding. It indicates the time range between two adjacent data points in a time series and implies that all data points belonging to the same time series are distributed based on a fixed interval. A better practice for the naming measure is to append the interval literal to the tail, for example, service_cpm_minute. It\u0026rsquo;s a parameter of GORILLA encoding method.\nMeasure Registration Operations\nTopNAggregation Find the Top-N entities from a dataset in a time range is a common scenario. We could see the diagrams like \u0026ldquo;Top 10 throughput endpoints\u0026rdquo;, and \u0026ldquo;Most slow 20 endpoints\u0026rdquo;, etc on SkyWalking\u0026rsquo;s UI. Exploring and analyzing the top entities can always reveal some high-value information.\nBanyanDB introduces the TopNAggregation, aiming to pre-calculate the top/bottom entities during the measure writing phase. In the query phase, BanyanDB can quickly retrieve the top/bottom records. The performance would be much better than top() function which is based on the query phase aggregation procedure.\n Caveat: TopNAggregation is an approximate realization, to use it well you need have a good understanding with the algorithm as well as the data distribution.\n ---metadata:name:endpoint_cpm_minute_top_bottomgroup:sw_metricsource_measure:name:endpoint_cpm_minutegroup:sw_metricfield_name:valuefield_value_sort:SORT_UNSPECIFIEDgroup_by_tag_names:- entity_idcounters_number:10000lru_size:10endpoint_cpm_minute_top_bottom is watching the data ingesting of the source measure endpoint_cpm_minute to generate both top 1000 and bottom 1000 entity cardinalities. If only Top 1000 or Bottom 1000 is needed, the field_value_sort could be DESC or ASC respectively.\n SORT_DESC: Top-N. In a series of 1,2,3...1000. Top10\u0026rsquo;s result is 1000,999...991. SORT_ASC: Bottom-N. In a series of 1,2,3...1000. Bottom10\u0026rsquo;s result is 1,2...10.  Tags in group_by_tag_names are used as dimensions. These tags can be searched (only equality is supported) in the query phase. Tags do not exist in group_by_tag_names will be dropped in the pre-calculating phase.\ncounters_number denotes the number of entity cardinality. As the above example shows, calculating the Top 100 among 10 thousands is easier than among 10 millions.\nlru_size is a late data optimizing flag. The higher the number, the more late data, but the more memory space is consumed.\nTopNAggregation Registration Operations\nStreams Stream shares many details with Measure except for abandoning field. Stream focuses on high throughput data collection, for example, tracing and logging. 
The database engine also supports compressing stream entries based on entity, but no encoding process is involved.\nStream Registration Operations\nProperties Property is a schema-less or schema-free data model. That means you DO NOT have to define a schema before writing a Property\nProperty is a standard key-value store. Users could store their metadata or items on a property and get a sequential consistency guarantee. BanyanDB\u0026rsquo;s motivation for introducing such a particular structure is to support most APM scenarios that need to store critical data, especially for a distributed database cluster.\nWe should create a group before creating a property.\nCreating group.\nmetadata:name:swCreating property.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;Property supports a three-level hierarchy, group/name/id, that is more flexible than schemaful data models.\nThe property supports the TTL mechanism. You could set the ttl field to specify the time to live.\nmetadata:container:group:swname:temp_dataid:General-Servicetags:- key:namevalue:str:value:\u0026#34;hello\u0026#34;- key:statevalue:str:value:\u0026#34;succeed\u0026#34;ttl:\u0026#34;1h\u0026#34;\u0026ldquo;General-Service\u0026rdquo; will be dropped after 1 hour. If you want to extend the TTL, you could use the \u0026ldquo;keepalive\u0026rdquo; operation. The \u0026ldquo;lease_id\u0026rdquo; is returned in the apply response. You can use get operation to get the property with the lease_id as well.\nlease_id:1\u0026ldquo;General-Service\u0026rdquo; lives another 1 hour.\nYou could Create, Read, Update and Drop a property, and update or drop several tags instead of the entire property.\nProperty Operations\nData Models Data models in BanyanDB derive from some classic data models.\nTimeSeries Model A time series is a series of data points indexed in time order. Most commonly, a time series is a sequence taken at successive equally spaced points in time. Thus it is a sequence of discrete-time data.\nYou can store time series data points through Stream or Measure. Examples of Stream are logs, traces and events. Measure could ingest metrics, profiles, etc.\nKey-Value Model The key-value data model is a subset of the Property data model. Every property has a key \u0026lt;group\u0026gt;/\u0026lt;name\u0026gt;/\u0026lt;id\u0026gt; that identifies a property within a collection. This key acts as the primary key to retrieve the data. You can set it when creating a key. It cannot be changed later because the attribute is immutable.\nThere are several Key-Value pairs in a property, named Tags. You could add, update and drop them based on the tag\u0026rsquo;s key.\nData Retrieval Queries and Writes are used to filter schemaful data models, Stream, Measure or TopNAggregation based on certain criteria, as well as to compute or store new data.\n MeasureService provides Write, Query and TopN StreamService provides Write, Query  IndexRule \u0026amp; IndexRuleBinding An IndexRule indicates which tags are indexed. An IndexRuleBinding binds an index rule to the target resources or the subject. There might be several rule bindings to a single resource, but their effective time range could NOT overlap.\nmetadata:name:trace_idgroup:sw_streamtags:- trace_idtype:TYPE_TREElocation:LOCATION_GLOBALIndexRule supports selecting two distinct kinds of index structures. The INVERTED index is the primary option when users set up an index rule. 
It\u0026rsquo;s suitable for most tag indexing due to a better memory usage ratio and query performance. When there are many unique tag values here, such as the ID tag and numeric duration tag, the TREE index could be better. This index saves much memory space with high-cardinality data sets.\nMost IndexRule\u0026rsquo;s location is LOCAL which places indices with their indexed data together. IndexRule also provides a GLOBAL location to place some indices on a higher layer of hierarchical structure. This option intends to optimize the full-scan operation for some querying cases of no time range specification, such as finding spans from a trace by trace_id.\nmetadata:name:stream_bindinggroup:sw_streamrules:- trace_id- duration- endpoint_id- status_code- http.method- db.instance- db.type- mq.broker- mq.queue- mq.topic- extended_tagssubject:catalog:CATALOG_STREAMname:swbegin_at:\u0026#39;2021-04-15T01:30:15.01Z\u0026#39;expire_at:\u0026#39;2121-04-15T01:30:15.01Z\u0026#39;IndexRuleBinding binds IndexRules to a subject, Stream or Measure. The time range between begin_at and expire_at is the effective time.\nIndexRule Registration Operations\nIndexRuleBinding Registration Operations\nIndex Granularity In BanyanDB, Stream and Measure have different levels of index granularity.\nFor Measure, the indexed target is a data point with specific tag values. The query processor uses the tag values defined in the entity field of the Measure to compose a series ID, which is used to find the several series that match the query criteria. The entity field is a set of tags that defines the unique identity of a time series, and it restricts the tags that can be used as indexed target.\nEach series contains a sequence of data points that share the same tag values. Once the query processor has identified the relevant series, it scans the data points between the desired time range in those series to find the data that matches the query criteria.\nFor example, suppose we have a Measure with the following entity field: {service, operation, instance}. If we get a data point with the following tag values: service=shopping, operation=search, and instance=prod-1, then the query processor would use those tag values to construct a series ID that uniquely identifies the series containing that data point. The query processor would then scan the relevant data points in that series to find the data that matches the query criteria.\nThe side effect of the measure index is that each indexed value has to represent a unique seriesID. This is because the series ID is constructed by concatenating the indexed tag values in the entity field. If two series have the same entity field, they would have the same series ID and would be indistinguishable from one another. This means that if you want to index a tag that is not part of the entity field, you would need to ensure that it is unique across all series. One way to do this would be to include the tag in the entity field, but this may not always be feasible or desirable depending on your use case.\nFor Stream, the indexed target is an element that is a combination of the series ID and timestamp. The Stream query processor uses the time range to find target files. The indexed result points to the target element. The processor doesn\u0026rsquo;t have to scan a series of elements in this time range, which reduces the query time.\nFor example, suppose we have a Stream with the following tags: service, operation, instance, and status_code. 
If we get a data point with the following tag values: service=shopping, operation=search, instance=prod-1, and status_code=200, and the data point\u0026rsquo;s time is 1:00pm on January 1st, 2022, then the series ID for this data point would be shopping_search_prod-1_200_1641052800, where 1641052800 is the Unix timestamp representing 1:00pm on January 1st, 2022.\nThe indexed target would be the combination of the series ID and timestamp, which in this case would be shopping_search_prod-1_200_1641052800. The Stream query processor would use the time range specified in the query to find target files and then search within those files for the indexed target.\nThe following is a comparison of the indexing granularity, performance, and flexibility of Stream and Measure indices:\n   Indexing Granularity Performance Flexibility     Measure indices are constructed for each series and are based on the entity field of the Measure. Each indexed value has to represent a unique seriesID. Measure index is faster than Stream index. Measure index is less flexible and requires more care when indexing tags that are not part of the entity field.   Stream indices are constructed for each element and are based on the series ID and timestamp. Stream index is slower than Measure index. Stream index is more flexible than Measure index and can index any tag value.    In general, Measure indices are faster and more efficient, but they require more care when indexing tags that are not part of the entity field. Stream indices, on the other hand, are slower and take up more space, but they can index any tag value and do not have the same side effects as Measure indices.\n","title":"Data Model","url":"/docs/skywalking-banyandb/v0.5.0/concept/data-model/"},{"content":"Define Service Hierarchy SkyWalking v10 introduces a new concept Service Hierarchy which defines the relationships of existing logically same services in various layers. The concept and design could be found here.\nService Hierarchy Configuration All the relationships defined in the config/hierarchy-definition.yml file. You can customize it according to your own needs. 
Here is an example:\nhierarchy:MESH:MESH_DP:nameK8S_SERVICE:short-nameMESH_DP:K8S_SERVICE:short-nameGENERAL:K8S_SERVICE:lower-short-name-remove-nsMYSQL:K8S_SERVICE:short-namePOSTGRESQL:K8S_SERVICE:short-nameSO11Y_OAP:K8S_SERVICE:short-nameVIRTUAL_DATABASE:MYSQL:lower-short-name-with-fqdnPOSTGRESQL:lower-short-name-with-fqdnauto-matching-rules:# the name of the upper service is equal to the name of the lower servicename:\u0026#34;{ (u, l) -\u0026gt; u.name == l.name }\u0026#34;# the short name of the upper service is equal to the short name of the lower serviceshort-name:\u0026#34;{ (u, l) -\u0026gt; u.shortName == l.shortName }\u0026#34;# remove the k8s namespace from the lower service short name# this rule only works on k8s env.lower-short-name-remove-ns:\u0026#34;{ (u, l) -\u0026gt; { if(l.shortName.lastIndexOf(\u0026#39;.\u0026#39;) \u0026gt; 0) return u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf(\u0026#39;.\u0026#39;)); return false; } }\u0026#34;# the short name of the upper service with its port removed is equal to the short name of the lower service with the fqdn suffix# this rule only works on k8s env.lower-short-name-with-fqdn:\u0026#34;{ (u, l) -\u0026gt; { if(u.shortName.lastIndexOf(\u0026#39;:\u0026#39;) \u0026gt; 0) return u.shortName.substring(0, u.shortName.lastIndexOf(\u0026#39;:\u0026#39;)) == l.shortName.concat(\u0026#39;.svc.cluster.local\u0026#39;); return false; } }\u0026#34;layer-levels:# The hierarchy level of the service layer, the level is used to define the order of the service layer for UI presentation.# The level of the upper service should be greater than the level of the lower service in `hierarchy` section.MESH:3GENERAL:3SO11Y_OAP:3VIRTUAL_DATABASE:3MYSQL:2POSTGRESQL:2MESH_DP:1K8S_SERVICE:0Hierarchy  The hierarchy of service layers is defined in the hierarchy section. The layers listed under a specific layer are its related lower layers. A relation could have a matching rule for auto matching, which is defined in the auto-matching-rules section. A relation without a matching rule should be built through the internal API. All the layers are defined in the file org.apache.skywalking.oap.server.core.analysis.Layers.java. If the hierarchy is not defined, the service hierarchy relationship will not be built. If you want to add a new relationship, you should make sure it can be matched automatically by the Auto Matching Rules. Notice: some hierarchy relations and auto matching rules only work in a k8s environment.  Auto Matching Rules  The auto matching rules are defined in the auto-matching-rules section. The matching rules are Groovy scripts; the input parameters are the upper service (u) and the lower service (l), and the return value is a boolean that indicates whether the relation between the upper service and the lower service on the different layers matches. The default matching rules require the service names to be configured as SkyWalking defaults and to follow the Showcase. If you customize the service name in any layer, you should customize the related matching rules according to your service naming rules.  Layer Levels  Define the hierarchy level of the service layer in the layer-levels section. The level is used to define the order of the service layers for UI presentation. The level of the upper service should be greater than the level of the lower service in the hierarchy section.  
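For illustration only, a minimal fragment that adds one extra relationship might look like the sketch below. The GENERAL-to-MESH_DP pairing is a hypothetical example, the matching rule is the default name rule shown above, and the levels are copied from the example configuration.
hierarchy:
  GENERAL:
    MESH_DP: name   # hypothetical relation: the two services share the same name
auto-matching-rules:
  # default rule reused from the example above
  name: "{ (u, l) -> u.name == l.name }"
layer-levels:
  GENERAL: 3
  MESH_DP: 1
Whatever relationship is added, the level of the upper layer must stay greater than the level of the lower layer so the UI can order the layers correctly.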
","title":"Define Service Hierarchy","url":"/docs/main/next/en/concepts-and-designs/service-hierarchy-configuration/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. 
For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. 
Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. 
Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-meter/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace/"},{"content":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/opentracing/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. 
For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. 
Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. 
Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-meter/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace/"},{"content":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/opentracing/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. 
For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. 
Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. 
Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-meter/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace/"},{"content":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/opentracing/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. 
For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. 
Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. 
Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-meter/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace/"},{"content":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/opentracing/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config a layout  log4j.appender.CONSOLE.layout=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.TraceIdPatternLayout  set %T in layout.ConversionPattern ( In 2.0-2016, you should use %x, Why change? )  log4j.appender.CONSOLE.layout.ConversionPattern=%d [%T] %-5p %c{1}:%L - %m%n  When you use -javaagent to active the SkyWalking tracer, log4j will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %T with %T{SW_CTX}.\n  When you use -javaagent to active the SkyWalking tracer, log4j will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j.properties  log4j.rootLogger=INFO,CustomAppender log4j.appender.CustomAppender=org.apache.skywalking.apm.toolkit.log.log4j.v1.x.log.GRPCLogClientAppender log4j.appender.CustomAppender.layout=org.apache.log4j.PatternLayout log4j.appender.CustomAppender.layout.ConversionPattern=[%t] %-5p %c %x - %m%n  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-log4j-1.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-log4j-2.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  Config the [%traceId] pattern in your log4j2.xml  \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt;  Support log4j2 AsyncRoot , No additional configuration is required. Refer to the demo of log4j2.xml below. 
For details: Log4j2 Async Loggers  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;AsyncRoot level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/AsyncRoot\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender , No additional configuration is required. Refer to the demo of log4j2.xml below.\nFor details: All Loggers Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. This is simplest to configure and gives the best performance. To make all loggers asynchronous, add the disruptor jar to the classpath and set the system property log4j2.contextSelector to org.apache.logging.log4j.core.async.AsyncLoggerContextSelector.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. --\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;async.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %c{1.} [%t] [%traceId] %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt; For details: Mixed Sync \u0026amp; Async\nLog4j-2.9 and higher require disruptor-3.3.4.jar or higher on the classpath. Prior to Log4j-2.9, disruptor-3.0.0.jar or higher was required. There is no need to set system property Log4jContextSelector to any value.\n\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;!-- Async Loggers will auto-flush in batches, so switch off immediateFlush. 
--\u0026gt; \u0026lt;RandomAccessFile name=\u0026#34;RandomAccessFile\u0026#34; fileName=\u0026#34;asyncWithLocation.log\u0026#34; immediateFlush=\u0026#34;false\u0026#34; append=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;PatternLayout\u0026gt; \u0026lt;Pattern\u0026gt;%d %p %class{1.} [%t] [%traceId] %location %m %ex%n\u0026lt;/Pattern\u0026gt; \u0026lt;/PatternLayout\u0026gt; \u0026lt;/RandomAccessFile\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;!-- pattern layout actually uses location, so we need to include it --\u0026gt; \u0026lt;AsyncLogger name=\u0026#34;com.foo.Bar\u0026#34; level=\u0026#34;trace\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/AsyncLogger\u0026gt; \u0026lt;Root level=\u0026#34;info\u0026#34; includeLocation=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;RandomAccessFile\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;   Support log4j2 AsyncAppender, For details: Log4j2 AsyncAppender\n  \u0026lt;Configuration\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d [%traceId] %-5p %c{1}:%L - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;Async name=\u0026#34;Async\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Async\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;Root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Async\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, log4j2 will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %traceId with %sw_ctx.\n  When you use -javaagent to active the SkyWalking tracer, log4j2 will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. If the tracer is inactive, the output will be SW_CTX: N/A.\n  gRPC reporter The gRPC report could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. You don\u0026rsquo;t need to change the layout.\n Add GRPCLogClientAppender in log4j2.xml  \u0026lt;GRPCLogClientAppender name=\u0026#34;grpc-log\u0026#34;\u0026gt; \u0026lt;PatternLayout pattern=\u0026#34;%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n\u0026#34;/\u0026gt; \u0026lt;/GRPCLogClientAppender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}  Support -Dlog4j2.contextSelector=org.apache.logging.log4j.core.async.AsyncLoggerContextSelector in gRPC log report.  Transmitting un-formatted messages The log4j 2.x gRPC reporter supports transmitting logs as formatted or un-formatted. 
Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-log4j-2.x/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-meter\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; If you\u0026rsquo;re using Spring sleuth, you could use Spring Sleuth Setup at the OAP server.\n Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);  MeterFactory.counter Create a new counter builder with the meter name. Counter.Builder.tag(String key, String value) Mark a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.Builder.build() Build a new Counter which is collected and reported to the backend. Counter.increment(double count) Increment count to the Counter, It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) Create a new gauge builder with the meter name and supplier function, this function need to return a double value. Gauge.Builder.tag(String key, String value) Mark a tag key/value pair. Gauge.Builder.build() Build a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customize buckets.  import org.apache.skywalking.apm.toolkit.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) Create a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) Mark a tag key/value pair. 
Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) Set up the max values of every histogram buckets. Histogram.Builder.minValue(double value) Set up the minimal value of this histogram, default is 0. Histogram.Builder.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-meter/"},{"content":" Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n  Add @Trace to any method you want to trace. After that, you can see the span in the Stack.\n  Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. 
The value of Tag is the same as what are supported in Customize Enhance Trace.\n  Add custom tag in the context of traced method, ActiveSpan.tag(\u0026quot;key\u0026quot;, \u0026quot;val\u0026quot;).\n  ActiveSpan.error() Mark the current span as error status.\n  ActiveSpan.error(String errorMsg) Mark the current span as error status with a message.\n  ActiveSpan.error(Throwable throwable) Mark the current span as error status with a Throwable.\n  ActiveSpan.debug(String debugMsg) Add a debug level log message in the current span.\n  ActiveSpan.info(String infoMsg) Add an info level log message in the current span.\n  ActiveSpan.setOperationName(String operationName) Customize an operation name.\n  ActiveSpan.tag(\u0026#34;my_tag\u0026#34;, \u0026#34;my_value\u0026#34;); ActiveSpan.error(); ActiveSpan.error(\u0026#34;Test-Error-Reason\u0026#34;); ActiveSpan.error(new RuntimeException(\u0026#34;Test-Error-Throwable\u0026#34;)); ActiveSpan.info(\u0026#34;Test-Info-Msg\u0026#34;); ActiveSpan.debug(\u0026#34;Test-debug-Msg\u0026#34;); /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ActiveSpan.setOperationName(\u0026#34;Customize your own operation name, if this is an entry span, this would be an endpoint name\u0026#34;);  // ... }  Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix.\n","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace/"},{"content":" Dependency the toolkit, such as using maven or gradle  OpenTracing (Deprecated) OpenTracing is a vendor-neutral standard for distributed tracing. It is a set of APIs that can be used to instrument, generate, collect, and report telemetry data for distributed systems. It is designed to be extensible so that new implementations can be created for new platforms or languages. It had been archived by the CNCF TOC. Learn more.\nSkyWalking community keeps the API compatible with 0.30.0 only. 
All further development will not be accepted.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-opentracing\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use our OpenTracing tracer implementation  Tracer tracer = new SkywalkingTracer(); Tracer.SpanBuilder spanBuilder = tracer.buildSpan(\u0026#34;/yourApplication/yourService\u0026#34;); ","title":"Dependency the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/opentracing/"},{"content":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI to a Kubernetes cluster with default settings specified by their Custom Resource Definitions (CRDs).\nInstall Operator Follow the Operator installation instructions to install the operator.\nDeploy OAP server and UI with default settings Clone this repo, then change the current directory to samples.\nIssue the command below to deploy an OAP server and UI.\nkubectl apply -f default.yaml Get the created custom resources as below:\n$ kubectl get oapserver,ui NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] View the UI To view the UI from your browser, first get the external address from the ingress generated by the UI custom resource.\n$ kubectl get ingresses NAME HOSTS ADDRESS PORTS AGE default-ui demo.ui.skywalking \u0026lt;External_IP\u0026gt; 80 33h Edit your local /etc/hosts to append the following host-ip mapping.\ndemo.ui.skywalking \u0026lt;External_IP\u0026gt; Finally, navigate your browser to demo.ui.skywalking to access the UI service.\nNote: an ingress controller must be installed in your Kubernetes environment.\n","title":"Deploy OAP server and UI with default settings","url":"/docs/skywalking-swck/latest/examples/default-backend/"},{"content":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI to a Kubernetes cluster with default settings specified by their Custom Resource Definitions (CRDs).\nInstall Operator Follow the Operator installation instructions to install the operator.\nDeploy OAP server and UI with default settings Clone this repo, then change the current directory to samples.\nIssue the command below to deploy an OAP server and UI.\nkubectl apply -f default.yaml Get the created custom resources as below:\n$ kubectl get oapserver,ui NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] View the UI To view the UI from your browser, first get the external address from the ingress generated by the UI custom resource.\n$ kubectl get ingresses NAME HOSTS ADDRESS PORTS AGE default-ui demo.ui.skywalking \u0026lt;External_IP\u0026gt; 80 33h Edit your local /etc/hosts to append the following host-ip mapping.\ndemo.ui.skywalking \u0026lt;External_IP\u0026gt; Finally, navigate your browser to demo.ui.skywalking to access the UI service.\nNote: an ingress controller must be installed in your Kubernetes environment.\n","title":"Deploy OAP server and UI with 
default settings","url":"/docs/skywalking-swck/next/examples/default-backend/"},{"content":"Deploy OAP server and UI with default settings In this example, we will deploy an OAP server and UI to a Kubernetes cluster with default settings specified by their Custom Resource Definitions (CRDs).\nInstall Operator Follow the Operator installation instructions to install the operator.\nDeploy OAP server and UI with default settings Clone this repo, then change the current directory to samples.\nIssue the command below to deploy an OAP server and UI.\nkubectl apply -f default.yaml Get the created custom resources as below:\n$ kubectl get oapserver,ui NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] View the UI To view the UI from your browser, first get the external address from the ingress generated by the UI custom resource.\n$ kubectl get ingresses NAME HOSTS ADDRESS PORTS AGE default-ui demo.ui.skywalking \u0026lt;External_IP\u0026gt; 80 33h Edit your local /etc/hosts to append the following host-ip mapping.\ndemo.ui.skywalking \u0026lt;External_IP\u0026gt; Finally, navigate your browser to demo.ui.skywalking to access the UI service.\nNote: an ingress controller must be installed in your Kubernetes environment.\n","title":"Deploy OAP server and UI with default settings","url":"/docs/skywalking-swck/v0.9.0/examples/default-backend/"},{"content":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment.\nStartup Kubernetes Make sure that you already have a Kubernetes cluster.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nDeploy Rover Please follow the rover-daemonset.yml to deploy the rover in your Kubernetes cluster. Update the commented settings in the file, which cover two configs:\n Rover docker image: You could use make docker to build an image and upload it to your private registry, or update from the public image. OAP address: Update the OAP address.  Then, you could use kubectl apply -f rover-daemonset.yml to deploy the skywalking-rover into your cluster. It deploys on each node as a DaemonSet.\n","title":"Deploy on Kubernetes","url":"/docs/skywalking-rover/latest/en/setup/deployment/kubernetes/readme/"},{"content":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment.\nStartup Kubernetes Make sure that you already have a Kubernetes cluster.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nDeploy Rover Please follow the rover-daemonset.yml to deploy the rover in your Kubernetes cluster. Update the commented settings in the file, which cover two configs:\n Rover docker image: You could use make docker to build an image and upload it to your private registry, or update from the public image. OAP address: Update the OAP address.  Then, you could use kubectl apply -f rover-daemonset.yml to deploy the skywalking-rover into your cluster. 
It deploys on each node as a DaemonSet.\n","title":"Deploy on Kubernetes","url":"/docs/skywalking-rover/next/en/setup/deployment/kubernetes/readme/"},{"content":"Deploy on Kubernetes This documentation helps you to set up the rover in the Kubernetes environment.\nStartup Kubernetes Make sure that you already have a Kubernetes cluster.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nDeploy Rover Please follow the rover-daemonset.yml to deploy the rover in your Kubernetes cluster. Update the commented settings in the file, which cover two configs:\n Rover docker image: You could use make docker to build an image and upload it to your private registry, or update from the public image. OAP address: Update the OAP address.  Then, you could use kubectl apply -f rover-daemonset.yml to deploy the skywalking-rover into your cluster. It deploys on each node as a DaemonSet.\n","title":"Deploy on Kubernetes","url":"/docs/skywalking-rover/v0.6.0/en/setup/deployment/kubernetes/readme/"},{"content":"Deploy on Kubernetes This guide helps you run the Satellite as a gateway in a Kubernetes environment.\nInstall We recommend installing the Satellite with Helm. The commands below start the latest release versions of the SkyWalking Backend, UI and Satellite.\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 \\  --set satellite.enabled=true \\  --set satellite.image.tag=v0.4.0 Change Address After the Satellite and Backend have started, change the address used by agents/nodes so that the Satellite can load balance their requests to the OAP backend.\nFor example, in the Java Agent, change the property value of collector.backend_service to: skywalking-satellite.${SKYWALKING_RELEASE_NAMESPACE}:11800.\n","title":"Deploy on Kubernetes","url":"/docs/skywalking-satellite/latest/en/setup/examples/deploy/kubernetes/readme/"},{"content":"Deploy on Kubernetes This guide helps you run the Satellite as a gateway in a Kubernetes environment.\nInstall We recommend installing the Satellite with Helm. The commands below start the latest release versions of the SkyWalking Backend, UI and Satellite.\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 \\  --set satellite.enabled=true \\  --set satellite.image.tag=v0.4.0 Change Address After the Satellite and Backend have started, you need to change the address used by agents/nodes. 
Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to this: skywalking-satellite.${SKYWALKING_RELEASE_NAMESPACE}:11800.\n","title":"Deploy on Kubernetes","url":"/docs/skywalking-satellite/next/en/setup/examples/deploy/kubernetes/readme/"},{"content":"Deploy on Kubernetes It could help you run the Satellite as a gateway in Kubernetes environment.\nInstall We recommend install the Satellite by helm, follow command below, it could start the latest release version of SkyWalking Backend, UI and Satellite.\nexport SKYWALKING_RELEASE_NAME=skywalking # change the release name according to your scenario export SKYWALKING_RELEASE_NAMESPACE=default # change the namespace to where you want to install SkyWalking export REPO=skywalking helm repo add ${REPO} https://apache.jfrog.io/artifactory/skywalking-helm helm install \u0026#34;${SKYWALKING_RELEASE_NAME}\u0026#34; ${REPO}/skywalking -n \u0026#34;${SKYWALKING_RELEASE_NAMESPACE}\u0026#34; \\  --set oap.image.tag=8.8.1 \\  --set oap.storageType=elasticsearch \\  --set ui.image.tag=8.8.1 \\  --set elasticsearch.imageTag=6.8.6 \\  --set satellite.enabled=true \\  --set satellite.image.tag=v0.4.0 Change Address After the Satellite and Backend started, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to this: skywalking-satellite.${SKYWALKING_RELEASE_NAMESPACE}:11800.\n","title":"Deploy on Kubernetes","url":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/deploy/kubernetes/readme/"},{"content":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows instance.\nInstall Download Download the latest release version from SkyWalking Release Page.\nChange OAP Server addresses Update the OAP Server address in the config file, then satellite could connect to them and use round-robin policy for load-balance server before send each request.\nSupport two ways to locate the server list, using finder_type to change the type to find:\n static: Define the server address list. kubernetes: Define kubernetes pod/service/endpoint, it could be found addresses and dynamic update automatically.  Static server list You could see there define two server address and split by \u0026ldquo;,\u0026rdquo;.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:static}# The gRPC server address (default localhost:11800).server_addr:${SATELLITE_GRPC_CLIENT:127.0.0.1:11800,127.0.0.2:11800}# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Kubernetes selector Using kubernetes_config to define the address\u0026rsquo;s finder.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:kubernetes}# The kubernetes config to lookup addresseskubernetes_config:# The kubernetes API server address, If not define means using in kubernetes mode to connectapi_server:http://localhost:8001/# The kind of apikind:endpoints# Support to lookup namespacesnamespaces:- default# The kind selectorselector:label:app=productpage# How to get the address exported portextra_port:port:9080# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Start Satellite Execute the script bin/startup.sh(linux) or bin/startup.cmd(windows) to start. Then It could start these port:\n gRPC port(11800): listen the gRPC request, It could handle request from SkyWalking Agent protocol and Envoy ALS/Metrics protocol. Prometheus(1234): listen the HTTP request, It could get all SO11Y metrics from /metrics endpoint using Prometheus format.  Change Address After the satellite start, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to the satellite gRPC port.\n","title":"Deploy on Linux and Windows","url":"/docs/skywalking-satellite/latest/en/setup/examples/deploy/linux-windows/readme/"},{"content":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows instance.\nInstall Download Download the latest release version from SkyWalking Release Page.\nChange OAP Server addresses Update the OAP Server address in the config file, then satellite could connect to them and use round-robin policy for load-balance server before send each request.\nSupport two ways to locate the server list, using finder_type to change the type to find:\n static: Define the server address list. kubernetes: Define kubernetes pod/service/endpoint, it could be found addresses and dynamic update automatically.  
Static server list You could see there define two server address and split by \u0026ldquo;,\u0026rdquo;.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:static}# The gRPC server address (default localhost:11800).server_addr:${SATELLITE_GRPC_CLIENT:127.0.0.1:11800,127.0.0.2:11800}# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Kubernetes selector Using kubernetes_config to define the address\u0026rsquo;s finder.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:kubernetes}# The kubernetes config to lookup addresseskubernetes_config:# The kubernetes API server address, If not define means using in kubernetes mode to connectapi_server:http://localhost:8001/# The kind of apikind:endpoints# Support to lookup namespacesnamespaces:- default# The kind selectorselector:label:app=productpage# How to get the address exported portextra_port:port:9080# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Start Satellite Execute the script bin/startup.sh(linux) or bin/startup.cmd(windows) to start. Then It could start these port:\n gRPC port(11800): listen the gRPC request, It could handle request from SkyWalking Agent protocol and Envoy ALS/Metrics protocol. Prometheus(1234): listen the HTTP request, It could get all SO11Y metrics from /metrics endpoint using Prometheus format.  Change Address After the satellite start, need to change the address from agent/node. 
Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to the satellite gRPC port.\n","title":"Deploy on Linux and Windows","url":"/docs/skywalking-satellite/next/en/setup/examples/deploy/linux-windows/readme/"},{"content":"Deploy on Linux and Windows It could help you run the Satellite as a gateway in Linux or Windows instance.\nInstall Download Download the latest release version from SkyWalking Release Page.\nChange OAP Server addresses Update the OAP Server address in the config file, then satellite could connect to them and use round-robin policy for load-balance server before send each request.\nSupport two ways to locate the server list, using finder_type to change the type to find:\n static: Define the server address list. kubernetes: Define kubernetes pod/service/endpoint, it could be found addresses and dynamic update automatically.  Static server list You could see there define two server address and split by \u0026ldquo;,\u0026rdquo;.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:static}# The gRPC server address (default localhost:11800).server_addr:${SATELLITE_GRPC_CLIENT:127.0.0.1:11800,127.0.0.2:11800}# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Kubernetes selector Using kubernetes_config to define the address\u0026rsquo;s finder.\nsharing:clients:- plugin_name:\u0026#34;grpc-client\u0026#34;# The gRPC server address finder typefinder_type:${SATELLITE_GRPC_CLIENT_FINDER:kubernetes}# The kubernetes config to lookup addresseskubernetes_config:# The kubernetes API server address, If not define means using in kubernetes mode to connectapi_server:http://localhost:8001/# The kind of apikind:endpoints# Support to lookup namespacesnamespaces:- default# The kind selectorselector:label:app=productpage# How to get the address exported portextra_port:port:9080# The TLS switchenable_TLS:${SATELLITE_GRPC_ENABLE_TLS:false}# The file path of client.pem. The config only works when opening the TLS switch.client_pem_path:${SATELLITE_GRPC_CLIENT_PEM_PATH:\u0026#34;client.pem\u0026#34;}# The file path of client.key. The config only works when opening the TLS switch.client_key_path:${SATELLITE_GRPC_CLIENT_KEY_PATH:\u0026#34;client.key\u0026#34;}# InsecureSkipVerify controls whether a client verifies the server\u0026#39;s certificate chain and host name.insecure_skip_verify:${SATELLITE_GRPC_INSECURE_SKIP_VERIFY:false}# The file path oca.pem. 
The config only works when opening the TLS switch.ca_pem_path:${SATELLITE_grpc_CA_PEM_PATH:\u0026#34;ca.pem\u0026#34;}# How frequently to check the connection(second)check_period:${SATELLITE_GRPC_CHECK_PERIOD:5}# The auth value when send requestauthentication:${SATELLITE_GRPC_AUTHENTICATION:\u0026#34;\u0026#34;}Start Satellite Execute the script bin/startup.sh(linux) or bin/startup.cmd(windows) to start. Then It could start these port:\n gRPC port(11800): listen the gRPC request, It could handle request from SkyWalking Agent protocol and Envoy ALS/Metrics protocol. Prometheus(1234): listen the HTTP request, It could get all SO11Y metrics from /metrics endpoint using Prometheus format.  Change Address After the satellite start, need to change the address from agent/node. Then the satellite could load balance the request from agent/node to OAP backend.\nSuch as in Java Agent, you should change the property value in collector.backend_service forward to the satellite gRPC port.\n","title":"Deploy on Linux and Windows","url":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/deploy/linux-windows/readme/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/latest/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/next/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.1.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. 
Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.2.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.3.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.4.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.5.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.6.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in Kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. Most SkyWalking OAP settings are controlled through System environment variables when applying helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy OAP and UI to a Kubernetes cluster.\nPlease refer to the Readme file.\n","title":"Deploy SkyWalking backend and UI in Kubernetes","url":"/docs/main/v9.7.0/en/setup/backend/backend-k8s/"},{"content":"Deploy SkyWalking backend and UI in kubernetes Before you read Kubernetes deployment guidance, please make sure you have read Quick Start and Advanced Setup documents. 
Most SkyWalking OAP settings are controlled through System environment variables when apply helm deployment.\nFollow instructions in the deploying SkyWalking backend to Kubernetes cluster to deploy oap and ui to a kubernetes cluster.\nPlease read the Readme file.\n","title":"Deploy SkyWalking backend and UI in kubernetes","url":"/docs/main/v9.0.0/en/setup/backend/backend-k8s/"},{"content":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
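Both the V1 and V2 fields above are ordinary GraphQL queries served by the OAP, so, purely as a hedged illustration, the sketch below shows one way a client could invoke readMetricsValue over HTTP. The endpoint URL (the OAP GraphQL service, commonly http://&lt;oap-host&gt;:12800/graphql), the service_cpm metric, the service name and the time window are all assumed placeholders; the V1 schema excerpt continues right after the sketch.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ReadMetricsValueSample {
    public static void main(String[] args) throws Exception {
        // Assumed endpoint: the OAP GraphQL service, usually exposed on the HTTP port (12800).
        String endpoint = "http://localhost:12800/graphql";

        // readMetricsValue: single value of one metric for one entity in the given duration.
        String body = "{\"query\":\"query ($cond: MetricsCondition!, $duration: Duration!) {"
                + " readMetricsValue(condition: $cond, duration: $duration) }\","
                + "\"variables\":{"
                + "\"cond\":{\"name\":\"service_cpm\","
                + "\"entity\":{\"scope\":\"Service\",\"serviceName\":\"your-service\",\"normal\":true}},"
                + "\"duration\":{\"start\":\"2023-01-01 1200\",\"end\":\"2023-01-01 1300\",\"step\":\"MINUTE\"}}}";

        HttpRequest request = HttpRequest.newBuilder(URI.create(endpoint))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // e.g. {"data":{"readMetricsValue":123}}
    }
}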
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","title":"Deprecated Query Protocol","url":"/docs/main/latest/en/api/query-protocol-deprecated/"},{"content":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nV1 APIs V1 APIs were introduced since 6.x. 
Now they are a shell to V2 APIs since 9.0.0.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","title":"Deprecated Query Protocol","url":"/docs/main/next/en/api/query-protocol-deprecated/"},{"content":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. 
You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","title":"Deprecated Query Protocol","url":"/docs/main/v9.5.0/en/api/query-protocol-deprecated/"},{"content":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. 
A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","title":"Deprecated Query Protocol","url":"/docs/main/v9.6.0/en/api/query-protocol-deprecated/"},{"content":"Deprecated Query Protocol The following query services are deprecated since 9.5.0. All these queries are still available for the short term to keep compatibility.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetrics Metrics query targets all objects defined in OAL script and MAL. 
You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read metrics single value in the duration of required metrics# NullableValue#isEmptyValue == true indicates no telemetry data rather than aggregated value is actually zero.readNullableMetricsValue(condition:MetricsCondition!,duration:Duration!):NullableValue!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Aggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}","title":"Deprecated Query Protocol","url":"/docs/main/v9.7.0/en/api/query-protocol-deprecated/"},{"content":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One mmap-queue has a directory to store the whole data. The queue directory is made up of many segments and 1 metafile. This is originally implemented by bigqueue project, we changed it a little for fitting the Satellite project requirements.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  Meta Metadata only needs 80B to store the Metadata for the pipe. 
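To make the meta layout quoted below a bit more concrete, here is a small sketch that reads the ten slots back from the mapped meta file. It assumes each slot is a 64-bit value (which is how the quoted 80B total divides across ten fields), a little-endian byte order, and a meta file name; none of these details are stated on this page, and the real Satellite implementation is in Go, so treat this purely as an illustration of the layout.

import java.io.IOException;
import java.nio.ByteOrder;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class MmapQueueMetaReader {
    // Ten slots, in the order given by the meta layout on this page:
    // [metaVersion][writing ID][writing offset][watermark ID][watermark offset]
    // [committed ID][committed offset][reading ID][reading offset][capacity]
    static final int SLOT_BYTES = 8; // assumption: 64-bit slots (10 x 8 = 80B)

    public static void main(String[] args) throws IOException {
        Path metaFile = Path.of(args[0]); // e.g. <queue-dir>/meta.dat (file name is an assumption)
        try (FileChannel channel = FileChannel.open(metaFile, StandardOpenOption.READ)) {
            MappedByteBuffer meta = channel.map(FileChannel.MapMode.READ_ONLY, 0, 10L * SLOT_BYTES);
            meta.order(ByteOrder.LITTLE_ENDIAN); // byte order is assumed, not documented here

            long version   = meta.getLong(0 * SLOT_BYTES);
            long writingId = meta.getLong(1 * SLOT_BYTES);
            long writingOf = meta.getLong(2 * SLOT_BYTES);
            long capacity  = meta.getLong(9 * SLOT_BYTES);

            System.out.printf("version=%d writing=(%d,%d) capacity=%d%n",
                    version, writingId, writingOf, capacity);
        }
    }
}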
But for memory alignment, it takes at least one memory page size, which is generally 4K.\n[ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ] [metaVersion][ ID ][ offset][ ID ][ offset][ ID ][ offset][ ID ][ offset][capacity] [metaVersion][writing offset][watermark offset][committed offset][reading offset][capacity] Transforming BenchmarkTest Test machine: macbook pro 2018\nModel Name:\tMacBook Pro Model Identifier:\tMacBookPro15,1 Processor Name:\t6-Core Intel Core i7 Processor Speed:\t2.2 GHz Number of Processors:\t1 Total Number of Cores:\t6 L2 Cache (per Core):\t256 KB L3 Cache:\t9 MB Hyper-Threading Technology:\tEnabled Memory:\t16 GB System Firmware Version:\t1554.60.15.0.0 (iBridge: 18.16.13030.0.0,0 push operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueue BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 27585\t43559 ns/op\t9889 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 39326\t31773 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 56770\t22990 ns/op\t9816 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 43803\t29778 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 16870\t80576 ns/op\t18944 B/op\t10 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 36922\t39085 ns/op\t9889 B/op\t9 allocs/op PASS push and pop operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueueAndDequeue BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 21030\t60728 ns/op\t28774 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 30327\t41274 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 32738\t37923 ns/op\t28700 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 28209\t41169 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 14677\t89637 ns/op\t54981 B/op\t43 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 22228\t54963 ns/op\t28774 B/op\t42 allocs/op PASS ","title":"Design","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/mmap-queue/"},{"content":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One mmap-queue has a directory to store the whole data. The queue directory is made up of many segments and 1 metafile. This is originally implemented by bigqueue project, we changed it a little for fitting the Satellite project requirements.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  Meta Metadata only needs 80B to store the Metadata for the pipe. 
But for memory alignment, it takes at least one memory page size, which is generally 4K.\n[ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ] [metaVersion][ ID ][ offset][ ID ][ offset][ ID ][ offset][ ID ][ offset][capacity] [metaVersion][writing offset][watermark offset][committed offset][reading offset][capacity] Transforming BenchmarkTest Test machine: macbook pro 2018\nModel Name:\tMacBook Pro Model Identifier:\tMacBookPro15,1 Processor Name:\t6-Core Intel Core i7 Processor Speed:\t2.2 GHz Number of Processors:\t1 Total Number of Cores:\t6 L2 Cache (per Core):\t256 KB L3 Cache:\t9 MB Hyper-Threading Technology:\tEnabled Memory:\t16 GB System Firmware Version:\t1554.60.15.0.0 (iBridge: 18.16.13030.0.0,0 push operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueue BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 27585\t43559 ns/op\t9889 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 39326\t31773 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 56770\t22990 ns/op\t9816 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 43803\t29778 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 16870\t80576 ns/op\t18944 B/op\t10 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 36922\t39085 ns/op\t9889 B/op\t9 allocs/op PASS push and pop operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueueAndDequeue BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 21030\t60728 ns/op\t28774 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 30327\t41274 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 32738\t37923 ns/op\t28700 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 28209\t41169 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 14677\t89637 ns/op\t54981 B/op\t43 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 22228\t54963 ns/op\t28774 B/op\t42 allocs/op PASS ","title":"Design","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/mmap-queue/"},{"content":"Design The mmap-queue is a big, fast, and persistent queue based on the memory-mapped files. One mmap-queue has a directory to store the whole data. The queue directory is made up of many segments and 1 metafile. This is originally implemented by bigqueue project, we changed it a little for fitting the Satellite project requirements.\n Segment: Segment is the real data store center, that provides large-space storage and does not reduce read and write performance as much as possible by using mmap. And we will avoid deleting files by reusing them. Meta: The purpose of meta is to find the data that the consumer needs.  Meta Metadata only needs 80B to store the Metadata for the pipe. 
But for memory alignment, it takes at least one memory page size, which is generally 4K.\n[ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ][ 8Bit ] [metaVersion][ ID ][ offset][ ID ][ offset][ ID ][ offset][ ID ][ offset][capacity] [metaVersion][writing offset][watermark offset][committed offset][reading offset][capacity] Transforming BenchmarkTest Test machine: macbook pro 2018\nModel Name:\tMacBook Pro Model Identifier:\tMacBookPro15,1 Processor Name:\t6-Core Intel Core i7 Processor Speed:\t2.2 GHz Number of Processors:\t1 Total Number of Cores:\t6 L2 Cache (per Core):\t256 KB L3 Cache:\t9 MB Hyper-Threading Technology:\tEnabled Memory:\t16 GB System Firmware Version:\t1554.60.15.0.0 (iBridge: 18.16.13030.0.0,0 push operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueue BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 27585\t43559 ns/op\t9889 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 39326\t31773 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 56770\t22990 ns/op\t9816 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 43803\t29778 ns/op\t9840 B/op\t9 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 16870\t80576 ns/op\t18944 B/op\t10 allocs/op BenchmarkEnqueue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 36922\t39085 ns/op\t9889 B/op\t9 allocs/op PASS push and pop operation goos: darwin goarch: amd64 pkg: github.com/apache/skywalking-satellite/plugins/queue/mmap BenchmarkEnqueueAndDequeue BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:18_message:8KB_queueCapacity:10000 21030\t60728 ns/op\t28774 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:10_message:8KB_queueCapacity:10000 30327\t41274 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_512KB_maxInMemSegments:6_message:8KB_queueCapacity:10000 32738\t37923 ns/op\t28700 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_256KB_maxInMemSegments:20_message:8KB_queueCapacity:10000 28209\t41169 ns/op\t28726 B/op\t42 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:16KB_queueCapacity:10000 14677\t89637 ns/op\t54981 B/op\t43 allocs/op BenchmarkEnqueueAndDequeue/segmentSize:_128KB_maxInMemSegments:10_message:8KB_queueCapacity:100000 22228\t54963 ns/op\t28774 B/op\t42 allocs/op PASS ","title":"Design","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/mmap-queue/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. 
For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/latest/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. 
So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/next/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. 
(4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.0.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  
See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.1.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.2.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. 
The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.3.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. 
There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, OpenTracing, and OpenCensus. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.4.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. 
SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.5.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. 
The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  ","title":"Design Goals","url":"/docs/main/v9.6.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals This document outlines the core design goals for the SkyWalking project.\n  Maintaining Observability. Regardless of the deployment method of the target system, SkyWalking provides an integration solution for it to maintain observability. Based on this, SkyWalking provides multiple runtime forms and probes.\n  Topology, Metrics and Trace Together. The first step to understanding a distributed system is the topology map. It visualizes the entire complex system in an easy-to-read layout. Under the topology, the OSS personnel have higher requirements in terms of the metrics for service, instance, endpoint and calls. Traces are in the form of detailed logs to make sense of those metrics. For example, when the endpoint latency becomes long, you want to see the slowest the trace to find out why. So you can see, they are from big picture to details, they are all needed. SkyWalking integrates and provides a lot of features to make this possible and easy understand.\n  Light Weight. There two parts of light weight are needed. (1) In probe, we just depend on network communication framework, prefer gRPC. By that, the probe should be as small as possible, to avoid the library conflicts and the payload of VM, such as permsize requirement in JVM. (2) As an observability platform, it is secondary and third level system in your project environment. So we are using our own light weight framework to build the backend core. Then you don\u0026rsquo;t need to deploy big data tech platform and maintain them. SkyWalking should be simple in tech stack.\n  Pluggable. SkyWalking core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking can run in multiple environments, including: (1) Use traditional register center like eureka. (2) Use RPC framework including service discovery, like Spring Cloud, Apache Dubbo. (3) Use Service Mesh in modern infrastructure. (4) Use cloud services. (5) Across cloud deployment. SkyWalking should run well in all of these cases.\n  Interoperability. The observability landscape is so vast that it is virtually impossible for SkyWalking to support all systems, even with the support of its community. Currently, it supports interoperability with other OSS systems, especially probes, such as Zipkin, Jaeger, and OpenTelemetry. It is very important to end users that SkyWalking has the ability to accept and read these data formats, since the users are not required to switch their libraries.\n  What is next?  See probe Introduction to learn about SkyWalking\u0026rsquo;s probe groups. From backend overview, you can understand what the backend does after it receives probe data.  
","title":"Design Goals","url":"/docs/main/v9.7.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project.\n Support various E2E testing requirements in SkyWalking main repository with other ecosystem repositories. Support both docker-compose and KinD to orchestrate the tested services under different environments. Be language-independent as much as possible, users only need to configure YAMLs and run commands, without writing code.  Non-Goal  This framework is not involved with the build process, i.e. it won’t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn’t take the plugin tests into account, at least for now;  ","title":"Design Goals","url":"/docs/skywalking-infra-e2e/latest/en/concepts-and-designs/project-goals/"},{"content":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project.\n Support various E2E testing requirements in SkyWalking main repository with other ecosystem repositories. Support both docker-compose and KinD to orchestrate the tested services under different environments. Be language-independent as much as possible, users only need to configure YAMLs and run commands, without writing code.  Non-Goal  This framework is not involved with the build process, i.e. it won’t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn’t take the plugin tests into account, at least for now;  ","title":"Design Goals","url":"/docs/skywalking-infra-e2e/next/en/concepts-and-designs/project-goals/"},{"content":"Design Goals The document outlines the core design goals for the SkyWalking Infra E2E project.\n Support various E2E testing requirements in SkyWalking main repository with other ecosystem repositories. Support both docker-compose and KinD to orchestrate the tested services under different environments. Be language-independent as much as possible, users only need to configure YAMLs and run commands, without writing code.  Non-Goal  This framework is not involved with the build process, i.e. it won’t do something like mvn package or docker build, the artifacts (.tar, docker images) should be ready in an earlier process before this; This project doesn’t take the plugin tests into account, at least for now;  ","title":"Design Goals","url":"/docs/skywalking-infra-e2e/v1.3.0/en/concepts-and-designs/project-goals/"},{"content":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light Weight. SkyWalking Satellite has a limited cost for resources and high-performance because of the requirements of the sidecar deployment model.\n  Pluggability. SkyWalking Satellite core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking Satellite can run in multiple environments, including:\n Use traditional deployment as a daemon process to collect data. Use cloud services as a sidecar, such as in the Kubernetes platform.    Interoperability. Observability is a big landscape, SkyWalking is impossible to support all, even by its community. So SkyWalking Satellite is compatible with many protocols, including:\n SkyWalking protocol (WIP) Prometheus protocol.    
","title":"Design Goals","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/project-goals/"},{"content":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light Weight. SkyWalking Satellite has a limited cost for resources and high-performance because of the requirements of the sidecar deployment model.\n  Pluggability. SkyWalking Satellite core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking Satellite can run in multiple environments, including:\n Use traditional deployment as a daemon process to collect data. Use cloud services as a sidecar, such as in the Kubernetes platform.    Interoperability. Observability is a big landscape, SkyWalking is impossible to support all, even by its community. So SkyWalking Satellite is compatible with many protocols, including:\n SkyWalking protocol (WIP) Prometheus protocol.    ","title":"Design Goals","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/project-goals/"},{"content":"Design Goals The document outlines the core design goals for SkyWalking Satellite project.\n  Light Weight. SkyWalking Satellite has a limited cost for resources and high-performance because of the requirements of the sidecar deployment model.\n  Pluggability. SkyWalking Satellite core team provides many default implementations, but definitely it is not enough, and also don\u0026rsquo;t fit every scenario. So, we provide a lot of features for being pluggable.\n  Portability. SkyWalking Satellite can run in multiple environments, including:\n Use traditional deployment as a daemon process to collect data. Use cloud services as a sidecar, such as in the Kubernetes platform.    Interoperability. Observability is a big landscape, SkyWalking is impossible to support all, even by its community. So SkyWalking Satellite is compatible with many protocols, including:\n SkyWalking protocol (WIP) Prometheus protocol.    ","title":"Design Goals","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/project-goals/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. 
However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. 
So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
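To make the "protocol analysis according to the buffer data" step above concrete, the following is a deliberately simplified Go sketch of how bytes captured at the syscall layer could be classified. It is not Rover's actual detection logic; a real analyzer has to handle split reads, pipelining, and many more protocols than this.

package main

import (
	"bytes"
	"fmt"
)

// detectProtocol guesses the application protocol from the first bytes of a
// send/receive buffer captured at the syscall layer. Purely illustrative.
func detectProtocol(buf []byte) string {
	switch {
	case bytes.HasPrefix(buf, []byte("GET ")),
		bytes.HasPrefix(buf, []byte("POST ")),
		bytes.HasPrefix(buf, []byte("HTTP/1.")):
		return "HTTP/1.x"
	case bytes.HasPrefix(buf, []byte("PRI * HTTP/2.0")):
		return "HTTP/2 (connection preface)"
	case len(buf) > 0 && buf[0] == 0x16:
		return "TLS (handshake record)" // encrypted; needs the uprobe path described above
	default:
		return "unknown / raw TCP"
	}
}

func main() {
	fmt.Println(detectProtocol([]byte("GET /productpage HTTP/1.1\r\n")))
	fmt.Println(detectProtocol([]byte{0x16, 0x03, 0x01}))
}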
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-helm.git cd skywalking-helm cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
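As a rough model of the two metric types described in the Metrics section earlier (a Counter carrying count, bytes, and execution time, and a Histogram recording bucketed distributions), the sketch below shows how such values could be accumulated per connection. The type names and bucket boundaries are invented for illustration and do not come from the Rover code.

package main

import (
	"fmt"
	"sort"
	"time"
)

// counter matches the Counter metric described above: execution count,
// total bytes, and total execution time over a reporting period.
type counter struct {
	Count    uint64
	Bytes    uint64
	ExecTime time.Duration
}

// histogram records the distribution of a value into fixed buckets.
type histogram struct {
	Bounds []time.Duration // upper bounds, sorted ascending
	Counts []uint64        // Counts[i] covers values <= Bounds[i]; last slot is overflow
}

func newHistogram(bounds []time.Duration) *histogram {
	return &histogram{Bounds: bounds, Counts: make([]uint64, len(bounds)+1)}
}

func (h *histogram) observe(d time.Duration) {
	i := sort.Search(len(h.Bounds), func(i int) bool { return d <= h.Bounds[i] })
	h.Counts[i]++
}

func main() {
	write := counter{}
	writeLatency := newHistogram([]time.Duration{time.Millisecond, 5 * time.Millisecond, 25 * time.Millisecond})

	// Pretend a socket write of 9 KB took 3ms.
	write.Count++
	write.Bytes += 9 * 1024
	write.ExecTime += 3 * time.Millisecond
	writeLatency.observe(3 * time.Millisecond)

	fmt.Printf("write counter: %+v\n", write)
	fmt.Printf("write latency buckets: %v\n", writeLatency.Counts)
}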
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/latest/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. 
There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. 
In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   
Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-helm.git cd skywalking-helm cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. 
When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/next/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. 
To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  
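Before looking at the individual hook points, here is a minimal sketch of the kprobe idea using the bpftrace tool. It is not part of SkyWalking Rover, but it attaches the same kind of kernel probe: it sums the bytes passed to the kernel's tcp_sendmsg function (reached from the send-family syscalls), grouped by process name:\nsudo bpftrace -e 'kprobe:tcp_sendmsg { @bytes_sent[comm] = sum(arg2); }'\nSkyWalking Rover implements the equivalent logic in its own eBPF programs and exports the results as metrics; the one-liner above only illustrates the mechanism.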
Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/v9.3.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. 
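A simple way to convince yourself of this, without any SkyWalking component involved, is to count two of the common network syscalls with the bpftrace tool, which attaches eBPF programs to kernel tracepoints. The following one-liner is only an illustration and is not how SkyWalking Rover is implemented:\nsudo bpftrace -e 'tracepoint:syscalls:sys_enter_sendto, tracepoint:syscalls:sys_enter_recvfrom { @calls[probe, comm] = count(); }'\nEvery sendto and recvfrom issued by any process on the node shows up in the resulting map, keyed by syscall and process name, which is the kind of visibility the rest of this section builds on.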
There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. 
In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   
Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. 
When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/v9.4.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. 
To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  
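As a small, self-contained sketch of what kprobe-based timing looks like, the bpftrace tool, which is unrelated to SkyWalking Rover but relies on the same kprobe/kretprobe mechanism, can measure how long each tcp_sendmsg call in the kernel takes and print the result as a histogram in microseconds:\nsudo bpftrace -e 'kprobe:tcp_sendmsg { @start[tid] = nsecs; } kretprobe:tcp_sendmsg /@start[tid]/ { @usecs = hist((nsecs - @start[tid]) / 1000); delete(@start[tid]); }'\nThis kind of execution-time measurement is among the data SkyWalking Rover collects with its own eBPF programs, as described in the following sections.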
Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/v9.5.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing Skywalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  Our hypothesis is that eBPF can monitor the network. 
There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests. For example header compression in HTTP/2.    For the general network performance monitor, we chose to use the kprobe (intercept the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. As a single request and response are split into multiple probes, we can use technology to correlate them. For contextual information, It\u0026rsquo;s usually used in OSI Layer 7 protocol network analysis. So, if we just monitor the network performance, then they can be ignored.  Kprobes and network monitoring Following the network syscalls of Linux documentation, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection which helps us to understand which pod is connected. Connection statics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1) we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data. For example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the Linux Kernel transmits data encrypted in user space. 
In the figure above, The application usually transmits SSL data through a third-party library (such as OpenSSL). For this case, the Linux API can only get the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: Compatible multiple languages, using uprobe to capture the data that is not encrypted before sending or after receiving. In this way, we can get the original data and associate it with the socket. Associate with socket: We can associate unencrypted data with the socket.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods to submit the buffer data with the socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by the offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between the process and external environment (other Pod or service). Additionally, it can identify the data direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are the internal process of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. Two on the left side are on the client side, and two on the right side are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records the total number of data in a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of data in the buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   
Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket with other socket counter.   Retransmit Counter Millisecond The socket retransmit package counter.   Drop Counter Millisecond The socket drop package counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we\u0026rsquo;ll deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature has been released in the version 0.3.0 of SkyWalking Rover. 
When a network monitoring task is created, the SkyWalking rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, the SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: lack of context between layers in the network stack. These are the cases when eBPF begins to really help with debugging/productivity when existing service mesh/envoy cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demo the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/v9.6.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Diagnose Service Mesh Network Performance with eBPF Background This article will show how to use Apache SkyWalking with eBPF to make network troubleshooting easier in a service mesh environment.\nApache SkyWalking is an application performance monitor tool for distributed systems. It observes metrics, logs, traces, and events in the service mesh environment and uses that data to generate a dependency graph of your pods and services. This dependency graph can provide quick insights into your system, especially when there\u0026rsquo;s an issue.\nHowever, when troubleshooting network issues in SkyWalking\u0026rsquo;s service topology, it is not always easy to pinpoint where the error actually is. There are two reasons for the difficulty:\n Traffic through the Envoy sidecar is not easy to observe. Data from Envoy\u0026rsquo;s Access Log Service (ALS) shows traffic between services (sidecar-to-sidecar), but not metrics on communication between the Envoy sidecar and the service it proxies. Without that information, it is more difficult to understand the impact of the sidecar. There is a lack of data from transport layer (OSI Layer 4) communication. Since services generally use application layer (OSI Layer 7) protocols such as HTTP, observability data is generally restricted to application layer communication. However, the root cause may actually be in the transport layer, which is typically opaque to observability tools.  Access to metrics from Envoy-to-service and transport layer communication can make it easier to diagnose service issues. 
To this end, SkyWalking needs to collect and analyze transport layer metrics between processes inside Kubernetes pods - a task well suited to eBPF. We investigated using eBPF for this purpose and present our results and a demo below.\nMonitoring Kubernetes Networks with eBPF With its origins as the Extended Berkeley Packet Filter, eBPF is a general-purpose mechanism for injecting and running your own code into the Linux kernel and is an excellent tool for monitoring network traffic in Kubernetes Pods. In the next few sections, we'll provide an overview of how to use eBPF for network monitoring as background for introducing SkyWalking Rover, a metrics collector and profiler powered by eBPF to diagnose CPU and network performance.\nHow Applications and the Network Interact Interactions between the application and the network can generally be divided into the following steps from higher to lower levels of abstraction:\n User Code: Application code uses high-level network libraries in the application stack to exchange data across the network, like sending and receiving HTTP requests. Network Library: When the network library receives a network request, it interacts with the language API to send the network data. Language API: Each language provides an API for operating the network, system, etc. When a request is received, it interacts with the system API. In Linux, this API is called syscalls. Linux API: When the Linux kernel receives the request through the API, it communicates with the socket to send the data, which is usually closer to an OSI Layer 4 protocol, such as TCP, UDP, etc. Socket Ops: Sending or receiving the data to/from the NIC.  eBPF can intercept these interactions to monitor the network. There are two ways to implement the interception: User space (uprobe) or Kernel space (kprobe). The table below summarizes the differences.\n    Pros Cons     uprobe •\tGet more application-related contexts, such as whether the current request is HTTP or HTTPS.•\tRequests and responses can be intercepted by a single method. •\tData structures can be unstable, so it is more difficult to get the desired data.  •\tImplementation may differ between language/library versions.  •\tDoes not work in applications without symbol tables.   kprobe •\tAvailable for all languages.  •\tThe data structure and methods are stable and do not require much adaptation.  •\tEasier correlation with underlying data, such as getting the destination address of TCP, OSI Layer 4 protocol metrics, etc. •\tA single request and response may be split into multiple probes.  •\tContextual information is not easy to get for stateful requests, for example header compression in HTTP/2.    For general network performance monitoring, we chose kprobes (intercepting the syscalls) for the following reasons:\n It\u0026rsquo;s available for applications written in any programming language, and it\u0026rsquo;s stable, so it saves a lot of development/adaptation costs. It can be correlated with metrics from the system level, which makes it easier to troubleshoot. Although a single request and response are split into multiple probes, we can correlate them using the socket and file descriptor information. Contextual information is mainly needed for OSI Layer 7 protocol analysis, so it can be ignored when we only monitor network performance.  A minimal, illustrative kprobe sketch is shown below.
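To make the kprobe approach concrete, here is a minimal, illustrative sketch (not SkyWalking Rover's actual code) of a libbpf-style eBPF program that attaches a kprobe to the kernel's tcp_sendmsg function and accumulates the number of bytes each process writes to its TCP sockets. The map name and layout are assumptions for illustration; building it assumes the usual clang -target bpf workflow with a __TARGET_ARCH define, and a small user-space loader (for example bpftool or a libbpf program) to attach the probe and read the map.

// count_tcp_send.bpf.c - illustrative sketch only, not SkyWalking Rover's implementation
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Per-process byte counter keyed by PID (map name is a placeholder). */
struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 10240);
    __type(key, __u32);   /* PID */
    __type(value, __u64); /* total bytes passed to tcp_sendmsg() */
} sent_bytes SEC(".maps");

SEC("kprobe/tcp_sendmsg")
int trace_tcp_sendmsg(struct pt_regs *ctx)
{
    /* tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size): the third
     * argument is the number of bytes the application asked to send. */
    __u64 size = (__u64)PT_REGS_PARM3(ctx);
    __u32 pid = bpf_get_current_pid_tgid() >> 32;

    __u64 *total = bpf_map_lookup_elem(&sent_bytes, &pid);
    if (total)
        __sync_fetch_and_add(total, size);
    else
        bpf_map_update_elem(&sent_bytes, &pid, &size, BPF_ANY);
    return 0;
}

char LICENSE[] SEC("license") = "GPL";

A real collector such as SkyWalking Rover would additionally read the socket's remote address to attribute the bytes to a peer process or Pod, and would also cover the receive path and the connect/accept/close events described in the next section.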
Kprobes and network monitoring Following the Linux documentation on network syscalls, we can implement network monitoring by intercepting two types of methods: socket operations and send/receive methods.\nSocket Operations When accepting or connecting with another socket, we can get the following information:\n Connection information: Includes the remote address from the connection, which helps us to understand which pod is connected. Connection statistics: Includes basic metrics from sockets, such as round-trip time (RTT), lost packet count in TCP, etc. Socket and file descriptor (FD) mapping: Includes the relationship between the Linux file descriptor and the socket object. It is useful when sending and receiving data through a Linux file descriptor.  Send/Receive The interface related to sending or receiving data is the focus of performance analysis. It mainly contains the following parameters:\n Socket file descriptor: The file descriptor of the current operation corresponding to the socket. Buffer: The data sent or received, passed as a byte array.  Based on the above parameters, we can analyze the following data:\n Bytes: The size of the packet in bytes. Protocol: The protocol analysis according to the buffer data, such as HTTP, MySQL, etc. Execution Time: The time it takes to send/receive the data.  At this point (Figure 1), we can analyze the following steps for the whole lifecycle of the connection:\n Connect/Accept: When the connection is created. Transform: Sending and receiving data on the connection. Close: When the connection is closed.  Figure 1\nProtocol and TLS The previous section described how to analyze connections using send or receive buffer data, for example, following the HTTP/1.1 message specification to analyze the connection. However, this does not work for TLS requests/responses.\nFigure 2\nWhen TLS is in use, the data is already encrypted in user space before the Linux kernel transmits it. In the figure above, the application usually transmits SSL data through a third-party library (such as OpenSSL). In this case, the Linux API can only see the encrypted data, so it cannot recognize any higher layer protocol. To decrypt inside eBPF, we need to follow these steps:\n Read unencrypted data through uprobe: To stay compatible with multiple languages, we use a uprobe to capture the data before it is encrypted for sending, or after it is decrypted on receiving. In this way, we can get the original data. Associate with the socket: We then associate the unencrypted data with its underlying socket, so it can be linked to the connection metrics.  OpenSSL Use case For example, the most common way to send/receive SSL data is to use OpenSSL as a shared library, specifically the SSL_read and SSL_write methods, which carry the buffer data for a given socket.\nFollowing the documentation, we can intercept these two methods, which are almost identical to the read/write API in Linux. The source code of the SSL structure in OpenSSL shows that the Socket FD exists in the BIO object of the SSL structure, and we can get it by its offset.\nIn summary, with knowledge of how OpenSSL works, we can read unencrypted data in an eBPF function.\nIntroducing SkyWalking Rover, an eBPF-based Metrics Collector and Profiler SkyWalking Rover introduces the eBPF network profiling feature into the SkyWalking ecosystem. It\u0026rsquo;s currently supported in a Kubernetes environment, so it must be deployed inside a Kubernetes cluster. Once the deployment is complete, SkyWalking Rover can monitor the network for all processes inside a given Pod. 
Based on the monitoring data, SkyWalking can generate the topology relationship diagram and metrics between processes.\nTopology Diagram The topology diagram can help us understand the network access between processes inside the same Pod, and between a process and the external environment (other Pods or services). Additionally, it can identify the direction of traffic based on the line flow direction.\nIn Figure 3 below, all nodes within the hexagon are internal processes of a Pod, and nodes outside the hexagon are externally associated services or Pods. Nodes are connected by lines, which indicate the direction of requests or responses between nodes (client or server). The protocol is indicated on the line, and it\u0026rsquo;s either HTTP(S), TCP, or TCP(TLS). Also, we can see in this figure that the line between the Envoy and Python applications is bidirectional because Envoy intercepts all application traffic.\nFigure 3\nMetrics Once we recognize the network call relationship between processes through the topology, we can select a specific line and view the TCP metrics between the two processes.\nThe diagram below (Figure 4) shows the metrics of network monitoring between two processes. There are four metrics in each line. The two on the left are on the client side, and the two on the right are on the server side. If the remote process is not in the same Pod, only one side of the metrics is displayed.\nFigure 4\nThe following two metric types are available:\n Counter: Records cumulative totals over a certain period. Each counter contains the following data: a. Count: Execution count. b. Bytes: Packet size in bytes. c. Execution time: Execution duration. Histogram: Records the distribution of the data across buckets.  Based on the above data types, the following metrics are exposed:\n   Name Type Unit Description     Write Counter and histogram Millisecond The socket write counter.   Read Counter and histogram Millisecond The socket read counter.   Write RTT Counter and histogram Microsecond The socket write round trip time (RTT) counter.   Connect Counter and histogram Millisecond The socket connect/accept with another server/client counter.   Close Counter and histogram Millisecond The socket close counter.   Retransmit Counter Millisecond The socket retransmit packet counter.   Drop Counter Millisecond The socket drop packet counter.    Demo In this section, we demonstrate how to perform network profiling in the service mesh. To follow along, you will need a running Kubernetes environment.\nNOTE: All commands and scripts are available in this GitHub repository.\nInstall Istio Istio is the most widely deployed service mesh, and comes with a complete demo application that we can use for testing. To install Istio and the demo application, follow these steps:\n Install Istio using the demo configuration profile. Label the default namespace, so Istio automatically injects Envoy sidecar proxies when we deploy the application. Deploy the bookinfo application to the cluster. Deploy the traffic generator to generate some traffic to the application.  
export ISTIO_VERSION=1.13.1 # install istio istioctl install -y --set profile=demo kubectl label namespace default istio-injection=enabled # deploy the bookinfo applications kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/bookinfo-gateway.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/destination-rule-all.yaml kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/networking/virtual-service-all-v1.yaml # generate traffic kubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/traffic-generator.yaml Install SkyWalking The following will install the storage, backend, and UI needed for SkyWalking:\ngit clone https://github.com/apache/skywalking-helm.git cd skywalking-helm cd chart helm dep up skywalking helm -n istio-system install skywalking skywalking \\  --set fullnameOverride=skywalking \\  --set elasticsearch.minimumMasterNodes=1 \\  --set elasticsearch.imageTag=7.5.1 \\  --set oap.replicas=1 \\  --set ui.image.repository=apache/skywalking-ui \\  --set ui.image.tag=9.2.0 \\  --set oap.image.tag=9.2.0 \\  --set oap.envoy.als.enabled=true \\  --set oap.image.repository=apache/skywalking-oap-server \\  --set oap.storageType=elasticsearch \\  --set oap.env.SW_METER_ANALYZER_ACTIVE_FILES=\u0026#39;network-profiling\u0026#39; Install SkyWalking Rover SkyWalking Rover is deployed on every node in Kubernetes, and it automatically detects the services in the Kubernetes cluster. The network profiling feature was released in version 0.3.0 of SkyWalking Rover. When a network monitoring task is created, SkyWalking Rover sends the data to the SkyWalking backend.\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/skywalking-network-profiling-demo/main/resources/skywalking-rover.yaml Start the Network Profiling Task Once all deployments are completed, we must create a network profiling task for a specific instance of the service in the SkyWalking UI.\nTo open the SkyWalking UI, run:\nkubectl port-forward svc/skywalking-ui 8080:80 --namespace istio-system Currently, we can select the specific instances that we wish to monitor by clicking the Data Plane item in the Service Mesh panel and the Service item in the Kubernetes panel.\nIn the figure below, we have selected an instance with a list of tasks in the network profiling tab. When we click the start button, SkyWalking Rover starts monitoring this instance\u0026rsquo;s network.\nFigure 5\nDone! After a few seconds, you will see the process topology appear on the right side of the page.\nFigure 6\nWhen you click on the line between processes, you can see the TCP metrics between the two processes.\nFigure 7\nConclusion In this article, we detailed a problem that makes troubleshooting service mesh architectures difficult: the lack of context between layers in the network stack. These are the cases where eBPF really helps with debugging and productivity where the existing service mesh and Envoy telemetry cannot. Then, we researched how eBPF could be applied to common communication, such as TLS. Finally, we demonstrated the implementation of this process with SkyWalking Rover.\nFor now, we have completed the performance analysis for OSI layer 4 (mostly TCP). 
In the future, we will also introduce the analysis for OSI layer 7 protocols like HTTP.\n","title":"Diagnose Service Mesh Network Performance with eBPF","url":"/docs/main/v9.7.0/en/academy/diagnose-service-mesh-network-performance-with-ebpf/"},{"content":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","title":"Disable plugins","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/how-to-disable-plugin/"},{"content":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","title":"Disable plugins","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-disable-plugin/"},{"content":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","title":"Disable plugins","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/how-to-disable-plugin/"},{"content":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","title":"Disable plugins","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/how-to-disable-plugin/"},{"content":"Disable plugins Delete or remove the specific libraries / jars in skywalking-agent/plugins/*.jar\n+-- skywalking-agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... skywalking-agent.jar ","title":"Disable plugins","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/how-to-disable-plugin/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... 
other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/latest/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. 
serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/next/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables. At the same time, some of them support dynamic settings from upstream management system.\nCurrently, SkyWalking supports the 2 types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configVaule} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides receiver-trace/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key that corresponds to a group sub config items. A sub config item is a key value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kuberbetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configVaule} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kuberbetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. 
Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. 
serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. 
If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration SkyWalking Configurations are mostly set through application.yml and OS system environment variables.\nAt the same time, some of them support dynamic settings from an upstream management system.\nCurrently, SkyWalking supports two types of dynamic configurations: Single and Group.\nThis feature depends on upstream service, so it is DISABLED by default.\nconfiguration:selector:${SW_CONFIGURATION:none}none:grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}# ... other implementationsSingle Configuration Single Configuration is a config key that corresponds to a specific config value. The logic structure is:\n{configKey}:{configValue} For example:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} Supported configurations are as follows:\n   Config Key Value Description Value Format Example     agent-analyzer.default.slowDBAccessThreshold Thresholds of slow Database statement. Overrides agent-analyzer/default/slowDBAccessThreshold of application.yml. default:200,mongodb:50   agent-analyzer.default.uninstrumentedGateways The uninstrumented gateways. Overrides gateways.yml. Same as gateways.yml.   alarm.default.alarm-settings The alarm settings. Overrides alarm-settings.yml. Same as alarm-settings.yml.   core.default.apdexThreshold The apdex threshold settings. Overrides service-apdex-threshold.yml. Same as service-apdex-threshold.yml.   core.default.endpoint-name-grouping The endpoint name grouping setting. Overrides endpoint-name-grouping.yml. Same as endpoint-name-grouping.yml.   core.default.log4j-xml The log4j xml configuration. Overrides log4j2.xml. Same as log4j2.xml.   core.default.searchableTracesTags The searchableTracesTags configuration. Override core/default/searchableTracesTags in the application.yml. http.method,http.status_code,rpc.status_code,db.type,db.instance,mq.queue,mq.topic,mq.broker   agent-analyzer.default.traceSamplingPolicy The sampling policy for default and service dimension, override trace-sampling-policy-settings.yml. same as trace-sampling-policy-settings.yml   configuration-discovery.default.agentConfigurations The ConfigurationDiscovery settings. See configuration-discovery.md.    Group Configuration Group Configuration is a config key corresponding to a group sub config item. A sub config item is a key-value pair. The logic structure is:\n{configKey}: |{subItemkey1}:{subItemValue1} |{subItemkey2}:{subItemValue2} |{subItemkey3}:{subItemValue3} ... 
For example:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} Supported configurations are as follows:\n   Config Key SubItem Key Description Value Description Value Format Example     core.default.endpoint-name-grouping-openapi The serviceName relevant to openAPI definition file. eg. serviceA. If the serviceName relevant to multiple files should add subItems for each files, and each subItem key should split serviceName and fileName with . eg. serviceA.API-file1,serviceA.API-file2 The openAPI definitions file contents(yaml format) for create endpoint name grouping rules. Same as productAPI-v2.yaml    Dynamic Configuration Implementations  Dynamic Configuration Service, DCS Zookeeper Implementation Etcd Implementation Consul Implementation Apollo Implementation Kubernetes Configmap Implementation Nacos Implementation  ","title":"Dynamic Configuration","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/latest/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/next/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}period:${SW_CONFIG_APOLLO_PERIOD:60}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Apollo Implementation Apollo is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:apollo}apollo:apolloMeta:${SW_CONFIG_APOLLO:http://localhost:8080}apolloCluster:${SW_CONFIG_APOLLO_CLUSTER:default}apolloEnv:${SW_CONFIG_APOLLO_ENV:\u0026#34;\u0026#34;}appId:${SW_CONFIG_APOLLO_APP_ID:skywalking}Config Storage Single Config Single configs in Apollo are key/value pairs:\n   Key Value     configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Apollo is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Apollo are key/value pairs as well, and the key is composited by configKey and subItemKey with ..\n   Key Value     configKey.subItemkey1 subItemValue1   configKey.subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Apollo is:\n   Key Value     core.default.endpoint-name-grouping-openapi.customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi.productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Apollo Implementation","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-apollo/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/latest/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. 
Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/next/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /, see: https://www.consul.io/docs/dynamic-app-config/kv#using-consul-kv\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use the Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). 
To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /, see: https://www.consul.io/docs/dynamic-app-config/kv#using-consul-kv\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /, see: https://www.consul.io/docs/dynamic-app-config/kv#using-consul-kv\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configVaule    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. 
Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Consul Implementation Consul is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:consul}consul:# Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500hostAndPorts:${SW_CONFIG_CONSUL_HOST_AND_PORTS:1.2.3.4:8500}# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONSUL_PERIOD:1}# Consul aclTokenaclToken:${SW_CONFIG_CONSUL_ACL_TOKEN:\u0026#34;\u0026#34;}Config Storage Single Config Single configs in Consul are key/value pairs:\n   Key Value     configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in Consul is:\n   Key Value     agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in Consul are key/value pairs as well, but according to the level keys organized by /.\n   Key Value     configKey/subItemkey1 subItemValue1   configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    If we use Consul UI, we can see keys organized like a folder:\nconfigKey -- subItemkey1 -- subItemkey2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config in Consul is:\n   Key Value     core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Consul Implementation","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-consul/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/latest/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/next/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as Dynamic Configuration Center (DCC). To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Only the v3 protocol is supported since 8.7.0.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configVaule    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Etcd Implementation Etcd is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:etcd}etcd:period:${SW_CONFIG_ETCD_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.endpoints:${SW_CONFIG_ETCD_ENDPOINTS:http://localhost:2379}namespace:${SW_CONFIG_ETCD_NAMESPACE:/skywalking}authentication:${SW_CONFIG_ETCD_AUTHENTICATION:false}user:${SW_CONFIG_ETCD_USER:}password:${SW_CONFIG_ETCD_password:}NOTE: Since 8.7.0, only the v3 protocol is supported.\nConfig Storage Single Config Single configs in etcd are key/value pairs:\n   Key Value     {namespace}/configKey configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/agent-analyzer.default.slowDBAccessThreshold default:200,mongodb:50   \u0026hellip; \u0026hellip;    Group Config Group config in etcd are key/value pairs as well, and the key is composited by configKey and subItemKey with /.\n   Key Value     {namespace}/configKey/subItemkey1 subItemValue1   {namespace}/configKey/subItemkey2 subItemValue2   \u0026hellip; \u0026hellip;    e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /skywalking the config in etcd is:\n   Key Value     /skywalking/core.default.endpoint-name-grouping-openapi/customerAPI-v1 value of customerAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v1 value of productAPI-v1   /skywalking/core.default.endpoint-name-grouping-openapi/productAPI-v2 value of productAPI-v2    ","title":"Dynamic Configuration Etcd Implementation","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-etcd/"},{"content":"Dynamic Configuration Kuberbetes Configmap Implementation configmap is also supported as Dynamic Configuration Center (DCC). 
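To make the etcd key layout above concrete, the following is a minimal sketch of seeding those keys programmatically. It assumes the jetcd client (io.etcd:jetcd-core), a local etcd v3 endpoint matching the default SW_CONFIG_ETCD_ENDPOINTS, and the default /skywalking namespace; the class name and values are illustrative only, and operators would more commonly use etcdctl or their own tooling.

```java
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.Client;
import io.etcd.jetcd.KV;

import java.nio.charset.StandardCharsets;

public class EtcdDccSeed {
    private static ByteSequence bytes(String s) {
        return ByteSequence.from(s, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) throws Exception {
        // Assumption: etcd reachable on the default endpoint used in the configuration above.
        Client client = Client.builder().endpoints("http://localhost:2379").build();
        try {
            KV kv = client.getKVClient();
            String namespace = "/skywalking";

            // Single config: {namespace}/{configKey} -> configValue
            kv.put(bytes(namespace + "/agent-analyzer.default.slowDBAccessThreshold"),
                   bytes("default:200,mongodb:50")).get();

            // Group config: {namespace}/{configKey}/{subItemKey} -> subItemValue
            String groupKey = namespace + "/core.default.endpoint-name-grouping-openapi";
            kv.put(bytes(groupKey + "/customerAPI-v1"), bytes("value of customerAPI-v1")).get();
            kv.put(bytes(groupKey + "/productAPI-v1"), bytes("value of productAPI-v1")).get();
        } finally {
            client.close();
        }
    }
}
```

The OAP only polls these keys on the configured sync period, so changes become visible within roughly one period (60 seconds by default).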
To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs is configmap data items as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kuberbetes Configmap Implementation","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kuberbetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. 
{labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kuberbetes Configmap Implementation","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. 
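The group-config rule above flattens each sub item into a ConfigMap data key of the form configKey.subItemKey. The snippet below is an illustrative sketch (class and method names are hypothetical) that builds such a data map; the resulting entries are what would appear under `data:` in the ConfigMaps shown above.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: composes the flattened data entries a group config uses
// inside a ConfigMap's `data` section, as described above.
public class ConfigMapGroupKeys {
    static Map<String, String> groupConfigData(String configKey, Map<String, String> subItems) {
        Map<String, String> data = new LinkedHashMap<>();
        // Each data key is "<configKey>.<subItemKey>"; each value is the sub item value.
        subItems.forEach((subItemKey, value) -> data.put(configKey + "." + subItemKey, value));
        return data;
    }

    public static void main(String[] args) {
        Map<String, String> data = groupConfigData(
                "core.default.endpoint-name-grouping-openapi",
                Map.of("customerAPI-v1", "value of customerAPI-v1",
                       "productAPI-v1", "value of productAPI-v1"));
        // Prints e.g. core.default.endpoint-name-grouping-openapi.customerAPI-v1=value of customerAPI-v1
        data.forEach((k, v) -> System.out.println(k + "=" + v));
    }
}
```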
we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/latest/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/next/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiversion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Kubernetes Configmap Implementation configmap is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:k8s-configmap}# [example] (../../../../oap-server/server-configuration/configuration-k8s-configmap/src/test/resources/skywalking-dynamic-configmap.example.yaml)k8s-configmap:# Sync period in seconds. Defaults to 60 seconds.period:${SW_CONFIG_CONFIGMAP_PERIOD:60}# Which namespace is configmap deployed in.namespace:${SW_CLUSTER_K8S_NAMESPACE:default}# Labelselector is used to locate specific configmaplabelSelector:${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}{namespace} is the k8s namespace to which the configmap belongs. {labelSelector} is used to identify which configmaps would be selected.\ne.g. These 2 configmaps would be selected by the above config:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: configKey1: configValue1 configKey2: configValue2 ... apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: configKey3: configValue3 ... Config Storage The configs are configmap data items, as the above example shows. we can organize the configs in 1 or more configmap files.\nSingle Config Under configmap.data:\n configKey: configValue e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The config in configmap is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: agent-analyzer.default.slowDBAccessThreshold: default:200,mongodb:50 Group Config The data key is composited by configKey and subItemKey to identify it is a group config:\nconfigKey.subItemKey1: subItemValue1 configKey.subItemKey2: subItemValue2 ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The config can separate into 2 configmaps is:\napiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.customerAPI-v1: value of customerAPI-v1 core.default.endpoint-name-grouping-openapi.productAPI-v1: value of productAPI-v1 apiVersion: v1 kind: ConfigMap metadata: name: skywalking-dynamic-config2 namespace: default labels: app: collector release: skywalking data: core.default.endpoint-name-grouping-openapi.productAPI-v2: value of productAPI-v2 ","title":"Dynamic Configuration Kubernetes Configmap Implementation","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-configmap/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/latest/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/next/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as Dynamic Configuration Center (DCC). 
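Building on the configService.publishConfig call shown above, the following is a minimal sketch of publishing a Nacos group config with the Nacos Java client: the group index Data Id lists the sub item keys separated by new lines, and each sub item is published as its own Data Id in the same group. The server address, group, and keys mirror the example above and are assumptions, not required values.

```java
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.PropertyKeyConst;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.exception.NacosException;

import java.util.Properties;

public class NacosGroupConfigSeed {
    public static void main(String[] args) throws NacosException {
        // Assumption: a local Nacos server and the default "skywalking" group from the config above.
        Properties props = new Properties();
        props.put(PropertyKeyConst.SERVER_ADDR, "127.0.0.1:8848");
        ConfigService configService = NacosFactory.createConfigService(props);

        String group = "skywalking";
        String configKey = "core.default.endpoint-name-grouping-openapi";

        // Group index entry: sub item keys separated by \n (or \r\n), one per line.
        configService.publishConfig(configKey, group,
                "customerAPI-v1\nproductAPI-v1\nproductAPI-v2");

        // One entry per sub item, keyed by the sub item key itself.
        configService.publishConfig("customerAPI-v1", group, "value of customerAPI-v1");
        configService.publishConfig("productAPI-v1", group, "value of productAPI-v1");
        configService.publishConfig("productAPI-v2", group, "value of productAPI-v2");
    }
}
```

Removing a sub item works the same way in reverse: delete its Data Id and drop its key from the group index entry, otherwise the OAP keeps looking it up on the next sync.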
To use it, please configure as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking the config in nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace, if you set the config by Nacos UI each subItemkey should in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API each subItemkey should separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking the config in nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Nacos Implementation Nacos is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:nacos}nacos:# Nacos Server HostserverAddr:${SW_CONFIG_NACOS_SERVER_ADDR:127.0.0.1}# Nacos Server Portport:${SW_CONFIG_NACOS_SERVER_PORT:8848}# Nacos Configuration Groupgroup:${SW_CONFIG_NACOS_SERVER_GROUP:skywalking}# Nacos Configuration namespacenamespace:${SW_CONFIG_NACOS_SERVER_NAMESPACE:}# Unit seconds, sync period. Default fetch every 60 seconds.period:${SW_CONFIG_NACOS_PERIOD:60}# the name of current cluster, set the name if you want to upstream system known.clusterName:${SW_CONFIG_NACOS_CLUSTER_NAME:default}Config Storage Single Config    Data Id Group Config Value     configKey {group} configValue    e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value     agent-analyzer.default.slowDBAccessThreshold skywalking default:200,mongodb:50    Group Config    Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT   subItemkey1 {group} subItemValue1    subItemkey2 {group} subItemValue2    \u0026hellip; \u0026hellip; \u0026hellip;     Notice: If you add/remove a subItem, you need to add/remove the subItemKey from the group to which the subItem belongs:\n   Data Id Group Config Value Config Type     configKey {group} subItemkey1subItemkey2\u0026hellip; TEXT    We separate subItemkeys by \\n or \\r\\n, trim leading and trailing whitespace; if you set the config by Nacos UI, each subItemkey should be in a new line:\nsubItemValue1 subItemValue2 ... If you set the config by API, each subItemkey should be separated by \\n or \\r\\n:\nconfigService.publishConfig(\u0026quot;test-module.default.testKeyGroup\u0026quot;, \u0026quot;skywalking\u0026quot;, \u0026quot;subItemkey1\\n subItemkey2\u0026quot;)); e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If group = skywalking, the config in Nacos is:\n   Data Id Group Config Value Config Type     core.default.endpoint-name-grouping-openapi skywalking customerAPI-v1productAPI-v1productAPI-v2 TEXT   customerAPI-v1 skywalking value of customerAPI-v1    productAPI-v1 skywalking value of productAPI-v1    productAPI-v2 skywalking value of productAPI-v2     ","title":"Dynamic Configuration Nacos Implementation","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-nacos/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/latest/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/next/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system), after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. 
The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. 
The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Service, DCS Dynamic Configuration Service is a gRPC service which requires implementation of the upstream system. 
The SkyWalking OAP fetches the configuration from the implementation (any system) after you open the implementation like this:\nconfiguration:selector:${SW_CONFIGURATION:grpc}grpc:host:${SW_DCS_SERVER_HOST:\u0026#34;\u0026#34;}port:${SW_DCS_SERVER_PORT:80}clusterName:${SW_DCS_CLUSTER_NAME:SkyWalking}period:${SW_DCS_PERIOD:20}Config Server Response uuid: To identify whether the config data changed, if uuid is the same, it is not required to respond to the config data.\nSingle Config Implement:\nrpc call (ConfigurationRequest) returns (ConfigurationResponse) { } e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} The response configTable is:\nconfigTable { name: \u0026quot;agent-analyzer.default.slowDBAccessThreshold\u0026quot; value: \u0026quot;default:200,mongodb:50\u0026quot; } Group Config Implement:\nrpc callGroup (ConfigurationRequest) returns (GroupConfigurationResponse) {} Respond config data GroupConfigItems groupConfigTable\ne.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} The response groupConfigTable is:\ngroupConfigTable { groupName: \u0026quot;core.default.endpoint-name-grouping-openapi\u0026quot; items { name: \u0026quot;customerAPI-v1\u0026quot; value: \u0026quot;value of customerAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v1\u0026quot; value: \u0026quot;value of productAPI-v1\u0026quot; } items { name: \u0026quot;productAPI-v2\u0026quot; value: \u0026quot;value of productAPI-v2\u0026quot; } } ","title":"Dynamic Configuration Service, DCS","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-service/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/latest/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/next/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.0.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.1.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.2.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.3.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.4.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.5.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. 
Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.6.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamic Configuration Zookeeper Implementation Zookeeper is also supported as a Dynamic Configuration Center (DCC). To use it, please configure it as follows:\nconfiguration:selector:${SW_CONFIGURATION:zookeeper}zookeeper:period:${SW_CONFIG_ZK_PERIOD:60}# Unit seconds, sync period. Default fetch every 60 seconds.namespace:${SW_CONFIG_ZK_NAMESPACE:/default}hostPort:${SW_CONFIG_ZK_HOST_PORT:localhost:2181}# Retry PolicybaseSleepTimeMs:${SW_CONFIG_ZK_BASE_SLEEP_TIME_MS:1000}# initial amount of time to wait between retriesmaxRetries:${SW_CONFIG_ZK_MAX_RETRIES:3}# max number of times to retryThe namespace is the ZooKeeper path. The config key and value are the properties of the namespace folder.\nConfig Storage Single Config znode.path = {namespace}/configKey configValue = znode.data e.g. The config is:\n{agent-analyzer.default.slowDBAccessThreshold}:{default:200,mongodb:50} If namespace = /default the config in zookeeper is:\nznode.path = /default/agent-analyzer.default.slowDBAccessThreshold znode.data = default:200,mongodb:50 Group Config znode.path = {namespace}/configKey znode.child1.path = {znode.path}/subItemkey1 znode.child2.path = {znode.path}/subItemkey2 ... subItemValue1 = znode.child1.data subItemValue2 = znode.child2.data ... e.g. 
The config is:\n{core.default.endpoint-name-grouping-openapi}:|{customerAPI-v1}:{value of customerAPI-v1} |{productAPI-v1}:{value of productAPI-v1} |{productAPI-v2}:{value of productAPI-v2} If namespace = /default the config in zookeeper is:\nznode.path = /default/core.default.endpoint-name-grouping-openapi znode.customerAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/customerAPI-v1 znode.productAPI-v1.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v1 znode.productAPI-v2.path = /default/core.default.endpoint-name-grouping-openapi/productAPI-v2 znode.customerAPI-v1.data = value of customerAPI-v1 znode.productAPI-v1.data = value of productAPI-v1 znode.productAPI-v2.data = value of productAPI-v2 ","title":"Dynamic Configuration Zookeeper Implementation","url":"/docs/main/v9.7.0/en/setup/backend/dynamic-config-zookeeper/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/latest/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. 
log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/next/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to update the XML configuration file manually, which could be time-consuming and prone to manmade mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP has started, you have to wait for a while for the changes to be applied. 
The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.0.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.1.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.2.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.3.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
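As a sketch of where the refresh period mentioned above (configuration.&lt;configuration implement&gt;.period) lives in application.yaml, assuming the Kubernetes ConfigMap implementation is selected. The module key and environment variable shown here (k8s-configmap, SW_CONFIG_CONFIGMAP_PERIOD) are illustrative assumptions and should be checked against your SkyWalking release:

    configuration:
      selector: ${SW_CONFIGURATION:k8s-configmap}
      k8s-configmap:
        # configuration.<configuration implement>.period, in seconds (default 60)
        period: ${SW_CONFIG_CONFIGMAP_PERIOD:60}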
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.4.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.5.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.6.0/en/setup/backend/dynamical-logging/"},{"content":"Dynamical Logging The OAP server leverages log4j2 to manage the logging system. log4j2 supports changing the configuration at runtime, but you have to manually update the XML configuration file, which could be time-consuming and prone to man-made mistakes.\nDynamical logging, which depends on dynamic configuration, provides us with an agile way to update all OAP log4j configurations through a single operation.\nThe key of the configuration item is core.default.log4j-xml, and you can select any of the configuration implements to store the content of log4j.xml. In the booting phase, once the core module gets started, core.default.log4j-xml would come into the OAP log4j context.\nIf the configuration is changed after the OAP startup, you have to wait for a while for the changes to be applied. The default value is 60 seconds, which you could change through configuration.\u0026lt;configuration implement\u0026gt;.period in application.yaml.\nIf you remove core.default.log4j-xml from the configuration center or disable the configuration module, log4j.xml in the config directory would be affected.\n Caveat: The OAP only supports the XML configuration format.\n This is an example of configuring dynamical logging through a ConfigMap in a Kubernetes cluster. 
You may set up other configuration clusters following the same procedures.\napiVersion:v1data:core.default.log4j-xml:|-\u0026lt;Configuration status=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;Appenders\u0026gt; \u0026lt;Console name=\u0026#34;Console\u0026#34; target=\u0026#34;SYSTEM_OUT\u0026#34;\u0026gt; \u0026lt;PatternLayout charset=\u0026#34;UTF-8\u0026#34; pattern=\u0026#34;%d - %c - %L [%t] %-5p %x - %m%n\u0026#34;/\u0026gt; \u0026lt;/Console\u0026gt; \u0026lt;/Appenders\u0026gt; \u0026lt;Loggers\u0026gt; \u0026lt;logger name=\u0026#34;io.grpc.netty\u0026#34; level=\u0026#34;INFO\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.api\u0026#34; level=\u0026#34;TRACE\u0026#34;/\u0026gt; \u0026lt;logger name=\u0026#34;org.apache.skywalking.oap.server.configuration.configmap\u0026#34; level=\u0026#34;DEBUG\u0026#34;/\u0026gt; \u0026lt;Root level=\u0026#34;WARN\u0026#34;\u0026gt; \u0026lt;AppenderRef ref=\u0026#34;Console\u0026#34;/\u0026gt; \u0026lt;/Root\u0026gt; \u0026lt;/Loggers\u0026gt; \u0026lt;/Configuration\u0026gt;kind:ConfigMapmetadata:labels:app:collectorrelease:skywalkingname:skywalking-oapnamespace:default","title":"Dynamical Logging","url":"/docs/main/v9.7.0/en/setup/backend/dynamical-logging/"},{"content":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. 
Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task for the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a schedule for the task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of the current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent kernel mode and user mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of a symbol, the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocking on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives an Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to a process, and based on the data, generate topology diagrams, metrics, and other information. 
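As an aside on the receiver activation quoted in the "Active in the OAP" paragraphs above: the receiver-ebpf snippet is easier to read when re-indented as it appears in application.yaml. This is the same configuration, only reformatted:

    receiver-ebpf:
      selector: ${SW_RECEIVER_EBPF:default}
      default: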
Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. 
The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","title":"eBPF Profiling","url":"/docs/main/latest/en/setup/backend/backend-ebpf-profiling/"},{"content":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. 
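For reference, the per-element query result described in the On CPU Profiling sections above (Id, Parent ID, Symbol, Stack Type, Dump Count) can be pictured as follows. The values are made up purely to show how the parent/child references and dump counts form a flame graph; this is not output from a real query:

    elements:
      - id: 1
        parentId: 0              # root element (no parent)
        symbol: main
        stackType: USER_SPACE
        dumpCount: 100
      - id: 2
        parentId: 1              # child of main
        symbol: handleRequest
        stackType: USER_SPACE
        dumpCount: 80            # more samples of a symbol means more time on CPU
      - id: 3
        parentId: 2
        symbol: tcp_sendmsg
        stackType: KERNEL_SPACE
        dumpCount: 25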
When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. 
Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. 
Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. 
Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","title":"eBPF Profiling","url":"/docs/main/next/en/setup/backend/backend-ebpf-profiling/"},{"content":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. 
At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  
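Before the sampling rules are explained in detail, here is an illustrative sketch of one sampling rule using the fields described in the Network Profiling sections above (URI Regex, Min Duration, When 4XX/5XX, and the collection settings). The YAML layout and values are hypothetical; tasks are normally created through the UI or query APIs, and this only shows how the fields relate to each other:

    sampling:
      - uriRegex: /api/orders/.*        # only collect requests matching this URI; empty means all
        minDuration: 500                # only sample responses at or above this duration; empty means all
        when4xx: true                   # sample responses with status 400-499
        when5xx: true                   # sample responses with status 500-599
        settings:
          requireCompleteRequest: true
          maxRequestSize: 2048          # maximum request bytes to collect; empty means all
          requireCompleteResponse: true
          maxResponseSize: 2048         # maximum response bytes to collect; empty means all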
Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. 
The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","title":"eBPF Profiling","url":"/docs/main/v9.5.0/en/setup/backend/backend-ebpf-profiling/"},{"content":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. Corresponds to Out-Process Profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. 
When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLean more about the eBPF profiling in following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Active in the OAP OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The processes under which service entity need to perform Profiling tasks. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent would periodically request from the OAP whether there are any eligible tasks among all the processes collected by the current eBPF agent. When the eBPF agent receives a task, it would start the profiling task with the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. 
Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. 
Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. 
Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","title":"eBPF Profiling","url":"/docs/main/v9.6.0/en/setup/backend/backend-ebpf-profiling/"},{"content":"eBPF Profiling eBPF Profiling utilizes the eBPF technology to monitor applications without requiring any modifications to the application itself. This corresponds to out-of-process profiling.\nTo use eBPF Profiling, the SkyWalking Rover application (eBPF Agent) needs to be installed on the host machine. When the agent receives a Profiling task, it starts the Profiling task for the specific application to analyze performance bottlenecks for the corresponding type of Profiling.\nLearn more about eBPF profiling in the following blogs:\n Pinpoint Service Mesh Critical Performance Impact by using eBPF Diagnose Service Mesh Network Performance with eBPF  Activate in the OAP The OAP and the agent use a brand-new protocol to exchange eBPF Profiling data, so it is necessary to start the OAP with the following configuration:\nreceiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Profiling type eBPF Profiling leverages eBPF technology to provide support for the following types of tasks:\n On CPU Profiling: Periodically samples the thread stacks of the current program while it\u0026rsquo;s executing on the CPU using PERF_COUNT_SW_CPU_CLOCK. Off CPU Profiling: Collects and aggregates thread stacks when the program executes the kernel function finish_task_switch. Network Profiling: Collects the execution details of the application when performing network-related syscalls, and then aggregates them into a topology map and metrics for different network protocols.  On CPU Profiling On CPU Profiling periodically samples the thread stacks of the target program while it\u0026rsquo;s executing on the CPU and aggregates the thread stacks to create a flame graph. This helps users identify performance bottlenecks based on the flame graph information.\nCreating task When creating an On CPU Profiling task, you need to specify which eligible processes need to be sampled. The required configuration information is as follows:\n Service: The service entity whose processes need to perform the Profiling task. Labels: Specifies which processes with certain labels under the service entity can perform profiling tasks. If left blank, all processes under the specified service will require profiling. Start Time: Whether the current task needs to be executed immediately or at a future point in time. Duration: The execution time of the current profiling task.  The eBPF agent periodically queries the OAP for eligible tasks among all the processes it has collected. When the eBPF agent receives a task, it starts the profiling task for the process.\nProfiling analyze Once the eBPF agent starts a profiling task for a specific process, it would periodically collect data and report it to the OAP. 
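Side note on the Activate in the OAP snippet above, which is flattened into a single line in this index: for readability, it corresponds to the following application.yml fragment. The selector value and the SW_RECEIVER_EBPF environment variable are taken directly from that inline snippet; nothing beyond it is assumed.

receiver-ebpf:
  selector: ${SW_RECEIVER_EBPF:default}
  default: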
At this point, a scheduling of task is generated. The scheduling data contains the following information:\n Schedule ID: The ID of current schedule. Task: The task to which the current scheduling data belongs. Process: The process for which the current scheduling Profiling data is being collected. Start Time: The execution start time of the current schedule. End Time: The time when the last sampling of the current schedule was completed.  Once the schedule is created, we can use the existing scheduling ID and time range to query the CPU execution situation of the specified process within a specific time period. The query contains the following fields:\n Schedule ID: The schedule ID you want to query. Time: The start and end times you want to query.  After the query, the following data would be returned. With the data, it\u0026rsquo;s easy to generate a flame graph:\n Id: Element ID. Parent ID: Parent element ID. The dependency relationship between elements can be determined using the element ID and parent element ID. Symbol: The symbol name of the current element. Usually, it represents the method names of thread stacks in different languages. Stack Type: The type of thread stack where the current element is located. Supports KERNEL_SPACE and USER_SPACE, which represent user mode and kernel mode, respectively. Dump Count: The number of times the current element was sampled. The more samples of symbol, means the longer the method execution time.  Off CPU Profiling Off CPU Profiling can analyze the thread state when a thread switch occurs in the current process, thereby determining performance loss caused by blocked on I/O, locks, timers, paging/swapping, and other reasons. The execution flow between the eBPF agent and OAP in Off CPU Profiling is the same as in On CPU Profiling, but the data content being analyzed is different.\nCreate task The process of creating an Off CPU Profiling task is the same as creating an On CPU Profiling task, with the only difference being that the Profiling task type is changed to OFF CPU Profiling. For specific parameters, please refer to the previous section.\nProfiling analyze When the eBPF agent receives a Off CPU Profiling task, it would also collect data and generate a schedule. When analyzing data, unlike On CPU Profiling, Off CPU Profiling can generate different flame graphs based on the following two aggregation methods:\n By Time: Aggregate based on the time consumed by each method, allowing you to analyze which methods take longer. By Count: Aggregate based on the number of times a method switches to non-CPU execution, allowing you to analyze which methods cause more non-CPU executions for the task.  Network Profiling Network Profiling can analyze and monitor network requests related to process, and based on the data, generate topology diagrams, metrics, and other information. Furthermore, it can be integrated with existing Tracing systems to enhance the data content.\nCreate task Unlike On/Off CPU Profiling, Network Profiling requires specifying the instance entity information when creating a task. For example, in a Service Mesh, there may be multiple processes under a single instance(Pod), such as an application and Envoy. In network analysis, they usually work together, so analyzing them together can give you a better understanding of the network execution situation of the Pod. The following parameters are needed:\n Instance: The current Instance entity. Sampling: Sampling information for network requests.  
Sampling represents how the current system samples raw data and combines it with the existing Tracing system, allowing you to see the complete network data corresponding to a Span in Tracing Span. Currently, it supports sampling Raw information for Spans using HTTP/1.x as RPC and parsing SkyWalking and Zipkin protocols. The sampling information configuration is as follows:\n URI Regex: Only collect requests that match the specified URI. If empty, all requests will be collected. Min Duration: Only sample data with a response time greater than or equal to the specified duration. If empty, all requests will be collected. When 4XX: Only sample data with a response status code between 400 and 500 (exclusive). When 5XX: Only sample data with a response status code between 500 and 600 (exclusive). Settings: When network data meets the above rules, how to collect the data.  Require Complete Request: Whether to collect request data. Max Request Size: The maximum data size for collecting requests. If empty, all data will be collected. Require Complete Response: Whether to collect response data. Max Response Size: The maximum data size for collecting responses. If empty, all data will be collected.    Profiling analysis After starting the task, the following data can be analyzed:\n Topology: Analyze the data flow and data types when the current instance interacts internally and externally. TCP Metrics: Network Layer-4 metrics between two process. HTTP/1.x Metrics: If there are HTTP/1.x requests between two nodes, the HTTP/1.x metrics would be analyzed based on the data content. HTTP Request: If two nodes use HTTP/1.x and include a tracing system, the tracing data would be extended with events.  Topology The topology can generate two types of data:\n Internal entities: The network call relationships between all processes within the current instance. Entities and external: The call relationships between processes inside the entity and external network nodes.  For external nodes, since eBPF can only collect remote IP and port information during data collection, OAP can use Kubernetes cluster information to recognize the corresponding Service or Pod names.\nBetween two nodes, data flow direction can be detected, and the following types of data protocols can be identified:\n HTTP: Two nodes communicate using HTTP/1.x or HTTP/2.x protocol. HTTPS: Two nodes communicate using HTTPS. TLS: Two nodes use encrypted data for transition, such as when using OpenSSL. TCP: There is TCP data transmission between two nodes.  TCP Metrics In the TCP metrics, each metric includes both client-side and server-side data. 
The metrics are as follows:\n   Name Unit Description     Write CPM Count Number of write requests initiated per minute   Write Total Bytes B Total data size written per minute   Write Avg Execute Time ns Average execution time for each write operation   Write RTT ns Round Trip Time (RTT)   Read CPM Count Number of read requests per minute   Read Total Bytes B Total data size read per minute   Read Avg Execute Time ns Average execution time for each read operation   Connect CPM Count Number of new connections established   Connect Execute Time ns Time taken to establish a connection   Close CPM Count Number of closed connections   Close Execute Time ns Time taken to close a connection   Retransmit CPM Count Number of data retransmissions per minute   Drop CPM Count Number of dropped packets per minute    HTTP/1.x Metrics If there is HTTP/1.x protocol communication between two nodes, the eBPF agent can recognize the request data and parse the following metric information:\n   Name Unit Description     Request CPM Count Number of requests received per minute   Response Status CPM Count Number of occurrences of each response status code per minute   Request Package Size B Average request package data size   Response Package Size B Average response package data size   Client Duration ns Time taken for the client to receive a response   Server Duration ns Time taken for the server to send a response    HTTP Request If two nodes communicate using the HTTP/1.x protocol, and they employ a distributed tracing system, then eBPf agent can collect raw data according to the sampling rules configured in the previous sections.\nSampling Raw Data When the sampling conditions are met, the original request or response data would be collected, including the following fields:\n Data Size: The data size of the current request/response content. Data Content: The raw data content. Non-plain format content would not be collected. Data Direction: The data transfer direction, either Ingress or Egress. Data Type: The data type, either Request or Response. Connection Role: The current node\u0026rsquo;s role as a client or server. Entity: The entity information of the current process. Time: The Request or response sent/received time.  Syscall Event When sampling rules are applied, the related Syscall invocations for the request or response would also be collected, including the following information:\n Method Name: System Syscall method names such as read, write, readv, writev, etc. Packet Size: The current TCP packet size. Packet Count: The number of sent or received packets. Network Interface Information: The network interface from which the packet was sent.  ","title":"eBPF Profiling","url":"/docs/main/v9.7.0/en/setup/backend/backend-ebpf-profiling/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/latest/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/next/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.0.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.1.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.2.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.3.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.4.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.5.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.6.0/en/faq/es-server-faq/"},{"content":"ElasticSearch Some new users may encounter the following issues:\n The performance of ElasticSearch is not as good as expected. For instance, the latest data cannot be accessed after some time.  Or\n ERROR CODE 429.   
Suppressed: org.elasticsearch.client.ResponseException: method [POST], host [http://127.0.0.1:9200], URI [/service_instance_inventory/type/6_tcc-app-gateway-77b98ff6ff-crblx.cards_0_0/_update?refresh=true\u0026amp;timeout=1m], status line [HTTP/1.1 429 Too Many Requests] {\u0026quot;error\u0026quot;:{\u0026quot;root_cause\u0026quot;:[{\u0026quot;type\u0026quot;:\u0026quot;remote_transport_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;[elasticsearch-0][10.16.9.130:9300][indices:data/write/update[s]]\u0026quot;}],\u0026quot;type\u0026quot;:\u0026quot;es_rejected_execution_exception\u0026quot;,\u0026quot;reason\u0026quot;:\u0026quot;rejected execution of org.elasticsearch.transport.TransportService$7@19a5cf02 on EsThreadPoolExecutor[name = elasticsearch-0/write, queue capacity = 200, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@389297ad[Running, pool size = 2, active threads = 2, queued tasks = 200, completed tasks = 147611]]\u0026quot;},\u0026quot;status\u0026quot;:429} at org.elasticsearch.client.RestClient$SyncResponseListener.get(RestClient.java:705) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:235) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestClient.performRequest(RestClient.java:198) ~[elasticsearch-rest-client-6.3.2.jar:6.3.2] at org.elasticsearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:522) ~[elasticsearch You could add the following config to elasticsearch.yml, and set the value based on your environment variable.\n# In the case of tracing, consider setting a value higher than this.thread_pool.index.queue_size:1000thread_pool.write.queue_size:1000# When you face query error at trace page, remember to check this.index.max_result_window:1000000For more information, see ElasticSearch\u0026rsquo;s official documentation.\n","title":"ElasticSearch","url":"/docs/main/v9.7.0/en/faq/es-server-faq/"},{"content":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. The storage provider is elasticsearch. This storage option is recommended for a large scale production environment, such as more than 1000 services, 10000 endpoints, and 100000 traces per minute, and plan to 100% sampling rate for the persistent in the storage.\nOpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  Elasticsearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_management (All SkyWalking management data, e.g. 
UI dashboard settings, UI Menu, Continuous profiling policy) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. 
Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. 
This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\n","title":"Elasticsearch and OpenSearch","url":"/docs/main/latest/en/setup/backend/storages/elasticsearch/"},{"content":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. The storage provider is elasticsearch. 
This storage option is recommended for a large scale production environment, such as more than 1000 services, 10000 endpoints, and 100000 traces per minute, and plan to 100% sampling rate for the persistent in the storage.\nOpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  Elasticsearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_management (All SkyWalking management data, e.g. UI dashboard settings, UI Menu, Continuous profiling policy) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config 
would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  
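For readability, the HTTPS example shown inline above expands to the following application.yml fragment. The option names, defaults, and comments are taken directly from that snippet; the final commented line simply stands for the remaining elasticsearch options listed earlier and is not an additional setting.

storage:
  selector: ${SW_STORAGE:elasticsearch}
  elasticsearch:
    namespace: ${SW_NAMESPACE:""}
    user: ${SW_ES_USER:""}              # User needs to be set when Http Basic authentication is enabled
    password: ${SW_ES_PASSWORD:""}      # Password to be set when Http Basic authentication is enabled
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}
    trustStorePath: ${SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
    trustStorePass: ${SW_STORAGE_ES_SSL_JKS_PASS:""}
    protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"https"}
    # ... (other elasticsearch options as shown in the full configuration above)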
Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\n","title":"Elasticsearch and OpenSearch","url":"/docs/main/next/en/setup/backend/storages/elasticsearch/"},{"content":"Elasticsearch and OpenSearch Elasticsearch and OpenSearch are supported as storage. The storage provider is elasticsearch. 
This storage option is recommended for a large scale production environment, such as more than 1000 services, 10000 endpoints, and 100000 traces per minute, and plan to 100% sampling rate for the persistent in the storage.\nOpenSearch OpenSearch is a fork from ElasticSearch 7.11 but licensed in Apache 2.0. OpenSearch storage shares the same configurations as ElasticSearch. In order to activate OpenSearch as storage, set the storage provider to elasticsearch.\nWe support and tested the following versions of OpenSearch:\n 1.1.0, 1.3.10 2.4.0, 2.8.0  Elasticsearch NOTE: Elastic announced through their blog that Elasticsearch will be moving over to a Server Side Public License (SSPL) and/or Elastic License 2.0(ELv2), since Feb. 2021, which is incompatible with Apache License 2.0. Both of these licenses are not OSS licenses approved by the Open Source Initiative (OSI). This license change is effective from Elasticsearch version 7.11. So please choose the suitable ElasticSearch version according to your usage. If you have concerns about SSPL/ELv2, choose the versions before 7.11 or switch to OpenSearch.\nBy default, SkyWalking uses following indices for various telemetry data.\n sw_management (All SkyWalking management data, e.g. UI dashboard settings, UI Menu, Continuous profiling policy) sw_metrics-all-${day-format} (All metrics/meters generated through MAL and OAL engines, and metadata of service/instance/endpoint) sw_log-${day-format} (Collected logs, exclude browser logs) sw_segment-${day-format} (Native trace segments) sw_browser_error_log-${day-format} (Collected browser logs) sw_zipkin_span-${day-format} (Zipkin trace spans) sw_records-all-${day-format} (All sampled records, e.g. slow SQLs, agent profiling, and ebpf profiling)  SkyWalking rebuilds the ElasticSearch client on top of ElasticSearch REST API and automatically picks up correct request formats according to the server-side version, hence you don\u0026rsquo;t need to download different binaries and don\u0026rsquo;t need to configure different storage selectors for different ElasticSearch server-side versions anymore.\nFor now, SkyWalking supports ElasticSearch 7.x, ElasticSearch 8.x, and OpenSearch 1.x, their configurations are as follows:\nNotice, ElasticSearch 6 worked and is not promised due to end of life officially.\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}clusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;http\u0026#34;}trustStorePath:${SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;\u0026#34;}trustStorePass:${SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}secretsManagementFile:${SW_ES_SECRETS_MANAGEMENT_FILE:\u0026#34;\u0026#34;}# Secrets management file in the properties format includes the username, password, which are managed by 3rd party tool.dayStep:${SW_STORAGE_DAY_STEP:1}# Represent the number of days in the one minute/hour/day index.indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}# Shard number of new indexesindexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}# Replicas number of new indexes# Specify the settings for each index individually.# If configured, this setting has the highest priority and overrides the generic settings.specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}# Super data set has been defined in the codes, such as trace segments.The following 3 config 
would be improve es performance when storage super size data in es.superDatasetDayStep:${SW_STORAGE_ES_SUPER_DATASET_DAY_STEP:-1}# Represent the number of days in the super size dataset record index, the default value is the same as dayStep when the value is less than 0superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}# This factor provides more shards for the super data set, shards number = indexShardsNumber * superDatasetIndexShardsFactor. Also, this factor effects Zipkin traces.superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}# Represent the replicas number in the super size dataset record index, the default value is 0.indexTemplateOrder:${SW_STORAGE_ES_INDEX_TEMPLATE_ORDER:0}# the order of index templatebulkActions:${SW_STORAGE_ES_BULK_ACTIONS:1000}# Execute the async bulk record data every ${SW_STORAGE_ES_BULK_ACTIONS} requestsflushInterval:${SW_STORAGE_ES_FLUSH_INTERVAL:10}# flush the bulk every 10 seconds whatever the number of requestsconcurrentRequests:${SW_STORAGE_ES_CONCURRENT_REQUESTS:2}# the number of concurrent requestsresultWindowMaxSize:${SW_STORAGE_ES_QUERY_MAX_WINDOW_SIZE:10000}metadataQueryMaxSize:${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}segmentQueryMaxSize:${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}profileTaskQueryMaxSize:${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}profileDataQueryScrollBatchSize:${SW_STORAGE_ES_QUERY_PROFILE_DATA_SCROLLING_BATCH_SIZE:100}oapAnalyzer:${SW_STORAGE_ES_OAP_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;stop\\\u0026#34;}}}\u0026#34;}# the oap analyzer.oapLogAnalyzer:${SW_STORAGE_ES_OAP_LOG_ANALYZER:\u0026#34;{\\\u0026#34;analyzer\\\u0026#34;:{\\\u0026#34;oap_log_analyzer\\\u0026#34;:{\\\u0026#34;type\\\u0026#34;:\\\u0026#34;standard\\\u0026#34;}}}\u0026#34;}# the oap log analyzer. It could be customized by the ES analyzer configuration to support more language log formats, such as Chinese log, Japanese log and etc.advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;\u0026#34;}# Set it to `true` could shard metrics indices into multi-physical indices# as same as the versions(one index template per metric/meter aggregation function) before 9.2.0.logicSharding:${SW_STORAGE_ES_LOGIC_SHARDING:false}# Custom routing can reduce the impact of searches. Instead of having to fan out a search request to all the shards in an index, the request can be sent to just the shard that matches the specific routing value (or values).enableCustomRouting:${SW_STORAGE_ES_ENABLE_CUSTOM_ROUTING:false}ElasticSearch With Https SSL Encrypting communications. Example:\nstorage:selector:${SW_STORAGE:elasticsearch}elasticsearch:namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}user:${SW_ES_USER:\u0026#34;\u0026#34;}# User needs to be set when Http Basic authentication is enabledpassword:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}# Password to be set when Http Basic authentication is enabledclusterNodes:${SW_STORAGE_ES_CLUSTER_NODES:localhost:443}trustStorePath:${SW_SW_STORAGE_ES_SSL_JKS_PATH:\u0026#34;../es_keystore.jks\u0026#34;}trustStorePass:${SW_SW_STORAGE_ES_SSL_JKS_PASS:\u0026#34;\u0026#34;}protocol:${SW_STORAGE_ES_HTTP_PROTOCOL:\u0026#34;https\u0026#34;}... File at trustStorePath is being monitored. Once it is changed, the ElasticSearch client will reconnect. trustStorePass could be changed in the runtime through Secrets Management File Of ElasticSearch Authentication.  
Daily Index Step Daily index step(storage/elasticsearch/dayStep, default 1) represents the index creation period. In this period, metrics for several days (dayStep value) are saved.\nIn most cases, users don\u0026rsquo;t need to change the value manually, as SkyWalking is designed to observe large-scale distributed systems. But in some cases, users may want to set a long TTL value, such as more than 60 days. However, their ElasticSearch cluster may not be powerful enough due to low traffic in the production environment. This value could be increased to 5 (or more) if users could ensure a single index could support the metrics and traces for these days (5 in this case).\nFor example, if dayStep == 11,\n Data in [2000-01-01, 2000-01-11] will be merged into the index-20000101. Data in [2000-01-12, 2000-01-22] will be merged into the index-20000112.  storage/elasticsearch/superDatasetDayStep overrides the storage/elasticsearch/dayStep if the value is positive. This would affect the record-related entities, such as trace segments. In some cases, the size of metrics is much smaller than the record (trace). This would improve the shards balance in the ElasticSearch cluster.\nNOTE: TTL deletion would be affected by these steps. You should set an extra dayStep in your TTL. For example, if you want to have TTL == 30 days and dayStep == 10, you are recommended to set TTL = 40.\nSecrets Management File Of ElasticSearch Authentication The value of secretsManagementFile should point to the secrets management file absolute path. The file includes the username, password, and JKS password of the ElasticSearch server in the properties format.\nuser=xxx password=yyy trustStorePass=zzz The major difference between using user, password, trustStorePass configs in the application.yaml file is that the Secrets Management File is being watched by the OAP server. Once it is changed manually or through a 3rd party tool, such as Vault, the storage provider will use the new username, password, and JKS password to establish the connection and close the old one. If the information exists in the file, the user/password will be overridden.\nIndex Settings The following settings control the number of shards and replicas for new and existing index templates. The update only got applied after OAP reboots.\nstorage:elasticsearch:# ......indexShardsNumber:${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:1}indexReplicasNumber:${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:1}specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;\u0026#34;}superDatasetIndexShardsFactor:${SW_STORAGE_ES_SUPER_DATASET_INDEX_SHARDS_FACTOR:5}superDatasetIndexReplicasNumber:${SW_STORAGE_ES_SUPER_DATASET_INDEX_REPLICAS_NUMBER:0}The following table shows the relationship between those config items and Elasticsearch index number_of_shards/number_of_replicas. 
And also you can specify the settings for each index individually.\n   index number_of_shards number_of_replicas     sw_ui_template indexShardsNumber indexReplicasNumber   sw_metrics-all-${day-format} indexShardsNumber indexReplicasNumber   sw_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_segment-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_browser_error_log-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_zipkin_span-${day-format} indexShardsNumber * superDatasetIndexShardsFactor superDatasetIndexReplicasNumber   sw_records-all-${day-format} indexShardsNumber indexReplicasNumber    Advanced Configurations For Elasticsearch Index You can add advanced configurations in JSON format to set ElasticSearch index settings by following ElasticSearch doc\nFor example, set translog settings:\nstorage:elasticsearch:# ......advanced:${SW_STORAGE_ES_ADVANCED:\u0026#34;{\\\u0026#34;index.translog.durability\\\u0026#34;:\\\u0026#34;request\\\u0026#34;,\\\u0026#34;index.translog.sync_interval\\\u0026#34;:\\\u0026#34;5s\\\u0026#34;}\u0026#34;}Specify Settings For Each Elasticsearch Index Individually You can specify the settings for one or more indexes individually by using SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS.\nNOTE: Supported settings:\n number_of_shards number_of_replicas  NOTE: These settings have the highest priority and will override the existing generic settings mentioned in index settings doc.\nThe settings are in JSON format. The index name here is logic entity name, which should exclude the ${SW_NAMESPACE} which is sw by default, e.g.\n{ \u0026#34;metrics-all\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;3\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;2\u0026#34; }, \u0026#34;segment\u0026#34;:{ \u0026#34;number_of_shards\u0026#34;:\u0026#34;6\u0026#34;, \u0026#34;number_of_replicas\u0026#34;:\u0026#34;1\u0026#34; } } This configuration in the YAML file is like this,\nstorage:elasticsearch:# ......specificIndexSettings:${SW_STORAGE_ES_SPECIFIC_INDEX_SETTINGS:\u0026#34;{\\\u0026#34;metrics-all\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;3\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;2\\\u0026#34;},\\\u0026#34;segment\\\u0026#34;:{\\\u0026#34;number_of_shards\\\u0026#34;:\\\u0026#34;6\\\u0026#34;,\\\u0026#34;number_of_replicas\\\u0026#34;:\\\u0026#34;1\\\u0026#34;}}\u0026#34;}Recommended ElasticSearch server-side configurations You could add the following configuration to elasticsearch.yml, and set the value based on your environment.\n# In tracing scenario, consider to set more than this at least.thread_pool.index.queue_size:1000# Only suitable for ElasticSearch 6thread_pool.write.queue_size:1000# Suitable for ElasticSearch 6 and 7# When you face a query error on the traces page, remember to check this.index.max_result_window:1000000We strongly recommend that you read more about these configurations from ElasticSearch\u0026rsquo;s official documentation since they directly impact the performance of ElasticSearch.\nAbout Namespace When a namespace is set, all index names in ElasticSearch will use it as the prefix.\n","title":"Elasticsearch and OpenSearch","url":"/docs/main/v9.7.0/en/setup/backend/storages/elasticsearch/"},{"content":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce 
Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0","url":"/docs/main/latest/en/faq/es-version-conflict/"},{"content":"Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Since 8.7.0, we did the following optimization to reduce Elasticsearch load.\nPerformance: remove the synchronous persistence mechanism from batch ElasticSearch DAO. Because the current enhanced persistent session mechanism, don\u0026#39;t require the data queryable immediately after the insert and update anymore. Due to this, we flush the metrics into Elasticsearch without using WriteRequest.RefreshPolicy.WAIT_UNTIL. 
This reduces the load of persistent works in OAP server and load of Elasticsearch CPU dramatically.\nMeanwhile, there is little chance you could see following warns in your logs.\n{ \u0026quot;timeMillis\u0026quot;: 1626247722647, \u0026quot;thread\u0026quot;: \u0026quot;I/O dispatcher 4\u0026quot;, \u0026quot;level\u0026quot;: \u0026quot;WARN\u0026quot;, \u0026quot;loggerName\u0026quot;: \u0026quot;org.apache.skywalking.oap.server.library.client.elasticsearch.ElasticSearchClient\u0026quot;, \u0026quot;message\u0026quot;: \u0026quot;Bulk [70] executed with failures:[failure in bulk execution:\\n[18875]: index [sw8_service_relation_client_side-20210714], type [_doc], id [20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1], message [[sw8_service_relation_client_side-20210714/D7qzncbeRq6qh2QF5MogTw][[sw8_service_relation_client_side-20210714][0]] ElasticsearchException[Elasticsearch exception [type=version_conflict_engine_exception, reason=[20210714_b3BlcmF0aW9uLXJ1bGUtc2VydmVyQDExNDgx.1-bWFya2V0LXJlZmVycmFsLXNlcnZlckAxMDI1MQ==.1]: version conflict, required seqNo [14012594], primary term [1]. current document has seqNo [14207928] and primary term [1]]]]]\u0026quot;, \u0026quot;endOfBatch\u0026quot;: false, \u0026quot;loggerFqcn\u0026quot;: \u0026quot;org.apache.logging.slf4j.Log4jLogger\u0026quot;, \u0026quot;threadId\u0026quot;: 44, \u0026quot;threadPriority\u0026quot;: 5, \u0026quot;timestamp\u0026quot;: \u0026quot;2021-07-14 15:28:42.647\u0026quot; } This would not affect the system much, just a possibility of inaccurate of metrics. If this wouldn\u0026rsquo;t show up in high frequency, you could ignore this directly.\nIn case you could see many logs like this. Then it is a signal, that the flush period of your ElasticSearch template can\u0026rsquo;t catch up your setting. Or you set the persistentPeriod less than the flush period.\n","title":"Elasticsearch exception `type=version_conflict_engine_exception` since 8.7.0","url":"/docs/main/v9.7.0/en/faq/es-version-conflict/"},{"content":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collect metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup elasticsearch-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. 
elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  
meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only 
primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shards on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with only all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. 
The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","title":"Elasticsearch monitoring","url":"/docs/main/latest/en/setup/backend/backend-elasticsearch-monitoring/"},{"content":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collect metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup elasticsearch-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. 
This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  
meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  
meter_elasticsearch_index_stats_search_query_total_req_rate meter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_time meter_elasticsearch_index_search_query_avg_time meter_elasticsearch_index_search_scroll_avg_time meter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_rate meter_elasticsearch_index_stats_search_fetch_total_req_rate meter_elasticsearch_index_stats_search_scroll_total_req_rate meter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shard on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","title":"Elasticsearch monitoring","url":"/docs/main/next/en/setup/backend/backend-elasticsearch-monitoring/"},{"content":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collects metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up elasticsearch-exporter. Set up OpenTelemetry Collector. For an example of the OpenTelemetry Collector configuration, refer to here. Configure the SkyWalking OpenTelemetry receiver.  
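For orientation, a minimal OpenTelemetry Collector configuration for this data flow could look roughly like the sketch below. It is only an illustration, not the shipped example linked above; the scrape target, the OAP address, and the job name are placeholders that must match your environment (the shipped example may also attach extra labels expected by the OAP rules).
receivers:
  prometheus:
    config:
      scrape_configs:
        # Scrape the elasticsearch-exporter's Prometheus endpoint (address is a placeholder).
        - job_name: elasticsearch-monitoring
          scrape_interval: 30s
          static_configs:
            - targets: ['elasticsearch-exporter:9114']
processors:
  batch: {}
exporters:
  # Push the scraped metrics to the SkyWalking OAP OTLP/gRPC endpoint (address is a placeholder).
  otlp:
    endpoint: oap:11800
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
With a configuration of this shape, the Collector scrapes elasticsearch-exporter via the Prometheus Receiver and forwards the metrics to the SkyWalking OAP over OTLP gRPC, matching the data flow described above.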
Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 
Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes 
elasticsearch-exporter   Deleted Documents Primary  meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. 
time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shards on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with only all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","title":"Elasticsearch monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-elasticsearch-monitoring/"},{"content":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collect metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup elasticsearch-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. 
elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  
meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only 
primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shards on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with only all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. 
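As a rough illustration of what a rule in those files looks like (the shipped elasticsearch-cluster.yaml is the authoritative reference; the expression, labels, and metric below are simplified placeholders), a MAL-based OTel rule file generally follows this shape:
# Illustrative shape only; see the shipped elasticsearch-*.yaml files for the real rules.
expSuffix: tag({tags -> tags.cluster = 'elasticsearch::' + tags.cluster}).service(['cluster'], Layer.ELASTICSEARCH)
metricPrefix: meter_elasticsearch_cluster
metricsRules:
  # Becomes the meter named meter_elasticsearch_cluster_nodes shown in the table above.
  - name: nodes
    exp: elasticsearch_cluster_health_number_of_nodes
Each entry under metricsRules turns a Prometheus-style metric from elasticsearch-exporter into a meter named metricPrefix plus the rule name, and the expSuffix decides which ELASTICSEARCH service the value is attached to.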
The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","title":"Elasticsearch monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-elasticsearch-monitoring/"},{"content":"Elasticsearch monitoring SkyWalking leverages elasticsearch-exporter for collecting metrics data from Elasticsearch. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The elasticsearch-exporter collect metrics data from Elasticsearch. OpenTelemetry Collector fetches metrics from elasticsearch-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup elasticsearch-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Elasticsearch Monitoring Elasticsearch monitoring provides multidimensional metrics monitoring of Elasticsearch clusters as Layer: ELASTICSEARCH Service in the OAP. In each cluster, the nodes are represented as Instance and indices are Endpoints.\nElasticsearch Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Cluster Health meter_elasticsearch_cluster_health_status Whether all primary and replica shards are allocated elasticsearch-exporter   Tripped Of Breakers meter_elasticsearch_cluster_breakers_tripped Tripped for breaker elasticsearch-exporter   Nodes meter_elasticsearch_cluster_nodes Number of nodes in the cluster. elasticsearch-exporter   Data Nodes meter_elasticsearch_cluster_data_nodes Number of data nodes in the cluster elasticsearch-exporter   Pending Tasks meter_elasticsearch_cluster_pending_tasks_total Cluster level changes which have not yet been executed elasticsearch-exporter   CPU Usage Avg. (%) meter_elasticsearch_cluster_cpu_usage_avg Cluster level percent CPU used by process elasticsearch-exporter   JVM Memory Used Avg. (%) meter_elasticsearch_cluster_jvm_memory_used_avg Cluster level percent JVM memory used elasticsearch-exporter   Open Files meter_elasticsearch_cluster_open_file_count Open file descriptors elasticsearch-exporter   Active Primary Shards meter_elasticsearch_cluster_primary_shards_total The number of primary shards in your cluster. 
This is an aggregate total across all indices elasticsearch-exporter   Active Shards meter_elasticsearch_cluster_shards_total Aggregate total of all shards across all indices, which includes replica shards elasticsearch-exporter   Initializing Shards meter_elasticsearch_cluster_initializing_shards_total Count of shards that are being freshly created elasticsearch-exporter   Delayed Unassigned Shards meter_elasticsearch_cluster_delayed_unassigned_shards_total Shards delayed to reduce reallocation overhead elasticsearch-exporter   Relocating Shards meter_elasticsearch_cluster_relocating_shards_total The number of shards that are currently moving from one node to another node elasticsearch-exporter   Unassigned Shards meter_elasticsearch_cluster_unassigned_shards_total The number of shards that exist in the cluster state, but cannot be found in the cluster itself elasticsearch-exporter    Elasticsearch Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Rules  meter_elasticsearch_node_rules Node roles elasticsearch-exporter   JVM Memory Used MB meter_elasticsearch_node_jvm_memory_used Node level JVM memory used size elasticsearch-exporter   CPU Percent % meter_elasticsearch_node_process_cpu_percent Node level percent CPU used by process elasticsearch-exporter   Documents  meter_elasticsearch_node_indices_docs Count of index documents on this node elasticsearch-exporter   Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Disk Free Space GB meter_elasticsearch_node_all_disk_free_space Available space on all block device elasticsearch-exporter   Open Files  meter_elasticsearch_node_open_file_count Open file descriptors elasticsearch-exporter   Process CPU Usage Percent % meter_elasticsearch_node_process_cpu_percent Percent CPU used by process elasticsearch-exporter   OS CPU usage percent % meter_elasticsearch_node_os_cpu_percent Percent CPU used by the OS elasticsearch-exporter   Load Average  meter_elasticsearch_node_os_load1 meter_elasticsearch_node_os_load5meter_elasticsearch_node_os_load15 Shortterm, Midterm, Longterm load average elasticsearch-exporter   JVM Memory Usage MB meter_elasticsearch_node_jvm_memory_nonheap_used\nmeter_elasticsearch_node_jvm_memory_heap_usedmeter_elasticsearch_node_jvm_memory_heap_max JVM memory currently usage by area elasticsearch-exporter   JVM Pool Peak Used MB meter_elasticsearch_node_jvm_memory_pool_peak_used JVM memory currently used by pool elasticsearch-exporter   GC Count  meter_elasticsearch_node_jvm_gc_count Count of JVM GC runs elasticsearch-exporter   GC Time ms/min meter_elasticsearch_node_jvm_gc_time GC run time elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_node_indices_*_req_rate All Operations ReqRate on node elasticsearch-exporter   Indexing Rate reqps meter_elasticsearch_node_indices_indexing_index_total_req_rate\nmeter_elasticsearch_node_indices_indexing_index_total_proc_rate Indexing rate on node elasticsearch-exporter   Searching Rate reqps meter_elasticsearch_node_indices_search_fetch_total_req_rate\nmeter_elasticsearch_node_indices_search_query_time_seconds_proc_rate Searching rate on node elasticsearch-exporter   Total Translog Operations  meter_elasticsearch_node_indices_translog_operations Total translog operations elasticsearch-exporter   Total Translog Size MB meter_elasticsearch_node_indices_translog_size Total translog size elasticsearch-exporter   Tripped For Breakers  
meter_elasticsearch_node_breakers_tripped Tripped for breaker elasticsearch-exporter   Estimated Size Of Breaker MB meter_elasticsearch_node_breakers_estimated_size Estimated size of breaker elasticsearch-exporter   Documents Count KB/s meter_elasticsearch_node_indices_docs Count of documents on this node elasticsearch-exporter   Merged Documents Count count/s meter_elasticsearch_node_indices_merges_docs_total Cumulative docs merged elasticsearch-exporter   Deleted Documents Count  meter_elasticsearch_node_indices_docs_deleted_total Count of deleted documents on this node elasticsearch-exporter   Documents Index Rate calls/s meter_elasticsearch_node_indices_indexing_index_total_req_rate Total index calls per second elasticsearch-exporter   Merged Documents Rate MB / s meter_elasticsearch_node_indices_merges_total_size_bytes_total Total merge size per second elasticsearch-exporter   Documents Deleted Rate docs/s meter_elasticsearch_node_indices_docs_deleted Count of deleted documents per second on this node elasticsearch-exporter   Count Of Index Segments  meter_elasticsearch_node_segment_count Count of index segments on this node elasticsearch-exporter   Current Memory Size Of Segments MB meter_elasticsearch_node_segment_memory Current memory size of segments elasticsearch-exporter   Network bytes/sec meter_elasticsearch_node_network_send_bytesmeter_elasticsearch_node_network_receive_bytes Total number of bytes sent and receive elasticsearch-exporter   Disk Usage Percent % meter_elasticsearch_node_disk_usage_percent Used space on block device elasticsearch-exporter   Disk Usage GB meter_elasticsearch_node_disk_usage Used space size of block device elasticsearch-exporter   Disk Read KBs meter_elasticsearch_node_disk_io_read_bytes Total kilobytes read from disk elasticsearch-exporter   Disk Write KBs meter_elasticsearch_node_disk_io_write_bytes Total kilobytes write from disk elasticsearch-exporter    Elasticsearch Index Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Documents Primary  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on all nodes elasticsearch-exporter   Deleted Documents Primary  meter_elasticsearch_index_indices_deleted_docs_primary Count of deleted documents with only primary shards elasticsearch-exporter   Data Primary GB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data with only primary shards on all nodes elasticsearch-exporter   Data GB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data with all shards on all nodes elasticsearch-exporter   Segments Primary  meter_elasticsearch_index_indices_segment_count_primary Current number of segments with only primary shards on all nodes elasticsearch-exporter   Segments Memory Primary MB meter_elasticsearch_index_indices_segment_memory_bytes_primary Current size of segments with only primary shards on all nodes elasticsearch-exporter   Segments  meter_elasticsearch_index_indices_segment_count_total Current number of segments with all shards on all nodes elasticsearch-exporter   Segments Memory MB meter_elasticsearch_index_indices_segment_memory_bytes_total Current size of segments with all shards on all nodes elasticsearch-exporter   Indexing Rate  meter_elasticsearch_index_stats_indexing_index_total_req_ratemeter_elasticsearch_index_stats_indexing_index_total_proc_rate Indexing rate on index elasticsearch-exporter   Searching Rate  
meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_query_total_proc_rate Searching rate on index elasticsearch-exporter   All Operations ReqRate  meter_elasticsearch_index_stats_*_req_rate All Operations ReqRate on index elasticsearch-exporter   All Operations Runtime  meter_elasticsearch_index_stats_*_time_seconds_total All Operations Runtime/s on index elasticsearch-exporter   Avg. Search Time Execute / Request s meter_elasticsearch_index_search_fetch_avg_timemeter_elasticsearch_index_search_query_avg_timemeter_elasticsearch_index_search_scroll_avg_timemeter_elasticsearch_index_search_suggest_avg_time Search Operation Avg. time on index elasticsearch-exporter   Search Operations Rate req/s meter_elasticsearch_index_stats_search_query_total_req_ratemeter_elasticsearch_index_stats_search_fetch_total_req_ratemeter_elasticsearch_index_stats_search_scroll_total_req_ratemeter_elasticsearch_index_stats_search_suggest_total_req_rate Search Operations ReqRate on index elasticsearch-exporter   Shards Documents  meter_elasticsearch_index_indices_shards_docs Count of documents per shards on index elasticsearch-exporter   Documents (Primary Shards)  meter_elasticsearch_index_indices_docs_primary Count of documents with only primary shards on index elasticsearch-exporter   Documents Created Per Min (Primary Shards)  meter_elasticsearch_index_indices_docs_primary_rate Documents rate with only primary shards on index elasticsearch-exporter   Total Size Of Index (Primary Shards) MB meter_elasticsearch_index_indices_store_size_bytes_primary Current total size of stored index data in bytes with only primary shards on all nodes elasticsearch-exporter   Documents (All Shards)  meter_elasticsearch_index_indices_docs_total Count of documents with all shards on index elasticsearch-exporter   Documents Created Per Min (All Shards)  meter_elasticsearch_index_indices_docs_total_rate Documents rate with only all shards on index elasticsearch-exporter   Total Size Of Index (All Shards) MB meter_elasticsearch_index_indices_store_size_bytes_total Current total size of stored index data in bytes with all shards on all nodes elasticsearch-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/elasticsearch/elasticsearch-cluster.yaml, /config/otel-rules/elasticsearch/elasticsearch-node.yaml, /config/otel-rules/elasticsearch/elasticsearch-index.yaml. The Elasticsearch dashboard panel configurations are found in /config/ui-initialized-templates/elasticsearch.\n","title":"Elasticsearch monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-elasticsearch-monitoring/"},{"content":"Enable/Disable Channel Different channels mean that different protocols can be transparently transmitted to upstream services(OAP).\nConfig In the Satellite configuration, a channel is represented under the configured pipes. 
By default, we open all channels and process all known protocols.\nYou can delete a channel if you don\u0026rsquo;t want Satellite to receive and transmit it.\nAfter restarting the Satellite service, the channel you deleted is disabled.\n","title":"Enable/Disable Channel","url":"/docs/skywalking-satellite/latest/en/setup/examples/feature/enable-disable-channel/readme/"},{"content":"Enable/Disable Channel Different channels mean that different protocols can be transparently transmitted to upstream services (OAP).\nConfig In the Satellite configuration, a channel is represented under the configured pipes. By default, we open all channels and process all known protocols.\nYou can delete a channel if you don\u0026rsquo;t want Satellite to receive and transmit it.\nAfter restarting the Satellite service, the channel you deleted is disabled.\n","title":"Enable/Disable Channel","url":"/docs/skywalking-satellite/next/en/setup/examples/feature/enable-disable-channel/readme/"},{"content":"Enable/Disable Channel Different channels mean that different protocols can be transparently transmitted to upstream services (OAP).\nConfig In the Satellite configuration, a channel is represented under the configured pipes. By default, we open all channels and process all known protocols.\nYou can delete a channel if you don\u0026rsquo;t want Satellite to receive and transmit it.\nAfter restarting the Satellite service, the channel you deleted is disabled.\n","title":"Enable/Disable Channel","url":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/feature/enable-disable-channel/readme/"},{"content":"End to End Tests (E2E) SkyWalking relies heavily on automated tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking always sets up the OAP, monitored services, and related remote server dependencies in a real environment, and verifies the data flow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we have migrated to e2e-v2, which leverages skywalking-infra-e2e and skywalking-cli to drive the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP, such as requesting and getting metrics from the OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and other tools if your cases need them. You can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), in single-node mode as well as cluster mode. 
Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using docker-compose, which provides a simple file format (docker-compose.yml) for orchestrating the required containers and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml (a rough sketch of such a file is shown below). Write the expected data (yml) for verification.   Run e2e test  All e2e cases should be under skywalking/test/e2e-v2/cases. You can execute the e2e run command in skywalking/, e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","title":"End to End Tests (E2E)","url":"/docs/main/latest/en/guides/e2e/"},{"content":"End to End Tests (E2E) SkyWalking relies heavily on automated tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking always sets up the OAP, monitored services, and related remote server dependencies in a real environment, and verifies the data flow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we have migrated to e2e-v2, which leverages skywalking-infra-e2e and skywalking-cli to drive the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP, such as requesting and getting metrics from the OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and other tools if your cases need them. You can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), in single-node mode as well as cluster mode. 
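For reference, the e2e case config (e2e.yaml) referred to above typically pairs a setup section (how to bring the containers up) with a verify section (queries and their expected output). The following is only a rough, illustrative sketch; the timeouts, query, variables, and expected-data path are placeholders, and the real cases under test/e2e-v2/cases are the authoritative examples.
setup:
  # Bring up the environment defined in docker-compose.yml (placeholder values).
  env: compose
  file: docker-compose.yml
  timeout: 20m
verify:
  # Retry the queries until they match or the retry budget is exhausted.
  retry:
    count: 20
    interval: 10s
  cases:
    # Query the OAP through skywalking-cli and compare against the expected data file.
    - query: swctl --display yaml --base-url=http://${oap_host}:${oap_12800}/graphql service ls
      expected: expected/service.yml
With a file of this shape, skywalking-infra-e2e starts the compose environment, runs the skywalking-cli query with retries, and compares the response against the expected yml.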
Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","title":"End to End Tests (E2E)","url":"/docs/main/next/en/guides/e2e/"},{"content":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking is always setting the OAP, monitored services and relative remote server dependencies in a real environment, and verify the dataflow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. 
Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","title":"End to End Tests (E2E)","url":"/docs/main/v9.6.0/en/guides/e2e/"},{"content":"End to End Tests (E2E) SkyWalking heavily rely more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n E2E in SkyWalking is always setting the OAP, monitored services and relative remote server dependencies in a real environment, and verify the dataflow and ultimate query results.\nThe E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. 
Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\n","title":"End to End Tests (E2E)","url":"/docs/main/v9.7.0/en/guides/e2e/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. 
This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will be created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter; in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killed. Reported by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reported by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reported by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reported by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reported by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reported by Kubernetes Event Exporter.    The complete event list can be found in the Kubernetes codebase; please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/latest/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events.
This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. 
Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/next/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. 
For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nHow to Configure Alarms for Events Events derive from metrics, and can be the source to trigger alarms. For example, if a specific event occurs a certain number of times in a period, alarms can be triggered and sent.\nEvery event has a default value = 1; when n events with the same name are reported, they are aggregated into value = n as follows.\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} will be aggregated into\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} \u0026lt;value = 6\u0026gt; so you can configure the following alarm rule to trigger an alarm when the Unhealthy event occurs more than 5 times within 10 minutes.\nrules:\n  unhealthy_event_rule:\n    metrics-name: Unhealthy\n    # Healthiness check is usually a scheduled task,\n    # they may be unhealthy for the first few times,\n    # and can be unhealthy occasionally due to network jitter,\n    # please adjust the threshold as per your actual situation.\n    threshold: 5\n    op: \u0026#34;\u0026gt;\u0026#34;\n    period: 10\n    count: 1\n    message: Service instance has been unhealthy for 10 minutes\nFor more alarm configuration details, please refer to the alarm doc.\nNote that the Unhealthy event above is only for demonstration; it is not detected by default in SkyWalking. However, you can use the methods in How to Report Events to report this kind of event.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nBy default, SkyWalking also generates some metrics for events by using OAL. The default metrics list of events may change over time; you can find the complete list in event.oal. If you want to generate your custom metrics from events, please refer to OAL about how to write OAL rules.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will be created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter; in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killed. Reported by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reported by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reported by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reported by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reported by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reported by Kubernetes Event Exporter.    
The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.0.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nHow to Configure Alarms for Events Events derive from metrics, and can be the source to trigger alarms. 
For example, if a specific event occurs for a certain times in a period, alarms can be triggered and sent.\nEvery event has a default value = 1, when n events with the same name are reported, they are aggregated into value = n as follows.\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} Event{name=Unhealthy, source={service=A,instance=a}, ...} will be aggregated into\nEvent{name=Unhealthy, source={service=A,instance=a}, ...} \u0026lt;value = 6\u0026gt; so you can configure the following alarm rule to trigger alarm when Unhealthy event occurs more than 5 times within 10 minutes.\nrules:unhealthy_event_rule:metrics-name:Unhealthy# Healthiness check is usually a scheduled task,# they may be unhealthy for the first few times,# and can be unhealthy occasionally due to network jitter,# please adjust the threshold as per your actual situation.threshold:5op:\u0026#34;\u0026gt;\u0026#34;period:10count:1message:Service instance has been unhealthy for 10 minutesFor more alarm configuration details, please refer to the alarm doc.\nNote that the Unhealthy event above is only for demonstration, they are not detected by default in SkyWalking, however, you can use the methods in How to Report Events to report this kind of events.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.1.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. 
This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. 
Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.2.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. 
For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.3.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. 
Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.4.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. 
Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. 
Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.5.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. 
For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.6.0/en/concepts-and-designs/event/"},{"content":"Events SkyWalking already supports the three pillars of observability, namely logs, metrics, and traces. In reality, a production system experiences many other events that may affect the performance of the system, such as upgrading, rebooting, chaos testing, etc. Although some of these events are reflected in the logs, many others are not. Hence, SkyWalking provides a more native way to collect these events. This doc details how SkyWalking collects events and what events look like in SkyWalking.\nHow to Report Events The SkyWalking backend supports three protocols to collect events: gRPC, HTTP, and Kafka. Any agent or CLI that implements one of these protocols can report events to SkyWalking. Currently, the officially supported clients to report events are:\n Java Agent Toolkit: Using the Java agent toolkit to report events within the applications. SkyWalking CLI: Using the CLI to report events from the command line interface. Kubernetes Event Exporter: Deploying an event exporter to refine and report Kubernetes events.  Event Definitions An event contains the following fields. The definitions of event can be found at the protocol repo.\nUUID Unique ID of the event. Since an event may span a long period of time, the UUID is necessary to associate the start time with the end time of the same event.\nSource The source object on which the event occurs. In SkyWalking, the object is typically a service, service instance, etc.\nName Name of the event. For example, Start, Stop, Crash, Reboot, Upgrade, etc.\nType Type of the event. This field is friendly for UI visualization, where events of type Normal are considered normal operations, while Error is considered unexpected operations, such as Crash events. 
Marking them with different colors allows us to more easily identify them.\nMessage The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported. Examples of an Upgrade event may be something like Upgrade from ${from_version} to ${to_version}. It\u0026rsquo;s NOT recommended to include the detailed logs of this event, such as the exception stack trace.\nParameters The parameters in the message field. This is a simple \u0026lt;string,string\u0026gt; map.\nStart Time The start time of the event. This field is mandatory when an event occurs.\nEnd Time The end time of the event. This field may be empty if the event has not ended yet, otherwise there should be a valid timestamp after startTime.\nNOTE: When reporting an event, you typically call the report function twice, the first time for starting of the event and the second time for ending of the event, both with the same UUID. There are also cases where you would already have both the start time and end time. For example, when exporting events from a third-party system, the start time and end time are already known so you may simply call the report function once.\nCorrelation between events and metrics SkyWalking UI visualizes the events in the dashboard when the event service / instance / endpoint matches the displayed service / instance / endpoint.\nKnown Events    Name Type When Where     Start Normal When your Java Application starts with SkyWalking Agent installed, the Start Event will be created. Reported from SkyWalking agent.   Shutdown Normal When your Java Application stops with SkyWalking Agent installed, the Shutdown Event will be created. Reported from SkyWalking agent.   Alarm Error When the Alarm is triggered, the corresponding Alarm Event will is created. Reported from internal SkyWalking OAP.    The following events are all reported by Kubernetes Event Exporter, in order to see these events, please make sure you have deployed the exporter.\n   Name Type When Where     Killing Normal When the Kubernetes Pod is being killing. Reporter by Kubernetes Event Exporter.   Pulling Normal When a docker image is being pulled for deployment. Reporter by Kubernetes Event Exporter.   Pulled Normal When a docker image is pulled for deployment. Reporter by Kubernetes Event Exporter.   Created Normal When a container inside a Pod is created. Reporter by Kubernetes Event Exporter.   Started Normal When a container inside a Pod is started. Reporter by Kubernetes Event Exporter.   Unhealthy Error When the readiness probe failed. Reporter by Kubernetes Event Exporter.    The complete event lists can be found in the Kubernetes codebase, please note that not all the events are supported by the exporter for now.\n","title":"Events","url":"/docs/main/v9.7.0/en/concepts-and-designs/event/"},{"content":"Events Report Protocol The protocol is used to report events to the backend. 
The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
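A record in the format shown in the example that follows can be pushed to that endpoint with any plain HTTP client. As a minimal sketch (the POST verb, the Content-Type header, the localhost address, and the local event.json file are assumptions for illustration; the path and port come from the endpoint above):

curl -X POST http://localhost:12800/v3/events \
  -H 'Content-Type: application/json' \
  -d @event.json

As with the gRPC collect call described earlier, you would typically send the record twice with the same uuid: once with only startTime when the event begins, and again with endTime filled in when it finishes.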
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Events Report Protocol","url":"/docs/main/latest/en/api/event/"},{"content":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  
string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Events Report Protocol","url":"/docs/main/next/en/api/event/"},{"content":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  
map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Events Report Protocol","url":"/docs/main/v9.4.0/en/api/event/"},{"content":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. 
This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Events Report Protocol","url":"/docs/main/v9.5.0/en/api/event/"},{"content":"Events Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  
// There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Events Report Protocol","url":"/docs/main/v9.6.0/en/api/event/"},{"content":"Events Report Protocol The protocol is used to report events to the backend. 
The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.event.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/event/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service EventService { // When reporting an event, you typically call the collect function twice, one for starting of the event and the other one for ending of the event, with the same UUID.  // There are also cases where you have both start time and end time already, for example, when exporting events from a 3rd-party system,  // the start time and end time are already known so that you can call the collect function only once.  rpc collect (stream Event) returns (Commands) { }}message Event { // Unique ID of the event. Because an event may span a long period of time, the UUID is necessary to associate the  // start time with the end time of the same event.  string uuid = 1; // The source object that the event occurs on.  Source source = 2; // The name of the event. For example, `Reboot`, `Upgrade` etc.  string name = 3; // The type of the event. This field is friendly for UI visualization, where events of type `Normal` are considered as normal operations,  // while `Error` is considered as unexpected operations, such as `Crash` events, therefore we can mark them with different colors to be easier identified.  Type type = 4; // The detail of the event that describes why this event happened. This should be a one-line message that briefly describes why the event is reported.  // Examples of an `Upgrade` event may be something like `Upgrade from ${from_version} to ${to_version}`.  // It\u0026#39;s NOT encouraged to include the detailed logs of this event, such as the exception stack trace.  string message = 5; // The parameters in the `message` field.  map\u0026lt;string, string\u0026gt; parameters = 6; // The start time (in milliseconds) of the event, measured between the current time and midnight, January 1, 1970 UTC.  // This field is mandatory when an event occurs.  int64 startTime = 7; // The end time (in milliseconds) of the event. , measured between the current time and midnight, January 1, 1970 UTC.  // This field may be empty if the event has not stopped yet, otherwise it should be a valid timestamp after `startTime`.  int64 endTime = 8;  // [Required] Since 9.0.0  // Name of the layer to which the event belongs.  string layer = 9;}enum Type { Normal = 0; Error = 1;}// If the event occurs on a service ONLY, the `service` field is mandatory, the serviceInstance field and endpoint field are optional; // If the event occurs on a service instance, the `service` and `serviceInstance` are mandatory and endpoint is optional; // If the event occurs on an endpoint, `service` and `endpoint` are mandatory, `serviceInstance` is optional; message Source { string service = 1; string serviceInstance = 2; string endpoint = 3;}JSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Events Report Protocol","url":"/docs/main/v9.7.0/en/api/event/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. 
message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/latest/en/setup/backend/exporter/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER:default} and ${SW_EXPORTER_ENABLE_GRPC_METRICS:true}, configure the target gRPC server address.\nexporter:selector:${SW_EXPORTER:default}default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. 
Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER:default} and ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}, configure the Kafka server addresses.\nexporter:selector:${SW_EXPORTER:default}default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER:default} and ${SW_EXPORTER_ENABLE_KAFKA_LOG:true}, configure the Kafka server addresses.\nexporter:selector:${SW_EXPORTER:default}default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/next/en/setup/backend/exporter/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics   Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. 
Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}...Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. 
Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/v9.3.0/en/setup/backend/exporter/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics   Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. 
message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}...Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/v9.4.0/en/setup/backend/exporter/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. 
For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/v9.5.0/en/setup/backend/exporter/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. 
Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. 
Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/v9.6.0/en/setup/backend/exporter/"},{"content":"Exporter SkyWalking provides the essential functions of observability, including metrics aggregation, trace, log, alerting, and profiling. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Exporter has made that possible.\nThe exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporting channels:\n gRPC Exporter   Metrics  Kafka Exporter   Trace Log  gRPC Exporter Metrics gRPC Exporter Metrics gRPC exporter uses SkyWalking\u0026rsquo;s native export service definition. Here is the proto definition: metric-exporter.proto.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_GRPC_METRICS:true} and config the target gRPC server address.\nexporter:default:# gRPC exporterenableGRPCMetrics:${SW_EXPORTER_ENABLE_GRPC_METRICS:true}gRPCTargetHost:${SW_EXPORTER_GRPC_HOST:127.0.0.1}gRPCTargetPort:${SW_EXPORTER_GRPC_PORT:9870}... gRPCTargetHost:gRPCTargetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service   Subscription implementation. Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\n  Export implementation. Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n  Kafka Exporter Trace Kafka Exporter Trace kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-trace to export the trace. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: TraceSegmentId Value: Bytes of SegmentObject The SegmentObject definition follows the protocol: SkyWalking data collect protocol#Tracing.proto.\n// The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. 
message SegmentObject { string traceId = 1; string traceSegmentId = 2; repeated SpanObject spans = 3; string service = 4; string serviceInstance = 5; bool isSizeLimited = 6;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_TRACE:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaTrace:${SW_EXPORTER_ENABLE_KAFKA_TRACE:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicTrace:${SW_EXPORTER_KAFKA_TOPIC_TRACE:skywalking-export-trace}exportErrorStatusTraceOnly:${SW_EXPORTER_KAFKA_TRACE_FILTER_ERROR:false}... exportErrorStatusTraceOnly=true represents that only export the error status trace segments through the Kafka channel.  Log Kafka Exporter Log kafka exporter pushes messages to the Kafka Broker and Topic skywalking-export-log to export the log. Here is the message:\nProducerRecord\u0026lt;String, Bytes\u0026gt; Key: LogRecordId Value: Bytes of LogData The LogData definition follows the protocol: SkyWalking data collect protocol#Logging.proto.\nmessage LogData { int64 timestamp = 1; string service = 2; string serviceInstance = 3; string endpoint = 4; LogDataBody body = 5; TraceContext traceContext = 6; LogTags tags = 7; string layer = 8;}To activate the exporter, you should set ${SW_EXPORTER_ENABLE_KAFKA_LOG:true} and config the Kafka server.\nexporter:default:# Kafka exporterenableKafkaLog:${SW_EXPORTER_ENABLE_KAFKA_LOG:true}kafkaBootstrapServers:${SW_EXPORTER_KAFKA_SERVERS:localhost:9092}# Kafka producer config, JSON format as Properties.kafkaProducerConfig:${SW_EXPORTER_KAFKA_PRODUCER_CONFIG:\u0026#34;\u0026#34;}kafkaTopicLog:${SW_EXPORTER_KAFKA_TOPIC_LOG:skywalking-export-log}...","title":"Exporter","url":"/docs/main/v9.7.0/en/setup/backend/exporter/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/latest/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. 
This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/next/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.0.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. 
Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.1.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.2.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  
Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.3.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.4.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.5.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. 
This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.6.0/en/guides/backend-profile-export/"},{"content":"Exporter tool for profile raw data When visualization doesn\u0026rsquo;t work well on the official UI, users may submit issue reports. This tool helps users package the original profile data to assist the community in locating the issues in the users' cases. NOTE: This report includes the class name, method name, line number, etc. Before making your submission, please make sure that the security of your system wouldn\u0026rsquo;t be compromised.\nExport using command line  Set the storage in the tools/profile-exporter/application.yml file based on your use case. Prepare the data  Profile task ID: Profile task ID Trace ID: Trace ID of the profile error Export dir: Directory exported by the data   Enter the Skywalking root path Execute shell command bash tools/profile-exporter/profile_exporter.sh --taskid={profileTaskId} --traceid={traceId} {exportDir}  The file {traceId}.tar.gz will be generated after executing shell.  Exported data content  basic.yml: Contains the complete information of the profiled segments in the trace. snapshot.data: All monitored thread snapshot data in the current segment.  Report profile issues  Provide exported data generated from this tool. Provide the operation name and the mode of analysis (including/excluding child span) for the span. Issue description. (It would be great if you could provide UI screenshots.)  ","title":"Exporter tool for profile raw data","url":"/docs/main/v9.7.0/en/guides/backend-profile-export/"},{"content":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","title":"Extend storage","url":"/docs/main/v9.0.0/en/guides/storage-extention/"},{"content":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","title":"Extend storage","url":"/docs/main/v9.1.0/en/guides/storage-extention/"},{"content":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","title":"Extend storage","url":"/docs/main/v9.2.0/en/guides/storage-extention/"},{"content":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","title":"Extend storage","url":"/docs/main/v9.3.0/en/guides/storage-extention/"},{"content":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? 
extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","title":"Extend storage","url":"/docs/main/v9.4.0/en/guides/storage-extention/"},{"content":"Extend storage SkyWalking has already provided several storage solutions. In this document, you could learn how to easily implement a new storage.\nDefine your storage provider  Define class extension org.apache.skywalking.oap.server.library.module.ModuleProvider. Set this provider targeting to storage module.  @Override public Class\u0026lt;? extends ModuleDefine\u0026gt; module() { return StorageModule.class; } Implement all DAOs Here\u0026rsquo;s a list of all DAO interfaces in storage:\n  IServiceInventoryCacheDAO\n  IServiceInstanceInventoryCacheDAO\n  IEndpointInventoryCacheDAO\n  INetworkAddressInventoryCacheDAO\n  IBatchDAO\n  StorageDAO\n  IRegisterLockDAO\n  ITopologyQueryDAO\n  IMetricsQueryDAO\n  ITraceQueryDAO\n  IMetadataQueryDAO\n  IAggregationQueryDAO\n  IAlarmQueryDAO\n  IHistoryDeleteDAO\n  IMetricsDAO\n  IRecordDAO\n  IRegisterDAO\n  ILogQueryDAO\n  ITopNRecordsQueryDAO\n  IBrowserLogQueryDAO\n  IProfileTaskQueryDAO\n  IProfileTaskLogQueryDAO\n  IProfileThreadSnapshotQueryDAO\n  UITemplateManagementDAO\n  Register all service implementations In public void prepare(), use this#registerServiceImplementation method to register and bind with your implementation of the above interfaces.\nExample org.apache.skywalking.oap.server.storage.plugin.elasticsearch.StorageModuleElasticsearchProvider and org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLStorageProvider are good examples.\n","title":"Extend storage","url":"/docs/main/v9.5.0/en/guides/storage-extention/"},{"content":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Fallbacker/none-fallbacker","url":"/docs/skywalking-satellite/latest/en/setup/plugins/fallbacker_none-fallbacker/"},{"content":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Fallbacker/none-fallbacker","url":"/docs/skywalking-satellite/next/en/setup/plugins/fallbacker_none-fallbacker/"},{"content":"Fallbacker/none-fallbacker Description The fallbacker would do nothing when facing failure data.\nDefaultConfig yaml\nConfiguration    Name Type Description    
","title":"Fallbacker/none-fallbacker","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/fallbacker_none-fallbacker/"},{"content":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward failure data.\nDefaultConfig # The forwarder max attempt times.max_attempts:3# The exponential_backoff is the standard retry duration, and the time for each retry is expanded# by 2 times until the number of retries reaches the maximum.(Time unit is millisecond.)exponential_backoff:2000# The max backoff time used in retrying, which would override the latency time when the latency time# with exponential increasing larger than it.(Time unit is millisecond.)max_backoff:5000Configuration    Name Type Description     max_attempts int    exponential_backoff int    max_backoff int     ","title":"Fallbacker/timer-fallbacker","url":"/docs/skywalking-satellite/latest/en/setup/plugins/fallbacker_timer-fallbacker/"},{"content":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward failure data.\nDefaultConfig # The forwarder max attempt times.max_attempts:3# The exponential_backoff is the standard retry duration, and the time for each retry is expanded# by 2 times until the number of retries reaches the maximum.(Time unit is millisecond.)exponential_backoff:2000# The max backoff time used in retrying, which would override the latency time when the latency time# with exponential increasing larger than it.(Time unit is millisecond.)max_backoff:5000Configuration    Name Type Description     max_attempts int    exponential_backoff int    max_backoff int     ","title":"Fallbacker/timer-fallbacker","url":"/docs/skywalking-satellite/next/en/setup/plugins/fallbacker_timer-fallbacker/"},{"content":"Fallbacker/timer-fallbacker Description This is a timer fallback trigger to process the forward failure data.\nDefaultConfig # The forwarder max attempt times.max_attempts:3# The exponential_backoff is the standard retry duration, and the time for each retry is expanded# by 2 times until the number of retries reaches the maximum.(Time unit is millisecond.)exponential_backoff:2000# The max backoff time used in retrying, which would override the latency time when the latency time# with exponential increasing larger than it.(Time unit is millisecond.)max_backoff:5000Configuration    Name Type Description     max_attempts int    exponential_backoff int    max_backoff int     ","title":"Fallbacker/timer-fallbacker","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/fallbacker_timer-fallbacker/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? 
Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/latest/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default? Why is Clickhouse or Loki or xxx not supported as a storage option?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/next/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? 
Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.0.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.1.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? 
Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.2.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.3.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? 
Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.4.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.5.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? 
Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  ","title":"FAQs","url":"/docs/main/v9.6.0/en/faq/readme/"},{"content":"FAQs These are known and frequently asked questions about SkyWalking. We welcome you to contribute here.\nDesign  Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture?  Compiling  Protoc plugin fails in maven build Required items could not be found when importing project into Eclipse Maven compilation failure with error such as python2 not found Compiling issues on Mac\u0026rsquo;s M1 chip  Runtime  New ElasticSearch storage option explanation in 9.2.0 Version 9.x+ upgrade Elasticsearch exception type=version_conflict_engine_exception since 8.7.0 Version 8.x+ upgrade Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? Version 6.x upgrade Why are there only traces in UI? Tracing doesn\u0026rsquo;t work on the Kafka consumer end Agent or collector version upgrade, 3.x -\u0026gt; 5.0.0-alpha EnhanceRequireObjectCache class cast exception ElasticSearch server performance issues, including ERROR CODE:429 IllegalStateException when installing Java agent on WebSphere 7 \u0026ldquo;FORBIDDEN/12/index read-only / allow delete (api)\u0026rdquo; appears in the log No data shown and backend replies with \u0026ldquo;Variable \u0026lsquo;serviceId\u0026rsquo; has coerced Null value for NonNull type \u0026lsquo;ID!'\u0026quot; Unexpected endpoint register warning after 6.6.0 Use the profile exporter tool if the profile analysis is not right Compatibility with other javaagent bytecode processes Java agent memory leak when enhancing Worker thread at Thread Pool Thrift plugin  UI  What is VNode? And why does SkyWalking have that?  
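The Fallbacker/timer-fallbacker entries earlier in this index flatten their DefaultConfig into a single run of text. As an editorial aid only, the same settings are restated below as plain YAML; the keys, values, and comments are taken from those entries, and only the line layout is restored. How this block nests inside a full Satellite pipeline configuration is not shown in these entries, so that part is left out.

```yaml
# The forwarder max attempt times.
max_attempts: 3
# The exponential_backoff is the standard retry duration, and the time for each retry is expanded
# by 2 times until the number of retries reaches the maximum. (Time unit is millisecond.)
exponential_backoff: 2000
# The max backoff time used in retrying, which would override the latency time when the latency
# time with exponential increasing grows larger than it. (Time unit is millisecond.)
max_backoff: 5000
```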
","title":"FAQs","url":"/docs/main/v9.7.0/en/faq/readme/"},{"content":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a Fetcher to fetch Istio control plane metrics, then push them to OAP server.\nInstall Operator Follow Operator installation instrument to install the operator.\nInstall Istio control plane Follow Install with istioctl to install a istiod.\nDeploy Fetcher, OAP server and UI with default settings Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f fetcher.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui,fetcher NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] NAME AGE fetcher.operator.skywalking.apache.org/istio-prod-cluster 36h View Istio Control Plane Dashboard from UI Follow View the UI to access the UI service.\nNavigate to Dashboard-\u0026gt;Istio Control Plane to view relevant metric diagrams.\n","title":"Fetch metrics from the Istio control plane(istiod)","url":"/docs/skywalking-swck/latest/examples/istio-controlplane/"},{"content":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a Fetcher to fetch Istio control plane metrics, then push them to OAP server.\nInstall Operator Follow Operator installation instrument to install the operator.\nInstall Istio control plane Follow Install with istioctl to install a istiod.\nDeploy Fetcher, OAP server and UI with default settings Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f fetcher.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui,fetcher NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] NAME AGE fetcher.operator.skywalking.apache.org/istio-prod-cluster 36h View Istio Control Plane Dashboard from UI Follow View the UI to access the UI service.\nNavigate to Dashboard-\u0026gt;Istio Control Plane to view relevant metric diagrams.\n","title":"Fetch metrics from the Istio control plane(istiod)","url":"/docs/skywalking-swck/next/examples/istio-controlplane/"},{"content":"Fetch metrics from the Istio control plane(istiod) In this example, you will learn how to setup a Fetcher to fetch Istio control plane metrics, then push them to OAP server.\nInstall Operator Follow Operator installation instrument to install the operator.\nInstall Istio control plane Follow Install with istioctl to install a istiod.\nDeploy Fetcher, OAP server and UI with default settings Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f fetcher.yaml Get created custom resources as below:\n$ kubectl get oapserver,ui,fetcher NAME INSTANCES RUNNING ADDRESS oapserver.operator.skywalking.apache.org/default 1 1 default-oap.skywalking-swck-system NAME INSTANCES RUNNING INTERNALADDRESS EXTERNALIPS PORTS ui.operator.skywalking.apache.org/default 1 1 default-ui.skywalking-swck-system [80] NAME AGE fetcher.operator.skywalking.apache.org/istio-prod-cluster 36h View Istio 
Control Plane Dashboard from UI Follow View the UI to access the UI service.\nNavigate to Dashboard-\u0026gt;Istio Control Plane to view relevant metric diagrams.\n","title":"Fetch metrics from the Istio control plane(istiod)","url":"/docs/skywalking-swck/v0.9.0/examples/istio-controlplane/"},{"content":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-als-v2-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-als-v2-grpc-forwarder/"},{"content":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-als-v2-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-als-v2-grpc-forwarder/"},{"content":"Forwarder/envoy-als-v2-grpc-forwarder Description This is a synchronization ALS v2 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-als-v2-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-als-v2-grpc-forwarder/"},{"content":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-als-v3-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-als-v3-grpc-forwarder/"},{"content":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-als-v3-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-als-v3-grpc-forwarder/"},{"content":"Forwarder/envoy-als-v3-grpc-forwarder Description This is a synchronization ALS v3 grpc forwarder with the Envoy ALS protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-als-v3-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-als-v3-grpc-forwarder/"},{"content":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-metrics-v2-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-metrics-v2-grpc-forwarder/"},{"content":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-metrics-v2-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-metrics-v2-grpc-forwarder/"},{"content":"Forwarder/envoy-metrics-v2-grpc-forwarder Description This is a synchronization Metrics v2 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    
","title":"Forwarder/envoy-metrics-v2-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-metrics-v2-grpc-forwarder/"},{"content":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-metrics-v3-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_envoy-metrics-v3-grpc-forwarder/"},{"content":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-metrics-v3-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_envoy-metrics-v3-grpc-forwarder/"},{"content":"Forwarder/envoy-metrics-v3-grpc-forwarder Description This is a synchronization Metrics v3 grpc forwarder with the Envoy metrics protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/envoy-metrics-v3-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_envoy-metrics-v3-grpc-forwarder/"},{"content":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-cds-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-cds-grpc-forwarder/"},{"content":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-cds-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-cds-grpc-forwarder/"},{"content":"Forwarder/native-cds-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-cds-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-cds-grpc-forwarder/"},{"content":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-clr-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-clr-grpc-forwarder/"},{"content":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-clr-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-clr-grpc-forwarder/"},{"content":"Forwarder/native-clr-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    
","title":"Forwarder/native-clr-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-clr-grpc-forwarder/"},{"content":"Forwarder/native-ebpf-accesslog-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native eBPF access log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-ebpf-accesslog-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-ebpf-accesslog-grpc-forwarder/"},{"content":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-ebpf-profiling-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-ebpf-profiling-grpc-forwarder/"},{"content":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-ebpf-profiling-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-ebpf-profiling-grpc-forwarder/"},{"content":"Forwarder/native-ebpf-profiling-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-ebpf-profiling-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-ebpf-profiling-grpc-forwarder/"},{"content":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native event protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-event-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-event-grpc-forwarder/"},{"content":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native event protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-event-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-event-grpc-forwarder/"},{"content":"Forwarder/native-event-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native event protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-event-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-event-grpc-forwarder/"},{"content":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-jvm-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-jvm-grpc-forwarder/"},{"content":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    
","title":"Forwarder/native-jvm-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-jvm-grpc-forwarder/"},{"content":"Forwarder/native-jvm-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native Configuration Discovery Service protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-jvm-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-jvm-grpc-forwarder/"},{"content":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-log-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-log-grpc-forwarder/"},{"content":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-log-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-log-grpc-forwarder/"},{"content":"Forwarder/native-log-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-log-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-log-grpc-forwarder/"},{"content":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the SkyWalking native log protocol.\nDefaultConfig # The remote topic. topic:\u0026#34;log-topic\u0026#34;Configuration    Name Type Description     topic string The forwarder topic.    ","title":"Forwarder/native-log-kafka-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-log-kafka-forwarder/"},{"content":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the SkyWalking native log protocol.\nDefaultConfig # The remote topic. topic:\u0026#34;log-topic\u0026#34;Configuration    Name Type Description     topic string The forwarder topic.    ","title":"Forwarder/native-log-kafka-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-log-kafka-forwarder/"},{"content":"Forwarder/native-log-kafka-forwarder Description This is a synchronization Kafka forwarder with the SkyWalking native log protocol.\nDefaultConfig # The remote topic. topic:\u0026#34;log-topic\u0026#34;Configuration    Name Type Description     topic string The forwarder topic.    
","title":"Forwarder/native-log-kafka-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-log-kafka-forwarder/"},{"content":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native management protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-management-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-management-grpc-forwarder/"},{"content":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native management protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-management-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-management-grpc-forwarder/"},{"content":"Forwarder/native-management-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native management protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-management-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-management-grpc-forwarder/"},{"content":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder with the SkyWalking meter protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180Configuration    Name Type Description     routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","title":"Forwarder/native-meter-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-meter-grpc-forwarder/"},{"content":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder with the SkyWalking meter protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180Configuration    Name Type Description     routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","title":"Forwarder/native-meter-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-meter-grpc-forwarder/"},{"content":"Forwarder/native-meter-grpc-forwarder Description This is a synchronization meter grpc forwarder with the SkyWalking meter protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180Configuration    Name Type Description     routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    
","title":"Forwarder/native-meter-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-meter-grpc-forwarder/"},{"content":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-process-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-process-grpc-forwarder/"},{"content":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-process-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-process-grpc-forwarder/"},{"content":"Forwarder/native-process-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native process protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-process-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-process-grpc-forwarder/"},{"content":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-profile-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-profile-grpc-forwarder/"},{"content":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-profile-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-profile-grpc-forwarder/"},{"content":"Forwarder/native-profile-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native log protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-profile-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-profile-grpc-forwarder/"},{"content":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native tracing protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-tracing-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_native-tracing-grpc-forwarder/"},{"content":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native tracing protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-tracing-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_native-tracing-grpc-forwarder/"},{"content":"Forwarder/native-tracing-grpc-forwarder Description This is a synchronization grpc forwarder with the SkyWalking native tracing protocol.\nDefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Forwarder/native-tracing-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_native-tracing-grpc-forwarder/"},{"content":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with the OpenTelemetry 
metrics v1 protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180# The label key of the routing data, multiple keys are split by \u0026#34;,\u0026#34;routing_label_keys:net.host.name,host.name,job,service.nameConfiguration    Name Type Description     routing_label_keys string The label key of the routing data, multiple keys are split by \u0026ldquo;,\u0026rdquo;   routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","title":"Forwarder/otlp-metrics-v1-grpc-forwarder","url":"/docs/skywalking-satellite/latest/en/setup/plugins/forwarder_otlp-metrics-v1-grpc-forwarder/"},{"content":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with the OpenTelemetry metrics v1 protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180# The label key of the routing data, multiple keys are split by \u0026#34;,\u0026#34;routing_label_keys:net.host.name,host.name,job,service.nameConfiguration    Name Type Description     routing_label_keys string The label key of the routing data, multiple keys are split by \u0026ldquo;,\u0026rdquo;   routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","title":"Forwarder/otlp-metrics-v1-grpc-forwarder","url":"/docs/skywalking-satellite/next/en/setup/plugins/forwarder_otlp-metrics-v1-grpc-forwarder/"},{"content":"Forwarder/otlp-metrics-v1-grpc-forwarder Description This is a synchronization grpc forwarder with the OpenTelemetry metrics v1 protocol.\nDefaultConfig # The LRU policy cache size for hosting routine rules of service instance.routing_rule_lru_cache_size:5000# The TTL of the LRU cache size for hosting routine rules of service instance.routing_rule_lru_cache_ttl:180# The label key of the routing data, multiple keys are split by \u0026#34;,\u0026#34;routing_label_keys:net.host.name,host.name,job,service.nameConfiguration    Name Type Description     routing_label_keys string The label key of the routing data, multiple keys are split by \u0026ldquo;,\u0026rdquo;   routing_rule_lru_cache_size int The LRU policy cache size for hosting routine rules of service instance.   routing_rule_lru_cache_ttl int The TTL of the LRU cache size for hosting routine rules of service instance.    ","title":"Forwarder/otlp-metrics-v1-grpc-forwarder","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/forwarder_otlp-metrics-v1-grpc-forwarder/"},{"content":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. 
Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the complier\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls banyand/build/bin bydbctl ","title":"Get Binaries","url":"/docs/skywalking-banyandb/latest/installation/binaries/"},{"content":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.22 Node 20.12 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release or make -C banyand static builds out a static binary banyand-server-static for releasing. make -C banyand debug gives a binary for debugging without the complier\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  
Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static $ ls bydbctl/build/bin bydbctl bydbctl--darwin-amd64 bydbctl--darwin-arm64 bydbctl--linux-386 bydbctl--linux-amd64 bydbctl--linux-arm64 bydbctl--windows-386 bydbctl--windows-amd64 ","title":"Get Binaries","url":"/docs/skywalking-banyandb/next/installation/binaries/"},{"content":"Get Binaries This page shows how to get binaries of Banyand.\nPrebuilt Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS, which introduces several platform-specific characteristics into the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries Issue the command below to get the basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the compiler\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls bydbctl/build/bin bydbctl ","title":"Get Binaries","url":"/docs/skywalking-banyandb/v0.5.0/installation/binaries/"},{"content":"Getting Started This document introduces how to create a kubernetes cluster locally using kind and how to deploy the basic skywalking components to the cluster.\nPrerequisites  docker \u0026gt;= v20.10.6 kubectl \u0026gt;= v1.21.0 kind \u0026gt;= v0.20.0 swctl \u0026gt;= v0.10.0  Step1: Create a kubernetes cluster locally using kind  Note: If you have a kubernetes cluster (\u0026gt; v1.21.10) already, you can skip this step.\n Here we create a kubernetes cluster with 1 control-plane node and 1 worker node.\n$ cat \u0026lt;\u0026lt;EOF | kind create cluster --config=- kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane image: kindest/node:v1.21.10 - role: worker image: kindest/node:v1.21.10 EOF  Expected output Creating cluster \u0026#34;kind\u0026#34; ... ✓ Ensuring node image (kindest/node:v1.21.10) 🖼 ✓ Preparing nodes 📦 📦 ✓ Writing configuration 📜 ✓ Starting control-plane 🕹️ ✓ Installing CNI 🔌 ✓ Installing StorageClass 💾 ✓ Joining worker nodes 🚜 Set kubectl context to \u0026#34;kind-kind\u0026#34; You can now use your cluster with: kubectl cluster-info --context kind-kind Not sure what to do next? 
😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/  Check all pods in the cluster.\n$ kubectl get pods -A  Expected output NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-558bd4d5db-h5gxt 1/1 Running 0 106s kube-system coredns-558bd4d5db-lhnvz 1/1 Running 0 106s kube-system etcd-kind-control-plane 1/1 Running 0 116s kube-system kindnet-fxlkm 1/1 Running 0 106s kube-system kindnet-vmcvl 1/1 Running 0 91s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 116s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 116s kube-system kube-proxy-nr4f4 1/1 Running 0 91s kube-system kube-proxy-zl4h2 1/1 Running 0 106s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 116s local-path-storage local-path-provisioner-74567d47b4-kmtjh 1/1 Running 0 106s  Step2: Build the operator image Check into the root directory of SWCK and build the operator image as follows.\n$ cd operator # Build the operator image $ make docker-build You will get the operator image controller:latest as follows.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE controller latest 84da7509092a 22 seconds ago 53.6MB Load the operator image into the kind cluster or push the image to a registry that your kubernetes cluster can access.\n$ kind load docker-image controller or\n$ docker push $(YOUR_REGISTRY)/controller Step3: Deploy operator on the kubernetes cluster Install the CRDs as follows.\n$ make install Check the CRDs are installed successfully.\n Expected output kubectl get crd | grep skywalking banyandbs.operator.skywalking.apache.org 2023-11-05T03:30:43Z fetchers.operator.skywalking.apache.org 2023-11-05T03:30:43Z javaagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverdynamicconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapservers.operator.skywalking.apache.org 2023-11-05T03:30:43Z satellites.operator.skywalking.apache.org 2023-11-05T03:30:43Z storages.operator.skywalking.apache.org 2023-11-05T03:30:43Z swagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z uis.operator.skywalking.apache.org 2023-11-05T03:30:43Z  Deploy the SWCK operator to the cluster.\n$ make deploy Or deploy the SWCK operator to the cluster with your own image.\n$ make deploy OPERATOR_IMG=$(YOUR_REGISTRY)/controller Get the status of the SWCK operator pod.\n$ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-5f5bbd4fd-9wdw6 2/2 Running 0 34s Step4: Deploy skywalking componentes on the kubernetes cluster Create the skywalking-system namespace.\n$ kubectl create namespace skywalking-system Deploy the skywalking components to the cluster.\n$ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: OAPServer metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-oap-server:9.5.0 service: template: type: ClusterIP --- apiVersion: operator.skywalking.apache.org/v1alpha1 kind: UI metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-ui:9.5.0 OAPServerAddress: http://skywalking-system-oap.skywalking-system:12800 service: template: type: ClusterIP ingress: host: demo.ui.skywalking EOF Check the status of the skywalking components.\n$ kubectl get pod -n skywalking-system NAME READY STATUS RESTARTS AGE skywalking-system-oap-68bd877f57-fhzdz 1/1 Running 0 6m23s 
skywalking-system-ui-6db8579b47-rphtl 1/1 Running 0 6m23s Step5: Use the java agent injector to inject the java agent into the application pod Label the namespace where the application pod is located with swck-injection=enabled.\n$ kubectl label namespace skywalking-system swck-injection=enabled Create the application pod.\n Note: The application pod must be labeled with swck-java-agent-injected=true and the agent.skywalking.apache.org/collector.backend_service annotation must be set to the address of the OAP server. For more configurations, please refer to the guide.\n $ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: apps/v1 kind: Deployment metadata: name: demo namespace: skywalking-system spec: selector: matchLabels: app: demo template: metadata: labels: # enable the java agent injector swck-java-agent-injected: \u0026#34;true\u0026#34; app: demo annotations: agent.skywalking.apache.org/collector.backend_service: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34; spec: containers: - name: demo1 imagePullPolicy: IfNotPresent image: ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1 command: [\u0026#34;java\u0026#34;] args: [\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;] ports: - containerPort: 8085 readinessProbe: httpGet: path: /hello port: 8085 initialDelaySeconds: 3 periodSeconds: 3 failureThreshold: 10 --- apiVersion: v1 kind: Service metadata: name: demo namespace: skywalking-system spec: type: ClusterIP ports: - name: 8085-tcp port: 8085 protocol: TCP targetPort: 8085 selector: app: demo EOF Check the status of the application pod and make sure the java agent is injected into the application pod.\n$ kubectl get pod -n skywalking-system -l app=demo -ojsonpath=\u0026#39;{.items[0].spec.initContainers[0]}\u0026#39;  Expected output {\u0026#34;args\u0026#34;:[\u0026#34;-c\u0026#34;,\u0026#34;mkdir -p /sky/agent \\u0026\\u0026 cp -r /skywalking/agent/* /sky/agent\u0026#34;],\u0026#34;command\u0026#34;:[\u0026#34;sh\u0026#34;],\u0026#34;image\u0026#34;:\u0026#34;apache/skywalking-java-agent:8.16.0-java8\u0026#34;,\u0026#34;imagePullPolicy\u0026#34;:\u0026#34;IfNotPresent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;inject-skywalking-agent\u0026#34;,\u0026#34;resources\u0026#34;:{},\u0026#34;terminationMessagePath\u0026#34;:\u0026#34;/dev/termination-log\u0026#34;,\u0026#34;terminationMessagePolicy\u0026#34;:\u0026#34;File\u0026#34;,\u0026#34;volumeMounts\u0026#34;:[{\u0026#34;mountPath\u0026#34;:\u0026#34;/sky/agent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;sky-agent\u0026#34;},{\u0026#34;mountPath\u0026#34;:\u0026#34;/var/run/secrets/kubernetes.io/serviceaccount\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;kube-api-access-4qk26\u0026#34;,\u0026#34;readOnly\u0026#34;:true}]}  Also, you could check the final java agent configurations with the following command.\n$ kubectl get javaagent -n skywalking-system -l app=demo -oyaml  Expected output apiVersion: v1 items: - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2023-11-19T05:34:03Z\u0026#34; generation: 1 labels: app: demo name: app-demo-javaagent namespace: skywalking-system ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo-75d8d995cc uid: 8cb64abc-9b50-4f67-9304-2e09de476168 resourceVersion: \u0026#34;21515\u0026#34; uid: 6cbafb3d-9f43-4448-95e8-bda1f7c72bc3 spec: agentConfiguration: collector.backend_service: skywalking-system-oap.skywalking-system:11800 optional-plugin: 
webflux|cloud-gateway-2.1.x backendService: skywalking-system-oap.skywalking-system:11800 podSelector: app=demo serviceName: Your_ApplicationName status: creationTime: \u0026#34;2023-11-19T05:34:03Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2023-11-19T05:34:46Z\u0026#34; realInjectedNum: 1 kind: List metadata: resourceVersion: \u0026#34;\u0026#34; selfLink: \u0026#34;\u0026#34;  If you want to check the logs of the java agent, you can run the following command.\n$ kubectl logs -f -n skywalking-system -l app=demo -c inject-skywalking-agent Step6: Check the application metrics in the skywalking UI First, port-forward the demo service to your local machine.\n$ kubectl port-forward svc/demo 8085:8085 -n skywalking-system Then, trigger the application to generate some metrics.\n$ for i in {1..10}; do curl http://127.0.0.1:8085/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done After that, you can port-forward the skywalking UI to your local machine.\n$ kubectl port-forward svc/skywalking-system-ui 8080:80 -n skywalking-system Open the skywalking UI in your browser and navigate to http://127.0.0.1:8080 to check the application metrics.\n Expected output  Also, if you want to expose the external metrics to the kubernetes HPA, you can follow the guide to deploy the custom metrics adapter and you may get some inspiration from the e2e test.\n","title":"Getting Started","url":"/docs/skywalking-swck/next/getting-started/"},{"content":"Getting Started This document introduces how to create a kubernetes cluster locally using kind and how to deploy the basic skywalking components to the cluster.\nPrerequisites  docker \u0026gt;= v20.10.6 kubectl \u0026gt;= v1.21.0 kind \u0026gt;= v0.20.0 swctl \u0026gt;= v0.10.0  Step1: Create a kubernetes cluster locally using kind  Note: If you have a kubernetes cluster (\u0026gt; v1.21.10) already, you can skip this step.\n Here we create a kubernetes cluster with 1 control-plane node and 1 worker nodes.\n$ cat \u0026lt;\u0026lt;EOF | kind create cluster --config=- kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane image: kindest/node:v1.21.10 - role: worker image: kindest/node:v1.21.10 EOF  Expected output Creating cluster \u0026#34;kind\u0026#34; ... ✓ Ensuring node image (kindest/node:v1.21.10) 🖼 ✓ Preparing nodes 📦 📦 ✓ Writing configuration 📜 ✓ Starting control-plane 🕹️ ✓ Installing CNI 🔌 ✓ Installing StorageClass 💾 ✓ Joining worker nodes 🚜 Set kubectl context to \u0026#34;kind-kind\u0026#34; You can now use your cluster with: kubectl cluster-info --context kind-kind Not sure what to do next? 
😅 Check out https://kind.sigs.k8s.io/docs/user/quick-start/  Check all pods in the cluster.\n$ kubectl get pods -A  Expected output NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-558bd4d5db-h5gxt 1/1 Running 0 106s kube-system coredns-558bd4d5db-lhnvz 1/1 Running 0 106s kube-system etcd-kind-control-plane 1/1 Running 0 116s kube-system kindnet-fxlkm 1/1 Running 0 106s kube-system kindnet-vmcvl 1/1 Running 0 91s kube-system kube-apiserver-kind-control-plane 1/1 Running 0 116s kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 116s kube-system kube-proxy-nr4f4 1/1 Running 0 91s kube-system kube-proxy-zl4h2 1/1 Running 0 106s kube-system kube-scheduler-kind-control-plane 1/1 Running 0 116s local-path-storage local-path-provisioner-74567d47b4-kmtjh 1/1 Running 0 106s  Step2: Build the operator image Check into the root directory of SWCK and build the operator image as follows.\n$ cd operator # Build the operator image $ make docker-build You will get the operator image controller:latest as follows.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE controller latest 84da7509092a 22 seconds ago 53.6MB Load the operator image into the kind cluster or push the image to a registry that your kubernetes cluster can access.\n$ kind load docker-image controller or\n$ docker push $(YOUR_REGISTRY)/controller Step3: Deploy operator on the kubernetes cluster Install the CRDs as follows.\n$ make install Check the CRDs are installed successfully.\n Expected output kubectl get crd | grep skywalking banyandbs.operator.skywalking.apache.org 2023-11-05T03:30:43Z fetchers.operator.skywalking.apache.org 2023-11-05T03:30:43Z javaagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapserverdynamicconfigs.operator.skywalking.apache.org 2023-11-05T03:30:43Z oapservers.operator.skywalking.apache.org 2023-11-05T03:30:43Z satellites.operator.skywalking.apache.org 2023-11-05T03:30:43Z storages.operator.skywalking.apache.org 2023-11-05T03:30:43Z swagents.operator.skywalking.apache.org 2023-11-05T03:30:43Z uis.operator.skywalking.apache.org 2023-11-05T03:30:43Z  Deploy the SWCK operator to the cluster.\n$ make deploy Or deploy the SWCK operator to the cluster with your own image.\n$ make deploy OPERATOR_IMG=$(YOUR_REGISTRY)/controller Get the status of the SWCK operator pod.\n$ kubectl get pod -n skywalking-swck-system NAME READY STATUS RESTARTS AGE skywalking-swck-controller-manager-5f5bbd4fd-9wdw6 2/2 Running 0 34s Step4: Deploy skywalking componentes on the kubernetes cluster Create the skywalking-system namespace.\n$ kubectl create namespace skywalking-system Deploy the skywalking components to the cluster.\n$ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: OAPServer metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-oap-server:9.5.0 service: template: type: ClusterIP --- apiVersion: operator.skywalking.apache.org/v1alpha1 kind: UI metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-ui:9.5.0 OAPServerAddress: http://skywalking-system-oap.skywalking-system:12800 service: template: type: ClusterIP ingress: host: demo.ui.skywalking EOF Check the status of the skywalking components.\n$ kubectl get pod -n skywalking-system NAME READY STATUS RESTARTS AGE skywalking-system-oap-68bd877f57-fhzdz 1/1 Running 0 6m23s 
skywalking-system-ui-6db8579b47-rphtl 1/1 Running 0 6m23s Step5: Use the java agent injector to inject the java agent into the application pod Label the namespace where the application pod is located with swck-injection=enabled.\n$ kubectl label namespace skywalking-system swck-injection=enabled Create the application pod.\n Note: The application pod must be labeled with swck-java-agent-injected=true and the agent.skywalking.apache.org/collector.backend_service annotation must be set to the address of the OAP server. For more configurations, please refer to the guide.\n $ cat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: apps/v1 kind: Deployment metadata: name: demo namespace: skywalking-system spec: selector: matchLabels: app: demo template: metadata: labels: # enable the java agent injector swck-java-agent-injected: \u0026#34;true\u0026#34; app: demo annotations: agent.skywalking.apache.org/collector.backend_service: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34; spec: containers: - name: demo1 imagePullPolicy: IfNotPresent image: ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1 command: [\u0026#34;java\u0026#34;] args: [\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;] ports: - containerPort: 8085 readinessProbe: httpGet: path: /hello port: 8085 initialDelaySeconds: 3 periodSeconds: 3 failureThreshold: 10 --- apiVersion: v1 kind: Service metadata: name: demo namespace: skywalking-system spec: type: ClusterIP ports: - name: 8085-tcp port: 8085 protocol: TCP targetPort: 8085 selector: app: demo EOF Check the status of the application pod and make sure the java agent is injected into the application pod.\n$ kubectl get pod -n skywalking-system -l app=demo -ojsonpath=\u0026#39;{.items[0].spec.initContainers[0]}\u0026#39;  Expected output {\u0026#34;args\u0026#34;:[\u0026#34;-c\u0026#34;,\u0026#34;mkdir -p /sky/agent \\u0026\\u0026 cp -r /skywalking/agent/* /sky/agent\u0026#34;],\u0026#34;command\u0026#34;:[\u0026#34;sh\u0026#34;],\u0026#34;image\u0026#34;:\u0026#34;apache/skywalking-java-agent:8.16.0-java8\u0026#34;,\u0026#34;imagePullPolicy\u0026#34;:\u0026#34;IfNotPresent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;inject-skywalking-agent\u0026#34;,\u0026#34;resources\u0026#34;:{},\u0026#34;terminationMessagePath\u0026#34;:\u0026#34;/dev/termination-log\u0026#34;,\u0026#34;terminationMessagePolicy\u0026#34;:\u0026#34;File\u0026#34;,\u0026#34;volumeMounts\u0026#34;:[{\u0026#34;mountPath\u0026#34;:\u0026#34;/sky/agent\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;sky-agent\u0026#34;},{\u0026#34;mountPath\u0026#34;:\u0026#34;/var/run/secrets/kubernetes.io/serviceaccount\u0026#34;,\u0026#34;name\u0026#34;:\u0026#34;kube-api-access-4qk26\u0026#34;,\u0026#34;readOnly\u0026#34;:true}]}  Also, you could check the final java agent configurations with the following command.\n$ kubectl get javaagent -n skywalking-system -l app=demo -oyaml  Expected output apiVersion: v1 items: - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2023-11-19T05:34:03Z\u0026#34; generation: 1 labels: app: demo name: app-demo-javaagent namespace: skywalking-system ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo-75d8d995cc uid: 8cb64abc-9b50-4f67-9304-2e09de476168 resourceVersion: \u0026#34;21515\u0026#34; uid: 6cbafb3d-9f43-4448-95e8-bda1f7c72bc3 spec: agentConfiguration: collector.backend_service: skywalking-system-oap.skywalking-system:11800 optional-plugin: 
webflux|cloud-gateway-2.1.x backendService: skywalking-system-oap.skywalking-system:11800 podSelector: app=demo serviceName: Your_ApplicationName status: creationTime: \u0026#34;2023-11-19T05:34:03Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2023-11-19T05:34:46Z\u0026#34; realInjectedNum: 1 kind: List metadata: resourceVersion: \u0026#34;\u0026#34; selfLink: \u0026#34;\u0026#34;  If you want to check the logs of the java agent, you can run the following command.\n$ kubectl logs -f -n skywalking-system -l app=demo -c inject-skywalking-agent Step6: Check the application metrics in the skywalking UI First, port-forward the demo service to your local machine.\n$ kubectl port-forward svc/demo 8085:8085 -n skywalking-system Then, trigger the application to generate some metrics.\n$ for i in {1..10}; do curl http://127.0.0.1:8085/hello \u0026amp;\u0026amp; echo \u0026#34;\u0026#34;; done After that, you can port-forward the skywalking UI to your local machine.\n$ kubectl port-forward svc/skywalking-system-ui 8080:80 -n skywalking-system Open the skywalking UI in your browser and navigate to http://127.0.0.1:8080 to check the application metrics.\n Expected output  Also, if you want to expose the external metrics to the kubernetes HPA, you can follow the guide to deploy the custom metrics adapter and you may get some inspiration from the e2e test.\n","title":"Getting Started","url":"/docs/skywalking-swck/v0.9.0/getting-started/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. 
For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
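To make the priority ordering above concrete, here is a minimal sketch of how an incoming path could be ranked against OpenAPI path templates: an exact template wins, then the template with fewer variables, then the longer template with every {variable} counted as length 1. The helper names are hypothetical and this is only an illustration of the described rules, not the OAP server's actual implementation.

```python
import re

# Illustrative sketch of the match priority described above; not SkyWalking's real matcher.
VAR = re.compile(r"^\{.+\}$")

def matches(template: str, path: str) -> bool:
    """A template matches a path when segment counts align and {vars} accept any value."""
    t, p = template.strip("/").split("/"), path.strip("/").split("/")
    return len(t) == len(p) and all(VAR.match(ts) or ts == ps for ts, ps in zip(t, p))

def priority_key(template: str):
    segs = template.strip("/").split("/")
    n_vars = sum(1 for s in segs if VAR.match(s))
    length = sum(1 if VAR.match(s) else len(s) for s in segs)  # vars count as length 1
    return (n_vars, -length)  # fewer variables first, then the longer template

def pick(path: str, templates: list) -> str:
    candidates = [t for t in templates if matches(t, path)]
    return min(candidates, key=priority_key) if candidates else path

templates = ["/products", "/products/{var1}/{var2}", "/products/{var1}/abc",
             "/products/abc/{var1}", "/products/{var12345}/ef"]
print(pick("/products", templates))          # exact path wins: /products
print(pick("/products/123/abc", templates))  # fewer variables: /products/{var1}/abc
print(pick("/products/abc/ef", templates))   # longer template: /products/abc/{var1}
```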
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","title":"Group Parameterized Endpoints","url":"/docs/main/latest/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
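Because both x-sw-endpoint-name-match-rule and x-sw-endpoint-name-format are built from the same two placeholders, a short sketch can show how an incoming endpoint name is split by the match rule and re-assembled by the format. The group helper and its regex translation below are assumptions for illustration only; they are not the actual EndpointGroupingRuleReader4Openapi logic.

```python
import re

# Hypothetical sketch of applying ${METHOD}/${PATH} placeholders from the match rule
# and format; assumes the concrete path already matched an OAS path template.
def group(incoming: str, oas_path_template: str,
          match_rule: str = "${METHOD}:${PATH}",
          name_format: str = "${METHOD}:${PATH}") -> str:
    # Turn the match rule into a regex: ${METHOD} captures the HTTP verb,
    # ${PATH} captures the concrete request path.
    pattern = re.escape(match_rule)
    pattern = pattern.replace(re.escape("${METHOD}"), r"(?P<method>[A-Z]+)")
    pattern = pattern.replace(re.escape("${PATH}"), r"(?P<path>/\S+)")
    m = re.fullmatch(pattern, incoming)
    if not m:
        return incoming  # the rule does not match; keep the reported name
    # The captured path is the part matched against the OAS template; the grouped
    # name substitutes the template for the raw path.
    return (name_format
            .replace("${METHOD}", m.group("method"))
            .replace("${PATH}", oas_path_template))

print(group("GET:/products/123", "/products/{id}"))
# GET:/products/{id}
print(group("GET:/products/123", "/products/{id}", name_format="${PATH}:<${METHOD}>"))
# /products/{id}:<GET>
```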
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","title":"Group Parameterized Endpoints","url":"/docs/main/next/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path being matched. E.g. /products or /products/inventory The path which has less variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
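For the custom-configuration flavour shown at the end of this page (pairs of endpoint-name and regex per service), the sketch below illustrates how such a rule list could be evaluated against incoming endpoint names. The rule values are taken from the example in this document; the helper and the use of full-string matching are assumptions, not the backend's exact behaviour.

```python
import re

# Illustrative only: rules mirroring the endpoint-name-grouping.yml example
# (service-name: serviceA, endpoint-name: /prod/{id}, regex: \/prod\/.+).
RULES = {
    "serviceA": [
        {"endpoint_name": "/prod/{id}", "regex": r"\/prod\/.+"},
    ],
}

def group_endpoint(service: str, endpoint: str) -> str:
    for rule in RULES.get(service, []):
        if re.fullmatch(rule["regex"], endpoint):
            return rule["endpoint_name"]   # logic name used when the regex matches
    return endpoint                        # unmatched names stay as reported

print(group_endpoint("serviceA", "/prod/ORDER123"))  # -> /prod/{id}
print(group_endpoint("serviceA", "/users/42"))       # -> /users/42
print(group_endpoint("serviceB", "/prod/ORDER123"))  # -> /prod/ORDER123 (no rules)
```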
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpiont Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","title":"Group Parameterized Endpoints","url":"/docs/main/v9.0.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
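The default for x-sw-service-name described above is simply the directory that holds the OpenAPI document, as in the openapi-definitions layout shown earlier on this page. A tiny sketch of that fallback, under the assumption that only the immediate sub-directory name is used (the helper is hypothetical):

```python
from pathlib import Path

# Hypothetical helper: use x-sw-service-name when set, otherwise fall back to the
# name of the sub-directory that contains the OpenAPI definition document.
def default_service_name(doc_path: str, explicit_name: str = "") -> str:
    if explicit_name:                      # x-sw-service-name wins when present
        return explicit_name
    return Path(doc_path).parent.name      # otherwise the directory name is the service

print(default_service_name("openapi-definitions/serviceB/productAPI-v2.yaml"))
# -> serviceB
print(default_service_name("openapi-definitions/serviceB/productAPI-v2.yaml", "serviceC"))
# -> serviceC
```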
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpiont Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","title":"Group Parameterized Endpoints","url":"/docs/main/v9.1.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPAENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
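The endpoint-name-grouping.yml content quoted in these entries is likewise flattened; a minimal sketch of the same grouping rule, assuming standard two-space YAML indentation:

grouping:
  # Endpoint of the service would follow the following rules
  - service-name: serviceA
    rules:
      # Logic name when the regex expression matched.
      - endpoint-name: /prod/{id}
        regex: \/prod\/.+

Under such a rule, an incoming endpoint like /prod/ORDER123 on serviceA matches the regex and is reported under the logic name /prod/{id}.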
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","title":"Group Parameterized Endpoints","url":"/docs/main/v9.2.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","title":"Group Parameterized Endpoints","url":"/docs/main/v9.3.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
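The use-case tables in these entries also exercise a non-default x-sw-endpoint-name-format. The row that uses the format ${PATH}:<${METHOD}> (with the service name and match rule left at their defaults) corresponds, as a sketch with assumed quoting, to a document header like:

openapi: 3.0.0
x-sw-endpoint-name-format: "${PATH}:<${METHOD}>"
info:
  description: OpenAPI definition for SkyWalking test.
  version: v2
  title: Product API

With that format, the incoming endpoint GET:/products/123 is grouped as /products/{id}:<GET>, as the table rows show.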
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# Logic name when the regex expression matched.- endpoint-name:/prod/{id}regex:\\/prod\\/.+","title":"Group Parameterized Endpoints","url":"/docs/main/v9.4.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","title":"Group Parameterized Endpoints","url":"/docs/main/v9.5.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","title":"Group Parameterized Endpoints","url":"/docs/main/v9.6.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"Group Parameterized Endpoints In most cases, endpoints are detected automatically through language agents, service mesh observability solutions, or meter system configurations.\nThere are some special cases, especially when REST-style URI is used, where the application codes include the parameter in the endpoint name, such as putting order ID in the URI. Examples are /prod/ORDER123 and /prod/ORDER456. But logically, most would expect to have an endpoint name like prod/{order-id}. This is a specially designed feature in parameterized endpoint grouping.\nIf the incoming endpoint name accords with the rules, SkyWalking will group the endpoint by rules.\nThere are two approaches in which SkyWalking supports endpoint grouping:\n Endpoint name grouping by OpenAPI definitions. Endpoint name grouping by custom configurations.  Both grouping approaches can work together in sequence.\nEndpoint name grouping by OpenAPI definitions The OpenAPI definitions are documents based on the OpenAPI Specification (OAS), which is used to define a standard, language-agnostic interface for HTTP APIs.\nSkyWalking now supports OAS v2.0+. It could parse the documents (yaml) and build grouping rules from them automatically.\nHow to use   Add Specification Extensions for SkyWalking config in the OpenAPI definition documents; otherwise, all configs are default:\n${METHOD} is a reserved placeholder which represents the HTTP method, e.g. POST/GET... . ${PATH} is a reserved placeholder which represents the path, e.g. /products/{id}.\n   Extension Name Required Description Default Value     x-sw-service-name false The service name to which these endpoints belong. The directory name to which the OpenAPI definition documents belong.   
x-sw-endpoint-name-match-rule false The rule used to match the endpoint. ${METHOD}:${PATH}   x-sw-endpoint-name-format false The endpoint name after grouping. ${METHOD}:${PATH}    These extensions are under OpenAPI Object. For example, the document below has a full custom config:\n  openapi:3.0.0x-sw-service-name:serviceBx-sw-endpoint-name-match-rule:\u0026#34;${METHOD}:${PATH}\u0026#34;x-sw-endpoint-name-format:\u0026#34;${METHOD}:${PATH}\u0026#34;info:description:OpenAPI definition for SkyWalking test.version:v2title:Product API...We highly recommend using the default config. The custom config (x-sw-endpoint-name-match-rule/x-sw-endpoint-name-format) is considered part of the match rules (regex pattern). We have provided some use cases in org.apache.skywalking.oap.server.core.config.group.openapi.EndpointGroupingRuleReader4OpenapiTest. You may validate your custom config as well.\nAll OpenAPI definition documents are located in the openapi-definitions directory, with directories having at most two levels. We recommend using the service name as the subDirectory name, as you will then not be required to set x-sw-service-name. For example:  ├── openapi-definitions │ ├── serviceA │ │ ├── customerAPI-v1.yaml │ │ └── productAPI-v1.yaml │ └── serviceB │ └── productAPI-v2.yaml The feature is enabled by default. You can disable it by setting the Core Module configuration ${SW_CORE_ENABLE_ENDPOINT_NAME_GROUPING_BY_OPENAPI:false}.  Rules match priority We recommend designing the API path as clearly as possible. If the API path is fuzzy and an endpoint name matches multiple paths, SkyWalking would select a path according to the match priority set out below:\n The exact path is matched. E.g. /products or /products/inventory The path with fewer variables. E.g. In the case of /products/{var1}/{var2} and /products/{var1}/abc, endpoint name /products/123/abc will match the second one. If the paths have the same number of variables, the longest path is matched, and the vars are considered to be 1. E.g. In the case of /products/abc/{var1} and products/{var12345}/ef, endpoint name /products/abc/ef will match the first one, because length(\u0026quot;abc\u0026quot;) = 3 is larger than length(\u0026quot;ef\u0026quot;) = 2.  
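In the newer entries (v9.5.0 onward), the custom grouping rules drop the explicit regex and use the {var} placeholder instead; a readable sketch of that flattened config, indentation assumed:

grouping:
  # Endpoint of the service would follow the following rules
  - service-name: serviceA
    rules:
      # {var} represents any variable string in the URI.
      - /prod/{var}

Endpoints such as /prod/ORDER123 and /prod/ORDER456 then both collapse to the single name /prod/{var}.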
Examples If we have an OpenAPI definition doc productAPI-v2.yaml in directory serviceB, it will look like this:\nopenapi:3.0.0info:description:OpenAPI definition for SkyWalking test.version:v2title:Product APItags:- name:productdescription:product- name:relatedProductsdescription:Related Productspaths:/products:get:tags:- productsummary:Get all products listdescription:Get all products list.operationId:getProductsresponses:\u0026#34;200\u0026#34;:description:Successcontent:application/json:schema:type:arrayitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;/products/{region}/{country}:get:tags:- productsummary:Get products regionaldescription:Get products regional with the given id.operationId:getProductRegionalparameters:- name:regionin:pathdescription:Products regionrequired:trueschema:type:string- name:countryin:pathdescription:Products countryrequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/Product\u0026#34;\u0026#34;400\u0026#34;:description:Invalid parameters supplied/products/{id}:get:tags:- productsummary:Get product detailsdescription:Get product details with the given id.operationId:getProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/ProductDetails\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idpost:tags:- productsummary:Update product detailsdescription:Update product details with the given id.operationId:updateProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64- name:namein:querydescription:Product namerequired:trueschema:type:stringresponses:\u0026#34;200\u0026#34;:description:successful operationdelete:tags:- productsummary:Delete product detailsdescription:Delete product details with the given id.operationId:deleteProductparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operation/products/{id}/relatedProducts:get:tags:- relatedProductssummary:Get related productsdescription:Get related products with the given product id.operationId:getRelatedProductsparameters:- name:idin:pathdescription:Product idrequired:trueschema:type:integerformat:int64responses:\u0026#34;200\u0026#34;:description:successful operationcontent:application/json:schema:$ref:\u0026#34;#/components/schemas/RelatedProducts\u0026#34;\u0026#34;400\u0026#34;:description:Invalid product idcomponents:schemas:Product:type:objectdescription:Product id and nameproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namerequired:- id- nameProductDetails:type:objectdescription:Product detailsproperties:id:type:integerformat:int64description:Product idname:type:stringdescription:Product namedescription:type:stringdescription:Product descriptionrequired:- id- nameRelatedProducts:type:objectdescription:Related Productsproperties:id:type:integerformat:int32description:Product idrelatedProducts:type:arraydescription:List of related productsitems:$ref:\u0026#34;#/components/schemas/Product\u0026#34;Here are some use cases:\n   Incoming Endpoint Incoming Service x-sw-service-name x-sw-endpoint-name-match-rule x-sw-endpoint-name-format Matched Grouping Result     GET:/products serviceB default default default true 
GET:/products   GET:/products/asia/cn serviceB default default default true GET:/products/{region}/{country}   GET:/products/123 serviceB default default default true GET:/products{id}   GET:/products/123/abc/efg serviceB default default default false GET:/products/123/abc/efg   \u0026lt;GET\u0026gt;:/products/123 serviceB default default default false \u0026lt;GET\u0026gt;:/products/123   GET:/products/123 serviceC default default default false GET:/products/123   GET:/products/123 serviceC serviceC default default true GET:/products/123   \u0026lt;GET\u0026gt;:/products/123 serviceB default \u0026lt;${METHOD}\u0026gt;:${PATH} \u0026lt;${METHOD}\u0026gt;:${PATH} true \u0026lt;GET\u0026gt;:/products/{id}   GET:/products/123 serviceB default default ${PATH}:\u0026lt;${METHOD}\u0026gt; true /products/{id}:\u0026lt;GET\u0026gt;   /products/123:\u0026lt;GET\u0026gt; serviceB default ${PATH}:\u0026lt;${METHOD}\u0026gt; default true GET:/products/{id}    Initialize and update the OpenAPI definitions dynamically Use Dynamic Configuration to initialize and update OpenAPI definitions, the endpoint grouping rules from OpenAPI will re-create by the new config.\nEndpoint name grouping by custom configuration Currently, a user could set up grouping rules through the static YAML file named endpoint-name-grouping.yml, or use Dynamic Configuration to initialize and update endpoint grouping rules.\nConfiguration Format Both the static local file and dynamic configuration value share the same YAML format.\ngrouping:# Endpoint of the service would follow the following rules- service-name:serviceArules:# {var} represents any variable string in the URI.- /prod/{var}","title":"Group Parameterized Endpoints","url":"/docs/main/v9.7.0/en/setup/backend/endpoint-grouping-rules/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. 
TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.
NOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. For more details, see Enable mTLS mode on gRPC receiver.
You can enable gRPC SSL by adding the following lines to application.yml/core/default.

```yaml
gRPCSslEnabled: true
gRPCSslKeyPath: /path/to/server.pem
gRPCSslCertChainPath: /path/to/server.crt
gRPCSslTrustedCAPath: /path/to/ca.crt
```

gRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.
 There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They share the core module configuration.
 When new files are in place, they can be loaded dynamically, and you won't have to restart an OAP instance.
Enable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:

```yaml
gRPCPort: ${SW_RECEIVER_GRPC_PORT:"changeMe"}
gRPCSslEnabled: true
gRPCSslKeyPath: /path/to/server.pem
gRPCSslCertChainPath: /path/to/server.crt
```

Since receiver-sharing-server only receives data from an external source, it doesn't need a CA at all. But you have to configure the CA for the clients, such as the Java agent or Satellite. If you use the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.
NOTE: set SW_RECEIVER_GRPC_PORT to a non-zero value to enable receiver-sharing-server. The port is open for the clients.
Enable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enabling mTLS mode for the gRPC channel requires the Sharing gRPC Server to be enabled, with a configuration such as the following.

```yaml
receiver-sharing-server:
  selector: ${SW_RECEIVER_SHARING_SERVER:default}
  default:
    # For gRPC server
    gRPCHost: ${SW_RECEIVER_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_RECEIVER_GRPC_PORT:"changeMe"}
    maxConcurrentCallsPerConnection: ${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}
    maxMessageSize: ${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}
    gRPCThreadPoolQueueSize: ${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}
    gRPCThreadPoolSize: ${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}
    gRPCSslEnabled: ${SW_RECEIVER_GRPC_SSL_ENABLED:true}
    gRPCSslKeyPath: ${SW_RECEIVER_GRPC_SSL_KEY_PATH:"/path/to/server.pem"}
    gRPCSslCertChainPath: ${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:"/path/to/server.crt"}
    gRPCSslTrustedCAsPath: ${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:"/path/to/ca.crt"}
    authentication: ${SW_AUTHENTICATION:""}
```

You can still use this script to generate the CA certificate and the key files for the server side (the OAP server) and the client side (Agent/Satellite). Note that the server-side and client-side keys must be issued from the same CA certificate.
","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/latest/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. 
The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. 
Enabling mTLS mode for the gRPC channel requires the Sharing gRPC Server to be enabled, with a configuration such as the following.

```yaml
receiver-sharing-server:
  selector: ${SW_RECEIVER_SHARING_SERVER:default}
  default:
    # For gRPC server
    gRPCHost: ${SW_RECEIVER_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_RECEIVER_GRPC_PORT:"changeMe"}
    maxConcurrentCallsPerConnection: ${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}
    maxMessageSize: ${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}
    gRPCThreadPoolSize: ${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}
    gRPCSslEnabled: ${SW_RECEIVER_GRPC_SSL_ENABLED:true}
    gRPCSslKeyPath: ${SW_RECEIVER_GRPC_SSL_KEY_PATH:"/path/to/server.pem"}
    gRPCSslCertChainPath: ${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:"/path/to/server.crt"}
    gRPCSslTrustedCAsPath: ${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:"/path/to/ca.crt"}
    authentication: ${SW_AUTHENTICATION:""}
```

You can still use this script to generate the CA certificate and the key files for the server side (the OAP server) and the client side (Agent/Satellite). Note that the server-side and client-side keys must be issued from the same CA certificate.
","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/next/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.
To enable this feature, follow the steps below.
Preparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.
The advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.
The first step is to generate certificates and private key files for encrypting communication.
Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.
Use this script if you are not familiar with how to generate key files.
We need the following files:
- ca.crt: A certificate authority public key for a client to validate the server's certificate.
- server.pem, client.pem: A private RSA key to sign and authenticate the public key. It's either a PKCS#8(PEM) or PKCS#1(DER).
- server.crt, client.crt: Self-signed X.509 public keys for distribution.
TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.
NOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. For more details, see Enable mTLS mode on gRPC receiver.
You can enable gRPC SSL by adding the following lines to application.yml/core/default.

```yaml
gRPCSslEnabled: true
gRPCSslKeyPath: /path/to/server.pem
gRPCSslCertChainPath: /path/to/server.crt
gRPCSslTrustedCAPath: /path/to/ca.crt
```

gRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.
 There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They share the core module configuration.
 When new files are in place, they can be loaded dynamically, and you won't have to restart an OAP instance.
Enable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:

```yaml
gRPCPort: ${SW_RECEIVER_GRPC_PORT:"changeMe"}
gRPCSslEnabled: true
gRPCSslKeyPath: /path/to/server.pem
gRPCSslCertChainPath: /path/to/server.crt
```

Since receiver-sharing-server only receives data from an external source, it doesn't need a CA at all. But you have to configure the CA for the clients, such as the Java agent or Satellite. If you use the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.
NOTE: set SW_RECEIVER_GRPC_PORT to a non-zero value to enable receiver-sharing-server. The port is open for the clients.
Enable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enabling mTLS mode for the gRPC channel requires the Sharing gRPC Server to be enabled, with a configuration such as the following.

```yaml
receiver-sharing-server:
  selector: ${SW_RECEIVER_SHARING_SERVER:default}
  default:
    # For gRPC server
    gRPCHost: ${SW_RECEIVER_GRPC_HOST:0.0.0.0}
    gRPCPort: ${SW_RECEIVER_GRPC_PORT:"changeMe"}
    maxConcurrentCallsPerConnection: ${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}
    maxMessageSize: ${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}
    gRPCThreadPoolQueueSize: ${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}
    gRPCThreadPoolSize: ${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}
    gRPCSslEnabled: ${SW_RECEIVER_GRPC_SSL_ENABLED:true}
    gRPCSslKeyPath: ${SW_RECEIVER_GRPC_SSL_KEY_PATH:"/path/to/server.pem"}
    gRPCSslCertChainPath: ${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:"/path/to/server.crt"}
    gRPCSslTrustedCAsPath: ${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:"/path/to/ca.crt"}
    authentication: ${SW_AUTHENTICATION:""}
```

You can still use this script to generate the CA certificate and the key files for the server side (the OAP server) and the client side (Agent/Satellite). Note that the server-side and client-side keys must be issued from the same CA certificate.
","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.0.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. 
Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. 
Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.1.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. 
More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.2.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. 
Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. 
Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.3.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. 
More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.4.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. 
Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. 
Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.5.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. 
More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.6.0/en/setup/backend/grpc-security/"},{"content":"gRPC SSL transportation support for OAP server For OAP communication, we are currently using gRPC, a multi-platform RPC framework that uses protocol buffers for message serialization. The nice part about gRPC is that it promotes the use of SSL/TLS to authenticate and encrypt exchanges. Now OAP supports enabling SSL transportation for gRPC receivers. 
Since 8.8.0, OAP supports enabling mutual TLS authentication between probes and OAP servers.\nTo enable this feature, follow the steps below.\nPreparation By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. Its configuration is in application.yml/core/default section.\nThe advanced gRPC receiver is only for communication with the probes. This configuration is in application.yml/receiver-sharing-server/default section.\nThe first step is to generate certificates and private key files for encrypting communication.\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate. server.pem, client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). server.crt, client.crt: Self-signed X.509 public keys for distribution.  TLS on OAP servers By default, the communication between OAP nodes and the communication between receiver and probe share the same gRPC server. That means once you enable SSL for receivers and probes, the OAP nodes will enable it too.\nNOTE: SkyWalking does not support enabling mTLS on OAP server nodes communication. That means you have to enable receiver-sharing-server for enabling mTLS on communication between probes and OAP servers. More details see Enable mTLS mode on gRPC receiver.\nYou can enable gRPC SSL by adding the following lines to application.yml/core/default.\ngRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtgRPCSslTrustedCAPath:/path/to/ca.crtgRPCSslKeyPath and gRPCSslCertChainPath are loaded by the OAP server to encrypt communication. gRPCSslTrustedCAPath helps the gRPC client to verify server certificates in cluster mode.\n There is a gRPC client and server in every OAP server node. The gRPC client communicates with OAP servers in cluster mode. They are sharing the core module configuration.\n When new files are in place, they can be loaded dynamically, and you won\u0026rsquo;t have to restart an OAP instance.\nEnable TLS on independent gRPC receiver If you enable receiver-sharing-server to ingest data from an external source, add the following lines to application.yml/receiver-sharing-server/default:\ngRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}gRPCSslEnabled:truegRPCSslKeyPath:/path/to/server.pemgRPCSslCertChainPath:/path/to/server.crtSince receiver-sharing-server only receives data from an external source, it doesn\u0026rsquo;t need a CA at all. But you have to configure the CA for the clients, such as Java agent, Satellite. If you port to the Java agent, refer to the Java agent repo to configure the Java agent and enable TLS.\nNOTE: change the SW_RECEIVER_GRPC_PORT as non-zero to enable receiver-sharing-server. And the port is open for the clients.\nEnable mTLS mode on gRPC receiver Since 8.8.0, SkyWalking has supported mutual TLS authentication for transporting between clients and OAP servers. 
Enable mTLS mode for the gRPC channel requires Sharing gRPC Server enabled, as the following configuration.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:\u0026#34;changeMe\u0026#34;}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:true}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;/path/to/server.pem\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;/path/to/server.crt\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;/path/to/ca.crt\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}You can still use this script to generate CA certificate and the key files of server-side(for OAP Server) and client-side(for Agent/Satellite). You have to notice the keys, including server and client-side, are from the same CA certificate.\n","title":"gRPC SSL transportation support for OAP server","url":"/docs/main/v9.7.0/en/setup/backend/grpc-security/"},{"content":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","title":"Guide","url":"/docs/main/latest/en/guides/i18n/"},{"content":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","title":"Guide","url":"/docs/main/next/en/guides/i18n/"},{"content":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. 
The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","title":"Guide","url":"/docs/main/v9.6.0/en/guides/i18n/"},{"content":"Guide  This section explains how to manage translations for internationalization of menu items.\n SkyWalking UI\u0026rsquo;s internationalization translations are in the src/locales/lang. The translations include menu name and description. The translation key of menu name is the value of i18nKey from menu definition file. The translation key of description consists of the i18nKey value and _desc suffix. The description contents will be displayed on the Marketplace page.\nThe following is a typical menu name and description for i18nKey=general_service\n{ \u0026#34;general_service\u0026#34;: \u0026#34;General Service\u0026#34;, \u0026#34;general_service_desc\u0026#34;: \u0026#34;Observe services and relative direct dependencies through telemetry data collected from SkyWalking Agents.\u0026#34; } ","title":"Guide","url":"/docs/main/v9.7.0/en/guides/i18n/"},{"content":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","title":"Guides","url":"/docs/main/latest/en/guides/community/"},{"content":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, feature proposal and uncertain bug discussion. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in. For Chinese speaker, send [CN] Request to join SkyWalking slack mail to the mail list(dev@skywalking.apache.org), we will invite you in.  ","title":"Guides","url":"/docs/main/next/en/guides/community/"},{"content":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. 
Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.
 Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.
For code developers For developers, the starting point is the Compiling Guide. It guides developers on how to build the project locally and set up the environment.
Integration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you'll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).
SkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.
If you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.
Please be advised that if you're writing integration tests, name them with the pattern IT* so they would only run when the property skipITs is set to false.
Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.
We have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).
JMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmarks.jar for all benchmarks, or via java -jar benchmarks.jar exampleClassName for a specific test.
To output test results in JSON format, add -rf json, e.g. java -jar benchmarks.jar -rf json. If you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report save path; a report in JSON format will be generated when the run ends.
More information about JMH can be found here: jmh docs.
End to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.
 End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.
 The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we migrated to e2e-v2, which leverages skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP, such as sending requests and getting response metrics from the OAP.
Writing E2E Cases
 Set up the environment: set up skywalking-infra-e2e; set up skywalking-cli, yq (generally these 2 are enough) and other tools if your cases need them. You can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.
 Orchestrate the components: the goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.
To make the orchestration process easier, we're using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.
Follow these steps:
 Decide what (and how many) containers will be needed. For example, for cluster testing, you'll need > 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml (a sketch follows these steps). Write the expected data (yml) for verify.
 Run e2e test: all e2e cases should be under skywalking/test/e2e-v2/cases. You could execute the e2e run command in skywalking/, e.g.
e2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml
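For orientation, an e2e.yaml case file roughly follows the skywalking-infra-e2e layout sketched below. The field names are recalled from that project, and the compose file, query and expected-data paths are placeholders, so treat the skywalking-infra-e2e documentation and the existing cases under test/e2e-v2/cases as the source of truth:

```yaml
setup:
  env: compose                 # orchestrate the case's containers with docker-compose
  file: docker-compose.yml     # placeholder path to the compose file of this case
  timeout: 20m
verify:
  retry:
    count: 20                  # retry failed verifications before giving up
    interval: 3s
  cases:
    # each case pairs a skywalking-cli query with an expected-data yml file
    - query: swctl --display yaml --base-url=http://${oap_host}:${oap_12800}/graphql service ls
      expected: expected/service.yml
```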
E2E is an integral part of this effort.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we have migrated to e2e-v2, which leverages skywalking-infra-e2e and skywalking-cli to drive the whole e2e process. skywalking-infra-e2e is used to control the e2e process, and skywalking-cli is used to interact with the OAP, such as sending requests and getting response metrics from it.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and other tools if your cases need them. You can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), in the single-node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using docker-compose, which provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data (yml) for verification.   Run e2e test  All e2e cases should be under skywalking/test/e2e-v2/cases. You can execute the e2e run command in skywalking/, e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to stdout in the non-CI (local) mode; in CI mode, they are saved and uploaded to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interested in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. 
Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe have a simple script to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Build a distribution package and unzip/untar it to folder dist. Run the script in the root directory. It will print out all new dependencies. Check the LICENSE and NOTICE of those dependencies to make sure that they can be included in an ASF project. Add them to the apm-dist/release-docs/{LICENSE,NOTICE} file. Add the names of these dependencies to the tools/dependencies/known-oap-backend-dependencies.txt file (in alphabetical order). check-LICENSE.sh should pass in the next run.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","title":"Guides","url":"/docs/main/v9.0.0/en/guides/readme/"},{"content":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. 
Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. 
E2E is an integral part of this effort.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we have migrated to e2e-v2, which leverages skywalking-infra-e2e and skywalking-cli to drive the whole e2e process. skywalking-infra-e2e is used to control the e2e process, and skywalking-cli is used to interact with the OAP, such as sending requests and getting response metrics from it.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and other tools if your cases need them. You can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), in the single-node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using docker-compose, which provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data (yml) for verification.   Run e2e test  All e2e cases should be under skywalking/test/e2e-v2/cases. You can execute the e2e run command in skywalking/, e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to stdout in the non-CI (local) mode; in CI mode, they are saved and uploaded to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interested in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. 
Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe have a simple script to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Build a distribution package and unzip/untar it to folder dist. Run the script in the root directory. It will print out all new dependencies. Check the LICENSE and NOTICE of those dependencies to make sure that they can be included in an ASF project. Add them to the apm-dist/release-docs/{LICENSE,NOTICE} file. Add the names of these dependencies to the tools/dependencies/known-oap-backend-dependencies.txt file (in alphabetical order). check-LICENSE.sh should pass in the next run.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","title":"Guides","url":"/docs/main/v9.1.0/en/guides/readme/"},{"content":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. 
Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. 
E2E is an integral part of this effort.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we have migrated to e2e-v2, which leverages skywalking-infra-e2e and skywalking-cli to drive the whole e2e process. skywalking-infra-e2e is used to control the e2e process, and skywalking-cli is used to interact with the OAP, such as sending requests and getting response metrics from it.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and other tools if your cases need them. You can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), in the single-node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using docker-compose, which provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data (yml) for verification.   Run e2e test  All e2e cases should be under skywalking/test/e2e-v2/cases. You can execute the e2e run command in skywalking/, e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to stdout in the non-CI (local) mode; in CI mode, they are saved and uploaded to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interested in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. 
Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","title":"Guides","url":"/docs/main/v9.2.0/en/guides/readme/"},{"content":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. 
See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts with IT) and leaves them for maven-failsafe-plugin to run, which is bound to the verify goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please set the property skipITs to false as well as the profiles of the modules whose ITs you want to run. E.g. if you would like to run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipITs=false, and if you would like to run all the ITs, simply run ./mvnw clean verify -DskipITs=false.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* so they would only run when property skipITs is set to false.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. 
And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. 
However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document. If you would like to add a new root menu or sub-menu to booster UI, read the UI menu control document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Profile The performance profile is an enhancement feature in the APM system. We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  
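The UI menu control extension point listed above works together with the i18n translation keys described earlier in this guide. The fragment below is purely hypothetical: only i18nKey and the general_service key come from the i18n guide, while the surrounding field names are illustrative and the real menu definition file may be structured differently.
# Hypothetical menu definition fragment; every field name except i18nKey is illustrative.
menus:
  - name: GeneralService
    i18nKey: general_service        # resolved to general_service / general_service_desc in src/locales/lang
    subMenus: []                    # sub-menu entries would carry their own i18nKey values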
Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","title":"Guides","url":"/docs/main/v9.3.0/en/guides/readme/"},{"content":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. 
if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. 
set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document. If you would like to add a new root menu or sub-menu to booster UI, read the UI menu control document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Profile The performance profile is an enhancement feature in the APM system. 
We use thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the cost would be significantly reduced compared to using distributed tracing to locate the slow method. This feature is suitable in the production environment. The following documents are key to understanding the essential parts of this feature.\n Profile data report protocol is provided through gRPC, just like other traces and JVM data. Thread dump merging mechanism introduces the merging mechanism. This mechanism helps end users understand profile reports. Exporter tool of profile raw data guides you on how to package the original profile data for issue reports when the visualization doesn\u0026rsquo;t work well on the official UI.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn about how to create an official Apache version release in accordance with avoid Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","title":"Guides","url":"/docs/main/v9.4.0/en/guides/readme/"},{"content":"Guides There are many ways you can contribute to the SkyWalking community.\n Go through our documents, and point out or fix a problem. Translate the documents into other languages. Download our releases, try to monitor your applications, and provide feedback to us. Read our source codes. For details, reach out to us. If you find any bugs, submit an issue. You can also try to fix it. Find good first issue issues. This is a good place for you to start. Submit an issue or start a discussion at GitHub issue. See all mail list discussions at website list review. If you are already a SkyWalking committer, you can log in and use the mail list in the browser mode. Otherwise, subscribe following the step below. Issue reports and discussions may also take place via dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org, and follow the instructions in the reply to subscribe to the mail list.  Contact Us All the following channels are open to the community.\n Submit an issue for an issue or feature proposal. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Submit a discussion to ask questions.  Become an official Apache SkyWalking Committer The PMC assesses the contributions of every contributor, including their code contributions. It also promotes, votes on, and invites new committers and PMC members according to the Apache guides. See Become official Apache SkyWalking Committer for more details.\nFor code developer For developers, the starting point is the Compiling Guide. It guides developers on how to build the project in local and set up the environment.\nIntegration Tests After setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. 
maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\nJava Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\nEnd to End Tests (E2E) Since version 6.3.0, we have introduced more automatic tests to perform software quality assurance. E2E is an integral part of it.\n End-to-end testing is a methodology used to test whether the flow of an application is performing as designed from start to finish. The purpose of carrying out end-to-end tests is to identify system dependencies and to ensure that the right information is passed between various system components and systems.\n The E2E test involves some/all of the OAP server, storage, coordinator, webapp, and the instrumented services, all of which are orchestrated by docker-compose or KinD. Since version 8.9.0, we immigrate to e2e-v2 which leverage skywalking-infra-e2e and skywalking-cli to do the whole e2e process. skywalking-infra-e2e is used to control the e2e process and skywalking-cli is used to interact with the OAP such as request and get response metrics from OAP.\nWriting E2E Cases  Set up the environment   Set up skywalking-infra-e2e Set up skywalking-cli, yq (generally these 2 are enough) and others tools if your cases need. Can reference the script under skywalking/test/e2e-v2/script/prepare/setup-e2e-shell.   Orchestrate the components  The goal of the E2E tests is to test the SkyWalking project as a whole, including the OAP server, storage, coordinator, webapp, and even the frontend UI (not for now), on the single node mode as well as the cluster mode. 
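Before the concrete steps below, a rough sketch of the e2e.yaml case config may help. The key names are an approximation of the skywalking-infra-e2e format and may not match the exact schema; use an existing case under test/e2e-v2/cases as the authoritative reference.
# Illustrative e2e.yaml sketch; key names approximate skywalking-infra-e2e and may differ.
setup:
  env: compose                                 # orchestrate with docker-compose (kind is the alternative)
  file: docker-compose.yml                     # the compose file written in the orchestration step
  timeout: 20m
verify:
  retry:
    count: 20                                  # retry while telemetry data is still warming up
    interval: 10s
  cases:
    - query: swctl --display yaml service ls   # a skywalking-cli query whose output is compared...
      expected: expected/service.yml           # ...against this expected data (yml) file
The expected data file then lists the values (or matcher templates) that the query output must contain; again, existing cases show the exact matcher syntax.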
Therefore, the first step is to determine what case we are going to verify, and orchestrate the components.\nTo make the orchestration process easier, we\u0026rsquo;re using a docker-compose that provides a simple file format (docker-compose.yml) for orchestrating the required containers, and offers an opportunity to define the dependencies of the components.\nFollow these steps:\n Decide what (and how many) containers will be needed. For example, for cluster testing, you\u0026rsquo;ll need \u0026gt; 2 OAP nodes, coordinators (e.g. zookeeper), storage (e.g. ElasticSearch), and instrumented services; Define the containers in docker-compose.yml, and carefully specify the dependencies, starting orders, and most importantly, link them together, e.g. set the correct OAP address on the agent end, and set the correct coordinator address in OAP, etc. Define the e2e case config in e2e.yaml. Write the expected data(yml) for verify.   Run e2e test  All e2e cases should under skywalking/test/e2e-v2/cases. You could execute e2e run command in skywalking/ e.g.\ne2e run -c test/e2e-v2/cases/alarm/h2/e2e.yaml  Troubleshooting  We expose all logs from all containers to the stdout in the non-CI (local) mode, but save and upload them to the GitHub server. You can download them (only when the tests have failed) at \u0026ldquo;Artifacts/Download artifacts/logs\u0026rdquo; (see top right) for debugging.\nNOTE: Please verify the newly-added E2E test case locally first. However, if you find that it has passed locally but failed in the PR check status, make sure that all the updated/newly-added files (especially those in the submodules) are committed and included in the PR, or reset the git HEAD to the remote and verify locally again.\nProject Extensions The SkyWalking project supports various extensions of existing features. If you are interesting in writing extensions, read the following guides.\nThis guides you in developing SkyWalking agent plugins to support more frameworks. Developers for both open source and private plugins should read this.\n If you would like to build a new probe or plugin in any language, please read the Component library definition and extension document. Storage extension development guide. Potential contributors can learn how to build a new storage implementor in addition to the official one. Customize analysis using OAL scripts. OAL scripts are located in config/oal/*.oal. You could modify them and reboot the OAP server. Read Observability Analysis Language Introduction to learn more about OAL scripts. Source and scope extension for new metrics. For analysis of a new metric which SkyWalking hasn\u0026rsquo;t yet provided, add a new receiver. You would most likely have to add a new source and scope. To learn how to do this, read the document. If you would like to add a new root menu or sub-menu to booster UI, read the UI menu control document.  OAP backend dependency management  This section is only applicable to dependencies of the backend module.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. 
Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  Release If you\u0026rsquo;re a committer, read the Apache Release Guide to learn how to create an official Apache version release in accordance with Apache\u0026rsquo;s rules. As long as you keep our LICENSE and NOTICE, the Apache license allows everyone to redistribute.\n","title":"Guides","url":"/docs/main/v9.5.0/en/guides/readme/"},{"content":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, propose features, or discuss uncertain bugs. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send a Request to join SkyWalking slack mail to the mail list (dev@skywalking.apache.org), and we will invite you in. For Chinese speakers, send a [CN] Request to join SkyWalking slack mail to the mail list (dev@skywalking.apache.org), and we will invite you in.  ","title":"Guides","url":"/docs/main/v9.6.0/en/guides/community/"},{"content":"Guides There are many ways you can connect and contribute to the SkyWalking community.\n Submit an issue for an addressed issue or feature implementation plan. Submit a discussion to ask questions, propose features, or discuss uncertain bugs. Mail list: dev@skywalking.apache.org. Mail to dev-subscribe@skywalking.apache.org. Follow the instructions in the reply to subscribe to the mail list. Send a Request to join SkyWalking slack mail to the mail list (dev@skywalking.apache.org), and we will invite you in. For Chinese speakers, send a [CN] Request to join SkyWalking slack mail to the mail list (dev@skywalking.apache.org), and we will invite you in.  ","title":"Guides","url":"/docs/main/v9.7.0/en/guides/community/"},{"content":"Guides If you want to debug or develop SkyWalking Rover, the following documentation will guide you.\n Contribution  How to contribute a module?   Compile  How to compile SkyWalking Rover?    ","title":"Guides","url":"/docs/skywalking-rover/latest/en/guides/readme/"},{"content":"Guides If you want to debug or develop SkyWalking Rover, the following documentation will guide you.\n Contribution  How to contribute a module?   Compile  How to compile SkyWalking Rover?    ","title":"Guides","url":"/docs/skywalking-rover/next/en/guides/readme/"},{"content":"Guides If you want to debug or develop SkyWalking Rover, the following documentation will guide you.\n Contribution  How to contribute a module?   Compile  How to compile SkyWalking Rover?    ","title":"Guides","url":"/docs/skywalking-rover/v0.6.0/en/guides/readme/"},{"content":"Guides If you want to debug or develop SkyWalking Satellite, the following documentation will guide you.\n Contribution  How to contribute a plugin? How to release SkyWalking Satellite?   
Compile  How to compile SkyWalking Satellite?   Test  How to add unit test for a plugin?    ","title":"Guides","url":"/docs/skywalking-satellite/latest/en/guides/readme/"},{"content":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would guide you.\n Contribution  How to contribute a plugin? How to release SkyWalking Satellite?   Compile  How to compile SkyWalking Satellite?   Test  How to add unit test for a plugin?    ","title":"Guides","url":"/docs/skywalking-satellite/next/en/guides/readme/"},{"content":"Guides If you want to debug or develop SkyWalking Satellite, The following documentations would guide you.\n Contribution  How to contribute a plugin? How to release SkyWalking Satellite?   Compile  How to compile SkyWalking Satellite?   Test  How to add unit test for a plugin?    ","title":"Guides","url":"/docs/skywalking-satellite/v1.2.0/en/guides/readme/"},{"content":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}","title":"H2","url":"/docs/main/latest/en/setup/backend/storages/h2/"},{"content":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}","title":"H2","url":"/docs/main/next/en/setup/backend/storages/h2/"},{"content":"H2 Activate H2 as storage, set storage provider to H2 In-Memory Databases by default in the distribution package. Please read Database URL Overview in H2 official document. You can set the target to H2 in Embedded, Server and Mixed modes.\nSetting fragment example\nstorage:selector:${SW_STORAGE:h2}h2:driver:org.h2.jdbcx.JdbcDataSourceurl:jdbc:h2:mem:skywalking-oap-dbuser:samaxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:100}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:1}","title":"H2","url":"/docs/main/v9.7.0/en/setup/backend/storages/h2/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/latest/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/next/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to check the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.0.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.1.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.2.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.3.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.4.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.5.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. 
This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.6.0/en/setup/backend/backend-health-check/"},{"content":"Health Check Health check intends to provide a unique approach to checking the health status of the OAP server. It includes the health status of modules, GraphQL, and gRPC services readiness.\n 0 means healthy, and more than 0 means unhealthy. less than 0 means that the OAP doesn\u0026rsquo;t start up.\n Health Checker Module. The Health Checker module helps observe the health status of modules. You may activate it as follows:\nhealth-checker:selector:${SW_HEALTH_CHECKER:default}default:checkIntervalSeconds:${SW_HEALTH_CHECKER_INTERVAL_SECONDS:5}Note: The telemetry module should be enabled at the same time. This means that the provider should not be - and none.\nAfter that, we can check the OAP server health status by querying GraphQL:\nquery{ checkHealth{ score details } } If the OAP server is healthy, the response should be\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 0, \u0026#34;details\u0026#34;: \u0026#34;\u0026#34; } } } If some modules are unhealthy (e.g. storage H2 is down), then the result may look as follows:\n{ \u0026#34;data\u0026#34;: { \u0026#34;checkHealth\u0026#34;: { \u0026#34;score\u0026#34;: 1, \u0026#34;details\u0026#34;: \u0026#34;storage_h2,\u0026#34; } } } Refer to checkHealth query for more details.\nThe readiness of GraphQL and gRPC Use the query above to check the readiness of GraphQL.\nOAP has implemented the gRPC Health Checking Protocol. 
You may use the grpc-health-probe or any other tools to check the health of OAP gRPC services.\nCLI tool Please follow the CLI doc to get the health status score directly through the checkhealth command.\n","title":"Health Check","url":"/docs/main/v9.7.0/en/setup/backend/backend-health-check/"},{"content":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and skywalking-python-profiling described how the threading-profiler works\nAnd this figure demonstrates how the profiler works as well:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread: get: /api/v1/user/ rect rgb(0,200,0) API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;-working thread: snapshot end working thread--\u0026gt;\u0026gt;-API: response It works well with threading mode because the whole process will be executed in the same thread, so the profiling thread can fetch the complete profiling info of the process of the API request.\nWhy doesn\u0026rsquo;t threading-profiler work in greenlet mode When the python program runs with gevent + greenlet, the process would be like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;-working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response In this circumstance, the snapshot of the working thread includes multi contexts of different greenlets, which will make skywalking confused to build the trace stack.\nFortunately, greenlet has an API for profiling, the doc is here. 
We can implement a greenlet profiler to solve this issue.\nHow the greenlet profiler works A greenlet profiler leverages the trace callback of greenlet, it works like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet and snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet and snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response We can set a callback function to the greenlet that we need to profiling, then when the greenlet.HUB switches the context in/out to the working thread, the callback will build a snapshot of the greenlet\u0026rsquo;s traceback and send it to skywalking.\nThe difference between these two profilers The greenlet profiler will significantly reduce the snapshot times of the profiling process, which means that it will cost less CPU time than the threading profiler.\n","title":"How does threading-profiler (the default mode) work","url":"/docs/skywalking-python/latest/en/profiling/profiling/"},{"content":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and skywalking-python-profiling described how the threading-profiler works\nAnd this figure demonstrates how the profiler works as well:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread: get: /api/v1/user/ rect rgb(0,200,0) API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;-working thread: snapshot end working thread--\u0026gt;\u0026gt;-API: response It works well with threading mode because the whole process will be executed in the same thread, so the profiling thread can fetch the complete profiling info of the process of the API request.\nWhy doesn\u0026rsquo;t threading-profiler work in greenlet mode When the python program runs with gevent + greenlet, the process would be like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;-working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response In this 
circumstance, the snapshot of the working thread includes multi contexts of different greenlets, which will make skywalking confused to build the trace stack.\nFortunately, greenlet has an API for profiling, the doc is here. We can implement a greenlet profiler to solve this issue.\nHow the greenlet profiler works A greenlet profiler leverages the trace callback of greenlet, it works like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet and snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet and snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response We can set a callback function to the greenlet that we need to profiling, then when the greenlet.HUB switches the context in/out to the working thread, the callback will build a snapshot of the greenlet\u0026rsquo;s traceback and send it to skywalking.\nThe difference between these two profilers The greenlet profiler will significantly reduce the snapshot times of the profiling process, which means that it will cost less CPU time than the threading profiler.\n","title":"How does threading-profiler (the default mode) work","url":"/docs/skywalking-python/next/en/profiling/profiling/"},{"content":"How does threading-profiler (the default mode) work These blogs skywalking-profiling and skywalking-python-profiling described how the threading-profiler works\nAnd this figure demonstrates how the profiler works as well:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread: get: /api/v1/user/ rect rgb(0,200,0) API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;working thread: snapshot profiling thread-\u0026gt;\u0026gt;-working thread: snapshot end working thread--\u0026gt;\u0026gt;-API: response It works well with threading mode because the whole process will be executed in the same thread, so the profiling thread can fetch the complete profiling info of the process of the API request.\nWhy doesn\u0026rsquo;t threading-profiler work in greenlet mode When the python program runs with gevent + greenlet, the process would be like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet API-\u0026gt;\u0026gt;+profiling thread: start profiling profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot profiling thread-\u0026gt;\u0026gt;working thread 1: snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet profiling thread-\u0026gt;\u0026gt;working 
thread 1: snapshot profiling thread-\u0026gt;\u0026gt;-working thread 1: snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response In this circumstance, the snapshot of the working thread includes multi contexts of different greenlets, which will make skywalking confused to build the trace stack.\nFortunately, greenlet has an API for profiling, the doc is here. We can implement a greenlet profiler to solve this issue.\nHow the greenlet profiler works A greenlet profiler leverages the trace callback of greenlet, it works like this:\nsequenceDiagram API-\u0026gt;\u0026gt;+working thread 1: get: /api/v1/user/ rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the profiled greenlet and snapshot working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end greenlet.HUB--\u0026gt;\u0026gt;+working thread 1: swap in the other greenlet rect rgb(0,200,0) greenlet.HUB--\u0026gt;\u0026gt;+working thread 2: swap in the profiled greenlet and snapshot working thread 2--\u0026gt;-greenlet.HUB : swap out the profiled greenlet and snapshot end working thread 1--\u0026gt;\u0026gt;-greenlet.HUB : swap out the other greenlet working thread 1--\u0026gt;\u0026gt;-API: response We can set a callback function to the greenlet that we need to profiling, then when the greenlet.HUB switches the context in/out to the working thread, the callback will build a snapshot of the greenlet\u0026rsquo;s traceback and send it to skywalking.\nThe difference between these two profilers The greenlet profiler will significantly reduce the snapshot times of the profiling process, which means that it will cost less CPU time than the threading profiler.\n","title":"How does threading-profiler (the default mode) work","url":"/docs/skywalking-python/v1.0.1/en/profiling/profiling/"},{"content":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or sub-menu, you should add data to src/router/data/xx and add translation contents for the title to src/locales/lang/xx in booster UI.\n Create a new file called xxx.ts in src/router/data. Add configurations to the xxx.ts, configurations should be like this.  export default [ { // Add `Infrastructure` menu  path: \u0026#34;\u0026#34;, name: \u0026#34;Infrastructure\u0026#34;, meta: { title: \u0026#34;infrastructure\u0026#34;, icon: \u0026#34;scatter_plot\u0026#34;, hasGroup: true, }, redirect: \u0026#34;/linux\u0026#34;, children: [ // Add a sub menu of the `Infrastructure`  { path: \u0026#34;/linux\u0026#34;, name: \u0026#34;Linux\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, // If there are Tabs widgets in your dashboards, add following extra configuration to provide static links to the specific tab.  { path: \u0026#34;/linux/tab/:activeTabIndex\u0026#34;, name: \u0026#34;LinuxActiveTabIndex\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, notShow: true, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, ], }, ]; import configurations in src/router/data/index.ts.  
import name from \u0026#34;./xxx\u0026#34;; ","title":"How to add a new root menu or sub-menu to booster UI","url":"/docs/main/v9.3.0/en/guides/how-to-add-menu/"},{"content":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or sub-menu, you should add data to src/router/data/xx and add translation contents for the title to src/locales/lang/xx in booster UI.\n Create a new file called xxx.ts in src/router/data. Add configurations to the xxx.ts, configurations should be like this.  export default [ { // Add `Infrastructure` menu  path: \u0026#34;\u0026#34;, name: \u0026#34;Infrastructure\u0026#34;, meta: { title: \u0026#34;infrastructure\u0026#34;, icon: \u0026#34;scatter_plot\u0026#34;, hasGroup: true, }, redirect: \u0026#34;/linux\u0026#34;, children: [ // Add a sub menu of the `Infrastructure`  { path: \u0026#34;/linux\u0026#34;, name: \u0026#34;Linux\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, // If there are Tabs widgets in your dashboards, add following extra configuration to provide static links to the specific tab.  { path: \u0026#34;/linux/tab/:activeTabIndex\u0026#34;, name: \u0026#34;LinuxActiveTabIndex\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, notShow: true, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, ], }, ]; import configurations in src/router/data/index.ts.  import name from \u0026#34;./xxx\u0026#34;; ","title":"How to add a new root menu or sub-menu to booster UI","url":"/docs/main/v9.4.0/en/guides/how-to-add-menu/"},{"content":"How to add a new root menu or sub-menu to booster UI If you would like to add a new root menu or sub-menu, you should add data to src/router/data/xx and add translation contents for the title to src/locales/lang/xx in booster UI.\n Create a new file called xxx.ts in src/router/data. Add configurations to the xxx.ts, configurations should be like this.  export default [ { // Add `Infrastructure` menu  path: \u0026#34;\u0026#34;, name: \u0026#34;Infrastructure\u0026#34;, meta: { title: \u0026#34;infrastructure\u0026#34;, icon: \u0026#34;scatter_plot\u0026#34;, hasGroup: true, }, redirect: \u0026#34;/linux\u0026#34;, children: [ // Add a sub menu of the `Infrastructure`  { path: \u0026#34;/linux\u0026#34;, name: \u0026#34;Linux\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, // If there are Tabs widgets in your dashboards, add following extra configuration to provide static links to the specific tab.  { path: \u0026#34;/linux/tab/:activeTabIndex\u0026#34;, name: \u0026#34;LinuxActiveTabIndex\u0026#34;, meta: { title: \u0026#34;linux\u0026#34;, notShow: true, layer: \u0026#34;OS_LINUX\u0026#34;, }, }, ], }, ]; import configurations in src/router/data/index.ts.  import name from \u0026#34;./xxx\u0026#34;; ","title":"How to add a new root menu or sub-menu to booster UI","url":"/docs/main/v9.5.0/en/guides/how-to-add-menu/"},{"content":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs and Controllers in SWCK.\n1. Install the kubebuilder  Notice, SWCK is built by kubebuilder v3.2.0, so you need to install it at first.\n SWCK is based on the kubebuilder, and you could download the kubebuilder by the script.\n2. Create CRD and Controller You can use kubebuilder create api to scaffold a new Kind and corresponding controller. 
Here we use the Demo as an example.\n$ cd operator \u0026amp;\u0026amp; kubebuilder create api --group operator --version v1alpha1 --kind Demo(Your CRD) Then you need to input twice y to create the Resource and Controller, and there will be some newly added files.\n$ git status On branch master Your branch is up to date with \u0026#39;origin/master\u0026#39;. Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: apis/operator/v1alpha1/zz_generated.deepcopy.go modified: config/crd/bases/operator.skywalking.apache.org_swagents.yaml modified: config/crd/kustomization.yaml modified: config/rbac/role.yaml modified: go.mod modified: go.sum modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_types.go config/crd/bases/operator.skywalking.apache.org_demoes.yaml config/crd/patches/cainjection_in_operator_demoes.yaml config/crd/patches/webhook_in_operator_demoes.yaml config/rbac/operator_demo_editor_role.yaml config/rbac/operator_demo_viewer_role.yaml config/samples/operator_v1alpha1_demo.yaml controllers/operator/demo_controller.go controllers/operator/suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) Next, we need to focus on the file apis/operator/v1alpha1/demo_types.go which defines your CRD, and the file controllers/operator/configuration_controller.go which defines the Controller. The others files are some configurations generated by the kubebuilder markers. Here are some references:\n  Kubebuilder project demo, in which you can understand the overall architecture.\n  How to add new-api, which you can find more details for oapserverconfig_types.go.\n  Controller-overview, where you can find more details about oapserverconfig_controller.go.\n  3. Create webhook If you want to fields or set defaults to CRs, creating webhooks is a good practice:\nkubebuilder create webhook --group operator --version v1alpha1 --kind Demo --defaulting --programmatic-validation The newly generated files are as follows.\n$ git status On branch master Your branch is ahead of \u0026#39;origin/master\u0026#39; by 1 commit. (use \u0026#34;git push\u0026#34; to publish your local commits) Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: config/webhook/manifests.yaml modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_webhook.go apis/operator/v1alpha1/webhook_suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) You can get more details through webhook-overview.\n4. Create the template Generally, a controller would generate a series of resources, such as workload, rbac, service, etc based on CRDs. SWCK is using the Go standard template engine to generate these resources. All template files are stored in the ./operator/pkg/operator/manifests. You could create a directory there such as demo to hold templates. The framework would transfer the CR as the arguments to these templates. 
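As an illustrative sketch only (the file path, labels, and template fields below are hypothetical and are not taken from the SWCK codebase), a single-resource template placed under ./operator/pkg/operator/manifests/demo might render a Service from the CR that the framework passes in:
apiVersion: v1
kind: Service
metadata:
  name: {{ .Name }}-demo
  namespace: {{ .Namespace }}
  labels:
    app: {{ .Name }}-demo
spec:
  selector:
    app: {{ .Name }}-demo
  ports:
    - port: 8080
Here {{ .Name }} and {{ .Namespace }} stand for metadata of the Demo CR; check the existing directories under manifests for the exact fields and helpers the framework actually exposes.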
More than CR, it supports passing custom rendering functions by setting up the TmplFunc. At last, you need to change the comment and add a field demo there to embed the template files into golang binaries.\n Notice, every file under the template directory can only contain one resource and we can\u0026rsquo;t use the --- to create multiple resources in a single file.\n 5. Build and Test SWCK needs to run in the k8s environment, so we highly recommend using the kind if you don\u0026rsquo;t have a cluster in hand. There are currently two ways to test your implementation.\n Before testing, please make sure you have the kind installed.\n  Test locally. After finishing your implementation, you could use the following steps to test locally:   Disable the webhook  export ENABLE_WEBHOOKS=false Run the main.go with the kubeconfig file.  go run main.go --kubeconfig=(use your kubeconfig file here, and the default is ~/.kube/config)  If you want to test the webhook, please refer the guide.\n  Test in-cluster.   Before testing the swck, please install cert-manager to provide the certificate for webhook in swck.  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml At first, you should build the swck image and load it into the kind cluster, and then you could install the crds and the operator as follows.  make docker-build \u0026amp;\u0026amp; kind load docker-image controller:latest \u0026amp;\u0026amp; make install \u0026amp;\u0026amp; make deploy After the swck is installed, and then you could use the following command to get the logs produced by the operator.  kubectl logs -f [skywalking-swck-controller-manager-*](../use the swck deployment name) -n skywalking-swck-system ","title":"How to add CRD and Controller in SWCK?","url":"/docs/skywalking-swck/latest/how-to-add-new-crd-and-controller/"},{"content":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs and Controllers in SWCK.\n1. Install the kubebuilder  Notice, SWCK is built by kubebuilder v3.2.0, so you need to install it at first.\n SWCK is based on the kubebuilder, and you could download the kubebuilder by the script.\n2. Create CRD and Controller You can use kubebuilder create api to scaffold a new Kind and corresponding controller. Here we use the Demo as an example.\n$ cd operator \u0026amp;\u0026amp; kubebuilder create api --group operator --version v1alpha1 --kind Demo(Your CRD) Then you need to input twice y to create the Resource and Controller, and there will be some newly added files.\n$ git status On branch master Your branch is up to date with \u0026#39;origin/master\u0026#39;. 
Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: apis/operator/v1alpha1/zz_generated.deepcopy.go modified: config/crd/bases/operator.skywalking.apache.org_swagents.yaml modified: config/crd/kustomization.yaml modified: config/rbac/role.yaml modified: go.mod modified: go.sum modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_types.go config/crd/bases/operator.skywalking.apache.org_demoes.yaml config/crd/patches/cainjection_in_operator_demoes.yaml config/crd/patches/webhook_in_operator_demoes.yaml config/rbac/operator_demo_editor_role.yaml config/rbac/operator_demo_viewer_role.yaml config/samples/operator_v1alpha1_demo.yaml controllers/operator/demo_controller.go controllers/operator/suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) Next, we need to focus on the file apis/operator/v1alpha1/demo_types.go which defines your CRD, and the file controllers/operator/configuration_controller.go which defines the Controller. The others files are some configurations generated by the kubebuilder markers. Here are some references:\n  Kubebuilder project demo, in which you can understand the overall architecture.\n  How to add new-api, which you can find more details for oapserverconfig_types.go.\n  Controller-overview, where you can find more details about oapserverconfig_controller.go.\n  3. Create webhook If you want to fields or set defaults to CRs, creating webhooks is a good practice:\nkubebuilder create webhook --group operator --version v1alpha1 --kind Demo --defaulting --programmatic-validation The newly generated files are as follows.\n$ git status On branch master Your branch is ahead of \u0026#39;origin/master\u0026#39; by 1 commit. (use \u0026#34;git push\u0026#34; to publish your local commits) Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: config/webhook/manifests.yaml modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_webhook.go apis/operator/v1alpha1/webhook_suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) You can get more details through webhook-overview.\n4. Create the template Generally, a controller would generate a series of resources, such as workload, rbac, service, etc based on CRDs. SWCK is using the Go standard template engine to generate these resources. All template files are stored in the ./operator/pkg/operator/manifests. You could create a directory there such as demo to hold templates. The framework would transfer the CR as the arguments to these templates. More than CR, it supports passing custom rendering functions by setting up the TmplFunc. 
At last, you need to change the comment and add a field demo there to embed the template files into golang binaries.\n Notice, every file under the template directory can only contain one resource and we can\u0026rsquo;t use the --- to create multiple resources in a single file.\n 5. Build and Test SWCK needs to run in the k8s environment, so we highly recommend using the kind if you don\u0026rsquo;t have a cluster in hand. There are currently two ways to test your implementation.\n Before testing, please make sure you have the kind installed.\n  Test locally. After finishing your implementation, you could use the following steps to test locally:   Disable the webhook  export ENABLE_WEBHOOKS=false Run the main.go with the kubeconfig file.  go run main.go --kubeconfig=(use your kubeconfig file here, and the default is ~/.kube/config)  If you want to test the webhook, please refer the guide.\n  Test in-cluster.   Before testing the swck, please install cert-manager to provide the certificate for webhook in swck.  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml At first, you should build the swck image and load it into the kind cluster, and then you could install the crds and the operator as follows.  make docker-build \u0026amp;\u0026amp; kind load docker-image controller:latest \u0026amp;\u0026amp; make install \u0026amp;\u0026amp; make deploy After the swck is installed, and then you could use the following command to get the logs produced by the operator.  kubectl logs -f [skywalking-swck-controller-manager-*](../use the swck deployment name) -n skywalking-swck-system ","title":"How to add CRD and Controller in SWCK?","url":"/docs/skywalking-swck/next/how-to-add-new-crd-and-controller/"},{"content":"How to add CRD and Controller in SWCK? The guide intends to help contributors who want to add CRDs and Controllers in SWCK.\n1. Install the kubebuilder  Notice, SWCK is built by kubebuilder v3.2.0, so you need to install it at first.\n SWCK is based on the kubebuilder, and you could download the kubebuilder by the script.\n2. Create CRD and Controller You can use kubebuilder create api to scaffold a new Kind and corresponding controller. Here we use the Demo as an example.\n$ cd operator \u0026amp;\u0026amp; kubebuilder create api --group operator --version v1alpha1 --kind Demo(Your CRD) Then you need to input twice y to create the Resource and Controller, and there will be some newly added files.\n$ git status On branch master Your branch is up to date with \u0026#39;origin/master\u0026#39;. 
Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: apis/operator/v1alpha1/zz_generated.deepcopy.go modified: config/crd/bases/operator.skywalking.apache.org_swagents.yaml modified: config/crd/kustomization.yaml modified: config/rbac/role.yaml modified: go.mod modified: go.sum modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_types.go config/crd/bases/operator.skywalking.apache.org_demoes.yaml config/crd/patches/cainjection_in_operator_demoes.yaml config/crd/patches/webhook_in_operator_demoes.yaml config/rbac/operator_demo_editor_role.yaml config/rbac/operator_demo_viewer_role.yaml config/samples/operator_v1alpha1_demo.yaml controllers/operator/demo_controller.go controllers/operator/suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) Next, we need to focus on the file apis/operator/v1alpha1/demo_types.go which defines your CRD, and the file controllers/operator/configuration_controller.go which defines the Controller. The others files are some configurations generated by the kubebuilder markers. Here are some references:\n  Kubebuilder project demo, in which you can understand the overall architecture.\n  How to add new-api, which you can find more details for oapserverconfig_types.go.\n  Controller-overview, where you can find more details about oapserverconfig_controller.go.\n  3. Create webhook If you want to fields or set defaults to CRs, creating webhooks is a good practice:\nkubebuilder create webhook --group operator --version v1alpha1 --kind Demo --defaulting --programmatic-validation The newly generated files are as follows.\n$ git status On branch master Your branch is ahead of \u0026#39;origin/master\u0026#39; by 1 commit. (use \u0026#34;git push\u0026#34; to publish your local commits) Changes not staged for commit: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to update what will be committed) (use \u0026#34;git restore \u0026lt;file\u0026gt;...\u0026#34; to discard changes in working directory) modified: PROJECT modified: config/webhook/manifests.yaml modified: main.go Untracked files: (use \u0026#34;git add \u0026lt;file\u0026gt;...\u0026#34; to include in what will be committed) apis/operator/v1alpha1/demo_webhook.go apis/operator/v1alpha1/webhook_suite_test.go no changes added to commit (use \u0026#34;git add\u0026#34; and/or \u0026#34;git commit -a\u0026#34;) You can get more details through webhook-overview.\n4. Create the template Generally, a controller would generate a series of resources, such as workload, rbac, service, etc based on CRDs. SWCK is using the Go standard template engine to generate these resources. All template files are stored in the ./operator/pkg/operator/manifests. You could create a directory there such as demo to hold templates. The framework would transfer the CR as the arguments to these templates. More than CR, it supports passing custom rendering functions by setting up the TmplFunc. 
At last, you need to change the comment and add a field demo there to embed the template files into golang binaries.\n Notice, every file under the template directory can only contain one resource and we can\u0026rsquo;t use the --- to create multiple resources in a single file.\n 5. Build and Test SWCK needs to run in the k8s environment, so we highly recommend using the kind if you don\u0026rsquo;t have a cluster in hand. There are currently two ways to test your implementation.\n Before testing, please make sure you have the kind installed.\n  Test locally. After finishing your implementation, you could use the following steps to test locally:   Disable the webhook  export ENABLE_WEBHOOKS=false Run the main.go with the kubeconfig file.  go run main.go --kubeconfig=(use your kubeconfig file here, and the default is ~/.kube/config)  If you want to test the webhook, please refer the guide.\n  Test in-cluster.   Before testing the swck, please install cert-manager to provide the certificate for webhook in swck.  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml At first, you should build the swck image and load it into the kind cluster, and then you could install the crds and the operator as follows.  make docker-build \u0026amp;\u0026amp; kind load docker-image controller:latest \u0026amp;\u0026amp; make install \u0026amp;\u0026amp; make deploy After the swck is installed, and then you could use the following command to get the logs produced by the operator.  kubectl logs -f [skywalking-swck-controller-manager-*](../use the swck deployment name) -n skywalking-swck-system ","title":"How to add CRD and Controller in SWCK?","url":"/docs/skywalking-swck/v0.9.0/how-to-add-new-crd-and-controller/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK 11 or 17 (LTS versions), and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. 
If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/latest/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK 11, 17, 21 (LTS versions), and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  
Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/next/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. 
Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.0.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. 
There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.1.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  
For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.2.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK8+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... 
while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK8+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  
grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.3.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK11+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. 
Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.4.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK11+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. 
If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.5.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK11+, and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. 
If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.6.0/en/guides/how-to-build/"},{"content":"How to build a project This document will help you compile and build a project in your maven and set your IDE.\nBuilding the Project Since we are using Git submodule, we do not recommend using the GitHub tag or release page to download source codes for compiling.\nMaven behind the Proxy If you need to execute build behind the proxy, edit the .mvn/jvm.config and set the follow properties:\n-Dhttp.proxyHost=proxy_ip -Dhttp.proxyPort=proxy_port -Dhttps.proxyHost=proxy_ip -Dhttps.proxyPort=proxy_port -Dhttp.proxyUser=username -Dhttp.proxyPassword=password Building from GitHub   Prepare git, JDK 11 or 17 (LTS versions), and Maven 3.6+.\n  Clone the project.\nIf you want to build a release from source codes, set a tag name by using git clone -b [tag_name] ... while cloning.\ngit clone --recurse-submodules https://github.com/apache/skywalking.git cd skywalking/ OR git clone https://github.com/apache/skywalking.git cd skywalking/ git submodule init git submodule update   Run ./mvnw clean package -Dmaven.test.skip\n  All packages are in /dist (.tar.gz for Linux and .zip for Windows).\n  Building from Apache source code release  What is the Apache source code release?  For each official Apache release, there is a complete and independent source code tar, which includes all source codes. You could download it from SkyWalking Apache download page. There is no requirement related to git when compiling this. Just follow these steps.\n Prepare JDK11+ and Maven 3.6+. Run ./mvnw clean package -Dmaven.test.skip. All packages are in /dist.(.tar.gz for Linux and .zip for Windows).  
Advanced compiling SkyWalking is a complex maven project that has many modules. Therefore, the time to compile may be a bit longer than usual. If you just want to recompile part of the project, you have the following options:\n Compile backend and package   ./mvnw package -Pbackend,dist\n or\n make build.backend\n If you intend to compile a single plugin, such as one in the dev stage, you could\n cd plugin_module_dir \u0026amp; mvn clean package\n  Compile UI and package   ./mvnw package -Pui,dist\n or\n make build.ui\n Building docker images You can build docker images of backend and ui with Makefile located in root folder.\nRefer to Build docker image for more details.\nSetting up your IntelliJ IDEA NOTE: If you clone the codes from GitHub, please make sure that you have finished steps 1 to 3 in section Build from GitHub. If you download the source codes from the official website of SkyWalking, please make sure that you have followed the steps in section Build from Apache source code release.\n Import the project as a maven project. Run ./mvnw compile -Dmaven.test.skip=true to compile project and generate source codes. The reason is that we use gRPC and protobuf. Set Generated Source Codes folders.  grpc-java and java folders in apm-protocol/apm-network/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-core/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/fbs grpc-java and java folders in oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf grpc-java and java folders in oap-server/exporter/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-configuration/grpc-configuration-sync/target/generated-sources/protobuf grpc-java and java folders in oap-server/server-alarm-plugin/target/generated-sources/protobuf antlr4 folder in oap-server/oal-grammar/target/generated-sources    ","title":"How to build a project","url":"/docs/main/v9.7.0/en/guides/how-to-build/"},{"content":"How to build from sources? Download the source tar from the official website, and run the following commands to build from source\nMake sure you have Python 3.7+ and the python3 command available\n$ tar -zxf skywalking-python-src-\u0026lt;version\u0026gt;.tgz $ cd skywalking-python-src-\u0026lt;version\u0026gt; $ make install If you want to build from the latest source codes from GitHub for some reasons, for example, you want to try the latest features that are not released yet, please clone the source codes from GitHub and make install it:\n$ git clone https://github.com/apache/skywalking-python $ cd skywalking-python $ git submodule update --init $ make install NOTE that only releases from the website are official Apache releases.\n","title":"How to build from sources?","url":"/docs/skywalking-python/latest/en/setup/faq/how-to-build-from-sources/"},{"content":"How to build from sources? 
Download the source tar from the official website, and run the following commands to build from source\nMake sure you have Python 3.7+ and the python3 command available\n$ tar -zxf skywalking-python-src-\u0026lt;version\u0026gt;.tgz $ cd skywalking-python-src-\u0026lt;version\u0026gt; $ make install If you want to build from the latest source codes from GitHub for some reasons, for example, you want to try the latest features that are not released yet, please clone the source codes from GitHub and make install it:\n$ git clone https://github.com/apache/skywalking-python $ cd skywalking-python $ git submodule update --init $ make install NOTE that only releases from the website are official Apache releases.\n","title":"How to build from sources?","url":"/docs/skywalking-python/next/en/setup/faq/how-to-build-from-sources/"},{"content":"How to build from sources? Download the source tar from the official website, and run the following commands to build from source\nMake sure you have Python 3.7+ and the python3 command available\n$ tar -zxf skywalking-python-src-\u0026lt;version\u0026gt;.tgz $ cd skywalking-python-src-\u0026lt;version\u0026gt; $ make install If you want to build from the latest source codes from GitHub for some reasons, for example, you want to try the latest features that are not released yet, please clone the source codes from GitHub and make install it:\n$ git clone https://github.com/apache/skywalking-python $ cd skywalking-python $ git submodule update --init $ make install NOTE that only releases from the website are official Apache releases.\n","title":"How to build from sources?","url":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-build-from-sources/"},{"content":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  
node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","title":"How to bump up Zipkin Lens dependency","url":"/docs/main/latest/en/guides/how-to-bump-up-zipkin/"},{"content":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","title":"How to bump up Zipkin Lens dependency","url":"/docs/main/next/en/guides/how-to-bump-up-zipkin/"},{"content":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  
ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","title":"How to bump up Zipkin Lens dependency","url":"/docs/main/v9.4.0/en/guides/how-to-bump-up-zipkin/"},{"content":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. 
{{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","title":"How to bump up Zipkin Lens dependency","url":"/docs/main/v9.5.0/en/guides/how-to-bump-up-zipkin/"},{"content":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  
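The LICENSE.tpl file above is a Go text/template that license-eye executes over the resolved dependency summary. As a rough sketch of how such a template renders, the program below feeds sample data into a trimmed-down version of the template; the Summary, Group, and Dep types are assumptions about the data shape implied by .Groups, .LicenseID, .Deps, .Name, and .Version, not license-eye's real internal types:

package main

import (
	"os"
	"text/template"
)

// Assumed data shape implied by LICENSE.tpl; license-eye's actual internal
// types may differ.
type Dep struct {
	Name      string
	Version   string
	LicenseID string
}

type Group struct {
	LicenseID string
	Deps      []Dep
}

type Summary struct {
	Groups []Group
}

// A reduced form of LICENSE.tpl keeping only the core loop over groups and deps.
const licenseTpl = `{{ range .Groups }}
========================================================================
{{ .LicenseID }} licenses
========================================================================
{{- range .Deps }}
https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }}
{{- end }}
{{ end }}`

func main() {
	summary := Summary{Groups: []Group{
		{LicenseID: "MIT", Deps: []Dep{{Name: "cli-table", Version: "0.3.1", LicenseID: "MIT"}}},
	}}
	tmpl := template.Must(template.New("license").Parse(licenseTpl))
	if err := tmpl.Execute(os.Stdout, summary); err != nil {
		panic(err)
	}
}

Rendering the sample data prints one block per license group with one npmjs.com line per dependency, which matches the layout of the LICENSE file generated in the steps above.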
Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","title":"How to bump up Zipkin Lens dependency","url":"/docs/main/v9.6.0/en/guides/how-to-bump-up-zipkin/"},{"content":"How to bump up Zipkin Lens dependency Because SkyWalking embeds Zipkin Lens UI as a part of the SkyWalking UI, and Zipkin Lens UI contains a lot of other front-end dependencies that we also distribute in SkyWalking binary tars, so we have to take care of the dependencies' licenses when we bump up the Zipkin Lens dependency.\nMake sure to do the following steps when you bump up the Zipkin Lens dependency:\n Clone the Zipkin project into a directory.  ZIPKIN_VERSION=\u0026lt;the Zipkin version you want to bump to\u0026gt; git clone https://github.com/openzipkin/zipkin \u0026amp;\u0026amp; cd zipkin git checkout $ZIPKIN_VERSION cd zipkin-lens  Create .licenserc.yaml with the following content.  cat \u0026gt; .licenserc.yaml \u0026lt;\u0026lt; EOF header: license: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation dependency: files: - package.json licenses: - name: cli-table version: 0.3.1 license: MIT - name: domutils version: 1.5.1 license: BSD-2-Clause - name: rework version: 1.0.1 license: MIT EOF  Create license template LICENSE.tpl with the following content.  {{ range .Groups }} ======================================================================== {{ .LicenseID }} licenses ======================================================================== The following components are provided under the {{ .LicenseID }} License. See project link for details. {{- if eq .LicenseID \u0026quot;Apache-2.0\u0026quot; }} The text of each license is the standard Apache 2.0 license. {{- else }} The text of each license is also included in licenses/LICENSE-[project].txt. {{ end }} {{- range .Deps }} https://npmjs.com/package/{{ .Name }}/v/{{ .Version }} {{ .Version }} {{ .LicenseID }} {{- end }} {{ end }}  Make sure you\u0026rsquo;re using the supported NodeJS version and NPM version.  node -v # should be v14.x.x npm -v # should be 6.x.x  Run the following command to generate the license file.  license-eye dependency resolve --summary LICENSE.tpl  Copy the generated file LICENSE to replace the zipkin-LICENSE in SkyWalking repo.  Note: if there are dependencies that license-eye failed to identify the license, you should manually identify the license and add it to the step above in .licenserc.yaml.\n","title":"How to bump up Zipkin Lens dependency","url":"/docs/main/v9.7.0/en/guides/how-to-bump-up-zipkin/"},{"content":"How to disable some plugins? You can find the plugin name in the list and disable one or more plugins by following methods.\nfrom skywalking import config config.agent_disable_plugins = [\u0026#39;sw_http_server\u0026#39;, \u0026#39;sw_urllib_request\u0026#39;] # can be also CSV format, i.e. \u0026#39;sw_http_server,sw_urllib_request\u0026#39; You can also disable the plugins via environment variables SW_AGENT_DISABLE_PLUGINS, please check the Environment Variables List for an explanation.\n","title":"How to disable some plugins?","url":"/docs/skywalking-python/latest/en/setup/faq/how-to-disable-plugin/"},{"content":"How to disable some plugins? 
You can find the plugin name in the list and disable one or more plugins by following methods.\nfrom skywalking import config config.agent_disable_plugins = [\u0026#39;sw_http_server\u0026#39;, \u0026#39;sw_urllib_request\u0026#39;] # can be also CSV format, i.e. \u0026#39;sw_http_server,sw_urllib_request\u0026#39; You can also disable the plugins via environment variables SW_AGENT_DISABLE_PLUGINS, please check the Environment Variables List for an explanation.\n","title":"How to disable some plugins?","url":"/docs/skywalking-python/next/en/setup/faq/how-to-disable-plugin/"},{"content":"How to disable some plugins? You can find the plugin name in the list and disable one or more plugins by following methods.\nfrom skywalking import config config.agent_disable_plugins = [\u0026#39;sw_http_server\u0026#39;, \u0026#39;sw_urllib_request\u0026#39;] # can be also CSV format, i.e. \u0026#39;sw_http_server,sw_urllib_request\u0026#39; You can also disable the plugins via environment variables SW_AGENT_DISABLE_PLUGINS, please check the Environment Variables List for an explanation.\n","title":"How to disable some plugins?","url":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-disable-plugin/"},{"content":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. 
Equinox) enable them by default  ","title":"How to make SkyWalking agent works in `OSGI` environment?","url":"/docs/skywalking-java/latest/en/faq/osgi/"},{"content":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","title":"How to make SkyWalking agent works in `OSGI` environment?","url":"/docs/skywalking-java/next/en/faq/osgi/"},{"content":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","title":"How to make SkyWalking agent works in `OSGI` environment?","url":"/docs/skywalking-java/v9.0.0/en/faq/osgi/"},{"content":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","title":"How to make SkyWalking agent works in `OSGI` environment?","url":"/docs/skywalking-java/v9.1.0/en/faq/osgi/"},{"content":"How to make SkyWalking agent works in OSGI environment? OSGI implements its own set of modularity, which means that each Bundle has its own unique class loader for isolating different versions of classes. 
By default, OSGI runtime uses the boot classloader for the bundle codes, which makes the java.lang.NoClassDefFoundError exception in the booting stage.\njava.lang.NoClassDefFoundError: org/apache/skywalking/apm/agent/core/plugin/interceptor/enhance/EnhancedInstance at ch.qos.logback.classic.Logger.buildLoggingEventAndAppend(Logger.java:419) at ch.qos.logback.classic.Logger.filterAndLog_0_Or3Plus(Logger.java:383) at ch.qos.logback.classic.Logger.log(Logger.java:765) at org.apache.commons.logging.impl.SLF4JLocationAwareLog.error(SLF4JLocationAwareLog.java:216) at org.springframework.boot.SpringApplication.reportFailure(SpringApplication.java:771) at org.springframework.boot.SpringApplication.handleRunFailure(SpringApplication.java:748) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1118) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1107) at by.kolodyuk.osgi.springboot.SpringBootBundleActivator.start(SpringBootBundleActivator.java:21) at org.apache.felix.framework.util.SecureAction.startActivator(SecureAction.java:849) at org.apache.felix.framework.Felix.activateBundle(Felix.java:2429) at org.apache.felix.framework.Felix.startBundle(Felix.java:2335) at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1566) at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:297) at java.base/java.lang.Thread.run(Thread.java:829) How to resolve this issue?  we need to set the parent classloader in OSGI to AppClassLoader, through the specific parameter org.osgi.framework.bundle.parent=app. The list of parameters can be found in the OSGI API Load the SkyWalking related classes to the bundle parent class loader, AppClassLoader, with the parameter org.osgi.framework.bootdelegation=org.apache.skywalking.apm.* or org.osgi.framework.bootdelegation=*. This step is optional. Some OSGi implementations (i.e. Equinox) enable them by default  ","title":"How to make SkyWalking agent works in `OSGI` environment?","url":"/docs/skywalking-java/v9.2.0/en/faq/osgi/"},{"content":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes.\nAfter cloning the repo, make sure you also have cloned the submodule for protocol. Otherwise, run the command below.\ngit submodule update --init Please first refer to the Developer Guide to set up a development environment.\nTL;DR: run make env. This will create virtual environments for python and generate the protocol folder needed for the agent.\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nBy now, you can do what you want. Let\u0026rsquo;s get to the topic of how to test.\nThe test process requires docker and docker-compose throughout. If you haven\u0026rsquo;t installed them, please install them first.\nThen run make test, which will generate a list of plugin versions based on the support_matrix variable in each Plugin and orchestrate the tests automatically. Remember to inspect the outcomes carefully to debug your plugin.\nAlternatively, you can run full tests via our GitHub action workflow on your own GitHub fork, it is usually easier since local environment can be tricky to setup for new contributors.\nTo do so, you need to fork this repo on GitHub and enable GitHub actions on your forked repo. 
Then, you can simply push your changes and open a Pull Request to the fork\u0026rsquo;s master branch.\nNote: GitHub automatically targets Pull Requests to the upstream repo, be careful when you open them to avoid accidental PRs to upstream.\n","title":"How to test locally?","url":"/docs/skywalking-python/latest/en/contribution/how-to-test-locally/"},{"content":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes.\nAfter cloning the repo, make sure you also have cloned the submodule for protocol. Otherwise, run the command below.\ngit submodule update --init Please first refer to the Developer Guide to set up a development environment.\nTL;DR: run make env. This will create virtual environments for python and generate the protocol folder needed for the agent.\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nBy now, you can do what you want. Let\u0026rsquo;s get to the topic of how to test.\nThe test process requires docker and docker-compose throughout. If you haven\u0026rsquo;t installed them, please install them first.\nThen run make test, which will generate a list of plugin versions based on the support_matrix variable in each Plugin and orchestrate the tests automatically. Remember to inspect the outcomes carefully to debug your plugin.\nAlternatively, you can run full tests via our GitHub action workflow on your own GitHub fork, it is usually easier since local environment can be tricky to setup for new contributors.\nTo do so, you need to fork this repo on GitHub and enable GitHub actions on your forked repo. Then, you can simply push your changes and open a Pull Request to the fork\u0026rsquo;s master branch.\nNote: GitHub automatically targets Pull Requests to the upstream repo, be careful when you open them to avoid accidental PRs to upstream.\n","title":"How to test locally?","url":"/docs/skywalking-python/next/en/contribution/how-to-test-locally/"},{"content":"How to test locally? This guide assumes you just cloned the repo and are ready to make some changes.\nAfter cloning the repo, make sure you also have cloned the submodule for protocol. Otherwise, run the command below.\ngit submodule update --init Please first refer to the Developer Guide to set up a development environment.\nTL;DR: run make env. This will create virtual environments for python and generate the protocol folder needed for the agent.\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nBy now, you can do what you want. Let\u0026rsquo;s get to the topic of how to test.\nThe test process requires docker and docker-compose throughout. If you haven\u0026rsquo;t installed them, please install them first.\nThen run make test, which will generate a list of plugin versions based on the support_matrix variable in each Plugin and orchestrate the tests automatically. Remember to inspect the outcomes carefully to debug your plugin.\nAlternatively, you can run full tests via our GitHub action workflow on your own GitHub fork, it is usually easier since local environment can be tricky to setup for new contributors.\nTo do so, you need to fork this repo on GitHub and enable GitHub actions on your forked repo. 
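For orientation, the paragraph above mentions that `make test` reads a `support_matrix` variable from each Plugin to decide which library versions to test. A rough, hedged sketch of what such a declaration might look like follows; the plugin name, Python-version keys, and library versions are illustrative placeholders, not copied from any real plugin.

```python
# Illustrative only: a plugin module might declare which library versions it is
# exercised against, keyed by the Python versions the matrix applies to.
# All names and numbers below are placeholders.
support_matrix = {
    'flask': {                      # hypothetical plugin target
        '>=3.7': ['1.1', '2.0'],    # library versions assumed tested on Python 3.7+
    }
}

# `make test` would expand matrices like this into concrete (python, library)
# combinations and orchestrate the plugin tests for each one.
```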
Then, you can simply push your changes and open a Pull Request to the fork\u0026rsquo;s master branch.\nNote: GitHub automatically targets Pull Requests to the upstream repo, be careful when you open them to avoid accidental PRs to upstream.\n","title":"How to test locally?","url":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-test-locally/"},{"content":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... }   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... 
}   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","title":"How to tolerate custom exceptions","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/"},{"content":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... 
}  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... }   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","title":"How to tolerate custom exceptions","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/"},{"content":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... 
}   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","title":"How to tolerate custom exceptions","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/"},{"content":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... 
}   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","title":"How to tolerate custom exceptions","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/"},{"content":"How to tolerate custom exceptions In some codes, the exception is being used as a way of controlling business flow. Skywalking provides 2 ways to tolerate an exception which is traced in a span.\n Set the names of exception classes in the agent config Use our annotation in the codes.  Set the names of exception classes in the agent config The property named \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo; is used to set up class names in the agent configuration file. if the exception listed here are detected in the agent, the agent core would flag the related span as the error status.\nDemo   A custom exception.\n TestNamedMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestNamedMatchException extends RuntimeException { public TestNamedMatchException() { } public TestNamedMatchException(final String message) { super(message); } ... }  TestHierarchyMatchException  package org.apache.skywalking.apm.agent.core.context.status; public class TestHierarchyMatchException extends TestNamedMatchException { public TestHierarchyMatchException() { } public TestHierarchyMatchException(final String message) { super(message); } ... 
}   When the above exceptions traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException true   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException true      After set these class names through \u0026ldquo;statuscheck.ignored_exceptions\u0026rdquo;, the status of spans would be changed.\nstatuscheck.ignored_exceptions=org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestNamedMatchException false   org.apache.skywalking.apm.agent.core.context.status.TestHierarchyMatchException false      Use our annotation in the codes. If an exception has the @IgnoredException annotation, the exception wouldn\u0026rsquo;t be marked as error status when tracing. Because the annotation supports inheritance, also affects the subclasses.\nDependency  Dependency the toolkit, such as using maven or gradle. Since 8.2.0.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Demo   A custom exception.\npackage org.apache.skywalking.apm.agent.core.context.status; public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }   When the above exception traced in some spans, the status is like the following.\n   The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException true      However, when the exception annotated with the annotation, the status would be changed.\npackage org.apache.skywalking.apm.agent.core.context.status; @IgnoredException public class TestAnnotatedException extends RuntimeException { public TestAnnotatedException() { } public TestAnnotatedException(final String message) { super(message); } ... }    The traced exception Final span status     org.apache.skywalking.apm.agent.core.context.status.TestAnnotatedException false      Recursive check Due to the wrapper nature of Java exceptions, sometimes users need recursive checking. 
Skywalking also supports it.\nstatuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1} The following report shows the benchmark results of the exception checks with different recursive depths,\n# JMH version: 1.33 # VM version: JDK 1.8.0_292, OpenJDK 64-Bit Server VM, 25.292-b10 # VM invoker: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home/jre/bin/java # VM options: -javaagent:/Applications/IntelliJ IDEA.app/Contents/lib/idea_rt.jar=54972:/Applications/IntelliJ IDEA.app/Contents/bin -Dfile.encoding=UTF-8 # Blackhole mode: full + dont-inline hint (default, use -Djmh.blackhole.autoDetect=true to auto-detect) # Warmup: 5 iterations, 10 s each # Measurement: 5 iterations, 10 s each # Timeout: 10 min per iteration # Threads: 1 thread, will synchronize iterations # Benchmark mode: Average time, time/op Benchmark Mode Cnt Score Error Units HierarchyMatchExceptionBenchmark.depthOneBenchmark avgt 25 31.050 ± 0.731 ns/op HierarchyMatchExceptionBenchmark.depthTwoBenchmark avgt 25 64.918 ± 2.537 ns/op HierarchyMatchExceptionBenchmark.depthThreeBenchmark avgt 25 89.645 ± 2.556 ns/op According to the reported results above, the exception check time is nearly proportional to the recursive depth being set. For each single check, it costs about ten of nanoseconds (~30 nanoseconds in the report, but may vary according to different hardware and platforms).\nTypically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status.\n","title":"How to tolerate custom exceptions","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/how-to-tolerate-exceptions/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/latest/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start the storage, OAP and Booster UI with docker-compose As a quick start, you can use our one-liner script to start ElasticSearch or BanyanDB as the storage, OAP server and Booster UI, please make sure you have installed Docker.\nLinux, macOS, Windows (WSL)\nbash \u0026lt;(curl -sSL https://skywalking.apache.org/quickstart-docker.sh) Windows (Powershell)\nInvoke-Expression ([System.Text.Encoding]::UTF8.GetString((Invoke-WebRequest -Uri https://skywalking.apache.org/quickstart-docker.ps1 -UseBasicParsing).Content)) You will be prompted to choose the storage type, and then the script will start the backend cluster with the selected storage.\nTo tear down the cluster, run the following command:\ndocker compose --project-name=skywalking-quickstart down Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.7.0 Start a standalone container with BanyanDB as storage, whose address is banyandb:17912 docker run --name oap --restart always -d -e SW_STORAGE=banyandb -e SW_STORAGE_BANYANDB_TARGETS=banyandb:17912 apache/skywalking-oap-server:9.7.0 Start a standalone container with ElasticSearch 7 as storage, whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.7.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/next/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standlone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:8.8.0 Start a standlone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:8.8.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden, otherwise, they will be added in /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
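If you prefer to script these container starts from Python rather than the raw `docker run` commands quoted above, the same standalone-OAP-with-ElasticSearch invocation could look roughly like the sketch below. It assumes the Docker SDK for Python (`pip install docker`) is available; the image tag, container name, and storage addresses simply mirror the command lines above.

```python
# Sketch: Python equivalent of
#   docker run --name oap --restart always -d \
#     -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 \
#     apache/skywalking-oap-server:9.0.0
import docker

client = docker.from_env()
client.containers.run(
    "apache/skywalking-oap-server:9.0.0",   # tag taken from the command above
    name="oap",
    detach=True,
    restart_policy={"Name": "always"},
    environment={
        "SW_STORAGE": "elasticsearch",
        "SW_STORAGE_ES_CLUSTER_NODES": "elasticsearch:9200",
    },
)
```

Dropping the `environment` mapping gives the default H2-storage variant shown first in the entry.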
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.0.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standlone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standlone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.1.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.2.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.3.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.4.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.5.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. 
Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.6.0/en/setup/backend/backend-docker/"},{"content":"How to use the Docker images Start a standalone container with H2 storage docker run --name oap --restart always -d apache/skywalking-oap-server:9.0.0 Start a standalone container with ElasticSearch 7 as storage whose address is elasticsearch:9200 docker run --name oap --restart always -d -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:9.0.0 Configuration We could set up environment variables to configure this image. They are defined in backend-setup.\nExtend image If you intend to override or add config files in /skywalking/config, /skywalking/ext-config is the location for you to put extra files. The files with the same name will be overridden; otherwise, they will be added to /skywalking/config.\nIf you want to add more libs/jars into the classpath of OAP, for example, new metrics for OAL. These jars can be mounted into /skywalking/ext-libs, then entrypoint bash will append them into the classpath. Notice, you can\u0026rsquo;t override an existing jar in classpath.\n","title":"How to use the Docker images","url":"/docs/main/v9.7.0/en/setup/backend/backend-docker/"},{"content":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used in production. The state-of-the-art practice is to use Gunicorn as the process manager for ASGI applications such as FastAPI to get resilient \u0026amp; blazing fast services.\nSince Gunicorn is a prefork server, it will fork a new process for each worker, and the forked process will be the one that actually serves requests.\n Tired of understanding these complicated multiprocessing behaviors? Try the new sw-python run --prefork/-p support for Gunicorn first! You can always fall back to the manual approach (although it\u0026rsquo;s also non-intrusive for application).\n Automatic Injection Approach (Non-intrusive)  Caveat: Although E2E test passes for Python3.7, there\u0026rsquo;s a small chance that this approach won\u0026rsquo;t work on Python 3.7 if your application uses gPRC protocol AND subprocess AND fork together (you will immediately see service is not starting normally, not randomly breaking after)\nThis is due to an unfixed bug in gRPC core that leads to deadlock if Python 3.7 application involves subprocess (like debug mode). 
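Before the TL;DR that follows, here is a minimal sketch of what the automatic injection approach amounts to programmatically: `sw-python run -p` essentially switches on the agent's experimental fork support for you. The option name comes from this entry; the collector address and service name are placeholders, and this is a sketch rather than the CLI's actual implementation.

```python
# Sketch only: roughly what `sw-python run -p` arranges on your behalf. The CLI
# starts the agent in the Gunicorn master and the fork hook restarts it in each
# worker; the address and service name below are placeholders.
from skywalking import agent, config

config.init(
    agent_collector_backend_services='127.0.0.1:11800',  # placeholder OAP address
    agent_name='your awesome service',                    # placeholder service name
)
config.agent_experimental_fork_support = True  # the option -p/--prefork flips on
agent.start()
```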
You should upgrade to Python 3.8+ soon since the EOL is approaching on 2023 June 27th, or fallback to manual approach should this case happen, or simply use HTTP/Kafka protocol.\n TL;DR: specify -p or --prefork in sw-python run -p and all Gunicorn workers and master will get their own working agent.\nImportant: if the call to gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 Long version: (notice this is different from how uWSGI equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, the agent_experimental_fork_support agent option will be turned on automatically.\nStartup flow: sw-python -\u0026gt; gunicorn -\u0026gt; master process (agent starts) -\u0026gt; fork -\u0026gt; worker process (agent restarts due to os.register_at_fork)\nThe master process will get its own agent, although it won\u0026rsquo;t report any trace, since obviously it doesn\u0026rsquo;t take requests, it still reports metrics that is useful for debugging\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) Limitation: Using normal postfork hook will not add observability to the master process, you could also define a prefork hook to start an agent in the master process, with a instance name like instance-name-master(\u0026lt;pid\u0026gt;)\nThe following is just an example, since Gunicorn\u0026rsquo;s automatic injection approach is likely to work in many situations.\n The manual approach should not be used together with the agent\u0026rsquo;s fork support. Otherwise, agent will be dual booted and raise an error saying that you should not do so.\n # Usage explained here: https://docs.gunicorn.org/en/stable/settings.html#post-fork bind = '0.0.0.0:8088' workers = 3 def post_fork(server, worker): # Important: The import of skywalking should be inside the post_fork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() Run Gunicorn normally without sw-python CLI:\ngunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 ","title":"How to use with Gunicorn?","url":"/docs/skywalking-python/latest/en/setup/faq/how-to-use-with-gunicorn/"},{"content":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used in production. The state-of-the-art practice is to use Gunicorn as the process manager for ASGI applications such as FastAPI to get resilient \u0026amp; blazing fast services.\nSince Gunicorn is a prefork server, it will fork a new process for each worker, and the forked process will be the one that actually serves requests.\n Tired of understanding these complicated multiprocessing behaviors? Try the new sw-python run --prefork/-p support for Gunicorn first! 
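The startup flow described in this entry hinges on the standard-library `os.register_at_fork` hook ("agent restarts due to os.register_at_fork"). In isolation, that mechanism looks like the self-contained illustration below; it shows only the stdlib hook, not the agent's actual internals, and requires a POSIX platform.

```python
# Stand-alone illustration of os.register_at_fork (Python 3.7+, POSIX), the hook
# the "agent restarts" step above relies on.
import os

def restart_in_child():
    # The real agent re-creates its reporting machinery here; we just log the pid.
    print(f"fork hook ran in child pid={os.getpid()}")

os.register_at_fork(after_in_child=restart_in_child)

pid = os.fork()
if pid == 0:
    # child: the hook above has already run by this point
    os._exit(0)
else:
    # parent: wait for the child to finish
    os.waitpid(pid, 0)
```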
You can always fall back to the manual approach (although it\u0026rsquo;s also non-intrusive for application).\n Automatic Injection Approach (Non-intrusive)  Caveat: Although E2E test passes for Python3.7, there\u0026rsquo;s a small chance that this approach won\u0026rsquo;t work on Python 3.7 if your application uses gPRC protocol AND subprocess AND fork together (you will immediately see service is not starting normally, not randomly breaking after)\nThis is due to an unfixed bug in gRPC core that leads to deadlock if Python 3.7 application involves subprocess (like debug mode). You should upgrade to Python 3.8+ soon since the EOL is approaching on 2023 June 27th, or fallback to manual approach should this case happen, or simply use HTTP/Kafka protocol.\n TL;DR: specify -p or --prefork in sw-python run -p and all Gunicorn workers and master will get their own working agent.\nImportant: if the call to gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 Long version: (notice this is different from how uWSGI equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, the agent_experimental_fork_support agent option will be turned on automatically.\nStartup flow: sw-python -\u0026gt; gunicorn -\u0026gt; master process (agent starts) -\u0026gt; fork -\u0026gt; worker process (agent restarts due to os.register_at_fork)\nThe master process will get its own agent, although it won\u0026rsquo;t report any trace, since obviously it doesn\u0026rsquo;t take requests, it still reports metrics that is useful for debugging\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) Limitation: Using normal postfork hook will not add observability to the master process, you could also define a prefork hook to start an agent in the master process, with a instance name like instance-name-master(\u0026lt;pid\u0026gt;)\nThe following is just an example, since Gunicorn\u0026rsquo;s automatic injection approach is likely to work in many situations.\n The manual approach should not be used together with the agent\u0026rsquo;s fork support. Otherwise, agent will be dual booted and raise an error saying that you should not do so.\n # Usage explained here: https://docs.gunicorn.org/en/stable/settings.html#post-fork bind = '0.0.0.0:8088' workers = 3 def post_fork(server, worker): # Important: The import of skywalking should be inside the post_fork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. 
agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() Run Gunicorn normally without sw-python CLI:\ngunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 ","title":"How to use with Gunicorn?","url":"/docs/skywalking-python/next/en/setup/faq/how-to-use-with-gunicorn/"},{"content":"How to use with Gunicorn? Gunicorn is another popular process manager and prefork server widely used in production. The state-of-the-art practice is to use Gunicorn as the process manager for ASGI applications such as FastAPI to get resilient \u0026amp; blazing fast services.\nSince Gunicorn is a prefork server, it will fork a new process for each worker, and the forked process will be the one that actually serves requests.\n Tired of understanding these complicated multiprocessing behaviors? Try the new sw-python run --prefork/-p support for Gunicorn first! You can always fall back to the manual approach (although it\u0026rsquo;s also non-intrusive for the application).\n Automatic Injection Approach (Non-intrusive)  Caveat: Although the E2E test passes for Python 3.7, there\u0026rsquo;s a small chance that this approach won\u0026rsquo;t work on Python 3.7 if your application uses the gRPC protocol AND subprocess AND fork together (you will immediately see the service fail to start normally, rather than break randomly later)\nThis is due to an unfixed bug in gRPC core that leads to deadlock if a Python 3.7 application involves subprocess (like debug mode). You should upgrade to Python 3.8+ soon since the EOL is approaching on 2023 June 27th, or fall back to the manual approach should this happen, or simply use HTTP/Kafka protocol.\n TL;DR: specify -p or --prefork in sw-python run and all Gunicorn workers and the master will get their own working agent.\nImportant: if the call to gunicorn is prefixed with other commands, this approach will fail since the agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 Long version: (notice this is different from how the uWSGI equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in the sw-python CLI, the agent_experimental_fork_support agent option will be turned on automatically.\nStartup flow: sw-python -\u0026gt; gunicorn -\u0026gt; master process (agent starts) -\u0026gt; fork -\u0026gt; worker process (agent restarts due to os.register_at_fork)\nThe master process will get its own agent; although it won\u0026rsquo;t report any traces (it doesn\u0026rsquo;t take requests), it still reports metrics that are useful for debugging\n A runnable example can be found in the demo folder of the skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) Limitation: Using a normal postfork hook will not add observability to the master process; you could also define a prefork hook to start an agent in the master process, with an instance name like instance-name-master(\u0026lt;pid\u0026gt;)\nThe following is just an example, since Gunicorn\u0026rsquo;s automatic injection approach is likely to work in many situations.\n The manual approach should not be used together with the agent\u0026rsquo;s fork support. 
Otherwise, agent will be dual booted and raise an error saying that you should not do so.\n # Usage explained here: https://docs.gunicorn.org/en/stable/settings.html#post-fork bind = '0.0.0.0:8088' workers = 3 def post_fork(server, worker): # Important: The import of skywalking should be inside the post_fork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() Run Gunicorn normally without sw-python CLI:\ngunicorn gunicorn_consumer_prefork:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088 ","title":"How to use with Gunicorn?","url":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-use-with-gunicorn/"},{"content":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and easy-to-use web server.\nSince uWSGI is relatively old and offers multi-language support, it can get quite troublesome due to the usage of a system-level fork.\nSome of the original discussion can be found here:\n [Python] Apache Skywalking, flask uwsgi, no metrics send to server · Issue #6324 · apache/skywalking [Bug] skywalking-python not work with uwsgi + flask in master workers mode and threads mode · Issue #8566 · apache/skywalking   Tired of understanding these complicated multiprocessing behaviours? Try the new sw-python run --prefork/-p support for uWSGI first! You can always fall back to the manual approach. (although it\u0026rsquo;s also possible to pass postfork hook without changing code, which is essentially how sw-python is implemented)\n  Limitation: regardless of the approach used, uWSGI master process cannot be safely monitored. Since it doesn\u0026rsquo;t take any requests, it is generally acceptable. Alternatively, you could switch to Gunicorn, where its master process can be monitored properly along with all child workers.\n Important: The --enable-threads and --master option must be given to allow the usage of post_fork hooks and threading in workers. In the sw-python CLI, these two options will be automatically injected for you in addition to the post_fork hook.\nAutomatic Injection Approach (Non-intrusive) TL;DR: specify -p or --prefork in sw-python run -p and all uWSGI workers will get their own working agent.\nImportant: if the call to uwsgi is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p uwsgi --die-on-term \\  --http 0.0.0.0:9090 \\  --http-manage-expect \\  --master --workers 2 \\  --enable-threads \\  --threads 2 \\  --manage-script-name \\  --mount /=flask_consumer_prefork:app Long version: (notice this is different from how Gunicorn equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, a uwsgi_hook will be registered by the CLI by adding the environment variable into one of [\u0026lsquo;UWSGI_SHARED_PYTHON_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PYIMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PY_IMPORT\u0026rsquo;]. 
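To make the mechanism concrete, here is a rough sketch (not the actual sw-python internals; the module name sw_uwsgi_bootstrap is purely illustrative) of how a shared-import variable plus a postfork hook can start an agent in every worker:

```python
# sw_uwsgi_bootstrap.py -- hypothetical bootstrap module; uWSGI imports it when
# one of the UWSGI_SHARED_*IMPORT variables listed above points at it.
from uwsgidecorators import postfork


@postfork
def _start_agent():
    # Runs once in each forked worker (never in the master).
    import os
    from skywalking import agent, config

    config.init(agent_collector_backend_services='127.0.0.1:11800',
                agent_name='your awesome service',
                agent_instance_name=f'your-instance-child({os.getpid()})')
    agent.start()
```

This is essentially the same postfork hook as the manual approach described below; the difference is that the CLI injects the import for you instead of requiring a code change.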
uWSGI will then import the module and start the agent in forked workers.\nStartup flow: sw-python -\u0026gt; uwsgi -\u0026gt; master process (agent doesn\u0026rsquo;t start here) -\u0026gt; fork -\u0026gt; worker process (agent starts due to post_fork hook)\nThe master process (which doesn\u0026rsquo;t accept requests) currently does not get its own agent as it cannot be safely started and handled by os.register_at_fork() handlers.\n A runnable example can be found in the demo folder of the skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) If you run into problems when using the SkyWalking Python agent, you can try the following manual method, which calls @postfork, the low-level API of uWSGI, to initialize the agent.\nThe following is an example of using uWSGI with Flask.\nImportant: Never start the agent directly in the app; forked workers are unlikely to work properly (and even if they do, it\u0026rsquo;s down to luck). You should either add the following postfork hook, or try our new experimental automatic startup through the sw-python CLI (see above).\n# main.py # Note: The --master uwsgi flag must be on, otherwise the decorators will not be available to import from uwsgidecorators import postfork @postfork def init_tracing(): # Important: The import of skywalking must be inside the postfork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello World!' if __name__ == '__main__': app.run() Run uWSGI normally without sw-python CLI:\nuwsgi --die-on-term \\  --http 0.0.0.0:5000 \\  --http-manage-expect \\  --master --workers 3 \\  --enable-threads \\  --threads 3 \\  --manage-script-name \\  --mount /=main:app ","title":"How to use with uWSGI?","url":"/docs/skywalking-python/latest/en/setup/faq/how-to-use-with-uwsgi/"},{"content":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and easy-to-use web server.\nSince uWSGI is relatively old and offers multi-language support, it can get quite troublesome due to the usage of a system-level fork.\nSome of the original discussion can be found here:\n [Python] Apache Skywalking, flask uwsgi, no metrics send to server · Issue #6324 · apache/skywalking [Bug] skywalking-python not work with uwsgi + flask in master workers mode and threads mode · Issue #8566 · apache/skywalking   Tired of understanding these complicated multiprocessing behaviours? Try the new sw-python run --prefork/-p support for uWSGI first! You can always fall back to the manual approach (although it\u0026rsquo;s also possible to pass a postfork hook without changing code, which is essentially how sw-python is implemented).\n  Limitation: regardless of the approach used, the uWSGI master process cannot be safely monitored. Since it doesn\u0026rsquo;t take any requests, it is generally acceptable. 
Alternatively, you could switch to Gunicorn, where its master process can be monitored properly along with all child workers.\n Important: The --enable-threads and --master option must be given to allow the usage of post_fork hooks and threading in workers. In the sw-python CLI, these two options will be automatically injected for you in addition to the post_fork hook.\nAutomatic Injection Approach (Non-intrusive) TL;DR: specify -p or --prefork in sw-python run -p and all uWSGI workers will get their own working agent.\nImportant: if the call to uwsgi is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p uwsgi --die-on-term \\  --http 0.0.0.0:9090 \\  --http-manage-expect \\  --master --workers 2 \\  --enable-threads \\  --threads 2 \\  --manage-script-name \\  --mount /=flask_consumer_prefork:app Long version: (notice this is different from how Gunicorn equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, a uwsgi_hook will be registered by the CLI by adding the environment variable into one of [\u0026lsquo;UWSGI_SHARED_PYTHON_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PYIMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PY_IMPORT\u0026rsquo;]. uWSGI will then import the module and start the agent in forked workers.\nStartup flow: sw-python -\u0026gt; uwsgi -\u0026gt; master process (agent doesn\u0026rsquo;t start here) -\u0026gt; fork -\u0026gt; worker process (agent starts due to post_fork hook)\nThe master process (which doesn\u0026rsquo;t accept requests) currently does not get its own agent as it can not be safely started and handled by os.register_at_fork() handlers.\n A runnable example can be found in the demo folder of skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) If you get some problems when using SkyWalking Python agent, you can try to use the following manual method to call @postfork, the low-level API of uWSGI to initialize the agent.\nThe following is an example of the use of uWSGI and flask.\nImportant: Never directly start the agent in the app, forked workers are unlikely to work properly (even if they do, it\u0026rsquo;s out of luck) you should either add the following postfork, or try our new experimental automatic startup through sw-python CLI (see above).\n# main.py # Note: The --master uwsgi flag must be on, otherwise the decorators will not be available to import from uwsgidecorators import postfork @postfork def init_tracing(): # Important: The import of skywalking must be inside the postfork function from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello World!' 
if __name__ == '__main__': app.run() Run uWSGI normally without sw-python CLI:\nuwsgi --die-on-term \\  --http 0.0.0.0:5000 \\  --http-manage-expect \\  --master --workers 3 \\  --enable-threads \\  --threads 3 \\  --manage-script-name \\  --mount /=main:app ","title":"How to use with uWSGI?","url":"/docs/skywalking-python/next/en/setup/faq/how-to-use-with-uwsgi/"},{"content":"How to use with uWSGI? uWSGI is popular in the Python ecosystem. It is a lightweight, fast, and easy-to-use web server.\nSince uWSGI is relatively old and offers multi-language support, it can get quite troublesome due to the usage of a system-level fork.\nSome of the original discussion can be found here:\n [Python] Apache Skywalking, flask uwsgi, no metrics send to server · Issue #6324 · apache/skywalking [Bug] skywalking-python not work with uwsgi + flask in master workers mode and threads mode · Issue #8566 · apache/skywalking   Tired of understanding these complicated multiprocessing behaviours? Try the new sw-python run --prefork/-p support for uWSGI first! You can always fall back to the manual approach. (although it\u0026rsquo;s also possible to pass postfork hook without changing code, which is essentially how sw-python is implemented)\n  Limitation: regardless of the approach used, uWSGI master process cannot be safely monitored. Since it doesn\u0026rsquo;t take any requests, it is generally acceptable. Alternatively, you could switch to Gunicorn, where its master process can be monitored properly along with all child workers.\n Important: The --enable-threads and --master option must be given to allow the usage of post_fork hooks and threading in workers. In the sw-python CLI, these two options will be automatically injected for you in addition to the post_fork hook.\nAutomatic Injection Approach (Non-intrusive) TL;DR: specify -p or --prefork in sw-python run -p and all uWSGI workers will get their own working agent.\nImportant: if the call to uwsgi is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p uwsgi --die-on-term \\  --http 0.0.0.0:9090 \\  --http-manage-expect \\  --master --workers 2 \\  --enable-threads \\  --threads 2 \\  --manage-script-name \\  --mount /=flask_consumer_prefork:app Long version: (notice this is different from how Gunicorn equivalent works)\nBy specifying the -p or \u0026ndash;prefork option in sw-python CLI, a uwsgi_hook will be registered by the CLI by adding the environment variable into one of [\u0026lsquo;UWSGI_SHARED_PYTHON_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_IMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PYIMPORT\u0026rsquo;, \u0026lsquo;UWSGI_SHARED_PY_IMPORT\u0026rsquo;]. 
uWSGI will then import the module and start the agent in forked workers.\nStartup flow: sw-python -\u0026gt; uwsgi -\u0026gt; master process (agent doesn\u0026rsquo;t start here) -\u0026gt; fork -\u0026gt; worker process (agent starts due to post_fork hook)\nThe master process (which doesn\u0026rsquo;t accept requests) currently does not get its own agent as it cannot be safely started and handled by os.register_at_fork() handlers.\n A runnable example can be found in the demo folder of the skywalking-python GitHub repository\n Manual Approach (only use when sw-python doesn\u0026rsquo;t work) If you run into problems when using the SkyWalking Python agent, you can try the following manual method, which calls @postfork, the low-level API of uWSGI, to initialize the agent.\nThe following is an example of using uWSGI with Flask.\nImportant: Never start the agent directly in the app; forked workers are unlikely to work properly (and even if they do, it\u0026rsquo;s down to luck). You should either add the following postfork hook, or try our new experimental automatic startup through the sw-python CLI (see above).\n# main.py # Note: The --master uwsgi flag must be on, otherwise the decorators will not be available to import from uwsgidecorators import postfork @postfork def init_tracing(): # Important: The import of skywalking must be inside the postfork function import os from skywalking import agent, config # append pid-suffix to instance name # This must be done to distinguish instances if you give your instance customized names # (highly recommended to identify workers) # Notice the -child(pid) part is required to tell the difference of each worker. agent_instance_name = f'\u0026lt;some_good_name\u0026gt;-child({os.getpid()})' config.init(agent_collector_backend_services='127.0.0.1:11800', agent_name='your awesome service', agent_instance_name=agent_instance_name) agent.start() from flask import Flask app = Flask(__name__) @app.route('/') def hello_world(): return 'Hello World!' if __name__ == '__main__': app.run() Run uWSGI normally without sw-python CLI:\nuwsgi --die-on-term \\  --http 0.0.0.0:5000 \\  --http-manage-expect \\  --master --workers 3 \\  --enable-threads \\  --threads 3 \\  --manage-script-name \\  --mount /=main:app ","title":"How to use with uWSGI?","url":"/docs/skywalking-python/v1.0.1/en/setup/faq/how-to-use-with-uwsgi/"},{"content":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following content will guide you. Let\u0026rsquo;s use the profiling module as an example of how to write a module.\n Please read the Module Design to understand what a module is. The module should be written in the skywalking-rover/pkg directory. So we create a new directory called profiling as the module\u0026rsquo;s code space. Implement the interface in the skywalking-rover/pkg/module. Each module has 6 methods, which are Name, RequiredModules, Config, Start, NotifyStartSuccess, and Shutdown.  Name returns the unique name of the module; this name is also used in the configuration file. RequiredModules returns the names of the modules this module depends on. In the profiling module, it needs to query the existing processes and send snapshots to the backend, so it needs the core and process modules. Config returns the config content of this module, which relates to the configuration file, and you could declare the tag(mapstructure) on the field to define its name in the configuration file. Start is triggered when the module needs to start; if the module fails to start, please return the error. 
NotifyStartSuccess is triggered after the Start methods of all active modules have succeeded. Shutdown   Add the configuration into the skywalking-rover/configs/rover_configs.yaml. It should be the same as the config declaration. Register the module in skywalking-rover/pkg/boot/register.go. Add unit tests or E2E tests to verify that the module works well. Write the documentation under the skywalking-rover/docs/en directory and add it to the documentation index file skywalking-rover/docs/menu.yml.  ","title":"How to write a new module?","url":"/docs/skywalking-rover/latest/en/guides/contribution/how-to-write-module/"},{"content":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following content will guide you. Let\u0026rsquo;s use the profiling module as an example of how to write a module.\n Please read the Module Design to understand what a module is. The module should be written in the skywalking-rover/pkg directory. So we create a new directory called profiling as the module\u0026rsquo;s code space. Implement the interface in the skywalking-rover/pkg/module. Each module has 6 methods, which are Name, RequiredModules, Config, Start, NotifyStartSuccess, and Shutdown.  Name returns the unique name of the module; this name is also used in the configuration file. RequiredModules returns the names of the modules this module depends on. In the profiling module, it needs to query the existing processes and send snapshots to the backend, so it needs the core and process modules. Config returns the config content of this module, which relates to the configuration file, and you could declare the tag(mapstructure) on the field to define its name in the configuration file. Start is triggered when the module needs to start; if the module fails to start, please return the error. NotifyStartSuccess is triggered after the Start methods of all active modules have succeeded. Shutdown   Add the configuration into the skywalking-rover/configs/rover_configs.yaml. It should be the same as the config declaration. Register the module in skywalking-rover/pkg/boot/register.go. Add unit tests or E2E tests to verify that the module works well. Write the documentation under the skywalking-rover/docs/en directory and add it to the documentation index file skywalking-rover/docs/menu.yml.  ","title":"How to write a new module?","url":"/docs/skywalking-rover/next/en/guides/contribution/how-to-write-module/"},{"content":"How to write a new module? If you want to add a custom module to SkyWalking Rover, the following contents would guide you. Let\u0026rsquo;s use the profiling module as an example of how to write a module.\n Please read the Module Design to understand what is module. The module should be written in the skywalking-rover/pkg directory. So we create a new directory called profiling as the module codes space. Implement the interface in the skywalking-rover/pkg/module. Each module has 6 methods, which are Name, RequiredModules, Config, Start, NotifyStartSuccess, and Shutdown.  Name returns the unique name of the module, also this name is used to define in the configuration file. RequiredModules returns this needs depended on module names. In the profiling module, it needs to query the existing process and send snapshots to the backend, so it needs the core and process module. Config returns the config content of this module, which relate to the configuration file, and you could declare the tag(mapstructure) with the field to define the name in the configuration file. 
Start is triggered when the module needs to start. if this module start failure, please return the error. NotifyStartSuccess is triggered after all the active modules are Start method success. Shutdown   Add the configuration into the skywalking-rover/configs/rover_configs.yaml. It should same as the config declaration. Register the module into skywalking-rover/pkg/boot/register.go. Add the Unit test or E2E testing for testing the module is works well. Write the documentation under the skywalking-rover/docs/en directory and add it to the documentation index file skywalking-rover/docs/menu.yml.  ","title":"How to write a new module?","url":"/docs/skywalking-rover/v0.6.0/en/guides/contribution/how-to-write-module/"},{"content":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following contents would guide you. Let\u0026rsquo;s use memory-queue as an example of how to write a plugin.\n  Choose the plugin category. As the memory-queue is a queue, the plugin should be written in the skywalking-satellite/plugins/queue directory. So we create a new directory called memory as the plugin codes space.\n  Implement the interface in the skywalking-satellite/plugins/queue/api. Each plugin has 3 common methods, which are Name(), Description(), DefaultConfig().\n Name() returns the unique name in the plugin category. Description() returns the description of the plugin, which would be used to generate the plugin documentation. DefaultConfig() returns the default plugin config with yaml pattern, which would be used as the default value in the plugin struct and to generate the plugin documentation.  type Queue struct { config.CommonFields // config  EventBufferSize int `mapstructure:\u0026#34;event_buffer_size\u0026#34;` // The maximum buffer event size.  // components  buffer *goconcurrentqueue.FixedFIFO } func (q *Queue) Name() string { return Name } func (q *Queue) Description() string { return \u0026#34;this is a memory queue to buffer the input event.\u0026#34; } func (q *Queue) DefaultConfig() string { return ` # The maximum buffer event size. event_buffer_size: 5000   Add unit test.\n  Generate the plugin docs.\n  make gen-docs ","title":"How to write a new plugin?","url":"/docs/skywalking-satellite/latest/en/guides/contribution/how-to-write-plugin/"},{"content":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following contents would guide you. Let\u0026rsquo;s use memory-queue as an example of how to write a plugin.\n  Choose the plugin category. As the memory-queue is a queue, the plugin should be written in the skywalking-satellite/plugins/queue directory. So we create a new directory called memory as the plugin codes space.\n  Implement the interface in the skywalking-satellite/plugins/queue/api. Each plugin has 3 common methods, which are Name(), Description(), DefaultConfig().\n Name() returns the unique name in the plugin category. Description() returns the description of the plugin, which would be used to generate the plugin documentation. DefaultConfig() returns the default plugin config with yaml pattern, which would be used as the default value in the plugin struct and to generate the plugin documentation.  type Queue struct { config.CommonFields // config  EventBufferSize int `mapstructure:\u0026#34;event_buffer_size\u0026#34;` // The maximum buffer event size.  
// components  buffer *goconcurrentqueue.FixedFIFO } func (q *Queue) Name() string { return Name } func (q *Queue) Description() string { return \u0026#34;this is a memory queue to buffer the input event.\u0026#34; } func (q *Queue) DefaultConfig() string { return ` # The maximum buffer event size. event_buffer_size: 5000   Add unit test.\n  Generate the plugin docs.\n  make gen-docs ","title":"How to write a new plugin?","url":"/docs/skywalking-satellite/next/en/guides/contribution/how-to-write-plugin/"},{"content":"How to write a new plugin? If you want to add a custom plugin in SkyWalking Satellite, the following contents would guide you. Let\u0026rsquo;s use memory-queue as an example of how to write a plugin.\n  Choose the plugin category. As the memory-queue is a queue, the plugin should be written in the skywalking-satellite/plugins/queue directory. So we create a new directory called memory as the plugin codes space.\n  Implement the interface in the skywalking-satellite/plugins/queue/api. Each plugin has 3 common methods, which are Name(), Description(), DefaultConfig().\n Name() returns the unique name in the plugin category. Description() returns the description of the plugin, which would be used to generate the plugin documentation. DefaultConfig() returns the default plugin config with yaml pattern, which would be used as the default value in the plugin struct and to generate the plugin documentation.  type Queue struct { config.CommonFields // config  EventBufferSize int `mapstructure:\u0026#34;event_buffer_size\u0026#34;` // The maximum buffer event size.  // components  buffer *goconcurrentqueue.FixedFIFO } func (q *Queue) Name() string { return Name } func (q *Queue) Description() string { return \u0026#34;this is a memory queue to buffer the input event.\u0026#34; } func (q *Queue) DefaultConfig() string { return ` # The maximum buffer event size. event_buffer_size: 5000   Add unit test.\n  Generate the plugin docs.\n  make gen-docs ","title":"How to write a new plugin?","url":"/docs/skywalking-satellite/v1.2.0/en/guides/contribution/how-to-write-plugin/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/latest/en/api/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
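As an illustration, the perfData endpoint documented above can be exercised with a small Python client (a sketch only; it uses the requests library and the sample field values from the section above, and assumes a local OAP receiver on port 12800):

```python
import requests

# Sample payload copied from the Performance Data Report section above.
perf_data = {
    "service": "web",
    "serviceVersion": "v0.0.1",
    "pagePath": "/index.html",
    "redirectTime": 10,
    "dnsTime": 10,
    "ttfbTime": 10,
    "tcpTime": 10,
    "transTime": 10,
    "domAnalysisTime": 10,
    "fptTime": 10,
    "domReadyTime": 10,
    "loadPageTime": 10,
    "resTime": 10,
    "sslTime": 10,
    "ttlTime": 10,
    "firstPackTime": 10,
    "fmpTime": 10,
}

resp = requests.post("http://localhost:12800/browser/perfData", json=perf_data)
print(resp.status_code)  # 204 (No Content) is expected on success
```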
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/next/en/api/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.0.0/en/protocols/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [{ \u0026#34;language\u0026#34;: \u0026#34;Lua\u0026#34; }] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: 
\u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"HTTP API Protocol","url":"/docs/main/v9.0.0/en/protocols/http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
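Similarly, the /v3/segment endpoint documented above can be smoke-tested from Python (a sketch only; the span fields mirror the sample above, and a real agent should use the official protocol implementations rather than hand-built JSON):

```python
import time
import uuid

import requests

now_ms = int(time.time() * 1000)

# A minimal segment with a single entry span, modelled on the sample above.
segment = {
    "traceId": str(uuid.uuid4()),
    "traceSegmentId": str(uuid.uuid4()),
    "service": "User_Service_Name",
    "serviceInstance": "User_Service_Instance_Name",
    "spans": [{
        "operationName": "/ingress",
        "startTime": now_ms,
        "endTime": now_ms + 15,
        "spanType": "Entry",
        "spanId": 0,
        "parentSpanId": -1,
        "isError": False,
        "spanLayer": "Http",
        "componentId": 6000,
    }],
}

resp = requests.post("http://localhost:12800/v3/segment", json=segment)
print(resp.status_code)
```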
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.1.0/en/protocols/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: 
[{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"HTTP API Protocol","url":"/docs/main/v9.1.0/en/protocols/http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.2.0/en/protocols/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: 
[{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"HTTP API Protocol","url":"/docs/main/v9.2.0/en/protocols/http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.3.0/en/protocols/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Trace Data Protocol v3. 
Read it for more details.\nInstance Management Detailed information about data format can be found in Instance Management.\n Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} Trace Report Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: 
[{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"HTTP API Protocol","url":"/docs/main/v9.3.0/en/protocols/http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.4.0/en/api/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.5.0/en/api/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.6.0/en/api/browser-http-api-protocol/"},{"content":"HTTP API Protocol HTTP API Protocol defines the API data format, including API request and response data format. They use the HTTP1.1 wrapper of the official SkyWalking Browser Protocol. 
Read it for more details.\nPerformance Data Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/perfData Send a performance data object in JSON format.\nInput:\n{ \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;redirectTime\u0026#34;: 10, \u0026#34;dnsTime\u0026#34;: 10, \u0026#34;ttfbTime\u0026#34;: 10, \u0026#34;tcpTime\u0026#34;: 10, \u0026#34;transTime\u0026#34;: 10, \u0026#34;domAnalysisTime\u0026#34;: 10, \u0026#34;fptTime\u0026#34;: 10, \u0026#34;domReadyTime\u0026#34;: 10, \u0026#34;loadPageTime\u0026#34;: 10, \u0026#34;resTime\u0026#34;: 10, \u0026#34;sslTime\u0026#34;: 10, \u0026#34;ttlTime\u0026#34;: 10, \u0026#34;firstPackTime\u0026#34;: 10, \u0026#34;fmpTime\u0026#34;: 10 } OutPut:\nHttp Status: 204\nError Log Report Detailed information about data format can be found in BrowserPerf.proto.\nPOST http://localhost:12800/browser/errorLogs Send an error log object list in JSON format.\nInput:\n[ { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; }, { \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b02\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } ] OutPut:\nHttp Status: 204\nPOST http://localhost:12800/browser/errorLog Send a single error log object in JSON format.\nInput:\n{ \u0026#34;uniqueId\u0026#34;: \u0026#34;55ec6178-3fb7-43ef-899c-a26944407b01\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;web\u0026#34;, \u0026#34;serviceVersion\u0026#34;: \u0026#34;v0.0.1\u0026#34;, \u0026#34;pagePath\u0026#34;: \u0026#34;/index.html\u0026#34;, \u0026#34;category\u0026#34;: \u0026#34;ajax\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;line\u0026#34;: 1, \u0026#34;col\u0026#34;: 1, \u0026#34;stack\u0026#34;: \u0026#34;error\u0026#34;, \u0026#34;errorUrl\u0026#34;: \u0026#34;/index.html\u0026#34; } OutPut:\nHttp Status: 204\n","title":"HTTP API Protocol","url":"/docs/main/v9.7.0/en/api/browser-http-api-protocol/"},{"content":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. 
With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","title":"HTTP Restful URI recognition","url":"/docs/main/latest/en/setup/ai-pipeline/http-restful-uri-pattern/"},{"content":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  
rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","title":"HTTP Restful URI recognition","url":"/docs/main/next/en/setup/ai-pipeline/http-restful-uri-pattern/"},{"content":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. No configuration to set periods of feedRawData and fetchAllPatterns services.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","title":"HTTP Restful URI recognition","url":"/docs/main/v9.5.0/en/setup/ai-pipeline/http-restful-uri-pattern/"},{"content":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. 
With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","title":"HTTP Restful URI recognition","url":"/docs/main/v9.6.0/en/setup/ai-pipeline/http-restful-uri-pattern/"},{"content":"HTTP Restful URI recognition As introduced in the Group Parameterized Endpoints doc, HTTP Restful URIs are identified as endpoints. With some additional rules, we can identify the parameters in the URI and group the endpoints in case of annoying and huge size of endpoint candidates with low value of the metrics.\nIn the ML/AI specific fields, decision trees or neural networks can be trained on labeled URI data to automatically recognize and classify different URI patterns, as well as many other ways.\nIn this pipeline, OAP has the capabilities to cache the URI candidates with occurrence count, and push the data to 3rd party for further analysis. Then OAP would pull the analyzed results for processing the further telemetry traffic.\nSet up OAP to connect remote URI recognition server uriRecognitionServerAddr and uriRecognitionServerPort are the configurations to set up the remote URI recognition server.\nThe URI recognition server is a gRPC server, which is defined in URIRecognition.proto.\nservice HttpUriRecognitionService { // Sync for the pattern recognition dictionary.  rpc fetchAllPatterns(HttpUriRecognitionSyncRequest) returns (HttpUriRecognitionResponse) {} // Feed new raw data and matched patterns to the AI-server.  
rpc feedRawData(HttpUriRecognitionRequest) returns (google.protobuf.Empty) {}} fetchAllPatterns service  fetchAllPatterns is up and running in 1 minute period from every OAP to fetch all recognized patterns from the remote server.\n feedRawData service  feedRawData is running in 25-30 minutes period to push the raw data to the remote server for training.\nConfigurations  core/maxHttpUrisNumberPerService The max number of HTTP URIs per service for further URI pattern recognition. core/syncPeriodHttpUriRecognitionPattern The period of HTTP URI pattern recognition(feedRawData). Unit is second, 10s by default. core/trainingPeriodHttpUriRecognitionPattern The training period of HTTP URI pattern recognition(fetchAllPatterns). Unit is second, 60s by default.  Optional Server Implementation R3 RESTful Pattern Recognition(R3) is an Apache 2.0 licensed implementation for the URI recognition, and natively supports URIRecognition.proto defined in OAP.\n","title":"HTTP Restful URI recognition","url":"/docs/main/v9.7.0/en/setup/ai-pipeline/http-restful-uri-pattern/"},{"content":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s implementation.\nIt utilizes the -toolexec flag during Golang compilation to introduce custom programs that intercept all original files in the compilation stage. This allows for the modification or addition of files to be completed seamlessly.\nToolchain in Golang The -toolexec flag in Golang is a powerful feature that can be used during stages such as build, test, and others. When this flag is used, developers can provide a custom program or script to replace the default go tools functionality. This offers greater flexibility and control over the build, test, or analysis processes.\nWhen passing this flag during a go build, it can intercept the execution flow of commands such as compile, asm, and link, which are required during Golang\u0026rsquo;s compilation process. These commands are also referred to as the toolchain within Golang.\nInformation about the Toolchain The following command demonstrates the parameter information for the specified -toolexec program when it is invoked:\n/usr/bin/skywalking-go /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile -o /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/_pkg_.a -trimpath /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011=\u0026gt; -p runtime -std -+ -buildid zSeDyjJh0lgXlIqBZScI/zSeDyjJh0lgXlIqBZScI -goversion go1.19.2 -symabis /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/symabis -c=4 -nolocalimports -importcfg /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/importcfg -pack -asmhdr /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/go_asm.h /usr/local/opt/go/libexec/src/runtime/alg.go /usr/local/opt/go/libexec/src/runtime/asan0.go ... The code above demonstrates the parameters used when a custom program is executed, which mainly includes the following information:\n Current toolchain tool: In this example, it is a compilation tool with the path: /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile. Target file of the tool: The final target file that the current tool needs to generate. Package information: The module package path information being compiled, which is the parameter value of the -p flag. The current package path is runtime. Temporary directory address: For each compilation, the Go program would generate a corresponding temporary directory. 
This directory contains all the temporary files required for the compilation. Files to be compiled: Many .go file paths can be seen at the end of the command, which are the file path list of the module that needs to be compiled.  Toolchain with SkyWalking Go Agent SkyWalking Go Agent works by intercepting the compile program through the toolchain and making changes to the program based on the information above. The main parts include:\n AST: Using AST to parse and manipulate the codes. File copying/generation: Copy or generate files to the temporary directory required for the compilation, and add file path addresses when the compilation command is executed. Proxy command execution: After completing the modification of the specified package, the new codes are weaved into the target.  Hybrid Compilation After enhancing the program with SkyWalking Go Agent, the following parts of the program will be enhanced:\n SkyWalking Go: The agent core part of the code would be dynamically copied to the agent path for plugin use. Plugins: Enhance the specified framework code according to the enhancement rules of the plugins. Runtime: Enhance the runtime package in Go, including extensions for goroutines and other content. Main: Enhance the main package during system startup, for stating the system with Agent.  ","title":"Hybrid Compilation","url":"/docs/skywalking-go/latest/en/concepts-and-designs/hybrid-compilation/"},{"content":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s implementation.\nIt utilizes the -toolexec flag during Golang compilation to introduce custom programs that intercept all original files in the compilation stage. This allows for the modification or addition of files to be completed seamlessly.\nToolchain in Golang The -toolexec flag in Golang is a powerful feature that can be used during stages such as build, test, and others. When this flag is used, developers can provide a custom program or script to replace the default go tools functionality. This offers greater flexibility and control over the build, test, or analysis processes.\nWhen passing this flag during a go build, it can intercept the execution flow of commands such as compile, asm, and link, which are required during Golang\u0026rsquo;s compilation process. These commands are also referred to as the toolchain within Golang.\nInformation about the Toolchain The following command demonstrates the parameter information for the specified -toolexec program when it is invoked:\n/usr/bin/skywalking-go /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile -o /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/_pkg_.a -trimpath /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011=\u0026gt; -p runtime -std -+ -buildid zSeDyjJh0lgXlIqBZScI/zSeDyjJh0lgXlIqBZScI -goversion go1.19.2 -symabis /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/symabis -c=4 -nolocalimports -importcfg /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/importcfg -pack -asmhdr /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/go_asm.h /usr/local/opt/go/libexec/src/runtime/alg.go /usr/local/opt/go/libexec/src/runtime/asan0.go ... The code above demonstrates the parameters used when a custom program is executed, which mainly includes the following information:\n Current toolchain tool: In this example, it is a compilation tool with the path: /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile. 
Target file of the tool: The final target file that the current tool needs to generate. Package information: The module package path information being compiled, which is the parameter value of the -p flag. The current package path is runtime. Temporary directory address: For each compilation, the Go program would generate a corresponding temporary directory. This directory contains all the temporary files required for the compilation. Files to be compiled: Many .go file paths can be seen at the end of the command, which are the file path list of the module that needs to be compiled.  Toolchain with SkyWalking Go Agent SkyWalking Go Agent works by intercepting the compile program through the toolchain and making changes to the program based on the information above. The main parts include:\n AST: Using AST to parse and manipulate the codes. File copying/generation: Copy or generate files to the temporary directory required for the compilation, and add file path addresses when the compilation command is executed. Proxy command execution: After completing the modification of the specified package, the new codes are weaved into the target.  Hybrid Compilation After enhancing the program with SkyWalking Go Agent, the following parts of the program will be enhanced:\n SkyWalking Go: The agent core part of the code would be dynamically copied to the agent path for plugin use. Plugins: Enhance the specified framework code according to the enhancement rules of the plugins. Runtime: Enhance the runtime package in Go, including extensions for goroutines and other content. Main: Enhance the main package during system startup, for stating the system with Agent.  ","title":"Hybrid Compilation","url":"/docs/skywalking-go/next/en/concepts-and-designs/hybrid-compilation/"},{"content":"Hybrid Compilation Hybrid compilation technology is the base of SkyWalking Go\u0026rsquo;s implementation.\nIt utilizes the -toolexec flag during Golang compilation to introduce custom programs that intercept all original files in the compilation stage. This allows for the modification or addition of files to be completed seamlessly.\nToolchain in Golang The -toolexec flag in Golang is a powerful feature that can be used during stages such as build, test, and others. When this flag is used, developers can provide a custom program or script to replace the default go tools functionality. This offers greater flexibility and control over the build, test, or analysis processes.\nWhen passing this flag during a go build, it can intercept the execution flow of commands such as compile, asm, and link, which are required during Golang\u0026rsquo;s compilation process. 
These commands are also referred to as the toolchain within Golang.\nInformation about the Toolchain The following command demonstrates the parameter information for the specified -toolexec program when it is invoked:\n/usr/bin/skywalking-go /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile -o /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/_pkg_.a -trimpath /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011=\u0026gt; -p runtime -std -+ -buildid zSeDyjJh0lgXlIqBZScI/zSeDyjJh0lgXlIqBZScI -goversion go1.19.2 -symabis /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/symabis -c=4 -nolocalimports -importcfg /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/importcfg -pack -asmhdr /var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build452071603/b011/go_asm.h /usr/local/opt/go/libexec/src/runtime/alg.go /usr/local/opt/go/libexec/src/runtime/asan0.go ... The code above demonstrates the parameters used when a custom program is executed, which mainly includes the following information:\n Current toolchain tool: In this example, it is a compilation tool with the path: /usr/local/opt/go/libexec/pkg/tool/darwin_amd64/compile. Target file of the tool: The final target file that the current tool needs to generate. Package information: The module package path information being compiled, which is the parameter value of the -p flag. The current package path is runtime. Temporary directory address: For each compilation, the Go program would generate a corresponding temporary directory. This directory contains all the temporary files required for the compilation. Files to be compiled: Many .go file paths can be seen at the end of the command, which are the file path list of the module that needs to be compiled.  Toolchain with SkyWalking Go Agent SkyWalking Go Agent works by intercepting the compile program through the toolchain and making changes to the program based on the information above. The main parts include:\n AST: Using AST to parse and manipulate the codes. File copying/generation: Copy or generate files to the temporary directory required for the compilation, and add file path addresses when the compilation command is executed. Proxy command execution: After completing the modification of the specified package, the new codes are weaved into the target.  Hybrid Compilation After enhancing the program with SkyWalking Go Agent, the following parts of the program will be enhanced:\n SkyWalking Go: The agent core part of the code would be dynamically copied to the agent path for plugin use. Plugins: Enhance the specified framework code according to the enhancement rules of the plugins. Runtime: Enhance the runtime package in Go, including extensions for goroutines and other content. Main: Enhance the main package during system startup, for stating the system with Agent.  ","title":"Hybrid Compilation","url":"/docs/skywalking-go/v0.4.0/en/concepts-and-designs/hybrid-compilation/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/latest/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/next/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occured because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.0.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occured because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.1.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.2.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.3.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.4.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.5.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.6.0/en/faq/install_agent_on_websphere/"},{"content":"IllegalStateException when installing Java agent on WebSphere This issue was found in our community discussion and feedback. A user installed the SkyWalking Java agent on WebSphere 7.0.0.11 and ibm jdk 1.8_20160719 and 1.7.0_20150407, and experienced the following error logs:\nWARN 2019-05-09 17:01:35:905 SkywalkingAgent-1-GRPCChannelManager-0 ProtectiveShieldMatcher : Byte-buddy occurs exception when match type. 
java.lang.IllegalStateException: Cannot resolve type description for java.security.PrivilegedAction at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Resolution$Illegal.resolve(TypePool.java:144) at org.apache.skywalking.apm.dependencies.net.bytebuddy.pool.TypePool$Default$WithLazyResolution$LazyTypeDescription.delegate(TypePool.java:1392) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$AbstractBase$OfSimpleType$WithDelegation.getInterfaces(TypeDescription.java:8016) at org.apache.skywalking.apm.dependencies.net.bytebuddy.description.type.TypeDescription$Generic$OfNonGenericType.getInterfaces(TypeDescription.java:3621) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:53) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.hasInterface(HasSuperTypeMatcher.java:54) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:38) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.HasSuperTypeMatcher.matches(HasSuperTypeMatcher.java:15) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Conjunction.matches(ElementMatcher.java:107) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) at org.apache.skywalking.apm.dependencies.net.bytebuddy.matcher.ElementMatcher$Junction$Disjunction.matches(ElementMatcher.java:147) ... The exception occurred because access grant was required in WebSphere. Simply follow these steps:\n Set the agent\u0026rsquo;s owner to the owner of WebSphere. Add \u0026ldquo;grant codeBase \u0026ldquo;file:${agent_dir}/-\u0026rdquo; { permission java.security.AllPermission; };\u0026rdquo; in the file of \u0026ldquo;server.policy\u0026rdquo;.  ","title":"IllegalStateException when installing Java agent on WebSphere","url":"/docs/main/v9.7.0/en/faq/install_agent_on_websphere/"},{"content":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description Default Value     skywalking_agent.enable Enable skywalking_agent extension or not. Off   skywalking_agent.log_file Log file path. /tmp/skywalking-agent.log   skywalking_agent.log_level Log level: one of OFF, TRACE, DEBUG, INFO, WARN, ERROR. INFO   skywalking_agent.runtime_dir Skywalking agent runtime directory. /tmp/skywalking-agent   skywalking_agent.server_addr Address of skywalking oap server. Only available when reporter_type is grpc. 127.0.0.1:11800   skywalking_agent.service_name Application service name. hello-skywalking   skywalking_agent.skywalking_version Skywalking version, 8 or 9. 8   skywalking_agent.authentication Skywalking authentication token, let it empty if the backend isn\u0026rsquo;t enabled. Only available when reporter_type is grpc.    skywalking_agent.worker_threads Skywalking worker threads, 0 will auto set as the cpu core size. 0   skywalking_agent.enable_tls Wether to enable tls for gPRC, default is false. 
Only available when reporter_type is grpc. Off   skywalking_agent.ssl_trusted_ca_path The gRPC SSL trusted ca file. Only available when reporter_type is grpc.    skywalking_agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.heartbeat_period Agent heartbeat report period. Unit, second. 30   skywalking_agent.properties_report_period_factor The agent sends the instance properties to the backend every heartbeat_period * properties_report_period_factor seconds. 10   skywalking_agent.enable_zend_observer Whether to use zend observer instead of zend_execute_ex to hook the functions, this feature is only available for PHP8+. Off   skywalking_agent.reporter_type Reporter type, optional values are grpc and kafka. grpc   skywalking_agent.kafka_bootstrap_servers A list of host/port pairs to use for connect to the Kafka cluster. Only available when reporter_type is kafka.    skywalking_agent.kafka_producer_config Configure Kafka Producer configuration in JSON format {\u0026quot;key\u0026quot;: \u0026quot;value}. Only available when reporter_type is kafka. {}    ","title":"INI Settings","url":"/docs/skywalking-php/latest/en/configuration/ini-settings/"},{"content":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description Default Value     skywalking_agent.enable Enable skywalking_agent extension or not. Off   skywalking_agent.log_file Log file path. /tmp/skywalking-agent.log   skywalking_agent.log_level Log level: one of OFF, TRACE, DEBUG, INFO, WARN, ERROR. INFO   skywalking_agent.runtime_dir Skywalking agent runtime directory. /tmp/skywalking-agent   skywalking_agent.server_addr Address of skywalking oap server. Only available when reporter_type is grpc. 127.0.0.1:11800   skywalking_agent.service_name Application service name. hello-skywalking   skywalking_agent.skywalking_version Skywalking version, 8 or 9. 8   skywalking_agent.authentication Skywalking authentication token, let it empty if the backend isn\u0026rsquo;t enabled. Only available when reporter_type is grpc.    skywalking_agent.worker_threads Skywalking worker threads, 0 will auto set as the cpu core size. 0   skywalking_agent.enable_tls Wether to enable tls for gPRC, default is false. Only available when reporter_type is grpc. Off   skywalking_agent.ssl_trusted_ca_path The gRPC SSL trusted ca file. Only available when reporter_type is grpc.    skywalking_agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.heartbeat_period Agent heartbeat report period. Unit, second. 30   skywalking_agent.properties_report_period_factor The agent sends the instance properties to the backend every heartbeat_period * properties_report_period_factor seconds. 10   skywalking_agent.enable_zend_observer Whether to use zend observer instead of zend_execute_ex to hook the functions, this feature is only available for PHP8+. Off   skywalking_agent.reporter_type Reporter type, optional values are grpc and kafka. 
grpc   skywalking_agent.kafka_bootstrap_servers A list of host/port pairs to use for connect to the Kafka cluster. Only available when reporter_type is kafka.    skywalking_agent.kafka_producer_config Configure Kafka Producer configuration in JSON format {\u0026quot;key\u0026quot;: \u0026quot;value}. Only available when reporter_type is kafka. {}   skywalking_agent.inject_context Whether to enable automatic injection of skywalking context variables (such as SW_TRACE_ID). For php-fpm mode, it will be injected into the $_SERVER variable. For swoole mode, it will be injected into the $request-\u0026gt;server variable. Off    ","title":"INI Settings","url":"/docs/skywalking-php/next/en/configuration/ini-settings/"},{"content":"INI Settings This is the configuration list supported in php.ini.\n   Configuration Item Description Default Value     skywalking_agent.enable Enable skywalking_agent extension or not. Off   skywalking_agent.log_file Log file path. /tmp/skywalking-agent.log   skywalking_agent.log_level Log level: one of OFF, TRACE, DEBUG, INFO, WARN, ERROR. INFO   skywalking_agent.runtime_dir Skywalking agent runtime directory. /tmp/skywalking-agent   skywalking_agent.server_addr Address of skywalking oap server. Only available when reporter_type is grpc. 127.0.0.1:11800   skywalking_agent.service_name Application service name. hello-skywalking   skywalking_agent.skywalking_version Skywalking version, 8 or 9. 8   skywalking_agent.authentication Skywalking authentication token, let it empty if the backend isn\u0026rsquo;t enabled. Only available when reporter_type is grpc.    skywalking_agent.worker_threads Skywalking worker threads, 0 will auto set as the cpu core size. 0   skywalking_agent.enable_tls Wether to enable tls for gPRC, default is false. Only available when reporter_type is grpc. Off   skywalking_agent.ssl_trusted_ca_path The gRPC SSL trusted ca file. Only available when reporter_type is grpc.    skywalking_agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. Only available when reporter_type is grpc.    skywalking_agent.heartbeat_period Agent heartbeat report period. Unit, second. 30   skywalking_agent.properties_report_period_factor The agent sends the instance properties to the backend every heartbeat_period * properties_report_period_factor seconds. 10   skywalking_agent.enable_zend_observer Whether to use zend observer instead of zend_execute_ex to hook the functions, this feature is only available for PHP8+. Off   skywalking_agent.reporter_type Reporter type, optional values are grpc and kafka. grpc   skywalking_agent.kafka_bootstrap_servers A list of host/port pairs to use for connect to the Kafka cluster. Only available when reporter_type is kafka.    skywalking_agent.kafka_producer_config Configure Kafka Producer configuration in JSON format {\u0026quot;key\u0026quot;: \u0026quot;value}. Only available when reporter_type is kafka. {}    ","title":"INI Settings","url":"/docs/skywalking-php/v0.7.0/en/configuration/ini-settings/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. 
Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up for the first time.\nBut there may be some unexpected events that may occur with the storage, such as: when multiple Elastic Search indexes are created concurrently, these backend instances would start up at the same time; when there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit gracefully after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up the backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/latest/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up for the first time.\nBut there may be some unexpected events that may occur with the storage, such as: when multiple Elastic Search indexes are created concurrently, these backend instances would start up at the same time; when there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit gracefully after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up the backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/next/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up for the first time.\nBut there may be some unexpected events that may occur with the storage, such as: when multiple Elastic Search indexes are created concurrently, these backend instances would start up at the same time; when there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit gracefully after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up the backend. 
You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.0.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.1.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.2.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. 
This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.3.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.4.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.5.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. 
Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.6.0/en/setup/backend/backend-init-mode/"},{"content":"Init mode The SkyWalking backend supports multiple storage implementors. Most of them would automatically initialize the storage, such as Elastic Search or Database, when the backend starts up first.\nBut there may be some unexpected events that may occur with the storage, such as When multiple Elastic Search indexes are created concurrently, these backend instances would startup at the same time., When there is a change, the APIs of Elastic Search would be blocked without reporting any exception. This often happens on container management platforms, such as k8s.\nThis is where you need the Init mode startup.\nSolution Only a single instance should run in the Init mode before other instances start up. And this instance will exit graciously after all initialization steps are done.\nUse oapServiceInit.sh/oapServiceInit.bat to start up backend. You should see the following logs:\n 2018-11-09 23:04:39,465 - org.apache.skywalking.oap.server.starter.OAPServerStartUp -2214 [main] INFO [] - OAP starts up in init mode successfully, exit now\u0026hellip;\n Kubernetes Initialization in this mode would be included in our Kubernetes scripts and Helm.\n","title":"Init mode","url":"/docs/main/v9.7.0/en/setup/backend/backend-init-mode/"},{"content":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our website, currently we have pre-built binaries for macOS, Linux and Windows. Extract the tarball and add bin/\u0026lt;os\u0026gt;/e2e to you PATH environment variable.\nInstall from source codes If you want to try some features that are not released yet, you can compile from the source code.\nmkdir skywalking-infra-e2e \u0026amp;\u0026amp; cd skywalking-infra-e2e git clone https://github.com/apache/skywalking-infra-e2e.git . 
make build Then add the binary in bin/\u0026lt;os\u0026gt;/e2e to your PATH.\nInstall via go install If you already have Go SDK installed, you can also directly install e2e via go install.\ngo install github.com/apache/skywalking-infra-e2e/cmd/e2e@\u0026lt;revision\u0026gt; Note that installation via go install is only supported after Git commit 2a33478 so you can only go install a revision afterwards.\n","title":"Install SkyWalking Infra E2E","url":"/docs/skywalking-infra-e2e/latest/en/setup/install/"},{"content":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our website, currently we have pre-built binaries for macOS, Linux and Windows. Extract the tarball and add bin/\u0026lt;os\u0026gt;/e2e to you PATH environment variable.\nInstall from source codes If you want to try some features that are not released yet, you can compile from the source code.\nmkdir skywalking-infra-e2e \u0026amp;\u0026amp; cd skywalking-infra-e2e git clone https://github.com/apache/skywalking-infra-e2e.git . make build Then add the binary in bin/\u0026lt;os\u0026gt;/e2e to your PATH.\nInstall via go install If you already have Go SDK installed, you can also directly install e2e via go install.\ngo install github.com/apache/skywalking-infra-e2e/cmd/e2e@\u0026lt;revision\u0026gt; Note that installation via go install is only supported after Git commit 2a33478 so you can only go install a revision afterwards.\n","title":"Install SkyWalking Infra E2E","url":"/docs/skywalking-infra-e2e/next/en/setup/install/"},{"content":"Install SkyWalking Infra E2E Download pre-built binaries Download the pre-built binaries from our website, currently we have pre-built binaries for macOS, Linux and Windows. Extract the tarball and add bin/\u0026lt;os\u0026gt;/e2e to you PATH environment variable.\nInstall from source codes If you want to try some features that are not released yet, you can compile from the source code.\nmkdir skywalking-infra-e2e \u0026amp;\u0026amp; cd skywalking-infra-e2e git clone https://github.com/apache/skywalking-infra-e2e.git . make build Then add the binary in bin/\u0026lt;os\u0026gt;/e2e to your PATH.\nInstall via go install If you already have Go SDK installed, you can also directly install e2e via go install.\ngo install github.com/apache/skywalking-infra-e2e/cmd/e2e@\u0026lt;revision\u0026gt; Note that installation via go install is only supported after Git commit 2a33478 so you can only go install a revision afterwards.\n","title":"Install SkyWalking Infra E2E","url":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/install/"},{"content":"Installation Banyand is the daemon server of the BanyanDB database. This section will show several paths installing it in your environment.\nGet Binaries Released binaries Get binaries from the download.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS that introduced several platform-specific characters to the building system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries To issue the below command to get basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... 
chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds out a static binary for releasing. make -C banyand debug gives a binary for debugging without the complier\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multi-platforms.  Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls banyand/build/bin bydbctl Setup Banyand Banyand shows its available commands and arguments by\n$ ./banyand-server ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ BanyanDB, as an observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data Usage: [command] Available Commands: completion generate the autocompletion script for the specified shell help Help about any command liaison Run as the liaison server meta Run as the meta server standalone Run as the standalone server storage Run as the storage server Flags: -h, --help help for this command -v, --version version for this command Use \u0026#34; [command] --help\u0026#34; for more information about a command. Banyand is running as a standalone server by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 if no errors occurred.\nSetup Multiple Banyand as Cluster Firstly, you need to setup a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster. The etcd cluster can be setup by the etcd installation guide. 
The etcd version should be v3.1 or above.\nThen, you can start the BanyanDB nodes.\nConsidering the etcd cluster is spread across three nodes with the addresses `10.0.0.1:2379`, `10.0.0.2:2379`, and `10.0.0.3:2379`, data nodes and liaison nodes run as independent processes: $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Docker \u0026amp; Kubernetes The docker image of banyandb is available on Docker Hub.\nIf you want to onboard banyandb to Kubernetes, you can refer to the banyandb-helm.\n","title":"Installation","url":"/docs/skywalking-banyandb/latest/installation/"},{"content":"Installation Banyand is the daemon server of the BanyanDB database. This section shows several ways to install it in your environment.\nGet Binaries Released binaries Get binaries from the download page.\nBuild From Source Requirements Users who want to build a binary from sources have to set up:\n Go 1.20 Node 18.16 Git \u0026gt;= 2.30 Linux, macOS or Windows+WSL2 GNU make  Windows BanyanDB is built on Linux and macOS, which introduces several platform-specific characteristics to the build system. Therefore, we highly recommend you use WSL2+Ubuntu to execute tasks of the Makefile.\nBuild Binaries Issue the commands below to get the basic binaries of banyand and bydbctl.\n$ make generate ... $ make build ... --- banyand: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... chmod +x build/bin/banyand-server Done building banyand server make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/banyand\u0026#39; ... --- bydbctl: all --- make[1]: Entering directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; ... chmod +x build/bin/bydbctl Done building bydbctl make[1]: Leaving directory \u0026#39;\u0026lt;path_to_project_root\u0026gt;/bydbctl\u0026#39; The build system provides a series of binary options as well.\n make -C banyand banyand-server generates a basic banyand-server. make -C banyand release builds a static binary for release. make -C banyand debug gives a binary for debugging without the compiler\u0026rsquo;s optimizations. make -C banyand debug-static is a static binary for debugging. make -C bydbctl release cross-builds several binaries for multiple platforms.  
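For orientation only, a minimal shell sketch of invoking two of the targets listed above; the target names are the ones documented here, while running them from the repository root is an assumption:

```shell
# Illustrative sketch (not an official recipe): build the static release
# variants of banyand and bydbctl using the Makefile targets listed above.
make -C banyand release
make -C bydbctl release
```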
Then users get binaries as below\n$ ls banyand/build/bin banyand-server banyand-server-debug banyand-server-debug-static banyand-server-static $ ls banyand/build/bin bydbctl Setup Banyand Banyand shows its available commands and arguments by\n$ ./banyand-server ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ BanyanDB, as an observability database, aims to ingest, analyze and store Metrics, Tracing and Logging data Usage: [command] Available Commands: completion generate the autocompletion script for the specified shell help Help about any command liaison Run as the liaison server meta Run as the meta server standalone Run as the standalone server storage Run as the storage server Flags: -h, --help help for this command -v, --version version for this command Use \u0026#34; [command] --help\u0026#34; for more information about a command. Banyand is running as a standalone server by\n$ ./banyand-server standalone ██████╗ █████╗ ███╗ ██╗██╗ ██╗ █████╗ ███╗ ██╗██████╗ ██████╗ ██╔══██╗██╔══██╗████╗ ██║╚██╗ ██╔╝██╔══██╗████╗ ██║██╔══██╗██╔══██╗ ██████╔╝███████║██╔██╗ ██║ ╚████╔╝ ███████║██╔██╗ ██║██║ ██║██████╔╝ ██╔══██╗██╔══██║██║╚██╗██║ ╚██╔╝ ██╔══██║██║╚██╗██║██║ ██║██╔══██╗ ██████╔╝██║ ██║██║ ╚████║ ██║ ██║ ██║██║ ╚████║██████╔╝██████╔╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚═════╝ ***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC The banyand-server would be listening on the 0.0.0.0:17912 if no errors occurred.\nSetup Multiple Banyand as Cluster Firstly, you need to setup a etcd cluster which is required for the metadata module to provide the metadata service and nodes discovery service for the whole cluster. The etcd cluster can be setup by the etcd installation guide. The etcd version should be v3.1 or above.\nThen, you can start the metadata module by\nConsidering the etcd cluster is spread across three nodes with the addresses `10.0.0.1:2379`, `10.0.0.2:2379`, and `10.0.0.3:2379`, Data nodes and liaison nodes are running as independent processes by ```shell $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server storage --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; $ ./banyand-server liaison --etcd-endpoints=http://10.0.0.1:2379,http://10.0.0.2:2379,http://10.0.0.3:2379 \u0026lt;flags\u0026gt; Docker \u0026amp; Kubernetes The docker image of banyandb is available on Docker Hub.\nIf you want to onboard banyandb to the Kubernetes, you can refer to the banyandb-helm.\n","title":"Installation","url":"/docs/skywalking-banyandb/v0.5.0/installation/"},{"content":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the SkyWalking Python agent via various ways described next.\n Already installed? 
Check out easy ways to start the agent in your application\n  Non-intrusive  | Intrusive  | Containerization\n  All available configurations are listed here\n Important Note on Different Reporter Protocols Currently only gRPC protocol fully supports all available telemetry capabilities in the Python agent.\nWhile gRPC is highly recommended, we provide alternative protocols to suit your production requirements.\nPlease refer to the table below before deciding which report protocol suits best for you.\n   Reporter Protocol Trace Reporter Log Reporter Meter Reporter Profiling     gRPC ✅ ✅ ✅ ✅   HTTP ✅ ✅ ❌ ❌   Kafka ✅ ✅ ✅ ❌    From PyPI  If you want to try out the latest features that are not released yet, please refer to this guide to build from sources.\n The Python agent module is published to PyPI, from where you can use pip to install:\n# Install the latest version, using the default gRPC protocol to report data to OAP pip install \u0026#34;apache-skywalking\u0026#34; # Install support for every protocol (gRPC, HTTP, Kafka) pip install \u0026#34;apache-skywalking[all]\u0026#34; # Install the latest version, using the http protocol to report data to OAP pip install \u0026#34;apache-skywalking[http]\u0026#34; # Install the latest version, using the kafka protocol to report data to OAP pip install \u0026#34;apache-skywalking[kafka]\u0026#34; # Install a specific version x.y.z # pip install apache-skywalking==x.y.z pip install apache-skywalking==0.1.0 # For example, install version 0.1.0 no matter what the latest version is From Docker Hub SkyWalking Python agent provides convenient dockerfile and images for easy integration utilizing its auto-bootstrap capability.\nSimply pull SkyWalking Python image from Docker Hub based on desired agent version, protocol and Python version.\nFROMapache/skywalking-python:0.8.0-grpc-py3.10# ... build your Python application# If you prefer compact images (built from official Python slim image)FROMapache/skywalking-python:0.8.0-grpc-py3.10-slim# ... build your Python applicationThen, You can build your Python application image based on our agent-enabled Python images and start your applications with SkyWalking agent enabled for you. Please refer to our Containerization Guide for further instructions on integration and configuring.\nFrom Source Code Please refer to the How-to-build-from-sources FAQ.\n","title":"Installation","url":"/docs/skywalking-python/latest/en/setup/installation/"},{"content":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the SkyWalking Python agent via various ways described next.\n Already installed? 
Check out easy ways to start the agent in your application\n  Non-intrusive  | Intrusive  | Containerization\n  All available configurations are listed here\n Important Note on Different Reporter Protocols Currently only gRPC protocol fully supports all available telemetry capabilities in the Python agent.\nWhile gRPC is highly recommended, we provide alternative protocols to suit your production requirements.\nPlease refer to the table below before deciding which report protocol suits best for you.\n   Reporter Protocol Trace Reporter Log Reporter Meter Reporter Profiling     gRPC ✅ ✅ ✅ ✅   HTTP ✅ ✅ ❌ ❌   Kafka ✅ ✅ ✅ ❌    From PyPI  If you want to try out the latest features that are not released yet, please refer to this guide to build from sources.\n The Python agent module is published to PyPI, from where you can use pip to install:\n# Install the latest version, using the default gRPC protocol to report data to OAP pip install \u0026#34;apache-skywalking\u0026#34; # Install support for every protocol (gRPC, HTTP, Kafka) pip install \u0026#34;apache-skywalking[all]\u0026#34; # Install the latest version, using the http protocol to report data to OAP pip install \u0026#34;apache-skywalking[http]\u0026#34; # Install the latest version, using the kafka protocol to report data to OAP pip install \u0026#34;apache-skywalking[kafka]\u0026#34; # Install a specific version x.y.z # pip install apache-skywalking==x.y.z pip install apache-skywalking==0.1.0 # For example, install version 0.1.0 no matter what the latest version is From Docker Hub SkyWalking Python agent provides convenient dockerfile and images for easy integration utilizing its auto-bootstrap capability.\nSimply pull SkyWalking Python image from Docker Hub based on desired agent version, protocol and Python version.\nFROMapache/skywalking-python:0.8.0-grpc-py3.10# ... build your Python application# If you prefer compact images (built from official Python slim image)FROMapache/skywalking-python:0.8.0-grpc-py3.10-slim# ... build your Python applicationThen, You can build your Python application image based on our agent-enabled Python images and start your applications with SkyWalking agent enabled for you. Please refer to our Containerization Guide for further instructions on integration and configuring.\nFrom Source Code Please refer to the How-to-build-from-sources FAQ.\n","title":"Installation","url":"/docs/skywalking-python/next/en/setup/installation/"},{"content":"Installation SkyWalking Python agent requires SkyWalking 8.0+ and Python 3.7+\nYou can install the SkyWalking Python agent via various ways described next.\n Already installed? 
Check out easy ways to start the agent in your application\n  Non-intrusive  | Intrusive  | Containerization\n  All available configurations are listed here\n Important Note on Different Reporter Protocols Currently only gRPC protocol fully supports all available telemetry capabilities in the Python agent.\nWhile gRPC is highly recommended, we provide alternative protocols to suit your production requirements.\nPlease refer to the table below before deciding which report protocol suits best for you.\n   Reporter Protocol Trace Reporter Log Reporter Meter Reporter Profiling     gRPC ✅ ✅ ✅ ✅   HTTP ✅ ✅ ❌ ❌   Kafka ✅ ✅ ✅ ❌    From PyPI  If you want to try out the latest features that are not released yet, please refer to this guide to build from sources.\n The Python agent module is published to PyPI, from where you can use pip to install:\n# Install the latest version, using the default gRPC protocol to report data to OAP pip install \u0026#34;apache-skywalking\u0026#34; # Install support for every protocol (gRPC, HTTP, Kafka) pip install \u0026#34;apache-skywalking[all]\u0026#34; # Install the latest version, using the http protocol to report data to OAP pip install \u0026#34;apache-skywalking[http]\u0026#34; # Install the latest version, using the kafka protocol to report data to OAP pip install \u0026#34;apache-skywalking[kafka]\u0026#34; # Install a specific version x.y.z # pip install apache-skywalking==x.y.z pip install apache-skywalking==0.1.0 # For example, install version 0.1.0 no matter what the latest version is From Docker Hub SkyWalking Python agent provides convenient dockerfile and images for easy integration utilizing its auto-bootstrap capability.\nSimply pull SkyWalking Python image from Docker Hub based on desired agent version, protocol and Python version.\nFROMapache/skywalking-python:0.8.0-grpc-py3.10# ... build your Python application# If you prefer compact images (built from official Python slim image)FROMapache/skywalking-python:0.8.0-grpc-py3.10-slim# ... build your Python applicationThen, You can build your Python application image based on our agent-enabled Python images and start your applications with SkyWalking agent enabled for you. Please refer to our Containerization Guide for further instructions on integration and configuring.\nFrom Source Code Please refer to the How-to-build-from-sources FAQ.\n","title":"Installation","url":"/docs/skywalking-python/v1.0.1/en/setup/installation/"},{"content":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. 
Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","title":"Integration Tests","url":"/docs/main/latest/en/guides/it-guide/"},{"content":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","title":"Integration Tests","url":"/docs/main/next/en/guides/it-guide/"},{"content":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. 
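To recap the Maven invocations from the guide above in one place (all commands are taken from the text itself):
```shell
# Unit tests only: ITs (classes named IT* / *IT) are excluded by maven-surefire-plugin.
./mvnw clean test

# Only the ITs of the oap-server modules, skipping UTs (module profile + verify).
./mvnw -Pbackend clean verify -DskipUTs=true

# All ITs, skipping UTs; maven-failsafe-plugin is bound to the integration-test goal.
./mvnw clean integration-test -DskipUTs=true
```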
Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","title":"Integration Tests","url":"/docs/main/v9.6.0/en/guides/it-guide/"},{"content":"Integration Tests IT(Integration Tests) represents the JUnit driven integration test to verify the features and compatibility between lib and known server with various versions.\nAfter setting up the environment and writing your codes, to facilitate integration with the SkyWalking project, you\u0026rsquo;ll need to run tests locally to verify that your codes would not break any existing features, as well as write some unit test (UT) codes to verify that the new codes would work well. This will prevent them from being broken by future contributors. If the new codes involve other components or libraries, you should also write integration tests (IT).\nSkyWalking leverages the plugin maven-surefire-plugin to run the UTs and uses maven-failsafe-plugin to run the ITs. maven-surefire-plugin excludes ITs (whose class name starts or ends with *IT, IT*) and leaves them for maven-failsafe-plugin to run, which is bound to the integration-test goal. Therefore, to run the UTs, try ./mvnw clean test, which only runs the UTs but not the ITs.\nIf you would like to run the ITs, please run ./mvnw integration-test as well as the profiles of the modules whose ITs you want to run. If you don\u0026rsquo;t want to run UTs, please add -DskipUTs=true. E.g. if you would like to only run the ITs in oap-server, try ./mvnw -Pbackend clean verify -DskipUTs=true, and if you would like to run all the ITs, simply run ./mvnw clean integration-test -DskipUTs=true.\nPlease be advised that if you\u0026rsquo;re writing integration tests, name it with the pattern IT* or *IT so they would only run in goal integration-test.\n","title":"Integration Tests","url":"/docs/main/v9.7.0/en/guides/it-guide/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. The menu definition supports one and two levels items. 
The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
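As a quick sanity check of how the calculation formulas in the table below are applied, take Byte to MB (Value / 1024 / 1024): a raw metric value of 10485760 bytes is displayed as 10.
```shell
# Worked example for the "Byte to MB" calculation (Value / 1024 / 1024).
echo $((10485760 / 1024 / 1024))   # prints 10
```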
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/latest/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. The menu definition supports one and two levels items. 
The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/next/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack application.\nThe left side menu lists all available supported stack, with default dashboards.\nFollow Official Dashboards menu explores all default dashboards about how to monitor different tech stacks.\nCustom Dashboard Besides, official dashboards, Dashboards provides customization to end users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, log analysis are relative to OAL, MAL, and LAL engines in SkyWalking kernel. You should learn them first too.\nService and All entity type dashboard could be set as root(set this to root), which mean this dashboard would be used as the entrance of its layer. 
If you have multiple root dashboards, UI could choose one randomly(Don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release, set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory, closing tab would LOSE the change permanently.\nSettings Settings provide language, server time zone, and auto-fresh option. These settings are stored in browser local storage. Unless you clear them manually, those would not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.0.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nSettings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.1.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. 
It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nWidget The widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nSettings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. 
If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.2.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nSettings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.3.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. 
Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. 
If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.4.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.5.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. The menu definition supports one and two levels items. 
The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
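A practical note on the editing switch mentioned above: SW_ENABLE_UPDATE_UI_TEMPLATE is a plain environment variable read at OAP start-up. A minimal sketch, assuming the server is launched from the unpacked distribution (the script name and path may differ in your packaging):
```shell
# Enable dashboard editing (disabled by default on releases) before starting the OAP server.
export SW_ENABLE_UPDATE_UI_TEMPLATE=true
# Assumption: run from the unpacked distribution directory; adjust to your deployment.
bin/oapService.sh
```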
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.6.0/en/ui/readme/"},{"content":"Introduction to UI The SkyWalking official UI provides the default and powerful visualization capabilities for SkyWalking to observe full-stack applications.\nThe left side menu lists all available supported stacks with default dashboards.\nFollow the Official Dashboards menu to explore all default dashboards on their ways to monitor different tech stacks.\nSidebar Menu and Marketplace All available feature menu items are only listed in the marketplace(since 9.6.0). They are only visible on the Sidebar Menu when there are relative services being observed by various supported observation agents, such as installed language agents, service mesh platform, OTEL integration.\nThe menu items defined in ui-initialized-templates/menu.yaml are the universal marketplace for all default-supported integration. The menu definition supports one and two levels items. 
The leaf menu item should have the layer for navigation.\nmenus:- name:GeneralServiceicon:general_servicemenus:- name:Serviceslayer:GENERAL- name:VisualDatabaselayer:VIRTUAL_DATABASE- name:VisualCachelayer:VIRTUAL_CACHE- name:VisualMQlayer:VIRTUAL_MQ....- name:SelfObservabilityicon:self_observabilitymenus:- name:SkyWalkingServerlayer:SO11Y_OAP- name:Satellitelayer:SO11Y_SATELLITEThe menu items would automatically pop up on the left after short period of time that at least one service was observed. For more details, please refer to the \u0026ldquo;uiMenuRefreshInterval\u0026rdquo; configuration item in the backend settings\nCustom Dashboard Besides official dashboards, Dashboards provide customization capabilities to end-users to add new tabs/pages/widgets, and flexibility to re-config the dashboard on your own preference.\nThe dashboard has two key attributes, Layer and Entity Type. Learn these two concepts first before you begin any customization. Also, trace, metrics, and log analysis are relative to OAL, MAL, and LAL engines in the SkyWalking kernel. It would help if you learned them first, too.\nService and All entity type dashboard could be set as root(set this to root), which means this dashboard would be used as the entrance of its Layer. If you have multiple root dashboards, UI will choose one randomly (We don\u0026rsquo;t recommend doing so).\nNotice, dashboard editable is disabled on release; set system env(SW_ENABLE_UPDATE_UI_TEMPLATE=true) to activate them. Before you save the edited dashboard, it is just stored in memory. Closing a tab would LOSE the change permanently.\nA new dashboard should be added through New Dashboard in the Dashboards menu. Meanwhile, there are two ways to edit an existing dashboard.\n Dashboard List in the Dashboard menu provides edit/delete/set-as-root features to manage existing dashboards. In every dashboard page, click the right top V toggle, and turn to E(representing Edit) mode.  Widget A dashboard consists of various widget. In the Edit mode, widgets could be added/moved/removed/edit according to the Layer.(Every widget declares its suitable layer.)\nThe widget provides the ability to visualize the metrics, generated through OAL, MAL, or LAL scripts.\nMetrics To display one or more metrics in a graph, the following information is required:\n Name: The name of the metric. Data Type: The way of reading the metrics data according to various metric types. Visualization: The graph options to visualize the metric. Each data type has its own matched graph options. See the mapping doc for more details. Unit: The unit of the metrics data. Calculation: The calculation formula for the metric. The available formulas are here.  
Common Graphs    Metrics Data Type Visualization Demo     read all values in the duration Line    get sorted top N values Top List    read all values of labels in the duration Table    read all values in the duration Area    read all values in the duration Service/Instance/Endpoint List    read sampled records in the duration Records List     Calculations    Label Calculation     Percentage Value / 100   Apdex Value / 10000   Average Sum of values / Count of values   Percentage + Avg-preview Sum of values / Count of values / 100   Apdex + Avg-preview Sum of values / Count of values / 10000   Byte to KB Value / 1024   Byte to MB Value / 1024 / 1024   Byte to GB Value / 1024 / 1024 / 1024   Seconds to YYYY-MM-DD HH:mm:ss dayjs(value * 1000).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Milliseconds to YYYY-MM-DD HH:mm:ss dayjs(value).format(\u0026ldquo;YYYY-MM-DD HH:mm:ss\u0026rdquo;)   Precision Value.toFixed(2)   Milliseconds to seconds Value / 1000   Seconds to days Value / 86400    Graph styles Graph advanced style options.\nWidget options Define the following properties of the widget:\n Name: The name of the widget, which used to associate with other widget in the dashboard. Title: The title name of the widget. Tooltip Content: Additional explanation of the widget.  Association Options Widget provides the ability to associate with other widgets to show axis pointer with tips for the same time point, in order to help users to understand the connectivity among metrics.\nWidget Static Link On the right top of every widget on the dashboard, there is a Generate Link option, which could generate a static link to represent this widget. By using this link, users could share this widget, or integrate it into any 3rd party iFrame to build a network operations center(NOC) dashboard on the wall easily. About this link, there are several customizable options\n Lock Query Duration. Set the query duration manually. It is OFF by default. Auto Fresh option is ON with 6s query period and last 30 mins time range. Query period and range are customizable.  Settings Settings provide language, server time zone, and auto-fresh options. These settings are stored in the browser\u0026rsquo;s local storage. Unless you clear them manually, those will not change.\nFAQ Login and Authentication SkyWalking doesn\u0026rsquo;t provide login and authentication as usual for years. If you need, a lot of Gateway solutions have provides well-established solutions, such as the Nginx ecosystem.\n","title":"Introduction to UI","url":"/docs/main/v9.7.0/en/ui/readme/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. 
For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/latest/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/next/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service, because gRPC may be not supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users who are not familiar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.0.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. 
For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.1.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.2.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.3.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. 
For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.4.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.5.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. For example, if 172.09.13.28 is bound, even if you are in this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.6.0/en/setup/backend/backend-ip-port/"},{"content":"IP and port setting The backend uses IP and port binding in order to allow the OS to have multiple IPs. The binding/listening IP and port are specified by the core module\ncore:default:restHost:0.0.0.0restPort:12800restContextPath:/gRPCHost:0.0.0.0gRPCPort:11800There are two IP/port pairs for gRPC and HTTP REST services.\n Most agents and probes use gRPC service for better performance and code readability. Some agents use REST service because gRPC may not be supported in that language. The UI uses REST service, but the data is always in GraphQL format.  Note IP binding For users unfamiliar with IP binding, note that once IP binding is complete, the client could only use this IP to access the service. 
For example, if 172.09.13.28 is bound, even if you are on this machine, you must use 172.09.13.28, rather than 127.0.0.1 or localhost, to access the service.\nModule provider specified IP and port The IP and port in the core module are provided by default. But it is common for some module providers, such as receiver modules, to provide other IP and port settings.\n","title":"IP and port setting","url":"/docs/main/v9.7.0/en/setup/backend/backend-ip-port/"},{"content":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector to inject the agent sidecar into a pod.\nWhen enabled in a pod\u0026rsquo;s namespace, the injector injects the java agent container at pod creation time using a mutating webhook admission controller. By rendering the java agent to a shared volume, containers within the pod can use the java agent.\nThe following sections describe how to configure the agent; if you want to try it directly, please see Usage for more details.\nInstall Injector The java agent injector is a component of the operator, so you need to follow the Operator installation instructions to install the operator first.\nActivate the java agent injection We have two granularities here: namespace and pod.\n   Resource Label Enabled value Disabled value     Namespace swck-injection enabled disabled   Pod swck-java-agent-injected \u0026ldquo;true\u0026rdquo; \u0026ldquo;false\u0026rdquo;    The injector is configured with the following logic:\n If either label is disabled, the pod is not injected. If both labels are enabled, the pod is injected.  Follow the next steps to activate java agent injection.\n Label the namespace with swck-injection=enabled  $ kubectl label namespace default(your namespace) swck-injection=enabled  Add label swck-java-agent-injected: \u0026quot;true\u0026quot; to the pod, and get the result as below.  $ kubectl get pod -l swck-java-agent-injected=true NAME READY STATUS RESTARTS AGE inject-demo 1/1 Running 0 2d2h The ways to configure the agent The java agent injector supports a precedence order to configure the agent:\n Annotations \u0026gt; SwAgent \u0026gt; Configmap (Deprecated) \u0026gt; Default Configmap (Deprecated)\nAnnotations Annotations are described in the kubernetes annotations doc.\nWe support annotations in agent annotations and sidecar annotations.\nSwAgent SwAgent is a Custom Resource defined by SWCK.\nWe support SwAgent as described in the SwAgent usage guide.\nConfigmap (Deprecated) Configmap is described in the kubernetes configmap doc.\nWe need to use a configmap to set agent.config so that we can modify the agent configuration without entering the container.\nIf there are different configmaps in the namespace, you can choose a configmap by setting sidecar annotations; if there is no configmap, the injector will create a default configmap.\nDefault configmap (Deprecated) The injector will create the default configmap to overlay the agent.config in the agent container.\nThe default configmap is shown below: agent.service_name must not be an empty string, and collector.backend_service needs to be a legal IP address and port; the other fields need to be guaranteed by users themselves. Users can change it to be their own default configmap.\ndata: agent.config: | # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. 
collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. To avoid the default configmap deleting by mistake, we use a configmap controller to watch the default configmap. In addition, if the user applies an invalid configuration, such as a malformed backend_service, the controller will use the default configmap.\nConfigure the agent The injector supports two methods to configure agent:\n Only use the default configuration. Use annotations to overlay the default configuration.  Use the default agent configuration After activating the java agent injection, if not set the annotations, the injector will use the default agent configuration directly as below.\ninitContainers: - args: - -c - mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent command: - sh image: apache/skywalking-java-agent:8.16.0-java8 name: inject-skywalking-agent volumeMounts: - mountPath: /sky/agent name: sky-agent volumes: - emptyDir: {} name: sky-agent - configMap: name: skywalking-swck-java-agent-configmap name: java-agent-configmap-volume Use SwAgent to overlay default agent configuration The injector will read the SwAgent CR when pods creating.\nSwAgent CRD basic structure is like:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;There are three kind of configs in SwAgent CR.\n1. label selector and container matcher label selector and container matcher decides which pod and container should be injected.\n   key path description default value     spec.selector label selector for pods which should be effected during injection. if no label selector was set, SwAgent CR config will affect every pod during injection. no default value   spec.containerMatcher container matcher is used to decide which container to be inject during injection. regular expression is supported. default value \u0026lsquo;.*\u0026rsquo; would match any container name. .*    2. injection configuration injection configuration will affect on agent injection behaviour\n   key path description default value     javaSidecar javaSidecar is the configs for init container, which holds agent sdk and take agent sdk to the target containers.    javaSidecar.name the name of the init container. inject-skywalking-agent   javaSidecar.image the image of the init container. apache/skywalking-java-agent:8.16.0-java8   SharedVolumeName SharedVolume is the name of an empty volume which shared by initContainer and target containers. sky-agent   OptionalPlugins Select the optional plugin which needs to be moved to the directory(/plugins). Such as trace,webflux,cloud-gateway-2.1.x. no default value   OptionalReporterPlugins Select the optional reporter plugin which needs to be moved to the directory(/plugins). such as kafka. no default value    3. 
skywalking agent configuration skywalking agent configuration is for agent SDK.\n   key path description default value     javaSidecar.env the env list to be appended to target containers. usually we can use it to setup agent configuration at container level. no default value.    Use annotations to overlay default agent configuration The injector can recognize five kinds of annotations to configure the agent as below.\n1. strategy configuration The strategy configuration is the annotation as below.\n   Annotation key Description Annotation Default value     strategy.skywalking.apache.org/inject.Container Select the injected container, if not set, inject all containers. not set    2. agent configuration The agent configuration is the annotation like agent.skywalking.apache.org/{option}: {value}, and the option support agent.xxx 、osinfo.xxx 、collector.xxx 、 logging.xxx 、statuscheck.xxx 、correlation.xxx 、jvm.xxx 、buffer.xxx 、 profile.xxx 、 meter.xxx 、 log.xxx in agent.config, such as agent.skywalking.apache.org/agent.namespace, agent.skywalking.apache.org/meter.max_meter_size, etc.\n3. plugins configuration The plugins configuration is the annotation like plugins.skywalking.apache.org/{option}: {value}, and the option only support plugin.xxx in the agent.config, such as plugins.skywalking.apache.org/plugin.mount, plugins.skywalking.apache.org/plugin.mongodb.trace_param, etc.\n4. optional plugin configuration The optional plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional.skywalking.apache.org Select the optional plugin which needs to be moved to the directory(/plugins). Users can select several optional plugins by separating from |, such as trace|webflux|cloud-gateway-2.1.x. not set    5. optional reporter plugin configuration The optional reporter plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional-reporter.skywalking.apache.org Select the optional reporter plugin which needs to be moved to the directory(/plugins). Users can select several optional reporter plugins by separating from |, such as kafka. not set    Configure sidecar The injector can recognize the following annotations to configure the sidecar:\n   Annotation key Description Annotation Default value     sidecar.skywalking.apache.org/initcontainer.Name The name of the injected java agent container. inject-skywalking-agent   sidecar.skywalking.apache.org/initcontainer.Image The container image of the injected java agent container. apache/skywalking-java-agent:8.16.0-java8   sidecar.skywalking.apache.org/initcontainer.Command The command of the injected java agent container. sh   sidecar.skywalking.apache.org/initcontainer.args.Option The args option of the injected java agent container. -c   sidecar.skywalking.apache.org/initcontainer.args.Command The args command of the injected java agent container. mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent   sidecar.skywalking.apache.org/initcontainer.resources.limits The resources limits of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/initcontainer.resources.requests The resources requests of the injected java agent container. 
You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/sidecarVolume.Name The name of sidecar Volume. sky-agent   sidecar.skywalking.apache.org/sidecarVolumeMount.MountPath Mount path of the agent directory in the injected container. /sky/agent   sidecar.skywalking.apache.org/env.Name Environment Name used by the injected container (application container). JAVA_TOOL_OPTIONS   sidecar.skywalking.apache.org/env.Value Environment variables used by the injected container (application container). -javaagent:/sky/agent/skywalking-agent.jar    The ways to get the final injected agent\u0026rsquo;s configuration Please see javaagent introduction for details.\n","title":"Java agent injector Manual","url":"/docs/skywalking-swck/latest/java-agent-injector/"},{"content":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector to inject the agent sidecar into a pod.\nWhen enabled in a pod\u0026rsquo;s namespace, the injector injects the java agent container at pod creation time using a mutating webhook admission controller. By rendering the java agent to a shared volume, containers within the pod can use the java agent.\nThe following sections describe how to configure the agent, if you want to try it directly, please see Usage for more details.\nInstall Injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nActive the java agent injection We have two granularities here: namespace and pod.\n   Resource Label Enabled value Disabled value     Namespace swck-injection enabled disabled   Pod swck-java-agent-injected \u0026ldquo;true\u0026rdquo; \u0026ldquo;false\u0026rdquo;    The injector is configured with the following logic:\n If either label is disabled, the pod is not injected. If two labels are enabled, the pod is injected.  Follow the next steps to active java agent injection.\n Label the namespace with swck-injection=enabled  $ kubectl label namespace default(your namespace) swck-injection=enabled  Add label swck-java-agent-injected: \u0026quot;true\u0026quot; to the pod, and get the result as below.  
$ kubectl get pod -l swck-java-agent-injected=true NAME READY STATUS RESTARTS AGE inject-demo 1/1 Running 0 2d2h The ways to configure the agent The java agent injector supports a precedence order to configure the agent:\n Annotations \u0026gt; SwAgent \u0026gt; Configmap (Deprecated) \u0026gt; Default Configmap (Deprecated)\nAnnotations Annotations are described in kubernetes annotations doc.\nWe support annotations in agent annotations and sidecar annotations.\nSwAgent SwAgent is a Customer Resource defined by SWCK.\nWe support SwAgent in SwAgent usage guide\nConfigmap (Deprecated) Configmap is described in kubernetes configmap doc.\nWe need to use configmap to set agent.config so that we can modify the agent configuration without entering the container.\nIf there are different configmap in the namepsace, you can choose a configmap by setting sidecar annotations; If there is no configmap, the injector will create a default configmap.\nDefault configmap (Deprecated) The injector will create the default configmap to overlay the agent.config in the agent container.\nThe default configmap is shown as below, one is agent.service_name and the string can\u0026rsquo;t be empty; the other is collector.backend_service and it needs to be a legal IP address and port, the other fields need to be guaranteed by users themselves. Users can change it as their default configmap.\ndata: agent.config: | # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. To avoid the default configmap deleting by mistake, we use a configmap controller to watch the default configmap. In addition, if the user applies an invalid configuration, such as a malformed backend_service, the controller will use the default configmap.\nConfigure the agent The injector supports two methods to configure agent:\n Only use the default configuration. Use annotations to overlay the default configuration.  
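For reference, the default configmap data quoted a few paragraphs above lays out as YAML roughly like this (a sketch; only the keys shown in the text are included, and the surrounding ConfigMap metadata is omitted):

```yaml
data:
  agent.config: |
    # The service name in UI
    agent.service_name=${SW_AGENT_NAME:Your_ApplicationName}
    # Backend service addresses.
    collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800}
```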
Use the default agent configuration After activating the java agent injection, if not set the annotations, the injector will use the default agent configuration directly as below.\ninitContainers: - args: - -c - mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent command: - sh image: apache/skywalking-java-agent:8.16.0-java8 name: inject-skywalking-agent volumeMounts: - mountPath: /sky/agent name: sky-agent volumes: - emptyDir: {} name: sky-agent - configMap: name: skywalking-swck-java-agent-configmap name: java-agent-configmap-volume Use SwAgent to overlay default agent configuration The injector will read the SwAgent CR when pods creating.\nSwAgent CRD basic structure is like:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;bootstrapPlugins:- \u0026#34;jdk-threading\u0026#34;There are three kind of configs in SwAgent CR.\n1. label selector and container matcher label selector and container matcher decides which pod and container should be injected.\n   key path description default value     spec.selector label selector for pods which should be effected during injection. if no label selector was set, SwAgent CR config will affect every pod during injection. no default value   spec.containerMatcher container matcher is used to decide which container to be inject during injection. regular expression is supported. default value \u0026lsquo;.*\u0026rsquo; would match any container name. .*    2. injection configuration injection configuration will affect on agent injection behaviour\n   key path description default value     javaSidecar javaSidecar is the configs for init container, which holds agent sdk and take agent sdk to the target containers.    javaSidecar.name the name of the init container. inject-skywalking-agent   javaSidecar.image the image of the init container. apache/skywalking-java-agent:8.16.0-java8   SharedVolumeName SharedVolume is the name of an empty volume which shared by initContainer and target containers. sky-agent   OptionalPlugins Select the optional plugin which needs to be moved to the directory(/plugins). Such as trace,webflux,cloud-gateway-2.1.x. no default value   OptionalReporterPlugins Select the optional reporter plugin which needs to be moved to the directory(/plugins). such as kafka. no default value   BootstrapPlugins Select the bootstrap plugin which needs to be moved to the directory(/plugins). such as jdk-threading. no default value    3. skywalking agent configuration skywalking agent configuration is for agent SDK.\n   key path description default value     javaSidecar.env the env list to be appended to target containers. usually we can use it to setup agent configuration at container level. no default value.    Use annotations to overlay default agent configuration The injector can recognize five kinds of annotations to configure the agent as below.\n1. 
strategy configuration The strategy configuration is the annotation as below.\n   Annotation key Description Annotation Default value     strategy.skywalking.apache.org/inject.Container Select the injected container, if not set, inject all containers. not set    2. agent configuration The agent configuration is the annotation like agent.skywalking.apache.org/{option}: {value}, and the option support agent.xxx 、osinfo.xxx 、collector.xxx 、 logging.xxx 、statuscheck.xxx 、correlation.xxx 、jvm.xxx 、buffer.xxx 、 profile.xxx 、 meter.xxx 、 log.xxx in agent.config, such as agent.skywalking.apache.org/agent.namespace, agent.skywalking.apache.org/meter.max_meter_size, etc.\n3. plugins configuration The plugins configuration is the annotation like plugins.skywalking.apache.org/{option}: {value}, and the option only support plugin.xxx in the agent.config, such as plugins.skywalking.apache.org/plugin.mount, plugins.skywalking.apache.org/plugin.mongodb.trace_param, etc.\n4. optional plugin configuration The optional plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional.skywalking.apache.org Select the optional plugin which needs to be moved to the directory(/plugins). Users can select several optional plugins by separating from |, such as trace|webflux|cloud-gateway-2.1.x. not set    5. optional reporter plugin configuration The optional reporter plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional-reporter.skywalking.apache.org Select the optional reporter plugin which needs to be moved to the directory(/plugins). Users can select several optional reporter plugins by separating from |, such as kafka. not set    6. bootstrap plugin configuration The bootstrap plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     bootstrap.skywalking.apache.org Select the bootstrap plugin which needs to be moved to the directory(/plugins). Users can select several bootstrap plugins by separating from |, such as jdk-threading. not set    Configure sidecar The injector can recognize the following annotations to configure the sidecar:\n   Annotation key Description Annotation Default value     sidecar.skywalking.apache.org/initcontainer.Name The name of the injected java agent container. inject-skywalking-agent   sidecar.skywalking.apache.org/initcontainer.Image The container image of the injected java agent container. apache/skywalking-java-agent:8.16.0-java8   sidecar.skywalking.apache.org/initcontainer.Command The command of the injected java agent container. sh   sidecar.skywalking.apache.org/initcontainer.args.Option The args option of the injected java agent container. -c   sidecar.skywalking.apache.org/initcontainer.args.Command The args command of the injected java agent container. mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent   sidecar.skywalking.apache.org/initcontainer.resources.limits The resources limits of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/initcontainer.resources.requests The resources requests of the injected java agent container. 
You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/sidecarVolume.Name The name of sidecar Volume. sky-agent   sidecar.skywalking.apache.org/sidecarVolumeMount.MountPath Mount path of the agent directory in the injected container. /sky/agent   sidecar.skywalking.apache.org/env.Name Environment Name used by the injected container (application container). JAVA_TOOL_OPTIONS   sidecar.skywalking.apache.org/env.Value Environment variables used by the injected container (application container). -javaagent:/sky/agent/skywalking-agent.jar    The ways to get the final injected agent\u0026rsquo;s configuration Please see javaagent introduction for details.\n","title":"Java agent injector Manual","url":"/docs/skywalking-swck/next/java-agent-injector/"},{"content":"Java agent injector Manual To use the java agent more natively, we propose the java agent injector to inject the agent sidecar into a pod.\nWhen enabled in a pod\u0026rsquo;s namespace, the injector injects the java agent container at pod creation time using a mutating webhook admission controller. By rendering the java agent to a shared volume, containers within the pod can use the java agent.\nThe following sections describe how to configure the agent, if you want to try it directly, please see Usage for more details.\nInstall Injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nActive the java agent injection We have two granularities here: namespace and pod.\n   Resource Label Enabled value Disabled value     Namespace swck-injection enabled disabled   Pod swck-java-agent-injected \u0026ldquo;true\u0026rdquo; \u0026ldquo;false\u0026rdquo;    The injector is configured with the following logic:\n If either label is disabled, the pod is not injected. If two labels are enabled, the pod is injected.  Follow the next steps to active java agent injection.\n Label the namespace with swck-injection=enabled  $ kubectl label namespace default(your namespace) swck-injection=enabled  Add label swck-java-agent-injected: \u0026quot;true\u0026quot; to the pod, and get the result as below.  
$ kubectl get pod -l swck-java-agent-injected=true NAME READY STATUS RESTARTS AGE inject-demo 1/1 Running 0 2d2h The ways to configure the agent The java agent injector supports a precedence order to configure the agent:\n Annotations \u0026gt; SwAgent \u0026gt; Configmap (Deprecated) \u0026gt; Default Configmap (Deprecated)\nAnnotations Annotations are described in kubernetes annotations doc.\nWe support annotations in agent annotations and sidecar annotations.\nSwAgent SwAgent is a Customer Resource defined by SWCK.\nWe support SwAgent in SwAgent usage guide\nConfigmap (Deprecated) Configmap is described in kubernetes configmap doc.\nWe need to use configmap to set agent.config so that we can modify the agent configuration without entering the container.\nIf there are different configmap in the namepsace, you can choose a configmap by setting sidecar annotations; If there is no configmap, the injector will create a default configmap.\nDefault configmap (Deprecated) The injector will create the default configmap to overlay the agent.config in the agent container.\nThe default configmap is shown as below, one is agent.service_name and the string can\u0026rsquo;t be empty; the other is collector.backend_service and it needs to be a legal IP address and port, the other fields need to be guaranteed by users themselves. Users can change it as their default configmap.\ndata: agent.config: | # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Backend service addresses. collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800} # Please refer to https://skywalking.apache.org/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/#table-of-agent-configuration-properties to get more details. To avoid the default configmap deleting by mistake, we use a configmap controller to watch the default configmap. In addition, if the user applies an invalid configuration, such as a malformed backend_service, the controller will use the default configmap.\nConfigure the agent The injector supports two methods to configure agent:\n Only use the default configuration. Use annotations to overlay the default configuration.  
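As a quick reference for the activation labels described earlier on this page, a minimal sketch of the two switches (resource names are placeholders; the pod label is normally set through the pod template of a Deployment/StatefulSet):

```yaml
# Namespace-level switch
apiVersion: v1
kind: Namespace
metadata:
  name: default            # placeholder: your namespace
  labels:
    swck-injection: enabled
---
# Pod-level switch (pod template metadata fragment)
metadata:
  labels:
    swck-java-agent-injected: "true"
```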
Use the default agent configuration After activating the java agent injection, if not set the annotations, the injector will use the default agent configuration directly as below.\ninitContainers: - args: - -c - mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent command: - sh image: apache/skywalking-java-agent:8.16.0-java8 name: inject-skywalking-agent volumeMounts: - mountPath: /sky/agent name: sky-agent volumes: - emptyDir: {} name: sky-agent - configMap: name: skywalking-swck-java-agent-configmap name: java-agent-configmap-volume Use SwAgent to overlay default agent configuration The injector will read the SwAgent CR when pods creating.\nSwAgent CRD basic structure is like:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;bootstrapPlugins:- \u0026#34;jdk-threading\u0026#34;There are three kind of configs in SwAgent CR.\n1. label selector and container matcher label selector and container matcher decides which pod and container should be injected.\n   key path description default value     spec.selector label selector for pods which should be effected during injection. if no label selector was set, SwAgent CR config will affect every pod during injection. no default value   spec.containerMatcher container matcher is used to decide which container to be inject during injection. regular expression is supported. default value \u0026lsquo;.*\u0026rsquo; would match any container name. .*    2. injection configuration injection configuration will affect on agent injection behaviour\n   key path description default value     javaSidecar javaSidecar is the configs for init container, which holds agent sdk and take agent sdk to the target containers.    javaSidecar.name the name of the init container. inject-skywalking-agent   javaSidecar.image the image of the init container. apache/skywalking-java-agent:8.16.0-java8   SharedVolumeName SharedVolume is the name of an empty volume which shared by initContainer and target containers. sky-agent   OptionalPlugins Select the optional plugin which needs to be moved to the directory(/plugins). Such as trace,webflux,cloud-gateway-2.1.x. no default value   OptionalReporterPlugins Select the optional reporter plugin which needs to be moved to the directory(/plugins). such as kafka. no default value   BootstrapPlugins Select the bootstrap plugin which needs to be moved to the directory(/plugins). such as jdk-threading. no default value    3. skywalking agent configuration skywalking agent configuration is for agent SDK.\n   key path description default value     javaSidecar.env the env list to be appended to target containers. usually we can use it to setup agent configuration at container level. no default value.    Use annotations to overlay default agent configuration The injector can recognize five kinds of annotations to configure the agent as below.\n1. 
strategy configuration The strategy configuration is the annotation as below.\n   Annotation key Description Annotation Default value     strategy.skywalking.apache.org/inject.Container Select the injected container, if not set, inject all containers. not set    2. agent configuration The agent configuration is the annotation like agent.skywalking.apache.org/{option}: {value}, and the option support agent.xxx 、osinfo.xxx 、collector.xxx 、 logging.xxx 、statuscheck.xxx 、correlation.xxx 、jvm.xxx 、buffer.xxx 、 profile.xxx 、 meter.xxx 、 log.xxx in agent.config, such as agent.skywalking.apache.org/agent.namespace, agent.skywalking.apache.org/meter.max_meter_size, etc.\n3. plugins configuration The plugins configuration is the annotation like plugins.skywalking.apache.org/{option}: {value}, and the option only support plugin.xxx in the agent.config, such as plugins.skywalking.apache.org/plugin.mount, plugins.skywalking.apache.org/plugin.mongodb.trace_param, etc.\n4. optional plugin configuration The optional plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional.skywalking.apache.org Select the optional plugin which needs to be moved to the directory(/plugins). Users can select several optional plugins by separating from |, such as trace|webflux|cloud-gateway-2.1.x. not set    5. optional reporter plugin configuration The optional reporter plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     optional-reporter.skywalking.apache.org Select the optional reporter plugin which needs to be moved to the directory(/plugins). Users can select several optional reporter plugins by separating from |, such as kafka. not set    6. bootstrap plugin configuration The bootstrap plugin configuration is the annotation as below.\n   Annotation key Description Annotation value     bootstrap.skywalking.apache.org Select the bootstrap plugin which needs to be moved to the directory(/plugins). Users can select several bootstrap plugins by separating from |, such as jdk-threading. not set    Configure sidecar The injector can recognize the following annotations to configure the sidecar:\n   Annotation key Description Annotation Default value     sidecar.skywalking.apache.org/initcontainer.Name The name of the injected java agent container. inject-skywalking-agent   sidecar.skywalking.apache.org/initcontainer.Image The container image of the injected java agent container. apache/skywalking-java-agent:8.16.0-java8   sidecar.skywalking.apache.org/initcontainer.Command The command of the injected java agent container. sh   sidecar.skywalking.apache.org/initcontainer.args.Option The args option of the injected java agent container. -c   sidecar.skywalking.apache.org/initcontainer.args.Command The args command of the injected java agent container. mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent   sidecar.skywalking.apache.org/initcontainer.resources.limits The resources limits of the injected java agent container. You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/initcontainer.resources.requests The resources requests of the injected java agent container. 
You should use json type to define it such as {\u0026quot;memory\u0026quot;: \u0026quot;100Mi\u0026quot;,\u0026quot;cpu\u0026quot;: \u0026quot;100m\u0026quot;} nil   sidecar.skywalking.apache.org/sidecarVolume.Name The name of sidecar Volume. sky-agent   sidecar.skywalking.apache.org/sidecarVolumeMount.MountPath Mount path of the agent directory in the injected container. /sky/agent   sidecar.skywalking.apache.org/env.Name Environment Name used by the injected container (application container). JAVA_TOOL_OPTIONS   sidecar.skywalking.apache.org/env.Value Environment variables used by the injected container (application container). -javaagent:/sky/agent/skywalking-agent.jar    The ways to get the final injected agent\u0026rsquo;s configuration Please see javaagent introduction for details.\n","title":"Java agent injector Manual","url":"/docs/skywalking-swck/v0.9.0/java-agent-injector/"},{"content":"Java agent injector Usage In this example, you will learn how to use the java agent injector.\nInstall injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nDeployment Example Let\u0026rsquo;s take a demo deployment for example.\n# demo1.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:labels:app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Enable Injection for Namespace and Deployments/StatefulSets. Firstly, set the injection label in your namespace as below.\nkubectl label namespace default(your namespace) swck-injection=enabled Secondly, set the injection label for your target Deployment/StatefulSet.\nkubectl -n default patch deployment demo1 --patch \u0026#39;{ \u0026#34;spec\u0026#34;: { \u0026#34;template\u0026#34;: { \u0026#34;metadata\u0026#34;: { \u0026#34;labels\u0026#34;: { \u0026#34;swck-java-agent-injected\u0026#34;: \u0026#34;true\u0026#34; } } } } }\u0026#39; Then the pods create by the Deployments/StatefulSets would be recreated with agent injected.\nThe injected pods would be like this:\nspec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jarimage:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agentcommand:- shimage:apache/skywalking-java-agent:8.10.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 demo1 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-5fbb6fcd98-cq5ws 1/1 Running 0 54s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:09:34Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true 
controller: true kind: ReplicaSet name: demo1-7fdffc7b95 uid: 417c413f-0cc0-41f9-b6eb-0192eb8c8622 resourceVersion: \u0026#34;25067\u0026#34; uid: 1cdab012-784c-4efb-b5d2-c032eb2fb22a spec: backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026#34;2022-08-16T12:09:34Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:10:04Z\u0026#34; realInjectedNum: 1 Use SwAgent CR to setup override default configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nApply SwAgent CR with correct label selector and container matcher:\n# SwAgent.yamlapiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;kubectl -n default apply swagent.yaml You can also get SwAgent CR by:\nkubectl -n default get SwAgent NAME AGE swagent-demo 38s Now the pod is still the old one, because pod could not load the SwAgent config automatically.\nSo you need to recreate pod to load SwAgent config. For the pods created by Deployment/StatefulSet, you can just simply delete the old pod.\n# verify pods to be delete  kubectl -n default get pods -l app=demo1 # delete pods kubectl -n default delete pods -l app=demo1 After the pods recreated, we can get injected pod as below.\nkubectl -n default get pods -l app=demo1 spec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.service_name=demo1,collector.backend_service=skywalking-system-oap.skywalking-system:11800- name:SW_LOGGING_LEVELvalue:DEBUG- name:SW_AGENT_COLLECTOR_BACKEND_SERVICESvalue:skywalking-system-oap.default.svc:11800image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agent-demoinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:swagent-demovolumeMounts:- mountPath:/sky/agentname:sky-agent-demovolumes:- emptyDir:{}name:sky-agent-demoUse annotation to override sidecar configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nThen add agent configuration and sidecar configuration to annotations as below.\n# 
demo1_anno.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:annotations:strategy.skywalking.apache.org/inject.Container:\u0026#34;demo1\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;app\u0026#34;agent.skywalking.apache.org/agent.sample_n_per_3_secs:\u0026#34;6\u0026#34;agent.skywalking.apache.org/agent.class_cache_mode:\u0026#34;MEMORY\u0026#34;agent.skywalking.apache.org/agent.ignore_suffix:\u0026#34;\u0026#39;jpg,.jpeg\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mount:\u0026#34;\u0026#39;plugins,activations\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mongodb.trace_param:\u0026#34;true\u0026#34;plugins.skywalking.apache.org/plugin.influxdb.trace_influxql:\u0026#34;false\u0026#34;optional.skywalking.apache.org:\u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34;optional-reporter.skywalking.apache.org:\u0026#34;kafka\u0026#34;labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Then we can get injected pod as below:\nkubectl -n default get pods -l app=demo1 spec:containers:- image:nginx:1.16.1imagePullPolicy:IfNotPresentname:nginx- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.ignore_suffix=\u0026#39;jpg,.jpeg\u0026#39;,agent.service_name=app,agent.class_cache_mode=MEMORY,agent.sample_n_per_3_secs=6,plugin.mongodb.trace_param=true,plugin.influxdb.trace_influxql=false,plugin.mount=\u0026#39;plugins,activations\u0026#39;image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1ports:- containerPort:8085protocol:TCPreadinessProbe:failureThreshold:10httpGet:path:/helloport:8085scheme:HTTPinitialDelaySeconds:3periodSeconds:3successThreshold:1timeoutSeconds:1volumeMounts:- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/\u0026amp;\u0026amp;cd /sky/agent/optional-reporter-plugins/ \u0026amp;\u0026amp; ls | grep -E \u0026#34;kafka\u0026#34; | xargs-i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 app 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-d48b96467-p7zrv 1/1 Running 0 5m25s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:18:53Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-d48b96467 uid: 2b7f1ac4-b459-41cd-8568-ecd4578ca457 resourceVersion: 
\u0026#34;26187\u0026#34; uid: c2b2f3e2-9442-4465-9423-d24249b2c53b spec: agentConfiguration: agent.class_cache_mode: MEMORY agent.ignore_suffix: \u0026#39;\u0026#39;\u0026#39;jpg,.jpeg\u0026#39;\u0026#39;\u0026#39; agent.sample_n_per_3_secs: \u0026#34;6\u0026#34; agent.service_name: app optional-plugin: trace|webflux|cloud-gateway-2.1.x optional-reporter-plugin: kafka plugin.influxdb.trace_influxql: \u0026#34;false\u0026#34; plugin.mongodb.trace_param: \u0026#34;true\u0026#34; plugin.mount: \u0026#39;\u0026#39;\u0026#39;plugins,activations\u0026#39;\u0026#39;\u0026#39; backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: app status: creationTime: \u0026#34;2022-08-16T12:18:53Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:19:18Z\u0026#34; realInjectedNum: 1 ","title":"Java agent injector Usage","url":"/docs/skywalking-swck/latest/examples/java-agent-injector-usage/"},{"content":"Java agent injector Usage In this example, you will learn how to use the java agent injector.\nInstall injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nDeployment Example Let\u0026rsquo;s take a demo deployment for example.\n# demo1.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:labels:app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Enable Injection for Namespace and Deployments/StatefulSets. Firstly, set the injection label in your namespace as below.\nkubectl label namespace default(your namespace) swck-injection=enabled Secondly, set the injection label for your target Deployment/StatefulSet.\nkubectl -n default patch deployment demo1 --patch \u0026#39;{ \u0026#34;spec\u0026#34;: { \u0026#34;template\u0026#34;: { \u0026#34;metadata\u0026#34;: { \u0026#34;labels\u0026#34;: { \u0026#34;swck-java-agent-injected\u0026#34;: \u0026#34;true\u0026#34; } } } } }\u0026#39; Then the pods create by the Deployments/StatefulSets would be recreated with agent injected.\nThe injected pods would be like this:\nspec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jarimage:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agentcommand:- shimage:apache/skywalking-java-agent:8.10.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 demo1 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-5fbb6fcd98-cq5ws 1/1 Running 0 54s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:09:34Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 
blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-7fdffc7b95 uid: 417c413f-0cc0-41f9-b6eb-0192eb8c8622 resourceVersion: \u0026#34;25067\u0026#34; uid: 1cdab012-784c-4efb-b5d2-c032eb2fb22a spec: backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026#34;2022-08-16T12:09:34Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:10:04Z\u0026#34; realInjectedNum: 1 Use SwAgent CR to setup override default configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nApply SwAgent CR with correct label selector and container matcher:\n# SwAgent.yamlapiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;kubectl -n default apply swagent.yaml You can also get SwAgent CR by:\nkubectl -n default get SwAgent NAME AGE swagent-demo 38s Now the pod is still the old one, because pod could not load the SwAgent config automatically.\nSo you need to recreate pod to load SwAgent config. For the pods created by Deployment/StatefulSet, you can just simply delete the old pod.\n# verify pods to be delete  kubectl -n default get pods -l app=demo1 # delete pods kubectl -n default delete pods -l app=demo1 After the pods recreated, we can get injected pod as below.\nkubectl -n default get pods -l app=demo1 spec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.service_name=demo1,collector.backend_service=skywalking-system-oap.skywalking-system:11800- name:SW_LOGGING_LEVELvalue:DEBUG- name:SW_AGENT_COLLECTOR_BACKEND_SERVICESvalue:skywalking-system-oap.default.svc:11800image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agent-demoinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:swagent-demovolumeMounts:- mountPath:/sky/agentname:sky-agent-demovolumes:- emptyDir:{}name:sky-agent-demoUse annotation to override sidecar configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nThen add agent configuration and sidecar configuration to annotations as below.\n# 
demo1_anno.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:annotations:strategy.skywalking.apache.org/inject.Container:\u0026#34;demo1\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;app\u0026#34;agent.skywalking.apache.org/agent.sample_n_per_3_secs:\u0026#34;6\u0026#34;agent.skywalking.apache.org/agent.class_cache_mode:\u0026#34;MEMORY\u0026#34;agent.skywalking.apache.org/agent.ignore_suffix:\u0026#34;\u0026#39;jpg,.jpeg\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mount:\u0026#34;\u0026#39;plugins,activations\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mongodb.trace_param:\u0026#34;true\u0026#34;plugins.skywalking.apache.org/plugin.influxdb.trace_influxql:\u0026#34;false\u0026#34;optional.skywalking.apache.org:\u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34;optional-reporter.skywalking.apache.org:\u0026#34;kafka\u0026#34;labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Then we can get injected pod as below:\nkubectl -n default get pods -l app=demo1 spec:containers:- image:nginx:1.16.1imagePullPolicy:IfNotPresentname:nginx- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.ignore_suffix=\u0026#39;jpg,.jpeg\u0026#39;,agent.service_name=app,agent.class_cache_mode=MEMORY,agent.sample_n_per_3_secs=6,plugin.mongodb.trace_param=true,plugin.influxdb.trace_influxql=false,plugin.mount=\u0026#39;plugins,activations\u0026#39;image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1ports:- containerPort:8085protocol:TCPreadinessProbe:failureThreshold:10httpGet:path:/helloport:8085scheme:HTTPinitialDelaySeconds:3periodSeconds:3successThreshold:1timeoutSeconds:1volumeMounts:- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/\u0026amp;\u0026amp;cd /sky/agent/optional-reporter-plugins/ \u0026amp;\u0026amp; ls | grep -E \u0026#34;kafka\u0026#34; | xargs-i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 app 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-d48b96467-p7zrv 1/1 Running 0 5m25s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:18:53Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-d48b96467 uid: 2b7f1ac4-b459-41cd-8568-ecd4578ca457 resourceVersion: 
\u0026#34;26187\u0026#34; uid: c2b2f3e2-9442-4465-9423-d24249b2c53b spec: agentConfiguration: agent.class_cache_mode: MEMORY agent.ignore_suffix: \u0026#39;\u0026#39;\u0026#39;jpg,.jpeg\u0026#39;\u0026#39;\u0026#39; agent.sample_n_per_3_secs: \u0026#34;6\u0026#34; agent.service_name: app optional-plugin: trace|webflux|cloud-gateway-2.1.x optional-reporter-plugin: kafka plugin.influxdb.trace_influxql: \u0026#34;false\u0026#34; plugin.mongodb.trace_param: \u0026#34;true\u0026#34; plugin.mount: \u0026#39;\u0026#39;\u0026#39;plugins,activations\u0026#39;\u0026#39;\u0026#39; backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: app status: creationTime: \u0026#34;2022-08-16T12:18:53Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:19:18Z\u0026#34; realInjectedNum: 1 ","title":"Java agent injector Usage","url":"/docs/skywalking-swck/next/examples/java-agent-injector-usage/"},{"content":"Java agent injector Usage In this example, you will learn how to use the java agent injector.\nInstall injector The java agent injector is a component of the operator, so you need to follow Operator installation instrument to install the operator firstly.\nDeployment Example Let\u0026rsquo;s take a demo deployment for example.\n# demo1.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:labels:app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Enable Injection for Namespace and Deployments/StatefulSets. Firstly, set the injection label in your namespace as below.\nkubectl label namespace default(your namespace) swck-injection=enabled Secondly, set the injection label for your target Deployment/StatefulSet.\nkubectl -n default patch deployment demo1 --patch \u0026#39;{ \u0026#34;spec\u0026#34;: { \u0026#34;template\u0026#34;: { \u0026#34;metadata\u0026#34;: { \u0026#34;labels\u0026#34;: { \u0026#34;swck-java-agent-injected\u0026#34;: \u0026#34;true\u0026#34; } } } } }\u0026#39; Then the pods create by the Deployments/StatefulSets would be recreated with agent injected.\nThe injected pods would be like this:\nspec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jarimage:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agentcommand:- shimage:apache/skywalking-java-agent:8.10.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 demo1 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-5fbb6fcd98-cq5ws 1/1 Running 0 54s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:09:34Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 
blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-7fdffc7b95 uid: 417c413f-0cc0-41f9-b6eb-0192eb8c8622 resourceVersion: \u0026#34;25067\u0026#34; uid: 1cdab012-784c-4efb-b5d2-c032eb2fb22a spec: backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026#34;2022-08-16T12:09:34Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:10:04Z\u0026#34; realInjectedNum: 1 Use SwAgent CR to setup override default configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nApply SwAgent CR with correct label selector and container matcher:\n# SwAgent.yamlapiVersion:operator.skywalking.apache.org/v1alpha1kind:SwAgentmetadata:name:swagent-demonamespace:defaultspec:containerMatcher:\u0026#39;\u0026#39;selector:javaSidecar:name:swagent-demoimage:apache/skywalking-java-agent:8.16.0-java8env:- name:\u0026#34;SW_LOGGING_LEVEL\u0026#34;value:\u0026#34;DEBUG\u0026#34;- name:\u0026#34;SW_AGENT_COLLECTOR_BACKEND_SERVICES\u0026#34;value:\u0026#34;skywalking-system-oap:11800\u0026#34;sharedVolumeName:\u0026#34;sky-agent-demo\u0026#34;optionalPlugins:- \u0026#34;webflux\u0026#34;- \u0026#34;cloud-gateway-2.1.x\u0026#34;kubectl -n default apply swagent.yaml You can also get SwAgent CR by:\nkubectl -n default get SwAgent NAME AGE swagent-demo 38s Now the pod is still the old one, because pod could not load the SwAgent config automatically.\nSo you need to recreate pod to load SwAgent config. For the pods created by Deployment/StatefulSet, you can just simply delete the old pod.\n# verify pods to be delete  kubectl -n default get pods -l app=demo1 # delete pods kubectl -n default delete pods -l app=demo1 After the pods recreated, we can get injected pod as below.\nkubectl -n default get pods -l app=demo1 spec:containers:- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.service_name=demo1,collector.backend_service=skywalking-system-oap.skywalking-system:11800- name:SW_LOGGING_LEVELvalue:DEBUG- name:SW_AGENT_COLLECTOR_BACKEND_SERVICESvalue:skywalking-system-oap.default.svc:11800image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1- mountPath:/sky/agentname:sky-agent-demoinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:swagent-demovolumeMounts:- mountPath:/sky/agentname:sky-agent-demovolumes:- emptyDir:{}name:sky-agent-demoUse annotation to override sidecar configuration Suppose that injection label had been set for Namespace and Deployments/StatefulSets as previous said.\nThen add agent configuration and sidecar configuration to annotations as below.\n# 
demo1_anno.yamlapiVersion:apps/v1kind:Deploymentmetadata:name:demo1namespace:defaultspec:selector:matchLabels:app:demo1template:metadata:annotations:strategy.skywalking.apache.org/inject.Container:\u0026#34;demo1\u0026#34;agent.skywalking.apache.org/agent.service_name:\u0026#34;app\u0026#34;agent.skywalking.apache.org/agent.sample_n_per_3_secs:\u0026#34;6\u0026#34;agent.skywalking.apache.org/agent.class_cache_mode:\u0026#34;MEMORY\u0026#34;agent.skywalking.apache.org/agent.ignore_suffix:\u0026#34;\u0026#39;jpg,.jpeg\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mount:\u0026#34;\u0026#39;plugins,activations\u0026#39;\u0026#34;plugins.skywalking.apache.org/plugin.mongodb.trace_param:\u0026#34;true\u0026#34;plugins.skywalking.apache.org/plugin.influxdb.trace_influxql:\u0026#34;false\u0026#34;optional.skywalking.apache.org:\u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34;optional-reporter.skywalking.apache.org:\u0026#34;kafka\u0026#34;labels:swck-java-agent-injected:\u0026#34;true\u0026#34;app:demo1spec:containers:- name:demo1image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1command:[\u0026#34;java\u0026#34;]args:[\u0026#34;-jar\u0026#34;,\u0026#34;/app.jar\u0026#34;]ports:- containerPort:8085readinessProbe:httpGet:path:/helloport:8085initialDelaySeconds:3periodSeconds:3failureThreshold:10Then we can get injected pod as below:\nkubectl -n default get pods -l app=demo1 spec:containers:- image:nginx:1.16.1imagePullPolicy:IfNotPresentname:nginx- args:- -jar- /app.jarcommand:- javaenv:- name:JAVA_TOOL_OPTIONSvalue:-javaagent:/sky/agent/skywalking-agent.jar=agent.ignore_suffix=\u0026#39;jpg,.jpeg\u0026#39;,agent.service_name=app,agent.class_cache_mode=MEMORY,agent.sample_n_per_3_secs=6,plugin.mongodb.trace_param=true,plugin.influxdb.trace_influxql=false,plugin.mount=\u0026#39;plugins,activations\u0026#39;image:ghcr.io/apache/skywalking-swck-spring-demo:v0.0.1name:demo1ports:- containerPort:8085protocol:TCPreadinessProbe:failureThreshold:10httpGet:path:/helloport:8085scheme:HTTPinitialDelaySeconds:3periodSeconds:3successThreshold:1timeoutSeconds:1volumeMounts:- mountPath:/sky/agentname:sky-agentinitContainers:- args:- -c- mkdir -p /sky/agent \u0026amp;\u0026amp; cp -r /skywalking/agent/* /sky/agent \u0026amp;\u0026amp; cd /sky/agent/optional-plugins/\u0026amp;\u0026amp;ls | grep -E \u0026#34;trace|webflux|cloud-gateway-2.1.x\u0026#34; | xargs -i cp {} /sky/agent/plugins/\u0026amp;\u0026amp;cd /sky/agent/optional-reporter-plugins/ \u0026amp;\u0026amp; ls | grep -E \u0026#34;kafka\u0026#34; | xargs-i cp {} /sky/agent/plugins/command:- shimage:apache/skywalking-java-agent:8.16.0-java8name:inject-skywalking-agentvolumeMounts:- mountPath:/sky/agentname:sky-agentvolumes:- emptyDir:{}name:sky-agentThen you can get the final agent configuration and the pod as below.\n$ kubectl get javaagent NAME PODSELECTOR SERVICENAME BACKENDSERVICE app-demo1-javaagent app=demo1 app 127.0.0.1:11800 $ kubectl get pod -l app=demo1(the podSelector) NAME READY STATUS RESTARTS AGE demo1-d48b96467-p7zrv 1/1 Running 0 5m25s Get the javaagent\u0026rsquo;s yaml for more datails.\n$ kubectl get javaagent app-demo1-javaagent -o yaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026#34;2022-08-16T12:18:53Z\u0026#34; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-d48b96467 uid: 2b7f1ac4-b459-41cd-8568-ecd4578ca457 resourceVersion: 
\u0026#34;26187\u0026#34; uid: c2b2f3e2-9442-4465-9423-d24249b2c53b spec: agentConfiguration: agent.class_cache_mode: MEMORY agent.ignore_suffix: \u0026#39;\u0026#39;\u0026#39;jpg,.jpeg\u0026#39;\u0026#39;\u0026#39; agent.sample_n_per_3_secs: \u0026#34;6\u0026#34; agent.service_name: app optional-plugin: trace|webflux|cloud-gateway-2.1.x optional-reporter-plugin: kafka plugin.influxdb.trace_influxql: \u0026#34;false\u0026#34; plugin.mongodb.trace_param: \u0026#34;true\u0026#34; plugin.mount: \u0026#39;\u0026#39;\u0026#39;plugins,activations\u0026#39;\u0026#39;\u0026#39; backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: app status: creationTime: \u0026#34;2022-08-16T12:18:53Z\u0026#34; expectedInjectiedNum: 1 lastUpdateTime: \u0026#34;2022-08-16T12:19:18Z\u0026#34; realInjectedNum: 1 ","title":"Java agent injector Usage","url":"/docs/skywalking-swck/v0.9.0/examples/java-agent-injector-usage/"},{"content":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","title":"Java Microbenchmark Harness (JMH)","url":"/docs/main/latest/en/guides/benchmark/"},{"content":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. 
And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","title":"Java Microbenchmark Harness (JMH)","url":"/docs/main/next/en/guides/benchmark/"},{"content":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","title":"Java Microbenchmark Harness (JMH)","url":"/docs/main/v9.6.0/en/guides/benchmark/"},{"content":"Java Microbenchmark Harness (JMH) JMH is a Java harness for building, running, and analysing nano/micro/milli/macro benchmarks written in Java and other languages targeting the JVM.\nWe have a module called microbench which performs a series of micro-benchmark tests for JMH testing. Make new JMH tests extend the org.apache.skywalking.oap.server.microbench.base.AbstractMicrobenchmark to customize runtime conditions (Measurement, Fork, Warmup, etc.).\nYou can build the jar with command ./mvnw -Dmaven.test.skip -DskipTests -pl :microbench package -am -Pbenchmark.\nJMH tests could run as a normal unit test. And they could run as an independent uber jar via java -jar benchmark.jar for all benchmarks, or via java -jar /benchmarks.jar exampleClassName for a specific test.\nOutput test results in JSON format, you can add -rf json like java -jar benchmarks.jar -rf json, if you run through the IDE, you can configure the -DperfReportDir=savePath parameter to set the JMH report result save path, a report results in JSON format will be generated when the run ends.\nMore information about JMH can be found here: jmh docs.\n","title":"Java Microbenchmark Harness (JMH)","url":"/docs/main/v9.7.0/en/guides/benchmark/"},{"content":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a CustomDefinitionResource called JavaAgent.\nWhen the pod is injected, the pod will be labeled with sidecar.skywalking.apache.org/succeed, then the controller will watch the specific pod labeled with sidecar.skywalking.apache.org/succeed. 
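To make the label flow above easier to picture, the partial sketch below shows how the relevant labels sit on an injected pod. Only app: demo1 and swck-java-agent-injected: "true" are taken from the examples on this page; everything else is an illustrative assumption, and the value of the succeed label is managed by the injector itself.
# sketch: labels on an injected pod (partial, illustrative)
metadata:
  labels:
    app: demo1                               # workload selector label used in the demos below
    swck-java-agent-injected: "true"         # set on the workload template to request injection
    # sidecar.skywalking.apache.org/succeed  # added by the injector after a successful injection;
    #                                        # the controller watches pods carrying this label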
After the pod is created, the controller will create JavaAgent(custom resource), which contains the final agent configuration as below.\nSpec    Field Name Description     podSelector We hope users can use workloads to create pods, the podSelector is the selector label of workload.   serviceName serviceName is an important attribute that needs to be printed.   backendService backendService is an important attribute that needs to be printed.   agentConfiguration agentConfiguration contains serviceName、backendService and covered agent configuration, other default configurations will not be displayed, please see agent.config for details.    Status    Field Name Description     creationTime The creation time of the JavaAgent   lastUpdateTime The last Update time of the JavaAgent   expectedInjectiedNum The number of the pod that need to be injected   realInjectedNum The real number of injected pods.    Demo This demo shows the usage of javaagent. If you want to see the complete process, please see java-agent-injector-usagefor details.\nWhen we use java-agent-injector, we can get custom resources as below.\n$ kubectl get javaagent -A NAMESPACE NAME PODSELECTOR SERVICENAME BACKENDSERVICE default app-demo1-javaagent app=demo1 Your_ApplicationName 127.0.0.1:11800 default app-demo2-javaagent app=demo2 Your_ApplicationName 127.0.0.1:11800 $ kubectl get pod -l app=demo1 NAME READY STATUS RESTARTS AGE demo1-bb97b8b4d-bkwm4 1/1 Running 0 28s demo1-bb97b8b4d-wxgs2 1/1 Running 0 28s $ kubectl get pod -l app=demo2 NAME READY STATUS RESTARTS AGE app2-0 1/1 Running 0 27s app2-1 1/1 Running 0 25s app2-2 1/1 Running 0 23s If we want to see more information, we can get the specific javaagent\u0026rsquo;s yaml as below.\n$ kubectl get javaagent app-demo1-javaagent -oyaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026quot;2021-10-14T07:07:12Z\u0026quot; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-bb97b8b4d uid: c712924f-4652-4c07-8332-b3938ad72392 resourceVersion: \u0026quot;330808\u0026quot; selfLink: /apis/operator.skywalking.apache.org/v1alpha1/namespaces/default/javaagents/app-demo1-javaagent uid: 9350338f-15a5-4832-84d1-530f8d0e1c3b spec: agentConfiguration: agent.namespace: default-namespace agent.service_name: Your_ApplicationName collector.backend_service: 127.0.0.1:11800 backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026quot;2021-10-14T07:07:12Z\u0026quot; expectedInjectiedNum: 2 lastUpdateTime: \u0026quot;2021-10-14T07:07:14Z\u0026quot; realInjectedNum: 2 ","title":"JavaAgent Introduction","url":"/docs/skywalking-swck/latest/javaagent/"},{"content":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a CustomDefinitionResource called JavaAgent.\nWhen the pod is injected, the pod will be labeled with sidecar.skywalking.apache.org/succeed, then the controller will watch the specific pod labeled with sidecar.skywalking.apache.org/succeed. After the pod is created, the controller will create JavaAgent(custom resource), which contains the final agent configuration as below.\nSpec    Field Name Description     podSelector We hope users can use workloads to create pods, the podSelector is the selector label of workload.   serviceName serviceName is an important attribute that needs to be printed.   
backendService backendService is an important attribute that needs to be printed.   agentConfiguration agentConfiguration contains serviceName、backendService and covered agent configuration, other default configurations will not be displayed, please see agent.config for details.    Status    Field Name Description     creationTime The creation time of the JavaAgent   lastUpdateTime The last Update time of the JavaAgent   expectedInjectiedNum The number of the pod that need to be injected   realInjectedNum The real number of injected pods.    Demo This demo shows the usage of javaagent. If you want to see the complete process, please see java-agent-injector-usagefor details.\nWhen we use java-agent-injector, we can get custom resources as below.\n$ kubectl get javaagent -A NAMESPACE NAME PODSELECTOR SERVICENAME BACKENDSERVICE default app-demo1-javaagent app=demo1 Your_ApplicationName 127.0.0.1:11800 default app-demo2-javaagent app=demo2 Your_ApplicationName 127.0.0.1:11800 $ kubectl get pod -l app=demo1 NAME READY STATUS RESTARTS AGE demo1-bb97b8b4d-bkwm4 1/1 Running 0 28s demo1-bb97b8b4d-wxgs2 1/1 Running 0 28s $ kubectl get pod -l app=demo2 NAME READY STATUS RESTARTS AGE app2-0 1/1 Running 0 27s app2-1 1/1 Running 0 25s app2-2 1/1 Running 0 23s If we want to see more information, we can get the specific javaagent\u0026rsquo;s yaml as below.\n$ kubectl get javaagent app-demo1-javaagent -oyaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026quot;2021-10-14T07:07:12Z\u0026quot; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-bb97b8b4d uid: c712924f-4652-4c07-8332-b3938ad72392 resourceVersion: \u0026quot;330808\u0026quot; selfLink: /apis/operator.skywalking.apache.org/v1alpha1/namespaces/default/javaagents/app-demo1-javaagent uid: 9350338f-15a5-4832-84d1-530f8d0e1c3b spec: agentConfiguration: agent.namespace: default-namespace agent.service_name: Your_ApplicationName collector.backend_service: 127.0.0.1:11800 backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026quot;2021-10-14T07:07:12Z\u0026quot; expectedInjectiedNum: 2 lastUpdateTime: \u0026quot;2021-10-14T07:07:14Z\u0026quot; realInjectedNum: 2 ","title":"JavaAgent Introduction","url":"/docs/skywalking-swck/next/javaagent/"},{"content":"JavaAgent Introduction To see the final injected agent\u0026rsquo;s configuration, we define a CustomDefinitionResource called JavaAgent.\nWhen the pod is injected, the pod will be labeled with sidecar.skywalking.apache.org/succeed, then the controller will watch the specific pod labeled with sidecar.skywalking.apache.org/succeed. After the pod is created, the controller will create JavaAgent(custom resource), which contains the final agent configuration as below.\nSpec    Field Name Description     podSelector We hope users can use workloads to create pods, the podSelector is the selector label of workload.   serviceName serviceName is an important attribute that needs to be printed.   backendService backendService is an important attribute that needs to be printed.   agentConfiguration agentConfiguration contains serviceName、backendService and covered agent configuration, other default configurations will not be displayed, please see agent.config for details.    
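Putting the Spec fields above together, a generated JavaAgent resource has roughly the following shape. This is only a sketch: every value is copied from the demo shown later on this page, not a definitive output of the controller.
# sketch of a generated JavaAgent resource (values taken from the demo below)
apiVersion: operator.skywalking.apache.org/v1alpha1
kind: JavaAgent
metadata:
  name: app-demo1-javaagent
  namespace: default
spec:
  podSelector: app=demo1                     # selector label of the owning workload
  serviceName: Your_ApplicationName
  backendService: 127.0.0.1:11800
  agentConfiguration:                        # serviceName, backendService and covered agent options
    agent.namespace: default-namespace
    agent.service_name: Your_ApplicationName
    collector.backend_service: 127.0.0.1:11800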
Status    Field Name Description     creationTime The creation time of the JavaAgent   lastUpdateTime The last Update time of the JavaAgent   expectedInjectiedNum The number of the pod that need to be injected   realInjectedNum The real number of injected pods.    Demo This demo shows the usage of javaagent. If you want to see the complete process, please see java-agent-injector-usagefor details.\nWhen we use java-agent-injector, we can get custom resources as below.\n$ kubectl get javaagent -A NAMESPACE NAME PODSELECTOR SERVICENAME BACKENDSERVICE default app-demo1-javaagent app=demo1 Your_ApplicationName 127.0.0.1:11800 default app-demo2-javaagent app=demo2 Your_ApplicationName 127.0.0.1:11800 $ kubectl get pod -l app=demo1 NAME READY STATUS RESTARTS AGE demo1-bb97b8b4d-bkwm4 1/1 Running 0 28s demo1-bb97b8b4d-wxgs2 1/1 Running 0 28s $ kubectl get pod -l app=demo2 NAME READY STATUS RESTARTS AGE app2-0 1/1 Running 0 27s app2-1 1/1 Running 0 25s app2-2 1/1 Running 0 23s If we want to see more information, we can get the specific javaagent\u0026rsquo;s yaml as below.\n$ kubectl get javaagent app-demo1-javaagent -oyaml apiVersion: operator.skywalking.apache.org/v1alpha1 kind: JavaAgent metadata: creationTimestamp: \u0026quot;2021-10-14T07:07:12Z\u0026quot; generation: 1 name: app-demo1-javaagent namespace: default ownerReferences: - apiVersion: apps/v1 blockOwnerDeletion: true controller: true kind: ReplicaSet name: demo1-bb97b8b4d uid: c712924f-4652-4c07-8332-b3938ad72392 resourceVersion: \u0026quot;330808\u0026quot; selfLink: /apis/operator.skywalking.apache.org/v1alpha1/namespaces/default/javaagents/app-demo1-javaagent uid: 9350338f-15a5-4832-84d1-530f8d0e1c3b spec: agentConfiguration: agent.namespace: default-namespace agent.service_name: Your_ApplicationName collector.backend_service: 127.0.0.1:11800 backendService: 127.0.0.1:11800 podSelector: app=demo1 serviceName: Your_ApplicationName status: creationTime: \u0026quot;2021-10-14T07:07:12Z\u0026quot; expectedInjectiedNum: 2 lastUpdateTime: \u0026quot;2021-10-14T07:07:14Z\u0026quot; realInjectedNum: 2 ","title":"JavaAgent Introduction","url":"/docs/skywalking-swck/v0.9.0/javaagent/"},{"content":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5; ZHEAP_USAGE = 6; COMPRESSED_CLASS_SPACE_USAGE = 7; CODEHEAP_NON_NMETHODS_USAGE = 8; CODEHEAP_PROFILED_NMETHODS_USAGE = 9; CODEHEAP_NON_PROFILED_NMETHODS_USAGE = 10;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","title":"JVM Metrics APIs","url":"/docs/main/latest/en/api/jvm-protocol/"},{"content":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5; ZHEAP_USAGE = 6; COMPRESSED_CLASS_SPACE_USAGE = 7; CODEHEAP_NON_NMETHODS_USAGE = 8; CODEHEAP_PROFILED_NMETHODS_USAGE = 9; CODEHEAP_NON_PROFILED_NMETHODS_USAGE = 10;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","title":"JVM Metrics APIs","url":"/docs/main/next/en/api/jvm-protocol/"},{"content":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","title":"JVM Metrics APIs","url":"/docs/main/v9.4.0/en/api/jvm-protocol/"},{"content":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","title":"JVM Metrics APIs","url":"/docs/main/v9.5.0/en/api/jvm-protocol/"},{"content":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","title":"JVM Metrics APIs","url":"/docs/main/v9.6.0/en/api/jvm-protocol/"},{"content":"JVM Metrics APIs Notice, SkyWalking has provided general available meter APIs for all kinds of metrics. This API is still supported for forward compatibility only. SkyWalking community would not accept new language specific metric APIs anymore.\nUplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the JVM metrics report service. 
service JVMMetricReportService { rpc collect (JVMMetricCollection) returns (Commands) { }}message JVMMetricCollection { repeated JVMMetric metrics = 1; string service = 2; string serviceInstance = 3;}message JVMMetric { int64 time = 1; CPU cpu = 2; repeated Memory memory = 3; repeated MemoryPool memoryPool = 4; repeated GC gc = 5; Thread thread = 6; Class clazz = 7;}message Memory { bool isHeap = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}message MemoryPool { PoolType type = 1; int64 init = 2; int64 max = 3; int64 used = 4; int64 committed = 5;}enum PoolType { CODE_CACHE_USAGE = 0; NEWGEN_USAGE = 1; OLDGEN_USAGE = 2; SURVIVOR_USAGE = 3; PERMGEN_USAGE = 4; METASPACE_USAGE = 5; ZHEAP_USAGE = 6; COMPRESSED_CLASS_SPACE_USAGE = 7; CODEHEAP_NON_NMETHODS_USAGE = 8; CODEHEAP_PROFILED_NMETHODS_USAGE = 9; CODEHEAP_NON_PROFILED_NMETHODS_USAGE = 10;}message GC { GCPhase phase = 1; int64 count = 2; int64 time = 3;}enum GCPhase { NEW = 0; OLD = 1; NORMAL = 2; // The type of GC doesn\u0026#39;t have new and old phases, like Z Garbage Collector (ZGC) }// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ThreadMXBean.html message Thread { int64 liveCount = 1; int64 daemonCount = 2; int64 peakCount = 3; int64 runnableStateThreadCount = 4; int64 blockedStateThreadCount = 5; int64 waitingStateThreadCount = 6; int64 timedWaitingStateThreadCount = 7;}// See: https://docs.oracle.com/javase/8/docs/api/java/lang/management/ClassLoadingMXBean.html message Class { int64 loadedClassCount = 1; int64 totalUnloadedClassCount = 2; int64 totalLoadedClassCount = 3;}","title":"JVM Metrics APIs","url":"/docs/main/v9.7.0/en/api/jvm-protocol/"},{"content":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","title":"JVM Metrics Service","url":"/docs/main/v9.0.0/en/protocols/jvm-protocol/"},{"content":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","title":"JVM Metrics Service","url":"/docs/main/v9.1.0/en/protocols/jvm-protocol/"},{"content":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","title":"JVM Metrics Service","url":"/docs/main/v9.2.0/en/protocols/jvm-protocol/"},{"content":"JVM Metrics Service Abstract Uplink the JVM metrics, including PermSize, HeapSize, CPU, Memory, etc., every second.\ngRPC service define\n","title":"JVM Metrics Service","url":"/docs/main/v9.3.0/en/protocols/jvm-protocol/"},{"content":"K8s monitoring SkyWalking leverages K8s kube-state-metrics and cAdvisor for collecting metrics data from K8s, and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus GRPC Exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . 
For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a full example of configuration and recommended version , you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  K8s Cluster Monitoring K8s cluster monitoring provide monitoring of the status and resources of the K8S Cluster, including the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as a Instance in OAP, and land on the Layer: K8S.\nK8s Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    K8s Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage 
The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    K8s Service Monitoring K8s Service Monitoring provide observe service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nK8s Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizing You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/k8s-cluster.yaml,/config/otel-oc-rules/k8s-node.yaml, /config/otel-oc-rules/k8s-service.yaml.\nThe K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"K8s monitoring","url":"/docs/main/v9.0.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
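Reformatted for readability, the minimal block referenced above that enables the fetcher is reproduced below; the values are unchanged, and the hosting file is assumed to be the OAP server's application.yml.
kafka-fetcher:
  selector: ${SW_KAFKA_FETCHER:default}
  default:
    bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
    namespace: ${SW_NAMESPACE:""}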
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","title":"Kafka Fetcher","url":"/docs/main/latest/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","title":"Kafka Fetcher","url":"/docs/main/next/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agent is delivered. Check the agent documentation for details. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported. Kafka Fetcher can work with gRPC/HTTP Receivers at the same time for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure as follows.\nNamespace aims to isolate multi OAP cluster when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to topic name. You should also set namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. 
For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You could find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","title":"Kafka Fetcher","url":"/docs/main/v9.0.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. 
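Restated as an indented sketch, the cluster-mode example above assigns partitions to a single OAP node like this; the partition list 1,3,5 is the same illustrative value used in the text.
  kafka-fetcher:
    selector: ${SW_KAFKA_FETCHER:default}
    default:
      bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
      # enable partition sharding across OAP nodes in cluster mode
      isSharding: ${SW_KAFKA_FETCHER_IS_SHARDING:true}
      # partitions assigned to this OAP node
      consumePartitions: ${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}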
Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","title":"Kafka Fetcher","url":"/docs/main/v9.1.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
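As a sketch of the kafkaConsumerConfig pass-through mentioned above: enable.auto.commit comes from the example in the text, while max.poll.records is only a hypothetical extra key, included to show that standard Kafka consumer properties can be added the same way.
  kafka-fetcher:
    selector: ${SW_KAFKA_FETCHER:default}
    default:
      bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
      kafkaConsumerConfig:
        enable.auto.commit: true
        # hypothetical additional consumer property, shown for illustration only
        max.poll.records: 500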
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","title":"Kafka Fetcher","url":"/docs/main/v9.2.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. 
You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","title":"Kafka Fetcher","url":"/docs/main/v9.3.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). 
Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. 
You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","title":"Kafka Fetcher","url":"/docs/main/v9.4.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:false}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:\u0026#34;\u0026#34;}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}In the cluster mode, all topics have the same number of partitions. Set \u0026quot;isSharding\u0026quot; to \u0026quot;true\u0026quot; and assign the partitions to consume for the OAP server. Use commas to separate multiple partitions for the OAP server.\nThe Kafka Fetcher allows you to configure all the Kafka producers listed here in property kafkaConsumerConfig. 
For example:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}kafkaConsumerConfig:enable.auto.commit:true...When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}isSharding:${SW_KAFKA_FETCHER_IS_SHARDING:true}consumePartitions:${SW_KAFKA_FETCHER_CONSUME_PARTITIONS:1,3,5}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...Other Fetcher Plugins There are other transporter plugins. You can find these plugins from 3rd party repositories.\n  Pulsar Fetcher Plugin\n  RocketMQ Fetcher Plugin\n  ","title":"Kafka Fetcher","url":"/docs/main/v9.5.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
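The MirrorMaker 2.0 options quoted earlier on this page are easier to read in indented form; a minimal sketch, assuming the empty-string defaults shown in the flattened snippet:
  kafka-fetcher:
    selector: ${SW_KAFKA_FETCHER:default}
    default:
      bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
      # match these to your Kafka MirrorMaker 2.0 replication configuration
      mm2SourceAlias: ${SW_KAFKA_MM2_SOURCE_ALIAS:""}
      mm2SourceSeparator: ${SW_KAFKA_MM2_SOURCE_SEPARATOR:""}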
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","title":"Kafka Fetcher","url":"/docs/main/v9.6.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka Fetcher The Kafka Fetcher pulls messages from the Kafka Broker to learn about what agents have delivered. Check the agent documentation for details on how to enable the Kafka reporter. Typically, tracing segments, service/instance properties, JVM metrics, and meter system data are supported (depending on the agent implementation). Kafka Fetcher can work with gRPC/HTTP Receivers simultaneously for adopting different transport protocols.\nKafka Fetcher is disabled by default. To enable it, configure it as follows.\nNamespace aims to isolate multi OAP clusters when using the same Kafka cluster. If you set a namespace for Kafka fetcher, the OAP will add a prefix to the topic name. You should also set the namespace in the property named plugin.kafka.namespace in agent.config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}skywalking-segments, skywalking-metrics, skywalking-profilings, skywalking-managements, skywalking-meters, skywalking-logs and skywalking-logs-json topics are required by kafka-fetcher. If they do not exist, Kafka Fetcher will create them by default. 
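To illustrate the namespace isolation described above, the sketch below pairs the OAP-side namespace with the agent-side property named in the text; the value my-cluster is a hypothetical example, and the agent.config line is shown as a comment because agent.config is a properties file rather than YAML.
  # OAP side: application.yml
  kafka-fetcher:
    selector: ${SW_KAFKA_FETCHER:default}
    default:
      bootstrapServers: ${SW_KAFKA_FETCHER_SERVERS:localhost:9092}
      # hypothetical namespace; topic names get this value as a prefix
      namespace: ${SW_NAMESPACE:my-cluster}
  # Agent side: agent.config (properties format)
  #   plugin.kafka.namespace=my-cluster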
Also, you can create them by yourself before the OAP server starts.\nWhen using the OAP server automatic creation mechanism, you could modify the number of partitions and replications of the topics using the following configurations:\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}When using Kafka MirrorMaker 2.0 to replicate topics between Kafka clusters, you can set the source Kafka Cluster alias (mm2SourceAlias) and separator (mm2SourceSeparator) according to your Kafka MirrorMaker config.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}namespace:${SW_NAMESPACE:\u0026#34;\u0026#34;}partitions:${SW_KAFKA_FETCHER_PARTITIONS:3}replicationFactor:${SW_KAFKA_FETCHER_PARTITIONS_FACTOR:2}consumers:${SW_KAFKA_FETCHER_CONSUMERS:1}mm2SourceAlias:${SW_KAFKA_MM2_SOURCE_ALIAS:\u0026#34;\u0026#34;}mm2SourceSeparator:${SW_KAFKA_MM2_SOURCE_SEPARATOR:\u0026#34;\u0026#34;}kafkaConsumerConfig:enable.auto.commit:true...","title":"Kafka Fetcher","url":"/docs/main/v9.7.0/en/setup/backend/kafka-fetcher/"},{"content":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the Layer: KAFKA.\nData flow  The prometheus_JMX_Exporter collect metrics data from Kafka. Note: Running the exporter as a Java agent. OpenTelemetry Collector fetches metrics from prometheus_JMX_Exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup prometheus_JMX_Exporter. This is an example for JMX Exporter configuration kafka-2_0_0.yml. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Kafka Monitoring Kafka monitoring provides multidimensional metrics monitoring of Kafka cluster as Layer: KAFKA Service in the OAP. In each cluster, the kafka brokers are represented as Instance.\nKafka Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Under-Replicated Partitions meter_kafka_under_replicated_partitions Number of under-replicated partitions in the broker. A higher number is a sign of potential issues. Prometheus JMX Exporter   Offline Partitions Count meter_kafka_offline_partitions_count Number of partitions that are offline. Non-zero values indicate a problem. Prometheus JMX Exporter   Partition Count meter_kafka_partition_count Total number of partitions on the broker. Prometheus JMX Exporter   Leader Count meter_kafka_leader_count Number of leader partitions on this broker. Prometheus JMX Exporter   Active Controller Count meter_kafka_active_controller_count The number of active controllers in the cluster. Typically should be 1. Prometheus JMX Exporter   Leader Election Rate meter_kafka_leader_election_rate The rate of leader elections per minute. High rate could be a sign of instability. 
Prometheus JMX Exporter   Unclean Leader Elections Per Second meter_kafka_unclean_leader_elections_per_second The rate of unclean leader elections per second. Non-zero values indicate a serious problem. Prometheus JMX Exporter   Max Lag meter_kafka_max_lag The maximum lag between the leader and followers in terms of messages still needed to be sent. Higher lag indicates delays. Prometheus JMX Exporter    Kafka Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_kafka_broker_cpu_time_total CPU usage in percentage Prometheus JMX Exporter   Memory Usage % meter_kafka_broker_memory_usage_percentage JVM heap memory usage in percentage Prometheus JMX Exporter   Incoming Messages Msg/sec meter_kafka_broker_messages_per_second Rate of incoming messages Prometheus JMX Exporter   Bytes In Bytes/sec meter_kafka_broker_bytes_in_per_second Rate of incoming bytes Prometheus JMX Exporter   Bytes Out Bytes/sec meter_kafka_broker_bytes_out_per_second Rate of outgoing bytes Prometheus JMX Exporter   Replication Bytes In Bytes/sec meter_kafka_broker_replication_bytes_in_per_second Rate of incoming bytes for replication Prometheus JMX Exporter   Replication Bytes Out Bytes/sec meter_kafka_broker_replication_bytes_out_per_second Rate of outgoing bytes for replication Prometheus JMX Exporter   Under-Replicated Partitions Count meter_kafka_broker_under_replicated_partitions Number of under-replicated partitions Prometheus JMX Exporter   Under Min ISR Partition Count Count meter_kafka_broker_under_min_isr_partition_count Number of partitions below the minimum ISR (In-Sync Replicas) Prometheus JMX Exporter   Partition Count Count meter_kafka_broker_partition_count Total number of partitions Prometheus JMX Exporter   Leader Count Count meter_kafka_broker_leader_count Number of partitions for which this broker is the leader Prometheus JMX Exporter   ISR Shrinks Count/sec meter_kafka_broker_isr_shrinks_per_second Rate of ISR (In-Sync Replicas) shrinking Prometheus JMX Exporter   ISR Expands Count/sec meter_kafka_broker_isr_expands_per_second Rate of ISR (In-Sync Replicas) expanding Prometheus JMX Exporter   Max Lag Count meter_kafka_broker_max_lag Maximum lag between the leader and follower for a partition Prometheus JMX Exporter   Purgatory Size Count meter_kafka_broker_purgatory_size Size of purgatory for Produce and Fetch operations Prometheus JMX Exporter   Garbage Collector Count Count/sec meter_kafka_broker_garbage_collector_count Rate of garbage collection cycles Prometheus JMX Exporter   Requests Per Second Req/sec meter_kafka_broker_requests_per_second Rate of requests to the broker Prometheus JMX Exporter   Request Queue Time ms meter_kafka_broker_request_queue_time_ms Average time a request spends in the request queue Prometheus JMX Exporter   Remote Time ms meter_kafka_broker_remote_time_ms Average time taken for a remote operation Prometheus JMX Exporter   Response Queue Time ms meter_kafka_broker_response_queue_time_ms Average time a response spends in the response queue Prometheus JMX Exporter   Response Send Time ms meter_kafka_broker_response_send_time_ms Average time taken to send a response Prometheus JMX Exporter   Network Processor Avg Idle % meter_kafka_broker_network_processor_avg_idle_percent Percentage of idle time for the network processor Prometheus JMX Exporter   Topic Messages In Total Count meter_kafka_broker_topic_messages_in_total Total number of messages per topic Prometheus JMX Exporter   Topic Bytes Out Per Second Bytes/sec 
meter_kafka_broker_topic_bytesout_per_second Rate of outgoing bytes per topic Prometheus JMX Exporter   Topic Bytes In Per Second Bytes/sec meter_kafka_broker_topic_bytesin_per_second Rate of incoming bytes per topic Prometheus JMX Exporter   Topic Fetch Requests Per Second Req/sec meter_kafka_broker_topic_fetch_requests_per_second Rate of fetch requests per topic Prometheus JMX Exporter   Topic Produce Requests Per Second Req/sec meter_kafka_broker_topic_produce_requests_per_second Rate of produce requests per topic Prometheus JMX Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/kafka/kafka-cluster.yaml, /config/otel-rules/kafka/kafka-node.yaml. The Kafka dashboard panel configurations are found in /config/ui-initialized-templates/kafka.\nReference For more details on monitoring Kafka and the metrics to focus on, see the following articles:\n Monitoring Kafka Streams Applications Kafka Monitoring  ","title":"Kafka monitoring","url":"/docs/main/latest/en/setup/backend/backend-kafka-monitoring/"},{"content":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the Layer: KAFKA.\nData flow  The prometheus_JMX_Exporter collect metrics data from Kafka. Note: Running the exporter as a Java agent. OpenTelemetry Collector fetches metrics from prometheus_JMX_Exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup prometheus_JMX_Exporter. This is an example for JMX Exporter configuration kafka-2_0_0.yml. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Kafka Monitoring Kafka monitoring provides multidimensional metrics monitoring of Kafka cluster as Layer: KAFKA Service in the OAP. In each cluster, the kafka brokers are represented as Instance.\nKafka Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Under-Replicated Partitions meter_kafka_under_replicated_partitions Number of under-replicated partitions in the broker. A higher number is a sign of potential issues. Prometheus JMX Exporter   Offline Partitions Count meter_kafka_offline_partitions_count Number of partitions that are offline. Non-zero values indicate a problem. Prometheus JMX Exporter   Partition Count meter_kafka_partition_count Total number of partitions on the broker. Prometheus JMX Exporter   Leader Count meter_kafka_leader_count Number of leader partitions on this broker. Prometheus JMX Exporter   Active Controller Count meter_kafka_active_controller_count The number of active controllers in the cluster. Typically should be 1. Prometheus JMX Exporter   Leader Election Rate meter_kafka_leader_election_rate The rate of leader elections per minute. High rate could be a sign of instability. Prometheus JMX Exporter   Unclean Leader Elections Per Second meter_kafka_unclean_leader_elections_per_second The rate of unclean leader elections per second. Non-zero values indicate a serious problem. 
Prometheus JMX Exporter   Max Lag meter_kafka_max_lag The maximum lag between the leader and followers in terms of messages still needed to be sent. Higher lag indicates delays. Prometheus JMX Exporter    Kafka Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_kafka_broker_cpu_time_total CPU usage in percentage Prometheus JMX Exporter   Memory Usage % meter_kafka_broker_memory_usage_percentage JVM heap memory usage in percentage Prometheus JMX Exporter   Incoming Messages Msg/sec meter_kafka_broker_messages_per_second Rate of incoming messages Prometheus JMX Exporter   Bytes In Bytes/sec meter_kafka_broker_bytes_in_per_second Rate of incoming bytes Prometheus JMX Exporter   Bytes Out Bytes/sec meter_kafka_broker_bytes_out_per_second Rate of outgoing bytes Prometheus JMX Exporter   Replication Bytes In Bytes/sec meter_kafka_broker_replication_bytes_in_per_second Rate of incoming bytes for replication Prometheus JMX Exporter   Replication Bytes Out Bytes/sec meter_kafka_broker_replication_bytes_out_per_second Rate of outgoing bytes for replication Prometheus JMX Exporter   Under-Replicated Partitions Count meter_kafka_broker_under_replicated_partitions Number of under-replicated partitions Prometheus JMX Exporter   Under Min ISR Partition Count Count meter_kafka_broker_under_min_isr_partition_count Number of partitions below the minimum ISR (In-Sync Replicas) Prometheus JMX Exporter   Partition Count Count meter_kafka_broker_partition_count Total number of partitions Prometheus JMX Exporter   Leader Count Count meter_kafka_broker_leader_count Number of partitions for which this broker is the leader Prometheus JMX Exporter   ISR Shrinks Count/sec meter_kafka_broker_isr_shrinks_per_second Rate of ISR (In-Sync Replicas) shrinking Prometheus JMX Exporter   ISR Expands Count/sec meter_kafka_broker_isr_expands_per_second Rate of ISR (In-Sync Replicas) expanding Prometheus JMX Exporter   Max Lag Count meter_kafka_broker_max_lag Maximum lag between the leader and follower for a partition Prometheus JMX Exporter   Purgatory Size Count meter_kafka_broker_purgatory_size Size of purgatory for Produce and Fetch operations Prometheus JMX Exporter   Garbage Collector Count Count/sec meter_kafka_broker_garbage_collector_count Rate of garbage collection cycles Prometheus JMX Exporter   Requests Per Second Req/sec meter_kafka_broker_requests_per_second Rate of requests to the broker Prometheus JMX Exporter   Request Queue Time ms meter_kafka_broker_request_queue_time_ms Average time a request spends in the request queue Prometheus JMX Exporter   Remote Time ms meter_kafka_broker_remote_time_ms Average time taken for a remote operation Prometheus JMX Exporter   Response Queue Time ms meter_kafka_broker_response_queue_time_ms Average time a response spends in the response queue Prometheus JMX Exporter   Response Send Time ms meter_kafka_broker_response_send_time_ms Average time taken to send a response Prometheus JMX Exporter   Network Processor Avg Idle % meter_kafka_broker_network_processor_avg_idle_percent Percentage of idle time for the network processor Prometheus JMX Exporter   Topic Messages In Total Count meter_kafka_broker_topic_messages_in_total Total number of messages per topic Prometheus JMX Exporter   Topic Bytes Out Per Second Bytes/sec meter_kafka_broker_topic_bytesout_per_second Rate of outgoing bytes per topic Prometheus JMX Exporter   Topic Bytes In Per Second Bytes/sec meter_kafka_broker_topic_bytesin_per_second Rate of incoming bytes 
per topic Prometheus JMX Exporter   Topic Fetch Requests Per Second Req/sec meter_kafka_broker_topic_fetch_requests_per_second Rate of fetch requests per topic Prometheus JMX Exporter   Topic Produce Requests Per Second Req/sec meter_kafka_broker_topic_produce_requests_per_second Rate of produce requests per topic Prometheus JMX Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/kafka/kafka-cluster.yaml, /config/otel-rules/kafka/kafka-node.yaml. The Kafka dashboard panel configurations are found in /config/ui-initialized-templates/kafka.\nReference For more details on monitoring Kafka and the metrics to focus on, see the following articles:\n Monitoring Kafka Streams Applications Kafka Monitoring  ","title":"Kafka monitoring","url":"/docs/main/next/en/setup/backend/backend-kafka-monitoring/"},{"content":"Kafka monitoring SkyWalking leverages Prometheus JMX Exporter to collect metrics data from the Kafka and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the Layer: KAFKA.\nData flow  The prometheus_JMX_Exporter collect metrics data from Kafka. Note: Running the exporter as a Java agent. OpenTelemetry Collector fetches metrics from prometheus_JMX_Exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup prometheus_JMX_Exporter. This is an example for JMX Exporter configuration kafka-2_0_0.yml. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Kafka Monitoring Kafka monitoring provides multidimensional metrics monitoring of Kafka cluster as Layer: KAFKA Service in the OAP. In each cluster, the kafka brokers are represented as Instance.\nKafka Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Under-Replicated Partitions meter_kafka_under_replicated_partitions Number of under-replicated partitions in the broker. A higher number is a sign of potential issues. Prometheus JMX Exporter   Offline Partitions Count meter_kafka_offline_partitions_count Number of partitions that are offline. Non-zero values indicate a problem. Prometheus JMX Exporter   Partition Count meter_kafka_partition_count Total number of partitions on the broker. Prometheus JMX Exporter   Leader Count meter_kafka_leader_count Number of leader partitions on this broker. Prometheus JMX Exporter   Active Controller Count meter_kafka_active_controller_count The number of active controllers in the cluster. Typically should be 1. Prometheus JMX Exporter   Leader Election Rate meter_kafka_leader_election_rate The rate of leader elections per minute. High rate could be a sign of instability. Prometheus JMX Exporter   Unclean Leader Elections Per Second meter_kafka_unclean_leader_elections_per_second The rate of unclean leader elections per second. Non-zero values indicate a serious problem. Prometheus JMX Exporter   Max Lag meter_kafka_max_lag The maximum lag between the leader and followers in terms of messages still needed to be sent. Higher lag indicates delays. 
Prometheus JMX Exporter    Kafka Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_kafka_broker_cpu_time_total CPU usage in percentage Prometheus JMX Exporter   Memory Usage % meter_kafka_broker_memory_usage_percentage JVM heap memory usage in percentage Prometheus JMX Exporter   Incoming Messages Msg/sec meter_kafka_broker_messages_per_second Rate of incoming messages Prometheus JMX Exporter   Bytes In Bytes/sec meter_kafka_broker_bytes_in_per_second Rate of incoming bytes Prometheus JMX Exporter   Bytes Out Bytes/sec meter_kafka_broker_bytes_out_per_second Rate of outgoing bytes Prometheus JMX Exporter   Replication Bytes In Bytes/sec meter_kafka_broker_replication_bytes_in_per_second Rate of incoming bytes for replication Prometheus JMX Exporter   Replication Bytes Out Bytes/sec meter_kafka_broker_replication_bytes_out_per_second Rate of outgoing bytes for replication Prometheus JMX Exporter   Under-Replicated Partitions Count meter_kafka_broker_under_replicated_partitions Number of under-replicated partitions Prometheus JMX Exporter   Under Min ISR Partition Count Count meter_kafka_broker_under_min_isr_partition_count Number of partitions below the minimum ISR (In-Sync Replicas) Prometheus JMX Exporter   Partition Count Count meter_kafka_broker_partition_count Total number of partitions Prometheus JMX Exporter   Leader Count Count meter_kafka_broker_leader_count Number of partitions for which this broker is the leader Prometheus JMX Exporter   ISR Shrinks Count/sec meter_kafka_broker_isr_shrinks_per_second Rate of ISR (In-Sync Replicas) shrinking Prometheus JMX Exporter   ISR Expands Count/sec meter_kafka_broker_isr_expands_per_second Rate of ISR (In-Sync Replicas) expanding Prometheus JMX Exporter   Max Lag Count meter_kafka_broker_max_lag Maximum lag between the leader and follower for a partition Prometheus JMX Exporter   Purgatory Size Count meter_kafka_broker_purgatory_size Size of purgatory for Produce and Fetch operations Prometheus JMX Exporter   Garbage Collector Count Count/sec meter_kafka_broker_garbage_collector_count Rate of garbage collection cycles Prometheus JMX Exporter   Requests Per Second Req/sec meter_kafka_broker_requests_per_second Rate of requests to the broker Prometheus JMX Exporter   Request Queue Time ms meter_kafka_broker_request_queue_time_ms Average time a request spends in the request queue Prometheus JMX Exporter   Remote Time ms meter_kafka_broker_remote_time_ms Average time taken for a remote operation Prometheus JMX Exporter   Response Queue Time ms meter_kafka_broker_response_queue_time_ms Average time a response spends in the response queue Prometheus JMX Exporter   Response Send Time ms meter_kafka_broker_response_send_time_ms Average time taken to send a response Prometheus JMX Exporter   Network Processor Avg Idle % meter_kafka_broker_network_processor_avg_idle_percent Percentage of idle time for the network processor Prometheus JMX Exporter   Topic Messages In Total Count meter_kafka_broker_topic_messages_in_total Total number of messages per topic Prometheus JMX Exporter   Topic Bytes Out Per Second Bytes/sec meter_kafka_broker_topic_bytesout_per_second Rate of outgoing bytes per topic Prometheus JMX Exporter   Topic Bytes In Per Second Bytes/sec meter_kafka_broker_topic_bytesin_per_second Rate of incoming bytes per topic Prometheus JMX Exporter   Topic Fetch Requests Per Second Req/sec meter_kafka_broker_topic_fetch_requests_per_second Rate of fetch requests per topic Prometheus JMX 
Exporter   Topic Produce Requests Per Second Req/sec meter_kafka_broker_topic_produce_requests_per_second Rate of produce requests per topic Prometheus JMX Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/kafka/kafka-cluster.yaml, /config/otel-rules/kafka/kafka-node.yaml. The Kafka dashboard panel configurations are found in /config/ui-initialized-templates/kafka.\nReference For more details on monitoring Kafka and the metrics to focus on, see the following articles:\n Monitoring Kafka Streams Applications Kafka Monitoring  ","title":"Kafka monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-kafka-monitoring/"},{"content":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","title":"Kafka Poll And Invoke","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-kafka/"},{"content":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  
public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","title":"Kafka Poll And Invoke","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-kafka/"},{"content":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","title":"Kafka Poll And Invoke","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-kafka/"},{"content":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  
public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","title":"Kafka Poll And Invoke","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-kafka/"},{"content":"Kafka Poll And Invoke  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-kafka\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  usage 1.  public class ConsumerThread2 extends Thread { @Override public void run() { Properties consumerProperties = new Properties(); //...consumerProperties.put()  KafkaConsumer\u0026lt;String, String\u0026gt; consumer = new KafkaConsumer\u0026lt;\u0026gt;(consumerProperties); consumer.subscribe(topicPattern, new NoOpConsumerRebalanceListener()); while (true) { if (pollAndInvoke(consumer)) break; } consumer.close(); } @KafkaPollAndInvoke private boolean pollAndInvoke(KafkaConsumer\u0026lt;String, String\u0026gt; consumer) { try { Thread.sleep(1000); } catch (InterruptedException e) { } ConsumerRecords\u0026lt;String, String\u0026gt; records = consumer.poll(100); if (!records.isEmpty()) { OkHttpClient client = new OkHttpClient.Builder().build(); Request request = new Request.Builder().url(\u0026#34;http://localhost:8080/kafka-scenario/case/kafka-thread2-ping\u0026#34;).build(); Response response = null; try { response = client.newCall(request).execute(); } catch (IOException e) { } response.body().close(); return true; } return false; } } Sample codes only\n","title":"Kafka Poll And Invoke","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-kafka/"},{"content":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, means that the skywalking agent will report the traces, metrics, logs etc. to SkyWalking OAP Server by gPRC protocol.\nAt the same time, SkyWalking also supports kafka-fetcher, so you can report traces, metrics, logs, etc. by kafka.\nBut the skywalking agent does not compile the kafka-reporter feature by default, you need to enable the it.\nSteps   Compile the skywalking agent with feature kafka-reporter.\nFor pecl:\npecl install skywalking_agent Enable the kafka reporter interactively:\n68 source files, building running: phpize Configuring for: PHP Api Version: 20220829 Zend Module Api No: 20220829 Zend Extension Api No: 420220829 enable cargo debug? 
[no] : enable kafka reporter? [no] : yes Or, build from sources:\nphpize ./configure --enable-kafka-reporter make make install   Configure php.ini.\nSwitch to the kafka reporter.\n[skywalking_agent] extension = skywalking_agent.so skywalking_agent.reporter_type = kafka skywalking_agent.kafka_bootstrap_servers = 127.0.0.1:9092,127.0.0.2:9092,127.0.0.3:9092 If you want to customize the kafka reporter properties, you can specify them in JSON format:\nskywalking_agent.kafka_producer_config = {\u0026#34;delivery.timeout.ms\u0026#34;: \u0026#34;12000\u0026#34;}   ","title":"Kafka reporter","url":"/docs/skywalking-php/latest/en/reporter/kafka-reporter/"},{"content":"Kafka reporter By default, the configuration option skywalking_agent.reporter_type is grpc, which means that the skywalking agent will report traces, metrics, logs, etc. to the SkyWalking OAP Server via the gRPC protocol.\nAt the same time, SkyWalking also supports kafka-fetcher, so you can report traces, metrics, logs, etc. via kafka.\nHowever, the skywalking agent does not compile the kafka-reporter feature by default, so you need to enable it.\nSteps   Compile the skywalking agent with the kafka-reporter feature.\nFor pecl:\npecl install skywalking_agent Enable the kafka reporter interactively:\n68 source files, building running: phpize Configuring for: PHP Api Version: 20220829 Zend Module Api No: 20220829 Zend Extension Api No: 420220829 enable cargo debug? [no] : enable kafka reporter? 
[no] : yes Or, build from sources:\nphpize ./configure --enable-kafka-reporter make make install   Configure php.ini.\nSwitch to the kafka reporter.\n[skywalking_agent] extension = skywalking_agent.so skywalking_agent.reporter_type = kafka skywalking_agent.kafka_bootstrap_servers = 127.0.0.1:9092,127.0.0.2:9092,127.0.0.3:9092 If you want to customize the kafka reporter properties, you can specify them in JSON format:\nskywalking_agent.kafka_producer_config = {\u0026#34;delivery.timeout.ms\u0026#34;: \u0026#34;12000\u0026#34;}   ","title":"Kafka reporter","url":"/docs/skywalking-php/v0.7.0/en/reporter/kafka-reporter/"},{"content":"Key Principle This document introduces the key technical processes used in the SkyWalking Go Agent, to help developers and end users understand how the agent works.\nMethod Interceptor Method interception is particularly important in SkyWalking Go, as it enables the creation of plugins. In SkyWalking Go, method interception mainly involves the following key points:\n Finding Method: Using AST to find method information in the target code to be enhanced. Modifying Methods: Enhancing the specified methods and embedding interceptor code. Saving and Compiling: Updating the modified files in the compilation arguments.  Finding Method When looking for methods, the SkyWalking Go Agent needs to search according to the provided compilation arguments, which mainly include the following two parts:\n Package information: Based on the package name provided by the arguments, the Agent can find the specific plugin. Go files: When a matching plugin is found, the Agent reads the .go files and uses AST to parse the method information from these source files. When the parsed method information matches the method information required by the plugin for interception, the agent considers the method found.  Modifying Methods After finding the method, the SkyWalking Go Agent needs to modify the method implementation and embed the interceptor code.\nChange Method Body When intercepting a method, the first thing to do is to modify the method and embed the template code. This code segment includes two method executions:\n Before method execution: Pass in the current method\u0026rsquo;s arguments, instances, and other information. After method execution: Using the defer method, intercept the result parameters after the code execution is completed.  Based on these two methods, the agent can intercept before and after method execution.\nIn order not to affect the line numbers of the original code, this code segment is placed on the same line as the first statement in the method. This ensures that when an exception occurs in the framework code execution, the exact location can still be found without being affected by the enhanced code.\nWrite Delegator File After the agent enhances the method body, it needs to implement the above two methods and write them into a single file, called the delegator file. These two methods do the following:\n Before method execution: Built from the template. Build the context for before and after interception, and pass the parameter information during execution to the interceptor in each plugin. After method execution: Built from the template. Pass the method return value to the interceptor and execute the method.  Copy Files After completing the delegator file, the agent performs the following copy operations:\n Plugin Code: Copy the Go files containing the interceptors in the plugin to the same level directory as the current framework. 
Plugin Development API Code: Copy the operation APIs required by the interceptors in the plugin to the same level directory as the current framework, such as tracing.  After copying the files, they cannot be immediately added to the compilation parameters, because they may have the same name as the existing framework code. Therefore, we need to perform some rewriting operations, which include the following parts:\n Types: Rename created structures, interfaces, methods, and other types by adding a unified prefix. Static Methods: Add a prefix to non-instance methods. Static methods do not need to be rewritten since they have already been processed in the types. Variables: Add a prefix to global variables. It\u0026rsquo;s not necessary to add a prefix to variables inside methods because they can ensure no conflicts would arise and are helpful for debugging.  In the Tracing API, we can see several methods, such as:\nvar ( errParameter = operator.NewError(\u0026#34;parameter are nil\u0026#34;) ) func CreateLocalSpan(operationName string, opts ...SpanOption) (s Span, err error) type SpanOption interface { Apply(interface{}) } After performed rewriting operations, they would become:\nvar ( skywalkingOperatorVarTracingerrParameter = skywalkingOperatorStaticMethodOperatorNewError(\u0026#34;parameter are nil\u0026#34;) ) func skywalkingOperatorStaticMethodTracingCreateLocalSpan(operationName string, opts ...skywalkingOperatorTypeTracingSpanOption) (s skywalkingOperatorTypeTracingSpan, err error) type skywalkingOperatorTypeTracingSpanOption interface { Apply(interface{}) } Saving and Compiling After the above steps are completed, the agent needs to save the modified files and add them to the compilation parameters.\nAt this point, when the framework executes the enhanced method, it can have the following capabilities:\n Execute Plugin Code: Custom code can be embedded before and after the method execution, and real-time parameter information can be obtained. Operate Agent: By calling the Agent API, interaction with the Agent Core can be achieved, enabling functions such as distributed tracing.  Propagation Context SkyWalking uses a new and internal mechanism to propagate context(e.g. tracing context) instead of relying on go native context.Context. This reduces the requirement for the target codes.\nContext Propagation between Methods In the agent, it would enhance the g structure in the runtime package. The g structure in Golang represents the internal data of the current goroutine. By enhancing this structure and using the runtime.getg() method, we can obtain the enhanced data in the current structure in real-time.\nEnhancement includes the following steps:\n Add Attributes to g: Add a new field to the g struct, and value as interface{}. Export Methods: Export methods for real-time setting and getting of custom field values in the current goroutine through go:linkname. Import methods: In the Agent Core, import the setting and getting methods for custom fields.  Through these, the agent has a shared context in any place within the same goroutine, similar to Java\u0026rsquo;s Thread Local.\nContext Propagation between Goroutines Besides using g object as the in-goroutine context propagation, SkyWalking builds a mechanism to propagate context between Goroutines.\nWhen a new goroutine is started on an existing goroutine, the runtime.newproc1 method is called to create a new goroutine based on the existing one. The agent would do context-copy from the previous goroutine to the newly created goroutine. 
The new context in the goroutine only shares limited information to help continue tracing.\nThe specific operation process is as follows:\n Write the copy method: Create a method for copying data from the previous goroutine. Insert code into newproc1: Insert the defer code, intercept the g objects before and after the execution, and call the copy method to assign values to the custom fields.  Agent with Dependency Since the SkyWalking Go Agent is based on compile-time enhancement, it cannot introduce third-party modules on its own. For example, when the SkyWalking Agent communicates with OAP, it needs to exchange data through the gRPC protocol. If the user does not introduce the gRPC module, this cannot be completed.\nTo resolve this problem, users need to introduce the relevant modules to provide the basic dependencies. This is why import _ \u0026quot;github.com/apache/skywalking-go\u0026quot; is required. The key modules that users currently need to introduce include:\n uuid: Used to generate UUIDs, mainly for TraceID generation. errors: To encapsulate error content. gRPC: The basic library used for communication between the SkyWalking Go Agent and the Server. skywalking-goapi: The data protocol for communication between Agent and Server in SkyWalking.  Agent Core Copy To simplify the use of the Agent, the SkyWalking Go module introduced by users only contains the user-facing API and code imports. The Agent Core code is dynamically added during hybrid compilation, so when the Agent releases new features, users only need to upgrade the Agent enhancement program without modifying the references in their program.\nCode Import You can see many imports.go files throughout SkyWalking Go, such as imports.go in the root directory, but they contain no actual code. This is because, during hybrid compilation, if the code to be compiled references other libraries, such as os, fmt, etc., they need to be referenced through the importcfg file during compilation.\nThe content of the importcfg file is shown below, which specifies the package dependency information required for all Go files to be compiled in the current package path.\npackagefile errors=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b006/_pkg_.a packagefile internal/itoa=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b027/_pkg_.a packagefile internal/oserror=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b035/_pkg_.a So when a file is copied and added to the compilation process, the relevant dependency libraries need to be declared in importcfg. Therefore, by predefining the imports in the project, the compiler can be forced to include the relevant libraries during compilation, thus completing the dynamic enhancement operation.\nPlugin with Agent Core As mentioned in the previous section, it is not possible to dynamically add dependencies between modules. The Agent can only modify the importcfg file to reference dependencies when it is sure that those dependencies have already been loaded, but this is often impractical. For example, the Agent cannot introduce dependencies from the plugin code into the Agent Core, because the plugin is unaware of the Agent\u0026rsquo;s existence. This raises a question: how can the agent enable communication between plugins and Agent Core?\nCurrently, the agent employs the following method: a global object is introduced in the runtime package, provided by Agent Core. 
When a plugin needs to interact with Agent Core, it simply searches for this global object from runtime package. The specific steps are as follows:\n Global object definition: Add a global variable when the runtime package is loaded and provide corresponding set and get methods. Set the variable when the Agent loads: When the Agent Core is copied and enhanced, import the method for setting the global variable and initialize the object in the global variable. Plugins: When the plugin is built, import the methods for reading the global variables and APIs. At this point, we can access the object set in Agent Core and use the defined interface for the plugin to access methods in Agent Core.  Limitation Since the communication between the plugin API and Agent Core is through an interface, and the plugin API is copied in each plugin, they can only transfer basic data types or any(interface{}) type. The reason is that when additional types are transferred, agent would be copied multiple times, so the types transferred in the plugin are not consistent with the types in Agent Core, as the types also need to be defined multiple times.\nTherefore, when communicating, they only pass structured data through any type, and when the Agent Core or plugin obtains the data, a type cast is simply required.\nDebugging Based on the introductions in the previous sections, both Agent Core and plugin code are dynamically copied/modified into the target package. So, how can we debug the program during development to identify issues?\nOur current approach consists of the following steps:\n Inform the source code location during flag: Enhance the debug parameters during compilation and inform the system path, for example: -toolexec \u0026quot;/path/to/agent -debug /path/to/code\u0026quot; Get the original file path: Find the absolute location of the source code of the file to be copied based on the rules. Introduce the //line directive: Add the //line directive to the copied target file to inform the compiler of the location of the original file after copying.  At this point, when the program is executed, developer can find the original file to be copied in the source code.\n","title":"Key Principle","url":"/docs/skywalking-go/latest/en/concepts-and-designs/key-principles/"},{"content":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the developers and end users understand how the agent works easier.\nMethod Interceptor Method interception is particularly important in SkyWalking Go, as it enables the creation of plugins. In SkyWalking Go, method interception mainly involves the following key points:\n Finding Method: Using AST to find method information in the target code to be enhanced. Modifying Methods: Enhancing the specified methods and embedding interceptor code. Saving and Compiling: Updating the modified files in the compilation arguments.  Finding Method When looking for methods, the SkyWalking Go Agent requires to search according to the provided compilation arguments, which mainly include the following two parts:\n Package information: Based on the package name provided by the arguments, the Agent can find the specific plugin. Go files: When a matching plugin is found, the Agent reads the .go files and uses AST to parse the method information from these source files. When the method information matches the method information required by the plugin for the interception, the agent would consider the method found.  
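To make the Finding Method step described above more concrete, here is a minimal, hypothetical sketch of locating a target method in a Go source file with the standard go/parser and go/ast packages. It is not the agent's actual implementation; the receiver and method names are illustrative assumptions.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

// findMethod walks the parsed file and returns the declaration of a method
// with the given receiver type and name, or nil if none matches.
func findMethod(file *ast.File, recvType, name string) *ast.FuncDecl {
	var found *ast.FuncDecl
	ast.Inspect(file, func(n ast.Node) bool {
		fn, ok := n.(*ast.FuncDecl)
		if !ok || fn.Name.Name != name || fn.Recv == nil || len(fn.Recv.List) == 0 {
			return true
		}
		t := fn.Recv.List[0].Type
		if star, ok := t.(*ast.StarExpr); ok { // unwrap a pointer receiver such as (*Server)
			t = star.X
		}
		if ident, ok := t.(*ast.Ident); ok && ident.Name == recvType {
			found = fn
			return false
		}
		return true
	})
	return found
}

func main() {
	// Illustrative source only; the agent reads the real .go files passed in the compile arguments.
	src := `package demo
type Server struct{}
func (s *Server) Handle(req string) string { return req }`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	if fn := findMethod(file, "Server", "Handle"); fn != nil {
		fmt.Println("matched method at", fset.Position(fn.Pos()))
	}
}

In the real agent, the comparison is made against the method information declared by each plugin, but an AST walk of this kind is the core mechanism the section describes.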
Modifying Methods After finding the method, the SkyWalking Go Agent needs to modify the method implication and embed the interceptor code.\nChange Method Body When intercepting a method, the first thing to do is to modify the method and embed the template code. This code segment includes two method executions:\n Before method execution: Pass in the current method\u0026rsquo;s arguments, instances, and other information. After method execution: Using the defer method, intercept the result parameters after the code execution is completed.  Based on these two methods, the agent can intercept before and after method execution.\nIn order not to affect the line of code execution, this code segment will only be executed in the same line as the first statement in the method. This ensures that when an exception occurs in the framework code execution, the exact location can still be found without being affected by the enhanced code.\nWrite Delegator File After the agent enhances the method body, it needs to implement the above two methods and write them into a single file, called the delegator file. These two methods would do the following:\n Before method execution: Build by the template. Build the context for before and after interception, and pass the parameter information during execution to the interceptor in each plugin. After method execution: Build by the template. Pass the method return value to the interceptor and execute the method.  Copy Files After completing the delegator file, the agent would perform the following copy operations:\n Plugin Code: Copy the Go files containing the interceptors in the plugin to the same level directory as the current framework. Plugin Development API Code: Copy the operation APIs required by the interceptors in the plugin to the same level directory as the current framework, such as tracing.  After copying the files, they cannot be immediately added to the compilation parameters, because they may have the same name as the existing framework code. Therefore, we need to perform some rewriting operations, which include the following parts:\n Types: Rename created structures, interfaces, methods, and other types by adding a unified prefix. Static Methods: Add a prefix to non-instance methods. Static methods do not need to be rewritten since they have already been processed in the types. Variables: Add a prefix to global variables. It\u0026rsquo;s not necessary to add a prefix to variables inside methods because they can ensure no conflicts would arise and are helpful for debugging.  
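Before the rewriting example that follows, here is a small, hypothetical sketch of the before/after interception template described under Change Method Body and Write Delegator File above. The hook names and signatures are assumptions for illustration; the real delegator code is generated from the agent's templates.

package main

import "fmt"

// beforeInvoke and afterInvoke stand in for the two generated delegator methods;
// their real names and parameters come from the agent's templates.
func beforeInvoke(method string, args ...interface{}) interface{} {
	fmt.Println("before:", method, args)
	return method // an opaque invocation context in this sketch
}

func afterInvoke(ctx, result interface{}, err error) {
	fmt.Println("after:", ctx, result, err)
}

// Handle shows the shape of an enhanced method body: the before hook and the
// deferred after hook are squeezed onto the same line as the first original
// statement, so the original line numbers are preserved.
func Handle(req string) (resp string, err error) {
	ctx := beforeInvoke("Handle", req); defer func() { afterInvoke(ctx, resp, err) }(); resp = "echo:" + req
	return resp, nil
}

func main() {
	fmt.Println(Handle("ping"))
}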
In the Tracing API, we can see several methods, such as:\nvar ( errParameter = operator.NewError(\u0026#34;parameter are nil\u0026#34;) ) func CreateLocalSpan(operationName string, opts ...SpanOption) (s Span, err error) type SpanOption interface { Apply(interface{}) } After performed rewriting operations, they would become:\nvar ( skywalkingOperatorVarTracingerrParameter = skywalkingOperatorStaticMethodOperatorNewError(\u0026#34;parameter are nil\u0026#34;) ) func skywalkingOperatorStaticMethodTracingCreateLocalSpan(operationName string, opts ...skywalkingOperatorTypeTracingSpanOption) (s skywalkingOperatorTypeTracingSpan, err error) type skywalkingOperatorTypeTracingSpanOption interface { Apply(interface{}) } Saving and Compiling After the above steps are completed, the agent needs to save the modified files and add them to the compilation parameters.\nAt this point, when the framework executes the enhanced method, it can have the following capabilities:\n Execute Plugin Code: Custom code can be embedded before and after the method execution, and real-time parameter information can be obtained. Operate Agent: By calling the Agent API, interaction with the Agent Core can be achieved, enabling functions such as distributed tracing.  Propagation Context SkyWalking uses a new and internal mechanism to propagate context(e.g. tracing context) instead of relying on go native context.Context. This reduces the requirement for the target codes.\nContext Propagation between Methods In the agent, it would enhance the g structure in the runtime package. The g structure in Golang represents the internal data of the current goroutine. By enhancing this structure and using the runtime.getg() method, we can obtain the enhanced data in the current structure in real-time.\nEnhancement includes the following steps:\n Add Attributes to g: Add a new field to the g struct, and value as interface{}. Export Methods: Export methods for real-time setting and getting of custom field values in the current goroutine through go:linkname. Import methods: In the Agent Core, import the setting and getting methods for custom fields.  Through these, the agent has a shared context in any place within the same goroutine, similar to Java\u0026rsquo;s Thread Local.\nContext Propagation between Goroutines Besides using g object as the in-goroutine context propagation, SkyWalking builds a mechanism to propagate context between Goroutines.\nWhen a new goroutine is started on an existing goroutine, the runtime.newproc1 method is called to create a new goroutine based on the existing one. The agent would do context-copy from the previous goroutine to the newly created goroutine. The new context in the goroutine only shares limited information to help continues tracing.\nThe specific operation process is as follows:\n Write the copy method: Create a method for copying data from the previous goroutine. Insert code into newproc1: Insert the defer code, intercept the g objects before and after the execution, and call the copy method to assign values to the custom fields' data.  Agent with Dependency Since SkyWalking Go Agent is based on compile-time enhancement, it cannot introduce third-party modules. For example, when SkyWalking Agent communicates with OAP, it needs to exchange data through the gRPC protocol. If the user does not introduce the gRPC module, it cannot be completed.\nDue to resolve this problem, users need to introduce relevant modules to complete the basic dependency functions. 
This is why import _ \u0026quot;github.com/apache/skywalking-go\u0026quot; is required. The main key modules that users currently need to introduce include:\n uuid: Used to generate UUIDs, mainly for TraceID generation. errors: To encapsulate error content. gRPC: The basic library used for communication between SkyWalking Go Agent and the Server. skywalking-goapi: The data protocol for communication between Agent and Server in SkyWalking.  Agent Core Copy To simplify the complexity of using Agent, the SkyWalking Go introduced by users only contains the user usage API and code import. The Agent Core code would be dynamically added during hybrid compilation, so when the Agent releases new features, users only need to upgrade the Agent enhancement program without modifying the references in the program.\nCode Import You can see a lot of imports.go files anywhere in the SkyWalking Go, such as imports.go in the root directory, but there is no actual code. This is because, during hybrid compilation, if the code to be compiled references other libraries, such as os, fmt, etc., they need to be referenced through the importcfg file during compilation.\nThe content of the importcfg file is shown below, which specifies the package dependency information required for all Go files to be compiled in the current package path.\npackagefile errors=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b006/_pkg_.a packagefile internal/itoa=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b027/_pkg_.a packagefile internal/oserror=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b035/_pkg_.a So when the file is copied and added to the compilation process, the relevant dependency libraries need to be declared in importcfg. Therefore, by predefining import in the project, the compiler can be forced to introduce the relevant libraries during compilation, thus completing the dynamic enhancement operation.\nPlugin with Agent Core As mentioned in the previous section, it is not possible to dynamically add dependencies between modules. Agent can only modify the importcfg file to reference dependencies if we are sure that the previous dependencies have already been loaded, but this is often impractical. For example, Agent cannot introduce dependencies from the plugin code into the Agent Core, because the plugin is unaware of the Agent\u0026rsquo;s existence. This raises a question: how can agent enable communication between plugins and Agent Core?\nCurrently, agent employ the following method: a global object is introduced in the runtime package, provided by Agent Core. When a plugin needs to interact with Agent Core, it simply searches for this global object from runtime package. The specific steps are as follows:\n Global object definition: Add a global variable when the runtime package is loaded and provide corresponding set and get methods. Set the variable when the Agent loads: When the Agent Core is copied and enhanced, import the method for setting the global variable and initialize the object in the global variable. Plugins: When the plugin is built, import the methods for reading the global variables and APIs. At this point, we can access the object set in Agent Core and use the defined interface for the plugin to access methods in Agent Core.  Limitation Since the communication between the plugin API and Agent Core is through an interface, and the plugin API is copied in each plugin, they can only transfer basic data types or any(interface{}) type. 
The reason is that when additional types are transferred, agent would be copied multiple times, so the types transferred in the plugin are not consistent with the types in Agent Core, as the types also need to be defined multiple times.\nTherefore, when communicating, they only pass structured data through any type, and when the Agent Core or plugin obtains the data, a type cast is simply required.\nDebugging Based on the introductions in the previous sections, both Agent Core and plugin code are dynamically copied/modified into the target package. So, how can we debug the program during development to identify issues?\nOur current approach consists of the following steps:\n Inform the source code location during flag: Enhance the debug parameters during compilation and inform the system path, for example: -toolexec \u0026quot;/path/to/agent -debug /path/to/code\u0026quot; Get the original file path: Find the absolute location of the source code of the file to be copied based on the rules. Introduce the //line directive: Add the //line directive to the copied target file to inform the compiler of the location of the original file after copying.  At this point, when the program is executed, developer can find the original file to be copied in the source code.\n","title":"Key Principle","url":"/docs/skywalking-go/next/en/concepts-and-designs/key-principles/"},{"content":"Key Principle Introduce the key technical processes used in the SkyWalking Go Agent, to help the developers and end users understand how the agent works easier.\nMethod Interceptor Method interception is particularly important in SkyWalking Go, as it enables the creation of plugins. In SkyWalking Go, method interception mainly involves the following key points:\n Finding Method: Using AST to find method information in the target code to be enhanced. Modifying Methods: Enhancing the specified methods and embedding interceptor code. Saving and Compiling: Updating the modified files in the compilation arguments.  Finding Method When looking for methods, the SkyWalking Go Agent requires to search according to the provided compilation arguments, which mainly include the following two parts:\n Package information: Based on the package name provided by the arguments, the Agent can find the specific plugin. Go files: When a matching plugin is found, the Agent reads the .go files and uses AST to parse the method information from these source files. When the method information matches the method information required by the plugin for the interception, the agent would consider the method found.  Modifying Methods After finding the method, the SkyWalking Go Agent needs to modify the method implication and embed the interceptor code.\nChange Method Body When intercepting a method, the first thing to do is to modify the method and embed the template code. This code segment includes two method executions:\n Before method execution: Pass in the current method\u0026rsquo;s arguments, instances, and other information. After method execution: Using the defer method, intercept the result parameters after the code execution is completed.  Based on these two methods, the agent can intercept before and after method execution.\nIn order not to affect the line of code execution, this code segment will only be executed in the same line as the first statement in the method. 
This ensures that when an exception occurs in the framework code execution, the exact location can still be found without being affected by the enhanced code.\nWrite Delegator File After the agent enhances the method body, it needs to implement the above two methods and write them into a single file, called the delegator file. These two methods would do the following:\n Before method execution: Build by the template. Build the context for before and after interception, and pass the parameter information during execution to the interceptor in each plugin. After method execution: Build by the template. Pass the method return value to the interceptor and execute the method.  Copy Files After completing the delegator file, the agent would perform the following copy operations:\n Plugin Code: Copy the Go files containing the interceptors in the plugin to the same level directory as the current framework. Plugin Development API Code: Copy the operation APIs required by the interceptors in the plugin to the same level directory as the current framework, such as tracing.  After copying the files, they cannot be immediately added to the compilation parameters, because they may have the same name as the existing framework code. Therefore, we need to perform some rewriting operations, which include the following parts:\n Types: Rename created structures, interfaces, methods, and other types by adding a unified prefix. Static Methods: Add a prefix to non-instance methods. Static methods do not need to be rewritten since they have already been processed in the types. Variables: Add a prefix to global variables. It\u0026rsquo;s not necessary to add a prefix to variables inside methods because they can ensure no conflicts would arise and are helpful for debugging.  In the Tracing API, we can see several methods, such as:\nvar ( errParameter = operator.NewError(\u0026#34;parameter are nil\u0026#34;) ) func CreateLocalSpan(operationName string, opts ...SpanOption) (s Span, err error) type SpanOption interface { Apply(interface{}) } After performed rewriting operations, they would become:\nvar ( skywalkingOperatorVarTracingerrParameter = skywalkingOperatorStaticMethodOperatorNewError(\u0026#34;parameter are nil\u0026#34;) ) func skywalkingOperatorStaticMethodTracingCreateLocalSpan(operationName string, opts ...skywalkingOperatorTypeTracingSpanOption) (s skywalkingOperatorTypeTracingSpan, err error) type skywalkingOperatorTypeTracingSpanOption interface { Apply(interface{}) } Saving and Compiling After the above steps are completed, the agent needs to save the modified files and add them to the compilation parameters.\nAt this point, when the framework executes the enhanced method, it can have the following capabilities:\n Execute Plugin Code: Custom code can be embedded before and after the method execution, and real-time parameter information can be obtained. Operate Agent: By calling the Agent API, interaction with the Agent Core can be achieved, enabling functions such as distributed tracing.  Propagation Context SkyWalking uses a new and internal mechanism to propagate context(e.g. tracing context) instead of relying on go native context.Context. This reduces the requirement for the target codes.\nContext Propagation between Methods In the agent, it would enhance the g structure in the runtime package. The g structure in Golang represents the internal data of the current goroutine. 
By enhancing this structure and using the runtime.getg() method, we can obtain the enhanced data in the current structure in real-time.\nEnhancement includes the following steps:\n Add Attributes to g: Add a new field to the g struct, and value as interface{}. Export Methods: Export methods for real-time setting and getting of custom field values in the current goroutine through go:linkname. Import methods: In the Agent Core, import the setting and getting methods for custom fields.  Through these, the agent has a shared context in any place within the same goroutine, similar to Java\u0026rsquo;s Thread Local.\nContext Propagation between Goroutines Besides using g object as the in-goroutine context propagation, SkyWalking builds a mechanism to propagate context between Goroutines.\nWhen a new goroutine is started on an existing goroutine, the runtime.newproc1 method is called to create a new goroutine based on the existing one. The agent would do context-copy from the previous goroutine to the newly created goroutine. The new context in the goroutine only shares limited information to help continues tracing.\nThe specific operation process is as follows:\n Write the copy method: Create a method for copying data from the previous goroutine. Insert code into newproc1: Insert the defer code, intercept the g objects before and after the execution, and call the copy method to assign values to the custom fields' data.  Agent with Dependency Since SkyWalking Go Agent is based on compile-time enhancement, it cannot introduce third-party modules. For example, when SkyWalking Agent communicates with OAP, it needs to exchange data through the gRPC protocol. If the user does not introduce the gRPC module, it cannot be completed.\nDue to resolve this problem, users need to introduce relevant modules to complete the basic dependency functions. This is why import _ \u0026quot;github.com/apache/skywalking-go\u0026quot; is required. The main key modules that users currently need to introduce include:\n uuid: Used to generate UUIDs, mainly for TraceID generation. errors: To encapsulate error content. gRPC: The basic library used for communication between SkyWalking Go Agent and the Server. skywalking-goapi: The data protocol for communication between Agent and Server in SkyWalking.  Agent Core Copy To simplify the complexity of using Agent, the SkyWalking Go introduced by users only contains the user usage API and code import. The Agent Core code would be dynamically added during hybrid compilation, so when the Agent releases new features, users only need to upgrade the Agent enhancement program without modifying the references in the program.\nCode Import You can see a lot of imports.go files anywhere in the SkyWalking Go, such as imports.go in the root directory, but there is no actual code. 
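As a rough illustration of the Code Import idea, such an imports.go file usually holds nothing but blank imports; the package list below is an assumption for demonstration, not the project's actual file contents.

// imports.go (illustrative): blank imports force these packages into the
// compile's importcfg so that the injected Agent Core code can use them,
// even though this file itself contains no executable code.
package example

import (
	_ "errors"
	_ "fmt"
	_ "os"
	// Third-party modules such as the gRPC library would be listed the same
	// way once they are declared in go.mod.
)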
This is because, during hybrid compilation, if the code to be compiled references other libraries, such as os, fmt, etc., they need to be referenced through the importcfg file during compilation.\nThe content of the importcfg file is shown below, which specifies the package dependency information required for all Go files to be compiled in the current package path.\npackagefile errors=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b006/_pkg_.a packagefile internal/itoa=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b027/_pkg_.a packagefile internal/oserror=/var/folders/wz/s5m922z15vz4fjhf5l4458xm0000gn/T/go-build2774248373/b035/_pkg_.a So when the file is copied and added to the compilation process, the relevant dependency libraries need to be declared in importcfg. Therefore, by predefining import in the project, the compiler can be forced to introduce the relevant libraries during compilation, thus completing the dynamic enhancement operation.\nPlugin with Agent Core As mentioned in the previous section, it is not possible to dynamically add dependencies between modules. Agent can only modify the importcfg file to reference dependencies if we are sure that the previous dependencies have already been loaded, but this is often impractical. For example, Agent cannot introduce dependencies from the plugin code into the Agent Core, because the plugin is unaware of the Agent\u0026rsquo;s existence. This raises a question: how can agent enable communication between plugins and Agent Core?\nCurrently, agent employ the following method: a global object is introduced in the runtime package, provided by Agent Core. When a plugin needs to interact with Agent Core, it simply searches for this global object from runtime package. The specific steps are as follows:\n Global object definition: Add a global variable when the runtime package is loaded and provide corresponding set and get methods. Set the variable when the Agent loads: When the Agent Core is copied and enhanced, import the method for setting the global variable and initialize the object in the global variable. Plugins: When the plugin is built, import the methods for reading the global variables and APIs. At this point, we can access the object set in Agent Core and use the defined interface for the plugin to access methods in Agent Core.  Limitation Since the communication between the plugin API and Agent Core is through an interface, and the plugin API is copied in each plugin, they can only transfer basic data types or any(interface{}) type. The reason is that when additional types are transferred, agent would be copied multiple times, so the types transferred in the plugin are not consistent with the types in Agent Core, as the types also need to be defined multiple times.\nTherefore, when communicating, they only pass structured data through any type, and when the Agent Core or plugin obtains the data, a type cast is simply required.\nDebugging Based on the introductions in the previous sections, both Agent Core and plugin code are dynamically copied/modified into the target package. 
So, how can we debug the program during development to identify issues?\nOur current approach consists of the following steps:\n Inform the source code location during flag: Enhance the debug parameters during compilation and inform the system path, for example: -toolexec \u0026quot;/path/to/agent -debug /path/to/code\u0026quot; Get the original file path: Find the absolute location of the source code of the file to be copied based on the rules. Introduce the //line directive: Add the //line directive to the copied target file to inform the compiler of the location of the original file after copying.  At this point, when the program is executed, developer can find the original file to be copied in the source code.\n","title":"Key Principle","url":"/docs/skywalking-go/v0.4.0/en/concepts-and-designs/key-principles/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. 
K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s 
k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/latest/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring Kubernetes is an open-source container-orchestration system for automating computer application deployment, scaling, and management. It was originally designed by Google and is now maintained by the Cloud Native Computing Foundation. It aims to provide a \u0026ldquo;platform for automating deployment, scaling, and operations of application containers across clusters of hosts\u0026rdquo;. It works with a range of container tools, including Docker.\nNowadays, Kubernetes is the fundamental infrastructure for cloud native applications. SkyWalking provides the following ways to monitor deployments on Kubernetes.\n Use kube-state-metrics (KSM) and cAdvisor to collect metrics of Kubernetes resources, such as CPU, service, pod, and node. Read kube-state-metrics and cAdvisor setup guide for more details. Rover is a SkyWalking native eBPF agent to collect network Access Logs to support topology-aware and metrics analysis. Meanwhile, due to the power of eBPF, it could profile running services written by C++, Rust, Golang, etc. Read Rover setup guide for more details.  SkyWalking deeply integrates with Kubernetes to help users understand the status of their applications on Kubernetes. 
Cillium with Hubble is in our v10 plan.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/next/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus GRPC Exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  
k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/k8s-cluster.yaml,/config/otel-oc-rules/k8s-node.yaml, /config/otel-oc-rules/k8s-service.yaml.\nThe K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.1.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod 
Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s-cluster.yaml,/config/otel-rules/k8s-node.yaml, /config/otel-rules/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.2.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. 
OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m 
k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s-cluster.yaml,/config/otel-rules/k8s-node.yaml, /config/otel-rules/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.3.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi 
k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . 
For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s 
kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. 
For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi 
k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The totaly memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observabilities into service status and resources from Kubernetes. K8s Service as a Service in OAP and land on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / Limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / Limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The CPU resources total usage of pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The memory resources total usage of pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml,/config/otel-rules/k8s/k8s-node.yaml, /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor for collecting metrics data from K8s. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server access to K8s\u0026rsquo;s API Server gets meta info and parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup kube-state-metric. cAdvisor is integrated into kubelet by default. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and recommended version; you can refer to showcase. 
Config SkyWalking OpenTelemetry receiver.  Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total 
usage of the CPU core; if there are 2 cores, the maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The total memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observability into service status and resources from Kubernetes. A K8s Service maps to a Service in OAP and lands on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The total CPU usage of the pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The total memory usage of the pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per-container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expressions/dashboard panels. The metrics definitions and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml, /config/otel-rules/k8s/k8s-node.yaml, and /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-k8s-monitoring/"},{"content":"Kubernetes (K8s) monitoring from kube-state-metrics and cAdvisor SkyWalking leverages K8s kube-state-metrics (KSM) and cAdvisor to collect metrics data from K8s. It leverages the OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System. This feature requires authorizing the OAP Server to access K8s's API Server.\nData flow  K8s kube-state-metrics and cAdvisor collect metrics data from K8s. The OpenTelemetry Collector fetches metrics from kube-state-metrics and cAdvisor via the Prometheus Receiver and pushes the metrics to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server accesses K8s's API Server to get meta info, and parses the expressions with MAL to filter/calculate/aggregate and store the results.  Setup  Set up kube-state-metrics. cAdvisor is integrated into kubelet by default. Set up the OpenTelemetry Collector. For details on the Prometheus Receiver in the OpenTelemetry Collector for K8s, refer to here. For a quick start, we have provided a complete example of configuration and a recommended version; you can refer to the showcase, and a minimal collector pipeline sketch also follows below. Configure the SkyWalking OpenTelemetry receiver.  
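To make the collector step concrete, the following is a minimal, illustrative OpenTelemetry Collector pipeline that matches the data flow described above: a Prometheus receiver scrapes kube-state-metrics, and an OTLP gRPC exporter pushes the metrics to the OAP. The service addresses and ports (kube-state-metrics.kube-system.svc:8080, skywalking-oap.skywalking.svc:11800) are assumptions for illustration only; a real deployment should follow the showcase configuration, which also covers cAdvisor scraping and the labels/relabeling the OAP rules expect.

receivers:
  prometheus:
    config:
      scrape_configs:
        # assumed in-cluster address of the kube-state-metrics service
        - job_name: kube-state-metrics
          static_configs:
            - targets: ["kube-state-metrics.kube-system.svc:8080"]

processors:
  batch: {}   # batch metrics before export

exporters:
  otlp:
    # assumed OAP gRPC endpoint; adjust to your deployment
    endpoint: "skywalking-oap.skywalking.svc:11800"
    tls:
      insecure: true   # plain gRPC; enable TLS if the OAP requires it

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]

With the collector running, the remaining step on the OAP side is to enable the SkyWalking OpenTelemetry receiver so the rule files listed under Customizations below can process the incoming metrics.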
Kubernetes Cluster Monitoring K8s cluster monitoring provides monitoring of the status and resources of the whole cluster and each node. K8s cluster as a Service in OAP, K8s node as an Instance in OAP, and land on the Layer: K8S.\nKubernetes Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Node Total  k8s_cluster_node_total The number of nodes K8s kube-state-metrics   Namespace Total  k8s_cluster_namespace_total The number of namespaces K8s kube-state-metrics   Deployment Total  k8s_cluster_deployment_total The number of deployments K8s kube-state-metrics   StatefulSet Total  k8s_cluster_statefulset_total The number of statefulsets K8s kube-state-metrics   DaemonSet Total  k8s_cluster_daemonset_total The number of daemonsets K8s kube-state-metrics   Service Total  k8s_cluster_service_total The number of services K8s kube-state-metrics   Pod Total  k8s_cluster_pod_total The number of pods K8s kube-state-metrics   Container Total  k8s_cluster_container_total The number of containers K8s kube-state-metrics   CPU Resources m k8s_cluster_cpu_cores\nk8s_cluster_cpu_cores_requests\nk8s_cluster_cpu_cores_limits\nk8s_cluster_cpu_cores_allocatable The capacity and the Requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_cluster_memory_total\nk8s_cluster_memory_requests\nk8s_cluster_memory_limits\nk8s_cluster_memory_allocatable The capacity and the Requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_cluster_storage_total\nk8s_cluster_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   Node Status  k8s_cluster_node_status The current status of the nodes K8s kube-state-metrics   Deployment Status  k8s_cluster_deployment_status The current status of the deployment K8s kube-state-metrics   Deployment Spec Replicas  k8s_cluster_deployment_spec_replicas The number of desired pods for a deployment K8s kube-state-metrics   Service Status  k8s_cluster_service_pod_status The services current status, depending on the related pods' status K8s kube-state-metrics   Pod Status Not Running  k8s_cluster_pod_status_not_running The pods which are not running in the current phase K8s kube-state-metrics   Pod Status Waiting  k8s_cluster_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Status Terminated  k8s_cluster_container_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics    Kubernetes Cluster Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Pod Total  k8s_node_pod_total The number of pods in this node K8s kube-state-metrics   Node Status  k8s_node_node_status The current status of this node K8s kube-state-metrics   CPU Resources m k8s_node_cpu_cores\nk8s_node_cpu_cores_allocatable\nk8s_node_cpu_cores_requests\nk8s_node_cpu_cores_limits The capacity and the requests / Limits / Allocatable of the CPU K8s kube-state-metrics   Memory Resources Gi k8s_node_memory_total\nk8s_node_memory_allocatable\nk8s_node_memory_requests\nk8s_node_memory_limits The capacity and the requests / Limits / Allocatable of the memory K8s kube-state-metrics   Storage Resources Gi k8s_node_storage_total\nk8s_node_storage_allocatable The capacity and allocatable of the storage K8s kube-state-metrics   CPU Usage m k8s_node_cpu_usage The total usage of the CPU core, if there are 2 cores the 
maximum usage is 2000m cAdvisor   Memory Usage Gi k8s_node_memory_usage The total memory usage cAdvisor   Network I/O KB/s k8s_node_network_receive\nk8s_node_network_transmit The network receive and transmit cAdvisor    Kubernetes Service Monitoring K8s Service Monitoring provides observability into service status and resources from Kubernetes. A K8s Service maps to a Service in OAP and lands on the Layer: K8S_SERVICE.\nKubernetes Service Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Service Pod Total  k8s_service_pod_total The number of pods K8s kube-state-metrics   Service Pod Status  k8s_service_pod_status The current status of pods K8s kube-state-metrics   Service CPU Resources m k8s_service_cpu_cores_requests\nk8s_service_cpu_cores_limits The CPU resources requests / limits of this service K8s kube-state-metrics   Service Memory Resources MB k8s_service_memory_requests\nk8s_service_memory_limits The memory resources requests / limits of this service K8s kube-state-metrics   Pod CPU Usage m k8s_service_pod_cpu_usage The total CPU usage of the pods cAdvisor   Pod Memory Usage MB k8s_service_pod_memory_usage The total memory usage of the pods cAdvisor   Pod Waiting  k8s_service_pod_status_waiting The pods and containers which are currently in the waiting status, with reasons shown K8s kube-state-metrics   Pod Terminated  k8s_service_pod_status_terminated The pods and containers which are currently in the terminated status, with reasons shown K8s kube-state-metrics   Pod Restarts  k8s_service_pod_status_restarts_total The number of per-container restarts related to the pods K8s kube-state-metrics    Customizations You can customize your own metrics/expressions/dashboard panels. The metrics definitions and expression rules are found in /config/otel-rules/k8s/k8s-cluster.yaml, /config/otel-rules/k8s/k8s-node.yaml, and /config/otel-rules/k8s/k8s-service.yaml. The K8s Cluster dashboard panel configurations are found in /config/ui-initialized-templates/k8s. The K8s Service dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring from kube-state-metrics and cAdvisor","url":"/docs/main/next/en/setup/backend/backend-k8s-monitoring-metrics-cadvisor/"},{"content":"Kubernetes (K8s) monitoring from Rover SkyWalking uses the SkyWalking Rover system to collect access logs from Kubernetes clusters and hands them over to the OAL system for metrics and entity analysis.\nData flow  SkyWalking Rover monitors access log data from K8s and sends it to the OAP. The SkyWalking OAP Server receives the access logs from Rover through gRPC, analyzes and generates the entities, and uses OAL to generate metrics.  Setup  Set up Rover in Kubernetes and enable the access log service. Set up the eBPF receiver module with the following configuration.  receiver-ebpf:selector:${SW_RECEIVER_EBPF:default}default:Generated Entities SkyWalking receives the access logs from Rover and analyzes the Kubernetes connection information to parse out the following corresponding entities:\n Service Service Instance Service Endpoint Service Relation Service Instance Relation Service Endpoint Relation  Generate Metrics For each of the above-mentioned entities, metrics such as connection, transmission, and protocol can be analyzed.\nConnection Metrics Record the relevant metrics for every service establishing/closing connections with other services.\n   Name Unit Description     Connect CPM Count Total Connect to other Service counts per minute.   
Connect Duration Nanoseconds Total Connect to other Service use duration.   Connect Success CPM Count Success to connect to other Service counts per minutes.   Accept CPM Count Accept new connection from other Service counts per minutes.   Accept Duration Nanoseconds Total accept new connection from other Service use duration.   Close CPM Count Close one connection counts per minutes.   Close Duration Nanoseconds Total Close connections use duration.    Transfer Metrics Record the basic information and L2-L4 layer details for each syscall made during network requests by every service to other services.\nRead Data from Connection    Name Unit Description     Read CPM Count Read from connection counts per minutes.   Read Duration Nanoseconds Total read data use duration.   Read Package CPM Count Total read TCP Package count per minutes.   Read Package Size Bytes Total read TCP package size per minutes.   Read Layer 4 Duration Nanoseconds Total read data on the Layer 4 use duration.   Read Layer 3 Duration Nanoseconds Total read data on the Layer 3 use duration.   Read Layer 3 Recv Duration Nanoseconds Total read data on the Layer 3 receive use duration.   Read Layer 3 Local Duration Nanoseconds Total read data on the Layer 3 local use duration.   Read Package To Queue Duration Nanoseconds Total duration between TCP package received and send to Queue.   Read Package From Queue Duration Nanoseconds Total duration between send to Queue and receive from Queue.   Read Net Filter CPM Count Total Net Filtered count when read data.   Read Net Filter Duration Nanoseconds Total Net Filtered use duration.    Write Data to Connection    Name Unit Description     Write CPM Count Write to connection counts per minutes.   Write Duration Nanoseconds Total write data to connection use duration.   Write Package CPM Count Total write TCP Package count per minutes.   Write Package Size Bytes Total write TCP Package size per minutes.   Write L4 Duration Nanoseconds Total write data to connection Layer 4 use duration.   Write L3 Duration Nanoseconds Total write data to connection Layer 3 use duration.   Write L3 Local Duration Nanoseconds Total write data to the connection Layer 3 Local use duration.   Write L3 Output Duration Nanoseconds Total write data to the connection Layer 3 Output use duration.   Write L2 Duration Nanoseconds Total write data to connection Layer 2 use duration.   Write L2 Ready Send Duration Nanoseconds Total write data to the connection Layer 2 ready send data queue use duration.   Write L2 Send NetDevice Duration Nanoseconds Total write data to the connection Layer 2 send data to net device use duration.    Protocol Based on each transfer data analysis, extract the information of the 7-layer network protocol.\nHTTP/1.x or HTTP/2.x    Name Init Description     Call CPM Count HTTP Request calls per minutes.   Duration Nanoseconds Total HTTP Response use duration.   Success CPM Count Total HTTP Response success(status \u0026lt; 500) count.   Request Header Size Bytes Total Request Header size.   Request Body Size Bytes Total Request Body size.   Response Header Size Bytes Total Response Header size.   Response Body Size Bytes Total Response Body size.    Customizations You can customize your own metrics/dashboard panel. The metrics definition and expression rules are found in /config/oal/ebpf.oal, please refer the Scope Declaration Documentation. 
The K8s dashboard panel configurations are found in /config/ui-initialized-templates/k8s_service.\n","title":"Kubernetes (K8s) monitoring from Rover","url":"/docs/main/next/en/setup/backend/backend-k8s-monitoring-rover/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. 
Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","title":"Kubernetes Network monitoring","url":"/docs/main/latest/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   
HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","title":"Kubernetes Network monitoring","url":"/docs/main/next/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature for collecting metrics data from the network. SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.    ","title":"Kubernetes Network monitoring","url":"/docs/main/v9.2.0/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. 
The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","title":"Kubernetes Network monitoring","url":"/docs/main/v9.3.0/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. 
Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    
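The MAL activation snippet shown in these pages appears flattened in this index; as a rough sketch (assuming the OAP server's standard application.yml layout), it corresponds to YAML along these lines:
```yaml
# Sketch only: activate the network-profiling meter analyzer rules in the OAP's application.yml
agent-analyzer:
  selector: ${SW_AGENT_ANALYZER:default}
  default:
    # MAL rule file name(s) to activate (here: network-profiling)
    meterAnalyzerActiveFiles: ${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}
```
Following the ${ENV:default} convention used throughout the configuration, setting the SW_METER_ANALYZER_ACTIVE_FILES environment variable overrides the default file list.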
","title":"Kubernetes Network monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   
HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","title":"Kubernetes Network monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   Local process communicate with peer address exception data, including following types:  Retransmit: The count of TCP package is retransmitted. Drop: The count of TCP package is dropped.   HTTP/1.x request/response related metrics, including following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. 
Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) of the client receive the response. Server Side Response Duration: The duration(ms) of the server send the response.   HTTP sampled request with traces, including following types:  Slow traces: The traces which have slow duration. Traces from HTTP Code in [400, 500) (ms): The traces which response status code in [400, 500). Traces from HTTP Code in [500, 600) (ms): The traces which response status code in [500, 600).    ","title":"Kubernetes Network monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Kubernetes Network monitoring SkyWalking leverages SkyWalking Rover network profiling feature to measure network performance for particular pods on-demand, including metrics of L4(TCP) and L7(HTTP) traffic and raw data of HTTP requests and responses. Underlying, SkyWalking Rover converts data from socket data to metrics using eBPF technology.\nData flow  SkyWalking OAP server observes which specific k8s pod needs to monitor the network. SkyWalking Rover receives tasks from SkyWalking OAP server and executes them, and converts the network data into metrics send to the backend service. The SkyWalking OAP Server accesses K8s\u0026rsquo;s API Server to fetch meta info and parses the expression with MAL to aggregate.  Setup  Setup SkyWalking Rover. Enable the network profiling MAL file in the OAP server.  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:network-profiling}Sampling config Notice the precondition, the HTTP request must have the trace header in SkyWalking(sw8 header) or Zipkin(b3 header(s)) format.\nThe sampling configurations define the sampling boundaries for the HTTP traffic. When a HTTP calling is sampled, the SkyWalking Rover could collect the HTTP request/response raw data and upload it to the span attached event.\nThe sampling config contains multiple rules, and each of rules has the following configurations:\n URI Regex: The match pattern for HTTP requests is HTTP URI-oriented. Match all requests if the URI regex is not set. Minimal Request Duration (ms): Sample the HTTP requests with slower latency than this threshold. Sample HTTP requests and responses with tracing when the response code is between 400 and 499: This is OFF by default. Sample HTTP requests and responses with tracing when the response code is between 500 and 599: This is ON by default.  Supported metrics After SkyWalking OAP server receives the metrics from the SkyWalking Rover, it supports to analysis the following data:\n Topology: Based on the process and peer address, the following topology data is supported:  Relation: Analyze the relationship between local processes, or local process with external pods or services. SSL: The socket read or write package with SSL. Protocol: The protocols for write or read data.   TCP socket read and write metrics, including following types:  Call Per Minute: The count of the socket read or write. Bytes: The package size of the socket data. Execute Time: The executed time of the socket read or write. Connect: The socket connect/accept with peer address count and execute time. Close: The socket close the socket count and execute time. RTT: The RTT(Round Trip Time) of socket communicate with peer address.   
Exception data for local process communication with the peer address, including the following types:  Retransmit: The count of retransmitted TCP packages. Drop: The count of dropped TCP packages.   HTTP/1.x request/response related metrics, including the following types:  Request CPM: The calls per minute of requests. Response CPM: The calls per minute of responses with status code. Request Package Size: The size(KB) of the request package. Response Package Size: The size(KB) of the response package. Client Side Response Duration: The duration(ms) for the client to receive the response. Server Side Response Duration: The duration(ms) for the server to send the response.   HTTP sampled requests with traces, including the following types:  Slow traces: Traces with slow durations. Traces from HTTP Code in [400, 500) (ms): Traces whose response status code is in [400, 500). Traces from HTTP Code in [500, 600) (ms): Traces whose response status code is in [500, 600).    ","title":"Kubernetes Network monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-k8s-network-monitoring/"},{"content":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, which is by importing SkyWalking into your project and starting the agent.\nDefaults By default, the SkyWalking Python agent uses the gRPC protocol to report data to the SkyWalking backend; in the SkyWalking backend, the port of the gRPC protocol is 11800, and the port of the HTTP protocol is 12800.\nSee all default configuration values in the Configuration Vocabulary.\nYou could configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) and set agent_protocol (or environment variable SW_AGENT_PROTOCOL) to one of grpc, http or kafka according to the protocol you would like to use.\nReport data via gRPC protocol (Default) For example, if you want to use the gRPC protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:11800, such as 127.0.0.1:11800:\nfrom skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:11800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via HTTP protocol However, if you want to use the HTTP protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:12800, such as 127.0.0.1:12800, and further set agent_protocol (or environment variable SW_AGENT_PROTOCOL) to http:\n Remember you should install skywalking-python with the extra requires http, pip install \u0026quot;apache-skywalking[http]\u0026quot;.\n from skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:12800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;http\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via Kafka protocol Please make sure OAP is consuming the same Kafka topic as your agent produces to; kafka_namespace must match the OAP-side configuration plugin.kafka.namespace.\nFinally, if you want to use the Kafka protocol to report data, configure kafka_bootstrap_servers (or environment variable SW_KAFKA_BOOTSTRAP_SERVERS) to your kafka-brokers, such as 127.0.0.1:9200, and further set agent_protocol (or 
environment variable SW_AGENT_PROTOCOL to kafka):\n Remember you should install skywalking-python with extra requires kafka, pip install \u0026quot;apache-skywalking[kafka]\u0026quot;.\n from skywalking import agent, config config.init(kafka_bootstrap_servers=\u0026#39;127.0.0.1:9200\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;kafka\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Alternatively, you can also pass the configurations via environment variables (such as SW_AGENT_NAME, SW_AGENT_COLLECTOR_BACKEND_SERVICES, etc.) so that you don\u0026rsquo;t need to call config.init.\nAll supported environment variables can be found in the Environment Variables List.\n","title":"Legacy Setup","url":"/docs/skywalking-python/latest/en/setup/intrusive/"},{"content":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, which is by importing SkyWalking into your project and starting the agent.\nDefaults By default, SkyWalking Python agent uses gRPC protocol to report data to SkyWalking backend, in SkyWalking backend, the port of gRPC protocol is 11800, and the port of HTTP protocol is 12800,\nSee all default configuration values in the Configuration Vocabulary\nYou could configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) and set agent_protocol (or environment variable SW_AGENT_PROTOCOL to one of gprc, http or kafka according to the protocol you would like to use.\nReport data via gRPC protocol (Default) For example, if you want to use gRPC protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:11800, such as 127.0.0.1:11800:\nfrom skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:11800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via HTTP protocol However, if you want to use HTTP protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:12800, such as 127.0.0.1:12800, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to http):\n Remember you should install skywalking-python with extra requires http, pip install \u0026quot;apache-skywalking[http].\n from skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:12800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;http\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via Kafka protocol Please make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nFinally, if you want to use Kafka protocol to report data, configure kafka_bootstrap_servers (or environment variable SW_KAFKA_BOOTSTRAP_SERVERS) to kafka-brokers, such as 127.0.0.1:9200, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to kafka):\n Remember you should install skywalking-python with extra requires kafka, pip install \u0026quot;apache-skywalking[kafka]\u0026quot;.\n from skywalking import 
agent, config config.init(kafka_bootstrap_servers=\u0026#39;127.0.0.1:9200\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;kafka\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Alternatively, you can also pass the configurations via environment variables (such as SW_AGENT_NAME, SW_AGENT_COLLECTOR_BACKEND_SERVICES, etc.) so that you don\u0026rsquo;t need to call config.init.\nAll supported environment variables can be found in the Environment Variables List.\n","title":"Legacy Setup","url":"/docs/skywalking-python/next/en/setup/intrusive/"},{"content":"Legacy Setup You can always fall back to our traditional way of integration as introduced below, which is by importing SkyWalking into your project and starting the agent.\nDefaults By default, SkyWalking Python agent uses gRPC protocol to report data to SkyWalking backend, in SkyWalking backend, the port of gRPC protocol is 11800, and the port of HTTP protocol is 12800,\nSee all default configuration values in the Configuration Vocabulary\nYou could configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) and set agent_protocol (or environment variable SW_AGENT_PROTOCOL to one of gprc, http or kafka according to the protocol you would like to use.\nReport data via gRPC protocol (Default) For example, if you want to use gRPC protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:11800, such as 127.0.0.1:11800:\nfrom skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:11800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via HTTP protocol However, if you want to use HTTP protocol to report data, configure agent_collector_backend_services (or environment variable SW_AGENT_COLLECTOR_BACKEND_SERVICES) to \u0026lt;oap-ip-or-host\u0026gt;:12800, such as 127.0.0.1:12800, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to http):\n Remember you should install skywalking-python with extra requires http, pip install \u0026quot;apache-skywalking[http].\n from skywalking import agent, config config.init(agent_collector_backend_services=\u0026#39;127.0.0.1:12800\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;http\u0026#39;, agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Report data via Kafka protocol Please make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nFinally, if you want to use Kafka protocol to report data, configure kafka_bootstrap_servers (or environment variable SW_KAFKA_BOOTSTRAP_SERVERS) to kafka-brokers, such as 127.0.0.1:9200, further set agent_protocol (or environment variable SW_AGENT_PROTOCOL to kafka):\n Remember you should install skywalking-python with extra requires kafka, pip install \u0026quot;apache-skywalking[kafka]\u0026quot;.\n from skywalking import agent, config config.init(kafka_bootstrap_servers=\u0026#39;127.0.0.1:9200\u0026#39;, agent_name=\u0026#39;your awesome service\u0026#39;, agent_protocol=\u0026#39;kafka\u0026#39;, 
agent_instance_name=\u0026#39;your-instance-name or \u0026lt;generated uuid\u0026gt;\u0026#39;) agent.start() Alternatively, you can also pass the configurations via environment variables (such as SW_AGENT_NAME, SW_AGENT_COLLECTOR_BACKEND_SERVICES, etc.) so that you don\u0026rsquo;t need to call config.init.\nAll supported environment variables can be found in the Environment Variables List.\n","title":"Legacy Setup","url":"/docs/skywalking-python/v1.0.1/en/setup/intrusive/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System. The VM entity is a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides the Telegraf receiver to receive VMs' metrics data from InfluxDB Telegraf. The Telegraf receiver plugin receives, processes and converts the metrics, then sends the converted metrics to the Meter System. The VM entity is a Service in OAP and on the Layer: OS_LINUX.\nData flow For the OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via the Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For the Telegraf receiver:\n The InfluxDB Telegraf input plugins collect various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in the telegraf.conf file. InfluxDB Telegraf sends JSON-format metrics via HTTP messages to the Telegraf Receiver, which then pushes the converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results. The meter_vm_cpu_average_used metric indicates the average usage of each CPU core for the Telegraf receiver.  Setup For the OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example OpenTelemetry Collector configuration: otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For the Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to the Telegraf official documentation. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to the Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/latest/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. 
The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. 
The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/next/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs, and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nVM entity as a Service in OAP, and on the Layer: OS_LINUX.\nData flow  The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/vm.yaml.\nThe dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.0.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter 
System.\nVM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow  The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-oc-rules/vm.yaml.\nThe dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.1.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow  The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup Prometheus node-exporter. 
Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.2.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. 
The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. 
The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.3.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used The RAM statistics, including Total / Available / Used Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read,meter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. 
The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. 
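For orientation, a minimal otel-collector-config.yaml for the data flow described above might look like the following sketch. The scrape job name, the node-exporter target, and the OAP address are illustrative placeholders rather than values taken from this page; the otel-collector-config.yaml example referenced in the Setup steps remains the authoritative template.
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'vm-monitoring'            # assumed job name
          scrape_interval: 10s
          static_configs:
            - targets: ['node-exporter:9100']  # assumed node-exporter address
processors:
  batch: {}
exporters:
  otlp:
    endpoint: 'oap:11800'                      # assumed SkyWalking OAP gRPC address
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
The OAP side then applies the MAL rules in /config/otel-rules/vm.yaml to the metrics arriving on this pipeline.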
The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Linux Monitoring SkyWalking leverages Prometheus node-exporter to collect metrics data from the VMs and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nSkyWalking also provides InfluxDB Telegraf to receive VMs' metrics data by Telegraf receiver. The telegraf receiver plugin receiver, process and convert the metrics, then it send converted metrics to Meter System. VM entity as a Service in OAP and on the Layer: OS_LINUX.\nData flow For OpenTelemetry receiver:\n The Prometheus node-exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from node-exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  For Telegraf receiver:\n The InfluxDB Telegraf input plugins collects various metrics data from the VMs. The cpu, mem, system, disk and diskio input plugins should be set in telegraf.conf file. 
The InfluxDB Telegraf send JSON format metrics by HTTP messages to Telegraf Receiver, then pushes converted metrics to the SkyWalking OAP Server Meter System. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate ad store the results. The meter_vm_cpu_average_used metrics indicates the average usage of each CPU core for telegraf receiver.  Setup For OpenTelemetry receiver:\n Setup Prometheus node-exporter. Setup OpenTelemetry Collector. This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  For Telegraf receiver:\n Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file according to Telegraf office document. Setup InfluxDB Telegraf\u0026rsquo;s telegraf.conf file specific rules according to Telegraf receiver document. Config SkyWalking Telegraf receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_vm_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus node-exporter\nTelegraf input plugin   Memory RAM Usage MB meter_vm_memory_used The total RAM usage Prometheus node-exporter\nTelegraf input plugin   Memory Swap Usage % meter_vm_memory_swap_percentage The percentage usage of swap memory Prometheus node-exporter\nTelegraf input plugin   CPU Average Used % meter_vm_cpu_average_used The percentage usage of the CPU core in each mode Prometheus node-exporter\nTelegraf input plugin   CPU Load  meter_vm_cpu_load1\nmeter_vm_cpu_load5\nmeter_vm_cpu_load15 The CPU 1m / 5m / 15m average load Prometheus node-exporter\nTelegraf input plugin   Memory RAM MB meter_vm_memory_total\nmeter_vm_memory_available\nmeter_vm_memory_used\nmeter_vm_memory_buff_cache The RAM statistics, including Total / Available / Used / Buff-Cache Prometheus node-exporter\nTelegraf input plugin   Memory Swap MB meter_vm_memory_swap_free\nmeter_vm_memory_swap_total Swap memory statistics, including Free / Total Prometheus node-exporter\nTelegraf input plugin   File System Mountpoint Usage % meter_vm_filesystem_percentage The percentage usage of the file system at each mount point Prometheus node-exporter\nTelegraf input plugin   Disk R/W KB/s meter_vm_disk_read\nmeter_vm_disk_written The disk read and written Prometheus node-exporter\nTelegraf input plugin   Network Bandwidth Usage KB/s meter_vm_network_receive\nmeter_vm_network_transmit The network receive and transmit Prometheus node-exporter\nTelegraf input plugin   Network Status  meter_vm_tcp_curr_estab\nmeter_vm_tcp_tw\nmeter_vm_tcp_alloc\nmeter_vm_sockets_used\nmeter_vm_udp_inuse The number of TCPs established / TCP time wait / TCPs allocated / sockets in use / UDPs in use Prometheus node-exporter\nTelegraf input plugin   Filefd Allocated  meter_vm_filefd_allocated The number of file descriptors allocated Prometheus node-exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/vm.yaml and /config/telegraf-rules/vm.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_linux.\nBlog For more details, see the blog article SkyWalking 8.4 provides infrastructure monitoring.\n","title":"Linux Monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-vm-monitoring/"},{"content":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property ? 
By default, the agent tries to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through a system property, the agent tries to load the file from there. This function does not conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as that of the default config.\nUse System.Properties(-D) to set the specified config path:\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","title":"Locate agent config file by system property","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/specified-agent-config/"},{"content":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property ? By default, the agent tries to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through a system property, the agent tries to load the file from there. This function does not conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as that of the default config.\nUse System.Properties(-D) to set the specified config path:\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","title":"Locate agent config file by system property","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/specified-agent-config/"},{"content":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property ? By default, the agent tries to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through a system property, the agent tries to load the file from there. This function does not conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as that of the default config.\nUse System.Properties(-D) to set the specified config path:\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","title":"Locate agent config file by system property","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/specified-agent-config/"},{"content":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property ? By default, the agent tries to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through a system property, the agent tries to load the file from there.
This function does not conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as that of the default config.\nUse System.Properties(-D) to set the specified config path:\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","title":"Locate agent config file by system property","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/specified-agent-config/"},{"content":"Locate agent config file by system property Supported version 5.0.0-RC+\nWhat is Locate agent config file by system property ? By default, the agent tries to locate agent.config, which should be in the /config directory of the agent package. If the user specifies an agent config file through a system property, the agent tries to load the file from there. This function does not conflict with Setting Override\nOverride priority The specified agent config \u0026gt; The default agent config\nHow to use The content format of the specified config must be the same as that of the default config.\nUse System.Properties(-D) to set the specified config path:\n-Dskywalking_config=/path/to/agent.config /path/to/agent.config is the absolute path of the specified config file\n","title":"Locate agent config file by system property","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/specified-agent-config/"},{"content":"Log Analysis Log analyzer of OAP server supports native log data. OAP can use the Log Analysis Language to structure log content by parsing, extracting, and saving logs. The analyzer also uses the Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language (LAL) for more on log structuring and metrics analysis. The LAL metrics extractor provides the capability to generate new metrics from the raw log text for further calculation.\n","title":"Log Analysis","url":"/docs/main/latest/en/setup/backend/log-analyzer/"},{"content":"Log Analysis Log analyzer of OAP server supports native log data. OAP can use the Log Analysis Language to structure log content by parsing, extracting, and saving logs. The analyzer also uses the Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language (LAL) for more on log structuring and metrics analysis. The LAL metrics extractor provides the capability to generate new metrics from the raw log text for further calculation.\n","title":"Log Analysis","url":"/docs/main/next/en/setup/backend/log-analyzer/"},{"content":"Log Analysis Log analyzer of OAP server supports native log data. OAP can use the Log Analysis Language to structure log content by parsing, extracting, and saving logs. The analyzer also uses the Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language (LAL) for more on log structuring and metrics analysis. 
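Re-indented for readability, the log-analyzer block quoted inline above corresponds to the following application.yml layout; the values are the defaults shown there.
log-analyzer:
  selector: ${SW_LOG_ANALYZER:default}
  default:
    # LAL script files, located under the lal directory
    lalFiles: ${SW_LOG_LAL_FILES:default}
    # MAL rule files for metrics generated from logs, located under log-mal-rules
    malFiles: ${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}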
The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","title":"Log Analysis","url":"/docs/main/v9.5.0/en/setup/backend/log-analyzer/"},{"content":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","title":"Log Analysis","url":"/docs/main/v9.6.0/en/setup/backend/log-analyzer/"},{"content":"Log Analysis Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language(LAL) for more on log structuring and metrics analysis. The LAL\u0026rsquo;s metrics extracts provide the capabilities to generate new metrics from the raw log text for further calculation.\n","title":"Log Analysis","url":"/docs/main/v9.7.0/en/setup/backend/log-analyzer/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. 
abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe parameter of timestamp can be a millisecond:\nfilter { // ... parser  extractor { timestamp parsed.time as String } } or a datetime string with a specified pattern:\nfilter { // ... parser  extractor { timestamp parsed.time as String, \u0026#34;yyyy-MM-dd HH:mm:ss\u0026#34; } }  layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... 
} The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency 
parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. 
However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/latest/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. 
You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe parameter of timestamp can be a millisecond:\nfilter { // ... parser  extractor { timestamp parsed.time as String } } or a datetime string with a specified pattern:\nfilter { // ... parser  extractor { timestamp parsed.time as String, \u0026#34;yyyy-MM-dd HH:mm:ss\u0026#34; } }  layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
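For readability, the two MAL rules quoted a few paragraphs above re-indent to the layout below; the expressions are unchanged.
# ... other configurations of MAL
metrics:
  - name: log_count_debug
    exp: log_count.tagEqual('level', 'DEBUG').sum(['service', 'instance']).increase('PT1M')
  - name: log_count_error
    exp: log_count.tagEqual('level', 'ERROR').sum(['service', 'instance']).increase('PT1M')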
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/next/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. 
A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\nParser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certains performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
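The individual extractors are documented below. Purely as a hedged orientation sketch, a combined extractor might look like the following; the right-hand field names (parsed.serviceName, parsed.instanceName, and so on) are assumptions about the incoming log body, not names required by LAL.
filter {
  json { } // populate `parsed` from the JSON log body
  extractor {
    service parsed.serviceName   // assumed field names; adjust to your log schema
    instance parsed.instanceName
    endpoint parsed.endpointName
    traceId parsed.traceId
    timestamp parsed.timestamp   // expected in milliseconds
  }
}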
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service / instance.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99])Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. 
A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as saving error logs even if a sampling strategy has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.0.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as correlate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the order they are declared.\nGlobal Functions Globally available functions may be used in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\nParser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs.
For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certains performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99])Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. 
A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as saving error logs even if a sampling strategy has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.1.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as correlate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the order they are declared.\nGlobal Functions Globally available functions may be used in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\nParser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs.
For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config,another-lal-mal-config}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99])Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. 
A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.2.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. 
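The three parser types and their shared abortOnFailure option are described next. As a hedged sketch of the non-default case, a text parser that keeps the filter chain running even when its pattern does not match could be written as follows; the pattern itself is only illustrative.
filter {
  text {
    abortOnFailure false // non-default: do not abort the chain when the pattern fails to match
    regexp "(?<level>\\w+) (?<msg>.+)" // illustrative pattern only
  }
  // subsequent extractors and sinks still run for unmatched logs
}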
There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. 
The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... 
other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. 
SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. 
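The supported strategies are listed next. One behaviour worth a concrete sketch: rateLimit samplers declared with the same ID share a single sampler instance, and therefore a single rpm budget and resetting logic, even across filters. The sketch below assumes two filters that intentionally reuse the ID "SharedSampler".
filter { // filter A
  // ... parser
  sink {
    sampler {
      rateLimit("SharedSampler") {
        rpm 600 // filters A and B below share this 600-logs-per-minute budget
      }
    }
  }
}
filter { // filter B: reuses the same sampler ID as filter A
  // ... parser
  sink {
    sampler {
      rateLimit("SharedSampler") {
        rpm 600
      }
    }
  }
}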
Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.3.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. 
You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
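As the next sentence explains, the ID names a shared sampler instance, so two filters that declare the same ID draw from one rpm budget. A minimal hedged sketch of that pattern; the ID SharedBudget and the rpm value are made up:

filter { // first filter
  // ... parser
  sink {
    sampler {
      rateLimit("SharedBudget") {
        rpm 600   // both filters together keep at most 600 logs per minute
      }
    }
  }
}
filter { // second filter, reusing the same sampler instance via the same ID
  // ... parser
  sink {
    sampler {
      rateLimit("SharedBudget") {
        rpm 600
      }
    }
  }
}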
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.4.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. 
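The parser, extractor and sink components named here each have their own section further down; as a preview of how they compose inside a single filter, a minimal hedged sketch (the JSON fields service, time and level are made-up names, and the sampler ID is arbitrary):

filter {
  json {}                               // parser: turn the raw body into the parsed map
  extractor {
    service parsed.service as String    // hypothetical fields from the parsed JSON
    timestamp parsed.time as String
    tag level: parsed.level
    layer 'GENERAL'
  }
  sink {
    sampler {
      rateLimit("PreviewSampler") {     // made-up sampler ID
        rpm 600
      }
    }
  }
}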
Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.5.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. 
Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe unit of timestamp is millisecond.\n layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.\nAn example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and set it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nA Example of LAL to distinguish slow logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace Records. It extracts data from parsed result and save them as SampledTraceRecord. SampledTrace will not abort or edit logs, you can use other LAL for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.6.0/en/concepts-and-designs/lal/"},{"content":"Log Analysis Language Log Analysis Language (LAL) in SkyWalking is essentially a Domain-Specific Language (DSL) to analyze logs. You can use LAL to parse, extract, and save the logs, as well as collaborate the logs with traces (by extracting the trace ID, segment ID and span ID) and metrics (by generating metrics from the logs and sending them to the meter system).\nThe LAL config files are in YAML format, and are located under directory lal. You can set log-analyzer/default/lalFiles in the application.yml file or set environment variable SW_LOG_LAL_FILES to activate specific LAL config files.\nLayer Layer should be declared in the LAL script to represent the analysis scope of the logs.\nFilter A filter is a group of parser, extractor and sink. Users can use one or more filters to organize their processing logic. 
Every piece of log will be sent to all filters in an LAL rule. A piece of log sent to the filter is available as property log in the LAL, therefore you can access the log service name via log.service. For all available fields of log, please refer to the protocol definition.\nAll components are executed sequentially in the orders they are declared.\nGlobal Functions Globally available functions may be used them in all components (i.e. parsers, extractors, and sinks) where necessary.\n abort  By default, all components declared are executed no matter what flags (dropped, saved, etc.) have been set. There are cases where you may want the filter chain to stop earlier when specified conditions are met. abort function aborts the remaining filter chain from where it\u0026rsquo;s declared, and all the remaining components won\u0026rsquo;t be executed at all. abort function serves as a fast-fail mechanism in LAL.\nfilter { if (log.service == \u0026#34;TestingService\u0026#34;) { // Don\u0026#39;t waste resources on TestingServices  abort {} // all remaining components won\u0026#39;t be executed at all  } // ... parsers, extractors, sinks } Note that when you put regexp in an if statement, you need to surround the expression with () like regexp(\u0026lt;the expression\u0026gt;), instead of regexp \u0026lt;the expression\u0026gt;.\n tag  tag function provide a convenient way to get the value of a tag key.\nWe can add tags like following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;TEST_KEY\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;TEST_VALUE\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ ... } ... } ] And we can use this method to get the value of the tag key TEST_KEY.\nfilter { if (tag(\u0026#34;TEST_KEY\u0026#34;) == \u0026#34;TEST_VALUE\u0026#34;) { ... } } Parser Parsers are responsible for parsing the raw logs into structured data in SkyWalking for further processing. There are 3 types of parsers at the moment, namely json, yaml, and text.\nWhen a piece of log is parsed, there is a corresponding property available, called parsed, injected by LAL. Property parsed is typically a map, containing all the fields parsed from the raw logs. For example, if the parser is json / yaml, parsed is a map containing all the key-values in the json / yaml; if the parser is text , parsed is a map containing all the captured groups and their values (for regexp and grok).\nAll parsers share the following options:\n   Option Type Description Default Value     abortOnFailure boolean Whether the filter chain should abort if the parser failed to parse / match the logs true    See examples below.\njson filter { json { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } yaml filter { yaml { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  } } text For unstructured logs, there are some text parsers for use.\n regexp  regexp parser uses a regular expression (regexp) to parse the logs. It leverages the captured groups of the regexp, all the captured groups can be used later in the extractors or sinks. 
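The regexp example that follows extracts only the trace ID; when a log line also carries the segment and span IDs, the traceId, segmentId and spanId extractors described further down can be combined so the log is attached to one specific span. A hedged sketch with a made-up log layout and capture-group names:

filter {
  text {
    regexp "(?<traceId>\\w+) (?<segmentId>\\w+) (?<spanId>\\d+) (?<msg>.+)"   // hypothetical log format
  }
  extractor {
    traceId parsed.traceId
    segmentId parsed.segmentId
    spanId parsed.spanId
  }
}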
regexp returns a boolean indicating whether the log matches the pattern or not.\nfilter { text { abortOnFailure true // this is optional because it\u0026#39;s default behaviour  // this is just a demo pattern  regexp \u0026#34;(?\u0026lt;timestamp\u0026gt;\\\\d{8}) (?\u0026lt;thread\u0026gt;\\\\w+) (?\u0026lt;level\u0026gt;\\\\w+) (?\u0026lt;traceId\u0026gt;\\\\w+) (?\u0026lt;msg\u0026gt;.+)\u0026#34; } extractor { tag level: parsed.level // we add a tag called `level` and its value is parsed.level, captured from the regexp above  traceId parsed.traceId // we also extract the trace id from the parsed result, which will be used to associate the log with the trace  } // ... }  grok (TODO)  We\u0026rsquo;re aware of certain performance issues in the grok Java library, and so we\u0026rsquo;re currently conducting investigations and benchmarking. Contributions are welcome.\nExtractor Extractors aim to extract metadata from the logs. The metadata can be a service name, a service instance name, an endpoint name, or even a trace ID, all of which can be associated with the existing traces and metrics.\n service  service extracts the service name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n instance  instance extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n endpoint  endpoint extracts the service instance name from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n traceId  traceId extracts the trace ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n segmentId  segmentId extracts the segment ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n spanId  spanId extracts the span ID from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\n timestamp  timestamp extracts the timestamp from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with traces / metrics.\nThe parameter of timestamp can be a millisecond:\nfilter { // ... parser  extractor { timestamp parsed.time as String } } or a datetime string with a specified pattern:\nfilter { // ... parser  extractor { timestamp parsed.time as String, \u0026#34;yyyy-MM-dd HH:mm:ss\u0026#34; } }  layer  layer extracts the layer from the parsed result, and set it into the LogData, which will be persisted (if not dropped) and is used to associate with service.\n tag  tag extracts the tags from the parsed result, and set them into the LogData. The form of this extractor should look something like this: tag key1: value, key2: value2. You may use the properties of parsed as both keys and values.\nimport javax.swing.text.LayeredHighlighter filter { // ... parser  extractor { tag level: parsed.level, (parsed.statusCode): parsed.statusMsg tag anotherKey: \u0026#34;anotherConstantValue\u0026#34; layer \u0026#39;GENERAL\u0026#39; } }  metrics  metrics extracts / generates metrics from the logs, and sends the generated metrics to the meter system. You may configure MAL for further analysis of these metrics. 
The dedicated MAL config files are under directory log-mal-rules, and you can set log-analyzer/default/malFiles to enable configured files.\n# application.yml# ...log-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:my-lal-config}# files are under \u0026#34;lal\u0026#34; directorymalFiles:${SW_LOG_MAL_FILES:my-lal-mal-config, folder1/another-lal-mal-config, folder2/*}# files are under \u0026#34;log-mal-rules\u0026#34; directoryExamples are as follows:\nfilter { // ...  extractor { service parsed.serviceName metrics { name \u0026#34;log_count\u0026#34; timestamp parsed.timestamp labels level: parsed.level, service: parsed.service, instance: parsed.instance value 1 } metrics { name \u0026#34;http_response_time\u0026#34; timestamp parsed.timestamp labels status_code: parsed.statusCode, service: parsed.service, instance: parsed.instance value parsed.duration } } // ... } The extractor above generates a metrics named log_count, with tag key level and value 1. After that, you can configure MAL rules to calculate the log count grouping by logging level like this:\n# ... other configurations of MALmetrics:- name:log_count_debugexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;DEBUG\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)- name:log_count_errorexp:log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;ERROR\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT1M\u0026#39;)The other metrics generated is http_response_time, so you can configure MAL rules to generate more useful metrics like percentiles.\n# ... other configurations of MALmetrics:- name:response_time_percentileexp:http_response_time.sum([\u0026#39;le\u0026#39;, \u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).increase(\u0026#39;PT5M\u0026#39;).histogram().histogram_percentile([50,70,90,99]) slowSql  slowSql aims to convert LogData to DatabaseSlowStatement. It extracts data from parsed result and save them as DatabaseSlowStatement. SlowSql will not abort or edit logs, you can use other LAL for further processing. SlowSql will reuse service, layer and timestamp of extractor, so it is necessary to use SlowSQL after setting these. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;SLOW_SQL\u0026quot; to make OAP distinguish slow SQL logs from other log reports.\nNote, slow SQL sampling would only flag this SQL in the candidate list. 
The OAP server runs statistics per service and only persists the top 50 every 10 minutes by default (controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}).\nAn example of JSON sent to OAP is as follows:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;SLOW_SQL\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MYSQL\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;time\\\u0026#34;:\\\u0026#34;1663063011\\\u0026#34;,\\\u0026#34;id\\\u0026#34;:\\\u0026#34;cb92c1a5b-2691e-fb2f-457a-9c72a392d9ed\\\u0026#34;,\\\u0026#34;service\\\u0026#34;:\\\u0026#34;root[root]@[localhost]\\\u0026#34;,\\\u0026#34;statement\\\u0026#34;:\\\u0026#34;select sleep(2);\\\u0026#34;,\\\u0026#34;layer\\\u0026#34;:\\\u0026#34;MYSQL\\\u0026#34;,\\\u0026#34;query_time\\\u0026#34;:2000}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;root[root]@[localhost]\u0026#34; } ]  statement  statement extracts the SQL statement from the parsed result, and sets it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n latency  latency extracts the latency from the parsed result, and sets it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\n id  id extracts the id from the parsed result, and sets it into the DatabaseSlowStatement, which will be persisted (if not dropped) and is used to associate with TopNDatabaseStatement.\nAn example of LAL to distinguish slow SQL logs:\nfilter { json{ } extractor{ layer parsed.layer as String service parsed.service as String timestamp parsed.time as String if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;SLOW_SQL\u0026#34;) { slowSql { id parsed.id as String statement parsed.statement as String latency parsed.query_time as Long } } } }  sampledTrace  sampledTrace aims to convert LogData to SampledTrace records. It extracts data from the parsed result and saves them as SampledTraceRecord. sampledTrace will not abort or edit logs; you can use other LAL rules for further processing. We require a log tag \u0026quot;LOG_KIND\u0026quot; = \u0026quot;NET_PROFILING_SAMPLED_TRACE\u0026quot; to make OAP distinguish slow trace logs from other log reports. 
An example of JSON sent to OAP is as following:\n[ { \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;LOG_KIND\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34; } ] }, \u0026#34;layer\u0026#34;:\u0026#34;MESH\u0026#34;, \u0026#34;body\u0026#34;:{ \u0026#34;json\u0026#34;:{ \u0026#34;json\u0026#34;:\u0026#34;{\\\u0026#34;uri\\\u0026#34;:\\\u0026#34;/provider\\\u0026#34;,\\\u0026#34;reason\\\u0026#34;:\\\u0026#34;slow\\\u0026#34;,\\\u0026#34;latency\\\u0026#34;:2048,\\\u0026#34;client_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;c1519f4555ec11eda8df0242ac1d0002\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;\\\u0026#34;},\\\u0026#34;server_process\\\u0026#34;:{\\\u0026#34;process_id\\\u0026#34;:\\\u0026#34;\\\u0026#34;,\\\u0026#34;local\\\u0026#34;:false,\\\u0026#34;address\\\u0026#34;:\\\u0026#34;172.31.0.3:443\\\u0026#34;},\\\u0026#34;detect_point\\\u0026#34;:\\\u0026#34;client\\\u0026#34;,\\\u0026#34;component\\\u0026#34;:\\\u0026#34;http\\\u0026#34;,\\\u0026#34;ssl\\\u0026#34;:true}\u0026#34; } }, \u0026#34;service\u0026#34;:\u0026#34;test-service\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;test-service-instance\u0026#34;, \u0026#34;timestamp\u0026#34;: 1666916962406, } ] Examples are as follows:\nfilter { json { } if (tag(\u0026#34;LOG_KIND\u0026#34;) == \u0026#34;NET_PROFILING_SAMPLED_TRACE\u0026#34;) { sampledTrace { latency parsed.latency as Long uri parsed.uri as String reason parsed.reason as String if (parsed.client_process.process_id as String != \u0026#34;\u0026#34;) { processId parsed.client_process.process_id as String } else if (parsed.client_process.local as Boolean) { processId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { processId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.client_process.address as String) as String } if (parsed.server_process.process_id as String != \u0026#34;\u0026#34;) { destProcessId parsed.server_process.process_id as String } else if (parsed.server_process.local as Boolean) { destProcessId ProcessRegistry.generateVirtualLocalProcess(parsed.service as String, parsed.serviceInstance as String) as String } else { destProcessId ProcessRegistry.generateVirtualRemoteProcess(parsed.service as String, parsed.serviceInstance as String, parsed.server_process.address as String) as String } detectPoint parsed.detect_point as String if (parsed.component as String == \u0026#34;http\u0026#34; \u0026amp;\u0026amp; parsed.ssl as Boolean) { componentId 129 } else if (parsed.component as String == \u0026#34;http\u0026#34;) { componentId 49 } else if (parsed.ssl as Boolean) { componentId 130 } else { componentId 110 } } } } Sink Sinks are the persistent layer of the LAL. By default, all the logs of each filter are persisted into the storage. However, some mechanisms allow you to selectively save some logs, or even drop all the logs after you\u0026rsquo;ve extracted useful information, such as metrics.\nSampler Sampler allows you to save the logs in a sampling manner. Currently, the following sampling strategies are supported:\n rateLimit: samples n logs at a maximum rate of 1 minute. rateLimit(\u0026quot;SamplerID\u0026quot;) requires an ID for the sampler. 
Sampler declarations with the same ID share the same sampler instance, thus sharing the same rpm and resetting logic. possibility: every piece of log has a pseudo possibility of percentage to be sampled, the possibility was generated by Java random number generator and compare to the given percentage option.  We welcome contributions on more sampling strategies. If multiple samplers are specified, the last one determines the final sampling result. See examples in Enforcer.\nExamples 1, rateLimit:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { rateLimit(\u0026#34;ImportantAppSampler\u0026#34;) { rpm 1800 // samples 1800 pieces of logs every minute for service \u0026#34;ImportantApp\u0026#34;  } } else { rateLimit(\u0026#34;OtherSampler\u0026#34;) { rpm 180 // samples 180 pieces of logs every minute for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Examples 2, possibility:\nfilter { // ... parser  sink { sampler { if (parsed.service == \u0026#34;ImportantApp\u0026#34;) { possibility(80) { // samples 80% of the logs for service \u0026#34;ImportantApp\u0026#34;  } } else { possibility(30) { // samples 30% of the logs for other services than \u0026#34;ImportantApp\u0026#34;  } } } } } Dropper Dropper is a special sink, meaning that all logs are dropped without any exception. This is useful when you want to drop debugging logs.\nfilter { // ... parser  sink { if (parsed.level == \u0026#34;DEBUG\u0026#34;) { dropper {} } else { sampler { // ... configs  } } } } Or if you have multiple filters, some of which are for extracting metrics, only one of them has to be persisted.\nfilter { // filter A: this is for persistence  // ... parser  sink { sampler { // .. sampler configs  } } } filter { // filter B:  // ... extractors to generate many metrics  extractors { metrics { // ... metrics  } } sink { dropper {} // drop all logs because they have been saved in \u0026#34;filter A\u0026#34; above.  } } Enforcer Enforcer is another special sink that forcibly samples the log. A typical use case of enforcer is when you have configured a sampler and want to save some logs forcibly, such as to save error logs even if the sampling mechanism has been configured.\nfilter { // ... parser  sink { sampler { // ... sampler configs  } if (parsed.level == \u0026#34;ERROR\u0026#34; || parsed.userId == \u0026#34;TestingUserId\u0026#34;) { // sample error logs or testing users\u0026#39; logs (userId == \u0026#34;TestingUserId\u0026#34;) even if the sampling strategy is configured  enforcer { } } } } ","title":"Log Analysis Language","url":"/docs/main/v9.7.0/en/concepts-and-designs/lal/"},{"content":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following filebeat config yaml as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). 
Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting, and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","title":"Log Collection and Analysis","url":"/docs/main/v9.0.0/en/setup/backend/log-analyzer/"},{"content":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). 
When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","title":"Log Collection and Analysis","url":"/docs/main/v9.1.0/en/setup/backend/log-analyzer/"},{"content":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. 
The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","title":"Log Collection and Analysis","url":"/docs/main/v9.2.0/en/setup/backend/log-analyzer/"},{"content":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","title":"Log Collection and Analysis","url":"/docs/main/v9.3.0/en/setup/backend/log-analyzer/"},{"content":"Log Collection and Analysis Collection There are various ways to collect logs from applications.\nLog files collector You can use Filebeat, Fluentd and FluentBit to collect logs, and then transport the logs to SkyWalking OAP through Kafka or HTTP protocol, with the formats Kafka JSON or HTTP JSON array.\nFilebeat Filebeat supports using Kafka to transport logs. 
Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following Filebeat config YAML as an example to set up Filebeat:\n filebeat.yml  Fluentd Fluentd supports using Kafka to transport logs. Open kafka-fetcher and enable configs enableNativeJsonLog.\nTake the following fluentd config file as an example to set up Fluentd:\n fluentd.conf  Fluent-bit Fluent-bit sends logs to OAP directly through HTTP(rest port). Point the output address to restHost:restPort of receiver-sharing-server or core(if receiver-sharing-server is inactivated)\nTake the following fluent-bit config files as an example to set up Fluent-bit:\n fluent-bit.conf  OpenTelemetry You can use OpenTelemetry Collector to transport the logs to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nJava agent\u0026rsquo;s toolkits Java agent provides toolkits for log4j, log4j2, and logback to report logs through gRPC with automatically injected trace context.\nSkyWalking Satellite sidecar is a recommended proxy/side that forwards logs (including the use of Kafka MQ to transport logs). When using this, open kafka-fetcher and enable configs enableNativeProtoLog.\nJava agent provides toolkits for log4j, log4j2, and logback to report logs through files with automatically injected trace context.\nLog framework config examples:\n log4j1.x fileAppender log4j2.x fileAppender logback fileAppender  Python agent log reporter SkyWalking Python Agent implements a log reporter for the logging module with functionalities aligning with the Java toolkits.\nTo explore how to enable the reporting features for your use cases, please refer to the Log Reporter Doc for a detailed guide.\nLog Analyzer Log analyzer of OAP server supports native log data. OAP could use Log Analysis Language to structure log content through parsing, extracting and saving logs. The analyzer also uses Meter Analysis Language Engine for further metrics calculation.\nlog-analyzer:selector:${SW_LOG_ANALYZER:default}default:lalFiles:${SW_LOG_LAL_FILES:default}malFiles:${SW_LOG_MAL_FILES:\u0026#34;\u0026#34;}Read the doc on Log Analysis Language for more on log structuring and metrics analysis.\n","title":"Log Collection and Analysis","url":"/docs/main/v9.4.0/en/setup/backend/log-analyzer/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  
int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/latest/en/api/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  
// If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/next/en/api/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP 
API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.0.0/en/protocols/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: 
\u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.1.0/en/protocols/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.2.0/en/protocols/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nNative Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { 
\u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.3.0/en/protocols/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  // If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. 
This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.4.0/en/api/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  
// If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.5.0/en/api/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  
// If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.6.0/en/api/log-data-protocol/"},{"content":"Log Data Protocol Report log data via protocol.\nNative Proto Protocol Report native-proto format log via gRPC.\ngRPC service define\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.logging.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/logging/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Report collected logs into the OAP backend service LogReportService { // Recommend to report log data in a stream mode.  // The service/instance/endpoint of the log could share the previous value if they are not set.  // Reporting the logs of same service in the batch mode could reduce the network cost.  rpc collect (stream LogData) returns (Commands) { }}// Log data is collected through file scratcher of agent. // Natively, Satellite provides various ways to collect logs. message LogData { // [Optional] The timestamp of the log, in millisecond.  
// If not set, OAP server would use the received timestamp as log\u0026#39;s timestamp, or relies on the OAP server analyzer.  int64 timestamp = 1; // [Required] **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  //  // If this is not the first element of the streaming, use the previous not-null name as the service name.  string service = 2; // [Optional] **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 3; // [Optional] **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  //  // The logic name represents the endpoint, which logs belong.  string endpoint = 4; // [Required] The content of the log.  LogDataBody body = 5; // [Optional] Logs with trace context  TraceContext traceContext = 6; // [Optional] The available tags. OAP server could provide search/analysis capabilities based on these.  LogTags tags = 7; // [Optional] Since 9.0.0  // The layer of the service and servce instance. If absent, the OAP would set `layer`=`ID: 2, NAME: general`  string layer = 8;}// The content of the log data message LogDataBody { // A type to match analyzer(s) at the OAP server.  // The data could be analyzed at the client side, but could be partial  string type = 1; // Content with extendable format.  oneof content { TextLog text = 2; JSONLog json = 3; YAMLLog yaml = 4; }}// Literal text log, typically requires regex or split mechanism to filter meaningful info. message TextLog { string text = 1;}// JSON formatted log. The json field represents the string that could be formatted as a JSON object. message JSONLog { string json = 1;}// YAML formatted log. The yaml field represents the string that could be formatted as a YAML map. message YAMLLog { string yaml = 1;}// Logs with trace context, represent agent system has injects context(IDs) into log text. message TraceContext { // [Optional] A string id represents the whole trace.  string traceId = 1; // [Optional] A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // [Optional] The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 3;}message LogTags { // String key, String value pair.  
repeated KeyStringValuePair data = 1;}Native Kafka Protocol Report native-json format log via kafka.\nJson log record example:\n{ \u0026#34;timestamp\u0026#34;:1618161813371, \u0026#34;service\u0026#34;:\u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;:\u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;:{ \u0026#34;traceId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;:\u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;:\u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;:{ \u0026#34;data\u0026#34;:[ { \u0026#34;key\u0026#34;:\u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;:\u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;:\u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;:{ \u0026#34;text\u0026#34;:{ \u0026#34;text\u0026#34;:\u0026#34;log message\u0026#34; } } } HTTP API Report json format logs via HTTP API, the endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/logs.\nJson log record example:\n[ { \u0026#34;timestamp\u0026#34;: 1618161813371, \u0026#34;service\u0026#34;: \u0026#34;Your_ApplicationName\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;3a5b8da5a5ba40c0b192e91b5c80f1a8@192.168.1.8\u0026#34;, \u0026#34;layer\u0026#34;:\u0026#34;GENERAL\u0026#34;, \u0026#34;traceContext\u0026#34;: { \u0026#34;traceId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470001\u0026#34;, \u0026#34;spanId\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;ddd92f52207c468e9cd03ddd107cd530.69.16181331190470000\u0026#34; }, \u0026#34;tags\u0026#34;: { \u0026#34;data\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;level\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;INFO\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;logger\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;com.example.MyLogger\u0026#34; } ] }, \u0026#34;body\u0026#34;: { \u0026#34;text\u0026#34;: { \u0026#34;text\u0026#34;: \u0026#34;log message\u0026#34; } } } ] ","title":"Log Data Protocol","url":"/docs/main/v9.7.0/en/api/log-data-protocol/"},{"content":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder 
class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Support for logback AsyncAppender (MDC is also supported); no additional configuration is required. Refer to the logback.xml demo below. For details, see Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to activate the SkyWalking tracer, logback will output the traceId if it exists. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   You only need to replace the pattern %tid or %X{tid} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to activate the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId] if it exists. 
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  set LogstashEncoder of logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  set LoggingEventCompositeJsonEncoder of logstash in logback-spring.xml for custom json format  1.add converter for %tid or %sw_ctx as child of  node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2.add json encoder for custom json format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"logback plugin","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/"},{"content":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; 
\u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Support logback AsyncAppender(MDC also support), No additional configuration is required. Refer to the demo of logback.xml below. For details: Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, logback will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %tid or %X{tid]} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to active the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. 
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  set LogstashEncoder of logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  set LoggingEventCompositeJsonEncoder of logstash in logback-spring.xml for custom json format  1.add converter for %tid or %sw_ctx as child of  node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2.add json encoder for custom json format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"logback plugin","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/"},{"content":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; 
\u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Support logback AsyncAppender(MDC also support), No additional configuration is required. Refer to the demo of logback.xml below. For details: Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, logback will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %tid or %X{tid]} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to active the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. 
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  set LogstashEncoder of logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  set LoggingEventCompositeJsonEncoder of logstash in logback-spring.xml for custom json format  1.add converter for %tid or %sw_ctx as child of  node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2.add json encoder for custom json format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"logback plugin","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/"},{"content":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; 
\u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Support logback AsyncAppender(MDC also support), No additional configuration is required. Refer to the demo of logback.xml below. For details: Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, logback will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %tid or %X{tid]} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to active the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. 
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  set LogstashEncoder of logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  set LoggingEventCompositeJsonEncoder of logstash in logback-spring.xml for custom json format  1.add converter for %tid or %sw_ctx as child of  node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2.add json encoder for custom json format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"logback plugin","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/"},{"content":"logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;{project.release.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Print trace ID in your logs  set %tid in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.TraceIdPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%tid] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  with the MDC, set %X{tid} in Pattern section of logback.xml  \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; 
\u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Support logback AsyncAppender(MDC also support), No additional configuration is required. Refer to the demo of logback.xml below. For details: Logback AsyncAppender  \u0026lt;configuration scan=\u0026#34;true\u0026#34; scanPeriod=\u0026#34; 5 seconds\u0026#34;\u0026gt; \u0026lt;appender name=\u0026#34;STDOUT\u0026#34; class=\u0026#34;ch.qos.logback.core.ConsoleAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;appender name=\u0026#34;ASYNC\u0026#34; class=\u0026#34;ch.qos.logback.classic.AsyncAppender\u0026#34;\u0026gt; \u0026lt;discardingThreshold\u0026gt;0\u0026lt;/discardingThreshold\u0026gt; \u0026lt;queueSize\u0026gt;1024\u0026lt;/queueSize\u0026gt; \u0026lt;neverBlock\u0026gt;true\u0026lt;/neverBlock\u0026gt; \u0026lt;appender-ref ref=\u0026#34;STDOUT\u0026#34;/\u0026gt; \u0026lt;/appender\u0026gt; \u0026lt;root level=\u0026#34;INFO\u0026#34;\u0026gt; \u0026lt;appender-ref ref=\u0026#34;ASYNC\u0026#34;/\u0026gt; \u0026lt;/root\u0026gt; \u0026lt;/configuration\u0026gt;  When you use -javaagent to active the SkyWalking tracer, logback will output traceId, if it existed. If the tracer is inactive, the output will be TID: N/A.  Print SkyWalking context in your logs   Your only need to replace pattern %tid or %X{tid]} with %sw_ctx or %X{sw_ctx}.\n  When you use -javaagent to active the SkyWalking tracer, logback will output SW_CTX: [$serviceName,$instanceName,$traceId,$traceSegmentId,$spanId], if it existed. 
If the tracer is inactive, the output will be SW_CTX: N/A.\n  logstash logback plugin  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-logback-1.x\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  set LogstashEncoder of logback.xml  \u0026lt;encoder charset=\u0026#34;UTF-8\u0026#34; class=\u0026#34;net.logstash.logback.encoder.LogstashEncoder\u0026#34;\u0026gt; \u0026lt;!-- add TID(traceId) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;!-- add SW_CTX(SkyWalking context) field --\u0026gt; \u0026lt;provider class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.SkyWalkingContextJsonProvider\u0026#34;\u0026gt; \u0026lt;/provider\u0026gt; \u0026lt;/encoder\u0026gt;  set LoggingEventCompositeJsonEncoder of logstash in logback-spring.xml for custom json format  1.add converter for %tid or %sw_ctx as child of  node\n\u0026lt;!-- add converter for %tid --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;tid\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackPatternConverter\u0026#34;/\u0026gt; \u0026lt;!-- add converter for %sw_ctx --\u0026gt; \u0026lt;conversionRule conversionWord=\u0026#34;sw_ctx\u0026#34; converterClass=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.LogbackSkyWalkingContextPatternConverter\u0026#34;/\u0026gt; 2.add json encoder for custom json format\n\u0026lt;encoder class=\u0026#34;net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder\u0026#34;\u0026gt; \u0026lt;providers\u0026gt; \u0026lt;timestamp\u0026gt; \u0026lt;timeZone\u0026gt;UTC\u0026lt;/timeZone\u0026gt; \u0026lt;/timestamp\u0026gt; \u0026lt;pattern\u0026gt; \u0026lt;pattern\u0026gt; { \u0026#34;level\u0026#34;: \u0026#34;%level\u0026#34;, \u0026#34;tid\u0026#34;: \u0026#34;%tid\u0026#34;, \u0026#34;skyWalkingContext\u0026#34;: \u0026#34;%sw_ctx\u0026#34;, \u0026#34;thread\u0026#34;: \u0026#34;%thread\u0026#34;, \u0026#34;class\u0026#34;: \u0026#34;%logger{1.}:%L\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;%message\u0026#34;, \u0026#34;stackTrace\u0026#34;: \u0026#34;%exception{10}\u0026#34; } \u0026lt;/pattern\u0026gt; \u0026lt;/pattern\u0026gt; \u0026lt;/providers\u0026gt; \u0026lt;/encoder\u0026gt; gRPC reporter The gRPC reporter could forward the collected logs to SkyWalking OAP server, or SkyWalking Satellite sidecar. Trace id, segment id, and span id will attach to logs automatically. 
There is no need to modify existing layouts.\n Add GRPCLogClientAppender in logback.xml  \u0026lt;appender name=\u0026#34;grpc-log\u0026#34; class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.log.GRPCLogClientAppender\u0026#34;\u0026gt; \u0026lt;encoder class=\u0026#34;ch.qos.logback.core.encoder.LayoutWrappingEncoder\u0026#34;\u0026gt; \u0026lt;layout class=\u0026#34;org.apache.skywalking.apm.toolkit.log.logback.v1.x.mdc.TraceIdMDCPatternLogbackLayout\u0026#34;\u0026gt; \u0026lt;Pattern\u0026gt;%d{yyyy-MM-dd HH:mm:ss.SSS} [%X{tid}] [%thread] %-5level %logger{36} -%msg%n\u0026lt;/Pattern\u0026gt; \u0026lt;/layout\u0026gt; \u0026lt;/encoder\u0026gt; \u0026lt;/appender\u0026gt;  Add config of the plugin or use default  log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760} Transmitting un-formatted messages The logback 1.x gRPC reporter supports transmitting logs as formatted or un-formatted. Transmitting formatted data is the default but can be disabled by adding the following to the agent config:\nplugin.toolkit.log.transmit_formatted=false The above will result in the content field being used for the log pattern with additional log tags of argument.0, argument.1, and so on representing each logged argument as well as an additional exception tag which is only present if a throwable is also logged.\nFor example, the following code:\nlog.info(\u0026#34;{} {} {}\u0026#34;, 1, 2, 3); Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;{} {} {}\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;3\u0026#34; } ] } ","title":"logback plugin","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/"},{"content":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current service. It currently supports the recognition of Logrus and Zap frameworks. If neither of these frameworks is present, it would output logs using Std Error.\nYou can learn about the configuration details through the \u0026ldquo;log\u0026rdquo; configuration item in the default settings.\nLogging Detection Log detection means that the logging plugin would automatically detect the usage of logs in your application. When the log type is set to auto, it would choose the appropriate log based on the creation rules of different frameworks. The selection rules vary depending on the framework:\n Logrus: It automatically selects the current logger when executing functions such as logrus.New, logger.SetOutput, or logger.SetFormatter. Zap: It automatically selects the current logger when executing functions such as zap.New, zap.NewNop, zap.NewProduction, zap.NewDevelopment, or zap.NewExample.  If there are multiple different logging systems in your current application, the last-called logging system would be chosen.\nThe configuration information is as follows:\n   Name Environment Key Default Value Description     log.type SW_LOG_TYPE auto The type of logging system. It currently supports auto, logrus, zap, and std.    
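As a minimal sketch of the detection rule above (assuming the service is compiled with the SkyWalking Go agent enabled and log.type is left at its default of auto), a logger created through zap.NewProduction is one of the creation calls the plugin watches for, so it would be selected automatically:

package main

import "go.uber.org/zap"

func main() {
	// zap.NewProduction is one of the creation functions listed in the
	// detection rules, so with log.type=auto the agent picks this logger up.
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	// If the current goroutine carries tracing data, the agent appends the
	// tracing context to this entry as well (see the next section).
	logger.Info("test log")
}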
Agent with Logging system The integration of the Agent with logs includes the two parts as following.\n Integrating Agent logs into the Service: Integrating the logs from the Agent into the framework used by the service. Integrating Tracing information into the Service: Integrating the information from Tracing into the service logs.  Agent logs into the Service Agent logs output the current running status of the Agent system, most of which are execution exceptions. For example, communication anomalies between the Agent and the backend service, plugin execution exceptions, etc.\nIntegrating Agent logs into the service\u0026rsquo;s logging system can effectively help users quickly troubleshoot whether there are issues with the current Agent execution.\nTracing information into the Service The Agent would also enhance the existing logging system. When the service outputs log, if the current goroutine contains Tracing data, it would be outputted together with the current logs. This helps users to quickly locate the link based on the Tracing data.\nTracing data The Tracing includes the following information:\n ServiceName: Current service name. ServiceInstanceName: Current service instance name. TraceID: The current Trace ID. If there is no link, it outputs N/A. SegmentID: The Segment ID in the current Trace. If there is no link, it outputs N/A. SpanID: The Span ID currently being operated on. If there is no link, it outputs -1.  The output format is as follows: [${ServiceName},${ServiceInstanceName},${TraceID},${SegmentID},${SpanID}].\nThe following is an example of a log output when using Zap.NewProduction:\n{\u0026quot;level\u0026quot;:\u0026quot;info\u0026quot;,\u0026quot;ts\u0026quot;:1683641507.052247,\u0026quot;caller\u0026quot;:\u0026quot;gin/main.go:45\u0026quot;,\u0026quot;msg\u0026quot;:\u0026quot;test log\u0026quot;,\u0026quot;SW_CTX\u0026quot;:\u0026quot;[Your_ApplicationName,681e4178ee7311ed864facde48001122@192.168.50.193,6f13069eee7311ed864facde48001122,6f13070cee7311ed864facde48001122,0]\u0026quot;} The configuration information is as follows:\n   Name Environment Key Default Value Description     log.tracing.enable SW_AGENT_LOG_TRACING_ENABLE true Whether to automatically integrate Tracing information into the logs.   log.tracing.key SW_AGENT_LOG_TRACING_KEY SW_CTX The key of the Tracing information in the log.    Log Upload The Agent would report the following two types of logs to the SkyWalking backend for storage and querying:\n Application Logs: It provides support for various logging frameworks and reports logs along with the corresponding distributed tracing information related to the current request. Only the relevant logs matching the current system log level would be output. Agent Logs: These are the logs generated by the Agent itself.  The current configuration options available are as follows:\n   Name Environment Key Default Value Description     log.reporter.enable SW_LOG_REPORTER_ENABLE true Whether to enable log reporting.   log.reporter.label_keys SW_LOG_REPORTER_LABEL_KEYS  By default, all fields are not reported. To specify the fields that need to be reported, please provide a comma-separated list of configuration item keys.    ","title":"Logging Setup","url":"/docs/skywalking-go/latest/en/advanced-features/logging-setup/"},{"content":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current service. It currently supports the recognition of Logrus and Zap frameworks. 
If neither of these frameworks is present, it would output logs using Std Error.\nYou can learn about the configuration details through the \u0026ldquo;log\u0026rdquo; configuration item in the default settings.\nLogging Detection Log detection means that the logging plugin would automatically detect the usage of logs in your application. When the log type is set to auto, it would choose the appropriate log based on the creation rules of different frameworks. The selection rules vary depending on the framework:\n Logrus: It automatically selects the current logger when executing functions such as logrus.New, logger.SetOutput, or logger.SetFormatter. Zap: It automatically selects the current logger when executing functions such as zap.New, zap.NewNop, zap.NewProduction, zap.NewDevelopment, or zap.NewExample.  If there are multiple different logging systems in your current application, the last-called logging system would be chosen.\nThe configuration information is as follows:\n   Name Environment Key Default Value Description     log.type SW_LOG_TYPE auto The type of logging system. It currently supports auto, logrus, zap, and std.    Agent with Logging system The integration of the Agent with logs includes the two parts as following.\n Integrating Agent logs into the Service: Integrating the logs from the Agent into the framework used by the service. Integrating Tracing information into the Service: Integrating the information from Tracing into the service logs.  Agent logs into the Service Agent logs output the current running status of the Agent system, most of which are execution exceptions. For example, communication anomalies between the Agent and the backend service, plugin execution exceptions, etc.\nIntegrating Agent logs into the service\u0026rsquo;s logging system can effectively help users quickly troubleshoot whether there are issues with the current Agent execution.\nTracing information into the Service The Agent would also enhance the existing logging system. When the service outputs log, if the current goroutine contains Tracing data, it would be outputted together with the current logs. This helps users to quickly locate the link based on the Tracing data.\nTracing data The Tracing includes the following information:\n ServiceName: Current service name. ServiceInstanceName: Current service instance name. TraceID: The current Trace ID. If there is no link, it outputs N/A. SegmentID: The Segment ID in the current Trace. If there is no link, it outputs N/A. SpanID: The Span ID currently being operated on. If there is no link, it outputs -1.  The output format is as follows: [${ServiceName},${ServiceInstanceName},${TraceID},${SegmentID},${SpanID}].\nThe following is an example of a log output when using Zap.NewProduction:\n{\u0026quot;level\u0026quot;:\u0026quot;info\u0026quot;,\u0026quot;ts\u0026quot;:1683641507.052247,\u0026quot;caller\u0026quot;:\u0026quot;gin/main.go:45\u0026quot;,\u0026quot;msg\u0026quot;:\u0026quot;test log\u0026quot;,\u0026quot;SW_CTX\u0026quot;:\u0026quot;[Your_ApplicationName,681e4178ee7311ed864facde48001122@192.168.50.193,6f13069eee7311ed864facde48001122,6f13070cee7311ed864facde48001122,0]\u0026quot;} The configuration information is as follows:\n   Name Environment Key Default Value Description     log.tracing.enable SW_AGENT_LOG_TRACING_ENABLE true Whether to automatically integrate Tracing information into the logs.   log.tracing.key SW_AGENT_LOG_TRACING_KEY SW_CTX The key of the Tracing information in the log.    
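As a hedged illustration (assuming the SkyWalking Go agent is attached and log.tracing.enable keeps its default of true), a Logrus logger created as below is detected automatically, and each entry carries the tracing context under the key configured by log.tracing.key (SW_CTX by default):
package main

import "github.com/sirupsen/logrus"

func main() {
	// logrus.New is one of the creation functions the agent detects,
	// so the SW_CTX field is appended to entries written by this logger.
	logger := logrus.New()
	logger.Info("test log")
}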
Log Upload The Agent would report the following two types of logs to the SkyWalking backend for storage and querying:\n Application Logs: It provides support for various logging frameworks and reports logs along with the corresponding distributed tracing information related to the current request. Only the relevant logs matching the current system log level would be output. Agent Logs: These are the logs generated by the Agent itself.  The current configuration options available are as follows:\n   Name Environment Key Default Value Description     log.reporter.enable SW_LOG_REPORTER_ENABLE true Whether to enable log reporting.   log.reporter.label_keys SW_LOG_REPORTER_LABEL_KEYS  By default, all fields are not reported. To specify the fields that need to be reported, please provide a comma-separated list of configuration item keys.    ","title":"Logging Setup","url":"/docs/skywalking-go/next/en/advanced-features/logging-setup/"},{"content":"Logging Setup Logging Setup is used to integrate the Go Agent with the logging system in the current service. It currently supports the recognition of Logrus and Zap frameworks. If neither of these frameworks is present, it would output logs using Std Error.\nYou can learn about the configuration details through the \u0026ldquo;log\u0026rdquo; configuration item in the default settings.\nLogging Detection Log detection means that the logging plugin would automatically detect the usage of logs in your application. When the log type is set to auto, it would choose the appropriate log based on the creation rules of different frameworks. The selection rules vary depending on the framework:\n Logrus: It automatically selects the current logger when executing functions such as logrus.New, logger.SetOutput, or logger.SetFormatter. Zap: It automatically selects the current logger when executing functions such as zap.New, zap.NewNop, zap.NewProduction, zap.NewDevelopment, or zap.NewExample.  If there are multiple different logging systems in your current application, the last-called logging system would be chosen.\nThe configuration information is as follows:\n   Name Environment Key Default Value Description     log.type SW_LOG_TYPE auto The type of logging system. It currently supports auto, logrus, zap, and std.    Agent with Logging system The integration of the Agent with logs includes the two parts as following.\n Integrating Agent logs into the Service: Integrating the logs from the Agent into the framework used by the service. Integrating Tracing information into the Service: Integrating the information from Tracing into the service logs.  Agent logs into the Service Agent logs output the current running status of the Agent system, most of which are execution exceptions. For example, communication anomalies between the Agent and the backend service, plugin execution exceptions, etc.\nIntegrating Agent logs into the service\u0026rsquo;s logging system can effectively help users quickly troubleshoot whether there are issues with the current Agent execution.\nTracing information into the Service The Agent would also enhance the existing logging system. When the service outputs log, if the current goroutine contains Tracing data, it would be outputted together with the current logs. This helps users to quickly locate the link based on the Tracing data.\nTracing data The Tracing includes the following information:\n ServiceName: Current service name. ServiceInstanceName: Current service instance name. TraceID: The current Trace ID. 
If there is no link, it outputs N/A. SegmentID: The Segment ID in the current Trace. If there is no link, it outputs N/A. SpanID: The Span ID currently being operated on. If there is no link, it outputs -1.  The output format is as follows: [${ServiceName},${ServiceInstanceName},${TraceID},${SegmentID},${SpanID}].\nThe following is an example of a log output when using Zap.NewProduction:\n{\u0026quot;level\u0026quot;:\u0026quot;info\u0026quot;,\u0026quot;ts\u0026quot;:1683641507.052247,\u0026quot;caller\u0026quot;:\u0026quot;gin/main.go:45\u0026quot;,\u0026quot;msg\u0026quot;:\u0026quot;test log\u0026quot;,\u0026quot;SW_CTX\u0026quot;:\u0026quot;[Your_ApplicationName,681e4178ee7311ed864facde48001122@192.168.50.193,6f13069eee7311ed864facde48001122,6f13070cee7311ed864facde48001122,0]\u0026quot;} The configuration information is as follows:\n   Name Environment Key Default Value Description     log.tracing.enable SW_AGENT_LOG_TRACING_ENABLE true Whether to automatically integrate Tracing information into the logs.   log.tracing.key SW_AGENT_LOG_TRACING_KEY SW_CTX The key of the Tracing information in the log.    Log Upload The Agent would report the following two types of logs to the SkyWalking backend for storage and querying:\n Application Logs: It provides support for various logging frameworks and reports logs along with the corresponding distributed tracing information related to the current request. Only the relevant logs matching the current system log level would be output. Agent Logs: These are the logs generated by the Agent itself.  The current configuration options available are as follows:\n   Name Environment Key Default Value Description     log.reporter.enable SW_LOG_REPORTER_ENABLE true Whether to enable log reporting.   log.reporter.label_keys SW_LOG_REPORTER_LABEL_KEYS  By default, all fields are not reported. To specify the fields that need to be reported, please provide a comma-separated list of configuration item keys.    ","title":"Logging Setup","url":"/docs/skywalking-go/v0.4.0/en/advanced-features/logging-setup/"},{"content":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. 
labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","title":"LogQL Service","url":"/docs/main/latest/en/api/logql-service/"},{"content":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. 
If not mentioned, it will not be supported by default.\nLog queries The picture below shows the LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector: The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log lines using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line format: The line format expression can rewrite the log line content by using the text/template format. labels format: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operators supported by LogQL are the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operators supported by LogQL are the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log queries:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs containing a keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not containing a keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs containing keyword A but not keyword B in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries are used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL (Log Analysis Language), so metric-query LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki: in Loki, this API queries all labels used in the stream selector, but in SkyWalking, this API only queries log tags. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","title":"LogQL Service","url":"/docs/main/next/en/api/logql-service/"},{"content":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. 
If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","title":"LogQL Service","url":"/docs/main/v9.6.0/en/api/logql-service/"},{"content":"LogQL Service LogQL (Log Query Language) is Grafana Loki’s PromQL-inspired query language. LogQL Service exposes Loki Querying HTTP APIs including the bundled LogQL expression system. Third-party systems or visualization platforms that already support LogQL (such as Grafana), could obtain logs through LogQL Service.\nAs Skywalking log mechanism is different from Loki(metric extract, storage, etc.), the LogQL implemented by Skywalking won\u0026rsquo;t be a full features LogQL.\nDetails Of Supported LogQL The following doc describes the details of the supported protocol and compared it to the LogQL official documentation. 
If not mentioned, it will not be supported by default.\nLog queries The picture bellow is LogQL syntax in log queries: The expression supported by LogQL is composed of the following parts (expression with [✅] is implemented in SkyWalking):\n stream selector:The stream selector determines which log streams to include in a query’s results by labels. line filter: The line filter expression does a grep over the logs from the matching log streams. label filter: Label filter expression allows filtering log line using their original and extracted labels. parser: Parser expression can parse and extract labels from the log content. Those extracted labels can then be used by label filter expressions. line formate: The line format expression can rewrite the log line content by using the text/template format. labels formate: The label format expression can rename, modify or add labels. drop labels: The drop expression will drop the given labels in the pipeline.  The stream selector operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n =: exactly equal !=: not equal =~: regex matches !~: regex does not match  The filter operator supported by LogQL is composed of the following (operator with [✅] is implemented in SkyWalking):\n |=: Log line contains string !=: Log line does not contain string |~: Log line contains a match to the regular expression !~: Log line does not contain a match to the regular expression  Here are some typical expressions used in SkyWalking log query:\n# query service instance logs with specified traceId {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;, trace_id=\u0026quot;$trace_id\u0026quot;} # query service instance logs contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; # query service instance logs not contains keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} != \u0026quot;$keyword_not_contains\u0026quot; # query service instance logs contains A keyword but not contains B keyword in content {service=\u0026quot;$service\u0026quot;, service_instance=\u0026quot;$service_instance\u0026quot;} |= \u0026quot;$keyword_contains\u0026quot; != \u0026quot;$keyword_not_contains\u0026quot; Metric queries Metric queries is used to calculate metrics from logs in Loki. In SkyWalking, it is recommended to use LAL(Log Analysis Language). So metric queries LogQL won\u0026rsquo;t be supported in SkyWalking.\nDetails Of Supported Http Query API List Labels Query log tags within a range of time. It is different from Loki. In loki, this api query all labels used in stream selector, but in SkyWalking, this api only for log tags query. 
Others metadata (service, service_instance, endpoint) query is provided by PromQL Service.\nGET /loki/api/v1/labels    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/labels?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;level\u0026#34; ] } List Label values Query log tag values of tag within a range of time.\nGET /loki/api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Optional     start start timestamp in nanoseconds no   end end timestamp in nanoseconds no    For example:\n/loki/api/v1/label/level/values?start=1690947455457000000\u0026amp;end=1690947671936000000 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;INFO\u0026#34;, \u0026#34;WARN\u0026#34;, \u0026#34;ERROR\u0026#34; ] } Range queries Query logs within a range of time with LogQL expression.\nGET /loki/api/v1/query_range    Parameter Definition Optional     query logql expression no   start start timestamp in nanoseconds no   end end timestamp in nanoseconds no   limit numbers of log line returned in a query no   direction log order,FORWARD or BACKWARD no    For example:\n/api/v1/query_range?query={service=\u0026#39;agent::songs\u0026#39;}\u0026amp;start=1690947455457000000\u0026amp;end=1690947671936000000\u0026amp;limit=100\u0026amp;direction=BACKWARD Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;streams\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance1\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] }, { \u0026#34;stream\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;instance2\u0026#34;, \u0026#34;endpoint\u0026#34;: \u0026#34;xxx\u0026#34;, \u0026#34;trace_id\u0026#34;: \u0026#34;xxx\u0026#34; }, \u0026#34;values\u0026#34;: [ [ \u0026#34;1690947671936000000\u0026#34;, \u0026#34;foo\u0026#34; ], [ \u0026#34;1690947455457000000\u0026#34;, \u0026#34;bar\u0026#34; ] ] } ] } } ","title":"LogQL Service","url":"/docs/main/v9.7.0/en/api/logql-service/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  Below is the archived list.\n Go2Sky. Since Jun 14, 2023.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/latest/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  Below is the archived list.\n Go2Sky. Since Jun 14, 2023.  
What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/next/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.0.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.1.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.2.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols? See these protocols in protocols document.\nEnvoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.3.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.4.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols?  
Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.5.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. Go2Sky. Go SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.6.0/en/concepts-and-designs/manual-sdk/"},{"content":"Manual instrument SDK Our incredible community has contributed to the manual instrument SDK.\n Rust. Rust SDK follows the SkyWalking format. C++. C++ SDK follows the SkyWalking format.  Below is the archived list.\n Go2Sky. Since Jun 14, 2023.  What are the SkyWalking format and the propagation protocols?  Tracing APIs Meter APIs Logging APIs  Envoy tracer Envoy has its internal tracer implementation for SkyWalking. Read SkyWalking Tracer doc and SkyWalking tracing sandbox for more details.\n","title":"Manual instrument SDK","url":"/docs/main/v9.7.0/en/concepts-and-designs/manual-sdk/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/latest/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/next/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an important role in today\u0026rsquo;s distributed system, in order to reduce the length and latency of blocking RPC, and eventually improve user experience. 
But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 , to provide performance monitoring for Message Queue system.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.0.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.1.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.2.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.3.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. 
But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.4.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.5.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.6.0/en/setup/backend/mq/"},{"content":"Message Queue performance and consuming latency monitoring Message Queue server plays an essential role in today\u0026rsquo;s distributed system to reduce the length and latency of blocking RPC and eventually improve user experience. But in this async way, the measure for queue consuming traffic and latency becomes significant.\nSince 8.9.0, SkyWalking leverages native tracing agent and Extension Header Item of SkyWalking Cross Process Propagation Headers Protocol v3 To provide performance monitoring for the Message Queue systems.\nIn default, we provide Message Queue Consuming Count and Message Queue Avg Consuming Latency metrics for service and endpoint levels.\nMore metrics could be added through core.oal.\n","title":"Message Queue performance and consuming latency monitoring","url":"/docs/main/v9.7.0/en/setup/backend/mq/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. 
The result of an expression can be ingested either by the agent analyzer or by the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples, which are provided by external modules such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four types of operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter tags that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for the us-west and asia-north regions and the az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six types of operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values not equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. 
Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-east and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate over all label dimensions or preserve distinct dimensions by passing the by parameter (the keyword by can be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. Users can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Instructs the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provides two parameters to the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct the meter-system on how to downsample metrics. 
This refers not only to aggregating raw samples to the minute level, but also to expressing minute-level data at higher levels, such as hour and day.\nThe down sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function These functions extract level-relevant labels from metric labels, then inform the meter-system of the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_label) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts the layer label from the fourth argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from the first argument, where the label value should be client or server, extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the schema described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
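As an illustrative sketch of how these fields fit together, a minimal rule file might look as follows; the filter closure reuses the inline example from the skeleton, while the expSuffix, metricPrefix, name and exp values are assumptions for illustration only, not a shipped rule file:
filter: '{ tags -> tags.job_name == "vm-monitoring" }'
expSuffix: service(['service'], Layer.GENERAL)
metricPrefix: meter_vm
metricsRules:
  - name: last_sync_time
    exp: last_server_state_sync_time_in_seconds.downsampling(LATEST)
Each rule name is combined with the meter_ prefix to form the index/table name in storage, and exp holds the MAL expression to evaluate against the filtered metrics.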
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/latest/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
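Because each filter returns a new sample family, tag filters and value filters can be chained in a single expression. As an illustrative sketch (the threshold 50 is an assumption), instance_trace_count.tagEqual('az', 'az-1').valueGreater(50) would keep only az-1 samples whose values exceed 50. The simplest case applies a single value filter, as in the example that follows.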
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions) count (calculate the count over dimensions, the last tag will be counted)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20  Note, aggregation operations affect the samples from one bulk only. If the metrics are reported parallel from multiple instances/nodes through different SampleFamily, this aggregation would NOT work.\nIn the best practice for this scenario, build the metric with labels that represent each instance/node. Then use the AggregateLabels Operation in MQE to aggregate the metrics.\n Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. 
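As an illustrative sketch (the metric name request_duration_histogram_bucket is an assumption), a metric whose bucket boundary is carried in an le tag can be converted and then summarized with the histogram_percentile function described below: request_duration_histogram_bucket.histogram(le: 'le').histogram_percentile([50,99]).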
le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN MAX MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. 
The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/next/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. 
valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-east and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate over all label dimensions or preserve distinct dimensions by passing the by parameter.\n\u0026lt;aggr-op\u0026gt;(by: \u0026lt;tag1, tag2, ...\u0026gt;) Example expression:\ninstance_trace_count.sum(by: ['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. Users can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Instructs the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nDown Sampling Operation MAL should instruct the meter-system on how to downsample metrics. 
It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument.  More Examples Please refer to OAP Self-Observability\n","title":"Meter Analysis Language","url":"/docs/main/v9.0.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  
For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. 
Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter.\n\u0026lt;aggr-op\u0026gt;(by: \u0026lt;tag1, tag2, ...\u0026gt;) Example expression:\ninstance_trace_count.sum(by: ['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). 
Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nDown Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument.  More Examples Please refer to OAP Self-Observability\n","title":"Meter Analysis Language","url":"/docs/main/v9.1.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. 
tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. 
Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter.\n\u0026lt;aggr-op\u0026gt;(by: \u0026lt;tag1, tag2, ...\u0026gt;) Example expression:\ninstance_trace_count.sum(by: ['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). 
Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  More Examples Please refer to OAP Self-Observability\n","title":"Meter Analysis Language","url":"/docs/main/v9.2.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  
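For instance, the literal expression 1 + 2 evaluates to a scalar, while a bare metric name such as instance_trace_count evaluates to a sample family, as described next.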
Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. 
For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. 
The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. 
endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/v9.3.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OC/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. 
For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. 
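(In practice, scalar arithmetic is often used for unit conversion; as a hedged sketch with an assumed metric name, jvm_memory_bytes_used / 1048576 would rescale every sample from bytes to megabytes.)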
For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. 
The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]). Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. 
endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/v9.4.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. 
For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. 
For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. 
The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. It doesn\u0026rsquo;t only refer to aggregate raw samples to minute level, but also expresses data from minute in higher levels, such as hour and day.\nDown sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function They extract level relevant labels from metric labels, then informs the meter-system the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument, extracts layer from Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts layer from Layer argument, propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. 
endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, extracts endpoint level labels from the second array argument, extracts layer from Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_lable) extracts service level labels from the first array argument, extracts instance level labels from the second array argument, extracts process level labels from the third array argument, extracts layer label from fourse argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint including DetectPoint.CLIENT and DetectPoint.SERVER, extracts sourceService labels from the first array argument, extracts destService labels from the second array argument, extracts layer from Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts DetectPoint labels from first argument, the label value should be client or server. extracts Service labels from the first array argument, extracts Instance labels from the second array argument, extracts ProcessID labels from the fourth and fifth arguments of the source and destination.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/v9.5.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. 
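A rule is usually written as one chained expression that ends in a metric level function; as a hedged, purely illustrative sketch (the metric, tag and layer names are assumptions, not taken from any shipped rule): node_cpu_seconds_total.tagEqual('mode', 'idle').rate('PT1M').sum(['node']).service(['node'], Layer.OS_LINUX).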
The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. 
Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
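(The downsampling hint often appears at the end of an expression; a hedged sketch with an assumed metric name: http_requests_total.sum(['service']).downsampling(SUM).)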
Downsampling not only refers to aggregating raw samples to the minute level, but also to rolling minute-level data up to higher levels, such as hour and day.\nThe down sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function These functions extract level-relevant labels from metric labels, then inform the meter-system of the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument and the layer from the Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, instance level labels from the second array argument, and the layer from the Layer argument; propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, endpoint level labels from the second array argument, and the layer from the Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_label) extracts service level labels from the first array argument, instance level labels from the second array argument, process level labels from the third array argument, and the layer label from the fourth argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint includes DetectPoint.CLIENT and DetectPoint.SERVER; extracts sourceService labels from the first array argument, destService labels from the second array argument, and the layer from the Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts the DetectPoint label from the first argument (the label value should be client or server), Service labels from the first array argument, Instance labels from the second array argument, and the source and destination ProcessID labels from the fourth and fifth arguments.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here.\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/v9.6.0/en/concepts-and-designs/mal/"},{"content":"Meter Analysis Language The meter system provides a functional analysis language called MAL (Meter Analysis Language) that lets users analyze and aggregate meter data in the OAP streaming system. The result of an expression can either be ingested by the agent analyzer, or the OpenTelemetry/Prometheus analyzer.\nLanguage data type In MAL, an expression or sub-expression can evaluate to one of the following two types:\n Sample family: A set of samples (metrics) containing a range of metrics whose names are identical. Scalar: A simple numeric value that supports integer/long and floating/double.  Sample family A set of samples, which acts as the basic unit in MAL. For example:\ninstance_trace_count The sample family above may contain the following samples which are provided by external modules, such as the agent analyzer:\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 100 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 20 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 33 Tag filter MAL supports four type operations to filter samples in a sample family by tag:\n tagEqual: Filter tags exactly equal to the string provided. tagNotEqual: Filter tags not equal to the string provided. tagMatch: Filter tags that regex-match the string provided. tagNotMatch: Filter labels that do not regex-match the string provided.  For example, this filters all instance_trace_count samples for us-west and asia-north region and az-1 az:\ninstance_trace_count.tagMatch(\u0026quot;region\u0026quot;, \u0026quot;us-west|asia-north\u0026quot;).tagEqual(\u0026quot;az\u0026quot;, \u0026quot;az-1\u0026quot;) Value filter MAL supports six type operations to filter samples in a sample family by value:\n valueEqual: Filter values exactly equal to the value provided. valueNotEqual: Filter values equal to the value provided. valueGreater: Filter values greater than the value provided. valueGreaterEqual: Filter values greater than or equal to the value provided. valueLess: Filter values less than the value provided. valueLessEqual: Filter values less than or equal to the value provided.  
For example, this filters all instance_trace_count samples for values \u0026gt;= 33:\ninstance_trace_count.valueGreaterEqual(33) Tag manipulator MAL allows tag manipulators to change (i.e. add/delete/update) tags and their values.\nK8s MAL supports using the metadata of K8s to manipulate the tags and their values. This feature requires authorizing the OAP Server to access K8s\u0026rsquo;s API Server.\nretagByK8sMeta retagByK8sMeta(newLabelName, K8sRetagType, existingLabelName, namespaceLabelName). Add a new tag to the sample family based on the value of an existing label. Provide several internal converting types, including\n K8sRetagType.Pod2Service  Add a tag to the sample using service as the key, $serviceName.$namespace as the value, and according to the given value of the tag key, which represents the name of a pod.\nFor example:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh} 2 Expression:\ncontainer_cpu_usage_seconds_total.retagByK8sMeta('service' , K8sRetagType.Pod2Service , 'pod' , 'namespace') Output:\ncontainer_cpu_usage_seconds_total{namespace=default, container=my-nginx, cpu=total, pod=my-nginx-5dc4865748-mbczh, service='nginx-service.default'} 2 Binary operators The following binary arithmetic operators are available in MAL:\n + (addition) - (subtraction) * (multiplication) / (division)  Binary operators are defined between scalar/scalar, sampleFamily/scalar and sampleFamily/sampleFamily value pairs.\nBetween two scalars: they evaluate to another scalar that is the result of the operator being applied to both scalar operands:\n1 + 2 Between a sample family and a scalar, the operator is applied to the value of every sample in the sample family. For example:\ninstance_trace_count + 2 or\n2 + instance_trace_count results in\ninstance_trace_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 102 // 100 + 2 instance_trace_count{region=\u0026quot;us-east\u0026quot;,az=\u0026quot;az-3\u0026quot;} 22 // 20 + 2 instance_trace_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 35 // 33 + 2 Between two sample families, a binary operator is applied to each sample in the sample family on the left and its matching sample in the sample family on the right. A new sample family with empty name will be generated. Only the matched tags will be reserved. Samples with no matching samples in the sample family on the right will not be found in the result.\nAnother sample family instance_trace_analysis_error_count is\ninstance_trace_analysis_error_count{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 20 instance_trace_analysis_error_count{region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 11 Example expression:\ninstance_trace_analysis_error_count / instance_trace_count This returns a resulting sample family containing the error rate of trace analysis. 
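(A scalar can be chained onto the result as well; as a hedged sketch, instance_trace_analysis_error_count / instance_trace_count * 100 would express the same ratio as a percentage.)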
Samples with region us-west and az az-3 have no match and will not show up in the result:\n{region=\u0026quot;us-west\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.2 // 20 / 100 {region=\u0026quot;asia-north\u0026quot;,az=\u0026quot;az-1\u0026quot;} 0.3333 // 11 / 33 Aggregation Operation Sample family supports the following aggregation operations that can be used to aggregate the samples of a single sample family, resulting in a new sample family having fewer samples (sometimes having just a single sample) with aggregated values:\n sum (calculate sum over dimensions) min (select minimum over dimensions) max (select maximum over dimensions) avg (calculate the average over dimensions)  These operations can be used to aggregate overall label dimensions or preserve distinct dimensions by inputting by parameter( the keyword by could be omitted)\n\u0026lt;aggr-op\u0026gt;(by=[\u0026lt;tag1\u0026gt;, \u0026lt;tag2\u0026gt;, ...]) Example expression:\ninstance_trace_count.sum(by=['az']) will output the following result:\ninstance_trace_count{az=\u0026quot;az-1\u0026quot;} 133 // 100 + 33 instance_trace_count{az=\u0026quot;az-3\u0026quot;} 20 Function Duration is a textual representation of a time range. The formats accepted are based on the ISO-8601 duration format {@code PnDTnHnMn.nS} where a day is regarded as exactly 24 hours.\nExamples:\n \u0026ldquo;PT20.345S\u0026rdquo; \u0026ndash; parses as \u0026ldquo;20.345 seconds\u0026rdquo; \u0026ldquo;PT15M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;15 minutes\u0026rdquo; (where a minute is 60 seconds) \u0026ldquo;PT10H\u0026rdquo; \u0026ndash; parses as \u0026ldquo;10 hours\u0026rdquo; (where an hour is 3600 seconds) \u0026ldquo;P2D\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days\u0026rdquo; (where a day is 24 hours or 86400 seconds) \u0026ldquo;P2DT3H4M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;2 days, 3 hours and 4 minutes\u0026rdquo; \u0026ldquo;P-6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and +3 minutes\u0026rdquo; \u0026ldquo;-P6H3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;-6 hours and -3 minutes\u0026rdquo; \u0026ldquo;-P-6H+3M\u0026rdquo; \u0026ndash; parses as \u0026ldquo;+6 hours and -3 minutes\u0026rdquo;  increase increase(Duration): Calculates the increase in the time range.\nrate rate(Duration): Calculates the per-second average rate of increase in the time range.\nirate irate(): Calculates the per-second instant rate of increase in the time range.\ntag tag({allTags -\u0026gt; }): Updates tags of samples. User can add, drop, rename and update tags.\nhistogram histogram(le: '\u0026lt;the tag name of le\u0026gt;'): Transforms less-based histogram buckets to meter system histogram buckets. le parameter represents the tag name of the bucket.\nhistogram_percentile histogram_percentile([\u0026lt;p scalar\u0026gt;]): Represents the meter-system to calculate the p-percentile (0 ≤ p ≤ 100) from the buckets.\ntime time(): Returns the number of seconds since January 1, 1970 UTC.\nforeach forEach([string_array], Closure\u0026lt;Void\u0026gt; each): Iterates all samples according to the first array argument, and provide two parameters in the second closure argument:\n element: element in the array. tags: tags in each sample.  Down Sampling Operation MAL should instruct meter-system on how to downsample for metrics. 
Downsampling not only refers to aggregating raw samples to the minute level, but also to rolling minute-level data up to higher levels, such as hour and day.\nThe down sampling function is called downsampling in MAL, and it accepts the following types:\n AVG SUM LATEST SUM_PER_MIN MIN (TODO) MAX (TODO) MEAN (TODO) COUNT (TODO)  The default type is AVG.\nIf users want to get the latest time from last_server_state_sync_time_in_seconds:\nlast_server_state_sync_time_in_seconds.tagEqual('production', 'catalog').downsampling(LATEST) Metric level function These functions extract level-relevant labels from metric labels, then inform the meter-system of the level and layer to which this metric belongs.\n service([svc_label1, svc_label2...], Layer) extracts service level labels from the array argument and the layer from the Layer argument. instance([svc_label1, svc_label2...], [ins_label1, ins_label2...], Layer, Closure\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; propertiesExtractor) extracts service level labels from the first array argument, instance level labels from the second array argument, and the layer from the Layer argument; propertiesExtractor is an optional closure that extracts instance properties from tags, e.g. { tags -\u0026gt; ['pod': tags.pod, 'namespace': tags.namespace] }. endpoint([svc_label1, svc_label2...], [ep_label1, ep_label2...]) extracts service level labels from the first array argument, endpoint level labels from the second array argument, and the layer from the Layer argument. process([svc_label1, svc_label2...], [ins_label1, ins_label2...], [ps_label1, ps_label2...], layer_label) extracts service level labels from the first array argument, instance level labels from the second array argument, process level labels from the third array argument, and the layer label from the fourth argument. serviceRelation(DetectPoint, [source_svc_label1...], [dest_svc_label1...], Layer) DetectPoint includes DetectPoint.CLIENT and DetectPoint.SERVER; extracts sourceService labels from the first array argument, destService labels from the second array argument, and the layer from the Layer argument. processRelation(detect_point_label, [service_label1...], [instance_label1...], source_process_id_label, dest_process_id_label, component_label) extracts the DetectPoint label from the first argument (the label value should be client or server), Service labels from the first array argument, Instance labels from the second array argument, and the source and destination ProcessID labels from the fourth and fifth arguments.  Configuration file The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/otel-rules, $CLASSPATH/meter-analyzer-config, $CLASSPATH/envoy-metrics-rules and $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here.\nGeneric placeholders are defined as follows:\n \u0026lt;string\u0026gt;: A regular string. \u0026lt;closure\u0026gt;: A closure with custom logic.  
# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;More Examples Please refer to OAP Self-Observability.\n","title":"Meter Analysis Language","url":"/docs/main/v9.7.0/en/concepts-and-designs/mal/"},{"content":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  
string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. Zabbix receiver  ","title":"Meter APIs","url":"/docs/main/latest/en/api/meter/"},{"content":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","title":"Meter APIs","url":"/docs/main/next/en/api/meter/"},{"content":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","title":"Meter APIs","url":"/docs/main/v9.4.0/en/api/meter/"},{"content":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","title":"Meter APIs","url":"/docs/main/v9.5.0/en/api/meter/"},{"content":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","title":"Meter APIs","url":"/docs/main/v9.6.0/en/api/meter/"},{"content":"Meter APIs SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenTelemetry, and Zabbix.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.agent.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/agent/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service MeterReportService { // Meter data is reported in a certain period. The agent/SDK should report all collected metrics in this period through one stream.  // The whole stream is an input data set, client should onComplete the stream per report period.  rpc collect (stream MeterData) returns (Commands) { } // Reporting meter data in bulk mode as MeterDataCollection.  // By using this, each one in the stream would be treated as a complete input for MAL engine,  // comparing to `collect (stream MeterData)`, which is using one stream as an input data set.  rpc collectBatch (stream MeterDataCollection) returns (Commands) { }}// Label of the meter message Label { string name = 1; string value = 2;}// The histogram element definition. It includes the bucket lower boundary and the count in the bucket. message MeterBucketValue { // The value represents the min value of the bucket,  // the upper boundary is determined by next MeterBucketValue$bucket,  // if it doesn\u0026#39;t exist, the upper boundary is positive infinity.  double bucket = 1; int64 count = 2; // If is negative infinity, the value of the bucket is invalid  bool isNegativeInfinity = 3;}// Meter single value message MeterSingleValue { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Single value  double value = 3;}// Histogram message MeterHistogram { // Meter name  string name = 1; // Labels  repeated Label labels = 2; // Customize the buckets  repeated MeterBucketValue values = 3;}// Single meter data, if the same metrics have a different label, they will separate. message MeterData { // Meter data could be a single value or histogram.  oneof metric { MeterSingleValue singleValue = 1; MeterHistogram histogram = 2; } // Service name, be set value in the first element in the stream-call.  string service = 3; // Service instance name, be set value in the first element in the stream-call.  string serviceInstance = 4; // Meter data report time, be set value in the first element in the stream-call.  int64 timestamp = 5;}message MeterDataCollection { repeated MeterData meterData = 1;}OpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to Meter Analysis Language engine.\nTo learn more about receiving 3rd party formats metrics, see\n Meter receiver OpenTelemetry receiver. 
Zabbix receiver  ","title":"Meter APIs","url":"/docs/main/v9.7.0/en/api/meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/latest/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nThe following known API libs can report meter telemetry data:\n SkyWalking Java Meter toolkit APIs Spring MicroMeter Observations APIs works with OAP MicroMeter Observations setup  Agents Bundled Meters All of the following agents and components have built-in meters reporting to the OAP through Meter APIs.\n Go agent for Go VM metrics Python agent for PVM metrics Java agent with Spring micrometer toolkit Java agent for datasource metrics Java agent for thread-pool metrics Rover (eBPF) agent for metrics used in continuous profiling Satellite proxy self-observability metrics  Configuration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files are NOT enabled by default; you should make meter configuration take effect through the agent-analyzer section in application.yml of the SkyWalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend that users consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  
","title":"Meter receiver","url":"/docs/main/next/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by Manual Meter API. Custom metrics collected cannot be used directly, they should be configured in meter-analyzer-config configuration files, which is described in next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.0.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. 
Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. 
Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.1.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.2.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring Sleuth APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-sleuth Metrics of Spring Sleuth Application meter-analyzer-config/spring-sleuth.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. 
If you\u0026rsquo;re using Spring Sleuth, see Spring Sleuth Setup.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.3.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   
MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Sprign Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. 
This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.4.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. 
Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.5.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. 
Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. 
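The spring-micrometer rule above consumes metrics produced through Micrometer's Observation API on the application side. Below is a minimal, hedged sketch of that producer side; the observation name and tag are hypothetical, and wiring the observations into SkyWalking is done through the toolkit described in the referenced APIs page rather than shown here.

```java
// Sketch only: plain Micrometer Observation API usage on the application side.
import io.micrometer.observation.Observation;
import io.micrometer.observation.ObservationRegistry;

public class CheckoutService {
    private final ObservationRegistry registry = ObservationRegistry.create();

    public void checkout() {
        // "checkout" and the "tenant" tag are illustrative names only.
        Observation.createNotStarted("checkout", registry)
                .lowCardinalityKeyValue("tenant", "demo")
                .observe(() -> {
                    // business logic to be measured
                });
    }
}
```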
If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.6.0/en/setup/backend/backend-meter/"},{"content":"Meter receiver The meter receiver accepts the metrics of meter protocol into the meter system.\nModule definition Module definition is defined in application.yml, typically located at $SKYWALKING_BASE_DIR/config/application.yml by default.\nreceiver-meter:selector:${SW_RECEIVER_METER:default}default:In Kafka Fetcher, follow these configurations to enable it.\nkafka-fetcher:selector:${SW_KAFKA_FETCHER:default}default:bootstrapServers:${SW_KAFKA_FETCHER_SERVERS:localhost:9092}Report Meter Telemetry Data Manual Meter API Custom metrics may be collected by the Manual Meter API. Custom metrics collected cannot be used directly; they should be configured in the meter-analyzer-config configuration files described in the next part.\nThe receiver adds labels with key = service and key = instance to the collected data samples, and values from service and service instance name defined in SkyWalking Agent, for identification of the metric data.\nA typical manual meter API set is Spring MicroMeter Observations APIs\nOpenTelemetry Exporter You can use OpenTelemetry Collector to transport the metrics to SkyWalking OAP. Read the doc on Skywalking Exporter for a detailed guide.\nThe following is the correspondence between the OpenTelemetry Metric Data Type and the Skywalking Data Collect Protocol:\n   OpenTelemetry Metric Data Type Skywalking Data Collect Protocol     MetricDataTypeGauge MeterSingleValue   MetricDataTypeSum MeterSingleValue   MetricDataTypeHistogram MeterHistogram and two MeterSingleValues containing $name_sum and $name_count metrics.   MetricDataTypeSummary A series of MeterSingleValue containing tag quantile and two MeterSingleValues containing $name_sum and $name_count metrics.   
MetricDataTypeExponentialHistogram Not Supported    Note: $name is the original metric name.\nConfiguration file The meter receiver is configured via a configuration file. The configuration file defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/meter-analyzer-config.\nNew meter-analyzer-config files is NOT enabled by default, you should make meter configuration take effect through section agent-analyzer in application.yml of skywalking backend.\nagent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:# ... take care of other analyzersmeterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:your-custom-meter-conf-without-ext-name}# The multiple files should be separated by \u0026#34;,\u0026#34;Meter-analyzer-config file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nAll available meter analysis scripts could be found here.\n   Rule Name Description Configuration File Data Source     satellite Metrics of SkyWalking Satellite self-observability(so11y) meter-analyzer-config/satellite.yaml SkyWalking Satellite \u0026ndash;meter format\u0026ndash;\u0026gt;SkyWalking OAP Server   threadpool Metrics of Thread Pool meter-analyzer-config/threadpool.yaml Thread Pool \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   datasource Metrics of DataSource metrics meter-analyzer-config/datasource.yaml Datasource \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server   spring-micrometer Metrics of Spring Sleuth Application meter-analyzer-config/spring-micrometer.yaml Spring Sleuth Application \u0026ndash;meter format\u0026ndash;\u0026gt; SkyWalking OAP Server    An example can be found here. If you\u0026rsquo;re using Spring MicroMeter Observations, see Spring MicroMeter Observations APIs.\nMeters configuration # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:# The name of rule, which combinates with a prefix \u0026#39;\u0026lt;metricPrefix\u0026gt;_\u0026#39; as the index/table name in storage.# The name with prefix can also be quoted in UI (Dashboard/Template/Item/Metrics)name:\u0026lt;string\u0026gt;# MAL expression. Raw name of custom metrics collected can be used hereexp:\u0026lt;string\u0026gt;For more information on MAL, please refer to mal.md\nrate, irate, and increase Although we support the rate, irate, increase functions in the backend, we still recommend users to consider using client-side APIs to run these functions. The reasons are as follows:\n The OAP has to set up caches to calculate the values. Once the agent reconnects to another OAP instance, the time windows of rate calculation break. 
This leads to inaccurate results.  ","title":"Meter receiver","url":"/docs/main/v9.7.0/en/setup/backend/backend-meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/latest/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. 
Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/next/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.0.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. 
The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.1.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.2.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. 
Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.3.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.4.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. 
Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.5.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  
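To make the bootstrap-time declaration concrete, the sketch below shows how a receiver might register metrics against the three binding functions before any values are accepted. It assumes the OAP-internal MeterSystem service; the class, method, and metric names are assumptions for illustration, not part of this page.

```java
// Illustrative sketch of declaring meter-system metrics at bootstrap (OAP-internal API assumed).
import org.apache.skywalking.oap.server.core.analysis.meter.MeterSystem;
import org.apache.skywalking.oap.server.core.analysis.meter.ScopeType;

public class ExampleMeterDeclaration {
    public static void declare(MeterSystem meterSystem) {
        // Globally unique metric names, each bound to a function and a scope type.
        meterSystem.create("example_resp_time", "avg", ScopeType.SERVICE);
        meterSystem.create("example_resp_histogram", "histogram", ScopeType.SERVICE_INSTANCE);
        meterSystem.create("example_resp_percentile", "percentile", ScopeType.ENDPOINT);
        // After the bootstrap stage, these declarations must not change at runtime.
    }
}
```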
","title":"Meter System","url":"/docs/main/v9.6.0/en/concepts-and-designs/meter/"},{"content":"Meter System Meter system is another streaming calculation mode designed for metrics data. In the OAL, there are clear Scope Definitions, including definitions for native objects. Meter system is focused on the data type itself, and provides a more flexible approach to the end user in defining the scope entity.\nThe meter system is open to different receivers and fetchers in the backend, see the backend setup document for more details.\nEvery metric is declared in the meter system to include the following attributes:\n Metrics Name. A globally unique name to avoid overlapping between the OAL variable names. Function Name. The function used for this metric, namely distributed aggregation, value calculation or down sampling calculation based on the function implementation. Further, the data structure is determined by the function as well, such as function Avg is for Long. Scope Type. Unlike within the OAL, there are plenty of logic scope definitions. In the meter system, only type is required. Type values include service, instance, and endpoint, just as we have described in the Overview section. The values of scope entity name, such as service name, are required when metrics data are generated with the metrics data values.  NOTE: The metrics must be declared in the bootstrap stage, and there must be no change to runtime.\nThe Meter System supports the following binding functions:\n avg. Calculates the avg value for every entity under the same metrics name. histogram. Aggregates the counts in the configurable buckets. Buckets are configurable but must be assigned in the declaration stage. percentile. See percentile in WIKI. Unlike the OAL, we provide 50/75/90/95/99 by default. In the meter system function, the percentile function accepts several ranks, which should be in the (0, 100) range.  ","title":"Meter System","url":"/docs/main/v9.7.0/en/concepts-and-designs/meter/"},{"content":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
For example, test.meter will be sent as test_meter.\n  Using millisecond as the time unit.\n  Adapting the Micrometer data conventions as follows.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same as counter   Gauges Gauges name Gauges Same as gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Count of finished executions    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total duration of finished executions    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of finished executions    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of finished execution durations   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Count of executing tasks    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter Total duration of all executing tasks    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Duration of the current longest-running task   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Count of finished timer executions    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Total duration of finished timer executions   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Sum of recorded amounts    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max recorded amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Data conventions that are not adapted.     Micrometer data type Data type     LongTaskTimer Histogram    ","title":"Metrics","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-micrometer/"},{"content":"Metrics  Add the toolkit dependency, e.g. via Maven or Gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Use org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry; it forwards the metrics collected by Micrometer to the OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If some counters should be converted to rates on the agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // A composite registry can combine multiple meter registries, e.g. reporting to both SkyWalking and Prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","title":"Metrics","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer/"},{"content":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","title":"Metrics","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-micrometer/"},{"content":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","title":"Metrics","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-micrometer/"},{"content":"Metrics  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  Using org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry as the registry, it could forward the MicroMeter collected metrics to OAP server.  import org.apache.skywalking.apm.meter.micrometer.SkywalkingMeterRegistry; SkywalkingMeterRegistry registry = new SkywalkingMeterRegistry(); // If you has some counter want to rate by agent side SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;test_rate_counter\u0026#34;)); new SkywalkingMeterRegistry(config); // Also you could using composite registry to combine multiple meter registry, such as collect to Skywalking and prometheus CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry(); compositeRegistry.add(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)); compositeRegistry.add(new SkywalkingMeterRegistry());   Using snake case as the naming convention. 
Such as test.meter will be send to test_meter.\n  Using Millisecond as the time unit.\n  Adapt micrometer data convention.\n     Micrometer data type Transform to meter name Skywalking data type Description     Counter Counter name Counter Same with counter   Gauges Gauges name Gauges Same with gauges   Timer Timer name + \u0026ldquo;_count\u0026rdquo; Counter Execute finished count    Timer name + \u0026ldquo;_sum\u0026rdquo; Counter Total execute finished duration    Timer name + \u0026ldquo;_max\u0026rdquo; Gauges Max duration of execute finished time    Timer name + \u0026ldquo;_histogram\u0026rdquo; Histogram Histogram of execute finished duration   LongTaskTimer Timer name + \u0026ldquo;_active_count\u0026rdquo; Gauges Executing task count    Timer name + \u0026ldquo;_duration_sum\u0026rdquo; Counter All of executing task sum duration    Timer name + \u0026ldquo;_max\u0026rdquo; Counter Current longest running task execute duration   Function Timer Timer name + \u0026ldquo;_count\u0026rdquo; Gauges Execute finished timer count    Timer name + \u0026ldquo;_sum\u0026rdquo; Gauges Execute finished timer total duration   Function Counter Counter name Counter Custom counter value   Distribution summary Summary name + \u0026ldquo;_count\u0026rdquo; Counter Total record count    Summary name + \u0026ldquo;_sum\u0026rdquo; Counter Total record amount sum    Summary name + \u0026ldquo;_max\u0026rdquo; Gauges Max record amount    Summary name + \u0026ldquo;_histogram\u0026rdquo; Gauges Histogram of the amount     Not Adapt data convention.     Micrometer data type Data type     LongTaskTimer Histogram    ","title":"Metrics","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-micrometer/"},{"content":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and analysis. In the real world, many may want to forward their data to a 3rd party system for an in-depth analysis or otherwise. Metrics Exporter has made that possible.\nMetrics exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporters:\n gRPC exporter  gRPC exporter gRPC exporter uses SkyWalking\u0026rsquo;s native exporter service definition. Here is the proto definition.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}message ExportMetricValue { string metricName = 1; string entityName = 2; string entityId = 3; ValueType type = 4; int64 timeBucket = 5; int64 longValue = 6; double doubleValue = 7; repeated int64 longValues = 8;}message SubscriptionsResp { repeated SubscriptionMetric metrics = 1;}message SubscriptionMetric { string metricName = 1; EventType eventType = 2;}enum ValueType { LONG = 0; DOUBLE = 1; MULTI_LONG = 2;}enum EventType { // The metrics aggregated in this bulk, not include the existing persistent data.  INCREMENT = 0; // Final result of the metrics at this moment.  TOTAL = 1;}message SubscriptionReq {}message ExportResponse {}To activate the exporter, you should add this into your application.yml\nexporter:grpc:targetHost:127.0.0.1targetPort:9870 targetHost:targetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service Subscription implementation Return the expected metrics name list with event type (incremental or total). 
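For illustration, a minimal target exporter service might implement subscription and export as sketched below. The stub and message class names assume the usual protoc-gen-grpc-java output for the proto above; treat them as assumptions rather than names defined by this page.

```java
// Sketch of a target exporter service; generated class names are assumptions based on the proto above.
import io.grpc.stub.StreamObserver;

public class ExampleMetricExporter extends MetricExportServiceGrpc.MetricExportServiceImplBase {

    @Override
    public void subscription(SubscriptionReq request, StreamObserver<SubscriptionsResp> responseObserver) {
        // An empty metric list means: export all metrics in the INCREMENT event type.
        responseObserver.onNext(SubscriptionsResp.newBuilder().build());
        responseObserver.onCompleted();
    }

    @Override
    public StreamObserver<ExportMetricValue> export(StreamObserver<ExportResponse> responseObserver) {
        return new StreamObserver<ExportMetricValue>() {
            @Override
            public void onNext(ExportMetricValue value) {
                // Follow #type to pick the value field, as described below.
                switch (value.getType()) {
                    case LONG:       System.out.println(value.getMetricName() + "=" + value.getLongValue()); break;
                    case DOUBLE:     System.out.println(value.getMetricName() + "=" + value.getDoubleValue()); break;
                    case MULTI_LONG: System.out.println(value.getMetricName() + "=" + value.getLongValuesList()); break;
                    default:         break;
                }
            }

            @Override
            public void onError(Throwable t) {
                // Kept minimal for the sketch; a real exporter would log and recover here.
            }

            @Override
            public void onCompleted() {
                responseObserver.onNext(ExportResponse.newBuilder().build());
                responseObserver.onCompleted();
            }
        };
    }
}
```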
All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\nExport implementation Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n","title":"Metrics Exporter","url":"/docs/main/v9.0.0/en/setup/backend/metrics-exporter/"},{"content":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and analysis. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Metrics Exporter has made that possible.\nThe metrics exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporters:\n gRPC exporter  gRPC exporter gRPC exporter uses SkyWalking\u0026rsquo;s native exporter service definition. Here is the proto definition.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}message ExportMetricValue { string metricName = 1; string entityName = 2; string entityId = 3; ValueType type = 4; int64 timeBucket = 5; int64 longValue = 6; double doubleValue = 7; repeated int64 longValues = 8;}message SubscriptionsResp { repeated SubscriptionMetric metrics = 1;}message SubscriptionMetric { string metricName = 1; EventType eventType = 2;}enum ValueType { LONG = 0; DOUBLE = 1; MULTI_LONG = 2;}enum EventType { // The metrics aggregated in this bulk, not include the existing persistent data.  INCREMENT = 0; // Final result of the metrics at this moment.  TOTAL = 1;}message SubscriptionReq {}message ExportResponse {}To activate the exporter, you should add this into your application.yml\nexporter:grpc:targetHost:127.0.0.1targetPort:9870 targetHost:targetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service Subscription implementation Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\nExport implementation Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n","title":"Metrics Exporter","url":"/docs/main/v9.1.0/en/setup/backend/metrics-exporter/"},{"content":"Metrics Exporter SkyWalking provides the essential functions of metrics aggregation, alarm, and analysis. In many real-world scenarios, users may want to forward their data to a 3rd party system for further in-depth analysis. Metrics Exporter has made that possible.\nThe metrics exporter is an independent module that has to be manually activated.\nRight now, we provide the following exporters:\n gRPC exporter  gRPC exporter gRPC exporter uses SkyWalking\u0026rsquo;s native exporter service definition. 
Here is the proto definition.\nservice MetricExportService { rpc export (stream ExportMetricValue) returns (ExportResponse) { } rpc subscription (SubscriptionReq) returns (SubscriptionsResp) { }}message ExportMetricValue { string metricName = 1; string entityName = 2; string entityId = 3; ValueType type = 4; int64 timeBucket = 5; int64 longValue = 6; double doubleValue = 7; repeated int64 longValues = 8;}message SubscriptionsResp { repeated SubscriptionMetric metrics = 1;}message SubscriptionMetric { string metricName = 1; EventType eventType = 2;}enum ValueType { LONG = 0; DOUBLE = 1; MULTI_LONG = 2;}enum EventType { // The metrics aggregated in this bulk, not include the existing persistent data.  INCREMENT = 0; // Final result of the metrics at this moment.  TOTAL = 1;}message SubscriptionReq {}message ExportResponse {}To activate the exporter, you should add this into your application.yml\nexporter:grpc:targetHost:127.0.0.1targetPort:9870 targetHost:targetPort is the expected target service address. You could set any gRPC server to receive the data. Target gRPC service needs to go on standby; otherwise, the OAP startup may fail.  Target exporter service Subscription implementation Return the expected metrics name list with event type (incremental or total). All names must match the OAL/MAL script definition. Return empty list, if you want to export all metrics in the incremental event type.\nExport implementation Stream service. All subscribed metrics will be sent here based on the OAP core schedule. Also, if the OAP is deployed as a cluster, this method will be called concurrently. For metrics value, you need to follow #type to choose #longValue or #doubleValue.\n","title":"Metrics Exporter","url":"/docs/main/v9.2.0/en/setup/backend/metrics-exporter/"},{"content":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics For now, we only have a single anonymous label with multi label values in a labeled metric. To be able to use it in expressions, define _ as the anonymous label name (key).\nExpression:\n\u0026lt;metric_name\u0026gt;{_=\u0026#39;\u0026lt;label_value_1\u0026gt;,...\u0026#39;} {_='\u0026lt;label_value_1\u0026gt;,...'} is the selected label value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, we can use the following expression:\nservice_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. 
The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result 
SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType; please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater than or equal to the result  follow the input expression   floor returns the largest integer value that is less than or equal to the result  follow the input expression   round returns the result rounded to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the ExpressionResultType; please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs a TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results and should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results.\nExpression:\nrelabel(Expression, _=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) _ is the new label of the metric after the label is relabeled; the order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, and rename the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;}, _=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. 
It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, parameter)    parameter Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value):\naggregate_labels(total_commands_rate, SUM) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nTrend Operation Trend Operation takes an expression and performs a trend calculation on its results.\nExpression:\n\u0026lt;Trend-Operator\u0026gt;(Metrics Expression, time_range) time_range is the positive int of the calculated range. The unit will automatically align with to the query Step, for example, if the query Step is MINUTE, the unit of time_range is minute.\n   Operator Definition ExpressionResultType     increase returns the increase in the time range in the time series TIME_SERIES_VALUES   rate returns the per-second average rate of increase in the time range in the time series TIME_SERIES_VALUES    For example: If we want to query the increase value of the service_cpm metric in 2 minute(assume the query Step is MINUTE), we can use the following expression:\nincrease(service_cpm, 2) If the query duration is 3 minutes, from (T1 to T3) and the metric has values in time series:\nV(T1-2), V(T1-1), V(T1), V(T2), V(T3) then the expression result is:\nV(T1)-V(T1-2), V(T2)-V(T1-1), V(T3)-V(T1) Note:\n If the calculated metric value is empty, the result will be empty. Assume in the T3 point, the increase value = V(T3)-V(T1), If the metric V(T3) or V(T1) is empty, the result value in T3 will be empty.  
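To make the formula above concrete, here is an illustrative walk-through with made-up numbers (not taken from the official docs), assuming the query Step is MINUTE: if service_cpm has the values 100, 120, 150, 180, 210 at T1-2, T1-1, T1, T2 and T3, then increase(service_cpm, 2) returns 50, 60, 60 for T1, T2 and T3, i.e. V(T1)-V(T1-2)=150-100, V(T2)-V(T1-1)=180-120 and V(T3)-V(T1)=210-150.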
Result Type TIME_SERIES_VALUES.\nExpression Query Example Labeled Value Metrics service_percentile{_=\u0026#39;0,1\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference 
between the percentile and the average value, the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} - avg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - service_percentile{_=\u0026#39;0,1\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","title":"Metrics Query Expression(MQE) Syntax","url":"/docs/main/latest/en/api/metrics-query-expression/"},{"content":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. 
Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics Since v10.0.0, SkyWalking supports multiple labels metrics. We could query the specific labels of the metric by the following expression.\nExpression:\n\u0026lt;metric_name\u0026gt;{\u0026lt;label1_name\u0026gt;=\u0026#39;\u0026lt;label1_value_1\u0026gt;,...\u0026#39;, \u0026lt;label2_name\u0026gt;=\u0026#39;\u0026lt;label2_value_1\u0026gt;,...\u0026#39;,\u0026lt;label2...} {\u0026lt;label1_name\u0026gt;='\u0026lt;label_value_1\u0026gt;,...'} is the selected label name/value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: The k8s_cluster_deployment_status metric has labels namespace, deployment and status. If we want to query all deployment metric value with namespace=skywalking-showcase and status=true, we can use the following expression:\nk8s_cluster_deployment_status{namespace=\u0026#39;skywalking-showcase\u0026#39;, status=\u0026#39;true\u0026#39;} We also could query the label with multiple values by separating the values with ,: If we want to query the service_percentile metric with the label name p and values 50,75,90,95,99, we can use the following expression:\nservice_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions result on both sides of the operator are with labels, they should have the same labels for calculation. If the labels match, will reserve left expression result labels and the calculated value. 
Otherwise, will return empty value.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns 
the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs calculation to get the TopN of Services/Instances/Endpoints. The result depends on the entity condition in the query.\n Global TopN:  The entity is empty. The result is the topN Services/Instances/Endpoints in the whole traffics. Notice: If query the Endpoints metric, the global candidate set could be huge, please use it carefully.   Service\u0026rsquo;s Instances/Endpoints TopN:  The serviceName in the entity is not empty. The result is the topN Instances/Endpoints of the service.    Expression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;)  top_number is the number of the top results, should be a positive integer. order is the order of the top results. The value of order can be asc or des.  For example: If we want to query the current service\u0026rsquo;s top 10 instances with the highest service_instance_cpm metric value, we can use the following expression under specific service:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results. Since v10.0.0, SkyWalking supports relabel multiple labels.\nExpression:\nrelabel(Expression, \u0026lt;target_label_name\u0026gt;=\u0026#39;\u0026lt;origin_label_value_1\u0026gt;,...\u0026#39;, \u0026lt;new_label_name\u0026gt;=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) The order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 50,75,90,95,99, and rename the label name to percentile and the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;}, p=\u0026#39;50,75,90,95,99\u0026#39;, percentile=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, \u0026lt;AggregateType\u0026gt;(\u0026lt;label1_name\u0026gt;,\u0026lt;label2_name\u0026gt;...))  AggregateType is the type of the aggregation operation. \u0026lt;label1_name\u0026gt;,\u0026lt;label2_name\u0026gt;... is the label names that need to be aggregated. If not specified, all labels will be aggregated. Optional.     
AggregateType Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value): Aggregating all the labels:\naggregate_labels(total_commands_rate, sum) Also, we can aggregate by the cmd label:\naggregate_labels(total_commands_rate, sum(cmd)) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nIsPresent Operation IsPresent operation represents that in a list of metrics, if any expression has a value, it would return 1 in the result; otherwise, it would return 0.\nExpression:\nis_present([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: When the meter does not exist or the metrics has no value, it would return 0. However, if the metrics list contains meter with values, it would return 1.\nis_present(not_existing, existing_without_value, existing_with_value) Result Type The result type is SINGLE_VALUE, and the result(1 or 0) in the first value.\nTrend Operation Trend Operation takes an expression and performs a trend calculation on its results.\nExpression:\n\u0026lt;Trend-Operator\u0026gt;(Metrics Expression, time_range) time_range is the positive int of the calculated range. The unit will automatically align with to the query Step, for example, if the query Step is MINUTE, the unit of time_range is minute.\n   Operator Definition ExpressionResultType     increase returns the increase in the time range in the time series TIME_SERIES_VALUES   rate returns the per-second average rate of increase in the time range in the time series TIME_SERIES_VALUES    For example: If we want to query the increase value of the service_cpm metric in 2 minute(assume the query Step is MINUTE), we can use the following expression:\nincrease(service_cpm, 2) If the query duration is 3 minutes, from (T1 to T3) and the metric has values in time series:\nV(T1-2), V(T1-1), V(T1), V(T2), V(T3) then the expression result is:\nV(T1)-V(T1-2), V(T2)-V(T1-1), V(T3)-V(T1) Note:\n If the calculated metric value is empty, the result will be empty. Assume in the T3 point, the increase value = V(T3)-V(T1), If the metric V(T3) or V(T1) is empty, the result value in T3 will be empty.  
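As an illustrative sketch based on the definitions above (example numbers are made up, and the query Step is assumed to be MINUTE): since rate is the per-second average rate of increase in the time range, rate(service_cpm, 2) would divide each 2-minute increase by 120 seconds, so an increase of 60 over the 2-minute range corresponds to a rate of 0.5.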
Result Type TIME_SERIES_VALUES.\nSort Operation SortValues Operation SortValues Operation takes an expression and sorts the values of the input expression result.\nExpression:\nsort_values(Expression, \u0026lt;limit\u0026gt;, \u0026lt;order\u0026gt;)  limit is the number of the sort results, should be a positive integer, if not specified, will return all results. Optional. order is the order of the sort results. The value of order can be asc or des.  For example: If we want to sort the service_resp_time metric values in descending order and get the top 10 values, we can use the following expression:\nsort_values(service_resp_time, 10, des) Result Type The result type follows the input expression.\nSortLabelValues Operation SortLabelValues Operation takes an expression and sorts the label values of the input expression result. This function uses natural sort order.\nExpression:\nsort_label_values(Expression, \u0026lt;order\u0026gt;, \u0026lt;label1_name\u0026gt;, \u0026lt;label2_name\u0026gt; ...)  order is the order of the sort results. The value of order can be asc or des. \u0026lt;label1_name\u0026gt;, \u0026lt;label2_name\u0026gt; ... is the label names that need to be sorted by their values. At least one label name should be specified. The labels in the head of the list will be sorted first, and if the label not be included in the expression result will be ignored.  For example: If we want to sort the service_percentile metric label values in descending order by the p label, we can use the following expression:\nsort_label_values(service_percentile{p=\u0026#39;50,75,90,95,99\u0026#39;}, des, p) For multiple labels, assume the metric has 2 labels:\nmetric{label1=\u0026#39;a\u0026#39;, label2=\u0026#39;2a\u0026#39;} metric{label1=\u0026#39;a\u0026#39;, label2=\u0026#39;2c\u0026#39;} metric{label1=\u0026#39;b\u0026#39;, label2=\u0026#39;2a\u0026#39;} metric{label1=\u0026#39;b\u0026#39;, label2=\u0026#39;2c\u0026#39;} If we want to sort the metric metric label values in descending order by the label1 and label2 labels, we can use the following expression:\nsort_label_values(metric, des, label1, label2) And the result will be:\nmetric{label1=\u0026#39;b\u0026#39;, label2=\u0026#39;2c\u0026#39;} metric{label1=\u0026#39;b\u0026#39;, label2=\u0026#39;2a\u0026#39;} metric{label1=\u0026#39;a\u0026#39;, label2=\u0026#39;2c\u0026#39;} metric{label1=\u0026#39;a\u0026#39;, label2=\u0026#39;2a\u0026#39;} Expression Query Example Labeled Value Metrics service_percentile{p=\u0026#39;50,95\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, 
\u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{p=\u0026#39;50,75\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{p=\u0026#39;50,75\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the percentile and the average value, the expression is:\nservice_percentile{p=\u0026#39;50,75\u0026#39;} - avg(service_percentile{p=\u0026#39;50,75\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: 
\u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - service_percentile{p=\u0026#39;50,75\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;50\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;p\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;75\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","title":"Metrics Query Expression(MQE) Syntax","url":"/docs/main/next/en/api/metrics-query-expression/"},{"content":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics Expression:\n\u0026lt;metric_name\u0026gt;{label=\u0026#39;\u0026lt;label_1\u0026gt;,...\u0026#39;} label is the selected label of the metric. 
If label is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the labels 0,1,2,3,4, we can use the following expression:\nservice_percentile{label=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the labels to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation Binary Operation is an operation that takes two expressions and performs a calculation on their results. The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type The result type of the expression please refer to the following table.\nBinary Operation Rules The following table listed if the difference result types of the input expressions could do this operation and the result type after the operation. The expression could on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Aggregation Operation Aggregation Operation takes an expression and performs aggregate calculation on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operator could impact the ExpressionResultType, please refer to the above table.\nFunction Operation Function Operation takes an expression and performs function calculation on its results.\nExpression:\n\u0026lt;Function-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operator could impact the ExpressionResultType, please refer to the above table.\nTopN 
Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replace the labels to new labels on its results.\nExpression:\nrelabel(Expression, label=\u0026#39;\u0026lt;new_label_1\u0026gt;,...\u0026#39;) label is the new labels of the metric after the label is relabeled, the order of the new labels should be the same as the order of the labels in the input expression result.\nFor example: If we want to query the service_percentile metric with the labels 0,1,2,3,4, and rename the labels to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{label=\u0026#39;0,1,2,3,4\u0026#39;}, label=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\n","title":"Metrics Query Expression(MQE) Syntax","url":"/docs/main/v9.5.0/en/api/metrics-query-expression/"},{"content":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics For now, we only have a single anonymous label with multi label values in a labeled metric. To be able to use it in expressions, define _ as the anonymous label name (key).\nExpression:\n\u0026lt;metric_name\u0026gt;{_=\u0026#39;\u0026lt;label_value_1\u0026gt;,...\u0026#39;} {_='\u0026lt;label_value_1\u0026gt;,...'} is the selected label value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, we can use the following expression:\nservice_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. 
The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result 
SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater or equal to the result  follow the input expression   floor returns the largest integer value that is greater or equal to the result  follow the input expression   round returns result round to specific decimal places places: a positive integer specific decimal places of the result follow the input expression    For example: If we want to query the average value of the service_cpm metric in seconds, and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results, should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 services with the highest service_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results.\nExpression:\nrelabel(Expression, _=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) _ is the new label of the metric after the label is relabeled, the order of the new label values should be the same as the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, and rename the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;}, _=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. 
It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, parameter)    parameter Definition ExpressionResultType     avg calculate avg value of a Labeled Value Metrics TIME_SERIES_VALUES   sum calculate sum value of a Labeled Value Metrics TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metrics TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metrics TIME_SERIES_VALUES    For example: If we want to query all Redis command total rates, we can use the following expression(total_commands_rate is a metric which recorded every command rate in labeled value):\naggregate_labels(total_commands_rate, SUM) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation ViewAsSequence operation represents the first not-null metric from the listing metrics in the given prioritized sequence(left to right). It could also be considered as a short-circuit of given metrics for the first value existing metric.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of selected not-null metric expression.\nExpression Query Example Labeled Value Metrics service_percentile{_=\u0026#39;0,1\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, 
\u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the percentile and the average value, the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} - avg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression 
is:\nservice_resp_time - service_percentile{_=\u0026#39;0,1\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","title":"Metrics Query Expression(MQE) Syntax","url":"/docs/main/v9.6.0/en/api/metrics-query-expression/"},{"content":"Metrics Query Expression(MQE) Syntax MQE is a string that consists of one or more expressions. Each expression could be a combination of one or more operations. The expression allows users to do simple query-stage calculation through V3 APIs.\nExpression = \u0026lt;Operation\u0026gt; Expression1 \u0026lt;Operation\u0026gt; Expression2 \u0026lt;Operation\u0026gt; Expression3 ... The following document lists the operations supported by MQE.\nMetrics Expression Metrics Expression will return a collection of time-series values.\nCommon Value Metrics Expression:\n\u0026lt;metric_name\u0026gt; For example: If we want to query the service_sla metric, we can use the following expression:\nservice_sla Result Type The ExpressionResultType of the expression is TIME_SERIES_VALUES.\nLabeled Value Metrics For now, we only have a single anonymous label with multi label values in a labeled metric. To be able to use it in expressions, define _ as the anonymous label name (key).\nExpression:\n\u0026lt;metric_name\u0026gt;{_=\u0026#39;\u0026lt;label_value_1\u0026gt;,...\u0026#39;} {_='\u0026lt;label_value_1\u0026gt;,...'} is the selected label value of the metric. If is not specified, all label values of the metric will be selected.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, we can use the following expression:\nservice_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;} If we want to rename the label values to P50,P75,P90,P95,P99, see Relabel Operation.\nResult Type The ExpressionResultType of the expression is TIME_SERIES_VALUES and with labels.\nBinary Operation The Binary Operation is an operation that takes two expressions and performs a calculation on their results. 
The following table lists the binary operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Binary-Operator\u0026gt; Expression2    Operator Definition     + addition   - subtraction   * multiplication   / division   % modulo    For example: If we want to transform the service_sla metric value to percent, we can use the following expression:\nservice_sla / 100 Result Type For the result type of the expression, please refer to the following table.\nBinary Operation Rules The following table lists if the different result types of the input expressions could do this operation and the result type after the operation. The expression could be on the left or right side of the operator. Note: If the expressions on both sides of the operator are the TIME_SERIES_VALUES with labels, they should have the same labels for calculation.\n   Expression Expression Yes/No ExpressionResultType     SINGLE_VALUE SINGLE_VALUE Yes SINGLE_VALUE   SINGLE_VALUE TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   SINGLE_VALUE SORTED_LIST/RECORD_LIST Yes SORTED_LIST/RECORD_LIST   TIME_SERIES_VALUES TIME_SERIES_VALUES Yes TIME_SERIES_VALUES   TIME_SERIES_VALUES SORTED_LIST/RECORD_LIST no    SORTED_LIST/RECORD_LIST SORTED_LIST/RECORD_LIST no     Compare Operation Compare Operation takes two expressions and compares their results. The following table lists the compare operations supported by MQE.\nExpression:\nExpression1 \u0026lt;Compare-Operator\u0026gt; Expression2    Operator Definition     \u0026gt; greater than   \u0026gt;= greater than or equal   \u0026lt; less than   \u0026lt;= less than or equal   == equal   != not equal    The result of the compare operation is an int value:\n 1: true 0: false  For example: Compare the service_resp_time metric value if greater than 3000, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } we can use the following expression:\nservice_resp_time \u0026gt; 3000 and get result:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 1, \u0026#34;traceID\u0026#34;: null}] } ] } } } Compare Operation Rules and Result Type Same as the Binary Operation Rules.\nAggregation Operation Aggregation Operation takes an expression and performs aggregate calculations on its results.\nExpression:\n\u0026lt;Aggregation-Operator\u0026gt;(Expression)    Operator Definition ExpressionResultType     avg average the result SINGLE_VALUE   count count number of the result SINGLE_VALUE   latest select the latest non-null value from the result SINGLE_VALUE   sum sum the result 
SINGLE_VALUE   max select maximum from the result SINGLE_VALUE   min select minimum from the result SINGLE_VALUE    For example: If we want to query the average value of the service_cpm metric, we can use the following expression:\navg(service_cpm) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nMathematical Operation Mathematical Operation takes an expression and performs mathematical calculations on its results.\nExpression:\n\u0026lt;Mathematical-Operator\u0026gt;(Expression, parameters)    Operator Definition parameters ExpressionResultType     abs returns the absolute value of the result  follow the input expression   ceil returns the smallest integer value that is greater than or equal to the result  follow the input expression   floor returns the largest integer value that is less than or equal to the result  follow the input expression   round returns the result rounded to specific decimal places places: a positive integer specifying the decimal places of the result follow the input expression    For example: If we want to convert the service_cpm metric to a per-second rate and round the result to 2 decimal places, we can use the following expression:\nround(service_cpm / 60 , 2) Result Type The different operators could impact the ExpressionResultType, please refer to the above table.\nTopN Operation TopN Operation takes an expression and performs a TopN calculation on its results.\nExpression:\ntop_n(\u0026lt;metric_name\u0026gt;, \u0026lt;top_number\u0026gt;, \u0026lt;order\u0026gt;) top_number is the number of the top results and should be a positive integer.\norder is the order of the top results. The value of order can be asc or des.\nFor example: If we want to query the top 10 service instances with the highest service_instance_cpm metric value, we can use the following expression:\ntop_n(service_instance_cpm, 10, des) Result Type According to the type of the metric, the ExpressionResultType of the expression will be SORTED_LIST or RECORD_LIST.\nRelabel Operation Relabel Operation takes an expression and replaces the label values with new label values on its results.\nExpression:\nrelabel(Expression, _=\u0026#39;\u0026lt;new_label_value_1\u0026gt;,...\u0026#39;) _ is the new label of the metric after relabeling; the order of the new label values should match the order of the label values in the input expression result.\nFor example: If we want to query the service_percentile metric with the label values 0,1,2,3,4, and rename the label values to P50,P75,P90,P95,P99, we can use the following expression:\nrelabel(service_percentile{_=\u0026#39;0,1,2,3,4\u0026#39;}, _=\u0026#39;P50,P75,P90,P95,P99\u0026#39;) Result Type Follow the input expression.\nAggregateLabels Operation AggregateLabels Operation takes an expression and performs an aggregate calculation on its Labeled Value Metrics results. 
It aggregates a group of TIME_SERIES_VALUES into a single TIME_SERIES_VALUES.\nExpression:\naggregate_labels(Expression, parameter)    parameter Definition ExpressionResultType     avg calculate the average value of a Labeled Value Metric TIME_SERIES_VALUES   sum calculate the sum value of a Labeled Value Metric TIME_SERIES_VALUES   max select the maximum value from a Labeled Value Metric TIME_SERIES_VALUES   min select the minimum value from a Labeled Value Metric TIME_SERIES_VALUES    For example: If we want to query the total rate of all Redis commands, we can use the following expression (total_commands_rate is a labeled metric that records the rate of every command):\naggregate_labels(total_commands_rate, SUM) Result Type The ExpressionResultType of the aggregateLabels operation is TIME_SERIES_VALUES.\nLogical Operation ViewAsSequence Operation The ViewAsSequence operation returns the first not-null metric from the listed metrics in the given prioritized sequence (left to right). It can be considered a short-circuit over the given metrics, selecting the first metric that has values.\nExpression:\nview_as_seq([\u0026lt;expression_1\u0026gt;, \u0026lt;expression_2\u0026gt;, ...]) For example: if the first expression value is empty but the second one is not empty, it would return the result from the second expression. The following example would return the content of the service_cpm metric.\nview_as_seq(not_existing, service_cpm) Result Type The result type is determined by the type of the selected not-null metric expression.\nTrend Operation Trend Operation takes an expression and performs a trend calculation on its results.\nExpression:\n\u0026lt;Trend-Operator\u0026gt;(Metrics Expression, time_range) time_range is a positive integer specifying the calculated range. Its unit automatically aligns with the query Step; for example, if the query Step is MINUTE, the unit of time_range is minutes.\n   Operator Definition ExpressionResultType     increase returns the increase in the time range in the time series TIME_SERIES_VALUES   rate returns the per-second average rate of increase in the time range in the time series TIME_SERIES_VALUES    For example: If we want to query the increase value of the service_cpm metric over 2 minutes (assuming the query Step is MINUTE), we can use the following expression:\nincrease(service_cpm, 2) If the query duration is 3 minutes, from T1 to T3, and the metric has values in the time series:\nV(T1-2), V(T1-1), V(T1), V(T2), V(T3) then the expression result is:\nV(T1)-V(T1-2), V(T2)-V(T1-1), V(T3)-V(T1) Note:\n If the calculated metric value is empty, the result will be empty. Assume at the T3 point the increase value = V(T3)-V(T1); if the metric V(T3) or V(T1) is empty, the result value at T3 will be empty.  
Result Type TIME_SERIES_VALUES.\nExpression Query Example Labeled Value Metrics service_percentile{_=\u0026#39;0,1\u0026#39;} The example result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2000, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2000\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3000, \u0026#34;traceID\u0026#34;: null}] } ] } } } If we want to transform the percentile value unit from ms to s the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} / 1000 { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 2, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3, \u0026#34;traceID\u0026#34;: null}] } ] } } } Get the average value of each percentile, the expression is:\navg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;SINGLE_VALUE\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: null, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference 
between the percentile and the average value, the expression is:\nservice_percentile{_=\u0026#39;0,1\u0026#39;} - avg(service_percentile{_=\u0026#39;0,1\u0026#39;}) { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;-500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 500, \u0026#34;traceID\u0026#34;: null}] } ] } } } Calculate the difference between the service_resp_time and the service_percentile, if the service_resp_time result is:\n{ \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;2500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: 3500, \u0026#34;traceID\u0026#34;: null}] } ] } } } The expression is:\nservice_resp_time - service_percentile{_=\u0026#39;0,1\u0026#39;} { \u0026#34;data\u0026#34;: { \u0026#34;execExpression\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;TIME_SERIES_VALUES\u0026#34;, \u0026#34;error\u0026#34;: null, \u0026#34;results\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;0\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] }, { \u0026#34;metric\u0026#34;: { \u0026#34;labels\u0026#34;: [{\u0026#34;key\u0026#34;: \u0026#34;_\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;1\u0026#34;}] }, \u0026#34;values\u0026#34;: [{\u0026#34;id\u0026#34;: \u0026#34;1691658000000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}, {\u0026#34;id\u0026#34;: \u0026#34;1691661600000\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;500\u0026#34;, \u0026#34;traceID\u0026#34;: null}] } ] } } } ","title":"Metrics Query Expression(MQE) Syntax","url":"/docs/main/v9.7.0/en/api/metrics-query-expression/"},{"content":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. 
SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"MicroMeter Observations setup","url":"/docs/main/latest/en/setup/backend/micrometer-observations/"},{"content":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the SkyWalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  
","title":"MicroMeter Observations setup","url":"/docs/main/next/en/setup/backend/micrometer-observations/"},{"content":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"MicroMeter Observations setup","url":"/docs/main/v9.4.0/en/setup/backend/micrometer-observations/"},{"content":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count. 
 ","title":"MicroMeter Observations setup","url":"/docs/main/v9.5.0/en/setup/backend/micrometer-observations/"},{"content":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"MicroMeter Observations setup","url":"/docs/main/v9.6.0/en/setup/backend/micrometer-observations/"},{"content":"MicroMeter Observations setup Micrometer Observation is part of the Micrometer project and contains the Observation API. SkyWalking integrates its MicroMeter 1.10 APIs so that it can send metrics to the Skywalking Meter System.\nFollow Java agent Observations docs to set up agent in the Spring first.\nSet up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-micrometer}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. 
JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"MicroMeter Observations setup","url":"/docs/main/v9.7.0/en/setup/backend/micrometer-observations/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. 
generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. 
Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/latest/en/setup/backend/backend-data-generator/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. 
By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. 
the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/next/en/setup/backend/backend-data-generator/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. 
You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. 
generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/v9.1.0/en/setup/backend/backend-data-generator/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. You can use this module to generate mock data that will be sent to the storage.\nTo start the data generator, execute the script tools/data-generator/bin/start.sh.\nNote that SkyWalking doesn\u0026rsquo;t release a Docker image for this module, but you can still build it yourselves by running the commands:\n# build a Docker image for local use make docker.data-generator # or push to your registry export HUB=\u0026lt;your-registry\u0026gt; make push.docker.data-generator Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are some generators that can be used to fill the fields.\nGenerate mock data To generate mock data, POST a request to URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:\ncurl -XPOST \u0026#39;http://localhost:12800/mock-data/segments/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@segment-template.json\u0026#34; curl -XPOST \u0026#39;http://localhost:12800/mock-data/logs/tasks?size=20\u0026#39; -H\u0026#39;Content-Type: application/json\u0026#39; -d \u0026#34;@logs-template.json\u0026#34; There are two possible types of task to generate mock data, size and qps:\n size (/mock-data/segments/tasks?size=20): the task will generate total number of size segments/logs and then finish. 
qps (/mock-data/segments/tasks?qps=20): the task will generate qps segments/logs per second continuously, until the task is cancelled.  Refer to the segment template, the log template and the Generators for more details about how to compose a template.\nCancel a task When the task is acknowledged by the server it will return a task id that can be used to cancelled the task by sending a DELETE request to URL path /mock-data/logs/tasks with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in previous request}):\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1\u0026#39; Cancel all tasks When needed, you can also send a DELETE request to path /mock-data/segments/tasks to cancel all segment tasks.\ncurl -XDELETE \u0026#39;http://localhost:12800/mock-data/segments/tasks curl -XDELETE \u0026#39;http://localhost:12800/mock-data/logs/tasks Generators uuid uuid generator leverages java.util.UUID to generate a string. You can use uuid generator to fill the traceId field of segments.\nchangingFrequency property can be used when you want to reuse a uuid for multiple times, for example, if you want a traceId to be reused by 5 segments, then setting changingFrequency to 5 would do the trick. By setting changingFrequency to 5, uuid generates 1 string, and uses it for 5 times, then re-generates a new uuid string and uses it for another 5 times.\n\u0026#34;traceId\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;uuid\u0026#34;, \u0026#34;changingFrequency\u0026#34;: \u0026#34;5\u0026#34; } randomString (String) length (int) length specifies the length of the random string to be generated, i.e. generatedString.length() == length is always true.\nprefix (String) prefix is always added to the random strings after they are generated, that means:\n generatedString.startsWith(prefix) is always true, and, generatedString.length() == length + prefix.length() is always true.  letters (boolean) Specifies whether the random string contains letters (i.e. a-zA-Z).\nnumbers (boolean) Specifies whether the random string contains numbers (i.e. 
0-9).\ndomainSize (int) When generating random strings, you might just want some random strings and use them over and over again randomly, by setting domainSize, the generator generates domainSize random strings, and pick them randomly every time you need a string.\nrandomBool (boolean) This generator generates a Boolean value, true or false with a default possibility of 50%, while you can change the possibility below.\npossibility (double, [0, 1]) possibility is a double value \u0026gt;= 0 and \u0026lt;= 1, it\u0026rsquo;s 0.5 by default, meaning about half of the generated values are true.\nTo always return a fixed boolean value true, you can just set the possibility to 1, to always return a fixed boolean value false, you can set the possibility to 0\n\u0026#34;error\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomBool\u0026#34;, \u0026#34;possibility\u0026#34;: \u0026#34;0.9\u0026#34; }  90 percent of the generated values are true.\n randomInt (long) min (long) The minimum value of the random integers, meaning all generated values satisfy generatedInt \u0026gt;= min.\nmax (long) The maximum value of the random integers, meaning all generated values satisfy generatedInt \u0026lt; min.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nrandomList (list / array) size (int) The list size of the generated list, i.e. generatedList.size() == size.\nitem (object) item is a template that will be use as a prototype to generate the list items, for example when generating a list of Tag, the item should be the prototype of Tag, which can be composed by the generators again.\n\u0026#34;tags\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomList\u0026#34;, \u0026#34;size\u0026#34;: 5, \u0026#34;item\u0026#34;: { \u0026#34;key\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_tag_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true, \u0026#34;domainSize\u0026#34;: 10 }, \u0026#34;value\u0026#34;: { \u0026#34;type\u0026#34;: \u0026#34;randomString\u0026#34;, \u0026#34;length\u0026#34;: \u0026#34;10\u0026#34;, \u0026#34;prefix\u0026#34;: \u0026#34;test_value_\u0026#34;, \u0026#34;letters\u0026#34;: true, \u0026#34;numbers\u0026#34;: true } } } fixedString (string) This generator always returns a fixed value of string.\nsequence (long) sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.\nmin (long) The minimum value of the sequence.\nmax (long) The maximum value of the sequence.\nstep (long) The increasing step of this sequence, i.e. the next generated value == the previous value + step.\ndomainSize (int) This is similar to randomString\u0026rsquo;s domainSize.\nfluctuation (int) By default, sequence is strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they are increasing. Adding property fluctuation to the generator will add a random number \u0026gt;= -fluctuation, \u0026lt;= fluctuation to the sequence elements.\nFor example, min = 10, max = 15, step = 1 generates a sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate a sequence [10, 12, 11, 14, 13, 15].\n","title":"Mock data generator for testing","url":"/docs/main/v9.2.0/en/setup/backend/backend-data-generator/"},{"content":"Mock data generator for testing In 9.1.0, SkyWalking adds a module to generate mock data for testing. 
You can use this module to generate mock data that will be sent to the storage.

To start the data generator, execute the script tools/data-generator/bin/start.sh.

Note that SkyWalking doesn't release a Docker image for this module, but you can still build one yourself by running the following commands:

# build a Docker image for local use
make docker.data-generator
# or push to your registry
export HUB=<your-registry>
make push.docker.data-generator

Currently the module can generate two kinds of SkyWalking data, segments and logs. For each type, there are generators that can be used to fill the fields.

Generate mock data

To generate mock data, POST a request to the URL path /mock-data/segments/tasks (segments) or /mock-data/logs/tasks (logs) with a generator template:

curl -XPOST 'http://localhost:12800/mock-data/segments/tasks?size=20' -H'Content-Type: application/json' -d "@segment-template.json"
curl -XPOST 'http://localhost:12800/mock-data/logs/tasks?size=20' -H'Content-Type: application/json' -d "@logs-template.json"

There are two possible types of task to generate mock data, size and qps:

- size (/mock-data/segments/tasks?size=20): the task generates a total of size segments/logs and then finishes.
- qps (/mock-data/segments/tasks?qps=20): the task generates qps segments/logs per second continuously, until the task is cancelled.

Refer to the segment template, the log template and the Generators section for more details about how to compose a template.

Cancel a task

When a task is acknowledged by the server, it returns a task id that can be used to cancel the task by sending a DELETE request to the URL path with a parameter requestId (i.e. /mock-data/logs/tasks?requestId={request id returned in the previous request}); an end-to-end sketch of these calls appears at the end of this section:

curl -XDELETE 'http://localhost:12800/mock-data/segments/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1'
curl -XDELETE 'http://localhost:12800/mock-data/logs/task?requestId=70d8a39e-b51e-49de-a6fc-43abf80482c1'

Cancel all tasks

When needed, you can also send a DELETE request to the path /mock-data/segments/tasks or /mock-data/logs/tasks to cancel all segment or log tasks:

curl -XDELETE 'http://localhost:12800/mock-data/segments/tasks'
curl -XDELETE 'http://localhost:12800/mock-data/logs/tasks'

Generators

uuid

The uuid generator leverages java.util.UUID to generate a string. You can use the uuid generator to fill the traceId field of segments.

The changingFrequency property can be used when you want to reuse a uuid multiple times. For example, if you want a traceId to be reused by 5 segments, setting changingFrequency to 5 does the trick: uuid generates 1 string and uses it 5 times, then generates a new uuid string and uses it for another 5 times.

"traceId": { "type": "uuid", "changingFrequency": "5" }

randomString (String)

- length (int): the length of the random string to be generated, i.e. generatedString.length() == length is always true.
- prefix (String): a prefix that is always added to the random strings after they are generated, which means generatedString.startsWith(prefix) is always true, and generatedString.length() == length + prefix.length() is always true.
- letters (boolean): whether the random string contains letters (i.e. a-zA-Z).
- numbers (boolean): whether the random string contains numbers (i.e. 0-9).
- domainSize (int): when generating random strings, you might just want a fixed pool of random strings that are reused over and over again. By setting domainSize, the generator generates domainSize random strings and picks one of them randomly every time you need a string.

randomBool (boolean)

This generator generates a Boolean value, true or false, with a default possibility of 50%; the possibility can be changed with the property below.

- possibility (double, [0, 1]): a double value >= 0 and <= 1, 0.5 by default, meaning about half of the generated values are true. To always return the fixed boolean value true, set the possibility to 1; to always return false, set it to 0.

"error": { "type": "randomBool", "possibility": "0.9" }

With this template, 90 percent of the generated values are true.

randomInt (long)

- min (long): the minimum value of the random integers, meaning all generated values satisfy generatedInt >= min.
- max (long): the maximum value of the random integers, meaning all generated values satisfy generatedInt < max.
- domainSize (int): similar to randomString's domainSize.

randomList (list / array)

- size (int): the size of the generated list, i.e. generatedList.size() == size.
- item (object): a template that is used as a prototype to generate the list items. For example, when generating a list of Tag, the item should be the prototype of a Tag, which can again be composed of the generators.

"tags": { "type": "randomList", "size": 5, "item": { "key": { "type": "randomString", "length": "10", "prefix": "test_tag_", "letters": true, "numbers": true, "domainSize": 10 }, "value": { "type": "randomString", "length": "10", "prefix": "test_value_", "letters": true, "numbers": true } } }

fixedString (string)

This generator always returns a fixed string value.

sequence (long)

The sequence generator generates a sequence of monotonically increasing integers, with a configurable fluctuation.

- min (long): the minimum value of the sequence.
- max (long): the maximum value of the sequence.
- step (long): the increasing step of the sequence, i.e. the next generated value == the previous value + step.
- domainSize (int): similar to randomString's domainSize.
- fluctuation (int): by default, sequence generates strictly increasing numbers, but in some cases you might want the numbers to fluctuate slightly while they increase. Adding the fluctuation property adds a random number >= -fluctuation and <= fluctuation to the sequence elements. For example, min = 10, max = 15, step = 1 generates the sequence [10, 11, 12, 13, 14, 15], but adding fluctuation = 2 might generate [10, 12, 11, 14, 13, 15] (see the sketch below).

(Source: "Mock data generator for testing", /docs/main/v9.3.0/en/setup/backend/backend-data-generator/; the same page is published unchanged for v9.4.0 through v9.7.0.)
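To make the fluctuation behaviour concrete, here is a minimal Go sketch of the documented sequence semantics (min, max, step, fluctuation). It is an illustration only, not the data generator's actual implementation:

package main

import (
	"fmt"
	"math/rand"
)

// sequenceValues produces one pass of a sequence from min to max with the
// given step, adding a random offset in [-fluctuation, fluctuation] to each
// element, mirroring the behaviour described above.
func sequenceValues(min, max, step, fluctuation int64) []int64 {
	var out []int64
	for v := min; v <= max; v += step {
		var delta int64
		if fluctuation > 0 {
			delta = rand.Int63n(2*fluctuation+1) - fluctuation
		}
		out = append(out, v+delta)
	}
	return out
}

func main() {
	// min = 10, max = 15, step = 1, fluctuation = 2, as in the example above
	fmt.Println(sequenceValues(10, 15, 1, 2))
}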
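Putting the task API together, the following Go sketch creates a size-bounded segment task from a template file and shows how to cancel it later. It assumes the data generator is reachable at localhost:12800 as in the curl examples and that segment-template.json is in the working directory; the exact format of the acknowledgement body is not shown on this page, so the sketch simply prints it:

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
)

const base = "http://localhost:12800" // address used by the curl examples above

// createSegmentTask posts a generator template and returns the raw response
// body, which contains the task/request id acknowledged by the server.
func createSegmentTask(templatePath string, size int) (string, error) {
	tpl, err := os.ReadFile(templatePath)
	if err != nil {
		return "", err
	}
	url := fmt.Sprintf("%s/mock-data/segments/tasks?size=%d", base, size)
	resp, err := http.Post(url, "application/json", bytes.NewReader(tpl))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	return string(body), err
}

// cancelSegmentTask cancels a previously created task by its request id.
// Note: the curl example above uses the singular path /mock-data/segments/task,
// while the prose uses /tasks; use whichever your OAP version exposes.
func cancelSegmentTask(requestID string) error {
	url := fmt.Sprintf("%s/mock-data/segments/task?requestId=%s", base, requestID)
	req, err := http.NewRequest(http.MethodDelete, url, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	ack, err := createSegmentTask("segment-template.json", 20)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("server acknowledged:", ack)
	// once you have the request id from the acknowledgement:
	// _ = cancelSegmentTask("70d8a39e-b51e-49de-a6fc-43abf80482c1")
}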
Module Design (skywalking-infra-e2e)

(Source: "Module Design", /docs/skywalking-infra-e2e/latest/en/concepts-and-designs/module-design/; the same page is published for next and v1.3.0.)

Controller

The controller composes all the steps declared in the configuration file, runs them progressively, and displays which step is currently running. If a step fails, the error message is shown as comprehensively as possible. An example of the output might be:

e2e run
✔ Started Kind Cluster - Cluster Name
✔ Checked Pods Readiness - All pods are ready
? Generating Traffic - HTTP localhost:9090/users (progress spinner)
✔ Verified Output - service ls
(progress spinner) Verifying Output - endpoint ls
✘ Failed to Verify Output Data - endpoint ls
<the diff content>
✔ Clean Up

Compared with running the steps one by one, the controller is also responsible for cleaning up the environment (by executing the cleanup command) no matter how the other commands end, even if they fail. The controller has the following semantics in terms of setup and cleanup:

// Java
try {
    setup();
    // trigger step
    // verify step
    // ...
} finally {
    cleanup();
}

// GoLang
func run() {
    setup()
    defer cleanup()
    // trigger step
    // verify step
    // ...
}

Steps

According to the content of the Controller, E2E testing can be divided into the following steps.

Setup

Start the environment required for this E2E test, such as the database, back-end process, API, etc.

Two ways of setting up the environment are supported:

- compose:
  - Start the docker-compose services.
  - Check the services' healthiness.
  - Wait until all services are ready according to the interval, etc.
  - Execute commands to set up the testing environment or help verify, such as using yq to evaluate the YAML format.
- kind:
  - Start the KinD cluster according to the config files, or start on an existing Kubernetes cluster.
  - Apply the resource files (--manifests) and/or run the custom init commands (--commands).
  - Check the pods' readiness.
  - Wait until all pods are ready according to the interval, etc.

Trigger

Generate traffic by triggering the action; it can access an HTTP API or execute commands at an interval.

It can have these settings (see the sketch after this section for how interval and times interact):

- interval: how frequently to trigger the action.
- times: how many times the action is triggered before aborting, on the condition that the trigger has always failed. 0 = infinite.
- action: the action of the trigger.

Verify

Verify that the data content matches the expected results, much like a unit-test assertion.

It can have these settings:

- actual: the actual data file.
- query: the query to get the actual data; it can run shell commands to generate the data.
- expected: the expected data file, which can specify matching rules to verify the actual content.

Cleanup

This step takes the same options as the setup step so that it can clean up everything necessary, such as destroying the environment.
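The interval/times behaviour of the trigger can be pictured with a small Go sketch. It is conceptual only, not skywalking-infra-e2e's actual code; the HTTP action and the 30-second stop are placeholders:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// runTrigger fires action every interval until stop is closed. If the first
// `times` attempts all fail (the trigger "had failed always"), it aborts;
// times == 0 means never abort, mirroring the documented 0=infinite.
func runTrigger(stop <-chan struct{}, interval time.Duration, times int, action func() error) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	attempts, succeededOnce := 0, false
	for {
		select {
		case <-stop:
			return nil
		case <-ticker.C:
			attempts++
			if err := action(); err == nil {
				succeededOnce = true
			} else if !succeededOnce && times > 0 && attempts >= times {
				return fmt.Errorf("trigger aborted: first %d attempts all failed: %w", attempts, err)
			}
		}
	}
}

func main() {
	// placeholder action matching the "HTTP localhost:9090/users" output above
	action := func() error {
		resp, err := http.Get("http://localhost:9090/users")
		if err != nil {
			return err
		}
		return resp.Body.Close()
	}
	stop := make(chan struct{})
	time.AfterFunc(30*time.Second, func() { close(stop) }) // e.g. the controller finishing the verify step
	if err := runTrigger(stop, 3*time.Second, 10, action); err != nil {
		fmt.Println(err)
	}
}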
Module Design (SkyWalking Satellite)

(Source: "Module Design", /docs/skywalking-satellite/latest/en/concepts-and-designs/module_design/; the same page is published for next and v1.2.0.)

Pipe

The pipe is an isolation concept in Satellite. Each pipe has one pipeline to process the telemetry data (metrics/traces/logs). Two pipes do not share data. The original diagram shows a Satellite process containing several independent pipes:

Satellite
  [ Pipe ]
  [ Pipe ]
  [ Pipe ]

Modules

Modules are the core workers in Satellite. A module is constituted by specific extension plugins. There are 3 modules in one namespace: Gatherer, Processor, and Sender.

- The Gatherer module is responsible for fetching or receiving data and pushing it to the Queue. There are therefore 2 kinds of Gatherer: ReceiverGatherer and FetcherGatherer.
- The Processor module is responsible for reading data from the queue and processing it through a series of filter chains.
- The Sender module is responsible for asynchronously processing and forwarding the data to external services in batch mode. After a successful send, the Sender also acknowledges the offset of the Queue in the Gatherer.

Within a pipe, the modules form the chain Gatherer => Processor => Sender (a conceptual sketch of this chain follows this section).

LifeCycle

- Prepare: does preparation work, such as registering the client status listener to the client in ReceiverGatherer.
- Boot: starts the current module until it receives a close signal.
- ShutDown: closes the used resources.

Plugins

Plugins are the minimal components in a module. Satellite has 2 plugin catalogs: sharing plugins and normal plugins.

- A sharing plugin instance can be shared by multiple modules in different pipes.
- A normal plugin instance is only used in a fixed module of a fixed pipe.

Sharing plugin

Nowadays there are 2 kinds of sharing plugins in Satellite: server plugins and client plugins. The reason they are shared is to reduce the resource cost of connections. Server plugins are shared by the ReceiverGatherer modules in the different pipes to receive external requests, and client plugins are shared by the Sender modules in the different pipes to connect with external services, such as Kafka and OAP.

Normal plugin

There are 7 kinds of normal plugins in Satellite: Receiver, Fetcher, Queue, Parser, Filter, Forwarder, and Fallbacker.

- Receiver: receives the input APM data from requests.
- Fetcher: fetches the APM data actively.
- Queue: stores the APM data to ensure data stability.
- Parser: supports ways to parse data, such as parsing a CSV file.
- Filter: processes the APM data.
- Forwarder: forwards the APM data to the external receiver, such as Kafka and OAP.
- Fallbacker: supports fallback strategies, such as a timer retry strategy.

The original diagram shows the data path Gatherer (Receiver/Fetcher => Queue, memory or file based) => Processor (filter chain => OutputEventContext) => Sender (BatchBuffer => dispatcher => Segment/Meter Forwarders with a Fallbacker => Kafka/OAP). The flow works as follows:

1. The Fetcher/Receiver plugin fetches or receives the input data.
2. The Parser plugin parses the input data into a SerializableEvent that can be stored in the Queue.
3. The Queue plugin stores the SerializableEvent. Whether it is actually serialized depends on the Queue implementation; for example, serialization is unnecessary when using a memory Queue. Once an event is pulled by the consumer of the Queue, it is processed by the filters in the Processor.
4. The Filter plugin processes the event to create a new event, which is passed to the next filter to do the same, until all filters have run. All created events are stored in the OutputEventContext; however, only the events labeled with the RemoteEvent type are forwarded by the Forwarder.
5. After processing, the events in the OutputEventContext are stored in the BatchBuffer. When the timer fires or the capacity limit is reached, the events in the BatchBuffer are partitioned by EventType and sent to the different Forwarders, such as the Segment Forwarder and the Meter Forwarder.
6. The Forwarders in different Senders share the remote client to avoid making duplicate connections, and use the same Fallbacker (fallback strategy) to process data. When all forwarders send successfully, or the Fallbacker processes the data successfully, the dispatcher acknowledges the batch as a success.
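The Gatherer => Processor => Sender chain can be illustrated with a toy Go pipeline built on channels. This is purely conceptual: Satellite's real modules are built from extension plugins and include parsing, acknowledgement and fallback handling that are omitted here; the event type and the filter below are made up for the example:

package main

import (
	"fmt"
	"strings"
)

type event struct{ payload string }

// gatherer stands in for a ReceiverGatherer/FetcherGatherer: it takes raw
// input and pushes events onto the queue.
func gatherer(inputs []string, queue chan<- event) {
	for _, in := range inputs {
		queue <- event{payload: in}
	}
	close(queue)
}

// processor pulls events from the queue and runs them through a filter chain.
func processor(queue <-chan event, out chan<- event, filters ...func(event) event) {
	for e := range queue {
		for _, f := range filters {
			e = f(e)
		}
		out <- e
	}
	close(out)
}

// sender batches processed events and forwards each batch; in Satellite the
// batches would go to Kafka or OAP, here they are just printed.
func sender(out <-chan event, batchSize int) {
	batch := make([]event, 0, batchSize)
	flush := func() {
		if len(batch) > 0 {
			fmt.Printf("forwarding batch of %d: %v\n", len(batch), batch)
			batch = batch[:0]
		}
	}
	for e := range out {
		batch = append(batch, e)
		if len(batch) == batchSize {
			flush()
		}
	}
	flush()
}

func main() {
	queue := make(chan event, 8) // stands in for the Queue plugin (memory based)
	processed := make(chan event, 8)

	go gatherer([]string{"segment-a", "segment-b", "log-c"}, queue)
	go processor(queue, processed, func(e event) event {
		e.payload = strings.ToUpper(e.payload) // a trivial Filter
		return e
	})
	sender(processed, 2)
}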
==\u0026gt; | Filter | | | | /Fetcher | | Mem/File | | | ----------- ----------- | | ----------- ---------- | | || || | -------------------------------- | \\/\t\\/ | | --------------------------------------- | | | OutputEventContext | | | --------------------------------------- | ------------------------------------------- || \\/ Sender ------------------------------------------ | --- --- | | | B | | D | ----------------- | | | A | | I | |Segment Forwarder| | | | T | | S | | (Fallbacker) | | | | C | | P | ----------------- | | | H | =\u0026gt; | A | | ===\u0026gt; Kafka/OAP | | B | | T | =\u0026gt; ...... | | | U | | C | | | | F | | H | ----------------- | | | F | | E | | Meter Forwarder| | | | E | | R | | (Fallbacker | | | | R | | | ----------------- | | --- --- | ------------------------------------------ 1. The Fetcher/Receiver plugin would fetch or receive the input data. 2. The Parser plugin would parse the input data to SerializableEvent that is supported to be stored in Queue. 3. The Queue plugin stores the SerializableEvent. However, whether serializing depends on the Queue implements. For example, the serialization is unnecessary when using a Memory Queue. Once an event is pulled by the consumer of Queue, the event will be processed by the filters in Processor. 4. The Filter plugin would process the event to create a new event. Next, the event is passed to the next filter to do the same things until the whole filters are performed. All created events would be stored in the OutputEventContext. However, only the events labeled with RemoteEvent type would be forwarded by Forwarder. 5. After processing, the events in OutputEventContext would be stored in the BatchBuffer. When the timer is triggered or the capacity limit is reached, the events in BatchBuffer would be partitioned by EventType and sent to the different Forwarders, such as Segment Forwarder and Meter Forwarder. 6. The Follower in different Senders would share with the remote client to avoid make duplicate connections and have the same Fallbacker(FallBack strategy) to process data. When all forwarders send success or process success in Fallbacker, the dispatcher would also ack the batch is a success. ============================================================================================ ","title":"Module Design","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/module_design/"},{"content":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. 
In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Avg QPS  meter_mongodb_cluster_document_avg_qps Avg document operations rate of nodes mongodb-exporter   Operation Avg QPS  meter_mongodb_cluster_operation_avg_qps Avg operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Avg  meter_mongodb_cluster_cursor_avg Avg Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Avg Data Size Per Shard (GB) GB meter_mongodb_cluster_db_data_size Avg data size per shard (replSet) of every database mongodb-exporter   DB Avg Index Size Per Shard (GB) GB meter_mongodb_cluster_db_index_size Avg index size per shard (replSet) of every database mongodb-exporter   DB Avg Collection Count Per Shard  meter_mongodb_cluster_db_collection_count Avg collection count per shard (replSet) of every database mongodb-exporter   DB Avg Index Count Per Shard  meter_mongodb_cluster_db_index_count Avg index count per shard (replSet) of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_latency Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per 
second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_operation_latency Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","title":"MongoDB monitoring","url":"/docs/main/latest/en/setup/backend/backend-mongodb-monitoring/"},{"content":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. 
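The "Config SkyWalking OpenTelemetry receiver" step above usually means enabling the otel receiver in the OAP's application.yml, commonly through environment variables. The fragment below is a sketch based on recent OAP releases; handler and variable names may differ in your version, so check the bundled file.
receiver-otel:
  selector: ${SW_OTEL_RECEIVER:default}
  default:
    enabledHandlers: ${SW_OTEL_RECEIVER_ENABLED_HANDLERS:"otlp-metrics"}
    enabledOtelMetricsRules: ${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:"mongodb/*"}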
In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Total QPS  meter_mongodb_cluster_document_avg_qps Total document operations rate of nodes mongodb-exporter   Operation Total QPS  meter_mongodb_cluster_operation_avg_qps Total operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Total  meter_mongodb_cluster_cursor_avg Total Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Total Data Size (GB) GB meter_mongodb_cluster_db_data_size Total data size of every database mongodb-exporter   DB Total Index Size (GB) GB meter_mongodb_cluster_db_index_size Total index size per of every database mongodb-exporter   DB Total Collection Count  meter_mongodb_cluster_db_collection_count Total collection count of every database mongodb-exporter   DB Total Index Count  meter_mongodb_cluster_db_index_count Total index count of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_op_ratemeter_mongodb_node_latency_rate Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per second mongodb-exporter   Operation Latency (µs) µs 
meter_mongodb_node_op_ratemeter_mongodb_node_latency_rate Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","title":"MongoDB monitoring","url":"/docs/main/next/en/setup/backend/backend-mongodb-monitoring/"},{"content":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. 
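For the "Set up mongodb-exporter" step above, running the exporter next to a MongoDB node can look roughly like the docker-compose sketch below; the image tag, credentials, and connection URI are placeholders.
services:
  mongodb-exporter:
    image: percona/mongodb_exporter:0.40        # tag is illustrative
    command:
      - "--mongodb.uri=mongodb://monitor:password@mongodb:27017"   # assumed credentials and host
      - "--collect-all"
    ports:
      - "9216:9216"      # default metrics port, scraped by the OpenTelemetry Collector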
In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Avg QPS  meter_mongodb_cluster_document_avg_qps Avg document operations rate of nodes mongodb-exporter   Operation Avg QPS  meter_mongodb_cluster_operation_avg_qps Avg operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Avg  meter_mongodb_cluster_cursor_avg Avg Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Avg Data Size Per Shard (GB) GB meter_mongodb_cluster_db_data_size Avg data size per shard (replSet) of every database mongodb-exporter   DB Avg Index Size Per Shard (GB) GB meter_mongodb_cluster_db_index_size Avg index size per shard (replSet) of every database mongodb-exporter   DB Avg Collection Count Per Shard  meter_mongodb_cluster_db_collection_count Avg collection count per shard (replSet) of every database mongodb-exporter   DB Avg Index Count Per Shard  meter_mongodb_cluster_db_index_count Avg index count per shard (replSet) of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_latency Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per 
second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_operation_latency Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","title":"MongoDB monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-mongodb-monitoring/"},{"content":"MongoDB monitoring SkyWalking leverages mongodb-exporter for collecting metrics data from MongoDB. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The mongodb-exporter collects metrics data from MongoDB. The exporter works side by side with the MongoDB node. OpenTelemetry Collector fetches metrics from mongodb-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup mongodb-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  MongoDB Monitoring MongoDB monitoring provides multidimensional metrics monitoring of MongoDB clusters as Layer: MONGODB Service in the OAP. 
In each cluster, the nodes are represented as Instance.\nMongoDB Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_cluster_uptime Maximum uptime of nodes in the cluster mongodb-exporter   Data Size (GB) GB meter_mongodb_cluster_data_size Total data size of the cluster mongodb-exporter   Collection Count  meter_mongodb_cluster_collection_count Number of collection of the cluster mongodb-exporter   Object Count  meter_mongodb_cluster_object_count Number of object of the cluster mongodb-exporter   Document Avg QPS  meter_mongodb_cluster_document_avg_qps Avg document operations rate of nodes mongodb-exporter   Operation Avg QPS  meter_mongodb_cluster_operation_avg_qps Avg operations rate of nodes mongodb-exporter   Total Connections  meter_mongodb_cluster_connections Cluster total connections of nodes mongodb-exporter   Cursor Avg  meter_mongodb_cluster_cursor_avg Avg Opened cursor of nodes mongodb-exporter   Replication Lag (ms) ms meter_mongodb_cluster_repl_lag Repl set member avg replication lag, this metric works in repl mode mongodb-exporter   DB Avg Data Size Per Shard (GB) GB meter_mongodb_cluster_db_data_size Avg data size per shard (replSet) of every database mongodb-exporter   DB Avg Index Size Per Shard (GB) GB meter_mongodb_cluster_db_index_size Avg index size per shard (replSet) of every database mongodb-exporter   DB Avg Collection Count Per Shard  meter_mongodb_cluster_db_collection_count Avg collection count per shard (replSet) of every database mongodb-exporter   DB Avg Index Count Per Shard  meter_mongodb_cluster_db_index_count Avg index count per shard (replSet) of every database mongodb-exporter    MongoDB Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime (day) day meter_mongodb_node_uptime Uptime of the node mongodb-exporter   QPS  meter_mongodb_node_qps Operations per second of the node mongodb-exporter   Latency µs meter_mongodb_node_latency Latency of operations mongodb-exporter   Memory Usage % meter_mongodb_node_memory_usage Memory usage percent of RAM mongodb-exporter   Version  meter_mongodb_node_version MongoDB edition and version mongodb-exporter   ReplSet State  meter_mongodb_node_rs_state Repl set state of the node, this metric works in repl mode mongodb-exporter   CPU Usage (%) % meter_mongodb_node_cpu_total_percentage Cpu usage percent of the node mongodb-exporter   Network (KB/s) KB/s meter_mongodb_node_network_bytes_inmeter_mongodb_node_network_bytes_out Inbound and outbound network bytes of node mongodb-exporter   Memory Free (GB) GB meter_mongodb_node_memory_free_kbmeter_mongodb_node_swap_memory_free_kb Free memory of RAM and swap mongodb-exporter   Disk (GB) GB meter_mongodb_node_fs_used_sizemeter_mongodb_node_fs_total_size Used and total size of disk mongodb-exporter   Connections  meter_mongodb_node_connections Connection nums of node mongodb-exporter   Active Client  meter_mongodb_node_active_total_nummeter_mongodb_node_active_reader_nummeter_mongodb_node_active_writer_num Count of active reader and writer mongodb-exporter   Transactions  meter_mongodb_node_transactions_activemeter_mongodb_node_transactions_inactive Count of transactions running on the node mongodb-exporter   Document QPS  meter_mongodb_node_document_qps Document operations per second mongodb-exporter   Operation QPS  meter_mongodb_node_operation_qps Operations per second mongodb-exporter   Repl Operation QPS  meter_mongodb_node_repl_operation_qps Repl operations per 
second mongodb-exporter   Operation Latency (µs) µs meter_mongodb_node_operation_latency Latencies for different operation type mongodb-exporter   Cursor  meter_mongodb_node_cursor Opened cursor of the node mongodb-exporter   Server Status Memory (MB) MB meter_mongodb_node_mem_virtualmeter_mongodb_node_mem_resident Virtual and resident memory of the node mongodb-exporter   Asserts  meter_mongodb_node_asserts The rate of raised assertions mongodb-exporter   Repl Buffer Count  meter_mongodb_node_repl_buffer_count The current number of operations in the oplog buffer mongodb-exporter   Repl Buffer Size (MB) MB meter_mongodb_node_repl_buffer_sizemeter_mongodb_node_repl_buffer_size_max The maximum size of the oplog buffer mongodb-exporter   Queued Operation  meter_mongodb_node_queued_operation The number of operations queued because of a lock mongodb-exporter   getLastError Write Num  meter_mongodb_node_write_wait_nummeter_mongodb_node_write_wait_timeout_num The number of write concern operation mongodb-exporter   getLastError Write Time (ms) ms meter_mongodb_node_write_wait_time The wait time of write concern operation mongodb-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mongodb/mongodb-cluster.yaml, /config/otel-rules/mongodb/mongodb-node.yaml. The MongoDB dashboard panel configurations are found in /config/ui-initialized-templates/mongodb.\n","title":"MongoDB monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-mongodb-monitoring/"},{"content":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document for more details.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\n","title":"MySQL","url":"/docs/main/latest/en/setup/backend/storages/mysql/"},{"content":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. 
Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document for more details.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\n","title":"MySQL","url":"/docs/main/next/en/setup/backend/storages/mysql/"},{"content":"MySQL Activate MySQL as storage, and set storage provider to mysql.\nNOTE: MySQL driver is NOT allowed in Apache official distribution and source codes. Please download the MySQL driver on your own. Copy the connection driver jar to oap-libs.\nstorage:selector:${SW_STORAGE:mysql}mysql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:mysql://localhost:3306/swtest?rewriteBatchedStatements=true\u0026amp;allowMultiQueries=true\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:root}dataSource.password:${SW_DATA_SOURCE_PASSWORD:root@1234}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. See the HikariCP connection pool document for full settings. To understand the function of the parameter rewriteBatchedStatements=true in MySQL, see the MySQL official document for more details.\nIn theory, all other databases that are compatible with MySQL protocol should be able to use this storage plugin, such as TiDB. Please compose the JDBC URL according to the database\u0026rsquo;s documentation.\n","title":"MySQL","url":"/docs/main/v9.7.0/en/setup/backend/storages/mysql/"},{"content":"MySQL monitoring SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data from MySQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL. 
OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL Monitoring MySQL monitoring provides monitoring of the status and resources of the MySQL server. MySQL server as a Service in OAP, and land on the Layer: MYSQL.\nMySQL Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql.yaml. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL monitoring","url":"/docs/main/v9.2.0/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL monitoring MySQL server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . 
For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL Monitoring MySQL monitoring provides monitoring of the status and resources of the MySQL server. MySQL cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql.yaml. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL.\nData flow  fluentbit agent collects slow sql logs from MySQL. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config MySQL to enable slow log.example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL server. MySQL server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. 
The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL monitoring","url":"/docs/main/v9.3.0/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. 
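For orientation, a rule file in that directory roughly follows the shape below; the expression is an illustrative sketch rather than the shipped rule.
expSuffix: tag({tags -> tags.host_name = 'mysql::' + tags.host_name}).service(['host_name'], Layer.MYSQL)
metricPrefix: meter_mysql
metricsRules:
  - name: uptime                      # exposed as meter_mysql_uptime
    exp: mysql_global_status_uptime   # raw metric reported by mysqld_exporter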
The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL/MariaDB monitoring","url":"/docs/main/latest/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. 
mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_max_connections\nmeter_mysql_status_thread_connected\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL/MariaDB monitoring","url":"/docs/main/next/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. 
OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. 
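A Fluent Bit pipeline for this usually tails the slow log and posts it to the OAP over HTTP. The following is a rough sketch in Fluent Bit's YAML configuration format; the log path, OAP address, port, and URI are assumptions, and the linked example configs remain the reference.
pipeline:
  inputs:
    - name: tail
      path: /var/lib/mysql/mysql-slow.log   # assumed slow-log location
      read_from_head: true
  outputs:
    - name: http
      match: '*'
      host: oap        # assumed SkyWalking OAP address
      port: 12800      # assumed OAP HTTP port
      uri: /v3/logs    # assumed log endpoint consumed by the LAL rules
      format: json
The real configuration also needs to shape each record into the payload the OAP expects, which is what the example configs linked above handle.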
MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL/MariaDB monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. 
mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL/MariaDB monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. 
OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. 
MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL/MariaDB monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-mysql-monitoring/"},{"content":"MySQL/MariaDB monitoring MySQL/MariaDB server performance from prometheus/mysqld_exporter SkyWalking leverages prometheus/mysqld_exporter for collecting metrics data. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  mysqld_exporter collect metrics data from MySQL/MariaDB. OpenTelemetry Collector fetches metrics from mysqld_exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up mysqld_exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  MySQL/MariaDB Monitoring MySQL/MariaDB monitoring provides monitoring of the status and resources of the MySQL/MariaDB server. MySQL/MariaDB cluster is cataloged as a Layer: MYSQL Service in OAP. Each MySQL/MariaDB server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     MySQL Uptime day meter_mysql_uptime The MySQL startup time mysqld_exporter   Max Connections  meter_mysql_max_connections The max number of connections. 
mysqld_exporter   Innodb Buffer Pool Size MB meter_mysql_innodb_buffer_pool_size The buffer pool size in Innodb engine mysqld_exporter   Thread Cache Size  meter_mysql_thread_cache_size The size of thread cache mysqld_exporter   Current QPS  meter_mysql_qps Queries Per Second mysqld_exporter   Current TPS  meter_mysql_tps Transactions Per Second mysqld_exporter   Commands Rate  meter_mysql_commands_insert_rate meter_mysql_commands_select_rate\nmeter_mysql_commands_delete_rate\nmeter_mysql_commands_update_rate The rate of total number of insert/select/delete/update executed by the current server mysqld_exporter   Threads  meter_mysql_threads_connected\nmeter_mysql_threads_created\nmeter_mysql_threads_cached\nmeter_mysql_threads_running The number of currently open connections(threads_connected)  The number of threads created(threads_created)  The number of threads in the thread cache(threads_cached)  The number of threads that are not sleeping(threads_running) mysqld_exporter   Connects  meter_mysql_connects_available\nmeter_mysql_connects_aborted The number of available connections(connects_available)The number of MySQL instance connection rejections(connects_aborted) mysqld_exporter   Connection Errors  meter_mysql_connection_errors_internal  meter_mysql_connection_errors_max_connections Errors due to exceeding the max_connections(connection_errors_max_connections) Error caused by internal system(connection_errors_internal) mysqld_exporter   Slow Queries Rate  meter_mysql_slow_queries_rate The rate of slow queries mysqld_exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/mysql. The MySQL dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from MySQL/MariaDB.\nData flow  fluentbit agent collects slow sql logs from MySQL/MariaDB. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for MySQL or here for MariaDB. Enable slow log from here for MySQL or here for MariaDB.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the MySQL/MariaDB server. MySQL/MariaDB server is cataloged as a Layer: MYSQL Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of MySQL/MariaDB slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/mysql-slowsql.yaml The MySQL/MariaDB dashboard panel configurations are found in /config/ui-initialized-templates/mysql.\n","title":"MySQL/MariaDB monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-mysql-monitoring/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. 
It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records (without super datasets, such as segments) indices into the physical index templates metrics-all and records-all. The logic index name is kept in the metric_table or record_table column. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias is the real physical column name.  Super datasets, such as traces and logs, are not affected by this.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true)  OAP shards metrics/meter indices into multiple physical indices, as in previous versions (one index template per metric/meter aggregation function). Records and metrics that do not configure an aggregation function through the @MetricsFunction or @MeterFunction annotation are not merged; they are kept in separate index templates. The shard template name is the aggregation function name prefixed with metrics- or meter-, such as metrics-count, and the logic index name is kept in the metric_table column. The OAP does not use the column alias; the logic column name is the real physical column name.   Notice: Users can still adjust ElasticSearch\u0026rsquo;s shard number (SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/latest/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records (without super datasets, such as segments) indices into the physical index templates metrics-all and records-all. The logic index name is kept in the metric_table or record_table column. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias is the real physical column name.  Super datasets, such as traces and logs, are not affected by this.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true)  OAP shards metrics/meter indices into multiple physical indices, as in previous versions (one index template per metric/meter aggregation function). Records and metrics that do not configure an aggregation function through the @MetricsFunction or @MeterFunction annotation are not merged; they are kept in separate index templates. The shard template name is the aggregation function name prefixed with metrics- or meter-, such as metrics-count, and the logic index name is kept in the metric_table column. The OAP does not use the column alias; the logic column name is the real physical column name.   
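For example, a minimal sketch of switching the mode when launching the OAP server from the binary distribution (only the two variable names come from this page; the shard number value and the startup script path are assumptions):

export SW_STORAGE_ES_LOGIC_SHARDING=true    # Sharding Mode; unset or false keeps the default No-Sharding Mode
export SW_STORAGE_ES_INDEX_SHARDS_NUMBER=3  # ElasticSearch shard number, effective in either mode
bash bin/oapService.sh                      # assumed OAP startup script in the distribution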
Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/next/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/v9.2.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). 
Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/v9.3.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/v9.4.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. 
If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/v9.5.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records(without super datasets, such as segments) indices into one physical index template metrics-all and records-all. The logic index name would be present in columns metric_table or record_table. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias would be the real physical column name.  The super dataset would not be affected by this, such as traces and logs.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true )  OAP shard metrics/meter indices into multi-physical indices as in the previous versions(one index template per metric/meter aggregation function). Records and metrics without configuring aggregation functions with @MetricsFunction or @MeterFunction annotation would not be merged. They would be kept in a separate index template. The shard template name would be metrics-aggregation function name or meter-aggregation function name such as metrics-count, and the logic index name would be present in column metric_table. The OAP would not use the column alias, the logic column name would be the real physical column name.   Notice: Users still could choose to adjust ElasticSearch\u0026rsquo;s shard number(SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/v9.6.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"New ElasticSearch storage option explanation in 9.2.0 Since v9.2.0, SkyWalking OAP provides 2 storage options for all data, including metadata, metrics, traces, logs, events, profiling data, etc.. 
OAP exposes a system environment variable (SW_STORAGE_ES_LOGIC_SHARDING) to control the running mode.\nNo-Sharding Mode (OAP default setting, SW_STORAGE_ES_LOGIC_SHARDING = false) This is the new mode introduced in 9.2.0. It prefers to keep data with similar properties in one index template, such as all metrics and metadata.\n OAP merges all metrics/meter and records (without super datasets, such as segments) indices into the physical index templates metrics-all and records-all. The logic index name is kept in the metric_table or record_table column. If the logic column name has an alias (configured through @ElasticSearch.Column()), the alias is the real physical column name.  Super datasets, such as traces and logs, are not affected by this.\nSharding Mode (SW_STORAGE_ES_LOGIC_SHARDING = true)  OAP shards metrics/meter indices into multiple physical indices, as in previous versions (one index template per metric/meter aggregation function). Records and metrics that do not configure an aggregation function through the @MetricsFunction or @MeterFunction annotation are not merged; they are kept in separate index templates. The shard template name is the aggregation function name prefixed with metrics- or meter-, such as metrics-count, and the logic index name is kept in the metric_table column. The OAP does not use the column alias; the logic column name is the real physical column name.   Notice: Users can still adjust ElasticSearch\u0026rsquo;s shard number (SW_STORAGE_ES_INDEX_SHARDS_NUMBER) to scale out in either mode.\n","title":"New ElasticSearch storage option explanation in 9.2.0","url":"/docs/main/v9.7.0/en/faq/new-elasticsearch-storage-option-explanation-in-9.2.0/"},{"content":"Nginx monitoring Nginx performance from nginx-lua-prometheus nginx-lua-prometheus is a Lua library that can be used with Nginx to collect metrics and expose them on a separate web page. To use this library, you will need Nginx built with lua-nginx-module, or OpenResty directly.\nSkyWalking leverages OpenTelemetry Collector to transfer the metrics to the OpenTelemetry receiver and into the Meter System.\nData flow  nginx-lua-prometheus collects metrics from Nginx and exposes them on an endpoint. OpenTelemetry Collector fetches metrics from the endpoint exposed above via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Collect Nginx metrics and expose the following four metrics through nginx-lua-prometheus. For details on metrics definition, refer to here.   histogram: nginx_http_latency gauge: nginx_http_connections counter: nginx_http_size_bytes counter: nginx_http_requests_total  Set up OpenTelemetry Collector. For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Nginx Monitoring SkyWalking observes the status, payload, and latency of the Nginx server, which is cataloged as a LAYER: Nginx Service in the OAP; instances are recognized as LAYER: Nginx instances.\nAbout LAYER: Nginx endpoints, it depends on how precisely you want to monitor Nginx. 
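Whichever granularity you pick, the log_by_lua_block snippets below assume the metric objects they reference (metric_bytes, metric_requests, metric_latency) have already been created once per worker. A minimal initialization sketch, following the nginx-lua-prometheus README (the shared dictionary size, the label sets, and the :9145 metrics port are assumptions; the metric names are the four listed above):

# inside the http { } context
lua_shared_dict prometheus_metrics 10M;

init_worker_by_lua_block {
    prometheus = require('prometheus').init('prometheus_metrics')
    -- the objects referenced by the log_by_lua_block examples below
    metric_requests = prometheus:counter('nginx_http_requests_total', 'Number of HTTP requests', {'status', 'host'})
    metric_latency = prometheus:histogram('nginx_http_latency', 'HTTP request latency', {'host'})
    metric_bytes = prometheus:counter('nginx_http_size_bytes', 'Total size of HTTP requests and responses', {'type', 'host'})
    metric_connections = prometheus:gauge('nginx_http_connections', 'Number of HTTP connections', {'state'})
}

# a dedicated server block exposes the collected metrics for the OpenTelemetry Collector to scrape
server {
    listen 9145;
    location /metrics {
        content_by_lua_block {
            metric_connections:set(ngx.var.connections_reading, {'reading'})
            metric_connections:set(ngx.var.connections_waiting, {'waiting'})
            metric_connections:set(ngx.var.connections_writing, {'writing'})
            prometheus:collect()
        }
    }
}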
Note that we do not recommend exposing metrics for every request path, because that would cause an explosion of endpoint metric data.\nYou can collect host metrics:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_sent), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or grouped URLs and upstream metrics:\nupstream backend { server ip:port; } server { location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_sent), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_sent), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } Nginx Service Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_service_http_requests Service The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_service_http_latency Service The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_service_bandwidth Service The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_service_http_connections Service The average number of connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_service_http_status Service The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_service_http_4xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_service_http_5xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 5xx status of HTTP requests nginx-lua-prometheus    Nginx Instance Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_instance_http_requests Instance The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_instance_http_latency Instance The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_instance_http_connections Instance The average number of connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_instance_http_status Instance The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 
4xx Percent % meter_nginx_instance_http_4xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_instance_http_5xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Endpoint Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_endpoint_http_requests Endpoint The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_endpoint_http_latency Endpoint The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_endpoint_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Status Trend  meter_nginx_endpoint_http_status Endpoint The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_endpoint_http_4xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_endpoint_http_5xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-rules/nginx-service.yaml, /config/otel-rules/nginx-instance.yaml, /config/otel-rules/nginx-endpoint.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\nCollect nginx access and error log SkyWalking leverages fluentbit or other log agents for collecting access log and error log of Nginx.\nData flow  fluentbit agent collects access log and error log from Nginx. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Install fluentbit. Config fluent bit with fluent-bit.conf, refer to here.  Error Log Monitoring Error Log monitoring provides monitoring of the error.log of the Nginx server.\nSupported Metrics    Monitoring Panel Metric Name Catalog Description Data Source     Service Error Log Count meter_nginx_service_error_log_count Service The count of log level of nginx error.log fluent bit   Instance Error Log Count meter_nginx_instance_error_log_count Instance The count of log level of nginx error.log fluent bit    Customizations You can customize your own metrics/expression/dashboard panel.\nThe log collect and analyse rules are found in /config/lal/nginx.yaml, /config/log-mal-rules/nginx.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\n","title":"Nginx monitoring","url":"/docs/main/latest/en/setup/backend/backend-nginx-monitoring/"},{"content":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua library that can be used with Nginx to collect metrics and expose them on a separate web page. 
To use this library, you will need Nginx with lua-nginx-module or directly OpenResty.\nSkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  nginx-lua-prometheus collects metrics from Nginx and expose them to an endpoint. OpenTelemetry Collector fetches metrics from the endpoint expose above via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Collect Nginx metrics and expose the following four metrics by nginx-lua-prometheus. For details on metrics definition, refer to here.   histogram: nginx_http_latency gauge: nginx_http_connections counter: nginx_http_size_bytes counter: nginx_http_requests_total  Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Nginx Monitoring SkyWalking observes the status, payload, and latency of the Nginx server, which is cataloged as a LAYER: Nginx Service in the OAP and instances would be recognized as LAYER: Nginx instance.\nAbout LAYER: Nginx endpoint, it depends on how precision you want to monitor the nginx. We do not recommend expose every request path metrics, because it will cause explosion of metrics endpoint data.\nYou can collect host metrics:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or grouped urls and upstream metrics:\nupstream backend { server ip:port; } server { location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } Nginx Service Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_service_http_requests Service The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_service_http_latency Service The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_service_bandwidth Service The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_service_http_connections Service The avg number of the connections 
nginx-lua-prometheus   HTTP Status Trend  meter_nginx_service_http_status Service The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_service_http_4xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_service_http_5xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Instance Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_instance_http_requests Instance The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_instance_http_latency Instance The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_instance_http_connections Instance The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_instance_http_status Instance The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_instance_http_4xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_instance_http_5xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Endpoint Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_endpoint_http_requests Endpoint The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_endpoint_http_latency Endpoint The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_endpoint_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Status Trend  meter_nginx_endpoint_http_status Endpoint The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_endpoint_http_4xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_endpoint_http_5xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-rules/nginx-service.yaml, /config/otel-rules/nginx-instance.yaml, /config/otel-rules/nginx-endpoint.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\nCollect nginx access and error log SkyWalking leverages fluentbit or other log agents for collecting access log and error log of Nginx.\nData flow  fluentbit agent collects access log and error log from Nginx. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Install fluentbit. 
Config fluent bit with fluent-bit.conf, refer to here.  Error Log Monitoring Error Log monitoring provides monitoring of the error.log of the Nginx server.\nSupported Metrics    Monitoring Panel Metric Name Catalog Description Data Source     Service Error Log Count meter_nginx_service_error_log_count Service The count of log level of nginx error.log fluent bit   Instance Error Log Count meter_nginx_instance_error_log_count Instance The count of log level of nginx error.log fluent bit    Customizations You can customize your own metrics/expression/dashboard panel.\nThe log collect and analyse rules are found in /config/lal/nginx.yaml, /config/log-mal-rules/nginx.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\n","title":"Nginx monitoring","url":"/docs/main/next/en/setup/backend/backend-nginx-monitoring/"},{"content":"Nginx monitoring Nginx performance from nginx-lua-prometheus The nginx-lua-prometheus is a lua library that can be used with Nginx to collect metrics and expose them on a separate web page. To use this library, you will need Nginx with lua-nginx-module or directly OpenResty.\nSkyWalking leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  nginx-lua-prometheus collects metrics from Nginx and expose them to an endpoint. OpenTelemetry Collector fetches metrics from the endpoint expose above via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Collect Nginx metrics and expose the following four metrics by nginx-lua-prometheus. For details on metrics definition, refer to here.   histogram: nginx_http_latency gauge: nginx_http_connections counter: nginx_http_size_bytes counter: nginx_http_requests_total  Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Nginx Monitoring SkyWalking observes the status, payload, and latency of the Nginx server, which is cataloged as a LAYER: Nginx Service in the OAP and instances would be recognized as LAYER: Nginx instance.\nAbout LAYER: Nginx endpoint, it depends on how precision you want to monitor the nginx. 
We do not recommend expose every request path metrics, because it will cause explosion of metrics endpoint data.\nYou can collect host metrics:\nhttp { log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, ngx.var.host}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, ngx.var.host}) metric_requests:inc(1, {ngx.var.status, ngx.var.host}) metric_latency:observe(tonumber(ngx.var.request_time), {ngx.var.host}) } } or grouped urls and upstream metrics:\nupstream backend { server ip:port; } server { location /test { default_type application/json; return 200 '{\u0026quot;code\u0026quot;: 200, \u0026quot;message\u0026quot;: \u0026quot;success\u0026quot;}'; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;/test/**\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;/test/**\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;/test/**\u0026quot;}) } } location /test_upstream { proxy_pass http://backend; log_by_lua_block { metric_bytes:inc(tonumber(ngx.var.request_length), {\u0026quot;request\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_bytes:inc(tonumber(ngx.var.bytes_send), {\u0026quot;response\u0026quot;, \u0026quot;upstream/backend\u0026quot;}) metric_requests:inc(1, {ngx.var.status, \u0026quot;upstream/backend\u0026quot;}) metric_latency:observe(tonumber(ngx.var.request_time), {\u0026quot;upstream/backend\u0026quot;}) } } } Nginx Service Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_service_http_requests Service The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_service_http_latency Service The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_service_bandwidth Service The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_service_http_connections Service The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_service_http_status Service The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_service_http_4xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_service_http_5xx_requests_increment / meter_nginx_service_http_requests_increment Service The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Instance Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_instance_http_requests Instance The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_instance_http_latency Instance The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_instance_bandwidth Instance The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Connections  meter_nginx_instance_http_connections Instance The avg number of the connections nginx-lua-prometheus   HTTP Status Trend  meter_nginx_instance_http_status Instance The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 
4xx Percent % meter_nginx_instance_http_4xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_instance_http_5xx_requests_increment / meter_nginx_instance_http_requests_increment Instance The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Nginx Endpoint Supported Metrics    Monitoring Panel Unit Metric Name Catalog Description Data Source     HTTP Request Trend  meter_nginx_endpoint_http_requests Endpoint The increment rate of HTTP requests nginx-lua-prometheus   HTTP Latency ms meter_nginx_endpoint_http_latency Endpoint The increment rate of the latency of HTTP requests nginx-lua-prometheus   HTTP Bandwidth KB meter_nginx_endpoint_bandwidth Endpoint The increment rate of the bandwidth of HTTP requests nginx-lua-prometheus   HTTP Status Trend  meter_nginx_endpoint_http_status Endpoint The increment rate of the status of HTTP requests nginx-lua-prometheus   HTTP Status 4xx Percent % meter_nginx_endpoint_http_4xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus   HTTP Status 5xx Percent % meter_nginx_endpoint_http_5xx_requests_increment / meter_nginx_endpoint_http_requests_increment Endpoint The percentage of 4xx status of HTTP requests nginx-lua-prometheus    Customizations You can customize your own metrics/expression/dashboard panel.\nThe metrics definition and expression rules are found in /config/otel-rules/nginx-service.yaml, /config/otel-rules/nginx-instance.yaml, /config/otel-rules/nginx-endpoint.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\nCollect nginx access and error log SkyWalking leverages fluentbit or other log agents for collecting access log and error log of Nginx.\nData flow  fluentbit agent collects access log and error log from Nginx. fluentbit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Install fluentbit. Config fluent bit with fluent-bit.conf, refer to here.  Error Log Monitoring Error Log monitoring provides monitoring of the error.log of the Nginx server.\nSupported Metrics    Monitoring Panel Metric Name Catalog Description Data Source     Service Error Log Count meter_nginx_service_error_log_count Service The count of log level of nginx error.log fluent bit   Instance Error Log Count meter_nginx_instance_error_log_count Instance The count of log level of nginx error.log fluent bit    Customizations You can customize your own metrics/expression/dashboard panel.\nThe log collect and analyse rules are found in /config/lal/nginx.yaml, /config/log-mal-rules/nginx.yaml.\nThe Nginx dashboard panel configurations are found in /config/ui-initialized-templates/nginx.\n","title":"Nginx monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-nginx-monitoring/"},{"content":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. 
So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","title":"OAP backend dependency management","url":"/docs/main/latest/en/guides/dependencies/"},{"content":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","title":"OAP backend dependency management","url":"/docs/main/next/en/guides/dependencies/"},{"content":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. 
Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","title":"OAP backend dependency management","url":"/docs/main/v9.6.0/en/guides/dependencies/"},{"content":"OAP backend dependency management  This section is only applicable to dependencies of the OAP server and UI.\n As one of the Top Level Projects of The Apache Software Foundation (ASF), SkyWalking must follow the ASF 3RD PARTY LICENSE POLICY. So if you\u0026rsquo;re adding new dependencies to the project, you should make sure that the new dependencies would not break the policy, and add their LICENSE and NOTICE to the project.\nWe use license-eye to help you make sure that you haven\u0026rsquo;t missed out any new dependencies:\n Install license-eye according to the doc. Run license-eye dependency resolve --summary ./dist-material/release-docs/LICENSE.tpl in the root directory of this project. Check the modified lines in ./dist-material/release-docs/LICENSE (via command git diff -U0 ./dist-material/release-docs/LICENSE) and check whether the new dependencies' licenses are compatible with Apache 2.0. Add the new dependencies' notice files (if any) to ./dist-material/release-docs/NOTICE if they are Apache 2.0 license. Copy their license files to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license. Copy the new dependencies' license file to ./dist-material/release-docs/licenses if they are not standard Apache 2.0 license.  ","title":"OAP backend dependency management","url":"/docs/main/v9.7.0/en/guides/dependencies/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
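As a rough sketch of what the OpenTelemetry Collector side of the Set up step above can look like, the pipeline is just a Prometheus scrape of the OAP self-telemetry endpoint plus an OTLP/gRPC export back to the OAP (the addresses, port numbers, job name, and scrape interval below are assumptions; consult the OAP Self Observability Telemetry doc for the authoritative configuration):

receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: skywalking-so11y        # assumed job name
          scrape_interval: 10s
          static_configs:
            - targets: ['oap:1234']         # assumed address of the OAP Prometheus telemetry endpoint
exporters:
  otlp:
    endpoint: 'oap:11800'                   # assumed OAP gRPC address
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]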
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/latest/en/setup/backend/dashboards-so11y/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. 
SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. 
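Both files follow the MAL rule format; an illustrative sketch of the shape of such a rule (the metric name, labels, and expression below are made up for illustration rather than copied from oap.yaml):

# illustrative shape of a rule file under config/otel-rules/ or config/fetcher-prom-rules/
metricPrefix: meter_oap
expSuffix: instance(['service'], ['instance'], Layer.SO11Y_OAP)   # assumed label names
metricsRules:
  - name: instance_jvm_memory_bytes_used     # stored as meter_oap_instance_jvm_memory_bytes_used
    exp: jvm_memory_bytes_used.sum(['service', 'instance'])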
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/next/en/setup/backend/dashboards-so11y/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Self Observability. (Optional) Set up OpenTelemetry Collector .. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute 
meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/v9.3.0/en/setup/backend/dashboards-so11y/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count 
meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/v9.4.0/en/setup/backend/dashboards-so11y/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/v9.5.0/en/setup/backend/dashboards-so11y/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. 
SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. 
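For reference, a MAL rule in config/otel-rules/oap.yaml follows the same expSuffix/metricPrefix/metricsRules shape shown elsewhere in these docs. The sketch below is illustrative only: the source metric name cpu_usage_percent and the label names are placeholders, not the actual names exposed by OAP telemetry.
expSuffix: instance(['service'], ['instance'], Layer.SO11Y_OAP)
metricPrefix: meter_oap
metricsRules:
  # derive a per-instance metric from a hypothetical source metric
  - name: instance_cpu_percentage
    exp: cpu_usage_percent.sum(['service', 'instance'])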
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/v9.6.0/en/setup/backend/dashboards-so11y/"},{"content":"OAP self observability dashboard SkyWalking itself collects and exports metrics in Prometheus format for consuming, it also provides a dashboard to visualize the self-observability metrics.\nData flow  SkyWalking OAP collects metrics data internally and exposes a Prometheus http endpoint to retrieve the metrics. SkyWalking OAP itself (or OpenTelemetry Collector, prefered in Kubernetes scenarios) fetches metrics from the Prometheus endpoint in step (1). OAP (or OpenTelemetry Collector) pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up Follow OAP Self Observability Telemetry doc to set up OAP and OpenTelemetry Collector.\nSelf observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Unit Metric Name Description Data Source     Count Per Minute meter_oap_instance_jvm_gc_count GC Count oap self observability   MB meter_oap_instance_jvm_memory_bytes_used Memory oap self observability   ms / min meter_oap_instance_jvm_young_gc_time GC Time (ms / min) oap self observability   ms / min meter_oap_instance_jvm_old_gc_time GC Time (ms / min) oap self observability   Count Per Minute meter_oap_instance_mesh_count Mesh Analysis Count (Per Minute) oap self observability   Count Per Minute meter_oap_instance_mesh_analysis_error_count Mesh Analysis Count (Per Minute) oap self observability   ms meter_oap_instance_trace_latency_percentile Trace Analysis Latency (ms) oap self observability   Count meter_oap_jvm_class_loaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_unloaded_count Class Count oap self observability   Count meter_oap_jvm_class_total_loaded_count Class Count oap self observability   Count meter_oap_instance_persistence_prepare_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_instance_persistence_execute_count Persistence Count (Per 5 Minutes) oap self observability   Count meter_oap_jvm_thread_live_count Thread Count oap self observability   Count meter_oap_jvm_thread_peak_count Thread Count oap self observability   Count meter_oap_jvm_thread_daemon_count Thread Count oap self observability   ms meter_oap_instance_persistence_execute_percentile Persistence Execution Latency Per Metric Type (ms) oap self observability   ms meter_oap_instance_persistence_prepare_percentile Persistence Preparing Latency Per Metric Type (ms) oap self observability   Count meter_oap_jvm_thread_runnable_count Thread State Count oap self observability   Count meter_oap_jvm_thread_timed_waiting_count Thread State Count oap self observability   Count meter_oap_jvm_thread_blocked_count Thread State Count oap self observability   Count meter_oap_jvm_thread_waiting_count Thread State Count oap self observability   Count per minute meter_oap_instance_metrics_aggregation Aggregation (Per Minute) oap self observability   ms meter_oap_instance_mesh_latency_percentile Mesh Analysis Latency (ms) oap self observability   Count per minute meter_oap_instance_trace_count Trace Analysis Count (Per Minute) oap self observability   Count 
per minute meter_oap_instance_trace_analysis_error_count Trace Analysis Count (Per Minute) oap self observability   Percentage meter_oap_instance_cpu_percentage CPU (%) oap self observability   Count meter_oap_instance_metrics_persistent_cache count of metrics cache hit and no-hit oap self observability    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/fetcher-prom-rules/self.yaml and config/otel-rules/oap.yaml. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_oap.\n","title":"OAP self observability dashboard","url":"/docs/main/v9.7.0/en/setup/backend/dashboards-so11y/"},{"content":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs:\n OAPServerConfig: The CRD holds all static configuration, including environment variable and file configuration. OAPServerDynamicConfig: The CRD holds all dynamic configuration.  Spec of OAPServerConfig    Field Name Description     Version The version of OAP server, the default value is 9.5.0   Env The environment variable of OAP server   File The static file in OAP Server, which contains three fieldsfile.path、file.name and file.data. The file.path plus the file.name is the real file that needs to be replaced in the container image, and the file.data is the final data in the specific file.    Status of OAPServerConfig    Field Name Description     Desired The number of oapserver that need to be configured   Ready The number of oapserver that configured successfully   CreationTime The time the OAPServerConfig was created.   LastUpdateTime The last time this condition was updated.    Demo of OAPServerConfig  When using the file, please don\u0026rsquo;t set the same name\n # static configuration of OAPServerapiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerConfigmetadata:name:oapserverconfig-samplenamespace:skywalking-systemspec:# The version of OAPServerversion:9.5.0# The env configuration of OAPServerenv:- name:JAVA_OPTSvalue:-Xmx2048M- name:SW_CLUSTERvalue:kubernetes- name:SW_CLUSTER_K8S_NAMESPACEvalue:skywalking-system# enable the dynamic configuration- name:SW_CONFIGURATIONvalue:k8s-configmap# set the labelselector of the dynamic configuration- name:SW_CLUSTER_K8S_LABELvalue:app=collector,release=skywalking- name:SW_TELEMETRYvalue:prometheus- name:SW_HEALTH_CHECKERvalue:default- name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uid- name:SW_LOG_LAL_FILESvalue:test1- name:SW_LOG_MAL_FILESvalue:test2# The file configuration of OAPServer# we should avoid setting the same file name in the filefile:- name:test1.yamlpath:/skywalking/config/laldata:|rules: - name: example dsl: | filter { text { abortOnFailure false // for test purpose, we want to persist all logs regexp $/(?s)(?\u0026lt;timestamp\u0026gt;\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3}) \\[TID:(?\u0026lt;tid\u0026gt;.+?)] \\[(?\u0026lt;thread\u0026gt;.+?)] (?\u0026lt;level\u0026gt;\\w{4,}) (?\u0026lt;logger\u0026gt;.{1,36}) (?\u0026lt;msg\u0026gt;.+)/$ } extractor { metrics { timestamp log.timestamp as Long labels level: parsed.level, service: log.service, instance: log.serviceInstance name \u0026#34;log_count\u0026#34; value 1 } } sink { } }- name:test2.yamlpath:/skywalking/config/log-mal-rulesdata:|expSuffix: instance([\u0026#39;service\u0026#39;], [\u0026#39;instance\u0026#39;], Layer.GENERAL) metricPrefix: log metricsRules: - name: count_info exp: 
log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;INFO\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).downsampling(SUM)Spec of OAPServerDynamicConfig    Field Name Description     Version The version of the OAP server, the default value is 9.5.0   LabelSelector The label selector of the specific configmap, the default value is \u0026ldquo;app=collector,release=skywalking\u0026rdquo;   Data All configurations' key and value    Status of OAPServerDynamicConfig    Field Name Description     State The state of dynamic configuration, running or stopped   CreationTime All configurations in one CR, the default value is false   LastUpdateTime The last time this condition was updated    Usage of OAPServerDynamicConfig  Notice, the CR\u0026rsquo;s name cannot contain capital letters.\n Users can split all configurations into several CRs. when using the OAPServerDynamicConfig, users can not only put some configurations in a CR, but also put a configuration in a CR, and the spec.data.name in CR represents one dynamic configuration.\nDemo of Global configuration apiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:oapserverdynamicconfig-samplespec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:agent-analyzer.default.slowDBAccessThresholdvalue:default:200,mongodb:50- name:alarm.default.alarm-settingsvalue:|-rules: # Rule unique name, must be ended with `_rule`. service_resp_time_rule: metrics-name: service_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 3 silence-period: 5 message: Response time of service {name} is more than 1000ms in 3 minutes of last 10 minutes. service_sla_rule: # Metrics value need to be long, double or int metrics-name: service_sla op: \u0026#34;\u0026lt;\u0026#34; threshold: 8000 # The length of time to evaluate the metrics period: 10 # How many times after the metrics match the condition, will trigger alarm count: 2 # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period. 
silence-period: 3 message: Successful rate of service {name} is lower than 80% in 2 minutes of last 10 minutes service_resp_time_percentile_rule: # Metrics value need to be long, double or int metrics-name: service_percentile op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000,1000,1000,1000,1000 period: 10 count: 3 silence-period: 5 message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000 service_instance_resp_time_rule: metrics-name: service_instance_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 2 silence-period: 5 message: Response time of service instance {name} is more than 1000ms in 2 minutes of last 10 minutes database_access_resp_time_rule: metrics-name: database_access_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of database access {name} is more than 1000ms in 2 minutes of last 10 minutes endpoint_relation_resp_time_rule: metrics-name: endpoint_relation_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of endpoint relation {name} is more than 1000ms in 2 minutes of last 10 minutes # Active endpoint related metrics alarm will cost more memory than service and service instance metrics alarm. # Because the number of endpoint is much more than service and instance. # # endpoint_resp_time_rule: # metrics-name: endpoint_resp_time # op: \u0026#34;\u0026gt;\u0026#34; # threshold: 1000 # period: 10 # count: 2 # silence-period: 5 # message: Response time of endpoint {name} is more than 1000ms in 2 minutes of last 10 minutes webhooks: # - http://127.0.0.1/notify/ # - http://127.0.0.1/go-wechat/- name:core.default.apdexThresholdvalue:|-default: 500 # example: # the threshold of service \u0026#34;tomcat\u0026#34; is 1s # tomcat: 1000 # the threshold of service \u0026#34;springboot1\u0026#34; is 50ms # springboot1: 50- name:agent-analyzer.default.uninstrumentedGatewaysvalue:|-#gateways: # - name: proxy0 # instances: # - host: 127.0.0.1 # the host/ip of this gateway instance # port: 9099 # the port of this gateway instance, defaults to 80Demo of Single configuration Set the dynamic configuration agent-analyzer.default.slowDBAccessThreshold as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:agent-analyzer.defaultspec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:slowDBAccessThresholdvalue:default:200,mongodb:50Set the dynamic configuration core.default.endpoint-name-grouping-openapi.customerAPI-v1 and core.default.endpoint-name-grouping-openapi.productAPI-v1 as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:core.default.endpoint-name-grouping-openapispec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:customerAPI-v1value:value of customerAPI-v1- name:productAPI-v1value:value of productAPI-v1","title":"OAPSever Configuration Introduction","url":"/docs/skywalking-swck/latest/oapserver-configuration/"},{"content":"OAPSever Configuration Introduction 
To configure the OAP Sever, we propose two CRDs:\n OAPServerConfig: The CRD holds all static configuration, including environment variable and file configuration. OAPServerDynamicConfig: The CRD holds all dynamic configuration.  Spec of OAPServerConfig    Field Name Description     Version The version of OAP server, the default value is 9.5.0   Env The environment variable of OAP server   File The static file in OAP Server, which contains three fieldsfile.path、file.name and file.data. The file.path plus the file.name is the real file that needs to be replaced in the container image, and the file.data is the final data in the specific file.    Status of OAPServerConfig    Field Name Description     Desired The number of oapserver that need to be configured   Ready The number of oapserver that configured successfully   CreationTime The time the OAPServerConfig was created.   LastUpdateTime The last time this condition was updated.    Demo of OAPServerConfig  When using the file, please don\u0026rsquo;t set the same name\n # static configuration of OAPServerapiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerConfigmetadata:name:oapserverconfig-samplenamespace:skywalking-systemspec:# The version of OAPServerversion:9.5.0# The env configuration of OAPServerenv:- name:JAVA_OPTSvalue:-Xmx2048M- name:SW_CLUSTERvalue:kubernetes- name:SW_CLUSTER_K8S_NAMESPACEvalue:skywalking-system# enable the dynamic configuration- name:SW_CONFIGURATIONvalue:k8s-configmap# set the labelselector of the dynamic configuration- name:SW_CLUSTER_K8S_LABELvalue:app=collector,release=skywalking- name:SW_TELEMETRYvalue:prometheus- name:SW_HEALTH_CHECKERvalue:default- name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uid- name:SW_LOG_LAL_FILESvalue:test1- name:SW_LOG_MAL_FILESvalue:test2# The file configuration of OAPServer# we should avoid setting the same file name in the filefile:- name:test1.yamlpath:/skywalking/config/laldata:|rules: - name: example dsl: | filter { text { abortOnFailure false // for test purpose, we want to persist all logs regexp $/(?s)(?\u0026lt;timestamp\u0026gt;\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3}) \\[TID:(?\u0026lt;tid\u0026gt;.+?)] \\[(?\u0026lt;thread\u0026gt;.+?)] (?\u0026lt;level\u0026gt;\\w{4,}) (?\u0026lt;logger\u0026gt;.{1,36}) (?\u0026lt;msg\u0026gt;.+)/$ } extractor { metrics { timestamp log.timestamp as Long labels level: parsed.level, service: log.service, instance: log.serviceInstance name \u0026#34;log_count\u0026#34; value 1 } } sink { } }- name:test2.yamlpath:/skywalking/config/log-mal-rulesdata:|expSuffix: instance([\u0026#39;service\u0026#39;], [\u0026#39;instance\u0026#39;], Layer.GENERAL) metricPrefix: log metricsRules: - name: count_info exp: log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;INFO\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).downsampling(SUM)Spec of OAPServerDynamicConfig    Field Name Description     Version The version of the OAP server, the default value is 9.5.0   LabelSelector The label selector of the specific configmap, the default value is \u0026ldquo;app=collector,release=skywalking\u0026rdquo;   Data All configurations' key and value    Status of OAPServerDynamicConfig    Field Name Description     State The state of dynamic configuration, running or stopped   CreationTime All configurations in one CR, the default value is false   LastUpdateTime The last time this condition was updated    Usage of OAPServerDynamicConfig  Notice, the CR\u0026rsquo;s name cannot contain capital 
letters.\n Users can split all configurations into several CRs. when using the OAPServerDynamicConfig, users can not only put some configurations in a CR, but also put a configuration in a CR, and the spec.data.name in CR represents one dynamic configuration.\nDemo of Global configuration apiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:oapserverdynamicconfig-samplespec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:agent-analyzer.default.slowDBAccessThresholdvalue:default:200,mongodb:50- name:alarm.default.alarm-settingsvalue:|-rules: # Rule unique name, must be ended with `_rule`. service_resp_time_rule: metrics-name: service_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 3 silence-period: 5 message: Response time of service {name} is more than 1000ms in 3 minutes of last 10 minutes. service_sla_rule: # Metrics value need to be long, double or int metrics-name: service_sla op: \u0026#34;\u0026lt;\u0026#34; threshold: 8000 # The length of time to evaluate the metrics period: 10 # How many times after the metrics match the condition, will trigger alarm count: 2 # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period. silence-period: 3 message: Successful rate of service {name} is lower than 80% in 2 minutes of last 10 minutes service_resp_time_percentile_rule: # Metrics value need to be long, double or int metrics-name: service_percentile op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000,1000,1000,1000,1000 period: 10 count: 3 silence-period: 5 message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000 service_instance_resp_time_rule: metrics-name: service_instance_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 2 silence-period: 5 message: Response time of service instance {name} is more than 1000ms in 2 minutes of last 10 minutes database_access_resp_time_rule: metrics-name: database_access_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of database access {name} is more than 1000ms in 2 minutes of last 10 minutes endpoint_relation_resp_time_rule: metrics-name: endpoint_relation_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of endpoint relation {name} is more than 1000ms in 2 minutes of last 10 minutes # Active endpoint related metrics alarm will cost more memory than service and service instance metrics alarm. # Because the number of endpoint is much more than service and instance. 
# # endpoint_resp_time_rule: # metrics-name: endpoint_resp_time # op: \u0026#34;\u0026gt;\u0026#34; # threshold: 1000 # period: 10 # count: 2 # silence-period: 5 # message: Response time of endpoint {name} is more than 1000ms in 2 minutes of last 10 minutes webhooks: # - http://127.0.0.1/notify/ # - http://127.0.0.1/go-wechat/- name:core.default.apdexThresholdvalue:|-default: 500 # example: # the threshold of service \u0026#34;tomcat\u0026#34; is 1s # tomcat: 1000 # the threshold of service \u0026#34;springboot1\u0026#34; is 50ms # springboot1: 50- name:agent-analyzer.default.uninstrumentedGatewaysvalue:|-#gateways: # - name: proxy0 # instances: # - host: 127.0.0.1 # the host/ip of this gateway instance # port: 9099 # the port of this gateway instance, defaults to 80Demo of Single configuration Set the dynamic configuration agent-analyzer.default.slowDBAccessThreshold as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:agent-analyzer.defaultspec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:slowDBAccessThresholdvalue:default:200,mongodb:50Set the dynamic configuration core.default.endpoint-name-grouping-openapi.customerAPI-v1 and core.default.endpoint-name-grouping-openapi.productAPI-v1 as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:core.default.endpoint-name-grouping-openapispec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:customerAPI-v1value:value of customerAPI-v1- name:productAPI-v1value:value of productAPI-v1","title":"OAPSever Configuration Introduction","url":"/docs/skywalking-swck/next/oapserver-configuration/"},{"content":"OAPSever Configuration Introduction To configure the OAP Sever, we propose two CRDs:\n OAPServerConfig: The CRD holds all static configuration, including environment variable and file configuration. OAPServerDynamicConfig: The CRD holds all dynamic configuration.  Spec of OAPServerConfig    Field Name Description     Version The version of OAP server, the default value is 9.5.0   Env The environment variable of OAP server   File The static file in OAP Server, which contains three fieldsfile.path、file.name and file.data. The file.path plus the file.name is the real file that needs to be replaced in the container image, and the file.data is the final data in the specific file.    Status of OAPServerConfig    Field Name Description     Desired The number of oapserver that need to be configured   Ready The number of oapserver that configured successfully   CreationTime The time the OAPServerConfig was created.   LastUpdateTime The last time this condition was updated.    
Demo of OAPServerConfig  When using the file, please don\u0026rsquo;t set the same name\n # static configuration of OAPServerapiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerConfigmetadata:name:oapserverconfig-samplenamespace:skywalking-systemspec:# The version of OAPServerversion:9.5.0# The env configuration of OAPServerenv:- name:JAVA_OPTSvalue:-Xmx2048M- name:SW_CLUSTERvalue:kubernetes- name:SW_CLUSTER_K8S_NAMESPACEvalue:skywalking-system# enable the dynamic configuration- name:SW_CONFIGURATIONvalue:k8s-configmap# set the labelselector of the dynamic configuration- name:SW_CLUSTER_K8S_LABELvalue:app=collector,release=skywalking- name:SW_TELEMETRYvalue:prometheus- name:SW_HEALTH_CHECKERvalue:default- name:SKYWALKING_COLLECTOR_UIDvalueFrom:fieldRef:fieldPath:metadata.uid- name:SW_LOG_LAL_FILESvalue:test1- name:SW_LOG_MAL_FILESvalue:test2# The file configuration of OAPServer# we should avoid setting the same file name in the filefile:- name:test1.yamlpath:/skywalking/config/laldata:|rules: - name: example dsl: | filter { text { abortOnFailure false // for test purpose, we want to persist all logs regexp $/(?s)(?\u0026lt;timestamp\u0026gt;\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d{3}) \\[TID:(?\u0026lt;tid\u0026gt;.+?)] \\[(?\u0026lt;thread\u0026gt;.+?)] (?\u0026lt;level\u0026gt;\\w{4,}) (?\u0026lt;logger\u0026gt;.{1,36}) (?\u0026lt;msg\u0026gt;.+)/$ } extractor { metrics { timestamp log.timestamp as Long labels level: parsed.level, service: log.service, instance: log.serviceInstance name \u0026#34;log_count\u0026#34; value 1 } } sink { } }- name:test2.yamlpath:/skywalking/config/log-mal-rulesdata:|expSuffix: instance([\u0026#39;service\u0026#39;], [\u0026#39;instance\u0026#39;], Layer.GENERAL) metricPrefix: log metricsRules: - name: count_info exp: log_count.tagEqual(\u0026#39;level\u0026#39;, \u0026#39;INFO\u0026#39;).sum([\u0026#39;service\u0026#39;, \u0026#39;instance\u0026#39;]).downsampling(SUM)Spec of OAPServerDynamicConfig    Field Name Description     Version The version of the OAP server, the default value is 9.5.0   LabelSelector The label selector of the specific configmap, the default value is \u0026ldquo;app=collector,release=skywalking\u0026rdquo;   Data All configurations' key and value    Status of OAPServerDynamicConfig    Field Name Description     State The state of dynamic configuration, running or stopped   CreationTime All configurations in one CR, the default value is false   LastUpdateTime The last time this condition was updated    Usage of OAPServerDynamicConfig  Notice, the CR\u0026rsquo;s name cannot contain capital letters.\n Users can split all configurations into several CRs. when using the OAPServerDynamicConfig, users can not only put some configurations in a CR, but also put a configuration in a CR, and the spec.data.name in CR represents one dynamic configuration.\nDemo of Global configuration apiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:oapserverdynamicconfig-samplespec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:agent-analyzer.default.slowDBAccessThresholdvalue:default:200,mongodb:50- name:alarm.default.alarm-settingsvalue:|-rules: # Rule unique name, must be ended with `_rule`. 
service_resp_time_rule: metrics-name: service_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 3 silence-period: 5 message: Response time of service {name} is more than 1000ms in 3 minutes of last 10 minutes. service_sla_rule: # Metrics value need to be long, double or int metrics-name: service_sla op: \u0026#34;\u0026lt;\u0026#34; threshold: 8000 # The length of time to evaluate the metrics period: 10 # How many times after the metrics match the condition, will trigger alarm count: 2 # How many times of checks, the alarm keeps silence after alarm triggered, default as same as period. silence-period: 3 message: Successful rate of service {name} is lower than 80% in 2 minutes of last 10 minutes service_resp_time_percentile_rule: # Metrics value need to be long, double or int metrics-name: service_percentile op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000,1000,1000,1000,1000 period: 10 count: 3 silence-period: 5 message: Percentile response time of service {name} alarm in 3 minutes of last 10 minutes, due to more than one condition of p50 \u0026gt; 1000, p75 \u0026gt; 1000, p90 \u0026gt; 1000, p95 \u0026gt; 1000, p99 \u0026gt; 1000 service_instance_resp_time_rule: metrics-name: service_instance_resp_time op: \u0026#34;\u0026gt;\u0026#34; threshold: 1000 period: 10 count: 2 silence-period: 5 message: Response time of service instance {name} is more than 1000ms in 2 minutes of last 10 minutes database_access_resp_time_rule: metrics-name: database_access_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of database access {name} is more than 1000ms in 2 minutes of last 10 minutes endpoint_relation_resp_time_rule: metrics-name: endpoint_relation_resp_time threshold: 1000 op: \u0026#34;\u0026gt;\u0026#34; period: 10 count: 2 message: Response time of endpoint relation {name} is more than 1000ms in 2 minutes of last 10 minutes # Active endpoint related metrics alarm will cost more memory than service and service instance metrics alarm. # Because the number of endpoint is much more than service and instance. 
# # endpoint_resp_time_rule: # metrics-name: endpoint_resp_time # op: \u0026#34;\u0026gt;\u0026#34; # threshold: 1000 # period: 10 # count: 2 # silence-period: 5 # message: Response time of endpoint {name} is more than 1000ms in 2 minutes of last 10 minutes webhooks: # - http://127.0.0.1/notify/ # - http://127.0.0.1/go-wechat/- name:core.default.apdexThresholdvalue:|-default: 500 # example: # the threshold of service \u0026#34;tomcat\u0026#34; is 1s # tomcat: 1000 # the threshold of service \u0026#34;springboot1\u0026#34; is 50ms # springboot1: 50- name:agent-analyzer.default.uninstrumentedGatewaysvalue:|-#gateways: # - name: proxy0 # instances: # - host: 127.0.0.1 # the host/ip of this gateway instance # port: 9099 # the port of this gateway instance, defaults to 80Demo of Single configuration Set the dynamic configuration agent-analyzer.default.slowDBAccessThreshold as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:agent-analyzer.defaultspec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:slowDBAccessThresholdvalue:default:200,mongodb:50Set the dynamic configuration core.default.endpoint-name-grouping-openapi.customerAPI-v1 and core.default.endpoint-name-grouping-openapi.productAPI-v1 as follows.\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServerDynamicConfigmetadata:name:core.default.endpoint-name-grouping-openapispec:# The version of OAPServerversion:9.5.0# The labelselector of OAPServer\u0026#39;s dynamic configuration, it should be the same as labelSelector of OAPServerConfiglabelSelector:app=collector,release=skywalkingdata:- name:customerAPI-v1value:value of customerAPI-v1- name:productAPI-v1value:value of productAPI-v1","title":"OAPSever Configuration Introduction","url":"/docs/skywalking-swck/v0.9.0/oapserver-configuration/"},{"content":"Observability This document outlines the observability features of BanyanDB, which include metrics, profiling, and tracing. These features help monitor and understand the performance, behavior, and overall health of BanyanDB.\nMetrics BanyanDB has built-in support for metrics collection through the use of build tags. The metrics provider can be enabled by specifying the build tag during the compilation process.\nCurrently, there is only one supported metrics provider: Prometheus. To use Prometheus as the metrics client, include the prometheus build tag when building BanyanDB:\nBUILD_TAGS=prometheus make -C banyand banyand-server\nIf no build tag is specified, the metrics server will not be started, and no metrics will be collected:\nmake -C banyand banyand-server\nWhen the Prometheus metrics provider is enabled, the metrics server listens on port 2121. This allows Prometheus to scrape metrics data from BanyanDB for monitoring and analysis.\nThe Docker image is tagged as \u0026ldquo;prometheus\u0026rdquo; to facilitate cloud-native operations and simplify deployment on Kubernetes. This allows users to directly deploy the Docker image onto their Kubernetes cluster without having to rebuild it with the \u0026ldquo;prometheus\u0026rdquo; tag.\nProfiling Banyand, the server of BanyanDB, supports profiling automatically. The profiling data is collected by the pprof package and can be accessed through the /debug/pprof endpoint. 
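If you run your own Prometheus server, scraping the BanyanDB metrics endpoint on port 2121 described above only needs a standard scrape job; the target host name below is an assumption for illustration.
scrape_configs:
  - job_name: banyandb
    scrape_interval: 15s
    static_configs:
      - targets: ['banyandb:2121']   # metrics port exposed when built with the prometheus tag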
The port of the profiling server is 2122 by default.\nTracing TODO: Add details about the tracing support in BanyanDB, such as how to enable tracing, available tracing tools, and how to analyze tracing data.\n","title":"Observability","url":"/docs/skywalking-banyandb/latest/observability/"},{"content":"Observability This document outlines the observability features of BanyanDB, which include metrics, profiling, and tracing. These features help monitor and understand the performance, behavior, and overall health of BanyanDB.\nMetrics BanyanDB has built-in support for metrics collection through the use of build tags. The metrics provider can be enabled by specifying the build tag during the compilation process.\nCurrently, there is only one supported metrics provider: Prometheus. To use Prometheus as the metrics client, include the prometheus build tag when building BanyanDB:\nBUILD_TAGS=prometheus make -C banyand banyand-server\nIf no build tag is specified, the metrics server will not be started, and no metrics will be collected:\nmake -C banyand banyand-server\nWhen the Prometheus metrics provider is enabled, the metrics server listens on port 2121. This allows Prometheus to scrape metrics data from BanyanDB for monitoring and analysis.\nThe Docker image is tagged as \u0026ldquo;prometheus\u0026rdquo; to facilitate cloud-native operations and simplify deployment on Kubernetes. This allows users to directly deploy the Docker image onto their Kubernetes cluster without having to rebuild it with the \u0026ldquo;prometheus\u0026rdquo; tag.\nProfiling Banyand, the server of BanyanDB, supports profiling automatically. The profiling data is collected by the pprof package and can be accessed through the /debug/pprof endpoint. The port of the profiling server is 2122 by default.\nTracing TODO: Add details about the tracing support in BanyanDB, such as how to enable tracing, available tracing tools, and how to analyze tracing data.\n","title":"Observability","url":"/docs/skywalking-banyandb/next/observability/"},{"content":"Observability This document outlines the observability features of BanyanDB, which include metrics, profiling, and tracing. These features help monitor and understand the performance, behavior, and overall health of BanyanDB.\nMetrics BanyanDB has built-in support for metrics collection through the use of build tags. The metrics provider can be enabled by specifying the build tag during the compilation process.\nCurrently, there is only one supported metrics provider: Prometheus. To use Prometheus as the metrics client, include the prometheus build tag when building BanyanDB:\nBUILD_TAGS=prometheus make -C banyand banyand-server\nIf no build tag is specified, the metrics server will not be started, and no metrics will be collected:\nmake -C banyand banyand-server\nWhen the Prometheus metrics provider is enabled, the metrics server listens on port 2121. This allows Prometheus to scrape metrics data from BanyanDB for monitoring and analysis.\nThe Docker image is tagged as \u0026ldquo;prometheus\u0026rdquo; to facilitate cloud-native operations and simplify deployment on Kubernetes. This allows users to directly deploy the Docker image onto their Kubernetes cluster without having to rebuild it with the \u0026ldquo;prometheus\u0026rdquo; tag.\nProfiling Banyand, the server of BanyanDB, supports profiling automatically. The profiling data is collected by the pprof package and can be accessed through the /debug/pprof endpoint. 
The port of the profiling server is 2122 by default.\nTracing TODO: Add details about the tracing support in BanyanDB, such as how to enable tracing, available tracing tools, and how to analyze tracing data.\n","title":"Observability","url":"/docs/skywalking-banyandb/v0.5.0/observability/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. 
The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time. Before 7.0.0, p99, p95, p90, p75, p50 func(s) are used to calculate metrics separately. They are still supported in 7.x, but they are no longer recommended and are not included in the current official OAL script.\n service_p99 = from(Service.latency).p99(10);\n In this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. 
By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.0.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. 
There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. 
The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time. Before 7.0.0, p99, p95, p90, p75, p50 func(s) are used to calculate metrics separately. They are still supported in 7.x, but they are no longer recommended and are not included in the current official OAL script.\n service_p99 = from(Service.latency).p99(10);\n In this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. 
endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.1.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. 
The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. 
Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(responseCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.2.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. 
METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. 
See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. 
endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.3.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   
endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. 
.filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.4.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. 
METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe expressions support linking by and, or and (...). The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. 
See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. .filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. 
endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.5.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Language OAL(Observability Analysis Language) serves to analyze incoming data in streaming mode.\nOAL focuses on metrics in Service, Service Instance and Endpoint. Therefore, the language is easy to learn and use.\nSince 6.3, the OAL engine is embedded in OAP server runtime as oal-rt(OAL Runtime). OAL scripts are now found in the /config folder, and users could simply change and reboot the server to run them. However, the OAL script is a compiled language, and the OAL Runtime generates java codes dynamically.\nYou can open set SW_OAL_ENGINE_DEBUG=Y at system env to see which classes are generated.\nGrammar Scripts should be named *.oal\n// Declare the metrics. METRICS_NAME = from(CAST SCOPE.(* | [FIELD][,FIELD ...])) [.filter(CAST FIELD OP [INT | STRING])] .FUNCTION([PARAM][, PARAM ...]) // Disable hard code disable(METRICS_NAME); From The from statement defines the data source of this OAL expression.\nPrimary SCOPEs are Service, ServiceInstance, Endpoint, ServiceRelation, ServiceInstanceRelation, and EndpointRelation. There are also some secondary scopes which belong to a primary scope.\nSee Scope Definitions, where you can find all existing Scopes and Fields.\nFilter Use filter to build conditions for the value of fields by using field name and expression.\nThe filter expressions run as a chain, generally connected with logic AND. The OPs support ==, !=, \u0026gt;, \u0026lt;, \u0026gt;=, \u0026lt;=, in [...] ,like %..., like ...% , like %...% , contain and not contain, with type detection based on field type. In the event of incompatibility, compile or code generation errors may be triggered.\nAggregation Function The default functions are provided by the SkyWalking OAP core, and it is possible to implement additional functions.\nFunctions provided\n longAvg. The avg of all input per scope entity. The input field must be a long.   instance_jvm_memory_max = from(ServiceInstanceJVMMemory.max).longAvg();\n In this case, the input represents the request of each ServiceInstanceJVMMemory scope, and avg is based on field max.\n doubleAvg. The avg of all input per scope entity. The input field must be a double.   instance_jvm_cpu = from(ServiceInstanceJVMCPU.usePercent).doubleAvg();\n In this case, the input represents the request of each ServiceInstanceJVMCPU scope, and avg is based on field usePercent.\n percent. The number or ratio is expressed as a fraction of 100, where the input matches with the condition.   
endpoint_percent = from(Endpoint.*).percent(status == true);\n In this case, all input represents requests of each endpoint, and the condition is endpoint.status == true.\n rate. The rate expressed is as a fraction of 100, where the input matches with the condition.   browser_app_error_rate = from(BrowserAppTraffic.*).rate(trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR, trafficCategory == BrowserAppTrafficCategory.NORMAL);\n In this case, all input represents requests of each browser app traffic, the numerator condition is trafficCategory == BrowserAppTrafficCategory.FIRST_ERROR and denominator condition is trafficCategory == BrowserAppTrafficCategory.NORMAL. Parameter (1) is the numerator condition. Parameter (2) is the denominator condition.\n count. The sum of calls per scope entity.   service_calls_sum = from(Service.*).count();\n In this case, the number of calls of each service.\n histogram. See Heatmap in WIKI.   service_heatmap = from(Service.latency).histogram(100, 20);\n In this case, the thermodynamic heatmap of all incoming requests. Parameter (1) is the precision of latency calculation, such as in the above case, where 113ms and 193ms are considered the same in the 101-200ms group. Parameter (2) is the group amount. In the above case, 21(param value + 1) groups are 0-100ms, 101-200ms, \u0026hellip; 1901-2000ms, 2000+ms\n apdex. See Apdex in WIKI.   service_apdex = from(Service.latency).apdex(name, status);\n In this case, the apdex score of each service. Parameter (1) is the service name, which reflects the Apdex threshold value loaded from service-apdex-threshold.yml in the config folder. Parameter (2) is the status of this request. The status(success/failure) reflects the Apdex calculation.\n p99, p95, p90, p75, p50. See percentile in WIKI.   service_percentile = from(Service.latency).percentile(10);\n percentile is the first multiple-value metric, which has been introduced since 7.0.0. As a metric with multiple values, it could be queried through the getMultipleLinearIntValues GraphQL query. In this case, see p99, p95, p90, p75, and p50 of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nIn this case, the p99 value of all incoming requests. The parameter is precise to a latency at p99, such as in the above case, and 120ms and 124ms are considered to produce the same response time.\nMetrics name The metrics name for storage implementor, alarm and query modules. The type inference is supported by core.\nGroup All metrics data will be grouped by Scope.ID and min-level TimeBucket.\n In the Endpoint scope, the Scope.ID is same as the Endpoint ID (i.e. the unique ID based on service and its endpoint).  Cast Fields of source are static type. In some cases, the type required by the filter expression and aggregation function doesn\u0026rsquo;t match the type in the source, such as tag value in the source is String type, most aggregation calculation requires numeric.\nCast expression is provided to do so.\n (str-\u0026gt;long) or (long), cast string type into long. (str-\u0026gt;int) or (int), cast string type into int.  mq_consume_latency = from((str-\u0026gt;long)Service.tag[\u0026quot;transmission.latency\u0026quot;]).longAvg(); // the value of tag is string type. Cast statement is supported in\n From statement. from((cast)source.attre). Filter expression. 
.filter((cast)tag[\u0026quot;transmission.latency\u0026quot;] \u0026gt; 0) Aggregation function parameter. .longAvg((cast)strField1== 1, (cast)strField2)  Disable Disable is an advanced statement in OAL, which is only used in certain cases. Some of the aggregation and metrics are defined through core hard codes. Examples include segment and top_n_database_statement. This disable statement is designed to render them inactive. By default, none of them are disabled.\nNOTICE, all disable statements should be in oal/disable.oal script file.\nExamples // Calculate p99 of both Endpoint1 and Endpoint2 endpoint_p99 = from(Endpoint.latency).filter(name in (\u0026quot;Endpoint1\u0026quot;, \u0026quot;Endpoint2\u0026quot;)).summary(0.99) // Calculate p99 of Endpoint name started with `serv` serv_Endpoint_p99 = from(Endpoint.latency).filter(name like \u0026quot;serv%\u0026quot;).summary(0.99) // Calculate the avg response time of each Endpoint endpoint_resp_time = from(Endpoint.latency).avg() // Calculate the p50, p75, p90, p95 and p99 of each Endpoint by 50 ms steps. endpoint_percentile = from(Endpoint.latency).percentile(10) // Calculate the percent of response status is true, for each service. endpoint_success = from(Endpoint.*).filter(status == true).percent() // Calculate the sum of response code in [404, 500, 503], for each service. endpoint_abnormal = from(Endpoint.*).filter(httpResponseStatusCode in [404, 500, 503]).count() // Calculate the sum of request type in [RequestType.RPC, RequestType.gRPC], for each service. endpoint_rpc_calls_sum = from(Endpoint.*).filter(type in [RequestType.RPC, RequestType.gRPC]).count() // Calculate the sum of endpoint name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;], for each service. endpoint_url_sum = from(Endpoint.*).filter(name in [\u0026quot;/v1\u0026quot;, \u0026quot;/v2\u0026quot;]).count() // Calculate the sum of calls for each service. endpoint_calls = from(Endpoint.*).count() // Calculate the CPM with the GET method for each service.The value is made up with `tagKey:tagValue`. // Option 1, use `tags contain`. service_cpm_http_get = from(Service.*).filter(tags contain \u0026quot;http.method:GET\u0026quot;).cpm() // Option 2, use `tag[key]`. service_cpm_http_get = from(Service.*).filter(tag[\u0026quot;http.method\u0026quot;] == \u0026quot;GET\u0026quot;).cpm(); // Calculate the CPM with the HTTP method except for the GET method for each service.The value is made up with `tagKey:tagValue`. service_cpm_http_other = from(Service.*).filter(tags not contain \u0026quot;http.method:GET\u0026quot;).cpm() disable(segment); disable(endpoint_relation_server_side); disable(top_n_database_statement); ","title":"Observability Analysis Language","url":"/docs/main/v9.6.0/en/concepts-and-designs/oal/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, and Zipkin traces of v1 and v2 formats are supported. Metrics. SkyWalking supports mature metrics formats, including native meter format, OTEL metrics format, and Telegraf format. SkyWalking integrates with Service Mesh platforms, typically Istio and Envoy, to build observability into the data plane or control plane. 
Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/latest/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking OAP and UI provides dozens of features to support observability analysis for your services, cloud infrastructure, open-source components, and more.\nBesides those out-of-box features for monitoring, users could leverage the powerful and flexible analysis language to build their own analysis and visualization.\nThere are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data to build metrics of entity and topology map. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on analyzing log contents to format and label them, and extract metrics from them to feed Meter Analysis Language for further analysis.  SkyWalking community is willing to accept your monitoring extension powered by these languages, if the monitoring targets are public and general usable.\n","title":"Observability Analysis Platform","url":"/docs/main/next/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  
","title":"Observability Analysis Platform","url":"/docs/main/v9.0.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/v9.1.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/v9.2.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. 
SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/v9.3.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/v9.4.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. 
Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/v9.5.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, including Zipkin v1 and v2, as well as Jaeger. Metrics. SkyWalking integrates with Service Mesh platforms, such as Istio, Envoy, and Linkerd, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  ","title":"Observability Analysis Platform","url":"/docs/main/v9.6.0/en/concepts-and-designs/backend-overview/"},{"content":"Observability Analysis Platform SkyWalking is an Observability Analysis Platform that provides full observability to services running in both brown and green zones, as well as services using a hybrid model.\nCapabilities SkyWalking covers all 3 areas of observability, including, Tracing, Metrics and Logging.\n Tracing. SkyWalking native data formats, and Zipkin traces of v1 and v2 formats are supported. Metrics. SkyWalking supports mature metrics formats, including native meter format, OTEL metrics format, and Telegraf format. SkyWalking integrates with Service Mesh platforms, typically Istio and Envoy, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content.  There are 3 powerful and native language engines designed to analyze observability data from the above areas.\n Observability Analysis Language processes native traces and service mesh data. Meter Analysis Language is responsible for metrics calculation for native meter data, and adopts a stable and widely used metrics system, such as Prometheus and OpenTelemetry. Log Analysis Language focuses on log contents and collaborate with Meter Analysis Language.  
","title":"Observability Analysis Platform","url":"/docs/main/v9.7.0/en/concepts-and-designs/backend-overview/"},{"content":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  // Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","title":"Observations","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/"},{"content":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  
// Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","title":"Observations","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/"},{"content":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  // Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","title":"Observations","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/"},{"content":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  
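For context, once the registry has been configured with the SkyWalking handlers as in the configuration snippet that follows, any code wrapped in a Micrometer Observation is reported to SkyWalking as both a timer and a span. The following is a minimal, illustrative Java sketch only: the observation name "order.process", the key/value pair, and the business logic are hypothetical, and the SkyWalking handler registration shown in the surrounding snippet is assumed to have been applied to the registry.

import io.micrometer.observation.Observation;
import io.micrometer.observation.ObservationRegistry;

public class ObservationUsageSketch {
    public static void main(String[] args) {
        // In a real setup this registry would also have the SkyWalking meter and
        // tracing handlers registered on it, as shown in the configuration snippet.
        ObservationRegistry registry = ObservationRegistry.create();

        // "order.process" and "order.channel=web" are made-up names for illustration.
        Observation.createNotStarted("order.process", registry)
                .lowCardinalityKeyValue("order.channel", "web")
                .observe(() -> {
                    // Business logic goes here; with the handlers attached, a timer
                    // (metrics) and a SkyWalking span (tracing) are recorded around
                    // this block.
                });
    }
}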
// Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","title":"Observations","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/"},{"content":"Observations  Dependency the toolkit, such as using maven or gradle  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-1.10\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt;  To use the Micrometer Observation Registry with Skywalking, you need to add handlers to the registry. Skywalking comes with dedicated SkywalkingMeterHandler (for metrics) and SkywalkingSenderTracingHandler, SkywalkingReceiverTracingHandler SkywalkingDefaultTracingHandler (for traces).  // Here we create the Observation Registry with attached handlers ObservationRegistry registry = ObservationRegistry.create(); // Here we add a meter handler registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingMeterHandler(new SkywalkingMeterRegistry()) ); // Here we add tracing handlers registry.observationConfig() .observationHandler(new ObservationHandler.FirstMatchingCompositeObservationHandler( new SkywalkingSenderTracingHandler(), new SkywalkingReceiverTracingHandler(), new SkywalkingDefaultTracingHandler() )); With such setup metrics and traces will be created for any Micrometer Observation based instrumentations.\n","title":"Observations","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-micrometer-1.10/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. 
In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-helm.git cd skywalking-helm/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. 
This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/latest/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. 
For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-helm.git cd skywalking-helm/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/next/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example on installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. 
When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes cluster, hence in this analyzer OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works, and a step-by-step tutorial to apply it into the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works, and a step-by-step tutorial to apply it into the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.0.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. 
Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.1.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. 
When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.2.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. 
Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this. alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.3.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. 
When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.4.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. 
Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.5.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-kubernetes.git cd skywalking-kubernetes/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. 
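For instance, a fallback chain of two analyzers could be activated through environment variables along these lines (the ordering is illustrative):

  export SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh,k8s-mesh
  export SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh,k8s-mesh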
When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.6.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through ALS Envoy Access Log Service (ALS) provides full logs on routed RPC, including HTTP and TCP.\nBackground The solution was initialized and first implemented by Sheng Wu, Hongtao Gao, Lizan Zhou, and Dhi Aurrahman on May 17, 2019, and was presented at KubeCon China 2019. Here is a video recording of the presentation.\nSkyWalking is the first open-source project that introduced an ALS-based solution to the world. This solution provides a new take on observability with a lightweight payload on the service mesh.\nEnable ALS and SkyWalking Receiver You need the following steps to set up ALS.\n  Enable envoyAccessLogService in ProxyConfig and set the ALS address to where the SkyWalking OAP listens. In Istio version 1.6.0+, if Istio is installed with demo profile, you can enable ALS with this command:\nistioctl manifest apply \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=\u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; Note: Replace \u0026lt;skywalking-oap.skywalking.svc:11800\u0026gt; with the real address where SkyWalking OAP is deployed.\n  Activate SkyWalking Envoy Receiver. (activated in default)\n  envoy-metric:selector:${SW_ENVOY_METRIC:default}  Choose an ALS analyzer. There are two available analyzers for both HTTP access logs and TCP access logs: k8s-mesh and mx-mesh. 
Set the system environment variables SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS, such as SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh and SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh, or in application.yaml to activate the analyzers. For more about the analyzers, see SkyWalking ALS Analyzers.\nenvoy-metric:selector:${SW_ENVOY_METRIC:default}default:acceptMetricsService:${SW_ENVOY_METRIC_SERVICE:true}alsHTTPAnalysis:${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:\u0026#34;\u0026#34;}# Setting the system env variable would override this.alsTCPAnalysis:${SW_ENVOY_METRIC_ALS_TCP_ANALYSIS:\u0026#34;\u0026#34;}To use multiple analyzers as a fallback, please use , to concatenate.\n  Example Here\u0026rsquo;s an example of installing Istio and deploying SkyWalking by Helm chart.\nistioctl install \\  --set profile=demo \\  --set meshConfig.enableEnvoyAccessLogService=true \\  --set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-oap.istio-system:11800 git clone https://github.com/apache/skywalking-helm.git cd skywalking-helm/chart helm repo add elastic https://helm.elastic.co helm dep up skywalking helm install 8.1.0 skywalking -n istio-system \\  --set oap.env.SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS=mx-mesh \\  --set oap.env.SW_ENVOY_METRIC_ALS_TCP_ANALYSIS=mx-mesh \\  --set fullnameOverride=skywalking \\  --set oap.envoy.als.enabled=true You can use kubectl -n istio-system logs -l app=skywalking | grep \u0026quot;K8sALSServiceMeshHTTPAnalysis\u0026quot; to ensure that OAP ALS mx-mesh analyzer has been activated.\nSkyWalking ALS Analyzers There are several available analyzers: k8s-mesh, mx-mesh, and persistence. You can specify one or more analyzers to analyze the access logs. When multiple analyzers are specified, it acts as a fast-success mechanism: SkyWalking loops over the analyzers and use them to analyze the logs. Once there is an analyzer that is able to produce a result, it stops the loop.\nk8s-mesh k8s-mesh uses the metadata from Kubernetes clusters, hence in this analyzer, OAP needs access roles to Pod, Service, and Endpoints.\nThe blog illustrates the details of how it works and a step-by-step tutorial to apply it to the bookinfo application.\nmx-mesh mx-mesh uses the Envoy metadata exchange mechanism to get the service name, etc. This analyzer requires Istio to enable the metadata exchange plugin (you can enable it by --set values.telemetry.v2.enabled=true, or if you\u0026rsquo;re using Istio 1.7+ and installing it with profile demo/preview, it should already be enabled).\nThe blog illustrates the details of how it works and a step-by-step tutorial on applying it to the Online Boutique system.\npersistence persistence analyzer adapts the Envoy access log format to SkyWalking\u0026rsquo;s native log format, and forwards the formatted logs to LAL, where you can configure persistent conditions, such as sampler, only persist error logs, etc. SkyWalking provides a default configuration file envoy-als.yaml that you can adjust as per your needs. Please make sure to activate this rule via adding the rule name envoy-als into config item log-analyzer/default/lalFiles (or environment variable SW_LOG_LAL_FILES, e.g. SW_LOG_LAL_FILES=envoy-als).\nAttention: Since the persistence analyzer also needs a mechanism to map the logs into responding services, you need to configure at least one of k8s-mesh or mx-mesh as its antecedent so that persistence analyzer knows which service the logs belong to. 
For example, you should set envoy-metric/default/alsHTTPAnalysis (or environment variable SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS) to something like k8s-mesh,persistence, mx-mesh,persistence, or mx-mesh,k8s-mesh,persistence.\n","title":"Observe Service Mesh through ALS","url":"/docs/main/v9.7.0/en/setup/envoy/als_setting/"},{"content":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","title":"Observe Service Mesh through Zipkin traces","url":"/docs/main/latest/en/setup/zipkin/tracing/"},{"content":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","title":"Observe Service Mesh through Zipkin traces","url":"/docs/main/next/en/setup/zipkin/tracing/"},{"content":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","title":"Observe Service Mesh through Zipkin traces","url":"/docs/main/v9.4.0/en/setup/zipkin/tracing/"},{"content":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
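Assuming both the receiver and the query module described above are enabled, and the OAP service is reachable at an address such as skywalking-oap.istio-system (an illustrative name), the setup could be sanity-checked with the standard Zipkin v2 query API, for example:

  curl http://skywalking-oap.istio-system:9412/api/v2/services

A non-empty JSON array of service names suggests that traces are arriving on port 9411 and being served on port 9412.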
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","title":"Observe Service Mesh through Zipkin traces","url":"/docs/main/v9.5.0/en/setup/zipkin/tracing/"},{"content":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","title":"Observe Service Mesh through Zipkin traces","url":"/docs/main/v9.6.0/en/setup/zipkin/tracing/"},{"content":"Observe Service Mesh through Zipkin traces Istio has built-in support to generate Zipkin traces from Envoy proxy sidecar, and SkyWalking can serve as a Zipkin server to collect and provide query APIs for these traces, you can deploy SkyWalking to replace Zipkin server in Istio, and point the Zipkin address to SkyWalking. 
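Put into a more readable shape, the two fragments quoted above correspond roughly to the following application.yml sections:

  receiver-zipkin:
    selector: ${SW_RECEIVER_ZIPKIN:default}
    default:
      # Other configurations...
  query-zipkin:
    selector: ${SW_QUERY_ZIPKIN:default}
    default:
      # Other configurations...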
SkyWalking also embeds Zipkin Lens UI as part of SkyWalking UI, you can use it to query Zipkin traces.\nEnable Zipkin Traces Receiver SkyWalking has built-in Zipkin receiver, you can enable it by setting receiver-zipkin to default in application.yml, or by setting environment variable SW_RECEIVER_ZIPKIN=default before starting OAP server:\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Other configurations...After enabling the Zipkin receiver, SkyWalking listens on port 9411 for Zipkin traces, you can just change the Zipkin server address to SkyWalking\u0026rsquo;s address with 9411 as the port.\nEnable Zipkin Traces Query Module If you want to query Zipkin traces from SkyWalking, you need to enable the Zipkin traces query module by setting query-zipkin to default in application.yml, or by setting environment variable SW_QUERY_ZIPKIN=default before starting OAP server:\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# Other configurationsAfter enabling Zipkin query module, SkyWalking listens on port 9412 for Zipkin query APIs, you can also query the Zipkin traces from SkyWalking UI, menu Service Mesh --\u0026gt; Services --\u0026gt; Zipkin Trace.\nSet Up Zipkin Traces in Istio When installing Istio, you can enable Zipkin tracing and point it to SkyWalking by setting\nistioctl install -y --set profile=demo \\ \t--set meshConfig.defaultConfig.tracing.sampling=100 \\ \t--set meshConfig.defaultConfig.tracing.zipkin.address=oap.istio-system.svc.cluster.local:9411 \\ \t--set meshConfig.enableTracing=true so that Istio proxy (Envoy) can generate traces and sent them to SkyWalking.\nFor more details about Zipkin on Istio, refer to the Istio doc.\n","title":"Observe Service Mesh through Zipkin traces","url":"/docs/main/v9.7.0/en/setup/zipkin/tracing/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  
","title":"Official OAL script","url":"/docs/main/latest/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/next/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.0.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. 
You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.1.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.2.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. 
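For instance, a new metric with a filter condition could be sketched as follows (the metric name is made up; see the OAL introduction for the exact grammar):

  // Calls per minute for each service, counting only successful requests.
  service_success_cpm = from(Service.*).filter(status == true).cpm();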
Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.3.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.4.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. 
Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.5.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.6.0/en/guides/backend-oal-scripts/"},{"content":"Official OAL script First, read the OAL introduction to learn the OAL script grammar and the source concept.\nFrom 8.0.0, you may find the OAL script at /config/oal/*.oal of the SkyWalking dist. You could change it, such as by adding filter conditions or new metrics. Then, reboot the OAP server, and it will come into effect.\nAll metrics named in this script could be used in alarm and UI query.\nExtension Logic Endpoint In default, SkyWalking only treats the operation name of entry span as the endpoint, which are used in the OAL engine. Users could declare their custom endpoint names by adding the logic endpoint tag manually through agent\u0026rsquo;s plugins or manual APIs.\nThe logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. 
There are two options:\n Define a new logic endpoint in the entry span as a separate new endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } References  Java plugin API guides users to write plugins with logic endpoint. Java agent\u0026rsquo;s plugins include native included logic endpoints, also it provides ways to set the tag of logic span. The document could be found here.  ","title":"Official OAL script","url":"/docs/main/v9.7.0/en/guides/backend-oal-scripts/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/latest/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and uses the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/next/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happening inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and uses the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.1.0/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.2.0/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.3.0/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
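As a rough sketch, the cluster role mentioned in this section could be granted along these lines (the role name is hypothetical; the resources and verbs follow the list/get requirement stated above):

  apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    name: skywalking-oap-on-demand-logs  # hypothetical name
  rules:
    - apiGroups: [""]
      resources: ["namespaces", "services", "pods", "pods/log"]
      verbs: ["get", "list"]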
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.4.0/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.5.0/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. 
This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.6.0/en/setup/backend/on-demand-pod-log/"},{"content":"On Demand Pod Logs This feature is to fetch the Pod logs on users' demand, the logs are fetched and displayed in real time, and are not persisted in any kind. This is helpful when users want to do some experiments and monitor the logs and see what\u0026rsquo;s happing inside the service.\nNote: if you print secrets in the logs, they are also visible to the UI, so for the sake of security, this feature is disabled by default, please read the configuration documentation to enable this feature manually.\nHow it works As the name indicates, this feature only works for Kubernetes Pods.\nSkyWalking OAP collects and saves the service instance\u0026rsquo;s namespace and Pod name in the service instance\u0026rsquo;s properties, named namespace and pod, users can select the same and UI should fetch the logs by service instance in a given interval and display the logs in UI, OAP receives the query and checks the instance\u0026rsquo;s properties and use the namespace and pod to locate the Pod and query the logs.\nIf you want to register a service instance that has on demand logs available, you should add namespace and pod in the service instance properties, so that you can query the real time logs from that Pod.\nThat said, in order to make this feature work properly, you should in advance configure the cluster role for OAP to list/get namespaces, services, pods and pods/log.\n","title":"On Demand Pod Logs","url":"/docs/main/v9.7.0/en/setup/backend/on-demand-pod-log/"},{"content":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. 
An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","title":"OpenTelemetry Logging Format","url":"/docs/main/latest/en/setup/backend/log-otlp/"},{"content":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E Recommend to use OpenTelemetry OTLP Exporter to forward collected logs to OAP server in OTLP format, and SkyWalking OAP is responsible for transforming the data format into native log format with analysis support powered by LAL script.\n Deprecated: unmaintained and not recommended to use, will be removed.\nOpenTelemetry SkyWalking Exporter was first added into open-telemetry/opentelemetry-collector-contrib before OAP OTLP support. It transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Currently, from OTLP community, it is not well maintained, and already being marked as unmaintained, and may be removed in 2024.\n OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data.  And several attributes are optional as add-on information for the logs before analyzing.\n service.layer: the layer of the service that generates the logs. The default value is GENERAL layer, which is 100% sampled defined by LAL general rule service.instance: the instance name that generates the logs. The default value is empty.  
Note, that these attributes should be set manually through OpenTelemetry SDK or through attribute#insert in OpenTelemetry Collector.\n","title":"OpenTelemetry Logging Format","url":"/docs/main/next/en/setup/backend/log-otlp/"},{"content":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","title":"OpenTelemetry Logging Format","url":"/docs/main/v9.5.0/en/setup/backend/log-otlp/"},{"content":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  
OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","title":"OpenTelemetry Logging Format","url":"/docs/main/v9.6.0/en/setup/backend/log-otlp/"},{"content":"OpenTelemetry Logging Format SkyWalking can receive logs exported from OpenTelemetry collector, the data flow is:\ngraph LR B[OpenTelemetry SDK 1] C[FluentBit/FluentD, etc.] K[Other sources that OpenTelemetry supports ...] D[OpenTelemetry Collector] E[SkyWalking OAP Server] B --\u0026gt; D C --\u0026gt; D K --\u0026gt; D D -- exporter --\u0026gt; E where the exporter can be one of the following:\n OpenTelemetry SkyWalking Exporter. An exporter that transforms the logs to SkyWalking format before sending them to SkyWalking OAP. Read the doc in the aforementioned link for a detailed guide. OpenTelemetry OTLP Exporter. An exporter that sends the logs to SkyWalking OAP in OTLP format, and SkyWalking OAP is responsible for transforming the data format.  OpenTelemetry OTLP Exporter By using this exporter, you can send any log data to SkyWalking OAP as long as the data is in OTLP format, no matter where the data is generated.\nTo enable this exporter, make sure the receiver-otel is enabled and the otlp-logs value is in the receiver-otel/default/enabledHandlers configuration section:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics,otlp-logs\u0026#34;}Also, because most of the language SDKs of OpenTelemetry do not support logging feature (yet) or the logging feature is experimental, it\u0026rsquo;s your responsibility to make sure the reported log data contains the following attributes, otherwise SkyWalking is not able to consume them:\n service.name: the name of the service that generates the log data, OpenTelemetry Java SDK (experimental) has this attribute set, if you\u0026rsquo;re using other SDK or agent, please check the corresponding doc.  ","title":"OpenTelemetry Logging Format","url":"/docs/main/v9.7.0/en/setup/backend/log-otlp/"},{"content":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. 
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Linux OS otel-rules/vm.yaml prometheus/node_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Windows OS otel-rules/windows.yaml prometheus-community/windows_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml 
AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-cluster.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-index.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-node.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-service.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-instance.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-cluster.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-node.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-cluster.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-node.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-clusteryaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-broker.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.      ","title":"OpenTelemetry Metrics Format","url":"/docs/main/latest/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. 
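On the collector side, the "exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server" flows listed in the table below boil down to a pipeline like this sketch (the node-exporter scrape target and the OAP gRPC address oap:11800 are assumptions for illustration):

receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: node-exporter            # assumed scrape job name
          static_configs:
            - targets: [ node-exporter:9100 ] # assumed exporter address; OAP reads the host name from the resulting resource attributes
exporters:
  otlp:
    endpoint: oap:11800                      # assumed SkyWalking OAP gRPC address
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [ prometheus ]
      exporters: [ otlp ]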
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\nNotice: In the resource scope, dots (.) in the attributes' key names are converted to underscores (_), whereas in the metrics scope, they are not converted.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Linux OS otel-rules/vm.yaml prometheus/node_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Windows OS otel-rules/windows.yaml prometheus-community/windows_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver 
-\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-cluster.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-index.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-node.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-service.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-instance.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-cluster.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-node.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-cluster.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-node.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-cluster.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-broker.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of ClickHouse otel-rules/clickhouse/clickhouse-instance.yaml ClickHouse(embedded prometheus endpoint) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of ClickHouse otel-rules/clickhouse/clickhouse-service.yaml ClickHouse(embedded prometheus endpoint) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RocketMQ otel-rules/rocketmq/rocketmq-cluster.yaml rocketmq-exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RocketMQ otel-rules/rocketmq/rocketmq-broker.yaml rocketmq-exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RocketMQ otel-rules/rocketmq/rocketmq-topic.yaml rocketmq-exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    
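For readability, the activation snippet quoted above is flattened on this page; laid out as it would sit in application.yml, the same configuration (using the default values shown above) reads:

receiver-otel:
  selector: ${SW_OTEL_RECEIVER:default}
  default:
    enabledHandlers: ${SW_OTEL_RECEIVER_ENABLED_HANDLERS:"otlp-metrics"}
    enabledOtelMetricsRules: ${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:"istio-controlplane"}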
","title":"OpenTelemetry Metrics Format","url":"/docs/main/next/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking 
OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","title":"OpenTelemetry Metrics Format","url":"/docs/main/v9.6.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry Metrics Format The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp-metrics\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Linux OS otel-rules/vm.yaml prometheus/node_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Windows OS otel-rules/windows.yaml prometheus-community/windows_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server 
  Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml prometheus-community/postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-cluster.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-index.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Elasticsearch otel-rules/elasticsearch/elasticsearch-node.yaml prometheus-community/elasticsearch_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-service.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Redis otel-rules/redis/redis-instance.yaml oliver006/redis_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-cluster.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of RabbitMQ otel-rules/rabbitmq/rabbitmq-node.yaml rabbitmq-prometheus -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-cluster.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MongoDB otel-rules/mongodb/mongodb-node.yaml percona/mongodb_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-clusteryaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; 
OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Kafka otel-rules/kafka/kafka-broker.yaml prometheus/jmx_exporter/jmx_prometheus_javaagent -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.      ","title":"OpenTelemetry Metrics Format","url":"/docs/main/v9.7.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-\u0026lt;handler\u0026gt;-rules. E.g. The oc handler loads rules from $CLASSPATH/otel-oc-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in prometheus-fetcher. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc\u0026#34;}enabledOcRules:${SW_OTEL_RECEIVER_ENABLED_OC_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds labels with key = node_identifier_host_name and key = node_identifier_pid to the collected data samples, and values from Node.identifier.host_name and Node.identifier.pid defined in OpenCensus Agent Proto, for identification of the metric data.\n   Rule Name Description Configuration File Data Source     istio-controlplane Metrics of Istio Control Plane otel-oc-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   oap Metrics of SkyWalking OAP server itself otel-oc-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   vm Metrics of VMs otel-oc-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-cluster Metrics of K8s cluster otel-oc-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-node Metrics of K8s cluster otel-oc-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-service Metrics of K8s cluster otel-oc-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to directly transport the metrics to SkyWalking OAP. See OpenTelemetry Exporter.\n","title":"OpenTelemetry receiver","url":"/docs/main/v9.0.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. 
The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-\u0026lt;handler\u0026gt;-rules. E.g. The oc handler loads rules from $CLASSPATH/otel-oc-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in prometheus-fetcher. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc\u0026#34;}enabledOcRules:${SW_OTEL_RECEIVER_ENABLED_OC_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds labels with key = node_identifier_host_name and key = node_identifier_pid to the collected data samples, and values from Node.identifier.host_name and Node.identifier.pid defined in OpenCensus Agent Proto, for identification of the metric data.\n   Rule Name Description Configuration File Data Source     istio-controlplane Metrics of Istio Control Plane otel-oc-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   oap Metrics of SkyWalking OAP server itself otel-oc-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   vm Metrics of VMs otel-oc-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-cluster Metrics of K8s cluster otel-oc-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-node Metrics of K8s cluster otel-oc-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-service Metrics of K8s cluster otel-oc-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash;OC format\u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","title":"OpenTelemetry receiver","url":"/docs/main/v9.1.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler. otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in prometheus-fetcher. 
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc,otlp\u0026#34;}enabledOtelRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from Node.identifier.host_name defined in OpenCensus Agent Proto, or net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Rule Name Description Configuration File Data Source     istio-controlplane Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   oap Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   vm Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-cluster Metrics of K8s cluster otel-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-node Metrics of K8s cluster otel-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   k8s-service Metrics of K8s cluster otel-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   mysql Metrics of MYSQL otel-rules/mysql.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   postgresql Metrics of PostgreSQL otel-rules/postgresql.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","title":"OpenTelemetry receiver","url":"/docs/main/v9.2.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler. otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. 
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc,otlp\u0026#34;}enabledOtelRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from Node.identifier.host_name defined in OpenCensus Agent Proto, or net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","title":"OpenTelemetry receiver","url":"/docs/main/v9.3.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n oc: OpenCensus gRPC service handler. otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. 
Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the oc handler and relevant rules of istio:\nreceiver-otel:// Change selector value to default, for activating the otel receiver.selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;oc,otlp\u0026#34;}enabledOtelRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from Node.identifier.host_name defined in OpenCensus Agent Proto, or net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; 
OC/OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","title":"OpenTelemetry receiver","url":"/docs/main/v9.4.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry receiver The OpenTelemetry receiver supports ingesting agent metrics by meter-system. The OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP may fail to start up. The files are located at $CLASSPATH/otel-rules.\nSupported handlers:\n otlp: OpenTelemetry gRPC service handler.  Notice: Set SW_OTEL_RECEIVER=default through system environment or change receiver-otel/selector=${SW_OTEL_RECEIVER:default} to activate the OpenTelemetry receiver.\nThe rule file should be in YAML format, defined by the scheme described in MAL. Note: receiver-otel only supports the group, defaultMetricLevel, and metricsRules nodes of the scheme due to its push mode.\nTo activate the otlp handler and relevant rules of istio:\nreceiver-otel:selector:${SW_OTEL_RECEIVER:default}default:enabledHandlers:${SW_OTEL_RECEIVER_ENABLED_HANDLERS:\u0026#34;otlp\u0026#34;}enabledOtelMetricsRules:${SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES:\u0026#34;istio-controlplane\u0026#34;}The receiver adds label with key node_identifier_host_name to the collected data samples, and its value is from net.host.name (or host.name for some OTLP versions) resource attributes defined in OpenTelemetry proto, for identification of the metric data.\n   Description Configuration File Data Source     Metrics of Istio Control Plane otel-rules/istio-controlplane.yaml Istio Control Plane -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of SkyWalking OAP server itself otel-rules/oap.yaml SkyWalking OAP Server(SelfObservability) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of VMs otel-rules/vm.yaml Prometheus node-exporter(VMs) -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-cluster.yaml K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-node.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of K8s cluster otel-rules/k8s/k8s-service.yaml cAdvisor \u0026amp; K8s kube-state-metrics -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-instance.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of MYSQL otel-rules/mysql/mysql-service.yaml prometheus/mysqld_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-instance.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of PostgreSQL otel-rules/postgresql/postgresql-service.yaml postgres_exporter -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP 
Server   Metrics of Apache APISIX otel-rules/apisix.yaml apisix prometheus plugin -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-cluster.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-service.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server   Metrics of AWS Cloud EKS otel-rules/aws-eks/eks-node.yaml AWS Container Insights Receiver -\u0026gt; OpenTelemetry Collector \u0026ndash; OTLP exporter \u0026ndash;\u0026gt; SkyWalking OAP Server    Note: You can also use OpenTelemetry exporter to transport the metrics to SkyWalking OAP directly. See OpenTelemetry Exporter.\n","title":"OpenTelemetry receiver","url":"/docs/main/v9.5.0/en/setup/backend/opentelemetry-receiver/"},{"content":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml for config the zipkin.  Setup Query and Lens UI Please read deploy Lens UI documentation for query OTLP traces.\n","title":"OpenTelemetry Trace Format","url":"/docs/main/latest/en/setup/backend/otlp-trace/"},{"content":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml for config the zipkin.  Setup Query and Lens UI Please read deploy Lens UI documentation for query OTLP traces.\n","title":"OpenTelemetry Trace Format","url":"/docs/main/next/en/setup/backend/otlp-trace/"},{"content":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml for config the zipkin.  
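Put together, a minimal application.yml sketch for this setup could look as follows; the receiver-zipkin and query-zipkin module names are taken from the default application.yml and should be verified against your distribution:

receiver-otel:
  selector: default
  default:
    enabledHandlers: otlp-traces
receiver-zipkin:        # assumed module name; enables the Zipkin receiver
  selector: default
query-zipkin:           # assumed module name; enables Zipkin trace query
  selector: default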
Setup Query and Lens UI Please read deploy Lens UI documentation for query OTLP traces.\n","title":"OpenTelemetry Trace Format","url":"/docs/main/v9.6.0/en/setup/backend/otlp-trace/"},{"content":"OpenTelemetry Trace Format SkyWalking can receive traces from Traces in OTLP format and convert them to Zipkin Trace format eventually. For data analysis and queries related to Zipkin Trace, please refer to the relevant documentation.\nOTLP Trace handler references the Zipkin Exporter in the OpenTelemetry Collector to convert the data format.\nSet up backend receiver  Make sure to enable otlp-traces handler in OTLP receiver of application.yml.  receiver-otel:selector:defaultdefault:enabledHandlers:otlp-tracesMake sure to enable zipkin receiver and zipkin query in application.yml for config the zipkin.  Setup Query and Lens UI Please read deploy Lens UI documentation for query OTLP traces.\n","title":"OpenTelemetry Trace Format","url":"/docs/main/v9.7.0/en/setup/backend/otlp-trace/"},{"content":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released package or scratch The core CRDs the operator supports  Operator Deployment You could provision the operator from a binary package or build from sources.\nBinary Package  Go to the download page to download the latest release binary, skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin.tgz. Unarchive the package to a folder named skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin To install the operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin/config/operator-bundle.yaml Build from sources  Download released source package or clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Build docker image from scratch. If you prefer to your private docker image, a quick path to override OPERATOR_IMG environment variable : export OPERATOR_IMG=\u0026lt;private registry\u0026gt;/controller:\u0026lt;tag\u0026gt;  export OPERATOR_IMG=controller make -C operator docker-build Then, push this image controller:latest to a repository where the operator\u0026rsquo;s pod could pull from. If you use a local KinD cluster:\nkind load docker-image controller   Customize resource configurations based the templates laid in operator/config. 
We use kustomize to build them, please refer to kustomize in case you don\u0026rsquo;t familiar with its syntax.\n  Install the CRDs to Kubernetes:\n  make -C operator install  Use make to generate the final manifests and deploy:  make -C operator deploy Test your deployment  Deploy a sample OAP server, this will create an OAP server in the default namespace:  curl https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml | kubectl apply -f -  Check the OAP server in Kubernetes:  kubectl get oapserver  Check the UI server in Kubernetes:  kubectl get ui Troubleshooting If you encounter any issue, you can check the log of the controller by pulling it from Kubernetes:\n# get the pod name of your controller kubectl --namespace skywalking-swck-system get pods # pull the logs kubectl --namespace skywalking-swck-system logs -f [name_of_the_controller_pod] Custom Resource Define(CRD) The custom resources that the operator introduced are:\nJavaAgent The JavaAgent custom resource definition (CRD) declaratively defines a view to tracing the injection result.\nThe java-agent-injector creat JavaAgents once it injects agents into some workloads. Refer to Java Agent for more details.\nOAP The OAP custom resource definition (CRD) declaratively defines a desired OAP setup to run in a Kubernetes cluster. It provides options to configure environment variables and how to connect a Storage.\nUI The UI custom resource definition (CRD) declaratively defines a desired UI setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nStorage The Storage custom resource definition (CRD) declaratively defines a desired storage setup to run in a Kubernetes cluster. The Storage could be managed instances onboarded by the operator or an external service. The OAP has options to select which Storage it would connect.\n Caveat: Stroage only supports the Elasticsearch.\n Satellite The Satellite custom resource definition (CRD) declaratively defines a desired Satellite setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nFetcher The Fetcher custom resource definition (CRD) declaratively defines a desired Fetcher setup to run in a Kubernetes cluster. It provides options to configure OpenTelemetry collector, which fetches metrics to the deployed OAP.\nExamples of the Operator There are some instant examples to represent the functions or features of the Operator.\n Deploy OAP server and UI with default settings Fetch metrics from the Istio control plane(istiod) Inject the java agent to pods Deploy a storage Deploy a Satellite  ","title":"Operator Usage Guide","url":"/docs/skywalking-swck/latest/operator/"},{"content":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released package or scratch The core CRDs the operator supports  Operator Deployment You could provision the operator from a binary package or build from sources.\nBinary Package  Go to the download page to download the latest release binary, skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin.tgz. Unarchive the package to a folder named skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin To install the operator in an existing cluster, make sure you have cert-manager installed. 
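For reference, cert-manager is commonly installed by applying its published static manifest; the version number below is only an example, so take the current release from the cert-manager project before running it.
# install cert-manager from its released static manifest (version is illustrative)
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml
# confirm the cert-manager pods are running before applying the operator bundle
kubectl --namespace cert-manager get pods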
Apply the manifests for the Controller and CRDs in config:  kubectl apply -f skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin/config/operator-bundle.yaml Build from sources  Download released source package or clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Build docker image from scratch. If you prefer to your private docker image, a quick path to override OPERATOR_IMG environment variable : export OPERATOR_IMG=\u0026lt;private registry\u0026gt;/controller:\u0026lt;tag\u0026gt;  export OPERATOR_IMG=controller make -C operator docker-build Then, push this image controller:latest to a repository where the operator\u0026rsquo;s pod could pull from. If you use a local KinD cluster:\nkind load docker-image controller   Customize resource configurations based the templates laid in operator/config. We use kustomize to build them, please refer to kustomize in case you don\u0026rsquo;t familiar with its syntax.\n  Install the CRDs to Kubernetes:\n  make -C operator install  Use make to generate the final manifests and deploy:  make -C operator deploy Test your deployment  Deploy a sample OAP server, this will create an OAP server in the default namespace:  curl https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml | kubectl apply -f -  Check the OAP server in Kubernetes:  kubectl get oapserver  Check the UI server in Kubernetes:  kubectl get ui Troubleshooting If you encounter any issue, you can check the log of the controller by pulling it from Kubernetes:\n# get the pod name of your controller kubectl --namespace skywalking-swck-system get pods # pull the logs kubectl --namespace skywalking-swck-system logs -f [name_of_the_controller_pod] Custom Resource Define(CRD) The custom resources that the operator introduced are:\nJavaAgent The JavaAgent custom resource definition (CRD) declaratively defines a view to tracing the injection result.\nThe java-agent-injector creat JavaAgents once it injects agents into some workloads. Refer to Java Agent for more details.\nOAP The OAP custom resource definition (CRD) declaratively defines a desired OAP setup to run in a Kubernetes cluster. It provides options to configure environment variables and how to connect a Storage.\nUI The UI custom resource definition (CRD) declaratively defines a desired UI setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nStorage The Storage custom resource definition (CRD) declaratively defines a desired storage setup to run in a Kubernetes cluster. The Storage could be managed instances onboarded by the operator or an external service. The OAP has options to select which Storage it would connect.\n Caveat: Stroage only supports the Elasticsearch.\n Satellite The Satellite custom resource definition (CRD) declaratively defines a desired Satellite setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nFetcher The Fetcher custom resource definition (CRD) declaratively defines a desired Fetcher setup to run in a Kubernetes cluster. 
It provides options to configure OpenTelemetry collector, which fetches metrics to the deployed OAP.\nExamples of the Operator There are some instant examples to represent the functions or features of the Operator.\n Deploy OAP server and UI with default settings Fetch metrics from the Istio control plane(istiod) Inject the java agent to pods Deploy a storage Deploy a Satellite  ","title":"Operator Usage Guide","url":"/docs/skywalking-swck/next/operator/"},{"content":"Operator Usage Guide In this guide, you will learn:\n How to deploy the operator from a released package or scratch The core CRDs the operator supports  Operator Deployment You could provision the operator from a binary package or build from sources.\nBinary Package  Go to the download page to download the latest release binary, skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin.tgz. Unarchive the package to a folder named skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin To install the operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f skywalking-swck-\u0026lt;SWCK_VERSION\u0026gt;-bin/config/operator-bundle.yaml Build from sources  Download released source package or clone the source code:  git clone git@github.com:apache/skywalking-swck.git  Build docker image from scratch. If you prefer to your private docker image, a quick path to override OPERATOR_IMG environment variable : export OPERATOR_IMG=\u0026lt;private registry\u0026gt;/controller:\u0026lt;tag\u0026gt;  export OPERATOR_IMG=controller make -C operator docker-build Then, push this image controller:latest to a repository where the operator\u0026rsquo;s pod could pull from. If you use a local KinD cluster:\nkind load docker-image controller   Customize resource configurations based the templates laid in operator/config. We use kustomize to build them, please refer to kustomize in case you don\u0026rsquo;t familiar with its syntax.\n  Install the CRDs to Kubernetes:\n  make -C operator install  Use make to generate the final manifests and deploy:  make -C operator deploy Test your deployment  Deploy a sample OAP server, this will create an OAP server in the default namespace:  curl https://raw.githubusercontent.com/apache/skywalking-swck/master/operator/config/samples/default.yaml | kubectl apply -f -  Check the OAP server in Kubernetes:  kubectl get oapserver  Check the UI server in Kubernetes:  kubectl get ui Troubleshooting If you encounter any issue, you can check the log of the controller by pulling it from Kubernetes:\n# get the pod name of your controller kubectl --namespace skywalking-swck-system get pods # pull the logs kubectl --namespace skywalking-swck-system logs -f [name_of_the_controller_pod] Custom Resource Define(CRD) The custom resources that the operator introduced are:\nJavaAgent The JavaAgent custom resource definition (CRD) declaratively defines a view to tracing the injection result.\nThe java-agent-injector creat JavaAgents once it injects agents into some workloads. Refer to Java Agent for more details.\nOAP The OAP custom resource definition (CRD) declaratively defines a desired OAP setup to run in a Kubernetes cluster. It provides options to configure environment variables and how to connect a Storage.\nUI The UI custom resource definition (CRD) declaratively defines a desired UI setup to run in a Kubernetes cluster. 
It provides options for how to connect an OAP.\nStorage The Storage custom resource definition (CRD) declaratively defines a desired storage setup to run in a Kubernetes cluster. The Storage could be managed instances onboarded by the operator or an external service. The OAP has options to select which Storage it would connect.\n Caveat: Stroage only supports the Elasticsearch.\n Satellite The Satellite custom resource definition (CRD) declaratively defines a desired Satellite setup to run in a Kubernetes cluster. It provides options for how to connect an OAP.\nFetcher The Fetcher custom resource definition (CRD) declaratively defines a desired Fetcher setup to run in a Kubernetes cluster. It provides options to configure OpenTelemetry collector, which fetches metrics to the deployed OAP.\nExamples of the Operator There are some instant examples to represent the functions or features of the Operator.\n Deploy OAP server and UI with default settings Fetch metrics from the Istio control plane(istiod) Inject the java agent to pods Deploy a storage Deploy a Satellite  ","title":"Operator Usage Guide","url":"/docs/skywalking-swck/v0.9.0/operator/"},{"content":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins and expired-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known 2 kinds of optional plugins.\nOptional Level 2 Plugins These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x and 4.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. 
The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  Optional Level 3 Plugins. Expired Plugins These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\nWarning, there is no guarantee of working and maintenance. The committer team may remove them from the agent package in the future without further notice.\n Plugin of Spring Impala 2.6.x was tested through parrot-stream released images. The images are not available since Mar. 2024. This plugin is expired due to lack of testing.  ","title":"Optional Plugins","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/optional-plugins/"},{"content":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins and expired-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known 2 kinds of optional plugins.\nOptional Level 2 Plugins These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. 
Plugin of Spring Cloud Gateway 2.x and 3.x and 4.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  Optional Level 3 Plugins. Expired Plugins These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\nWarning, there is no guarantee of working and maintenance. The committer team may remove them from the agent package in the future without further notice.\n Plugin of Spring Impala 2.6.x was tested through parrot-stream released images. The images are not available since Mar. 2024. This plugin is expired due to lack of testing.  
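To make the activation step concrete: enabling an optional plugin is only a file copy inside the unpacked agent directory. The jar name below is illustrative; list optional-plugins/ in your own distribution to see the exact file names shipped with that release.
# run inside the unpacked java agent directory (layout may vary between releases)
ls optional-plugins/                                              # see which optional plugins are shipped
cp optional-plugins/apm-spring-annotation-plugin-*.jar plugins/   # activate the Spring annotation plugin (example)
# restart the instrumented service so the agent loads the newly copied plugin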
","title":"Optional Plugins","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/optional-plugins/"},{"content":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known optional plugins.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. 
Plugin of nacos-client 2.x lib in optional plugin folder.The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network.  ","title":"Optional Plugins","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/optional-plugins/"},{"content":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known optional plugins.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). 
Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  ","title":"Optional Plugins","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/optional-plugins/"},{"content":"Optional Plugins Java agent plugins are all pluggable. Optional plugins could be provided in optional-plugins and expired-plugins folder under agent or 3rd party repositories. For using these plugins, you need to put the target plugin jar file into /plugins.\nNow, we have the following known 2 kinds of optional plugins.\nOptional Level 2 Plugins These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n Plugin of tracing Spring annotation beans Plugin of tracing Oracle and Resin Filter traces through specified endpoint name patterns Plugin of Gson serialization lib in optional plugin folder. Plugin of Zookeeper 3.4.x in optional plugin folder. The reason of being optional plugin is, many business irrelevant traces are generated, which cause extra payload to agents and backends. At the same time, those traces may be just heartbeat(s). Customize enhance Trace methods based on description files, rather than write plugin or change source codes. Plugin of Spring Cloud Gateway 2.x and 3.x and 4.x in optional plugin folder. Please only activate this plugin when you install agent in Spring Gateway. Plugin of Spring Transaction in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of Kotlin coroutine provides the tracing across coroutines automatically. As it will add local spans to all across routines scenarios, Please assess the performance impact. Plugin of quartz-scheduler-2.x in the optional plugin folder. The reason for being an optional plugin is, many task scheduling systems are based on quartz-scheduler, this will cause duplicate tracing and link different sub-tasks as they share the same quartz level trigger, such as ElasticJob. Plugin of spring-webflux-5.x in the optional plugin folder. Please only activate this plugin when you use webflux alone as a web container. If you are using SpringMVC 5 or Spring Gateway, you don\u0026rsquo;t need this plugin. Plugin of mybatis-3.x in optional plugin folder. The reason of being optional plugin is, many local span are generated, which also spend more CPU, memory and network. Plugin of sentinel-1.x in the optional plugin folder. The reason for being an optional plugin is, the sentinel plugin generates a large number of local spans, which have a potential performance impact. Plugin of ehcache-2.x in the optional plugin folder. The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of guava-cache in the optional plugin folder. 
The reason for being an optional plugin is, this plugin enhanced cache framework, generates large number of local spans, which have a potential performance impact. Plugin of fastjson serialization lib in optional plugin folder. Plugin of jackson serialization lib in optional plugin folder. Plugin of Apache ShenYu(incubating) Gateway 2.4.x in optional plugin folder. Please only activate this plugin when you install agent in Apache ShenYu Gateway. Plugin of trace sampler CPU policy in the optional plugin folder. Please only activate this plugin when you need to disable trace collecting when the agent process CPU usage is too high(over threshold). Plugin for Spring 6.x and RestTemplate 6.x are in the optional plugin folder. Spring 6 requires Java 17 but SkyWalking is still compatible with Java 8. So, we put it in the optional plugin folder. Plugin of nacos-client 2.x lib in optional plugin folder. The reason is many business irrelevant traces are generated, which cause extra payload to agents and backends, also spend more CPU, memory and network. Plugin of netty-http 4.1.x lib in optional plugin folder. The reason is some frameworks use Netty HTTP as kernel, which could double the unnecessary spans and create incorrect RPC relative metrics.  Optional Level 3 Plugins. Expired Plugins These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\nWarning, there is no guarantee of working and maintenance. The committer team may remove them from the agent package in the future without further notice.\n Plugin of Spring Impala 2.6.x was tested through parrot-stream released images. The images are not available since Mar. 2024. This plugin is expired due to lack of testing.  ","title":"Optional Plugins","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/optional-plugins/"},{"content":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","title":"Oracle and Resin plugins","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/"},{"content":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","title":"Oracle and Resin plugins","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/"},{"content":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. 
If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","title":"Oracle and Resin plugins","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/"},{"content":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","title":"Oracle and Resin plugins","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/"},{"content":"Oracle and Resin plugins These plugins can\u0026rsquo;t be provided in Apache release because of Oracle and Resin Licenses. If you want to know details, please read Apache license legal document\nDue to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to OpenSkywalking java plugin extension repository to get these.\n","title":"Oracle and Resin plugins","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/oracle-resin-plugins/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. 
In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using Zipkin. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry, Telegraf.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/latest/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nSkyWalking covers all the observability needs in Cloud Native world, including:\n Tracing. SkyWalking native data formats, and Zipkin traces of v1 and v2 formats are supported. Metrics. SkyWalking supports mature metrics formats, including native meter format, OTEL metrics format, and Telegraf format. SkyWalking integrates with Service Mesh platforms, typically Istio and Envoy, to build observability into the data plane or control plane. Also, SkyWalking native agents can run in the metrics mode, which greatly improves performances. Logging. Includes logs collected from disk or through network. Native agents could bind the tracing context with logs automatically, or use SkyWalking to bind the trace and log through the text content. Profiling. Profiling is a powerful tool to help developers understand the performance of their applications from lines of codes perspective. SkyWalking provides profiling feature bundled in native language agents and independent ebpf agents. Event. Event is a special kind of data, which is used to record the important moments in the system, such as version upgrade, configuration change, etc. Linking the events with metrics could help on explain the peaks or valleys in the metrics, and linking the events with traces and logs could help on troubleshooting root cause.  Why use SkyWalking? 
SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Python, and manually SDKs for C++, Rust, and Nginx LUA. In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy, allowing users to understand the entire distributed system. Powered by eBPF stack, SkyWalking provides k8s monitoring. Also, by adopting OpenTelemetry, Telegraf, Zabbix, Zipkin, Prometheus, SkyWalking can integrate with other distributed tracing, metrics and logging systems and build a unified APM system to host all data.\nBesides the support of various kinds of telemetry formats, the hierarchy structure of objects in SkyWalking is defined as service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), and Kubernetes(k8s layer). A layer is an abstract collection of services. A service typically only belongs to one layer, but in some scenarios, a service could belong to multiple layers. For example, a service could be deployed in an Istio service mesh, it could belong to mesh and mesh-dp(mesh data plane) layer. Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, also detect API dependencies in the distributed environment if you use our native agents.,\nBesides topology map, SkyWalking provides Service Hierarchy Relationship , which defines the relationships of existing logically same services in various layers. For example, a service could be deployed in a Kubernetes cluster with Istio mesh, services are detected by k8s monitoring and Istio mesh, this hierarchy relationship could connect the services in k8s layer and mesh layer.\nArchitecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. 
You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/next/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect data and reformat them for SkyWalking requirements (different probes support different sources). 
Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, and logs. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, InfluxDB, or implement your own. Patches for new storage implementors welcome! UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.0.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  
Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect data and reformat them for SkyWalking requirements (different probes support different sources). Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, and logs. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, InfluxDB, or implement your own. Patches for new storage implementors welcome! UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.1.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). 
All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.2.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  
SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.3.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. 
A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin, Jaeger and OpenCensus. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.4.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. 
Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin and Jaeger. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.5.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s), process(s). 
The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a process, such as a pod Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). All detected instances belong to a layer to represent the running environment of this instance, the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using SkyWalking native agents and SDKs with Zipkin and Jaeger. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats(SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.) Platform backend supports data aggregation, analysis and streaming process covers traces, metrics, logs and events. Work as Aggregator Role, Receiver Role or both. Storage houses SkyWalking data through an open/plugable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture in default?  ","title":"Overview","url":"/docs/main/v9.6.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking is an open source observability platform used to collect, analyze, aggregate and visualize data from services and cloud native infrastructures. SkyWalking provides an easy way to maintain a clear view of your distributed systems, even across Clouds. It is a modern APM, specially designed for cloud native, container based distributed systems.\nWhy use SkyWalking? SkyWalking provides solutions for observing and monitoring distributed systems, in many different scenarios. First of all, like traditional approaches, SkyWalking provides auto instrument agents for services, such as Java, C#, Node.js, Go, PHP and Nginx LUA. (with calls out for Python and C++ SDK contributions). In multi-language, continuously deployed environments, cloud native infrastructures grow more powerful but also more complex. 
SkyWalking\u0026rsquo;s service mesh receiver allows SkyWalking to receive telemetry data from service mesh frameworks such as Istio/Envoy and Linkerd, allowing users to understand the entire distributed system.\nSkyWalking provides observability capabilities for service(s), service instance(s), endpoint(s) and process(es). The terms Service, Instance and Endpoint are used everywhere today, so it is worth defining their specific meanings in the context of SkyWalking:\n Service. Represents a set/group of workloads which provide the same behaviours for incoming requests. You can define the service name when you are using instrument agents or SDKs. SkyWalking can also use the name you define in platforms such as Istio. Service Instance. Each individual workload in the Service group is known as an instance. Like pods in Kubernetes, it doesn\u0026rsquo;t need to be a single OS process; however, if you are using instrument agents, an instance is actually a real OS process. Endpoint. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature. Process. An operating system process. In some scenarios, a Service Instance is not a single process; for example, a pod in Kubernetes could contain multiple processes.  SkyWalking allows users to understand the topology relationship between Services and Endpoints, to view the metrics of every Service/Service Instance/Endpoint and to set alarm rules.\nStarting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as the Operating System (OS_LINUX layer) or Kubernetes (k8s layer). All detected instances belong to a layer to represent the running environment of this instance; the service would have one or multiple layer definitions according to its instances.\nIn addition, you can integrate\n Other distributed tracing using Zipkin. Other metrics systems, such as Prometheus, Sleuth(Micrometer), OpenTelemetry, Telegraf.  Architecture SkyWalking is logically split into four parts: Probes, Platform backend, Storage and UI.\n Probes collect telemetry data, including metrics, traces, logs and events in various formats (SkyWalking, Zipkin, OpenTelemetry, Prometheus, Zabbix, etc.). Platform backend supports data aggregation, analysis, and a streaming process that covers traces, metrics, logs and events. It can work as the Aggregator Role, the Receiver Role, or both. Storage houses SkyWalking data through an open/pluggable interface. You can choose an existing implementation, such as ElasticSearch, H2, MySQL, TiDB, BanyanDB, or implement your own. UI is a highly customizable web-based interface allowing SkyWalking end users to visualize and manage SkyWalking data.  What is next?  Learn SkyWalking\u0026rsquo;s Project Goals FAQ, Why doesn\u0026rsquo;t SkyWalking involve MQ in the architecture by default?  ","title":"Overview","url":"/docs/main/v9.7.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking Rover is an open-source collector that provides an eBPF-based monitor and profiler for Kubernetes.\nWhy use SkyWalking Rover? The Kubernetes platform exposes a large amount of telemetry data. Rover collects it with eBPF technology and uploads it to the SkyWalking backend to be analyzed, aggregated, and visualized.\n eBPF-based profiling for C, C++, Golang, and Rust. Network profiling for L4(TCP) and L7(HTTP) traffic, including TLS. Tracing enhancement. 
Collects extra OS-level information as attached events for the existing tracing system, such as the raw data of HTTP requests and responses. Network monitoring for generating network access logs.  Architecture  Process represents the data monitored by Rover. Rover is deployed in the VM instance, collects data from the VM and its processes, and reports it to the OAP cluster. OAP receives data from the Rover side, analyzes it, and stores it.  ","title":"Overview","url":"/docs/skywalking-rover/latest/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking Rover is an open-source collector that provides an eBPF-based monitor and profiler for Kubernetes.\nWhy use SkyWalking Rover? The Kubernetes platform exposes a large amount of telemetry data. Rover collects it with eBPF technology and uploads it to the SkyWalking backend to be analyzed, aggregated, and visualized.\n eBPF-based profiling for C, C++, Golang, and Rust. Network profiling for L4(TCP) and L7(HTTP) traffic, including TLS. Tracing enhancement. Collects extra OS-level information as attached events for the existing tracing system, such as the raw data of HTTP requests and responses. Network monitoring for generating network access logs.  Architecture  Process represents the data monitored by Rover. Rover is deployed in the VM instance, collects data from the VM and its processes, and reports it to the OAP cluster. OAP receives data from the Rover side, analyzes it, and stores it.  ","title":"Overview","url":"/docs/skywalking-rover/next/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking Rover is an open-source collector that provides an eBPF-based monitor and profiler for Kubernetes.\nWhy use SkyWalking Rover? The Kubernetes platform exposes a large amount of telemetry data. Rover collects it with eBPF technology and uploads it to the SkyWalking backend to be analyzed, aggregated, and visualized.\n eBPF-based profiling for C, C++, Golang, and Rust. Network profiling for L4(TCP) and L7(HTTP) traffic, including TLS. Tracing enhancement. Collects extra OS-level information as attached events for the existing tracing system, such as the raw data of HTTP requests and responses. Network monitoring for generating network access logs.  Architecture  Process represents the data monitored by Rover. Rover is deployed in the VM instance, collects data from the VM and its processes, and reports it to the OAP cluster. OAP receives data from the Rover side, analyzes it, and stores it.  ","title":"Overview","url":"/docs/skywalking-rover/v0.6.0/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking Satellite: an open-source agent designed for cloud-native infrastructures, which provides a low-cost, highly efficient, and more secure way to collect telemetry data, such as Trace Segments, Logs, or Metrics.\nWhy use SkyWalking Satellite? Observability is the solution to the complexity of cloud-native services. However, we may encounter different telemetry data scenarios, different language services, big data analysis, etc. Satellite provides a unified data collection layer for cloud-native services. You can easily use it to connect to the SkyWalking ecosystem and extend the capacity of SkyWalking. Satellite offers the following enhanced features.\n Provide a unified data collection layer to collect logs, traces, and metrics. Provide a safer local cache to reduce the memory cost of the service. 
Provide the unified transfer way shields the functional differences in the different language libs, such as MQ. Provides the preprocessing functions to ensure accuracy of the metrics, such as sampling.  Architecture SkyWalking Satellite is logically split into three parts: Gatherer, Processor, and Sender.\n Gatherer collect data and reformat them for SkyWalking requirements. Processor processes the input data to generate the new data for Observability. Sender would transfer the downstream data to the SkyWalking OAP with different protocols.  ","title":"Overview","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, which provides a low-cost, high-efficient, and more secure way to collect telemetry data, such that Trace Segments, Logs, or Metrics.\nWhy use SkyWalking Satellite? Observability is the solution to the complex scenario of cloud-native services. However, we may encounter different telemetry data scenarios, different language services, big data analysis, etc. Satellite provides a unified data collection layer for cloud-native services. You can easily use it to connect to the SkyWalking ecosystem and enhance the capacity of SkyWalking. There are some enhance features on the following when using Satellite.\n Provide a unified data collection layer to collect logs, traces, and metrics. Provide a safer local cache to reduce the memory cost of the service. Provide the unified transfer way shields the functional differences in the different language libs, such as MQ. Provides the preprocessing functions to ensure accuracy of the metrics, such as sampling.  Architecture SkyWalking Satellite is logically split into three parts: Gatherer, Processor, and Sender.\n Gatherer collect data and reformat them for SkyWalking requirements. Processor processes the input data to generate the new data for Observability. Sender would transfer the downstream data to the SkyWalking OAP with different protocols.  ","title":"Overview","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/overview/"},{"content":"Overview SkyWalking Satellite: an open-source agent designed for the cloud-native infrastructures, which provides a low-cost, high-efficient, and more secure way to collect telemetry data, such that Trace Segments, Logs, or Metrics.\nWhy use SkyWalking Satellite? Observability is the solution to the complex scenario of cloud-native services. However, we may encounter different telemetry data scenarios, different language services, big data analysis, etc. Satellite provides a unified data collection layer for cloud-native services. You can easily use it to connect to the SkyWalking ecosystem and enhance the capacity of SkyWalking. There are some enhance features on the following when using Satellite.\n Provide a unified data collection layer to collect logs, traces, and metrics. Provide a safer local cache to reduce the memory cost of the service. Provide the unified transfer way shields the functional differences in the different language libs, such as MQ. Provides the preprocessing functions to ensure accuracy of the metrics, such as sampling.  Architecture SkyWalking Satellite is logically split into three parts: Gatherer, Processor, and Sender.\n Gatherer collect data and reformat them for SkyWalking requirements. Processor processes the input data to generate the new data for Observability. 
Sender would transfer the downstream data to the SkyWalking OAP with different protocols.  ","title":"Overview","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/overview/"},{"content":"Performance best practices  Following changes are expected in the next official release (v1.1.0).\n The Python agent currently uses a number of threads to communicate with SkyWalking OAP, it is planned to be refactored using AsyncIO (Uvloop) along with an async version of gRPC(aio-client)/HTTP(aiohttp/httpx)/Kafka(aio-kafka) to further minimize the cost of thread switching and IO time.\nFor now, we still have a few points to mention to keep the overhead to your application minimal.\n When using the gRPC protocol to report data, a higher version of gRPC is always recommended. Please also make sure that:  By running python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._implementation_type)\u0026quot;, or python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._default_implementation_type)\u0026quot; you should either see upb or cpp as the returned value. It means the Protobuf library is using a much faster implementation than Python native. If not, try setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION='cpp' or 'upb' or upgrade the gRPC dependency (SkyWalking Python will use whatever version your application uses).   Though HTTP is provided as an alternative, it could be slower compared to other protocols, Kafka is often a good choice when gRPC is not suitable. When some features are not needed in your use case, you could turn them off either via config.init(agent_some_reporter_active=False) or environment variables. Use ignore_path, ignore_method, and log filters to avoid reporting less valuable data that is of large amount. Log reporter safe mode is designed for situations where HTTP basic auth info could be visible in traceback and logs but shouldn\u0026rsquo;t be reported to OAP. You should keep the option as OFF if it\u0026rsquo;s not your case because frequent regular expression searches will inevitably introduce overhead to the CPU. Do not turn on sw-python CLI or agent debug logging in production, otherwise large amount of log will be produced.  sw-python CLI debug mode will automatically turn on agent debug log (override from sitecustomize.py).    ","title":"Performance best practices","url":"/docs/skywalking-python/latest/en/setup/faq/performance/"},{"content":"Performance best practices  Following changes are expected in the next official release (v1.1.0).\n The Python agent currently uses a number of threads to communicate with SkyWalking OAP, it is planned to be refactored using AsyncIO (Uvloop) along with an async version of gRPC(aio-client)/HTTP(aiohttp/httpx)/Kafka(aio-kafka) to further minimize the cost of thread switching and IO time.\nFor now, we still have a few points to mention to keep the overhead to your application minimal.\n When using the gRPC protocol to report data, a higher version of gRPC is always recommended. Please also make sure that:  By running python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._implementation_type)\u0026quot;, or python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._default_implementation_type)\u0026quot; you should either see upb or cpp as the returned value. It means the Protobuf library is using a much faster implementation than Python native. 
If not, try setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION='cpp' or 'upb' or upgrade the gRPC dependency (SkyWalking Python will use whatever version your application uses).   Though HTTP is provided as an alternative, it could be slower compared to other protocols, Kafka is often a good choice when gRPC is not suitable. When some features are not needed in your use case, you could turn them off either via config.init(agent_some_reporter_active=False) or environment variables. Use ignore_path, ignore_method, and log filters to avoid reporting less valuable data that is of large amount. Log reporter safe mode is designed for situations where HTTP basic auth info could be visible in traceback and logs but shouldn\u0026rsquo;t be reported to OAP. You should keep the option as OFF if it\u0026rsquo;s not your case because frequent regular expression searches will inevitably introduce overhead to the CPU. Do not turn on sw-python CLI or agent debug logging in production, otherwise large amount of log will be produced.  sw-python CLI debug mode will automatically turn on agent debug log (override from sitecustomize.py).    ","title":"Performance best practices","url":"/docs/skywalking-python/next/en/setup/faq/performance/"},{"content":"Performance best practices  Following changes are expected in the next official release (v1.1.0).\n The Python agent currently uses a number of threads to communicate with SkyWalking OAP, it is planned to be refactored using AsyncIO (Uvloop) along with an async version of gRPC(aio-client)/HTTP(aiohttp/httpx)/Kafka(aio-kafka) to further minimize the cost of thread switching and IO time.\nFor now, we still have a few points to mention to keep the overhead to your application minimal.\n When using the gRPC protocol to report data, a higher version of gRPC is always recommended. Please also make sure that:  By running python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._implementation_type)\u0026quot;, or python -c \u0026quot;from google.protobuf.internal import api_implementation; print(api_implementation._default_implementation_type)\u0026quot; you should either see upb or cpp as the returned value. It means the Protobuf library is using a much faster implementation than Python native. If not, try setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION='cpp' or 'upb' or upgrade the gRPC dependency (SkyWalking Python will use whatever version your application uses).   Though HTTP is provided as an alternative, it could be slower compared to other protocols, Kafka is often a good choice when gRPC is not suitable. When some features are not needed in your use case, you could turn them off either via config.init(agent_some_reporter_active=False) or environment variables. Use ignore_path, ignore_method, and log filters to avoid reporting less valuable data that is of large amount. Log reporter safe mode is designed for situations where HTTP basic auth info could be visible in traceback and logs but shouldn\u0026rsquo;t be reported to OAP. You should keep the option as OFF if it\u0026rsquo;s not your case because frequent regular expression searches will inevitably introduce overhead to the CPU. Do not turn on sw-python CLI or agent debug logging in production, otherwise large amount of log will be produced.  sw-python CLI debug mode will automatically turn on agent debug log (override from sitecustomize.py).    
","title":"Performance best practices","url":"/docs/skywalking-python/v1.0.1/en/setup/faq/performance/"},{"content":"Performance Tests Performance testing is used to verify the impact on application performance when using SkyWalking Go.\nTest Objective By launching both the agent and non-agent compiled applications, we subject them to the same QPS under stress testing, evaluating the CPU, memory, and network latency of the machine during the testing period.\nThe application has been saved and submitted to the test/benchmark-codebase directory, with the following topology:\ntraffic generator -\u0026gt; consumer -\u0026gt; provider The payload(traffic) generator uses multithreading to send HTTP requests to the consumer service. When the consumer receives a request, it sends three requests to the provider service to obtain return data results. Based on these network requests, when using SkyWalking Go, the consumer service generates four Spans (1 Entry Span, 3 Exit Spans).\nApplication The application\u0026rsquo;s integration with SkyWalking Go follows the same process as other applications. For more information, please refer to the documentation.\nIn the application, we use loops and mathematical calculations (math.Log) to simulate the execution of the business program. This consumes a certain amount of CPU usage, preventing idle processing during service stress testing and amplifying the impact of the Agent program on the business application.\nStress Testing Service We use the Vegeta service for stress testing, which launches traffic at a specified QPS to the application. It is based on the Go language and uses goroutines to provide a more efficient stress testing solution.\nTest Environment A total of 4 GCP machines are launched, all instances are running on tbe 4C8G VM.\n traffic generator: Used for deploying traffic to the consumer machine. consumer: Used for deploying the consumer service. provider: Used for deploying the provider service. skywalking: Used for deploying the SkyWalking backend cluster, providing a standalone OAP node (in-memory H2 storage) and a UI interface.  Each service is deployed on a separate machine to ensure there is no interference with one another.\nTest Process Preparation Phase The preparation phase is used to ensure that all machines and test case preparations are completed.\nTraffic Generator Install the Vegeta service on the stress testing instance and create the following file(request.txt) to simulate traffic usage.\nGET http://${CONSUMER_IP}:8080/consumer Sw8: 1-MWYyZDRiZjQ3YmY3MTFlYWI3OTRhY2RlNDgwMDExMjI=-MWU3YzIwNGE3YmY3MTFlYWI4NThhY2RlNDgwMDExMjI=-0-c2VydmljZQ==-aW5zdGFuY2U=-cHJvcGFnYXRpb24=-cHJvcGFnYXRpb246NTU2Ng== Please replace the above CONSUMER_IP with the real IP address of the consumer instance.\nConsumer and Provider Install the skywalking-go service on the machines to be tested, and compile with and without the Agent.\nModify the machine\u0026rsquo;s file limit to prevent the inability to create new connections due to excessive handles: ulimit -n 65536.\nStart the provider service(without Agent) and obtain the provider machine\u0026rsquo;s IP address. Please provide this address when starting the consumer machine later.\nSkyWalking Download the SkyWalking service, modify the SkyWalking OAP startup script to increase the memory size, preventing OAP crashes due to insufficient memory.\nTesting without Agent  Start the Consumer service without the Agent version. 
Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Testing with Agent The only difference in the test without the Agent is the version of the consumer that is compiled and launched.\n Add the SW_AGENT_REPORTER_GRPC_BACKEND_SERVICE environment variables to the consumer service, for setting the IP address of the SkyWalking OAP service. Start the Consumer service with the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Test Results In the tests, we used 1000 QPS as a benchmark to stress test both the Consumer services with and without the Agent.\n In the non-Agent version, the CPU usage was around 74%, memory usage was 2.53%, and the average response time for a single request was 4.18ms. In the Agent-compiled version, the CPU usage was around 81%, memory usage was 2.61%, and the average response time for a single request was 4.32ms.  From these results, we can conclude that after adding the Agent, the CPU usage increased by about 9%, memory usage experienced almost no growth, and the average response time for requests increased by approximately 0.15ms.\nExplanation, approximately 0.15ms is the in-band cost. The most of CPU(extra 9%) cost are due to the amount of out of band data being sent to the collectors from the application(consumer), which is 4000 spans/s in our test case.\n","title":"Performance Tests","url":"/docs/skywalking-go/latest/en/agent/performance-tests/"},{"content":"Performance Tests Performance testing is used to verify the impact on application performance when using SkyWalking Go.\nTest Objective By launching both the agent and non-agent compiled applications, we subject them to the same QPS under stress testing, evaluating the CPU, memory, and network latency of the machine during the testing period.\nThe application has been saved and submitted to the test/benchmark-codebase directory, with the following topology:\ntraffic generator -\u0026gt; consumer -\u0026gt; provider The payload(traffic) generator uses multithreading to send HTTP requests to the consumer service. When the consumer receives a request, it sends three requests to the provider service to obtain return data results. Based on these network requests, when using SkyWalking Go, the consumer service generates four Spans (1 Entry Span, 3 Exit Spans).\nApplication The application\u0026rsquo;s integration with SkyWalking Go follows the same process as other applications. For more information, please refer to the documentation.\nIn the application, we use loops and mathematical calculations (math.Log) to simulate the execution of the business program. 
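As a rough illustration of that simulated business work, a loop of math.Log calls in Go might look like the sketch below; the iteration count here is an arbitrary assumption, not the value used in the benchmark.
package main

import (
	"fmt"
	"math"
)

// simulateWork burns CPU with math.Log in a loop, in the spirit of the
// benchmark application described above. The iteration count is arbitrary.
func simulateWork(iterations int) float64 {
	sum := 0.0
	for i := 1; i <= iterations; i++ {
		sum += math.Log(float64(i))
	}
	return sum
}

func main() {
	fmt.Println(simulateWork(100000))
}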
This consumes a certain amount of CPU usage, preventing idle processing during service stress testing and amplifying the impact of the Agent program on the business application.\nStress Testing Service We use the Vegeta service for stress testing, which launches traffic at a specified QPS to the application. It is based on the Go language and uses goroutines to provide a more efficient stress testing solution.\nTest Environment A total of 4 GCP machines are launched, all instances are running on tbe 4C8G VM.\n traffic generator: Used for deploying traffic to the consumer machine. consumer: Used for deploying the consumer service. provider: Used for deploying the provider service. skywalking: Used for deploying the SkyWalking backend cluster, providing a standalone OAP node (in-memory H2 storage) and a UI interface.  Each service is deployed on a separate machine to ensure there is no interference with one another.\nTest Process Preparation Phase The preparation phase is used to ensure that all machines and test case preparations are completed.\nTraffic Generator Install the Vegeta service on the stress testing instance and create the following file(request.txt) to simulate traffic usage.\nGET http://${CONSUMER_IP}:8080/consumer Sw8: 1-MWYyZDRiZjQ3YmY3MTFlYWI3OTRhY2RlNDgwMDExMjI=-MWU3YzIwNGE3YmY3MTFlYWI4NThhY2RlNDgwMDExMjI=-0-c2VydmljZQ==-aW5zdGFuY2U=-cHJvcGFnYXRpb24=-cHJvcGFnYXRpb246NTU2Ng== Please replace the above CONSUMER_IP with the real IP address of the consumer instance.\nConsumer and Provider Install the skywalking-go service on the machines to be tested, and compile with and without the Agent.\nModify the machine\u0026rsquo;s file limit to prevent the inability to create new connections due to excessive handles: ulimit -n 65536.\nStart the provider service(without Agent) and obtain the provider machine\u0026rsquo;s IP address. Please provide this address when starting the consumer machine later.\nSkyWalking Download the SkyWalking service, modify the SkyWalking OAP startup script to increase the memory size, preventing OAP crashes due to insufficient memory.\nTesting without Agent  Start the Consumer service without the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Testing with Agent The only difference in the test without the Agent is the version of the consumer that is compiled and launched.\n Add the SW_AGENT_REPORTER_GRPC_BACKEND_SERVICE environment variables to the consumer service, for setting the IP address of the SkyWalking OAP service. Start the Consumer service with the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. 
The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Test Results In the tests, we used 1000 QPS as a benchmark to stress test both the Consumer services with and without the Agent.\n In the non-Agent version, the CPU usage was around 74%, memory usage was 2.53%, and the average response time for a single request was 4.18ms. In the Agent-compiled version, the CPU usage was around 81%, memory usage was 2.61%, and the average response time for a single request was 4.32ms.  From these results, we can conclude that after adding the Agent, the CPU usage increased by about 9%, memory usage experienced almost no growth, and the average response time for requests increased by approximately 0.15ms.\nExplanation, approximately 0.15ms is the in-band cost. The most of CPU(extra 9%) cost are due to the amount of out of band data being sent to the collectors from the application(consumer), which is 4000 spans/s in our test case.\n","title":"Performance Tests","url":"/docs/skywalking-go/next/en/agent/performance-tests/"},{"content":"Performance Tests Performance testing is used to verify the impact on application performance when using SkyWalking Go.\nTest Objective By launching both the agent and non-agent compiled applications, we subject them to the same QPS under stress testing, evaluating the CPU, memory, and network latency of the machine during the testing period.\nThe application has been saved and submitted to the test/benchmark-codebase directory, with the following topology:\ntraffic generator -\u0026gt; consumer -\u0026gt; provider The payload(traffic) generator uses multithreading to send HTTP requests to the consumer service. When the consumer receives a request, it sends three requests to the provider service to obtain return data results. Based on these network requests, when using SkyWalking Go, the consumer service generates four Spans (1 Entry Span, 3 Exit Spans).\nApplication The application\u0026rsquo;s integration with SkyWalking Go follows the same process as other applications. For more information, please refer to the documentation.\nIn the application, we use loops and mathematical calculations (math.Log) to simulate the execution of the business program. This consumes a certain amount of CPU usage, preventing idle processing during service stress testing and amplifying the impact of the Agent program on the business application.\nStress Testing Service We use the Vegeta service for stress testing, which launches traffic at a specified QPS to the application. It is based on the Go language and uses goroutines to provide a more efficient stress testing solution.\nTest Environment A total of 4 GCP machines are launched, all instances are running on tbe 4C8G VM.\n traffic generator: Used for deploying traffic to the consumer machine. consumer: Used for deploying the consumer service. provider: Used for deploying the provider service. skywalking: Used for deploying the SkyWalking backend cluster, providing a standalone OAP node (in-memory H2 storage) and a UI interface.  
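For readers who want to picture the consumer side of this topology, a minimal handler sketch in Go is shown below: each incoming /consumer request fans out into three calls to the provider, which is what yields the four spans (1 Entry Span, 3 Exit Spans) mentioned above. This is an illustrative sketch only; the actual benchmark code lives in the test/benchmark-codebase directory.
package main

import (
	"flag"
	"io"
	"net/http"
)

// provider mirrors the provider flag described above, e.g. http://${PROVIDER_IP}:8080/provider.
var provider = flag.String("provider", "http://localhost:8080/provider", "provider address")

// consumerHandler fans one incoming request out into three provider calls.
func consumerHandler(w http.ResponseWriter, r *http.Request) {
	for i := 0; i < 3; i++ {
		resp, err := http.Get(*provider)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadGateway)
			return
		}
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}
	w.Write([]byte("ok"))
}

func main() {
	flag.Parse()
	http.HandleFunc("/consumer", consumerHandler)
	http.ListenAndServe(":8080", nil)
}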
Each service is deployed on a separate machine to ensure there is no interference with one another.\nTest Process Preparation Phase The preparation phase is used to ensure that all machines and test case preparations are completed.\nTraffic Generator Install the Vegeta service on the stress testing instance and create the following file(request.txt) to simulate traffic usage.\nGET http://${CONSUMER_IP}:8080/consumer Sw8: 1-MWYyZDRiZjQ3YmY3MTFlYWI3OTRhY2RlNDgwMDExMjI=-MWU3YzIwNGE3YmY3MTFlYWI4NThhY2RlNDgwMDExMjI=-0-c2VydmljZQ==-aW5zdGFuY2U=-cHJvcGFnYXRpb24=-cHJvcGFnYXRpb246NTU2Ng== Please replace the above CONSUMER_IP with the real IP address of the consumer instance.\nConsumer and Provider Install the skywalking-go service on the machines to be tested, and compile with and without the Agent.\nModify the machine\u0026rsquo;s file limit to prevent the inability to create new connections due to excessive handles: ulimit -n 65536.\nStart the provider service(without Agent) and obtain the provider machine\u0026rsquo;s IP address. Please provide this address when starting the consumer machine later.\nSkyWalking Download the SkyWalking service, modify the SkyWalking OAP startup script to increase the memory size, preventing OAP crashes due to insufficient memory.\nTesting without Agent  Start the Consumer service without the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Testing with Agent The only difference in the test without the Agent is the version of the consumer that is compiled and launched.\n Add the SW_AGENT_REPORTER_GRPC_BACKEND_SERVICE environment variables to the consumer service, for setting the IP address of the SkyWalking OAP service. Start the Consumer service with the Agent version. Please add the provider flag for the provider address, the format is: http://${PROVIDER_IP}:8080/provider. Execute this command to preheat the system: vegeta attack -duration=1m -rate=1000/s -max-workers=2000 -targets=request.txt Execute this command to perform the stress test. The command will output statistical data of the stress test when completed: vegeta attack -duration=20m -rate=1000/s -max-workers=2000 -targets=request.txt | tee results.bin | vegeta report  Test Results In the tests, we used 1000 QPS as a benchmark to stress test both the Consumer services with and without the Agent.\n In the non-Agent version, the CPU usage was around 74%, memory usage was 2.53%, and the average response time for a single request was 4.18ms. In the Agent-compiled version, the CPU usage was around 81%, memory usage was 2.61%, and the average response time for a single request was 4.32ms.  From these results, we can conclude that after adding the Agent, the CPU usage increased by about 9%, memory usage experienced almost no growth, and the average response time for requests increased by approximately 0.15ms.\nExplanation, approximately 0.15ms is the in-band cost. 
Most of the extra CPU cost (about 9%) is due to the amount of out-of-band data being sent to the collectors from the application (consumer), which is 4000 spans/s in our test case.\n","title":"Performance Tests","url":"/docs/skywalking-go/v0.4.0/en/agent/performance-tests/"},{"content":"Persistence Storage Persistence storage is used to unify BanyanDB persistence data, including write-ahead logging (WAL), indexes, and data collected from SkyWalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the needs of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. By exposing only the necessary interfaces, upper components do not need to care how persistence is implemented, and they avoid dealing with differences between operating systems.\nArchitecture BanyanDB uses third-party storage for actual storage, and the file system shields the differences between platforms and storage systems, allowing developers to operate files as easily as on the local file system without worrying about specific details.\nDifferent data models are stored in different locations; for meta and WAL data, BanyanDB uses a local file system. For index and data, the architecture of the file system is divided into three layers.\n The first layer is the API interface; developers only need to care about how to operate the remote file system. The second layer is the storage system adapter, which is used to mask the differences between storage systems. The last layer is the actual storage system. With the remote storage architecture, the local file system still plays a role and can be used to speed up reading and writing.  IO Mode Persistence storage offers a range of IO modes to cater to various throughput requirements. The interface can be accessed by developers and can be configured through settings in the configuration file.\nIo_uring Io_uring is a new feature in Linux 5.1 that is fully asynchronous and offers high throughput. In massive-storage scenarios, io_uring can bring significant benefits. The following diagram shows how io_uring works. If the user enables io_uring, read and write requests will first be placed in the submission queue buffer when calling the operation API. When the threshold is reached, batch submissions will be made to the SQ. After the kernel threads complete execution, the requests will be placed in the CQ, and the user can obtain the request results.\nSynchronous IO The most common IO mode is Synchronous IO, but it has a relatively low throughput. BanyanDB provides a nonblocking mode that is compatible with lower Linux versions.\nOperation Directory Create Create the specified directory and return the file descriptor; an error will occur if the directory already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\npermission: Permission you want to set. BanyanDB provides three modes: Read, Write, ReadAndWrite. You can use it as Mode.Read.\nCreateDirectory(name String, permission Mode) (error)\nOpen Open the directory and return an error if the file descriptor does not exist. 
The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\nreturn: Directory pointer, you can use it for various operations.\nOpenDirectory(name String) (*Dir, error)\nDelete Delete the directory and all files and return an error if the directory does not exist or the directory not reading or writing. The following is the pseudocode that calls the API in the go style.\nDir.DeleteDirectory() (error)\nRename Rename the directory and return an error if the directory already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\nDir.RenameDirectory(newName String) (error)\nRead Get all lists of files or children\u0026rsquo;s directories in the directory and an error if the directory does not exist. The following is the pseudocode that calls the API in the go style.\nreturn: List of files belonging to the directory.\nDir.ReadDirectory() (FileList, error)\nPermission When creating a file, the default owner is the user who created the directory. The owner can specify read and write permissions of the directory. If not specified, the default is read and write permissions, which include permissions for all files in the directory. The following is the pseudocode that calls the API in the go style.\nparam:\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nDir.SetDirectoryPermission(permission Mode) (error)\nFile Create Create the specified file and return the file descriptor, the error will happen if the file already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nCreateFile(name String, permission Mode) (error)\nOpen Open the file and return an error if the file descriptor does not exist. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\nreturn: File pointer, you can use it for various operations.\nOpenFile(name String) (*File, error)\nWrite BanyanDB provides two methods for writing files. Append mode, which adds new data to the end of a file. This mode is typically used for WAL. And BanyanDB supports vector Append mode, which supports appending consecutive buffers to the end of the file. Flush mode, which flushes all data to one file. It will return an error when writing a directory, the file does not exist or there is not enough space, and the incomplete file will be discarded. The flush operation is atomic, which means the file won\u0026rsquo;t be created if an error happens during the flush process. The following is the pseudocode that calls the API in the go style.\nFor append mode:\nparam:\nbuffer: The data append to the file.\nFile.AppendWriteFile(buffer []byte) (error)\nFor vector append mode:\nparam:\niov: The data in consecutive buffers.\nFile.AppendWritevFile(iov *[][]byte) (error)\nFor flush mode:\nparam:\nbuffer: The data append to the file.\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nreturn: File pointer, you can use it for various operations.\nFlushWriteFile(buffer []byte, permission Mode) (*File, error)\nDelete BanyanDB provides the deleting operation, which can delete a file at once. 
it will return an error if the directory does not exist or the file not reading or writing.\nThe following is the pseudocode that calls the API in the go style.\nFile.DeleteFile() (error)\nRead For reading operation, two read methods are provided: Reading a specified location of data, which relies on a specified offset and a buffer. And BanyanDB supports reading contiguous regions of a file and dispersing them into discontinuous buffers. Read the entire file, BanyanDB provides stream reading, which can use when the file is too large, the size gets each time can be set when using stream reading. If entering incorrect parameters such as incorrect offset or non-existent file, it will return an error. The following is the pseudocode that calls the API in the go style.\nFor reading specified location of data:\nparam:\noffset: Read begin location of the file.\nbuffer: The read length is the same as the buffer length.\nFile.ReadFile(offset int, buffer []byte) (error)\nFor vector reading:\nparam:\niov: Discontinuous buffers in memory.\nFile.ReadvFile(iov *[][]byte) (error)\nFor stream reading:\nparam:\noffset: Read begin location of the file.\nbuffer: Every read length in the stream is the same as the buffer length.\nreturn: A Iterator, the size of each iteration is the length of the buffer.\nFile.StreamReadFile(offset int, buffer []byte) (*iter, error)\nRename Rename the file and return an error if the directory exists in this directory. The following is the pseudocode that calls the API in the go style.\nparam:\nnewName: The new name of the file.\nFile.RenameFile(newName String) (error)\nGet size Get the file written data\u0026rsquo;s size and return an error if the file does not exist. The unit of file size is Byte. The following is the pseudocode that calls the API in the go style.\nreturn: the file written data\u0026rsquo;s size.\nFile.GetFileSize() (int, error)\nPermission When creating a file, the default owner is the user who created the file. The owner can specify the read and write permissions of the file. If not specified, the default is read and write permissions. The following is the pseudocode that calls the API in the go style.\nparam:\npermisson: Permission you want to set. BanyanDB provides three mode: Read, Write, ReadAndWrite. you can use it as Mode.Read.\nFile.SetFilePermission(permission Mode) (error)\n","title":"Persistence Storage","url":"/docs/skywalking-banyandb/latest/concept/persistence-storage/"},{"content":"Persistence Storage Persistence storage is used for unifying data of BanyanDB persistence, including index, and data collected from skywalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the need of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. By exposing necessary interfaces, upper components do not need to care how persistence is implemented and avoid dealing with differences between different operating systems.\nArchitecture BanyanDB uses third-party storage for actual storage, and the file system shields the differences between different platforms and storage systems, allowing developers to operate files as easily as the local file system without worrying about specific details.\nFor different data models, stored in different locations, such as for meta data, BanyanDB uses a local file system for storage. 
Rename Rename the file, returning an error if a file with the new name already exists in this directory. The following is the pseudocode that calls the API in the go style.\nparam:\nnewName: The new name of the file.\nFile.RenameFile(newName String) (error)\nGet size Get the size of the data written to the file, returning an error if the file does not exist. The file size is measured in bytes. The following is the pseudocode that calls the API in the go style.\nreturn: The size of the data written to the file.\nFile.GetFileSize() (int, error)\nPermission When a file is created, its default owner is the user who created it. The owner can specify the read and write permissions of the file. If not specified, the default is read and write permission. The following is the pseudocode that calls the API in the go style.\nparam:\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, and ReadAndWrite. You can reference them as, for example, Mode.Read.\nFile.SetFilePermission(permission Mode) (error)\n","title":"Persistence Storage","url":"/docs/skywalking-banyandb/latest/concept/persistence-storage/"},{"content":"Persistence Storage Persistence storage unifies BanyanDB\u0026rsquo;s persistent data, including the index and the data collected from SkyWalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the needs of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. By exposing only the necessary interfaces, upper components do not need to care how persistence is implemented and avoid dealing with differences between operating systems.\nArchitecture BanyanDB uses third-party storage for the actual storage, and the file system shields the differences between platforms and storage systems, allowing developers to operate on files as easily as with the local file system, without worrying about specific details.\nFor data models stored in different locations, such as metadata, BanyanDB uses a local file system for storage. For index and data, the architecture of the file system is divided into three layers.\n The first layer is the API interface; developers only need to care about how to operate the remote file system. The second layer is the storage system adapter, which is used to mask the differences between storage systems. The last layer is the actual storage system. Even with a remote storage architecture, the local file system still plays a role and can be used to speed up reading and writing.  IO Mode Persistence storage offers a range of IO modes to cater to various throughput requirements. Developers can access the interface, and the mode can be configured through settings in the configuration file.\nIo_uring io_uring is a feature introduced in Linux 5.1 that is fully asynchronous and offers high throughput. In scenarios with massive storage, io_uring can bring significant benefits. The following diagram shows how io_uring works. If io_uring is enabled, read and write requests are first placed in a submission queue buffer when the operation API is called. When a threshold is reached, they are submitted in batches to the submission queue (SQ). After the kernel threads complete execution, the results are placed in the completion queue (CQ), and the user can obtain the request results.\nSynchronous IO The most common IO mode is synchronous IO, but it has relatively low throughput. BanyanDB provides a non-blocking mode that is compatible with older Linux versions.\nOperation File Create Create the specified file and return the file instance; an error is returned if the file already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, and ReadAndWrite. You can reference them as, for example, Mode.Read.\nreturn: The file instance, which can be used for various file operations.\nCreateFile(name String, permission Mode) (File, error)\nWrite BanyanDB provides two methods for writing files: append mode, which adds new data to the end of a file (BanyanDB also supports a vectored append mode, which appends consecutive buffers to the end of the file), and flush mode, which flushes all data to one file at once. An error is returned when the target is a directory, the file does not exist, or there is not enough space, and the incomplete file will be discarded. The flush operation is atomic, which means the file won\u0026rsquo;t be created if an error happens during the flush process. The following is the pseudocode that calls the API in the go style.\nFor append mode:\nparam:\nbuffer: The data to append to the file.\nreturn: Actual length of written data.\nFile.Write(buffer []byte) (int, error)\nFor vectored append mode:\nparam:\niov: The data in consecutive buffers.\nreturn: Actual length of written data.\nFile.Writev(iov *[][]byte) (int, error)\nFor flush mode:\nparam:\nbuffer: The data to write to the file.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, and ReadAndWrite. You can reference them as, for example, Mode.Read.\nreturn: Actual length of flushed data.\nWrite(buffer []byte, permission Mode) (int, error)
Delete BanyanDB provides a delete operation, which deletes a file at once; it returns an error if the directory does not exist or the file cannot be read or written.\nThe following is the pseudocode that calls the API in the go style.\nDeleteFile(name string) (error)\nRead Two read methods are provided: reading data at a specified location, which relies on an offset and a buffer (BanyanDB also supports reading contiguous regions of a file and scattering them into discontinuous buffers), and stream reading of the entire file, which can be used when the file is too large to read at once; the amount of data fetched on each read is set by the buffer length. Incorrect parameters, such as an invalid offset or a non-existent file, cause an error to be returned. The following is the pseudocode that calls the API in the go style.\nFor reading a specified location of data:\nparam:\noffset: The location in the file where reading begins.\nbuffer: The read length is the same as the buffer length.\nreturn: Actual length of read data.\nFile.Read(offset int64, buffer []byte) (int, error)\nFor vectored reading:\nparam:\niov: Discontinuous buffers in memory.\nreturn: Actual length of read data.\nFile.Readv(iov *[][]byte) (int, error)\nFor stream reading:\nparam:\nbuffer: Every read in the stream is the same length as the buffer.\nreturn: An iterator; the size of each iteration is the length of the buffer.\nFile.StreamRead(buffer []byte) (*iter, error)\nGet size Get the size of the data written to the file, returning an error if the file does not exist. The file size is measured in bytes. The following is the pseudocode that calls the API in the go style.\nreturn: The size of the data written to the file.\nFile.Size() (int, error)\nClose Close the file. The following is the pseudocode that calls the API in the go style.\nFile.Close() error\n
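As a recap of this version of the interface, the sketch below strings the calls together in the same go style; it is illustrative only, not runnable code: the tmpPath and payload variables are assumptions for the example, and errors after the first check are elided.
// Create a file, append a payload, read it back, then close the handle.
f, err := CreateFile(tmpPath, Mode.ReadAndWrite)
if err != nil {
	return err
}
written, err := f.Write(payload) // returns how many bytes were actually appended
size, err := f.Size()            // total bytes written so far
buffer := make([]byte, size)
read, err := f.Read(0, buffer)   // read everything back from offset 0
err = f.Close()                  // release the handle when finished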
","title":"Persistence Storage","url":"/docs/skywalking-banyandb/next/concept/persistence-storage/"},{"content":"Persistence Storage Persistence storage unifies BanyanDB\u0026rsquo;s persistent data, including the write-ahead log (WAL), the index, and the data collected from SkyWalking and other observability platforms or APM systems. It provides various implementations and IO modes to satisfy the needs of different components. BanyanDB provides a concise interface that shields the complexity of the implementation from the upper layer. By exposing only the necessary interfaces, upper components do not need to care how persistence is implemented and avoid dealing with differences between operating systems.\nArchitecture BanyanDB uses third-party storage for the actual storage, and the file system shields the differences between platforms and storage systems, allowing developers to operate on files as easily as with the local file system, without worrying about specific details.\nFor data models stored in different locations, such as metadata and WAL data, BanyanDB uses a local file system for storage. For index and data, the architecture of the file system is divided into three layers.\n The first layer is the API interface; developers only need to care about how to operate the remote file system. The second layer is the storage system adapter, which is used to mask the differences between storage systems. The last layer is the actual storage system. Even with a remote storage architecture, the local file system still plays a role and can be used to speed up reading and writing.  IO Mode Persistence storage offers a range of IO modes to cater to various throughput requirements. Developers can access the interface, and the mode can be configured through settings in the configuration file.\nIo_uring io_uring is a feature introduced in Linux 5.1 that is fully asynchronous and offers high throughput. In scenarios with massive storage, io_uring can bring significant benefits. The following diagram shows how io_uring works. If io_uring is enabled, read and write requests are first placed in a submission queue buffer when the operation API is called. When a threshold is reached, they are submitted in batches to the submission queue (SQ). After the kernel threads complete execution, the results are placed in the completion queue (CQ), and the user can obtain the request results.\nSynchronous IO The most common IO mode is synchronous IO, but it has relatively low throughput. BanyanDB provides a non-blocking mode that is compatible with older Linux versions.\nOperation Directory Create Create the specified directory; an error is returned if the directory already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, and ReadAndWrite. You can reference them as, for example, Mode.Read.\nCreateDirectory(name String, permission Mode) (error)\nOpen Open the directory, returning an error if the directory does not exist. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the directory.\nreturn: Directory pointer, which you can use for various operations.\nOpenDirectory(name String) (*Dir, error)\nDelete Delete the directory and all of its files, returning an error if the directory does not exist or cannot be read or written. The following is the pseudocode that calls the API in the go style.\nDir.DeleteDirectory() (error)\nRename Rename the directory, returning an error if a directory with the new name already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nnewName: The new name of the directory.\nDir.RenameDirectory(newName String) (error)\nRead Get the list of files and child directories in the directory, returning an error if the directory does not exist. The following is the pseudocode that calls the API in the go style.\nreturn: List of files belonging to the directory.\nDir.ReadDirectory() (FileList, error)\nPermission When a directory is created, its default owner is the user who created it. The owner can specify the read and write permissions of the directory. If not specified, the default is read and write permission, which applies to all files in the directory. The following is the pseudocode that calls the API in the go style.\nparam:\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, and ReadAndWrite. You can reference them as, for example, Mode.Read.\nDir.SetDirectoryPermission(permission Mode) (error)
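Before moving on to the file operations, here is a short sketch of how the directory calls above can be combined, again in the go style of the pseudocode and illustrative only: the segmentPath variable is an assumption for the example, and error handling after the first check is elided.
// Create a directory for a new segment, list its contents, then lock it down.
err := CreateDirectory(segmentPath, Mode.ReadAndWrite)
if err != nil {
	return err
}
dir, err := OpenDirectory(segmentPath)
files, err := dir.ReadDirectory()           // FileList of files and child directories in the segment
err = dir.SetDirectoryPermission(Mode.Read) // make the finished segment read-only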
File Create Create the specified file; an error is returned if the file already exists. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, and ReadAndWrite. You can reference them as, for example, Mode.Read.\nCreateFile(name String, permission Mode) (error)\nOpen Open the file, returning an error if the file does not exist. The following is the pseudocode that calls the API in the go style.\nparam:\nname: The name of the file.\nreturn: File pointer, which you can use for various operations.\nOpenFile(name String) (*File, error)\nWrite BanyanDB provides two methods for writing files: append mode, which adds new data to the end of a file and is typically used for the WAL (BanyanDB also supports a vectored append mode, which appends consecutive buffers to the end of the file), and flush mode, which flushes all data to one file at once. An error is returned when the target is a directory, the file does not exist, or there is not enough space, and the incomplete file will be discarded. The flush operation is atomic, which means the file won\u0026rsquo;t be created if an error happens during the flush process. The following is the pseudocode that calls the API in the go style.\nFor append mode:\nparam:\nbuffer: The data to append to the file.\nFile.AppendWriteFile(buffer []byte) (error)\nFor vectored append mode:\nparam:\niov: The data in consecutive buffers.\nFile.AppendWritevFile(iov *[][]byte) (error)\nFor flush mode:\nparam:\nbuffer: The data to write to the file.\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, and ReadAndWrite. You can reference them as, for example, Mode.Read.\nreturn: File pointer, which you can use for various operations.\nFlushWriteFile(buffer []byte, permission Mode) (*File, error)\nDelete BanyanDB provides a delete operation, which deletes a file at once; it returns an error if the directory does not exist or the file cannot be read or written.\nThe following is the pseudocode that calls the API in the go style.\nFile.DeleteFile() (error)\nRead Two read methods are provided: reading data at a specified location, which relies on an offset and a buffer (BanyanDB also supports reading contiguous regions of a file and scattering them into discontinuous buffers), and stream reading of the entire file, which can be used when the file is too large to read at once; the amount of data fetched on each read is set by the buffer length. Incorrect parameters, such as an invalid offset or a non-existent file, cause an error to be returned. The following is the pseudocode that calls the API in the go style.\nFor reading a specified location of data:\nparam:\noffset: The location in the file where reading begins.\nbuffer: The read length is the same as the buffer length.\nFile.ReadFile(offset int, buffer []byte) (error)\nFor vectored reading:\nparam:\niov: Discontinuous buffers in memory.\nFile.ReadvFile(iov *[][]byte) (error)\nFor stream reading:\nparam:\noffset: The location in the file where reading begins.\nbuffer: Every read in the stream is the same length as the buffer.\nreturn: An iterator; the size of each iteration is the length of the buffer.\nFile.StreamReadFile(offset int, buffer []byte) (*iter, error)
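The difference between the positioned read and the vectored read is easiest to see side by side. The sketch below is in the same go style and is illustrative only: the f file pointer and the buffer sizes are assumptions for the example, and error handling is elided.
// Positioned read: fill one buffer starting at a known offset.
header := make([]byte, 64)
err := f.ReadFile(0, header) // reads exactly len(header) bytes from offset 0
// Vectored read: fill several separate buffers from one contiguous region of the file.
parts := [][]byte{make([]byte, 1024), make([]byte, 1024)}
err = f.ReadvFile(&parts)    // scatters the region across both buffers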
Rename Rename the file, returning an error if a file with the new name already exists in this directory. The following is the pseudocode that calls the API in the go style.\nparam:\nnewName: The new name of the file.\nFile.RenameFile(newName String) (error)\nGet size Get the size of the data written to the file, returning an error if the file does not exist. The file size is measured in bytes. The following is the pseudocode that calls the API in the go style.\nreturn: The size of the data written to the file.\nFile.GetFileSize() (int, error)\nPermission When a file is created, its default owner is the user who created it. The owner can specify the read and write permissions of the file. If not specified, the default is read and write permission. The following is the pseudocode that calls the API in the go style.\nparam:\npermission: The permission you want to set. BanyanDB provides three modes: Read, Write, and ReadAndWrite. You can reference them as, for example, Mode.Read.\nFile.SetFilePermission(permission Mode) (error)\n","title":"Persistence Storage","url":"/docs/skywalking-banyandb/v0.5.0/concept/persistence-storage/"},{"content":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models, for example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to be installed). Trace Correlation: Since Trace Profiling is only associated with a single request, it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF, a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules, can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it.
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF","url":"/docs/main/latest/en/concepts-and-designs/ebpf-cpu-profiling/"},{"content":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF","url":"/docs/main/next/en/concepts-and-designs/ebpf-cpu-profiling/"},{"content":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it in the relevant C, C++, Golang, and Rust language-based programs.\nOther than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.\nCPU Profiling with Rover CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg‘s blog post, we\u0026rsquo;ve divided CPU profiling into two types that we have implemented in Rover:\n On-CPU Profiling: Where threads are spending time running on-CPU. Off-CPU Profiling: Where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  Profiling Envoy with eBPF Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service’s pod as a sidecar where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issues in Envoy can affect all service traffic in the mesh. In this scenario, it’s more powerful to use eBPF profiling to analyze issues in production caused by service mesh configuration.\nDemo Environment If you want to see this scenario in action, we\u0026rsquo;ve built a demo environment where we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. If the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nWhen installing Istio using the demo configuration profile, we found there are two places where we can optimize performance:\n Zipkin Tracing: Different Zipkin sampling percentages have a direct impact on QPS. Access Log Format: Reducing the fields of the Envoy access log can improve QPS.  Zipkin Tracing Zipkin with 100% sampling In the default demo configuration profile, Envoy is using 100% sampling as default tracing policy. How does that impact the performance?\nAs shown in the figure below, using the on-CPU profiling, we found that it takes about 16% of the CPU overhead. At a fixed consumption of 2 CPUs, its QPS can reach 5.7K.\nDisable Zipkin tracing At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced or we can even disable tracing. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:\nistioctl install -y --set profile=demo \\  --set \u0026#39;meshConfig.enableTracing=false\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.tracing.sampling=0.0\u0026#39; After disabling tracing, we performed on-CPU profiling again. According to the figure below, we found that Zipkin has disappeared from the flame graph. With the same 2 CPU consumption as in the previous example, the QPS reached 9K, which is an almost 60% increase. Tracing with Throughput With the same CPU usage, we\u0026rsquo;ve discovered that Envoy performance greatly improves when the tracing feature is disabled. 
Of course, this requires us to make trade-offs between the number of samples Zipkin collects and the desired performance of Envoy (QPS).\nThe table below illustrates how different Zipkin sampling percentages under the same CPU usage affect QPS.\n   Zipkin sampling % QPS CPUs Note     100% (default) 5.7K 2 16% used by Zipkin   1% 8.1K 2 0.3% used by Zipkin   disabled 9.2K 2 0% used by Zipkin    Access Log Format Default Log Format In the default demo configuration profile, the default Access Log format contains a lot of data. The flame graph below shows various functions involved in parsing the data such as request headers, response headers, and streaming the body.\nSimplifying Access Log Format Typically, we don’t need all the information in the access log, so we can often simplify it to get what we need. The following command simplifies the access log format to only display basic information:\nistioctl install -y --set profile=demo \\  --set meshConfig.accessLogFormat=\u0026#34;[%START_TIME%] \\\u0026#34;%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\\\u0026#34; %RESPONSE_CODE%\\n\u0026#34; After simplifying the access log format, we found that the QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.\nSimplifying the log format helped us to improve the performance.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching.\nWe provide data aggregation in two dimensions:\n Switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Write Access Log Enable Write Using the same environment and settings as before in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The \u0026ldquo;__write\u0026rdquo; shown below also indicates that this method is the Linux kernel method.\nDisable Write SkyWalking implements Envoy\u0026rsquo;s Access Log Service (ALS) feature which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even by disabling the access logging, we can still use ALS to capture/aggregate the logs. We\u0026rsquo;ve disabled writing to the access log using the following command:\nistioctl install -y --set profile=demo --set meshConfig.accessLogFile=\u0026#34;\u0026#34; After disabling the Access Log feature, we performed the off-CPU profiling. File writing entries have disappeared as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.\nConclusion In this article, we\u0026rsquo;ve examined the insights Apache Skywalking\u0026rsquo;s Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:\n Continuous profiling, helps you automatically profile without manual intervention. 
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF","url":"/docs/main/v9.2.0/en/concepts-and-designs/ebpf-cpu-profiling/"},{"content":"Pinpoint Service Mesh Critical Performance Impact by using eBPF Background Apache SkyWalking observes metrics, logs, traces, and events for services deployed into the service mesh. When troubleshooting, SkyWalking error analysis can be an invaluable tool helping to pinpoint where an error occurred. However, performance problems are more difficult: It’s often impossible to locate the root cause of performance problems with pre-existing observation data. To move beyond the status quo, dynamic debugging and troubleshooting are essential service performance tools. In this article, we\u0026rsquo;ll discuss how to use eBPF technology to improve the profiling feature in SkyWalking and analyze the performance impact in the service mesh.\nTrace Profiling in SkyWalking Since SkyWalking 7.0.0, Trace Profiling has helped developers find performance problems by periodically sampling the thread stack to let developers know which lines of code take more time. However, Trace Profiling is not suitable for the following scenarios:\n Thread Model: Trace Profiling is most useful for profiling code that executes in a single thread. It is less useful for middleware that relies heavily on async execution models. For example Goroutines in Go or Kotlin Coroutines. Language: Currently, Trace Profiling is only supported in Java and Python, since it’s not easy to obtain the thread stack in the runtimes of some languages such as Go and Node.js. Agent Binding: Trace Profiling requires Agent installation, which can be tricky depending on the language (e.g., PHP has to rely on its C kernel; Rust and C/C++ require manual instrumentation to make install). Trace Correlation: Since Trace Profiling is only associated with a single request it can be hard to determine which request is causing the problem. Short Lifecycle Services: Trace Profiling doesn\u0026rsquo;t support short-lived services for (at least) two reasons:  It\u0026rsquo;s hard to differentiate system performance from class code manipulation in the booting stage. Trace profiling is linked to an endpoint to identify performance impact, but there is no endpoint to match these short-lived services.    Fortunately, there are techniques that can go further than Trace Profiling in these situations.\nIntroduce eBPF We have found that eBPF — a technology that can run sandboxed programs in an operating system kernel and thus safely and efficiently extend the capabilities of the kernel without requiring kernel modifications or loading kernel modules — can help us fill gaps left by Trace Profiling. eBPF is a trending technology because it breaks the traditional barrier between user and kernel space. Programs can now inject bytecode that runs in the kernel, instead of having to recompile the kernel to customize it. 
This is naturally a good fit for observability.\nIn the figure below, we can see that when the system executes the execve syscalls, the eBPF program is triggered, and the current process runtime information is obtained by using function calls.\nUsing eBPF technology, we can expand the scope of Skywalking\u0026rsquo;s profiling capabilities:\n Global Performance Analysis: Before eBPF, data collection was limited to what agents can observe. Since eBPF programs run in the kernel, they can observe all threads. This is especially useful when you are not sure whether a performance problem is caused by a particular request. Data Content: eBPF can dump both user and kernel space thread stacks, so if a performance issue happens in kernel space, it’s easier to find. Agent Binding: All modern Linux kernels support eBPF, so there is no need to install anything. This means it is an orchestration-free vs an agent model. This reduces friction caused by built-in software which may not have the correct agents installed, such as Envoy in a Service Mesh. Sampling Type: Unlike Trace Profiling, eBPF is event-driven and, therefore, not constrained by interval polling. For example, eBPF can trigger events and collect more data depending on a transfer size threshold. This can allow the system to triage and prioritize data collection under extreme load.  eBPF Limitations While eBPF offers significant advantages for hunting performance bottlenecks, no technology is perfect. eBPF has a number of limitations described below. Fortunately, since SkyWalking does not require eBPF, the impact is limited.\n Linux Version Requirement: eBPF programs require a Linux kernel version above 4.4, with later kernel versions offering more data to be collected. The BCC has documented the features supported by different Linux kernel versions, with the differences between versions usually being what data can be collected with eBPF. Privileges Required: All processes that intend to load eBPF programs into the Linux kernel must be running in privileged mode. As such, bugs or other issues in such code may have a big impact. Weak Support for Dynamic Language: eBPF has weak support for JIT-based dynamic languages, such as Java. It also depends on what data you want to collect. For Profiling, eBPF does not support parsing the symbols of the program, which is why most eBPF-based profiling technologies only support static languages like C, C++, Go, and Rust. However, symbol mapping can sometimes be solved through tools provided by the language. For example, in Java, perf-map-agent can be used to generate the symbol mapping. However, dynamic languages don\u0026rsquo;t support the attach (uprobe) functionality that would allow us to trace execution events through symbols.  Introducing SkyWalking Rover SkyWalking Rover introduces the eBPF profiling feature into the SkyWalking ecosystem. The figure below shows the overall architecture of SkyWalking Rover. SkyWalking Rover is currently supported in Kubernetes environments and must be deployed inside a Kubernetes cluster. After establishing a connection with the SkyWalking backend server, it saves information about the processes on the current machine to SkyWalking. 
When the user creates an eBPF profiling task via the user interface, SkyWalking Rover receives the task and executes it against the relevant C, C++, Go, or Rust based programs.
Other than an eBPF-capable kernel, there are no additional prerequisites for deploying SkyWalking Rover.

CPU Profiling with Rover
CPU profiling is the most intuitive way to show service performance. Inspired by Brendan Gregg's blog post, we've divided CPU profiling into two types, both of which are implemented in Rover:

On-CPU Profiling: where threads are spending time running on-CPU.
Off-CPU Profiling: where time is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.

Profiling Envoy with eBPF
Envoy is a popular proxy, used as the data plane by the Istio service mesh. In a Kubernetes cluster, Istio injects Envoy into each service's pod as a sidecar, where it transparently intercepts and processes incoming and outgoing traffic. As the data plane, any performance issue in Envoy can affect all service traffic in the mesh. In this scenario, eBPF profiling is a powerful way to analyze production issues caused by service mesh configuration.

Demo Environment
If you want to see this scenario in action, we've built a demo environment in which we deploy an Nginx service for stress testing. Traffic is intercepted by Envoy and forwarded to Nginx. The commands to install the whole environment can be accessed through GitHub.

On-CPU Profiling
On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high. The more often a stack appears in the samples, the more CPU time that call path is consuming.
When installing Istio using the demo configuration profile, we found two places where we can optimize performance:

Zipkin Tracing: different Zipkin sampling percentages have a direct impact on QPS.
Access Log Format: reducing the fields of the Envoy access log can improve QPS.

Zipkin Tracing
Zipkin with 100% sampling
In the default demo configuration profile, Envoy uses 100% sampling as the default tracing policy. How does that impact performance?
As shown in the figure below, using on-CPU profiling we found that tracing accounts for about 16% of the CPU usage. At a fixed consumption of 2 CPUs, QPS can reach 5.7K.

Disable Zipkin tracing
At this point, we found that if Zipkin is not necessary, the sampling percentage can be reduced, or tracing can be disabled entirely. Based on the Istio documentation, we can disable tracing when installing the service mesh using the following command:

istioctl install -y --set profile=demo \
  --set 'meshConfig.enableTracing=false' \
  --set 'meshConfig.defaultConfig.tracing.sampling=0.0'

After disabling tracing, we performed on-CPU profiling again. As the figure below shows, Zipkin has disappeared from the flame graph. With the same 2-CPU consumption as in the previous example, QPS reached 9K, an almost 60% increase.

Tracing vs. Throughput
At the same CPU usage, we've seen that Envoy performance greatly improves when the tracing feature is disabled.
Of course, this requires us to make a trade-off between the number of samples Zipkin collects and the desired performance of Envoy (QPS).
The table below shows how different Zipkin sampling percentages affect QPS at the same CPU usage.

  Zipkin sampling %   QPS    CPUs   Note
  100% (default)      5.7K   2      16% used by Zipkin
  1%                  8.1K   2      0.3% used by Zipkin
  disabled            9.2K   2      0% used by Zipkin

Access Log Format
Default Log Format
In the default demo configuration profile, the default access log format contains a lot of data. The flame graph below shows the various functions involved in formatting data such as request headers, response headers, and the streamed body.

Simplifying the Access Log Format
Typically, we don't need all the information in the access log, so we can often simplify it to get just what we need. The following command reduces the access log format to basic information only:

istioctl install -y --set profile=demo \
  --set meshConfig.accessLogFormat="[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE%\n"

After simplifying the access log format, QPS increased from 5.7K to 5.9K. When executing the on-CPU profiling again, the CPU usage of log formatting dropped from 2.4% to 0.7%.
Simplifying the log format helped us improve performance.

Off-CPU Profiling
Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage. For example, when there are too many threads in one service, off-CPU profiling can reveal which threads spend more time context switching.
We provide data aggregation in two dimensions:

Switch count: the number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count is switched out more often.
Switch duration: the time a thread spends off-CPU for each context switch. A thread stack with a higher switch duration spends more time off-CPU.

Write Access Log
Enable Write
Using the same environment and settings as in the on-CPU test, we performed off-CPU profiling. As shown below, we found that access log writes accounted for about 28% of the total context switches. The "__write" frame shown below also indicates that this call is a Linux kernel method.

Disable Write
SkyWalking implements Envoy's Access Log Service (ALS), which allows us to send access logs to the SkyWalking Observability Analysis Platform (OAP) using the gRPC protocol. Even with access log file writing disabled, we can still use ALS to capture and aggregate the logs. We disabled writing to the access log file using the following command:

istioctl install -y --set profile=demo --set meshConfig.accessLogFile=""

After disabling the access log file, we performed the off-CPU profiling again. The file writing entries have disappeared, as shown in the figure below. Envoy throughput also increased from 5.7K to 5.9K.

Conclusion
In this article, we've examined the insights Apache SkyWalking's Trace Profiling can give us and how much more can be achieved with eBPF profiling. All of these features are implemented in skywalking-rover. In addition to on- and off-CPU profiling, you will also find the following features:

Continuous profiling, which helps you profile automatically without manual intervention.
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task.
More profiling types to enrich usage scenarios, such as network and memory profiling.
","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF","url":"/docs/main/v9.3.0/en/concepts-and-designs/ebpf-cpu-profiling/"}
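The off-CPU dimensions described above (switch count and switch duration) can also be illustrated with a small sketch. As with the earlier examples, this is an assumed, simplified libbpf-style C program rather than Rover's actual code: it hooks the scheduler's sched_switch tracepoint, remembers when a thread leaves the CPU, and accumulates the elapsed time when that thread runs again.

```c
// Simplified, assumed sketch of off-CPU time accounting (libbpf-style C, not Rover's code).
// On every scheduler switch it records when a thread leaves the CPU, and when that
// thread is scheduled back in it adds the elapsed time to a per-thread total.
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 65536);
    __type(key, __u32);     // thread id
    __type(value, __u64);   // timestamp when the thread was switched out
} switched_out_at SEC(".maps");

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 65536);
    __type(key, __u32);     // thread id
    __type(value, __u64);   // accumulated off-CPU time in nanoseconds
} offcpu_ns SEC(".maps");

SEC("tp_btf/sched_switch")
int BPF_PROG(on_switch, bool preempt, struct task_struct *prev, struct task_struct *next)
{
    __u64 now = bpf_ktime_get_ns();

    // The previous task is leaving the CPU: remember when.
    __u32 prev_tid = prev->pid;
    bpf_map_update_elem(&switched_out_at, &prev_tid, &now, BPF_ANY);

    // The next task is returning to the CPU: account its off-CPU time.
    __u32 next_tid = next->pid;
    __u64 *out_ts = bpf_map_lookup_elem(&switched_out_at, &next_tid);
    if (out_ts) {
        __u64 delta = now - *out_ts;
        __u64 *total = bpf_map_lookup_elem(&offcpu_ns, &next_tid);
        if (total)
            __sync_fetch_and_add(total, delta);
        else
            bpf_map_update_elem(&offcpu_ns, &next_tid, &delta, BPF_ANY);
        bpf_map_delete_elem(&switched_out_at, &next_tid);
    }
    return 0;
}
```

A real profiler would additionally key these maps by stack ID, as in the on-CPU sketch, so that switch count and switch duration can be attributed to a thread stack rather than just a thread ID.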
For example, when Rover detects that the CPU exceeds a configurable threshold, it automatically executes the on-CPU profiling task. More profiling types to enrich usage scenarios, such as network, and memory profiling.  ","title":"Pinpoint Service Mesh Critical Performance Impact by using eBPF","url":"/docs/main/v9.7.0/en/concepts-and-designs/ebpf-cpu-profiling/"},{"content":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe configuration has 5 parts, which are common_config, gatherer, processor and the sender.\ncommon_config    Config Description     pipe_name The unique collect space name.    Gatherer The gatherer has 2 roles, which are the receiver and fetcher.\nReceiver Role    Config Description     server_name The server name in the sharing pipe, which would be used in the receiver plugin.   receiver The receiver configuration. Please read the doc to find all receiver plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    Fetcher Role    Config Description     fetch_interval The time interval between two fetch operations. The time unit is millisecond.   fetcher The fetcher configuration. Please read the doc to find all fetcher plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    processor The filter configuration. Please read the doc to find all filter plugins.\nsender    Config Description     flush_time The time interval between two flush operations. And the time unit is millisecond.   max_buffer_size The maximum buffer elements.   min_flush_events The minimum flush elements.   client_name The client name used in the forwarders of the sharing pipe.   forwarders The forwarder plugin list. Please read the doc to find all forwarders plugins.   fallbacker The fallbacker plugin. Please read the doc to find all fallbacker plugins.    Example pipes:- common_config:pipe_name:pipe1gatherer:server_name:\u0026#34;grpc-server\u0026#34;receiver:plugin_name:\u0026#34;grpc-native-log-receiver\u0026#34;queue:plugin_name:\u0026#34;mmap-queue\u0026#34;segment_size:${SATELLITE_MMAP_QUEUE_SIZE:524288}max_in_mem_segments:${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}queue_dir:\u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;processor:filters:sender:fallbacker:plugin_name:none-fallbackerflush_time:${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}max_buffer_size:${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}min_flush_events:${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}client_name:kafka-clientforwarders:- plugin_name:native-log-kafka-forwardertopic:${SATELLITE_NATIVELOG-TOPIC:log-topic}","title":"Pipe Plugins","url":"/docs/skywalking-satellite/latest/en/setup/configuration/pipe-plugins/"},{"content":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe configuration has 5 parts, which are common_config, gatherer, processor and the sender.\ncommon_config    Config Description     pipe_name The unique collect space name.    Gatherer The gatherer has 2 roles, which are the receiver and fetcher.\nReceiver Role    Config Description     server_name The server name in the sharing pipe, which would be used in the receiver plugin.   receiver The receiver configuration. Please read the doc to find all receiver plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    Fetcher Role    Config Description     fetch_interval The time interval between two fetch operations. 
The time unit is millisecond.   fetcher The fetcher configuration. Please read the doc to find all fetcher plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    processor The filter configuration. Please read the doc to find all filter plugins.\nsender    Config Description     flush_time The time interval between two flush operations. And the time unit is millisecond.   max_buffer_size The maximum buffer elements.   min_flush_events The minimum flush elements.   client_name The client name used in the forwarders of the sharing pipe.   forwarders The forwarder plugin list. Please read the doc to find all forwarders plugins.   fallbacker The fallbacker plugin. Please read the doc to find all fallbacker plugins.    Example pipes:- common_config:pipe_name:pipe1gatherer:server_name:\u0026#34;grpc-server\u0026#34;receiver:plugin_name:\u0026#34;grpc-native-log-receiver\u0026#34;queue:plugin_name:\u0026#34;mmap-queue\u0026#34;segment_size:${SATELLITE_MMAP_QUEUE_SIZE:524288}max_in_mem_segments:${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}queue_dir:\u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;processor:filters:sender:fallbacker:plugin_name:none-fallbackerflush_time:${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}max_buffer_size:${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}min_flush_events:${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}client_name:kafka-clientforwarders:- plugin_name:native-log-kafka-forwardertopic:${SATELLITE_NATIVELOG-TOPIC:log-topic}","title":"Pipe Plugins","url":"/docs/skywalking-satellite/next/en/setup/configuration/pipe-plugins/"},{"content":"Pipe Plugins The pipe plugin configurations contain a series of pipe configuration. Each pipe configuration has 5 parts, which are common_config, gatherer, processor and the sender.\ncommon_config    Config Description     pipe_name The unique collect space name.    Gatherer The gatherer has 2 roles, which are the receiver and fetcher.\nReceiver Role    Config Description     server_name The server name in the sharing pipe, which would be used in the receiver plugin.   receiver The receiver configuration. Please read the doc to find all receiver plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    Fetcher Role    Config Description     fetch_interval The time interval between two fetch operations. The time unit is millisecond.   fetcher The fetcher configuration. Please read the doc to find all fetcher plugins.   queue The queue buffers the input telemetry data. Please read the doc to find all queue plugins.    processor The filter configuration. Please read the doc to find all filter plugins.\nsender    Config Description     flush_time The time interval between two flush operations. And the time unit is millisecond.   max_buffer_size The maximum buffer elements.   min_flush_events The minimum flush elements.   client_name The client name used in the forwarders of the sharing pipe.   forwarders The forwarder plugin list. Please read the doc to find all forwarders plugins.   fallbacker The fallbacker plugin. Please read the doc to find all fallbacker plugins.    
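Putting the four parts together, the example that follows corresponds to roughly this structure when re-indented for readability (the nesting shown here is inferred from the tables above, so treat it as a sketch rather than an authoritative layout):
pipes:
- common_config:
    pipe_name: pipe1
  gatherer:
    server_name: \u0026#34;grpc-server\u0026#34;
    receiver:
      plugin_name: \u0026#34;grpc-native-log-receiver\u0026#34;
    queue:
      plugin_name: \u0026#34;mmap-queue\u0026#34;
      segment_size: ${SATELLITE_MMAP_QUEUE_SIZE:524288}
      max_in_mem_segments: ${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}
      queue_dir: \u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;
  processor:
    filters:
  sender:
    fallbacker:
      plugin_name: none-fallbacker
    flush_time: ${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}
    max_buffer_size: ${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}
    min_flush_events: ${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}
    client_name: kafka-client
    forwarders:
    - plugin_name: native-log-kafka-forwarder
      topic: ${SATELLITE_NATIVELOG-TOPIC:log-topic}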
Example pipes:- common_config:pipe_name:pipe1gatherer:server_name:\u0026#34;grpc-server\u0026#34;receiver:plugin_name:\u0026#34;grpc-native-log-receiver\u0026#34;queue:plugin_name:\u0026#34;mmap-queue\u0026#34;segment_size:${SATELLITE_MMAP_QUEUE_SIZE:524288}max_in_mem_segments:${SATELLITE_MMAP_QUEUE_MAX_IN_MEM_SEGMENTS:6}queue_dir:\u0026#34;pipe1-log-grpc-receiver-queue\u0026#34;processor:filters:sender:fallbacker:plugin_name:none-fallbackerflush_time:${SATELLITE_PIPE1_SENDER_FLUSH_TIME:1000}max_buffer_size:${SATELLITE_PIPE1_SENDER_MAX_BUFFER_SIZE:200}min_flush_events:${SATELLITE_PIPE1_SENDER_MIN_FLUSH_EVENTS:100}client_name:kafka-clientforwarders:- plugin_name:native-log-kafka-forwardertopic:${SATELLITE_NATIVELOG-TOPIC:log-topic}","title":"Pipe Plugins","url":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/pipe-plugins/"},{"content":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... 
|- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. (HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. 
You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   
parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. 
All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. --\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... \u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  
+-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  
Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml. If a test case required to run in JDK 21 environment, please add you test case into file plugins-jdk21-test.\u0026lt;n\u0026gt;.yaml.\njobs:PluginsTest:name:Pluginruns-on:ubuntu-latesttimeout-minutes:90strategy:fail-fast:truematrix:case:# ...- \u0026lt;your scenario test directory name\u0026gt;# ...","title":"Plugin automatic test framework","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/plugin-test/"},{"content":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. 
As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. 
(HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. 
DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    Expected Data Format Of The Span\nNote: The order of span list should follow the order of the span finish time.\noperationName:OPERATION_NAME(string)parentSpanId:PARENT_SPAN_ID(int)spanId:SPAN_ID(int)startTime:START_TIME(int)endTime:END_TIME(int)isError: IS_ERROR(string:true,false)spanLayer: SPAN_LAYER(string:DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)spanType: SPAN_TYPE(string:Exit, Entry, Local)componentId:COMPONENT_ID(int)tags:- {key: TAG_KEY(string), value:TAG_VALUE(string)}...logs:- {key: LOG_KEY(string), value:LOG_VALUE(string)}...peer:PEER(string)refs:- {traceId:TRACE_ID(string),parentTraceSegmentId:PARENT_TRACE_SEGMENT_ID(string),parentSpanId:PARENT_SPAN_ID(int),parentService:PARENT_SERVICE(string),parentServiceInstance:PARENT_SERVICE_INSTANCE(string),parentEndpoint:PARENT_ENDPOINT_NAME(string),networkAddress:NETWORK_ADDRESS(string),refType: REF_TYPE(string:CrossProcess, CrossThread)}...   Field Description     operationName Span Operation Name.   parentSpanId Parent span ID. Note: The parent span ID of the first span should be -1.   spanId Span ID. Note: Start from 0.   startTime Span start time. It is impossible to get the accurate time, not 0 should be enough.   endTime Span finish time. It is impossible to get the accurate time, not 0 should be enough.   isError Span status, true or false.   componentId Component id for your plugin.   tags Span tag list. Notice, Keep in the same order as the plugin coded.   logs Span log list. Notice, Keep in the same order as the plugin coded.   SpanLayer Options, DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.   SpanType Span type, options, Exit, Entry or Local.   peer Remote network address, IP + port mostly. For exit span, this should be required.    The verify description for SegmentRef\n   Field Description     traceId    parentTraceSegmentId Parent SegmentId, pointing to the segment id in the parent segment.   parentSpanId Parent SpanID, pointing to the span id in the parent segment.   parentService The service of parent/downstream service name.   parentServiceInstance The instance of parent/downstream service instance name.   parentEndpoint The endpoint of parent/downstream service.   networkAddress The peer value of parent exit span.   refType Ref type, options, CrossProcess or CrossThread.    Expected Data Format Of The Meter Items\nmeterItems:- serviceName:SERVICE_NAME(string)meterSize:METER_SIZE(int)meters:- ...   Field Description     serviceName Service Name.   meterSize The number of meters is expected.   meters meter list. Follow the next section to see how to describe every meter.    Expected Data Format Of The Meter\nmeterId:name:NAME(string)tags:- {name: TAG_NAME(string), value:TAG_VALUE(string)}singleValue:SINGLE_VALUE(double)histogramBuckets:- HISTOGRAM_BUCKET(double)...The verify description for MeterId\n   Field Description     name meter name.   tags meter tags.   tags.name tag name.   tags.value tag value.   singleValue counter or gauge value. Using condition operate of the number to validate, such as gt, ge. If current meter is histogram, don\u0026rsquo;t need to write this field.   histogramBuckets histogram bucket. 
The bucket list must be ordered. The tool assert at least one bucket of the histogram having nonzero count. If current meter is counter or gauge, don\u0026rsquo;t need to write this field.    Expected Data Format Of The Log Items\nlogItems:- serviceName:SERVICE_NAME(string)logSize:LOG_SIZE(int)logs:- ...   Field Description     serviceName Service Name.   logSize The number of logs is expected.   logs log list. Follow the next section to see how to describe every log.    Expected Data Format Of The Log\ntimestamp:TIMESTAMP_VALUE(int)endpoint:ENDPOINT_VALUE(int)traceContext:traceId:TRACE_ID_VALUE(string)traceSegmentId:TRACE_SEGMENT_ID_VALUE(string)spanId:SPAN_ID_VALUE(int)body:type:TYPE_VALUE(string)content:# Choose one of three (text, json or yaml)text:TEXT_VALUE(string)# json: JSON_VALUE(string)# yaml: YAML_VALUE(string)tags:data:- key:TAG_KEY(string)value:TAG_VALUE(string)...layer:LAYER_VALUE(string)...The verify description for Log\n   Field Description     timestamp log timestamp.   endpoint log endpoint.   traceContext.traceId log associated trace id.   traceContext.traceSegmentId log associated trace segment id.   traceContext.spanId log associated span id.   body.type log body type.   body.content log content, the sub field choose one of three (text, json or yaml).   tags.data log tags, key value pairs.   layer log layer.    startup.sh This script provide a start point to JVM based service, most of them starts by a java -jar, with some variables. The following system environment variables are available in the shell.\n   Variable Description     agent_opts Agent plugin opts, check the detail in plugin doc or the same opt added in this PR.   SCENARIO_NAME Service name. Default same as the case folder name   SCENARIO_VERSION Version   SCENARIO_ENTRY_SERVICE Entrance URL to access this service   SCENARIO_HEALTH_CHECK_URL Health check URL     ${agent_opts} is required to add into your java -jar command, which including the parameter injected by test framework, and make agent installed. All other parameters should be added after ${agent_opts}.\n The test framework will set the service name as the test case folder name by default, but in some cases, there are more than one test projects are required to run in different service codes, could set it explicitly like the following example.\nExample\nhome=\u0026#34;$(cd \u0026#34;$(dirname $0)\u0026#34;; pwd)\u0026#34; java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyserver-scenario\u0026#34; ${home}/../libs/jettyserver-scenario.jar \u0026amp; sleep 1 java -jar ${agent_opts} \u0026#34;-Dskywalking.agent.service_name=jettyclient-scenario\u0026#34; ${home}/../libs/jettyclient-scenario.jar \u0026amp;  Only set this or use other skywalking options when it is really necessary.\n Take the following test cases as examples\n undertow webflux  Best Practices How To Use The Archetype To Create A Test Case Project We provided archetypes and a script to make creating a project easier. It creates a completed project of a test case. So that we only need to focus on cases. First, we can use followed command to get usage about the script.\nbash ${SKYWALKING_HOME}/test/plugin/generator.sh\nThen, runs and generates a project, named by scenario_name, in ./scenarios.\nRecommendations for pom \u0026lt;properties\u0026gt; \u0026lt;!-- Provide and use this property in the pom. --\u0026gt; \u0026lt;!-- This version should match the library version, --\u0026gt; \u0026lt;!-- in this case, http components lib version 4.3. 
--\u0026gt; \u0026lt;test.framework.version\u0026gt;4.3\u0026lt;/test.framework.version\u0026gt; \u0026lt;/properties\u0026gt; \u0026lt;dependencies\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.httpcomponents\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;httpclient\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${test.framework.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ... \u0026lt;/dependencies\u0026gt; \u0026lt;build\u0026gt; \u0026lt;!-- Set the package final name as same as the test case folder case. --\u0026gt; \u0026lt;finalName\u0026gt;httpclient-4.3.x-scenario\u0026lt;/finalName\u0026gt; .... \u0026lt;/build\u0026gt; How To Implement Heartbeat Service Heartbeat service is designed for checking the service available status. This service is a simple HTTP service, returning 200 means the target service is ready. Then the traffic generator will access the entry service and verify the expected data. User should consider to use this service to detect such as whether the dependent services are ready, especially when dependent services are database or cluster.\nNotice, because heartbeat service could be traced fully or partially, so, segmentSize in expectedData.yaml should use ge as the operator, and don\u0026rsquo;t include the segments of heartbeat service in the expected segment data.\nThe example Process of Writing Tracing Expected Data Expected data file, expectedData.yaml, include SegmentItems part.\nWe are using the HttpClient plugin to show how to write the expected data.\nThere are two key points of testing\n Whether is HttpClient span created. Whether the ContextCarrier created correctly, and propagates across processes.  +-------------+ +------------------+ +-------------------------+ | Browser | | Case Servlet | | ContextPropagateServlet | | | | | | | +-----|-------+ +---------|--------+ +------------|------------+ | | | | | | | WebHttp +-+ | +------------------------\u0026gt; |-| HttpClient +-+ | |--------------------------------\u0026gt; |-| | |-| |-| | |-| |-| | |-| \u0026lt;--------------------------------| | |-| +-+ | \u0026lt;--------------------------| | | +-+ | | | | | | | | | | | | | + + + segmentItems By following the flow of HttpClient case, there should be two segments created.\n Segment represents the CaseServlet access. Let\u0026rsquo;s name it as SegmentA. Segment represents the ContextPropagateServlet access. Let\u0026rsquo;s name it as SegmentB.  
segmentItems:- serviceName:httpclient-casesegmentSize:ge 2# Could have more than one health check segments, because, the dependency is not standby.Because Tomcat plugin is a default plugin of SkyWalking, so, in SegmentA, there are two spans\n Tomcat entry span HttpClient exit span  SegmentA span list should like following\n- segmentId:not nullspans:- operationName:/httpclient-case/case/context-propagateparentSpanId:0spanId:1startTime:nq 0endTime:nq 0isError:falsespanLayer:HttpspanType:ExitcomponentId:eq 2tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:127.0.0.1:8080- operationName:/httpclient-case/case/httpclientparentSpanId:-1spanId:0startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1tags:- {key: url, value:\u0026#39;http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]peer:nullSegmentB should only have one Tomcat entry span, but includes the Ref pointing to SegmentA.\nSegmentB span list should like following\n- segmentId:not nullspans:-operationName:/httpclient-case/case/context-propagateparentSpanId:-1spanId:0tags:- {key: url, value:\u0026#39;http://127.0.0.1:8080/httpclient-case/case/context-propagate\u0026#39;}- {key: http.method, value:GET}- {key: http.status_code, value:\u0026#39;200\u0026#39;}logs:[]startTime:nq 0endTime:nq 0spanLayer:HttpisError:falsespanType:EntrycomponentId:1peer:nullrefs:- {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: \u0026#39;localhost:8080\u0026#39;, refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId:not null}The example Process of Writing Meter Expected Data Expected data file, expectedData.yaml, include MeterItems part.\nWe are using the toolkit plugin to demonstrate how to write the expected data. When write the meter plugin, the expected data file keeps the same.\nThere is one key point of testing\n Build a meter and operate it.  Such as Counter:\nMeterFactory.counter(\u0026#34;test_counter\u0026#34;).tag(\u0026#34;ck1\u0026#34;, \u0026#34;cv1\u0026#34;).build().increment(1d); MeterFactory.histogram(\u0026#34;test_histogram\u0026#34;).tag(\u0026#34;hk1\u0026#34;, \u0026#34;hv1\u0026#34;).steps(1d, 5d, 10d).build().addValue(2d); +-------------+ +------------------+ | Plugin | | Agent core | | | | | +-----|-------+ +---------|--------+ | | | | | Build or operate +-+ +------------------------\u0026gt; |-| | |-] | |-| | |-| | |-| | |-| | \u0026lt;--------------------------| | +-+ | | | | | | | | + + meterItems By following the flow of the toolkit case, there should be two meters created.\n Meter test_counter created from MeterFactory#counter. Let\u0026rsquo;s name it as MeterA. Meter test_histogram created from MeterFactory#histogram. Let\u0026rsquo;s name it as MeterB.  
meterItems:- serviceName:toolkit-casemeterSize:2They\u0026rsquo;re showing two kinds of meter, MeterA has a single value, MeterB has a histogram value.\nMeterA should like following, counter and gauge use the same data format.\n- meterId:name:test_countertags:- {name: ck1, value:cv1}singleValue:gt 0MeterB should like following.\n- meterId:name:test_histogramtags:- {name: hk1, value:hv1}histogramBuckets:- 0.0- 1.0- 5.0- 10.0Local Test and Pull Request To The Upstream First of all, the test case project could be compiled successfully, with right project structure and be able to deploy. The developer should test the start script could run in Linux/MacOS, and entryService/health services are able to provide the response.\nYou could run test by using following commands\ncd ${SKYWALKING_HOME} bash ./test/plugin/run.sh -f ${scenario_name} Notice,if codes in ./apm-sniffer have been changed, no matter because your change or git update, please recompile the skywalking-agent. Because the test framework will use the existing skywalking-agent folder, rather than recompiling it every time.\nUse ${SKYWALKING_HOME}/test/plugin/run.sh -h to know more command options.\nIf the local test passed, then you could add it to .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml file, which will drive the tests running on the GitHub Actions of official SkyWalking repository. Based on your plugin\u0026rsquo;s name, please add the test case into file .github/workflows/plugins-test.\u0026lt;n\u0026gt;.yaml, by alphabetical orders.\nEvery test case is a GitHub Actions Job. Please use the scenario directory name as the case name, mostly you\u0026rsquo;ll just need to decide which file (plugins-test.\u0026lt;n\u0026gt;.yaml) to add your test case, and simply put one line (as follows) in it, take the existed cases as examples. You can run python3 tools/select-group.py to see which file contains the least cases and add your cases into it, in order to balance the running time of each group.\nIf a test case required to run in JDK 17 environment, please add you test case into file plugins-jdk17-test.\u0026lt;n\u0026gt;.yaml. If a test case required to run in JDK 21 environment, please add you test case into file plugins-jdk21-test.\u0026lt;n\u0026gt;.yaml.\njobs:PluginsTest:name:Pluginruns-on:ubuntu-latesttimeout-minutes:90strategy:fail-fast:truematrix:case:# ...- \u0026lt;your scenario test directory name\u0026gt;# ...","title":"Plugin automatic test framework","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-test/"},{"content":"Plugin automatic test framework The plugin test framework is designed to verify the function and compatibility of plugins. As there are dozens of plugins and hundreds of versions that need to be verified, it is impossible to do it manually. The test framework uses container-based tech stack and requires a set of real services with the agents installed. Then, the test mock OAP backend runs to check the segments data sent from agents.\nEvery plugin maintained in the main repo requires corresponding test cases as well as matching versions in the supported list doc.\nEnvironment Requirements  MacOS/Linux JDK 8+ Docker Docker Compose  Case Base Image Introduction The test framework provides JVM-container and Tomcat-container base images including JDK8 and JDK17. You can choose the best one for your test case. If both are suitable for your case, JVM-container is preferred.\nJVM-container Image Introduction JVM-container uses eclipse-temurin:8-jdk as the base image. 
JVM-container supports JDK8 and JDK17 as well in CI, which inherits eclipse-temurin:8-jdk and eclipse-temurin:17-jdk. It is supported to custom the base Java docker image by specify base_image_java. The test case project must be packaged as project-name.zip, including startup.sh and uber jar, by using mvn clean package.\nTake the following test projects as examples:\n sofarpc-scenario is a single project case. webflux-scenario is a case including multiple projects. jdk17-with-gson-scenario is a single project case with JDK17.  Tomcat-container Image Introduction Tomcat-container uses tomcat:8.5-jdk8-openjdk, tomcat:8.5-jdk17-openjdk as the base image. It is supported to custom the base Tomcat docker image by specify base_image_tomcat. The test case project must be packaged as project-name.war by using mvn package.\nTake the following test project as an example\n spring-4.3.x-scenario  Test project hierarchical structure The test case is an independent maven project, and it must be packaged as a war tar ball or zip file, depending on the chosen base image. Also, two external accessible endpoints usually two URLs) are required.\nAll test case codes should be in the org.apache.skywalking.apm.testcase.* package. If there are some codes expected to be instrumented, then the classes could be in the test.org.apache.skywalking.apm.testcase.* package.\nJVM-container test project hierarchical structure\n[plugin-scenario] |- [bin] |- startup.sh |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Tomcat-container test project hierarchical structure\n[plugin-scenario] |- [config] |- expectedData.yaml |- [src] |- [main] |- ... |- [resource] |- log4j2.xml |- [webapp] |- [WEB-INF] |- web.xml |- pom.xml |- configuration.yml |- support-version.list [] = directory Test case configuration files The following files are required in every test case.\n   File Name Descriptions     configuration.yml Declare the basic case information, including case name, entrance endpoints, mode, and dependencies.   expectedData.yaml Describe the expected segmentItems, meterItems or logItems.   support-version.list List the target versions for this case.   startup.sh JVM-container only. This is not required when using Tomcat-container.    * support-version.list format requires every line for a single version (contains only the last version number of each minor version). You may use # to comment out this version.\nconfiguration.yml    Field description     type Image type, options, jvm, or tomcat. Required.   entryService The entrance endpoint (URL) for test case access. Required. (HTTP Method: GET)   healthCheck The health check endpoint (URL) for test case access. Required. (HTTP Method: HEAD)   startScript Path of the start up script. Required in type: jvm only.   runningMode Running mode with the optional plugin, options, default(default), with_optional, or with_bootstrap.   withPlugins Plugin selector rule, e.g.:apm-spring-annotation-plugin-*.jar. Required for runningMode=with_optional or runningMode=with_bootstrap.   environment Same as docker-compose#environment.   depends_on Same as docker-compose#depends_on.   dependencies Same as docker-compose#services, image, links, hostname, command, environment and depends_on are supported.    
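Putting the required fields together, a minimal configuration.yml for a hypothetical jvm-type case might look like the following sketch (the scenario name, endpoint paths, and script path are illustrative assumptions, not copied from an existing case):
type: jvm
entryService: http://localhost:8080/my-scenario/case/entry
healthCheck: http://localhost:8080/my-scenario/case/healthCheck
startScript: ./bin/startup.sh
runningMode: default
Cases that need backing services (for example a database) would additionally declare them under dependencies, as described in the table above.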
Note:, docker-compose activates only when dependencies is blank.\nrunningMode option description.\n   Option description     default Activate all plugins in plugin folder like the official distribution agent.   with_optional Activate default and plugins in optional-plugin by the give selector.   with_bootstrap Activate default and plugins in bootstrap-plugin by the give selector.    with_optional/with_bootstrap supports multiple selectors, separated by ;.\nFile Format\ntype: entryService: healthCheck: startScript: runningMode: withPlugins: environment: ... depends_on: ... dependencies: service1: image: hostname: expose: ... environment: ... depends_on: ... links: ... entrypoint: ... healthcheck: ...  dependencies support docker compose healthcheck. But the format is a little different. We need to have - as the start of every config item, and describe it as a string line.  For example, in the official document, the health check is:\nhealthcheck:test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]interval:1m30stimeout:10sretries:3start_period:40sHere you should write:\nhealthcheck:- \u0026#39;test:[\u0026#34;CMD\u0026#34;,\u0026#34;curl\u0026#34;,\u0026#34;-f\u0026#34;,\u0026#34;http://localhost\u0026#34;]\u0026#39;- \u0026#34;interval: 1m30s\u0026#34;- \u0026#34;timeout: 10s\u0026#34;- \u0026#34;retries: 3\u0026#34;- \u0026#34;start_period: 40s\u0026#34;In some cases, the dependency service (usually a third-party server like the SolrJ server) is required to keep the same version as the client lib version, which is defined as ${test.framework.version} in pom. You may use ${CASE_SERVER_IMAGE_VERSION} as the version number, which will be changed in the test for each version.\n It does not support resource related configurations, such as volumes, ports, and ulimits. The reason for this is that in test scenarios, no mapping is required for any port to the host VM, or to mount any folder.\n Take the following test cases as examples:\n dubbo-2.7.x with JVM-container jetty with JVM-container gateway with runningMode canal with docker-compose  expectedData.yaml Operator for number\n   Operator Description     nq Not equal   eq Equal(default)   ge Greater than or equal   gt Greater than    Operator for String\n   Operator Description     not null Not null   not blank Not blank ,it\u0026rsquo;s recommended for String type field as the default value maybe blank string, such as span tags   null Null or empty String   eq Equal(default)   start with Tests if this string starts with the specified prefix. DO NOT use it with meterItem tags value   end with Tests if this string ends with the specified suffix. DO NOT use it with meterItem tags value    Expected Data Format Of The Segment\nsegmentItems:- serviceName:SERVICE_NAME(string)segmentSize:SEGMENT_SIZE(int)segments:- segmentId:SEGMENT_ID(string)spans:...   Field Description     serviceName Service Name.   segmentSize The number of segments is expected.   segmentId Trace ID.   spans Segment span list. In the next section, you will learn how to describe each span.    
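For example, a hypothetical scenario named my-scenario that expects at least one segment could start like this (the service name is illustrative and not taken from an existing case):
segmentItems:
- serviceName: my-scenario
  segmentSize: ge 1
  segments:
  - segmentId: not null
    spans:
    ...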
Expected Data Format Of The Span
Note: the order of the span list should follow the order of the span finish time.

operationName: OPERATION_NAME(string)
parentSpanId: PARENT_SPAN_ID(int)
spanId: SPAN_ID(int)
startTime: START_TIME(int)
endTime: END_TIME(int)
isError: IS_ERROR(string: true, false)
spanLayer: SPAN_LAYER(string: DB, RPC_FRAMEWORK, HTTP, MQ, CACHE)
spanType: SPAN_TYPE(string: Exit, Entry, Local)
componentId: COMPONENT_ID(int)
tags:
- {key: TAG_KEY(string), value: TAG_VALUE(string)}
...
logs:
- {key: LOG_KEY(string), value: LOG_VALUE(string)}
...
peer: PEER(string)
refs:
- {traceId: TRACE_ID(string), parentTraceSegmentId: PARENT_TRACE_SEGMENT_ID(string), parentSpanId: PARENT_SPAN_ID(int), parentService: PARENT_SERVICE(string), parentServiceInstance: PARENT_SERVICE_INSTANCE(string), parentEndpoint: PARENT_ENDPOINT_NAME(string), networkAddress: NETWORK_ADDRESS(string), refType: REF_TYPE(string: CrossProcess, CrossThread)}
...

operationName: span operation name.
parentSpanId: parent span ID. Note: the parent span ID of the first span should be -1.
spanId: span ID, starting from 0.
startTime: span start time. It is impossible to get the accurate time, so nq 0 (not 0) should be enough.
endTime: span finish time. It is impossible to get the accurate time, so nq 0 (not 0) should be enough.
isError: span status, true or false.
componentId: component ID of your plugin.
tags: span tag list. Notice: keep the same order as coded in the plugin.
logs: span log list. Notice: keep the same order as coded in the plugin.
spanLayer: options are DB, RPC_FRAMEWORK, HTTP, MQ, CACHE.
spanType: span type; options are Exit, Entry, or Local.
peer: remote network address, mostly IP + port. Required for exit spans.

The verify description for SegmentRef:

traceId
parentTraceSegmentId: parent segment ID, pointing to the segment ID in the parent segment.
parentSpanId: parent span ID, pointing to the span ID in the parent segment.
parentService: the parent/downstream service name.
parentServiceInstance: the parent/downstream service instance name.
parentEndpoint: the endpoint of the parent/downstream service.
networkAddress: the peer value of the parent exit span.
refType: ref type; options are CrossProcess or CrossThread.

Expected Data Format Of The Meter Items

meterItems:
- serviceName: SERVICE_NAME(string)
  meterSize: METER_SIZE(int)
  meters:
  - ...

serviceName: service name.
meterSize: the expected number of meters.
meters: meter list. The next section describes how to write each meter.

Expected Data Format Of The Meter

meterId:
  name: NAME(string)
  tags:
  - {name: TAG_NAME(string), value: TAG_VALUE(string)}
singleValue: SINGLE_VALUE(double)
histogramBuckets:
- HISTOGRAM_BUCKET(double)
...

The verify description for MeterId:

name: meter name.
tags: meter tags.
tags.name: tag name.
tags.value: tag value.
singleValue: counter or gauge value. Use a number condition operator to validate it, such as gt or ge. If the current meter is a histogram, this field is not needed.
histogramBuckets: histogram buckets. The bucket list must be ordered. The tool asserts that at least one bucket of the histogram has a nonzero count. If the current meter is a counter or gauge, this field is not needed.

Expected Data Format Of The Log Items

logItems:
- serviceName: SERVICE_NAME(string)
  logSize: LOG_SIZE(int)
  logs:
  - ...

serviceName: service name.
logSize: the expected number of logs.
logs: log list. The next section describes how to write each log.
Expected Data Format Of The Log

timestamp: TIMESTAMP_VALUE(int)
endpoint: ENDPOINT_VALUE(int)
traceContext:
  traceId: TRACE_ID_VALUE(string)
  traceSegmentId: TRACE_SEGMENT_ID_VALUE(string)
  spanId: SPAN_ID_VALUE(int)
body:
  type: TYPE_VALUE(string)
  content:
    # Choose one of three (text, json or yaml)
    text: TEXT_VALUE(string)
    # json: JSON_VALUE(string)
    # yaml: YAML_VALUE(string)
tags:
  data:
  - key: TAG_KEY(string)
    value: TAG_VALUE(string)
  ...
layer: LAYER_VALUE(string)
...

The verify description for Log:

timestamp: log timestamp.
endpoint: log endpoint.
traceContext.traceId: associated trace ID.
traceContext.traceSegmentId: associated trace segment ID.
traceContext.spanId: associated span ID.
body.type: log body type.
body.content: log content; the sub-field is one of text, json, or yaml.
tags.data: log tags, key-value pairs.
layer: log layer.

startup.sh
This script provides the start point for a JVM-based service; most of them start with a java -jar command plus some variables. The following system environment variables are available in the shell:

agent_opts: agent plugin opts; check the details in the plugin doc or the opts added in the same PR.
SCENARIO_NAME: service name. Defaults to the case folder name.
SCENARIO_VERSION: version.
SCENARIO_ENTRY_SERVICE: entrance URL to access this service.
SCENARIO_HEALTH_CHECK_URL: health check URL.

${agent_opts} must be added to your java -jar command. It includes the parameters injected by the test framework and makes sure the agent is installed. All other parameters should be added after ${agent_opts}.

The test framework sets the service name to the test case folder name by default. In some cases, more than one test project needs to run under different service names; then you can set the name explicitly, as in the following example.

Example

home="$(cd "$(dirname $0)"; pwd)"
java -jar ${agent_opts} "-Dskywalking.agent.service_name=jettyserver-scenario" ${home}/../libs/jettyserver-scenario.jar &
sleep 1
java -jar ${agent_opts} "-Dskywalking.agent.service_name=jettyclient-scenario" ${home}/../libs/jettyclient-scenario.jar &

Only set this, or use other SkyWalking options, when it is really necessary.

Take the following test cases as examples:
- undertow
- webflux
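As a simpler reference than the two-service script above, a single-service startup.sh can stay minimal. The following sketch assumes the uber jar is named demo-scenario.jar and lives under ../libs, as in the example above; the jar name is illustrative and should match your packaged project:

home="$(cd "$(dirname $0)"; pwd)"
# ${agent_opts} carries the settings injected by the test framework and installs the agent;
# any other JVM arguments must come after it.
java -jar ${agent_opts} ${home}/../libs/demo-scenario.jar &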
Best Practices

How To Use The Archetype To Create A Test Case Project
We provide archetypes and a script to make creating a project easier. They create a complete test case project, so that we only need to focus on the case itself. First, use the following command to see the script usage:

bash ${SKYWALKING_HOME}/test/plugin/generator.sh

Then run it to generate a project, named by scenario_name, in ./scenarios.

Recommendations for pom

<properties>
    <!-- Provide and use this property in the pom. -->
    <!-- This version should match the library version, -->
    <!-- in this case, http components lib version 4.3. -->
    <test.framework.version>4.3</test.framework.version>
</properties>
<dependencies>
    <dependency>
        <groupId>org.apache.httpcomponents</groupId>
        <artifactId>httpclient</artifactId>
        <version>${test.framework.version}</version>
    </dependency>
    ...
</dependencies>
<build>
    <!-- Set the package final name to the same as the test case folder name. -->
    <finalName>httpclient-4.3.x-scenario</finalName>
    ....
</build>

How To Implement Heartbeat Service
The heartbeat service is designed for checking whether the service is available. It is a simple HTTP service; returning 200 means the target service is ready. The traffic generator then accesses the entry service and verifies the expected data. You should consider using this service to detect whether dependent services are ready, especially when the dependencies are databases or clusters.
Notice: because the heartbeat service could be traced, fully or partially, segmentSize in expectedData.yaml should use ge as the operator, and the segments of the heartbeat service should not be included in the expected segment data.

The example Process of Writing Tracing Expected Data
The expected data file, expectedData.yaml, includes the SegmentItems part.
We use the HttpClient plugin to show how to write the expected data.
There are two key points of testing:
- whether the HttpClient span is created;
- whether the ContextCarrier is created correctly and propagates across processes.

[Sequence diagram: the Browser calls the Case Servlet over WebHttp; the Case Servlet calls the ContextPropagateServlet through HttpClient; the responses return in reverse order.]

segmentItems
Following the flow of the HttpClient case, two segments should be created:
- a segment representing the CaseServlet access; let's name it SegmentA;
- a segment representing the ContextPropagateServlet access; let's name it SegmentB.
segmentItems:
- serviceName: httpclient-case
  segmentSize: ge 2 # Could have more than one health check segment, because the dependency is not standby.

Because the Tomcat plugin is a default plugin of SkyWalking, SegmentA contains two spans:
- Tomcat entry span
- HttpClient exit span

The SegmentA span list should look like the following:

- segmentId: not null
  spans:
  - operationName: /httpclient-case/case/context-propagate
    parentSpanId: 0
    spanId: 1
    startTime: nq 0
    endTime: nq 0
    isError: false
    spanLayer: Http
    spanType: Exit
    componentId: eq 2
    tags:
    - {key: url, value: 'http://127.0.0.1:8080/httpclient-case/case/context-propagate'}
    - {key: http.method, value: GET}
    - {key: http.status_code, value: '200'}
    logs: []
    peer: 127.0.0.1:8080
  - operationName: /httpclient-case/case/httpclient
    parentSpanId: -1
    spanId: 0
    startTime: nq 0
    endTime: nq 0
    spanLayer: Http
    isError: false
    spanType: Entry
    componentId: 1
    tags:
    - {key: url, value: 'http://localhost:{SERVER_OUTPUT_PORT}/httpclient-case/case/httpclient'}
    - {key: http.method, value: GET}
    - {key: http.status_code, value: '200'}
    logs: []
    peer: null

SegmentB should have only one Tomcat entry span, but it includes the ref pointing to SegmentA.

The SegmentB span list should look like the following:

- segmentId: not null
  spans:
  - operationName: /httpclient-case/case/context-propagate
    parentSpanId: -1
    spanId: 0
    tags:
    - {key: url, value: 'http://127.0.0.1:8080/httpclient-case/case/context-propagate'}
    - {key: http.method, value: GET}
    - {key: http.status_code, value: '200'}
    logs: []
    startTime: nq 0
    endTime: nq 0
    spanLayer: Http
    isError: false
    spanType: Entry
    componentId: 1
    peer: null
    refs:
    - {parentEndpoint: /httpclient-case/case/httpclient, networkAddress: 'localhost:8080', refType: CrossProcess, parentSpanId: 1, parentTraceSegmentId: not null, parentServiceInstance: not null, parentService: not null, traceId: not null}

The example Process of Writing Meter Expected Data
The expected data file, expectedData.yaml, includes the MeterItems part.
We use the toolkit plugin to demonstrate how to write the expected data. When writing for a meter plugin, the expected data file stays the same.
There is one key point of testing:
- build a meter and operate it.

For example, a counter and a histogram:

MeterFactory.counter("test_counter").tag("ck1", "cv1").build().increment(1d);
MeterFactory.histogram("test_histogram").tag("hk1", "hv1").steps(1d, 5d, 10d).build().addValue(2d);

[Sequence diagram: the plugin builds or operates the meter, and the agent core collects and reports it.]

meterItems
Following the flow of the toolkit case, two meters should be created:
- the meter test_counter, created from MeterFactory#counter; let's name it MeterA;
- the meter test_histogram, created from MeterFactory#histogram; let's name it MeterB.
meterItems:
- serviceName: toolkit-case
  meterSize: 2

They show two kinds of meter: MeterA has a single value, MeterB has a histogram value.

MeterA should look like the following; counters and gauges use the same data format:

- meterId:
    name: test_counter
    tags:
    - {name: ck1, value: cv1}
  singleValue: gt 0

MeterB should look like the following:

- meterId:
    name: test_histogram
    tags:
    - {name: hk1, value: hv1}
  histogramBuckets:
  - 0.0
  - 1.0
  - 5.0
  - 10.0

Local Test and Pull Request To The Upstream
First of all, the test case project must compile successfully, have the right project structure, and be deployable. The developer should verify that the start script runs on Linux/macOS and that the entryService/health check services respond.

You can run the test with the following commands:

cd ${SKYWALKING_HOME}
bash ./test/plugin/run.sh -f ${scenario_name}

Notice: if code in ./apm-sniffer has been changed, whether by your change or by a git update, please recompile the skywalking-agent, because the test framework uses the existing skywalking-agent folder rather than recompiling it every time.

Use ${SKYWALKING_HOME}/test/plugin/run.sh -h to learn more command options.

If the local test passes, you can add the case to a .github/workflows/plugins-test.<n>.yaml file, which drives the tests on the GitHub Actions of the official SkyWalking repository. Based on your plugin's name, add the test case to .github/workflows/plugins-test.<n>.yaml in alphabetical order.

Every test case is a GitHub Actions job. Please use the scenario directory name as the case name. Mostly you just need to decide which file (plugins-test.<n>.yaml) to add your test case to and put one line (as follows) in it; take the existing cases as examples. You can run python3 tools/select-group.py to see which file contains the fewest cases and add yours there, in order to balance the running time of each group.

If a test case needs to run in a JDK 17 environment, add it to a plugins-jdk17-test.<n>.yaml file. If it needs a JDK 21 environment, add it to a plugins-jdk21-test.<n>.yaml file.

jobs:
  PluginsTest:
    name: Plugin
    runs-on: ubuntu-latest
    timeout-minutes: 90
    strategy:
      fail-fast: true
      matrix:
        case:
          # ...
          - <your scenario test directory name>
          # ...
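For instance, if your scenario directory were named httpclient-4.3.x-scenario (the name used in the pom recommendation above), the only change in the chosen workflow file would be one extra matrix entry; the surrounding keys are the existing skeleton shown above:

      matrix:
        case:
          # ... existing cases, kept in alphabetical order ...
          - httpclient-4.3.x-scenario
          # ...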
","title":"Plugin Configurations","url":"/docs/skywalking-go/latest/en/agent/plugin-configurations/"},{"content":"Plugin Configurations    key environment key default value description     http.server_collect_parameters SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS false Collect the parameters of the HTTP request on the server side.   mongo.collect_statement SW_AGENT_PLUGIN_CONFIG_MONGO_COLLECT_STATEMENT false Collect the statement of the MongoDB request.   sql.collect_parameter SW_AGENT_PLUGIN_CONFIG_SQL_COLLECT_PARAMETER false Collect the parameter of the SQL request.   redis.max_args_bytes SW_AGENT_PLUGIN_CONFIG_REDIS_MAX_ARGS_BYTES 1024 Limit the bytes size of redis args request.   reporter.discard SW_AGENT_REPORTER_DISCARD false Discard the reporter.   gin.collect_request_headers SW_AGENT_PLUGIN_CONFIG_GIN_COLLECT_REQUEST_HEADERS  Collect the http header of gin request.   gin.header_length_threshold SW_AGENT_PLUGIN_CONFIG_GIN_HEADER_LENGTH_THRESHOLD 2048 Controlling the length limitation of all header values.    ","title":"Plugin Configurations","url":"/docs/skywalking-go/next/en/agent/plugin-configurations/"},{"content":"Plugin Configurations    key environment key default value description     http.server_collect_parameters SW_AGENT_PLUGIN_CONFIG_HTTP_SERVER_COLLECT_PARAMETERS false Collect the parameters of the HTTP request on the server side.   mongo.collect_statement SW_AGENT_PLUGIN_CONFIG_MONGO_COLLECT_STATEMENT false Collect the statement of the MongoDB request.   sql.collect_parameter SW_AGENT_PLUGIN_CONFIG_SQL_COLLECT_PARAMETER false Collect the parameter of the SQL request.   redis.max_args_bytes SW_AGENT_PLUGIN_CONFIG_REDIS_MAX_ARGS_BYTES 1024 Limit the bytes size of redis args request.   reporter.discard SW_AGENT_REPORTER_DISCARD false Discard the reporter.    ","title":"Plugin Configurations","url":"/docs/skywalking-go/v0.4.0/en/agent/plugin-configurations/"},{"content":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll plugins must follow these steps:\n Create a new plugin module: Create a new project in the specified directory and import the plugin API module. Define the enhancement object: Define the description for the plugin. Invoke the plugin API: Call the API provided by the core to complete the core invocation. Import the plugin module: Import the plugin into the management module for users to use.  Create a new plugin module The plugin must create a new module, which is currently stored in the project\u0026rsquo;s plugins directory.\nPlugins can import the following two modules:\n Agent core: This module provides all the dependencies needed for the plugin, including the plugin API, enhancement declaration objects, etc. Agent core plugin should be github.com/apache/skywalking-go/plugins/core and replaced by the relative location. Framework to be enhanced: Import the framework you wish to enhance.  Note: Plugins should NOT import and use any other modules, as this may cause compilation issues for users. If certain tools are needed, they should be provided by the agent core.\nDefine the enhancement object In the root directory of the project, create a new go file to define the basic information of the plugin. The basic information includes the following methods, corresponding to the Instrument interface:\n Name: The name of the plugin. Please keep this name consistent with the newly created project name. The reason will be explained later. Base Package: Declare which package this plugin intercepts. 
For example, if you want to intercept gin, you can write: \u0026ldquo;github.com/gin-gonic/gin\u0026rdquo;. Version Checker: This method passes the version number to the enhancement object to verify whether the specified version of the framework is supported. If not, the enhancement program will not be executed. Points: A plugin can define one or more enhancement points. This will be explained in more detail in the following sections. File System: Use //go:embed * in the current file to import all files in this module, which will be used for file copying during the mixed compilation process.  Note: Please declare //skywalking:nocopy at any position in this file to indicate that the file would not be copied. This file is only used for guidance during hybrid compilation. Also, this file involves the use of the embed package, and if the target framework does not import the package embed, a compilation error may occur.\nManage Instrument and Interceptor codes in hierarchy structure Instrument and interceptor codes are placed in root by default. In complex instrumentation scenarios, there could be dozens of interceptors, we provide PluginSourceCodePath to build a hierarchy folder structure to manage those codes.\nNotice: The instrumentation still works without proper setting of this, but the debug tool would lose the location of the source codes.\nExample For example, the framework needs to enhance two packages, as shown in the following directory structure:\n- plugins - test - go.mod - package1 - instrument.go - interceptor.go - package2 - instrument.go - interceptor.go ... In the above directory structure, the test framework needs to provide multiple different enhancement objects. In this case, a PluginSourceCodePath Source Code Path** method needs to be added for each enhancement object, the values of this method should be package1 and package2.\nInstrument Point Instrument points are used to declare that which methods and structs in the current package should be instrumented. They mainly include the following information:\n Package path: If the interception point that needs to be intercepted is not in the root directory of the current package, you need to fill in the relative path to the package. For example, if this interception point wants to instrument content in the github.com/gin-gonic/gin/render directory, you need to fill in render here. Package Name(optional): Define the package name of the current package. If the package name is not defined, the package name of the current package would be used by default. It\u0026rsquo;s used when the package path and package name are not same, such as the name of github.com/emicklei/go-restful/v3 is restful. Matcher(At): Specify which eligible content in the current package path needs to be enhanced. Interceptor: If the current method is being intercepted (whether it\u0026rsquo;s a static method or an instance method), the name of the interceptor must be specified.  Method Matcher Method matchers are used to intercept both static and non-static methods. The specific definitions are as follows:\n// NewStaticMethodEnhance creates a new EnhanceMatcher for static method. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewStaticMethodEnhance(name string, filters ...MethodFilterOption) // NewMethodEnhance creates a new EnhanceMatcher for method. // receiver: receiver type name of method needs to be enhanced. 
// name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewMethodEnhance(receiver, name string, filters ...MethodFilterOption) Filter Option Filter Options are used to validate the parameters or return values in the method. If the method name matches but the Options validation fails, the enhancement would not be performed.\n// WithArgsCount filter methods with specific count of arguments. func WithArgsCount(argsCount int) // WithResultCount filter methods with specific count of results. func WithResultCount(resultCount int) // WithArgType filter methods with specific type of the index of the argument. func WithArgType(argIndex int, dataType string) // WithResultType filter methods with specific type of the index of the result. func WithResultType(argIndex int, dataType string) Demo For example, if you have the following method that needs to be intercepted:\nfunc (c *Context) HandleMethod(name string) bool you can describe it using this condition:\ninstrument.NewMethodEnhance(\u0026#34;*Context\u0026#34;, \u0026#34;HandleMethod\u0026#34;, instrument.WithArgsCount(1), instrument.WithArgType(0, \u0026#34;string\u0026#34;), instrument.WithResultCount(1), instrument.WithResultType(0, \u0026#34;bool\u0026#34;)) Struct Matcher Enhancement structures can embed enhanced fields within specified structs. After the struct is instantiated, custom data content can be added to the specified struct in the method interceptor.\nStruct matchers are used to intercept struct methods. The specific definitions are as follows:\n// NewStructEnhance creates a new EnhanceMatcher for struct. // name: struct name needs to be enhanced.(Public and private structs are supported) // filters: filters for struct. func NewStructEnhance(name string, filters ...StructFilterOption) Filter Option Filter Options are used to validate the fields in the structure.\n// WithFieldExists filter the struct has the field with specific name. func WithFieldExists(fieldName string) // WithFiledType filter the struct has the field with specific name and type. func WithFiledType(filedName, filedType string) Enhanced Instance After completing the definition of the struct enhancement, you can convert the specified instance into the following interface when intercepting methods, and get or set custom field information. 
The interface definition is as follows:\ntype EnhancedInstance interface { // GetSkyWalkingDynamicField get the customized data from instance \tGetSkyWalkingDynamicField() interface{} // SetSkyWalkingDynamicField set the customized data into the instance \tSetSkyWalkingDynamicField(interface{}) } Demo For example, if you have the following struct that needs to be enhanced:\ntype Test struct { value *Context } you can describe it using this condition:\ninstrument.NewStructEnhance(\u0026#34;Test\u0026#34;, instrument.WithFieldExists(\u0026#34;value\u0026#34;), instrument.WithFiledType(\u0026#34;value\u0026#34;, \u0026#34;*Context\u0026#34;)) Next, you can set custom content for the specified enhanced instance when intercepting methods.\nins := testInstance.(instrument.EnhancedInstance) // setting custom content ins.SetSkyWalkingDynamicField(\u0026#34;custom content\u0026#34;) // getting custom content res := ins.GetSkyWalkingDynamicField() Interceptor Interceptors are used to define custom business logic before and after method execution, allowing you to access data from before and after method execution and interact with the Agent Core by using the Agent API.\nThe interceptor definition is as follows, you need to create a new structure and implement it:\ntype Interceptor interface { // BeforeInvoke would be called before the target method invocation.  BeforeInvoke(invocation Invocation) error // AfterInvoke would be called after the target method invocation.  AfterInvoke(invocation Invocation, result ...interface{}) error } Within the interface, you can see the Invocation interface, which defines the context of an interception. The specific definition is as follows:\ntype Invocation interface { // CallerInstance is the instance of the caller, nil if the method is static method.  CallerInstance() interface{} // Args is get the arguments of the method, please cast to the specific type to get more information.  Args() []interface{} // ChangeArg is change the argument value of the method  ChangeArg(int, interface{}) // IsContinue is the flag to control the method invocation, if it is true, the target method would not be invoked.  IsContinue() bool // DefineReturnValues are defined the return value of the method, and continue the method invoked  DefineReturnValues(...interface{}) // SetContext is the customized context of the method invocation, it should be propagated the tracing span.  SetContext(interface{}) // GetContext is get the customized context of the method invocation  GetContext() interface{} } Thread safe The Interceptor instance would define new instance at the current package level, rather than creating a new instance each time a method is intercepted.\nTherefore, do not declare objects in the interceptor, and instead use Invocation.Context to pass data.\nPackage Path If the method you want to intercept is not located in the root directory of the framework, place your interceptor code in the relative location within the plugin. The Agent would only copy files from the same package directory.\nFor example, if you want to intercept a method in github.com/gin-gonic/gin/render, create a render directory in the root of your plugin, and put the interceptor inside it. This ensures that the interceptor is properly included during the copy operation and can be correctly applied to the target package.\nPlugin Configuration Plugin configuration is used to add custom configuration parameters to a specified plugin. 
When users specify configuration items, the plugin can dynamically adapt the content needed in the plugin according to the user\u0026rsquo;s configuration items.\nDeclaration Please declare the configuration file you need in the package you want to use. Declare it using var, and add the //skywalking:config directive to specify that this variable requires dynamic updating.\nBy default, the configuration item belongs to the configuration of the current plugin. For example, if the name of my current plugin is gin, then this configuration item is under the gin plugin. Of course, you can also change it to the http plugin to reference the configuration information of the relevant plugin, in which case you need to specify it as //skywalking:config http.\nItem Each configuration item needs to add a config tag. This is used to specify the name of the current configuration content. By default, it would lowercase all letters and add an _ identifier before each uppercase letter.\nCurrently, it supports basic data types and struct types, and it also supports obtaining data values through environment variables.\nDemo For example, I have declared the following configuration item:\n//skywalking:config http var config struct { ServerCollectParameters bool `config:\u0026#34;server_collect_parameters\u0026#34;` Client struct{ CollectParameters bool `config:\u0026#34;collect_parameters\u0026#34;` } `config:\u0026#34;client\u0026#34;` } In the above example, I created a plugin configuration for http, which includes two configuration items.\n config.ServerCollectParameters: Its configuration is located at http.server_collect_parameters. config.Client.CollectParameter: Its configuration is located at http.client.collect_parameter.  When the plugin needs to be used, it can be accessed directly by reading the config configuration.\nAgent API The Agent API is used when a method is intercepted and interacts with the Agent Core.\nTracing API The Tracing API is used for building distributed tracing, and currently supports the following methods:\n// CreateEntrySpan creates a new entry span. // operationName is the name of the span. // extractor is the extractor to extract the context from the carrier. // opts is the options to create the span. func CreateEntrySpan(operationName string, extractor Extractor, opts ...SpanOption) // CreateLocalSpan creates a new local span. // operationName is the name of the span. // opts is the options to create the span. func CreateLocalSpan(operationName string, opts ...SpanOption) // CreateExitSpan creates a new exit span. // operationName is the name of the span. // peer is the peer address of the span. // injector is the injector to inject the context into the carrier. // opts is the options to create the span. func CreateExitSpan(operationName, peer string, injector Injector, opts ...SpanOption) // ActiveSpan returns the current active span, it can be got the current span in the current goroutine. // If the current goroutine is not in the context of the span, it will return nil. // If get the span from other goroutine, it can only get information but cannot be operated. func ActiveSpan() // GetRuntimeContextValue returns the value of the key in the runtime context, which is current goroutine. // The value can also read from the goroutine which is created by the current goroutine func GetRuntimeContextValue(key string) // SetRuntimeContextValue sets the value of the key in the runtime context. 
func SetRuntimeContextValue(key string, val interface{}) Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\n// Extractor is a tool specification which define how to // extract trace parent context from propagation context type Extractor func(headerKey string) (string, error) // Injector is a tool specification which define how to // inject trace context into propagation context type Injector func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request tracing.CreateEntrySpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request tracing.CreateExitSpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil } Span Option Span Options can be passed when creating a Span to configure the information in the Span.\nThe following options are currently supported:\n// WithLayer set the SpanLayer of the Span func WithLayer(layer SpanLayer) // WithComponent set the component id of the Span func WithComponent(componentID int32) // WithTag set the Tag of the Span func WithTag(key Tag, value string) Span Component The Component ID in Span is used to identify the current component, with its data defined in SkyWalking OAP. If the framework you are writing does not exist in this file, please submit a PR in the SkyWalking project to add the definition of this plugin.\nSpan Operation After creating a Span, you can perform additional operations on it.\n// Span for plugin API type Span interface { // AsyncSpan for the async API \tAsyncSpan // Tag set the Tag of the Span \tTag(Tag, string) // SetSpanLayer set the SpanLayer of the Span \tSetSpanLayer(SpanLayer) // SetOperationName re-set the operation name of the Span \tSetOperationName(string) // SetPeer re-set the peer address of the Span \tSetPeer(string) // Log add log to the Span \tLog(...string) // Error add error log to the Span \tError(...string) // End end the Span \tEnd() } Async Span There is a set of advanced APIs in Span which is specifically designed for async use cases. When setting name, tags, logs, and other operations (including end span) of the span in another goroutine, you should use these APIs.\ntype AsyncSpan interface { // PrepareAsync the span finished at current tracing context, but current span is still alive until AsyncFinish called  PrepareAsync() // AsyncFinish to finished current async span  AsyncFinish() } Following the previous API define, you should following these steps to use the async API:\n Call span.PrepareAsync() to prepare the span to do any operation in another goroutine. Use Span.End() in the original goroutine when your job in the current goroutine is complete. Propagate the span to any other goroutine in your plugin. Once the above steps are all set, call span.AsyncFinish() in any goroutine. When the span.AsyncFinish() is complete for all spans, the all spans would be finished and report to the backend.  
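For example, here is a minimal sketch of that async flow. It assumes the tracing import path shown below and that CreateLocalSpan returns the new span plus an error; the doWork helper is hypothetical.

```go
package exampleplugin // hypothetical plugin package

import (
	"github.com/apache/skywalking-go/plugins/core/tracing" // import path assumed
)

// runAsyncJob is a hypothetical helper showing the four async-span steps above.
func runAsyncJob(doWork func()) error {
	// Step 1: create a span and prepare it for use in another goroutine.
	span, err := tracing.CreateLocalSpan("example/asyncJob") // (Span, error) return assumed
	if err != nil {
		return err
	}
	span.PrepareAsync()

	// Step 3: propagate the span to the goroutine that performs the real work.
	go func() {
		doWork()
		span.Log("async work finished")
		// Step 4: finish the async span; it is reported once AsyncFinish completes.
		span.AsyncFinish()
	}()

	// Step 2: end the span in the original goroutine; it stays alive until AsyncFinish is called.
	span.End()
	return nil
}
```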
Tracing Context Operation In the Go Agent, Trace Context would continue cross goroutines automatically by default. However, in some cases, goroutine would be context sharing due to be scheduled by the pool mechanism. Consider these advanced APIs to manipulate context and switch the current context.\n// CaptureContext capture current tracing context in the current goroutine. func CaptureContext() ContextSnapshot // ContinueContext continue the tracing context in the current goroutine. func ContinueContext(ctx ContextSnapshot) // CleanContext clean the tracing context in the current goroutine. func CleanContext() Typically, use APIs as following to control or switch the context:\n Use tracing.CaptureContext() to get the ContextSnapshot object. Propagate the snapshot context to any other goroutine in your plugin. Use tracing.ContinueContext(snapshot) to continue the snapshot context in the target goroutine.  Meter API The Meter API is used to record the metrics of the target program, and currently supports the following methods:\n// NewCounter creates a new counter metrics. // name is the name of the metrics // opts is the options for the metrics func NewCounter(name string, opts ...Opt) Counter // NewGauge creates a new gauge metrics. // name is the name of the metrics // getter is the function to get the value of the gauge meter // opts is the options for the metrics func NewGauge(name string, getter func() float64, opts ...Opt) Gauge // NewHistogram creates a new histogram metrics. // name is the name of the metrics // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogram(name string, steps []float64, opts ...Opt) Histogram // NewHistogramWithMinValue creates a new histogram metrics. // name is the name of the metrics // minVal is the min value of the histogram bucket // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogramWithMinValue(name string, minVal float64, steps []float64, opts ...Opt) Histogram // RegisterBeforeCollectHook registers a hook function which will be called before metrics collect. func RegisterBeforeCollectHook(f func()) Meter Option The Meter Options can be passed when creating a Meter to configure the information in the Meter.\n// WithLabel adds a label to the metrics. func WithLabel(key, value string) Opt Meter Type Counter Counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase.\ntype Counter interface { // Get returns the current value of the counter. \tGet() float64 // Inc increments the counter with value. \tInc(val float64) } Gauge Gauge is a metric that represents a single numerical value that can arbitrarily go up and down.\ntype Gauge interface { // Get returns the current value of the gauge.  Get() float64 } Histogram Histogram is a metric that represents the distribution of a set of values.\ntype Histogram interface { // Observe find the value associate bucket and add 1. \tObserve(val float64) // ObserveWithCount find the value associate bucket and add specific count. \tObserveWithCount(val float64, count int64) } Import Plugin Once you have finished developing the plugin, you need to import the completed module into the Agent program and define it in the corresponding file.\nAt this point, your plugin development process is complete. 
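Returning to the Tracing Context Operation APIs above, a minimal, hypothetical sketch of carrying the context into a pooled goroutine follows; the submit helper and the worker-pool shape are placeholders, and the import path is assumed.

```go
package exampleplugin // hypothetical plugin package

import (
	"github.com/apache/skywalking-go/plugins/core/tracing" // import path assumed
)

// submit is a hypothetical helper that hands a job to a shared worker pool.
// Because pooled goroutines are reused, the caller's tracing context is
// captured explicitly and continued inside the worker.
func submit(pool chan<- func(), job func()) {
	snapshot := tracing.CaptureContext() // capture the current tracing context
	pool <- func() {
		tracing.ContinueContext(snapshot) // continue it in the pooled goroutine
		defer tracing.CleanContext()      // clean up so the next task starts fresh
		job()
	}
}
```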
When the Agent performs hybrid compilation on the target program, your plugin will be executed as expected.\n","title":"Plugin Development Guide","url":"/docs/skywalking-go/latest/en/development-and-contribution/development-guide/"},{"content":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll plugins must follow these steps:\n Create a new plugin module: Create a new project in the specified directory and import the plugin API module. Define the enhancement object: Define the description for the plugin. Invoke the plugin API: Call the API provided by the core to complete the core invocation. Import the plugin module: Import the plugin into the management module for users to use.  Create a new plugin module The plugin must create a new module, which is currently stored in the project\u0026rsquo;s plugins directory.\nPlugins can import the following two modules:\n Agent core: This module provides all the dependencies needed for the plugin, including the plugin API, enhancement declaration objects, etc. Agent core plugin should be github.com/apache/skywalking-go/plugins/core and replaced by the relative location. Framework to be enhanced: Import the framework you wish to enhance.  Note: Plugins should NOT import and use any other modules, as this may cause compilation issues for users. If certain tools are needed, they should be provided by the agent core.\nDefine the enhancement object In the root directory of the project, create a new go file to define the basic information of the plugin. The basic information includes the following methods, corresponding to the Instrument interface:\n Name: The name of the plugin. Please keep this name consistent with the newly created project name. The reason will be explained later. Base Package: Declare which package this plugin intercepts. For example, if you want to intercept gin, you can write: \u0026ldquo;github.com/gin-gonic/gin\u0026rdquo;. Version Checker: This method passes the version number to the enhancement object to verify whether the specified version of the framework is supported. If not, the enhancement program will not be executed. Points: A plugin can define one or more enhancement points. This will be explained in more detail in the following sections. File System: Use //go:embed * in the current file to import all files in this module, which will be used for file copying during the mixed compilation process.  Note: Please declare //skywalking:nocopy at any position in this file to indicate that the file would not be copied. This file is only used for guidance during hybrid compilation. Also, this file involves the use of the embed package, and if the target framework does not import the package embed, a compilation error may occur.\nManage Instrument and Interceptor codes in hierarchy structure Instrument and interceptor codes are placed in root by default. In complex instrumentation scenarios, there could be dozens of interceptors, we provide PluginSourceCodePath to build a hierarchy folder structure to manage those codes.\nNotice: The instrumentation still works without proper setting of this, but the debug tool would lose the location of the source codes.\nExample For example, the framework needs to enhance two packages, as shown in the following directory structure:\n- plugins - test - go.mod - package1 - instrument.go - interceptor.go - package2 - instrument.go - interceptor.go ... 
In the above directory structure, the test framework needs to provide multiple different enhancement objects. In this case, a PluginSourceCodePath Source Code Path** method needs to be added for each enhancement object, the values of this method should be package1 and package2.\nInstrument Point Instrument points are used to declare that which methods and structs in the current package should be instrumented. They mainly include the following information:\n Package path: If the interception point that needs to be intercepted is not in the root directory of the current package, you need to fill in the relative path to the package. For example, if this interception point wants to instrument content in the github.com/gin-gonic/gin/render directory, you need to fill in render here. Package Name(optional): Define the package name of the current package. If the package name is not defined, the package name of the current package would be used by default. It\u0026rsquo;s used when the package path and package name are not same, such as the name of github.com/emicklei/go-restful/v3 is restful. Matcher(At): Specify which eligible content in the current package path needs to be enhanced. Interceptor: If the current method is being intercepted (whether it\u0026rsquo;s a static method or an instance method), the name of the interceptor must be specified.  Method Matcher Method matchers are used to intercept both static and non-static methods. The specific definitions are as follows:\n// NewStaticMethodEnhance creates a new EnhanceMatcher for static method. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewStaticMethodEnhance(name string, filters ...MethodFilterOption) // NewMethodEnhance creates a new EnhanceMatcher for method. // receiver: receiver type name of method needs to be enhanced. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewMethodEnhance(receiver, name string, filters ...MethodFilterOption) Filter Option Filter Options are used to validate the parameters or return values in the method. If the method name matches but the Options validation fails, the enhancement would not be performed.\n// WithArgsCount filter methods with specific count of arguments. func WithArgsCount(argsCount int) // WithResultCount filter methods with specific count of results. func WithResultCount(resultCount int) // WithArgType filter methods with specific type of the index of the argument. func WithArgType(argIndex int, dataType string) // WithResultType filter methods with specific type of the index of the result. func WithResultType(argIndex int, dataType string) Demo For example, if you have the following method that needs to be intercepted:\nfunc (c *Context) HandleMethod(name string) bool you can describe it using this condition:\ninstrument.NewMethodEnhance(\u0026#34;*Context\u0026#34;, \u0026#34;HandleMethod\u0026#34;, instrument.WithArgsCount(1), instrument.WithArgType(0, \u0026#34;string\u0026#34;), instrument.WithResultCount(1), instrument.WithResultType(0, \u0026#34;bool\u0026#34;)) Struct Matcher Enhancement structures can embed enhanced fields within specified structs. After the struct is instantiated, custom data content can be added to the specified struct in the method interceptor.\nStruct matchers are used to intercept struct methods. The specific definitions are as follows:\n// NewStructEnhance creates a new EnhanceMatcher for struct. 
// name: struct name needs to be enhanced.(Public and private structs are supported) // filters: filters for struct. func NewStructEnhance(name string, filters ...StructFilterOption) Filter Option Filter Options are used to validate the fields in the structure.\n// WithFieldExists filter the struct has the field with specific name. func WithFieldExists(fieldName string) // WithFiledType filter the struct has the field with specific name and type. func WithFiledType(filedName, filedType string) Enhanced Instance After completing the definition of the struct enhancement, you can convert the specified instance into the following interface when intercepting methods, and get or set custom field information. The interface definition is as follows:\ntype EnhancedInstance interface { // GetSkyWalkingDynamicField get the customized data from instance \tGetSkyWalkingDynamicField() interface{} // SetSkyWalkingDynamicField set the customized data into the instance \tSetSkyWalkingDynamicField(interface{}) } Demo For example, if you have the following struct that needs to be enhanced:\ntype Test struct { value *Context } you can describe it using this condition:\ninstrument.NewStructEnhance(\u0026#34;Test\u0026#34;, instrument.WithFieldExists(\u0026#34;value\u0026#34;), instrument.WithFiledType(\u0026#34;value\u0026#34;, \u0026#34;*Context\u0026#34;)) Next, you can set custom content for the specified enhanced instance when intercepting methods.\nins := testInstance.(instrument.EnhancedInstance) // setting custom content ins.SetSkyWalkingDynamicField(\u0026#34;custom content\u0026#34;) // getting custom content res := ins.GetSkyWalkingDynamicField() Interceptor Interceptors are used to define custom business logic before and after method execution, allowing you to access data from before and after method execution and interact with the Agent Core by using the Agent API.\nThe interceptor definition is as follows, you need to create a new structure and implement it:\ntype Interceptor interface { // BeforeInvoke would be called before the target method invocation.  BeforeInvoke(invocation Invocation) error // AfterInvoke would be called after the target method invocation.  AfterInvoke(invocation Invocation, result ...interface{}) error } Within the interface, you can see the Invocation interface, which defines the context of an interception. The specific definition is as follows:\ntype Invocation interface { // CallerInstance is the instance of the caller, nil if the method is static method.  CallerInstance() interface{} // Args is get the arguments of the method, please cast to the specific type to get more information.  Args() []interface{} // ChangeArg is change the argument value of the method  ChangeArg(int, interface{}) // IsContinue is the flag to control the method invocation, if it is true, the target method would not be invoked.  IsContinue() bool // DefineReturnValues are defined the return value of the method, and continue the method invoked  DefineReturnValues(...interface{}) // SetContext is the customized context of the method invocation, it should be propagated the tracing span.  
SetContext(interface{}) // GetContext is get the customized context of the method invocation  GetContext() interface{} } Thread safe The Interceptor instance would define new instance at the current package level, rather than creating a new instance each time a method is intercepted.\nTherefore, do not declare objects in the interceptor, and instead use Invocation.Context to pass data.\nPackage Path If the method you want to intercept is not located in the root directory of the framework, place your interceptor code in the relative location within the plugin. The Agent would only copy files from the same package directory.\nFor example, if you want to intercept a method in github.com/gin-gonic/gin/render, create a render directory in the root of your plugin, and put the interceptor inside it. This ensures that the interceptor is properly included during the copy operation and can be correctly applied to the target package.\nPlugin Configuration Plugin configuration is used to add custom configuration parameters to a specified plugin. When users specify configuration items, the plugin can dynamically adapt the content needed in the plugin according to the user\u0026rsquo;s configuration items.\nDeclaration Please declare the configuration file you need in the package you want to use. Declare it using var, and add the //skywalking:config directive to specify that this variable requires dynamic updating.\nBy default, the configuration item belongs to the configuration of the current plugin. For example, if the name of my current plugin is gin, then this configuration item is under the gin plugin. Of course, you can also change it to the http plugin to reference the configuration information of the relevant plugin, in which case you need to specify it as //skywalking:config http.\nItem Each configuration item needs to add a config tag. This is used to specify the name of the current configuration content. By default, it would lowercase all letters and add an _ identifier before each uppercase letter.\nCurrently, it supports basic data types and struct types, and it also supports obtaining data values through environment variables.\nDemo For example, I have declared the following configuration item:\n//skywalking:config http var config struct { ServerCollectParameters bool `config:\u0026#34;server_collect_parameters\u0026#34;` Client struct{ CollectParameters bool `config:\u0026#34;collect_parameters\u0026#34;` } `config:\u0026#34;client\u0026#34;` } In the above example, I created a plugin configuration for http, which includes two configuration items.\n config.ServerCollectParameters: Its configuration is located at http.server_collect_parameters. config.Client.CollectParameter: Its configuration is located at http.client.collect_parameter.  When the plugin needs to be used, it can be accessed directly by reading the config configuration.\nAgent API The Agent API is used when a method is intercepted and interacts with the Agent Core.\nTracing API The Tracing API is used for building distributed tracing, and currently supports the following methods:\n// CreateEntrySpan creates a new entry span. // operationName is the name of the span. // extractor is the extractor to extract the context from the carrier. // opts is the options to create the span. func CreateEntrySpan(operationName string, extractor Extractor, opts ...SpanOption) // CreateLocalSpan creates a new local span. // operationName is the name of the span. // opts is the options to create the span. 
func CreateLocalSpan(operationName string, opts ...SpanOption) // CreateExitSpan creates a new exit span. // operationName is the name of the span. // peer is the peer address of the span. // injector is the injector to inject the context into the carrier. // opts is the options to create the span. func CreateExitSpan(operationName, peer string, injector Injector, opts ...SpanOption) // ActiveSpan returns the current active span, it can be got the current span in the current goroutine. // If the current goroutine is not in the context of the span, it will return nil. // If get the span from other goroutine, it can only get information but cannot be operated. func ActiveSpan() // GetRuntimeContextValue returns the value of the key in the runtime context, which is current goroutine. // The value can also read from the goroutine which is created by the current goroutine func GetRuntimeContextValue(key string) // SetRuntimeContextValue sets the value of the key in the runtime context. func SetRuntimeContextValue(key string, val interface{}) Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\n// Extractor is a tool specification which define how to // extract trace parent context from propagation context type Extractor func(headerKey string) (string, error) // Injector is a tool specification which define how to // inject trace context into propagation context type Injector func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request tracing.CreateEntrySpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request tracing.CreateExitSpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil } Span Option Span Options can be passed when creating a Span to configure the information in the Span.\nThe following options are currently supported:\n// WithLayer set the SpanLayer of the Span func WithLayer(layer SpanLayer) // WithComponent set the component id of the Span func WithComponent(componentID int32) // WithTag set the Tag of the Span func WithTag(key Tag, value string) Span Component The Component ID in Span is used to identify the current component, with its data defined in SkyWalking OAP. 
If the framework you are writing does not exist in this file, please submit a PR in the SkyWalking project to add the definition of this plugin.\nSpan Operation After creating a Span, you can perform additional operations on it.\n// Span for plugin API type Span interface { // AsyncSpan for the async API \tAsyncSpan // Tag set the Tag of the Span \tTag(Tag, string) // SetSpanLayer set the SpanLayer of the Span \tSetSpanLayer(SpanLayer) // SetOperationName re-set the operation name of the Span \tSetOperationName(string) // SetPeer re-set the peer address of the Span \tSetPeer(string) // Log add log to the Span \tLog(...string) // Error add error log to the Span \tError(...string) // End end the Span \tEnd() } Async Span There is a set of advanced APIs in Span which is specifically designed for async use cases. When setting name, tags, logs, and other operations (including end span) of the span in another goroutine, you should use these APIs.\ntype AsyncSpan interface { // PrepareAsync the span finished at current tracing context, but current span is still alive until AsyncFinish called  PrepareAsync() // AsyncFinish to finished current async span  AsyncFinish() } Following the previous API define, you should following these steps to use the async API:\n Call span.PrepareAsync() to prepare the span to do any operation in another goroutine. Use Span.End() in the original goroutine when your job in the current goroutine is complete. Propagate the span to any other goroutine in your plugin. Once the above steps are all set, call span.AsyncFinish() in any goroutine. When the span.AsyncFinish() is complete for all spans, the all spans would be finished and report to the backend.  Tracing Context Operation In the Go Agent, Trace Context would continue cross goroutines automatically by default. However, in some cases, goroutine would be context sharing due to be scheduled by the pool mechanism. Consider these advanced APIs to manipulate context and switch the current context.\n// CaptureContext capture current tracing context in the current goroutine. func CaptureContext() ContextSnapshot // ContinueContext continue the tracing context in the current goroutine. func ContinueContext(ctx ContextSnapshot) // CleanContext clean the tracing context in the current goroutine. func CleanContext() Typically, use APIs as following to control or switch the context:\n Use tracing.CaptureContext() to get the ContextSnapshot object. Propagate the snapshot context to any other goroutine in your plugin. Use tracing.ContinueContext(snapshot) to continue the snapshot context in the target goroutine.  Meter API The Meter API is used to record the metrics of the target program, and currently supports the following methods:\n// NewCounter creates a new counter metrics. // name is the name of the metrics // opts is the options for the metrics func NewCounter(name string, opts ...Opt) Counter // NewGauge creates a new gauge metrics. // name is the name of the metrics // getter is the function to get the value of the gauge meter // opts is the options for the metrics func NewGauge(name string, getter func() float64, opts ...Opt) Gauge // NewHistogram creates a new histogram metrics. // name is the name of the metrics // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogram(name string, steps []float64, opts ...Opt) Histogram // NewHistogramWithMinValue creates a new histogram metrics. 
// name is the name of the metrics // minVal is the min value of the histogram bucket // steps is the buckets of the histogram // opts is the options for the metrics func NewHistogramWithMinValue(name string, minVal float64, steps []float64, opts ...Opt) Histogram // RegisterBeforeCollectHook registers a hook function which will be called before metrics collect. func RegisterBeforeCollectHook(f func()) Meter Option The Meter Options can be passed when creating a Meter to configure the information in the Meter.\n// WithLabel adds a label to the metrics. func WithLabel(key, value string) Opt Meter Type Counter Counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase.\ntype Counter interface { // Get returns the current value of the counter. \tGet() float64 // Inc increments the counter with value. \tInc(val float64) } Gauge Gauge is a metric that represents a single numerical value that can arbitrarily go up and down.\ntype Gauge interface { // Get returns the current value of the gauge.  Get() float64 } Histogram Histogram is a metric that represents the distribution of a set of values.\ntype Histogram interface { // Observe find the value associate bucket and add 1. \tObserve(val float64) // ObserveWithCount find the value associate bucket and add specific count. \tObserveWithCount(val float64, count int64) } Import Plugin Once you have finished developing the plugin, you need to import the completed module into the Agent program and define it in the corresponding file.\nAt this point, your plugin development process is complete. When the Agent performs hybrid compilation on the target program, your plugin will be executed as expected.\n","title":"Plugin Development Guide","url":"/docs/skywalking-go/next/en/development-and-contribution/development-guide/"},{"content":"Plugin Development Guide This documentation introduces how developers can create a plugin.\nAll plugins must follow these steps:\n Create a new plugin module: Create a new project in the specified directory and import the plugin API module. Define the enhancement object: Define the description for the plugin. Invoke the plugin API: Call the API provided by the core to complete the core invocation. Import the plugin module: Import the plugin into the management module for users to use.  Create a new plugin module The plugin must create a new module, which is currently stored in the project\u0026rsquo;s plugins directory.\nPlugins can import the following two modules:\n Agent core: This module provides all the dependencies needed for the plugin, including the plugin API, enhancement declaration objects, etc. Agent core plugin should be github.com/apache/skywalking-go/plugins/core and replaced by the relative location. Framework to be enhanced: Import the framework you wish to enhance.  Note: Plugins should NOT import and use any other modules, as this may cause compilation issues for users. If certain tools are needed, they should be provided by the agent core.\nDefine the enhancement object In the root directory of the project, create a new go file to define the basic information of the plugin. The basic information includes the following methods, corresponding to the Instrument interface:\n Name: The name of the plugin. Please keep this name consistent with the newly created project name. The reason will be explained later. Base Package: Declare which package this plugin intercepts. 
For example, if you want to intercept gin, you can write: \u0026ldquo;github.com/gin-gonic/gin\u0026rdquo;. Version Checker: This method passes the version number to the enhancement object to verify whether the specified version of the framework is supported. If not, the enhancement program will not be executed. Points: A plugin can define one or more enhancement points. This will be explained in more detail in the following sections. File System: Use //go:embed * in the current file to import all files in this module, which will be used for file copying during the mixed compilation process.  Note: Please declare //skywalking:nocopy at any position in this file to indicate that the file would not be copied. This file is only used for guidance during hybrid compilation. Also, this file involves the use of the embed package, and if the target framework does not import the package embed, a compilation error may occur.\nManage Instrument and Interceptor codes in hierarchy structure Instrument and interceptor codes are placed in root by default. In complex instrumentation scenarios, there could be dozens of interceptors, we provide PluginSourceCodePath to build a hierarchy folder structure to manage those codes.\nNotice: The instrumentation still works without proper setting of this, but the debug tool would lose the location of the source codes.\nExample For example, the framework needs to enhance two packages, as shown in the following directory structure:\n- plugins - test - go.mod - package1 - instrument.go - interceptor.go - package2 - instrument.go - interceptor.go ... In the above directory structure, the test framework needs to provide multiple different enhancement objects. In this case, a PluginSourceCodePath Source Code Path** method needs to be added for each enhancement object, the values of this method should be package1 and package2.\nInstrument Point Instrument points are used to declare that which methods and structs in the current package should be instrumented. They mainly include the following information:\n Package path: If the interception point that needs to be intercepted is not in the root directory of the current package, you need to fill in the relative path to the package. For example, if this interception point wants to instrument content in the github.com/gin-gonic/gin/render directory, you need to fill in render here. Package Name(optional): Define the package name of the current package. If the package name is not defined, the package name of the current package would be used by default. It\u0026rsquo;s used when the package path and package name are not same, such as the name of github.com/emicklei/go-restful/v3 is restful. Matcher(At): Specify which eligible content in the current package path needs to be enhanced. Interceptor: If the current method is being intercepted (whether it\u0026rsquo;s a static method or an instance method), the name of the interceptor must be specified.  Method Matcher Method matchers are used to intercept both static and non-static methods. The specific definitions are as follows:\n// NewStaticMethodEnhance creates a new EnhanceMatcher for static method. // name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewStaticMethodEnhance(name string, filters ...MethodFilterOption) // NewMethodEnhance creates a new EnhanceMatcher for method. // receiver: receiver type name of method needs to be enhanced. 
// name: method name needs to be enhanced.(Public and private methods are supported) // filters: filters for method. func NewMethodEnhance(receiver, name string, filters ...MethodFilterOption) Filter Option Filter Options are used to validate the parameters or return values in the method. If the method name matches but the Options validation fails, the enhancement would not be performed.\n// WithArgsCount filter methods with specific count of arguments. func WithArgsCount(argsCount int) // WithResultCount filter methods with specific count of results. func WithResultCount(resultCount int) // WithArgType filter methods with specific type of the index of the argument. func WithArgType(argIndex int, dataType string) // WithResultType filter methods with specific type of the index of the result. func WithResultType(argIndex int, dataType string) Demo For example, if you have the following method that needs to be intercepted:\nfunc (c *Context) HandleMethod(name string) bool you can describe it using this condition:\ninstrument.NewMethodEnhance(\u0026#34;*Context\u0026#34;, \u0026#34;HandleMethod\u0026#34;, instrument.WithArgsCount(1), instrument.WithArgType(0, \u0026#34;string\u0026#34;), instrument.WithResultCount(1), instrument.WithResultType(0, \u0026#34;bool\u0026#34;)) Struct Matcher Enhancement structures can embed enhanced fields within specified structs. After the struct is instantiated, custom data content can be added to the specified struct in the method interceptor.\nStruct matchers are used to intercept struct methods. The specific definitions are as follows:\n// NewStructEnhance creates a new EnhanceMatcher for struct. // name: struct name needs to be enhanced.(Public and private structs are supported) // filters: filters for struct. func NewStructEnhance(name string, filters ...StructFilterOption) Filter Option Filter Options are used to validate the fields in the structure.\n// WithFieldExists filter the struct has the field with specific name. func WithFieldExists(fieldName string) // WithFiledType filter the struct has the field with specific name and type. func WithFiledType(filedName, filedType string) Enhanced Instance After completing the definition of the struct enhancement, you can convert the specified instance into the following interface when intercepting methods, and get or set custom field information. 
The interface definition is as follows:\ntype EnhancedInstance interface { // GetSkyWalkingDynamicField get the customized data from instance \tGetSkyWalkingDynamicField() interface{} // SetSkyWalkingDynamicField set the customized data into the instance \tSetSkyWalkingDynamicField(interface{}) } Demo For example, if you have the following struct that needs to be enhanced:\ntype Test struct { value *Context } you can describe it using this condition:\ninstrument.NewStructEnhance(\u0026#34;Test\u0026#34;, instrument.WithFieldExists(\u0026#34;value\u0026#34;), instrument.WithFiledType(\u0026#34;value\u0026#34;, \u0026#34;*Context\u0026#34;)) Next, you can set custom content for the specified enhanced instance when intercepting methods.\nins := testInstance.(instrument.EnhancedInstance) // setting custom content ins.SetSkyWalkingDynamicField(\u0026#34;custom content\u0026#34;) // getting custom content res := ins.GetSkyWalkingDynamicField() Interceptor Interceptors are used to define custom business logic before and after method execution, allowing you to access data from before and after method execution and interact with the Agent Core by using the Agent API.\nThe interceptor definition is as follows, you need to create a new structure and implement it:\ntype Interceptor interface { // BeforeInvoke would be called before the target method invocation.  BeforeInvoke(invocation Invocation) error // AfterInvoke would be called after the target method invocation.  AfterInvoke(invocation Invocation, result ...interface{}) error } Within the interface, you can see the Invocation interface, which defines the context of an interception. The specific definition is as follows:\ntype Invocation interface { // CallerInstance is the instance of the caller, nil if the method is static method.  CallerInstance() interface{} // Args is get the arguments of the method, please cast to the specific type to get more information.  Args() []interface{} // ChangeArg is change the argument value of the method  ChangeArg(int, interface{}) // IsContinue is the flag to control the method invocation, if it is true, the target method would not be invoked.  IsContinue() bool // DefineReturnValues are defined the return value of the method, and continue the method invoked  DefineReturnValues(...interface{}) // SetContext is the customized context of the method invocation, it should be propagated the tracing span.  SetContext(interface{}) // GetContext is get the customized context of the method invocation  GetContext() interface{} } Thread safe The Interceptor instance would define new instance at the current package level, rather than creating a new instance each time a method is intercepted.\nTherefore, do not declare objects in the interceptor, and instead use Invocation.Context to pass data.\nPackage Path If the method you want to intercept is not located in the root directory of the framework, place your interceptor code in the relative location within the plugin. The Agent would only copy files from the same package directory.\nFor example, if you want to intercept a method in github.com/gin-gonic/gin/render, create a render directory in the root of your plugin, and put the interceptor inside it. This ensures that the interceptor is properly included during the copy operation and can be correctly applied to the target package.\nPlugin Configuration Plugin configuration is used to add custom configuration parameters to a specified plugin. 
When users specify configuration items, the plugin can dynamically adapt the content needed in the plugin according to the user\u0026rsquo;s configuration items.\nDeclaration Please declare the configuration file you need in the package you want to use. Declare it using var, and add the //skywalking:config directive to specify that this variable requires dynamic updating.\nBy default, the configuration item belongs to the configuration of the current plugin. For example, if the name of my current plugin is gin, then this configuration item is under the gin plugin. Of course, you can also change it to the http plugin to reference the configuration information of the relevant plugin, in which case you need to specify it as //skywalking:config http.\nItem Each configuration item needs to add a config tag. This is used to specify the name of the current configuration content. By default, it would lowercase all letters and add an _ identifier before each uppercase letter.\nCurrently, it supports basic data types and struct types, and it also supports obtaining data values through environment variables.\nDemo For example, I have declared the following configuration item:\n//skywalking:config http var config struct { ServerCollectParameters bool `config:\u0026#34;server_collect_parameters\u0026#34;` Client struct{ CollectParameters bool `config:\u0026#34;collect_parameters\u0026#34;` } `config:\u0026#34;client\u0026#34;` } In the above example, I created a plugin configuration for http, which includes two configuration items.\n config.ServerCollectParameters: Its configuration is located at http.server_collect_parameters. config.Client.CollectParameter: Its configuration is located at http.client.collect_parameter.  When the plugin needs to be used, it can be accessed directly by reading the config configuration.\nAgent API The Agent API is used when a method is intercepted and interacts with the Agent Core.\nTracing API The Tracing API is used for building distributed tracing, and currently supports the following methods:\n// CreateEntrySpan creates a new entry span. // operationName is the name of the span. // extractor is the extractor to extract the context from the carrier. // opts is the options to create the span. func CreateEntrySpan(operationName string, extractor Extractor, opts ...SpanOption) // CreateLocalSpan creates a new local span. // operationName is the name of the span. // opts is the options to create the span. func CreateLocalSpan(operationName string, opts ...SpanOption) // CreateExitSpan creates a new exit span. // operationName is the name of the span. // peer is the peer address of the span. // injector is the injector to inject the context into the carrier. // opts is the options to create the span. func CreateExitSpan(operationName, peer string, injector Injector, opts ...SpanOption) // ActiveSpan returns the current active span, it can be got the current span in the current goroutine. // If the current goroutine is not in the context of the span, it will return nil. // If get the span from other goroutine, it can only get information but cannot be operated. func ActiveSpan() // GetRuntimeContextValue returns the value of the key in the runtime context, which is current goroutine. // The value can also read from the goroutine which is created by the current goroutine func GetRuntimeContextValue(key string) // SetRuntimeContextValue sets the value of the key in the runtime context. 
func SetRuntimeContextValue(key string, val interface{}) Context Carrier The context carrier is used to pass the context between different applications.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\n// Extractor is a tool specification which defines how to // extract the trace parent context from the propagation context type Extractor func(headerKey string) (string, error) // Injector is a tool specification which defines how to // inject the trace context into the propagation context type Injector func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request tracing.CreateEntrySpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request tracing.CreateExitSpan(fmt.Sprintf(\u0026#34;%s:%s\u0026#34;, request.Method, request.URL.Path), request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Span Option Span Options can be passed when creating a Span to configure the information in the Span.\nThe following options are currently supported:\n// WithLayer sets the SpanLayer of the Span func WithLayer(layer SpanLayer) // WithComponent sets the component id of the Span func WithComponent(componentID int32) // WithTag sets the Tag of the Span func WithTag(key Tag, value string) Span Component The Component ID in Span is used to identify the current component, with its data defined in SkyWalking OAP. If the framework you are writing does not exist in this file, please submit a PR in the SkyWalking project to add the definition of this plugin.\nSpan Operation After creating a Span, you can perform additional operations on it.\n// Span for plugin API type Span interface { // AsyncSpan for the async API \tAsyncSpan // Tag sets the Tag of the Span \tTag(Tag, string) // SetSpanLayer sets the SpanLayer of the Span \tSetSpanLayer(SpanLayer) // SetOperationName re-sets the operation name of the Span \tSetOperationName(string) // SetPeer re-sets the peer address of the Span \tSetPeer(string) // Log adds a log to the Span \tLog(...string) // Error adds an error log to the Span \tError(...string) // End ends the Span \tEnd() } Async Span There is a set of advanced APIs in Span which is specifically designed for async use cases. When setting the name, tags, logs, or other operations (including ending the span) in another goroutine, you should use these APIs.\ntype AsyncSpan interface { // PrepareAsync finishes the span in the current tracing context, but the span stays alive until AsyncFinish is called  PrepareAsync() // AsyncFinish finishes the current async span  AsyncFinish() } Following the API definition above, take these steps to use the async API:\n Call span.PrepareAsync() to prepare the span to do any operation in another goroutine. Use Span.End() in the original goroutine when your job in the current goroutine is complete. Propagate the span to any other goroutine in your plugin. Once the above steps are all set, call span.AsyncFinish() in any goroutine. When span.AsyncFinish() has completed for all spans, all spans will be finished and reported to the backend.  
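Putting the pieces above together, the following is a minimal, hedged sketch of an HTTP client interceptor: it creates an exit span in BeforeInvoke, injects the context carrier into the outgoing request, passes the span through the Invocation context (per the thread-safety note earlier), and finishes it in AfterInvoke. The import paths, the operator package name, the layer and tag constants, and the (Span, error) return values of CreateExitSpan are assumptions for illustration only; check the skywalking-go sources for the exact identifiers.
package client // hypothetical plugin package

import (
	"fmt"
	"net/http"

	"github.com/apache/skywalking-go/plugins/core/operator" // assumed import path for the Invocation type
	"github.com/apache/skywalking-go/plugins/core/tracing"  // assumed import path for the tracing API
)

type ClientInterceptor struct{}

func (i *ClientInterceptor) BeforeInvoke(invocation operator.Invocation) error {
	// For this sketch the first argument of the intercepted method is assumed to be an *http.Request.
	request := invocation.Args()[0].(*http.Request)
	// Create an exit span and inject the context carrier into the outgoing request.
	span, err := tracing.CreateExitSpan(fmt.Sprintf("%s:%s", request.Method, request.URL.Path), request.Host,
		func(headerKey, headerValue string) error {
			request.Header.Add(headerKey, headerValue)
			return nil
		},
		tracing.WithLayer(tracing.SpanLayerHTTP),               // illustrative layer constant
		tracing.WithTag(tracing.TagHTTPMethod, request.Method)) // illustrative tag constant
	if err != nil {
		return err
	}
	// Do not store the span in a field of the interceptor; pass it through the Invocation context instead.
	invocation.SetContext(span)
	return nil
}

func (i *ClientInterceptor) AfterInvoke(invocation operator.Invocation, results ...interface{}) error {
	if invocation.GetContext() == nil {
		return nil
	}
	span := invocation.GetContext().(tracing.Span)
	// The position of the error in results depends on the intercepted method's signature; index 1 is assumed here.
	if len(results) > 1 {
		if err, ok := results[1].(error); ok && err != nil {
			span.Error(err.Error())
		}
	}
	span.End()
	return nil
}
Because the span travels through the Invocation context rather than interceptor fields, the same interceptor value can safely serve concurrent requests.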
Tracing Context Operation In the Go Agent, the trace context is propagated across goroutines automatically by default. However, in some cases goroutines may share context unexpectedly because they are scheduled by a pool mechanism. Use these advanced APIs to manipulate the context and switch the current context.\n// CaptureContext captures the current tracing context in the current goroutine. func CaptureContext() ContextSnapshot // ContinueContext continues the tracing context in the current goroutine. func ContinueContext(ctx ContextSnapshot) // CleanContext cleans the tracing context in the current goroutine. func CleanContext() Typically, use the APIs as follows to control or switch the context:\n Use tracing.CaptureContext() to get the ContextSnapshot object. Propagate the snapshot context to any other goroutine in your plugin. Use tracing.ContinueContext(snapshot) to continue the snapshot context in the target goroutine.  Meter API The Meter API is used to record the metrics of the target program, and currently supports the following methods:\n// NewCounter creates a new counter metric. // name is the name of the metric // opts is the options for the metric func NewCounter(name string, opts ...Opt) Counter // NewGauge creates a new gauge metric. // name is the name of the metric // getter is the function to get the value of the gauge meter // opts is the options for the metric func NewGauge(name string, getter func() float64, opts ...Opt) Gauge // NewHistogram creates a new histogram metric. // name is the name of the metric // steps is the buckets of the histogram // opts is the options for the metric func NewHistogram(name string, steps []float64, opts ...Opt) Histogram // NewHistogramWithMinValue creates a new histogram metric. // name is the name of the metric // minVal is the min value of the histogram bucket // steps is the buckets of the histogram // opts is the options for the metric func NewHistogramWithMinValue(name string, minVal float64, steps []float64, opts ...Opt) Histogram // RegisterBeforeCollectHook registers a hook function which will be called before metrics are collected. func RegisterBeforeCollectHook(f func()) Meter Option The Meter Options can be passed when creating a Meter to configure the information in the Meter.\n// WithLabel adds a label to the metric. func WithLabel(key, value string) Opt Meter Type Counter Counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase.\ntype Counter interface { // Get returns the current value of the counter. \tGet() float64 // Inc increments the counter by the given value. \tInc(val float64) } Gauge Gauge is a metric that represents a single numerical value that can arbitrarily go up and down.\ntype Gauge interface { // Get returns the current value of the gauge.  Get() float64 } Histogram Histogram is a metric that represents the distribution of a set of values.\ntype Histogram interface { // Observe finds the bucket associated with the value and adds 1. \tObserve(val float64) // ObserveWithCount finds the bucket associated with the value and adds the specified count. \tObserveWithCount(val float64, count int64) } Import Plugin Once you have finished developing the plugin, you need to import the completed module into the Agent program and define it in the corresponding file.\nAt this point, your plugin development process is complete. 
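As a closing illustration of the Meter API described above, here is a short, hedged sketch in which meters are created once at the package level and updated from interceptor code. The metrics import path, the metric names, the label, and the bucket steps are assumptions for illustration only.
package server // hypothetical plugin package

import (
	"time"

	"github.com/apache/skywalking-go/plugins/core/metrics" // assumed import path for the meter API
)

// Meters are created once at package level and reused for every intercepted call.
var (
	requestCounter = metrics.NewCounter("plugin_http_request_total",
		metrics.WithLabel("protocol", "http")) // illustrative name and label
	requestDuration = metrics.NewHistogram("plugin_http_request_duration",
		[]float64{10, 50, 100, 500, 1000}) // illustrative bucket steps, in milliseconds
)

// recordRequest is a hypothetical helper an interceptor could call once a request completes.
func recordRequest(start time.Time) {
	requestCounter.Inc(1)
	requestDuration.Observe(float64(time.Since(start).Milliseconds()))
}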
When the Agent performs hybrid compilation on the target program, your plugin will be executed as expected.\n","title":"Plugin Development Guide","url":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/development-guide/"},{"content":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. 
For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. * @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. 
Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. setnx) cache.op indicates the command is used for write or read operation , usually the value is converting from command cache.key indicates the cache key that would be sent to cache server , this tag maybe null , as string type key would be collected usually.  In order to decide which op should be converted to flexibly , It\u0026rsquo;s better that providing config property . Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analysis MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates MQ queue name mq.topic indicates MQ topic name , It\u0026rsquo;s optional as some MQ don\u0026rsquo;t hava concept of topic transmission.latency The transmission latency from consumer to producer. Usually you needn\u0026rsquo;t to record this tag manually, instead to call contextCarrier.extensionInjector().injectSendingTimestamp(); to record tag sendingTimestamp on producer side , and SkyWalking would record this tag on consumer side if sw8-x context carrier(from producer side) contains sendingTimestamp  Notice , you should set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. 
When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation tech and AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC(MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  
Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because ThirdPartyClass does not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but they don\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using the full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, use the fully qualified class name as a string literal instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend using a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexpected methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClassName() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs if you want to change the argument references in the interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn SkyWalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check whether there is a certain annotation on the target method. ReturnTypeNameMatch: Check the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Check the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (the class file format defined in the JVM Specification) to define an Array type. For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. 
@Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. 
SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. 
*/ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. 
This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","title":"Plugin Development Guide","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/java-plugin-development-guide/"},{"content":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  
See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. * @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. 
For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. setnx) cache.op indicates the command is used for write or read operation , usually the value is converting from command cache.key indicates the cache key that would be sent to cache server , this tag maybe null , as string type key would be collected usually.  In order to decide which op should be converted to flexibly , It\u0026rsquo;s better that providing config property . 
Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analysis MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates MQ queue name mq.topic indicates MQ topic name , It\u0026rsquo;s optional as some MQ don\u0026rsquo;t hava concept of topic transmission.latency The transmission latency from consumer to producer. Usually you needn\u0026rsquo;t to record this tag manually, instead to call contextCarrier.extensionInjector().injectSendingTimestamp(); to record tag sendingTimestamp on producer side , and SkyWalking would record this tag on consumer side if sw8-x context carrier(from producer side) contains sendingTimestamp  Notice , you should set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation tech and AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC(MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. 
Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because of the fact that ThirdPartyClass dose not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but it doesn\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, Use Full Qualified Class Name String Literature Instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexcepted methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClassName() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs, if you want to change the argument ref in interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn Skywalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. 
For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. 
* * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds a value to the histogram and automatically determines which bucket count to increment. Rule: a value is counted into the bucket [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is a test tool suite available for agents in a wide variety of languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou can learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create submodules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins; the module name should include the supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide automated test cases. Learn how to write plugin test cases from this doc. Send a pull request and ask for review. The plugin is accepted by SkyWalking once the plugin committers approve it and the plugin CI-with-IT, e2e, and plugin tests pass.  ","title":"Plugin Development Guide","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/java-plugin-development-guide/"},{"content":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are two kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data will be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing.\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to those of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither an MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of a service or an MQ producer. It was named the LeafSpan in the early versions of SkyWalking. For example, accessing a DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.
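To make the three span types concrete before moving on, here is a hedged sketch of how they typically map onto one request in an instrumented server. The ContextManager and span APIs used here are described under Core APIs below; the operation names, the peer address and the carrier variables are illustrative only, not taken from a real plugin.

```java
// Hypothetical request flow, for illustration only.
AbstractSpan entry = ContextManager.createEntrySpan("/order/create", carrierFromRequestHeaders);
entry.setComponent(ComponentsDefine.TOMCAT);   // the service provider side -> EntrySpan
entry.setLayer(SpanLayer.HTTP);

ContextManager.createLocalSpan("OrderService.calculatePrice");
// ... purely in-process business logic -> LocalSpan ...
ContextManager.stopSpan();                     // finishes the LocalSpan

ContextCarrier carrier = new ContextCarrier();
AbstractSpan exit = ContextManager.createExitSpan("JDBC/PreparedStatement/execute", carrier, "db-host:3306");
exit.setLayer(SpanLayer.DB);                   // the client side of a remote call -> ExitSpan
ContextManager.stopSpan();                     // finishes the ExitSpan

ContextManager.stopSpan();                     // finishes the EntrySpan
```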
ContextCarrier In order to implement distributed tracing, the trace across processes has to be bound together, and the context must propagate across the process boundary. This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initialize the ContextCarrier. Place all items of the ContextCarrier into headers (e.g. HTTP headers), attachments (e.g. the Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the headers, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async processing (in-memory MQ) and batch processing are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps of cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through a method argument or by carrying it in existing arguments. Use ContextManager#continued in the sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create an EntrySpan according to the operation name (e.g. service name, uri) and the ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create a LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create an ExitSpan according to the operation name (e.g. service name, uri), the new ContextCarrier and the peer address (e.g. ip+port, hostname+port).
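Putting the cross-thread steps and these ContextManager APIs together, here is a hedged sketch of propagating the context to a worker thread. The executor and the operation name are hypothetical; only capture, continued, createLocalSpan and stopSpan come from the APIs described in this guide.

```java
// 1. Capture the snapshot in the original thread, while its tracing context is active.
final ContextSnapshot snapshot = ContextManager.capture();

executor.submit(() -> {
    // 2. The snapshot reaches the sub-thread here via the lambda capture.
    // 3. Start a span for the async work, then bind it to the parent context.
    ContextManager.createLocalSpan("AsyncWorker/process");
    ContextManager.continued(snapshot);
    try {
        // ... the actual asynchronous work ...
    } finally {
        ContextManager.stopSpan();
    }
});
```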
AbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. * @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. * * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statements is defined by agent-analyzer/default/slowDBAccessThreshold. Check the Slow Database Statement document of the OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but still requires statistics. The value of x-le should be in JSON format. There are two options:\n Define a separate logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local spans.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare that the current local span represents a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analyzes database (SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records the database type, such as sql, cassandra, Elasticsearch. db.statement records the SQL statement of the database access.  Read the backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analyzes cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type, usually the official name of the cache (e.g. Redis). cache.cmd indicates the cache command that is sent to the cache server (e.g. setnx). cache.op indicates whether the command is a write or a read operation; the value is usually derived from the command. cache.key indicates the cache key that is sent to the cache server; this tag may be null, as usually only string-typed keys are collected.  Since the command-to-operation mapping should stay flexible, it is better to provide a config property for it, as in the sketch below.
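As an illustration of attaching these cache tags, here is a hedged sketch for a hypothetical Redis SETNX call inside an exit span. The operation name, peer address, key variable and the Tags.CACHE_*.set(...) usage are assumptions for the example, not code from an existing plugin.

```java
// Hypothetical Redis SETNX call; operation name and peer are made up.
AbstractSpan span = ContextManager.createExitSpan("Redis/SETNX", contextCarrier, "redis-host:6379");
Tags.CACHE_TYPE.set(span, "Redis");
Tags.CACHE_CMD.set(span, "setnx");
// The command-to-operation mapping ("setnx" -> write) is ideally read from a plugin config property.
Tags.CACHE_OP.set(span, "write");
Tags.CACHE_KEY.set(span, key);
ContextManager.stopSpan();
```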
Reference: Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analyzes MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates the MQ queue name. mq.topic indicates the MQ topic name; it is optional, as some MQs don\u0026rsquo;t have the concept of a topic. transmission.latency is the transmission latency from the producer to the consumer. Usually you don\u0026rsquo;t need to record this tag manually; instead, call contextCarrier.extensionInjector().injectSendingTimestamp(); to record the tag sendingTimestamp on the producer side, and SkyWalking records this tag on the consumer side if the sw8-x context carrier (from the producer side) contains sendingTimestamp.  Notice: you should set the peer on both sides (producer and consumer), and the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which are specifically designed for async use cases. When tags, logs, and attributes (including the end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. Once #asyncFinish has been called for every span that was prepared with #prepareForAsync, the tracing context will be finished and will report to the backend (based on the count of API executions).
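To make this calling sequence concrete, here is a hedged sketch of the async pattern. The client, request and callback are hypothetical; only prepareForAsync, asyncFinish, the span tag API and the ContextManager calls come from the APIs described above.

```java
// Hypothetical asynchronous client call; the span is finished from the callback thread.
AbstractSpan span = ContextManager.createExitSpan("HypotheticalClient/asyncCall", contextCarrier, "remote-host:8080");
span.prepareForAsync();        // 1. keep the span alive after the context stops it
ContextManager.stopSpan();     // 2. stop it in the original thread (tracing context)

client.call(request, response -> {
    // 3. the span object was propagated to the callback thread
    span.tag("http.status_code", String.valueOf(response.statusCode()));
    span.asyncFinish();        // 4. finish it from any thread
});
```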
Develop a plugin Abstract The basic way to trace is to intercept a Java method, using byte code manipulation technology and the AOP concept. SkyWalking has packaged the byte code manipulation technology and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructors, instance methods and class methods.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which can propagate context through the MIC (MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class name (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because ThirdPartyClass does not necessarily exist in the target application, and this would break the agent; we have import checks in CI to assist with this, but they don\u0026rsquo;t cover all scenarios of this limitation, so never try to work around it with the fully qualified class name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, use a fully qualified class name string literal instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still do not use *.class.getName() to get the class String name. We recommend using a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexpected methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClass() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs if you want to change the argument references in the interceptor. Please refer to Byte Buddy for details on defining an ElementMatcher.\nIn SkyWalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (the array class name format defined in the JVM Specification) to define an Array type. 
For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. 
* * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","title":"Plugin Development Guide","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/java-plugin-development-guide/"},{"content":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. 
Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initalize the ContextCarrier. Place all items of ContextCarrier into heads (e.g. HTTP HEAD), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the heads, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(“/span/operation/name”, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async process (in-memory MQ) and batch process are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or being carried by existing arguments Use ContextManager#continued in sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. * @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. 
* * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check Slow Database Statement document of OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but requires the statistic. The value of x-le should be in JSON format. There are two options:\n Define a separated logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analysis Database(SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records database type, such as sql, cassandra, Elasticsearch. db.statementrecords the sql statement of the database access.  Read backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analysis cache performance related metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type , usually it\u0026rsquo;s official name of cache (e.g. Redis) cache.cmd indicates the cache command that would be sent to cache server (e.g. setnx) cache.op indicates the command is used for write or read operation , usually the value is converting from command cache.key indicates the cache key that would be sent to cache server , this tag maybe null , as string type key would be collected usually.  In order to decide which op should be converted to flexibly , It\u0026rsquo;s better that providing config property . 
Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analysis MQ performance related metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates MQ queue name mq.topic indicates MQ topic name , It\u0026rsquo;s optional as some MQ don\u0026rsquo;t hava concept of topic transmission.latency The transmission latency from consumer to producer. Usually you needn\u0026rsquo;t to record this tag manually, instead to call contextCarrier.extensionInjector().injectSendingTimestamp(); to record tag sendingTimestamp on producer side , and SkyWalking would record this tag on consumer side if sw8-x context carrier(from producer side) contains sendingTimestamp  Notice , you should set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #prepareForAsync is complete for all spans, the tracing context will be finished and will report to the backend (based on the count of API execution).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation tech and AOP concept. SkyWalking has packaged the byte code manipulation tech and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructor, instance method and class method.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC(MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. 
Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because of the fact that ThirdPartyClass dose not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but it doesn\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, Use Full Qualified Class Name String Literature Instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend you to use a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexcepted methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClassName() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs, if you want to change the argument ref in interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn Skywalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. 
For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor As an interceptor for an instance method, it has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The interceptor of V2 API uses MethodInvocationContext context to replace the MethodInterceptResult result in the beforeMethod, and be added as a new parameter in afterMethod and handleMethodException.\nMethodInvocationContext context is only shared in one time execution, and safe to use when face concurrency execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. 
* * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","title":"Plugin Development Guide","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/java-plugin-development-guide/"},{"content":"Plugin Development Guide This document describes how to understand, develop and contribute a plugin.\nThere are 2 kinds of plugin:\n Tracing plugin. Follow the distributed tracing concept to collect spans with tags and logs. Meter plugin. Collect numeric metrics in Counter, Gauge, and Histogram formats.  We also provide the plugin test tool to verify the data collected and reported by the plugin. If you plan to contribute any plugin to our main repo, the data would be verified by this tool too.\nTracing plugin Concepts Span The span is an important and recognized concept in the distributed tracing system. Learn about the span from the Google Dapper Paper and OpenTracing\nSkyWalking has supported OpenTracing and OpenTracing-Java API since 2017. Our concepts of the span are similar to that of the Google Dapper Paper and OpenTracing. We have also extended the span.\nThere are three types of span:\n1.1 EntrySpan The EntrySpan represents a service provider. It is also an endpoint on the server end. As an APM system, our target is the application servers. Therefore, almost all the services and MQ-consumers are EntrySpan.\n1.2 LocalSpan The LocalSpan represents a normal Java method that does not concern remote services. It is neither a MQ producer/consumer nor a service (e.g. HTTP service) provider/consumer.\n1.3 ExitSpan The ExitSpan represents a client of service or MQ-producer. It is named the LeafSpan in the early versions of SkyWalking. For example, accessing DB through JDBC and reading Redis/Memcached are classified as an ExitSpan.\nContextCarrier In order to implement distributed tracing, cross-process tracing has to be bound, and the context must propagate across the process. This is where the ContextCarrier comes in.\nHere are the steps on how to use the ContextCarrier in an A-\u0026gt;B distributed call.\n Create a new and empty ContextCarrier on the client end. 
Create an ExitSpan by ContextManager#createExitSpan or use ContextManager#inject to initialize the ContextCarrier. Place all items of ContextCarrier into headers (e.g. HTTP headers), attachments (e.g. Dubbo RPC framework) or messages (e.g. Kafka). The ContextCarrier propagates to the server end through the service call. On the server end, obtain all items from the headers, attachments or messages. Create an EntrySpan by ContextManager#createEntrySpan or use ContextManager#extract to bind the client and server ends.  See the following examples, where we use the Apache HTTPComponent client plugin and Tomcat 7 server plugin:\n Using the Apache HTTPComponent client plugin on the client end  span = ContextManager.createExitSpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier, \u0026#34;ip:port\u0026#34;); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); httpRequest.setHeader(next.getHeadKey(), next.getHeadValue()); } Using the Tomcat 7 server plugin on the server end  ContextCarrier contextCarrier = new ContextCarrier(); CarrierItem next = contextCarrier.items(); while (next.hasNext()) { next = next.next(); next.setHeadValue(request.getHeader(next.getHeadKey())); } span = ContextManager.createEntrySpan(\u0026#34;/span/operation/name\u0026#34;, contextCarrier); ContextSnapshot Besides cross-process tracing, cross-thread tracing has to be supported as well. For instance, both async processing (in-memory MQ) and batch processing are common in Java. Cross-process and cross-thread tracing are very similar in that they both require propagating context, except that cross-thread tracing does not require serialization.\nHere are the three steps on cross-thread propagation:\n Use ContextManager#capture to get the ContextSnapshot object. Let the sub-thread access the ContextSnapshot through method arguments or by carrying it in existing arguments. Use ContextManager#continued in the sub-thread.  Core APIs ContextManager ContextManager provides all major and primary APIs.\n Create EntrySpan  public static AbstractSpan createEntrySpan(String endpointName, ContextCarrier carrier) Create EntrySpan according to the operation name (e.g. service name, uri) and ContextCarrier.\nCreate LocalSpan  public static AbstractSpan createLocalSpan(String endpointName) Create LocalSpan according to the operation name (e.g. full method signature).\nCreate ExitSpan  public static AbstractSpan createExitSpan(String endpointName, ContextCarrier carrier, String remotePeer) Create ExitSpan according to the operation name (e.g. service name, uri) and the new ContextCarrier and peer address (e.g. ip+port, hostname+port).\nAbstractSpan /** * Set the component id, which defines in {@link ComponentsDefine} * * @param component * @return the span for chaining. */ AbstractSpan setComponent(Component component); AbstractSpan setLayer(SpanLayer layer); /** * Set a key:value tag on the Span. * * @return this Span instance, for chaining */ AbstractSpan tag(String key, String value); /** * Record an exception event of the current walltime timestamp. * * @param t any subclass of {@link Throwable}, which occurs in this span. * @return the Span, for chaining */ AbstractSpan log(Throwable t); AbstractSpan errorOccurred(); /** * Record an event at a specific timestamp. * * @param timestamp The explicit timestamp for the log record. * @param event the events * @return the Span, for chaining */ AbstractSpan log(long timestamp, Map\u0026lt;String, ?\u0026gt; event); /** * Sets the string name for the logical operation this span represents. 
* * @return this Span instance, for chaining */ AbstractSpan setOperationName(String endpointName); Besides setting the operation name, tags and logs, two attributes must be set, namely the component and layer. This is especially important for the EntrySpan and ExitSpan.\nSpanLayer is the type of span. There are 5 values:\n UNKNOWN (default) DB RPC_FRAMEWORK (designed for the RPC framework, rather than an ordinary HTTP call) HTTP MQ  Component IDs are defined and reserved by the SkyWalking project. For extension of the component name/ID, please follow the OAP server Component library settings document.\nSpecial Span Tags All tags are available in the trace view. Meanwhile, in the OAP backend analysis, some special tags or tag combinations provide other advanced features.\nTag key http.status_code The value should be an integer. The response code of OAL entities corresponds to this value.\nTag keys db.statement and db.type. The value of db.statement should be a string that represents the database statement, such as SQL, or [No statement]/+span#operationName if the value is empty. When the exit span contains this tag, OAP samples the slow statements based on agent-analyzer/default/maxSlowSQLLength. The threshold of slow statement is defined in accordance with agent-analyzer/default/slowDBAccessThreshold. Check the Slow Database Statement document of the OAP server for details.\nExtension logic endpoint: Tag key x-le The logic endpoint is a concept that doesn\u0026rsquo;t represent a real RPC call, but still requires statistics. The value of x-le should be in JSON format. There are two options:\n Define a separate logic endpoint. Provide its own endpoint name, latency and status. Suitable for entry and local span.  { \u0026#34;name\u0026#34;: \u0026#34;GraphQL-service\u0026#34;, \u0026#34;latency\u0026#34;: 100, \u0026#34;status\u0026#34;: true } Declare the current local span representing a logic endpoint.  { \u0026#34;logic-span\u0026#34;: true } Virtual Database Relative Tags SkyWalking analyzes database (SQL-like) performance metrics through the following tags.\npublic static final StringTag DB_TYPE = new StringTag(3, \u0026#34;db.type\u0026#34;); public static final StringTag DB_STATEMENT = new StringTag(5, \u0026#34;db.statement\u0026#34;);  db.type records the database type, such as sql, cassandra, Elasticsearch. db.statement records the SQL statement of the database access.  Read the backend\u0026rsquo;s virtual database doc for more details.\nVirtual Cache Relative Tags SkyWalking analyzes cache performance metrics through the following tags.\npublic static final StringTag CACHE_TYPE = new StringTag(15, \u0026#34;cache.type\u0026#34;); public static final StringTag CACHE_CMD = new StringTag(17, \u0026#34;cache.cmd\u0026#34;); public static final StringTag CACHE_OP = new StringTag(16, \u0026#34;cache.op\u0026#34;); public static final StringTag CACHE_KEY = new StringTag(18, \u0026#34;cache.key\u0026#34;);  cache.type indicates the cache type, usually the official name of the cache (e.g. Redis). cache.cmd indicates the cache command that is sent to the cache server (e.g. setnx). cache.op indicates whether the command is a write or read operation; the value is usually derived from the command. cache.key indicates the cache key sent to the cache server; this tag may be null, as usually only string-typed keys are collected.  To keep the command-to-op conversion flexible, it is better to provide a config property. 
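As an illustration only, the following sketch shows how an exit span in a cache plugin interceptor could carry these tags; it assumes the StringTag constants above are exposed as Tags.CACHE_*, and the operation name, peer address, component constant and command-to-op mapping are made up for this example rather than taken from an existing plugin.\nAbstractSpan span = ContextManager.createExitSpan(\u0026#34;Jedis/setnx\u0026#34;, new ContextCarrier(), \u0026#34;127.0.0.1:6379\u0026#34;); span.setComponent(ComponentsDefine.JEDIS); Tags.CACHE_TYPE.set(span, \u0026#34;Redis\u0026#34;); Tags.CACHE_CMD.set(span, \u0026#34;setnx\u0026#34;); Tags.CACHE_OP.set(span, \u0026#34;write\u0026#34;); /* ideally resolved from a configurable command-to-op mapping */ Tags.CACHE_KEY.set(span, key); /* key is the String cache key captured from the intercepted method arguments */ ContextManager.stopSpan();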
Reference Jedis-4.x-plugin\nVirtual Message Queue (MQ) Relative Tags SkyWalking analyzes MQ performance metrics through the following tags.\npublic static final StringTag MQ_QUEUE = new StringTag(7, \u0026#34;mq.queue\u0026#34;); public static final StringTag MQ_TOPIC = new StringTag(9, \u0026#34;mq.topic\u0026#34;); public static final StringTag TRANSMISSION_LATENCY = new StringTag(15, \u0026#34;transmission.latency\u0026#34;, false);  mq.queue indicates the MQ queue name. mq.topic indicates the MQ topic name; it is optional, as some MQs don\u0026rsquo;t have the concept of a topic. transmission.latency records the transmission latency from producer to consumer. Usually you don\u0026rsquo;t need to record this tag manually; instead, call contextCarrier.extensionInjector().injectSendingTimestamp(); on the producer side to record the sendingTimestamp, and SkyWalking will record this tag on the consumer side if the sw8-x context carrier (from the producer side) contains sendingTimestamp.  Note: you should set the peer on both sides (producer and consumer), and the value of peer should represent the MQ server cluster.\nAdvanced APIs Async Span APIs There is a set of advanced APIs in Span which is specifically designed for async use cases. When tags, logs, and attributes (including end time) of the span need to be set in another thread, you should use these APIs.\n/** * The span finish at current tracing context, but the current span is still alive, until {@link #asyncFinish} * called. * * This method must be called\u0026lt;br/\u0026gt; * 1. In original thread(tracing context). * 2. Current span is active span. * * During alive, tags, logs and attributes of the span could be changed, in any thread. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan prepareForAsync(); /** * Notify the span, it could be finished. * * The execution times of {@link #prepareForAsync} and {@link #asyncFinish()} must match. * * @return the current span */ AbstractSpan asyncFinish();  Call #prepareForAsync in the original context. Run ContextManager#stopSpan in the original context when your job in the current thread is complete. Propagate the span to any other thread. Once the above steps are all set, call #asyncFinish in any thread. When #asyncFinish has been called for every span that called #prepareForAsync, the tracing context will be finished and will report to the backend (based on the count of API executions).  Develop a plugin Abstract The basic method to trace is to intercept a Java method, by using byte code manipulation technology and the AOP concept. SkyWalking has packaged the byte code manipulation technology and tracing context propagation, so you simply have to define the intercept point (a.k.a. aspect pointcut in Spring).\nIntercept SkyWalking provides two common definitions to intercept constructors, instance methods and class methods.\nv1 APIs  Extend ClassInstanceMethodsEnhancePluginDefine to define constructor intercept points and instance method intercept points. Extend ClassStaticMethodsEnhancePluginDefine to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefine to set all intercept points, although it is uncommon to do so.\nv2 APIs v2 APIs provide an enhanced interceptor, which could propagate context through MIC (MethodInvocationContext).\n Extend ClassInstanceMethodsEnhancePluginDefineV2 to define constructor intercept points and instance method intercept points. 
Extend ClassStaticMethodsEnhancePluginDefineV2 to define class method intercept points.  Of course, you can extend ClassEnhancePluginDefineV2 to set all intercept points, although it is uncommon to do so.\nImplement plugin See the following demonstration on how to implement a plugin by extending ClassInstanceMethodsEnhancePluginDefine.\n Define the target class name.  protected abstract ClassMatch enhanceClass(); ClassMatch represents how to match the target classes. There are 4 ways:\n byName: Based on the full class names (package name + . + class name). byClassAnnotationMatch: Depends on whether there are certain annotations in the target classes. byMethodAnnotationMatch: Depends on whether there are certain annotations in the methods of the target classes. byHierarchyMatch: Based on the parent classes or interfaces of the target classes.  Attention:\n Never use ThirdPartyClass.class in the instrumentation definitions, such as takesArguments(ThirdPartyClass.class), or byName(ThirdPartyClass.class.getName()), because ThirdPartyClass does not necessarily exist in the target application and this will break the agent; we have import checks to assist in checking this in CI, but they don\u0026rsquo;t cover all scenarios of this limitation, so never try to work around this limitation by something like using the full-qualified-class-name (FQCN), i.e. takesArguments(full.qualified.ThirdPartyClass.class) and byName(full.qualified.ThirdPartyClass.class.getName()) will pass the CI check, but are still invalid in the agent codes. Therefore, use fully qualified class name string literals instead. Even if you are perfectly sure that the class to be intercepted exists in the target application (such as JDK classes), still, do not use *.class.getName() to get the class String name. We recommend using a literal string. This is to avoid ClassLoader issues. by*AnnotationMatch does not support inherited annotations. We do not recommend using byHierarchyMatch unless necessary. Using it may trigger the interception of many unexpected methods, which would cause performance issues.  Example:\n@Override protected ClassMatch enhanceClass() { return byName(\u0026#34;org.apache.catalina.core.StandardEngineValve\u0026#34;); } Define an instance method intercept point.  public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints(); public interface InstanceMethodsInterceptPoint { /** * class instance methods matcher. * * @return methods matcher */ ElementMatcher\u0026lt;MethodDescription\u0026gt; getMethodsMatcher(); /** * @return represents a class name, the class instance must instanceof InstanceMethodsAroundInterceptor. */ String getMethodsInterceptor(); boolean isOverrideArgs(); } You may also use Matcher to set the target methods. Return true in isOverrideArgs if you want to change the argument refs in the interceptor. Please refer to bytebuddy for details of defining ElementMatcher.\nIn SkyWalking, we provide 3 classes to facilitate ElementMatcher definition:\n AnnotationTypeNameMatch: Check on whether there is a certain annotation in the target method. ReturnTypeNameMatch: Check the return type name (package name + . + class name) of the target method. ArgumentTypeNameMatch: Check on the argument index and the type name (package name + . + class name) of the target method.  Attention:\n In case of using ReturnTypeNameMatch and ArgumentTypeNameMatch, use [Lxxx; (Java file format defined in JVM Specification) to define an Array type. 
For example, you should write [Ljava.lang.String; for java.lang.String[].  The following sections will tell you how to implement the interceptor.\nAdd plugin definition into the skywalking-plugin.def file.  tomcat-7.x/8.x=TomcatInstrumentation  Set up witnessClasses and/or witnessMethods if the instrumentation has to be activated in specific versions.\nExample:\n// The plugin is activated only when the foo.Bar class exists. @Override protected String[] witnessClasses() { return new String[] { \u0026#34;foo.Bar\u0026#34; }; } // The plugin is activated only when the foo.Bar#hello method exists. @Override protected List\u0026lt;WitnessMethod\u0026gt; witnessMethods() { List\u0026lt;WitnessMethod\u0026gt; witnessMethodList = new ArrayList\u0026lt;\u0026gt;(); WitnessMethod witnessMethod = new WitnessMethod(\u0026#34;foo.Bar\u0026#34;, ElementMatchers.named(\u0026#34;hello\u0026#34;)); witnessMethodList.add(witnessMethod); return witnessMethodList; } For more examples, see WitnessTest.java\n  Implement an interceptor An interceptor for an instance method has to implement org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstanceMethodsAroundInterceptor\n/** * A interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefine}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptor { /** * called before target method invocation. * * @param result change this result, if you want to truncate the method. * @throws Throwable */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInterceptResult result) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. * * @param ret the method\u0026#39;s original return value. * @return the method\u0026#39;s actual return value. * @throws Throwable */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t); } Use the core APIs before and after calling the method, as well as during exception handling.\nV2 APIs The V2 API interceptor uses MethodInvocationContext context to replace MethodInterceptResult result in beforeMethod, and the same context is added as a new parameter to afterMethod and handleMethodException.\nThe MethodInvocationContext is shared only within a single method invocation, so it is safe to use under concurrent execution.\n/** * A v2 interceptor, which intercept method\u0026#39;s invocation. The target methods will be defined in {@link * ClassEnhancePluginDefineV2}\u0026#39;s subclass, most likely in {@link ClassInstanceMethodsEnhancePluginDefine} */ public interface InstanceMethodsAroundInterceptorV2 { /** * called before target method invocation. * * @param context the method invocation context including result context. */ void beforeMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, MethodInvocationContext context) throws Throwable; /** * called after target method invocation. Even method\u0026#39;s invocation triggers an exception. 
* * @param ret the method\u0026#39;s original return value. May be null if the method triggers an exception. * @return the method\u0026#39;s actual return value. */ Object afterMethod(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Object ret, MethodInvocationContext context) throws Throwable; /** * called when occur exception. * * @param t the exception occur. */ void handleMethodException(EnhancedInstance objInst, Method method, Object[] allArguments, Class\u0026lt;?\u0026gt;[] argumentsTypes, Throwable t, MethodInvocationContext context); } Bootstrap class instrumentation. SkyWalking has packaged the bootstrap instrumentation in the agent core. You can easily implement it by declaring it in the instrumentation definition.\nOverride the public boolean isBootstrapInstrumentation() and return true. Such as\npublic class URLInstrumentation extends ClassEnhancePluginDefine { private static String CLASS_NAME = \u0026#34;java.net.URL\u0026#34;; @Override protected ClassMatch enhanceClass() { return byName(CLASS_NAME); } @Override public ConstructorInterceptPoint[] getConstructorsInterceptPoints() { return new ConstructorInterceptPoint[] { new ConstructorInterceptPoint() { @Override public ElementMatcher\u0026lt;MethodDescription\u0026gt; getConstructorMatcher() { return any(); } @Override public String getConstructorInterceptor() { return \u0026#34;org.apache.skywalking.apm.plugin.jre.httpurlconnection.Interceptor2\u0026#34;; } } }; } @Override public InstanceMethodsInterceptPoint[] getInstanceMethodsInterceptPoints() { return new InstanceMethodsInterceptPoint[0]; } @Override public StaticMethodsInterceptPoint[] getStaticMethodsInterceptPoints() { return new StaticMethodsInterceptPoint[0]; } @Override public boolean isBootstrapInstrumentation() { return true; } } ClassEnhancePluginDefineV2 is provided in v2 APIs, #isBootstrapInstrumentation works too.\nNOTE: Bootstrap instrumentation should be used only where necessary. During its actual execution, it mostly affects the JRE core(rt.jar). Defining it other than where necessary could lead to unexpected results or side effects.\nProvide custom config for the plugin The config could provide different behaviours based on the configurations. The SkyWalking plugin mechanism provides the configuration injection and initialization system in the agent core.\nEvery plugin could declare one or more classes to represent the config by using @PluginConfig annotation. The agent core could initialize this class' static field through System environments, System properties, and agent.config static file.\nThe #root() method in the @PluginConfig annotation requires declaring the root class for the initialization process. Typically, SkyWalking prefers to use nested inner static classes for the hierarchy of the configuration. We recommend using Plugin/plugin-name/config-key as the nested classes structure of the config class.\nNOTE: because of the Java ClassLoader mechanism, the @PluginConfig annotation should be added on the real class used in the interceptor codes.\nIn the following example, @PluginConfig(root = SpringMVCPluginConfig.class) indicates that initialization should start with using SpringMVCPluginConfig as the root. 
Then, the config key of the attribute USE_QUALIFIED_NAME_AS_ENDPOINT_NAME should be plugin.springmvc.use_qualified_name_as_endpoint_name.\npublic class SpringMVCPluginConfig { public static class Plugin { // NOTE, if move this annotation on the `Plugin` or `SpringMVCPluginConfig` class, it no longer has any effect.  @PluginConfig(root = SpringMVCPluginConfig.class) public static class SpringMVC { /** * If true, the fully qualified method name will be used as the endpoint name instead of the request URL, * default is false. */ public static boolean USE_QUALIFIED_NAME_AS_ENDPOINT_NAME = false; /** * This config item controls that whether the SpringMVC plugin should collect the parameters of the * request. */ public static boolean COLLECT_HTTP_PARAMS = false; } @PluginConfig(root = SpringMVCPluginConfig.class) public static class Http { /** * When either {@link Plugin.SpringMVC#COLLECT_HTTP_PARAMS} is enabled, how many characters to keep and send * to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is * added for the sake of performance */ public static int HTTP_PARAMS_LENGTH_THRESHOLD = 1024; } } } Meter Plugin Java agent plugin could use meter APIs to collect metrics for backend analysis.\n Counter API represents a single monotonically increasing counter which automatically collects data and reports to the backend. import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Counter counter = MeterFactory.counter(meterName).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).mode(Counter.Mode.INCREMENT).build(); counter.increment(1d);    MeterFactory.counter creates a new counter builder with the meter name. Counter.Builder.tag(String key, String value) marks a tag key/value pair. Counter.Builder.mode(Counter.Mode mode) changes the counter mode. RATE mode means the reporting rate to the backend. Counter.Builder.build() builds a new Counter which is collected and reported to the backend. Counter.increment(double count) increment counts to the Counter. It could be a positive value.   Gauge API represents a single numerical value.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; ThreadPoolExecutor threadPool = ...; Gauge gauge = MeterFactory.gauge(meterName, () -\u0026gt; threadPool.getActiveCount()).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).build();  MeterFactory.gauge(String name, Supplier\u0026lt;Double\u0026gt; getter) creates a new gauge builder with the meter name and supplier function. This function must return a double value. Gauge.Builder.tag(String key, String value) marks a tag key/value pair. Gauge.Builder.build() builds a new Gauge which is collected and reported to the backend.   Histogram API represents a summary sample observations with customized buckets.  import org.apache.skywalking.apm.agent.core.meter.MeterFactory; Histogram histogram = MeterFactory.histogram(\u0026#34;test\u0026#34;).tag(\u0026#34;tagKey\u0026#34;, \u0026#34;tagValue\u0026#34;).steps(Arrays.asList(1, 5, 10)).minValue(0).build(); histogram.addValue(3);  MeterFactory.histogram(String name) creates a new histogram builder with the meter name. Histogram.Builder.tag(String key, String value) marks a tag key/value pair. Histogram.Builder.steps(List\u0026lt;Double\u0026gt; steps) sets up the max values of every histogram buckets. Histogram.Builder.minValue(double value) sets up the minimal value of this histogram. Default is 0. 
Histogram.Builder.build() builds a new Histogram which is collected and reported to the backend. Histogram.addValue(double value) adds value into the histogram, and automatically analyzes what bucket count needs to be incremented. Rule: count into [step1, step2).  Plugin Test Tool The Apache SkyWalking Agent Test Tool Suite is an incredibly useful test tool suite that is available in a wide variety of agent languages. It includes the mock collector and validator. The mock collector is a SkyWalking receiver, like the OAP server.\nYou could learn how to use this tool to test the plugin in this doc. This is a must if you want to contribute plugins to the SkyWalking official repo.\nContribute plugins to the Apache SkyWalking repository We welcome everyone to contribute their plugins.\nPlease follow these steps:\n Submit an issue for your plugin, including any supported versions. Create sub modules under apm-sniffer/apm-sdk-plugin or apm-sniffer/optional-plugins, and the name should include supported library name and versions. Follow this guide to develop. Make sure comments and test cases are provided. Develop and test. Provide the automatic test cases. Learn how to write the plugin test case from this doc Send a pull request and ask for review. The plugin committers will approve your plugins, plugin CI-with-IT, e2e, and the plugin tests will be passed. The plugin is accepted by SkyWalking.  ","title":"Plugin Development Guide","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/java-plugin-development-guide/"},{"content":"Plugin Development Guide You can always take the existing plugins as examples, while there are some general ideas for all plugins.\n  A plugin is a module under the directory skywalking/plugins with an install method;\n  Inside the install method, you find out the relevant method(s) of the libraries that you plan to instrument, and create/close spans before/after those method(s).\n  You should also provide version rules in the plugin module, which means the version of package your plugin aim to test.\nAll below variables will be used by the tools/plugin_doc_gen.py to produce a latest Plugin Doc.\nlink_vector = [\u0026#39;https://www.python-httpx.org/\u0026#39;] # This should link to the official website/doc of this lib # The support matrix is for scenarios where some libraries don\u0026#39;t work for certain Python versions # Therefore, we use the matrix to instruct the CI testing pipeline to skip over plugin test for such Python version # The right side versions, should almost always use A.B.* to test the latest minor version of two recent major versions.  support_matrix = { \u0026#39;httpx\u0026#39;: { \u0026#39;\u0026gt;=3.7\u0026#39;: [\u0026#39;0.23.*\u0026#39;, \u0026#39;0.22.*\u0026#39;] } } # The note will be used when generating the plugin documentation for users. note = \u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;   Every plugin requires a corresponding test under tests/plugin before it can be merged, refer to the Plugin Test Guide when writing a plugin test.\n  Add the corresponding configuration options added/modified by the new plugin to the config.py and add new comments for each, then regenerate the configuration.md by make doc-gen.\n  Steps after coding If your PR introduces the need for a new non-standard library which needs to be pulled via pip or if it removes the need for a previously-used library:\n Run poetry add library --group plugins to pin the dependency to the plugins group, Do not add it to the main dependency! 
Run make doc-gen to generate a test matrix documentation for the plugin.  ","title":"Plugin Development Guide","url":"/docs/skywalking-python/latest/en/contribution/how-to-develop-plugin/"},{"content":"Plugin Development Guide You can always take the existing plugins as examples, while there are some general ideas for all plugins.\n  A plugin is a module under the directory skywalking/plugins with an install method;\n  Inside the install method, you find out the relevant method(s) of the libraries that you plan to instrument, and create/close spans before/after those method(s).\n  You should also provide version rules in the plugin module, which means the version of package your plugin aim to test.\nAll below variables will be used by the tools/plugin_doc_gen.py to produce a latest Plugin Doc.\nlink_vector = [\u0026#39;https://www.python-httpx.org/\u0026#39;] # This should link to the official website/doc of this lib # The support matrix is for scenarios where some libraries don\u0026#39;t work for certain Python versions # Therefore, we use the matrix to instruct the CI testing pipeline to skip over plugin test for such Python version # The right side versions, should almost always use A.B.* to test the latest minor version of two recent major versions.  support_matrix = { \u0026#39;httpx\u0026#39;: { \u0026#39;\u0026gt;=3.7\u0026#39;: [\u0026#39;0.23.*\u0026#39;, \u0026#39;0.22.*\u0026#39;] } } # The note will be used when generating the plugin documentation for users. note = \u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;   Every plugin requires a corresponding test under tests/plugin before it can be merged, refer to the Plugin Test Guide when writing a plugin test.\n  Add the corresponding configuration options added/modified by the new plugin to the config.py and add new comments for each, then regenerate the configuration.md by make doc-gen.\n  Steps after coding If your PR introduces the need for a new non-standard library which needs to be pulled via pip or if it removes the need for a previously-used library:\n Run poetry add library --group plugins to pin the dependency to the plugins group, Do not add it to the main dependency! Run make doc-gen to generate a test matrix documentation for the plugin.  ","title":"Plugin Development Guide","url":"/docs/skywalking-python/next/en/contribution/how-to-develop-plugin/"},{"content":"Plugin Development Guide You can always take the existing plugins as examples, while there are some general ideas for all plugins.\n  A plugin is a module under the directory skywalking/plugins with an install method;\n  Inside the install method, you find out the relevant method(s) of the libraries that you plan to instrument, and create/close spans before/after those method(s).\n  You should also provide version rules in the plugin module, which means the version of package your plugin aim to test.\nAll below variables will be used by the tools/plugin_doc_gen.py to produce a latest Plugin Doc.\nlink_vector = [\u0026#39;https://www.python-httpx.org/\u0026#39;] # This should link to the official website/doc of this lib # The support matrix is for scenarios where some libraries don\u0026#39;t work for certain Python versions # Therefore, we use the matrix to instruct the CI testing pipeline to skip over plugin test for such Python version # The right side versions, should almost always use A.B.* to test the latest minor version of two recent major versions.  
support_matrix = { \u0026#39;httpx\u0026#39;: { \u0026#39;\u0026gt;=3.7\u0026#39;: [\u0026#39;0.23.*\u0026#39;, \u0026#39;0.22.*\u0026#39;] } } # The note will be used when generating the plugin documentation for users. note = \u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;\u0026#34;   Every plugin requires a corresponding test under tests/plugin before it can be merged, refer to the Plugin Test Guide when writing a plugin test.\n  Add the corresponding configuration options added/modified by the new plugin to the config.py and add new comments for each, then regenerate the configuration.md by make doc-gen.\n  Steps after coding If your PR introduces the need for a new non-standard library which needs to be pulled via pip or if it removes the need for a previously-used library:\n Run poetry add library --group plugins to pin the dependency to the plugins group, Do not add it to the main dependency! Run make doc-gen to generate a test matrix documentation for the plugin.  ","title":"Plugin Development Guide","url":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-develop-plugin/"},{"content":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific plugins, through their names. Consequently, the codes of these excluded plugins will not be weaved in, then, no relative tracing and metrics.\nConfiguration plugin:# List the names of excluded plugins, multiple plugin names should be splitted by \u0026#34;,\u0026#34;# NOTE: This parameter only takes effect during the compilation phase.excluded:${SW_AGENT_PLUGIN_EXCLUDES:}This configuration option is also located in the existing configuration files and supports configuration based on environment variables. However, this environment variable only takes effect during the compilation phase.\nThe plugins name please refer to the Support Plugins Documentation.\n","title":"Plugin Exclusion","url":"/docs/skywalking-go/latest/en/advanced-features/plugin-exclusion/"},{"content":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific plugins, through their names. Consequently, the codes of these excluded plugins will not be weaved in, then, no relative tracing and metrics.\nConfiguration plugin:# List the names of excluded plugins, multiple plugin names should be splitted by \u0026#34;,\u0026#34;# NOTE: This parameter only takes effect during the compilation phase.excluded:${SW_AGENT_PLUGIN_EXCLUDES:}This configuration option is also located in the existing configuration files and supports configuration based on environment variables. However, this environment variable only takes effect during the compilation phase.\nThe plugins name please refer to the Support Plugins Documentation.\n","title":"Plugin Exclusion","url":"/docs/skywalking-go/next/en/advanced-features/plugin-exclusion/"},{"content":"Plugin Exclusion The plugin exclusion is used during the compilation phase to exclude specific plugins, through their names. Consequently, the codes of these excluded plugins will not be weaved in, then, no relative tracing and metrics.\nConfiguration plugin:# List the names of excluded plugins, multiple plugin names should be splitted by \u0026#34;,\u0026#34;# NOTE: This parameter only takes effect during the compilation phase.excluded:${SW_AGENT_PLUGIN_EXCLUDES:}This configuration option is also located in the existing configuration files and supports configuration based on environment variables. 
However, this environment variable only takes effect during the compilation phase.\nThe plugins name please refer to the Support Plugins Documentation.\n","title":"Plugin Exclusion","url":"/docs/skywalking-go/v0.4.0/en/advanced-features/plugin-exclusion/"},{"content":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker   Fetcher Filter Forwarder  Envoy ALS v2 GRPC Forwarder Envoy ALS v3 GRPC Forwarder Envoy Metrics v2 GRPC Forwarder Envoy Metrics v3 GRPC Forwarder Native CDS GRPC Forwarder Native EBPF Profiling GRPC Forwarder Native Event GRPC Forwarder Native JVM GRPC Forwarder Native CLR GRPC Forwarder Native Log GRPC Forwarder Native Log Kafka Forwarder Native Management GRPC Forwarder Native Meter GRPC Forwarder Native Process GRPC Forwarder Native Profile GRPC Forwarder Native Tracing GRPC Forwarder OpenTelemetry Metrics v1 GRPC Forwarder   Parser Queue  Memory Queue Mmap Queue None Queue   Receiver  GRPC Envoy ALS v2 Receiver GRPC Envoy ALS v3 Receiver GRPC Envoy Metrics v2 Receiver GRPC Envoy Metrics v3 Receiver GRPC Native CDS Receiver GRPC Native EBFP Profiling Receiver GRPC Native Event Receiver GRPC Native JVM Receiver GRPC Native CLR Receiver GRPC Native Log Receiver GRPC Native Management Receiver GRPC Native Meter Receiver GRPC Native Process Receiver GRPC Native Profile Receiver GRPC Native Tracing Receiver GRPC OpenTelemetry Metrics v1 Receiver HTTP Native Log Receiver   Server  GRPC Server HTTP Server    ","title":"Plugin List","url":"/docs/skywalking-satellite/latest/en/setup/plugins/plugin-list/"},{"content":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker   Fetcher Filter Forwarder  Envoy ALS v2 GRPC Forwarder Envoy ALS v3 GRPC Forwarder Envoy Metrics v2 GRPC Forwarder Envoy Metrics v3 GRPC Forwarder Native CDS GRPC Forwarder Native CLR GRPC Forwarder GRPC Native EBFP Access Log Forwarder Native EBPF Profiling GRPC Forwarder Native Event GRPC Forwarder Native JVM GRPC Forwarder Native Log GRPC Forwarder Native Log Kafka Forwarder Native Management GRPC Forwarder Native Meter GRPC Forwarder Native Process GRPC Forwarder Native Profile GRPC Forwarder Native Tracing GRPC Forwarder OpenTelemetry Metrics v1 GRPC Forwarder   Parser Queue  Memory Queue Mmap Queue None Queue   Receiver  GRPC Envoy ALS v2 Receiver GRPC Envoy ALS v3 Receiver GRPC Envoy Metrics v2 Receiver GRPC Envoy Metrics v3 Receiver GRPC Native CDS Receiver GRPC Native CLR Receiver GRPC Native EBFP Accesslog Receiver GRPC Native EBFP Profiling Receiver GRPC Native Event Receiver GRPC Native JVM Receiver GRPC Native Log Receiver GRPC Native Management Receiver GRPC Native Meter Receiver GRPC Native Process Receiver GRPC Native Profile Receiver GRPC Native Tracing Receiver GRPC OpenTelemetry Metrics v1 Receiver HTTP Native Log Receiver   Server  GRPC Server HTTP Server    ","title":"Plugin List","url":"/docs/skywalking-satellite/next/en/setup/plugins/plugin-list/"},{"content":"Plugin List  Client  GRPC Client Kafka Client   Fallbacker  None Fallbacker Timer Fallbacker   Fetcher Filter Forwarder  Envoy ALS v2 GRPC Forwarder Envoy ALS v3 GRPC Forwarder Envoy Metrics v2 GRPC Forwarder Envoy Metrics v3 GRPC Forwarder Native CDS GRPC Forwarder Native EBPF Profiling GRPC Forwarder Native Event GRPC Forwarder Native JVM GRPC Forwarder Native CLR GRPC Forwarder Native Log GRPC Forwarder Native Log Kafka Forwarder Native Management GRPC Forwarder Native Meter GRPC Forwarder Native Process GRPC Forwarder Native Profile GRPC Forwarder 
Native Tracing GRPC Forwarder OpenTelemetry Metrics v1 GRPC Forwarder   Parser Queue  Memory Queue Mmap Queue None Queue   Receiver  GRPC Envoy ALS v2 Receiver GRPC Envoy ALS v3 Receiver GRPC Envoy Metrics v2 Receiver GRPC Envoy Metrics v3 Receiver GRPC Native CDS Receiver GRPC Native EBFP Profiling Receiver GRPC Native Event Receiver GRPC Native JVM Receiver GRPC Native CLR Receiver GRPC Native Log Receiver GRPC Native Management Receiver GRPC Native Meter Receiver GRPC Native Process Receiver GRPC Native Profile Receiver GRPC Native Tracing Receiver GRPC OpenTelemetry Metrics v1 Receiver HTTP Native Log Receiver   Server  GRPC Server HTTP Server    ","title":"Plugin List","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/plugin-list/"},{"content":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins.\nRegistration mechanism The Plugin registration mechanism in Satellite is similar to the SPI registration mechanism of Java. Plugin registration mechanism supports to register an interface and its implementation, that means different interfaces have different registration spaces. We can easily find the type of a specific plugin according to the interface and the plugin name and initialize it according to the type.\nstructure:\n code: map[reflect.Type]map[string]reflect.Value meaning: map[interface type]map[plugin name] plugin type  Initialization mechanism Users can easily find a plugin type and initialize an empty plugin instance according to the previous registration mechanism. For setting up the configuration of the extension convenience, we define the initialization mechanism in Plugin structure.\nIn the initialization mechanism, the plugin category(interface) and the init config is required.\nInitialize processing is like the following.\n Find the plugin name in the input config according to the fixed key plugin_name. Find plugin type according to the plugin category(interface) and the plugin name. Create an empty plugin. Initialize the plugin according to the merged config, which is created by the input config and the default config.  Plugin usage in Satellite Nowadays, the numbers of the Plugin categories is 2. One is the sharing Plugin, and another is the other normal Plugin.\n Extension Plugins:  sharing plugins  Server Plugin Client Plugin   normal plugins  Receiver Plugin Fetcher Plugin Parser Plugin Queue Plugin Filter Plugin Fallbacker Plugin Forwarder Plugin      ","title":"plugin structure","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/plugin_mechanism/"},{"content":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins.\nRegistration mechanism The Plugin registration mechanism in Satellite is similar to the SPI registration mechanism of Java. Plugin registration mechanism supports to register an interface and its implementation, that means different interfaces have different registration spaces. We can easily find the type of a specific plugin according to the interface and the plugin name and initialize it according to the type.\nstructure:\n code: map[reflect.Type]map[string]reflect.Value meaning: map[interface type]map[plugin name] plugin type  Initialization mechanism Users can easily find a plugin type and initialize an empty plugin instance according to the previous registration mechanism. 
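As a conceptual sketch only (Satellite implements this registry in Go with reflect, and every name below is invented for illustration), the described structure amounts to a two-level map keyed by the plugin interface and then by the plugin name, from which an empty instance can be created on demand:\nMap\u0026lt;Class\u0026lt;?\u0026gt;, Map\u0026lt;String, Supplier\u0026lt;Object\u0026gt;\u0026gt;\u0026gt; registry = new HashMap\u0026lt;\u0026gt;(); /* interface type -\u0026gt; plugin name -\u0026gt; factory */ void register(Class\u0026lt;?\u0026gt; category, String name, Supplier\u0026lt;Object\u0026gt; factory) { registry.computeIfAbsent(category, c -\u0026gt; new HashMap\u0026lt;\u0026gt;()).put(name, factory); } Object createEmptyPlugin(Class\u0026lt;?\u0026gt; category, String pluginName) { return registry.get(category).get(pluginName).get(); }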
For setting up the configuration of the extension convenience, we define the initialization mechanism in Plugin structure.\nIn the initialization mechanism, the plugin category(interface) and the init config is required.\nInitialize processing is like the following.\n Find the plugin name in the input config according to the fixed key plugin_name. Find plugin type according to the plugin category(interface) and the plugin name. Create an empty plugin. Initialize the plugin according to the merged config, which is created by the input config and the default config.  Plugin usage in Satellite Nowadays, the numbers of the Plugin categories is 2. One is the sharing Plugin, and another is the other normal Plugin.\n Extension Plugins:  sharing plugins  Server Plugin Client Plugin   normal plugins  Receiver Plugin Fetcher Plugin Parser Plugin Queue Plugin Filter Plugin Fallbacker Plugin Forwarder Plugin      ","title":"plugin structure","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/plugin_mechanism/"},{"content":"plugin structure Plugin is a common concept for Satellite, which is in all extension plugins.\nRegistration mechanism The Plugin registration mechanism in Satellite is similar to the SPI registration mechanism of Java. Plugin registration mechanism supports to register an interface and its implementation, that means different interfaces have different registration spaces. We can easily find the type of a specific plugin according to the interface and the plugin name and initialize it according to the type.\nstructure:\n code: map[reflect.Type]map[string]reflect.Value meaning: map[interface type]map[plugin name] plugin type  Initialization mechanism Users can easily find a plugin type and initialize an empty plugin instance according to the previous registration mechanism. For setting up the configuration of the extension convenience, we define the initialization mechanism in Plugin structure.\nIn the initialization mechanism, the plugin category(interface) and the init config is required.\nInitialize processing is like the following.\n Find the plugin name in the input config according to the fixed key plugin_name. Find plugin type according to the plugin category(interface) and the plugin name. Create an empty plugin. Initialize the plugin according to the merged config, which is created by the input config and the default config.  Plugin usage in Satellite Nowadays, the numbers of the Plugin categories is 2. One is the sharing Plugin, and another is the other normal Plugin.\n Extension Plugins:  sharing plugins  Server Plugin Client Plugin   normal plugins  Receiver Plugin Fetcher Plugin Parser Plugin Queue Plugin Filter Plugin Fallbacker Plugin Forwarder Plugin      ","title":"plugin structure","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/plugin_mechanism/"},{"content":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the master branch. 
Specify a support matrix in each plugin in the skywalking/plugins folder, along with their website links, the matrix and links will be used for plugin support table documentation generation for this doc Plugins.md.\nUse make doc-gen to generate a table and paste into Plugins.md after all test passes.\nSkyWalking Agent Test Tool (Mock Collector) SkyWalking Agent Test Tool respects the same protocol as the SkyWalking backend, and thus receives the report data from the agent side, besides, it also exposes some HTTP endpoints for verification.\nTested Service A tested service is a service involving the plugin that is to be tested, and exposes some endpoints to trigger the instrumented code and report log/trace/meter data to the mock collector.\nDocker Compose docker-compose is used to orchestrate the mock collector and the tested service(s), the docker-compose.yml should be able to run with docker-compose -f docker-compose.yml up in standalone mode, which can be used in debugging too.\nExpected Data The expected.data.yml file contains the expected segment/log/meter data after we have triggered the instrumentation and report to mock collector.\nOnce the mock collector receives data, we post the expected data to the mock collector and verify whether they match.\nThis can be done through the /dataValidate of the mock collector, say http://collector:12800/dataValidate, for example.\nExample If we want to test the plugin for the built-in library http, we will:\n Build a tested service, which sets up an HTTP server by http library, and exposes an HTTP endpoint to be triggered in the test codes, say /trigger, take this provider service as example. Compose a docker-compose.yml file, orchestrating the service built in step 1 and the mock collector, take this docker-compose.yml as an example. Write test codes to trigger the endpoint in step 1, and send the expected data file to the mock collector to verify, take this test as example.  ","title":"Plugin Test","url":"/docs/skywalking-python/latest/en/contribution/how-to-test-plugin/"},{"content":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the master branch. 
Specify a support matrix in each plugin in the skywalking/plugins folder, along with their website links, the matrix and links will be used for plugin support table documentation generation for this doc Plugins.md.\nUse make doc-gen to generate a table and paste into Plugins.md after all test passes.\nSkyWalking Agent Test Tool (Mock Collector) SkyWalking Agent Test Tool respects the same protocol as the SkyWalking backend, and thus receives the report data from the agent side, besides, it also exposes some HTTP endpoints for verification.\nTested Service A tested service is a service involving the plugin that is to be tested, and exposes some endpoints to trigger the instrumented code and report log/trace/meter data to the mock collector.\nDocker Compose docker-compose is used to orchestrate the mock collector and the tested service(s), the docker-compose.yml should be able to run with docker-compose -f docker-compose.yml up in standalone mode, which can be used in debugging too.\nExpected Data The expected.data.yml file contains the expected segment/log/meter data after we have triggered the instrumentation and report to mock collector.\nOnce the mock collector receives data, we post the expected data to the mock collector and verify whether they match.\nThis can be done through the /dataValidate of the mock collector, say http://collector:12800/dataValidate, for example.\nExample If we want to test the plugin for the built-in library http, we will:\n Build a tested service, which sets up an HTTP server by http library, and exposes an HTTP endpoint to be triggered in the test codes, say /trigger, take this provider service as example. Compose a docker-compose.yml file, orchestrating the service built in step 1 and the mock collector, take this docker-compose.yml as an example. Write test codes to trigger the endpoint in step 1, and send the expected data file to the mock collector to verify, take this test as example.  ","title":"Plugin Test","url":"/docs/skywalking-python/next/en/contribution/how-to-test-plugin/"},{"content":"Plugin Test Plugin tests are required and should pass before a new plugin is able to merge into the master branch. 
Specify a support matrix in each plugin in the skywalking/plugins folder, along with their website links, the matrix and links will be used for plugin support table documentation generation for this doc Plugins.md.\nUse make doc-gen to generate a table and paste into Plugins.md after all test passes.\nSkyWalking Agent Test Tool (Mock Collector) SkyWalking Agent Test Tool respects the same protocol as the SkyWalking backend, and thus receives the report data from the agent side, besides, it also exposes some HTTP endpoints for verification.\nTested Service A tested service is a service involving the plugin that is to be tested, and exposes some endpoints to trigger the instrumented code and report log/trace/meter data to the mock collector.\nDocker Compose docker-compose is used to orchestrate the mock collector and the tested service(s), the docker-compose.yml should be able to run with docker-compose -f docker-compose.yml up in standalone mode, which can be used in debugging too.\nExpected Data The expected.data.yml file contains the expected segment/log/meter data after we have triggered the instrumentation and report to mock collector.\nOnce the mock collector receives data, we post the expected data to the mock collector and verify whether they match.\nThis can be done through the /dataValidate of the mock collector, say http://collector:12800/dataValidate, for example.\nExample If we want to test the plugin for the built-in library http, we will:\n Build a tested service, which sets up an HTTP server by http library, and exposes an HTTP endpoint to be triggered in the test codes, say /trigger, take this provider service as example. Compose a docker-compose.yml file, orchestrating the service built in step 1 and the mock collector, take this docker-compose.yml as an example. Write test codes to trigger the endpoint in step 1, and send the expected data file to the mock collector to verify, take this test as example.  ","title":"Plugin Test","url":"/docs/skywalking-python/v1.0.1/en/contribution/how-to-test-plugin/"},{"content":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\n","title":"PostgreSQL","url":"/docs/main/latest/en/setup/backend/storages/postgresql/"},{"content":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. 
Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\n","title":"PostgreSQL","url":"/docs/main/next/en/setup/backend/storages/postgresql/"},{"content":"PostgreSQL PostgreSQL JDBC driver uses version 42.3.2. It supports PostgreSQL 8.2 or newer. Activate PostgreSQL as storage, and set storage provider to postgresql.\nstorage:selector:${SW_STORAGE:postgresql}postgresql:properties:jdbcUrl:${SW_JDBC_URL:\u0026#34;jdbc:postgresql://localhost:5432/skywalking\u0026#34;}dataSource.user:${SW_DATA_SOURCE_USER:postgres}dataSource.password:${SW_DATA_SOURCE_PASSWORD:123456}dataSource.cachePrepStmts:${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}dataSource.prepStmtCacheSize:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}dataSource.prepStmtCacheSqlLimit:${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}dataSource.useServerPrepStmts:${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}metadataQueryMaxSize:${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}maxSizeOfArrayColumn:${SW_STORAGE_MAX_SIZE_OF_ARRAY_COLUMN:20}numOfSearchableValuesPerTag:${SW_STORAGE_NUM_OF_SEARCHABLE_VALUES_PER_TAG:2}maxSizeOfBatchSql:${SW_STORAGE_MAX_SIZE_OF_BATCH_SQL:2000}asyncBatchPersistentPoolSize:${SW_STORAGE_ASYNC_BATCH_PERSISTENT_POOL_SIZE:4}All connection-related settings, including URL link, username, and password, are found in application.yml. Only part of the settings is listed here. Please follow HikariCP connection pool document for full settings.\n","title":"PostgreSQL","url":"/docs/main/v9.7.0/en/setup/backend/storages/postgresql/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. 
Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/latest/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. 
Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/next/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  
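As a sketch of the set-up steps above: an OpenTelemetry Collector configuration that scrapes postgres-exporter and forwards the metrics to the OAP over OTLP gRPC could look roughly like the block below. The host names, the postgres-exporter port (9187 is its usual default) and the OAP gRPC port (11800) are assumptions about a typical deployment, and the official otel-collector example in the SkyWalking repository adds relabeling that the bundled MAL rules expect, so treat this as a starting point only.

receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: postgres-exporter
          scrape_interval: 30s
          static_configs:
            - targets: ['postgres-exporter:9187']

exporters:
  otlp:
    endpoint: oap:11800
    tls:
      insecure: true

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]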
PostgreSQL Monitoring PostgreSQL monitoring provides monitoring of the status and resources of the PostgreSQL server.PostgreSQL server as a Service in OAP, and land on the Layer: POSTGRESQL.\nPostgreSQL Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. 
All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql.yaml. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/v9.2.0/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL monitoring provides monitoring of the status and resources of the PostgreSQL server.PostgreSQL server as a Service in OAP, and land on the Layer: POSTGRESQL.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. 
postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql.yaml. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. 
Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/v9.3.0/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL monitoring provides monitoring of the status and resources of the PostgreSQL server.PostgreSQL server as a Service in OAP, and land on the Layer: POSTGRESQL.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-postgresql-monitoring/"},{"content":"PostgreSQL monitoring PostgreSQL server performance from postgres-exporter SkyWalking leverages postgres-exporter for collecting metrics data from PostgreSQL. 
It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  postgres-exporter collect metrics data from PostgreSQL. OpenTelemetry Collector fetches metrics from postgres-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up postgres-exporter. Set up OpenTelemetry Collector . For details on Prometheus Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  PostgreSQL Monitoring PostgreSQL cluster is cataloged as a Layer: PostgreSQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Shared Buffers MB meter_pg_shared_buffers The number of shared memory buffers used by the server postgres-exporter   Effective Cache GB meter_pg_effective_cache The planner\u0026rsquo;s assumption about the total size of the data caches postgres-exporter   Maintenance Work Mem MB meter_pg_maintenance_work_mem The maximum memory to be used for maintenance operations postgres-exporter   Seq Page Cost  meter_pg_seq_page_cost The planner\u0026rsquo;s estimate of the cost of a sequentially fetched disk page. postgres-exporter   Random Page Cost  meter_pg_random_page_cost The planner\u0026rsquo;s estimate of the cost of a nonsequentially fetched disk page. postgres-exporter   Max Worker Processes  meter_pg_max_worker_processes Maximum number of concurrent worker processes postgres-exporter   Max WAL Size GB meter_max_wal_size The WAL size that triggers a checkpoint postgres-exporter   Max Parallel Workers  meter_pg_max_parallel_workers The maximum number of parallel processes per executor node postgres-exporter   Work Mem MB meter_pg_max_work_mem The maximum memory to be used for query workspaces. postgres-exporter   Fetched Row Trend  meter_pg_fetched_rows_rate The trend of the number of rows fetched by queries in this database. postgres-exporter   Inserted Row Trend  meter_pg_inserted_rows_rate The trend of the number of rows inserted by queries in this database. postgres-exporter   Updated Row Trend  meter_pg_updated_rows_rate The trend of the number of rows updated by queries in this database. postgres-exporter   Deleted Row Trend  meter_pg_deleted_rows_rate The trend of the number of rows deleted by queries in this database. postgres-exporter   Returned Row Trend  meter_pg_returned_rows_rate The trend of the number of rows returned by queries in this database. 
postgres-exporter   Committed Transactions Trend  meter_pg_committed_transactions_rate The trend of the number of transactions in this database that have been committed postgres-exporter   Rolled Back Transactions Trend  meter_pg_rolled_back_transactions_rate The trend of the number of transactions in this database that have been rolled back postgres-exporter   Buffers Trend  meter_pg_buffers_alloc  meter_pg_buffers_checkpoint meter_pg_buffers_clean meter_pg_buffers_backend_fsync meter_pg_buffers_backend The trend of the number of buffers postgres-exporter   Conflicts Trend  meter_pg_conflicts_rate The trend of the number of queries canceled due to conflicts with recovery in this database postgres-exporter   Deadlock Trend  meter_pg_deadlocks_rate The trend of the number of deadlocks detected in this database postgres-exporter   Cache Hit Rate % meter_pg_cache_hit_rate The rate of cache hit postgres-exporter   Temporary Files Trend  meter_pg_temporary_files_rate The rate of total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the log_temp_files setting postgres-exporter   Checkpoint Stat Trend  meter_pg_checkpoint_write_time_rate  meter_pg_checkpoint_sync_time_rate  meter_pg_checkpoint_req_rate meter_pg_checkpoint_timed_rate The trend of checkpoint stat postgres-exporter   Active Sessions  meter_pg_active_sessions The number of connections which state is active postgres-exporter   Idle Sessions  meter_pg_idle_sessions The number of connections which state is idle,idle in transaction or idle in transaction (aborted) postgres-exporter   Locks Count  meter_pg_locks_count Number of locks postgres-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/postgresql. The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\nCollect sampled slow SQLs SkyWalking leverages fluentbit or other log agents for collecting slow SQL statements from PostgreSQL.\nData flow  fluentbit agent collects slow sql logs from PostgreSQL. fluentbit agent sends data to SkyWalking OAP Server using native log APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit Config PostgreSQL to enable slow log. Example.  Slow SQL Monitoring Slow SQL monitoring provides monitoring of the slow SQL statements of the PostgreSQL server. PostgreSQL Cluster is cataloged as a Layer: POSTGRESQL Service in OAP. Each PostgreSQL server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of PostgreSQL slow SQLs fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/pgsql-slowsql.yaml The PostgreSQL dashboard panel configurations are found in /config/ui-initialized-templates/postgresql.\n","title":"PostgreSQL monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-postgresql-monitoring/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. 
Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another example is SkyWalking agent, which leverage Golang compiling mechanism to weaves codes in the compiling time. For some static compilation languages, such as C++, manual library is the only choice. As you can see, these agents are based on languages and libraries, no matter we provide auto instrument or manual agents.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. SkyWalking community is connected closely with Zipkin community, it could work as an alternative server for both v1 and v2 Zipkin traces. Also, OTEL trace format in gRPC is supported, and converted to Zipkin format inside SkyWalking. As an alternative Zipkin server, Zipkin lens UI could be used to visualize accepted traces when they are in Zipkin format. See Receiver for Zipkin traces and Receiver for OTEL traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t have to install all probes to make SkyWalking up and running. There are several recommended ways on how to use these probes:\n Use Language based native agent only to build topology and metrics for your business application. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe if you prefer Service Mesh stack and don\u0026rsquo;t want to use native agents. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in pure tracing status. (Advanced usage) Use eBPF agent only if you only want to profile on demand and/or activating automatic performance analysis. Use eBPF agent with Language based native agent collaboratively. Enhance the traces with the eBPF agent to collect extra information.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In pure tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, but doesn\u0026rsquo;t run the metrics analysis from traces. As a result, there would not have data of service/instance/endpoint metrics and relationships.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  
","title":"Probe Introduction","url":"/docs/main/latest/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another example is SkyWalking agent, which leverage Golang compiling mechanism to weaves codes in the compiling time. For some static compilation languages, such as C++, manual library is the only choice. As you can see, these agents are based on languages and libraries, no matter we provide auto instrument or manual agents.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. SkyWalking community is connected closely with Zipkin community, it could work as an alternative server for both v1 and v2 Zipkin traces. Also, OTEL trace format in gRPC is supported, and converted to Zipkin format inside SkyWalking. As an alternative Zipkin server, Zipkin lens UI could be used to visualize accepted traces when they are in Zipkin format. See Receiver for Zipkin traces and Receiver for OTEL traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t have to install all probes to make SkyWalking up and running. There are several recommended ways on how to use these probes:\n Use Language based native agent only to build topology and metrics for your business application. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe if you prefer Service Mesh stack and don\u0026rsquo;t want to use native agents. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in pure tracing status. (Advanced usage) Use eBPF agent only if you only want to profile on demand and/or activating automatic performance analysis. Use eBPF agent with Language based native agent collaboratively. Enhance the traces with the eBPF agent to collect extra information.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In pure tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, but doesn\u0026rsquo;t run the metrics analysis from traces. As a result, there would not have data of service/instance/endpoint metrics and relationships.\nWhat is next?  
Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/next/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are three typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage)  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.0.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. 
Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiles the target service, powered by the eBPF technology of the Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.1.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. 
For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.2.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. 
Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.3.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. 
It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.4.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. 
The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.5.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another kind of agents uses certain hook or intercept mechanism provided by target libraries. As you can see, these agents are based on languages and libraries.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. It analyzes the data, transfers it to SkyWalking\u0026rsquo;s formats of trace, metrics or both. This feature starts with accepting Zipkin span data. See Receiver for Zipkin traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t need to use Language based native agent and Service Mesh probe at the same time, since they both serve to collect metrics data. 
Otherwise, your system will suffer twice the payload, and the analytic numbers will be doubled.\nThere are several recommended ways on how to use these probes:\n Use Language based native agent only. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe only. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in tracing status. (Advanced usage) Use eBPF agent only. Use eBPF agent with Language based native agent collaboratively.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, and builds the links between traces and metrics, like which endpoint and service does the trace belong?.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.6.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Introduction In SkyWalking, probe means an agent or SDK library integrated into a target system that takes charge of collecting telemetry data, including tracing and metrics. Depending on the target system tech stack, there are very different ways how the probe performs such tasks. But ultimately, they all work towards the same goal — to collect and reformat data, and then to send them to the backend.\nOn a high level, there are four typical categories in all SkyWalking probes.\n  Language based native agent. These agents run in target service user spaces, such as a part of user codes. For example, the SkyWalking Java agent uses the -javaagent command line argument to manipulate codes in runtime, where manipulate means to change and inject user\u0026rsquo;s codes. Another example is SkyWalking agent, which leverage Golang compiling mechanism to weaves codes in the compiling time. For some static compilation languages, such as C++, manual library is the only choice. As you can see, these agents are based on languages and libraries, no matter we provide auto instrument or manual agents.\n  Service Mesh probes. Service Mesh probes collect data from sidecar, control plane in service mesh or proxy. In the old days, proxy is only used as an ingress of the whole cluster, but with the Service Mesh and sidecar, we can now perform observability functions.\n  3rd-party instrument library. SkyWalking accepts many widely used instrument libraries data formats. SkyWalking community is connected closely with Zipkin community, it could work as an alternative server for both v1 and v2 Zipkin traces. Also, OTEL trace format in gRPC is supported, and converted to Zipkin format inside SkyWalking. As an alternative Zipkin server, Zipkin lens UI could be used to visualize accepted traces when they are in Zipkin format. See Receiver for Zipkin traces and Receiver for OTEL traces for more information.\n  eBPF agent. The eBPF agent collects metrics and profiling the target service powered by the eBPF technology of Linux kernel.\n  You don\u0026rsquo;t have to install all probes to make SkyWalking up and running. 
There are several recommended ways on how to use these probes:\n Use Language based native agent only to build topology and metrics for your business application. Use 3rd-party instrument library only, like the Zipkin instrument ecosystem. Use Service Mesh probe if you prefer Service Mesh stack and don\u0026rsquo;t want to use native agents. Use Service Mesh probe with Language based native agent or 3rd-party instrument library in pure tracing status. (Advanced usage) Use eBPF agent only if you only want to profile on demand and/or activating automatic performance analysis. Use eBPF agent with Language based native agent collaboratively. Enhance the traces with the eBPF agent to collect extra information.  What is the meaning of in tracing status?\nBy default, Language based native agent and 3rd-party instrument library both send distributed traces to the backend, where analyses and aggregation on those traces are performed. In pure tracing status means that the backend considers these traces as something like logs. In other words, the backend saves them, but doesn\u0026rsquo;t run the metrics analysis from traces. As a result, there would not have data of service/instance/endpoint metrics and relationships.\nWhat is next?  Learn more about the probes supported by SkyWalking in Service auto instrument agent , Manual instrument SDK and Zipkin receiver. After understanding how the probe works, see the backend overview for more on analysis and persistence.  ","title":"Probe Introduction","url":"/docs/main/v9.7.0/en/concepts-and-designs/probe-introduction/"},{"content":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, and Zabbix.\nThe native metrics format definition could be found here. Typically, the agent meter plugin (e.g. 
Java Meter Plugin) and Satellite Prometheus fetcher would convert metrics into native format and forward them to SkyWalking OAP server.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Probe Protocols","url":"/docs/main/v9.0.0/en/protocols/readme/"},{"content":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, and Zabbix.\nThe native metrics format definition could be found here. Typically, the agent meter plugin (e.g. Java Meter Plugin) and Satellite Prometheus fetcher would convert metrics into native format and forward them to SkyWalking OAP server.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. 
This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Probe Protocols","url":"/docs/main/v9.1.0/en/protocols/readme/"},{"content":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, OpenTelemetry, and Zabbix.\nThe native metrics format definition could be found here. Typically, the agent meter plugin (e.g. Java Meter Plugin) and Satellite Prometheus fetcher would convert metrics into native format and forward them to SkyWalking OAP server.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. 
The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Probe Protocols","url":"/docs/main/v9.2.0/en/protocols/readme/"},{"content":"Probe Protocols Probe protocols describe and define how agents send collected metrics, logs, traces, and events, as well as set out the format of each entity.\nTracing There are two types of protocols that help language agents work in distributed tracing.\n Cross Process Propagation Headers Protocol and Cross Process Correlation Headers Protocol come in in-wire data format. Agent/SDK usually uses HTTP/MQ/HTTP2 headers to carry the data with the RPC request. The remote agent will receive this in the request handler, and bind the context with this specific request.  Cross Process Propagation Headers Protocol v3 has been the new protocol for in-wire context propagation since the version 8.0.0 release.\nCross Process Correlation Headers Protocol v1 is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\n Trace Data Protocol is an out-of-wire data format. Agent/SDK uses this to send traces to SkyWalking OAP server.  SkyWalking Trace Data Protocol v3.1 defines the communication method and format between the agent and backend.\nLogging  Log Data Protocol is an out-of-wire data format. Agent/SDK and collector use this to send logs into SkyWalking OAP server. SkyWalking Log Data Protocol defines the communication method and format between the agent and backend.  Metrics SkyWalking has a native metrics format, and supports widely used metric formats, such as Prometheus, OpenCensus, OpenTelemetry, and Zabbix.\nThe native metrics format definition could be found here. The agent meter plugin (e.g. Java Meter Plugin) uses the native metric format to report metrics.\nOpenTelemetry collector, Telegraf agents, Zabbix agents could use their native protocol(e.g. OTLP) and OAP server would convert metrics into native format and forward them to MAL engine.\nTo learn more about receiving 3rd party formats metrics, see Meter receiver and OpenTelemetry receiver.\nBrowser probe protocol The browser probe, such as skywalking-client-js, could use this protocol to send data to the backend. This service is provided by gRPC.\nSkyWalking Browser Protocol defines the communication method and format between skywalking-client-js and backend.\nEvents Report Protocol The protocol is used to report events to the backend. The doc introduces the definition of an event, and the protocol repository defines gRPC services and message formats of events.\nJSON format events can be reported via HTTP API. The endpoint is http://\u0026lt;oap-address\u0026gt;:12800/v3/events. 
Example of a JSON event record:\n[ { \u0026#34;uuid\u0026#34;: \u0026#34;f498b3c0-8bca-438d-a5b0-3701826ae21c\u0026#34;, \u0026#34;source\u0026#34;: { \u0026#34;service\u0026#34;: \u0026#34;SERVICE-A\u0026#34;, \u0026#34;instance\u0026#34;: \u0026#34;INSTANCE-1\u0026#34; }, \u0026#34;name\u0026#34;: \u0026#34;Reboot\u0026#34;, \u0026#34;type\u0026#34;: \u0026#34;Normal\u0026#34;, \u0026#34;message\u0026#34;: \u0026#34;App reboot.\u0026#34;, \u0026#34;parameters\u0026#34;: {}, \u0026#34;startTime\u0026#34;: 1628044330000, \u0026#34;endTime\u0026#34;: 1628044331000 } ] ","title":"Probe Protocols","url":"/docs/main/v9.3.0/en/protocols/readme/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/latest/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. 
The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/latest/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/latest/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/latest/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  
","title":"Problem","url":"/docs/main/latest/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/latest/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/latest/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","title":"Problem","url":"/docs/main/next/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/next/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/next/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/next/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","title":"Problem","url":"/docs/main/next/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/next/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/next/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/v9.0.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.0.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. 
If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.0.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.0.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","title":"Problem","url":"/docs/main/v9.0.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.0.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  
Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.0.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/v9.1.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. 
The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.1.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.1.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.1.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  
","title":"Problem","url":"/docs/main/v9.1.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.1.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.1.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","title":"Problem","url":"/docs/main/v9.2.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.2.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.2.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.2.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","title":"Problem","url":"/docs/main/v9.2.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.2.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.2.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
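The thread-pool FAQ above resolves the cross-thread tracing gap by wrapping each submitted task in RunnableWrapper. Below is a minimal, self-contained sketch of that resolution; the import path is an assumption based on the across-thread solution APIs (apm-toolkit-trace), and the class and method names are illustrative only.

```java
// Sketch of the "Custom Thread Pool" resolution from the FAQ above.
// Assumes the apm-toolkit-trace dependency is on the classpath; the import
// path below is an assumption, not copied from the FAQ text.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.skywalking.apm.toolkit.trace.RunnableWrapper;

public class CrossThreadTracingExample {

    public static void main(String[] args) {
        ExecutorService executorService = Executors.newFixedThreadPool(1);

        // Wrapping the submitted task lets the agent link the TraceSegment
        // produced in the worker thread back to the caller's trace context.
        executorService.execute(RunnableWrapper.of(() -> {
            // your code
        }));

        executorService.shutdown();
    }
}
```
Note that the wrapper goes around each task handed to the pool, not around the pool's long-lived worker threads as in the Example under the problem statement; per the FAQ's Reason section, it is the enhancement of the worker threads that leaves TraceSegment data unreported and unrecycled, while the agent's design requires enhancing the task instead.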
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/v9.3.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.3.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. 
If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.3.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.3.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","title":"Problem","url":"/docs/main/v9.3.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.3.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  
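For the Kafka consumer FAQ above, the poll action and the processing action have to be wrapped manually, for example with the @KafkaPollAndInvoke annotation from apm-toolkit-kafka. The following is only a hedged sketch of how that wrapping could look; the annotation's import path, and the assumption that annotating a single poll-and-process method is sufficient, are mine rather than statements from the toolkit documentation.

```java
// Hedged sketch of manual consumer-side instrumentation using the
// @KafkaPollAndInvoke annotation mentioned in the FAQ. The annotation's import
// path is an assumption; verify it against the apm-toolkit-kafka artifact.
import java.time.Duration;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.skywalking.apm.toolkit.kafka.KafkaPollAndInvoke;

public class OrderConsumer {

    private final KafkaConsumer<String, String> consumer;

    public OrderConsumer(KafkaConsumer<String, String> consumer) {
        this.consumer = consumer;
    }

    // Annotating the method that both polls and processes keeps the processing
    // work inside the same traced scope as the poll action.
    @KafkaPollAndInvoke
    public void pollAndProcess() {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        for (ConsumerRecord<String, String> record : records) {
            handle(record);
        }
    }

    private void handle(ConsumerRecord<String, String> record) {
        // your business code
    }
}
```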
Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.3.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/v9.4.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. 
The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.4.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.4.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.4.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  
","title":"Problem","url":"/docs/main/v9.4.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.4.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.4.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  
","title":"Problem","url":"/docs/main/v9.5.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.5.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.5.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  
Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.5.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","title":"Problem","url":"/docs/main/v9.5.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.5.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.5.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. 
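The first resolution in the thread-pool FAQ above points to the thread schedule frameworks already on the agent's supported list, such as Spring Framework @Async, which traces across the async boundary without any modification. A small illustrative sketch of that path follows; the class names are hypothetical, and it simply assumes the agent build in use supports @Async as the FAQ states.

```java
// Illustrative only: with a supported thread schedule framework such as
// Spring's @Async, no RunnableWrapper is needed; per the FAQ, the agent
// carries the trace context across the async call on its own.
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.Async;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.stereotype.Service;

@Configuration
@EnableAsync
class AsyncConfig {
}

@Service
class NotificationService {

    @Async
    public void sendAsync(String message) {
        // your business code; the task thread needs no manual enhancement here
    }
}
```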
For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/v9.6.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.6.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. 
If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.6.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.6.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  ","title":"Problem","url":"/docs/main/v9.6.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.6.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  
Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.6.0/en/faq/why-have-traces-no-others/"},{"content":"Problem When you start your application with the skywalking agent, you may find this exception in your agent log which means that EnhanceRequireObjectCache cannot be casted to EnhanceRequireObjectCache. For example:\nERROR 2018-05-07 21:31:24 InstMethodsInter : class[class org.springframework.web.method.HandlerMethod] after method[getBean] intercept failure java.lang.ClassCastException: org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache cannot be cast to org.apache.skywalking.apm.plugin.spring.mvc.commons.EnhanceRequireObjectCache at org.apache.skywalking.apm.plugin.spring.mvc.commons.interceptor.GetBeanInterceptor.afterMethod(GetBeanInterceptor.java:45) at org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.InstMethodsInter.intercept(InstMethodsInter.java:105) at org.springframework.web.method.HandlerMethod.getBean(HandlerMethod.java) at org.springframework.web.servlet.handler.AbstractHandlerMethodExceptionResolver.shouldApplyTo(AbstractHandlerMethodExceptionResolver.java:47) at org.springframework.web.servlet.handler.AbstractHandlerExceptionResolver.resolveException(AbstractHandlerExceptionResolver.java:131) at org.springframework.web.servlet.handler.HandlerExceptionResolverComposite.resolveException(HandlerExceptionResolverComposite.java:76) ... Reason This exception may be caused by hot deployment tools (spring-boot-devtool) or otherwise, which changes the classloader in runtime.\nResolution  This error does not occur under the production environment, since developer tools are automatically disabled: See spring-boot-devtools. If you would like to debug in your development environment as usual, you should temporarily remove such hot deployment package in your lib path.  ","title":"Problem","url":"/docs/main/v9.7.0/en/faq/enhancerequireobjectcache-cast-exception/"},{"content":"Problem  When importing the SkyWalking project to Eclipse, the following errors may occur:   Software being installed: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) Missing requirement: Checkstyle configuration plugin for M2Eclipse 1.0.0.201705301746 (com.basistech.m2e.code.quality.checkstyle.feature.feature.group 1.0.0.201705301746) requires \u0026lsquo;net.sf.eclipsecs.core 5.2.0\u0026rsquo; but it could not be found\n Reason The Eclipse Checkstyle Plug-in has not been installed.\nResolution Download the plug-in at the link here: https://sourceforge.net/projects/eclipse-cs/?source=typ_redirect Eclipse Checkstyle Plug-in version 8.7.0.201801131309 is required. Plug-in notification: The Eclipse Checkstyle plug-in integrates the Checkstyle Java code auditor into the Eclipse IDE. 
The plug-in provides real-time feedback to the user on rule violations, including checking against coding style and error-prone code constructs.\n","title":"Problem","url":"/docs/main/v9.7.0/en/faq/import-project-eclipse-requireitems-exception/"},{"content":"Problem Tracing doesn\u0026rsquo;t work on the Kafka consumer end.\nReason The kafka client is responsible for pulling messages from the brokers, after which the data will be processed by user-defined codes. However, only the poll action can be traced by the plug-in and the subsequent data processing work inevitably goes beyond the scope of the trace context. Thus, in order to complete tracing on the client end, manual instrumentation is required, i.e. the poll action and the processing action should be wrapped manually.\nResolve For a native Kafka client, please use the Application Toolkit libraries to do the manual instrumentation, with the help of the @KafkaPollAndInvoke annotation in apm-toolkit-kafka or with OpenTracing API. If you\u0026rsquo;re using spring-kafka 1.3.x, 2.2.x or above, you can easily trace the consumer end without further configuration.\n","title":"Problem","url":"/docs/main/v9.7.0/en/faq/kafka-plugin/"},{"content":"Problem When using a thread pool, TraceSegment data in a thread cannot be reported and there are memory data that cannot be recycled (memory leaks).\nExample ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); executor.setThreadFactory(r -\u0026gt; new Thread(RunnableWrapper.of(r))); Reason  Worker threads are enhanced when using the thread pool. Based on the design of the SkyWalking Java Agent, when tracing a cross thread, you must enhance the task thread.  Resolution   When using Thread Schedule Framework: See SkyWalking Thread Schedule Framework at SkyWalking Java agent supported list, such as Spring FrameWork @Async, which can implement tracing without any modification.\n  When using Custom Thread Pool: Enhance the task thread with the following code.\n  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } })); See across thread solution APIs for more use cases.\n","title":"Problem","url":"/docs/main/v9.7.0/en/faq/memory-leak-enhance-worker-thread/"},{"content":"Problem  In maven build, the following error may occur with the protoc-plugin:  [ERROR] Failed to execute goal org.xolstice.maven.plugins:protobuf-maven-plugin:0.5.0:compile-custom (default) on project apm-network: Unable to copy the file to \\skywalking\\apm-network\\target\\protoc-plugins: \\skywalking\\apm-network\\target\\protoc-plugins\\protoc-3.3.0-linux-x86_64.exe (The process cannot access the file because it is being used by another process) -\u0026gt; [Help 1] Reason  The Protobuf compiler is dependent on the glibc. However, glibc has not been installed, or there is an old version already installed in the system.  Resolution  Install or upgrade to the latest version of the glibc library. Under the container environment, the latest glibc version of the alpine system is recommended. Please refer to http://www.gnu.org/software/libc/documentation.html.  
","title":"Problem","url":"/docs/main/v9.7.0/en/faq/protoc-plugin-fails-when-build/"},{"content":"Problem The message with Field ID, 8888, must be reserved.\nReason Because Thrift cannot carry metadata to transport Trace Header in the original API, we transport them by wrapping TProtocolFactory.\nThrift allows us to append any additional fields in the message even if the receiver doesn\u0026rsquo;t deal with them. Those data will be skipped and left unread. Based on this, the 8888th field of the message is used to store Trace Header (or metadata) and to transport them. That means the message with Field ID, 8888, must be reserved.\nResolution Avoid using the Field(ID is 8888) in your application.\n","title":"Problem","url":"/docs/main/v9.7.0/en/faq/thrift-plugin/"},{"content":"Problem  There is no abnormal log in Agent log and Collector log. The traces can be seen, but no other information is available in UI.  Reason The operating system where the monitored system is located is not set as the current time zone, causing statistics collection time points to deviate.\nResolution Make sure the time is synchronized between collector servers and monitored application servers.\n","title":"Problem","url":"/docs/main/v9.7.0/en/faq/why-have-traces-no-others/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! 
stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/latest/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! 
stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. 
","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/next/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. 
Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.0.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... 
SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.1.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! 
stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.2.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! 
stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. 
","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.3.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. 
Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.4.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... 
SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.5.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! 
stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.6.0/en/faq/maven-compile-npm-failure/"},{"content":"Problem: Maven compilation failure with error such as Error: not found: python2 When you compile the project via Maven, it fails at module apm-webapp and the following error occurs.\nPay attention to keywords such as node-sass and Error: not found: python2.\n[INFO] \u0026gt; node-sass@4.11.0 postinstall C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\node-sass [INFO] \u0026gt; node scripts/build.js [ERROR] gyp verb check python checking for Python executable \u0026quot;python2\u0026quot; in the PATH [ERROR] gyp verb `which` failed Error: not found: python2 [ERROR] gyp verb `which` failed at getNotFoundError (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:13:12) [ERROR] gyp verb `which` failed at F (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:68:19) [ERROR] gyp verb `which` failed at E (C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:80:29) [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\which\\which.js:89:16 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\index.js:42:5 [ERROR] gyp verb `which` failed at C:\\XXX\\skywalking\\skywalking-ui\\node_modules\\isexe\\windows.js:36:5 [ERROR] gyp verb `which` failed at FSReqWrap.oncomplete (fs.js:152:21) [ERROR] gyp verb `which` failed code: 'ENOENT' } [ERROR] gyp verb check python checking for Python executable \u0026quot;python\u0026quot; in the PATH [ERROR] gyp verb `which` succeeded python C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE [ERROR] gyp ERR! configure error [ERROR] gyp ERR! stack Error: Command failed: C:\\Users\\XXX\\AppData\\Local\\Programs\\Python\\Python37\\python.EXE -c import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack File \u0026quot;\u0026lt;string\u0026gt;\u0026quot;, line 1 [ERROR] gyp ERR! 
stack import sys; print \u0026quot;%s.%s.%s\u0026quot; % sys.version_info[:3]; [ERROR] gyp ERR! stack ^ [ERROR] gyp ERR! stack SyntaxError: invalid syntax [ERROR] gyp ERR! stack [ERROR] gyp ERR! stack at ChildProcess.exithandler (child_process.js:275:12) [ERROR] gyp ERR! stack at emitTwo (events.js:126:13) [ERROR] gyp ERR! stack at ChildProcess.emit (events.js:214:7) [ERROR] gyp ERR! stack at maybeClose (internal/child_process.js:925:16) [ERROR] gyp ERR! stack at Process.ChildProcess._handle.onexit (internal/child_process.js:209:5) [ERROR] gyp ERR! System Windows_NT 10.0.17134 ...... [INFO] server-starter-es7 ................................. SUCCESS [ 11.657 s] [INFO] apm-webapp ......................................... FAILURE [ 25.857 s] [INFO] apache-skywalking-apm .............................. SKIPPED [INFO] apache-skywalking-apm-es7 .......................... SKIPPED Reason The error has nothing to do with SkyWalking.\nAccording to the issue here (https://github.com/sass/node-sass/issues/1176), if you live in countries where requesting resources from GitHub and npmjs.org runs slow, some precompiled binaries for dependency node-sass would fail to be downloaded during npm install, and npm would try to compile them itself. That\u0026rsquo;s why python2 is needed.\nResolution 1. Use mirror. For instance, if you\u0026rsquo;re in China, please edit skywalking\\apm-webapp\\pom.xml as follows. Find\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmjs.org/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; Replace it with\n\u0026lt;configuration\u0026gt; \u0026lt;arguments\u0026gt;install --registry=https://registry.npmmirror.com/ --sass_binary_site=https://npmmirror.com/mirrors/node-sass/\u0026lt;/arguments\u0026gt; \u0026lt;/configuration\u0026gt; 2. Get a sufficiently powerful VPN. ","title":"Problem: Maven compilation failure with error such as `Error: not found: python2`","url":"/docs/main/v9.7.0/en/faq/maven-compile-npm-failure/"},{"content":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. 
The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. 
HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","title":"Profiling","url":"/docs/main/latest/en/concepts-and-designs/profiling/"},{"content":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. 
When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","title":"Profiling","url":"/docs/main/next/en/concepts-and-designs/profiling/"},{"content":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided two ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. 
The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\n","title":"Profiling","url":"/docs/main/v9.3.0/en/concepts-and-designs/profiling/"},{"content":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  
In the SkyWalking landscape, we provided two ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  
Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\n","title":"Profiling","url":"/docs/main/v9.4.0/en/concepts-and-designs/profiling/"},{"content":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  
Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","title":"Profiling","url":"/docs/main/v9.5.0/en/concepts-and-designs/profiling/"},{"content":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. 
The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. 
HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","title":"Profiling","url":"/docs/main/v9.6.0/en/concepts-and-designs/profiling/"},{"content":"Profiling The profiling is an on-demand diagnosing method to locate bottleneck of the services. These typical scenarios usually are suitable for profiling through various profiling tools\n Some methods slow down the API performance. Too many threads and/or high-frequency I/O per OS process reduce the CPU efficiency. Massive RPC requests block the network to cause responding slowly. Unexpected network requests caused by security issues or codes' bug.  In the SkyWalking landscape, we provided three ways to support profiling within reasonable resource cost.\n In-process profiling is bundled with auto-instrument agents. Out-of-process profiling is powered by eBPF agent. Continuous profiling is powered by eBPF agent.  In-process profiling In-process profiling is primarily provided by auto-instrument agents in the VM-based runtime. This feature resolves the issue \u0026lt;1\u0026gt; through capture the snapshot of the thread stacks periodically. The OAP would aggregate the thread stack per RPC request, and provide a hierarchy graph to indicate the slow methods based on continuous snapshot.\nThe period is usually every 10-100 milliseconds, which is not recommended to be less, due to this capture would usually cause classical stop-the-world for the VM, which would impact the whole process performance.\nLearn more tech details from the post, Use Profiling to Fix the Blind Spot of Distributed Tracing.\nFor now, Java and Python agents support this.\nOut-of-process profiling Out-of-process profiling leverage eBPF technology with origins in the Linux kernel. It provides a way to extend the capabilities of the kernel safely and efficiently.\nOn-CPU Profiling On-CPU profiling is suitable for analyzing thread stacks when service CPU usage is high.\nIf the stack is dumped more times, it means that the thread stack occupies more CPU resources.\nThis is pretty similar with in-process profiling to resolve the issue \u0026lt;1\u0026gt;, but it is made out-of-process and based on Linux eBPF. Meanwhile, this is made for languages without VM mechanism, which caused not supported by in-process agents, such as, C/C++, Rust. Golang is a special case, it exposed the metadata of the VM for eBPF, so, it could be profiled.\nOff-CPU Profiling Off-CPU profiling is suitable for performance issues that are not caused by high CPU usage, but may be on high CPU load. This profiling aims to resolve the issue \u0026lt;2\u0026gt;.\nFor example,\n When there are too many threads in one service, using off-CPU profiling could reveal which threads spend more time context switching. Codes heavily rely on disk I/O or remote service performance would slow down the whole process.  Off-CPU profiling provides two perspectives\n Thread switch count: The number of times a thread switches context. 
When the thread returns to the CPU, it completes one context switch. A thread stack with a higher switch count spends more time context switching. Thread switch duration: The time it takes a thread to switch the context. A thread stack with a higher switch duration spends more time off-CPU.  Learn more tech details about ON/OFF CPU profiling from the post, Pinpoint Service Mesh Critical Performance Impact by using eBPF\nNetwork Profiling Network profiling captures the network packages to analysis traffic at L4(TCP) and L7(HTTP) to recognize network traffic from a specific process or a k8s pod. Through this traffic analysis, locate the root causes of the issues \u0026lt;3\u0026gt; and \u0026lt;4\u0026gt;.\nNetwork profiling provides\n Network topology and identify processes. Observe TCP traffic metrics with TLS status. Observe HTTP traffic metrics. Sample HTTP request/response raw data within tracing context. Observe time costs for local I/O costing on the OS. Such as the time of Linux process HTTP request/response.  Learn more tech details from the post, Diagnose Service Mesh Network Performance with eBPF\nContinuous Profiling Continuous Profiling utilizes monitoring of system, processes, and network, and automatically initiates profiling tasks when conditions meet the configured thresholds and time windows.\nMonitor type Continuous profiling periodically collects the following types of performance metrics for processes and systems:\n System Load: Monitor current system load value. Process CPU: Monitor process CPU usage percent, value in [0-100]. Process Thread Count: Monitor process thread count. HTTP Error Rate: Monitor the process HTTP(/1.x) response error(response status \u0026gt;= 500) percent, value in [0-100]. HTTP Avg Response Time: Monitor the process HTTP(/1.x) response duration(ms).  Trigger Target When the collected metric data matches the configured threshold, the following types of profiling tasks could be triggered:\n On CPU Profiling: Perform eBPF On CPU Profiling on processes that meet the threshold. Off CPU Profiling: Perform eBPF Off CPU Profiling on processes that meet the threshold. Network Profiling: Perform eBPF Network Profiling on all processes within the same instance as the processes that meet the threshold.  ","title":"Profiling","url":"/docs/main/v9.7.0/en/concepts-and-designs/profiling/"},{"content":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the snapshot to the backend server.\nConfiguration    Name Default Environment Key Description     profiling.active true ROVER_PROFILING_ACTIVE Is active the process profiling.   profiling.check_interval 10s ROVER_PROFILING_CHECK_INTERVAL Check the profiling task interval.   profiling.flush_interval 5s ROVER_PROFILING_FLUSH_INTERVAL Combine existing profiling data and report to the backend interval.   profiling.task.on_cpu.dump_period 9ms ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD The profiling stack dump period.   profiling.task.network.report_interval 2s ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_REPORT_INTERVAL The interval of send metrics to the backend.   profiling.task.network.meter_prefix rover_net_p ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_METER_PREFIX The prefix of network profiling metrics name.   profiling.task.network.protocol_analyze.per_cpu_buffer 400KB ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   
profiling.task.network.protocol_analyze.parallels 2 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzers.   profiling.task.network.protocol_analyze.queue_size 5000 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_QUEUE_SIZE The queue size of each parallel analyzer.   profiling.task.network.protocol_analyze.sampling.http.default_request_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_REQUEST_ENCODING The default body encoding when sampling the request.   profiling.task.network.protocol_analyze.sampling.http.default_response_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_RESPONSE_ENCODING The default body encoding when sampling the response.   profiling.continuous.meter_prefix rover_con_p ROVER_PROFILING_CONTINUOUS_METER_PREFIX The prefix of continuous-profiling-related meter names.   profiling.continuous.fetch_interval 1s ROVER_PROFILING_CONTINUOUS_FETCH_INTERVAL The interval for fetching metrics from the system, such as Process CPU, System Load, etc.   profiling.continuous.check_interval 5s ROVER_PROFILING_CONTINUOUS_CHECK_INTERVAL The interval for checking whether the metrics reach the thresholds.   profiling.continuous.trigger.execute_duration 10m ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION The duration of the triggered profiling task.   profiling.continuous.trigger.silence_duration 20m ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION The minimal duration between executions of the same profiling task.    Prepare service Before profiling your service, please make sure the symbol data is still inside the binary file, so that stack symbols can be resolved. It can be checked in these ways:\n objdump: Using objdump --syms path/to/service. readelf: Using readelf --syms path/to/service.  Profiling Type All the profiling tasks use the official Linux perf event function together with kprobe or uprobe to open perf events, and attach the eBPF program to dump stacks.\nOn CPU The On CPU Profiling task uses PERF_COUNT_SW_CPU_CLOCK to profile the process with the CPU clock.\nOff CPU The Off CPU Profiling task attaches a kprobe to finish_task_switch to profile the process.\nNetwork The Network Profiling task intercepts IO-related syscalls and uses uprobes in the process to identify the network traffic and generate the metrics. The following protocols are supported for analysis over the OpenSSL library, BoringSSL library, GoTLS, NodeTLS or plaintext:\n HTTP/1.x HTTP/2 MySQL CQL(The Cassandra Query Language) MongoDB Kafka DNS  Collecting data Network profiling sends metrics and logs to the backend service.\nData Type The network profiling customizes the following types of metrics to represent the network data:\n Counter: Records the total amount of data in a certain period of time. Each counter contains the following data:  Count: The count of the execution. Bytes: The package size of the execution. Exe Time: The consumed time(nanosecond) of the execution.   Histogram: Records the distribution of the data in buckets. TopN: Records the highest latency data in a certain period of time.  Labels Each metric contains the following labels to identify the process relationship:\n   Name Type Description     client_process_id or server_process_id string The ID of the current process, which is determined by the role of the current process in the connection as server or client.   client_local or server_local boolean Whether the remote process is a local process.   
client_address or server_address string The remote process address. ex: IP:port.   side enum The current process is either \u0026ldquo;client\u0026rdquo; or \u0026ldquo;server\u0026rdquo; in this connection.   protocol string Identification the protocol based on the package data content.   is_ssl bool Is the current connection using SSL.    Layer-4 Data Based on the above two data types, the following metrics are provided.\n   Name Type Unit Description     write Counter nanosecond The socket write counter   read Counter nanosecond The socket read counter   write RTT Counter microsecond The socket write RTT counter   connect Counter nanosecond The socket connect/accept with other server/client counter   close Counter nanosecond The socket close counter   retransmit Counter nanosecond The socket retransmit package counter   drop Counter nanosecond The socket drop package counter   write RTT Histogram microsecond The socket write RTT execute time histogram   write execute time Histogram nanosecond The socket write data execute time histogram   read execute time Histogram nanosecond The socket read data execute time histogram   connect execute time Histogram nanosecond The socket connect/accept with other server/client execute time histogram   close execute time Histogram nanosecond The socket close execute time histogram    HTTP/1.x Data Metrics    Name Type Unit Description     http1_request_cpm Counter count The HTTP request counter   http1_response_status_cpm Counter count The count of per HTTP response code   http1_request_package_size Histogram Byte size The request package size   http1_response_package_size Histogram Byte size The response package size   http1_client_duration Histogram millisecond The duration of single HTTP response on the client side   http1_server_duration Histogram millisecond The duration of single HTTP response on the server side    Logs    Name Type Unit Description     slow_traces TopN millisecond The Top N slow trace(id)s   status_4xx TopN millisecond The Top N trace(id)s with response status in 400-499   status_5xx TopN millisecond The Top N trace(id)s with response status in 500-599    Span Attached Event    Name Description     HTTP Request Sampling Complete information about the HTTP request, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   HTTP Response Sampling Complete information about the HTTP response, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   Syscall xxx The methods to use when the process invoke with the network-related syscall method. It\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.    Continuous Profiling The continuous profiling feature monitors low-power target process information, including process CPU usage and network requests, based on configuration passed from the backend. When a threshold is met, it automatically initiates a profiling task(on/off CPU, Network) to provide more detailed analysis.\nMonitor Type System Load Monitor the average system load for the last minute, which is equivalent to using the first value of the load average in the uptime command.\nProcess CPU The target process utilizes a certain percentage of the CPU on the current host.\nProcess Thread Count The real-time number of threads in the target process.\nNetwork Network monitoring uses eBPF technology to collect real-time performance data of the current process responding to requests. 
Requests sent upstream are not monitored by the system.\nCurrently, network monitoring supports parsing of the HTTP/1.x protocol and supports the following types of monitoring:\n Error Rate: The percentage of network request errors, such as HTTP status codes within the range of [500-600), is considered as erroneous. Avg Response Time: Average response time(ms) for specified URI.  Metrics Rover would periodically send collected monitoring data to the backend using the Native Meter Protocol.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    ","title":"Profiling","url":"/docs/skywalking-rover/latest/en/setup/configuration/profiling/"},{"content":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the snapshot to the backend server.\nConfiguration    Name Default Environment Key Description     profiling.active true ROVER_PROFILING_ACTIVE Is active the process profiling.   profiling.check_interval 10s ROVER_PROFILING_CHECK_INTERVAL Check the profiling task interval.   profiling.flush_interval 5s ROVER_PROFILING_FLUSH_INTERVAL Combine existing profiling data and report to the backend interval.   profiling.task.on_cpu.dump_period 9ms ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD The profiling stack dump period.   profiling.task.network.report_interval 2s ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_REPORT_INTERVAL The interval of send metrics to the backend.   profiling.task.network.meter_prefix rover_net_p ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_METER_PREFIX The prefix of network profiling metrics name.   profiling.task.network.protocol_analyze.per_cpu_buffer 400KB ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   profiling.task.network.protocol_analyze.parallels 2 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   profiling.task.network.protocol_analyze.queue_size 5000 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyzer queue.   profiling.task.network.protocol_analyze.sampling.http.default_request_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_REQUEST_ENCODING The default body encoding when sampling the request.   profiling.task.network.protocol_analyze.sampling.http.default_response_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_RESPONSE_ENCODING The default body encoding when sampling the response.   profiling.continuous.meter_prefix rover_con_p ROVER_PROFILING_CONTINUOUS_METER_PREFIX The continuous related meters prefix name.   profiling.continuous.fetch_interval 1s ROVER_PROFILING_CONTINUOUS_FETCH_INTERVAL The interval of fetch metrics from the system, such as Process CPU, System Load, etc.   profiling.continuous.check_interval 5s ROVER_PROFILING_CONTINUOUS_CHECK_INTERVAL The interval of check metrics is reach the thresholds.   profiling.continuous.trigger.execute_duration 10m ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION The duration of the profiling task.   profiling.continuous.trigger.silence_duration 20m ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION The minimal duration between the execution of the same profiling task.    
Prepare service Before profiling your service, please make sure your service already has the symbol data inside the binary file. So we could locate the stack symbol, It could be checked following these ways:\n objdump: Using objdump --syms path/to/service. readelf: Using readelf --syms path/to/service.  Profiling Type All the profiling tasks are using the Linux Official Function and kprobe or uprobe to open perf event, and attach the eBPF Program to dump stacks.\nOn CPU On CPU Profiling task is using PERF_COUNT_SW_CPU_CLOCK to profiling the process with the CPU clock.\nOff CPU Off CPU Profiling task is attach the finish_task_switch in krobe to profiling the process.\nNetwork Network Profiling task is intercept IO-related syscall and urprobe in process to identify the network traffic and generate the metrics. Also, the following protocol are supported for analyzing using OpenSSL library, BoringSSL library, GoTLS, NodeTLS or plaintext:\n HTTP/1.x HTTP/2 MySQL CQL(The Cassandra Query Language) MongoDB Kafka DNS  Collecting data Network profiling uses metrics, logs send to the backend service.\nData Type The network profiling has customized the following two types of metrics to represent the network data:\n Counter: Records the total number of data in a certain period of time. Each counter containers the following data:  Count: The count of the execution. Bytes: The package size of the execution. Exe Time: The consumed time(nanosecond) of the execution.   Histogram: Records the distribution of the data in the bucket. TopN: Record the highest latency data in a certain period of time.  Labels Each metric contains the following labels to identify the process relationship:\n   Name Type Description     client_process_id or server_process_id string The ID of the current process, which is determined by the role of the current process in the connection as server or client.   client_local or server_local boolean The remote process is a local process.   client_address or server_address string The remote process address. ex: IP:port.   side enum The current process is either \u0026ldquo;client\u0026rdquo; or \u0026ldquo;server\u0026rdquo; in this connection.   protocol string Identification the protocol based on the package data content.   is_ssl bool Is the current connection using SSL.    
Layer-4 Data Based on the above two data types, the following metrics are provided.\n   Name Type Unit Description     write Counter nanosecond The socket write counter   read Counter nanosecond The socket read counter   write RTT Counter microsecond The socket write RTT counter   connect Counter nanosecond The socket connect/accept with other server/client counter   close Counter nanosecond The socket close counter   retransmit Counter nanosecond The socket retransmit package counter   drop Counter nanosecond The socket drop package counter   write RTT Histogram microsecond The socket write RTT execute time histogram   write execute time Histogram nanosecond The socket write data execute time histogram   read execute time Histogram nanosecond The socket read data execute time histogram   connect execute time Histogram nanosecond The socket connect/accept with other server/client execute time histogram   close execute time Histogram nanosecond The socket close execute time histogram    HTTP/1.x Data Metrics    Name Type Unit Description     http1_request_cpm Counter count The HTTP request counter   http1_response_status_cpm Counter count The count of per HTTP response code   http1_request_package_size Histogram Byte size The request package size   http1_response_package_size Histogram Byte size The response package size   http1_client_duration Histogram millisecond The duration of single HTTP response on the client side   http1_server_duration Histogram millisecond The duration of single HTTP response on the server side    Logs    Name Type Unit Description     slow_traces TopN millisecond The Top N slow trace(id)s   status_4xx TopN millisecond The Top N trace(id)s with response status in 400-499   status_5xx TopN millisecond The Top N trace(id)s with response status in 500-599    Span Attached Event    Name Description     HTTP Request Sampling Complete information about the HTTP request, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   HTTP Response Sampling Complete information about the HTTP response, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   Syscall xxx The methods to use when the process invoke with the network-related syscall method. It\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.    Continuous Profiling The continuous profiling feature monitors low-power target process information, including process CPU usage and network requests, based on configuration passed from the backend. When a threshold is met, it automatically initiates a profiling task(on/off CPU, Network) to provide more detailed analysis.\nMonitor Type System Load Monitor the average system load for the last minute, which is equivalent to using the first value of the load average in the uptime command.\nProcess CPU The target process utilizes a certain percentage of the CPU on the current host.\nProcess Thread Count The real-time number of threads in the target process.\nNetwork Network monitoring uses eBPF technology to collect real-time performance data of the current process responding to requests. Requests sent upstream are not monitored by the system.\nCurrently, network monitoring supports parsing of the HTTP/1.x protocol and supports the following types of monitoring:\n Error Rate: The percentage of network request errors, such as HTTP status codes within the range of [500-600), is considered as erroneous. Avg Response Time: Average response time(ms) for specified URI.  
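To make the Error Rate and Avg Response Time checks above concrete, the following is a minimal, self-contained Go sketch of how a threshold decision over one window of sampled HTTP/1.x responses could be made. All names here (httpSample, shouldTrigger) and the example thresholds are hypothetical illustrations, not Rover's actual implementation.

```go
package main

import (
	"fmt"
	"time"
)

// httpSample is a hypothetical record of one observed HTTP/1.x response.
type httpSample struct {
	status   int
	duration time.Duration
}

// shouldTrigger evaluates one check window: it returns true when either the
// error rate (status >= 500 counted as an error) or the average response time
// crosses its configured threshold, mirroring the monitor types listed above.
func shouldTrigger(samples []httpSample, maxErrorRatePct float64, maxAvgResp time.Duration) bool {
	if len(samples) == 0 {
		return false
	}
	var errors int
	var total time.Duration
	for _, s := range samples {
		if s.status >= 500 {
			errors++
		}
		total += s.duration
	}
	errorRate := float64(errors) / float64(len(samples)) * 100
	avg := total / time.Duration(len(samples))
	return errorRate >= maxErrorRatePct || avg >= maxAvgResp
}

func main() {
	window := []httpSample{
		{status: 200, duration: 40 * time.Millisecond},
		{status: 502, duration: 900 * time.Millisecond},
		{status: 200, duration: 35 * time.Millisecond},
	}
	// Hypothetical thresholds: 10% error rate or 500ms average response time.
	fmt.Println("trigger profiling:", shouldTrigger(window, 10, 500*time.Millisecond))
}
```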
Metrics Rover would periodically send collected monitoring data to the backend using the Native Meter Protocol.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    ","title":"Profiling","url":"/docs/skywalking-rover/next/en/setup/configuration/profiling/"},{"content":"Profiling The profiling is used to profiling the processes from the Service Discovery, and send the snapshot to the backend server.\nConfiguration    Name Default Environment Key Description     profiling.active true ROVER_PROFILING_ACTIVE Is active the process profiling.   profiling.check_interval 10s ROVER_PROFILING_CHECK_INTERVAL Check the profiling task interval.   profiling.flush_interval 5s ROVER_PROFILING_FLUSH_INTERVAL Combine existing profiling data and report to the backend interval.   profiling.task.on_cpu.dump_period 9ms ROVER_PROFILING_TASK_ON_CPU_DUMP_PERIOD The profiling stack dump period.   profiling.task.network.report_interval 2s ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_REPORT_INTERVAL The interval of send metrics to the backend.   profiling.task.network.meter_prefix rover_net_p ROVER_PROFILING_TASK_NETWORK_TOPOLOGY_METER_PREFIX The prefix of network profiling metrics name.   profiling.task.network.protocol_analyze.per_cpu_buffer 400KB ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   profiling.task.network.protocol_analyze.parallels 2 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   profiling.task.network.protocol_analyze.queue_size 5000 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyzer queue.   profiling.task.network.protocol_analyze.sampling.http.default_request_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_REQUEST_ENCODING The default body encoding when sampling the request.   profiling.task.network.protocol_analyze.sampling.http.default_response_encoding UTF-8 ROVER_PROFILING_TASK_NETWORK_PROTOCOL_ANALYZE_SAMPLING_HTTP_DEFAULT_RESPONSE_ENCODING The default body encoding when sampling the response.   profiling.continuous.meter_prefix rover_con_p ROVER_PROFILING_CONTINUOUS_METER_PREFIX The continuous related meters prefix name.   profiling.continuous.fetch_interval 1s ROVER_PROFILING_CONTINUOUS_FETCH_INTERVAL The interval of fetch metrics from the system, such as Process CPU, System Load, etc.   profiling.continuous.check_interval 5s ROVER_PROFILING_CONTINUOUS_CHECK_INTERVAL The interval of check metrics is reach the thresholds.   profiling.continuous.trigger.execute_duration 10m ROVER_PROFILING_CONTINUOUS_TRIGGER_EXECUTE_DURATION The duration of the profiling task.   profiling.continuous.trigger.silence_duration 20m ROVER_PROFILING_CONTINUOUS_TRIGGER_SILENCE_DURATION The minimal duration between the execution of the same profiling task.    Prepare service Before profiling your service, please make sure your service already has the symbol data inside the binary file. So we could locate the stack symbol, It could be checked following these ways:\n objdump: Using objdump --syms path/to/service. readelf: Using readelf --syms path/to/service.  
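As a programmatic counterpart to the objdump --syms / readelf --syms checks above, the symbol-table test can also be performed with Go's standard debug/elf package. This is only an illustrative sketch of the same check, not part of Rover.

```go
package main

import (
	"debug/elf"
	"fmt"
	"log"
	"os"
)

func main() {
	if len(os.Args) != 2 {
		log.Fatalf("usage: %s path/to/service", os.Args[0])
	}

	// Open the target binary as an ELF file.
	f, err := elf.Open(os.Args[1])
	if err != nil {
		log.Fatalf("cannot open ELF binary: %v", err)
	}
	defer f.Close()

	// Symbols() reads the symbol table; it fails (elf.ErrNoSymbols) when the
	// binary has been stripped, in which case stacks cannot be symbolized.
	syms, err := f.Symbols()
	if err != nil {
		log.Fatalf("no symbol data found, profiling stacks cannot be resolved: %v", err)
	}
	fmt.Printf("found %d symbols; the binary keeps its symbol data\n", len(syms))
}
```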
Profiling Type All the profiling tasks are using the Linux Official Function and kprobe or uprobe to open perf event, and attach the eBPF Program to dump stacks.\nOn CPU On CPU Profiling task is using PERF_COUNT_SW_CPU_CLOCK to profiling the process with the CPU clock.\nOff CPU Off CPU Profiling task is attach the finish_task_switch in krobe to profiling the process.\nNetwork Network Profiling task is intercept IO-related syscall and urprobe in process to identify the network traffic and generate the metrics. Also, the following protocol are supported for analyzing using OpenSSL library, BoringSSL library, GoTLS, NodeTLS or plaintext:\n HTTP/1.x HTTP/2 MySQL CQL(The Cassandra Query Language) MongoDB Kafka DNS  Collecting data Network profiling uses metrics, logs send to the backend service.\nData Type The network profiling has customized the following two types of metrics to represent the network data:\n Counter: Records the total number of data in a certain period of time. Each counter containers the following data:  Count: The count of the execution. Bytes: The package size of the execution. Exe Time: The consumed time(nanosecond) of the execution.   Histogram: Records the distribution of the data in the bucket. TopN: Record the highest latency data in a certain period of time.  Labels Each metric contains the following labels to identify the process relationship:\n   Name Type Description     client_process_id or server_process_id string The ID of the current process, which is determined by the role of the current process in the connection as server or client.   client_local or server_local boolean The remote process is a local process.   client_address or server_address string The remote process address. ex: IP:port.   side enum The current process is either \u0026ldquo;client\u0026rdquo; or \u0026ldquo;server\u0026rdquo; in this connection.   protocol string Identification the protocol based on the package data content.   is_ssl bool Is the current connection using SSL.    
Layer-4 Data Based on the above two data types, the following metrics are provided.\n   Name Type Unit Description     write Counter nanosecond The socket write counter   read Counter nanosecond The socket read counter   write RTT Counter microsecond The socket write RTT counter   connect Counter nanosecond The socket connect/accept with other server/client counter   close Counter nanosecond The socket close counter   retransmit Counter nanosecond The socket retransmit package counter   drop Counter nanosecond The socket drop package counter   write RTT Histogram microsecond The socket write RTT execute time histogram   write execute time Histogram nanosecond The socket write data execute time histogram   read execute time Histogram nanosecond The socket read data execute time histogram   connect execute time Histogram nanosecond The socket connect/accept with other server/client execute time histogram   close execute time Histogram nanosecond The socket close execute time histogram    HTTP/1.x Data Metrics    Name Type Unit Description     http1_request_cpm Counter count The HTTP request counter   http1_response_status_cpm Counter count The count of per HTTP response code   http1_request_package_size Histogram Byte size The request package size   http1_response_package_size Histogram Byte size The response package size   http1_client_duration Histogram millisecond The duration of single HTTP response on the client side   http1_server_duration Histogram millisecond The duration of single HTTP response on the server side    Logs    Name Type Unit Description     slow_traces TopN millisecond The Top N slow trace(id)s   status_4xx TopN millisecond The Top N trace(id)s with response status in 400-499   status_5xx TopN millisecond The Top N trace(id)s with response status in 500-599    Span Attached Event    Name Description     HTTP Request Sampling Complete information about the HTTP request, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   HTTP Response Sampling Complete information about the HTTP response, it\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.   Syscall xxx The methods to use when the process invoke with the network-related syscall method. It\u0026rsquo;s only reported when it matches slow/4xx/5xx traces.    Continuous Profiling The continuous profiling feature monitors low-power target process information, including process CPU usage and network requests, based on configuration passed from the backend. When a threshold is met, it automatically initiates a profiling task(on/off CPU, Network) to provide more detailed analysis.\nMonitor Type System Load Monitor the average system load for the last minute, which is equivalent to using the first value of the load average in the uptime command.\nProcess CPU The target process utilizes a certain percentage of the CPU on the current host.\nProcess Thread Count The real-time number of threads in the target process.\nNetwork Network monitoring uses eBPF technology to collect real-time performance data of the current process responding to requests. Requests sent upstream are not monitored by the system.\nCurrently, network monitoring supports parsing of the HTTP/1.x protocol and supports the following types of monitoring:\n Error Rate: The percentage of network request errors, such as HTTP status codes within the range of [500-600), is considered as erroneous. Avg Response Time: Average response time(ms) for specified URI.  
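The configuration table above defines an execute duration for a triggered task and a silence duration between runs of the same task. The sketch below illustrates, under those stated semantics, how such a silence window could gate repeated triggers; the triggerState type and its method are hypothetical and not Rover's actual code.

```go
package main

import (
	"fmt"
	"time"
)

// triggerState is a hypothetical tracker for one continuous-profiling cause
// (for example "process CPU over threshold"), showing how execute_duration
// and silence_duration could interact.
type triggerState struct {
	executeDuration time.Duration // how long a triggered task runs
	silenceDuration time.Duration // minimal gap between runs of the same task
	lastTriggered   time.Time
}

// tryTrigger starts a task only when the silence window since the previous
// run of the same task has elapsed.
func (t *triggerState) tryTrigger(now time.Time) (start bool, until time.Time) {
	if !t.lastTriggered.IsZero() && now.Sub(t.lastTriggered) < t.silenceDuration {
		return false, time.Time{}
	}
	t.lastTriggered = now
	return true, now.Add(t.executeDuration)
}

func main() {
	s := &triggerState{executeDuration: 10 * time.Minute, silenceDuration: 20 * time.Minute}
	now := time.Now()

	if ok, until := s.tryTrigger(now); ok {
		fmt.Println("profiling task started, runs until", until.Format(time.Kitchen))
	}
	// A second threshold breach 5 minutes later falls inside the silence window.
	if ok, _ := s.tryTrigger(now.Add(5 * time.Minute)); !ok {
		fmt.Println("still in the silence window, task not re-triggered")
	}
}
```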
Metrics Rover would periodically send collected monitoring data to the backend using the Native Meter Protocol.\n   Name Unit Description     process_cpu (0-100)% The CPU usage percent   process_thread_count count The thread count of process   system_load count The average system load for the last minute, each process have same value   http_error_rate (0-100)% The network request error rate percentage   http_avg_response_time ms The network average response duration    ","title":"Profiling","url":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/profiling/"},{"content":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","title":"Profiling APIs","url":"/docs/main/latest/en/api/profiling-protocol/"},{"content":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","title":"Profiling APIs","url":"/docs/main/next/en/api/profiling-protocol/"},{"content":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","title":"Profiling APIs","url":"/docs/main/v9.4.0/en/api/profiling-protocol/"},{"content":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","title":"Profiling APIs","url":"/docs/main/v9.5.0/en/api/profiling-protocol/"},{"content":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","title":"Profiling APIs","url":"/docs/main/v9.6.0/en/api/profiling-protocol/"},{"content":"Profiling APIs SkyWalking offers two types of Profiling, in-process and out-process, each with its own API.\nIn-process profiling APIs In-process profiling commonly interacts with auto-instrument agents. 
It gathers stack traces of programs and sends the data to the OAP for further analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.language.profile.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/language/profile/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;service ProfileTask { // query all sniffer need to execute profile task commands  rpc getProfileTaskCommands (ProfileTaskCommandQuery) returns (Commands) { } // collect dumped thread snapshot  rpc collectSnapshot (stream ThreadSnapshot) returns (Commands) { } // report profiling task finished  rpc reportTaskFinish (ProfileTaskFinishReport) returns (Commands) { }}message ProfileTaskCommandQuery { // current sniffer information  string service = 1; string serviceInstance = 2; // last command timestamp  int64 lastCommandTime = 3;}// dumped thread snapshot message ThreadSnapshot { // profile task id  string taskId = 1; // dumped segment id  string traceSegmentId = 2; // dump timestamp  int64 time = 3; // snapshot dump sequence, start with zero  int32 sequence = 4; // snapshot stack  ThreadStack stack = 5;}message ThreadStack { // stack code signature list  repeated string codeSignatures = 1;}// profile task finished report message ProfileTaskFinishReport { // current sniffer information  string service = 1; string serviceInstance = 2; // profile task  string taskId = 3;}Out-process profiling Out-process profiling interacts with eBPF agent, which receives tasks and captures data, then reports it to the OAP for further analysis.\nProcess APIs Similar to Service Instance, all processes must be reported to the OAP storage segment prior to analysis.\nsyntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.process.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/process/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the detected processes and report them. service EBPFProcessService { // Report discovered process in Rover  rpc reportProcesses (EBPFProcessReportList) returns (EBPFReportProcessDownstream) { } // Keep the process alive in the backend.  rpc keepAlive (EBPFProcessPingPkgList) returns (Commands) { }}message EBPFProcessReportList { repeated EBPFProcessProperties processes = 1; // An ID generated by eBPF agent, should be unique globally.  
string ebpfAgentID = 2;}message EBPFProcessProperties { // The Process metadata  oneof metadata { EBPFHostProcessMetadata hostProcess = 1; EBPFKubernetesProcessMetadata k8sProcess = 2; }}message EBPFHostProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}// Process Entity metadata message EBPFProcessEntityMetadata { // [required] Process belong layer name which define in the backend  string layer = 1; // [required] Process belong service name  string serviceName = 2; // [required] Process belong service instance name  string instanceName = 3; // [required] Process name  string processName = 4; // Process labels for aggregate from service  repeated string labels = 5;}// Kubernetes process metadata message EBPFKubernetesProcessMetadata { // [required] Entity metadata  // Must ensure that entity information is unique at the time of reporting  EBPFProcessEntityMetadata entity = 1; // [required] The Process id of the host  int32 pid = 2; // [optional] properties of the process  repeated KeyStringValuePair properties = 3;}message EBPFReportProcessDownstream { repeated EBPFProcessDownstream processes = 1;}message EBPFProcessDownstream { // Generated process id  string processId = 1; // Locate the process by basic information  oneof process { EBPFHostProcessDownstream hostProcess = 2; EBPFKubernetesProcessDownstream k8sProcess = 3; }}message EBPFHostProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}// Kubernetes process downstream message EBPFKubernetesProcessDownstream { int32 pid = 1; EBPFProcessEntityMetadata entityMetadata = 2;}message EBPFProcessPingPkgList { repeated EBPFProcessPingPkg processes = 1; // An ID generated by eBPF agent, should be unique globally.  string ebpfAgentID = 2;}message EBPFProcessPingPkg { // Process entity  EBPFProcessEntityMetadata entityMetadata = 1; // Minimize necessary properties  repeated KeyStringValuePair properties = 2;}Out-process profiling APIs syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.ebpf.profiling.v3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/ebpf/profiling/v3\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the Rover Process profiling task and upload profiling data. 
service EBPFProfilingService { // Query profiling (start or stop) tasks  rpc queryTasks (EBPFProfilingTaskQuery) returns (Commands) { } // collect profiling data  rpc collectProfilingData (stream EBPFProfilingData) returns (Commands) { }}message EBPFProfilingTaskQuery { // rover instance id  string roverInstanceId = 1; // latest task update time  int64 latestUpdateTime = 2;}message EBPFProfilingData { // task metadata  EBPFProfilingTaskMetadata task = 1; // profiling data  oneof profiling { EBPFOnCPUProfiling onCPU = 2; EBPFOffCPUProfiling offCPU = 3; }}message EBPFProfilingTaskMetadata { // profiling task id  string taskId = 1; // profiling process id  string processId = 2; // the start time of this profiling process  int64 profilingStartTime = 3; // report time  int64 currentTime = 4;}message EBPFProfilingStackMetadata { // stack type  EBPFProfilingStackType stackType = 1; // stack id from kernel provide  int32 stackId = 2; // stack symbols  repeated string stackSymbols = 3;}enum EBPFProfilingStackType { PROCESS_KERNEL_SPACE = 0; PROCESS_USER_SPACE = 1;}message EBPFOnCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // stack counts  int32 dumpCount = 2;}message EBPFOffCPUProfiling { // stack data in one task(thread)  repeated EBPFProfilingStackMetadata stacks = 1; // total count of the process is switched to off cpu by the scheduler.  int32 switchCount = 2; // where time(nanoseconds) is spent waiting while blocked on I/O, locks, timers, paging/swapping, etc.  int64 duration = 3;}","title":"Profiling APIs","url":"/docs/main/v9.7.0/en/api/profiling-protocol/"},{"content":"Project Structure  agent: The agent core files copied when hybrid compilation. bin: The binary files of Go agent program. docs: The documentation of Go agent. log: The log configuration for adapt the Golang agent. plugins: The plugins for adapt the frameworks.  core: Agent core and API for the SkyWalking Agent, the plugins should import this module. xxx: The plugins for adapt the framework.   reporter: The reporter for adapt the SkyWalking backend. tools/go-agent: The Golang Agent enhancement program.  cmd: The agent starter. config: The application register configuration for agent. instrument: Perform enhancement on different packages during hybrid compilation.  agentcore: When compiling SkyWalking Go, enhance its code, mainly for Agent Core file copying. api: The API of the instrument. entry: When compiling the main package, enhance its code, mainly focusing on starting the Agent system. plugins: When detecting a framework that requires enhancement, enhance its. For specific operation details, please refer to the Key Principle document. reporter: When compiling the reporter package under agent, enhance its code, mainly focusing on starting the reporter. runtime: When compiling the runtime package, enhance its code. For specific operation details, please refer to the Key Principle document.   tools: helps to build the agent.    ","title":"Project Structure","url":"/docs/skywalking-go/latest/en/concepts-and-designs/project-structure/"},{"content":"Project Structure  agent: The agent core files copied when hybrid compilation. bin: The binary files of Go agent program. docs: The documentation of Go agent. log: The log configuration for adapt the Golang agent. plugins: The plugins for adapt the frameworks.  core: Agent core and API for the SkyWalking Agent, the plugins should import this module. xxx: The plugins for adapt the framework.   
reporter: The reporter for adapt the SkyWalking backend. tools/go-agent: The Golang Agent enhancement program.  cmd: The agent starter. config: The application register configuration for agent. instrument: Perform enhancement on different packages during hybrid compilation.  agentcore: When compiling SkyWalking Go, enhance its code, mainly for Agent Core file copying. api: The API of the instrument. entry: When compiling the main package, enhance its code, mainly focusing on starting the Agent system. plugins: When detecting a framework that requires enhancement, enhance its. For specific operation details, please refer to the Key Principle document. reporter: When compiling the reporter package under agent, enhance its code, mainly focusing on starting the reporter. runtime: When compiling the runtime package, enhance its code. For specific operation details, please refer to the Key Principle document.   tools: helps to build the agent.    ","title":"Project Structure","url":"/docs/skywalking-go/next/en/concepts-and-designs/project-structure/"},{"content":"Project Structure  agent: The agent core files copied when hybrid compilation. bin: The binary files of Go agent program. docs: The documentation of Go agent. log: The log configuration for adapt the Golang agent. plugins: The plugins for adapt the frameworks.  core: Agent core and API for the SkyWalking Agent, the plugins should import this module. xxx: The plugins for adapt the framework.   reporter: The reporter for adapt the SkyWalking backend. tools/go-agent: The Golang Agent enhancement program.  cmd: The agent starter. config: The application register configuration for agent. instrument: Perform enhancement on different packages during hybrid compilation.  agentcore: When compiling SkyWalking Go, enhance its code, mainly for Agent Core file copying. api: The API of the instrument. entry: When compiling the main package, enhance its code, mainly focusing on starting the Agent system. plugins: When detecting a framework that requires enhancement, enhance its. For specific operation details, please refer to the Key Principle document. reporter: When compiling the reporter package under agent, enhance its code, mainly focusing on starting the reporter. runtime: When compiling the runtime package, enhance its code. For specific operation details, please refer to the Key Principle document.   tools: helps to build the agent.    ","title":"Project Structure","url":"/docs/skywalking-go/v0.4.0/en/concepts-and-designs/project-structure/"},{"content":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, and common utils.  internal/pkg: Sharing with Core and Plugins, such as api and utils. internal/satellite: The core of Satellite.   plugins: Contains all plugins.  plugins/{type}: Contains the plugins of this {type}. Satellite has 9 plugin types. plugins/{type}/api: Contains the plugin definition and initializer. plugins/{type}/{plugin-name}: Contains the specific plugin. init.go: Register the plugins to the plugin registry.    . ├── CHANGES.md ├── cmd ├── configs ├── docs ├── go.sum ├── internal │ ├── pkg │ └── satellite ├── plugins │ ├── client │ ├── fallbacker │ ├── fetcher │ ├── filter │ ├── forwarder │ ├── init.go │ ├── parser │ ├── queue │ ├── receiver │ └── server ","title":"Project Structure","url":"/docs/skywalking-satellite/latest/en/concepts-and-designs/project_structue/"},{"content":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. 
internal: Core, API, and common utils.  internal/pkg: Sharing with Core and Plugins, such as api and utils. internal/satellite: The core of Satellite.   plugins: Contains all plugins.  plugins/{type}: Contains the plugins of this {type}. Satellite has 9 plugin types. plugins/{type}/api: Contains the plugin definition and initializer. plugins/{type}/{plugin-name}: Contains the specific plugin. init.go: Register the plugins to the plugin registry.    . ├── CHANGES.md ├── cmd ├── configs ├── docs ├── go.sum ├── internal │ ├── pkg │ └── satellite ├── plugins │ ├── client │ ├── fallbacker │ ├── fetcher │ ├── filter │ ├── forwarder │ ├── init.go │ ├── parser │ ├── queue │ ├── receiver │ └── server ","title":"Project Structure","url":"/docs/skywalking-satellite/next/en/concepts-and-designs/project_structue/"},{"content":"Project Structure  cmd: The starter of Satellite. configs: Satellite configs. internal: Core, API, and common utils.  internal/pkg: Sharing with Core and Plugins, such as api and utils. internal/satellite: The core of Satellite.   plugins: Contains all plugins.  plugins/{type}: Contains the plugins of this {type}. Satellite has 9 plugin types. plugins/{type}/api: Contains the plugin definition and initializer. plugins/{type}/{plugin-name}: Contains the specific plugin. init.go: Register the plugins to the plugin registry.    . ├── CHANGES.md ├── cmd ├── configs ├── docs ├── go.sum ├── internal │ ├── pkg │ └── satellite ├── plugins │ ├── client │ ├── fallbacker │ ├── fetcher │ ├── filter │ ├── forwarder │ ├── init.go │ ├── parser │ ├── queue │ ├── receiver │ └── server ","title":"Project Structure","url":"/docs/skywalking-satellite/v1.2.0/en/concepts-and-designs/project_structue/"},{"content":"Prometheus Fetcher Prometheus fetcher reads metrics from Prometheus endpoint, and transfer the metrics into SkyWalking native format for the MAL engine.\nConfiguration file Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching services and their instances, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/fetcher-prom-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;duration\u0026gt;: This is parsed into a textual representation of a duration. The formats accepted are based on the ISO-8601 duration format PnDTnHnMn.nS with days considered to be exactly 24 hours. \u0026lt;labelname\u0026gt;: A string matching the regular expression [a-zA-Z_][a-zA-Z0-9_]*. \u0026lt;labelvalue\u0026gt;: A string of unicode characters. \u0026lt;host\u0026gt;: A valid string consisting of a hostname or IP followed by an optional port number. \u0026lt;path\u0026gt;: A valid URL path. \u0026lt;string\u0026gt;: A regular string.  # How frequently to fetch targets.fetcherInterval:\u0026lt;duration\u0026gt;# Per-fetch timeout when fetching this target.fetcherTimeout:\u0026lt;duration\u0026gt;# The HTTP resource path on which to fetch metrics from targets.metricsPath:\u0026lt;path\u0026gt;#Statically configured targets.staticConfig:# The targets specified by the static config.targets:[- \u0026lt;target\u0026gt; ]# Labels assigned to all metrics fetched from the targets.labels:[ \u0026lt;labelname\u0026gt;:\u0026lt;labelvalue\u0026gt; ... 
]# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ] # The url of target exporter. the format should be complied with \u0026#34;java.net.URI\u0026#34;url:\u0026lt;string\u0026gt;# The path of root CA file.sslCaFilePath:\u0026lt;string\u0026gt;\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;To know more about MAL, please refer to mal.md\nActive Fetcher Rules Suppose you want to enable some metric-custom.yaml files stored at fetcher-prom-rules, append its name to enabledRules of prometheus-fetcher as follows:\nprometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self,metric-custom\u0026#34;}","title":"Prometheus Fetcher","url":"/docs/main/v9.0.0/en/setup/backend/prometheus-metrics/"},{"content":"Prometheus Fetcher Prometheus fetcher reads metrics from the Prometheus endpoint and transfers the metrics into SkyWalking native format for the MAL engine.\nConfiguration file Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching services and their instances, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/fetcher-prom-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;duration\u0026gt;: This is parsed into a textual representation of a duration. The accepted formats are based on the ISO-8601 duration format PnDTnHnMn.nS with days of exactly 24 hours. \u0026lt;labelname\u0026gt;: A string matching the regular expression [a-zA-Z_][a-zA-Z0-9_]*. \u0026lt;labelvalue\u0026gt;: A string of Unicode characters. \u0026lt;host\u0026gt;: A valid string consisting of a hostname or IP followed by an optional port number. \u0026lt;path\u0026gt;: A valid URL path. \u0026lt;string\u0026gt;: A regular string.  # How frequently to fetch targets.fetcherInterval:\u0026lt;duration\u0026gt;# Per-fetch timeout when fetching this target.fetcherTimeout:\u0026lt;duration\u0026gt;# The HTTP resource path on which to fetch metrics from targets.metricsPath:\u0026lt;path\u0026gt;#Statically configured targets.staticConfig:# The targets specified by the static config.targets:[- \u0026lt;target\u0026gt; ]# Labels assigned to all metrics fetched from the targets.labels:[ \u0026lt;labelname\u0026gt;:\u0026lt;labelvalue\u0026gt; ... 
]# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ] # The url of target exporter. the format should be complied with \u0026#34;java.net.URI\u0026#34;url:\u0026lt;string\u0026gt;# The path of root CA file.sslCaFilePath:\u0026lt;string\u0026gt;\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;To know more about MAL, please refer to mal.md\nActive Fetcher Rules Suppose you want to enable some metric-custom.yaml files stored at fetcher-prom-rules, append its name to enabledRules of prometheus-fetcher as follows:\nprometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self,metric-custom\u0026#34;}","title":"Prometheus Fetcher","url":"/docs/main/v9.1.0/en/setup/backend/prometheus-metrics/"},{"content":"Prometheus Fetcher Prometheus fetcher reads metrics from the Prometheus endpoint and transfers the metrics into SkyWalking native format for the MAL engine.\nConfiguration file Prometheus fetcher is configured via a configuration file. The configuration file defines everything related to fetching services and their instances, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/fetcher-prom-rules.\nThe file is written in YAML format, defined by the scheme described below. Brackets indicate that a parameter is optional.\nA full example can be found here\nGeneric placeholders are defined as follows:\n \u0026lt;duration\u0026gt;: This is parsed into a textual representation of a duration. The accepted formats are based on the ISO-8601 duration format PnDTnHnMn.nS with days of exactly 24 hours. \u0026lt;labelname\u0026gt;: A string matching the regular expression [a-zA-Z_][a-zA-Z0-9_]*. \u0026lt;labelvalue\u0026gt;: A string of Unicode characters. \u0026lt;host\u0026gt;: A valid string consisting of a hostname or IP followed by an optional port number. \u0026lt;path\u0026gt;: A valid URL path. \u0026lt;string\u0026gt;: A regular string.  # How frequently to fetch targets.fetcherInterval:\u0026lt;duration\u0026gt;# Per-fetch timeout when fetching this target.fetcherTimeout:\u0026lt;duration\u0026gt;# The HTTP resource path on which to fetch metrics from targets.metricsPath:\u0026lt;path\u0026gt;#Statically configured targets.staticConfig:# The targets specified by the static config.targets:[- \u0026lt;target\u0026gt; ]# Labels assigned to all metrics fetched from the targets.labels:[ \u0026lt;labelname\u0026gt;:\u0026lt;labelvalue\u0026gt; ... 
]# initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# filter the metrics, only those metrics that satisfy this condition will be passed into the `metricsRules` below.filter: \u0026lt;closure\u0026gt; # example:\u0026#39;{ tags -\u0026gt; tags.job_name == \u0026#34;vm-monitoring\u0026#34; }\u0026#39;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# Metrics rule allow you to recompute queries.metricsRules:[- \u0026lt;metric_rules\u0026gt; ] # The url of target exporter. the format should be complied with \u0026#34;java.net.URI\u0026#34;url:\u0026lt;string\u0026gt;# The path of root CA file.sslCaFilePath:\u0026lt;string\u0026gt;\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;To know more about MAL, please refer to mal.md\nActive Fetcher Rules Suppose you want to enable some metric-custom.yaml files stored at fetcher-prom-rules, append its name to enabledRules of prometheus-fetcher as follows:\nprometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self,metric-custom\u0026#34;}","title":"Prometheus Fetcher","url":"/docs/main/v9.2.0/en/setup/backend/prometheus-metrics/"},{"content":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. 
If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. 
no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    
For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  
Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. --top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  
top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","title":"PromQL Service","url":"/docs/main/latest/en/api/promql-service/"},{"content":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. 
If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. 
no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp, if end time is not present, use current time as default end time yes yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector 
yes yes   start start timestamp no yes   end end timestamp, if end time is not present, use current time as default end time yes yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --metric labels: Used to filter the value labels to be returned  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, p=\u0026#39;50,75,90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;p\u0026#34;: \u0026#34;50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;p\u0026#34;: \u0026#34;75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;p\u0026#34;: \u0026#34;90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","title":"PromQL Service","url":"/docs/main/next/en/api/promql-service/"},{"content":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromeQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;scope\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) All scopes are not supported completely, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","title":"PromQL Service","url":"/docs/main/v9.4.0/en/api/promql-service/"},{"content":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","title":"PromQL Service","url":"/docs/main/v9.5.0/en/api/promql-service/"},{"content":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","title":"PromQL Service","url":"/docs/main/v9.6.0/en/api/promql-service/"},{"content":"PromQL Service PromQL(Prometheus Query Language) Service exposes Prometheus Querying HTTP APIs including the bundled PromQL expression system. Third-party systems or visualization platforms that already support PromQL (such as Grafana), could obtain metrics through PromQL Service.\nAs SkyWalking and Prometheus have fundamental differences in metrics classification, format, storage, etc. The PromQL Service supported will be a subset of the complete PromQL.\nDetails Of Supported Protocol The following doc describes the details of the supported protocol and compared it to the PromQL official documentation. If not mentioned, it will not be supported by default.\nTime series Selectors Instant Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The label matching operators only support = instead of regular expressions.\nRange Vector Selectors For example: select metric service_cpm which the service is $service and the layer is $layer within the last 5 minutes.\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;}[5m] Time Durations    Unit Definition Support     ms milliseconds yes   s seconds yes   m minutes yes   h hours yes   d days yes   w weeks yes   y years no    Binary operators Arithmetic binary operators    Operator Definition Support     + addition yes   - subtraction yes   * multiplication yes   / division yes   % modulo yes   ^ power/exponentiation no    Between two scalars For example:\n1 + 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} / 100 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} + service_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} Note: The operations between vectors require the same metric and labels, and don\u0026rsquo;t support Vector matching.\nComparison binary operators    Operator Definition Support     == equal yes   != not-equal yes   \u0026gt; greater-than yes   \u0026lt; less-than yes   \u0026gt;= greater-or-equal yes   \u0026lt;= less-or-equal) yes    Between two scalars For example:\n1 \u0026gt; bool 2 Between an instant vector and a scalar For example:\nservice_cpm{service=\u0026#39;$service\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; 1 Between two instant vectors For example:\nservice_cpm{service=\u0026#39;service_A\u0026#39;, layer=\u0026#39;$layer\u0026#39;} \u0026gt; service_cpm{service=\u0026#39;service_B\u0026#39;, layer=\u0026#39;$layer\u0026#39;} HTTP API Expression queries Instant queries GET|POST /api/v1/query    Parameter Definition Support Optional     query prometheus expression yes no   time The latest metrics value from current time to this time is returned. If time is empty, the default look-back time is 2 minutes. 
yes yes   timeout evaluation timeout no ignore    For example:\n/api/v1/query?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;} Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677548400, \u0026#34;6\u0026#34; ] } ] } } Range queries GET|POST /api/v1/query_range    Parameter Definition Support Optional     query prometheus expression yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no   step SkyWalking will automatically fit Step(DAY, HOUR, MINUTE) through start and end. no ignore   timeout evaluation timeout no ignore    For example:\n/api/v1/query_range?query=service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;matrix\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;values\u0026#34;: [ [ 1677479280, \u0026#34;18\u0026#34; ], [ 1677479340, \u0026#34;18\u0026#34; ], [ 1677479400, \u0026#34;18\u0026#34; ], [ 1677479460, \u0026#34;18\u0026#34; ], [ 1677479520, \u0026#34;18\u0026#34; ], [ 1677479580, \u0026#34;18\u0026#34; ] ] } ] } } Querying metadata Finding series by label matchers GET|POST /api/v1/series    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp, seconds yes no   end end timestamp, seconds yes no    For example:\n/api/v1/series?match[]=service_traffic{layer=\u0026#39;GENERAL\u0026#39;}\u0026amp;start=1677479336\u0026amp;end=1677479636 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::recommendation\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::app\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::gateway\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; }, { \u0026#34;__name__\u0026#34;: \u0026#34;service_traffic\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::frontend\u0026#34;, 
\u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34; } ] } Note: SkyWalking\u0026rsquo;s metadata exists in the following metrics(traffics):\n service_traffic instance_traffic endpoint_traffic  Getting label names GET|POST /api/v1/labels    Parameter Definition Support Optional     match[] series selector yes yes   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/labels?match[]=instance_jvm_cpu\u0026#39; Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;layer\u0026#34;, \u0026#34;service\u0026#34;, \u0026#34;top_n\u0026#34;, \u0026#34;order\u0026#34;, \u0026#34;service_instance\u0026#34;, \u0026#34;parent_service\u0026#34; ] } Querying label values GET /api/v1/label/\u0026lt;label_name\u0026gt;/values    Parameter Definition Support Optional     match[] series selector yes no   start start timestamp no yes   end end timestamp no yes    For example:\n/api/v1/label/__name__/values Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: [ \u0026#34;meter_mysql_instance_qps\u0026#34;, \u0026#34;service_cpm\u0026#34;, \u0026#34;envoy_cluster_up_rq_active\u0026#34;, \u0026#34;instance_jvm_class_loaded_class_count\u0026#34;, \u0026#34;k8s_cluster_memory_requests\u0026#34;, \u0026#34;meter_vm_memory_used\u0026#34;, \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;, \u0026#34;meter_vm_memory_total\u0026#34;, \u0026#34;instance_jvm_thread_live_count\u0026#34;, \u0026#34;instance_jvm_thread_timed_waiting_state_thread_count\u0026#34;, \u0026#34;browser_app_page_first_pack_percentile\u0026#34;, \u0026#34;instance_clr_max_worker_threads\u0026#34;, ... ] } Querying metric metadata GET /api/v1/metadata    Parameter Definition Support Optional     limit maximum number of metrics to return yes yes   metric metric name, support regular expression yes yes    For example:\n/api/v1/metadata?limit=10 Result:\n{ \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;meter_mysql_instance_qps\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;meter_apisix_sv_bandwidth_unmatched\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], \u0026#34;service_cpm\u0026#34;: [ { \u0026#34;type\u0026#34;: \u0026#34;gauge\u0026#34;, \u0026#34;help\u0026#34;: \u0026#34;\u0026#34;, \u0026#34;unit\u0026#34;: \u0026#34;\u0026#34; } ], ... } } Metrics Type For Query Supported Metrics Scope(Catalog) Not all scopes are supported for now, please check the following table:\n   Scope Support     Service yes   ServiceInstance yes   Endpoint yes   ServiceRelation no   ServiceInstanceRelation no   Process no   ProcessRelation no    General labels Each metric contains general labels: layer. 
Different metrics will have different labels depending on their Scope and metric value type.\n   Query Labels Scope Expression Example     layer, service Service service_cpm{service='$service', layer='$layer'}   layer, service, service_instance ServiceInstance service_instance_cpm{service='$service', service_instance='$service_instance', layer='$layer'}   layer, service, endpoint Endpoint endpoint_cpm{service='$service', endpoint='$endpoint', layer='$layer'}    Common Value Metrics  Query Labels:  {General labels}  Expression Example:  service_cpm{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677490740, \u0026#34;3\u0026#34; ] } ] } } Labeled Value Metrics  Query Labels:  --{General labels} --labels: Used to filter the value labels to be returned --relabels: Used to rename the returned value labels note: The number and order of labels must match the number and order of relabels.  Expression Example:  service_percentile{service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, labels=\u0026#39;0,1,2\u0026#39;, relabels=\u0026#39;P50,P75,P90\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P50\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P75\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_percentile\u0026#34;, \u0026#34;label\u0026#34;: \u0026#34;P90\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;service\u0026#34;: \u0026#34;agent::songs\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677493380, \u0026#34;0\u0026#34; ] } ] } } Sort Metrics  Query Labels:  --parent_service: \u0026lt;optional\u0026gt; Name of the parent service. 
--top_n: The max number of the selected metric value --order: ASC/DES  Expression Example:  service_instance_cpm{parent_service=\u0026#39;agent::songs\u0026#39;, layer=\u0026#39;GENERAL\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;651db53c0e3843d8b9c4c53a90b4992a@10.4.0.28\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;14\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;4c04cf44d6bd408880556aa3c2cfb620@10.4.0.232\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;6\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;service_instance_cpm\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;GENERAL\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;ServiceInstance\u0026#34;, \u0026#34;service_instance\u0026#34;: \u0026#34;f5ac8ead31af4e6795cae761729a2742@10.4.0.236\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677494280, \u0026#34;5\u0026#34; ] } ] } } Sampled Records  Query Labels:  --parent_service: Name of the parent service --top_n: The max number of the selected records value --order: ASC/DES  Expression Example:  top_n_database_statement{parent_service=\u0026#39;localhost:-1\u0026#39;, layer=\u0026#39;VIRTUAL_DATABASE\u0026#39;, top_n=\u0026#39;10\u0026#39;, order=\u0026#39;DES\u0026#39;}  Result (Instant Query):  { \u0026#34;status\u0026#34;: \u0026#34;success\u0026#34;, \u0026#34;data\u0026#34;: { \u0026#34;resultType\u0026#34;: \u0026#34;vector\u0026#34;, \u0026#34;result\u0026#34;: [ { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] }, { \u0026#34;metric\u0026#34;: { \u0026#34;__name__\u0026#34;: \u0026#34;top_n_database_statement\u0026#34;, \u0026#34;layer\u0026#34;: \u0026#34;VIRTUAL_DATABASE\u0026#34;, \u0026#34;scope\u0026#34;: \u0026#34;Service\u0026#34;, \u0026#34;record\u0026#34;: \u0026#34;select song0_.id as 
id1_0_, song0_.artist as artist2_0_, song0_.genre as genre3_0_, song0_.liked as liked4_0_, song0_.name as name5_0_ from song song0_ where song0_.liked\u0026gt;?\u0026#34; }, \u0026#34;value\u0026#34;: [ 1677501360, \u0026#34;1\u0026#34; ] } ] } } ","title":"PromQL Service","url":"/docs/main/v9.7.0/en/api/promql-service/"},{"content":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest\n  SendResponse\n  Service\n    banyandb/common/v1/common.proto\n  Group\n  IntervalRule\n  Metadata\n  ResourceOpts\n  Catalog\n  IntervalRule.Unit\n    banyandb/database/v1/database.proto\n  Node\n  Shard\n  Role\n    banyandb/model/v1/common.proto\n  FieldValue\n  Float\n  Int\n  IntArray\n  Str\n  StrArray\n  TagFamilyForWrite\n  TagValue\n  AggregationFunction\n    banyandb/model/v1/query.proto\n  Condition\n  Criteria\n  LogicalExpression\n  QueryOrder\n  Tag\n  TagFamily\n  TagProjection\n  TagProjection.TagFamily\n  TimeRange\n  Condition.BinaryOp\n  LogicalExpression.LogicalOp\n  Sort\n    banyandb/database/v1/schema.proto\n  Entity\n  FieldSpec\n  IndexRule\n  IndexRuleBinding\n  Measure\n  Stream\n  Subject\n  TagFamilySpec\n  TagSpec\n  TopNAggregation\n  CompressionMethod\n  EncodingMethod\n  FieldType\n  IndexRule.Analyzer\n  IndexRule.Location\n  IndexRule.Type\n  TagType\n    banyandb/database/v1/rpc.proto\n  GroupRegistryServiceCreateRequest\n  GroupRegistryServiceCreateResponse\n  GroupRegistryServiceDeleteRequest\n  GroupRegistryServiceDeleteResponse\n  GroupRegistryServiceExistRequest\n  GroupRegistryServiceExistResponse\n  GroupRegistryServiceGetRequest\n  GroupRegistryServiceGetResponse\n  GroupRegistryServiceListRequest\n  GroupRegistryServiceListResponse\n  GroupRegistryServiceUpdateRequest\n  GroupRegistryServiceUpdateResponse\n  IndexRuleBindingRegistryServiceCreateRequest\n  IndexRuleBindingRegistryServiceCreateResponse\n  IndexRuleBindingRegistryServiceDeleteRequest\n  IndexRuleBindingRegistryServiceDeleteResponse\n  IndexRuleBindingRegistryServiceExistRequest\n  IndexRuleBindingRegistryServiceExistResponse\n  IndexRuleBindingRegistryServiceGetRequest\n  IndexRuleBindingRegistryServiceGetResponse\n  IndexRuleBindingRegistryServiceListRequest\n  IndexRuleBindingRegistryServiceListResponse\n  IndexRuleBindingRegistryServiceUpdateRequest\n  IndexRuleBindingRegistryServiceUpdateResponse\n  IndexRuleRegistryServiceCreateRequest\n  IndexRuleRegistryServiceCreateResponse\n  IndexRuleRegistryServiceDeleteRequest\n  IndexRuleRegistryServiceDeleteResponse\n  IndexRuleRegistryServiceExistRequest\n  IndexRuleRegistryServiceExistResponse\n  IndexRuleRegistryServiceGetRequest\n  IndexRuleRegistryServiceGetResponse\n  IndexRuleRegistryServiceListRequest\n  IndexRuleRegistryServiceListResponse\n  IndexRuleRegistryServiceUpdateRequest\n  IndexRuleRegistryServiceUpdateResponse\n  MeasureRegistryServiceCreateRequest\n  MeasureRegistryServiceCreateResponse\n  MeasureRegistryServiceDeleteRequest\n  MeasureRegistryServiceDeleteResponse\n  MeasureRegistryServiceExistRequest\n  MeasureRegistryServiceExistResponse\n  MeasureRegistryServiceGetRequest\n  MeasureRegistryServiceGetResponse\n  MeasureRegistryServiceListRequest\n  MeasureRegistryServiceListResponse\n  MeasureRegistryServiceUpdateRequest\n  MeasureRegistryServiceUpdateResponse\n  StreamRegistryServiceCreateRequest\n  StreamRegistryServiceCreateResponse\n  StreamRegistryServiceDeleteRequest\n  StreamRegistryServiceDeleteResponse\n  StreamRegistryServiceExistRequest\n  StreamRegistryServiceExistResponse\n  
StreamRegistryServiceGetRequest\n  StreamRegistryServiceGetResponse\n  StreamRegistryServiceListRequest\n  StreamRegistryServiceListResponse\n  StreamRegistryServiceUpdateRequest\n  StreamRegistryServiceUpdateResponse\n  TopNAggregationRegistryServiceCreateRequest\n  TopNAggregationRegistryServiceCreateResponse\n  TopNAggregationRegistryServiceDeleteRequest\n  TopNAggregationRegistryServiceDeleteResponse\n  TopNAggregationRegistryServiceExistRequest\n  TopNAggregationRegistryServiceExistResponse\n  TopNAggregationRegistryServiceGetRequest\n  TopNAggregationRegistryServiceGetResponse\n  TopNAggregationRegistryServiceListRequest\n  TopNAggregationRegistryServiceListResponse\n  TopNAggregationRegistryServiceUpdateRequest\n  TopNAggregationRegistryServiceUpdateResponse\n  GroupRegistryService\n  IndexRuleBindingRegistryService\n  IndexRuleRegistryService\n  MeasureRegistryService\n  StreamRegistryService\n  TopNAggregationRegistryService\n    banyandb/measure/v1/query.proto\n DataPoint DataPoint.Field QueryRequest QueryRequest.Aggregation QueryRequest.FieldProjection QueryRequest.GroupBy QueryRequest.Top QueryResponse    banyandb/measure/v1/topn.proto\n TopNList TopNList.Item TopNRequest TopNResponse    banyandb/model/v1/write.proto\n Status    banyandb/measure/v1/write.proto\n DataPointValue InternalWriteRequest WriteRequest WriteResponse    banyandb/measure/v1/rpc.proto\n MeasureService    banyandb/property/v1/property.proto\n Metadata Property    banyandb/property/v1/rpc.proto\n  ApplyRequest\n  ApplyResponse\n  DeleteRequest\n  DeleteResponse\n  GetRequest\n  GetResponse\n  KeepAliveRequest\n  KeepAliveResponse\n  ListRequest\n  ListResponse\n  ApplyRequest.Strategy\n  PropertyService\n    banyandb/stream/v1/query.proto\n Element QueryRequest QueryResponse    banyandb/stream/v1/write.proto\n ElementValue InternalWriteRequest WriteRequest WriteResponse    banyandb/stream/v1/rpc.proto\n StreamService    Scalar Value Types\n  \nTop\nbanyandb/cluster/v1/rpc.proto \nSendRequest    Field Type Label Description     topic string     message_id uint64     body google.protobuf.Any      \nSendResponse    Field Type Label Description     message_id uint64     error string     body google.protobuf.Any      \nService    Method Name Request Type Response Type Description     Send SendRequest stream SendResponse stream     \nTop\nbanyandb/common/v1/common.proto \nGroup Group is an internal object for Group management\n   Field Type Label Description     metadata Metadata  metadata define the group's identity   catalog Catalog  catalog denotes which type of data the group contains   resource_opts ResourceOpts  resourceOpts indicates the structure of the underlying kv storage   updated_at google.protobuf.Timestamp  updated_at indicates when resources of the group are updated    \nIntervalRule IntervalRule is a structured duration\n   Field Type Label Description     unit IntervalRule.Unit  unit can only be UNIT_HOUR or UNIT_DAY   num uint32      \nMetadata Metadata is for multi-tenant, multi-model use\n   Field Type Label Description     group string  group contains a set of options, like retention policy, max   name string  name of the entity   id uint32     create_revision int64  readonly. create_revision is the revision of last creation on this key.   mod_revision int64  readonly. mod_revision is the revision of last modification on this key.    
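To make the Group, Metadata, and IntervalRule messages above (and the ResourceOpts and Catalog values listed just below) easier to relate, here is a minimal sketch of how such a group resource could be assembled. The field names follow the message definitions in banyandb/common/v1/common.proto; the group name, shard count, intervals, and TTL are illustrative values only, and a real client would normally go through the generated protobuf stubs or a CLI client rather than hand-built dictionaries.

```python
import json

# A sketch of a Group resource, using the field names from
# banyandb/common/v1/common.proto. All concrete values (group name,
# shard count, intervals, TTL) are illustrative only.
group = {
    "metadata": {"name": "sw_metric"},            # Metadata: the group's identity
    "catalog": "CATALOG_MEASURE",                 # Catalog: the group stores measures
    "resource_opts": {                            # ResourceOpts (described below)
        "shard_num": 2,
        "block_interval": {"unit": "UNIT_HOUR", "num": 4},  # must be <= segment_interval
        "segment_interval": {"unit": "UNIT_DAY", "num": 1},
        "ttl": {"unit": "UNIT_DAY", "num": 7},    # how long the data is kept
    },
}

print(json.dumps(group, indent=2))
```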
\nResourceOpts    Field Type Label Description     shard_num uint32  shard_num is the number of shards   block_interval IntervalRule  block_interval indicates the length of a block block_interval should be less than or equal to segment_interval   segment_interval IntervalRule  segment_interval indicates the length of a segment   ttl IntervalRule  ttl indicates time to live, how long the data will be cached    \nCatalog    Name Number Description     CATALOG_UNSPECIFIED 0    CATALOG_STREAM 1    CATALOG_MEASURE 2     \nIntervalRule.Unit    Name Number Description     UNIT_UNSPECIFIED 0    UNIT_HOUR 1    UNIT_DAY 2     \nTop\nbanyandb/database/v1/database.proto \nNode    Field Type Label Description     metadata banyandb.common.v1.Metadata     roles Role repeated    grpc_address string     http_address string     created_at google.protobuf.Timestamp      \nShard    Field Type Label Description     id uint64     metadata banyandb.common.v1.Metadata     catalog banyandb.common.v1.Catalog     node string     total uint32     updated_at google.protobuf.Timestamp     created_at google.protobuf.Timestamp      \nRole    Name Number Description     ROLE_UNSPECIFIED 0    ROLE_META 1    ROLE_DATA 2    ROLE_LIAISON 3     \nTop\nbanyandb/model/v1/common.proto \nFieldValue    Field Type Label Description     null google.protobuf.NullValue     str Str     int Int     binary_data bytes     float Float      \nFloat    Field Type Label Description     value double      \nInt    Field Type Label Description     value int64      \nIntArray    Field Type Label Description     value int64 repeated     \nStr    Field Type Label Description     value string      \nStrArray    Field Type Label Description     value string repeated     \nTagFamilyForWrite    Field Type Label Description     tags TagValue repeated     \nTagValue    Field Type Label Description     null google.protobuf.NullValue     str Str     str_array StrArray     int Int     int_array IntArray     binary_data bytes      \nAggregationFunction    Name Number Description     AGGREGATION_FUNCTION_UNSPECIFIED 0    AGGREGATION_FUNCTION_MEAN 1    AGGREGATION_FUNCTION_MAX 2    AGGREGATION_FUNCTION_MIN 3    AGGREGATION_FUNCTION_COUNT 4    AGGREGATION_FUNCTION_SUM 5     \nTop\nbanyandb/model/v1/query.proto \nCondition Condition consists of the query condition with a single binary operator to be imposed For 1:1 BinaryOp, values in condition must be an array with length = 1, while for 1:N BinaryOp, values can be an array with length \u0026gt;= 1.\n   Field Type Label Description     name string     op Condition.BinaryOp     value TagValue      \nCriteria tag_families are indexed.\n   Field Type Label Description     le LogicalExpression     condition Condition      \nLogicalExpression LogicalExpression supports logical operation\n   Field Type Label Description     op LogicalExpression.LogicalOp  op is a logical operation   left Criteria     right Criteria      \nQueryOrder QueryOrder means a Sort operation to be done for a given index rule. The index_rule_name refers to the name of a index rule bound to the subject.\n   Field Type Label Description     index_rule_name string     sort Sort      \nTag Pair is the building block of a record which is equivalent to a key-value pair. In the context of Trace, it could be metadata of a trace such as service_name, service_instance, etc. Besides, other tags are organized in key-value pair in the underlying storage layer. 
One should notice that the values can be a multi-value.\n   Field Type Label Description     key string     value TagValue      \nTagFamily    Field Type Label Description     name string     tags Tag repeated     \nTagProjection TagProjection is used to select the names of keys to be returned.\n   Field Type Label Description     tag_families TagProjection.TagFamily repeated     \nTagProjection.TagFamily    Field Type Label Description     name string     tags string repeated     \nTimeRange TimeRange is a range query for uint64, the range here follows left-inclusive and right-exclusive rule, i.e. [begin, end) if both edges exist\n   Field Type Label Description     begin google.protobuf.Timestamp     end google.protobuf.Timestamp      \nCondition.BinaryOp BinaryOp specifies the operation imposed to the given query condition For EQ, NE, LT, GT, LE and GE, only one operand should be given, i.e. one-to-one relationship. HAVING and NOT_HAVING allow multi-value to be the operand such as array/vector, i.e. one-to-many relationship. For example, \u0026quot;keyA\u0026quot; contains \u0026quot;valueA\u0026quot; and \u0026quot;valueB\u0026quot; MATCH performances a full-text search if the tag is analyzed. The string value applies to the same analyzer as the tag, but string array value does not. Each item in a string array is seen as a token instead of a query expression.\n   Name Number Description     BINARY_OP_UNSPECIFIED 0    BINARY_OP_EQ 1    BINARY_OP_NE 2    BINARY_OP_LT 3    BINARY_OP_GT 4    BINARY_OP_LE 5    BINARY_OP_GE 6    BINARY_OP_HAVING 7    BINARY_OP_NOT_HAVING 8    BINARY_OP_IN 9    BINARY_OP_NOT_IN 10    BINARY_OP_MATCH 11     \nLogicalExpression.LogicalOp    Name Number Description     LOGICAL_OP_UNSPECIFIED 0    LOGICAL_OP_AND 1    LOGICAL_OP_OR 2     \nSort    Name Number Description     SORT_UNSPECIFIED 0    SORT_DESC 1    SORT_ASC 2     \nTop\nbanyandb/database/v1/schema.proto \nEntity    Field Type Label Description     tag_names string repeated     \nFieldSpec FieldSpec is the specification of field\n   Field Type Label Description     name string  name is the identity of a field   field_type FieldType  field_type denotes the type of field value   encoding_method EncodingMethod  encoding_method indicates how to encode data during writing   compression_method CompressionMethod  compression_method indicates how to compress data during writing    \nIndexRule IndexRule defines how to generate indices based on tags and the index type IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata define the rule's identity   tags string repeated tags are the combination that refers to an indexed object If the elements in tags are more than 1, the object will generate a multi-tag index Caveat: All tags in a multi-tag MUST have an identical IndexType   type IndexRule.Type  type is the IndexType of this IndexObject.   location IndexRule.Location  location indicates where to store index.   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRule is updated   analyzer IndexRule.Analyzer  analyzer analyzes tag value to support the full-text searching for TYPE_INVERTED indices.    
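Since the Criteria, Condition, and LogicalExpression messages of banyandb/model/v1/query.proto above form a small expression tree, a short sketch may help. The snippet below builds, as plain Python dictionaries, a criteria equivalent to (service_id == "webapp_id") AND (state == 1); the tag names and values are invented for illustration, and the enum strings follow the names listed in the tables above.

```python
import json

# A sketch of a Criteria expression tree using the Condition /
# LogicalExpression / Criteria messages described above.
def eq_str(tag, value):
    # Condition with a 1:1 BinaryOp: exactly one string operand.
    return {"condition": {"name": tag, "op": "BINARY_OP_EQ",
                          "value": {"str": {"value": value}}}}

def eq_int(tag, value):
    return {"condition": {"name": tag, "op": "BINARY_OP_EQ",
                          "value": {"int": {"value": value}}}}

criteria = {
    "le": {                      # LogicalExpression combining two sub-criteria
        "op": "LOGICAL_OP_AND",
        "left": eq_str("service_id", "webapp_id"),
        "right": eq_int("state", 1),
    }
}

print(json.dumps(criteria, indent=2))
```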
\nIndexRuleBinding IndexRuleBinding is a bridge to connect several IndexRules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, which provides flexible strategies to control how to generate time series indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of this binding   rules string repeated rules refers to the IndexRule   subject Subject  subject indicates the subject of binding action   begin_at google.protobuf.Timestamp  begin_at_nanoseconds is the timestamp, after which the binding will be active   expire_at google.protobuf.Timestamp  expire_at_nanoseconds is the timestamp, after which the binding will be inactive. expire_at_nanoseconds must be larger than begin_at_nanoseconds   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRuleBinding is updated    \nMeasure Measure intends to store data points\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a measure   tag_families TagFamilySpec repeated tag_families are for filtering measures   fields FieldSpec repeated fields denote measure values   entity Entity  entity indicates which tags will be used to generate a series and shard a measure   interval string  interval indicates how frequently to send a data point. Valid time units are \u0026quot;ns\u0026quot;, \u0026quot;us\u0026quot; (or \u0026quot;µs\u0026quot;), \u0026quot;ms\u0026quot;, \u0026quot;s\u0026quot;, \u0026quot;m\u0026quot;, \u0026quot;h\u0026quot;, \u0026quot;d\u0026quot;.   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nStream Stream intends to store streaming data, for example, traces or logs\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a trace series   tag_families TagFamilySpec repeated tag_families   entity Entity  entity indicates how to generate a series and shard a stream   updated_at google.protobuf.Timestamp  updated_at indicates when the stream is updated    \nSubject Subject defines which stream or measure would generate indices\n   Field Type Label Description     catalog banyandb.common.v1.Catalog  catalog is where the subject belongs to todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   name string  name refers to a stream or measure in a particular catalog    \nTagFamilySpec    Field Type Label Description     name string     tags TagSpec repeated tags defines accepted tags    \nTagSpec    Field Type Label Description     name string     type TagType     indexed_only bool  indexed_only indicates whether the tag is stored. True: it's indexed only, but not stored. False: it's stored and indexed    \nTopNAggregation TopNAggregation generates offline TopN statistics for a measure's TopN approximation\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of an aggregation   source_measure banyandb.common.v1.Metadata  source_measure denotes the data source of this aggregation   field_name string  field_name is the name of the field used for ranking   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields. ASC: bottomN. DESC: topN. UNSPECIFIED: topN + bottomN todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   group_by_tag_names string repeated group_by_tag_names groups data points into statistical counters   criteria banyandb.model.v1.Criteria  criteria 
select partial data points from measure   counters_number int32  counters_number sets the number of counters to be tracked. The default value is 1000   lru_size int32  lru_size defines how much entry is allowed to be maintained in the memory   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nCompressionMethod    Name Number Description     COMPRESSION_METHOD_UNSPECIFIED 0    COMPRESSION_METHOD_ZSTD 1     \nEncodingMethod    Name Number Description     ENCODING_METHOD_UNSPECIFIED 0    ENCODING_METHOD_GORILLA 1     \nFieldType    Name Number Description     FIELD_TYPE_UNSPECIFIED 0    FIELD_TYPE_STRING 1    FIELD_TYPE_INT 2    FIELD_TYPE_DATA_BINARY 3    FIELD_TYPE_FLOAT 4     \nIndexRule.Analyzer    Name Number Description     ANALYZER_UNSPECIFIED 0    ANALYZER_KEYWORD 1 Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.   ANALYZER_STANDARD 2 Standard analyzer provides grammar based tokenization   ANALYZER_SIMPLE 3 Simple analyzer breaks text into tokens at any non-letter character, such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, and changes uppercase to lowercase.    \nIndexRule.Location    Name Number Description     LOCATION_UNSPECIFIED 0    LOCATION_SERIES 1    LOCATION_GLOBAL 2     \nIndexRule.Type Type determine the index structure under the hood\n   Name Number Description     TYPE_UNSPECIFIED 0    TYPE_TREE 1    TYPE_INVERTED 2     \nTagType    Name Number Description     TAG_TYPE_UNSPECIFIED 0    TAG_TYPE_STRING 1    TAG_TYPE_INT 2    TAG_TYPE_STRING_ARRAY 3    TAG_TYPE_INT_ARRAY 4    TAG_TYPE_DATA_BINARY 5     \nTop\nbanyandb/database/v1/rpc.proto \nGroupRegistryServiceCreateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceCreateResponse \nGroupRegistryServiceDeleteRequest    Field Type Label Description     group string      \nGroupRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nGroupRegistryServiceExistRequest    Field Type Label Description     group string      \nGroupRegistryServiceExistResponse    Field Type Label Description     has_group bool      \nGroupRegistryServiceGetRequest    Field Type Label Description     group string      \nGroupRegistryServiceGetResponse    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceListRequest \nGroupRegistryServiceListResponse    Field Type Label Description     group banyandb.common.v1.Group repeated     \nGroupRegistryServiceUpdateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceUpdateResponse \nIndexRuleBindingRegistryServiceCreateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceCreateResponse \nIndexRuleBindingRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleBindingRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule_binding bool      \nIndexRuleBindingRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceGetResponse    Field Type Label Description 
    index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleBindingRegistryServiceListResponse    Field Type Label Description     index_rule_binding IndexRuleBinding repeated     \nIndexRuleBindingRegistryServiceUpdateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceUpdateResponse \nIndexRuleRegistryServiceCreateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceCreateResponse \nIndexRuleRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule bool      \nIndexRuleRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceGetResponse    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleRegistryServiceListResponse    Field Type Label Description     index_rule IndexRule repeated     \nIndexRuleRegistryServiceUpdateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceUpdateResponse \nMeasureRegistryServiceCreateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nMeasureRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nMeasureRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_measure bool      \nMeasureRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceGetResponse    Field Type Label Description     measure Measure      \nMeasureRegistryServiceListRequest    Field Type Label Description     group string      \nMeasureRegistryServiceListResponse    Field Type Label Description     measure Measure repeated     \nMeasureRegistryServiceUpdateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceCreateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nStreamRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_stream bool      \nStreamRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata 
     \nStreamRegistryServiceGetResponse    Field Type Label Description     stream Stream      \nStreamRegistryServiceListRequest    Field Type Label Description     group string      \nStreamRegistryServiceListResponse    Field Type Label Description     stream Stream repeated     \nStreamRegistryServiceUpdateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nTopNAggregationRegistryServiceCreateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceCreateResponse \nTopNAggregationRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nTopNAggregationRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_top_n_aggregation bool      \nTopNAggregationRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceGetResponse    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceListRequest    Field Type Label Description     group string      \nTopNAggregationRegistryServiceListResponse    Field Type Label Description     top_n_aggregation TopNAggregation repeated     \nTopNAggregationRegistryServiceUpdateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceUpdateResponse \nGroupRegistryService    Method Name Request Type Response Type Description     Create GroupRegistryServiceCreateRequest GroupRegistryServiceCreateResponse    Update GroupRegistryServiceUpdateRequest GroupRegistryServiceUpdateResponse    Delete GroupRegistryServiceDeleteRequest GroupRegistryServiceDeleteResponse    Get GroupRegistryServiceGetRequest GroupRegistryServiceGetResponse    List GroupRegistryServiceListRequest GroupRegistryServiceListResponse    Exist GroupRegistryServiceExistRequest GroupRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nIndexRuleBindingRegistryService    Method Name Request Type Response Type Description     Create IndexRuleBindingRegistryServiceCreateRequest IndexRuleBindingRegistryServiceCreateResponse    Update IndexRuleBindingRegistryServiceUpdateRequest IndexRuleBindingRegistryServiceUpdateResponse    Delete IndexRuleBindingRegistryServiceDeleteRequest IndexRuleBindingRegistryServiceDeleteResponse    Get IndexRuleBindingRegistryServiceGetRequest IndexRuleBindingRegistryServiceGetResponse    List IndexRuleBindingRegistryServiceListRequest IndexRuleBindingRegistryServiceListResponse    Exist IndexRuleBindingRegistryServiceExistRequest IndexRuleBindingRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. 
Please use HEAD method to touch Get instead    \nIndexRuleRegistryService    Method Name Request Type Response Type Description     Create IndexRuleRegistryServiceCreateRequest IndexRuleRegistryServiceCreateResponse    Update IndexRuleRegistryServiceUpdateRequest IndexRuleRegistryServiceUpdateResponse    Delete IndexRuleRegistryServiceDeleteRequest IndexRuleRegistryServiceDeleteResponse    Get IndexRuleRegistryServiceGetRequest IndexRuleRegistryServiceGetResponse    List IndexRuleRegistryServiceListRequest IndexRuleRegistryServiceListResponse    Exist IndexRuleRegistryServiceExistRequest IndexRuleRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nMeasureRegistryService    Method Name Request Type Response Type Description     Create MeasureRegistryServiceCreateRequest MeasureRegistryServiceCreateResponse    Update MeasureRegistryServiceUpdateRequest MeasureRegistryServiceUpdateResponse    Delete MeasureRegistryServiceDeleteRequest MeasureRegistryServiceDeleteResponse    Get MeasureRegistryServiceGetRequest MeasureRegistryServiceGetResponse    List MeasureRegistryServiceListRequest MeasureRegistryServiceListResponse    Exist MeasureRegistryServiceExistRequest MeasureRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nStreamRegistryService    Method Name Request Type Response Type Description     Create StreamRegistryServiceCreateRequest StreamRegistryServiceCreateResponse    Update StreamRegistryServiceUpdateRequest StreamRegistryServiceUpdateResponse    Delete StreamRegistryServiceDeleteRequest StreamRegistryServiceDeleteResponse    Get StreamRegistryServiceGetRequest StreamRegistryServiceGetResponse    List StreamRegistryServiceListRequest StreamRegistryServiceListResponse    Exist StreamRegistryServiceExistRequest StreamRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTopNAggregationRegistryService    Method Name Request Type Response Type Description     Create TopNAggregationRegistryServiceCreateRequest TopNAggregationRegistryServiceCreateResponse    Update TopNAggregationRegistryServiceUpdateRequest TopNAggregationRegistryServiceUpdateResponse    Delete TopNAggregationRegistryServiceDeleteRequest TopNAggregationRegistryServiceDeleteResponse    Get TopNAggregationRegistryServiceGetRequest TopNAggregationRegistryServiceGetResponse    List TopNAggregationRegistryServiceListRequest TopNAggregationRegistryServiceListResponse    Exist TopNAggregationRegistryServiceExistRequest TopNAggregationRegistryServiceExistResponse     \nTop\nbanyandb/measure/v1/query.proto \nDataPoint DataPoint is stored in Measures\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamily repeated tag_families contains tags selected in the projection   fields DataPoint.Field repeated fields contains fields selected in the projection    \nDataPoint.Field    Field Type Label Description     name string     value banyandb.model.v1.FieldValue      \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   criteria banyandb.model.v1.Criteria  tag_families are indexed.   
tag_projection banyandb.model.v1.TagProjection  tag_projection can be used to select tags of the data points in the response   field_projection QueryRequest.FieldProjection  field_projection can be used to select fields of the data points in the response   group_by QueryRequest.GroupBy  group_by groups data points based on their field value for a specific tag and use field_name as the projection name   agg QueryRequest.Aggregation  agg aggregates data points based on a field   top QueryRequest.Top  top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by's output   offset uint32  offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top's output   limit uint32  limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top's output   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a tag.    \nQueryRequest.Aggregation    Field Type Label Description     function banyandb.model.v1.AggregationFunction     field_name string  field_name must be one of files indicated by the field_projection    \nQueryRequest.FieldProjection    Field Type Label Description     names string repeated     \nQueryRequest.GroupBy    Field Type Label Description     tag_projection banyandb.model.v1.TagProjection  tag_projection must be a subset of the tag_projection of QueryRequest   field_name string  field_name must be one of fields indicated by field_projection    \nQueryRequest.Top    Field Type Label Description     number int32  number set the how many items should be returned   field_name string  field_name must be one of files indicated by the field_projection   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     data_points DataPoint repeated data_points are the actual data returned    \nTop\nbanyandb/measure/v1/topn.proto \nTopNList TopNList contains a series of topN items\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   items TopNList.Item repeated items contains top-n items in a list    \nTopNList.Item    Field Type Label Description     entity banyandb.model.v1.Tag repeated    value banyandb.model.v1.FieldValue      \nTopNRequest TopNRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   top_n int32  top_n set the how many items should be returned in each list.   agg banyandb.model.v1.AggregationFunction  agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only   conditions banyandb.model.v1.Condition repeated criteria select counters. Only equals are acceptable.   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields    \nTopNResponse TopNResponse is the response for a query to the Query module.\n   Field Type Label Description     lists TopNList repeated lists contain a series topN lists ranked by timestamp if agg_func in query request is specified, lists' size should be one.    
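As a concrete illustration of the TopNRequest contract described above, the following sketch assembles a request body as plain Python dictionaries. The group and measure names, the condition, and the 30-minute window are invented for the example; field and enum names follow the banyandb/measure/v1/topn.proto and banyandb/model/v1 tables above, and a real client would send this through the generated gRPC stubs.

```python
import json
from datetime import datetime, timedelta, timezone

# A sketch of a TopNRequest body using the field names listed above for
# banyandb/measure/v1/topn.proto. The group/measure names, the condition,
# and the 30-minute window are invented for illustration.
now = datetime.now(timezone.utc)
top_n_request = {
    "metadata": {"group": "sw_metric", "name": "service_cpm_top_n"},
    "time_range": {                          # TimeRange: [begin, end)
        "begin": (now - timedelta(minutes=30)).isoformat(),
        "end": now.isoformat(),
    },
    "top_n": 10,                             # items returned in each list
    "agg": "AGGREGATION_FUNCTION_MEAN",      # aggregate lists over the range
    "field_value_sort": "SORT_DESC",         # DESC: topN
    "conditions": [                          # only equality conditions are accepted
        {"name": "entity_id", "op": "BINARY_OP_EQ",
         "value": {"str": {"value": "webapp_id"}}}
    ],
}

print(json.dumps(top_n_request, indent=2))
```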
\nTop\nbanyandb/model/v1/write.proto \nStatus Status is the response status for write\n   Name Number Description     STATUS_UNSPECIFIED 0    STATUS_SUCCEED 1    STATUS_INVALID_TIMESTAMP 2    STATUS_NOT_FOUND 3    STATUS_EXPIRED_SCHEMA 4    STATUS_INTERNAL_ERROR 5     \nTop\nbanyandb/measure/v1/write.proto \nDataPointValue DataPointValue is the data point for writing. It only contains values.\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the measure schema   fields banyandb.model.v1.FieldValue repeated the order of fields match the measure schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest WriteRequest is the request contract for write\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   data_point DataPointValue  the data_point is required.   message_id uint64  the message_id is required.    \nWriteResponse WriteResponse is the response contract for write\n   Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/measure/v1/rpc.proto \nMeasureService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream    TopN TopNRequest TopNResponse     \nTop\nbanyandb/property/v1/property.proto \nMetadata Metadata is for multi-tenant use\n   Field Type Label Description     container banyandb.common.v1.Metadata  container is created when it receives the first property   id string  id identifies a property    \nProperty Property stores the user defined data\n   Field Type Label Description     metadata Metadata  metadata is the identity of a property   tags banyandb.model.v1.Tag repeated tag stores the content of a property   updated_at google.protobuf.Timestamp  updated_at indicates when the property is updated   lease_id int64  readonly. lease_id is the ID of the lease that attached to key.   ttl string  ttl indicates the time to live of the property. It's a string in the format of \u0026quot;1h\u0026quot;, \u0026quot;2m\u0026quot;, \u0026quot;3s\u0026quot;, \u0026quot;1500ms\u0026quot;. It defaults to 0s, which means the property never expires. The minimum allowed ttl is 1s.    \nTop\nbanyandb/property/v1/rpc.proto \nApplyRequest    Field Type Label Description     property Property     strategy ApplyRequest.Strategy  strategy indicates how to update a property. It defaults to STRATEGY_MERGE    \nApplyResponse    Field Type Label Description     created bool  created indicates whether the property existed. True: the property is absent. False: the property existed.   
tags_num uint32     lease_id int64      \nDeleteRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nDeleteResponse    Field Type Label Description     deleted bool     tags_num uint32      \nGetRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nGetResponse    Field Type Label Description     property Property      \nKeepAliveRequest    Field Type Label Description     lease_id int64      \nKeepAliveResponse \nListRequest    Field Type Label Description     container banyandb.common.v1.Metadata     ids string repeated    tags string repeated     \nListResponse    Field Type Label Description     property Property repeated     \nApplyRequest.Strategy    Name Number Description     STRATEGY_UNSPECIFIED 0    STRATEGY_MERGE 1    STRATEGY_REPLACE 2     \nPropertyService    Method Name Request Type Response Type Description     Apply ApplyRequest ApplyResponse Apply creates a property if it's absent, or update a existed one based on a strategy.   Delete DeleteRequest DeleteResponse    Get GetRequest GetResponse    List ListRequest ListResponse    KeepAlive KeepAliveRequest KeepAliveResponse     \nTop\nbanyandb/stream/v1/query.proto \nElement Element represents (stream context) a Span defined in Google Dapper paper or equivalently a Segment in Skywalking. (Log context) a log\n   Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp represents a millisecond 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamily repeated fields contains all indexed Field. Some typical names, - stream_id - duration - service_name - service_instance_id - end_time_milliseconds    \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds. In the context of stream, it represents the range of the startTime for spans/segments, while in the context of Log, it means the range of the timestamp(s) for logs. it is always recommended to specify time range for performance reason   offset uint32  offset is used to support pagination, together with the following limit   limit uint32  limit is used to impose a boundary on the number of records being returned   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a field. So far, only fields in the type of Integer are supported   criteria banyandb.model.v1.Criteria  tag_families are indexed.   projection banyandb.model.v1.TagProjection  projection can be used to select the key names of the element in the response    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     elements Element repeated elements are the actual data returned    \nTop\nbanyandb/stream/v1/write.proto \nElementValue    Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds. 
It represents 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the stream schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   element ElementValue  the element is required.   message_id uint64  the message_id is required.    \nWriteResponse    Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/stream/v1/rpc.proto \nStreamService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream     Scalar Value Types    .proto Type Notes C++ Java Python Go C# PHP Ruby     double  double double float float64 double float Float   float  float float float float32 float float Float   int32 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. int32 int int int32 int integer Bignum or Fixnum (as required)   int64 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. int64 long int/long int64 long integer/string Bignum   uint32 Uses variable-length encoding. uint32 int int/long uint32 uint integer Bignum or Fixnum (as required)   uint64 Uses variable-length encoding. uint64 long int/long uint64 ulong integer/string Bignum or Fixnum (as required)   sint32 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. int32 int int int32 int integer Bignum or Fixnum (as required)   sint64 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. int64 long int/long int64 long integer/string Bignum   fixed32 Always four bytes. More efficient than uint32 if values are often greater than 2^28. uint32 int int uint32 uint integer Bignum or Fixnum (as required)   fixed64 Always eight bytes. More efficient than uint64 if values are often greater than 2^56. uint64 long int/long uint64 ulong integer/string Bignum   sfixed32 Always four bytes. int32 int int int32 int integer Bignum or Fixnum (as required)   sfixed64 Always eight bytes. int64 long int/long int64 long integer/string Bignum   bool  bool boolean boolean bool bool boolean TrueClass/FalseClass   string A string must always contain UTF-8 encoded or 7-bit ASCII text. string String str/unicode string string string String (UTF-8)   bytes May contain any arbitrary sequence of bytes. 
string ByteString str []byte ByteString string String (ASCII-8BIT)    ","title":"Protocol Documentation","url":"/docs/skywalking-banyandb/latest/api-reference/"},{"content":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest\n  SendResponse\n  Service\n    banyandb/common/v1/common.proto\n  Group\n  IntervalRule\n  Metadata\n  ResourceOpts\n  Catalog\n  IntervalRule.Unit\n    banyandb/database/v1/database.proto\n  Node\n  Shard\n  Role\n    banyandb/model/v1/common.proto\n  FieldValue\n  Float\n  Int\n  IntArray\n  Str\n  StrArray\n  TagFamilyForWrite\n  TagValue\n  AggregationFunction\n    banyandb/model/v1/query.proto\n  Condition\n  Criteria\n  LogicalExpression\n  QueryOrder\n  Tag\n  TagFamily\n  TagProjection\n  TagProjection.TagFamily\n  TimeRange\n  Condition.BinaryOp\n  LogicalExpression.LogicalOp\n  Sort\n    banyandb/database/v1/schema.proto\n  Entity\n  FieldSpec\n  IndexRule\n  IndexRuleBinding\n  Measure\n  Stream\n  Subject\n  TagFamilySpec\n  TagSpec\n  TopNAggregation\n  CompressionMethod\n  EncodingMethod\n  FieldType\n  IndexRule.Analyzer\n  IndexRule.Type\n  TagType\n    banyandb/database/v1/rpc.proto\n  GroupRegistryServiceCreateRequest\n  GroupRegistryServiceCreateResponse\n  GroupRegistryServiceDeleteRequest\n  GroupRegistryServiceDeleteResponse\n  GroupRegistryServiceExistRequest\n  GroupRegistryServiceExistResponse\n  GroupRegistryServiceGetRequest\n  GroupRegistryServiceGetResponse\n  GroupRegistryServiceListRequest\n  GroupRegistryServiceListResponse\n  GroupRegistryServiceUpdateRequest\n  GroupRegistryServiceUpdateResponse\n  IndexRuleBindingRegistryServiceCreateRequest\n  IndexRuleBindingRegistryServiceCreateResponse\n  IndexRuleBindingRegistryServiceDeleteRequest\n  IndexRuleBindingRegistryServiceDeleteResponse\n  IndexRuleBindingRegistryServiceExistRequest\n  IndexRuleBindingRegistryServiceExistResponse\n  IndexRuleBindingRegistryServiceGetRequest\n  IndexRuleBindingRegistryServiceGetResponse\n  IndexRuleBindingRegistryServiceListRequest\n  IndexRuleBindingRegistryServiceListResponse\n  IndexRuleBindingRegistryServiceUpdateRequest\n  IndexRuleBindingRegistryServiceUpdateResponse\n  IndexRuleRegistryServiceCreateRequest\n  IndexRuleRegistryServiceCreateResponse\n  IndexRuleRegistryServiceDeleteRequest\n  IndexRuleRegistryServiceDeleteResponse\n  IndexRuleRegistryServiceExistRequest\n  IndexRuleRegistryServiceExistResponse\n  IndexRuleRegistryServiceGetRequest\n  IndexRuleRegistryServiceGetResponse\n  IndexRuleRegistryServiceListRequest\n  IndexRuleRegistryServiceListResponse\n  IndexRuleRegistryServiceUpdateRequest\n  IndexRuleRegistryServiceUpdateResponse\n  MeasureRegistryServiceCreateRequest\n  MeasureRegistryServiceCreateResponse\n  MeasureRegistryServiceDeleteRequest\n  MeasureRegistryServiceDeleteResponse\n  MeasureRegistryServiceExistRequest\n  MeasureRegistryServiceExistResponse\n  MeasureRegistryServiceGetRequest\n  MeasureRegistryServiceGetResponse\n  MeasureRegistryServiceListRequest\n  MeasureRegistryServiceListResponse\n  MeasureRegistryServiceUpdateRequest\n  MeasureRegistryServiceUpdateResponse\n  StreamRegistryServiceCreateRequest\n  StreamRegistryServiceCreateResponse\n  StreamRegistryServiceDeleteRequest\n  StreamRegistryServiceDeleteResponse\n  StreamRegistryServiceExistRequest\n  StreamRegistryServiceExistResponse\n  StreamRegistryServiceGetRequest\n  StreamRegistryServiceGetResponse\n  StreamRegistryServiceListRequest\n  StreamRegistryServiceListResponse\n  StreamRegistryServiceUpdateRequest\n  
StreamRegistryServiceUpdateResponse\n  TopNAggregationRegistryServiceCreateRequest\n  TopNAggregationRegistryServiceCreateResponse\n  TopNAggregationRegistryServiceDeleteRequest\n  TopNAggregationRegistryServiceDeleteResponse\n  TopNAggregationRegistryServiceExistRequest\n  TopNAggregationRegistryServiceExistResponse\n  TopNAggregationRegistryServiceGetRequest\n  TopNAggregationRegistryServiceGetResponse\n  TopNAggregationRegistryServiceListRequest\n  TopNAggregationRegistryServiceListResponse\n  TopNAggregationRegistryServiceUpdateRequest\n  TopNAggregationRegistryServiceUpdateResponse\n  GroupRegistryService\n  IndexRuleBindingRegistryService\n  IndexRuleRegistryService\n  MeasureRegistryService\n  StreamRegistryService\n  TopNAggregationRegistryService\n    banyandb/measure/v1/query.proto\n DataPoint DataPoint.Field QueryRequest QueryRequest.Aggregation QueryRequest.FieldProjection QueryRequest.GroupBy QueryRequest.Top QueryResponse    banyandb/measure/v1/topn.proto\n TopNList TopNList.Item TopNRequest TopNResponse    banyandb/model/v1/write.proto\n Status    banyandb/measure/v1/write.proto\n DataPointValue InternalWriteRequest WriteRequest WriteResponse    banyandb/measure/v1/rpc.proto\n MeasureService    banyandb/property/v1/property.proto\n Metadata Property    banyandb/property/v1/rpc.proto\n  ApplyRequest\n  ApplyResponse\n  DeleteRequest\n  DeleteResponse\n  GetRequest\n  GetResponse\n  KeepAliveRequest\n  KeepAliveResponse\n  ListRequest\n  ListResponse\n  ApplyRequest.Strategy\n  PropertyService\n    banyandb/stream/v1/query.proto\n Element QueryRequest QueryResponse    banyandb/stream/v1/write.proto\n ElementValue InternalWriteRequest WriteRequest WriteResponse    banyandb/stream/v1/rpc.proto\n StreamService    Scalar Value Types\n  \nTop\nbanyandb/cluster/v1/rpc.proto \nSendRequest    Field Type Label Description     topic string     message_id uint64     body google.protobuf.Any     batch_mod bool      \nSendResponse    Field Type Label Description     message_id uint64     error string     body google.protobuf.Any      \nService    Method Name Request Type Response Type Description     Send SendRequest stream SendResponse stream     \nTop\nbanyandb/common/v1/common.proto \nGroup Group is an internal object for Group management\n   Field Type Label Description     metadata Metadata  metadata define the group's identity   catalog Catalog  catalog denotes which type of data the group contains   resource_opts ResourceOpts  resourceOpts indicates the structure of the underlying kv storage   updated_at google.protobuf.Timestamp  updated_at indicates when resources of the group are updated    \nIntervalRule IntervalRule is a structured duration\n   Field Type Label Description     unit IntervalRule.Unit  unit can only be UNIT_HOUR or UNIT_DAY   num uint32      \nMetadata Metadata is for multi-tenant, multi-model use\n   Field Type Label Description     group string  group contains a set of options, like retention policy, max   name string  name of the entity   id uint32  id is the unique identifier of the entity if id is not set, the system will generate a unique id   create_revision int64  readonly. create_revision is the revision of last creation on this key.   mod_revision int64  readonly. mod_revision is the revision of last modification on this key.    
\nResourceOpts    Field Type Label Description     shard_num uint32  shard_num is the number of shards   segment_interval IntervalRule  segment_interval indicates the length of a segment   ttl IntervalRule  ttl indicates time to live, how long the data will be cached    \nCatalog    Name Number Description     CATALOG_UNSPECIFIED 0    CATALOG_STREAM 1    CATALOG_MEASURE 2     \nIntervalRule.Unit    Name Number Description     UNIT_UNSPECIFIED 0    UNIT_HOUR 1    UNIT_DAY 2     \nTop\nbanyandb/database/v1/database.proto \nNode    Field Type Label Description     metadata banyandb.common.v1.Metadata     roles Role repeated    grpc_address string     http_address string     created_at google.protobuf.Timestamp      \nShard    Field Type Label Description     id uint64     metadata banyandb.common.v1.Metadata     catalog banyandb.common.v1.Catalog     node string     total uint32     updated_at google.protobuf.Timestamp     created_at google.protobuf.Timestamp      \nRole    Name Number Description     ROLE_UNSPECIFIED 0    ROLE_META 1    ROLE_DATA 2    ROLE_LIAISON 3     \nTop\nbanyandb/model/v1/common.proto \nFieldValue    Field Type Label Description     null google.protobuf.NullValue     str Str     int Int     binary_data bytes     float Float      \nFloat    Field Type Label Description     value double      \nInt    Field Type Label Description     value int64      \nIntArray    Field Type Label Description     value int64 repeated     \nStr    Field Type Label Description     value string      \nStrArray    Field Type Label Description     value string repeated     \nTagFamilyForWrite    Field Type Label Description     tags TagValue repeated     \nTagValue    Field Type Label Description     null google.protobuf.NullValue     str Str     str_array StrArray     int Int     int_array IntArray     binary_data bytes      \nAggregationFunction    Name Number Description     AGGREGATION_FUNCTION_UNSPECIFIED 0    AGGREGATION_FUNCTION_MEAN 1    AGGREGATION_FUNCTION_MAX 2    AGGREGATION_FUNCTION_MIN 3    AGGREGATION_FUNCTION_COUNT 4    AGGREGATION_FUNCTION_SUM 5     \nTop\nbanyandb/model/v1/query.proto \nCondition Condition consists of the query condition with a single binary operator to be imposed For 1:1 BinaryOp, values in condition must be an array with length = 1, while for 1:N BinaryOp, values can be an array with length \u0026gt;= 1.\n   Field Type Label Description     name string     op Condition.BinaryOp     value TagValue      \nCriteria tag_families are indexed.\n   Field Type Label Description     le LogicalExpression     condition Condition      \nLogicalExpression LogicalExpression supports logical operation\n   Field Type Label Description     op LogicalExpression.LogicalOp  op is a logical operation   left Criteria     right Criteria      \nQueryOrder QueryOrder means a Sort operation to be done for a given index rule. The index_rule_name refers to the name of a index rule bound to the subject.\n   Field Type Label Description     index_rule_name string     sort Sort      \nTag Pair is the building block of a record which is equivalent to a key-value pair. In the context of Trace, it could be metadata of a trace such as service_name, service_instance, etc. Besides, other tags are organized in key-value pair in the underlying storage layer. 
One should notice that the values can be a multi-value.\n   Field Type Label Description     key string     value TagValue      \nTagFamily    Field Type Label Description     name string     tags Tag repeated     \nTagProjection TagProjection is used to select the names of keys to be returned.\n   Field Type Label Description     tag_families TagProjection.TagFamily repeated     \nTagProjection.TagFamily    Field Type Label Description     name string     tags string repeated     \nTimeRange TimeRange is a range query for uint64, the range here follows left-inclusive and right-exclusive rule, i.e. [begin, end) if both edges exist\n   Field Type Label Description     begin google.protobuf.Timestamp     end google.protobuf.Timestamp      \nCondition.BinaryOp BinaryOp specifies the operation imposed to the given query condition For EQ, NE, LT, GT, LE and GE, only one operand should be given, i.e. one-to-one relationship. HAVING and NOT_HAVING allow multi-value to be the operand such as array/vector, i.e. one-to-many relationship. For example, \u0026quot;keyA\u0026quot; contains \u0026quot;valueA\u0026quot; and \u0026quot;valueB\u0026quot; MATCH performances a full-text search if the tag is analyzed. The string value applies to the same analyzer as the tag, but string array value does not. Each item in a string array is seen as a token instead of a query expression.\n   Name Number Description     BINARY_OP_UNSPECIFIED 0    BINARY_OP_EQ 1    BINARY_OP_NE 2    BINARY_OP_LT 3    BINARY_OP_GT 4    BINARY_OP_LE 5    BINARY_OP_GE 6    BINARY_OP_HAVING 7    BINARY_OP_NOT_HAVING 8    BINARY_OP_IN 9    BINARY_OP_NOT_IN 10    BINARY_OP_MATCH 11     \nLogicalExpression.LogicalOp    Name Number Description     LOGICAL_OP_UNSPECIFIED 0    LOGICAL_OP_AND 1    LOGICAL_OP_OR 2     \nSort    Name Number Description     SORT_UNSPECIFIED 0    SORT_DESC 1    SORT_ASC 2     \nTop\nbanyandb/database/v1/schema.proto \nEntity    Field Type Label Description     tag_names string repeated     \nFieldSpec FieldSpec is the specification of field\n   Field Type Label Description     name string  name is the identity of a field   field_type FieldType  field_type denotes the type of field value   encoding_method EncodingMethod  encoding_method indicates how to encode data during writing   compression_method CompressionMethod  compression_method indicates how to compress data during writing    \nIndexRule IndexRule defines how to generate indices based on tags and the index type IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata define the rule's identity   tags string repeated tags are the combination that refers to an indexed object If the elements in tags are more than 1, the object will generate a multi-tag index Caveat: All tags in a multi-tag MUST have an identical IndexType   type IndexRule.Type  type is the IndexType of this IndexObject.   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRule is updated   analyzer IndexRule.Analyzer  analyzer analyzes tag value to support the full-text searching for TYPE_INVERTED indices.    
\nIndexRuleBinding IndexRuleBinding is a bridge to connect severalIndexRules to a subject This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, that provides flexible strategies to control how to generate time series indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of this binding   rules string repeated rules refers to the IndexRule   subject Subject  subject indicates the subject of binding action   begin_at google.protobuf.Timestamp  begin_at_nanoseconds is the timestamp, after which the binding will be active   expire_at google.protobuf.Timestamp  expire_at_nanoseconds it the timestamp, after which the binding will be inactive expire_at_nanoseconds must be larger than begin_at_nanoseconds   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRuleBinding is updated    \nMeasure Measure intends to store data point\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a measure   tag_families TagFamilySpec repeated tag_families are for filter measures   fields FieldSpec repeated fields denote measure values   entity Entity  entity indicates which tags will be to generate a series and shard a measure   interval string  interval indicates how frequently to send a data point valid time units are \u0026quot;ns\u0026quot;, \u0026quot;us\u0026quot; (or \u0026quot;µs\u0026quot;), \u0026quot;ms\u0026quot;, \u0026quot;s\u0026quot;, \u0026quot;m\u0026quot;, \u0026quot;h\u0026quot;, \u0026quot;d\u0026quot;.   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nStream Stream intends to store streaming data, for example, traces or logs\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a trace series   tag_families TagFamilySpec repeated tag_families   entity Entity  entity indicates how to generate a series and shard a stream   updated_at google.protobuf.Timestamp  updated_at indicates when the stream is updated    \nSubject Subject defines which stream or measure would generate indices\n   Field Type Label Description     catalog banyandb.common.v1.Catalog  catalog is where the subject belongs to todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   name string  name refers to a stream or measure in a particular catalog    \nTagFamilySpec    Field Type Label Description     name string     tags TagSpec repeated tags defines accepted tags    \nTagSpec    Field Type Label Description     name string     type TagType     indexed_only bool  indexed_only indicates whether the tag is stored True: It's indexed only, but not stored False: it's stored and indexed    \nTopNAggregation TopNAggregation generates offline TopN statistics for a measure's TopN approximation\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of an aggregation   source_measure banyandb.common.v1.Metadata  source_measure denotes the data source of this aggregation   field_name string  field_name is the name of field used for ranking   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN + bottomN todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   group_by_tag_names string repeated group_by_tag_names groups data points into statistical counters   criteria banyandb.model.v1.Criteria  criteria 
select partial data points from measure   counters_number int32  counters_number sets the number of counters to be tracked. The default value is 1000   lru_size int32  lru_size defines how much entry is allowed to be maintained in the memory   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nCompressionMethod    Name Number Description     COMPRESSION_METHOD_UNSPECIFIED 0    COMPRESSION_METHOD_ZSTD 1     \nEncodingMethod    Name Number Description     ENCODING_METHOD_UNSPECIFIED 0    ENCODING_METHOD_GORILLA 1     \nFieldType    Name Number Description     FIELD_TYPE_UNSPECIFIED 0    FIELD_TYPE_STRING 1    FIELD_TYPE_INT 2    FIELD_TYPE_DATA_BINARY 3    FIELD_TYPE_FLOAT 4     \nIndexRule.Analyzer    Name Number Description     ANALYZER_UNSPECIFIED 0    ANALYZER_KEYWORD 1 Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.   ANALYZER_STANDARD 2 Standard analyzer provides grammar based tokenization   ANALYZER_SIMPLE 3 Simple analyzer breaks text into tokens at any non-letter character, such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, and changes uppercase to lowercase.    \nIndexRule.Type Type determine the index structure under the hood\n   Name Number Description     TYPE_UNSPECIFIED 0    TYPE_INVERTED 1     \nTagType    Name Number Description     TAG_TYPE_UNSPECIFIED 0    TAG_TYPE_STRING 1    TAG_TYPE_INT 2    TAG_TYPE_STRING_ARRAY 3    TAG_TYPE_INT_ARRAY 4    TAG_TYPE_DATA_BINARY 5     \nTop\nbanyandb/database/v1/rpc.proto \nGroupRegistryServiceCreateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceCreateResponse \nGroupRegistryServiceDeleteRequest    Field Type Label Description     group string      \nGroupRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nGroupRegistryServiceExistRequest    Field Type Label Description     group string      \nGroupRegistryServiceExistResponse    Field Type Label Description     has_group bool      \nGroupRegistryServiceGetRequest    Field Type Label Description     group string      \nGroupRegistryServiceGetResponse    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceListRequest \nGroupRegistryServiceListResponse    Field Type Label Description     group banyandb.common.v1.Group repeated     \nGroupRegistryServiceUpdateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceUpdateResponse \nIndexRuleBindingRegistryServiceCreateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceCreateResponse \nIndexRuleBindingRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleBindingRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule_binding bool      \nIndexRuleBindingRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceGetResponse    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceListRequest    Field Type Label Description     group 
string      \nIndexRuleBindingRegistryServiceListResponse    Field Type Label Description     index_rule_binding IndexRuleBinding repeated     \nIndexRuleBindingRegistryServiceUpdateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceUpdateResponse \nIndexRuleRegistryServiceCreateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceCreateResponse \nIndexRuleRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule bool      \nIndexRuleRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceGetResponse    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleRegistryServiceListResponse    Field Type Label Description     index_rule IndexRule repeated     \nIndexRuleRegistryServiceUpdateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceUpdateResponse \nMeasureRegistryServiceCreateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nMeasureRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nMeasureRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_measure bool      \nMeasureRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceGetResponse    Field Type Label Description     measure Measure      \nMeasureRegistryServiceListRequest    Field Type Label Description     group string      \nMeasureRegistryServiceListResponse    Field Type Label Description     measure Measure repeated     \nMeasureRegistryServiceUpdateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceCreateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nStreamRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_stream bool      \nStreamRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceGetResponse    Field Type Label Description     stream Stream      \nStreamRegistryServiceListRequest   
 Field Type Label Description     group string      \nStreamRegistryServiceListResponse    Field Type Label Description     stream Stream repeated     \nStreamRegistryServiceUpdateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nTopNAggregationRegistryServiceCreateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceCreateResponse \nTopNAggregationRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nTopNAggregationRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_top_n_aggregation bool      \nTopNAggregationRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceGetResponse    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceListRequest    Field Type Label Description     group string      \nTopNAggregationRegistryServiceListResponse    Field Type Label Description     top_n_aggregation TopNAggregation repeated     \nTopNAggregationRegistryServiceUpdateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceUpdateResponse \nGroupRegistryService    Method Name Request Type Response Type Description     Create GroupRegistryServiceCreateRequest GroupRegistryServiceCreateResponse    Update GroupRegistryServiceUpdateRequest GroupRegistryServiceUpdateResponse    Delete GroupRegistryServiceDeleteRequest GroupRegistryServiceDeleteResponse    Get GroupRegistryServiceGetRequest GroupRegistryServiceGetResponse    List GroupRegistryServiceListRequest GroupRegistryServiceListResponse    Exist GroupRegistryServiceExistRequest GroupRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nIndexRuleBindingRegistryService    Method Name Request Type Response Type Description     Create IndexRuleBindingRegistryServiceCreateRequest IndexRuleBindingRegistryServiceCreateResponse    Update IndexRuleBindingRegistryServiceUpdateRequest IndexRuleBindingRegistryServiceUpdateResponse    Delete IndexRuleBindingRegistryServiceDeleteRequest IndexRuleBindingRegistryServiceDeleteResponse    Get IndexRuleBindingRegistryServiceGetRequest IndexRuleBindingRegistryServiceGetResponse    List IndexRuleBindingRegistryServiceListRequest IndexRuleBindingRegistryServiceListResponse    Exist IndexRuleBindingRegistryServiceExistRequest IndexRuleBindingRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. 
Please use HEAD method to touch Get instead    \nIndexRuleRegistryService    Method Name Request Type Response Type Description     Create IndexRuleRegistryServiceCreateRequest IndexRuleRegistryServiceCreateResponse    Update IndexRuleRegistryServiceUpdateRequest IndexRuleRegistryServiceUpdateResponse    Delete IndexRuleRegistryServiceDeleteRequest IndexRuleRegistryServiceDeleteResponse    Get IndexRuleRegistryServiceGetRequest IndexRuleRegistryServiceGetResponse    List IndexRuleRegistryServiceListRequest IndexRuleRegistryServiceListResponse    Exist IndexRuleRegistryServiceExistRequest IndexRuleRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nMeasureRegistryService    Method Name Request Type Response Type Description     Create MeasureRegistryServiceCreateRequest MeasureRegistryServiceCreateResponse    Update MeasureRegistryServiceUpdateRequest MeasureRegistryServiceUpdateResponse    Delete MeasureRegistryServiceDeleteRequest MeasureRegistryServiceDeleteResponse    Get MeasureRegistryServiceGetRequest MeasureRegistryServiceGetResponse    List MeasureRegistryServiceListRequest MeasureRegistryServiceListResponse    Exist MeasureRegistryServiceExistRequest MeasureRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nStreamRegistryService    Method Name Request Type Response Type Description     Create StreamRegistryServiceCreateRequest StreamRegistryServiceCreateResponse    Update StreamRegistryServiceUpdateRequest StreamRegistryServiceUpdateResponse    Delete StreamRegistryServiceDeleteRequest StreamRegistryServiceDeleteResponse    Get StreamRegistryServiceGetRequest StreamRegistryServiceGetResponse    List StreamRegistryServiceListRequest StreamRegistryServiceListResponse    Exist StreamRegistryServiceExistRequest StreamRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTopNAggregationRegistryService    Method Name Request Type Response Type Description     Create TopNAggregationRegistryServiceCreateRequest TopNAggregationRegistryServiceCreateResponse    Update TopNAggregationRegistryServiceUpdateRequest TopNAggregationRegistryServiceUpdateResponse    Delete TopNAggregationRegistryServiceDeleteRequest TopNAggregationRegistryServiceDeleteResponse    Get TopNAggregationRegistryServiceGetRequest TopNAggregationRegistryServiceGetResponse    List TopNAggregationRegistryServiceListRequest TopNAggregationRegistryServiceListResponse    Exist TopNAggregationRegistryServiceExistRequest TopNAggregationRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTop\nbanyandb/measure/v1/query.proto \nDataPoint DataPoint is stored in Measures\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamily repeated tag_families contains tags selected in the projection   fields DataPoint.Field repeated fields contains fields selected in the projection    \nDataPoint.Field    Field Type Label Description     name string     value banyandb.model.v1.FieldValue      \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     groups string repeated groups indicate where the data points are stored.   name string  name is the identity of a measure.   
time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   criteria banyandb.model.v1.Criteria  tag_families are indexed.   tag_projection banyandb.model.v1.TagProjection  tag_projection can be used to select tags of the data points in the response   field_projection QueryRequest.FieldProjection  field_projection can be used to select fields of the data points in the response   group_by QueryRequest.GroupBy  group_by groups data points based on their field value for a specific tag and use field_name as the projection name   agg QueryRequest.Aggregation  agg aggregates data points based on a field   top QueryRequest.Top  top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by's output   offset uint32  offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top's output   limit uint32  limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top's output   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a tag.    \nQueryRequest.Aggregation    Field Type Label Description     function banyandb.model.v1.AggregationFunction     field_name string  field_name must be one of files indicated by the field_projection    \nQueryRequest.FieldProjection    Field Type Label Description     names string repeated     \nQueryRequest.GroupBy    Field Type Label Description     tag_projection banyandb.model.v1.TagProjection  tag_projection must be a subset of the tag_projection of QueryRequest   field_name string  field_name must be one of fields indicated by field_projection    \nQueryRequest.Top    Field Type Label Description     number int32  number set the how many items should be returned   field_name string  field_name must be one of files indicated by the field_projection   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields ASC: bottomN DESC: topN UNSPECIFIED: topN    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     data_points DataPoint repeated data_points are the actual data returned    \nTop\nbanyandb/measure/v1/topn.proto \nTopNList TopNList contains a series of topN items\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   items TopNList.Item repeated items contains top-n items in a list    \nTopNList.Item    Field Type Label Description     entity banyandb.model.v1.Tag repeated    value banyandb.model.v1.FieldValue      \nTopNRequest TopNRequest is the request contract for query.\n   Field Type Label Description     groups string repeated groups indicate where the data points are stored.   name string  name is the identity of a measure.   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   top_n int32  top_n set the how many items should be returned in each list.   agg banyandb.model.v1.AggregationFunction  agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only   conditions banyandb.model.v1.Condition repeated criteria select counters. Only equals are acceptable.   
field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields    \nTopNResponse TopNResponse is the response for a query to the Query module.\n   Field Type Label Description     lists TopNList repeated lists contain a series topN lists ranked by timestamp if agg_func in query request is specified, lists' size should be one.    \nTop\nbanyandb/model/v1/write.proto \nStatus Status is the response status for write\n   Name Number Description     STATUS_UNSPECIFIED 0    STATUS_SUCCEED 1    STATUS_INVALID_TIMESTAMP 2    STATUS_NOT_FOUND 3    STATUS_EXPIRED_SCHEMA 4    STATUS_INTERNAL_ERROR 5     \nTop\nbanyandb/measure/v1/write.proto \nDataPointValue DataPointValue is the data point for writing. It only contains values.\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the measure schema   fields banyandb.model.v1.FieldValue repeated the order of fields match the measure schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest WriteRequest is the request contract for write\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   data_point DataPointValue  the data_point is required.   message_id uint64  the message_id is required.    \nWriteResponse WriteResponse is the response contract for write\n   Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/measure/v1/rpc.proto \nMeasureService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream    TopN TopNRequest TopNResponse     \nTop\nbanyandb/property/v1/property.proto \nMetadata Metadata is for multi-tenant use\n   Field Type Label Description     container banyandb.common.v1.Metadata  container is created when it receives the first property   id string  id identifies a property    \nProperty Property stores the user defined data\n   Field Type Label Description     metadata Metadata  metadata is the identity of a property   tags banyandb.model.v1.Tag repeated tag stores the content of a property   updated_at google.protobuf.Timestamp  updated_at indicates when the property is updated   lease_id int64  readonly. lease_id is the ID of the lease that attached to key.   ttl string  ttl indicates the time to live of the property. It's a string in the format of \u0026quot;1h\u0026quot;, \u0026quot;2m\u0026quot;, \u0026quot;3s\u0026quot;, \u0026quot;1500ms\u0026quot;. It defaults to 0s, which means the property never expires. The minimum allowed ttl is 1s.    \nTop\nbanyandb/property/v1/rpc.proto \nApplyRequest    Field Type Label Description     property Property     strategy ApplyRequest.Strategy  strategy indicates how to update a property. It defaults to STRATEGY_MERGE    \nApplyResponse    Field Type Label Description     created bool  created indicates whether the property existed. True: the property is absent. False: the property existed.   
tags_num uint32     lease_id int64      \nDeleteRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nDeleteResponse    Field Type Label Description     deleted bool     tags_num uint32      \nGetRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nGetResponse    Field Type Label Description     property Property      \nKeepAliveRequest    Field Type Label Description     lease_id int64      \nKeepAliveResponse \nListRequest    Field Type Label Description     container banyandb.common.v1.Metadata     ids string repeated    tags string repeated     \nListResponse    Field Type Label Description     property Property repeated     \nApplyRequest.Strategy    Name Number Description     STRATEGY_UNSPECIFIED 0    STRATEGY_MERGE 1    STRATEGY_REPLACE 2     \nPropertyService    Method Name Request Type Response Type Description     Apply ApplyRequest ApplyResponse Apply creates a property if it's absent, or update a existed one based on a strategy.   Delete DeleteRequest DeleteResponse    Get GetRequest GetResponse    List ListRequest ListResponse    KeepAlive KeepAliveRequest KeepAliveResponse     \nTop\nbanyandb/stream/v1/query.proto \nElement Element represents (stream context) a Span defined in Google Dapper paper or equivalently a Segment in Skywalking. (Log context) a log\n   Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp represents a millisecond 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamily repeated fields contains all indexed Field. Some typical names, - stream_id - duration - service_name - service_instance_id - end_time_milliseconds    \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     groups string repeated groups indicate where the elements are stored.   name string  name is the identity of a stream.   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds. In the context of stream, it represents the range of the startTime for spans/segments, while in the context of Log, it means the range of the timestamp(s) for logs. it is always recommended to specify time range for performance reason   offset uint32  offset is used to support pagination, together with the following limit   limit uint32  limit is used to impose a boundary on the number of records being returned   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a field. So far, only fields in the type of Integer are supported   criteria banyandb.model.v1.Criteria  tag_families are indexed.   projection banyandb.model.v1.TagProjection  projection can be used to select the key names of the element in the response    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     elements Element repeated elements are the actual data returned    \nTop\nbanyandb/stream/v1/write.proto \nElementValue    Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds. 
It represents 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the stream schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   element ElementValue  the element is required.   message_id uint64  the message_id is required.    \nWriteResponse    Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/stream/v1/rpc.proto \nStreamService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream     Scalar Value Types    .proto Type Notes C++ Java Python Go C# PHP Ruby     double  double double float float64 double float Float   float  float float float float32 float float Float   int32 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. int32 int int int32 int integer Bignum or Fixnum (as required)   int64 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. int64 long int/long int64 long integer/string Bignum   uint32 Uses variable-length encoding. uint32 int int/long uint32 uint integer Bignum or Fixnum (as required)   uint64 Uses variable-length encoding. uint64 long int/long uint64 ulong integer/string Bignum or Fixnum (as required)   sint32 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. int32 int int int32 int integer Bignum or Fixnum (as required)   sint64 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. int64 long int/long int64 long integer/string Bignum   fixed32 Always four bytes. More efficient than uint32 if values are often greater than 2^28. uint32 int int uint32 uint integer Bignum or Fixnum (as required)   fixed64 Always eight bytes. More efficient than uint64 if values are often greater than 2^56. uint64 long int/long uint64 ulong integer/string Bignum   sfixed32 Always four bytes. int32 int int int32 int integer Bignum or Fixnum (as required)   sfixed64 Always eight bytes. int64 long int/long int64 long integer/string Bignum   bool  bool boolean boolean bool bool boolean TrueClass/FalseClass   string A string must always contain UTF-8 encoded or 7-bit ASCII text. string String str/unicode string string string String (UTF-8)   bytes May contain any arbitrary sequence of bytes. 
string ByteString str []byte ByteString string String (ASCII-8BIT)    ","title":"Protocol Documentation","url":"/docs/skywalking-banyandb/next/api-reference/"},{"content":"Protocol Documentation \nTable of Contents   banyandb/cluster/v1/rpc.proto\n  SendRequest\n  SendResponse\n  Service\n    banyandb/common/v1/common.proto\n  Group\n  IntervalRule\n  Metadata\n  ResourceOpts\n  Catalog\n  IntervalRule.Unit\n    banyandb/database/v1/database.proto\n  Node\n  Shard\n  Role\n    banyandb/model/v1/common.proto\n  FieldValue\n  Float\n  Int\n  IntArray\n  Str\n  StrArray\n  TagFamilyForWrite\n  TagValue\n  AggregationFunction\n    banyandb/model/v1/query.proto\n  Condition\n  Criteria\n  LogicalExpression\n  QueryOrder\n  Tag\n  TagFamily\n  TagProjection\n  TagProjection.TagFamily\n  TimeRange\n  Condition.BinaryOp\n  LogicalExpression.LogicalOp\n  Sort\n    banyandb/database/v1/schema.proto\n  Entity\n  FieldSpec\n  IndexRule\n  IndexRuleBinding\n  Measure\n  Stream\n  Subject\n  TagFamilySpec\n  TagSpec\n  TopNAggregation\n  CompressionMethod\n  EncodingMethod\n  FieldType\n  IndexRule.Analyzer\n  IndexRule.Location\n  IndexRule.Type\n  TagType\n    banyandb/database/v1/rpc.proto\n  GroupRegistryServiceCreateRequest\n  GroupRegistryServiceCreateResponse\n  GroupRegistryServiceDeleteRequest\n  GroupRegistryServiceDeleteResponse\n  GroupRegistryServiceExistRequest\n  GroupRegistryServiceExistResponse\n  GroupRegistryServiceGetRequest\n  GroupRegistryServiceGetResponse\n  GroupRegistryServiceListRequest\n  GroupRegistryServiceListResponse\n  GroupRegistryServiceUpdateRequest\n  GroupRegistryServiceUpdateResponse\n  IndexRuleBindingRegistryServiceCreateRequest\n  IndexRuleBindingRegistryServiceCreateResponse\n  IndexRuleBindingRegistryServiceDeleteRequest\n  IndexRuleBindingRegistryServiceDeleteResponse\n  IndexRuleBindingRegistryServiceExistRequest\n  IndexRuleBindingRegistryServiceExistResponse\n  IndexRuleBindingRegistryServiceGetRequest\n  IndexRuleBindingRegistryServiceGetResponse\n  IndexRuleBindingRegistryServiceListRequest\n  IndexRuleBindingRegistryServiceListResponse\n  IndexRuleBindingRegistryServiceUpdateRequest\n  IndexRuleBindingRegistryServiceUpdateResponse\n  IndexRuleRegistryServiceCreateRequest\n  IndexRuleRegistryServiceCreateResponse\n  IndexRuleRegistryServiceDeleteRequest\n  IndexRuleRegistryServiceDeleteResponse\n  IndexRuleRegistryServiceExistRequest\n  IndexRuleRegistryServiceExistResponse\n  IndexRuleRegistryServiceGetRequest\n  IndexRuleRegistryServiceGetResponse\n  IndexRuleRegistryServiceListRequest\n  IndexRuleRegistryServiceListResponse\n  IndexRuleRegistryServiceUpdateRequest\n  IndexRuleRegistryServiceUpdateResponse\n  MeasureRegistryServiceCreateRequest\n  MeasureRegistryServiceCreateResponse\n  MeasureRegistryServiceDeleteRequest\n  MeasureRegistryServiceDeleteResponse\n  MeasureRegistryServiceExistRequest\n  MeasureRegistryServiceExistResponse\n  MeasureRegistryServiceGetRequest\n  MeasureRegistryServiceGetResponse\n  MeasureRegistryServiceListRequest\n  MeasureRegistryServiceListResponse\n  MeasureRegistryServiceUpdateRequest\n  MeasureRegistryServiceUpdateResponse\n  StreamRegistryServiceCreateRequest\n  StreamRegistryServiceCreateResponse\n  StreamRegistryServiceDeleteRequest\n  StreamRegistryServiceDeleteResponse\n  StreamRegistryServiceExistRequest\n  StreamRegistryServiceExistResponse\n  StreamRegistryServiceGetRequest\n  StreamRegistryServiceGetResponse\n  StreamRegistryServiceListRequest\n  StreamRegistryServiceListResponse\n  
StreamRegistryServiceUpdateRequest\n  StreamRegistryServiceUpdateResponse\n  TopNAggregationRegistryServiceCreateRequest\n  TopNAggregationRegistryServiceCreateResponse\n  TopNAggregationRegistryServiceDeleteRequest\n  TopNAggregationRegistryServiceDeleteResponse\n  TopNAggregationRegistryServiceExistRequest\n  TopNAggregationRegistryServiceExistResponse\n  TopNAggregationRegistryServiceGetRequest\n  TopNAggregationRegistryServiceGetResponse\n  TopNAggregationRegistryServiceListRequest\n  TopNAggregationRegistryServiceListResponse\n  TopNAggregationRegistryServiceUpdateRequest\n  TopNAggregationRegistryServiceUpdateResponse\n  GroupRegistryService\n  IndexRuleBindingRegistryService\n  IndexRuleRegistryService\n  MeasureRegistryService\n  StreamRegistryService\n  TopNAggregationRegistryService\n    banyandb/measure/v1/query.proto\n DataPoint DataPoint.Field QueryRequest QueryRequest.Aggregation QueryRequest.FieldProjection QueryRequest.GroupBy QueryRequest.Top QueryResponse    banyandb/measure/v1/topn.proto\n TopNList TopNList.Item TopNRequest TopNResponse    banyandb/model/v1/write.proto\n Status    banyandb/measure/v1/write.proto\n DataPointValue InternalWriteRequest WriteRequest WriteResponse    banyandb/measure/v1/rpc.proto\n MeasureService    banyandb/property/v1/property.proto\n Metadata Property    banyandb/property/v1/rpc.proto\n  ApplyRequest\n  ApplyResponse\n  DeleteRequest\n  DeleteResponse\n  GetRequest\n  GetResponse\n  KeepAliveRequest\n  KeepAliveResponse\n  ListRequest\n  ListResponse\n  ApplyRequest.Strategy\n  PropertyService\n    banyandb/stream/v1/query.proto\n Element QueryRequest QueryResponse    banyandb/stream/v1/write.proto\n ElementValue InternalWriteRequest WriteRequest WriteResponse    banyandb/stream/v1/rpc.proto\n StreamService    Scalar Value Types\n  \nTop\nbanyandb/cluster/v1/rpc.proto \nSendRequest    Field Type Label Description     topic string     message_id uint64     body google.protobuf.Any      \nSendResponse    Field Type Label Description     message_id uint64     error string     body google.protobuf.Any      \nService    Method Name Request Type Response Type Description     Send SendRequest stream SendResponse stream     \nTop\nbanyandb/common/v1/common.proto \nGroup Group is an internal object for Group management\n   Field Type Label Description     metadata Metadata  metadata define the group's identity   catalog Catalog  catalog denotes which type of data the group contains   resource_opts ResourceOpts  resourceOpts indicates the structure of the underlying kv storage   updated_at google.protobuf.Timestamp  updated_at indicates when resources of the group are updated    \nIntervalRule IntervalRule is a structured duration\n   Field Type Label Description     unit IntervalRule.Unit  unit can only be UNIT_HOUR or UNIT_DAY   num uint32      \nMetadata Metadata is for multi-tenant, multi-model use\n   Field Type Label Description     group string  group contains a set of options, like retention policy, max   name string  name of the entity   id uint32     create_revision int64  readonly. create_revision is the revision of last creation on this key.   mod_revision int64  readonly. mod_revision is the revision of last modification on this key.    
\nResourceOpts    Field Type Label Description     shard_num uint32  shard_num is the number of shards   block_interval IntervalRule  block_interval indicates the length of a block block_interval should be less than or equal to segment_interval   segment_interval IntervalRule  segment_interval indicates the length of a segment   ttl IntervalRule  ttl indicates time to live, how long the data will be cached    \nCatalog    Name Number Description     CATALOG_UNSPECIFIED 0    CATALOG_STREAM 1    CATALOG_MEASURE 2     \nIntervalRule.Unit    Name Number Description     UNIT_UNSPECIFIED 0    UNIT_HOUR 1    UNIT_DAY 2     \nTop\nbanyandb/database/v1/database.proto \nNode    Field Type Label Description     metadata banyandb.common.v1.Metadata     roles Role repeated    grpc_address string     http_address string     created_at google.protobuf.Timestamp      \nShard    Field Type Label Description     id uint64     metadata banyandb.common.v1.Metadata     catalog banyandb.common.v1.Catalog     node string     total uint32     updated_at google.protobuf.Timestamp     created_at google.protobuf.Timestamp      \nRole    Name Number Description     ROLE_UNSPECIFIED 0    ROLE_META 1    ROLE_DATA 2    ROLE_LIAISON 3     \nTop\nbanyandb/model/v1/common.proto \nFieldValue    Field Type Label Description     null google.protobuf.NullValue     str Str     int Int     binary_data bytes     float Float      \nFloat    Field Type Label Description     value double      \nInt    Field Type Label Description     value int64      \nIntArray    Field Type Label Description     value int64 repeated     \nStr    Field Type Label Description     value string      \nStrArray    Field Type Label Description     value string repeated     \nTagFamilyForWrite    Field Type Label Description     tags TagValue repeated     \nTagValue    Field Type Label Description     null google.protobuf.NullValue     str Str     str_array StrArray     int Int     int_array IntArray     binary_data bytes      \nAggregationFunction    Name Number Description     AGGREGATION_FUNCTION_UNSPECIFIED 0    AGGREGATION_FUNCTION_MEAN 1    AGGREGATION_FUNCTION_MAX 2    AGGREGATION_FUNCTION_MIN 3    AGGREGATION_FUNCTION_COUNT 4    AGGREGATION_FUNCTION_SUM 5     \nTop\nbanyandb/model/v1/query.proto \nCondition Condition consists of the query condition with a single binary operator to be imposed For 1:1 BinaryOp, values in condition must be an array with length = 1, while for 1:N BinaryOp, values can be an array with length \u0026gt;= 1.\n   Field Type Label Description     name string     op Condition.BinaryOp     value TagValue      \nCriteria tag_families are indexed.\n   Field Type Label Description     le LogicalExpression     condition Condition      \nLogicalExpression LogicalExpression supports logical operation\n   Field Type Label Description     op LogicalExpression.LogicalOp  op is a logical operation   left Criteria     right Criteria      \nQueryOrder QueryOrder means a Sort operation to be done for a given index rule. The index_rule_name refers to the name of a index rule bound to the subject.\n   Field Type Label Description     index_rule_name string     sort Sort      \nTag Pair is the building block of a record which is equivalent to a key-value pair. In the context of Trace, it could be metadata of a trace such as service_name, service_instance, etc. Besides, other tags are organized in key-value pair in the underlying storage layer. 
One should notice that the values can be a multi-value.\n   Field Type Label Description     key string     value TagValue      \nTagFamily    Field Type Label Description     name string     tags Tag repeated     \nTagProjection TagProjection is used to select the names of keys to be returned.\n   Field Type Label Description     tag_families TagProjection.TagFamily repeated     \nTagProjection.TagFamily    Field Type Label Description     name string     tags string repeated     \nTimeRange TimeRange is a range query for uint64, the range here follows left-inclusive and right-exclusive rule, i.e. [begin, end) if both edges exist\n   Field Type Label Description     begin google.protobuf.Timestamp     end google.protobuf.Timestamp      \nCondition.BinaryOp BinaryOp specifies the operation imposed to the given query condition For EQ, NE, LT, GT, LE and GE, only one operand should be given, i.e. one-to-one relationship. HAVING and NOT_HAVING allow multi-value to be the operand such as array/vector, i.e. one-to-many relationship. For example, \u0026quot;keyA\u0026quot; contains \u0026quot;valueA\u0026quot; and \u0026quot;valueB\u0026quot; MATCH performances a full-text search if the tag is analyzed. The string value applies to the same analyzer as the tag, but string array value does not. Each item in a string array is seen as a token instead of a query expression.\n   Name Number Description     BINARY_OP_UNSPECIFIED 0    BINARY_OP_EQ 1    BINARY_OP_NE 2    BINARY_OP_LT 3    BINARY_OP_GT 4    BINARY_OP_LE 5    BINARY_OP_GE 6    BINARY_OP_HAVING 7    BINARY_OP_NOT_HAVING 8    BINARY_OP_IN 9    BINARY_OP_NOT_IN 10    BINARY_OP_MATCH 11     \nLogicalExpression.LogicalOp    Name Number Description     LOGICAL_OP_UNSPECIFIED 0    LOGICAL_OP_AND 1    LOGICAL_OP_OR 2     \nSort    Name Number Description     SORT_UNSPECIFIED 0    SORT_DESC 1    SORT_ASC 2     \nTop\nbanyandb/database/v1/schema.proto \nEntity    Field Type Label Description     tag_names string repeated     \nFieldSpec FieldSpec is the specification of field\n   Field Type Label Description     name string  name is the identity of a field   field_type FieldType  field_type denotes the type of field value   encoding_method EncodingMethod  encoding_method indicates how to encode data during writing   compression_method CompressionMethod  compression_method indicates how to compress data during writing    \nIndexRule IndexRule defines how to generate indices based on tags and the index type IndexRule should bind to a subject through an IndexRuleBinding to generate proper indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata define the rule's identity   tags string repeated tags are the combination that refers to an indexed object If the elements in tags are more than 1, the object will generate a multi-tag index Caveat: All tags in a multi-tag MUST have an identical IndexType   type IndexRule.Type  type is the IndexType of this IndexObject.   location IndexRule.Location  location indicates where to store index.   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRule is updated   analyzer IndexRule.Analyzer  analyzer analyzes tag value to support the full-text searching for TYPE_INVERTED indices.    
\nIndexRuleBinding IndexRuleBinding is a bridge to connect several IndexRules to a subject. This binding is valid between begin_at_nanoseconds and expire_at_nanoseconds, which provides flexible strategies to control how to generate time series indices.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of this binding   rules string repeated rules refer to the IndexRule   subject Subject  subject indicates the subject of binding action   begin_at google.protobuf.Timestamp  begin_at_nanoseconds is the timestamp, after which the binding will be active   expire_at google.protobuf.Timestamp  expire_at_nanoseconds is the timestamp, after which the binding will be inactive. expire_at_nanoseconds must be larger than begin_at_nanoseconds   updated_at google.protobuf.Timestamp  updated_at indicates when the IndexRuleBinding is updated    \nMeasure Measure intends to store data points\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a measure   tag_families TagFamilySpec repeated tag_families are for filtering measures   fields FieldSpec repeated fields denote measure values   entity Entity  entity indicates which tags will be used to generate a series and shard a measure   interval string  interval indicates how frequently to send a data point. Valid time units are \u0026quot;ns\u0026quot;, \u0026quot;us\u0026quot; (or \u0026quot;µs\u0026quot;), \u0026quot;ms\u0026quot;, \u0026quot;s\u0026quot;, \u0026quot;m\u0026quot;, \u0026quot;h\u0026quot;, \u0026quot;d\u0026quot;.   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nStream Stream intends to store streaming data, for example, traces or logs\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of a trace series   tag_families TagFamilySpec repeated tag_families   entity Entity  entity indicates how to generate a series and shard a stream   updated_at google.protobuf.Timestamp  updated_at indicates when the stream is updated    \nSubject Subject defines which stream or measure would generate indices\n   Field Type Label Description     catalog banyandb.common.v1.Catalog  catalog is where the subject belongs to todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   name string  name refers to a stream or measure in a particular catalog    \nTagFamilySpec    Field Type Label Description     name string     tags TagSpec repeated tags define accepted tags    \nTagSpec    Field Type Label Description     name string     type TagType     indexed_only bool  indexed_only indicates whether the tag is stored. True: it's indexed only, but not stored. False: it's stored and indexed    \nTopNAggregation TopNAggregation generates offline TopN statistics for a measure's TopN approximation\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is the identity of an aggregation   source_measure banyandb.common.v1.Metadata  source_measure denotes the data source of this aggregation   field_name string  field_name is the name of the field used for ranking   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields. ASC: bottomN; DESC: topN; UNSPECIFIED: topN + bottomN todo validate plugin exist bug https://github.com/bufbuild/protoc-gen-validate/issues/672   group_by_tag_names string repeated group_by_tag_names groups data points into statistical counters   criteria banyandb.model.v1.Criteria  criteria
select partial data points from measure   counters_number int32  counters_number sets the number of counters to be tracked. The default value is 1000   lru_size int32  lru_size defines how much entry is allowed to be maintained in the memory   updated_at google.protobuf.Timestamp  updated_at indicates when the measure is updated    \nCompressionMethod    Name Number Description     COMPRESSION_METHOD_UNSPECIFIED 0    COMPRESSION_METHOD_ZSTD 1     \nEncodingMethod    Name Number Description     ENCODING_METHOD_UNSPECIFIED 0    ENCODING_METHOD_GORILLA 1     \nFieldType    Name Number Description     FIELD_TYPE_UNSPECIFIED 0    FIELD_TYPE_STRING 1    FIELD_TYPE_INT 2    FIELD_TYPE_DATA_BINARY 3    FIELD_TYPE_FLOAT 4     \nIndexRule.Analyzer    Name Number Description     ANALYZER_UNSPECIFIED 0    ANALYZER_KEYWORD 1 Keyword analyzer is a “noop” analyzer which returns the entire input string as a single token.   ANALYZER_STANDARD 2 Standard analyzer provides grammar based tokenization   ANALYZER_SIMPLE 3 Simple analyzer breaks text into tokens at any non-letter character, such as numbers, spaces, hyphens and apostrophes, discards non-letter characters, and changes uppercase to lowercase.    \nIndexRule.Location    Name Number Description     LOCATION_UNSPECIFIED 0    LOCATION_SERIES 1    LOCATION_GLOBAL 2     \nIndexRule.Type Type determine the index structure under the hood\n   Name Number Description     TYPE_UNSPECIFIED 0    TYPE_TREE 1    TYPE_INVERTED 2     \nTagType    Name Number Description     TAG_TYPE_UNSPECIFIED 0    TAG_TYPE_STRING 1    TAG_TYPE_INT 2    TAG_TYPE_STRING_ARRAY 3    TAG_TYPE_INT_ARRAY 4    TAG_TYPE_DATA_BINARY 5     \nTop\nbanyandb/database/v1/rpc.proto \nGroupRegistryServiceCreateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceCreateResponse \nGroupRegistryServiceDeleteRequest    Field Type Label Description     group string      \nGroupRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nGroupRegistryServiceExistRequest    Field Type Label Description     group string      \nGroupRegistryServiceExistResponse    Field Type Label Description     has_group bool      \nGroupRegistryServiceGetRequest    Field Type Label Description     group string      \nGroupRegistryServiceGetResponse    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceListRequest \nGroupRegistryServiceListResponse    Field Type Label Description     group banyandb.common.v1.Group repeated     \nGroupRegistryServiceUpdateRequest    Field Type Label Description     group banyandb.common.v1.Group      \nGroupRegistryServiceUpdateResponse \nIndexRuleBindingRegistryServiceCreateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceCreateResponse \nIndexRuleBindingRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleBindingRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule_binding bool      \nIndexRuleBindingRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleBindingRegistryServiceGetResponse    Field Type Label Description 
    index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleBindingRegistryServiceListResponse    Field Type Label Description     index_rule_binding IndexRuleBinding repeated     \nIndexRuleBindingRegistryServiceUpdateRequest    Field Type Label Description     index_rule_binding IndexRuleBinding      \nIndexRuleBindingRegistryServiceUpdateResponse \nIndexRuleRegistryServiceCreateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceCreateResponse \nIndexRuleRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nIndexRuleRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_index_rule bool      \nIndexRuleRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nIndexRuleRegistryServiceGetResponse    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceListRequest    Field Type Label Description     group string      \nIndexRuleRegistryServiceListResponse    Field Type Label Description     index_rule IndexRule repeated     \nIndexRuleRegistryServiceUpdateRequest    Field Type Label Description     index_rule IndexRule      \nIndexRuleRegistryServiceUpdateResponse \nMeasureRegistryServiceCreateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nMeasureRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nMeasureRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_measure bool      \nMeasureRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nMeasureRegistryServiceGetResponse    Field Type Label Description     measure Measure      \nMeasureRegistryServiceListRequest    Field Type Label Description     group string      \nMeasureRegistryServiceListResponse    Field Type Label Description     measure Measure repeated     \nMeasureRegistryServiceUpdateRequest    Field Type Label Description     measure Measure      \nMeasureRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceCreateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceCreateResponse    Field Type Label Description     mod_revision int64      \nStreamRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nStreamRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nStreamRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_stream bool      \nStreamRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata 
     \nStreamRegistryServiceGetResponse    Field Type Label Description     stream Stream      \nStreamRegistryServiceListRequest    Field Type Label Description     group string      \nStreamRegistryServiceListResponse    Field Type Label Description     stream Stream repeated     \nStreamRegistryServiceUpdateRequest    Field Type Label Description     stream Stream      \nStreamRegistryServiceUpdateResponse    Field Type Label Description     mod_revision int64      \nTopNAggregationRegistryServiceCreateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceCreateResponse \nTopNAggregationRegistryServiceDeleteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceDeleteResponse    Field Type Label Description     deleted bool      \nTopNAggregationRegistryServiceExistRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceExistResponse    Field Type Label Description     has_group bool     has_top_n_aggregation bool      \nTopNAggregationRegistryServiceGetRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata      \nTopNAggregationRegistryServiceGetResponse    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceListRequest    Field Type Label Description     group string      \nTopNAggregationRegistryServiceListResponse    Field Type Label Description     top_n_aggregation TopNAggregation repeated     \nTopNAggregationRegistryServiceUpdateRequest    Field Type Label Description     top_n_aggregation TopNAggregation      \nTopNAggregationRegistryServiceUpdateResponse \nGroupRegistryService    Method Name Request Type Response Type Description     Create GroupRegistryServiceCreateRequest GroupRegistryServiceCreateResponse    Update GroupRegistryServiceUpdateRequest GroupRegistryServiceUpdateResponse    Delete GroupRegistryServiceDeleteRequest GroupRegistryServiceDeleteResponse    Get GroupRegistryServiceGetRequest GroupRegistryServiceGetResponse    List GroupRegistryServiceListRequest GroupRegistryServiceListResponse    Exist GroupRegistryServiceExistRequest GroupRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nIndexRuleBindingRegistryService    Method Name Request Type Response Type Description     Create IndexRuleBindingRegistryServiceCreateRequest IndexRuleBindingRegistryServiceCreateResponse    Update IndexRuleBindingRegistryServiceUpdateRequest IndexRuleBindingRegistryServiceUpdateResponse    Delete IndexRuleBindingRegistryServiceDeleteRequest IndexRuleBindingRegistryServiceDeleteResponse    Get IndexRuleBindingRegistryServiceGetRequest IndexRuleBindingRegistryServiceGetResponse    List IndexRuleBindingRegistryServiceListRequest IndexRuleBindingRegistryServiceListResponse    Exist IndexRuleBindingRegistryServiceExistRequest IndexRuleBindingRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. 
Please use HEAD method to touch Get instead    \nIndexRuleRegistryService    Method Name Request Type Response Type Description     Create IndexRuleRegistryServiceCreateRequest IndexRuleRegistryServiceCreateResponse    Update IndexRuleRegistryServiceUpdateRequest IndexRuleRegistryServiceUpdateResponse    Delete IndexRuleRegistryServiceDeleteRequest IndexRuleRegistryServiceDeleteResponse    Get IndexRuleRegistryServiceGetRequest IndexRuleRegistryServiceGetResponse    List IndexRuleRegistryServiceListRequest IndexRuleRegistryServiceListResponse    Exist IndexRuleRegistryServiceExistRequest IndexRuleRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nMeasureRegistryService    Method Name Request Type Response Type Description     Create MeasureRegistryServiceCreateRequest MeasureRegistryServiceCreateResponse    Update MeasureRegistryServiceUpdateRequest MeasureRegistryServiceUpdateResponse    Delete MeasureRegistryServiceDeleteRequest MeasureRegistryServiceDeleteResponse    Get MeasureRegistryServiceGetRequest MeasureRegistryServiceGetResponse    List MeasureRegistryServiceListRequest MeasureRegistryServiceListResponse    Exist MeasureRegistryServiceExistRequest MeasureRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nStreamRegistryService    Method Name Request Type Response Type Description     Create StreamRegistryServiceCreateRequest StreamRegistryServiceCreateResponse    Update StreamRegistryServiceUpdateRequest StreamRegistryServiceUpdateResponse    Delete StreamRegistryServiceDeleteRequest StreamRegistryServiceDeleteResponse    Get StreamRegistryServiceGetRequest StreamRegistryServiceGetResponse    List StreamRegistryServiceListRequest StreamRegistryServiceListResponse    Exist StreamRegistryServiceExistRequest StreamRegistryServiceExistResponse Exist doesn't expose an HTTP endpoint. Please use HEAD method to touch Get instead    \nTopNAggregationRegistryService    Method Name Request Type Response Type Description     Create TopNAggregationRegistryServiceCreateRequest TopNAggregationRegistryServiceCreateResponse    Update TopNAggregationRegistryServiceUpdateRequest TopNAggregationRegistryServiceUpdateResponse    Delete TopNAggregationRegistryServiceDeleteRequest TopNAggregationRegistryServiceDeleteResponse    Get TopNAggregationRegistryServiceGetRequest TopNAggregationRegistryServiceGetResponse    List TopNAggregationRegistryServiceListRequest TopNAggregationRegistryServiceListResponse    Exist TopNAggregationRegistryServiceExistRequest TopNAggregationRegistryServiceExistResponse     \nTop\nbanyandb/measure/v1/query.proto \nDataPoint DataPoint is stored in Measures\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamily repeated tag_families contains tags selected in the projection   fields DataPoint.Field repeated fields contains fields selected in the projection    \nDataPoint.Field    Field Type Label Description     name string     value banyandb.model.v1.FieldValue      \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   criteria banyandb.model.v1.Criteria  tag_families are indexed.   
tag_projection banyandb.model.v1.TagProjection  tag_projection can be used to select tags of the data points in the response   field_projection QueryRequest.FieldProjection  field_projection can be used to select fields of the data points in the response   group_by QueryRequest.GroupBy  group_by groups data points based on their field value for a specific tag and uses field_name as the projection name   agg QueryRequest.Aggregation  agg aggregates data points based on a field   top QueryRequest.Top  top limits the result based on a particular field. If order_by is specified, top sorts the dataset based on order_by's output   offset uint32  offset is used to support pagination, together with the following limit. If top is specified, offset processes the dataset based on top's output   limit uint32  limit is used to impose a boundary on the number of records being returned. If top is specified, limit processes the dataset based on top's output   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a tag.    \nQueryRequest.Aggregation    Field Type Label Description     function banyandb.model.v1.AggregationFunction     field_name string  field_name must be one of the fields indicated by the field_projection    \nQueryRequest.FieldProjection    Field Type Label Description     names string repeated     \nQueryRequest.GroupBy    Field Type Label Description     tag_projection banyandb.model.v1.TagProjection  tag_projection must be a subset of the tag_projection of QueryRequest   field_name string  field_name must be one of the fields indicated by field_projection    \nQueryRequest.Top    Field Type Label Description     number int32  number sets how many items should be returned   field_name string  field_name must be one of the fields indicated by the field_projection   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields. ASC: bottomN; DESC: topN; UNSPECIFIED: topN    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     data_points DataPoint repeated data_points are the actual data returned    \nTop\nbanyandb/measure/v1/topn.proto \nTopNList TopNList contains a series of topN items\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   items TopNList.Item repeated items contains top-n items in a list    \nTopNList.Item    Field Type Label Description     entity banyandb.model.v1.Tag repeated    value banyandb.model.v1.FieldValue      \nTopNRequest TopNRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds.   top_n int32  top_n sets how many items should be returned in each list.   agg banyandb.model.v1.AggregationFunction  agg aggregates lists grouped by field names in the time_range TODO validate enum defined_only   conditions banyandb.model.v1.Condition repeated conditions select counters. Only equals are acceptable.   field_value_sort banyandb.model.v1.Sort  field_value_sort indicates how to sort fields    \nTopNResponse TopNResponse is the response for a query to the Query module.\n   Field Type Label Description     lists TopNList repeated lists contain a series of topN lists ranked by timestamp. If agg_func in the query request is specified, lists' size should be one.    
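To tie the measure query messages above together, here is a minimal sketch of a measure QueryRequest expressed as a plain Python dictionary following the JSON mapping of the fields listed in this section; the group "sw_metric", the measure "service_cpm" and the tag/field names are hypothetical placeholders.

# Hypothetical measure query: project one tag family and one field, then aggregate the field;
# only the message shape follows the QueryRequest documented above.
query_request = {
    "metadata": {"group": "sw_metric", "name": "service_cpm"},
    "time_range": {"begin": "2024-01-01T00:00:00Z", "end": "2024-01-01T01:00:00Z"},
    "tag_projection": {"tag_families": [{"name": "default", "tags": ["entity_id"]}]},
    "field_projection": {"names": ["value"]},
    "agg": {"function": "AGGREGATION_FUNCTION_MEAN", "field_name": "value"},
}

A Criteria object built from the Condition/LogicalExpression messages shown earlier can be placed under the "criteria" key of the same request to filter the data points before aggregation.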
\nTop\nbanyandb/model/v1/write.proto \nStatus Status is the response status for write\n   Name Number Description     STATUS_UNSPECIFIED 0    STATUS_SUCCEED 1    STATUS_INVALID_TIMESTAMP 2    STATUS_NOT_FOUND 3    STATUS_EXPIRED_SCHEMA 4    STATUS_INTERNAL_ERROR 5     \nTop\nbanyandb/measure/v1/write.proto \nDataPointValue DataPointValue is the data point for writing. It only contains values.\n   Field Type Label Description     timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds.   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the measure schema   fields banyandb.model.v1.FieldValue repeated the order of fields match the measure schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest WriteRequest is the request contract for write\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   data_point DataPointValue  the data_point is required.   message_id uint64  the message_id is required.    \nWriteResponse WriteResponse is the response contract for write\n   Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/measure/v1/rpc.proto \nMeasureService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream    TopN TopNRequest TopNResponse     \nTop\nbanyandb/property/v1/property.proto \nMetadata Metadata is for multi-tenant use\n   Field Type Label Description     container banyandb.common.v1.Metadata  container is created when it receives the first property   id string  id identifies a property    \nProperty Property stores the user defined data\n   Field Type Label Description     metadata Metadata  metadata is the identity of a property   tags banyandb.model.v1.Tag repeated tag stores the content of a property   updated_at google.protobuf.Timestamp  updated_at indicates when the property is updated   lease_id int64  readonly. lease_id is the ID of the lease that attached to key.   ttl string  ttl indicates the time to live of the property. It's a string in the format of \u0026quot;1h\u0026quot;, \u0026quot;2m\u0026quot;, \u0026quot;3s\u0026quot;, \u0026quot;1500ms\u0026quot;. It defaults to 0s, which means the property never expires. The minimum allowed ttl is 1s.    \nTop\nbanyandb/property/v1/rpc.proto \nApplyRequest    Field Type Label Description     property Property     strategy ApplyRequest.Strategy  strategy indicates how to update a property. It defaults to STRATEGY_MERGE    \nApplyResponse    Field Type Label Description     created bool  created indicates whether the property existed. True: the property is absent. False: the property existed.   
tags_num uint32     lease_id int64      \nDeleteRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nDeleteResponse    Field Type Label Description     deleted bool     tags_num uint32      \nGetRequest    Field Type Label Description     metadata Metadata     tags string repeated     \nGetResponse    Field Type Label Description     property Property      \nKeepAliveRequest    Field Type Label Description     lease_id int64      \nKeepAliveResponse \nListRequest    Field Type Label Description     container banyandb.common.v1.Metadata     ids string repeated    tags string repeated     \nListResponse    Field Type Label Description     property Property repeated     \nApplyRequest.Strategy    Name Number Description     STRATEGY_UNSPECIFIED 0    STRATEGY_MERGE 1    STRATEGY_REPLACE 2     \nPropertyService    Method Name Request Type Response Type Description     Apply ApplyRequest ApplyResponse Apply creates a property if it's absent, or update a existed one based on a strategy.   Delete DeleteRequest DeleteResponse    Get GetRequest GetResponse    List ListRequest ListResponse    KeepAlive KeepAliveRequest KeepAliveResponse     \nTop\nbanyandb/stream/v1/query.proto \nElement Element represents (stream context) a Span defined in Google Dapper paper or equivalently a Segment in Skywalking. (Log context) a log\n   Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp represents a millisecond 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamily repeated fields contains all indexed Field. Some typical names, - stream_id - duration - service_name - service_instance_id - end_time_milliseconds    \nQueryRequest QueryRequest is the request contract for query.\n   Field Type Label Description     metadata banyandb.common.v1.Metadata  metadata is required   time_range banyandb.model.v1.TimeRange  time_range is a range query with begin/end time of entities in the timeunit of milliseconds. In the context of stream, it represents the range of the startTime for spans/segments, while in the context of Log, it means the range of the timestamp(s) for logs. it is always recommended to specify time range for performance reason   offset uint32  offset is used to support pagination, together with the following limit   limit uint32  limit is used to impose a boundary on the number of records being returned   order_by banyandb.model.v1.QueryOrder  order_by is given to specify the sort for a field. So far, only fields in the type of Integer are supported   criteria banyandb.model.v1.Criteria  tag_families are indexed.   projection banyandb.model.v1.TagProjection  projection can be used to select the key names of the element in the response    \nQueryResponse QueryResponse is the response for a query to the Query module.\n   Field Type Label Description     elements Element repeated elements are the actual data returned    \nTop\nbanyandb/stream/v1/write.proto \nElementValue    Field Type Label Description     element_id string  element_id could be span_id of a Span or segment_id of a Segment in the context of stream   timestamp google.protobuf.Timestamp  timestamp is in the timeunit of milliseconds. 
It represents 1) either the start time of a Span/Segment, 2) or the timestamp of a log   tag_families banyandb.model.v1.TagFamilyForWrite repeated the order of tag_families' items match the stream schema    \nInternalWriteRequest    Field Type Label Description     shard_id uint32     series_hash bytes     entity_values banyandb.model.v1.TagValue repeated    request WriteRequest      \nWriteRequest    Field Type Label Description     metadata banyandb.common.v1.Metadata  the metadata is required.   element ElementValue  the element is required.   message_id uint64  the message_id is required.    \nWriteResponse    Field Type Label Description     message_id uint64  the message_id from request.   status banyandb.model.v1.Status  status indicates the request processing result   metadata banyandb.common.v1.Metadata  the metadata from request when request fails    \nTop\nbanyandb/stream/v1/rpc.proto \nStreamService    Method Name Request Type Response Type Description     Query QueryRequest QueryResponse    Write WriteRequest stream WriteResponse stream     Scalar Value Types    .proto Type Notes C++ Java Python Go C# PHP Ruby     double  double double float float64 double float Float   float  float float float float32 float float Float   int32 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. int32 int int int32 int integer Bignum or Fixnum (as required)   int64 Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. int64 long int/long int64 long integer/string Bignum   uint32 Uses variable-length encoding. uint32 int int/long uint32 uint integer Bignum or Fixnum (as required)   uint64 Uses variable-length encoding. uint64 long int/long uint64 ulong integer/string Bignum or Fixnum (as required)   sint32 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. int32 int int int32 int integer Bignum or Fixnum (as required)   sint64 Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. int64 long int/long int64 long integer/string Bignum   fixed32 Always four bytes. More efficient than uint32 if values are often greater than 2^28. uint32 int int uint32 uint integer Bignum or Fixnum (as required)   fixed64 Always eight bytes. More efficient than uint64 if values are often greater than 2^56. uint64 long int/long uint64 ulong integer/string Bignum   sfixed32 Always four bytes. int32 int int int32 int integer Bignum or Fixnum (as required)   sfixed64 Always eight bytes. int64 long int/long int64 long integer/string Bignum   bool  bool boolean boolean bool bool boolean TrueClass/FalseClass   string A string must always contain UTF-8 encoded or 7-bit ASCII text. string String str/unicode string string string String (UTF-8)   bytes May contain any arbitrary sequence of bytes. string ByteString str []byte ByteString string String (ASCII-8BIT)    ","title":"Protocol Documentation","url":"/docs/skywalking-banyandb/v0.5.0/api-reference/"},{"content":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus format from the Pulsar and transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the `Layer: PULSAR.\nData flow  Pulsar exposes metrics through Prometheus endpoint. 
OpenTelemetry Collector fetches metrics from Pulsar cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up Pulsar Cluster. (Pulsar cluster includes pulsar broker cluster and Bookkeeper bookie cluster.) Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Pulsar Monitoring Pulsar monitoring provides multidimensional metrics monitoring of Pulsar cluster as Layer: PULSAR Service in the OAP. In each cluster, the nodes are represented as Instance.\nPulsar Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Total Topics meter_pulsar_total_topics The number of Pulsar topics in this cluster. Pulsar Cluster   Total Subscriptions meter_pulsar_total_subscriptions The number of Pulsar subscriptions in this cluster. Pulsar Cluster   Total Producers meter_pulsar_total_producers The number of active producers connected to this cluster. Pulsar Cluster   Total Consumers meter_pulsar_total_consumers The number of active consumers connected to this cluster. Pulsar Cluster   Message Rate In meter_pulsar_message_rate_in The total message rate coming into this cluster (message per second). Pulsar Cluster   Message Rate Out meter_pulsar_message_rate_out The total message rate going out from this cluster (message per second). Pulsar Cluster   Throughput In meter_pulsar_throughput_in The total throughput coming into this cluster (byte per second). Pulsar Cluster   Throughput Out meter_pulsar_throughput_out The total throughput going out from this cluster (byte per second). Pulsar Cluster   Storage Size meter_pulsar_storage_size The total storage size of all topics in this broker (in bytes). Pulsar Cluster   Storage Logical Size meter_pulsar_storage_logical_size The storage size of all topics in this broker without replicas (in bytes). Pulsar Cluster   Storage Write Rate meter_pulsar_storage_write_rate The total message batches (entries) written to the storage for this broker (message batch per second). Pulsar Cluster   Storage Read Rate meter_pulsar_storage_read_rate The total message batches (entries) read from the storage for this broker (message batch per second). Pulsar Cluster    Pulsar Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     Active Connections meter_pulsar_broker_active_connections The number of active connections. Pulsar Broker   Total Connections meter_pulsar_broker_total_connections The total number of connections. Pulsar Broker   Connection Create Success Count meter_pulsar_broker_connection_create_success_count The number of successfully created connections. Pulsar Broker   Connection Create Fail Count meter_pulsar_broker_connection_create_fail_count The number of failed connections. Pulsar Broker   Connection Closed Total Count meter_pulsar_broker_connection_closed_total_count The total number of closed connections. Pulsar Broker   JVM Buffer Pool Used meter_pulsar_broker_jvm_buffer_pool_used_bytes The usage of jvm buffer pool. Pulsar Broker   JVM Memory Pool Used meter_pulsar_broker_jvm_memory_pool_used The usage of jvm memory pool. Pulsar Broker   JVM Memory meter_pulsar_broker_jvm_memory_init meter_pulsar_broker_jvm_memory_used meter_pulsar_broker_jvm_memory_committed The usage of jvm memory. 
Pulsar Broker   JVM Threads meter_pulsar_broker_jvm_threads_current meter_pulsar_broker_jvm_threads_daemon meter_pulsar_broker_jvm_threads_peak meter_pulsar_broker_jvm_threads_deadlocked The usage of jvm threads. Pulsar Broker   GC Time meter_pulsar_broker_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Pulsar Broker   GC Count meter_pulsar_broker_jvm_gc_collection_seconds_count The count of a given JVM garbage collector. Pulsar Broker    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/pulsar/pulsar-cluster.yaml, otel-rules/pulsar/pulsar-broker.yaml. The Pulsar dashboard panel configurations are found in ui-initialized-templates/pulsar.\n","title":"Pulsar monitoring","url":"/docs/main/latest/en/setup/backend/backend-pulsar-monitoring/"},{"content":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus format from Pulsar and transfer the metrics to the OpenTelemetry receiver and into the Meter System. The Pulsar cluster is modeled as a Service in OAP on the Layer: PULSAR.\nData flow  Pulsar exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from Pulsar cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Set up Pulsar Cluster. (A Pulsar cluster includes a Pulsar broker cluster and a BookKeeper bookie cluster.) Set up OpenTelemetry Collector. For an example of the OpenTelemetry Collector configuration, refer to here. Configure the SkyWalking OpenTelemetry receiver.  Pulsar Monitoring Pulsar monitoring provides multidimensional metrics monitoring of Pulsar cluster as Layer: PULSAR Service in the OAP. In each cluster, the nodes are represented as Instance.\nPulsar Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Total Topics meter_pulsar_total_topics The number of Pulsar topics in this cluster. Pulsar Cluster   Total Subscriptions meter_pulsar_total_subscriptions The number of Pulsar subscriptions in this cluster. Pulsar Cluster   Total Producers meter_pulsar_total_producers The number of active producers connected to this cluster. Pulsar Cluster   Total Consumers meter_pulsar_total_consumers The number of active consumers connected to this cluster. Pulsar Cluster   Message Rate In meter_pulsar_message_rate_in The total message rate coming into this cluster (message per second). Pulsar Cluster   Message Rate Out meter_pulsar_message_rate_out The total message rate going out from this cluster (message per second). Pulsar Cluster   Throughput In meter_pulsar_throughput_in The total throughput coming into this cluster (byte per second). Pulsar Cluster   Throughput Out meter_pulsar_throughput_out The total throughput going out from this cluster (byte per second). Pulsar Cluster   Storage Size meter_pulsar_storage_size The total storage size of all topics in this broker (in bytes). Pulsar Cluster   Storage Logical Size meter_pulsar_storage_logical_size The storage size of all topics in this broker without replicas (in bytes). Pulsar Cluster   Storage Write Rate meter_pulsar_storage_write_rate The total message batches (entries) written to the storage for this broker (message batch per second). 
Pulsar Cluster   Storage Read Rate meter_pulsar_storage_read_rate The total message batches (entries) read from the storage for this broker (message batch per second). Pulsar Cluster    Pulsar Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     Active Connections meter_pulsar_broker_active_connections The number of active connections. Pulsar Broker   Total Connections meter_pulsar_broker_total_connections The total number of connections. Pulsar Broker   Connection Create Success Count meter_pulsar_broker_connection_create_success_count The number of successfully created connections. Pulsar Broker   Connection Create Fail Count meter_pulsar_broker_connection_create_fail_count The number of failed connections. Pulsar Broker   Connection Closed Total Count meter_pulsar_broker_connection_closed_total_count The total number of closed connections. Pulsar Broker   JVM Buffer Pool Used meter_pulsar_broker_jvm_buffer_pool_used_bytes The usage of jvm buffer pool. Pulsar Broker   JVM Memory Pool Used meter_pulsar_broker_jvm_memory_pool_used The usage of jvm memory pool. Pulsar Broker   JVM Memory meter_pulsar_broker_jvm_memory_init meter_pulsar_broker_jvm_memory_used meter_pulsar_broker_jvm_memory_committed The usage of jvm memory. Pulsar Broker   JVM Threads meter_pulsar_broker_jvm_threads_current meter_pulsar_broker_jvm_threads_daemon meter_pulsar_broker_jvm_threads_peak meter_pulsar_broker_jvm_threads_deadlocked The usage of jvm threads. Pulsar Broker   GC Time meter_pulsar_broker_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Pulsar Broker   GC Count meter_pulsar_broker_jvm_gc_collection_seconds_count The count of a given JVM garbage collector. Pulsar Broker    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/pulsar/pulsar-cluster.yaml, otel-rules/pulsar/pulsar-broker.yaml. The Pulsar dashboard panel configurations are found in ui-initialized-templates/pulsar.\n","title":"Pulsar monitoring","url":"/docs/main/next/en/setup/backend/backend-pulsar-monitoring/"},{"content":"Pulsar monitoring SkyWalking leverages OpenTelemetry Collector to collect metrics data in Prometheus format from the Pulsar and transfer the metrics to OpenTelemetry receiver and into the Meter System. Kafka entity as a Service in OAP and on the `Layer: PULSAR.\nData flow  Pulsar exposes metrics through Prometheus endpoint. OpenTelemetry Collector fetches metrics from Pulsar cluster via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.`  Setup  Set up Pulsar Cluster. (Pulsar cluster includes pulsar broker cluster and Bookkeeper bookie cluster.) Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  Pulsar Monitoring Pulsar monitoring provides multidimensional metrics monitoring of Pulsar cluster as Layer: PULSAR Service in the OAP. In each cluster, the nodes are represented as Instance.\nPulsar Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Total Topics meter_pulsar_total_topics The number of Pulsar topics in this cluster. Pulsar Cluster   Total Subscriptions meter_pulsar_total_subscriptions The number of Pulsar subscriptions in this cluster. 
Pulsar Cluster   Total Producers meter_pulsar_total_producers The number of active producers connected to this cluster. Pulsar Cluster   Total Consumers meter_pulsar_total_consumers The number of active consumers connected to this cluster. Pulsar Cluster   Message Rate In meter_pulsar_message_rate_in The total message rate coming into this cluster (message per second). Pulsar Cluster   Message Rate Out meter_pulsar_message_rate_out The total message rate going out from this cluster (message per second). Pulsar Cluster   Throughput In meter_pulsar_throughput_in The total throughput coming into this cluster (byte per second). Pulsar Cluster   Throughput Out meter_pulsar_throughput_out The total throughput going out from this cluster (byte per second). Pulsar Cluster   Storage Size meter_pulsar_storage_size The total storage size of all topics in this broker (in bytes). Pulsar Cluster   Storage Logical Size meter_pulsar_storage_logical_size The storage size of all topics in this broker without replicas (in bytes). Pulsar Cluster   Storage Write Rate meter_pulsar_storage_write_rate The total message batches (entries) written to the storage for this broker (message batch per second). Pulsar Cluster   Storage Read Rate meter_pulsar_storage_read_rate The total message batches (entries) read from the storage for this broker (message batch per second). Pulsar Cluster    Pulsar Node Supported Metrics    Monitoring Panel Metric Name Description Data Source     Active Connections meter_pulsar_broker_active_connections The number of active connections. Pulsar Broker   Total Connections meter_pulsar_broker_total_connections The total number of connections. Pulsar Broker   Connection Create Success Count meter_pulsar_broker_connection_create_success_count The number of successfully created connections. Pulsar Broker   Connection Create Fail Count meter_pulsar_broker_connection_create_fail_count The number of failed connections. Pulsar Broker   Connection Closed Total Count meter_pulsar_broker_connection_closed_total_count The total number of closed connections. Pulsar Broker   JVM Buffer Pool Used meter_pulsar_broker_jvm_buffer_pool_used_bytes The usage of jvm buffer pool. Pulsar Broker   JVM Memory Pool Used meter_pulsar_broker_jvm_memory_pool_used The usage of jvm memory pool. Pulsar Broker   JVM Memory meter_pulsar_broker_jvm_memory_init meter_pulsar_broker_jvm_memory_used meter_pulsar_broker_jvm_memory_committed The usage of jvm memory. Pulsar Broker   JVM Threads meter_pulsar_broker_jvm_threads_current meter_pulsar_broker_jvm_threads_daemon meter_pulsar_broker_jvm_threads_peak meter_pulsar_broker_jvm_threads_deadlocked The usage of jvm threads. Pulsar Broker   GC Time meter_pulsar_broker_jvm_gc_collection_seconds_sum Time spent in a given JVM garbage collector in seconds. Pulsar Broker   GC Count meter_pulsar_broker_jvm_gc_collection_seconds_count The count of a given JVM garbage collector. Pulsar Broker    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/pulsar/pulsar-cluster.yaml, otel-rules/pulsar/pulsar-broker.yaml. The RabbitMQ dashboard panel configurations are found in ui-initialized-templates/pulsar.\n","title":"Pulsar monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-pulsar-monitoring/"},{"content":"Python Agent Asynchronous Enhancement Since 1.1.0, the Python agent supports asynchronous reporting of ALL telemetry data, including traces, metrics, logs and profile. 
This feature is disabled by default, since it is still in the experimental stage. You can enable it by setting the SW_AGENT_ASYNCIO_ENHANCEMENT environment variable to true. See the configuration document for more information.\nexport SW_AGENT_ASYNCIO_ENHANCEMENT=true Why we need this feature Before version 1.1.0, the SkyWalking Python agent had only an implementation with the Threading module to provide data reporters. Yet with the growth of the Python agent, it is now fully capable and requires more resources than when only tracing was supported (we start many threads and gRPC itself creates even more threads when streaming).\nAs is well known, the Global Interpreter Lock (GIL) in Python can limit the true parallel execution of threads. This issue also affects the Python agent, especially on network communication with the SkyWalking OAP (gRPC, HTTP and Kafka).\nTherefore, we have decided to implement the reporter code for the SkyWalking Python agent based on the asyncio library. asyncio is an officially supported asynchronous programming library in Python that operates on a single-threaded, coroutine-driven model. Currently, it enjoys widespread adoption and boasts a rich ecosystem, making it the preferred choice for enhancing asynchronous capabilities in many Python projects.\nHow it works To keep the API unchanged, we have written a completely new class called SkyWalkingAgentAsync (with the same interface as the SkyWalkingAgent class). We use the environment variable mentioned above, SW_AGENT_ASYNCIO_ENHANCEMENT, to control which class implements the agent\u0026rsquo;s interface.\nIn the SkyWalkingAgentAsync class, we have employed asyncio coroutines and their related functions to replace the Python threading implementation in nearly all instances. We have applied asyncio enhancements to all three primary reporting protocols of the current SkyWalking Python agent:\n  gRPC: We use the grpc.aio module to replace the grpc module. Since the grpc.aio module is also officially supported and included in the grpc package, we can use it directly without any additional installation.\n  HTTP: We use the aiohttp module to replace the requests module.\n  Kafka: We use the aiokafka module to replace the kafka-python module.\n  Performance improvement We use wrk to pressure test the network throughput of the Python agents in a FastAPI application.\n gRPC  The performance has been improved by about 32.8%\n   gRPC QPS TPS Avg Latency     sync (original) 899.26 146.66KB 545.97ms   async (new) 1194.55 194.81KB 410.97ms     HTTP  The performance has been improved by about 9.8%\n   HTTP QPS TPS Avg Latency     sync (original) 530.95 86.59KB 1.53s   async (new) 583.37 95.14KB 1.44s     Kafka  The performance has been improved by about 89.6%\n   Kafka QPS TPS Avg Latency     sync (original) 345.89 56.41KB 1.09s   async (new) 655.67 106.93KB 1.24s     In fact, only the performance improvement of gRPC is of real reference value, because the other two protocols use third-party libraries with completely different implementations, and the improvement depends to a certain extent on the performance of these third-party libraries.\n For more details, see this PR.\nPotential problems We have shown that the asynchronous enhancement function improves the transmission efficiency of metrics, traces and logs. 
But it improves the performance of profile data very little, and even causes performance degradation.\nThis is mainly because a large part of the profile data comes from the monitoring and measurement of Python threads, which is exactly what we need to avoid in the asynchronous enhancement. Since operations on threads cannot be bypassed, we may need additional overhead to support cross-thread coroutine communication, which may lead to performance degradation instead of an increase.\nAsynchronous enhancements involve many code changes and introduce some new dependencies. Since this feature is relatively new, it may cause some unexpected errors and problems. If you encounter them, please feel free to contact us or submit issues and PRs!\n","title":"Python Agent Asynchronous Enhancement","url":"/docs/skywalking-python/next/en/setup/advanced/asyncenhancement/"},{"content":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module (in theory, also logging libraries depending on the core logging module) and the loguru module.\nFrom Python agent 1.0.0, the log reporter is automatically enabled and can be disabled through agent_log_reporter_active=False or SW_AGENT_LOG_REPORTER_ACTIVE=False.\nThe log reporter supports all three protocols, including grpc, http and kafka, which share the same config agent_protocol with the trace reporter.\nIf the http protocol is chosen, the logs will be batch-reported to the collector REST endpoint oap/v3/logs.\nIf the kafka protocol is chosen, please make sure to configure kafka-fetcher on the OAP side, and make sure the Python agent config kafka_bootstrap_servers points to your Kafka brokers.\nPlease make sure OAP is consuming the same Kafka topic as your agent produces to, and kafka_namespace must match the OAP-side configuration plugin.kafka.namespace\nagent_log_reporter_active=True - Enables the log reporter.\nagent_log_reporter_max_buffer_size - The maximum queue backlog size for sending log data to the backend; logs beyond this are silently dropped.\nAlternatively, you can pass configurations through environment variables. Please refer to the Configuration Vocabulary for the list of environment variables associated with the log reporter.\nSpecify a logging level  [Important] The agent will only report logs that pass the default level threshold set by logging.getLogger().setLevel(logging.WARNING) For example, if your logger level is logging.INFO, the agent will not report info logs even if you set agent_log_reporter_level to INFO\n In addition to the code-level configuration, only the logs with a level equal to or higher than the specified configuration will be collected and reported.\nIn other words, the agent skips reporting some unwanted logs based on your level threshold even though they are still logged.\nlog_reporter_level - The string name of a logger level.\nNote that it also works with your custom logger levels; simply specify its string name in the config.\nIgnore log filters The following config is disabled by default. 
When enabled, the log reporter will collect logs disregarding your custom log filters.\nFor example, if you attach the filter below to the logger - the default behavior of log reporting aligns with the filter (not reporting any logs with a message starting with SW test)\nclass AppFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(\u0026#39;SW test\u0026#39;) logger.addFilter(AppFilter()) However, if you do would like to report those filtered logs, set the log_reporter_ignore_filter to True.\nFormatting Note that regardless of the formatting, Python agent will always report the following three tags -\nlevel - the logger level name\nlogger - the logger name\nthread - the thread name\nLimit stacktrace depth You can set the cause_exception_depth config entry to a desired level(defaults to 10), which limits the output depth of exception stacktrace in reporting.\nThis config limits agent to report up to limit stacktrace, please refer to Python traceback for more explanations.\nCustomize the reported log format You can choose to report collected logs in a custom layout.\nIf not set, the agent uses the layout below by default, else the agent uses your custom layout set in log_reporter_layout.\n'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s'\nIf the layout is set to None, the reported log content will only contain the pre-formatted LogRecord.message(msg % args) without any additional styles or extra fields, stacktrace will be attached if an exception was raised.\nTransmit un-formatted logs You can also choose to report the log messages without any formatting. It separates the raw log msg logRecord.msg and logRecord.args, then puts them into message content and tags starting from argument.0, respectively, along with an exception tag if an exception was raised.\nNote when you set log_reporter_formatted to False, it ignores your custom layout introduced above.\nAs an example, the following code:\nlogger.info(\u0026#34;SW test log %s%s%s\u0026#34;, \u0026#39;arg0\u0026#39;, \u0026#39;arg1\u0026#39;, \u0026#39;arg2\u0026#39;) Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;SW test log %s %s %s\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg0\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg2\u0026#34; } ] } ","title":"Python Agent Log Reporter","url":"/docs/skywalking-python/latest/en/setup/advanced/logreporter/"},{"content":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module (in theory, also logging libraries depending on the core logging module) and loguru module.\nFrom Python agent 1.0.0, the log reporter is automatically enabled and can be disabled through agent_log_reporter_active=False or SW_AGENT_LOG_REPORTER_ACTIVE=False.\nLog reporter supports all three protocols including grpc, http and kafka, which shares the same config agent_protocol with trace reporter.\nIf chosen http protocol, the logs will be batch-reported to the collector REST endpoint oap/v3/logs.\nIf chosen kafka protocol, please make sure to config kafka-fetcher on the OAP side, and make sure Python agent config kafka_bootstrap_servers points to your Kafka brokers.\nPlease make sure OAP is consuming the same Kafka topic as your agent produces to, 
kafka_namespace must match OAP side configuration plugin.kafka.namespace\nagent_log_reporter_active=True - Enables the log reporter.\nagent_log_reporter_max_buffer_size - The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.\nAlternatively, you can pass configurations through environment variables. Please refer to the Configuration Vocabulary for the list of environment variables associated with the log reporter.\nSpecify a logging level  [Important] Agent will only report logs that passes the default level threshold logging.getLogger().setLevel(logging.WARNING) For example, if your logger level is logging.INFO, agent will not report info logs even if you set agent_log_reporter_level to INFO\n Additional to the code level configuration, only the logs with a level equal to or higher than the specified configuration will be collected and reported.\nIn other words, the agent skips reporting some unwanted logs based on your level threshold even though they are still logged.\nlog_reporter_level - The string name of a logger level.\nNote that it also works with your custom logger levels, simply specify its string name in the config.\nIgnore log filters The following config is disabled by default. When enabled, the log reporter will collect logs disregarding your custom log filters.\nFor example, if you attach the filter below to the logger - the default behavior of log reporting aligns with the filter (not reporting any logs with a message starting with SW test)\nclass AppFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(\u0026#39;SW test\u0026#39;) logger.addFilter(AppFilter()) However, if you do would like to report those filtered logs, set the log_reporter_ignore_filter to True.\nFormatting Note that regardless of the formatting, Python agent will always report the following three tags -\nlevel - the logger level name\nlogger - the logger name\nthread - the thread name\nLimit stacktrace depth You can set the cause_exception_depth config entry to a desired level(defaults to 10), which limits the output depth of exception stacktrace in reporting.\nThis config limits agent to report up to limit stacktrace, please refer to Python traceback for more explanations.\nCustomize the reported log format You can choose to report collected logs in a custom layout.\nIf not set, the agent uses the layout below by default, else the agent uses your custom layout set in log_reporter_layout.\n'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s'\nIf the layout is set to None, the reported log content will only contain the pre-formatted LogRecord.message(msg % args) without any additional styles or extra fields, stacktrace will be attached if an exception was raised.\nTransmit un-formatted logs You can also choose to report the log messages without any formatting. 
It separates the raw log msg logRecord.msg and logRecord.args, then puts them into message content and tags starting from argument.0, respectively, along with an exception tag if an exception was raised.\nNote when you set log_reporter_formatted to False, it ignores your custom layout introduced above.\nAs an example, the following code:\nlogger.info(\u0026#34;SW test log %s%s%s\u0026#34;, \u0026#39;arg0\u0026#39;, \u0026#39;arg1\u0026#39;, \u0026#39;arg2\u0026#39;) Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;SW test log %s %s %s\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg0\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg2\u0026#34; } ] } Print trace ID in your logs To print out the trace IDs in the logs, simply add %(tid)s to the agent_log_reporter_layout.\nYou can take advantage of this feature to print out the trace IDs on any channel you desire, not limited to reporting logs to OAP, this can be achieved by using any formatter you prefer in your own application logic.\n","title":"Python Agent Log Reporter","url":"/docs/skywalking-python/next/en/setup/advanced/logreporter/"},{"content":"Python Agent Log Reporter This functionality reports logs collected from the Python logging module (in theory, also logging libraries depending on the core logging module) and loguru module.\nFrom Python agent 1.0.0, the log reporter is automatically enabled and can be disabled through agent_log_reporter_active=False or SW_AGENT_LOG_REPORTER_ACTIVE=False.\nLog reporter supports all three protocols including grpc, http and kafka, which shares the same config agent_protocol with trace reporter.\nIf chosen http protocol, the logs will be batch-reported to the collector REST endpoint oap/v3/logs.\nIf chosen kafka protocol, please make sure to config kafka-fetcher on the OAP side, and make sure Python agent config kafka_bootstrap_servers points to your Kafka brokers.\nPlease make sure OAP is consuming the same Kafka topic as your agent produces to, kafka_namespace must match OAP side configuration plugin.kafka.namespace\nagent_log_reporter_active=True - Enables the log reporter.\nagent_log_reporter_max_buffer_size - The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.\nAlternatively, you can pass configurations through environment variables. Please refer to the Configuration Vocabulary for the list of environment variables associated with the log reporter.\nSpecify a logging level  [Important] Agent will only report logs that passes the default level threshold logging.getLogger().setLevel(logging.WARNING) For example, if your logger level is logging.INFO, agent will not report info logs even if you set agent_log_reporter_level to INFO\n Additional to the code level configuration, only the logs with a level equal to or higher than the specified configuration will be collected and reported.\nIn other words, the agent skips reporting some unwanted logs based on your level threshold even though they are still logged.\nlog_reporter_level - The string name of a logger level.\nNote that it also works with your custom logger levels, simply specify its string name in the config.\nIgnore log filters The following config is disabled by default. 
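Putting the filter and reporting options described above together, a minimal sketch is shown below. The import path (skywalking), the config.init() call, and the exact config attribute names are assumptions based on the documented configuration keys; they vary between agent versions, so the Configuration Vocabulary remains the authoritative reference.
import logging
from skywalking import agent, config  # import path assumed from the skywalking-python package

class AppFilter(logging.Filter):
    def filter(self, record):
        # Locally drop messages starting with 'SW test', as in the example above.
        return not record.getMessage().startswith('SW test')

# Hypothetical configuration: attribute names mirror the documented keys and may
# differ between agent versions; prefer the documented SW_AGENT_* environment variables.
config.init(service_name='demo-service')
config.log_reporter_ignore_filter = True   # report logs even if AppFilter drops them locally
config.log_reporter_layout = '%(asctime)s [%(threadName)s] %(levelname)s %(name)s [%(tid)s] - %(message)s'  # %(tid)s prints the trace ID
agent.start()

logger = logging.getLogger(__name__)
logger.addFilter(AppFilter())
logger.warning('SW test log: filtered locally, still reported when ignore_filter is on')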
When enabled, the log reporter will collect logs disregarding your custom log filters.\nFor example, if you attach the filter below to the logger - the default behavior of log reporting aligns with the filter (not reporting any logs with a message starting with SW test)\nclass AppFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(\u0026#39;SW test\u0026#39;) logger.addFilter(AppFilter()) However, if you do would like to report those filtered logs, set the log_reporter_ignore_filter to True.\nFormatting Note that regardless of the formatting, Python agent will always report the following three tags -\nlevel - the logger level name\nlogger - the logger name\nthread - the thread name\nLimit stacktrace depth You can set the cause_exception_depth config entry to a desired level(defaults to 10), which limits the output depth of exception stacktrace in reporting.\nThis config limits agent to report up to limit stacktrace, please refer to Python traceback for more explanations.\nCustomize the reported log format You can choose to report collected logs in a custom layout.\nIf not set, the agent uses the layout below by default, else the agent uses your custom layout set in log_reporter_layout.\n'%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s'\nIf the layout is set to None, the reported log content will only contain the pre-formatted LogRecord.message(msg % args) without any additional styles or extra fields, stacktrace will be attached if an exception was raised.\nTransmit un-formatted logs You can also choose to report the log messages without any formatting. It separates the raw log msg logRecord.msg and logRecord.args, then puts them into message content and tags starting from argument.0, respectively, along with an exception tag if an exception was raised.\nNote when you set log_reporter_formatted to False, it ignores your custom layout introduced above.\nAs an example, the following code:\nlogger.info(\u0026#34;SW test log %s%s%s\u0026#34;, \u0026#39;arg0\u0026#39;, \u0026#39;arg1\u0026#39;, \u0026#39;arg2\u0026#39;) Will result in:\n{ \u0026#34;content\u0026#34;: \u0026#34;SW test log %s %s %s\u0026#34;, \u0026#34;tags\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;argument.0\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg0\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.1\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg1\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;argument.2\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;arg2\u0026#34; } ] } ","title":"Python Agent Log Reporter","url":"/docs/skywalking-python/v1.0.1/en/setup/advanced/logreporter/"},{"content":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC and Kafka protocol, HTTP protocol is not implemented yet (requires additional handler on SkyWalking OAP side).\nEnabling the feature (default is enabled) PVM Reporter is also by default enabled, meaning useful Python metrics such as thread count/GC info will be shown in OAP General Services - Instance - PVM Tab) If you really don\u0026rsquo;t need such a feature, disable them through config.agent_pvm_meter_reporter_active or SW_AGENT_PVM_METER_REPORTER_ACTIVE\nconfig.agent_meter_reporter_active = True # Or os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;True\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=True Disable the feature os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;False\u0026#39; 
or\nexport SW_AGENT_METER_REPORTER_ACTIVE=False Counter  Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT, ((\u0026#34;k1\u0026#34;, \u0026#34;v1\u0026#34;), (\u0026#34;k2\u0026#34;, \u0026#34;v2\u0026#34;))) # or this way # builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT).tag(\u0026#39;key1\u0026#39;, \u0026#39;value1\u0026#39;).tag(\u0026#39;key2\u0026#39;, \u0026#39;value2\u0026#39;) c = builder.build() c.increment(2) Syntactic sugars builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time the with-wrapped codes consumed with c.create_timer(): # some codes may consume a certain time builder = Counter.Builder(\u0026#39;c3\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by num once counter_decorator_test gets called @Counter.increase(name=\u0026#39;c3\u0026#39;, num=2) def counter_decorator_test(): # some codes builder = Counter.Builder(\u0026#39;c4\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time counter_decorator_test consumed @Counter.timer(name=\u0026#39;c4\u0026#39;) def counter_decorator_test(s): # some codes may consume a certain time  Counter.Builder(name, tags) Create a new counter builder with the meter name and optional tags. Counter.tag(key: str, value) Mark a tag key/value pair. Counter.mode(mode: CounterMode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.increment(count) Increment count to the Counter, It could be a positive value.  Gauge  Gauge API represents a single numerical value.  # producer: iterable object builder = Gauge.Builder(\u0026#39;g1\u0026#39;, producer, ((\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;))) g = Builder.build()  Gauge.Builder(name, tags) Create a new gauge builder with the meter name and iterable object, this iterable object need to produce numeric value. Gauge.tag(key: str, value) Mark a tag key/value pair. Gauge.build() Build a new Gauge which is collected and reported to the backend.  Histogram  Histogram API represents a summary sample observations with customize buckets.  builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)], (\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;)) h = builder.build() Syntactic sugars builder = Histogram.Builder(\u0026#39;h3\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time the with-wprapped codes consumed with h.create_timer(): # some codes may consume a certain time builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time histogram_decorator_test consumed @Histogram.timer(name=\u0026#39;h2\u0026#39;) def histogram_decorator_test(s): time.sleep(s)  Histogram.Builder(name, tags) Create a new histogram builder with the meter name and optional tags. Histogram.tag(key: str, value) Mark a tag key/value pair. Histogram.minValue(value) Set up the minimal value of this histogram, default is 0. Histogram.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  
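Pulled together from the snippets above, a small end-to-end sketch of the Counter and Histogram APIs follows. The import paths (skywalking.meter.*) are assumptions; the builder, tag, build, increment, create_timer and decorator calls follow the API described above, and the meter reporter must be active for the data to reach the backend.
import time
# Import paths below are assumptions; the calls follow the documented Counter/Histogram APIs.
from skywalking.meter.counter import Counter, CounterMode
from skywalking.meter.histogram import Histogram

# Counter: a monotonically increasing value with optional tags.
c = Counter.Builder('order_count', CounterMode.INCREMENT).tag('region', 'eu').build()
c.increment(2)

# Timing sugar: the seconds spent inside the block are added to the counter.
with c.create_timer():
    time.sleep(0.1)

# Decorator sugar: increase the counter named 'order_count' by 1 on every call.
@Counter.increase(name='order_count', num=1)
def place_order():
    pass

# Histogram with buckets [0.0, 0.1, ..., 0.9): record how long the block takes.
h = Histogram.Builder('order_latency', [i / 10 for i in range(10)]).tag('region', 'eu').build()
with h.create_timer():
    place_order()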
","title":"Python Agent Meter Reporter","url":"/docs/skywalking-python/latest/en/setup/advanced/meterreporter/"},{"content":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC and Kafka protocol, HTTP protocol is not implemented yet (requires additional handler on SkyWalking OAP side).\nEnabling the feature (default is enabled) PVM Reporter is also by default enabled, meaning useful Python metrics such as thread count/GC info will be shown in OAP General Services - Instance - PVM Tab) If you really don\u0026rsquo;t need such a feature, disable them through config.agent_pvm_meter_reporter_active or SW_AGENT_PVM_METER_REPORTER_ACTIVE\nconfig.agent_meter_reporter_active = True # Or os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;True\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=True Disable the feature os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;False\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=False Counter  Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT, ((\u0026#34;k1\u0026#34;, \u0026#34;v1\u0026#34;), (\u0026#34;k2\u0026#34;, \u0026#34;v2\u0026#34;))) # or this way # builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT).tag(\u0026#39;key1\u0026#39;, \u0026#39;value1\u0026#39;).tag(\u0026#39;key2\u0026#39;, \u0026#39;value2\u0026#39;) c = builder.build() c.increment(2) Syntactic sugars builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time the with-wrapped codes consumed with c.create_timer(): # some codes may consume a certain time builder = Counter.Builder(\u0026#39;c3\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by num once counter_decorator_test gets called @Counter.increase(name=\u0026#39;c3\u0026#39;, num=2) def counter_decorator_test(): # some codes builder = Counter.Builder(\u0026#39;c4\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time counter_decorator_test consumed @Counter.timer(name=\u0026#39;c4\u0026#39;) def counter_decorator_test(s): # some codes may consume a certain time  Counter.Builder(name, tags) Create a new counter builder with the meter name and optional tags. Counter.tag(key: str, value) Mark a tag key/value pair. Counter.mode(mode: CounterMode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.increment(count) Increment count to the Counter, It could be a positive value.  Gauge  Gauge API represents a single numerical value.  # producer: iterable object builder = Gauge.Builder(\u0026#39;g1\u0026#39;, producer, ((\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;))) g = Builder.build()  Gauge.Builder(name, tags) Create a new gauge builder with the meter name and iterable object, this iterable object need to produce numeric value. Gauge.tag(key: str, value) Mark a tag key/value pair. Gauge.build() Build a new Gauge which is collected and reported to the backend.  Histogram  Histogram API represents a summary sample observations with customize buckets.  
builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)], (\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;)) h = builder.build() Syntactic sugars builder = Histogram.Builder(\u0026#39;h3\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time the with-wprapped codes consumed with h.create_timer(): # some codes may consume a certain time builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time histogram_decorator_test consumed @Histogram.timer(name=\u0026#39;h2\u0026#39;) def histogram_decorator_test(s): time.sleep(s)  Histogram.Builder(name, tags) Create a new histogram builder with the meter name and optional tags. Histogram.tag(key: str, value) Mark a tag key/value pair. Histogram.minValue(value) Set up the minimal value of this histogram, default is 0. Histogram.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  ","title":"Python Agent Meter Reporter","url":"/docs/skywalking-python/next/en/setup/advanced/meterreporter/"},{"content":"Python Agent Meter Reporter Important Note: Meter reporter is currently available to send in gRPC and Kafka protocol, HTTP protocol is not implemented yet (requires additional handler on SkyWalking OAP side).\nEnabling the feature (default is enabled) PVM Reporter is also by default enabled, meaning useful Python metrics such as thread count/GC info will be shown in OAP General Services - Instance - PVM Tab) If you really don\u0026rsquo;t need such a feature, disable them through config.agent_pvm_meter_reporter_active or SW_AGENT_PVM_METER_REPORTER_ACTIVE\nconfig.agent_meter_reporter_active = True # Or os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;True\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=True Disable the feature os.environ[\u0026#39;SW_AGENT_METER_REPORTER_ACTIVE\u0026#39;] = \u0026#39;False\u0026#39; or\nexport SW_AGENT_METER_REPORTER_ACTIVE=False Counter  Counter API represents a single monotonically increasing counter, automatic collect data and report to backend.  
builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT, ((\u0026#34;k1\u0026#34;, \u0026#34;v1\u0026#34;), (\u0026#34;k2\u0026#34;, \u0026#34;v2\u0026#34;))) # or this way # builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT).tag(\u0026#39;key1\u0026#39;, \u0026#39;value1\u0026#39;).tag(\u0026#39;key2\u0026#39;, \u0026#39;value2\u0026#39;) c = builder.build() c.increment(2) Syntactic sugars builder = Counter.Builder(\u0026#39;c2\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time the with-wrapped codes consumed with c.create_timer(): # some codes may consume a certain time builder = Counter.Builder(\u0026#39;c3\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by num once counter_decorator_test gets called @Counter.increase(name=\u0026#39;c3\u0026#39;, num=2) def counter_decorator_test(): # some codes builder = Counter.Builder(\u0026#39;c4\u0026#39;, CounterMode.INCREMENT) c = builder.build() # increase Counter c by the time counter_decorator_test consumed @Counter.timer(name=\u0026#39;c4\u0026#39;) def counter_decorator_test(s): # some codes may consume a certain time  Counter.Builder(name, tags) Create a new counter builder with the meter name and optional tags. Counter.tag(key: str, value) Mark a tag key/value pair. Counter.mode(mode: CounterMode) Change the counter mode, RATE mode means reporting rate to the backend. Counter.increment(count) Increment count to the Counter, It could be a positive value.  Gauge  Gauge API represents a single numerical value.  # producer: iterable object builder = Gauge.Builder(\u0026#39;g1\u0026#39;, producer, ((\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;))) g = Builder.build()  Gauge.Builder(name, tags) Create a new gauge builder with the meter name and iterable object, this iterable object need to produce numeric value. Gauge.tag(key: str, value) Mark a tag key/value pair. Gauge.build() Build a new Gauge which is collected and reported to the backend.  Histogram  Histogram API represents a summary sample observations with customize buckets.  builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)], (\u0026#34;key\u0026#34;, \u0026#34;value\u0026#34;)) h = builder.build() Syntactic sugars builder = Histogram.Builder(\u0026#39;h3\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time the with-wprapped codes consumed with h.create_timer(): # some codes may consume a certain time builder = Histogram.Builder(\u0026#39;h2\u0026#39;, [i / 10 for i in range(10)]) h = builder.build() # Histogram h will record the time histogram_decorator_test consumed @Histogram.timer(name=\u0026#39;h2\u0026#39;) def histogram_decorator_test(s): time.sleep(s)  Histogram.Builder(name, tags) Create a new histogram builder with the meter name and optional tags. Histogram.tag(key: str, value) Mark a tag key/value pair. Histogram.minValue(value) Set up the minimal value of this histogram, default is 0. Histogram.build() Build a new Histogram which is collected and reported to the backend. Histogram.addValue(value) Add value into the histogram, automatically analyze what bucket count needs to be increment. rule: count into [step1, step2).  
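The Gauge described above samples its value from an iterable producer. A minimal sketch, assuming the same builder-style API and an import path of skywalking.meter.gauge, could look like this:
import random
# Import path is an assumption; Gauge.Builder takes a meter name and an iterable
# that yields numeric values, as described above.
from skywalking.meter.gauge import Gauge

def queue_depth():
    # A generator standing in for any iterable producing the current value.
    while True:
        yield random.randint(0, 100)

g = Gauge.Builder('queue_depth', queue_depth()).tag('region', 'eu').build()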
","title":"Python Agent Meter Reporter","url":"/docs/skywalking-python/v1.0.1/en/setup/advanced/meterreporter/"},{"content":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve a series of data points between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These data points contain tags: id and entity_id that belong to a family default. They also choose fields: total and value.\n$ bydbctl measure query -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] timeRange: begin: 2022-10-15T22:32:48Z end: 2022-10-15T23:32:48Z EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl measure query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] EOF API Reference MeasureService v1\n","title":"Query Measures","url":"/docs/skywalking-banyandb/latest/crud/measure/query/"},{"content":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. 
bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve a series of data points between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These data points contain tags: id and entity_id that belong to a family default. They also choose fields: total and value.\n$ bydbctl measure query -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] timeRange: begin: 2022-10-15T22:32:48Z end: 2022-10-15T23:32:48Z EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl measure query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] EOF API Reference MeasureService v1\n","title":"Query Measures","url":"/docs/skywalking-banyandb/next/crud/measure/query/"},{"content":"Query Measures Query operation queries the data in a measure.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. 
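To make the start/end defaulting rules just described concrete, the following illustrative Python sketch (not part of bydbctl) reproduces the 30-minute defaulting behaviour:
from datetime import datetime, timedelta, timezone

def resolve_time_range(start=None, end=None, window=timedelta(minutes=30)):
    # Reproduces the rules above: both absent -> the past 30 minutes;
    # only one present -> the other is derived by subtracting/adding the window.
    now = datetime.now(timezone.utc)
    if start is None and end is None:
        return now - window, now
    if start is None:
        return end - window, end
    if end is None:
        return start, start + window
    return start, end

# Example: only "end" is given, so start = end - 30 minutes.
end = datetime(2022, 11, 9, 12, 34, tzinfo=timezone.utc)
print(resolve_time_range(end=end))  # (2022-11-09 12:04, 2022-11-09 12:34)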
They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve a series of data points between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These data points contain tags: id and entity_id that belong to a family default. They also choose fields: total and value.\n$ bydbctl measure query -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] timeRange: begin: 2022-10-15T22:32:48Z end: 2022-10-15T23:32:48Z EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl measure query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: name: \u0026#34;service_cpm_minute\u0026#34; group: \u0026#34;sw_metric\u0026#34; tagProjection: tagFamilies: - name: \u0026#34;default\u0026#34; tags: [\u0026#34;id\u0026#34;, \u0026#34;entity_id\u0026#34;] fieldProjection: names: [\u0026#34;total\u0026#34;, \u0026#34;value\u0026#34;] EOF API Reference MeasureService v1\n","title":"Query Measures","url":"/docs/skywalking-banyandb/v0.5.0/crud/measure/query/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. 
Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
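As an illustration of calling these GraphQL APIs programmatically, the sketch below posts a typeOfMetrics query taken from the schema above. The OAP endpoint URL (http://localhost:12800/graphql), the use of the third-party requests library, and the sample metric name are assumptions; richer queries such as execExpression or queryLogs are posted the same way with their own variables.
import requests  # third-party HTTP client, used here only for illustration

# Endpoint URL is an assumption for a local OAP with default settings.
OAP_GRAPHQL = 'http://localhost:12800/graphql'

# typeOfMetrics returns a MetricsType enum, so no sub-selection is needed.
query = '''
query ($name: String!) {
  typeOfMetrics(name: $name)
}
'''

resp = requests.post(
    OAP_GRAPHQL,
    json={'query': query, 'variables': {'name': 'service_cpm'}},
    timeout=10,
)
print(resp.json())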
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/latest/en/api/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nV2 APIs Provide Metadata V2 query APIs since 9.0.0, including Layer concept.\nextendtypeQuery{# Read all available layers# UI could use this list to determine available dashboards/panels# The available layers would change with time in the runtime, because new service could be detected in any time.# This list should be loaded periodically.listLayers:[String!]!# Read the service list according to layer.listServices(layer:String):[Service!]!# Find service according to given ID. Return null if not existing.getService(serviceId:String!):Service# Search and find service according to given name. Return null if not existing.findService(serviceName:String!):Service# Read service instance list.listInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Search and find service instance according to given ID. Return null if not existing.getInstance(instanceId:String!):ServiceInstance# Search and find matched endpoints according to given service and keyword(optional)# If no keyword, randomly choose endpoint based on `limit` value.findEndpoint(keyword:String,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topology# When layer is specified, the topology of this layer would be queriedgetGlobalTopology(duration:Duration!,layer:String):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology# Query the topology, based on the given instancegetProcessTopology(serviceInstanceId:ID!,duration:Duration!):ProcessTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!# Read the list of searchable keysqueryLogTagAutocompleteKeys(duration:Duration!):[String!]# Search the available value options of the given key.queryLogTagAutocompleteValues(tagKey:String!,duration:Duration!):[String!]}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{# Search segment list with given conditionsqueryBasicTraces(condition:TraceQueryCondition):TraceBrief# Read the specific trace ID with given trace IDqueryTrace(traceId:ID!):Trace# Read the list of searchable keysqueryTraceTagAutocompleteKeys(duration:Duration!):[String!]# Search the available value options of the given key.queryTraceTagAutocompleteValues(tagKey:String!,duration:Duration!):[String!]}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegments(taskID:ID!):[ProfiledTraceSegments!]!# analyze multiple profiled segments, start and end time use timestamp(millisecond)getSegmentsProfileAnalyze(queries:[SegmentProfileAnalyzeQuery!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task list# query `triggerType == FIXED_TIME` when triggerType is absentqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!],triggerType:EBPFProfilingTriggerType,duration:Duration):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. 
analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}On-Demand Pod Logs Provide APIs to query on-demand pod logs since 9.1.0.\nextendtypeQuery{listContainers(condition:OndemandContainergQueryCondition):PodContainersondemandPodLogs(condition:OndemandLogQueryCondition):Logs}Hierarchy Provide Hierarchy query APIs since 10.0.0, including service and instance hierarchy.\nextendtypeQuery{# Query the service hierarchy, based on the given service. Will recursively return all related layers services in the hierarchy.getServiceHierarchy(serviceId:ID!,layer:String!):ServiceHierarchy!# Query the instance hierarchy, based on the given instance. Will return all direct related layers instances in the hierarchy, no recursive.getInstanceHierarchy(instanceId:ID!,layer:String!):InstanceHierarchy!# List layer hierarchy levels. The layer levels are defined in the `hierarchy-definition.yml`.listLayerLevels:[LayerLevel!]!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/next/en/api/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple linears.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responsed, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.0.0/en/protocols/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. 
Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple linears.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responsed, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
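As a hedged illustration only (not part of the schema text on this page), such a TopN query might look like the sketch below; the metric name service_cpm, the DES order value, and the TopNEntity fields name/value are assumptions that are not spelled out here:
query {
  getServiceTopN(
    name: "service_cpm"   # assumed single-value OAL metric name
    topN: 10
    duration: { start: "2017-11-08 09", end: "2017-11-08 19", step: HOUR }   # HOUR-step format per the Duration section
    order: DES            # assumed Order enum value
  ) {
    name                  # assumed TopNEntity fields
    value
  }
}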
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.1.0/en/protocols/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. 
You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. 
getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple linears.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responsed, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. 
Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.2.0/en/protocols/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. 
Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. 
Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nCondition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.3.0/en/protocols/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL. You may obtain the metrics data in linear or thermodynamic matrix formats based on the aggregation functions in script.\nV2 APIs Provide Metrics V2 query APIs since 8.0.0, including metadata, single/multiple values, heatmap, and sampled records metrics.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!# Read metrics single value in the duration of required metricsreadMetricsValue(condition:MetricsCondition!,duration:Duration!):Long!# Read time-series values in the duration of required metricsreadMetricsValues(condition:MetricsCondition!,duration:Duration!):MetricsValues!# Read entity list of required metrics and parent entity type.sortMetrics(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!# Read value in the given time duration, usually as a linear.# labels: the labels you need to query.readLabeledMetricsValues(condition:MetricsCondition!,labels:[String!]!,duration:Duration!):[MetricsValues!]!# Heatmap is bucket based value statistic result.readHeatMap(condition:MetricsCondition!,duration:Duration!):HeatMap# Deprecated since 9.3.0, replaced by readRecords defined in record.graphqls# Read the sampled records# TopNCondition#scope is not required.readSampledRecords(condition:TopNCondition!,duration:Duration!):[SelectedRecord!]!}V1 APIs 3 types of metrics can be queried. V1 APIs were introduced since 6.x. Now they are a shell to V2 APIs.\n Single value. Most default metrics are in single value. getValues and getLinearIntValues are suitable for this purpose. Multiple value. A metric defined in OAL includes multiple value calculations. Use getMultipleLinearIntValues to obtain all values. percentile is a typical multiple value function in OAL. Heatmap value. Read Heatmap in WIKI for details. thermodynamic is the only OAL function. Use getThermodynamic to get the values.  
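To make the multiple-value case concrete before the V1 schema below, here is a hedged sketch; the metric name endpoint_percentile and the MetricCondition/IntValues field shapes used here are assumptions, not spelled out on this page:
query {
  getMultipleLinearIntValues(
    metric: { name: "endpoint_percentile", id: "<endpoint id>" }   # assumed MetricCondition fields
    numOfLinear: 5   # percentile(50,75,90,95,99) yields five lines; p50 is returned first
    duration: { start: "2017-11-08 09", end: "2017-11-08 19", step: HOUR }
  ) {
    values {   # assumed IntValues shape
      id
      value
    }
  }
}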
extendtypeQuery{getValues(metric:BatchMetricConditions!,duration:Duration!):IntValuesgetLinearIntValues(metric:MetricCondition!,duration:Duration!):IntValues# Query the type of metrics including multiple values, and format them as multiple lines.# The seq of these multiple lines base on the calculation func in OAL# Such as, should us this to query the result of func percentile(50,75,90,95,99) in OAL,# then five lines will be responded, p50 is the first element of return value.getMultipleLinearIntValues(metric:MetricCondition!,numOfLinear:Int!,duration:Duration!):[IntValues!]!getThermodynamic(metric:MetricCondition!,duration:Duration!):Thermodynamic}Metrics are defined in the config/oal/*.oal files.\nAggregation Aggregation query means that the metrics data need a secondary aggregation at query stage, which causes the query interfaces to have some different arguments. A typical example of aggregation query is the TopN list of services. Metrics stream aggregation simply calculates the metrics values of each service, but the expected list requires ordering metrics data by their values.\nAggregation query is for single value metrics only.\n# The aggregation query is different with the metric query.# All aggregation queries require backend or/and storage do aggregation in query time.extendtypeQuery{# TopN is an aggregation query.getServiceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllServiceInstanceTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getServiceInstanceTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getAllEndpointTopN(name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!getEndpointTopN(serviceId:ID!,name:String!,topN:Int!,duration:Duration!,order:Order!):[TopNEntity!]!}Record Record is a general and abstract type for collected raw data. In the observability, traces and logs have specific and well-defined meanings, meanwhile, the general records represent other collected records. Such as sampled slow SQL statement, HTTP request raw data(request/response header/body)\nextendtypeQuery{# Query collected records with given metric name and parent entity conditions, and return in the requested order.readRecords(condition:RecordCondition!,duration:Duration!):[Record!]!}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.4.0/en/api/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.5.0/en/api/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.6.0/en/api/query-protocol/"},{"content":"Query Protocol Query Protocol defines a set of APIs in GraphQL grammar to provide data query and interactive capabilities with SkyWalking native visualization tool or 3rd party system, including Web UI, CLI or private system.\nQuery protocol official repository, https://github.com/apache/skywalking-query-protocol.\nAll deprecated APIs are moved here.\nMetadata Metadata contains concise information on all services and their instances, endpoints, etc. under monitoring. You may query the metadata in different ways.\nextendtypeQuery{# Normal service related meta info getAllServices(duration:Duration!,group:String):[Service!]!searchServices(duration:Duration!,keyword:String!):[Service!]!searchService(serviceCode:String!):Service# Fetch all services of Browser typegetAllBrowserServices(duration:Duration!):[Service!]!searchBrowserServices(duration:Duration!,keyword:String!):[Service!]!searchBrowserService(serviceCode:String!):Service# Service instance querygetServiceInstances(duration:Duration!,serviceId:ID!):[ServiceInstance!]!# Endpoint query# Consider there are huge numbers of endpoint,# must use endpoint owner\u0026#39;s service id, keyword and limit filter to do query.searchEndpoint(keyword:String!,serviceId:ID!,limit:Int!):[Endpoint!]!getEndpointInfo(endpointId:ID!):EndpointInfo# Process query# Read process list.listProcesses(duration:Duration!,instanceId:ID!):[Process!]!# Find process according to given ID. Return null if not existing.getProcess(processId:ID!):Process# Get the number of matched processes through serviceId, labels# Labels: the matched process should contain all labels## The return is not a precise number, the process has its lifecycle, as it reboots and shutdowns with time.# The return number just gives an abstract of the scale of profiling that would be applied.estimateProcessScale(serviceId:ID!,labels:[String!]!):Long!# Database related meta info.getAllDatabases(duration:Duration!):[Database!]!getTimeInfo:TimeInfo}Topology The topology and dependency graphs among services, instances and endpoints. 
Includes direct relationships or global maps.\nextendtypeQuery{# Query the global topologygetGlobalTopology(duration:Duration!):Topology# Query the topology, based on the given servicegetServiceTopology(serviceId:ID!,duration:Duration!):Topology# Query the topology, based on the given services.# `#getServiceTopology` could be replaced by this.getServicesTopology(serviceIds:[ID!]!,duration:Duration!):Topology# Query the instance topology, based on the given clientServiceId and serverServiceIdgetServiceInstanceTopology(clientServiceId:ID!,serverServiceId:ID!,duration:Duration!):ServiceInstanceTopology# Query the topology, based on the given endpointgetEndpointTopology(endpointId:ID!,duration:Duration!):Topology# v2 of getEndpointTopologygetEndpointDependencies(endpointId:ID!,duration:Duration!):EndpointTopology}Metrics Metrics query targets all objects defined in OAL script and MAL.\nV3 APIs Provide Metrics V3 query APIs since 9.5.0, including metadata and MQE. SkyWalking Metrics Query Expression(MQE) is an extension query mechanism. MQE allows users to do simple query-stage calculation like well known PromQL through GraphQL. The expression\u0026rsquo;s syntax can refer to here.\nextendtypeQuery{# Metrics definition metadata query. Response the metrics type which determines the suitable query methods.typeOfMetrics(name:String!):MetricsType!# Get the list of all available metrics in the current OAP server.# Param, regex, could be used to filter the metrics by name.listMetrics(regex:String):[MetricDefinition!]!execExpression(expression:String!,entity:Entity!,duration:Duration!):ExpressionResult!}typeExpressionResult{type:ExpressionResultType!# When the type == TIME_SERIES_VALUES, the results would be a collection of MQEValues.# In other legal type cases, only one MQEValues is expected in the array.results:[MQEValues!]!# When type == ExpressionResultType.UNKNOWN,# the error message includes the expression resolving errors.error:String}enumExpressionResultType{# Can\u0026#39;t resolve the type of the given expression.UNKNOWN# A single valueSINGLE_VALUE# A collection of time-series values.# The value could have labels or not.TIME_SERIES_VALUES# A collection of aggregated values through metric sort functionSORTED_LIST# A collection of sampled records.# When the original metric type is sampled recordsRECORD_LIST}Logs extendtypeQuery{# Return true if the current storage implementation supports fuzzy query for logs.supportQueryLogsByKeywords:Boolean!queryLogs(condition:LogQueryCondition):Logs# Test the logs and get the results of the LAL output.test(requests:LogTestRequest!):LogTestResponse!}Log implementations vary between different database options. 
Some search engines like ElasticSearch and OpenSearch can support full log text fuzzy queries, while others do not due to considerations related to performance impact and end user experience.\ntest API serves as the debugging tool for native LAL parsing.\nTrace extendtypeQuery{queryBasicTraces(condition:TraceQueryCondition):TraceBriefqueryTrace(traceId:ID!):Trace}Trace query fetches trace segment lists and spans of given trace IDs.\nAlarm extendtypeQuery{getAlarmTrend(duration:Duration!):AlarmTrend!getAlarm(duration:Duration!,scope:Scope,keyword:String,paging:Pagination!,tags:[AlarmTag]):Alarms}Alarm query identifies alarms and related events.\nEvent extendtypeQuery{queryEvents(condition:EventQueryCondition):Events}Event query fetches the event list based on given sources and time range conditions.\nProfiling SkyWalking offers two types of profiling, in-process and out-process, allowing users to create tasks and check their execution status.\nIn-process profiling extendtypeMutation{# crate new profile taskcreateProfileTask(creationRequest:ProfileTaskCreationRequest):ProfileTaskCreationResult!}extendtypeQuery{# query all task list, order by ProfileTask#startTime descendinggetProfileTaskList(serviceId:ID,endpointName:String):[ProfileTask!]!# query all task logsgetProfileTaskLogs(taskID:String):[ProfileTaskLog!]!# query all task profiled segment listgetProfileTaskSegmentList(taskID:String):[BasicTrace!]!# query profiled segmentgetProfiledSegment(segmentId:String):ProfiledSegment# analyze profiled segment, start and end time use timestamp(millisecond)getProfileAnalyze(segmentId:String!,timeRanges:[ProfileAnalyzeTimeRange!]!):ProfileAnalyzation!}Out-process profiling extendtypeMutation{# create a new eBPF fixed time profiling taskcreateEBPFProfilingFixedTimeTask(request:EBPFProfilingTaskFixedTimeCreationRequest!):EBPFProfilingTaskCreationResult!# create a new eBPF network profiling taskcreateEBPFNetworkProfiling(request:EBPFProfilingNetworkTaskRequest!):EBPFProfilingTaskCreationResult!# keep alive the eBPF profiling taskkeepEBPFNetworkProfiling(taskId:ID!):EBPFNetworkKeepProfilingResult!}extendtypeQuery{# query eBPF profiling data for prepare create taskqueryPrepareCreateEBPFProfilingTaskData(serviceId:ID!):EBPFProfilingTaskPrepare!# query eBPF profiling task listqueryEBPFProfilingTasks(serviceId:ID,serviceInstanceId:ID,targets:[EBPFProfilingTargetType!]):[EBPFProfilingTask!]!# query schedules from profiling taskqueryEBPFProfilingSchedules(taskId:ID!):[EBPFProfilingSchedule!]!# analyze the profiling schedule# aggregateType is \u0026#34;EBPFProfilingAnalyzeAggregateType#COUNT\u0026#34; as default. analysisEBPFProfilingResult(scheduleIdList:[ID!]!,timeRanges:[EBPFProfilingAnalyzeTimeRange!]!,aggregateType:EBPFProfilingAnalyzeAggregateType):EBPFProfilingAnalyzation!}Condition Duration Duration is a widely used parameter type as the APM data is time-related. See the following for more details. Step relates to precision.\n# The Duration defines the start and end time for each query operation.# Fields: `start` and `end`# represents the time span. 
And each of them matches the step.# ref https://www.ietf.org/rfc/rfc3339.txt# The time formats are# `SECOND` step: yyyy-MM-dd HHmmss# `MINUTE` step: yyyy-MM-dd HHmm# `HOUR` step: yyyy-MM-dd HH# `DAY` step: yyyy-MM-dd# `MONTH` step: yyyy-MM# Field: `step`# represents the accurate time point.# e.g.# if step==HOUR , start=2017-11-08 09, end=2017-11-08 19# then# metrics from the following time points expected# 2017-11-08 9:00 -\u0026gt; 2017-11-08 19:00# there are 11 time points (hours) in the time span.inputDuration{start:String!end:String!step:Step!}enumStep{MONTHDAYHOURMINUTESECOND}","title":"Query Protocol","url":"/docs/main/v9.7.0/en/api/query-protocol/"},{"content":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve elements in a stream named sw between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These elements also choose a tag trace_id which lives in a family named searchable.\n$ bydbctl stream query -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] timeRange: begin: 2022-10-15T22:32:48+08:00 end: 2022-10-15T23:32:48+08:00 EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl stream query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] EOF API Reference StreamService v1\n","title":"Query Streams","url":"/docs/skywalking-banyandb/latest/crud/stream/query/"},{"content":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  
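The step-dependent time formats in the GraphQL `Duration` input documented above are easy to get wrong, so here is a small sketch (not part of the protocol itself) that formats `start`/`end` from Python datetimes according to that format table; the helper name and structure are purely illustrative.

```python
# Illustrative helper: build a Duration dict using the step-specific
# patterns quoted above (SECOND ... MONTH).
from datetime import datetime, timedelta

STEP_FORMATS = {
    "SECOND": "%Y-%m-%d %H%M%S",
    "MINUTE": "%Y-%m-%d %H%M",
    "HOUR":   "%Y-%m-%d %H",
    "DAY":    "%Y-%m-%d",
    "MONTH":  "%Y-%m",
}

def duration(start: datetime, end: datetime, step: str) -> dict:
    fmt = STEP_FORMATS[step]
    return {"start": start.strftime(fmt), "end": end.strftime(fmt), "step": step}

# The 11-hour window used in the example comment above:
end = datetime(2017, 11, 8, 19)
print(duration(end - timedelta(hours=10), end, "HOUR"))
# {'start': '2017-11-08 09', 'end': '2017-11-08 19', 'step': 'HOUR'}
```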
Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. \u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve elements in a stream named sw between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These elements also choose a tag trace_id which lives in a family named searchable.\n$ bydbctl stream query -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] timeRange: begin: 2022-10-15T22:32:48+08:00 end: 2022-10-15T23:32:48+08:00 EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl stream query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] EOF API Reference StreamService v1\n","title":"Query Streams","url":"/docs/skywalking-banyandb/next/crud/stream/query/"},{"content":"Query Streams Query operation queries the data in a stream.\nbydbctl is the command line tool in examples.\nThe input contains two parts:\n Request: a YAML-based text which is defined by the API Time Range: YAML and CLI\u0026rsquo;s flags both support it.  Time Range The query specification contains time_range field. The request should set absolute times to it. bydbctl also provides start and end flags to support passing absolute and relative times.\n\u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; specify a time range during which the query is performed, they can be an absolute time like \u0026ldquo;2006-01-02T15:04:05Z07:00\u0026rdquo;, or relative time (to the current time) like \u0026ldquo;-30m\u0026rdquo;, or \u0026ldquo;30m\u0026rdquo;. They are both optional and their default values follow the rules below:\n when \u0026ldquo;start\u0026rdquo; and \u0026ldquo;end\u0026rdquo; are both absent, \u0026ldquo;start = now - 30 minutes\u0026rdquo; and \u0026ldquo;end = now\u0026rdquo;, namely past 30 minutes; when \u0026ldquo;start\u0026rdquo; is absent and \u0026ldquo;end\u0026rdquo; is present, this command calculates \u0026ldquo;start\u0026rdquo; (minus 30 units), e.g. 
\u0026ldquo;end = 2022-11-09T12:34:00Z\u0026rdquo;, so \u0026ldquo;start = end - 30 minutes = 2022-11-09T12:04:00Z\u0026rdquo;; when \u0026ldquo;start\u0026rdquo; is present and \u0026ldquo;end\u0026rdquo; is absent, this command calculates \u0026ldquo;end\u0026rdquo; (plus 30 units), e.g. \u0026ldquo;start = 2022-11-09T12:04:00Z\u0026rdquo;, so \u0026ldquo;end = start + 30 minutes = 2022-11-09T12:34:00Z\u0026rdquo;.  Examples To retrieve elements in a stream named sw between 2022-10-15T22:32:48Z and 2022-10-15T23:32:48Z could use the below command. These elements also choose a tag trace_id which lives in a family named searchable.\n$ bydbctl stream query -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] timeRange: begin: 2022-10-15T22:32:48+08:00 end: 2022-10-15T23:32:48+08:00 EOF The below command could query data in the last 30 minutes using relative time duration :\n$ bydbctl stream query --start -30m -f - \u0026lt;\u0026lt;EOF metadata: group: \u0026#34;default\u0026#34; name: \u0026#34;sw\u0026#34; projection: tagFamilies: - name: \u0026#34;searchable\u0026#34; tags: [\u0026#34;trace_id\u0026#34;] EOF API Reference StreamService v1\n","title":"Query Streams","url":"/docs/skywalking-banyandb/v0.5.0/crud/stream/query/"},{"content":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The maximum buffer event size.event_buffer_size:5000# The partition count of queue.partition:1Configuration    Name Type Description     event_buffer_size int configThe maximum buffer event size.   partition int The total partition count.    ","title":"Queue/memory-queue","url":"/docs/skywalking-satellite/latest/en/setup/plugins/queue_memory-queue/"},{"content":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The maximum buffer event size.event_buffer_size:5000# The partition count of queue.partition:1Configuration    Name Type Description     event_buffer_size int configThe maximum buffer event size.   partition int The total partition count.    ","title":"Queue/memory-queue","url":"/docs/skywalking-satellite/next/en/setup/plugins/queue_memory-queue/"},{"content":"Queue/memory-queue Description This is a memory queue to buffer the input event.\nDefaultConfig # The maximum buffer event size.event_buffer_size:5000# The partition count of queue.partition:1Configuration    Name Type Description     event_buffer_size int configThe maximum buffer event size.   partition int The total partition count.    ","title":"Queue/memory-queue","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/queue_memory-queue/"},{"content":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the input event. Please note that this plugin does not support Windows platform.\nDefaultConfig # The size of each segment. Default value is 256K. The unit is Byte.segment_size:262114# The max num of segments in memory. Default value is 10.max_in_mem_segments:10# The capacity of Queue = segment_size * queue_capacity_segments.queue_capacity_segments:2000# The period flush time. The unit is ms. Default value is 1 second.flush_period:1000# The max number in one flush time. Default value is 10000.flush_ceiling_num:10000# The max size of the input event. 
Default value is 20k.max_event_size:20480# The partition count of queue.partition:1Configuration    Name Type Description     segment_size int The size of each segment. The unit is byte.   max_in_mem_segments int32 The max num of segments in memory.   queue_capacity_segments int The capacity of Queue = segment_size * queue_capacity_segments.   flush_period int The period flush time. The unit is ms.   flush_ceiling_num int The max number in one flush time.   max_event_size int The max size of the input event.   partition int The total partition count.    ","title":"Queue/mmap-queue","url":"/docs/skywalking-satellite/latest/en/setup/plugins/queue_mmap-queue/"},{"content":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the input event. Please note that this plugin does not support Windows platform.\nDefaultConfig # The size of each segment. Default value is 256K. The unit is Byte.segment_size:262114# The max num of segments in memory. Default value is 10.max_in_mem_segments:10# The capacity of Queue = segment_size * queue_capacity_segments.queue_capacity_segments:2000# The period flush time. The unit is ms. Default value is 1 second.flush_period:1000# The max number in one flush time. Default value is 10000.flush_ceiling_num:10000# The max size of the input event. Default value is 20k.max_event_size:20480# The partition count of queue.partition:1Configuration    Name Type Description     segment_size int The size of each segment. The unit is byte.   max_in_mem_segments int32 The max num of segments in memory.   queue_capacity_segments int The capacity of Queue = segment_size * queue_capacity_segments.   flush_period int The period flush time. The unit is ms.   flush_ceiling_num int The max number in one flush time.   max_event_size int The max size of the input event.   partition int The total partition count.    ","title":"Queue/mmap-queue","url":"/docs/skywalking-satellite/next/en/setup/plugins/queue_mmap-queue/"},{"content":"Queue/mmap-queue Description This is a memory mapped queue to provide the persistent storage for the input event. Please note that this plugin does not support Windows platform.\nDefaultConfig # The size of each segment. Default value is 256K. The unit is Byte.segment_size:262114# The max num of segments in memory. Default value is 10.max_in_mem_segments:10# The capacity of Queue = segment_size * queue_capacity_segments.queue_capacity_segments:2000# The period flush time. The unit is ms. Default value is 1 second.flush_period:1000# The max number in one flush time. Default value is 10000.flush_ceiling_num:10000# The max size of the input event. Default value is 20k.max_event_size:20480# The partition count of queue.partition:1Configuration    Name Type Description     segment_size int The size of each segment. The unit is byte.   max_in_mem_segments int32 The max num of segments in memory.   queue_capacity_segments int The capacity of Queue = segment_size * queue_capacity_segments.   flush_period int The period flush time. The unit is ms.   flush_ceiling_num int The max number in one flush time.   max_event_size int The max size of the input event.   partition int The total partition count.    
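For a rough sense of how the mmap-queue defaults above translate into on-disk footprint, the following back-of-the-envelope sketch multiplies the documented defaults; the resulting figures (about 500 MB, and the event count) are derived here for illustration, not stated in the plugin documentation.

```python
# Rough sizing sketch using the mmap-queue defaults listed above.
segment_size = 262114            # bytes per segment (documented default)
queue_capacity_segments = 2000   # documented default
max_event_size = 20480           # bytes, documented upper bound per event

capacity_bytes = segment_size * queue_capacity_segments
print(f"queue capacity ≈ {capacity_bytes / 1024 / 1024:.0f} MB")
# minimum number of events that fit if every event were max-size:
print(f"max-size events held ≥ {capacity_bytes // max_event_size}")
```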
","title":"Queue/mmap-queue","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/queue_mmap-queue/"},{"content":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as SkyWalking native configuration discovery service protocol.\nDefaultConfig # The partition count of queue.partition:1Configuration    Name Type Description     partition int The total partition count.    ","title":"Queue/none-queue","url":"/docs/skywalking-satellite/latest/en/setup/plugins/queue_none-queue/"},{"content":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as SkyWalking native configuration discovery service protocol.\nDefaultConfig # The partition count of queue.partition:1Configuration    Name Type Description     partition int The total partition count.    ","title":"Queue/none-queue","url":"/docs/skywalking-satellite/next/en/setup/plugins/queue_none-queue/"},{"content":"Queue/none-queue Description This is an empty queue for direct connection protocols, such as SkyWalking native configuration discovery service protocol.\nDefaultConfig # The partition count of queue.partition:1Configuration    Name Type Description     partition int The total partition count.    ","title":"Queue/none-queue","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/queue_none-queue/"},{"content":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including setting up environments, testing and releasing.\nFirst you need to have the make command available:\n# ubuntu/wsl sudo apt-get update sudo apt-get -y install make or\n# windows powershell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time irm get.scoop.sh | iex scoop install make Poetry We have migrated from basic pip to Poetry to manage dependencies and package our project.\nOnce you have make ready, run make env, this will automatically install the right Poetry release, and create (plus manage) a .venv virtual environment for us based on the currently activated Python 3 version. 
Enjoy coding!\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nSwitching between Multiple Python Versions Do not develop/test on Python \u0026lt; 3.7, since Poetry and some other functionalities we implement rely on Python 3.7+\nIf you would like to test on multiple Python versions, run the following to switch and recreate virtual environment:\nWithout Python Version Tools poetry env use python3.x poetry install With Python Version Tools pyenv shell 3.9.11 poetry env use $(pyenv which python) poetry install Or try: virtualenvs.prefer-active-python, which is an experimental poetry feature that can be set to true so that it will automatically follow environment.\nNext Refer to the Plugin Development Guide to learn how to build a new plugin for a library.\n","title":"Quick Start for Contributors","url":"/docs/skywalking-python/latest/en/contribution/developer/"},{"content":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including setting up environments, testing and releasing.\nFirst you need to have the make command available:\n# ubuntu/wsl sudo apt-get update sudo apt-get -y install make or\n# windows powershell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time irm get.scoop.sh | iex scoop install make Poetry We have migrated from basic pip to Poetry to manage dependencies and package our project.\nOnce you have make ready, run make env, this will automatically install the right Poetry release, and create (plus manage) a .venv virtual environment for us based on the currently activated Python 3 version. Enjoy coding!\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nSwitching between Multiple Python Versions Do not develop/test on Python \u0026lt; 3.7, since Poetry and some other functionalities we implement rely on Python 3.7+\nIf you would like to test on multiple Python versions, run the following to switch and recreate virtual environment:\nWithout Python Version Tools poetry env use python3.x poetry install With Python Version Tools pyenv shell 3.9.11 poetry env use $(pyenv which python) poetry install Or try: virtualenvs.prefer-active-python, which is an experimental poetry feature that can be set to true so that it will automatically follow environment.\nNext Refer to the Plugin Development Guide to learn how to build a new plugin for a library.\n","title":"Quick Start for Contributors","url":"/docs/skywalking-python/next/en/contribution/developer/"},{"content":"Quick Start for Contributors Make and Makefile We rely on Makefile to automate jobs, including setting up environments, testing and releasing.\nFirst you need to have the make command available:\n# ubuntu/wsl sudo apt-get update sudo apt-get -y install make or\n# windows powershell Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time irm get.scoop.sh | iex scoop install make Poetry We have migrated from basic pip to Poetry to manage dependencies and package our project.\nOnce you have make ready, run make env, this will automatically install the right Poetry release, and create (plus manage) a .venv virtual environment for us based on the currently activated Python 3 version. 
Enjoy coding!\nNote: Make sure you have python3 aliased to python available on Windows computers instead of pointing to the Microsoft app store.\nSwitching between Multiple Python Versions Do not develop/test on Python \u0026lt; 3.7, since Poetry and some other functionalities we implement rely on Python 3.7+\nIf you would like to test on multiple Python versions, run the following to switch and recreate virtual environment:\nWithout Python Version Tools poetry env use python3.x poetry install With Python Version Tools pyenv shell 3.9.11 poetry env use $(pyenv which python) poetry install Or try: virtualenvs.prefer-active-python, which is an experimental poetry feature that can be set to true so that it will automatically follow environment.\nNext Refer to the Plugin Development Guide to learn how to build a new plugin for a library.\n","title":"Quick Start for Contributors","url":"/docs/skywalking-python/v1.0.1/en/contribution/developer/"},{"content":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. 
Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","title":"RabbitMQ monitoring","url":"/docs/main/latest/en/setup/backend/backend-rabbitmq-monitoring/"},{"content":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). 
rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","title":"RabbitMQ monitoring","url":"/docs/main/next/en/setup/backend/backend-rabbitmq-monitoring/"},{"content":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). 
rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","title":"RabbitMQ monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-rabbitmq-monitoring/"},{"content":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). 
rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connect rabbitmq_prometheus plugin   Collections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) As reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block used rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","title":"RabbitMQ monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-rabbitmq-monitoring/"},{"content":"RabbitMQ monitoring SkyWalking leverages rabbitmq_prometheus plugin for collecting metrics data from RabbitMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rabbitmq_prometheus plugin collect metrics data from RabbitMQ. Note: The RabbitMQ version is required to be 3.8.0+. The rabbitmq_prometheus plugin is built-in since RabbitMQ v3.8.0. OpenTelemetry Collector fetches metrics from rabbitmq_prometheus plugin via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. 
The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rabbitmq_prometheus. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RabbitMQ Monitoring RabbitMQ monitoring provides multidimensional metrics monitoring of RabbitMQ cluster as Layer: RABBITMQ Service in the OAP. In each cluster, the nodes are represented as Instance.\nRabbitMQ Cluster Supported Metrics    Monitoring Panel Metric Name Description Data Source     Memory Available Before Publishers Blocked (MB) meter_rabbitmq_memory_available_before_publisher_blocked If the value is zero or less, the memory alarm will be triggered and all publishing connections across all cluster nodes will be blocked. rabbitmq_prometheus plugin   Disk Space Available Before Publishers Blocked (GB) meter_rabbitmq_disk_space_available_before_publisher_blocked This metric is reported for the partition where the RabbitMQ data directory is stored. rabbitmq_prometheus plugin   File Descriptors Available meter_rabbitmq_file_descriptors_available When this value reaches zero, new connections will not be accepted and disk write operations may fail. rabbitmq_prometheus plugin   TCP Sockets Available meter_rabbitmq_tcp_socket_available When this value reaches zero, new connections will not be accepted. rabbitmq_prometheus plugin   Messages Ready To Be Delivered To Consumers meter_rabbitmq_message_ready_delivered_consumers Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Messages Pending Consumer Acknowledgement meter_rabbitmq_message_unacknowledged_delivered_consumers The total number of messages that are either in-flight to consumers, currently being processed by consumers or simply waiting for the consumer acknowledgements to be processed by the queue. Until the queue processes the message acknowledgement, the message will remain unacknowledged. rabbitmq_prometheus plugin   Messages Published meter_rabbitmq_messages_published The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Messages Confirmed To Publishers meter_rabbitmq_messages_confirmed The rate of messages confirmed by the broker to publishers. Publishers must opt-in to receive message confirmations. rabbitmq_prometheus plugin   Messages Unconfirmed To Publishers meter_rabbitmq_messages_unconfirmed The rate of messages received from publishers that have publisher confirms enabled and the broker has not confirmed yet. rabbitmq_prometheus plugin   Messages Routed To Queues meter_rabbitmq_messages_routed The rate of messages received from publishers and successfully routed to the master queue replicas. rabbitmq_prometheus plugin   Unroutable Messages Returned To Publishers meter_rabbitmq_messages_unroutable_returned The rate of messages that cannot be routed and are returned back to publishers. rabbitmq_prometheus plugin   Unroutable Messages Dropped meter_rabbitmq_messages_unroutable_dropped The rate of messages that cannot be routed and are dropped. rabbitmq_prometheus plugin   Queues Total meter_rabbitmq_queues Total number of queue masters per node. rabbitmq_prometheus plugin   Queues Declared meter_rabbitmq_queues_declared_total The rate of queue declarations performed by clients. rabbitmq_prometheus plugin   Queues Created meter_rabbitmq_queues_created_total The rate of new queues created (as opposed to redeclarations). 
rabbitmq_prometheus plugin   Queues Deleted meter_rabbitmq_queues_deleted_total The rate of queues deleted. rabbitmq_prometheus plugin   Channels Total meter_rabbitmq_channels Total number of channels on all currently opened connections. rabbitmq_prometheus plugin   Channels Opened meter_rabbitmq_channels_opened_total The rate of new channels opened by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Channels Closed meter_rabbitmq_channels_closed_total The rate of channels closed by applications across all connections. Channels are expected to be long-lived. rabbitmq_prometheus plugin   Connections Total meter_rabbitmq_connections Total number of client connections. rabbitmq_prometheus plugin   Connections Opened meter_rabbitmq_connections_opened_total The rate of new connections opened by clients. Connections are expected to be long-lived. rabbitmq_prometheus plugin   Connections Closed meter_rabbitmq_connections_closed_total The rate of connections closed. Connections are expected to be long-lived. rabbitmq_prometheus plugin    RabbitMQ Node Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Ready Messages  meter_rabbitmq_node_queue_messages_ready Total number of ready messages ready to be delivered to consumers. rabbitmq_prometheus plugin   Unacknowledged Messages  meter_rabbitmq_node_unacknowledged_messages Messages delivered to consumers but not yet acknowledged rabbitmq_prometheus plugin   Incoming Messages  meter_rabbitmq_node_incoming_messages The incoming message rate before any routing rules are applied. rabbitmq_prometheus plugin   Outgoing Messages  meter_rabbitmq_node_outgoing_messages_total The outgoing message rate before any routing rules are applied. 
rabbitmq_prometheus plugin   Publishers  meter_rabbitmq_node_publisher_total Publishers rabbitmq_prometheus plugin   Consumers  meter_rabbitmq_node_consumer_total Consumers currently connected rabbitmq_prometheus plugin   Connections  meter_rabbitmq_node_connections_total Connections currently open rabbitmq_prometheus plugin   Channels  meter_rabbitmq_node_channel_total Channels currently open rabbitmq_prometheus plugin   Queues  meter_rabbitmq_node_queue_total Queues available rabbitmq_prometheus plugin   Allocated Used % meter_rabbitmq_node_allocated_used_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused % meter_rabbitmq_node_allocated_unused_percent Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Used MB meter_rabbitmq_node_allocated_used_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Unused MB meter_rabbitmq_node_allocated_unused_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Allocated Total MB meter_rabbitmq_node_allocated_total_bytes Erlang VM memory utilisation from erts_alloc perspective rabbitmq_prometheus plugin   Resident Set Size MB meter_rabbitmq_node_process_resident_memory_bytes Erlang VM Resident Set Size (RSS) as reported by the OS rabbitmq_prometheus plugin   Allocators MB meter_rabbitmq_node_allocated_unused_bytes meter_rabbitmq_node_allocated_total_bytes meter_rabbitmq_node_process_resident_memory_bytes  rabbitmq_prometheus plugin   Allocated By Type MB meter_rabbitmq_node_allocated_by_type Allocated by allocator type rabbitmq_prometheus plugin   Multiblock Used MB meter_rabbitmq_node_allocated_multiblock_used Multi block used rabbitmq_prometheus plugin   Multiblock Unused MB meter_rabbitmq_node_allocated_multiblock_unused Multi block unused rabbitmq_prometheus plugin   Multiblock Pool Used MB meter_rabbitmq_node_allocated_multiblock_pool_used Multi block pool used rabbitmq_prometheus plugin   Multiblock Pool Unused MB meter_rabbitmq_node_allocated_multiblock_pool_unused Multi block pool unused rabbitmq_prometheus plugin   Singleblock Used MB meter_rabbitmq_node_allocated_singleblock_used Single block used rabbitmq_prometheus plugin   Singleblock Unused MB meter_rabbitmq_node_allocated_singleblock_unused Single block unused rabbitmq_prometheus plugin    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/rabbitmq/rabbitmq-cluster.yaml, /config/otel-rules/rabbitmq/rabbitmq-node.yaml. The RabbitMQ dashboard panel configurations are found in /config/ui-initialized-templates/rabbitmq.\n","title":"RabbitMQ monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-rabbitmq-monitoring/"},{"content":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... 
modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","title":"Reading Context","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/"},{"content":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","title":"Reading Context","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/"},{"content":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","title":"Reading Context","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/"},{"content":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","title":"Reading Context","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/"},{"content":"Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n Use TraceContext.traceId() API to obtain traceId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;traceId\u0026#34;, TraceContext.traceId());  Use TraceContext.segmentId() API to obtain segmentId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... 
modelAndView.addObject(\u0026#34;segmentId\u0026#34;, TraceContext.segmentId());  Use TraceContext.spanId() API to obtain spanId.  import org.apache.skywalking.apm.toolkit.trace.TraceContext; ... modelAndView.addObject(\u0026#34;spanId\u0026#34;, TraceContext.spanId()); Sample codes only\n","title":"Reading Context","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-read-context/"},{"content":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/accesslog/v2/als.proto.\nSupport Forwarders  envoy-als-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-als-v2-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-als-v2-receiver/"},{"content":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/accesslog/v2/als.proto.\nSupport Forwarders  envoy-als-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-als-v2-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-als-v2-receiver/"},{"content":"Receiver/grpc-envoy-als-v2-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/accesslog/v2/als.proto.\nSupport Forwarders  envoy-als-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-als-v2-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-als-v2-receiver/"},{"content":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/3791753e94edbac8a90c5485c68136886c40e719/api/envoy/config/accesslog/v3/accesslog.proto.\nSupport Forwarders  envoy-als-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   
limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-als-v3-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-als-v3-receiver/"},{"content":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/3791753e94edbac8a90c5485c68136886c40e719/api/envoy/config/accesslog/v3/accesslog.proto.\nSupport Forwarders  envoy-als-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-als-v3-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-als-v3-receiver/"},{"content":"Receiver/grpc-envoy-als-v3-receiver Description This is a receiver for Envoy ALS format, which is defined at https://github.com/envoyproxy/envoy/blob/3791753e94edbac8a90c5485c68136886c40e719/api/envoy/config/accesslog/v3/accesslog.proto.\nSupport Forwarders  envoy-als-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-als-v3-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-als-v3-receiver/"},{"content":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/metrics/v2/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-metrics-v2-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-metrics-v2-receiver/"},{"content":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/metrics/v2/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   
limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-metrics-v2-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-metrics-v2-receiver/"},{"content":"Receiver/grpc-envoy-metrics-v2-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/v1.17.4/api/envoy/service/metrics/v2/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v2-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-metrics-v2-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-metrics-v2-receiver/"},{"content":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/5f7d6efb5786ee3de31b1fb37c78fa281718b704/api/envoy/service/metrics/v3/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-metrics-v3-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-envoy-metrics-v3-receiver/"},{"content":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/5f7d6efb5786ee3de31b1fb37c78fa281718b704/api/envoy/service/metrics/v3/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-metrics-v3-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-envoy-metrics-v3-receiver/"},{"content":"Receiver/grpc-envoy-metrics-v3-receiver Description This is a receiver for Envoy Metrics format, which is defined at https://github.com/envoyproxy/envoy/blob/5f7d6efb5786ee3de31b1fb37c78fa281718b704/api/envoy/service/metrics/v3/metrics_service.proto.\nSupport Forwarders  envoy-metrics-v3-grpc-forwarder  DefaultConfig # The time interval between two flush operations. And the time unit is millisecond.flush_time:1000# The max cache count when receive the messagelimit_count:500Configuration    Name Type Description     flush_time int The time interval between two flush operations. And the time unit is millisecond.   
limit_count int The max cache count when receive the message    ","title":"Receiver/grpc-envoy-metrics-v3-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-envoy-metrics-v3-receiver/"},{"content":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration Discovery Service format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/ConfigurationDiscoveryService.proto.\nSupport Forwarders  native-cds-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-cds-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-cds-receiver/"},{"content":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration Discovery Service format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/ConfigurationDiscoveryService.proto.\nSupport Forwarders  native-cds-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-cds-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-cds-receiver/"},{"content":"Receiver/grpc-native-cds-receiver Description This is a receiver for SkyWalking native Configuration Discovery Service format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/ConfigurationDiscoveryService.proto.\nSupport Forwarders  native-cds-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-cds-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-cds-receiver/"},{"content":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/CLRMetric.proto.\nSupport Forwarders  native-clr-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-clr-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-clr-receiver/"},{"content":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/CLRMetric.proto.\nSupport Forwarders  native-clr-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-clr-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-clr-receiver/"},{"content":"Receiver/grpc-native-clr-receiver Description This is a receiver for SkyWalking native clr format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/CLRMetric.proto.\nSupport Forwarders  native-clr-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-clr-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-clr-receiver/"},{"content":"Receiver/grpc-native-ebpf-accesslog-receiver Description This is a receiver for SkyWalking native accesslog format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/accesslog.proto.\nSupport Forwarders  
native-ebpf-accesslog-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-ebpf-accesslog-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-ebpf-accesslog-receiver/"},{"content":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-ebpf-profiling-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-ebpf-profiling-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-ebpf-profiling-receiver/"},{"content":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-ebpf-profiling-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-ebpf-profiling-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-ebpf-profiling-receiver/"},{"content":"Receiver/grpc-native-ebpf-profiling-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-ebpf-profiling-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-ebpf-profiling-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-ebpf-profiling-receiver/"},{"content":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-event-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-event-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-event-receiver/"},{"content":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-event-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-event-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-event-receiver/"},{"content":"Receiver/grpc-native-event-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-event-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-event-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-event-receiver/"},{"content":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto.\nSupport Forwarders  
native-jvm-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-jvm-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-jvm-receiver/"},{"content":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto.\nSupport Forwarders  native-jvm-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-jvm-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-jvm-receiver/"},{"content":"Receiver/grpc-native-jvm-receiver Description This is a receiver for SkyWalking native jvm format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/JVMMetric.proto.\nSupport Forwarders  native-jvm-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-jvm-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-jvm-receiver/"},{"content":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-log-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-log-receiver/"},{"content":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-log-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-log-receiver/"},{"content":"Receiver/grpc-native-log-receiver Description This is a receiver for SkyWalking native logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-log-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-log-receiver/"},{"content":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native management format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto.\nSupport Forwarders  native-management-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-management-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-management-receiver/"},{"content":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native management format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto.\nSupport Forwarders  native-management-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    
","title":"Receiver/grpc-native-management-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-management-receiver/"},{"content":"Receiver/grpc-native-management-receiver Description This is a receiver for SkyWalking native management format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/management/Management.proto.\nSupport Forwarders  native-management-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-management-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-management-receiver/"},{"content":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-meter-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-meter-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-meter-receiver/"},{"content":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-meter-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-meter-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-meter-receiver/"},{"content":"Receiver/grpc-native-meter-receiver Description This is a receiver for SkyWalking native meter format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/event/Event.proto.\nSupport Forwarders  native-meter-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-meter-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-meter-receiver/"},{"content":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-process-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-process-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-process-receiver/"},{"content":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-process-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-process-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-process-receiver/"},{"content":"Receiver/grpc-native-process-receiver Description This is a receiver for SkyWalking native process format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/ebpf/profiling/Process.proto.\nSupport Forwarders  native-process-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    
","title":"Receiver/grpc-native-process-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-process-receiver/"},{"content":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/profile/Profile.proto.\nSupport Forwarders  native-profile-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-profile-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-profile-receiver/"},{"content":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/profile/Profile.proto.\nSupport Forwarders  native-profile-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-profile-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-profile-receiver/"},{"content":"Receiver/grpc-native-profile-receiver Description This is a receiver for SkyWalking native profile format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/profile/Profile.proto.\nSupport Forwarders  native-profile-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-profile-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-profile-receiver/"},{"content":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing and span attached event format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto.\nSupport Forwarders  native-tracing-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-tracing-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-native-tracing-receiver/"},{"content":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing and span attached event format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto.\nSupport Forwarders  native-tracing-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-tracing-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-native-tracing-receiver/"},{"content":"Receiver/grpc-native-tracing-receiver Description This is a receiver for SkyWalking native tracing and span attached event format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/language-agent/Tracing.proto.\nSupport Forwarders  native-tracing-grpc-forwarder  DefaultConfig yaml\nConfiguration    Name Type Description    ","title":"Receiver/grpc-native-tracing-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-native-tracing-receiver/"},{"content":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 format, which is defined at https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/collector/metrics/v1/metrics_service.proto.\nSupport Forwarders  
otlp-metrics-v1-grpc-forwarder  DefaultConfig yaml \nConfiguration    Name Type Description    ","title":"Receiver/grpc-otlp-metrics-v1-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_grpc-otlp-metrics-v1-receiver/"},{"content":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 format, which is defined at https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/collector/metrics/v1/metrics_service.proto.\nSupport Forwarders  otlp-metrics-v1-grpc-forwarder  DefaultConfig yaml \nConfiguration    Name Type Description    ","title":"Receiver/grpc-otlp-metrics-v1-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_grpc-otlp-metrics-v1-receiver/"},{"content":"Receiver/grpc-otlp-metrics-v1-receiver Description This is a receiver for OpenTelemetry Metrics v1 format, which is defined at https://github.com/open-telemetry/opentelemetry-proto/blob/724e427879e3d2bae2edc0218fff06e37b9eb46e/opentelemetry/proto/collector/metrics/v1/metrics_service.proto.\nSupport Forwarders  otlp-metrics-v1-grpc-forwarder  DefaultConfig yaml \nConfiguration    Name Type Description    ","title":"Receiver/grpc-otlp-metrics-v1-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_grpc-otlp-metrics-v1-receiver/"},{"content":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig # The native log request URI.uri:\u0026#34;/logging\u0026#34;# The request timeout seconds.timeout:5Configuration    Name Type Description     uri string config   timeout int     ","title":"Receiver/http-native-log-receiver","url":"/docs/skywalking-satellite/latest/en/setup/plugins/receiver_http-native-log-receiver/"},{"content":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig # The native log request URI.uri:\u0026#34;/logging\u0026#34;# The request timeout seconds.timeout:5Configuration    Name Type Description     uri string config   timeout int     ","title":"Receiver/http-native-log-receiver","url":"/docs/skywalking-satellite/next/en/setup/plugins/receiver_http-native-log-receiver/"},{"content":"Receiver/http-native-log-receiver Description This is a receiver for SkyWalking http logging format, which is defined at https://github.com/apache/skywalking-data-collect-protocol/blob/master/logging/Logging.proto.\nSupport Forwarders  native-log-grpc-forwarder  DefaultConfig # The native log request URI.uri:\u0026#34;/logging\u0026#34;# The request timeout seconds.timeout:5Configuration    Name Type Description     uri string config   timeout int     ","title":"Receiver/http-native-log-receiver","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/receiver_http-native-log-receiver/"},{"content":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. 
OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. 
You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","title":"Redis monitoring","url":"/docs/main/latest/en/setup/backend/backend-redis-monitoring/"},{"content":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_used_bytes  meter_redis_memory_max_bytes Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration  meter_redis_commands_total The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. 
The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","title":"Redis monitoring","url":"/docs/main/next/en/setup/backend/backend-redis-monitoring/"},{"content":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. 
redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","title":"Redis monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-redis-monitoring/"},{"content":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. 
OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. 
You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","title":"Redis monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-redis-monitoring/"},{"content":"Redis monitoring Redis server performance from redis-exporter SkyWalking leverages redis-exporter for collecting metrics data from Redis. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  redis-exporter collect metrics data from Redis. OpenTelemetry Collector fetches metrics from redis-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up redis-exporter. Set up OpenTelemetry Collector. For details on Redis Receiver in OpenTelemetry Collector, refer to here. Config SkyWalking OpenTelemetry receiver.  Redis Monitoring Redis monitoring provides monitoring of the status and resources of the Redis server. Redis cluster is cataloged as a Layer: REDIS Service in OAP. Each Redis server is cataloged as an Instance in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime day meter_redis_uptime The uptime of Redis. redis-exporter   Connected Clients  meter_redis_connected_clients The number of connected clients. redis-exporter   Blocked Clients  meter_redis_blocked_clients The number of blocked clients. redis-exporter   Memory Max Bytes MB meter_redis_memory_max_bytes The max bytes of memory. redis-exporter   Hits Rate % meter_redis_hit_rate Hit rate of redis when used as a cache. redis-exporter   Average Time Spend By Command second meter_redis_average_time_spent_by_command Average time to execute various types of commands. redis-exporter   Total Commands Trend  meter_redis_total_commands_rate The Trend of total commands. redis-exporter   DB keys  meter_redis_evicted_keys_total  meter_redis_expired_keys_total  meter_redis_db_keys The number of Expired / Evicted / total keys. redis-exporter   Net Input/Output Bytes KB meter_redis_net_input_bytes  meter_redis_net_output_bytes Total bytes of input / output of redis net. redis-exporter   Memory Usage % meter_redis_memory_usage Percentage of used memory. redis-exporter   Total Time Spend By Command Trend  meter_redis_commands_duration_seconds_total_rate The trend of total time spend by command redis-exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/redis. 
The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis.\nCollect sampled slow commands SkyWalking leverages fluentbit or other log agents for collecting slow commands from Redis.\nData flow  Execute commands periodically to collect slow logs from Redis and save the result locally. Fluent-bit agent collects slow logs from local file. fluent-bit agent sends data to SkyWalking OAP Server using native meter APIs via HTTP. The SkyWalking OAP Server parses the expression with LAL to parse/extract and store the results.  Set up  Set up fluentbit. Config fluentbit from here for Redis. Config slow log from here for Redis. Periodically execute the commands.  Notice:\n1.The slowlog-log-slower-than and slowlog-max-len configuration items in the configuration file are for the slow log, the former indicating that execution time longer than the specified time (in milliseconds) will be logged to the slowlog, and the latter indicating the maximum number of slow logs that will be stored in the slow log file. 2.In the e2e test, SkyWalking uses cron to periodically execute the redis command to fetch the slow logs and write them to a local file, which is then collected by fluent-bit to send the data to the OAP. You can see the relevant configuration files here.You can also get slow logs periodically and send them to OAP in other ways than using cron and fluent-bit.\nSlow Commands Monitoring Slow SQL monitoring provides monitoring of the slow commands of the Redis servers. Redis servers are cataloged as a Layer: REDIS Service in OAP.\nSupported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Slow Statements ms top_n_database_statement The latency and statement of Redis slow commands fluentbit    Customizations You can customize your own metrics/expression/dashboard panel. The slowsql expression rules are found in /config/lal/redis-slowsql.yaml The Redis dashboard panel configurations are found in /config/ui-initialized-templates/redis. `\n","title":"Redis monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-redis-monitoring/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/latest/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. 
This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/next/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.0.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.1.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.2.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. 
This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.3.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.4.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.5.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.6.0/en/faq/unexpected-endpoint-register/"},{"content":"Register mechanism is no longer required for local / exit span Since version 6.6.0, SkyWalking has removed the local and exit span registers. If an old java agent (before 6.6.0) is still running, which registers to the 6.6.0+ backend, you will face the following warning message.\nclass=RegisterServiceHandler, message = Unexpected endpoint register, endpoint isn't detected from server side. 
This will not harm the backend or cause any issues, but serves as a reminder that your agent or other clients should follow the new protocol requirements.\nYou could simply use log4j2.xml to filter this warning message out.\n","title":"Register mechanism is no longer required for local / exit span","url":"/docs/main/v9.7.0/en/faq/unexpected-endpoint-register/"},{"content":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","title":"Report service instance status","url":"/docs/main/latest/en/api/instance-properties/"},{"content":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. 
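As an illustration of this keep-alive requirement, a minimal ping loop against the HTTP endpoint shown above might look like the sketch below; the service and instance names and the OAP address are placeholders, not values SkyWalking prescribes.

package main

import (
	"bytes"
	"net/http"
	"time"
)

func main() {
	payload := []byte(`{"service": "User Service Name", "serviceInstance": "User Service Instance Name"}`)
	// Ping the OAP once per minute to keep this instance marked as alive.
	for range time.Tick(time.Minute) {
		resp, err := http.Post("http://localhost:12800/v3/management/keepAlive",
			"application/json", bytes.NewReader(payload))
		if err != nil {
			continue // try again on the next tick
		}
		resp.Body.Close()
	}
}
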
The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","title":"Report service instance status","url":"/docs/main/next/en/api/instance-properties/"},{"content":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  
// Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","title":"Report service instance status","url":"/docs/main/v9.4.0/en/api/instance-properties/"},{"content":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","title":"Report service instance status","url":"/docs/main/v9.5.0/en/api/instance-properties/"},{"content":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","title":"Report service instance status","url":"/docs/main/v9.6.0/en/api/instance-properties/"},{"content":"Report service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  syntax = \u0026#34;proto3\u0026#34;;package skywalking.v3;option java_multiple_files = true;option java_package = \u0026#34;org.apache.skywalking.apm.network.management.v3\u0026#34;;option csharp_namespace = \u0026#34;SkyWalking.NetworkProtocol.V3\u0026#34;;option go_package = \u0026#34;skywalking.apache.org/repo/goapi/collect/management/v3\u0026#34;;import \u0026#34;common/Common.proto\u0026#34;;import \u0026#34;common/Command.proto\u0026#34;;// Define the service reporting the extra information of the instance. service ManagementService { // Report custom properties of a service instance.  rpc reportInstanceProperties (InstanceProperties) returns (Commands) { } // Keep the instance alive in the backend analysis.  // Only recommend to do separate keepAlive report when no trace and metrics needs to be reported.  // Otherwise, it is duplicated.  rpc keepAlive (InstancePingPkg) returns (Commands) { }}message InstanceProperties { string service = 1; string serviceInstance = 2; repeated KeyStringValuePair properties = 3; // Instance belong layer name which define in the backend, general is default.  string layer = 4;}message InstancePingPkg { string service = 1; string serviceInstance = 2; // Instance belong layer name which define in the backend, general is default.  
string layer = 3;}Via HTTP Endpoint  Report service instance properties   POST http://localhost:12800/v3/management/reportProperties\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34;, \u0026#34;properties\u0026#34;: [ { \u0026#34;key\u0026#34;: \u0026#34;language\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;Lua\u0026#34; } ] } Output JSON Array:\n{}  Service instance ping   POST http://localhost:12800/v3/management/keepAlive\n Input:\n{ \u0026#34;service\u0026#34;: \u0026#34;User Service Name\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User Service Instance Name\u0026#34; } OutPut:\n{} ","title":"Report service instance status","url":"/docs/main/v9.7.0/en/api/instance-properties/"},{"content":"RocketMQ monitoring SkyWalking leverages rocketmq-exporter for collecting metrics data from RocketMQ. It leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System.\nData flow  The rocketmq-exporter (https://github.com/apache/rocketmq-exporter?tab=readme-ov-file#readme) collects metrics data from RocketMQ, The RocketMQ version is required to be 4.3.2+. OpenTelemetry Collector fetches metrics from rocketmq-exporter via Prometheus Receiver and pushes metrics to SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup  Setup rocketmq-exporter. Set up OpenTelemetry Collector. The example for OpenTelemetry Collector configuration, refer to here. Config SkyWalking OpenTelemetry receiver.  RocketMQ Monitoring RocketMQ monitoring provides multidimensional metrics monitoring of RocketMQ Exporter as Layer: RocketMQ Service in the OAP. In each cluster, the broker is represented as Instance and the topic is represented as Endpoint.\nRocketMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Messages Produced Today Count meter_rocketmq_cluster_messages_produced_today The number of cluster messages produced today. RocketMQ Exporter   Messages Consumed Today Count meter_rocketmq_cluster_messages_consumed_today The number of cluster messages consumed today. RocketMQ Exporter   Total Producer Tps Msg/sec meter_rocketmq_cluster_total_producer_tps The number of messages produced per second. RocketMQ Exporter   Total Consume Tps Msg/sec meter_rocketmq_cluster_total_consumer_tps The number of messages consumed per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_cluster_producer_message_size The max size of a message produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_cluster_consumer_message_size The max size of the consumed message per second. RocketMQ Exporter   Messages Produced Until Yesterday Count meter_rocketmq_cluster_messages_produced_until_yesterday The total number of messages put until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Messages Consumed Until Yesterday Count meter_rocketmq_cluster_messages_consumed_until_yesterday The total number of messages read until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Max Consumer Latency ms meter_rocketmq_cluster_max_consumer_latency The max number of consumer latency. RocketMQ Exporter   Max CommitLog Disk Ratio % meter_rocketmq_cluster_max_commitLog_disk_ratio The max utilization ratio of the commit log disk. 
RocketMQ Exporter   CommitLog Disk Ratio % meter_rocketmq_cluster_commitLog_disk_ratio The utilization ratio of the commit log disk per broker IP. RocketMQ Exporter   Pull ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_pull_threadPool_queue_head_wait_time The wait time in milliseconds for pulling threadPool queue per broker IP. RocketMQ Exporter   Send ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_send_threadPool_queue_head_wait_time The wait time in milliseconds for sending threadPool queue per broker IP. RocketMQ Exporter    RocketMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Produce TPS Msg/sec meter_rocketmq_broker_produce_tps The number of broker produces messages per second. RocketMQ Exporter   Consume QPS Msg/sec meter_rocketmq_broker_consume_qps The number of broker consumes messages per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_broker_producer_message_size The max size of the messages produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_broker_consumer_message_size The max size of the messages consumed per second. RocketMQ Exporter    RocketMQ Topic Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Max Producer Message Size Byte meter_rocketmq_topic_max_producer_message_size The maximum number of messages produced. RocketMQ Exporter   Max Consumer Message Size Byte meter_rocketmq_topic_max_consumer_message_size The maximum number of messages consumed. RocketMQ Exporter   Consumer Latency ms meter_rocketmq_topic_consumer_latency Consumption delay time of a consumer group. RocketMQ Exporter   Producer Tps Msg/sec meter_rocketmq_topic_producer_tps The number of messages produced per second. RocketMQ Exporter   Consumer Group Tps Msg/sec meter_rocketmq_topic_consumer_group_tps The number of messages consumed per second per consumer group. RocketMQ Exporter   Producer Offset Count meter_rocketmq_topic_producer_offset The max progress of a topic\u0026rsquo;s production message. RocketMQ Exporter   Consumer Group Offset Count meter_rocketmq_topic_consumer_group_offset The max progress of a topic\u0026rsquo;s consumption message per consumer group. RocketMQ Exporter   Producer Message Size Byte/sec meter_rocketmq_topic_producer_message_size The max size of messages produced per second. RocketMQ Exporter   Consumer Message Size Byte/sec meter_rocketmq_topic_consumer_message_size The max size of messages consumed per second. RocketMQ Exporter    Customizations You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in otel-rules/rocketmq/rocketmq-cluster.yaml, otel-rules/rocketmq/rocketmq-broker.yaml, otel-rules/rocketmq/rocketmq-topic.yaml. The RocketMQ dashboard panel configurations are found in ui-initialized-templates/rocketmq.\n","title":"RocketMQ monitoring","url":"/docs/main/next/en/setup/backend/backend-rocketmq-monitoring/"},{"content":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your plugin logic. If you want to perform debugging, follow these steps:\n Write test code: Write a sample application that includes the framework content you need to test. Build the Agent: In the project root directory, run the make build command to compile the Agent program into a binary file. 
Adjust the test program\u0026rsquo;s Debug configuration: Modify the test program\u0026rsquo;s Debug configuration, which will be explained in more detail later. Launch the program and add breakpoints: Start your sample application and add breakpoints in your plugin code where you want to pause the execution and inspect the program state.  Write test code Please make sure that you have imported github.com/apache/skywalking-go in your test code. You can refer to the documentation on how to compile using go build for specific steps.\nAdjust the test program\u0026rsquo;s Debug configuration Please locate the following two paths:\n Go Agent: Locate the binary file generated through make build in the previous step. Current project path: Find the root directory of the current project, which will be used to search for source files in subsequent steps.  Then, please enter the following command in the tool arguments section of the debug configuration:\n-toolexec '/path/to/skywalking-go-agent -debug /path/to/current-project-path' -a\u0026quot;. ","title":"Running and Debugging","url":"/docs/skywalking-go/latest/en/development-and-contribution/running-and-debugging/"},{"content":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your plugin logic. If you want to perform debugging, follow these steps:\n Write test code: Write a sample application that includes the framework content you need to test. Build the Agent: In the project root directory, run the make build command to compile the Agent program into a binary file. Adjust the test program\u0026rsquo;s Debug configuration: Modify the test program\u0026rsquo;s Debug configuration, which will be explained in more detail later. Launch the program and add breakpoints: Start your sample application and add breakpoints in your plugin code where you want to pause the execution and inspect the program state.  Write test code Please make sure that you have imported github.com/apache/skywalking-go in your test code. You can refer to the documentation on how to compile using go build for specific steps.\nAdjust the test program\u0026rsquo;s Debug configuration Please locate the following two paths:\n Go Agent: Locate the binary file generated through make build in the previous step. Current project path: Find the root directory of the current project, which will be used to search for source files in subsequent steps.  Then, please enter the following command in the tool arguments section of the debug configuration:\n-toolexec '/path/to/skywalking-go-agent -debug /path/to/current-project-path' -a\u0026quot;. ","title":"Running and Debugging","url":"/docs/skywalking-go/next/en/development-and-contribution/running-and-debugging/"},{"content":"Running and Debugging Debugging is essential when developing plugins, as it helps you verify your plugin logic. If you want to perform debugging, follow these steps:\n Write test code: Write a sample application that includes the framework content you need to test. Build the Agent: In the project root directory, run the make build command to compile the Agent program into a binary file. Adjust the test program\u0026rsquo;s Debug configuration: Modify the test program\u0026rsquo;s Debug configuration, which will be explained in more detail later. Launch the program and add breakpoints: Start your sample application and add breakpoints in your plugin code where you want to pause the execution and inspect the program state.  
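As an example of step 1 (the sample application), the minimal program below imports github.com/apache/skywalking-go and exposes a single HTTP endpoint to generate traffic while debugging; the handler and port are illustrative, and the module still needs to be added to go.mod.

package main

import (
	"net/http"

	_ "github.com/apache/skywalking-go"
)

func main() {
	// One simple endpoint is enough to exercise the instrumented framework.
	http.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})
	http.ListenAndServe(":8080", nil)
}
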
Write test code Please make sure that you have imported github.com/apache/skywalking-go in your test code. You can refer to the documentation on how to compile using go build for specific steps.\nAdjust the test program\u0026rsquo;s Debug configuration Please locate the following two paths:\n Go Agent: Locate the binary file generated through make build in the previous step. Current project path: Find the root directory of the current project, which will be used to search for source files in subsequent steps.  Then, please enter the following command in the tool arguments section of the debug configuration:\n-toolexec '/path/to/skywalking-go-agent -debug /path/to/current-project-path' -a\u0026quot;. ","title":"Running and Debugging","url":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/running-and-debugging/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/latest/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/next/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/v9.3.0/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  
Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/v9.4.0/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. 
The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/v9.5.0/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/v9.6.0/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite self observability dashboard SkyWalking Satellite collects and exports metrics in Prometheus format and SkyWalking metrics service protobuffer format for consuming, it also provides a dashboard to visualize the Satellite metrics.\nData flow  SkyWalking Satellite collects metrics data internally and pushes the metrics to SkyWalking OAP. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Set up  Set up SkyWalking Satellite Telemetry Exporter. Config SkyWalking OpenTelemetry receiver.  Self observability monitoring Self observability monitoring provides monitoring of the status and resources of the OAP server itself. 
oap-server is a Service in OAP, and land on the Layer: SO11Y_OAP.\nSelf observability metrics    Monitoring Panel Unit Metric Name Description Data Source      Count satellite_service_grpc_connect_count Connection Count SkyWalking Satellite    Percentage satellite_service_server_cpu_utilization CPU (%) SkyWalking Satellite    Count satellite_service_queue_used_count The used count of queue of pipeline SkyWalking Satellite    Count satellite_service_receive_event_count Receive count of event from downstream SkyWalking Satellite    Count satellite_service_fetch_event_count Fetch count of event from downstream SkyWalking Satellite    Count satellite_service_queue_input_count The event count of push to the queue SkyWalking Satellite    Count satellite_service_send_event_count The event count of push data to the upstream SkyWalking Satellite    Customizations You can customize your own metrics/expression/dashboard panel. The self observability dashboard panel configurations are found in /config/ui-initialized-templates/so11y_satellite/so11y-root.json.\n","title":"Satellite self observability dashboard","url":"/docs/main/v9.7.0/en/setup/backend/dashboards-so11y-satellite/"},{"content":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install the Satellite component.\nInstall Operator And Backend  Follow Operator installation instrument to install the operator. Follow Deploy OAP server and UI to install backend.  Deploy Satellite with default setting  Deploy the Storage use the below command:  Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f satellite.yaml Check the Satellite in Kubernetes:  $ kubectl get satellite NAME INSTANCES RUNNING ADDRESS default 1 1 default-satellite.default Satellite With HPA  Follow Custom Metrics Adapter to install the metrics adapter. Update the config in the Satellite CRD and re-apply it to activate the metrics service in satellite.  config: - name: SATELLITE_TELEMETRY_EXPORT_TYPE value: metrics_service Update the config in the OAP CRD and re-apply it to activate the satellite MAL.  config: - name: SW_METER_ANALYZER_ACTIVE_FILES value: satellite Add the HorizontalPodAutoScaler CRD, and update the config file the service and target to your excepted config. It\u0026rsquo;s recommend to set the stabilizationWindowSeconds and selectPolicy of scaling up in HPA, which would help prevent continuous scaling up of pods due to metric delay fluctuations. Check the HorizontalPodAutoScaler in the Kubernetes:  $ kubectl get HorizontalPodAutoscaler NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 2/1900, 5/75 1 3 1 92m ","title":"Satellite Usage","url":"/docs/skywalking-swck/latest/examples/satellite/"},{"content":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install the Satellite component.\nInstall Operator And Backend  Follow Operator installation instrument to install the operator. Follow Deploy OAP server and UI to install backend.  
Deploy Satellite with default setting  Deploy the Storage use the below command:  Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f satellite.yaml Check the Satellite in Kubernetes:  $ kubectl get satellite NAME INSTANCES RUNNING ADDRESS default 1 1 default-satellite.default Satellite With HPA  Follow Custom Metrics Adapter to install the metrics adapter. Update the config in the Satellite CRD and re-apply it to activate the metrics service in satellite.  config: - name: SATELLITE_TELEMETRY_EXPORT_TYPE value: metrics_service Update the config in the OAP CRD and re-apply it to activate the satellite MAL.  config: - name: SW_METER_ANALYZER_ACTIVE_FILES value: satellite Add the HorizontalPodAutoScaler CRD, and update the config file the service and target to your excepted config. It\u0026rsquo;s recommend to set the stabilizationWindowSeconds and selectPolicy of scaling up in HPA, which would help prevent continuous scaling up of pods due to metric delay fluctuations. Check the HorizontalPodAutoScaler in the Kubernetes:  $ kubectl get HorizontalPodAutoscaler NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 2/1900, 5/75 1 3 1 92m ","title":"Satellite Usage","url":"/docs/skywalking-swck/next/examples/satellite/"},{"content":"Satellite Usage In this example, you will learn how to use the Satellite.\nInstall Satellite Install the Satellite component.\nInstall Operator And Backend  Follow Operator installation instrument to install the operator. Follow Deploy OAP server and UI to install backend.  Deploy Satellite with default setting  Deploy the Storage use the below command:  Clone this repo, then change current directory to samples.\nIssue the below command to deploy an OAP server and UI.\nkubectl apply -f satellite.yaml Check the Satellite in Kubernetes:  $ kubectl get satellite NAME INSTANCES RUNNING ADDRESS default 1 1 default-satellite.default Satellite With HPA  Follow Custom Metrics Adapter to install the metrics adapter. Update the config in the Satellite CRD and re-apply it to activate the metrics service in satellite.  config: - name: SATELLITE_TELEMETRY_EXPORT_TYPE value: metrics_service Update the config in the OAP CRD and re-apply it to activate the satellite MAL.  config: - name: SW_METER_ANALYZER_ACTIVE_FILES value: satellite Add the HorizontalPodAutoScaler CRD, and update the config file the service and target to your excepted config. It\u0026rsquo;s recommend to set the stabilizationWindowSeconds and selectPolicy of scaling up in HPA, which would help prevent continuous scaling up of pods due to metric delay fluctuations. Check the HorizontalPodAutoScaler in the Kubernetes:  $ kubectl get HorizontalPodAutoscaler NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 2/1900, 5/75 1 3 1 92m ","title":"Satellite Usage","url":"/docs/skywalking-swck/v0.9.0/examples/satellite/"},{"content":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. 
When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  
The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. 
For more information, please read the proposal. Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick a proxy. Resource limit+HPA: Restrict the connection resources of each proxy and stop accepting new connections when the resource limit is reached, and use the HPA mechanism of Kubernetes to dynamically scale out the number of proxies.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensures that the number of connections in each proxy is relatively balanced Simple to use   Cons Each client needs to ensure that data is not lost; the client is required to accept GOAWAY responses May cause a sudden increase in traffic on some nodes; each client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to configure and use the proxy, and easy to understand based on basic data metrics. No data loss due to broken connections. There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection count of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between the client and the SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite accepts the traffic from the client, perceives all the OAP nodes through a Kubernetes label selector or manual configuration, and load balances the traffic to the upstream OAP nodes.\nAs shown below, a single client still maintains a connection with a single Satellite, while the Satellite establishes a connection with each OAP and load balances messages to the OAP nodes.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for SkyWalking users that provisions, upgrades, and maintains the relevant SkyWalking components and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps are performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metric values are in line with expectations, so the Satellite is scaled automatically.  As shown below, the dotted line divides the two parts. HPA uses the SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA scales the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: after the SkyWalking OAP scales, the traffic is automatically load balanced through the Satellite. Satellite Scaling: load balancing of the Satellite’s own traffic.  NOTE: All commands can be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The steps are as follows:\n Install the istioctl locally to help manage the Istio mesh. 
Install Istio into the Kubernetes environment with the demo configuration profile, and enable the Envoy ALS. Transmit the ALS messages to the Satellite, which we will deploy later. Add the label to the default namespace so Istio can automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enable envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK makes it convenient for users to deploy and upgrade SkyWalking-related components on Kubernetes. The automatic scaling of the Satellite also relies mainly on SWCK. For more information, refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026\u0026 cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the SkyWalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on the SkyWalking WebUI. At this point, you can see that the Satellite is working!\nDeploy Monitor We need to install the OpenTelemetry Collector to collect metrics from the OAPs for analysis.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser, visit http://localhost:8080/, and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scale the number of OAPs through the deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! 
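To confirm the scale-out took effect, you can check the deployment and its pods before looking at the dashboards; a minimal check could look like this:

```shell
# Confirm the OAP deployment now runs three replicas
kubectl get deployment skywalking-system-oap -n skywalking-system
# List the pods in the SkyWalking namespace and watch the new OAP pods become Ready
kubectl get pods -n skywalking-system
```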
After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced across the OAPs.\nSatellite Scaling After completing the SkyWalking scaling demo, we will carry out the Satellite scaling demo.\nDeploy SWCK HPA SWCK provides an adapter that implements the Kubernetes external metrics API for the HPA by reading the metrics in the SkyWalking OAP. We expose the metrics service in the Satellite to the OAP and configure an HPA resource to auto-scale the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you can see that we have 9 connections on one Satellite. One Envoy proxy may establish multiple connections to the Satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application Scaling the application establishes more connections to the Satellite, which lets us verify whether the HPA takes effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite deploys as a single instance, and that single instance alone would accept all 11 connections. The HPA resource limits one Satellite to handling 10 connections and uses a stabilization window so that the Satellite scales up stably. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo once the HPA resource is running, the Satellite is automatically scaled up to 2 instances. You can learn about the replica calculation algorithm in the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we can see that when the number of gRPC connections on a Satellite exceeds 10, the Satellite automatically scales through the HPA rule. As a result, the connection count drops back to a normal status (in this example, fewer than 10).\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","title":"Scaling with Apache SkyWalking","url":"/docs/main/latest/en/academy/scaling-with-apache-skywalking/"},{"content":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken is the reconnection policy, based on DNS round-robin mode, used. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. 
The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. 
The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. 
Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. 
Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! 
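As a side note on the Bookinfo step above: instead of refreshing the product page by hand, a small loop can keep generating access logs while you wait for the dashboards to fill in. This assumes the kubectl port-forward on port 9080 from the earlier step is still running.

```shell
# Continuously hit the Bookinfo product page to generate ALS traffic;
# stop it with Ctrl-C once enough data has been collected.
while true; do
  curl -s -o /dev/null http://localhost:9080/productpage
  sleep 1
done
```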
After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","title":"Scaling with Apache SkyWalking","url":"/docs/main/next/en/academy/scaling-with-apache-skywalking/"},{"content":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. 
The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. 
The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. 
Cluster awareness: The proxy has cluster awareness, and actively disconnects the connection when the load is unbalanced to allow the client to re-pick up the proxy. Resource limit+HPA: Restrict the connection resource situation of each proxy, and no longer accept new connections when the resource limit is reached. And use the HPA mechanism of Kubernetes to dynamically scale out the number of the proxy.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensure that the number of connections in each proxy is relatively  Simple to use   Cons Each client needs to ensure that data is not lostThe client is required to accept GOWAY responses May cause a sudden increase in traffic on some nodesEach client needs to ensure that data is not lost  Traffic will not be particularly balanced in each instance    We choose Limit+HPA for these reasons:\n Easy to config and use the proxy and easy to understand based on basic data metrics. No data loss due to broken connection. There is no need for the client to implement any other protocols to prevent data loss, especially when the client is a commercial product. The connection of each node in the proxy cluster does not need to be particularly balanced, as long as the proxy node itself is high-performance.  SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It’s used between Client and SkyWalking OAP, effectively solving the load balancing problem.\nAfter the system is deployed, the Satellite would accept the traffic from the Client, and the Satellite will perceive all the nodes of the OAP through Kubernetes Label Selector or manual configuration, and load balance the traffic to the upstream OAP node.\nAs shown below, a single client still maintains a connection with a single Satellite, Satellite would establish the connection with each OAP, and load balance message to the OAP node.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for the SkyWalking users, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps would be performed:\n Read metrics from OAP: HPA requests the SWCK metrics adapter to dynamically read the metrics in the OAP. Scaling the Satellite: Kubernetes HPA senses that the metrics values are in line with expectations, so the Satellite would be scaling automatically.  As shown below, use the dotted line to divide the two parts. HPA uses SWCK Adapter to read the metrics in the OAP. When the threshold is met, HPA would scale the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: After SkyWalking OAP scaling, the traffic would auto load balancing through Satellite. Satellite Scaling: Satellite’s own traffic load balancing.  NOTE: All commands could be accessed through GitHub.\nSkyWalking Scaling We will use the bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The following step:\n Install the istioctl locally to help manage the Istio mesh. 
Install Istio into the Kubernetes environment with a demo configuration profile, and enable the Envoy ALS. Transmit the ALS message to the satellite. The satellite we will deploy later. Add the label into the default namespace so Istio could automatically inject Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enbale envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK provides convenience for users to deploy and upgrade SkyWalking related components based on Kubernetes. The automatic scale function of Satellite also mainly relies on SWCK. For more information, you could refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026amp;\u0026amp; cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the skywalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application on SkyWalking WebUI. At this time, you can see that the Satellite is working!\nDeploy Monitor We need to install OpenTelemetry Collector to collect metrics in OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser and visit http://localhost:8080/ and create a new item on the dashboard. The SkyWalking Web UI pictured below shows how the data content is applied.\nScaling OAP Scaling the number of OAPs by deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! 
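For reference, the otel-collector-oap.yaml applied in the Deploy Monitor step above typically wires a Prometheus-style scrape of the OAP self-observability endpoint into an exporter that points back at the OAP. The sketch below only illustrates that shape; the scrape target, port 1234, and the choice of exporter are assumptions that depend on the OAP version and the actual sample file.

```yaml
# Illustrative OpenTelemetry Collector pipeline; endpoints and ports are assumptions.
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: skywalking-oap
          metrics_path: /metrics
          static_configs:
            - targets: ['skywalking-system-oap.skywalking-system:1234']  # OAP self-metrics port (assumption)
processors:
  batch: {}
exporters:
  otlp:
    endpoint: skywalking-system-oap.skywalking-system:11800  # OAP OTel receiver (assumption; older OAPs used an OpenCensus exporter)
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      processors: [batch]
      exporters: [otlp]
```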
After a period of time, you will see that the number of OAPs becomes 3, and the ALS traffic is balanced to each OAP.\nSatellite Scaling After we have completed the SkyWalking Scaling, we would carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter to implement the Kubernetes external metrics to adapt the HPA through reading the metrics in SkyWalking OAP. We expose the metrics service in Satellite to OAP and configure HPA Resource to auto-scaling the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you could see we have 9 connections in one satellite. One envoy proxy may establish multiple connections to the satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application The scaling application could establish more connections to the satellite, to verify whether the HPA is in effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite will deploy a single instance and a single instance will only accept 11 connections. HPA resources limit one Satellite to handle 10 connections and use a stabilization window to make Satellite stable scaling up. In this case, we deploy the Bookinfo application in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after HPA resources are running, the Satellite would be automatically scaled up to 2 instances. You can learn about the calculation algorithm of replicas through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we would be able to see that when the number of connections of each gRPC exceeds 10 connections, then the satellite automatically scales through the HPA rule. As a result, the connection number is down to normal status (in this example, less than 10)\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","title":"Scaling with Apache SkyWalking","url":"/docs/main/v9.3.0/en/academy/scaling-with-apache-skywalking/"},{"content":"Scaling with Apache SkyWalking Background In the Apache SkyWalking ecosystem, the OAP obtains metrics, traces, logs, and event data through SkyWalking Agent, Envoy, or other data sources. Under the gRPC protocol, it transmits data by communicating with a single server node. Only when the connection is broken, the reconnecting policy would be used based on DNS round-robin mode. When new services are added at runtime or the OAP load is kept high due to increased traffic of observed services, the OAP cluster needs to scale out for increased traffic. 
The load of the new OAP node would be less due to all existing agents having connected to previous nodes. Even without scaling, the load of OAP nodes would be unbalanced, because the agent would keep the connection due to random policy at the booting stage. In these cases, it would become a challenge to keep up the health status of all nodes, and be able to scale out when needed.\nIn this article, we mainly discuss how to solve this challenge in SkyWalking.\nHow to Load Balance SkyWalking mainly uses the gRPC protocol for data transmission, so this article mainly introduces load balancing in the gRPC protocol.\nProxy Or Client-side Based on the gRPC official Load Balancing blog, there are two approaches to load balancing:\n Client-side: The client perceives multiple back-end services and uses a load-balancing algorithm to select a back-end service for each RPC. Proxy: The client sends the message to the proxy server, and the proxy server load balances the message to the back-end service.  From the perspective of observability system architecture:\n    Pros Cons     Client-side High performance because of the elimination of extra hop Complex client (cluster awareness, load balancing, health check, etc.)Ensure each data source to be connected provides complex client capabilities   Proxy Simple Client Higher latency    We choose Proxy mode for the following reasons:\n Observable data is not very time-sensitive, a little latency caused by transmission is acceptable. A little extra hop is acceptable and there is no impact on the client-side. As an observability platform, we cannot/should not ask clients to change. They make their own tech decisions and may have their own commercial considerations.  Transmission Policy In the proxy mode, we should determine the transmission path between downstream and upstream.\nDifferent data protocols require different processing policies. There are two transmission policies:\n Synchronous: Suitable for protocols that require data exchange in the client, such as SkyWalking Dynamic Configuration Service. This type of protocol provides real-time results. Asynchronous batch: Used when the client doesn’t care about the upstream processing results, but only the transmitted data (e.g., trace report, log report, etc.)  The synchronization policy requires that the proxy send the message to the upstream server when receiving the client message, and synchronously return the response data to the downstream client. Usually, only a few protocols need to use the synchronization policy.\nAs shown below, after the client sends the request to the Proxy, the proxy would send the message to the server synchronously. When the proxy receives the result, it returns to the client.\nThe asynchronous batch policy means that the data is sent to the upstream server in batches asynchronously. This policy is more common because most protocols in SkyWalking are primarily based on data reporting. We think using the queue as a buffer could have a good effect. The asynchronous batch policy is executed according to the following steps:\n The proxy receives the data and wraps it as an Event object. An event is added into the queue. When the cycle time is reached or when the queue elements reach the fixed number, the elements in the queue will parallel consume and send to the OAP.  The advantage of using queues is:\n Separate data receiving and sending to reduce the mutual influence. 
The interval quantization mechanism can be used to combine events, which helps to speed up sending events to the OAP. Using multi-threaded consumption queue events can make fuller use of network IO.  As shown below, after the proxy receives the message, the proxy would wrap the message as an event and push it to the queue. The message sender would take batch events from the queue and send them to the upstream OAP.\nRouting Routing algorithms are used to route messages to a single upstream server node.\nThe Round-Robin algorithm selects nodes in order from the list of upstream service nodes. The advantage of this algorithm is that the number of times each node is selected is average. When the size of the data is close to the same, each upstream node can handle the same quantity of data content.\nWith the Weight Round-Robin, each upstream server node has a corresponding routing weight ratio. The difference from Round-Robin is that each upstream node has more chances to be routed according to its weight. This algorithm is more suitable to use when the upstream server node machine configuration is not the same.\nThe Fixed algorithm is a hybrid algorithm. It can ensure that the same data is routed to the same upstream server node, and when the upstream server scales out, it still maintains routing to the same node; unless the upstream node does not exist, it will reroute. This algorithm is mainly used in the SkyWalking Meter protocol because this protocol needs to ensure that the metrics of the same service instance are sent to the same OAP node. The Routing steps are as follows:\n Generate a unique identification string based on the data content, as short as possible. The amount of data is controllable. Get the upstream node of identity from LRU Cache, and use it if it exists. According to the identification, generate the corresponding hash value, and find the upstream server node from the upstream list. Save the mapping relationship between the upstream server node and identification to LRU Cache.  The advantage of this algorithm is to bind the data with the upstream server node as much as possible, so the upstream server can better process continuous data. The disadvantage is that it takes up a certain amount of memory space to save the corresponding relationship.\nAs shown below, the image is divided into two parts:\n The left side represents that the same data content always is routed to the same server node. The right side represents the data routing algorithm. Get the number from the data, and use the remainder algorithm to obtain the position.  We choose to use a combination of Round-Robin and Fixed algorithm for routing:\n The Fixed routing algorithm is suitable for specific protocols, mainly used when passing metrics data to the SkyWalking Meter protocol The Round-Robin algorithm is used by default. When the SkyWalking OAP cluster is deployed, the configuration of the nodes needs to be as much the same as possible, so there would be no need to use the Weight Round-Robin algorithm.  How to balance the load balancer itself? Proxy still needs to deal with the load balancing problem from client to itself, especially when deploying a Proxy cluster in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client-side to specify the maximum connection duration of each connection. For more information, please read the proposal. 
How to balance the load balancer itself? The proxy still has to deal with the load balancing problem from the clients to itself, especially when a Proxy cluster is deployed in a production environment.\nThere are three ways to solve this problem:\n Connection management: Use the max_connection config on the client side to specify the maximum duration of each connection. For more information, please read the proposal. Cluster awareness: The proxy is aware of its own cluster, and actively closes connections when the load is unbalanced so that the client re-picks a proxy. Resource limit + HPA: Limit the connection resources of each proxy, stop accepting new connections when the resource limit is reached, and use the Kubernetes HPA mechanism to dynamically scale out the number of proxies.      Connection management Cluster awareness Resource Limit+HPA     Pros Simple to use Ensures that the number of connections on each proxy is relatively balanced Simple to use   Cons Each client needs to ensure that data is not lost; the client is required to handle GOAWAY responses May cause a sudden increase in traffic on some nodes; each client needs to ensure that data is not lost Traffic will not be particularly balanced across instances    We choose Resource Limit + HPA for these reasons (a sketch of the connection limit follows this list):\n The proxy is easy to configure and use, and its behavior is easy to understand from basic metrics. No data loss from broken connections: there is no need for the client to implement any extra protocol to prevent data loss, especially when the client is a commercial product. The connections do not need to be particularly balanced across the proxy cluster, as long as each proxy node itself is high-performance. 
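As a rough illustration of the resource-limit half of this approach, the Go sketch below wraps a listener so that it refuses connections beyond a fixed limit and leaves scale-out to the Kubernetes HPA. It is not Satellite's real code; the port, the limit of 10, and the countedConn helper are assumptions for the example.

// Minimal sketch of the connection-limit side of Resource Limit + HPA.
package main

import (
	"log"
	"net"
	"sync"
)

type limitedListener struct {
	net.Listener
	mu       sync.Mutex
	active   int
	maxConns int
}

// Accept hands out connections only while the proxy is under its limit.
func (l *limitedListener) Accept() (net.Conn, error) {
	for {
		conn, err := l.Listener.Accept()
		if err != nil {
			return nil, err
		}
		l.mu.Lock()
		if l.active >= l.maxConns {
			l.mu.Unlock()
			log.Printf("limit %d reached, rejecting %s", l.maxConns, conn.RemoteAddr())
			conn.Close() // the client retries and may land on another replica
			continue
		}
		l.active++
		l.mu.Unlock()
		return &countedConn{Conn: conn, release: l.release}, nil
	}
}

func (l *limitedListener) release() {
	l.mu.Lock()
	l.active--
	l.mu.Unlock()
}

// countedConn gives the connection slot back when it closes.
type countedConn struct {
	net.Conn
	release func()
	once    sync.Once
}

func (c *countedConn) Close() error {
	c.once.Do(c.release) // free the slot exactly once
	return c.Conn.Close()
}

func main() {
	inner, err := net.Listen("tcp", ":11800")
	if err != nil {
		log.Fatal(err)
	}
	ln := &limitedListener{Listener: inner, maxConns: 10}
	// A gRPC server could serve on ln instead of this placeholder loop.
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) { defer c.Close() }(conn)
	}
}

A rejected client simply retries and, behind a Kubernetes Service, may land on a replica that the HPA has just added.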
SkyWalking-Satellite We have implemented this Proxy in the SkyWalking-Satellite project. It sits between the clients and the SkyWalking OAP, and effectively solves the load balancing problem.\nAfter the system is deployed, the Satellite accepts traffic from the clients, discovers all OAP nodes through a Kubernetes label selector or manual configuration, and load balances the traffic to the upstream OAP nodes.\nAs shown below, a single client still maintains a connection with a single Satellite; the Satellite establishes connections with every OAP and load balances messages across the OAP nodes.\nWhen scaling Satellite, we need to deploy the SWCK adapter and configure the HPA in Kubernetes. SWCK is a platform for SkyWalking users that provisions, upgrades, and maintains SkyWalking components, and makes them work natively on Kubernetes.\nAfter deployment is finished, the following steps are performed:\n Read metrics from the OAP: HPA asks the SWCK metrics adapter to dynamically read the metrics from the OAP. Scale the Satellite: Kubernetes HPA detects that the metric values meet the configured target, so the Satellite is scaled automatically.  As shown below, the dotted line divides the diagram into two parts. HPA uses the SWCK adapter to read the metrics in the OAP. When the threshold is met, HPA scales the Satellite deployment.\nExample In this section, we will demonstrate two cases:\n SkyWalking Scaling: after the SkyWalking OAP scales, the traffic is automatically load balanced through Satellite. Satellite Scaling: load balancing of Satellite’s own traffic.  NOTE: All commands can be found on GitHub.\nSkyWalking Scaling We will use the Bookinfo application to demonstrate how to integrate Apache SkyWalking 8.9.1 with Apache SkyWalking-Satellite 0.5.0, and observe the service mesh through the Envoy ALS protocol.\nBefore starting, please make sure that you already have a Kubernetes environment.\nInstall Istio Istio provides a very convenient way to configure the Envoy proxy and enable the access log service. The steps are:\n Install istioctl locally to help manage the Istio mesh. Install Istio into the Kubernetes environment with the demo configuration profile, and enable the Envoy ALS. Transmit the ALS messages to the Satellite, which we will deploy later. Add the label to the default namespace so that Istio automatically injects Envoy sidecar proxies when you deploy your application later.  # install istioctl export ISTIO_VERSION=1.12.0 curl -L https://istio.io/downloadIstio | sh - sudo mv $PWD/istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/ # install istio istioctl install -y --set profile=demo \\ \t--set meshConfig.enableEnvoyAccessLogService=true \\ \t--set meshConfig.defaultConfig.envoyAccessLogService.address=skywalking-system-satellite.skywalking-system:11800 # enable envoy proxy in default namespace kubectl label namespace default istio-injection=enabled Install SWCK SWCK makes it convenient for users to deploy and upgrade SkyWalking-related components on Kubernetes. The automatic scaling of Satellite also mainly relies on SWCK. For more information, you can refer to the official documentation.\n# Install cert-manager kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml # Deploy SWCK mkdir -p skywalking-swck \u0026\u0026 cd skywalking-swck wget https://dlcdn.apache.org/skywalking/swck/0.6.1/skywalking-swck-0.6.1-bin.tgz tar -zxvf skywalking-swck-0.6.1-bin.tgz cd config kubectl apply -f operator-bundle.yaml Deploy Apache SkyWalking And Apache SkyWalking-Satellite We have provided a simple script to deploy the SkyWalking OAP, UI, and Satellite.\n# Create the skywalking components namespace kubectl create namespace skywalking-system kubectl label namespace skywalking-system swck-injection=enabled # Deploy components kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/sw-components.yaml Deploy Bookinfo Application export ISTIO_VERSION=1.12.0 kubectl apply -f https://raw.githubusercontent.com/istio/istio/$ISTIO_VERSION/samples/bookinfo/platform/kube/bookinfo.yaml kubectl wait --for=condition=Ready pods --all --timeout=1200s kubectl port-forward service/productpage 9080 Next, please open your browser and visit http://localhost:9080. You should be able to see the Bookinfo application. Refresh the webpage several times to generate enough access logs.\nThen, you can see the topology and metrics of the Bookinfo application in the SkyWalking WebUI. At this point, you can see that the Satellite is working!\nDeploy Monitor We need to install the OpenTelemetry Collector to collect metrics from the OAPs and analyze them.\n# Add OTEL collector kubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/otel-collector-oap.yaml kubectl port-forward -n skywalking-system service/skywalking-system-ui 8080:80 Next, please open your browser, visit http://localhost:8080/, and create a new item on the dashboard. The SkyWalking Web UI screenshot below shows how the collected data is presented.\nScaling OAP Scale the number of OAP replicas through the deployment.\nkubectl scale --replicas=3 -n skywalking-system deployment/skywalking-system-oap Done! 
After a period of time, you will see that the number of OAPs becomes 3, and that the ALS traffic is balanced across the OAP nodes.\nSatellite Scaling After we have completed the SkyWalking scaling, we carry out the Satellite scaling demo.\nDeploy SWCK HPA SWCK provides an adapter that implements the Kubernetes external metrics API for the HPA by reading metrics from the SkyWalking OAP. We expose the Satellite metrics service to the OAP and configure an HPA resource to auto-scale the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then, you can see that there are 9 connections on one Satellite. One Envoy proxy may establish multiple connections to the Satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application Scaling the application creates more connections to the Satellite, which lets us verify whether the HPA takes effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite is deployed as a single instance, and a single instance will only accept 11 connections. The HPA resource limits one Satellite to 10 connections and uses a stabilization window so that the Satellite scales up smoothly. In this case, we deploy the Bookinfo application with 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after the HPA resource is running, the Satellite is automatically scaled up to 2 instances; per the Kubernetes HPA algorithm, desiredReplicas = ceil(currentReplicas * currentMetric / targetMetric) = ceil(1 * 11 / 10) = 2. You can learn more about the replica calculation in the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we can see that when the number of gRPC connections per Satellite exceeds 10, the Satellite automatically scales out through the HPA rule. As a result, the connection count returns to a normal level (in this example, fewer than 10):\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","title":"Scaling with Apache SkyWalking","url":"/docs/main/v9.4.0/en/academy/scaling-with-apache-skywalking/"}
After a period of time, you will see that the number of OAPs becomes 3 and that the ALS traffic is balanced across the OAPs.\nSatellite Scaling After completing the SkyWalking Scaling demo, we carry out the Satellite Scaling demo.\nDeploy SWCK HPA SWCK provides an adapter that implements the Kubernetes external metrics API for the HPA by reading the metrics in SkyWalking OAP. We expose the Satellite metrics service to the OAP and configure an HPA resource to auto-scale the Satellite.\nInstall the SWCK adapter into the Kubernetes environment:\nkubectl apply -f skywalking-swck/config/adapter-bundle.yaml Create the HPA resource, and limit each Satellite to handle a maximum of 10 connections:\nkubectl apply -f https://raw.githubusercontent.com/mrproliu/sw-satellite-demo-scripts/5821a909b647f7c8f99c70378e197630836f45f7/resources/satellite-hpa.yaml Then you can see that we have 9 connections on one Satellite; a single Envoy proxy may establish multiple connections to the Satellite.\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 9/10 1 3 1 5m18s Scaling Application Scaling the application establishes more connections to the Satellite, which verifies whether the HPA takes effect.\nkubectl scale --replicas=3 deployment/productpage-v1 deployment/details-v1 Done! By default, Satellite is deployed as a single instance, and that single instance only accepts 11 connections. The HPA resource limits one Satellite to handling 10 connections and uses a stabilization window so that the Satellite scales up stably. In this case, the Bookinfo application runs in 10+ instances after scaling, which means that 10+ connections will be established to the Satellite.\nSo after the HPA resource is running, the Satellite is automatically scaled up to 2 instances. You can learn about the replica calculation algorithm through the official documentation. Run the following command to view the running status:\n$ kubectl get HorizontalPodAutoscaler -n skywalking-system --watch NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 3m31s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 1 4m20s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 4m38s hpa-demo Deployment/skywalking-system-satellite 11/10 1 3 2 5m8s hpa-demo Deployment/skywalking-system-satellite 6/10 1 3 2 5m23s By observing the “number of connections” metric, we can see that when the number of gRPC connections on a Satellite exceeds 10, the Satellite automatically scales through the HPA rule. As a result, the connection number returns to a normal status (in this example, fewer than 10).\nswctl metrics linear --name satellite_service_grpc_connect_count --service-name satellite::satellite-service ","title":"Scaling with Apache SkyWalking","url":"/docs/main/v9.7.0/en/academy/scaling-with-apache-skywalking/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  
string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  
string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  
long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  
int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  
long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  
string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  
boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","title":"Scopes and Fields","url":"/docs/main/latest/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  
bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  
string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  
bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  
int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  
int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPES with K8S Prefix All metrics starting with K8S are derived from Kubernetes monitoring by Rover(eBPF agent).\nService, Service Instance and relations For all K8SService, K8SServiceInstance, K8SServiceRelation and K8SServiceInstanceRelation, they all have the following package/protocol level metric contents.\n   Name Remarks Group Key Type     type The metrics from log type, the following names should have the type prefix. The value may be connect, accept, close, write, read, protocol.  string   connect.duration Connect to other service use duration.  long(in nanoseconds)   connect.success The connect is success or not.  boolean   accept.duration Accept connection from client use duration.  long(in nanoseconds)   close.duration Close one connection use duration.  long(in nanoseconds)   close.success Close one connection is success or not.  boolean   write.duration Write data to the connection use duration.  long(in nanoseconds)   write.syscall Write data to the connection syscall name. The value should be Write, Writev, Send, SendTo, SendMsg, SendMmsg, SendFile, SendFile64.  string   write.l4.duration Write data to the connection use duration on Linux Layer 4.  long(in nanoseconds)   write.l4.transmitPackageCount Total package count on write data to the connection.  long   write.l4.retransmitPackageCount Total retransmit package count on write data to the connection.  long   write.l4.totalPackageSize Total transmit package size on write data to the connection.  
long(bytes)   write.l3.duration Write data to the connection use duration on Linux Layer 3.  long(in nanoseconds)   write.l3.localDuration Write data to the connection use local duration on Linux Layer 3.  long(in nanoseconds)   write.l3.outputDuration Write data to the connection use output duration on Linux Layer 3.  long(in nanoseconds)   write.l3.resolveMACCount Total resolve remote MAC address count on write data to the connection.  long   write.l3.resolveMACDuration Total resolve remote MAC address use duration on write data to the connection.  long(in nanoseconds)   write.l3.netFilterCount Total do net filtering count on write data to the connection.  long   write.l3.netFilterDuration Total do net filtering use duration on write data to the connection.  long(in nanoseconds)   write.l2.duration Write data to the connection use duration on Linux L2.  long(nanoseconds)   write.l2.networkDeviceName The network device name on write data to the connection.  string   write.l2.enterQueueBufferCount The write package count to the network device queue on write data to the connection.  long   write.l2.readySendDuration Total ready send buffer duration on write data to the connection.  long(in nanoseconds)   write.l2.networkDeviceSendDuration Total network send buffer use duration on write data to the connection.  long(in nanoseconds)   read.duration Read data from the connection use duration.  long(in nanoseconds)   read.syscall Read data from the connection syscall name. The value should Read, Readv, Recv, RecvFrom, RecvMsg, RecvMmsg.  string   read.l4.duration Read data to the connection use duration on Linux Layer 4.  long(in nanoseconds)   read.l3.duration Read data to the connection use duration on Linux Layer 3.  long(in nanoseconds)   read.l3.rcvDuration Read data to the connection use receive duration on Linux Layer 3.  long(in nanoseconds)   read.l3.localDuration Read data to the connection use local duration on Linux Layer 3.  long(in nanoseconds)   read.l3.netFilterCount Total do net filtering count on read data from the connection.  long   read.l3.netFilterDuration Total do net filtering use duration on read data from the connection.  long(in nanoseconds)   read.l2.netDeviceName The network device name on read data from the connection.  string   read.l2.packageCount Total read package count on the connection.  long   read.l2.totalPackageSize Total read package size on the connection.  long(bytes)   read.l2.packageToQueueDuration Total read package to the queue duration on the connection.  long(in nanoseconds)   read.l2.rcvPackageFromQueueDuration Total read package from the queue duration on the connection.  long(in nanoseconds)   protocol.type The protocol type name, the following names should have the type prefix. The value should be HTTP.  string   protocol.success This protocol request and response is success or not.  boolean   protocol.http.latency The latency of HTTP response.  long(in nanoseconds)   protocol.http.url The url path of HTTP request.  string   protocol.http.method The method name of HTTP request.  string   protocol.http.statusCode The response code of HTTP response.  int   protocol.http.sizeOfRequestHeader The header size of HTTP request.  long(bytes)   protocol.http.sizeOfRequestBody The body size of HTTP request.  long(bytes)   protocol.http.sizeOfResponseHeader The header size of HTTP response.  long(bytes)   protocol.http.sizeOfResponseBody The body size of HTTP response.  
long(bytes)    SCOPE K8SService    Name Remarks Group Key Type     name The service name in kubernetes.  string   layer The layer in kubernetes service.  string   detectPoint Where the relation is detected. The value may be client or server.  enum    SCOPE K8SServiceInstance    Name Remarks Group Key Type     serviceName The service name in kubernetes.  string   serviceInstanceName The pod name in kubernetes.  string   layer The layer of kubernetes service.  string   detectPoint Where the relation is detected. The value may be client or server.  enum    SCOPE K8SServiceRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceLayer The source layer service in kubernetes.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   tlsMode The TLS mode of relation. The value may be Plain or TLS.  enum   destServiceName The dest service name in kubernetes.  string   destLayer The dest layer service in kubernetes.  string    SCOPE K8SServiceRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceLayer The source layer service in kubernetes.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   tlsMode The TLS mode of relation. The value may be Plain or TLS.  enum   destServiceName The dest service name in kubernetes.  string   destLayer The dest layer service in kubernetes.  string    SCOPE K8SServiceInstanceRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceServiceInstanceName The source pod name in kubernetes.  string   sourceLayer The source layer service in kubernetes.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   tlsMode The TLS mode of relation. The value may be Plain or TLS.  enum   destServiceName The dest service name in kubernetes.  string   destServiceInstanceName The dest pod name in kubernetes.  string   destLayer The dest layer service in kubernetes.  string    Endpoint and Endpoint Relation For K8SEndpoint and K8SEndpointRelation, they only have the following protocol level metric contents.\n   Name Remarks Group Key Type     protocol.type The protocol type name, the following names should have the type prefix. The value should be HTTP.  string   protocol.success This protocol request and response is success or not.  boolean   protocol.http.latency The latency of HTTP response.  long(in nanoseconds)   protocol.http.url The url path of HTTP request.  string   protocol.http.method The method name of HTTP request.  string   protocol.http.statusCode The response code of HTTP response.  int   protocol.http.sizeOfRequestHeader The header size of HTTP request.  long(bytes)   protocol.http.sizeOfRequestBody The body size of HTTP request.  long(bytes)   protocol.http.sizeOfResponseHeader The header size of HTTP response.  long(bytes)   protocol.http.sizeOfResponseBody The body size of HTTP response.  long(bytes)    SCOPE K8SEndpoint    Name Remarks Group Key Type     serviceName The service name in kubernetes.  string   layer The layer in kubernetes service.  string   endpointName The endpoint name detect in kubernetes service.  
string   duration The duration of the service endpoint response latency.  long    SCOPE K8SEndpointRelation    Name Remarks Group Key Type     sourceServiceName The source service name in kubernetes.  string   sourceServiceName The layer in kubernetes source service.  string   sourceEndpointName The endpoint name detect in kubernetes source service.  string   detectPoint Where the relation is detected. The value may be client or server.  enum   componentId The ID of component used in this call.  string   destServiceName The dest service name in kubernetes.  string   destServiceName The layer in kubernetes dest service.  string   destEndpointName The endpoint name detect in kubernetes dest service.  string    ","title":"Scopes and Fields","url":"/docs/main/next/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  
string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  
long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  
string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  
string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    ","title":"Scopes and Fields","url":"/docs/main/v9.0.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  
long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  
long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  
long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  
string    ","title":"Scopes and Fields","url":"/docs/main/v9.1.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. 
Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  
long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  
string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   tcpInfo.receivedBytes The received bytes of the TCP traffic, if this request is a TCP call.  long   tcpInfo.sentBytes The sent bytes of the TCP traffic, if this request is a TCP call.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   responseCode Deprecated.The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. 
yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    ","title":"Scopes and Fields","url":"/docs/main/v9.2.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  
string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. 
Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  
long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  
string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  
string   serviceName The name of the browser application.  string   count The number of requests, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The name of the service to which the event belongs.  string   serviceInstance The service instance to which the event belongs, if any.  string   endpoint The service endpoint to which the event belongs, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of the database.\n   Name Remarks Group Key Type     name The service name of the virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from each slow request of the database.\n   Name Remarks Group Key Type     databaseServiceId The service id of the virtual database service.  string   statement The SQL statement.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement.  string    SCOPE CacheAccess This calculates the metrics data from each request of the cache system.\n   Name Remarks Group Key Type     name The service name of the virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is a write or a read.  string    SCOPE CacheSlowAccess This calculates the metrics data from each slow request of the cache system, for either a write or a read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of the virtual cache service.  string   command The cache command.  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access.  string   status Indicates the success or failure of the request.  
boolean   operation Indicates whether this access is a write or a read.  string    SCOPE MQAccess This calculates the service-dimensional metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     name The service name, usually the MQ address(es).  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    SCOPE MQEndpointAccess This calculates the endpoint-dimensional metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name, usually a combination of queue and topic.  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    ","title":"Scopes and Fields","url":"/docs/main/v9.3.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in computer science, such as an operating system (OS_LINUX layer) or Kubernetes (k8s layer).  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in computer science, such as an operating system (OS_LINUX layer) or Kubernetes (k8s layer).  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. 
Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  
long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  
string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 
200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), OpenCensus SkyWalking exporter implementation, or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. 
The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of requests, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of requests, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of requests, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The name of the service to which the event belongs.  string   serviceInstance The service instance to which the event belongs, if any.  string   endpoint The service endpoint to which the event belongs, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of the database.\n   Name Remarks Group Key Type     name The service name of the virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  
boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from each slow request of the database.\n   Name Remarks Group Key Type     databaseServiceId The service id of the virtual database service.  string   statement The SQL statement.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement.  string    SCOPE CacheAccess This calculates the metrics data from each request of the cache system.\n   Name Remarks Group Key Type     name The service name of the virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is a write or a read.  string    SCOPE CacheSlowAccess This calculates the metrics data from each slow request of the cache system, for either a write or a read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of the virtual cache service.  string   command The cache command.  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access.  string   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is a write or a read.  string    SCOPE MQAccess This calculates the service-dimensional metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     name The service name, usually the MQ address(es).  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    SCOPE MQEndpointAccess This calculates the endpoint-dimensional metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name, usually a combination of queue and topic.  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    ","title":"Scopes and Fields","url":"/docs/main/v9.4.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in computer science, such as an operating system (OS_LINUX layer) or Kubernetes (k8s layer).  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  
List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  
string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  
string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. 
Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  
int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The name of the service to which the event belongs.  string   serviceInstance The service instance to which the event belongs, if any.  string   endpoint The service endpoint to which the event belongs, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of the database.\n   Name Remarks Group Key Type     name The service name of the virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from each slow request of the database.\n   Name Remarks Group Key Type     databaseServiceId The service id of the virtual database service.  string   statement The SQL statement.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement.  string    SCOPE CacheAccess This calculates the metrics data from each request of the cache system.\n   Name Remarks Group Key Type     name The service name of the virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is a write or a read.  string    SCOPE CacheSlowAccess This calculates the metrics data from each slow request of the cache system, for either a write or a read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of the virtual cache service.  string   command The cache command.  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access.  string   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is a write or a read.  string    SCOPE MQAccess This calculates the service-dimensional metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     name The service name, usually the MQ address(es).  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    SCOPE MQEndpointAccess This calculates the endpoint-dimensional metrics data from each request of the MQ system on the consume/produce side.\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  
string   endpoint The endpoint name, usually a combination of queue and topic.  string   transmissionLatency The latency from the produce side to the consume side.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates whether this access is on the Produce or Consume side.  enum    ","title":"Scopes and Fields","url":"/docs/main/v9.5.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in computer science, such as an operating system (OS_LINUX layer) or Kubernetes (k8s layer).  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  
List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  
string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. 
yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  
string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  
enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  
int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","title":"Scopes and Fields","url":"/docs/main/v9.6.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scopes and Fields Using the Aggregation Function, the requests will be grouped by time and Group Key(s) in each scope.\nSCOPE Service This calculates the metrics data from each request of the service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  string   endpointName The name of the endpoint, such as a full path of HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request. Such as: Database, HTTP, RPC, gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPService This calculates the metrics data from each request of the TCP service.\n   Name Remarks Group Key Type     name The name of the service.  string   layer Layer represents an abstract framework in the computer science, such as operation system(OS_LINUX layer), Kubernetes(k8s layer)  enum   serviceInstanceName The name of the service instance ID.  
string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   endpointName The name of the endpoint, such as a full path of the HTTP URI.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstance This calculates the metrics data from each request of the service instance.\n   Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment.  List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    Secondary scopes of ServiceInstance This calculates the metrics data if the service instance is a JVM and collects through javaagent.\n SCOPE ServiceInstanceJVMCPU     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   usePercent The percentage of CPU time spent.  double    SCOPE ServiceInstanceJVMMemory     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   heapStatus Indicates whether the metric has a heap property or not.  bool   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  
long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMMemoryPool     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   poolType The type may be CODE_CACHE_USAGE, NEWGEN_USAGE, OLDGEN_USAGE, SURVIVOR_USAGE, PERMGEN_USAGE, or METASPACE_USAGE based on different versions of JVM.  enum   init See the JVM documentation.  long   max See the JVM documentation.  long   used See the JVM documentation.  long   committed See the JVM documentation.  long    SCOPE ServiceInstanceJVMGC     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   phase Includes both NEW and OLD.  Enum   time The time spent in GC.  long   count The count in GC operations.  long    SCOPE ServiceInstanceJVMThread     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   liveCount The current number of live threads.  long   daemonCount The current number of daemon threads.  long   peakCount The current number of peak threads.  long   runnableStateThreadCount The current number of threads in runnable state.  long   blockedStateThreadCount The current number of threads in blocked state.  long   waitingStateThreadCount The current number of threads in waiting state.  long   timedWaitingStateThreadCount The current number of threads in time-waiting state.  long    SCOPE ServiceInstanceJVMClass     Name Remarks Group Key Type     name The name of the service instance, such as ip:port@Service Name. Note: Currently, the native agent uses uuid@ipv4 as the instance name, which does not assist in setting up a filter in aggregation.  string   serviceName The name of the service.  string   loadedClassCount The number of classes that are currently loaded in the JVM.  long   totalUnloadedClassCount The total number of classes unloaded since the JVM has started execution.  long   totalLoadedClassCount The total number of classes that have been loaded since the JVM has started execution.  long    SCOPE Endpoint This calculates the metrics data from each request of the endpoint in the service.\n   Name Remarks Group Key Type     name The name of the endpoint, such as a full path of the HTTP URI.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   serviceInstanceName The name of the service instance ID.  string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   tags The labels of each request. Each value is made up by TagKey:TagValue in the segment. 
 List\u0026lt;String\u0026gt;   tag The key-value pair of span tags in the segment.  Map\u0026lt;String, String\u0026gt;   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE ServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceRelation This calculates the metrics data from each request between services.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceLayer The layer of the source service.  enum   destServiceName The name of the destination service.  string   destServiceInstanceName The name of the destination service instance.  string   destLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination services, such as service_relation_mtls_cpm = from(ServiceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE ServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  
string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   endpoint The endpoint used in this call.  string   componentId The ID of the component used in this call. yes string   latency The time taken by each request.  int   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string    SCOPE TCPServiceInstanceRelation This calculates the metrics data from each request between service instances.\n   Name Remarks Group Key Type     sourceServiceName The name of the source service.  string   sourceServiceInstanceName The name of the source service instance.  string   sourceServiceLayer The layer of the source service.  enum   destServiceName The name of the destination service.     destServiceInstanceName The name of the destination service instance.  string   destServiceLayer The layer of the destination service.  enum   componentId The ID of the component used in this call. yes string   detectPoint Where the relation is detected. The value may be client, server, or proxy. yes enum   tlsMode The TLS mode between source and destination service instances, such as service_instance_relation_mtls_cpm = from(ServiceInstanceRelation.*).filter(tlsMode == \u0026quot;mTLS\u0026quot;).cpm()  string   sideCar.internalErrorCode The sidecar/gateway proxy internal error code. The value is based on the implementation.  string   receivedBytes The received bytes of the TCP traffic.  long   sentBytes The sent bytes of the TCP traffic.  long    SCOPE EndpointRelation This calculates the metrics data of the dependency between endpoints. This relation is hard to detect, and it depends on the tracing library to propagate the previous endpoint. Therefore, the EndpointRelation scope aggregation comes into effect only in services under tracing by SkyWalking native agents, including auto instrument agents (like Java and .NET), or other tracing context propagation in SkyWalking specification.\n   Name Remarks Group Key Type     endpoint The parent endpoint in the dependency.  string   serviceName The name of the service.  string   serviceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  enum   childEndpoint The endpoint used by the parent endpoint in row(1).  string   childServiceName The endpoint used by the parent service in row(1).  string   childServiceNodeType The type of node to which the Service or Network address belongs, such as Normal, Database, MQ, or Cache.  string   childServiceInstanceName The endpoint used by the parent service instance in row(1).  
string   rpcLatency The latency of the RPC between the parent endpoint and childEndpoint, excluding the latency caused by the parent endpoint itself.     componentId The ID of the component used in this call. yes string   status Indicates the success or failure of the request.  bool(true for success)   httpResponseStatusCode The response code of the HTTP response, and if this request is the HTTP call. E.g. 200, 404, 302  int   rpcStatusCode The string value of the rpc response code.  string   type The type of each request, such as Database, HTTP, RPC, or gRPC.  enum   detectPoint Indicates where the relation is detected. The value may be client, server, or proxy. yes enum    SCOPE BrowserAppTraffic This calculates the metrics data from each request of the browser application (browser only).\n   Name Remarks Group Key Type     name The browser application name of each request.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppSingleVersionTraffic This calculates the metrics data from each request of a single version in the browser application (browser only).\n   Name Remarks Group Key Type     name The single version name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPageTraffic This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   count The number of request, which is fixed at 1.  int   trafficCategory The traffic category. The value may be NORMAL, FIRST_ERROR, or ERROR.  enum   errorCategory The error category. The value may be AJAX, RESOURCE, VUE, PROMISE, or UNKNOWN.  enum    SCOPE BrowserAppPagePerf This calculates the metrics data from each request of the page in the browser application (browser only).\n   Name Remarks Group Key Type     name The page name of each request.  string   serviceName The name of the browser application.  string   redirectTime The time taken to redirect.  int(in ms)   dnsTime The DNS query time.  int(in ms)   ttfbTime Time to first byte.  int(in ms)   tcpTime TCP connection time.  int(in ms)   transTime Content transfer time.  int(in ms)   domAnalysisTime Dom parsing time.  int(in ms)   fptTime First paint time or blank screen time.  int(in ms)   domReadyTime Dom ready time.  int(in ms)   loadPageTime Page full load time.  int(in ms)   resTime Synchronous load resources in the page.  int(in ms)   sslTime Only valid for HTTPS.  int(in ms)   ttlTime Time to interact.  int(in ms)   firstPackTime First pack time.  int(in ms)   fmpTime First Meaningful Paint.  int(in ms)    SCOPE Event This calculates the metrics data from events.\n   Name Remarks Group Key Type     name The name of the event.  string   service The service name to which the event belongs to.  string   serviceInstance The service instance to which the event belongs to, if any.  string   endpoint The service endpoint to which the event belongs to, if any.  
string   type The type of the event, Normal or Error.  string   message The message of the event.  string   parameters The parameters in the message, see parameters.  string    SCOPE DatabaseAccess This calculates the metrics data from each request of database.\n   Name Remarks Group Key Type     name The service name of virtual database service.  string   databaseTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean    SCOPE DatabaseSlowStatement This calculates the metrics data from slow request of database.\n   Name Remarks Group Key Type     databaseServiceId The service id of virtual cache service.  string   statement The sql statement .  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow statement  string    SCOPE CacheAccess This calculates the metrics data from each request of cache system.\n   Name Remarks Group Key Type     name The service name of virtual cache service.  string   cacheTypeId The ID of the component used in this call.  int   latency The time taken by each request.  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE CacheSlowAccess This calculates the metrics data from slow request of cache system , which is used for write or read operation.\n   Name Remarks Group Key Type     cacheServiceId The service id of virtual cache service.  string   command The cache command .  string   key The cache command key.  string   latency The time taken by each request.  int(in ms)   traceId The traceId of this slow access  string   status Indicates the success or failure of the request.  boolean   operation Indicates this access is used for write or read  string    SCOPE MQAccess This calculates the service dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     name The service name , usually it\u0026rsquo;s MQ address(es)  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    SCOPE MQEndpointAccess This calculates the endpoint dimensional metrics data from each request of MQ system on consume/produce side\n   Name Remarks Group Key Type     serviceName The service name that this endpoint belongs to.  string   endpoint The endpoint name , usually it\u0026rsquo;s combined by queue,topic  string   transmissionLatency The latency from produce side to consume side .  int(in ms)   status Indicates the success or failure of the request.  boolean   operation Indicates this access is on Produce or Consume side  enum    ","title":"Scopes and Fields","url":"/docs/main/v9.7.0/en/concepts-and-designs/scope-definitions/"},{"content":"Scratch The OAP Config Dump SkyWalking OAP behaviors could be controlled through hundreds of configurations. It is hard to know what is the final configuration as all the configurations could be overrided by system environments.\nThe core config file application.yml lists all the configurations and their default values. However, it is still hard to know the runtime value.\nScratch is a tool to dump the final configuration. 
It is provided within the OAP REST server and can be accessed through HTTP GET http://{core restHost}:{core restPort}/debugging/config/dump.
> curl http://127.0.0.1:12800/debugging/config/dump
cluster.provider=standalone
core.provider=default
core.default.prepareThreads=2
core.default.restHost=0.0.0.0
core.default.searchableLogsTags=level,http.status_code
core.default.role=Mixed
core.default.persistentPeriod=25
core.default.syncPeriodHttpUriRecognitionPattern=10
core.default.restIdleTimeOut=30000
core.default.dataKeeperExecutePeriod=5
core.default.topNReportPeriod=10
core.default.gRPCSslTrustedCAPath=
core.default.downsampling=[Hour, Day]
core.default.serviceNameMaxLength=70
core.default.gRPCSslEnabled=false
core.default.restPort=12800
core.default.serviceCacheRefreshInterval=10
...
All booting configurations with their runtime values are listed, including the selected provider for each module.
Protect The Secrets
Some of the configurations contain sensitive values, such as username, password, token, etc. These values would be masked in the dump result. For example, the storage.elasticsearch.password in the following configuration,
storage:
  selector: ${SW_STORAGE:h2}
  elasticsearch:
    password: ${SW_ES_PASSWORD:""}
It would be masked and shown as ******** in the dump result.
> curl http://127.0.0.1:12800/debugging/config/dump
...
storage.elasticsearch.password=********
...
By default, we mask the config keys through the following configuration.
# Include the list of keywords to filter configurations including secrets. Separate keywords by a comma.
keywords4MaskingSecretsOfConfig: ${SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS:user,password,token,accessKey,secretKey,authentication}
Disable The Config Dump Service
By default, this service is enabled to help users debug and diagnose. If you want to disable it, you need to disable the whole debugging-query module by setting its selector to -.
debugging-query:
  selector: ${SW_DEBUGGING_QUERY:-}
","title":"Scratch The OAP Config Dump","url":"/docs/main/latest/en/debugging/config_dump/"},{"content":"Scratch The OAP Config Dump SkyWalking OAP behaviors can be controlled through hundreds of configurations. It is hard to know what the final configuration is, as every configuration could be overridden by system environment variables.
The core config file application.yml lists all the configurations and their default values. However, it is still hard to know the runtime values.
Scratch is a tool to dump the final configuration. It is provided within the OAP REST server and can be accessed through HTTP GET http://{core restHost}:{core restPort}/debugging/config/dump.
> curl http://127.0.0.1:12800/debugging/config/dump
cluster.provider=standalone
core.provider=default
core.default.prepareThreads=2
core.default.restHost=0.0.0.0
core.default.searchableLogsTags=level,http.status_code
core.default.role=Mixed
core.default.persistentPeriod=25
core.default.syncPeriodHttpUriRecognitionPattern=10
core.default.restIdleTimeOut=30000
core.default.dataKeeperExecutePeriod=5
core.default.topNReportPeriod=10
core.default.gRPCSslTrustedCAPath=
core.default.downsampling=[Hour, Day]
core.default.serviceNameMaxLength=70
core.default.gRPCSslEnabled=false
core.default.restPort=12800
core.default.serviceCacheRefreshInterval=10
...
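The dump is plain text with one key=value pair per line, so it can be filtered with ordinary shell tools when only one module is of interest. For example (the grep filter below is just a usage sketch on top of the endpoint, not part of the debugging-query module itself):
curl -s http://127.0.0.1:12800/debugging/config/dump | grep '^core.default.'
core.default.prepareThreads=2
core.default.restHost=0.0.0.0
...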
All booting configurations with their runtime values are listed, including the selected provider for each module.\nProtect The Secrets Some of the configurations contain sensitive values, such as username, password, token, etc. These values would be masked in the dump result. For example, the storage.elasticsearch.password in the following configurations,\nstorage:selector:${SW_STORAGE:h2}elasticsearch:password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}It would be masked and shown as ******** in the dump result.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump ... storage.elasticsearch.password=******** ... By default, we mask the config keys through the following configurations.\n# Include the list of keywords to filter configurations including secrets. Separate keywords by a comma.keywords4MaskingSecretsOfConfig:${SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS:user,password,token,accessKey,secretKey,authentication}Disable The Config Dump Service By default, this service is open for helping users to debug and diagnose. If you want to disable it, you need to diable the whole debugging-query module through setting selector=-.\ndebugging-query:selector:${SW_DEBUGGING_QUERY:-}","title":"Scratch The OAP Config Dump","url":"/docs/main/next/en/debugging/config_dump/"},{"content":"Scratch The OAP Config Dump SkyWalking OAP behaviors could be controlled through hundreds of configurations. It is hard to know what is the final configuration as all the configurations could be overrided by system environments.\nThe core config file application.yml lists all the configurations and their default values. However, it is still hard to know the runtime value.\nScratch is a tool to dump the final configuration. It is provided within OAP rest server, which could be accessed through HTTP GET http://{core restHost}:{core restPort}/debugging/config/dump.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump cluster.provider=standalone core.provider=default core.default.prepareThreads=2 core.default.restHost=0.0.0.0 core.default.searchableLogsTags=level,http.status_code core.default.role=Mixed core.default.persistentPeriod=25 core.default.syncPeriodHttpUriRecognitionPattern=10 core.default.restIdleTimeOut=30000 core.default.dataKeeperExecutePeriod=5 core.default.topNReportPeriod=10 core.default.gRPCSslTrustedCAPath= core.default.downsampling=[Hour, Day] core.default.serviceNameMaxLength=70 core.default.gRPCSslEnabled=false core.default.restPort=12800 core.default.serviceCacheRefreshInterval=10 ... All booting configurations with their runtime values are listed, including the selected provider for each module.\nProtect The Secrets Some of the configurations contain sensitive values, such as username, password, token, etc. These values would be masked in the dump result. For example, the storage.elasticsearch.password in the following configurations,\nstorage:selector:${SW_STORAGE:h2}elasticsearch:password:${SW_ES_PASSWORD:\u0026#34;\u0026#34;}It would be masked and shown as ******** in the dump result.\n\u0026gt; curl http://127.0.0.1:12800/debugging/config/dump ... storage.elasticsearch.password=******** ... By default, we mask the config keys through the following configurations.\n# Include the list of keywords to filter configurations including secrets. 
Separate keywords by a comma.keywords4MaskingSecretsOfConfig:${SW_DEBUGGING_QUERY_KEYWORDS_FOR_MASKING_SECRETS:user,password,token,accessKey,secretKey,authentication}Disable The Config Dump Service By default, this service is open for helping users to debug and diagnose. If you want to disable it, you need to diable the whole debugging-query module through setting selector=-.\ndebugging-query:selector:${SW_DEBUGGING_QUERY:-}","title":"Scratch The OAP Config Dump","url":"/docs/main/v9.7.0/en/debugging/config_dump/"},{"content":"","title":"Search Results","url":"/search/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","title":"Security Notice","url":"/docs/main/latest/en/security/readme/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  
Without these protections, an attacker could embed executable JavaScript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.
For sensitive environments, consider limiting the telemetry report frequency to mitigate DoS/DDoS against exposed OAP and UI services.
Appendix
The SkyWalking client-js agent always runs outside the secured environment. Please follow its security notice for more details.
","title":"Security Notice","url":"/docs/main/next/en/security/readme/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team in the default deployment.
All telemetry data are trusted. The OAP server does not validate any field of the telemetry data, to avoid extra load on the server.
It is up to the operator (OPS team) whether to expose the OAP server, UI, or some agent deployment to an unsecured environment. The following security policies should be considered to secure your SkyWalking deployment.
HTTPS and gRPC+TLS should be used between agents and OAP servers, as well as the UI.
Set up TOKEN or username/password based authentication for the OAP server and UI through your Gateway.
Validate all fields of the traceable RPC (including HTTP 1/2 and MQ) headers (header names are sw8, sw8-x and sw8-correlation) when requests come from outside the trusted zone, or simply block/remove those headers unless you are using the client-js agent.
All fields of telemetry data (HTTP in raw text or encoded Protobuf format) should be validated, and malicious data should be rejected.
Without these protections, an attacker could embed executable JavaScript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.
For sensitive environments, consider limiting the telemetry report frequency to mitigate DoS/DDoS against exposed OAP and UI services.
Appendix
The SkyWalking client-js agent always runs outside the secured environment. Please follow its security notice for more details.
","title":"Security Notice","url":"/docs/main/v9.3.0/en/security/readme/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team in the default deployment.
All telemetry data are trusted. The OAP server does not validate any field of the telemetry data, to avoid extra load on the server.
It is up to the operator (OPS team) whether to expose the OAP server, UI, or some agent deployment to an unsecured environment. The following security policies should be considered to secure your SkyWalking deployment.
HTTPS and gRPC+TLS should be used between agents and OAP servers, as well as the UI.
Set up TOKEN or username/password based authentication for the OAP server and UI through your Gateway.
Validate all fields of the traceable RPC (including HTTP 1/2 and MQ) headers (header names are sw8, sw8-x and sw8-correlation) when requests come from outside the trusted zone, or simply block/remove those headers unless you are using the client-js agent.
All fields of telemetry data (HTTP in raw text or encoded Protobuf format) should be validated, and malicious data should be rejected.
Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","title":"Security Notice","url":"/docs/main/v9.4.0/en/security/readme/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  Without these protections, an attacker could embed executable Javascript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor some sensitive environment, consider to limit the telemetry report frequency in case of DoS/DDoS for exposed OAP and UI services.\nappendix The SkyWalking client-js agent is always running out of the secured environment. Please follow its security notice for more details.\n","title":"Security Notice","url":"/docs/main/v9.5.0/en/security/readme/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. OAP server, UI, and agent deployments should only be reachable by the operation team on default deployment.\nAll telemetry data are trusted. The OAP server would not validate any field of the telemetry data to avoid extra load for the server.\nIt is up to the operator(OPS team) whether to expose the OAP server, UI, or some agent deployment to unsecured environment. The following security policies should be considered to add to secure your SkyWalking deployment.\n HTTPs and gRPC+TLS should be used between agents and OAP servers, as well as UI. Set up TOKEN or username/password based authentications for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC(including HTTP 1/2, MQ) headers(header names are sw8, sw8-x and sw8-correlation) when requests are from out of the trusted zone. Or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data(HTTP in raw text or encoded Protobuf format) should be validated and reject malicious data.  
Without these protections, an attacker could embed executable JavaScript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor sensitive environments, consider limiting the telemetry report frequency to reduce the risk of DoS/DDoS against exposed OAP and UI services.\nAppendix: The SkyWalking client-js agent always runs outside the secured environment. Please follow its security notice for more details.\n","title":"Security Notice","url":"/docs/main/v9.6.0/en/security/readme/"},{"content":"Security Notice The SkyWalking OAP server, UI, and agent deployments should run in a secure environment, such as only inside your data center. In the default deployment, the OAP server, UI, and agents should be reachable only by the operations team.\nAll telemetry data are trusted. The OAP server does not validate any field of the telemetry data, to avoid extra load on the server.\nIt is up to the operator (OPS team) whether to expose the OAP server, UI, or some agent deployments to an unsecured environment. Consider adding the following security policies to secure your SkyWalking deployment.\n HTTPS and gRPC+TLS should be used between agents and OAP servers, as well as the UI. Set up token- or username/password-based authentication for the OAP server and UI through your Gateway. Validate all fields of the traceable RPC (including HTTP 1/2 and MQ) headers (header names are sw8, sw8-x and sw8-correlation) when requests come from outside the trusted zone, or simply block/remove those headers unless you are using the client-js agent. All fields of telemetry data (HTTP in raw text or encoded Protobuf format) should be validated, and malicious data should be rejected.  Without these protections, an attacker could embed executable JavaScript code in those fields, causing XSS or even Remote Code Execution (RCE) issues.\nFor sensitive environments, consider limiting the telemetry report frequency to reduce the risk of DoS/DDoS against exposed OAP and UI services.\nAppendix: The SkyWalking client-js agent always runs outside the secured environment. Please follow its security notice for more details.\n","title":"Security Notice","url":"/docs/main/v9.7.0/en/security/readme/"},{"content":"Send Envoy metrics to SkyWalking with / without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy, but it also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with / without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nIn order to let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics, since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics which need to be analyzed, in order to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\n","title":"Send Envoy metrics to SkyWalking with / without Istio","url":"/docs/main/v9.0.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. 
To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. 
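For readability: the search-index text above flattens the whitespace of the two Envoy bootstrap fragments quoted earlier in this entry (the stats sink with its cluster, and the node metadata). Reconstructed as YAML they look roughly like this; treat it as a sketch and refer to the comprehensive static configuration linked in the docs for the authoritative file.

```yaml
# Stats sink pointing Envoy's Metrics Service at SkyWalking, plus the cluster it references.
stats_sinks:
  - name: envoy.metrics_service
    config:
      grpc_service:
        # Note: we can use the google_grpc implementation as well.
        envoy_grpc:
          cluster_name: service_skywalking

static_resources:
  # ...
  clusters:
    - name: service_skywalking
      connect_timeout: 5s
      type: LOGICAL_DNS
      http2_protocol_options: {}
      dns_lookup_family: V4_ONLY
      lb_policy: ROUND_ROBIN
      load_assignment:
        cluster_name: service_skywalking
        endpoints:
          - lb_endpoints:
              - endpoint:
                  address:
                    socket_address:
                      address: skywalking
                      # The port where SkyWalking serves the Envoy Metrics Service gRPC stream.
                      port_value: 11800

# Service metadata that lets SkyWalking build the topology from the metrics.
node:
  # ... other configs
  metadata:
    LABELS:
      app: test-app
    NAME: service-instance-name
```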
Read this documentation to learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/latest/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serves the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. 
Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/next/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. 
SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. 
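The istioctl --set flags shown above can also be expressed declaratively and applied with istioctl install -f <file>. The sketch below mirrors those flags, together with the metric inclusion list recommended just below, as an IstioOperator resource; the apiVersion/kind and the OAP address are assumptions to verify against your Istio version and environment.

```yaml
# Declarative equivalent of the --set flags above (sketch; verify the field
# paths against your Istio version). Apply with: istioctl install -f <this-file>
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  profile: demo                      # replace the profile as per your need
  meshConfig:
    defaultConfig:
      envoyMetricsService:
        # Replace with your actual SkyWalking OAP address (gRPC port 11800).
        address: skywalking-oap.skywalking.svc:11800
      proxyStatsMatcher:
        # Reserving only the metrics OAP analyzes reduces memory usage and CPU overhead.
        inclusionRegexps:
          - .*membership_healthy.*
          - .*upstream_cx_active.*
          - .*upstream_cx_total.*
          - .*upstream_rq_active.*
          - .*upstream_rq_total.*
          - .*upstream_rq_pending_active.*
          - .*lb_healthy_panic.*
          - .*upstream_cx_none_healthy.*
```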
We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.1.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.2.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.3.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.4.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.5.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.6.0/en/setup/envoy/metrics_service_setting/"},{"content":"Send Envoy metrics to SkyWalking with/without Istio Envoy defines a gRPC service to emit metrics, and whatever is used to implement this protocol can be used to receive the metrics. SkyWalking has a built-in receiver that implements this protocol, so you can configure Envoy to emit its metrics to SkyWalking.\nAs an APM system, SkyWalking not only receives and stores the metrics emitted by Envoy but also analyzes the topology of services and service instances.\nAttention: There are two versions of the Envoy metrics service protocol currently: v2 and v3. SkyWalking (8.3.0+) supports both of them.\nConfigure Envoy to send metrics to SkyWalking without Istio Envoy can be used with/without Istio. This section explains how you can configure the standalone Envoy to send metrics to SkyWalking.\nTo let Envoy send metrics to SkyWalking, we need to feed Envoy with a configuration that contains stats_sinks, which in turn includes envoy.metrics_service. 
This envoy.metrics_service should be configured as a config.grpc_service entry.\nThe noteworthy parts of the config are shown below:\nstats_sinks:- name:envoy.metrics_serviceconfig:grpc_service:# Note: we can use google_grpc implementation as well.envoy_grpc:cluster_name:service_skywalkingstatic_resources:...clusters:- name:service_skywalkingconnect_timeout:5stype:LOGICAL_DNShttp2_protocol_options:{}dns_lookup_family:V4_ONLYlb_policy:ROUND_ROBINload_assignment:cluster_name:service_skywalkingendpoints:- lb_endpoints:- endpoint:address:socket_address:address:skywalking# This is the port where SkyWalking serving the Envoy Metrics Service gRPC stream.port_value:11800The comprehensive static configuration can be found here.\nNote that Envoy can also be configured dynamically through xDS Protocol.\nAs mentioned above, SkyWalking also builds the topology of services from the metrics since Envoy also carries service metadata along with the metrics. To feed Envoy such metadata, see the other part of the configuration as follows:\nnode:# ... other configsmetadata:LABELS:app:test-appNAME:service-instance-nameConfigure Envoy to send metrics to SkyWalking with Istio Typically, Envoy can also be used with Istio. In this case, the configurations are much simpler because Istio composes the configurations for you and sends them to Envoy via xDS Protocol. Istio also automatically injects the metadata, such as service name and instance name, into the bootstrap configurations.\nEmitting the metrics to SkyWalking is as simple as adding the option --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; to Istio install command, like this:\nistioctl install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; If you already have Istio installed, you can use the following command to apply the config without re-installing Istio:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*\u0026#39; Note: proxyStatsMatcher is only supported by Istio 1.8+. We recommend using inclusionRegexps to reserve specific metrics that need to be analyzed to reduce memory usage and avoid CPU overhead. 
For example, OAP uses these metrics:\nistioctl manifest install -y \\  --set profile=demo # replace the profile as per your need \\ --set meshConfig.defaultConfig.envoyMetricsService.address=\u0026lt;skywalking.address.port.11800\u0026gt; \\ # replace \u0026lt;skywalking.address.port.11800\u0026gt; with your actual SkyWalking OAP address --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[0]=.*membership_healthy.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[1]=.*upstream_cx_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[2]=.*upstream_cx_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[3]=.*upstream_rq_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[4]=.*upstream_rq_total.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[5]=.*upstream_rq_pending_active.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[6]=.*lb_healthy_panic.*\u0026#39; \\  --set \u0026#39;meshConfig.defaultConfig.proxyStatsMatcher.inclusionRegexps[7]=.*upstream_cx_none_healthy.*\u0026#39; Metrics data Some Envoy statistics are listed here. Sample data that contain identifiers can be found here, while the metrics can be found here.\nNetwork Monitoring SkyWalking supports network monitoring of the data plane in the Service Mesh. Read this documentation for learn more.\n","title":"Send Envoy metrics to SkyWalking with/without Istio","url":"/docs/main/v9.7.0/en/setup/envoy/metrics_service_setting/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
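If the full identifier dump above is too noisy, you can confirm that the stats stream is arriving by filtering the OAP log for the receiver's handler name shown in the first log line. A minimal sketch, assuming the compose service is named skywalking as in the commands above:

$ docker-compose logs -f skywalking | grep MetricServiceGRPCHandler

This only narrows the local log view; it does not change what the OAP server receives or analyzes.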
$ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/latest/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 
| } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/next/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; 
skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.0.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
$ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.1.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 
| } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.2.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: 
\u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.3.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... 
$ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.4.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 
| } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.5.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: 
\u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.6.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Sending Envoy Metrics to SkyWalking OAP Server Example This is an example of sending Envoy Stats to the SkyWalking OAP server through Metric Service v2 and v3.\nRunning the example The example requires docker and docker-compose to be installed in your local system. It fetches images from Docker Hub.\nNote that in this setup, we override the log4j2.xml config to set the org.apache.skywalking.oap.server.receiver.envoy logger level to DEBUG. This enables us to see the messages sent by Envoy to the SkyWalking OAP server.\nYou can also find the Envoy Metric Service V3 API example in docker-compose-envoy-v3-api.yaml\n$ make up $ docker-compose logs -f skywalking $ # Please wait for a moment until SkyWalking is ready and Envoy starts sending the stats. 
You will see similar messages like the following: skywalking_1 | 2021-07-23 13:25:30,683 - org.apache.skywalking.oap.server.receiver.envoy.MetricServiceGRPCHandler -19437 [grpcServerPool-1-thread-2] DEBUG [] - Received msg identifier { skywalking_1 | node { skywalking_1 | id: \u0026quot;ingress\u0026quot; skywalking_1 | cluster: \u0026quot;envoy-proxy\u0026quot; skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;LABELS\u0026quot; skywalking_1 | value { skywalking_1 | struct_value { skywalking_1 | fields { skywalking_1 | key: \u0026quot;app\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;test-app\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;NAME\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;service-instance-name\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;envoy\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;isawesome\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;skywalking\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;iscool\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | locality { skywalking_1 | region: \u0026quot;ap-southeast-1\u0026quot; skywalking_1 | zone: \u0026quot;zone1\u0026quot; skywalking_1 | sub_zone: \u0026quot;subzone1\u0026quot; skywalking_1 | } skywalking_1 | user_agent_name: \u0026quot;envoy\u0026quot; skywalking_1 | user_agent_build_version { skywalking_1 | version { skywalking_1 | major_number: 1 skywalking_1 | minor_number: 19 skywalking_1 | } skywalking_1 | metadata { skywalking_1 | fields { skywalking_1 | key: \u0026quot;build.type\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;RELEASE\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.sha\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;68fe53a889416fd8570506232052b06f5a531541\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;revision.status\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;Clean\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | fields { skywalking_1 | key: \u0026quot;ssl.version\u0026quot; skywalking_1 | value { skywalking_1 | string_value: \u0026quot;BoringSSL\u0026quot; skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | } skywalking_1 | extensions { skywalking_1 | name: \u0026quot;composite-action\u0026quot; skywalking_1 | category: \u0026quot;envoy.matching.action\u0026quot; skywalking_1 | } ...... skywalking_1 | } skywalking_1 | } skywalking_1 | envoy_metrics { skywalking_1 | name: \u0026quot;cluster.service_google.update_no_rebuild\u0026quot; skywalking_1 | type: COUNTER skywalking_1 | metric { skywalking_1 | counter { skywalking_1 | value: 1.0 skywalking_1 | } skywalking_1 | timestamp_ms: 1627046729718 skywalking_1 | } ...... skywalking_1 | } ... $ # To tear down: $ make down ","title":"Sending Envoy Metrics to SkyWalking OAP Server Example","url":"/docs/main/v9.7.0/en/setup/envoy/examples/metrics/readme/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. 
They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/latest/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/next/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanism to integrate with target services. They support collecting traces, logs, metrics and events by using SkyWalking\u0026rsquo;s native format, and maximum the analysis capabilities of SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service.\n  Node.js agent. 
Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the rust agent in a rust service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols, but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project document for more details.\n  SkyAPM PHP agent. See PHP agent project document for more details.\n  SkyAPM Go SDK. See go2sky project document for more details.\n  SkyAPM C++ SDK. See cpp2sky project document for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.0.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM PHP agent. See PHP agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.1.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM PHP agent. See PHP agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. 
See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.2.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.3.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.4.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. 
Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM Go SDK. See go2sky project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.5.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. Learn how to install the PHP agent in your service without affecting your code.\n  Go agent. Learn how to integrate the Go agent with a golang service.\n  The following agents and SDKs are compatible with SkyWalking\u0026rsquo;s data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.\n  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.\n  SkyAPM C++ SDK. See cpp2sky project documentation for more details.\n  ","title":"Server Agents","url":"/docs/main/v9.6.0/en/setup/service-agent/server-agents/"},{"content":"Server Agents Server agents in various languages provide auto-instrumentation or/and manual-instrumentation(APIs-based) mechanisms to integrate with target services. They support collecting traces, logs, metrics, and events using SkyWalking\u0026rsquo;s native format and maximize the analysis capabilities of the SkyWalking OAP server.\nInstalling language agents in services   Java agent. Learn how to install the Java agent in your service without affecting your code.\n  LUA agent. Learn how to install the Lua agent in Nginx + LUA module or OpenResty.\n  Kong agent. Learn how to install the Lua agent in Kong.\n  Python Agent. Learn how to install the Python Agent in a Python service without affecting your code.\n  Node.js agent. Learn how to install the NodeJS Agent in a NodeJS service.\n  Rust agent. Learn how to integrate the Rust agent with a rust service.\n  PHP agent. 
Learn how to install the PHP agent in your service without affecting your code.
  Go agent. Learn how to integrate the Go agent with a golang service.
  The following agents and SDKs are compatible with SkyWalking's data formats and network protocols but are maintained by third parties. See their project repositories for guides and releases.
  SkyAPM .NET Core agent. See .NET Core agent project documentation for more details.
  SkyAPM C++ SDK. See cpp2sky project documentation for more details.
","title":"Server Agents","url":"/docs/main/v9.7.0/en/setup/service-agent/server-agents/"},{"content":"Server/grpc-server
Description
This is a sharing plugin, which starts a gRPC server.

DefaultConfig
# The address of grpc server. Default value is :11800
address: :11800
# The network of grpc. Default value is :tcp
network: tcp
# The max size of the received log. Default value is 2M. The unit is Byte.
max_recv_msg_size: 2097152
# The max concurrent stream channels.
max_concurrent_streams: 32
# The TLS cert file path.
tls_cert_file: ""
# The TLS key file path.
tls_key_file: ""
# The accept-connection limiter, applied when the resource limits below are reached.
accept_limit:
  # The max CPU utilization limit.
  cpu_utilization: 75
  # The max connection count.
  connection_count: 4000

Configuration
  Name                     Type                          Description
  address                  string                        The address of grpc server.
  network                  string                        The network of grpc.
  max_recv_msg_size        int                           The max size of the received log.
  max_concurrent_streams   uint32                        The max concurrent stream channels.
  tls_cert_file            string                        The TLS cert file path.
  tls_key_file             string                        The TLS key file path.
  accept_limit             grpc.AcceptConnectionConfig   The accept-connection limiter, applied when the resource limits are reached.
","title":"Server/grpc-server","url":"/docs/skywalking-satellite/latest/en/setup/plugins/server_grpc-server/"}
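For instance, exposing the gRPC server over TLS and tightening the acceptance limits only requires overriding the documented keys above; a minimal sketch, where the certificate paths and the lowered limits are illustrative placeholders rather than recommended values:

address: :11800
network: tcp
max_recv_msg_size: 2097152
max_concurrent_streams: 32
# Placeholder paths; point these at your real certificate and key.
tls_cert_file: "/path/to/server.crt"
tls_key_file: "/path/to/server.key"
accept_limit:
  # Stop accepting new connections earlier than the defaults would.
  cpu_utilization: 60
  connection_count: 2000

Keys left out should fall back to the defaults shown in DefaultConfig above.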
The unit is Byte.max_recv_msg_size:2097152# The max concurrent stream channels.max_concurrent_streams:32# The TLS cert file path.tls_cert_file:\u0026#34;\u0026#34;# The TLS key file path.tls_key_file:\u0026#34;\u0026#34;# To Accept Connection Limiter when reach the resourceaccept_limit:# The max CPU utilization limitcpu_utilization:75# The max connection countconnection_count:4000Configuration    Name Type Description     address string The address of grpc server.   network string The network of grpc.   max_recv_msg_size int The max size of the received log.   max_concurrent_streams uint32 The max concurrent stream channels.   tls_cert_file string The TLS cert file path.   tls_key_file string The TLS key file path.   accept_limit grpc.AcceptConnectionConfig To Accept Connection Limiter when reach the resource    ","title":"Server/grpc-server","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/server_grpc-server/"},{"content":"Server/http-server Description This is a sharing plugin, which would start a http server.\nDefaultConfig # The http server address.address:\u0026#34;:12800\u0026#34;Configuration    Name Type Description     address string     ","title":"Server/http-server","url":"/docs/skywalking-satellite/latest/en/setup/plugins/server_http-server/"},{"content":"Server/http-server Description This is a sharing plugin, which would start a http server.\nDefaultConfig # The http server address.address:\u0026#34;:12800\u0026#34;Configuration    Name Type Description     address string     ","title":"Server/http-server","url":"/docs/skywalking-satellite/next/en/setup/plugins/server_http-server/"},{"content":"Server/http-server Description This is a sharing plugin, which would start a http server.\nDefaultConfig # The http server address.address:\u0026#34;:12800\u0026#34;Configuration    Name Type Description     address string     ","title":"Server/http-server","url":"/docs/skywalking-satellite/v1.2.0/en/setup/plugins/server_http-server/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/latest/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. 
For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/next/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an option parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.0.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.1.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. 
For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.2.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.3.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.4.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. 
For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.5.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.6.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Grouping SkyWalking supports various default and customized dashboard templates. Each template provides an appropriate layout for services in a particular field. For example, the metrics for services with language agents installed may be different from that of services detected by the service mesh observability solution as well as SkyWalking\u0026rsquo;s self-observability metrics dashboard.\nTherefore, since version 8.3.0, the SkyWalking OAP has generated the groups based on this simple naming format:\n${service name} = [${group name}::]${logic name} If the service name includes double colons (::), the literal string before the colons is taken as the group name. In the latest GraphQL query, the group name has been provided as an optional parameter.\n getAllServices(duration: Duration!, group: String): [Service!]!\n RocketBot UI dashboards (Standard type) support the group name for default and custom configurations.\n","title":"Service Auto Grouping","url":"/docs/main/v9.7.0/en/setup/backend/service-auto-grouping/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. 
For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/latest/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. 
The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/next/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. 
But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.0.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. 
There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.1.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. 
Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.2.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? 
If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.3.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.4.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. 
SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.5.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. 
The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.6.0/en/concepts-and-designs/service-agent/"},{"content":"Service Auto Instrument Agent The service auto instrument agent is a subset of language-based native agents. This kind of agents is based on some language-specific features, especially those of a VM-based language.\nWhat does Auto Instrument mean? Many users learned about these agents when they first heard that \u0026ldquo;Not a single line of code has to be changed\u0026rdquo;. SkyWalking used to mention this in its readme page as well. However, this does not reflect the full picture. For end users, it is true that they no longer have to modify their codes in most cases. But it is important to understand that the codes are in fact still modified by the agent, which is usually known as \u0026ldquo;runtime code manipulation\u0026rdquo;. The underlying logic is that the auto instrument agent uses the VM interface for code modification to dynamically add in the instrument code, such as modifying the class in Java through javaagent premain.\nIn fact, although the SkyWalking team has mentioned that most auto instrument agents are VM-based, you may build such tools during compiling time rather than runtime.\nWhat are the limitations? Auto instrument is very helpful, as you may perform auto instrument during compiling time, without having to depend on VM features. 
But there are also certain limitations that come with it:\n  Higher possibility of in-process propagation in many cases. Many high-level languages, such as Java and .NET, are used for building business systems. Most business logic codes run in the same thread for each request, which causes propagation to be based on thread ID, in order for the stack module to make sure that the context is safe.\n  Only works in certain frameworks or libraries. Since the agents are responsible for modifying the codes during runtime, the codes are already known to the agent plugin developers. There is usually a list of frameworks or libraries supported by this kind of probes. For example, see the SkyWalking Java agent supported list.\n  Cross-thread operations are not always supported. Like what is mentioned above regarding in-process propagation, most codes (especially business codes) run in a single thread per request. But in some other cases, they operate across different threads, such as assigning tasks to other threads, task pools or batch processes. Some languages may even provide coroutine or similar components like Goroutine, which allows developers to run async process with low payload. In such cases, auto instrument will face problems.\n  So, there\u0026rsquo;s nothing mysterious about auto instrument. In short, agent developers write an activation script to make instrument codes work for you. That\u0026rsquo;s it!\nWhat is next? If you want to learn about manual instrument libs in SkyWalking, see the Manual instrument SDK section.\n","title":"Service Auto Instrument Agent","url":"/docs/main/v9.7.0/en/concepts-and-designs/service-agent/"},{"content":"Service Discovery Service discovery is used to discover all Kubernetes services process in the current node and report them to backend services. After the process upload is completed, the other modules could perform more operations with the process, such as process profiling and collecting process metrics.\nConfiguration    Name Default Environment Key Description     process_discovery.heartbeat_period 20s ROVER_PROCESS_DISCOVERY_HEARTBEAT_PERIOD The period of report or keep-alive process to the backend.   process_discovery.properties_report_period 10 ROVER_PROCESS_DISCOVERY_PROPERTIES_REPORT_PERIOD The agent sends the process properties to the backend every: heartbeart period * properties report period.   process_discovery.kubernetes.active false ROVER_PROCESS_DISCOVERY_KUBERNETES_ACTIVE Is active the kubernetes process discovery.   process_discovery.kubernetes.node_name  ROVER_PROCESS_DISCOVERY_KUBERNETES_NODE_NAME Current deployed node name, it could be inject by spec.nodeName.   process_discovery.kubernetes.namespaces  ROVER_PROCESS_DISCOVERY_KUBERNETES_NAMESPACES Including pod by namespaces, if empty means including all namespaces. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;.   process_discovery.kubernetes.analyzers   Declare how to build the process. The istio and k8s resources are active by default.   process_discovery.kubernetes.analyzers.active   Set is active analyzer.   process_discovery.kubernetes.analyzers.filters   Define which process is match to current process builder.   process_discovery.kubernetes.analyzers.service_name   The Service Name of the process entity.   process_discovery.kubernetes.analyzers.instance_name   The Service Instance Name of the process entity, by default, the instance name is the host IP v4 address from \u0026ldquo;en0\u0026rdquo; net interface.   
process_discovery.kubernetes.analyzers.process_name   The Process Name of the process entity, by default, the process name is the executable name of the process.   process_discovery.kubernetes.analyzers.labels   The Process Labels, used to aggregate similar process from service entity. Multiple labels split by \u0026ldquo;,\u0026rdquo;.    Kubernetes Process Detector The Kubernetes process detector could detect any process under the Kubernetes container. If active the Kubernetes process detector, the rover must be deployed in the Kubernetes cluster. After finding the process, it would collect the metadata of the process when the report to the backend.\nProcess Analyze The process analysis declares which process could be profiled and how to build the process entity. The Istio and Kubernetes resources are active on default.\nFilter The filter provides an expression(go template) mechanism to match the process that can build the entity. Multiple expressions work together to determine whether the process can create the entity. Each expression must return the boolean value. Otherwise, the decision throws an error.\nThe context is similar to the entity builder. Using context could help the rover understand which process could build the entity.\nProcess Context Is the same with the process context in scanner, but doesn\u0026rsquo;t need to add the {{ and }} in prefix and suffix.\nPod Context Provide current pod information and judgments.\n   Name Argument Example Description     Name None eq .Pod.Name \u0026quot;test-pod-name\u0026quot; The name of the current pod. The example shows the pod name is equal to test-pod-name.   Namespace None eq .Pod.Namespace \u0026quot;test-namesapce\u0026quot; The name of the current pod namespace. The example shows the pod namespace name is equal to test-namespace.   Node None eq .Pod.Node \u0026quot;test-node\u0026quot; The name of the node deployed. The example shows the pod node name is equal to test-node.   LabelValue KeyNames eq .Pod.LavelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot; The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. The example shows the pod has anyone a or b label key, and the value matches to v.   ServiceName None eq .Pod.ServiceName \u0026quot;test-service\u0026quot; The service name of the pod. The example shows current pods matched service name is test-service.   HasContainer Container name .Pod.HasContainer \u0026quot;istio-proxy\u0026quot; The pod has the appointed container name.   LabelSelector selector .Pod.LabelSelector The pod is matches the label selector. For more details, please read the official documentation.   HasServiceName None .Pod.HasServiceName The pod has the matched service.   HasOwnerName kindNames .Pod.HasOwnerName \u0026quot;Service,Deployment\u0026quot; The pod has the matched owner name.    Container Context Provide current container(under the pod) information.\n   Name Argument Example Description     Name None eq .Container.Name \u0026quot;istio-proxy\u0026quot; The name of the current container under the pod. The examples show the container name is equal to istio-proxy.    
Entity The entity including layer, serviceName, instanceName, processName and labels properties.\nThe entity also could use expression to build(serviceName, instanceName and processName).\nRover Rover context provides the context of the rover process instance and VM data.\n   Name Argument Example Description     InstanceID None {{.Rover.InstanceID}} Get the Instance ID of the rover.   HostIPV4 The Interface name {{.Rover.HostIPV4 \u0026quot;en0\u0026quot;}} Get the ipv4 address from the appointed network interface name.   HostIPV6 The Interface name {{.Rover.HostIPV6 \u0026quot;en0\u0026quot;}} Get the ipv6 address from the appointed network interface name.   HostName None {{.Rover.HostName}} Get the host name of current machine.    Process Process context provides the context relate to which process is matched.\n   Name Argument Example Description     ExeFilePath None {{.Process.ExeFilePath}} The execute file path of process.   ExeName None {{.Process.ExeName}} The execute file name.   CommandLine None {{.Process.CommandLine}} The command line of process.   Pid None {{.Process.Pid}} The id of the process.   WorkDir None {{.Process.WorkDir}} The work directory path of the process.    Pod The information on the current pod.\n   Name Argument Example Description     Name None {{.Pod.Name}} The name of current pod.   Namespace None {{.Pod.Namespace}} The name of current pod namespace.   Node None {{.Pod.Node}} The name of the node deployed.   LabelValue KeyNames, Default {{.Pod.LabelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot;}} The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. If all keys don\u0026rsquo;t have value, then return the default value.   ServiceName None {{.Pod.ServiceName}} The service name of the pod. If the pod hasn\u0026rsquo;t matched service, then return an empty string.   FindContainer ContainerName {{.Pod.FindContainer \u0026quot;test\u0026quot;}} Find the Container context by container name.   OwnerName KindNames {{.Pod.OwnerName \u0026quot;Service,Deployment\u0026quot;}} Find the Owner name by owner kind name.    Container The information of the current container under the pod.\n   Name Argument Example Description     Name None {{.Container.Name}} The name of the current container under the pod.    ID None {{.Container.ID}} The id of the current container under the pod.   EnvValue KeyNames {{.Container.EnvValue \u0026quot;a,b\u0026quot;}} The environment value of the first non-value key in the provided candidates(Iterate from left to right).    ","title":"Service Discovery","url":"/docs/skywalking-rover/latest/en/setup/configuration/service-discovery/"},{"content":"Service Discovery Service discovery is used to discover all Kubernetes services process in the current node and report them to backend services. After the process upload is completed, the other modules could perform more operations with the process, such as process profiling and collecting process metrics.\nConfiguration    Name Default Environment Key Description     process_discovery.heartbeat_period 20s ROVER_PROCESS_DISCOVERY_HEARTBEAT_PERIOD The period of report or keep-alive process to the backend.   process_discovery.properties_report_period 10 ROVER_PROCESS_DISCOVERY_PROPERTIES_REPORT_PERIOD The agent sends the process properties to the backend every: heartbeart period * properties report period.   
process_discovery.kubernetes.active false ROVER_PROCESS_DISCOVERY_KUBERNETES_ACTIVE Is active the kubernetes process discovery.   process_discovery.kubernetes.node_name  ROVER_PROCESS_DISCOVERY_KUBERNETES_NODE_NAME Current deployed node name, it could be inject by spec.nodeName.   process_discovery.kubernetes.namespaces  ROVER_PROCESS_DISCOVERY_KUBERNETES_NAMESPACES Including pod by namespaces, if empty means including all namespaces. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;.   process_discovery.kubernetes.analyzers   Declare how to build the process. The istio and k8s resources are active by default.   process_discovery.kubernetes.analyzers.active   Set is active analyzer.   process_discovery.kubernetes.analyzers.filters   Define which process is match to current process builder.   process_discovery.kubernetes.analyzers.service_name   The Service Name of the process entity.   process_discovery.kubernetes.analyzers.instance_name   The Service Instance Name of the process entity, by default, the instance name is the host IP v4 address from \u0026ldquo;en0\u0026rdquo; net interface.   process_discovery.kubernetes.analyzers.process_name   The Process Name of the process entity, by default, the process name is the executable name of the process.   process_discovery.kubernetes.analyzers.labels   The Process Labels, used to aggregate similar process from service entity. Multiple labels split by \u0026ldquo;,\u0026rdquo;.    Kubernetes Process Detector The Kubernetes process detector could detect any process under the Kubernetes container. If active the Kubernetes process detector, the rover must be deployed in the Kubernetes cluster. After finding the process, it would collect the metadata of the process when the report to the backend.\nProcess Analyze The process analysis declares which process could be profiled and how to build the process entity. The Istio and Kubernetes resources are active on default.\nFilter The filter provides an expression(go template) mechanism to match the process that can build the entity. Multiple expressions work together to determine whether the process can create the entity. Each expression must return the boolean value. Otherwise, the decision throws an error.\nThe context is similar to the entity builder. Using context could help the rover understand which process could build the entity.\nProcess Context Is the same with the process context in scanner, but doesn\u0026rsquo;t need to add the {{ and }} in prefix and suffix.\nPod Context Provide current pod information and judgments.\n   Name Argument Example Description     Name None eq .Pod.Name \u0026quot;test-pod-name\u0026quot; The name of the current pod. The example shows the pod name is equal to test-pod-name.   Namespace None eq .Pod.Namespace \u0026quot;test-namesapce\u0026quot; The name of the current pod namespace. The example shows the pod namespace name is equal to test-namespace.   Node None eq .Pod.Node \u0026quot;test-node\u0026quot; The name of the node deployed. The example shows the pod node name is equal to test-node.   LabelValue KeyNames eq .Pod.LavelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot; The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. The example shows the pod has anyone a or b label key, and the value matches to v.   ServiceName None eq .Pod.ServiceName \u0026quot;test-service\u0026quot; The service name of the pod. 
The example shows current pods matched service name is test-service.   HasContainer Container name .Pod.HasContainer \u0026quot;istio-proxy\u0026quot; The pod has the appointed container name.   LabelSelector selector .Pod.LabelSelector The pod is matches the label selector. For more details, please read the official documentation.   HasServiceName None .Pod.HasServiceName The pod has the matched service.   HasOwnerName kindNames .Pod.HasOwnerName \u0026quot;Service,Deployment\u0026quot; The pod has the matched owner name.    Container Context Provide current container(under the pod) information.\n   Name Argument Example Description     Name None eq .Container.Name \u0026quot;istio-proxy\u0026quot; The name of the current container under the pod. The examples show the container name is equal to istio-proxy.    Entity The entity including layer, serviceName, instanceName, processName and labels properties.\nThe entity also could use expression to build(serviceName, instanceName and processName).\nRover Rover context provides the context of the rover process instance and VM data.\n   Name Argument Example Description     InstanceID None {{.Rover.InstanceID}} Get the Instance ID of the rover.   HostIPV4 The Interface name {{.Rover.HostIPV4 \u0026quot;en0\u0026quot;}} Get the ipv4 address from the appointed network interface name.   HostIPV6 The Interface name {{.Rover.HostIPV6 \u0026quot;en0\u0026quot;}} Get the ipv6 address from the appointed network interface name.   HostName None {{.Rover.HostName}} Get the host name of current machine.    Process Process context provides the context relate to which process is matched.\n   Name Argument Example Description     ExeFilePath None {{.Process.ExeFilePath}} The execute file path of process.   ExeName None {{.Process.ExeName}} The execute file name.   CommandLine None {{.Process.CommandLine}} The command line of process.   Pid None {{.Process.Pid}} The id of the process.   WorkDir None {{.Process.WorkDir}} The work directory path of the process.    Pod The information on the current pod.\n   Name Argument Example Description     Name None {{.Pod.Name}} The name of current pod.   Namespace None {{.Pod.Namespace}} The name of current pod namespace.   Node None {{.Pod.Node}} The name of the node deployed.   LabelValue KeyNames, Default {{.Pod.LabelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot;}} The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. If all keys don\u0026rsquo;t have value, then return the default value.   ServiceName None {{.Pod.ServiceName}} The service name of the pod. If the pod hasn\u0026rsquo;t matched service, then return an empty string.   FindContainer ContainerName {{.Pod.FindContainer \u0026quot;test\u0026quot;}} Find the Container context by container name.   OwnerName KindNames {{.Pod.OwnerName \u0026quot;Service,Deployment\u0026quot;}} Find the Owner name by owner kind name.    Container The information of the current container under the pod.\n   Name Argument Example Description     Name None {{.Container.Name}} The name of the current container under the pod.    ID None {{.Container.ID}} The id of the current container under the pod.   EnvValue KeyNames {{.Container.EnvValue \u0026quot;a,b\u0026quot;}} The environment value of the first non-value key in the provided candidates(Iterate from left to right).    
","title":"Service Discovery","url":"/docs/skywalking-rover/next/en/setup/configuration/service-discovery/"},{"content":"Service Discovery Service discovery is used to discover all Kubernetes services process in the current node and report them to backend services. After the process upload is completed, the other modules could perform more operations with the process, such as process profiling and collecting process metrics.\nConfiguration    Name Default Environment Key Description     process_discovery.heartbeat_period 20s ROVER_PROCESS_DISCOVERY_HEARTBEAT_PERIOD The period of report or keep-alive process to the backend.   process_discovery.properties_report_period 10 ROVER_PROCESS_DISCOVERY_PROPERTIES_REPORT_PERIOD The agent sends the process properties to the backend every: heartbeart period * properties report period.   process_discovery.kubernetes.active false ROVER_PROCESS_DISCOVERY_KUBERNETES_ACTIVE Is active the kubernetes process discovery.   process_discovery.kubernetes.node_name  ROVER_PROCESS_DISCOVERY_KUBERNETES_NODE_NAME Current deployed node name, it could be inject by spec.nodeName.   process_discovery.kubernetes.namespaces  ROVER_PROCESS_DISCOVERY_KUBERNETES_NAMESPACES Including pod by namespaces, if empty means including all namespaces. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;.   process_discovery.kubernetes.analyzers   Declare how to build the process. The istio and k8s resources are active by default.   process_discovery.kubernetes.analyzers.active   Set is active analyzer.   process_discovery.kubernetes.analyzers.filters   Define which process is match to current process builder.   process_discovery.kubernetes.analyzers.service_name   The Service Name of the process entity.   process_discovery.kubernetes.analyzers.instance_name   The Service Instance Name of the process entity, by default, the instance name is the host IP v4 address from \u0026ldquo;en0\u0026rdquo; net interface.   process_discovery.kubernetes.analyzers.process_name   The Process Name of the process entity, by default, the process name is the executable name of the process.   process_discovery.kubernetes.analyzers.labels   The Process Labels, used to aggregate similar process from service entity. Multiple labels split by \u0026ldquo;,\u0026rdquo;.    Kubernetes Process Detector The Kubernetes process detector could detect any process under the Kubernetes container. If active the Kubernetes process detector, the rover must be deployed in the Kubernetes cluster. After finding the process, it would collect the metadata of the process when the report to the backend.\nProcess Analyze The process analysis declares which process could be profiled and how to build the process entity. The Istio and Kubernetes resources are active on default.\nFilter The filter provides an expression(go template) mechanism to match the process that can build the entity. Multiple expressions work together to determine whether the process can create the entity. Each expression must return the boolean value. Otherwise, the decision throws an error.\nThe context is similar to the entity builder. Using context could help the rover understand which process could build the entity.\nProcess Context Is the same with the process context in scanner, but doesn\u0026rsquo;t need to add the {{ and }} in prefix and suffix.\nPod Context Provide current pod information and judgments.\n   Name Argument Example Description     Name None eq .Pod.Name \u0026quot;test-pod-name\u0026quot; The name of the current pod. 
The example shows the pod name is equal to test-pod-name.   Namespace None eq .Pod.Namespace \u0026quot;test-namesapce\u0026quot; The name of the current pod namespace. The example shows the pod namespace name is equal to test-namespace.   Node None eq .Pod.Node \u0026quot;test-node\u0026quot; The name of the node deployed. The example shows the pod node name is equal to test-node.   LabelValue KeyNames eq .Pod.LavelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot; The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. The example shows the pod has anyone a or b label key, and the value matches to v.   ServiceName None eq .Pod.ServiceName \u0026quot;test-service\u0026quot; The service name of the pod. The example shows current pods matched service name is test-service.   HasContainer Container name .Pod.HasContainer \u0026quot;istio-proxy\u0026quot; The pod has the appointed container name.   LabelSelector selector .Pod.LabelSelector The pod is matches the label selector. For more details, please read the official documentation.   HasServiceName None .Pod.HasServiceName The pod has the matched service.   HasOwnerName kindNames .Pod.HasOwnerName \u0026quot;Service,Deployment\u0026quot; The pod has the matched owner name.    Container Context Provide current container(under the pod) information.\n   Name Argument Example Description     Name None eq .Container.Name \u0026quot;istio-proxy\u0026quot; The name of the current container under the pod. The examples show the container name is equal to istio-proxy.    Entity The entity including layer, serviceName, instanceName, processName and labels properties.\nThe entity also could use expression to build(serviceName, instanceName and processName).\nRover Rover context provides the context of the rover process instance and VM data.\n   Name Argument Example Description     InstanceID None {{.Rover.InstanceID}} Get the Instance ID of the rover.   HostIPV4 The Interface name {{.Rover.HostIPV4 \u0026quot;en0\u0026quot;}} Get the ipv4 address from the appointed network interface name.   HostIPV6 The Interface name {{.Rover.HostIPV6 \u0026quot;en0\u0026quot;}} Get the ipv6 address from the appointed network interface name.   HostName None {{.Rover.HostName}} Get the host name of current machine.    Process Process context provides the context relate to which process is matched.\n   Name Argument Example Description     ExeFilePath None {{.Process.ExeFilePath}} The execute file path of process.   ExeName None {{.Process.ExeName}} The execute file name.   CommandLine None {{.Process.CommandLine}} The command line of process.   Pid None {{.Process.Pid}} The id of the process.   WorkDir None {{.Process.WorkDir}} The work directory path of the process.    Pod The information on the current pod.\n   Name Argument Example Description     Name None {{.Pod.Name}} The name of current pod.   Namespace None {{.Pod.Namespace}} The name of current pod namespace.   Node None {{.Pod.Node}} The name of the node deployed.   LabelValue KeyNames, Default {{.Pod.LabelValue \u0026quot;a,b\u0026quot; \u0026quot;v\u0026quot;}} The label value of the label keys, If provide multiple keys, if any key has value, then don\u0026rsquo;t need to get other values. If all keys don\u0026rsquo;t have value, then return the default value.   ServiceName None {{.Pod.ServiceName}} The service name of the pod. If the pod hasn\u0026rsquo;t matched service, then return an empty string.   
FindContainer ContainerName {{.Pod.FindContainer \u0026quot;test\u0026quot;}} Find the Container context by container name.   OwnerName KindNames {{.Pod.OwnerName \u0026quot;Service,Deployment\u0026quot;}} Find the Owner name by owner kind name.    Container The information of the current container under the pod.\n   Name Argument Example Description     Name None {{.Container.Name}} The name of the current container under the pod.    ID None {{.Container.ID}} The id of the current container under the pod.   EnvValue KeyNames {{.Container.EnvValue \u0026quot;a,b\u0026quot;}} The environment value of the first key that has a value among the provided candidates (iterating from left to right).    ","title":"Service Discovery","url":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/service-discovery/"},{"content":"Service Hierarchy SkyWalking v10 introduces a new concept, Service Hierarchy, which defines the relationships of logically identical services across different layers. OAP will detect the services from different layers and try to build the connections.\nDetect Service Hierarchy Connections There are 2 ways to detect the connections:\n Automatically matching through the OAP internal mechanism, no extra work is required. Build the connections through specific agents.  Note: All the relationships and auto-matching rules should be defined in the config/hierarchy-definition.yml file. If you want to customize it according to your own needs, please refer to Service Hierarchy Configuration.\nAutomatically Matching    Upper layer Lower layer Matching rule     GENERAL K8S_SERVICE GENERAL On K8S_SERVICE   GENERAL APISIX GENERAL On APISIX   VIRTUAL_DATABASE MYSQL VIRTUAL_DATABASE On MYSQL   VIRTUAL_DATABASE POSTGRESQL VIRTUAL_DATABASE On POSTGRESQL   VIRTUAL_DATABASE CLICKHOUSE VIRTUAL_DATABASE On CLICKHOUSE   VIRTUAL_MQ RABBITMQ VIRTUAL_MQ On RABBITMQ   VIRTUAL_MQ ROCKETMQ VIRTUAL_MQ On ROCKETMQ   VIRTUAL_MQ KAFKA VIRTUAL_MQ On KAFKA   VIRTUAL_MQ PULSAR VIRTUAL_MQ On PULSAR   MESH MESH_DP MESH On MESH_DP   MESH K8S_SERVICE MESH On K8S_SERVICE   MESH_DP K8S_SERVICE MESH_DP On K8S_SERVICE   MYSQL K8S_SERVICE MYSQL On K8S_SERVICE   POSTGRESQL K8S_SERVICE POSTGRESQL On K8S_SERVICE   CLICKHOUSE K8S_SERVICE CLICKHOUSE On K8S_SERVICE   NGINX K8S_SERVICE NGINX On K8S_SERVICE   APISIX K8S_SERVICE APISIX On K8S_SERVICE   ROCKETMQ K8S_SERVICE ROCKETMQ On K8S_SERVICE   RABBITMQ K8S_SERVICE RABBITMQ On K8S_SERVICE   KAFKA K8S_SERVICE KAFKA On K8S_SERVICE   PULSAR K8S_SERVICE PULSAR On K8S_SERVICE   SO11Y_OAP K8S_SERVICE SO11Y_OAP On K8S_SERVICE     The following sections describe the default matching rules in detail, using the upper-layer On lower-layer format. The example service names are based on the SkyWalking Showcase default deployment. In SkyWalking, the service name can be composed of a group and a short name with the :: separator.  
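The note above points to config/hierarchy-definition.yml as the place where these relationships and auto-matching rules are declared. As a rough sketch (the exact file layout here is an assumption for illustration; the Groovy closure is the lower-short-name-remove-ns rule quoted in the next section), a single pair could look like this:
hierarchy:
  GENERAL:
    K8S_SERVICE: lower-short-name-remove-ns
auto-matching-rules:
  # upper.shortName equals lower.shortName with its trailing namespace segment removed
  lower-short-name-remove-ns: \u0026quot;{ (u, l) -\u0026gt; u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf('.')) }\u0026quot;
In this sketch, each entry under hierarchy names an upper layer, its lower layers, and the rule used to match their services; refer to Service Hierarchy Configuration for the authoritative syntax.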
GENERAL On K8S_SERVICE  Rule name: lower-short-name-remove-ns Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf('.')) } Description: GENERAL.service.shortName == K8S_SERVICE.service.shortName without namespace Matched Example:  GENERAL.service.name: agent::songs K8S_SERVICE.service.name: skywalking-showcase::songs.sample-services    GENERAL On APISIX  Rule name: lower-short-name-remove-ns Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName.substring(0, l.shortName.lastIndexOf('.')) } Description: GENERAL.service.shortName == APISIX.service.shortName without namespace Matched Example:  GENERAL.service.name: agent::frontend APISIX.service.name: APISIX::frontend.sample-services    VIRTUAL_DATABASE On MYSQL  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_DATABASE.service.shortName remove port == MYSQL.service.shortName with fqdn suffix Matched Example:  VIRTUAL_DATABASE.service.name: mysql.skywalking-showcase.svc.cluster.local:3306 MYSQL.service.name: mysql::mysql.skywalking-showcase    VIRTUAL_DATABASE On POSTGRESQL  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_DATABASE.service.shortName remove port == POSTGRESQL.service.shortName with fqdn suffix Matched Example:  VIRTUAL_DATABASE.service.name: psql.skywalking-showcase.svc.cluster.local:5432 POSTGRESQL.service.name: postgresql::psql.skywalking-showcase    VIRTUAL_DATABASE On CLICKHOUSE  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_DATABASE.service.shortName remove port == CLICKHOUSE.service.shortName with fqdn suffix Matched Example:  VIRTUAL_DATABASE.service.name: clickhouse.skywalking-showcase.svc.cluster.local:8123 CLICKHOUSE.service.name: clickhouse::clickhouse.skywalking-showcase    VIRTUAL_MQ On ROCKETMQ  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == ROCKETMQ.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: rocketmq.skywalking-showcase.svc.cluster.local:9876 ROCKETMQ.service.name: rocketmq::rocketmq.skywalking-showcase    VIRTUAL_MQ On RABBITMQ  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == RABBITMQ.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: rabbitmq.skywalking-showcase.svc.cluster.local:5672 RABBITMQ.service.name: rabbitmq::rabbitmq.skywalking-showcase     VIRTUAL_MQ On KAFKA  Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == KAFKA.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: kafka.skywalking-showcase.svc.cluster.local:9092 KAFKA.service.name: kafka::rocketmq.skywalking-showcase    VIRTUAL_MQ On PULSAR 
 Rule name: lower-short-name-with-fqdn Groovy script: { (u, l) -\u0026gt; u.shortName.substring(0, u.shortName.lastIndexOf(':')) == l.shortName.concat('.svc.cluster.local') } Description: VIRTUAL_MQ.service.shortName remove port == PULSAR.service.shortName with fqdn suffix Matched Example:  VIRTUAL_MQ.service.name: pulsar.skywalking-showcase.svc.cluster.local:6650 PULSAR.service.name: pulsar::pulsar.skywalking-showcase    MESH On MESH_DP  Rule name: name Groovy script: { (u, l) -\u0026gt; u.name == l.name } Description: MESH.service.name == MESH_DP.service.name Matched Example:  MESH.service.name: mesh-svr::songs.sample-services MESH_DP.service.name: mesh-svr::songs.sample-services    MESH On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: MESH.service.shortName == K8S_SERVICE.service.shortName Matched Example:  MESH.service.name: mesh-svr::songs.sample-services K8S_SERVICE.service.name: skywalking-showcase::songs.sample-services    MESH_DP On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: MESH_DP.service.shortName == K8S_SERVICE.service.shortName Matched Example:  MESH_DP.service.name: mesh-svr::songs.sample-services K8S_SERVICE.service.name: skywalking-showcase::songs.sample-services    MYSQL On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: MYSQL.service.shortName == K8S_SERVICE.service.shortName Matched Example:  MYSQL.service.name: mysql::mysql.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::mysql.skywalking-showcase    POSTGRESQL On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: POSTGRESQL.service.shortName == K8S_SERVICE.service.shortName Matched Example:  POSTGRESQL.service.name: postgresql::psql.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::psql.skywalking-showcase    CLICKHOUSE On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: CLICKHOUSE.service.shortName == K8S_SERVICE.service.shortName Matched Example:  CLICKHOUSE.service.name: clickhouse::clickhouse.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::clickhouse.skywalking-showcase    NGINX On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: NGINX.service.shortName == K8S_SERVICE.service.shortName Matched Example:  NGINX.service.name: nginx::nginx.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::nginx.skywalking-showcase    APISIX On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: APISIX.service.shortName == K8S_SERVICE.service.shortName Matched Example:  APISIX.service.name: APISIX::frontend.sample-services K8S_SERVICE.service.name: skywalking-showcase::frontend.sample-services    ROCKETMQ On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: ROCKETMQ.service.shortName == K8S_SERVICE.service.shortName Matched Example:  ROCKETMQ.service.name: rocketmq::rocketmq.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::rocketmq.skywalking-showcase    RABBITMQ On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: RABBITMQ.service.shortName == K8S_SERVICE.service.shortName Matched 
Example:  RABBITMQ.service.name: rabbitmq::rabbitmq.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::rabbitmq.skywalking-showcase    KAFKA On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: KAFKA.service.shortName == K8S_SERVICE.service.shortName Matched Example:  KAFKA.service.name: kafka::kafka.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::kafka.skywalking-showcase    PULSAR On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: PULSAR.service.shortName == K8S_SERVICE.service.shortName Matched Example:  PULSAR.service.name: pulsar::pulsar.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::pulsar.skywalking-showcase    SO11Y_OAP On K8S_SERVICE  Rule name: short-name Groovy script: { (u, l) -\u0026gt; u.shortName == l.shortName } Description: SO11Y_OAP.service.shortName == K8S_SERVICE.service.shortName Matched Example:  SO11Y_OAP.service.name: demo-oap.skywalking-showcase K8S_SERVICE.service.name: skywalking-showcase::demo-oap.skywalking-showcase    Build Through Specific Agents Use agent tech involved(such as eBPF) and deployment tools(such as operator and agent injector) to detect the service hierarchy relations.\n   Upper layer Lower layer Agent    Instance Hierarchy Instance Hierarchy relationship follows the same definition as Service Hierarchy.\nAutomatically Matching If the service hierarchy is built, the instance hierarchy relationship could be detected automatically through the following rules:\n The upper instance name equals the lower instance name. The upper instance attribute pod/hostname equals the lower instance attribute pod/hostname. The upper instance attribute pod/hostname equals the lower instance name. The upper instance name equals the lower instance attribute pod/hostname.  Build Through Specific Agents ","title":"Service Hierarchy","url":"/docs/main/next/en/concepts-and-designs/service-hierarchy/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. 
In this case, if the REST_HOST  environment variable does not exist, but the ANOTHER_REST_HOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/latest/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the ANOTHER_REST_HOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/next/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the ANOTHER_REST_HOST environment variable exists and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.0.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. 
You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.1.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.2.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. 
You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.3.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.4.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. 
You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.5.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.6.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override SkyWalking backend supports setting overrides by system properties and system environment variables. 
You may override the settings in application.yml\nSystem properties key rule ModuleName.ProviderName.SettingKey.\n  Example\nOverride restHost in this setting segment\n  core:default:restHost:${SW_CORE_REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}Use command arg\n-Dcore.default.restHost=172.0.4.12 System environment variables   Example\nOverride restHost in this setting segment through environment variables\n  core:default:restHost:${REST_HOST:0.0.0.0}restPort:${SW_CORE_REST_PORT:12800}restContextPath:${SW_CORE_REST_CONTEXT_PATH:/}gRPCHost:${SW_CORE_GRPC_HOST:0.0.0.0}gRPCPort:${SW_CORE_GRPC_PORT:11800}If the REST_HOST  environment variable exists in your operating system and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 0.0.0.0.\nPlaceholder nesting is also supported, like ${REST_HOST:${ANOTHER_REST_HOST:127.0.0.1}}. In this case, if the REST_HOST  environment variable does not exist, but the REST_ANOTHER_REST_HOSTHOST environment variable exists, and its value is 172.0.4.12, then the value of restHost here will be overwritten to 172.0.4.12; otherwise, it will be set to 127.0.0.1.\n","title":"Setting Override","url":"/docs/main/v9.7.0/en/setup/backend/backend-setting-override/"},{"content":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the default configuration options.\nThis configuration file is used during hybrid compilation to write the configuration information of the Agent into the program. When the program boots, the agent would read the pre-configured content.\nConfiguration Changes The values in the config file should be updated by following the user requirements. They are applied during the hybrid compilation process.\nFor missing configuration items in the custom file, the Agent would use the values from the default configuration.\nEnvironment Variables In the default configuration, you can see that most of the configurations are in the format ${xxx:config_value}. It means that when the program starts, the agent would first read the xxx from the system environment variables in the runtime. If it cannot be found, the value would be used as the config_value as value.\nNote: that the search for environment variables is at runtime, not compile time.\n","title":"Setting Override","url":"/docs/skywalking-go/latest/en/advanced-features/settings-override/"},{"content":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the default configuration options.\nThis configuration file is used during hybrid compilation to write the configuration information of the Agent into the program. When the program boots, the agent would read the pre-configured content.\nConfiguration Changes The values in the config file should be updated by following the user requirements. They are applied during the hybrid compilation process.\nFor missing configuration items in the custom file, the Agent would use the values from the default configuration.\nEnvironment Variables In the default configuration, you can see that most of the configurations are in the format ${xxx:config_value}. It means that when the program starts, the agent would first read the xxx from the system environment variables in the runtime. 
If it cannot be found, the value would be used as the config_value as value.\nNote: that the search for environment variables is at runtime, not compile time.\n","title":"Setting Override","url":"/docs/skywalking-go/next/en/advanced-features/settings-override/"},{"content":"Setting Override By default, SkyWalking Go agent provides a default agent.default.yaml to define the default configuration options.\nThis configuration file is used during hybrid compilation to write the configuration information of the Agent into the program. When the program boots, the agent would read the pre-configured content.\nConfiguration Changes The values in the config file should be updated by following the user requirements. They are applied during the hybrid compilation process.\nFor missing configuration items in the custom file, the Agent would use the values from the default configuration.\nEnvironment Variables In the default configuration, you can see that most of the configurations are in the format ${xxx:config_value}. It means that when the program starts, the agent would first read the xxx from the system environment variables in the runtime. If it cannot be found, the value would be used as the config_value as value.\nNote: that the search for environment variables is at runtime, not compile time.\n","title":"Setting Override","url":"/docs/skywalking-go/v0.4.0/en/advanced-features/settings-override/"},{"content":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. + key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. 
In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","title":"Setting Override","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/setting-override/"},{"content":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. + key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","title":"Setting Override","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/setting-override/"},{"content":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","title":"Setting Override","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/setting-override/"},{"content":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","title":"Setting Override","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/setting-override/"},{"content":"Setting Override In default, SkyWalking provide agent.config for agent.\nSetting override means end user can override the settings in these config file, through using system properties or agent options.\nSystem properties Use skywalking. 
+ key in config file as system properties key, to override the value.\n  Why need this prefix?\nThe agent system properties and env share with target application, this prefix can avoid variable conflict.\n  Example\nOverride agent.application_code by this.\n  -Dskywalking.agent.application_code=31200 Agent options Add the properties after the agent path in JVM arguments.\n-javaagent:/path/to/skywalking-agent.jar=[option1]=[value1],[option2]=[value2]   Example\nOverride agent.application_code and logging.level by this.\n  -javaagent:/path/to/skywalking-agent.jar=agent.application_code=31200,logging.level=debug   Special characters\nIf a separator(, or =) in the option or value, it should be wrapped in quotes.\n  -javaagent:/path/to/skywalking-agent.jar=agent.ignore_suffix='.jpg,.jpeg' System environment variables   Example\nOverride agent.application_code and logging.level by this.\n  # The service name in UI agent.service_name=${SW_AGENT_NAME:Your_ApplicationName} # Logging level logging.level=${SW_LOGGING_LEVEL:INFO} If the SW_AGENT_NAME  environment variable exists in your operating system and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo, otherwise, it will be set to Your_ApplicationName.\nBy the way, Placeholder nesting is also supported, like ${SW_AGENT_NAME:${ANOTHER_AGENT_NAME:Your_ApplicationName}}. In this case, if the SW_AGENT_NAME  environment variable not exists, but the ANOTHER_AGENT_NAME environment variable exists and its value is skywalking-agent-demo, then the value of agent.service_name here will be overwritten to skywalking-agent-demo,otherwise, it will be set to Your_ApplicationName.\nOverride priority Agent Options \u0026gt; System.Properties(-D) \u0026gt; System environment variables \u0026gt; Config file\n","title":"Setting Override","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/setting-override/"},{"content":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You could override the settings in rover_configs.yaml\nSystem environment variables   Example\nOverride core.backend.addr in this setting segment through environment variables\n  core:backend:addr:${ROVER_BACKEND_ADDR:localhost:11800}If the ROVER_BACKEND_ADDR  environment variable exists in your operating system and its value is oap:11800, then the value of core.backend.addr here will be overwritten to oap:11800, otherwise, it will be set to localhost:11800.\n","title":"Setting Override","url":"/docs/skywalking-rover/latest/en/setup/configuration/override-settings/"},{"content":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. You could override the settings in rover_configs.yaml\nSystem environment variables   Example\nOverride core.backend.addr in this setting segment through environment variables\n  core:backend:addr:${ROVER_BACKEND_ADDR:localhost:11800}If the ROVER_BACKEND_ADDR  environment variable exists in your operating system and its value is oap:11800, then the value of core.backend.addr here will be overwritten to oap:11800, otherwise, it will be set to localhost:11800.\n","title":"Setting Override","url":"/docs/skywalking-rover/next/en/setup/configuration/override-settings/"},{"content":"Setting Override SkyWalking Rover supports setting overrides by system environment variables. 
You could override the settings in rover_configs.yaml\nSystem environment variables   Example\nOverride core.backend.addr in this setting segment through environment variables\n  core:backend:addr:${ROVER_BACKEND_ADDR:localhost:11800}If the ROVER_BACKEND_ADDR  environment variable exists in your operating system and its value is oap:11800, then the value of core.backend.addr here will be overwritten to oap:11800, otherwise, it will be set to localhost:11800.\n","title":"Setting Override","url":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/override-settings/"},{"content":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. You could override the settings in satellite_config.yaml\nSystem environment variables   Example\nOverride log_pattern in this setting segment through environment variables\n  logger:log_pattern:${SATELLITE_LOGGER_LOG_PATTERN:%time [%level][%field] - %msg}time_pattern:${SATELLITE_LOGGER_TIME_PATTERN:2006-01-02 15:04:05.000}level:${SATELLITE_LOGGER_LEVEL:info}If the SATELLITE_LOGGER_LOG_PATTERN  environment variable exists in your operating system and its value is %msg, then the value of log_pattern here will be overwritten to %msg, otherwise, it will be set to %time [%level][%field] - %msg.\n","title":"Setting Override","url":"/docs/skywalking-satellite/latest/en/setup/configuration/override-settings/"},{"content":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. You could override the settings in satellite_config.yaml\nSystem environment variables   Example\nOverride log_pattern in this setting segment through environment variables\n  logger:log_pattern:${SATELLITE_LOGGER_LOG_PATTERN:%time [%level][%field] - %msg}time_pattern:${SATELLITE_LOGGER_TIME_PATTERN:2006-01-02 15:04:05.000}level:${SATELLITE_LOGGER_LEVEL:info}If the SATELLITE_LOGGER_LOG_PATTERN  environment variable exists in your operating system and its value is %msg, then the value of log_pattern here will be overwritten to %msg, otherwise, it will be set to %time [%level][%field] - %msg.\n","title":"Setting Override","url":"/docs/skywalking-satellite/next/en/setup/configuration/override-settings/"},{"content":"Setting Override SkyWalking Satellite supports setting overrides by system environment variables. You could override the settings in satellite_config.yaml\nSystem environment variables   Example\nOverride log_pattern in this setting segment through environment variables\n  logger:log_pattern:${SATELLITE_LOGGER_LOG_PATTERN:%time [%level][%field] - %msg}time_pattern:${SATELLITE_LOGGER_TIME_PATTERN:2006-01-02 15:04:05.000}level:${SATELLITE_LOGGER_LEVEL:info}If the SATELLITE_LOGGER_LOG_PATTERN  environment variable exists in your operating system and its value is %msg, then the value of log_pattern here will be overwritten to %msg, otherwise, it will be set to %time [%level][%field] - %msg.\n","title":"Setting Override","url":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/override-settings/"},{"content":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and command to execute. If you haven\u0026rsquo;t read the Module Design, recommend read this document first.\n Installation Configuration file Run E2E Tests  ","title":"Setup","url":"/docs/skywalking-infra-e2e/latest/en/setup/readme/"},{"content":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and command to execute. 
If you haven\u0026rsquo;t read the Module Design, recommend read this document first.\n Installation Configuration file Run E2E Tests  ","title":"Setup","url":"/docs/skywalking-infra-e2e/next/en/setup/readme/"},{"content":"Setup The most important thing in E2E Testing is that it uses a separate configuration file and command to execute. If you haven\u0026rsquo;t read the Module Design, recommend read this document first.\n Installation Configuration file Run E2E Tests  ","title":"Setup","url":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/readme/"},{"content":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by configs/rover_configs.yaml. Understanding the setting file will help you to read this document.\nFollow Deploy on Kubernetes document to run rover in your cluster.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demos. Usually, the process to be monitored is first declared.\nThen, you can use bin/startup.sh to start up the rover with their config.\nSkyWalking OAP Compatibility The SkyWalking Rover requires specialized protocols to communicate with SkyWalking OAP.\n   SkyWalking Rover Version SkyWalking OAP Notice     0.6.0+ \u0026gt; = 10.0.0 Only support Kubernetes.   0.1.0+ \u0026gt; = 9.1.0     Configuration  Common configurations about logs, backend address, cert files, etc. Service Discovery includes advanced setups about the ways of discovering services on your Kubernetes cluster. Access logs reports L2 to L4 network traffic relative information through access logs, to help OAP backend to do topology and metrics analysis. Profiling is an on-demand feature to enhance general observability besides access logs. It provides eBPF powered process ON_CPU, OFF_CPU profiling and network advanced profiling to link HTTP traffic with SkyWalking and Zipkin traces.  To adjust the configurations, refer to Overriding Setting document for more details.\nPrerequisites Currently, Linux operating systems are supported from version 4.9 and above, except for network profiling which requires version 4.16 or higher.\nThe following table lists currently supported/tested operating systems.\n   System Kernel Version On CPU Profiling Off CPU Profiling Network Profiling     CentOS 7 3.10.0 No No No   CentOS Stream 8 4.18.0 Yes Yes Yes   CentOS Stream 9 5.47.0 Yes Yes Yes   Debian 10 4.19.0 Yes Yes Yes   Debian 11 5.10.0 Yes Yes Yes(TCP Drop Monitor Excluded)   Fedora 35 5.14.10 Yes Yes Yes(TCP Drop Monitor Excluded)   RHEL 7 3.10.0 No No No   RHEL 8 4.18.0 Yes Yes Yes   RHEL 9 5.14.0 Yes Yes Yes   Rocky Linux 8 4.18.0 Yes Yes Yes   Rocky Linux 9 5.14.0 Yes Yes Yes   Ubuntu 1804 5.4.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.10 5.19.0 Yes Yes Yes   Ubuntu Pro 16.04 4.15.0 Yes Yes No   Ubuntu Pro 18.04 5.4.0 Yes Yes Yes   Ubuntu Pro 20.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes    ","title":"Setup","url":"/docs/skywalking-rover/latest/en/setup/overview/"},{"content":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by configs/rover_configs.yaml. 
Understanding the setting file will help you to read this document.\nFollow Deploy on Kubernetes document to run rover in your cluster.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demos. Usually, the process to be monitored is first declared.\nThen, you can use bin/startup.sh to start up the rover with their config.\nSkyWalking OAP Compatibility The SkyWalking Rover requires specialized protocols to communicate with SkyWalking OAP.\n   SkyWalking Rover Version SkyWalking OAP Notice     0.6.0+ \u0026gt; = 10.0.0 Only support Kubernetes.   0.1.0+ \u0026gt; = 9.1.0     Configuration  Common configurations about logs, backend address, cert files, etc. Service Discovery includes advanced setups about the ways of discovering services on your Kubernetes cluster. Access logs reports L2 to L4 network traffic relative information through access logs, to help OAP backend to do topology and metrics analysis. Profiling is an on-demand feature to enhance general observability besides access logs. It provides eBPF powered process ON_CPU, OFF_CPU profiling and network advanced profiling to link HTTP traffic with SkyWalking and Zipkin traces.  To adjust the configurations, refer to Overriding Setting document for more details.\nPrerequisites Currently, Linux operating systems are supported from version 4.9 and above, except for network profiling which requires version 4.16 or higher.\nThe following table lists currently supported/tested operating systems.\n   System Kernel Version On CPU Profiling Off CPU Profiling Network Profiling     CentOS 7 3.10.0 No No No   CentOS Stream 8 4.18.0 Yes Yes Yes   CentOS Stream 9 5.47.0 Yes Yes Yes   Debian 10 4.19.0 Yes Yes Yes   Debian 11 5.10.0 Yes Yes Yes(TCP Drop Monitor Excluded)   Fedora 35 5.14.10 Yes Yes Yes(TCP Drop Monitor Excluded)   RHEL 7 3.10.0 No No No   RHEL 8 4.18.0 Yes Yes Yes   RHEL 9 5.14.0 Yes Yes Yes   Rocky Linux 8 4.18.0 Yes Yes Yes   Rocky Linux 9 5.14.0 Yes Yes Yes   Ubuntu 1804 5.4.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.10 5.19.0 Yes Yes Yes   Ubuntu Pro 16.04 4.15.0 Yes Yes No   Ubuntu Pro 18.04 5.4.0 Yes Yes Yes   Ubuntu Pro 20.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes    ","title":"Setup","url":"/docs/skywalking-rover/next/en/setup/overview/"},{"content":"Setup The first and most important thing is, that SkyWalking Rover startup behaviors are driven by configs/rover_configs.yaml. Understanding the setting file will help you to read this document.\nFollow Deploy on Kubernetes document to run rover in your cluster.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demos. Usually, the process to be monitored is first declared.\nThen, you can use bin/startup.sh to start up the rover with their config.\nSkyWalking OAP Compatibility The SkyWalking Rover requires specialized protocols to communicate with SkyWalking OAP.\n   SkyWalking Rover Version SkyWalking OAP Notice     0.6.0+ \u0026gt; = 10.0.0 Only support Kubernetes.   0.1.0+ \u0026gt; = 9.1.0     Configuration  Common configurations about logs, backend address, cert files, etc. Service Discovery includes advanced setups about the ways of discovering services on your Kubernetes cluster. 
Access logs reports L2 to L4 network traffic relative information through access logs, to help OAP backend to do topology and metrics analysis. Profiling is an on-demand feature to enhance general observability besides access logs. It provides eBPF powered process ON_CPU, OFF_CPU profiling and network advanced profiling to link HTTP traffic with SkyWalking and Zipkin traces.  To adjust the configurations, refer to Overriding Setting document for more details.\nPrerequisites Currently, Linux operating systems are supported from version 4.9 and above, except for network profiling which requires version 4.16 or higher.\nThe following table lists currently supported/tested operating systems.\n   System Kernel Version On CPU Profiling Off CPU Profiling Network Profiling     CentOS 7 3.10.0 No No No   CentOS Stream 8 4.18.0 Yes Yes Yes   CentOS Stream 9 5.47.0 Yes Yes Yes   Debian 10 4.19.0 Yes Yes Yes   Debian 11 5.10.0 Yes Yes Yes(TCP Drop Monitor Excluded)   Fedora 35 5.14.10 Yes Yes Yes(TCP Drop Monitor Excluded)   RHEL 7 3.10.0 No No No   RHEL 8 4.18.0 Yes Yes Yes   RHEL 9 5.14.0 Yes Yes Yes   Rocky Linux 8 4.18.0 Yes Yes Yes   Rocky Linux 9 5.14.0 Yes Yes Yes   Ubuntu 1804 5.4.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 20.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.04 5.15.0 Yes Yes Yes   Ubuntu 22.10 5.19.0 Yes Yes Yes   Ubuntu Pro 16.04 4.15.0 Yes Yes No   Ubuntu Pro 18.04 5.4.0 Yes Yes Yes   Ubuntu Pro 20.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes   Ubuntu Pro 22.04 5.15.0 Yes Yes Yes    ","title":"Setup","url":"/docs/skywalking-rover/v0.6.0/en/setup/overview/"},{"content":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by configs/satellite_config.yaml. Understanding the setting file will help you to read this document.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nYou can use bin/startup.sh (or cmd) to start up the satellite with their default settings, set out as follows:\n Receive SkyWalking related protocols through grpc(listens on 0.0.0.0/11800) and transmit them to SkyWalking backend(to 0.0.0.0/11800). Expose Self-Observability telemetry data to Prometheus(listens on 0.0.0.0/1234)  Startup script Startup Script\nbin/startup.sh Examples You can quickly build your satellite according to the following examples:\nDeploy  Deploy on Linux and Windows Deploy on Kubernetes  More Use Cases  Transmit Log to Kafka Enable/Disable Channel Telemetry Exporter  satellite_config.yaml The core concept behind this setting file is, SkyWalking Satellite is based on pure modularization design. End user can switch or assemble the collector features by their own requirements.\nSo, in satellite_config.yaml, there are three parts.\n The common configurations. The sharing plugin configurations. The pipe plugin configurations.  Advanced feature document link list  Overriding settings in satellite_config.yaml is supported  Performance  ALS Load Balance.  ","title":"Setup","url":"/docs/skywalking-satellite/latest/en/setup/readme/"},{"content":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by configs/satellite_config.yaml. 
Understanding the setting file will help you to read this document.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nYou can use bin/startup.sh (or cmd) to start up the satellite with their default settings, set out as follows:\n Receive SkyWalking related protocols through grpc(listens on 0.0.0.0/11800) and transmit them to SkyWalking backend(to 0.0.0.0/11800). Expose Self-Observability telemetry data to Prometheus(listens on 0.0.0.0/1234)  Startup script Startup Script\nbin/startup.sh Examples You can quickly build your satellite according to the following examples:\nDeploy  Deploy on Linux and Windows Deploy on Kubernetes  More Use Cases  Transmit Log to Kafka Enable/Disable Channel Telemetry Exporter  satellite_config.yaml The core concept behind this setting file is, SkyWalking Satellite is based on pure modularization design. End user can switch or assemble the collector features by their own requirements.\nSo, in satellite_config.yaml, there are three parts.\n The common configurations. The sharing plugin configurations. The pipe plugin configurations.  Advanced feature document link list  Overriding settings in satellite_config.yaml is supported  Performance  ALS Load Balance.  ","title":"Setup","url":"/docs/skywalking-satellite/next/en/setup/readme/"},{"content":"Setup First and most important thing is, SkyWalking Satellite startup behaviours are driven by configs/satellite_config.yaml. Understanding the setting file will help you to read this document.\nRequirements and default settings Before you start, you should know that the main purpose of quickstart is to help you obtain a basic configuration for previews/demo. Performance and long-term running are not our goals.\nYou can use bin/startup.sh (or cmd) to start up the satellite with their default settings, set out as follows:\n Receive SkyWalking related protocols through grpc(listens on 0.0.0.0/11800) and transmit them to SkyWalking backend(to 0.0.0.0/11800). Expose Self-Observability telemetry data to Prometheus(listens on 0.0.0.0/1234)  Startup script Startup Script\nbin/startup.sh Examples You can quickly build your satellite according to the following examples:\nDeploy  Deploy on Linux and Windows Deploy on Kubernetes  More Use Cases  Transmit Log to Kafka Enable/Disable Channel Telemetry Exporter  satellite_config.yaml The core concept behind this setting file is, SkyWalking Satellite is based on pure modularization design. End user can switch or assemble the collector features by their own requirements.\nSo, in satellite_config.yaml, there are three parts.\n The common configurations. The sharing plugin configurations. The pipe plugin configurations.  Advanced feature document link list  Overriding settings in satellite_config.yaml is supported  Performance  ALS Load Balance.  
","title":"Setup","url":"/docs/skywalking-satellite/v1.2.0/en/setup/readme/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/latest/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other 
relative settings. In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/next/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For Jetty serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMinThreads:${SW_RECEIVER_SHARING_JETTY_MIN_THREADS:1}restMaxThreads:${SW_RECEIVER_SHARING_JETTY_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_JETTY_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_JETTY_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.0.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.1.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.2.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.3.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.4.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.5.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. 
In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.6.0/en/setup/backend/backend-expose/"},{"content":"Setup External Communication Channels SkyWalking has default activated gRPC/HTTP servers in the core module, which serve for both internal communication and external data report or query.\nIn some advanced scenarios, such as security requirements, specific gRPC/HTTP servers should be exposed for external requests.\nreceiver-sharing-server:selector:${SW_RECEIVER_SHARING_SERVER:default}default:# For REST serverrestHost:${SW_RECEIVER_SHARING_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_SHARING_REST_PORT:0}restContextPath:${SW_RECEIVER_SHARING_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_SHARING_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_SHARING_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_SHARING_REST_QUEUE_SIZE:0}httpMaxRequestHeaderSize:${SW_RECEIVER_SHARING_HTTP_MAX_REQUEST_HEADER_SIZE:8192}# For gRPC servergRPCHost:${SW_RECEIVER_GRPC_HOST:0.0.0.0}gRPCPort:${SW_RECEIVER_GRPC_PORT:0}maxConcurrentCallsPerConnection:${SW_RECEIVER_GRPC_MAX_CONCURRENT_CALL:0}maxMessageSize:${SW_RECEIVER_GRPC_MAX_MESSAGE_SIZE:0}gRPCThreadPoolQueueSize:${SW_RECEIVER_GRPC_POOL_QUEUE_SIZE:0}gRPCThreadPoolSize:${SW_RECEIVER_GRPC_THREAD_POOL_SIZE:0}gRPCSslEnabled:${SW_RECEIVER_GRPC_SSL_ENABLED:false}gRPCSslKeyPath:${SW_RECEIVER_GRPC_SSL_KEY_PATH:\u0026#34;\u0026#34;}gRPCSslCertChainPath:${SW_RECEIVER_GRPC_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}gRPCSslTrustedCAsPath:${SW_RECEIVER_GRPC_SSL_TRUSTED_CAS_PATH:\u0026#34;\u0026#34;}authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}Set restPort(HTTP) and gRPCPort(gRPC) to a legal port(greater than 0), would initialize new gRPC/HTTP servers for external requests with other relative settings. In this case, core/gRPC and core/rest could be served for cluster internal communication only.\n","title":"Setup External Communication Channels","url":"/docs/main/v9.7.0/en/setup/backend/backend-expose/"},{"content":"Setup in build When you want to integrate the Agent using the original go build command, you need to follow these steps.\n1. Download Agent Download the Agent from the official website.\n2. Install SkyWalking Go SkyWalking Go offers two ways for integration into your project.\n2.1 Agent Injector Agent injector is recommended when you only want to include SkyWalking Go agent in the compiling pipeline or shell.\nPlease execute the following command, which would automatically import SkyWalking Go into your project.\n/path/to/agent -inject /path/to/your/project [-all]  /path/to/agent is the path to the agent which your downloaded. /path/to/your/project is the home path to your project, support absolute and related with current directory path. -all is the parameter for injecting all submodules in your project.  2.2 Code Dependency Use go get to import the skywalking-go program.\ngo get github.com/apache/skywalking-go Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; NOTICE: Please ensure that the version of the Agent you downloaded is consistent with the version installed via go get in the previous section, to prevent errors such as missing package references during compilation.\n3. Build with SkyWalking Go Agent Add the following parameters in go build:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. 
-a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters (for more details, refer to the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a Binary Output The binary will be woven and instrumented by SkyWalking Go.\n","title":"Setup in build","url":"/docs/skywalking-go/latest/en/setup/gobuild/"},{"content":"Setup in build When you want to integrate the Agent using the original go build command, you need to follow these steps.\n1. Download Agent Download the Agent from the official website.\n2. Install SkyWalking Go SkyWalking Go offers two ways for integration into your project.\n2.1 Agent Injector Agent injector is recommended when you only want to include the SkyWalking Go agent in the compiling pipeline or shell.\nPlease execute the following command, which would automatically import SkyWalking Go into your project.\n/path/to/agent -inject /path/to/your/project [-all]  /path/to/agent is the path to the agent that you downloaded. /path/to/your/project is the home path of your project; it supports both absolute paths and paths relative to the current directory. -all is the parameter for injecting all submodules in your project.  2.2 Code Dependency Use go get to import the skywalking-go program.\ngo get github.com/apache/skywalking-go Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; NOTICE: Please ensure that the version of the Agent you downloaded is consistent with the version installed via go get in the previous section, to prevent errors such as missing package references during compilation.\n3. Build with SkyWalking Go Agent Add the following parameters to go build:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters (for more details, refer to the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a Binary Output The binary will be woven and instrumented by SkyWalking Go.\n","title":"Setup in build","url":"/docs/skywalking-go/next/en/setup/gobuild/"},{"content":"Setup in build When you want to integrate the Agent using the original go build command, you need to follow these steps.\n1. Download Agent Download the Agent from the official website.\n2. Install SkyWalking Go SkyWalking Go offers two ways for integration into your project.\n2.1 Agent Injector Agent injector is recommended when you only want to include the SkyWalking Go agent in the compiling pipeline or shell.\nPlease execute the following command, which would automatically import SkyWalking Go into your project.\n/path/to/agent -inject /path/to/your/project [-all]  /path/to/agent is the path to the agent that you downloaded. /path/to/your/project is the home path of your project; it supports both absolute paths and paths relative to the current directory. -all is the parameter for injecting all submodules in your project.  
2.2 Code Dependency Use go get to import the skywalking-go program.\ngo get github.com/apache/skywalking-go Also, import the module to your main package:\nimport _ \u0026#34;github.com/apache/skywalking-go\u0026#34; NOTICE: Please ensure that the version of the Agent you downloaded is consistent with the version installed via go get in the previous section, to prevent errors such as missing package references during compilation.\n3. Build with SkyWalking Go Agent Add the following parameters in go build:\n-toolexec=\u0026#34;/path/to/go-agent\u0026#34; -a  -toolexec is the path to the Golang enhancement program. -a is the parameter for rebuilding all packages forcibly.  If you want to customize the configuration information for the current service, please add the following parameters, read more please refer the settings override documentation):\n-toolexec=\u0026#34;/path/to/go-agent -config /path/to/config.yaml\u0026#34; -a Binary Output The binary would be weaved and instrumented by SkyWalking Go.\n","title":"Setup in build","url":"/docs/skywalking-go/v0.4.0/en/setup/gobuild/"},{"content":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container image.\nCustomized Dockerfile Using the SkyWalking Go provided image as the base image, perform file copying and other operations in the Dockerfile.\n# import the skywalking go base imageFROMapache/skywalking-go:\u0026lt;version\u0026gt;-go\u0026lt;go version\u0026gt;# Copy application codeCOPY /path/to/project /path/to/project# Inject the agent into the project or get dependencies by application selfRUN skywalking-go-agent -inject /path/to/project# Building the project including the agentRUN go build -toolexec=\u0026#34;skywalking-go-agent\u0026#34; -a /path/to/project# More operations...In the above code, we have performed the following actions:\n Used the SkyWalking Go provided image as the base image, which currently supports the following Go versions: 1.16, 1.17, 1.18, 1.19, 1.20. Copied the project into the Docker image. Installed SkyWalking Go and compiled the project, read this documentation for more detail. The SkyWalking Go agent is already installed in the /usr/local/bin directory with the name skywalking-go-agent.  ","title":"Setup in docker","url":"/docs/skywalking-go/latest/en/setup/docker/"},{"content":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container image.\nCustomized Dockerfile Using the SkyWalking Go provided image as the base image, perform file copying and other operations in the Dockerfile.\n# import the skywalking go base imageFROMapache/skywalking-go:\u0026lt;version\u0026gt;-go\u0026lt;go version\u0026gt;# Copy application codeCOPY /path/to/project /path/to/project# Inject the agent into the project or get dependencies by application selfRUN skywalking-go-agent -inject /path/to/project# Building the project including the agentRUN go build -toolexec=\u0026#34;skywalking-go-agent\u0026#34; -a /path/to/project# More operations...In the above code, we have performed the following actions:\n Used the SkyWalking Go provided image as the base image, which currently supports the following Go versions: 1.16, 1.17, 1.18, 1.19, 1.20. Copied the project into the Docker image. Installed SkyWalking Go and compiled the project, read this documentation for more detail. The SkyWalking Go agent is already installed in the /usr/local/bin directory with the name skywalking-go-agent.  
","title":"Setup in docker","url":"/docs/skywalking-go/next/en/setup/docker/"},{"content":"Setup in docker SkyWalking Go supports building user applications using Docker as the base container image.\nCustomized Dockerfile Using the SkyWalking Go provided image as the base image, perform file copying and other operations in the Dockerfile.\n# import the skywalking go base imageFROMapache/skywalking-go:\u0026lt;version\u0026gt;-go\u0026lt;go version\u0026gt;# Copy application codeCOPY /path/to/project /path/to/project# Inject the agent into the project or get dependencies by application selfRUN skywalking-go-agent -inject /path/to/project# Building the project including the agentRUN go build -toolexec=\u0026#34;skywalking-go-agent\u0026#34; -a /path/to/project# More operations...In the above code, we have performed the following actions:\n Used the SkyWalking Go provided image as the base image, which currently supports the following Go versions: 1.16, 1.17, 1.18, 1.19, 1.20. Copied the project into the Docker image. Installed SkyWalking Go and compiled the project, read this documentation for more detail. The SkyWalking Go agent is already installed in the /usr/local/bin directory with the name skywalking-go-agent.  ","title":"Setup in docker","url":"/docs/skywalking-go/v0.4.0/en/setup/docker/"},{"content":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- expired-plugins # Expired plugins are moved to this folder. No guarantee of working and maintenance. apm-impala-2.6.x-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. 
If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","title":"Setup java agent","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/readme/"},{"content":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- expired-plugins # Expired plugins are moved to this folder. No guarantee of working and maintenance. apm-impala-2.6.x-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","title":"Setup java agent","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/readme/"},{"content":"Setup java agent  Agent is available for JDK 8 - 17. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  The agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... 
+-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","title":"Setup java agent","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/readme/"},{"content":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. 
Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","title":"Setup java agent","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/readme/"},{"content":"Setup java agent  Agent is available for JDK 8 - 21. Find agent folder in SkyWalking release package Set agent.service_name in config/agent.config. Could be any String in English. Set collector.backend_service in config/agent.config. Default point to 127.0.0.1:11800, only works for local backend. Add -javaagent:/path/to/skywalking-package/agent/skywalking-agent.jar to JVM argument. And make sure to add it before the -jar argument.  Require SkyWalking OAP server 9.7.0+ if the agent works on the JRE using ZGC.\nThe agent release dist is included in Apache official release. New agent package looks like this.\n+-- agent +-- activations apm-toolkit-log4j-1.x-activation.jar apm-toolkit-log4j-2.x-activation.jar apm-toolkit-logback-1.x-activation.jar ... +-- config agent.config +-- plugins apm-dubbo-plugin.jar apm-feign-default-http-9.x.jar apm-httpClient-4.x-plugin.jar ..... +-- optional-plugins apm-gson-2.x-plugin.jar ..... +-- bootstrap-plugins jdk-http-plugin.jar ..... +-- expired-plugins # Expired plugins are moved to this folder. No guarantee of working and maintenance. apm-impala-2.6.x-plugin.jar ..... +-- logs skywalking-agent.jar  Start your application.  Install javaagent FAQs  Linux Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.sh.  CATALINA_OPTS=\u0026#34;$CATALINA_OPTS-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;; export CATALINA_OPTS  Windows Tomcat 7, Tomcat 8, Tomcat 9\nChange the first line of tomcat/bin/catalina.bat.  set \u0026#34;CATALINA_OPTS=-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34;  JAR file\nAdd -javaagent argument to command line in which you start your app. eg:  java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar yourApp.jar  Jetty\nModify jetty.sh, add -javaagent argument to command line in which you start your app. eg:  export JAVA_OPTIONS=\u0026#34;${JAVA_OPTIONS}-javaagent:/path/to/skywalking-agent/skywalking-agent.jar\u0026#34; Plugins SkyWalking agent has supported various middlewares, frameworks and libraries. Read supported list to get them and supported version. If the plugin is in Optional² catalog, go to optional plugins and bootstrap class plugin section to learn how to active it.\n All plugins in /plugins folder are active. Remove the plugin jar, it disabled. The default logging output folder is /logs.  ","title":"Setup java agent","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/readme/"},{"content":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini.  
Requirements  GCC Rustc 1.65+ Cargo Libclang 9.0+ Make Protoc  Install dependencies For Debian-base OS sudo apt install gcc make llvm-13-dev libclang-13-dev protobuf-c-compiler protobuf-compiler For Alpine Linux apk add gcc make musl-dev llvm15-dev clang15-dev protobuf-c-compiler Install Rust globally The officially recommended way to install Rust is via rustup.\nBut because the source code toolchain is override by rust-toolchain.toml, so if you don\u0026rsquo;t need multi version Rust, we recommend to install Rust by these way:\n  Install through OS package manager (The Rust version in the source must be \u0026gt;= 1.65).\n  Through standalone installers.\nFor linux x86_64 user:\nwget https://static.rust-lang.org/dist/rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz tar zxvf rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz cd rust-1.65.0-x86_64-unknown-linux-gnu ./install.sh   Through rustup but set default-toolchain to none.\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none   Install  Notice: If you compile skywalking_agent in Alpine Linux, you have to disable crt-static, otherwise the problem will be throw: \u0026ldquo;the libclang shared library at /usr/lib/libclang.so.15.0.7 could not be opened: Dynamic loading not supported\u0026rdquo;.\nYou can disable crt-static by environment variable:\nexport RUSTFLAGS=\u0026#34;-C target-feature=-crt-static\u0026#34;  Install from pecl.net pecl install skywalking_agent Install from the source codes git clone --recursive https://github.com/apache/skywalking-php.git cd skywalking-php phpize ./configure make make install Configure Configure skywalking agent in your php.ini.\n[skywalking_agent] extension = skywalking_agent.so ; Enable skywalking_agent extension or not. skywalking_agent.enable = Off ; Log file path. skywalking_agent.log_file = /tmp/skywalking-agent.log ; Log level: one of `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. skywalking_agent.log_level = INFO ; Address of skywalking oap server. skywalking_agent.server_addr = 127.0.0.1:11800 ; Application service name. skywalking_agent.service_name = hello-skywalking Refer to the Configuration section for more configuration items.\n Notice: It is not recommended to enable skywalking_agent.enable by default globally, because skywalking agent will modify the hook function and fork a new process to be a worker. Enabling it by default will cause extra meaningless consumption when skywalking agent is not needed (such as simply executing a php script).\n Run Start php-fpm server:\nphp-fpm -F -d \u0026#34;skywalking_agent.enable=On\u0026#34;  Notice: It is necessary to keep the php-fpm process running in the foreground (by specifying the \u0026gt; -F parameter, etc.), running php-fpm as a daemon will cause the skywalking-agent reporter process immediately exit.\n ","title":"Setup PHP Agent","url":"/docs/skywalking-php/latest/en/setup/service-agent/php-agent/readme/"},{"content":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini.  
Requirements  GCC Rustc 1.65+ Cargo Libclang 9.0+ Make Protoc  Install dependencies For Debian-base OS sudo apt install gcc make llvm-13-dev libclang-13-dev protobuf-c-compiler protobuf-compiler For Alpine Linux apk add gcc make musl-dev llvm15-dev clang15-dev protobuf-c-compiler Install Rust globally The officially recommended way to install Rust is via rustup.\nBut because the source code toolchain is override by rust-toolchain.toml, so if you don\u0026rsquo;t need multi version Rust, we recommend to install Rust by these way:\n  Install through OS package manager (The Rust version in the source must be \u0026gt;= 1.65).\n  Through standalone installers.\nFor linux x86_64 user:\nwget https://static.rust-lang.org/dist/rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz tar zxvf rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz cd rust-1.65.0-x86_64-unknown-linux-gnu ./install.sh   Through rustup but set default-toolchain to none.\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none   Install  Notice: If you compile skywalking_agent in Alpine Linux, you have to disable crt-static, otherwise the problem will be throw: \u0026ldquo;the libclang shared library at /usr/lib/libclang.so.15.0.7 could not be opened: Dynamic loading not supported\u0026rdquo;.\nYou can disable crt-static by environment variable:\nexport RUSTFLAGS=\u0026#34;-C target-feature=-crt-static\u0026#34;  Install from pecl.net pecl install skywalking_agent Install from the source codes git clone --recursive https://github.com/apache/skywalking-php.git cd skywalking-php phpize ./configure make make install Configure Configure skywalking agent in your php.ini.\n[skywalking_agent] extension = skywalking_agent.so ; Enable skywalking_agent extension or not. skywalking_agent.enable = Off ; Log file path. skywalking_agent.log_file = /tmp/skywalking-agent.log ; Log level: one of `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. skywalking_agent.log_level = INFO ; Address of skywalking oap server. skywalking_agent.server_addr = 127.0.0.1:11800 ; Application service name. skywalking_agent.service_name = hello-skywalking Refer to the Configuration section for more configuration items.\n Notice: It is not recommended to enable skywalking_agent.enable by default globally, because skywalking agent will modify the hook function and fork a new process to be a worker. Enabling it by default will cause extra meaningless consumption when skywalking agent is not needed (such as simply executing a php script).\n Run Start php-fpm server:\nphp-fpm -F -d \u0026#34;skywalking_agent.enable=On\u0026#34;  Notice: It is necessary to keep the php-fpm process running in the foreground (by specifying the \u0026gt; -F parameter, etc.), running php-fpm as a daemon will cause the skywalking-agent reporter process immediately exit.\n ","title":"Setup PHP Agent","url":"/docs/skywalking-php/next/en/setup/service-agent/php-agent/readme/"},{"content":"Setup PHP Agent  Agent is available for PHP 7.2 - 8.x. Build from source. Configure php.ini.  
Requirements  GCC Rustc 1.65+ Cargo Libclang 9.0+ Make Protoc  Install dependencies For Debian-base OS sudo apt install gcc make llvm-13-dev libclang-13-dev protobuf-c-compiler protobuf-compiler For Alpine Linux apk add gcc make musl-dev llvm15-dev clang15-dev protobuf-c-compiler Install Rust globally The officially recommended way to install Rust is via rustup.\nBut because the source code toolchain is override by rust-toolchain.toml, so if you don\u0026rsquo;t need multi version Rust, we recommend to install Rust by these way:\n  Install through OS package manager (The Rust version in the source must be \u0026gt;= 1.65).\n  Through standalone installers.\nFor linux x86_64 user:\nwget https://static.rust-lang.org/dist/rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz tar zxvf rust-1.65.0-x86_64-unknown-linux-gnu.tar.gz cd rust-1.65.0-x86_64-unknown-linux-gnu ./install.sh   Through rustup but set default-toolchain to none.\ncurl --proto \u0026#39;=https\u0026#39; --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none   Install  Notice: If you compile skywalking_agent in Alpine Linux, you have to disable crt-static, otherwise the problem will be throw: \u0026ldquo;the libclang shared library at /usr/lib/libclang.so.15.0.7 could not be opened: Dynamic loading not supported\u0026rdquo;.\nYou can disable crt-static by environment variable:\nexport RUSTFLAGS=\u0026#34;-C target-feature=-crt-static\u0026#34;  Install from pecl.net pecl install skywalking_agent Install from the source codes git clone --recursive https://github.com/apache/skywalking-php.git cd skywalking-php phpize ./configure make make install Configure Configure skywalking agent in your php.ini.\n[skywalking_agent] extension = skywalking_agent.so ; Enable skywalking_agent extension or not. skywalking_agent.enable = Off ; Log file path. skywalking_agent.log_file = /tmp/skywalking-agent.log ; Log level: one of `OFF`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. skywalking_agent.log_level = INFO ; Address of skywalking oap server. skywalking_agent.server_addr = 127.0.0.1:11800 ; Application service name. skywalking_agent.service_name = hello-skywalking Refer to the Configuration section for more configuration items.\n Notice: It is not recommended to enable skywalking_agent.enable by default globally, because skywalking agent will modify the hook function and fork a new process to be a worker. Enabling it by default will cause extra meaningless consumption when skywalking agent is not needed (such as simply executing a php script).\n Run Start php-fpm server:\nphp-fpm -F -d \u0026#34;skywalking_agent.enable=On\u0026#34;  Notice: It is necessary to keep the php-fpm process running in the foreground (by specifying the \u0026gt; -F parameter, etc.), running php-fpm as a daemon will cause the skywalking-agent reporter process immediately exit.\n ","title":"Setup PHP Agent","url":"/docs/skywalking-php/v0.7.0/en/setup/service-agent/php-agent/readme/"},{"content":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients and servers.\nCommon Configuration    Config Default Description     pipe_name sharing The group name of sharing plugins    Clients Clients have a series of client plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all client plugin configurations.\nServers Servers have a series of server plugins, which would be sharing with the plugins of the other pipes. 
Please read the doc to find all server plugin configurations.\nExample # The sharing plugins referenced by the specific plugins in the different pipes.sharing:common_config:pipe_name:sharingclients:- plugin_name:\u0026#34;kafka-client\u0026#34;brokers:${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}version:${SATELLITE_KAFKA_VERSION:\u0026#34;2.1.1\u0026#34;}servers:- plugin_name:\u0026#34;grpc-server\u0026#34;- plugin_name:\u0026#34;prometheus-server\u0026#34;address:${SATELLITE_PROMETHEUS_ADDRESS:\u0026#34;:8090\u0026#34;}","title":"Sharing Plugins","url":"/docs/skywalking-satellite/latest/en/setup/configuration/sharing-plugins/"},{"content":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients and servers.\nCommon Configuration    Config Default Description     pipe_name sharing The group name of sharing plugins    Clients Clients have a series of client plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all client plugin configurations.\nServers Servers have a series of server plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all server plugin configurations.\nExample # The sharing plugins referenced by the specific plugins in the different pipes.sharing:common_config:pipe_name:sharingclients:- plugin_name:\u0026#34;kafka-client\u0026#34;brokers:${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}version:${SATELLITE_KAFKA_VERSION:\u0026#34;2.1.1\u0026#34;}servers:- plugin_name:\u0026#34;grpc-server\u0026#34;- plugin_name:\u0026#34;prometheus-server\u0026#34;address:${SATELLITE_PROMETHEUS_ADDRESS:\u0026#34;:8090\u0026#34;}","title":"Sharing Plugins","url":"/docs/skywalking-satellite/next/en/setup/configuration/sharing-plugins/"},{"content":"Sharing Plugins Sharing plugin configurations has three 3 parts, which are common_config, clients and servers.\nCommon Configuration    Config Default Description     pipe_name sharing The group name of sharing plugins    Clients Clients have a series of client plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all client plugin configurations.\nServers Servers have a series of server plugins, which would be sharing with the plugins of the other pipes. Please read the doc to find all server plugin configurations.\nExample # The sharing plugins referenced by the specific plugins in the different pipes.sharing:common_config:pipe_name:sharingclients:- plugin_name:\u0026#34;kafka-client\u0026#34;brokers:${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}version:${SATELLITE_KAFKA_VERSION:\u0026#34;2.1.1\u0026#34;}servers:- plugin_name:\u0026#34;grpc-server\u0026#34;- plugin_name:\u0026#34;prometheus-server\u0026#34;address:${SATELLITE_PROMETHEUS_ADDRESS:\u0026#34;:8090\u0026#34;}","title":"Sharing Plugins","url":"/docs/skywalking-satellite/v1.2.0/en/setup/configuration/sharing-plugins/"},{"content":"SkyWalking 9.x showcase This showcase would follow the latest changes of SkyWalking 9.x, even before the official release.\nThis showcase repository includes an example music application and other manifests to demonstrate the main features of SkyWalking. The music application is composed of several microservices that are written in different programming languages. Here is the architecture:\n%% please read this doc in our official website, otherwise the graph is not correctly rendered. 
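The Satellite sharing-plugins example above uses placeholders such as ${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}: an environment variable name, then a default value after the first colon. The Python sketch below only illustrates how that syntax is conventionally resolved; Satellite itself is written in Go, and this helper is not its implementation.

```python
# Illustrative sketch: resolve ${ENV_NAME:default} placeholders the way the
# Satellite sharing-plugins configuration documents them -- use the environment
# variable if set, otherwise fall back to the default after the first colon.
import os
import re

_PLACEHOLDER = re.compile(r"\$\{([A-Z0-9_]+):(.*?)\}")


def resolve_placeholders(text: str) -> str:
    """Replace ${ENV_NAME:default} tokens with the env value or the default."""
    return _PLACEHOLDER.sub(lambda m: os.environ.get(m.group(1), m.group(2)), text)


print(resolve_placeholders("brokers: ${SATELLITE_KAFKA_CLIENT_BROKERS:127.0.0.1:9092}"))
# -> "brokers: 127.0.0.1:9092" unless SATELLITE_KAFKA_CLIENT_BROKERS is set
```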
graph LR; loadgen[load generator] --\u0026gt; ui(\u0026quot;UI (React)\u0026quot;) --\u0026gt; Traffic1(\u0026quot;HTTP Request for backend serv\u0026quot;) --\u0026gt; apisix(\u0026quot;APISIX as UI container\u0026quot;) --\u0026gt; app(\u0026quot;app server (NodeJS)\u0026quot;) --\u0026gt; gateway(\u0026quot;gateway (Spring)\u0026quot;); ui(\u0026quot;UI (React)\u0026quot;) --\u0026gt; Traffic2(\u0026quot;HTTP Request for UI codes\u0026quot;) --\u0026gt; apisix(\u0026quot;APISIX with UI container\u0026quot;) gateway --\u0026gt; songs(\u0026quot;songs (Spring)\u0026quot;) \u0026amp; rcmd(\u0026quot;recommendations (Python)\u0026quot;); rcmd --\u0026gt; rating(\u0026quot;rating (Go)\u0026quot;); songs --\u0026gt; activeMQ activeMQ --\u0026gt; songs rcmd --\u0026gt; songs; songs --\u0026gt; db(\u0026quot;database (H2)\u0026quot;); Usage Please run the showcase in a brand new test cluster, otherwise the undeploy process may delete some resources that you have installed before running this showcase (for example cert-manager). If you don\u0026rsquo;t do this in a new test cluster, it\u0026rsquo;s all on your own risks!\nThe showcase uses GNU Make and Docker containers to run commands, so please make sure you have make installed and Docker daemon running.\nPrerequisites To deploy the full features of this showcase application, you may need up to 8 CPU cores and 32 GB memory, please increase the Docker daemon resources or Kubernetes cluster resources if you find containers / Pods failed to start up. Alternatively, you can also only deploy part of the features that interest you if you don\u0026rsquo;t want to increase the resources, via the guide in Customization.\nQuick Start Make sure you have a running Kubernetes cluster and kubectl can access to that cluster.\ngit clone https://github.com/apache/skywalking-showcase.git cd skywalking-showcase make deploy.kubernetes This will install SkyWalking components, including OAP in cluster mode with 2 nodes, SkyWalking UI, microservices with SkyWalking agent, microservices without SkyWalking agent but managed by Istio, 2 Pods to mimic virtual machines and export metrics to SkyWalking, and enable kubernetes cluster monitoring as well as SkyWalking self observability.\nFor more advanced deployments, check Customization documentation below.\nNotice, when run this showcase locally such as KinD, the images are downloaded inside the KinD, which could take over 10 mins(depend on local network). Rerun make deploy.kubernetes if some timeout errors break the process.\nCustomization The variables defined in Makefile.in can be overridden to customize the showcase, by specifying an environment variable with the same name, e.g.:\nexport ES_VERSION=7.14.0 make \u0026lt;target\u0026gt; or directly specifying in the make command, e.g.: make \u0026lt;target\u0026gt; ES_VERSION=7.14.0.\nRun make help to get more information.\nFeatures The showcase is composed of a set of scenarios with feature flags, you can deploy some of them that interest you by overriding the FEATURE_FLAGS variable defined in Makefile.in, as documented in Customization, e.g.:\nmake deploy.kubernetes FEATURE_FLAGS=single-node,agent Feature flags for different platforms (Kubernetes and Docker Compose) are not necessarily the same so make sure to specify the right feature flags.\nCurrently, the features supported are:\n   Name Description Note     java-agent-injector Use the java agent injector to inject the Skywalking Java agent and deploy microservices with other SkyWalking agent enabled. 
The microservices include agents for Java, NodeJS server, browser, Python.   agent Deploy microservices with SkyWalking agent pre-installed. In Kubernetes scenarios, please use java-agent-injector instead of this, if possible.   cluster Deploy SkyWalking OAP in cluster mode, with 2 nodes, and SkyWalking UI. Only one of cluster or single-node can be enabled.   single-node Deploy only one single node of SkyWalking OAP, and SkyWalking UI, ElasticSearch as storage. Only one of cluster or single-node can be enabled.   elasticsearch Deploy ElasticSearch as storage, you may want to disable this if you want to use your own ElasticSearch deployments.    postgresql Deploy PostgreSQL as storage, you may want to disable this if you want to use your own PostgreSQL deployments.    so11y Enable SkyWalking self observability. This is enabled by default for platform Docker Compose.   vm-monitor Start 2 virtual machines and export their metrics to SkyWalking. The \u0026ldquo;virtual machines\u0026rdquo; are mimicked by Docker containers or Pods.   als Start microservices WITHOUT SkyWalking agent enabled, and configure SkyWalking to analyze the topology and metrics from their access logs. Command istioctl is required to run this feature. The agentless microservices will be running at namespace ${NAMESPACE}-agentless   kubernetes-monitor Deploy OpenTelemetry and export Kubernetes monitoring metrics to SkyWalking for analysis and display on UI.    istiod-monitor Deploy OpenTelemetry and export Istio control plane metrics to SkyWalking for analysis and display on UI.    event Deploy tools to trigger events, and SkyWalking Kubernetes event exporter to export events into SkyWalking.    satellite Deploy SkyWalking Satellite to load balance the monitoring data.    trace-profiling Deploy tools to submit trace profiling tasks. Only support deployment with SkyWalking agents installed, currently Java agent and Python agent support trace profiling.   rover Deploy SkyWalking Rover and detect the processes in the Kubernetes environment. Only support deployment in the Kubernetes environment, docker is not supported.   mysql-monitor Start a MySQL server and load generator to execute the sample SQLs periodically, set up fluent bit to fetch slow logs and export to OAP, and export their metrics to SkyWalking.    postgresql-monitor Start a PostgreSQL server, and load generator to execute the sample SQLs periodically, set up fluent bit to fetch slow logs and export to OAP, and export their metrics to SkyWalking.    elasticsearch-monitor Deploy OpenTelemetry and export Elasticsearch monitoring metrics to SkyWalking for analysis and display on UI.    mongodb-monitor Deploy OpenTelemetry and export MongoDB monitoring metrics to SkyWalking for analysis and display on UI.    nginx-monitor Deploy OpenTelemetry and export Nginx metrics and logs to SkyWalking for analysis and display on UI    apisix-monitor Deploy OpenTelemetry and export APISIX metrics to SkyWalking for analysis and display on UI    mesh-with-agent Deploy services with java agent in the service mesh environment. Only support deployment in the Kubernetes environment, docker is not supported.   grafana Deploy a Grafana to show SkyWalking metrics and logs on the Grafana UI. Feel free to modify the Grafana config when deploy your own environment.   r3 Deploy R3 as RESTful URL recognition service.    rocketmq-monitor Deploy OpenTelemetry and export RocketMQ monitoring metrics to SkyWalking for analysis and display on UI.    
pulsar-monitor Deploy OpenTelemetry and export Pulsar monitoring metrics to SkyWalking for analysis and display on UI.    rabbitmq-monitor Deploy OpenTelemetry and export RabbitMQ monitoring metrics to SkyWalking for analysis and display on UI.     Kubernetes To deploy the example application in Kubernetes, please make sure that you have kubectl command available, and it can connect to the Kubernetes cluster successfully.\nIf you don\u0026rsquo;t have a running cluster, you can also leverage KinD (Kubernetes in Docker) or minikube to create a cluster.\nRun kubectl get nodes to check the connectivity before going to next step. The typical error message that indicates your kubectl cannot connect to a cluster is:\nThe connection to the server localhost:8080 was refused - did you specify the right host or port? Deploy # Deploy make deploy.kubernetes # Undeploy make undeploy.kubernetes # Redeploy make redeploy.kubernetes # equivalent to make undeploy.kubernetes deploy.kubernetes Docker Compose Deploy # Deploy make deploy.docker # Undeploy make undeploy.docker # Redeploy make redeploy.docker # equivalent to make undeploy.docker deploy.docker Traffic Flow After deploy the showcase, the business system would send monitoring traffic to the OAP node, and one agent/sidecar connect to one OAP node directly.\nSatellite If the business traffic is unbalanced, it would cause the OAP node receive unbalanced monitoring data. So, you could add the Satellite component. After deploy the showcase with the satellite component, the monitoring traffic would send to the Satellite service, and satellite load balances the traffic to the OAP nodes.\n%% please read this doc in our official website, otherwise the graph is not correctly rendered. graph LR; agent[\u0026quot;business app(agent)\u0026quot;] --\u0026gt; satellite(\u0026quot;satellite\u0026quot;) --\u0026gt; oap(\u0026quot;oap\u0026quot;); envoy[\u0026quot;sidecar(envoy)\u0026quot;] --\u0026gt; satellite; Troubleshooting If you encounter any problems, please add DEBUG=true to the command line to get the output of the resources that will be applied.\nmake deploy.kubernetes DEBUG=true # this will print the resources that will be applied to Kubernetes make deploy.docker DEBUG=true # this will print the merged docker-compose.yaml content that will be used to run in Docker Compose ","title":"SkyWalking 9.x showcase","url":"/docs/skywalking-showcase/next/readme/"},{"content":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x 
postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x spring-cloud-gateway-4.x spring-webflux-6.x spring-webflux-6.x-webclient activemq-artemis-jakarta-client-2.x  ","title":"Skywalking Agent List","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/plugin-list/"},{"content":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x 
spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x spring-cloud-gateway-4.x spring-webflux-6.x spring-webflux-6.x-webclient activemq-artemis-jakarta-client-2.x c3p0-0.9.x  ","title":"Skywalking Agent List","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/plugin-list/"},{"content":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter 
toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x  ","title":"Skywalking Agent List","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/plugin-list/"},{"content":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x 
micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x  ","title":"Skywalking Agent List","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/plugin-list/"},{"content":"Skywalking Agent List  aerospike activemq-5.x armeria-063-084 armeria-085 armeria-086 armeria-098 armeria-100 async-http-client-2.x avro-1.x brpc-java brpc-java-3.x canal-1.x cassandra-java-driver-3.x dbcp-2.x druid-1.x dubbo dubbo-2.7.x dubbo-3.x dubbo-threadpool dubbo-threadpool-2.7.x ehcache-2.x elastic-job-2.x elasticjob-3.x elasticsearch-5.x elasticsearch-6.x elasticsearch-7.x fastjson-1.2.x feign-default-http-9.x feign-pathvar-9.x finagle graphql-8.x graphql-9.x graphql-12.x-15.x graphql-16plus grpc-1.x gson-2.8.x guava-cache h2-1.x hbase-1.x/2.x hikaricp-3.x/4.x httpasyncclient-4.x httpclient-3.x httpclient-4.x httpclient-5.x hystrix-1.x influxdb-2.x jackson-2.x jdk-http-plugin jdk-threading-plugin jedis-2.x-3.x jedis-4.x jetty-client-9.0 jetty-client-9.x jetty-server-9.x kafka-0.11.x/1.x/2.x kotlin-coroutine lettuce-5.x light4j mariadb-2.x micrometer-1.10.x memcache-2.x mongodb-2.x mongodb-3.x mongodb-4.x motan-0.x mybatis-3.x mysql-5.x mysql-6.x mysql-8.x nacos-client-2.x netty-socketio netty-http-4.1.x nutz-http-1.x nutz-mvc-annotation-1.x okhttp-3.x okhttp-4.x play-2.x postgresql-8.x pulsar-2.2-2.7 quasar quartz-scheduler-2.x rabbitmq redisson-3.x resteasy-server-3.x resteasy-server-4.x resteasy-server-6.x rocketMQ-3.x rocketMQ-4.x rocketMQ-5.x rocketMQ-client-java-5.x sentinel-1.x servicecomb-2.x sharding-sphere-3.x sharding-sphere-4.0.0 sharding-sphere-4.1.0 sharding-sphere-5.0.0 sofarpc solrj-7.x spring-annotation spring-async-annotation-5.x spring-cloud-feign-1.x spring-cloud-feign-2.x spring-cloud-gateway-2.0.x spring-cloud-gateway-2.1.x spring-concurrent-util-4.x spring-core-patch spring-kafka-1.x spring-kafka-2.x spring-mvc-annotation spring-mvc-annotation-3.x spring-mvc-annotation-4.x spring-mvc-annotation-5.x spring-mvc-annotation-6.x spring-resttemplate-3.x spring-resttemplate-4.x spring-resttemplate-6.x spring-scheduled-annotation spring-tx spring-webflux-5.x spring-webflux-5.x-webclient spymemcached-2.x struts2-2.x thrift tomcat-7.x/8.x tomcat-10.x toolkit-counter toolkit-gauge toolkit-histogram toolkit-kafka toolkit-log4j toolkit-log4j2 toolkit-logback toolkit-opentracing toolkit-tag toolkit-trace toolkit-exception toolkit-tracer toolkit-webflux undertow-2.x-plugin vertx-core-3.x vertx-core-4.x xxl-job-2.x zookeeper-3.4.x mssql-jtds-1.x mssql-jdbc apache-cxf-3.x jsonrpc4j spring-cloud-gateway-3.x neo4j-4.x clickhouse-0.3.1 clickhouse-0.3.2.x kylin-jdbc-2.6.x-3.x-4.x okhttp-2.x pulsar-2.8.x undertow-worker-thread-pool tomcat-thread-pool guava-eventbus shenyu-2.4.x jdk-threadpool-plugin hutool-http-5.x micronaut-http-client-3.2.x-3.6.x micronaut-http-server-3.2.x-3.6.x nats-client-2.14.x-2.15.x impala-jdbc-2.6.x jdk-forkjoinpool-plugin jetty-thread-pool jersey-2.x jersey-3.x grizzly-2.3.x-4.x grizzly-2.3.x-4.x-threadpool jetty-server-11.x jetty-client-11.x websphere-liberty-23.x spring-cloud-gateway-4.x spring-webflux-6.x spring-webflux-6.x-webclient activemq-artemis-jakarta-client-2.x  ","title":"Skywalking Agent List","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/plugin-list/"},{"content":"Apache SkyWalking Cloud on Kubernetes A bridge 
project between Apache SkyWalking and Kubernetes.\nSWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nFeatures  Java Agent Injector: Inject the java agent into the application pod natively. Operator: Provision and maintain SkyWalking backend components. Custom Metrics Adapter: Provides custom metrics come from SkyWalking OAP cluster for autoscaling by Kubernetes HPA  Build images Issue below instrument to get the docker image:\nmake or\nmake build To onboard operator or adapter, you should push the image to a registry where the kubernetes cluster can pull it.\nOnboard Java Agent Injector and Operator The java agent injector and operator share a same binary. To onboard them, you should follow:\n To install the java agent injector and operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f config/operator-bundle.yaml Onboard Custom Metrics Adapter  Deploy OAP server by referring to Operator Quick Start. Apply the manifests for an adapter in config:  kubectl apply -f config/adapter-bundle.yaml License Apache 2.0 License.\n","title":"SkyWalking Cloud on Kubernetes","url":"/docs/skywalking-swck/latest/binary-readme/"},{"content":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes.\nSWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nFeatures  Java Agent Injector: Inject the java agent into the application pod natively. Operator: Provision and maintain SkyWalking backend components. Custom Metrics Adapter: Provides custom metrics come from SkyWalking OAP cluster for autoscaling by Kubernetes HPA  Build images Issue below instrument to get the docker image:\nmake or\nmake build To onboard operator or adapter, you should push the image to a registry where the kubernetes cluster can pull it.\nOnboard Java Agent Injector and Operator The java agent injector and operator share a same binary. To onboard them, you should follow:\n To install the java agent injector and operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f config/operator-bundle.yaml Onboard Custom Metrics Adapter  Deploy OAP server by referring to Operator Quick Start. Apply the manifests for an adapter in config:  kubectl apply -f config/adapter-bundle.yaml License Apache 2.0 License.\n","title":"SkyWalking Cloud on Kubernetes","url":"/docs/skywalking-swck/next/binary-readme/"},{"content":"Apache SkyWalking Cloud on Kubernetes A bridge project between Apache SkyWalking and Kubernetes.\nSWCK is a platform for the SkyWalking user, provisions, upgrades, maintains SkyWalking relevant components, and makes them work natively on Kubernetes.\nFeatures  Java Agent Injector: Inject the java agent into the application pod natively. Operator: Provision and maintain SkyWalking backend components. Custom Metrics Adapter: Provides custom metrics come from SkyWalking OAP cluster for autoscaling by Kubernetes HPA  Build images Issue below instrument to get the docker image:\nmake or\nmake build To onboard operator or adapter, you should push the image to a registry where the kubernetes cluster can pull it.\nOnboard Java Agent Injector and Operator The java agent injector and operator share a same binary. 
To onboard them, you should follow:\n To install the java agent injector and operator in an existing cluster, make sure you have cert-manager installed. Apply the manifests for the Controller and CRDs in config:  kubectl apply -f config/operator-bundle.yaml Onboard Custom Metrics Adapter  Deploy OAP server by referring to Operator Quick Start. Apply the manifests for an adapter in config:  kubectl apply -f config/adapter-bundle.yaml License Apache 2.0 License.\n","title":"SkyWalking Cloud on Kubernetes","url":"/docs/skywalking-swck/v0.9.0/binary-readme/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/latest/en/api/x-process-correlation-headers-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  
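To make the sw8-correlation value format concrete, here is a small Python sketch that encodes and decodes the comma-separated base64(key):base64(value) list described above, applying the recommended limits of at most 3 keys and values no longer than 128 bytes. The function names are hypothetical; real agents expose this through TraceContext#putCorrelation and TraceContext#getCorrelation instead.

```python
# Illustrative sketch of the sw8-correlation header value:
# base64(key):base64(value) pairs joined by commas.
import base64

MAX_KEYS = 3
MAX_VALUE_BYTES = 128


def encode_correlation(items: dict) -> str:
    """Build an sw8-correlation header value from a key/value dict."""
    if len(items) > MAX_KEYS:
        raise ValueError("at most 3 correlation keys are recommended")
    pairs = []
    for key, value in items.items():
        if len(value.encode("utf-8")) > MAX_VALUE_BYTES:
            raise ValueError(f"value for {key!r} exceeds 128 bytes")
        pairs.append(base64.b64encode(key.encode()).decode()
                     + ":"
                     + base64.b64encode(value.encode()).decode())
    return ",".join(pairs)


def decode_correlation(header_value: str) -> dict:
    """Parse an sw8-correlation header value back into a dict."""
    result = {}
    for pair in filter(None, header_value.split(",")):
        enc_key, enc_value = pair.split(":", 1)
        result[base64.b64decode(enc_key).decode()] = base64.b64decode(enc_value).decode()
    return result


value = encode_correlation({"tenant": "acme", "region": "eu-west-1"})
assert decode_correlation(value) == {"tenant": "acme", "region": "eu-west-1"}
```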
","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/next/en/api/x-process-correlation-headers-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.0.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.1.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. 
The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.2.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  The Cross Process Correlation Headers Protocol is used to transport custom data by leveraging the capability of Cross Process Propagation Headers Protocol.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.3.0/en/protocols/skywalking-cross-process-correlation-headers-protocol-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  
","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.4.0/en/api/x-process-correlation-headers-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.5.0/en/api/x-process-correlation-headers-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.6.0/en/api/x-process-correlation-headers-v1/"},{"content":"SkyWalking Cross Process Correlation Headers Protocol  Version 1.0  SkyWalking Cross Process Correlation Headers Protocol is a new in-wire context propagation protocol which is additional and optional. Please read SkyWalking language agents documentation to see whether it is supported.\nThis is an optional and additional protocol for language tracer implementation. All tracer implementation could consider implementing this. Cross Process Correlation Header key is sw8-correlation. 
The value is the encoded(key):encoded(value) list with elements splitted by , such as base64(string key):base64(string value),base64(string key2):base64(string value2).\nRecommendations for language APIs The following implementation method is recommended for different language APIs.\n TraceContext#putCorrelation and TraceContext#getCorrelation are recommended to write and read the correlation context, with key/value string. The key should be added if it is absent. The latter writes should override the previous value. The total number of all keys should be less than 3, and the length of each value should be less than 128 bytes. The context should be propagated as well when tracing context is propagated across threads and processes.  ","title":"SkyWalking Cross Process Correlation Headers Protocol","url":"/docs/main/v9.7.0/en/api/x-process-correlation-headers-v1/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. 
The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/latest/en/api/x-process-propagation-headers-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
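As a concrete illustration of the field layout above, the Python sketch below assembles an sw8 value from the eight fields (string fields base64-encoded, joined by -) and, under the same assumptions, an sw8-x value carrying the tracing mode and client-side send timestamp. The helper names and sample values are invented for the example; this is not an official agent implementation, and the exact formatting of empty sw8-x fields is an assumption.

```python
# Illustrative sketch: build sw8 (standard) and sw8-x (extension) header values.
import base64
import time
from typing import Optional


def _b64(value: str) -> str:
    return base64.b64encode(value.encode("utf-8")).decode("ascii")


def build_sw8(sample: int, trace_id: str, segment_id: str, span_id: int,
              service: str, instance: str, endpoint: str, target: str) -> str:
    """Join the 8 sw8 fields with '-', base64-encoding the string fields."""
    return "-".join([
        str(sample),        # 0 or 1
        _b64(trace_id),     # globally unique trace ID
        _b64(segment_id),   # parent trace segment ID
        str(span_id),       # parent span ID, starting at 0
        _b64(service),      # parent service (<= 50 UTF-8 chars)
        _b64(instance),     # parent service instance (<= 50 UTF-8 chars)
        _b64(endpoint),     # operation name of the first entry span in the parent segment
        _b64(target),       # target address used on the client end
    ])


def build_sw8_x(skip_analysis: bool = False,
                send_timestamp_ms: Optional[int] = None) -> str:
    """Join the sw8-x fields with '-': tracing mode, then client send timestamp."""
    mode = "1" if skip_analysis else ""
    ts = str(send_timestamp_ms) if send_timestamp_ms is not None else ""
    return f"{mode}-{ts}"


headers = {
    "sw8": build_sw8(1, "demo-trace-id", "demo-segment-id", 3, "orders",
                     "orders-instance-1", "/GET/orders", "10.0.0.5:8080"),
    "sw8-x": build_sw8_x(send_timestamp_ms=int(time.time() * 1000)),
}
```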
","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/next/en/api/x-process-propagation-headers-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.0.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. 
The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.1.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. 
The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.2.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. 
String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.3.0/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. 
Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.4.0/en/api/x-process-propagation-headers-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. 
It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.5.0/en/api/x-process-propagation-headers-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. 
This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  ","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.6.0/en/api/x-process-propagation-headers-v3/"},{"content":"SkyWalking Cross Process Propagation Headers Protocol  Version 3.0  SkyWalking is more akin to an APM system, rather than a common distributed tracing system. SkyWalking\u0026rsquo;s headers are much more complex than those found in a common distributed tracing system. The reason behind their complexity is for better analysis performance of the OAP. You can find many similar mechanisms in other commercial APM systems (some of which are even more complex than ours!).\nAbstract The SkyWalking Cross Process Propagation Headers Protocol v3, also known as the sw8 protocol, is designed for context propagation.\nStandard Header Item The standard header is the minimal requirement for context propagation.\n Header Name: sw8. Header Value: 8 fields split by -. The length of header value must be less than 2k (default).  Example of the value format: XXXXX-XXXXX-XXXX-XXXX\nValues Values must include the following segments, and all string type values are in BASE64 encoding.\n Required:   Sample. 0 or 1. 0 means that the context exists, but it could (and most likely will) be ignored. 1 means this trace needs to be sampled and sent to the backend. Trace ID. String(BASE64 encoded). A literal string that is globally unique. Parent trace segment ID. String(BASE64 encoded). A literal string that is globally unique. Parent span ID. Must be an integer. It begins with 0. This span ID points to the parent span in parent trace segment. Parent service. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent service instance. String(BASE64 encoded). Its length should be no more than 50 UTF-8 characters. Parent endpoint. String(BASE64 encoded). The operation name of the first entry span in the parent segment. Its length should be less than 150 UTF-8 characters. Target address of this request used on the client end. String(BASE64 encoded). The network address (not necessarily IP + port) used on the client end to access this target service.   Sample values: 1-TRACEID-SEGMENTID-3-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT  Extension Header Item The extension header item is designed for advanced features. It provides interaction capabilities between the agents deployed in upstream and downstream services.\n Header Name: sw8-x Header Value: Split by -. The fields are extendable.  Values The current value includes fields.\n Tracing Mode. Empty, 0, or 1. Empty or 0 is the default. 1 indicates that all spans generated in this context will skip analysis, spanObject#skipAnalysis=true. This context is propagated to upstream by default, unless it is changed in the tracing process. The timestamp of sending on the client end. This is used in async RPC, such as MQ. Once it is set, the consumer end would calculate the latency between sending and receiving, and tag the latency in the span by using key transmission.latency automatically.  
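For illustration only, the sketch below assembles an sw8 value from the eight fields described above and shows how a receiver could decode one of them; it also builds an sw8-x value carrying the client-side send timestamp that backs the transmission.latency tag. All concrete values (IDs, service names, the address) are hypothetical placeholders; real SkyWalking agents generate and propagate these headers internally.

```python
import base64
import time

def b64(value: str) -> str:
    """BASE64-encode a string field, as the sw8 protocol requires."""
    return base64.b64encode(value.encode("utf-8")).decode("ascii")

# Hypothetical placeholder values for the 8 required fields.
fields = [
    "1",                               # sample: 1 = report this trace to the backend
    b64("trace-id-0001"),              # trace ID (globally unique string)
    b64("segment-id-0001"),            # parent trace segment ID (globally unique string)
    "3",                               # parent span ID (integer, starts at 0)
    b64("order-service"),              # parent service (<= 50 UTF-8 characters)
    b64("order-service-instance-01"),  # parent service instance (<= 50 UTF-8 characters)
    b64("/api/orders"),                # parent endpoint, first entry span (< 150 characters)
    b64("10.0.0.12:8080"),             # target address used on the client end
]
sw8_value = "-".join(fields)           # the 8 fields joined by '-'

# Extension header: tracing mode (empty/0/1) plus the client-side send timestamp in ms.
sw8_x_value = "-".join(["0", str(int(time.time() * 1000))])

# Receiving side: split the standard header and decode a string field.
parts = sw8_value.split("-")           # safe: standard BASE64 never contains '-'
parent_service = base64.b64decode(parts[4]).decode("utf-8")

# Consumer side of an async RPC: derive the latency to tag as transmission.latency.
send_ms = int(sw8_x_value.split("-")[1])
transmission_latency_ms = int(time.time() * 1000) - send_ms
print(parent_service, transmission_latency_ms)
```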
","title":"SkyWalking Cross Process Propagation Headers Protocol","url":"/docs/main/v9.7.0/en/api/x-process-propagation-headers-v3/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/latest/en/setup/backend/metrics-exporter/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/next/en/setup/backend/metrics-exporter/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/v9.3.0/en/setup/backend/metrics-exporter/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/v9.4.0/en/setup/backend/metrics-exporter/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/v9.5.0/en/setup/backend/metrics-exporter/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/v9.6.0/en/setup/backend/metrics-exporter/"},{"content":"All SkyWalking exporter(metrics, trace, log) instructions had been moved here.\n","title":"SkyWalking exporter(metrics, trace, log) instructions had been moved [here](../exporter).","url":"/docs/main/v9.7.0/en/setup/backend/metrics-exporter/"},{"content":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the SkyWalking community!\nSkyWalking Go is an open-source Golang auto-instrument agent that provides support for distributed tracing across different frameworks within the Golang language.\nTo use SkyWalking Go, simply import the base dependencies into your code and take advantage of the -toolexec parameter in Golang to enable hybrid compilation capabilities for various frameworks in your application.\n","title":"SkyWalking Go Agent","url":"/docs/skywalking-go/latest/readme/"},{"content":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. Welcome to the SkyWalking community!\nSkyWalking Go is an open-source Golang auto-instrument agent that provides support for distributed tracing across different frameworks within the Golang language.\nTo use SkyWalking Go, simply import the base dependencies into your code and take advantage of the -toolexec parameter in Golang to enable hybrid compilation capabilities for various frameworks in your application.\n","title":"SkyWalking Go Agent","url":"/docs/skywalking-go/next/readme/"},{"content":"SkyWalking Go Agent This is the official documentation of SkyWalking Go agent. 
Welcome to the SkyWalking community!\nSkyWalking Go is an open-source Golang auto-instrument agent that provides support for distributed tracing across different frameworks within the Golang language.\nTo use SkyWalking Go, simply import the base dependencies into your code and take advantage of the -toolexec parameter in Golang to enable hybrid compilation capabilities for various frameworks in your application.\n","title":"SkyWalking Go Agent","url":"/docs/skywalking-go/v0.4.0/readme/"},{"content":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step configuration content. You can see the sample configuration files for different environments in the examples directory.\nThere is a quick view about the configuration file, and using the yaml format.\nsetup:# set up the environmentcleanup:# clean up the environmenttrigger:# generate trafficverify:# test casesSetup Support two kinds of the environment to set up the system.\nKinD setup:env:kindfile:path/to/kind.yaml # Specified kinD manifest file pathkubeconfig:path/.kube/config # The path of kubeconfigtimeout:20m # timeout durationinit-system-environment:path/to/env # Import environment filesteps:# customize steps for prepare the environment- name:customize setups # step name# one of command line or kinD manifest filecommand:command lines # use command line to setup path:/path/to/manifest.yaml # the manifest file pathwait:# how to verify the manifest is set up finish- namespace:# The pod namespaceresource:# The pod resource namelabel-selector:# The resource label selectorfor:# The wait conditionkind:import-images:# import docker images to KinD- image:version # support using env to expand image, such as `${env_key}` or `$env_key`expose-ports:# Expose resource for host access- namespace:# The resource namespaceresource:# The resource name, such as `pod/foo` or `service/foo`port:# Want to expose port from resource NOTE: The fields file and kubeconfig are mutually exclusive.\n The KinD environment follow these steps:\n [optional]Start the KinD cluster according to the config file, expose KUBECONFIG to environment for help execute kubectl in the next steps. [optional]Setup the kubeconfig field for help execute kubectl in the next steps. Load docker images from kind.import-images if needed. Apply the resources files (--manifests) or/and run the custom init command (--commands) by steps. Wait until all steps are finished and all services are ready with the timeout(second). Expose all resource ports for host access.  Import docker image If you want to import docker image from private registries, there are several ways to do this:\n Using imagePullSecrets to pull images, please take reference from document. Using kind.import-images to load images from host. kind:import-images:- skywalking/oap:${OAP_HASH}# support using environment to expand the image name  Resource Export If you want to access the resource from host, should follow these steps:\n Declare which resource and ports need to be accessible from host. setup:kind:expose-ports:- namespace:default # Need to expose resource namespaceresource:pod/foo # Resource description, such as `pod/foo` or `service/foo`port:8080# Resource port want to expose, support `\u0026lt;resource_port\u0026gt;`, `\u0026lt;bind_to_host_port\u0026gt;:\u0026lt;resource_port\u0026gt;` Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). 
trigger:# trigger with specified mapped port, the resource name replace all `/` or `-` as `_`# host format: \u0026lt;resource_name\u0026gt;_host# port format: \u0026lt;resource_name\u0026gt;_\u0026lt;container_port\u0026gt;url:http://${pod_foo_host}:${pod_foo_8080}/  Log The console output of each pod could be found in ${workDir}/logs/${namespace}/${podName}.log.\nCompose setup:env:composefile:path/to/compose.yaml # Specified docker-compose file pathtimeout:20m # Timeout durationinit-system-environment:path/to/env # Import environment filesteps:# Customize steps for prepare the environment- name:customize setups # Step namecommand:command lines # Use command line to setup The docker-compose environment follow these steps:\n Import init-system-environment file for help build service and execute steps. Each line of the file content is an environment variable, and the key value is separate by \u0026ldquo;=\u0026rdquo;. Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify.  Service Export If you want to get the service host and port mapping, should follow these steps:\n declare the port in the docker-compose service ports config. oap:image:xx.xx:1.0.0ports:# define the port- 8080 Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mappinged porturl:http://${oap_host}:${oap_8080}/  Log The console output of each service could be found in ${workDir}/logs/{serviceName}/std.log.\nTrigger After the Setup step is finished, use the Trigger step to generate traffic.\ntrigger:action:http # The action of the trigger. support HTTP invoke.interval:3s # Trigger the action every 3 seconds.times:5# The retry count before the request success.url:http://apache.skywalking.com/# Http trigger url link.method:GET # Http trigger method.headers:\u0026#34;Content-Type\u0026#34;: \u0026#34;application/json\u0026#34;\u0026#34;Authorization\u0026#34;: \u0026#34;Basic whatever\u0026#34;body:\u0026#39;{\u0026#34;k1\u0026#34;:\u0026#34;v1\u0026#34;, \u0026#34;k2\u0026#34;:\u0026#34;v2\u0026#34;}\u0026#39;The Trigger executed successfully at least once, after success, the next stage could be continued. Otherwise, there is an error and exit.\nVerify After the Trigger step is finished, running test cases.\nverify:retry:# verify with retry strategycount:10# max retry countinterval:10s # the interval between two attempts, e.g. 10s, 1m.fail-fast:true# when a case fails, whether to stop verifying other cases. This property defaults to true.concurrency:false# whether to verify cases concurrently. This property defaults to false.cases:# verify test cases- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file path- includes:# including cases- path/to/cases.yaml # cases file pathThe test cases are executed in the order of declaration from top to bottom. When the execution of a case fails and the retry strategy is exceeded, it will stop verifying other cases if fail-fast is true. 
Otherwise, the process will continue to verify other cases.\nRetry strategy The retry strategy could retry automatically on the test case failure, and restart by the failed test case.\nCase source Support two kind source to verify, one case only supports one kind source type:\n source file: verify by generated yaml format file. command: use command line output as they need to verify content, also only support yaml format.  Excepted verify template After clarifying the content that needs to be verified, you need to write content to verify the real content and ensure that the data is correct.\nYou need to use the form of Go Template to write the verification file, and the data content to be rendered comes from the real data. By verifying whether the rendered data is consistent with the real data, it is verified whether the content is consistent. You could see many test cases in this directory.\nWe use go-cmp to show the parts where excepted do not match the actual data. - prefix represents the expected data content, + prefix represents the actual data content.\nWe have done a lot of extension functions for verification functions on the original Go Template.\nExtension functions Extension functions are used to help users quickly locate the problem content and write test cases that are easier to use.\nBasic Matches Verify that the number fits the range.\n   Function Description Grammar Verify success Verify failure     gt Verify the first param is greater than second param {{gt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   ge Verify the first param is greater than or equals second param {{ge param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   lt Verify the first param is less than second param {{lt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   le Verify the first param is less than or equals second param {{le param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   regexp Verify the first param matches the second regular expression {{regexp param1 param2}} param1 \u0026lt;\u0026quot;$param1\u0026quot; does not match the pattern $param2\u0026quot;\u0026gt;   notEmpty Verify The param is not empty {{notEmpty param}} param \u0026lt;\u0026quot;\u0026quot; is empty, wanted is not empty\u0026gt;   hasPrefix Verify The string param has the same prefix. {{hasPrefix param1 param2}} true false   hasSuffix Verify The string param has the same suffix. 
{{hasSuffix param1 param2}} true false    List Matches Verify the data in the condition list, Currently, it is only supported when all the conditions in the list are executed, it is considered as successful.\nHere is an example, It\u0026rsquo;s means the list values must have value is greater than 0, also have value greater than 1, Otherwise verify is failure.\n{{- contains .list }}- key:{{gt .value 0 }}- key:{{gt .value 1 }}{{- end }}Encoding In order to make the program easier for users to read and use, some code conversions are provided.\n   Function Description Grammar Result     b64enc Base64 encode {{ b64enc \u0026ldquo;Foo\u0026rdquo; }} Zm9v   sha256enc Sha256 encode {{ sha256enc \u0026ldquo;Foo\u0026rdquo; }} 1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa   sha512enc Sha512 encode {{ sha512enc \u0026ldquo;Foo\u0026rdquo; }} 4abcd2639957cb23e33f63d70659b602a5923fafcfd2768ef79b0badea637e5c837161aa101a557a1d4deacbd912189e2bb11bf3c0c0c70ef7797217da7e8207    Reuse cases You could include multiple cases into one single E2E verify, It\u0026rsquo;s helpful for reusing the same verify cases.\nHere is the reused verify cases, and using includes configuration item to include this into E2E config.\ncases:- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file pathCleanup After the E2E finished, how to clean up the environment.\ncleanup:on:always # Clean up strategyIf the on option under cleanup is not set, it will be automatically set to always if there is environment variable CI=true, which is present on many popular CI services, such as GitHub Actions, CircleCI, etc., otherwise it will be set to success, so the testing environment can be preserved when tests failed in your local machine.\nAll available strategies:\n always: No matter the execution result is success or failure, cleanup will be performed. success: Only when the execution succeeds. failure: Only when the execution failed. never: Never clean up the environment.  ","title":"SkyWalking Infra E2E Configuration Guide","url":"/docs/skywalking-infra-e2e/latest/en/setup/configuration-file/"},{"content":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step configuration content. 
You can see the sample configuration files for different environments in the examples directory.\nThere is a quick view about the configuration file, and using the yaml format.\nsetup:# set up the environmentcleanup:# clean up the environmenttrigger:# generate trafficverify:# test casesSetup Support two kinds of the environment to set up the system.\nKinD setup:env:kindfile:path/to/kind.yaml # Specified kinD manifest file pathkubeconfig:path/.kube/config # The path of kubeconfigtimeout:20m # timeout durationinit-system-environment:path/to/env # Import environment filesteps:# customize steps for prepare the environment- name:customize setups # step name# one of command line or kinD manifest filecommand:command lines # use command line to setup path:/path/to/manifest.yaml # the manifest file pathwait:# how to verify the manifest is set up finish- namespace:# The pod namespaceresource:# The pod resource namelabel-selector:# The resource label selectorfor:# The wait conditionkind:import-images:# import docker images to KinD- image:version # support using env to expand image, such as `${env_key}` or `$env_key`expose-ports:# Expose resource for host access- namespace:# The resource namespaceresource:# The resource name, such as `pod/foo` or `service/foo`port:# Want to expose port from resource NOTE: The fields file and kubeconfig are mutually exclusive.\n The KinD environment follow these steps:\n [optional]Start the KinD cluster according to the config file, expose KUBECONFIG to environment for help execute kubectl in the next steps. [optional]Setup the kubeconfig field for help execute kubectl in the next steps. Load docker images from kind.import-images if needed. Apply the resources files (--manifests) or/and run the custom init command (--commands) by steps. Wait until all steps are finished and all services are ready with the timeout(second). Expose all resource ports for host access.  Import docker image If you want to import docker image from private registries, there are several ways to do this:\n Using imagePullSecrets to pull images, please take reference from document. Using kind.import-images to load images from host. kind:import-images:- skywalking/oap:${OAP_HASH}# support using environment to expand the image name  Resource Export If you want to access the resource from host, should follow these steps:\n Declare which resource and ports need to be accessible from host. setup:kind:expose-ports:- namespace:default # Need to expose resource namespaceresource:pod/foo # Resource description, such as `pod/foo` or `service/foo`port:8080# Resource port want to expose, support `\u0026lt;resource_port\u0026gt;`, `\u0026lt;bind_to_host_port\u0026gt;:\u0026lt;resource_port\u0026gt;` Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). 
trigger:# trigger with specified mapped port, the resource name replace all `/` or `-` as `_`# host format: \u0026lt;resource_name\u0026gt;_host# port format: \u0026lt;resource_name\u0026gt;_\u0026lt;container_port\u0026gt;url:http://${pod_foo_host}:${pod_foo_8080}/  Log The console output of each pod could be found in ${workDir}/logs/${namespace}/${podName}.log.\nCompose setup:env:composefile:path/to/compose.yaml # Specified docker-compose file pathtimeout:20m # Timeout durationinit-system-environment:path/to/env # Import environment filesteps:# Customize steps for prepare the environment- name:customize setups # Step namecommand:command lines # Use command line to setup The docker-compose environment follow these steps:\n Import init-system-environment file for help build service and execute steps. Each line of the file content is an environment variable, and the key value is separate by \u0026ldquo;=\u0026rdquo;. Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify.  Service Export If you want to get the service host and port mapping, should follow these steps:\n declare the port in the docker-compose service ports config. oap:image:xx.xx:1.0.0ports:# define the port- 8080 Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mappinged porturl:http://${oap_host}:${oap_8080}/  Log The console output of each service could be found in ${workDir}/logs/{serviceName}/std.log.\nTrigger After the Setup step is finished, use the Trigger step to generate traffic.\ntrigger:action:http # The action of the trigger. support HTTP invoke.interval:3s # Trigger the action every 3 seconds.times:5# The retry count before the request success.url:http://apache.skywalking.com/# Http trigger url link.method:GET # Http trigger method.headers:\u0026#34;Content-Type\u0026#34;: \u0026#34;application/json\u0026#34;\u0026#34;Authorization\u0026#34;: \u0026#34;Basic whatever\u0026#34;body:\u0026#39;{\u0026#34;k1\u0026#34;:\u0026#34;v1\u0026#34;, \u0026#34;k2\u0026#34;:\u0026#34;v2\u0026#34;}\u0026#39;The Trigger executed successfully at least once, after success, the next stage could be continued. Otherwise, there is an error and exit.\nVerify After the Trigger step is finished, running test cases.\nverify:retry:# verify with retry strategycount:10# max retry countinterval:10s # the interval between two attempts, e.g. 10s, 1m.fail-fast:true# when a case fails, whether to stop verifying other cases. This property defaults to true.concurrency:false# whether to verify cases concurrently. This property defaults to false.cases:# verify test cases- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file path- includes:# including cases- path/to/cases.yaml # cases file pathThe test cases are executed in the order of declaration from top to bottom. When the execution of a case fails and the retry strategy is exceeded, it will stop verifying other cases if fail-fast is true. 
Otherwise, the process will continue to verify other cases.\nRetry strategy The retry strategy could retry automatically on the test case failure, and restart by the failed test case.\nCase source Support two kind source to verify, one case only supports one kind source type:\n source file: verify by generated yaml format file. command: use command line output as they need to verify content, also only support yaml format.  Excepted verify template After clarifying the content that needs to be verified, you need to write content to verify the real content and ensure that the data is correct.\nYou need to use the form of Go Template to write the verification file, and the data content to be rendered comes from the real data. By verifying whether the rendered data is consistent with the real data, it is verified whether the content is consistent. You could see many test cases in this directory.\nWe use go-cmp to show the parts where excepted do not match the actual data. - prefix represents the expected data content, + prefix represents the actual data content.\nWe have done a lot of extension functions for verification functions on the original Go Template.\nExtension functions Extension functions are used to help users quickly locate the problem content and write test cases that are easier to use.\nBasic Matches Verify that the number fits the range.\n   Function Description Grammar Verify success Verify failure     gt Verify the first param is greater than second param {{gt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   ge Verify the first param is greater than or equals second param {{ge param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   lt Verify the first param is less than second param {{lt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   le Verify the first param is less than or equals second param {{le param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   regexp Verify the first param matches the second regular expression {{regexp param1 param2}} param1 \u0026lt;\u0026quot;$param1\u0026quot; does not match the pattern $param2\u0026quot;\u0026gt;   notEmpty Verify The param is not empty {{notEmpty param}} param \u0026lt;\u0026quot;\u0026quot; is empty, wanted is not empty\u0026gt;   hasPrefix Verify The string param has the same prefix. {{hasPrefix param1 param2}} true false   hasSuffix Verify The string param has the same suffix. 
{{hasSuffix param1 param2}} true false    List Matches Verify the data in the condition list, Currently, it is only supported when all the conditions in the list are executed, it is considered as successful.\nHere is an example, It\u0026rsquo;s means the list values must have value is greater than 0, also have value greater than 1, Otherwise verify is failure.\n{{- contains .list }}- key:{{gt .value 0 }}- key:{{gt .value 1 }}{{- end }}Encoding In order to make the program easier for users to read and use, some code conversions are provided.\n   Function Description Grammar Result     b64enc Base64 encode {{ b64enc \u0026ldquo;Foo\u0026rdquo; }} Zm9v   sha256enc Sha256 encode {{ sha256enc \u0026ldquo;Foo\u0026rdquo; }} 1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa   sha512enc Sha512 encode {{ sha512enc \u0026ldquo;Foo\u0026rdquo; }} 4abcd2639957cb23e33f63d70659b602a5923fafcfd2768ef79b0badea637e5c837161aa101a557a1d4deacbd912189e2bb11bf3c0c0c70ef7797217da7e8207    Reuse cases You could include multiple cases into one single E2E verify, It\u0026rsquo;s helpful for reusing the same verify cases.\nHere is the reused verify cases, and using includes configuration item to include this into E2E config.\ncases:- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file pathCleanup After the E2E finished, how to clean up the environment.\ncleanup:on:always # Clean up strategyIf the on option under cleanup is not set, it will be automatically set to always if there is environment variable CI=true, which is present on many popular CI services, such as GitHub Actions, CircleCI, etc., otherwise it will be set to success, so the testing environment can be preserved when tests failed in your local machine.\nAll available strategies:\n always: No matter the execution result is success or failure, cleanup will be performed. success: Only when the execution succeeds. failure: Only when the execution failed. never: Never clean up the environment.  ","title":"SkyWalking Infra E2E Configuration Guide","url":"/docs/skywalking-infra-e2e/next/en/setup/configuration-file/"},{"content":"SkyWalking Infra E2E Configuration Guide The configuration file is used to integrate all the step configuration content. 
You can see the sample configuration files for different environments in the examples directory.\nThere is a quick view about the configuration file, and using the yaml format.\nsetup:# set up the environmentcleanup:# clean up the environmenttrigger:# generate trafficverify:# test casesSetup Support two kinds of the environment to set up the system.\nKinD setup:env:kindfile:path/to/kind.yaml # Specified kinD manifest file pathkubeconfig:path/.kube/config # The path of kubeconfigtimeout:20m # timeout durationinit-system-environment:path/to/env # Import environment filesteps:# customize steps for prepare the environment- name:customize setups # step name# one of command line or kinD manifest filecommand:command lines # use command line to setup path:/path/to/manifest.yaml # the manifest file pathwait:# how to verify the manifest is set up finish- namespace:# The pod namespaceresource:# The pod resource namelabel-selector:# The resource label selectorfor:# The wait conditionkind:import-images:# import docker images to KinD- image:version # support using env to expand image, such as `${env_key}` or `$env_key`expose-ports:# Expose resource for host access- namespace:# The resource namespaceresource:# The resource name, such as `pod/foo` or `service/foo`port:# Want to expose port from resource NOTE: The fields file and kubeconfig are mutually exclusive.\n The KinD environment follow these steps:\n [optional]Start the KinD cluster according to the config file, expose KUBECONFIG to environment for help execute kubectl in the next steps. [optional]Setup the kubeconfig field for help execute kubectl in the next steps. Load docker images from kind.import-images if needed. Apply the resources files (--manifests) or/and run the custom init command (--commands) by steps. Wait until all steps are finished and all services are ready with the timeout(second). Expose all resource ports for host access.  Import docker image If you want to import docker image from private registries, there are several ways to do this:\n Using imagePullSecrets to pull images, please take reference from document. Using kind.import-images to load images from host. kind:import-images:- skywalking/oap:${OAP_HASH}# support using environment to expand the image name  Resource Export If you want to access the resource from host, should follow these steps:\n Declare which resource and ports need to be accessible from host. setup:kind:expose-ports:- namespace:default # Need to expose resource namespaceresource:pod/foo # Resource description, such as `pod/foo` or `service/foo`port:8080# Resource port want to expose, support `\u0026lt;resource_port\u0026gt;`, `\u0026lt;bind_to_host_port\u0026gt;:\u0026lt;resource_port\u0026gt;` Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). 
trigger:# trigger with specified mapped port, the resource name replace all `/` or `-` as `_`# host format: \u0026lt;resource_name\u0026gt;_host# port format: \u0026lt;resource_name\u0026gt;_\u0026lt;container_port\u0026gt;url:http://${pod_foo_host}:${pod_foo_8080}/  Log The console output of each pod could be found in ${workDir}/logs/${namespace}/${podName}.log.\nCompose setup:env:composefile:path/to/compose.yaml # Specified docker-compose file pathtimeout:20m # Timeout durationinit-system-environment:path/to/env # Import environment filesteps:# Customize steps for prepare the environment- name:customize setups # Step namecommand:command lines # Use command line to setup The docker-compose environment follow these steps:\n Import init-system-environment file for help build service and execute steps. Each line of the file content is an environment variable, and the key value is separate by \u0026ldquo;=\u0026rdquo;. Start the docker-compose services. Check the services' healthiness. Wait until all services are ready according to the interval, etc. Execute command to set up the testing environment or help verify.  Service Export If you want to get the service host and port mapping, should follow these steps:\n declare the port in the docker-compose service ports config. oap:image:xx.xx:1.0.0ports:# define the port- 8080 Follow this format to get the host and port mapping by the environment, and it\u0026rsquo;s available in steps(trigger, verify). trigger:# trigger with specified mappinged porturl:http://${oap_host}:${oap_8080}/  Log The console output of each service could be found in ${workDir}/logs/{serviceName}/std.log.\nTrigger After the Setup step is finished, use the Trigger step to generate traffic.\ntrigger:action:http # The action of the trigger. support HTTP invoke.interval:3s # Trigger the action every 3 seconds.times:5# The retry count before the request success.url:http://apache.skywalking.com/# Http trigger url link.method:GET # Http trigger method.headers:\u0026#34;Content-Type\u0026#34;: \u0026#34;application/json\u0026#34;\u0026#34;Authorization\u0026#34;: \u0026#34;Basic whatever\u0026#34;body:\u0026#39;{\u0026#34;k1\u0026#34;:\u0026#34;v1\u0026#34;, \u0026#34;k2\u0026#34;:\u0026#34;v2\u0026#34;}\u0026#39;The Trigger executed successfully at least once, after success, the next stage could be continued. Otherwise, there is an error and exit.\nVerify After the Trigger step is finished, running test cases.\nverify:retry:# verify with retry strategycount:10# max retry countinterval:10s # the interval between two attempts, e.g. 10s, 1m.fail-fast:true# when a case fails, whether to stop verifying other cases. This property defaults to true.concurrency:false# whether to verify cases concurrently. This property defaults to false.cases:# verify test cases- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file path- includes:# including cases- path/to/cases.yaml # cases file pathThe test cases are executed in the order of declaration from top to bottom. When the execution of a case fails and the retry strategy is exceeded, it will stop verifying other cases if fail-fast is true. 
Otherwise, the process will continue to verify other cases.\nRetry strategy The retry strategy could retry automatically on the test case failure, and restart by the failed test case.\nCase source Support two kind source to verify, one case only supports one kind source type:\n source file: verify by generated yaml format file. command: use command line output as they need to verify content, also only support yaml format.  Excepted verify template After clarifying the content that needs to be verified, you need to write content to verify the real content and ensure that the data is correct.\nYou need to use the form of Go Template to write the verification file, and the data content to be rendered comes from the real data. By verifying whether the rendered data is consistent with the real data, it is verified whether the content is consistent. You could see many test cases in this directory.\nWe use go-cmp to show the parts where excepted do not match the actual data. - prefix represents the expected data content, + prefix represents the actual data content.\nWe have done a lot of extension functions for verification functions on the original Go Template.\nExtension functions Extension functions are used to help users quickly locate the problem content and write test cases that are easier to use.\nBasic Matches Verify that the number fits the range.\n   Function Description Grammar Verify success Verify failure     gt Verify the first param is greater than second param {{gt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   ge Verify the first param is greater than or equals second param {{ge param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   lt Verify the first param is less than second param {{lt param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   le Verify the first param is less than or equals second param {{le param1 param2}} param1 \u0026lt;wanted gt $param2, but was $param1\u0026gt;   regexp Verify the first param matches the second regular expression {{regexp param1 param2}} param1 \u0026lt;\u0026quot;$param1\u0026quot; does not match the pattern $param2\u0026quot;\u0026gt;   notEmpty Verify The param is not empty {{notEmpty param}} param \u0026lt;\u0026quot;\u0026quot; is empty, wanted is not empty\u0026gt;   hasPrefix Verify The string param has the same prefix. {{hasPrefix param1 param2}} true false   hasSuffix Verify The string param has the same suffix. 
{{hasSuffix param1 param2}} true false    List Matches Verify the data in the condition list, Currently, it is only supported when all the conditions in the list are executed, it is considered as successful.\nHere is an example, It\u0026rsquo;s means the list values must have value is greater than 0, also have value greater than 1, Otherwise verify is failure.\n{{- contains .list }}- key:{{gt .value 0 }}- key:{{gt .value 1 }}{{- end }}Encoding In order to make the program easier for users to read and use, some code conversions are provided.\n   Function Description Grammar Result     b64enc Base64 encode {{ b64enc \u0026ldquo;Foo\u0026rdquo; }} Zm9v   sha256enc Sha256 encode {{ sha256enc \u0026ldquo;Foo\u0026rdquo; }} 1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa   sha512enc Sha512 encode {{ sha512enc \u0026ldquo;Foo\u0026rdquo; }} 4abcd2639957cb23e33f63d70659b602a5923fafcfd2768ef79b0badea637e5c837161aa101a557a1d4deacbd912189e2bb11bf3c0c0c70ef7797217da7e8207    Reuse cases You could include multiple cases into one single E2E verify, It\u0026rsquo;s helpful for reusing the same verify cases.\nHere is the reused verify cases, and using includes configuration item to include this into E2E config.\ncases:- actual:path/to/actual.yaml # verify by actual file pathexpected:path/to/expected.yaml # excepted content file path- query:echo \u0026#39;foo\u0026#39; # verify by command execute outputexpected:path/to/expected.yaml # excepted content file pathCleanup After the E2E finished, how to clean up the environment.\ncleanup:on:always # Clean up strategyIf the on option under cleanup is not set, it will be automatically set to always if there is environment variable CI=true, which is present on many popular CI services, such as GitHub Actions, CircleCI, etc., otherwise it will be set to success, so the testing environment can be preserved when tests failed in your local machine.\nAll available strategies:\n always: No matter the execution result is success or failure, cleanup will be performed. success: Only when the execution succeeds. failure: Only when the execution failed. never: Never clean up the environment.  ","title":"SkyWalking Infra E2E Configuration Guide","url":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/configuration-file/"},{"content":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for local debugging and operation. GitHub Action: Suitable for automated execution in GitHub projects.  
Command Through commands, you can execute a complete Controller.\n# e2e.yaml configuration file in current directory e2e run # or  # Specified the e2e.yaml file path e2e run -c /path/to/the/test/e2e.yaml You can also run each step separately from the command line; these commands all work by reading the same configuration.\ne2e setup e2e trigger e2e verify e2e cleanup GitHub Action To use skywalking-infra-e2e in GitHub Actions, add a step in your GitHub workflow.\nThe working directory, which contains the environment variables and container logs of the environment, can be uploaded as a GitHub Action Artifact after the task is completed.\n- name:Run E2E Testuses:apache/skywalking-infra-e2e@main # always prefer to use a revision instead of `main`.with:e2e-file:e2e.yaml # (required)need to run E2E file pathlog-dir:/path/to/log/dir # (Optional)Use `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;_\u0026lt;matrix_value\u0026gt;`(if have GHA matrix) or `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;` in GHA, and output logs into `\u0026lt;work_dir\u0026gt;/logs` out of GHA env, such as running locally.If you want to upload the log directory to the GitHub Action Artifact when this E2E test fails, you can define the following content in your GitHub Action Job.\n- name:Upload E2E Loguses:actions/upload-artifact@v2if:${{ failure() }} # Only upload the artifact when E2E testing failurewith:name:e2e-logpath:\u0026#34;${{ env.SW_INFRA_E2E_LOG_DIR }}\u0026#34;# The SkyWalking Infra E2E action sets SW_INFRA_E2E_LOG_DIR automatically. ","title":"SkyWalking Infra E2E Execute Guide","url":"/docs/skywalking-infra-e2e/latest/en/setup/run-e2e-tests/"},{"content":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for local debugging and operation. GitHub Action: Suitable for automated execution in GitHub projects.  Command Through commands, you can execute a complete Controller.\n# e2e.yaml configuration file in current directory e2e run # or  # Specified the e2e.yaml file path e2e run -c /path/to/the/test/e2e.yaml You can also run each step separately from the command line; these commands all work by reading the same configuration.\ne2e setup e2e trigger e2e verify e2e cleanup GitHub Action To use skywalking-infra-e2e in GitHub Actions, add a step in your GitHub workflow.\nThe working directory, which contains the environment variables and container logs of the environment, can be uploaded as a GitHub Action Artifact after the task is completed.\n- name:Run E2E Testuses:apache/skywalking-infra-e2e@main # always prefer to use a revision instead of `main`.with:e2e-file:e2e.yaml # (required)need to run E2E file pathlog-dir:/path/to/log/dir # (Optional)Use `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;_\u0026lt;matrix_value\u0026gt;`(if have GHA matrix) or `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;` in GHA, and output logs into `\u0026lt;work_dir\u0026gt;/logs` out of GHA env, such as running locally.If you want to upload the log directory to the GitHub Action Artifact when this E2E test fails, you can define the following content in your GitHub Action Job.\n- name:Upload E2E Loguses:actions/upload-artifact@v2if:${{ failure() }} # Only upload the artifact when E2E testing failurewith:name:e2e-logpath:\u0026#34;${{ env.SW_INFRA_E2E_LOG_DIR }}\u0026#34;# The SkyWalking Infra E2E action sets SW_INFRA_E2E_LOG_DIR automatically. 
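For local debugging, the documented step commands can also be chained from a small script. The sketch below is not part of the skywalking-infra-e2e tool itself; it assumes the e2e binary is on PATH and an e2e.yaml sits in the current working directory (each step command reads that configuration), and it simply stops at the first failing step. e2e run remains the built-in way to execute the whole Controller in one go.

```python
import subprocess
import sys

# The documented step commands, in the order the guide lists them.
STEPS = ["setup", "trigger", "verify", "cleanup"]

def run_steps() -> int:
    """Run each e2e step in sequence; stop at the first failure."""
    for step in STEPS:
        print(f"==> e2e {step}")
        code = subprocess.run(["e2e", step]).returncode
        if code != 0:
            print(f"e2e {step} exited with code {code}", file=sys.stderr)
            return code
    return 0

if __name__ == "__main__":
    sys.exit(run_steps())
```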
","title":"SkyWalking Infra E2E Execute Guide","url":"/docs/skywalking-infra-e2e/next/en/setup/run-e2e-tests/"},{"content":"SkyWalking Infra E2E Execute Guide There are two ways to perform E2E Testing:\n Command: Suitable for local debugging and operation. GitHub Action: Suitable for automated execution in GitHub projects.  Command Through commands, you can execute a complete Controller.\n# e2e.yaml configuration file in current directory e2e run # or  # Specified the e2e.yaml file path e2e run -c /path/to/the/test/e2e.yaml Also, could run the separate step in the command line, these commands are all done by reading the configuration.\ne2e setup e2e trigger e2e verify e2e cleanup GitHub Action To use skywalking-infra-e2e in GitHub Actions, add a step in your GitHub workflow.\nThe working directory could be uploaded to GitHub Action Artifact after the task is completed, which contains environment variables and container logs in the environment.\n- name:Run E2E Testuses:apache/skywalking-infra-e2e@main # always prefer to use a revision instead of `main`.with:e2e-file:e2e.yaml # (required)need to run E2E file pathlog-dir:/path/to/log/dir # (Optional)Use `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;_\u0026lt;matrix_value\u0026gt;`(if have GHA matrix) or `\u0026lt;work_dir\u0026gt;/logs/\u0026lt;job_name\u0026gt;` in GHA, and output logs into `\u0026lt;work_dir\u0026gt;/logs` out of GHA env, such as running locally.If you want to upload the log directory to the GitHub Action Artifact when this E2E test failure, you could define the below content in your GitHub Action Job.\n- name:Upload E2E Loguses:actions/upload-artifact@v2if:${{ failure() }} # Only upload the artifact when E2E testing failurewith:name:e2e-logpath:\u0026#34;${{ env.SW_INFRA_E2E_LOG_DIR }}\u0026#34;# The SkyWalking Infra E2E action sets SW_INFRA_E2E_LOG_DIR automatically. ","title":"SkyWalking Infra E2E Execute Guide","url":"/docs/skywalking-infra-e2e/v1.3.0/en/setup/run-e2e-tests/"},{"content":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","title":"SkyWalking Java Agent","url":"/docs/skywalking-java/latest/readme/"},{"content":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","title":"SkyWalking Java Agent","url":"/docs/skywalking-java/next/readme/"},{"content":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","title":"SkyWalking Java Agent","url":"/docs/skywalking-java/v9.0.0/readme/"},{"content":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. 
Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","title":"SkyWalking Java Agent","url":"/docs/skywalking-java/v9.1.0/readme/"},{"content":"SkyWalking Java Agent This is the official documentation of SkyWalking Java agent. Welcome to the SkyWalking community!\nThe Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging/event abilities for Java projects.\nIn here, you could learn how to set up Java agent for the Java Runtime Envrionment services.\n","title":"SkyWalking Java Agent","url":"/docs/skywalking-java/v9.2.0/readme/"},{"content":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  
Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. 
Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. 
SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 docker.push.java21 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  ","title":"SkyWalking Java Agent Release Guide","url":"/docs/skywalking-java/latest/en/contribution/release-java-agent/"},{"content":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. 
Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. 
Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. 
Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 docker.push.java21 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  ","title":"SkyWalking Java Agent Release Guide","url":"/docs/skywalking-java/next/en/contribution/release-java-agent/"},{"content":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  
Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. 
(No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  
","title":"SkyWalking Java Agent Release Guide","url":"/docs/skywalking-java/v9.0.0/en/contribution/release-java-agent/"},{"content":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. 
apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. 
LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking, which provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/AsfSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  ","title":"SkyWalking Java Agent Release Guide","url":"/docs/skywalking-java/v9.1.0/en/contribution/release-java-agent/"},{"content":"Apache SkyWalking Java Agent Release Guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. 
If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install(this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -DskipTests -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code and binary package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512 for source code tar. Use maven package to build the agent tar. Execute gpg and shasum 512 for binary tar.  apache-skywalking-java-agent-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder. apache-skywalking-java-agent-x.y.z.tgz and files ending with .asc and .sha512 may be found in the tools/releasing/apache-skywalking-java-agent-x.y.z folder.\nUpload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/java-agent/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512. Upload the distribution package to the folder with files ending with .asc and .sha512.  Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking Java Agent x.y.z test build available Mail content: The test build of Java Agent x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking-java/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. 
Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking Java Agent version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking Java Agent version x.y.z. Release notes: * https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/java-agent/xxxx * sha512 checksums Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking-java/tree/(Git Commit ID) * Git submodule * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : \u0026gt; ./mvnw clean package Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-java-agent-x.y.z-src.tar.gz, apache-skywalking-java-agent-x.y.z.tar.gz) are found in https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-java-agent-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-java-agent-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-java-agent-x.y.z-src.tar.gz). Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/java-agent/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/java-agent/x.y.z https://dist.apache.org/repos/dist/release/skywalking/java-agent .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/java-agent/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/java-agent/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking Java Agent x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the release of Apache SkyWalking Java Agent x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. The Java Agent for Apache SkyWalking provides the native tracing/metrics/logging abilities for Java projects. This release contains a number of new features, bug fixes and improvements compared to version a.b.c (last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking-java/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking-java - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalking.apache.org - Apache SkyWalking Team Release Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking-java.git cd skywalking-java curl -O https://dist.apache.org/repos/dist/release/skywalking/java-agent/$SW_VERSION/apache-skywalking-java-agent-$SW_VERSION.tgz tar -xzvf apache-skywalking-java-agent-$SW_VERSION.tgz export NAME=skywalking-java-agent export HUB=apache export TAG=$SW_VERSION make docker.push.alpine docker.push.java8 docker.push.java11 docker.push.java17 docker.push.java21 Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/java-agent.  
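For reference, the removal can be done directly against the svn URL (a minimal sketch; a.b.c stands for a superseded version and the commit message is only illustrative):
# a remote svn delete commits immediately, so double-check the version before running
svn delete -m 'remove superseded release a.b.c, now served from archive.apache.org' https://dist.apache.org/repos/dist/release/skywalking/java-agent/a.b.c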
","title":"SkyWalking Java Agent Release Guide","url":"/docs/skywalking-java/v9.2.0/en/contribution/release-java-agent/"},{"content":"SkyWalking Kubernetes Event Exporter User Guide SkyWalking Kubernetes Event Exporter is able to watch, filter, and send Kubernetes events into the Apache SkyWalking backend.\nDemo Step 1: Create a Local Kubernetes Cluster Please follow step 1 to 3 in getting started to create a cluster.\nStep 2: Deploy OAP server and Event Exporter Create the skywalking-system namespace.\n$ kubectl create namespace skywalking-system Deploy an OAP server and an event exporter.\ncat \u0026lt;\u0026lt;EOF | kubectl apply -f - apiVersion: operator.skywalking.apache.org/v1alpha1 kind: OAPServer metadata: name: skywalking-system namespace: skywalking-system spec: version: 9.5.0 instances: 1 image: apache/skywalking-oap-server:9.5.0 service: template: type: ClusterIP --- apiVersion: operator.skywalking.apache.org/v1alpha1 kind: EventExporter metadata: name: skywalking-system namespace: skywalking-system spec: replicas: 1 config: | filters: - reason: \u0026#34;\u0026#34; message: \u0026#34;\u0026#34; minCount: 1 type: \u0026#34;\u0026#34; action: \u0026#34;\u0026#34; kind: \u0026#34;Pod|Service\u0026#34; namespace: \u0026#34;^skywalking-system$\u0026#34; name: \u0026#34;\u0026#34; service: \u0026#34;[^\\\\s]{1,}\u0026#34; exporters: - skywalking exporters: skywalking: template: source: service: \u0026#34;{{ .Service.Name }}\u0026#34; serviceInstance: \u0026#34;{{ .Pod.Name }}\u0026#34; endpoint: \u0026#34;\u0026#34; message: \u0026#34;{{ .Event.Message }}\u0026#34; address: \u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34; EOF Wait until both components are ready\u0026hellip;\n$ kubectl get pod -n skywalking-system NAME READY STATUS RESTARTS AGE skywalking-system-eventexporter-566db46fb6-npx8v 1/1 Running 0 50s skywalking-system-oap-68bd877f57-zs8hw 1/1 Running 0 50s Step 3: Check Reported Events We can verify k8s events is reported to the OAP server by using skywalking-cli.\nFirst, port-forward the OAP http service to your local machine.\n$ kubectl port-forward svc/skywalking-system-oap 12800:12800 -n skywalking-system Next, use swctl to list reported events in YAML format.\n$ swctl --display yaml event ls The output should contain k8s events of the OAP server.\nevents:- uuid:1d5bfe48-bc8d-4f5a-9680-188f59793459source:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Pulledtype:Normalmessage:Successfully pulled image \u0026#34;apache/skywalking-oap-server:9.5.0\u0026#34; in 6m4.108914335sparameters:[]starttime:1713793327000endtime:1713793327000layer:K8S- uuid:f576f6ad-748d-4cec -9260-6587c145550esource:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Createdtype:Normalmessage:Created container oapparameters:[]starttime:1713793327000endtime:1713793327000layer:K8S- uuid:0cec5b55-4cb0-4ff7-a670-a097609c531fsource:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Startedtype:Normalmessage:Started container oapparameters:[]starttime:1713793327000endtime:1713793327000layer:K8S- uuid:28f0d004-befe-4c27-a7b7-dfdc4dd755fasource:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Pullingtype:Normalmessage:Pulling image 
\u0026#34;apache/skywalking-oap-server:9.5.0\u0026#34;parameters:[]starttime:1713792963000endtime:1713792963000layer:K8S- uuid:6d766801-5057-42c0-aa63-93ce1e201418source:service:skywalking-system-oapserviceinstance:skywalking-system-oap-68bd877f57-cvkjbendpoint:\u0026#34;\u0026#34;name:Scheduledtype:Normalmessage:Successfully assigned skywalking-system/skywalking-system-oap-68bd877f57-cvkjbto kind-workerparameters:[]starttime:1713792963000endtime:1713792963000layer:K8SWe can also verify by checking logs of the event exporter.\nkubectl logs -f skywalking-system-eventexporter-566db46fb6-npx8v -n skywalking-system ... DEBUG done: rendered event is: uuid:\u0026#34;8d8c2bd1-1812-4b0c-8237-560688366280\u0026#34; source:{service:\u0026#34;skywalking-system-oap\u0026#34; serviceInstance:\u0026#34;skywalking-system-oap-68bd877f57-zs8hw\u0026#34;} name:\u0026#34;Started\u0026#34; message:\u0026#34;Started container oap\u0026#34; startTime:1713795214000 endTime:1713795214000 layer:\u0026#34;K8S\u0026#34; Spec    name description default value     image Docker image of the event exporter. apache/skywalking-kubernetes-event-exporter:latest   replicas Number of event exporter pods. 1   config Configuration of filters and exporters in YAML format. \u0026quot;\u0026quot;    Please note: if you ignore the config field, no filters or exporter will be created.\nThis is because the EventExporter controller creates a configMap for all config values and attach the configMap to the event exporter container as configuration file. Ignoring the config field means an empty configuration file (with content \u0026quot;\u0026quot;) is provided to the event exporter.\nStatus    name description     availableReplicas Total number of available event exporter pods.   conditions Latest available observations of the underlying deployment\u0026rsquo;s current state   configMapName Name of the underlying configMap.    Configuration The event exporter supports reporting specific events by different exporters. We can add filter configs to choose which events we are interested in, and include exporter names in each filter config to tell event exporter how to export filtered events.\nAn example configuration is listed below:\nfilters:- reason:\u0026#34;\u0026#34;message:\u0026#34;\u0026#34;minCount:1type:\u0026#34;\u0026#34;action:\u0026#34;\u0026#34;kind:\u0026#34;Pod|Service\u0026#34;namespace:\u0026#34;^default$\u0026#34;name:\u0026#34;\u0026#34;service:\u0026#34;[^\\\\s]{1,}\u0026#34;exporters:- skywalkingexporters:skywalking:template:source:service:\u0026#34;{{ .Service.Name }}\u0026#34;serviceInstance:\u0026#34;{{ .Pod.Name }}\u0026#34;endpoint:\u0026#34;\u0026#34;message:\u0026#34;{{ .Event.Message }}\u0026#34;address:\u0026#34;skywalking-system-oap.skywalking-system:11800\u0026#34;Filter Config    name description example     reason Filter events of the specified reason, regular expression like \u0026quot;Killing\\|Killed\u0026quot; is supported. \u0026quot;\u0026quot;   message Filter events of the specified message, regular expression like \u0026quot;Pulling container.*\u0026quot; is supported. \u0026quot;\u0026quot;   minCount Filter events whose count is \u0026gt;= the specified value. 1   type Filter events of the specified type, regular expression like \u0026quot;Normal\\|Error\u0026quot; is supported. \u0026quot;\u0026quot;   action Filter events of the specified action, regular expression is supported. 
\u0026quot;\u0026quot;   kind Filter events of the specified kind, regular expression like \u0026quot;Pod\\|Service\u0026quot; is supported. \u0026quot;Pod\\|Service\u0026quot;   namespace Filter events from the specified namespace, regular expression like \u0026quot;default\\|bookinfo\u0026quot; is supported, empty means all namespaces. \u0026quot;^default$\u0026quot;   name Filter events of the specified involved object name, regular expression like \u0026quot;.*bookinfo.*\u0026quot; is supported. \u0026quot;\u0026quot;   service Filter events belonging to services whose name is not empty. \u0026quot;[^\\\\s]{1,}\u0026quot;   exporters Events satisfy this filter can be exported into several exporters that are defined below. [\u0026quot;skywalking\u0026quot;]    Skywalking Exporter Config SkyWalking exporter exports the events into Apache SkyWalking OAP server using grpc.\n   name description example     address The SkyWalking backend address where this exporter will export to. \u0026quot;skywalking-system-oap.skywalking-system:11800\u0026quot;   enableTLS Whether to use TLS for grpc server connection validation.  If TLS is enabled, the trustedCertPath is required, but clientCertPath and clientKeyPath are optional. false   clientCertPath Path of the X.509 certificate file. \u0026quot;\u0026quot;   clientKeyPath Path of the X.509 private key file. \u0026quot;\u0026quot;   trustedCertPath Path of the root certificate file. \u0026quot;\u0026quot;   insecureSkipVerify Whether a client verifies the server\u0026rsquo;s certificate chain and host name. Check tls.Config for more details. false   template The event template of SkyWalking exporter, it can be composed of metadata like Event, Pod, and Service.    template.source Event source information.    template.source.service Service name, can be a template string. \u0026quot;{{ .Service.Name }}\u0026quot;   template.source.serviceInstance Service instance name, can be a template string. \u0026quot;{{ .Pod.Name }}\u0026quot;   template.source.endpoint Endpoint, can be a template string. \u0026quot;\u0026quot;   template.message Message format, can be a template string. \u0026quot;{{ .Event.Message }}\u0026quot;    Console Exporter Config Console exporter exports the events into console logs, this exporter is typically used for debugging.\n   name description example     template The event template of SkyWalking exporter, it can be composed of metadata like Event, Pod, and Service.    template.source Event source information.    template.source.service Service name, can be a template string. \u0026quot;{{ .Service.Name }}\u0026quot;   template.source.serviceInstance Service instance name, can be a template string. \u0026quot;{{ .Pod.Name }}\u0026quot;   template.source.endpoint Endpoint, can be a template string. \u0026quot;\u0026quot;   template.message Message format, can be a template string. \u0026quot;{{ .Event.Message }}\u0026quot;    ","title":"SkyWalking Kubernetes Event Exporter User Guide","url":"/docs/skywalking-swck/next/examples/event-exporter/"},{"content":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the SkyWalking community!\nIn here, you could learn how to set up PHP agent for the PHP services.\n","title":"SkyWalking PHP Agent","url":"/docs/skywalking-php/latest/readme/"},{"content":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. 
Welcome to the SkyWalking community!\nIn here, you could learn how to set up PHP agent for the PHP services.\n","title":"SkyWalking PHP Agent","url":"/docs/skywalking-php/next/readme/"},{"content":"SkyWalking PHP Agent This is the official documentation of SkyWalking PHP Agent. Welcome to the SkyWalking community!\nIn here, you could learn how to set up PHP agent for the PHP services.\n","title":"SkyWalking PHP Agent","url":"/docs/skywalking-php/v0.7.0/readme/"},{"content":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to the SkyWalking community!\nThe Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects.\nThis documentation covers a number of ways to set up the Python agent for various use cases.\n \nCapabilities The following table demonstrates the currently supported telemetry collection capabilities in SkyWalking Python agent:\n   Reporter Supported? Details     Trace ✅ (default: ON) Automatic instrumentation + Manual SDK   Log ✅ (default: ON) Direct reporter only. (Tracing context in log planned)   Meter ✅ (default: ON) Meter API + Automatic PVM metrics   Event ❌ (Planned) Report lifecycle events of your awesome Python application   Profiling ✅ (default: ON) Threading and Greenlet Profiler    Live Demo  Find the live demo with Python agent on our website. Follow the showcase to set up preview deployment quickly.  ","title":"SkyWalking Python Agent","url":"/docs/skywalking-python/latest/readme/"},{"content":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to the SkyWalking community!\nThe Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects.\nThis documentation covers a number of ways to set up the Python agent for various use cases.\n \nCapabilities The following table demonstrates the currently supported telemetry collection capabilities in SkyWalking Python agent:\n   Reporter Supported? Details     Trace ✅ (default: ON) Automatic instrumentation + Manual SDK   Log ✅ (default: ON) Direct reporter only. (Tracing context in log planned)   Meter ✅ (default: ON) Meter API + Automatic PVM metrics   Event ❌ (Planned) Report lifecycle events of your awesome Python application   Profiling ✅ (default: ON) Threading and Greenlet Profiler    Live Demo  Find the live demo with Python agent on our website. Follow the showcase to set up preview deployment quickly.  ","title":"SkyWalking Python Agent","url":"/docs/skywalking-python/next/readme/"},{"content":"SkyWalking Python Agent This is the official documentation of SkyWalking Python agent. Welcome to the SkyWalking community!\nThe Python Agent for Apache SkyWalking provides the native tracing/metrics/logging/profiling abilities for Python projects.\nThis documentation covers a number of ways to set up the Python agent for various use cases.\n \nCapabilities The following table demonstrates the currently supported telemetry collection capabilities in SkyWalking Python agent:\n   Reporter Supported? Details     Trace ✅ (default: ON) Automatic instrumentation + Manual SDK   Log ✅ (default: ON) Direct reporter only. (Tracing context in log planned)   Meter ✅ (default: ON) Meter API + Automatic PVM metrics   Event ❌ (Planned) Report lifecycle events of your awesome Python application   Profiling ✅ (default: ON) Threading and Greenlet Profiler    Live Demo  Find the live demo with Python agent on our website. 
Follow the showcase to set up preview deployment quickly.  ","title":"SkyWalking Python Agent","url":"/docs/skywalking-python/v1.0.1/readme/"},{"content":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is the recommended way of running your application with Python agent, the CLI is well-tested and used by all agent E2E \u0026amp; Plugin tests.\nIn releases before 0.7.0, you would at least need to add the following lines to your applications to get the agent attached and running, this can be tedious in many cases due to large number of services, DevOps practices and can cause problem when used with prefork servers.\nfrom skywalking import agent, config config.init(SomeConfig) agent.start() The SkyWalking Python agent implements a command-line interface that can be utilized to attach the agent to your awesome applications during deployment without changing any application code, just like the SkyWalking Java Agent.\n The following feature is added in v1.0.0 as experimental flag, so you need to specify the -p flag to sw-python run -p. In the future, this flag will be removed and agent will automatically enable prefork/fork support in a more comprehensive manner.\n Especially with the new automatic postfork injection feature, you no longer have to worry about threading and forking incompatibility.\nCheck How to use with uWSGI and How to use with Gunicorn to understand the detailed background on what is post_fork, why you need them and how to easily overcome the trouble with sw-python CLI.\nYou should still read the legacy way to integrate agent in case the sw-python CLI is not working for you.\nUsage Upon successful installation of the SkyWalking Python agent via pip, a command-line script sw-python is installed in your environment (virtual env preferred).\n run sw-python to see if it is available, you will need to pass configuration by environment variables.\n For example: export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800\nThe run option The sw-python CLI provides a run option, which you can use to execute your applications (either begins with the python command or Python-based programs like gunicorn on your path) just like you invoke them normally, plus a prefix, the following example demonstrates the usage.\nIf your previous command to run your gunicorn/uwsgi application is:\ngunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nuwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nPlease change it to (the -p option starts one agent in each process, which is the correct behavior):\nImportant: if the call to uwsgi/gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nsw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nThe SkyWalking Python agent will start up along with all your application workers shortly.\nNote that sw-python also work with spawned subprocess (os.exec*/subprocess) as long as the PYTHONPATH is inherited.\nAdditionally, sw-python started agent works well with os.fork when your application forks workers, as long as the 
SW_AGENT_EXPERIMENTAL_FORK_SUPPORT is turned on. (It will be automatically turned on when gunicorn is detected)\nConfiguring the agent You would normally want to provide additional configurations other than the default ones.\nThrough environment variables The currently supported method is to provide the environment variables listed and explained in the Environment Variables List.\nThrough a sw-config.toml (TBD) Currently, only environment variable configuration is supported; an optional toml configuration is to be implemented.\nEnabling CLI DEBUG mode Note the CLI is a feature that manipulates the Python interpreter bootstrap behaviour, there could be unsupported cases.\nIf you encounter unexpected problems, please turn on the DEBUG mode by adding the -d or --debug flag to your sw-python command, as shown below.\nFrom: sw-python run command\nTo: sw-python -d run command\nPlease attach the debug logs to the SkyWalking Issues section if you believe it is a bug, idea discussions and pull requests are always welcomed.\nAdditional Remarks When executing commands with sw-python run command, your command\u0026rsquo;s Python interpreter will pick up the SkyWalking loader module.\nIt is not safe to attach SkyWalking Agent to those commands that resides in another Python installation because incompatible Python versions and mismatched SkyWalking versions can cause problems. Therefore, any attempt to pass a command that uses a different Python interpreter/ environment will not bring up SkyWalking Python Agent even if another SkyWalking Python agent is installed there(no matter the version), and will force exit with an error message indicating the reasoning.\nDisabling spawned processes from starting new agents Sometimes you don\u0026rsquo;t actually need the agent to monitor anything in a new process (when it\u0026rsquo;s not a web service worker). (here we mean process spawned by subprocess and os.exec*(), os.fork() is not controlled by this flag but experimental_fork_support)\nIf you do not need the agent to get loaded for application child processes, you can turn off the behavior by setting an environment variable.\nSW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE to False\nNote the auto bootstrap depends on the environment inherited by child processes, thus prepending a new sitecustomize path to or removing the loader path from the PYTHONPATH could also prevent the agent from loading in a child process.\nKnown limitations  The CLI may not work properly with arguments that involve double quotation marks in some shells. The CLI and bootstrapper stdout logs could get messy in Windows shells.  
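Putting the pieces above together, a typical invocation might look like the following (a sketch; the backend address and the gunicorn command line are the placeholders used earlier in this guide):
# point the agent at the OAP collector
export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800
# optional: keep processes spawned via subprocess/os.exec* from loading the agent
export SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE=False
# -d enables CLI debug logs, -p starts one agent per worker process
sw-python -d run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088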
","title":"SkyWalking Python Agent Command Line Interface (sw-python CLI)","url":"/docs/skywalking-python/latest/en/setup/cli/"},{"content":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is the recommended way of running your application with Python agent, the CLI is well-tested and used by all agent E2E \u0026amp; Plugin tests.\nIn releases before 0.7.0, you would at least need to add the following lines to your applications to get the agent attached and running, this can be tedious in many cases due to large number of services, DevOps practices and can cause problem when used with prefork servers.\nfrom skywalking import agent, config config.init(SomeConfig) agent.start() The SkyWalking Python agent implements a command-line interface that can be utilized to attach the agent to your awesome applications during deployment without changing any application code, just like the SkyWalking Java Agent.\n The following feature is added in v1.0.0 as experimental flag, so you need to specify the -p flag to sw-python run -p. In the future, this flag will be removed and agent will automatically enable prefork/fork support in a more comprehensive manner.\n Especially with the new automatic postfork injection feature, you no longer have to worry about threading and forking incompatibility.\nCheck How to use with uWSGI and How to use with Gunicorn to understand the detailed background on what is post_fork, why you need them and how to easily overcome the trouble with sw-python CLI.\nYou should still read the legacy way to integrate agent in case the sw-python CLI is not working for you.\nUsage Upon successful installation of the SkyWalking Python agent via pip, a command-line script sw-python is installed in your environment (virtual env preferred).\n run sw-python to see if it is available, you will need to pass configuration by environment variables.\n For example: export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800\nThe run option The sw-python CLI provides a run option, which you can use to execute your applications (either begins with the python command or Python-based programs like gunicorn on your path) just like you invoke them normally, plus a prefix, the following example demonstrates the usage.\nIf your previous command to run your gunicorn/uwsgi application is:\ngunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nuwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nPlease change it to (the -p option starts one agent in each process, which is the correct behavior):\nImportant: if the call to uwsgi/gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nsw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nThe SkyWalking Python agent will start up along with all your application workers shortly.\nNote that sw-python also work with spawned subprocess (os.exec*/subprocess) as long as the PYTHONPATH is inherited.\nAdditionally, sw-python started agent works well with os.fork when your application forks workers, as long as the 
SW_AGENT_EXPERIMENTAL_FORK_SUPPORT is turned on. (It will be automatically turned on when gunicorn is detected)\nConfiguring the agent You would normally want to provide additional configurations other than the default ones.\nThrough environment variables The currently supported method is to provide the environment variables listed and explained in the Environment Variables List.\nThrough a sw-config.toml (TBD) Currently, only environment variable configuration is supported; an optional toml configuration is to be implemented.\nEnabling CLI DEBUG mode Note the CLI is a feature that manipulates the Python interpreter bootstrap behaviour, there could be unsupported cases.\nIf you encounter unexpected problems, please turn on the DEBUG mode by adding the -d or --debug flag to your sw-python command, as shown below.\nFrom: sw-python run command\nTo: sw-python -d run command\nPlease attach the debug logs to the SkyWalking Issues section if you believe it is a bug, idea discussions and pull requests are always welcomed.\nAdditional Remarks When executing commands with sw-python run command, your command\u0026rsquo;s Python interpreter will pick up the SkyWalking loader module.\nIt is not safe to attach SkyWalking Agent to those commands that resides in another Python installation because incompatible Python versions and mismatched SkyWalking versions can cause problems. Therefore, any attempt to pass a command that uses a different Python interpreter/ environment will not bring up SkyWalking Python Agent even if another SkyWalking Python agent is installed there(no matter the version), and will force exit with an error message indicating the reasoning.\nDisabling spawned processes from starting new agents Sometimes you don\u0026rsquo;t actually need the agent to monitor anything in a new process (when it\u0026rsquo;s not a web service worker). (here we mean process spawned by subprocess and os.exec*(), os.fork() is not controlled by this flag but experimental_fork_support)\nIf you do not need the agent to get loaded for application child processes, you can turn off the behavior by setting an environment variable.\nSW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE to False\nNote the auto bootstrap depends on the environment inherited by child processes, thus prepending a new sitecustomize path to or removing the loader path from the PYTHONPATH could also prevent the agent from loading in a child process.\nKnown limitations  The CLI may not work properly with arguments that involve double quotation marks in some shells. The CLI and bootstrapper stdout logs could get messy in Windows shells.  
","title":"SkyWalking Python Agent Command Line Interface (sw-python CLI)","url":"/docs/skywalking-python/next/en/setup/cli/"},{"content":"SkyWalking Python Agent Command Line Interface (sw-python CLI) Now, SkyWalking Python Agent CLI is the recommended way of running your application with Python agent, the CLI is well-tested and used by all agent E2E \u0026amp; Plugin tests.\nIn releases before 0.7.0, you would at least need to add the following lines to your applications to get the agent attached and running, this can be tedious in many cases due to large number of services, DevOps practices and can cause problem when used with prefork servers.\nfrom skywalking import agent, config config.init(SomeConfig) agent.start() The SkyWalking Python agent implements a command-line interface that can be utilized to attach the agent to your awesome applications during deployment without changing any application code, just like the SkyWalking Java Agent.\n The following feature is added in v1.0.0 as experimental flag, so you need to specify the -p flag to sw-python run -p. In the future, this flag will be removed and agent will automatically enable prefork/fork support in a more comprehensive manner.\n Especially with the new automatic postfork injection feature, you no longer have to worry about threading and forking incompatibility.\nCheck How to use with uWSGI and How to use with Gunicorn to understand the detailed background on what is post_fork, why you need them and how to easily overcome the trouble with sw-python CLI.\nYou should still read the legacy way to integrate agent in case the sw-python CLI is not working for you.\nUsage Upon successful installation of the SkyWalking Python agent via pip, a command-line script sw-python is installed in your environment (virtual env preferred).\n run sw-python to see if it is available, you will need to pass configuration by environment variables.\n For example: export SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800\nThe run option The sw-python CLI provides a run option, which you can use to execute your applications (either begins with the python command or Python-based programs like gunicorn on your path) just like you invoke them normally, plus a prefix, the following example demonstrates the usage.\nIf your previous command to run your gunicorn/uwsgi application is:\ngunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nuwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nPlease change it to (the -p option starts one agent in each process, which is the correct behavior):\nImportant: if the call to uwsgi/gunicorn is prefixed with other commands, this approach will fail since agent currently looks for the command line input at index 0 for safety as an experimental feature.\nsw-python run -p gunicorn your_app:app --workers 2 --worker-class uvicorn.workers.UvicornWorker --bind 0.0.0.0:8088\nor\nsw-python run -p uwsgi --die-on-term --http 0.0.0.0:5000 --http-manage-expect --master --workers 3 --enable-threads --threads 3 --manage-script-name --mount /=main:app\nThe SkyWalking Python agent will start up along with all your application workers shortly.\nNote that sw-python also work with spawned subprocess (os.exec*/subprocess) as long as the PYTHONPATH is inherited.\nAdditionally, sw-python started agent works well with os.fork when your application forks workers, as long as the 
SW_AGENT_EXPERIMENTAL_FORK_SUPPORT is turned on. (It will be automatically turned on when gunicorn is detected)\nConfiguring the agent You would normally want to provide additional configurations other than the default ones.\nThrough environment variables The currently supported method is to provide the environment variables listed and explained in the Environment Variables List.\nThrough a sw-config.toml (TBD) Currently, only environment variable configuration is supported; an optional toml configuration is to be implemented.\nEnabling CLI DEBUG mode Note the CLI is a feature that manipulates the Python interpreter bootstrap behaviour, there could be unsupported cases.\nIf you encounter unexpected problems, please turn on the DEBUG mode by adding the -d or --debug flag to your sw-python command, as shown below.\nFrom: sw-python run command\nTo: sw-python -d run command\nPlease attach the debug logs to the SkyWalking Issues section if you believe it is a bug, idea discussions and pull requests are always welcomed.\nAdditional Remarks When executing commands with sw-python run command, your command\u0026rsquo;s Python interpreter will pick up the SkyWalking loader module.\nIt is not safe to attach SkyWalking Agent to those commands that resides in another Python installation because incompatible Python versions and mismatched SkyWalking versions can cause problems. Therefore, any attempt to pass a command that uses a different Python interpreter/ environment will not bring up SkyWalking Python Agent even if another SkyWalking Python agent is installed there(no matter the version), and will force exit with an error message indicating the reasoning.\nDisabling spawned processes from starting new agents Sometimes you don\u0026rsquo;t actually need the agent to monitor anything in a new process (when it\u0026rsquo;s not a web service worker). (here we mean process spawned by subprocess and os.exec*(), os.fork() is not controlled by this flag but experimental_fork_support)\nIf you do not need the agent to get loaded for application child processes, you can turn off the behavior by setting an environment variable.\nSW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE to False\nNote the auto bootstrap depends on the environment inherited by child processes, thus prepending a new sitecustomize path to or removing the loader path from the PYTHONPATH could also prevent the agent from loading in a child process.\nKnown limitations  The CLI may not work properly with arguments that involve double quotation marks in some shells. The CLI and bootstrapper stdout logs could get messy in Windows shells.  
","title":"SkyWalking Python Agent Command Line Interface (sw-python CLI)","url":"/docs/skywalking-python/v1.0.1/en/setup/cli/"},{"content":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented automatically, SkyWalking also provides some APIs to enable manual instrumentation.\nCreate Spans The code snippet below shows how to create entry span, exit span and local span.\nfrom skywalking import Component from skywalking.trace.context import SpanContext, get_context from skywalking.trace.tags import Tag context: SpanContext = get_context() # get a tracing context # create an entry span, by using `with` statement, # the span automatically starts/stops when entering/exiting the context with context.new_entry_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.component = Component.Flask # the span automatically stops when exiting the `with` context class TagSinger(Tag): key = \u0026#39;Singer\u0026#39; with context.new_exit_span(op=\u0026#39;https://github.com/apache\u0026#39;, peer=\u0026#39;localhost:8080\u0026#39;, component=Component.Flask) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) with context.new_local_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) Decorators from time import sleep from skywalking import Component from skywalking.decorators import trace, runnable from skywalking.trace.context import SpanContext, get_context @trace() # the operation name is the method name(\u0026#39;some_other_method\u0026#39;) by default def some_other_method(): sleep(1) @trace(op=\u0026#39;awesome\u0026#39;) # customize the operation name to \u0026#39;awesome\u0026#39; def some_method(): some_other_method() @trace(op=\u0026#39;async_functions_are_also_supported\u0026#39;) async def async_func(): return \u0026#39;asynchronous\u0026#39; @trace() async def async_func2(): return await async_func() @runnable() # cross thread propagation def some_method(): some_other_method() from threading import Thread t = Thread(target=some_method) t.start() context: SpanContext = get_context() with context.new_entry_span(op=str(\u0026#39;https://github.com/apache/skywalking\u0026#39;)) as span: span.component = Component.Flask some_method() ","title":"SkyWalking Python Instrumentation API","url":"/docs/skywalking-python/latest/en/setup/advanced/api/"},{"content":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented automatically, SkyWalking also provides some APIs to enable manual instrumentation.\nCreate Spans The code snippet below shows how to create entry span, exit span and local span.\nfrom skywalking import Component from skywalking.trace.context import SpanContext, get_context from skywalking.trace.tags import Tag context: SpanContext = get_context() # get a tracing context # create an entry span, by using `with` statement, # the span automatically starts/stops when entering/exiting the context with context.new_entry_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.component = Component.Flask # the span automatically stops when exiting the `with` context class TagSinger(Tag): key = \u0026#39;Singer\u0026#39; with context.new_exit_span(op=\u0026#39;https://github.com/apache\u0026#39;, peer=\u0026#39;localhost:8080\u0026#39;, component=Component.Flask) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) with context.new_local_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: 
span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) Decorators from time import sleep from skywalking import Component from skywalking.decorators import trace, runnable from skywalking.trace.context import SpanContext, get_context @trace() # the operation name is the method name(\u0026#39;some_other_method\u0026#39;) by default def some_other_method(): sleep(1) @trace(op=\u0026#39;awesome\u0026#39;) # customize the operation name to \u0026#39;awesome\u0026#39; def some_method(): some_other_method() @trace(op=\u0026#39;async_functions_are_also_supported\u0026#39;) async def async_func(): return \u0026#39;asynchronous\u0026#39; @trace() async def async_func2(): return await async_func() @runnable() # cross thread propagation def some_method(): some_other_method() from threading import Thread t = Thread(target=some_method) t.start() context: SpanContext = get_context() with context.new_entry_span(op=str(\u0026#39;https://github.com/apache/skywalking\u0026#39;)) as span: span.component = Component.Flask some_method() ","title":"SkyWalking Python Instrumentation API","url":"/docs/skywalking-python/next/en/setup/advanced/api/"},{"content":"SkyWalking Python Instrumentation API Apart from the supported libraries that can be instrumented automatically, SkyWalking also provides some APIs to enable manual instrumentation.\nCreate Spans The code snippet below shows how to create entry span, exit span and local span.\nfrom skywalking import Component from skywalking.trace.context import SpanContext, get_context from skywalking.trace.tags import Tag context: SpanContext = get_context() # get a tracing context # create an entry span, by using `with` statement, # the span automatically starts/stops when entering/exiting the context with context.new_entry_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.component = Component.Flask # the span automatically stops when exiting the `with` context class TagSinger(Tag): key = \u0026#39;Singer\u0026#39; with context.new_exit_span(op=\u0026#39;https://github.com/apache\u0026#39;, peer=\u0026#39;localhost:8080\u0026#39;, component=Component.Flask) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) with context.new_local_span(op=\u0026#39;https://github.com/apache\u0026#39;) as span: span.tag(TagSinger(\u0026#39;Nakajima\u0026#39;)) Decorators from time import sleep from skywalking import Component from skywalking.decorators import trace, runnable from skywalking.trace.context import SpanContext, get_context @trace() # the operation name is the method name(\u0026#39;some_other_method\u0026#39;) by default def some_other_method(): sleep(1) @trace(op=\u0026#39;awesome\u0026#39;) # customize the operation name to \u0026#39;awesome\u0026#39; def some_method(): some_other_method() @trace(op=\u0026#39;async_functions_are_also_supported\u0026#39;) async def async_func(): return \u0026#39;asynchronous\u0026#39; @trace() async def async_func2(): return await async_func() @runnable() # cross thread propagation def some_method(): some_other_method() from threading import Thread t = Thread(target=some_method) t.start() context: SpanContext = get_context() with context.new_entry_span(op=str(\u0026#39;https://github.com/apache/skywalking\u0026#39;)) as span: span.component = Component.Flask some_method() ","title":"SkyWalking Python Instrumentation API","url":"/docs/skywalking-python/v1.0.1/en/setup/advanced/api/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and 
start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. 
See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/latest/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Call a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  
\u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://skywalking.apache.org/docs/main/vx.y.z/en/changes/changes/ Apache SkyWalking website: http://skywalking.apache.org/ Downloads: https://skywalking.apache.org/downloads/#SkyWalkingAPM Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  
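The vote-check commands listed in the guide above can be run back to back once the release candidate files have been downloaded from the dev staging area. The sketch below is illustrative only: it assumes the downloaded packages sit in the current directory, that their names match the x.y.z placeholders used in this guide, and that the project KEYS file still needs to be imported into the local GPG keyring; the unpacked source directory name is a hypothetical example, not a guaranteed value.
# Import the release managers' public keys (skip if already imported)
curl -sSL https://dist.apache.org/repos/dist/release/skywalking/KEYS | gpg --import
# Verify the published sha512 checksums
shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512
shasum -c apache-skywalking-apm-bin-x.y.z.tar.gz.sha512
# Verify the GPG signatures against the packages
gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz
gpg --verify apache-skywalking-apm-bin-x.y.z.tar.gz.asc apache-skywalking-apm-bin-x.y.z.tar.gz
# Unpack the source package and run the license header check from the checklist
tar -xzf apache-skywalking-apm-x.y.z-src.tgz
cd apache-skywalking-apm-x.y.z-src   # hypothetical directory name; use whatever the tarball actually unpacks to
docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check
If any checksum, signature, or header check fails, report it on the vote thread instead of casting a +1.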
","title":"SkyWalking release guide","url":"/docs/main/next/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. 
Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.zip Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-rocketbot-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.zip Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-rocketbot-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. 
Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.0.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... 
\u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. 
We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) x.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/x.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. 
LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. 
apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.1.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. 
Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. 
Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. 
http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.2.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... 
\u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. 
Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. 
Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images export SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.3.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. 
If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Dmaven.test.skip -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. 
Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.4.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. 
If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. 
Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.5.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. 
If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. 
Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.6.0/en/guides/how-to-release/"},{"content":"Apache SkyWalking release guide If you\u0026rsquo;re a committer, you can learn how to release SkyWalking in The Apache Way and start the voting process by reading this document.\nSet up your development environment Follow the steps in the Apache maven deployment environment document to set gpg tool and encrypt passwords.\nUse the following block as a template and place it in ~/.m2/settings.xml.\n\u0026lt;settings\u0026gt; ... \u0026lt;servers\u0026gt; \u0026lt;!-- To publish a snapshot of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.snapshots.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; \u0026lt;!-- To stage a release of some part of Maven --\u0026gt; \u0026lt;server\u0026gt; \u0026lt;id\u0026gt;apache.releases.https\u0026lt;/id\u0026gt; \u0026lt;username\u0026gt; \u0026lt;!-- YOUR APACHE LDAP USERNAME --\u0026gt; \u0026lt;/username\u0026gt; \u0026lt;password\u0026gt; \u0026lt;!-- YOUR APACHE LDAP PASSWORD (encrypted) --\u0026gt; \u0026lt;/password\u0026gt; \u0026lt;/server\u0026gt; ... \u0026lt;/servers\u0026gt; \u0026lt;/settings\u0026gt; Add your GPG public key  Add your GPG public key into the SkyWalking GPG KEYS file. If you are a committer, use your Apache ID and password to log in this svn, and update the file. Don\u0026rsquo;t override the existing file. Upload your GPG public key to the public GPG site, such as MIT\u0026rsquo;s site. This site should be in the Apache maven staging repository checklist.  Test your settings This step is only for testing purpose. 
If your env is correctly set, you don\u0026rsquo;t need to check every time.\n./mvnw clean install -Pall (this will build artifacts, sources and sign) Prepare for the release ./mvnw release:clean ./mvnw release:prepare -DautoVersionSubmodules=true -Darguments='-Dmaven.test.skip' -Pall  Set version number as x.y.z, and tag as vx.y.z (The version tag must start with v. You will find out why this is necessary in the next step.)  You could do a GPG signature before preparing for the release. If you need to input the password to sign, and the maven doesn\u0026rsquo;t provide you with the opportunity to do so, this may lead to failure of the release. To resolve this, you may run gpg --sign xxx in any file. This will allow it to remember the password for long enough to prepare for the release.\nStage the release ./mvnw release:perform -Darguments='-Dmaven.test.skip' -Pall  The release will be automatically inserted into a temporary staging repository.  Build and sign the source code package export RELEASE_VERSION=x.y.z (example: RELEASE_VERSION=5.0.0-alpha) cd tools/releasing bash create_source_release.sh This script takes care of the following things:\n Use v + RELEASE_VERSION as tag to clone the codes. Complete git submodule init/update. Exclude all unnecessary files in the target source tar, such as .git, .github, and .gitmodules. See the script for more details. Execute gpg and shasum 512.  apache-skywalking-apm-x.y.z-src.tgz and files ending with .asc and .sha512 may be found in the tools/releasing folder.\nLocate and download the distribution package in Apache Nexus Staging repositories  Use your Apache ID to log in to https://repository.apache.org/. Go to https://repository.apache.org/#stagingRepositories. Search skywalking and find your staging repository. Close the repository and wait for all checks to pass. In this step, your GPG KEYS will be checked. See the set PGP document, if you haven\u0026rsquo;t done it before. Go to {REPO_URL}/org/apache/skywalking/apache-skywalking-apm/x.y.z. Download .tar.gz and .zip and files ending with .asc and .sha1.  Upload to Apache svn  Use your Apache ID to log in to https://dist.apache.org/repos/dist/dev/skywalking/. Create a folder and name it by the release version and round, such as: x.y.z Upload the source code package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-x.y.z-src.tar.gz See Section \u0026ldquo;Build and sign the source code package\u0026rdquo; for more details   Upload the distribution package to the folder with files ending with .asc and .sha512.  Package name: apache-skywalking-bin-x.y.z.tar.gz. See Section \u0026ldquo;Locate and download the distribution package in Apache Nexus Staging repositories\u0026rdquo; for more details. Create a .sha512 package: shasum -a 512 file \u0026gt; file.sha512    Make the internal announcements Send an announcement mail in dev mail list.\nMail title: [ANNOUNCE] SkyWalking x.y.z test build available Mail content: The test build of x.y.z is available. We welcome any comments you may have, and will take all feedback into account if a quality vote is called for this build. 
Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md A vote regarding the quality of this test build will be initiated within the next couple of days. Wait for at least 48 hours for test responses Any PMC member, committer or contributor can test the release features and provide feedback. Based on that, the PMC will decide whether to start the voting process.\nCall a vote in dev Call a vote in dev@skywalking.apache.org\nMail title: [VOTE] Release Apache SkyWalking version x.y.z Mail content: Hi All, This is a call for vote to release Apache SkyWalking version x.y.z. Release notes: * https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Release Candidate: * https://dist.apache.org/repos/dist/dev/skywalking/xxxx * sha512 checksums - sha512xxxxyyyzzz apache-skywalking-apm-x.x.x-src.tgz - sha512xxxxyyyzzz apache-skywalking-apm-bin-x.x.x.tar.gz Maven 2 staging repository: * https://repository.apache.org/content/repositories/xxxx/org/apache/skywalking/ Release Tag : * (Git Tag) vx.y.z Release CommitID : * https://github.com/apache/skywalking/tree/(Git Commit ID) * Git submodule * skywalking-ui: https://github.com/apache/skywalking-booster-ui/tree/(Git Commit ID) * apm-protocol/apm-network/src/main/proto: https://github.com/apache/skywalking-data-collect-protocol/tree/(Git Commit ID) * oap-server/server-query-plugin/query-graphql-plugin/src/main/resources/query-protocol https://github.com/apache/skywalking-query-protocol/tree/(Git Commit ID) Keys to verify the Release Candidate : * https://dist.apache.org/repos/dist/release/skywalking/KEYS Guide to build the release from source : * https://github.com/apache/skywalking/blob/vx.y.z/docs/en/guides/How-to-build.md Voting will start now (xxxx date) and will remain open for at least 72 hours, Request all PMC members to give their vote. [ ] +1 Release this package. [ ] +0 No opinion. [ ] -1 Do not release this package because.... Vote Check All PMC members and committers should check these before casting +1 votes.\n Features test. All artifacts in staging repository are published with .asc, .md5, and *sha1 files. Source code and distribution package (apache-skywalking-x.y.z-src.tar.gz, apache-skywalking-bin-x.y.z.tar.gz, apache-skywalking-bin-x.y.z.zip) are found in https://dist.apache.org/repos/dist/dev/skywalking/x.y.z with .asc and .sha512. LICENSE and NOTICE are in the source code and distribution package. 
Check shasum -c apache-skywalking-apm-x.y.z-src.tgz.sha512. Check gpg --verify apache-skywalking-apm-x.y.z-src.tgz.asc apache-skywalking-apm-x.y.z-src.tgz Build a distribution package from the source code package (apache-skywalking-x.y.z-src.tar.gz) by following this doc. Check the Apache License Header. Run docker run --rm -v $(pwd):/github/workspace apache/skywalking-eyes header check. (No binaries in source codes)  The voting process is as follows:\n All PMC member votes are +1 binding, and all other votes are +1 but non-binding. If you obtain at least 3 (+1 binding) votes with more +1 than -1 votes within 72 hours, the release will be approved.  Publish the release  Move source codes tar and distribution packages to https://dist.apache.org/repos/dist/release/skywalking/.  \u0026gt; export SVN_EDITOR=vim \u0026gt; svn mv https://dist.apache.org/repos/dist/dev/skywalking/x.y.z https://dist.apache.org/repos/dist/release/skywalking .... enter your apache password .... Release in the nexus staging repo. Public download source and distribution tar/zip are located in http://www.apache.org/dyn/closer.cgi/skywalking/x.y.z/xxx. The Apache mirror path is the only release information that we publish. Public asc and sha512 are located in https://www.apache.org/dist/skywalking/x.y.z/xxx. Public KEYS point to https://www.apache.org/dist/skywalking/KEYS. Update the website download page. http://skywalking.apache.org/downloads/ . Add a new download source, distribution, sha512, asc, and document links. The links can be found following rules (3) to (6) above. Add a release event on the website homepage and event page. Announce the public release with changelog or key features. Send ANNOUNCE email to dev@skywalking.apache.org, announce@apache.org. The sender should use the Apache email account.  Mail title: [ANNOUNCE] Apache SkyWalking x.y.z released Mail content: Hi all, Apache SkyWalking Team is glad to announce the first release of Apache SkyWalking x.y.z. SkyWalking: APM (application performance monitor) tool for distributed systems, especially designed for microservices, cloud native and container-based (Docker, Kubernetes, Mesos) architectures. This release contains a number of new features, bug fixes and improvements compared to version a.b.c(last release). The notable changes since x.y.z include: (Highlight key changes) 1. ... 2. ... 3. ... 
Please refer to the change log for the complete list of changes: https://github.com/apache/skywalking/blob/master/docs/en/changes/changes-x.y.z.md Apache SkyWalking website: http://skywalking.apache.org/ Downloads: http://skywalking.apache.org/downloads/ Twitter: https://twitter.com/ASFSkyWalking SkyWalking Resources: - GitHub: https://github.com/apache/skywalking - Issue: https://github.com/apache/skywalking/issues - Mailing list: dev@skywalkiing.apache.org - Apache SkyWalking Team Publish the Docker images We have a GitHub workflow to automatically publish the Docker images to Docker Hub after you set the version from pre-release to release, all you need to do is to watch that workflow and see whether it succeeds, if it fails, you can use the following steps to publish the Docker images in your local machine.\nexport SW_VERSION=x.y.z git clone --depth 1 --branch v$SW_VERSION https://github.com/apache/skywalking.git cd skywalking svn co https://dist.apache.org/repos/dist/release/skywalking/$SW_VERSION release # (1) export CONTEXT=release export HUB=apache export OAP_NAME=skywalking-oap-server export UI_NAME=skywalking-ui export TAG=$SW_VERSION export DIST=\u0026lt;the binary package name inside (1), e.g. apache-skywalking-apm-8.8.0.tar.gz\u0026gt; make docker.push Clean up the old releases Once the latest release has been published, you should clean up the old releases from the mirror system.\n Update the download links (source, dist, asc, and sha512) on the website to the archive repo (https://archive.apache.org/dist/skywalking). Remove previous releases from https://dist.apache.org/repos/dist/release/skywalking/.  ","title":"SkyWalking release guide","url":"/docs/main/v9.7.0/en/guides/how-to-release/"},{"content":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. In order to support Kotlin Coroutine, we provide this additional plugin.\nImplementation principle As we know, Kotlin coroutine switches the execution thread by CoroutineDispatcher.\n Create a snapshot of the current context before dispatch the continuation. Then create a coroutine span after thread switched, mark the span continued with the snapshot. Every new span which created in the new thread will be a child of this coroutine span. So we can link those span together in a tracing. After the original runnable executed, we need to stop the coroutine span for cleaning thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can find, the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing codes manually, just install the plugin. We can find the spans be connected together. We can get all info of one client call.\n","title":"Skywalking with Kotlin coroutine","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/"},{"content":"Skywalking with Kotlin coroutine This Plugin provides an auto instrument support plugin for Kotlin coroutine based on context snapshot.\nDescription SkyWalking provide tracing context propagation inside thread. 
In order to support Kotlin coroutines, we provide this additional plugin.\nImplementation principle As we know, a Kotlin coroutine switches the execution thread via the CoroutineDispatcher.\n Create a snapshot of the current context before dispatching the continuation. Then create a coroutine span after the thread switch, and mark the span as continued from the snapshot. Every new span created in the new thread will be a child of this coroutine span, so we can link those spans together in one trace. After the original runnable has executed, we stop the coroutine span to clean up the thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can see that the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split into two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing any code manually, just install the plugin. We can see that the spans are connected together, and we can get all the info of one client call.\n","title":"Skywalking with Kotlin coroutine","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/"},{"content":"Skywalking with Kotlin coroutine This plugin provides automatic instrumentation support for Kotlin coroutines based on context snapshots.\nDescription SkyWalking provides tracing context propagation within a thread. In order to support Kotlin coroutines, we provide this additional plugin.\nImplementation principle As we know, a Kotlin coroutine switches the execution thread via the CoroutineDispatcher.\n Create a snapshot of the current context before dispatching the continuation. Then create a coroutine span after the thread switch, and mark the span as continued from the snapshot. Every new span created in the new thread will be a child of this coroutine span, so we can link those spans together in one trace. After the original runnable has executed, we stop the coroutine span to clean up the thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can see that the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split into two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing any code manually, just install the plugin. We can see that the spans are connected together, and we can get all the info of one client call.\n","title":"Skywalking with Kotlin coroutine","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/"},{"content":"Skywalking with Kotlin coroutine This plugin provides automatic instrumentation support for Kotlin coroutines based on context snapshots.\nDescription SkyWalking provides tracing context propagation within a thread. In order to support Kotlin coroutines, we provide this additional plugin.\nImplementation principle As we know, a Kotlin coroutine switches the execution thread via the CoroutineDispatcher.\n Create a snapshot of the current context before dispatching the continuation. Then create a coroutine span after the thread switch, and mark the span as continued from the snapshot. Every new span created in the new thread will be a child of this coroutine span, so we can link those spans together in one trace. After the original runnable has executed, we stop the coroutine span to clean up the thread state.  
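Because this is an optional plugin of the SkyWalking Java agent, it has to be activated before it takes effect. Assuming the default agent distribution layout (the exact jar name may differ between agent versions), activating it and attaching the agent looks roughly like this:
# Sketch only: paths and the plugin jar name are assumptions based on the default agent layout.
cd /path/to/skywalking-agent
cp optional-plugins/apm-kotlin-coroutine-plugin-*.jar plugins/
# Start your Kotlin/JVM service with the agent attached as usual:
java -javaagent:/path/to/skywalking-agent/skywalking-agent.jar -jar your-service.jar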
Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can see that the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split into two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing any code manually, just install the plugin. We can see that the spans are connected together, and we can get all the info of one client call.\n","title":"Skywalking with Kotlin coroutine","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/"},{"content":"Skywalking with Kotlin coroutine This plugin provides automatic instrumentation support for Kotlin coroutines based on context snapshots.\nDescription SkyWalking provides tracing context propagation within a thread. In order to support Kotlin coroutines, we provide this additional plugin.\nImplementation principle As we know, a Kotlin coroutine switches the execution thread via the CoroutineDispatcher.\n Create a snapshot of the current context before dispatching the continuation. Then create a coroutine span after the thread switch, and mark the span as continued from the snapshot. Every new span created in the new thread will be a child of this coroutine span, so we can link those spans together in one trace. After the original runnable has executed, we stop the coroutine span to clean up the thread state.  Some screenshots Run without the plugin We run a Kotlin coroutine based gRPC server without this coroutine plugin.\nYou can see that the one call (client -\u0026gt; server1 -\u0026gt; server2) has been split into two tracing paths.\n Server1 without exit span and server2 tracing path.  Server2 tracing path.   Run with the plugin Without changing any code manually, just install the plugin. We can see that the spans are connected together, and we can get all the info of one client call.\n","title":"Skywalking with Kotlin coroutine","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/kotlin-coroutine-plugin/"},{"content":"Slow Cache Command Slow Cache commands are crucial for identifying bottlenecks in a system that relies on a cache system.\nSlow Cache commands are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these commands must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. The reserved cache type default is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system\u0026rsquo;s cache command access time is usually more than 1ms. The OAP server runs the statistics per service and only persists the top 50 every 10 minutes (controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) by default.  ","title":"Slow Cache Command","url":"/docs/main/latest/en/setup/backend/slow-cache-command/"},{"content":"Slow Cache Command Slow Cache commands are crucial for identifying bottlenecks in a system that relies on a cache system.\nSlow Cache commands are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. 
Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/next/en/setup/backend/slow-cache-command/"},{"content":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/v9.3.0/en/setup/backend/slow-cache-command/"},{"content":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/v9.4.0/en/setup/backend/slow-cache-command/"},{"content":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. 
Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/v9.5.0/en/setup/backend/slow-cache-command/"},{"content":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/v9.6.0/en/setup/backend/slow-cache-command/"},{"content":"Slow Cache Command Slow Cache command are sensitive for you to identify bottlenecks of a system which relies on cache system.\nSlow Cache command are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these command must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n cache-type:thresholdValue,cache-type2:thresholdValue2\n The default settings are default:20,redis:10. Reserved Cache type is default, which is the default threshold for all cache types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Cache Command","url":"/docs/main/v9.7.0/en/setup/backend/slow-cache-command/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. 
The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/latest/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/next/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial in order for you to identify bottlenecks of a system which relies on the database.\nSlow DB statements are based on sampling. Right now, the core samples the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote: The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms.\n","title":"Slow Database Statement","url":"/docs/main/v9.0.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote: The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms.\n","title":"Slow Database Statement","url":"/docs/main/v9.1.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. 
Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote: The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms.\n","title":"Slow Database Statement","url":"/docs/main/v9.2.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/v9.3.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/v9.4.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. 
Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/v9.5.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/v9.6.0/en/setup/backend/slow-db-statement/"},{"content":"Slow Database Statement Slow Database statements are crucial for you to identify bottlenecks of a system which relies on databases.\nSlow DB statements are based on sampling. Right now, the core samples are the top 50 slowest every 10 minutes. Note that the duration of these statements must be slower than the threshold.\nHere\u0026rsquo;s the format of the settings (in milliseconds):\n database-type:thresholdValue,database-type2:thresholdValue2\n The default settings are default:200,mongodb:100. Reserved DB type is default, which is the default threshold for all database types, unless set explicitly.\nNote:\n The threshold should not be set too small, like 1ms. Although it works in theory, OAP performance issues may arise if your system statement access time is usually more than 1ms. The OAP server would run statistic per service and only persistent the top 50 every 10(controlled by topNReportPeriod: ${SW_CORE_TOPN_REPORT_PERIOD:10}) minutes by default.  ","title":"Slow Database Statement","url":"/docs/main/v9.7.0/en/setup/backend/slow-db-statement/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/latest/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
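Once a scope and its source class exist, the receiver side stays thin: it only needs to obtain the SourceReceiver service from the core module and hand it a populated source instance. Below is a minimal sketch under those assumptions; the `ExampleMetricsReceiver` class, its method, and the placeholder metric values are hypothetical, and it reuses the `ServiceInstance` source shown in the example above, with the module lookup following the usual OAP `ModuleManager` pattern.

```java
import org.apache.skywalking.oap.server.core.CoreModule;
import org.apache.skywalking.oap.server.core.source.ServiceInstance;
import org.apache.skywalking.oap.server.core.source.SourceReceiver;
import org.apache.skywalking.oap.server.library.module.ModuleManager;

/**
 * Minimal sketch of a receiver: build a source from raw data and send it
 * to the core module's SourceReceiver, which feeds the OAL runtime.
 */
public class ExampleMetricsReceiver {
    private final SourceReceiver sourceReceiver;

    public ExampleMetricsReceiver(ModuleManager moduleManager) {
        // Look up the internal service provided by the OAP core module.
        this.sourceReceiver = moduleManager.find(CoreModule.NAME)
                                           .provider()
                                           .getService(SourceReceiver.class);
    }

    public void onRawData(int instanceId, int serviceId, String endpoint,
                          int latencyMs, boolean success) {
        // Build the source declared for the scope...
        ServiceInstance source = new ServiceInstance();
        source.setId(instanceId);
        source.setServiceId(serviceId);
        source.setEndpointName(endpoint);
        source.setLatency(latencyMs);
        source.setStatus(success);

        // ...and send it; the OAL scripts written against this scope take over from here.
        sourceReceiver.receive(source);
    }
}
```

Aggregation, persistence and query are all driven by the OAL scripts, so a receiver built this way does not need any further logic of its own.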
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/next/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.0.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.1.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.2.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.3.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.4.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.5.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.6.0/en/guides/source-extension/"},{"content":"Source and scope extension for new metrics From the OAL scope introduction, you should already have understood what a scope is. If you would like to create more extensions, you need to have a deeper understanding of what a source is.\nSource and scope are interrelated concepts. Scope declares the ID (int) and name, while source declares the attributes. Follow these steps to create a new source and sccope.\n In the OAP core module, it provides SourceReceiver internal services.  public interface SourceReceiver extends Service { void receive(Source source); } All data of the analysis must be a org.apache.skywalking.oap.server.core.source.Source sub class that is tagged by @SourceType annotation, and included in the org.apache.skywalking package. Then, it can be supported by the OAL script and OAP core.  
Take the existing source service as an example.\n@ScopeDeclaration(id = SERVICE_INSTANCE, name = \u0026#34;ServiceInstance\u0026#34;, catalog = SERVICE_INSTANCE_CATALOG_NAME) @ScopeDefaultColumn.VirtualColumnDefinition(fieldName = \u0026#34;entityId\u0026#34;, columnName = \u0026#34;entity_id\u0026#34;, isID = true, type = String.class) public class ServiceInstance extends Source { @Override public int scope() { return DefaultScopeDefine.SERVICE_INSTANCE; } @Override public String getEntityId() { return String.valueOf(id); } @Getter @Setter private int id; @Getter @Setter @ScopeDefaultColumn.DefinedByField(columnName = \u0026#34;service_id\u0026#34;) private int serviceId; @Getter @Setter private String name; @Getter @Setter private String serviceName; @Getter @Setter private String endpointName; @Getter @Setter private int latency; @Getter @Setter private boolean status; @Getter @Setter private int responseCode; @Getter @Setter private RequestType type; }  The scope() method in source returns an ID, which is not a random value. This ID must be declared through the @ScopeDeclaration annotation too. The ID in @ScopeDeclaration and ID in scope() method must be the same for this source.\n  The String getEntityId() method in source requests the return value representing the unique entity to which the scope relates. For example, in this service scope, the ID is the service ID, which represents a particular service, like the Order service. This value is used in the OAL group mechanism.\n  @ScopeDefaultColumn.VirtualColumnDefinition and @ScopeDefaultColumn.DefinedByField are required. All declared fields (virtual/byField) will be pushed into a persistent entity, and maps to lists such as the ElasticSearch index and Database table column. For example, the entity ID and service ID for endpoint and service instance level scope are usually included. Take a reference from all existing scopes. All these fields are detected by OAL Runtime, and are required during query.\n  Add scope name as keyword to OAL grammar definition file, OALLexer.g4, which is at the antlr4 folder of the generate-tool-grammar module.\n  Add scope name as keyword to the parser definition file, OALParser.g4, which is located in the same folder as OALLexer.g4.\n   After finishing these steps, you could build a receiver, which do\n Obtain the original data of the metrics. Build the source, and send to SourceReceiver. Complete your OAL scripts. Repackage the project.  ","title":"Source and scope extension for new metrics","url":"/docs/main/v9.7.0/en/guides/source-extension/"},{"content":"Spring annotation plugin This plugin allows to trace all methods of beans in Spring context, which are annotated with @Bean, @Service, @Component and @Repository.\n Why does this plugin optional?  Tracing all methods in Spring context all creates a lot of spans, which also spend more CPU, memory and network. Of course you want to have spans as many as possible, but please make sure your system payload can support these.\n","title":"Spring annotation plugin","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/"},{"content":"Spring annotation plugin This plugin allows to trace all methods of beans in Spring context, which are annotated with @Bean, @Service, @Component and @Repository.\n Why does this plugin optional?  Tracing all methods in Spring context all creates a lot of spans, which also spend more CPU, memory and network. 
Of course you want to have spans as many as possible, but please make sure your system payload can support these.\n","title":"Spring annotation plugin","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/"},{"content":"Spring annotation plugin This plugin allows to trace all methods of beans in Spring context, which are annotated with @Bean, @Service, @Component and @Repository.\n Why does this plugin optional?  Tracing all methods in Spring context all creates a lot of spans, which also spend more CPU, memory and network. Of course you want to have spans as many as possible, but please make sure your system payload can support these.\n","title":"Spring annotation plugin","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/"},{"content":"Spring annotation plugin This plugin allows to trace all methods of beans in Spring context, which are annotated with @Bean, @Service, @Component and @Repository.\n Why does this plugin optional?  Tracing all methods in Spring context all creates a lot of spans, which also spend more CPU, memory and network. Of course you want to have spans as many as possible, but please make sure your system payload can support these.\n","title":"Spring annotation plugin","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/"},{"content":"Spring annotation plugin This plugin allows to trace all methods of beans in Spring context, which are annotated with @Bean, @Service, @Component and @Repository.\n Why does this plugin optional?  Tracing all methods in Spring context all creates a lot of spans, which also spend more CPU, memory and network. Of course you want to have spans as many as possible, but please make sure your system payload can support these.\n","title":"Spring annotation plugin","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/spring-annotation-plugin/"},{"content":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. 
If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Add UI dashboard   Open the dashboard view. Click edit button to edit the templates.\n  Create a new template. Template type: Standard -\u0026gt; Template Configuration: Spring -\u0026gt; Input the Template Name.\n  Click view button. You\u0026rsquo;ll see the spring sleuth dashboard.\n  Supported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"Spring sleuth setup","url":"/docs/main/v9.0.0/en/setup/backend/spring-sleuth-setup/"},{"content":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Add UI dashboard   Open the dashboard view. Click edit button to edit the templates.\n  Create a new template. Template type: Standard -\u0026gt; Template Configuration: Spring -\u0026gt; Input the Template Name.\n  Click view button. You\u0026rsquo;ll see the spring sleuth dashboard.\n  Supported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. 
JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"Spring sleuth setup","url":"/docs/main/v9.1.0/en/setup/backend/spring-sleuth-setup/"},{"content":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"Spring sleuth setup","url":"/docs/main/v9.2.0/en/setup/backend/spring-sleuth-setup/"},{"content":"Spring sleuth setup Spring Sleuth provides Spring Boot auto-configuration for distributed tracing. Skywalking integrates its micrometer so that it can send metrics to the Skywalking Meter System.\nSet up agent  Add micrometer and Skywalking meter registry dependency into the project\u0026rsquo;s pom.xml file. You can find more details at Toolkit micrometer.  
\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.springframework.boot\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;spring-boot-starter-actuator\u0026lt;/artifactId\u0026gt; \u0026lt;/dependency\u0026gt; \u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-micrometer-registry\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; Create Skywalking meter registry in spring bean management.  @Bean SkywalkingMeterRegistry skywalkingMeterRegistry() { // Add rate configs If you need, otherwise using none args construct  SkywalkingConfig config = new SkywalkingConfig(Arrays.asList(\u0026#34;\u0026#34;)); return new SkywalkingMeterRegistry(config); } Set up backend receiver  Make sure to enable meter receiver in application.yml.  receiver-meter:selector:${SW_RECEIVER_METER:default}default: Configure the meter config file. It already has the spring sleuth meter config. If you have a customized meter at the agent side, please configure the meter using the steps set out in the meter document.\n  Enable Spring sleuth config in application.yml.\n  agent-analyzer:selector:${SW_AGENT_ANALYZER:default}default:meterAnalyzerActiveFiles:${SW_METER_ANALYZER_ACTIVE_FILES:spring-sleuth}Dashboard configuration SkyWalking provides the Spring Sleuth dashboard by default under the general service instance, which contains the metrics provided by Spring Sleuth by default. Once you have added customized metrics in the application and configuration the meter config file in the backend. Please following the customized dashboard documentation to add the metrics in the dashboard.\nSupported meter Three types of information are supported: Application, System, and JVM.\n Application: HTTP request count and duration, JDBC max/idle/active connection count, and Tomcat session active/reject count. System: CPU system/process usage, OS system load, and OS process file count. JVM: GC pause count and duration, memory max/used/committed size, thread peak/live/daemon count, and classes loaded/unloaded count.  ","title":"Spring sleuth setup","url":"/docs/main/v9.3.0/en/setup/backend/spring-sleuth-setup/"},{"content":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. 
More importantly, it could fail to match if the duration of the RPC between client and server is longer than the preset time window, or if the RPC crosses two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we can use an auto instrumentation or manual instrumentation mechanism to intercept and manipulate RPCs at both the client side and the server side. In the case of auto instrumentation, STAM manipulates application code at runtime, for example through a Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects the RPC network address used at the client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias names for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate an accurate topology.\nThe STAM has been implemented in Apache SkyWalking[2], an open source APM (application performance monitoring) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring a highly distributed system, especially one with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, cache, and database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships are logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, and many analysis features are built on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationships, including service level and service instance level dependencies.\nA Service is a logical group of instances which have the same functions or code.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, code and network status. The dependency could change over time.\n Figure 1, Generated spans in a traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems, such as the Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both the client and server sides, where the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to be matched in the same time window in order to output the result that Service A depends on Service B. 
So, this time window must be longer than the duration of the RPC request; otherwise, the conclusion will be lost. This condition means the analysis cannot react to dependency mutations at the second level; in production, the window duration sometimes has to be set to 3-5 minutes. Also, because of the window-based design, if one side involves a long-running task, the analysis can’t easily achieve consistent accuracy. In order to make the analysis as fast as possible, the analysis period is kept under 5 minutes, but some spans can’t be matched with their parents or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left over from previous stages, some would still have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce new span and context propagation models, along with a new analysis method. These new models add the peer network address (IP or hostname) used at the client side, the client service instance name and the client service name into the context propagation model. This information is passed with the RPC call from client to server, just like the original trace id and span id in existing tracing systems, and is collected in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the data is synced across cluster nodes, the client-side span analysis can use this alias metadata to generate the client-server relationship directly too. By using these new models and method in Apache SkyWalking, we remove the time-window-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds of latency and consistent accuracy.\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp. Service and Service Instance names of the current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. A reference includes the parent span id and trace id.  In the new span model of STAM, we add the following fields to the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in networking-related libraries. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent a client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. The remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address used by the client library to access the server.\nThese fields are usually optional in many tracing systems, but in STAM we require them in all RPC cases.\nThe Context Model is used to propagate the client-side information to the server side, carried by the original RPC call, usually in a header, such as an HTTP header or MQ header. In the old design, it carries the trace id and span id of the client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and the peer of the exit span. The names could be literal strings. All these extra fields help remove the blocking from streaming analysis. 
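To make the enhanced context model concrete, the following is a rough sketch of the kind of carrier object an agent could build before writing the context into an RPC header. SkyWalking’s own implementation encodes this differently (for example in its sw8 propagation header); all class and field names below are illustrative only.

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Illustrative only: not SkyWalking's actual context implementation.
public final class StamContextCarrier {
    // Fields already carried by a classic tracing context.
    private final String traceId;
    private final String parentSpanId;
    // Extra fields introduced by the STAM context model.
    private final String parentService;          // client service name
    private final String parentServiceInstance;  // client service instance name
    private final String peer;                   // network address the client used to reach the server

    public StamContextCarrier(String traceId, String parentSpanId, String parentService,
                              String parentServiceInstance, String peer) {
        this.traceId = traceId;
        this.parentSpanId = parentSpanId;
        this.parentService = parentService;
        this.parentServiceInstance = parentServiceInstance;
        this.peer = peer;
    }

    // Encode into a single header value (e.g. an HTTP or MQ header). Each field is
    // Base64-encoded so the '-' separator cannot clash with field contents.
    public String encodeHeaderValue() {
        return String.join("-", b64(traceId), b64(parentSpanId), b64(parentService),
                b64(parentServiceInstance), b64(peer));
    }

    public static StamContextCarrier decodeHeaderValue(String value) {
        String[] parts = value.split("-");
        return new StamContextCarrier(unb64(parts[0]), unb64(parts[1]), unb64(parts[2]),
                unb64(parts[3]), unb64(parts[4]));
    }

    private static String b64(String v) {
        return Base64.getEncoder().encodeToString(v.getBytes(StandardCharsets.UTF_8));
    }

    private static String unb64(String v) {
        return new String(Base64.getDecoder().decode(v), StandardCharsets.UTF_8);
    }
}
```

The point of the sketch is simply that the server side can reconstruct the client service, instance and peer address from the header alone, which is what lets the entry-span analysis proceed without waiting for the client span.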
Compared to the existing context model, this uses a little more bandwidth, but it can be optimized. In Apache SkyWalking, we designed a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added to the RPC context, so the increase in bandwidth is less than 1% in the production environment.\nThe changes to these two models eliminate the time windows in the analysis process. Server-side span analysis gains context-aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM processes spans in stream mode. The server-side span, also named the entry span, includes the parent service name, parent service instance name and peer of the exit span, so the analysis process can establish the following results.\n Set the peer of the exit span as an alias of the current service and instance: peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; service instance name aliases are created. These two will be synced to all analysis nodes and persisted in the storage, allowing more analysis processors to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless a different peer network address \u0026lt;-\u0026gt; service instance name mapping is found. In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; service instance name.  For analysis of the client-side span (exit span), there are three possibilities.\n The peer in the exit span already has the alias names established by the server-side span analysis from step (1). Then use the alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias cannot be found, then simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple peer network address \u0026lt;-\u0026gt; service instance name aliases are found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1. New Service Online or Auto Scale Out  New services can be added to the whole topology by the developer team at any time, or automatically by a container orchestration platform through a scale-out policy, as in Kubernetes [5]. The monitoring system cannot be notified manually in every case. By using STAM, we can detect the new node automatically and also keep the analysis process unblocked and consistent with the detected nodes. In this case, a new service and network address (which could be an IP, a port or both) are used. Since the peer network address \u0026lt;-\u0026gt; service mapping does not exist yet, the traffic of client service -\u0026gt; peer network address will be generated and persisted in the storage first. 
After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. 
As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System","url":"/docs/main/latest/en/papers/stam/"},{"content":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. 
Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. 
The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. 
Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. 
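The entry-span rule and the three exit-span possibilities listed above can be summarized in a short sketch. This is pseudo-code for the method as described in this paper, not SkyWalking's actual analyzer; names such as StamTopologyAnalyzer and emit are invented for illustration, and the in-memory alias map stands in for the metadata that the paper describes as synced across analysis nodes and persisted in storage.

```java
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative sketch of STAM's streaming topology rules; not SkyWalking's actual code.
final class StamTopologyAnalyzer {

    // peer network address -> service name aliases, synced to all analysis nodes and persisted.
    private final Map<String, Set<String>> serviceAliases = new ConcurrentHashMap<>();

    // Entry (server-side) span analysis: register the alias, then emit the relationship.
    void onEntrySpan(String parentService, String currentService, String peer) {
        Set<String> aliases =
                serviceAliases.computeIfAbsent(peer, k -> ConcurrentHashMap.newKeySet());
        aliases.add(currentService);
        if (aliases.size() == 1) {
            emit(parentService, currentService);   // normal case: parent -> current service
        } else {
            emit(parentService, peer);             // several services behind one address: keep the peer
        }
    }

    // Exit (client-side) span analysis: the three possibilities described above.
    void onExitSpan(String currentService, String peer) {
        Set<String> aliases = serviceAliases.getOrDefault(peer, Set.of());
        if (aliases.size() == 1) {
            emit(currentService, aliases.iterator().next());  // (1) replace the peer with its alias service
        } else {
            emit(currentService, peer);                       // (2) no alias yet, or (3) multiple aliases
        }
    }

    private void emit(String source, String target) {
        // A real implementation would aggregate this into relationship metrics for the topology.
        System.out.println(source + " -> " + target);
    }
}
```

Because neither branch ever waits for the opposite side's span, every span can be analyzed the moment it arrives, which is the property the paper relies on to drop time-window matching.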
Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. 
It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. So, it will generate the relationships of client service-\u0026gt;peer network address, peer-\u0026gt;server service B and peer network address -\u0026gt;server service C, as shown in Figure 6.\n Figure 6, STAM traffic generation when the proxy uninstrumentation Conclusion This paper described the STAM, which is to the best of our knowledge the best topology detection method for distributed tracing systems. It replaces the time-window based topology analysis method for tracing-based monitoring systems. It removes the resource cost of disk and memory for time-window baseds analysis permanently and totally, and the barriers of horizontal scale. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generated over 100 TB tracing data per day and topology for over 200 services in real time.\nAcknowledgments We thank all contributors of Apache SkyWalking project for suggestions, code contributions to implement the STAM, and feedback from using the STAM and SkyWalking in their production environment.\nLicense This paper and the STAM are licensed in the Apache 2.0.\nReferences  Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z Apache SkyWalking, http://skywalking.apache.org/ Apache Open Users, https://skywalking.apache.org/users/ Zipkin, https://zipkin.io/ Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. 
https://kubernetes.io/ OpenTracing Specification https://github.com/opentracing/specification/blob/master/specification.md Apache Tomcat, http://tomcat.apache.org/ Apache HttpComponents, https://hc.apache.org/ Zipkin doc, ‘Instrumenting a library’ section, ‘Communicating trace information’ paragraph. https://zipkin.io/pages/instrumenting Jaeger Tracing, https://jaegertracing.io/ Envoy Proxy, http://envoyproxy.io/ Nginx, http://nginx.org/ Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway  ","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System","url":"/docs/main/next/en/papers/stam/"},{"content":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, project founder, in 2017, to describe the fundamental theory of all current agent core concepts. Readers could learn why SkyWalking agents are significantly different from other tracing system and Dapper[1] Paper\u0026rsquo;s description.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], and detecting topology and metrics based on the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans in a given time window to generate the dependency of services. This causes more latency and memory use, because the client and server spans of every RPC must be matched in millions of randomly occurring requests in a highly distributed system. More importantly, it could fail to match if the duration of RPC between client and server is longer than the prior setup time window, or across the two windows.\nIn this paper, we present the STAM, Streaming Topology Analysis Method. In STAM, we could use auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPC at both client-side and server-side. In the case of auto instrumentation, STAM manipulates application codes at runtime, such as Java agent. As such, this monitoring system doesn’t require any source code changes from the application development team or RPC framework development team. The STAM injects an RPC network address used at client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as the alias name for this network address used at the client side. Freeing the dependency analysis from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate the accurate topology.\nThe STAM has been implemented in the Apache SkyWalking[2], an open source APM (application performance monitoring system) project of the Apache Software Foundation, which is widely used in many big enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and other enterprises (airlines, financial institutions and others) to support their large-scale distributed systems in the production environment. It reduces the load and memory cost significantly, with better horizontal scale capability.\nIntroduction Monitoring the highly distributed system, especially with a micro-service architecture, is very complex. 
Many RPCs, including HTTP, gRPC, MQ, Cache, and Database accesses, are behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and first step for observability of a whole distributed system. A distributed tracing system is capable of collecting traces, including all distributed request paths. Dependency relationships have been logically included in the trace data. A distributed tracing system, such as Zipkin [4] or Jaeger Tracing [10], provides built-in dependency analysis features, but many analysis features build on top of that. There are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of distributed application system dependency relationship, including service level and service instance level dependency.\nA Service is a logic group of instances which have the same functions or codes.\nA Service Instance is usually an OS level process, such as a JVM process. The relationships between services and instances are mutable, depending on the configuration, codes and network status. The dependency could change over time.\n Figure 1, Generated spans in traditional Dapper based tracing system. The span model in the Dapper paper and existing tracing systems,such as Zipkin instrumenting mode[9], just propagates the span id to the server side. Due to this model, dependency analysis requires a certain time window. The tracing spans are collected at both client- and server-sides, because the relationship is recorded. Due to that, the analysis process has to wait for the client and server spans to match in the same time window, in order to output the result, Service A depending on Service B. So, this time window must be over the duration of this RPC request; otherwise, the conclusion will be lost. This condition makes the analysis would not react the dependency mutation in second level, in production, it sometimes has to set the window duration in 3-5 mins. Also, because of the Windows-based design, if one side involves a long duration task, it can’t easily achieve consistent accuracy. Because in order to make the analysis as fast as possible, the analysis period is less than 5 minutes. But some spans can’t match its parent or children if the analysis is incomplete or crosses two time windows. Even if we added a mechanism to process the spans left in the previous stages, still some would have to be abandoned to keep the dataset size and memory usage reasonable.\nIn the STAM, we introduce a new span and context propagation models, with the new analysis method. These new models add the peer network address (IP or hostname) used at client side, client service instance name and client service name, into the context propagation model. Then it passes the RPC call from client to server, just as the original trace id and span id in the existing tracing system, and collects it in the server-side span. The new analysis method can easily generate the client-server relationship directly without waiting on the client span. It also sets the peer network address as one alias of the server service. After the across cluster node data sync, the client-side span analysis could use this alias metadata to generate the client-server relationship directly too. 
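As a reading aid, the span fields that the paper adds on top of the traditional model can be pictured roughly as the data holder below; this is an illustrative structure, not SkyWalking's internal span class.

```java
// Illustrative data holder for the STAM span fields; not SkyWalking's internal model.
enum SpanType { ENTRY, EXIT, LOCAL }

class StamSpan {
    // Traditional span fields.
    String traceId;
    String spanId;
    String operationName;
    long startTimestamp;
    long finishTimestamp;
    String service;
    String serviceInstance;

    // Fields added by STAM.
    SpanType type;   // ENTRY for server-side libraries (e.g. Tomcat), EXIT for client-side libraries (e.g. HttpComponents)
    String peer;     // remote network address; for EXIT spans, the address the client used to reach the server
}
```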
By using these new models and method in Apache SkyWalking, we remove the time windows-based analysis permanently, and fully use the streaming analysis mode with less than 5 seconds latency and consistent accuracy\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp Service and Service Instance names of current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans. Reference includes the parent span id and trace id.  In the new span model of STAM we add the following fields in the span.\nSpan type. Enumeration, including exit, local and entry. Entry and Exit spans are used in a networking related library. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent the client-side networking library, such as Apache HttpComponents [8].\nPeer Network Address. Remote \u0026ldquo;address,\u0026rdquo; suitable for use in exit and entry spans. In Exit spans, the peer network address is the address by the client library to access the server.\nThese fields usually are optionally included in many tracing system,. But in STAM, we require them in all RPC cases.\nContext Model is used to propagate the client-side information to server-side carried by the original RPC call, usually in the header, such as HTTP header or MQ header. In the old design, it carries the trace id and span id of client-side span. In the STAM, we enhance this model, adding the parent service name, parent service instance name and peer of exit span. The names could be literal strings. All these extra fields will help to remove the block of streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it could be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names. As a result, only 3 integers are added in the RPC context, so the increase of bandwidth is at least less than 1% in the production environment.\nThe changes of two models could eliminate the time windows in the analysis process. Server-side span analysis enhances the context aware capability.\nNew Topology Analysis Method The new topology analysis method at the core of STAM is processing the span in stream mode. The analysis of the server-side span, also named entry span, includes the parent service name, parent service instance name and peer of exit span. So the analysis process could establish the following results.\n Set the peer of exit span as client using alias name of current service and instance. Peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name aliases created. These two will sync with all analysis nodes and persistent in the storage, allowing more analysis processers to have this alias information. Generate relationships of parent service name -\u0026gt; current service name and parent service instance name -\u0026gt; current service instance name, unless there is another different Peer network address \u0026lt;-\u0026gt; Service Instance Name mapping found. 
In that case, only generate relationships of peer network address \u0026lt;-\u0026gt; service name and peer network address \u0026lt;-\u0026gt; Service instance name.  For analysis of the client-side span (exit span), there could three possibilities.\n The peer in the exit span already has the alias names established by server-side span analysis from step (1). Then use alias names to replace the peer, and generate traffic of current service name -\u0026gt; alias service name and current service instance name -\u0026gt; alias service instance name. If the alias could not be found, then just simply generate traffic for current service name -\u0026gt; peer and current service instance name -\u0026gt; peer. If multiple alias names of peer network address \u0026lt;-\u0026gt; Service Instance Name could be found, then keep generating traffic for current service name -\u0026gt; peer network address and current service instance name -\u0026gt; peer network address.   Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems. Evaluation In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.\n 1.New Service Online or Auto Scale Out  New services could be added into the whole topology by the developer team randomly, or container operation platform automatically by some scale out policy, like Kubernetes [5]. The monitoring system could not be notified in any case manually. By using STAM, we could detect the new node automatically and also keep the analysis process unblocked and consistent with detected nodes. In this case, a new service and network address (could be IP, port or both) are used. The peer network address \u0026lt;-\u0026gt; service mapping does not exist, the traffic of client service -\u0026gt; peer network address will be generated and persistent in the storage first. After mapping is generated, further traffic of client-service to server-service could be identified, generated and aggregated in the analysis platform. For filling the gap of a few traffic before the mapping generated, we require doing peer network address \u0026lt;-\u0026gt; service mapping translation again in query stage, to merge client service-\u0026gt;peer network address and client-service to server-service. In production, the amount of VM for the whole SkyWalking analysis platform deployment is less than 100, syncing among them will finish less than 10 seconds, in most cases it only takes 3-5 seconds. And in the query stage, the data has been aggregated in minutes or seconds at least. The query merge performance is not related to how much traffic happens before the mapping generated, only affected by sync duration, in here, only 3 seconds. Due to that, in minute level aggregation topology, it only adds 1 or 2 relationship records in the whole topology relationship dataset. Considering an over 100 services topology having over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This feature is significant in a large and high load distributed system, as we don’t need to concern its scaling capability. 
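The query-stage merge described above can be sketched as a simple fold over the stored relationship records, assuming a resolved peer network address -> service name mapping is available at query time; the record and class names are invented for illustration.

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative sketch of the query-stage merge; not SkyWalking's actual query code.
final class TopologyQueryMerger {

    record Edge(String source, String target) { }

    // Fold edges recorded against a raw peer address (before the alias mapping was synced)
    // into the client-service -> server-service edge, summing their call counts.
    Map<Edge, Long> merge(Map<Edge, Long> storedEdges, Map<String, String> peerToService) {
        Map<Edge, Long> merged = new HashMap<>();
        for (Map.Entry<Edge, Long> entry : storedEdges.entrySet()) {
            String target = peerToService.getOrDefault(entry.getKey().target(), entry.getKey().target());
            merged.merge(new Edge(entry.getKey().source(), target), entry.getValue(), Long::sum);
        }
        return merged;
    }
}
```

This is why the merge cost depends only on how many edges were written before the mapping was synced, not on the total traffic volume.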
And in some fork versions, they choose to update the existing client service-\u0026gt;peer network address to client-service to server-service after detecting the new mapping for peer generated, in order to remove the extra load at query stage permanently.\n Figure 3, Span analysis by using the new topology analysis method  2.Existing Uninstrumented Nodes  Every topology detection method has to work in this case. In many cases, there are nodes in the production environment that can’t be instrumented. Causes for this might include:(1) Restriction of the technology. In some golang or C++ written applications, there is no easy way in Java or .Net to do auto instrumentation by the agent. So, the codes may not be instrumented automatically. (2) The middleware, such as MQ, database server, has not adopted the tracing system. This would make it difficult or time consuming to implement the middleware instrumentation. (3) A 3rd party service or cloud service doesn’t support work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks time to make the instrumentation ready.\nThe STAM works well even if the client or server side has no instrumentation. It still keeps the topology as accurate as possible.\nIf the client side hasn’t instrumented, the server-side span wouldn’t get any reference through RPC context, so, it would simply use peer to generate traffic, as shown in Figure 4.\n Figure 4, STAM traffic generation when no client-side instrumentation As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn’t need to process this case. The STAM analysis core just simply keeps generating client service-\u0026gt;peer network address traffic. As there is no mapping for peer network address generated, there is no merging.\n Figure 5, STAM traffic generation when no server-side instrumentation  3.Uninstrumented Node Having Header Forward Capability  Besides the cases we evaluated in (2) Uninstrumented Nodes, there is one complex and special case: the instrumented node has the capability to propagate the header from downstream to upstream, typically in all proxy, such as Envoy[11], Nginx[12], Spring Cloud Gateway[13]. As proxy, it has the capability to forward all headers from downstream to upstream to keep some of information in the header, including the tracing context, authentication, browser information, and routing information, in order to make them accessible by the business services behind the proxy, like Envoy route configuration [14]. When some proxy can’t be instrumented, no matter what the reason, it should not affect the topology detection.\nIn this case, the proxy address would be used at the client side and propagate through RPC context as peer network address, and the proxy forwards this to different upstream services. Then STAM could detect this case and generate the proxy as a conjectural node. In the STAM, more than one alias names for this network address should be generated. After those two are detected and synchronized to the analysis node, the analysis core knows there is at least one uninstrumented service standing between client and servers. 
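For the uninstrumented proxy case, the extra rule is that a peer address with more than one alias is kept as a conjectural node of its own, with edges from the client to the peer and from the peer to each aliased service. A rough sketch under the same illustrative naming as above:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

// Illustrative sketch of conjectural-node generation for an uninstrumented proxy.
final class ConjecturalNodeRule {

    // aliases: all server services observed behind the same peer network address.
    List<String[]> edgesFor(String clientService, String peer, Set<String> aliases) {
        List<String[]> edges = new ArrayList<>();
        edges.add(new String[] {clientService, peer});       // client service -> peer network address
        for (String serverService : aliases) {
            edges.add(new String[] {peer, serverService});    // peer network address -> server service
        }
        return edges;
    }
}
```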
2. Existing Uninstrumented Nodes

Every topology detection method has to work in this case. In many production environments there are nodes that cannot be instrumented. Causes for this might include: (1) restrictions of the technology; in some applications written in Golang or C++, there is no easy way to do auto instrumentation with an agent as in Java or .NET, so the code may not be instrumented automatically; (2) middleware, such as an MQ or a database server, that has not adopted the tracing system, which makes the instrumentation difficult or time-consuming to implement; (3) a third-party or cloud service that does not work with the current tracing system; (4) lack of resources, e.g., the development or operations team lacks the time to make the instrumentation ready.

STAM works well even if the client or server side has no instrumentation, and it still keeps the topology as accurate as possible.

If the client side is not instrumented, the server-side span receives no reference through the RPC context, so it simply uses the peer to generate traffic, as shown in Figure 4.

Figure 4, STAM traffic generation with no client-side instrumentation

As shown in Figure 5, in the opposite case, with no server-side instrumentation, the client span analysis does not need any special handling. The STAM analysis core simply keeps generating client service -> peer network address traffic. Since no mapping for the peer network address is generated, there is no merging.

Figure 5, STAM traffic generation with no server-side instrumentation

3. Uninstrumented Node Having Header Forward Capability

Besides the cases evaluated in (2) Existing Uninstrumented Nodes, there is one complex and special case: an uninstrumented node that can propagate headers from downstream to upstream, which is typical of proxies such as Envoy[11], Nginx[12] and Spring Cloud Gateway[13]. A proxy can forward all headers from downstream to upstream to keep information carried in the headers, including the tracing context, authentication, browser information and routing information, accessible to the business services behind the proxy, as in the Envoy route configuration [14]. When such a proxy cannot be instrumented, for whatever reason, this should not affect topology detection.

In this case, the proxy address is used at the client side and propagated through the RPC context as the peer network address, and the proxy forwards it to different upstream services. STAM detects this case and generates the proxy as a conjectural node. In STAM, more than one alias name is generated for this network address. After these aliases are detected and synchronized to the analysis nodes, the analysis core knows that at least one uninstrumented service stands between the client and the servers. So it generates the relationships client service -> peer network address, peer network address -> server service B and peer network address -> server service C, as shown in Figure 6; a small sketch of this multi-alias handling follows the figure.

Figure 6, STAM traffic generation when the proxy is uninstrumented
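As a rough illustration of how multiple aliases for one address turn into a conjectural node, the sketch below keeps the proxy address as a node of its own once a second, conflicting alias appears. The class and method names are hypothetical, not SkyWalking's implementation; the only assumption is that server-side analysis registers one alias per service observed behind the address.

```java
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Hypothetical sketch: when several server services claim the same peer address,
 * which is typical for an uninstrumented proxy that forwards tracing headers,
 * keep the address as a conjectural node instead of collapsing it into one alias.
 */
class ConjecturalNodeDetector {

    // peer network address -> server services observed behind it (from entry-span analysis)
    private final Map<String, Set<String>> aliases = new ConcurrentHashMap<>();

    /** Called when server-side analysis registers "this address belongs to that service". */
    void registerAlias(String peerAddress, String serverService) {
        aliases.computeIfAbsent(peerAddress, k -> ConcurrentHashMap.newKeySet()).add(serverService);
    }

    /** Emits topology edges for one client-side call to the given peer address. */
    void emit(String clientService, String peerAddress, TrafficSink sink) {
        Set<String> services = aliases.getOrDefault(peerAddress, Set.of());
        if (services.size() <= 1) {
            // Zero or one alias: the normal exit-span cases described earlier.
            String target = services.isEmpty() ? peerAddress : services.iterator().next();
            sink.serviceCall(clientService, target);
        } else {
            // Several aliases: an uninstrumented proxy sits between the client and the servers.
            sink.serviceCall(clientService, peerAddress);                       // client -> conjectural proxy node
            services.forEach(server -> sink.serviceCall(peerAddress, server));  // proxy node -> each server
        }
    }

    interface TrafficSink {
        void serviceCall(String from, String to);
    }

    public static void main(String[] args) {
        ConjecturalNodeDetector detector = new ConjecturalNodeDetector();
        // Entry-span analysis behind the proxy registered two different services for one address.
        detector.registerAlias("proxy.internal:80", "service-B");
        detector.registerAlias("proxy.internal:80", "service-C");
        detector.emit("service-A", "proxy.internal:80", (from, to) -> System.out.println(from + " -> " + to));
    }
}
```

With a single alias, the address collapses into the server service as in the normal case; with two or more, the address itself stays in the topology and fans out to each server behind it.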
Conclusion
This paper described STAM, which is, to the best of our knowledge, the best topology detection method for distributed tracing systems. It replaces the time-window-based topology analysis method used by tracing-based monitoring systems, permanently removing the disk and memory cost of time-window-based analysis as well as the barriers to horizontal scaling. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production; some of them generate over 100 TB of tracing data per day and a topology of over 200 services in real time.

Acknowledgments
We thank all contributors of the Apache SkyWalking project for their suggestions, their code contributions to implement STAM, and their feedback from using STAM and SkyWalking in their production environments.

License
This paper and STAM are licensed under the Apache 2.0 License.

References
[1] Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z
[2] Apache SkyWalking, http://skywalking.apache.org/
[3] Apache SkyWalking Open Users, https://skywalking.apache.org/users/
[4] Zipkin, https://zipkin.io/
[5] Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/
[6] OpenTracing Specification, https://github.com/opentracing/specification/blob/master/specification.md
[7] Apache Tomcat, http://tomcat.apache.org/
[8] Apache HttpComponents, https://hc.apache.org/
[9] Zipkin documentation, "Instrumenting a library" section, "Communicating trace information" paragraph, https://zipkin.io/pages/instrumenting
[10] Jaeger Tracing, https://jaegertracing.io/
[11] Envoy Proxy, http://envoyproxy.io/
[12] Nginx, http://nginx.org/
[13] Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway
[14] Envoy Route Configuration, https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/rds.proto.html?highlight=request_headers_to_
STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System

Sheng Wu 吴 晟 wusheng@apache.org

Editor's note
This paper was written by Sheng Wu, the project founder, in 2017 to describe the fundamental theory behind all current agent core concepts. Readers can learn why SkyWalking agents are significantly different from other tracing systems and from the description in the Dapper[1] paper.

Abstract
Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], with topology and metrics detected from the tracing data. One big limitation of today's topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans within a given time window to generate the dependencies between services. This causes extra latency and memory use, because the client and server spans of every RPC must be matched among millions of randomly occurring requests in a highly distributed system. More importantly, the match can fail if the duration of the RPC between client and server is longer than the configured time window, or if it crosses two windows.

In this paper, we present STAM, the Streaming Topology Analysis Method.
{"content":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System  Sheng Wu 吴 晟 wusheng@apache.org  Editor\u0026rsquo;s note This paper was written by Sheng Wu, the project founder, in 2017 to describe the fundamental theory behind all current agent core concepts. Readers can learn why SkyWalking agents are significantly different from other tracing systems and from the description in the Dapper[1] paper.\nAbstract Monitoring, visualizing and troubleshooting a large-scale distributed system is a major challenge. One common tool used today is the distributed tracing system (e.g., Google Dapper)[1], with topology and metrics detected from the tracing data. One big limitation of today’s topology detection is that the analysis depends on aggregating the client-side and server-side tracing spans within a given time window to generate the dependencies between services. This costs extra latency and memory, because the client and server spans of every RPC must be matched among millions of randomly occurring requests in a highly distributed system. More importantly, the match can fail if the duration of the RPC between client and server is longer than the configured time window, or if the two spans fall across two windows.\nIn this paper, we present STAM, the Streaming Topology Analysis Method. In STAM, we can use an auto instrumentation or a manual instrumentation mechanism to intercept and manipulate RPCs at both the client side and the server side. In the case of auto instrumentation, STAM manipulates application code at runtime, for example through a Java agent, so this monitoring system doesn’t require any source code changes from the application development team or the RPC framework development team. STAM injects the RPC network address used at the client side, a service name and a service instance name into the RPC context, and binds the server-side service name and service instance name as aliases for the network address used at the client side. Freed from the mechanisms that cause blocking and delay, the analysis core can process the monitoring data in stream mode and generate an accurate topology.\nSTAM has been implemented in Apache SkyWalking[2], an open source APM (application performance monitoring) project of the Apache Software Foundation, which is widely used in many large enterprises[3] including Alibaba, Huawei, Tencent, Didi, Xiaomi, China Mobile and others (airlines, financial institutions and more) to support their large-scale distributed systems in production. It reduces the load and memory cost significantly, with better horizontal scaling capability.\nIntroduction Monitoring a highly distributed system, especially one with a micro-service architecture, is very complex. Many RPCs, including HTTP, gRPC, MQ, cache and database accesses, sit behind a single client-side request. Allowing the IT team to understand the dependency relationships among thousands of services is the key feature and the first step towards observability of the whole distributed system. A distributed tracing system is capable of collecting traces covering all distributed request paths, so dependency relationships are logically included in the trace data. A distributed tracing system such as Zipkin[4] or Jaeger Tracing[10] provides built-in dependency analysis features, and many analysis features build on top of that. Still, there are at least two fundamental limitations: timeliness and consistent accuracy.\nStrong timeliness is required to match the mutability of the dependency relationships in a distributed application system, at both the service level and the service instance level.\nA Service is a logical group of instances which have the same functions or code.\nA Service Instance is usually an OS-level process, such as a JVM process. The relationships between services and instances are mutable, depending on configuration, code and network status; the dependencies can change over time.\n Figure 1, Generated spans in a traditional Dapper-based tracing system. The span model in the Dapper paper and in existing tracing systems, such as the Zipkin instrumenting mode[9], just propagates the span id to the server side. With this model, dependency analysis requires a certain time window: the tracing spans are collected at both the client and server sides, because that is where the relationship is recorded, so the analysis process has to wait for the client and server spans to be matched within the same time window before it can output the result that Service A depends on Service B. The time window must therefore be longer than the duration of the RPC request; otherwise, the conclusion is lost. As a consequence, the analysis cannot react to dependency mutations at second-level granularity; in production, the window duration sometimes has to be set to 3-5 minutes. Also, because of the window-based design, consistent accuracy is hard to achieve whenever one side involves a long-duration task. To keep the analysis as fast as possible, the analysis period is kept under 5 minutes, but some spans then can’t be matched with their parents or children because the data is incomplete or crosses two time windows. Even with an extra mechanism to process the spans left over from previous stages, some would still have to be abandoned to keep the dataset size and memory usage reasonable.
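To make this limitation concrete, the following is a minimal sketch, in Java, of the kind of time-window join such an analysis implies. It is not taken from any of the systems cited above, and every type and method name in it is hypothetical: client spans are buffered per window, and a dependency edge is produced only when the matching server span arrives inside the same window.

import java.util.HashMap;
import java.util.Map;

// Hypothetical window-based dependency join. Spans are bucketed by a fixed window;
// a client/server pair only yields an edge when both sides land in the same bucket,
// so long RPCs, or pairs split across two windows, are lost.
class WindowedDependencyJoin {
    record Span(String spanId, String service, long timestampMillis) {}

    private final long windowMillis;
    // window index -> (span id -> client span waiting for its server span)
    private final Map<Long, Map<String, Span>> clientBuffer = new HashMap<>();

    WindowedDependencyJoin(long windowMillis) { this.windowMillis = windowMillis; }

    void onClientSpan(Span clientSpan) {
        long window = clientSpan.timestampMillis() / windowMillis;
        clientBuffer.computeIfAbsent(window, w -> new HashMap<>())
                    .put(clientSpan.spanId(), clientSpan);
    }

    // Returns "A -> B" if the matching client span is in the same window, else null.
    String onServerSpan(String parentSpanId, Span serverSpan) {
        long window = serverSpan.timestampMillis() / windowMillis;
        Span client = clientBuffer.getOrDefault(window, Map.of()).get(parentSpanId);
        return client == null ? null : client.service() + " -> " + serverSpan.service();
    }

    // Called when a window closes; anything still buffered is abandoned.
    void evict(long window) {
        clientBuffer.remove(window);
    }
}

A server span whose client span landed in the previous window finds nothing in its bucket, which is exactly the mismatch described above; enlarging the window reduces the losses but increases both the latency and the memory held by the buffer.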
In STAM, we introduce new span and context propagation models, together with a new analysis method. The new models add the peer network address (IP or hostname) used at the client side, the client service instance name and the client service name to the context propagation model. This context is passed along with the RPC call from client to server, just like the original trace id and span id in existing tracing systems, and is collected in the server-side span. The new analysis method can then generate the client-server relationship directly, without waiting for the client span. It also sets the peer network address as an alias of the server service. After the data is synchronized across the cluster nodes, the client-side span analysis can use this alias metadata to generate the client-server relationship directly as well. By using these new models and this method in Apache SkyWalking, we remove the time-window based analysis permanently and fully adopt the streaming analysis mode, with less than 5 seconds of latency and consistent accuracy.\nNew Span Model and Context Model The traditional span of a tracing system includes the following fields [1][6][10].\n A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp. Service and Service Instance names of the current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans, each reference including the parent span id and trace id.  In the new span model of STAM, we add the following fields to the span.\nSpan type. An enumeration, including exit, local and entry. Entry and Exit spans are used in networking-related libraries: Entry spans represent a server-side networking library, such as Apache Tomcat[7], and Exit spans represent a client-side networking library, such as Apache HttpComponents[8].\nPeer Network Address. The remote \u0026ldquo;address\u0026rdquo;, suitable for use in exit and entry spans. In Exit spans, the peer network address is the address used by the client library to access the server.\nThese fields are usually optional in many tracing systems, but in STAM we require them for all RPC cases.\nThe Context Model is used to propagate the client-side information to the server side, carried by the original RPC call, usually in a header such as an HTTP header or MQ header. In the old design, it carries the trace id and span id of the client-side span. In STAM, we enhance this model by adding the parent service name, the parent service instance name and the peer of the exit span. These names could be literal strings, and all of these extra fields help to remove the blocking from the streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it can be optimized: in Apache SkyWalking, we designed a register mechanism that exchanges these names for unique IDs. As a result, only 3 integers are added to the RPC context, and the increase in bandwidth is below 1% in the production environment.
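As an illustration only (this is not SkyWalking's actual wire format; the header name, field layout and registry interface below are hypothetical), the enhanced context could be carried as follows once the literal names have been exchanged for integer IDs.

// Hypothetical propagated context: the trace and span ids as before, plus the
// three STAM additions (parent service, parent instance, peer address), with
// the names already exchanged for integer IDs to keep the header small.
record StamContext(String traceId, String spanId,
                   int parentServiceId, int parentInstanceId, String peerAddress) {

    // Serialize into a single header value, e.g. a hypothetical "x-stam-context".
    String toHeaderValue() {
        return String.join("-", traceId, spanId,
                Integer.toString(parentServiceId),
                Integer.toString(parentInstanceId),
                peerAddress);
    }
}

// Hypothetical register client: resolves a literal name to a unique integer ID
// once, so that only integers travel in every subsequent RPC context.
interface RegisterClient {
    int idOf(String name);
}

On the exit-span side, the agent would resolve its own service and instance names through RegisterClient.idOf once, cache the integers, and attach the serialized context to the outgoing request header together with the peer address it is dialing.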
Also, because of the window-based design, consistent accuracy can't easily be achieved if one side involves a long-running task. To keep the analysis as fast as possible, the analysis period is kept under 5 minutes, but some spans can't match their parent or children if the analysis is incomplete or the spans cross two time windows. Even if a mechanism is added to process the spans left over from previous stages, some still have to be abandoned to keep the dataset size and memory usage reasonable.

In the STAM, we introduce new span and context propagation models, together with a new analysis method. These new models add the peer network address (IP or hostname) used at the client side, the client service instance name and the client service name into the context propagation model. This context is passed with the RPC call from client to server, just like the original trace id and span id in existing tracing systems, and is collected in the server-side span. The new analysis method can then generate the client-server relationship directly, without waiting for the client span. It also sets the peer network address as an alias of the server service. After the alias data is synced across cluster nodes, the client-side span analysis can use this alias metadata to generate the client-server relationship directly as well. By using these new models and this method in Apache SkyWalking, we remove the time-window-based analysis permanently and fully adopt the streaming analysis mode, with less than 5 seconds of latency and consistent accuracy.

New Span Model and Context Model
The traditional span of a tracing system includes the following fields [1][6][10].

 A trace id to represent the whole trace. A span id to represent the current span. An operation name to describe what operation this span did. A start timestamp. A finish timestamp. Service and Service Instance names of the current span. A set of zero or more key:value Span Tags. A set of zero or more Span Logs, each of which is itself a key:value map paired with a timestamp. References to zero or more causally related Spans; a reference includes the parent span id and trace id.

In the new span model of STAM we add the following fields to the span.

Span type. An enumeration including exit, local and entry. Entry and Exit spans are used in networking-related libraries. Entry spans represent a server-side networking library, such as Apache Tomcat[7]. Exit spans represent a client-side networking library, such as Apache HttpComponents[8].

Peer Network Address. The remote address, suitable for use in exit and entry spans. In Exit spans, the peer network address is the address used by the client library to access the server.

These fields are usually optional in many tracing systems, but in STAM we require them in all RPC cases.

The Context Model is used to propagate the client-side information to the server side, carried by the original RPC call, usually in a header such as an HTTP header or MQ header. In the old design, it carries the trace id and span id of the client-side span. In the STAM, we enhance this model by adding the parent service name, the parent service instance name and the peer of the exit span. The names can be literal strings. All these extra fields help to remove the blocking in streaming analysis. Compared to the existing context model, this uses a little more bandwidth, but it can be optimized. In Apache SkyWalking, we design a register mechanism to exchange unique IDs to represent these names.
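As a rough illustration of the enhanced context model described above, the sketch below shows how a client-side exit span could inject the STAM fields into an RPC header and how the server side could read them back. The header name (x-stam-context) and all class and method names are assumptions made for this illustration, not Apache SkyWalking's actual header format or API.

```java
import java.util.Map;

// Illustrative STAM context: the extra fields the paper adds to context propagation.
// A minimal sketch under assumed names, not SkyWalking's real implementation.
final class StamContext {
    static final String HEADER = "x-stam-context"; // hypothetical header name

    final String traceId;
    final String spanId;
    final String parentService;         // client service name
    final String parentServiceInstance; // client service instance name
    final String peerAddress;           // address used by the client, e.g. "10.0.0.5:8080"

    StamContext(String traceId, String spanId, String parentService,
                String parentServiceInstance, String peerAddress) {
        this.traceId = traceId;
        this.spanId = spanId;
        this.parentService = parentService;
        this.parentServiceInstance = parentServiceInstance;
        this.peerAddress = peerAddress;
    }

    // Client side (exit span): serialize the context into the outgoing RPC header.
    void inject(Map<String, String> rpcHeaders) {
        rpcHeaders.put(HEADER, String.join("|",
                traceId, spanId, parentService, parentServiceInstance, peerAddress));
    }

    // Server side (entry span): restore the context from the incoming RPC header.
    static StamContext extract(Map<String, String> rpcHeaders) {
        String value = rpcHeaders.get(HEADER);
        if (value == null) {
            return null; // uninstrumented client, see the evaluation section
        }
        String[] parts = value.split("\\|", 5);
        if (parts.length < 5) {
            return null; // malformed header, ignore
        }
        return new StamContext(parts[0], parts[1], parts[2], parts[3], parts[4]);
    }
}
```

With a register mechanism such as the one just described, the three literal names could be replaced by three integer IDs, which is how the bandwidth overhead stays small in practice.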
As a result, only 3 integers are added to the RPC context, so the increase in bandwidth is less than 1% in the production environment.

The changes to these two models eliminate the time windows in the analysis process, and the server-side span analysis gains context-aware capability.

New Topology Analysis Method
The new topology analysis method at the core of STAM processes the spans in stream mode. The server-side span, also named the entry span, includes the parent service name, the parent service instance name and the peer of the exit span. From it, the analysis process can establish the following results.

 1. Set the peer of the exit span as an alias of the current (server) service and instance: the aliases peer network address <-> service name and peer network address <-> service instance name are created. These two are synced to all analysis nodes and persisted in the storage, so that other analysis processors have this alias information. 2. Generate the relationships parent service name -> current service name and parent service instance name -> current service instance name, unless a different peer network address <-> service instance name mapping is found. In that case, only generate the relationships peer network address <-> service name and peer network address <-> service instance name.

For the analysis of the client-side span (exit span), there are three possibilities.

 1. The peer in the exit span already has the alias names established by the server-side span analysis in step (1). Then use the alias names to replace the peer, and generate traffic of current service name -> alias service name and current service instance name -> alias service instance name. 2. If the alias can not be found, simply generate traffic for current service name -> peer and current service instance name -> peer. 3. If multiple alias names of peer network address <-> service instance name are found, keep generating traffic for current service name -> peer network address and current service instance name -> peer network address.

 Figure 2, Apache SkyWalking uses STAM to detect and visualize the topology of distributed systems.

Evaluation
In this section, we evaluate the new models and analysis method in the context of several typical cases in which the old method loses timeliness and consistent accuracy.

 1. New Service Online or Auto Scale Out

New services can be added to the whole topology at any time by the developer team, or automatically by a container orchestration platform through a scale-out policy, such as Kubernetes[5]. In neither case can the monitoring system be notified manually. By using STAM, we detect the new node automatically while keeping the analysis process unblocked and consistent with the detected nodes. In this case, a new service and network address (IP, port or both) are used. Since the peer network address <-> service mapping does not exist yet, the traffic of client service -> peer network address is generated and persisted in the storage first. After the mapping is generated, further traffic of client service -> server service can be identified, generated and aggregated in the analysis platform.
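As a rough illustration of the streaming analysis described above, the sketch below shows how an analysis core could handle entry and exit spans without any time window; the class names, the in-memory alias table and the emit call are assumptions made for this illustration, not Apache SkyWalking's actual implementation (which syncs aliases across nodes and persists them in storage, and also tracks instance-level aliases).

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Minimal sketch of the STAM analysis rules for entry/exit spans, assumed names only.
final class StamTopologyAnalyzer {
    // peer network address -> server service name alias (instance aliases omitted for brevity)
    private final Map<String, String> serviceAlias = new ConcurrentHashMap<>();

    // Entry span: the context carries the parent service and the peer address used by the client.
    void onEntrySpan(String currentService, String parentService, String peerAddress) {
        if (parentService != null && peerAddress != null) {
            serviceAlias.put(peerAddress, currentService);   // register the alias
            emit(parentService, currentService);             // parent -> current, no waiting
        }
    }

    // Exit span: resolve the peer through the alias table if the server side is instrumented.
    void onExitSpan(String currentService, String peerAddress) {
        String alias = serviceAlias.get(peerAddress);
        if (alias != null) {
            emit(currentService, alias);                     // current -> aliased server service
        } else {
            emit(currentService, peerAddress);               // current -> raw peer (conjectural node)
        }
    }

    private void emit(String source, String target) {
        // In a real system this would be aggregated and persisted; here we just print it.
        System.out.println(source + " -> " + target);
    }
}
```

In this sketch, an exit span analyzed before the alias arrives simply produces a current service -> peer address edge, which is exactly the gap that the query-stage merge described next closes.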
To fill the gap for the small amount of traffic generated before the mapping exists, we perform the peer network address <-> service mapping translation again at query stage, merging client service -> peer network address edges into client service -> server service edges. In production, a whole SkyWalking analysis platform deployment uses fewer than 100 VMs, and syncing among them finishes within 10 seconds, in most cases only 3-5 seconds. At query stage, the data has already been aggregated at minute or at least second granularity. The query-merge cost is not related to how much traffic happened before the mapping was generated; it is only affected by the sync duration, here only about 3 seconds. Because of that, in a minute-level aggregated topology, this only adds 1 or 2 relationship records to the whole topology relationship dataset. Considering that a topology of over 100 services has over 500 relationship records per minute, the payload increase for this query merge is very limited and affordable. This property is significant in a large, high-load distributed system, as we don't need to worry about its scaling capability. Some fork versions instead choose to update the existing client service -> peer network address records to client service -> server service once the new peer mapping is detected, in order to remove the extra load at query stage permanently.

 Figure 3, Span analysis by using the new topology analysis method

 2. Existing Uninstrumented Nodes

Every topology detection method has to work in this case. In many production environments there are nodes that can't be instrumented. Causes for this might include: (1) Restrictions of the technology. For some applications written in Golang or C++, there is no easy way, as there is in Java or .NET, to do auto instrumentation with an agent, so the code may not be instrumented automatically. (2) Middleware, such as an MQ or database server, has not adopted the tracing system, which makes implementing the middleware instrumentation difficult or time consuming. (3) A 3rd-party service or cloud service doesn't work with the current tracing system. (4) Lack of resources: e.g., the developer or operation team lacks the time to make the instrumentation ready.

STAM works well even if the client or server side has no instrumentation; it still keeps the topology as accurate as possible.

If the client side hasn't been instrumented, the server-side span doesn't get any reference through the RPC context, so it simply uses the peer to generate traffic, as shown in Figure 4.

 Figure 4, STAM traffic generation when there is no client-side instrumentation

As shown in Figure 5, in the other case, with no server-side instrumentation, the client span analysis doesn't need to handle anything special. The STAM analysis core simply keeps generating client service -> peer network address traffic. As no mapping for the peer network address is ever generated, there is no merging.

 Figure 5, STAM traffic generation when there is no server-side instrumentation

 3. Uninstrumented Node Having Header Forward Capability

Besides the cases evaluated in (2) Existing Uninstrumented Nodes, there is one complex and special case: the uninstrumented node has the capability to propagate headers from downstream to upstream, which is typical of proxies such as Envoy[11], Nginx[12] and Spring Cloud Gateway[13].
As a proxy, it can forward all headers from downstream to upstream in order to keep certain information in the header, including the tracing context, authentication, browser information and routing information, accessible to the business services behind the proxy, as in an Envoy route configuration. When such a proxy can't be instrumented, for whatever reason, this should not affect the topology detection.

In this case, the proxy address is used at the client side and is propagated through the RPC context as the peer network address, and the proxy forwards it to different upstream services. STAM can detect this case and generate the proxy as a conjectural node. In STAM, more than one alias name is generated for this network address. After those aliases are detected and synchronized to the analysis nodes, the analysis core knows there is at least one uninstrumented service standing between the client and the servers. So it generates the relationships client service -> peer network address, peer network address -> server service B and peer network address -> server service C, as shown in Figure 6.

 Figure 6, STAM traffic generation when the proxy is uninstrumented

Conclusion
This paper described the STAM, which is, to the best of our knowledge, the most effective topology detection method for distributed tracing systems. It replaces the time-window-based topology analysis method used by tracing-based monitoring systems. It removes the disk and memory cost of time-window-based analysis entirely, along with the barriers to horizontal scaling. One STAM implementation, Apache SkyWalking, is widely used for monitoring hundreds of applications in production. Some of them generate over 100 TB of tracing data per day and build the topology of over 200 services in real time.

Acknowledgments
We thank all contributors of the Apache SkyWalking project for their suggestions, code contributions implementing the STAM, and feedback from using the STAM and SkyWalking in their production environments.

License
This paper and the STAM are licensed under the Apache License 2.0.

References
 [1] Dapper, a Large-Scale Distributed Systems Tracing Infrastructure, https://research.google.com/pubs/pub36356.html?spm=5176.100239.blogcont60165.11.OXME9Z
 [2] Apache SkyWalking, http://skywalking.apache.org/
 [3] Apache Open Users, https://skywalking.apache.org/users/
 [4] Zipkin, https://zipkin.io/
 [5] Kubernetes, Production-Grade Container Orchestration. Automated container deployment, scaling, and management. https://kubernetes.io/
 [6] OpenTracing Specification, https://github.com/opentracing/specification/blob/master/specification.md
 [7] Apache Tomcat, http://tomcat.apache.org/
 [8] Apache HttpComponents, https://hc.apache.org/
 [9] Zipkin doc, 'Instrumenting a library' section, 'Communicating trace information' paragraph. https://zipkin.io/pages/instrumenting
 [10] Jaeger Tracing, https://jaegertracing.io/
 [11] Envoy Proxy, http://envoyproxy.io/
 [12] Nginx, http://nginx.org/
 [13] Spring Cloud Gateway, https://spring.io/projects/spring-cloud-gateway
","title":"STAM: Enhancing Topology Auto Detection For A Highly Distributed and Large-Scale Application System","url":"/docs/main/v9.7.0/en/papers/stam/"},{"content":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the development and testing environment.
The standalone mode runs as a single process, started by:
$ ./banyand-server standalone
(BanyanDB ASCII-art banner)
***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC
If no errors occur, the banyand-server listens on 0.0.0.0:17912 for gRPC requests.
At the same time, if no errors occur, the banyand-server listens on 0.0.0.0:17913 for HTTP requests. The HTTP server is used by the CLI and the Web UI.
","title":"Standalone Mode","url":"/docs/skywalking-banyandb/latest/installation/standalone/"},{"content":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the development and testing environment. Once you unpack and extract skywalking-banyandb-x.x.x-bin.tgz, you can start the BanyanDB server; the standalone mode runs as a single process.
$ cd skywalking-banyandb-x.x.x-bin/bin
$ ./banyand-server-static standalone
(BanyanDB ASCII-art banner)
***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC
If no errors occur, the banyand server listens on 0.0.0.0:17912 for gRPC requests.
At the same time, if no errors occur, the banyand server listens on 0.0.0.0:17913 for HTTP requests. The HTTP server is used by the CLI and the Web UI.
","title":"Standalone Mode","url":"/docs/skywalking-banyandb/next/installation/standalone/"},{"content":"Standalone Mode The standalone mode is the simplest way to run Banyand. It is suitable for the development and testing environment. The standalone mode runs as a single process, started by:
$ ./banyand-server standalone
(BanyanDB ASCII-art banner)
***starting as a standalone server**** ... ... ***Listening to**** addr::17912 module:LIAISON-GRPC
If no errors occur, the banyand-server listens on 0.0.0.0:17912 for gRPC requests.
At the same time, if no errors occur, the banyand-server listens on 0.0.0.0:17913 for HTTP requests. The HTTP server is used by the CLI and the Web UI.
","title":"Standalone Mode","url":"/docs/skywalking-banyandb/v0.5.0/installation/standalone/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s.
We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/latest/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/next/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode In different deployment tools, such as k8s, you may need different startup modes. We provide two other optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provide services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization, and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables), as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, and TiDB tables, starts to listen, and provide services. In other words, the OAP server would anticipate having another OAP server to carry out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.0.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. 
We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.1.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.2.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.3.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. 
We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.4.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.5.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.6.0/en/setup/backend/backend-start-up-mode/"},{"content":"Start up mode You may need different startup modes in different deployment tools, such as k8s. 
We provide two additional optional startup modes.\nDefault mode The default mode carries out tasks to initialize as necessary, starts to listen, and provides services.\nRun /bin/oapService.sh(.bat) to start in this mode. This is also applicable when you\u0026rsquo;re using startup.sh(.bat) to start.\nInit mode In this mode, the OAP server starts up to carry out initialization and then exits. You could use this mode to initialize your storage (such as ElasticSearch indexes, MySQL, and TiDB tables) as well as your data.\nRun /bin/oapServiceInit.sh(.bat) to start in this mode.\nNo-init mode In this mode, the OAP server starts up without carrying out initialization. Rather, it watches out for the ElasticSearch indexes, MySQL, TiDB and other storage tables, starts listening and provides services. In other words, the OAP server would anticipate having another OAP server carrying out the initialization.\nRun /bin/oapServiceNoInit.sh(.bat) to start in this mode.\n","title":"Start up mode","url":"/docs/main/v9.7.0/en/setup/backend/backend-start-up-mode/"},{"content":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow Operator installation instrument to install the operator.\nDefine Storage with default setting  sample.yaml(use the internal type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:internalversion:7.5.1instances:3image:docker.elastic.co/elasticsearch/elasticsearch:7.5.1security:user:secretName:defaulttls:truesample.yaml(use the external type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:externaladdress:\u0026#34;https://elasticsearch\u0026#34;security:user:secretName:defaultDeploy Storage  Deploy the Storage use the below command:  $ kubectl apply -f sample.yaml Check the Storage in Kubernetes:   If you deploy the storage with the internal type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample 3 elasticsearch 7.5.1 internal  If you deploy the storage with the external type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample elasticsearch 7.5.1 external Check the Statefulset in Kubernetes:  $ kubectl get statefulset NAME READY AGE sample-elasticsearch 3/3 7s Specify Storage Name in OAP server Here we modify the default OAP server configuration file,the new yaml file as follows:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServermetadata:name:defaultspec:version:9.5.0instances:1image:apache/skywalking-oap-server:9.5.0service:template:type:ClusterIPstorage:name:sample Deploy the OAP server use the new yaml file:  $ kubectl apply -f oap.yaml Check the OAP server in Kubernetes:  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS sample 1 1 sample-oap.default Check whether the pod generated by OAP server is running correctly.  
$ kubectl get pod -l app=oap NAME READY STATUS RESTARTS AGE sample-oap-5bc79567b7-tkw6q 1/1 Running 0 6m31s ","title":"Storage Usage","url":"/docs/skywalking-swck/latest/examples/storage/"},{"content":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow Operator installation instrument to install the operator.\nDefine Storage with default setting  sample.yaml(use the internal type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:internalversion:7.5.1instances:3image:docker.elastic.co/elasticsearch/elasticsearch:7.5.1security:user:secretName:defaulttls:truesample.yaml(use the external type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:externaladdress:\u0026#34;https://elasticsearch\u0026#34;security:user:secretName:defaultDeploy Storage  Deploy the Storage use the below command:  $ kubectl apply -f sample.yaml Check the Storage in Kubernetes:   If you deploy the storage with the internal type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample 3 elasticsearch 7.5.1 internal  If you deploy the storage with the external type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample elasticsearch 7.5.1 external Check the Statefulset in Kubernetes:  $ kubectl get statefulset NAME READY AGE sample-elasticsearch 3/3 7s Specify Storage Name in OAP server Here we modify the default OAP server configuration file,the new yaml file as follows:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServermetadata:name:defaultspec:version:9.5.0instances:1image:apache/skywalking-oap-server:9.5.0service:template:type:ClusterIPstorage:name:sample Deploy the OAP server use the new yaml file:  $ kubectl apply -f oap.yaml Check the OAP server in Kubernetes:  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS sample 1 1 sample-oap.default Check whether the pod generated by OAP server is running correctly.  
$ kubectl get pod -l app=oap NAME READY STATUS RESTARTS AGE sample-oap-5bc79567b7-tkw6q 1/1 Running 0 6m31s ","title":"Storage Usage","url":"/docs/skywalking-swck/next/examples/storage/"},{"content":"Storage Usage In this example, you will learn how to use the Storage.\nInstall Operator Follow Operator installation instrument to install the operator.\nDefine Storage with default setting  sample.yaml(use the internal type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:internalversion:7.5.1instances:3image:docker.elastic.co/elasticsearch/elasticsearch:7.5.1security:user:secretName:defaulttls:truesample.yaml(use the external type)  apiVersion:operator.skywalking.apache.org/v1alpha1kind:Storagemetadata:name:samplespec:type:elasticsearchconnectType:externaladdress:\u0026#34;https://elasticsearch\u0026#34;security:user:secretName:defaultDeploy Storage  Deploy the Storage use the below command:  $ kubectl apply -f sample.yaml Check the Storage in Kubernetes:   If you deploy the storage with the internal type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample 3 elasticsearch 7.5.1 internal  If you deploy the storage with the external type:  $ kubectl get storage NAME INSTANCES TYPE VERSION CONNECTTYPE sample elasticsearch 7.5.1 external Check the Statefulset in Kubernetes:  $ kubectl get statefulset NAME READY AGE sample-elasticsearch 3/3 7s Specify Storage Name in OAP server Here we modify the default OAP server configuration file,the new yaml file as follows:\napiVersion:operator.skywalking.apache.org/v1alpha1kind:OAPServermetadata:name:defaultspec:version:9.5.0instances:1image:apache/skywalking-oap-server:9.5.0service:template:type:ClusterIPstorage:name:sample Deploy the OAP server use the new yaml file:  $ kubectl apply -f oap.yaml Check the OAP server in Kubernetes:  $ kubectl get oapserver NAME INSTANCES RUNNING ADDRESS sample 1 1 sample-oap.default Check whether the pod generated by OAP server is running correctly.  $ kubectl get pod -l app=oap NAME READY STATUS RESTARTS AGE sample-oap-5bc79567b7-tkw6q 1/1 Running 0 6m31s ","title":"Storage Usage","url":"/docs/skywalking-swck/v0.9.0/examples/storage/"},{"content":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling backend cluster capacity up and down Changing backend cluster configuration Injecting configuration into the target cluster. Securing traffic between target clusters and backend cluster, or between backend cluster with TLS certificate  Motivation If the user of SkyWalking decided to deploy it into Kubernetes, there’re some critical challenges for them.\nFirst of them is the complex of deployment, it doesn’t only mean the OAP server and storage cluster, but also include configuring target cluster to send data to backend. Then they might struggle to keep all of them reliable. The size of the data transferred is very big and the cost of data stored is very high. The user usually faces some problems, for instance, OAP server stuck, Elasticsearch cluster GC rate sharply increases, the system load of some OAP instances is much more than others, and etc.\nWith the help of CRDs and the Controller, we can figure out the above problems and give users a more pleasing experience when using SWCK.\nProposal Production Design I proposed two crucial components for SWCK, backend operator and target injector. 
The first one intends to solve the problems of backend operation, and the other focuses on simplifying the configuration of the target cluster.

They should be built as two separate binaries/images, and then installed according to the user's requirements.

Backend Operator
The operator would be a Go application that manages and monitors other components, for example OAP pods, storage pods (ES, MySQL, etc.), ingress/entry and configuration.

It should be capable of HA, performance, and scalability.

It should also have the following capabilities:

 Defining CRDs for provisioning and configuring. Provisioning the backend automatically. Splitting OAP instances according to their type (L1/L2) and improving the ratio between them. Performance tuning of OAP and storage. Updating configuration dynamically, whether the configuration item is dynamic or not. Upgrading the image version seamlessly. Health checking and failure recovery. Collecting and analyzing metrics and logs, abnormal detection. Horizontal scaling and scheduling tuning. Load balancing the input gRPC streams and GraphQL queries. Supporting externally hosted storage services. Securing traffic.

The above items should be accomplished over several versions/releases. The developers should prioritize them and refine the design.

Target injector
The injector can inject the agent lib and configuration into the target cluster automatically, and enable/disable distributed tracing according to labels marked on resources or namespaces.

It also integrates the backend with service mesh platforms, for example Istio.

It should be a Go application and a Go lib that can be invoked by swctl to generate pod YAMLs manually.

Technology Selection
 Development Language: GO
 Operator dev tool: TBD
 Building tool: Make (Docker for Windows)
 Installation: Helm3 chart
 Repository: github.com/apache/skywalking-swck
 CI: Github action
","title":"Summary","url":"/docs/skywalking-swck/latest/design/proposal/"},{"content":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to support:

 Managing and monitoring. Scaling backend cluster capacity up and down. Changing backend cluster configuration. Injecting configuration into the target cluster. Securing traffic between target clusters and the backend cluster, or within the backend cluster, with TLS certificates.

Motivation
If a SkyWalking user decides to deploy it into Kubernetes, there are some critical challenges.

The first is the complexity of deployment: it does not only mean the OAP server and storage cluster, but also includes configuring the target cluster to send data to the backend. Then they might struggle to keep all of them reliable. The size of the data transferred is very large and the cost of the stored data is very high. The user usually faces problems such as the OAP server getting stuck, the Elasticsearch cluster GC rate sharply increasing, or the system load of some OAP instances being much higher than that of others.

With the help of CRDs and the Controller, we can solve the above problems and give users a more pleasant experience when using SWCK.

Proposal
Production Design
I proposed two crucial components for SWCK, the backend operator and the target injector.
The first one intends to solve the problems of the backend operation, and another focus on simplifying the configuration of the target cluster.\nThey should be built as two separate binary/image, then are installed according to user’s requirements.\nBackend Operator The operator might be a GO application that manages and monitors other components, for example, OAP pods, storage pods(ES, MySQL, and etc.), ingress/entry and configuration.\nIt should be capable of HA, performance, and scalability.\nIt should also have the following capabilities:\n Defining CRDs for provisioning and configuring Provisioning backend automatically Splitting OAP instances according to their type(L1/L2), improving the ratio of them. Performance tuning of OAP and storage. Updating configuration dynamically, irrespectively it’s dynamic or not. Upgrading mirror version seamlessly. Health checking and failure recovery Collecting and analyzing metrics and logs, abnormal detection Horizontal scaling and scheduling tuning. Loadbalancing input gPRC stream and GraphQL querying. Supporting externally hosted storage service. Securing traffic  The above items should be accomplished in several versions/releases. The developer should sort the priority of them and grind the design.\nTarget injector The injector can inject agent lib and configuration into the target cluster automatically, enable/disable distributed tracing according to labels marked on resources or namespace.\nIt also integrates backend with service mesh platform, for example, Istio.\nIt should be a GO application and a GO lib to be invoked by swctl to generate pod YAMLs manually.\nTechnology Selection  Development Language: GO Operator dev tool: TBD Building tool: Make(Docker for windows) Installation: Helm3 chart Repository: github.com/apache/skywalking-swck CI: Github action  ","title":"Summary","url":"/docs/skywalking-swck/next/design/proposal/"},{"content":"Summary The SkyWalking Cloud on Kubernetes is proposed in order to:\n Managing and Monitoring Scaling backend cluster capacity up and down Changing backend cluster configuration Injecting configuration into the target cluster. Securing traffic between target clusters and backend cluster, or between backend cluster with TLS certificate  Motivation If the user of SkyWalking decided to deploy it into Kubernetes, there’re some critical challenges for them.\nFirst of them is the complex of deployment, it doesn’t only mean the OAP server and storage cluster, but also include configuring target cluster to send data to backend. Then they might struggle to keep all of them reliable. The size of the data transferred is very big and the cost of data stored is very high. The user usually faces some problems, for instance, OAP server stuck, Elasticsearch cluster GC rate sharply increases, the system load of some OAP instances is much more than others, and etc.\nWith the help of CRDs and the Controller, we can figure out the above problems and give users a more pleasing experience when using SWCK.\nProposal Production Design I proposed two crucial components for SWCK, backend operator and target injector. 
The first one intends to solve the problems of the backend operation, and another focus on simplifying the configuration of the target cluster.\nThey should be built as two separate binary/image, then are installed according to user’s requirements.\nBackend Operator The operator might be a GO application that manages and monitors other components, for example, OAP pods, storage pods(ES, MySQL, and etc.), ingress/entry and configuration.\nIt should be capable of HA, performance, and scalability.\nIt should also have the following capabilities:\n Defining CRDs for provisioning and configuring Provisioning backend automatically Splitting OAP instances according to their type(L1/L2), improving the ratio of them. Performance tuning of OAP and storage. Updating configuration dynamically, irrespectively it’s dynamic or not. Upgrading mirror version seamlessly. Health checking and failure recovery Collecting and analyzing metrics and logs, abnormal detection Horizontal scaling and scheduling tuning. Loadbalancing input gPRC stream and GraphQL querying. Supporting externally hosted storage service. Securing traffic  The above items should be accomplished in several versions/releases. The developer should sort the priority of them and grind the design.\nTarget injector The injector can inject agent lib and configuration into the target cluster automatically, enable/disable distributed tracing according to labels marked on resources or namespace.\nIt also integrates backend with service mesh platform, for example, Istio.\nIt should be a GO application and a GO lib to be invoked by swctl to generate pod YAMLs manually.\nTechnology Selection  Development Language: GO Operator dev tool: TBD Building tool: Make(Docker for windows) Installation: Helm3 chart Repository: github.com/apache/skywalking-swck CI: Github action  ","title":"Summary","url":"/docs/skywalking-swck/v0.9.0/design/proposal/"},{"content":"Support ActiveMQ classic Monitoring Motivation Apache ActiveMQ Classic is a popular and powerful open source messaging and Integration Patterns server. It supports many Cross Language Clients and Protocols, comes with easy to use Enterprise Integration Patterns and many advanced features.\nNow I want to add ActiveMQ Classic monitoring via the OpenTelemetry Collector which fetches metrics from jmx prometheus exporter run as a Java Agent.\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes Apache ActiveMQ Classic has extensive support for JMX to allow you to monitor and control the behavior of the broker via the JMX MBeans.\nJmx prometheus exporter collects metrics data from ActiveMQ classic, this exporter is intended to be run as a Java Agent, exposing a HTTP server and serving metrics of the local JVM.\nUsing openTelemetry receiver to fetch these metrics to SkyWalking OAP server.\nActiveMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     System Load Average Count meter_activemq_cluster_system_load_average The average system load, range:[0, 10000]. JMX Prometheus Exporter   Thread Count Count meter_activemq_cluster_thread_count Threads currently used by the JVM. JMX Prometheus Exporter   Init Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_init The initial amount of heap memory available. JMX Prometheus Exporter   Committed Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_committed The memory is guaranteed to be available for the JVM to use. 
JMX Prometheus Exporter   Used Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_used The amount of JVM heap memory currently in use. JMX Prometheus Exporter   Max Heap Memory Usage Bytes meter_activemq_cluster_heap_memory_usage_max The maximum possible size of the heap memory. JMX Prometheus Exporter   GC G1 Old Collection Count Count meter_activemq_cluster_gc_g1_old_collection_count The gc count of G1 Old Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Count Count meter_activemq_cluster_gc_g1_young_collection_count The gc count of G1 Young Generation(JDK[9,17]). JMX Prometheus Exporter   GC G1 Old Collection Time ms meter_activemq_cluster_gc_g1_old_collection_time The gc time spent in G1 Old Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC G1 Young Collection Time ms meter_activemq_cluster_gc_g1_young_collection_time The gc time spent in G1 Young Generation in milliseconds(JDK[9,17]). JMX Prometheus Exporter   GC Parallel Old Collection Count Count meter_activemq_cluster_gc_parallel_old_collection_count The gc count of Parallel Old Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Count Count meter_activemq_cluster_gc_parallel_young_collection_count The gc count of Parallel Young Generation(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Old Collection Time ms meter_activemq_cluster_gc_parallel_old_collection_time The gc time spent in Parallel Old Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   GC Parallel Young Collection Time ms meter_activemq_cluster_gc_parallel_young_collection_time The gc time spent in Parallel Young Generation in milliseconds(JDK[6,8]). JMX Prometheus Exporter   Enqueue Rate Count/s meter_activemq_cluster_enqueue_rate Number of messages that have been sent to the cluster per second(JDK[6,8]). JMX Prometheus Exporter   Dequeue Rate Count/s meter_activemq_cluster_dequeue_rate Number of messages that have been acknowledged or discarded on the cluster per second. JMX Prometheus Exporter   Dispatch Rate Count/s meter_activemq_cluster_dispatch_rate Number of messages that has been delivered to consumers per second. JMX Prometheus Exporter   Expired Rate Count/s meter_activemq_cluster_expired_rate Number of messages that have been expired per second. JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_cluster_average_enqueue_time The average time a message was held on this cluster. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_cluster_max_enqueue_time The max time a message was held on this cluster. JMX Prometheus Exporter    ActiveMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Uptime sec meter_activemq_broker_uptime Uptime of the broker in day. JMX Prometheus Exporter   State  meter_activemq_broker_state If slave broker 1 else 0. JMX Prometheus Exporter   Current Connections Count meter_activemq_broker_current_connections The number of clients connected to the broker currently. JMX Prometheus Exporter   Current Producer Count Count meter_activemq_broker_current_producer_count The number of producers currently attached to the broker. JMX Prometheus Exporter   Current Consumer Count Count meter_activemq_broker_current_consumer_count The number of consumers consuming messages from the broker. JMX Prometheus Exporter   Producer Count Count meter_activemq_broker_producer_count Number of message producers active on destinations. 
JMX Prometheus Exporter   Consumer Count Count meter_activemq_broker_consumer_count Number of message consumers subscribed to destinations. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_broker_enqueue_count The total number of messages sent to the broker. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_broker_dequeue_count The total number of messages the broker has delivered to consumers. JMX Prometheus Exporter   Enqueue Rate Count/sec meter_activemq_broker_enqueue_rate The total number of messages sent to the broker per second. JMX Prometheus Exporter   Dequeue Rate Count/sec meter_activemq_broker_dequeue_rate The total number of messages the broker has delivered to consumers per second. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_broker_memory_percent_usage Percentage of configured memory used by the broker. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_broker_memory_percent_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Limit Bytes meter_activemq_broker_memory_limit Memory limited used for holding undelivered messages before paging to temporary storage. JMX Prometheus Exporter   Store Percent Usage % meter_activemq_broker_store_percent_usage Percentage of available disk space used for persistent message storage. JMX Prometheus Exporter   Store Limit Bytes meter_activemq_broker_store_limit Disk limited used for persistent messages before producers are blocked. JMX Prometheus Exporter   Temp Percent Usage Bytes meter_activemq_broker_temp_percent_usage Percentage of available disk space used for non-persistent message storage. JMX Prometheus Exporter   Temp Limit Bytes meter_activemq_broker_temp_limit Disk limited used for non-persistent messages and temporary data before producers are blocked. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_broker_average_message_size Average message size on this broker. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_broker_max_message_size Max message size on this broker. JMX Prometheus Exporter   Queue Size Count meter_activemq_broker_queue_size Number of messages on this broker that have been dispatched but not acknowledged. JMX Prometheus Exporter    ActiveMQ Destination Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Producer Count Count meter_activemq_destination_producer_count Number of producers attached to this destination. JMX Prometheus Exporter   Consumer Count Count meter_activemq_destination_consumer_count Number of consumers subscribed to this destination. JMX Prometheus Exporter   Topic Consumer Count Count meter_activemq_destination_topic_consumer_count Number of consumers subscribed to the topics. JMX Prometheus Exporter   Queue Size Count meter_activemq_destination_queue_size The number of messages that have not been acknowledged by a consumer. JMX Prometheus Exporter   Memory Usage Bytes meter_activemq_destination_memory_usage Memory used by undelivered messages in bytes. JMX Prometheus Exporter   Memory Percent Usage % meter_activemq_destination_memory_percent_usage Percentage of configured memory used by the destination. JMX Prometheus Exporter   Enqueue Count Count meter_activemq_destination_enqueue_count The number of messages sent to the destination. JMX Prometheus Exporter   Dequeue Count Count meter_activemq_destination_dequeue_count The number of messages the destination has delivered to consumers. 
JMX Prometheus Exporter   Average Enqueue Time ms meter_activemq_destination_average_enqueue_time The average time a message was held on this destination. JMX Prometheus Exporter   Max Enqueue Time ms meter_activemq_destination_max_enqueue_time The max time a message was held on this destination. JMX Prometheus Exporter   Dispatch Count Count meter_activemq_destination_dispatch_count Number of messages that have been delivered to consumers. JMX Prometheus Exporter   Expired Count Count meter_activemq_destination_expired_count Number of messages that have expired. JMX Prometheus Exporter   Inflight Count Count meter_activemq_destination_inflight_count Number of messages that have been dispatched to but not acknowledged by consumers. JMX Prometheus Exporter   Average Message Size Bytes meter_activemq_destination_average_message_size Average message size on this destination. JMX Prometheus Exporter   Max Message Size Bytes meter_activemq_destination_max_message_size Max message size on this destination. JMX Prometheus Exporter

Imported Dependencies libs and their licenses. No new dependency.
Compatibility No breaking changes.
General usage docs ","title":"Support ActiveMQ classic Monitoring","url":"/docs/main/next/en/swip/swip-8/"},{"content":"Support available layers of service in the topology.

Motivation
The UI can jump to the service dashboard and query the service hierarchy from a topology node. For now, a topology node includes the name and ID but not the layers. Since a service can have multiple layers, the limitation is that this only works on the current layer which the topology represents:

 The UI can not jump into another layer's dashboard of the service. The UI can not query the service hierarchy from the topology node if the node is not in the current layer.

Here are typical use cases where the user should have a chance to jump into another layer's dashboard of the service:

 In the mesh topology, mesh (layer MESH) and mesh-dp (layer MESH_DP) share a similar topology; one node has two layers. In the mesh topology, agent (layer GENERAL) + virtual database (layer VIRTUAL_DATABASE); the node is in different layers.

Both of these cases have a hybrid-layer topology. If we support this, we get better cross-layer interaction.

Architecture Graph
There is no significant architecture-level change.

Proposed Changes
Add the layers info into the topology node:

 When building the topology node, fetch the layers info from the service according to the service id. Return the layers info in the Node when querying the topology.

Imported Dependencies libs and their licenses. No new library is planned to be added to the codebase.

Compatibility
About the protocol, there should be no breaking changes, but enhancements only. A new field, layers, is going to be added to the Node in the query protocol topology.graphqls.

type Node {
  # The service ID of the node.
  id: ID!
  # The literal name of the #id.
  name: String!
  # The type name may be
  # 1. The service provider/middleware tech, such as: Tomcat, SpringMVC
  # 2. Conjectural Service, e.g. MySQL, Redis, Kafka
  type: String
  # It is a conjecture node or real node, to represent a service or endpoint.
  isReal: Boolean!
  # The layers of the service.
  layers: [String!]!
}

General usage docs
This proposal doesn't impact the end user in any way of using SkyWalking.
The remarkable change will be in the UI topology map, users could jump into the proper layer\u0026rsquo;s service dashboard and query the service hierarchy from the topology node.\n","title":"Support available layers of service in the topology.","url":"/docs/main/next/en/swip/swip-4/"},{"content":"Support ClickHouse Monitoring Motivation ClickHouse is a high-performance, column-oriented SQL database management system (DBMS) for online analytical processing (OLAP). It is available as both an open-source software and a cloud offering.\nNow I want to add ClickHouse monitoring via the OpenTelemetry Collector which fetches metrics from it\u0026rsquo;s own HTTP endpoint to expose metrics data for Prometheus (since ClickHouse v20.1.2.4). Clickhouse Exporter used only for old ClickHouse versions, modern versions have embedded prometheus endpoint.\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes ClickHouse expose own metrics via HTTP endpoint to opentelemetry collector, using skyWalking openTelemetry receiver to fetch these metrics.\nThe exposed metrics are from the system.metrics table / the system.events table / the system.asynchronous_metrics table.\nClickHouse Instance Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CpuUsage count meter_clickhouse_instance_cpu_usage CPU time spent seen by OS per second(according to ClickHouse.system.dashboard.CPU Usage (cores)). ClickHouse   MemoryUsage percentage meter_clickhouse_instance_memory_usage Total amount of memory (bytes) allocated by the server/ total amount of OS memory. ClickHouse   MemoryAvailable percentage meter_clickhouse_instance_memory_available Total amount of memory (bytes) available for program / total amount of OS memory. ClickHouse   Uptime sec meter_clickhouse_instance_uptime The server uptime in seconds. It includes the time spent for server initialization before accepting connections. ClickHouse   Version string meter_clickhouse_instance_version Version of the server in a single integer number in base-1000. ClickHouse   FileOpen count meter_clickhouse_instance_file_open Number of files opened. ClickHouse    ClickHouse Network Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     TcpConnections count meter_clickhouse_instance_tcp_connectionsmeter_clickhouse_tcp_connections Number of connections to TCP server. ClickHouse   MysqlConnections count meter_clickhouse_instance_mysql_connectionsmeter_clickhouse_mysql_connections Number of client connections using MySQL protocol. ClickHouse   HttpConnections count meter_clickhouse_instance_http_connectionsmeter_clickhouse_mysql_connections Number of connections to HTTP server. ClickHouse   InterserverConnections count meter_clickhouse_instance_interserver_connectionsmeter_clickhouse_interserver_connections Number of connections from other replicas to fetch parts. ClickHouse   PostgresqlConnections count meter_clickhouse_instance_postgresql_connectionsmeter_clickhouse_postgresql_connections Number of client connections using PostgreSQL protocol. ClickHouse   ReceiveBytes bytes meter_clickhouse_instance_network_receive_bytesmeter_clickhouse_network_receive_bytes Total number of bytes received from network. ClickHouse   SendBytes bytes meter_clickhouse_instance_network_send_bytesmeter_clickhouse_network_send_bytes Total number of bytes send to network. 
ClickHouse    ClickHouse Query Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     QueryCount count meter_clickhouse_instance_querymeter_clickhouse_query Number of executing queries. ClickHouse   SelectQueryCount count meter_clickhouse_instance_query_selectmeter_clickhouse_query_select Number of executing queries, but only for SELECT queries. ClickHouse   InsertQueryCount count meter_clickhouse_instance_query_insertmeter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   SelectQueryRate count/sec meter_clickhouse_instance_query_select_ratemeter_clickhouse_query_select_rate Number of SELECT queries per second. ClickHouse   InsertQueryRate count/sec meter_clickhouse_instance_query_insert_ratemeter_clickhouse_query_insert_rate Number of INSERT queries per second. ClickHouse   Querytime microsec meter_clickhouse_instance_querytime_microsecondsmeter_clickhouse_querytime_microseconds Total time of all queries. ClickHouse   SelectQuerytime microsec meter_clickhouse_instance_querytime_select_microsecondsmeter_clickhouse_querytime_select_microseconds Total time of SELECT queries. ClickHouse   InsertQuerytime microsec meter_clickhouse_instance_querytime_insert_microsecondsmeter_clickhouse_querytime_insert_microseconds Total time of INSERT queries. ClickHouse   OtherQuerytime microsec meter_clickhouse_instance_querytime_other_microsecondsmeter_clickhouse_querytime_other_microseconds Total time of queries that are not SELECT or INSERT. ClickHouse   QuerySlowCount count meter_clickhouse_instance_query_slowmeter_clickhouse_query_slow Number of reads from a file that were slow. ClickHouse    ClickHouse Insertion Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     InsertQueryCount count meter_clickhouse_instance_query_insertmeter_clickhouse_query_insert Number of executing queries, but only for INSERT queries. ClickHouse   InsertedRowCount count meter_clickhouse_instance_inserted_rowsmeter_clickhouse_inserted_rows Number of rows INSERTed to all tables. ClickHouse   InsertedBytes bytes meter_clickhouse_instance_inserted_bytesmeter_clickhouse_inserted_bytes Number of bytes INSERTed to all tables. ClickHouse   DelayedInsertCount count meter_clickhouse_instance_delayed_insertmeter_clickhouse_delayed_insert Number of times the INSERT of a block to a MergeTree table was throttled due to high number of active data parts for partition. ClickHouse    ClickHouse Replicas Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     ReplicatedChecks count meter_clickhouse_instance_replicated_checksmeter_clickhouse_replicated_checks Number of data parts checking for consistency. ClickHouse   ReplicatedFetch count meter_clickhouse_instance_replicated_fetchmeter_clickhouse_replicated_fetch Number of data parts being fetched from replica. ClickHouse   ReplicatedSend count meter_clickhouse_instance_replicated_sendmeter_clickhouse_replicated_send Number of data parts being sent to replicas. ClickHouse    ClickHouse MergeTree Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     BackgroundMergeCount count meter_clickhouse_instance_background_mergemeter_clickhouse_background_merge Number of executing background merges. ClickHouse   MergeRows count meter_clickhouse_instance_merge_rowsmeter_clickhouse_merge_rows Rows read for background merges. This is the number of rows before merge. 
ClickHouse   MergeUncompressedBytes bytes meter_clickhouse_instance_merge_uncompressed_bytesmeter_clickhouse_merge_uncompressed_bytes Uncompressed bytes (for columns as they stored in memory) that was read for background merges. This is the number before merge. ClickHouse   MoveCount count meter_clickhouse_instance_movemeter_clickhouse_move Number of currently executing moves. ClickHouse   PartsActive Count meter_clickhouse_instance_parts_activemeter_clickhouse_parts_active Active data part, used by current and upcoming SELECTs. ClickHouse   MutationsCount count meter_clickhouse_instance_mutationsmeter_clickhouse_mutations Number of mutations (ALTER DELETE/UPDATE). ClickHouse    ClickHouse Kafka Table Engine Supported Metrics When table engine works with Apache Kafka.\nKafka lets you:\n Publish or subscribe to data flows. Organize fault-tolerant storage. Process streams as they become available.     Monitoring Panel Unit Metric Name Description Data Source     KafkaMessagesRead count meter_clickhouse_instance_kafka_messages_readmeter_clickhouse_kafka_messages_read Number of Kafka messages already processed by ClickHouse. ClickHouse   KafkaWrites count meter_clickhouse_instance_kafka_writesmeter_clickhouse_kafka_writes Number of writes (inserts) to Kafka tables. ClickHouse   KafkaConsumers count meter_clickhouse_instance_kafka_consumersmeter_clickhouse_kafka_consumers Number of active Kafka consumers. ClickHouse   KafkProducers count meter_clickhouse_instance_kafka_producersmeter_clickhouse_kafka_producers Number of active Kafka producer created. ClickHouse    ClickHouse ZooKeeper Supported Metrics ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted.\n   Monitoring Panel Unit Metric Name Description Data Source     ZookeeperSession count meter_clickhouse_instance_zookeeper_sessionmeter_clickhouse_zookeeper_session Number of sessions (connections) to ZooKeeper. ClickHouse   ZookeeperWatch count meter_clickhouse_instance_zookeeper_watchmeter_clickhouse_zookeeper_watch Number of watches (event subscriptions) in ZooKeeper. ClickHouse   ZookeeperBytesSent bytes meter_clickhouse_instance_zookeeper_bytes_sentmeter_clickhouse_zookeeper_bytes_sent Number of bytes send over network while communicating with ZooKeeper. ClickHouse   ZookeeperBytesReceive bytes meter_clickhouse_instance_zookeeper_bytes_receivedmeter_clickhouse_zookeeper_bytes_received Number of bytes send over network while communicating with ZooKeeper. ClickHouse    ClickHouse Keeper Supported Metrics ClickHouse Keeper provides the coordination system for data replication and distributed DDL queries execution. ClickHouse Keeper is compatible with ZooKeeper.\nClickHouse Keeper can work in embedded mode or standalone cluster mode, the metrics below are for embedded mode.\n   Monitoring Panel Unit Metric Name Description Data Source     KeeperAliveConnections count meter_clickhouse_instance_keeper_connections_alivemeter_clickhouse_keeper_connections_alive Number of alive connections for embedded ClickHouse Keeper. ClickHouse   KeeperOutstandingRequets count meter_clickhouse_instance_keeper_outstanding_requestsmeter_clickhouse_keeper_outstanding_requests Number of outstanding requests for embedded ClickHouse Keeper. ClickHouse    Imported Dependencies libs and their licenses. 
No new dependency.\nCompatibility no breaking changes.\nGeneral usage docs ","title":"Support ClickHouse Monitoring","url":"/docs/main/next/en/swip/swip-5/"},{"content":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; 
static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   
arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","title":"Support custom enhance","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/customize-enhance-trace/"},{"content":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag 
key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default 
operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","title":"Support custom enhance","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/customize-enhance-trace/"},{"content":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; 
static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log 
key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","title":"Support custom enhance","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/customize-enhance-trace/"},{"content":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. 
\u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; 
static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      ","title":"Support custom enhance","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/customize-enhance-trace/"},{"content":"Support custom enhance Here is an optional plugin apm-customize-enhance-plugin\nIntroduce SkyWalking has provided Java agent plugin development guide to help developers to build new plugin.\nThis plugin is not designed for replacement but for user convenience. 
The behaviour is very similar with @Trace toolkit, but without code change requirement, and more powerful, such as provide tag and log.\nHow to configure Implementing enhancements to custom classes requires two steps.\n Active the plugin, move the optional-plugins/apm-customize-enhance-plugin.jar to plugin/apm-customize-enhance-plugin.jar. Set plugin.customize.enhance_file in agent.config, which targets to rule file, such as /absolute/path/to/customize_enhance.xml. Set enhancement rules in customize_enhance.xml. \u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; \u0026lt;enhanced\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService1\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod()\u0026#34; operation_name=\u0026#34;/is_static_method\u0026#34; static=\u0026#34;true\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class,java.util.Map,java.util.List,[Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/is_static_method_args\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[1]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[3].[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[2].[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;tag key=\u0026#34;tag_2\u0026#34;\u0026gt;arg[4].[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[4].[2]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method()\u0026#34; static=\u0026#34;false\u0026#34;/\u0026gt; \u0026lt;method method=\u0026#34;method(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_2\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0]\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(test.apache.skywalking.testcase.customize.model.Model0,java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/method_3\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].id\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.name\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;operation_name_suffix\u0026gt;arg[0].model1.getId()\u0026lt;/operation_name_suffix\u0026gt; \u0026lt;tag key=\u0026#34;tag_os\u0026#34;\u0026gt;arg[0].os.[1]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;arg[0].getM().[\u0026#39;k1\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retString(java.lang.String)\u0026#34; operation_name=\u0026#34;/retString\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retModel0(test.apache.skywalking.apm.testcase.customize.model.Model0)\u0026#34; operation_name=\u0026#34;/retModel0\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag 
key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.model1.id\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.model1.getId()\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;class class_name=\u0026#34;test.apache.skywalking.testcase.customize.service.TestService2\u0026#34;\u0026gt; \u0026lt;method method=\u0026#34;staticMethod(java.lang.String,int.class)\u0026#34; operation_name=\u0026#34;/is_2_static_method\u0026#34; static=\u0026#34;true\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_2_1\u0026#34;\u0026gt;arg[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_1_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/method_4\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_4_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;method(java.util.List,int.class)\u0026#34; operation_name=\u0026#34;/method_5\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_5_1\u0026#34;\u0026gt;arg[0].[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_5_1\u0026#34;\u0026gt;arg[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retArray([Ljava.lang.Object;)\u0026#34; operation_name=\u0026#34;/retArray\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retList(java.util.List)\u0026#34; operation_name=\u0026#34;/retList\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[0]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[1]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;method method=\u0026#34;retMap(java.util.Map)\u0026#34; operation_name=\u0026#34;/retMap\u0026#34; static=\u0026#34;false\u0026#34;\u0026gt; \u0026lt;tag key=\u0026#34;tag_ret\u0026#34;\u0026gt;returnedObj.[\u0026#39;k1\u0026#39;]\u0026lt;/tag\u0026gt; \u0026lt;log key=\u0026#34;log_map\u0026#34;\u0026gt;returnedObj.[\u0026#39;k2\u0026#39;]\u0026lt;/log\u0026gt; \u0026lt;/method\u0026gt; \u0026lt;/class\u0026gt; \u0026lt;/enhanced\u0026gt;    Explanation of the configuration in the file    configuration explanation     class_name The enhanced class   method The interceptor method of the class   operation_name If fill it out, will use it instead of the default operation_name.   operation_name_suffix What it means adding dynamic data after the operation_name.   static Is this method static.   tag Will add a tag in local span. The value of key needs to be represented on the XML node.   log Will add a log in local span. The value of key needs to be represented on the XML node.   arg[x] What it means is to get the input arguments. such as arg[0] is means get first arguments.   .[x] When the parsing object is Array or List, you can use it to get the object at the specified index.   .[\u0026lsquo;key\u0026rsquo;] When the parsing object is Map, you can get the map \u0026lsquo;key\u0026rsquo; through it.   returnedObj What it means is to get the return value.      
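For orientation, a hypothetical target class matching a few of the sample rules above could look like the sketch below. This is illustrative only: the class and method signatures mirror the sample customize_enhance.xml, while the method bodies are placeholders and not part of the plugin.
// Hypothetical target class; signatures mirror the sample rules above.
package test.apache.skywalking.testcase.customize.service;

public class TestService1 {

    // Matched by method="staticMethod()" with operation_name="/is_static_method".
    public static void staticMethod() {
        // business logic here
    }

    // Matched by method="method(java.lang.String,int.class)":
    // arg[0] feeds operation_name_suffix and tag_1, arg[1] feeds log_1.
    public void method(String name, int count) {
        // business logic here
    }

    // Matched by method="retString(java.lang.String)": the return value is
    // captured through the returnedObj expression for tag_ret and log_map.
    public String retString(String input) {
        return "result-" + input;
    }
}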
","title":"Support custom enhance","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/customize-enhance-trace/"},{"content":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","title":"Support custom trace ignore","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/"},{"content":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","title":"Support custom trace ignore","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/"},{"content":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. 
Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","title":"Support custom trace ignore","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/"},{"content":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","title":"Support custom trace ignore","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/"},{"content":"Support custom trace ignore Here is an optional plugin apm-trace-ignore-plugin\nNotice: Sampling still works when the trace ignores plug-in activation.\nIntroduce  The purpose of this plugin is to filter endpoint which are expected to be ignored by the tracing system. You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. The current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?. Copy apm-trace-ignore-plugin-x.jar to agent/plugins, restarting the agent can effect the plugin.  How to configure There are two ways to configure ignore patterns. Settings through system env has higher priority.\n Set through the system environment variable,you need to add skywalking.trace.ignore_path to the system variables, the value is the path that you need to ignore, multiple paths should be separated by , Create file named as apm-trace-ignore-plugin.config in /agent/config/ dir, and add rules to filter traces  trace.ignore_path=/your/path/1/**,/your/path/2/** ","title":"Support custom trace ignore","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/agent-optional-plugins/trace-ignore-plugin/"},{"content":"Support RocketMQ Monitoring Motivation RocketMQ is a cloud native messaging and streaming platform, making it simple to build event-driven applications. 
Now that Skywalking can monitor OpenTelemetry metrics, I want to add RocketMQ monitoring via the OpenTelemetry Collector, which fetches metrics from the RocketMQ Exporter\nArchitecture Graph There is no significant architecture-level change.\nProposed Changes rocketmq-exporter collects metrics from RocketMQ and transport the data to OpenTelemetry collector, using SkyWalking openTelemetry receiver to receive these metrics。 Provide cluster, broker, and topic dimensions monitoring.\nRocketMQ Cluster Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Messages Produced Today Count meter_rocketmq_cluster_messages_produced_today The number of cluster messages produced today. RocketMQ Exporter   Messages Consumed Today Count meter_rocketmq_cluster_messages_consumed_today The number of cluster messages consumed today. RocketMQ Exporter   Total Producer Tps Msg/sec meter_rocketmq_cluster_total_producer_tps The number of messages produced per second. RocketMQ Exporter   Total Consume Tps Msg/sec meter_rocketmq_cluster_total_consumer_tps The number of messages consumed per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_cluster_producer_message_size The max size of a message produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_cluster_consumer_message_size The max size of the consumed message per second. RocketMQ Exporter   Messages Produced Until Yesterday Count meter_rocketmq_cluster_messages_produced_until_yesterday The total number of messages put until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Messages Consumed Until Yesterday Count meter_rocketmq_cluster_messages_consumed_until_yesterday The total number of messages read until 12 o\u0026rsquo;clock last night. RocketMQ Exporter   Max Consumer Latency ms meter_rocketmq_cluster_max_consumer_latency The max number of consumer latency. RocketMQ Exporter   Max CommitLog Disk Ratio % meter_rocketmq_cluster_max_commitLog_disk_ratio The max utilization ratio of the commit log disk. RocketMQ Exporter   CommitLog Disk Ratio % meter_rocketmq_cluster_commitLog_disk_ratio The utilization ratio of the commit log disk per broker IP. RocketMQ Exporter   Pull ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_pull_threadPool_queue_head_wait_time The wait time in milliseconds for pulling threadPool queue per broker IP. RocketMQ Exporter   Send ThreadPool Queue Head Wait Time ms meter_rocketmq_cluster_send_threadPool_queue_head_wait_time The wait time in milliseconds for sending threadPool queue per broker IP. RocketMQ Exporter   Topic Count Count meter_rocketmq_cluster_topic_count The number of topics that received messages from the producer. RocketMQ Exporter   Broker Count Count meter_rocketmq_cluster_broker_count The number of brokers that received messages from the producer. RocketMQ Exporter    RocketMQ Broker Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Produce TPS Msg/sec meter_rocketmq_broker_produce_tps The number of broker produces messages per second. RocketMQ Exporter   Consume QPS Msg/sec meter_rocketmq_broker_consume_qps The number of broker consumes messages per second. RocketMQ Exporter   Producer Message Size Bytes/sec meter_rocketmq_broker_producer_message_size The max size of the messages produced per second. RocketMQ Exporter   Consumer Message Size Bytes/sec meter_rocketmq_broker_consumer_message_size The max size of the messages consumed per second. 
RocketMQ Exporter    RocketMQ Topic Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     Max Producer Message Size Byte meter_rocketmq_topic_max_producer_message_size The maximum number of messages produced. RocketMQ Exporter   Max Consumer Message Size Byte meter_rocketmq_topic_max_consumer_message_size The maximum number of messages consumed. RocketMQ Exporter   Consumer Latency ms meter_rocketmq_topic_consumer_latency Consumption delay time of a consumer group. RocketMQ Exporter   Producer Tps Msg/sec meter_rocketmq_topic_producer_tps The number of messages produced per second. RocketMQ Exporter   Consumer Group Tps Msg/sec meter_rocketmq_topic_consumer_group_tps The number of messages consumed per second per consumer group. RocketMQ Exporter   Producer Offset Count meter_rocketmq_topic_producer_offset The max progress of a topic\u0026rsquo;s production message. RocketMQ Exporter   Consumer Group Offset Count meter_rocketmq_topic_consumer_group_offset The max progress of a topic\u0026rsquo;s consumption message per consumer group. RocketMQ Exporter   Producer Message Size Byte/sec meter_rocketmq_topic_producer_message_size The max size of messages produced per second. RocketMQ Exporter   Consumer Message Size Byte/sec meter_rocketmq_topic_consumer_message_size The max size of messages consumed per second. RocketMQ Exporter   Consumer Group_Count Count meter_rocketmq_topic_consumer_group_count The number of consumer groups. RocketMQ Exporter   Broker Count Count meter_rocketmq_topic_broker_count The number of topics that received messages from the producer. RocketMQ Exporter    Imported Dependencies libs and their licenses. No new dependency.\nCompatibility no breaking changes.\nGeneral usage docs This feature is out of the box.\n","title":"Support RocketMQ Monitoring","url":"/docs/main/next/en/swip/swip-3/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Enable TLS  Enable (m)TLS on the OAP server side, read more on this documentation. Following the configuration to enable (m)TLS on the agent side.     Name Environment Variable Required Type Description     reporter.grpc.tls.enable SW_AGENT_REPORTER_GRPC_TLS_ENABLE TLS/mTLS Enable (m)TLS on the gRPC reporter.   reporter.grpc.tls.ca_path SW_AGENT_REPORTER_GRPC_TLS_CA_PATH TLS The path of the CA certificate file. eg: /path/to/ca.cert.   reporter.grpc.tls.client.key_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_KEY_PATH mTLS The path of the client private key file, eg: /path/to/client.pem.   
reporter.grpc.tls.client.client_cert_chain_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_CERT_CHAIN_PATH mTLS The path of the client certificate file, eg: /path/to/client.crt.   reporter.grpc.tls.insecure_skip_verify SW_AGENT_REPORTER_GRPC_TLS_INSECURE_SKIP_VERIFY TLS/mTLS Skip the server certificate and domain name verification.    ","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-go/latest/en/advanced-features/grpc-tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Enable TLS  Enable (m)TLS on the OAP server side, read more on this documentation. Following the configuration to enable (m)TLS on the agent side.     Name Environment Variable Required Type Description     reporter.grpc.tls.enable SW_AGENT_REPORTER_GRPC_TLS_ENABLE TLS/mTLS Enable (m)TLS on the gRPC reporter.   reporter.grpc.tls.ca_path SW_AGENT_REPORTER_GRPC_TLS_CA_PATH TLS The path of the CA certificate file. eg: /path/to/ca.cert.   reporter.grpc.tls.client.key_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_KEY_PATH mTLS The path of the client private key file, eg: /path/to/client.pem.   reporter.grpc.tls.client.client_cert_chain_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_CERT_CHAIN_PATH mTLS The path of the client certificate file, eg: /path/to/client.crt.   reporter.grpc.tls.insecure_skip_verify SW_AGENT_REPORTER_GRPC_TLS_INSECURE_SKIP_VERIFY TLS/mTLS Skip the server certificate and domain name verification.    ","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-go/next/en/advanced-features/grpc-tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\nCreating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. 
Please refer to gRPC Security of the OAP server doc for more details.  Enable TLS  Enable (m)TLS on the OAP server side, read more on this documentation. Following the configuration to enable (m)TLS on the agent side.     Name Environment Variable Required Type Description     reporter.grpc.tls.enable SW_AGENT_REPORTER_GRPC_TLS_ENABLE TLS/mTLS Enable (m)TLS on the gRPC reporter.   reporter.grpc.tls.ca_path SW_AGENT_REPORTER_GRPC_TLS_CA_PATH TLS The path of the CA certificate file. eg: /path/to/ca.cert.   reporter.grpc.tls.client.key_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_KEY_PATH mTLS The path of the client private key file, eg: /path/to/client.pem.   reporter.grpc.tls.client.client_cert_chain_path SW_AGENT_REPORTER_GRPC_TLS_CLIENT_CERT_CHAIN_PATH mTLS The path of the client certificate file, eg: /path/to/client.crt.   reporter.grpc.tls.insecure_skip_verify SW_AGENT_REPORTER_GRPC_TLS_INSECURE_SKIP_VERIFY TLS/mTLS Skip the server certificate and domain name verification.    ","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-go/v0.4.0/en/advanced-features/grpc-tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  
For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. 
In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. 
Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Notice, the client-side\u0026rsquo;s certificate and the private key are from the same CA certificate with server-side.\n","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/tls/"},{"content":"Support Transport Layer Security (TLS) Transport Layer Security (TLS) is a very common security way when transport data through Internet. In some use cases, end users report the background:\n Target(under monitoring) applications are in a region, which also named VPC, at the same time, the SkyWalking backend is in another region (VPC).\nBecause of that, security requirement is very obvious.\n Creating SSL/TLS Certificates The first step is to generate certificates and key files for encrypting communication. This is fairly straightforward: use openssl from the command line.\nUse this script if you are not familiar with how to generate key files.\nWe need the following files:\n client.pem: A private RSA key to sign and authenticate the public key. It\u0026rsquo;s either a PKCS#8(PEM) or PKCS#1(DER). client.crt: Self-signed X.509 public keys for distribution. ca.crt: A certificate authority public key for a client to validate the server\u0026rsquo;s certificate.  Authentication Mode  Find ca.crt, and use it at client side. In mTLS mode, client.crt and client.pem are required at client side. Find server.crt, server.pem and ca.crt. Use them at server side. Please refer to gRPC Security of the OAP server doc for more details.  Open and config TLS Agent config  Agent enables TLS automatically after the ca.crt(by default /ca folder in agent package) file is detected. TLS with no CA mode could be activated by this setting.  agent.force_tls=${SW_AGENT_FORCE_TLS:true} Enable mutual TLS  Sharing gRPC server must be started with mTLS enabled. More details can be found in receiver-sharing-server section in application.yaml. Please refer to gRPC Security and gRPC/HTTP server for receiver. Copy CA certificate, certificate and private key of client into agent/ca. Configure client-side SSL/TLS in agent.conf. Change SW_AGENT_COLLECTOR_BACKEND_SERVICES targeting to host and port of receiver-sharing-server.  
For example:\nagent.force_tls=${SW_AGENT_FORCE_TLS:true} agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt} agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:/ca/client.pem} agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:/ca/client.crt} collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:skywalking-oap:11801} Note that the client-side certificate and private key must be issued by the same CA as the server-side certificate.\n","title":"Support Transport Layer Security (TLS)","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/tls/"},{"content":"Supported Agent Configuration Options Below is the full list of supported configurations you can set to customize the agent behavior; please take some time to read the descriptions to see what they can achieve.\n Usage: (Pass in intrusive setup)\n from skywalking import config, agent config.init(YourConfiguration=YourValue) agent.start()  Usage: (Pass by environment variables)\n export SW_AGENT_YourConfiguration=YourValue Agent Core Configuration Options    Configuration Environment Variable Type Default Value Description     agent_collector_backend_services SW_AGENT_COLLECTOR_BACKEND_SERVICES \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; oap_host:oap_port The backend OAP server address; 11800 is the default OAP gRPC port and 12800 is HTTP. Kafka ignores this option and uses the kafka_bootstrap_servers option. This option should be changed according to the selected protocol.   agent_protocol SW_AGENT_PROTOCOL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; grpc The protocol to communicate with the backend OAP, http, grpc or kafka; we highly suggest using grpc in production as it\u0026rsquo;s better optimized than http. The kafka protocol provides an alternative way to submit data to the backend.   agent_name SW_AGENT_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; Python Service Name The name of your awesome Python service   agent_instance_name SW_AGENT_INSTANCE_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; str(uuid.uuid1()).replace('-', \u0026lsquo;') The name of this particular awesome Python service instance   agent_namespace SW_AGENT_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The agent namespace of the Python service (available as a tag and the suffix of the service name)   kafka_bootstrap_servers SW_KAFKA_BOOTSTRAP_SERVERS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; localhost:9092 A list of host/port pairs to use for establishing the initial connection to your Kafka cluster. It is in the form of host1:port1,host2:port2,\u0026hellip; (used for the Kafka reporter protocol)   kafka_namespace SW_KAFKA_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The kafka namespace specified by the OAP side SW_NAMESPACE; prepends the following kafka topic names with a -.
kafka_topic_management SW_KAFKA_TOPIC_MANAGEMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-managements Specifying Kafka topic name for service instance reporting and registering, this should be in sync with OAP   kafka_topic_segment SW_KAFKA_TOPIC_SEGMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-segments Specifying Kafka topic name for Tracing data, this should be in sync with OAP   kafka_topic_log SW_KAFKA_TOPIC_LOG \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-logs Specifying Kafka topic name for Log data, this should be in sync with OAP   kafka_topic_meter SW_KAFKA_TOPIC_METER \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-meters Specifying Kafka topic name for Meter data, this should be in sync with OAP   kafka_reporter_custom_configurations SW_KAFKA_REPORTER_CUSTOM_CONFIGURATIONS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The configs to init KafkaProducer, supports the basic arguments (whose type is either str, bool, or int) listed here This config only works from env variables, each one should be passed in SW_KAFKA_REPORTER_CONFIG_\u0026lt;KEY_NAME\u0026gt;   agent_force_tls SW_AGENT_FORCE_TLS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Use TLS for communication with SkyWalking OAP (no cert required)   agent_authentication SW_AGENT_AUTHENTICATION \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The authentication token to verify that the agent is trusted by the backend OAP, as for how to configure the backend, refer to the yaml.   agent_logging_level SW_AGENT_LOGGING_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; INFO The level of agent self-logs, could be one of CRITICAL, FATAL, ERROR, WARN(WARNING), INFO, DEBUG. Please turn on debug if an issue is encountered to find out what\u0026rsquo;s going on    Agent Core Danger Zone    Configuration Environment Variable Type Default Value Description     agent_collector_heartbeat_period SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 30 The agent will exchange heartbeat message with SkyWalking OAP backend every period seconds   agent_collector_properties_report_period_factor SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The agent will report service instance properties every factor * heartbeat period seconds default: 10*30 = 300 seconds   agent_instance_properties_json SW_AGENT_INSTANCE_PROPERTIES_JSON \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  A custom JSON string to be reported as service instance properties, e.g. {\u0026quot;key\u0026quot;: \u0026quot;value\u0026quot;}   agent_experimental_fork_support SW_AGENT_EXPERIMENTAL_FORK_SUPPORT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False The agent will restart itself in any os.fork()-ed child process. Important Note: it\u0026rsquo;s not suitable for short-lived processes as each one will create a new instance in SkyWalking dashboard in format of service_instance-child(pid). This feature may not work when a precise combination of gRPC + Python 3.7 + subprocess (not fork) is used together. The agent will output a warning log when using on Python 3.7 for such a reason.   agent_queue_timeout SW_AGENT_QUEUE_TIMEOUT \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1 DANGEROUS - This option controls the interval of each bulk report from telemetry data queues Do not modify unless you have evaluated its impact given your service load.    
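For instance, combining a few of the core options from the tables above in the intrusive setup might look like the following sketch. The service name, instance name, and OAP address are placeholders, and the option names assume a recent agent release where they match the Configuration column above.

from skywalking import config, agent

# Placeholders only: point the reporter at your own OAP gRPC endpoint.
config.init(
    agent_collector_backend_services='127.0.0.1:11800',   # backend OAP address (gRPC)
    agent_name='my-python-service',                        # reported service name
    agent_instance_name='my-python-service-instance-01',   # optional; defaults to a UUID
    agent_logging_level='INFO',                            # agent self-log level
)

agent.start()  # start the agent before the instrumented code begins handling traffic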
SW_PYTHON Auto Instrumentation CLI    Configuration Environment Variable Type Default Value Description     agent_sw_python_bootstrap_propagate SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the child process agent bootstrap behavior in sw-python CLI, if set to False, a valid child process will not boot up a SkyWalking Agent. Please refer to the CLI Guide for details.   agent_sw_python_cli_debug_enabled SW_AGENT_SW_PYTHON_CLI_DEBUG_ENABLED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the CLI and agent logging debug mode, if set to True, the CLI and agent will print out debug logs. Please refer to the CLI Guide for details. Important: this config will set agent logging level to DEBUG as well, do not use it in production otherwise it will flood your logs. This normally shouldn\u0026rsquo;t be pass as a simple flag -d will be the same.    Trace Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_trace_reporter_max_buffer_size SW_AGENT_TRACE_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending the segment data to backend, segments beyond this are silently dropped   agent_trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. the current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?.   agent_ignore_suffix SW_AGENT_IGNORE_SUFFIX \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.   correlation_element_max_number SW_CORRELATION_ELEMENT_MAX_NUMBER \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 3 Max element count of the correlation context.   correlation_value_max_length SW_CORRELATION_VALUE_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 128 Max value length of correlation context element.    Profiling Configurations    Configuration Environment Variable Type Default Value Description     agent_profile_active SW_AGENT_PROFILE_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will enable profiler when user create a new profiling task.   agent_collector_get_profile_task_interval SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The number of seconds between two profile task query.   agent_profile_max_parallel SW_AGENT_PROFILE_MAX_PARALLEL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 5 The number of parallel monitor segment count.   agent_profile_duration SW_AGENT_PROFILE_DURATION \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The maximum monitor segment time(minutes), if current segment monitor time out of limit, then stop it.   
agent_profile_dump_max_stack_depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 500 The number of max dump thread stack depth   agent_profile_snapshot_transport_buffer_size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 50 The number of snapshot transport to backend buffer size    Log Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_log_reporter_active SW_AGENT_LOG_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected logs to the OAP or Satellite. Otherwise, it disables the feature.   agent_log_reporter_safe_mode SW_AGENT_LOG_REPORTER_SAFE_MODE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If True, Python agent will filter out HTTP basic auth information from log records. By default, it disables the feature due to potential performance impact brought by regular expression   agent_log_reporter_max_buffer_size SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.   agent_log_reporter_level SW_AGENT_LOG_REPORTER_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; WARNING This config specifies the logger levels of concern, any logs with a level below the config will be ignored.   agent_log_reporter_ignore_filter SW_AGENT_LOG_REPORTER_IGNORE_FILTER \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config customizes whether to ignore the application-defined logger filters, if True, all logs are reported disregarding any filter rules.   agent_log_reporter_formatted SW_AGENT_LOG_REPORTER_FORMATTED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, the log reporter will transmit the logs as formatted. Otherwise, puts logRecord.msg and logRecord.args into message content and tags(argument.n), respectively. Along with an exception tag if an exception was raised. Only applies to logging module.   agent_log_reporter_layout SW_AGENT_LOG_REPORTER_LAYOUT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; %(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s The log reporter formats the logRecord message based on the layout given. Only applies to logging module.   agent_cause_exception_depth SW_AGENT_CAUSE_EXCEPTION_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 This configuration is shared by log reporter and tracer. This config limits agent to report up to limit stacktrace, please refer to [Python traceback](../ https://docs.python.org/3/library/traceback.html#traceback.print_tb) for more explanations.    Meter Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_meter_reporter_active SW_AGENT_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected meters to the OAP or Satellite. Otherwise, it disables the feature.   agent_meter_reporter_max_buffer_size SW_AGENT_METER_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending meter data to backend, meters beyond this are silently dropped.   
agent_meter_reporter_period SW_AGENT_METER_REPORTER_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The interval in seconds between each meter data report   agent_pvm_meter_reporter_active SW_AGENT_PVM_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected Python Virtual Machine (PVM) meters to the OAP or Satellite. Otherwise, it disables the feature.    Plugin Related configurations    Configuration Environment Variable Type Default Value Description     agent_disable_plugins SW_AGENT_DISABLE_PLUGINS \u0026lt;class \u0026lsquo;list\u0026rsquo;\u0026gt; [''] The name patterns in comma-separated pattern, plugins whose name matches one of the pattern won\u0026rsquo;t be installed   plugin_http_http_params_length_threshold SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1024 When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.   plugin_http_ignore_method SW_PLUGIN_HTTP_IGNORE_METHOD \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  Comma-delimited list of http methods to ignore (GET, POST, HEAD, OPTIONS, etc\u0026hellip;)   plugin_sql_parameters_max_length SW_PLUGIN_SQL_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 0 The maximum length of the collected parameter, parameters longer than the specified length will be truncated, length 0 turns off parameter tracing   plugin_pymongo_trace_parameters SW_PLUGIN_PYMONGO_TRACE_PARAMETERS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Indicates whether to collect the filters of pymongo   plugin_pymongo_parameters_max_length SW_PLUGIN_PYMONGO_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of the collected filters, filters longer than the specified length will be truncated   plugin_elasticsearch_trace_dsl SW_PLUGIN_ELASTICSEARCH_TRACE_DSL \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false   plugin_flask_collect_http_params SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Flask plugin should collect the parameters of the request.   plugin_sanic_collect_http_params SW_PLUGIN_SANIC_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Sanic plugin should collect the parameters of the request.   plugin_django_collect_http_params SW_PLUGIN_DJANGO_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Django plugin should collect the parameters of the request.   plugin_fastapi_collect_http_params SW_PLUGIN_FASTAPI_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the FastAPI plugin should collect the parameters of the request.   plugin_bottle_collect_http_params SW_PLUGIN_BOTTLE_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Bottle plugin should collect the parameters of the request.   
plugin_celery_parameters_length SW_PLUGIN_CELERY_PARAMETERS_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of celery functions parameters, longer than this will be truncated, 0 turns off    ","title":"Supported Agent Configuration Options","url":"/docs/skywalking-python/latest/en/setup/configuration/"},{"content":"Supported Agent Configuration Options Below is the full list of supported configurations you can set to customize the agent behavior, please take some time to read the descriptions for what they can achieve.\n Usage: (Pass in intrusive setup)\n from skywalking import config, agent config.init(YourConfiguration=YourValue)) agent.start()  Usage: (Pass by environment variables)\n export SW_AGENT_YourConfiguration=YourValue Agent Core Configuration Options    Configuration Environment Variable Type Default Value Description     agent_collector_backend_services SW_AGENT_COLLECTOR_BACKEND_SERVICES \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; oap_host:oap_port The backend OAP server address, 11800 is default OAP gRPC port, 12800 is HTTP, Kafka ignores this option and uses kafka_bootstrap_servers option. This option should be changed accordingly with selected protocol   agent_protocol SW_AGENT_PROTOCOL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; grpc The protocol to communicate with the backend OAP, http, grpc or kafka, we highly suggest using grpc in production as it\u0026rsquo;s well optimized than http. The kafka protocol provides an alternative way to submit data to the backend.   agent_name SW_AGENT_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; Python Service Name The name of your awesome Python service   agent_instance_name SW_AGENT_INSTANCE_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; str(uuid.uuid1()).replace('-', \u0026lsquo;') The name of this particular awesome Python service instance   agent_namespace SW_AGENT_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The agent namespace of the Python service (available as tag and the suffix of service name)   kafka_bootstrap_servers SW_KAFKA_BOOTSTRAP_SERVERS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; localhost:9092 A list of host/port pairs to use for establishing the initial connection to your Kafka cluster. It is in the form of host1:port1,host2:port2,\u0026hellip; (used for Kafka reporter protocol)   kafka_namespace SW_KAFKA_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The kafka namespace specified by OAP side SW_NAMESPACE, prepends the following kafka topic names with a -.   
kafka_topic_management SW_KAFKA_TOPIC_MANAGEMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-managements Specifying Kafka topic name for service instance reporting and registering, this should be in sync with OAP   kafka_topic_segment SW_KAFKA_TOPIC_SEGMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-segments Specifying Kafka topic name for Tracing data, this should be in sync with OAP   kafka_topic_log SW_KAFKA_TOPIC_LOG \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-logs Specifying Kafka topic name for Log data, this should be in sync with OAP   kafka_topic_meter SW_KAFKA_TOPIC_METER \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-meters Specifying Kafka topic name for Meter data, this should be in sync with OAP   kafka_reporter_custom_configurations SW_KAFKA_REPORTER_CUSTOM_CONFIGURATIONS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The configs to init KafkaProducer, supports the basic arguments (whose type is either str, bool, or int) listed here This config only works from env variables, each one should be passed in SW_KAFKA_REPORTER_CONFIG_\u0026lt;KEY_NAME\u0026gt;   agent_force_tls SW_AGENT_FORCE_TLS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Use TLS for communication with SkyWalking OAP (no cert required)   agent_authentication SW_AGENT_AUTHENTICATION \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The authentication token to verify that the agent is trusted by the backend OAP, as for how to configure the backend, refer to the yaml.   agent_logging_level SW_AGENT_LOGGING_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; INFO The level of agent self-logs, could be one of CRITICAL, FATAL, ERROR, WARN(WARNING), INFO, DEBUG. Please turn on debug if an issue is encountered to find out what\u0026rsquo;s going on    Agent Core Danger Zone    Configuration Environment Variable Type Default Value Description     agent_collector_heartbeat_period SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 30 The agent will exchange heartbeat message with SkyWalking OAP backend every period seconds   agent_collector_properties_report_period_factor SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The agent will report service instance properties every factor * heartbeat period seconds default: 10*30 = 300 seconds   agent_instance_properties_json SW_AGENT_INSTANCE_PROPERTIES_JSON \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  A custom JSON string to be reported as service instance properties, e.g. {\u0026quot;key\u0026quot;: \u0026quot;value\u0026quot;}   agent_experimental_fork_support SW_AGENT_EXPERIMENTAL_FORK_SUPPORT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False The agent will restart itself in any os.fork()-ed child process. Important Note: it\u0026rsquo;s not suitable for short-lived processes as each one will create a new instance in SkyWalking dashboard in format of service_instance-child(pid). This feature may not work when a precise combination of gRPC + Python 3.7 + subprocess (not fork) is used together. The agent will output a warning log when using on Python 3.7 for such a reason.   agent_queue_timeout SW_AGENT_QUEUE_TIMEOUT \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1 DANGEROUS - This option controls the interval of each bulk report from telemetry data queues Do not modify unless you have evaluated its impact given your service load.   
agent_asyncio_enhancement SW_AGENT_ASYNCIO_ENHANCEMENT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Replace the threads to asyncio coroutines to report telemetry data to the OAP. This option is experimental and may not work as expected.    SW_PYTHON Auto Instrumentation CLI    Configuration Environment Variable Type Default Value Description     agent_sw_python_bootstrap_propagate SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the child process agent bootstrap behavior in sw-python CLI, if set to False, a valid child process will not boot up a SkyWalking Agent. Please refer to the CLI Guide for details.   agent_sw_python_cli_debug_enabled SW_AGENT_SW_PYTHON_CLI_DEBUG_ENABLED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the CLI and agent logging debug mode, if set to True, the CLI and agent will print out debug logs. Please refer to the CLI Guide for details. Important: this config will set agent logging level to DEBUG as well, do not use it in production otherwise it will flood your logs. This normally shouldn\u0026rsquo;t be pass as a simple flag -d will be the same.    Trace Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_trace_reporter_max_buffer_size SW_AGENT_TRACE_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending the segment data to backend, segments beyond this are silently dropped   agent_trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. the current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?.   agent_ignore_suffix SW_AGENT_IGNORE_SUFFIX \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.   correlation_element_max_number SW_CORRELATION_ELEMENT_MAX_NUMBER \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 3 Max element count of the correlation context.   correlation_value_max_length SW_CORRELATION_VALUE_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 128 Max value length of correlation context element.    Profiling Configurations    Configuration Environment Variable Type Default Value Description     agent_profile_active SW_AGENT_PROFILE_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will enable profiler when user create a new profiling task.   agent_collector_get_profile_task_interval SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The number of seconds between two profile task query.   agent_profile_max_parallel SW_AGENT_PROFILE_MAX_PARALLEL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 5 The number of parallel monitor segment count.   agent_profile_duration SW_AGENT_PROFILE_DURATION \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The maximum monitor segment time(minutes), if current segment monitor time out of limit, then stop it.   
agent_profile_dump_max_stack_depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 500 The number of max dump thread stack depth   agent_profile_snapshot_transport_buffer_size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 50 The number of snapshot transport to backend buffer size    Log Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_log_reporter_active SW_AGENT_LOG_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected logs to the OAP or Satellite. Otherwise, it disables the feature.   agent_log_reporter_safe_mode SW_AGENT_LOG_REPORTER_SAFE_MODE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If True, Python agent will filter out HTTP basic auth information from log records. By default, it disables the feature due to potential performance impact brought by regular expression   agent_log_reporter_max_buffer_size SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.   agent_log_reporter_level SW_AGENT_LOG_REPORTER_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; WARNING This config specifies the logger levels of concern, any logs with a level below the config will be ignored.   agent_log_reporter_ignore_filter SW_AGENT_LOG_REPORTER_IGNORE_FILTER \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config customizes whether to ignore the application-defined logger filters, if True, all logs are reported disregarding any filter rules.   agent_log_reporter_formatted SW_AGENT_LOG_REPORTER_FORMATTED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, the log reporter will transmit the logs as formatted. Otherwise, puts logRecord.msg and logRecord.args into message content and tags(argument.n), respectively. Along with an exception tag if an exception was raised. Only applies to logging module.   agent_log_reporter_layout SW_AGENT_LOG_REPORTER_LAYOUT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; %(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s The log reporter formats the logRecord message based on the layout given. Only applies to logging module.   agent_cause_exception_depth SW_AGENT_CAUSE_EXCEPTION_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 This configuration is shared by log reporter and tracer. This config limits agent to report up to limit stacktrace, please refer to [Python traceback](../ https://docs.python.org/3/library/traceback.html#traceback.print_tb) for more explanations.    Meter Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_meter_reporter_active SW_AGENT_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected meters to the OAP or Satellite. Otherwise, it disables the feature.   agent_meter_reporter_max_buffer_size SW_AGENT_METER_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending meter data to backend, meters beyond this are silently dropped.   
agent_meter_reporter_period SW_AGENT_METER_REPORTER_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The interval in seconds between each meter data report   agent_pvm_meter_reporter_active SW_AGENT_PVM_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected Python Virtual Machine (PVM) meters to the OAP or Satellite. Otherwise, it disables the feature.    Plugin Related configurations    Configuration Environment Variable Type Default Value Description     agent_disable_plugins SW_AGENT_DISABLE_PLUGINS \u0026lt;class \u0026lsquo;list\u0026rsquo;\u0026gt; [''] The name patterns in comma-separated pattern, plugins whose name matches one of the pattern won\u0026rsquo;t be installed   plugin_http_http_params_length_threshold SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1024 When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.   plugin_http_ignore_method SW_PLUGIN_HTTP_IGNORE_METHOD \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  Comma-delimited list of http methods to ignore (GET, POST, HEAD, OPTIONS, etc\u0026hellip;)   plugin_sql_parameters_max_length SW_PLUGIN_SQL_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 0 The maximum length of the collected parameter, parameters longer than the specified length will be truncated, length 0 turns off parameter tracing   plugin_pymongo_trace_parameters SW_PLUGIN_PYMONGO_TRACE_PARAMETERS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Indicates whether to collect the filters of pymongo   plugin_pymongo_parameters_max_length SW_PLUGIN_PYMONGO_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of the collected filters, filters longer than the specified length will be truncated   plugin_elasticsearch_trace_dsl SW_PLUGIN_ELASTICSEARCH_TRACE_DSL \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false   plugin_flask_collect_http_params SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Flask plugin should collect the parameters of the request.   plugin_sanic_collect_http_params SW_PLUGIN_SANIC_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Sanic plugin should collect the parameters of the request.   plugin_django_collect_http_params SW_PLUGIN_DJANGO_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Django plugin should collect the parameters of the request.   plugin_fastapi_collect_http_params SW_PLUGIN_FASTAPI_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the FastAPI plugin should collect the parameters of the request.   plugin_bottle_collect_http_params SW_PLUGIN_BOTTLE_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Bottle plugin should collect the parameters of the request.   
plugin_celery_parameters_length SW_PLUGIN_CELERY_PARAMETERS_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of celery functions parameters, longer than this will be truncated, 0 turns off    ","title":"Supported Agent Configuration Options","url":"/docs/skywalking-python/next/en/setup/configuration/"},{"content":"Supported Agent Configuration Options Below is the full list of supported configurations you can set to customize the agent behavior, please take some time to read the descriptions for what they can achieve.\n Usage: (Pass in intrusive setup)\n from skywalking import config, agent config.init(YourConfiguration=YourValue)) agent.start()  Usage: (Pass by environment variables)\n export SW_AGENT_YourConfiguration=YourValue Agent Core Configuration Options    Configuration Environment Variable Type Default Value Description     agent_collector_backend_services SW_AGENT_COLLECTOR_BACKEND_SERVICES \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; oap_host:oap_port The backend OAP server address, 11800 is default OAP gRPC port, 12800 is HTTP, Kafka ignores this option and uses kafka_bootstrap_servers option. This option should be changed accordingly with selected protocol   agent_protocol SW_AGENT_PROTOCOL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; grpc The protocol to communicate with the backend OAP, http, grpc or kafka, we highly suggest using grpc in production as it\u0026rsquo;s well optimized than http. The kafka protocol provides an alternative way to submit data to the backend.   agent_name SW_AGENT_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; Python Service Name The name of your awesome Python service   agent_instance_name SW_AGENT_INSTANCE_NAME \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; str(uuid.uuid1()).replace('-', \u0026lsquo;') The name of this particular awesome Python service instance   agent_namespace SW_AGENT_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The agent namespace of the Python service (available as tag and the suffix of service name)   kafka_bootstrap_servers SW_KAFKA_BOOTSTRAP_SERVERS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; localhost:9092 A list of host/port pairs to use for establishing the initial connection to your Kafka cluster. It is in the form of host1:port1,host2:port2,\u0026hellip; (used for Kafka reporter protocol)   kafka_namespace SW_KAFKA_NAMESPACE \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The kafka namespace specified by OAP side SW_NAMESPACE, prepends the following kafka topic names with a -.   
kafka_topic_management SW_KAFKA_TOPIC_MANAGEMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-managements Specifying Kafka topic name for service instance reporting and registering, this should be in sync with OAP   kafka_topic_segment SW_KAFKA_TOPIC_SEGMENT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-segments Specifying Kafka topic name for Tracing data, this should be in sync with OAP   kafka_topic_log SW_KAFKA_TOPIC_LOG \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-logs Specifying Kafka topic name for Log data, this should be in sync with OAP   kafka_topic_meter SW_KAFKA_TOPIC_METER \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; skywalking-meters Specifying Kafka topic name for Meter data, this should be in sync with OAP   kafka_reporter_custom_configurations SW_KAFKA_REPORTER_CUSTOM_CONFIGURATIONS \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The configs to init KafkaProducer, supports the basic arguments (whose type is either str, bool, or int) listed here This config only works from env variables, each one should be passed in SW_KAFKA_REPORTER_CONFIG_\u0026lt;KEY_NAME\u0026gt;   agent_force_tls SW_AGENT_FORCE_TLS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Use TLS for communication with SkyWalking OAP (no cert required)   agent_authentication SW_AGENT_AUTHENTICATION \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  The authentication token to verify that the agent is trusted by the backend OAP, as for how to configure the backend, refer to the yaml.   agent_logging_level SW_AGENT_LOGGING_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; INFO The level of agent self-logs, could be one of CRITICAL, FATAL, ERROR, WARN(WARNING), INFO, DEBUG. Please turn on debug if an issue is encountered to find out what\u0026rsquo;s going on    Agent Core Danger Zone    Configuration Environment Variable Type Default Value Description     agent_collector_heartbeat_period SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 30 The agent will exchange heartbeat message with SkyWalking OAP backend every period seconds   agent_collector_properties_report_period_factor SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The agent will report service instance properties every factor * heartbeat period seconds default: 10*30 = 300 seconds   agent_instance_properties_json SW_AGENT_INSTANCE_PROPERTIES_JSON \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  A custom JSON string to be reported as service instance properties, e.g. {\u0026quot;key\u0026quot;: \u0026quot;value\u0026quot;}   agent_experimental_fork_support SW_AGENT_EXPERIMENTAL_FORK_SUPPORT \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False The agent will restart itself in any os.fork()-ed child process. Important Note: it\u0026rsquo;s not suitable for short-lived processes as each one will create a new instance in SkyWalking dashboard in format of service_instance-child(pid). This feature may not work when a precise combination of gRPC + Python 3.7 + subprocess (not fork) is used together. The agent will output a warning log when using on Python 3.7 for such a reason.   agent_queue_timeout SW_AGENT_QUEUE_TIMEOUT \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1 DANGEROUS - This option controls the interval of each bulk report from telemetry data queues Do not modify unless you have evaluated its impact given your service load.    
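The same options can also be supplied purely through environment variables, as in the export example above. The sketch below sets them from Python before the agent is imported, assuming the agent reads the SW_* variables when its configuration module is first loaded; all values are placeholders.

import os

# Placeholders: set the variables before importing the agent so they are picked up.
os.environ.setdefault('SW_AGENT_NAME', 'my-python-service')
os.environ.setdefault('SW_AGENT_COLLECTOR_BACKEND_SERVICES', '127.0.0.1:11800')
os.environ.setdefault('SW_AGENT_LOG_REPORTER_ACTIVE', 'True')

from skywalking import agent  # import after the variables are in place

agent.start()

When the application is instead launched through the sw-python CLI described in the next section, the same variables can simply be exported in the shell before running it.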
SW_PYTHON Auto Instrumentation CLI    Configuration Environment Variable Type Default Value Description     agent_sw_python_bootstrap_propagate SW_AGENT_SW_PYTHON_BOOTSTRAP_PROPAGATE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the child process agent bootstrap behavior in sw-python CLI, if set to False, a valid child process will not boot up a SkyWalking Agent. Please refer to the CLI Guide for details.   agent_sw_python_cli_debug_enabled SW_AGENT_SW_PYTHON_CLI_DEBUG_ENABLED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Special: can only be passed via environment. This config controls the CLI and agent logging debug mode, if set to True, the CLI and agent will print out debug logs. Please refer to the CLI Guide for details. Important: this config will set agent logging level to DEBUG as well, do not use it in production otherwise it will flood your logs. This normally shouldn\u0026rsquo;t be pass as a simple flag -d will be the same.    Trace Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_trace_reporter_max_buffer_size SW_AGENT_TRACE_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending the segment data to backend, segments beyond this are silently dropped   agent_trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  You can setup multiple URL path patterns, The endpoints match these patterns wouldn\u0026rsquo;t be traced. the current matching rules follow Ant Path match style , like /path/*, /path/**, /path/?.   agent_ignore_suffix SW_AGENT_IGNORE_SUFFIX \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.   correlation_element_max_number SW_CORRELATION_ELEMENT_MAX_NUMBER \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 3 Max element count of the correlation context.   correlation_value_max_length SW_CORRELATION_VALUE_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 128 Max value length of correlation context element.    Profiling Configurations    Configuration Environment Variable Type Default Value Description     agent_profile_active SW_AGENT_PROFILE_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will enable profiler when user create a new profiling task.   agent_collector_get_profile_task_interval SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The number of seconds between two profile task query.   agent_profile_max_parallel SW_AGENT_PROFILE_MAX_PARALLEL \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 5 The number of parallel monitor segment count.   agent_profile_duration SW_AGENT_PROFILE_DURATION \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 The maximum monitor segment time(minutes), if current segment monitor time out of limit, then stop it.   
agent_profile_dump_max_stack_depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 500 The number of max dump thread stack depth   agent_profile_snapshot_transport_buffer_size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 50 The number of snapshot transport to backend buffer size    Log Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_log_reporter_active SW_AGENT_LOG_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected logs to the OAP or Satellite. Otherwise, it disables the feature.   agent_log_reporter_safe_mode SW_AGENT_LOG_REPORTER_SAFE_MODE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If True, Python agent will filter out HTTP basic auth information from log records. By default, it disables the feature due to potential performance impact brought by regular expression   agent_log_reporter_max_buffer_size SW_AGENT_LOG_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending log data to backend, logs beyond this are silently dropped.   agent_log_reporter_level SW_AGENT_LOG_REPORTER_LEVEL \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; WARNING This config specifies the logger levels of concern, any logs with a level below the config will be ignored.   agent_log_reporter_ignore_filter SW_AGENT_LOG_REPORTER_IGNORE_FILTER \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config customizes whether to ignore the application-defined logger filters, if True, all logs are reported disregarding any filter rules.   agent_log_reporter_formatted SW_AGENT_LOG_REPORTER_FORMATTED \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, the log reporter will transmit the logs as formatted. Otherwise, puts logRecord.msg and logRecord.args into message content and tags(argument.n), respectively. Along with an exception tag if an exception was raised. Only applies to logging module.   agent_log_reporter_layout SW_AGENT_LOG_REPORTER_LAYOUT \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt; %(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s The log reporter formats the logRecord message based on the layout given. Only applies to logging module.   agent_cause_exception_depth SW_AGENT_CAUSE_EXCEPTION_DEPTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10 This configuration is shared by log reporter and tracer. This config limits agent to report up to limit stacktrace, please refer to [Python traceback](../ https://docs.python.org/3/library/traceback.html#traceback.print_tb) for more explanations.    Meter Reporter Configurations    Configuration Environment Variable Type Default Value Description     agent_meter_reporter_active SW_AGENT_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected meters to the OAP or Satellite. Otherwise, it disables the feature.   agent_meter_reporter_max_buffer_size SW_AGENT_METER_REPORTER_MAX_BUFFER_SIZE \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 10000 The maximum queue backlog size for sending meter data to backend, meters beyond this are silently dropped.   
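To see how the log reporter settings above interact with the standard logging module, the sketch below lowers the reporting threshold to INFO and keeps the default formatted layout. The logger name and messages are illustrative, and it assumes the agent hooks the stdlib logging module once started, so records at or above the configured level are shipped to the OAP.

```python
# Hypothetical sketch of the log reporter options documented above.
import logging
import os

os.environ.setdefault("SW_AGENT_LOG_REPORTER_ACTIVE", "True")
os.environ.setdefault("SW_AGENT_LOG_REPORTER_LEVEL", "INFO")  # default is WARNING
os.environ.setdefault(
    "SW_AGENT_LOG_REPORTER_LAYOUT",
    "%(asctime)s [%(threadName)s] %(levelname)s %(name)s - %(message)s",
)

from skywalking import agent

agent.start()

logger = logging.getLogger("example")
logger.setLevel(logging.INFO)
logger.info("expected to be reported: at or above SW_AGENT_LOG_REPORTER_LEVEL")
logger.debug("expected to be ignored: below the configured reporter level")
```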
agent_meter_reporter_period SW_AGENT_METER_REPORTER_PERIOD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 20 The interval in seconds between each meter data report   agent_pvm_meter_reporter_active SW_AGENT_PVM_METER_REPORTER_ACTIVE \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; True If True, Python agent will report collected Python Virtual Machine (PVM) meters to the OAP or Satellite. Otherwise, it disables the feature.    Plugin Related configurations    Configuration Environment Variable Type Default Value Description     agent_disable_plugins SW_AGENT_DISABLE_PLUGINS \u0026lt;class \u0026lsquo;list\u0026rsquo;\u0026gt; [''] The name patterns in comma-separated pattern, plugins whose name matches one of the pattern won\u0026rsquo;t be installed   plugin_http_http_params_length_threshold SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 1024 When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance.   plugin_http_ignore_method SW_PLUGIN_HTTP_IGNORE_METHOD \u0026lt;class \u0026lsquo;str\u0026rsquo;\u0026gt;  Comma-delimited list of http methods to ignore (GET, POST, HEAD, OPTIONS, etc\u0026hellip;)   plugin_sql_parameters_max_length SW_PLUGIN_SQL_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 0 The maximum length of the collected parameter, parameters longer than the specified length will be truncated, length 0 turns off parameter tracing   plugin_pymongo_trace_parameters SW_PLUGIN_PYMONGO_TRACE_PARAMETERS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False Indicates whether to collect the filters of pymongo   plugin_pymongo_parameters_max_length SW_PLUGIN_PYMONGO_PARAMETERS_MAX_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of the collected filters, filters longer than the specified length will be truncated   plugin_elasticsearch_trace_dsl SW_PLUGIN_ELASTICSEARCH_TRACE_DSL \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false   plugin_flask_collect_http_params SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Flask plugin should collect the parameters of the request.   plugin_sanic_collect_http_params SW_PLUGIN_SANIC_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Sanic plugin should collect the parameters of the request.   plugin_django_collect_http_params SW_PLUGIN_DJANGO_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Django plugin should collect the parameters of the request.   plugin_fastapi_collect_http_params SW_PLUGIN_FASTAPI_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the FastAPI plugin should collect the parameters of the request.   plugin_bottle_collect_http_params SW_PLUGIN_BOTTLE_COLLECT_HTTP_PARAMS \u0026lt;class \u0026lsquo;bool\u0026rsquo;\u0026gt; False This config item controls that whether the Bottle plugin should collect the parameters of the request.   
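The plugin options above can be combined in the same environment-variable style. The sketch below is illustrative only: it disables two plugins by name, lets the Flask plugin collect request parameters truncated per the HTTP length threshold, and wires up a tiny Flask app to demonstrate; the plugin names come from the support table and the route is hypothetical.

```python
# Hypothetical sketch of the plugin-related options documented above.
import os

os.environ.setdefault("SW_AGENT_DISABLE_PLUGINS", "sw_redis,sw_pymongo")      # skip these plugins
os.environ.setdefault("SW_PLUGIN_FLASK_COLLECT_HTTP_PARAMS", "True")          # collect request params
os.environ.setdefault("SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD", "256")   # truncate collected params
os.environ.setdefault("SW_PLUGIN_HTTP_IGNORE_METHOD", "HEAD,OPTIONS")         # ignore these methods

from skywalking import agent
from flask import Flask, request

agent.start()
app = Flask(__name__)

@app.route("/greet")
def greet():
    # With the settings above, the collected span is expected to carry the
    # (truncated) query parameters of this request.
    return f"hello {request.args.get('name', 'world')}"

if __name__ == "__main__":
    app.run(port=8080)
```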
plugin_celery_parameters_length SW_PLUGIN_CELERY_PARAMETERS_LENGTH \u0026lt;class \u0026lsquo;int\u0026rsquo;\u0026gt; 512 The maximum length of celery functions parameters, longer than this will be truncated, 0 turns off    ","title":"Supported Agent Configuration Options","url":"/docs/skywalking-python/v1.0.1/en/setup/configuration/"},{"content":"Supported Libraries This document is automatically generated from the SkyWalking Python testing matrix.\nThe column of versions only indicates the set of library versions tested in a best-effort manner.\nIf you find newer major versions that are missing from the following table, and it\u0026rsquo;s not documented as a limitation, please PR to update the test matrix in the plugin.\nVersions marked as NOT SUPPORTED may be due to an incompatible version with Python in the original library or a limitation of SkyWalking auto-instrumentation (welcome to contribute!)\nPlugin Support Table    Library Python Version - Lib Version Plugin Name     aiohttp Python \u0026gt;=3.7 - [\u0026lsquo;3.7.*']; sw_aiohttp   aioredis Python \u0026gt;=3.7 - [\u0026lsquo;2.0.*']; sw_aioredis   aiormq Python \u0026gt;=3.7 - [\u0026lsquo;6.3\u0026rsquo;, \u0026lsquo;6.4\u0026rsquo;]; sw_aiormq   amqp Python \u0026gt;=3.7 - [\u0026lsquo;2.6.1\u0026rsquo;]; sw_amqp   asyncpg Python \u0026gt;=3.7 - [\u0026lsquo;0.25.0\u0026rsquo;]; sw_asyncpg   bottle Python \u0026gt;=3.7 - [\u0026lsquo;0.12.23\u0026rsquo;]; sw_bottle   celery Python \u0026gt;=3.7 - [\u0026lsquo;5.1\u0026rsquo;]; sw_celery   confluent_kafka Python \u0026gt;=3.7 - [\u0026lsquo;1.5.0\u0026rsquo;, \u0026lsquo;1.7.0\u0026rsquo;, \u0026lsquo;1.8.2\u0026rsquo;]; sw_confluent_kafka   django Python \u0026gt;=3.7 - [\u0026lsquo;3.2\u0026rsquo;]; sw_django   elasticsearch Python \u0026gt;=3.7 - [\u0026lsquo;7.13\u0026rsquo;, \u0026lsquo;7.14\u0026rsquo;, \u0026lsquo;7.15\u0026rsquo;]; sw_elasticsearch   hug Python \u0026gt;=3.11 - NOT SUPPORTED YET; Python \u0026gt;=3.10 - [\u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; Python \u0026gt;=3.7 - [\u0026lsquo;2.4.1\u0026rsquo;, \u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; sw_falcon   fastapi Python \u0026gt;=3.7 - [\u0026lsquo;0.89.\u0026rsquo;, \u0026lsquo;0.88.']; sw_fastapi   flask Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_flask   happybase Python \u0026gt;=3.7 - [\u0026lsquo;1.2.0\u0026rsquo;]; sw_happybase   http_server Python \u0026gt;=3.7 - ['*']; sw_http_server   werkzeug Python \u0026gt;=3.7 - [\u0026lsquo;1.0.1\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_http_server   httpx Python \u0026gt;=3.7 - [\u0026lsquo;0.23.\u0026rsquo;, \u0026lsquo;0.22.']; sw_httpx   kafka-python Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_kafka   loguru Python \u0026gt;=3.7 - [\u0026lsquo;0.6.0\u0026rsquo;, \u0026lsquo;0.7.0\u0026rsquo;]; sw_loguru   mysqlclient Python \u0026gt;=3.7 - [\u0026lsquo;2.1.*']; sw_mysqlclient   psycopg[binary] Python \u0026gt;=3.11 - [\u0026lsquo;3.1.']; Python \u0026gt;=3.7 - [\u0026lsquo;3.0.18\u0026rsquo;, \u0026lsquo;3.1.']; sw_psycopg   psycopg2-binary Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;2.9\u0026rsquo;]; sw_psycopg2   pymongo Python \u0026gt;=3.7 - [\u0026lsquo;3.11.*']; sw_pymongo   pymysql Python \u0026gt;=3.7 - [\u0026lsquo;1.0\u0026rsquo;]; sw_pymysql   pyramid Python \u0026gt;=3.7 - [\u0026lsquo;1.10\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_pyramid   pika Python \u0026gt;=3.7 - [\u0026lsquo;1.2\u0026rsquo;]; sw_rabbitmq   redis Python 
\u0026gt;=3.7 - [\u0026lsquo;3.5.*\u0026rsquo;, \u0026lsquo;4.5.1\u0026rsquo;]; sw_redis   requests Python \u0026gt;=3.7 - [\u0026lsquo;2.26\u0026rsquo;, \u0026lsquo;2.25\u0026rsquo;]; sw_requests   sanic Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;20.12\u0026rsquo;]; sw_sanic   tornado Python \u0026gt;=3.7 - [\u0026lsquo;6.0\u0026rsquo;, \u0026lsquo;6.1\u0026rsquo;]; sw_tornado   urllib3 Python \u0026gt;=3.7 - [\u0026lsquo;1.26\u0026rsquo;, \u0026lsquo;1.25\u0026rsquo;]; sw_urllib3   urllib_request Python \u0026gt;=3.7 - ['*']; sw_urllib_request   websockets Python \u0026gt;=3.7 - [\u0026lsquo;10.3\u0026rsquo;, \u0026lsquo;10.4\u0026rsquo;]; sw_websockets    Notes  The celery server running with \u0026ldquo;celery -A \u0026hellip;\u0026rdquo; should be run with the HTTP protocol as it uses multiprocessing by default which is not compatible with the gRPC protocol implementation in SkyWalking currently. Celery clients can use whatever protocol they want. While Falcon is instrumented, only Hug is tested. Hug is believed to be abandoned project, use this plugin with a bit more caution. Instead of Hug, plugin test should move to test actual Falcon. The websocket instrumentation only traces client side connection handshake, the actual message exchange (send/recv) is not traced since injecting headers to socket message body is the only way to propagate the trace context, which requires customization of message structure and extreme care. (Feel free to add this feature by instrumenting the send/recv methods commented out in the code by either injecting sw8 headers or propagate the trace context in a separate message)  ","title":"Supported Libraries","url":"/docs/skywalking-python/latest/en/setup/plugins/"},{"content":"Supported Libraries This document is automatically generated from the SkyWalking Python testing matrix.\nThe column of versions only indicates the set of library versions tested in a best-effort manner.\nIf you find newer major versions that are missing from the following table, and it\u0026rsquo;s not documented as a limitation, please PR to update the test matrix in the plugin.\nVersions marked as NOT SUPPORTED may be due to an incompatible version with Python in the original library or a limitation of SkyWalking auto-instrumentation (welcome to contribute!)\nPlugin Support Table    Library Python Version - Lib Version Plugin Name     aiohttp Python \u0026gt;=3.7 - [\u0026lsquo;3.7.*']; sw_aiohttp   aioredis Python \u0026gt;=3.7 - [\u0026lsquo;2.0.*']; sw_aioredis   aiormq Python \u0026gt;=3.7 - [\u0026lsquo;6.3\u0026rsquo;, \u0026lsquo;6.4\u0026rsquo;]; sw_aiormq   amqp Python \u0026gt;=3.7 - [\u0026lsquo;2.6.1\u0026rsquo;]; sw_amqp   asyncpg Python \u0026gt;=3.7 - [\u0026lsquo;0.25.0\u0026rsquo;]; sw_asyncpg   bottle Python \u0026gt;=3.7 - [\u0026lsquo;0.12.23\u0026rsquo;]; sw_bottle   celery Python \u0026gt;=3.7 - [\u0026lsquo;5.1\u0026rsquo;]; sw_celery   confluent_kafka Python \u0026gt;=3.7 - [\u0026lsquo;1.5.0\u0026rsquo;, \u0026lsquo;1.7.0\u0026rsquo;, \u0026lsquo;1.8.2\u0026rsquo;]; sw_confluent_kafka   django Python \u0026gt;=3.7 - [\u0026lsquo;3.2\u0026rsquo;]; sw_django   elasticsearch Python \u0026gt;=3.7 - [\u0026lsquo;7.13\u0026rsquo;, \u0026lsquo;7.14\u0026rsquo;, \u0026lsquo;7.15\u0026rsquo;]; sw_elasticsearch   hug Python \u0026gt;=3.11 - NOT SUPPORTED YET; Python \u0026gt;=3.10 - [\u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; Python \u0026gt;=3.7 - [\u0026lsquo;2.4.1\u0026rsquo;, \u0026lsquo;2.5\u0026rsquo;, 
\u0026lsquo;2.6\u0026rsquo;]; sw_falcon   fastapi Python \u0026gt;=3.7 - [\u0026lsquo;0.89.\u0026rsquo;, \u0026lsquo;0.88.']; sw_fastapi   flask Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_flask   happybase Python \u0026gt;=3.7 - [\u0026lsquo;1.2.0\u0026rsquo;]; sw_happybase   http_server Python \u0026gt;=3.7 - ['*']; sw_http_server   werkzeug Python \u0026gt;=3.7 - [\u0026lsquo;1.0.1\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_http_server   httpx Python \u0026gt;=3.7 - [\u0026lsquo;0.23.\u0026rsquo;, \u0026lsquo;0.22.']; sw_httpx   kafka-python Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_kafka   loguru Python \u0026gt;=3.7 - [\u0026lsquo;0.6.0\u0026rsquo;, \u0026lsquo;0.7.0\u0026rsquo;]; sw_loguru   mysqlclient Python \u0026gt;=3.7 - [\u0026lsquo;2.1.*']; sw_mysqlclient   neo4j Python \u0026gt;=3.7 - [\u0026lsquo;5.*']; sw_neo4j   psycopg[binary] Python \u0026gt;=3.11 - [\u0026lsquo;3.1.']; Python \u0026gt;=3.7 - [\u0026lsquo;3.0.18\u0026rsquo;, \u0026lsquo;3.1.']; sw_psycopg   psycopg2-binary Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;2.9\u0026rsquo;]; sw_psycopg2   pymongo Python \u0026gt;=3.7 - [\u0026lsquo;3.11.*']; sw_pymongo   pymysql Python \u0026gt;=3.7 - [\u0026lsquo;1.0\u0026rsquo;]; sw_pymysql   pyramid Python \u0026gt;=3.7 - [\u0026lsquo;1.10\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_pyramid   pika Python \u0026gt;=3.7 - [\u0026lsquo;1.2\u0026rsquo;]; sw_rabbitmq   redis Python \u0026gt;=3.7 - [\u0026lsquo;3.5.*\u0026rsquo;, \u0026lsquo;4.5.1\u0026rsquo;]; sw_redis   requests Python \u0026gt;=3.7 - [\u0026lsquo;2.26\u0026rsquo;, \u0026lsquo;2.25\u0026rsquo;]; sw_requests   sanic Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;20.12\u0026rsquo;]; sw_sanic   tornado Python \u0026gt;=3.7 - [\u0026lsquo;6.0\u0026rsquo;, \u0026lsquo;6.1\u0026rsquo;]; sw_tornado   urllib3 Python \u0026gt;=3.7 - [\u0026lsquo;1.26\u0026rsquo;, \u0026lsquo;1.25\u0026rsquo;]; sw_urllib3   urllib_request Python \u0026gt;=3.7 - ['*']; sw_urllib_request   websockets Python \u0026gt;=3.7 - [\u0026lsquo;10.3\u0026rsquo;, \u0026lsquo;10.4\u0026rsquo;]; sw_websockets    Notes  The celery server running with \u0026ldquo;celery -A \u0026hellip;\u0026rdquo; should be run with the HTTP protocol as it uses multiprocessing by default which is not compatible with the gRPC protocol implementation in SkyWalking currently. Celery clients can use whatever protocol they want. While Falcon is instrumented, only Hug is tested. Hug is believed to be abandoned project, use this plugin with a bit more caution. Instead of Hug, plugin test should move to test actual Falcon. The Neo4j plugin integrates neo4j python driver 5.x.x versions which support both Neo4j 5 and 4.4 DBMS. The websocket instrumentation only traces client side connection handshake, the actual message exchange (send/recv) is not traced since injecting headers to socket message body is the only way to propagate the trace context, which requires customization of message structure and extreme care. 
(Feel free to add this feature by instrumenting the send/recv methods commented out in the code by either injecting sw8 headers or propagate the trace context in a separate message)  ","title":"Supported Libraries","url":"/docs/skywalking-python/next/en/setup/plugins/"},{"content":"Supported Libraries This document is automatically generated from the SkyWalking Python testing matrix.\nThe column of versions only indicates the set of library versions tested in a best-effort manner.\nIf you find newer major versions that are missing from the following table, and it\u0026rsquo;s not documented as a limitation, please PR to update the test matrix in the plugin.\nVersions marked as NOT SUPPORTED may be due to an incompatible version with Python in the original library or a limitation of SkyWalking auto-instrumentation (welcome to contribute!)\nPlugin Support Table    Library Python Version - Lib Version Plugin Name     aiohttp Python \u0026gt;=3.7 - [\u0026lsquo;3.7.*']; sw_aiohttp   aioredis Python \u0026gt;=3.7 - [\u0026lsquo;2.0.*']; sw_aioredis   aiormq Python \u0026gt;=3.7 - [\u0026lsquo;6.3\u0026rsquo;, \u0026lsquo;6.4\u0026rsquo;]; sw_aiormq   amqp Python \u0026gt;=3.7 - [\u0026lsquo;2.6.1\u0026rsquo;]; sw_amqp   asyncpg Python \u0026gt;=3.7 - [\u0026lsquo;0.25.0\u0026rsquo;]; sw_asyncpg   bottle Python \u0026gt;=3.7 - [\u0026lsquo;0.12.23\u0026rsquo;]; sw_bottle   celery Python \u0026gt;=3.7 - [\u0026lsquo;5.1\u0026rsquo;]; sw_celery   confluent_kafka Python \u0026gt;=3.7 - [\u0026lsquo;1.5.0\u0026rsquo;, \u0026lsquo;1.7.0\u0026rsquo;, \u0026lsquo;1.8.2\u0026rsquo;]; sw_confluent_kafka   django Python \u0026gt;=3.7 - [\u0026lsquo;3.2\u0026rsquo;]; sw_django   elasticsearch Python \u0026gt;=3.7 - [\u0026lsquo;7.13\u0026rsquo;, \u0026lsquo;7.14\u0026rsquo;, \u0026lsquo;7.15\u0026rsquo;]; sw_elasticsearch   hug Python \u0026gt;=3.11 - NOT SUPPORTED YET; Python \u0026gt;=3.10 - [\u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; Python \u0026gt;=3.7 - [\u0026lsquo;2.4.1\u0026rsquo;, \u0026lsquo;2.5\u0026rsquo;, \u0026lsquo;2.6\u0026rsquo;]; sw_falcon   fastapi Python \u0026gt;=3.7 - [\u0026lsquo;0.89.\u0026rsquo;, \u0026lsquo;0.88.']; sw_fastapi   flask Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_flask   happybase Python \u0026gt;=3.7 - [\u0026lsquo;1.2.0\u0026rsquo;]; sw_happybase   http_server Python \u0026gt;=3.7 - ['*']; sw_http_server   werkzeug Python \u0026gt;=3.7 - [\u0026lsquo;1.0.1\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_http_server   httpx Python \u0026gt;=3.7 - [\u0026lsquo;0.23.\u0026rsquo;, \u0026lsquo;0.22.']; sw_httpx   kafka-python Python \u0026gt;=3.7 - [\u0026lsquo;2.0\u0026rsquo;]; sw_kafka   loguru Python \u0026gt;=3.7 - [\u0026lsquo;0.6.0\u0026rsquo;, \u0026lsquo;0.7.0\u0026rsquo;]; sw_loguru   mysqlclient Python \u0026gt;=3.7 - [\u0026lsquo;2.1.*']; sw_mysqlclient   psycopg[binary] Python \u0026gt;=3.11 - [\u0026lsquo;3.1.']; Python \u0026gt;=3.7 - [\u0026lsquo;3.0.18\u0026rsquo;, \u0026lsquo;3.1.']; sw_psycopg   psycopg2-binary Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;2.9\u0026rsquo;]; sw_psycopg2   pymongo Python \u0026gt;=3.7 - [\u0026lsquo;3.11.*']; sw_pymongo   pymysql Python \u0026gt;=3.7 - [\u0026lsquo;1.0\u0026rsquo;]; sw_pymysql   pyramid Python \u0026gt;=3.7 - [\u0026lsquo;1.10\u0026rsquo;, \u0026lsquo;2.0\u0026rsquo;]; sw_pyramid   pika Python \u0026gt;=3.7 - [\u0026lsquo;1.2\u0026rsquo;]; sw_rabbitmq   redis Python \u0026gt;=3.7 - [\u0026lsquo;3.5.*\u0026rsquo;, 
\u0026lsquo;4.5.1\u0026rsquo;]; sw_redis   requests Python \u0026gt;=3.7 - [\u0026lsquo;2.26\u0026rsquo;, \u0026lsquo;2.25\u0026rsquo;]; sw_requests   sanic Python \u0026gt;=3.10 - NOT SUPPORTED YET; Python \u0026gt;=3.7 - [\u0026lsquo;20.12\u0026rsquo;]; sw_sanic   tornado Python \u0026gt;=3.7 - [\u0026lsquo;6.0\u0026rsquo;, \u0026lsquo;6.1\u0026rsquo;]; sw_tornado   urllib3 Python \u0026gt;=3.7 - [\u0026lsquo;1.26\u0026rsquo;, \u0026lsquo;1.25\u0026rsquo;]; sw_urllib3   urllib_request Python \u0026gt;=3.7 - ['*']; sw_urllib_request   websockets Python \u0026gt;=3.7 - [\u0026lsquo;10.3\u0026rsquo;, \u0026lsquo;10.4\u0026rsquo;]; sw_websockets    Notes  The celery server running with \u0026ldquo;celery -A \u0026hellip;\u0026rdquo; should be run with the HTTP protocol as it uses multiprocessing by default which is not compatible with the gRPC protocol implementation in SkyWalking currently. Celery clients can use whatever protocol they want. While Falcon is instrumented, only Hug is tested. Hug is believed to be abandoned project, use this plugin with a bit more caution. Instead of Hug, plugin test should move to test actual Falcon. The websocket instrumentation only traces client side connection handshake, the actual message exchange (send/recv) is not traced since injecting headers to socket message body is the only way to propagate the trace context, which requires customization of message structure and extreme care. (Feel free to add this feature by instrumenting the send/recv methods commented out in the code by either injecting sw8 headers or propagate the trace context in a separate message)  ","title":"Supported Libraries","url":"/docs/skywalking-python/v1.0.1/en/setup/plugins/"},{"content":"Supported SAPI, extension and library The following plugins provide the distributed tracing capability.\nSupported SAPI  PHP-FPM CLI under Swoole  Supported PHP extension  cURL PDO MySQL Improved Memcached phpredis MongoDB Memcache  Supported PHP library  predis php-amqplib for Message Queuing Producer  ","title":"Supported SAPI, extension and library","url":"/docs/skywalking-php/latest/en/setup/service-agent/php-agent/supported-list/"},{"content":"Supported SAPI, extension and library The following plugins provide the distributed tracing capability.\nSupported SAPI  PHP-FPM CLI under Swoole  Supported PHP extension  cURL PDO MySQL Improved Memcached phpredis MongoDB Memcache  Supported PHP library  predis php-amqplib for Message Queuing Producer  ","title":"Supported SAPI, extension and library","url":"/docs/skywalking-php/next/en/setup/service-agent/php-agent/supported-list/"},{"content":"Supported SAPI, extension and library The following plugins provide the distributed tracing capability.\nSupported SAPI  PHP-FPM CLI under Swoole  Supported PHP extension  cURL PDO MySQL Improved Memcached phpredis MongoDB Memcache  Supported PHP library  predis php-amqplib for Message Queuing Producer  ","title":"Supported SAPI, extension and library","url":"/docs/skywalking-php/v0.7.0/en/setup/service-agent/php-agent/supported-list/"},{"content":"SWIP - SkyWalking Improvement Proposal SWIP - SkyWalking Improvement Proposal, is an official document to propose a new feature and/or feature improvement, which are relative to end users and developers.\nSkyWalking has been very stable since v9.x. We are getting over the rapid changing stage. The core concepts, protocols for reporting telemetry and query, 3rd party integration, and the streaming process kernel are very stable. 
From now(2024) on, SkyWalking community would focus more on improvement and controllable improvement. All major changes should be evaluated more seriously, and try as good as possible to avoid incompatible breaking changes.\nWhat is considered a major change? The catalogs of a major change are listed as follows\n New Feature. A feature doesn\u0026rsquo;t exist for the latest version. Any change of the network Interfaces, especially for Query Protocol, Data Collect Protocols, Dynamic Configuration APIs, Exporting APIs, AI pipeline APIs. Any change of storage structure.  Q: Is Agent side feature or change considered a SWIP?\nA: Right now, SWIP targets OAP and UI side changes. All agent side changes are pending on the reviews from the committers of those agents.\nSWIP Template The purpose of this template should not be considered a hard requirement. The major purpose of SWIP is helping the PMC and community member to understand the proposal better.\n# Title: SWIP-1234 xxxx  ## Motivation The description of new feature or improvement. ## Architecture Graph Describe the relationship between your new proposal part and existing components. ## Proposed Changes State your proposal in detail. ## Imported Dependencies libs and their licenses.  ## Compatibility Whether breaking configuration, storage structure, or protocols. ## General usage docs This doesn\u0026#39;t have to be a final version, but helps the reviewers to understand how to use this new feature. SWIP Process Here is the process for starting a SWIP.\n Start a SWIP discussion at GitHub Discussion Page with title [DISCUSS] xxxx. Fill in the sections as described above in SWIP Template. At least one SkyWalking committer commented on the discussion to show interest in adopting it. This committer could update this page to grant a SWIP ID, and update the title to [SWIP-ID NO.] [DISCUSS] xxxx. All further discussion could happen on the discussion page. Once the consensus is made by enough committer supporters, and/or through a mail list vote, this SWIP should be added here as SWIP-ID NO.md and listed in the below as Known SWIPs.  All accepted and proposed SWIPs can be found in here.\nKnown SWIPs Next SWIP Number: 8\nAccepted SWIPs  SWIP-8 Support ActiveMQ Monitoring SWIP-5 Support ClickHouse Monitoring SWIP-4 Support available layers of service in the topology SWIP-3 Support RocketMQ Monitoring SWIP-2 Collecting and Gathering Kubernetes Monitoring Data SWIP-1 Create and detect Service Hierarchy Relationship  ","title":"SWIP - SkyWalking Improvement Proposal","url":"/docs/main/next/en/swip/readme/"},{"content":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. 
SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. 
Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. 
SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. 
SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. 
SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. 
This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Referenc Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Referenc Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. 
SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.rocketmqclient.collect_message_keys If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.     plugin.rocketmqclient.collect_message_tags If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.            Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. 
using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","title":"Table of Agent Configuration Properties","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/configurations/"},{"content":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. 
SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. 
SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. 
SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. 
SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. 
SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Referenc Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Referenc Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. 
SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. 
SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.rocketmqclient.collect_message_keys If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.     plugin.rocketmqclient.collect_message_tags If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.            Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","title":"Table of Agent Configuration Properties","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/configurations/"},{"content":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. 
SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. 
{theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. 
The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. 
will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Apache Dubbo provider collect arguments in RPC call, use Object#toString to collect arguments. 
SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, Arguments of length from the front will to the OAP backend SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server, the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read producer configure to get more details. Check Kafka report doc for more details and examples. sw_plugin_kafka_producer_config    plugin.kafka.producer_config_json Configure Kafka Producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key], if the key duplication. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace isolate multi OAP server when using same Kafka cluster (final topic name will append namespace before Kafka topics with - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify which class to decode encoded configuration of kafka.You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if you need. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match spring beans with regular expression for the class name. Multiple expressions could be separated by a comma. This only works when Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether or not to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. 
SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Referenc Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Referenc Jedis-4.x-plugin jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which command should be converted to read operation SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Referenc Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the cypher would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to positive number, the db.cypher.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. 
SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp    Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","title":"Table of Agent Configuration Properties","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/configurations/"},{"content":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. 
SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. 
SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. 
SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. 
SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls that whether the Feign plugin should collect the http body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the influxql(query and write) in InfluxDB access, default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Apache Dubbo consumer collect arguments in RPC call, use Object#toString to collect arguments. 
SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, the leading characters of the arguments, up to this length, will be sent to the OAP backend. SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Whether the Apache Dubbo provider collects arguments in RPC calls; Object#toString is used to collect the arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, the leading characters of the arguments, up to this length, will be sent to the OAP backend. SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server; the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read the producer configuration to get more details. Check the Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure the Kafka producer configuration in JSON format. Note that it will be overridden by plugin.kafka.producer_config[key] if the keys are duplicated. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace Isolate multiple OAP servers when using the same Kafka cluster (the namespace is prepended to the Kafka topic names, separated by - ). SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify the class used to decode the encoded Kafka configuration. You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if needed. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match Spring beans with a regular expression for the class name. Multiple expressions could be separated by a comma. This only works when the Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the Spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by the Lettuce agent. 
SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to a positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which commands should be converted to write operations. SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which commands should be converted to read operations. SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Reference: Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by the Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to a positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which commands should be converted to write operations. SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which commands should be converted to read operations. SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Reference: Jedis-4.x-plugin, jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by the Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to a positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which commands should be converted to write operations. SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which commands should be converted to read operations. SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Reference: Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the Cypher statement would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to a positive number, the db.cypher.parameters would be truncated to this length; otherwise it would be completely saved, which may cause a performance problem. SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to a positive number, the db.statement would be truncated to this length; otherwise it would be completely saved, which may cause a performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and the trace sampler CPU policy plugin is activated, the trace would not be collected when the agent process CPU usage percentage is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls whether the Micronaut HTTP client plugin should collect the parameters of the request. It is also activated implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls whether the Micronaut HTTP server plugin should collect the parameters of the request. It is also activated implicitly in the profiled trace. 
SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/          Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","title":"Table of Agent Configuration Properties","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/configurations/"},{"content":"Table of Agent Configuration Properties This is the properties list supported in agent/config/agent.config.\n   property key Description System Environment Variable Default     agent.service_name The service name to represent a logic group providing the same capabilities/logic. Suggestion: set a unique name for every logic service group, service instance nodes share the same code, Max length is 50(UTF-8 char). Optional, once service_name follows \u0026lt;group name\u0026gt;::\u0026lt;logic name\u0026gt; format, OAP server assigns the group name to the service metadata. 
SW_AGENT_NAME Your_ApplicationName   agent.namespace Namespace represents a subnet, such as kubernetes namespace, or 172.10.. SW_AGENT_NAMESPACE Not set   agent.cluster Cluster defines the physical cluster in a data center or same network segment. SW_AGENT_CLUSTER Not set   agent.sample_n_per_3_secs Negative or zero means off, by default.SAMPLE_N_PER_3_SECS means sampling N TraceSegment in 3 seconds tops. SW_AGENT_SAMPLE Not set   agent.authentication Authentication active is based on backend setting, see application.yml for more details.For most scenarios, this needs backend extensions, only basic match auth provided in default implementation. SW_AGENT_AUTHENTICATION Not set   agent.trace_segment_ref_limit_per_span The max number of TraceSegmentRef in a single span to keep memory cost estimatable. SW_TRACE_SEGMENT_LIMIT 500   agent.span_limit_per_segment The max number of spans in a single segment. Through this config item, SkyWalking keep your application memory cost estimated. SW_AGENT_SPAN_LIMIT 300   agent.ignore_suffix If the operation name of the first span is included in this set, this segment should be ignored. SW_AGENT_IGNORE_SUFFIX Not set   agent.is_open_debugging_class If true, skywalking agent will save all instrumented classes files in /debugging folder. SkyWalking team may ask for these files in order to resolve compatible problem. SW_AGENT_OPEN_DEBUG Not set   agent.instance_name Instance name is the identity of an instance, should be unique in the service. If empty, SkyWalking agent will generate an 32-bit uuid. Default, use UUID@hostname as the instance name. Max length is 50(UTF-8 char) SW_AGENT_INSTANCE_NAME \u0026quot;\u0026quot;   agent.instance_properties_json={\u0026quot;key\u0026quot;:\u0026quot;value\u0026quot;} Add service instance custom properties in json format. SW_INSTANCE_PROPERTIES_JSON Not set   agent.cause_exception_depth How depth the agent goes, when log all cause exceptions. SW_AGENT_CAUSE_EXCEPTION_DEPTH 5   agent.force_reconnection_period  Force reconnection period of grpc, based on grpc_channel_check_interval. SW_AGENT_FORCE_RECONNECTION_PERIOD 1   agent.operation_name_threshold  The operationName max length, setting this value \u0026gt; 190 is not recommended. SW_AGENT_OPERATION_NAME_THRESHOLD 150   agent.keep_tracing Keep tracing even the backend is not available if this value is true. SW_AGENT_KEEP_TRACING false   agent.force_tls Force open TLS for gRPC channel if this value is true. SW_AGENT_FORCE_TLS false   agent.ssl_trusted_ca_path gRPC SSL trusted ca file. SW_AGENT_SSL_TRUSTED_CA_PATH /ca/ca.crt   agent.ssl_key_path The private key file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_KEY_PATH \u0026quot;\u0026quot;   agent.ssl_cert_chain_path The certificate file. Enable mTLS when ssl_key_path and ssl_cert_chain_path exist. SW_AGENT_SSL_CERT_CHAIN_PATH \u0026quot;\u0026quot;   agent.enable Enable the agent kernel services and instrumentation. SW_AGENT_ENABLE true   osinfo.ipv4_list_size Limit the length of the ipv4 list size. SW_AGENT_OSINFO_IPV4_LIST_SIZE 10   collector.grpc_channel_check_interval grpc channel status check interval. SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL 30   collector.heartbeat_period agent heartbeat report period. Unit, second. 
SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD 30   collector.properties_report_period_factor The agent sends the instance properties to the backend every collector.heartbeat_period * collector.properties_report_period_factor seconds SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR 10   collector.backend_service Collector SkyWalking trace receiver service addresses. SW_AGENT_COLLECTOR_BACKEND_SERVICES 127.0.0.1:11800   collector.grpc_upstream_timeout How long grpc client will timeout in sending data to upstream. Unit is second. SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT 30 seconds   collector.get_profile_task_interval Sniffer get profile task list interval. SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL 20   collector.get_agent_dynamic_config_interval Sniffer get agent dynamic config interval SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL 20   collector.is_resolve_dns_periodically If true, skywalking agent will enable periodically resolving DNS to update receiver service addresses. SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY false   logging.level Log level: TRACE, DEBUG, INFO, WARN, ERROR, OFF. Default is info. SW_LOGGING_LEVEL INFO   logging.file_name Log file name. SW_LOGGING_FILE_NAME skywalking-api.log   logging.output Log output. Default is FILE. Use CONSOLE means output to stdout. SW_LOGGING_OUTPUT FILE   logging.dir Log files directory. Default is blank string, means, use \u0026ldquo;{theSkywalkingAgentJarDir}/logs \u0026quot; to output logs. {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located SW_LOGGING_DIR \u0026quot;\u0026quot;   logging.resolver Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs. JSON resolver prints logs in JSON format. SW_LOGGING_RESOLVER PATTERN   logging.pattern  Logging format. There are all conversion specifiers: * %level means log level. * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.\n* %thread means name of current thread.\n* %msg means some message which user logged. * %class means SimpleName of TargetClass. * %throwable means a throwable which user called. * %agent_name means agent.service_name. Only apply to the PatternLogger. SW_LOGGING_PATTERN %level %timestamp %thread %class : %msg %throwable   logging.max_file_size The max size of log file. If the size is bigger than this, archive the current file, and write into a new file. SW_LOGGING_MAX_FILE_SIZE 300 * 1024 * 1024   logging.max_history_files The max history log files. When rollover happened, if log files exceed this number,then the oldest file will be delete. Negative or zero means off, by default. SW_LOGGING_MAX_HISTORY_FILES -1   statuscheck.ignored_exceptions Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow. SW_STATUSCHECK_IGNORED_EXCEPTIONS \u0026quot;\u0026quot;   statuscheck.max_recursive_depth The max recursive depth when checking the exception traced by the agent. Typically, we don\u0026rsquo;t recommend setting this more than 10, which could cause a performance issue. Negative value and 0 would be ignored, which means all exceptions would make the span tagged in error status. SW_STATUSCHECK_MAX_RECURSIVE_DEPTH 1   correlation.element_max_number Max element count in the correlation context. SW_CORRELATION_ELEMENT_MAX_NUMBER 3   correlation.value_max_length Max value length of each element. 
SW_CORRELATION_VALUE_MAX_LENGTH 128   correlation.auto_tag_keys Tag the span by the key/value in the correlation context, when the keys listed here exist. SW_CORRELATION_AUTO_TAG_KEYS \u0026quot;\u0026quot;   jvm.buffer_size The buffer size of collected JVM info. SW_JVM_BUFFER_SIZE 60 * 10   jvm.metrics_collect_period The period in seconds of JVM metrics collection. Unit is second. SW_JVM_METRICS_COLLECT_PERIOD 1   buffer.channel_size The buffer channel size. SW_BUFFER_CHANNEL_SIZE 5   buffer.buffer_size The buffer size. SW_BUFFER_BUFFER_SIZE 300   profile.active If true, skywalking agent will enable profile when user create a new profile task. Otherwise disable profile. SW_AGENT_PROFILE_ACTIVE true   profile.max_parallel Parallel monitor segment count SW_AGENT_PROFILE_MAX_PARALLEL 5   profile.max_accept_sub_parallel Max monitoring sub-tasks count of one single endpoint access SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL 5   profile.duration Max monitor segment time(minutes), if current segment monitor time out of limit, then stop it. SW_AGENT_PROFILE_DURATION 10   profile.dump_max_stack_depth Max dump thread stack depth SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH 500   profile.snapshot_transport_buffer_size Snapshot transport to backend buffer size SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE 4500   meter.active If true, the agent collects and reports metrics to the backend. SW_METER_ACTIVE true   meter.report_interval Report meters interval. The unit is second SW_METER_REPORT_INTERVAL 20   meter.max_meter_size Max size of the meter pool SW_METER_MAX_METER_SIZE 500   log.max_message_size The max size of message to send to server.Default is 10 MB. SW_GRPC_LOG_MAX_MESSAGE_SIZE 10485760   plugin.mount Mount the specific folders of the plugins. Plugins in mounted folders would work. SW_MOUNT_FOLDERS plugins,activations   plugin.peer_max_length  Peer maximum description limit. SW_PLUGIN_PEER_MAX_LENGTH 200   plugin.exclude_plugins  Exclude some plugins define in plugins dir,Multiple plugins are separated by comma.Plugin names is defined in Agent plugin list SW_EXCLUDE_PLUGINS \u0026quot;\u0026quot;   plugin.mongodb.trace_param If true, trace all the parameters in MongoDB access, default is false. Only trace the operation, not include parameters. SW_PLUGIN_MONGODB_TRACE_PARAM false   plugin.mongodb.filter_length_limit If set to positive number, the WriteRequest.params would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT 256   plugin.elasticsearch.trace_dsl If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false. SW_PLUGIN_ELASTICSEARCH_TRACE_DSL false   plugin.springmvc.use_qualified_name_as_endpoint_name If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false. SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME false   plugin.toolkit.use_qualified_name_as_operation_name If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false. SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME false   plugin.jdbc.trace_sql_parameters If set to true, the parameters of the sql (typically java.sql.PreparedStatement) would be collected. 
SW_JDBC_TRACE_SQL_PARAMETERS false   plugin.jdbc.sql_parameters_max_length If set to positive number, the db.sql.parameters would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH 512   plugin.jdbc.sql_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH 2048   plugin.solrj.trace_statement If true, trace all the query parameters(include deleteByIds and deleteByQuery) in Solr query request, default is false. SW_PLUGIN_SOLRJ_TRACE_STATEMENT false   plugin.solrj.trace_ops_params If true, trace all the operation parameters in Solr request, default is false. SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS false   plugin.light4j.trace_handler_chain If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request. SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN false   plugin.springtransaction.simplify_transaction_definition_name If true, the transaction definition name will be simplified. SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME false   plugin.jdkthreading.threading_class_prefixes Threading classes (java.lang.Runnable and java.util.concurrent.Callable) and their subclasses, including anonymous inner classes whose name match any one of the THREADING_CLASS_PREFIXES (splitted by ,) will be instrumented, make sure to only specify as narrow prefixes as what you\u0026rsquo;re expecting to instrument, (java. and javax. will be ignored due to safety issues) SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES Not set   plugin.tomcat.collect_http_params This config item controls that whether the Tomcat plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS false   plugin.springmvc.collect_http_params This config item controls that whether the SpringMVC plugin should collect the parameters of the request, when your Spring application is based on Tomcat, consider only setting either plugin.tomcat.collect_http_params or plugin.springmvc.collect_http_params. Also, activate implicitly in the profiled trace. SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS false   plugin.httpclient.collect_http_params This config item controls that whether the HttpClient plugin should collect the parameters of the request SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.http.http_params_length_threshold When COLLECT_HTTP_PARAMS is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete parameters, NB. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD 1024   plugin.http.http_headers_length_threshold When include_http_headers declares header names, this threshold controls the length limitation of all header values. use negative values to keep and send the complete headers. Note. this config item is added for the sake of performance. SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD 2048   plugin.http.include_http_headers Set the header names, which should be collected by the plugin. Header name must follow javax.servlet.http definition. Multiple names should be split by comma. 
SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS ``(No header would be collected) |   plugin.feign.collect_request_body This config item controls whether the Feign plugin should collect the HTTP body of the request. SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY false   plugin.feign.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend; use negative values to keep and send the complete body. SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT 1024   plugin.feign.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and the content-type starts with SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request; multiple prefixes should be separated by , SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.influxdb.trace_influxql If true, trace all the InfluxQL (query and write) in InfluxDB access; default is true. SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL true   plugin.dubbo.collect_consumer_arguments Whether the Apache Dubbo consumer collects arguments in RPC calls; Object#toString is used to collect the arguments. SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS false   plugin.dubbo.consumer_arguments_length_threshold When plugin.dubbo.collect_consumer_arguments is true, the leading characters of the arguments, up to this length, will be sent to the OAP backend. SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.dubbo.collect_provider_arguments Whether the Apache Dubbo provider collects arguments in RPC calls; Object#toString is used to collect the arguments. SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS false   plugin.dubbo.provider_arguments_length_threshold When plugin.dubbo.collect_provider_arguments is true, the leading characters of the arguments, up to this length, will be sent to the OAP backend. SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD 256   plugin.kafka.bootstrap_servers A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. SW_KAFKA_BOOTSTRAP_SERVERS localhost:9092   plugin.kafka.get_topic_timeout Timeout period of reading topics from the Kafka server; the unit is second. SW_GET_TOPIC_TIMEOUT 10   plugin.kafka.producer_config Kafka producer configuration. Read the producer configuration to get more details. Check the Kafka report doc for more details and examples. SW_PLUGIN_KAFKA_PRODUCER_CONFIG    plugin.kafka.producer_config_json Configure the Kafka producer configuration in JSON format. Note that it will be overridden by plugin.kafka.producer_config[key] if the keys are duplicated. SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON    plugin.kafka.topic_meter Specify which Kafka topic name for Meter System data to report to. SW_PLUGIN_KAFKA_TOPIC_METER skywalking-meters   plugin.kafka.topic_metrics Specify which Kafka topic name for JVM metrics data to report to. SW_PLUGIN_KAFKA_TOPIC_METRICS skywalking-metrics   plugin.kafka.topic_segment Specify which Kafka topic name for traces data to report to. SW_PLUGIN_KAFKA_TOPIC_SEGMENT skywalking-segments   plugin.kafka.topic_profiling Specify which Kafka topic name for Thread Profiling snapshot to report to. SW_PLUGIN_KAFKA_TOPIC_PROFILINGS skywalking-profilings   plugin.kafka.topic_management Specify which Kafka topic name for the register or heartbeat data of Service Instance to report to. SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT skywalking-managements   plugin.kafka.topic_logging Specify which Kafka topic name for the logging data to report to. SW_PLUGIN_KAFKA_TOPIC_LOGGING skywalking-logging   plugin.kafka.namespace Isolate multiple OAP servers when using the same Kafka cluster (the namespace is prepended to the Kafka topic names, separated by - ). 
SW_KAFKA_NAMESPACE `` |   plugin.kafka.decode_class Specify the class used to decode the encoded Kafka configuration. You can set encoded information in plugin.kafka.producer_config_json or plugin.kafka.producer_config if needed. SW_KAFKA_DECODE_CLASS `` |   plugin.springannotation.classname_match_regex Match Spring beans with a regular expression for the class name. Multiple expressions could be separated by a comma. This only works when the Spring annotation plugin has been activated. SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX All the Spring beans tagged with @Bean,@Service,@Dao, or @Repository.   plugin.toolkit.log.transmit_formatted Whether to transmit logged data as formatted or un-formatted. SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED true   plugin.lettuce.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by the Lettuce agent. SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS false   plugin.lettuce.redis_parameter_max_length If set to a positive number and plugin.lettuce.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH 128   plugin.lettuce.operation_mapping_write Specify which commands should be converted to write operations. SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE    plugin.lettuce.operation_mapping_read  Specify which commands should be converted to read operations. SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ Reference: Lettuce-5.x-plugin   plugin.jedis.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by the Jedis agent. SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS false   plugin.jedis.redis_parameter_max_length If set to a positive number and plugin.jedis.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH 128   plugin.jedis.operation_mapping_write Specify which commands should be converted to write operations. SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE    plugin.jedis.operation_mapping_read  Specify which commands should be converted to read operations. SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ Reference: Jedis-4.x-plugin, jedis-2.x-3.x-plugin   plugin.redisson.trace_redis_parameters If set to true, the parameters of Redis commands would be collected by the Redisson agent. SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS false   plugin.redisson.redis_parameter_max_length If set to a positive number and plugin.redisson.trace_redis_parameters is set to true, Redis command parameters would be collected and truncated to this length. SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH 128   plugin.redisson.operation_mapping_write Specify which commands should be converted to write operations. SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE    plugin.redisson.operation_mapping_read  Specify which commands should be converted to read operations. SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ Reference: Redisson-3.x-plugin   plugin.neo4j.trace_cypher_parameters If set to true, the parameters of the Cypher statement would be collected. SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS false   plugin.neo4j.cypher_parameters_max_length If set to a positive number, the db.cypher.parameters would be truncated to this length; otherwise it would be completely saved, which may cause a performance problem. 
SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH 512   plugin.neo4j.cypher_body_max_length If set to positive number, the db.statement would be truncated to this length, otherwise it would be completely saved, which may cause performance problem. SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH 2048   plugin.cpupolicy.sample_cpu_usage_percent_limit If set to a positive number and activate trace sampler CPU policy plugin, the trace would not be collected when agent process CPU usage percent is greater than plugin.cpupolicy.sample_cpu_usage_percent_limit. SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT -1   plugin.micronauthttpclient.collect_http_params This config item controls that whether the Micronaut http client plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS false   plugin.micronauthttpserver.collect_http_params This config item controls that whether the Micronaut http server plugin should collect the parameters of the request. Also, activate implicitly in the profiled trace. SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS false   plugin.memcached.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck   plugin.memcached.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ set,add,replace,append,prepend,cas,delete,touch,incr,decr   plugin.ehcache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,getAll,loadAll,getAllWithLoader   plugin.ehcache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeQuiet,removeWithWriter,removeElement,removeAll,putWithWriter,putQuiet,putIfAbsent,putIfAbsent   plugin.guavacache.operation_mapping_write Specify which command should be converted to write operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE getIfPresent,get,getAllPresent,size   plugin.guavacache.operation_mapping_read Specify which command should be converted to read operation SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ put,putAll,invalidate,invalidateAll,invalidateAll,cleanUp   plugin.nettyhttp.collect_request_body This config item controls that whether the Netty-http plugin should collect the http body of the request. SW_PLUGIN_NETTY_HTTP_COLLECT_REQUEST_BODY false   plugin.nettyhttp.filter_length_limit When COLLECT_REQUEST_BODY is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body. SW_PLUGIN_NETTY_HTTP_FILTER_LENGTH_LIMIT 1024   plugin.nettyhttp.supported_content_types_prefix When COLLECT_REQUEST_BODY is enabled and content-type start with HTTP_SUPPORTED_CONTENT_TYPES_PREFIX, collect the body of the request , multiple paths should be separated by , SW_PLUGIN_NETTY_HTTP_SUPPORTED_CONTENT_TYPES_PREFIX application/json,text/   plugin.rocketmqclient.collect_message_keys If set to true, the keys of messages would be collected by the plugin for RocketMQ Java client.     
plugin.rocketmqclient.collect_message_tags If set to true, the tags of messages would be collected by the plugin for RocketMQ Java client.            Reset Collection/Map type configurations as empty collection.  Collection type config, e.g. using  plugin.kafka.topics= to override default plugin.kafka.topics=a,b,c,d Map type config, e.g. using plugin.kafka.producer_config[]= to override default plugin.kafka.producer_config[key]=value  Dynamic Configurations All configurations above are static, if you need to change some agent settings at runtime, please read CDS - Configuration Discovery Service document for more details.\n","title":"Table of Agent Configuration Properties","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/configurations/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/latest/en/setup/backend/telegraf-receiver/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. 
Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/next/en/setup/backend/telegraf-receiver/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. 
Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/v9.3.0/en/setup/backend/telegraf-receiver/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/v9.4.0/en/setup/backend/telegraf-receiver/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. 
Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/v9.5.0/en/setup/backend/telegraf-receiver/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. 
Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/v9.6.0/en/setup/backend/telegraf-receiver/"},{"content":"Telegraf receiver The Telegraf receiver supports receiving InfluxDB Telegraf\u0026rsquo;s metrics by meter-system. The OAP can load the configuration at bootstrap. The files are located at $CLASSPATH/telegraf-rules. If the new configuration is not well-formed, the OAP may fail to start up.\nThis is the InfluxDB Telegraf Document, the Telegraf receiver can handle Telegraf\u0026rsquo;s CPU Input Plugin, Memory Input Plugin.\nThere are many other telegraf input plugins, users can customize different input plugins' rule files. The rule file should be in YAML format, defined by the scheme described in MAL. Please see the telegraf plugin directory for more input plugins information.\nNotice:\n  The Telegraf receiver module uses HTTP to receive telegraf\u0026rsquo;s metrics, so the outputs method should be set [[outputs.http]] in telegraf.conf file. Please see the http outputs for more details.\n  The Telegraf receiver module only process telegraf\u0026rsquo;s JSON metrics format, the data format should be set data_format = \u0026quot;json\u0026quot; in telegraf.conf file. Please see the JSON data format for more details.\n  The default json_timestamp_units is second in JSON output, and the Telegraf receiver module only process second timestamp unit. If users configure json_timestamp_units in telegraf.conf file, json_timestamp_units = \u0026quot;1s\u0026quot; is feasible. Please see the JSON data format for more details.\n  The following is the default telegraf receiver YAML rule file in the application.yml, Set SW_RECEIVER_TELEGRAF:default through system environment or change SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm to activate the OpenTelemetry receiver with vm.yml in telegraf-rules.\nreceiver-telegraf:selector:${SW_RECEIVER_TELEGRAF:default}default:activeFiles:${SW_RECEIVER_TELEGRAF_ACTIVE_FILES:vm}   Rule Name Description Configuration File Data Source     vm Metrics of VMs telegraf-rules/vm.yaml Telegraf inputs plugins \u0026ndash;\u0026gt; Telegraf Receiver \u0026ndash;\u0026gt; SkyWalking OAP Server    ","title":"Telegraf receiver","url":"/docs/main/v9.7.0/en/setup/backend/telegraf-receiver/"},{"content":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, metrics-service or pprof.\nMultiple export methods are supported simultaneously, separated by commas.\nPrometheus Start HTTP port to export the satellite telemetry metrics.\nWhen the following configuration is completed, then the satellite telemetry metrics export to: http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, and all the metrics contain the cluster, service and instance tag.\n# The Satellite self telemetry configuration. 
telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus} # Export telemetry data through Prometheus server, only works on \u0026#34;export_type=prometheus\u0026#34;. prometheus: # The prometheus server address. address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234} # The prometheus server metrics endpoint. endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics} Metrics Service Send the message to the gRPC service that supports SkyWalking\u0026rsquo;s native Meter protocol with interval.\nWhen the following configuration is completed, send the message to the specified grpc-client component at the specified time interval. Among them, service and instance will correspond to the services and service instances in SkyWalking.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:metrics_service} # Export telemetry data through native meter format to OAP backend, only works on \u0026#34;export_type=metrics_service\u0026#34;. metrics_service: # The grpc-client plugin name, using the SkyWalking native batch meter protocol client_name: ${SATELLITE_TELEMETRY_METRICS_SERVICE_CLIENT_NAME:grpc-client} # The interval second for sending metrics interval: ${SATELLITE_TELEMETRY_METRICS_SERVICE_INTERVAL:10} # The prefix of telemetry metric name metric_prefix: ${SATELLITE_TELEMETRY_METRICS_SERVICE_METRIC_PREFIX:sw_stl_} pprof pprof can provide HTTP services to allow remote viewing of service execution status, helping you discover performance issues.\n# The Satellite self telemetry configuration. telemetry: # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:pprof} # Export pprof service for detect performance issue pprof: # The pprof server address. 
address: ${SATELLITE_TELEMETRY_PPROF_ADDRESS::6060} ","title":"Telemetry Exporter","url":"/docs/skywalking-satellite/latest/en/setup/examples/feature/telemetry-exporter/readme/"},{"content":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, metrics-service or pprof.\nMultiple export methods are supported simultaneously, separated by commas.\nPrometheus Start HTTP port to export the satellite telemetry metrics.\nWhen the following configuration is completed, then the satellite telemetry metrics export to: http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, and all the metrics contain the cluster, service and instance tag.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus} # Export telemetry data through Prometheus server, only works on \u0026#34;export_type=prometheus\u0026#34;. prometheus: # The prometheus server address. address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234} # The prometheus server metrics endpoint. endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics} Metrics Service Send the message to the gRPC service that supports SkyWalking\u0026rsquo;s native Meter protocol with interval.\nWhen the following configuration is completed, send the message to the specified grpc-client component at the specified time interval. Among them, service and instance will correspond to the services and service instances in SkyWalking.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:metrics_service} # Export telemetry data through native meter format to OAP backend, only works on \u0026#34;export_type=metrics_service\u0026#34;. metrics_service: # The grpc-client plugin name, using the SkyWalking native batch meter protocol client_name: ${SATELLITE_TELEMETRY_METRICS_SERVICE_CLIENT_NAME:grpc-client} # The interval second for sending metrics interval: ${SATELLITE_TELEMETRY_METRICS_SERVICE_INTERVAL:10} # The prefix of telemetry metric name metric_prefix: ${SATELLITE_TELEMETRY_METRICS_SERVICE_METRIC_PREFIX:sw_stl_} pprof pprof can provide HTTP services to allow remote viewing of service execution status, helping you discover performance issues.\n# The Satellite self telemetry configuration. 
telemetry: # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:pprof} # Export pprof service for detect performance issue pprof: # The pprof server address. address: ${SATELLITE_TELEMETRY_PPROF_ADDRESS::6060} ","title":"Telemetry Exporter","url":"/docs/skywalking-satellite/next/en/setup/examples/feature/telemetry-exporter/readme/"},{"content":"Telemetry Exporter Satellite supports three ways to export its own telemetry data, prometheus, metrics-service or pprof.\nMultiple export methods are supported simultaneously, separated by commas.\nPrometheus Start HTTP port to export the satellite telemetry metrics.\nWhen the following configuration is completed, then the satellite telemetry metrics export to: http://localhost${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS}${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT}, and all the metrics contain the cluster, service and instance tag.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:prometheus} # Export telemetry data through Prometheus server, only works on \u0026#34;export_type=prometheus\u0026#34;. prometheus: # The prometheus server address. address: ${SATELLITE_TELEMETRY_PROMETHEUS_ADDRESS::1234} # The prometheus server metrics endpoint. endpoint: ${SATELLITE_TELEMETRY_PROMETHEUS_ENDPOINT:/metrics} Metrics Service Send the message to the gRPC service that supports SkyWalking\u0026rsquo;s native Meter protocol with interval.\nWhen the following configuration is completed, send the message to the specified grpc-client component at the specified time interval. Among them, service and instance will correspond to the services and service instances in SkyWalking.\n# The Satellite self telemetry configuration. telemetry: # The space concept for the deployment, such as the namespace concept in the Kubernetes. cluster: ${SATELLITE_TELEMETRY_CLUSTER:satellite-cluster} # The group concept for the deployment, such as the service resource concept in the Kubernetes. service: ${SATELLITE_TELEMETRY_SERVICE:satellite-service} # The minimum running unit, such as the pod concept in the Kubernetes. instance: ${SATELLITE_TELEMETRY_SERVICE:satellite-instance} # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:metrics_service} # Export telemetry data through native meter format to OAP backend, only works on \u0026#34;export_type=metrics_service\u0026#34;. 
metrics_service: # The grpc-client plugin name, using the SkyWalking native batch meter protocol client_name: ${SATELLITE_TELEMETRY_METRICS_SERVICE_CLIENT_NAME:grpc-client} # The interval second for sending metrics interval: ${SATELLITE_TELEMETRY_METRICS_SERVICE_INTERVAL:10} # The prefix of telemetry metric name metric_prefix: ${SATELLITE_TELEMETRY_METRICS_SERVICE_METRIC_PREFIX:sw_stl_} pprof pprof can provide HTTP services to allow remote viewing of service execution status, helping you discover performance issues.\n# The Satellite self telemetry configuration. telemetry: # Telemetry export type, support \u0026#34;prometheus\u0026#34;, \u0026#34;metrics_service\u0026#34;, \u0026#34;pprof\u0026#34; or \u0026#34;none\u0026#34; export_type: ${SATELLITE_TELEMETRY_EXPORT_TYPE:pprof} # Export pprof service for detect performance issue pprof: # The pprof server address. address: ${SATELLITE_TELEMETRY_PPROF_ADDRESS::6060} ","title":"Telemetry Exporter","url":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/feature/telemetry-exporter/readme/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/latest/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
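The inline scrape job above covers only the receiver side of the collector; the collector still needs an exporter pointing at the OAP OpenTelemetry receiver. The following is a minimal, illustrative otel-collector configuration sketch rather than the showcase file: the OAP address `oap.istio-system:11800` and the insecure-TLS setting are assumptions to replace with your deployment's values, and the relabel rules for the `service` and `host_name` labels from the snippet above would be carried over unchanged.

```yaml
# Illustrative OpenTelemetry Collector config (assumed values; verify against the showcase).
receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'skywalking-so11y'   # same job as defined above
          metrics_path: '/metrics'
          kubernetes_sd_configs:
            - role: pod
          relabel_configs:
            # keep only the oap container's prometheus-port (full rule set as shown above)
            - source_labels: [__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]
              action: keep
              regex: oap;prometheus-port

exporters:
  otlp:
    endpoint: oap.istio-system:11800   # assumed OAP OTLP/gRPC address
    tls:
      insecure: true                   # assumption: plaintext inside the cluster

service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]
```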
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/next/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
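The kubernetes_sd scrape job quoted inline above is easier to review when laid out on separate lines. This is the same rule set re-formatted as a sketch, with no settings added or changed:

```yaml
- job_name: 'skywalking-so11y'   # make sure to use this in the so11y.yaml to filter only so11y metrics
  metrics_path: '/metrics'
  kubernetes_sd_configs:
    - role: pod
  relabel_configs:
    # keep only the oap container's prometheus-port
    - source_labels: [__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]
      action: keep
      regex: oap;prometheus-port
    # attach a constant service label to every OAP pod
    - source_labels: []
      target_label: service
      replacement: oap-server
    # use the pod name as the host_name label
    - source_labels: [__meta_kubernetes_pod_name]
      target_label: host_name
      regex: (.+)
      replacement: $$1
```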
To assist the Ops team, we provide the telemetry for the OAP backend itself.\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up prometheus fetcher.  prometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self\u0026#34;}Make sure config/fetcher-prom-rules/self.yaml exists.  Once you deploy an oap-server cluster, the target host should be replaced with a dedicated IP or hostname. For instances, there are three OAP servers in your cluster. Their host is service1, service2, and service3 respectively. You should update each self.yaml to switch the target host.\nservice1:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service1:1234labels:service:oap-server...service2:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service2:1234labels:service:oap-server...service3:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service3:1234labels:service:oap-server...Service discovery (k8s) If you deploy an oap-server cluster on k8s, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port - source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1 For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional, rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.0.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. 
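As a concrete illustration of switching the selector mentioned here, the block below shows the telemetry section with prometheus selected; the 127.0.0.1:1543 host and port are the example values used elsewhere on this page, not required defaults.

```yaml
telemetry:
  selector: ${SW_TELEMETRY:prometheus}   # or keep the file default and export SW_TELEMETRY=prometheus
  prometheus:
    host: ${SW_TELEMETRY_PROMETHEUS_HOST:127.0.0.1}
    port: ${SW_TELEMETRY_PROMETHEUS_PORT:1543}
    sslEnabled: ${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}
```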
For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into the OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up Prometheus fetcher.  prometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self\u0026#34;}Make sure config/fetcher-prom-rules/self.yaml exists.  Once you deploy an OAP server cluster, the target host should be replaced with a dedicated IP or hostname. For instance, if there are three OAP servers in your cluster, their hosts are service1, service2, and service3, respectively. You should update each self.yaml to switch the target host.\nservice1:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service1:1234labels:service:oap-server...service2:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service2:1234labels:service:oap-server...service3:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service3:1234labels:service:oap-server...Service discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OC_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port - source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1 For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.1.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. 
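The per-node config/fetcher-prom-rules/self.yaml snippets quoted above are easier to read when expanded. A sketch of the file for the first node only, using the service1:1234 target and the PT15S/PT10S intervals from the text:

```yaml
# self.yaml on the node whose telemetry endpoint is service1:1234 (example values from the text)
fetcherInterval: PT15S
fetcherTimeout: PT10S
metricsPath: /metrics
staticConfig:
  # targets will be labeled as "instance"
  targets:
    - service1:1234
  labels:
    service: oap-server
```

The files for service2 and service3 would be identical apart from the target host.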
For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into the OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up Prometheus fetcher.  prometheus-fetcher:selector:${SW_PROMETHEUS_FETCHER:default}default:enabledRules:${SW_PROMETHEUS_FETCHER_ENABLED_RULES:\u0026#34;self\u0026#34;}Make sure config/fetcher-prom-rules/self.yaml exists.  Once you deploy an OAP server cluster, the target host should be replaced with a dedicated IP or hostname. For instance, if there are three OAP servers in your cluster, their hosts are service1, service2, and service3, respectively. You should update each self.yaml to switch the target host.\nservice1:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service1:1234labels:service:oap-server...service2:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service2:1234labels:service:oap-server...service3:\nfetcherInterval:PT15SfetcherTimeout:PT10SmetricsPath:/metricsstaticConfig:# targets will be labeled as \u0026#34;instance\u0026#34;targets:- service3:1234labels:service:oap-server...Service discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.2.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. 
For more information, refer to the details below.\nSelf Observability Static IP or hostname SkyWalking supports collecting telemetry data into the OAP backend directly. Users could check them out through UI or GraphQL API.\nAdd the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n NOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. 
Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.3.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.4.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
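The SSL example quoted inline above reads more clearly when expanded. This is the same configuration re-laid out as a sketch, keeping the illustrative /etc/ssl paths from the text:

```yaml
telemetry:
  selector: ${SW_TELEMETRY:prometheus}
  prometheus:
    host: 127.0.0.1
    port: 1543
    sslEnabled: true
    sslKeyPath: /etc/ssl/key.pem               # private key; picked up again when the file changes
    sslCertChainPath: /etc/ssl/cert-chain.pem  # certificate chain; picked up again when the file changes
```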
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.5.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
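The repeated helm --set flags above can also be kept in a values file. The mapping below is an assumed equivalent for the Apache SkyWalking Kubernetes chart, not a verified excerpt; check the exact keys against the chart version you deploy.

```yaml
# Assumed values.yaml equivalent of the --set flags shown above (verify against your chart version).
elasticsearch:
  replicas: 1
  minimumMasterNodes: 1
  imageTag: 7.5.1
oap:
  replicas: 2
  storageType: elasticsearch
  ports:
    prometheus-port: 1234            # expose the self-observability metrics port
  env:
    SW_TELEMETRY: prometheus
    SW_OTEL_RECEIVER: default        # enable the Otel receiver
    SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES: oap   # add the oap analyzer for Otel metrics
```

It would then be applied with `helm -n istio-system install skywalking skywalking -f values.yaml`, assuming the same chart and namespace as the command above.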
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.6.0/en/setup/backend/backend-telemetry/"},{"content":"Telemetry for backend The OAP backend cluster itself is a distributed streaming process system. 
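For a deployment with static hosts, the "own Prometheus and Grafana" option mentioned above only needs an ordinary static scrape job. A minimal sketch, assuming a single OAP node exposing the telemetry endpoint on 127.0.0.1:1543 as configured earlier:

```yaml
# prometheus.yml fragment (illustrative; point the target at your OAP host and telemetry port)
scrape_configs:
  - job_name: 'skywalking-oap-so11y'
    metrics_path: /metrics
    static_configs:
      - targets: ['127.0.0.1:1543']
        labels:
          service: oap-server
```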
To assist the Ops team, we provide the telemetry for the OAP backend itself, also known as self-observability (so11y)\nBy default, the telemetry is disabled by setting selector to none, like this:\ntelemetry:selector:${SW_TELEMETRY:none}none:prometheus:host:${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}port:${SW_TELEMETRY_PROMETHEUS_PORT:1234}sslEnabled:${SW_TELEMETRY_PROMETHEUS_SSL_ENABLED:false}sslKeyPath:${SW_TELEMETRY_PROMETHEUS_SSL_KEY_PATH:\u0026#34;\u0026#34;}sslCertChainPath:${SW_TELEMETRY_PROMETHEUS_SSL_CERT_CHAIN_PATH:\u0026#34;\u0026#34;}You may also set Prometheus to enable them. For more information, refer to the details below.\nSelf Observability SkyWalking supports exposing telemetry data representing OAP running status through Prometheus endpoint. Users could set up OpenTelemetry collector to scrap and forward telemetry data to OAP server for further analysis, eventually showing up UI or GraphQL API.\nStatic IP or hostname Add the following configuration to enable self-observability-related modules.\n Set up prometheus telemetry.  telemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set up OpenTelemetry to scrape the metrics from OAP telemetry.  Refer to the E2E test case as an example.\nFor Kubernetes deployments, read the following section, otherwise you should be able to adjust the configurations below to fit your scenarios.\nService discovery on Kubernetes If you deploy an OAP server cluster on Kubernetes, the oap-server instance (pod) would not have a static IP or hostname. We can leverage OpenTelemetry Collector to discover the oap-server instance, and scrape \u0026amp; transfer the metrics to OAP OpenTelemetry receiver.\nOn how to install SkyWalking on k8s, you can refer to Apache SkyWalking Kubernetes.\nSet this up following these steps:\n Set up oap-server.    
Set the metrics port.\nprometheus-port: 1234   Set environment variables.\nSW_TELEMETRY=prometheus SW_OTEL_RECEIVER=default SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap Here is an example to install by Apache SkyWalking Kubernetes:\nhelm -n istio-system install skywalking skywalking \\ --set elasticsearch.replicas=1 \\ --set elasticsearch.minimumMasterNodes=1 \\ --set elasticsearch.imageTag=7.5.1 \\ --set oap.replicas=2 \\ --set ui.image.repository=$HUB/skywalking-ui \\ --set ui.image.tag=$TAG \\ --set oap.image.tag=$TAG \\ --set oap.image.repository=$HUB/skywalking-oap \\ --set oap.storageType=elasticsearch \\ --set oap.ports.prometheus-port=1234 \\ # \u0026lt;\u0026lt;\u0026lt; Expose self observability metrics port --set oap.env.SW_TELEMETRY=prometheus \\ --set oap.env.SW_OTEL_RECEIVER=default \\ # \u0026lt;\u0026lt;\u0026lt; Enable Otel receiver --set oap.env.SW_OTEL_RECEIVER_ENABLED_OTEL_METRICS_RULES=oap # \u0026lt;\u0026lt;\u0026lt; Add oap analyzer for Otel metrics   Set up OpenTelemetry Collector and config a scrape job:  - job_name:\u0026#39;skywalking-so11y\u0026#39;# make sure to use this in the so11y.yaml to filter only so11y metricsmetrics_path:\u0026#39;/metrics\u0026#39;kubernetes_sd_configs:- role:podrelabel_configs:- source_labels:[__meta_kubernetes_pod_container_name, __meta_kubernetes_pod_container_port_name]action:keepregex:oap;prometheus-port- source_labels:[]target_label:servicereplacement:oap-server- source_labels:[__meta_kubernetes_pod_name]target_label:host_nameregex:(.+)replacement:$$1For the full example for OpenTelemetry Collector configuration and recommended version, you can refer to showcase.\n Users also could leverage the Prometheus endpoint for their own Prometheus and Grafana.\nNOTE: Since Apr 21, 2021, the Grafana project has been relicensed to AGPL-v3, and is no longer licensed for Apache 2.0. Check the LICENSE details. The following Prometheus + Grafana solution is optional rather than recommended.\nPrometheus Prometheus is supported as a telemetry implementor, which collects metrics from SkyWalking\u0026rsquo;s backend.\nSet prometheus to provider. The endpoint opens at http://0.0.0.0:1234/ and http://0.0.0.0:1234/metrics.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:Set host and port if needed.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543Set relevant SSL settings to expose a secure endpoint. Note that the private key file and cert chain file could be uploaded once changes are applied to them.\ntelemetry:selector:${SW_TELEMETRY:prometheus}prometheus:host:127.0.0.1port:1543sslEnabled:truesslKeyPath:/etc/ssl/key.pemsslCertChainPath:/etc/ssl/cert-chain.pemGrafana Visualization Provide the Grafana dashboard settings. Check SkyWalking OAP Cluster Monitor Dashboard config and SkyWalking OAP Instance Monitor Dashboard config.\n","title":"Telemetry for backend","url":"/docs/main/v9.7.0/en/setup/backend/backend-telemetry/"},{"content":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. 
Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","title":"The Logic Endpoint","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/logic-endpoint/"},{"content":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","title":"The Logic Endpoint","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/logic-endpoint/"},{"content":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. 
The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","title":"The Logic Endpoint","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/logic-endpoint/"},{"content":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","title":"The Logic Endpoint","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/logic-endpoint/"},{"content":"The Logic Endpoint In default, all the RPC server-side names as entry spans, such as RESTFul API path and gRPC service name, would be endpoints with metrics. At the same time, SkyWalking introduces the logic endpoint concept, which allows plugins and users to add new endpoints without adding new spans. The following logic endpoints are added automatically by plugins.\n GraphQL Query and Mutation are logic endpoints by using the names of them. Spring\u0026rsquo;s ScheduledMethodRunnable jobs are logic endpoints. The name format is SpringScheduled/${className}/${methodName}. Apache ShardingSphere ElasticJob\u0026rsquo;s jobs are logic endpoints. The name format is ElasticJob/${jobName}. XXLJob\u0026rsquo;s jobs are logic endpoints. The name formats include xxl-job/MethodJob/${className}.${methodName}, xxl-job/ScriptJob/${GlueType}/id/${jobId}, and xxl-job/SimpleJob/${className}. Quartz(optional plugin)\u0026rsquo;s jobs are logic endpoints. the name format is quartz-scheduler/${className}.  User could use the SkyWalking\u0026rsquo;s application toolkits to add the tag into the local span to label the span as a logic endpoint in the analysis stage. The tag is, key=x-le and value = {\u0026quot;logic-span\u0026quot;:true}.\n","title":"The Logic Endpoint","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/logic-endpoint/"},{"content":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. 
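To make these APIs concrete, here is a minimal, hypothetical sketch. It assumes the apm-toolkit-trace dependency shown just below is on the classpath; the class and operation names are illustrative, and without the agent attached the toolkit calls are no-ops.
    import org.apache.skywalking.apm.toolkit.trace.ActiveSpan;
    import org.apache.skywalking.apm.toolkit.trace.Trace;

    public class CacheRefreshJob {
        // The class and operation names here are illustrative only.
        // @Trace asks the agent to wrap this method in a local span.
        @Trace(operationName = "CacheRefreshJob/refresh")
        public void refresh() {
            // Tag the active span; the x-le tag marks it as a logic endpoint,
            // as described in the logic-endpoint documents above.
            ActiveSpan.tag("x-le", "{\"logic-span\":true}");
            ActiveSpan.info("cache refreshed");
        }
    }
With Gradle, the equivalent coordinate would presumably be declared as implementation 'org.apache.skywalking:apm-toolkit-trace:${skywalking.version}'.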
Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","title":"the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-dependency/"},{"content":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","title":"the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-dependency/"},{"content":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","title":"the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-dependency/"},{"content":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","title":"the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-dependency/"},{"content":"Dependency the toolkit, such as using maven or gradle\nAdd Trace Toolkit apm-toolkit-trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-trace\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; ","title":"the toolkit, such as using maven or gradle","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-dependency/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. 
We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. 
Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/latest/en/setup/backend/backend-profile-thread-merging/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. 
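A minimal sketch of the per-node duration rule stated in the next sentence; the Seq holder is hypothetical, and it assumes two sequences count as continuous when their sequence numbers are adjacent.
    import java.util.Comparator;
    import java.util.List;

    final class NodeDurations {
        // Hypothetical holder: one thread-dump sample of this node (sequence number + timestamp).
        record Seq(int sequence, long timestampMillis) {}

        // Sort the sequences, then add up the gap between every pair of continuous sequences
        // (continuous is assumed to mean adjacent sequence numbers).
        static long duration(List<Seq> seqs) {
            List<Seq> sorted = seqs.stream()
                    .sorted(Comparator.comparingInt(Seq::sequence))
                    .toList();
            long total = 0;
            for (int i = 1; i < sorted.size(); i++) {
                if (sorted.get(i).sequence() == sorted.get(i - 1).sequence() + 1) {
                    total += sorted.get(i).timestampMillis() - sorted.get(i - 1).timestampMillis();
                }
            }
            return total;
        }
    }
The node's own execution time would then be this duration minus the time consumed by all of its children, as the last point of this step describes.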
If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/next/en/setup/backend/backend-profile-thread-merging/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   
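A rough sketch of this merge rule, using a hypothetical StackNode type and an explicit deque in place of the recursive calls that the stack-based traversal avoids:
    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.List;

    final class StackTreeMerger {
        // Hypothetical tree node: a code signature plus the dump sequences kept from the source.
        static final class StackNode {
            final String signature;
            final List<Integer> sequences = new ArrayList<>();
            final List<StackNode> children = new ArrayList<>();
            StackNode(String signature) { this.signature = signature; }
        }

        // Merge `source` into `target`: a child with the same code signature under the same parent
        // has its sequences merged; otherwise it is attached as a new child. An explicit deque
        // replaces recursion, mirroring the stack-based traversal described in this step.
        static void merge(StackNode target, StackNode source) {
            Deque<StackNode[]> pending = new ArrayDeque<>();
            pending.push(new StackNode[]{target, source});
            while (!pending.isEmpty()) {
                StackNode[] pair = pending.pop();
                StackNode into = pair[0];
                StackNode from = pair[1];
                into.sequences.addAll(from.sequences);
                for (StackNode child : from.children) {
                    StackNode match = into.children.stream()
                            .filter(c -> c.signature.equals(child.signature))
                            .findFirst()
                            .orElse(null);
                    if (match == null) {
                        into.children.add(child);                    // new child: attach as-is
                    } else {
                        pending.push(new StackNode[]{match, child}); // same signature: merge deeper
                    }
                }
            }
        }
    }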
Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.0.0/en/guides/backend-profile/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. 
Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.1.0/en/guides/backend-profile/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. 
If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.2.0/en/guides/backend-profile/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. 
The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.3.0/en/guides/backend-profile/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. 
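As a rough illustration of the group-by and collector combination described here (ThreadStack and firstElement() are hypothetical stand-ins for the stored records):
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    final class StackGrouping {
        // Hypothetical record for one thread-dump stack loaded from storage.
        record ThreadStack(List<String> elements) {
            String firstElement() { return elements.get(0); }
        }

        // Group the stacks by their first (root) element with a parallel stream,
        // so that each group can later be folded into its own top-level tree.
        static Map<String, List<ThreadStack>> groupByRoot(List<ThreadStack> stacks) {
            return stacks.parallelStream()
                    .collect(Collectors.groupingByConcurrent(ThreadStack::firstElement));
        }
    }
In a fuller sketch, a custom Collector's supplier, accumulator and combiner would correspond to the Generate empty stack tree, Accumulator data to stack tree and Combine stack trees steps listed below.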
Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.4.0/en/guides/backend-profile/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). 
Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.5.0/en/setup/backend/backend-profile-thread-merging/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. 
This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. 
Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.6.0/en/setup/backend/backend-profile-thread-merging/"},{"content":"Thread dump merging mechanism The performance profile is an enhancement feature in the APM system. We are using the thread dump to estimate the method execution time, rather than adding multiple local spans. In this way, the resource cost would be much less than using distributed tracing to locate slow method. This feature is suitable in the production environment. This document introduces how thread dumps are merged into the final report as a stack tree(s).\nThread analyst Read data and transform Read the data from the database and convert it to a data structure in gRPC.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Load data using paging op2=\u0026gt;operation: Transform data using parallel st(right)-\u0026gt;op1(right)-\u0026gt;op2 op2(right)-\u0026gt;e Copy the code and paste it into this link to generate flow chart.\n Use the stream to read data by page (50 records per page). Convert the data into gRPC data structures in the form of parallel streams. Merge into a list of data.  Data analysis Use the group-by and collector modes in the Java parallel stream to group according to the first stack element in the database records, and use the collector to perform data aggregation. Generate a multi-root tree.\nst=\u0026gt;start: Start e=\u0026gt;end: End op1=\u0026gt;operation: Group by first stack element sup=\u0026gt;operation: Generate empty stack tree acc=\u0026gt;operation: Accumulator data to stack tree com=\u0026gt;operation: Combine stack trees fin=\u0026gt;operation: Calculate durations and build result st(right)-\u0026gt;op1-\u0026gt;sup(right)-\u0026gt;acc acc(right)-\u0026gt;com(right)-\u0026gt;fin-\u0026gt;e Copy the code and paste it into this link to generate a flow chart.\n Group by first stack element: Use the first level element in each stack to group, ensuring that the stacks have the same root node. Generate empty stack tree: Generate multiple top-level empty trees to prepare for the following steps. The reason for generating multiple top-level trees is that original data can be added in parallel without generating locks. Accumulator data to stack tree: Add every thread dump into the generated trees.  Iterate through each element in the thread dump to find if there is any child element with the same code signature and same stack depth in the parent element. If not, add this element. Keep the dump sequences and timestamps in each nodes from the source.   Combine stack trees: Combine all trees structures into one by using the same rules as the Accumulator.  Use LDR to traverse the tree node. Use the Stack data structure to avoid recursive calls. Each stack element represents the node that needs to be merged. The task of merging two nodes is to merge the list of children nodes. If they have the same code signature and same parents, save the dump sequences and timestamps in this node. Otherwise, the node needs to be added into the target node as a new child.   Calculate durations and build result: Calculate relevant statistics and generate response.  Use the same traversal node logic as in the Combine stack trees step. Convert to a GraphQL data structure, and put all nodes into a list for subsequent duration calculations. Calculate each node\u0026rsquo;s duration in parallel. For each node, sort the sequences. 
If there are two continuous sequences, the duration should add the duration of these two seq\u0026rsquo;s timestamp. Calculate each node execution in parallel. For each node, the duration of the current node should deduct the time consumed by all children.    Profile data debugging Please follow the exporter tool to package profile data. Unzip the profile data and use analyzer main function to run it.\n","title":"Thread dump merging mechanism","url":"/docs/main/v9.7.0/en/setup/backend/backend-profile-thread-merging/"},{"content":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large volumes of time-series data. One of the key features of TSDB is its ability to automatically manage data storage over time, optimize performance and ensure that the system can scale to handle large workloads. TSDB empowers Measure and Stream relevant data.\nShard In TSDB, the data in a group is partitioned into shards based on a configurable sharding scheme. Each shard is assigned to a specific set of storage nodes, and those nodes store and process the data within that shard. This allows BanyanDB to scale horizontally by adding more storage nodes to the cluster as needed.\nshard\n Buffer: It is typically implemented as an in-memory queue managed by a shard. When new time-series data is ingested into the system, it is added to the end of the queue, and when the buffer reaches a specific size, the data is flushed to disk in batches. SST: When a bucket of buffer becomes full or reaches a certain size threshold, it is flushed to disk as a new Sorted String Table (SST) file. This process is known as compaction. Segments and Blocks: Time-series data is stored in data segments/blocks within each shard. Blocks contain a fixed number of data points and are organized into time windows. Each data segment includes an index that efficiently retrieves data within the block. Block Cache: It manages the in-memory cache of data blocks, improving query performance by caching frequently accessed data blocks in memory.  Write Path The write path of TSDB begins when time-series data is ingested into the system. TSDB will consult the schema repository to check if the group exists, and if it does, then it will hash the SeriesID to determine which shard it belongs to.\nEach shard in TSDB is responsible for storing a subset of the time-series data, and it uses a write-ahead log to record incoming writes in a durable and fault-tolerant manner. The shard also holds an in-memory index allowing fast lookups of time-series data.\nWhen a shard receives a write request, the data is written to the buffer as a series of buckets. Each bucket is a fixed-size chunk of time-series data typically configured to be several minutes or hours long. As new data is written to the buffer, it is appended to the current bucket until it is full. Once the bucket is full, it is closed, and a new bucket is created to continue buffering writes.\nOnce a bucket is closed, it is stored as a single SST in a shard. The file is indexed and added to the index for the corresponding time range and resolution.\nRead Path The read path in TSDB retrieves time-series data from disk or memory and returns it to the query engine. The read path comprises several components: the buffer, cache, and SST file. The following is a high-level overview of how these components work together to retrieve time-series data in TSDB.\nThe first step in the read path is to perform an index lookup to determine which blocks contain the desired time range. 
The index contains metadata about each data block, including its start and end time and its location on disk.\nIf the requested data is present in the buffer (i.e., it has been recently written but not yet persisted to disk), the buffer is checked to see if the data can be returned directly from memory. The read path determines which bucket(s) contain the requested time range. If the data is not present in the buffer, the read path proceeds to the next step.\nIf the requested data is present in the cache (i.e., it has been recently read from disk and is still in memory), it is checked to see if the data can be returned directly from memory. The read path proceeds to the next step if the data is not in the cache.\nThe final step in the read path is to look up the appropriate SST file on disk. Files are the on-disk representation of data blocks and are organized by shard and time range. The read path determines which SST files contain the requested time range and reads the appropriate data blocks from the disk.\n","title":"TimeSeries Database(TSDB)","url":"/docs/skywalking-banyandb/latest/concept/tsdb/"},{"content":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large volumes of time-series data. One of the key features of TSDB is its ability to automatically manage data storage over time, optimize performance and ensure that the system can scale to handle large workloads. TSDB empowers Measure and Stream relevant data.\nShard In TSDB, the data in a group is partitioned into shards based on a configurable sharding scheme. Each shard is assigned to a specific set of storage nodes, and those nodes store and process the data within that shard. This allows BanyanDB to scale horizontally by adding more storage nodes to the cluster as needed.\nWithin each shard, data is stored in different segments based on time ranges. The series indexes are generated based on entities, and the indexes generated based on indexing rules of the Measure types are also stored under the shard.\nSegment Each segment is composed of multiple parts. Whenever SkyWalking sends a batch of data, BanyanDB writes this batch of data into a new part. For data of the Stream type, the inverted indexes generated based on the indexing rules are also stored in the segment. Since BanyanDB adopts a snapshot approach for data read and write operations, the segment also needs to maintain additional snapshot information to record the validity of the parts.\nPart Within a part, data is split into multiple files in a columnar manner. The timestamps are stored in the timestamps.bin file, tags are organized in persistent tag families as various files with the .tf suffix, and fields are stored separately in the fields.bin file.\nIn addition, each part maintains several metadata files. Among them, metadata.json is the metadata file for the part, storing descriptive information, such as start and end times, part size, etc.\nThe meta.bin is a skipping index file that serves as the entry file for the entire part, helping to index the primary.bin file.\nThe primary.bin file contains the index of each block. Through it, the actual data files or the tagFamily metadata files ending with .tfm can be indexed, which in turn helps to locate the data in blocks.\nNotably, for data of the Stream type, since there are no field columns, the fields.bin file does not exist, while the rest of the structure is entirely consistent with the Measure type.\nBlock Each block holds data with the same series ID. 
The max size of the measure block is controlled by data volume and the number of rows. Meanwhile, the max size of the stream block is controlled by data volume. The diagram below shows the detailed fields within each block. The block is the minimal unit of TSDB, which contains several rows of data. Due to the column-based design, each block is spread over several files.\nWrite Path The write path of TSDB begins when time-series data is ingested into the system. TSDB will consult the schema repository to check if the group exists, and if it does, then it will hash the SeriesID to determine which shard it belongs to.\nEach shard in TSDB is responsible for storing a subset of the time-series data. The shard also holds an in-memory index allowing fast lookups of time-series data.\nWhen a shard receives a write request, the data is written to the buffer as a memory part. Meanwhile, the series index and inverted index will also be updated. The worker in the background periodically flushes data, writing the memory part to the disk. After the flush operation is completed, it triggers a merge operation to combine the parts and remove invalid data.\nWhenever a new memory part is generated, or when a flush or merge operation is triggered, they initiate an update of the snapshot and delete outdated snapshots. The parts in a persistent snapshot could be accessible to the reader.\nRead Path The read path in TSDB retrieves time-series data from disk or memory, and returns it to the query engine. The read path comprises several components: the buffer and parts. The following is a high-level overview of how these components work together to retrieve time-series data in TSDB.\nThe first step in the read path is to perform an index lookup to determine which parts contain the desired time range. The index contains metadata about each data part, including its start and end time.\nIf the requested data is present in the buffer (i.e., it has been recently written but not yet persisted to disk), the buffer is checked to see if the data can be returned directly from memory. The read path determines which memory part(s) contain the requested time range. If the data is not present in the buffer, the read path proceeds to the next step.\nThe next step in the read path is to look up the appropriate parts on disk. Files are the on-disk representation of blocks and are organized by shard and time range. The read path determines which parts contain the requested time range and reads the appropriate blocks from the disk. Due to the column-based storage design, it may be necessary to read multiple data files.\n","title":"TimeSeries Database(TSDB)","url":"/docs/skywalking-banyandb/next/concept/tsdb/"},{"content":"TimeSeries Database(TSDB) TSDB is a time-series storage engine designed to store and query large volumes of time-series data. One of the key features of TSDB is its ability to automatically manage data storage over time, optimize performance and ensure that the system can scale to handle large workloads. TSDB empowers Measure and Stream relevant data.\nShard In TSDB, the data in a group is partitioned into shards based on a configurable sharding scheme. Each shard is assigned to a specific set of storage nodes, and those nodes store and process the data within that shard. This allows BanyanDB to scale horizontally by adding more storage nodes to the cluster as needed.\nshard\n Buffer: It is typically implemented as an in-memory queue managed by a shard. 
When new time-series data is ingested into the system, it is added to the end of the queue, and when the buffer reaches a specific size, the data is flushed to disk in batches. SST: When a bucket of buffer becomes full or reaches a certain size threshold, it is flushed to disk as a new Sorted String Table (SST) file. This process is known as compaction. Segments and Blocks: Time-series data is stored in data segments/blocks within each shard. Blocks contain a fixed number of data points and are organized into time windows. Each data segment includes an index that efficiently retrieves data within the block. Block Cache: It manages the in-memory cache of data blocks, improving query performance by caching frequently accessed data blocks in memory.  Write Path The write path of TSDB begins when time-series data is ingested into the system. TSDB will consult the schema repository to check if the group exists, and if it does, then it will hash the SeriesID to determine which shard it belongs to.\nEach shard in TSDB is responsible for storing a subset of the time-series data, and it uses a write-ahead log to record incoming writes in a durable and fault-tolerant manner. The shard also holds an in-memory index allowing fast lookups of time-series data.\nWhen a shard receives a write request, the data is written to the buffer as a series of buckets. Each bucket is a fixed-size chunk of time-series data typically configured to be several minutes or hours long. As new data is written to the buffer, it is appended to the current bucket until it is full. Once the bucket is full, it is closed, and a new bucket is created to continue buffering writes.\nOnce a bucket is closed, it is stored as a single SST in a shard. The file is indexed and added to the index for the corresponding time range and resolution.\nRead Path The read path in TSDB retrieves time-series data from disk or memory and returns it to the query engine. The read path comprises several components: the buffer, cache, and SST file. The following is a high-level overview of how these components work together to retrieve time-series data in TSDB.\nThe first step in the read path is to perform an index lookup to determine which blocks contain the desired time range. The index contains metadata about each data block, including its start and end time and its location on disk.\nIf the requested data is present in the buffer (i.e., it has been recently written but not yet persisted to disk), the buffer is checked to see if the data can be returned directly from memory. The read path determines which bucket(s) contain the requested time range. If the data is not present in the buffer, the read path proceeds to the next step.\nIf the requested data is present in the cache (i.e., it has been recently read from disk and is still in memory), it is checked to see if the data can be returned directly from memory. The read path proceeds to the next step if the data is not in the cache.\nThe final step in the read path is to look up the appropriate SST file on disk. Files are the on-disk representation of data blocks and are organized by shard and time range. The read path determines which SST files contain the requested time range and reads the appropriate data blocks from the disk.\n","title":"TimeSeries Database(TSDB)","url":"/docs/skywalking-banyandb/v0.5.0/concept/tsdb/"},{"content":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need to know about Apache SkyWalking Cloud on Kubernetes(SWCK). 
This repository provides how to onboard and customize the agent injector, operator and adapter.\n Design. Some materials regarding the design decision under the hood. Setup. Several instruments to onboard the agent injector, operator and adapter. Examples. A number of examples of how to use SWCK.  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"to Apache SkyWalking Cloud on Kubernetes Document Repository","url":"/docs/skywalking-swck/latest/readme/"},{"content":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need to know about Apache SkyWalking Cloud on Kubernetes(SWCK). This repository provides how to onboard and customize the agent injector, operator and adapter.\n Design. Some materials regarding the design decision under the hood. Setup. Several instruments to onboard the agent injector, operator and adapter. Examples. A number of examples of how to use SWCK.  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"to Apache SkyWalking Cloud on Kubernetes Document Repository","url":"/docs/skywalking-swck/next/readme/"},{"content":"Welcome to Apache SkyWalking Cloud on Kubernetes Document Repository Here you can lean all you need to know about Apache SkyWalking Cloud on Kubernetes(SWCK). This repository provides how to onboard and customize the agent injector, operator and adapter.\n Design. Some materials regarding the design decision under the hood. Setup. Several instruments to onboard the agent injector, operator and adapter. Examples. A number of examples of how to use SWCK.  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"to Apache SkyWalking Cloud on Kubernetes Document Repository","url":"/docs/skywalking-swck/v0.9.0/readme/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. 
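The application.yml fragment quoted in the "Set Token" step above appears flattened on this page. With ordinary YAML indentation, the relevant part of the receiver-sharing-server module looks roughly like the following; the surrounding selector key and its default value are assumptions here and may differ between OAP releases, so check the application.yml shipped with your distribution.

```yaml
receiver-sharing-server:
  selector: ${SW_RECEIVER_SHARING_SERVER:default}
  default:
    # When non-empty, only agent requests carrying a matching token are accepted.
    authentication: ${SW_AUTHENTICATION:""}
```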
Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/latest/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/next/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, which makes sure that a network can be trusted. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent, and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. 
But we welcome contributions on this feature.\n","title":"Token Authentication","url":"/docs/main/v9.0.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.1.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.2.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. 
On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.3.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.4.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. 
agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.5.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.6.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Supported version 7.0.0+\nWhy do we need token authentication after TLS? TLS is about transport security, ensuring a trusted network. On the other hand, token authentication is about monitoring whether application data can be trusted.\nToken In the current version, a token is considered a simple string.\nSet Token  Set token in agent.config file  # Authentication active is based on backend setting, see application.yml for more details. 
agent.authentication = ${SW_AGENT_AUTHENTICATION:xxxx} Set token in application.yml file  ······receiver-sharing-server:default:authentication:${SW_AUTHENTICATION:\u0026#34;\u0026#34;}······Authentication failure The Skywalking OAP verifies every request from the agent and only allows requests whose token matches the one configured in application.yml to pass through.\nIf the token does not match, you will see the following log in the agent:\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. Of course, it\u0026rsquo;s technically possible, but token and TLS are used for untrusted network environments. In these circumstances, TLS has a higher priority. Tokens can be trusted only under TLS protection, and they can be easily stolen if sent through a non-TLS network.\nDo you support other authentication mechanisms, such as ak/sk? Not for now. But we welcome contributions to this feature.\n","title":"Token Authentication","url":"/docs/main/v9.7.0/en/setup/backend/backend-token-auth/"},{"content":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","title":"Token Authentication","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/token-auth/"},{"content":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. 
But we appreciate someone contributes this feature.\n","title":"Token Authentication","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/token-auth/"},{"content":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","title":"Token Authentication","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/token-auth/"},{"content":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. But we appreciate someone contributes this feature.\n","title":"Token Authentication","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/token-auth/"},{"content":"Token Authentication Token In current version, Token is considered as a simple string.\nSet Token Set token in agent.config file\n# Authentication active is based on backend setting, see application.yml for more details. agent.authentication = xxxx Meanwhile, open the backend token authentication.\nAuthentication fails The Collector verifies every request from agent, allowed only the token match.\nIf the token is not right, you will see the following log in agent\norg.apache.skywalking.apm.dependencies.io.grpc.StatusRuntimeException: PERMISSION_DENIED FAQ Can I use token authentication instead of TLS? No, you shouldn\u0026rsquo;t. In tech way, you can of course, but token and TLS are used for untrusted network env. In that circumstance, TLS has higher priority than this. Token can be trusted only under TLS protection.Token can be stolen easily if you send it through a non-TLS network.\nDo you support other authentication mechanisms? Such as ak/sk? For now, no. 
But we appreciate someone contributes this feature.\n","title":"Token Authentication","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/token-auth/"},{"content":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","title":"Trace Correlation Context","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/"},{"content":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","title":"Trace Correlation Context","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/"},{"content":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","title":"Trace Correlation Context","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/"},{"content":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  
Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","title":"Trace Correlation Context","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/"},{"content":"Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\n Use TraceContext.putCorrelation() API to put custom data in tracing context.  Optional\u0026lt;String\u0026gt; previous = TraceContext.putCorrelation(\u0026#34;customKey\u0026#34;, \u0026#34;customValue\u0026#34;); CorrelationContext will remove the item when the value is null or empty.\n Use TraceContext.getCorrelation() API to get custom data.  Optional\u0026lt;String\u0026gt; value = TraceContext.getCorrelation(\u0026#34;customKey\u0026#34;); CorrelationContext configuration descriptions could be found in the agent configuration documentation, with correlation. as the prefix. Sample codes only\n","title":"Trace Correlation Context","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-correlation-context/"},{"content":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  
CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","title":"Trace Cross Thread","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/"},{"content":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","title":"Trace Cross Thread","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/"},{"content":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  
ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","title":"Trace Cross Thread","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/"},{"content":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  
CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","title":"Trace Cross Thread","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/"},{"content":"Trace Cross Thread These APIs provide ways to continuous tracing in the cross thread scenario with minimal code changes. All following are sample codes only to demonstrate how to adopt cross thread cases easier.\n Case 1.  @TraceCrossThread public static class MyCallable\u0026lt;String\u0026gt; implements Callable\u0026lt;String\u0026gt; { @Override public String call() throws Exception { return null; } } ... ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(new MyCallable());  Case 2.  ExecutorService executorService = Executors.newFixedThreadPool(1); executorService.submit(CallableWrapper.of(new Callable\u0026lt;String\u0026gt;() { @Override public String call() throws Exception { return null; } })); or\nExecutorService executorService = Executors.newFixedThreadPool(1); executorService.execute(RunnableWrapper.of(new Runnable() { @Override public void run() { //your code  } }));  Case 3.  @TraceCrossThread public class MySupplier\u0026lt;String\u0026gt; implements Supplier\u0026lt;String\u0026gt; { @Override public String get() { return null; } } ... CompletableFuture.supplyAsync(new MySupplier\u0026lt;String\u0026gt;()); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(()-\u0026gt;{ return \u0026#34;SupplierWrapper\u0026#34;; })).thenAccept(System.out::println);  Case 4.  CompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenAcceptAsync(ConsumerWrapper.of(c -\u0026gt; { // your code visit(url)  System.out.println(\u0026#34;ConsumerWrapper\u0026#34;); })); or\nCompletableFuture.supplyAsync(SupplierWrapper.of(() -\u0026gt; { return \u0026#34;SupplierWrapper\u0026#34;; })).thenApplyAsync(FunctionWrapper.of(f -\u0026gt; { // your code visit(url)  return \u0026#34;FunctionWrapper\u0026#34;; })); ","title":"Trace Cross Thread","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-cross-thread/"},{"content":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. 
HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  
// If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  
// All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. 
// Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. 
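To make the SegmentObject and SpanObject definitions above more concrete, the sketch below builds a minimal single-span segment with protobuf-generated builders. The package name (org.apache.skywalking.apm.network.language.agent.v3) and the generated classes are assumed to come from compiling the data collect protocol shown above, so adjust them to your build; the field values mirror the JSON example that follows and are illustrative only.

```java
import org.apache.skywalking.apm.network.language.agent.v3.SegmentObject;
import org.apache.skywalking.apm.network.language.agent.v3.SpanLayer;
import org.apache.skywalking.apm.network.language.agent.v3.SpanObject;
import org.apache.skywalking.apm.network.language.agent.v3.SpanType;

// Hypothetical sketch: builds one segment containing a single entry span,
// mirroring the SegmentObject/SpanObject messages defined above.
// Class and package names assume standard protobuf code generation from
// the data collect protocol; they are not guaranteed to match your build.
public final class SegmentSketch {

    public static SegmentObject buildMinimalSegment() {
        long now = System.currentTimeMillis();

        // spanId starts at 0; parentSpanId == -1 marks the root span of the segment.
        SpanObject entrySpan = SpanObject.newBuilder()
                .setSpanId(0)
                .setParentSpanId(-1)
                .setStartTime(now)
                .setEndTime(now + 15)
                .setOperationName("/ingress")   // endpoint name of the entry span
                .setSpanType(SpanType.Entry)    // server side of the RPC
                .setSpanLayer(SpanLayer.Http)
                .setComponentId(6000)           // predefined id, see component-libraries.yml
                .setIsError(false)
                .build();

        return SegmentObject.newBuilder()
                .setTraceId("a12ff60b-5807-463b-a1f8-fb1c8608219e")
                .setTraceSegmentId("a12ff60b-5807-463b-a1f8-fb1c8608219e")
                .setService("User_Service_Name")
                .setServiceInstance("User_Service_Instance_Name")
                .addSpans(entrySpan)
                .build();
    }
}
```

The same segment structure is what the JSON bodies for the HTTP endpoints below represent, so the two reporting modes differ only in transport and encoding, not in the data model.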
There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, 
\u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"Trace Data Protocol","url":"/docs/main/latest/en/api/trace-data-protocol-v3/"},{"content":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. 
Trace Report Protocol

// The segment is a collection of spans. It includes all collected spans in a single request context, such as an HTTP request process.
//
// We recommend that the agent/SDK report all tracked data of one request at once.
// Typically, in Java, one segment represents all tracked operations (spans) of one request context in the same thread.
// In languages without such a clear thread concept, such as Golang, a segment could represent all tracked operations of one request context.
message SegmentObject {
    // A string id that represents the whole trace.
    string traceId = 1;
    // A unique id that represents this segment. Other segments could use this id to reference it as a child segment.
    string traceSegmentId = 2;
    // Span collections included in this segment.
    repeated SpanObject spans = 3;
    // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.
    //
    // The logic name that represents the service. This would show as a separate node in the topology.
    // The metrics analyzed from the spans would be aggregated for this entity at the service level.
    string service = 4;
    // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it
    // doesn't need to be a single OS process; however, if you are using instrument agents, an instance is actually a real OS process.
    //
    // The logic name that represents the service instance. This would show as a separate node in the instance relationship.
    // The metrics analyzed from the spans would be aggregated for this entity at the service instance level.
    string serviceInstance = 5;
    // Whether the segment includes all tracked spans.
    // In production environments, some tasks could include too many spans for one request context, such as a batch update for a cache or an async job.
    // The agent/SDK could optimize or ignore some tracked spans for better performance.
    // In this case, the value should be flagged as TRUE.
    bool isSizeLimited = 6;
}

// A segment reference represents the link between two existing segments.
message SegmentReference {
    // The reference type. It could be across-thread or across-process.
    // Across-process means there is a downstream RPC call for this.
    // Typically, refType == CrossProcess means SpanObject#spanType = entry.
    RefType refType = 1;
    // A string id that represents the whole trace.
    string traceId = 2;
    // Another segment id as the parent.
    string parentTraceSegmentId = 3;
    // The span id in the parent trace segment.
    int32 parentSpanId = 4;
    // The service logic name of the parent segment.
    // If refType == CrossThread, this name is the same as in this trace segment.
    string parentService = 5;
    // The service instance logic name of the parent segment.
    // If refType == CrossThread, this name is the same as in this trace segment.
    string parentServiceInstance = 6;
    // The endpoint name of the parent segment.
    // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.
    // In a trace segment, the endpoint name is the name of the first entry span.
    string parentEndpoint = 7;
    // The network address, including ip/hostname and port, which is used on the client side.
    // For example, Client --> uses 127.0.11.8:913 --> Server:
    // in the reference of the entry span reported by the Server, the value of this field is 127.0.11.8:913.
    // This plays an important role in the SkyWalking STAM (Streaming Topology Analysis Method).
    // For more details, read https://wu-sheng.github.io/STAM/
    string networkAddressUsedAtPeer = 8;
}
// A span represents an execution unit in the system, with a duration and many other attributes.
// A span could be a method, an RPC, or an MQ message produce/consume.
// In practice, spans should be added only when really necessary, to avoid payload overhead.
// We recommend creating spans only in across-process (client/server of RPC/MQ) and across-thread cases.
message SpanObject {
    // The number id of the span. Should be unique in the whole segment.
    // Starting at 0.
    int32 spanId = 1;
    // The number id of the parent span in the whole segment.
    // -1 represents no parent span; such a span is also known as the root/first span of the segment.
    int32 parentSpanId = 2;
    // Start timestamp of this span in milliseconds,
    // measured between the current time and midnight, January 1, 1970 UTC.
    int64 startTime = 3;
    // End timestamp of this span in milliseconds,
    // measured between the current time and midnight, January 1, 1970 UTC.
    int64 endTime = 4;
    // <Optional>
    // In the across-thread and across-process cases, these references target the parent segments.
    // The references usually have only one element, but in batch consumer cases, such as MQ or async batch processing, there could be multiple.
    repeated SegmentReference refs = 5;
    // A logic name that represents this span.
    //
    // We don't recommend including parameters, such as HTTP request parameters, as a part of the operation name, especially for the entry span.
    // All statistics for the endpoints are aggregated based on this name. Parameters should be added in the tags if necessary.
    // If in some cases they have to be a part of the operation name,
    // users should use the Group Parameterized Endpoints capability at the backend to get meaningful metrics.
    // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md
    string operationName = 6;
    // Remote address of the peer in the RPC/MQ case.
    // This is required when spanType = Exit, as it is a part of the SkyWalking STAM (Streaming Topology Analysis Method).
    // For more details, read https://wu-sheng.github.io/STAM/
    string peer = 7;
    // The span type represents the role in the RPC context.
    SpanType spanType = 8;
    // The span layer represents the component tech stack, related to the network tech.
    SpanLayer spanLayer = 9;
    // The component id is a predefined number id in SkyWalking.
    // It represents the framework/tech stack used by this tracked span, such as Spring.
    // All IDs are defined in https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml
    // Send a pull request if you want to add languages, components or mapping definitions;
    // all public components could be accepted.
    // Follow this doc for more details: https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md
    int32 componentId = 10;
    // The status of the span. False means the tracked execution ends in an unexpected status.
    // This affects the success rate statistic in the backend.
    // An exception or error code in the tracked process doesn't necessarily mean isError == true; the implementations of the agent plugin and tracing SDK make the final decision.
    bool isError = 11;
    // String key, String value pairs.
    // Tags provide more information, including parameters.
    //
    // In the OAP backend analysis, some special tags or tag combinations could provide other advanced features.
    // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags
    repeated KeyStringValuePair tags = 12;
    // String key, String value pairs with an accurate timestamp.
    // Logs events happening in the context of the span duration.
    repeated Log logs = 13;
    // Force the backend to skip analysis if the value is TRUE.
    // The backend has its own configurations to follow or override this.
    //
    // Use this mostly because the agent/SDK could know more context of the service role.
    bool skipAnalysis = 14;
}

message Log {
    // The timestamp of this event in milliseconds,
    // measured between the current time and midnight, January 1, 1970 UTC.
    int64 time = 1;
    // String key, String value pairs.
    repeated KeyStringValuePair data = 2;
}

// Maps to the type of span.
enum SpanType {
    // Server side of RPC. Consumer side of MQ.
    Entry = 0;
    // Client side of RPC. Producer side of MQ.
    Exit = 1;
    // A common local code execution.
    Local = 2;
}

// An ID could be represented by multiple string sections.
message ID {
    repeated string id = 1;
}

// Type of the reference.
enum RefType {
    // Maps to a reference targeting a segment in another OS process.
    CrossProcess = 0;
    // Maps to a reference targeting a segment in the same process as the current one, just across threads.
    // This is only used when the coding language has the thread concept.
    CrossThread = 1;
}

// Maps to the layer of span.
enum SpanLayer {
    // Unknown layer. Could be anything.
    Unknown = 0;
    // A database layer, used in tracing the database client component.
    Database = 1;
    // An RPC layer, used on both client and server sides of an RPC component.
    RPCFramework = 2;
    // HTTP is a more specific RPCFramework.
    Http = 3;
    // An MQ layer, used on both producer and consumer sides of the MQ component.
    MQ = 4;
    // A cache layer, used in tracing the cache client component.
    Cache = 5;
}

// The segment collection for trace reports in batch and sync mode.
message SegmentCollection {
    repeated SegmentObject segments = 1;
}
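To make the message layout concrete, here is a minimal sketch that assembles a SegmentObject with one Entry span, one Exit span, and a cross-process reference, using the Java classes that protoc would generate from the definitions above. The import package, the key/value accessors of KeyStringValuePair, and all id/name values marked "hypothetical" are assumptions for illustration; they are not defined on this page.

    // Assumed package of the protoc-generated classes from this protocol.
    import org.apache.skywalking.apm.network.language.agent.v3.*;
    // Assumed: a string key / string value pair, as the JSON tag examples below suggest.
    import org.apache.skywalking.apm.network.common.v3.KeyStringValuePair;

    public class SegmentSketch {
        public static SegmentObject buildSegment() {
            long now = System.currentTimeMillis();

            // Entry span: the server side of the incoming request; spanId 0, no parent (-1).
            SpanObject entry = SpanObject.newBuilder()
                    .setSpanId(0)
                    .setParentSpanId(-1)
                    .setStartTime(now)
                    .setEndTime(now + 15)
                    .setOperationName("/ingress")
                    .setSpanType(SpanType.Entry)
                    .setSpanLayer(SpanLayer.Http)
                    .setComponentId(6000)
                    .addTags(KeyStringValuePair.newBuilder().setKey("http.method").setValue("GET"))
                    // Cross-process reference to the upstream caller's segment.
                    .addRefs(SegmentReference.newBuilder()
                            .setRefType(RefType.CrossProcess)
                            .setTraceId("a12ff60b-5807-463b-a1f8-fb1c8608219e")
                            .setParentTraceSegmentId("parent-segment-id")           // hypothetical value
                            .setParentSpanId(1)
                            .setParentService("Upstream_Service_Name")              // hypothetical value
                            .setParentServiceInstance("Upstream_Instance_Name")     // hypothetical value
                            .setParentEndpoint("/upstream")                          // hypothetical value
                            .setNetworkAddressUsedAtPeer("127.0.11.8:913"))
                    .build();

            // Exit span: the client side of a downstream call made while handling the request.
            SpanObject exit = SpanObject.newBuilder()
                    .setSpanId(1)
                    .setParentSpanId(0)
                    .setStartTime(now)
                    .setEndTime(now + 10)
                    .setOperationName("/ingress")
                    .setSpanType(SpanType.Exit)
                    .setSpanLayer(SpanLayer.Http)
                    .setComponentId(6000)
                    .setPeer("upstream service")   // peer is required for Exit spans (STAM)
                    .build();

            // The segment groups all spans of this request in the current process/thread.
            return SegmentObject.newBuilder()
                    .setTraceId("a12ff60b-5807-463b-a1f8-fb1c8608219e")
                    .setTraceSegmentId("b3c2d1e0-1111-2222-3333-444455556666")       // hypothetical segment id
                    .setService("User_Service_Name")
                    .setServiceInstance("User_Service_Instance_Name")
                    .addSpans(exit)
                    .addSpans(entry)
                    .build();
        }
    }

A real agent would fill the reference fields from the incoming trace context and stream the finished segment to the OAP collector; the sketch only mirrors the field semantics documented above.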
Report Span Attached Events

Besides in-process agents, other out-of-process agents, such as the eBPF agent, can report additional information as attached events for the related spans.

SpanAttachedEventReportService#collect is used for attached event reporting.

// The eBPF agent (SkyWalking Rover) collects extra information from the OS (Linux only) level to attach to the traced span.
// Since v3.1
service SpanAttachedEventReportService {
    // Collect SpanAttachedEvent to the OAP server in streaming mode.
    rpc collect (stream SpanAttachedEvent) returns (Commands) {
    }
}

// SpanAttachedEvent represents an attached event for a traced RPC.
//
// When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent,
// and the rover would be aware of this RPC due to the existing tracing header.
// Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance.
message SpanAttachedEvent {
    // The nanosecond timestamp of the event's start time.
    // Notice: most timestamps in SkyWalking are in milliseconds, but NANO-SECOND precision is required here,
    // because the attached event happens at the OS syscall level and most of them execute rapidly.
    Instant startTime = 1;
    // The official event name.
    // For example, the event name is a method signature from the syscall stack.
    string event = 2;
    // [Optional] The nanosecond timestamp of the event's end time.
    Instant endTime = 3;
    // The tags for this event include some extra OS-level information, such as
    // 1. the net_device used for this exit span,
    // 2. the network L7 protocol.
    repeated KeyStringValuePair tags = 4;
    // The summary of statistics during this event.
    // Each statistic provides a name (metric name) and an int64/long value.
    repeated KeyIntValuePair summary = 5;
    // Refers to a trace context decoded from the `sw8` header carried over the network, such as an HTTP header or MQ metadata.
    // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item
    SpanReference traceContext = 6;

    message SpanReference {
        SpanReferenceType type = 1;
        // [Optional] A string id that represents the whole trace.
        string traceId = 2;
        // A unique id that represents this segment. Other segments could use this id to reference it as a child segment.
        // [Optional] when this span reference
        string traceSegmentId = 3;
        // If type == SKYWALKING:
        // the number id of the span, unique in the whole segment, starting at 0.
        //
        // If type == ZIPKIN:
        // the span ID is a string.
        string spanId = 4;
    }

    enum SpanReferenceType {
        SKYWALKING = 0;
        ZIPKIN = 1;
    }
}
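As a rough illustration, the sketch below builds a SpanAttachedEvent pointing at an existing SkyWalking span via the decoded trace context. It assumes protoc-generated Java classes, an assumed key/value shape for KeyIntValuePair, and hypothetical event/tag/metric values; the nanosecond Instant fields are omitted because the Instant message is defined in the common protocol, not on this page. A streaming client would then push such events through the generated SpanAttachedEventReportService stub (an assumption based on standard gRPC code generation).

    import org.apache.skywalking.apm.network.language.agent.v3.SpanAttachedEvent;  // assumed package
    import org.apache.skywalking.apm.network.common.v3.KeyIntValuePair;            // assumed: string key / int64 value
    import org.apache.skywalking.apm.network.common.v3.KeyStringValuePair;         // assumed: string key / string value

    public class AttachedEventSketch {
        public static SpanAttachedEvent buildEvent() {
            return SpanAttachedEvent.newBuilder()
                    .setEvent("connect")                                    // hypothetical syscall-level event name
                    .addTags(KeyStringValuePair.newBuilder()
                            .setKey("net_device").setValue("eth0"))         // hypothetical tag value
                    .addSummary(KeyIntValuePair.newBuilder()
                            .setKey("bytes_sent").setValue(2048L))          // hypothetical metric
                    // Identify the traced span this event should be attached to.
                    .setTraceContext(SpanAttachedEvent.SpanReference.newBuilder()
                            .setType(SpanAttachedEvent.SpanReferenceType.SKYWALKING)
                            .setTraceId("a12ff60b-5807-463b-a1f8-fb1c8608219e")
                            .setTraceSegmentId("a12ff60b-5807-463b-a1f8-fb1c8608219e")
                            .setSpanId("1"))                                // SkyWalking span ids are numeric, carried as a string here
                    .build();
        }
    }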
Via HTTP Endpoint

Detailed information about the data format can be found in Instance Management. There are two ways to report segment data: one segment per request, or a segment array in bulk mode.

POST http://localhost:12800/v3/segment

Send a single segment object in JSON format.

Input:

{
  "traceId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
  "serviceInstance": "User_Service_Instance_Name",
  "spans": [
    { "operationName": "/ingress", "startTime": 1588664577013, "endTime": 1588664577028,
      "spanType": "Exit", "spanId": 1, "isError": false, "parentSpanId": 0,
      "componentId": 6000, "peer": "upstream service", "spanLayer": "Http" },
    { "operationName": "/ingress", "startTime": 1588664577013, "endTime": 1588664577028,
      "tags": [ { "key": "http.method", "value": "GET" },
                { "key": "http.params", "value": "http://localhost/ingress" } ],
      "spanType": "Entry", "spanId": 0, "parentSpanId": -1, "isError": false,
      "spanLayer": "Http", "componentId": 6000 }
  ],
  "service": "User_Service_Name",
  "traceSegmentId": "a12ff60b-5807-463b-a1f8-fb1c8608219e"
}

Output:

POST http://localhost:12800/v3/segments

Send a segment object list in JSON format.

Input:

[{
  "traceId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
  "serviceInstance": "User_Service_Instance_Name",
  "spans": [
    { "operationName": "/ingress", "startTime": 1588664577013, "endTime": 1588664577028,
      "spanType": "Exit", "spanId": 1, "isError": false, "parentSpanId": 0,
      "componentId": 6000, "peer": "upstream service", "spanLayer": "Http" },
    { "operationName": "/ingress", "startTime": 1588664577013, "endTime": 1588664577028,
      "tags": [ { "key": "http.method", "value": "GET" },
                { "key": "http.params", "value": "http://localhost/ingress" } ],
      "spanType": "Entry", "spanId": 0, "parentSpanId": -1, "isError": false,
      "spanLayer": "Http", "componentId": 6000 }
  ],
  "service": "User_Service_Name",
  "traceSegmentId": "a12ff60b-5807-463b-a1f8-fb1c8608219e"
}, {
  "traceId": "f956699e-5106-4ea3-95e5-da748c55bac1",
  "serviceInstance": "User_Service_Instance_Name",
  "spans": [
    { "operationName": "/ingress", "startTime": 1588664577250, "endTime": 1588664577250,
      "spanType": "Exit", "spanId": 1, "isError": false, "parentSpanId": 0,
      "componentId": 6000, "peer": "upstream service", "spanLayer": "Http" },
    { "operationName": "/ingress", "startTime": 1588664577250, "endTime": 1588664577250,
      "tags": [ { "key": "http.method", "value": "GET" },
                { "key": "http.params", "value": "http://localhost/ingress" } ],
      "spanType": "Entry", "spanId": 0, "parentSpanId": -1, "isError": false,
      "spanLayer": "Http", "componentId": 6000 }
  ],
  "service": "User_Service_Name",
  "traceSegmentId": "f956699e-5106-4ea3-95e5-da748c55bac1"
}]

Output:
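For the HTTP endpoint, a minimal reporter sketch is shown below. It posts a single-segment JSON document (abbreviated from the example above to one Entry span) to the /v3/segment endpoint using the standard JDK HTTP client. The Content-Type header and the assumption that the OAP HTTP receiver is reachable at localhost:12800 follow the example URL above; the success behaviour is an assumption, since the documented output body is empty.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class SegmentHttpReport {
        public static void main(String[] args) throws Exception {
            // A single-segment payload, shortened from the documented example (one Entry span only).
            String segmentJson = """
                {
                  "traceId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
                  "traceSegmentId": "a12ff60b-5807-463b-a1f8-fb1c8608219e",
                  "service": "User_Service_Name",
                  "serviceInstance": "User_Service_Instance_Name",
                  "spans": [{
                    "spanId": 0, "parentSpanId": -1,
                    "startTime": 1588664577013, "endTime": 1588664577028,
                    "operationName": "/ingress",
                    "spanType": "Entry", "spanLayer": "Http",
                    "componentId": 6000, "isError": false,
                    "tags": [{"key": "http.method", "value": "GET"}]
                  }]
                }
                """;

            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://localhost:12800/v3/segment"))
                    .header("Content-Type", "application/json")   // assumed; the endpoint accepts JSON bodies
                    .POST(HttpRequest.BodyPublishers.ofString(segmentJson))
                    .build();

            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());

            // The documented example shows an empty output body; a 2xx status is taken here as acceptance (assumption).
            System.out.println("HTTP " + response.statusCode());
        }
    }

Use the /v3/segments endpoint with a JSON array body in the same way when reporting in bulk mode.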
// Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. 
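As an illustration of the HTTP receiver usage described below, a minimal sketch follows. It assumes the default OAP REST port 12800 on localhost (as used in the documented examples) and the Python requests library; the payload simply mirrors the documented single-segment JSON, so field names and the endpoint path come from this protocol description rather than from any additional API.
# Illustrative only: post one segment to the OAP HTTP receiver (default port 12800, as documented below).
import time
import uuid
import requests

segment_id = str(uuid.uuid4())
now = int(time.time() * 1000)  # span timestamps are milliseconds since the Unix epoch
segment = {
    "traceId": segment_id,
    "traceSegmentId": segment_id,
    "service": "User_Service_Name",
    "serviceInstance": "User_Service_Instance_Name",
    "spans": [{
        "operationName": "/ingress", "startTime": now, "endTime": now + 10,
        "spanType": "Entry", "spanId": 0, "parentSpanId": -1,
        "isError": False, "spanLayer": "Http", "componentId": 6000,
        "tags": [{"key": "http.method", "value": "GET"}],
    }],
}
resp = requests.post("http://localhost:12800/v3/segment", json=segment)
resp.raise_for_status()  # non-2xx means the receiver rejected the segment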
There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, 
\u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"Trace Data Protocol","url":"/docs/main/v9.6.0/en/api/trace-data-protocol-v3/"},{"content":"Trace Data Protocol  Version, v3.1  Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nFor trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. 
// At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. 
// We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  
repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. // Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  
Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}Via HTTP Endpoint Detailed information about data format can be found in Instance Management. There are two ways to report segment data: one segment per request or segment array in bulk mode.\nPOST http://localhost:12800/v3/segment Send a single segment object in JSON format.\nInput:\n{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; } OutPut:\nPOST http://localhost:12800/v3/segments Send a segment object list in JSON format.\nInput:\n[{ \u0026#34;traceId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: 
\u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577013, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577028, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;a12ff60b-5807-463b-a1f8-fb1c8608219e\u0026#34; }, { \u0026#34;traceId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34;, \u0026#34;serviceInstance\u0026#34;: \u0026#34;User_Service_Instance_Name\u0026#34;, \u0026#34;spans\u0026#34;: [{ \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Exit\u0026#34;, \u0026#34;spanId\u0026#34;: 1, \u0026#34;isError\u0026#34;: false, \u0026#34;parentSpanId\u0026#34;: 0, \u0026#34;componentId\u0026#34;: 6000, \u0026#34;peer\u0026#34;: \u0026#34;upstream service\u0026#34;, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34; }, { \u0026#34;operationName\u0026#34;: \u0026#34;/ingress\u0026#34;, \u0026#34;startTime\u0026#34;: 1588664577250, \u0026#34;tags\u0026#34;: [{ \u0026#34;key\u0026#34;: \u0026#34;http.method\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;GET\u0026#34; }, { \u0026#34;key\u0026#34;: \u0026#34;http.params\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;http://localhost/ingress\u0026#34; }], \u0026#34;endTime\u0026#34;: 1588664577250, \u0026#34;spanType\u0026#34;: \u0026#34;Entry\u0026#34;, \u0026#34;spanId\u0026#34;: 0, \u0026#34;parentSpanId\u0026#34;: -1, \u0026#34;isError\u0026#34;: false, \u0026#34;spanLayer\u0026#34;: \u0026#34;Http\u0026#34;, \u0026#34;componentId\u0026#34;: 6000 }], \u0026#34;service\u0026#34;: \u0026#34;User_Service_Name\u0026#34;, \u0026#34;traceSegmentId\u0026#34;: \u0026#34;f956699e-5106-4ea3-95e5-da748c55bac1\u0026#34; }] OutPut:\n","title":"Trace Data Protocol","url":"/docs/main/v9.7.0/en/api/trace-data-protocol-v3/"},{"content":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nOverview Trace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. 
The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Protocol Definition // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  
// In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  
// If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  
RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}","title":"Trace Data Protocol v3","url":"/docs/main/v9.0.0/en/protocols/trace-data-protocol-v3/"},{"content":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nOverview Trace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Protocol Definition // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. 
Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  
// -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  
bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}","title":"Trace Data Protocol v3","url":"/docs/main/v9.1.0/en/protocols/trace-data-protocol-v3/"},{"content":"Trace Data Protocol v3 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nOverview Trace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. 
For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Protocol Definition // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  // In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  
// **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  // If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  
// Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more informance, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consuer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}","title":"Trace Data Protocol v3","url":"/docs/main/v9.2.0/en/protocols/trace-data-protocol-v3/"},{"content":"Trace Data Protocol v3.1 Trace Data Protocol describes the data format between SkyWalking agent/sniffer and backend.\nTrace data protocol is defined and provided in gRPC format, and implemented in HTTP 1.1.\nReport service instance status   Service Instance Properties Service instance contains more information than just a name. In order for the agent to report service instance status, use ManagementService#reportInstanceProperties service to provide a string-key/string-value pair list as the parameter. The language of target instance must be provided as the minimum requirement.\n  Service Ping Service instance should keep alive with the backend. 
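A minimal sketch of what that keep-alive loop could look like, assuming gRPC stubs generated from the management protocol (ManagementServiceGrpc, InstanceProperties, and InstancePingPkg are assumed generated names, and the OAP address is hypothetical); the real agents wrap this with reconnect and backoff logic:

```java
// Illustrative only: reports instance properties once, then pings every minute.
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import org.apache.skywalking.apm.network.common.v3.KeyStringValuePair;
import org.apache.skywalking.apm.network.management.v3.InstancePingPkg;
import org.apache.skywalking.apm.network.management.v3.InstanceProperties;
import org.apache.skywalking.apm.network.management.v3.ManagementServiceGrpc;

public class InstanceReporter {
    public static void main(String[] args) {
        ManagedChannel channel = ManagedChannelBuilder
                .forAddress("oap.example.com", 11800) // hypothetical OAP gRPC address
                .usePlaintext()
                .build();
        ManagementServiceGrpc.ManagementServiceBlockingStub stub =
                ManagementServiceGrpc.newBlockingStub(channel);

        // Report instance properties; "language" is the minimum required property.
        stub.reportInstanceProperties(InstanceProperties.newBuilder()
                .setService("example-service")
                .setServiceInstance("example-instance")
                .addProperties(KeyStringValuePair.newBuilder()
                        .setKey("language").setValue("java").build())
                .build());

        // Keep alive with the backend once per minute.
        Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(
                () -> stub.keepAlive(InstancePingPkg.newBuilder()
                        .setService("example-service")
                        .setServiceInstance("example-instance")
                        .build()),
                0, 1, TimeUnit.MINUTES);
    }
}
```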
The agent should set a scheduler using ManagementService#keepAlive service every minute.\n  Send trace and JVM metrics After you have the service ID and service instance ID ready, you could send traces and metrics. Now we have\n TraceSegmentReportService#collect for the SkyWalking native trace format JVMMetricReportService#collect for the SkyWalking native JVM format  For trace format, note that:\n The segment is a unique concept in SkyWalking. It should include all spans for each request in a single OS process, which is usually a single language-based thread. There are three types of spans.    EntrySpan EntrySpan represents a service provider, which is also the endpoint on the server end. As an APM system, SkyWalking targets the application servers. Therefore, almost all the services and MQ-consumers are EntrySpans.\n  LocalSpan LocalSpan represents a typical Java method which is not related to remote services. It is neither a MQ producer/consumer nor a provider/consumer of a service (e.g. HTTP service).\n  ExitSpan ExitSpan represents a client of service or MQ-producer. It is known as the LeafSpan in the early stages of SkyWalking. For example, accessing DB by JDBC, and reading Redis/Memcached are classified as ExitSpans.\n   Cross-thread/process span parent information is called \u0026ldquo;reference\u0026rdquo;. Reference carries the trace ID, segment ID, span ID, service name, service instance name, endpoint name, and target address used on the client end (note: this is not required in cross-thread operations) of this request in the parent. See Cross Process Propagation Headers Protocol v3 for more details.\n  Span#skipAnalysis may be TRUE, if this span doesn\u0026rsquo;t require backend analysis.\n  Trace Report Protocol // The segment is a collection of spans. It includes all collected spans in a simple one request context, such as a HTTP request process. // // We recommend the agent/SDK report all tracked data of one request once for all, such as, // typically, such as in Java, one segment represent all tracked operations(spans) of one request context in the same thread. // At the same time, in some language there is not a clear concept like golang, it could represent all tracked operations of one request context. message SegmentObject { // A string id represents the whole trace.  string traceId = 1; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  string traceSegmentId = 2; // Span collections included in this segment.  repeated SpanObject spans = 3; // **Service**. Represents a set/group of workloads which provide the same behaviours for incoming requests.  //  // The logic name represents the service. This would show as a separate node in the topology.  // The metrics analyzed from the spans, would be aggregated for this entity as the service level.  string service = 4; // **Service Instance**. Each individual workload in the Service group is known as an instance. Like `pods` in Kubernetes, it  // doesn\u0026#39;t need to be a single OS process, however, if you are using instrument agents, an instance is actually a real OS process.  //  // The logic name represents the service instance. This would show as a separate node in the instance relationship.  // The metrics analyzed from the spans, would be aggregated for this entity as the service instance level.  string serviceInstance = 5; // Whether the segment includes all tracked spans.  
// In the production environment tracked, some tasks could include too many spans for one request context, such as a batch update for a cache, or an async job.  // The agent/SDK could optimize or ignore some tracked spans for better performance.  // In this case, the value should be flagged as TRUE.  bool isSizeLimited = 6;}// Segment reference represents the link between two existing segment. message SegmentReference { // Represent the reference type. It could be across thread or across process.  // Across process means there is a downstream RPC call for this.  // Typically, refType == CrossProcess means SpanObject#spanType = entry.  RefType refType = 1; // A string id represents the whole trace.  string traceId = 2; // Another segment id as the parent.  string parentTraceSegmentId = 3; // The span id in the parent trace segment.  int32 parentSpanId = 4; // The service logic name of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentService = 5; // The service logic name instance of the parent segment.  // If refType == CrossThread, this name is as same as the trace segment.  string parentServiceInstance = 6; // The endpoint name of the parent segment.  // **Endpoint**. A path in a service for incoming requests, such as an HTTP URI path or a gRPC service class + method signature.  // In a trace segment, the endpoint name is the name of first entry span.  string parentEndpoint = 7; // The network address, including ip/hostname and port, which is used in the client side.  // Such as Client --\u0026gt; use 127.0.11.8:913 -\u0026gt; Server  // then, in the reference of entry span reported by Server, the value of this field is 127.0.11.8:913.  // This plays the important role in the SkyWalking STAM(Streaming Topology Analysis Method)  // For more details, read https://wu-sheng.github.io/STAM/  string networkAddressUsedAtPeer = 8;}// Span represents a execution unit in the system, with duration and many other attributes. // Span could be a method, a RPC, MQ message produce or consume. // In the practice, the span should be added when it is really necessary, to avoid payload overhead. // We recommend to creating spans in across process(client/server of RPC/MQ) and across thread cases only. message SpanObject { // The number id of the span. Should be unique in the whole segment.  // Starting at 0.  int32 spanId = 1; // The number id of the parent span in the whole segment.  // -1 represents no parent span.  // Also, be known as the root/first span of the segment.  int32 parentSpanId = 2; // Start timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 startTime = 3; // End timestamp in milliseconds of this span,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 endTime = 4; // \u0026lt;Optional\u0026gt;  // In the across thread and across process, these references targeting the parent segments.  // The references usually have only one element, but in batch consumer case, such as in MQ or async batch process, it could be multiple.  repeated SegmentReference refs = 5; // A logic name represents this span.  //  // We don\u0026#39;t recommend to include the parameter, such as HTTP request parameters, as a part of the operation, especially this is the name of the entry span.  // All statistic for the endpoints are aggregated base on this name. Those parameters should be added in the tags if necessary.  
// If in some cases, it have to be a part of the operation name,  // users should use the Group Parameterized Endpoints capability at the backend to get the meaningful metrics.  // Read https://github.com/apache/skywalking/blob/master/docs/en/setup/backend/endpoint-grouping-rules.md  string operationName = 6; // Remote address of the peer in RPC/MQ case.  // This is required when spanType = Exit, as it is a part of the SkyWalking STAM(Streaming Topology Analysis Method).  // For more details, read https://wu-sheng.github.io/STAM/  string peer = 7; // Span type represents the role in the RPC context.  SpanType spanType = 8; // Span layer represent the component tech stack, related to the network tech.  SpanLayer spanLayer = 9; // Component id is a predefined number id in the SkyWalking.  // It represents the framework, tech stack used by this tracked span, such as Spring.  // All IDs are defined in the https://github.com/apache/skywalking/blob/master/oap-server/server-bootstrap/src/main/resources/component-libraries.yml  // Send a pull request if you want to add languages, components or mapping definitions,  // all public components could be accepted.  // Follow this doc for more details, https://github.com/apache/skywalking/blob/master/docs/en/guides/Component-library-settings.md  int32 componentId = 10; // The status of the span. False means the tracked execution ends in the unexpected status.  // This affects the successful rate statistic in the backend.  // Exception or error code happened in the tracked process doesn\u0026#39;t mean isError == true, the implementations of agent plugin and tracing SDK make the final decision.  bool isError = 11; // String key, String value pair.  // Tags provides more information, includes parameters.  //  // In the OAP backend analysis, some special tag or tag combination could provide other advanced features.  // https://github.com/apache/skywalking/blob/master/docs/en/guides/Java-Plugin-Development-Guide.md#special-span-tags  repeated KeyStringValuePair tags = 12; // String key, String value pair with an accurate timestamp.  // Logging some events happening in the context of the span duration.  repeated Log logs = 13; // Force the backend don\u0026#39;t do analysis, if the value is TRUE.  // The backend has its own configurations to follow or override this.  //  // Use this mostly because the agent/SDK could know more context of the service role.  bool skipAnalysis = 14;}message Log { // The timestamp in milliseconds of this event.,  // measured between the current time and midnight, January 1, 1970 UTC.  int64 time = 1; // String key, String value pair.  repeated KeyStringValuePair data = 2;}// Map to the type of span enum SpanType { // Server side of RPC. Consumer side of MQ.  Entry = 0; // Client side of RPC. Producer side of MQ.  Exit = 1; // A common local code execution.  Local = 2;}// A ID could be represented by multiple string sections. message ID { repeated string id = 1;}// Type of the reference enum RefType { // Map to the reference targeting the segment in another OS process.  CrossProcess = 0; // Map to the reference targeting the segment in the same process of the current one, just across thread.  // This is only used when the coding language has the thread concept.  CrossThread = 1;}// Map to the layer of span enum SpanLayer { // Unknown layer. Could be anything.  Unknown = 0; // A database layer, used in tracing the database client component.  Database = 1; // A RPC layer, used in both client and server sides of RPC component.  
RPCFramework = 2; // HTTP is a more specific RPCFramework.  Http = 3; // A MQ layer, used in both producer and consumer sides of the MQ component.  MQ = 4; // A cache layer, used in tracing the cache client component.  Cache = 5;}// The segment collections for trace report in batch and sync mode. message SegmentCollection { repeated SegmentObject segments = 1;}Report Span Attached Events Besides in-process agents, there are other out-of-process agent, such as ebpf agent, could report additional information as attached events for the relative spans.\nSpanAttachedEventReportService#collect for attached event reporting.\n//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ebpf agent(SkyWalking Rover) collects extra information from the OS(Linux Only) level to attach on the traced span. // Since v3.1 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// service SpanAttachedEventReportService { // Collect SpanAttachedEvent to the OAP server in the streaming mode.  rpc collect (stream SpanAttachedEvent) returns (Commands) { }}// SpanAttachedEvent represents an attached event for a traced RPC. // // When an RPC is being traced by the in-process language agent, a span would be reported by the client-side agent. // And the rover would be aware of this RPC due to the existing tracing header. // Then, the rover agent collects extra information from the OS level to provide assistance information to diagnose network performance. message SpanAttachedEvent { // The nanosecond timestamp of the event\u0026#39;s start time.  // Notice, most unit of timestamp in SkyWalking is milliseconds, but NANO-SECOND is required here.  // Because the attached event happens in the OS syscall level, most of them are executed rapidly.  Instant startTime = 1; // The official event name.  // For example, the event name is a method signature from syscall stack.  string event = 2; // [Optional] The nanosecond timestamp of the event\u0026#39;s end time.  Instant endTime = 3; // The tags for this event includes some extra OS level information,  // such as  // 1. net_device used for this exit span.  // 2. network L7 protocol  repeated KeyStringValuePair tags = 4; // The summary of statistics during this event.  // Each statistic provides a name(metric name) to represent the name, and an int64/long as the value.  repeated KeyIntValuePair summary = 5; // Refer to a trace context decoded from `sw8` header through network, such as HTTP header, MQ metadata  // https://skywalking.apache.org/docs/main/next/en/protocols/skywalking-cross-process-propagation-headers-protocol-v3/#standard-header-item  SpanReference traceContext = 6; message SpanReference { SpanReferenceType type = 1; // [Optional] A string id represents the whole trace.  string traceId = 2; // A unique id represents this segment. Other segments could use this id to reference as a child segment.  // [Optional] when this span reference  string traceSegmentId = 3; // If type == SKYWALKING  // The number id of the span. Should be unique in the whole segment.  // Starting at 0  //  // If type == ZIPKIN  // The type of span ID is string.  
string spanId = 4; } enum SpanReferenceType { SKYWALKING = 0; ZIPKIN = 1; }}","title":"Trace Data Protocol v3.1","url":"/docs/main/v9.3.0/en/protocols/trace-data-protocol-v3/"},{"content":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nTo learn more about trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:\n  selector: ${SW_RECEIVER_PROFILE:default}\n  default:\nTrace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task notifies all agent nodes that run the service entity which endpoint needs to perform Trace Profiling. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNote that whether profiling is thread-sensitive depends on the agent-side implementation. 
The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","title":"Trace Profiling","url":"/docs/main/latest/en/setup/backend/backend-trace-profiling/"},{"content":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. 
Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. 
Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","title":"Trace Profiling","url":"/docs/main/next/en/setup/backend/backend-trace-profiling/"},{"content":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. 
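As a rough illustration of what such periodic sampling involves (a simplification, not the agent's actual implementation), a sampler could snapshot the stack of the thread handling the profiled endpoint once per dump period:

```java
// Simplified illustration of periodic thread-stack sampling at a fixed dump period.
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class StackSampler {
    private final List<StackTraceElement[]> samples =
            Collections.synchronizedList(new ArrayList<>());
    private final ScheduledExecutorService scheduler =
            Executors.newSingleThreadScheduledExecutor();

    public void start(Thread target, long dumpPeriodMillis) {
        // Take one stack snapshot of the target thread per dump period.
        scheduler.scheduleAtFixedRate(
                () -> samples.add(target.getStackTrace()),
                0, dumpPeriodMillis, TimeUnit.MILLISECONDS);
    }

    public List<StackTraceElement[]> stop() {
        scheduler.shutdown();
        return samples; // later merged into a frame tree for analysis
    }
}
```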
Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. 
Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","title":"Trace Profiling","url":"/docs/main/v9.5.0/en/setup/backend/backend-trace-profiling/"},{"content":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. 
Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. 
Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","title":"Trace Profiling","url":"/docs/main/v9.6.0/en/setup/backend/backend-trace-profiling/"},{"content":"Trace Profiling Trace Profiling is bound within the auto-instrument agent and corresponds to In-Process Profiling.\nIt is delivered to the agent in the form of a task, allowing for dynamic enabling or disabling. Trace Profiling tasks can be created when an endpoint within a service experiences high latency. When the agent receives the task, it periodically samples the thread stack related to the endpoint when requested. Once the sampling is complete, the thread stack within the endpoint can be analyzed to determine the specific line of business code causing the performance issue.\nLean more about the trace profiling, please read this blog.\nActive in the OAP OAP and the agent use a brand-new protocol to exchange Trace Profiling data, so it is necessary to start OAP with the following configuration:\nreceiver-profile:selector:${SW_RECEIVER_PROFILE:default}default:Trace Profiling Task with Analysis To use the Trace Profiling feature, please follow these steps:\n Create profiling task: Use the UI or CLI tool to create a task. Generate requests: Ensure that the service has generated requests. Query task details: Check that the created task has Trace data generated. Analyze the data: Analyze the Trace data to determine where performance bottlenecks exist in the service.  Create profiling task Creating a Trace Profiling task is used to notify all agent nodes that execute the service entity which endpoint needs to perform the Trace Profiling feature. This Endpoint is typically an HTTP request or an RPC request address.\nWhen creating a task, the following configuration fields are required:\n Service: Which agent under the service needs to be monitored. 
Endpoint: The specific endpoint name, such as \u0026ldquo;POST:/path/to/request.\u0026rdquo; Start Time: The start time of the task, which can be executed immediately or at a future time. Duration: The duration of the task execution. Min Duration Threshold: The monitoring will only be triggered when the specified endpoint\u0026rsquo;s execution time exceeds this threshold. This effectively prevents the collection of ineffective data due to short execution times. Dump Period: The thread stack collection period, which will trigger thread sampling every specified number of milliseconds. Max Sampling Count: The maximum number of traces that can be collected in a task. This effectively prevents the program execution from being affected by excessive trace sampling, such as the Stop The World situation in Java.  When the Agent receives a Trace Profiling task from OAP, it automatically generates a log to notify that the task has been acknowledged. The log contains the following field information:\n Instance: The name of the instance where the Agent is located. Type: Supports \u0026ldquo;NOTIFIED\u0026rdquo; and \u0026ldquo;EXECUTION_FINISHED\u0026rdquo;, with the current log displaying \u0026ldquo;NOTIFIED\u0026rdquo;. Time: The time when the Agent received the task.  Generate Requests At this point, Tracing requests matching the specified Endpoint and other conditions would undergo Profiling.\nNotice, whether profiling is thread sensitive, it relies on the agent side implementation. The Java Agent already supports cross-thread requests, so when a request involves cross-thread operations, it would also be periodically sampled for thread stack.\nQuery task details Once the Tracing request is completed, we can query the Tracing data associated with this Trace Profiling task, which includes the following information:\n TraceId: The Trace ID of the current request. Instance: The instance to which the current profiling data belongs. Duration: The total time taken by the current instance to process the Tracing request. Spans: The list of Spans associated with the current Tracing.  SpanId: The ID of the current span. Parent Span Id: The ID of the parent span, allowing for a tree structure. SegmentId: The ID of the segment to which the span belongs. Refs: References of the current span, note that it only includes \u0026ldquo;CROSS_THREAD\u0026rdquo; type references. Service: The service entity information to which the current span belongs. Instance: The instance entity information to which the current span belongs. Time: The start and end time of the current span. Endpoint Name: The name of the current Span. Type: The type of the current span, either \u0026ldquo;Entry\u0026rdquo;, \u0026ldquo;Local\u0026rdquo;, or \u0026ldquo;Exit\u0026rdquo;. Peer: The remote network address. Component: The name of the component used by the current span. Layer: The layer to which the current span belongs. Tags: The tags information contained in the current span. Logs: The log information in the current span. Profiled: Whether the current span supports Profiling data analysis.    Analyze the data Once we know which segments can be analyzed for profiling, we can then determine the time ranges available for thread stack analysis based on the \u0026ldquo;profiled\u0026rdquo; field in the span. Next, we can provide the following query content to analyze the data:\n segmentId: The segment to be analyzed. Segments are usually bound to individual threads, so we can determine which thread needs to be analyzed. 
time range: Includes the start and end time.  By combining the segmentId with the time range, we can confirm the data for a specific thread during a specific time period. This allows us to merge the thread stack data from the specified thread and time range and analyze which lines of code take longer to execute. The following fields help you understand the program execution:\n Id: Used to identify the current thread stack frame. Parent Id: Combined with \u0026ldquo;id\u0026rdquo; to determine the hierarchical relationship. Code Signature: The method signature of the current thread stack frame. Duration: The total time consumed by the current thread stack frame. Duration Child Excluded: Excludes the child method calls of the current method, only obtaining the time consumed by the current method. Count: The number of times the current thread stack frame was sampled.  If you want to learn more about the thread stack merging mechanism, please read this documentation.\nExporter If you find that the results of profiling data are not correct, you can report an issue through this documentation.\n","title":"Trace Profiling","url":"/docs/main/v9.7.0/en/setup/backend/backend-trace-profiling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. 
default.rate allows you to set the sample rate of this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration have a higher priority than default.rate and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-InstanceB will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/latest/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces.
See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/next/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage. If you enable the trace sampling mechanism at the server side, you will find that the metrics of the service, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces into storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would do their best not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism activated. The default value is `-1`, which means would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when sampling mechanism is activated. When sampling mechanism is activated, this config would cause the error status segment to be sampled, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments which are reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote also if most of the access have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.0.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. 
The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.1.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. 
See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.2.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.3.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. 
The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.4.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. 
See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.5.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. 
The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.6.0/en/setup/backend/trace-sampling/"},{"content":"Trace Sampling at server side An advantage of a distributed tracing system is that detailed information from the traces can be obtained. However, the downside is that these traces use up a lot of storage.\nIf you enable the trace sampling mechanism at the server-side, you will find that the service metrics, service instance, endpoint, and topology all have the same accuracy as before. The only difference is that they do not save all traces in storage.\nOf course, even if you enable sampling, the traces will be kept as consistent as possible. Being consistent means that once the trace segments have been collected and reported by agents, the backend would make its best effort not to split the traces. See our recommendation to understand why you should keep the traces as consistent as possible and try not to split them.\nSet the sample rate In the agent-analyzer module, you will find the sampleRate setting by the configuration traceSamplingPolicySettingsFile.\nagent-analyzer:default:...# The default sampling rate and the default trace latency time configured by the \u0026#39;traceSamplingPolicySettingsFile\u0026#39; file.traceSamplingPolicySettingsFile:${SW_TRACE_SAMPLING_POLICY_SETTINGS_FILE:trace-sampling-policy-settings.yml}forceSampleErrorSegment:${SW_FORCE_SAMPLE_ERROR_SEGMENT:true}# When sampling mechanism activated, this config would make the error status segment sampled, ignoring the sampling rate.The default trace-sampling-policy-settings.yml uses the following format. Could use dynamic configuration to update the settings in the runtime.\ndefault:# Default sampling rate that replaces the \u0026#39;agent-analyzer.default.sampleRate\u0026#39;# The sample rate precision is 1/10000. 10000 means 100% sample in default.rate:10000# Default trace latency time that replaces the \u0026#39;agent-analyzer.default.slowTraceSegmentThreshold\u0026#39;# Setting this threshold about the latency would make the slow trace segments sampled if they cost more time, even the sampling mechanism is activated. 
The default value is `-1`, which would not sample slow traces. Unit, millisecond.duration:-1#services:# - name: serverName# rate: 1000 # Sampling rate of this specific service# duration: 10000 # Trace latency threshold for trace sampling for this specific serviceduration.rate allows you to set the sample rate to this backend. The sample rate precision is 1/10000. 10000 means 100% sample by default.\nforceSampleErrorSegment allows you to save all error segments when the sampling mechanism is activated. This config will cause the error status segment to be sampled when the sampling mechanism is activated, ignoring the sampling rate.\ndefault.duration allows you to save all slow trace segments when the sampling mechanism is activated. Setting this threshold on latency (in milliseconds) would cause slow trace segments to be sampled if they use up more time, even if the sampling mechanism is activated. The default value is -1, which means that slow traces would not be sampled.\nNote: services.[].rate and services.[].duration has a higher priority than default.rare and default.duration.\nRecommendation You may choose to set different backend instances with different sampleRate values, although we recommend that you set the values to be the same.\nWhen you set the different rates, let\u0026rsquo;s say:\n Backend-InstanceA.sampleRate = 35 Backend-InstanceB.sampleRate = 55  Assume the agents have reported all trace segments to the backend. 35% of the traces at the global level will be collected and saved in storage consistently/completely together with all spans. 20% of the trace segments reported to Backend-Instance B will be saved in storage, whereas some trace segments may be missed, as they are reported to Backend-InstanceA and ignored.\nNote When you enable sampling, the actual sample rate may exceed sampleRate. The reason is that currently, all error/slow segments will be saved; meanwhile, the upstream and downstream may not be sampled. This feature ensures that you have the error/slow stacks and segments, although it is not guaranteed that you would have the whole traces.\nNote that if most of the accesses have failed or are slow, the sampling rate would be close to 100%. 
This may cause the backend or storage clusters to crash.\n","title":"Trace Sampling at server side","url":"/docs/main/v9.7.0/en/setup/backend/trace-sampling/"},{"content":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) -\u0026gt; 6.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ Spring Webflux WebClient 5.x -\u0026gt; 6.x JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 4.1.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x (Optional³)   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x ActiveMQ-Artemis 2.30.0 -\u0026gt; 2.31.2 Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   
Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n³These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. 
removed from DockerHub).\n","title":"Tracing and Tracing based Metrics Analyze Plugins","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/supported-list/"},{"content":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) -\u0026gt; 6.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ Spring Webflux WebClient 5.x -\u0026gt; 6.x JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 4.1.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x (Optional³)   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x ActiveMQ-Artemis 2.30.0 -\u0026gt; 2.31.2 Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  
Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x C3P0 0.9.0 -\u0026gt; 0.10.0   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x   Connection Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x C3P0 0.9.0 -\u0026gt; 0.10.0     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n³These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. 
removed from DockerHub).\n","title":"Tracing and Tracing based Metrics Analyze Plugins","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/supported-list/"},{"content":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 3.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, 
@Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. 
So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n","title":"Tracing and Tracing based Metrics Analyze Plugins","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/supported-list/"},{"content":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 3.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  
Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. 
So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n","title":"Tracing and Tracing based Metrics Analyze Plugins","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/supported-list/"},{"content":"Tracing and Tracing based Metrics Analyze Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  Tomcat 7 Tomcat 8 Tomcat 9 Tomcat 10 Spring Boot Web 4.x Spring MVC 3.x, 4.x 5.x with servlet 3.x Spring MVC 6.x (Optional²) Nutz Web Framework 1.x Struts2 MVC 2.3.x -\u0026gt; 2.5.x Resin 3 (Optional¹) Resin 4 (Optional¹) Jetty Server 9.x -\u0026gt; 11.x Spring WebFlux 5.x (Optional¹) -\u0026gt; 6.x (Optional¹) Undertow 1.3.0.Final -\u0026gt; 2.0.27.Final RESTEasy 3.1.0.Final -\u0026gt; 6.2.4.Final Play Framework 2.6.x -\u0026gt; 2.8.x Light4J Microservices Framework 1.6.x -\u0026gt; 2.x Netty SocketIO 1.x Micronaut HTTP Server 3.2.x -\u0026gt; 3.6.x Jersey REST framework 2.x -\u0026gt; 3.x Grizzly 2.3.x -\u0026gt; 4.x WebSphere Liberty 23.x Netty HTTP 4.1.x (Optional²)   HTTP Client  Feign 9.x Netflix Spring Cloud Feign 1.1.x -\u0026gt; 2.x Okhttp 2.x -\u0026gt; 3.x -\u0026gt; 4.x Apache httpcomponent HttpClient 2.0 -\u0026gt; 3.1, 4.2, 4.3, 5.0, 5.1 Spring RestTemplate 4.x Spring RestTemplate 6.x (Optional²) Jetty Client 9.x -\u0026gt; 11.x Apache httpcomponent AsyncClient 4.x AsyncHttpClient 2.1+ Spring Webflux WebClient 5.x -\u0026gt; 6.x JRE HttpURLConnection (Optional²) Hutool-http client 5.x Micronaut HTTP Client 3.2.x -\u0026gt; 3.6.x   HTTP Gateway  Spring Cloud Gateway 2.0.2.RELEASE -\u0026gt; 4.1.x (Optional²) Apache ShenYu (Rich protocol support: HTTP,Spring Cloud,gRPC,Dubbo,SOFARPC,Motan,Tars) 2.4.x (Optional²)   JDBC  Mysql Driver 5.x, 6.x, 8.x Oracle Driver (Optional¹) H2 Driver 1.3.x -\u0026gt; 1.4.x ShardingSphere 3.0.0, 4.0.0, 4.0.1, 4.1.0, 4.1.1, 5.0.0 PostgreSQL Driver 8.x, 9.x, 42.x Mariadb Driver 2.x, 1.8 InfluxDB 2.5 -\u0026gt; 2.17 Mssql-Jtds 1.x Mssql-jdbc 6.x -\u0026gt; 8.x ClickHouse-jdbc 0.3.x Apache-Kylin-Jdbc 2.6.x -\u0026gt; 3.x -\u0026gt; 4.x Impala-jdbc 2.6.x (Optional³)   RPC Frameworks  Dubbo 2.5.4 -\u0026gt; 2.6.0 Dubbox 2.8.4 Apache Dubbo 2.7.x -\u0026gt; 3.x Motan 0.2.x -\u0026gt; 1.1.0 gRPC 1.x Apache ServiceComb Java Chassis 1.x, 2.x SOFARPC 5.4.0 Armeria 0.63.0 -\u0026gt; 1.22.0 Apache Avro 1.7.0 - 1.8.x Finagle 6.44.0 -\u0026gt; 20.1.0 (6.25.0 -\u0026gt; 6.44.0 not tested) Brpc-Java 2.3.7 -\u0026gt; 3.0.5 Thrift 0.10.0 -\u0026gt; 0.12.0 Apache CXF 3.x JSONRPC4J 1.2.0 -\u0026gt; 1.6 Nacos-Client 2.x (Optional²)   MQ  RocketMQ 3.x-\u0026gt; 5.x RocketMQ-gRPC 5.x Kafka 0.11.0.0 -\u0026gt; 3.2.3 Spring-Kafka Spring Kafka Consumer 1.3.x -\u0026gt; 2.3.x (2.0.x and 2.1.x not tested and not recommended by the official document) ActiveMQ 5.10.0 -\u0026gt; 5.15.4 RabbitMQ 3.x-\u0026gt; 5.x Pulsar 2.2.x -\u0026gt; 2.9.x NATS 2.14.x -\u0026gt; 2.15.x ActiveMQ-Artemis 2.30.0 -\u0026gt; 2.31.2 Aliyun ONS 1.x (Optional¹)   NoSQL  aerospike 3.x -\u0026gt; 6.x Redis  Jedis 2.x-4.x Redisson Easy Java Redis client 3.5.2+ Lettuce 5.x   MongoDB Java Driver 2.13-2.14, 3.4.0-3.12.7, 4.0.0-4.1.0 Memcached Client  Spymemcached 2.x Xmemcached 2.x   Elasticsearch  transport-client 5.2.x-5.6.x transport-client 6.2.3-6.8.4 transport-client 7.0.0-7.5.2 rest-high-level-client 6.7.1-6.8.4 rest-high-level-client 7.0.0-7.5.2   Solr  SolrJ 7.x   Cassandra 3.x  cassandra-java-driver 3.7.0-3.7.2   HBase  
hbase-client HTable 1.0.0-2.4.2   Neo4j  Neo4j-java 4.x     Service Discovery  Netflix Eureka   Distributed Coordination  Zookeeper 3.4.x (Optional² \u0026amp; Except 3.4.4)   Spring Ecosystem  Spring Bean annotations(@Bean, @Service, @Component, @Repository) 3.x and 4.x (Optional²) Spring Core Async SuccessCallback/FailureCallback/ListenableFutureCallback 4.x Spring Transaction 4.x and 5.x (Optional²)   Hystrix: Latency and Fault Tolerance for Distributed Systems 1.4.20 -\u0026gt; 1.5.18 Sentinel: The Sentinel of Your Microservices 1.7.0 -\u0026gt; 1.8.1 Scheduler  Elastic Job 2.x Apache ShardingSphere-Elasticjob 3.x Spring @Scheduled 3.1+ Quartz Scheduler 2.x (Optional²) XXL Job 2.x   OpenTracing community supported Canal: Alibaba mysql database binlog incremental subscription \u0026amp; consumer components 1.0.25 -\u0026gt; 1.1.2 JSON  GSON 2.8.x (Optional²) Fastjson 1.2.x (Optional²) Jackson 2.x (Optional²)   Vert.x Ecosystem  Vert.x Eventbus 3.2 -\u0026gt; 4.x Vert.x Web 3.x -\u0026gt; 4.x   Thread Schedule Framework  Spring @Async 4.x and 5.x Quasar 0.7.x JRE Callable and Runnable (Optional²) JRE ForkJoinPool (Optional²)   Cache  Ehcache 2.x GuavaCache 18.x -\u0026gt; 23.x (Optional²)   Kotlin  Coroutine 1.0.1 -\u0026gt; 1.3.x (Optional²)   GraphQL  Graphql 8.0 -\u0026gt; 17.x   Pool  Apache Commons DBCP 2.x Alibaba Druid 1.x HikariCP 3.x -\u0026gt; 4.x   Logging Framework  log4j 2.x log4j2 1.2.x logback 1.2.x   ORM  MyBatis 3.4.x -\u0026gt; 3.5.x   Event  GuavaEventBus 19.x -\u0026gt; 31.x-jre    Meter Plugins The meter plugin provides the advanced metrics collections, which are not a part of tracing.\n Thread Pool  Undertow 2.1.x -\u0026gt; 2.6.x Tomcat 7.0.x -\u0026gt; 10.0.x Dubbo 2.5.x -\u0026gt; 2.7.x Jetty 9.1.x -\u0026gt; 11.x Grizzly 2.3.x -\u0026gt; 4.x     ¹Due to license incompatibilities/restrictions these plugins are hosted and released in 3rd part repository, go to SkyAPM java plugin extension repository to get these.\n²These plugins affect the performance or must be used under some conditions, from experiences. So only released in /optional-plugins or /bootstrap-plugins, copy to /plugins in order to make them work.\n³These plugins are not tested in the CI/CD pipeline, as the previous added tests are not able to run according to the latest CI/CD infrastructure limitations, lack of maintenance, or dependencies/images not available(e.g. removed from DockerHub).\n","title":"Tracing and Tracing based Metrics Analyze Plugins","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/supported-list/"},{"content":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\nimport \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Use Native Tracing Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. 
When creating an Exit Span, you need to write the context carrier into the target RPC request.\ntype ExtractorRef func(headerKey string) (string, error) type InjectorRef func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request trace.CreateEntrySpan(\u0026#34;EntrySpan\u0026#34;, func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request trace.CreateExitSpan(\u0026#34;ExitSpan\u0026#34;, request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Create Span Use trace.CreateEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel.\n The first parameter is operation name of span the second parameter is InjectorRef.  spanRef, err := trace.CreateEntrySpan(\u0026#34;operationName\u0026#34;, InjectorRef) Use trace.CreateLocalSpan() API to create local span\n the only parameter is the operation name of span.  spanRef, err := trace.CreateLocalSpan(\u0026#34;operationName\u0026#34;) Use trace.CreateExitSpan() API to create exit span.\n the first parameter is the operation name of span the second parameter is the remote peer which means the peer address of exit operation. the third parameter is the ExtractorRef  spanRef, err := trace.CreateExitSpan(\u0026#34;operationName\u0026#34;, \u0026#34;peer\u0026#34;, ExtractorRef) Use trace.StopSpan() API to stop current span\ntrace.StopSpan() Add Span’s Tag and Log Use trace.AddLog() to record log in span.\nUse trace.SetTag() to add tag to span, the parameters of tag are two String which are key and value respectively.\ntrace.AddLog(...string) trace.SetTag(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;) Set ComponentID Use trace.SetComponent() to set the component id of the Span\n the type of parameter is int32.  trace.SetComponent(ComponentID) The Component ID in Span is used to identify the current component, which is declared in the component libraries YAML from the OAP server side.\nAsync Prepare/Finish SpanRef is the return value of CreateSpan.Use SpanRef.PrepareAsync() to make current span still alive until SpanRef.AsyncFinish() called.\n Call PrepareAsync(). Use trace.StopSpan() to stop span in the original goroutine. Propagate the SpanRef to any other goroutine. Call SpanRef.AsyncFinish() in any goroutine.  Capture/Continue Context Snapshot  Use trace.CaptureContext() to get the segment info and store it in ContextSnapshotRef. Propagate the snapshot context to any other goroutine. Use trace.ContinueContext(snapshotRef) to load the snapshotRef in the target goroutine.  Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n  Use trace.GetTraceID() API to get traceID.\ntraceID := trace.GetTraceID()   Use trace.GetSegmentID() API to get segmentID.\nsegmentID := trace.GetSegmentID()   Use trace.GetSpanID() API to get spanID.\nspanID := trace.GetSpanID()   Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. 
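As a rough, hypothetical sketch, the snippet below strings the calls above together inside a plain net/http handler: it creates an entry span from the incoming request headers, attaches a tag and a log, stores a correlation value, and reads the trace ID. Only the trace.* calls are taken from toolkit/trace; the handler and its names are illustrative, and error handling is kept minimal.

import (
	"log"
	"net/http"

	"github.com/apache/skywalking-go/toolkit/trace"
)

// helloHandler is a hypothetical handler used only to show how the toolkit/trace calls fit together.
func helloHandler(w http.ResponseWriter, r *http.Request) {
	// Entry span: extract the context carrier from the incoming request headers.
	if _, err := trace.CreateEntrySpan("GET:/hello", func(headerKey string) (string, error) {
		return r.Header.Get(headerKey), nil
	}); err != nil {
		log.Println("create entry span:", err)
	}
	// Stop the current span when the handler returns.
	defer trace.StopSpan()

	// Attach a tag and a log record to the current span.
	trace.SetTag("http.method", r.Method)
	trace.AddLog("handling /hello")

	// Put custom data into the correlation context; it travels with downstream calls.
	trace.SetCorrelation("tenant", "demo")

	// Read-only tracing context, only meaningful while the current request is traced.
	w.Write([]byte("traceId=" + trace.GetTraceID()))
}

A downstream call would follow the same pattern with trace.CreateExitSpan and a callback that writes the carrier headers onto the outgoing request, as in the demo earlier in this section.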
All the data in the context will be propagated with the in-wire process automatically.\nUse trace.SetCorrelation() API to set custom data in tracing context.\ntrace.SetCorrelation(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;)  Max element count in the correlation context is 3 Max value length of each element is 128  CorrelationContext will remove the key when the value is empty.\nUse trace.GetCorrelation() API to get custom data.\nvalue := trace.GetCorrelation(\u0026#34;key\u0026#34;) ","title":"Tracing APIs","url":"/docs/skywalking-go/latest/en/advanced-features/manual-apis/toolkit-trace/"},{"content":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\nimport \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Use Native Tracing Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\ntype ExtractorRef func(headerKey string) (string, error) type InjectorRef func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request trace.CreateEntrySpan(\u0026#34;EntrySpan\u0026#34;, func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request trace.CreateExitSpan(\u0026#34;ExitSpan\u0026#34;, request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Create Span Use trace.CreateEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel.\n The first parameter is operation name of span the second parameter is InjectorRef.  spanRef, err := trace.CreateEntrySpan(\u0026#34;operationName\u0026#34;, InjectorRef) Use trace.CreateLocalSpan() API to create local span\n the only parameter is the operation name of span.  spanRef, err := trace.CreateLocalSpan(\u0026#34;operationName\u0026#34;) Use trace.CreateExitSpan() API to create exit span.\n the first parameter is the operation name of span the second parameter is the remote peer which means the peer address of exit operation. the third parameter is the ExtractorRef  spanRef, err := trace.CreateExitSpan(\u0026#34;operationName\u0026#34;, \u0026#34;peer\u0026#34;, ExtractorRef) Use trace.StopSpan() API to stop current span\ntrace.StopSpan() Add Span’s Tag and Log Use trace.AddLog() to record log in span.\nUse trace.SetTag() to add tag to span, the parameters of tag are two String which are key and value respectively.\ntrace.AddLog(...string) trace.SetTag(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;) Set ComponentID Use trace.SetComponent() to set the component id of the Span\n the type of parameter is int32.  trace.SetComponent(ComponentID) The Component ID in Span is used to identify the current component, which is declared in the component libraries YAML from the OAP server side.\nAsync Prepare/Finish SpanRef is the return value of CreateSpan.Use SpanRef.PrepareAsync() to make current span still alive until SpanRef.AsyncFinish() called.\n Call PrepareAsync(). 
Use trace.StopSpan() to stop span in the original goroutine. Propagate the SpanRef to any other goroutine. Call SpanRef.AsyncFinish() in any goroutine.  Capture/Continue Context Snapshot  Use trace.CaptureContext() to get the segment info and store it in ContextSnapshotRef. Propagate the snapshot context to any other goroutine. Use trace.ContinueContext(snapshotRef) to load the snapshotRef in the target goroutine.  Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n  Use trace.GetTraceID() API to get traceID.\ntraceID := trace.GetTraceID()   Use trace.GetSegmentID() API to get segmentID.\nsegmentID := trace.GetSegmentID()   Use trace.GetSpanID() API to get spanID.\nspanID := trace.GetSpanID()   Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\nUse trace.SetCorrelation() API to set custom data in tracing context.\ntrace.SetCorrelation(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;)  Max element count in the correlation context is 3 Max value length of each element is 128  CorrelationContext will remove the key when the value is empty.\nUse trace.GetCorrelation() API to get custom data.\nvalue := trace.GetCorrelation(\u0026#34;key\u0026#34;) ","title":"Tracing APIs","url":"/docs/skywalking-go/next/en/advanced-features/manual-apis/toolkit-trace/"},{"content":"Tracing APIs Add trace Toolkit toolkit/trace provides the APIs to enhance the trace context, such as createLocalSpan, createExitSpan, createEntrySpan, log, tag, prepareForAsync and asyncFinish. Add the toolkit dependency to your project.\nimport \u0026#34;github.com/apache/skywalking-go/toolkit/trace\u0026#34; Use Native Tracing Context Carrier The context carrier is used to pass the context between the difference application.\nWhen creating an Entry Span, you need to obtain the context carrier from the request. When creating an Exit Span, you need to write the context carrier into the target RPC request.\ntype ExtractorRef func(headerKey string) (string, error) type InjectorRef func(headerKey, headerValue string) error The following demo demonstrates how to pass the Context Carrier in the Tracing API:\n// create a new entry span and extract the context carrier from the request trace.CreateEntrySpan(\u0026#34;EntrySpan\u0026#34;, func(headerKey string) (string, error) { return request.Header.Get(headerKey), nil }) // create a new exit span and inject the context carrier into the request trace.CreateExitSpan(\u0026#34;ExitSpan\u0026#34;, request.Host, func(headerKey, headerValue string) error { request.Header.Add(headerKey, headerValue) return nil }) Create Span Use trace.CreateEntrySpan() API to create entry span, and then use SpanRef to contain the reference of created span in agent kernel.\n The first parameter is operation name of span the second parameter is InjectorRef.  spanRef, err := trace.CreateEntrySpan(\u0026#34;operationName\u0026#34;, InjectorRef) Use trace.CreateLocalSpan() API to create local span\n the only parameter is the operation name of span.  spanRef, err := trace.CreateLocalSpan(\u0026#34;operationName\u0026#34;) Use trace.CreateExitSpan() API to create exit span.\n the first parameter is the operation name of span the second parameter is the remote peer which means the peer address of exit operation. 
the third parameter is the ExtractorRef  spanRef, err := trace.CreateExitSpan(\u0026#34;operationName\u0026#34;, \u0026#34;peer\u0026#34;, ExtractorRef) Use trace.StopSpan() API to stop current span\ntrace.StopSpan() Add Span’s Tag and Log Use trace.AddLog() to record log in span.\nUse trace.SetTag() to add tag to span, the parameters of tag are two String which are key and value respectively.\ntrace.AddLog(...string) trace.SetTag(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;) Set ComponentID Use trace.SetComponent() to set the component id of the Span\n the type of parameter is int32.  trace.SetComponent(ComponentID) The Component ID in Span is used to identify the current component, which is declared in the component libraries YAML from the OAP server side.\nAsync Prepare/Finish SpanRef is the return value of CreateSpan.Use SpanRef.PrepareAsync() to make current span still alive until SpanRef.AsyncFinish() called.\n Call PrepareAsync(). Use trace.StopSpan() to stop span in the original goroutine. Propagate the SpanRef to any other goroutine. Call SpanRef.AsyncFinish() in any goroutine.  Capture/Continue Context Snapshot  Use trace.CaptureContext() to get the segment info and store it in ContextSnapshotRef. Propagate the snapshot context to any other goroutine. Use trace.ContinueContext(snapshotRef) to load the snapshotRef in the target goroutine.  Reading Context All following APIs provide readonly features for the tracing context from tracing system. The values are only available when the current thread is traced.\n  Use trace.GetTraceID() API to get traceID.\ntraceID := trace.GetTraceID()   Use trace.GetSegmentID() API to get segmentID.\nsegmentID := trace.GetSegmentID()   Use trace.GetSpanID() API to get spanID.\nspanID := trace.GetSpanID()   Trace Correlation Context Trace correlation context APIs provide a way to put custom data in tracing context. All the data in the context will be propagated with the in-wire process automatically.\nUse trace.SetCorrelation() API to set custom data in tracing context.\ntrace.SetCorrelation(\u0026#34;key\u0026#34;,\u0026#34;value\u0026#34;)  Max element count in the correlation context is 3 Max value length of each element is 128  CorrelationContext will remove the key when the value is empty.\nUse trace.GetCorrelation() API to get custom data.\nvalue := trace.GetCorrelation(\u0026#34;key\u0026#34;) ","title":"Tracing APIs","url":"/docs/skywalking-go/v0.4.0/en/advanced-features/manual-apis/toolkit-trace/"},{"content":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  gin: Gin tested v1.7.0 to v1.9.0. http: Native HTTP tested go v1.17 to go v1.20. go-restfulv3: Go-Restful tested v3.7.1 to 3.10.2. mux: Mux tested v1.7.0 to v1.8.0. iris: Iris tested v12.1.0 to 12.2.5. fasthttp: FastHttp tested v1.10.0 to v1.50.0. fiber: Fiber tested v2.49.0 to v2.50.0. echov4: Echov4 tested v4.0.0 to v4.11.4   HTTP Client  http: Native HTTP tested go v1.17 to go v1.20. fasthttp: FastHttp tested v1.10.0 to v1.50.0.   RPC Frameworks  dubbo: Dubbo tested v3.0.1 to v3.0.5. kratosv2: Kratos tested v2.3.1 to v2.6.2. microv4: Go-Micro tested v4.6.0 to v4.10.2. grpc : gRPC tested v1.55.0 to v1.57.0.   Database Client  gorm: GORM tested v1.22.0 to v1.25.1.  MySQL Driver   mongo: Mongo tested v1.11.1 to v1.11.7. sql: Native SQL tested go v1.17 to go v1.20.  MySQL Driver tested v1.4.0 to v1.7.1.     
Cache Client  go-redisv9: go-redis tested v9.0.3 to v9.0.5.   MQ Client  rocketMQ: rocketmq-client-go tested v2.1.2. amqp: AMQP tested v1.9.0.    Metrics Plugins The meter plugin provides the advanced metrics collections.\n runtimemetrics: Native Runtime Metrics tested go v1.17 to go v1.20.  Logging Plugins The logging plugin provides the advanced logging collections.\n logrus: Logrus tested v1.8.2 to v1.9.3. zap: Zap tested v1.17.0 to v1.24.0.  ","title":"Tracing Plugins","url":"/docs/skywalking-go/latest/en/agent/support-plugins/"},{"content":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  gin: Gin tested v1.7.0 to v1.9.0. http: Native HTTP tested go v1.17 to go v1.20. go-restfulv3: Go-Restful tested v3.7.1 to 3.10.2. mux: Mux tested v1.7.0 to v1.8.0. iris: Iris tested v12.1.0 to 12.2.5. fasthttp: FastHttp tested v1.10.0 to v1.50.0. fiber: Fiber tested v2.49.0 to v2.50.0. echov4: Echov4 tested v4.0.0 to v4.11.4   HTTP Client  http: Native HTTP tested go v1.17 to go v1.20. fasthttp: FastHttp tested v1.10.0 to v1.50.0.   RPC Frameworks  dubbo: Dubbo tested v3.0.1 to v3.0.5. kratosv2: Kratos tested v2.3.1 to v2.6.2. microv4: Go-Micro tested v4.6.0 to v4.10.2. grpc : gRPC tested v1.55.0 to v1.57.0.   Database Client  gorm: GORM tested v1.22.0 to v1.25.1.  MySQL Driver   mongo: Mongo tested v1.11.1 to v1.11.7. sql: Native SQL tested go v1.17 to go v1.20.  MySQL Driver tested v1.4.0 to v1.7.1.     Cache Client  go-redisv9: go-redis tested v9.0.3 to v9.0.5.   MQ Client  rocketMQ: rocketmq-client-go tested v2.1.2. amqp: AMQP tested v1.9.0. pulsar: pulsar-client-go tested v0.12.0. segmentio-kafka: segmentio-kafka tested v0.4.47.    Metrics Plugins The meter plugin provides the advanced metrics collections.\n runtimemetrics: Native Runtime Metrics tested go v1.17 to go v1.20.  Logging Plugins The logging plugin provides the advanced logging collections.\n logrus: Logrus tested v1.8.2 to v1.9.3. zap: Zap tested v1.17.0 to v1.24.0.  ","title":"Tracing Plugins","url":"/docs/skywalking-go/next/en/agent/support-plugins/"},{"content":"Tracing Plugins The following plugins provide the distributed tracing capability, and the OAP backend would analyze the topology and metrics based on the tracing data.\n HTTP Server  gin: Gin tested v1.7.0 to v1.9.0. http: Native HTTP tested go v1.17 to go v1.20. go-restfulv3: Go-Restful tested v3.7.1 to 3.10.2. mux: Mux tested v1.7.0 to v1.8.0. iris: Iris tested v12.1.0 to 12.2.5. fasthttp: FastHttp tested v1.10.0 to v1.50.0. fiber: Fiber tested v2.49.0 to v2.50.0. echov4: Echov4 tested v4.0.0 to v4.11.4   HTTP Client  http: Native HTTP tested go v1.17 to go v1.20. fasthttp: FastHttp tested v1.10.0 to v1.50.0.   RPC Frameworks  dubbo: Dubbo tested v3.0.1 to v3.0.5. kratosv2: Kratos tested v2.3.1 to v2.6.2. microv4: Go-Micro tested v4.6.0 to v4.10.2. grpc : gRPC tested v1.55.0 to v1.57.0.   Database Client  gorm: GORM tested v1.22.0 to v1.25.1.  MySQL Driver   mongo: Mongo tested v1.11.1 to v1.11.7. sql: Native SQL tested go v1.17 to go v1.20.  MySQL Driver tested v1.4.0 to v1.7.1.     Cache Client  go-redisv9: go-redis tested v9.0.3 to v9.0.5.   MQ Client  rocketMQ: rocketmq-client-go tested v2.1.2. amqp: AMQP tested v1.9.0.    Metrics Plugins The meter plugin provides the advanced metrics collections.\n runtimemetrics: Native Runtime Metrics tested go v1.17 to go v1.20.  
Logging Plugins The logging plugin provides the advanced logging collections.\n logrus: Logrus tested v1.8.2 to v1.9.3. zap: Zap tested v1.17.0 to v1.24.0.  ","title":"Tracing Plugins","url":"/docs/skywalking-go/v0.4.0/en/agent/support-plugins/"},{"content":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to provide functionality for distributed tracing, metrics, and logging data. For a detailed list of supported plugins, please refer to the documentation. This document aims to provide you with some configuration information for your usage. Please ensure that you have followed the documentation to successfully install the SkyWalking Go Agent into your application.\nMetadata Mechanism The Go Agent would be identified by the SkyWalking backend after startup and maintain a heartbeat to keep alive.\n   Name Environment Key Default Value Description     agent.service_name SW_AGENT_NAME Your_Application_Name The name of the service which showed in UI.   agent.instance_env_name  SW_AGENT_INSTANCE_NAME To obtain the environment variable key for the instance name, if it cannot be obtained, an instance name will be automatically generated.    Tracing Distributed tracing is the most common form of plugin in the Go Agent, and it becomes active with each new incoming request. By default, all plugins are enabled. For a specific list of plugins, please refer to the documentation.\nIf you wish to disable a particular plugin to prevent enhancements related to that plugin, please consult the documentation on how to disable plugins.\nThe basic configuration is as follows:\n   Name Environment Key Default Value Description     agent.sampler SW_AGENT_SAMPLER 1 Sampling rate of tracing data, which is a floating-point value that must be between 0 and 1.   agent.ignore_suffix SW_AGENT_IGNORE_SUFFIX .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).    Metrics The metrics plugin can dynamically monitor the execution status of the current program and aggregate the data into corresponding metrics. Eventually, the data is reported to the SkyWalking backend at a specified interval. For a specific list of plugins, please refer to the documentation.\nThe current configuration information is as follows:\n   Name Environment Key Default Value Description     agent.meter.collect_interval SW_AGENT_METER_COLLECT_INTERVAL 20 The interval of collecting metrics, in seconds.    Logging The logging plugin in SkyWalking Go Agent are used to handle agent and application logs, as well as application log querying. They primarily consist of the following three functionalities:\n Agent Log Adaptation: The plugin detects the logging framework used in the current system and integrates the agent\u0026rsquo;s logs with the system\u0026rsquo;s logging framework. Distributed Tracing Enhancement: It combines the distributed tracing information from the current request with the application logs, allowing you to have real-time visibility into all log contents related to specific requests. Log Reporting: The plugin reports both application and agent logs to the SkyWalking backend for data retrieval and display purposes.  
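The logging plugins (for example logrus and zap) combine this tracing information with application logs automatically. The snippet below is only a hand-rolled sketch of the same idea, assuming the toolkit/trace API described in the Tracing APIs pages and the standard log package; the helper name and log format are hypothetical.

import (
	"log"

	"github.com/apache/skywalking-go/toolkit/trace"
)

// logWithTrace is a hypothetical helper that prefixes an application log line with the
// current trace ID, so the line can later be matched to the request that produced it.
func logWithTrace(msg string) {
	// GetTraceID returns a meaningful value only while the current goroutine is traced.
	log.Printf("traceId=%s %s", trace.GetTraceID(), msg)
}

With the agent's logging plugin active, such a helper is normally unnecessary, since reported logs already carry the tracing context.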
For more details, please refer to the documentation to learn more detail.\n","title":"Tracing, Metrics and Logging with Go Agent","url":"/docs/skywalking-go/latest/en/agent/tracing-metrics-logging/"},{"content":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to provide functionality for distributed tracing, metrics, and logging data. For a detailed list of supported plugins, please refer to the documentation. This document aims to provide you with some configuration information for your usage. Please ensure that you have followed the documentation to successfully install the SkyWalking Go Agent into your application.\nMetadata Mechanism The Go Agent would be identified by the SkyWalking backend after startup and maintain a heartbeat to keep alive.\n   Name Environment Key Default Value Description     agent.service_name SW_AGENT_NAME Your_Application_Name The name of the service which showed in UI.   agent.instance_env_name  SW_AGENT_INSTANCE_NAME To obtain the environment variable key for the instance name, if it cannot be obtained, an instance name will be automatically generated.    Tracing Distributed tracing is the most common form of plugin in the Go Agent, and it becomes active with each new incoming request. By default, all plugins are enabled. For a specific list of plugins, please refer to the documentation.\nIf you wish to disable a particular plugin to prevent enhancements related to that plugin, please consult the documentation on how to disable plugins.\nThe basic configuration is as follows:\n   Name Environment Key Default Value Description     agent.sampler SW_AGENT_SAMPLER 1 Sampling rate of tracing data, which is a floating-point value that must be between 0 and 1.   agent.ignore_suffix SW_AGENT_IGNORE_SUFFIX .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the suffix obtained by splitting the operation name by the last index of \u0026ldquo;.\u0026rdquo; in this set, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).   agent.trace_ignore_path SW_AGENT_TRACE_IGNORE_PATH  If the operation name of the first span is matching, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).    Metrics The metrics plugin can dynamically monitor the execution status of the current program and aggregate the data into corresponding metrics. Eventually, the data is reported to the SkyWalking backend at a specified interval. For a specific list of plugins, please refer to the documentation.\nThe current configuration information is as follows:\n   Name Environment Key Default Value Description     agent.meter.collect_interval SW_AGENT_METER_COLLECT_INTERVAL 20 The interval of collecting metrics, in seconds.    Logging The logging plugin in SkyWalking Go Agent are used to handle agent and application logs, as well as application log querying. They primarily consist of the following three functionalities:\n Agent Log Adaptation: The plugin detects the logging framework used in the current system and integrates the agent\u0026rsquo;s logs with the system\u0026rsquo;s logging framework. Distributed Tracing Enhancement: It combines the distributed tracing information from the current request with the application logs, allowing you to have real-time visibility into all log contents related to specific requests. Log Reporting: The plugin reports both application and agent logs to the SkyWalking backend for data retrieval and display purposes.  
For more details, please refer to the documentation to learn more detail.\n","title":"Tracing, Metrics and Logging with Go Agent","url":"/docs/skywalking-go/next/en/agent/tracing-metrics-logging/"},{"content":"Tracing, Metrics and Logging with Go Agent All plugins in SkyWalking Go Agent are designed to provide functionality for distributed tracing, metrics, and logging data. For a detailed list of supported plugins, please refer to the documentation. This document aims to provide you with some configuration information for your usage. Please ensure that you have followed the documentation to successfully install the SkyWalking Go Agent into your application.\nMetadata Mechanism The Go Agent would be identified by the SkyWalking backend after startup and maintain a heartbeat to keep alive.\n   Name Environment Key Default Value Description     agent.service_name SW_AGENT_NAME Your_Application_Name The name of the service which showed in UI.   agent.instance_env_name  SW_AGENT_INSTANCE_NAME To obtain the environment variable key for the instance name, if it cannot be obtained, an instance name will be automatically generated.    Tracing Distributed tracing is the most common form of plugin in the Go Agent, and it becomes active with each new incoming request. By default, all plugins are enabled. For a specific list of plugins, please refer to the documentation.\nIf you wish to disable a particular plugin to prevent enhancements related to that plugin, please consult the documentation on how to disable plugins.\nThe basic configuration is as follows:\n   Name Environment Key Default Value Description     agent.sampler SW_AGENT_SAMPLER 1 Sampling rate of tracing data, which is a floating-point value that must be between 0 and 1.   agent.ignore_suffix SW_AGENT_IGNORE_SUFFIX .jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg If the operation name of the first span is included in this set, this segment should be ignored.(multiple split by \u0026ldquo;,\u0026quot;).    Metrics The metrics plugin can dynamically monitor the execution status of the current program and aggregate the data into corresponding metrics. Eventually, the data is reported to the SkyWalking backend at a specified interval. For a specific list of plugins, please refer to the documentation.\nThe current configuration information is as follows:\n   Name Environment Key Default Value Description     agent.meter.collect_interval SW_AGENT_METER_COLLECT_INTERVAL 20 The interval of collecting metrics, in seconds.    Logging The logging plugin in SkyWalking Go Agent are used to handle agent and application logs, as well as application log querying. They primarily consist of the following three functionalities:\n Agent Log Adaptation: The plugin detects the logging framework used in the current system and integrates the agent\u0026rsquo;s logs with the system\u0026rsquo;s logging framework. Distributed Tracing Enhancement: It combines the distributed tracing information from the current request with the application logs, allowing you to have real-time visibility into all log contents related to specific requests. Log Reporting: The plugin reports both application and agent logs to the SkyWalking backend for data retrieval and display purposes.  
For more details, please refer to the documentation.\n","title":"Tracing, Metrics and Logging with Go Agent","url":"/docs/skywalking-go/v0.4.0/en/agent/tracing-metrics-logging/"},
{"content":"Traffic Traffic is used to collect network access logs from services through the Service Discovery, and send the access logs to the backend server for analysis.\nConfiguration    Name Default Environment Key Description     access_log.active false ROVER_ACCESS_LOG_ACTIVE Whether access log monitoring is active.   access_log.exclude_namespaces istio-system,cert-manager,kube-system ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES Exclude processes in the specified Kubernetes namespace. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;   access_log.exclude_cluster  ROVER_ACCESS_LOG_EXCLUDE_CLUSTER Exclude processes in the specified cluster, which is defined in the process module. Multiple clusters split by \u0026ldquo;,\u0026rdquo;   access_log.flush.max_count 2000 ROVER_ACCESS_LOG_FLUSH_MAX_COUNT The maximum number of access logs per flush to the backend.   access_log.flush.period 5s ROVER_ACCESS_LOG_FLUSH_PERIOD The period of flushing access logs to the backend.   access_log_protocol_analyze.per_cpu_buffer 400KB ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of the socket data buffer on each CPU.   access_log.protocol_analyze.parallels 2 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzers.   access_log.protocol_analyze.queue_size 5000 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE The size of each parallel analyze queue.    Collectors Socket Connect/Accept/Close Monitor all socket connect, accept, and close events from monitored processes by attaching an eBPF program to the respective trace points.\nSocket traffic Capture all socket traffic from monitored processes by attaching an eBPF program to network syscalls.\nProtocol Data collection is followed by protocol analysis. Currently, the supported protocols include:\n HTTP/1.x HTTP/2  Note: As HTTP/2 is a stateful protocol, it only supports monitoring processes that start after monitoring begins. Processes already running at the time of monitoring may fail to provide complete data, leading to unsuccessful analysis.\nTLS When a process uses the TLS protocol for data transfer, Rover monitors libraries such as OpenSSL, BoringSSL, GoTLS, and NodeTLS to access the raw content. This feature is also applicable for protocol analysis.\nNote: the parsing of TLS protocols in Java is currently not supported.\nL2-L4 During data transmission, Rover records each packet\u0026rsquo;s path through the network layers L2 to L4 using kprobes. This approach enhances the understanding of each packet\u0026rsquo;s transmission process, facilitating easier localization and troubleshooting of network issues.\n","title":"Traffic","url":"/docs/skywalking-rover/latest/en/setup/configuration/traffic/"},
{"content":"Traffic Traffic is used to collect network access logs from services through the Service Discovery, and send the access logs to the backend server for analysis.\nConfiguration    Name Default Environment Key Description     access_log.active false ROVER_ACCESS_LOG_ACTIVE Whether access log monitoring is active.   access_log.exclude_namespaces istio-system,cert-manager,kube-system ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES Exclude processes in the specified Kubernetes namespace. 
Multiple namespaces split by \u0026ldquo;,\u0026rdquo;   access_log.exclude_cluster  ROVER_ACCESS_LOG_EXCLUDE_CLUSTER Exclude processes in the specified cluster which defined in the process module. Multiple clusters split by \u0026ldquo;,\u0026rdquo;   access_log.flush.max_count 2000 ROVER_ACCESS_LOG_FLUSH_MAX_COUNT The max count of the access log when flush to the backend.   access_log.flush.period 5s ROVER_ACCESS_LOG_FLUSH_PERIOD The period of flush access log to the backend.   access_log_protocol_analyze.per_cpu_buffer 400KB ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   access_log.protocol_analyze.parallels 2 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   access_log.protocol_analyze.queue_size 5000 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyze queue.    Collectors Socket Connect/Accept/Close Monitor all socket connect, accept, and close events from monitored processes by attaching eBPF program to the respective trace points.\nSocket traffic Capture all socket traffic from monitored processes by attaching eBPF program to network syscalls.\nProtocol Data collection is followed by protocol analysis. Currently, the supported protocols include:\n HTTP/1.x HTTP/2  Note: As HTTP2 is a stateful protocol, it only supports monitoring processes that start after monitor. Processes already running at the time of monitoring may fail to provide complete data, leading to unsuccessful analysis.\nTLS When a process uses the TLS protocol for data transfer, Rover monitors libraries such as OpenSSL, BoringSSL, GoTLS, and NodeTLS to access the raw content. This feature is also applicable for protocol analysis.\nNote: the parsing of TLS protocols in Java is currently not supported.\nL2-L4 During data transmission, Rover records each packet\u0026rsquo;s through the network layers L2 to L4 using kprobes. This approach enhances the understanding of each packet\u0026rsquo;s transmission process, facilitating easier localization and troubleshooting of network issues.\n","title":"Traffic","url":"/docs/skywalking-rover/next/en/setup/configuration/traffic/"},{"content":"Traffic The traffic is used to collecting the network access logs from services through the Service Discovery, and send access logs to the backend server for analyze.\nConfiguration    Name Default Environment Key Description     access_log.active false ROVER_ACCESS_LOG_ACTIVE Is active the access log monitoring.   access_log.exclude_namespaces istio-system,cert-manager,kube-system ROVER_ACCESS_LOG_EXCLUDE_NAMESPACES Exclude processes in the specified Kubernetes namespace. Multiple namespaces split by \u0026ldquo;,\u0026rdquo;   access_log.exclude_cluster  ROVER_ACCESS_LOG_EXCLUDE_CLUSTER Exclude processes in the specified cluster which defined in the process module. Multiple clusters split by \u0026ldquo;,\u0026rdquo;   access_log.flush.max_count 2000 ROVER_ACCESS_LOG_FLUSH_MAX_COUNT The max count of the access log when flush to the backend.   access_log.flush.period 5s ROVER_ACCESS_LOG_FLUSH_PERIOD The period of flush access log to the backend.   access_log_protocol_analyze.per_cpu_buffer 400KB ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PER_CPU_BUFFER The size of socket data buffer on each CPU.   access_log.protocol_analyze.parallels 2 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_PARALLELS The count of parallel protocol analyzer.   
access_log.protocol_analyze.queue_size 5000 ROVER_ACCESS_LOG_PROTOCOL_ANALYZE_QUEUE_SIZE The size of per paralleled analyze queue.    Collectors Socket Connect/Accept/Close Monitor all socket connect, accept, and close events from monitored processes by attaching eBPF program to the respective trace points.\nSocket traffic Capture all socket traffic from monitored processes by attaching eBPF program to network syscalls.\nProtocol Data collection is followed by protocol analysis. Currently, the supported protocols include:\n HTTP/1.x HTTP/2  Note: As HTTP2 is a stateful protocol, it only supports monitoring processes that start after monitor. Processes already running at the time of monitoring may fail to provide complete data, leading to unsuccessful analysis.\nTLS When a process uses the TLS protocol for data transfer, Rover monitors libraries such as OpenSSL, BoringSSL, GoTLS, and NodeTLS to access the raw content. This feature is also applicable for protocol analysis.\nNote: the parsing of TLS protocols in Java is currently not supported.\nL2-L4 During data transmission, Rover records each packet\u0026rsquo;s through the network layers L2 to L4 using kprobes. This approach enhances the understanding of each packet\u0026rsquo;s transmission process, facilitating easier localization and troubleshooting of network issues.\n","title":"Traffic","url":"/docs/skywalking-rover/v0.6.0/en/setup/configuration/traffic/"},{"content":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and transport data to the Kafka Topic.\nConfig Here is config file, set out as follows:\n Declare gRPC server and kafka client to receive and transmit data. Declare the SkyWalking Log protocol gatherer and sender to transmit protocol via pipeline. Expose Self-Observability telemetry data to Prometheus.  ","title":"Transmit Log to Kafka","url":"/docs/skywalking-satellite/latest/en/setup/examples/feature/transmit-log-to-kafka/readme/"},{"content":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and transport data to the Kafka Topic.\nConfig Here is config file, set out as follows:\n Declare gRPC server and kafka client to receive and transmit data. Declare the SkyWalking Log protocol gatherer and sender to transmit protocol via pipeline. Expose Self-Observability telemetry data to Prometheus.  ","title":"Transmit Log to Kafka","url":"/docs/skywalking-satellite/next/en/setup/examples/feature/transmit-log-to-kafka/readme/"},{"content":"Transmit Log to Kafka Using Satellite to receive the SkyWalking log protocol from agent, and transport data to the Kafka Topic.\nConfig Here is config file, set out as follows:\n Declare gRPC server and kafka client to receive and transmit data. Declare the SkyWalking Log protocol gatherer and sender to transmit protocol via pipeline. Expose Self-Observability telemetry data to Prometheus.  ","title":"Transmit Log to Kafka","url":"/docs/skywalking-satellite/v1.2.0/en/setup/examples/feature/transmit-log-to-kafka/readme/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. 
After the timeout has expired, the metrics data will automatically be deleted.\nrecordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3} # Unit is day\nmetricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7} # Unit is day","title":"TTL","url":"/docs/main/latest/en/setup/backend/ttl/"},
{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.\nrecordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3} # Unit is day\nmetricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7} # Unit is day","title":"TTL","url":"/docs/main/next/en/setup/backend/ttl/"},
{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.\nrecordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3} # Unit is day\nmetricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7} # Unit is day","title":"TTL","url":"/docs/main/v9.0.0/en/setup/backend/ttl/"},
{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.\nrecordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3} # Unit is day\nmetricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7} # Unit is day","title":"TTL","url":"/docs/main/v9.1.0/en/setup/backend/ttl/"},
{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.\nrecordDataTTL: ${SW_CORE_RECORD_DATA_TTL:3} # Unit is day\nmetricsDataTTL: ${SW_CORE_METRICS_DATA_TTL:7} # Unit is day","title":"TTL","url":"/docs/main/v9.2.0/en/setup/backend/ttl/"},
{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  
These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.3.0/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.4.0/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.5.0/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.6.0/en/setup/backend/ttl/"},{"content":"TTL In SkyWalking, there are two types of observability data:\n Records include traces, logs, topN sampled statements and alarm. recordDataTTL applies to record data. Metrics include all metrics for service, instance, endpoint, and topology map. Metadata(lists of services, instances, or endpoints) also belongs to metrics. metricsDataTTL applies to Metrics data.  These are the settings for the different types:\n# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.recordDataTTL:${SW_CORE_RECORD_DATA_TTL:3}# Unit is daymetricsDataTTL:${SW_CORE_METRICS_DATA_TTL:7}# Unit is day","title":"TTL","url":"/docs/main/v9.7.0/en/setup/backend/ttl/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  
serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","title":"UI","url":"/docs/main/latest/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","title":"UI","url":"/docs/main/next/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings Settings file of UI is webapp/webapp.yml in distribution package. It has three parts.\n Listening port. Backend connect info.  server:port:8080spring:cloud:gateway:routes:- id:oap-routeuri:lb://oap-servicepredicates:- Path=/graphql/**discovery:client:simple:instances:oap-service:# Point to all backend\u0026#39;s restHost:restPort, split by URI arrays.- uri:http://127.0.0.1:12800- uri:http://instance-2:12800Start with Docker Image Start a container to connect oap server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of OAP server. Default value is http://127.0.0.1:12800.\n","title":"UI","url":"/docs/main/v9.0.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  
server:port:8080spring:cloud:gateway:routes:- id:oap-routeuri:lb://oap-servicepredicates:- Path=/graphql/**discovery:client:simple:instances:oap-service:# Point to all backend\u0026#39;s restHost:restPort, split by URI arrays.- uri:http://127.0.0.1:12800- uri:http://instance-2:12800Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\n","title":"UI","url":"/docs/main/v9.1.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  server:port:8080spring:cloud:gateway:routes:- id:oap-routeuri:lb://oap-servicepredicates:- Path=/graphql/**discovery:client:simple:instances:oap-service:# Point to all backend\u0026#39;s restHost:restPort, split by URI arrays.- uri:http://127.0.0.1:12800- uri:http://instance-2:12800Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\n","title":"UI","url":"/docs/main/v9.2.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, without http:// prefix.oapServices:${SW_OAP_ADDRESS:-localhost:12800}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\n","title":"UI","url":"/docs/main/v9.3.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as an OS Java process, powered-by Zuul.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  
serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, without http:// prefix.oapServices:${SW_OAP_ADDRESS:-localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","title":"UI","url":"/docs/main/v9.4.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\ndocker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:8.8.0 Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","title":"UI","url":"/docs/main/v9.5.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","title":"UI","url":"/docs/main/v9.6.0/en/setup/backend/ui-setup/"},{"content":"UI SkyWalking UI distribution is already included in our Apache official release.\nStartup Startup script is also in /bin/webappService.sh(.bat). UI runs as a Java process, powered-by Armeria.\nSettings The settings file of UI is webapp/webapp.yml in the distribution package. It has three parts.\n Listening port. Backend connect info.  
serverPort:${SW_SERVER_PORT:-8080}# Comma separated list of OAP addresses, with `http://` or `https://` prefix.oapServices:${SW_OAP_ADDRESS:-http://localhost:12800}zipkinServices:${SW_ZIPKIN_ADDRESS:http://localhost:9412}Start with Docker Image Start a container to connect OAP server whose address is http://oap:12800.\nexport version=9.0.0 docker run --name oap --restart always -d -e SW_OAP_ADDRESS=http://oap:12800 -e SW_ZIPKIN_ADDRESS=http://oap:9412 apache/skywalking-ui:$version Configuration We could set up environment variables to configure this image.\nSW_OAP_ADDRESS The address of your OAP server. The default value is http://127.0.0.1:12800.\nSW_ZIPKIN_ADDRESS The address of your Zipkin server. The default value is http://127.0.0.1:9412.\n","title":"UI","url":"/docs/main/v9.7.0/en/setup/backend/ui-setup/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/latest/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/next/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by SkyWalking agent plugin when they are started, but they can be configured in gateways.yml file or via Dynamic Configuration. 
The reason why they can\u0026rsquo;t register to backend automatically is that there\u0026rsquo;re no suitable agent plugins. For example, there are no agent plugins for Nginx, haproxy, etc. So in order to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/ip of this gateway instanceport:9099# the port of this gateway instance, defaults to 80Note: The host of the instance must be the one that is actually used at client side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.0.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.1.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. 
For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.2.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.3.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.4.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. 
So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.5.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.6.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Uninstrumented Gateways/Proxies The uninstrumented gateways are not instrumented by the SkyWalking agent plugin when they start, but they can be configured in gateways.yml file or via Dynamic Configuration. The reason why they can\u0026rsquo;t register to backend automatically is that there are no suitable agent plugins. For example, there are no agent plugins for Nginx, HAProxy, etc. So to visualize the real topology, we provide a way to configure the gateways/proxies manually.\nConfiguration Format The configuration content includes gateway names and their instances:\ngateways:- name:proxy0# the name is not used for nowinstances:- host:127.0.0.1# the host/IP of this gateway instanceport:9099# the port of this gateway instance defaults to 80Note: The host of the instance must be the one that is actually used on the client-side. For example, if instance proxyA has 2 IPs, say 192.168.1.110 and 192.168.1.111, both of which delegate the target service, and the client connects to 192.168.1.110, then configuring 192.168.1.111 as the host won\u0026rsquo;t work properly.\n","title":"Uninstrumented Gateways/Proxies","url":"/docs/main/v9.7.0/en/setup/backend/uninstrumented-gateways/"},{"content":"Unit Test For Satellite, the specific plugin may have some common dependencies. 
So we provide a global test initializer to init the dependencies.\nimport ( _ \u0026quot;github.com/apache/skywalking-satellite/internal/satellite/test\u0026quot; ) ","title":"Unit Test","url":"/docs/skywalking-satellite/latest/en/guides/test/how-to-unit-test/"},{"content":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a global test initializer to init the dependencies.\nimport ( _ \u0026quot;github.com/apache/skywalking-satellite/internal/satellite/test\u0026quot; ) ","title":"Unit Test","url":"/docs/skywalking-satellite/next/en/guides/test/how-to-unit-test/"},{"content":"Unit Test For Satellite, the specific plugin may have some common dependencies. So we provide a global test initializer to init the dependencies.\nimport ( _ \u0026quot;github.com/apache/skywalking-satellite/internal/satellite/test\u0026quot; ) ","title":"Unit Test","url":"/docs/skywalking-satellite/v1.2.0/en/guides/test/how-to-unit-test/"},{"content":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","title":"Use annotation to mark the method you want to trace.","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/"},{"content":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... 
} Sample codes only\n","title":"Use annotation to mark the method you want to trace.","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/"},{"content":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","title":"Use annotation to mark the method you want to trace.","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/"},{"content":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  /** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","title":"Use annotation to mark the method you want to trace.","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/"},{"content":"Use annotation to mark the method you want to trace.  Add @Trace to any method you want to trace. After that, you can see the span in the Stack. Methods annotated with @Tag will try to tag the current active span with the given key (Tag#key()) and (Tag#value()), if there is no active span at all, this annotation takes no effect. @Tag can be repeated, and can be used in companion with @Trace, see examples below. The value of Tag is the same as what are supported in Customize Enhance Trace.  
/** * The codes below will generate a span, * and two types of tags, one type tag: keys are `tag1` and `tag2`, values are the passed-in parameters, respectively, the other type tag: keys are `username` and `age`, values are the return value in User, respectively */ @Trace @Tag(key = \u0026#34;tag1\u0026#34;, value = \u0026#34;arg[0]\u0026#34;) @Tag(key = \u0026#34;tag2\u0026#34;, value = \u0026#34;arg[1]\u0026#34;) @Tag(key = \u0026#34;username\u0026#34;, value = \u0026#34;returnedObj.username\u0026#34;) @Tag(key = \u0026#34;age\u0026#34;, value = \u0026#34;returnedObj.age\u0026#34;) public User methodYouWantToTrace(String param1, String param2) { // ... } Sample codes only\n","title":"Use annotation to mark the method you want to trace.","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-trace-annotation/"},{"content":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. SkyWalking Data Source Before you start, please install the SkyWalking data source plugin. In the data source config panel, chose the SkyWalking and set the url to the OAP server graphql service address, the default port is 12800. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. 
And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Topology Dashboards Dashboards Settings For now, SkyWalking support General Service and Service Mesh topology dashboards, the layer is GENERAL and MESH respectively. The following configuration can reuse the above General Service dashboard and add a new variable Plugin_SkyWalking for the dashboard: Add Topology Panel  Chose the Node Graph chart. Set Layer and Service by the variables. If you want to show all services in this layer, set Service empty. Set Node Metrics and Edge Metrics which you want to show on the topology.   Configure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","title":"Use Grafana As The UI","url":"/docs/main/latest/en/setup/backend/ui-grafana/"},{"content":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. SkyWalking Data Source Before you start, please install the SkyWalking data source plugin. 
In the data source config panel, chose the SkyWalking and set the url to the OAP server graphql service address, the default port is 12800. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Topology Dashboards Dashboards Settings For now, SkyWalking support General Service and Service Mesh topology dashboards, the layer is GENERAL and MESH respectively. The following configuration can reuse the above General Service dashboard and add a new variable Plugin_SkyWalking for the dashboard: Add Topology Panel  Chose the Node Graph chart. Set Layer and Service by the variables. If you want to show all services in this layer, set Service empty. Set Node Metrics and Edge Metrics which you want to show on the topology.   Configure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). 
Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","title":"Use Grafana As The UI","url":"/docs/main/next/en/setup/backend/ui-grafana/"},{"content":"Use Grafana As The UI Since 9.4.0, SkyWalking provide PromQL Service. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. Configure Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   
Sampled Records Same as the Sort Metrics.\nPreview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","title":"Use Grafana As The UI","url":"/docs/main/v9.4.0/en/setup/backend/ui-grafana/"},{"content":"Use Grafana As The UI Since 9.4.0, SkyWalking provide PromQL Service. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. Configure Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nPreview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. 
You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","title":"Use Grafana As The UI","url":"/docs/main/v9.5.0/en/setup/backend/ui-grafana/"},{"content":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. 
Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","title":"Use Grafana As The UI","url":"/docs/main/v9.6.0/en/setup/backend/ui-grafana/"},{"content":"Use Grafana As The UI SkyWalking provide PromQL Service since 9.4.0 and LogQL Service since 9.6.0. You can choose Grafana as the SkyWalking UI. About the installation and how to use please refer to the official document.\nNotice \u0026lt;1\u0026gt;, Gafana is AGPL-3.0 license, which is very different from Apache 2.0. Please follow AGPL 3.0 license requirements.\nNotice \u0026lt;2\u0026gt;, SkyWalking always uses its native UI as first class. All visualization features are only available on native UI. Grafana UI is an extension on our support of PromQL APIs. We don\u0026rsquo;t maintain or promise the complete Grafana UI dashboard setup.\nConfigure Data Source Prometheus Data Source In the data source config panel, chose the Prometheus and set the url to the OAP server address, the default port is 9090. SkyWalking Data Source Before you start, please install the SkyWalking data source plugin. In the data source config panel, chose the SkyWalking and set the url to the OAP server graphql service address, the default port is 12800. Loki Data Source In the data source config panel, chose the Loki and set the url to the OAP server address, the default port is 3100. Configure Metric Dashboards Dashboards Settings The following steps are the example of config a General Service dashboard:\n Create a dashboard named General Service. A layer is recommended as a dashboard. Configure variables for the dashboard: After configure, you can select the service/instance/endpoint on the top of the dashboard:   Add Panels The following contents show how to add several typical metrics panels. General settings:\n Chose the metrics and chart. Set Query options --\u0026gt; Min interval = 1m, because the metrics min time bucket in SkyWalking is 1m. Add PromQL expressions, use the variables configured above for the labels then you can select the labels value from top. Note: Some metrics values may be required calculations to match units. Select the returned labels you want to show on panel. Test query and save the panel.  Common Value Metrics  For example service_apdex and Time series chart. Add PromQL expression, the metric scope is Service, so add labels service and layer for match. 
Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Labeled Value Metrics  For example service_percentile and Time series chart. Add PromQL expressions, the metric scope is Service, add labels service and layer for match. And it\u0026rsquo;s a labeled value metric, add labels='0,1,2,3,4' filter the result label, and addrelabels='P50,P75,P90,P95,P99' rename the result label. Set Connect null values --\u0026gt; Always and Show points --\u0026gt; Always because when the query interval \u0026gt; 1hour or 1day SkyWalking return the hour/day step metrics values.   Sort Metrics  For example service_instance_cpm and Bar gauge chart. Add PromQL expressions, add labels parent_service and layer for match, add top_n='10' and order='DES' filter the result. Set the Calculation --\u0026gt; Latest*.   Sampled Records Same as the Sort Metrics.\nConfigure Topology Dashboards Dashboards Settings For now, SkyWalking support General Service and Service Mesh topology dashboards, the layer is GENERAL and MESH respectively. The following configuration can reuse the above General Service dashboard and add a new variable Plugin_SkyWalking for the dashboard: Add Topology Panel  Chose the Node Graph chart. Set Layer and Service by the variables. If you want to show all services in this layer, set Service empty. Set Node Metrics and Edge Metrics which you want to show on the topology.   Configure Log Dashboard Dashboards Settings The following steps are the example of config a log dashboard:\n Create a dashboard named Log. Configure variables for the dashboard:  Please make sure service_instance and endpoint variable enabled Include All option and set Custom all value to * or blank (typed by space button on the keyboard):  Tags variable is a little different from others, for more details, please refer Ad hoc filters:  After configure, you can select log query variables on the top of the dashboard:   Add Log Panel The following steps show how to add a log panel.\n Choose Logs chart. Set the Line limit value (The max number of logs to return in a query) and Order value (Determines the sort order of logs). Add LogQL expressions, use the variables configured above for the labels and searching keyword. service_instance \u0026amp; endpoint variable ref should use raw variable-format-options to prevent it value be escaped. Test query and save the panel.   Preview on demo.skywalking.a.o SkyWalking community provides a preview site for services of General and Service Mesh layers from the demo environment. You could take a glance through Preview metrics on Grafana of the demo deployment.\nNotice, we don\u0026rsquo;t provide all setups due to our monitoring target expanding fast. This demo is for helping you understand the above documents only.\n","title":"Use Grafana As The UI","url":"/docs/main/v9.7.0/en/setup/backend/ui-grafana/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. 
We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. 
This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. 
A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/latest/en/concepts-and-designs/sdk-profiling/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. 
This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. 
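For reference, the inline snippet above lost its whitespace and the exception variable name during extraction. A minimal, self-contained sketch of the same demo (the Task1/Task2 bodies and the thread pool setup here are placeholder assumptions for illustration; only the latch handling mirrors the original) could look like:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ProcessWithThreadPoolDemo {

    // Placeholder tasks; in the post they contend with each other and have unstable execution time.
    static class Task1 implements Runnable {
        private final CountDownLatch latch;
        Task1(CountDownLatch latch) { this.latch = latch; }
        public void run() { /* do some work */ latch.countDown(); }
    }

    static class Task2 implements Runnable {
        private final CountDownLatch latch;
        Task2(CountDownLatch latch) { this.latch = latch; }
        public void run() { /* do some work */ latch.countDown(); }
    }

    private static final ExecutorService threadPool = Executors.newFixedThreadPool(2);

    static void processWithThreadPool() {
        final CountDownLatch countDownLatch = new CountDownLatch(2);
        threadPool.submit(new Task1(countDownLatch));
        threadPool.submit(new Task2(countDownLatch));
        try {
            // Wait up to 500ms for both tasks; slow executions park here, which is what the profiler catches.
            countDownLatch.await(500, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}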
In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/next/en/concepts-and-designs/sdk-profiling/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. 
Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. 
If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedExceptione) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. 
This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/v9.0.0/en/concepts-and-designs/sdk-profiling/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. 
It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedExceptione) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. 
This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/v9.1.0/en/concepts-and-designs/sdk-profiling/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? 
Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. 
Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. 
Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/v9.2.0/en/concepts-and-designs/sdk-profiling/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/v9.3.0/en/concepts-and-designs/sdk-profiling/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. 
Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  
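To make the dump-merging estimate described above concrete (a method seen in dumps 4-8 is assumed to span roughly from dump 4 to dump 8), here is a purely illustrative sketch; it is not SkyWalking's actual analysis code, which merges whole stack trees rather than single frames:

import java.util.List;

class StackElementDurationSketch {
    // dumpTimestampsMs: timestamps of the periodic thread dumps, in order.
    // firstSeen/lastSeen: indices of the first and last dump containing the method's frame.
    // The method started before firstSeen and ended after lastSeen, so the
    // difference of the dump timestamps is a close-enough duration estimate.
    static long estimateDurationMs(List<Long> dumpTimestampsMs, int firstSeen, int lastSeen) {
        return dumpTimestampsMs.get(lastSeen) - dumpTimestampsMs.get(firstSeen);
    }
}

For example, with dumps taken every 10ms, a frame first seen in dump 4 and last seen in dump 8 would be estimated at roughly 40ms.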
Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. 
Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/v9.4.0/en/concepts-and-designs/sdk-profiling/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
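To make the first bullet above concrete, this is roughly what one ad-hoc span looks like with Zipkin Brave (StockService, checkStock and the tracer wiring are illustrative placeholders); multiply this block by every suspicious call site to see the maintenance and overhead cost the list describes.

import brave.ScopedSpan;
import brave.Tracer;

// One hand-written span around one suspicious call; each extra measurement point
// means another block like this to write, re-deploy, and later remember to remove.
final class StockService {
    private final Tracer tracer; // provided by the application's Brave/Zipkin setup
    StockService(Tracer tracer) { this.tracer = tracer; }

    void checkStockTraced(String orderId) {
        ScopedSpan span = tracer.startScopedSpan("checkStock");
        try {
            checkStock(orderId);            // the code under suspicion
        } catch (RuntimeException | Error e) {
            span.error(e);                  // record the failure on the span
            throw e;
        } finally {
            span.finish();                  // always report, even on error
        }
    }

    private void checkStock(String orderId) { /* ... */ }
}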
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/v9.5.0/en/concepts-and-designs/sdk-profiling/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. 
Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  
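A hedged sketch of that entry-point gating (all names are invented; SkyWalking’s real implementation differs): the decision to start dumping is made once per request, from the trace context, instead of instrumenting individual methods.

// Sketch only, invented names: profile a request only if its trace entered through the
// targeted endpoint and has already run long enough to be worth the dump overhead.
final class ProfileGate {
    private final String targetEndpoint;   // e.g. "POST:/orders", taken from the profile target
    private final long minDurationMillis;  // only slow requests are worth dumping

    ProfileGate(String targetEndpoint, long minDurationMillis) {
        this.targetEndpoint = targetEndpoint;
        this.minDurationMillis = minDurationMillis;
    }

    boolean shouldProfile(String traceEntryEndpoint, long elapsedMillisSoFar) {
        return targetEndpoint.equals(traceEntryEndpoint)
            && elapsedMillisSoFar >= minDurationMillis;
    }
}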
Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. 
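The first limitation above is easy to reproduce. In a tight loop like the following (illustrative code, not from the demo), fastLookup shows up in nearly every periodic dump even though each call is cheap, so dump analysis reports it as the hot spot:

// Illustrative only: each call is fast, but because it runs in a tight loop it is present
// in almost every periodic thread dump, so profile analysis marks it as "slow".
final class HotLoop {
    static long fastLookup(long i) { return i * 31; } // microseconds per call

    public static void main(String[] args) {
        long sum = 0;
        for (long i = 0; i < 100_000_000L; i++) {
            sum += fastLookup(i); // dominates wall-clock time only in aggregate
        }
        System.out.println(sum);
    }
}

As the text notes, this is still useful: the loop really is where the time goes, even if no single call is slow.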
Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/v9.6.0/en/concepts-and-designs/sdk-profiling/"},{"content":"Use Profiling to Fix the Blind Spot of Distributed Tracing  This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\n This post introduces a way to automatically profile code in production with Apache SkyWalking. We believe the profile method helps reduce maintenance and overhead while increasing the precision in root cause analysis.\nLimitations of the Distributed Tracing In the early days, metrics and logging systems were the key solutions in monitoring platforms. With the adoption of microservice and distributed system-based architecture, distributed tracing has become more important. Distributed tracing provides relevant service context, such as system topology map and RPC parent-child relationships.\nSome claim that distributed tracing is the best way to discover the cause of performance issues in a distributed system. It’s good at finding issues at the RPC abstraction, or in the scope of components instrumented with spans. However, it isn’t that perfect.\nHave you been surprised to find a span duration longer than expected, but no insight into why? What should you do next? Some may think that the next step is to add more instrumentation, more spans into the trace, thinking that you would eventually find the root cause, with more data points. We’ll argue this is not a good option within a production environment. Here’s why:\n There is a risk of application overhead and system overload. Ad-hoc spans measure the performance of specific scopes or methods, but picking the right place can be difficult. To identify the precise cause, you can “instrument” (add spans to) many suspicious places. The additional instrumentation costs more CPU and memory in the production environment. Next, ad-hoc instrumentation that didn’t help is often forgotten, not deleted. This creates a valueless overhead load. In the worst case, excess instrumentation can cause performance problems in the production app or overload the tracing system. The process of ad-hoc (manual) instrumentation usually implies at least a restart. Trace instrumentation libraries, like Zipkin Brave, are integrated into many framework libraries. To instrument a method’s performance typically implies changing code, even if only an annotation. This implies a re-deploy. Even if you have the way to do auto instrumentation, like Apache SkyWalking, you still need to change the configuration and reboot the app. Otherwise, you take the risk of GC caused by hot dynamic instrumentation. Injecting instrumentation into an uninstrumented third party library is hard and complex. It takes more time and many won’t know how to do this. Usually, we don’t have code line numbers in the distributed tracing. Particularly when lambdas are in use, it can be difficult to identify the line of code associated with a span. Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.  
Regardless of the above choices, to dive deeper requires collaboration with your Ops or SRE team, and a shared deep level of knowledge in distributed tracing.\nProfiling in Production Introduction To reuse distributed tracing to achieve method scope precision requires an understanding of the above limitations and a different approach. We called it PROFILE.\nMost high-level languages build and run on a thread concept. The profile approach takes continuous thread dumps. We merge the thread dumps to estimate the execution time of every method shown in the thread dumps. The key for distributed tracing is the tracing context, identifiers active (or current) for the profiled method. Using this trace context, we can weave data harvested from profiling into existing traces. This allows the system to automate otherwise ad-hoc instrumentation. Let’s dig deeper into how profiling works:\nWe consider a method invocation with the same stack depth and signature (method, line number etc), the same operation. We derive span timestamps from the thread dumps the same operation is in. Let’s put this visually:\nAbove, represents 10 successive thread dumps. If this method is in dumps 4-8, we assume it started before dump 4 and finished after dump 8. We can’t tell exactly when the method started and stopped. but the timestamps of thread dumps are close enough.\nTo reduce overhead caused by thread dumps, we only profile methods enclosed by a specific entry point, such as a URI or MVC Controller method. We identify these entry points through the trace context and the APM system.\nThe profile does thread dump analysis and gives us:\n The root cause, precise to the line number in the code. Reduced maintenance as ad-hoc instrumentation is obviated. Reduced overload risk caused by ad-hoc instrumentation. Dynamic activation: only when necessary and with a very clear profile target.  Implementing Precise Profiling Distributed profiling is built-into Apache SkyWalking application performance monitoring (APM). Let’s demonstrate how the profiling approach locates the root cause of the performance issue.\nfinal CountDownLatchcountDownLatch= new CountDownLatch(2); threadPool.submit(new Task1(countDownLatch)); threadPool.submit(new Task2(countDownLatch)); try { countDownLatch.await(500, TimeUnit.MILLISECONDS); } catch (InterruptedException) { } Task1 and Task2 have a race condition and unstable execution time: they will impact the performance of each other and anything calling them. While this code looks suspicious, it is representative of real life. People in the OPS/SRE team are not usually aware of all code changes and who did them. They only know something in the new code is causing a problem.\nTo make matters interesting, the above code is not always slow: it only happens when the condition is locked. In SkyWalking APM, we have metrics of endpoint p99/p95 latency, so, we are easy to find out the p99 of this endpoint is far from the avg response time. However, this is not the same as understanding the cause of the latency. To locate the root cause, add a profile condition to this endpoint: duration greater than 500ms. This means faster executions will not add profiling load.\nThis is a typical profiled trace segment (part of the whole distributed trace) shown on the SkyWalking UI. We now notice the “service/processWithThreadPool” span is slow as we expected, but why? This method is the one we added the faulty code to. As the UI shows that method, we know the profiler is working. 
Now, let’s see what the profile analysis result say.\nThis is the profile analysis stack view. We see the stack element names, duration (include/exclude the children) and slowest methods have been highlighted. It shows clearly, “sun.misc.Unsafe.park” costs the most time. If we look for the caller, it is the code we added: CountDownLatch.await.\nThe Limitations of the Profile Method No diagnostic tool can fit all cases, not even the profile method.\nThe first consideration is mistaking a repeatedly called method for a slow method. Thread dumps are periodic. If there is a loop of calling one method, the profile analysis result would say the target method is slow because it is captured every time in the dump process. There could be another reason. A method called many times can also end up captured in each thread dump. Even so, the profile did what it is designed for. It still helps the OPS/SRE team to locate the code having the issue.\nThe second consideration is overhead, the impact of repeated thread dumps is real and can’t be ignored. In SkyWalking, we set the profile dump period to at least 10ms. This means we can’t locate method performance issues if they complete in less than 10ms. SkyWalking has a threshold to control the maximum parallel degree as well.\nThe third consideration is profiling wouldn\u0026rsquo;t work for a low latency trace. Because the trace could be completed before profiling starts. But in reality, this is not an issue, profiling targets slow requests.\nUnderstanding the above keeps distributed tracing and APM systems useful for your OPS/SRE team.\nSupported Agents This feature was first implemented in Java agent since 7.0. The Python agent supported this since 0.7.0. Read this for more details\n","title":"Use Profiling to Fix the Blind Spot of Distributed Tracing","url":"/docs/main/v9.7.0/en/concepts-and-designs/sdk-profiling/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. 
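For the canary steps above, the switch on each rebooted service usually comes down to two agent settings. A hedged example of the relevant agent.config entries (the backend address and namespace values are placeholders):

# Point the rebooted service at the new OAP cluster (placeholder address).
collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:new-oap.example.internal:11800}
# Use a fresh namespace so traffic against the old and new clusters never mixes.
agent.namespace=${SW_AGENT_NAMESPACE:upgrade-canary}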
For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this hot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team’s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/latest/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster using the latest version of the SkyWalking OAP cluster with a new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), point collector.backend_service to the new OAP backend, and use/add a new namespace (agent.namespace in the Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters can be discarded.  The Canary Release method works for any version upgrade.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable to the new OAP clusters. In version 6.5.0+ (especially for agent versions), we have an agent hot reboot trigger mechanism. This streamlines the upgrade process: we deploy a new cluster using the latest version of the SkyWalking OAP cluster with a new database cluster, and shift the traffic to the new cluster once and for all. With this mechanism, all agents enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this hot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team’s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/next/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. 
the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.0.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. 
For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.1.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.2.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. 
the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.3.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. 
For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.4.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.5.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. 
the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.6.0/en/faq/v6-version-upgrade/"},{"content":"V6 upgrade SkyWalking v6 is widely used in many production environments. Follow the steps in the guide below to learn how to upgrade to a new release.\nNOTE: The ways to upgrade are not limited to the steps below.\nUse Canary Release Like all applications, you may upgrade SkyWalking using the canary release method through the following steps.\n Deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster. Once the target service (i.e. the service being monitored) has upgraded the agent.jar (or simply by rebooting), have collector.backend_service pointing to the new OAP backend, and use/add a new namespace(agent.namespace in Table of Agent Configuration Properties). The namespace will prevent conflicts from arising between different versions. When all target services have been rebooted, the old OAP clusters could be discarded.  The Canary Release method works for any version upgrades.\nOnline Hot Reboot Upgrade The reason we require Canary Release is that the SkyWalking agent has cache mechanisms, and switching to a new cluster causes the cache to become unavailable for new OAP clusters. In version 6.5.0+ (especially for agent versions), we have Agent hot reboot trigger mechanism. This streamlines the upgrade process as we deploy a new cluster by using the latest version of SkyWalking OAP cluster with the new database cluster, and shift the traffic to the new cluster once and for all. Based on the mechanism, all agents will enter the cool_down mode, and come back online. 
For more details, see the backend setup documentation.\nNOTE: A known bug in 6.4.0 is that its agent may have re-connection issues; therefore, even though this bot reboot mechanism has been included in 6.4.0, it may not work under some network scenarios, especially in Kubernetes.\nAgent Compatibility All versions of SkyWalking 6.x (and even 7.x) are compatible with each other, so users could simply upgrade the OAP servers. As the agent has also been enhanced in the latest versions, according to the SkyWalking team\u0026rsquo;s recommendation, upgrade the agent as soon as practicable.\n","title":"V6 upgrade","url":"/docs/main/v9.7.0/en/faq/v6-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/latest/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/next/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.0.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. 
Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.1.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.2.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.3.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.4.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. 
Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.5.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.6.0/en/faq/v8-version-upgrade/"},{"content":"V8 upgrade Starting from SkyWalking v8, the v3 protocol has been used. This makes it incompatible with previous releases. Users who intend to upgrade in v8 series releases could follow the steps below.\nRegisters in v6 and v7 have been removed in v8 for better scaling out performance. Please upgrade following the instructions below.\n Use a different storage or a new namespace. You may also consider erasing the whole storage indexes or tables related to SkyWalking. Deploy the whole SkyWalking cluster, and expose it in a new network address. If you are using language agents, upgrade the new agents too; meanwhile, make sure the agents are supported in a different language. Then, set up the backend address to the new SkyWalking OAP cluster.  ","title":"V8 upgrade","url":"/docs/main/v9.7.0/en/faq/v8-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. 
OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/latest/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/next/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. 
If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic, add layer column to table InstanceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic and InstanceTraffic tables before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services and instances when traffic comes. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.0.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.1.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. 
You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.2.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.3.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. 
All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.4.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.5.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). 
This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  ","title":"V9 upgrade","url":"/docs/main/v9.6.0/en/faq/v9-version-upgrade/"},{"content":"V9 upgrade Starting from v9, SkyWalking introduces the new core concept Layer. A layer represents an abstract framework in computer science, such as Operating System(OS_LINUX layer), Kubernetes(k8s layer). This kind of layer would be catalogs on the new booster UI of various services/instances detected by different technologies. The query-protocol metadata-v2 has been used. The compatibility with previous releases is as below.\nQuery compatibility from previous version  The query-protocol metadata-v1 is provided on the top of the v2 implementation. All metrics are compatible with the previous data format, so you wouldn\u0026rsquo;t lose metrics.  Notice Incompatibility (1), the UI template configuration protocol is incompatible.\nIncompatibility  The UI configuration protocol has been changed by following the design of new booster UI. So, the RocketBot UI can\u0026rsquo;t work with the v9 backend. You need to remove ui_template index/template/table in your chosen storage, and reboot OAP in default or init mode. MAL: metric level function add an required argument Layer. Previous MAL expressions should add this argument. LAL: Extractor add function layer. If don\u0026rsquo;t set it manual, the default layer is GENERAL and the logs from ALS the default layer is mesh. Storage:Add service_id, short_name and layer columns to table ServiceTraffic. These data would be incompatible with previous versions. Make sure to remove the older ServiceTraffic table before OAP(v9) starts. OAP would generate the new table in the start procedure, and recreate all existing services when traffic comes. Since V9.1, SQL Database: move Tags list from Segment, Logs, Alarms to their additional tables, remove them before OAP starts. UI-template: Re-design for V9. Make sure to remove the older ui_template table before OAP(v9) starts.  
","title":"V9 upgrade","url":"/docs/main/v9.7.0/en/faq/v9-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/latest/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/next/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.0.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.1.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.2.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. 
When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.3.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.4.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.5.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.6.0/en/faq/v3-version-upgrade/"},{"content":"Version 3.x -\u0026gt; 5.0.0-alpha Upgrade FAQs Collector Problem There is no information showing in the UI.\nCause In the upgrade from version 3.2.6 to 5.0.0, the existing Elasticsearch indexes are kept, but aren\u0026rsquo;t compatible with 5.0.0-alpha. When service name is registered, ElasticSearch will create this column by default type string, which will lead to an error.\nSolution Clean the data folder in ElasticSearch and restart ElasticSearch, collector and your application under monitoring.\n","title":"Version 3.x -\u003e 5.0.0-alpha Upgrade FAQs","url":"/docs/main/v9.7.0/en/faq/v3-version-upgrade/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. 
redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/latest/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/next/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/v9.3.0/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. 
The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/v9.4.0/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/v9.5.0/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  
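As a rough illustration of the span shape listed above, here is a minimal sketch of how an agent plugin might report a Redis read so that it appears as a virtual cache node. It assumes the Java agent's plugin-development classes (ContextManager, SpanLayer, StringTag) and uses a placeholder peer address; the exact class names and overloads are assumptions to verify against the agent version in use, not a documented toolkit API.

import org.apache.skywalking.apm.agent.core.context.ContextManager;
import org.apache.skywalking.apm.agent.core.context.tag.StringTag;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;
import org.apache.skywalking.apm.agent.core.context.trace.SpanLayer;

public class CacheSpanSketch {
    public void readFromRedis(String key) {
        // Exit span, because the cache server is remote; the peer is its network address
        // (for an in-memory cache such as Guava-cache it would be a local span with a null peer).
        AbstractSpan span = ContextManager.createExitSpan("Redis/GET", "redis.example.com:6379"); // placeholder peer
        span.setLayer(SpanLayer.CACHE);                  // layer == CACHE (assumes the CACHE enum value in this agent version)
        new StringTag("cache.type").set(span, "redis");  // the type of cache system
        new StringTag("cache.op").set(span, "read");     // read or write
        new StringTag("cache.cmd").set(span, "get");     // the cache command
        new StringTag("cache.key").set(span, key);       // the cache key
        try {
            // ... perform the actual cache call with the real client here ...
        } finally {
            ContextManager.stopSpan();
        }
    }
}

Spans tagged this way are grouped by their peer into a virtual cache node, which is what drives the traffic, latency, success-rate and slow-operation panels described above.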
Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/v9.6.0/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Cache Virtual cache represent the cache nodes detected by server agents' plugins. The performance metrics of the cache are also from the Cache client-side perspective.\nFor example, Redis plugins in the Java agent could detect the latency of command As a result, SkyWalking would show traffic, latency, success rate, and sampled slow operations(write/read) powered by backend analysis capabilities in this dashboard.\nThe cache operation span should have\n It is an Exit or Local span Span\u0026rsquo;s layer == CACHE Tag key = cache.type, value = The type of cache system , e.g. redis Tag key = cache.op, value = read or write , indicates the value of tag cache.cmd is used for write or read operation Tag key = cache.cmd, value = the cache command , e.g. get,set,del Tag key = cache.key, value = the cache key If the cache system is in-memory (e.g. Guava-cache), agents' plugin would create a local span usually, and the span\u0026rsquo;s peer would be null ,otherwise the peer is the network address(IP or domain) of Cache server.  Ref slow cache doc to know more slow Cache commands settings.\n","title":"Virtual Cache","url":"/docs/main/v9.7.0/en/setup/service-agent/virtual-cache/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/latest/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/next/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represents the database nodes detected by server agents' plugins. 
The performance metrics of the databases are also from Database client side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, Mariadb, MSSQL) in the Java agent could detect the latency of SQL performance, as well as SQL statements. As a result, in this dashboard, SkyWalking would show database traffic, latency, success rate and sampled slow SQLs powered by backend analysis capabilities.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  ","title":"Virtual Database","url":"/docs/main/v9.0.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  ","title":"Virtual Database","url":"/docs/main/v9.1.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  ","title":"Virtual Database","url":"/docs/main/v9.2.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/v9.3.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. 
The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/v9.4.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/v9.5.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/v9.6.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Database Virtual databases represent the database nodes detected by server agents' plugins. The performance metrics of the databases are also from the Database client-side perspective.\nFor example, JDBC plugins(MySQL, PostgreSQL, MariaDB, MSSQL) in the Java agent could detect the latency of SQL performance and SQL statements. As a result, SkyWalking would show database traffic, latency, success rate, and sampled slow SQLs powered by backend analysis capabilities in this dashboard.\nThe Database access span should have\n It is an Exit span Span\u0026rsquo;s layer == DATABASE Tag key = db.statement, value = SQL statement Tag key = db.type, value = the type of Database Span\u0026rsquo;s peer is the network address(IP or domain) of Database server.  
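The same idea applies to the database case. Below is a minimal sketch of an exit span carrying the db.type and db.statement tags listed above, again assuming the Java agent's plugin-development classes (ContextManager, Tags, SpanLayer); the operation name and the db.example.com peer are placeholders, not values mandated by the virtual-database contract.

import org.apache.skywalking.apm.agent.core.context.ContextManager;
import org.apache.skywalking.apm.agent.core.context.tag.Tags;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractSpan;
import org.apache.skywalking.apm.agent.core.context.trace.SpanLayer;

public class DatabaseSpanSketch {
    public void query(String sql) {
        // Exit span; the peer is the database server's address, which is what
        // groups these client-side spans under one virtual database node.
        AbstractSpan span = ContextManager.createExitSpan("Mysql/JDBC/Query", "db.example.com:3306"); // placeholder peer
        span.setLayer(SpanLayer.DB);          // layer == DATABASE on the dashboard
        Tags.DB_TYPE.set(span, "Mysql");      // the type of Database
        Tags.DB_STATEMENT.set(span, sql);     // the SQL statement
        try {
            // ... execute the statement with the real JDBC client here ...
        } finally {
            ContextManager.stopSpan();
        }
    }
}

Because the metrics are taken from this client-side perspective, the latency, success rate and sampled slow SQLs shown on the dashboard reflect what the calling service observed, not measurements taken on the database server itself.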
Ref slow cache doc to know more slow SQL settings.\n","title":"Virtual Database","url":"/docs/main/v9.7.0/en/setup/service-agent/virtual-database/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/latest/en/setup/service-agent/virtual-mq/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/next/en/setup/service-agent/virtual-mq/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/v9.3.0/en/setup/service-agent/virtual-mq/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. 
The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/v9.4.0/en/setup/service-agent/virtual-mq/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/v9.5.0/en/setup/service-agent/virtual-mq/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/v9.6.0/en/setup/service-agent/virtual-mq/"},{"content":"Virtual Message Queue (MQ) Virtual MQ represent the MQ nodes detected by server agents' plugins. 
The performance metrics of the MQ are also from the MQ client-side perspective.\nFor example, Kafka plugins in the Java agent could detect the transmission latency of message As a result, SkyWalking would show message count, transmission latency, success rate powered by backend analysis capabilities in this dashboard.\nThe MQ operation span should have\n It is an Exit(at producer side) or Entry(at consumer side) span Span\u0026rsquo;s layer == MQ Tag key = mq.queue, value = MQ queue name Tag key = mq.topic, value = MQ queue topic , it\u0026rsquo;s optional as some MQ don\u0026rsquo;t have topic concept. Tag key = transmission.latency, value = Transmission latency from consumer to producer Set peer at both sides(producer and consumer). And the value of peer should represent the MQ server cluster.  ","title":"Virtual Message Queue (MQ)","url":"/docs/main/v9.7.0/en/setup/service-agent/virtual-mq/"},{"content":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","title":"Webflux Tracing Assistant APIs","url":"/docs/skywalking-java/latest/en/setup/service-agent/java-agent/application-toolkit-webflux/"},{"content":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","title":"Webflux Tracing Assistant APIs","url":"/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-webflux/"},{"content":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","title":"Webflux Tracing Assistant APIs","url":"/docs/skywalking-java/v9.0.0/en/setup/service-agent/java-agent/application-toolkit-webflux/"},{"content":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","title":"Webflux Tracing Assistant APIs","url":"/docs/skywalking-java/v9.1.0/en/setup/service-agent/java-agent/application-toolkit-webflux/"},{"content":"Webflux Tracing Assistant APIs These APIs provide advanced features to enhance interaction capabilities in Webflux cases.\nAdd the toolkit to your project dependency, through Maven or Gradle\n\u0026lt;dependency\u0026gt; \u0026lt;groupId\u0026gt;org.apache.skywalking\u0026lt;/groupId\u0026gt; \u0026lt;artifactId\u0026gt;apm-toolkit-webflux\u0026lt;/artifactId\u0026gt; \u0026lt;version\u0026gt;${skywalking.version}\u0026lt;/version\u0026gt; \u0026lt;/dependency\u0026gt; The following scenarios are supported for tracing assistance.\nContinue Tracing from Client The WebFluxSkyWalkingOperators#continueTracing provides manual tracing continuous capabilities to adopt native Webflux APIs\n@GetMapping(\u0026#34;/testcase/annotation/mono/onnext\u0026#34;) public Mono\u0026lt;String\u0026gt; monoOnNext(@RequestBody(required = false) String body) { return Mono.subscriberContext() .flatMap(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(ctx, () -\u0026gt; { visit(\u0026#34;http://localhost:\u0026#34; + serverPort + \u0026#34;/testcase/success\u0026#34;); return Mono.just(\u0026#34;Hello World\u0026#34;); })); } @GetMapping(\u0026#34;/login/userFunctions\u0026#34;) public Mono\u0026lt;Response\u0026lt;FunctionInfoResult\u0026gt;\u0026gt; functionInfo(ServerWebExchange exchange, @RequestParam String userId) { return ReactiveSecurityContextHolder.getContext() .flatMap(context -\u0026gt; { return exchange.getSession().map(session -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(exchange, () -\u0026gt; handle(session, userId))); }); } private Response\u0026lt;FunctionInfoResult\u0026gt; handle(WebSession session, String userId) { //...dubbo rpc  } Mono.just(\u0026#34;key\u0026#34;).subscribeOn(Schedulers.boundedElastic()) .doOnEach(WebFluxSkyWalkingOperators.continueTracing(SignalType.ON_NEXT, () -\u0026gt; log.info(\u0026#34;test log with tid\u0026#34;))) .flatMap(key -\u0026gt; Mono.deferContextual(ctx -\u0026gt; WebFluxSkyWalkingOperators.continueTracing(Context.of(ctx), () -\u0026gt; { redis.hasKey(key); return Mono.just(\u0026#34;SUCCESS\u0026#34;); }) )); ... 
Fetch trace context relative IDs @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // fetch trace ID  String traceId = WebFluxSkyWalkingTraceContext.traceId(exchange); // fetch segment ID  String segmentId = WebFluxSkyWalkingTraceContext.segmentId(exchange); // fetch span ID  int spanId = WebFluxSkyWalkingTraceContext.spanId(exchange); return chain.filter(exchange); } Manipulate Correlation Context @Override public Mono\u0026lt;Void\u0026gt; filter(ServerWebExchange exchange, GatewayFilterChain chain){ // Set correlation data can be retrieved by upstream nodes.  WebFluxSkyWalkingTraceContext.putCorrelation(exchange, \u0026#34;key1\u0026#34;, \u0026#34;value\u0026#34;); // Get correlation data  Optional\u0026lt;String\u0026gt; value2 = WebFluxSkyWalkingTraceContext.getCorrelation(exchange, \u0026#34;key2\u0026#34;); // dosomething...  return chain.filter(exchange); } Sample codes only\n","title":"Webflux Tracing Assistant APIs","url":"/docs/skywalking-java/v9.2.0/en/setup/service-agent/java-agent/application-toolkit-webflux/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/latest/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 10. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking\u0026rsquo;s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to install SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. 
If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplinking telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/next/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.0.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. 
If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.1.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.2.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. 
If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.3.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.4.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. 
If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.5.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.6.0/readme/"},{"content":"Welcome This is the official documentation of SkyWalking 9. Welcome to the SkyWalking community!\nHere you can learn all you need to know about SkyWalking’s architecture, understand how to deploy and use SkyWalking, and contribute to the project based on SkyWalking\u0026rsquo;s contributing guidelines.\n  Concepts and Designs. You\u0026rsquo;ll find the core logic behind SkyWalking. You may start from here if you want to understand what is going on under our cool features and visualization.\n  Setup. A guide to installing SkyWalking for different use cases. It is an observability platform that supports multiple observability modes.\n  Contributing Guides. 
If you are a PMC member, a committer, or a new contributor, learn how to start contributing with these guides!\n  Protocols. The protocols show how agents/probes and the backend communicate with one another. Anyone interested in uplink telemetry data should definitely read this.\n  FAQs. A manifest of known issues with setup and secondary developments processes. Should you encounter any problems, check here first.\n  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/main/v9.7.0/readme/"},{"content":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB.\n Installation. Instruments about how to download and onboard BanyanDB server, Banyand. Clients. Some native clients to access Banyand. Observability. Learn how to effectively monitor, diagnose and optimize Banyand. Concept. Learn the concepts of Banyand. Includes the architecture, data model, and so on. CRUD Operations. To create, read, update, and delete data points or entities on resources in the schema.  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/skywalking-banyandb/latest/readme/"},{"content":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB.\n Installation. Instruments about how to download and onboard BanyanDB server, Banyand. Clients. Some native clients to access Banyand. Observability. Learn how to effectively monitor, diagnose and optimize Banyand. Concept. Learn the concepts of Banyand. Includes the architecture, data model, and so on. CRUD Operations. To create, read, update, and delete data points or entities on resources in the schema.  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/skywalking-banyandb/next/readme/"},{"content":"Welcome Welcome to the BanyanDB Here you can learn all you need to know about BanyanDB.\n Installation. 
Instruments about how to download and onboard BanyanDB server, Banyand. Clients. Some native clients to access Banyand. Observability. Learn how to effectively monitor, diagnose and optimize Banyand. Concept. Learn the concepts of Banyand. Includes the architecture, data model, and so on. CRUD Operations. To create, read, update, and delete data points or entities on resources in the schema.  You might also find these links interesting:\n  The latest and old releases are all available at Apache SkyWalking release page. The change logs can be found here.\n  SkyWalking WIKI hosts the context of some changes and events.\n  You can find the conference schedules, video recordings, and articles about SkyWalking in the community resource catalog.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problems. Or better yet, directly contribute by submitting a pull request to help us get better!\n","title":"Welcome","url":"/docs/skywalking-banyandb/v0.5.0/readme/"},{"content":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra E2E is an End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It’s built based on the lessons learned from tens of hundreds of test cases in the SkyWalking main repo.\nFrom here you can learn all about SkyWalking Infra E2E\u0026rsquo;s architecture, how to set up E2E testing.\n  Concepts and Designs. The most important core ideas about SkyWalking Infra E2E. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up and running E2E testing.\n  Contribution. Introduce how to contribute SkyWalking Infra E2E.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through the pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-infra-e2e/latest/readme/"},{"content":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra E2E is an End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. It’s built based on the lessons learned from tens of hundreds of test cases in the SkyWalking main repo.\nFrom here you can learn all about SkyWalking Infra E2E\u0026rsquo;s architecture, how to set up E2E testing.\n  Concepts and Designs. The most important core ideas about SkyWalking Infra E2E. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up and running E2E testing.\n  Contribution. Introduce how to contribute SkyWalking Infra E2E.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through the pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-infra-e2e/next/readme/"},{"content":"Welcome Here are SkyWalking Infra E2E official documentations. Welcome to use it.\nSkyWalking Infra E2E is an End-to-End Testing framework that aims to help developers to set up, debug, and verify E2E tests with ease. 
It’s built based on the lessons learned from tens of hundreds of test cases in the SkyWalking main repo.\nFrom here you can learn all about SkyWalking Infra E2E\u0026rsquo;s architecture, how to set up E2E testing.\n  Concepts and Designs. The most important core ideas about SkyWalking Infra E2E. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up and running E2E testing.\n  Contribution. Introduce how to contribute SkyWalking Infra E2E.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through the pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-infra-e2e/v1.3.0/readme/"},{"content":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Rover\u0026rsquo;s architecture, and how to deploy and use SkyWalking Rover.\n  Concepts and Designs. The most important core ideas about SkyWalking Rover. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Rover.\n  Guides. Guide users to develop or debug SkyWalking Rover.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through a pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-rover/latest/readme/"},{"content":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Rover\u0026rsquo;s architecture, and how to deploy and use SkyWalking Rover.\n  Concepts and Designs. The most important core ideas about SkyWalking Rover. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Rover.\n  Guides. Guide users to develop or debug SkyWalking Rover.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through a pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-rover/next/readme/"},{"content":"Welcome Here are SkyWalking Rover official documentation. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Rover\u0026rsquo;s architecture, and how to deploy and use SkyWalking Rover.\n  Concepts and Designs. The most important core ideas about SkyWalking Rover. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Rover.\n  Guides. Guide users to develop or debug SkyWalking Rover.\n  We\u0026rsquo;re always looking for help to improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your contributions through a pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-rover/v0.6.0/readme/"},{"content":"Welcome Here are SkyWalking Satellite official documentations. 
You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Satellite\u0026rsquo;s architecture, how to deploy and use SkyWalking Satellite.\n  Concepts and Designs. The most important core ideas about SkyWalking Satellite. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Satellite.\n  Guides. Guide users to develop or debug SkyWalking Satellite.\n  Protocols. Protocols show the communication ways between agents/probes, Satellite and SkyWalking. Anyone interested in uplink telemetry data should definitely read this.\n  Change logs. The feature records of the different versions.\n  FAQs. A manifest of already known setup problems, secondary developments experiments. When you are facing a problem, check here first.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your own contributions through pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-satellite/latest/readme/"},{"content":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Satellite\u0026rsquo;s architecture, how to deploy and use SkyWalking Satellite.\n  Concepts and Designs. The most important core ideas about SkyWalking Satellite. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Satellite.\n  Guides. Guide users to develop or debug SkyWalking Satellite.\n  Protocols. Protocols show the communication ways between agents/probes, Satellite and SkyWalking. Anyone interested in uplink telemetry data should definitely read this.\n  Change logs. The feature records of the different versions.\n  FAQs. A manifest of already known setup problems, secondary developments experiments. When you are facing a problem, check here first.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. Or better yet, submit your own contributions through pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-satellite/next/readme/"},{"content":"Welcome Here are SkyWalking Satellite official documentations. You\u0026rsquo;re welcome to join us.\nFrom here you can learn all about SkyWalking Satellite\u0026rsquo;s architecture, how to deploy and use SkyWalking Satellite.\n  Concepts and Designs. The most important core ideas about SkyWalking Satellite. You can learn from here if you want to understand what is going on under our cool features.\n  Setup. Introduce how to set up the SkyWalking Satellite.\n  Guides. Guide users to develop or debug SkyWalking Satellite.\n  Protocols. Protocols show the communication ways between agents/probes, Satellite and SkyWalking. Anyone interested in uplink telemetry data should definitely read this.\n  Change logs. The feature records of the different versions.\n  FAQs. A manifest of already known setup problems, secondary developments experiments. When you are facing a problem, check here first.\n  We\u0026rsquo;re always looking for help improve our documentation and codes, so please don’t hesitate to file an issue if you see any problem. 
Or better yet, submit your own contributions through pull request to help make them better.\n","title":"Welcome","url":"/docs/skywalking-satellite/v1.2.0/readme/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/latest/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/next/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  
In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.0.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.1.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.2.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  
In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.3.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.4.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.5.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  
In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.6.0/en/faq/vnode/"},{"content":"What is VNode? On the trace page, you may sometimes find nodes with their spans named VNode, and that there are no attributes for such spans.\nVNode is created by the UI itself, rather than being reported by the agent or tracing SDK. It indicates that some spans are missed in the trace data in this query.\nHow does the UI detect the missing span(s)? The UI checks the parent spans and reference segments of all spans in real time. If no parent id(segment id + span id) could be found, then it creates a VNode automatically.\nHow did this happen? The VNode appears when the trace data is incomplete.\n The agent fail-safe mechanism has been activated. The SkyWalking agent could abandon the trace data if there are any network issues between the agent and the OAP (e.g. failure to connect, slow network speeds, etc.), or if the OAP cluster is not capable of processing all traces. Some plug-ins may have bugs, and some segments in the trace do not stop correctly and are held in the memory.  In such case, the trace would not exist in the query, thus the VNode shows up.\n","title":"What is VNode?","url":"/docs/main/v9.7.0/en/faq/vnode/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/latest/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/next/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  
No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.0.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.1.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.2.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.3.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.4.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. 
Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.5.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.6.0/en/faq/time-and-timezone/"},{"content":"Why can\u0026rsquo;t I see any data in the UI? There are three main reasons no data can be shown by the UI:\n No traces have been sent to the collector. Traces have been sent, but the timezone of your containers is incorrect. Traces are in the collector, but you\u0026rsquo;re not watching the correct timeframe in the UI.  No traces Be sure to check the logs of your agents to see if they are connected to the collector and traces are being sent.\nIncorrect timezone in containers Be sure to check the time in your containers.\nThe UI isn\u0026rsquo;t showing any data Be sure to configure the timeframe shown by the UI.\n","title":"Why can't I see any data in the UI?","url":"/docs/main/v9.7.0/en/faq/time-and-timezone/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/latest/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/next/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. 
Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.0.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.1.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.2.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.3.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.4.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.5.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. 
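For the cleanup step above, the expired Hour/Day indexes can be removed through the Elasticsearch index APIs. A minimal sketch, assuming Elasticsearch is reachable at localhost:9200 and that wildcard deletion is allowed by your cluster settings; review the matched list before deleting anything:

# list the legacy hour/day indexes first and review the output
curl -X GET "localhost:9200/_cat/indices/*-day_*,*-hour_*?v"
# then delete them (destructive; newer ES versions require action.destructive_requires_name=false or explicit index names)
curl -X DELETE "localhost:9200/*-day_*,*-hour_*"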
Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.6.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x? This issue is to be expected with an upgrade from 6.x to 7.x. See the Downsampling Data Packing feature of the ElasticSearch storage.\nYou may simply delete all expired *-day_xxxxx and *-hour_xxxxx(xxxxx is a timestamp) indexes. Currently, SkyWalking uses the metrics name-xxxxx and metrics name-month_xxxxx indexes only.\n","title":"Why do metrics indexes with Hour and Day precisions stop updating after upgrade to 7.x?","url":"/docs/main/v9.7.0/en/faq/hour-day-metrics-stopping/"},{"content":"Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? Log and trace exporters are using MQ as transport channel. And metrics exporter uses gRPC, as considering the scale.\n","title":"Why does SkyWalking use RPC(gRPC and RESTful) rather than MQ as transport layer by default?","url":"/docs/main/next/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. 
Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/latest/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. 
Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.0.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? 
Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.1.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.2.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? 
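For the optional Kafka transport mentioned above, both ends have to be switched on. The sketch below only shows the general shape of the settings; the property names follow the kafka-reporter / kafka-fetcher documentation as recalled here and the broker addresses are placeholders, so verify them against your SkyWalking version:

# Java agent (agent/config/agent.config), after activating the kafka-reporter plugin jars
plugin.kafka.bootstrap_servers=kafka-broker1:9092,kafka-broker2:9092

# OAP (config/application.yml), enable the Kafka fetcher module
kafka-fetcher:
  selector: default
  default:
    bootstrapServers: kafka-broker1:9092,kafka-broker2:9092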
This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.3.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. 
Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.4.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. 
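The passage above argues for a dynamic sampling mechanism at the backend; as a simpler, static illustration of trading trace completeness for headroom, the Java agent exposes a per-segment sampler. A sketch, assuming the skywalking-java agent and treating the exact value as illustrative only:

# agent/config/agent.config: keep at most 1000 trace segments per 3 seconds per agent;
# negative or zero (the default) means sampling is off, i.e. all traces are kept
agent.sample_n_per_3_secs=1000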
The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.5.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.6.0/en/faq/why_mq_not_involved/"},{"content":"Why doesn\u0026rsquo;t SkyWalking involve MQ in its architecture? This is often asked by those who are first introduced to SkyWalking. Many believe that MQ should have better performance and should be able to support higher throughput, like the following:\nHere\u0026rsquo;s what we think.\nIs MQ appropriate for communicating with the OAP backend? This question arises when users consider the circumstances where the OAP cluster may not be powerful enough or becomes offline. But the following issues must first be addressed:\n Why do you think that the OAP is not powerful enough? Were it not powerful, the speed of data analysis wouldn\u0026rsquo;t have caught up with the producers (or agents). Then what is the point of adding new deployment requirements? 
Some may argue that the payload is sometimes higher than usual during peak times. But we must consider how much higher the payload really is. If it is higher by less than 40%, how many resources would you use for the new MQ cluster? How about moving them to new OAP and ES nodes? Say it is higher by 40% or more, such as by 70% to 200%. Then, it is likely that your MQ would use up more resources than it saves. Your MQ would support 2 to 3 times the payload using 10%-20% of the cost during usual times. Furthermore, in this case, if the payload/throughput are so high, how long would it take for the OAP cluster to catch up? The challenge here is that well before it catches up, the next peak times would have come.  With the analysis above in mind, why would you still want the traces to be 100%, given the resources they would cost? The preferred way to do this would be adding a better dynamic trace sampling mechanism at the backend. When throughput exceeds the threshold, gradually modify the active sampling rate from 100% to 10%, which means you could get the OAP and ES 3 times more powerful than usual, while ignoring the traces at peak times.\nIs MQ transport recommended despite its side effects? Even though MQ transport is not recommended from the production perspective, SkyWalking still provides optional plugins named kafka-reporter and kafka-fetcher for this feature since 8.1.0.\nHow about MQ metrics data exporter? The answer is that the MQ metrics data exporter is already readily available. The exporter module with gRPC default mechanism is there, and you can easily provide a new implementor of this module.\n","title":"Why doesn't SkyWalking involve MQ in its architecture?","url":"/docs/main/v9.7.0/en/faq/why_mq_not_involved/"},{"content":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","title":"Why is `-Djava.ext.dirs` not supported?","url":"/docs/skywalking-java/latest/en/faq/ext-dirs/"},{"content":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. 
According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","title":"Why is `-Djava.ext.dirs` not supported?","url":"/docs/skywalking-java/next/en/faq/ext-dirs/"},{"content":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","title":"Why is `-Djava.ext.dirs` not supported?","url":"/docs/skywalking-java/v9.0.0/en/faq/ext-dirs/"},{"content":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. 
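For the migration above, the change is usually confined to the launch script: the jars that used to live in the extension directory go onto the classpath instead. A minimal sketch with hypothetical paths (note that -cp is ignored when -jar is used, so the main class is named explicitly in the second form):

# before (JDK 8 only): extension class loader
java -Djava.ext.dirs=/opt/app/ext -javaagent:/opt/skywalking/agent/skywalking-agent.jar -jar app.jar

# after (JDK 8 and 11+): the same jars via the classpath
java -cp "/opt/app/ext/*:app.jar" -javaagent:/opt/skywalking/agent/skywalking-agent.jar com.example.Main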
When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","title":"Why is `-Djava.ext.dirs` not supported?","url":"/docs/skywalking-java/v9.1.0/en/faq/ext-dirs/"},{"content":"Why is -Djava.ext.dirs not supported? -Djava.ext.dirs provides the extension class loader mechanism which was introduced in JDK 1.2, which was released in 1998. According to JEP 220: Modular Run-Time Images, it ends in JDK 9, to simplify both the Java SE Platform and the JDK we have removed the extension mechanism, including the java.ext.dirs system property and the lib/ext directory.\nThis JEP has been applied since JDK11, which is the most active LTS JDK version. When use -Djava.ext.dirs in JDK11+, the JVM would not be able to boot with following error.\n\u0026lt;JAVA_HOME\u0026gt;/lib/ext exists, extensions mechanism no longer supported; Use -classpath instead. .Error: Could not create the Java Virtual Machine. Error: A fatal exception has occurred. Program will exit. So, SkyWalking agent would not support the extension class loader mechanism.\nHow to resolve this issue? If you are using JDK8 and -Djava.ext.dirs, follow the JRE recommendations, Use -classpath instead. This should be a transparent change, which only affects your booting script.\nAlso, if you insist on keeping using -Djava.ext.dirs, the community had a pull request, which leverages the bootstrap instrumentation core of the agent to support the extension class loader.\nIn theory, this should work, but the SkyWalking doesn\u0026rsquo;t officially verify it before noticing the above JEP. You could take it as a reference.\nThe official recommendation still keeps as Use -classpath instead.\n","title":"Why is `-Djava.ext.dirs` not supported?","url":"/docs/skywalking-java/v9.2.0/en/faq/ext-dirs/"},{"content":"Why is Clickhouse or Loki or xxx not supported as a storage option? Background In the past several years, community users have asked why Clickhouse, Loki, or some other storage is not supported in the upstream. We have repeated the answer many times, but it is still happening, at here, I would like to write down the summary to help people understand more\nPrevious Discussions All the following issues were about discussing new storage extension topics.\n Loki as storage  https://github.com/apache/skywalking/discussions/9836   ClickHouse  https://github.com/apache/skywalking/issues/11924 https://github.com/apache/skywalking/discussions/9011   Vertica  https://github.com/apache/skywalking/discussions/8817    Generally, all those asking are about adding a new kind of storage.\nWhy they don\u0026rsquo;t exist ? 
First of all, WHY is not a suitable question. SkyWalking is a volunteer-driven community, the volunteers build this project including bug fixes, maintenance work, and new features from their personal and employer interests. What you saw about the current status is the combination of all those interests rather than responsibilities. So, in SkyWalking, anything you saw existing is/was someone\u0026rsquo;s interest and contributed to upstream.\nThis logic is the same as this question, SkyWalking active maintainers are focusing on JDBC(MySQL and PostgreSQL ecosystem) Database and Elasticsearch for existing users, and moving forward on BanyanDB as the native one. We for now don\u0026rsquo;t have people interested in ClickHouse or any other database. That is why they are not there.\nHow could add one? To add a new feature, including a new storage plugin, you should go through SWIP - SkyWalking Improvement Proposal workflow, and have a full discussion with the maintenance team. SkyWalking has a pluggable storage system, so, ideally new storage option is possible to implement a new provider for the storage module. Meanwhile, in practice, as storage implementation should be in high performance and well optimized, considering our experiences with JDBC and Elasticsearch implementations, some flags and annotations may need to be added in the kernel level and data model declarations.\nFurthermore, as current maintainers are not a fun of Clickhouse or others(otherwise, you should have seen those implementations), they are not going to be involved in the code implementations and they don\u0026rsquo;t know much more from a general perspective about which kind of implementation in that specific database will have a better behavior and performance. So, if you want to propose this to upstream, you should be very experienced in that database, and have enough scale and environments to provide solid benchmark.\nWhat happens next if the new implementation gets accepted/merged/released? Who proposed this new implementation(such as clickhouse storage), has to take the responsibilities of the maintenance. The maintenance means they need to\n Join storage relative discussion to make sure SkyWalking can move forward on a kernel-level optimization without being blocked by these specific storage options. Respond to this storage relative questions, bugs, CVEs, and performance issues. Make the implementation performance match the expectation of the original proposal. Such as, about clickhouse, people are talking about how they are faster and have higher efficiency than Elasticsearch for large-scale deployments. Then we should always be able to see it has better benchmark and product side practice.  Even if the storage gets accepted/merged/released, but no one can\u0026rsquo;t take the above responsibilities or the community doesn\u0026rsquo;t receive the feedback and questions about those storages, SkyWalking PMC(Project Management Committee) will start the process to remove the implementations. This happened before for Apache IoTDB and InfluxDB storage options. 
Here is the last vote about this,\n https://github.com/apache/skywalking/discussions/9059  ","title":"Why is Clickhouse or Loki or xxx not supported as a storage option?","url":"/docs/main/next/en/faq/why-clickhouse-not-supported/"},{"content":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","title":"Windows Monitoring","url":"/docs/main/latest/en/setup/backend/backend-win-monitoring/"},{"content":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . 
This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","title":"Windows Monitoring","url":"/docs/main/next/en/setup/backend/backend-win-monitoring/"},{"content":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via the OpenCensus gRPC Exporter or OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. 
Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","title":"Windows Monitoring","url":"/docs/main/v9.4.0/en/setup/backend/backend-win-monitoring/"},{"content":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. 
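As a reference for the collector setup described above, here is a minimal sketch of an otel-collector-config.yaml that scrapes windows_exporter and forwards the metrics to the OAP over OTLP gRPC. The scrape target and the OAP address are assumptions to adjust; 9182 is windows_exporter's default port:

receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: windows-exporter
          static_configs:
            - targets: ['windows-host:9182']   # assumed host; windows_exporter default port
exporters:
  otlp:
    endpoint: oap.skywalking:11800   # OAP gRPC service address
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp]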
The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","title":"Windows Monitoring","url":"/docs/main/v9.5.0/en/setup/backend/backend-win-monitoring/"},{"content":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","title":"Windows Monitoring","url":"/docs/main/v9.6.0/en/setup/backend/backend-win-monitoring/"},{"content":"Windows Monitoring SkyWalking leverages Prometheus windows_exporter to collect metrics data from the Windows and leverages OpenTelemetry Collector to transfer the metrics to OpenTelemetry receiver and into the Meter System. Windows entity as a Service in OAP and on the Layer: OS_WINDOWS.\nData flow For OpenTelemetry receiver:\n The Prometheus windows_exporter collects metrics data from the VMs. The OpenTelemetry Collector fetches metrics from windows_exporter via Prometheus Receiver and pushes metrics to the SkyWalking OAP Server via OpenTelemetry gRPC exporter. The SkyWalking OAP Server parses the expression with MAL to filter/calculate/aggregate and store the results.  Setup For OpenTelemetry receiver:\n Setup Prometheus windows_exporter. Setup OpenTelemetry Collector . 
This is an example for OpenTelemetry Collector configuration otel-collector-config.yaml. Config SkyWalking OpenTelemetry receiver.  Supported Metrics    Monitoring Panel Unit Metric Name Description Data Source     CPU Usage % meter_win_cpu_total_percentage The total percentage usage of the CPU core. If there are 2 cores, the maximum usage is 200%. Prometheus windows_exporter   Memory RAM Usage MB meter_win_memory_used The total RAM usage Prometheus windows_exporter   Memory Swap Usage % meter_win_memory_swap_percentage The percentage usage of swap memory Prometheus windows_exporter   CPU Average Used % meter_win_cpu_average_used The percentage usage of the CPU core in each mode Prometheus windows_exporter   Memory RAM MB meter_win_memory_total\nmeter_win_memory_available\nmeter_win_memory_used The RAM statistics, including Total / Available / Used Prometheus windows_exporter   Memory Swap MB meter_win_memory_swap_free\nmeter_win_memory_swap_total Swap memory statistics, including Free / Total Prometheus windows_exporter   Disk R/W KB/s meter_win_disk_read,meter_win_disk_written The disk read and written Prometheus windows_exporter   Network Bandwidth Usage KB/s meter_win_network_receive\nmeter_win_network_transmit The network receive and transmit Prometheus windows_exporter    Customizing You can customize your own metrics/expression/dashboard panel. The metrics definition and expression rules are found in /config/otel-rules/windows.yaml. The dashboard panel confirmations are found in /config/ui-initialized-templates/os_windows.\n","title":"Windows Monitoring","url":"/docs/main/v9.7.0/en/setup/backend/backend-win-monitoring/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. 
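The Istio job configuration quoted above loses its indentation in this index; laid out as conventional YAML it reads roughly as follows. The cluster name and OAP address remain placeholders to replace, and if the logging exporter is kept in the pipeline it must also be declared under exporters:

receivers:
  prometheus:
    config:
      scrape_configs:
        - job_name: 'istiod-monitor'
          kubernetes_sd_configs:
            - role: endpoints
          relabel_configs:
            - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
              action: keep
              regex: istiod;http-monitoring
            - action: labelmap
              regex: __meta_kubernetes_service_label_(.+)
            - source_labels: []
              target_label: cluster
              replacement: your-cluster        # replace this with your cluster name
exporters:
  otlp:
    endpoint: oap.skywalking:11800             # replace this with the OAP gRPC service address
    tls:
      insecure: true
service:
  pipelines:
    metrics:
      receivers: [prometheus]
      exporters: [otlp, logging]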
You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/latest/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/next/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in the Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which is then processed and sent to SkyWalking backend.\nFollow the steps in Getting Started in OpenTelemetry Collector to deploy this collector. There are several components available in the collector, and they could be combined for different use cases. For the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. 
You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNOTE: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.0.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. 
Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.1.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.2.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. 
Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.3.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploying SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics. otel-receiver is disabled by default. Set env var SW_OTEL_RECEIVER to default to enable it.\nDeploying OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nFor the sake of brevity, we use the Prometheus receiver to retrieve metrics from Istio control and data plane, then send them to SkyWalking by OpenCensus exporter.\nPrometheus Receiver Refer to Prometheus Receiver to set up this receiver. 
You could find more configuration details in Prometheus Integration of Istio to figure out how to direct Prometheus Receiver to query Istio metrics.\nSkyWalking supports receiving multi-cluster metrics in a single OAP cluster. A cluster label should be appended to every metric fetched by this receiver even if there\u0026rsquo;s only a single cluster needed to be collected. You could use relabel to add it, like this:\nrelabel_configs: - source_labels: [] target_label: cluster replacement: \u0026lt;cluster name\u0026gt; or you can do so through Resource Processor:\nprocessors: resource: attributes: - key: cluster value: \u0026quot;\u0026lt;cluster name\u0026gt;\u0026quot; action: upsert Note: If you try the sample Istio Prometheus Kubernetes configuration, you may experience an issue. Try to fix it using the solution described in the issue.\nOpenCensus exporter Follow OpenCensus exporter configuration to set up a connection between OpenTelemetry Collector and OAP cluster. endpoint is the address of the OAP gRPC service.\nObserving Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.4.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. 
You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.5.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.6.0/en/setup/istio/readme/"},{"content":"Working with Istio This document provides instructions on transporting Istio\u0026rsquo;s metrics to the SkyWalking OAP server.\nPrerequisites Istio should be installed in a Kubernetes cluster. Simply follow the steps in Getting Started in Istio.\nDeploy SkyWalking backend Follow the steps in deploying backend in Kubernetes to install the OAP server in the Kubernetes cluster. Refer to OpenTelemetry receiver to ingest metrics.\nDeploy OpenTelemetry Collector OpenTelemetry Collector is the location where Istio telemetry sends metrics, which are then processed and shipped to SkyWalking backend.\nTo deploy this collector, follow the steps in Getting Started in OpenTelemetry Collector. 
Several components are available in the collector, and they could be combined for different use cases.\nAfter installing the collector, you may configure it to scrape metrics from Istio and send them to SkyWalking backend.\nThe job configuration to scrape metrics from Istio and send them to SkyWalking backend is as follows:\nreceivers:prometheus:config:scrape_configs:- job_name:\u0026#39;istiod-monitor\u0026#39;kubernetes_sd_configs:- role:endpointsrelabel_configs:- source_labels:[__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name ]action:keepregex:istiod;http-monitoring- action:labelmapregex:__meta_kubernetes_service_label_(.+)- source_labels:[]target_label:clusterreplacement:your-cluster# replace this with your cluster nameexporters:otlp:endpoint:oap.skywalking:11800# replace this with the OAP gRPC service addresstls:insecure:trueservice:pipelines:metrics:receivers:[prometheus ]exporters:[otlp,logging ]Observing Istio Open Istio Dashboard in SkyWaling UI by clicking Dashboard -\u0026gt; Istio. You can then view charts and diagrams generated by Istio metrics. You may also view them through swctl and set up alarm rules based on them.\nNote: If you would like to see metrics of Istio managed services, including topology, you may try our ALS solution.\n","title":"Working with Istio","url":"/docs/main/v9.7.0/en/setup/istio/readme/"},{"content":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running well across multiple versions. If you haven\u0026rsquo;t started developing your plugin yet, please read this Plugin Development Guide first.\nDeveloping a plugin involves the following steps:\n Create a new module: Please create a new module in the specified directory, and it is recommended to name the module the same as the plugin for easy reference. Write the configuration file: This file serves as the declaration file for the plugin, and test cases would be run based on this file. Write the test code: Simulate the actual service operation, including the plugin you want to test. Test execution: Check if the plugin is running properly.  Write Configuration File The configuration file is used to define the basic information of the test plugin. You can use the gin plugin configuration file as an example to write your own. It includes the following information:\n entry-service: The test HTTP service entry URL. When this address is accessed, the plugin code should be triggered. health-checker: Executed before the entry-service is accessed to ensure that the service starts without any issues. Status code of 200 is considered a successful service start. start-script: The script execution file path. Please compile and start the service in this file. framework: The access address of the current framework to be tested. During testing, this address would be used to switch between different framework versions. export-port: The port number for the external service entry. support-version: The version information supported by the current plugin.  go: The supported Golang language version for the current plugin. framework: A list of plugin version information. It would be used to switch between multiple framework versions.   dependencies: If your program relies on certain containers, please declare them here. The syntax is largely similar to the services in docker-compose.  image: The image name of service. hostname: The hostname of the container which deployed. port: The port list of the container which deployed. 
expose: The export port list of the container which deployed. environment: The environment variables of the container which deployed. command: The start command of the container. healthcheck: The health check command of the container. If the service defines a healthcheck, then the service being tested would depend on the current service\u0026rsquo;s service_healthy status. Otherwise, it depends on the service_started status.    URL Access When the service address is accessed, please use ${HTTP_HOST} and ${HTTP_PORT} to represent the domain name and port number to be accessed. The port number corresponds to the export-port field.\nStart Script The startup script is used to compile and execute the program.\nWhen starting, please add the ${GO_BUILD_OPTS} parameter, which specifies the Go Agent program information for hybrid compilation.\nWhen starting, just let the program keep running.\nVersion Matrix Multi-version support is a crucial step in plugin testing. It can test whether the plugin runs stably across multiple framework versions and go versions.\nPlugin testing would use the go get command to modify the plugin version. Please make sure you have filled in the correct framework and support-version.framework. The format is: ${framework}@${support-version.framework}\nDuring plugin execution, the specified official Golang image would be used, allowing the plugin to run in the designated Golang version.\nExcepted File For each plugin, you need to define the config/expected.yml file, which is used to define the observable data generated after the plugin runs. After the plugin runs, this file would be used to validate the data.\nPlease refer to the documentation to write this file.\nWrite Test Code In the test code, please start an HTTP service and expose the following two interfaces:\n Check service: Used to ensure that the service is running properly. This corresponds to the health-checker address in configuration. Entry service: Write the complete framework business logic at this address. Validate all the features provided by the plugin as much as possible. This corresponds to the entry-service address in configuration.  The test code, like a regular program, needs to import the github.com/apache/skywalking-go package.\nTest Execution Once you have completed the plugin configuration and test code writing, you can proceed to test the framework. Please follow these steps:\n Build tools: Execute the make build command in the test/plugins directory. It would generate some tools needed for testing in the dist folder of this directory. Run the plugin locally: Start the plugin test program and iterate through all framework versions for testing on your local environment. Add to GitHub Action: Fill in the name of the test plugin in this file, and the plugin test would be executed and validated each time a pull request is submitted.  Run the Plugin Test Locally Please execute the run.sh script in the test/plugins directory and pass in the name of the plugin you wrote (the folder name). At this point, the script would read the configuration file of the plugin test and create a workspace directory in this location for temporarily storing files generated by each plugin. Finally, it would start the test code and validate the data sequentially according to the supported version information.\nThe script supports the following two parameters:\n \u0026ndash;clean: Clean up the files and containers generated by the current running environment. \u0026ndash;debug: Enable debug mode for plugin testing. 
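As a rough sketch only, a plugin test configuration covering the fields described above might look like the following; every concrete value here (URLs, script path, framework module, port, and version numbers) is hypothetical, and the exact nesting should be checked against the gin plugin configuration file referenced above:

entry-service: http://${HTTP_HOST}:${HTTP_PORT}/execute   # hypothetical entry URL that triggers the plugin code
health-checker: http://${HTTP_HOST}:${HTTP_PORT}/health    # hypothetical check; a 200 response means the service started
start-script: ./bin/startup.sh                             # hypothetical script that compiles and starts the service
framework: github.com/gin-gonic/gin                        # module whose version is switched during testing
export-port: 8080                                          # hypothetical external service port
support-version:
  - go: "1.19"                                             # hypothetical supported Golang version
    framework:                                             # hypothetical framework versions to iterate over
      - v1.8.0
      - v1.9.0
dependencies:                                              # optional, docker-compose-like service dependencies
  redis:                                                   # hypothetical dependency
    image: redis:7.0
    hostname: redis
    port:
      - 6379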
In this mode, the content generated by each framework in the workspace would not be cleared, and the temporary files generated during hybrid compilation would be saved.  ","title":"Write Plugin Test","url":"/docs/skywalking-go/latest/en/development-and-contribution/write-plugin-testing/"},{"content":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running well across multiple versions. If you haven\u0026rsquo;t started developing your plugin yet, please read this Plugin Development Guide first.\nDeveloping a plugin involves the following steps:\n Create a new module: Please create a new module in the specified directory, and it is recommended to name the module the same as the plugin for easy reference. Write the configuration file: This file serves as the declaration file for the plugin, and test cases would be run based on this file. Write the test code: Simulate the actual service operation, including the plugin you want to test. Test execution: Check if the plugin is running properly.  Write Configuration File The configuration file is used to define the basic information of the test plugin. You can use the gin plugin configuration file as an example to write your own. It includes the following information:\n entry-service: The test HTTP service entry URL. When this address is accessed, the plugin code should be triggered. health-checker: Executed before the entry-service is accessed to ensure that the service starts without any issues. Status code of 200 is considered a successful service start. start-script: The script execution file path. Please compile and start the service in this file. framework: The access address of the current framework to be tested. During testing, this address would be used to switch between different framework versions. export-port: The port number for the external service entry. support-version: The version information supported by the current plugin.  go: The supported Golang language version for the current plugin. framework: A list of plugin version information. It would be used to switch between multiple framework versions.   dependencies: If your program relies on certain containers, please declare them here. The syntax is largely similar to the services in docker-compose.  image: The image name of service. hostname: The hostname of the container which deployed. port: The port list of the container which deployed. expose: The export port list of the container which deployed. environment: The environment variables of the container which deployed. command: The start command of the container. healthcheck: The health check command of the container. If the service defines a healthcheck, then the service being tested would depend on the current service\u0026rsquo;s service_healthy status. Otherwise, it depends on the service_started status.    URL Access When the service address is accessed, please use ${HTTP_HOST} and ${HTTP_PORT} to represent the domain name and port number to be accessed. The port number corresponds to the export-port field.\nStart Script The startup script is used to compile and execute the program.\nWhen starting, please add the ${GO_BUILD_OPTS} parameter, which specifies the Go Agent program information for hybrid compilation.\nWhen starting, just let the program keep running.\nVersion Matrix Multi-version support is a crucial step in plugin testing. 
It can test whether the plugin runs stably across multiple framework versions and go versions.\nPlugin testing would use the go get command to modify the plugin version. Please make sure you have filled in the correct framework and support-version.framework. The format is: ${framework}@${support-version.framework}\nDuring plugin execution, the specified official Golang image would be used, allowing the plugin to run in the designated Golang version.\nExcepted File For each plugin, you need to define the config/expected.yml file, which is used to define the observable data generated after the plugin runs. After the plugin runs, this file would be used to validate the data.\nPlease refer to the documentation to write this file.\nWrite Test Code In the test code, please start an HTTP service and expose the following two interfaces:\n Check service: Used to ensure that the service is running properly. This corresponds to the health-checker address in configuration. Entry service: Write the complete framework business logic at this address. Validate all the features provided by the plugin as much as possible. This corresponds to the entry-service address in configuration.  The test code, like a regular program, needs to import the github.com/apache/skywalking-go package.\nTest Execution Once you have completed the plugin configuration and test code writing, you can proceed to test the framework. Please follow these steps:\n Build tools: Execute the make build command in the test/plugins directory. It would generate some tools needed for testing in the dist folder of this directory. Run the plugin locally: Start the plugin test program and iterate through all framework versions for testing on your local environment. Add to GitHub Action: Fill in the name of the test plugin in this file, and the plugin test would be executed and validated each time a pull request is submitted.  Run the Plugin Test Locally Please execute the run.sh script in the test/plugins directory and pass in the name of the plugin you wrote (the folder name). At this point, the script would read the configuration file of the plugin test and create a workspace directory in this location for temporarily storing files generated by each plugin. Finally, it would start the test code and validate the data sequentially according to the supported version information.\nThe script supports the following two parameters:\n \u0026ndash;clean: Clean up the files and containers generated by the current running environment. \u0026ndash;debug: Enable debug mode for plugin testing. In this mode, the content generated by each framework in the workspace would not be cleared, and the temporary files generated during hybrid compilation would be saved.  ","title":"Write Plugin Test","url":"/docs/skywalking-go/next/en/development-and-contribution/write-plugin-testing/"},{"content":"Write Plugin Test Writing plugin test cases can greatly help you determine if your plugin is running well across multiple versions. If you haven\u0026rsquo;t started developing your plugin yet, please read this Plugin Development Guide first.\nDeveloping a plugin involves the following steps:\n Create a new module: Please create a new module in the specified directory, and it is recommended to name the module the same as the plugin for easy reference. Write the configuration file: This file serves as the declaration file for the plugin, and test cases would be run based on this file. 
Write the test code: Simulate the actual service operation, including the plugin you want to test. Test execution: Check if the plugin is running properly.  Write Configuration File The configuration file is used to define the basic information of the test plugin. You can use the gin plugin configuration file as an example to write your own. It includes the following information:\n entry-service: The test HTTP service entry URL. When this address is accessed, the plugin code should be triggered. health-checker: Executed before the entry-service is accessed to ensure that the service starts without any issues. Status code of 200 is considered a successful service start. start-script: The script execution file path. Please compile and start the service in this file. framework: The access address of the current framework to be tested. During testing, this address would be used to switch between different framework versions. export-port: The port number for the external service entry. support-version: The version information supported by the current plugin.  go: The supported Golang language version for the current plugin. framework: A list of plugin version information. It would be used to switch between multiple framework versions.   dependencies: If your program relies on certain containers, please declare them here. The syntax is largely similar to the services in docker-compose.  image: The image name of service. hostname: The hostname of the container which deployed. port: The port list of the container which deployed. expose: The export port list of the container which deployed. environment: The environment variables of the container which deployed. command: The start command of the container. healthcheck: The health check command of the container. If the service defines a healthcheck, then the service being tested would depend on the current service\u0026rsquo;s service_healthy status. Otherwise, it depends on the service_started status.    URL Access When the service address is accessed, please use ${HTTP_HOST} and ${HTTP_PORT} to represent the domain name and port number to be accessed. The port number corresponds to the export-port field.\nStart Script The startup script is used to compile and execute the program.\nWhen starting, please add the ${GO_BUILD_OPTS} parameter, which specifies the Go Agent program information for hybrid compilation.\nWhen starting, just let the program keep running.\nVersion Matrix Multi-version support is a crucial step in plugin testing. It can test whether the plugin runs stably across multiple framework versions and go versions.\nPlugin testing would use the go get command to modify the plugin version. Please make sure you have filled in the correct framework and support-version.framework. The format is: ${framework}@${support-version.framework}\nDuring plugin execution, the specified official Golang image would be used, allowing the plugin to run in the designated Golang version.\nExcepted File For each plugin, you need to define the config/expected.yml file, which is used to define the observable data generated after the plugin runs. After the plugin runs, this file would be used to validate the data.\nPlease refer to the documentation to write this file.\nWrite Test Code In the test code, please start an HTTP service and expose the following two interfaces:\n Check service: Used to ensure that the service is running properly. This corresponds to the health-checker address in configuration. 
Entry service: Write the complete framework business logic at this address. Validate all the features provided by the plugin as much as possible. This corresponds to the entry-service address in configuration.  The test code, like a regular program, needs to import the github.com/apache/skywalking-go package.\nTest Execution Once you have completed the plugin configuration and test code writing, you can proceed to test the framework. Please follow these steps:\n Build tools: Execute the make build command in the test/plugins directory. It would generate some tools needed for testing in the dist folder of this directory. Run the plugin locally: Start the plugin test program and iterate through all framework versions for testing on your local environment. Add to GitHub Action: Fill in the name of the test plugin in this file, and the plugin test would be executed and validated each time a pull request is submitted.  Run the Plugin Test Locally Please execute the run.sh script in the test/plugins directory and pass in the name of the plugin you wrote (the folder name). At this point, the script would read the configuration file of the plugin test and create a workspace directory in this location for temporarily storing files generated by each plugin. Finally, it would start the test code and validate the data sequentially according to the supported version information.\nThe script supports the following two parameters:\n \u0026ndash;clean: Clean up the files and containers generated by the current running environment. \u0026ndash;debug: Enable debug mode for plugin testing. In this mode, the content generated by each framework in the workspace would not be cleared, and the temporary files generated during hybrid compilation would be saved.  ","title":"Write Plugin Test","url":"/docs/skywalking-go/v0.4.0/en/development-and-contribution/write-plugin-testing/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
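For readability, the rule-file schema quoted in these Zabbix Receiver entries can be laid out as conventional YAML. The nesting below follows the field descriptions (check the zabbix-rules files shipped with the OAP for the authoritative layout), and square brackets still mark optional parameters:

# initExp is the expression that initializes the current configuration file
initExp: <string>
# insert metricPrefix into metric name: <metricPrefix>_<raw_metric_name>
metricPrefix: <string>
# expPrefix is executed before the metrics execute other functions.
expPrefix: <string>
# expSuffix is appended to all expressions in this file.
expSuffix: <string>
# Datasource from Zabbix Item keys.
requiredZabbixItemKeys:
  - <zabbix item keys>
# Supported agent entities information.
entities:
  # Allow hostname patterns to build metrics.
  hostPatterns:
    - <regex string>
  # Customized metrics labels before parsing to the meter system.
  labels:
    [ - <labels> ]
# Metrics rules allow you to recompute queries.
metrics:
  [ - <metrics_rules> ]

<labels>:
  # Define the label name. The label value must be queried from the `value` or `fromItem` attribute.
  name: <string>
  # Appoint a value to the label.
  [ value: <string> ]
  # Query the label value from a Zabbix Agent Item key.
  [ fromItem: <string> ]

<metric_rules>:
  # The name of the rule, which combines with the prefix 'meter_' as the index/table name in storage.
  name: <string>
  # MAL expression.
  exp: <string>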
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/latest/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. 
The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/next/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You could find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.0.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. 
If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.1.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.2.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. 
The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.3.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.4.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. 
Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.5.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. 
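For readability, the receiver-zabbix module definition quoted in these entries, laid out as conventional YAML:

receiver-zabbix:
  selector: ${SW_RECEIVER_ZABBIX:default}
  default:
    # Exported TCP port that the Zabbix agent can connect to and transport data over
    port: 10051
    # Bind to host
    host: 0.0.0.0
    # Rule files enabled when receiving agent requests
    activeFiles: agent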
You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.6.0/en/setup/backend/backend-zabbix/"},{"content":"Zabbix Receiver The Zabbix receiver accepts metrics of Zabbix Agent Active Checks protocol format into the Meter System. Zabbix Agent is based on GPL-2.0 License.\nModule definition receiver-zabbix:selector:${SW_RECEIVER_ZABBIX:default}default:# Export tcp port, Zabbix agent could connected and transport dataport:10051# Bind to hosthost:0.0.0.0# Enable config when receive agent requestactiveFiles:agentConfiguration file The Zabbix receiver is configured via a configuration file that defines everything related to receiving from agents, as well as which rule files to load.\nThe OAP can load the configuration at bootstrap. If the new configuration is not well-formed, the OAP fails to start up. The files are located at $CLASSPATH/zabbix-rules.\nThe file is written in YAML format, defined by the scheme described below. Square brackets indicate that a parameter is optional.\nAn example for Zabbix agent configuration could be found here. You can find details on Zabbix agent items from Zabbix Agent documentation.\nConfiguration file # initExp is the expression that initializes the current configuration fileinitExp:\u0026lt;string\u0026gt;# insert metricPrefix into metric name: \u0026lt;metricPrefix\u0026gt;_\u0026lt;raw_metric_name\u0026gt;metricPrefix:\u0026lt;string\u0026gt;# expPrefix is executed before the metrics executes other functions.expPrefix:\u0026lt;string\u0026gt;# expSuffix is appended to all expression in this file.expSuffix:\u0026lt;string\u0026gt;# Datasource from Zabbix Item keys.requiredZabbixItemKeys:- \u0026lt;zabbix item keys\u0026gt;# Support agent entities information.entities:# Allow hostname patterns to build metrics.hostPatterns:- \u0026lt;regex string\u0026gt;# Customized metrics label before parse to meter system.labels:[- \u0026lt;labels\u0026gt; ]# Metrics rule allow you to recompute queries.metrics:[- \u0026lt;metrics_rules\u0026gt; ] # Define the label name. 
The label value must query from `value` or `fromItem` attribute.name:\u0026lt;string\u0026gt;# Appoint value to label.[value:\u0026lt;string\u0026gt;]# Query label value from Zabbix Agent Item key.[fromItem:\u0026lt;string\u0026gt;]\u0026lt;metric_rules\u0026gt; # The name of rule, which combinates with a prefix \u0026#39;meter_\u0026#39; as the index/table name in storage.name:\u0026lt;string\u0026gt;# MAL expression.exp:\u0026lt;string\u0026gt;For more on MAL, please refer to mal.md.\n","title":"Zabbix Receiver","url":"/docs/main/v9.7.0/en/setup/backend/backend-zabbix/"},{"content":"Zend observer  Refer to: https://www.datadoghq.com/blog/engineering/php-8-observability-baked-right-in/#the-observability-landscape-before-php-8\n By default, skywalking-php hooks the zend_execute_internal and zend_execute_ex functions to implement auto instrumentation.\nBut there are some drawbacks:\n All PHP function calls are placed on the native C stack, which is limited by the value set in ulimit -s. Not compatible with the new JIT added in PHP 8.  The observer API in PHP 8+ Now, zend observer api is a new generation method, and it is also a method currently recommended by PHP8.\nThis method has no stack problem and will not affect JIT.\nConfiguration The following configuration example enables JIT in PHP8 and zend observer support in skywalking-php at the same time.\n[opcache] zend_extension = opcache ; Enable JIT opcache.jit = tracing [skywalking_agent] extension = skywalking_agent.so ; Switch to use zend observer api to implement auto instrumentation. skywalking_agent.enable_zend_observer = On ","title":"Zend observer","url":"/docs/skywalking-php/latest/en/configuration/zend-observer/"},{"content":"Zend observer  Refer to: https://www.datadoghq.com/blog/engineering/php-8-observability-baked-right-in/#the-observability-landscape-before-php-8\n By default, skywalking-php hooks the zend_execute_internal and zend_execute_ex functions to implement auto instrumentation.\nBut there are some drawbacks:\n All PHP function calls are placed on the native C stack, which is limited by the value set in ulimit -s. Not compatible with the new JIT added in PHP 8.  The observer API in PHP 8+ Now, zend observer api is a new generation method, and it is also a method currently recommended by PHP8.\nThis method has no stack problem and will not affect JIT.\nConfiguration The following configuration example enables JIT in PHP8 and zend observer support in skywalking-php at the same time.\n[opcache] zend_extension = opcache ; Enable JIT opcache.jit = tracing [skywalking_agent] extension = skywalking_agent.so ; Switch to use zend observer api to implement auto instrumentation. skywalking_agent.enable_zend_observer = On ","title":"Zend observer","url":"/docs/skywalking-php/next/en/configuration/zend-observer/"},{"content":"Zend observer  Refer to: https://www.datadoghq.com/blog/engineering/php-8-observability-baked-right-in/#the-observability-landscape-before-php-8\n By default, skywalking-php hooks the zend_execute_internal and zend_execute_ex functions to implement auto instrumentation.\nBut there are some drawbacks:\n All PHP function calls are placed on the native C stack, which is limited by the value set in ulimit -s. Not compatible with the new JIT added in PHP 8.  
The observer API in PHP 8+ Now, zend observer api is a new generation method, and it is also a method currently recommended by PHP8.\nThis method has no stack problem and will not affect JIT.\nConfiguration The following configuration example enables JIT in PHP8 and zend observer support in skywalking-php at the same time.\n[opcache] zend_extension = opcache ; Enable JIT opcache.jit = tracing [skywalking_agent] extension = skywalking_agent.so ; Switch to use zend observer api to implement auto instrumentation. skywalking_agent.enable_zend_observer = On ","title":"Zend observer","url":"/docs/skywalking-php/v0.7.0/en/configuration/zend-observer/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. 
It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","title":"Zipkin receiver","url":"/docs/main/latest/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. 
If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","title":"Zipkin receiver","url":"/docs/main/next/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation. It supports Zipkin v1/v2 formats through HTTP service. Make sure you use this with SW_STORAGE=zipkin-elasticsearch option to activate Zipkin storage implementation. Once this receiver and storage are activated, SkyWalking\u0026rsquo;s native traces would be ignored, and SkyWalking wouldn\u0026rsquo;t analyze topology, metrics, and endpoint dependency from Zipkin\u0026rsquo;s trace.\nUse the following config to activate it.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:-}default:host:${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}port:${SW_RECEIVER_ZIPKIN_PORT:9411}contextPath:${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}jettyMinThreads:${SW_RECEIVER_ZIPKIN_JETTY_MIN_THREADS:1}jettyMaxThreads:${SW_RECEIVER_ZIPKIN_JETTY_MAX_THREADS:200}jettyIdleTimeOut:${SW_RECEIVER_ZIPKIN_JETTY_IDLE_TIMEOUT:30000}jettyAcceptorPriorityDelta:${SW_RECEIVER_ZIPKIN_JETTY_DELTA:0}jettyAcceptQueueSize:${SW_RECEIVER_ZIPKIN_QUEUE_SIZE:0}NOTE: Zipkin receiver requires zipkin-elasticsearch storage implementation to be activated. Read this doc to learn about Zipkin as a storage option.\n","title":"Zipkin receiver","url":"/docs/main/v9.0.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. 
It supports Zipkin v1/v2 formats through the HTTP service.\nUse the following config to activate it.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI. Notice: Zipkin query API implementation does not support BanyanDB yet.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}","title":"Zipkin receiver","url":"/docs/main/v9.1.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. 
If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI. Notice: Zipkin query API implementation does not support BanyanDB yet.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}","title":"Zipkin receiver","url":"/docs/main/v9.2.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. 
If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}","title":"Zipkin receiver","url":"/docs/main/v9.3.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. 
If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","title":"Zipkin receiver","url":"/docs/main/v9.4.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nUse the following config to activate it. 
Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","title":"Zipkin receiver","url":"/docs/main/v9.5.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. 
It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. 
Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","title":"Zipkin receiver","url":"/docs/main/v9.6.0/en/setup/backend/zipkin-trace/"},{"content":"Zipkin receiver The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for collecting traces. It supports Zipkin v1/v2 formats through the HTTP collector and Kafka collector.\nNOTICE, Zipkin trace would not be analyzed like SkyWalking native trace format.\nUse the following config to activate it. Set enableHttpCollector to enable HTTP collector and enableKafkaCollector to enable Kafka collector.\nreceiver-zipkin:selector:${SW_RECEIVER_ZIPKIN:default}default:# Defines a set of span tag keys which are searchable.# The max length of key=value should be less than 256 or will be dropped.searchableTracesTags:${SW_ZIPKIN_SEARCHABLE_TAG_KEYS:http.method}# The sample rate precision is 1/10000, should be between 0 and 10000sampleRate:${SW_ZIPKIN_SAMPLE_RATE:10000}## The below configs are for OAP collect zipkin trace from HTTPenableHttpCollector:${SW_ZIPKIN_HTTP_COLLECTOR_ENABLED:true}restHost:${SW_RECEIVER_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_RECEIVER_ZIPKIN_REST_PORT:9411}restContextPath:${SW_RECEIVER_ZIPKIN_REST_CONTEXT_PATH:/}restMaxThreads:${SW_RECEIVER_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_RECEIVER_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_RECEIVER_ZIPKIN_REST_QUEUE_SIZE:0}## The below configs are for OAP collect zipkin trace from kafkaenableKafkaCollector:${SW_ZIPKIN_KAFKA_COLLECTOR_ENABLED:true}kafkaBootstrapServers:${SW_ZIPKIN_KAFKA_SERVERS:localhost:9092}kafkaGroupId:${SW_ZIPKIN_KAFKA_Group_Id:zipkin}kafkaTopic:${SW_ZIPKIN_KAFKA_TOPIC:zipkin}# Kafka consumer config, JSON format as Properties. If it contains the same key with above, would override.kafkaConsumerConfig:${SW_ZIPKIN_KAFKA_CONSUMER_CONFIG:\u0026#34;{\\\u0026#34;auto.offset.reset\\\u0026#34;:\\\u0026#34;earliest\\\u0026#34;,\\\u0026#34;enable.auto.commit\\\u0026#34;:true}\u0026#34;}# The Count of the topic consumerskafkaConsumers:${SW_ZIPKIN_KAFKA_CONSUMERS:1}kafkaHandlerThreadPoolSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_SIZE:-1}kafkaHandlerThreadPoolQueueSize:${SW_ZIPKIN_KAFKA_HANDLER_THREAD_POOL_QUEUE_SIZE:-1}Zipkin query The Zipkin receiver makes the OAP server work as an alternative Zipkin server implementation for query traces. It implemented ZipkinQueryApiV2 through the HTTP service, supporting Zipkin-lens UI.\nUse the following config to activate it.\nquery-zipkin:selector:${SW_QUERY_ZIPKIN:default}default:# For HTTP serverrestHost:${SW_QUERY_ZIPKIN_REST_HOST:0.0.0.0}restPort:${SW_QUERY_ZIPKIN_REST_PORT:9412}restContextPath:${SW_QUERY_ZIPKIN_REST_CONTEXT_PATH:/zipkin}restMaxThreads:${SW_QUERY_ZIPKIN_REST_MAX_THREADS:200}restIdleTimeOut:${SW_QUERY_ZIPKIN_REST_IDLE_TIMEOUT:30000}restAcceptQueueSize:${SW_QUERY_ZIPKIN_REST_QUEUE_SIZE:0}# Default look back for serviceNames, remoteServiceNames and spanNames, 1 day in millislookback:${SW_QUERY_ZIPKIN_LOOKBACK:86400000}# The Cache-Control max-age (seconds) for serviceNames, remoteServiceNames and spanNamesnamesMaxAge:${SW_QUERY_ZIPKIN_NAMES_MAX_AGE:300}## The below config are OAP support for zipkin-lens UI# Default traces query max sizeuiQueryLimit:${SW_QUERY_ZIPKIN_UI_QUERY_LIMIT:10}# Default look back for search traces, 15 minutes in millisuiDefaultLookback:${SW_QUERY_ZIPKIN_UI_DEFAULT_LOOKBACK:900000}Lens UI Lens UI is Zipkin native UI. 
SkyWalking webapp has bundled it in the binary distribution. {webapp IP}:{webapp port}/zipkin is exposed and accessible for the browser. Meanwhile, Iframe UI component could be used to host Zipkin Lens UI on the SkyWalking booster UI dashboard.(link=/zipkin)\nZipkin Lens UI source codes could be found here.\n","title":"Zipkin receiver","url":"/docs/main/v9.7.0/en/setup/backend/zipkin-trace/"}]
\ No newline at end of file
diff --git a/team/index.html b/team/index.html
index 3a1bf91..571245e 100644
--- a/team/index.html
+++ b/team/index.html
@@ -7549,9 +7549,9 @@
 
           <li class="mb-2 mr-2">
             
-            <a class="link" href="https://github.com/pg-yang" target="_blank">
+            <a class="link" href="https://github.com/xu1009" target="_blank">
               <i class="iconfont icon-github"></i>
-              pg-yang
+              xu1009
             </a>
           </li>
           
@@ -7559,9 +7559,9 @@
 
           <li class="mb-2 mr-2">
             
-            <a class="link" href="https://github.com/xu1009" target="_blank">
+            <a class="link" href="https://github.com/pg-yang" target="_blank">
               <i class="iconfont icon-github"></i>
-              xu1009
+              pg-yang
             </a>
           </li>
           
@@ -7648,9 +7648,9 @@
 
           <li class="mb-2 mr-2">
             
-            <a class="link" href="https://github.com/liqiangz" target="_blank">
+            <a class="link" href="https://github.com/nisiyong" target="_blank">
               <i class="iconfont icon-github"></i>
-              liqiangz
+              nisiyong
             </a>
           </li>
           
@@ -7658,9 +7658,9 @@
 
           <li class="mb-2 mr-2">
             
-            <a class="link" href="https://github.com/nisiyong" target="_blank">
+            <a class="link" href="https://github.com/liqiangz" target="_blank">
               <i class="iconfont icon-github"></i>
-              nisiyong
+              liqiangz
             </a>
           </li>
           
@@ -7688,6 +7688,16 @@
 
           <li class="mb-2 mr-2">
             
+            <a class="link" href="https://github.com/zhangkewei" target="_blank">
+              <i class="iconfont icon-github"></i>
+              zhangkewei
+            </a>
+          </li>
+          
+          
+
+          <li class="mb-2 mr-2">
+            
             <a class="link" href="https://github.com/bai-yang" target="_blank">
               <i class="iconfont icon-github"></i>
               bai-yang
@@ -7698,9 +7708,9 @@
 
           <li class="mb-2 mr-2">
             
-            <a class="link" href="https://github.com/zhangkewei" target="_blank">
+            <a class="link" href="https://github.com/Jtrust" target="_blank">
               <i class="iconfont icon-github"></i>
-              zhangkewei
+              Jtrust
             </a>
           </li>
           
@@ -7748,6 +7758,15 @@
 
           <li class="mb-2 mr-2">
             
+            <a>
+              <i class="iconfont icon-github"></i>
+              55846420**
+            </a>
+            
+          
+
+          <li class="mb-2 mr-2">
+            
             <a class="link" href="https://github.com/TinyAllen" target="_blank">
               <i class="iconfont icon-github"></i>
               TinyAllen
@@ -7758,25 +7777,6 @@
 
           <li class="mb-2 mr-2">
             
-            <a class="link" href="https://github.com/Jtrust" target="_blank">
-              <i class="iconfont icon-github"></i>
-              Jtrust
-            </a>
-          </li>
-          
-          
-
-          <li class="mb-2 mr-2">
-            
-            <a>
-              <i class="iconfont icon-github"></i>
-              55846420**
-            </a>
-            
-          
-
-          <li class="mb-2 mr-2">
-            
             <a class="link" href="https://github.com/heihaozi" target="_blank">
               <i class="iconfont icon-github"></i>
               heihaozi
@@ -7787,46 +7787,6 @@
 
           <li class="mb-2 mr-2">
             
-            <a class="link" href="https://github.com/IluckySi" target="_blank">
-              <i class="iconfont icon-github"></i>
-              IluckySi
-            </a>
-          </li>
-          
-          
-
-          <li class="mb-2 mr-2">
-            
-            <a class="link" href="https://github.com/qxo" target="_blank">
-              <i class="iconfont icon-github"></i>
-              qxo
-            </a>
-          </li>
-          
-          
-
-          <li class="mb-2 mr-2">
-            
-            <a class="link" href="https://github.com/wendal" target="_blank">
-              <i class="iconfont icon-github"></i>
-              wendal
-            </a>
-          </li>
-          
-          
-
-          <li class="mb-2 mr-2">
-            
-            <a class="link" href="https://github.com/alonelaval" target="_blank">
-              <i class="iconfont icon-github"></i>
-              alonelaval
-            </a>
-          </li>
-          
-          
-
-          <li class="mb-2 mr-2">
-            
             <a class="link" href="https://github.com/CzyerChen" target="_blank">
               <i class="iconfont icon-github"></i>
               CzyerChen
@@ -7837,9 +7797,49 @@
 
           <li class="mb-2 mr-2">
             
-            <a class="link" href="https://github.com/zhyyu" target="_blank">
+            <a class="link" href="https://github.com/qxo" target="_blank">
               <i class="iconfont icon-github"></i>
-              zhyyu
+              qxo
+            </a>
+          </li>
+          
+          
+
+          <li class="mb-2 mr-2">
+            
+            <a class="link" href="https://github.com/IluckySi" target="_blank">
+              <i class="iconfont icon-github"></i>
+              IluckySi
+            </a>
+          </li>
+          
+          
+
+          <li class="mb-2 mr-2">
+            
+            <a class="link" href="https://github.com/alonelaval" target="_blank">
+              <i class="iconfont icon-github"></i>
+              alonelaval
+            </a>
+          </li>
+          
+          
+
+          <li class="mb-2 mr-2">
+            
+            <a class="link" href="https://github.com/wendal" target="_blank">
+              <i class="iconfont icon-github"></i>
+              wendal
+            </a>
+          </li>
+          
+          
+
+          <li class="mb-2 mr-2">
+            
+            <a class="link" href="https://github.com/tristaZero" target="_blank">
+              <i class="iconfont icon-github"></i>
+              tristaZero
             </a>
           </li>
           
@@ -7857,9 +7857,9 @@
 
           <li class="mb-2 mr-2">
             
-            <a class="link" href="https://github.com/tristaZero" target="_blank">
+            <a class="link" href="https://github.com/zhyyu" target="_blank">
               <i class="iconfont icon-github"></i>
-              tristaZero
+              zhyyu
             </a>
           </li>